author     Carlo Landmeter <clandmeter@gmail.com>  2011-03-24 13:58:29 +0000
committer  Carlo Landmeter <clandmeter@gmail.com>  2011-03-24 13:58:29 +0000
commit     7743b2793cae68e3ef70e64b67e85c7166e5aac6 (patch)
tree       240708727c71e5993948231714ce6e0a5426eca9 /main
parent     7b7af3b42a8304c2c4a6bd851dc29209e03b52e6 (diff)
download   aports-7743b2793cae68e3ef70e64b67e85c7166e5aac6.tar.bz2
           aports-7743b2793cae68e3ef70e64b67e85c7166e5aac6.tar.xz
main/{linux-scst|scstadmin}: move from testing to main
Diffstat (limited to 'main')
-rw-r--r--  main/linux-scst/0004-arp-flush-arp-cache-on-device-change.patch     29
-rw-r--r--  main/linux-scst/APKBUILD                                           148
-rw-r--r--  main/linux-scst/kernelconfig.x86_64                               4729
-rw-r--r--  main/linux-scst/scst-2.0.0.1-2.6.36.patch                        76096
-rw-r--r--  main/linux-scst/setlocalversion.patch                               11
-rw-r--r--  main/linux-scst/unionfs-2.5.7_for_2.6.36.diff                    11253
-rw-r--r--  main/scstadmin/APKBUILD                                             38
-rw-r--r--  main/scstadmin/scst-init-ash-comapt.patch                           35
8 files changed, 92339 insertions, 0 deletions
diff --git a/main/linux-scst/0004-arp-flush-arp-cache-on-device-change.patch b/main/linux-scst/0004-arp-flush-arp-cache-on-device-change.patch
new file mode 100644
index 000000000..85161ea3a
--- /dev/null
+++ b/main/linux-scst/0004-arp-flush-arp-cache-on-device-change.patch
@@ -0,0 +1,29 @@
+From 8a0e3ea4924059a7268446177d6869e3399adbb2 Mon Sep 17 00:00:00 2001
+From: Timo Teras <timo.teras@iki.fi>
+Date: Mon, 12 Apr 2010 13:46:45 +0000
+Subject: [PATCH 04/18] arp: flush arp cache on device change
+
+If IFF_NOARP is changed, we must flush the arp cache.
+
+Signed-off-by: Timo Teras <timo.teras@iki.fi>
+---
+ net/ipv4/arp.c | 3 +++
+ 1 files changed, 3 insertions(+), 0 deletions(-)
+
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index 4e80f33..580bfc3 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -1200,6 +1200,9 @@ static int arp_netdev_event(struct notifier_block *this, unsigned long event, vo
+ neigh_changeaddr(&arp_tbl, dev);
+ rt_cache_flush(dev_net(dev), 0);
+ break;
++ case NETDEV_CHANGE:
++ neigh_changeaddr(&arp_tbl, dev);
++ break;
+ default:
+ break;
+ }
+--
+1.7.0.2
+
diff --git a/main/linux-scst/APKBUILD b/main/linux-scst/APKBUILD
new file mode 100644
index 000000000..6caf4f285
--- /dev/null
+++ b/main/linux-scst/APKBUILD
@@ -0,0 +1,148 @@
+# Maintainer: Natanael Copa <ncopa@alpinelinux.org>
+
+_flavor=scst
+pkgname=linux-${_flavor}
+pkgver=2.6.36.3
+_kernver=2.6.36
+pkgrel=0
+pkgdesc="Linux kernel optimised for scst"
+url="http://scst.sourceforge.net"
+depends="mkinitfs linux-firmware"
+makedepends="perl installkernel bash"
+options="!strip"
+_config=${config:-kernelconfig.${CARCH}}
+install=
+source="ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-$_kernver.tar.bz2
+ ftp://ftp.kernel.org/pub/linux/kernel/v2.6/patch-$pkgver.bz2
+ kernelconfig.x86_64
+ scst-2.0.0.1-2.6.36.patch
+ unionfs-2.5.7_for_$_kernver.diff
+ 0004-arp-flush-arp-cache-on-device-change.patch
+ "
+subpackages="$pkgname-dev linux-firmware:firmware"
+arch="x86_64"
+license="GPL-2"
+
+_abi_release=${pkgver}-${_flavor}
+
+prepare() {
+ local _patch_failed=
+ cd "$srcdir"/linux-$_kernver
+ if [ "$_kernver" != "$pkgver" ]; then
+ bunzip2 -c < ../patch-$pkgver.bz2 | patch -p1 -N || return 1
+ fi
+
+ # first apply patches in specified order
+ for i in $source; do
+ case $i in
+ *.patch)
+ msg "Applying $i..."
+ if ! patch -s -p1 -N -i "$srcdir"/$i; then
+ echo $i >>failed
+ _patch_failed=1
+ fi
+ ;;
+ esac
+ done
+
+ if ! [ -z "$_patch_failed" ]; then
+ error "The following patches failed:"
+ cat failed
+ return 1
+ fi
+
+ echo "-scst" > "$srcdir"/linux-$_kernver/localversion-scst
+
+ mkdir -p "$srcdir"/build
+ cp "$srcdir"/$_config "$srcdir"/build/.config || return 1
+ make -C "$srcdir"/linux-$_kernver O="$srcdir"/build HOSTCC="${CC:-gcc}" \
+ silentoldconfig
+}
+
+# this is so we can do: 'abuild menuconfig' to reconfigure kernel
+menuconfig() {
+ cd "$srcdir"/build || return 1
+ make menuconfig
+ cp .config "$startdir"/$_config
+}
+
+build() {
+ cd "$srcdir"/build
+ make CC="${CC:-gcc}" \
+ KBUILD_BUILD_VERSION="$((pkgrel + 1 ))-Alpine" \
+ || return 1
+}
+
+package() {
+ cd "$srcdir"/build
+ mkdir -p "$pkgdir"/boot "$pkgdir"/lib/modules
+ make -j1 modules_install firmware_install install \
+ INSTALL_MOD_PATH="$pkgdir" \
+ INSTALL_PATH="$pkgdir"/boot \
+ || return 1
+
+ rm -f "$pkgdir"/lib/modules/${_abi_release}/build \
+ "$pkgdir"/lib/modules/${_abi_release}/source
+ install -D include/config/kernel.release \
+ "$pkgdir"/usr/share/kernel/$_flavor/kernel.release
+}
+
+dev() {
+	# copy only the parts that we really need to build 3rd party
+	# kernel modules and install those as /usr/src/linux-headers,
+	# similar to what ubuntu does
+ #
+	# this way you don't need to install the 300-400 MB of kernel sources to
+	# build a tiny kernel module
+ #
+	pkgdesc="Headers and scripts for third party modules for scst kernel"
+ local dir="$subpkgdir"/usr/src/linux-headers-${_abi_release}
+
+ # first we import config, run prepare to set up for building
+ # external modules, and create the scripts
+ mkdir -p "$dir"
+ cp "$srcdir"/$_config "$dir"/.config
+ make -j1 -C "$srcdir"/linux-$_kernver O="$dir" HOSTCC="${CC:-gcc}" \
+ silentoldconfig prepare scripts
+
+	# remove the stuff that points to the real sources. we want 3rd party
+	# modules to believe these are the sources
+ rm "$dir"/Makefile "$dir"/source
+
+ # copy the needed stuff from real sources
+ #
+ # this is taken from ubuntu kernel build script
+ # http://kernel.ubuntu.com/git?p=ubuntu/ubuntu-jaunty.git;a=blob;f=debian/rules.d/3-binary-indep.mk;hb=HEAD
+ cd "$srcdir"/linux-$_kernver
+ find . -path './include/*' -prune -o -path './scripts/*' -prune \
+ -o -type f \( -name 'Makefile*' -o -name 'Kconfig*' \
+ -o -name 'Kbuild*' -o -name '*.sh' -o -name '*.pl' \
+ -o -name '*.lds' \) | cpio -pdm "$dir"
+ cp -a drivers/media/dvb/dvb-core/*.h "$dir"/drivers/media/dvb/dvb-core
+ cp -a drivers/media/video/*.h "$dir"/drivers/media/video
+ cp -a drivers/media/dvb/frontends/*.h "$dir"/drivers/media/dvb/frontends
+ cp -a scripts include "$dir"
+ find $(find arch -name include -type d -print) -type f \
+ | cpio -pdm "$dir"
+
+ install -Dm644 "$srcdir"/build/Module.symvers \
+ "$dir"/Module.symvers
+
+ mkdir -p "$subpkgdir"/lib/modules/${_abi_release}
+ ln -sf /usr/src/linux-headers-${_abi_release} \
+ "$subpkgdir"/lib/modules/${_abi_release}/build
+}
+
+firmware() {
+ pkgdesc="Firmware for linux kernel"
+ replaces="linux-grsec linux-vserver"
+ mkdir -p "$subpkgdir"/lib
+ mv "$pkgdir"/lib/firmware "$subpkgdir"/lib/
+}
+
+md5sums="61f3739a73afb6914cb007f37fb09b62 linux-2.6.36.tar.bz2
+33f51375d4baa343502b39acf94d5a6c patch-2.6.36.3.bz2
+68d4cbd30411aca485293117bd98ec38 kernelconfig.x86_64
+e62cd51e9452633821e4457564a094f3 scst-2.0.0.1-2.6.36.patch
+fec281a4e03fed560ce309ad8fc5a592 unionfs-2.5.7_for_2.6.36.diff
+776adeeb5272093574f8836c5037dd7d 0004-arp-flush-arp-cache-on-device-change.patch"
diff --git a/main/linux-scst/kernelconfig.x86_64 b/main/linux-scst/kernelconfig.x86_64
new file mode 100644
index 000000000..d98eb305a
--- /dev/null
+++ b/main/linux-scst/kernelconfig.x86_64
@@ -0,0 +1,4729 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.36.2
+# Thu Dec 23 12:32:35 2010
+#
+CONFIG_64BIT=y
+# CONFIG_X86_32 is not set
+CONFIG_X86_64=y
+CONFIG_X86=y
+CONFIG_INSTRUCTION_DECODER=y
+CONFIG_OUTPUT_FORMAT="elf64-x86-64"
+CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig"
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_CLOCKSOURCE_WATCHDOG=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_MMU=y
+CONFIG_ZONE_DMA=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_NEED_SG_DMA_LENGTH=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_ARCH_HAS_CPU_RELAX=y
+CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
+CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
+CONFIG_HAVE_CPUMASK_OF_CPU_MAP=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_ZONE_DMA32=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_AUDIT_ARCH=y
+CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_HAVE_EARLY_RES=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_PENDING_IRQ=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+CONFIG_X86_64_SMP=y
+CONFIG_X86_HT=y
+CONFIG_X86_TRAMPOLINE=y
+CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11"
+# CONFIG_KTIME_SCALAR is not set
+CONFIG_ARCH_CPU_PROBE_RELEASE=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+CONFIG_RCU_FAST_NO_HZ=y
+# CONFIG_TREE_RCU_TRACE is not set
+CONFIG_IKCONFIG=m
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_LZO=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+CONFIG_SYSCTL_SYSCALL=y
+# CONFIG_KALLSYMS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_PCSPKR_PLATFORM=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+CONFIG_PERF_COUNTERS=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+# CONFIG_OPROFILE_EVENT_MULTIPLEX is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_USER_RETURN_NOTIFIER=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_OPTPROBES=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
+CONFIG_HAVE_USER_RETURN_NOTIFIER=y
+CONFIG_HAVE_PERF_EVENTS_NMI=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=m
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_PREEMPT_NOTIFIERS=y
+CONFIG_PADATA=y
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_FREEZER=y
+
+#
+# Processor type and features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_SMP=y
+# CONFIG_SPARSE_IRQ is not set
+CONFIG_X86_MPPARSE=y
+CONFIG_X86_EXTENDED_PLATFORM=y
+# CONFIG_X86_VSMP is not set
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+CONFIG_PARAVIRT_GUEST=y
+# CONFIG_XEN is not set
+CONFIG_KVM_CLOCK=y
+CONFIG_KVM_GUEST=y
+CONFIG_PARAVIRT=y
+# CONFIG_PARAVIRT_SPINLOCKS is not set
+CONFIG_PARAVIRT_CLOCK=y
+CONFIG_NO_BOOTMEM=y
+# CONFIG_MEMTEST is not set
+# CONFIG_MK8 is not set
+# CONFIG_MPSC is not set
+# CONFIG_MCORE2 is not set
+# CONFIG_MATOM is not set
+CONFIG_GENERIC_CPU=y
+CONFIG_X86_CPU=y
+CONFIG_X86_INTERNODE_CACHE_SHIFT=6
+CONFIG_X86_CMPXCHG=y
+CONFIG_X86_L1_CACHE_SHIFT=6
+CONFIG_X86_XADD=y
+CONFIG_X86_WP_WORKS_OK=y
+CONFIG_X86_TSC=y
+CONFIG_X86_CMPXCHG64=y
+CONFIG_X86_CMOV=y
+CONFIG_X86_MINIMUM_CPU_FAMILY=64
+CONFIG_X86_DEBUGCTLMSR=y
+# CONFIG_PROCESSOR_SELECT is not set
+CONFIG_CPU_SUP_INTEL=y
+CONFIG_CPU_SUP_AMD=y
+CONFIG_CPU_SUP_CENTAUR=y
+CONFIG_HPET_TIMER=y
+CONFIG_HPET_EMULATE_RTC=y
+CONFIG_DMI=y
+CONFIG_GART_IOMMU=y
+CONFIG_CALGARY_IOMMU=y
+CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y
+CONFIG_AMD_IOMMU=y
+# CONFIG_AMD_IOMMU_STATS is not set
+CONFIG_SWIOTLB=y
+CONFIG_IOMMU_HELPER=y
+CONFIG_IOMMU_API=y
+CONFIG_NR_CPUS=8
+CONFIG_SCHED_SMT=y
+CONFIG_SCHED_MC=y
+# CONFIG_PREEMPT_NONE is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_PREEMPT is not set
+CONFIG_X86_LOCAL_APIC=y
+CONFIG_X86_IO_APIC=y
+# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
+# CONFIG_X86_MCE is not set
+CONFIG_I8K=m
+CONFIG_MICROCODE=m
+CONFIG_MICROCODE_INTEL=y
+CONFIG_MICROCODE_AMD=y
+CONFIG_MICROCODE_OLD_INTERFACE=y
+CONFIG_X86_MSR=m
+CONFIG_X86_CPUID=m
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
+CONFIG_DIRECT_GBPAGES=y
+# CONFIG_NUMA is not set
+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM=y
+CONFIG_HAVE_MEMORY_PRESENT=y
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
+CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y
+CONFIG_SPARSEMEM_VMEMMAP=y
+# CONFIG_MEMORY_HOTPLUG is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_MMU_NOTIFIER=y
+CONFIG_KSM=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
+CONFIG_X86_RESERVE_LOW_64K=y
+CONFIG_MTRR=y
+CONFIG_MTRR_SANITIZER=y
+CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=0
+CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
+CONFIG_X86_PAT=y
+CONFIG_ARCH_USES_PG_UNCACHED=y
+# CONFIG_EFI is not set
+# CONFIG_SECCOMP is not set
+# CONFIG_CC_STACKPROTECTOR is not set
+# CONFIG_HZ_100 is not set
+# CONFIG_HZ_250 is not set
+CONFIG_HZ_300=y
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=300
+CONFIG_SCHED_HRTICK=y
+# CONFIG_KEXEC is not set
+# CONFIG_CRASH_DUMP is not set
+CONFIG_PHYSICAL_START=0x1000000
+# CONFIG_RELOCATABLE is not set
+CONFIG_PHYSICAL_ALIGN=0x1000000
+CONFIG_HOTPLUG_CPU=y
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+
+#
+# Power management and ACPI options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP_SMP=y
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND_NVS=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_HIBERNATION is not set
+# CONFIG_PM_RUNTIME is not set
+CONFIG_PM_OPS=y
+CONFIG_ACPI=y
+CONFIG_ACPI_SLEEP=y
+CONFIG_ACPI_PROCFS=y
+CONFIG_ACPI_PROCFS_POWER=y
+# CONFIG_ACPI_POWER_METER is not set
+CONFIG_ACPI_SYSFS_POWER=y
+CONFIG_ACPI_EC_DEBUGFS=m
+CONFIG_ACPI_PROC_EVENT=y
+CONFIG_ACPI_AC=m
+CONFIG_ACPI_BATTERY=m
+CONFIG_ACPI_BUTTON=m
+CONFIG_ACPI_VIDEO=m
+CONFIG_ACPI_FAN=m
+CONFIG_ACPI_DOCK=y
+CONFIG_ACPI_PROCESSOR=m
+CONFIG_ACPI_HOTPLUG_CPU=y
+# CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set
+CONFIG_ACPI_THERMAL=m
+# CONFIG_ACPI_CUSTOM_DSDT is not set
+CONFIG_ACPI_BLACKLIST_YEAR=0
+# CONFIG_ACPI_DEBUG is not set
+CONFIG_ACPI_PCI_SLOT=m
+CONFIG_X86_PM_TIMER=y
+CONFIG_ACPI_CONTAINER=m
+CONFIG_ACPI_SBS=m
+CONFIG_ACPI_HED=m
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_APEI_GHES=m
+CONFIG_ACPI_APEI_EINJ=m
+CONFIG_ACPI_APEI_ERST_DEBUG=m
+# CONFIG_SFI is not set
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=m
+# CONFIG_CPU_FREQ_DEBUG is not set
+CONFIG_CPU_FREQ_STAT=m
+# CONFIG_CPU_FREQ_STAT_DETAILS is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_ONDEMAND=m
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+
+#
+# CPUFreq processor drivers
+#
+CONFIG_X86_PCC_CPUFREQ=m
+CONFIG_X86_ACPI_CPUFREQ=m
+CONFIG_X86_POWERNOW_K8=m
+CONFIG_X86_SPEEDSTEP_CENTRINO=m
+CONFIG_X86_P4_CLOCKMOD=m
+
+#
+# shared options
+#
+CONFIG_X86_SPEEDSTEP_LIB=m
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+# CONFIG_INTEL_IDLE is not set
+
+#
+# Memory power savings
+#
+CONFIG_I7300_IDLE_IOAT_CHANNEL=y
+CONFIG_I7300_IDLE=m
+
+#
+# Bus options (PCI etc.)
+#
+CONFIG_PCI=y
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_CNB20LE_QUIRK=y
+# CONFIG_DMAR is not set
+# CONFIG_INTR_REMAP is not set
+CONFIG_PCIEPORTBUS=y
+CONFIG_HOTPLUG_PCI_PCIE=m
+# CONFIG_PCIEAER is not set
+CONFIG_PCIEASPM=y
+# CONFIG_PCIEASPM_DEBUG is not set
+CONFIG_ARCH_SUPPORTS_MSI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_STUB=m
+CONFIG_HT_IRQ=y
+# CONFIG_PCI_IOV is not set
+CONFIG_PCI_IOAPIC=y
+CONFIG_ISA_DMA_API=y
+CONFIG_K8_NB=y
+CONFIG_PCCARD=m
+CONFIG_PCMCIA=m
+CONFIG_PCMCIA_LOAD_CIS=y
+CONFIG_CARDBUS=y
+
+#
+# PC-card bridges
+#
+CONFIG_YENTA=m
+CONFIG_YENTA_O2=y
+CONFIG_YENTA_RICOH=y
+CONFIG_YENTA_TI=y
+CONFIG_YENTA_ENE_TUNE=y
+CONFIG_YENTA_TOSHIBA=y
+CONFIG_PD6729=m
+CONFIG_I82092=m
+CONFIG_PCCARD_NONSTATIC=y
+CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI_FAKE=m
+CONFIG_HOTPLUG_PCI_ACPI=m
+CONFIG_HOTPLUG_PCI_ACPI_IBM=m
+CONFIG_HOTPLUG_PCI_CPCI=y
+CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
+CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
+CONFIG_HOTPLUG_PCI_SHPC=m
+
+#
+# Executable file formats / Emulations
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
+CONFIG_BINFMT_MISC=m
+# CONFIG_IA32_EMULATION is not set
+# CONFIG_COMPAT_FOR_U64_ALIGNMENT is not set
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=m
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_MIGRATE=y
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=m
+CONFIG_NET_KEY=m
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_ASK_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+# CONFIG_IP_PIMSM_V1 is not set
+CONFIG_IP_PIMSM_V2=y
+CONFIG_ARPD=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_TUNNEL=m
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_LRO=y
+CONFIG_INET_DIAG=m
+CONFIG_INET_TCP_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=m
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_TCP_CONG_WESTWOOD=m
+CONFIG_TCP_CONG_HTCP=m
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_VEGAS=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_DEFAULT_CUBIC=y
+# CONFIG_DEFAULT_RENO is not set
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6=m
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_TUNNEL=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETLABEL=y
+CONFIG_NETWORK_SECMARK=y
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_NETLINK_QUEUE=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_MARK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_GRE=m
+CONFIG_NF_CT_PROTO_SCTP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XTABLES=m
+
+#
+# Xtables combined modules
+#
+CONFIG_NETFILTER_XT_MARK=m
+CONFIG_NETFILTER_XT_CONNMARK=m
+
+#
+# Xtables targets
+#
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HL=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LED=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_RATEEST=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+
+#
+# Xtables matches
+#
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_HL=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+# CONFIG_IP_VS_DEBUG is not set
+CONFIG_IP_VS_TAB_BITS=12
+
+#
+# IPVS transport protocol load balancing support
+#
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_AH_ESP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_PROTO_SCTP=y
+
+#
+# IPVS scheduler
+#
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+
+#
+# IPVS application helper
+#
+CONFIG_IP_VS_FTP=m
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_NF_NAT_SNMP_BASIC=m
+CONFIG_NF_NAT_PROTO_DCCP=m
+CONFIG_NF_NAT_PROTO_GRE=m
+CONFIG_NF_NAT_PROTO_UDPLITE=m
+CONFIG_NF_NAT_PROTO_SCTP=m
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_IRC=m
+CONFIG_NF_NAT_TFTP=m
+CONFIG_NF_NAT_AMANDA=m
+CONFIG_NF_NAT_PPTP=m
+CONFIG_NF_NAT_H323=m
+CONFIG_NF_NAT_SIP=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_QUEUE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+
+#
+# DECnet: Netfilter Configuration
+#
+CONFIG_DECNET_NF_GRABULATOR=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_ULOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_DCCP=m
+CONFIG_INET_DCCP_DIAG=m
+
+#
+# DCCP CCIDs Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP_CCID2_DEBUG is not set
+CONFIG_IP_DCCP_CCID3=y
+# CONFIG_IP_DCCP_CCID3_DEBUG is not set
+CONFIG_IP_DCCP_CCID3_RTO=100
+CONFIG_IP_DCCP_TFRC_LIB=y
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_HMAC_NONE is not set
+CONFIG_SCTP_HMAC_SHA1=y
+# CONFIG_SCTP_HMAC_MD5 is not set
+CONFIG_RDS=m
+# CONFIG_RDS_RDMA is not set
+# CONFIG_RDS_TCP is not set
+# CONFIG_RDS_DEBUG is not set
+CONFIG_TIPC=m
+# CONFIG_TIPC_ADVANCED is not set
+# CONFIG_TIPC_DEBUG is not set
+CONFIG_ATM=m
+CONFIG_ATM_CLIP=m
+# CONFIG_ATM_CLIP_NO_ICMP is not set
+CONFIG_ATM_LANE=m
+CONFIG_ATM_MPOA=m
+CONFIG_ATM_BR2684=m
+# CONFIG_ATM_BR2684_IPFILTER is not set
+CONFIG_L2TP=m
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=m
+CONFIG_L2TP_ETH=m
+CONFIG_STP=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+# CONFIG_NET_DSA is not set
+CONFIG_VLAN_8021Q=m
+# CONFIG_VLAN_8021Q_GVRP is not set
+CONFIG_DECNET=m
+CONFIG_DECNET_ROUTER=y
+CONFIG_LLC=m
+CONFIG_LLC2=m
+CONFIG_IPX=m
+# CONFIG_IPX_INTERN is not set
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_X25=m
+CONFIG_LAPB=m
+CONFIG_ECONET=m
+CONFIG_ECONET_AUNUDP=y
+CONFIG_ECONET_NATIVE=y
+CONFIG_WAN_ROUTER=m
+CONFIG_PHONET=m
+CONFIG_IEEE802154=m
+CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_ATM=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_INGRESS=m
+
+#
+# Classification
+#
+CONFIG_NET_CLS=y
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_ROUTE=y
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_STACK=32
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+# CONFIG_NET_CLS_IND is not set
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
+CONFIG_DNS_RESOLVER=y
+CONFIG_RPS=y
+
+#
+# Network testing
+#
+CONFIG_NET_PKTGEN=m
+# CONFIG_HAMRADIO is not set
+CONFIG_CAN=m
+CONFIG_CAN_RAW=m
+CONFIG_CAN_BCM=m
+
+#
+# CAN Device Drivers
+#
+CONFIG_CAN_VCAN=m
+CONFIG_CAN_DEV=m
+# CONFIG_CAN_CALC_BITTIMING is not set
+CONFIG_CAN_MCP251X=m
+CONFIG_CAN_JANZ_ICAN3=m
+CONFIG_CAN_SJA1000=m
+CONFIG_CAN_SJA1000_PLATFORM=m
+CONFIG_CAN_EMS_PCI=m
+CONFIG_CAN_KVASER_PCI=m
+CONFIG_CAN_PLX_PCI=m
+
+#
+# CAN USB interfaces
+#
+# CONFIG_CAN_EMS_USB is not set
+CONFIG_CAN_ESD_USB2=m
+# CONFIG_CAN_DEBUG_DEVICES is not set
+CONFIG_IRDA=m
+
+#
+# IrDA protocols
+#
+CONFIG_IRLAN=m
+CONFIG_IRNET=m
+CONFIG_IRCOMM=m
+CONFIG_IRDA_ULTRA=y
+
+#
+# IrDA options
+#
+CONFIG_IRDA_CACHE_LAST_LSAP=y
+CONFIG_IRDA_FAST_RR=y
+# CONFIG_IRDA_DEBUG is not set
+
+#
+# Infrared-port device drivers
+#
+
+#
+# SIR device drivers
+#
+CONFIG_IRTTY_SIR=m
+
+#
+# Dongle support
+#
+CONFIG_DONGLE=y
+CONFIG_ESI_DONGLE=m
+CONFIG_ACTISYS_DONGLE=m
+CONFIG_TEKRAM_DONGLE=m
+CONFIG_TOIM3232_DONGLE=m
+CONFIG_LITELINK_DONGLE=m
+CONFIG_MA600_DONGLE=m
+CONFIG_GIRBIL_DONGLE=m
+CONFIG_MCP2120_DONGLE=m
+CONFIG_OLD_BELKIN_DONGLE=m
+CONFIG_ACT200L_DONGLE=m
+CONFIG_KINGSUN_DONGLE=m
+CONFIG_KSDAZZLE_DONGLE=m
+CONFIG_KS959_DONGLE=m
+
+#
+# FIR device drivers
+#
+CONFIG_USB_IRDA=m
+CONFIG_SIGMATEL_FIR=m
+CONFIG_NSC_FIR=m
+CONFIG_WINBOND_FIR=m
+CONFIG_SMC_IRCC_FIR=m
+CONFIG_ALI_FIR=m
+CONFIG_VLSI_FIR=m
+CONFIG_VIA_FIR=m
+CONFIG_MCS_FIR=m
+CONFIG_BT=m
+CONFIG_BT_L2CAP=m
+CONFIG_BT_SCO=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_CMTP=m
+CONFIG_BT_HIDP=m
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBTSDIO=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+# CONFIG_BT_HCIUART_ATH3K is not set
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIBCM203X=m
+CONFIG_BT_HCIBPA10X=m
+CONFIG_BT_HCIBFUSB=m
+CONFIG_BT_HCIDTL1=m
+CONFIG_BT_HCIBT3C=m
+CONFIG_BT_HCIBLUECARD=m
+CONFIG_BT_HCIBTUART=m
+CONFIG_BT_HCIVHCI=m
+# CONFIG_BT_MRVL is not set
+CONFIG_BT_ATH3K=m
+CONFIG_AF_RXRPC=m
+# CONFIG_AF_RXRPC_DEBUG is not set
+CONFIG_RXKAD=m
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_WEXT_SPY=y
+CONFIG_WEXT_PRIV=y
+CONFIG_CFG80211=m
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+CONFIG_CFG80211_WEXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_LIB80211=m
+CONFIG_LIB80211_CRYPT_WEP=m
+CONFIG_LIB80211_CRYPT_CCMP=m
+CONFIG_LIB80211_CRYPT_TKIP=m
+# CONFIG_LIB80211_DEBUG is not set
+CONFIG_MAC80211=m
+CONFIG_MAC80211_HAS_RC=y
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_MINSTREL_HT=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+# CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set
+CONFIG_MAC80211_RC_DEFAULT="pid"
+# CONFIG_MAC80211_MESH is not set
+CONFIG_MAC80211_LEDS=y
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+CONFIG_WIMAX=m
+CONFIG_WIMAX_DEBUG_LEVEL=8
+CONFIG_RFKILL=m
+CONFIG_RFKILL_LEDS=y
+# CONFIG_RFKILL_INPUT is not set
+CONFIG_NET_9P=m
+CONFIG_NET_9P_VIRTIO=m
+CONFIG_NET_9P_RDMA=m
+# CONFIG_NET_9P_DEBUG is not set
+CONFIG_CAIF=m
+# CONFIG_CAIF_DEBUG is not set
+CONFIG_CAIF_NETDEV=m
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_FW_LOADER=m
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_SYS_HYPERVISOR is not set
+CONFIG_CONNECTOR=m
+CONFIG_MTD=m
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_TESTS=m
+CONFIG_MTD_CONCAT=m
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_REDBOOT_PARTS=m
+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
+# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
+# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
+CONFIG_MTD_AR7_PARTS=m
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=m
+CONFIG_HAVE_MTD_OTP=y
+CONFIG_MTD_BLKDEVS=m
+CONFIG_MTD_BLOCK=m
+CONFIG_MTD_BLOCK_RO=m
+CONFIG_FTL=m
+CONFIG_NFTL=m
+CONFIG_NFTL_RW=y
+CONFIG_INFTL=m
+CONFIG_RFD_FTL=m
+CONFIG_SSFDC=m
+CONFIG_SM_FTL=m
+CONFIG_MTD_OOPS=m
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=m
+CONFIG_MTD_JEDECPROBE=m
+CONFIG_MTD_GEN_PROBE=m
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=m
+CONFIG_MTD_CFI_AMDSTD=m
+CONFIG_MTD_CFI_STAA=m
+CONFIG_MTD_CFI_UTIL=m
+CONFIG_MTD_RAM=m
+CONFIG_MTD_ROM=m
+CONFIG_MTD_ABSENT=m
+
+#
+# Mapping drivers for chip access
+#
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=m
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+CONFIG_MTD_SC520CDP=m
+CONFIG_MTD_NETSC520=m
+CONFIG_MTD_TS5500=m
+CONFIG_MTD_SBC_GXX=m
+CONFIG_MTD_AMD76XROM=m
+CONFIG_MTD_ICHXROM=m
+CONFIG_MTD_ESB2ROM=m
+CONFIG_MTD_CK804XROM=m
+CONFIG_MTD_SCB2_FLASH=m
+CONFIG_MTD_NETtel=m
+CONFIG_MTD_L440GX=m
+CONFIG_MTD_PCI=m
+CONFIG_MTD_PCMCIA=m
+# CONFIG_MTD_PCMCIA_ANONYMOUS is not set
+# CONFIG_MTD_GPIO_ADDR is not set
+CONFIG_MTD_INTEL_VR_NOR=m
+CONFIG_MTD_PLATRAM=m
+
+#
+# Self-contained MTD device drivers
+#
+CONFIG_MTD_PMC551=m
+CONFIG_MTD_PMC551_BUGFIX=y
+# CONFIG_MTD_PMC551_DEBUG is not set
+CONFIG_MTD_DATAFLASH=m
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+# CONFIG_MTD_DATAFLASH_OTP is not set
+CONFIG_MTD_M25P80=m
+CONFIG_M25PXX_USE_FAST_READ=y
+# CONFIG_MTD_SST25L is not set
+CONFIG_MTD_SLRAM=m
+CONFIG_MTD_PHRAM=m
+CONFIG_MTD_MTDRAM=m
+CONFIG_MTDRAM_TOTAL_SIZE=4096
+CONFIG_MTDRAM_ERASE_SIZE=128
+CONFIG_MTD_BLOCK2MTD=m
+
+#
+# Disk-On-Chip Device Drivers
+#
+CONFIG_MTD_DOC2000=m
+CONFIG_MTD_DOC2001=m
+CONFIG_MTD_DOC2001PLUS=m
+CONFIG_MTD_DOCPROBE=m
+CONFIG_MTD_DOCECC=m
+CONFIG_MTD_DOCPROBE_ADVANCED=y
+CONFIG_MTD_DOCPROBE_ADDRESS=0x0000
+# CONFIG_MTD_DOCPROBE_HIGH is not set
+# CONFIG_MTD_DOCPROBE_55AA is not set
+CONFIG_MTD_NAND_ECC=m
+CONFIG_MTD_NAND_ECC_SMC=y
+CONFIG_MTD_NAND=m
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+CONFIG_MTD_SM_COMMON=m
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+CONFIG_MTD_NAND_DENALI=m
+CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR=0xFF108018
+CONFIG_MTD_NAND_IDS=m
+CONFIG_MTD_NAND_RICOH=m
+CONFIG_MTD_NAND_DISKONCHIP=m
+# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
+CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
+# CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE is not set
+CONFIG_MTD_NAND_CAFE=m
+CONFIG_MTD_NAND_NANDSIM=m
+CONFIG_MTD_NAND_PLATFORM=m
+CONFIG_MTD_ALAUDA=m
+CONFIG_MTD_ONENAND=m
+# CONFIG_MTD_ONENAND_VERIFY_WRITE is not set
+# CONFIG_MTD_ONENAND_GENERIC is not set
+CONFIG_MTD_ONENAND_OTP=y
+CONFIG_MTD_ONENAND_2X_PROGRAM=y
+CONFIG_MTD_ONENAND_SIM=m
+
+#
+# LPDDR flash memory drivers
+#
+CONFIG_MTD_LPDDR=m
+CONFIG_MTD_QINFO_PROBE=m
+
+#
+# UBI - Unsorted block images
+#
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
+# CONFIG_MTD_UBI_GLUEBI is not set
+
+#
+# UBI debugging options
+#
+# CONFIG_MTD_UBI_DEBUG is not set
+CONFIG_PARPORT=m
+CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_SERIAL=m
+# CONFIG_PARPORT_PC_FIFO is not set
+# CONFIG_PARPORT_PC_SUPERIO is not set
+CONFIG_PARPORT_PC_PCMCIA=m
+# CONFIG_PARPORT_GSC is not set
+CONFIG_PARPORT_AX88796=m
+# CONFIG_PARPORT_1284 is not set
+CONFIG_PARPORT_NOT_PC=y
+CONFIG_PNP=y
+# CONFIG_PNP_DEBUG_MESSAGES is not set
+
+#
+# Protocols
+#
+CONFIG_PNPACPI=y
+CONFIG_BLK_DEV=y
+CONFIG_BLK_DEV_FD=m
+# CONFIG_PARIDE is not set
+CONFIG_BLK_CPQ_DA=m
+CONFIG_BLK_CPQ_CISS_DA=m
+CONFIG_CISS_SCSI_TAPE=y
+CONFIG_BLK_DEV_DAC960=m
+CONFIG_BLK_DEV_UMEM=m
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+# CONFIG_BLK_DEV_DRBD is not set
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_OSD=m
+CONFIG_BLK_DEV_SX8=m
+CONFIG_BLK_DEV_UB=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+CONFIG_ATA_OVER_ETH=m
+CONFIG_VIRTIO_BLK=m
+# CONFIG_BLK_DEV_HD is not set
+CONFIG_MISC_DEVICES=y
+CONFIG_AD525X_DPOT=m
+CONFIG_AD525X_DPOT_I2C=m
+CONFIG_AD525X_DPOT_SPI=m
+CONFIG_IBM_ASM=m
+CONFIG_PHANTOM=m
+CONFIG_SGI_IOC4=m
+CONFIG_TIFM_CORE=m
+CONFIG_TIFM_7XX1=m
+CONFIG_ICS932S401=m
+CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_CS5535_MFGPT=m
+CONFIG_CS5535_MFGPT_DEFAULT_IRQ=7
+CONFIG_CS5535_CLOCK_EVENT_SRC=m
+CONFIG_HP_ILO=m
+CONFIG_ISL29003=m
+CONFIG_SENSORS_TSL2550=m
+CONFIG_SENSORS_BH1780=m
+CONFIG_HMC6352=m
+CONFIG_DS1682=m
+CONFIG_TI_DAC7512=m
+CONFIG_VMWARE_BALLOON=m
+CONFIG_BMP085=m
+CONFIG_C2PORT=m
+CONFIG_C2PORT_DURAMAR_2150=m
+
+#
+# EEPROM support
+#
+CONFIG_EEPROM_AT24=m
+CONFIG_EEPROM_AT25=m
+CONFIG_EEPROM_LEGACY=m
+CONFIG_EEPROM_MAX6875=m
+CONFIG_EEPROM_93CX6=m
+CONFIG_CB710_CORE=m
+# CONFIG_CB710_DEBUG is not set
+CONFIG_CB710_DEBUG_ASSUMPTIONS=y
+CONFIG_IWMC3200TOP=m
+# CONFIG_IWMC3200TOP_DEBUG is not set
+# CONFIG_IWMC3200TOP_DEBUGFS is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=m
+CONFIG_SCSI_DMA=y
+CONFIG_SCSI_TGT=m
+CONFIG_SCSI_NETLINK=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_ENCLOSURE=m
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_FC_TGT_ATTRS=y
+CONFIG_SCSI_ISCSI_ATTRS=m
+CONFIG_SCSI_SAS_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_SAS_HOST_SMP=y
+# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
+CONFIG_SCSI_SRP_ATTRS=m
+CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_LOWLEVEL=y
+CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
+CONFIG_SCSI_CXGB3_ISCSI=m
+CONFIG_SCSI_BNX2_ISCSI=m
+# CONFIG_BE2ISCSI is not set
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_HPSA=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_3W_SAS=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_BUILD_FIRMWARE is not set
+CONFIG_AIC7XXX_DEBUG_ENABLE=y
+CONFIG_AIC7XXX_DEBUG_MASK=0
+CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
+CONFIG_SCSI_AIC7XXX_OLD=m
+CONFIG_SCSI_AIC79XX=m
+CONFIG_AIC79XX_CMDS_PER_DEVICE=32
+CONFIG_AIC79XX_RESET_DELAY_MS=15000
+# CONFIG_AIC79XX_BUILD_FIRMWARE is not set
+CONFIG_AIC79XX_DEBUG_ENABLE=y
+CONFIG_AIC79XX_DEBUG_MASK=0
+CONFIG_AIC79XX_REG_PRETTY_PRINT=y
+CONFIG_SCSI_AIC94XX=m
+# CONFIG_AIC94XX_DEBUG is not set
+CONFIG_SCSI_MVSAS=m
+CONFIG_SCSI_MVSAS_DEBUG=y
+CONFIG_SCSI_DPT_I2O=m
+CONFIG_SCSI_ADVANSYS=m
+CONFIG_SCSI_ARCMSR=m
+CONFIG_MEGARAID_NEWGEN=y
+CONFIG_MEGARAID_MM=m
+CONFIG_MEGARAID_MAILBOX=m
+CONFIG_MEGARAID_LEGACY=m
+CONFIG_MEGARAID_SAS=m
+CONFIG_SCSI_MPT2SAS=m
+CONFIG_SCSI_MPT2SAS_MAX_SGE=128
+# CONFIG_SCSI_MPT2SAS_LOGGING is not set
+CONFIG_SCSI_HPTIOP=m
+CONFIG_SCSI_BUSLOGIC=m
+CONFIG_VMWARE_PVSCSI=m
+CONFIG_LIBFC=m
+CONFIG_LIBFCOE=m
+CONFIG_FCOE=m
+CONFIG_FCOE_FNIC=m
+CONFIG_SCSI_DMX3191D=m
+CONFIG_SCSI_EATA=m
+# CONFIG_SCSI_EATA_TAGGED_QUEUE is not set
+# CONFIG_SCSI_EATA_LINKED_COMMANDS is not set
+CONFIG_SCSI_EATA_MAX_TAGS=16
+CONFIG_SCSI_FUTURE_DOMAIN=m
+CONFIG_SCSI_GDTH=m
+CONFIG_SCSI_IPS=m
+CONFIG_SCSI_INITIO=m
+CONFIG_SCSI_INIA100=m
+CONFIG_SCSI_PPA=m
+CONFIG_SCSI_IMM=m
+# CONFIG_SCSI_IZIP_EPP16 is not set
+# CONFIG_SCSI_IZIP_SLOW_CTR is not set
+CONFIG_SCSI_STEX=m
+CONFIG_SCSI_SYM53C8XX_2=m
+CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
+CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
+CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
+CONFIG_SCSI_SYM53C8XX_MMIO=y
+CONFIG_SCSI_IPR=m
+CONFIG_SCSI_IPR_TRACE=y
+# CONFIG_SCSI_IPR_DUMP is not set
+CONFIG_SCSI_QLOGIC_1280=m
+CONFIG_SCSI_QLA_FC=m
+CONFIG_SCSI_QLA_ISCSI=m
+CONFIG_SCSI_LPFC=m
+# CONFIG_SCSI_LPFC_DEBUG_FS is not set
+CONFIG_SCSI_DC395x=m
+CONFIG_SCSI_DC390T=m
+CONFIG_SCSI_DEBUG=m
+# CONFIG_SCSI_PMCRAID is not set
+CONFIG_SCSI_PM8001=m
+CONFIG_SCSI_SRP=m
+# CONFIG_SCSI_BFA_FC is not set
+CONFIG_SCSI_LOWLEVEL_PCMCIA=y
+CONFIG_PCMCIA_FDOMAIN=m
+CONFIG_PCMCIA_QLOGIC=m
+CONFIG_PCMCIA_SYM53C500=m
+CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_HP_SW=m
+CONFIG_SCSI_DH_EMC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_SCSI_OSD_INITIATOR=m
+CONFIG_SCSI_OSD_ULD=m
+CONFIG_SCSI_OSD_DPRINT_SENSE=1
+# CONFIG_SCSI_OSD_DEBUG is not set
+
+#
+# SCSI target (SCST) support
+#
+CONFIG_SCST=m
+CONFIG_SCST_DISK=m
+CONFIG_SCST_TAPE=m
+CONFIG_SCST_CDROM=m
+CONFIG_SCST_MODISK=m
+CONFIG_SCST_CHANGER=m
+CONFIG_SCST_PROCESSOR=m
+CONFIG_SCST_RAID=m
+CONFIG_SCST_VDISK=m
+CONFIG_SCST_USER=m
+# CONFIG_SCST_STRICT_SERIALIZING is not set
+# CONFIG_SCST_STRICT_SECURITY is not set
+# CONFIG_SCST_TEST_IO_IN_SIRQ is not set
+# CONFIG_SCST_ABORT_CONSIDER_FINISHED_TASKS_AS_NOT_EXISTING is not set
+# CONFIG_SCST_USE_EXPECTED_VALUES is not set
+# CONFIG_SCST_EXTRACHECKS is not set
+CONFIG_SCST_TRACING=y
+# CONFIG_SCST_DEBUG is not set
+# CONFIG_SCST_DEBUG_OOM is not set
+# CONFIG_SCST_DEBUG_RETRY is not set
+# CONFIG_SCST_DEBUG_SN is not set
+# CONFIG_SCST_MEASURE_LATENCY is not set
+CONFIG_SCST_ISCSI=m
+# CONFIG_SCST_ISCSI_DEBUG_DIGEST_FAILURES is not set
+CONFIG_SCST_SRPT=m
+CONFIG_ATA=m
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
+CONFIG_ATA_ACPI=y
+CONFIG_SATA_PMP=y
+
+#
+# Controllers with non-SFF native interface
+#
+CONFIG_SATA_AHCI=m
+CONFIG_SATA_AHCI_PLATFORM=m
+CONFIG_SATA_INIC162X=m
+CONFIG_SATA_SIL24=m
+CONFIG_ATA_SFF=y
+
+#
+# SFF controllers with custom DMA interface
+#
+CONFIG_PDC_ADMA=m
+CONFIG_SATA_QSTOR=m
+CONFIG_SATA_SX4=m
+CONFIG_ATA_BMDMA=y
+
+#
+# SATA SFF controllers with BMDMA
+#
+CONFIG_ATA_PIIX=m
+CONFIG_SATA_MV=m
+CONFIG_SATA_NV=m
+CONFIG_SATA_PROMISE=m
+CONFIG_SATA_SIL=m
+CONFIG_SATA_SIS=m
+CONFIG_SATA_SVW=m
+CONFIG_SATA_ULI=m
+CONFIG_SATA_VIA=m
+CONFIG_SATA_VITESSE=m
+
+#
+# PATA SFF controllers with BMDMA
+#
+CONFIG_PATA_ALI=m
+CONFIG_PATA_AMD=m
+CONFIG_PATA_ARTOP=m
+CONFIG_PATA_ATIIXP=m
+CONFIG_PATA_ATP867X=m
+CONFIG_PATA_CMD64X=m
+CONFIG_PATA_CS5520=m
+CONFIG_PATA_CS5530=m
+CONFIG_PATA_CYPRESS=m
+CONFIG_PATA_EFAR=m
+CONFIG_PATA_HPT366=m
+CONFIG_PATA_HPT37X=m
+CONFIG_PATA_HPT3X2N=m
+CONFIG_PATA_HPT3X3=m
+CONFIG_PATA_HPT3X3_DMA=y
+CONFIG_PATA_IT8213=m
+CONFIG_PATA_IT821X=m
+CONFIG_PATA_JMICRON=m
+CONFIG_PATA_MARVELL=m
+CONFIG_PATA_NETCELL=m
+CONFIG_PATA_NINJA32=m
+CONFIG_PATA_NS87415=m
+CONFIG_PATA_OLDPIIX=m
+CONFIG_PATA_OPTIDMA=m
+CONFIG_PATA_PDC2027X=m
+CONFIG_PATA_PDC_OLD=m
+CONFIG_PATA_RADISYS=m
+CONFIG_PATA_RDC=m
+CONFIG_PATA_SC1200=m
+CONFIG_PATA_SCH=m
+CONFIG_PATA_SERVERWORKS=m
+CONFIG_PATA_SIL680=m
+CONFIG_PATA_SIS=m
+CONFIG_PATA_TOSHIBA=m
+CONFIG_PATA_TRIFLEX=m
+CONFIG_PATA_VIA=m
+CONFIG_PATA_WINBOND=m
+
+#
+# PIO-only SFF controllers
+#
+CONFIG_PATA_CMD640_PCI=m
+CONFIG_PATA_MPIIX=m
+CONFIG_PATA_NS87410=m
+CONFIG_PATA_OPTI=m
+CONFIG_PATA_PCMCIA=m
+CONFIG_PATA_PLATFORM=m
+CONFIG_PATA_RZ1000=m
+
+#
+# Generic fallback / legacy drivers
+#
+CONFIG_PATA_ACPI=m
+CONFIG_ATA_GENERIC=m
+CONFIG_PATA_LEGACY=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+# CONFIG_MD_AUTODETECT is not set
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+# CONFIG_MULTICORE_RAID456 is not set
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+# CONFIG_DM_DEBUG is not set
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_DELAY=m
+# CONFIG_DM_UEVENT is not set
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=m
+CONFIG_FUSION_FC=m
+CONFIG_FUSION_SAS=m
+CONFIG_FUSION_MAX_SGE=128
+CONFIG_FUSION_CTL=m
+# CONFIG_FUSION_LOGGING is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# The newer stack is recommended.
+#
+CONFIG_FIREWIRE=m
+CONFIG_FIREWIRE_OHCI=m
+CONFIG_FIREWIRE_OHCI_DEBUG=y
+CONFIG_FIREWIRE_SBP2=m
+CONFIG_FIREWIRE_NET=m
+CONFIG_IEEE1394=m
+CONFIG_IEEE1394_OHCI1394=m
+CONFIG_IEEE1394_PCILYNX=m
+CONFIG_IEEE1394_SBP2=m
+# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
+CONFIG_IEEE1394_ETH1394_ROM_ENTRY=y
+CONFIG_IEEE1394_ETH1394=m
+CONFIG_IEEE1394_RAWIO=m
+CONFIG_IEEE1394_VIDEO1394=m
+CONFIG_IEEE1394_DV1394=m
+# CONFIG_IEEE1394_VERBOSEDEBUG is not set
+CONFIG_FIREWIRE_NOSY=m
+CONFIG_I2O=m
+CONFIG_I2O_LCT_NOTIFY_ON_CHANGES=y
+CONFIG_I2O_EXT_ADAPTEC=y
+CONFIG_I2O_EXT_ADAPTEC_DMA64=y
+CONFIG_I2O_CONFIG=m
+CONFIG_I2O_CONFIG_OLD_IOCTL=y
+CONFIG_I2O_BUS=m
+CONFIG_I2O_BLOCK=m
+CONFIG_I2O_SCSI=m
+CONFIG_I2O_PROC=m
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+CONFIG_IFB=m
+CONFIG_DUMMY=m
+CONFIG_BONDING=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_EQUALIZER=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_NET_SB1000=m
+CONFIG_ARCNET=m
+CONFIG_ARCNET_1201=m
+CONFIG_ARCNET_1051=m
+CONFIG_ARCNET_RAW=m
+CONFIG_ARCNET_CAP=m
+CONFIG_ARCNET_COM90xx=m
+CONFIG_ARCNET_COM90xxIO=m
+CONFIG_ARCNET_RIM_I=m
+CONFIG_ARCNET_COM20020=m
+CONFIG_ARCNET_COM20020_PCI=m
+CONFIG_PHYLIB=m
+
+#
+# MII PHY device drivers
+#
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_NATIONAL_PHY=m
+CONFIG_STE10XP=m
+CONFIG_LSI_ET1011C_PHY=m
+CONFIG_MICREL_PHY=m
+CONFIG_MDIO_BITBANG=m
+CONFIG_MDIO_GPIO=m
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=m
+CONFIG_HAPPYMEAL=m
+CONFIG_SUNGEM=m
+CONFIG_CASSINI=m
+CONFIG_NET_VENDOR_3COM=y
+CONFIG_VORTEX=m
+CONFIG_TYPHOON=m
+CONFIG_ENC28J60=m
+# CONFIG_ENC28J60_WRITEVERIFY is not set
+CONFIG_ETHOC=m
+CONFIG_DNET=m
+CONFIG_NET_TULIP=y
+CONFIG_DE2104X=m
+CONFIG_DE2104X_DSL=0
+CONFIG_TULIP=m
+# CONFIG_TULIP_MWI is not set
+# CONFIG_TULIP_MMIO is not set
+# CONFIG_TULIP_NAPI is not set
+CONFIG_DE4X5=m
+CONFIG_WINBOND_840=m
+CONFIG_DM9102=m
+CONFIG_ULI526X=m
+CONFIG_PCMCIA_XIRCOM=m
+CONFIG_HP100=m
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+CONFIG_NET_PCI=y
+CONFIG_PCNET32=m
+CONFIG_AMD8111_ETH=m
+CONFIG_ADAPTEC_STARFIRE=m
+CONFIG_KSZ884X_PCI=m
+CONFIG_B44=m
+CONFIG_B44_PCI_AUTOSELECT=y
+CONFIG_B44_PCICORE_AUTOSELECT=y
+CONFIG_B44_PCI=y
+CONFIG_FORCEDETH=m
+CONFIG_E100=m
+CONFIG_FEALNX=m
+CONFIG_NATSEMI=m
+CONFIG_NE2K_PCI=m
+CONFIG_8139CP=m
+CONFIG_8139TOO=m
+CONFIG_8139TOO_PIO=y
+# CONFIG_8139TOO_TUNE_TWISTER is not set
+# CONFIG_8139TOO_8129 is not set
+# CONFIG_8139_OLD_RX_RESET is not set
+CONFIG_R6040=m
+CONFIG_SIS900=m
+CONFIG_EPIC100=m
+CONFIG_SMSC9420=m
+CONFIG_SUNDANCE=m
+# CONFIG_SUNDANCE_MMIO is not set
+CONFIG_TLAN=m
+CONFIG_KS8842=m
+CONFIG_KS8851=m
+CONFIG_KS8851_MLL=m
+CONFIG_VIA_RHINE=m
+# CONFIG_VIA_RHINE_MMIO is not set
+CONFIG_SC92031=m
+CONFIG_NET_POCKET=y
+CONFIG_ATP=m
+CONFIG_DE600=m
+CONFIG_DE620=m
+CONFIG_ATL2=m
+CONFIG_NETDEV_1000=y
+CONFIG_ACENIC=m
+# CONFIG_ACENIC_OMIT_TIGON_I is not set
+CONFIG_DL2K=m
+CONFIG_E1000=m
+CONFIG_E1000E=m
+CONFIG_IP1000=m
+CONFIG_IGB=m
+CONFIG_IGB_DCA=y
+CONFIG_IGBVF=m
+CONFIG_NS83820=m
+CONFIG_HAMACHI=m
+CONFIG_YELLOWFIN=m
+CONFIG_R8169=m
+CONFIG_R8169_VLAN=y
+CONFIG_SIS190=m
+CONFIG_SKGE=m
+# CONFIG_SKGE_DEBUG is not set
+CONFIG_SKY2=m
+# CONFIG_SKY2_DEBUG is not set
+CONFIG_VIA_VELOCITY=m
+CONFIG_TIGON3=m
+CONFIG_BNX2=m
+CONFIG_CNIC=m
+CONFIG_QLA3XXX=m
+CONFIG_ATL1=m
+CONFIG_ATL1E=m
+CONFIG_ATL1C=m
+CONFIG_JME=m
+CONFIG_NETDEV_10000=y
+CONFIG_MDIO=m
+CONFIG_CHELSIO_T1=m
+CONFIG_CHELSIO_T1_1G=y
+CONFIG_CHELSIO_T3_DEPENDS=y
+CONFIG_CHELSIO_T3=m
+CONFIG_CHELSIO_T4_DEPENDS=y
+CONFIG_CHELSIO_T4=m
+CONFIG_CHELSIO_T4VF_DEPENDS=y
+CONFIG_CHELSIO_T4VF=m
+CONFIG_ENIC=m
+CONFIG_IXGBE=m
+CONFIG_IXGBE_DCA=y
+# CONFIG_IXGBEVF is not set
+CONFIG_IXGB=m
+CONFIG_S2IO=m
+CONFIG_VXGE=m
+# CONFIG_VXGE_DEBUG_TRACE_ALL is not set
+CONFIG_MYRI10GE=m
+CONFIG_MYRI10GE_DCA=y
+CONFIG_NETXEN_NIC=m
+CONFIG_NIU=m
+CONFIG_MLX4_EN=m
+CONFIG_MLX4_CORE=m
+CONFIG_MLX4_DEBUG=y
+CONFIG_TEHUTI=m
+CONFIG_BNX2X=m
+CONFIG_QLCNIC=m
+CONFIG_QLGE=m
+CONFIG_SFC=m
+CONFIG_SFC_MTD=y
+CONFIG_BE2NET=m
+# CONFIG_TR is not set
+CONFIG_WLAN=y
+CONFIG_PCMCIA_RAYCS=m
+CONFIG_LIBERTAS_THINFIRM=m
+# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set
+CONFIG_LIBERTAS_THINFIRM_USB=m
+CONFIG_AIRO=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_PCMCIA_ATMEL=m
+CONFIG_AT76C50X_USB=m
+CONFIG_AIRO_CS=m
+CONFIG_PCMCIA_WL3501=m
+CONFIG_PRISM54=m
+CONFIG_USB_ZD1201=m
+CONFIG_USB_NET_RNDIS_WLAN=m
+CONFIG_RTL8180=m
+CONFIG_RTL8187=m
+CONFIG_RTL8187_LEDS=y
+CONFIG_ADM8211=m
+CONFIG_MAC80211_HWSIM=m
+CONFIG_MWL8K=m
+CONFIG_ATH_COMMON=m
+# CONFIG_ATH_DEBUG is not set
+CONFIG_ATH5K=m
+# CONFIG_ATH5K_DEBUG is not set
+CONFIG_ATH9K_HW=m
+CONFIG_ATH9K_COMMON=m
+CONFIG_ATH9K=m
+# CONFIG_ATH9K_DEBUGFS is not set
+CONFIG_ATH9K_HTC=m
+# CONFIG_ATH9K_HTC_DEBUGFS is not set
+CONFIG_AR9170_USB=m
+CONFIG_AR9170_LEDS=y
+CONFIG_B43=m
+CONFIG_B43_PCI_AUTOSELECT=y
+CONFIG_B43_PCICORE_AUTOSELECT=y
+CONFIG_B43_PCMCIA=y
+CONFIG_B43_SDIO=y
+CONFIG_B43_PIO=y
+CONFIG_B43_PHY_LP=y
+CONFIG_B43_LEDS=y
+CONFIG_B43_HWRNG=y
+# CONFIG_B43_DEBUG is not set
+CONFIG_B43LEGACY=m
+CONFIG_B43LEGACY_PCI_AUTOSELECT=y
+CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y
+CONFIG_B43LEGACY_LEDS=y
+CONFIG_B43LEGACY_HWRNG=y
+CONFIG_B43LEGACY_DEBUG=y
+CONFIG_B43LEGACY_DMA=y
+CONFIG_B43LEGACY_PIO=y
+CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
+# CONFIG_B43LEGACY_DMA_MODE is not set
+# CONFIG_B43LEGACY_PIO_MODE is not set
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_HOSTAP_CS=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+# CONFIG_IPW2100_DEBUG is not set
+CONFIG_IPW2200=m
+CONFIG_IPW2200_MONITOR=y
+CONFIG_IPW2200_RADIOTAP=y
+CONFIG_IPW2200_PROMISCUOUS=y
+CONFIG_IPW2200_QOS=y
+# CONFIG_IPW2200_DEBUG is not set
+CONFIG_LIBIPW=m
+# CONFIG_LIBIPW_DEBUG is not set
+CONFIG_IWLWIFI=m
+# CONFIG_IWLWIFI_DEBUG is not set
+CONFIG_IWLAGN=m
+CONFIG_IWL4965=y
+CONFIG_IWL5000=y
+CONFIG_IWL3945=m
+CONFIG_IWM=m
+# CONFIG_IWM_DEBUG is not set
+CONFIG_LIBERTAS=m
+CONFIG_LIBERTAS_USB=m
+CONFIG_LIBERTAS_CS=m
+CONFIG_LIBERTAS_SDIO=m
+CONFIG_LIBERTAS_SPI=m
+# CONFIG_LIBERTAS_DEBUG is not set
+CONFIG_LIBERTAS_MESH=y
+CONFIG_HERMES=m
+# CONFIG_HERMES_PRISM is not set
+CONFIG_HERMES_CACHE_FW_ON_INIT=y
+CONFIG_PLX_HERMES=m
+CONFIG_TMD_HERMES=m
+CONFIG_NORTEL_HERMES=m
+CONFIG_PCMCIA_HERMES=m
+CONFIG_PCMCIA_SPECTRUM=m
+CONFIG_ORINOCO_USB=m
+CONFIG_P54_COMMON=m
+CONFIG_P54_USB=m
+CONFIG_P54_PCI=m
+CONFIG_P54_SPI=m
+CONFIG_P54_LEDS=y
+CONFIG_RT2X00=m
+CONFIG_RT2400PCI=m
+CONFIG_RT2500PCI=m
+CONFIG_RT61PCI=m
+CONFIG_RT2800PCI_PCI=y
+CONFIG_RT2800PCI=m
+CONFIG_RT2800PCI_RT30XX=y
+# CONFIG_RT2800PCI_RT35XX is not set
+CONFIG_RT2500USB=m
+CONFIG_RT73USB=m
+CONFIG_RT2800USB=m
+CONFIG_RT2800USB_RT30XX=y
+# CONFIG_RT2800USB_RT35XX is not set
+CONFIG_RT2800USB_UNKNOWN=y
+CONFIG_RT2800_LIB=m
+CONFIG_RT2X00_LIB_PCI=m
+CONFIG_RT2X00_LIB_USB=m
+CONFIG_RT2X00_LIB=m
+CONFIG_RT2X00_LIB_HT=y
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_CRYPTO=y
+CONFIG_RT2X00_LIB_LEDS=y
+# CONFIG_RT2X00_DEBUG is not set
+CONFIG_WL12XX=m
+CONFIG_WL1251=m
+CONFIG_WL1251_SPI=m
+CONFIG_WL1251_SDIO=m
+CONFIG_WL1271=m
+CONFIG_WL1271_SPI=m
+CONFIG_WL1271_SDIO=m
+CONFIG_ZD1211RW=m
+# CONFIG_ZD1211RW_DEBUG is not set
+
+#
+# WiMAX Wireless Broadband devices
+#
+CONFIG_WIMAX_I2400M=m
+CONFIG_WIMAX_I2400M_USB=m
+CONFIG_WIMAX_I2400M_SDIO=m
+CONFIG_WIMAX_IWMC3200_SDIO=y
+CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8
+
+#
+# USB Network Adapters
+#
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_AX8817X=m
+CONFIG_USB_NET_CDCETHER=m
+CONFIG_USB_NET_CDC_EEM=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_GL620A=m
+CONFIG_USB_NET_NET1080=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_NET_RNDIS_HOST=m
+CONFIG_USB_NET_CDC_SUBSET=m
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_BELKIN=y
+CONFIG_USB_ARMLINUX=y
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_KC2190=y
+CONFIG_USB_NET_ZAURUS=m
+CONFIG_USB_HSO=m
+CONFIG_USB_NET_INT51X1=m
+CONFIG_USB_CDC_PHONET=m
+CONFIG_USB_IPHETH=m
+CONFIG_USB_SIERRA_NET=m
+CONFIG_NET_PCMCIA=y
+CONFIG_PCMCIA_3C589=m
+CONFIG_PCMCIA_3C574=m
+CONFIG_PCMCIA_FMVJ18X=m
+CONFIG_PCMCIA_PCNET=m
+CONFIG_PCMCIA_NMCLAN=m
+CONFIG_PCMCIA_SMC91C92=m
+CONFIG_PCMCIA_XIRC2PS=m
+CONFIG_PCMCIA_AXNET=m
+CONFIG_ARCNET_COM20020_CS=m
+CONFIG_WAN=y
+CONFIG_LANMEDIA=m
+CONFIG_HDLC=m
+CONFIG_HDLC_RAW=m
+CONFIG_HDLC_RAW_ETH=m
+CONFIG_HDLC_CISCO=m
+CONFIG_HDLC_FR=m
+CONFIG_HDLC_PPP=m
+CONFIG_HDLC_X25=m
+CONFIG_PCI200SYN=m
+CONFIG_WANXL=m
+# CONFIG_WANXL_BUILD_FIRMWARE is not set
+CONFIG_PC300TOO=m
+CONFIG_FARSYNC=m
+CONFIG_DSCC4=m
+CONFIG_DSCC4_PCISYNC=y
+CONFIG_DSCC4_PCI_RST=y
+CONFIG_DLCI=m
+CONFIG_DLCI_MAX=8
+CONFIG_WAN_ROUTER_DRIVERS=m
+CONFIG_CYCLADES_SYNC=m
+CONFIG_CYCLOMX_X25=y
+CONFIG_LAPBETHER=m
+CONFIG_X25_ASY=m
+CONFIG_SBNI=m
+CONFIG_SBNI_MULTILINE=y
+CONFIG_ATM_DRIVERS=y
+CONFIG_ATM_DUMMY=m
+CONFIG_ATM_TCP=m
+CONFIG_ATM_LANAI=m
+CONFIG_ATM_ENI=m
+# CONFIG_ATM_ENI_DEBUG is not set
+# CONFIG_ATM_ENI_TUNE_BURST is not set
+CONFIG_ATM_FIRESTREAM=m
+CONFIG_ATM_ZATM=m
+# CONFIG_ATM_ZATM_DEBUG is not set
+CONFIG_ATM_NICSTAR=m
+# CONFIG_ATM_NICSTAR_USE_SUNI is not set
+# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set
+CONFIG_ATM_IDT77252=m
+# CONFIG_ATM_IDT77252_DEBUG is not set
+# CONFIG_ATM_IDT77252_RCV_ALL is not set
+CONFIG_ATM_IDT77252_USE_SUNI=y
+CONFIG_ATM_AMBASSADOR=m
+# CONFIG_ATM_AMBASSADOR_DEBUG is not set
+CONFIG_ATM_HORIZON=m
+# CONFIG_ATM_HORIZON_DEBUG is not set
+CONFIG_ATM_IA=m
+# CONFIG_ATM_IA_DEBUG is not set
+CONFIG_ATM_FORE200E=m
+CONFIG_ATM_FORE200E_USE_TASKLET=y
+CONFIG_ATM_FORE200E_TX_RETRY=16
+CONFIG_ATM_FORE200E_DEBUG=0
+CONFIG_ATM_HE=m
+CONFIG_ATM_HE_USE_SUNI=y
+CONFIG_ATM_SOLOS=m
+CONFIG_IEEE802154_DRIVERS=m
+CONFIG_IEEE802154_FAKEHARD=m
+
+#
+# CAIF transport drivers
+#
+CONFIG_CAIF_TTY=m
+CONFIG_CAIF_SPI_SLAVE=m
+# CONFIG_CAIF_SPI_SYNC is not set
+CONFIG_FDDI=y
+CONFIG_DEFXX=m
+# CONFIG_DEFXX_MMIO is not set
+CONFIG_SKFP=m
+CONFIG_HIPPI=y
+CONFIG_ROADRUNNER=m
+# CONFIG_ROADRUNNER_LARGE_RINGS is not set
+CONFIG_PLIP=m
+CONFIG_PPP=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPPOATM=m
+CONFIG_PPPOL2TP=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLHC=m
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+# CONFIG_NET_FC is not set
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+CONFIG_VIRTIO_NET=m
+CONFIG_VMXNET3=m
+CONFIG_ISDN=y
+# CONFIG_ISDN_I4L is not set
+CONFIG_ISDN_CAPI=m
+# CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON is not set
+# CONFIG_CAPI_TRACE is not set
+CONFIG_ISDN_CAPI_MIDDLEWARE=y
+CONFIG_ISDN_CAPI_CAPI20=m
+CONFIG_ISDN_CAPI_CAPIFS_BOOL=y
+CONFIG_ISDN_CAPI_CAPIFS=m
+
+#
+# CAPI hardware drivers
+#
+CONFIG_CAPI_AVM=y
+CONFIG_ISDN_DRV_AVMB1_B1PCI=m
+CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
+CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
+CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
+CONFIG_ISDN_DRV_AVMB1_T1PCI=m
+CONFIG_ISDN_DRV_AVMB1_C4=m
+CONFIG_CAPI_EICON=y
+CONFIG_ISDN_DIVAS=m
+CONFIG_ISDN_DIVAS_BRIPCI=y
+CONFIG_ISDN_DIVAS_PRIPCI=y
+CONFIG_ISDN_DIVAS_DIVACAPI=m
+CONFIG_ISDN_DIVAS_USERIDI=m
+CONFIG_ISDN_DIVAS_MAINT=m
+CONFIG_ISDN_DRV_GIGASET=m
+CONFIG_GIGASET_CAPI=y
+# CONFIG_GIGASET_DUMMYLL is not set
+CONFIG_GIGASET_BASE=m
+CONFIG_GIGASET_M105=m
+CONFIG_GIGASET_M101=m
+# CONFIG_GIGASET_DEBUG is not set
+CONFIG_HYSDN=m
+CONFIG_HYSDN_CAPI=y
+CONFIG_MISDN=m
+CONFIG_MISDN_DSP=m
+CONFIG_MISDN_L1OIP=m
+
+#
+# mISDN hardware drivers
+#
+CONFIG_MISDN_HFCPCI=m
+CONFIG_MISDN_HFCMULTI=m
+CONFIG_MISDN_HFCUSB=m
+CONFIG_MISDN_AVMFRITZ=m
+# CONFIG_MISDN_SPEEDFAX is not set
+# CONFIG_MISDN_INFINEON is not set
+# CONFIG_MISDN_W6692 is not set
+# CONFIG_MISDN_NETJET is not set
+CONFIG_MISDN_IPAC=m
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=m
+CONFIG_INPUT_POLLDEV=m
+CONFIG_INPUT_SPARSEKMAP=m
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=m
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+CONFIG_INPUT_JOYDEV=m
+CONFIG_INPUT_EVDEV=m
+CONFIG_INPUT_EVBUG=m
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_QT2160 is not set
+CONFIG_KEYBOARD_LKKBD=m
+CONFIG_KEYBOARD_GPIO=m
+CONFIG_KEYBOARD_TCA6416=m
+CONFIG_KEYBOARD_MATRIX=m
+CONFIG_KEYBOARD_LM8323=m
+# CONFIG_KEYBOARD_MAX7359 is not set
+CONFIG_KEYBOARD_MCS=m
+CONFIG_KEYBOARD_NEWTON=m
+# CONFIG_KEYBOARD_OPENCORES is not set
+CONFIG_KEYBOARD_STOWAWAY=m
+CONFIG_KEYBOARD_SUNKBD=m
+CONFIG_KEYBOARD_XTKBD=m
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_LIFEBOOK=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_SENTELIC is not set
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+CONFIG_MOUSE_SERIAL=m
+CONFIG_MOUSE_APPLETOUCH=m
+CONFIG_MOUSE_BCM5974=m
+CONFIG_MOUSE_VSXXXAA=m
+CONFIG_MOUSE_GPIO=m
+CONFIG_MOUSE_SYNAPTICS_I2C=m
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ADS7846=m
+CONFIG_TOUCHSCREEN_AD7877=m
+CONFIG_TOUCHSCREEN_AD7879=m
+CONFIG_TOUCHSCREEN_AD7879_I2C=m
+CONFIG_TOUCHSCREEN_AD7879_SPI=m
+CONFIG_TOUCHSCREEN_CY8CTMG110=m
+CONFIG_TOUCHSCREEN_DYNAPRO=m
+CONFIG_TOUCHSCREEN_HAMPSHIRE=m
+CONFIG_TOUCHSCREEN_EETI=m
+CONFIG_TOUCHSCREEN_FUJITSU=m
+CONFIG_TOUCHSCREEN_GUNZE=m
+CONFIG_TOUCHSCREEN_ELO=m
+CONFIG_TOUCHSCREEN_WACOM_W8001=m
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+CONFIG_TOUCHSCREEN_MTOUCH=m
+CONFIG_TOUCHSCREEN_INEXIO=m
+CONFIG_TOUCHSCREEN_MK712=m
+CONFIG_TOUCHSCREEN_PENMOUNT=m
+CONFIG_TOUCHSCREEN_QT602240=m
+CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
+CONFIG_TOUCHSCREEN_TOUCHWIN=m
+CONFIG_TOUCHSCREEN_UCB1400=m
+CONFIG_TOUCHSCREEN_WM97XX=m
+CONFIG_TOUCHSCREEN_WM9705=y
+CONFIG_TOUCHSCREEN_WM9712=y
+CONFIG_TOUCHSCREEN_WM9713=y
+CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
+CONFIG_TOUCHSCREEN_USB_EGALAX=y
+CONFIG_TOUCHSCREEN_USB_PANJIT=y
+CONFIG_TOUCHSCREEN_USB_3M=y
+CONFIG_TOUCHSCREEN_USB_ITM=y
+CONFIG_TOUCHSCREEN_USB_ETURBO=y
+CONFIG_TOUCHSCREEN_USB_GUNZE=y
+CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y
+CONFIG_TOUCHSCREEN_USB_IRTOUCH=y
+CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
+CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
+CONFIG_TOUCHSCREEN_USB_GOTOP=y
+CONFIG_TOUCHSCREEN_USB_JASTEC=y
+CONFIG_TOUCHSCREEN_USB_E2I=y
+CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y
+CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y
+CONFIG_TOUCHSCREEN_USB_NEXIO=y
+CONFIG_TOUCHSCREEN_TOUCHIT213=m
+CONFIG_TOUCHSCREEN_TSC2007=m
+CONFIG_TOUCHSCREEN_TPS6507X=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_AD714X=m
+CONFIG_INPUT_AD714X_I2C=m
+CONFIG_INPUT_AD714X_SPI=m
+CONFIG_INPUT_PCSPKR=m
+CONFIG_INPUT_APANEL=m
+CONFIG_INPUT_ATLAS_BTNS=m
+CONFIG_INPUT_ATI_REMOTE=m
+CONFIG_INPUT_ATI_REMOTE2=m
+CONFIG_INPUT_KEYSPAN_REMOTE=m
+CONFIG_INPUT_POWERMATE=m
+CONFIG_INPUT_YEALINK=m
+CONFIG_INPUT_CM109=m
+CONFIG_INPUT_UINPUT=m
+CONFIG_INPUT_WINBOND_CIR=m
+CONFIG_INPUT_PCF50633_PMU=m
+CONFIG_INPUT_PCF8574=m
+CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
+CONFIG_INPUT_ADXL34X=m
+CONFIG_INPUT_ADXL34X_I2C=m
+CONFIG_INPUT_ADXL34X_SPI=m
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=m
+CONFIG_SERIO_CT82C710=m
+CONFIG_SERIO_PARKBD=m
+CONFIG_SERIO_PCIPS2=m
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=m
+CONFIG_SERIO_ALTERA_PS2=m
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_COMPUTONE=m
+CONFIG_ROCKETPORT=m
+CONFIG_CYCLADES=m
+# CONFIG_CYZ_INTR is not set
+CONFIG_DIGIEPCA=m
+CONFIG_MOXA_INTELLIO=m
+CONFIG_MOXA_SMARTIO=m
+CONFIG_ISI=m
+CONFIG_SYNCLINK=m
+CONFIG_SYNCLINKMP=m
+CONFIG_SYNCLINK_GT=m
+CONFIG_N_HDLC=m
+# CONFIG_N_GSM is not set
+CONFIG_RISCOM8=m
+CONFIG_SPECIALIX=m
+CONFIG_STALDRV=y
+CONFIG_STALLION=m
+CONFIG_ISTALLION=m
+CONFIG_NOZOMI=m
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_FIX_EARLYCON_MEM=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_PNP=y
+CONFIG_SERIAL_8250_CS=m
+CONFIG_SERIAL_8250_NR_UARTS=16
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
+CONFIG_SERIAL_8250_RSA=y
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_MAX3100=m
+CONFIG_SERIAL_MAX3107=m
+CONFIG_SERIAL_MRST_MAX3110=m
+CONFIG_SERIAL_MFD_HSU=m
+CONFIG_SERIAL_UARTLITE=m
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_JSM=m
+CONFIG_SERIAL_TIMBERDALE=m
+CONFIG_SERIAL_ALTERA_JTAGUART=m
+CONFIG_SERIAL_ALTERA_UART=m
+CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4
+CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_PRINTER=m
+# CONFIG_LP_CONSOLE is not set
+CONFIG_PPDEV=m
+CONFIG_HVC_DRIVER=y
+CONFIG_VIRTIO_CONSOLE=m
+CONFIG_IPMI_HANDLER=m
+# CONFIG_IPMI_PANIC_EVENT is not set
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
+CONFIG_IPMI_WATCHDOG=m
+CONFIG_IPMI_POWEROFF=m
+CONFIG_HW_RANDOM=m
+CONFIG_HW_RANDOM_TIMERIOMEM=m
+CONFIG_HW_RANDOM_INTEL=m
+CONFIG_HW_RANDOM_AMD=m
+CONFIG_HW_RANDOM_VIA=m
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_NVRAM=m
+CONFIG_R3964=m
+CONFIG_APPLICOM=m
+
+#
+# PCMCIA character devices
+#
+CONFIG_SYNCLINK_CS=m
+CONFIG_CARDMAN_4000=m
+CONFIG_CARDMAN_4040=m
+CONFIG_IPWIRELESS=m
+CONFIG_MWAVE=m
+CONFIG_RAW_DRIVER=m
+CONFIG_MAX_RAW_DEVS=256
+CONFIG_HPET=y
+CONFIG_HPET_MMAP=y
+CONFIG_HANGCHECK_TIMER=m
+CONFIG_TCG_TPM=m
+CONFIG_TCG_TIS=m
+CONFIG_TCG_NSC=m
+CONFIG_TCG_ATMEL=m
+CONFIG_TCG_INFINEON=m
+CONFIG_TELCLOCK=m
+CONFIG_DEVPORT=y
+CONFIG_RAMOOPS=m
+CONFIG_I2C=m
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_MUX=m
+
+#
+# Multiplexer I2C Chip support
+#
+CONFIG_I2C_MUX_PCA954x=m
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_SMBUS=m
+CONFIG_I2C_ALGOBIT=m
+CONFIG_I2C_ALGOPCA=m
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+CONFIG_I2C_ALI1535=m
+CONFIG_I2C_ALI1563=m
+CONFIG_I2C_ALI15X3=m
+CONFIG_I2C_AMD756=m
+CONFIG_I2C_AMD756_S4882=m
+CONFIG_I2C_AMD8111=m
+CONFIG_I2C_I801=m
+CONFIG_I2C_ISCH=m
+CONFIG_I2C_PIIX4=m
+CONFIG_I2C_NFORCE2=m
+CONFIG_I2C_NFORCE2_S4985=m
+CONFIG_I2C_SIS5595=m
+CONFIG_I2C_SIS630=m
+CONFIG_I2C_SIS96X=m
+CONFIG_I2C_VIA=m
+CONFIG_I2C_VIAPRO=m
+
+#
+# ACPI drivers
+#
+CONFIG_I2C_SCMI=m
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+CONFIG_I2C_GPIO=m
+CONFIG_I2C_OCORES=m
+CONFIG_I2C_PCA_PLATFORM=m
+CONFIG_I2C_SIMTEC=m
+CONFIG_I2C_XILINX=m
+
+#
+# External I2C/SMBus adapter drivers
+#
+CONFIG_I2C_PARPORT=m
+CONFIG_I2C_PARPORT_LIGHT=m
+CONFIG_I2C_TAOS_EVM=m
+CONFIG_I2C_TINY_USB=m
+
+#
+# Other I2C/SMBus bus drivers
+#
+CONFIG_I2C_STUB=m
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_BITBANG=m
+CONFIG_SPI_BUTTERFLY=m
+CONFIG_SPI_GPIO=m
+CONFIG_SPI_LM70_LLP=m
+# CONFIG_SPI_XILINX is not set
+CONFIG_SPI_DESIGNWARE=m
+CONFIG_SPI_DW_PCI=m
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=m
+CONFIG_SPI_TLE62X0=m
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_GPIO_SYSFS is not set
+CONFIG_GPIO_MAX730X=m
+
+#
+# Memory mapped GPIO expanders:
+#
+CONFIG_GPIO_IT8761E=m
+CONFIG_GPIO_SCH=m
+
+#
+# I2C GPIO expanders:
+#
+CONFIG_GPIO_MAX7300=m
+CONFIG_GPIO_MAX732X=m
+CONFIG_GPIO_PCA953X=m
+CONFIG_GPIO_PCF857X=m
+CONFIG_GPIO_ADP5588=m
+
+#
+# PCI GPIO expanders:
+#
+CONFIG_GPIO_CS5535=m
+# CONFIG_GPIO_LANGWELL is not set
+CONFIG_GPIO_TIMBERDALE=y
+CONFIG_GPIO_RDC321X=m
+
+#
+# SPI GPIO expanders:
+#
+CONFIG_GPIO_MAX7301=m
+CONFIG_GPIO_MCP23S08=m
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_GPIO_UCB1400 is not set
+
+#
+# MODULbus GPIO expanders:
+#
+CONFIG_GPIO_JANZ_TTL=m
+CONFIG_W1=m
+CONFIG_W1_CON=y
+
+#
+# 1-wire Bus Masters
+#
+CONFIG_W1_MASTER_MATROX=m
+CONFIG_W1_MASTER_DS2490=m
+CONFIG_W1_MASTER_DS2482=m
+CONFIG_W1_MASTER_GPIO=m
+
+#
+# 1-wire Slaves
+#
+CONFIG_W1_SLAVE_THERM=m
+CONFIG_W1_SLAVE_SMEM=m
+CONFIG_W1_SLAVE_DS2431=m
+CONFIG_W1_SLAVE_DS2433=m
+# CONFIG_W1_SLAVE_DS2433_CRC is not set
+CONFIG_W1_SLAVE_DS2760=m
+CONFIG_W1_SLAVE_BQ27000=m
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+CONFIG_PDA_POWER=m
+CONFIG_TEST_POWER=m
+CONFIG_BATTERY_DS2760=m
+CONFIG_BATTERY_DS2782=m
+CONFIG_BATTERY_BQ27x00=m
+CONFIG_BATTERY_MAX17040=m
+CONFIG_CHARGER_PCF50633=m
+CONFIG_HWMON=m
+CONFIG_HWMON_VID=m
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+CONFIG_SENSORS_ABITUGURU=m
+CONFIG_SENSORS_ABITUGURU3=m
+CONFIG_SENSORS_AD7414=m
+CONFIG_SENSORS_AD7418=m
+CONFIG_SENSORS_ADCXX=m
+CONFIG_SENSORS_ADM1021=m
+CONFIG_SENSORS_ADM1025=m
+CONFIG_SENSORS_ADM1026=m
+CONFIG_SENSORS_ADM1029=m
+CONFIG_SENSORS_ADM1031=m
+CONFIG_SENSORS_ADM9240=m
+CONFIG_SENSORS_ADT7411=m
+CONFIG_SENSORS_ADT7462=m
+CONFIG_SENSORS_ADT7470=m
+CONFIG_SENSORS_ADT7475=m
+CONFIG_SENSORS_ASC7621=m
+CONFIG_SENSORS_K8TEMP=m
+CONFIG_SENSORS_K10TEMP=m
+CONFIG_SENSORS_ASB100=m
+CONFIG_SENSORS_ATXP1=m
+CONFIG_SENSORS_DS1621=m
+CONFIG_SENSORS_I5K_AMB=m
+CONFIG_SENSORS_F71805F=m
+CONFIG_SENSORS_F71882FG=m
+CONFIG_SENSORS_F75375S=m
+CONFIG_SENSORS_FSCHMD=m
+CONFIG_SENSORS_G760A=m
+CONFIG_SENSORS_GL518SM=m
+CONFIG_SENSORS_GL520SM=m
+CONFIG_SENSORS_CORETEMP=m
+CONFIG_SENSORS_PKGTEMP=m
+CONFIG_SENSORS_IBMAEM=m
+CONFIG_SENSORS_IBMPEX=m
+CONFIG_SENSORS_IT87=m
+CONFIG_SENSORS_JC42=m
+CONFIG_SENSORS_LM63=m
+CONFIG_SENSORS_LM70=m
+CONFIG_SENSORS_LM73=m
+CONFIG_SENSORS_LM75=m
+CONFIG_SENSORS_LM77=m
+CONFIG_SENSORS_LM78=m
+CONFIG_SENSORS_LM80=m
+CONFIG_SENSORS_LM83=m
+CONFIG_SENSORS_LM85=m
+CONFIG_SENSORS_LM87=m
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_LM92=m
+CONFIG_SENSORS_LM93=m
+CONFIG_SENSORS_LTC4215=m
+CONFIG_SENSORS_LTC4245=m
+CONFIG_SENSORS_LM95241=m
+CONFIG_SENSORS_MAX1111=m
+CONFIG_SENSORS_MAX1619=m
+CONFIG_SENSORS_MAX6650=m
+CONFIG_SENSORS_PC87360=m
+CONFIG_SENSORS_PC87427=m
+CONFIG_SENSORS_PCF8591=m
+CONFIG_SENSORS_SHT15=m
+CONFIG_SENSORS_SIS5595=m
+CONFIG_SENSORS_SMM665=m
+CONFIG_SENSORS_DME1737=m
+CONFIG_SENSORS_EMC1403=m
+CONFIG_SENSORS_EMC2103=m
+CONFIG_SENSORS_SMSC47M1=m
+CONFIG_SENSORS_SMSC47M192=m
+CONFIG_SENSORS_SMSC47B397=m
+CONFIG_SENSORS_ADS7828=m
+CONFIG_SENSORS_ADS7871=m
+CONFIG_SENSORS_AMC6821=m
+CONFIG_SENSORS_THMC50=m
+CONFIG_SENSORS_TMP102=m
+CONFIG_SENSORS_TMP401=m
+CONFIG_SENSORS_TMP421=m
+CONFIG_SENSORS_VIA_CPUTEMP=m
+CONFIG_SENSORS_VIA686A=m
+CONFIG_SENSORS_VT1211=m
+CONFIG_SENSORS_VT8231=m
+CONFIG_SENSORS_W83781D=m
+CONFIG_SENSORS_W83791D=m
+CONFIG_SENSORS_W83792D=m
+CONFIG_SENSORS_W83793=m
+CONFIG_SENSORS_W83L785TS=m
+CONFIG_SENSORS_W83L786NG=m
+CONFIG_SENSORS_W83627HF=m
+CONFIG_SENSORS_W83627EHF=m
+CONFIG_SENSORS_HDAPS=m
+CONFIG_SENSORS_LIS3_I2C=m
+CONFIG_SENSORS_APPLESMC=m
+
+#
+# ACPI drivers
+#
+CONFIG_SENSORS_ATK0110=m
+CONFIG_SENSORS_LIS3LV02D=m
+CONFIG_THERMAL=y
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_ACQUIRE_WDT=m
+CONFIG_ADVANTECH_WDT=m
+CONFIG_ALIM1535_WDT=m
+CONFIG_ALIM7101_WDT=m
+CONFIG_F71808E_WDT=m
+CONFIG_GEODE_WDT=m
+CONFIG_SC520_WDT=m
+# CONFIG_SBC_FITPC2_WATCHDOG is not set
+CONFIG_EUROTECH_WDT=m
+CONFIG_IB700_WDT=m
+CONFIG_IBMASR=m
+CONFIG_WAFER_WDT=m
+CONFIG_I6300ESB_WDT=m
+CONFIG_ITCO_WDT=m
+CONFIG_ITCO_VENDOR_SUPPORT=y
+CONFIG_IT8712F_WDT=m
+CONFIG_IT87_WDT=m
+# CONFIG_HP_WATCHDOG is not set
+CONFIG_SC1200_WDT=m
+CONFIG_PC87413_WDT=m
+CONFIG_60XX_WDT=m
+CONFIG_SBC8360_WDT=m
+CONFIG_CPU5_WDT=m
+CONFIG_SMSC_SCH311X_WDT=m
+CONFIG_SMSC37B787_WDT=m
+CONFIG_W83627HF_WDT=m
+CONFIG_W83697HF_WDT=m
+CONFIG_W83697UG_WDT=m
+CONFIG_W83877F_WDT=m
+CONFIG_W83977F_WDT=m
+CONFIG_MACHZ_WDT=m
+CONFIG_SBC_EPX_C3_WATCHDOG=m
+
+#
+# PCI-based Watchdog Cards
+#
+CONFIG_PCIPCWATCHDOG=m
+CONFIG_WDTPCI=m
+
+#
+# USB-based Watchdog Cards
+#
+CONFIG_USBPCWATCHDOG=m
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB=m
+CONFIG_SSB_SPROM=y
+CONFIG_SSB_BLOCKIO=y
+CONFIG_SSB_PCIHOST_POSSIBLE=y
+CONFIG_SSB_PCIHOST=y
+CONFIG_SSB_B43_PCI_BRIDGE=y
+CONFIG_SSB_PCMCIAHOST_POSSIBLE=y
+CONFIG_SSB_PCMCIAHOST=y
+CONFIG_SSB_SDIOHOST_POSSIBLE=y
+CONFIG_SSB_SDIOHOST=y
+# CONFIG_SSB_SILENT is not set
+# CONFIG_SSB_DEBUG is not set
+CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
+CONFIG_SSB_DRIVER_PCICORE=y
+CONFIG_MFD_SUPPORT=y
+CONFIG_MFD_CORE=y
+CONFIG_MFD_SM501=m
+# CONFIG_MFD_SM501_GPIO is not set
+CONFIG_HTC_PASIC3=m
+CONFIG_UCB1400_CORE=m
+CONFIG_TPS65010=m
+CONFIG_TPS6507X=m
+# CONFIG_MFD_TMIO is not set
+CONFIG_MFD_WM8400=m
+CONFIG_MFD_PCF50633=m
+# CONFIG_MFD_MC13783 is not set
+CONFIG_PCF50633_ADC=m
+CONFIG_PCF50633_GPIO=m
+CONFIG_ABX500_CORE=y
+# CONFIG_EZX_PCAP is not set
+CONFIG_AB8500_CORE=y
+CONFIG_MFD_TIMBERDALE=m
+CONFIG_LPC_SCH=m
+CONFIG_MFD_RDC321X=m
+CONFIG_MFD_JANZ_CMODIO=m
+CONFIG_MFD_TPS6586X=m
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_DUMMY is not set
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
+CONFIG_REGULATOR_VIRTUAL_CONSUMER=m
+CONFIG_REGULATOR_USERSPACE_CONSUMER=m
+CONFIG_REGULATOR_BQ24022=m
+CONFIG_REGULATOR_MAX1586=m
+CONFIG_REGULATOR_MAX8649=m
+CONFIG_REGULATOR_MAX8660=m
+CONFIG_REGULATOR_WM8400=m
+CONFIG_REGULATOR_PCF50633=m
+CONFIG_REGULATOR_LP3971=m
+# CONFIG_REGULATOR_TPS65023 is not set
+# CONFIG_REGULATOR_TPS6507X is not set
+CONFIG_REGULATOR_ISL6271A=m
+CONFIG_REGULATOR_AD5398=m
+# CONFIG_REGULATOR_AB8500 is not set
+CONFIG_REGULATOR_TPS6586X=m
+CONFIG_MEDIA_SUPPORT=m
+
+#
+# Multimedia core support
+#
+CONFIG_VIDEO_DEV=m
+CONFIG_VIDEO_V4L2_COMMON=m
+# CONFIG_VIDEO_ALLOW_V4L1 is not set
+CONFIG_VIDEO_V4L1_COMPAT=y
+CONFIG_DVB_CORE=m
+CONFIG_VIDEO_MEDIA=m
+
+#
+# Multimedia drivers
+#
+CONFIG_VIDEO_SAA7146=m
+CONFIG_VIDEO_SAA7146_VV=m
+CONFIG_IR_CORE=m
+CONFIG_VIDEO_IR=m
+CONFIG_LIRC=m
+CONFIG_RC_MAP=m
+CONFIG_IR_NEC_DECODER=m
+CONFIG_IR_RC5_DECODER=m
+CONFIG_IR_RC6_DECODER=m
+CONFIG_IR_JVC_DECODER=m
+CONFIG_IR_SONY_DECODER=m
+CONFIG_IR_LIRC_CODEC=m
+CONFIG_IR_IMON=m
+CONFIG_IR_MCEUSB=m
+CONFIG_IR_ENE=m
+CONFIG_IR_STREAMZAP=m
+# CONFIG_MEDIA_ATTACH is not set
+CONFIG_MEDIA_TUNER=m
+# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
+CONFIG_MEDIA_TUNER_SIMPLE=m
+CONFIG_MEDIA_TUNER_TDA8290=m
+CONFIG_MEDIA_TUNER_TDA827X=m
+CONFIG_MEDIA_TUNER_TDA18271=m
+CONFIG_MEDIA_TUNER_TDA9887=m
+CONFIG_MEDIA_TUNER_TEA5761=m
+CONFIG_MEDIA_TUNER_TEA5767=m
+CONFIG_MEDIA_TUNER_MT20XX=m
+CONFIG_MEDIA_TUNER_MT2060=m
+CONFIG_MEDIA_TUNER_MT2266=m
+CONFIG_MEDIA_TUNER_MT2131=m
+CONFIG_MEDIA_TUNER_QT1010=m
+CONFIG_MEDIA_TUNER_XC2028=m
+CONFIG_MEDIA_TUNER_XC5000=m
+CONFIG_MEDIA_TUNER_MXL5005S=m
+CONFIG_MEDIA_TUNER_MXL5007T=m
+CONFIG_MEDIA_TUNER_MC44S803=m
+CONFIG_MEDIA_TUNER_MAX2165=m
+CONFIG_VIDEO_V4L2=m
+CONFIG_VIDEOBUF_GEN=m
+CONFIG_VIDEOBUF_DMA_SG=m
+CONFIG_VIDEOBUF_VMALLOC=m
+CONFIG_VIDEOBUF_DMA_CONTIG=m
+CONFIG_VIDEOBUF_DVB=m
+CONFIG_VIDEO_BTCX=m
+CONFIG_VIDEO_TVEEPROM=m
+CONFIG_VIDEO_TUNER=m
+CONFIG_V4L2_MEM2MEM_DEV=m
+CONFIG_VIDEO_CAPTURE_DRIVERS=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
+CONFIG_VIDEO_IR_I2C=m
+
+#
+# Encoders/decoders and other helper chips
+#
+
+#
+# Audio decoders
+#
+CONFIG_VIDEO_TVAUDIO=m
+CONFIG_VIDEO_TDA7432=m
+CONFIG_VIDEO_TDA9840=m
+CONFIG_VIDEO_TDA9875=m
+CONFIG_VIDEO_TEA6415C=m
+CONFIG_VIDEO_TEA6420=m
+CONFIG_VIDEO_MSP3400=m
+CONFIG_VIDEO_CS5345=m
+CONFIG_VIDEO_CS53L32A=m
+CONFIG_VIDEO_M52790=m
+CONFIG_VIDEO_TLV320AIC23B=m
+CONFIG_VIDEO_WM8775=m
+CONFIG_VIDEO_WM8739=m
+CONFIG_VIDEO_VP27SMPX=m
+
+#
+# RDS decoders
+#
+CONFIG_VIDEO_SAA6588=m
+
+#
+# Video decoders
+#
+# CONFIG_VIDEO_ADV7180 is not set
+CONFIG_VIDEO_BT819=m
+CONFIG_VIDEO_BT856=m
+CONFIG_VIDEO_BT866=m
+CONFIG_VIDEO_KS0127=m
+CONFIG_VIDEO_OV7670=m
+CONFIG_VIDEO_MT9V011=m
+CONFIG_VIDEO_TCM825X=m
+CONFIG_VIDEO_SAA7110=m
+CONFIG_VIDEO_SAA711X=m
+CONFIG_VIDEO_SAA717X=m
+CONFIG_VIDEO_SAA7191=m
+CONFIG_VIDEO_TVP514X=m
+CONFIG_VIDEO_TVP5150=m
+CONFIG_VIDEO_TVP7002=m
+CONFIG_VIDEO_VPX3220=m
+
+#
+# Video and audio decoders
+#
+CONFIG_VIDEO_CX25840=m
+
+#
+# MPEG video encoders
+#
+CONFIG_VIDEO_CX2341X=m
+
+#
+# Video encoders
+#
+CONFIG_VIDEO_SAA7127=m
+CONFIG_VIDEO_SAA7185=m
+CONFIG_VIDEO_ADV7170=m
+CONFIG_VIDEO_ADV7175=m
+CONFIG_VIDEO_THS7303=m
+CONFIG_VIDEO_ADV7343=m
+CONFIG_VIDEO_AK881X=m
+
+#
+# Video improvement chips
+#
+CONFIG_VIDEO_UPD64031A=m
+CONFIG_VIDEO_UPD64083=m
+CONFIG_VIDEO_BT848=m
+CONFIG_VIDEO_BT848_DVB=y
+CONFIG_VIDEO_BWQCAM=m
+CONFIG_VIDEO_CQCAM=m
+CONFIG_VIDEO_SAA5246A=m
+CONFIG_VIDEO_SAA5249=m
+CONFIG_VIDEO_ZORAN=m
+CONFIG_VIDEO_ZORAN_DC30=m
+CONFIG_VIDEO_ZORAN_ZR36060=m
+CONFIG_VIDEO_ZORAN_BUZ=m
+CONFIG_VIDEO_ZORAN_DC10=m
+CONFIG_VIDEO_ZORAN_LML33=m
+CONFIG_VIDEO_ZORAN_LML33R10=m
+CONFIG_VIDEO_ZORAN_AVS6EYES=m
+CONFIG_VIDEO_MEYE=m
+CONFIG_VIDEO_SAA7134=m
+CONFIG_VIDEO_SAA7134_ALSA=m
+CONFIG_VIDEO_SAA7134_DVB=m
+CONFIG_VIDEO_MXB=m
+CONFIG_VIDEO_HEXIUM_ORION=m
+CONFIG_VIDEO_HEXIUM_GEMINI=m
+CONFIG_VIDEO_CX88=m
+CONFIG_VIDEO_CX88_ALSA=m
+CONFIG_VIDEO_CX88_BLACKBIRD=m
+CONFIG_VIDEO_CX88_DVB=m
+CONFIG_VIDEO_CX88_MPEG=m
+CONFIG_VIDEO_CX88_VP3054=m
+CONFIG_VIDEO_CX23885=m
+CONFIG_VIDEO_AU0828=m
+CONFIG_VIDEO_IVTV=m
+CONFIG_VIDEO_FB_IVTV=m
+CONFIG_VIDEO_CX18=m
+CONFIG_VIDEO_CX18_ALSA=m
+CONFIG_VIDEO_SAA7164=m
+CONFIG_VIDEO_CAFE_CCIC=m
+CONFIG_SOC_CAMERA=m
+CONFIG_SOC_CAMERA_MT9M001=m
+CONFIG_SOC_CAMERA_MT9M111=m
+CONFIG_SOC_CAMERA_MT9T031=m
+CONFIG_SOC_CAMERA_MT9T112=m
+CONFIG_SOC_CAMERA_MT9V022=m
+CONFIG_SOC_CAMERA_RJ54N1=m
+CONFIG_SOC_CAMERA_TW9910=m
+CONFIG_SOC_CAMERA_PLATFORM=m
+CONFIG_SOC_CAMERA_OV772X=m
+CONFIG_SOC_CAMERA_OV9640=m
+CONFIG_V4L_USB_DRIVERS=y
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+CONFIG_USB_GSPCA=m
+CONFIG_USB_M5602=m
+CONFIG_USB_STV06XX=m
+CONFIG_USB_GL860=m
+CONFIG_USB_GSPCA_BENQ=m
+CONFIG_USB_GSPCA_CONEX=m
+CONFIG_USB_GSPCA_CPIA1=m
+CONFIG_USB_GSPCA_ETOMS=m
+CONFIG_USB_GSPCA_FINEPIX=m
+CONFIG_USB_GSPCA_JEILINJ=m
+CONFIG_USB_GSPCA_MARS=m
+CONFIG_USB_GSPCA_MR97310A=m
+CONFIG_USB_GSPCA_OV519=m
+CONFIG_USB_GSPCA_OV534=m
+CONFIG_USB_GSPCA_OV534_9=m
+CONFIG_USB_GSPCA_PAC207=m
+CONFIG_USB_GSPCA_PAC7302=m
+CONFIG_USB_GSPCA_PAC7311=m
+CONFIG_USB_GSPCA_SN9C2028=m
+CONFIG_USB_GSPCA_SN9C20X=m
+CONFIG_USB_GSPCA_SONIXB=m
+CONFIG_USB_GSPCA_SONIXJ=m
+CONFIG_USB_GSPCA_SPCA500=m
+CONFIG_USB_GSPCA_SPCA501=m
+CONFIG_USB_GSPCA_SPCA505=m
+CONFIG_USB_GSPCA_SPCA506=m
+CONFIG_USB_GSPCA_SPCA508=m
+CONFIG_USB_GSPCA_SPCA561=m
+CONFIG_USB_GSPCA_SPCA1528=m
+CONFIG_USB_GSPCA_SQ905=m
+CONFIG_USB_GSPCA_SQ905C=m
+CONFIG_USB_GSPCA_SQ930X=m
+CONFIG_USB_GSPCA_STK014=m
+CONFIG_USB_GSPCA_STV0680=m
+CONFIG_USB_GSPCA_SUNPLUS=m
+CONFIG_USB_GSPCA_T613=m
+CONFIG_USB_GSPCA_TV8532=m
+CONFIG_USB_GSPCA_VC032X=m
+CONFIG_USB_GSPCA_ZC3XX=m
+CONFIG_VIDEO_PVRUSB2=m
+CONFIG_VIDEO_PVRUSB2_SYSFS=y
+CONFIG_VIDEO_PVRUSB2_DVB=y
+# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
+CONFIG_VIDEO_HDPVR=m
+CONFIG_VIDEO_EM28XX=m
+CONFIG_VIDEO_EM28XX_ALSA=m
+CONFIG_VIDEO_EM28XX_DVB=m
+CONFIG_VIDEO_TLG2300=m
+CONFIG_VIDEO_CX231XX=m
+CONFIG_VIDEO_CX231XX_ALSA=m
+CONFIG_VIDEO_CX231XX_DVB=m
+CONFIG_VIDEO_USBVISION=m
+CONFIG_USB_ET61X251=m
+CONFIG_USB_SN9C102=m
+CONFIG_USB_ZR364XX=m
+CONFIG_USB_STKWEBCAM=m
+CONFIG_USB_S2255=m
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_MEM2MEM_TESTDEV=m
+CONFIG_RADIO_ADAPTERS=y
+CONFIG_RADIO_GEMTEK_PCI=m
+CONFIG_RADIO_MAXIRADIO=m
+CONFIG_RADIO_MAESTRO=m
+# CONFIG_I2C_SI4713 is not set
+# CONFIG_RADIO_SI4713 is not set
+CONFIG_USB_DSBR=m
+# CONFIG_RADIO_SI470X is not set
+CONFIG_USB_MR800=m
+CONFIG_RADIO_TEA5764=m
+CONFIG_RADIO_SAA7706H=m
+CONFIG_RADIO_TEF6862=m
+CONFIG_RADIO_TIMBERDALE=m
+CONFIG_DVB_MAX_ADAPTERS=8
+# CONFIG_DVB_DYNAMIC_MINORS is not set
+CONFIG_DVB_CAPTURE_DRIVERS=y
+
+#
+# Supported SAA7146 based PCI Adapters
+#
+CONFIG_TTPCI_EEPROM=m
+CONFIG_DVB_AV7110=m
+CONFIG_DVB_AV7110_OSD=y
+CONFIG_DVB_BUDGET_CORE=m
+CONFIG_DVB_BUDGET=m
+CONFIG_DVB_BUDGET_CI=m
+CONFIG_DVB_BUDGET_AV=m
+CONFIG_DVB_BUDGET_PATCH=m
+
+#
+# Supported USB Adapters
+#
+CONFIG_DVB_USB=m
+# CONFIG_DVB_USB_DEBUG is not set
+CONFIG_DVB_USB_A800=m
+CONFIG_DVB_USB_DIBUSB_MB=m
+# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
+CONFIG_DVB_USB_DIBUSB_MC=m
+CONFIG_DVB_USB_DIB0700=m
+CONFIG_DVB_USB_UMT_010=m
+CONFIG_DVB_USB_CXUSB=m
+CONFIG_DVB_USB_M920X=m
+CONFIG_DVB_USB_GL861=m
+CONFIG_DVB_USB_AU6610=m
+CONFIG_DVB_USB_DIGITV=m
+CONFIG_DVB_USB_VP7045=m
+CONFIG_DVB_USB_VP702X=m
+CONFIG_DVB_USB_GP8PSK=m
+CONFIG_DVB_USB_NOVA_T_USB2=m
+CONFIG_DVB_USB_TTUSB2=m
+CONFIG_DVB_USB_DTT200U=m
+CONFIG_DVB_USB_OPERA1=m
+CONFIG_DVB_USB_AF9005=m
+CONFIG_DVB_USB_AF9005_REMOTE=m
+CONFIG_DVB_USB_DW2102=m
+CONFIG_DVB_USB_CINERGY_T2=m
+CONFIG_DVB_USB_ANYSEE=m
+CONFIG_DVB_USB_DTV5100=m
+CONFIG_DVB_USB_AF9015=m
+CONFIG_DVB_USB_CE6230=m
+# CONFIG_DVB_USB_FRIIO is not set
+CONFIG_DVB_USB_EC168=m
+CONFIG_DVB_USB_AZ6027=m
+CONFIG_DVB_TTUSB_BUDGET=m
+CONFIG_DVB_TTUSB_DEC=m
+CONFIG_SMS_SIANO_MDTV=m
+
+#
+# Siano module components
+#
+CONFIG_SMS_USB_DRV=m
+CONFIG_SMS_SDIO_DRV=m
+
+#
+# Supported FlexCopII (B2C2) Adapters
+#
+CONFIG_DVB_B2C2_FLEXCOP=m
+CONFIG_DVB_B2C2_FLEXCOP_PCI=m
+CONFIG_DVB_B2C2_FLEXCOP_USB=m
+# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set
+
+#
+# Supported BT878 Adapters
+#
+CONFIG_DVB_BT8XX=m
+
+#
+# Supported Pluto2 Adapters
+#
+CONFIG_DVB_PLUTO2=m
+
+#
+# Supported SDMC DM1105 Adapters
+#
+CONFIG_DVB_DM1105=m
+
+#
+# Supported FireWire (IEEE 1394) Adapters
+#
+CONFIG_DVB_FIREDTV=m
+CONFIG_DVB_FIREDTV_FIREWIRE=y
+CONFIG_DVB_FIREDTV_IEEE1394=y
+CONFIG_DVB_FIREDTV_INPUT=y
+
+#
+# Supported Earthsoft PT1 Adapters
+#
+# CONFIG_DVB_PT1 is not set
+
+#
+# Supported Mantis Adapters
+#
+CONFIG_MANTIS_CORE=m
+CONFIG_DVB_MANTIS=m
+CONFIG_DVB_HOPPER=m
+
+#
+# Supported nGene Adapters
+#
+CONFIG_DVB_NGENE=m
+
+#
+# Supported DVB Frontends
+#
+# CONFIG_DVB_FE_CUSTOMISE is not set
+CONFIG_DVB_STB0899=m
+CONFIG_DVB_STB6100=m
+CONFIG_DVB_STV090x=m
+CONFIG_DVB_STV6110x=m
+CONFIG_DVB_CX24110=m
+CONFIG_DVB_CX24123=m
+CONFIG_DVB_MT312=m
+CONFIG_DVB_ZL10036=m
+CONFIG_DVB_ZL10039=m
+CONFIG_DVB_S5H1420=m
+CONFIG_DVB_STV0288=m
+CONFIG_DVB_STB6000=m
+CONFIG_DVB_STV0299=m
+CONFIG_DVB_STV6110=m
+CONFIG_DVB_STV0900=m
+CONFIG_DVB_TDA8083=m
+CONFIG_DVB_TDA10086=m
+CONFIG_DVB_TDA8261=m
+CONFIG_DVB_VES1X93=m
+CONFIG_DVB_TUNER_ITD1000=m
+CONFIG_DVB_TUNER_CX24113=m
+CONFIG_DVB_TDA826X=m
+CONFIG_DVB_TUA6100=m
+CONFIG_DVB_CX24116=m
+CONFIG_DVB_SI21XX=m
+CONFIG_DVB_DS3000=m
+CONFIG_DVB_MB86A16=m
+CONFIG_DVB_SP8870=m
+CONFIG_DVB_SP887X=m
+CONFIG_DVB_CX22700=m
+CONFIG_DVB_CX22702=m
+CONFIG_DVB_L64781=m
+CONFIG_DVB_TDA1004X=m
+CONFIG_DVB_NXT6000=m
+CONFIG_DVB_MT352=m
+CONFIG_DVB_ZL10353=m
+CONFIG_DVB_DIB3000MB=m
+CONFIG_DVB_DIB3000MC=m
+CONFIG_DVB_DIB7000M=m
+CONFIG_DVB_DIB7000P=m
+CONFIG_DVB_TDA10048=m
+CONFIG_DVB_AF9013=m
+CONFIG_DVB_EC100=m
+CONFIG_DVB_VES1820=m
+CONFIG_DVB_TDA10021=m
+CONFIG_DVB_TDA10023=m
+CONFIG_DVB_STV0297=m
+CONFIG_DVB_NXT200X=m
+CONFIG_DVB_OR51211=m
+CONFIG_DVB_OR51132=m
+CONFIG_DVB_BCM3510=m
+CONFIG_DVB_LGDT330X=m
+CONFIG_DVB_LGDT3305=m
+CONFIG_DVB_S5H1409=m
+CONFIG_DVB_AU8522=m
+CONFIG_DVB_S5H1411=m
+CONFIG_DVB_DIB8000=m
+CONFIG_DVB_PLL=m
+CONFIG_DVB_TUNER_DIB0070=m
+CONFIG_DVB_TUNER_DIB0090=m
+CONFIG_DVB_LNBP21=m
+CONFIG_DVB_ISL6405=m
+CONFIG_DVB_ISL6421=m
+CONFIG_DVB_ISL6423=m
+CONFIG_DVB_LGS8GXX=m
+CONFIG_DVB_ATBM8830=m
+CONFIG_DVB_TDA665x=m
+CONFIG_DAB=y
+CONFIG_USB_DABUSB=m
+
+#
+# Graphics support
+#
+CONFIG_AGP=m
+CONFIG_AGP_AMD64=m
+CONFIG_AGP_INTEL=m
+CONFIG_AGP_SIS=m
+CONFIG_AGP_VIA=m
+# CONFIG_VGA_ARB is not set
+CONFIG_VGA_SWITCHEROO=y
+CONFIG_DRM=m
+CONFIG_DRM_KMS_HELPER=m
+CONFIG_DRM_TTM=m
+CONFIG_DRM_TDFX=m
+CONFIG_DRM_R128=m
+CONFIG_DRM_RADEON=m
+# CONFIG_DRM_RADEON_KMS is not set
+CONFIG_DRM_I810=m
+CONFIG_DRM_I830=m
+CONFIG_DRM_I915=m
+# CONFIG_DRM_I915_KMS is not set
+CONFIG_DRM_MGA=m
+CONFIG_DRM_SIS=m
+CONFIG_DRM_VIA=m
+CONFIG_DRM_SAVAGE=m
+CONFIG_VGASTATE=m
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=m
+# CONFIG_FIRMWARE_EDID is not set
+CONFIG_FB_DDC=m
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=m
+CONFIG_FB_CFB_COPYAREA=m
+CONFIG_FB_CFB_IMAGEBLIT=m
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+CONFIG_FB_SYS_FILLRECT=m
+CONFIG_FB_SYS_COPYAREA=m
+CONFIG_FB_SYS_IMAGEBLIT=m
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+CONFIG_FB_SYS_FOPS=m
+CONFIG_FB_DEFERRED_IO=y
+CONFIG_FB_HECUBA=m
+CONFIG_FB_SVGALIB=m
+# CONFIG_FB_MACMODES is not set
+CONFIG_FB_BACKLIGHT=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+
+#
+# Frame buffer hardware drivers
+#
+CONFIG_FB_CIRRUS=m
+CONFIG_FB_PM2=m
+CONFIG_FB_PM2_FIFO_DISCONNECT=y
+CONFIG_FB_CYBER2000=m
+CONFIG_FB_ARC=m
+CONFIG_FB_VGA16=m
+CONFIG_FB_UVESA=m
+CONFIG_FB_N411=m
+CONFIG_FB_HGA=m
+# CONFIG_FB_HGA_ACCEL is not set
+CONFIG_FB_S1D13XXX=m
+CONFIG_FB_NVIDIA=m
+CONFIG_FB_NVIDIA_I2C=y
+# CONFIG_FB_NVIDIA_DEBUG is not set
+CONFIG_FB_NVIDIA_BACKLIGHT=y
+CONFIG_FB_RIVA=m
+CONFIG_FB_RIVA_I2C=y
+# CONFIG_FB_RIVA_DEBUG is not set
+CONFIG_FB_RIVA_BACKLIGHT=y
+CONFIG_FB_LE80578=m
+CONFIG_FB_CARILLO_RANCH=m
+CONFIG_FB_INTEL=m
+# CONFIG_FB_INTEL_DEBUG is not set
+CONFIG_FB_INTEL_I2C=y
+CONFIG_FB_MATROX=m
+CONFIG_FB_MATROX_MILLENIUM=y
+CONFIG_FB_MATROX_MYSTIQUE=y
+CONFIG_FB_MATROX_G=y
+CONFIG_FB_MATROX_I2C=m
+CONFIG_FB_MATROX_MAVEN=m
+CONFIG_FB_RADEON=m
+CONFIG_FB_RADEON_I2C=y
+CONFIG_FB_RADEON_BACKLIGHT=y
+# CONFIG_FB_RADEON_DEBUG is not set
+CONFIG_FB_ATY128=m
+CONFIG_FB_ATY128_BACKLIGHT=y
+CONFIG_FB_ATY=m
+CONFIG_FB_ATY_CT=y
+CONFIG_FB_ATY_GENERIC_LCD=y
+CONFIG_FB_ATY_GX=y
+CONFIG_FB_ATY_BACKLIGHT=y
+CONFIG_FB_S3=m
+CONFIG_FB_SAVAGE=m
+CONFIG_FB_SAVAGE_I2C=y
+CONFIG_FB_SAVAGE_ACCEL=y
+CONFIG_FB_SIS=m
+CONFIG_FB_SIS_300=y
+CONFIG_FB_SIS_315=y
+CONFIG_FB_VIA=m
+# CONFIG_FB_VIA_DIRECT_PROCFS is not set
+CONFIG_FB_NEOMAGIC=m
+CONFIG_FB_KYRO=m
+CONFIG_FB_3DFX=m
+CONFIG_FB_3DFX_ACCEL=y
+CONFIG_FB_3DFX_I2C=y
+CONFIG_FB_VOODOO1=m
+CONFIG_FB_VT8623=m
+CONFIG_FB_TRIDENT=m
+CONFIG_FB_ARK=m
+CONFIG_FB_PM3=m
+CONFIG_FB_CARMINE=m
+CONFIG_FB_CARMINE_DRAM_EVAL=y
+# CONFIG_CARMINE_DRAM_CUSTOM is not set
+CONFIG_FB_GEODE=y
+CONFIG_FB_GEODE_LX=m
+CONFIG_FB_GEODE_GX=m
+CONFIG_FB_GEODE_GX1=m
+CONFIG_FB_TMIO=m
+CONFIG_FB_TMIO_ACCELL=y
+CONFIG_FB_SM501=m
+# CONFIG_FB_VIRTUAL is not set
+CONFIG_FB_METRONOME=m
+CONFIG_FB_MB862XX=m
+# CONFIG_FB_MB862XX_PCI_GDC is not set
+CONFIG_FB_BROADSHEET=m
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=m
+CONFIG_LCD_L4F00242T03=m
+CONFIG_LCD_LMS283GF05=m
+CONFIG_LCD_LTV350QV=m
+CONFIG_LCD_ILI9320=m
+CONFIG_LCD_TDO24M=m
+CONFIG_LCD_VGG2432A4=m
+CONFIG_LCD_PLATFORM=m
+CONFIG_LCD_S6E63M0=m
+CONFIG_BACKLIGHT_CLASS_DEVICE=m
+CONFIG_BACKLIGHT_GENERIC=m
+CONFIG_BACKLIGHT_PROGEAR=m
+CONFIG_BACKLIGHT_CARILLO_RANCH=m
+CONFIG_BACKLIGHT_MBP_NVIDIA=m
+CONFIG_BACKLIGHT_SAHARA=m
+CONFIG_BACKLIGHT_ADP8860=m
+CONFIG_BACKLIGHT_PCF50633=m
+
+#
+# Display device support
+#
+CONFIG_DISPLAY_SUPPORT=m
+
+#
+# Display hardware drivers
+#
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+# CONFIG_VGACON_SOFT_SCROLLBACK is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=m
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_LOGO is not set
+CONFIG_SOUND=m
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
+CONFIG_SND=m
+CONFIG_SND_TIMER=m
+CONFIG_SND_PCM=m
+CONFIG_SND_HWDEP=m
+CONFIG_SND_RAWMIDI=m
+CONFIG_SND_JACK=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_HRTIMER=m
+CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
+CONFIG_SND_DYNAMIC_MINORS=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_VMASTER=y
+CONFIG_SND_DMA_SGBUF=y
+CONFIG_SND_RAWMIDI_SEQ=m
+CONFIG_SND_OPL3_LIB_SEQ=m
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+CONFIG_SND_EMU10K1_SEQ=m
+CONFIG_SND_MPU401_UART=m
+CONFIG_SND_OPL3_LIB=m
+CONFIG_SND_VX_LIB=m
+CONFIG_SND_AC97_CODEC=m
+CONFIG_SND_DRIVERS=y
+CONFIG_SND_PCSP=m
+CONFIG_SND_DUMMY=m
+CONFIG_SND_VIRMIDI=m
+CONFIG_SND_MTPAV=m
+CONFIG_SND_MTS64=m
+CONFIG_SND_SERIAL_U16550=m
+CONFIG_SND_MPU401=m
+CONFIG_SND_PORTMAN2X4=m
+CONFIG_SND_AC97_POWER_SAVE=y
+CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0
+CONFIG_SND_SB_COMMON=m
+CONFIG_SND_SB16_DSP=m
+CONFIG_SND_PCI=y
+CONFIG_SND_AD1889=m
+CONFIG_SND_ALS300=m
+CONFIG_SND_ALS4000=m
+CONFIG_SND_ALI5451=m
+CONFIG_SND_ASIHPI=m
+CONFIG_SND_ATIIXP=m
+CONFIG_SND_ATIIXP_MODEM=m
+CONFIG_SND_AU8810=m
+CONFIG_SND_AU8820=m
+CONFIG_SND_AU8830=m
+CONFIG_SND_AW2=m
+CONFIG_SND_AZT3328=m
+CONFIG_SND_BT87X=m
+# CONFIG_SND_BT87X_OVERCLOCK is not set
+CONFIG_SND_CA0106=m
+CONFIG_SND_CMIPCI=m
+CONFIG_SND_OXYGEN_LIB=m
+CONFIG_SND_OXYGEN=m
+CONFIG_SND_CS4281=m
+CONFIG_SND_CS46XX=m
+CONFIG_SND_CS46XX_NEW_DSP=y
+CONFIG_SND_CS5530=m
+CONFIG_SND_CS5535AUDIO=m
+CONFIG_SND_CTXFI=m
+CONFIG_SND_DARLA20=m
+CONFIG_SND_GINA20=m
+CONFIG_SND_LAYLA20=m
+CONFIG_SND_DARLA24=m
+CONFIG_SND_GINA24=m
+CONFIG_SND_LAYLA24=m
+CONFIG_SND_MONA=m
+CONFIG_SND_MIA=m
+CONFIG_SND_ECHO3G=m
+CONFIG_SND_INDIGO=m
+CONFIG_SND_INDIGOIO=m
+CONFIG_SND_INDIGODJ=m
+CONFIG_SND_INDIGOIOX=m
+CONFIG_SND_INDIGODJX=m
+CONFIG_SND_EMU10K1=m
+CONFIG_SND_EMU10K1X=m
+CONFIG_SND_ENS1370=m
+CONFIG_SND_ENS1371=m
+CONFIG_SND_ES1938=m
+CONFIG_SND_ES1968=m
+CONFIG_SND_ES1968_INPUT=y
+CONFIG_SND_FM801=m
+# CONFIG_SND_FM801_TEA575X_BOOL is not set
+CONFIG_SND_HDA_INTEL=m
+CONFIG_SND_HDA_HWDEP=y
+# CONFIG_SND_HDA_RECONFIG is not set
+CONFIG_SND_HDA_INPUT_BEEP=y
+CONFIG_SND_HDA_INPUT_BEEP_MODE=0
+CONFIG_SND_HDA_INPUT_JACK=y
+# CONFIG_SND_HDA_PATCH_LOADER is not set
+CONFIG_SND_HDA_CODEC_REALTEK=y
+CONFIG_SND_HDA_CODEC_ANALOG=y
+CONFIG_SND_HDA_CODEC_SIGMATEL=y
+CONFIG_SND_HDA_CODEC_VIA=y
+CONFIG_SND_HDA_CODEC_ATIHDMI=y
+CONFIG_SND_HDA_CODEC_NVHDMI=y
+CONFIG_SND_HDA_CODEC_INTELHDMI=y
+CONFIG_SND_HDA_ELD=y
+CONFIG_SND_HDA_CODEC_CIRRUS=y
+CONFIG_SND_HDA_CODEC_CONEXANT=y
+CONFIG_SND_HDA_CODEC_CA0110=y
+CONFIG_SND_HDA_CODEC_CMEDIA=y
+CONFIG_SND_HDA_CODEC_SI3054=y
+CONFIG_SND_HDA_GENERIC=y
+# CONFIG_SND_HDA_POWER_SAVE is not set
+CONFIG_SND_HDSP=m
+CONFIG_SND_HDSPM=m
+CONFIG_SND_HIFIER=m
+CONFIG_SND_ICE1712=m
+CONFIG_SND_ICE1724=m
+CONFIG_SND_INTEL8X0=m
+CONFIG_SND_INTEL8X0M=m
+CONFIG_SND_KORG1212=m
+CONFIG_SND_LX6464ES=m
+CONFIG_SND_MAESTRO3=m
+CONFIG_SND_MAESTRO3_INPUT=y
+CONFIG_SND_MIXART=m
+CONFIG_SND_NM256=m
+CONFIG_SND_PCXHR=m
+CONFIG_SND_RIPTIDE=m
+CONFIG_SND_RME32=m
+CONFIG_SND_RME96=m
+CONFIG_SND_RME9652=m
+CONFIG_SND_SONICVIBES=m
+CONFIG_SND_TRIDENT=m
+CONFIG_SND_VIA82XX=m
+CONFIG_SND_VIA82XX_MODEM=m
+CONFIG_SND_VIRTUOSO=m
+CONFIG_SND_VX222=m
+CONFIG_SND_YMFPCI=m
+CONFIG_SND_SPI=y
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_USB_UA101=m
+CONFIG_SND_USB_USX2Y=m
+CONFIG_SND_USB_CAIAQ=m
+# CONFIG_SND_USB_CAIAQ_INPUT is not set
+CONFIG_SND_USB_US122L=m
+CONFIG_SND_PCMCIA=y
+CONFIG_SND_VXPOCKET=m
+CONFIG_SND_PDAUDIOCF=m
+CONFIG_SND_SOC=m
+CONFIG_SND_SOC_I2C_AND_SPI=m
+CONFIG_SND_SOC_ALL_CODECS=m
+CONFIG_SND_SOC_WM_HUBS=m
+CONFIG_SND_SOC_AD1836=m
+CONFIG_SND_SOC_AD193X=m
+CONFIG_SND_SOC_AD73311=m
+CONFIG_SND_SOC_ADS117X=m
+CONFIG_SND_SOC_AK4104=m
+CONFIG_SND_SOC_AK4535=m
+CONFIG_SND_SOC_AK4642=m
+CONFIG_SND_SOC_AK4671=m
+CONFIG_SND_SOC_CS42L51=m
+CONFIG_SND_SOC_CS4270=m
+CONFIG_SND_SOC_L3=m
+CONFIG_SND_SOC_DA7210=m
+CONFIG_SND_SOC_PCM3008=m
+CONFIG_SND_SOC_SPDIF=m
+CONFIG_SND_SOC_SSM2602=m
+CONFIG_SND_SOC_TLV320AIC23=m
+CONFIG_SND_SOC_TLV320AIC26=m
+CONFIG_SND_SOC_TLV320AIC3X=m
+CONFIG_SND_SOC_TLV320DAC33=m
+CONFIG_SND_SOC_UDA134X=m
+CONFIG_SND_SOC_UDA1380=m
+CONFIG_SND_SOC_WM8400=m
+CONFIG_SND_SOC_WM8510=m
+CONFIG_SND_SOC_WM8523=m
+CONFIG_SND_SOC_WM8580=m
+CONFIG_SND_SOC_WM8711=m
+CONFIG_SND_SOC_WM8727=m
+CONFIG_SND_SOC_WM8728=m
+CONFIG_SND_SOC_WM8731=m
+CONFIG_SND_SOC_WM8741=m
+CONFIG_SND_SOC_WM8750=m
+CONFIG_SND_SOC_WM8753=m
+CONFIG_SND_SOC_WM8776=m
+CONFIG_SND_SOC_WM8900=m
+CONFIG_SND_SOC_WM8903=m
+CONFIG_SND_SOC_WM8904=m
+CONFIG_SND_SOC_WM8940=m
+CONFIG_SND_SOC_WM8955=m
+CONFIG_SND_SOC_WM8960=m
+CONFIG_SND_SOC_WM8961=m
+CONFIG_SND_SOC_WM8971=m
+CONFIG_SND_SOC_WM8974=m
+CONFIG_SND_SOC_WM8978=m
+CONFIG_SND_SOC_WM8988=m
+CONFIG_SND_SOC_WM8990=m
+CONFIG_SND_SOC_WM8993=m
+CONFIG_SND_SOC_WM9081=m
+CONFIG_SND_SOC_MAX9877=m
+CONFIG_SND_SOC_TPA6130A2=m
+CONFIG_SND_SOC_WM2000=m
+CONFIG_SND_SOC_WM9090=m
+# CONFIG_SOUND_PRIME is not set
+CONFIG_AC97_BUS=m
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=m
+CONFIG_HIDRAW=y
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=m
+# CONFIG_HID_PID is not set
+# CONFIG_USB_HIDDEV is not set
+
+#
+# USB HID Boot Protocol drivers
+#
+CONFIG_USB_KBD=m
+CONFIG_USB_MOUSE=m
+
+#
+# Special HID drivers
+#
+CONFIG_HID_3M_PCT=m
+# CONFIG_HID_A4TECH is not set
+CONFIG_HID_ACRUX_FF=m
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+CONFIG_HID_CANDO=m
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+CONFIG_HID_PRODIKEYS=m
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+CONFIG_HID_EGALAX=m
+CONFIG_HID_ELECOM=m
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LOGITECH is not set
+CONFIG_HID_MAGICMOUSE=m
+# CONFIG_HID_MICROSOFT is not set
+CONFIG_HID_MOSART=m
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_NTRIG is not set
+CONFIG_HID_ORTEK=m
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+CONFIG_HID_PICOLCD=m
+CONFIG_HID_PICOLCD_FB=y
+CONFIG_HID_PICOLCD_BACKLIGHT=y
+CONFIG_HID_PICOLCD_LCD=y
+CONFIG_HID_PICOLCD_LEDS=y
+CONFIG_HID_QUANTA=m
+CONFIG_HID_ROCCAT=m
+CONFIG_HID_ROCCAT_KONE=m
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+CONFIG_HID_STANTUM=m
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_WACOM is not set
+# CONFIG_HID_ZEROPLUS is not set
+CONFIG_HID_ZYDACRON=m
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=m
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_DEVICE_CLASS=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=m
+CONFIG_USB_WUSB=m
+CONFIG_USB_WUSB_CBAF=m
+# CONFIG_USB_WUSB_CBAF_DEBUG is not set
+
+#
+# USB Host Controller Drivers
+#
+CONFIG_USB_C67X00_HCD=m
+CONFIG_USB_XHCI_HCD=m
+# CONFIG_USB_XHCI_HCD_DEBUGGING is not set
+CONFIG_USB_EHCI_HCD=m
+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_OXU210HP_HCD=m
+CONFIG_USB_ISP116X_HCD=m
+CONFIG_USB_ISP1760_HCD=m
+CONFIG_USB_ISP1362_HCD=m
+CONFIG_USB_OHCI_HCD=m
+CONFIG_USB_OHCI_HCD_SSB=y
+# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB_UHCI_HCD=m
+CONFIG_USB_U132_HCD=m
+CONFIG_USB_SL811_HCD=m
+CONFIG_USB_SL811_CS=m
+CONFIG_USB_R8A66597_HCD=m
+CONFIG_USB_WHCI_HCD=m
+CONFIG_USB_HWA_HCD=m
+
+#
+# Enable Host or Gadget support to see Inventra options
+#
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_WDM=m
+CONFIG_USB_TMC=m
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+CONFIG_USB_STORAGE_DATAFAB=m
+CONFIG_USB_STORAGE_FREECOM=m
+CONFIG_USB_STORAGE_ISD200=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_STORAGE_ALAUDA=m
+CONFIG_USB_STORAGE_ONETOUCH=m
+CONFIG_USB_STORAGE_KARMA=m
+CONFIG_USB_STORAGE_CYPRESS_ATACB=m
+CONFIG_USB_LIBUSUAL=y
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_USS720=m
+CONFIG_USB_SERIAL=m
+CONFIG_USB_EZUSB=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_AIRCABLE=m
+CONFIG_USB_SERIAL_ARK3116=m
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_CP210X=m
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_FUNSOFT=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_IPW=m
+CONFIG_USB_SERIAL_IUU=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+CONFIG_USB_SERIAL_MOS7720=m
+CONFIG_USB_SERIAL_MOS7715_PARPORT=y
+CONFIG_USB_SERIAL_MOS7840=m
+CONFIG_USB_SERIAL_MOTOROLA=m
+CONFIG_USB_SERIAL_NAVMAN=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OTI6858=m
+CONFIG_USB_SERIAL_QCAUX=m
+CONFIG_USB_SERIAL_QUALCOMM=m
+CONFIG_USB_SERIAL_SPCP8X5=m
+CONFIG_USB_SERIAL_HP4X=m
+CONFIG_USB_SERIAL_SAFE=m
+CONFIG_USB_SERIAL_SAFE_PADDED=y
+CONFIG_USB_SERIAL_SIEMENS_MPI=m
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+CONFIG_USB_SERIAL_SYMBOL=m
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_XIRCOM=m
+CONFIG_USB_SERIAL_WWAN=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_SERIAL_OPTICON=m
+CONFIG_USB_SERIAL_VIVOPAY_SERIAL=m
+CONFIG_USB_SERIAL_ZIO=m
+CONFIG_USB_SERIAL_SSU100=m
+CONFIG_USB_SERIAL_DEBUG=m
+
+#
+# USB Miscellaneous drivers
+#
+CONFIG_USB_EMI62=m
+CONFIG_USB_EMI26=m
+CONFIG_USB_ADUTUX=m
+CONFIG_USB_SEVSEG=m
+CONFIG_USB_RIO500=m
+# CONFIG_USB_LEGOTOWER is not set
+CONFIG_USB_LCD=m
+CONFIG_USB_LED=m
+CONFIG_USB_CYPRESS_CY7C63=m
+CONFIG_USB_CYTHERM=m
+CONFIG_USB_IDMOUSE=m
+CONFIG_USB_FTDI_ELAN=m
+# CONFIG_USB_APPLEDISPLAY is not set
+CONFIG_USB_SISUSBVGA=m
+CONFIG_USB_SISUSBVGA_CON=y
+CONFIG_USB_LD=m
+# CONFIG_USB_TRANCEVIBRATOR is not set
+CONFIG_USB_IOWARRIOR=m
+CONFIG_USB_TEST=m
+CONFIG_USB_ISIGHTFW=m
+CONFIG_USB_ATM=m
+CONFIG_USB_SPEEDTOUCH=m
+CONFIG_USB_CXACRU=m
+CONFIG_USB_UEAGLEATM=m
+CONFIG_USB_XUSBATM=m
+# CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+CONFIG_USB_OTG_UTILS=y
+CONFIG_USB_GPIO_VBUS=m
+CONFIG_NOP_USB_XCEIV=m
+CONFIG_UWB=m
+CONFIG_UWB_HWA=m
+CONFIG_UWB_WHCI=m
+CONFIG_UWB_WLP=m
+CONFIG_UWB_I1480U=m
+CONFIG_UWB_I1480U_WLP=m
+CONFIG_MMC=m
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=m
+CONFIG_MMC_BLOCK_BOUNCE=y
+CONFIG_SDIO_UART=m
+CONFIG_MMC_TEST=m
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+CONFIG_MMC_SDHCI=m
+CONFIG_MMC_SDHCI_PCI=m
+CONFIG_MMC_RICOH_MMC=y
+CONFIG_MMC_SDHCI_PLTFM=m
+CONFIG_MMC_WBSD=m
+CONFIG_MMC_TIFM_SD=m
+# CONFIG_MMC_SPI is not set
+CONFIG_MMC_SDRICOH_CS=m
+CONFIG_MMC_CB710=m
+CONFIG_MMC_VIA_SDMMC=m
+CONFIG_MEMSTICK=m
+# CONFIG_MEMSTICK_DEBUG is not set
+
+#
+# MemoryStick drivers
+#
+# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
+CONFIG_MSPRO_BLOCK=m
+
+#
+# MemoryStick Host Controller Drivers
+#
+CONFIG_MEMSTICK_TIFM_MS=m
+CONFIG_MEMSTICK_JMICRON_38X=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=m
+
+#
+# LED drivers
+#
+CONFIG_LEDS_NET5501=m
+CONFIG_LEDS_ALIX2=m
+CONFIG_LEDS_PCA9532=m
+CONFIG_LEDS_GPIO=m
+CONFIG_LEDS_GPIO_PLATFORM=y
+CONFIG_LEDS_LP3944=m
+CONFIG_LEDS_CLEVO_MAIL=m
+CONFIG_LEDS_PCA955X=m
+CONFIG_LEDS_DAC124S085=m
+CONFIG_LEDS_REGULATOR=m
+CONFIG_LEDS_BD2802=m
+CONFIG_LEDS_INTEL_SS4200=m
+CONFIG_LEDS_LT3593=m
+CONFIG_LEDS_DELL_NETBOOKS=m
+CONFIG_LEDS_TRIGGERS=y
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+CONFIG_LEDS_TRIGGER_GPIO=m
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+CONFIG_ACCESSIBILITY=y
+# CONFIG_A11Y_BRAILLE_CONSOLE is not set
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_MAD=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_INFINIBAND_USER_MEM=y
+CONFIG_INFINIBAND_ADDR_TRANS=y
+CONFIG_INFINIBAND_MTHCA=m
+# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
+# CONFIG_INFINIBAND_IPATH is not set
+# CONFIG_INFINIBAND_QIB is not set
+CONFIG_INFINIBAND_AMSO1100=m
+# CONFIG_INFINIBAND_AMSO1100_DEBUG is not set
+CONFIG_INFINIBAND_CXGB3=m
+# CONFIG_INFINIBAND_CXGB3_DEBUG is not set
+CONFIG_INFINIBAND_CXGB4=m
+CONFIG_MLX4_INFINIBAND=m
+CONFIG_INFINIBAND_NES=m
+# CONFIG_INFINIBAND_NES_DEBUG is not set
+CONFIG_INFINIBAND_IPOIB=m
+# CONFIG_INFINIBAND_IPOIB_CM is not set
+# CONFIG_INFINIBAND_IPOIB_DEBUG is not set
+CONFIG_INFINIBAND_SRP=m
+CONFIG_INFINIBAND_ISER=m
+# CONFIG_EDAC is not set
+CONFIG_RTC_LIB=m
+CONFIG_RTC_CLASS=m
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+CONFIG_RTC_INTF_DEV_UIE_EMUL=y
+CONFIG_RTC_DRV_TEST=m
+
+#
+# I2C RTC drivers
+#
+CONFIG_RTC_DRV_DS1307=m
+CONFIG_RTC_DRV_DS1374=m
+CONFIG_RTC_DRV_DS1672=m
+CONFIG_RTC_DRV_DS3232=m
+CONFIG_RTC_DRV_MAX6900=m
+CONFIG_RTC_DRV_RS5C372=m
+CONFIG_RTC_DRV_ISL1208=m
+CONFIG_RTC_DRV_ISL12022=m
+CONFIG_RTC_DRV_X1205=m
+CONFIG_RTC_DRV_PCF8563=m
+CONFIG_RTC_DRV_PCF8583=m
+CONFIG_RTC_DRV_M41T80=m
+CONFIG_RTC_DRV_M41T80_WDT=y
+CONFIG_RTC_DRV_BQ32K=m
+CONFIG_RTC_DRV_S35390A=m
+CONFIG_RTC_DRV_FM3130=m
+CONFIG_RTC_DRV_RX8581=m
+CONFIG_RTC_DRV_RX8025=m
+
+#
+# SPI RTC drivers
+#
+CONFIG_RTC_DRV_M41T94=m
+CONFIG_RTC_DRV_DS1305=m
+CONFIG_RTC_DRV_DS1390=m
+CONFIG_RTC_DRV_MAX6902=m
+CONFIG_RTC_DRV_R9701=m
+CONFIG_RTC_DRV_RS5C348=m
+CONFIG_RTC_DRV_DS3234=m
+CONFIG_RTC_DRV_PCF2123=m
+
+#
+# Platform RTC drivers
+#
+CONFIG_RTC_DRV_CMOS=m
+CONFIG_RTC_DRV_DS1286=m
+CONFIG_RTC_DRV_DS1511=m
+CONFIG_RTC_DRV_DS1553=m
+CONFIG_RTC_DRV_DS1742=m
+CONFIG_RTC_DRV_STK17TA8=m
+CONFIG_RTC_DRV_M48T86=m
+CONFIG_RTC_DRV_M48T35=m
+CONFIG_RTC_DRV_M48T59=m
+CONFIG_RTC_DRV_MSM6242=m
+CONFIG_RTC_DRV_BQ4802=m
+CONFIG_RTC_DRV_RP5C01=m
+CONFIG_RTC_DRV_V3020=m
+CONFIG_RTC_DRV_PCF50633=m
+CONFIG_RTC_DRV_AB8500=m
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
+#
+# DMA Devices
+#
+CONFIG_INTEL_MID_DMAC=m
+CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y
+CONFIG_INTEL_IOATDMA=m
+CONFIG_TIMB_DMA=m
+CONFIG_PCH_DMA=m
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+CONFIG_NET_DMA=y
+# CONFIG_ASYNC_TX_DMA is not set
+CONFIG_DMATEST=m
+CONFIG_DCA=m
+CONFIG_AUXDISPLAY=y
+CONFIG_KS0108=m
+CONFIG_KS0108_PORT=0x378
+CONFIG_KS0108_DELAY=2
+CONFIG_CFAG12864B=m
+CONFIG_CFAG12864B_RATE=20
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_UIO_PDRV=m
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_UIO_AEC=m
+CONFIG_UIO_SERCOS3=m
+# CONFIG_UIO_PCI_GENERIC is not set
+CONFIG_UIO_NETX=m
+CONFIG_STAGING=y
+# CONFIG_STAGING_EXCLUDE_BUILD is not set
+# CONFIG_ET131X is not set
+# CONFIG_SLICOSS is not set
+# CONFIG_VIDEO_GO7007 is not set
+# CONFIG_VIDEO_CX25821 is not set
+# CONFIG_VIDEO_TM6000 is not set
+# CONFIG_USB_IP_COMMON is not set
+# CONFIG_W35UND is not set
+# CONFIG_PRISM2_USB is not set
+# CONFIG_ECHO is not set
+# CONFIG_OTUS is not set
+# CONFIG_RT2860 is not set
+# CONFIG_RT2870 is not set
+# CONFIG_COMEDI is not set
+# CONFIG_ASUS_OLED is not set
+# CONFIG_PANEL is not set
+# CONFIG_R8187SE is not set
+# CONFIG_RTL8192SU is not set
+# CONFIG_RTL8192U is not set
+# CONFIG_RTL8192E is not set
+# CONFIG_TRANZPORT is not set
+# CONFIG_POHMELFS is not set
+# CONFIG_IDE_PHISON is not set
+# CONFIG_LINE6_USB is not set
+# CONFIG_DRM_VMWGFX is not set
+# CONFIG_DRM_NOUVEAU is not set
+
+#
+# I2C encoder or helper chips
+#
+# CONFIG_DRM_I2C_CH7006 is not set
+CONFIG_DRM_I2C_SIL164=m
+# CONFIG_USB_SERIAL_QUATECH2 is not set
+# CONFIG_USB_SERIAL_QUATECH_USB2 is not set
+# CONFIG_VT6655 is not set
+# CONFIG_VT6656 is not set
+# CONFIG_FB_UDL is not set
+CONFIG_HYPERV=m
+CONFIG_HYPERV_STORAGE=m
+CONFIG_HYPERV_BLOCK=m
+CONFIG_HYPERV_NET=m
+CONFIG_HYPERV_UTILS=m
+# CONFIG_VME_BUS is not set
+# CONFIG_IIO is not set
+CONFIG_ZRAM=m
+CONFIG_ZRAM_STATS=y
+# CONFIG_WLAGS49_H2 is not set
+# CONFIG_WLAGS49_H25 is not set
+# CONFIG_BATMAN_ADV is not set
+# CONFIG_SAMSUNG_LAPTOP is not set
+# CONFIG_FB_SM7XX is not set
+# CONFIG_VIDEO_DT3155 is not set
+# CONFIG_CRYSTALHD is not set
+# CONFIG_CXT1E1 is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_TI_ST is not set
+# CONFIG_ST_BT is not set
+# CONFIG_ADIS16255 is not set
+# CONFIG_FB_XGI is not set
+# CONFIG_LIRC_STAGING is not set
+CONFIG_EASYCAP=m
+CONFIG_SOLO6X10=m
+CONFIG_ACPI_QUICKSTART=m
+CONFIG_X86_PLATFORM_DEVICES=y
+CONFIG_ACER_WMI=m
+CONFIG_ASUS_LAPTOP=m
+CONFIG_DELL_LAPTOP=m
+CONFIG_DELL_WMI=m
+CONFIG_FUJITSU_LAPTOP=m
+# CONFIG_FUJITSU_LAPTOP_DEBUG is not set
+CONFIG_HP_WMI=m
+CONFIG_MSI_LAPTOP=m
+CONFIG_PANASONIC_LAPTOP=m
+CONFIG_COMPAL_LAPTOP=m
+CONFIG_SONY_LAPTOP=m
+# CONFIG_SONYPI_COMPAT is not set
+CONFIG_IDEAPAD_ACPI=m
+CONFIG_THINKPAD_ACPI=m
+CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y
+# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set
+# CONFIG_THINKPAD_ACPI_DEBUG is not set
+# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set
+CONFIG_THINKPAD_ACPI_VIDEO=y
+CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
+CONFIG_INTEL_MENLOW=m
+CONFIG_EEEPC_LAPTOP=m
+CONFIG_EEEPC_WMI=m
+CONFIG_ACPI_WMI=m
+CONFIG_MSI_WMI=m
+CONFIG_ACPI_ASUS=m
+# CONFIG_TOPSTAR_LAPTOP is not set
+CONFIG_ACPI_TOSHIBA=m
+CONFIG_TOSHIBA_BT_RFKILL=m
+CONFIG_ACPI_CMPC=m
+CONFIG_INTEL_IPS=m
+
+#
+# Firmware Drivers
+#
+CONFIG_EDD=m
+# CONFIG_EDD_OFF is not set
+CONFIG_FIRMWARE_MEMMAP=y
+CONFIG_DELL_RBU=m
+CONFIG_DCDBAS=m
+CONFIG_DMIID=y
+# CONFIG_ISCSI_IBFT_FIND is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=m
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT2_FS_XIP=y
+CONFIG_EXT3_FS=m
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=m
+CONFIG_EXT4_FS_XATTR=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_FS_XIP=y
+CONFIG_JBD=m
+# CONFIG_JBD_DEBUG is not set
+CONFIG_JBD2=m
+# CONFIG_JBD2_DEBUG is not set
+CONFIG_FS_MBCACHE=m
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+# CONFIG_REISERFS_FS_SECURITY is not set
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+# CONFIG_JFS_DEBUG is not set
+CONFIG_JFS_STATISTICS=y
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_XFS_RT=y
+# CONFIG_XFS_DEBUG is not set
+CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
+CONFIG_OCFS2_FS=m
+CONFIG_OCFS2_FS_O2CB=m
+CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
+CONFIG_OCFS2_FS_STATS=y
+CONFIG_OCFS2_DEBUG_MASKLOG=y
+# CONFIG_OCFS2_DEBUG_FS is not set
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_NILFS2_FS=m
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_INOTIFY_USER=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+# CONFIG_QUOTA_DEBUG is not set
+CONFIG_QUOTA_TREE=m
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_QUOTACTL=y
+CONFIG_AUTOFS_FS=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+CONFIG_FSCACHE=m
+CONFIG_FSCACHE_STATS=y
+CONFIG_FSCACHE_HISTOGRAM=y
+# CONFIG_FSCACHE_DEBUG is not set
+# CONFIG_FSCACHE_OBJECT_LIST is not set
+CONFIG_CACHEFILES=m
+# CONFIG_CACHEFILES_DEBUG is not set
+# CONFIG_CACHEFILES_HISTOGRAM is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+CONFIG_NTFS_FS=m
+# CONFIG_NTFS_DEBUG is not set
+CONFIG_NTFS_RW=y
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=m
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+CONFIG_ECRYPT_FS=m
+CONFIG_UNION_FS=m
+# CONFIG_UNION_FS_XATTR is not set
+# CONFIG_UNION_FS_DEBUG is not set
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_FS_POSIX_ACL=y
+CONFIG_JFFS2_FS_SECURITY=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RTIME=y
+CONFIG_JFFS2_RUBIN=y
+# CONFIG_JFFS2_CMODE_NONE is not set
+CONFIG_JFFS2_CMODE_PRIORITY=y
+# CONFIG_JFFS2_CMODE_SIZE is not set
+# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+CONFIG_UBIFS_FS=m
+# CONFIG_UBIFS_FS_XATTR is not set
+# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
+# CONFIG_UBIFS_FS_DEBUG is not set
+CONFIG_LOGFS=m
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+# CONFIG_SQUASHFS_XATTR is not set
+# CONFIG_SQUASHFS_LZO is not set
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_VXFS_FS is not set
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
+CONFIG_HPFS_FS=m
+# CONFIG_QNX4FS_FS is not set
+CONFIG_ROMFS_FS=m
+CONFIG_ROMFS_BACKED_BY_BLOCK=y
+# CONFIG_ROMFS_BACKED_BY_MTD is not set
+# CONFIG_ROMFS_BACKED_BY_BOTH is not set
+CONFIG_ROMFS_ON_BLOCK=y
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+# CONFIG_UFS_FS_WRITE is not set
+# CONFIG_UFS_DEBUG is not set
+CONFIG_EXOFS_FS=m
+# CONFIG_EXOFS_DEBUG is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
+# CONFIG_NFS_FSCACHE is not set
+# CONFIG_NFS_USE_LEGACY_DNS is not set
+CONFIG_NFS_USE_KERNEL_DNS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V3_ACL is not set
+CONFIG_NFSD_V4=y
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=m
+CONFIG_SUNRPC_GSS=m
+CONFIG_SUNRPC_XPRT_RDMA=m
+CONFIG_RPCSEC_GSS_KRB5=m
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+CONFIG_CEPH_FS=m
+# CONFIG_CEPH_FS_PRETTYDEBUG is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_UPCALL is not set
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG2 is not set
+CONFIG_CIFS_DFS_UPCALL=y
+# CONFIG_CIFS_FSCACHE is not set
+CONFIG_CIFS_EXPERIMENTAL=y
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=m
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+CONFIG_DLM=m
+# CONFIG_DLM_DEBUG is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_HARDLOCKUP_DETECTOR is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_MEMORY_INIT is not set
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
+CONFIG_FRAME_POINTER=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LKDTM is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_USER_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_RING_BUFFER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
+# CONFIG_FIREWIRE_OHCI_REMOTE_DMA is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_HAVE_ARCH_KMEMCHECK=y
+CONFIG_STRICT_DEVMEM=y
+# CONFIG_X86_VERBOSE_BOOTUP is not set
+# CONFIG_EARLY_PRINTK is not set
+# CONFIG_IOMMU_STRESS is not set
+CONFIG_HAVE_MMIOTRACE_SUPPORT=y
+CONFIG_IO_DELAY_TYPE_0X80=0
+CONFIG_IO_DELAY_TYPE_0XED=1
+CONFIG_IO_DELAY_TYPE_UDELAY=2
+CONFIG_IO_DELAY_TYPE_NONE=3
+CONFIG_IO_DELAY_0X80=y
+# CONFIG_IO_DELAY_0XED is not set
+# CONFIG_IO_DELAY_UDELAY is not set
+# CONFIG_IO_DELAY_NONE is not set
+CONFIG_DEFAULT_IO_DELAY_TYPE=0
+# CONFIG_OPTIMIZE_INLINING is not set
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+# CONFIG_SECURITY_NETWORK is not set
+# CONFIG_SECURITY_PATH is not set
+# CONFIG_SECURITY_TOMOYO is not set
+# CONFIG_SECURITY_APPARMOR is not set
+# CONFIG_IMA is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_XOR_BLOCKS=m
+CONFIG_ASYNC_CORE=m
+CONFIG_ASYNC_MEMCPY=m
+CONFIG_ASYNC_XOR=m
+CONFIG_ASYNC_PQ=m
+CONFIG_ASYNC_RAID6_RECOV=m
+# CONFIG_ASYNC_RAID6_TEST is not set
+CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA=y
+CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA=y
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_FIPS=y
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=m
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=m
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_MANAGER=m
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+CONFIG_CRYPTO_GF128MUL=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCRYPT=m
+CONFIG_CRYPTO_WORKQUEUE=y
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Authenticated Encryption with Associated Data
+#
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_SEQIV=m
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=m
+CONFIG_CRYPTO_CTR=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_CRC32C_INTEL=m
+CONFIG_CRYPTO_GHASH=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+# CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=m
+# CONFIG_CRYPTO_AES_X86_64 is not set
+# CONFIG_CRYPTO_AES_NI_INTEL is not set
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+# CONFIG_CRYPTO_SALSA20_X86_64 is not set
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+# CONFIG_CRYPTO_TWOFISH_X86_64 is not set
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_ZLIB=m
+CONFIG_CRYPTO_LZO=m
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_HW=y
+CONFIG_CRYPTO_DEV_PADLOCK=m
+CONFIG_CRYPTO_DEV_PADLOCK_AES=m
+CONFIG_CRYPTO_DEV_PADLOCK_SHA=m
+CONFIG_CRYPTO_DEV_HIFN_795X=m
+CONFIG_CRYPTO_DEV_HIFN_795X_RNG=y
+CONFIG_HAVE_KVM=y
+CONFIG_HAVE_KVM_IRQCHIP=y
+CONFIG_HAVE_KVM_EVENTFD=y
+CONFIG_KVM_APIC_ARCHITECTURE=y
+CONFIG_KVM_MMIO=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_KVM_INTEL=m
+CONFIG_KVM_AMD=m
+CONFIG_VHOST_NET=m
+CONFIG_VIRTIO=m
+CONFIG_VIRTIO_RING=m
+CONFIG_VIRTIO_PCI=m
+CONFIG_VIRTIO_BALLOON=m
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_RAID6_PQ=m
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_FIRST_BIT=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=m
+CONFIG_CRC16=m
+CONFIG_CRC_T10DIF=m
+CONFIG_CRC_ITU_T=m
+CONFIG_CRC32=y
+CONFIG_CRC7=m
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
+CONFIG_LZO_COMPRESS=m
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_LZO=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_REED_SOLOMON=m
+CONFIG_REED_SOLOMON_DEC16=y
+CONFIG_TEXTSEARCH=y
+CONFIG_TEXTSEARCH_KMP=m
+CONFIG_TEXTSEARCH_BM=m
+CONFIG_TEXTSEARCH_FSM=m
+CONFIG_BTREE=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_CHECK_SIGNATURE=y
+CONFIG_NLATTR=y
diff --git a/main/linux-scst/scst-2.0.0.1-2.6.36.patch b/main/linux-scst/scst-2.0.0.1-2.6.36.patch
new file mode 100644
index 000000000..c8699d826
--- /dev/null
+++ b/main/linux-scst/scst-2.0.0.1-2.6.36.patch
@@ -0,0 +1,76096 @@
+Signed-off-by:
+
+diff -upkr linux-2.6.36/block/blk-map.c linux-2.6.36/block/blk-map.c
+--- linux-2.6.36/block/blk-map.c 2010-10-21 00:30:22.000000000 +0400
++++ linux-2.6.36/block/blk-map.c 2010-11-26 17:52:19.467689539 +0300
+@@ -5,6 +5,8 @@
+ #include <linux/module.h>
+ #include <linux/bio.h>
+ #include <linux/blkdev.h>
++#include <linux/scatterlist.h>
++#include <linux/slab.h>
+ #include <scsi/sg.h> /* for struct sg_iovec */
+
+ #include "blk.h"
+@@ -271,6 +273,337 @@ int blk_rq_unmap_user(struct bio *bio)
+ }
+ EXPORT_SYMBOL(blk_rq_unmap_user);
+
++struct blk_kern_sg_work {
++ atomic_t bios_inflight;
++ struct sg_table sg_table;
++ struct scatterlist *src_sgl;
++};
++
++static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
++{
++ sg_free_table(&bw->sg_table);
++ kfree(bw);
++ return;
++}
++
++static void blk_bio_map_kern_endio(struct bio *bio, int err)
++{
++ struct blk_kern_sg_work *bw = bio->bi_private;
++
++ if (bw != NULL) {
++ /* Decrement the bios in processing and, if zero, free */
++ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
++ if (atomic_dec_and_test(&bw->bios_inflight)) {
++ if ((bio_data_dir(bio) == READ) && (err == 0)) {
++ unsigned long flags;
++
++ local_irq_save(flags); /* to protect KMs */
++ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
++ KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
++ local_irq_restore(flags);
++ }
++ blk_free_kern_sg_work(bw);
++ }
++ }
++
++ bio_put(bio);
++ return;
++}
++
++static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
++ int nents, struct blk_kern_sg_work **pbw,
++ gfp_t gfp, gfp_t page_gfp)
++{
++ int res = 0, i;
++ struct scatterlist *sg;
++ struct scatterlist *new_sgl;
++ int new_sgl_nents;
++ size_t len = 0, to_copy;
++ struct blk_kern_sg_work *bw;
++
++ bw = kzalloc(sizeof(*bw), gfp);
++ if (bw == NULL)
++ goto out;
++
++ bw->src_sgl = sgl;
++
++ for_each_sg(sgl, sg, nents, i)
++ len += sg->length;
++ to_copy = len;
++
++ new_sgl_nents = PFN_UP(len);
++
++ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
++ if (res != 0)
++ goto out_free_bw;
++
++ new_sgl = bw->sg_table.sgl;
++
++ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
++ struct page *pg;
++
++ pg = alloc_page(page_gfp);
++ if (pg == NULL)
++ goto err_free_new_sgl;
++
++ sg_assign_page(sg, pg);
++ sg->length = min_t(size_t, PAGE_SIZE, len);
++
++ len -= PAGE_SIZE;
++ }
++
++ if (rq_data_dir(rq) == WRITE) {
++ /*
++ * We need to limit the amount of copied data to to_copy, because
++ * the last element of sgl might not be marked as last in
++ * SG chaining.
++ */
++ sg_copy(new_sgl, sgl, 0, to_copy,
++ KM_USER0, KM_USER1);
++ }
++
++ *pbw = bw;
++ /*
++ * REQ_COPY_USER name is misleading. It should be something like
++ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
++ */
++ rq->cmd_flags |= REQ_COPY_USER;
++
++out:
++ return res;
++
++err_free_new_sgl:
++ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
++ struct page *pg = sg_page(sg);
++ if (pg == NULL)
++ break;
++ __free_page(pg);
++ }
++ sg_free_table(&bw->sg_table);
++
++out_free_bw:
++ kfree(bw);
++ res = -ENOMEM;
++ goto out;
++}
++
++static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
++ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
++{
++ int res;
++ struct request_queue *q = rq->q;
++ int rw = rq_data_dir(rq);
++ int max_nr_vecs, i;
++ size_t tot_len;
++ bool need_new_bio;
++ struct scatterlist *sg, *prev_sg = NULL;
++ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
++ int bios;
++
++ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
++ WARN_ON(1);
++ res = -EINVAL;
++ goto out;
++ }
++
++ /*
++ * Let's keep each bio allocation inside a single page to decrease
++ * probability of failure.
++ */
++ max_nr_vecs = min_t(size_t,
++ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
++ BIO_MAX_PAGES);
++
++ need_new_bio = true;
++ tot_len = 0;
++ bios = 0;
++ for_each_sg(sgl, sg, nents, i) {
++ struct page *page = sg_page(sg);
++ void *page_addr = page_address(page);
++ size_t len = sg->length, l;
++ size_t offset = sg->offset;
++
++ tot_len += len;
++ prev_sg = sg;
++
++ /*
++ * Each segment must be aligned on DMA boundary and
++ * not on stack. The last one may have unaligned
++ * length as long as the total length is aligned to
++ * DMA padding alignment.
++ */
++ if (i == nents - 1)
++ l = 0;
++ else
++ l = len;
++ if (((sg->offset | l) & queue_dma_alignment(q)) ||
++ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
++ res = -EINVAL;
++ goto out_free_bios;
++ }
++
++ while (len > 0) {
++ size_t bytes;
++ int rc;
++
++ if (need_new_bio) {
++ bio = bio_kmalloc(gfp, max_nr_vecs);
++ if (bio == NULL) {
++ res = -ENOMEM;
++ goto out_free_bios;
++ }
++
++ if (rw == WRITE)
++ bio->bi_rw |= REQ_WRITE;
++
++ bios++;
++ bio->bi_private = bw;
++ bio->bi_end_io = blk_bio_map_kern_endio;
++
++ if (hbio == NULL)
++ hbio = tbio = bio;
++ else
++ tbio = tbio->bi_next = bio;
++ }
++
++ bytes = min_t(size_t, len, PAGE_SIZE - offset);
++
++ rc = bio_add_pc_page(q, bio, page, bytes, offset);
++ if (rc < bytes) {
++ if (unlikely(need_new_bio || (rc < 0))) {
++ if (rc < 0)
++ res = rc;
++ else
++ res = -EIO;
++ goto out_free_bios;
++ } else {
++ need_new_bio = true;
++ len -= rc;
++ offset += rc;
++ continue;
++ }
++ }
++
++ need_new_bio = false;
++ offset = 0;
++ len -= bytes;
++ page = nth_page(page, 1);
++ }
++ }
++
++ if (hbio == NULL) {
++ res = -EINVAL;
++ goto out_free_bios;
++ }
++
++ /* Total length must be aligned on DMA padding alignment */
++ if ((tot_len & q->dma_pad_mask) &&
++ !(rq->cmd_flags & REQ_COPY_USER)) {
++ res = -EINVAL;
++ goto out_free_bios;
++ }
++
++ if (bw != NULL)
++ atomic_set(&bw->bios_inflight, bios);
++
++ while (hbio != NULL) {
++ bio = hbio;
++ hbio = hbio->bi_next;
++ bio->bi_next = NULL;
++
++ blk_queue_bounce(q, &bio);
++
++ res = blk_rq_append_bio(q, rq, bio);
++ if (unlikely(res != 0)) {
++ bio->bi_next = hbio;
++ hbio = bio;
++ /* We can have one or more bios bounced */
++ goto out_unmap_bios;
++ }
++ }
++
++ res = 0;
++
++ rq->buffer = NULL;
++out:
++ return res;
++
++out_unmap_bios:
++ blk_rq_unmap_kern_sg(rq, res);
++
++out_free_bios:
++ while (hbio != NULL) {
++ bio = hbio;
++ hbio = hbio->bi_next;
++ bio_put(bio);
++ }
++ goto out;
++}
++
++/**
++ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
++ * @rq: request to fill
++ * @sgl: area to map
++ * @nents: number of elements in @sgl
++ * @gfp: memory allocation flags
++ *
++ * Description:
++ * Data will be mapped directly if possible. Otherwise a bounce
++ * buffer will be used.
++ */
++int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
++ int nents, gfp_t gfp)
++{
++ int res;
++
++ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
++ if (unlikely(res != 0)) {
++ struct blk_kern_sg_work *bw = NULL;
++
++ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
++ gfp, rq->q->bounce_gfp | gfp);
++ if (unlikely(res != 0))
++ goto out;
++
++ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
++ bw->sg_table.nents, bw, gfp);
++ if (res != 0) {
++ blk_free_kern_sg_work(bw);
++ goto out;
++ }
++ }
++
++ rq->buffer = NULL;
++
++out:
++ return res;
++}
++EXPORT_SYMBOL(blk_rq_map_kern_sg);
++
++/**
++ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
++ * @rq: request to unmap
++ * @err: non-zero error code
++ *
++ * Description:
++ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
++ * only in case of an error!
++ */
++void blk_rq_unmap_kern_sg(struct request *rq, int err)
++{
++ struct bio *bio = rq->bio;
++
++ while (bio) {
++ struct bio *b = bio;
++ bio = bio->bi_next;
++ b->bi_end_io(b, err);
++ }
++ rq->bio = NULL;
++
++ return;
++}
++EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
++
+ /**
+ * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
+ * @q: request queue where request should be inserted
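
For orientation, here is a minimal, hypothetical sketch of how a kernel-side caller (for instance a SCSI target driver) might drive the two helpers exported by the blk-map.c hunk above, blk_rq_map_kern_sg() and blk_rq_unmap_kern_sg(). The CDB handling, timeout value and device plumbing are assumptions made purely for illustration and are not part of this patch:

#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Hypothetical caller: issue a SCSI command whose data is described by
 * a kernel scatterlist. cdb_len is assumed to be <= BLK_MAX_CDB. */
static int example_send_scsi_cmd(struct request_queue *q, struct gendisk *disk,
				 const unsigned char *cdb, unsigned int cdb_len,
				 struct scatterlist *sgl, int nents, int write)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
	if (rq == NULL)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->timeout = 60 * HZ;
	memcpy(rq->cmd, cdb, cdb_len);
	rq->cmd_len = cdb_len;

	/* Map the SG vector; a bounce copy is set up transparently if the
	 * segments don't satisfy the queue's alignment requirements. */
	err = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);
	if (err != 0)
		goto out_put;

	/* If a later setup step failed before execution, the mapping would
	 * have to be torn down with blk_rq_unmap_kern_sg(rq, err). */
	err = blk_execute_rq(q, disk, rq, 0);

out_put:
	blk_put_request(rq);
	return err;
}

Note that blk_rq_unmap_kern_sg() is only for the error path before the request is executed; once executed, the bios complete and release their resources through blk_bio_map_kern_endio().
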
+diff -upkr linux-2.6.36/include/linux/blkdev.h linux-2.6.36/include/linux/blkdev.h
+--- linux-2.6.36/include/linux/blkdev.h 2010-10-21 00:30:22.000000000 +0400
++++ linux-2.6.36/include/linux/blkdev.h 2010-10-26 12:00:15.899759399 +0400
+@@ -746,6 +748,9 @@ extern int blk_rq_map_kern(struct reques
+ extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
+ struct rq_map_data *, struct sg_iovec *, int,
+ unsigned int, gfp_t);
++extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
++ int nents, gfp_t gfp);
++extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
+ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
+ struct request *, int);
+ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
+diff -upkr linux-2.6.36/include/linux/scatterlist.h linux-2.6.36/include/linux/scatterlist.h
+--- linux-2.6.36/include/linux/scatterlist.h 2010-10-21 00:30:22.000000000 +0400
++++ linux-2.6.36/include/linux/scatterlist.h 2010-10-26 12:00:15.899759399 +0400
+@@ -3,6 +3,7 @@
+
+ #include <asm/types.h>
+ #include <asm/scatterlist.h>
++#include <asm/kmap_types.h>
+ #include <linux/mm.h>
+ #include <linux/string.h>
+ #include <asm/io.h>
+@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
+ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+ void *buf, size_t buflen);
+
++int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
++ int nents_to_copy, size_t copy_len,
++ enum km_type d_km_type, enum km_type s_km_type);
++
+ /*
+ * Maximum number of entries that will be allocated in one piece, if
+ * a list larger than this is required then chaining will be utilized.
+diff -upkr linux-2.6.36/lib/scatterlist.c linux-2.6.36/lib/scatterlist.c
+--- linux-2.6.36/lib/scatterlist.c 2010-10-21 00:30:22.000000000 +0400
++++ linux-2.6.36/lib/scatterlist.c 2010-10-26 12:00:15.899759399 +0400
+@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
+ return sg_copy_buffer(sgl, nents, buf, buflen, 1);
+ }
+ EXPORT_SYMBOL(sg_copy_to_buffer);
++
++/*
++ * This can switch to the next dst_sg element, so to copy to strictly only
++ * one dst_sg element, it must either be the last one in the chain, or
++ * copy_len must equal dst_sg->length.
++ */
++static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
++ size_t *pdst_offs, struct scatterlist *src_sg,
++ size_t copy_len,
++ enum km_type d_km_type, enum km_type s_km_type)
++{
++ int res = 0;
++ struct scatterlist *dst_sg;
++ size_t src_len, dst_len, src_offs, dst_offs;
++ struct page *src_page, *dst_page;
++
++ dst_sg = *pdst_sg;
++ dst_len = *pdst_len;
++ dst_offs = *pdst_offs;
++ dst_page = sg_page(dst_sg);
++
++ src_page = sg_page(src_sg);
++ src_len = src_sg->length;
++ src_offs = src_sg->offset;
++
++ do {
++ void *saddr, *daddr;
++ size_t n;
++
++ saddr = kmap_atomic(src_page +
++ (src_offs >> PAGE_SHIFT), s_km_type) +
++ (src_offs & ~PAGE_MASK);
++ daddr = kmap_atomic(dst_page +
++ (dst_offs >> PAGE_SHIFT), d_km_type) +
++ (dst_offs & ~PAGE_MASK);
++
++ if (((src_offs & ~PAGE_MASK) == 0) &&
++ ((dst_offs & ~PAGE_MASK) == 0) &&
++ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
++ (copy_len >= PAGE_SIZE)) {
++ copy_page(daddr, saddr);
++ n = PAGE_SIZE;
++ } else {
++ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
++ PAGE_SIZE - (src_offs & ~PAGE_MASK));
++ n = min(n, src_len);
++ n = min(n, dst_len);
++ n = min_t(size_t, n, copy_len);
++ memcpy(daddr, saddr, n);
++ }
++ dst_offs += n;
++ src_offs += n;
++
++ kunmap_atomic(saddr, s_km_type);
++ kunmap_atomic(daddr, d_km_type);
++
++ res += n;
++ copy_len -= n;
++ if (copy_len == 0)
++ goto out;
++
++ src_len -= n;
++ dst_len -= n;
++ if (dst_len == 0) {
++ dst_sg = sg_next(dst_sg);
++ if (dst_sg == NULL)
++ goto out;
++ dst_page = sg_page(dst_sg);
++ dst_len = dst_sg->length;
++ dst_offs = dst_sg->offset;
++ }
++ } while (src_len > 0);
++
++out:
++ *pdst_sg = dst_sg;
++ *pdst_len = dst_len;
++ *pdst_offs = dst_offs;
++ return res;
++}
++
++/**
++ * sg_copy - copy one SG vector to another
++ * @dst_sg: destination SG
++ * @src_sg: source SG
++ * @nents_to_copy: maximum number of entries to copy
++ * @copy_len: maximum amount of data to copy. If 0, then copy all.
++ * @d_km_type: kmap_atomic type for the destination SG
++ * @s_km_type: kmap_atomic type for the source SG
++ *
++ * Description:
++ * Data from the source SG vector will be copied to the destination SG
++ * vector. The end of each vector is determined by sg_next() returning
++ * NULL. Returns the number of bytes copied.
++ */
++int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
++ int nents_to_copy, size_t copy_len,
++ enum km_type d_km_type, enum km_type s_km_type)
++{
++ int res = 0;
++ size_t dst_len, dst_offs;
++
++ if (copy_len == 0)
++ copy_len = 0x7FFFFFFF; /* copy all */
++
++ if (nents_to_copy == 0)
++ nents_to_copy = 0x7FFFFFFF; /* copy all */
++
++ dst_len = dst_sg->length;
++ dst_offs = dst_sg->offset;
++
++ do {
++ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
++ src_sg, copy_len, d_km_type, s_km_type);
++ copy_len -= copied;
++ res += copied;
++ if ((copy_len == 0) || (dst_sg == NULL))
++ goto out;
++
++ nents_to_copy--;
++ if (nents_to_copy == 0)
++ goto out;
++
++ src_sg = sg_next(src_sg);
++ } while (src_sg != NULL);
++
++out:
++ return res;
++}
++EXPORT_SYMBOL(sg_copy);
+
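
As a quick illustration of the sg_copy() helper exported above, here is a hypothetical snippet that duplicates the data described by one kernel SG vector into another; the setup of both vectors (sg_init_table()/sg_set_page()) is assumed to be done elsewhere by the caller:

#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: copy everything described by @src into @dst. */
static int example_duplicate_sg(struct scatterlist *dst,
				struct scatterlist *src)
{
	int copied;

	/* Passing 0 for both nents_to_copy and copy_len means "copy all";
	 * copying stops once either vector runs out of entries. KM_USER0
	 * and KM_USER1 are the kmap_atomic slots used for the mappings. */
	copied = sg_copy(dst, src, 0, 0, KM_USER0, KM_USER1);

	return copied > 0 ? 0 : -EIO;
}
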
+diff -upkr linux-2.6.36/include/linux/mm_types.h linux-2.6.36/include/linux/mm_types.h
+--- linux-2.6.36/include/linux/mm_types.h 2010-10-21 00:30:22.000000000 +0400
++++ linux-2.6.36/include/linux/mm_types.h 2010-10-26 12:01:40.651752329 +0400
+@@ -100,6 +100,18 @@ struct page {
+ */
+ void *shadow;
+ #endif
++
++#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++ /*
++ * Used to implement support for notification on zero-copy TCP transfer
++ * completion. It might look better to keep this field in struct sk_buff
++ * instead, but that would make the code much more complicated and
++ * fragile, since every skb would then have to contain only pages with
++ * the same value in this field.
++ */
++ void *net_priv;
++#endif
++
+ };
+
+ /*
+diff -upkr linux-2.6.36/include/linux/net.h linux-2.6.36/include/linux/net.h
+--- linux-2.6.36/include/linux/net.h 2010-10-21 00:30:22.000000000 +0400
++++ linux-2.6.36/include/linux/net.h 2010-10-26 12:01:40.651752329 +0400
+@@ -20,6 +20,7 @@
+
+ #include <linux/socket.h>
+ #include <asm/socket.h>
++#include <linux/mm.h>
+
+ #define NPROTO AF_MAX
+
+@@ -291,5 +292,44 @@ extern int kernel_sock_shutdown(struct s
+ extern struct ratelimit_state net_ratelimit_state;
+ #endif
+
++#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++/* Support for notification on zero-copy TCP transfer completion */
++typedef void (*net_get_page_callback_t)(struct page *page);
++typedef void (*net_put_page_callback_t)(struct page *page);
++
++extern net_get_page_callback_t net_get_page_callback;
++extern net_put_page_callback_t net_put_page_callback;
++
++extern int net_set_get_put_page_callbacks(
++ net_get_page_callback_t get_callback,
++ net_put_page_callback_t put_callback);
++
++/*
++ * See the comment for net_set_get_put_page_callbacks() for why these
++ * functions don't need any protection.
++ */
++static inline void net_get_page(struct page *page)
++{
++ if (page->net_priv != 0)
++ net_get_page_callback(page);
++ get_page(page);
++}
++static inline void net_put_page(struct page *page)
++{
++ if (page->net_priv != 0)
++ net_put_page_callback(page);
++ put_page(page);
++}
++#else
++static inline void net_get_page(struct page *page)
++{
++ get_page(page);
++}
++static inline void net_put_page(struct page *page)
++{
++ put_page(page);
++}
++#endif /* CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION */
++
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_NET_H */
+diff -upkr linux-2.6.36/net/core/dev.c linux-2.6.36/net/core/dev.c
+--- linux-2.6.36/net/core/dev.c 2010-10-21 00:30:22.000000000 +0400
++++ linux-2.6.36/net/core/dev.c 2010-10-26 12:01:40.651752329 +0400
+@@ -3140,7 +3140,7 @@ pull:
+ skb_shinfo(skb)->frags[0].size -= grow;
+
+ if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
+- put_page(skb_shinfo(skb)->frags[0].page);
++ net_put_page(skb_shinfo(skb)->frags[0].page);
+ memmove(skb_shinfo(skb)->frags,
+ skb_shinfo(skb)->frags + 1,
+ --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
+diff -upkr linux-2.6.36/net/core/skbuff.c linux-2.6.36/net/core/skbuff.c
+--- linux-2.6.36/net/core/skbuff.c 2010-10-21 00:30:22.000000000 +0400
++++ linux-2.6.36/net/core/skbuff.c 2010-10-26 12:01:40.655752708 +0400
+@@ -76,13 +76,13 @@ static struct kmem_cache *skbuff_fclone_
+ static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+ {
+- put_page(buf->page);
++ net_put_page(buf->page);
+ }
+
+ static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+ {
+- get_page(buf->page);
++ net_get_page(buf->page);
+ }
+
+ static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
+@@ -337,7 +337,7 @@ static void skb_release_data(struct sk_b
+ if (skb_shinfo(skb)->nr_frags) {
+ int i;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+- put_page(skb_shinfo(skb)->frags[i].page);
++ net_put_page(skb_shinfo(skb)->frags[i].page);
+ }
+
+ if (skb_has_frags(skb))
+@@ -754,7 +754,7 @@ struct sk_buff *pskb_copy(struct sk_buff
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
+- get_page(skb_shinfo(n)->frags[i].page);
++ net_get_page(skb_shinfo(n)->frags[i].page);
+ }
+ skb_shinfo(n)->nr_frags = i;
+ }
+@@ -820,7 +820,7 @@ int pskb_expand_head(struct sk_buff *skb
+ offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+- get_page(skb_shinfo(skb)->frags[i].page);
++ net_get_page(skb_shinfo(skb)->frags[i].page);
+
+ if (skb_has_frags(skb))
+ skb_clone_fraglist(skb);
+@@ -1097,7 +1097,7 @@ drop_pages:
+ skb_shinfo(skb)->nr_frags = i;
+
+ for (; i < nfrags; i++)
+- put_page(skb_shinfo(skb)->frags[i].page);
++ net_put_page(skb_shinfo(skb)->frags[i].page);
+
+ if (skb_has_frags(skb))
+ skb_drop_fraglist(skb);
+@@ -1266,7 +1266,7 @@ pull_pages:
+ k = 0;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ if (skb_shinfo(skb)->frags[i].size <= eat) {
+- put_page(skb_shinfo(skb)->frags[i].page);
++ net_put_page(skb_shinfo(skb)->frags[i].page);
+ eat -= skb_shinfo(skb)->frags[i].size;
+ } else {
+ skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
+@@ -1367,7 +1367,7 @@ EXPORT_SYMBOL(skb_copy_bits);
+ */
+ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
+ {
+- put_page(spd->pages[i]);
++ net_put_page(spd->pages[i]);
+ }
+
+ static inline struct page *linear_to_page(struct page *page, unsigned int *len,
+@@ -1391,7 +1391,7 @@ new_page:
+ off = sk->sk_sndmsg_off;
+ mlen = PAGE_SIZE - off;
+ if (mlen < 64 && mlen < *len) {
+- put_page(p);
++ net_put_page(p);
+ goto new_page;
+ }
+
+@@ -1401,7 +1401,7 @@ new_page:
+ memcpy(page_address(p) + off, page_address(page) + *offset, *len);
+ sk->sk_sndmsg_off += *len;
+ *offset = off;
+- get_page(p);
++ net_get_page(p);
+
+ return p;
+ }
+@@ -1423,7 +1423,7 @@ static inline int spd_fill_page(struct s
+ if (!page)
+ return 1;
+ } else
+- get_page(page);
++ net_get_page(page);
+
+ spd->pages[spd->nr_pages] = page;
+ spd->partial[spd->nr_pages].len = *len;
+@@ -2056,7 +2056,7 @@ static inline void skb_split_no_header(s
+ * where splitting is expensive.
+ * 2. Split is accurately. We make this.
+ */
+- get_page(skb_shinfo(skb)->frags[i].page);
++ net_get_page(skb_shinfo(skb)->frags[i].page);
+ skb_shinfo(skb1)->frags[0].page_offset += len - pos;
+ skb_shinfo(skb1)->frags[0].size -= len - pos;
+ skb_shinfo(skb)->frags[i].size = len - pos;
+@@ -2178,7 +2178,7 @@ int skb_shift(struct sk_buff *tgt, struc
+ to++;
+
+ } else {
+- get_page(fragfrom->page);
++ net_get_page(fragfrom->page);
+ fragto->page = fragfrom->page;
+ fragto->page_offset = fragfrom->page_offset;
+ fragto->size = todo;
+@@ -2200,7 +2200,7 @@ int skb_shift(struct sk_buff *tgt, struc
+ fragto = &skb_shinfo(tgt)->frags[merge];
+
+ fragto->size += fragfrom->size;
+- put_page(fragfrom->page);
++ net_put_page(fragfrom->page);
+ }
+
+ /* Reposition in the original skb */
+@@ -2601,7 +2601,7 @@ struct sk_buff *skb_segment(struct sk_bu
+
+ while (pos < offset + len && i < nfrags) {
+ *frag = skb_shinfo(skb)->frags[i];
+- get_page(frag->page);
++ net_get_page(frag->page);
+ size = frag->size;
+
+ if (pos < offset) {
+diff -upkr linux-2.6.36/net/ipv4/ip_output.c linux-2.6.36/net/ipv4/ip_output.c
+--- linux-2.6.36/net/ipv4/ip_output.c 2010-10-21 00:30:22.000000000 +0400
++++ linux-2.6.36/net/ipv4/ip_output.c 2010-10-26 12:01:40.655752708 +0400
+@@ -1040,7 +1040,7 @@ alloc_new_skb:
+ err = -EMSGSIZE;
+ goto error;
+ }
+- get_page(page);
++ net_get_page(page);
+ skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
+ frag = &skb_shinfo(skb)->frags[i];
+ }
+@@ -1199,7 +1199,7 @@ ssize_t ip_append_page(struct sock *sk,
+ if (skb_can_coalesce(skb, i, page, offset)) {
+ skb_shinfo(skb)->frags[i-1].size += len;
+ } else if (i < MAX_SKB_FRAGS) {
+- get_page(page);
++ net_get_page(page);
+ skb_fill_page_desc(skb, i, page, offset, len);
+ } else {
+ err = -EMSGSIZE;
+diff -upkr linux-2.6.36/net/ipv4/Makefile linux-2.6.36/net/ipv4/Makefile
+--- linux-2.6.36/net/ipv4/Makefile 2010-10-21 00:30:22.000000000 +0400
++++ linux-2.6.36/net/ipv4/Makefile 2010-10-26 12:01:40.655752708 +0400
+@@ -49,6 +49,7 @@ obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
+ obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
+ obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
+ obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
++obj-$(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION) += tcp_zero_copy.o
+
+ obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
+ xfrm4_output.o
+diff -upkr linux-2.6.36/net/ipv4/tcp.c linux-2.6.36/net/ipv4/tcp.c
+--- linux-2.6.36/net/ipv4/tcp.c 2010-10-21 00:30:22.000000000 +0400
++++ linux-2.6.36/net/ipv4/tcp.c 2010-10-26 12:01:40.659752056 +0400
+@@ -806,7 +806,7 @@ new_segment:
+ if (can_coalesce) {
+ skb_shinfo(skb)->frags[i - 1].size += copy;
+ } else {
+- get_page(page);
++ net_get_page(page);
+ skb_fill_page_desc(skb, i, page, offset, copy);
+ }
+
+@@ -1015,7 +1015,7 @@ new_segment:
+ goto new_segment;
+ } else if (page) {
+ if (off == PAGE_SIZE) {
+- put_page(page);
++ net_put_page(page);
+ TCP_PAGE(sk) = page = NULL;
+ off = 0;
+ }
+@@ -1056,9 +1056,9 @@ new_segment:
+ } else {
+ skb_fill_page_desc(skb, i, page, off, copy);
+ if (TCP_PAGE(sk)) {
+- get_page(page);
++ net_get_page(page);
+ } else if (off + copy < PAGE_SIZE) {
+- get_page(page);
++ net_get_page(page);
+ TCP_PAGE(sk) = page;
+ }
+ }
+diff -upkr linux-2.6.36/net/ipv4/tcp_output.c linux-2.6.36/net/ipv4/tcp_output.c
+--- linux-2.6.36/net/ipv4/tcp_output.c 2010-10-21 00:30:22.000000000 +0400
++++ linux-2.6.36/net/ipv4/tcp_output.c 2010-10-26 12:01:40.659752056 +0400
+@@ -1086,7 +1086,7 @@ static void __pskb_trim_head(struct sk_b
+ k = 0;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ if (skb_shinfo(skb)->frags[i].size <= eat) {
+- put_page(skb_shinfo(skb)->frags[i].page);
++ net_put_page(skb_shinfo(skb)->frags[i].page);
+ eat -= skb_shinfo(skb)->frags[i].size;
+ } else {
+ skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
+diff -upkr linux-2.6.36/net/ipv4/tcp_zero_copy.c linux-2.6.36/net/ipv4/tcp_zero_copy.c
+--- linux-2.6.36/net/ipv4/tcp_zero_copy.c 2010-10-26 12:02:24.519252006 +0400
++++ linux-2.6.36/net/ipv4/tcp_zero_copy.c 2010-10-26 12:01:40.659752056 +0400
+@@ -0,0 +1,49 @@
++/*
++ * Support routines for TCP zero copy transmit
++ *
++ * Created by Vladislav Bolkhovitin
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ */
++
++#include <linux/skbuff.h>
++
++net_get_page_callback_t net_get_page_callback __read_mostly;
++EXPORT_SYMBOL(net_get_page_callback);
++
++net_put_page_callback_t net_put_page_callback __read_mostly;
++EXPORT_SYMBOL(net_put_page_callback);
++
++/*
++ * The caller of this function must ensure that, at the moment it is called,
++ * there are no pages in the system with the net_priv field set to a non-zero
++ * value. Hence this function, as well as net_get_page() and net_put_page(),
++ * needs no protection.
++ */
++int net_set_get_put_page_callbacks(
++ net_get_page_callback_t get_callback,
++ net_put_page_callback_t put_callback)
++{
++ int res = 0;
++
++ if ((net_get_page_callback != NULL) && (get_callback != NULL) &&
++ (net_get_page_callback != get_callback)) {
++ res = -EBUSY;
++ goto out;
++ }
++
++ if ((net_put_page_callback != NULL) && (put_callback != NULL) &&
++ (net_put_page_callback != put_callback)) {
++ res = -EBUSY;
++ goto out;
++ }
++
++ net_get_page_callback = get_callback;
++ net_put_page_callback = put_callback;
++
++out:
++ return res;
++}
++EXPORT_SYMBOL(net_set_get_put_page_callbacks);
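
To show how the notification hooks above are intended to be consumed, here is a hedged sketch of a module registering and later unregistering its page callbacks. Only net_set_get_put_page_callbacks(), net_get_page() and net_put_page() come from this patch; the module boilerplate and the callback bodies are illustrative assumptions:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/mm_types.h>

/* Called from net_get_page()/net_put_page() only for pages whose
 * ->net_priv field was set up by this module. */
static void example_get_page_cb(struct page *page)
{
	/* A real target would take a reference on its per-page state here. */
	pr_debug("zero-copy get on page %p\n", page);
}

static void example_put_page_cb(struct page *page)
{
	/* ... and drop that reference here, completing the notification. */
	pr_debug("zero-copy put on page %p\n", page);
}

static int __init example_init(void)
{
	/* Fails with -EBUSY if another module registered different hooks. */
	return net_set_get_put_page_callbacks(example_get_page_cb,
					      example_put_page_cb);
}

static void __exit example_exit(void)
{
	/* Passing NULLs unregisters the callbacks again. */
	net_set_get_put_page_callbacks(NULL, NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
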
+diff -upkr linux-2.6.36/net/ipv6/ip6_output.c linux-2.6.36/net/ipv6/ip6_output.c
+--- linux-2.6.36/net/ipv6/ip6_output.c 2010-10-21 00:30:22.000000000 +0400
++++ linux-2.6.36/net/ipv6/ip6_output.c 2010-10-26 12:01:40.659752056 +0400
+@@ -1391,7 +1391,7 @@ alloc_new_skb:
+ err = -EMSGSIZE;
+ goto error;
+ }
+- get_page(page);
++ net_get_page(page);
+ skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
+ frag = &skb_shinfo(skb)->frags[i];
+ }
+diff -upkr linux-2.6.36/net/Kconfig linux-2.6.36/net/Kconfig
+--- linux-2.6.36/net/Kconfig 2010-10-21 00:30:22.000000000 +0400
++++ linux-2.6.36/net/Kconfig 2010-10-26 12:01:40.659752056 +0400
+@@ -72,6 +72,18 @@ config INET
+
+ Short answer: say Y.
+
++config TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION
++ bool "TCP/IP zero-copy transfer completion notification"
++ depends on INET
++ default SCST_ISCSI
++ ---help---
++ Adds support for sending a notification upon completion of a
++ zero-copy TCP/IP transfer. This can speed up certain TCP/IP
++ software. Currently this is only used by the iSCSI target driver
++ iSCSI-SCST.
++
++ If unsure, say N.
++
+ if INET
+ source "net/ipv4/Kconfig"
+ source "net/ipv6/Kconfig"
+diff -uprN orig/linux-2.6.36/include/scst/scst_const.h linux-2.6.36/include/scst/scst_const.h
+--- orig/linux-2.6.36/include/scst/scst_const.h
++++ linux-2.6.36/include/scst/scst_const.h
+@@ -0,0 +1,413 @@
++/*
++ * include/scst_const.h
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * Contains common SCST constants.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __SCST_CONST_H
++#define __SCST_CONST_H
++
++#ifndef GENERATING_UPSTREAM_PATCH
++/*
++ * Include <linux/version.h> only when not converting this header file into
++ * a patch for upstream review because only then the symbol LINUX_VERSION_CODE
++ * is needed.
++ */
++#include <linux/version.h>
++#endif
++#include <scsi/scsi.h>
++
++#define SCST_CONST_VERSION "$Revision: 2605 $"
++
++/*** Shared constants between user and kernel spaces ***/
++
++/* Max size of CDB */
++#define SCST_MAX_CDB_SIZE 16
++
++/* Max size of various names */
++#define SCST_MAX_NAME 50
++
++/* Max size of external names, like initiator name */
++#define SCST_MAX_EXTERNAL_NAME 256
++
++/*
++ * Size of sense sufficient to carry standard sense data.
++ * Warning! It's allocated on stack!
++ */
++#define SCST_STANDARD_SENSE_LEN 18
++
++/* Max size of sense */
++#define SCST_SENSE_BUFFERSIZE 96
++
++/*************************************************************
++ ** Allowed delivery statuses for cmd's delivery_status
++ *************************************************************/
++
++#define SCST_CMD_DELIVERY_SUCCESS 0
++#define SCST_CMD_DELIVERY_FAILED -1
++#define SCST_CMD_DELIVERY_ABORTED -2
++
++/*************************************************************
++ ** Values for task management functions
++ *************************************************************/
++#define SCST_ABORT_TASK 0
++#define SCST_ABORT_TASK_SET 1
++#define SCST_CLEAR_ACA 2
++#define SCST_CLEAR_TASK_SET 3
++#define SCST_LUN_RESET 4
++#define SCST_TARGET_RESET 5
++
++/** SCST extensions **/
++
++/*
++ * Notifies about I_T nexus loss event in the corresponding session.
++ * Aborts all tasks there, resets the reservation, if any, and sets
++ * up the I_T Nexus loss UA.
++ */
++#define SCST_NEXUS_LOSS_SESS 6
++
++/* Aborts all tasks in the corresponding session */
++#define SCST_ABORT_ALL_TASKS_SESS 7
++
++/*
++ * Notifies about I_T nexus loss event. Aborts all tasks in all sessions
++ * of the tgt, resets the reservations, if any, and sets up the I_T Nexus
++ * loss UA.
++ */
++#define SCST_NEXUS_LOSS 8
++
++/* Aborts all tasks in all sessions of the tgt */
++#define SCST_ABORT_ALL_TASKS 9
++
++/*
++ * Internal TM command issued by SCST in scst_unregister_session(). It is the
++ * same as SCST_NEXUS_LOSS_SESS, except:
++ * - it doesn't call task_mgmt_affected_cmds_done()
++ * - it doesn't call task_mgmt_fn_done()
++ * - it doesn't queue NEXUS LOSS UA.
++ *
++ * Target drivers must NEVER use it!!
++ */
++#define SCST_UNREG_SESS_TM 10
++
++/*
++ * Internal TM command issued by SCST in scst_pr_abort_reg(). It aborts all
++ * tasks from mcmd->origin_pr_cmd->tgt_dev, except mcmd->origin_pr_cmd.
++ * Additionally:
++ * - it signals pr_aborting_cmpl completion when all affected
++ * commands marked as aborted.
++ * commands are marked as aborted.
++ * - it doesn't call task_mgmt_fn_done()
++ * - it calls mcmd->origin_pr_cmd->scst_cmd_done() when all affected
++ * commands aborted.
++ * commands are aborted.
++ * Target drivers must NEVER use it!!
++ */
++#define SCST_PR_ABORT_ALL 11
++
++/*************************************************************
++ ** Values for mgmt cmd's status field. Codes taken from iSCSI
++ *************************************************************/
++#define SCST_MGMT_STATUS_SUCCESS 0
++#define SCST_MGMT_STATUS_TASK_NOT_EXIST -1
++#define SCST_MGMT_STATUS_LUN_NOT_EXIST -2
++#define SCST_MGMT_STATUS_FN_NOT_SUPPORTED -5
++#define SCST_MGMT_STATUS_REJECTED -255
++#define SCST_MGMT_STATUS_FAILED -129
++
++/*************************************************************
++ ** SCSI task attribute queue types
++ *************************************************************/
++enum scst_cmd_queue_type {
++ SCST_CMD_QUEUE_UNTAGGED = 0,
++ SCST_CMD_QUEUE_SIMPLE,
++ SCST_CMD_QUEUE_ORDERED,
++ SCST_CMD_QUEUE_HEAD_OF_QUEUE,
++ SCST_CMD_QUEUE_ACA
++};
++
++/*************************************************************
++ ** CDB flags
++ **
++ ** Implicit ordered is used for commands which need a calm environment
++ ** without any simultaneous activities. For instance, MODE SELECT
++ ** needs it to correctly generate its UA.
++ *************************************************************/
++enum scst_cdb_flags {
++ SCST_TRANSFER_LEN_TYPE_FIXED = 0x0001,
++ SCST_SMALL_TIMEOUT = 0x0002,
++ SCST_LONG_TIMEOUT = 0x0004,
++ SCST_UNKNOWN_LENGTH = 0x0008,
++ SCST_INFO_VALID = 0x0010, /* must be single bit */
++ SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED = 0x0020,
++ SCST_IMPLICIT_HQ = 0x0040,
++ SCST_IMPLICIT_ORDERED = 0x0080, /* ToDo: remove it's nonsense */
++ SCST_SKIP_UA = 0x0100,
++ SCST_WRITE_MEDIUM = 0x0200,
++ SCST_LOCAL_CMD = 0x0400,
++ SCST_FULLY_LOCAL_CMD = 0x0800,
++ SCST_REG_RESERVE_ALLOWED = 0x1000,
++ SCST_WRITE_EXCL_ALLOWED = 0x2000,
++ SCST_EXCL_ACCESS_ALLOWED = 0x4000,
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ SCST_TEST_IO_IN_SIRQ_ALLOWED = 0x8000,
++#endif
++};
++
++/*************************************************************
++ ** Data direction aliases. When changing them, don't forget to change
++ ** scst_to_tgt_dma_dir as well!
++ *************************************************************/
++#define SCST_DATA_UNKNOWN 0
++#define SCST_DATA_WRITE 1
++#define SCST_DATA_READ 2
++#define SCST_DATA_BIDI (SCST_DATA_WRITE | SCST_DATA_READ)
++#define SCST_DATA_NONE 4
++
++/*************************************************************
++ ** Default suffix for targets with NULL names
++ *************************************************************/
++#define SCST_DEFAULT_TGT_NAME_SUFFIX "_target_"
++
++/*************************************************************
++ ** Sense manipulation and examination
++ *************************************************************/
++#define SCST_LOAD_SENSE(key_asc_ascq) key_asc_ascq
++
++#define SCST_SENSE_VALID(sense) ((sense != NULL) && \
++ ((((const uint8_t *)(sense))[0] & 0x70) == 0x70))
++
++#define SCST_NO_SENSE(sense) ((sense != NULL) && \
++ (((const uint8_t *)(sense))[2] == 0))
++
++/*************************************************************
++ ** Sense data for the appropriate errors. Can be used with
++ ** scst_set_cmd_error()
++ *************************************************************/
++#define scst_sense_no_sense NO_SENSE, 0x00, 0
++#define scst_sense_hardw_error HARDWARE_ERROR, 0x44, 0
++#define scst_sense_aborted_command ABORTED_COMMAND, 0x00, 0
++#define scst_sense_invalid_opcode ILLEGAL_REQUEST, 0x20, 0
++#define scst_sense_invalid_field_in_cdb ILLEGAL_REQUEST, 0x24, 0
++#define scst_sense_invalid_field_in_parm_list ILLEGAL_REQUEST, 0x26, 0
++#define scst_sense_parameter_value_invalid ILLEGAL_REQUEST, 0x26, 2
++#define scst_sense_invalid_release ILLEGAL_REQUEST, 0x26, 4
++#define scst_sense_parameter_list_length_invalid \
++ ILLEGAL_REQUEST, 0x1A, 0
++#define scst_sense_reset_UA UNIT_ATTENTION, 0x29, 0
++#define scst_sense_nexus_loss_UA UNIT_ATTENTION, 0x29, 0x7
++#define scst_sense_saving_params_unsup ILLEGAL_REQUEST, 0x39, 0
++#define scst_sense_lun_not_supported ILLEGAL_REQUEST, 0x25, 0
++#define scst_sense_data_protect DATA_PROTECT, 0x00, 0
++#define scst_sense_miscompare_error MISCOMPARE, 0x1D, 0
++#define scst_sense_block_out_range_error ILLEGAL_REQUEST, 0x21, 0
++#define scst_sense_medium_changed_UA UNIT_ATTENTION, 0x28, 0
++#define scst_sense_read_error MEDIUM_ERROR, 0x11, 0
++#define scst_sense_write_error MEDIUM_ERROR, 0x03, 0
++#define scst_sense_not_ready NOT_READY, 0x04, 0x10
++#define scst_sense_invalid_message ILLEGAL_REQUEST, 0x49, 0
++#define scst_sense_cleared_by_another_ini_UA UNIT_ATTENTION, 0x2F, 0
++#define scst_sense_capacity_data_changed UNIT_ATTENTION, 0x2A, 0x9
++#define scst_sense_reservation_preempted UNIT_ATTENTION, 0x2A, 0x03
++#define scst_sense_reservation_released UNIT_ATTENTION, 0x2A, 0x04
++#define scst_sense_registrations_preempted UNIT_ATTENTION, 0x2A, 0x05
++#define scst_sense_reported_luns_data_changed UNIT_ATTENTION, 0x3F, 0xE
++#define scst_sense_inquery_data_changed UNIT_ATTENTION, 0x3F, 0x3
++
++/*************************************************************
++ * SCSI opcodes not listed anywhere else
++ *************************************************************/
++#define REPORT_DEVICE_IDENTIFIER 0xA3
++#define INIT_ELEMENT_STATUS 0x07
++#define INIT_ELEMENT_STATUS_RANGE 0x37
++#define PREVENT_ALLOW_MEDIUM 0x1E
++#define READ_ATTRIBUTE 0x8C
++#define REQUEST_VOLUME_ADDRESS 0xB5
++#define WRITE_ATTRIBUTE 0x8D
++#define WRITE_VERIFY_16 0x8E
++#define VERIFY_6 0x13
++#ifndef VERIFY_12
++#define VERIFY_12 0xAF
++#endif
++#ifndef GENERATING_UPSTREAM_PATCH
++/*
++ * The constants below have been defined in the kernel header <scsi/scsi.h>
++ * and hence are not needed when this header file is included in kernel code.
++ * The definitions below are only used when this header file is included during
++ * compilation of SCST's user space components.
++ */
++#ifndef READ_16
++#define READ_16 0x88
++#endif
++#ifndef WRITE_16
++#define WRITE_16 0x8a
++#endif
++#ifndef VERIFY_16
++#define VERIFY_16 0x8f
++#endif
++#ifndef SERVICE_ACTION_IN
++#define SERVICE_ACTION_IN 0x9e
++#endif
++#ifndef SAI_READ_CAPACITY_16
++/* values for service action in */
++#define SAI_READ_CAPACITY_16 0x10
++#endif
++#endif
++#ifndef GENERATING_UPSTREAM_PATCH
++#ifndef REPORT_LUNS
++#define REPORT_LUNS 0xa0
++#endif
++#endif
++
++/*************************************************************
++ ** SCSI Architecture Model (SAM) Status codes. Taken from SAM-3 draft
++ ** T10/1561-D Revision 4 Draft dated 7th November 2002.
++ *************************************************************/
++#define SAM_STAT_GOOD 0x00
++#define SAM_STAT_CHECK_CONDITION 0x02
++#define SAM_STAT_CONDITION_MET 0x04
++#define SAM_STAT_BUSY 0x08
++#define SAM_STAT_INTERMEDIATE 0x10
++#define SAM_STAT_INTERMEDIATE_CONDITION_MET 0x14
++#define SAM_STAT_RESERVATION_CONFLICT 0x18
++#define SAM_STAT_COMMAND_TERMINATED 0x22 /* obsolete in SAM-3 */
++#define SAM_STAT_TASK_SET_FULL 0x28
++#define SAM_STAT_ACA_ACTIVE 0x30
++#define SAM_STAT_TASK_ABORTED 0x40
++
++/*************************************************************
++ ** Control byte field in CDB
++ *************************************************************/
++#define CONTROL_BYTE_LINK_BIT 0x01
++#define CONTROL_BYTE_NACA_BIT 0x04
++
++/*************************************************************
++ ** Byte 1 in INQUIRY CDB
++ *************************************************************/
++#define SCST_INQ_EVPD 0x01
++
++/*************************************************************
++ ** Byte 3 in Standard INQUIRY data
++ *************************************************************/
++#define SCST_INQ_BYTE3 3
++
++#define SCST_INQ_NORMACA_BIT 0x20
++
++/*************************************************************
++ ** Byte 2 in RESERVE_10 CDB
++ *************************************************************/
++#define SCST_RES_3RDPTY 0x10
++#define SCST_RES_LONGID 0x02
++
++/*************************************************************
++ ** Values for the control mode page TST field
++ *************************************************************/
++#define SCST_CONTR_MODE_ONE_TASK_SET 0
++#define SCST_CONTR_MODE_SEP_TASK_SETS 1
++
++/*******************************************************************
++ ** Values for the control mode page QUEUE ALGORITHM MODIFIER field
++ *******************************************************************/
++#define SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER 0
++#define SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER 1
++
++/*************************************************************
++ ** Values for the control mode page D_SENSE field
++ *************************************************************/
++#define SCST_CONTR_MODE_FIXED_SENSE 0
++#define SCST_CONTR_MODE_DESCR_SENSE 1
++
++/*************************************************************
++ ** TransportID protocol identifiers
++ *************************************************************/
++
++#define SCSI_TRANSPORTID_PROTOCOLID_FCP2 0
++#define SCSI_TRANSPORTID_PROTOCOLID_SPI5 1
++#define SCSI_TRANSPORTID_PROTOCOLID_SRP 4
++#define SCSI_TRANSPORTID_PROTOCOLID_ISCSI 5
++#define SCSI_TRANSPORTID_PROTOCOLID_SAS 6
++
++/*************************************************************
++ ** Misc SCSI constants
++ *************************************************************/
++#define SCST_SENSE_ASC_UA_RESET 0x29
++#define BYTCHK 0x02
++#define POSITION_LEN_SHORT 20
++#define POSITION_LEN_LONG 32
++
++/*************************************************************
++ ** Various timeouts
++ *************************************************************/
++#define SCST_DEFAULT_TIMEOUT (60 * HZ)
++
++#define SCST_GENERIC_CHANGER_TIMEOUT (3 * HZ)
++#define SCST_GENERIC_CHANGER_LONG_TIMEOUT (14000 * HZ)
++
++#define SCST_GENERIC_PROCESSOR_TIMEOUT (3 * HZ)
++#define SCST_GENERIC_PROCESSOR_LONG_TIMEOUT (14000 * HZ)
++
++#define SCST_GENERIC_TAPE_SMALL_TIMEOUT (3 * HZ)
++#define SCST_GENERIC_TAPE_REG_TIMEOUT (900 * HZ)
++#define SCST_GENERIC_TAPE_LONG_TIMEOUT (14000 * HZ)
++
++#define SCST_GENERIC_MODISK_SMALL_TIMEOUT (3 * HZ)
++#define SCST_GENERIC_MODISK_REG_TIMEOUT (900 * HZ)
++#define SCST_GENERIC_MODISK_LONG_TIMEOUT (14000 * HZ)
++
++#define SCST_GENERIC_DISK_SMALL_TIMEOUT (3 * HZ)
++#define SCST_GENERIC_DISK_REG_TIMEOUT (60 * HZ)
++#define SCST_GENERIC_DISK_LONG_TIMEOUT (3600 * HZ)
++
++#define SCST_GENERIC_RAID_TIMEOUT (3 * HZ)
++#define SCST_GENERIC_RAID_LONG_TIMEOUT (14000 * HZ)
++
++#define SCST_GENERIC_CDROM_SMALL_TIMEOUT (3 * HZ)
++#define SCST_GENERIC_CDROM_REG_TIMEOUT (900 * HZ)
++#define SCST_GENERIC_CDROM_LONG_TIMEOUT (14000 * HZ)
++
++#define SCST_MAX_OTHER_TIMEOUT (14000 * HZ)
++
++/*************************************************************
++ ** I/O grouping attribute string values. Must match constants
++ ** w/o '_STR' suffix!
++ *************************************************************/
++#define SCST_IO_GROUPING_AUTO_STR "auto"
++#define SCST_IO_GROUPING_THIS_GROUP_ONLY_STR "this_group_only"
++#define SCST_IO_GROUPING_NEVER_STR "never"
++
++/*************************************************************
++ ** Threads pool type attribute string values.
++ ** Must match scst_dev_type_threads_pool_type!
++ *************************************************************/
++#define SCST_THREADS_POOL_PER_INITIATOR_STR "per_initiator"
++#define SCST_THREADS_POOL_SHARED_STR "shared"
++
++/*************************************************************
++ ** Misc constants
++ *************************************************************/
++#define SCST_SYSFS_BLOCK_SIZE PAGE_SIZE
++
++#define SCST_PR_DIR "/var/lib/scst/pr"
++
++#define TID_COMMON_SIZE 24
++
++#define SCST_SYSFS_KEY_MARK "[key]"
++
++#define SCST_MIN_REL_TGT_ID 1
++#define SCST_MAX_REL_TGT_ID 65535
++
++#endif /* __SCST_CONST_H */
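
The scst_sense_* shorthand defined above is meant to be passed to scst_set_cmd_error(), as the comment in the header notes. A hedged illustration follows; the scst_set_cmd_error() prototype is assumed here to be (cmd, key, asc, ascq), which is what SCST_LOAD_SENSE() expands the triples into, and the handler context is hypothetical (see scst.h in the hunks below for the authoritative declarations):

#include <scst/scst.h>

/* Hypothetical fragment of a dev handler's error path: fail a CDB
 * with ILLEGAL REQUEST / INVALID COMMAND OPERATION CODE sense. */
static void example_reject_unsupported_opcode(struct scst_cmd *cmd)
{
	scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_opcode));
}
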
+diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.h
+--- orig/linux-2.6.36/include/scst/scst.h
++++ linux-2.6.36/include/scst/scst.h
+@@ -0,0 +1,3524 @@
++/*
++ * include/scst.h
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * Main SCSI target mid-level include file.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __SCST_H
++#define __SCST_H
++
++#include <linux/types.h>
++#include <linux/version.h>
++#include <linux/blkdev.h>
++#include <linux/interrupt.h>
++#include <linux/wait.h>
++
++/* #define CONFIG_SCST_PROC */
++
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_device.h>
++#include <scsi/scsi_eh.h>
++#include <scsi/scsi.h>
++
++#include <scst/scst_const.h>
++
++#include <scst/scst_sgv.h>
++
++/*
++ * Version numbers, the same as for the kernel.
++ *
++ * When changing it, don't forget to change SCST_FIO_REV in scst_vdisk.c
++ * and FIO_REV in usr/fileio/common.h as well.
++ */
++#define SCST_VERSION(a, b, c, d) (((a) << 24) + ((b) << 16) + ((c) << 8) + d)
++#define SCST_VERSION_CODE SCST_VERSION(2, 0, 0, 1)
++#define SCST_VERSION_STRING_SUFFIX
++#define SCST_VERSION_STRING "2.0.0.1" SCST_VERSION_STRING_SUFFIX
++#define SCST_INTERFACE_VERSION \
++ SCST_VERSION_STRING "$Revision: 3165 $" SCST_CONST_VERSION
++
++#define SCST_LOCAL_NAME "scst_local"
++
++/*************************************************************
++ ** States of the command processing state machine. First the
++ ** "active" states, then the "passive" ones. This ordering yields
++ ** more efficient generated code for the corresponding
++ ** "switch" statements.
++ *************************************************************/
++
++/* Dev handler's parse() is going to be called */
++#define SCST_CMD_STATE_PARSE 0
++
++/* Allocation of the cmd's data buffer */
++#define SCST_CMD_STATE_PREPARE_SPACE 1
++
++/* Calling preprocessing_done() */
++#define SCST_CMD_STATE_PREPROCESSING_DONE 2
++
++/* Target driver's rdy_to_xfer() is going to be called */
++#define SCST_CMD_STATE_RDY_TO_XFER 3
++
++/* Target driver's pre_exec() is going to be called */
++#define SCST_CMD_STATE_TGT_PRE_EXEC 4
++
++/* Cmd is going to be sent for execution */
++#define SCST_CMD_STATE_SEND_FOR_EXEC 5
++
++/* Cmd is being checked if it should be executed locally */
++#define SCST_CMD_STATE_LOCAL_EXEC 6
++
++/* Cmd is ready for execution */
++#define SCST_CMD_STATE_REAL_EXEC 7
++
++/* Internal post-exec checks */
++#define SCST_CMD_STATE_PRE_DEV_DONE 8
++
++/* Internal MODE SELECT pages related checks */
++#define SCST_CMD_STATE_MODE_SELECT_CHECKS 9
++
++/* Dev handler's dev_done() is going to be called */
++#define SCST_CMD_STATE_DEV_DONE 10
++
++/* Target driver's xmit_response() is going to be called */
++#define SCST_CMD_STATE_PRE_XMIT_RESP 11
++
++/* Target driver's xmit_response() is going to be called */
++#define SCST_CMD_STATE_XMIT_RESP 12
++
++/* Cmd finished */
++#define SCST_CMD_STATE_FINISHED 13
++
++/* Internal cmd finished */
++#define SCST_CMD_STATE_FINISHED_INTERNAL 14
++
++#define SCST_CMD_STATE_LAST_ACTIVE (SCST_CMD_STATE_FINISHED_INTERNAL+100)
++
++/* A cmd is created, but scst_cmd_init_done() not called */
++#define SCST_CMD_STATE_INIT_WAIT (SCST_CMD_STATE_LAST_ACTIVE+1)
++
++/* LUN translation (cmd->tgt_dev assignment) */
++#define SCST_CMD_STATE_INIT (SCST_CMD_STATE_LAST_ACTIVE+2)
++
++/* Waiting for scst_restart_cmd() */
++#define SCST_CMD_STATE_PREPROCESSING_DONE_CALLED (SCST_CMD_STATE_LAST_ACTIVE+3)
++
++/* Waiting for data from the initiator (until scst_rx_data() called) */
++#define SCST_CMD_STATE_DATA_WAIT (SCST_CMD_STATE_LAST_ACTIVE+4)
++
++/* Waiting for CDB's execution finish */
++#define SCST_CMD_STATE_REAL_EXECUTING (SCST_CMD_STATE_LAST_ACTIVE+5)
++
++/* Waiting for response's transmission finish */
++#define SCST_CMD_STATE_XMIT_WAIT (SCST_CMD_STATE_LAST_ACTIVE+6)
++
++/*************************************************************
++ * Can be returned instead of the cmd's state by dev handlers'
++ * functions, if the command's state should be set by default
++ *************************************************************/
++#define SCST_CMD_STATE_DEFAULT 500
++
++/*************************************************************
++ * Can be returned instead of the cmd's state by dev handlers'
++ * functions, if it is impossible to complete the requested
++ * task in atomic context. The cmd will be restarted in thread
++ * context.
++ *************************************************************/
++#define SCST_CMD_STATE_NEED_THREAD_CTX 1000
++
++/*************************************************************
++ * Can be returned instead of the cmd's state by dev handlers'
++ * parse function, if the cmd processing should be stopped
++ * for now. The cmd will be restarted by the dev handler itself.
++ *************************************************************/
++#define SCST_CMD_STATE_STOP 1001
++
++/*************************************************************
++ ** States of mgmt command processing state machine
++ *************************************************************/
++
++/* LUN translation (mcmd->tgt_dev assignment) */
++#define SCST_MCMD_STATE_INIT 0
++
++/* Mgmt cmd is being processed */
++#define SCST_MCMD_STATE_EXEC 1
++
++/* Waiting for affected commands done */
++#define SCST_MCMD_STATE_WAITING_AFFECTED_CMDS_DONE 2
++
++/* Post actions when affected commands done */
++#define SCST_MCMD_STATE_AFFECTED_CMDS_DONE 3
++
++/* Waiting for affected local commands finished */
++#define SCST_MCMD_STATE_WAITING_AFFECTED_CMDS_FINISHED 4
++
++/* Target driver's task_mgmt_fn_done() is going to be called */
++#define SCST_MCMD_STATE_DONE 5
++
++/* The mcmd finished */
++#define SCST_MCMD_STATE_FINISHED 6
++
++/*************************************************************
++ ** Constants for "atomic" parameter of SCST's functions
++ *************************************************************/
++#define SCST_NON_ATOMIC 0
++#define SCST_ATOMIC 1
++
++/*************************************************************
++ ** Values for pref_context parameter of scst_cmd_init_done(),
++ ** scst_rx_data(), scst_restart_cmd(), scst_tgt_cmd_done()
++ ** and scst_cmd_done()
++ *************************************************************/
++
++enum scst_exec_context {
++ /*
++ * Direct cmd's processing (i.e. regular function calls in the current
++ * context), sleeping is not allowed
++ */
++ SCST_CONTEXT_DIRECT_ATOMIC,
++
++ /*
++ * Direct cmd's processing (i.e. regular function calls in the current
++ * context), sleeping is allowed, no restrictions
++ */
++ SCST_CONTEXT_DIRECT,
++
++ /* Tasklet or thread context required for cmd's processing */
++ SCST_CONTEXT_TASKLET,
++
++ /* Thread context required for cmd's processing */
++ SCST_CONTEXT_THREAD,
++
++ /*
++ * Context is the same as it was in the previous call of the corresponding
++ * callback. For example, if a dev handler's exec() does synchronous data
++ * reading, this value should be used for scst_cmd_done(). The same is
++ * true if scst_tgt_cmd_done() is called directly from the target driver's
++ * xmit_response(). Not allowed in scst_cmd_init_done() and
++ * scst_cmd_init_stage1_done().
++ */
++ SCST_CONTEXT_SAME
++};
++
++/*************************************************************
++ ** Values for status parameter of scst_rx_data()
++ *************************************************************/
++
++/* Success */
++#define SCST_RX_STATUS_SUCCESS 0
++
++/*
++ * Data receiving finished with error, so set the sense and
++ * finish the command, including xmit_response() call
++ */
++#define SCST_RX_STATUS_ERROR 1
++
++/*
++ * Data receiving finished with error and the sense is set,
++ * so finish the command, including xmit_response() call
++ */
++#define SCST_RX_STATUS_ERROR_SENSE_SET 2
++
++/*
++ * Data receiving finished with fatal error, so finish the command,
++ * but don't call xmit_response()
++ */
++#define SCST_RX_STATUS_ERROR_FATAL 3
++
++/*************************************************************
++ ** Values for status parameter of scst_restart_cmd()
++ *************************************************************/
++
++/* Success */
++#define SCST_PREPROCESS_STATUS_SUCCESS 0
++
++/*
++ * Command's processing finished with error, so set the sense and
++ * finish the command, including xmit_response() call
++ */
++#define SCST_PREPROCESS_STATUS_ERROR 1
++
++/*
++ * Command's processing finished with error and the sense is set,
++ * so finish the command, including xmit_response() call
++ */
++#define SCST_PREPROCESS_STATUS_ERROR_SENSE_SET 2
++
++/*
++ * Command's processing finished with fatal error, so finish the command,
++ * but don't call xmit_response()
++ */
++#define SCST_PREPROCESS_STATUS_ERROR_FATAL 3
++
++/*************************************************************
++ ** Values for AEN functions
++ *************************************************************/
++
++/*
++ * SCSI Asynchronous Event. Parameter contains SCSI sense
++ * (Unit Attention). AENs are generated only for the following 2 UAs:
++ * CAPACITY DATA HAS CHANGED and REPORTED LUNS DATA HAS CHANGED.
++ * Other UAs are reported regularly as CHECK CONDITION status,
++ * because it doesn't look safe to report them using AENs, since
++ * reporting using AENs opens delivery race windows even in case of
++ * untagged commands.
++ */
++#define SCST_AEN_SCSI 0
++
++/*************************************************************
++ ** Allowed return/status codes for report_aen() callback and
++ ** scst_set_aen_delivery_status() function
++ *************************************************************/
++
++/* Success */
++#define SCST_AEN_RES_SUCCESS 0
++
++/* Not supported */
++#define SCST_AEN_RES_NOT_SUPPORTED -1
++
++/* Failure */
++#define SCST_AEN_RES_FAILED -2
++
++/*************************************************************
++ ** Allowed return codes for xmit_response(), rdy_to_xfer()
++ *************************************************************/
++
++/* Success */
++#define SCST_TGT_RES_SUCCESS 0
++
++/* Internal device queue is full, retry again later */
++#define SCST_TGT_RES_QUEUE_FULL -1
++
++/*
++ * It is impossible to complete requested task in atomic context.
++ * The cmd will be restarted in thread context.
++ */
++#define SCST_TGT_RES_NEED_THREAD_CTX -2
++
++/*
++ * Fatal error, if returned by xmit_response() the cmd will
++ * be destroyed, if by any other function, xmit_response()
++ * will be called with HARDWARE ERROR sense data
++ */
++#define SCST_TGT_RES_FATAL_ERROR -3
++
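++/*
++ * Usage sketch (illustrative only; the my_* helpers are hypothetical):
++ * a typical xmit_response() implementation maps its hardware queue state
++ * onto the codes above:
++ *
++ *     static int my_xmit_response(struct scst_cmd *cmd)
++ *     {
++ *             if (my_hw_queue_full(cmd))
++ *                     return SCST_TGT_RES_QUEUE_FULL;
++ *             if (my_hw_send_response(cmd) != 0)
++ *                     return SCST_TGT_RES_FATAL_ERROR;
++ *             return SCST_TGT_RES_SUCCESS;
++ *     }
++ *
++ * scst_tgt_cmd_done() is then called later, once the response has actually
++ * been transmitted.
++ */
++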
++/*************************************************************
++ ** Allowed return codes for dev handler's exec()
++ *************************************************************/
++
++/* The cmd is done, go to other ones */
++#define SCST_EXEC_COMPLETED 0
++
++/* The cmd should be sent to SCSI mid-level */
++#define SCST_EXEC_NOT_COMPLETED 1
++
++/*
++ * Set if cmd is finished and there is status/sense to be sent.
++ * The status should not be sent (i.e. the flag not set) if the
++ * possibility to perform a command in "chunks" (i.e. with multiple
++ * xmit_response()/rdy_to_xfer()) is used (not implemented yet).
++ * Obsolete, use scst_cmd_get_is_send_status() instead.
++ */
++#define SCST_TSC_FLAG_STATUS 0x2
++
++/*************************************************************
++ ** Additional return code for dev handler's task_mgmt_fn()
++ *************************************************************/
++
++/* Regular standard actions for the command should be done */
++#define SCST_DEV_TM_NOT_COMPLETED 1
++
++/*************************************************************
++ ** Session initialization phases
++ *************************************************************/
++
++/* Set if session is being initialized */
++#define SCST_SESS_IPH_INITING 0
++
++/* Set if the session is successfully initialized */
++#define SCST_SESS_IPH_SUCCESS 1
++
++/* Set if the session initialization failed */
++#define SCST_SESS_IPH_FAILED 2
++
++/* Set if session is initialized and ready */
++#define SCST_SESS_IPH_READY 3
++
++/*************************************************************
++ ** Session shutdown phases
++ *************************************************************/
++
++/* Set if session is initialized and ready */
++#define SCST_SESS_SPH_READY 0
++
++/* Set if session is shutting down */
++#define SCST_SESS_SPH_SHUTDOWN 1
++
++/* Set if the session's unreg_done_fn() is being called */
++#define SCST_SESS_SPH_UNREG_DONE_CALLING 2
++
++/*************************************************************
++ ** Session's async (atomic) flags
++ *************************************************************/
++
++/* Set if the sess's hw pending work is scheduled */
++#define SCST_SESS_HW_PENDING_WORK_SCHEDULED 0
++
++/*************************************************************
++ ** Cmd's async (atomic) flags
++ *************************************************************/
++
++/* Set if the cmd is aborted and ABORTED sense will be sent as the result */
++#define SCST_CMD_ABORTED 0
++
++/* Set if the cmd is aborted by other initiator */
++#define SCST_CMD_ABORTED_OTHER 1
++
++/* Set if no response should be sent to the target about this cmd */
++#define SCST_CMD_NO_RESP 2
++
++/* Set if the cmd is dead and can be destroyed at any time */
++#define SCST_CMD_CAN_BE_DESTROYED 3
++
++/*
++ * Set if the cmd's device has TAS flag set. Used only when aborted by
++ * other initiator.
++ */
++#define SCST_CMD_DEVICE_TAS 4
++
++/*************************************************************
++ ** Tgt_dev's async. flags (tgt_dev_flags)
++ *************************************************************/
++
++/* Set if tgt_dev has Unit Attention sense */
++#define SCST_TGT_DEV_UA_PENDING 0
++
++/* Set if tgt_dev is RESERVED by another session */
++#define SCST_TGT_DEV_RESERVED 1
++
++/* Set if the corresponding context is atomic */
++#define SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC 5
++#define SCST_TGT_DEV_AFTER_EXEC_ATOMIC 6
++
++#define SCST_TGT_DEV_CLUST_POOL 11
++
++/*************************************************************
++ ** I/O grouping types. When changing them, don't forget to change
++ ** the corresponding *_STR values in scst_const.h!
++ *************************************************************/
++
++/*
++ * All initiators with the same name connected to this group will have
++ * a shared IO context, one context per name. All initiators with
++ * different names will have their own IO context.
++ */
++#define SCST_IO_GROUPING_AUTO 0
++
++/* All initiators connected to this group will have shared IO context */
++#define SCST_IO_GROUPING_THIS_GROUP_ONLY -1
++
++/* Each initiator connected to this group will have own IO context */
++#define SCST_IO_GROUPING_NEVER -2
++
++/*************************************************************
++ ** Kernel cache creation helper
++ *************************************************************/
++#ifndef KMEM_CACHE
++#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
++ sizeof(struct __struct), __alignof__(struct __struct),\
++ (__flags), NULL, NULL)
++#endif
++
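++/*
++ * Usage sketch (illustrative only; my_obj is a hypothetical structure):
++ * with the fallback above, cache creation looks the same whether or not
++ * the running kernel already provides KMEM_CACHE():
++ *
++ *     struct kmem_cache *my_cache = KMEM_CACHE(my_obj, SLAB_HWCACHE_ALIGN);
++ *     if (my_cache == NULL)
++ *             return -ENOMEM;
++ */
++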
++/*************************************************************
++ ** Valid_mask constants for scst_analyze_sense()
++ *************************************************************/
++
++#define SCST_SENSE_KEY_VALID 1
++#define SCST_SENSE_ASC_VALID 2
++#define SCST_SENSE_ASCQ_VALID 4
++
++#define SCST_SENSE_ASCx_VALID (SCST_SENSE_ASC_VALID | \
++ SCST_SENSE_ASCQ_VALID)
++
++#define SCST_SENSE_ALL_VALID (SCST_SENSE_KEY_VALID | \
++ SCST_SENSE_ASC_VALID | \
++ SCST_SENSE_ASCQ_VALID)
++
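++/*
++ * Usage sketch (illustrative only, assuming the scst_analyze_sense(sense,
++ * len, valid_mask, key, asc, ascq) prototype declared elsewhere in this
++ * header; handle_reset_ua() is hypothetical): the mask selects which of
++ * key/asc/ascq must actually match, e.g. to check only the sense key and
++ * ASC while ignoring ASCQ:
++ *
++ *     if (scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
++ *                     SCST_SENSE_KEY_VALID | SCST_SENSE_ASC_VALID,
++ *                     UNIT_ATTENTION, 0x29, 0))
++ *             handle_reset_ua(cmd);
++ */
++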
++/*************************************************************
++ * TYPES
++ *************************************************************/
++
++struct scst_tgt;
++struct scst_session;
++struct scst_cmd;
++struct scst_mgmt_cmd;
++struct scst_device;
++struct scst_tgt_dev;
++struct scst_dev_type;
++struct scst_acg;
++struct scst_acg_dev;
++struct scst_acn;
++struct scst_aen;
++
++/*
++ * SCST uses 64-bit numbers to represent LUN's internally. The value
++ * NO_SUCH_LUN is guaranteed to be different from every valid LUN.
++ */
++#define NO_SUCH_LUN ((uint64_t)-1)
++
++typedef enum dma_data_direction scst_data_direction;
++
++/*
++ * SCST target template: defines target driver's parameters and callback
++ * functions.
++ *
++ * MUST HAVEs define functions that are expected to be defined in order to
++ * work. OPTIONAL says that there is a choice.
++ */
++struct scst_tgt_template {
++ /* public: */
++
++ /*
++ * SG tablesize allows checking whether scatter/gather can be used
++ * or not.
++ */
++ int sg_tablesize;
++
++ /*
++ * True, if this target adapter uses unchecked DMA onto an ISA bus.
++ */
++ unsigned unchecked_isa_dma:1;
++
++ /*
++ * True, if this target adapter can benefit from using SG-vector
++ * clustering (i.e. smaller number of segments).
++ */
++ unsigned use_clustering:1;
++
++ /*
++ * True, if this target adapter doesn't support SG-vector clustering
++ */
++ unsigned no_clustering:1;
++
++ /*
++ * True, if corresponding function supports execution in
++ * the atomic (non-sleeping) context
++ */
++ unsigned xmit_response_atomic:1;
++ unsigned rdy_to_xfer_atomic:1;
++
++ /* True, if this target doesn't need "enabled" attribute */
++ unsigned enabled_attr_not_needed:1;
++
++ /*
++ * The maximum time in seconds cmd can stay inside the target
++ * hardware, i.e. after rdy_to_xfer() and xmit_response(), before
++ * on_hw_pending_cmd_timeout() will be called, if defined.
++ *
++ * In the current implementation a cmd will be aborted in time t
++ * max_hw_pending_time <= t < 2*max_hw_pending_time.
++ */
++ int max_hw_pending_time;
++
++ /*
++ * This function is equivalent to the SCSI
++ * queuecommand. The target should transmit the response
++ * buffer and the status in the scst_cmd struct.
++ * The expectation is that executing this command is NON-BLOCKING.
++ * If it is blocking, consider setting threads_num to some non-zero value.
++ *
++ * After the response is actually transmitted, the target
++ * should call the scst_tgt_cmd_done() function of the
++ * mid-level, which will allow it to free up the command.
++ * Returns one of the SCST_TGT_RES_* constants.
++ *
++ * Pay attention to the "atomic" attribute of the cmd, which can be
++ * obtained by scst_cmd_atomic(): it is true if the function is called
++ * in atomic (non-sleeping) context.
++ *
++ * MUST HAVE
++ */
++ int (*xmit_response) (struct scst_cmd *cmd);
++
++ /*
++ * This function informs the driver that the data
++ * buffer corresponding to the said command has now been
++ * allocated and it is OK to receive data for this command.
++ * This function is necessary because a SCSI target does not
++ * have any control over the commands it receives. Most lower
++ * level protocols have a corresponding function which informs
++ * the initiator that buffers have been allocated e.g., XFER_
++ * RDY in Fibre Channel. After the data is actually received
++ * the low-level driver needs to call scst_rx_data() in order to
++ * continue processing this command.
++ * Returns one of the SCST_TGT_RES_* constants.
++ *
++ * This command is expected to be NON-BLOCKING.
++ * If it is blocking, consider setting threads_num to some non-zero value.
++ *
++ * Pay attention to the "atomic" attribute of the cmd, which can be
++ * obtained by scst_cmd_atomic(): it is true if the function is called
++ * in atomic (non-sleeping) context.
++ *
++ * OPTIONAL
++ */
++ int (*rdy_to_xfer) (struct scst_cmd *cmd);
++
++ /*
++ * Called if cmd stays inside the target hardware, i.e. after
++ * rdy_to_xfer() and xmit_response(), more than max_hw_pending_time
++ * time. The target driver is supposed to clean up this command and
++ * resume the cmd's processing.
++ *
++ * OPTIONAL
++ */
++ void (*on_hw_pending_cmd_timeout) (struct scst_cmd *cmd);
++
++ /*
++ * Called to notify the driver that the command is about to be freed.
++ * Necessary, because for aborted commands xmit_response() might not
++ * be called. Could be called in IRQ context.
++ *
++ * OPTIONAL
++ */
++ void (*on_free_cmd) (struct scst_cmd *cmd);
++
++ /*
++ * This function allows target driver to handle data buffer
++ * allocations on its own.
++ *
++ * The target driver doesn't always have to allocate the buffer in this
++ * function, but if it decides to do so, it must check that
++ * scst_cmd_get_data_buff_alloced() returns 0; otherwise, to avoid
++ * double buffer allocation and memory leaks, alloc_data_buf() shall
++ * fail.
++ *
++ * Shall return 0 in case of success or < 0 (preferably -ENOMEM)
++ * in case of error, or > 0 if the regular SCST allocation should be
++ * done. In case of a successful return,
++ * scst_cmd->tgt_data_buf_alloced will be set by SCST.
++ *
++ * It is possible that both target driver and dev handler request own
++ * memory allocation. In this case, data will be memcpy() between
++ * buffers, where necessary.
++ *
++ * If allocation in atomic context - cf. scst_cmd_atomic() - is not
++ * desired or fails and consequently < 0 is returned, this function
++ * will be re-called in thread context.
++ *
++ * Please note that the driver will have to handle itself all relevant
++ * details such as scatterlist setup, highmem, freeing the allocated
++ * memory, etc.
++ *
++ * OPTIONAL.
++ */
++ int (*alloc_data_buf) (struct scst_cmd *cmd);
++
++ /*
++ * This function informs the driver that the data
++ * buffer corresponding to the said command has now been
++ * allocated and other preprocessing tasks have been done.
++ * A target driver could need to do some actions at this stage.
++ * After the target driver has done the needed actions, it shall call
++ * scst_restart_cmd() in order to continue processing this command.
++ * In case of preliminary command completion, this function will
++ * also be called before xmit_response().
++ *
++ * Called only if the cmd is queued using scst_cmd_init_stage1_done()
++ * instead of scst_cmd_init_done().
++ *
++ * Returns void, the result is expected to be returned using
++ * scst_restart_cmd().
++ *
++ * This command is expected to be NON-BLOCKING.
++ * If it is blocking, consider setting threads_num to some non-zero value.
++ *
++ * Pay attention to the "atomic" attribute of the cmd, which can be
++ * obtained by scst_cmd_atomic(): it is true if the function is called
++ * in atomic (non-sleeping) context.
++ *
++ * OPTIONAL.
++ */
++ void (*preprocessing_done) (struct scst_cmd *cmd);
++
++ /*
++ * This function informs the driver that the said command is about
++ * to be executed.
++ *
++ * Returns one of the SCST_PREPROCESS_* constants.
++ *
++ * This command is expected to be NON-BLOCKING.
++ * If it is blocking, consider setting threads_num to some non-zero value.
++ *
++ * OPTIONAL
++ */
++ int (*pre_exec) (struct scst_cmd *cmd);
++
++ /*
++ * This function informs the driver that all commands affected by the
++ * corresponding task management function have been completed.
++ * No return value expected.
++ *
++ * This function is expected to be NON-BLOCKING.
++ *
++ * Called without any locks held from a thread context.
++ *
++ * OPTIONAL
++ */
++ void (*task_mgmt_affected_cmds_done) (struct scst_mgmt_cmd *mgmt_cmd);
++
++ /*
++ * This function informs the driver that the corresponding task
++ * management function has been completed, i.e. all the corresponding
++ * commands completed and freed. No return value expected.
++ *
++ * This function is expected to be NON-BLOCKING.
++ *
++ * Called without any locks held from a thread context.
++ *
++ * MUST HAVE if the target supports task management.
++ */
++ void (*task_mgmt_fn_done) (struct scst_mgmt_cmd *mgmt_cmd);
++
++ /*
++ * This function should detect the target adapters that
++ * are present in the system. The function should return a value
++ * >= 0 to signify the number of detected target adapters.
++ * A negative value should be returned whenever there is
++ * an error.
++ *
++ * MUST HAVE
++ */
++ int (*detect) (struct scst_tgt_template *tgt_template);
++
++ /*
++ * This function should free up the resources allocated to the device.
++ * The function should return 0 to indicate successful release
++ * or a negative value if there are some issues with the release.
++ * In the current version the return value is ignored.
++ *
++ * MUST HAVE
++ */
++ int (*release) (struct scst_tgt *tgt);
++
++ /*
++ * This function is used for Asynchronous Event Notifications.
++ *
++ * Returns one of the SCST_AEN_RES_* constants.
++ * After AEN is sent, target driver must call scst_aen_done() and,
++ * optionally, scst_set_aen_delivery_status().
++ *
++ * This function is expected to be NON-BLOCKING, but can sleep.
++ *
++ * This function must be prepared to handle AENs arriving for the
++ * corresponding session between the call to scst_unregister_session()
++ * and the unreg_done_fn() callback, or before scst_unregister_session()
++ * returns, if it's called in blocking mode. AENs for such sessions
++ * should be ignored.
++ *
++ * MUST HAVE, if low-level protocol supports AENs.
++ */
++ int (*report_aen) (struct scst_aen *aen);
++
++ /*
++ * This function returns in transport_id the TransportID of the
++ * initiator port corresponding to sess, in the form used by PR
++ * commands; see "Transport Identifiers" in SPC. Space for the initiator
++ * port TransportID must be allocated via kmalloc(). The caller is
++ * supposed to kfree() it when it isn't needed anymore.
++ *
++ * If sess is NULL, this function must return TransportID PROTOCOL
++ * IDENTIFIER of this transport.
++ *
++ * Returns 0 on success or negative error code otherwise.
++ *
++ * SHOULD HAVE, because it's required for Persistent Reservations.
++ */
++ int (*get_initiator_port_transport_id) (struct scst_session *sess,
++ uint8_t **transport_id);
++
++ /*
++ * This function allows to enable or disable particular target.
++ * A disabled target doesn't receive and process any SCSI commands.
++ *
++ * SHOULD HAVE, to avoid a race when initiators connect while the
++ * target has not yet completed its initial configuration. In this
++ * case initiators that connect too early would not see the devices
++ * they intended to see.
++ *
++ * If you are sure your target driver doesn't need target enabling,
++ * you should set enabled_attr_not_needed to 1.
++ */
++ int (*enable_target) (struct scst_tgt *tgt, bool enable);
++
++ /*
++ * This function shows if particular target is enabled or not.
++ *
++ * SHOULD HAVE, see above why.
++ */
++ bool (*is_target_enabled) (struct scst_tgt *tgt);
++
++ /*
++ * This function adds a virtual target.
++ *
++ * If both add_target and del_target callbacks are defined, then this
++ * target driver is supposed to support virtual targets. In this case
++ * an "mgmt" entry will be created in the sysfs root for this driver.
++ * The "mgmt" entry will support 2 commands: "add_target" and
++ * "del_target", for which the corresponding callbacks will be called.
++ * Also target driver can define own commands for the "mgmt" entry, see
++ * mgmt_cmd and mgmt_cmd_help below.
++ *
++ * This approach allows uniform targets management to simplify external
++ * management tools like scstadmin. See README for more details.
++ *
++ * Either both add_target and del_target must be defined, or none.
++ *
++ * MUST HAVE if virtual targets are supported.
++ */
++ ssize_t (*add_target) (const char *target_name, char *params);
++
++ /*
++ * This function deletes a virtual target. See comment for add_target
++ * above.
++ *
++ * MUST HAVE if virtual targets are supported.
++ */
++ ssize_t (*del_target) (const char *target_name);
++
++ /*
++ * This function is called if a command other than "add_target" or
++ * "del_target" is sent to the mgmt entry (see comment for add_target
++ * above). In this case the command is passed to this function as-is,
++ * in string form.
++ *
++ * OPTIONAL.
++ */
++ ssize_t (*mgmt_cmd) (char *cmd);
++
++ /*
++ * Should return physical transport version. Used in the corresponding
++ * INQUIRY version descriptor. See SPC for the list of available codes.
++ *
++ * OPTIONAL
++ */
++ uint16_t (*get_phys_transport_version) (struct scst_tgt *tgt);
++
++ /*
++ * Should return SCSI transport version. Used in the corresponding
++ * INQUIRY version descriptor. See SPC for the list of available codes.
++ *
++ * OPTIONAL
++ */
++ uint16_t (*get_scsi_transport_version) (struct scst_tgt *tgt);
++
++ /*
++ * Name of the template. Must be unique to identify
++ * the template. MUST HAVE
++ */
++ const char name[SCST_MAX_NAME];
++
++ /*
++ * Number of additional threads to the pool of dedicated threads.
++ * Used if xmit_response() or rdy_to_xfer() is blocking.
++ * It is the target driver's duty to ensure that no more than that
++ * number of threads are blocked in those functions at any time.
++ */
++ int threads_num;
++
++ /* Optional default log flags */
++ const unsigned long default_trace_flags;
++
++ /* Optional pointer to trace flags */
++ unsigned long *trace_flags;
++
++ /* Optional local trace table */
++ struct scst_trace_log *trace_tbl;
++
++ /* Optional local trace table help string */
++ const char *trace_tbl_help;
++
++ /* sysfs attributes, if any */
++ const struct attribute **tgtt_attrs;
++
++ /* sysfs target attributes, if any */
++ const struct attribute **tgt_attrs;
++
++ /* sysfs session attributes, if any */
++ const struct attribute **sess_attrs;
++
++ /* Optional help string for mgmt_cmd commands */
++ const char *mgmt_cmd_help;
++
++ /* List of parameters for add_target command, if any */
++ const char *add_target_parameters;
++
++ /*
++ * List of optional, i.e. which could be added by add_attribute command
++ * and deleted by del_attribute command, sysfs attributes, if any.
++ * Helpful for scstadmin to work correctly.
++ */
++ const char *tgtt_optional_attributes;
++
++ /*
++ * List of optional, i.e. which could be added by add_target_attribute
++ * command and deleted by del_target_attribute command, sysfs
++ * attributes, if any. Helpful for scstadmin to work correctly.
++ */
++ const char *tgt_optional_attributes;
++
++ /** Private, must be inited to 0 by memset() **/
++
++ /* List of targets per template, protected by scst_mutex */
++ struct list_head tgt_list;
++
++ /* List entry of global templates list */
++ struct list_head scst_template_list_entry;
++
++ struct kobject tgtt_kobj; /* kobject for this struct */
++
++ /* Number of currently active sysfs mgmt works (scst_sysfs_work_item) */
++ int tgtt_active_sysfs_works_count;
++
++ /* sysfs release completion */
++ struct completion tgtt_kobj_release_cmpl;
++
++};
++
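++/*
++ * Usage sketch (illustrative only; the my_* names are hypothetical and the
++ * registration call assumes the scst_register_target_template() helper
++ * declared elsewhere in this header): a minimal target driver fills in the
++ * MUST HAVE members and registers the template, e.g.:
++ *
++ *     static struct scst_tgt_template my_tgt_template = {
++ *             .name           = "my_tgt",
++ *             .sg_tablesize   = SG_ALL,
++ *             .detect         = my_detect,
++ *             .release        = my_release,
++ *             .xmit_response  = my_xmit_response,
++ *             .rdy_to_xfer    = my_rdy_to_xfer,
++ *     };
++ *
++ *     if (scst_register_target_template(&my_tgt_template) != 0)
++ *             goto out_error;
++ */
++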
++/*
++ * Threads pool types. When changing them, don't forget to change
++ * the corresponding *_STR values in scst_const.h!
++ */
++enum scst_dev_type_threads_pool_type {
++ /* Each initiator will have dedicated threads pool. */
++ SCST_THREADS_POOL_PER_INITIATOR = 0,
++
++ /* All connected initiators will use shared threads pool */
++ SCST_THREADS_POOL_SHARED,
++
++ /* Invalid value for scst_parse_threads_pool_type() */
++ SCST_THREADS_POOL_TYPE_INVALID,
++};
++
++/*
++ * SCST dev handler template: defines dev handler's parameters and callback
++ * functions.
++ *
++ * MUST HAVEs define functions that are expected to be defined in order to
++ * work. OPTIONAL says that there is a choice.
++ */
++struct scst_dev_type {
++ /* SCSI type of the supported device. MUST HAVE */
++ int type;
++
++ /*
++ * True, if corresponding function supports execution in
++ * the atomic (non-sleeping) context
++ */
++ unsigned parse_atomic:1;
++ unsigned alloc_data_buf_atomic:1;
++ unsigned dev_done_atomic:1;
++
++ /*
++ * Should be true, if exec() is synchronous. This is a hint to SCST core
++ * to optimize commands order management.
++ */
++ unsigned exec_sync:1;
++
++ /*
++ * Should be set if the device wants to receive notification of
++ * Persistent Reservation commands (PR OUT only).
++ * Note: the notification will not be sent if the command failed.
++ */
++ unsigned pr_cmds_notifications:1;
++
++ /*
++ * Called to parse CDB from the cmd and initialize
++ * cmd->bufflen and cmd->data_direction (both - REQUIRED).
++ *
++ * Returns the command's next state or SCST_CMD_STATE_DEFAULT,
++ * if the next default state should be used, or
++ * SCST_CMD_STATE_NEED_THREAD_CTX if the function called in atomic
++ * context, but requires sleeping, or SCST_CMD_STATE_STOP if the
++ * command should not be further processed for now. In the
++ * SCST_CMD_STATE_NEED_THREAD_CTX case the function
++ * will be recalled in the thread context, where sleeping is allowed.
++ *
++ * Pay attention to the "atomic" attribute of the cmd, which can be
++ * obtained by scst_cmd_atomic(): it is true if the function is called
++ * in atomic (non-sleeping) context.
++ *
++ * MUST HAVE
++ */
++ int (*parse) (struct scst_cmd *cmd);
++
++ /*
++ * This function allows dev handler to handle data buffer
++ * allocations on its own.
++ *
++ * Returns the command's next state or SCST_CMD_STATE_DEFAULT,
++ * if the next default state should be used, or
++ * SCST_CMD_STATE_NEED_THREAD_CTX if the function called in atomic
++ * context, but requires sleeping, or SCST_CMD_STATE_STOP if the
++ * command should not be further processed for now. In the
++ * SCST_CMD_STATE_NEED_THREAD_CTX case the function
++ * will be recalled in the thread context, where sleeping is allowed.
++ *
++ * Pay attention to the "atomic" attribute of the cmd, which can be
++ * obtained by scst_cmd_atomic(): it is true if the function is called
++ * in atomic (non-sleeping) context.
++ *
++ * OPTIONAL
++ */
++ int (*alloc_data_buf) (struct scst_cmd *cmd);
++
++ /*
++ * Called to execute CDB. Useful, for instance, to implement
++ * data caching. The result of CDB execution is reported via
++ * cmd->scst_cmd_done() callback.
++ * Returns:
++ * - SCST_EXEC_COMPLETED - the cmd is done, go to other ones
++ * - SCST_EXEC_NOT_COMPLETED - the cmd should be sent to SCSI
++ * mid-level.
++ *
++ * If this function provides sync execution, you should set
++ * exec_sync flag and consider to setup dedicated threads by
++ * setting threads_num > 0.
++ *
++ * !! If this function is implemented, scst_check_local_events() !!
++ * !! shall be called inside it just before the actual command's !!
++ * !! execution. !!
++ *
++ * OPTIONAL, if not set, the commands will be sent directly to SCSI
++ * device.
++ */
++ int (*exec) (struct scst_cmd *cmd);
++
++ /*
++ * Called to notify dev handler about the result of cmd execution
++ * and perform some post processing. Cmd's fields is_send_status and
++ * resp_data_len should be set by this function, but SCST offers good
++ * defaults.
++ * Returns the command's next state or SCST_CMD_STATE_DEFAULT,
++ * if the next default state should be used, or
++ * SCST_CMD_STATE_NEED_THREAD_CTX if the function called in atomic
++ * context, but requires sleeping. In the last case, the function
++ * will be recalled in the thread context, where sleeping is allowed.
++ *
++ * Pay attention to the "atomic" attribute of the cmd, which can be
++ * obtained by scst_cmd_atomic(): it is true if the function is called
++ * in atomic (non-sleeping) context.
++ */
++ int (*dev_done) (struct scst_cmd *cmd);
++
++ /*
++ * Called to notify the dev handler that the command is about to be freed.
++ * Could be called in IRQ context.
++ */
++ void (*on_free_cmd) (struct scst_cmd *cmd);
++
++ /*
++ * Called to execute a task management command.
++ * Returns:
++ * - SCST_MGMT_STATUS_SUCCESS - the command is done with success,
++ * no further actions required
++ * - The SCST_MGMT_STATUS_* error code if the command is failed and
++ * no further actions required
++ * - SCST_DEV_TM_NOT_COMPLETED - regular standard actions for the
++ * command should be done
++ *
++ * Called without any locks held from a thread context.
++ */
++ int (*task_mgmt_fn) (struct scst_mgmt_cmd *mgmt_cmd,
++ struct scst_tgt_dev *tgt_dev);
++
++ /*
++ * Called when new device is attaching to the dev handler
++ * Returns 0 on success, error code otherwise.
++ */
++ int (*attach) (struct scst_device *dev);
++
++ /* Called when a device is detaching from the dev handler */
++ void (*detach) (struct scst_device *dev);
++
++ /*
++ * Called when new tgt_dev (session) is attaching to the dev handler.
++ * Returns 0 on success, error code otherwise.
++ */
++ int (*attach_tgt) (struct scst_tgt_dev *tgt_dev);
++
++ /* Called when tgt_dev (session) is detaching from the dev handler */
++ void (*detach_tgt) (struct scst_tgt_dev *tgt_dev);
++
++ /*
++ * This function adds a virtual device.
++ *
++ * If both add_device and del_device callbacks are defined, then this
++ * dev handler is supposed to support adding/deleting virtual devices.
++ * In this case an "mgmt" entry will be created in the sysfs root for
++ * this handler. The "mgmt" entry will support 2 commands: "add_device"
++ * and "del_device", for which the corresponding callbacks will be called.
++ * Also dev handler can define own commands for the "mgmt" entry, see
++ * mgmt_cmd and mgmt_cmd_help below.
++ *
++ * This approach allows uniform devices management to simplify external
++ * management tools like scstadmin. See README for more details.
++ *
++ * Either both add_device and del_device must be defined, or none.
++ *
++ * MUST HAVE if virtual devices are supported.
++ */
++ ssize_t (*add_device) (const char *device_name, char *params);
++
++ /*
++ * This function deletes a virtual device. See comment for add_device
++ * above.
++ *
++ * MUST HAVE if virtual devices are supported.
++ */
++ ssize_t (*del_device) (const char *device_name);
++
++ /*
++ * This function is called if a command other than "add_device" or
++ * "del_device" is sent to the mgmt entry (see comment for add_device
++ * above). In this case the command is passed to this function as-is,
++ * in string form.
++ *
++ * OPTIONAL.
++ */
++ ssize_t (*mgmt_cmd) (char *cmd);
++
++ /*
++ * Name of the dev handler. Must be unique. MUST HAVE.
++ *
++ * It's SCST_MAX_NAME + few more bytes to match scst_user expectations.
++ */
++ char name[SCST_MAX_NAME + 10];
++
++ /*
++ * Number of threads in this handler's devices' threads pools.
++ * If 0 - no threads will be created, if <0 - creation of the threads
++ * pools is prohibited. Also pay attention to threads_pool_type below.
++ */
++ int threads_num;
++
++ /* Threads pool type. Valid only if threads_num > 0. */
++ enum scst_dev_type_threads_pool_type threads_pool_type;
++
++ /* Optional default log flags */
++ const unsigned long default_trace_flags;
++
++ /* Optional pointer to trace flags */
++ unsigned long *trace_flags;
++
++ /* Optional local trace table */
++ struct scst_trace_log *trace_tbl;
++
++ /* Optional local trace table help string */
++ const char *trace_tbl_help;
++
++ /* Optional help string for mgmt_cmd commands */
++ const char *mgmt_cmd_help;
++
++ /* List of parameters for add_device command, if any */
++ const char *add_device_parameters;
++
++ /*
++ * List of optional, i.e. which could be added by add_attribute command
++ * and deleted by del_attribute command, sysfs attributes, if any.
++ * Helpful for scstadmin to work correctly.
++ */
++ const char *devt_optional_attributes;
++
++ /*
++ * List of optional, i.e. which could be added by add_device_attribute
++ * command and deleted by del_device_attribute command, sysfs
++ * attributes, if any. Helpful for scstadmin to work correctly.
++ */
++ const char *dev_optional_attributes;
++
++ /* sysfs attributes, if any */
++ const struct attribute **devt_attrs;
++
++ /* sysfs device attributes, if any */
++ const struct attribute **dev_attrs;
++
++ /* Pointer to dev handler's private data */
++ void *devt_priv;
++
++ /* Pointer to parent dev type in the sysfs hierarchy */
++ struct scst_dev_type *parent;
++
++ struct module *module;
++
++ /** Private, must be inited to 0 by memset() **/
++
++ /* list entry in scst_(virtual_)dev_type_list */
++ struct list_head dev_type_list_entry;
++
++ struct kobject devt_kobj; /* main handlers/driver */
++
++ /* Number of currently active sysfs mgmt works (scst_sysfs_work_item) */
++ int devt_active_sysfs_works_count;
++
++ /* To wait until devt_kobj released */
++ struct completion devt_kobj_release_compl;
++};
++
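++/*
++ * Usage sketch (illustrative only; the my_* names are hypothetical and
++ * registration via scst_register_dev_driver() is assumed to be declared
++ * elsewhere in this header): a minimal dev handler defines at least the
++ * device type and parse(), e.g.:
++ *
++ *     static struct scst_dev_type my_devtype = {
++ *             .name           = "my_devtype",
++ *             .type           = TYPE_DISK,
++ *             .parse_atomic   = 1,
++ *             .exec_sync      = 1,
++ *             .parse          = my_parse,
++ *             .exec           = my_exec,
++ *             .dev_done       = my_dev_done,
++ *     };
++ */
++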
++/*
++ * An SCST target, analog of SCSI target port.
++ */
++struct scst_tgt {
++ /* List of remote sessions per target, protected by scst_mutex */
++ struct list_head sess_list;
++
++ /* List entry of targets per template (tgts_list) */
++ struct list_head tgt_list_entry;
++
++ struct scst_tgt_template *tgtt; /* corresponding target template */
++
++ struct scst_acg *default_acg; /* default acg for this target */
++
++ struct list_head tgt_acg_list; /* target ACG groups */
++
++ /*
++ * Maximum SG table size. Needed here, since different cards on the
++ * same target template can have different SG table limitations.
++ */
++ int sg_tablesize;
++
++ /* Used for storage of target driver private stuff */
++ void *tgt_priv;
++
++ /*
++ * The following fields are used to store and retry cmds if the target's
++ * internal queue is full, so the target is unable to accept
++ * the cmd and returns QUEUE FULL.
++ * They are protected by tgt_lock, where necessary.
++ */
++ bool retry_timer_active;
++ struct timer_list retry_timer;
++ atomic_t finished_cmds;
++ int retry_cmds;
++ spinlock_t tgt_lock;
++ struct list_head retry_cmd_list;
++
++ /* Used to wait until session finished to unregister */
++ wait_queue_head_t unreg_waitQ;
++
++ /* Name of the target */
++ char *tgt_name;
++
++ uint16_t rel_tgt_id;
++
++ /* sysfs release completion */
++ struct completion tgt_kobj_release_cmpl;
++
++ struct kobject tgt_kobj; /* main targets/target kobject */
++ struct kobject *tgt_sess_kobj; /* target/sessions/ */
++ struct kobject *tgt_luns_kobj; /* target/luns/ */
++ struct kobject *tgt_ini_grp_kobj; /* target/ini_groups/ */
++};
++
++/* Hash size and hash fn for hash based lun translation */
++#define TGT_DEV_HASH_SHIFT 5
++#define TGT_DEV_HASH_SIZE (1 << TGT_DEV_HASH_SHIFT)
++#define HASH_VAL(_val) (_val & (TGT_DEV_HASH_SIZE - 1))
++
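++/*
++ * Usage sketch (illustrative only, locking omitted): the per-session hash
++ * sess_tgt_dev_list_hash declared below is indexed with HASH_VAL(), so a
++ * LUN to tgt_dev translation is conceptually:
++ *
++ *     struct list_head *head =
++ *             &sess->sess_tgt_dev_list_hash[HASH_VAL(lun)];
++ *     struct scst_tgt_dev *tgt_dev;
++ *
++ *     list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry)
++ *             if (tgt_dev->lun == lun)
++ *                     break;
++ */
++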
++#ifdef CONFIG_SCST_MEASURE_LATENCY
++
++/* Defines extended latency statistics */
++struct scst_ext_latency_stat {
++ uint64_t scst_time_rd, tgt_time_rd, dev_time_rd;
++ unsigned int processed_cmds_rd;
++ uint64_t min_scst_time_rd, min_tgt_time_rd, min_dev_time_rd;
++ uint64_t max_scst_time_rd, max_tgt_time_rd, max_dev_time_rd;
++
++ uint64_t scst_time_wr, tgt_time_wr, dev_time_wr;
++ unsigned int processed_cmds_wr;
++ uint64_t min_scst_time_wr, min_tgt_time_wr, min_dev_time_wr;
++ uint64_t max_scst_time_wr, max_tgt_time_wr, max_dev_time_wr;
++};
++
++#define SCST_IO_SIZE_THRESHOLD_SMALL (8*1024)
++#define SCST_IO_SIZE_THRESHOLD_MEDIUM (32*1024)
++#define SCST_IO_SIZE_THRESHOLD_LARGE (128*1024)
++#define SCST_IO_SIZE_THRESHOLD_VERY_LARGE (512*1024)
++
++#define SCST_LATENCY_STAT_INDEX_SMALL 0
++#define SCST_LATENCY_STAT_INDEX_MEDIUM 1
++#define SCST_LATENCY_STAT_INDEX_LARGE 2
++#define SCST_LATENCY_STAT_INDEX_VERY_LARGE 3
++#define SCST_LATENCY_STAT_INDEX_OTHER 4
++#define SCST_LATENCY_STATS_NUM (SCST_LATENCY_STAT_INDEX_OTHER + 1)
++
++#endif /* CONFIG_SCST_MEASURE_LATENCY */
++
++/*
++ * SCST session, analog of SCSI I_T nexus
++ */
++struct scst_session {
++ /*
++ * Initialization phase, one of SCST_SESS_IPH_* constants, protected by
++ * sess_list_lock
++ */
++ int init_phase;
++
++ struct scst_tgt *tgt; /* corresponding target */
++
++ /* Used for storage of target driver private stuff */
++ void *tgt_priv;
++
++ /* session's async flags */
++ unsigned long sess_aflags;
++
++ /*
++ * Hash list of tgt_dev's for this session, protected by scst_mutex
++ * and suspended activity
++ */
++ struct list_head sess_tgt_dev_list_hash[TGT_DEV_HASH_SIZE];
++
++ /*
++ * List of cmds in this session. Protected by sess_list_lock.
++ *
++ * We must always keep commands in the sess list from the
++ * very beginning, because otherwise they can be missed during
++ * TM processing.
++ */
++ struct list_head sess_cmd_list;
++
++ spinlock_t sess_list_lock; /* protects sess_cmd_list, etc */
++
++ atomic_t refcnt; /* get/put counter */
++
++ /*
++ * Alive commands for this session. ToDo: make it part of the common
++ * IO flow control.
++ */
++ atomic_t sess_cmd_count;
++
++ /* Access control for this session and list entry there */
++ struct scst_acg *acg;
++
++ /* Initiator port transport id */
++ uint8_t *transport_id;
++
++ /* List entry for the sessions list inside ACG */
++ struct list_head acg_sess_list_entry;
++
++ struct delayed_work hw_pending_work;
++
++ /* Name of attached initiator */
++ const char *initiator_name;
++
++ /* List entry of sessions per target */
++ struct list_head sess_list_entry;
++
++ /* List entry for the list that keeps sessions waiting for init */
++ struct list_head sess_init_list_entry;
++
++ /*
++ * List entry for the list that keeps sessions waiting for shutdown
++ */
++ struct list_head sess_shut_list_entry;
++
++ /*
++ * Lists of deferred during session initialization commands.
++ * Protected by sess_list_lock.
++ */
++ struct list_head init_deferred_cmd_list;
++ struct list_head init_deferred_mcmd_list;
++
++ /*
++ * Shutdown phase, one of SCST_SESS_SPH_* constants, unprotected.
++ * Async. relative to init_phase; must be a separate variable, because
++ * the session could be unregistered before the async. registration is
++ * finished.
++ */
++ unsigned long shut_phase;
++
++ /* Used if scst_unregister_session() called in wait mode */
++ struct completion *shutdown_compl;
++
++ /* sysfs release completion */
++ struct completion sess_kobj_release_cmpl;
++
++ unsigned int sess_kobj_ready:1;
++
++ struct kobject sess_kobj; /* kobject for this struct */
++
++ /*
++ * Functions and data for user callbacks from scst_register_session()
++ * and scst_unregister_session()
++ */
++ void *reg_sess_data;
++ void (*init_result_fn) (struct scst_session *sess, void *data,
++ int result);
++ void (*unreg_done_fn) (struct scst_session *sess);
++
++#ifdef CONFIG_SCST_MEASURE_LATENCY
++ /*
++ * Must be the last, to allow working with drivers that don't know
++ * about this config-time option.
++ */
++ spinlock_t lat_lock;
++ uint64_t scst_time, tgt_time, dev_time;
++ unsigned int processed_cmds;
++ uint64_t min_scst_time, min_tgt_time, min_dev_time;
++ uint64_t max_scst_time, max_tgt_time, max_dev_time;
++ struct scst_ext_latency_stat sess_latency_stat[SCST_LATENCY_STATS_NUM];
++#endif
++};
++
++/*
++ * SCST_PR_ABORT_ALL TM function helper structure
++ */
++struct scst_pr_abort_all_pending_mgmt_cmds_counter {
++ /*
++ * How many SCST_PR_ABORT_ALL TM commands are pending for this cmd.
++ */
++ atomic_t pr_abort_pending_cnt;
++
++ /* Saved completion routine */
++ void (*saved_cmd_done) (struct scst_cmd *cmd, int next_state,
++ enum scst_exec_context pref_context);
++
++ /*
++ * How many SCST_PR_ABORT_ALL TM commands pending for this cmd have
++ * not yet aborted all affected commands, and a completion to signal
++ * when that is done.
++ */
++ atomic_t pr_aborting_cnt;
++ struct completion pr_aborting_cmpl;
++};
++
++/*
++ * Structure to control commands' queuing and threads pool processing the queue
++ */
++struct scst_cmd_threads {
++ spinlock_t cmd_list_lock;
++ struct list_head active_cmd_list; /* commands queue */
++ wait_queue_head_t cmd_list_waitQ;
++
++ struct io_context *io_context; /* IO context of the threads pool */
++ int io_context_refcnt;
++
++ bool io_context_ready;
++
++ /* io_context_mutex protects io_context and io_context_refcnt. */
++ struct mutex io_context_mutex;
++
++ int nr_threads; /* number of processing threads */
++ struct list_head threads_list; /* processing threads */
++
++ struct list_head lists_list_entry;
++};
++
++/*
++ * SCST command, analog of I_T_L_Q nexus or task
++ */
++struct scst_cmd {
++ /* List entry for below *_cmd_threads */
++ struct list_head cmd_list_entry;
++
++ /* Pointer to lists of commands with the lock */
++ struct scst_cmd_threads *cmd_threads;
++
++ atomic_t cmd_ref;
++
++ struct scst_session *sess; /* corresponding session */
++
++ /* Cmd state, one of SCST_CMD_STATE_* constants */
++ int state;
++
++ /*************************************************************
++ ** Cmd's flags
++ *************************************************************/
++
++ /*
++ * Set if expected_sn should be incremented, i.e. cmd was sent
++ * for execution
++ */
++ unsigned int sent_for_exec:1;
++
++ /* Set if the cmd's action is completed */
++ unsigned int completed:1;
++
++ /* Set if we should ignore Unit Attention in scst_check_sense() */
++ unsigned int ua_ignore:1;
++
++ /* Set if cmd is being processed in atomic context */
++ unsigned int atomic:1;
++
++ /* Set if this command was sent in double UA possible state */
++ unsigned int double_ua_possible:1;
++
++ /* Set if this command contains status */
++ unsigned int is_send_status:1;
++
++ /* Set if cmd is being retried */
++ unsigned int retry:1;
++
++ /* Set if cmd is internally generated */
++ unsigned int internal:1;
++
++ /* Set if the device was blocked by scst_check_blocked_dev() */
++ unsigned int unblock_dev:1;
++
++ /* Set if cmd is queued as hw pending */
++ unsigned int cmd_hw_pending:1;
++
++ /*
++ * Set if the target driver wants to alloc data buffers on its own.
++ * In this case alloc_data_buf() must be provided in the target driver
++ * template.
++ */
++ unsigned int tgt_need_alloc_data_buf:1;
++
++ /*
++ * Set by SCST if the custom data buffer allocation by the target driver
++ * succeeded.
++ */
++ unsigned int tgt_data_buf_alloced:1;
++
++ /* Set if custom data buffer allocated by dev handler */
++ unsigned int dh_data_buf_alloced:1;
++
++ /* Set if the target driver called scst_set_expected() */
++ unsigned int expected_values_set:1;
++
++ /*
++ * Set if the SG buffer was modified by scst_adjust_sg()
++ */
++ unsigned int sg_buff_modified:1;
++
++ /*
++ * Set if the cmd buffer was vmalloc'ed and copied from more
++ * than one sg chunk
++ */
++ unsigned int sg_buff_vmallocated:1;
++
++ /*
++ * Set if scst_cmd_init_stage1_done() was called and the target
++ * wants preprocessing_done() to be called
++ */
++ unsigned int preprocessing_only:1;
++
++ /* Set if cmd's SN was set */
++ unsigned int sn_set:1;
++
++ /* Set if hq_cmd_count was incremented */
++ unsigned int hq_cmd_inced:1;
++
++ /*
++ * Set if scst_cmd_init_stage1_done() was called and the target wants
++ * the SN for the cmd not to be assigned until scst_restart_cmd()
++ */
++ unsigned int set_sn_on_restart_cmd:1;
++
++ /* Set if the cmd must not use the sgv cache for its data buffer */
++ unsigned int no_sgv:1;
++
++ /*
++ * Set if target driver may need to call dma_sync_sg() or similar
++ * function before transferring the cmd's data to the target device
++ * via DMA.
++ */
++ unsigned int may_need_dma_sync:1;
++
++ /* Set if the cmd was done or aborted out of its SN */
++ unsigned int out_of_sn:1;
++
++ /* Set if increment expected_sn in cmd->scst_cmd_done() */
++ unsigned int inc_expected_sn_on_done:1;
++
++ /* Set if tgt_sn field is valid */
++ unsigned int tgt_sn_set:1;
++
++ /* Set if any direction residual is possible */
++ unsigned int resid_possible:1;
++
++ /* Set if cmd is done */
++ unsigned int done:1;
++
++ /* Set if cmd is finished */
++ unsigned int finished:1;
++
++#ifdef CONFIG_SCST_DEBUG_TM
++ /* Set if the cmd was delayed by task management debugging code */
++ unsigned int tm_dbg_delayed:1;
++
++ /* Set if the cmd must be ignored by task management debugging code */
++ unsigned int tm_dbg_immut:1;
++#endif
++
++ /**************************************************************/
++
++ /* cmd's async flags */
++ unsigned long cmd_flags;
++
++ /* Keeps status of cmd's status/data delivery to remote initiator */
++ int delivery_status;
++
++ struct scst_tgt_template *tgtt; /* to save extra dereferences */
++ struct scst_tgt *tgt; /* to save extra dereferences */
++ struct scst_device *dev; /* to save extra dereferences */
++
++ struct scst_tgt_dev *tgt_dev; /* corresponding device for this cmd */
++
++ uint64_t lun; /* LUN for this cmd */
++
++ unsigned long start_time;
++
++ /* List entry for tgt_dev's SN related lists */
++ struct list_head sn_cmd_list_entry;
++
++ /* Cmd's serial number, used to execute cmd's in order of arrival */
++ unsigned int sn;
++
++ /* The corresponding sn_slot in tgt_dev->sn_slots */
++ atomic_t *sn_slot;
++
++ /* List entry for sess's sess_cmd_list */
++ struct list_head sess_cmd_list_entry;
++
++ /*
++ * Used to find the cmd by scst_find_cmd_by_tag(). Set by the
++ * target driver at the cmd's initialization time
++ */
++ uint64_t tag;
++
++ uint32_t tgt_sn; /* SN set by target driver (for TM purposes) */
++
++ /* CDB and its len */
++ uint8_t cdb[SCST_MAX_CDB_SIZE];
++ unsigned short cdb_len;
++ unsigned short ext_cdb_len;
++ uint8_t *ext_cdb;
++
++ enum scst_cdb_flags op_flags;
++ const char *op_name;
++
++ enum scst_cmd_queue_type queue_type;
++
++ int timeout; /* CDB execution timeout in seconds */
++ int retries; /* Amount of retries that will be done by SCSI mid-level */
++
++ /* SCSI data direction, one of SCST_DATA_* constants */
++ scst_data_direction data_direction;
++
++ /* Remote initiator supplied values, if any */
++ scst_data_direction expected_data_direction;
++ int expected_transfer_len;
++ int expected_out_transfer_len; /* for bidi writes */
++
++ /*
++ * Cmd data length. Could be different from bufflen for commands like
++ * VERIFY, which transfer a different amount of data (if any) than
++ * is processed.
++ */
++ int data_len;
++
++ /* Completion routine */
++ void (*scst_cmd_done) (struct scst_cmd *cmd, int next_state,
++ enum scst_exec_context pref_context);
++
++ struct sgv_pool_obj *sgv; /* sgv object */
++ int bufflen; /* cmd buffer length */
++ struct scatterlist *sg; /* cmd data buffer SG vector */
++ int sg_cnt; /* SG segments count */
++
++ /*
++ * Response data length in data buffer. Must not be set
++ * directly, use scst_set_resp_data_len() for that.
++ */
++ int resp_data_len;
++
++ /*
++ * Response data length adjusted on residual, i.e.
++ * min(expected_len, resp_len), if expected len set.
++ */
++ int adjusted_resp_data_len;
++
++ /*
++ * Data length to write, i.e. transfer from the initiator. Might be
++ * different from (out_)bufflen, if the initiator requested a too big
++ * or too small expected(_out_)transfer_len.
++ */
++ int write_len;
++
++ /*
++ * write_sg and write_sg_cnt point either to sg/sg_cnt or to
++ * out_sg/out_sg_cnt.
++ */
++ struct scatterlist **write_sg;
++ int *write_sg_cnt;
++
++ /* scst_get_sg_buf_[first,next]() support */
++ int get_sg_buf_entry_num;
++
++ /* Bidirectional transfers support */
++ int out_bufflen; /* WRITE buffer length */
++ struct sgv_pool_obj *out_sgv; /* WRITE sgv object */
++ struct scatterlist *out_sg; /* WRITE data buffer SG vector */
++ int out_sg_cnt; /* WRITE SG segments count */
++
++ /*
++ * Used if both target driver and dev handler request own memory
++ * allocation. In other cases, both are equal to sg and sg_cnt
++ * correspondingly.
++ *
++ * If target driver requests own memory allocations, it MUST use
++ * functions scst_cmd_get_tgt_sg*() to get sg and sg_cnt! Otherwise,
++ * it may use functions scst_cmd_get_sg*().
++ */
++ struct scatterlist *tgt_sg;
++ int tgt_sg_cnt;
++ struct scatterlist *tgt_out_sg; /* bidirectional */
++ int tgt_out_sg_cnt; /* bidirectional */
++
++ /*
++ * The status fields in case of errors must be set using
++ * scst_set_cmd_error_status()!
++ */
++ uint8_t status; /* status byte from target device */
++ uint8_t msg_status; /* return status from host adapter itself */
++ uint8_t host_status; /* set by low-level driver to indicate status */
++ uint8_t driver_status; /* set by mid-level */
++
++ uint8_t *sense; /* pointer to sense buffer */
++ unsigned short sense_valid_len; /* length of valid sense data */
++ unsigned short sense_buflen; /* length of the sense buffer, if any */
++
++ /* Start time when cmd was sent to rdy_to_xfer() or xmit_response() */
++ unsigned long hw_pending_start;
++
++ /* Used for storage of target driver private stuff */
++ void *tgt_priv;
++
++ /* Used for storage of dev handler private stuff */
++ void *dh_priv;
++
++ /* Used to restore sg if it was modified by scst_adjust_sg() */
++ struct scatterlist *orig_sg;
++ int *p_orig_sg_cnt;
++ int orig_sg_cnt, orig_sg_entry, orig_entry_len;
++
++ /* Used to retry commands in case of double UA */
++ int dbl_ua_orig_resp_data_len, dbl_ua_orig_data_direction;
++
++ /*
++ * List of the corresponding mgmt cmds, if any. Protected by
++ * sess_list_lock.
++ */
++ struct list_head mgmt_cmd_list;
++
++ /* List entry for dev's blocked_cmd_list */
++ struct list_head blocked_cmd_list_entry;
++
++ /* Counter of the corresponding SCST_PR_ABORT_ALL TM commands */
++ struct scst_pr_abort_all_pending_mgmt_cmds_counter *pr_abort_counter;
++
++ struct scst_cmd *orig_cmd; /* Used to issue REQUEST SENSE */
++
++#ifdef CONFIG_SCST_MEASURE_LATENCY
++ /*
++ * Must be the last, to allow working with drivers that don't know
++ * about this config-time option.
++ */
++ uint64_t start, curr_start, parse_time, alloc_buf_time;
++ uint64_t restart_waiting_time, rdy_to_xfer_time;
++ uint64_t pre_exec_time, exec_time, dev_done_time;
++ uint64_t xmit_time, tgt_on_free_time, dev_on_free_time;
++#endif
++};
++
++/*
++ * Parameters for SCST management commands
++ */
++struct scst_rx_mgmt_params {
++ int fn;
++ uint64_t tag;
++ const uint8_t *lun;
++ int lun_len;
++ uint32_t cmd_sn;
++ int atomic;
++ void *tgt_priv;
++ unsigned char tag_set;
++ unsigned char lun_set;
++ unsigned char cmd_sn_set;
++};
++
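++/*
++ * Usage sketch (illustrative only; scst_rx_mgmt_fn() and SCST_ABORT_TASK
++ * are assumed to be declared in this header and in scst_const.h
++ * respectively): a target driver aborting a task by tag fills the
++ * structure above and passes it to the SCST core, e.g.:
++ *
++ *     struct scst_rx_mgmt_params params;
++ *
++ *     memset(&params, 0, sizeof(params));
++ *     params.fn = SCST_ABORT_TASK;
++ *     params.tag = tag;
++ *     params.tag_set = 1;
++ *     rc = scst_rx_mgmt_fn(sess, &params);
++ */
++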
++/*
++ * A stub structure to link a management command and the affected regular commands
++ */
++struct scst_mgmt_cmd_stub {
++ struct scst_mgmt_cmd *mcmd;
++
++ /* List entry in cmd->mgmt_cmd_list */
++ struct list_head cmd_mgmt_cmd_list_entry;
++
++ /* Set if the cmd was counted in mcmd->cmd_done_wait_count */
++ unsigned int done_counted:1;
++
++ /* Set if the cmd was counted in mcmd->cmd_finish_wait_count */
++ unsigned int finish_counted:1;
++};
++
++/*
++ * SCST task management structure
++ */
++struct scst_mgmt_cmd {
++ /* List entry for *_mgmt_cmd_list */
++ struct list_head mgmt_cmd_list_entry;
++
++ struct scst_session *sess;
++
++ /* Mgmt cmd state, one of SCST_MCMD_STATE_* constants */
++ int state;
++
++ int fn; /* task management function */
++
++ /* Set if device(s) should be unblocked after mcmd's finish */
++ unsigned int needs_unblocking:1;
++ unsigned int lun_set:1; /* set, if lun field is valid */
++ unsigned int cmd_sn_set:1; /* set, if cmd_sn field is valid */
++
++ /*
++ * Number of commands to finish before sending response,
++ * protected by scst_mcmd_lock
++ */
++ int cmd_finish_wait_count;
++
++ /*
++ * Number of commands to complete (done) before resetting reservation,
++ * protected by scst_mcmd_lock
++ */
++ int cmd_done_wait_count;
++
++ /* Number of completed commands, protected by scst_mcmd_lock */
++ int completed_cmd_count;
++
++ uint64_t lun; /* LUN for this mgmt cmd */
++ /* or (and for iSCSI) */
++ uint64_t tag; /* tag of the corresponding cmd */
++
++ uint32_t cmd_sn; /* affected command's highest SN */
++
++ /* corresponding cmd (to be aborted, found by tag) */
++ struct scst_cmd *cmd_to_abort;
++
++ /* corresponding device for this mgmt cmd (found by lun) */
++ struct scst_tgt_dev *mcmd_tgt_dev;
++
++ /* completion status, one of the SCST_MGMT_STATUS_* constants */
++ int status;
++
++ /* Used for storage of target driver private stuff or origin PR cmd */
++ union {
++ void *tgt_priv;
++ struct scst_cmd *origin_pr_cmd;
++ };
++};
++
++/*
++ * Persistent reservations registrant
++ */
++struct scst_dev_registrant {
++ uint8_t *transport_id;
++ uint16_t rel_tgt_id;
++ __be64 key;
++
++ /* tgt_dev (I_T nexus) for this registrant, if any */
++ struct scst_tgt_dev *tgt_dev;
++
++ /* List entry for dev_registrants_list */
++ struct list_head dev_registrants_list_entry;
++
++ /* 2 auxiliary fields used to rollback changes for errors, etc. */
++ struct list_head aux_list_entry;
++ __be64 rollback_key;
++};
++
++/*
++ * SCST device
++ */
++struct scst_device {
++ unsigned short type; /* SCSI type of the device */
++
++ /*************************************************************
++ ** Dev's flags. Updates serialized by dev_lock or suspended
++ ** activity
++ *************************************************************/
++
++ /* Set if dev is RESERVED */
++ unsigned short dev_reserved:1;
++
++ /* Set if double reset UA is possible */
++ unsigned short dev_double_ua_possible:1;
++
++ /* If set, dev is read only */
++ unsigned short rd_only:1;
++
++ /**************************************************************/
++
++ /*************************************************************
++ ** Dev's control mode page related values. Updates serialized
++ ** by scst_block_dev(). Modified independently of the above and
++ ** below fields, hence the alignment.
++ *************************************************************/
++
++ unsigned int queue_alg:4 __attribute__((aligned(sizeof(long))));
++ unsigned int tst:3;
++ unsigned int tas:1;
++ unsigned int swp:1;
++ unsigned int d_sense:1;
++
++ /*
++ * Set if the device implements its own ordered commands management. If
++ * not set and queue_alg is SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER,
++ * expected_sn will be incremented only after commands have finished.
++ */
++ unsigned int has_own_order_mgmt:1;
++
++ /**************************************************************/
++
++ /*
++ * How many times device was blocked for new cmds execution.
++ * Protected by dev_lock
++ */
++ int block_count;
++
++ /* How many cmds alive on this dev */
++ atomic_t dev_cmd_count;
++
++ /*
++ * Set if dev is persistently reserved. Protected by dev_pr_mutex.
++ * Modified independently of the above field, hence the alignment.
++ */
++ unsigned int pr_is_set:1 __attribute__((aligned(sizeof(long))));
++
++ /*
++ * Set if there is a thread changing or going to change PR state(s).
++ * Protected by dev_pr_mutex.
++ */
++ unsigned int pr_writer_active:1;
++
++ /*
++ * How many threads are checking commands for PR allowance. Used to
++ * implement lockless read-only fast path.
++ */
++ atomic_t pr_readers_count;
++
++ struct scst_dev_type *handler; /* corresponding dev handler */
++
++ /* Used for storage of dev handler private stuff */
++ void *dh_priv;
++
++ /* Corresponding real SCSI device, could be NULL for virtual devices */
++ struct scsi_device *scsi_dev;
++
++ /* List of commands with lock, if dedicated threads are used */
++ struct scst_cmd_threads dev_cmd_threads;
++
++ /* Memory limits for this device */
++ struct scst_mem_lim dev_mem_lim;
++
++ /* How many write cmds alive on this dev. Temporary, ToDo */
++ atomic_t write_cmd_count;
++
++ /*************************************************************
++ ** Persistent reservation fields. Protected by dev_pr_mutex.
++ *************************************************************/
++
++ /*
++ * True if persist through power loss is activated. Modified
++ * independently of the above field, hence the alignment.
++ */
++ unsigned short pr_aptpl:1 __attribute__((aligned(sizeof(long))));
++
++ /* Persistent reservation type */
++ uint8_t pr_type;
++
++ /* Persistent reservation scope */
++ uint8_t pr_scope;
++
++ /* Mutex to protect PR operations */
++ struct mutex dev_pr_mutex;
++
++ /* Persistent reservation generation value */
++ uint32_t pr_generation;
++
++ /* Reference to registrant - persistent reservation holder */
++ struct scst_dev_registrant *pr_holder;
++
++ /* List of dev's registrants */
++ struct list_head dev_registrants_list;
++
++ /*
++ * Count of connected tgt_devs from transports, which don't support
++ * PRs, i.e. don't have get_initiator_port_transport_id(). Protected
++ * by scst_mutex.
++ */
++ int not_pr_supporting_tgt_devs_num;
++
++ /* Persist through power loss files */
++ char *pr_file_name;
++ char *pr_file_name1;
++
++ /**************************************************************/
++
++ spinlock_t dev_lock; /* device lock */
++
++ struct list_head blocked_cmd_list; /* protected by dev_lock */
++
++ /* A list entry used during TM, protected by scst_mutex */
++ struct list_head tm_dev_list_entry;
++
++ /* Virtual device internal ID */
++ int virt_id;
++
++ /* Pointer to virtual device name, for convenience only */
++ char *virt_name;
++
++ /* List entry in global devices list */
++ struct list_head dev_list_entry;
++
++ /*
++ * List of tgt_dev's, one per session, protected by scst_mutex or
++ * dev_lock for reads and both for writes
++ */
++ struct list_head dev_tgt_dev_list;
++
++ /* List of acg_dev's, one per acg, protected by scst_mutex */
++ struct list_head dev_acg_dev_list;
++
++ /* Number of threads in the device's threads pools */
++ int threads_num;
++
++ /* Threads pool type of the device. Valid only if threads_num > 0. */
++ enum scst_dev_type_threads_pool_type threads_pool_type;
++
++ /* sysfs release completion */
++ struct completion dev_kobj_release_cmpl;
++
++ struct kobject dev_kobj; /* kobject for this struct */
++ struct kobject *dev_exp_kobj; /* exported groups */
++
++ /* Export number in the dev's sysfs list. Protected by scst_mutex */
++ int dev_exported_lun_num;
++};
++
++/*
++ * Used to store thread-local, tgt_dev-specific data
++ */
++struct scst_thr_data_hdr {
++ /* List entry in tgt_dev->thr_data_list */
++ struct list_head thr_data_list_entry;
++ struct task_struct *owner_thr; /* the owner thread */
++ atomic_t ref;
++ /* Function that will be called on the tgt_dev destruction */
++ void (*free_fn) (struct scst_thr_data_hdr *data);
++};
++
++/*
++ * Used to cleanly dispose of the async io_context
++ */
++struct scst_async_io_context_keeper {
++ struct kref aic_keeper_kref;
++ bool aic_ready;
++ struct io_context *aic;
++ struct task_struct *aic_keeper_thr;
++ wait_queue_head_t aic_keeper_waitQ;
++};
++
++/*
++ * Used to store per-session specific device information, analog of
++ * SCSI I_T_L nexus.
++ */
++struct scst_tgt_dev {
++ /* List entry in sess->sess_tgt_dev_list_hash */
++ struct list_head sess_tgt_dev_list_entry;
++
++ struct scst_device *dev; /* to save extra dereferences */
++ uint64_t lun; /* to save extra dereferences */
++
++ gfp_t gfp_mask;
++ struct sgv_pool *pool;
++ int max_sg_cnt;
++
++ /*
++ * Tgt_dev's async flags. Modified independently of the neighbour
++ * fields.
++ */
++ unsigned long tgt_dev_flags;
++
++ /* Used for storage of dev handler private stuff */
++ void *dh_priv;
++
++ /* How many cmds alive on this dev in this session */
++ atomic_t tgt_dev_cmd_count;
++
++ /*
++ * Used to execute cmd's in order of arrival, honoring SCSI task
++ * attributes.
++ *
++ * Protected by sn_lock, except expected_sn, which is protected by
++ * itself. Curr_sn must have the same size as expected_sn to
++ * overflow simultaneously.
++ */
++ int def_cmd_count;
++ spinlock_t sn_lock;
++ unsigned int expected_sn;
++ unsigned int curr_sn;
++ int hq_cmd_count;
++ struct list_head deferred_cmd_list;
++ struct list_head skipped_sn_list;
++
++ /*
++ * Set if the prev cmd was ORDERED. Size and, hence, alignment must
++	 * allow unprotected modifications independently of the neighbouring fields.
++ */
++ unsigned long prev_cmd_ordered;
++
++ int num_free_sn_slots; /* if it's <0, then all slots are busy */
++ atomic_t *cur_sn_slot;
++ atomic_t sn_slots[15];
++
++ /* List of scst_thr_data_hdr and lock */
++ spinlock_t thr_data_lock;
++ struct list_head thr_data_list;
++
++ /* Pointer to lists of commands with the lock */
++ struct scst_cmd_threads *active_cmd_threads;
++
++ /* Union to save some CPU cache footprint */
++ union {
++ struct {
++ /* Copy to save fast path dereference */
++ struct io_context *async_io_context;
++
++ struct scst_async_io_context_keeper *aic_keeper;
++ };
++
++ /* Lists of commands with lock, if dedicated threads are used */
++ struct scst_cmd_threads tgt_dev_cmd_threads;
++ };
++
++ spinlock_t tgt_dev_lock; /* per-session device lock */
++
++ /* List of UA's for this device, protected by tgt_dev_lock */
++ struct list_head UA_list;
++
++ struct scst_session *sess; /* corresponding session */
++ struct scst_acg_dev *acg_dev; /* corresponding acg_dev */
++
++ /* Reference to registrant to find quicker */
++ struct scst_dev_registrant *registrant;
++
++ /* List entry in dev->dev_tgt_dev_list */
++ struct list_head dev_tgt_dev_list_entry;
++
++ /* Internal tmp list entry */
++ struct list_head extra_tgt_dev_list_entry;
++
++ /* Set if INQUIRY DATA HAS CHANGED UA is needed */
++ unsigned int inq_changed_ua_needed:1;
++
++ /*
++ * Stored Unit Attention sense and its length for possible
++ * subsequent REQUEST SENSE. Both protected by tgt_dev_lock.
++ */
++ unsigned short tgt_dev_valid_sense_len;
++ uint8_t tgt_dev_sense[SCST_SENSE_BUFFERSIZE];
++
++ /* sysfs release completion */
++ struct completion tgt_dev_kobj_release_cmpl;
++
++ struct kobject tgt_dev_kobj; /* kobject for this struct */
++
++#ifdef CONFIG_SCST_MEASURE_LATENCY
++ /*
++	 * Must be the last field to allow working with drivers that don't
++	 * know about this config time option.
++ *
++ * Protected by sess->lat_lock.
++ */
++ uint64_t scst_time, tgt_time, dev_time;
++ unsigned int processed_cmds;
++ struct scst_ext_latency_stat dev_latency_stat[SCST_LATENCY_STATS_NUM];
++#endif
++};
++
++/*
++ * Used to store ACG-specific device information, like LUN
++ */
++struct scst_acg_dev {
++ struct scst_device *dev; /* corresponding device */
++
++ uint64_t lun; /* device's LUN in this acg */
++
++ /* If set, the corresponding LU is read only */
++ unsigned int rd_only:1;
++
++ struct scst_acg *acg; /* parent acg */
++
++ /* List entry in dev->dev_acg_dev_list */
++ struct list_head dev_acg_dev_list_entry;
++
++ /* List entry in acg->acg_dev_list */
++ struct list_head acg_dev_list_entry;
++
++ /* kobject for this structure */
++ struct kobject acg_dev_kobj;
++
++ /* sysfs release completion */
++ struct completion acg_dev_kobj_release_cmpl;
++
++ /* Name of the link to the corresponding LUN */
++ char acg_dev_link_name[20];
++};
++
++/*
++ * ACG - access control group. Used to store group related
++ * control information.
++ */
++struct scst_acg {
++ /* Owner target */
++ struct scst_tgt *tgt;
++
++ /* List of acg_dev's in this acg, protected by scst_mutex */
++ struct list_head acg_dev_list;
++
++ /* List of attached sessions, protected by scst_mutex */
++ struct list_head acg_sess_list;
++
++ /* List of attached acn's, protected by scst_mutex */
++ struct list_head acn_list;
++
++ /* List entry in acg_lists */
++ struct list_head acg_list_entry;
++
++ /* Name of this acg */
++ const char *acg_name;
++
++	/* Type of I/O initiators grouping */
++ int acg_io_grouping_type;
++
++ unsigned int tgt_acg:1;
++
++ /* sysfs release completion */
++ struct completion acg_kobj_release_cmpl;
++
++ /* kobject for this structure */
++ struct kobject acg_kobj;
++
++ struct kobject *luns_kobj;
++ struct kobject *initiators_kobj;
++
++ unsigned int addr_method;
++};
++
++/*
++ * ACN - access control name. Used to store the names by which
++ * incoming sessions will be assigned to the appropriate ACG.
++ */
++struct scst_acn {
++ struct scst_acg *acg; /* owner ACG */
++
++ const char *name; /* initiator's name */
++
++ /* List entry in acg->acn_list */
++ struct list_head acn_list_entry;
++
++ /* sysfs file attributes */
++ struct kobj_attribute *acn_attr;
++};
++
++/*
++ * Used to store per-session UNIT ATTENTIONs
++ */
++struct scst_tgt_dev_UA {
++ /* List entry in tgt_dev->UA_list */
++ struct list_head UA_list_entry;
++
++ /* Set if UA is global for session */
++ unsigned short global_UA:1;
++
++ /* Unit Attention valid sense len */
++ unsigned short UA_valid_sense_len;
++ /* Unit Attention sense buf */
++ uint8_t UA_sense_buffer[SCST_SENSE_BUFFERSIZE];
++};
++
++/* Used to deliver AENs */
++struct scst_aen {
++ int event_fn; /* AEN fn */
++
++ struct scst_session *sess; /* corresponding session */
++ __be64 lun; /* corresponding LUN in SCSI form */
++
++ union {
++ /* SCSI AEN data */
++ struct {
++ int aen_sense_len;
++ uint8_t aen_sense[SCST_STANDARD_SENSE_LEN];
++ };
++ };
++
++ /* Keeps status of AEN's delivery to remote initiator */
++ int delivery_status;
++};
++
++#ifndef smp_mb__after_set_bit
++/* There is no smp_mb__after_set_bit() in the kernel */
++#define smp_mb__after_set_bit() smp_mb()
++#endif
++
++/*
++ * Registers target template.
++ * Returns 0 on success or appropriate error code otherwise.
++ */
++int __scst_register_target_template(struct scst_tgt_template *vtt,
++ const char *version);
++static inline int scst_register_target_template(struct scst_tgt_template *vtt)
++{
++ return __scst_register_target_template(vtt, SCST_INTERFACE_VERSION);
++}
++
++/*
++ * Registers target template, non-GPL version.
++ * Returns 0 on success or appropriate error code otherwise.
++ *
++ * Note: *vtt must be static!
++ */
++int __scst_register_target_template_non_gpl(struct scst_tgt_template *vtt,
++ const char *version);
++static inline int scst_register_target_template_non_gpl(
++ struct scst_tgt_template *vtt)
++{
++ return __scst_register_target_template_non_gpl(vtt,
++ SCST_INTERFACE_VERSION);
++}
++
++void scst_unregister_target_template(struct scst_tgt_template *vtt);
++
++struct scst_tgt *scst_register_target(struct scst_tgt_template *vtt,
++ const char *target_name);
++void scst_unregister_target(struct scst_tgt *tgt);
++
++struct scst_session *scst_register_session(struct scst_tgt *tgt, int atomic,
++ const char *initiator_name, void *tgt_priv, void *result_fn_data,
++ void (*result_fn) (struct scst_session *sess, void *data, int result));
++struct scst_session *scst_register_session_non_gpl(struct scst_tgt *tgt,
++ const char *initiator_name, void *tgt_priv);
++void scst_unregister_session(struct scst_session *sess, int wait,
++ void (*unreg_done_fn) (struct scst_session *sess));
++void scst_unregister_session_non_gpl(struct scst_session *sess);
++
++int __scst_register_dev_driver(struct scst_dev_type *dev_type,
++ const char *version);
++static inline int scst_register_dev_driver(struct scst_dev_type *dev_type)
++{
++ return __scst_register_dev_driver(dev_type, SCST_INTERFACE_VERSION);
++}
++void scst_unregister_dev_driver(struct scst_dev_type *dev_type);
++
++int __scst_register_virtual_dev_driver(struct scst_dev_type *dev_type,
++ const char *version);
++/*
++ * Registers dev handler driver for virtual devices (eg VDISK).
++ * Returns 0 on success or appropriate error code otherwise.
++ */
++static inline int scst_register_virtual_dev_driver(
++ struct scst_dev_type *dev_type)
++{
++ return __scst_register_virtual_dev_driver(dev_type,
++ SCST_INTERFACE_VERSION);
++}
++
++void scst_unregister_virtual_dev_driver(struct scst_dev_type *dev_type);
++
++bool scst_initiator_has_luns(struct scst_tgt *tgt, const char *initiator_name);
++
++struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
++ const uint8_t *lun, int lun_len, const uint8_t *cdb,
++ unsigned int cdb_len, int atomic);
++void scst_cmd_init_done(struct scst_cmd *cmd,
++ enum scst_exec_context pref_context);
++
++/*
++ * Notifies SCST that the driver finished the first stage of the command
++ * initialization and the command is ready for execution, but after SCST
++ * has done the command's preprocessing, the preprocessing_done() function
++ * will be called. The second argument sets the preferred command
++ * execution context. See SCST_CONTEXT_* constants for details.
++ *
++ * See comment for scst_cmd_init_done() for the serialization requirements.
++ */
++static inline void scst_cmd_init_stage1_done(struct scst_cmd *cmd,
++ enum scst_exec_context pref_context, int set_sn)
++{
++ cmd->preprocessing_only = 1;
++ cmd->set_sn_on_restart_cmd = !set_sn;
++ scst_cmd_init_done(cmd, pref_context);
++}
++
++void scst_restart_cmd(struct scst_cmd *cmd, int status,
++ enum scst_exec_context pref_context);
++
++void scst_rx_data(struct scst_cmd *cmd, int status,
++ enum scst_exec_context pref_context);
++
++void scst_tgt_cmd_done(struct scst_cmd *cmd,
++ enum scst_exec_context pref_context);
++
++int scst_rx_mgmt_fn(struct scst_session *sess,
++ const struct scst_rx_mgmt_params *params);
++
++/*
++ * Creates a new management command using a tag and sends it for execution.
++ * Can be used for SCST_ABORT_TASK only.
++ * Must not be called in parallel with scst_unregister_session() for the
++ * same sess. Returns 0 for success, error code otherwise.
++ *
++ * Obsolete in favor of scst_rx_mgmt_fn()
++ */
++static inline int scst_rx_mgmt_fn_tag(struct scst_session *sess, int fn,
++ uint64_t tag, int atomic, void *tgt_priv)
++{
++ struct scst_rx_mgmt_params params;
++
++ BUG_ON(fn != SCST_ABORT_TASK);
++
++ memset(&params, 0, sizeof(params));
++ params.fn = fn;
++ params.tag = tag;
++ params.tag_set = 1;
++ params.atomic = atomic;
++ params.tgt_priv = tgt_priv;
++ return scst_rx_mgmt_fn(sess, &params);
++}
++
++/*
++ * Creates a new management command using a LUN and sends it for execution.
++ * Currently can be used for any fn, except SCST_ABORT_TASK.
++ * Must not be called in parallel with scst_unregister_session() for the
++ * same sess. Returns 0 for success, error code otherwise.
++ *
++ * Obsolete in favor of scst_rx_mgmt_fn()
++ */
++static inline int scst_rx_mgmt_fn_lun(struct scst_session *sess, int fn,
++ const uint8_t *lun, int lun_len, int atomic, void *tgt_priv)
++{
++ struct scst_rx_mgmt_params params;
++
++ BUG_ON(fn == SCST_ABORT_TASK);
++
++ memset(&params, 0, sizeof(params));
++ params.fn = fn;
++ params.lun = lun;
++ params.lun_len = lun_len;
++ params.lun_set = 1;
++ params.atomic = atomic;
++ params.tgt_priv = tgt_priv;
++ return scst_rx_mgmt_fn(sess, &params);
++}
++
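++/*
++ * Illustrative sketch only (not part of the SCST API): how a target driver
++ * might hand a LUN RESET received from the wire to SCST using
++ * scst_rx_mgmt_fn_lun() above. The function name is hypothetical and
++ * SCST_LUN_RESET is assumed to be one of the TM fn constants.
++ */
++static inline int example_deliver_lun_reset(struct scst_session *sess,
++	const uint8_t *lun, int lun_len, void *tgt_priv)
++{
++	/* Process context assumed, hence atomic == 0 */
++	return scst_rx_mgmt_fn_lun(sess, SCST_LUN_RESET, lun, lun_len,
++		0, tgt_priv);
++}
++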
++int scst_get_cdb_info(struct scst_cmd *cmd);
++
++int scst_set_cmd_error_status(struct scst_cmd *cmd, int status);
++int scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq);
++void scst_set_busy(struct scst_cmd *cmd);
++
++void scst_check_convert_sense(struct scst_cmd *cmd);
++
++void scst_set_initial_UA(struct scst_session *sess, int key, int asc, int ascq);
++
++void scst_capacity_data_changed(struct scst_device *dev);
++
++struct scst_cmd *scst_find_cmd_by_tag(struct scst_session *sess, uint64_t tag);
++struct scst_cmd *scst_find_cmd(struct scst_session *sess, void *data,
++ int (*cmp_fn) (struct scst_cmd *cmd,
++ void *data));
++
++enum dma_data_direction scst_to_dma_dir(int scst_dir);
++enum dma_data_direction scst_to_tgt_dma_dir(int scst_dir);
++
++/*
++ * Returns true if the cmd's CDB is fully handled locally by SCST and false
++ * otherwise. The dev handler's parse() and dev_done() are not called for such
++ * commands.
++ */
++static inline bool scst_is_cmd_fully_local(struct scst_cmd *cmd)
++{
++ return (cmd->op_flags & SCST_FULLY_LOCAL_CMD) != 0;
++}
++
++/*
++ * Returns true if the cmd's CDB is handled locally by SCST and
++ * false otherwise.
++ */
++static inline bool scst_is_cmd_local(struct scst_cmd *cmd)
++{
++ return (cmd->op_flags & SCST_LOCAL_CMD) != 0;
++}
++
++/* Returns true, if cmd can deliver UA */
++static inline bool scst_is_ua_command(struct scst_cmd *cmd)
++{
++ return (cmd->op_flags & SCST_SKIP_UA) == 0;
++}
++
++int scst_register_virtual_device(struct scst_dev_type *dev_handler,
++ const char *dev_name);
++void scst_unregister_virtual_device(int id);
++
++/*
++ * Get/Set functions for tgt's sg_tablesize
++ */
++static inline int scst_tgt_get_sg_tablesize(struct scst_tgt *tgt)
++{
++ return tgt->sg_tablesize;
++}
++
++static inline void scst_tgt_set_sg_tablesize(struct scst_tgt *tgt, int val)
++{
++ tgt->sg_tablesize = val;
++}
++
++/*
++ * Get/Set functions for tgt's target private data
++ */
++static inline void *scst_tgt_get_tgt_priv(struct scst_tgt *tgt)
++{
++ return tgt->tgt_priv;
++}
++
++static inline void scst_tgt_set_tgt_priv(struct scst_tgt *tgt, void *val)
++{
++ tgt->tgt_priv = val;
++}
++
++void scst_update_hw_pending_start(struct scst_cmd *cmd);
++
++/*
++ * Get/Set functions for session's target private data
++ */
++static inline void *scst_sess_get_tgt_priv(struct scst_session *sess)
++{
++ return sess->tgt_priv;
++}
++
++static inline void scst_sess_set_tgt_priv(struct scst_session *sess,
++ void *val)
++{
++ sess->tgt_priv = val;
++}
++
++/**
++ * Returns TRUE if cmd is being executed in atomic context.
++ *
++ * Note: checkpatch will complain about the use of in_atomic() below. You can
++ * safely ignore this warning since in_atomic() is used here only for debugging
++ * purposes.
++ */
++static inline bool scst_cmd_atomic(struct scst_cmd *cmd)
++{
++ int res = cmd->atomic;
++#ifdef CONFIG_SCST_EXTRACHECKS
++ if (unlikely((in_atomic() || in_interrupt() || irqs_disabled()) &&
++ !res)) {
++ printk(KERN_ERR "ERROR: atomic context and non-atomic cmd\n");
++ dump_stack();
++ cmd->atomic = 1;
++ res = 1;
++ }
++#endif
++ return res;
++}
++
++/*
++ * Returns TRUE if cmd has been preliminary completed, i.e. completed or
++ * aborted.
++ */
++static inline bool scst_cmd_prelim_completed(struct scst_cmd *cmd)
++{
++ return cmd->completed || test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
++}
++
++static inline enum scst_exec_context __scst_estimate_context(bool atomic)
++{
++ if (in_irq())
++ return SCST_CONTEXT_TASKLET;
++/*
++ * We come here from many unreliable places, like the block layer, and don't
++ * have any reliable way to detect whether we were called in atomic context or
++ * not (in_atomic() isn't reliable), so let's be safe, disable this section
++ * for now and unconditionally return thread context.
++ */
++#if 0
++ else if (irqs_disabled())
++ return SCST_CONTEXT_THREAD;
++ else if (in_atomic())
++ return SCST_CONTEXT_DIRECT_ATOMIC;
++ else
++ return atomic ? SCST_CONTEXT_DIRECT :
++ SCST_CONTEXT_DIRECT_ATOMIC;
++#else
++ return SCST_CONTEXT_THREAD;
++#endif
++}
++
++static inline enum scst_exec_context scst_estimate_context(void)
++{
++ return __scst_estimate_context(false);
++}
++
++static inline enum scst_exec_context scst_estimate_context_atomic(void)
++{
++ return __scst_estimate_context(true);
++}
++
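++/*
++ * Illustrative sketch only (not part of the SCST API): the typical command
++ * receive path of a target driver built from scst_rx_cmd(),
++ * scst_cmd_init_done() and scst_estimate_context() declared above. The
++ * function name and error handling are assumptions of this example.
++ */
++static inline int example_rx_scsi_cmd(struct scst_session *sess,
++	const uint8_t *lun, int lun_len,
++	const uint8_t *cdb, unsigned int cdb_len)
++{
++	struct scst_cmd *cmd;
++
++	/* Process context assumed, hence atomic == 0 */
++	cmd = scst_rx_cmd(sess, lun, lun_len, cdb, cdb_len, 0);
++	if (cmd == NULL)
++		return -ENOMEM;
++
++	/* Hand the command over to SCST in a conservatively chosen context */
++	scst_cmd_init_done(cmd, scst_estimate_context());
++	return 0;
++}
++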
++/* Returns cmd's CDB */
++static inline const uint8_t *scst_cmd_get_cdb(struct scst_cmd *cmd)
++{
++ return cmd->cdb;
++}
++
++/* Returns cmd's CDB length */
++static inline unsigned int scst_cmd_get_cdb_len(struct scst_cmd *cmd)
++{
++ return cmd->cdb_len;
++}
++
++/* Returns cmd's extended CDB */
++static inline const uint8_t *scst_cmd_get_ext_cdb(struct scst_cmd *cmd)
++{
++ return cmd->ext_cdb;
++}
++
++/* Returns cmd's extended CDB length */
++static inline unsigned int scst_cmd_get_ext_cdb_len(struct scst_cmd *cmd)
++{
++ return cmd->ext_cdb_len;
++}
++
++/* Sets cmd's extended CDB and its length */
++static inline void scst_cmd_set_ext_cdb(struct scst_cmd *cmd,
++ uint8_t *ext_cdb, unsigned int ext_cdb_len)
++{
++ cmd->ext_cdb = ext_cdb;
++ cmd->ext_cdb_len = ext_cdb_len;
++}
++
++/* Returns cmd's session */
++static inline struct scst_session *scst_cmd_get_session(struct scst_cmd *cmd)
++{
++ return cmd->sess;
++}
++
++/* Returns cmd's response data length */
++static inline int scst_cmd_get_resp_data_len(struct scst_cmd *cmd)
++{
++ return cmd->resp_data_len;
++}
++
++/* Returns cmd's adjusted response data length */
++static inline int scst_cmd_get_adjusted_resp_data_len(struct scst_cmd *cmd)
++{
++ return cmd->adjusted_resp_data_len;
++}
++
++/* Returns if status should be sent for cmd */
++static inline int scst_cmd_get_is_send_status(struct scst_cmd *cmd)
++{
++ return cmd->is_send_status;
++}
++
++/*
++ * Returns pointer to cmd's SG data buffer.
++ *
++ * Usage of this function is not recommended, use scst_get_buf_*()
++ * family of functions instead.
++ */
++static inline struct scatterlist *scst_cmd_get_sg(struct scst_cmd *cmd)
++{
++ return cmd->sg;
++}
++
++/*
++ * Returns cmd's sg_cnt.
++ *
++ * Usage of this function is not recommended, use scst_get_buf_*()
++ * family of functions instead.
++ */
++static inline int scst_cmd_get_sg_cnt(struct scst_cmd *cmd)
++{
++ return cmd->sg_cnt;
++}
++
++/*
++ * Returns cmd's data buffer length.
++ *
++ * If you need to iterate over the data in the buffer, usage of
++ * this function is not recommended; use the scst_get_buf_*()
++ * family of functions instead.
++ */
++static inline unsigned int scst_cmd_get_bufflen(struct scst_cmd *cmd)
++{
++ return cmd->bufflen;
++}
++
++/*
++ * Returns pointer to cmd's bidirectional in (WRITE) SG data buffer.
++ *
++ * Usage of this function is not recommended, use scst_get_out_buf_*()
++ * family of functions instead.
++ */
++static inline struct scatterlist *scst_cmd_get_out_sg(struct scst_cmd *cmd)
++{
++ return cmd->out_sg;
++}
++
++/*
++ * Returns cmd's bidirectional in (WRITE) sg_cnt.
++ *
++ * Usage of this function is not recommended, use scst_get_out_buf_*()
++ * family of functions instead.
++ */
++static inline int scst_cmd_get_out_sg_cnt(struct scst_cmd *cmd)
++{
++ return cmd->out_sg_cnt;
++}
++
++void scst_restore_sg_buff(struct scst_cmd *cmd);
++
++/* Restores modified sg buffer in the original state, if necessary */
++static inline void scst_check_restore_sg_buff(struct scst_cmd *cmd)
++{
++ if (unlikely(cmd->sg_buff_modified))
++ scst_restore_sg_buff(cmd);
++}
++
++/*
++ * Returns cmd's bidirectional in (WRITE) data buffer length.
++ *
++ * If you need to iterate over the data in the buffer, usage of
++ * this function is not recommended; use the scst_get_out_buf_*()
++ * family of functions instead.
++ */
++static inline unsigned int scst_cmd_get_out_bufflen(struct scst_cmd *cmd)
++{
++ return cmd->out_bufflen;
++}
++
++/* Returns pointer to cmd's target's SG data buffer */
++static inline struct scatterlist *scst_cmd_get_tgt_sg(struct scst_cmd *cmd)
++{
++ return cmd->tgt_sg;
++}
++
++/* Returns cmd's target's sg_cnt */
++static inline int scst_cmd_get_tgt_sg_cnt(struct scst_cmd *cmd)
++{
++ return cmd->tgt_sg_cnt;
++}
++
++/* Sets cmd's target's SG data buffer */
++static inline void scst_cmd_set_tgt_sg(struct scst_cmd *cmd,
++ struct scatterlist *sg, int sg_cnt)
++{
++ cmd->tgt_sg = sg;
++ cmd->tgt_sg_cnt = sg_cnt;
++ cmd->tgt_data_buf_alloced = 1;
++}
++
++/* Returns pointer to cmd's target's OUT SG data buffer */
++static inline struct scatterlist *scst_cmd_get_out_tgt_sg(struct scst_cmd *cmd)
++{
++ return cmd->tgt_out_sg;
++}
++
++/* Returns cmd's target's OUT sg_cnt */
++static inline int scst_cmd_get_tgt_out_sg_cnt(struct scst_cmd *cmd)
++{
++ return cmd->tgt_out_sg_cnt;
++}
++
++/* Sets cmd's target's OUT SG data buffer */
++static inline void scst_cmd_set_tgt_out_sg(struct scst_cmd *cmd,
++ struct scatterlist *sg, int sg_cnt)
++{
++ WARN_ON(!cmd->tgt_data_buf_alloced);
++
++ cmd->tgt_out_sg = sg;
++ cmd->tgt_out_sg_cnt = sg_cnt;
++}
++
++/* Returns cmd's data direction */
++static inline scst_data_direction scst_cmd_get_data_direction(
++ struct scst_cmd *cmd)
++{
++ return cmd->data_direction;
++}
++
++/* Returns cmd's write len as well as write SG and sg_cnt */
++static inline int scst_cmd_get_write_fields(struct scst_cmd *cmd,
++ struct scatterlist **sg, int *sg_cnt)
++{
++ *sg = *cmd->write_sg;
++ *sg_cnt = *cmd->write_sg_cnt;
++ return cmd->write_len;
++}
++
++void scst_cmd_set_write_not_received_data_len(struct scst_cmd *cmd,
++ int not_received);
++
++bool __scst_get_resid(struct scst_cmd *cmd, int *resid, int *bidi_out_resid);
++
++/*
++ * Returns true if cmd has residual(s) and returns them in the corresponding
++ * parameters(s).
++ */
++static inline bool scst_get_resid(struct scst_cmd *cmd,
++ int *resid, int *bidi_out_resid)
++{
++ if (likely(!cmd->resid_possible))
++ return false;
++ return __scst_get_resid(cmd, resid, bidi_out_resid);
++}
++
++/* Returns cmd's status byte from host device */
++static inline uint8_t scst_cmd_get_status(struct scst_cmd *cmd)
++{
++ return cmd->status;
++}
++
++/* Returns cmd's status from host adapter itself */
++static inline uint8_t scst_cmd_get_msg_status(struct scst_cmd *cmd)
++{
++ return cmd->msg_status;
++}
++
++/* Returns cmd's status set by low-level driver to indicate its status */
++static inline uint8_t scst_cmd_get_host_status(struct scst_cmd *cmd)
++{
++ return cmd->host_status;
++}
++
++/* Returns cmd's status set by SCSI mid-level */
++static inline uint8_t scst_cmd_get_driver_status(struct scst_cmd *cmd)
++{
++ return cmd->driver_status;
++}
++
++/* Returns pointer to cmd's sense buffer */
++static inline uint8_t *scst_cmd_get_sense_buffer(struct scst_cmd *cmd)
++{
++ return cmd->sense;
++}
++
++/* Returns cmd's valid sense length */
++static inline int scst_cmd_get_sense_buffer_len(struct scst_cmd *cmd)
++{
++ return cmd->sense_valid_len;
++}
++
++/*
++ * Get/Set functions for cmd's queue_type
++ */
++static inline enum scst_cmd_queue_type scst_cmd_get_queue_type(
++ struct scst_cmd *cmd)
++{
++ return cmd->queue_type;
++}
++
++static inline void scst_cmd_set_queue_type(struct scst_cmd *cmd,
++ enum scst_cmd_queue_type queue_type)
++{
++ cmd->queue_type = queue_type;
++}
++
++/*
++ * Get/Set functions for cmd's tag
++ */
++static inline uint64_t scst_cmd_get_tag(struct scst_cmd *cmd)
++{
++ return cmd->tag;
++}
++
++static inline void scst_cmd_set_tag(struct scst_cmd *cmd, uint64_t tag)
++{
++ cmd->tag = tag;
++}
++
++/*
++ * Get/Set functions for cmd's target private data.
++ * Variant with *_lock must be used if target driver uses
++ * scst_find_cmd() to avoid race with it, except inside scst_find_cmd()'s
++ * callback, where lock is already taken.
++ */
++static inline void *scst_cmd_get_tgt_priv(struct scst_cmd *cmd)
++{
++ return cmd->tgt_priv;
++}
++
++static inline void scst_cmd_set_tgt_priv(struct scst_cmd *cmd, void *val)
++{
++ cmd->tgt_priv = val;
++}
++
++/*
++ * Get/Set functions for tgt_need_alloc_data_buf flag
++ */
++static inline int scst_cmd_get_tgt_need_alloc_data_buf(struct scst_cmd *cmd)
++{
++ return cmd->tgt_need_alloc_data_buf;
++}
++
++static inline void scst_cmd_set_tgt_need_alloc_data_buf(struct scst_cmd *cmd)
++{
++ cmd->tgt_need_alloc_data_buf = 1;
++}
++
++/*
++ * Get/Set functions for tgt_data_buf_alloced flag
++ */
++static inline int scst_cmd_get_tgt_data_buff_alloced(struct scst_cmd *cmd)
++{
++ return cmd->tgt_data_buf_alloced;
++}
++
++static inline void scst_cmd_set_tgt_data_buff_alloced(struct scst_cmd *cmd)
++{
++ cmd->tgt_data_buf_alloced = 1;
++}
++
++/*
++ * Get/Set functions for dh_data_buf_alloced flag
++ */
++static inline int scst_cmd_get_dh_data_buff_alloced(struct scst_cmd *cmd)
++{
++ return cmd->dh_data_buf_alloced;
++}
++
++static inline void scst_cmd_set_dh_data_buff_alloced(struct scst_cmd *cmd)
++{
++ cmd->dh_data_buf_alloced = 1;
++}
++
++/*
++ * Get/Set functions for no_sgv flag
++ */
++static inline int scst_cmd_get_no_sgv(struct scst_cmd *cmd)
++{
++ return cmd->no_sgv;
++}
++
++static inline void scst_cmd_set_no_sgv(struct scst_cmd *cmd)
++{
++ cmd->no_sgv = 1;
++}
++
++/*
++ * Get/Set functions for tgt_sn
++ */
++static inline int scst_cmd_get_tgt_sn(struct scst_cmd *cmd)
++{
++ BUG_ON(!cmd->tgt_sn_set);
++ return cmd->tgt_sn;
++}
++
++static inline void scst_cmd_set_tgt_sn(struct scst_cmd *cmd, uint32_t tgt_sn)
++{
++ cmd->tgt_sn_set = 1;
++ cmd->tgt_sn = tgt_sn;
++}
++
++/*
++ * Returns 1 if the cmd was aborted, so its status is invalid and no
++ * reply shall be sent to the remote initiator. A target driver should
++ * only clear internal resources, associated with cmd.
++ */
++static inline int scst_cmd_aborted(struct scst_cmd *cmd)
++{
++ return test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags) &&
++ !test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
++}
++
++/* Returns sense data format for cmd's dev */
++static inline bool scst_get_cmd_dev_d_sense(struct scst_cmd *cmd)
++{
++ return (cmd->dev != NULL) ? cmd->dev->d_sense : 0;
++}
++
++/*
++ * Get/Set functions for expected data direction, transfer length
++ * and its validity flag
++ */
++static inline int scst_cmd_is_expected_set(struct scst_cmd *cmd)
++{
++ return cmd->expected_values_set;
++}
++
++static inline scst_data_direction scst_cmd_get_expected_data_direction(
++ struct scst_cmd *cmd)
++{
++ return cmd->expected_data_direction;
++}
++
++static inline int scst_cmd_get_expected_transfer_len(
++ struct scst_cmd *cmd)
++{
++ return cmd->expected_transfer_len;
++}
++
++static inline int scst_cmd_get_expected_out_transfer_len(
++ struct scst_cmd *cmd)
++{
++ return cmd->expected_out_transfer_len;
++}
++
++static inline void scst_cmd_set_expected(struct scst_cmd *cmd,
++ scst_data_direction expected_data_direction,
++ int expected_transfer_len)
++{
++ cmd->expected_data_direction = expected_data_direction;
++ cmd->expected_transfer_len = expected_transfer_len;
++ cmd->expected_values_set = 1;
++}
++
++static inline void scst_cmd_set_expected_out_transfer_len(struct scst_cmd *cmd,
++ int expected_out_transfer_len)
++{
++ WARN_ON(!cmd->expected_values_set);
++ cmd->expected_out_transfer_len = expected_out_transfer_len;
++}
++
++/*
++ * Get/clear functions for cmd's may_need_dma_sync
++ */
++static inline int scst_get_may_need_dma_sync(struct scst_cmd *cmd)
++{
++ return cmd->may_need_dma_sync;
++}
++
++static inline void scst_clear_may_need_dma_sync(struct scst_cmd *cmd)
++{
++ cmd->may_need_dma_sync = 0;
++}
++
++/*
++ * Get/set functions for cmd's delivery_status. It is one of
++ * SCST_CMD_DELIVERY_* constants. It specifies the status of the
++ * command's delivery to the initiator.
++ */
++static inline int scst_get_delivery_status(struct scst_cmd *cmd)
++{
++ return cmd->delivery_status;
++}
++
++static inline void scst_set_delivery_status(struct scst_cmd *cmd,
++ int delivery_status)
++{
++ cmd->delivery_status = delivery_status;
++}
++
++static inline unsigned int scst_get_active_cmd_count(struct scst_cmd *cmd)
++{
++ if (likely(cmd->tgt_dev != NULL))
++ return atomic_read(&cmd->tgt_dev->tgt_dev_cmd_count);
++ else
++ return (unsigned int)-1;
++}
++
++/*
++ * Get/Set function for mgmt cmd's target private data
++ */
++static inline void *scst_mgmt_cmd_get_tgt_priv(struct scst_mgmt_cmd *mcmd)
++{
++ return mcmd->tgt_priv;
++}
++
++static inline void scst_mgmt_cmd_set_tgt_priv(struct scst_mgmt_cmd *mcmd,
++ void *val)
++{
++ mcmd->tgt_priv = val;
++}
++
++/* Returns mgmt cmd's completion status (SCST_MGMT_STATUS_* constants) */
++static inline int scst_mgmt_cmd_get_status(struct scst_mgmt_cmd *mcmd)
++{
++ return mcmd->status;
++}
++
++/* Returns mgmt cmd's TM fn */
++static inline int scst_mgmt_cmd_get_fn(struct scst_mgmt_cmd *mcmd)
++{
++ return mcmd->fn;
++}
++
++/*
++ * Called by dev handler's task_mgmt_fn() to notify SCST core that mcmd
++ * is going to complete asynchronously.
++ */
++void scst_prepare_async_mcmd(struct scst_mgmt_cmd *mcmd);
++
++/*
++ * Called by dev handler to notify SCST core that async. mcmd is completed
++ * with status "status".
++ */
++void scst_async_mcmd_completed(struct scst_mgmt_cmd *mcmd, int status);
++
++/* Returns AEN's fn */
++static inline int scst_aen_get_event_fn(struct scst_aen *aen)
++{
++ return aen->event_fn;
++}
++
++/* Returns AEN's session */
++static inline struct scst_session *scst_aen_get_sess(struct scst_aen *aen)
++{
++ return aen->sess;
++}
++
++/* Returns AEN's LUN */
++static inline __be64 scst_aen_get_lun(struct scst_aen *aen)
++{
++ return aen->lun;
++}
++
++/* Returns SCSI AEN's sense */
++static inline const uint8_t *scst_aen_get_sense(struct scst_aen *aen)
++{
++ return aen->aen_sense;
++}
++
++/* Returns SCSI AEN's sense length */
++static inline int scst_aen_get_sense_len(struct scst_aen *aen)
++{
++ return aen->aen_sense_len;
++}
++
++/*
++ * Get/set functions for AEN's delivery_status. It is one of the
++ * SCST_AEN_RES_* constants. It specifies the status of the
++ * AEN's delivery to the initiator.
++ */
++static inline int scst_get_aen_delivery_status(struct scst_aen *aen)
++{
++ return aen->delivery_status;
++}
++
++static inline void scst_set_aen_delivery_status(struct scst_aen *aen,
++ int status)
++{
++ aen->delivery_status = status;
++}
++
++void scst_aen_done(struct scst_aen *aen);
++
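++/*
++ * Illustrative sketch only (not part of the SCST API): how a target driver's
++ * AEN handler might use the accessors above and report the delivery result.
++ * The function name is hypothetical and SCST_AEN_RES_SUCCESS is assumed to
++ * be one of the SCST_AEN_RES_* delivery status constants mentioned above.
++ */
++static inline void example_report_aen(struct scst_aen *aen)
++{
++	const uint8_t *sense = scst_aen_get_sense(aen);
++	int sense_len = scst_aen_get_sense_len(aen);
++
++	/* A real driver would encode the LUN and sense into its protocol here */
++	(void)sense;
++	(void)sense_len;
++
++	/* Report the delivery result back to the SCST core */
++	scst_set_aen_delivery_status(aen, SCST_AEN_RES_SUCCESS);
++	scst_aen_done(aen);
++}
++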
++static inline void sg_clear(struct scatterlist *sg)
++{
++ memset(sg, 0, sizeof(*sg));
++#ifdef CONFIG_DEBUG_SG
++ sg->sg_magic = SG_MAGIC;
++#endif
++}
++
++enum scst_sg_copy_dir {
++ SCST_SG_COPY_FROM_TARGET,
++ SCST_SG_COPY_TO_TARGET
++};
++
++void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir);
++
++/*
++ * Functions for access to the command's data (SG) buffer,
++ * including HIGHMEM environment. Should be used instead of direct
++ * access. Returns the mapped buffer length for success, 0 for EOD,
++ * negative error code otherwise.
++ *
++ * "Buf" argument returns the mapped buffer
++ *
++ * The "put" function unmaps the buffer.
++ */
++static inline int __scst_get_buf(struct scst_cmd *cmd, struct scatterlist *sg,
++ int sg_cnt, uint8_t **buf)
++{
++ int res = 0;
++ int i = cmd->get_sg_buf_entry_num;
++
++ *buf = NULL;
++
++ if ((i >= sg_cnt) || unlikely(sg == NULL))
++ goto out;
++
++ *buf = page_address(sg_page(&sg[i]));
++ *buf += sg[i].offset;
++
++ res = sg[i].length;
++ cmd->get_sg_buf_entry_num++;
++
++out:
++ return res;
++}
++
++static inline int scst_get_buf_first(struct scst_cmd *cmd, uint8_t **buf)
++{
++ cmd->get_sg_buf_entry_num = 0;
++ cmd->may_need_dma_sync = 1;
++ return __scst_get_buf(cmd, cmd->sg, cmd->sg_cnt, buf);
++}
++
++static inline int scst_get_buf_next(struct scst_cmd *cmd, uint8_t **buf)
++{
++ return __scst_get_buf(cmd, cmd->sg, cmd->sg_cnt, buf);
++}
++
++static inline void scst_put_buf(struct scst_cmd *cmd, void *buf)
++{
++ /* Nothing to do */
++}
++
++static inline int scst_get_out_buf_first(struct scst_cmd *cmd, uint8_t **buf)
++{
++ cmd->get_sg_buf_entry_num = 0;
++ cmd->may_need_dma_sync = 1;
++ return __scst_get_buf(cmd, cmd->out_sg, cmd->out_sg_cnt, buf);
++}
++
++static inline int scst_get_out_buf_next(struct scst_cmd *cmd, uint8_t **buf)
++{
++ return __scst_get_buf(cmd, cmd->out_sg, cmd->out_sg_cnt, buf);
++}
++
++static inline void scst_put_out_buf(struct scst_cmd *cmd, void *buf)
++{
++ /* Nothing to do */
++}
++
++static inline int scst_get_sg_buf_first(struct scst_cmd *cmd, uint8_t **buf,
++ struct scatterlist *sg, int sg_cnt)
++{
++ cmd->get_sg_buf_entry_num = 0;
++ cmd->may_need_dma_sync = 1;
++ return __scst_get_buf(cmd, sg, sg_cnt, buf);
++}
++
++static inline int scst_get_sg_buf_next(struct scst_cmd *cmd, uint8_t **buf,
++ struct scatterlist *sg, int sg_cnt)
++{
++ return __scst_get_buf(cmd, sg, sg_cnt, buf);
++}
++
++static inline void scst_put_sg_buf(struct scst_cmd *cmd, void *buf,
++ struct scatterlist *sg, int sg_cnt)
++{
++ /* Nothing to do */
++}
++
++/*
++ * Returns an approximate, rounded-up count of the buffers that
++ * scst_get_buf_[first|next]() will return.
++ */
++static inline int scst_get_buf_count(struct scst_cmd *cmd)
++{
++ return (cmd->sg_cnt == 0) ? 1 : cmd->sg_cnt;
++}
++
++/*
++ * Returns an approximate, rounded-up count of the buffers that
++ * scst_get_out_buf_[first|next]() will return.
++ */
++static inline int scst_get_out_buf_count(struct scst_cmd *cmd)
++{
++ return (cmd->out_sg_cnt == 0) ? 1 : cmd->out_sg_cnt;
++}
++
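++/*
++ * Illustrative sketch only (not part of the SCST API): the intended use of
++ * the scst_get_buf_*()/scst_put_buf() helpers above is to walk a command's
++ * data buffer segment by segment instead of dereferencing cmd->sg directly.
++ * The function name and the fill pattern are assumptions of this example.
++ */
++static inline void example_fill_cmd_buffer(struct scst_cmd *cmd, uint8_t pattern)
++{
++	uint8_t *buf;
++	int len;
++
++	/* A negative len would indicate a mapping error and stop the loop */
++	for (len = scst_get_buf_first(cmd, &buf); len > 0;
++	     len = scst_get_buf_next(cmd, &buf)) {
++		memset(buf, pattern, len);
++		scst_put_buf(cmd, buf);
++	}
++}
++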
++int scst_suspend_activity(bool interruptible);
++void scst_resume_activity(void);
++
++void scst_process_active_cmd(struct scst_cmd *cmd, bool atomic);
++
++void scst_post_parse(struct scst_cmd *cmd);
++void scst_post_alloc_data_buf(struct scst_cmd *cmd);
++
++int scst_check_local_events(struct scst_cmd *cmd);
++
++int scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd);
++
++struct scst_trace_log {
++ unsigned int val;
++ const char *token;
++};
++
++extern struct mutex scst_mutex;
++
++const struct sysfs_ops *scst_sysfs_get_sysfs_ops(void);
++
++/*
++ * Returns target driver's root sysfs kobject.
++ * The driver can create own files/directories/links here.
++ */
++static inline struct kobject *scst_sysfs_get_tgtt_kobj(
++ struct scst_tgt_template *tgtt)
++{
++ return &tgtt->tgtt_kobj;
++}
++
++/*
++ * Returns target's root sysfs kobject.
++ * The driver can create own files/directories/links here.
++ */
++static inline struct kobject *scst_sysfs_get_tgt_kobj(
++ struct scst_tgt *tgt)
++{
++ return &tgt->tgt_kobj;
++}
++
++/*
++ * Returns device handler's root sysfs kobject.
++ * The driver can create own files/directories/links here.
++ */
++static inline struct kobject *scst_sysfs_get_devt_kobj(
++ struct scst_dev_type *devt)
++{
++ return &devt->devt_kobj;
++}
++
++/*
++ * Returns device's root sysfs kobject.
++ * The driver can create own files/directories/links here.
++ */
++static inline struct kobject *scst_sysfs_get_dev_kobj(
++ struct scst_device *dev)
++{
++ return &dev->dev_kobj;
++}
++
++/*
++ * Returns session's root sysfs kobject.
++ * The driver can create own files/directories/links here.
++ */
++static inline struct kobject *scst_sysfs_get_sess_kobj(
++ struct scst_session *sess)
++{
++ return &sess->sess_kobj;
++}
++
++/* Returns target name */
++static inline const char *scst_get_tgt_name(const struct scst_tgt *tgt)
++{
++ return tgt->tgt_name;
++}
++
++int scst_alloc_sense(struct scst_cmd *cmd, int atomic);
++int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
++ const uint8_t *sense, unsigned int len);
++
++int scst_set_sense(uint8_t *buffer, int len, bool d_sense,
++ int key, int asc, int ascq);
++
++bool scst_is_ua_sense(const uint8_t *sense, int len);
++
++bool scst_analyze_sense(const uint8_t *sense, int len,
++ unsigned int valid_mask, int key, int asc, int ascq);
++
++unsigned long scst_random(void);
++
++void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len);
++
++void scst_get(void);
++void scst_put(void);
++
++void scst_cmd_get(struct scst_cmd *cmd);
++void scst_cmd_put(struct scst_cmd *cmd);
++
++struct scatterlist *scst_alloc(int size, gfp_t gfp_mask, int *count);
++void scst_free(struct scatterlist *sg, int count);
++
++void scst_add_thr_data(struct scst_tgt_dev *tgt_dev,
++ struct scst_thr_data_hdr *data,
++ void (*free_fn) (struct scst_thr_data_hdr *data));
++void scst_del_all_thr_data(struct scst_tgt_dev *tgt_dev);
++void scst_dev_del_all_thr_data(struct scst_device *dev);
++struct scst_thr_data_hdr *__scst_find_thr_data(struct scst_tgt_dev *tgt_dev,
++ struct task_struct *tsk);
++
++/* Finds the data local to the current thread. Returns NULL if not found. */
++static inline struct scst_thr_data_hdr *scst_find_thr_data(
++ struct scst_tgt_dev *tgt_dev)
++{
++ return __scst_find_thr_data(tgt_dev, current);
++}
++
++/* Increase ref counter for the thread data */
++static inline void scst_thr_data_get(struct scst_thr_data_hdr *data)
++{
++ atomic_inc(&data->ref);
++}
++
++/* Decrease ref counter for the thread data */
++static inline void scst_thr_data_put(struct scst_thr_data_hdr *data)
++{
++ if (atomic_dec_and_test(&data->ref))
++ data->free_fn(data);
++}
++
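++/*
++ * Illustrative sketch only (not part of the SCST API): a dev handler embeds
++ * struct scst_thr_data_hdr into its own per-thread structure, registers it
++ * with scst_add_thr_data() and releases it in the free_fn, which is invoked
++ * once the reference count drops to zero (see scst_thr_data_put() above).
++ * All "example_*" names are hypothetical.
++ */
++struct example_thr_data {
++	struct scst_thr_data_hdr hdr;
++	int example_counter;	/* dev handler's private per-thread state */
++};
++
++static inline void example_free_thr_data(struct scst_thr_data_hdr *hdr)
++{
++	kfree(container_of(hdr, struct example_thr_data, hdr));
++}
++
++static inline int example_register_thr_data(struct scst_tgt_dev *tgt_dev)
++{
++	struct example_thr_data *d = kzalloc(sizeof(*d), GFP_KERNEL);
++
++	if (d == NULL)
++		return -ENOMEM;
++	scst_add_thr_data(tgt_dev, &d->hdr, example_free_thr_data);
++	return 0;
++}
++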
++int scst_calc_block_shift(int sector_size);
++int scst_sbc_generic_parse(struct scst_cmd *cmd,
++ int (*get_block_shift)(struct scst_cmd *cmd));
++int scst_cdrom_generic_parse(struct scst_cmd *cmd,
++ int (*get_block_shift)(struct scst_cmd *cmd));
++int scst_modisk_generic_parse(struct scst_cmd *cmd,
++ int (*get_block_shift)(struct scst_cmd *cmd));
++int scst_tape_generic_parse(struct scst_cmd *cmd,
++ int (*get_block_size)(struct scst_cmd *cmd));
++int scst_changer_generic_parse(struct scst_cmd *cmd,
++ int (*nothing)(struct scst_cmd *cmd));
++int scst_processor_generic_parse(struct scst_cmd *cmd,
++ int (*nothing)(struct scst_cmd *cmd));
++int scst_raid_generic_parse(struct scst_cmd *cmd,
++ int (*nothing)(struct scst_cmd *cmd));
++
++int scst_block_generic_dev_done(struct scst_cmd *cmd,
++ void (*set_block_shift)(struct scst_cmd *cmd, int block_shift));
++int scst_tape_generic_dev_done(struct scst_cmd *cmd,
++ void (*set_block_size)(struct scst_cmd *cmd, int block_size));
++
++int scst_obtain_device_parameters(struct scst_device *dev);
++
++void scst_reassign_persistent_sess_states(struct scst_session *new_sess,
++ struct scst_session *old_sess);
++
++int scst_get_max_lun_commands(struct scst_session *sess, uint64_t lun);
++
++/*
++ * Has to be open coded here, because Linux doesn't have an equivalent that
++ * allows exclusive wake-ups of threads in LIFO order. We need it to let (yet)
++ * unneeded threads sleep and not pollute the CPU cache with their stacks.
++ */
++static inline void add_wait_queue_exclusive_head(wait_queue_head_t *q,
++ wait_queue_t *wait)
++{
++ unsigned long flags;
++
++ wait->flags |= WQ_FLAG_EXCLUSIVE;
++ spin_lock_irqsave(&q->lock, flags);
++ __add_wait_queue(q, wait);
++ spin_unlock_irqrestore(&q->lock, flags);
++}
++
++/*
++ * Structure to match events to user space and replies on them
++ */
++struct scst_sysfs_user_info {
++ /* Unique cookie to identify request */
++ uint32_t info_cookie;
++
++ /* Entry in the global list */
++ struct list_head info_list_entry;
++
++ /* Set if reply from the user space is being executed */
++ unsigned int info_being_executed:1;
++
++ /* Set if this info is in the info_list */
++ unsigned int info_in_list:1;
++
++ /* Completion to wait on for the request completion */
++ struct completion info_completion;
++
++ /* Request completion status and optional data */
++ int info_status;
++ void *data;
++};
++
++int scst_sysfs_user_add_info(struct scst_sysfs_user_info **out_info);
++void scst_sysfs_user_del_info(struct scst_sysfs_user_info *info);
++struct scst_sysfs_user_info *scst_sysfs_user_get_info(uint32_t cookie);
++int scst_wait_info_completion(struct scst_sysfs_user_info *info,
++ unsigned long timeout);
++
++unsigned int scst_get_setup_id(void);
++
++/*
++ * Needed to avoid a potential circular locking dependency between scst_mutex
++ * and the internal sysfs locking (s_active). It could occur because most
++ * sysfs entries are created and deleted under scst_mutex AND scst_mutex is
++ * taken inside sysfs functions. So, we push all the processing that takes
++ * scst_mutex out of the sysfs functions. To avoid deadlock, we return from
++ * them with EAGAIN if the processing is taking too long. User space should
++ * then poll last_sysfs_mgmt_res until it returns the result of the processing
++ * (something other than EAGAIN).
++ */
++struct scst_sysfs_work_item {
++ /*
++ * If true, then last_sysfs_mgmt_res will not be updated. This is
++	 * If true, then last_sysfs_mgmt_res will not be updated. This is
++	 * needed to allow read-only sysfs monitoring during management actions.
++	 * All management actions are supposed to be externally serialized,
++	 * so last_sysfs_mgmt_res is then automatically serialized too.
++	 * Otherwise a monitoring action could overwrite the value of a
++	 * simultaneous management action's last_sysfs_mgmt_res.
++ bool read_only_action;
++
++ struct list_head sysfs_work_list_entry;
++ struct kref sysfs_work_kref;
++ int (*sysfs_work_fn)(struct scst_sysfs_work_item *work);
++ struct completion sysfs_work_done;
++ char *buf;
++
++ union {
++ struct scst_dev_type *devt;
++ struct scst_tgt_template *tgtt;
++ struct {
++ struct scst_tgt *tgt;
++ struct scst_acg *acg;
++ union {
++ bool is_tgt_kobj;
++ int io_grouping_type;
++ bool enable;
++ };
++ };
++ struct {
++ struct scst_device *dev;
++ int new_threads_num;
++ enum scst_dev_type_threads_pool_type new_threads_pool_type;
++ };
++ struct scst_session *sess;
++ struct {
++ struct scst_tgt *tgt;
++ unsigned long l;
++ };
++ };
++ int work_res;
++ char *res_buf;
++};
++
++int scst_alloc_sysfs_work(int (*sysfs_work_fn)(struct scst_sysfs_work_item *),
++ bool read_only_action, struct scst_sysfs_work_item **res_work);
++int scst_sysfs_queue_wait_work(struct scst_sysfs_work_item *work);
++void scst_sysfs_work_get(struct scst_sysfs_work_item *work);
++void scst_sysfs_work_put(struct scst_sysfs_work_item *work);
++
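++/*
++ * Illustrative sketch only (not part of the SCST API): the pattern described
++ * in the comment above, where a sysfs handler wraps scst_mutex-taking
++ * processing into a work item instead of taking the mutex itself. The
++ * function name is hypothetical and reference handling is simplified; real
++ * callers follow the scst_sysfs_work_get()/scst_sysfs_work_put() rules.
++ */
++static inline int example_queue_dev_sysfs_action(struct scst_device *dev,
++	int (*work_fn)(struct scst_sysfs_work_item *work))
++{
++	struct scst_sysfs_work_item *work;
++	int res;
++
++	res = scst_alloc_sysfs_work(work_fn, false, &work);
++	if (res != 0)
++		return res;
++
++	work->dev = dev;	/* union member used by device-related actions */
++
++	/* Queues the work and waits, possibly returning -EAGAIN as above */
++	return scst_sysfs_queue_wait_work(work);
++}
++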
++char *scst_get_next_lexem(char **token_str);
++void scst_restore_token_str(char *prev_lexem, char *token_str);
++char *scst_get_next_token_str(char **input_str);
++
++void scst_init_threads(struct scst_cmd_threads *cmd_threads);
++void scst_deinit_threads(struct scst_cmd_threads *cmd_threads);
++
++#endif /* __SCST_H */
+diff -upkr -X linux-2.6.36/Documentation/dontdiff linux-2.6.36/drivers/Kconfig linux-2.6.36/drivers/Kconfig
+--- orig/linux-2.6.36/drivers/Kconfig 01:51:29.000000000 +0400
++++ linux-2.6.36/drivers/Kconfig 14:14:46.000000000 +0400
+@@ -22,6 +22,8 @@ source "drivers/ide/Kconfig"
+
+ source "drivers/scsi/Kconfig"
+
++source "drivers/scst/Kconfig"
++
+ source "drivers/ata/Kconfig"
+
+ source "drivers/md/Kconfig"
+diff -upkr -X linux-2.6.36/Documentation/dontdiff linux-2.6.36/drivers/Makefile linux-2.6.36/drivers/Makefile
+--- orig/linux-2.6.36/drivers/Makefile 15:40:04.000000000 +0200
++++ linux-2.6.36/drivers/Makefile 15:40:20.000000000 +0200
+@@ -113,3 +113,4 @@ obj-$(CONFIG_VLYNQ) += vlynq/
+ obj-$(CONFIG_STAGING) += staging/
+ obj-y += platform/
+ obj-y += ieee802154/
++obj-$(CONFIG_SCST) += scst/
+diff -uprN orig/linux-2.6.36/drivers/scst/Kconfig linux-2.6.36/drivers/scst/Kconfig
+--- orig/linux-2.6.36/drivers/scst/Kconfig
++++ linux-2.6.36/drivers/scst/Kconfig
+@@ -0,0 +1,254 @@
++menu "SCSI target (SCST) support"
++
++config SCST
++ tristate "SCSI target (SCST) support"
++ depends on SCSI
++ help
++ SCSI target (SCST) is designed to provide unified, consistent
++	  SCSI target (SCST) is designed to provide a unified, consistent
++	  interface between SCSI target drivers and the Linux kernel and
++	  to simplify target driver development as much as possible. Visit
++
++config SCST_DISK
++ tristate "SCSI target disk support"
++ default SCST
++ depends on SCSI && SCST
++ help
++ SCST pass-through device handler for disk device.
++
++config SCST_TAPE
++ tristate "SCSI target tape support"
++ default SCST
++ depends on SCSI && SCST
++ help
++ SCST pass-through device handler for tape device.
++
++config SCST_CDROM
++ tristate "SCSI target CDROM support"
++ default SCST
++ depends on SCSI && SCST
++ help
++ SCST pass-through device handler for CDROM device.
++
++config SCST_MODISK
++ tristate "SCSI target MO disk support"
++ default SCST
++ depends on SCSI && SCST
++ help
++ SCST pass-through device handler for MO disk device.
++
++config SCST_CHANGER
++ tristate "SCSI target changer support"
++ default SCST
++ depends on SCSI && SCST
++ help
++ SCST pass-through device handler for changer device.
++
++config SCST_PROCESSOR
++ tristate "SCSI target processor support"
++ default SCST
++ depends on SCSI && SCST
++ help
++ SCST pass-through device handler for processor device.
++
++config SCST_RAID
++ tristate "SCSI target storage array controller (RAID) support"
++ default SCST
++ depends on SCSI && SCST
++ help
++ SCST pass-through device handler for raid storage array controller (RAID) device.
++
++config SCST_VDISK
++ tristate "SCSI target virtual disk and/or CDROM support"
++ default SCST
++ depends on SCSI && SCST
++ help
++ SCST device handler for virtual disk and/or CDROM device.
++
++config SCST_USER
++ tristate "User-space SCSI target driver support"
++ default SCST
++ depends on SCSI && SCST && !HIGHMEM4G && !HIGHMEM64G
++ help
++	  The SCST device handler scst_user allows implementing full-featured
++ SCSI target devices in user space.
++
++ If unsure, say "N".
++
++config SCST_STRICT_SERIALIZING
++ bool "Strict serialization"
++ depends on SCST
++ help
++ Enable strict SCSI command serialization. When enabled, SCST sends
++ all SCSI commands to the underlying SCSI device synchronously, one
++	  after another. This makes task management more reliable, at the cost of
++ a performance penalty. This is most useful for stateful SCSI devices
++ like tapes, where the result of the execution of a command
++ depends on the device settings configured by previous commands. Disk
++ and RAID devices are stateless in most cases. The current SCSI core
++	  in Linux doesn't allow aborting all commands reliably if they have
++ been sent asynchronously to a stateful device.
++ Enable this option if you use stateful device(s) and need as much
++ error recovery reliability as possible.
++
++ If unsure, say "N".
++
++config SCST_STRICT_SECURITY
++ bool "Strict security"
++ depends on SCST
++ help
++ Makes SCST clear (zero-fill) allocated data buffers. Note: this has a
++ significant performance penalty.
++
++ If unsure, say "N".
++
++config SCST_TEST_IO_IN_SIRQ
++ bool "Allow test I/O from soft-IRQ context"
++ depends on SCST
++ help
++ Allows SCST to submit selected SCSI commands (TUR and
++ READ/WRITE) from soft-IRQ context (tasklets). Enabling it will
++	  decrease the number of context switches and slightly improve
++ performance. The goal of this option is to be able to measure
++ overhead of the context switches. See more info about it in
++ README.scst.
++
++ WARNING! Improperly used, this option can lead you to a kernel crash!
++
++ If unsure, say "N".
++
++config SCST_ABORT_CONSIDER_FINISHED_TASKS_AS_NOT_EXISTING
++ bool "Send back UNKNOWN TASK when an already finished task is aborted"
++ depends on SCST
++ help
++ Controls which response is sent by SCST to the initiator in case
++ the initiator attempts to abort (ABORT TASK) an already finished
++ request. If this option is enabled, the response UNKNOWN TASK is
++ sent back to the initiator. However, some initiators, particularly
++ the VMware iSCSI initiator, interpret the UNKNOWN TASK response as
++	  if the target had malfunctioned and try to RESET it. Then sometimes
++	  the initiator itself starts misbehaving.
++
++ If unsure, say "N".
++
++config SCST_USE_EXPECTED_VALUES
++ bool "Prefer initiator-supplied SCSI command attributes"
++ depends on SCST
++ help
++ When SCST receives a SCSI command from an initiator, such a SCSI
++ command has both data transfer length and direction attributes.
++ There are two possible sources for these attributes: either the
++ values computed by SCST from its internal command translation table
++ or the values supplied by the initiator. The former are used by
++ default because of security reasons. Invalid initiator-supplied
++	  default for security reasons. Invalid initiator-supplied
++ Only consider enabling this option when SCST logs the following
++ message: "Unknown opcode XX for YY. Should you update
++ scst_scsi_op_table?" and when the initiator complains. Please
++ report any unrecognized commands to scst-devel@lists.sourceforge.net.
++
++ If unsure, say "N".
++
++config SCST_EXTRACHECKS
++ bool "Extra consistency checks"
++ depends on SCST
++ help
++ Enable additional consistency checks in the SCSI middle level target
++ code. This may be helpful for SCST developers. Enable it if you have
++ any problems.
++
++ If unsure, say "N".
++
++config SCST_TRACING
++ bool "Tracing support"
++ depends on SCST
++ default y
++ help
++ Enable SCSI middle level tracing support. Tracing can be controlled
++ dynamically via sysfs interface. The traced information
++ is sent to the kernel log and may be very helpful when analyzing
++ the cause of a communication problem between initiator and target.
++
++ If unsure, say "Y".
++
++config SCST_DEBUG
++ bool "Debugging support"
++ depends on SCST
++ select DEBUG_BUGVERBOSE
++ help
++ Enables support for debugging SCST. This may be helpful for SCST
++ developers.
++
++ If unsure, say "N".
++
++config SCST_DEBUG_OOM
++ bool "Out-of-memory debugging support"
++ depends on SCST
++ help
++ Let SCST's internal memory allocation function
++ (scst_alloc_sg_entries()) fail about once in every 10000 calls, at
++ least if the flag __GFP_NOFAIL has not been set. This allows SCST
++ developers to test the behavior of SCST in out-of-memory conditions.
++ This may be helpful for SCST developers.
++
++ If unsure, say "N".
++
++config SCST_DEBUG_RETRY
++ bool "SCSI command retry debugging support"
++ depends on SCST
++ help
++ Let SCST's internal SCSI command transfer function
++ (scst_rdy_to_xfer()) fail about once in every 100 calls. This allows
++ SCST developers to test the behavior of SCST when SCSI queues fill
++ up. This may be helpful for SCST developers.
++
++ If unsure, say "N".
++
++config SCST_DEBUG_SN
++ bool "SCSI sequence number debugging support"
++ depends on SCST
++ help
++	  Allows testing SCSI command ordering via sequence numbers by
++ randomly changing the type of SCSI commands into
++ SCST_CMD_QUEUE_ORDERED, SCST_CMD_QUEUE_HEAD_OF_QUEUE or
++ SCST_CMD_QUEUE_SIMPLE for about one in 300 SCSI commands.
++ This may be helpful for SCST developers.
++
++ If unsure, say "N".
++
++config SCST_DEBUG_TM
++ bool "Task management debugging support"
++ depends on SCST_DEBUG
++ help
++ Enables support for debugging of SCST's task management functions.
++ When enabled, some of the commands on LUN 0 in the default access
++ control group will be delayed for about 60 seconds. This will
++	  cause the remote initiator to send SCSI task management functions,
++ e.g. ABORT TASK and TARGET RESET.
++
++ If unsure, say "N".
++
++config SCST_TM_DBG_GO_OFFLINE
++ bool "Let devices become completely unresponsive"
++ depends on SCST_DEBUG_TM
++ help
++	  Enable this option if you want the device to eventually become
++ completely unresponsive. When disabled, the device will receive
++ ABORT and RESET commands.
++
++config SCST_MEASURE_LATENCY
++ bool "Commands processing latency measurement facility"
++ depends on SCST
++ help
++	  This option enables the command processing latency measurement
++	  facility in SCST. It will provide average command processing latency
++	  statistics in the sysfs interface. You can clear the already measured
++	  results by writing 0 to the corresponding sysfs file.
++	  Note: you need a non-preemptible kernel to get correct results.
++
++ If unsure, say "N".
++
++source "drivers/scst/iscsi-scst/Kconfig"
++source "drivers/scst/srpt/Kconfig"
++
++endmenu
+diff -uprN orig/linux-2.6.36/drivers/scst/Makefile linux-2.6.36/drivers/scst/Makefile
+--- orig/linux-2.6.36/drivers/scst/Makefile
++++ linux-2.6.36/drivers/scst/Makefile
+@@ -0,0 +1,12 @@
++ccflags-y += -Wno-unused-parameter
++
++scst-y += scst_main.o
++scst-y += scst_pres.o
++scst-y += scst_targ.o
++scst-y += scst_lib.o
++scst-y += scst_sysfs.o
++scst-y += scst_mem.o
++scst-y += scst_debug.o
++
++obj-$(CONFIG_SCST) += scst.o dev_handlers/ iscsi-scst/ qla2xxx-target/ \
++ srpt/ scst_local/
+diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/scst_lib.c
+--- orig/linux-2.6.36/drivers/scst/scst_lib.c
++++ linux-2.6.36/drivers/scst/scst_lib.c
+@@ -0,0 +1,7362 @@
++/*
++ * scst_lib.c
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/kthread.h>
++#include <linux/cdrom.h>
++#include <linux/unistd.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++#include <linux/delay.h>
++#include <linux/vmalloc.h>
++#include <asm/kmap_types.h>
++#include <asm/unaligned.h>
++
++#include <scst/scst.h>
++#include "scst_priv.h"
++#include "scst_mem.h"
++#include "scst_pres.h"
++
++struct scsi_io_context {
++ unsigned int full_cdb_used:1;
++ void *data;
++ void (*done)(void *data, char *sense, int result, int resid);
++ char sense[SCST_SENSE_BUFFERSIZE];
++ unsigned char full_cdb[0];
++};
++static struct kmem_cache *scsi_io_context_cache;
++
++/* get_trans_len_x extracts x bytes from the cdb as the length, starting at off */
++static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off);
++static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off);
++static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off);
++static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off);
++static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off);
++
++static int get_bidi_trans_len_2(struct scst_cmd *cmd, uint8_t off);
++
++/* for special commands */
++static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off);
++static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off);
++static int get_trans_len_serv_act_in(struct scst_cmd *cmd, uint8_t off);
++static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off);
++static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off);
++static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off);
++static int get_trans_cdb_len_10(struct scst_cmd *cmd, uint8_t off);
++static int get_trans_len_prevent_allow_medium_removal(struct scst_cmd *cmd,
++ uint8_t off);
++static int get_trans_len_3_read_elem_stat(struct scst_cmd *cmd, uint8_t off);
++static int get_trans_len_start_stop(struct scst_cmd *cmd, uint8_t off);
++
++/*
+++=====================================-============-======-
++| Command name | Operation | Type |
++| | code | |
++|-------------------------------------+------------+------+
++
+++=========================================================+
++|Key: M = command implementation is mandatory. |
++| O = command implementation is optional. |
++| V = Vendor-specific |
++| R = Reserved |
++| ' '= DON'T use for this device |
+++=========================================================+
++*/
++
++#define SCST_CDB_MANDATORY 'M' /* mandatory */
++#define SCST_CDB_OPTIONAL 'O' /* optional */
++#define SCST_CDB_VENDOR 'V' /* vendor */
++#define SCST_CDB_RESERVED 'R' /* reserved */
++#define SCST_CDB_NOTSUPP ' ' /* don't use */
++
++struct scst_sdbops {
++ uint8_t ops; /* SCSI-2 op codes */
++ uint8_t devkey[16]; /* Key for every device type M,O,V,R
++ * type_disk devkey[0]
++ * type_tape devkey[1]
++ * type_printer devkey[2]
++				 * type_processor devkey[3]
++ * type_worm devkey[4]
++ * type_cdrom devkey[5]
++ * type_scanner devkey[6]
++ * type_mod devkey[7]
++ * type_changer devkey[8]
++ * type_commdev devkey[9]
++ * type_reserv devkey[A]
++ * type_reserv devkey[B]
++ * type_raid devkey[C]
++ * type_enclosure devkey[D]
++ * type_reserv devkey[E]
++ * type_reserv devkey[F]
++ */
++ const char *op_name; /* SCSI-2 op codes full name */
++ uint8_t direction; /* init --> target: SCST_DATA_WRITE
++ * target --> init: SCST_DATA_READ
++ */
++ uint16_t flags; /* opcode -- various flags */
++ uint8_t off; /* length offset in cdb */
++ int (*get_trans_len)(struct scst_cmd *cmd, uint8_t off);
++};
++
++static int scst_scsi_op_list[256];
++
++#define FLAG_NONE 0
++
++static const struct scst_sdbops scst_scsi_op_table[] = {
++ /*
++ * +-------------------> TYPE_IS_DISK (0)
++ * |
++ * |+------------------> TYPE_IS_TAPE (1)
++ * ||
++ * || +----------------> TYPE_IS_PROCESSOR (3)
++ * || |
++ * || | +--------------> TYPE_IS_CDROM (5)
++ * || | |
++ * || | | +------------> TYPE_IS_MOD (7)
++ * || | | |
++ * || | | |+-----------> TYPE_IS_CHANGER (8)
++ * || | | ||
++ * || | | || +-------> TYPE_IS_RAID (C)
++ * || | | || |
++ * || | | || |
++ * 0123456789ABCDEF ---> TYPE_IS_???? */
++
++ /* 6-bytes length CDB */
++ {0x00, "MMMMMMMMMMMMMMMM", "TEST UNIT READY",
++	 /* let's be HQ so we don't look dead under high load */
++ SCST_DATA_NONE, SCST_SMALL_TIMEOUT|SCST_IMPLICIT_HQ|
++ SCST_REG_RESERVE_ALLOWED|
++ SCST_WRITE_EXCL_ALLOWED|
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ SCST_TEST_IO_IN_SIRQ_ALLOWED|
++#endif
++ SCST_EXCL_ACCESS_ALLOWED,
++ 0, get_trans_len_none},
++ {0x01, " M ", "REWIND",
++ SCST_DATA_NONE, SCST_LONG_TIMEOUT|SCST_WRITE_EXCL_ALLOWED,
++ 0, get_trans_len_none},
++ {0x01, "O V OO OO ", "REZERO UNIT",
++ SCST_DATA_NONE, SCST_WRITE_EXCL_ALLOWED,
++ 0, get_trans_len_none},
++ {0x02, "VVVVVV V ", "REQUEST BLOCK ADDR",
++ SCST_DATA_NONE, SCST_SMALL_TIMEOUT, 0, get_trans_len_none},
++ {0x03, "MMMMMMMMMMMMMMMM", "REQUEST SENSE",
++ SCST_DATA_READ, SCST_SMALL_TIMEOUT|SCST_SKIP_UA|SCST_LOCAL_CMD|
++ SCST_REG_RESERVE_ALLOWED|
++ SCST_WRITE_EXCL_ALLOWED|
++ SCST_EXCL_ACCESS_ALLOWED,
++ 4, get_trans_len_1},
++ {0x04, "M O O ", "FORMAT UNIT",
++ SCST_DATA_WRITE, SCST_LONG_TIMEOUT|SCST_UNKNOWN_LENGTH|SCST_WRITE_MEDIUM,
++ 0, get_trans_len_none},
++ {0x04, " O ", "FORMAT",
++ SCST_DATA_NONE, SCST_WRITE_MEDIUM, 0, get_trans_len_none},
++ {0x05, "VMVVVV V ", "READ BLOCK LIMITS",
++ SCST_DATA_READ, SCST_SMALL_TIMEOUT|
++ SCST_REG_RESERVE_ALLOWED|
++ SCST_WRITE_EXCL_ALLOWED|
++ SCST_EXCL_ACCESS_ALLOWED,
++ 0, get_trans_len_block_limit},
++ {0x07, " O ", "INITIALIZE ELEMENT STATUS",
++ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_len_none},
++ {0x07, "OVV O OV ", "REASSIGN BLOCKS",
++ SCST_DATA_NONE, SCST_WRITE_MEDIUM, 0, get_trans_len_none},
++ {0x08, "O ", "READ(6)",
++ SCST_DATA_READ, SCST_TRANSFER_LEN_TYPE_FIXED|
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ SCST_TEST_IO_IN_SIRQ_ALLOWED|
++#endif
++ SCST_WRITE_EXCL_ALLOWED,
++ 4, get_trans_len_1_256},
++ {0x08, " MV OO OV ", "READ(6)",
++ SCST_DATA_READ, SCST_TRANSFER_LEN_TYPE_FIXED|
++ SCST_WRITE_EXCL_ALLOWED,
++ 2, get_trans_len_3},
++ {0x08, " M ", "GET MESSAGE(6)",
++ SCST_DATA_READ, FLAG_NONE, 2, get_trans_len_3},
++ {0x08, " O ", "RECEIVE",
++ SCST_DATA_READ, FLAG_NONE, 2, get_trans_len_3},
++ {0x0A, "O ", "WRITE(6)",
++ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ SCST_TEST_IO_IN_SIRQ_ALLOWED|
++#endif
++ SCST_WRITE_MEDIUM,
++ 4, get_trans_len_1_256},
++ {0x0A, " M O OV ", "WRITE(6)",
++ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|SCST_WRITE_MEDIUM,
++ 2, get_trans_len_3},
++ {0x0A, " M ", "PRINT",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0x0A, " M ", "SEND MESSAGE(6)",
++ SCST_DATA_WRITE, FLAG_NONE, 2, get_trans_len_3},
++ {0x0A, " M ", "SEND(6)",
++ SCST_DATA_WRITE, FLAG_NONE, 2, get_trans_len_3},
++ {0x0B, "O OO OV ", "SEEK(6)",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0x0B, " ", "TRACK SELECT",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0x0B, " O ", "SLEW AND PRINT",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0x0C, "VVVVVV V ", "SEEK BLOCK",
++ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_len_none},
++ {0x0D, "VVVVVV V ", "PARTITION",
++ SCST_DATA_NONE, SCST_LONG_TIMEOUT|SCST_WRITE_MEDIUM,
++ 0, get_trans_len_none},
++ {0x0F, "VOVVVV V ", "READ REVERSE",
++ SCST_DATA_READ, SCST_TRANSFER_LEN_TYPE_FIXED|
++ SCST_WRITE_EXCL_ALLOWED,
++ 2, get_trans_len_3},
++ {0x10, "VM V V ", "WRITE FILEMARKS",
++ SCST_DATA_NONE, SCST_WRITE_MEDIUM, 0, get_trans_len_none},
++ {0x10, " O O ", "SYNCHRONIZE BUFFER",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0x11, "VMVVVV ", "SPACE",
++ SCST_DATA_NONE, SCST_LONG_TIMEOUT|
++ SCST_WRITE_EXCL_ALLOWED,
++ 0, get_trans_len_none},
++ {0x12, "MMMMMMMMMMMMMMMM", "INQUIRY",
++ SCST_DATA_READ, SCST_SMALL_TIMEOUT|SCST_IMPLICIT_HQ|SCST_SKIP_UA|
++ SCST_REG_RESERVE_ALLOWED|
++ SCST_WRITE_EXCL_ALLOWED|SCST_EXCL_ACCESS_ALLOWED,
++ 4, get_trans_len_1},
++ {0x13, "VOVVVV ", "VERIFY(6)",
++ SCST_DATA_NONE, SCST_TRANSFER_LEN_TYPE_FIXED|
++ SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED|
++ SCST_WRITE_EXCL_ALLOWED,
++ 2, get_trans_len_3},
++ {0x14, "VOOVVV ", "RECOVER BUFFERED DATA",
++ SCST_DATA_READ, SCST_TRANSFER_LEN_TYPE_FIXED|
++ SCST_WRITE_EXCL_ALLOWED,
++ 2, get_trans_len_3},
++ {0x15, "OMOOOOOOOOOOOOOO", "MODE SELECT(6)",
++ SCST_DATA_WRITE, SCST_IMPLICIT_ORDERED, 4, get_trans_len_1},
++ {0x16, "MMMMMMMMMMMMMMMM", "RESERVE",
++ SCST_DATA_NONE, SCST_SMALL_TIMEOUT|SCST_LOCAL_CMD|
++ SCST_WRITE_EXCL_ALLOWED|SCST_EXCL_ACCESS_ALLOWED,
++ 0, get_trans_len_none},
++ {0x17, "MMMMMMMMMMMMMMMM", "RELEASE",
++ SCST_DATA_NONE, SCST_SMALL_TIMEOUT|SCST_LOCAL_CMD|
++ SCST_REG_RESERVE_ALLOWED|
++ SCST_WRITE_EXCL_ALLOWED|SCST_EXCL_ACCESS_ALLOWED,
++ 0, get_trans_len_none},
++ {0x18, "OOOOOOOO ", "COPY",
++ SCST_DATA_WRITE, SCST_LONG_TIMEOUT, 2, get_trans_len_3},
++ {0x19, "VMVVVV ", "ERASE",
++ SCST_DATA_NONE, SCST_LONG_TIMEOUT|SCST_WRITE_MEDIUM,
++ 0, get_trans_len_none},
++ {0x1A, "OMOOOOOOOOOOOOOO", "MODE SENSE(6)",
++ SCST_DATA_READ, SCST_SMALL_TIMEOUT, 4, get_trans_len_1},
++ {0x1B, " O ", "SCAN",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0x1B, " O ", "LOAD UNLOAD",
++ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_len_none},
++ {0x1B, " O ", "STOP PRINT",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0x1B, "O OO O O ", "START STOP UNIT",
++ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_len_start_stop},
++ {0x1C, "OOOOOOOOOOOOOOOO", "RECEIVE DIAGNOSTIC RESULTS",
++ SCST_DATA_READ, FLAG_NONE, 3, get_trans_len_2},
++ {0x1D, "MMMMMMMMMMMMMMMM", "SEND DIAGNOSTIC",
++ SCST_DATA_WRITE, FLAG_NONE, 4, get_trans_len_1},
++ {0x1E, "OOOOOOOOOOOOOOOO", "PREVENT ALLOW MEDIUM REMOVAL",
++ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0,
++ get_trans_len_prevent_allow_medium_removal},
++ {0x1F, " O ", "PORT STATUS",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++
++ /* 10-bytes length CDB */
++ {0x23, "V VV V ", "READ FORMAT CAPACITY",
++ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
++ {0x24, "V VVM ", "SET WINDOW",
++ SCST_DATA_WRITE, FLAG_NONE, 6, get_trans_len_3},
++ {0x25, "M MM M ", "READ CAPACITY",
++ SCST_DATA_READ, SCST_IMPLICIT_HQ|
++ SCST_REG_RESERVE_ALLOWED|
++ SCST_WRITE_EXCL_ALLOWED|
++ SCST_EXCL_ACCESS_ALLOWED,
++ 0, get_trans_len_read_capacity},
++ {0x25, " O ", "GET WINDOW",
++ SCST_DATA_READ, FLAG_NONE, 6, get_trans_len_3},
++ {0x28, "M MMMM ", "READ(10)",
++ SCST_DATA_READ, SCST_TRANSFER_LEN_TYPE_FIXED|
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ SCST_TEST_IO_IN_SIRQ_ALLOWED|
++#endif
++ SCST_WRITE_EXCL_ALLOWED,
++ 7, get_trans_len_2},
++ {0x28, " O ", "GET MESSAGE(10)",
++ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
++ {0x29, "V VV O ", "READ GENERATION",
++ SCST_DATA_READ, FLAG_NONE, 8, get_trans_len_1},
++ {0x2A, "O MO M ", "WRITE(10)",
++ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ SCST_TEST_IO_IN_SIRQ_ALLOWED|
++#endif
++ SCST_WRITE_MEDIUM,
++ 7, get_trans_len_2},
++ {0x2A, " O ", "SEND MESSAGE(10)",
++ SCST_DATA_WRITE, FLAG_NONE, 7, get_trans_len_2},
++ {0x2A, " O ", "SEND(10)",
++ SCST_DATA_WRITE, FLAG_NONE, 7, get_trans_len_2},
++ {0x2B, " O ", "LOCATE",
++ SCST_DATA_NONE, SCST_LONG_TIMEOUT|
++ SCST_WRITE_EXCL_ALLOWED,
++ 0, get_trans_len_none},
++ {0x2B, " O ", "POSITION TO ELEMENT",
++ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_len_none},
++ {0x2B, "O OO O ", "SEEK(10)",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0x2C, "V O O ", "ERASE(10)",
++ SCST_DATA_NONE, SCST_LONG_TIMEOUT|SCST_WRITE_MEDIUM,
++ 0, get_trans_len_none},
++ {0x2D, "V O O ", "READ UPDATED BLOCK",
++ SCST_DATA_READ, SCST_TRANSFER_LEN_TYPE_FIXED, 0, get_trans_len_single},
++ {0x2E, "O OO O ", "WRITE AND VERIFY(10)",
++ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|SCST_WRITE_MEDIUM,
++ 7, get_trans_len_2},
++ {0x2F, "O OO O ", "VERIFY(10)",
++ SCST_DATA_NONE, SCST_TRANSFER_LEN_TYPE_FIXED|
++ SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED|
++ SCST_WRITE_EXCL_ALLOWED,
++ 7, get_trans_len_2},
++ {0x33, "O OO O ", "SET LIMITS(10)",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0x34, " O ", "READ POSITION",
++ SCST_DATA_READ, SCST_SMALL_TIMEOUT|
++ SCST_WRITE_EXCL_ALLOWED,
++ 7, get_trans_len_read_pos},
++ {0x34, " O ", "GET DATA BUFFER STATUS",
++ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
++ {0x34, "O OO O ", "PRE-FETCH",
++ SCST_DATA_NONE, SCST_WRITE_EXCL_ALLOWED,
++ 0, get_trans_len_none},
++ {0x35, "O OO O ", "SYNCHRONIZE CACHE",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0x36, "O OO O ", "LOCK UNLOCK CACHE",
++ SCST_DATA_NONE, SCST_WRITE_EXCL_ALLOWED,
++ 0, get_trans_len_none},
++ {0x37, "O O ", "READ DEFECT DATA(10)",
++ SCST_DATA_READ, SCST_WRITE_EXCL_ALLOWED,
++ 8, get_trans_len_1},
++ {0x37, " O ", "INIT ELEMENT STATUS WRANGE",
++ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_len_none},
++ {0x38, " O O ", "MEDIUM SCAN",
++ SCST_DATA_READ, FLAG_NONE, 8, get_trans_len_1},
++ {0x39, "OOOOOOOO ", "COMPARE",
++ SCST_DATA_WRITE, FLAG_NONE, 3, get_trans_len_3},
++ {0x3A, "OOOOOOOO ", "COPY AND VERIFY",
++ SCST_DATA_WRITE, FLAG_NONE, 3, get_trans_len_3},
++ {0x3B, "OOOOOOOOOOOOOOOO", "WRITE BUFFER",
++ SCST_DATA_WRITE, SCST_SMALL_TIMEOUT, 6, get_trans_len_3},
++ {0x3C, "OOOOOOOOOOOOOOOO", "READ BUFFER",
++ SCST_DATA_READ, SCST_SMALL_TIMEOUT, 6, get_trans_len_3},
++ {0x3D, " O O ", "UPDATE BLOCK",
++ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED,
++ 0, get_trans_len_single},
++ {0x3E, "O OO O ", "READ LONG",
++ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
++ {0x3F, "O O O ", "WRITE LONG",
++ SCST_DATA_WRITE, SCST_WRITE_MEDIUM, 7, get_trans_len_2},
++ {0x40, "OOOOOOOOOO ", "CHANGE DEFINITION",
++ SCST_DATA_WRITE, SCST_SMALL_TIMEOUT, 8, get_trans_len_1},
++ {0x41, "O O ", "WRITE SAME",
++ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|SCST_WRITE_MEDIUM,
++ 0, get_trans_len_single},
++ {0x42, " O ", "READ SUB-CHANNEL",
++ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
++ {0x43, " O ", "READ TOC/PMA/ATIP",
++ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
++ {0x44, " M ", "REPORT DENSITY SUPPORT",
++ SCST_DATA_READ, SCST_REG_RESERVE_ALLOWED|
++ SCST_WRITE_EXCL_ALLOWED|
++ SCST_EXCL_ACCESS_ALLOWED,
++ 7, get_trans_len_2},
++ {0x44, " O ", "READ HEADER",
++ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
++ {0x45, " O ", "PLAY AUDIO(10)",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0x46, " O ", "GET CONFIGURATION",
++ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
++ {0x47, " O ", "PLAY AUDIO MSF",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0x48, " O ", "PLAY AUDIO TRACK INDEX",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0x49, " O ", "PLAY TRACK RELATIVE(10)",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0x4A, " O ", "GET EVENT STATUS NOTIFICATION",
++ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
++ {0x4B, " O ", "PAUSE/RESUME",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0x4C, "OOOOOOOOOOOOOOOO", "LOG SELECT",
++ SCST_DATA_WRITE, SCST_IMPLICIT_ORDERED, 7, get_trans_len_2},
++ {0x4D, "OOOOOOOOOOOOOOOO", "LOG SENSE",
++ SCST_DATA_READ, SCST_SMALL_TIMEOUT|
++ SCST_REG_RESERVE_ALLOWED|
++ SCST_WRITE_EXCL_ALLOWED|
++ SCST_EXCL_ACCESS_ALLOWED,
++ 7, get_trans_len_2},
++ {0x4E, " O ", "STOP PLAY/SCAN",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0x50, " ", "XDWRITE",
++ SCST_DATA_NONE, SCST_WRITE_MEDIUM, 0, get_trans_len_none},
++ {0x51, " O ", "READ DISC INFORMATION",
++ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
++ {0x51, " ", "XPWRITE",
++ SCST_DATA_NONE, SCST_WRITE_MEDIUM, 0, get_trans_len_none},
++ {0x52, " O ", "READ TRACK INFORMATION",
++ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
++ {0x53, "O ", "XDWRITEREAD(10)",
++ SCST_DATA_READ|SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|
++ SCST_WRITE_MEDIUM,
++ 7, get_bidi_trans_len_2},
++ {0x53, " O ", "RESERVE TRACK",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0x54, " O ", "SEND OPC INFORMATION",
++ SCST_DATA_WRITE, FLAG_NONE, 7, get_trans_len_2},
++ {0x55, "OOOOOOOOOOOOOOOO", "MODE SELECT(10)",
++ SCST_DATA_WRITE, SCST_IMPLICIT_ORDERED, 7, get_trans_len_2},
++ {0x56, "OOOOOOOOOOOOOOOO", "RESERVE(10)",
++ SCST_DATA_NONE, SCST_SMALL_TIMEOUT|SCST_LOCAL_CMD,
++ 0, get_trans_len_none},
++ {0x57, "OOOOOOOOOOOOOOOO", "RELEASE(10)",
++ SCST_DATA_NONE, SCST_SMALL_TIMEOUT|SCST_LOCAL_CMD|
++ SCST_REG_RESERVE_ALLOWED,
++ 0, get_trans_len_none},
++ {0x58, " O ", "REPAIR TRACK",
++ SCST_DATA_NONE, SCST_WRITE_MEDIUM, 0, get_trans_len_none},
++ {0x5A, "OOOOOOOOOOOOOOOO", "MODE SENSE(10)",
++ SCST_DATA_READ, SCST_SMALL_TIMEOUT, 7, get_trans_len_2},
++ {0x5B, " O ", "CLOSE TRACK/SESSION",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0x5C, " O ", "READ BUFFER CAPACITY",
++ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
++ {0x5D, " O ", "SEND CUE SHEET",
++ SCST_DATA_WRITE, FLAG_NONE, 6, get_trans_len_3},
++ {0x5E, "OOOOO OOOO ", "PERSISTENT RESERV IN",
++ SCST_DATA_READ, SCST_SMALL_TIMEOUT|
++ SCST_LOCAL_CMD|
++ SCST_WRITE_EXCL_ALLOWED|
++ SCST_EXCL_ACCESS_ALLOWED,
++ 5, get_trans_len_4},
++ {0x5F, "OOOOO OOOO ", "PERSISTENT RESERV OUT",
++ SCST_DATA_WRITE, SCST_SMALL_TIMEOUT|
++ SCST_LOCAL_CMD|
++ SCST_WRITE_EXCL_ALLOWED|
++ SCST_EXCL_ACCESS_ALLOWED,
++ 5, get_trans_len_4},
++
++ /* 16-bytes length CDB */
++ {0x80, "O OO O ", "XDWRITE EXTENDED",
++ SCST_DATA_NONE, SCST_WRITE_MEDIUM, 0, get_trans_len_none},
++ {0x80, " M ", "WRITE FILEMARKS",
++ SCST_DATA_NONE, SCST_WRITE_MEDIUM, 0, get_trans_len_none},
++ {0x81, "O OO O ", "REBUILD",
++ SCST_DATA_WRITE, SCST_WRITE_MEDIUM, 10, get_trans_len_4},
++ {0x82, "O OO O ", "REGENERATE",
++ SCST_DATA_WRITE, SCST_WRITE_MEDIUM, 10, get_trans_len_4},
++ {0x83, "OOOOOOOOOOOOOOOO", "EXTENDED COPY",
++ SCST_DATA_WRITE, SCST_WRITE_MEDIUM, 10, get_trans_len_4},
++ {0x84, "OOOOOOOOOOOOOOOO", "RECEIVE COPY RESULT",
++ SCST_DATA_WRITE, FLAG_NONE, 10, get_trans_len_4},
++ {0x86, "OOOOOOOOOO ", "ACCESS CONTROL IN",
++ SCST_DATA_NONE, SCST_REG_RESERVE_ALLOWED|
++ SCST_WRITE_EXCL_ALLOWED|
++ SCST_EXCL_ACCESS_ALLOWED,
++ 0, get_trans_len_none},
++ {0x87, "OOOOOOOOOO ", "ACCESS CONTROL OUT",
++ SCST_DATA_NONE, SCST_REG_RESERVE_ALLOWED|
++ SCST_WRITE_EXCL_ALLOWED|
++ SCST_EXCL_ACCESS_ALLOWED,
++ 0, get_trans_len_none},
++ {0x88, "M MMMM ", "READ(16)",
++ SCST_DATA_READ, SCST_TRANSFER_LEN_TYPE_FIXED|
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ SCST_TEST_IO_IN_SIRQ_ALLOWED|
++#endif
++ SCST_WRITE_EXCL_ALLOWED,
++ 10, get_trans_len_4},
++ {0x8A, "O OO O ", "WRITE(16)",
++ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ SCST_TEST_IO_IN_SIRQ_ALLOWED|
++#endif
++ SCST_WRITE_MEDIUM,
++ 10, get_trans_len_4},
++ {0x8C, "OOOOOOOOOO ", "READ ATTRIBUTE",
++ SCST_DATA_READ, FLAG_NONE, 10, get_trans_len_4},
++ {0x8D, "OOOOOOOOOO ", "WRITE ATTRIBUTE",
++ SCST_DATA_WRITE, SCST_WRITE_MEDIUM, 10, get_trans_len_4},
++ {0x8E, "O OO O ", "WRITE AND VERIFY(16)",
++ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|SCST_WRITE_MEDIUM,
++ 10, get_trans_len_4},
++ {0x8F, "O OO O ", "VERIFY(16)",
++ SCST_DATA_NONE, SCST_TRANSFER_LEN_TYPE_FIXED|
++ SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED|
++ SCST_WRITE_EXCL_ALLOWED,
++ 10, get_trans_len_4},
++ {0x90, "O OO O ", "PRE-FETCH(16)",
++ SCST_DATA_NONE, SCST_WRITE_EXCL_ALLOWED,
++ 0, get_trans_len_none},
++ {0x91, "O OO O ", "SYNCHRONIZE CACHE(16)",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0x91, " M ", "SPACE(16)",
++ SCST_DATA_NONE, SCST_LONG_TIMEOUT|
++ SCST_WRITE_EXCL_ALLOWED,
++ 0, get_trans_len_none},
++ {0x92, "O OO O ", "LOCK UNLOCK CACHE(16)",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0x92, " O ", "LOCATE(16)",
++ SCST_DATA_NONE, SCST_LONG_TIMEOUT|
++ SCST_WRITE_EXCL_ALLOWED,
++ 0, get_trans_len_none},
++ {0x93, "O O ", "WRITE SAME(16)",
++ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|SCST_WRITE_MEDIUM,
++ 10, get_trans_len_4},
++ {0x93, " M ", "ERASE(16)",
++ SCST_DATA_NONE, SCST_LONG_TIMEOUT|SCST_WRITE_MEDIUM,
++ 0, get_trans_len_none},
++ {0x9E, "O ", "SERVICE ACTION IN",
++ SCST_DATA_READ, FLAG_NONE, 0, get_trans_len_serv_act_in},
++
++ /* 12-bytes length CDB */
++ {0xA0, "VVVVVVVVVV M ", "REPORT LUNS",
++ SCST_DATA_READ, SCST_SMALL_TIMEOUT|SCST_IMPLICIT_HQ|SCST_SKIP_UA|
++ SCST_FULLY_LOCAL_CMD|SCST_LOCAL_CMD|
++ SCST_REG_RESERVE_ALLOWED|
++ SCST_WRITE_EXCL_ALLOWED|SCST_EXCL_ACCESS_ALLOWED,
++ 6, get_trans_len_4},
++ {0xA1, " O ", "BLANK",
++ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_len_none},
++ {0xA3, " O ", "SEND KEY",
++ SCST_DATA_WRITE, FLAG_NONE, 8, get_trans_len_2},
++ {0xA3, "OOOOO OOOO ", "REPORT DEVICE IDENTIDIER",
++ SCST_DATA_READ, SCST_REG_RESERVE_ALLOWED|
++ SCST_WRITE_EXCL_ALLOWED|SCST_EXCL_ACCESS_ALLOWED,
++ 6, get_trans_len_4},
++ {0xA3, " M ", "MAINTENANCE(IN)",
++ SCST_DATA_READ, FLAG_NONE, 6, get_trans_len_4},
++ {0xA4, " O ", "REPORT KEY",
++ SCST_DATA_READ, FLAG_NONE, 8, get_trans_len_2},
++ {0xA4, " O ", "MAINTENANCE(OUT)",
++ SCST_DATA_WRITE, FLAG_NONE, 6, get_trans_len_4},
++ {0xA5, " M ", "MOVE MEDIUM",
++ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_len_none},
++ {0xA5, " O ", "PLAY AUDIO(12)",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0xA6, " O O ", "EXCHANGE/LOAD/UNLOAD MEDIUM",
++ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_len_none},
++ {0xA7, " O ", "SET READ AHEAD",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0xA8, " O ", "GET MESSAGE(12)",
++ SCST_DATA_READ, FLAG_NONE, 6, get_trans_len_4},
++ {0xA8, "O OO O ", "READ(12)",
++ SCST_DATA_READ, SCST_TRANSFER_LEN_TYPE_FIXED|
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ SCST_TEST_IO_IN_SIRQ_ALLOWED|
++#endif
++ SCST_WRITE_EXCL_ALLOWED,
++ 6, get_trans_len_4},
++ {0xA9, " O ", "PLAY TRACK RELATIVE(12)",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0xAA, "O OO O ", "WRITE(12)",
++ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ SCST_TEST_IO_IN_SIRQ_ALLOWED|
++#endif
++ SCST_WRITE_MEDIUM,
++ 6, get_trans_len_4},
++ {0xAA, " O ", "SEND MESSAGE(12)",
++ SCST_DATA_WRITE, FLAG_NONE, 6, get_trans_len_4},
++ {0xAC, " O ", "ERASE(12)",
++ SCST_DATA_NONE, SCST_WRITE_MEDIUM, 0, get_trans_len_none},
++ {0xAC, " M ", "GET PERFORMANCE",
++ SCST_DATA_READ, SCST_UNKNOWN_LENGTH, 0, get_trans_len_none},
++ {0xAD, " O ", "READ DVD STRUCTURE",
++ SCST_DATA_READ, FLAG_NONE, 8, get_trans_len_2},
++ {0xAE, "O OO O ", "WRITE AND VERIFY(12)",
++ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|SCST_WRITE_MEDIUM,
++ 6, get_trans_len_4},
++ {0xAF, "O OO O ", "VERIFY(12)",
++ SCST_DATA_NONE, SCST_TRANSFER_LEN_TYPE_FIXED|
++ SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED|
++ SCST_WRITE_EXCL_ALLOWED,
++ 6, get_trans_len_4},
++#if 0 /* No need to support at all */
++ {0xB0, " OO O ", "SEARCH DATA HIGH(12)",
++ SCST_DATA_WRITE, FLAG_NONE, 9, get_trans_len_1},
++ {0xB1, " OO O ", "SEARCH DATA EQUAL(12)",
++ SCST_DATA_WRITE, FLAG_NONE, 9, get_trans_len_1},
++ {0xB2, " OO O ", "SEARCH DATA LOW(12)",
++ SCST_DATA_WRITE, FLAG_NONE, 9, get_trans_len_1},
++#endif
++ {0xB3, " OO O ", "SET LIMITS(12)",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0xB5, " O ", "REQUEST VOLUME ELEMENT ADDRESS",
++ SCST_DATA_READ, FLAG_NONE, 9, get_trans_len_1},
++ {0xB6, " O ", "SEND VOLUME TAG",
++ SCST_DATA_WRITE, FLAG_NONE, 9, get_trans_len_1},
++ {0xB6, " M ", "SET STREAMING",
++ SCST_DATA_WRITE, FLAG_NONE, 9, get_trans_len_2},
++ {0xB7, " O ", "READ DEFECT DATA(12)",
++ SCST_DATA_READ, SCST_WRITE_EXCL_ALLOWED,
++ 9, get_trans_len_1},
++ {0xB8, " O ", "READ ELEMENT STATUS",
++ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_3_read_elem_stat},
++ {0xB9, " O ", "READ CD MSF",
++ SCST_DATA_READ, SCST_UNKNOWN_LENGTH, 0, get_trans_len_none},
++ {0xBA, " O ", "SCAN",
++ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_len_none},
++ {0xBA, " O ", "REDUNDANCY GROUP(IN)",
++ SCST_DATA_READ, FLAG_NONE, 6, get_trans_len_4},
++ {0xBB, " O ", "SET SPEED",
++ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
++ {0xBB, " O ", "REDUNDANCY GROUP(OUT)",
++ SCST_DATA_WRITE, FLAG_NONE, 6, get_trans_len_4},
++ {0xBC, " O ", "SPARE(IN)",
++ SCST_DATA_READ, FLAG_NONE, 6, get_trans_len_4},
++ {0xBD, " O ", "MECHANISM STATUS",
++ SCST_DATA_READ, FLAG_NONE, 8, get_trans_len_2},
++ {0xBD, " O ", "SPARE(OUT)",
++ SCST_DATA_WRITE, FLAG_NONE, 6, get_trans_len_4},
++ {0xBE, " O ", "READ CD",
++ SCST_DATA_READ, SCST_TRANSFER_LEN_TYPE_FIXED, 6, get_trans_len_3},
++ {0xBE, " O ", "VOLUME SET(IN)",
++ SCST_DATA_READ, FLAG_NONE, 6, get_trans_len_4},
++ {0xBF, " O ", "SEND DVD STRUCTUE",
++ SCST_DATA_WRITE, FLAG_NONE, 8, get_trans_len_2},
++ {0xBF, " O ", "VOLUME SET(OUT)",
++ SCST_DATA_WRITE, FLAG_NONE, 6, get_trans_len_4},
++ {0xE7, " V ", "INIT ELEMENT STATUS WRANGE",
++ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_cdb_len_10}
++};
++
++#define SCST_CDB_TBL_SIZE ((int)ARRAY_SIZE(scst_scsi_op_table))
++
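A brief note on reading the table: each devkey string has 16 characters indexed by SCSI peripheral device type, exactly as the struct scst_sdbops comment above lays out (0 = disk, 1 = tape, ..., C = RAID, D = enclosure). The helper below is only an illustrative sketch, not part of the patch, showing how such an entry could be interpreted:

    /* Hypothetical illustration: is the opcode described by "op" defined
     * (mandatory, optional or vendor-specific) for device type "type"? */
    static bool scst_op_defined_for_type(const struct scst_sdbops *op, int type)
    {
            return (type >= 0) && (type < 16) &&
                   (op->devkey[type] != SCST_CDB_NOTSUPP) &&
                   (op->devkey[type] != SCST_CDB_RESERVED);
    }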
++static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
++static void scst_check_internal_sense(struct scst_device *dev, int result,
++ uint8_t *sense, int sense_len);
++static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
++ int flags);
++static void __scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
++ const uint8_t *sense, int sense_len, int flags);
++static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
++ const uint8_t *sense, int sense_len, int flags);
++static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev);
++static void scst_release_space(struct scst_cmd *cmd);
++static void scst_unblock_cmds(struct scst_device *dev);
++static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);
++static int scst_alloc_add_tgt_dev(struct scst_session *sess,
++ struct scst_acg_dev *acg_dev, struct scst_tgt_dev **out_tgt_dev);
++static void scst_tgt_retry_timer_fn(unsigned long arg);
++
++#ifdef CONFIG_SCST_DEBUG_TM
++static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev);
++static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev);
++#else
++static inline void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev) {}
++static inline void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev) {}
++#endif /* CONFIG_SCST_DEBUG_TM */
++
++/**
++ * scst_alloc_sense() - allocate sense buffer for command
++ *
++ * Allocates, if necessary, a sense buffer for the command. Returns 0 on success
++ * and an error code otherwise. Parameter "atomic" should be non-0 if the
++ * function is called in atomic context.
++ */
++int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
++{
++ int res = 0;
++ gfp_t gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);
++
++ TRACE_ENTRY();
++
++ if (cmd->sense != NULL)
++ goto memzero;
++
++ cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
++ if (cmd->sense == NULL) {
++ PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
++ "The sense data will be lost!!", cmd->cdb[0]);
++ res = -ENOMEM;
++ goto out;
++ }
++
++ cmd->sense_buflen = SCST_SENSE_BUFFERSIZE;
++
++memzero:
++ cmd->sense_valid_len = 0;
++ memset(cmd->sense, 0, cmd->sense_buflen);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL(scst_alloc_sense);
++
++/**
++ * scst_alloc_set_sense() - allocate and fill sense buffer for command
++ *
++ * Allocates, if necessary, a sense buffer for the command and copies
++ * data into it from the supplied sense buffer. Returns 0 on success
++ * and an error code otherwise.
++ */
++int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
++ const uint8_t *sense, unsigned int len)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ /*
++ * We don't check here if the existing sense is valid or not, because
++ * we suppose the caller did it based on cmd->status.
++ */
++
++ res = scst_alloc_sense(cmd, atomic);
++ if (res != 0) {
++ PRINT_BUFFER("Lost sense", sense, len);
++ goto out;
++ }
++
++ cmd->sense_valid_len = len;
++ if (cmd->sense_buflen < len) {
++		PRINT_WARNING("Sense truncated (needed %d), consider increasing "
++			"SCST_SENSE_BUFFERSIZE. Op: %x", len, cmd->cdb[0]);
++ cmd->sense_valid_len = cmd->sense_buflen;
++ }
++
++ memcpy(cmd->sense, sense, cmd->sense_valid_len);
++ TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_valid_len);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL(scst_alloc_set_sense);
++
++/**
++ * scst_set_cmd_error_status() - set error SCSI status
++ * @cmd: SCST command
++ * @status: SCSI status to set
++ *
++ * Description:
++ *    Sets error SCSI status in the command and prepares it to be returned.
++ * Returns 0 on success, error code otherwise.
++ */
++int scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ if (cmd->status != 0) {
++ TRACE_MGMT_DBG("cmd %p already has status %x set", cmd,
++ cmd->status);
++ res = -EEXIST;
++ goto out;
++ }
++
++ cmd->status = status;
++ cmd->host_status = DID_OK;
++
++ cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
++ cmd->dbl_ua_orig_data_direction = cmd->data_direction;
++
++ cmd->data_direction = SCST_DATA_NONE;
++ cmd->resp_data_len = 0;
++ cmd->resid_possible = 1;
++ cmd->is_send_status = 1;
++
++ cmd->completed = 1;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL(scst_set_cmd_error_status);
++
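A minimal sketch of calling scst_set_cmd_error_status() directly from an assumed device-handler error path; note that scst_set_busy() further below is the usual wrapper that chooses between BUSY and TASK SET FULL:

    int rc;

    /* Hypothetical fragment: report plain BUSY without sense data. */
    rc = scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
    if (rc == -EEXIST) {
            /* Some other status was set earlier; keep it. */
    }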
++static int scst_set_lun_not_supported_request_sense(struct scst_cmd *cmd,
++ int key, int asc, int ascq)
++{
++ int res;
++ int sense_len, len;
++ struct scatterlist *sg;
++
++ TRACE_ENTRY();
++
++ if (cmd->status != 0) {
++ TRACE_MGMT_DBG("cmd %p already has status %x set", cmd,
++ cmd->status);
++ res = -EEXIST;
++ goto out;
++ }
++
++ if ((cmd->sg != NULL) && SCST_SENSE_VALID(sg_virt(cmd->sg))) {
++ TRACE_MGMT_DBG("cmd %p already has sense set", cmd);
++ res = -EEXIST;
++ goto out;
++ }
++
++ if (cmd->sg == NULL) {
++ /*
++		 * If the target driver prepares the data buffer using the
++		 * alloc_data_buf() callback, it is responsible for copying
++		 * the sense to its buffer in xmit_response().
++ */
++ if (cmd->tgt_data_buf_alloced && (cmd->tgt_sg != NULL)) {
++ cmd->sg = cmd->tgt_sg;
++ cmd->sg_cnt = cmd->tgt_sg_cnt;
++ TRACE_MEM("Tgt sg used for sense for cmd %p", cmd);
++ goto go;
++ }
++
++ if (cmd->bufflen == 0)
++ cmd->bufflen = cmd->cdb[4];
++
++ cmd->sg = scst_alloc(cmd->bufflen, GFP_ATOMIC, &cmd->sg_cnt);
++ if (cmd->sg == NULL) {
++ PRINT_ERROR("Unable to alloc sg for REQUEST SENSE"
++ "(sense %x/%x/%x)", key, asc, ascq);
++ res = 1;
++ goto out;
++ }
++
++ TRACE_MEM("sg %p alloced for sense for cmd %p (cnt %d, "
++ "len %d)", cmd->sg, cmd, cmd->sg_cnt, cmd->bufflen);
++ }
++
++go:
++ sg = cmd->sg;
++ len = sg->length;
++
++ TRACE_MEM("sg %p (len %d) for sense for cmd %p", sg, len, cmd);
++
++ sense_len = scst_set_sense(sg_virt(sg), len, cmd->cdb[1] & 1,
++ key, asc, ascq);
++
++ TRACE_BUFFER("Sense set", sg_virt(sg), sense_len);
++
++ cmd->data_direction = SCST_DATA_READ;
++ scst_set_resp_data_len(cmd, sense_len);
++
++ res = 0;
++ cmd->completed = 1;
++ cmd->resid_possible = 1;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_set_lun_not_supported_inquiry(struct scst_cmd *cmd)
++{
++ int res;
++ uint8_t *buf;
++ struct scatterlist *sg;
++ int len;
++
++ TRACE_ENTRY();
++
++ if (cmd->status != 0) {
++ TRACE_MGMT_DBG("cmd %p already has status %x set", cmd,
++ cmd->status);
++ res = -EEXIST;
++ goto out;
++ }
++
++ if (cmd->sg == NULL) {
++ /*
++		 * If the target driver prepares the data buffer using the
++		 * alloc_data_buf() callback, it is responsible for copying
++		 * the sense to its buffer in xmit_response().
++ */
++ if (cmd->tgt_data_buf_alloced && (cmd->tgt_sg != NULL)) {
++ cmd->sg = cmd->tgt_sg;
++ cmd->sg_cnt = cmd->tgt_sg_cnt;
++ TRACE_MEM("Tgt used for INQUIRY for not supported "
++ "LUN for cmd %p", cmd);
++ goto go;
++ }
++
++ if (cmd->bufflen == 0)
++ cmd->bufflen = min_t(int, 36, (cmd->cdb[3] << 8) | cmd->cdb[4]);
++
++ cmd->sg = scst_alloc(cmd->bufflen, GFP_ATOMIC, &cmd->sg_cnt);
++ if (cmd->sg == NULL) {
++ PRINT_ERROR("%s", "Unable to alloc sg for INQUIRY "
++ "for not supported LUN");
++ res = 1;
++ goto out;
++ }
++
++ TRACE_MEM("sg %p alloced for INQUIRY for not supported LUN for "
++ "cmd %p (cnt %d, len %d)", cmd->sg, cmd, cmd->sg_cnt,
++ cmd->bufflen);
++ }
++
++go:
++ sg = cmd->sg;
++ len = sg->length;
++
++ TRACE_MEM("sg %p (len %d) for INQUIRY for cmd %p", sg, len, cmd);
++
++ buf = sg_virt(sg);
++ len = min_t(int, 36, len);
++
++ memset(buf, 0, len);
++ buf[0] = 0x7F; /* Peripheral qualifier 011b, Peripheral device type 1Fh */
++ buf[4] = len - 4;
++
++ TRACE_BUFFER("INQUIRY for not supported LUN set", buf, len);
++
++ cmd->data_direction = SCST_DATA_READ;
++ scst_set_resp_data_len(cmd, len);
++
++ res = 0;
++ cmd->completed = 1;
++ cmd->resid_possible = 1;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/**
++ * scst_set_cmd_error() - set error in the command and fill the sense buffer.
++ *
++ * Sets error in the command and fills the sense buffer. Returns 0 on success,
++ * error code otherwise.
++ */
++int scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ /*
++	 * LOGICAL UNIT NOT SUPPORTED needs special handling for
++	 * REQUEST SENSE and INQUIRY.
++ */
++ if ((key == ILLEGAL_REQUEST) && (asc == 0x25) && (ascq == 0)) {
++ if (cmd->cdb[0] == REQUEST_SENSE)
++ res = scst_set_lun_not_supported_request_sense(cmd,
++ key, asc, ascq);
++ else if (cmd->cdb[0] == INQUIRY)
++ res = scst_set_lun_not_supported_inquiry(cmd);
++ else
++ goto do_sense;
++
++ if (res > 0)
++ goto do_sense;
++ else
++ goto out;
++ }
++
++do_sense:
++ res = scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
++ if (res != 0)
++ goto out;
++
++ res = scst_alloc_sense(cmd, 1);
++ if (res != 0) {
++ PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
++ key, asc, ascq);
++ goto out;
++ }
++
++ cmd->sense_valid_len = scst_set_sense(cmd->sense, cmd->sense_buflen,
++ scst_get_cmd_dev_d_sense(cmd), key, asc, ascq);
++ TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_valid_len);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL(scst_set_cmd_error);
++
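A minimal sketch of the typical call from a device handler that rejects a CDB it cannot handle; the 0x24/0x00 additional sense code (INVALID FIELD IN CDB) and the surrounding callback are assumptions for illustration only:

    /* Hypothetical fragment: fail the command with
     * ILLEGAL REQUEST / INVALID FIELD IN CDB.  The caller would then
     * normally complete the command abnormally, see
     * scst_set_cmd_abnormal_done_state() later in this file. */
    scst_set_cmd_error(cmd, ILLEGAL_REQUEST, 0x24, 0x00);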
++/**
++ * scst_set_sense() - set sense from KEY/ASC/ASCQ numbers
++ *
++ * Sets the corresponding fields in the sense buffer taking sense type
++ * into account. Returns resulting sense length.
++ */
++int scst_set_sense(uint8_t *buffer, int len, bool d_sense,
++ int key, int asc, int ascq)
++{
++ int res;
++
++ BUG_ON(len == 0);
++
++ memset(buffer, 0, len);
++
++ if (d_sense) {
++ /* Descriptor format */
++ if (len < 8) {
++ PRINT_ERROR("Length %d of sense buffer too small to "
++ "fit sense %x:%x:%x", len, key, asc, ascq);
++ }
++
++ buffer[0] = 0x72; /* Response Code */
++ if (len > 1)
++ buffer[1] = key; /* Sense Key */
++ if (len > 2)
++ buffer[2] = asc; /* ASC */
++ if (len > 3)
++ buffer[3] = ascq; /* ASCQ */
++ res = 8;
++ } else {
++ /* Fixed format */
++ if (len < 18) {
++ PRINT_ERROR("Length %d of sense buffer too small to "
++ "fit sense %x:%x:%x", len, key, asc, ascq);
++ }
++
++ buffer[0] = 0x70; /* Response Code */
++ if (len > 2)
++ buffer[2] = key; /* Sense Key */
++ if (len > 7)
++ buffer[7] = 0x0a; /* Additional Sense Length */
++ if (len > 12)
++ buffer[12] = asc; /* ASC */
++ if (len > 13)
++ buffer[13] = ascq; /* ASCQ */
++ res = 18;
++ }
++
++ TRACE_BUFFER("Sense set", buffer, res);
++ return res;
++}
++EXPORT_SYMBOL(scst_set_sense);
++
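A short sketch of building sense into a local buffer with scst_set_sense(); the POWER ON/RESET additional sense code 0x29/0x00 is only an illustrative choice:

    uint8_t sense[SCST_STANDARD_SENSE_LEN];
    int sl;

    /* Fixed format (d_sense == false) yields sl == 18; with
     * d_sense == true the descriptor format is built and sl == 8. */
    sl = scst_set_sense(sense, sizeof(sense), false,
                        UNIT_ATTENTION, 0x29, 0x00);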
++/**
++ * scst_analyze_sense() - analyze sense
++ *
++ * Returns true if the sense matches (key, asc, ascq) and false otherwise.
++ * valid_mask is a combination of SCST_SENSE_*_VALID constants indicating
++ * which of the (key, asc, ascq) values are valid.
++ */
++bool scst_analyze_sense(const uint8_t *sense, int len, unsigned int valid_mask,
++ int key, int asc, int ascq)
++{
++ bool res = false;
++
++ /* Response Code */
++ if ((sense[0] == 0x70) || (sense[0] == 0x71)) {
++ /* Fixed format */
++
++ /* Sense Key */
++ if (valid_mask & SCST_SENSE_KEY_VALID) {
++ if (len < 3)
++ goto out;
++ if (sense[2] != key)
++ goto out;
++ }
++
++ /* ASC */
++ if (valid_mask & SCST_SENSE_ASC_VALID) {
++ if (len < 13)
++ goto out;
++ if (sense[12] != asc)
++ goto out;
++ }
++
++ /* ASCQ */
++ if (valid_mask & SCST_SENSE_ASCQ_VALID) {
++ if (len < 14)
++ goto out;
++ if (sense[13] != ascq)
++ goto out;
++ }
++ } else if ((sense[0] == 0x72) || (sense[0] == 0x73)) {
++ /* Descriptor format */
++
++ /* Sense Key */
++ if (valid_mask & SCST_SENSE_KEY_VALID) {
++ if (len < 2)
++ goto out;
++ if (sense[1] != key)
++ goto out;
++ }
++
++ /* ASC */
++ if (valid_mask & SCST_SENSE_ASC_VALID) {
++ if (len < 3)
++ goto out;
++ if (sense[2] != asc)
++ goto out;
++ }
++
++ /* ASCQ */
++ if (valid_mask & SCST_SENSE_ASCQ_VALID) {
++ if (len < 4)
++ goto out;
++ if (sense[3] != ascq)
++ goto out;
++ }
++ } else
++ goto out;
++
++ res = true;
++
++out:
++ TRACE_EXIT_RES((int)res);
++ return res;
++}
++EXPORT_SYMBOL(scst_analyze_sense);
++
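A short sketch of querying a sense buffer with scst_analyze_sense(); the sense/sense_len variables come from an assumed caller and 0x3f/0x0e (REPORTED LUNS DATA HAS CHANGED) is just the example ASC/ASCQ:

    /* Does the sense carry UNIT ATTENTION / REPORTED LUNS DATA HAS CHANGED,
     * whatever format (fixed or descriptor) it happens to be in? */
    if (scst_analyze_sense(sense, sense_len, SCST_SENSE_ALL_VALID,
                           UNIT_ATTENTION, 0x3f, 0x0e)) {
            /* ... */
    }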
++/**
++ * scst_is_ua_sense() - determine if the sense is UA sense
++ *
++ * Returns true if the sense is valid and carries a Unit
++ * Attention, false otherwise.
++ */
++bool scst_is_ua_sense(const uint8_t *sense, int len)
++{
++ if (SCST_SENSE_VALID(sense))
++ return scst_analyze_sense(sense, len,
++ SCST_SENSE_KEY_VALID, UNIT_ATTENTION, 0, 0);
++ else
++ return false;
++}
++EXPORT_SYMBOL(scst_is_ua_sense);
++
++bool scst_is_ua_global(const uint8_t *sense, int len)
++{
++ bool res;
++
++	/* When changing this, don't forget to change scst_requeue_ua() as well!! */
++
++ if (scst_analyze_sense(sense, len, SCST_SENSE_ALL_VALID,
++ SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed)))
++ res = true;
++ else
++ res = false;
++
++ return res;
++}
++
++/**
++ * scst_check_convert_sense() - check sense type and convert it if needed
++ *
++ * Checks whether the sense in the sense buffer, if any, is in the correct
++ * format and, if not, converts it to the correct format.
++ */
++void scst_check_convert_sense(struct scst_cmd *cmd)
++{
++ bool d_sense;
++
++ TRACE_ENTRY();
++
++ if ((cmd->sense == NULL) || (cmd->status != SAM_STAT_CHECK_CONDITION))
++ goto out;
++
++ d_sense = scst_get_cmd_dev_d_sense(cmd);
++ if (d_sense && ((cmd->sense[0] == 0x70) || (cmd->sense[0] == 0x71))) {
++ TRACE_MGMT_DBG("Converting fixed sense to descriptor (cmd %p)",
++ cmd);
++ if ((cmd->sense_valid_len < 18)) {
++ PRINT_ERROR("Sense too small to convert (%d, "
++ "type: fixed)", cmd->sense_buflen);
++ goto out;
++ }
++ cmd->sense_valid_len = scst_set_sense(cmd->sense, cmd->sense_buflen,
++ d_sense, cmd->sense[2], cmd->sense[12], cmd->sense[13]);
++ } else if (!d_sense && ((cmd->sense[0] == 0x72) ||
++ (cmd->sense[0] == 0x73))) {
++ TRACE_MGMT_DBG("Converting descriptor sense to fixed (cmd %p)",
++ cmd);
++ if ((cmd->sense_buflen < 18) || (cmd->sense_valid_len < 8)) {
++ PRINT_ERROR("Sense too small to convert (%d, "
++				"type: descriptor, valid %d)",
++ cmd->sense_buflen, cmd->sense_valid_len);
++ goto out;
++ }
++ cmd->sense_valid_len = scst_set_sense(cmd->sense,
++ cmd->sense_buflen, d_sense,
++ cmd->sense[1], cmd->sense[2], cmd->sense[3]);
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL(scst_check_convert_sense);
++
++static int scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
++ unsigned int len)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ res = scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
++ if (res != 0)
++ goto out;
++
++ res = scst_alloc_set_sense(cmd, 1, sense, len);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/**
++ * scst_set_busy() - set BUSY or TASK QUEUE FULL status
++ *
++ * Sets BUSY or TASK QUEUE FULL status depending on if this session has other
++ * outstanding commands or not.
++ */
++void scst_set_busy(struct scst_cmd *cmd)
++{
++ int c = atomic_read(&cmd->sess->sess_cmd_count);
++
++ TRACE_ENTRY();
++
++ if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
++ scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
++ TRACE(TRACE_FLOW_CONTROL, "Sending BUSY status to initiator %s "
++ "(cmds count %d, queue_type %x, sess->init_phase %d)",
++ cmd->sess->initiator_name, c,
++ cmd->queue_type, cmd->sess->init_phase);
++ } else {
++ scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
++ TRACE(TRACE_FLOW_CONTROL, "Sending QUEUE_FULL status to "
++ "initiator %s (cmds count %d, queue_type %x, "
++ "sess->init_phase %d)", cmd->sess->initiator_name, c,
++ cmd->queue_type, cmd->sess->init_phase);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL(scst_set_busy);
++
++/**
++ * scst_set_initial_UA() - set initial Unit Attention
++ *
++ * Sets initial Unit Attention on all devices of the session,
++ * replacing default scst_sense_reset_UA
++ */
++void scst_set_initial_UA(struct scst_session *sess, int key, int asc, int ascq)
++{
++ int i;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("Setting for sess %p initial UA %x/%x/%x", sess, key,
++ asc, ascq);
++
++ /* Protect sess_tgt_dev_list_hash */
++ mutex_lock(&scst_mutex);
++
++ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
++ struct list_head *sess_tgt_dev_list_head =
++ &sess->sess_tgt_dev_list_hash[i];
++ struct scst_tgt_dev *tgt_dev;
++
++ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
++ sess_tgt_dev_list_entry) {
++ spin_lock_bh(&tgt_dev->tgt_dev_lock);
++ if (!list_empty(&tgt_dev->UA_list)) {
++ struct scst_tgt_dev_UA *ua;
++
++ ua = list_entry(tgt_dev->UA_list.next,
++ typeof(*ua), UA_list_entry);
++ if (scst_analyze_sense(ua->UA_sense_buffer,
++ ua->UA_valid_sense_len,
++ SCST_SENSE_ALL_VALID,
++ SCST_LOAD_SENSE(scst_sense_reset_UA))) {
++ ua->UA_valid_sense_len = scst_set_sense(
++ ua->UA_sense_buffer,
++ sizeof(ua->UA_sense_buffer),
++ tgt_dev->dev->d_sense,
++ key, asc, ascq);
++ } else
++ PRINT_ERROR("%s",
++ "The first UA isn't RESET UA");
++ } else
++ PRINT_ERROR("%s", "There's no RESET UA to "
++ "replace");
++ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
++ }
++ }
++
++ mutex_unlock(&scst_mutex);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL(scst_set_initial_UA);
++
++static struct scst_aen *scst_alloc_aen(struct scst_session *sess,
++ uint64_t unpacked_lun)
++{
++ struct scst_aen *aen;
++
++ TRACE_ENTRY();
++
++ aen = mempool_alloc(scst_aen_mempool, GFP_KERNEL);
++ if (aen == NULL) {
++ PRINT_ERROR("AEN memory allocation failed. Corresponding "
++ "event notification will not be performed (initiator "
++ "%s)", sess->initiator_name);
++ goto out;
++ }
++ memset(aen, 0, sizeof(*aen));
++
++ aen->sess = sess;
++ scst_sess_get(sess);
++
++ aen->lun = scst_pack_lun(unpacked_lun, sess->acg->addr_method);
++
++out:
++ TRACE_EXIT_HRES((unsigned long)aen);
++ return aen;
++};
++
++static void scst_free_aen(struct scst_aen *aen)
++{
++ TRACE_ENTRY();
++
++ scst_sess_put(aen->sess);
++ mempool_free(aen, scst_aen_mempool);
++
++ TRACE_EXIT();
++ return;
++};
++
++/* Must be called under scst_mutex */
++void scst_gen_aen_or_ua(struct scst_tgt_dev *tgt_dev,
++ int key, int asc, int ascq)
++{
++ struct scst_tgt_template *tgtt = tgt_dev->sess->tgt->tgtt;
++ uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
++ int sl;
++
++ TRACE_ENTRY();
++
++ if ((tgt_dev->sess->init_phase != SCST_SESS_IPH_READY) ||
++ (tgt_dev->sess->shut_phase != SCST_SESS_SPH_READY))
++ goto out;
++
++ if (tgtt->report_aen != NULL) {
++ struct scst_aen *aen;
++ int rc;
++
++ aen = scst_alloc_aen(tgt_dev->sess, tgt_dev->lun);
++ if (aen == NULL)
++ goto queue_ua;
++
++ aen->event_fn = SCST_AEN_SCSI;
++ aen->aen_sense_len = scst_set_sense(aen->aen_sense,
++ sizeof(aen->aen_sense), tgt_dev->dev->d_sense,
++ key, asc, ascq);
++
++ TRACE_DBG("Calling target's %s report_aen(%p)",
++ tgtt->name, aen);
++ rc = tgtt->report_aen(aen);
++ TRACE_DBG("Target's %s report_aen(%p) returned %d",
++ tgtt->name, aen, rc);
++ if (rc == SCST_AEN_RES_SUCCESS)
++ goto out;
++
++ scst_free_aen(aen);
++ }
++
++queue_ua:
++ TRACE_MGMT_DBG("AEN not supported, queuing plain UA (tgt_dev %p)",
++ tgt_dev);
++ sl = scst_set_sense(sense_buffer, sizeof(sense_buffer),
++ tgt_dev->dev->d_sense, key, asc, ascq);
++ scst_check_set_UA(tgt_dev, sense_buffer, sl, 0);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ * scst_capacity_data_changed() - notify SCST about device capacity change
++ *
++ * Notifies SCST core that dev has changed its capacity. Called under no locks.
++ */
++void scst_capacity_data_changed(struct scst_device *dev)
++{
++ struct scst_tgt_dev *tgt_dev;
++
++ TRACE_ENTRY();
++
++ if (dev->type != TYPE_DISK) {
++ TRACE_MGMT_DBG("Device type %d isn't for CAPACITY DATA "
++ "CHANGED UA", dev->type);
++ goto out;
++ }
++
++ TRACE_MGMT_DBG("CAPACITY DATA CHANGED (dev %p)", dev);
++
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ scst_gen_aen_or_ua(tgt_dev,
++ SCST_LOAD_SENSE(scst_sense_capacity_data_changed));
++ }
++
++ mutex_unlock(&scst_mutex);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_capacity_data_changed);
++
++static inline bool scst_is_report_luns_changed_type(int type)
++{
++ switch (type) {
++ case TYPE_DISK:
++ case TYPE_TAPE:
++ case TYPE_PRINTER:
++ case TYPE_PROCESSOR:
++ case TYPE_WORM:
++ case TYPE_ROM:
++ case TYPE_SCANNER:
++ case TYPE_MOD:
++ case TYPE_MEDIUM_CHANGER:
++ case TYPE_RAID:
++ case TYPE_ENCLOSURE:
++ return true;
++ default:
++ return false;
++ }
++}
++
++/* scst_mutex supposed to be held */
++static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
++ int flags)
++{
++ uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
++ struct list_head *shead;
++ struct scst_tgt_dev *tgt_dev;
++ int i;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("Queuing REPORTED LUNS DATA CHANGED UA "
++ "(sess %p)", sess);
++
++ local_bh_disable();
++
++ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
++ shead = &sess->sess_tgt_dev_list_hash[i];
++
++ list_for_each_entry(tgt_dev, shead,
++ sess_tgt_dev_list_entry) {
++			/* Lockdep triggers a false positive here... */
++ spin_lock(&tgt_dev->tgt_dev_lock);
++ }
++ }
++
++ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
++ shead = &sess->sess_tgt_dev_list_hash[i];
++
++ list_for_each_entry(tgt_dev, shead,
++ sess_tgt_dev_list_entry) {
++ int sl;
++
++ if (!scst_is_report_luns_changed_type(
++ tgt_dev->dev->type))
++ continue;
++
++ sl = scst_set_sense(sense_buffer, sizeof(sense_buffer),
++ tgt_dev->dev->d_sense,
++ SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));
++
++ __scst_check_set_UA(tgt_dev, sense_buffer,
++ sl, flags | SCST_SET_UA_FLAG_GLOBAL);
++ }
++ }
++
++ for (i = TGT_DEV_HASH_SIZE-1; i >= 0; i--) {
++ shead = &sess->sess_tgt_dev_list_hash[i];
++
++ list_for_each_entry_reverse(tgt_dev,
++ shead, sess_tgt_dev_list_entry) {
++ spin_unlock(&tgt_dev->tgt_dev_lock);
++ }
++ }
++
++ local_bh_enable();
++
++ TRACE_EXIT();
++ return;
++}
++
++/* The activity supposed to be suspended and scst_mutex held */
++static void scst_report_luns_changed_sess(struct scst_session *sess)
++{
++ int i;
++ struct scst_tgt_template *tgtt = sess->tgt->tgtt;
++ int d_sense = 0;
++ uint64_t lun = 0;
++
++ TRACE_ENTRY();
++
++ if ((sess->init_phase != SCST_SESS_IPH_READY) ||
++ (sess->shut_phase != SCST_SESS_SPH_READY))
++ goto out;
++
++ TRACE_DBG("REPORTED LUNS DATA CHANGED (sess %p)", sess);
++
++ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
++ struct list_head *shead;
++ struct scst_tgt_dev *tgt_dev;
++
++ shead = &sess->sess_tgt_dev_list_hash[i];
++
++ list_for_each_entry(tgt_dev, shead,
++ sess_tgt_dev_list_entry) {
++ if (scst_is_report_luns_changed_type(
++ tgt_dev->dev->type)) {
++ lun = tgt_dev->lun;
++ d_sense = tgt_dev->dev->d_sense;
++ goto found;
++ }
++ }
++ }
++
++found:
++ if (tgtt->report_aen != NULL) {
++ struct scst_aen *aen;
++ int rc;
++
++ aen = scst_alloc_aen(sess, lun);
++ if (aen == NULL)
++ goto queue_ua;
++
++ aen->event_fn = SCST_AEN_SCSI;
++ aen->aen_sense_len = scst_set_sense(aen->aen_sense,
++ sizeof(aen->aen_sense), d_sense,
++ SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));
++
++ TRACE_DBG("Calling target's %s report_aen(%p)",
++ tgtt->name, aen);
++ rc = tgtt->report_aen(aen);
++ TRACE_DBG("Target's %s report_aen(%p) returned %d",
++ tgtt->name, aen, rc);
++ if (rc == SCST_AEN_RES_SUCCESS)
++ goto out;
++
++ scst_free_aen(aen);
++ }
++
++queue_ua:
++ scst_queue_report_luns_changed_UA(sess, 0);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* The activity supposed to be suspended and scst_mutex held */
++void scst_report_luns_changed(struct scst_acg *acg)
++{
++ struct scst_session *sess;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("REPORTED LUNS DATA CHANGED (acg %s)", acg->acg_name);
++
++ list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
++ scst_report_luns_changed_sess(sess);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ * scst_aen_done() - AEN processing done
++ *
++ * Notifies SCST that the driver has sent the AEN and it
++ * can be freed now. Don't forget to set the delivery status, if it
++ * isn't successful, using scst_set_aen_delivery_status() before calling
++ * this function.
++ */
++void scst_aen_done(struct scst_aen *aen)
++{
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("AEN %p (fn %d) done (initiator %s)", aen,
++ aen->event_fn, aen->sess->initiator_name);
++
++ if (aen->delivery_status == SCST_AEN_RES_SUCCESS)
++ goto out_free;
++
++ if (aen->event_fn != SCST_AEN_SCSI)
++ goto out_free;
++
++ TRACE_MGMT_DBG("Delivery of SCSI AEN failed (initiator %s)",
++ aen->sess->initiator_name);
++
++ if (scst_analyze_sense(aen->aen_sense, aen->aen_sense_len,
++ SCST_SENSE_ALL_VALID, SCST_LOAD_SENSE(
++ scst_sense_reported_luns_data_changed))) {
++ mutex_lock(&scst_mutex);
++ scst_queue_report_luns_changed_UA(aen->sess,
++ SCST_SET_UA_FLAG_AT_HEAD);
++ mutex_unlock(&scst_mutex);
++ } else {
++ struct list_head *shead;
++ struct scst_tgt_dev *tgt_dev;
++ uint64_t lun;
++
++ lun = scst_unpack_lun((uint8_t *)&aen->lun, sizeof(aen->lun));
++
++ mutex_lock(&scst_mutex);
++
++		/* tgt_dev might have gone away, so we need to look it up again */
++ shead = &aen->sess->sess_tgt_dev_list_hash[HASH_VAL(lun)];
++ list_for_each_entry(tgt_dev, shead,
++ sess_tgt_dev_list_entry) {
++ if (tgt_dev->lun == lun) {
++ TRACE_MGMT_DBG("Requeuing failed AEN UA for "
++ "tgt_dev %p", tgt_dev);
++ scst_check_set_UA(tgt_dev, aen->aen_sense,
++ aen->aen_sense_len,
++ SCST_SET_UA_FLAG_AT_HEAD);
++ break;
++ }
++ }
++
++ mutex_unlock(&scst_mutex);
++ }
++
++out_free:
++ scst_free_aen(aen);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL(scst_aen_done);
++
++void scst_requeue_ua(struct scst_cmd *cmd)
++{
++ TRACE_ENTRY();
++
++ if (scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
++ SCST_SENSE_ALL_VALID,
++ SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed))) {
++ TRACE_MGMT_DBG("Requeuing REPORTED LUNS DATA CHANGED UA "
++ "for delivery failed cmd %p", cmd);
++ mutex_lock(&scst_mutex);
++ scst_queue_report_luns_changed_UA(cmd->sess,
++ SCST_SET_UA_FLAG_AT_HEAD);
++ mutex_unlock(&scst_mutex);
++ } else {
++ TRACE_MGMT_DBG("Requeuing UA for delivery failed cmd %p", cmd);
++ scst_check_set_UA(cmd->tgt_dev, cmd->sense,
++ cmd->sense_valid_len, SCST_SET_UA_FLAG_AT_HEAD);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/* The activity supposed to be suspended and scst_mutex held */
++static void scst_check_reassign_sess(struct scst_session *sess)
++{
++ struct scst_acg *acg, *old_acg;
++ struct scst_acg_dev *acg_dev;
++ int i, rc;
++ struct list_head *shead;
++ struct scst_tgt_dev *tgt_dev;
++ bool luns_changed = false;
++ bool add_failed, something_freed, not_needed_freed = false;
++
++ TRACE_ENTRY();
++
++ if (sess->shut_phase != SCST_SESS_SPH_READY)
++ goto out;
++
++ TRACE_MGMT_DBG("Checking reassignment for sess %p (initiator %s)",
++ sess, sess->initiator_name);
++
++ acg = scst_find_acg(sess);
++ if (acg == sess->acg) {
++ TRACE_MGMT_DBG("No reassignment for sess %p", sess);
++ goto out;
++ }
++
++ TRACE_MGMT_DBG("sess %p will be reassigned from acg %s to acg %s",
++ sess, sess->acg->acg_name, acg->acg_name);
++
++ old_acg = sess->acg;
++ sess->acg = NULL; /* to catch implicit dependencies earlier */
++
++retry_add:
++ add_failed = false;
++ list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
++ unsigned int inq_changed_ua_needed = 0;
++
++ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
++ shead = &sess->sess_tgt_dev_list_hash[i];
++
++ list_for_each_entry(tgt_dev, shead,
++ sess_tgt_dev_list_entry) {
++ if ((tgt_dev->dev == acg_dev->dev) &&
++ (tgt_dev->lun == acg_dev->lun) &&
++ (tgt_dev->acg_dev->rd_only == acg_dev->rd_only)) {
++ TRACE_MGMT_DBG("sess %p: tgt_dev %p for "
++ "LUN %lld stays the same",
++ sess, tgt_dev,
++ (unsigned long long)tgt_dev->lun);
++ tgt_dev->acg_dev = acg_dev;
++ goto next;
++ } else if (tgt_dev->lun == acg_dev->lun)
++ inq_changed_ua_needed = 1;
++ }
++ }
++
++ luns_changed = true;
++
++ TRACE_MGMT_DBG("sess %p: Allocing new tgt_dev for LUN %lld",
++ sess, (unsigned long long)acg_dev->lun);
++
++ rc = scst_alloc_add_tgt_dev(sess, acg_dev, &tgt_dev);
++ if (rc == -EPERM)
++ continue;
++ else if (rc != 0) {
++ add_failed = true;
++ break;
++ }
++
++ tgt_dev->inq_changed_ua_needed = inq_changed_ua_needed ||
++ not_needed_freed;
++next:
++ continue;
++ }
++
++ something_freed = false;
++ not_needed_freed = true;
++ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
++ struct scst_tgt_dev *t;
++ shead = &sess->sess_tgt_dev_list_hash[i];
++
++ list_for_each_entry_safe(tgt_dev, t, shead,
++ sess_tgt_dev_list_entry) {
++ if (tgt_dev->acg_dev->acg != acg) {
++ TRACE_MGMT_DBG("sess %p: Deleting not used "
++ "tgt_dev %p for LUN %lld",
++ sess, tgt_dev,
++ (unsigned long long)tgt_dev->lun);
++ luns_changed = true;
++ something_freed = true;
++ scst_free_tgt_dev(tgt_dev);
++ }
++ }
++ }
++
++ if (add_failed && something_freed) {
++ TRACE_MGMT_DBG("sess %p: Retrying adding new tgt_devs", sess);
++ goto retry_add;
++ }
++
++ sess->acg = acg;
++
++ TRACE_DBG("Moving sess %p from acg %s to acg %s", sess,
++ old_acg->acg_name, acg->acg_name);
++ list_move_tail(&sess->acg_sess_list_entry, &acg->acg_sess_list);
++
++ scst_recreate_sess_luns_link(sess);
++ /* Ignore possible error, since we can't do anything on it */
++
++ if (luns_changed) {
++ scst_report_luns_changed_sess(sess);
++
++ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
++ shead = &sess->sess_tgt_dev_list_hash[i];
++
++ list_for_each_entry(tgt_dev, shead,
++ sess_tgt_dev_list_entry) {
++ if (tgt_dev->inq_changed_ua_needed) {
++ TRACE_MGMT_DBG("sess %p: Setting "
++ "INQUIRY DATA HAS CHANGED UA "
++ "(tgt_dev %p)", sess, tgt_dev);
++
++ tgt_dev->inq_changed_ua_needed = 0;
++
++ scst_gen_aen_or_ua(tgt_dev,
++ SCST_LOAD_SENSE(scst_sense_inquery_data_changed));
++ }
++ }
++ }
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* The activity supposed to be suspended and scst_mutex held */
++void scst_check_reassign_sessions(void)
++{
++ struct scst_tgt_template *tgtt;
++
++ TRACE_ENTRY();
++
++ list_for_each_entry(tgtt, &scst_template_list, scst_template_list_entry) {
++ struct scst_tgt *tgt;
++ list_for_each_entry(tgt, &tgtt->tgt_list, tgt_list_entry) {
++ struct scst_session *sess;
++ list_for_each_entry(sess, &tgt->sess_list,
++ sess_list_entry) {
++ scst_check_reassign_sess(sess);
++ }
++ }
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ switch (cmd->state) {
++ case SCST_CMD_STATE_INIT_WAIT:
++ case SCST_CMD_STATE_INIT:
++ case SCST_CMD_STATE_PARSE:
++ if (cmd->preprocessing_only) {
++ res = SCST_CMD_STATE_PREPROCESSING_DONE;
++ break;
++		} /* else fall through */
++ case SCST_CMD_STATE_DEV_DONE:
++ if (cmd->internal)
++ res = SCST_CMD_STATE_FINISHED_INTERNAL;
++ else
++ res = SCST_CMD_STATE_PRE_XMIT_RESP;
++ break;
++
++ case SCST_CMD_STATE_PRE_DEV_DONE:
++ case SCST_CMD_STATE_MODE_SELECT_CHECKS:
++ res = SCST_CMD_STATE_DEV_DONE;
++ break;
++
++ case SCST_CMD_STATE_PRE_XMIT_RESP:
++ res = SCST_CMD_STATE_XMIT_RESP;
++ break;
++
++ case SCST_CMD_STATE_PREPROCESSING_DONE:
++ case SCST_CMD_STATE_PREPROCESSING_DONE_CALLED:
++ if (cmd->tgt_dev == NULL)
++ res = SCST_CMD_STATE_PRE_XMIT_RESP;
++ else
++ res = SCST_CMD_STATE_PRE_DEV_DONE;
++ break;
++
++ case SCST_CMD_STATE_PREPARE_SPACE:
++ if (cmd->preprocessing_only) {
++ res = SCST_CMD_STATE_PREPROCESSING_DONE;
++ break;
++		} /* else fall through */
++ case SCST_CMD_STATE_RDY_TO_XFER:
++ case SCST_CMD_STATE_DATA_WAIT:
++ case SCST_CMD_STATE_TGT_PRE_EXEC:
++ case SCST_CMD_STATE_SEND_FOR_EXEC:
++ case SCST_CMD_STATE_LOCAL_EXEC:
++ case SCST_CMD_STATE_REAL_EXEC:
++ case SCST_CMD_STATE_REAL_EXECUTING:
++ res = SCST_CMD_STATE_PRE_DEV_DONE;
++ break;
++
++ default:
++ PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
++ cmd->state, cmd, cmd->cdb[0]);
++ BUG();
++		/* Invalid state to suppress compiler's warning */
++ res = SCST_CMD_STATE_LAST_ACTIVE;
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/**
++ * scst_set_cmd_abnormal_done_state() - set command's next abnormal done state
++ *
++ * Sets state of the SCSI target state machine to abnormally complete command
++ * ASAP.
++ *
++ * Returns the new state.
++ */
++int scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
++{
++ TRACE_ENTRY();
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ switch (cmd->state) {
++ case SCST_CMD_STATE_XMIT_RESP:
++ case SCST_CMD_STATE_FINISHED:
++ case SCST_CMD_STATE_FINISHED_INTERNAL:
++ case SCST_CMD_STATE_XMIT_WAIT:
++ PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
++ cmd->state, cmd, cmd->cdb[0]);
++ BUG();
++ }
++#endif
++
++ cmd->state = scst_get_cmd_abnormal_done_state(cmd);
++
++ switch (cmd->state) {
++ case SCST_CMD_STATE_INIT_WAIT:
++ case SCST_CMD_STATE_INIT:
++ case SCST_CMD_STATE_PARSE:
++ case SCST_CMD_STATE_PREPROCESSING_DONE:
++ case SCST_CMD_STATE_PREPROCESSING_DONE_CALLED:
++ case SCST_CMD_STATE_PREPARE_SPACE:
++ case SCST_CMD_STATE_RDY_TO_XFER:
++ case SCST_CMD_STATE_DATA_WAIT:
++ cmd->write_len = 0;
++ cmd->resid_possible = 1;
++ break;
++ case SCST_CMD_STATE_TGT_PRE_EXEC:
++ case SCST_CMD_STATE_SEND_FOR_EXEC:
++ case SCST_CMD_STATE_LOCAL_EXEC:
++ case SCST_CMD_STATE_REAL_EXEC:
++ case SCST_CMD_STATE_REAL_EXECUTING:
++ case SCST_CMD_STATE_DEV_DONE:
++ case SCST_CMD_STATE_PRE_DEV_DONE:
++ case SCST_CMD_STATE_MODE_SELECT_CHECKS:
++ case SCST_CMD_STATE_PRE_XMIT_RESP:
++ break;
++ default:
++ PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
++ cmd->state, cmd, cmd->cdb[0]);
++ BUG();
++ break;
++ }
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ if (((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
++ (cmd->state != SCST_CMD_STATE_PREPROCESSING_DONE)) &&
++ (cmd->tgt_dev == NULL) && !cmd->internal) {
++ PRINT_CRIT_ERROR("Wrong not inited cmd state %d (cmd %p, "
++ "op %x)", cmd->state, cmd, cmd->cdb[0]);
++ BUG();
++ }
++#endif
++
++ TRACE_EXIT_RES(cmd->state);
++ return cmd->state;
++}
++EXPORT_SYMBOL_GPL(scst_set_cmd_abnormal_done_state);
++
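A minimal sketch of the usual pairing of an error-status helper with this function; the surrounding target-driver context is assumed:

    /* Hypothetical fragment: the command cannot be accepted right now,
     * so report BUSY/QUEUE FULL and complete the command abnormally. */
    scst_set_busy(cmd);
    scst_set_cmd_abnormal_done_state(cmd);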
++void scst_zero_write_rest(struct scst_cmd *cmd)
++{
++ int len, offs = 0;
++ uint8_t *buf;
++
++ TRACE_ENTRY();
++
++ len = scst_get_sg_buf_first(cmd, &buf, *cmd->write_sg,
++ *cmd->write_sg_cnt);
++ while (len > 0) {
++ int cur_offs;
++
++ if (offs + len <= cmd->write_len)
++ goto next;
++ else if (offs >= cmd->write_len)
++ cur_offs = 0;
++ else
++ cur_offs = cmd->write_len - offs;
++
++ memset(&buf[cur_offs], 0, len - cur_offs);
++
++next:
++ offs += len;
++ scst_put_sg_buf(cmd, buf, *cmd->write_sg, *cmd->write_sg_cnt);
++ len = scst_get_sg_buf_next(cmd, &buf, *cmd->write_sg,
++ *cmd->write_sg_cnt);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static void scst_adjust_sg(struct scst_cmd *cmd, struct scatterlist *sg,
++ int *sg_cnt, int adjust_len)
++{
++ int i, l;
++
++ TRACE_ENTRY();
++
++ l = 0;
++ for (i = 0; i < *sg_cnt; i++) {
++ l += sg[i].length;
++ if (l >= adjust_len) {
++ int left = adjust_len - (l - sg[i].length);
++#ifdef CONFIG_SCST_DEBUG
++ TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
++ "sg %p, sg_cnt %d, adjust_len %d, i %d, "
++ "sg[i].length %d, left %d",
++ cmd, (long long unsigned int)cmd->tag,
++ sg, *sg_cnt, adjust_len, i,
++ sg[i].length, left);
++#endif
++ cmd->orig_sg = sg;
++ cmd->p_orig_sg_cnt = sg_cnt;
++ cmd->orig_sg_cnt = *sg_cnt;
++ cmd->orig_sg_entry = i;
++ cmd->orig_entry_len = sg[i].length;
++ *sg_cnt = (left > 0) ? i+1 : i;
++ sg[i].length = left;
++ cmd->sg_buff_modified = 1;
++ break;
++ }
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ * scst_restore_sg_buff() - restores modified sg buffer
++ *
++ * Restores modified sg buffer in the original state.
++ */
++void scst_restore_sg_buff(struct scst_cmd *cmd)
++{
++ TRACE_MEM("cmd %p, sg %p, orig_sg_entry %d, "
++ "orig_entry_len %d, orig_sg_cnt %d", cmd, cmd->orig_sg,
++ cmd->orig_sg_entry, cmd->orig_entry_len,
++ cmd->orig_sg_cnt);
++ cmd->orig_sg[cmd->orig_sg_entry].length = cmd->orig_entry_len;
++ *cmd->p_orig_sg_cnt = cmd->orig_sg_cnt;
++ cmd->sg_buff_modified = 0;
++}
++EXPORT_SYMBOL(scst_restore_sg_buff);
++
++/**
++ * scst_set_resp_data_len() - set response data length
++ *
++ * Sets response data length for cmd and truncates its SG vector accordingly.
++ *
++ * The cmd->resp_data_len must not be set directly, it must be set only
++ * using this function. Value of resp_data_len must be <= cmd->bufflen.
++ */
++void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
++{
++ TRACE_ENTRY();
++
++ scst_check_restore_sg_buff(cmd);
++ cmd->resp_data_len = resp_data_len;
++
++ if (resp_data_len == cmd->bufflen)
++ goto out;
++
++ scst_adjust_sg(cmd, cmd->sg, &cmd->sg_cnt, resp_data_len);
++
++ cmd->resid_possible = 1;
++
++out:
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_set_resp_data_len);
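++/*
++ * For example, a dev handler returning only the valid part of a response
++ * could do (illustrative sketch only, valid_len being a hypothetical local):
++ *
++ *	if (valid_len < cmd->bufflen)
++ *		scst_set_resp_data_len(cmd, valid_len);
++ */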
++
++void scst_limit_sg_write_len(struct scst_cmd *cmd)
++{
++ TRACE_ENTRY();
++
++ TRACE_MEM("Limiting sg write len to %d (cmd %p, sg %p, sg_cnt %d)",
++ cmd->write_len, cmd, *cmd->write_sg, *cmd->write_sg_cnt);
++
++ scst_check_restore_sg_buff(cmd);
++ scst_adjust_sg(cmd, *cmd->write_sg, cmd->write_sg_cnt, cmd->write_len);
++
++ TRACE_EXIT();
++ return;
++}
++
++void scst_adjust_resp_data_len(struct scst_cmd *cmd)
++{
++ TRACE_ENTRY();
++
++ if (!cmd->expected_values_set) {
++ cmd->adjusted_resp_data_len = cmd->resp_data_len;
++ goto out;
++ }
++
++ cmd->adjusted_resp_data_len = min(cmd->resp_data_len,
++ cmd->expected_transfer_len);
++
++ if (cmd->adjusted_resp_data_len != cmd->resp_data_len) {
++		TRACE_MEM("Adjusting resp_data_len to %d (cmd %p, sg %p, "
++ "sg_cnt %d)", cmd->adjusted_resp_data_len, cmd, cmd->sg,
++ cmd->sg_cnt);
++ scst_check_restore_sg_buff(cmd);
++ scst_adjust_sg(cmd, cmd->sg, &cmd->sg_cnt,
++ cmd->adjusted_resp_data_len);
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ * scst_cmd_set_write_not_received_data_len() - sets cmd's not received len
++ *
++ * Sets cmd's not received data length. Also automatically sets resid_possible.
++ */
++void scst_cmd_set_write_not_received_data_len(struct scst_cmd *cmd,
++ int not_received)
++{
++ TRACE_ENTRY();
++
++ BUG_ON(!cmd->expected_values_set);
++
++ cmd->resid_possible = 1;
++
++ if ((cmd->expected_data_direction & SCST_DATA_READ) &&
++ (cmd->expected_data_direction & SCST_DATA_WRITE)) {
++ cmd->write_len = cmd->expected_out_transfer_len - not_received;
++ if (cmd->write_len == cmd->out_bufflen)
++ goto out;
++ } else if (cmd->expected_data_direction & SCST_DATA_WRITE) {
++ cmd->write_len = cmd->expected_transfer_len - not_received;
++ if (cmd->write_len == cmd->bufflen)
++ goto out;
++ }
++
++ /*
++	 * Write len can now be bigger than cmd->(out_)bufflen, but that's OK,
++	 * because it will only be used to calculate write residuals.
++ */
++
++ TRACE_DBG("cmd %p, not_received %d, write_len %d", cmd, not_received,
++ cmd->write_len);
++
++ if (cmd->data_direction & SCST_DATA_WRITE)
++ scst_limit_sg_write_len(cmd);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL(scst_cmd_set_write_not_received_data_len);
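++/*
++ * For example, a target driver that got only part of the expected write
++ * data could report the shortfall like this (illustrative sketch only,
++ * received_len being a hypothetical local):
++ *
++ *	scst_cmd_set_write_not_received_data_len(cmd,
++ *		cmd->expected_transfer_len - received_len);
++ */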
++
++/**
++ * __scst_get_resid() - returns residuals for cmd
++ *
++ * Returns residuals for command. Must not be called directly, use
++ * scst_get_resid() instead.
++ */
++bool __scst_get_resid(struct scst_cmd *cmd, int *resid, int *bidi_out_resid)
++{
++ TRACE_ENTRY();
++
++ *resid = 0;
++ if (bidi_out_resid != NULL)
++ *bidi_out_resid = 0;
++
++ BUG_ON(!cmd->expected_values_set);
++
++ if (cmd->expected_data_direction & SCST_DATA_READ) {
++ *resid = cmd->expected_transfer_len - cmd->resp_data_len;
++ if ((cmd->expected_data_direction & SCST_DATA_WRITE) && bidi_out_resid) {
++ if (cmd->write_len < cmd->expected_out_transfer_len)
++ *bidi_out_resid = cmd->expected_out_transfer_len -
++ cmd->write_len;
++ else
++ *bidi_out_resid = cmd->write_len - cmd->out_bufflen;
++ }
++ } else if (cmd->expected_data_direction & SCST_DATA_WRITE) {
++ if (cmd->write_len < cmd->expected_transfer_len)
++ *resid = cmd->expected_transfer_len - cmd->write_len;
++ else
++ *resid = cmd->write_len - cmd->bufflen;
++ }
++
++ TRACE_DBG("cmd %p, resid %d, bidi_out_resid %d (resp_data_len %d, "
++ "expected_data_direction %d, write_len %d, bufflen %d)", cmd,
++ *resid, bidi_out_resid ? *bidi_out_resid : 0, cmd->resp_data_len,
++ cmd->expected_data_direction, cmd->write_len, cmd->bufflen);
++
++ TRACE_EXIT_RES(1);
++ return true;
++}
++EXPORT_SYMBOL(__scst_get_resid);
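++/*
++ * Note: the residuals are computed against the initiator's expected
++ * transfer lengths, so cmd->expected_values_set must be true (enforced by
++ * the BUG_ON() above).
++ */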
++
++/* No locks */
++int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
++{
++ struct scst_tgt *tgt = cmd->tgt;
++ int res = 0;
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ spin_lock_irqsave(&tgt->tgt_lock, flags);
++ tgt->retry_cmds++;
++ /*
++	 * A memory barrier is needed here, because we rely on the exact
++	 * ordering of the write to retry_cmds and the read of finished_cmds,
++	 * so we don't miss the case where a command finishes while we are
++	 * queuing it for retry after the finished_cmds check.
++ */
++ smp_mb();
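++	/*
++	 * This pairs with the smp_mb__after_atomic_inc() in
++	 * scst_check_retries().
++	 */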
++ TRACE_RETRY("TGT QUEUE FULL: incrementing retry_cmds %d",
++ tgt->retry_cmds);
++ if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
++ /* At least one cmd finished, so try again */
++ tgt->retry_cmds--;
++ TRACE_RETRY("Some command(s) finished, direct retry "
++ "(finished_cmds=%d, tgt->finished_cmds=%d, "
++ "retry_cmds=%d)", finished_cmds,
++ atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
++ res = -1;
++ goto out_unlock_tgt;
++ }
++
++ TRACE_RETRY("Adding cmd %p to retry cmd list", cmd);
++ list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
++
++ if (!tgt->retry_timer_active) {
++ tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
++ add_timer(&tgt->retry_timer);
++ tgt->retry_timer_active = 1;
++ }
++
++out_unlock_tgt:
++ spin_unlock_irqrestore(&tgt->tgt_lock, flags);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/**
++ * scst_update_hw_pending_start() - update commands pending start
++ *
++ * Updates the command's hw_pending_start as if it's just started hw pending.
++ * Target drivers should call it if they received a reply from this pending
++ * command, but the SCST core won't see it.
++ */
++void scst_update_hw_pending_start(struct scst_cmd *cmd)
++{
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ /* To sync with scst_check_hw_pending_cmd() */
++ spin_lock_irqsave(&cmd->sess->sess_list_lock, flags);
++ cmd->hw_pending_start = jiffies;
++ TRACE_MGMT_DBG("Updated hw_pending_start to %ld (cmd %p)",
++ cmd->hw_pending_start, cmd);
++ spin_unlock_irqrestore(&cmd->sess->sess_list_lock, flags);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_update_hw_pending_start);
++
++/*
++ * Supposed to be called under sess_list_lock, but can release/reacquire it.
++ * Returns 0 to continue, >0 to restart, <0 to break.
++ */
++static int scst_check_hw_pending_cmd(struct scst_cmd *cmd,
++ unsigned long cur_time, unsigned long max_time,
++ struct scst_session *sess, unsigned long *flags,
++ struct scst_tgt_template *tgtt)
++{
++ int res = -1; /* break */
++
++ TRACE_DBG("cmd %p, hw_pending %d, proc time %ld, "
++ "pending time %ld", cmd, cmd->cmd_hw_pending,
++ (long)(cur_time - cmd->start_time) / HZ,
++ (long)(cur_time - cmd->hw_pending_start) / HZ);
++
++ if (time_before(cur_time, cmd->start_time + max_time)) {
++ /* Cmds are ordered, so no need to check more */
++ goto out;
++ }
++
++ if (!cmd->cmd_hw_pending) {
++ res = 0; /* continue */
++ goto out;
++ }
++
++ if (time_before(cur_time, cmd->hw_pending_start + max_time)) {
++ res = 0; /* continue */
++ goto out;
++ }
++
++ TRACE_MGMT_DBG("Cmd %p HW pending for too long %ld (state %x)",
++ cmd, (cur_time - cmd->hw_pending_start) / HZ,
++ cmd->state);
++
++ cmd->cmd_hw_pending = 0;
++
++ spin_unlock_irqrestore(&sess->sess_list_lock, *flags);
++ tgtt->on_hw_pending_cmd_timeout(cmd);
++ spin_lock_irqsave(&sess->sess_list_lock, *flags);
++
++ res = 1; /* restart */
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void scst_hw_pending_work_fn(struct delayed_work *work)
++{
++ struct scst_session *sess = container_of(work, struct scst_session,
++ hw_pending_work);
++ struct scst_tgt_template *tgtt = sess->tgt->tgtt;
++ struct scst_cmd *cmd;
++ unsigned long cur_time = jiffies;
++ unsigned long flags;
++ unsigned long max_time = tgtt->max_hw_pending_time * HZ;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("HW pending work (sess %p, max time %ld)", sess, max_time/HZ);
++
++ clear_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);
++
++ spin_lock_irqsave(&sess->sess_list_lock, flags);
++
++restart:
++ list_for_each_entry(cmd, &sess->sess_cmd_list, sess_cmd_list_entry) {
++ int rc;
++
++ rc = scst_check_hw_pending_cmd(cmd, cur_time, max_time, sess,
++ &flags, tgtt);
++ if (rc < 0)
++ break;
++ else if (rc == 0)
++ continue;
++ else
++ goto restart;
++ }
++
++ if (!list_empty(&sess->sess_cmd_list)) {
++ /*
++		 * If there is no activity, stuck cmds might need one more run
++		 * to be released, so reschedule once again.
++ */
++ TRACE_DBG("Sched HW pending work for sess %p (max time %d)",
++ sess, tgtt->max_hw_pending_time);
++ set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);
++ schedule_delayed_work(&sess->hw_pending_work,
++ tgtt->max_hw_pending_time * HZ);
++ }
++
++ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
++
++ TRACE_EXIT();
++ return;
++}
++
++static bool __scst_is_relative_target_port_id_unique(uint16_t id,
++ const struct scst_tgt *t)
++{
++ bool res = true;
++ struct scst_tgt_template *tgtt;
++
++ TRACE_ENTRY();
++
++ list_for_each_entry(tgtt, &scst_template_list,
++ scst_template_list_entry) {
++ struct scst_tgt *tgt;
++ list_for_each_entry(tgt, &tgtt->tgt_list, tgt_list_entry) {
++ if (tgt == t)
++ continue;
++ if ((tgt->tgtt->is_target_enabled != NULL) &&
++ !tgt->tgtt->is_target_enabled(tgt))
++ continue;
++ if (id == tgt->rel_tgt_id) {
++ res = false;
++ break;
++ }
++ }
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* scst_mutex NOT supposed to be held on entry; it is taken inside */
++bool scst_is_relative_target_port_id_unique(uint16_t id,
++ const struct scst_tgt *t)
++{
++ bool res;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_mutex);
++ res = __scst_is_relative_target_port_id_unique(id, t);
++ mutex_unlock(&scst_mutex);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
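++/*
++ * Picks the next free relative target port id by scanning circularly from
++ * the last value handed out; returns -EOVERFLOW once every id in
++ * [SCST_MIN_REL_TGT_ID, SCST_MAX_REL_TGT_ID] is in use.
++ */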
++int gen_relative_target_port_id(uint16_t *id)
++{
++ int res = -EOVERFLOW;
++ static unsigned long rti = SCST_MIN_REL_TGT_ID, rti_prev;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ rti_prev = rti;
++ do {
++ if (__scst_is_relative_target_port_id_unique(rti, NULL)) {
++ *id = (uint16_t)rti++;
++ res = 0;
++ goto out_unlock;
++ }
++ rti++;
++ if (rti > SCST_MAX_REL_TGT_ID)
++ rti = SCST_MIN_REL_TGT_ID;
++ } while (rti != rti_prev);
++
++ PRINT_ERROR("%s", "Unable to create unique relative target port id");
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* No locks */
++int scst_alloc_tgt(struct scst_tgt_template *tgtt, struct scst_tgt **tgt)
++{
++ struct scst_tgt *t;
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ t = kzalloc(sizeof(*t), GFP_KERNEL);
++ if (t == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of tgt failed");
++ res = -ENOMEM;
++ goto out;
++ }
++
++ INIT_LIST_HEAD(&t->sess_list);
++ init_waitqueue_head(&t->unreg_waitQ);
++ t->tgtt = tgtt;
++ t->sg_tablesize = tgtt->sg_tablesize;
++ spin_lock_init(&t->tgt_lock);
++ INIT_LIST_HEAD(&t->retry_cmd_list);
++ atomic_set(&t->finished_cmds, 0);
++ init_timer(&t->retry_timer);
++ t->retry_timer.data = (unsigned long)t;
++ t->retry_timer.function = scst_tgt_retry_timer_fn;
++
++ INIT_LIST_HEAD(&t->tgt_acg_list);
++
++ *tgt = t;
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++/* No locks */
++void scst_free_tgt(struct scst_tgt *tgt)
++{
++ TRACE_ENTRY();
++
++ kfree(tgt->tgt_name);
++
++ kfree(tgt);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Called under scst_mutex and suspended activity */
++int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
++{
++ struct scst_device *dev;
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ dev = kzalloc(sizeof(*dev), gfp_mask);
++ if (dev == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "%s",
++ "Allocation of scst_device failed");
++ res = -ENOMEM;
++ goto out;
++ }
++
++ dev->handler = &scst_null_devtype;
++ atomic_set(&dev->dev_cmd_count, 0);
++ atomic_set(&dev->write_cmd_count, 0);
++ scst_init_mem_lim(&dev->dev_mem_lim);
++ spin_lock_init(&dev->dev_lock);
++ INIT_LIST_HEAD(&dev->blocked_cmd_list);
++ INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
++ INIT_LIST_HEAD(&dev->dev_acg_dev_list);
++ dev->dev_double_ua_possible = 1;
++ dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
++
++ mutex_init(&dev->dev_pr_mutex);
++ atomic_set(&dev->pr_readers_count, 0);
++ dev->pr_generation = 0;
++ dev->pr_is_set = 0;
++ dev->pr_holder = NULL;
++ dev->pr_scope = SCOPE_LU;
++ dev->pr_type = TYPE_UNSPECIFIED;
++ INIT_LIST_HEAD(&dev->dev_registrants_list);
++
++ scst_init_threads(&dev->dev_cmd_threads);
++
++ *out_dev = dev;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++void scst_free_device(struct scst_device *dev)
++{
++ TRACE_ENTRY();
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ if (!list_empty(&dev->dev_tgt_dev_list) ||
++ !list_empty(&dev->dev_acg_dev_list)) {
++ PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
++ "is not empty!", __func__);
++ BUG();
++ }
++#endif
++
++ scst_deinit_threads(&dev->dev_cmd_threads);
++
++ kfree(dev->virt_name);
++ kfree(dev);
++
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ * scst_init_mem_lim - initialize memory limits structure
++ *
++ * Initializes memory limits structure mem_lim according to
++ * the current system configuration. This structure should later be used
++ * to track and limit the memory allocated by one or more SGV pools.
++ */
++void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
++{
++ atomic_set(&mem_lim->alloced_pages, 0);
++ mem_lim->max_allowed_pages =
++ ((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
++}
++EXPORT_SYMBOL_GPL(scst_init_mem_lim);
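++/*
++ * For example, a driver keeping its own memory limit for a set of SGV pool
++ * allocations could do (illustrative sketch only):
++ *
++ *	static struct scst_mem_lim my_mem_lim;
++ *	scst_init_mem_lim(&my_mem_lim);
++ */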
++
++static struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
++ struct scst_device *dev, uint64_t lun)
++{
++ struct scst_acg_dev *res;
++
++ TRACE_ENTRY();
++
++ res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
++ if (res == NULL) {
++ TRACE(TRACE_OUT_OF_MEM,
++ "%s", "Allocation of scst_acg_dev failed");
++ goto out;
++ }
++
++ res->dev = dev;
++ res->acg = acg;
++ res->lun = lun;
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++/*
++ * The activity supposed to be suspended and scst_mutex held or the
++ * corresponding target supposed to be stopped.
++ */
++static void scst_del_free_acg_dev(struct scst_acg_dev *acg_dev, bool del_sysfs)
++{
++ TRACE_ENTRY();
++
++ TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
++ acg_dev);
++ list_del(&acg_dev->acg_dev_list_entry);
++ list_del(&acg_dev->dev_acg_dev_list_entry);
++
++ if (del_sysfs)
++ scst_acg_dev_sysfs_del(acg_dev);
++
++ kmem_cache_free(scst_acgd_cachep, acg_dev);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* The activity supposed to be suspended and scst_mutex held */
++int scst_acg_add_lun(struct scst_acg *acg, struct kobject *parent,
++ struct scst_device *dev, uint64_t lun, int read_only,
++ bool gen_scst_report_luns_changed, struct scst_acg_dev **out_acg_dev)
++{
++ int res = 0;
++ struct scst_acg_dev *acg_dev;
++ struct scst_tgt_dev *tgt_dev;
++ struct scst_session *sess;
++ LIST_HEAD(tmp_tgt_dev_list);
++ bool del_sysfs = true;
++
++ TRACE_ENTRY();
++
++ INIT_LIST_HEAD(&tmp_tgt_dev_list);
++
++ acg_dev = scst_alloc_acg_dev(acg, dev, lun);
++ if (acg_dev == NULL) {
++ res = -ENOMEM;
++ goto out;
++ }
++ acg_dev->rd_only = read_only;
++
++ TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
++ acg_dev);
++ list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
++ list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);
++
++ list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
++ res = scst_alloc_add_tgt_dev(sess, acg_dev, &tgt_dev);
++ if (res == -EPERM)
++ continue;
++ else if (res != 0)
++ goto out_free;
++
++ list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
++ &tmp_tgt_dev_list);
++ }
++
++ res = scst_acg_dev_sysfs_create(acg_dev, parent);
++ if (res != 0) {
++ del_sysfs = false;
++ goto out_free;
++ }
++
++ if (gen_scst_report_luns_changed)
++ scst_report_luns_changed(acg);
++
++ PRINT_INFO("Added device %s to group %s (LUN %lld, "
++ "rd_only %d)", dev->virt_name, acg->acg_name,
++ (long long unsigned int)lun, read_only);
++
++ if (out_acg_dev != NULL)
++ *out_acg_dev = acg_dev;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free:
++ list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
++ extra_tgt_dev_list_entry) {
++ scst_free_tgt_dev(tgt_dev);
++ }
++ scst_del_free_acg_dev(acg_dev, del_sysfs);
++ goto out;
++}
++
++/* The activity supposed to be suspended and scst_mutex held */
++int scst_acg_del_lun(struct scst_acg *acg, uint64_t lun,
++ bool gen_scst_report_luns_changed)
++{
++ int res = 0;
++ struct scst_acg_dev *acg_dev = NULL, *a;
++ struct scst_tgt_dev *tgt_dev, *tt;
++
++ TRACE_ENTRY();
++
++ list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
++ if (a->lun == lun) {
++ acg_dev = a;
++ break;
++ }
++ }
++ if (acg_dev == NULL) {
++ PRINT_ERROR("Device is not found in group %s", acg->acg_name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ list_for_each_entry_safe(tgt_dev, tt, &acg_dev->dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ if (tgt_dev->acg_dev == acg_dev)
++ scst_free_tgt_dev(tgt_dev);
++ }
++
++ scst_del_free_acg_dev(acg_dev, true);
++
++ if (gen_scst_report_luns_changed)
++ scst_report_luns_changed(acg);
++
++ PRINT_INFO("Removed LUN %lld from group %s", (unsigned long long)lun,
++ acg->acg_name);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* The activity supposed to be suspended and scst_mutex held */
++struct scst_acg *scst_alloc_add_acg(struct scst_tgt *tgt,
++ const char *acg_name, bool tgt_acg)
++{
++ struct scst_acg *acg;
++
++ TRACE_ENTRY();
++
++ acg = kzalloc(sizeof(*acg), GFP_KERNEL);
++ if (acg == NULL) {
++ PRINT_ERROR("%s", "Allocation of acg failed");
++ goto out;
++ }
++
++ acg->tgt = tgt;
++ INIT_LIST_HEAD(&acg->acg_dev_list);
++ INIT_LIST_HEAD(&acg->acg_sess_list);
++ INIT_LIST_HEAD(&acg->acn_list);
++ acg->acg_name = kstrdup(acg_name, GFP_KERNEL);
++ if (acg->acg_name == NULL) {
++ PRINT_ERROR("%s", "Allocation of acg_name failed");
++ goto out_free;
++ }
++
++ acg->addr_method = SCST_LUN_ADDR_METHOD_PERIPHERAL;
++
++ if (tgt_acg) {
++ int rc;
++
++		TRACE_DBG("Adding acg '%s' to target '%s' acg_list", acg_name,
++ tgt->tgt_name);
++ list_add_tail(&acg->acg_list_entry, &tgt->tgt_acg_list);
++ acg->tgt_acg = 1;
++
++ rc = scst_acg_sysfs_create(tgt, acg);
++ if (rc != 0)
++ goto out_del;
++ }
++
++out:
++ TRACE_EXIT_HRES(acg);
++ return acg;
++
++out_del:
++ list_del(&acg->acg_list_entry);
++
++out_free:
++ kfree(acg);
++ acg = NULL;
++ goto out;
++}
++
++/* The activity supposed to be suspended and scst_mutex held */
++void scst_del_free_acg(struct scst_acg *acg)
++{
++ struct scst_acn *acn, *acnt;
++ struct scst_acg_dev *acg_dev, *acg_dev_tmp;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Clearing acg %s from list", acg->acg_name);
++
++ BUG_ON(!list_empty(&acg->acg_sess_list));
++
++ /* Freeing acg_devs */
++ list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
++ acg_dev_list_entry) {
++ struct scst_tgt_dev *tgt_dev, *tt;
++ list_for_each_entry_safe(tgt_dev, tt,
++ &acg_dev->dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ if (tgt_dev->acg_dev == acg_dev)
++ scst_free_tgt_dev(tgt_dev);
++ }
++ scst_del_free_acg_dev(acg_dev, true);
++ }
++
++ /* Freeing names */
++ list_for_each_entry_safe(acn, acnt, &acg->acn_list, acn_list_entry) {
++ scst_del_free_acn(acn,
++ list_is_last(&acn->acn_list_entry, &acg->acn_list));
++ }
++ INIT_LIST_HEAD(&acg->acn_list);
++
++ if (acg->tgt_acg) {
++ TRACE_DBG("Removing acg %s from list", acg->acg_name);
++ list_del(&acg->acg_list_entry);
++
++ scst_acg_sysfs_del(acg);
++ } else
++ acg->tgt->default_acg = NULL;
++
++ BUG_ON(!list_empty(&acg->acg_sess_list));
++ BUG_ON(!list_empty(&acg->acg_dev_list));
++ BUG_ON(!list_empty(&acg->acn_list));
++
++ kfree(acg->acg_name);
++ kfree(acg);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* The activity supposed to be suspended and scst_mutex held */
++struct scst_acg *scst_tgt_find_acg(struct scst_tgt *tgt, const char *name)
++{
++ struct scst_acg *acg, *acg_ret = NULL;
++
++ TRACE_ENTRY();
++
++ list_for_each_entry(acg, &tgt->tgt_acg_list, acg_list_entry) {
++ if (strcmp(acg->acg_name, name) == 0) {
++ acg_ret = acg;
++ break;
++ }
++ }
++
++ TRACE_EXIT();
++ return acg_ret;
++}
++
++/* scst_mutex supposed to be held */
++static struct scst_tgt_dev *scst_find_shared_io_tgt_dev(
++ struct scst_tgt_dev *tgt_dev)
++{
++ struct scst_tgt_dev *res = NULL;
++ struct scst_acg *acg = tgt_dev->acg_dev->acg;
++ struct scst_tgt_dev *t;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("tgt_dev %s (acg %p, io_grouping_type %d)",
++ tgt_dev->sess->initiator_name, acg, acg->acg_io_grouping_type);
++
++ switch (acg->acg_io_grouping_type) {
++ case SCST_IO_GROUPING_AUTO:
++ if (tgt_dev->sess->initiator_name == NULL)
++ goto out;
++
++ list_for_each_entry(t, &tgt_dev->dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ if ((t == tgt_dev) ||
++ (t->sess->initiator_name == NULL) ||
++ (t->active_cmd_threads == NULL))
++ continue;
++
++ TRACE_DBG("t %s", t->sess->initiator_name);
++
++			/* We check other ACGs as well */
++
++ if (strcmp(t->sess->initiator_name,
++ tgt_dev->sess->initiator_name) == 0)
++ goto found;
++ }
++ break;
++
++ case SCST_IO_GROUPING_THIS_GROUP_ONLY:
++ list_for_each_entry(t, &tgt_dev->dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ if ((t == tgt_dev) || (t->active_cmd_threads == NULL))
++ continue;
++
++ TRACE_DBG("t %s (acg %p)", t->sess->initiator_name,
++ t->acg_dev->acg);
++
++ if (t->acg_dev->acg == acg)
++ goto found;
++ }
++ break;
++
++ case SCST_IO_GROUPING_NEVER:
++ goto out;
++
++ default:
++ list_for_each_entry(t, &tgt_dev->dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ if ((t == tgt_dev) || (t->active_cmd_threads == NULL))
++ continue;
++
++ TRACE_DBG("t %s (acg %p, io_grouping_type %d)",
++ t->sess->initiator_name, t->acg_dev->acg,
++ t->acg_dev->acg->acg_io_grouping_type);
++
++ if (t->acg_dev->acg->acg_io_grouping_type ==
++ acg->acg_io_grouping_type)
++ goto found;
++ }
++ break;
++ }
++
++out:
++ TRACE_EXIT_HRES((unsigned long)res);
++ return res;
++
++found:
++ if (t->active_cmd_threads == &scst_main_cmd_threads) {
++ res = t;
++ TRACE_MGMT_DBG("Going to share async IO context %p (res %p, "
++ "ini %s, dev %s, grouping type %d)",
++ t->aic_keeper->aic, res, t->sess->initiator_name,
++ t->dev->virt_name,
++ t->acg_dev->acg->acg_io_grouping_type);
++ } else {
++ res = t;
++ if (!*(volatile bool*)&res->active_cmd_threads->io_context_ready) {
++ TRACE_MGMT_DBG("IO context for t %p not yet "
++ "initialized, waiting...", t);
++ msleep(100);
++ barrier();
++ goto found;
++ }
++ TRACE_MGMT_DBG("Going to share IO context %p (res %p, ini %s, "
++ "dev %s, cmd_threads %p, grouping type %d)",
++ res->active_cmd_threads->io_context, res,
++ t->sess->initiator_name, t->dev->virt_name,
++ t->active_cmd_threads,
++ t->acg_dev->acg->acg_io_grouping_type);
++ }
++ goto out;
++}
++
++enum scst_dev_type_threads_pool_type scst_parse_threads_pool_type(const char *p,
++ int len)
++{
++ enum scst_dev_type_threads_pool_type res;
++
++ if (strncasecmp(p, SCST_THREADS_POOL_PER_INITIATOR_STR,
++ min_t(int, strlen(SCST_THREADS_POOL_PER_INITIATOR_STR),
++ len)) == 0)
++ res = SCST_THREADS_POOL_PER_INITIATOR;
++ else if (strncasecmp(p, SCST_THREADS_POOL_SHARED_STR,
++ min_t(int, strlen(SCST_THREADS_POOL_SHARED_STR),
++ len)) == 0)
++ res = SCST_THREADS_POOL_SHARED;
++ else {
++ PRINT_ERROR("Unknown threads pool type %s", p);
++ res = SCST_THREADS_POOL_TYPE_INVALID;
++ }
++
++ return res;
++}
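++/*
++ * For example, a sysfs store handler could parse its buffer like this
++ * (illustrative sketch only, buf/count being the usual store arguments):
++ *
++ *	type = scst_parse_threads_pool_type(buf, count);
++ *	if (type == SCST_THREADS_POOL_TYPE_INVALID)
++ *		return -EINVAL;
++ */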
++
++static int scst_ioc_keeper_thread(void *arg)
++{
++ struct scst_async_io_context_keeper *aic_keeper =
++ (struct scst_async_io_context_keeper *)arg;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("AIC %p keeper thread %s (PID %d) started", aic_keeper,
++ current->comm, current->pid);
++
++ current->flags |= PF_NOFREEZE;
++
++ BUG_ON(aic_keeper->aic != NULL);
++
++ aic_keeper->aic = get_io_context(GFP_KERNEL, -1);
++ TRACE_MGMT_DBG("Alloced new async IO context %p (aic %p)",
++ aic_keeper->aic, aic_keeper);
++
++ /* We have our own ref counting */
++ put_io_context(aic_keeper->aic);
++
++ /* We are ready */
++ aic_keeper->aic_ready = true;
++ wake_up_all(&aic_keeper->aic_keeper_waitQ);
++
++ wait_event_interruptible(aic_keeper->aic_keeper_waitQ,
++ kthread_should_stop());
++
++ TRACE_MGMT_DBG("AIC %p keeper thread %s (PID %d) finished", aic_keeper,
++ current->comm, current->pid);
++
++ TRACE_EXIT();
++ return 0;
++}
++
++/* scst_mutex supposed to be held */
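++/*
++ * Three cases are handled below: threads_num < 0 means no per-device SCST
++ * threads are set up at all, threads_num == 0 attaches the tgt_dev to the
++ * global scst_main_cmd_threads pool with a (possibly shared) async IO
++ * context keeper, and threads_num > 0 creates either a per-initiator or a
++ * per-device shared pool, depending on dev->threads_pool_type.
++ */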
++int scst_tgt_dev_setup_threads(struct scst_tgt_dev *tgt_dev)
++{
++ int res = 0;
++ struct scst_device *dev = tgt_dev->dev;
++ struct scst_async_io_context_keeper *aic_keeper;
++
++ TRACE_ENTRY();
++
++ if (dev->threads_num < 0)
++ goto out;
++
++ if (dev->threads_num == 0) {
++ struct scst_tgt_dev *shared_io_tgt_dev;
++ tgt_dev->active_cmd_threads = &scst_main_cmd_threads;
++
++ shared_io_tgt_dev = scst_find_shared_io_tgt_dev(tgt_dev);
++ if (shared_io_tgt_dev != NULL) {
++ aic_keeper = shared_io_tgt_dev->aic_keeper;
++ kref_get(&aic_keeper->aic_keeper_kref);
++
++ TRACE_MGMT_DBG("Linking async io context %p "
++ "for shared tgt_dev %p (dev %s)",
++ aic_keeper->aic, tgt_dev,
++ tgt_dev->dev->virt_name);
++ } else {
++ /* Create new context */
++ aic_keeper = kzalloc(sizeof(*aic_keeper), GFP_KERNEL);
++ if (aic_keeper == NULL) {
++ PRINT_ERROR("Unable to alloc aic_keeper "
++ "(size %zd)", sizeof(*aic_keeper));
++ res = -ENOMEM;
++ goto out;
++ }
++
++ kref_init(&aic_keeper->aic_keeper_kref);
++ init_waitqueue_head(&aic_keeper->aic_keeper_waitQ);
++
++ aic_keeper->aic_keeper_thr =
++ kthread_run(scst_ioc_keeper_thread,
++ aic_keeper, "aic_keeper");
++ if (IS_ERR(aic_keeper->aic_keeper_thr)) {
++ PRINT_ERROR("Error running ioc_keeper "
++ "thread (tgt_dev %p)", tgt_dev);
++ res = PTR_ERR(aic_keeper->aic_keeper_thr);
++ goto out_free_keeper;
++ }
++
++ wait_event(aic_keeper->aic_keeper_waitQ,
++ aic_keeper->aic_ready);
++
++ TRACE_MGMT_DBG("Created async io context %p "
++ "for not shared tgt_dev %p (dev %s)",
++ aic_keeper->aic, tgt_dev,
++ tgt_dev->dev->virt_name);
++ }
++
++ tgt_dev->async_io_context = aic_keeper->aic;
++ tgt_dev->aic_keeper = aic_keeper;
++
++ res = scst_add_threads(tgt_dev->active_cmd_threads, NULL, NULL,
++ tgt_dev->sess->tgt->tgtt->threads_num);
++ goto out;
++ }
++
++ switch (dev->threads_pool_type) {
++ case SCST_THREADS_POOL_PER_INITIATOR:
++ {
++ struct scst_tgt_dev *shared_io_tgt_dev;
++
++ scst_init_threads(&tgt_dev->tgt_dev_cmd_threads);
++
++ tgt_dev->active_cmd_threads = &tgt_dev->tgt_dev_cmd_threads;
++
++ shared_io_tgt_dev = scst_find_shared_io_tgt_dev(tgt_dev);
++ if (shared_io_tgt_dev != NULL) {
++ TRACE_MGMT_DBG("Linking io context %p for "
++ "shared tgt_dev %p (cmd_threads %p)",
++ shared_io_tgt_dev->active_cmd_threads->io_context,
++ tgt_dev, tgt_dev->active_cmd_threads);
++ /* It's ref counted via threads */
++ tgt_dev->active_cmd_threads->io_context =
++ shared_io_tgt_dev->active_cmd_threads->io_context;
++ }
++
++ res = scst_add_threads(tgt_dev->active_cmd_threads, NULL,
++ tgt_dev,
++ dev->threads_num + tgt_dev->sess->tgt->tgtt->threads_num);
++ if (res != 0) {
++ /* Let's clear here, because no threads could be run */
++ tgt_dev->active_cmd_threads->io_context = NULL;
++ }
++ break;
++ }
++ case SCST_THREADS_POOL_SHARED:
++ {
++ tgt_dev->active_cmd_threads = &dev->dev_cmd_threads;
++
++ res = scst_add_threads(tgt_dev->active_cmd_threads, dev, NULL,
++ tgt_dev->sess->tgt->tgtt->threads_num);
++ break;
++ }
++ default:
++ PRINT_CRIT_ERROR("Unknown threads pool type %d (dev %s)",
++ dev->threads_pool_type, dev->virt_name);
++ BUG();
++ break;
++ }
++
++out:
++ if (res == 0)
++ tm_dbg_init_tgt_dev(tgt_dev);
++
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free_keeper:
++ kfree(aic_keeper);
++ goto out;
++}
++
++static void scst_aic_keeper_release(struct kref *kref)
++{
++ struct scst_async_io_context_keeper *aic_keeper;
++
++ TRACE_ENTRY();
++
++ aic_keeper = container_of(kref, struct scst_async_io_context_keeper,
++ aic_keeper_kref);
++
++ kthread_stop(aic_keeper->aic_keeper_thr);
++
++ kfree(aic_keeper);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* scst_mutex supposed to be held */
++void scst_tgt_dev_stop_threads(struct scst_tgt_dev *tgt_dev)
++{
++ TRACE_ENTRY();
++
++ if (tgt_dev->dev->threads_num < 0)
++ goto out_deinit;
++
++ if (tgt_dev->active_cmd_threads == &scst_main_cmd_threads) {
++ /* Global async threads */
++ kref_put(&tgt_dev->aic_keeper->aic_keeper_kref,
++ scst_aic_keeper_release);
++ tgt_dev->async_io_context = NULL;
++ tgt_dev->aic_keeper = NULL;
++ } else if (tgt_dev->active_cmd_threads == &tgt_dev->dev->dev_cmd_threads) {
++ /* Per device shared threads */
++ scst_del_threads(tgt_dev->active_cmd_threads,
++ tgt_dev->sess->tgt->tgtt->threads_num);
++ } else if (tgt_dev->active_cmd_threads == &tgt_dev->tgt_dev_cmd_threads) {
++ /* Per tgt_dev threads */
++ scst_del_threads(tgt_dev->active_cmd_threads, -1);
++ scst_deinit_threads(&tgt_dev->tgt_dev_cmd_threads);
++	} /* else no threads (e.g., not yet initialized) */
++
++out_deinit:
++ tm_dbg_deinit_tgt_dev(tgt_dev);
++ tgt_dev->active_cmd_threads = NULL;
++
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * scst_mutex supposed to be held, there must not be parallel activity in this
++ * session.
++ */
++static int scst_alloc_add_tgt_dev(struct scst_session *sess,
++ struct scst_acg_dev *acg_dev, struct scst_tgt_dev **out_tgt_dev)
++{
++ int res = 0;
++ int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
++ struct scst_tgt_dev *tgt_dev;
++ struct scst_device *dev = acg_dev->dev;
++ struct list_head *sess_tgt_dev_list_head;
++ int i, sl;
++ uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
++
++ TRACE_ENTRY();
++
++ tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
++ if (tgt_dev == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_tgt_dev "
++ "failed");
++ res = -ENOMEM;
++ goto out;
++ }
++
++ tgt_dev->dev = dev;
++ tgt_dev->lun = acg_dev->lun;
++ tgt_dev->acg_dev = acg_dev;
++ tgt_dev->sess = sess;
++ atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);
++
++ scst_sgv_pool_use_norm(tgt_dev);
++
++ if (dev->scsi_dev != NULL) {
++ ini_sg = dev->scsi_dev->host->sg_tablesize;
++ ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
++ ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
++ ENABLE_CLUSTERING);
++ } else {
++ ini_sg = (1 << 15) /* infinite */;
++ ini_unchecked_isa_dma = 0;
++ ini_use_clustering = 0;
++ }
++ tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);
++
++ if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
++ !sess->tgt->tgtt->no_clustering)
++ scst_sgv_pool_use_norm_clust(tgt_dev);
++
++ if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma)
++ scst_sgv_pool_use_dma(tgt_dev);
++
++ TRACE_MGMT_DBG("Device %s on SCST lun=%lld",
++ dev->virt_name, (long long unsigned int)tgt_dev->lun);
++
++ spin_lock_init(&tgt_dev->tgt_dev_lock);
++ INIT_LIST_HEAD(&tgt_dev->UA_list);
++ spin_lock_init(&tgt_dev->thr_data_lock);
++ INIT_LIST_HEAD(&tgt_dev->thr_data_list);
++ spin_lock_init(&tgt_dev->sn_lock);
++ INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
++ INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
++ tgt_dev->curr_sn = (typeof(tgt_dev->curr_sn))(-300);
++ tgt_dev->expected_sn = tgt_dev->curr_sn + 1;
++ tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
++ tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
++ for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
++ atomic_set(&tgt_dev->sn_slots[i], 0);
++
++ if (dev->handler->parse_atomic &&
++ dev->handler->alloc_data_buf_atomic &&
++ (sess->tgt->tgtt->preprocessing_done == NULL)) {
++ if (sess->tgt->tgtt->rdy_to_xfer_atomic)
++ __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
++ &tgt_dev->tgt_dev_flags);
++ }
++ if (dev->handler->dev_done_atomic &&
++ sess->tgt->tgtt->xmit_response_atomic) {
++ __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
++ &tgt_dev->tgt_dev_flags);
++ }
++
++ sl = scst_set_sense(sense_buffer, sizeof(sense_buffer),
++ dev->d_sense, SCST_LOAD_SENSE(scst_sense_reset_UA));
++ scst_alloc_set_UA(tgt_dev, sense_buffer, sl, 0);
++
++ if (sess->tgt->tgtt->get_initiator_port_transport_id == NULL) {
++ if (!list_empty(&dev->dev_registrants_list)) {
++ PRINT_WARNING("Initiators from target %s can't connect "
++ "to device %s, because the device has PR "
++ "registrants and the target doesn't support "
++ "Persistent Reservations", sess->tgt->tgtt->name,
++ dev->virt_name);
++ res = -EPERM;
++ goto out_free;
++ }
++ dev->not_pr_supporting_tgt_devs_num++;
++ }
++
++ res = scst_pr_init_tgt_dev(tgt_dev);
++ if (res != 0)
++ goto out_dec_free;
++
++ res = scst_tgt_dev_setup_threads(tgt_dev);
++ if (res != 0)
++ goto out_pr_clear;
++
++ if (dev->handler && dev->handler->attach_tgt) {
++ TRACE_DBG("Calling dev handler's attach_tgt(%p)", tgt_dev);
++ res = dev->handler->attach_tgt(tgt_dev);
++ TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
++ if (res != 0) {
++ PRINT_ERROR("Device handler's %s attach_tgt() "
++ "failed: %d", dev->handler->name, res);
++ goto out_stop_threads;
++ }
++ }
++
++ res = scst_tgt_dev_sysfs_create(tgt_dev);
++ if (res != 0)
++ goto out_detach;
++
++ spin_lock_bh(&dev->dev_lock);
++ list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
++ if (dev->dev_reserved)
++ __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
++ spin_unlock_bh(&dev->dev_lock);
++
++ sess_tgt_dev_list_head =
++ &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
++ list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
++ sess_tgt_dev_list_head);
++
++ *out_tgt_dev = tgt_dev;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_detach:
++ if (dev->handler && dev->handler->detach_tgt) {
++ TRACE_DBG("Calling dev handler's detach_tgt(%p)",
++ tgt_dev);
++ dev->handler->detach_tgt(tgt_dev);
++ TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
++ }
++
++out_stop_threads:
++ scst_tgt_dev_stop_threads(tgt_dev);
++
++out_pr_clear:
++ scst_pr_clear_tgt_dev(tgt_dev);
++
++out_dec_free:
++ if (tgt_dev->sess->tgt->tgtt->get_initiator_port_transport_id == NULL)
++ dev->not_pr_supporting_tgt_devs_num--;
++
++out_free:
++ scst_free_all_UA(tgt_dev);
++ kmem_cache_free(scst_tgtd_cachep, tgt_dev);
++ goto out;
++}
++
++/* No locks supposed to be held, scst_mutex - held */
++void scst_nexus_loss(struct scst_tgt_dev *tgt_dev, bool queue_UA)
++{
++ TRACE_ENTRY();
++
++ scst_clear_reservation(tgt_dev);
++
++ /* With activity suspended the lock isn't needed, but let's be safe */
++ spin_lock_bh(&tgt_dev->tgt_dev_lock);
++ scst_free_all_UA(tgt_dev);
++ memset(tgt_dev->tgt_dev_sense, 0, sizeof(tgt_dev->tgt_dev_sense));
++ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
++
++ if (queue_UA) {
++ uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
++ int sl = scst_set_sense(sense_buffer, sizeof(sense_buffer),
++ tgt_dev->dev->d_sense,
++ SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
++ scst_check_set_UA(tgt_dev, sense_buffer, sl, 0);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * scst_mutex supposed to be held, there must not be parallel activity in this
++ * session.
++ */
++static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
++{
++ struct scst_device *dev = tgt_dev->dev;
++
++ TRACE_ENTRY();
++
++ spin_lock_bh(&dev->dev_lock);
++ list_del(&tgt_dev->dev_tgt_dev_list_entry);
++ spin_unlock_bh(&dev->dev_lock);
++
++ list_del(&tgt_dev->sess_tgt_dev_list_entry);
++
++ scst_tgt_dev_sysfs_del(tgt_dev);
++
++ if (tgt_dev->sess->tgt->tgtt->get_initiator_port_transport_id == NULL)
++ dev->not_pr_supporting_tgt_devs_num--;
++
++ scst_clear_reservation(tgt_dev);
++ scst_pr_clear_tgt_dev(tgt_dev);
++ scst_free_all_UA(tgt_dev);
++
++ if (dev->handler && dev->handler->detach_tgt) {
++ TRACE_DBG("Calling dev handler's detach_tgt(%p)",
++ tgt_dev);
++ dev->handler->detach_tgt(tgt_dev);
++ TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
++ }
++
++ scst_tgt_dev_stop_threads(tgt_dev);
++
++ BUG_ON(!list_empty(&tgt_dev->thr_data_list));
++
++ kmem_cache_free(scst_tgtd_cachep, tgt_dev);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* scst_mutex supposed to be held */
++int scst_sess_alloc_tgt_devs(struct scst_session *sess)
++{
++ int res = 0;
++ struct scst_acg_dev *acg_dev;
++ struct scst_tgt_dev *tgt_dev;
++
++ TRACE_ENTRY();
++
++ list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
++ acg_dev_list_entry) {
++ res = scst_alloc_add_tgt_dev(sess, acg_dev, &tgt_dev);
++ if (res == -EPERM)
++ continue;
++ else if (res != 0)
++ goto out_free;
++ }
++
++out:
++ TRACE_EXIT();
++ return res;
++
++out_free:
++ scst_sess_free_tgt_devs(sess);
++ goto out;
++}
++
++/*
++ * scst_mutex supposed to be held, there must not be parallel activity in this
++ * session.
++ */
++void scst_sess_free_tgt_devs(struct scst_session *sess)
++{
++ int i;
++ struct scst_tgt_dev *tgt_dev, *t;
++
++ TRACE_ENTRY();
++
++ /* The session is going down, no users, so no locks */
++ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
++ struct list_head *sess_tgt_dev_list_head =
++ &sess->sess_tgt_dev_list_hash[i];
++ list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
++ sess_tgt_dev_list_entry) {
++ scst_free_tgt_dev(tgt_dev);
++ }
++ INIT_LIST_HEAD(sess_tgt_dev_list_head);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/* The activity supposed to be suspended and scst_mutex held */
++int scst_acg_add_acn(struct scst_acg *acg, const char *name)
++{
++ int res = 0;
++ struct scst_acn *acn;
++ int len;
++ char *nm;
++
++ TRACE_ENTRY();
++
++ list_for_each_entry(acn, &acg->acn_list, acn_list_entry) {
++ if (strcmp(acn->name, name) == 0) {
++ PRINT_ERROR("Name %s already exists in group %s",
++ name, acg->acg_name);
++ res = -EEXIST;
++ goto out;
++ }
++ }
++
++ acn = kzalloc(sizeof(*acn), GFP_KERNEL);
++ if (acn == NULL) {
++ PRINT_ERROR("%s", "Unable to allocate scst_acn");
++ res = -ENOMEM;
++ goto out;
++ }
++
++ acn->acg = acg;
++
++ len = strlen(name);
++ nm = kmalloc(len + 1, GFP_KERNEL);
++ if (nm == NULL) {
++ PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
++ res = -ENOMEM;
++ goto out_free;
++ }
++
++ strcpy(nm, name);
++ acn->name = nm;
++
++ res = scst_acn_sysfs_create(acn);
++ if (res != 0)
++ goto out_free_nm;
++
++ list_add_tail(&acn->acn_list_entry, &acg->acn_list);
++
++out:
++ if (res == 0) {
++ PRINT_INFO("Added name %s to group %s", name, acg->acg_name);
++ scst_check_reassign_sessions();
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free_nm:
++ kfree(nm);
++
++out_free:
++ kfree(acn);
++ goto out;
++}
++
++/* The activity supposed to be suspended and scst_mutex held */
++void scst_del_free_acn(struct scst_acn *acn, bool reassign)
++{
++ TRACE_ENTRY();
++
++ list_del(&acn->acn_list_entry);
++
++ scst_acn_sysfs_del(acn);
++
++ kfree(acn->name);
++ kfree(acn);
++
++ if (reassign)
++ scst_check_reassign_sessions();
++
++ TRACE_EXIT();
++ return;
++}
++
++/* The activity supposed to be suspended and scst_mutex held */
++struct scst_acn *scst_find_acn(struct scst_acg *acg, const char *name)
++{
++ struct scst_acn *acn;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Trying to find name '%s'", name);
++
++ list_for_each_entry(acn, &acg->acn_list, acn_list_entry) {
++ if (strcmp(acn->name, name) == 0) {
++ TRACE_DBG("%s", "Found");
++ goto out;
++ }
++ }
++ acn = NULL;
++out:
++ TRACE_EXIT();
++ return acn;
++}
++
++static struct scst_cmd *scst_create_prepare_internal_cmd(
++ struct scst_cmd *orig_cmd, int bufsize)
++{
++ struct scst_cmd *res;
++ gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
++
++ TRACE_ENTRY();
++
++ res = scst_alloc_cmd(gfp_mask);
++ if (res == NULL)
++ goto out;
++
++ res->cmd_threads = orig_cmd->cmd_threads;
++ res->sess = orig_cmd->sess;
++ res->atomic = scst_cmd_atomic(orig_cmd);
++ res->internal = 1;
++ res->tgtt = orig_cmd->tgtt;
++ res->tgt = orig_cmd->tgt;
++ res->dev = orig_cmd->dev;
++ res->tgt_dev = orig_cmd->tgt_dev;
++ res->lun = orig_cmd->lun;
++ res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
++ res->data_direction = SCST_DATA_UNKNOWN;
++ res->orig_cmd = orig_cmd;
++ res->bufflen = bufsize;
++
++ scst_sess_get(res->sess);
++ if (res->tgt_dev != NULL)
++ __scst_get();
++
++ res->state = SCST_CMD_STATE_PARSE;
++
++out:
++ TRACE_EXIT_HRES((unsigned long)res);
++ return res;
++}
++
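++/*
++ * Builds an internal REQUEST SENSE cmd for orig_cmd and queues it at the
++ * head of the active cmd list; scst_complete_request_sense() below then
++ * copies the returned sense into orig_cmd and re-queues orig_cmd.
++ */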
++int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
++{
++ int res = 0;
++ static const uint8_t request_sense[6] = {
++ REQUEST_SENSE, 0, 0, 0, SCST_SENSE_BUFFERSIZE, 0
++ };
++ struct scst_cmd *rs_cmd;
++
++ TRACE_ENTRY();
++
++ if (orig_cmd->sense != NULL) {
++ TRACE_MEM("Releasing sense %p (orig_cmd %p)",
++ orig_cmd->sense, orig_cmd);
++ mempool_free(orig_cmd->sense, scst_sense_mempool);
++ orig_cmd->sense = NULL;
++ }
++
++ rs_cmd = scst_create_prepare_internal_cmd(orig_cmd,
++ SCST_SENSE_BUFFERSIZE);
++ if (rs_cmd == NULL)
++ goto out_error;
++
++ memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
++ rs_cmd->cdb[1] |= scst_get_cmd_dev_d_sense(orig_cmd);
++ rs_cmd->cdb_len = sizeof(request_sense);
++ rs_cmd->data_direction = SCST_DATA_READ;
++ rs_cmd->expected_data_direction = rs_cmd->data_direction;
++ rs_cmd->expected_transfer_len = SCST_SENSE_BUFFERSIZE;
++ rs_cmd->expected_values_set = 1;
++
++ TRACE_MGMT_DBG("Adding REQUEST SENSE cmd %p to head of active "
++ "cmd list", rs_cmd);
++ spin_lock_irq(&rs_cmd->cmd_threads->cmd_list_lock);
++ list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_threads->active_cmd_list);
++ wake_up(&rs_cmd->cmd_threads->cmd_list_waitQ);
++ spin_unlock_irq(&rs_cmd->cmd_threads->cmd_list_lock);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_error:
++ res = -1;
++ goto out;
++}
++
++static void scst_complete_request_sense(struct scst_cmd *req_cmd)
++{
++ struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
++ uint8_t *buf;
++ int len;
++
++ TRACE_ENTRY();
++
++ BUG_ON(orig_cmd == NULL);
++
++ len = scst_get_buf_first(req_cmd, &buf);
++
++ if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
++ SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
++ PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
++ buf, len);
++ scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
++ len);
++ } else {
++ PRINT_ERROR("%s", "Unable to get the sense via "
++ "REQUEST SENSE, returning HARDWARE ERROR");
++ scst_set_cmd_error(orig_cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ }
++
++ if (len > 0)
++ scst_put_buf(req_cmd, buf);
++
++ TRACE_MGMT_DBG("Adding orig cmd %p to head of active "
++ "cmd list", orig_cmd);
++ spin_lock_irq(&orig_cmd->cmd_threads->cmd_list_lock);
++ list_add(&orig_cmd->cmd_list_entry, &orig_cmd->cmd_threads->active_cmd_list);
++ wake_up(&orig_cmd->cmd_threads->cmd_list_waitQ);
++ spin_unlock_irq(&orig_cmd->cmd_threads->cmd_list_lock);
++
++ TRACE_EXIT();
++ return;
++}
++
++int scst_finish_internal_cmd(struct scst_cmd *cmd)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ BUG_ON(!cmd->internal);
++
++ if (cmd->cdb[0] == REQUEST_SENSE)
++ scst_complete_request_sense(cmd);
++
++ __scst_cmd_put(cmd);
++
++ res = SCST_CMD_STATE_RES_CONT_NEXT;
++
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++static void scst_send_release(struct scst_device *dev)
++{
++ struct scsi_device *scsi_dev;
++ unsigned char cdb[6];
++ uint8_t sense[SCSI_SENSE_BUFFERSIZE];
++ int rc, i;
++
++ TRACE_ENTRY();
++
++ if (dev->scsi_dev == NULL)
++ goto out;
++
++ scsi_dev = dev->scsi_dev;
++
++ for (i = 0; i < 5; i++) {
++ memset(cdb, 0, sizeof(cdb));
++ cdb[0] = RELEASE;
++ cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
++ ((scsi_dev->lun << 5) & 0xe0) : 0;
++
++ memset(sense, 0, sizeof(sense));
++
++ TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
++ "SCSI mid-level");
++ rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
++				sense, 15, 0, 0, NULL);
++		TRACE_DBG("RELEASE done: %x", rc);
++
++ if (scsi_status_is_good(rc)) {
++ break;
++ } else {
++ PRINT_ERROR("RELEASE failed: %d", rc);
++ PRINT_BUFFER("RELEASE sense", sense, sizeof(sense));
++ scst_check_internal_sense(dev, rc, sense,
++ sizeof(sense));
++ }
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* scst_mutex supposed to be held */
++static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
++{
++ struct scst_device *dev = tgt_dev->dev;
++ int release = 0;
++
++ TRACE_ENTRY();
++
++ spin_lock_bh(&dev->dev_lock);
++ if (dev->dev_reserved &&
++ !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
++ /* This is one who holds the reservation */
++ struct scst_tgt_dev *tgt_dev_tmp;
++ list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ clear_bit(SCST_TGT_DEV_RESERVED,
++ &tgt_dev_tmp->tgt_dev_flags);
++ }
++ dev->dev_reserved = 0;
++ release = 1;
++ }
++ spin_unlock_bh(&dev->dev_lock);
++
++ if (release)
++ scst_send_release(dev);
++
++ TRACE_EXIT();
++ return;
++}
++
++struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
++ const char *initiator_name)
++{
++ struct scst_session *sess;
++ int i;
++
++ TRACE_ENTRY();
++
++ sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
++ if (sess == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "%s",
++ "Allocation of scst_session failed");
++ goto out;
++ }
++
++ sess->init_phase = SCST_SESS_IPH_INITING;
++ sess->shut_phase = SCST_SESS_SPH_READY;
++ atomic_set(&sess->refcnt, 0);
++ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
++ struct list_head *sess_tgt_dev_list_head =
++ &sess->sess_tgt_dev_list_hash[i];
++ INIT_LIST_HEAD(sess_tgt_dev_list_head);
++ }
++ spin_lock_init(&sess->sess_list_lock);
++ INIT_LIST_HEAD(&sess->sess_cmd_list);
++ sess->tgt = tgt;
++ INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
++ INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);
++ INIT_DELAYED_WORK(&sess->hw_pending_work,
++ (void (*)(struct work_struct *))scst_hw_pending_work_fn);
++
++#ifdef CONFIG_SCST_MEASURE_LATENCY
++ spin_lock_init(&sess->lat_lock);
++#endif
++
++ sess->initiator_name = kstrdup(initiator_name, gfp_mask);
++ if (sess->initiator_name == NULL) {
++ PRINT_ERROR("%s", "Unable to dup sess->initiator_name");
++ goto out_free;
++ }
++
++out:
++ TRACE_EXIT();
++ return sess;
++
++out_free:
++ kmem_cache_free(scst_sess_cachep, sess);
++ sess = NULL;
++ goto out;
++}
++
++void scst_free_session(struct scst_session *sess)
++{
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_mutex);
++
++ scst_sess_free_tgt_devs(sess);
++
++	/* tgt will stay alive at least as long as its sysfs entry is alive */
++ kobject_get(&sess->tgt->tgt_kobj);
++
++ mutex_unlock(&scst_mutex);
++ scst_sess_sysfs_del(sess);
++ mutex_lock(&scst_mutex);
++
++ /*
++	 * The list deletions must come after the sysfs del. Otherwise they
++	 * would break the logic in scst_sess_sysfs_create() that avoids
++	 * duplicate sysfs names.
++ */
++
++ TRACE_DBG("Removing sess %p from the list", sess);
++ list_del(&sess->sess_list_entry);
++ TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
++ list_del(&sess->acg_sess_list_entry);
++
++ mutex_unlock(&scst_mutex);
++
++ wake_up_all(&sess->tgt->unreg_waitQ);
++
++ kobject_put(&sess->tgt->tgt_kobj);
++
++ kfree(sess->transport_id);
++ kfree(sess->initiator_name);
++
++ kmem_cache_free(scst_sess_cachep, sess);
++
++ TRACE_EXIT();
++ return;
++}
++
++void scst_free_session_callback(struct scst_session *sess)
++{
++ struct completion *c;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Freeing session %p", sess);
++
++ cancel_delayed_work_sync(&sess->hw_pending_work);
++
++ c = sess->shutdown_compl;
++
++ mutex_lock(&scst_mutex);
++ /*
++	 * Necessary to sync with other threads trying to queue an AEN, which
++	 * the target driver would not be able to serve and would crash,
++	 * because its internal session data is destroyed after
++	 * unreg_done_fn() has been called.
++ */
++ sess->shut_phase = SCST_SESS_SPH_UNREG_DONE_CALLING;
++ mutex_unlock(&scst_mutex);
++
++ if (sess->unreg_done_fn) {
++ TRACE_DBG("Calling unreg_done_fn(%p)", sess);
++ sess->unreg_done_fn(sess);
++ TRACE_DBG("%s", "unreg_done_fn() returned");
++ }
++ scst_free_session(sess);
++
++ if (c)
++ complete_all(c);
++
++ TRACE_EXIT();
++ return;
++}
++
++void scst_sched_session_free(struct scst_session *sess)
++{
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ if (sess->shut_phase != SCST_SESS_SPH_SHUTDOWN) {
++ PRINT_CRIT_ERROR("session %p is going to shutdown with unknown "
++ "shut phase %lx", sess, sess->shut_phase);
++ BUG();
++ }
++
++ spin_lock_irqsave(&scst_mgmt_lock, flags);
++ TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
++ list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
++ spin_unlock_irqrestore(&scst_mgmt_lock, flags);
++
++ wake_up(&scst_mgmt_waitQ);
++
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ * scst_cmd_get() - increase command's reference counter
++ */
++void scst_cmd_get(struct scst_cmd *cmd)
++{
++ __scst_cmd_get(cmd);
++}
++EXPORT_SYMBOL(scst_cmd_get);
++
++/**
++ * scst_cmd_put() - decrease command's reference counter
++ */
++void scst_cmd_put(struct scst_cmd *cmd)
++{
++ __scst_cmd_put(cmd);
++}
++EXPORT_SYMBOL(scst_cmd_put);
++
++struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask)
++{
++ struct scst_cmd *cmd;
++
++ TRACE_ENTRY();
++
++ cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
++ if (cmd == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
++ goto out;
++ }
++
++ cmd->state = SCST_CMD_STATE_INIT_WAIT;
++ cmd->start_time = jiffies;
++ atomic_set(&cmd->cmd_ref, 1);
++ cmd->cmd_threads = &scst_main_cmd_threads;
++ INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
++ cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
++ cmd->timeout = SCST_DEFAULT_TIMEOUT;
++ cmd->retries = 0;
++ cmd->data_len = -1;
++ cmd->is_send_status = 1;
++ cmd->resp_data_len = -1;
++ cmd->write_sg = &cmd->sg;
++ cmd->write_sg_cnt = &cmd->sg_cnt;
++
++ cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
++ cmd->dbl_ua_orig_resp_data_len = -1;
++
++out:
++ TRACE_EXIT();
++ return cmd;
++}
++
++static void scst_destroy_put_cmd(struct scst_cmd *cmd)
++{
++ scst_sess_put(cmd->sess);
++
++ /*
++ * At this point tgt_dev can be dead, but the pointer remains non-NULL
++ */
++ if (likely(cmd->tgt_dev != NULL))
++ __scst_put();
++
++ scst_destroy_cmd(cmd);
++ return;
++}
++
++/* No locks supposed to be held */
++void scst_free_cmd(struct scst_cmd *cmd)
++{
++ int destroy = 1;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Freeing cmd %p (tag %llu)",
++ cmd, (long long unsigned int)cmd->tag);
++
++ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
++ TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
++ cmd, atomic_read(&scst_cmd_count));
++ }
++
++ BUG_ON(cmd->unblock_dev);
++
++ /*
++	 * The target driver may have already freed the sg buffer before
++	 * calling scst_tgt_cmd_done(); e.g., scst_local has to do that.
++ */
++ if (!cmd->tgt_data_buf_alloced)
++ scst_check_restore_sg_buff(cmd);
++
++ if ((cmd->tgtt->on_free_cmd != NULL) && likely(!cmd->internal)) {
++ TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
++ scst_set_cur_start(cmd);
++ cmd->tgtt->on_free_cmd(cmd);
++ scst_set_tgt_on_free_time(cmd);
++ TRACE_DBG("%s", "Target's on_free_cmd() returned");
++ }
++
++ if (likely(cmd->dev != NULL)) {
++ struct scst_dev_type *handler = cmd->dev->handler;
++ if (handler->on_free_cmd != NULL) {
++ TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
++ handler->name, cmd);
++ scst_set_cur_start(cmd);
++ handler->on_free_cmd(cmd);
++ scst_set_dev_on_free_time(cmd);
++ TRACE_DBG("Dev handler %s on_free_cmd() returned",
++ handler->name);
++ }
++ }
++
++ scst_release_space(cmd);
++
++ if (unlikely(cmd->sense != NULL)) {
++ TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
++ mempool_free(cmd->sense, scst_sense_mempool);
++ cmd->sense = NULL;
++ }
++
++ if (likely(cmd->tgt_dev != NULL)) {
++#ifdef CONFIG_SCST_EXTRACHECKS
++ if (unlikely(!cmd->sent_for_exec) && !cmd->internal) {
++ PRINT_ERROR("Finishing not executed cmd %p (opcode "
++ "%d, target %s, LUN %lld, sn %d, expected_sn %d)",
++ cmd, cmd->cdb[0], cmd->tgtt->name,
++ (long long unsigned int)cmd->lun,
++ cmd->sn, cmd->tgt_dev->expected_sn);
++ scst_unblock_deferred(cmd->tgt_dev, cmd);
++ }
++#endif
++
++ if (unlikely(cmd->out_of_sn)) {
++ TRACE_SN("Out of SN cmd %p (tag %llu, sn %d), "
++ "destroy=%d", cmd,
++ (long long unsigned int)cmd->tag,
++ cmd->sn, destroy);
++ destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
++ &cmd->cmd_flags);
++ }
++ }
++
++ if (likely(destroy))
++ scst_destroy_put_cmd(cmd);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* No locks supposed to be held. */
++void scst_check_retries(struct scst_tgt *tgt)
++{
++ int need_wake_up = 0;
++
++ TRACE_ENTRY();
++
++ /*
++ * We don't worry about overflow of finished_cmds, because we check
++ * only for its change.
++ */
++ atomic_inc(&tgt->finished_cmds);
++ /* See comment in scst_queue_retry_cmd() */
++ smp_mb__after_atomic_inc();
++ if (unlikely(tgt->retry_cmds > 0)) {
++ struct scst_cmd *c, *tc;
++ unsigned long flags;
++
++ TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
++ tgt->retry_cmds);
++
++ spin_lock_irqsave(&tgt->tgt_lock, flags);
++ list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
++ cmd_list_entry) {
++ tgt->retry_cmds--;
++
++ TRACE_RETRY("Moving retry cmd %p to head of active "
++ "cmd list (retry_cmds left %d)",
++ c, tgt->retry_cmds);
++ spin_lock(&c->cmd_threads->cmd_list_lock);
++ list_move(&c->cmd_list_entry,
++ &c->cmd_threads->active_cmd_list);
++ wake_up(&c->cmd_threads->cmd_list_waitQ);
++ spin_unlock(&c->cmd_threads->cmd_list_lock);
++
++ need_wake_up++;
++ if (need_wake_up >= 2) /* "slow start" */
++ break;
++ }
++ spin_unlock_irqrestore(&tgt->tgt_lock, flags);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static void scst_tgt_retry_timer_fn(unsigned long arg)
++{
++ struct scst_tgt *tgt = (struct scst_tgt *)arg;
++ unsigned long flags;
++
++ TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);
++
++ spin_lock_irqsave(&tgt->tgt_lock, flags);
++ tgt->retry_timer_active = 0;
++ spin_unlock_irqrestore(&tgt->tgt_lock, flags);
++
++ scst_check_retries(tgt);
++
++ TRACE_EXIT();
++ return;
++}
++
++struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask)
++{
++ struct scst_mgmt_cmd *mcmd;
++
++ TRACE_ENTRY();
++
++ mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
++ if (mcmd == NULL) {
++ PRINT_CRIT_ERROR("%s", "Allocation of management command "
++ "failed, some commands and their data could leak");
++ goto out;
++ }
++ memset(mcmd, 0, sizeof(*mcmd));
++
++out:
++ TRACE_EXIT();
++ return mcmd;
++}
++
++void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
++{
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
++ atomic_dec(&mcmd->sess->sess_cmd_count);
++ spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);
++
++ scst_sess_put(mcmd->sess);
++
++ if (mcmd->mcmd_tgt_dev != NULL)
++ __scst_put();
++
++ mempool_free(mcmd, scst_mgmt_mempool);
++
++ TRACE_EXIT();
++ return;
++}
++
++int scst_alloc_space(struct scst_cmd *cmd)
++{
++ gfp_t gfp_mask;
++ int res = -ENOMEM;
++ int atomic = scst_cmd_atomic(cmd);
++ int flags;
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++ static int ll;
++
++ TRACE_ENTRY();
++
++ gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
++
++ flags = atomic ? SGV_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
++ if (cmd->no_sgv)
++ flags |= SGV_POOL_ALLOC_NO_CACHED;
++
++ cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
++ &cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
++ if (cmd->sg == NULL)
++ goto out;
++
++ if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
++ if ((ll < 10) || TRACING_MINOR()) {
++ PRINT_INFO("Unable to complete command due to "
++ "SG IO count limitation (requested %d, "
++ "available %d, tgt lim %d)", cmd->sg_cnt,
++ tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
++ ll++;
++ }
++ goto out_sg_free;
++ }
++
++ if (cmd->data_direction != SCST_DATA_BIDI)
++ goto success;
++
++ cmd->out_sg = sgv_pool_alloc(tgt_dev->pool, cmd->out_bufflen, gfp_mask,
++ flags, &cmd->out_sg_cnt, &cmd->out_sgv,
++ &cmd->dev->dev_mem_lim, NULL);
++ if (cmd->out_sg == NULL)
++ goto out_sg_free;
++
++ if (unlikely(cmd->out_sg_cnt > tgt_dev->max_sg_cnt)) {
++ if ((ll < 10) || TRACING_MINOR()) {
++ PRINT_INFO("Unable to complete command due to "
++ "SG IO count limitation (OUT buffer, requested "
++ "%d, available %d, tgt lim %d)", cmd->out_sg_cnt,
++ tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
++ ll++;
++ }
++ goto out_out_sg_free;
++ }
++
++success:
++ res = 0;
++
++out:
++ TRACE_EXIT();
++ return res;
++
++out_out_sg_free:
++ sgv_pool_free(cmd->out_sgv, &cmd->dev->dev_mem_lim);
++ cmd->out_sgv = NULL;
++ cmd->out_sg = NULL;
++ cmd->out_sg_cnt = 0;
++
++out_sg_free:
++ sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
++ cmd->sgv = NULL;
++ cmd->sg = NULL;
++ cmd->sg_cnt = 0;
++ goto out;
++}
++
++static void scst_release_space(struct scst_cmd *cmd)
++{
++ TRACE_ENTRY();
++
++ if (cmd->sgv == NULL) {
++ if ((cmd->sg != NULL) &&
++ !(cmd->tgt_data_buf_alloced || cmd->dh_data_buf_alloced)) {
++ TRACE_MEM("Freeing sg %p for cmd %p (cnt %d)", cmd->sg,
++ cmd, cmd->sg_cnt);
++ scst_free(cmd->sg, cmd->sg_cnt);
++ goto out_zero;
++ } else
++ goto out;
++ }
++
++ if (cmd->tgt_data_buf_alloced || cmd->dh_data_buf_alloced) {
++ TRACE_MEM("%s", "*data_buf_alloced set, returning");
++ goto out;
++ }
++
++ if (cmd->out_sgv != NULL) {
++ sgv_pool_free(cmd->out_sgv, &cmd->dev->dev_mem_lim);
++ cmd->out_sgv = NULL;
++ cmd->out_sg_cnt = 0;
++ cmd->out_sg = NULL;
++ cmd->out_bufflen = 0;
++ }
++
++ sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
++
++out_zero:
++ cmd->sgv = NULL;
++ cmd->sg_cnt = 0;
++ cmd->sg = NULL;
++ cmd->bufflen = 0;
++ cmd->data_len = 0;
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static void scsi_end_async(struct request *req, int error)
++{
++ struct scsi_io_context *sioc = req->end_io_data;
++
++ TRACE_DBG("sioc %p, cmd %p", sioc, sioc->data);
++
++ if (sioc->done)
++ sioc->done(sioc->data, sioc->sense, req->errors, req->resid_len);
++
++ if (!sioc->full_cdb_used)
++ kmem_cache_free(scsi_io_context_cache, sioc);
++ else
++ kfree(sioc);
++
++ __blk_put_request(req->q, req);
++ return;
++}
++
++/**
++ * scst_scsi_exec_async - executes a SCSI command in pass-through mode
++ * @cmd: scst command
++ * @done: callback function when done
++ */
++int scst_scsi_exec_async(struct scst_cmd *cmd,
++ void (*done)(void *, char *, int, int))
++{
++ int res = 0;
++ struct request_queue *q = cmd->dev->scsi_dev->request_queue;
++ struct request *rq;
++ struct scsi_io_context *sioc;
++ int write = (cmd->data_direction & SCST_DATA_WRITE) ? WRITE : READ;
++ gfp_t gfp = GFP_KERNEL;
++ int cmd_len = cmd->cdb_len;
++
++ if (cmd->ext_cdb_len == 0) {
++ TRACE_DBG("Simple CDB (cmd_len %d)", cmd_len);
++ sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
++ if (sioc == NULL) {
++ res = -ENOMEM;
++ goto out;
++ }
++ } else {
++ cmd_len += cmd->ext_cdb_len;
++
++ TRACE_DBG("Extended CDB (cmd_len %d)", cmd_len);
++
++ sioc = kzalloc(sizeof(*sioc) + cmd_len, gfp);
++ if (sioc == NULL) {
++ res = -ENOMEM;
++ goto out;
++ }
++
++ sioc->full_cdb_used = 1;
++
++ memcpy(sioc->full_cdb, cmd->cdb, cmd->cdb_len);
++ memcpy(&sioc->full_cdb[cmd->cdb_len], cmd->ext_cdb,
++ cmd->ext_cdb_len);
++ }
++
++ rq = blk_get_request(q, write, gfp);
++ if (rq == NULL) {
++ res = -ENOMEM;
++ goto out_free_sioc;
++ }
++
++ rq->cmd_type = REQ_TYPE_BLOCK_PC;
++ rq->cmd_flags |= REQ_QUIET;
++
++ if (cmd->sg == NULL)
++ goto done;
++
++ if (cmd->data_direction == SCST_DATA_BIDI) {
++ struct request *next_rq;
++
++ if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
++ res = -EOPNOTSUPP;
++ goto out_free_rq;
++ }
++
++ res = blk_rq_map_kern_sg(rq, cmd->out_sg, cmd->out_sg_cnt, gfp);
++ if (res != 0) {
++ TRACE_DBG("blk_rq_map_kern_sg() failed: %d", res);
++ goto out_free_rq;
++ }
++
++ next_rq = blk_get_request(q, READ, gfp);
++ if (next_rq == NULL) {
++ res = -ENOMEM;
++ goto out_free_unmap;
++ }
++ rq->next_rq = next_rq;
++ next_rq->cmd_type = rq->cmd_type;
++
++ res = blk_rq_map_kern_sg(next_rq, cmd->sg, cmd->sg_cnt, gfp);
++ if (res != 0) {
++ TRACE_DBG("blk_rq_map_kern_sg() failed: %d", res);
++ goto out_free_unmap;
++ }
++ } else {
++ res = blk_rq_map_kern_sg(rq, cmd->sg, cmd->sg_cnt, gfp);
++ if (res != 0) {
++ TRACE_DBG("blk_rq_map_kern_sg() failed: %d", res);
++ goto out_free_rq;
++ }
++ }
++
++done:
++ TRACE_DBG("sioc %p, cmd %p", sioc, cmd);
++
++ sioc->data = cmd;
++ sioc->done = done;
++
++ rq->cmd_len = cmd_len;
++ if (cmd->ext_cdb_len == 0) {
++ memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
++ memcpy(rq->cmd, cmd->cdb, cmd->cdb_len);
++ } else
++ rq->cmd = sioc->full_cdb;
++
++ rq->sense = sioc->sense;
++ rq->sense_len = sizeof(sioc->sense);
++ rq->timeout = cmd->timeout;
++ rq->retries = cmd->retries;
++ rq->end_io_data = sioc;
++
++ blk_execute_rq_nowait(rq->q, NULL, rq,
++ (cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE), scsi_end_async);
++out:
++ return res;
++
++out_free_unmap:
++ if (rq->next_rq != NULL) {
++ blk_put_request(rq->next_rq);
++ rq->next_rq = NULL;
++ }
++ blk_rq_unmap_kern_sg(rq, res);
++
++out_free_rq:
++ blk_put_request(rq);
++
++out_free_sioc:
++ if (!sioc->full_cdb_used)
++ kmem_cache_free(scsi_io_context_cache, sioc);
++ else
++ kfree(sioc);
++ goto out;
++}
++
++/**
++ * scst_copy_sg() - copy data between the command's SGs
++ *
++ * Copies data between cmd->tgt_sg and cmd->sg in direction defined by
++ * copy_dir parameter.
++ */
++void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir)
++{
++ struct scatterlist *src_sg, *dst_sg;
++ unsigned int to_copy;
++ int atomic = scst_cmd_atomic(cmd);
++
++ TRACE_ENTRY();
++
++ if (copy_dir == SCST_SG_COPY_FROM_TARGET) {
++ if (cmd->data_direction != SCST_DATA_BIDI) {
++ src_sg = cmd->tgt_sg;
++ dst_sg = cmd->sg;
++ to_copy = cmd->bufflen;
++ } else {
++ TRACE_MEM("BIDI cmd %p", cmd);
++ src_sg = cmd->tgt_out_sg;
++ dst_sg = cmd->out_sg;
++ to_copy = cmd->out_bufflen;
++ }
++ } else {
++ src_sg = cmd->sg;
++ dst_sg = cmd->tgt_sg;
++ to_copy = cmd->resp_data_len;
++ }
++
++ TRACE_MEM("cmd %p, copy_dir %d, src_sg %p, dst_sg %p, to_copy %lld",
++ cmd, copy_dir, src_sg, dst_sg, (long long)to_copy);
++
++ if (unlikely(src_sg == NULL) || unlikely(dst_sg == NULL)) {
++ /*
++		 * This can happen, e.g., with scst_user for a cmd with delayed
++		 * allocation that failed with a Check Condition.
++ */
++ goto out;
++ }
++
++ sg_copy(dst_sg, src_sg, 0, to_copy,
++ atomic ? KM_SOFTIRQ0 : KM_USER0,
++ atomic ? KM_SOFTIRQ1 : KM_USER1);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_copy_sg);
++
++int scst_get_full_buf(struct scst_cmd *cmd, uint8_t **buf)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(cmd->sg_buff_vmallocated);
++
++ if (scst_get_buf_count(cmd) > 1) {
++ int len;
++ uint8_t *tmp_buf;
++ int full_size;
++
++ full_size = 0;
++ len = scst_get_buf_first(cmd, &tmp_buf);
++ while (len > 0) {
++ full_size += len;
++ scst_put_buf(cmd, tmp_buf);
++ len = scst_get_buf_next(cmd, &tmp_buf);
++ }
++
++ *buf = vmalloc(full_size);
++ if (*buf == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "vmalloc() failed for opcode "
++ "%x", cmd->cdb[0]);
++ res = -ENOMEM;
++ goto out;
++ }
++ cmd->sg_buff_vmallocated = 1;
++
++ if (scst_cmd_get_data_direction(cmd) == SCST_DATA_WRITE) {
++ uint8_t *buf_ptr;
++
++ buf_ptr = *buf;
++
++ len = scst_get_buf_first(cmd, &tmp_buf);
++ while (len > 0) {
++ memcpy(buf_ptr, tmp_buf, len);
++ buf_ptr += len;
++
++ scst_put_buf(cmd, tmp_buf);
++ len = scst_get_buf_next(cmd, &tmp_buf);
++ }
++ }
++ res = full_size;
++ } else
++ res = scst_get_buf_first(cmd, buf);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++void scst_put_full_buf(struct scst_cmd *cmd, uint8_t *buf)
++{
++ TRACE_ENTRY();
++
++ if (buf == NULL)
++ goto out;
++
++ if (cmd->sg_buff_vmallocated) {
++ if (scst_cmd_get_data_direction(cmd) == SCST_DATA_READ) {
++ int len;
++ uint8_t *tmp_buf, *buf_p;
++
++ buf_p = buf;
++
++ len = scst_get_buf_first(cmd, &tmp_buf);
++ while (len > 0) {
++ memcpy(tmp_buf, buf_p, len);
++ buf_p += len;
++
++ scst_put_buf(cmd, tmp_buf);
++ len = scst_get_buf_next(cmd, &tmp_buf);
++ }
++
++ }
++
++ cmd->sg_buff_vmallocated = 0;
++
++ vfree(buf);
++ } else
++ scst_put_buf(cmd, buf);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, 0, 16, 12, 0, 0 };
++
++#define SCST_CDB_GROUP(opcode) ((opcode >> 5) & 0x7)
++#define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
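++/*
++ * Opcode bits 7..5 select the CDB group; per the table above, groups 0, 1/2,
++ * 4 and 5 correspond to 6-, 10-, 16- and 12-byte CDBs, and 0 means the
++ * length is not defined by the group code.
++ */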
++
++/* The get_trans_len_x() helpers extract x bytes from the CDB as the transfer length, starting at offset off */
++
++static int get_trans_cdb_len_10(struct scst_cmd *cmd, uint8_t off)
++{
++ cmd->cdb_len = 10;
++ cmd->bufflen = 0;
++ return 0;
++}
++
++static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
++{
++ cmd->bufflen = 6;
++ return 0;
++}
++
++static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
++{
++ cmd->bufflen = 8;
++ return 0;
++}
++
++static int get_trans_len_serv_act_in(struct scst_cmd *cmd, uint8_t off)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
++ cmd->op_name = "READ CAPACITY(16)";
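++		/* READ CAPACITY(16): the allocation length is in CDB bytes 10..13 */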
++ cmd->bufflen = be32_to_cpu(get_unaligned((__be32 *)&cmd->cdb[10]));
++ cmd->op_flags |= SCST_IMPLICIT_HQ | SCST_REG_RESERVE_ALLOWED |
++ SCST_WRITE_EXCL_ALLOWED | SCST_EXCL_ACCESS_ALLOWED;
++ } else
++ cmd->op_flags |= SCST_UNKNOWN_LENGTH;
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
++{
++ cmd->bufflen = 1;
++ return 0;
++}
++
++static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
++{
++ uint8_t *p = (uint8_t *)cmd->cdb + off;
++ int res = 0;
++
++ cmd->bufflen = 0;
++ cmd->bufflen |= ((u32)p[0]) << 8;
++ cmd->bufflen |= ((u32)p[1]);
++
++ switch (cmd->cdb[1] & 0x1f) {
++ case 0:
++ case 1:
++ case 6:
++ if (cmd->bufflen != 0) {
++ PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
++ "allocation length for service action %x",
++ cmd->bufflen, cmd->cdb[1] & 0x1f);
++ goto out_inval;
++ }
++ break;
++ }
++
++ switch (cmd->cdb[1] & 0x1f) {
++ case 0:
++ case 1:
++ cmd->bufflen = 20;
++ break;
++ case 6:
++ cmd->bufflen = 32;
++ break;
++ case 8:
++ cmd->bufflen = max(28, cmd->bufflen);
++ break;
++ default:
++ PRINT_ERROR("READ POSITION: Invalid service action %x",
++ cmd->cdb[1] & 0x1f);
++ goto out_inval;
++ }
++
++out:
++ return res;
++
++out_inval:
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ res = 1;
++ goto out;
++}
++
++static int get_trans_len_prevent_allow_medium_removal(struct scst_cmd *cmd,
++ uint8_t off)
++{
++ if ((cmd->cdb[4] & 3) == 0)
++ cmd->op_flags |= SCST_REG_RESERVE_ALLOWED |
++ SCST_WRITE_EXCL_ALLOWED | SCST_EXCL_ACCESS_ALLOWED;
++ return 0;
++}
++
++static int get_trans_len_start_stop(struct scst_cmd *cmd, uint8_t off)
++{
++ if ((cmd->cdb[4] & 0xF1) == 0x1)
++ cmd->op_flags |= SCST_REG_RESERVE_ALLOWED |
++ SCST_WRITE_EXCL_ALLOWED | SCST_EXCL_ACCESS_ALLOWED;
++ return 0;
++}
++
++static int get_trans_len_3_read_elem_stat(struct scst_cmd *cmd, uint8_t off)
++{
++ const uint8_t *p = cmd->cdb + off;
++
++ cmd->bufflen = 0;
++ cmd->bufflen |= ((u32)p[0]) << 16;
++ cmd->bufflen |= ((u32)p[1]) << 8;
++ cmd->bufflen |= ((u32)p[2]);
++
++ if ((cmd->cdb[6] & 0x2) == 0x2)
++ cmd->op_flags |= SCST_REG_RESERVE_ALLOWED |
++ SCST_WRITE_EXCL_ALLOWED | SCST_EXCL_ACCESS_ALLOWED;
++ return 0;
++}
++
++static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
++{
++ cmd->bufflen = (u32)cmd->cdb[off];
++ return 0;
++}
++
++static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
++{
++ cmd->bufflen = (u32)cmd->cdb[off];
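++	/* For 6-byte CDBs (e.g. READ(6)/WRITE(6)) a transfer length of 0 means 256 blocks */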
++ if (cmd->bufflen == 0)
++ cmd->bufflen = 256;
++ return 0;
++}
++
++static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
++{
++ const uint8_t *p = cmd->cdb + off;
++
++ cmd->bufflen = 0;
++ cmd->bufflen |= ((u32)p[0]) << 8;
++ cmd->bufflen |= ((u32)p[1]);
++
++ return 0;
++}
++
++static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
++{
++ const uint8_t *p = cmd->cdb + off;
++
++ cmd->bufflen = 0;
++ cmd->bufflen |= ((u32)p[0]) << 16;
++ cmd->bufflen |= ((u32)p[1]) << 8;
++ cmd->bufflen |= ((u32)p[2]);
++
++ return 0;
++}
++
++static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
++{
++ const uint8_t *p = cmd->cdb + off;
++
++ cmd->bufflen = 0;
++ cmd->bufflen |= ((u32)p[0]) << 24;
++ cmd->bufflen |= ((u32)p[1]) << 16;
++ cmd->bufflen |= ((u32)p[2]) << 8;
++ cmd->bufflen |= ((u32)p[3]);
++
++ return 0;
++}
++
++static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
++{
++ cmd->bufflen = 0;
++ return 0;
++}
++
++static int get_bidi_trans_len_2(struct scst_cmd *cmd, uint8_t off)
++{
++ const uint8_t *p = cmd->cdb + off;
++
++ cmd->bufflen = 0;
++ cmd->bufflen |= ((u32)p[0]) << 8;
++ cmd->bufflen |= ((u32)p[1]);
++
++ cmd->out_bufflen = cmd->bufflen;
++
++ return 0;
++}
++
++/**
++ * scst_get_cdb_info() - fill various info about the command's CDB
++ *
++ * Description:
++ * Fills in the corresponding fields of the command with various information
++ * derived from its CDB.
++ *
++ * Returns: 0 on success, <0 if command is unknown, >0 if command
++ * is invalid.
++ */
++int scst_get_cdb_info(struct scst_cmd *cmd)
++{
++ int dev_type = cmd->dev->type;
++ int i, res = 0;
++ uint8_t op;
++ const struct scst_sdbops *ptr = NULL;
++
++ TRACE_ENTRY();
++
++	op = cmd->cdb[0];	/* get the raw opcode byte */
++
++ TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
++ "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
++ dev_type);
++
++ i = scst_scsi_op_list[op];
++ while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
++ if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
++ ptr = &scst_scsi_op_table[i];
++ TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
++ ptr->ops, ptr->devkey[0], /* disk */
++ ptr->devkey[1], /* tape */
++ ptr->devkey[2], /* printer */
++ ptr->devkey[3], /* cpu */
++ ptr->devkey[4], /* cdr */
++ ptr->devkey[5], /* cdrom */
++ ptr->devkey[6], /* scanner */
++ ptr->devkey[7], /* worm */
++ ptr->devkey[8], /* changer */
++ ptr->devkey[9], /* commdev */
++ ptr->op_name);
++ TRACE_DBG("direction=%d flags=%d off=%d",
++ ptr->direction,
++ ptr->flags,
++ ptr->off);
++ break;
++ }
++ i++;
++ }
++
++ if (unlikely(ptr == NULL)) {
++		/* opcode not found or not currently supported */
++ TRACE(TRACE_MINOR, "Unknown opcode 0x%x for type %d", op,
++ dev_type);
++ res = -1;
++ goto out;
++ }
++
++ cmd->cdb_len = SCST_GET_CDB_LEN(op);
++ cmd->op_name = ptr->op_name;
++ cmd->data_direction = ptr->direction;
++ cmd->op_flags = ptr->flags | SCST_INFO_VALID;
++ res = (*ptr->get_trans_len)(cmd, ptr->off);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL_GPL(scst_get_cdb_info);
++
++/* Packs SCST LUN back to SCSI form */
++__be64 scst_pack_lun(const uint64_t lun, unsigned int addr_method)
++{
++ uint64_t res;
++ uint16_t *p = (uint16_t *)&res;
++
++ res = lun;
++
++ if ((addr_method == SCST_LUN_ADDR_METHOD_FLAT) && (lun != 0)) {
++ /*
++ * Flat space: luns other than 0 should use flat space
++ * addressing method.
++ */
++ *p = 0x7fff & *p;
++ *p = 0x4000 | *p;
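++		/*
++		 * The two assignments above clear bit 7 and set bit 6 of the
++		 * first LUN byte, i.e. select the flat space address method
++		 * (01b) in the resulting big-endian LUN.
++		 */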
++ }
++ /* Default is to use peripheral device addressing mode */
++
++ *p = (__force u16)cpu_to_be16(*p);
++
++ TRACE_EXIT_HRES((unsigned long)res);
++ return (__force __be64)res;
++}
++
++/*
++ * Routine to extract a lun number from an 8-byte LUN structure
++ * in network byte order (BE).
++ * (see SAM-2, Section 4.12.3 page 40)
++ * Supports the peripheral, flat space and logical unit addressing methods.
++ */
++uint64_t scst_unpack_lun(const uint8_t *lun, int len)
++{
++ uint64_t res = NO_SUCH_LUN;
++ int address_method;
++
++ TRACE_ENTRY();
++
++ TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
++
++ if (unlikely(len < 2)) {
++ PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
++ "more", len);
++ goto out;
++ }
++
++ if (len > 2) {
++ switch (len) {
++ case 8:
++ if ((*((__be64 *)lun) &
++ __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
++ goto out_err;
++ break;
++ case 4:
++ if (*((__be16 *)&lun[2]) != 0)
++ goto out_err;
++ break;
++ case 6:
++ if (*((__be32 *)&lun[2]) != 0)
++ goto out_err;
++ break;
++ default:
++ goto out_err;
++ }
++ }
++
++ address_method = (*lun) >> 6; /* high 2 bits of byte 0 */
++ switch (address_method) {
++ case 0: /* peripheral device addressing method */
++#if 0
++ if (*lun) {
++			PRINT_ERROR("Illegal BUS IDENTIFIER in LUN "
++ "peripheral device addressing method 0x%02x, "
++ "expected 0", *lun);
++ break;
++ }
++ res = *(lun + 1);
++ break;
++#else
++ /*
++ * Looks like it's legal to use it as flat space addressing
++ * method as well
++ */
++
++			/* fall through */
++#endif
++
++ case 1: /* flat space addressing method */
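++		/* LUN is the low 6 bits of byte 0 (high part) plus all of byte 1 */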
++ res = *(lun + 1) | (((*lun) & 0x3f) << 8);
++ break;
++
++ case 2: /* logical unit addressing method */
++ if (*lun & 0x3f) {
++ PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
++ "addressing method 0x%02x, expected 0",
++ *lun & 0x3f);
++ break;
++ }
++ if (*(lun + 1) & 0xe0) {
++ PRINT_ERROR("Illegal TARGET in LUN logical unit "
++ "addressing method 0x%02x, expected 0",
++ (*(lun + 1) & 0xf8) >> 5);
++ break;
++ }
++ res = *(lun + 1) & 0x1f;
++ break;
++
++ case 3: /* extended logical unit addressing method */
++ default:
++ PRINT_ERROR("Unimplemented LUN addressing method %u",
++ address_method);
++ break;
++ }
++
++out:
++ TRACE_EXIT_RES((int)res);
++ return res;
++
++out_err:
++ PRINT_ERROR("%s", "Multi-level LUN unimplemented");
++ goto out;
++}
++
++/**
++ ** Generic parse() support routines.
++ ** Done via function pointers to avoid unneeded dereferences on
++ ** the fast path.
++ **/
++
++/**
++ * scst_calc_block_shift() - calculate block shift
++ *
++ * Calculates and returns block shift for the given sector size
++ */
++int scst_calc_block_shift(int sector_size)
++{
++ int block_shift = 0;
++ int t;
++
++ if (sector_size == 0)
++ sector_size = 512;
++
++ t = sector_size;
++ while (1) {
++ if ((t & 1) != 0)
++ break;
++ t >>= 1;
++ block_shift++;
++ }
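++	/* block_shift now holds the number of trailing zero bits in sector_size */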
++ if (block_shift < 9) {
++ PRINT_ERROR("Wrong sector size %d", sector_size);
++ block_shift = -1;
++ }
++
++ TRACE_EXIT_RES(block_shift);
++ return block_shift;
++}
++EXPORT_SYMBOL_GPL(scst_calc_block_shift);
++
++/**
++ * scst_sbc_generic_parse() - generic SBC parsing
++ *
++ * Generic parse() for SBC (disk) devices
++ */
++int scst_sbc_generic_parse(struct scst_cmd *cmd,
++ int (*get_block_shift)(struct scst_cmd *cmd))
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ /*
++ * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
++ * therefore change them only if necessary
++ */
++
++ TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
++ cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
++
++ switch (cmd->cdb[0]) {
++ case VERIFY_6:
++ case VERIFY:
++ case VERIFY_12:
++ case VERIFY_16:
++ if ((cmd->cdb[1] & BYTCHK) == 0) {
++ cmd->data_len = cmd->bufflen << get_block_shift(cmd);
++ cmd->bufflen = 0;
++ goto set_timeout;
++ } else
++ cmd->data_len = 0;
++ break;
++ default:
++ /* It's all good */
++ break;
++ }
++
++ if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
++ int block_shift = get_block_shift(cmd);
++ /*
++		 * No need for locks here, since *_detach() cannot be
++		 * called while there are outstanding commands.
++ */
++ cmd->bufflen = cmd->bufflen << block_shift;
++ cmd->out_bufflen = cmd->out_bufflen << block_shift;
++ }
++
++set_timeout:
++ if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
++ cmd->timeout = SCST_GENERIC_DISK_REG_TIMEOUT;
++ else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
++ cmd->timeout = SCST_GENERIC_DISK_SMALL_TIMEOUT;
++ else if (cmd->op_flags & SCST_LONG_TIMEOUT)
++ cmd->timeout = SCST_GENERIC_DISK_LONG_TIMEOUT;
++
++ TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
++ res, cmd->bufflen, cmd->data_len, cmd->data_direction);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL_GPL(scst_sbc_generic_parse);
++
++/**
++ * scst_cdrom_generic_parse() - generic MMC parse
++ *
++ * Generic parse() for MMC (cdrom) devices
++ */
++int scst_cdrom_generic_parse(struct scst_cmd *cmd,
++ int (*get_block_shift)(struct scst_cmd *cmd))
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ /*
++ * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
++ * therefore change them only if necessary
++ */
++
++ TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
++ cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
++
++ cmd->cdb[1] &= 0x1f;
++
++ switch (cmd->cdb[0]) {
++ case VERIFY_6:
++ case VERIFY:
++ case VERIFY_12:
++ case VERIFY_16:
++ if ((cmd->cdb[1] & BYTCHK) == 0) {
++ cmd->data_len = cmd->bufflen << get_block_shift(cmd);
++ cmd->bufflen = 0;
++ goto set_timeout;
++ }
++ break;
++ default:
++ /* It's all good */
++ break;
++ }
++
++ if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
++ int block_shift = get_block_shift(cmd);
++ cmd->bufflen = cmd->bufflen << block_shift;
++ cmd->out_bufflen = cmd->out_bufflen << block_shift;
++ }
++
++set_timeout:
++ if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
++ cmd->timeout = SCST_GENERIC_CDROM_REG_TIMEOUT;
++ else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
++ cmd->timeout = SCST_GENERIC_CDROM_SMALL_TIMEOUT;
++ else if (cmd->op_flags & SCST_LONG_TIMEOUT)
++ cmd->timeout = SCST_GENERIC_CDROM_LONG_TIMEOUT;
++
++ TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
++ cmd->data_direction);
++
++ TRACE_EXIT();
++ return res;
++}
++EXPORT_SYMBOL_GPL(scst_cdrom_generic_parse);
++
++/**
++ * scst_modisk_generic_parse() - generic MO parse
++ *
++ * Generic parse() for MO disk devices
++ */
++int scst_modisk_generic_parse(struct scst_cmd *cmd,
++ int (*get_block_shift)(struct scst_cmd *cmd))
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ /*
++ * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
++ * therefore change them only if necessary
++ */
++
++ TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
++ cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
++
++ cmd->cdb[1] &= 0x1f;
++
++ switch (cmd->cdb[0]) {
++ case VERIFY_6:
++ case VERIFY:
++ case VERIFY_12:
++ case VERIFY_16:
++ if ((cmd->cdb[1] & BYTCHK) == 0) {
++ cmd->data_len = cmd->bufflen << get_block_shift(cmd);
++ cmd->bufflen = 0;
++ goto set_timeout;
++ }
++ break;
++ default:
++ /* It's all good */
++ break;
++ }
++
++ if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
++ int block_shift = get_block_shift(cmd);
++ cmd->bufflen = cmd->bufflen << block_shift;
++ cmd->out_bufflen = cmd->out_bufflen << block_shift;
++ }
++
++set_timeout:
++ if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
++ cmd->timeout = SCST_GENERIC_MODISK_REG_TIMEOUT;
++ else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
++ cmd->timeout = SCST_GENERIC_MODISK_SMALL_TIMEOUT;
++ else if (cmd->op_flags & SCST_LONG_TIMEOUT)
++ cmd->timeout = SCST_GENERIC_MODISK_LONG_TIMEOUT;
++
++ TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
++ cmd->data_direction);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL_GPL(scst_modisk_generic_parse);
++
++/**
++ * scst_tape_generic_parse() - generic tape parse
++ *
++ * Generic parse() for tape devices
++ */
++int scst_tape_generic_parse(struct scst_cmd *cmd,
++ int (*get_block_size)(struct scst_cmd *cmd))
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ /*
++ * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
++ * therefore change them only if necessary
++ */
++
++ TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
++ cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
++
++ if (cmd->cdb[0] == READ_POSITION) {
++ int tclp = cmd->cdb[1] & 4;
++ int long_bit = cmd->cdb[1] & 2;
++ int bt = cmd->cdb[1] & 1;
++
++ if ((tclp == long_bit) && (!bt || !long_bit)) {
++ cmd->bufflen =
++ tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
++ cmd->data_direction = SCST_DATA_READ;
++ } else {
++ cmd->bufflen = 0;
++ cmd->data_direction = SCST_DATA_NONE;
++ }
++ }
++
++ if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED & cmd->cdb[1]) {
++ int block_size = get_block_size(cmd);
++ cmd->bufflen = cmd->bufflen * block_size;
++ cmd->out_bufflen = cmd->out_bufflen * block_size;
++ }
++
++ if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
++ cmd->timeout = SCST_GENERIC_TAPE_REG_TIMEOUT;
++ else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
++ cmd->timeout = SCST_GENERIC_TAPE_SMALL_TIMEOUT;
++ else if (cmd->op_flags & SCST_LONG_TIMEOUT)
++ cmd->timeout = SCST_GENERIC_TAPE_LONG_TIMEOUT;
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL_GPL(scst_tape_generic_parse);
++
++static int scst_null_parse(struct scst_cmd *cmd)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ /*
++ * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
++ * therefore change them only if necessary
++ */
++
++ TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
++ cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
++#if 0
++ switch (cmd->cdb[0]) {
++ default:
++ /* It's all good */
++ break;
++ }
++#endif
++ TRACE_DBG("res %d bufflen %d direct %d",
++ res, cmd->bufflen, cmd->data_direction);
++
++ TRACE_EXIT();
++ return res;
++}
++
++/**
++ * scst_changer_generic_parse() - generic changer parse
++ *
++ * Generic parse() for changer devices
++ */
++int scst_changer_generic_parse(struct scst_cmd *cmd,
++ int (*nothing)(struct scst_cmd *cmd))
++{
++ int res = scst_null_parse(cmd);
++
++ if (cmd->op_flags & SCST_LONG_TIMEOUT)
++ cmd->timeout = SCST_GENERIC_CHANGER_LONG_TIMEOUT;
++ else
++ cmd->timeout = SCST_GENERIC_CHANGER_TIMEOUT;
++
++ return res;
++}
++EXPORT_SYMBOL_GPL(scst_changer_generic_parse);
++
++/**
++ * scst_processor_generic_parse - generic SCSI processor parse
++ *
++ * Generic parse() for SCSI processor devices
++ */
++int scst_processor_generic_parse(struct scst_cmd *cmd,
++ int (*nothing)(struct scst_cmd *cmd))
++{
++ int res = scst_null_parse(cmd);
++
++ if (cmd->op_flags & SCST_LONG_TIMEOUT)
++ cmd->timeout = SCST_GENERIC_PROCESSOR_LONG_TIMEOUT;
++ else
++ cmd->timeout = SCST_GENERIC_PROCESSOR_TIMEOUT;
++
++ return res;
++}
++EXPORT_SYMBOL_GPL(scst_processor_generic_parse);
++
++/**
++ * scst_raid_generic_parse() - generic RAID parse
++ *
++ * Generic parse() for RAID devices
++ */
++int scst_raid_generic_parse(struct scst_cmd *cmd,
++ int (*nothing)(struct scst_cmd *cmd))
++{
++ int res = scst_null_parse(cmd);
++
++ if (cmd->op_flags & SCST_LONG_TIMEOUT)
++ cmd->timeout = SCST_GENERIC_RAID_LONG_TIMEOUT;
++ else
++ cmd->timeout = SCST_GENERIC_RAID_TIMEOUT;
++
++ return res;
++}
++EXPORT_SYMBOL_GPL(scst_raid_generic_parse);
++
++/**
++ ** Generic dev_done() support routines.
++ ** Done via function pointers to avoid unneeded dereferences on
++ ** the fast path.
++ **/
++
++/**
++ * scst_block_generic_dev_done() - generic SBC dev_done
++ *
++ * Generic dev_done() for block (SBC) devices
++ */
++int scst_block_generic_dev_done(struct scst_cmd *cmd,
++ void (*set_block_shift)(struct scst_cmd *cmd, int block_shift))
++{
++ int opcode = cmd->cdb[0];
++ int status = cmd->status;
++ int res = SCST_CMD_STATE_DEFAULT;
++
++ TRACE_ENTRY();
++
++ /*
++ * SCST sets good defaults for cmd->is_send_status and
++ * cmd->resp_data_len based on cmd->status and cmd->data_direction,
++ * therefore change them only if necessary
++ */
++
++ if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET)) {
++ switch (opcode) {
++ case READ_CAPACITY:
++ {
++ /* Always keep track of disk capacity */
++ int buffer_size, sector_size, sh;
++ uint8_t *buffer;
++
++ buffer_size = scst_get_buf_first(cmd, &buffer);
++ if (unlikely(buffer_size <= 0)) {
++ if (buffer_size < 0) {
++ PRINT_ERROR("%s: Unable to get the"
++ " buffer (%d)", __func__, buffer_size);
++ }
++ goto out;
++ }
++
++ sector_size =
++ ((buffer[4] << 24) | (buffer[5] << 16) |
++ (buffer[6] << 8) | (buffer[7] << 0));
++ scst_put_buf(cmd, buffer);
++ if (sector_size != 0)
++ sh = scst_calc_block_shift(sector_size);
++ else
++ sh = 0;
++ set_block_shift(cmd, sh);
++ TRACE_DBG("block_shift %d", sh);
++ break;
++ }
++ default:
++ /* It's all good */
++ break;
++ }
++ }
++
++ TRACE_DBG("cmd->is_send_status=%x, cmd->resp_data_len=%d, "
++ "res=%d", cmd->is_send_status, cmd->resp_data_len, res);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL_GPL(scst_block_generic_dev_done);
++
++/**
++ * scst_tape_generic_dev_done() - generic tape dev done
++ *
++ * Generic dev_done() for tape devices
++ */
++int scst_tape_generic_dev_done(struct scst_cmd *cmd,
++ void (*set_block_size)(struct scst_cmd *cmd, int block_shift))
++{
++ int opcode = cmd->cdb[0];
++ int res = SCST_CMD_STATE_DEFAULT;
++ int buffer_size, bs;
++ uint8_t *buffer = NULL;
++
++ TRACE_ENTRY();
++
++ /*
++ * SCST sets good defaults for cmd->is_send_status and
++ * cmd->resp_data_len based on cmd->status and cmd->data_direction,
++ * therefore change them only if necessary
++ */
++
++ if (cmd->status != SAM_STAT_GOOD)
++ goto out;
++
++ switch (opcode) {
++ case MODE_SENSE:
++ case MODE_SELECT:
++ buffer_size = scst_get_buf_first(cmd, &buffer);
++ if (unlikely(buffer_size <= 0)) {
++ if (buffer_size < 0) {
++ PRINT_ERROR("%s: Unable to get the buffer (%d)",
++ __func__, buffer_size);
++ }
++ goto out;
++ }
++ break;
++ }
++
++ switch (opcode) {
++ case MODE_SENSE:
++ TRACE_DBG("%s", "MODE_SENSE");
++ if ((cmd->cdb[2] & 0xC0) == 0) {
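++			/*
++			 * buffer[3] is the block descriptor length; the block
++			 * length is in bytes 5..7 of the descriptor, i.e.
++			 * buffer[9..11].
++			 */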
++ if (buffer[3] == 8) {
++ bs = (buffer[9] << 16) |
++ (buffer[10] << 8) | buffer[11];
++ set_block_size(cmd, bs);
++ }
++ }
++ break;
++ case MODE_SELECT:
++ TRACE_DBG("%s", "MODE_SELECT");
++ if (buffer[3] == 8) {
++ bs = (buffer[9] << 16) | (buffer[10] << 8) |
++ (buffer[11]);
++ set_block_size(cmd, bs);
++ }
++ break;
++ default:
++ /* It's all good */
++ break;
++ }
++
++ switch (opcode) {
++ case MODE_SENSE:
++ case MODE_SELECT:
++ scst_put_buf(cmd, buffer);
++ break;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL_GPL(scst_tape_generic_dev_done);
++
++static void scst_check_internal_sense(struct scst_device *dev, int result,
++ uint8_t *sense, int sense_len)
++{
++ TRACE_ENTRY();
++
++ if (host_byte(result) == DID_RESET) {
++ int sl;
++ TRACE(TRACE_MGMT, "DID_RESET received for device %s, "
++ "triggering reset UA", dev->virt_name);
++ sl = scst_set_sense(sense, sense_len, dev->d_sense,
++ SCST_LOAD_SENSE(scst_sense_reset_UA));
++ scst_dev_check_set_UA(dev, NULL, sense, sl);
++ } else if ((status_byte(result) == CHECK_CONDITION) &&
++ scst_is_ua_sense(sense, sense_len))
++ scst_dev_check_set_UA(dev, NULL, sense, sense_len);
++
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ * scst_to_dma_dir() - translate SCST's data direction to DMA direction
++ *
++ * Translates SCST's data direction to DMA one from backend storage
++ * perspective.
++ */
++enum dma_data_direction scst_to_dma_dir(int scst_dir)
++{
++ static const enum dma_data_direction tr_tbl[] = { DMA_NONE,
++ DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_BIDIRECTIONAL, DMA_NONE };
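++	/* tr_tbl is indexed directly by the SCST_DATA_* value of scst_dir */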
++
++ return tr_tbl[scst_dir];
++}
++EXPORT_SYMBOL(scst_to_dma_dir);
++
++/*
++ * scst_to_tgt_dma_dir() - translate SCST data direction to DMA direction
++ *
++ * Translates SCST data direction to DMA data direction from the perspective
++ * of a target.
++ */
++enum dma_data_direction scst_to_tgt_dma_dir(int scst_dir)
++{
++ static const enum dma_data_direction tr_tbl[] = { DMA_NONE,
++ DMA_FROM_DEVICE, DMA_TO_DEVICE, DMA_BIDIRECTIONAL, DMA_NONE };
++
++ return tr_tbl[scst_dir];
++}
++EXPORT_SYMBOL(scst_to_tgt_dma_dir);
++
++/**
++ * scst_obtain_device_parameters() - obtain device control parameters
++ *
++ * Issues a MODE SENSE for control mode page data and sets the corresponding
++ * dev's parameter from it. Returns 0 on success and not 0 otherwise.
++ */
++int scst_obtain_device_parameters(struct scst_device *dev)
++{
++ int rc, i;
++ uint8_t cmd[16];
++ uint8_t buffer[4+0x0A];
++ uint8_t sense_buffer[SCSI_SENSE_BUFFERSIZE];
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(dev->scsi_dev == NULL);
++
++ for (i = 0; i < 5; i++) {
++ /* Get control mode page */
++ memset(cmd, 0, sizeof(cmd));
++#if 0
++ cmd[0] = MODE_SENSE_10;
++ cmd[1] = 0;
++ cmd[2] = 0x0A;
++ cmd[8] = sizeof(buffer); /* it's < 256 */
++#else
++ cmd[0] = MODE_SENSE;
++ cmd[1] = 8; /* DBD */
++ cmd[2] = 0x0A;
++ cmd[4] = sizeof(buffer);
++#endif
++
++ memset(buffer, 0, sizeof(buffer));
++ memset(sense_buffer, 0, sizeof(sense_buffer));
++
++ TRACE(TRACE_SCSI, "%s", "Doing internal MODE_SENSE");
++ rc = scsi_execute(dev->scsi_dev, cmd, SCST_DATA_READ, buffer,
++ sizeof(buffer), sense_buffer, 15, 0, 0
++ , NULL
++ );
++
++ TRACE_DBG("MODE_SENSE done: %x", rc);
++
++ if (scsi_status_is_good(rc)) {
++ int q;
++
++ PRINT_BUFF_FLAG(TRACE_SCSI, "Returned control mode "
++ "page data", buffer, sizeof(buffer));
++
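++			/*
++			 * buffer[0..3] is the MODE SENSE(6) header; DBD is set,
++			 * so there are no block descriptors and the control mode
++			 * page starts at offset 4.
++			 */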
++ dev->tst = buffer[4+2] >> 5;
++ q = buffer[4+3] >> 4;
++ if (q > SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER) {
++ PRINT_ERROR("Too big QUEUE ALG %x, dev %s",
++					q, dev->virt_name);
++ }
++ dev->queue_alg = q;
++ dev->swp = (buffer[4+4] & 0x8) >> 3;
++ dev->tas = (buffer[4+5] & 0x40) >> 6;
++ dev->d_sense = (buffer[4+2] & 0x4) >> 2;
++
++ /*
++ * Unfortunately, SCSI ML doesn't provide a way to
++			 * specify a command's task attribute, so we can only rely
++			 * on the device's restricted reordering. The Linux I/O
++			 * subsystem doesn't reorder pass-through (PC) requests.
++ */
++ dev->has_own_order_mgmt = !dev->queue_alg;
++
++ PRINT_INFO("Device %s: TST %x, QUEUE ALG %x, SWP %x, "
++ "TAS %x, D_SENSE %d, has_own_order_mgmt %d",
++ dev->virt_name, dev->tst, dev->queue_alg,
++ dev->swp, dev->tas, dev->d_sense,
++ dev->has_own_order_mgmt);
++
++ goto out;
++ } else {
++ scst_check_internal_sense(dev, rc, sense_buffer,
++ sizeof(sense_buffer));
++#if 0
++ if ((status_byte(rc) == CHECK_CONDITION) &&
++ SCST_SENSE_VALID(sense_buffer)) {
++#else
++ /*
++ * 3ware controller is buggy and returns CONDITION_GOOD
++ * instead of CHECK_CONDITION
++ */
++ if (SCST_SENSE_VALID(sense_buffer)) {
++#endif
++ PRINT_BUFF_FLAG(TRACE_SCSI, "Returned sense "
++ "data", sense_buffer,
++ sizeof(sense_buffer));
++ if (scst_analyze_sense(sense_buffer,
++ sizeof(sense_buffer),
++ SCST_SENSE_KEY_VALID,
++ ILLEGAL_REQUEST, 0, 0)) {
++ PRINT_INFO("Device %s doesn't support "
++ "MODE SENSE", dev->virt_name);
++ break;
++ } else if (scst_analyze_sense(sense_buffer,
++ sizeof(sense_buffer),
++ SCST_SENSE_KEY_VALID,
++ NOT_READY, 0, 0)) {
++ PRINT_ERROR("Device %s not ready",
++ dev->virt_name);
++ break;
++ }
++ } else {
++ PRINT_INFO("Internal MODE SENSE to "
++ "device %s failed: %x",
++ dev->virt_name, rc);
++ PRINT_BUFF_FLAG(TRACE_SCSI, "MODE SENSE sense",
++ sense_buffer, sizeof(sense_buffer));
++ switch (host_byte(rc)) {
++ case DID_RESET:
++ case DID_ABORT:
++ case DID_SOFT_ERROR:
++ break;
++ default:
++ goto brk;
++ }
++ switch (driver_byte(rc)) {
++ case DRIVER_BUSY:
++ case DRIVER_SOFT:
++ break;
++ default:
++ goto brk;
++ }
++ }
++ }
++ }
++brk:
++	PRINT_WARNING("Unable to get control mode page of device %s, using "
++ "existing values/defaults: TST %x, QUEUE ALG %x, SWP %x, "
++ "TAS %x, D_SENSE %d, has_own_order_mgmt %d", dev->virt_name,
++ dev->tst, dev->queue_alg, dev->swp, dev->tas, dev->d_sense,
++ dev->has_own_order_mgmt);
++
++out:
++ TRACE_EXIT();
++ return 0;
++}
++EXPORT_SYMBOL_GPL(scst_obtain_device_parameters);
++
++/* Called under dev_lock and BH off */
++void scst_process_reset(struct scst_device *dev,
++ struct scst_session *originator, struct scst_cmd *exclude_cmd,
++ struct scst_mgmt_cmd *mcmd, bool setUA)
++{
++ struct scst_tgt_dev *tgt_dev;
++ struct scst_cmd *cmd, *tcmd;
++
++ TRACE_ENTRY();
++
++ /* Clear RESERVE'ation, if necessary */
++ if (dev->dev_reserved) {
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ TRACE_MGMT_DBG("Clearing RESERVE'ation for "
++ "tgt_dev LUN %lld",
++ (long long unsigned int)tgt_dev->lun);
++ clear_bit(SCST_TGT_DEV_RESERVED,
++ &tgt_dev->tgt_dev_flags);
++ }
++ dev->dev_reserved = 0;
++ /*
++		 * There is no need to send RELEASE, since the device is going
++		 * to be reset. Actually, since we can be in a RESET TM
++		 * function, doing so might be dangerous.
++ */
++ }
++
++ dev->dev_double_ua_possible = 1;
++
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ struct scst_session *sess = tgt_dev->sess;
++
++ spin_lock_bh(&tgt_dev->tgt_dev_lock);
++
++ scst_free_all_UA(tgt_dev);
++
++ memset(tgt_dev->tgt_dev_sense, 0,
++ sizeof(tgt_dev->tgt_dev_sense));
++
++ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
++
++ spin_lock_irq(&sess->sess_list_lock);
++
++ TRACE_DBG("Searching in sess cmd list (sess=%p)", sess);
++ list_for_each_entry(cmd, &sess->sess_cmd_list,
++ sess_cmd_list_entry) {
++ if (cmd == exclude_cmd)
++ continue;
++ if ((cmd->tgt_dev == tgt_dev) ||
++ ((cmd->tgt_dev == NULL) &&
++ (cmd->lun == tgt_dev->lun))) {
++ scst_abort_cmd(cmd, mcmd,
++ (tgt_dev->sess != originator), 0);
++ }
++ }
++ spin_unlock_irq(&sess->sess_list_lock);
++ }
++
++ list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
++ blocked_cmd_list_entry) {
++ if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
++ list_del(&cmd->blocked_cmd_list_entry);
++ TRACE_MGMT_DBG("Adding aborted blocked cmd %p "
++ "to active cmd list", cmd);
++ spin_lock_irq(&cmd->cmd_threads->cmd_list_lock);
++ list_add_tail(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
++ spin_unlock_irq(&cmd->cmd_threads->cmd_list_lock);
++ }
++ }
++
++ if (setUA) {
++ uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
++ int sl = scst_set_sense(sense_buffer, sizeof(sense_buffer),
++ dev->d_sense, SCST_LOAD_SENSE(scst_sense_reset_UA));
++ scst_dev_check_set_local_UA(dev, exclude_cmd, sense_buffer, sl);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/* No locks, no IRQ or IRQ-disabled context allowed */
++int scst_set_pending_UA(struct scst_cmd *cmd)
++{
++ int res = 0, i;
++ struct scst_tgt_dev_UA *UA_entry;
++ bool first = true, global_unlock = false;
++ struct scst_session *sess = cmd->sess;
++
++ TRACE_ENTRY();
++
++ /*
++	 * Read barrier and recheck to synchronize with setting SCST_CMD_ABORTED
++	 * in scst_abort_cmd(), so that we don't set a UA for a cmd being
++	 * aborted and hence possibly miss its delivery by a legitimate command
++	 * while the UA is being requeued.
++ */
++ smp_rmb();
++ if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
++ TRACE_MGMT_DBG("Not set pending UA for aborted cmd %p", cmd);
++ res = -1;
++ goto out;
++ }
++
++ TRACE_MGMT_DBG("Setting pending UA cmd %p", cmd);
++
++ spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
++
++again:
++ /* UA list could be cleared behind us, so retest */
++ if (list_empty(&cmd->tgt_dev->UA_list)) {
++ TRACE_DBG("%s",
++ "SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
++ res = -1;
++ goto out_unlock;
++ }
++
++ UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
++ UA_list_entry);
++
++ TRACE_DBG("next %p UA_entry %p",
++ cmd->tgt_dev->UA_list.next, UA_entry);
++
++ if (UA_entry->global_UA && first) {
++ TRACE_MGMT_DBG("Global UA %p detected", UA_entry);
++
++ spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
++
++ /*
++		 * cmd won't allow activities to be suspended, so we can access
++ * sess->sess_tgt_dev_list_hash without any additional
++ * protection.
++ */
++
++ local_bh_disable();
++
++ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
++ struct list_head *sess_tgt_dev_list_head =
++ &sess->sess_tgt_dev_list_hash[i];
++ struct scst_tgt_dev *tgt_dev;
++ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
++ sess_tgt_dev_list_entry) {
++				/* Lockdep triggers a false positive here. */
++ spin_lock(&tgt_dev->tgt_dev_lock);
++ }
++ }
++
++ first = false;
++ global_unlock = true;
++ goto again;
++ }
++
++ if (scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
++ UA_entry->UA_valid_sense_len) != 0)
++ goto out_unlock;
++
++ cmd->ua_ignore = 1;
++
++ list_del(&UA_entry->UA_list_entry);
++
++ if (UA_entry->global_UA) {
++ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
++ struct list_head *sess_tgt_dev_list_head =
++ &sess->sess_tgt_dev_list_hash[i];
++ struct scst_tgt_dev *tgt_dev;
++
++ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
++ sess_tgt_dev_list_entry) {
++ struct scst_tgt_dev_UA *ua;
++ list_for_each_entry(ua, &tgt_dev->UA_list,
++ UA_list_entry) {
++ if (ua->global_UA &&
++ memcmp(ua->UA_sense_buffer,
++ UA_entry->UA_sense_buffer,
++ sizeof(ua->UA_sense_buffer)) == 0) {
++ TRACE_MGMT_DBG("Freeing not "
++ "needed global UA %p",
++ ua);
++ list_del(&ua->UA_list_entry);
++ mempool_free(ua, scst_ua_mempool);
++ break;
++ }
++ }
++ }
++ }
++ }
++
++ mempool_free(UA_entry, scst_ua_mempool);
++
++ if (list_empty(&cmd->tgt_dev->UA_list)) {
++ clear_bit(SCST_TGT_DEV_UA_PENDING,
++ &cmd->tgt_dev->tgt_dev_flags);
++ }
++
++out_unlock:
++ if (global_unlock) {
++ for (i = TGT_DEV_HASH_SIZE-1; i >= 0; i--) {
++ struct list_head *sess_tgt_dev_list_head =
++ &sess->sess_tgt_dev_list_hash[i];
++ struct scst_tgt_dev *tgt_dev;
++ list_for_each_entry_reverse(tgt_dev, sess_tgt_dev_list_head,
++ sess_tgt_dev_list_entry) {
++ spin_unlock(&tgt_dev->tgt_dev_lock);
++ }
++ }
++
++ local_bh_enable();
++ spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
++ }
++
++ spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* Called under tgt_dev_lock and BH off */
++static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
++ const uint8_t *sense, int sense_len, int flags)
++{
++ struct scst_tgt_dev_UA *UA_entry = NULL;
++
++ TRACE_ENTRY();
++
++ UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
++ if (UA_entry == NULL) {
++ PRINT_CRIT_ERROR("%s", "UNIT ATTENTION memory "
++ "allocation failed. The UNIT ATTENTION "
++ "on some sessions will be missed");
++ PRINT_BUFFER("Lost UA", sense, sense_len);
++ goto out;
++ }
++ memset(UA_entry, 0, sizeof(*UA_entry));
++
++ UA_entry->global_UA = (flags & SCST_SET_UA_FLAG_GLOBAL) != 0;
++ if (UA_entry->global_UA)
++ TRACE_MGMT_DBG("Queuing global UA %p", UA_entry);
++
++ if (sense_len > (int)sizeof(UA_entry->UA_sense_buffer)) {
++		PRINT_WARNING("Sense truncated (needed %d), consider increasing "
++			"SCST_SENSE_BUFFERSIZE", sense_len);
++ sense_len = sizeof(UA_entry->UA_sense_buffer);
++ }
++ memcpy(UA_entry->UA_sense_buffer, sense, sense_len);
++ UA_entry->UA_valid_sense_len = sense_len;
++
++ set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
++
++ TRACE_MGMT_DBG("Adding new UA to tgt_dev %p", tgt_dev);
++
++ if (flags & SCST_SET_UA_FLAG_AT_HEAD)
++ list_add(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
++ else
++ list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* tgt_dev_lock supposed to be held and BH off */
++static void __scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
++ const uint8_t *sense, int sense_len, int flags)
++{
++ int skip_UA = 0;
++ struct scst_tgt_dev_UA *UA_entry_tmp;
++ int len = min((int)sizeof(UA_entry_tmp->UA_sense_buffer), sense_len);
++
++ TRACE_ENTRY();
++
++ list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
++ UA_list_entry) {
++ if (memcmp(sense, UA_entry_tmp->UA_sense_buffer, len) == 0) {
++ TRACE_MGMT_DBG("%s", "UA already exists");
++ skip_UA = 1;
++ break;
++ }
++ }
++
++ if (skip_UA == 0)
++ scst_alloc_set_UA(tgt_dev, sense, len, flags);
++
++ TRACE_EXIT();
++ return;
++}
++
++void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
++ const uint8_t *sense, int sense_len, int flags)
++{
++ TRACE_ENTRY();
++
++ spin_lock_bh(&tgt_dev->tgt_dev_lock);
++ __scst_check_set_UA(tgt_dev, sense, sense_len, flags);
++ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Called under dev_lock and BH off */
++void scst_dev_check_set_local_UA(struct scst_device *dev,
++ struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
++{
++ struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;
++
++ TRACE_ENTRY();
++
++ if (exclude != NULL)
++ exclude_tgt_dev = exclude->tgt_dev;
++
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ if (tgt_dev != exclude_tgt_dev)
++ scst_check_set_UA(tgt_dev, sense, sense_len, 0);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Called under dev_lock and BH off */
++void __scst_dev_check_set_UA(struct scst_device *dev,
++ struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
++{
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("Processing UA dev %p", dev);
++
++ /* Check for reset UA */
++ if (scst_analyze_sense(sense, sense_len, SCST_SENSE_ASC_VALID,
++ 0, SCST_SENSE_ASC_UA_RESET, 0))
++ scst_process_reset(dev,
++ (exclude != NULL) ? exclude->sess : NULL,
++ exclude, NULL, false);
++
++ scst_dev_check_set_local_UA(dev, exclude, sense, sense_len);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Called under tgt_dev_lock or when tgt_dev is unused */
++static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
++{
++ struct scst_tgt_dev_UA *UA_entry, *t;
++
++ TRACE_ENTRY();
++
++ list_for_each_entry_safe(UA_entry, t,
++ &tgt_dev->UA_list, UA_list_entry) {
++ TRACE_MGMT_DBG("Clearing UA for tgt_dev LUN %lld",
++ (long long unsigned int)tgt_dev->lun);
++ list_del(&UA_entry->UA_list_entry);
++ mempool_free(UA_entry, scst_ua_mempool);
++ }
++ INIT_LIST_HEAD(&tgt_dev->UA_list);
++ clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* No locks */
++struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev)
++{
++ struct scst_cmd *res = NULL, *cmd, *t;
++ typeof(tgt_dev->expected_sn) expected_sn = tgt_dev->expected_sn;
++
++ spin_lock_irq(&tgt_dev->sn_lock);
++
++ if (unlikely(tgt_dev->hq_cmd_count != 0))
++ goto out_unlock;
++
++restart:
++ list_for_each_entry_safe(cmd, t, &tgt_dev->deferred_cmd_list,
++ sn_cmd_list_entry) {
++ EXTRACHECKS_BUG_ON(cmd->queue_type ==
++ SCST_CMD_QUEUE_HEAD_OF_QUEUE);
++ if (cmd->sn == expected_sn) {
++ TRACE_SN("Deferred command %p (sn %d, set %d) found",
++ cmd, cmd->sn, cmd->sn_set);
++ tgt_dev->def_cmd_count--;
++ list_del(&cmd->sn_cmd_list_entry);
++ if (res == NULL)
++ res = cmd;
++ else {
++ spin_lock(&cmd->cmd_threads->cmd_list_lock);
++ TRACE_SN("Adding cmd %p to active cmd list",
++ cmd);
++ list_add_tail(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
++ spin_unlock(&cmd->cmd_threads->cmd_list_lock);
++ }
++ }
++ }
++ if (res != NULL)
++ goto out_unlock;
++
++ list_for_each_entry(cmd, &tgt_dev->skipped_sn_list,
++ sn_cmd_list_entry) {
++ EXTRACHECKS_BUG_ON(cmd->queue_type ==
++ SCST_CMD_QUEUE_HEAD_OF_QUEUE);
++ if (cmd->sn == expected_sn) {
++ atomic_t *slot = cmd->sn_slot;
++ /*
++ * !! At this point any pointer in cmd, except !!
++ * !! sn_slot and sn_cmd_list_entry, could be !!
++ * !! already destroyed !!
++ */
++ TRACE_SN("cmd %p (tag %llu) with skipped sn %d found",
++ cmd,
++ (long long unsigned int)cmd->tag,
++ cmd->sn);
++ tgt_dev->def_cmd_count--;
++ list_del(&cmd->sn_cmd_list_entry);
++ spin_unlock_irq(&tgt_dev->sn_lock);
++ if (test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
++ &cmd->cmd_flags))
++ scst_destroy_put_cmd(cmd);
++ scst_inc_expected_sn(tgt_dev, slot);
++ expected_sn = tgt_dev->expected_sn;
++ spin_lock_irq(&tgt_dev->sn_lock);
++ goto restart;
++ }
++ }
++
++out_unlock:
++ spin_unlock_irq(&tgt_dev->sn_lock);
++ return res;
++}
++
++/*****************************************************************
++ ** The following thr_data functions are necessary because the
++ ** kernel doesn't provide a better way to have thread-local
++ ** storage
++ *****************************************************************/
++
++/**
++ * scst_add_thr_data() - add the current thread's local data
++ *
++ * Adds data local to the current thread to tgt_dev
++ * (the data will be local to both the tgt_dev and the current thread).
++ */
++void scst_add_thr_data(struct scst_tgt_dev *tgt_dev,
++ struct scst_thr_data_hdr *data,
++ void (*free_fn) (struct scst_thr_data_hdr *data))
++{
++ data->owner_thr = current;
++ atomic_set(&data->ref, 1);
++ EXTRACHECKS_BUG_ON(free_fn == NULL);
++ data->free_fn = free_fn;
++ spin_lock(&tgt_dev->thr_data_lock);
++ list_add_tail(&data->thr_data_list_entry, &tgt_dev->thr_data_list);
++ spin_unlock(&tgt_dev->thr_data_lock);
++}
++EXPORT_SYMBOL_GPL(scst_add_thr_data);
++
++/**
++ * scst_del_all_thr_data() - delete all thread-local data
++ *
++ * Deletes all thread-local data from tgt_dev
++ */
++void scst_del_all_thr_data(struct scst_tgt_dev *tgt_dev)
++{
++ spin_lock(&tgt_dev->thr_data_lock);
++ while (!list_empty(&tgt_dev->thr_data_list)) {
++ struct scst_thr_data_hdr *d = list_entry(
++ tgt_dev->thr_data_list.next, typeof(*d),
++ thr_data_list_entry);
++ list_del(&d->thr_data_list_entry);
++ spin_unlock(&tgt_dev->thr_data_lock);
++ scst_thr_data_put(d);
++ spin_lock(&tgt_dev->thr_data_lock);
++ }
++ spin_unlock(&tgt_dev->thr_data_lock);
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_del_all_thr_data);
++
++/**
++ * scst_dev_del_all_thr_data() - delete all thread-local data from a device
++ *
++ * Deletes all thread-local data from all tgt_devs of the device
++ */
++void scst_dev_del_all_thr_data(struct scst_device *dev)
++{
++ struct scst_tgt_dev *tgt_dev;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ scst_del_all_thr_data(tgt_dev);
++ }
++
++ mutex_unlock(&scst_mutex);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_dev_del_all_thr_data);
++
++/* thr_data_lock supposed to be held */
++static struct scst_thr_data_hdr *__scst_find_thr_data_locked(
++ struct scst_tgt_dev *tgt_dev, struct task_struct *tsk)
++{
++ struct scst_thr_data_hdr *res = NULL, *d;
++
++ list_for_each_entry(d, &tgt_dev->thr_data_list, thr_data_list_entry) {
++ if (d->owner_thr == tsk) {
++ res = d;
++ scst_thr_data_get(res);
++ break;
++ }
++ }
++ return res;
++}
++
++/**
++ * __scst_find_thr_data() - find data local to the thread
++ *
++ * Finds data local to the given thread. Returns NULL if not found.
++ */
++struct scst_thr_data_hdr *__scst_find_thr_data(struct scst_tgt_dev *tgt_dev,
++ struct task_struct *tsk)
++{
++ struct scst_thr_data_hdr *res;
++
++ spin_lock(&tgt_dev->thr_data_lock);
++ res = __scst_find_thr_data_locked(tgt_dev, tsk);
++ spin_unlock(&tgt_dev->thr_data_lock);
++
++ return res;
++}
++EXPORT_SYMBOL_GPL(__scst_find_thr_data);
++
++bool scst_del_thr_data(struct scst_tgt_dev *tgt_dev, struct task_struct *tsk)
++{
++ bool res;
++ struct scst_thr_data_hdr *td;
++
++ spin_lock(&tgt_dev->thr_data_lock);
++
++ td = __scst_find_thr_data_locked(tgt_dev, tsk);
++ if (td != NULL) {
++ list_del(&td->thr_data_list_entry);
++ res = true;
++ } else
++ res = false;
++
++ spin_unlock(&tgt_dev->thr_data_lock);
++
++ if (td != NULL) {
++ /* the find() fn also gets it */
++ scst_thr_data_put(td);
++ scst_thr_data_put(td);
++ }
++
++ return res;
++}
++
++/* dev_lock supposed to be held and BH disabled */
++void scst_block_dev(struct scst_device *dev)
++{
++ dev->block_count++;
++ TRACE_MGMT_DBG("Device BLOCK(new %d), dev %p", dev->block_count, dev);
++}
++
++/* No locks */
++void scst_unblock_dev(struct scst_device *dev)
++{
++ spin_lock_bh(&dev->dev_lock);
++ TRACE_MGMT_DBG("Device UNBLOCK(new %d), dev %p",
++ dev->block_count-1, dev);
++ if (--dev->block_count == 0)
++ scst_unblock_cmds(dev);
++ spin_unlock_bh(&dev->dev_lock);
++ BUG_ON(dev->block_count < 0);
++}
++
++/* No locks */
++bool __scst_check_blocked_dev(struct scst_cmd *cmd)
++{
++ int res = false;
++ struct scst_device *dev = cmd->dev;
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(cmd->unblock_dev);
++
++ if (unlikely(cmd->internal) && (cmd->cdb[0] == REQUEST_SENSE)) {
++ /*
++ * The original command can already block the device, so
++		 * the REQUEST SENSE command should always pass.
++ */
++ goto out;
++ }
++
++repeat:
++ if (dev->block_count > 0) {
++ spin_lock_bh(&dev->dev_lock);
++ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
++ goto out_unlock;
++ if (dev->block_count > 0) {
++ TRACE_MGMT_DBG("Delaying cmd %p due to blocking "
++ "(tag %llu, dev %p)", cmd,
++ (long long unsigned int)cmd->tag, dev);
++ list_add_tail(&cmd->blocked_cmd_list_entry,
++ &dev->blocked_cmd_list);
++ res = true;
++ spin_unlock_bh(&dev->dev_lock);
++ goto out;
++ } else {
++ TRACE_MGMT_DBG("%s", "Somebody unblocked the device, "
++ "continuing");
++ }
++ spin_unlock_bh(&dev->dev_lock);
++ }
++
++ if (dev->dev_double_ua_possible) {
++ spin_lock_bh(&dev->dev_lock);
++ if (dev->block_count == 0) {
++ TRACE_MGMT_DBG("cmd %p (tag %llu), blocking further "
++ "cmds due to possible double reset UA (dev %p)",
++ cmd, (long long unsigned int)cmd->tag, dev);
++ scst_block_dev(dev);
++ cmd->unblock_dev = 1;
++ } else {
++ spin_unlock_bh(&dev->dev_lock);
++ TRACE_MGMT_DBG("Somebody blocked the device, "
++ "repeating (count %d)", dev->block_count);
++ goto repeat;
++ }
++ spin_unlock_bh(&dev->dev_lock);
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_unlock:
++ spin_unlock_bh(&dev->dev_lock);
++ goto out;
++}
++
++/* Called under dev_lock */
++static void scst_unblock_cmds(struct scst_device *dev)
++{
++ struct scst_cmd *cmd, *tcmd;
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ local_irq_save(flags);
++ list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
++ blocked_cmd_list_entry) {
++ list_del(&cmd->blocked_cmd_list_entry);
++ TRACE_MGMT_DBG("Adding blocked cmd %p to active cmd list", cmd);
++ spin_lock(&cmd->cmd_threads->cmd_list_lock);
++ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
++ list_add(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ else
++ list_add_tail(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
++ spin_unlock(&cmd->cmd_threads->cmd_list_lock);
++ }
++ local_irq_restore(flags);
++
++ TRACE_EXIT();
++ return;
++}
++
++static void __scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
++ struct scst_cmd *out_of_sn_cmd)
++{
++ EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);
++
++ if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
++ scst_inc_expected_sn(tgt_dev, out_of_sn_cmd->sn_slot);
++ scst_make_deferred_commands_active(tgt_dev);
++ } else {
++ out_of_sn_cmd->out_of_sn = 1;
++ spin_lock_irq(&tgt_dev->sn_lock);
++ tgt_dev->def_cmd_count++;
++ list_add_tail(&out_of_sn_cmd->sn_cmd_list_entry,
++ &tgt_dev->skipped_sn_list);
++ TRACE_SN("out_of_sn_cmd %p with sn %d added to skipped_sn_list"
++ " (expected_sn %d)", out_of_sn_cmd, out_of_sn_cmd->sn,
++ tgt_dev->expected_sn);
++ spin_unlock_irq(&tgt_dev->sn_lock);
++ }
++
++ return;
++}
++
++void scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
++ struct scst_cmd *out_of_sn_cmd)
++{
++ TRACE_ENTRY();
++
++ if (!out_of_sn_cmd->sn_set) {
++ TRACE_SN("cmd %p without sn", out_of_sn_cmd);
++ goto out;
++ }
++
++ __scst_unblock_deferred(tgt_dev, out_of_sn_cmd);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++void scst_on_hq_cmd_response(struct scst_cmd *cmd)
++{
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++
++ TRACE_ENTRY();
++
++ if (!cmd->hq_cmd_inced)
++ goto out;
++
++ spin_lock_irq(&tgt_dev->sn_lock);
++ tgt_dev->hq_cmd_count--;
++ spin_unlock_irq(&tgt_dev->sn_lock);
++
++ EXTRACHECKS_BUG_ON(tgt_dev->hq_cmd_count < 0);
++
++ /*
++ * There is no problem in checking hq_cmd_count in the
++	 * non-locked state. In the worst case we will only have an
++	 * unneeded run of the deferred commands.
++ */
++ if (tgt_dev->hq_cmd_count == 0)
++ scst_make_deferred_commands_active(tgt_dev);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++void scst_store_sense(struct scst_cmd *cmd)
++{
++ TRACE_ENTRY();
++
++ if (SCST_SENSE_VALID(cmd->sense) &&
++ !test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags) &&
++ (cmd->tgt_dev != NULL)) {
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++
++ TRACE_DBG("Storing sense (cmd %p)", cmd);
++
++ spin_lock_bh(&tgt_dev->tgt_dev_lock);
++
++ if (cmd->sense_valid_len <= sizeof(tgt_dev->tgt_dev_sense))
++ tgt_dev->tgt_dev_valid_sense_len = cmd->sense_valid_len;
++ else {
++ tgt_dev->tgt_dev_valid_sense_len = sizeof(tgt_dev->tgt_dev_sense);
++ PRINT_ERROR("Stored sense truncated to size %d "
++ "(needed %d)", tgt_dev->tgt_dev_valid_sense_len,
++ cmd->sense_valid_len);
++ }
++ memcpy(tgt_dev->tgt_dev_sense, cmd->sense,
++ tgt_dev->tgt_dev_valid_sense_len);
++
++ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++void scst_xmit_process_aborted_cmd(struct scst_cmd *cmd)
++{
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("Aborted cmd %p done (cmd_ref %d, "
++ "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
++ atomic_read(&scst_cmd_count));
++
++ scst_done_cmd_mgmt(cmd);
++
++ if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
++ if (cmd->completed) {
++ /* It's completed and it's OK to return its result */
++ goto out;
++ }
++
++ /* For not yet inited commands cmd->dev can be NULL here */
++ if (test_bit(SCST_CMD_DEVICE_TAS, &cmd->cmd_flags)) {
++ TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
++ "(tag %llu), returning TASK ABORTED ", cmd,
++ (long long unsigned int)cmd->tag);
++ scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
++ } else {
++ TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
++ "(tag %llu), aborting without delivery or "
++ "notification",
++ cmd, (long long unsigned int)cmd->tag);
++ /*
++ * There is no need to check/requeue possible UA,
++ * because, if it exists, it will be delivered
++ * by the "completed" branch above.
++ */
++ clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
++ }
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ * scst_get_max_lun_commands() - return the maximum supported command count
++ *
++ * Returns the maximum number of commands which can be queued to this LUN
++ * in this session.
++ *
++ * If lun is NO_SUCH_LUN, returns the minimum of the maximum command counts
++ * which can be queued to any LUN in this session.
++ *
++ * If sess is NULL, returns the minimum of the maximum command counts which
++ * can be queued to any SCST device.
++ */
++int scst_get_max_lun_commands(struct scst_session *sess, uint64_t lun)
++{
++ return SCST_MAX_TGT_DEV_COMMANDS;
++}
++EXPORT_SYMBOL(scst_get_max_lun_commands);
++
++/**
++ * scst_reassign_persistent_sess_states() - reassigns persistent states
++ *
++ * Reassigns persistent states from old_sess to new_sess.
++ */
++void scst_reassign_persistent_sess_states(struct scst_session *new_sess,
++ struct scst_session *old_sess)
++{
++ struct scst_device *dev;
++
++ TRACE_ENTRY();
++
++ TRACE_PR("Reassigning persistent states from old_sess %p to "
++ "new_sess %p", old_sess, new_sess);
++
++ if ((new_sess == NULL) || (old_sess == NULL)) {
++ TRACE_DBG("%s", "new_sess or old_sess is NULL");
++ goto out;
++ }
++
++ if (new_sess == old_sess) {
++		TRACE_DBG("%s", "new_sess and old_sess are the same");
++ goto out;
++ }
++
++ if ((new_sess->transport_id == NULL) ||
++ (old_sess->transport_id == NULL)) {
++ TRACE_DBG("%s", "new_sess or old_sess doesn't support PRs");
++ goto out;
++ }
++
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
++ struct scst_tgt_dev *tgt_dev;
++ struct scst_tgt_dev *new_tgt_dev = NULL, *old_tgt_dev = NULL;
++
++ TRACE_DBG("Processing dev %s", dev->virt_name);
++
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ if (tgt_dev->sess == new_sess) {
++ new_tgt_dev = tgt_dev;
++ if (old_tgt_dev != NULL)
++ break;
++ }
++ if (tgt_dev->sess == old_sess) {
++ old_tgt_dev = tgt_dev;
++ if (new_tgt_dev != NULL)
++ break;
++ }
++ }
++
++ if ((new_tgt_dev == NULL) || (old_tgt_dev == NULL)) {
++			TRACE_DBG("new_tgt_dev %p or old_tgt_dev %p is NULL, "
++ "skipping (dev %s)", new_tgt_dev, old_tgt_dev,
++ dev->virt_name);
++ continue;
++ }
++
++ scst_pr_write_lock(dev);
++
++ if (old_tgt_dev->registrant != NULL) {
++ TRACE_PR("Reassigning reg %p from tgt_dev %p to %p",
++ old_tgt_dev->registrant, old_tgt_dev,
++ new_tgt_dev);
++
++ if (new_tgt_dev->registrant != NULL)
++ new_tgt_dev->registrant->tgt_dev = NULL;
++
++ new_tgt_dev->registrant = old_tgt_dev->registrant;
++ new_tgt_dev->registrant->tgt_dev = new_tgt_dev;
++
++ old_tgt_dev->registrant = NULL;
++ }
++
++ scst_pr_write_unlock(dev);
++ }
++
++ mutex_unlock(&scst_mutex);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL(scst_reassign_persistent_sess_states);
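++
++/*
++ * Illustrative usage sketch (editorial example, not part of the original
++ * SCST patch): a target driver would typically call the helper above when an
++ * initiator's session is reinstated (e.g. iSCSI session reinstatement), so
++ * that persistent reservation registrants follow the initiator to its new
++ * session before the old one is torn down. "example_sess_reinstated" is a
++ * hypothetical name.
++ */
++static void example_sess_reinstated(struct scst_session *new_sess,
++	struct scst_session *old_sess)
++{
++	scst_reassign_persistent_sess_states(new_sess, old_sess);
++	scst_unregister_session(old_sess, 0, NULL);
++}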
++
++/**
++ * scst_get_next_lexem() - parse and return next lexem in the string
++ *
++ * Returns a pointer to the next lexem from token_str, skipping
++ * leading spaces and '=' characters and using them as delimiters. The content
++ * of token_str is modified by setting '\0' at the delimiter's position.
++ */
++char *scst_get_next_lexem(char **token_str)
++{
++	char *p;
++ char *q;
++ static const char blank = '\0';
++
++ if ((token_str == NULL) || (*token_str == NULL))
++ return (char *)&blank;
++
++ for (p = *token_str; (*p != '\0') && (isspace(*p) || (*p == '=')); p++)
++ ;
++
++ for (q = p; (*q != '\0') && !isspace(*q) && (*q != '='); q++)
++ ;
++
++ if (*q != '\0')
++ *q++ = '\0';
++
++ *token_str = q;
++ return p;
++}
++EXPORT_SYMBOL_GPL(scst_get_next_lexem);
++
++/**
++ * scst_restore_token_str() - restore string modified by scst_get_next_lexem()
++ *
++ * Restores token_str modified by scst_get_next_lexem() to the
++ * previous value before scst_get_next_lexem() was called. Prev_lexem is
++ * a pointer to the lexem returned by scst_get_next_lexem().
++ */
++void scst_restore_token_str(char *prev_lexem, char *token_str)
++{
++ if (&prev_lexem[strlen(prev_lexem)] != token_str)
++ prev_lexem[strlen(prev_lexem)] = ' ';
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_restore_token_str);
++
++/**
++ * scst_get_next_token_str() - parse and return next token
++ *
++ * This function returns a pointer to the next token string from input_str,
++ * using '\n', ';' and '\0' as delimiters. The content of input_str is
++ * modified by setting '\0' at the delimiter's position.
++ */
++char *scst_get_next_token_str(char **input_str)
++{
++ char *p = *input_str;
++ int i = 0;
++
++ while ((p[i] != '\n') && (p[i] != ';') && (p[i] != '\0'))
++ i++;
++
++ if (i == 0)
++ return NULL;
++
++ if (p[i] == '\0')
++ *input_str = &p[i];
++ else
++ *input_str = &p[i+1];
++
++ p[i] = '\0';
++
++ return p;
++}
++EXPORT_SYMBOL_GPL(scst_get_next_token_str);
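++
++/*
++ * Illustrative usage sketch (editorial example, not part of the original
++ * SCST patch): parsing a management string such as "name=disk1; size=1024"
++ * with the three helpers above. "example_parse_params" is a hypothetical
++ * name.
++ */
++static void example_parse_params(char *input)
++{
++	char *token;
++
++	while ((token = scst_get_next_token_str(&input)) != NULL) {
++		char *key = scst_get_next_lexem(&token);
++		char *val = scst_get_next_lexem(&token);
++
++		if (*key == '\0')
++			continue;	/* token contained only delimiters */
++
++		PRINT_INFO("parameter %s = %s", key, val);
++		/*
++		 * If the original buffer is needed again,
++		 * scst_restore_token_str(key, token) writes a space back
++		 * over the '\0' inserted by scst_get_next_lexem().
++		 */
++	}
++}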
++
++static void __init scst_scsi_op_list_init(void)
++{
++ int i;
++ uint8_t op = 0xff;
++
++ TRACE_ENTRY();
++
++ for (i = 0; i < 256; i++)
++ scst_scsi_op_list[i] = SCST_CDB_TBL_SIZE;
++
++ for (i = 0; i < SCST_CDB_TBL_SIZE; i++) {
++ if (scst_scsi_op_table[i].ops != op) {
++ op = scst_scsi_op_table[i].ops;
++ scst_scsi_op_list[op] = i;
++ }
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++int __init scst_lib_init(void)
++{
++ int res = 0;
++
++ scst_scsi_op_list_init();
++
++ scsi_io_context_cache = kmem_cache_create("scst_scsi_io_context",
++ sizeof(struct scsi_io_context),
++ 0, 0, NULL);
++ if (!scsi_io_context_cache) {
++ PRINT_ERROR("%s", "Can't init scsi io context cache");
++ res = -ENOMEM;
++ goto out;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++void scst_lib_exit(void)
++{
++ BUILD_BUG_ON(SCST_MAX_CDB_SIZE != BLK_MAX_CDB);
++ BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE < SCSI_SENSE_BUFFERSIZE);
++
++ kmem_cache_destroy(scsi_io_context_cache);
++}
++
++#ifdef CONFIG_SCST_DEBUG
++
++/**
++ * scst_random() - return a pseudo-random number for debugging purposes.
++ *
++ * Returns a pseudo-random number for debugging purposes. Available only in
++ * the DEBUG build.
++ *
++ * Originally taken from the XFS code.
++ */
++unsigned long scst_random(void)
++{
++ static int Inited;
++ static unsigned long RandomValue;
++ static DEFINE_SPINLOCK(lock);
++ /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
++ register long rv;
++ register long lo;
++ register long hi;
++ unsigned long flags;
++
++ spin_lock_irqsave(&lock, flags);
++ if (!Inited) {
++ RandomValue = jiffies;
++ Inited = 1;
++ }
++ rv = RandomValue;
++ hi = rv / 127773;
++ lo = rv % 127773;
++ rv = 16807 * lo - 2836 * hi;
++ if (rv <= 0)
++ rv += 2147483647;
++ RandomValue = rv;
++ spin_unlock_irqrestore(&lock, flags);
++ return rv;
++}
++EXPORT_SYMBOL_GPL(scst_random);
++#endif /* CONFIG_SCST_DEBUG */
++
++#ifdef CONFIG_SCST_DEBUG_TM
++
++#define TM_DBG_STATE_ABORT 0
++#define TM_DBG_STATE_RESET 1
++#define TM_DBG_STATE_OFFLINE 2
++
++#define INIT_TM_DBG_STATE TM_DBG_STATE_ABORT
++
++static void tm_dbg_timer_fn(unsigned long arg);
++
++static DEFINE_SPINLOCK(scst_tm_dbg_lock);
++/* All serialized by scst_tm_dbg_lock */
++static struct {
++ unsigned int tm_dbg_release:1;
++ unsigned int tm_dbg_blocked:1;
++} tm_dbg_flags;
++static LIST_HEAD(tm_dbg_delayed_cmd_list);
++static int tm_dbg_delayed_cmds_count;
++static int tm_dbg_passed_cmds_count;
++static int tm_dbg_state;
++static int tm_dbg_on_state_passes;
++static DEFINE_TIMER(tm_dbg_timer, tm_dbg_timer_fn, 0, 0);
++static struct scst_tgt_dev *tm_dbg_tgt_dev;
++
++static const int tm_dbg_on_state_num_passes[] = { 5, 1, 0x7ffffff };
++
++static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev)
++{
++ if (tgt_dev->lun == 6) {
++ unsigned long flags;
++
++ if (tm_dbg_tgt_dev != NULL)
++ tm_dbg_deinit_tgt_dev(tm_dbg_tgt_dev);
++
++ spin_lock_irqsave(&scst_tm_dbg_lock, flags);
++ tm_dbg_state = INIT_TM_DBG_STATE;
++ tm_dbg_on_state_passes =
++ tm_dbg_on_state_num_passes[tm_dbg_state];
++ tm_dbg_tgt_dev = tgt_dev;
++ PRINT_INFO("LUN %lld connected from initiator %s is under "
++ "TM debugging (tgt_dev %p)",
++ (unsigned long long)tgt_dev->lun,
++ tgt_dev->sess->initiator_name, tgt_dev);
++ spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
++ }
++ return;
++}
++
++static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev)
++{
++ if (tm_dbg_tgt_dev == tgt_dev) {
++ unsigned long flags;
++ TRACE_MGMT_DBG("Deinit TM debugging tgt_dev %p", tgt_dev);
++ del_timer_sync(&tm_dbg_timer);
++ spin_lock_irqsave(&scst_tm_dbg_lock, flags);
++ tm_dbg_tgt_dev = NULL;
++ spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
++ }
++ return;
++}
++
++static void tm_dbg_timer_fn(unsigned long arg)
++{
++ TRACE_MGMT_DBG("%s", "delayed cmd timer expired");
++ tm_dbg_flags.tm_dbg_release = 1;
++ /* Used to make sure that all woken up threads see the new value */
++ smp_wmb();
++ wake_up_all(&tm_dbg_tgt_dev->active_cmd_threads->cmd_list_waitQ);
++ return;
++}
++
++/* Called under scst_tm_dbg_lock and IRQs off */
++static void tm_dbg_delay_cmd(struct scst_cmd *cmd)
++{
++ switch (tm_dbg_state) {
++ case TM_DBG_STATE_ABORT:
++ if (tm_dbg_delayed_cmds_count == 0) {
++ unsigned long d = 58*HZ + (scst_random() % (4*HZ));
++ TRACE_MGMT_DBG("STATE ABORT: delaying cmd %p (tag %llu)"
++ " for %ld.%ld seconds (%ld HZ), "
++ "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
++ d/HZ, (d%HZ)*100/HZ, d, tm_dbg_on_state_passes);
++ mod_timer(&tm_dbg_timer, jiffies + d);
++#if 0
++ tm_dbg_flags.tm_dbg_blocked = 1;
++#endif
++ } else {
++ TRACE_MGMT_DBG("Delaying another timed cmd %p "
++ "(tag %llu), delayed_cmds_count=%d, "
++ "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
++ tm_dbg_delayed_cmds_count,
++ tm_dbg_on_state_passes);
++ if (tm_dbg_delayed_cmds_count == 2)
++ tm_dbg_flags.tm_dbg_blocked = 0;
++ }
++ break;
++
++ case TM_DBG_STATE_RESET:
++ case TM_DBG_STATE_OFFLINE:
++ TRACE_MGMT_DBG("STATE RESET/OFFLINE: delaying cmd %p "
++ "(tag %llu), delayed_cmds_count=%d, "
++ "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
++ tm_dbg_delayed_cmds_count, tm_dbg_on_state_passes);
++ tm_dbg_flags.tm_dbg_blocked = 1;
++ break;
++
++ default:
++ BUG();
++ }
++ /* IRQs already off */
++ spin_lock(&cmd->cmd_threads->cmd_list_lock);
++ list_add_tail(&cmd->cmd_list_entry, &tm_dbg_delayed_cmd_list);
++ spin_unlock(&cmd->cmd_threads->cmd_list_lock);
++ cmd->tm_dbg_delayed = 1;
++ tm_dbg_delayed_cmds_count++;
++ return;
++}
++
++/* No locks */
++void tm_dbg_check_released_cmds(void)
++{
++ if (tm_dbg_flags.tm_dbg_release) {
++ struct scst_cmd *cmd, *tc;
++ spin_lock_irq(&scst_tm_dbg_lock);
++ list_for_each_entry_safe_reverse(cmd, tc,
++ &tm_dbg_delayed_cmd_list, cmd_list_entry) {
++ TRACE_MGMT_DBG("Releasing timed cmd %p (tag %llu), "
++ "delayed_cmds_count=%d", cmd, cmd->tag,
++ tm_dbg_delayed_cmds_count);
++ spin_lock(&cmd->cmd_threads->cmd_list_lock);
++ list_move(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ spin_unlock(&cmd->cmd_threads->cmd_list_lock);
++ }
++ tm_dbg_flags.tm_dbg_release = 0;
++ spin_unlock_irq(&scst_tm_dbg_lock);
++ }
++}
++
++/* Called under scst_tm_dbg_lock */
++static void tm_dbg_change_state(void)
++{
++ tm_dbg_flags.tm_dbg_blocked = 0;
++ if (--tm_dbg_on_state_passes == 0) {
++ switch (tm_dbg_state) {
++ case TM_DBG_STATE_ABORT:
++ TRACE_MGMT_DBG("%s", "Changing "
++ "tm_dbg_state to RESET");
++ tm_dbg_state = TM_DBG_STATE_RESET;
++ tm_dbg_flags.tm_dbg_blocked = 0;
++ break;
++ case TM_DBG_STATE_RESET:
++ case TM_DBG_STATE_OFFLINE:
++#ifdef CONFIG_SCST_TM_DBG_GO_OFFLINE
++ TRACE_MGMT_DBG("%s", "Changing "
++ "tm_dbg_state to OFFLINE");
++ tm_dbg_state = TM_DBG_STATE_OFFLINE;
++#else
++ TRACE_MGMT_DBG("%s", "Changing "
++ "tm_dbg_state to ABORT");
++ tm_dbg_state = TM_DBG_STATE_ABORT;
++#endif
++ break;
++ default:
++ BUG();
++ }
++ tm_dbg_on_state_passes =
++ tm_dbg_on_state_num_passes[tm_dbg_state];
++ }
++
++ TRACE_MGMT_DBG("%s", "Deleting timer");
++ del_timer_sync(&tm_dbg_timer);
++ return;
++}
++
++/* No locks */
++int tm_dbg_check_cmd(struct scst_cmd *cmd)
++{
++ int res = 0;
++ unsigned long flags;
++
++ if (cmd->tm_dbg_immut)
++ goto out;
++
++ if (cmd->tm_dbg_delayed) {
++ spin_lock_irqsave(&scst_tm_dbg_lock, flags);
++ TRACE_MGMT_DBG("Processing delayed cmd %p (tag %llu), "
++ "delayed_cmds_count=%d", cmd, cmd->tag,
++ tm_dbg_delayed_cmds_count);
++
++ cmd->tm_dbg_immut = 1;
++ tm_dbg_delayed_cmds_count--;
++ if ((tm_dbg_delayed_cmds_count == 0) &&
++ (tm_dbg_state == TM_DBG_STATE_ABORT))
++ tm_dbg_change_state();
++ spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
++ } else if (cmd->tgt_dev && (tm_dbg_tgt_dev == cmd->tgt_dev)) {
++ /* Delay 50th command */
++ spin_lock_irqsave(&scst_tm_dbg_lock, flags);
++ if (tm_dbg_flags.tm_dbg_blocked ||
++ (++tm_dbg_passed_cmds_count % 50) == 0) {
++ tm_dbg_delay_cmd(cmd);
++ res = 1;
++ } else
++ cmd->tm_dbg_immut = 1;
++ spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
++ }
++
++out:
++ return res;
++}
++
++/* No locks */
++void tm_dbg_release_cmd(struct scst_cmd *cmd)
++{
++ struct scst_cmd *c;
++ unsigned long flags;
++
++ spin_lock_irqsave(&scst_tm_dbg_lock, flags);
++ list_for_each_entry(c, &tm_dbg_delayed_cmd_list,
++ cmd_list_entry) {
++ if (c == cmd) {
++ TRACE_MGMT_DBG("Abort request for "
++ "delayed cmd %p (tag=%llu), moving it to "
++ "active cmd list (delayed_cmds_count=%d)",
++ c, c->tag, tm_dbg_delayed_cmds_count);
++
++ if (!test_bit(SCST_CMD_ABORTED_OTHER,
++ &cmd->cmd_flags)) {
++ /* Test how completed commands handled */
++ if (((scst_random() % 10) == 5)) {
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(
++ scst_sense_hardw_error));
++ /* It's completed now */
++ }
++ }
++
++ spin_lock(&cmd->cmd_threads->cmd_list_lock);
++ list_move(&c->cmd_list_entry,
++ &c->cmd_threads->active_cmd_list);
++ wake_up(&c->cmd_threads->cmd_list_waitQ);
++ spin_unlock(&cmd->cmd_threads->cmd_list_lock);
++ break;
++ }
++ }
++ spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
++ return;
++}
++
++/* Might be called under scst_mutex */
++void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn, int force)
++{
++ unsigned long flags;
++
++ if (dev != NULL) {
++ if (tm_dbg_tgt_dev == NULL)
++ goto out;
++
++ if (tm_dbg_tgt_dev->dev != dev)
++ goto out;
++ }
++
++ spin_lock_irqsave(&scst_tm_dbg_lock, flags);
++ if ((tm_dbg_state != TM_DBG_STATE_OFFLINE) || force) {
++ TRACE_MGMT_DBG("%s: freeing %d delayed cmds", fn,
++ tm_dbg_delayed_cmds_count);
++ tm_dbg_change_state();
++ tm_dbg_flags.tm_dbg_release = 1;
++ /*
++ * Used to make sure that all woken up threads see the new
++ * value.
++ */
++ smp_wmb();
++ if (tm_dbg_tgt_dev != NULL)
++ wake_up_all(&tm_dbg_tgt_dev->active_cmd_threads->cmd_list_waitQ);
++ } else {
++ TRACE_MGMT_DBG("%s: while OFFLINE state, doing nothing", fn);
++ }
++ spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
++
++out:
++ return;
++}
++
++int tm_dbg_is_release(void)
++{
++ return tm_dbg_flags.tm_dbg_release;
++}
++#endif /* CONFIG_SCST_DEBUG_TM */
++
++#ifdef CONFIG_SCST_DEBUG_SN
++void scst_check_debug_sn(struct scst_cmd *cmd)
++{
++ static DEFINE_SPINLOCK(lock);
++ static int type;
++ static int cnt;
++ unsigned long flags;
++ int old = cmd->queue_type;
++
++ spin_lock_irqsave(&lock, flags);
++
++ if (cnt == 0) {
++ if ((scst_random() % 1000) == 500) {
++ if ((scst_random() % 3) == 1)
++ type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
++ else
++ type = SCST_CMD_QUEUE_ORDERED;
++ do {
++ cnt = scst_random() % 10;
++ } while (cnt == 0);
++ } else
++ goto out_unlock;
++ }
++
++ cmd->queue_type = type;
++ cnt--;
++
++ if (((scst_random() % 1000) == 750))
++ cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
++ else if (((scst_random() % 1000) == 751))
++ cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
++ else if (((scst_random() % 1000) == 752))
++ cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
++
++ TRACE_SN("DbgSN changed cmd %p: %d/%d (cnt %d)", cmd, old,
++ cmd->queue_type, cnt);
++
++out_unlock:
++ spin_unlock_irqrestore(&lock, flags);
++ return;
++}
++#endif /* CONFIG_SCST_DEBUG_SN */
++
++#ifdef CONFIG_SCST_MEASURE_LATENCY
++
++static uint64_t scst_get_nsec(void)
++{
++ struct timespec ts;
++ ktime_get_ts(&ts);
++ return (uint64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
++}
++
++void scst_set_start_time(struct scst_cmd *cmd)
++{
++ cmd->start = scst_get_nsec();
++ TRACE_DBG("cmd %p: start %lld", cmd, cmd->start);
++}
++
++void scst_set_cur_start(struct scst_cmd *cmd)
++{
++ cmd->curr_start = scst_get_nsec();
++ TRACE_DBG("cmd %p: cur_start %lld", cmd, cmd->curr_start);
++}
++
++void scst_set_parse_time(struct scst_cmd *cmd)
++{
++ cmd->parse_time += scst_get_nsec() - cmd->curr_start;
++ TRACE_DBG("cmd %p: parse_time %lld", cmd, cmd->parse_time);
++}
++
++void scst_set_alloc_buf_time(struct scst_cmd *cmd)
++{
++ cmd->alloc_buf_time += scst_get_nsec() - cmd->curr_start;
++ TRACE_DBG("cmd %p: alloc_buf_time %lld", cmd, cmd->alloc_buf_time);
++}
++
++void scst_set_restart_waiting_time(struct scst_cmd *cmd)
++{
++ cmd->restart_waiting_time += scst_get_nsec() - cmd->curr_start;
++ TRACE_DBG("cmd %p: restart_waiting_time %lld", cmd,
++ cmd->restart_waiting_time);
++}
++
++void scst_set_rdy_to_xfer_time(struct scst_cmd *cmd)
++{
++ cmd->rdy_to_xfer_time += scst_get_nsec() - cmd->curr_start;
++ TRACE_DBG("cmd %p: rdy_to_xfer_time %lld", cmd, cmd->rdy_to_xfer_time);
++}
++
++void scst_set_pre_exec_time(struct scst_cmd *cmd)
++{
++ cmd->pre_exec_time += scst_get_nsec() - cmd->curr_start;
++ TRACE_DBG("cmd %p: pre_exec_time %lld", cmd, cmd->pre_exec_time);
++}
++
++void scst_set_exec_time(struct scst_cmd *cmd)
++{
++ cmd->exec_time += scst_get_nsec() - cmd->curr_start;
++ TRACE_DBG("cmd %p: exec_time %lld", cmd, cmd->exec_time);
++}
++
++void scst_set_dev_done_time(struct scst_cmd *cmd)
++{
++ cmd->dev_done_time += scst_get_nsec() - cmd->curr_start;
++ TRACE_DBG("cmd %p: dev_done_time %lld", cmd, cmd->dev_done_time);
++}
++
++void scst_set_xmit_time(struct scst_cmd *cmd)
++{
++ cmd->xmit_time += scst_get_nsec() - cmd->curr_start;
++ TRACE_DBG("cmd %p: xmit_time %lld", cmd, cmd->xmit_time);
++}
++
++void scst_set_tgt_on_free_time(struct scst_cmd *cmd)
++{
++ cmd->tgt_on_free_time += scst_get_nsec() - cmd->curr_start;
++ TRACE_DBG("cmd %p: tgt_on_free_time %lld", cmd, cmd->tgt_on_free_time);
++}
++
++void scst_set_dev_on_free_time(struct scst_cmd *cmd)
++{
++ cmd->dev_on_free_time += scst_get_nsec() - cmd->curr_start;
++ TRACE_DBG("cmd %p: dev_on_free_time %lld", cmd, cmd->dev_on_free_time);
++}
++
++void scst_update_lat_stats(struct scst_cmd *cmd)
++{
++ uint64_t finish, scst_time, tgt_time, dev_time;
++ struct scst_session *sess = cmd->sess;
++ int data_len;
++ int i;
++ struct scst_ext_latency_stat *latency_stat, *dev_latency_stat;
++
++ finish = scst_get_nsec();
++
++ /* Determine the IO size for extended latency statistics */
++ data_len = cmd->bufflen;
++ i = SCST_LATENCY_STAT_INDEX_OTHER;
++ if (data_len <= SCST_IO_SIZE_THRESHOLD_SMALL)
++ i = SCST_LATENCY_STAT_INDEX_SMALL;
++ else if (data_len <= SCST_IO_SIZE_THRESHOLD_MEDIUM)
++ i = SCST_LATENCY_STAT_INDEX_MEDIUM;
++ else if (data_len <= SCST_IO_SIZE_THRESHOLD_LARGE)
++ i = SCST_LATENCY_STAT_INDEX_LARGE;
++ else if (data_len <= SCST_IO_SIZE_THRESHOLD_VERY_LARGE)
++ i = SCST_LATENCY_STAT_INDEX_VERY_LARGE;
++ latency_stat = &sess->sess_latency_stat[i];
++ if (cmd->tgt_dev != NULL)
++ dev_latency_stat = &cmd->tgt_dev->dev_latency_stat[i];
++ else
++ dev_latency_stat = NULL;
++
++ /* Calculate the latencies */
++ scst_time = finish - cmd->start - (cmd->parse_time +
++ cmd->alloc_buf_time + cmd->restart_waiting_time +
++ cmd->rdy_to_xfer_time + cmd->pre_exec_time +
++ cmd->exec_time + cmd->dev_done_time + cmd->xmit_time +
++ cmd->tgt_on_free_time + cmd->dev_on_free_time);
++ tgt_time = cmd->alloc_buf_time + cmd->restart_waiting_time +
++ cmd->rdy_to_xfer_time + cmd->pre_exec_time +
++ cmd->xmit_time + cmd->tgt_on_free_time;
++ dev_time = cmd->parse_time + cmd->exec_time + cmd->dev_done_time +
++ cmd->dev_on_free_time;
++
++ spin_lock_bh(&sess->lat_lock);
++
++ /* Save the basic latency information */
++ sess->scst_time += scst_time;
++ sess->tgt_time += tgt_time;
++ sess->dev_time += dev_time;
++ sess->processed_cmds++;
++
++ if ((sess->min_scst_time == 0) ||
++ (sess->min_scst_time > scst_time))
++ sess->min_scst_time = scst_time;
++ if ((sess->min_tgt_time == 0) ||
++ (sess->min_tgt_time > tgt_time))
++ sess->min_tgt_time = tgt_time;
++ if ((sess->min_dev_time == 0) ||
++ (sess->min_dev_time > dev_time))
++ sess->min_dev_time = dev_time;
++
++ if (sess->max_scst_time < scst_time)
++ sess->max_scst_time = scst_time;
++ if (sess->max_tgt_time < tgt_time)
++ sess->max_tgt_time = tgt_time;
++ if (sess->max_dev_time < dev_time)
++ sess->max_dev_time = dev_time;
++
++ /* Save the extended latency information */
++ if (cmd->data_direction & SCST_DATA_READ) {
++ latency_stat->scst_time_rd += scst_time;
++ latency_stat->tgt_time_rd += tgt_time;
++ latency_stat->dev_time_rd += dev_time;
++ latency_stat->processed_cmds_rd++;
++
++ if ((latency_stat->min_scst_time_rd == 0) ||
++ (latency_stat->min_scst_time_rd > scst_time))
++ latency_stat->min_scst_time_rd = scst_time;
++ if ((latency_stat->min_tgt_time_rd == 0) ||
++ (latency_stat->min_tgt_time_rd > tgt_time))
++ latency_stat->min_tgt_time_rd = tgt_time;
++ if ((latency_stat->min_dev_time_rd == 0) ||
++ (latency_stat->min_dev_time_rd > dev_time))
++ latency_stat->min_dev_time_rd = dev_time;
++
++ if (latency_stat->max_scst_time_rd < scst_time)
++ latency_stat->max_scst_time_rd = scst_time;
++ if (latency_stat->max_tgt_time_rd < tgt_time)
++ latency_stat->max_tgt_time_rd = tgt_time;
++ if (latency_stat->max_dev_time_rd < dev_time)
++ latency_stat->max_dev_time_rd = dev_time;
++
++ if (dev_latency_stat != NULL) {
++ dev_latency_stat->scst_time_rd += scst_time;
++ dev_latency_stat->tgt_time_rd += tgt_time;
++ dev_latency_stat->dev_time_rd += dev_time;
++ dev_latency_stat->processed_cmds_rd++;
++
++ if ((dev_latency_stat->min_scst_time_rd == 0) ||
++ (dev_latency_stat->min_scst_time_rd > scst_time))
++ dev_latency_stat->min_scst_time_rd = scst_time;
++ if ((dev_latency_stat->min_tgt_time_rd == 0) ||
++ (dev_latency_stat->min_tgt_time_rd > tgt_time))
++ dev_latency_stat->min_tgt_time_rd = tgt_time;
++ if ((dev_latency_stat->min_dev_time_rd == 0) ||
++ (dev_latency_stat->min_dev_time_rd > dev_time))
++ dev_latency_stat->min_dev_time_rd = dev_time;
++
++ if (dev_latency_stat->max_scst_time_rd < scst_time)
++ dev_latency_stat->max_scst_time_rd = scst_time;
++ if (dev_latency_stat->max_tgt_time_rd < tgt_time)
++ dev_latency_stat->max_tgt_time_rd = tgt_time;
++ if (dev_latency_stat->max_dev_time_rd < dev_time)
++ dev_latency_stat->max_dev_time_rd = dev_time;
++ }
++ } else if (cmd->data_direction & SCST_DATA_WRITE) {
++ latency_stat->scst_time_wr += scst_time;
++ latency_stat->tgt_time_wr += tgt_time;
++ latency_stat->dev_time_wr += dev_time;
++ latency_stat->processed_cmds_wr++;
++
++ if ((latency_stat->min_scst_time_wr == 0) ||
++ (latency_stat->min_scst_time_wr > scst_time))
++ latency_stat->min_scst_time_wr = scst_time;
++ if ((latency_stat->min_tgt_time_wr == 0) ||
++ (latency_stat->min_tgt_time_wr > tgt_time))
++ latency_stat->min_tgt_time_wr = tgt_time;
++ if ((latency_stat->min_dev_time_wr == 0) ||
++ (latency_stat->min_dev_time_wr > dev_time))
++ latency_stat->min_dev_time_wr = dev_time;
++
++ if (latency_stat->max_scst_time_wr < scst_time)
++ latency_stat->max_scst_time_wr = scst_time;
++ if (latency_stat->max_tgt_time_wr < tgt_time)
++ latency_stat->max_tgt_time_wr = tgt_time;
++ if (latency_stat->max_dev_time_wr < dev_time)
++ latency_stat->max_dev_time_wr = dev_time;
++
++ if (dev_latency_stat != NULL) {
++ dev_latency_stat->scst_time_wr += scst_time;
++ dev_latency_stat->tgt_time_wr += tgt_time;
++ dev_latency_stat->dev_time_wr += dev_time;
++ dev_latency_stat->processed_cmds_wr++;
++
++ if ((dev_latency_stat->min_scst_time_wr == 0) ||
++ (dev_latency_stat->min_scst_time_wr > scst_time))
++ dev_latency_stat->min_scst_time_wr = scst_time;
++ if ((dev_latency_stat->min_tgt_time_wr == 0) ||
++ (dev_latency_stat->min_tgt_time_wr > tgt_time))
++ dev_latency_stat->min_tgt_time_wr = tgt_time;
++ if ((dev_latency_stat->min_dev_time_wr == 0) ||
++ (dev_latency_stat->min_dev_time_wr > dev_time))
++ dev_latency_stat->min_dev_time_wr = dev_time;
++
++ if (dev_latency_stat->max_scst_time_wr < scst_time)
++ dev_latency_stat->max_scst_time_wr = scst_time;
++ if (dev_latency_stat->max_tgt_time_wr < tgt_time)
++ dev_latency_stat->max_tgt_time_wr = tgt_time;
++ if (dev_latency_stat->max_dev_time_wr < dev_time)
++ dev_latency_stat->max_dev_time_wr = dev_time;
++ }
++ }
++
++ spin_unlock_bh(&sess->lat_lock);
++
++ TRACE_DBG("cmd %p: finish %lld, scst_time %lld, "
++ "tgt_time %lld, dev_time %lld", cmd, finish, scst_time,
++ tgt_time, dev_time);
++ return;
++}
++
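++/*
++ * Editorial note (not part of the original patch): the accounting above
++ * partitions each command's total lifetime (finish - cmd->start) as
++ *
++ *	tgt_time  = alloc_buf + restart_waiting + rdy_to_xfer + pre_exec
++ *		    + xmit + tgt_on_free
++ *	dev_time  = parse + exec + dev_done + dev_on_free
++ *	scst_time = total - tgt_time - dev_time
++ *
++ * i.e. scst_time is the residual overhead spent in the SCST core itself,
++ * while tgt_time and dev_time are attributed to the target driver and to the
++ * device handler/device respectively.
++ */
++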
++#endif /* CONFIG_SCST_MEASURE_LATENCY */
+diff -uprN orig/linux-2.6.36/drivers/scst/scst_main.c linux-2.6.36/drivers/scst/scst_main.c
+--- orig/linux-2.6.36/drivers/scst/scst_main.c
++++ linux-2.6.36/drivers/scst/scst_main.c
+@@ -0,0 +1,2198 @@
++/*
++ * scst_main.c
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/module.h>
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/unistd.h>
++#include <linux/string.h>
++#include <linux/kthread.h>
++#include <linux/delay.h>
++
++#include <scst/scst.h>
++#include "scst_priv.h"
++#include "scst_mem.h"
++#include "scst_pres.h"
++
++#if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
++#warning "HIGHMEM kernel configurations are fully supported, but not\
++ recommended for performance reasons. Consider changing VMSPLIT\
++ option or use a 64-bit configuration instead. See README file for\
++ details."
++#endif
++
++/**
++ ** SCST global variables. They are all uninitialized so that their layout in
++ ** memory is exactly as specified. Otherwise the compiler would put
++ ** zero-initialized variables separately from nonzero-initialized ones.
++ **/
++
++/*
++ * Main SCST mutex. All targets, devices and dev_types management is done
++ * under this mutex.
++ *
++ * It must NOT be used in any works (schedule_work(), etc.), because
++ * otherwise a deadlock (double lock, actually) is possible, e.g., with
++ * scst_user detach_tgt(), which is called under scst_mutex and calls
++ * flush_scheduled_work().
++ */
++struct mutex scst_mutex;
++EXPORT_SYMBOL_GPL(scst_mutex);
++
++/*
++ * Secondary level main mutex, inner for scst_mutex. Needed for
++ * __scst_pr_register_all_tg_pt(), since we can't use scst_mutex there,
++ * because of the circular locking dependency with dev_pr_mutex.
++ */
++struct mutex scst_mutex2;
++
++/* Both protected by scst_mutex or scst_mutex2 on read and both on write */
++struct list_head scst_template_list;
++struct list_head scst_dev_list;
++
++/* Protected by scst_mutex */
++struct list_head scst_dev_type_list;
++struct list_head scst_virtual_dev_type_list;
++
++spinlock_t scst_main_lock;
++
++static struct kmem_cache *scst_mgmt_cachep;
++mempool_t *scst_mgmt_mempool;
++static struct kmem_cache *scst_mgmt_stub_cachep;
++mempool_t *scst_mgmt_stub_mempool;
++static struct kmem_cache *scst_ua_cachep;
++mempool_t *scst_ua_mempool;
++static struct kmem_cache *scst_sense_cachep;
++mempool_t *scst_sense_mempool;
++static struct kmem_cache *scst_aen_cachep;
++mempool_t *scst_aen_mempool;
++struct kmem_cache *scst_tgtd_cachep;
++struct kmem_cache *scst_sess_cachep;
++struct kmem_cache *scst_acgd_cachep;
++
++unsigned int scst_setup_id;
++
++spinlock_t scst_init_lock;
++wait_queue_head_t scst_init_cmd_list_waitQ;
++struct list_head scst_init_cmd_list;
++unsigned int scst_init_poll_cnt;
++
++struct kmem_cache *scst_cmd_cachep;
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++unsigned long scst_trace_flag;
++#endif
++
++int scst_max_tasklet_cmd = SCST_DEF_MAX_TASKLET_CMD;
++
++unsigned long scst_flags;
++atomic_t scst_cmd_count;
++
++struct scst_cmd_threads scst_main_cmd_threads;
++
++struct scst_tasklet scst_tasklets[NR_CPUS];
++
++spinlock_t scst_mcmd_lock;
++struct list_head scst_active_mgmt_cmd_list;
++struct list_head scst_delayed_mgmt_cmd_list;
++wait_queue_head_t scst_mgmt_cmd_list_waitQ;
++
++wait_queue_head_t scst_mgmt_waitQ;
++spinlock_t scst_mgmt_lock;
++struct list_head scst_sess_init_list;
++struct list_head scst_sess_shut_list;
++
++wait_queue_head_t scst_dev_cmd_waitQ;
++
++static struct mutex scst_suspend_mutex;
++/* protected by scst_suspend_mutex */
++static struct list_head scst_cmd_threads_list;
++
++int scst_threads;
++static struct task_struct *scst_init_cmd_thread;
++static struct task_struct *scst_mgmt_thread;
++static struct task_struct *scst_mgmt_cmd_thread;
++
++static int suspend_count;
++
++static int scst_virt_dev_last_id; /* protected by scst_mutex */
++
++static unsigned int scst_max_cmd_mem;
++unsigned int scst_max_dev_cmd_mem;
++
++module_param_named(scst_threads, scst_threads, int, 0);
++MODULE_PARM_DESC(scst_threads, "SCSI target threads count");
++
++module_param_named(scst_max_cmd_mem, scst_max_cmd_mem, int, S_IRUGO);
++MODULE_PARM_DESC(scst_max_cmd_mem, "Maximum memory allowed to be consumed by "
++ "all SCSI commands of all devices at any given time in MB");
++
++module_param_named(scst_max_dev_cmd_mem, scst_max_dev_cmd_mem, int, S_IRUGO);
++MODULE_PARM_DESC(scst_max_dev_cmd_mem, "Maximum memory allowed to be consumed "
++ "by all SCSI commands of a device at any given time in MB");
++
++struct scst_dev_type scst_null_devtype = {
++ .name = "none",
++ .threads_num = -1,
++};
++
++static void __scst_resume_activity(void);
++
++/**
++ * __scst_register_target_template() - register target template.
++ * @vtt: target template
++ * @version: SCST_INTERFACE_VERSION version string to ensure that
++ * SCST core and the target driver use the same version of
++ * the SCST interface
++ *
++ * Description:
++ * Registers a target template and returns 0 on success or appropriate
++ * error code otherwise.
++ *
++ * Target drivers are supposed to behave sanely and not call register()
++ * and unregister() simultaneously at random.
++ */
++int __scst_register_target_template(struct scst_tgt_template *vtt,
++ const char *version)
++{
++ int res = 0;
++ struct scst_tgt_template *t;
++
++ TRACE_ENTRY();
++
++ INIT_LIST_HEAD(&vtt->tgt_list);
++
++ if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
++ PRINT_ERROR("Incorrect version of target %s", vtt->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (!vtt->detect) {
++ PRINT_ERROR("Target driver %s must have "
++ "detect() method.", vtt->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (!vtt->release) {
++ PRINT_ERROR("Target driver %s must have "
++ "release() method.", vtt->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (!vtt->xmit_response) {
++ PRINT_ERROR("Target driver %s must have "
++ "xmit_response() method.", vtt->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (vtt->get_initiator_port_transport_id == NULL)
++ PRINT_WARNING("Target driver %s doesn't support Persistent "
++ "Reservations", vtt->name);
++
++ if (vtt->threads_num < 0) {
++ PRINT_ERROR("Wrong threads_num value %d for "
++ "target \"%s\"", vtt->threads_num,
++ vtt->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if ((!vtt->enable_target || !vtt->is_target_enabled) &&
++ !vtt->enabled_attr_not_needed)
++ PRINT_WARNING("Target driver %s doesn't have enable_target() "
++ "and/or is_target_enabled() method(s). This is unsafe "
++			"and can lead to initiators connected at "
++			"initialization time seeing an unexpected set of "
++			"devices or no devices at all!", vtt->name);
++
++ if (((vtt->add_target != NULL) && (vtt->del_target == NULL)) ||
++ ((vtt->add_target == NULL) && (vtt->del_target != NULL))) {
++ PRINT_ERROR("Target driver %s must either define both "
++ "add_target() and del_target(), or none.", vtt->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (vtt->rdy_to_xfer == NULL)
++ vtt->rdy_to_xfer_atomic = 1;
++
++	if (mutex_lock_interruptible(&scst_mutex) != 0) {
++		res = -EINTR;
++		goto out;
++	}
++ list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
++ if (strcmp(t->name, vtt->name) == 0) {
++ PRINT_ERROR("Target driver %s already registered",
++ vtt->name);
++			res = -EEXIST;
++			goto out_unlock;
++ }
++ }
++ mutex_unlock(&scst_mutex);
++
++ res = scst_tgtt_sysfs_create(vtt);
++ if (res)
++ goto out;
++
++ mutex_lock(&scst_mutex);
++ mutex_lock(&scst_mutex2);
++ list_add_tail(&vtt->scst_template_list_entry, &scst_template_list);
++ mutex_unlock(&scst_mutex2);
++ mutex_unlock(&scst_mutex);
++
++ TRACE_DBG("%s", "Calling target driver's detect()");
++ res = vtt->detect(vtt);
++ TRACE_DBG("Target driver's detect() returned %d", res);
++ if (res < 0) {
++ PRINT_ERROR("%s", "The detect() routine failed");
++ res = -EINVAL;
++ goto out_del;
++ }
++
++ PRINT_INFO("Target template %s registered successfully", vtt->name);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_del:
++ scst_tgtt_sysfs_del(vtt);
++
++ mutex_lock(&scst_mutex);
++
++ mutex_lock(&scst_mutex2);
++ list_del(&vtt->scst_template_list_entry);
++ mutex_unlock(&scst_mutex2);
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++ goto out;
++}
++EXPORT_SYMBOL_GPL(__scst_register_target_template);
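++
++/*
++ * Illustrative sketch (editorial example, not part of the original SCST
++ * patch): a minimal target template providing the callbacks that the checks
++ * above require (detect(), release() and xmit_response()), plus the matching
++ * registration calls. All "example_" identifiers are hypothetical; real
++ * target drivers implement considerably more (see scst.h for the full
++ * template definition).
++ */
++static int example_detect(struct scst_tgt_template *tgtt)
++{
++	return 0;	/* number of hardware targets found, 0 for none */
++}
++
++static int example_release(struct scst_tgt *tgt)
++{
++	return 0;
++}
++
++static int example_xmit_response(struct scst_cmd *cmd)
++{
++	/*
++	 * Deliver data and status to the initiator here; a real driver then
++	 * hands the command back to the core via scst_tgt_cmd_done().
++	 */
++	return SCST_TGT_RES_SUCCESS;
++}
++
++static struct scst_tgt_template example_tgtt = {
++	.name = "example_tgt",
++	.detect = example_detect,
++	.release = example_release,
++	.xmit_response = example_xmit_response,
++};
++
++static int example_register(void)
++{
++	struct scst_tgt *tgt;
++	int res;
++
++	res = __scst_register_target_template(&example_tgtt,
++			SCST_INTERFACE_VERSION);
++	if (res != 0)
++		return res;
++
++	tgt = scst_register_target(&example_tgtt, "example_target0");
++	return (tgt != NULL) ? 0 : -ENOMEM;
++}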
++
++static int scst_check_non_gpl_target_template(struct scst_tgt_template *vtt)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ if (vtt->task_mgmt_affected_cmds_done || vtt->threads_num ||
++ vtt->on_hw_pending_cmd_timeout) {
++ PRINT_ERROR("Not allowed functionality in non-GPL version for "
++ "target template %s", vtt->name);
++ res = -EPERM;
++ goto out;
++ }
++
++ res = 0;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/**
++ * __scst_register_target_template_non_gpl() - register target template,
++ * non-GPL version
++ * @vtt: target template
++ * @version: SCST_INTERFACE_VERSION version string to ensure that
++ * SCST core and the target driver use the same version of
++ * the SCST interface
++ *
++ * Description:
++ * Registers a target template and returns 0 on success or appropriate
++ * error code otherwise.
++ *
++ * Note: *vtt must be static!
++ */
++int __scst_register_target_template_non_gpl(struct scst_tgt_template *vtt,
++ const char *version)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ res = scst_check_non_gpl_target_template(vtt);
++ if (res != 0)
++ goto out;
++
++ res = __scst_register_target_template(vtt, version);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL(__scst_register_target_template_non_gpl);
++
++/**
++ * scst_unregister_target_template() - unregister target template
++ *
++ * Target drivers are supposed to behave sanely and not call register()
++ * and unregister() simultaneously at random. It is also assumed that
++ * no attempts to create new targets for this vtt will be made in a race
++ * with this function.
++ */
++void scst_unregister_target_template(struct scst_tgt_template *vtt)
++{
++ struct scst_tgt *tgt;
++ struct scst_tgt_template *t;
++ int found = 0;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
++ if (strcmp(t->name, vtt->name) == 0) {
++ found = 1;
++ break;
++ }
++ }
++ if (!found) {
++ PRINT_ERROR("Target driver %s isn't registered", vtt->name);
++ goto out_err_up;
++ }
++
++ mutex_lock(&scst_mutex2);
++ list_del(&vtt->scst_template_list_entry);
++ mutex_unlock(&scst_mutex2);
++
++ /* Wait for outstanding sysfs mgmt calls completed */
++ while (vtt->tgtt_active_sysfs_works_count > 0) {
++ mutex_unlock(&scst_mutex);
++ msleep(100);
++ mutex_lock(&scst_mutex);
++ }
++
++restart:
++ list_for_each_entry(tgt, &vtt->tgt_list, tgt_list_entry) {
++ mutex_unlock(&scst_mutex);
++ scst_unregister_target(tgt);
++ mutex_lock(&scst_mutex);
++ goto restart;
++ }
++
++ mutex_unlock(&scst_mutex);
++
++ scst_tgtt_sysfs_del(vtt);
++
++ PRINT_INFO("Target template %s unregistered successfully", vtt->name);
++
++out:
++ TRACE_EXIT();
++ return;
++
++out_err_up:
++ mutex_unlock(&scst_mutex);
++ goto out;
++}
++EXPORT_SYMBOL(scst_unregister_target_template);
++
++/**
++ * scst_register_target() - register target
++ *
++ * Registers a target for template vtt and returns new target structure on
++ * success or NULL otherwise.
++ */
++struct scst_tgt *scst_register_target(struct scst_tgt_template *vtt,
++ const char *target_name)
++{
++ struct scst_tgt *tgt;
++ int rc = 0;
++
++ TRACE_ENTRY();
++
++ rc = scst_alloc_tgt(vtt, &tgt);
++ if (rc != 0)
++ goto out;
++
++ if (target_name != NULL) {
++
++ tgt->tgt_name = kmalloc(strlen(target_name) + 1, GFP_KERNEL);
++ if (tgt->tgt_name == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "Allocation of tgt name %s failed",
++ target_name);
++ rc = -ENOMEM;
++ goto out_free_tgt;
++ }
++ strcpy(tgt->tgt_name, target_name);
++ } else {
++ static int tgt_num; /* protected by scst_mutex */
++ int len = strlen(vtt->name) +
++ strlen(SCST_DEFAULT_TGT_NAME_SUFFIX) + 11 + 1;
++
++ tgt->tgt_name = kmalloc(len, GFP_KERNEL);
++ if (tgt->tgt_name == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "Allocation of tgt name failed "
++ "(template name %s)", vtt->name);
++ rc = -ENOMEM;
++ goto out_free_tgt;
++ }
++ sprintf(tgt->tgt_name, "%s%s%d", vtt->name,
++ SCST_DEFAULT_TGT_NAME_SUFFIX, tgt_num++);
++ }
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ rc = -EINTR;
++ goto out_free_tgt;
++ }
++
++ rc = scst_tgt_sysfs_create(tgt);
++ if (rc < 0)
++ goto out_unlock;
++
++ tgt->default_acg = scst_alloc_add_acg(tgt, tgt->tgt_name, false);
++ if (tgt->default_acg == NULL)
++ goto out_sysfs_del;
++
++ mutex_lock(&scst_mutex2);
++ list_add_tail(&tgt->tgt_list_entry, &vtt->tgt_list);
++ mutex_unlock(&scst_mutex2);
++
++ mutex_unlock(&scst_mutex);
++
++ PRINT_INFO("Target %s for template %s registered successfully",
++ tgt->tgt_name, vtt->name);
++
++ TRACE_DBG("tgt %p", tgt);
++
++out:
++ TRACE_EXIT();
++ return tgt;
++
++out_sysfs_del:
++ mutex_unlock(&scst_mutex);
++ scst_tgt_sysfs_del(tgt);
++ goto out_free_tgt;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++
++out_free_tgt:
++ /* In case of error tgt_name will be freed in scst_free_tgt() */
++ scst_free_tgt(tgt);
++ tgt = NULL;
++ goto out;
++}
++EXPORT_SYMBOL(scst_register_target);
++
++static inline int test_sess_list(struct scst_tgt *tgt)
++{
++ int res;
++ mutex_lock(&scst_mutex);
++ res = list_empty(&tgt->sess_list);
++ mutex_unlock(&scst_mutex);
++ return res;
++}
++
++/**
++ * scst_unregister_target() - unregister target.
++ *
++ * It is assumed that no attempts to create new sessions for this
++ * target will be made in a race with this function.
++ */
++void scst_unregister_target(struct scst_tgt *tgt)
++{
++ struct scst_session *sess;
++ struct scst_tgt_template *vtt = tgt->tgtt;
++ struct scst_acg *acg, *acg_tmp;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("%s", "Calling target driver's release()");
++ tgt->tgtt->release(tgt);
++ TRACE_DBG("%s", "Target driver's release() returned");
++
++ mutex_lock(&scst_mutex);
++again:
++ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
++ if (sess->shut_phase == SCST_SESS_SPH_READY) {
++ /*
++			 * Sometimes it's hard for a target driver to track all
++			 * its sessions (see scst_local, e.g.), so let's help it.
++ */
++ mutex_unlock(&scst_mutex);
++ scst_unregister_session(sess, 0, NULL);
++ mutex_lock(&scst_mutex);
++ goto again;
++ }
++ }
++ mutex_unlock(&scst_mutex);
++
++ TRACE_DBG("%s", "Waiting for sessions shutdown");
++ wait_event(tgt->unreg_waitQ, test_sess_list(tgt));
++ TRACE_DBG("%s", "wait_event() returned");
++
++ scst_suspend_activity(false);
++ mutex_lock(&scst_mutex);
++
++ mutex_lock(&scst_mutex2);
++ list_del(&tgt->tgt_list_entry);
++ mutex_unlock(&scst_mutex2);
++
++ del_timer_sync(&tgt->retry_timer);
++
++ scst_del_free_acg(tgt->default_acg);
++
++ list_for_each_entry_safe(acg, acg_tmp, &tgt->tgt_acg_list,
++ acg_list_entry) {
++ scst_del_free_acg(acg);
++ }
++
++ mutex_unlock(&scst_mutex);
++ scst_resume_activity();
++
++ scst_tgt_sysfs_del(tgt);
++
++ PRINT_INFO("Target %s for template %s unregistered successfully",
++ tgt->tgt_name, vtt->name);
++
++ scst_free_tgt(tgt);
++
++ TRACE_DBG("Unregistering tgt %p finished", tgt);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL(scst_unregister_target);
++
++static int scst_susp_wait(bool interruptible)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ if (interruptible) {
++ res = wait_event_interruptible_timeout(scst_dev_cmd_waitQ,
++ (atomic_read(&scst_cmd_count) == 0),
++ SCST_SUSPENDING_TIMEOUT);
++ if (res <= 0) {
++ __scst_resume_activity();
++ if (res == 0)
++ res = -EBUSY;
++ } else
++ res = 0;
++ } else
++ wait_event(scst_dev_cmd_waitQ,
++ atomic_read(&scst_cmd_count) == 0);
++
++ TRACE_MGMT_DBG("wait_event() returned %d", res);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/**
++ * scst_suspend_activity() - globally suspend any activity
++ *
++ * Description:
++ * Globally suspends any activity and doesn't return, until there are any
++ *    Globally suspends any activity and doesn't return while there are any
++ *    active commands (i.e. commands past state SCST_CMD_STATE_INIT). If
++ *    "interruptible" is true, it returns a negative error status if it is
++ *    interrupted by a signal or if SCST_SUSPENDING_TIMEOUT expires. If
++ *    "interruptible" is false, it will wait virtually forever. On success
++ *    returns 0.
++ *
++ * Newly arriving commands stay in the suspended state until
++ */
++int scst_suspend_activity(bool interruptible)
++{
++ int res = 0;
++ bool rep = false;
++
++ TRACE_ENTRY();
++
++ if (interruptible) {
++ if (mutex_lock_interruptible(&scst_suspend_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++ } else
++ mutex_lock(&scst_suspend_mutex);
++
++ TRACE_MGMT_DBG("suspend_count %d", suspend_count);
++ suspend_count++;
++ if (suspend_count > 1)
++ goto out_up;
++
++ set_bit(SCST_FLAG_SUSPENDING, &scst_flags);
++ set_bit(SCST_FLAG_SUSPENDED, &scst_flags);
++ /*
++ * Assignment of SCST_FLAG_SUSPENDING and SCST_FLAG_SUSPENDED must be
++ * ordered with scst_cmd_count. Otherwise lockless logic in
++ * scst_translate_lun() and scst_mgmt_translate_lun() won't work.
++ */
++ smp_mb__after_set_bit();
++
++ /*
++ * See comment in scst_user.c::dev_user_task_mgmt_fn() for more
++ * information about scst_user behavior.
++ *
++ * ToDo: make the global suspending unneeded (switch to per-device
++ * reference counting? That would mean to switch off from lockless
++ * implementation of scst_translate_lun().. )
++ */
++
++ if (atomic_read(&scst_cmd_count) != 0) {
++ PRINT_INFO("Waiting for %d active commands to complete... This "
++			"might take a few minutes for disks or a few hours for "
++			"tapes if you use long-running commands like "
++			"REWIND or FORMAT. In case you have a hung user "
++			"space device (i.e. one made using the scst_user module) "
++			"not responding to any commands, it might take virtually "
++			"forever until the corresponding user space "
++			"program recovers and starts responding or gets "
++			"killed.", atomic_read(&scst_cmd_count));
++ rep = true;
++ }
++
++ res = scst_susp_wait(interruptible);
++ if (res != 0)
++ goto out_clear;
++
++ clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
++ /* See comment about smp_mb() above */
++ smp_mb__after_clear_bit();
++
++ TRACE_MGMT_DBG("Waiting for %d active commands finally to complete",
++ atomic_read(&scst_cmd_count));
++
++ res = scst_susp_wait(interruptible);
++ if (res != 0)
++ goto out_clear;
++
++ if (rep)
++ PRINT_INFO("%s", "All active commands completed");
++
++out_up:
++ mutex_unlock(&scst_suspend_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_clear:
++ clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
++ /* See comment about smp_mb() above */
++ smp_mb__after_clear_bit();
++ goto out_up;
++}
++EXPORT_SYMBOL_GPL(scst_suspend_activity);
++
++static void __scst_resume_activity(void)
++{
++ struct scst_cmd_threads *l;
++
++ TRACE_ENTRY();
++
++ suspend_count--;
++ TRACE_MGMT_DBG("suspend_count %d left", suspend_count);
++ if (suspend_count > 0)
++ goto out;
++
++ clear_bit(SCST_FLAG_SUSPENDED, &scst_flags);
++ /*
++ * The barrier is needed to make sure all woken up threads see the
++ * cleared flag. Not sure if it's really needed, but let's be safe.
++ */
++ smp_mb__after_clear_bit();
++
++ list_for_each_entry(l, &scst_cmd_threads_list, lists_list_entry) {
++ wake_up_all(&l->cmd_list_waitQ);
++ }
++ wake_up_all(&scst_init_cmd_list_waitQ);
++
++ spin_lock_irq(&scst_mcmd_lock);
++ if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
++ struct scst_mgmt_cmd *m;
++ m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
++ mgmt_cmd_list_entry);
++ TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to head of active "
++ "mgmt cmd list", m);
++ list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
++ }
++ spin_unlock_irq(&scst_mcmd_lock);
++ wake_up_all(&scst_mgmt_cmd_list_waitQ);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ * scst_resume_activity() - globally resume all activities
++ *
++ * Resumes suspended by scst_suspend_activity() activities.
++ */
++void scst_resume_activity(void)
++{
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_suspend_mutex);
++ __scst_resume_activity();
++ mutex_unlock(&scst_suspend_mutex);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_resume_activity);
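++
++/*
++ * Illustrative usage sketch (editorial example, not part of the original
++ * SCST patch): scst_suspend_activity()/scst_resume_activity() are meant to
++ * bracket global configuration changes. Note that when the interruptible
++ * suspend fails it has already rolled itself back, so the caller must not
++ * call scst_resume_activity(). "example_reconfigure" is a hypothetical name.
++ */
++static int example_reconfigure(void)
++{
++	int res;
++
++	res = scst_suspend_activity(true);	/* interruptible wait */
++	if (res != 0)
++		return res;	/* suspend failed and was already undone */
++
++	/* ... change global SCST state while no commands are active ... */
++
++	scst_resume_activity();
++	return 0;
++}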
++
++static int scst_register_device(struct scsi_device *scsidp)
++{
++ int res = 0;
++ struct scst_device *dev, *d;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ res = scst_alloc_device(GFP_KERNEL, &dev);
++ if (res != 0)
++ goto out_unlock;
++
++ dev->type = scsidp->type;
++
++ dev->virt_name = kmalloc(50, GFP_KERNEL);
++ if (dev->virt_name == NULL) {
++ PRINT_ERROR("%s", "Unable to alloc device name");
++ res = -ENOMEM;
++ goto out_free_dev;
++ }
++ snprintf(dev->virt_name, 50, "%d:%d:%d:%d", scsidp->host->host_no,
++ scsidp->channel, scsidp->id, scsidp->lun);
++
++ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
++ if (strcmp(d->virt_name, dev->virt_name) == 0) {
++ PRINT_ERROR("Device %s already exists", dev->virt_name);
++ res = -EEXIST;
++ goto out_free_dev;
++ }
++ }
++
++ dev->scsi_dev = scsidp;
++
++ list_add_tail(&dev->dev_list_entry, &scst_dev_list);
++
++ mutex_unlock(&scst_mutex);
++
++ res = scst_dev_sysfs_create(dev);
++ if (res != 0)
++ goto out_del;
++
++ PRINT_INFO("Attached to scsi%d, channel %d, id %d, lun %d, "
++ "type %d", scsidp->host->host_no, scsidp->channel,
++ scsidp->id, scsidp->lun, scsidp->type);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_del:
++ list_del(&dev->dev_list_entry);
++
++out_free_dev:
++ scst_free_device(dev);
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++ goto out;
++}
++
++static void scst_unregister_device(struct scsi_device *scsidp)
++{
++ struct scst_device *d, *dev = NULL;
++ struct scst_acg_dev *acg_dev, *aa;
++
++ TRACE_ENTRY();
++
++ scst_suspend_activity(false);
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
++ if (d->scsi_dev == scsidp) {
++ dev = d;
++ TRACE_DBG("Device %p found", dev);
++ break;
++ }
++ }
++ if (dev == NULL) {
++ PRINT_ERROR("SCST device for SCSI device %d:%d:%d:%d not found",
++ scsidp->host->host_no, scsidp->channel, scsidp->id,
++ scsidp->lun);
++ goto out_unlock;
++ }
++
++ list_del(&dev->dev_list_entry);
++
++ scst_assign_dev_handler(dev, &scst_null_devtype);
++
++ list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
++ dev_acg_dev_list_entry) {
++ scst_acg_del_lun(acg_dev->acg, acg_dev->lun, true);
++ }
++
++ mutex_unlock(&scst_mutex);
++
++ scst_resume_activity();
++
++ scst_dev_sysfs_del(dev);
++
++ PRINT_INFO("Detached from scsi%d, channel %d, id %d, lun %d, type %d",
++ scsidp->host->host_no, scsidp->channel, scsidp->id,
++ scsidp->lun, scsidp->type);
++
++ scst_free_device(dev);
++
++out:
++ TRACE_EXIT();
++ return;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++ scst_resume_activity();
++ goto out;
++}
++
++static int scst_dev_handler_check(struct scst_dev_type *dev_handler)
++{
++ int res = 0;
++
++ if (dev_handler->parse == NULL) {
++ PRINT_ERROR("scst dev handler %s must have "
++ "parse() method.", dev_handler->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (((dev_handler->add_device != NULL) &&
++ (dev_handler->del_device == NULL)) ||
++ ((dev_handler->add_device == NULL) &&
++ (dev_handler->del_device != NULL))) {
++ PRINT_ERROR("Dev handler %s must either define both "
++ "add_device() and del_device(), or none.",
++ dev_handler->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (dev_handler->alloc_data_buf == NULL)
++ dev_handler->alloc_data_buf_atomic = 1;
++
++ if (dev_handler->dev_done == NULL)
++ dev_handler->dev_done_atomic = 1;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_check_device_name(const char *dev_name)
++{
++ int res = 0;
++
++ if (strchr(dev_name, '/') != NULL) {
++ PRINT_ERROR("Dev name %s contains illegal character '/'",
++ dev_name);
++ res = -EINVAL;
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/**
++ * scst_register_virtual_device() - register a virtual device.
++ * @dev_handler: the device's device handler
++ * @dev_name:	the new device name, a NULL-terminated string. Must be unique
++ * among all virtual devices in the system.
++ *
++ * Registers a virtual device and returns the ID assigned to the device on
++ * success, or a negative value otherwise.
++ */
++int scst_register_virtual_device(struct scst_dev_type *dev_handler,
++ const char *dev_name)
++{
++ int res, rc;
++ struct scst_device *dev, *d;
++ bool sysfs_del = false;
++
++ TRACE_ENTRY();
++
++ if (dev_handler == NULL) {
++ PRINT_ERROR("%s: valid device handler must be supplied",
++ __func__);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (dev_name == NULL) {
++ PRINT_ERROR("%s: device name must be non-NULL", __func__);
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = scst_check_device_name(dev_name);
++ if (res != 0)
++ goto out;
++
++ res = scst_dev_handler_check(dev_handler);
++ if (res != 0)
++ goto out;
++
++ res = scst_suspend_activity(true);
++ if (res != 0)
++ goto out;
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out_resume;
++ }
++
++ res = scst_alloc_device(GFP_KERNEL, &dev);
++ if (res != 0)
++ goto out_unlock;
++
++ dev->type = dev_handler->type;
++ dev->scsi_dev = NULL;
++ dev->virt_name = kstrdup(dev_name, GFP_KERNEL);
++ if (dev->virt_name == NULL) {
++ PRINT_ERROR("Unable to allocate virt_name for dev %s",
++ dev_name);
++ res = -ENOMEM;
++ goto out_free_dev;
++ }
++
++ while (1) {
++ dev->virt_id = scst_virt_dev_last_id++;
++ if (dev->virt_id > 0)
++ break;
++ scst_virt_dev_last_id = 1;
++ }
++
++ res = dev->virt_id;
++
++ rc = scst_pr_init_dev(dev);
++ if (rc != 0) {
++ res = rc;
++ goto out_free_dev;
++ }
++
++ /*
++ * We can drop scst_mutex, because we have not yet added the dev in
++ * scst_dev_list, so it "doesn't exist" yet.
++ */
++ mutex_unlock(&scst_mutex);
++
++ res = scst_dev_sysfs_create(dev);
++ if (res != 0)
++ goto out_lock_pr_clear_dev;
++
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
++ if (strcmp(d->virt_name, dev_name) == 0) {
++ PRINT_ERROR("Device %s already exists", dev_name);
++ res = -EEXIST;
++ sysfs_del = true;
++ goto out_pr_clear_dev;
++ }
++ }
++
++ rc = scst_assign_dev_handler(dev, dev_handler);
++ if (rc != 0) {
++ res = rc;
++ sysfs_del = true;
++ goto out_pr_clear_dev;
++ }
++
++ list_add_tail(&dev->dev_list_entry, &scst_dev_list);
++
++ mutex_unlock(&scst_mutex);
++ scst_resume_activity();
++
++ res = dev->virt_id;
++
++ PRINT_INFO("Attached to virtual device %s (id %d)",
++ dev_name, res);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_lock_pr_clear_dev:
++ mutex_lock(&scst_mutex);
++
++out_pr_clear_dev:
++ scst_pr_clear_dev(dev);
++
++out_free_dev:
++ mutex_unlock(&scst_mutex);
++ if (sysfs_del)
++ scst_dev_sysfs_del(dev);
++ scst_free_device(dev);
++ goto out_resume;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++
++out_resume:
++ scst_resume_activity();
++ goto out;
++}
++EXPORT_SYMBOL_GPL(scst_register_virtual_device);
++
++/**
++ * scst_unregister_virtual_device() - unregister a virtual device.
++ * @id: the device's ID, returned by the registration function
++ */
++void scst_unregister_virtual_device(int id)
++{
++ struct scst_device *d, *dev = NULL;
++ struct scst_acg_dev *acg_dev, *aa;
++
++ TRACE_ENTRY();
++
++ scst_suspend_activity(false);
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
++ if (d->virt_id == id) {
++ dev = d;
++ TRACE_DBG("Virtual device %p (id %d) found", dev, id);
++ break;
++ }
++ }
++ if (dev == NULL) {
++ PRINT_ERROR("Virtual device (id %d) not found", id);
++ goto out_unlock;
++ }
++
++ list_del(&dev->dev_list_entry);
++
++ scst_pr_clear_dev(dev);
++
++ scst_assign_dev_handler(dev, &scst_null_devtype);
++
++ list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
++ dev_acg_dev_list_entry) {
++ scst_acg_del_lun(acg_dev->acg, acg_dev->lun, true);
++ }
++
++ mutex_unlock(&scst_mutex);
++ scst_resume_activity();
++
++ scst_dev_sysfs_del(dev);
++
++ PRINT_INFO("Detached from virtual device %s (id %d)",
++ dev->virt_name, dev->virt_id);
++
++ scst_free_device(dev);
++
++out:
++ TRACE_EXIT();
++ return;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++ scst_resume_activity();
++ goto out;
++}
++EXPORT_SYMBOL_GPL(scst_unregister_virtual_device);
++
++/**
++ * __scst_register_dev_driver() - register pass-through dev handler driver
++ * @dev_type: dev handler template
++ * @version: SCST_INTERFACE_VERSION version string to ensure that
++ * SCST core and the dev handler use the same version of
++ * the SCST interface
++ *
++ * Description:
++ * Registers a pass-through dev handler driver. Returns 0 on success
++ * or appropriate error code otherwise.
++ */
++int __scst_register_dev_driver(struct scst_dev_type *dev_type,
++ const char *version)
++{
++ int res, exist;
++ struct scst_dev_type *dt;
++
++ TRACE_ENTRY();
++
++ if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
++ PRINT_ERROR("Incorrect version of dev handler %s",
++ dev_type->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = scst_dev_handler_check(dev_type);
++ if (res != 0)
++ goto out;
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ exist = 0;
++ list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
++ if (strcmp(dt->name, dev_type->name) == 0) {
++ PRINT_ERROR("Device type handler \"%s\" already "
++				"exists", dt->name);
++ exist = 1;
++ break;
++ }
++ }
++ if (exist)
++ goto out_unlock;
++
++ list_add_tail(&dev_type->dev_type_list_entry, &scst_dev_type_list);
++
++ mutex_unlock(&scst_mutex);
++
++ res = scst_devt_sysfs_create(dev_type);
++ if (res < 0)
++ goto out;
++
++ PRINT_INFO("Device handler \"%s\" for type %d registered "
++ "successfully", dev_type->name, dev_type->type);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++ goto out;
++}
++EXPORT_SYMBOL_GPL(__scst_register_dev_driver);
++
++/**
++ * scst_unregister_dev_driver() - unregister pass-through dev handler driver
++ */
++void scst_unregister_dev_driver(struct scst_dev_type *dev_type)
++{
++ struct scst_device *dev;
++ struct scst_dev_type *dt;
++ int found = 0;
++
++ TRACE_ENTRY();
++
++ scst_suspend_activity(false);
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
++ if (strcmp(dt->name, dev_type->name) == 0) {
++ found = 1;
++ break;
++ }
++ }
++ if (!found) {
++ PRINT_ERROR("Dev handler \"%s\" isn't registered",
++ dev_type->name);
++ goto out_up;
++ }
++
++ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
++ if (dev->handler == dev_type) {
++ scst_assign_dev_handler(dev, &scst_null_devtype);
++ TRACE_DBG("Dev handler removed from device %p", dev);
++ }
++ }
++
++ list_del(&dev_type->dev_type_list_entry);
++
++ mutex_unlock(&scst_mutex);
++ scst_resume_activity();
++
++ scst_devt_sysfs_del(dev_type);
++
++ PRINT_INFO("Device handler \"%s\" for type %d unloaded",
++ dev_type->name, dev_type->type);
++
++out:
++ TRACE_EXIT();
++ return;
++
++out_up:
++ mutex_unlock(&scst_mutex);
++ scst_resume_activity();
++ goto out;
++}
++EXPORT_SYMBOL_GPL(scst_unregister_dev_driver);
++
++/**
++ * __scst_register_virtual_dev_driver() - register virtual dev handler driver
++ * @dev_type: dev handler template
++ * @version: SCST_INTERFACE_VERSION version string to ensure that
++ * SCST core and the dev handler use the same version of
++ * the SCST interface
++ *
++ * Description:
++ * Registers a virtual dev handler driver. Returns 0 on success or
++ * appropriate error code otherwise.
++ */
++int __scst_register_virtual_dev_driver(struct scst_dev_type *dev_type,
++ const char *version)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
++ PRINT_ERROR("Incorrect version of virtual dev handler %s",
++ dev_type->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = scst_dev_handler_check(dev_type);
++ if (res != 0)
++ goto out;
++
++ mutex_lock(&scst_mutex);
++ list_add_tail(&dev_type->dev_type_list_entry, &scst_virtual_dev_type_list);
++ mutex_unlock(&scst_mutex);
++
++ res = scst_devt_sysfs_create(dev_type);
++ if (res < 0)
++ goto out;
++
++ if (dev_type->type != -1) {
++ PRINT_INFO("Virtual device handler %s for type %d "
++ "registered successfully", dev_type->name,
++ dev_type->type);
++ } else {
++ PRINT_INFO("Virtual device handler \"%s\" registered "
++ "successfully", dev_type->name);
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL_GPL(__scst_register_virtual_dev_driver);
++
++/**
++ * scst_unregister_virtual_dev_driver() - unregister virtual dev driver
++ */
++void scst_unregister_virtual_dev_driver(struct scst_dev_type *dev_type)
++{
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_mutex);
++
++ /* Disable sysfs mgmt calls (e.g. addition of new devices) */
++ list_del(&dev_type->dev_type_list_entry);
++
++ /* Wait for outstanding sysfs mgmt calls completed */
++ while (dev_type->devt_active_sysfs_works_count > 0) {
++ mutex_unlock(&scst_mutex);
++ msleep(100);
++ mutex_lock(&scst_mutex);
++ }
++
++ mutex_unlock(&scst_mutex);
++
++ scst_devt_sysfs_del(dev_type);
++
++ PRINT_INFO("Device handler \"%s\" unloaded", dev_type->name);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_unregister_virtual_dev_driver);
++
++/* scst_mutex is supposed to be held */
++int scst_add_threads(struct scst_cmd_threads *cmd_threads,
++ struct scst_device *dev, struct scst_tgt_dev *tgt_dev, int num)
++{
++ int res = 0, i;
++ struct scst_cmd_thread_t *thr;
++ int n = 0, tgt_dev_num = 0;
++
++ TRACE_ENTRY();
++
++ if (num == 0) {
++ res = 0;
++ goto out;
++ }
++
++ list_for_each_entry(thr, &cmd_threads->threads_list, thread_list_entry) {
++ n++;
++ }
++
++ TRACE_DBG("cmd_threads %p, dev %p, tgt_dev %p, num %d, n %d",
++ cmd_threads, dev, tgt_dev, num, n);
++
++ if (tgt_dev != NULL) {
++ struct scst_tgt_dev *t;
++ list_for_each_entry(t, &tgt_dev->dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ if (t == tgt_dev)
++ break;
++ tgt_dev_num++;
++ }
++ }
++
++ for (i = 0; i < num; i++) {
++ thr = kmalloc(sizeof(*thr), GFP_KERNEL);
++ if (!thr) {
++ res = -ENOMEM;
++			PRINT_ERROR("Failed to allocate thr %d", res);
++ goto out_wait;
++ }
++
++ if (dev != NULL) {
++ char nm[14]; /* to limit the name's len */
++ strlcpy(nm, dev->virt_name, ARRAY_SIZE(nm));
++ thr->cmd_thread = kthread_create(scst_cmd_thread,
++ cmd_threads, "%s%d", nm, n++);
++ } else if (tgt_dev != NULL) {
++ char nm[11]; /* to limit the name's len */
++ strlcpy(nm, tgt_dev->dev->virt_name, ARRAY_SIZE(nm));
++ thr->cmd_thread = kthread_create(scst_cmd_thread,
++ cmd_threads, "%s%d_%d", nm, tgt_dev_num, n++);
++ } else
++ thr->cmd_thread = kthread_create(scst_cmd_thread,
++ cmd_threads, "scstd%d", n++);
++
++ if (IS_ERR(thr->cmd_thread)) {
++ res = PTR_ERR(thr->cmd_thread);
++ PRINT_ERROR("kthread_create() failed: %d", res);
++ kfree(thr);
++ goto out_wait;
++ }
++
++ list_add(&thr->thread_list_entry, &cmd_threads->threads_list);
++ cmd_threads->nr_threads++;
++
++ TRACE_DBG("Added thr %p to threads list (nr_threads %d, n %d)",
++ thr, cmd_threads->nr_threads, n);
++
++ wake_up_process(thr->cmd_thread);
++ }
++
++out_wait:
++ if (i > 0 && cmd_threads != &scst_main_cmd_threads) {
++ /*
++		 * Wait until the io_context has been initialized to avoid
++		 * possible races with the tgt_devs that share it.
++ */
++ while (!*(volatile bool*)&cmd_threads->io_context_ready) {
++ TRACE_DBG("Waiting for io_context for cmd_threads %p "
++ "initialized", cmd_threads);
++ msleep(50);
++ }
++ }
++
++ if (res != 0)
++ scst_del_threads(cmd_threads, i);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* scst_mutex is supposed to be held */
++void scst_del_threads(struct scst_cmd_threads *cmd_threads, int num)
++{
++ struct scst_cmd_thread_t *ct, *tmp;
++
++ TRACE_ENTRY();
++
++ if (num == 0)
++ goto out;
++
++ list_for_each_entry_safe_reverse(ct, tmp, &cmd_threads->threads_list,
++ thread_list_entry) {
++ int rc;
++ struct scst_device *dev;
++
++ rc = kthread_stop(ct->cmd_thread);
++ if (rc != 0 && rc != -EINTR)
++ TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
++
++ list_del(&ct->thread_list_entry);
++
++ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
++ struct scst_tgt_dev *tgt_dev;
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ scst_del_thr_data(tgt_dev, ct->cmd_thread);
++ }
++ }
++
++ kfree(ct);
++
++ cmd_threads->nr_threads--;
++
++ --num;
++ if (num == 0)
++ break;
++ }
++
++ EXTRACHECKS_BUG_ON((cmd_threads->nr_threads == 0) &&
++ (cmd_threads->io_context != NULL));
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* The activity is supposed to be suspended and scst_mutex held */
++void scst_stop_dev_threads(struct scst_device *dev)
++{
++ struct scst_tgt_dev *tgt_dev;
++
++ TRACE_ENTRY();
++
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ scst_tgt_dev_stop_threads(tgt_dev);
++ }
++
++ if ((dev->threads_num > 0) &&
++ (dev->threads_pool_type == SCST_THREADS_POOL_SHARED))
++ scst_del_threads(&dev->dev_cmd_threads, -1);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* The activity is supposed to be suspended and scst_mutex held */
++int scst_create_dev_threads(struct scst_device *dev)
++{
++ int res = 0;
++ struct scst_tgt_dev *tgt_dev;
++
++ TRACE_ENTRY();
++
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ res = scst_tgt_dev_setup_threads(tgt_dev);
++ if (res != 0)
++ goto out_err;
++ }
++
++ if ((dev->threads_num > 0) &&
++ (dev->threads_pool_type == SCST_THREADS_POOL_SHARED)) {
++ res = scst_add_threads(&dev->dev_cmd_threads, dev, NULL,
++ dev->threads_num);
++ if (res != 0)
++ goto out_err;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_err:
++ scst_stop_dev_threads(dev);
++ goto out;
++}
++
++/* The activity is supposed to be suspended and scst_mutex held */
++int scst_assign_dev_handler(struct scst_device *dev,
++ struct scst_dev_type *handler)
++{
++ int res = 0;
++ struct scst_tgt_dev *tgt_dev;
++ LIST_HEAD(attached_tgt_devs);
++
++ TRACE_ENTRY();
++
++ BUG_ON(handler == NULL);
++
++ if (dev->handler == handler)
++ goto out;
++
++ if (dev->handler == NULL)
++ goto assign;
++
++ if (dev->handler->detach_tgt) {
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ TRACE_DBG("Calling dev handler's detach_tgt(%p)",
++ tgt_dev);
++ dev->handler->detach_tgt(tgt_dev);
++ TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
++ }
++ }
++
++ /*
++	 * devt_dev sysfs must be created AFTER attach() and deleted BEFORE
++	 * detach() to avoid sysfs calls on objects that are not yet ready or
++	 * already dead.
++ */
++ scst_devt_dev_sysfs_del(dev);
++
++ if (dev->handler->detach) {
++ TRACE_DBG("%s", "Calling dev handler's detach()");
++ dev->handler->detach(dev);
++ TRACE_DBG("%s", "Old handler's detach() returned");
++ }
++
++ scst_stop_dev_threads(dev);
++
++assign:
++ dev->handler = handler;
++
++ if (handler == NULL)
++ goto out;
++
++ dev->threads_num = handler->threads_num;
++ dev->threads_pool_type = handler->threads_pool_type;
++
++ if (handler->attach) {
++ TRACE_DBG("Calling new dev handler's attach(%p)", dev);
++ res = handler->attach(dev);
++ TRACE_DBG("New dev handler's attach() returned %d", res);
++ if (res != 0) {
++ PRINT_ERROR("New device handler's %s attach() "
++ "failed: %d", handler->name, res);
++ goto out;
++ }
++ }
++
++ res = scst_devt_dev_sysfs_create(dev);
++ if (res != 0)
++ goto out_detach;
++
++ if (handler->attach_tgt) {
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ TRACE_DBG("Calling dev handler's attach_tgt(%p)",
++ tgt_dev);
++ res = handler->attach_tgt(tgt_dev);
++ TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
++ if (res != 0) {
++ PRINT_ERROR("Device handler's %s attach_tgt() "
++ "failed: %d", handler->name, res);
++ goto out_err_remove_sysfs;
++ }
++ list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
++ &attached_tgt_devs);
++ }
++ }
++
++ res = scst_create_dev_threads(dev);
++ if (res != 0)
++ goto out_err_detach_tgt;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_err_detach_tgt:
++ if (handler && handler->detach_tgt) {
++ list_for_each_entry(tgt_dev, &attached_tgt_devs,
++ extra_tgt_dev_list_entry) {
++ TRACE_DBG("Calling handler's detach_tgt(%p)",
++ tgt_dev);
++ handler->detach_tgt(tgt_dev);
++ TRACE_DBG("%s", "Handler's detach_tgt() returned");
++ }
++ }
++
++out_err_remove_sysfs:
++ scst_devt_dev_sysfs_del(dev);
++
++out_detach:
++ if (handler && handler->detach) {
++ TRACE_DBG("%s", "Calling handler's detach()");
++ handler->detach(dev);
++ TRACE_DBG("%s", "Handler's detach() returned");
++ }
++
++ dev->handler = &scst_null_devtype;
++ dev->threads_num = scst_null_devtype.threads_num;
++ dev->threads_pool_type = scst_null_devtype.threads_pool_type;
++ goto out;
++}
++
++/**
++ * scst_init_threads() - initialize SCST processing threads pool
++ *
++ * Initializes scst_cmd_threads structure
++ */
++void scst_init_threads(struct scst_cmd_threads *cmd_threads)
++{
++ TRACE_ENTRY();
++
++ spin_lock_init(&cmd_threads->cmd_list_lock);
++ INIT_LIST_HEAD(&cmd_threads->active_cmd_list);
++ init_waitqueue_head(&cmd_threads->cmd_list_waitQ);
++ INIT_LIST_HEAD(&cmd_threads->threads_list);
++ mutex_init(&cmd_threads->io_context_mutex);
++
++ mutex_lock(&scst_suspend_mutex);
++ list_add_tail(&cmd_threads->lists_list_entry,
++ &scst_cmd_threads_list);
++ mutex_unlock(&scst_suspend_mutex);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_init_threads);
++
++/**
++ * scst_deinit_threads() - deinitialize SCST processing threads pool
++ *
++ * Deinitializes scst_cmd_threads structure
++ */
++void scst_deinit_threads(struct scst_cmd_threads *cmd_threads)
++{
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_suspend_mutex);
++ list_del(&cmd_threads->lists_list_entry);
++ mutex_unlock(&scst_suspend_mutex);
++
++ BUG_ON(cmd_threads->io_context);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_deinit_threads);
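++
++/*
++ * Illustrative sketch only, not part of SCST: a target driver that owns a
++ * private command-processing pool brackets the lifetime of its
++ * scst_cmd_threads structure with the two helpers above.  The "my_*" names
++ * are hypothetical and the worker start/stop details are elided; kept under
++ * #if 0 because it is only an illustration.
++ */
++#if 0
++static struct scst_cmd_threads my_tgt_cmd_threads;
++
++static int my_tgt_pool_init(void)
++{
++	scst_init_threads(&my_tgt_cmd_threads);
++	/* ... start worker threads that drain active_cmd_list ... */
++	return 0;
++}
++
++static void my_tgt_pool_fini(void)
++{
++	/* All workers must already be stopped at this point */
++	scst_deinit_threads(&my_tgt_cmd_threads);
++}
++#endif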
++
++static void scst_stop_global_threads(void)
++{
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_mutex);
++
++ scst_del_threads(&scst_main_cmd_threads, -1);
++
++ if (scst_mgmt_cmd_thread)
++ kthread_stop(scst_mgmt_cmd_thread);
++ if (scst_mgmt_thread)
++ kthread_stop(scst_mgmt_thread);
++ if (scst_init_cmd_thread)
++ kthread_stop(scst_init_cmd_thread);
++
++ mutex_unlock(&scst_mutex);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* It does NOT stop already started threads on error! */
++static int scst_start_global_threads(int num)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_mutex);
++
++ res = scst_add_threads(&scst_main_cmd_threads, NULL, NULL, num);
++ if (res < 0)
++ goto out_unlock;
++
++ scst_init_cmd_thread = kthread_run(scst_init_thread,
++ NULL, "scst_initd");
++ if (IS_ERR(scst_init_cmd_thread)) {
++ res = PTR_ERR(scst_init_cmd_thread);
++ PRINT_ERROR("kthread_create() for init cmd failed: %d", res);
++ scst_init_cmd_thread = NULL;
++ goto out_unlock;
++ }
++
++ scst_mgmt_cmd_thread = kthread_run(scst_tm_thread,
++ NULL, "scsi_tm");
++ if (IS_ERR(scst_mgmt_cmd_thread)) {
++ res = PTR_ERR(scst_mgmt_cmd_thread);
++ PRINT_ERROR("kthread_create() for TM failed: %d", res);
++ scst_mgmt_cmd_thread = NULL;
++ goto out_unlock;
++ }
++
++ scst_mgmt_thread = kthread_run(scst_global_mgmt_thread,
++ NULL, "scst_mgmtd");
++ if (IS_ERR(scst_mgmt_thread)) {
++ res = PTR_ERR(scst_mgmt_thread);
++ PRINT_ERROR("kthread_create() for mgmt failed: %d", res);
++ scst_mgmt_thread = NULL;
++ goto out_unlock;
++ }
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/**
++ * scst_get() - increase global SCST ref counter
++ *
++ * Increases the global SCST ref counter, which prevents SCST from entering the
++ * suspended activities stage and thus protects against any global management
++ * operations.
++ */
++void scst_get(void)
++{
++ __scst_get();
++}
++EXPORT_SYMBOL(scst_get);
++
++/**
++ * scst_put() - decrease global SCST ref counter
++ *
++ * Decreases the global SCST ref counter, which prevents SCST from entering the
++ * suspended activities stage and thus protects against any global management
++ * operations. When it reaches zero, a pending suspend of activities, if any,
++ * will proceed.
++ */
++void scst_put(void)
++{
++ __scst_put();
++}
++EXPORT_SYMBOL(scst_put);
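++
++/*
++ * Illustrative sketch only, not part of SCST: one possible use of the pair
++ * above in a target driver, holding the reference while an incoming command
++ * is handed over so that a concurrent suspend of activities waits for the
++ * hand-off to finish.  my_deliver_cmd() is a hypothetical driver function;
++ * kept under #if 0 because it is only an illustration.
++ */
++#if 0
++static void my_deliver_cmd(struct scst_cmd *cmd)
++{
++	scst_get();
++	/* ... pass cmd on to the SCST core here ... */
++	scst_put();
++}
++#endif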
++
++/**
++ * scst_get_setup_id() - return SCST setup ID
++ *
++ * Returns the SCST setup ID. This ID can be used to tell apart multiple
++ * setups that share the same configuration.
++ */
++unsigned int scst_get_setup_id(void)
++{
++ return scst_setup_id;
++}
++EXPORT_SYMBOL_GPL(scst_get_setup_id);
++
++static int scst_add(struct device *cdev, struct class_interface *intf)
++{
++ struct scsi_device *scsidp;
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ scsidp = to_scsi_device(cdev->parent);
++
++ if ((scsidp->host->hostt->name == NULL) ||
++ (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0))
++ res = scst_register_device(scsidp);
++
++ TRACE_EXIT();
++ return res;
++}
++
++static void scst_remove(struct device *cdev, struct class_interface *intf)
++{
++ struct scsi_device *scsidp;
++
++ TRACE_ENTRY();
++
++ scsidp = to_scsi_device(cdev->parent);
++
++ if ((scsidp->host->hostt->name == NULL) ||
++ (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0))
++ scst_unregister_device(scsidp);
++
++ TRACE_EXIT();
++ return;
++}
++
++static struct class_interface scst_interface = {
++ .add_dev = scst_add,
++ .remove_dev = scst_remove,
++};
++
++static void __init scst_print_config(void)
++{
++ char buf[128];
++ int i, j;
++
++ i = snprintf(buf, sizeof(buf), "Enabled features: ");
++ j = i;
++
++#ifdef CONFIG_SCST_STRICT_SERIALIZING
++ i += snprintf(&buf[i], sizeof(buf) - i, "STRICT_SERIALIZING");
++#endif
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sEXTRACHECKS",
++ (j == i) ? "" : ", ");
++#endif
++
++#ifdef CONFIG_SCST_TRACING
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sTRACING",
++ (j == i) ? "" : ", ");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG",
++ (j == i) ? "" : ", ");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG_TM
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_TM",
++ (j == i) ? "" : ", ");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG_RETRY
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_RETRY",
++ (j == i) ? "" : ", ");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG_OOM
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_OOM",
++ (j == i) ? "" : ", ");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG_SN
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_SN",
++ (j == i) ? "" : ", ");
++#endif
++
++#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sUSE_EXPECTED_VALUES",
++ (j == i) ? "" : ", ");
++#endif
++
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ i += snprintf(&buf[i], sizeof(buf) - i,
++ "%sTEST_IO_IN_SIRQ",
++ (j == i) ? "" : ", ");
++#endif
++
++#ifdef CONFIG_SCST_STRICT_SECURITY
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sSTRICT_SECURITY",
++ (j == i) ? "" : ", ");
++#endif
++
++ if (j != i)
++ PRINT_INFO("%s", buf);
++}
++
++static int __init init_scst(void)
++{
++ int res, i;
++ int scst_num_cpus;
++
++ TRACE_ENTRY();
++
++ {
++ struct scsi_sense_hdr *shdr;
++ BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE < sizeof(*shdr));
++ }
++ {
++ struct scst_tgt_dev *t;
++ struct scst_cmd *c;
++ BUILD_BUG_ON(sizeof(t->curr_sn) != sizeof(t->expected_sn));
++ BUILD_BUG_ON(sizeof(c->sn) != sizeof(t->expected_sn));
++ }
++
++ mutex_init(&scst_mutex);
++ mutex_init(&scst_mutex2);
++ INIT_LIST_HEAD(&scst_template_list);
++ INIT_LIST_HEAD(&scst_dev_list);
++ INIT_LIST_HEAD(&scst_dev_type_list);
++ INIT_LIST_HEAD(&scst_virtual_dev_type_list);
++ spin_lock_init(&scst_main_lock);
++ spin_lock_init(&scst_init_lock);
++ init_waitqueue_head(&scst_init_cmd_list_waitQ);
++ INIT_LIST_HEAD(&scst_init_cmd_list);
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ scst_trace_flag = SCST_DEFAULT_LOG_FLAGS;
++#endif
++ atomic_set(&scst_cmd_count, 0);
++ spin_lock_init(&scst_mcmd_lock);
++ INIT_LIST_HEAD(&scst_active_mgmt_cmd_list);
++ INIT_LIST_HEAD(&scst_delayed_mgmt_cmd_list);
++ init_waitqueue_head(&scst_mgmt_cmd_list_waitQ);
++ init_waitqueue_head(&scst_mgmt_waitQ);
++ spin_lock_init(&scst_mgmt_lock);
++ INIT_LIST_HEAD(&scst_sess_init_list);
++ INIT_LIST_HEAD(&scst_sess_shut_list);
++ init_waitqueue_head(&scst_dev_cmd_waitQ);
++ mutex_init(&scst_suspend_mutex);
++ INIT_LIST_HEAD(&scst_cmd_threads_list);
++
++ scst_init_threads(&scst_main_cmd_threads);
++
++ res = scst_lib_init();
++ if (res != 0)
++ goto out_deinit_threads;
++
++ scst_num_cpus = num_online_cpus();
++
++ /* ToDo: register_cpu_notifier() */
++
++ if (scst_threads == 0)
++ scst_threads = scst_num_cpus;
++
++ if (scst_threads < 1) {
++ PRINT_ERROR("%s", "scst_threads can not be less than 1");
++ scst_threads = scst_num_cpus;
++ }
++
++#define INIT_CACHEP(p, s, o) do { \
++ p = KMEM_CACHE(s, SCST_SLAB_FLAGS); \
++ TRACE_MEM("Slab create: %s at %p size %zd", #s, p, \
++ sizeof(struct s)); \
++ if (p == NULL) { \
++ res = -ENOMEM; \
++ goto o; \
++ } \
++ } while (0)
++
++ INIT_CACHEP(scst_mgmt_cachep, scst_mgmt_cmd, out_lib_exit);
++ INIT_CACHEP(scst_mgmt_stub_cachep, scst_mgmt_cmd_stub,
++ out_destroy_mgmt_cache);
++ INIT_CACHEP(scst_ua_cachep, scst_tgt_dev_UA,
++ out_destroy_mgmt_stub_cache);
++ {
++ struct scst_sense { uint8_t s[SCST_SENSE_BUFFERSIZE]; };
++ INIT_CACHEP(scst_sense_cachep, scst_sense,
++ out_destroy_ua_cache);
++ }
++ INIT_CACHEP(scst_aen_cachep, scst_aen, out_destroy_sense_cache);
++ INIT_CACHEP(scst_cmd_cachep, scst_cmd, out_destroy_aen_cache);
++ INIT_CACHEP(scst_sess_cachep, scst_session, out_destroy_cmd_cache);
++ INIT_CACHEP(scst_tgtd_cachep, scst_tgt_dev, out_destroy_sess_cache);
++ INIT_CACHEP(scst_acgd_cachep, scst_acg_dev, out_destroy_tgt_cache);
++
++ scst_mgmt_mempool = mempool_create(64, mempool_alloc_slab,
++ mempool_free_slab, scst_mgmt_cachep);
++ if (scst_mgmt_mempool == NULL) {
++ res = -ENOMEM;
++ goto out_destroy_acg_cache;
++ }
++
++ /*
++	 * All mgmt stubs, UAs and sense buffers are bursty and losing them
++ * may have fatal consequences, so let's have big pools for them.
++ */
++
++ scst_mgmt_stub_mempool = mempool_create(1024, mempool_alloc_slab,
++ mempool_free_slab, scst_mgmt_stub_cachep);
++ if (scst_mgmt_stub_mempool == NULL) {
++ res = -ENOMEM;
++ goto out_destroy_mgmt_mempool;
++ }
++
++ scst_ua_mempool = mempool_create(512, mempool_alloc_slab,
++ mempool_free_slab, scst_ua_cachep);
++ if (scst_ua_mempool == NULL) {
++ res = -ENOMEM;
++ goto out_destroy_mgmt_stub_mempool;
++ }
++
++ scst_sense_mempool = mempool_create(1024, mempool_alloc_slab,
++ mempool_free_slab, scst_sense_cachep);
++ if (scst_sense_mempool == NULL) {
++ res = -ENOMEM;
++ goto out_destroy_ua_mempool;
++ }
++
++ scst_aen_mempool = mempool_create(100, mempool_alloc_slab,
++ mempool_free_slab, scst_aen_cachep);
++ if (scst_aen_mempool == NULL) {
++ res = -ENOMEM;
++ goto out_destroy_sense_mempool;
++ }
++
++ res = scst_sysfs_init();
++ if (res != 0)
++ goto out_destroy_aen_mempool;
++
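++	/*
++	 * Default sizing: scst_max_cmd_mem becomes 1/4 of low memory,
++	 * expressed in MB, and scst_max_dev_cmd_mem defaults to 2/5 of
++	 * scst_max_cmd_mem (see below).
++	 */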
++ if (scst_max_cmd_mem == 0) {
++ struct sysinfo si;
++ si_meminfo(&si);
++#if BITS_PER_LONG == 32
++ scst_max_cmd_mem = min(
++ (((uint64_t)(si.totalram - si.totalhigh) << PAGE_SHIFT)
++ >> 20) >> 2, (uint64_t)1 << 30);
++#else
++ scst_max_cmd_mem = (((si.totalram - si.totalhigh) << PAGE_SHIFT)
++ >> 20) >> 2;
++#endif
++ }
++
++ if (scst_max_dev_cmd_mem != 0) {
++ if (scst_max_dev_cmd_mem > scst_max_cmd_mem) {
++ PRINT_ERROR("scst_max_dev_cmd_mem (%d) > "
++ "scst_max_cmd_mem (%d)",
++ scst_max_dev_cmd_mem,
++ scst_max_cmd_mem);
++ scst_max_dev_cmd_mem = scst_max_cmd_mem;
++ }
++ } else
++ scst_max_dev_cmd_mem = scst_max_cmd_mem * 2 / 5;
++
++ res = scst_sgv_pools_init(
++ ((uint64_t)scst_max_cmd_mem << 10) >> (PAGE_SHIFT - 10), 0);
++ if (res != 0)
++ goto out_sysfs_cleanup;
++
++ res = scsi_register_interface(&scst_interface);
++ if (res != 0)
++ goto out_destroy_sgv_pool;
++
++ for (i = 0; i < (int)ARRAY_SIZE(scst_tasklets); i++) {
++ spin_lock_init(&scst_tasklets[i].tasklet_lock);
++ INIT_LIST_HEAD(&scst_tasklets[i].tasklet_cmd_list);
++ tasklet_init(&scst_tasklets[i].tasklet,
++ (void *)scst_cmd_tasklet,
++ (unsigned long)&scst_tasklets[i]);
++ }
++
++ TRACE_DBG("%d CPUs found, starting %d threads", scst_num_cpus,
++ scst_threads);
++
++ res = scst_start_global_threads(scst_threads);
++ if (res < 0)
++ goto out_thread_free;
++
++ PRINT_INFO("SCST version %s loaded successfully (max mem for "
++ "commands %dMB, per device %dMB)", SCST_VERSION_STRING,
++ scst_max_cmd_mem, scst_max_dev_cmd_mem);
++
++ scst_print_config();
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_thread_free:
++ scst_stop_global_threads();
++
++ scsi_unregister_interface(&scst_interface);
++
++out_destroy_sgv_pool:
++ scst_sgv_pools_deinit();
++
++out_sysfs_cleanup:
++ scst_sysfs_cleanup();
++
++out_destroy_aen_mempool:
++ mempool_destroy(scst_aen_mempool);
++
++out_destroy_sense_mempool:
++ mempool_destroy(scst_sense_mempool);
++
++out_destroy_ua_mempool:
++ mempool_destroy(scst_ua_mempool);
++
++out_destroy_mgmt_stub_mempool:
++ mempool_destroy(scst_mgmt_stub_mempool);
++
++out_destroy_mgmt_mempool:
++ mempool_destroy(scst_mgmt_mempool);
++
++out_destroy_acg_cache:
++ kmem_cache_destroy(scst_acgd_cachep);
++
++out_destroy_tgt_cache:
++ kmem_cache_destroy(scst_tgtd_cachep);
++
++out_destroy_sess_cache:
++ kmem_cache_destroy(scst_sess_cachep);
++
++out_destroy_cmd_cache:
++ kmem_cache_destroy(scst_cmd_cachep);
++
++out_destroy_aen_cache:
++ kmem_cache_destroy(scst_aen_cachep);
++
++out_destroy_sense_cache:
++ kmem_cache_destroy(scst_sense_cachep);
++
++out_destroy_ua_cache:
++ kmem_cache_destroy(scst_ua_cachep);
++
++out_destroy_mgmt_stub_cache:
++ kmem_cache_destroy(scst_mgmt_stub_cachep);
++
++out_destroy_mgmt_cache:
++ kmem_cache_destroy(scst_mgmt_cachep);
++
++out_lib_exit:
++ scst_lib_exit();
++
++out_deinit_threads:
++ scst_deinit_threads(&scst_main_cmd_threads);
++ goto out;
++}
++
++static void __exit exit_scst(void)
++{
++ TRACE_ENTRY();
++
++ /* ToDo: unregister_cpu_notifier() */
++
++ scst_stop_global_threads();
++
++ scst_deinit_threads(&scst_main_cmd_threads);
++
++ scsi_unregister_interface(&scst_interface);
++
++ scst_sgv_pools_deinit();
++
++ scst_sysfs_cleanup();
++
++#define DEINIT_CACHEP(p) do { \
++ kmem_cache_destroy(p); \
++ p = NULL; \
++ } while (0)
++
++ mempool_destroy(scst_mgmt_mempool);
++ mempool_destroy(scst_mgmt_stub_mempool);
++ mempool_destroy(scst_ua_mempool);
++ mempool_destroy(scst_sense_mempool);
++ mempool_destroy(scst_aen_mempool);
++
++ DEINIT_CACHEP(scst_mgmt_cachep);
++ DEINIT_CACHEP(scst_mgmt_stub_cachep);
++ DEINIT_CACHEP(scst_ua_cachep);
++ DEINIT_CACHEP(scst_sense_cachep);
++ DEINIT_CACHEP(scst_aen_cachep);
++ DEINIT_CACHEP(scst_cmd_cachep);
++ DEINIT_CACHEP(scst_sess_cachep);
++ DEINIT_CACHEP(scst_tgtd_cachep);
++ DEINIT_CACHEP(scst_acgd_cachep);
++
++ scst_lib_exit();
++
++ PRINT_INFO("%s", "SCST unloaded");
++
++ TRACE_EXIT();
++ return;
++}
++
++module_init(init_scst);
++module_exit(exit_scst);
++
++MODULE_AUTHOR("Vladislav Bolkhovitin");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("SCSI target core");
++MODULE_VERSION(SCST_VERSION_STRING);
+diff -uprN orig/linux-2.6.36/drivers/scst/scst_module.c linux-2.6.36/drivers/scst/scst_module.c
+--- orig/linux-2.6.36/drivers/scst/scst_module.c
++++ linux-2.6.36/drivers/scst/scst_module.c
+@@ -0,0 +1,70 @@
++/*
++ * scst_module.c
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * Support for loading target modules. The usage is similar to scsi_module.c
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++
++#include <scst.h>
++
++static int __init init_this_scst_driver(void)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ res = scst_register_target_template(&driver_target_template);
++ TRACE_DBG("scst_register_target_template() returned %d", res);
++ if (res < 0)
++ goto out;
++
++#ifdef SCST_REGISTER_INITIATOR_DRIVER
++ driver_template.module = THIS_MODULE;
++ scsi_register_module(MODULE_SCSI_HA, &driver_template);
++ TRACE_DBG("driver_template.present=%d",
++ driver_template.present);
++ if (driver_template.present == 0) {
++ res = -ENODEV;
++ MOD_DEC_USE_COUNT;
++ goto out;
++ }
++#endif
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void __exit exit_this_scst_driver(void)
++{
++ TRACE_ENTRY();
++
++#ifdef SCST_REGISTER_INITIATOR_DRIVER
++ scsi_unregister_module(MODULE_SCSI_HA, &driver_template);
++#endif
++
++ scst_unregister_target_template(&driver_target_template);
++
++ TRACE_EXIT();
++ return;
++}
++
++module_init(init_this_scst_driver);
++module_exit(exit_this_scst_driver);
+diff -uprN orig/linux-2.6.36/drivers/scst/scst_pres.c linux-2.6.36/drivers/scst/scst_pres.c
+--- orig/linux-2.6.36/drivers/scst/scst_pres.c
++++ linux-2.6.36/drivers/scst/scst_pres.c
+@@ -0,0 +1,2648 @@
++/*
++ * scst_pres.c
++ *
++ * Copyright (C) 2009 - 2010 Alexey Obitotskiy <alexeyo1@open-e.com>
++ * Copyright (C) 2009 - 2010 Open-E, Inc.
++ * Copyright (C) 2009 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/smp_lock.h>
++#include <linux/unistd.h>
++#include <linux/string.h>
++#include <linux/kthread.h>
++#include <linux/delay.h>
++#include <linux/time.h>
++#include <linux/ctype.h>
++#include <asm/byteorder.h>
++#include <linux/syscalls.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/fcntl.h>
++#include <linux/uaccess.h>
++#include <linux/namei.h>
++#include <linux/version.h>
++#include <linux/vmalloc.h>
++#include <asm/unaligned.h>
++
++#include <scst/scst.h>
++#include <scst/scst_const.h>
++#include "scst_priv.h"
++#include "scst_pres.h"
++
++#define SCST_PR_ROOT_ENTRY "pr"
++#define SCST_PR_FILE_SIGN 0xBBEEEEAAEEBBDD77LLU
++#define SCST_PR_FILE_VERSION 1LLU
++
++#define FILE_BUFFER_SIZE 512
++
++#ifndef isblank
++#define isblank(c) ((c) == ' ' || (c) == '\t')
++#endif
++
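++/*
++ * For the iSCSI protocol ID, the TransportID carries a big-endian ADDITIONAL
++ * LENGTH field in bytes 2-3 and the iSCSI name starting at byte 4, hence the
++ * "+ 4" below; all other protocols use the fixed TID_COMMON_SIZE.
++ */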
++static inline int tid_size(const uint8_t *tid)
++{
++ BUG_ON(tid == NULL);
++
++ if ((tid[0] & 0x0f) == SCSI_TRANSPORTID_PROTOCOLID_ISCSI)
++ return be16_to_cpu(get_unaligned((__be16 *)&tid[2])) + 4;
++ else
++ return TID_COMMON_SIZE;
++}
++
++/* Secures a tid by forcing a terminating 0 into the last byte of iSCSI tids */
++static inline void tid_secure(uint8_t *tid)
++{
++ if ((tid[0] & 0x0f) == SCSI_TRANSPORTID_PROTOCOLID_ISCSI) {
++ int size = tid_size(tid);
++ tid[size - 1] = '\0';
++ }
++
++ return;
++}
++
++/* Returns false if the tids are not equal, true otherwise */
++static bool tid_equal(const uint8_t *tid_a, const uint8_t *tid_b)
++{
++ int len;
++
++ if (tid_a == NULL || tid_b == NULL)
++ return false;
++
++ if ((tid_a[0] & 0x0f) != (tid_b[0] & 0x0f)) {
++ TRACE_DBG("%s", "Different protocol IDs");
++ return false;
++ }
++
++ if ((tid_a[0] & 0x0f) == SCSI_TRANSPORTID_PROTOCOLID_ISCSI) {
++ const uint8_t tid_a_fmt = tid_a[0] & 0xc0;
++ const uint8_t tid_b_fmt = tid_b[0] & 0xc0;
++ int tid_a_len, tid_a_max = tid_size(tid_a) - 4;
++ int tid_b_len, tid_b_max = tid_size(tid_b) - 4;
++ int i;
++
++ tid_a += 4;
++ tid_b += 4;
++
++ if (tid_a_fmt == 0x00)
++ tid_a_len = strnlen(tid_a, tid_a_max);
++ else if (tid_a_fmt == 0x40) {
++ if (tid_a_fmt != tid_b_fmt) {
++ uint8_t *p = strnchr(tid_a, tid_a_max, ',');
++ if (p == NULL)
++ goto out_error;
++ tid_a_len = p - tid_a;
++
++ BUG_ON(tid_a_len > tid_a_max);
++ BUG_ON(tid_a_len < 0);
++ } else
++ tid_a_len = strnlen(tid_a, tid_a_max);
++ } else
++ goto out_error;
++
++ if (tid_b_fmt == 0x00)
++ tid_b_len = strnlen(tid_b, tid_b_max);
++ else if (tid_b_fmt == 0x40) {
++ if (tid_a_fmt != tid_b_fmt) {
++ uint8_t *p = strnchr(tid_b, tid_b_max, ',');
++ if (p == NULL)
++ goto out_error;
++ tid_b_len = p - tid_b;
++
++ BUG_ON(tid_b_len > tid_b_max);
++ BUG_ON(tid_b_len < 0);
++ } else
++ tid_b_len = strnlen(tid_b, tid_b_max);
++ } else
++ goto out_error;
++
++ if (tid_a_len != tid_b_len)
++ return false;
++
++ len = tid_a_len;
++
++		/* iSCSI names are case insensitive */
++ for (i = 0; i < len; i++)
++ if (tolower(tid_a[i]) != tolower(tid_b[i]))
++ return false;
++ return true;
++ } else
++ len = TID_COMMON_SIZE;
++
++ return (memcmp(tid_a, tid_b, len) == 0);
++
++out_error:
++ PRINT_ERROR("%s", "Invalid initiator port transport id");
++ return false;
++}
++
++/* Must be called under dev_pr_mutex */
++static inline void scst_pr_set_holder(struct scst_device *dev,
++ struct scst_dev_registrant *holder, uint8_t scope, uint8_t type)
++{
++ dev->pr_is_set = 1;
++ dev->pr_scope = scope;
++ dev->pr_type = type;
++ if (dev->pr_type != TYPE_EXCLUSIVE_ACCESS_ALL_REG &&
++ dev->pr_type != TYPE_WRITE_EXCLUSIVE_ALL_REG)
++ dev->pr_holder = holder;
++}
++
++/* Must be called under dev_pr_mutex */
++static bool scst_pr_is_holder(struct scst_device *dev,
++ struct scst_dev_registrant *reg)
++{
++ bool res = false;
++
++ TRACE_ENTRY();
++
++ if (!dev->pr_is_set)
++ goto out;
++
++ if (dev->pr_type == TYPE_EXCLUSIVE_ACCESS_ALL_REG ||
++ dev->pr_type == TYPE_WRITE_EXCLUSIVE_ALL_REG) {
++ res = (reg != NULL);
++ } else
++ res = (dev->pr_holder == reg);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++
++/* Must be called under dev_pr_mutex */
++void scst_pr_dump_prs(struct scst_device *dev, bool force)
++{
++ if (!force) {
++#if defined(CONFIG_SCST_DEBUG)
++ if ((trace_flag & TRACE_PRES) == 0)
++#endif
++ goto out;
++ }
++
++ PRINT_INFO("Persistent reservations for device %s:", dev->virt_name);
++
++ if (list_empty(&dev->dev_registrants_list))
++ PRINT_INFO("%s", " No registrants");
++ else {
++ struct scst_dev_registrant *reg;
++ int i = 0;
++ list_for_each_entry(reg, &dev->dev_registrants_list,
++ dev_registrants_list_entry) {
++ PRINT_INFO(" [%d] registrant %s/%d, key %016llx "
++ "(reg %p, tgt_dev %p)", i++,
++ debug_transport_id_to_initiator_name(
++ reg->transport_id),
++ reg->rel_tgt_id, reg->key, reg, reg->tgt_dev);
++ }
++ }
++
++ if (dev->pr_is_set) {
++ struct scst_dev_registrant *holder = dev->pr_holder;
++ if (holder != NULL)
++ PRINT_INFO("Reservation holder is %s/%d (key %016llx, "
++ "scope %x, type %x, reg %p, tgt_dev %p)",
++ debug_transport_id_to_initiator_name(
++ holder->transport_id),
++ holder->rel_tgt_id, holder->key, dev->pr_scope,
++ dev->pr_type, holder, holder->tgt_dev);
++ else
++ PRINT_INFO("All registrants are reservation holders "
++ "(scope %x, type %x)", dev->pr_scope,
++ dev->pr_type);
++ } else
++ PRINT_INFO("%s", "Not reserved");
++
++out:
++ return;
++}
++
++#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
++
++/* dev_pr_mutex must be locked */
++static void scst_pr_find_registrants_list_all(struct scst_device *dev,
++ struct scst_dev_registrant *exclude_reg, struct list_head *list)
++{
++ struct scst_dev_registrant *reg;
++
++ TRACE_ENTRY();
++
++ TRACE_PR("Finding all registered records for device '%s' "
++ "with exclude reg key %016llx",
++ dev->virt_name, exclude_reg->key);
++
++ list_for_each_entry(reg, &dev->dev_registrants_list,
++ dev_registrants_list_entry) {
++ if (reg == exclude_reg)
++ continue;
++ TRACE_PR("Adding registrant %s/%d (%p) to find list (key %016llx)",
++ debug_transport_id_to_initiator_name(reg->transport_id),
++ reg->rel_tgt_id, reg, reg->key);
++ list_add_tail(&reg->aux_list_entry, list);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/* dev_pr_mutex must be locked */
++static void scst_pr_find_registrants_list_key(struct scst_device *dev,
++ __be64 key, struct list_head *list)
++{
++ struct scst_dev_registrant *reg;
++
++ TRACE_ENTRY();
++
++ TRACE_PR("Finding registrants for device '%s' with key %016llx",
++ dev->virt_name, key);
++
++ list_for_each_entry(reg, &dev->dev_registrants_list,
++ dev_registrants_list_entry) {
++ if (reg->key == key) {
++ TRACE_PR("Adding registrant %s/%d (%p) to the find "
++ "list (key %016llx)",
++ debug_transport_id_to_initiator_name(
++ reg->transport_id),
++ reg->rel_tgt_id, reg->tgt_dev, key);
++ list_add_tail(&reg->aux_list_entry, list);
++ }
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/* dev_pr_mutex must be locked */
++static struct scst_dev_registrant *scst_pr_find_reg(
++ struct scst_device *dev, const uint8_t *transport_id,
++ const uint16_t rel_tgt_id)
++{
++ struct scst_dev_registrant *reg, *res = NULL;
++
++ TRACE_ENTRY();
++
++ list_for_each_entry(reg, &dev->dev_registrants_list,
++ dev_registrants_list_entry) {
++ if ((reg->rel_tgt_id == rel_tgt_id) &&
++ tid_equal(reg->transport_id, transport_id)) {
++ res = reg;
++ break;
++ }
++ }
++
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++/* Must be called under dev_pr_mutex */
++static void scst_pr_clear_reservation(struct scst_device *dev)
++{
++ TRACE_ENTRY();
++
++ WARN_ON(!dev->pr_is_set);
++
++ dev->pr_is_set = 0;
++ dev->pr_scope = SCOPE_LU;
++ dev->pr_type = TYPE_UNSPECIFIED;
++
++ dev->pr_holder = NULL;
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Must be called under dev_pr_mutex */
++static void scst_pr_clear_holder(struct scst_device *dev)
++{
++ TRACE_ENTRY();
++
++ WARN_ON(!dev->pr_is_set);
++
++ if (dev->pr_type == TYPE_WRITE_EXCLUSIVE_ALL_REG ||
++ dev->pr_type == TYPE_EXCLUSIVE_ACCESS_ALL_REG) {
++ if (list_empty(&dev->dev_registrants_list))
++ scst_pr_clear_reservation(dev);
++ } else
++ scst_pr_clear_reservation(dev);
++
++ dev->pr_holder = NULL;
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Must be called under dev_pr_mutex */
++static struct scst_dev_registrant *scst_pr_add_registrant(
++ struct scst_device *dev, const uint8_t *transport_id,
++ const uint16_t rel_tgt_id, __be64 key,
++ bool dev_lock_locked)
++{
++ struct scst_dev_registrant *reg;
++ struct scst_tgt_dev *t;
++ gfp_t gfp_flags = dev_lock_locked ? GFP_ATOMIC : GFP_KERNEL;
++
++ TRACE_ENTRY();
++
++ BUG_ON(dev == NULL);
++ BUG_ON(transport_id == NULL);
++
++ TRACE_PR("Registering %s/%d (dev %s)",
++ debug_transport_id_to_initiator_name(transport_id),
++ rel_tgt_id, dev->virt_name);
++
++ reg = scst_pr_find_reg(dev, transport_id, rel_tgt_id);
++ if (reg != NULL) {
++ /*
++		 * This can happen if a target driver creates more than one
++		 * session from the same initiator to the same target.
++ */
++ PRINT_ERROR("Registrant %p/%d (dev %s) already exists!", reg,
++ rel_tgt_id, dev->virt_name);
++ PRINT_BUFFER("TransportID", transport_id, 24);
++ WARN_ON(1);
++ reg = NULL;
++ goto out;
++ }
++
++ reg = kzalloc(sizeof(*reg), gfp_flags);
++ if (reg == NULL) {
++ PRINT_ERROR("%s", "Unable to allocate registration record");
++ goto out;
++ }
++
++ reg->transport_id = kmalloc(tid_size(transport_id), gfp_flags);
++ if (reg->transport_id == NULL) {
++ PRINT_ERROR("%s", "Unable to allocate initiator port "
++ "transport id");
++ goto out_free;
++ }
++ memcpy(reg->transport_id, transport_id, tid_size(transport_id));
++
++ reg->rel_tgt_id = rel_tgt_id;
++ reg->key = key;
++
++ /*
++ * We can't use scst_mutex here, because of the circular
++ * locking dependency with dev_pr_mutex.
++ */
++ if (!dev_lock_locked)
++ spin_lock_bh(&dev->dev_lock);
++ list_for_each_entry(t, &dev->dev_tgt_dev_list, dev_tgt_dev_list_entry) {
++ if (tid_equal(t->sess->transport_id, transport_id) &&
++ (t->sess->tgt->rel_tgt_id == rel_tgt_id) &&
++ (t->registrant == NULL)) {
++ /*
++ * We must assign here, because t can die
++ * immediately after we release dev_lock.
++ */
++ TRACE_PR("Found tgt_dev %p", t);
++ reg->tgt_dev = t;
++ t->registrant = reg;
++ break;
++ }
++ }
++ if (!dev_lock_locked)
++ spin_unlock_bh(&dev->dev_lock);
++
++ list_add_tail(&reg->dev_registrants_list_entry,
++ &dev->dev_registrants_list);
++
++ TRACE_PR("Reg %p registered (dev %s, tgt_dev %p)", reg,
++ dev->virt_name, reg->tgt_dev);
++
++out:
++ TRACE_EXIT_HRES((unsigned long)reg);
++ return reg;
++
++out_free:
++ kfree(reg);
++ reg = NULL;
++ goto out;
++}
++
++/* Must be called under dev_pr_mutex */
++static void scst_pr_remove_registrant(struct scst_device *dev,
++ struct scst_dev_registrant *reg)
++{
++ TRACE_ENTRY();
++
++ TRACE_PR("Removing registrant %s/%d (reg %p, tgt_dev %p, key %016llx, "
++ "dev %s)", debug_transport_id_to_initiator_name(reg->transport_id),
++ reg->rel_tgt_id, reg, reg->tgt_dev, reg->key, dev->virt_name);
++
++ list_del(&reg->dev_registrants_list_entry);
++
++ if (scst_pr_is_holder(dev, reg))
++ scst_pr_clear_holder(dev);
++
++ if (reg->tgt_dev)
++ reg->tgt_dev->registrant = NULL;
++
++ kfree(reg->transport_id);
++ kfree(reg);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Must be called under dev_pr_mutex */
++static void scst_pr_send_ua_reg(struct scst_device *dev,
++ struct scst_dev_registrant *reg,
++ int key, int asc, int ascq)
++{
++ static uint8_t ua[SCST_STANDARD_SENSE_LEN];
++
++ TRACE_ENTRY();
++
++ scst_set_sense(ua, sizeof(ua), dev->d_sense, key, asc, ascq);
++
++ TRACE_PR("Queuing UA [%x %x %x]: registrant %s/%d (%p), tgt_dev %p, "
++ "key %016llx", ua[2], ua[12], ua[13],
++ debug_transport_id_to_initiator_name(reg->transport_id),
++ reg->rel_tgt_id, reg, reg->tgt_dev, reg->key);
++
++ if (reg->tgt_dev)
++ scst_check_set_UA(reg->tgt_dev, ua, sizeof(ua), 0);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Must be called under dev_pr_mutex */
++static void scst_pr_send_ua_all(struct scst_device *dev,
++ struct scst_dev_registrant *exclude_reg,
++ int key, int asc, int ascq)
++{
++ struct scst_dev_registrant *reg;
++
++ TRACE_ENTRY();
++
++ list_for_each_entry(reg, &dev->dev_registrants_list,
++ dev_registrants_list_entry) {
++ if (reg != exclude_reg)
++ scst_pr_send_ua_reg(dev, reg, key, asc, ascq);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Must be called under dev_pr_mutex */
++static void scst_pr_abort_reg(struct scst_device *dev,
++ struct scst_cmd *pr_cmd, struct scst_dev_registrant *reg)
++{
++ struct scst_session *sess;
++ __be64 packed_lun;
++ int rc;
++
++ TRACE_ENTRY();
++
++ if (reg->tgt_dev == NULL) {
++ TRACE_PR("Registrant %s/%d (%p, key 0x%016llx) has no session",
++ debug_transport_id_to_initiator_name(reg->transport_id),
++ reg->rel_tgt_id, reg, reg->key);
++ goto out;
++ }
++
++ sess = reg->tgt_dev->sess;
++
++ TRACE_PR("Aborting %d commands for %s/%d (reg %p, key 0x%016llx, "
++ "tgt_dev %p, sess %p)",
++ atomic_read(&reg->tgt_dev->tgt_dev_cmd_count),
++ debug_transport_id_to_initiator_name(reg->transport_id),
++ reg->rel_tgt_id, reg, reg->key, reg->tgt_dev, sess);
++
++ packed_lun = scst_pack_lun(reg->tgt_dev->lun, sess->acg->addr_method);
++
++ rc = scst_rx_mgmt_fn_lun(sess, SCST_PR_ABORT_ALL,
++ (uint8_t *)&packed_lun, sizeof(packed_lun), SCST_NON_ATOMIC,
++ pr_cmd);
++ if (rc != 0) {
++ /*
++ * There's nothing more we can do here... Hopefully, it would
++		 * There's nothing more we can do here... Hopefully, it will
++		 * never happen.
++ PRINT_ERROR("SCST_PR_ABORT_ALL failed %d (sess %p)",
++ rc, sess);
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* Abstract vfs_unlink & path_put for different kernel versions */
++static inline void scst_pr_vfs_unlink_and_put(struct nameidata *nd)
++{
++ vfs_unlink(nd->path.dentry->d_parent->d_inode,
++ nd->path.dentry);
++ path_put(&nd->path);
++}
++
++static inline void scst_pr_path_put(struct nameidata *nd)
++{
++ path_put(&nd->path);
++}
++
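++/*
++ * On-disk layout of the persistent reservations file, as read below and
++ * written by scst_pr_sync_device_file():
++ *
++ *	u64	signature (SCST_PR_FILE_SIGN, written last to commit the file)
++ *	u64	version (SCST_PR_FILE_VERSION)
++ *	u8	aptpl
++ *	u8	pr_is_set
++ *	u8	pr_type
++ *	u8	pr_scope
++ *	then, for each registrant:
++ *	u8	is_holder
++ *	u8[]	TransportID (tid_size() bytes)
++ *	be64	key
++ *	u16	rel_tgt_id (native endianness)
++ */
++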
++/* Called under scst_mutex */
++static int scst_pr_do_load_device_file(struct scst_device *dev,
++ const char *file_name)
++{
++ int res = 0, rc;
++ struct file *file = NULL;
++ struct inode *inode;
++ char *buf = NULL;
++ loff_t file_size, pos, data_size;
++ uint64_t sign, version;
++ mm_segment_t old_fs;
++ uint8_t pr_is_set, aptpl;
++ __be64 key;
++ uint16_t rel_tgt_id;
++
++ TRACE_ENTRY();
++
++ old_fs = get_fs();
++ set_fs(KERNEL_DS);
++
++ TRACE_PR("Loading persistent file '%s'", file_name);
++
++ file = filp_open(file_name, O_RDONLY, 0);
++ if (IS_ERR(file)) {
++ res = PTR_ERR(file);
++ TRACE_PR("Unable to open file '%s' - error %d", file_name, res);
++ goto out;
++ }
++
++ inode = file->f_dentry->d_inode;
++
++ if (S_ISREG(inode->i_mode))
++ /* Nothing to do */;
++ else if (S_ISBLK(inode->i_mode))
++ inode = inode->i_bdev->bd_inode;
++	else {
++		PRINT_ERROR("Invalid file mode 0x%x", inode->i_mode);
++		res = -EINVAL;
++		goto out_close;
++	}
++
++ file_size = inode->i_size;
++
++	/* Let's limit the file size to some reasonable number */
++ if ((file_size == 0) || (file_size >= 15*1024*1024)) {
++ PRINT_ERROR("Invalid PR file size %d", (int)file_size);
++ res = -EINVAL;
++ goto out_close;
++ }
++
++ buf = vmalloc(file_size);
++ if (buf == NULL) {
++ res = -ENOMEM;
++ PRINT_ERROR("%s", "Unable to allocate buffer");
++ goto out_close;
++ }
++
++ pos = 0;
++ rc = vfs_read(file, (void __force __user *)buf, file_size, &pos);
++ if (rc != file_size) {
++ PRINT_ERROR("Unable to read file '%s' - error %d", file_name,
++ rc);
++ res = rc;
++ goto out_close;
++ }
++
++ data_size = 0;
++ data_size += sizeof(sign);
++ data_size += sizeof(version);
++ data_size += sizeof(aptpl);
++ data_size += sizeof(pr_is_set);
++ data_size += sizeof(dev->pr_type);
++ data_size += sizeof(dev->pr_scope);
++
++ if (file_size < data_size) {
++ res = -EINVAL;
++ PRINT_ERROR("Invalid file '%s' - size too small", file_name);
++ goto out_close;
++ }
++
++ pos = 0;
++
++ sign = get_unaligned((uint64_t *)&buf[pos]);
++ if (sign != SCST_PR_FILE_SIGN) {
++ res = -EINVAL;
++ PRINT_ERROR("Invalid persistent file signature %016llx "
++ "(expected %016llx)", sign, SCST_PR_FILE_SIGN);
++ goto out_close;
++ }
++ pos += sizeof(sign);
++
++ version = get_unaligned((uint64_t *)&buf[pos]);
++ if (version != SCST_PR_FILE_VERSION) {
++ res = -EINVAL;
++ PRINT_ERROR("Invalid persistent file version %016llx "
++ "(expected %016llx)", version, SCST_PR_FILE_VERSION);
++ goto out_close;
++ }
++ pos += sizeof(version);
++
++ while (data_size < file_size) {
++ uint8_t *tid;
++
++ data_size++;
++ tid = &buf[data_size];
++ data_size += tid_size(tid);
++ data_size += sizeof(key);
++ data_size += sizeof(rel_tgt_id);
++
++ if (data_size > file_size) {
++ res = -EINVAL;
++ PRINT_ERROR("Invalid file '%s' - size mismatch have "
++ "%lld expected %lld", file_name, file_size,
++ data_size);
++ goto out_close;
++ }
++ }
++
++ aptpl = buf[pos];
++ dev->pr_aptpl = aptpl ? 1 : 0;
++ pos += sizeof(aptpl);
++
++ pr_is_set = buf[pos];
++ dev->pr_is_set = pr_is_set ? 1 : 0;
++ pos += sizeof(pr_is_set);
++
++ dev->pr_type = buf[pos];
++ pos += sizeof(dev->pr_type);
++
++ dev->pr_scope = buf[pos];
++ pos += sizeof(dev->pr_scope);
++
++ while (pos < file_size) {
++ uint8_t is_holder;
++ uint8_t *tid;
++ struct scst_dev_registrant *reg = NULL;
++
++ is_holder = buf[pos++];
++
++ tid = &buf[pos];
++ pos += tid_size(tid);
++
++ key = get_unaligned((__be64 *)&buf[pos]);
++ pos += sizeof(key);
++
++ rel_tgt_id = get_unaligned((uint16_t *)&buf[pos]);
++ pos += sizeof(rel_tgt_id);
++
++ reg = scst_pr_add_registrant(dev, tid, rel_tgt_id, key, false);
++ if (reg == NULL) {
++ res = -ENOMEM;
++ goto out_close;
++ }
++
++ if (is_holder)
++ dev->pr_holder = reg;
++ }
++
++out_close:
++ filp_close(file, NULL);
++
++out:
++ if (buf != NULL)
++ vfree(buf);
++
++ set_fs(old_fs);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_pr_load_device_file(struct scst_device *dev)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ if (dev->pr_file_name == NULL || dev->pr_file_name1 == NULL) {
++ PRINT_ERROR("Invalid file paths for '%s'", dev->virt_name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = scst_pr_do_load_device_file(dev, dev->pr_file_name);
++ if (res == 0)
++ goto out;
++ else if (res == -ENOMEM)
++ goto out;
++
++ res = scst_pr_do_load_device_file(dev, dev->pr_file_name1);
++
++ scst_pr_dump_prs(dev, false);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_pr_copy_file(const char *src, const char *dest)
++{
++ int res = 0;
++ struct inode *inode;
++ loff_t file_size, pos;
++ uint8_t *buf = NULL;
++ struct file *file_src = NULL, *file_dest = NULL;
++ mm_segment_t old_fs = get_fs();
++
++ TRACE_ENTRY();
++
++ if (src == NULL || dest == NULL) {
++ res = -EINVAL;
++ PRINT_ERROR("%s", "Invalid persistent files path - backup "
++ "skipped");
++ goto out;
++ }
++
++ TRACE_PR("Copying '%s' into '%s'", src, dest);
++
++ set_fs(KERNEL_DS);
++
++ file_src = filp_open(src, O_RDONLY, 0);
++ if (IS_ERR(file_src)) {
++ res = PTR_ERR(file_src);
++ TRACE_PR("Unable to open file '%s' - error %d", src,
++ res);
++ goto out_free;
++ }
++
++ file_dest = filp_open(dest, O_WRONLY | O_CREAT | O_TRUNC, 0644);
++ if (IS_ERR(file_dest)) {
++ res = PTR_ERR(file_dest);
++ TRACE_PR("Unable to open backup file '%s' - error %d", dest,
++ res);
++ goto out_close;
++ }
++
++ inode = file_src->f_dentry->d_inode;
++
++ if (S_ISREG(inode->i_mode))
++ /* Nothing to do */;
++ else if (S_ISBLK(inode->i_mode))
++ inode = inode->i_bdev->bd_inode;
++ else {
++ PRINT_ERROR("Invalid file mode 0x%x", inode->i_mode);
++ res = -EINVAL;
++ set_fs(old_fs);
++ goto out_skip;
++ }
++
++ file_size = inode->i_size;
++
++ buf = vmalloc(file_size);
++ if (buf == NULL) {
++ res = -ENOMEM;
++ PRINT_ERROR("%s", "Unable to allocate temporary buffer");
++ goto out_skip;
++ }
++
++ pos = 0;
++ res = vfs_read(file_src, (void __force __user *)buf, file_size, &pos);
++ if (res != file_size) {
++ PRINT_ERROR("Unable to read file '%s' - error %d", src, res);
++ goto out_skip;
++ }
++
++ pos = 0;
++ res = vfs_write(file_dest, (void __force __user *)buf, file_size, &pos);
++ if (res != file_size) {
++ PRINT_ERROR("Unable to write to '%s' - error %d", dest, res);
++ goto out_skip;
++ }
++
++ res = vfs_fsync(file_dest, 0);
++ if (res != 0) {
++ PRINT_ERROR("fsync() of the backup PR file failed: %d", res);
++ goto out_skip;
++ }
++
++out_skip:
++ filp_close(file_dest, NULL);
++
++out_close:
++ filp_close(file_src, NULL);
++
++out_free:
++ if (buf != NULL)
++ vfree(buf);
++
++ set_fs(old_fs);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void scst_pr_remove_device_files(struct scst_tgt_dev *tgt_dev)
++{
++ int res = 0;
++ struct scst_device *dev = tgt_dev->dev;
++ struct nameidata nd;
++ mm_segment_t old_fs = get_fs();
++
++ TRACE_ENTRY();
++
++ set_fs(KERNEL_DS);
++
++ res = path_lookup(dev->pr_file_name, 0, &nd);
++ if (!res)
++ scst_pr_vfs_unlink_and_put(&nd);
++ else
++ TRACE_DBG("Unable to lookup file '%s' - error %d",
++ dev->pr_file_name, res);
++
++ res = path_lookup(dev->pr_file_name1, 0, &nd);
++ if (!res)
++ scst_pr_vfs_unlink_and_put(&nd);
++ else
++ TRACE_DBG("Unable to lookup file '%s' - error %d",
++ dev->pr_file_name1, res);
++
++ set_fs(old_fs);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Must be called under dev_pr_mutex */
++void scst_pr_sync_device_file(struct scst_tgt_dev *tgt_dev, struct scst_cmd *cmd)
++{
++ int res = 0;
++ struct scst_device *dev = tgt_dev->dev;
++ struct file *file;
++ mm_segment_t old_fs = get_fs();
++ loff_t pos = 0;
++ uint64_t sign;
++ uint64_t version;
++ uint8_t pr_is_set, aptpl;
++
++ TRACE_ENTRY();
++
++ if ((dev->pr_aptpl == 0) || list_empty(&dev->dev_registrants_list)) {
++ scst_pr_remove_device_files(tgt_dev);
++ goto out;
++ }
++
++ scst_pr_copy_file(dev->pr_file_name, dev->pr_file_name1);
++
++ set_fs(KERNEL_DS);
++
++ file = filp_open(dev->pr_file_name, O_WRONLY | O_CREAT | O_TRUNC, 0644);
++ if (IS_ERR(file)) {
++ res = PTR_ERR(file);
++ PRINT_ERROR("Unable to (re)create PR file '%s' - error %d",
++ dev->pr_file_name, res);
++ goto out_set_fs;
++ }
++
++ TRACE_PR("Updating pr file '%s'", dev->pr_file_name);
++
++ /*
++ * signature
++ */
++ sign = 0;
++ pos = 0;
++ res = vfs_write(file, (void __force __user *)&sign, sizeof(sign), &pos);
++ if (res != sizeof(sign))
++ goto write_error;
++
++ /*
++ * version
++ */
++ version = SCST_PR_FILE_VERSION;
++ res = vfs_write(file, (void __force __user *)&version, sizeof(version), &pos);
++ if (res != sizeof(version))
++ goto write_error;
++
++ /*
++ * APTPL
++ */
++ aptpl = dev->pr_aptpl;
++ res = vfs_write(file, (void __force __user *)&aptpl, sizeof(aptpl), &pos);
++ if (res != sizeof(aptpl))
++ goto write_error;
++
++ /*
++ * reservation
++ */
++ pr_is_set = dev->pr_is_set;
++ res = vfs_write(file, (void __force __user *)&pr_is_set, sizeof(pr_is_set), &pos);
++ if (res != sizeof(pr_is_set))
++ goto write_error;
++
++ res = vfs_write(file, (void __force __user *)&dev->pr_type, sizeof(dev->pr_type), &pos);
++ if (res != sizeof(dev->pr_type))
++ goto write_error;
++
++ res = vfs_write(file, (void __force __user *)&dev->pr_scope, sizeof(dev->pr_scope), &pos);
++ if (res != sizeof(dev->pr_scope))
++ goto write_error;
++
++ /*
++ * registration records
++ */
++ if (!list_empty(&dev->dev_registrants_list)) {
++ struct scst_dev_registrant *reg;
++
++ list_for_each_entry(reg, &dev->dev_registrants_list,
++ dev_registrants_list_entry) {
++ uint8_t is_holder = 0;
++ int size;
++
++ is_holder = (dev->pr_holder == reg);
++
++ res = vfs_write(file, (void __force __user *)&is_holder, sizeof(is_holder),
++ &pos);
++ if (res != sizeof(is_holder))
++ goto write_error;
++
++ size = tid_size(reg->transport_id);
++ res = vfs_write(file, (void __force __user *)reg->transport_id, size, &pos);
++ if (res != size)
++ goto write_error;
++
++ res = vfs_write(file, (void __force __user *)&reg->key,
++ sizeof(reg->key), &pos);
++ if (res != sizeof(reg->key))
++ goto write_error;
++
++ res = vfs_write(file, (void __force __user *)&reg->rel_tgt_id,
++ sizeof(reg->rel_tgt_id), &pos);
++ if (res != sizeof(reg->rel_tgt_id))
++ goto write_error;
++ }
++ }
++
++ res = vfs_fsync(file, 0);
++ if (res != 0) {
++ PRINT_ERROR("fsync() of the PR file failed: %d", res);
++ goto write_error_close;
++ }
++
++ sign = SCST_PR_FILE_SIGN;
++ pos = 0;
++ res = vfs_write(file, (void __force __user *)&sign, sizeof(sign), &pos);
++ if (res != sizeof(sign))
++ goto write_error;
++
++ res = vfs_fsync(file, 0);
++ if (res != 0) {
++ PRINT_ERROR("fsync() of the PR file failed: %d", res);
++ goto write_error_close;
++ }
++
++ res = 0;
++
++ filp_close(file, NULL);
++
++out_set_fs:
++ set_fs(old_fs);
++
++out:
++ if (res != 0) {
++ PRINT_CRIT_ERROR("Unable to save persistent information "
++ "(target %s, initiator %s, device %s)",
++ tgt_dev->sess->tgt->tgt_name,
++ tgt_dev->sess->initiator_name, dev->virt_name);
++#if 0 /*
++	 * It looks safer to return SUCCESS and rely on the operator's
++	 * intervention so that the PR state can be saved next time, than
++	 * to return HARDWARE ERROR and break all further interaction with
++	 * the affected initiator.
++ */
++ if (cmd != NULL)
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
++#endif
++ }
++
++ TRACE_EXIT_RES(res);
++ return;
++
++write_error:
++ PRINT_ERROR("Error writing to '%s' - error %d", dev->pr_file_name, res);
++
++write_error_close:
++ filp_close(file, NULL);
++ {
++ struct nameidata nd;
++ int rc;
++
++ rc = path_lookup(dev->pr_file_name, 0, &nd);
++ if (!rc)
++ scst_pr_vfs_unlink_and_put(&nd);
++ else
++ TRACE_PR("Unable to lookup '%s' - error %d",
++ dev->pr_file_name, rc);
++ }
++ goto out_set_fs;
++}
++
++static int scst_pr_check_pr_path(void)
++{
++ int res;
++ struct nameidata nd;
++ mm_segment_t old_fs = get_fs();
++
++ TRACE_ENTRY();
++
++ set_fs(KERNEL_DS);
++
++ res = path_lookup(SCST_PR_DIR, 0, &nd);
++ if (res != 0) {
++ PRINT_ERROR("Unable to find %s (err %d), you should create "
++ "this directory manually or reinstall SCST",
++ SCST_PR_DIR, res);
++ goto out_setfs;
++ }
++
++ scst_pr_path_put(&nd);
++
++out_setfs:
++ set_fs(old_fs);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* Called under scst_mutex */
++int scst_pr_init_dev(struct scst_device *dev)
++{
++ int res = 0;
++ uint8_t q;
++ int name_len;
++
++ TRACE_ENTRY();
++
++ name_len = snprintf(&q, sizeof(q), "%s/%s", SCST_PR_DIR, dev->virt_name) + 1;
++ dev->pr_file_name = kmalloc(name_len, GFP_KERNEL);
++ if (dev->pr_file_name == NULL) {
++ PRINT_ERROR("Allocation of device '%s' file path failed",
++ dev->virt_name);
++ res = -ENOMEM;
++ goto out;
++ } else
++ snprintf(dev->pr_file_name, name_len, "%s/%s", SCST_PR_DIR,
++ dev->virt_name);
++
++ name_len = snprintf(&q, sizeof(q), "%s/%s.1", SCST_PR_DIR, dev->virt_name) + 1;
++ dev->pr_file_name1 = kmalloc(name_len, GFP_KERNEL);
++ if (dev->pr_file_name1 == NULL) {
++ PRINT_ERROR("Allocation of device '%s' backup file path failed",
++ dev->virt_name);
++ res = -ENOMEM;
++ goto out_free_name;
++ } else
++ snprintf(dev->pr_file_name1, name_len, "%s/%s.1", SCST_PR_DIR,
++ dev->virt_name);
++
++ res = scst_pr_check_pr_path();
++ if (res == 0) {
++ res = scst_pr_load_device_file(dev);
++ if (res == -ENOENT)
++ res = 0;
++ }
++
++ if (res != 0)
++ goto out_free_name1;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free_name1:
++ kfree(dev->pr_file_name1);
++ dev->pr_file_name1 = NULL;
++
++out_free_name:
++ kfree(dev->pr_file_name);
++ dev->pr_file_name = NULL;
++ goto out;
++}
++
++/* Called under scst_mutex */
++void scst_pr_clear_dev(struct scst_device *dev)
++{
++ struct scst_dev_registrant *reg, *tmp_reg;
++
++ TRACE_ENTRY();
++
++ list_for_each_entry_safe(reg, tmp_reg, &dev->dev_registrants_list,
++ dev_registrants_list_entry) {
++ scst_pr_remove_registrant(dev, reg);
++ }
++
++ kfree(dev->pr_file_name);
++ kfree(dev->pr_file_name1);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Called under scst_mutex */
++int scst_pr_init_tgt_dev(struct scst_tgt_dev *tgt_dev)
++{
++ int res = 0;
++ struct scst_dev_registrant *reg;
++ struct scst_device *dev = tgt_dev->dev;
++ const uint8_t *transport_id = tgt_dev->sess->transport_id;
++ const uint16_t rel_tgt_id = tgt_dev->sess->tgt->rel_tgt_id;
++
++ TRACE_ENTRY();
++
++ if (tgt_dev->sess->transport_id == NULL)
++ goto out;
++
++ scst_pr_write_lock(dev);
++
++ reg = scst_pr_find_reg(dev, transport_id, rel_tgt_id);
++ if ((reg != NULL) && (reg->tgt_dev == NULL)) {
++ TRACE_PR("Assigning reg %s/%d (%p) to tgt_dev %p (dev %s)",
++ debug_transport_id_to_initiator_name(transport_id),
++ rel_tgt_id, reg, tgt_dev, dev->virt_name);
++ tgt_dev->registrant = reg;
++ reg->tgt_dev = tgt_dev;
++ }
++
++ scst_pr_write_unlock(dev);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* Called under scst_mutex */
++void scst_pr_clear_tgt_dev(struct scst_tgt_dev *tgt_dev)
++{
++ TRACE_ENTRY();
++
++ if (tgt_dev->registrant != NULL) {
++ struct scst_dev_registrant *reg = tgt_dev->registrant;
++ struct scst_device *dev = tgt_dev->dev;
++ struct scst_tgt_dev *t;
++
++ scst_pr_write_lock(dev);
++
++ tgt_dev->registrant = NULL;
++ reg->tgt_dev = NULL;
++
++		/* Just in case; this should never actually happen. */
++ list_for_each_entry(t, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ if (t == tgt_dev)
++ continue;
++ if ((t->sess->tgt->rel_tgt_id == reg->rel_tgt_id) &&
++ tid_equal(t->sess->transport_id, reg->transport_id)) {
++ TRACE_PR("Reassigning reg %s/%d (%p) to tgt_dev "
++ "%p (being cleared tgt_dev %p)",
++ debug_transport_id_to_initiator_name(
++ reg->transport_id),
++ reg->rel_tgt_id, reg, t, tgt_dev);
++ t->registrant = reg;
++ reg->tgt_dev = t;
++ break;
++ }
++ }
++
++ scst_pr_write_unlock(dev);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Called with dev_pr_mutex locked. Might also be called under scst_mutex2. */
++static int scst_pr_register_with_spec_i_pt(struct scst_cmd *cmd,
++ const uint16_t rel_tgt_id, uint8_t *buffer, int buffer_size,
++ struct list_head *rollback_list)
++{
++ int res = 0;
++ int offset, ext_size;
++ __be64 action_key;
++ struct scst_device *dev = cmd->dev;
++ struct scst_dev_registrant *reg;
++ uint8_t *transport_id;
++
++ action_key = get_unaligned((__be64 *)&buffer[8]);
++
++ ext_size = be32_to_cpu(get_unaligned((__be32 *)&buffer[24]));
++ if ((ext_size + 28) > buffer_size) {
++ TRACE_PR("Invalid buffer size %d (max %d)", buffer_size,
++ ext_size + 28);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_parameter_list_length_invalid));
++ res = -EINVAL;
++ goto out;
++ }
++
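++	/* First pass: validate the size of each TransportID in the parameter data before any registration state is changed */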
++ offset = 0;
++ while (offset < ext_size) {
++ transport_id = &buffer[28 + offset];
++
++ if ((offset + tid_size(transport_id)) > ext_size) {
++ TRACE_PR("Invalid transport_id size %d (max %d)",
++ tid_size(transport_id), ext_size - offset);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_parm_list));
++ res = -EINVAL;
++ goto out;
++ }
++ tid_secure(transport_id);
++ offset += tid_size(transport_id);
++ }
++
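++	/* Second pass: create or update a registrant for each TransportID */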
++ offset = 0;
++ while (offset < ext_size) {
++ struct scst_tgt_dev *t;
++
++ transport_id = &buffer[28 + offset];
++
++ TRACE_PR("rel_tgt_id %d, transport_id %s", rel_tgt_id,
++ debug_transport_id_to_initiator_name(transport_id));
++
++ if ((transport_id[0] & 0x0f) == SCSI_TRANSPORTID_PROTOCOLID_ISCSI &&
++ (transport_id[0] & 0xc0) == 0) {
++ TRACE_PR("Wildcard iSCSI TransportID %s",
++ &transport_id[4]);
++ /*
++ * We can't use scst_mutex here, because of the
++ * circular locking dependency with dev_pr_mutex.
++ */
++ spin_lock_bh(&dev->dev_lock);
++ list_for_each_entry(t, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ /*
++ * We must go over all matching tgt_devs and
++ * register them on the requested rel_tgt_id
++ */
++ if (!tid_equal(t->sess->transport_id,
++ transport_id))
++ continue;
++
++ reg = scst_pr_find_reg(dev,
++ t->sess->transport_id, rel_tgt_id);
++ if (reg == NULL) {
++ reg = scst_pr_add_registrant(dev,
++ t->sess->transport_id,
++ rel_tgt_id, action_key, true);
++ if (reg == NULL) {
++ spin_unlock_bh(&dev->dev_lock);
++ scst_set_busy(cmd);
++ res = -ENOMEM;
++ goto out;
++ }
++ } else if (reg->key != action_key) {
++ TRACE_PR("Changing key of reg %p "
++ "(tgt_dev %p)", reg, t);
++ reg->rollback_key = reg->key;
++ reg->key = action_key;
++ } else
++ continue;
++
++ list_add_tail(&reg->aux_list_entry,
++ rollback_list);
++ }
++ spin_unlock_bh(&dev->dev_lock);
++ } else {
++ reg = scst_pr_find_reg(dev, transport_id, rel_tgt_id);
++ if (reg != NULL) {
++ if (reg->key == action_key)
++ goto next;
++ TRACE_PR("Changing key of reg %p (tgt_dev %p)",
++ reg, reg->tgt_dev);
++ reg->rollback_key = reg->key;
++ reg->key = action_key;
++ } else {
++ reg = scst_pr_add_registrant(dev, transport_id,
++ rel_tgt_id, action_key, false);
++ if (reg == NULL) {
++ scst_set_busy(cmd);
++ res = -ENOMEM;
++ goto out;
++ }
++ }
++
++ list_add_tail(&reg->aux_list_entry,
++ rollback_list);
++ }
++next:
++ offset += tid_size(transport_id);
++ }
++out:
++ return res;
++}
++
++/* Called with dev_pr_mutex locked, no IRQ */
++static void scst_pr_unregister(struct scst_device *dev,
++ struct scst_dev_registrant *reg)
++{
++ bool is_holder;
++ uint8_t pr_type;
++
++ TRACE_ENTRY();
++
++ TRACE_PR("Unregistering key %0llx", reg->key);
++
++ is_holder = scst_pr_is_holder(dev, reg);
++ pr_type = dev->pr_type;
++
++ scst_pr_remove_registrant(dev, reg);
++
++ if (is_holder && !dev->pr_is_set) {
++ /* A registration just released */
++ switch (pr_type) {
++ case TYPE_WRITE_EXCLUSIVE_REGONLY:
++ case TYPE_EXCLUSIVE_ACCESS_REGONLY:
++ scst_pr_send_ua_all(dev, NULL,
++ SCST_LOAD_SENSE(scst_sense_reservation_released));
++ break;
++ }
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Called with dev_pr_mutex locked, no IRQ */
++static void scst_pr_unregister_all_tg_pt(struct scst_device *dev,
++ const uint8_t *transport_id)
++{
++ struct scst_tgt_template *tgtt;
++ uint8_t proto_id = transport_id[0] & 0x0f;
++
++ TRACE_ENTRY();
++
++ /*
++ * We can't use scst_mutex here, because of the circular locking
++ * dependency with dev_pr_mutex.
++ */
++ mutex_lock(&scst_mutex2);
++
++ list_for_each_entry(tgtt, &scst_template_list, scst_template_list_entry) {
++ struct scst_tgt *tgt;
++
++ if (tgtt->get_initiator_port_transport_id == NULL)
++ continue;
++
++ if (tgtt->get_initiator_port_transport_id(NULL, NULL) != proto_id)
++ continue;
++
++ list_for_each_entry(tgt, &tgtt->tgt_list, tgt_list_entry) {
++ struct scst_dev_registrant *reg;
++
++ reg = scst_pr_find_reg(dev, transport_id,
++ tgt->rel_tgt_id);
++ if (reg == NULL)
++ continue;
++
++ scst_pr_unregister(dev, reg);
++ }
++ }
++
++ mutex_unlock(&scst_mutex2);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Called with dev_pr_mutex locked. Might also be called under scst_mutex2. */
++static int scst_pr_register_on_tgt_id(struct scst_cmd *cmd,
++ const uint16_t rel_tgt_id, uint8_t *buffer, int buffer_size,
++ bool spec_i_pt, struct list_head *rollback_list)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ TRACE_PR("rel_tgt_id %d, spec_i_pt %d", rel_tgt_id, spec_i_pt);
++
++ if (spec_i_pt) {
++ res = scst_pr_register_with_spec_i_pt(cmd, rel_tgt_id, buffer,
++ buffer_size, rollback_list);
++ if (res != 0)
++ goto out;
++ }
++
++	/* The cmd's own initiator port may already have been registered by scst_pr_register_with_spec_i_pt() if its TransportID was among the listed TIDs */
++
++ if (scst_pr_find_reg(cmd->dev, cmd->sess->transport_id, rel_tgt_id) == NULL) {
++ __be64 action_key;
++ struct scst_dev_registrant *reg;
++
++ action_key = get_unaligned((__be64 *)&buffer[8]);
++
++ reg = scst_pr_add_registrant(cmd->dev, cmd->sess->transport_id,
++ rel_tgt_id, action_key, false);
++ if (reg == NULL) {
++ res = -ENOMEM;
++ scst_set_busy(cmd);
++ goto out;
++ }
++
++ list_add_tail(&reg->aux_list_entry, rollback_list);
++ }
++
++ res = 0;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* Called with dev_pr_mutex locked, no IRQ */
++static int scst_pr_register_all_tg_pt(struct scst_cmd *cmd, uint8_t *buffer,
++ int buffer_size, bool spec_i_pt, struct list_head *rollback_list)
++{
++ int res = 0;
++ struct scst_tgt_template *tgtt;
++ uint8_t proto_id = cmd->sess->transport_id[0] & 0x0f;
++
++ TRACE_ENTRY();
++
++ /*
++ * We can't use scst_mutex here, because of the circular locking
++ * dependency with dev_pr_mutex.
++ */
++ mutex_lock(&scst_mutex2);
++
++ list_for_each_entry(tgtt, &scst_template_list, scst_template_list_entry) {
++ struct scst_tgt *tgt;
++
++ if (tgtt->get_initiator_port_transport_id == NULL)
++ continue;
++
++ if (tgtt->get_initiator_port_transport_id(NULL, NULL) != proto_id)
++ continue;
++
++ TRACE_PR("tgtt %s, spec_i_pt %d", tgtt->name, spec_i_pt);
++
++ list_for_each_entry(tgt, &tgtt->tgt_list, tgt_list_entry) {
++ if (tgt->rel_tgt_id == 0)
++ continue;
++ TRACE_PR("tgt %s, rel_tgt_id %d", tgt->tgt_name,
++ tgt->rel_tgt_id);
++ res = scst_pr_register_on_tgt_id(cmd, tgt->rel_tgt_id,
++ buffer, buffer_size, spec_i_pt, rollback_list);
++ if (res != 0)
++ goto out_unlock;
++ }
++ }
++
++out_unlock:
++ mutex_unlock(&scst_mutex2);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* Called with dev_pr_mutex locked, no IRQ */
++static int __scst_pr_register(struct scst_cmd *cmd, uint8_t *buffer,
++ int buffer_size, bool spec_i_pt, bool all_tg_pt)
++{
++ int res;
++ struct scst_dev_registrant *reg, *treg;
++ LIST_HEAD(rollback_list);
++
++ TRACE_ENTRY();
++
++ if (all_tg_pt) {
++ res = scst_pr_register_all_tg_pt(cmd, buffer, buffer_size,
++ spec_i_pt, &rollback_list);
++ if (res != 0)
++ goto out_rollback;
++ } else {
++ res = scst_pr_register_on_tgt_id(cmd,
++ cmd->sess->tgt->rel_tgt_id, buffer, buffer_size,
++ spec_i_pt, &rollback_list);
++ if (res != 0)
++ goto out_rollback;
++ }
++
++ list_for_each_entry(reg, &rollback_list, aux_list_entry) {
++ reg->rollback_key = 0;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_rollback:
++ list_for_each_entry_safe(reg, treg, &rollback_list, aux_list_entry) {
++ list_del(&reg->aux_list_entry);
++ if (reg->rollback_key == 0)
++ scst_pr_remove_registrant(cmd->dev, reg);
++ else {
++ reg->key = reg->rollback_key;
++ reg->rollback_key = 0;
++ }
++ }
++ goto out;
++}
++
++/* Called with dev_pr_mutex locked, no IRQ */
++void scst_pr_register(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size)
++{
++ int aptpl, spec_i_pt, all_tg_pt;
++ __be64 key, action_key;
++ struct scst_device *dev = cmd->dev;
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++ struct scst_session *sess = cmd->sess;
++ struct scst_dev_registrant *reg;
++
++ TRACE_ENTRY();
++
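++	/* Basic PERSISTENT RESERVE OUT parameter list: RESERVATION KEY in bytes 0-7, SERVICE ACTION RESERVATION KEY in bytes 8-15, SPEC_I_PT/ALL_TG_PT/APTPL flags in byte 20 */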
++ aptpl = buffer[20] & 0x01;
++ spec_i_pt = (buffer[20] >> 3) & 0x01;
++ all_tg_pt = (buffer[20] >> 2) & 0x01;
++ key = get_unaligned((__be64 *)&buffer[0]);
++ action_key = get_unaligned((__be64 *)&buffer[8]);
++
++ if (spec_i_pt == 0 && buffer_size != 24) {
++ TRACE_PR("Invalid buffer size %d", buffer_size);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_parameter_list_length_invalid));
++ goto out;
++ }
++
++ reg = tgt_dev->registrant;
++
++ TRACE_PR("Register: initiator %s/%d (%p), key %0llx, action_key %0llx "
++ "(tgt_dev %p)",
++ debug_transport_id_to_initiator_name(sess->transport_id),
++ sess->tgt->rel_tgt_id, reg, key, action_key, tgt_dev);
++
++ if (reg == NULL) {
++ TRACE_PR("tgt_dev %p is not registered yet - registering",
++ tgt_dev);
++ if (key) {
++ TRACE_PR("%s", "Key must be zero on new registration");
++ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
++ goto out;
++ }
++ if (action_key) {
++ int rc = __scst_pr_register(cmd, buffer, buffer_size,
++ spec_i_pt, all_tg_pt);
++ if (rc != 0)
++ goto out;
++ } else
++ TRACE_PR("%s", "Doing nothing - action_key is zero");
++ } else {
++ if (reg->key != key) {
++ TRACE_PR("tgt_dev %p already registered - reservation "
++ "key %0llx mismatch", tgt_dev, reg->key);
++ scst_set_cmd_error_status(cmd,
++ SAM_STAT_RESERVATION_CONFLICT);
++ goto out;
++ }
++ if (spec_i_pt) {
++ TRACE_PR("%s", "spec_i_pt must be zero in this case");
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
++ scst_sense_invalid_field_in_cdb));
++ goto out;
++ }
++ if (action_key == 0) {
++ if (all_tg_pt)
++ scst_pr_unregister_all_tg_pt(dev,
++ sess->transport_id);
++ else
++ scst_pr_unregister(dev, reg);
++ } else
++ reg->key = action_key;
++ }
++
++ dev->pr_generation++;
++
++ dev->pr_aptpl = aptpl;
++
++ scst_pr_dump_prs(dev, false);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* Called with dev_pr_mutex locked, no IRQ */
++void scst_pr_register_and_ignore(struct scst_cmd *cmd, uint8_t *buffer,
++ int buffer_size)
++{
++ int aptpl, all_tg_pt;
++ __be64 action_key;
++ struct scst_dev_registrant *reg = NULL;
++ struct scst_device *dev = cmd->dev;
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++ struct scst_session *sess = cmd->sess;
++
++ TRACE_ENTRY();
++
++ aptpl = buffer[20] & 0x01;
++ all_tg_pt = (buffer[20] >> 2) & 0x01;
++ action_key = get_unaligned((__be64 *)&buffer[8]);
++
++ if (buffer_size != 24) {
++ TRACE_PR("Invalid buffer size %d", buffer_size);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_parameter_list_length_invalid));
++ goto out;
++ }
++
++ reg = tgt_dev->registrant;
++
++ TRACE_PR("Register and ignore: initiator %s/%d (%p), action_key "
++ "%016llx (tgt_dev %p)",
++ debug_transport_id_to_initiator_name(sess->transport_id),
++ sess->tgt->rel_tgt_id, reg, action_key, tgt_dev);
++
++ if (reg == NULL) {
++ TRACE_PR("Tgt_dev %p is not registered yet - trying to "
++ "register", tgt_dev);
++ if (action_key) {
++ int rc = __scst_pr_register(cmd, buffer, buffer_size,
++ false, all_tg_pt);
++ if (rc != 0)
++ goto out;
++ } else
++ TRACE_PR("%s", "Doing nothing, action_key is zero");
++ } else {
++ if (action_key == 0) {
++ if (all_tg_pt)
++ scst_pr_unregister_all_tg_pt(dev,
++ sess->transport_id);
++ else
++ scst_pr_unregister(dev, reg);
++ } else
++ reg->key = action_key;
++ }
++
++ dev->pr_generation++;
++
++ dev->pr_aptpl = aptpl;
++
++ scst_pr_dump_prs(dev, false);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* Called with dev_pr_mutex locked, no IRQ */
++void scst_pr_register_and_move(struct scst_cmd *cmd, uint8_t *buffer,
++ int buffer_size)
++{
++ int aptpl;
++ int unreg;
++ int tid_buffer_size;
++ __be64 key, action_key;
++ struct scst_device *dev = cmd->dev;
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++ struct scst_session *sess = cmd->sess;
++ struct scst_dev_registrant *reg, *reg_move;
++ const uint8_t *transport_id = NULL;
++ uint8_t *transport_id_move = NULL;
++ uint16_t rel_tgt_id_move;
++
++ TRACE_ENTRY();
++
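++	/* REGISTER AND MOVE parameter list: keys in bytes 0-15, UNREG/APTPL flags in byte 17, relative target port id in bytes 18-19, TransportID length in bytes 20-23, TransportID from byte 24 */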
++ aptpl = buffer[17] & 0x01;
++ key = get_unaligned((__be64 *)&buffer[0]);
++ action_key = get_unaligned((__be64 *)&buffer[8]);
++ unreg = (buffer[17] >> 1) & 0x01;
++ tid_buffer_size = be32_to_cpu(get_unaligned((__be32 *)&buffer[20]));
++
++ if ((tid_buffer_size + 24) > buffer_size) {
++ TRACE_PR("Invalid buffer size %d (%d)",
++ buffer_size, tid_buffer_size + 24);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_parm_list));
++ goto out;
++ }
++
++ if (tid_buffer_size < 24) {
++ TRACE_PR("%s", "Transport id buffer too small");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_parm_list));
++ goto out;
++ }
++
++ reg = tgt_dev->registrant;
++ /* We already checked reg is not NULL */
++ if (reg->key != key) {
++ TRACE_PR("Registrant's %s/%d (%p) key %016llx mismatch with "
++ "%016llx (tgt_dev %p)",
++ debug_transport_id_to_initiator_name(reg->transport_id),
++ reg->rel_tgt_id, reg, reg->key, key, tgt_dev);
++ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
++ goto out;
++ }
++
++ if (!dev->pr_is_set) {
++ TRACE_PR("%s", "There must be a PR");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out;
++ }
++
++ /*
++	 * This check is also required by the table "PERSISTENT RESERVE OUT service
++ * actions that are allowed in the presence of various reservations".
++ */
++ if (!scst_pr_is_holder(dev, reg)) {
++ TRACE_PR("Registrant %s/%d (%p) is not a holder (tgt_dev %p)",
++ debug_transport_id_to_initiator_name(
++ reg->transport_id), reg->rel_tgt_id,
++ reg, tgt_dev);
++ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
++ goto out;
++ }
++
++ if (action_key == 0) {
++ TRACE_PR("%s", "Action key must be non-zero");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out;
++ }
++
++ transport_id = sess->transport_id;
++ transport_id_move = (uint8_t *)&buffer[24];
++ rel_tgt_id_move = be16_to_cpu(get_unaligned((__be16 *)&buffer[18]));
++
++ if ((tid_size(transport_id_move) + 24) > buffer_size) {
++ TRACE_PR("Invalid buffer size %d (%d)",
++ buffer_size, tid_size(transport_id_move) + 24);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_parm_list));
++ goto out;
++ }
++
++ tid_secure(transport_id_move);
++
++ if (dev->pr_type == TYPE_WRITE_EXCLUSIVE_ALL_REG ||
++ dev->pr_type == TYPE_EXCLUSIVE_ACCESS_ALL_REG) {
++ TRACE_PR("Unable to finish operation due to wrong reservation "
++ "type %02x", dev->pr_type);
++ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
++ goto out;
++ }
++
++ if (tid_equal(transport_id, transport_id_move)) {
++ TRACE_PR("%s", "Equal transport id's");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_parm_list));
++ goto out;
++ }
++
++ reg_move = scst_pr_find_reg(dev, transport_id_move, rel_tgt_id_move);
++ if (reg_move == NULL) {
++ reg_move = scst_pr_add_registrant(dev, transport_id_move,
++ rel_tgt_id_move, action_key, false);
++ if (reg_move == NULL) {
++ scst_set_busy(cmd);
++ goto out;
++ }
++ } else if (reg_move->key != action_key) {
++ TRACE_PR("Changing key for reg %p", reg);
++ reg_move->key = action_key;
++ }
++
++ TRACE_PR("Register and move: from initiator %s/%d (%p, tgt_dev %p) to "
++ "initiator %s/%d (%p, tgt_dev %p), key %016llx (unreg %d)",
++ debug_transport_id_to_initiator_name(reg->transport_id),
++ reg->rel_tgt_id, reg, reg->tgt_dev,
++ debug_transport_id_to_initiator_name(transport_id_move),
++ rel_tgt_id_move, reg_move, reg_move->tgt_dev, action_key,
++ unreg);
++
++ /* Move the holder */
++ scst_pr_set_holder(dev, reg_move, dev->pr_scope, dev->pr_type);
++
++ if (unreg)
++ scst_pr_remove_registrant(dev, reg);
++
++ dev->pr_generation++;
++
++ dev->pr_aptpl = aptpl;
++
++ scst_pr_dump_prs(dev, false);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* Called with dev_pr_mutex locked, no IRQ */
++void scst_pr_reserve(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size)
++{
++ uint8_t scope, type;
++ __be64 key;
++ struct scst_device *dev = cmd->dev;
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++ struct scst_dev_registrant *reg;
++
++ TRACE_ENTRY();
++
++ key = get_unaligned((__be64 *)&buffer[0]);
++	scope = (cmd->cdb[2] & 0xf0) >> 4;
++ type = cmd->cdb[2] & 0x0f;
++
++ if (buffer_size != 24) {
++ TRACE_PR("Invalid buffer size %d", buffer_size);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_parameter_list_length_invalid));
++ goto out;
++ }
++
++ if (!scst_pr_type_valid(type)) {
++ TRACE_PR("Invalid reservation type %d", type);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out;
++ }
++
++	if (scope != SCOPE_LU) {
++ TRACE_PR("Invalid reservation scope %d", scope);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out;
++ }
++
++ reg = tgt_dev->registrant;
++
++ TRACE_PR("Reserve: initiator %s/%d (%p), key %016llx, scope %d, "
++ "type %d (tgt_dev %p)",
++ debug_transport_id_to_initiator_name(cmd->sess->transport_id),
++ cmd->sess->tgt->rel_tgt_id, reg, key, scope, type, tgt_dev);
++
++ /* We already checked reg is not NULL */
++ if (reg->key != key) {
++ TRACE_PR("Registrant's %p key %016llx mismatch with %016llx",
++ reg, reg->key, key);
++ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
++ goto out;
++ }
++
++ if (!dev->pr_is_set)
++ scst_pr_set_holder(dev, reg, scope, type);
++ else {
++ if (!scst_pr_is_holder(dev, reg)) {
++ /*
++			 * This check is also required by the table "PERSISTENT
++ * RESERVE OUT service actions that are allowed in the
++ * presence of various reservations".
++ */
++ TRACE_PR("Only holder can override - reg %p is not a "
++ "holder", reg);
++ scst_set_cmd_error_status(cmd,
++ SAM_STAT_RESERVATION_CONFLICT);
++ goto out;
++ } else {
++ if (dev->pr_scope != scope || dev->pr_type != type) {
++ TRACE_PR("Error overriding scope or type for "
++ "reg %p", reg);
++ scst_set_cmd_error_status(cmd,
++ SAM_STAT_RESERVATION_CONFLICT);
++ goto out;
++ } else
++ TRACE_PR("Do nothing: reservation of reg %p "
++ "is the same", reg);
++ }
++ }
++
++ scst_pr_dump_prs(dev, false);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* Called with dev_pr_mutex locked, no IRQ */
++void scst_pr_release(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size)
++{
++ int scope, type;
++ __be64 key;
++ struct scst_device *dev = cmd->dev;
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++ struct scst_dev_registrant *reg;
++ uint8_t cur_pr_type;
++
++ TRACE_ENTRY();
++
++ key = get_unaligned((__be64 *)&buffer[0]);
++	scope = (cmd->cdb[2] & 0xf0) >> 4;
++ type = cmd->cdb[2] & 0x0f;
++
++ if (buffer_size != 24) {
++ TRACE_PR("Invalid buffer size %d", buffer_size);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_parameter_list_length_invalid));
++ goto out;
++ }
++
++ if (!dev->pr_is_set) {
++ TRACE_PR("%s", "There is no PR - do nothing");
++ goto out;
++ }
++
++ reg = tgt_dev->registrant;
++
++ TRACE_PR("Release: initiator %s/%d (%p), key %016llx, scope %d, type "
++ "%d (tgt_dev %p)", debug_transport_id_to_initiator_name(
++ cmd->sess->transport_id),
++ cmd->sess->tgt->rel_tgt_id, reg, key, scope, type, tgt_dev);
++
++ /* We already checked reg is not NULL */
++ if (reg->key != key) {
++ TRACE_PR("Registrant's %p key %016llx mismatch with %016llx",
++ reg, reg->key, key);
++ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
++ goto out;
++ }
++
++ if (!scst_pr_is_holder(dev, reg)) {
++ TRACE_PR("Registrant %p is not a holder - do nothing", reg);
++ goto out;
++ }
++
++ if (dev->pr_scope != scope || dev->pr_type != type) {
++ TRACE_PR("%s", "Released scope or type do not match with "
++ "holder");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_release));
++ goto out;
++ }
++
++ cur_pr_type = dev->pr_type; /* it will be cleared */
++
++ scst_pr_clear_reservation(dev);
++
++ switch (cur_pr_type) {
++ case TYPE_WRITE_EXCLUSIVE_REGONLY:
++ case TYPE_EXCLUSIVE_ACCESS_REGONLY:
++ case TYPE_WRITE_EXCLUSIVE_ALL_REG:
++ case TYPE_EXCLUSIVE_ACCESS_ALL_REG:
++ scst_pr_send_ua_all(dev, reg,
++ SCST_LOAD_SENSE(scst_sense_reservation_released));
++ }
++
++ scst_pr_dump_prs(dev, false);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* Called with dev_pr_mutex locked, no IRQ */
++void scst_pr_clear(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size)
++{
++ int scope, type;
++ __be64 key;
++ struct scst_device *dev = cmd->dev;
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++ struct scst_dev_registrant *reg, *r, *t;
++
++ TRACE_ENTRY();
++
++ key = get_unaligned((__be64 *)&buffer[0]);
++	scope = (cmd->cdb[2] & 0xf0) >> 4;
++ type = cmd->cdb[2] & 0x0f;
++
++ if (buffer_size != 24) {
++ TRACE_PR("Invalid buffer size %d", buffer_size);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_parameter_list_length_invalid));
++ goto out;
++ }
++
++ reg = tgt_dev->registrant;
++
++ TRACE_PR("Clear: initiator %s/%d (%p), key %016llx (tgt_dev %p)",
++ debug_transport_id_to_initiator_name(cmd->sess->transport_id),
++ cmd->sess->tgt->rel_tgt_id, reg, key, tgt_dev);
++
++ /* We already checked reg is not NULL */
++ if (reg->key != key) {
++ TRACE_PR("Registrant's %p key %016llx mismatch with %016llx",
++ reg, reg->key, key);
++ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
++ goto out;
++ }
++
++ scst_pr_send_ua_all(dev, reg,
++ SCST_LOAD_SENSE(scst_sense_reservation_preempted));
++
++ list_for_each_entry_safe(r, t, &dev->dev_registrants_list,
++ dev_registrants_list_entry) {
++ scst_pr_remove_registrant(dev, r);
++ }
++
++ dev->pr_generation++;
++
++ scst_pr_dump_prs(dev, false);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static void scst_pr_do_preempt(struct scst_cmd *cmd, uint8_t *buffer,
++ int buffer_size, bool abort)
++{
++ __be64 key, action_key;
++ int scope, type;
++ struct scst_device *dev = cmd->dev;
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++ struct scst_dev_registrant *reg, *r, *rt;
++ int existing_pr_type = dev->pr_type;
++ int existing_pr_scope = dev->pr_scope;
++ LIST_HEAD(preempt_list);
++
++ TRACE_ENTRY();
++
++ key = get_unaligned((__be64 *)&buffer[0]);
++ action_key = get_unaligned((__be64 *)&buffer[8]);
++	scope = (cmd->cdb[2] & 0xf0) >> 4;
++ type = cmd->cdb[2] & 0x0f;
++
++ if (buffer_size != 24) {
++ TRACE_PR("Invalid buffer size %d", buffer_size);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_parameter_list_length_invalid));
++ goto out;
++ }
++
++ if (!scst_pr_type_valid(type)) {
++ TRACE_PR("Invalid reservation type %d", type);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out;
++ }
++
++ reg = tgt_dev->registrant;
++
++ TRACE_PR("Preempt%s: initiator %s/%d (%p), key %016llx, action_key "
++ "%016llx, scope %x type %x (tgt_dev %p)",
++ abort ? " and abort" : "",
++ debug_transport_id_to_initiator_name(cmd->sess->transport_id),
++ cmd->sess->tgt->rel_tgt_id, reg, key, action_key, scope, type,
++ tgt_dev);
++
++ /* We already checked reg is not NULL */
++ if (reg->key != key) {
++ TRACE_PR("Registrant's %p key %016llx mismatch with %016llx",
++ reg, reg->key, key);
++ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
++ goto out;
++ }
++
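++	/* No reservation is held: preempt only acts on the registrations matching action_key, there is no holder to displace */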
++ if (!dev->pr_is_set) {
++ scst_pr_find_registrants_list_key(dev, action_key,
++ &preempt_list);
++ if (list_empty(&preempt_list))
++ goto out_error;
++ list_for_each_entry_safe(r, rt, &preempt_list, aux_list_entry) {
++ if (abort)
++ scst_pr_abort_reg(dev, cmd, r);
++ if (r != reg) {
++ scst_pr_send_ua_reg(dev, r, SCST_LOAD_SENSE(
++ scst_sense_registrations_preempted));
++ scst_pr_remove_registrant(dev, r);
++ }
++ }
++ goto done;
++ }
++
++ if (dev->pr_type == TYPE_WRITE_EXCLUSIVE_ALL_REG ||
++ dev->pr_type == TYPE_EXCLUSIVE_ACCESS_ALL_REG) {
++ if (action_key == 0) {
++ scst_pr_find_registrants_list_all(dev, reg,
++ &preempt_list);
++ list_for_each_entry_safe(r, rt, &preempt_list,
++ aux_list_entry) {
++ BUG_ON(r == reg);
++ if (abort)
++ scst_pr_abort_reg(dev, cmd, r);
++ scst_pr_send_ua_reg(dev, r,
++ SCST_LOAD_SENSE(
++ scst_sense_registrations_preempted));
++ scst_pr_remove_registrant(dev, r);
++ }
++ scst_pr_set_holder(dev, reg, scope, type);
++ } else {
++ scst_pr_find_registrants_list_key(dev, action_key,
++ &preempt_list);
++ if (list_empty(&preempt_list))
++ goto out_error;
++ list_for_each_entry_safe(r, rt, &preempt_list,
++ aux_list_entry) {
++ if (abort)
++ scst_pr_abort_reg(dev, cmd, r);
++ if (r != reg) {
++ scst_pr_send_ua_reg(dev, r,
++ SCST_LOAD_SENSE(
++ scst_sense_registrations_preempted));
++ scst_pr_remove_registrant(dev, r);
++ }
++ }
++ }
++ goto done;
++ }
++
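++	/* action_key does not belong to the reservation holder: only the matching registrations are removed, the reservation itself stays with the current holder */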
++ if (dev->pr_holder->key != action_key) {
++ if (action_key == 0) {
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
++ scst_sense_invalid_field_in_parm_list));
++ goto out;
++ } else {
++ scst_pr_find_registrants_list_key(dev, action_key,
++ &preempt_list);
++ if (list_empty(&preempt_list))
++ goto out_error;
++ list_for_each_entry_safe(r, rt, &preempt_list,
++ aux_list_entry) {
++ if (abort)
++ scst_pr_abort_reg(dev, cmd, r);
++ if (r != reg)
++ scst_pr_send_ua_reg(dev, r,
++ SCST_LOAD_SENSE(
++ scst_sense_registrations_preempted));
++ scst_pr_remove_registrant(dev, r);
++ }
++ goto done;
++ }
++ }
++
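++	/* action_key matches the holder's key: remove those registrations and make the preempting registrant the new reservation holder */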
++ scst_pr_find_registrants_list_key(dev, action_key,
++ &preempt_list);
++
++ list_for_each_entry_safe(r, rt, &preempt_list, aux_list_entry) {
++ if (abort)
++ scst_pr_abort_reg(dev, cmd, r);
++ if (r != reg) {
++ scst_pr_send_ua_reg(dev, r, SCST_LOAD_SENSE(
++ scst_sense_registrations_preempted));
++ scst_pr_remove_registrant(dev, r);
++ }
++ }
++
++ scst_pr_set_holder(dev, reg, scope, type);
++
++ if (existing_pr_type != type || existing_pr_scope != scope) {
++ list_for_each_entry(r, &dev->dev_registrants_list,
++ dev_registrants_list_entry) {
++ if (r != reg)
++ scst_pr_send_ua_reg(dev, r, SCST_LOAD_SENSE(
++ scst_sense_reservation_released));
++ }
++ }
++
++done:
++ dev->pr_generation++;
++
++ scst_pr_dump_prs(dev, false);
++
++out:
++ TRACE_EXIT();
++ return;
++
++out_error:
++ TRACE_PR("Invalid key %016llx", action_key);
++ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
++ goto out;
++}
++
++/* Called with dev_pr_mutex locked, no IRQ */
++void scst_pr_preempt(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size)
++{
++ TRACE_ENTRY();
++
++ scst_pr_do_preempt(cmd, buffer, buffer_size, false);
++
++ TRACE_EXIT();
++ return;
++}
++
++static void scst_cmd_done_pr_preempt(struct scst_cmd *cmd, int next_state,
++ enum scst_exec_context pref_context)
++{
++ void (*saved_cmd_done) (struct scst_cmd *cmd, int next_state,
++ enum scst_exec_context pref_context);
++
++ TRACE_ENTRY();
++
++	saved_cmd_done = NULL; /* silence a "may be used uninitialized" warning */
++
++ if (cmd->pr_abort_counter != NULL) {
++ if (!atomic_dec_and_test(&cmd->pr_abort_counter->pr_abort_pending_cnt))
++ goto out;
++ saved_cmd_done = cmd->pr_abort_counter->saved_cmd_done;
++ kfree(cmd->pr_abort_counter);
++ cmd->pr_abort_counter = NULL;
++ }
++
++ saved_cmd_done(cmd, next_state, pref_context);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * Called with dev_pr_mutex locked, no IRQ. Expects session_list_lock
++ * not locked
++ */
++void scst_pr_preempt_and_abort(struct scst_cmd *cmd, uint8_t *buffer,
++ int buffer_size)
++{
++ TRACE_ENTRY();
++
++ cmd->pr_abort_counter = kzalloc(sizeof(*cmd->pr_abort_counter),
++ GFP_KERNEL);
++ if (cmd->pr_abort_counter == NULL) {
++ PRINT_ERROR("Unable to allocate PR abort counter (size %zd)",
++ sizeof(*cmd->pr_abort_counter));
++ scst_set_busy(cmd);
++ goto out;
++ }
++
++	/* 1 to protect cmd from being completed by the TM thread too early */
++ atomic_set(&cmd->pr_abort_counter->pr_abort_pending_cnt, 1);
++ atomic_set(&cmd->pr_abort_counter->pr_aborting_cnt, 1);
++ init_completion(&cmd->pr_abort_counter->pr_aborting_cmpl);
++
++ cmd->pr_abort_counter->saved_cmd_done = cmd->scst_cmd_done;
++ cmd->scst_cmd_done = scst_cmd_done_pr_preempt;
++
++ scst_pr_do_preempt(cmd, buffer, buffer_size, true);
++
++ if (!atomic_dec_and_test(&cmd->pr_abort_counter->pr_aborting_cnt))
++ wait_for_completion(&cmd->pr_abort_counter->pr_aborting_cmpl);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* Checks if this is a Compatible Reservation Handling (CRH) case */
++bool scst_pr_crh_case(struct scst_cmd *cmd)
++{
++ bool allowed;
++ struct scst_device *dev = cmd->dev;
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++ struct scst_dev_registrant *reg;
++ uint8_t type;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Test if there is a CRH case for command %s (0x%x) from "
++ "%s", cmd->op_name, cmd->cdb[0], cmd->sess->initiator_name);
++
++ if (!dev->pr_is_set) {
++ TRACE_PR("%s", "PR not set");
++ allowed = false;
++ goto out;
++ }
++
++ reg = tgt_dev->registrant;
++ type = dev->pr_type;
++
++ switch (type) {
++ case TYPE_WRITE_EXCLUSIVE:
++ case TYPE_EXCLUSIVE_ACCESS:
++ WARN_ON(dev->pr_holder == NULL);
++ if (reg == dev->pr_holder)
++ allowed = true;
++ else
++ allowed = false;
++ break;
++
++ case TYPE_WRITE_EXCLUSIVE_REGONLY:
++ case TYPE_EXCLUSIVE_ACCESS_REGONLY:
++ case TYPE_WRITE_EXCLUSIVE_ALL_REG:
++ case TYPE_EXCLUSIVE_ACCESS_ALL_REG:
++ allowed = (reg != NULL);
++ break;
++
++ default:
++ PRINT_ERROR("Invalid PR type %x", type);
++ allowed = false;
++ break;
++ }
++
++ if (!allowed)
++ TRACE_PR("Command %s (0x%x) from %s rejected due to not CRH "
++ "reservation", cmd->op_name, cmd->cdb[0],
++ cmd->sess->initiator_name);
++ else
++ TRACE_DBG("Command %s (0x%x) from %s is allowed to execute "
++ "due to CRH", cmd->op_name, cmd->cdb[0],
++ cmd->sess->initiator_name);
++
++out:
++ TRACE_EXIT_RES(allowed);
++ return allowed;
++
++}
++
++/* Check if command allowed in presence of reservation */
++bool scst_pr_is_cmd_allowed(struct scst_cmd *cmd)
++{
++ bool allowed;
++ struct scst_device *dev = cmd->dev;
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++ struct scst_dev_registrant *reg;
++ uint8_t type;
++ bool unlock;
++
++ TRACE_ENTRY();
++
++ unlock = scst_pr_read_lock(dev);
++
++ TRACE_DBG("Testing if command %s (0x%x) from %s allowed to execute",
++ cmd->op_name, cmd->cdb[0], cmd->sess->initiator_name);
++
++ /* Recheck, because it can change while we were waiting for the lock */
++ if (unlikely(!dev->pr_is_set)) {
++ allowed = true;
++ goto out_unlock;
++ }
++
++ reg = tgt_dev->registrant;
++ type = dev->pr_type;
++
++ switch (type) {
++ case TYPE_WRITE_EXCLUSIVE:
++ if (reg && reg == dev->pr_holder)
++ allowed = true;
++ else
++ allowed = (cmd->op_flags & SCST_WRITE_EXCL_ALLOWED) != 0;
++ break;
++
++ case TYPE_EXCLUSIVE_ACCESS:
++ if (reg && reg == dev->pr_holder)
++ allowed = true;
++ else
++ allowed = (cmd->op_flags & SCST_EXCL_ACCESS_ALLOWED) != 0;
++ break;
++
++ case TYPE_WRITE_EXCLUSIVE_REGONLY:
++ case TYPE_WRITE_EXCLUSIVE_ALL_REG:
++ if (reg)
++ allowed = true;
++ else
++ allowed = (cmd->op_flags & SCST_WRITE_EXCL_ALLOWED) != 0;
++ break;
++
++ case TYPE_EXCLUSIVE_ACCESS_REGONLY:
++ case TYPE_EXCLUSIVE_ACCESS_ALL_REG:
++ if (reg)
++ allowed = true;
++ else
++ allowed = (cmd->op_flags & SCST_EXCL_ACCESS_ALLOWED) != 0;
++ break;
++
++ default:
++ PRINT_ERROR("Invalid PR type %x", type);
++ allowed = false;
++ break;
++ }
++
++ if (!allowed)
++ TRACE_PR("Command %s (0x%x) from %s rejected due "
++ "to PR", cmd->op_name, cmd->cdb[0],
++ cmd->sess->initiator_name);
++ else
++ TRACE_DBG("Command %s (0x%x) from %s is allowed to execute",
++ cmd->op_name, cmd->cdb[0], cmd->sess->initiator_name);
++
++out_unlock:
++ scst_pr_read_unlock(dev, unlock);
++
++ TRACE_EXIT_RES(allowed);
++ return allowed;
++}
++
++/* Called with dev_pr_mutex locked, no IRQ */
++void scst_pr_read_keys(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size)
++{
++ int i, offset = 0, size, size_max;
++ struct scst_device *dev = cmd->dev;
++ struct scst_dev_registrant *reg;
++
++ TRACE_ENTRY();
++
++ if (buffer_size < 8) {
++ TRACE_PR("buffer_size too small: %d. expected >= 8 "
++ "(buffer %p)", buffer_size, buffer);
++ goto skip;
++ }
++
++ TRACE_PR("Read Keys (dev %s): PRGen %d", dev->virt_name,
++ dev->pr_generation);
++
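++	/* READ KEYS parameter data: PRGENERATION in bytes 0-3, ADDITIONAL LENGTH in bytes 4-7, then the list of 8-byte keys */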
++ put_unaligned(cpu_to_be32(dev->pr_generation), (__be32 *)&buffer[0]);
++
++ offset = 8;
++ size = 0;
++ size_max = buffer_size - 8;
++
++ i = 0;
++ list_for_each_entry(reg, &dev->dev_registrants_list,
++ dev_registrants_list_entry) {
++ if (size_max - size >= 8) {
++ TRACE_PR("Read Keys (dev %s): key 0x%llx",
++ dev->virt_name, reg->key);
++
++ WARN_ON(reg->key == 0);
++
++ put_unaligned(reg->key,
++ (__be64 *)&buffer[offset + 8 * i]);
++
++ offset += 8;
++ }
++ size += 8;
++ }
++
++ put_unaligned(cpu_to_be32(size), (__be32 *)&buffer[4]);
++
++skip:
++ scst_set_resp_data_len(cmd, offset);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Called with dev_pr_mutex locked, no IRQ */
++void scst_pr_read_reservation(struct scst_cmd *cmd, uint8_t *buffer,
++ int buffer_size)
++{
++ struct scst_device *dev = cmd->dev;
++ uint8_t b[24];
++ int size = 0;
++
++ TRACE_ENTRY();
++
++ if (buffer_size < 8) {
++ TRACE_PR("buffer_size too small: %d. expected >= 8 "
++ "(buffer %p)", buffer_size, buffer);
++ goto skip;
++ }
++
++ memset(b, 0, sizeof(b));
++
++ put_unaligned(cpu_to_be32(dev->pr_generation), (__be32 *)&b[0]);
++
++ if (!dev->pr_is_set) {
++ TRACE_PR("Read Reservation: no reservations for dev %s",
++ dev->virt_name);
++ b[4] =
++ b[5] =
++ b[6] =
++ b[7] = 0;
++
++ size = 8;
++ } else {
++ __be64 key = dev->pr_holder ? dev->pr_holder->key : 0;
++
++ TRACE_PR("Read Reservation: dev %s, holder %p, key 0x%llx, "
++ "scope %d, type %d", dev->virt_name, dev->pr_holder,
++ key, dev->pr_scope, dev->pr_type);
++
++ b[4] =
++ b[5] =
++ b[6] = 0;
++ b[7] = 0x10;
++
++ put_unaligned(key, (__be64 *)&b[8]);
++ b[21] = dev->pr_scope << 4 | dev->pr_type;
++
++ size = 24;
++ }
++
++ memset(buffer, 0, buffer_size);
++ memcpy(buffer, b, min(size, buffer_size));
++
++skip:
++ scst_set_resp_data_len(cmd, size);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Called with dev_pr_mutex locked, no IRQ */
++void scst_pr_report_caps(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size)
++{
++ int offset = 0;
++ unsigned int crh = 1;
++ unsigned int atp_c = 1;
++ unsigned int sip_c = 1;
++ unsigned int ptpl_c = 1;
++ struct scst_device *dev = cmd->dev;
++
++ TRACE_ENTRY();
++
++ if (buffer_size < 8) {
++ TRACE_PR("buffer_size too small: %d. expected >= 8 "
++ "(buffer %p)", buffer_size, buffer);
++ goto skip;
++ }
++
++ TRACE_PR("Reporting capabilities (dev %s): crh %x, sip_c %x, "
++ "atp_c %x, ptpl_c %x, pr_aptpl %x", dev->virt_name,
++ crh, sip_c, atp_c, ptpl_c, dev->pr_aptpl);
++
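++	/* REPORT CAPABILITIES response: LENGTH in bytes 0-1, capability flags in bytes 2-3, Persistent Reservation Type Mask in bytes 4-5 */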
++ buffer[0] = 0;
++ buffer[1] = 8;
++
++ buffer[2] = crh << 4 | sip_c << 3 | atp_c << 2 | ptpl_c;
++ buffer[3] = (1 << 7) | (dev->pr_aptpl > 0 ? 1 : 0);
++
++	/* Persistent Reservation Type Mask: all reservation types supported */
++ buffer[4] = 0xEA;
++ buffer[5] = 0x1;
++
++ offset += 8;
++
++skip:
++ scst_set_resp_data_len(cmd, offset);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Called with dev_pr_mutex locked, no IRQ */
++void scst_pr_read_full_status(struct scst_cmd *cmd, uint8_t *buffer,
++ int buffer_size)
++{
++ int offset = 0, size, size_max;
++ struct scst_device *dev = cmd->dev;
++ struct scst_dev_registrant *reg;
++
++ TRACE_ENTRY();
++
++ if (buffer_size < 8)
++ goto skip;
++
++ put_unaligned(cpu_to_be32(dev->pr_generation), (__be32 *)&buffer[0]);
++ offset += 8;
++
++ size = 0;
++ size_max = buffer_size - 8;
++
++ list_for_each_entry(reg, &dev->dev_registrants_list,
++ dev_registrants_list_entry) {
++ int ts;
++ int rec_len;
++
++ ts = tid_size(reg->transport_id);
++ rec_len = 24 + ts;
++
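++		/* Full status descriptor: key in bytes 0-7, R_HOLDER flag in byte 12, scope/type in byte 13, relative target port id in bytes 18-19, TransportID length in bytes 20-23, TransportID from byte 24 */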
++ if (size_max - size > rec_len) {
++ memset(&buffer[offset], 0, rec_len);
++
++ put_unaligned(reg->key, (__be64 *)(&buffer[offset]));
++
++ if (dev->pr_is_set && scst_pr_is_holder(dev, reg)) {
++ buffer[offset + 12] = 1;
++				buffer[offset + 13] = (dev->pr_scope << 4) | dev->pr_type;
++ }
++
++ put_unaligned(cpu_to_be16(reg->rel_tgt_id),
++ (__be16 *)&buffer[offset + 18]);
++ put_unaligned(cpu_to_be32(ts),
++ (__be32 *)&buffer[offset + 20]);
++
++ memcpy(&buffer[offset + 24], reg->transport_id, ts);
++
++ offset += rec_len;
++ }
++ size += rec_len;
++ }
++
++ put_unaligned(cpu_to_be32(size), (__be32 *)&buffer[4]);
++
++skip:
++ scst_set_resp_data_len(cmd, offset);
++
++ TRACE_EXIT();
++ return;
++}
+diff -uprN orig/linux-2.6.36/drivers/scst/scst_pres.h linux-2.6.36/drivers/scst/scst_pres.h
+--- orig/linux-2.6.36/drivers/scst/scst_pres.h
++++ linux-2.6.36/drivers/scst/scst_pres.h
+@@ -0,0 +1,170 @@
++/*
++ * scst_pres.h
++ *
++ * Copyright (C) 2009 - 2010 Alexey Obitotskiy <alexeyo1@open-e.com>
++ * Copyright (C) 2009 - 2010 Open-E, Inc.
++ * Copyright (C) 2009 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef SCST_PRES_H_
++#define SCST_PRES_H_
++
++#include <linux/delay.h>
++
++#define PR_REGISTER 0x00
++#define PR_RESERVE 0x01
++#define PR_RELEASE 0x02
++#define PR_CLEAR 0x03
++#define PR_PREEMPT 0x04
++#define PR_PREEMPT_AND_ABORT 0x05
++#define PR_REGISTER_AND_IGNORE 0x06
++#define PR_REGISTER_AND_MOVE 0x07
++
++#define PR_READ_KEYS 0x00
++#define PR_READ_RESERVATION 0x01
++#define PR_REPORT_CAPS 0x02
++#define PR_READ_FULL_STATUS 0x03
++
++#define TYPE_UNSPECIFIED (-1)
++#define TYPE_WRITE_EXCLUSIVE 0x01
++#define TYPE_EXCLUSIVE_ACCESS 0x03
++#define TYPE_WRITE_EXCLUSIVE_REGONLY 0x05
++#define TYPE_EXCLUSIVE_ACCESS_REGONLY 0x06
++#define TYPE_WRITE_EXCLUSIVE_ALL_REG 0x07
++#define TYPE_EXCLUSIVE_ACCESS_ALL_REG 0x08
++
++#define SCOPE_LU 0x00
++
++static inline bool scst_pr_type_valid(uint8_t type)
++{
++ switch (type) {
++ case TYPE_WRITE_EXCLUSIVE:
++ case TYPE_EXCLUSIVE_ACCESS:
++ case TYPE_WRITE_EXCLUSIVE_REGONLY:
++ case TYPE_EXCLUSIVE_ACCESS_REGONLY:
++ case TYPE_WRITE_EXCLUSIVE_ALL_REG:
++ case TYPE_EXCLUSIVE_ACCESS_ALL_REG:
++ return true;
++ default:
++ return false;
++ }
++}
++
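++/*
++ * Lightweight PR locking: a reader normally only bumps pr_readers_count and
++ * skips dev_pr_mutex, falling back to taking the mutex while a writer is
++ * active. A writer takes dev_pr_mutex, sets pr_writer_active, then waits
++ * for the reader count to drain to zero.
++ */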
++static inline bool scst_pr_read_lock(struct scst_device *dev)
++{
++ bool unlock = false;
++
++ TRACE_ENTRY();
++
++ atomic_inc(&dev->pr_readers_count);
++ smp_mb__after_atomic_inc(); /* to sync with scst_pr_write_lock() */
++
++ if (unlikely(dev->pr_writer_active)) {
++ unlock = true;
++ atomic_dec(&dev->pr_readers_count);
++ mutex_lock(&dev->dev_pr_mutex);
++ }
++
++ TRACE_EXIT_RES(unlock);
++ return unlock;
++}
++
++static inline void scst_pr_read_unlock(struct scst_device *dev, bool unlock)
++{
++ TRACE_ENTRY();
++
++ if (unlikely(unlock))
++ mutex_unlock(&dev->dev_pr_mutex);
++ else {
++ /*
++ * To sync with scst_pr_write_lock(). We need it to ensure
++ * order of our reads with the writer's writes.
++ */
++ smp_mb__before_atomic_dec();
++ atomic_dec(&dev->pr_readers_count);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static inline void scst_pr_write_lock(struct scst_device *dev)
++{
++ TRACE_ENTRY();
++
++ mutex_lock(&dev->dev_pr_mutex);
++
++ dev->pr_writer_active = 1;
++
++ /* to sync with scst_pr_read_lock() and unlock() */
++ smp_mb();
++
++ while (atomic_read(&dev->pr_readers_count) != 0) {
++ TRACE_DBG("Waiting for %d readers (dev %p)",
++ atomic_read(&dev->pr_readers_count), dev);
++ msleep(1);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static inline void scst_pr_write_unlock(struct scst_device *dev)
++{
++ TRACE_ENTRY();
++
++ dev->pr_writer_active = 0;
++
++ mutex_unlock(&dev->dev_pr_mutex);
++
++ TRACE_EXIT();
++ return;
++}
++
++int scst_pr_init_dev(struct scst_device *dev);
++void scst_pr_clear_dev(struct scst_device *dev);
++
++int scst_pr_init_tgt_dev(struct scst_tgt_dev *tgt_dev);
++void scst_pr_clear_tgt_dev(struct scst_tgt_dev *tgt_dev);
++
++bool scst_pr_crh_case(struct scst_cmd *cmd);
++bool scst_pr_is_cmd_allowed(struct scst_cmd *cmd);
++
++void scst_pr_register(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
++void scst_pr_register_and_ignore(struct scst_cmd *cmd, uint8_t *buffer,
++ int buffer_size);
++void scst_pr_register_and_move(struct scst_cmd *cmd, uint8_t *buffer,
++ int buffer_size);
++void scst_pr_reserve(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
++void scst_pr_release(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
++void scst_pr_clear(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
++void scst_pr_preempt(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
++void scst_pr_preempt_and_abort(struct scst_cmd *cmd, uint8_t *buffer,
++ int buffer_size);
++
++void scst_pr_read_keys(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
++void scst_pr_read_reservation(struct scst_cmd *cmd, uint8_t *buffer,
++ int buffer_size);
++void scst_pr_report_caps(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
++void scst_pr_read_full_status(struct scst_cmd *cmd, uint8_t *buffer,
++ int buffer_size);
++
++void scst_pr_sync_device_file(struct scst_tgt_dev *tgt_dev, struct scst_cmd *cmd);
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++void scst_pr_dump_prs(struct scst_device *dev, bool force);
++#else
++static inline void scst_pr_dump_prs(struct scst_device *dev, bool force) {}
++#endif
++
++#endif /* SCST_PRES_H_ */
+diff -uprN orig/linux-2.6.36/drivers/scst/scst_priv.h linux-2.6.36/drivers/scst/scst_priv.h
+--- orig/linux-2.6.36/drivers/scst/scst_priv.h
++++ linux-2.6.36/drivers/scst/scst_priv.h
+@@ -0,0 +1,603 @@
++/*
++ * scst_priv.h
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __SCST_PRIV_H
++#define __SCST_PRIV_H
++
++#include <linux/types.h>
++
++#include <scsi/scsi.h>
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_driver.h>
++#include <scsi/scsi_device.h>
++#include <scsi/scsi_host.h>
++
++#define LOG_PREFIX "scst"
++
++#include <scst/scst_debug.h>
++
++#define TRACE_RTRY 0x80000000
++#define TRACE_SCSI_SERIALIZING 0x40000000
++/** top being the edge away from the interrupt */
++#define TRACE_SND_TOP 0x20000000
++#define TRACE_RCV_TOP 0x01000000
++/** bottom being the edge toward the interrupt */
++#define TRACE_SND_BOT 0x08000000
++#define TRACE_RCV_BOT 0x04000000
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++#define trace_flag scst_trace_flag
++extern unsigned long scst_trace_flag;
++#endif
++
++#ifdef CONFIG_SCST_DEBUG
++
++#define SCST_DEFAULT_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MINOR | TRACE_PID | \
++ TRACE_LINE | TRACE_FUNCTION | TRACE_SPECIAL | TRACE_MGMT | \
++ TRACE_MGMT_DEBUG | TRACE_RTRY)
++
++#define TRACE_RETRY(args...) TRACE_DBG_FLAG(TRACE_RTRY, args)
++#define TRACE_SN(args...) TRACE_DBG_FLAG(TRACE_SCSI_SERIALIZING, args)
++#define TRACE_SEND_TOP(args...) TRACE_DBG_FLAG(TRACE_SND_TOP, args)
++#define TRACE_RECV_TOP(args...) TRACE_DBG_FLAG(TRACE_RCV_TOP, args)
++#define TRACE_SEND_BOT(args...) TRACE_DBG_FLAG(TRACE_SND_BOT, args)
++#define TRACE_RECV_BOT(args...) TRACE_DBG_FLAG(TRACE_RCV_BOT, args)
++
++#else /* CONFIG_SCST_DEBUG */
++
++# ifdef CONFIG_SCST_TRACING
++#define SCST_DEFAULT_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MGMT | \
++ TRACE_SPECIAL)
++# else
++#define SCST_DEFAULT_LOG_FLAGS 0
++# endif
++
++#define TRACE_RETRY(args...)
++#define TRACE_SN(args...)
++#define TRACE_SEND_TOP(args...)
++#define TRACE_RECV_TOP(args...)
++#define TRACE_SEND_BOT(args...)
++#define TRACE_RECV_BOT(args...)
++
++#endif
++
++/**
++ ** Bits for scst_flags
++ **/
++
++/*
++ * Set if new commands initialization is being suspended for a while.
++ * Used to let TM commands execute while preparing the suspend, since
++ * RESET or ABORT could be necessary to free SCSI commands.
++ */
++#define SCST_FLAG_SUSPENDING 0
++
++/* Set if new commands initialization is suspended for a while */
++#define SCST_FLAG_SUSPENDED 1
++
++/**
++ ** Return codes for cmd state process functions. Codes are the same as
++ ** for SCST_EXEC_* to avoid translating between them and, hence, produce better code.
++ **/
++#define SCST_CMD_STATE_RES_CONT_NEXT SCST_EXEC_COMPLETED
++#define SCST_CMD_STATE_RES_CONT_SAME SCST_EXEC_NOT_COMPLETED
++#define SCST_CMD_STATE_RES_NEED_THREAD (SCST_EXEC_NOT_COMPLETED+1)
++
++/**
++ ** Maximum count of uncompleted commands that an initiator could
++ ** queue on any device. Then it will start getting TASK QUEUE FULL status.
++ **/
++#define SCST_MAX_TGT_DEV_COMMANDS 48
++
++/**
++ ** Maximum count of uncompleted commands that could be queued on any device.
++ ** Then initiators sending commands to this device will start getting
++ ** TASK QUEUE FULL status.
++ **/
++#define SCST_MAX_DEV_COMMANDS 256
++
++#define SCST_TGT_RETRY_TIMEOUT (3*HZ/2)
++
++/* Definitions of symbolic constants for LUN addressing method */
++#define SCST_LUN_ADDR_METHOD_PERIPHERAL 0
++#define SCST_LUN_ADDR_METHOD_FLAT 1
++
++/* Activities suspending timeout */
++#define SCST_SUSPENDING_TIMEOUT (90 * HZ)
++
++extern struct mutex scst_mutex2;
++
++extern int scst_threads;
++
++extern unsigned int scst_max_dev_cmd_mem;
++
++extern mempool_t *scst_mgmt_mempool;
++extern mempool_t *scst_mgmt_stub_mempool;
++extern mempool_t *scst_ua_mempool;
++extern mempool_t *scst_sense_mempool;
++extern mempool_t *scst_aen_mempool;
++
++extern struct kmem_cache *scst_cmd_cachep;
++extern struct kmem_cache *scst_sess_cachep;
++extern struct kmem_cache *scst_tgtd_cachep;
++extern struct kmem_cache *scst_acgd_cachep;
++
++extern spinlock_t scst_main_lock;
++
++extern struct scst_sgv_pools scst_sgv;
++
++extern unsigned long scst_flags;
++extern atomic_t scst_cmd_count;
++extern struct list_head scst_template_list;
++extern struct list_head scst_dev_list;
++extern struct list_head scst_dev_type_list;
++extern struct list_head scst_virtual_dev_type_list;
++extern wait_queue_head_t scst_dev_cmd_waitQ;
++
++extern unsigned int scst_setup_id;
++
++#define SCST_DEF_MAX_TASKLET_CMD 20
++extern int scst_max_tasklet_cmd;
++
++extern spinlock_t scst_init_lock;
++extern struct list_head scst_init_cmd_list;
++extern wait_queue_head_t scst_init_cmd_list_waitQ;
++extern unsigned int scst_init_poll_cnt;
++
++extern struct scst_cmd_threads scst_main_cmd_threads;
++
++extern spinlock_t scst_mcmd_lock;
++/* The following lists protected by scst_mcmd_lock */
++extern struct list_head scst_active_mgmt_cmd_list;
++extern struct list_head scst_delayed_mgmt_cmd_list;
++extern wait_queue_head_t scst_mgmt_cmd_list_waitQ;
++
++struct scst_tasklet {
++ spinlock_t tasklet_lock;
++ struct list_head tasklet_cmd_list;
++ struct tasklet_struct tasklet;
++};
++extern struct scst_tasklet scst_tasklets[NR_CPUS];
++
++extern wait_queue_head_t scst_mgmt_waitQ;
++extern spinlock_t scst_mgmt_lock;
++extern struct list_head scst_sess_init_list;
++extern struct list_head scst_sess_shut_list;
++
++struct scst_cmd_thread_t {
++ struct task_struct *cmd_thread;
++ struct list_head thread_list_entry;
++};
++
++static inline bool scst_set_io_context(struct scst_cmd *cmd,
++ struct io_context **old)
++{
++ bool res;
++
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ return false;
++#endif
++
++ if (cmd->cmd_threads == &scst_main_cmd_threads) {
++ EXTRACHECKS_BUG_ON(in_interrupt());
++ /*
++		 * No need for any ref counting action, because the io_context
++		 * is supposed to be cleared at the end of the caller function.
++ */
++ current->io_context = cmd->tgt_dev->async_io_context;
++ res = true;
++ TRACE_DBG("io_context %p (tgt_dev %p)", current->io_context,
++ cmd->tgt_dev);
++ EXTRACHECKS_BUG_ON(current->io_context == NULL);
++ } else
++ res = false;
++
++ return res;
++}
++
++static inline void scst_reset_io_context(struct scst_tgt_dev *tgt_dev,
++ struct io_context *old)
++{
++ current->io_context = old;
++ TRACE_DBG("io_context %p reset", current->io_context);
++ return;
++}
++
++/*
++ * Converts string presentation of threads pool type to enum.
++ * Returns SCST_THREADS_POOL_TYPE_INVALID if the string is invalid.
++ */
++extern enum scst_dev_type_threads_pool_type scst_parse_threads_pool_type(
++ const char *p, int len);
++
++extern int scst_add_threads(struct scst_cmd_threads *cmd_threads,
++ struct scst_device *dev, struct scst_tgt_dev *tgt_dev, int num);
++extern void scst_del_threads(struct scst_cmd_threads *cmd_threads, int num);
++
++extern int scst_create_dev_threads(struct scst_device *dev);
++extern void scst_stop_dev_threads(struct scst_device *dev);
++
++extern int scst_tgt_dev_setup_threads(struct scst_tgt_dev *tgt_dev);
++extern void scst_tgt_dev_stop_threads(struct scst_tgt_dev *tgt_dev);
++
++extern bool scst_del_thr_data(struct scst_tgt_dev *tgt_dev,
++ struct task_struct *tsk);
++
++extern struct scst_dev_type scst_null_devtype;
++
++extern struct scst_cmd *__scst_check_deferred_commands(
++ struct scst_tgt_dev *tgt_dev);
++
++/* Used to save the function call on the fast path */
++static inline struct scst_cmd *scst_check_deferred_commands(
++ struct scst_tgt_dev *tgt_dev)
++{
++ if (tgt_dev->def_cmd_count == 0)
++ return NULL;
++ else
++ return __scst_check_deferred_commands(tgt_dev);
++}
++
++static inline void scst_make_deferred_commands_active(
++ struct scst_tgt_dev *tgt_dev)
++{
++ struct scst_cmd *c;
++
++ c = __scst_check_deferred_commands(tgt_dev);
++ if (c != NULL) {
++ TRACE_SN("Adding cmd %p to active cmd list", c);
++ spin_lock_irq(&c->cmd_threads->cmd_list_lock);
++ list_add_tail(&c->cmd_list_entry,
++ &c->cmd_threads->active_cmd_list);
++ wake_up(&c->cmd_threads->cmd_list_waitQ);
++ spin_unlock_irq(&c->cmd_threads->cmd_list_lock);
++ }
++
++ return;
++}
++
++void scst_inc_expected_sn(struct scst_tgt_dev *tgt_dev, atomic_t *slot);
++int scst_check_hq_cmd(struct scst_cmd *cmd);
++
++void scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
++ struct scst_cmd *cmd_sn);
++
++void scst_on_hq_cmd_response(struct scst_cmd *cmd);
++void scst_xmit_process_aborted_cmd(struct scst_cmd *cmd);
++
++int scst_cmd_thread(void *arg);
++void scst_cmd_tasklet(long p);
++int scst_init_thread(void *arg);
++int scst_tm_thread(void *arg);
++int scst_global_mgmt_thread(void *arg);
++
++void scst_zero_write_rest(struct scst_cmd *cmd);
++void scst_limit_sg_write_len(struct scst_cmd *cmd);
++void scst_adjust_resp_data_len(struct scst_cmd *cmd);
++
++int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds);
++
++int scst_alloc_tgt(struct scst_tgt_template *tgtt, struct scst_tgt **tgt);
++void scst_free_tgt(struct scst_tgt *tgt);
++
++int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev);
++void scst_free_device(struct scst_device *dev);
++
++struct scst_acg *scst_alloc_add_acg(struct scst_tgt *tgt,
++ const char *acg_name, bool tgt_acg);
++void scst_del_free_acg(struct scst_acg *acg);
++
++struct scst_acg *scst_tgt_find_acg(struct scst_tgt *tgt, const char *name);
++struct scst_acg *scst_find_acg(const struct scst_session *sess);
++
++void scst_check_reassign_sessions(void);
++
++int scst_sess_alloc_tgt_devs(struct scst_session *sess);
++void scst_sess_free_tgt_devs(struct scst_session *sess);
++void scst_nexus_loss(struct scst_tgt_dev *tgt_dev, bool queue_UA);
++
++int scst_acg_add_lun(struct scst_acg *acg, struct kobject *parent,
++ struct scst_device *dev, uint64_t lun, int read_only,
++ bool gen_scst_report_luns_changed, struct scst_acg_dev **out_acg_dev);
++int scst_acg_del_lun(struct scst_acg *acg, uint64_t lun,
++ bool gen_scst_report_luns_changed);
++
++int scst_acg_add_acn(struct scst_acg *acg, const char *name);
++void scst_del_free_acn(struct scst_acn *acn, bool reassign);
++struct scst_acn *scst_find_acn(struct scst_acg *acg, const char *name);
++
++/* The activity is supposed to be suspended and scst_mutex held */
++static inline bool scst_acg_sess_is_empty(struct scst_acg *acg)
++{
++ return list_empty(&acg->acg_sess_list);
++}
++
++int scst_prepare_request_sense(struct scst_cmd *orig_cmd);
++int scst_finish_internal_cmd(struct scst_cmd *cmd);
++
++void scst_store_sense(struct scst_cmd *cmd);
++
++int scst_assign_dev_handler(struct scst_device *dev,
++ struct scst_dev_type *handler);
++
++struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
++ const char *initiator_name);
++void scst_free_session(struct scst_session *sess);
++void scst_free_session_callback(struct scst_session *sess);
++
++struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask);
++void scst_free_cmd(struct scst_cmd *cmd);
++static inline void scst_destroy_cmd(struct scst_cmd *cmd)
++{
++ kmem_cache_free(scst_cmd_cachep, cmd);
++ return;
++}
++
++void scst_check_retries(struct scst_tgt *tgt);
++
++int scst_scsi_exec_async(struct scst_cmd *cmd,
++ void (*done)(void *, char *, int, int));
++
++int scst_alloc_space(struct scst_cmd *cmd);
++
++int scst_lib_init(void);
++void scst_lib_exit(void);
++
++int scst_get_full_buf(struct scst_cmd *cmd, uint8_t **buf);
++void scst_put_full_buf(struct scst_cmd *cmd, uint8_t *buf);
++
++__be64 scst_pack_lun(const uint64_t lun, unsigned int addr_method);
++uint64_t scst_unpack_lun(const uint8_t *lun, int len);
++
++struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask);
++void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd);
++void scst_done_cmd_mgmt(struct scst_cmd *cmd);
++
++static inline void scst_devt_cleanup(struct scst_dev_type *devt) { }
++
++int scst_sysfs_init(void);
++void scst_sysfs_cleanup(void);
++int scst_tgtt_sysfs_create(struct scst_tgt_template *tgtt);
++void scst_tgtt_sysfs_del(struct scst_tgt_template *tgtt);
++int scst_tgt_sysfs_create(struct scst_tgt *tgt);
++void scst_tgt_sysfs_prepare_put(struct scst_tgt *tgt);
++void scst_tgt_sysfs_del(struct scst_tgt *tgt);
++int scst_sess_sysfs_create(struct scst_session *sess);
++void scst_sess_sysfs_del(struct scst_session *sess);
++int scst_recreate_sess_luns_link(struct scst_session *sess);
++int scst_sgv_sysfs_create(struct sgv_pool *pool);
++void scst_sgv_sysfs_del(struct sgv_pool *pool);
++int scst_devt_sysfs_create(struct scst_dev_type *devt);
++void scst_devt_sysfs_del(struct scst_dev_type *devt);
++int scst_dev_sysfs_create(struct scst_device *dev);
++void scst_dev_sysfs_del(struct scst_device *dev);
++int scst_tgt_dev_sysfs_create(struct scst_tgt_dev *tgt_dev);
++void scst_tgt_dev_sysfs_del(struct scst_tgt_dev *tgt_dev);
++int scst_devt_dev_sysfs_create(struct scst_device *dev);
++void scst_devt_dev_sysfs_del(struct scst_device *dev);
++int scst_acg_sysfs_create(struct scst_tgt *tgt,
++ struct scst_acg *acg);
++void scst_acg_sysfs_del(struct scst_acg *acg);
++int scst_acg_dev_sysfs_create(struct scst_acg_dev *acg_dev,
++ struct kobject *parent);
++void scst_acg_dev_sysfs_del(struct scst_acg_dev *acg_dev);
++int scst_acn_sysfs_create(struct scst_acn *acn);
++void scst_acn_sysfs_del(struct scst_acn *acn);
++
++void __scst_dev_check_set_UA(struct scst_device *dev, struct scst_cmd *exclude,
++ const uint8_t *sense, int sense_len);
++static inline void scst_dev_check_set_UA(struct scst_device *dev,
++ struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
++{
++ spin_lock_bh(&dev->dev_lock);
++ __scst_dev_check_set_UA(dev, exclude, sense, sense_len);
++ spin_unlock_bh(&dev->dev_lock);
++ return;
++}
++void scst_dev_check_set_local_UA(struct scst_device *dev,
++ struct scst_cmd *exclude, const uint8_t *sense, int sense_len);
++
++#define SCST_SET_UA_FLAG_AT_HEAD 1
++#define SCST_SET_UA_FLAG_GLOBAL 2
++
++void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
++ const uint8_t *sense, int sense_len, int flags);
++int scst_set_pending_UA(struct scst_cmd *cmd);
++
++void scst_report_luns_changed(struct scst_acg *acg);
++
++void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
++ bool other_ini, bool call_dev_task_mgmt_fn);
++void scst_process_reset(struct scst_device *dev,
++ struct scst_session *originator, struct scst_cmd *exclude_cmd,
++ struct scst_mgmt_cmd *mcmd, bool setUA);
++
++bool scst_is_ua_global(const uint8_t *sense, int len);
++void scst_requeue_ua(struct scst_cmd *cmd);
++
++void scst_gen_aen_or_ua(struct scst_tgt_dev *tgt_dev,
++ int key, int asc, int ascq);
++
++static inline bool scst_is_implicit_hq(struct scst_cmd *cmd)
++{
++ return (cmd->op_flags & SCST_IMPLICIT_HQ) != 0;
++}
++
++/*
++ * Some notes on device "blocking". Blocking means that no
++ * commands will go from SCST to the underlying SCSI device until
++ * it is unblocked. Commands that are already on the device are
++ * not affected.
++ */
++
++extern void scst_block_dev(struct scst_device *dev);
++extern void scst_unblock_dev(struct scst_device *dev);
++
++extern bool __scst_check_blocked_dev(struct scst_cmd *cmd);
++
++static inline bool scst_check_blocked_dev(struct scst_cmd *cmd)
++{
++ if (unlikely(cmd->dev->block_count > 0) ||
++ unlikely(cmd->dev->dev_double_ua_possible))
++ return __scst_check_blocked_dev(cmd);
++ else
++ return false;
++}
++
++/* No locks */
++static inline void scst_check_unblock_dev(struct scst_cmd *cmd)
++{
++ if (unlikely(cmd->unblock_dev)) {
++ TRACE_MGMT_DBG("cmd %p (tag %llu): unblocking dev %p", cmd,
++ (long long unsigned int)cmd->tag, cmd->dev);
++ cmd->unblock_dev = 0;
++ scst_unblock_dev(cmd->dev);
++ }
++ return;
++}
++
++static inline void __scst_get(void)
++{
++ atomic_inc(&scst_cmd_count);
++ TRACE_DBG("Incrementing scst_cmd_count(new value %d)",
++ atomic_read(&scst_cmd_count));
++ /* See comment about smp_mb() in scst_suspend_activity() */
++ smp_mb__after_atomic_inc();
++}
++
++static inline void __scst_put(void)
++{
++ int f;
++ f = atomic_dec_and_test(&scst_cmd_count);
++ /* See comment about smp_mb() in scst_suspend_activity() */
++ if (f && unlikely(test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
++ TRACE_MGMT_DBG("%s", "Waking up scst_dev_cmd_waitQ");
++ wake_up_all(&scst_dev_cmd_waitQ);
++ }
++ TRACE_DBG("Decrementing scst_cmd_count(new value %d)",
++ atomic_read(&scst_cmd_count));
++}
++
++void scst_sched_session_free(struct scst_session *sess);
++
++static inline void scst_sess_get(struct scst_session *sess)
++{
++ atomic_inc(&sess->refcnt);
++ TRACE_DBG("Incrementing sess %p refcnt (new value %d)",
++ sess, atomic_read(&sess->refcnt));
++}
++
++static inline void scst_sess_put(struct scst_session *sess)
++{
++ TRACE_DBG("Decrementing sess %p refcnt (new value %d)",
++ sess, atomic_read(&sess->refcnt)-1);
++ if (atomic_dec_and_test(&sess->refcnt))
++ scst_sched_session_free(sess);
++}
++
++static inline void __scst_cmd_get(struct scst_cmd *cmd)
++{
++ atomic_inc(&cmd->cmd_ref);
++ TRACE_DBG("Incrementing cmd %p ref (new value %d)",
++ cmd, atomic_read(&cmd->cmd_ref));
++}
++
++static inline void __scst_cmd_put(struct scst_cmd *cmd)
++{
++ TRACE_DBG("Decrementing cmd %p ref (new value %d)",
++ cmd, atomic_read(&cmd->cmd_ref)-1);
++ if (atomic_dec_and_test(&cmd->cmd_ref))
++ scst_free_cmd(cmd);
++}
++
++extern void scst_throttle_cmd(struct scst_cmd *cmd);
++extern void scst_unthrottle_cmd(struct scst_cmd *cmd);
++
++#ifdef CONFIG_SCST_DEBUG_TM
++extern void tm_dbg_check_released_cmds(void);
++extern int tm_dbg_check_cmd(struct scst_cmd *cmd);
++extern void tm_dbg_release_cmd(struct scst_cmd *cmd);
++extern void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn,
++ int force);
++extern int tm_dbg_is_release(void);
++#else
++static inline void tm_dbg_check_released_cmds(void) {}
++static inline int tm_dbg_check_cmd(struct scst_cmd *cmd)
++{
++ return 0;
++}
++static inline void tm_dbg_release_cmd(struct scst_cmd *cmd) {}
++static inline void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn,
++ int force) {}
++static inline int tm_dbg_is_release(void)
++{
++ return 0;
++}
++#endif /* CONFIG_SCST_DEBUG_TM */
++
++#ifdef CONFIG_SCST_DEBUG_SN
++void scst_check_debug_sn(struct scst_cmd *cmd);
++#else
++static inline void scst_check_debug_sn(struct scst_cmd *cmd) {}
++#endif
++
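++/*
++ * Compare two 32-bit serial numbers using a signed difference, so the
++ * result stays correct across counter wrap-around (the same idiom as the
++ * kernel's time_before() for jiffies).
++ */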
++static inline int scst_sn_before(uint32_t seq1, uint32_t seq2)
++{
++ return (int32_t)(seq1-seq2) < 0;
++}
++
++int gen_relative_target_port_id(uint16_t *id);
++bool scst_is_relative_target_port_id_unique(uint16_t id,
++ const struct scst_tgt *t);
++
++#ifdef CONFIG_SCST_MEASURE_LATENCY
++
++void scst_set_start_time(struct scst_cmd *cmd);
++void scst_set_cur_start(struct scst_cmd *cmd);
++void scst_set_parse_time(struct scst_cmd *cmd);
++void scst_set_alloc_buf_time(struct scst_cmd *cmd);
++void scst_set_restart_waiting_time(struct scst_cmd *cmd);
++void scst_set_rdy_to_xfer_time(struct scst_cmd *cmd);
++void scst_set_pre_exec_time(struct scst_cmd *cmd);
++void scst_set_exec_time(struct scst_cmd *cmd);
++void scst_set_dev_done_time(struct scst_cmd *cmd);
++void scst_set_xmit_time(struct scst_cmd *cmd);
++void scst_set_tgt_on_free_time(struct scst_cmd *cmd);
++void scst_set_dev_on_free_time(struct scst_cmd *cmd);
++void scst_update_lat_stats(struct scst_cmd *cmd);
++
++#else
++
++static inline void scst_set_start_time(struct scst_cmd *cmd) {}
++static inline void scst_set_cur_start(struct scst_cmd *cmd) {}
++static inline void scst_set_parse_time(struct scst_cmd *cmd) {}
++static inline void scst_set_alloc_buf_time(struct scst_cmd *cmd) {}
++static inline void scst_set_restart_waiting_time(struct scst_cmd *cmd) {}
++static inline void scst_set_rdy_to_xfer_time(struct scst_cmd *cmd) {}
++static inline void scst_set_pre_exec_time(struct scst_cmd *cmd) {}
++static inline void scst_set_exec_time(struct scst_cmd *cmd) {}
++static inline void scst_set_dev_done_time(struct scst_cmd *cmd) {}
++static inline void scst_set_xmit_time(struct scst_cmd *cmd) {}
++static inline void scst_set_tgt_on_free_time(struct scst_cmd *cmd) {}
++static inline void scst_set_dev_on_free_time(struct scst_cmd *cmd) {}
++static inline void scst_update_lat_stats(struct scst_cmd *cmd) {}
++
++#endif /* CONFIG_SCST_MEASURE_LATENCY */
++
++#endif /* __SCST_PRIV_H */
+diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst/scst_sysfs.c
+--- orig/linux-2.6.36/drivers/scst/scst_sysfs.c
++++ linux-2.6.36/drivers/scst/scst_sysfs.c
+@@ -0,0 +1,5336 @@
++/*
++ * scst_sysfs.c
++ *
++ * Copyright (C) 2009 Daniel Henrique Debonzi <debonzi@linux.vnet.ibm.com>
++ * Copyright (C) 2009 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2009 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/kobject.h>
++#include <linux/string.h>
++#include <linux/sysfs.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/ctype.h>
++#include <linux/slab.h>
++#include <linux/kthread.h>
++
++#include <scst/scst.h>
++#include "scst_priv.h"
++#include "scst_mem.h"
++#include "scst_pres.h"
++
++static DECLARE_COMPLETION(scst_sysfs_root_release_completion);
++
++static struct kobject scst_sysfs_root_kobj;
++static struct kobject *scst_targets_kobj;
++static struct kobject *scst_devices_kobj;
++static struct kobject *scst_sgv_kobj;
++static struct kobject *scst_handlers_kobj;
++
++static const char *scst_dev_handler_types[] = {
++ "Direct-access device (e.g., magnetic disk)",
++ "Sequential-access device (e.g., magnetic tape)",
++ "Printer device",
++ "Processor device",
++ "Write-once device (e.g., some optical disks)",
++ "CD-ROM device",
++ "Scanner device (obsolete)",
++ "Optical memory device (e.g., some optical disks)",
++ "Medium changer device (e.g., jukeboxes)",
++ "Communications device (obsolete)",
++ "Defined by ASC IT8 (Graphic arts pre-press devices)",
++ "Defined by ASC IT8 (Graphic arts pre-press devices)",
++ "Storage array controller device (e.g., RAID)",
++ "Enclosure services device",
++ "Simplified direct-access device (e.g., magnetic disk)",
++ "Optical card reader/writer device"
++};
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++
++static DEFINE_MUTEX(scst_log_mutex);
++
++static struct scst_trace_log scst_trace_tbl[] = {
++ { TRACE_OUT_OF_MEM, "out_of_mem" },
++ { TRACE_MINOR, "minor" },
++ { TRACE_SG_OP, "sg" },
++ { TRACE_MEMORY, "mem" },
++ { TRACE_BUFF, "buff" },
++#ifndef GENERATING_UPSTREAM_PATCH
++ { TRACE_ENTRYEXIT, "entryexit" },
++#endif
++ { TRACE_PID, "pid" },
++ { TRACE_LINE, "line" },
++ { TRACE_FUNCTION, "function" },
++ { TRACE_DEBUG, "debug" },
++ { TRACE_SPECIAL, "special" },
++ { TRACE_SCSI, "scsi" },
++ { TRACE_MGMT, "mgmt" },
++ { TRACE_MGMT_DEBUG, "mgmt_dbg" },
++ { TRACE_FLOW_CONTROL, "flow_control" },
++ { TRACE_PRES, "pr" },
++ { 0, NULL }
++};
++
++static struct scst_trace_log scst_local_trace_tbl[] = {
++ { TRACE_RTRY, "retry" },
++ { TRACE_SCSI_SERIALIZING, "scsi_serializing" },
++ { TRACE_RCV_BOT, "recv_bot" },
++ { TRACE_SND_BOT, "send_bot" },
++ { TRACE_RCV_TOP, "recv_top" },
++ { TRACE_SND_TOP, "send_top" },
++ { 0, NULL }
++};
++
++static ssize_t scst_trace_level_show(const struct scst_trace_log *local_tbl,
++ unsigned long log_level, char *buf, const char *help);
++static int scst_write_trace(const char *buf, size_t length,
++ unsigned long *log_level, unsigned long default_level,
++ const char *name, const struct scst_trace_log *tbl);
++
++#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
++
++static ssize_t scst_luns_mgmt_show(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ char *buf);
++static ssize_t scst_luns_mgmt_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count);
++static ssize_t scst_tgt_addr_method_show(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ char *buf);
++static ssize_t scst_tgt_addr_method_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count);
++static ssize_t scst_tgt_io_grouping_type_show(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ char *buf);
++static ssize_t scst_tgt_io_grouping_type_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count);
++static ssize_t scst_ini_group_mgmt_show(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ char *buf);
++static ssize_t scst_ini_group_mgmt_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count);
++static ssize_t scst_rel_tgt_id_show(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ char *buf);
++static ssize_t scst_rel_tgt_id_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count);
++static ssize_t scst_acg_luns_mgmt_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count);
++static ssize_t scst_acg_ini_mgmt_show(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ char *buf);
++static ssize_t scst_acg_ini_mgmt_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count);
++static ssize_t scst_acg_addr_method_show(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ char *buf);
++static ssize_t scst_acg_addr_method_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count);
++static ssize_t scst_acg_io_grouping_type_show(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ char *buf);
++static ssize_t scst_acg_io_grouping_type_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count);
++static ssize_t scst_acn_file_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf);
++
++/**
++ ** Sysfs work
++ **/
++
++static DEFINE_SPINLOCK(sysfs_work_lock);
++static LIST_HEAD(sysfs_work_list);
++static DECLARE_WAIT_QUEUE_HEAD(sysfs_work_waitQ);
++static int active_sysfs_works;
++static int last_sysfs_work_res;
++static struct task_struct *sysfs_work_thread;
++
++/**
++ * scst_alloc_sysfs_work() - allocates a sysfs work
++ */
++int scst_alloc_sysfs_work(int (*sysfs_work_fn)(struct scst_sysfs_work_item *),
++ bool read_only_action, struct scst_sysfs_work_item **res_work)
++{
++ int res = 0;
++ struct scst_sysfs_work_item *work;
++
++ TRACE_ENTRY();
++
++ if (sysfs_work_fn == NULL) {
++ PRINT_ERROR("%s", "sysfs_work_fn is NULL");
++ res = -EINVAL;
++ goto out;
++ }
++
++ *res_work = NULL;
++
++ work = kzalloc(sizeof(*work), GFP_KERNEL);
++ if (work == NULL) {
++ PRINT_ERROR("Unable to alloc sysfs work (size %zd)",
++ sizeof(*work));
++ res = -ENOMEM;
++ goto out;
++ }
++
++ work->read_only_action = read_only_action;
++ kref_init(&work->sysfs_work_kref);
++ init_completion(&work->sysfs_work_done);
++ work->sysfs_work_fn = sysfs_work_fn;
++
++ *res_work = work;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL(scst_alloc_sysfs_work);
++
++static void scst_sysfs_work_release(struct kref *kref)
++{
++ struct scst_sysfs_work_item *work;
++
++ TRACE_ENTRY();
++
++ work = container_of(kref, struct scst_sysfs_work_item,
++ sysfs_work_kref);
++
++ TRACE_DBG("Freeing sysfs work %p (buf %p)", work, work->buf);
++
++ kfree(work->buf);
++ kfree(work->res_buf);
++ kfree(work);
++
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ * scst_sysfs_work_get() - increases ref counter of the sysfs work
++ */
++void scst_sysfs_work_get(struct scst_sysfs_work_item *work)
++{
++ kref_get(&work->sysfs_work_kref);
++}
++EXPORT_SYMBOL(scst_sysfs_work_get);
++
++/**
++ * scst_sysfs_work_put() - decreases ref counter of the sysfs work
++ */
++void scst_sysfs_work_put(struct scst_sysfs_work_item *work)
++{
++ kref_put(&work->sysfs_work_kref, scst_sysfs_work_release);
++}
++EXPORT_SYMBOL(scst_sysfs_work_put);
++
++/**
++ * scst_sysfs_queue_wait_work() - queues a sysfs work item and waits for it
++ *
++ * Returns the status of the completed work, or -EAGAIN if the work did not
++ * complete before the timeout. In the latter case the user should poll
++ * last_sysfs_mgmt_res until it returns the result of the processing.
++ */
++int scst_sysfs_queue_wait_work(struct scst_sysfs_work_item *work)
++{
++ int res = 0, rc;
++ unsigned long timeout = 15*HZ;
++
++ TRACE_ENTRY();
++
++ spin_lock(&sysfs_work_lock);
++
++ TRACE_DBG("Adding sysfs work %p to the list", work);
++ list_add_tail(&work->sysfs_work_list_entry, &sysfs_work_list);
++
++ active_sysfs_works++;
++
++ spin_unlock(&sysfs_work_lock);
++
++ kref_get(&work->sysfs_work_kref);
++
++ wake_up(&sysfs_work_waitQ);
++
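++	/*
++	 * The work runs asynchronously in sysfs_work_thread_fn(). If the
++	 * wait below times out while scst_mutex is not locked, the work is
++	 * presumably just slow rather than blocked behind the mutex, so
++	 * keep waiting; otherwise give up with -EAGAIN and let the caller
++	 * poll last_sysfs_mgmt_res for the final result.
++	 */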
++ while (1) {
++ rc = wait_for_completion_interruptible_timeout(
++ &work->sysfs_work_done, timeout);
++ if (rc == 0) {
++ if (!mutex_is_locked(&scst_mutex)) {
++ TRACE_DBG("scst_mutex not locked, continue "
++ "waiting (work %p)", work);
++ timeout = 5*HZ;
++ continue;
++ }
++ TRACE_MGMT_DBG("Time out waiting for work %p",
++ work);
++ res = -EAGAIN;
++ goto out_put;
++ } else if (rc < 0) {
++ res = rc;
++ goto out_put;
++ }
++ break;
++ }
++
++ res = work->work_res;
++
++out_put:
++ kref_put(&work->sysfs_work_kref, scst_sysfs_work_release);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL(scst_sysfs_queue_wait_work);
++
++/* Called under sysfs_work_lock; drops and reacquires it inside */
++static void scst_process_sysfs_works(void)
++{
++ struct scst_sysfs_work_item *work;
++
++ TRACE_ENTRY();
++
++ while (!list_empty(&sysfs_work_list)) {
++ work = list_entry(sysfs_work_list.next,
++ struct scst_sysfs_work_item, sysfs_work_list_entry);
++ list_del(&work->sysfs_work_list_entry);
++ spin_unlock(&sysfs_work_lock);
++
++ TRACE_DBG("Sysfs work %p", work);
++
++ work->work_res = work->sysfs_work_fn(work);
++
++ spin_lock(&sysfs_work_lock);
++ if (!work->read_only_action)
++ last_sysfs_work_res = work->work_res;
++ active_sysfs_works--;
++ spin_unlock(&sysfs_work_lock);
++
++ complete_all(&work->sysfs_work_done);
++ kref_put(&work->sysfs_work_kref, scst_sysfs_work_release);
++
++ spin_lock(&sysfs_work_lock);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static inline int test_sysfs_work_list(void)
++{
++ int res = !list_empty(&sysfs_work_list) ||
++ unlikely(kthread_should_stop());
++ return res;
++}
++
++static int sysfs_work_thread_fn(void *arg)
++{
++ TRACE_ENTRY();
++
++ PRINT_INFO("User interface thread started, PID %d", current->pid);
++
++ current->flags |= PF_NOFREEZE;
++
++ set_user_nice(current, -10);
++
++ spin_lock(&sysfs_work_lock);
++ while (!kthread_should_stop()) {
++ wait_queue_t wait;
++ init_waitqueue_entry(&wait, current);
++
++ if (!test_sysfs_work_list()) {
++ add_wait_queue_exclusive(&sysfs_work_waitQ, &wait);
++ for (;;) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ if (test_sysfs_work_list())
++ break;
++ spin_unlock(&sysfs_work_lock);
++ schedule();
++ spin_lock(&sysfs_work_lock);
++ }
++ set_current_state(TASK_RUNNING);
++ remove_wait_queue(&sysfs_work_waitQ, &wait);
++ }
++
++ scst_process_sysfs_works();
++ }
++ spin_unlock(&sysfs_work_lock);
++
++ /*
++	 * If kthread_should_stop() is true, we are guaranteed to be
++	 * in module unload, so the work list must be empty.
++ */
++ BUG_ON(!list_empty(&sysfs_work_list));
++
++ PRINT_INFO("User interface thread PID %d finished", current->pid);
++
++ TRACE_EXIT();
++ return 0;
++}
++
++/* No locks */
++static int scst_check_grab_tgtt_ptr(struct scst_tgt_template *tgtt)
++{
++ int res = 0;
++ struct scst_tgt_template *tt;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(tt, &scst_template_list, scst_template_list_entry) {
++ if (tt == tgtt) {
++ tgtt->tgtt_active_sysfs_works_count++;
++ goto out_unlock;
++ }
++ }
++
++ TRACE_DBG("Tgtt %p not found", tgtt);
++ res = -ENOENT;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* No locks */
++static void scst_ungrab_tgtt_ptr(struct scst_tgt_template *tgtt)
++{
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_mutex);
++ tgtt->tgtt_active_sysfs_works_count--;
++ mutex_unlock(&scst_mutex);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* scst_mutex supposed to be locked */
++static int scst_check_tgt_acg_ptrs(struct scst_tgt *tgt, struct scst_acg *acg)
++{
++ int res = 0;
++ struct scst_tgt_template *tgtt;
++
++ list_for_each_entry(tgtt, &scst_template_list, scst_template_list_entry) {
++ struct scst_tgt *t;
++ list_for_each_entry(t, &tgtt->tgt_list, tgt_list_entry) {
++ if (t == tgt) {
++ struct scst_acg *a;
++ if (acg == NULL)
++ goto out;
++ if (acg == tgt->default_acg)
++ goto out;
++ list_for_each_entry(a, &tgt->tgt_acg_list,
++ acg_list_entry) {
++ if (a == acg)
++ goto out;
++ }
++ }
++ }
++ }
++
++ TRACE_DBG("Tgt %p/ACG %p not found", tgt, acg);
++ res = -ENOENT;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* scst_mutex supposed to be locked */
++static int scst_check_devt_ptr(struct scst_dev_type *devt,
++ struct list_head *list)
++{
++ int res = 0;
++ struct scst_dev_type *dt;
++
++ TRACE_ENTRY();
++
++ list_for_each_entry(dt, list, dev_type_list_entry) {
++ if (dt == devt)
++ goto out;
++ }
++
++ TRACE_DBG("Devt %p not found", devt);
++ res = -ENOENT;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* scst_mutex supposed to be locked */
++static int scst_check_dev_ptr(struct scst_device *dev)
++{
++ int res = 0;
++ struct scst_device *d;
++
++ TRACE_ENTRY();
++
++ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
++ if (d == dev)
++ goto out;
++ }
++
++ TRACE_DBG("Dev %p not found", dev);
++ res = -ENOENT;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* No locks */
++static int scst_check_grab_devt_ptr(struct scst_dev_type *devt,
++ struct list_head *list)
++{
++ int res = 0;
++ struct scst_dev_type *dt;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(dt, list, dev_type_list_entry) {
++ if (dt == devt) {
++ devt->devt_active_sysfs_works_count++;
++ goto out_unlock;
++ }
++ }
++
++ TRACE_DBG("Devt %p not found", devt);
++ res = -ENOENT;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* No locks */
++static void scst_ungrab_devt_ptr(struct scst_dev_type *devt)
++{
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_mutex);
++ devt->devt_active_sysfs_works_count--;
++ mutex_unlock(&scst_mutex);
++
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ ** Regular SCST sysfs ops
++ **/
++static ssize_t scst_show(struct kobject *kobj, struct attribute *attr,
++ char *buf)
++{
++ struct kobj_attribute *kobj_attr;
++ kobj_attr = container_of(attr, struct kobj_attribute, attr);
++
++ return kobj_attr->show(kobj, kobj_attr, buf);
++}
++
++static ssize_t scst_store(struct kobject *kobj, struct attribute *attr,
++ const char *buf, size_t count)
++{
++ struct kobj_attribute *kobj_attr;
++ kobj_attr = container_of(attr, struct kobj_attribute, attr);
++
++ if (kobj_attr->store)
++ return kobj_attr->store(kobj, kobj_attr, buf, count);
++ else
++ return -EIO;
++}
++
++static const struct sysfs_ops scst_sysfs_ops = {
++ .show = scst_show,
++ .store = scst_store,
++};
++
++const struct sysfs_ops *scst_sysfs_get_sysfs_ops(void)
++{
++ return &scst_sysfs_ops;
++}
++EXPORT_SYMBOL_GPL(scst_sysfs_get_sysfs_ops);
++
++/**
++ ** Target Template
++ **/
++
++static void scst_tgtt_release(struct kobject *kobj)
++{
++ struct scst_tgt_template *tgtt;
++
++ TRACE_ENTRY();
++
++ tgtt = container_of(kobj, struct scst_tgt_template, tgtt_kobj);
++ complete_all(&tgtt->tgtt_kobj_release_cmpl);
++
++ TRACE_EXIT();
++ return;
++}
++
++static struct kobj_type tgtt_ktype = {
++ .sysfs_ops = &scst_sysfs_ops,
++ .release = scst_tgtt_release,
++};
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++
++static ssize_t scst_tgtt_trace_level_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_tgt_template *tgtt;
++
++ tgtt = container_of(kobj, struct scst_tgt_template, tgtt_kobj);
++
++ return scst_trace_level_show(tgtt->trace_tbl,
++ tgtt->trace_flags ? *tgtt->trace_flags : 0, buf,
++ tgtt->trace_tbl_help);
++}
++
++static ssize_t scst_tgtt_trace_level_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ struct scst_tgt_template *tgtt;
++
++ TRACE_ENTRY();
++
++ tgtt = container_of(kobj, struct scst_tgt_template, tgtt_kobj);
++
++ if (mutex_lock_interruptible(&scst_log_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ res = scst_write_trace(buf, count, tgtt->trace_flags,
++ tgtt->default_trace_flags, tgtt->name, tgtt->trace_tbl);
++
++ mutex_unlock(&scst_log_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct kobj_attribute tgtt_trace_attr =
++ __ATTR(trace_level, S_IRUGO | S_IWUSR,
++ scst_tgtt_trace_level_show, scst_tgtt_trace_level_store);
++
++#endif /* #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
++
++static ssize_t scst_tgtt_mgmt_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++char *help = "Usage: echo \"add_target target_name [parameters]\" "
++ ">mgmt\n"
++ " echo \"del_target target_name\" >mgmt\n"
++ "%s%s"
++ "%s"
++ "\n"
++ "where parameters are one or more "
++ "param_name=value pairs separated by ';'\n\n"
++ "%s%s%s%s%s%s%s%s\n";
++ struct scst_tgt_template *tgtt;
++
++ tgtt = container_of(kobj, struct scst_tgt_template, tgtt_kobj);
++
++ return scnprintf(buf, SCST_SYSFS_BLOCK_SIZE, help,
++ (tgtt->tgtt_optional_attributes != NULL) ?
++ " echo \"add_attribute <attribute> <value>\" >mgmt\n"
++ " echo \"del_attribute <attribute> <value>\" >mgmt\n" : "",
++ (tgtt->tgt_optional_attributes != NULL) ?
++ " echo \"add_target_attribute target_name <attribute> <value>\" >mgmt\n"
++ " echo \"del_target_attribute target_name <attribute> <value>\" >mgmt\n" : "",
++ (tgtt->mgmt_cmd_help) ? tgtt->mgmt_cmd_help : "",
++ (tgtt->add_target_parameters != NULL) ?
++ "The following parameters available: " : "",
++ (tgtt->add_target_parameters != NULL) ?
++ tgtt->add_target_parameters : "",
++ (tgtt->tgtt_optional_attributes != NULL) ?
++ "The following target driver attributes available: " : "",
++ (tgtt->tgtt_optional_attributes != NULL) ?
++ tgtt->tgtt_optional_attributes : "",
++ (tgtt->tgtt_optional_attributes != NULL) ? "\n" : "",
++ (tgtt->tgt_optional_attributes != NULL) ?
++ "The following target attributes available: " : "",
++ (tgtt->tgt_optional_attributes != NULL) ?
++ tgtt->tgt_optional_attributes : "",
++ (tgtt->tgt_optional_attributes != NULL) ? "\n" : "");
++}
++
++static int scst_process_tgtt_mgmt_store(char *buffer,
++ struct scst_tgt_template *tgtt)
++{
++ int res = 0;
++ char *p, *pp, *target_name;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("buffer %s", buffer);
++
++ /* Check if our pointer is still alive and, if yes, grab it */
++ if (scst_check_grab_tgtt_ptr(tgtt) != 0)
++ goto out;
++
++ pp = buffer;
++ if (pp[strlen(pp) - 1] == '\n')
++ pp[strlen(pp) - 1] = '\0';
++
++ p = scst_get_next_lexem(&pp);
++
++ if (strcasecmp("add_target", p) == 0) {
++ target_name = scst_get_next_lexem(&pp);
++ if (*target_name == '\0') {
++ PRINT_ERROR("%s", "Target name required");
++ res = -EINVAL;
++ goto out_ungrab;
++ }
++ res = tgtt->add_target(target_name, pp);
++ } else if (strcasecmp("del_target", p) == 0) {
++ target_name = scst_get_next_lexem(&pp);
++ if (*target_name == '\0') {
++ PRINT_ERROR("%s", "Target name required");
++ res = -EINVAL;
++ goto out_ungrab;
++ }
++
++ p = scst_get_next_lexem(&pp);
++ if (*p != '\0')
++ goto out_syntax_err;
++
++ res = tgtt->del_target(target_name);
++ } else if (tgtt->mgmt_cmd != NULL) {
++ scst_restore_token_str(p, pp);
++ res = tgtt->mgmt_cmd(buffer);
++ } else {
++ PRINT_ERROR("Unknown action \"%s\"", p);
++ res = -EINVAL;
++ goto out_ungrab;
++ }
++
++out_ungrab:
++ scst_ungrab_tgtt_ptr(tgtt);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_syntax_err:
++ PRINT_ERROR("Syntax error on \"%s\"", p);
++ res = -EINVAL;
++ goto out_ungrab;
++}
++
++static int scst_tgtt_mgmt_store_work_fn(struct scst_sysfs_work_item *work)
++{
++ return scst_process_tgtt_mgmt_store(work->buf, work->tgtt);
++}
++
++static ssize_t scst_tgtt_mgmt_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ char *buffer;
++ struct scst_sysfs_work_item *work;
++ struct scst_tgt_template *tgtt;
++
++ TRACE_ENTRY();
++
++ tgtt = container_of(kobj, struct scst_tgt_template, tgtt_kobj);
++
++ buffer = kzalloc(count+1, GFP_KERNEL);
++ if (buffer == NULL) {
++ res = -ENOMEM;
++ goto out;
++ }
++ memcpy(buffer, buf, count);
++ buffer[count] = '\0';
++
++ res = scst_alloc_sysfs_work(scst_tgtt_mgmt_store_work_fn, false, &work);
++ if (res != 0)
++ goto out_free;
++
++ work->buf = buffer;
++ work->tgtt = tgtt;
++
++ res = scst_sysfs_queue_wait_work(work);
++ if (res == 0)
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free:
++ kfree(buffer);
++ goto out;
++}
++
++static struct kobj_attribute scst_tgtt_mgmt =
++ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_tgtt_mgmt_show,
++ scst_tgtt_mgmt_store);
++
++int scst_tgtt_sysfs_create(struct scst_tgt_template *tgtt)
++{
++ int res = 0;
++ const struct attribute **pattr;
++
++ TRACE_ENTRY();
++
++ init_completion(&tgtt->tgtt_kobj_release_cmpl);
++
++ res = kobject_init_and_add(&tgtt->tgtt_kobj, &tgtt_ktype,
++ scst_targets_kobj, tgtt->name);
++ if (res != 0) {
++ PRINT_ERROR("Can't add tgtt %s to sysfs", tgtt->name);
++ goto out;
++ }
++
++ if (tgtt->add_target != NULL) {
++ res = sysfs_create_file(&tgtt->tgtt_kobj,
++ &scst_tgtt_mgmt.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add mgmt attr for target driver %s",
++ tgtt->name);
++ goto out_del;
++ }
++ }
++
++ pattr = tgtt->tgtt_attrs;
++ if (pattr != NULL) {
++ while (*pattr != NULL) {
++ TRACE_DBG("Creating attr %s for target driver %s",
++ (*pattr)->name, tgtt->name);
++ res = sysfs_create_file(&tgtt->tgtt_kobj, *pattr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add attr %s for target "
++ "driver %s", (*pattr)->name,
++ tgtt->name);
++ goto out_del;
++ }
++ pattr++;
++ }
++ }
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ if (tgtt->trace_flags != NULL) {
++ res = sysfs_create_file(&tgtt->tgtt_kobj,
++ &tgtt_trace_attr.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add trace_flag for target "
++ "driver %s", tgtt->name);
++ goto out_del;
++ }
++ }
++#endif
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_del:
++ scst_tgtt_sysfs_del(tgtt);
++ goto out;
++}
++
++/*
++ * Must not be called under scst_mutex, due to possible deadlock with
++ * sysfs ref counting in sysfs works (it is waiting for the last put, but
++ * the last ref counter holder is waiting for scst_mutex)
++ */
++void scst_tgtt_sysfs_del(struct scst_tgt_template *tgtt)
++{
++ int rc;
++
++ TRACE_ENTRY();
++
++ kobject_del(&tgtt->tgtt_kobj);
++ kobject_put(&tgtt->tgtt_kobj);
++
++ rc = wait_for_completion_timeout(&tgtt->tgtt_kobj_release_cmpl, HZ);
++ if (rc == 0) {
++ PRINT_INFO("Waiting for releasing sysfs entry "
++ "for target template %s (%d refs)...", tgtt->name,
++ atomic_read(&tgtt->tgtt_kobj.kref.refcount));
++ wait_for_completion(&tgtt->tgtt_kobj_release_cmpl);
++ PRINT_INFO("Done waiting for releasing sysfs "
++ "entry for target template %s", tgtt->name);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ ** Target directory implementation
++ **/
++
++static void scst_tgt_release(struct kobject *kobj)
++{
++ struct scst_tgt *tgt;
++
++ TRACE_ENTRY();
++
++ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ complete_all(&tgt->tgt_kobj_release_cmpl);
++
++ TRACE_EXIT();
++ return;
++}
++
++static struct kobj_type tgt_ktype = {
++ .sysfs_ops = &scst_sysfs_ops,
++ .release = scst_tgt_release,
++};
++
++static void scst_acg_release(struct kobject *kobj)
++{
++ struct scst_acg *acg;
++
++ TRACE_ENTRY();
++
++ acg = container_of(kobj, struct scst_acg, acg_kobj);
++ complete_all(&acg->acg_kobj_release_cmpl);
++
++ TRACE_EXIT();
++ return;
++}
++
++static struct kobj_type acg_ktype = {
++ .sysfs_ops = &scst_sysfs_ops,
++ .release = scst_acg_release,
++};
++
++static struct kobj_attribute scst_luns_mgmt =
++ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_luns_mgmt_show,
++ scst_luns_mgmt_store);
++
++static struct kobj_attribute scst_acg_luns_mgmt =
++ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_luns_mgmt_show,
++ scst_acg_luns_mgmt_store);
++
++static struct kobj_attribute scst_acg_ini_mgmt =
++ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_acg_ini_mgmt_show,
++ scst_acg_ini_mgmt_store);
++
++static struct kobj_attribute scst_ini_group_mgmt =
++ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_ini_group_mgmt_show,
++ scst_ini_group_mgmt_store);
++
++static struct kobj_attribute scst_tgt_addr_method =
++ __ATTR(addr_method, S_IRUGO | S_IWUSR, scst_tgt_addr_method_show,
++ scst_tgt_addr_method_store);
++
++static struct kobj_attribute scst_tgt_io_grouping_type =
++ __ATTR(io_grouping_type, S_IRUGO | S_IWUSR,
++ scst_tgt_io_grouping_type_show,
++ scst_tgt_io_grouping_type_store);
++
++static struct kobj_attribute scst_rel_tgt_id =
++ __ATTR(rel_tgt_id, S_IRUGO | S_IWUSR, scst_rel_tgt_id_show,
++ scst_rel_tgt_id_store);
++
++static struct kobj_attribute scst_acg_addr_method =
++ __ATTR(addr_method, S_IRUGO | S_IWUSR, scst_acg_addr_method_show,
++ scst_acg_addr_method_store);
++
++static struct kobj_attribute scst_acg_io_grouping_type =
++ __ATTR(io_grouping_type, S_IRUGO | S_IWUSR,
++ scst_acg_io_grouping_type_show,
++ scst_acg_io_grouping_type_store);
++
++static ssize_t scst_tgt_enable_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_tgt *tgt;
++ int res;
++ bool enabled;
++
++ TRACE_ENTRY();
++
++ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++
++ enabled = tgt->tgtt->is_target_enabled(tgt);
++
++ res = sprintf(buf, "%d\n", enabled ? 1 : 0);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_process_tgt_enable_store(struct scst_tgt *tgt, bool enable)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ /* Tgt protected by kobject reference */
++
++ TRACE_DBG("tgt %s, enable %d", tgt->tgt_name, enable);
++
++ if (enable) {
++ if (tgt->rel_tgt_id == 0) {
++ res = gen_relative_target_port_id(&tgt->rel_tgt_id);
++ if (res != 0)
++ goto out_put;
++ PRINT_INFO("Using autogenerated rel ID %d for target "
++ "%s", tgt->rel_tgt_id, tgt->tgt_name);
++ } else {
++ if (!scst_is_relative_target_port_id_unique(
++ tgt->rel_tgt_id, tgt)) {
++ PRINT_ERROR("Relative port id %d is not unique",
++ tgt->rel_tgt_id);
++ res = -EBADSLT;
++ goto out_put;
++ }
++ }
++ }
++
++ res = tgt->tgtt->enable_target(tgt, enable);
++
++out_put:
++ kobject_put(&tgt->tgt_kobj);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_tgt_enable_store_work_fn(struct scst_sysfs_work_item *work)
++{
++ return scst_process_tgt_enable_store(work->tgt, work->enable);
++}
++
++static ssize_t scst_tgt_enable_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ struct scst_tgt *tgt;
++ bool enable;
++ struct scst_sysfs_work_item *work;
++
++ TRACE_ENTRY();
++
++ if (buf == NULL) {
++ PRINT_ERROR("%s: NULL buffer?", __func__);
++ res = -EINVAL;
++ goto out;
++ }
++
++ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++
++ switch (buf[0]) {
++ case '0':
++ enable = false;
++ break;
++ case '1':
++ enable = true;
++ break;
++ default:
++ PRINT_ERROR("%s: Requested action not understood: %s",
++ __func__, buf);
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = scst_alloc_sysfs_work(scst_tgt_enable_store_work_fn, false,
++ &work);
++ if (res != 0)
++ goto out;
++
++ work->tgt = tgt;
++ work->enable = enable;
++
++ kobject_get(&tgt->tgt_kobj);
++
++ res = scst_sysfs_queue_wait_work(work);
++ if (res == 0)
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct kobj_attribute tgt_enable_attr =
++ __ATTR(enabled, S_IRUGO | S_IWUSR,
++ scst_tgt_enable_show, scst_tgt_enable_store);
++
++/*
++ * Supposed to be called under scst_mutex. In case of error it will drop
++ * and then reacquire it.
++ */
++int scst_tgt_sysfs_create(struct scst_tgt *tgt)
++{
++ int res;
++ const struct attribute **pattr;
++
++ TRACE_ENTRY();
++
++ init_completion(&tgt->tgt_kobj_release_cmpl);
++
++ res = kobject_init_and_add(&tgt->tgt_kobj, &tgt_ktype,
++ &tgt->tgtt->tgtt_kobj, tgt->tgt_name);
++ if (res != 0) {
++ PRINT_ERROR("Can't add tgt %s to sysfs", tgt->tgt_name);
++ goto out;
++ }
++
++ if ((tgt->tgtt->enable_target != NULL) &&
++ (tgt->tgtt->is_target_enabled != NULL)) {
++ res = sysfs_create_file(&tgt->tgt_kobj,
++ &tgt_enable_attr.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add attr %s to sysfs",
++ tgt_enable_attr.attr.name);
++ goto out_err;
++ }
++ }
++
++ tgt->tgt_sess_kobj = kobject_create_and_add("sessions", &tgt->tgt_kobj);
++ if (tgt->tgt_sess_kobj == NULL) {
++ PRINT_ERROR("Can't create sess kobj for tgt %s", tgt->tgt_name);
++ goto out_nomem;
++ }
++
++ tgt->tgt_luns_kobj = kobject_create_and_add("luns", &tgt->tgt_kobj);
++ if (tgt->tgt_luns_kobj == NULL) {
++ PRINT_ERROR("Can't create luns kobj for tgt %s", tgt->tgt_name);
++ goto out_nomem;
++ }
++
++ res = sysfs_create_file(tgt->tgt_luns_kobj, &scst_luns_mgmt.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add attribute %s for tgt %s",
++ scst_luns_mgmt.attr.name, tgt->tgt_name);
++ goto out_err;
++ }
++
++ tgt->tgt_ini_grp_kobj = kobject_create_and_add("ini_groups",
++ &tgt->tgt_kobj);
++ if (tgt->tgt_ini_grp_kobj == NULL) {
++ PRINT_ERROR("Can't create ini_grp kobj for tgt %s",
++ tgt->tgt_name);
++ goto out_nomem;
++ }
++
++ res = sysfs_create_file(tgt->tgt_ini_grp_kobj,
++ &scst_ini_group_mgmt.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add attribute %s for tgt %s",
++ scst_ini_group_mgmt.attr.name, tgt->tgt_name);
++ goto out_err;
++ }
++
++ res = sysfs_create_file(&tgt->tgt_kobj,
++ &scst_rel_tgt_id.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add attribute %s for tgt %s",
++ scst_rel_tgt_id.attr.name, tgt->tgt_name);
++ goto out_err;
++ }
++
++ res = sysfs_create_file(&tgt->tgt_kobj,
++ &scst_tgt_addr_method.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add attribute %s for tgt %s",
++ scst_tgt_addr_method.attr.name, tgt->tgt_name);
++ goto out_err;
++ }
++
++ res = sysfs_create_file(&tgt->tgt_kobj,
++ &scst_tgt_io_grouping_type.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add attribute %s for tgt %s",
++ scst_tgt_io_grouping_type.attr.name, tgt->tgt_name);
++ goto out_err;
++ }
++
++ pattr = tgt->tgtt->tgt_attrs;
++ if (pattr != NULL) {
++ while (*pattr != NULL) {
++ TRACE_DBG("Creating attr %s for tgt %s", (*pattr)->name,
++ tgt->tgt_name);
++ res = sysfs_create_file(&tgt->tgt_kobj, *pattr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add tgt attr %s for tgt %s",
++ (*pattr)->name, tgt->tgt_name);
++ goto out_err;
++ }
++ pattr++;
++ }
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_nomem:
++ res = -ENOMEM;
++
++out_err:
++ mutex_unlock(&scst_mutex);
++ scst_tgt_sysfs_del(tgt);
++ mutex_lock(&scst_mutex);
++ goto out;
++}
++
++/*
++ * Must not be called under scst_mutex, due to possible deadlock with
++ * sysfs ref counting in sysfs works (it is waiting for the last put, but
++ * the last ref counter holder is waiting for scst_mutex)
++ */
++void scst_tgt_sysfs_del(struct scst_tgt *tgt)
++{
++ int rc;
++
++ TRACE_ENTRY();
++
++ kobject_del(tgt->tgt_sess_kobj);
++ kobject_del(tgt->tgt_luns_kobj);
++ kobject_del(tgt->tgt_ini_grp_kobj);
++ kobject_del(&tgt->tgt_kobj);
++
++ kobject_put(tgt->tgt_sess_kobj);
++ kobject_put(tgt->tgt_luns_kobj);
++ kobject_put(tgt->tgt_ini_grp_kobj);
++ kobject_put(&tgt->tgt_kobj);
++
++ rc = wait_for_completion_timeout(&tgt->tgt_kobj_release_cmpl, HZ);
++ if (rc == 0) {
++ PRINT_INFO("Waiting for releasing sysfs entry "
++ "for target %s (%d refs)...", tgt->tgt_name,
++ atomic_read(&tgt->tgt_kobj.kref.refcount));
++ wait_for_completion(&tgt->tgt_kobj_release_cmpl);
++ PRINT_INFO("Done waiting for releasing sysfs "
++ "entry for target %s", tgt->tgt_name);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ ** Devices directory implementation
++ **/
++
++static ssize_t scst_dev_sysfs_type_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos = 0;
++
++ struct scst_device *dev;
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++
++ pos = sprintf(buf, "%d - %s\n", dev->type,
++		(unsigned)dev->type >= ARRAY_SIZE(scst_dev_handler_types) ?
++ "unknown" : scst_dev_handler_types[dev->type]);
++
++ return pos;
++}
++
++static struct kobj_attribute dev_type_attr =
++ __ATTR(type, S_IRUGO, scst_dev_sysfs_type_show, NULL);
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++
++static ssize_t scst_dev_sysfs_dump_prs(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ struct scst_device *dev;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++
++ scst_pr_dump_prs(dev, true);
++
++ TRACE_EXIT_RES(count);
++ return count;
++}
++
++static struct kobj_attribute dev_dump_prs_attr =
++ __ATTR(dump_prs, S_IWUSR, NULL, scst_dev_sysfs_dump_prs);
++
++#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
++
++static int scst_process_dev_sysfs_threads_data_store(
++ struct scst_device *dev, int threads_num,
++ enum scst_dev_type_threads_pool_type threads_pool_type)
++{
++ int res = 0;
++ int oldtn = dev->threads_num;
++ enum scst_dev_type_threads_pool_type oldtt = dev->threads_pool_type;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("dev %p, threads_num %d, threads_pool_type %d", dev,
++ threads_num, threads_pool_type);
++
++ res = scst_suspend_activity(true);
++ if (res != 0)
++ goto out;
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out_resume;
++ }
++
++ /* Check if our pointer is still alive */
++ if (scst_check_dev_ptr(dev) != 0)
++ goto out_unlock;
++
++ scst_stop_dev_threads(dev);
++
++ dev->threads_num = threads_num;
++ dev->threads_pool_type = threads_pool_type;
++
++ res = scst_create_dev_threads(dev);
++ if (res != 0)
++ goto out_unlock;
++
++ if (oldtn != dev->threads_num)
++ PRINT_INFO("Changed cmd threads num to %d", dev->threads_num);
++ else if (oldtt != dev->threads_pool_type)
++ PRINT_INFO("Changed cmd threads pool type to %d",
++ dev->threads_pool_type);
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++
++out_resume:
++ scst_resume_activity();
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_dev_sysfs_threads_data_store_work_fn(
++ struct scst_sysfs_work_item *work)
++{
++ return scst_process_dev_sysfs_threads_data_store(work->dev,
++ work->new_threads_num, work->new_threads_pool_type);
++}
++
++static ssize_t scst_dev_sysfs_check_threads_data(
++ struct scst_device *dev, int threads_num,
++ enum scst_dev_type_threads_pool_type threads_pool_type, bool *stop)
++{
++ int res = 0;
++
++ *stop = false;
++
++ if (dev->threads_num < 0) {
++ PRINT_ERROR("Threads pool disabled for device %s",
++ dev->virt_name);
++ res = -EPERM;
++ goto out;
++ }
++
++ if ((threads_num == dev->threads_num) &&
++ (threads_pool_type == dev->threads_pool_type)) {
++ *stop = true;
++ goto out;
++ }
++
++out:
++ return res;
++}
++
++static ssize_t scst_dev_sysfs_threads_num_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos = 0;
++ struct scst_device *dev;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++
++ pos = sprintf(buf, "%d\n%s", dev->threads_num,
++ (dev->threads_num != dev->handler->threads_num) ?
++ SCST_SYSFS_KEY_MARK "\n" : "");
++
++ TRACE_EXIT_RES(pos);
++ return pos;
++}
++
++static ssize_t scst_dev_sysfs_threads_num_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ struct scst_device *dev;
++ long newtn;
++ bool stop;
++ struct scst_sysfs_work_item *work;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++
++ res = strict_strtol(buf, 0, &newtn);
++ if (res != 0) {
++ PRINT_ERROR("strict_strtol() for %s failed: %d ", buf, res);
++ goto out;
++ }
++ if (newtn < 0) {
++ PRINT_ERROR("Illegal threads num value %ld", newtn);
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = scst_dev_sysfs_check_threads_data(dev, newtn,
++ dev->threads_pool_type, &stop);
++ if ((res != 0) || stop)
++ goto out;
++
++ res = scst_alloc_sysfs_work(scst_dev_sysfs_threads_data_store_work_fn,
++ false, &work);
++ if (res != 0)
++ goto out;
++
++ work->dev = dev;
++ work->new_threads_num = newtn;
++ work->new_threads_pool_type = dev->threads_pool_type;
++
++ res = scst_sysfs_queue_wait_work(work);
++ if (res == 0)
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct kobj_attribute dev_threads_num_attr =
++ __ATTR(threads_num, S_IRUGO | S_IWUSR,
++ scst_dev_sysfs_threads_num_show,
++ scst_dev_sysfs_threads_num_store);
++
++static ssize_t scst_dev_sysfs_threads_pool_type_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos = 0;
++ struct scst_device *dev;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++
++ if (dev->threads_num == 0) {
++ pos = sprintf(buf, "Async\n");
++ goto out;
++ } else if (dev->threads_num < 0) {
++ pos = sprintf(buf, "Not valid\n");
++ goto out;
++ }
++
++ switch (dev->threads_pool_type) {
++ case SCST_THREADS_POOL_PER_INITIATOR:
++ pos = sprintf(buf, "%s\n%s", SCST_THREADS_POOL_PER_INITIATOR_STR,
++ (dev->threads_pool_type != dev->handler->threads_pool_type) ?
++ SCST_SYSFS_KEY_MARK "\n" : "");
++ break;
++ case SCST_THREADS_POOL_SHARED:
++ pos = sprintf(buf, "%s\n%s", SCST_THREADS_POOL_SHARED_STR,
++ (dev->threads_pool_type != dev->handler->threads_pool_type) ?
++ SCST_SYSFS_KEY_MARK "\n" : "");
++ break;
++ default:
++ pos = sprintf(buf, "Unknown\n");
++ break;
++ }
++
++out:
++ TRACE_EXIT_RES(pos);
++ return pos;
++}
++
++static ssize_t scst_dev_sysfs_threads_pool_type_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ struct scst_device *dev;
++ enum scst_dev_type_threads_pool_type newtpt;
++ struct scst_sysfs_work_item *work;
++ bool stop;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++
++ newtpt = scst_parse_threads_pool_type(buf, count);
++ if (newtpt == SCST_THREADS_POOL_TYPE_INVALID) {
++ PRINT_ERROR("Illegal threads pool type %s", buf);
++ res = -EINVAL;
++ goto out;
++ }
++
++ TRACE_DBG("buf %s, count %zd, newtpt %d", buf, count, newtpt);
++
++ res = scst_dev_sysfs_check_threads_data(dev, dev->threads_num,
++ newtpt, &stop);
++ if ((res != 0) || stop)
++ goto out;
++
++ res = scst_alloc_sysfs_work(scst_dev_sysfs_threads_data_store_work_fn,
++ false, &work);
++ if (res != 0)
++ goto out;
++
++ work->dev = dev;
++ work->new_threads_num = dev->threads_num;
++ work->new_threads_pool_type = newtpt;
++
++ res = scst_sysfs_queue_wait_work(work);
++ if (res == 0)
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct kobj_attribute dev_threads_pool_type_attr =
++ __ATTR(threads_pool_type, S_IRUGO | S_IWUSR,
++ scst_dev_sysfs_threads_pool_type_show,
++ scst_dev_sysfs_threads_pool_type_store);
++
++static struct attribute *scst_dev_attrs[] = {
++ &dev_type_attr.attr,
++ NULL,
++};
++
++static void scst_sysfs_dev_release(struct kobject *kobj)
++{
++ struct scst_device *dev;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++ complete_all(&dev->dev_kobj_release_cmpl);
++
++ TRACE_EXIT();
++ return;
++}
++
++int scst_devt_dev_sysfs_create(struct scst_device *dev)
++{
++ int res = 0;
++ const struct attribute **pattr;
++
++ TRACE_ENTRY();
++
++ if (dev->handler == &scst_null_devtype)
++ goto out;
++
++ res = sysfs_create_link(&dev->dev_kobj,
++ &dev->handler->devt_kobj, "handler");
++ if (res != 0) {
++ PRINT_ERROR("Can't create handler link for dev %s",
++ dev->virt_name);
++ goto out;
++ }
++
++ res = sysfs_create_link(&dev->handler->devt_kobj,
++ &dev->dev_kobj, dev->virt_name);
++ if (res != 0) {
++ PRINT_ERROR("Can't create handler link for dev %s",
++ dev->virt_name);
++ goto out_err;
++ }
++
++ if (dev->handler->threads_num >= 0) {
++ res = sysfs_create_file(&dev->dev_kobj,
++ &dev_threads_num_attr.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add dev attr %s for dev %s",
++ dev_threads_num_attr.attr.name,
++ dev->virt_name);
++ goto out_err;
++ }
++ res = sysfs_create_file(&dev->dev_kobj,
++ &dev_threads_pool_type_attr.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add dev attr %s for dev %s",
++ dev_threads_pool_type_attr.attr.name,
++ dev->virt_name);
++ goto out_err;
++ }
++ }
++
++ pattr = dev->handler->dev_attrs;
++ if (pattr != NULL) {
++ while (*pattr != NULL) {
++ res = sysfs_create_file(&dev->dev_kobj, *pattr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add dev attr %s for dev %s",
++ (*pattr)->name, dev->virt_name);
++ goto out_err;
++ }
++ pattr++;
++ }
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_err:
++ scst_devt_dev_sysfs_del(dev);
++ goto out;
++}
++
++void scst_devt_dev_sysfs_del(struct scst_device *dev)
++{
++ const struct attribute **pattr;
++
++ TRACE_ENTRY();
++
++ if (dev->handler == &scst_null_devtype)
++ goto out;
++
++ pattr = dev->handler->dev_attrs;
++ if (pattr != NULL) {
++ while (*pattr != NULL) {
++ sysfs_remove_file(&dev->dev_kobj, *pattr);
++ pattr++;
++ }
++ }
++
++ sysfs_remove_link(&dev->dev_kobj, "handler");
++ sysfs_remove_link(&dev->handler->devt_kobj, dev->virt_name);
++
++ if (dev->handler->threads_num >= 0) {
++ sysfs_remove_file(&dev->dev_kobj,
++ &dev_threads_num_attr.attr);
++ sysfs_remove_file(&dev->dev_kobj,
++ &dev_threads_pool_type_attr.attr);
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static struct kobj_type scst_dev_ktype = {
++ .sysfs_ops = &scst_sysfs_ops,
++ .release = scst_sysfs_dev_release,
++ .default_attrs = scst_dev_attrs,
++};
++
++/*
++ * Must not be called under scst_mutex, because it can call
++ * scst_dev_sysfs_del()
++ */
++int scst_dev_sysfs_create(struct scst_device *dev)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ init_completion(&dev->dev_kobj_release_cmpl);
++
++ res = kobject_init_and_add(&dev->dev_kobj, &scst_dev_ktype,
++ scst_devices_kobj, dev->virt_name);
++ if (res != 0) {
++ PRINT_ERROR("Can't add device %s to sysfs", dev->virt_name);
++ goto out;
++ }
++
++ dev->dev_exp_kobj = kobject_create_and_add("exported",
++ &dev->dev_kobj);
++ if (dev->dev_exp_kobj == NULL) {
++ PRINT_ERROR("Can't create exported link for device %s",
++ dev->virt_name);
++ res = -ENOMEM;
++ goto out_del;
++ }
++
++ if (dev->scsi_dev != NULL) {
++ res = sysfs_create_link(&dev->dev_kobj,
++ &dev->scsi_dev->sdev_dev.kobj, "scsi_device");
++ if (res != 0) {
++ PRINT_ERROR("Can't create scsi_device link for dev %s",
++ dev->virt_name);
++ goto out_del;
++ }
++ }
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ if (dev->scsi_dev == NULL) {
++ res = sysfs_create_file(&dev->dev_kobj,
++ &dev_dump_prs_attr.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't create attr %s for dev %s",
++ dev_dump_prs_attr.attr.name, dev->virt_name);
++ goto out_del;
++ }
++ }
++#endif
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_del:
++ scst_dev_sysfs_del(dev);
++ goto out;
++}
++
++/*
++ * Must not be called under scst_mutex, due to possible deadlock with
++ * sysfs ref counting in sysfs works (it is waiting for the last put, but
++ * the last ref counter holder is waiting for scst_mutex)
++ */
++void scst_dev_sysfs_del(struct scst_device *dev)
++{
++ int rc;
++
++ TRACE_ENTRY();
++
++ kobject_del(dev->dev_exp_kobj);
++ kobject_del(&dev->dev_kobj);
++
++ kobject_put(dev->dev_exp_kobj);
++ kobject_put(&dev->dev_kobj);
++
++ rc = wait_for_completion_timeout(&dev->dev_kobj_release_cmpl, HZ);
++ if (rc == 0) {
++ PRINT_INFO("Waiting for releasing sysfs entry "
++ "for device %s (%d refs)...", dev->virt_name,
++ atomic_read(&dev->dev_kobj.kref.refcount));
++ wait_for_completion(&dev->dev_kobj_release_cmpl);
++ PRINT_INFO("Done waiting for releasing sysfs "
++ "entry for device %s", dev->virt_name);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ ** Tgt_dev's directory implementation
++ **/
++
++#ifdef CONFIG_SCST_MEASURE_LATENCY
++
++static char *scst_io_size_names[] = {
++ "<=8K ",
++ "<=32K ",
++ "<=128K",
++ "<=512K",
++ ">512K "
++};
++
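++/*
++ * For each I/O size bucket, print one "Write" and one "Read" line with the
++ * processed command count followed by min/avg/max/total times for the SCST
++ * core, target driver and device stages of command processing.
++ */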
++static ssize_t scst_tgt_dev_latency_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buffer)
++{
++ int res = 0, i;
++ char buf[50];
++ struct scst_tgt_dev *tgt_dev;
++
++ TRACE_ENTRY();
++
++ tgt_dev = container_of(kobj, struct scst_tgt_dev, tgt_dev_kobj);
++
++ for (i = 0; i < SCST_LATENCY_STATS_NUM; i++) {
++ uint64_t scst_time_wr, tgt_time_wr, dev_time_wr;
++ unsigned int processed_cmds_wr;
++ uint64_t scst_time_rd, tgt_time_rd, dev_time_rd;
++ unsigned int processed_cmds_rd;
++ struct scst_ext_latency_stat *latency_stat;
++
++ latency_stat = &tgt_dev->dev_latency_stat[i];
++ scst_time_wr = latency_stat->scst_time_wr;
++ scst_time_rd = latency_stat->scst_time_rd;
++ tgt_time_wr = latency_stat->tgt_time_wr;
++ tgt_time_rd = latency_stat->tgt_time_rd;
++ dev_time_wr = latency_stat->dev_time_wr;
++ dev_time_rd = latency_stat->dev_time_rd;
++ processed_cmds_wr = latency_stat->processed_cmds_wr;
++ processed_cmds_rd = latency_stat->processed_cmds_rd;
++
++ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
++ "%-5s %-9s %-15lu ", "Write", scst_io_size_names[i],
++ (unsigned long)processed_cmds_wr);
++ if (processed_cmds_wr == 0)
++ processed_cmds_wr = 1;
++
++ do_div(scst_time_wr, processed_cmds_wr);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_scst_time_wr,
++ (unsigned long)scst_time_wr,
++ (unsigned long)latency_stat->max_scst_time_wr,
++ (unsigned long)latency_stat->scst_time_wr);
++ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
++ "%-47s", buf);
++
++ do_div(tgt_time_wr, processed_cmds_wr);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_tgt_time_wr,
++ (unsigned long)tgt_time_wr,
++ (unsigned long)latency_stat->max_tgt_time_wr,
++ (unsigned long)latency_stat->tgt_time_wr);
++ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
++ "%-47s", buf);
++
++ do_div(dev_time_wr, processed_cmds_wr);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_dev_time_wr,
++ (unsigned long)dev_time_wr,
++ (unsigned long)latency_stat->max_dev_time_wr,
++ (unsigned long)latency_stat->dev_time_wr);
++ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
++ "%-47s\n", buf);
++
++ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
++ "%-5s %-9s %-15lu ", "Read", scst_io_size_names[i],
++ (unsigned long)processed_cmds_rd);
++ if (processed_cmds_rd == 0)
++ processed_cmds_rd = 1;
++
++ do_div(scst_time_rd, processed_cmds_rd);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_scst_time_rd,
++ (unsigned long)scst_time_rd,
++ (unsigned long)latency_stat->max_scst_time_rd,
++ (unsigned long)latency_stat->scst_time_rd);
++ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
++ "%-47s", buf);
++
++ do_div(tgt_time_rd, processed_cmds_rd);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_tgt_time_rd,
++ (unsigned long)tgt_time_rd,
++ (unsigned long)latency_stat->max_tgt_time_rd,
++ (unsigned long)latency_stat->tgt_time_rd);
++ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
++ "%-47s", buf);
++
++ do_div(dev_time_rd, processed_cmds_rd);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_dev_time_rd,
++ (unsigned long)dev_time_rd,
++ (unsigned long)latency_stat->max_dev_time_rd,
++ (unsigned long)latency_stat->dev_time_rd);
++ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
++ "%-47s\n", buf);
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct kobj_attribute tgt_dev_latency_attr =
++ __ATTR(latency, S_IRUGO,
++ scst_tgt_dev_latency_show, NULL);
++
++#endif /* CONFIG_SCST_MEASURE_LATENCY */
++
++static ssize_t scst_tgt_dev_active_commands_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos = 0;
++ struct scst_tgt_dev *tgt_dev;
++
++ tgt_dev = container_of(kobj, struct scst_tgt_dev, tgt_dev_kobj);
++
++ pos = sprintf(buf, "%d\n", atomic_read(&tgt_dev->tgt_dev_cmd_count));
++
++ return pos;
++}
++
++static struct kobj_attribute tgt_dev_active_commands_attr =
++ __ATTR(active_commands, S_IRUGO,
++ scst_tgt_dev_active_commands_show, NULL);
++
++static struct attribute *scst_tgt_dev_attrs[] = {
++ &tgt_dev_active_commands_attr.attr,
++#ifdef CONFIG_SCST_MEASURE_LATENCY
++ &tgt_dev_latency_attr.attr,
++#endif
++ NULL,
++};
++
++static void scst_sysfs_tgt_dev_release(struct kobject *kobj)
++{
++ struct scst_tgt_dev *tgt_dev;
++
++ TRACE_ENTRY();
++
++ tgt_dev = container_of(kobj, struct scst_tgt_dev, tgt_dev_kobj);
++ complete_all(&tgt_dev->tgt_dev_kobj_release_cmpl);
++
++ TRACE_EXIT();
++ return;
++}
++
++static struct kobj_type scst_tgt_dev_ktype = {
++ .sysfs_ops = &scst_sysfs_ops,
++ .release = scst_sysfs_tgt_dev_release,
++ .default_attrs = scst_tgt_dev_attrs,
++};
++
++int scst_tgt_dev_sysfs_create(struct scst_tgt_dev *tgt_dev)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ init_completion(&tgt_dev->tgt_dev_kobj_release_cmpl);
++
++ res = kobject_init_and_add(&tgt_dev->tgt_dev_kobj, &scst_tgt_dev_ktype,
++ &tgt_dev->sess->sess_kobj, "lun%lld",
++ (unsigned long long)tgt_dev->lun);
++ if (res != 0) {
++ PRINT_ERROR("Can't add tgt_dev %lld to sysfs",
++ (unsigned long long)tgt_dev->lun);
++ goto out;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/*
++ * Called with scst_mutex held.
++ *
++ * !! No sysfs work may use kobject_get() to protect tgt_dev, due to a possible
++ * !! deadlock with scst_mutex (it is waiting for the last put, but
++ * !! the last ref counter holder is waiting for scst_mutex)
++ */
++void scst_tgt_dev_sysfs_del(struct scst_tgt_dev *tgt_dev)
++{
++ int rc;
++
++ TRACE_ENTRY();
++
++ kobject_del(&tgt_dev->tgt_dev_kobj);
++ kobject_put(&tgt_dev->tgt_dev_kobj);
++
++ rc = wait_for_completion_timeout(
++ &tgt_dev->tgt_dev_kobj_release_cmpl, HZ);
++ if (rc == 0) {
++ PRINT_INFO("Waiting for releasing sysfs entry "
++ "for tgt_dev %lld (%d refs)...",
++ (unsigned long long)tgt_dev->lun,
++ atomic_read(&tgt_dev->tgt_dev_kobj.kref.refcount));
++ wait_for_completion(&tgt_dev->tgt_dev_kobj_release_cmpl);
++ PRINT_INFO("Done waiting for releasing sysfs entry for "
++ "tgt_dev %lld", (unsigned long long)tgt_dev->lun);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ ** Sessions subdirectory implementation
++ **/
++
++#ifdef CONFIG_SCST_MEASURE_LATENCY
++
++static ssize_t scst_sess_latency_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buffer)
++{
++ ssize_t res = 0;
++ struct scst_session *sess;
++ int i;
++ char buf[50];
++ uint64_t scst_time, tgt_time, dev_time;
++ unsigned int processed_cmds;
++
++ TRACE_ENTRY();
++
++ sess = container_of(kobj, struct scst_session, sess_kobj);
++
++ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
++ "%-15s %-15s %-46s %-46s %-46s\n",
++ "T-L names", "Total commands", "SCST latency",
++ "Target latency", "Dev latency (min/avg/max/all ns)");
++
++ spin_lock_bh(&sess->lat_lock);
++
++ for (i = 0; i < SCST_LATENCY_STATS_NUM ; i++) {
++ uint64_t scst_time_wr, tgt_time_wr, dev_time_wr;
++ unsigned int processed_cmds_wr;
++ uint64_t scst_time_rd, tgt_time_rd, dev_time_rd;
++ unsigned int processed_cmds_rd;
++ struct scst_ext_latency_stat *latency_stat;
++
++ latency_stat = &sess->sess_latency_stat[i];
++ scst_time_wr = latency_stat->scst_time_wr;
++ scst_time_rd = latency_stat->scst_time_rd;
++ tgt_time_wr = latency_stat->tgt_time_wr;
++ tgt_time_rd = latency_stat->tgt_time_rd;
++ dev_time_wr = latency_stat->dev_time_wr;
++ dev_time_rd = latency_stat->dev_time_rd;
++ processed_cmds_wr = latency_stat->processed_cmds_wr;
++ processed_cmds_rd = latency_stat->processed_cmds_rd;
++
++ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
++ "%-5s %-9s %-15lu ",
++ "Write", scst_io_size_names[i],
++ (unsigned long)processed_cmds_wr);
++ if (processed_cmds_wr == 0)
++ processed_cmds_wr = 1;
++
++ do_div(scst_time_wr, processed_cmds_wr);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_scst_time_wr,
++ (unsigned long)scst_time_wr,
++ (unsigned long)latency_stat->max_scst_time_wr,
++ (unsigned long)latency_stat->scst_time_wr);
++ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
++ "%-47s", buf);
++
++ do_div(tgt_time_wr, processed_cmds_wr);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_tgt_time_wr,
++ (unsigned long)tgt_time_wr,
++ (unsigned long)latency_stat->max_tgt_time_wr,
++ (unsigned long)latency_stat->tgt_time_wr);
++ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
++ "%-47s", buf);
++
++ do_div(dev_time_wr, processed_cmds_wr);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_dev_time_wr,
++ (unsigned long)dev_time_wr,
++ (unsigned long)latency_stat->max_dev_time_wr,
++ (unsigned long)latency_stat->dev_time_wr);
++ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
++ "%-47s\n", buf);
++
++ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
++ "%-5s %-9s %-15lu ",
++ "Read", scst_io_size_names[i],
++ (unsigned long)processed_cmds_rd);
++ if (processed_cmds_rd == 0)
++ processed_cmds_rd = 1;
++
++ do_div(scst_time_rd, processed_cmds_rd);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_scst_time_rd,
++ (unsigned long)scst_time_rd,
++ (unsigned long)latency_stat->max_scst_time_rd,
++ (unsigned long)latency_stat->scst_time_rd);
++ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
++ "%-47s", buf);
++
++ do_div(tgt_time_rd, processed_cmds_rd);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_tgt_time_rd,
++ (unsigned long)tgt_time_rd,
++ (unsigned long)latency_stat->max_tgt_time_rd,
++ (unsigned long)latency_stat->tgt_time_rd);
++ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
++ "%-47s", buf);
++
++ do_div(dev_time_rd, processed_cmds_rd);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_dev_time_rd,
++ (unsigned long)dev_time_rd,
++ (unsigned long)latency_stat->max_dev_time_rd,
++ (unsigned long)latency_stat->dev_time_rd);
++ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
++ "%-47s\n", buf);
++ }
++
++ scst_time = sess->scst_time;
++ tgt_time = sess->tgt_time;
++ dev_time = sess->dev_time;
++ processed_cmds = sess->processed_cmds;
++
++ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
++ "\n%-15s %-16d", "Overall ", processed_cmds);
++
++ if (processed_cmds == 0)
++ processed_cmds = 1;
++
++ do_div(scst_time, processed_cmds);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)sess->min_scst_time,
++ (unsigned long)scst_time,
++ (unsigned long)sess->max_scst_time,
++ (unsigned long)sess->scst_time);
++ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
++ "%-47s", buf);
++
++ do_div(tgt_time, processed_cmds);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)sess->min_tgt_time,
++ (unsigned long)tgt_time,
++ (unsigned long)sess->max_tgt_time,
++ (unsigned long)sess->tgt_time);
++ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
++ "%-47s", buf);
++
++ do_div(dev_time, processed_cmds);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)sess->min_dev_time,
++ (unsigned long)dev_time,
++ (unsigned long)sess->max_dev_time,
++ (unsigned long)sess->dev_time);
++ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
++ "%-47s\n\n", buf);
++
++ spin_unlock_bh(&sess->lat_lock);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_sess_zero_latency(struct scst_sysfs_work_item *work)
++{
++ int res = 0, t;
++ struct scst_session *sess = work->sess;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out_put;
++ }
++
++ PRINT_INFO("Zeroing latency statistics for initiator "
++ "%s", sess->initiator_name);
++
++ spin_lock_bh(&sess->lat_lock);
++
++ sess->scst_time = 0;
++ sess->tgt_time = 0;
++ sess->dev_time = 0;
++ sess->min_scst_time = 0;
++ sess->min_tgt_time = 0;
++ sess->min_dev_time = 0;
++ sess->max_scst_time = 0;
++ sess->max_tgt_time = 0;
++ sess->max_dev_time = 0;
++ sess->processed_cmds = 0;
++ memset(sess->sess_latency_stat, 0,
++ sizeof(sess->sess_latency_stat));
++
++ for (t = TGT_DEV_HASH_SIZE-1; t >= 0; t--) {
++ struct list_head *sess_tgt_dev_list_head =
++ &sess->sess_tgt_dev_list_hash[t];
++ struct scst_tgt_dev *tgt_dev;
++ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
++ sess_tgt_dev_list_entry) {
++ tgt_dev->scst_time = 0;
++ tgt_dev->tgt_time = 0;
++ tgt_dev->dev_time = 0;
++ tgt_dev->processed_cmds = 0;
++ memset(tgt_dev->dev_latency_stat, 0,
++ sizeof(tgt_dev->dev_latency_stat));
++ }
++ }
++
++ spin_unlock_bh(&sess->lat_lock);
++
++ mutex_unlock(&scst_mutex);
++
++out_put:
++ kobject_put(&sess->sess_kobj);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t scst_sess_latency_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ struct scst_session *sess;
++ struct scst_sysfs_work_item *work;
++
++ TRACE_ENTRY();
++
++ sess = container_of(kobj, struct scst_session, sess_kobj);
++
++ res = scst_alloc_sysfs_work(scst_sess_zero_latency, false, &work);
++ if (res != 0)
++ goto out;
++
++ work->sess = sess;
++
++ kobject_get(&sess->sess_kobj);
++
++ res = scst_sysfs_queue_wait_work(work);
++ if (res == 0)
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct kobj_attribute session_latency_attr =
++ __ATTR(latency, S_IRUGO | S_IWUSR, scst_sess_latency_show,
++ scst_sess_latency_store);
++
++#endif /* CONFIG_SCST_MEASURE_LATENCY */
++
++static ssize_t scst_sess_sysfs_commands_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_session *sess;
++
++ sess = container_of(kobj, struct scst_session, sess_kobj);
++
++ return sprintf(buf, "%i\n", atomic_read(&sess->sess_cmd_count));
++}
++
++static struct kobj_attribute session_commands_attr =
++ __ATTR(commands, S_IRUGO, scst_sess_sysfs_commands_show, NULL);
++
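++/*
++ * Sum the active command counts of all tgt_devs of a session under
++ * scst_mutex. Runs from a sysfs work item; drops the session kobject
++ * reference taken by the caller before returning.
++ */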
++static int scst_sysfs_sess_get_active_commands(struct scst_session *sess)
++{
++ int res;
++ int active_cmds = 0, t;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out_put;
++ }
++
++ for (t = TGT_DEV_HASH_SIZE-1; t >= 0; t--) {
++ struct list_head *sess_tgt_dev_list_head =
++ &sess->sess_tgt_dev_list_hash[t];
++ struct scst_tgt_dev *tgt_dev;
++ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
++ sess_tgt_dev_list_entry) {
++ active_cmds += atomic_read(&tgt_dev->tgt_dev_cmd_count);
++ }
++ }
++
++ mutex_unlock(&scst_mutex);
++
++ res = active_cmds;
++
++out_put:
++ kobject_put(&sess->sess_kobj);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_sysfs_sess_get_active_commands_work_fn(struct scst_sysfs_work_item *work)
++{
++ return scst_sysfs_sess_get_active_commands(work->sess);
++}
++
++static ssize_t scst_sess_sysfs_active_commands_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int res;
++ struct scst_session *sess;
++ struct scst_sysfs_work_item *work;
++
++ sess = container_of(kobj, struct scst_session, sess_kobj);
++
++ res = scst_alloc_sysfs_work(scst_sysfs_sess_get_active_commands_work_fn,
++ true, &work);
++ if (res != 0)
++ goto out;
++
++ work->sess = sess;
++
++ kobject_get(&sess->sess_kobj);
++
++ res = scst_sysfs_queue_wait_work(work);
++ if (res != -EAGAIN)
++ res = sprintf(buf, "%i\n", res);
++
++out:
++ return res;
++}
++
++static struct kobj_attribute session_active_commands_attr =
++ __ATTR(active_commands, S_IRUGO, scst_sess_sysfs_active_commands_show,
++ NULL);
++
++static ssize_t scst_sess_sysfs_initiator_name_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_session *sess;
++
++ sess = container_of(kobj, struct scst_session, sess_kobj);
++
++ return scnprintf(buf, SCST_SYSFS_BLOCK_SIZE, "%s\n",
++ sess->initiator_name);
++}
++
++static struct kobj_attribute session_initiator_name_attr =
++ __ATTR(initiator_name, S_IRUGO, scst_sess_sysfs_initiator_name_show, NULL);
++
++static struct attribute *scst_session_attrs[] = {
++ &session_commands_attr.attr,
++ &session_active_commands_attr.attr,
++ &session_initiator_name_attr.attr,
++#ifdef CONFIG_SCST_MEASURE_LATENCY
++ &session_latency_attr.attr,
++#endif /* CONFIG_SCST_MEASURE_LATENCY */
++ NULL,
++};
++
++static void scst_sysfs_session_release(struct kobject *kobj)
++{
++ struct scst_session *sess;
++
++ TRACE_ENTRY();
++
++ sess = container_of(kobj, struct scst_session, sess_kobj);
++ complete_all(&sess->sess_kobj_release_cmpl);
++
++ TRACE_EXIT();
++ return;
++}
++
++static struct kobj_type scst_session_ktype = {
++ .sysfs_ops = &scst_sysfs_ops,
++ .release = scst_sysfs_session_release,
++ .default_attrs = scst_session_attrs,
++};
++
++static int scst_create_sess_luns_link(struct scst_session *sess)
++{
++ int res;
++
++	/*
++	 * No locks are needed, because sess is supposed to be on
++	 * acg->acg_sess_list and tgt->sess_list, which keeps acg and tgt
++	 * from disappearing.
++	 */
++
++ if (sess->acg == sess->tgt->default_acg)
++ res = sysfs_create_link(&sess->sess_kobj,
++ sess->tgt->tgt_luns_kobj, "luns");
++ else
++ res = sysfs_create_link(&sess->sess_kobj,
++ sess->acg->luns_kobj, "luns");
++
++ if (res != 0)
++ PRINT_ERROR("Can't create luns link for initiator %s",
++ sess->initiator_name);
++
++ return res;
++}
++
++int scst_recreate_sess_luns_link(struct scst_session *sess)
++{
++ sysfs_remove_link(&sess->sess_kobj, "luns");
++ return scst_create_sess_luns_link(sess);
++}
++
++/* Supposed to be called under scst_mutex */
++int scst_sess_sysfs_create(struct scst_session *sess)
++{
++ int res = 0;
++ struct scst_session *s;
++ const struct attribute **pattr;
++ char *name = (char *)sess->initiator_name;
++ int len = strlen(name) + 1, n = 1;
++
++ TRACE_ENTRY();
++
++restart:
++ list_for_each_entry(s, &sess->tgt->sess_list, sess_list_entry) {
++ if (!s->sess_kobj_ready)
++ continue;
++
++ if (strcmp(name, kobject_name(&s->sess_kobj)) == 0) {
++ if (s == sess)
++ continue;
++
++			TRACE_DBG("Duplicate session from the same initiator "
++				"%s found", name);
++
++ if (name == sess->initiator_name) {
++ len = strlen(sess->initiator_name);
++ len += 20;
++ name = kmalloc(len, GFP_KERNEL);
++				if (name == NULL) {
++					PRINT_ERROR("Unable to allocate a "
++						"replacement name (size %d)",
++						len);
++					res = -ENOMEM;
++					goto out_free;
++				}
++ }
++
++ snprintf(name, len, "%s_%d", sess->initiator_name, n);
++ n++;
++ goto restart;
++ }
++ }
++
++ init_completion(&sess->sess_kobj_release_cmpl);
++
++ TRACE_DBG("Adding session %s to sysfs", name);
++
++ res = kobject_init_and_add(&sess->sess_kobj, &scst_session_ktype,
++ sess->tgt->tgt_sess_kobj, name);
++ if (res != 0) {
++ PRINT_ERROR("Can't add session %s to sysfs", name);
++ goto out_free;
++ }
++
++ sess->sess_kobj_ready = 1;
++
++ pattr = sess->tgt->tgtt->sess_attrs;
++ if (pattr != NULL) {
++ while (*pattr != NULL) {
++ res = sysfs_create_file(&sess->sess_kobj, *pattr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add sess attr %s for sess "
++ "for initiator %s", (*pattr)->name,
++ name);
++ goto out_free;
++ }
++ pattr++;
++ }
++ }
++
++ res = scst_create_sess_luns_link(sess);
++
++out_free:
++ if (name != sess->initiator_name)
++ kfree(name);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/*
++ * Must not be called under scst_mutex, due to possible deadlock with
++ * sysfs ref counting in sysfs works (it is waiting for the last put, but
++ * the last ref counter holder is waiting for scst_mutex)
++ */
++void scst_sess_sysfs_del(struct scst_session *sess)
++{
++ int rc;
++
++ TRACE_ENTRY();
++
++ if (!sess->sess_kobj_ready)
++ goto out;
++
++ TRACE_DBG("Deleting session %s from sysfs",
++ kobject_name(&sess->sess_kobj));
++
++ kobject_del(&sess->sess_kobj);
++ kobject_put(&sess->sess_kobj);
++
++ rc = wait_for_completion_timeout(&sess->sess_kobj_release_cmpl, HZ);
++ if (rc == 0) {
++ PRINT_INFO("Waiting for releasing sysfs entry "
++ "for session from %s (%d refs)...", sess->initiator_name,
++ atomic_read(&sess->sess_kobj.kref.refcount));
++ wait_for_completion(&sess->sess_kobj_release_cmpl);
++ PRINT_INFO("Done waiting for releasing sysfs "
++ "entry for session %s", sess->initiator_name);
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ ** Target luns directory implementation
++ **/
++
++static void scst_acg_dev_release(struct kobject *kobj)
++{
++ struct scst_acg_dev *acg_dev;
++
++ TRACE_ENTRY();
++
++ acg_dev = container_of(kobj, struct scst_acg_dev, acg_dev_kobj);
++ complete_all(&acg_dev->acg_dev_kobj_release_cmpl);
++
++ TRACE_EXIT();
++ return;
++}
++
++static ssize_t scst_lun_rd_only_show(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ char *buf)
++{
++ struct scst_acg_dev *acg_dev;
++
++ acg_dev = container_of(kobj, struct scst_acg_dev, acg_dev_kobj);
++
++ if (acg_dev->rd_only || acg_dev->dev->rd_only)
++ return sprintf(buf, "%d\n%s\n", 1, SCST_SYSFS_KEY_MARK);
++ else
++ return sprintf(buf, "%d\n", 0);
++}
++
++static struct kobj_attribute lun_options_attr =
++ __ATTR(read_only, S_IRUGO, scst_lun_rd_only_show, NULL);
++
++static struct attribute *lun_attrs[] = {
++ &lun_options_attr.attr,
++ NULL,
++};
++
++static struct kobj_type acg_dev_ktype = {
++ .sysfs_ops = &scst_sysfs_ops,
++ .release = scst_acg_dev_release,
++ .default_attrs = lun_attrs,
++};
++
++/*
++ * Called with scst_mutex held.
++ *
++ * !! No sysfs works must use kobject_get() to protect acg_dev, due to possible
++ * !! deadlock with scst_mutex (it is waiting for the last put, but
++ * !! the last ref counter holder is waiting for scst_mutex)
++ */
++void scst_acg_dev_sysfs_del(struct scst_acg_dev *acg_dev)
++{
++ int rc;
++
++ TRACE_ENTRY();
++
++ if (acg_dev->dev != NULL) {
++ sysfs_remove_link(acg_dev->dev->dev_exp_kobj,
++ acg_dev->acg_dev_link_name);
++ kobject_put(&acg_dev->dev->dev_kobj);
++ }
++
++ kobject_del(&acg_dev->acg_dev_kobj);
++ kobject_put(&acg_dev->acg_dev_kobj);
++
++ rc = wait_for_completion_timeout(&acg_dev->acg_dev_kobj_release_cmpl, HZ);
++ if (rc == 0) {
++ PRINT_INFO("Waiting for releasing sysfs entry "
++ "for acg_dev %p (%d refs)...", acg_dev,
++ atomic_read(&acg_dev->acg_dev_kobj.kref.refcount));
++ wait_for_completion(&acg_dev->acg_dev_kobj_release_cmpl);
++ PRINT_INFO("Done waiting for releasing sysfs "
++ "entry for acg_dev %p", acg_dev);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++int scst_acg_dev_sysfs_create(struct scst_acg_dev *acg_dev,
++ struct kobject *parent)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ init_completion(&acg_dev->acg_dev_kobj_release_cmpl);
++
++ res = kobject_init_and_add(&acg_dev->acg_dev_kobj, &acg_dev_ktype,
++ parent, "%u", acg_dev->lun);
++ if (res != 0) {
++ PRINT_ERROR("Can't add acg_dev %p to sysfs", acg_dev);
++ goto out;
++ }
++
++ kobject_get(&acg_dev->dev->dev_kobj);
++
++ snprintf(acg_dev->acg_dev_link_name, sizeof(acg_dev->acg_dev_link_name),
++ "export%u", acg_dev->dev->dev_exported_lun_num++);
++
++ res = sysfs_create_link(acg_dev->dev->dev_exp_kobj,
++ &acg_dev->acg_dev_kobj, acg_dev->acg_dev_link_name);
++ if (res != 0) {
++ PRINT_ERROR("Can't create acg %s LUN link",
++ acg_dev->acg->acg_name);
++ goto out_del;
++ }
++
++ res = sysfs_create_link(&acg_dev->acg_dev_kobj,
++ &acg_dev->dev->dev_kobj, "device");
++ if (res != 0) {
++ PRINT_ERROR("Can't create acg %s device link",
++ acg_dev->acg->acg_name);
++ goto out_del;
++ }
++
++out:
++ return res;
++
++out_del:
++ scst_acg_dev_sysfs_del(acg_dev);
++ goto out;
++}
++
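++/*
++ * Parse a command written to a "luns/mgmt" sysfs file and apply it to the
++ * given access control group: "add"/"replace" map a device to a LUN
++ * (optionally read_only), "del" unmaps one LUN and "clear" unmaps them all.
++ * Activity is suspended and scst_mutex is held for the whole operation.
++ */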
++static int __scst_process_luns_mgmt_store(char *buffer,
++ struct scst_tgt *tgt, struct scst_acg *acg, bool tgt_kobj)
++{
++ int res, read_only = 0, action;
++ char *p, *e = NULL;
++ unsigned int virt_lun;
++ struct scst_acg_dev *acg_dev = NULL, *acg_dev_tmp;
++ struct scst_device *d, *dev = NULL;
++
++#define SCST_LUN_ACTION_ADD 1
++#define SCST_LUN_ACTION_DEL 2
++#define SCST_LUN_ACTION_REPLACE 3
++#define SCST_LUN_ACTION_CLEAR 4
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("buffer %s", buffer);
++
++ p = buffer;
++ if (p[strlen(p) - 1] == '\n')
++ p[strlen(p) - 1] = '\0';
++ if (strncasecmp("add", p, 3) == 0) {
++ p += 3;
++ action = SCST_LUN_ACTION_ADD;
++ } else if (strncasecmp("del", p, 3) == 0) {
++ p += 3;
++ action = SCST_LUN_ACTION_DEL;
++ } else if (!strncasecmp("replace", p, 7)) {
++ p += 7;
++ action = SCST_LUN_ACTION_REPLACE;
++ } else if (!strncasecmp("clear", p, 5)) {
++ p += 5;
++ action = SCST_LUN_ACTION_CLEAR;
++ } else {
++ PRINT_ERROR("Unknown action \"%s\"", p);
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = scst_suspend_activity(true);
++ if (res != 0)
++ goto out;
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out_resume;
++ }
++
++ /* Check if tgt and acg not already freed while we were coming here */
++ if (scst_check_tgt_acg_ptrs(tgt, acg) != 0)
++ goto out_unlock;
++
++ if ((action != SCST_LUN_ACTION_CLEAR) &&
++ (action != SCST_LUN_ACTION_DEL)) {
++ if (!isspace(*p)) {
++ PRINT_ERROR("%s", "Syntax error");
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ while (isspace(*p) && *p != '\0')
++ p++;
++ e = p; /* save p */
++ while (!isspace(*e) && *e != '\0')
++ e++;
++ *e = '\0';
++
++ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
++ if (!strcmp(d->virt_name, p)) {
++ dev = d;
++ TRACE_DBG("Device %p (%s) found", dev, p);
++ break;
++ }
++ }
++ if (dev == NULL) {
++ PRINT_ERROR("Device '%s' not found", p);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++ }
++
++ switch (action) {
++ case SCST_LUN_ACTION_ADD:
++ case SCST_LUN_ACTION_REPLACE:
++ {
++ bool dev_replaced = false;
++
++ e++;
++ while (isspace(*e) && *e != '\0')
++ e++;
++ virt_lun = simple_strtoul(e, &e, 0);
++
++ while (isspace(*e) && *e != '\0')
++ e++;
++
++ while (1) {
++ char *pp;
++ unsigned long val;
++ char *param = scst_get_next_token_str(&e);
++ if (param == NULL)
++ break;
++
++ p = scst_get_next_lexem(&param);
++ if (*p == '\0') {
++ PRINT_ERROR("Syntax error at %s (device %s)",
++ param, dev->virt_name);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ pp = scst_get_next_lexem(&param);
++ if (*pp == '\0') {
++				PRINT_ERROR("Missing value for parameter %s "
++					"(device %s)", p, dev->virt_name);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ if (scst_get_next_lexem(&param)[0] != '\0') {
++				PRINT_ERROR("Too many values for parameter %s "
++					"(device %s)", p, dev->virt_name);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ res = strict_strtoul(pp, 0, &val);
++ if (res != 0) {
++ PRINT_ERROR("strict_strtoul() for %s failed: %d "
++ "(device %s)", pp, res, dev->virt_name);
++ goto out_unlock;
++ }
++
++ if (!strcasecmp("read_only", p)) {
++ read_only = val;
++ TRACE_DBG("READ ONLY %d", read_only);
++ } else {
++ PRINT_ERROR("Unknown parameter %s (device %s)",
++ p, dev->virt_name);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++ }
++
++ acg_dev = NULL;
++ list_for_each_entry(acg_dev_tmp, &acg->acg_dev_list,
++ acg_dev_list_entry) {
++ if (acg_dev_tmp->lun == virt_lun) {
++ acg_dev = acg_dev_tmp;
++ break;
++ }
++ }
++
++ if (acg_dev != NULL) {
++ if (action == SCST_LUN_ACTION_ADD) {
++ PRINT_ERROR("virt lun %d already exists in "
++ "group %s", virt_lun, acg->acg_name);
++ res = -EEXIST;
++ goto out_unlock;
++ } else {
++ /* Replace */
++ res = scst_acg_del_lun(acg, acg_dev->lun,
++ false);
++ if (res != 0)
++ goto out_unlock;
++
++ dev_replaced = true;
++ }
++ }
++
++ res = scst_acg_add_lun(acg,
++ tgt_kobj ? tgt->tgt_luns_kobj : acg->luns_kobj,
++ dev, virt_lun, read_only, !dev_replaced, NULL);
++ if (res != 0)
++ goto out_unlock;
++
++ if (dev_replaced) {
++ struct scst_tgt_dev *tgt_dev;
++
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ if ((tgt_dev->acg_dev->acg == acg) &&
++ (tgt_dev->lun == virt_lun)) {
++ TRACE_MGMT_DBG("INQUIRY DATA HAS CHANGED"
++ " on tgt_dev %p", tgt_dev);
++ scst_gen_aen_or_ua(tgt_dev,
++ SCST_LOAD_SENSE(scst_sense_inquery_data_changed));
++ }
++ }
++ }
++
++ break;
++ }
++ case SCST_LUN_ACTION_DEL:
++ while (isspace(*p) && *p != '\0')
++ p++;
++ virt_lun = simple_strtoul(p, &p, 0);
++
++ res = scst_acg_del_lun(acg, virt_lun, true);
++ if (res != 0)
++ goto out_unlock;
++ break;
++ case SCST_LUN_ACTION_CLEAR:
++ PRINT_INFO("Removed all devices from group %s",
++ acg->acg_name);
++ list_for_each_entry_safe(acg_dev, acg_dev_tmp,
++ &acg->acg_dev_list,
++ acg_dev_list_entry) {
++ res = scst_acg_del_lun(acg, acg_dev->lun,
++ list_is_last(&acg_dev->acg_dev_list_entry,
++ &acg->acg_dev_list));
++ if (res)
++ goto out_unlock;
++ }
++ break;
++ }
++
++ res = 0;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++
++out_resume:
++ scst_resume_activity();
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++#undef SCST_LUN_ACTION_ADD
++#undef SCST_LUN_ACTION_DEL
++#undef SCST_LUN_ACTION_REPLACE
++#undef SCST_LUN_ACTION_CLEAR
++}
++
++static int scst_luns_mgmt_store_work_fn(struct scst_sysfs_work_item *work)
++{
++ return __scst_process_luns_mgmt_store(work->buf, work->tgt, work->acg,
++ work->is_tgt_kobj);
++}
++
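++/*
++ * Common helper for the LUN and initiator "mgmt" store handlers: copy the
++ * user buffer, wrap it in a sysfs work item together with the tgt/acg
++ * pointers and wait for the work function to process it.
++ */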
++static ssize_t __scst_acg_mgmt_store(struct scst_acg *acg,
++ const char *buf, size_t count, bool is_tgt_kobj,
++ int (*sysfs_work_fn)(struct scst_sysfs_work_item *))
++{
++ int res;
++ char *buffer;
++ struct scst_sysfs_work_item *work;
++
++ TRACE_ENTRY();
++
++ buffer = kzalloc(count+1, GFP_KERNEL);
++ if (buffer == NULL) {
++ res = -ENOMEM;
++ goto out;
++ }
++ memcpy(buffer, buf, count);
++ buffer[count] = '\0';
++
++ res = scst_alloc_sysfs_work(sysfs_work_fn, false, &work);
++ if (res != 0)
++ goto out_free;
++
++ work->buf = buffer;
++ work->tgt = acg->tgt;
++ work->acg = acg;
++ work->is_tgt_kobj = is_tgt_kobj;
++
++ res = scst_sysfs_queue_wait_work(work);
++ if (res == 0)
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free:
++ kfree(buffer);
++ goto out;
++}
++
++static ssize_t __scst_luns_mgmt_store(struct scst_acg *acg,
++ bool tgt_kobj, const char *buf, size_t count)
++{
++ return __scst_acg_mgmt_store(acg, buf, count, tgt_kobj,
++ scst_luns_mgmt_store_work_fn);
++}
++
++static ssize_t scst_luns_mgmt_show(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ char *buf)
++{
++ static char *help = "Usage: echo \"add|del H:C:I:L lun [parameters]\" >mgmt\n"
++ " echo \"add VNAME lun [parameters]\" >mgmt\n"
++ " echo \"del lun\" >mgmt\n"
++ " echo \"replace H:C:I:L lun [parameters]\" >mgmt\n"
++ " echo \"replace VNAME lun [parameters]\" >mgmt\n"
++ " echo \"clear\" >mgmt\n"
++ "\n"
++ "where parameters are one or more "
++ "param_name=value pairs separated by ';'\n"
++		"\nThe following parameters are available: read_only.";
++
++ return sprintf(buf, "%s", help);
++}
++
++static ssize_t scst_luns_mgmt_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count)
++{
++ int res;
++ struct scst_acg *acg;
++ struct scst_tgt *tgt;
++
++ tgt = container_of(kobj->parent, struct scst_tgt, tgt_kobj);
++ acg = tgt->default_acg;
++
++ res = __scst_luns_mgmt_store(acg, true, buf, count);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t __scst_acg_addr_method_show(struct scst_acg *acg, char *buf)
++{
++ int res;
++
++ switch (acg->addr_method) {
++ case SCST_LUN_ADDR_METHOD_FLAT:
++ res = sprintf(buf, "FLAT\n%s\n", SCST_SYSFS_KEY_MARK);
++ break;
++ case SCST_LUN_ADDR_METHOD_PERIPHERAL:
++ res = sprintf(buf, "PERIPHERAL\n");
++ break;
++ default:
++ res = sprintf(buf, "UNKNOWN\n");
++ break;
++ }
++
++ return res;
++}
++
++static ssize_t __scst_acg_addr_method_store(struct scst_acg *acg,
++ const char *buf, size_t count)
++{
++ int res = count;
++
++ if (strncasecmp(buf, "FLAT", min_t(int, 4, count)) == 0)
++ acg->addr_method = SCST_LUN_ADDR_METHOD_FLAT;
++ else if (strncasecmp(buf, "PERIPHERAL", min_t(int, 10, count)) == 0)
++ acg->addr_method = SCST_LUN_ADDR_METHOD_PERIPHERAL;
++ else {
++ PRINT_ERROR("Unknown address method %s", buf);
++ res = -EINVAL;
++ }
++
++ TRACE_DBG("acg %p, addr_method %d", acg, acg->addr_method);
++
++ return res;
++}
++
++static ssize_t scst_tgt_addr_method_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_acg *acg;
++ struct scst_tgt *tgt;
++
++ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ acg = tgt->default_acg;
++
++ return __scst_acg_addr_method_show(acg, buf);
++}
++
++static ssize_t scst_tgt_addr_method_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ struct scst_acg *acg;
++ struct scst_tgt *tgt;
++
++ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ acg = tgt->default_acg;
++
++ res = __scst_acg_addr_method_store(acg, buf, count);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t __scst_acg_io_grouping_type_show(struct scst_acg *acg, char *buf)
++{
++ int res;
++
++ switch (acg->acg_io_grouping_type) {
++ case SCST_IO_GROUPING_AUTO:
++ res = sprintf(buf, "%s\n", SCST_IO_GROUPING_AUTO_STR);
++ break;
++ case SCST_IO_GROUPING_THIS_GROUP_ONLY:
++ res = sprintf(buf, "%s\n%s\n",
++ SCST_IO_GROUPING_THIS_GROUP_ONLY_STR,
++ SCST_SYSFS_KEY_MARK);
++ break;
++ case SCST_IO_GROUPING_NEVER:
++ res = sprintf(buf, "%s\n%s\n", SCST_IO_GROUPING_NEVER_STR,
++ SCST_SYSFS_KEY_MARK);
++ break;
++ default:
++ res = sprintf(buf, "%d\n%s\n", acg->acg_io_grouping_type,
++ SCST_SYSFS_KEY_MARK);
++ break;
++ }
++
++ return res;
++}
++
++static int __scst_acg_process_io_grouping_type_store(struct scst_tgt *tgt,
++ struct scst_acg *acg, int io_grouping_type)
++{
++ int res = 0;
++ struct scst_acg_dev *acg_dev;
++
++ TRACE_DBG("tgt %p, acg %p, io_grouping_type %d", tgt, acg,
++ io_grouping_type);
++
++ res = scst_suspend_activity(true);
++ if (res != 0)
++ goto out;
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out_resume;
++ }
++
++ /* Check if tgt and acg not already freed while we were coming here */
++ if (scst_check_tgt_acg_ptrs(tgt, acg) != 0)
++ goto out_unlock;
++
++ acg->acg_io_grouping_type = io_grouping_type;
++
++ list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
++ int rc;
++
++ scst_stop_dev_threads(acg_dev->dev);
++
++ rc = scst_create_dev_threads(acg_dev->dev);
++ if (rc != 0)
++ res = rc;
++ }
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++
++out_resume:
++ scst_resume_activity();
++
++out:
++ return res;
++}
++
++static int __scst_acg_io_grouping_type_store_work_fn(struct scst_sysfs_work_item *work)
++{
++ return __scst_acg_process_io_grouping_type_store(work->tgt, work->acg,
++ work->io_grouping_type);
++}
++
++static ssize_t __scst_acg_io_grouping_type_store(struct scst_acg *acg,
++ const char *buf, size_t count)
++{
++ int res = 0;
++ int prev = acg->acg_io_grouping_type;
++ long io_grouping_type;
++ struct scst_sysfs_work_item *work;
++
++ if (strncasecmp(buf, SCST_IO_GROUPING_AUTO_STR,
++ min_t(int, strlen(SCST_IO_GROUPING_AUTO_STR), count)) == 0)
++ io_grouping_type = SCST_IO_GROUPING_AUTO;
++ else if (strncasecmp(buf, SCST_IO_GROUPING_THIS_GROUP_ONLY_STR,
++ min_t(int, strlen(SCST_IO_GROUPING_THIS_GROUP_ONLY_STR), count)) == 0)
++ io_grouping_type = SCST_IO_GROUPING_THIS_GROUP_ONLY;
++ else if (strncasecmp(buf, SCST_IO_GROUPING_NEVER_STR,
++ min_t(int, strlen(SCST_IO_GROUPING_NEVER_STR), count)) == 0)
++ io_grouping_type = SCST_IO_GROUPING_NEVER;
++ else {
++ res = strict_strtol(buf, 0, &io_grouping_type);
++ if ((res != 0) || (io_grouping_type <= 0)) {
++ PRINT_ERROR("Unknown or not allowed I/O grouping type "
++ "%s", buf);
++ res = -EINVAL;
++ goto out;
++ }
++ }
++
++ if (prev == io_grouping_type)
++ goto out;
++
++ res = scst_alloc_sysfs_work(__scst_acg_io_grouping_type_store_work_fn,
++ false, &work);
++ if (res != 0)
++ goto out;
++
++ work->tgt = acg->tgt;
++ work->acg = acg;
++ work->io_grouping_type = io_grouping_type;
++
++ res = scst_sysfs_queue_wait_work(work);
++
++out:
++ return res;
++}
++
++static ssize_t scst_tgt_io_grouping_type_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_acg *acg;
++ struct scst_tgt *tgt;
++
++ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ acg = tgt->default_acg;
++
++ return __scst_acg_io_grouping_type_show(acg, buf);
++}
++
++static ssize_t scst_tgt_io_grouping_type_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ struct scst_acg *acg;
++ struct scst_tgt *tgt;
++
++ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ acg = tgt->default_acg;
++
++ res = __scst_acg_io_grouping_type_store(acg, buf, count);
++ if (res != 0)
++ goto out;
++
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/*
++ * Called with scst_mutex held.
++ *
++ * !! No sysfs works must use kobject_get() to protect acg, due to possible
++ * !! deadlock with scst_mutex (it is waiting for the last put, but
++ * !! the last ref counter holder is waiting for scst_mutex)
++ */
++void scst_acg_sysfs_del(struct scst_acg *acg)
++{
++ int rc;
++
++ TRACE_ENTRY();
++
++ kobject_del(acg->luns_kobj);
++ kobject_del(acg->initiators_kobj);
++ kobject_del(&acg->acg_kobj);
++
++ kobject_put(acg->luns_kobj);
++ kobject_put(acg->initiators_kobj);
++ kobject_put(&acg->acg_kobj);
++
++ rc = wait_for_completion_timeout(&acg->acg_kobj_release_cmpl, HZ);
++ if (rc == 0) {
++ PRINT_INFO("Waiting for releasing sysfs entry "
++ "for acg %s (%d refs)...", acg->acg_name,
++ atomic_read(&acg->acg_kobj.kref.refcount));
++ wait_for_completion(&acg->acg_kobj_release_cmpl);
++ PRINT_INFO("Done waiting for releasing sysfs "
++ "entry for acg %s", acg->acg_name);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++int scst_acg_sysfs_create(struct scst_tgt *tgt,
++ struct scst_acg *acg)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ init_completion(&acg->acg_kobj_release_cmpl);
++
++ res = kobject_init_and_add(&acg->acg_kobj, &acg_ktype,
++ tgt->tgt_ini_grp_kobj, acg->acg_name);
++ if (res != 0) {
++ PRINT_ERROR("Can't add acg '%s' to sysfs", acg->acg_name);
++ goto out;
++ }
++
++ acg->luns_kobj = kobject_create_and_add("luns", &acg->acg_kobj);
++ if (acg->luns_kobj == NULL) {
++ PRINT_ERROR("Can't create luns kobj for tgt %s",
++ tgt->tgt_name);
++ res = -ENOMEM;
++ goto out_del;
++ }
++
++ res = sysfs_create_file(acg->luns_kobj, &scst_acg_luns_mgmt.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add tgt attr %s for tgt %s",
++ scst_acg_luns_mgmt.attr.name, tgt->tgt_name);
++ goto out_del;
++ }
++
++ acg->initiators_kobj = kobject_create_and_add("initiators",
++ &acg->acg_kobj);
++ if (acg->initiators_kobj == NULL) {
++ PRINT_ERROR("Can't create initiators kobj for tgt %s",
++ tgt->tgt_name);
++ res = -ENOMEM;
++ goto out_del;
++ }
++
++ res = sysfs_create_file(acg->initiators_kobj,
++ &scst_acg_ini_mgmt.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add tgt attr %s for tgt %s",
++ scst_acg_ini_mgmt.attr.name, tgt->tgt_name);
++ goto out_del;
++ }
++
++ res = sysfs_create_file(&acg->acg_kobj, &scst_acg_addr_method.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add tgt attr %s for tgt %s",
++ scst_acg_addr_method.attr.name, tgt->tgt_name);
++ goto out_del;
++ }
++
++ res = sysfs_create_file(&acg->acg_kobj, &scst_acg_io_grouping_type.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add tgt attr %s for tgt %s",
++ scst_acg_io_grouping_type.attr.name, tgt->tgt_name);
++ goto out_del;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_del:
++ scst_acg_sysfs_del(acg);
++ goto out;
++}
++
++static ssize_t scst_acg_addr_method_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_acg *acg;
++
++ acg = container_of(kobj, struct scst_acg, acg_kobj);
++
++ return __scst_acg_addr_method_show(acg, buf);
++}
++
++static ssize_t scst_acg_addr_method_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ struct scst_acg *acg;
++
++ acg = container_of(kobj, struct scst_acg, acg_kobj);
++
++ res = __scst_acg_addr_method_store(acg, buf, count);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t scst_acg_io_grouping_type_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_acg *acg;
++
++ acg = container_of(kobj, struct scst_acg, acg_kobj);
++
++ return __scst_acg_io_grouping_type_show(acg, buf);
++}
++
++static ssize_t scst_acg_io_grouping_type_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ struct scst_acg *acg;
++
++ acg = container_of(kobj, struct scst_acg, acg_kobj);
++
++ res = __scst_acg_io_grouping_type_store(acg, buf, count);
++ if (res != 0)
++ goto out;
++
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t scst_ini_group_mgmt_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ static char *help = "Usage: echo \"create GROUP_NAME\" >mgmt\n"
++ " echo \"del GROUP_NAME\" >mgmt\n";
++
++ return sprintf(buf, "%s", help);
++}
++
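++/*
++ * Handle "create GROUP_NAME" / "del GROUP_NAME" commands written to the
++ * initiator groups "mgmt" file. Runs from a sysfs work item with activity
++ * suspended and scst_mutex held while the group list is modified.
++ */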
++static int scst_process_ini_group_mgmt_store(char *buffer,
++ struct scst_tgt *tgt)
++{
++ int res, action;
++ int len;
++ char *name;
++ char *p, *e = NULL;
++ struct scst_acg *a, *acg = NULL;
++
++#define SCST_INI_GROUP_ACTION_CREATE 1
++#define SCST_INI_GROUP_ACTION_DEL 2
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("tgt %p, buffer %s", tgt, buffer);
++
++ p = buffer;
++ if (p[strlen(p) - 1] == '\n')
++ p[strlen(p) - 1] = '\0';
++ if (strncasecmp("create ", p, 7) == 0) {
++ p += 7;
++ action = SCST_INI_GROUP_ACTION_CREATE;
++ } else if (strncasecmp("del ", p, 4) == 0) {
++ p += 4;
++ action = SCST_INI_GROUP_ACTION_DEL;
++ } else {
++ PRINT_ERROR("Unknown action \"%s\"", p);
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = scst_suspend_activity(true);
++ if (res != 0)
++ goto out;
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out_resume;
++ }
++
++ /* Check if our pointer is still alive */
++ if (scst_check_tgt_acg_ptrs(tgt, NULL) != 0)
++ goto out_unlock;
++
++ while (isspace(*p) && *p != '\0')
++ p++;
++ e = p;
++ while (!isspace(*e) && *e != '\0')
++ e++;
++ *e = '\0';
++
++ if (p[0] == '\0') {
++ PRINT_ERROR("%s", "Group name required");
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ list_for_each_entry(a, &tgt->tgt_acg_list, acg_list_entry) {
++ if (strcmp(a->acg_name, p) == 0) {
++ TRACE_DBG("group (acg) %p %s found",
++ a, a->acg_name);
++ acg = a;
++ break;
++ }
++ }
++
++ switch (action) {
++ case SCST_INI_GROUP_ACTION_CREATE:
++ TRACE_DBG("Creating group '%s'", p);
++ if (acg != NULL) {
++			PRINT_ERROR("acg name %s already exists", p);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ len = strlen(p) + 1;
++ name = kmalloc(len, GFP_KERNEL);
++ if (name == NULL) {
++ PRINT_ERROR("%s", "Allocation of name failed");
++ res = -ENOMEM;
++ goto out_unlock;
++ }
++ strlcpy(name, p, len);
++
++ acg = scst_alloc_add_acg(tgt, name, true);
++ kfree(name);
++ if (acg == NULL)
++ goto out_unlock;
++ break;
++ case SCST_INI_GROUP_ACTION_DEL:
++ TRACE_DBG("Deleting group '%s'", p);
++ if (acg == NULL) {
++ PRINT_ERROR("Group %s not found", p);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++ if (!scst_acg_sess_is_empty(acg)) {
++ PRINT_ERROR("Group %s is not empty", acg->acg_name);
++ res = -EBUSY;
++ goto out_unlock;
++ }
++ scst_del_free_acg(acg);
++ break;
++ }
++
++ res = 0;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++
++out_resume:
++ scst_resume_activity();
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++#undef SCST_INI_GROUP_ACTION_CREATE
++#undef SCST_INI_GROUP_ACTION_DEL
++}
++
++static int scst_ini_group_mgmt_store_work_fn(struct scst_sysfs_work_item *work)
++{
++ return scst_process_ini_group_mgmt_store(work->buf, work->tgt);
++}
++
++static ssize_t scst_ini_group_mgmt_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ char *buffer;
++ struct scst_tgt *tgt;
++ struct scst_sysfs_work_item *work;
++
++ TRACE_ENTRY();
++
++ tgt = container_of(kobj->parent, struct scst_tgt, tgt_kobj);
++
++ buffer = kzalloc(count+1, GFP_KERNEL);
++ if (buffer == NULL) {
++ res = -ENOMEM;
++ goto out;
++ }
++ memcpy(buffer, buf, count);
++ buffer[count] = '\0';
++
++ res = scst_alloc_sysfs_work(scst_ini_group_mgmt_store_work_fn, false,
++ &work);
++ if (res != 0)
++ goto out_free;
++
++ work->buf = buffer;
++ work->tgt = tgt;
++
++ res = scst_sysfs_queue_wait_work(work);
++ if (res == 0)
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free:
++ kfree(buffer);
++ goto out;
++}
++
++static ssize_t scst_rel_tgt_id_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_tgt *tgt;
++ int res;
++
++ TRACE_ENTRY();
++
++ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++
++ res = sprintf(buf, "%d\n%s", tgt->rel_tgt_id,
++ (tgt->rel_tgt_id != 0) ? SCST_SYSFS_KEY_MARK "\n" : "");
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_process_rel_tgt_id_store(struct scst_sysfs_work_item *work)
++{
++ int res = 0;
++ struct scst_tgt *tgt = work->tgt;
++ unsigned long rel_tgt_id = work->l;
++
++ TRACE_ENTRY();
++
++ /* tgt protected by kobject_get() */
++
++ TRACE_DBG("Trying to set relative target port id %d",
++ (uint16_t)rel_tgt_id);
++
++ if (tgt->tgtt->is_target_enabled(tgt) &&
++ rel_tgt_id != tgt->rel_tgt_id) {
++ if (!scst_is_relative_target_port_id_unique(rel_tgt_id, tgt)) {
++ PRINT_ERROR("Relative port id %d is not unique",
++ (uint16_t)rel_tgt_id);
++ res = -EBADSLT;
++ goto out_put;
++ }
++ }
++
++ if (rel_tgt_id < SCST_MIN_REL_TGT_ID ||
++ rel_tgt_id > SCST_MAX_REL_TGT_ID) {
++ if ((rel_tgt_id == 0) && !tgt->tgtt->is_target_enabled(tgt))
++ goto set;
++
++ PRINT_ERROR("Invalid relative port id %d",
++ (uint16_t)rel_tgt_id);
++ res = -EINVAL;
++ goto out_put;
++ }
++
++set:
++ tgt->rel_tgt_id = (uint16_t)rel_tgt_id;
++
++out_put:
++ kobject_put(&tgt->tgt_kobj);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t scst_rel_tgt_id_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res = 0;
++ struct scst_tgt *tgt;
++ unsigned long rel_tgt_id;
++ struct scst_sysfs_work_item *work;
++
++ TRACE_ENTRY();
++
++ if (buf == NULL)
++ goto out;
++
++ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++
++ res = strict_strtoul(buf, 0, &rel_tgt_id);
++ if (res != 0) {
++ PRINT_ERROR("%s", "Wrong rel_tgt_id");
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = scst_alloc_sysfs_work(scst_process_rel_tgt_id_store, false,
++ &work);
++ if (res != 0)
++ goto out;
++
++ work->tgt = tgt;
++ work->l = rel_tgt_id;
++
++ kobject_get(&tgt->tgt_kobj);
++
++ res = scst_sysfs_queue_wait_work(work);
++ if (res == 0)
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++int scst_acn_sysfs_create(struct scst_acn *acn)
++{
++ int res = 0;
++ int len;
++ struct scst_acg *acg = acn->acg;
++ struct kobj_attribute *attr = NULL;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ static struct lock_class_key __key;
++#endif
++
++ TRACE_ENTRY();
++
++ acn->acn_attr = NULL;
++
++ attr = kzalloc(sizeof(struct kobj_attribute), GFP_KERNEL);
++ if (attr == NULL) {
++ PRINT_ERROR("Unable to allocate attributes for initiator '%s'",
++ acn->name);
++ res = -ENOMEM;
++ goto out;
++ }
++
++ len = strlen(acn->name) + 1;
++ attr->attr.name = kzalloc(len, GFP_KERNEL);
++ if (attr->attr.name == NULL) {
++		PRINT_ERROR("Unable to allocate attribute name for initiator '%s'",
++			acn->name);
++ res = -ENOMEM;
++ goto out_free;
++ }
++ strlcpy((char *)attr->attr.name, acn->name, len);
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ attr->attr.key = &__key;
++#endif
++
++ attr->attr.mode = S_IRUGO;
++ attr->show = scst_acn_file_show;
++ attr->store = NULL;
++
++ res = sysfs_create_file(acg->initiators_kobj, &attr->attr);
++ if (res != 0) {
++ PRINT_ERROR("Unable to create acn '%s' for group '%s'",
++ acn->name, acg->acg_name);
++ kfree(attr->attr.name);
++ goto out_free;
++ }
++
++ acn->acn_attr = attr;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free:
++ kfree(attr);
++ goto out;
++}
++
++void scst_acn_sysfs_del(struct scst_acn *acn)
++{
++ struct scst_acg *acg = acn->acg;
++
++ TRACE_ENTRY();
++
++ if (acn->acn_attr != NULL) {
++ sysfs_remove_file(acg->initiators_kobj,
++ &acn->acn_attr->attr);
++ kfree(acn->acn_attr->attr.name);
++ kfree(acn->acn_attr);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static ssize_t scst_acn_file_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ return scnprintf(buf, SCST_SYSFS_BLOCK_SIZE, "%s\n",
++ attr->attr.name);
++}
++
++static ssize_t scst_acg_luns_mgmt_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count)
++{
++ int res;
++ struct scst_acg *acg;
++
++ acg = container_of(kobj->parent, struct scst_acg, acg_kobj);
++ res = __scst_luns_mgmt_store(acg, false, buf, count);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t scst_acg_ini_mgmt_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ static char *help = "Usage: echo \"add INITIATOR_NAME\" "
++ ">mgmt\n"
++ " echo \"del INITIATOR_NAME\" "
++ ">mgmt\n"
++ " echo \"move INITIATOR_NAME DEST_GROUP_NAME\" "
++ ">mgmt\n"
++ " echo \"clear\" "
++ ">mgmt\n";
++
++ return sprintf(buf, "%s", help);
++}
++
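++/*
++ * Handle "add"/"del"/"move"/"clear" commands written to a group's
++ * initiators "mgmt" file. Runs from a sysfs work item with activity
++ * suspended and scst_mutex held while the initiator list is modified.
++ */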
++static int scst_process_acg_ini_mgmt_store(char *buffer,
++ struct scst_tgt *tgt, struct scst_acg *acg)
++{
++ int res, action;
++ char *p, *e = NULL;
++ char *name = NULL, *group = NULL;
++ struct scst_acg *acg_dest = NULL;
++ struct scst_acn *acn = NULL, *acn_tmp;
++
++#define SCST_ACG_ACTION_INI_ADD 1
++#define SCST_ACG_ACTION_INI_DEL 2
++#define SCST_ACG_ACTION_INI_CLEAR 3
++#define SCST_ACG_ACTION_INI_MOVE 4
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("tgt %p, acg %p, buffer %s", tgt, acg, buffer);
++
++ p = buffer;
++ if (p[strlen(p) - 1] == '\n')
++ p[strlen(p) - 1] = '\0';
++
++ if (strncasecmp("add", p, 3) == 0) {
++ p += 3;
++ action = SCST_ACG_ACTION_INI_ADD;
++ } else if (strncasecmp("del", p, 3) == 0) {
++ p += 3;
++ action = SCST_ACG_ACTION_INI_DEL;
++ } else if (strncasecmp("clear", p, 5) == 0) {
++ p += 5;
++ action = SCST_ACG_ACTION_INI_CLEAR;
++ } else if (strncasecmp("move", p, 4) == 0) {
++ p += 4;
++ action = SCST_ACG_ACTION_INI_MOVE;
++ } else {
++ PRINT_ERROR("Unknown action \"%s\"", p);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (action != SCST_ACG_ACTION_INI_CLEAR)
++ if (!isspace(*p)) {
++ PRINT_ERROR("%s", "Syntax error");
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = scst_suspend_activity(true);
++ if (res != 0)
++ goto out;
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out_resume;
++ }
++
++ /* Check if tgt and acg not already freed while we were coming here */
++ if (scst_check_tgt_acg_ptrs(tgt, acg) != 0)
++ goto out_unlock;
++
++ if (action != SCST_ACG_ACTION_INI_CLEAR)
++ while (isspace(*p) && *p != '\0')
++ p++;
++
++ switch (action) {
++ case SCST_ACG_ACTION_INI_ADD:
++ e = p;
++ while (!isspace(*e) && *e != '\0')
++ e++;
++ *e = '\0';
++ name = p;
++
++ if (name[0] == '\0') {
++ PRINT_ERROR("%s", "Invalid initiator name");
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ res = scst_acg_add_acn(acg, name);
++ if (res != 0)
++ goto out_unlock;
++ break;
++ case SCST_ACG_ACTION_INI_DEL:
++ e = p;
++ while (!isspace(*e) && *e != '\0')
++ e++;
++ *e = '\0';
++ name = p;
++
++ if (name[0] == '\0') {
++ PRINT_ERROR("%s", "Invalid initiator name");
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ acn = scst_find_acn(acg, name);
++ if (acn == NULL) {
++ PRINT_ERROR("Unable to find "
++ "initiator '%s' in group '%s'",
++ name, acg->acg_name);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++ scst_del_free_acn(acn, true);
++ break;
++ case SCST_ACG_ACTION_INI_CLEAR:
++ list_for_each_entry_safe(acn, acn_tmp, &acg->acn_list,
++ acn_list_entry) {
++ scst_del_free_acn(acn, false);
++ }
++ scst_check_reassign_sessions();
++ break;
++ case SCST_ACG_ACTION_INI_MOVE:
++ e = p;
++ while (!isspace(*e) && *e != '\0')
++ e++;
++ if (*e == '\0') {
++ PRINT_ERROR("%s", "Too few parameters");
++ res = -EINVAL;
++ goto out_unlock;
++ }
++ *e = '\0';
++ name = p;
++
++ if (name[0] == '\0') {
++ PRINT_ERROR("%s", "Invalid initiator name");
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ e++;
++ p = e;
++ while (!isspace(*e) && *e != '\0')
++ e++;
++ *e = '\0';
++ group = p;
++
++ if (group[0] == '\0') {
++ PRINT_ERROR("%s", "Invalid group name");
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ TRACE_DBG("Move initiator '%s' to group '%s'",
++ name, group);
++
++ acn = scst_find_acn(acg, name);
++ if (acn == NULL) {
++ PRINT_ERROR("Unable to find "
++ "initiator '%s' in group '%s'",
++ name, acg->acg_name);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++ acg_dest = scst_tgt_find_acg(tgt, group);
++ if (acg_dest == NULL) {
++ PRINT_ERROR("Unable to find group '%s' in target '%s'",
++ group, tgt->tgt_name);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++ if (scst_find_acn(acg_dest, name) != NULL) {
++ PRINT_ERROR("Initiator '%s' already exists in group '%s'",
++ name, acg_dest->acg_name);
++ res = -EEXIST;
++ goto out_unlock;
++ }
++ scst_del_free_acn(acn, false);
++
++ res = scst_acg_add_acn(acg_dest, name);
++ if (res != 0)
++ goto out_unlock;
++ break;
++ }
++
++ res = 0;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++
++out_resume:
++ scst_resume_activity();
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++#undef SCST_ACG_ACTION_INI_ADD
++#undef SCST_ACG_ACTION_INI_DEL
++#undef SCST_ACG_ACTION_INI_CLEAR
++#undef SCST_ACG_ACTION_INI_MOVE
++}
++
++static int scst_acg_ini_mgmt_store_work_fn(struct scst_sysfs_work_item *work)
++{
++ return scst_process_acg_ini_mgmt_store(work->buf, work->tgt, work->acg);
++}
++
++static ssize_t scst_acg_ini_mgmt_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ struct scst_acg *acg;
++
++ acg = container_of(kobj->parent, struct scst_acg, acg_kobj);
++
++ return __scst_acg_mgmt_store(acg, buf, count, false,
++ scst_acg_ini_mgmt_store_work_fn);
++}
++
++/**
++ ** SGV directory implementation
++ **/
++
++static struct kobj_attribute sgv_stat_attr =
++ __ATTR(stats, S_IRUGO | S_IWUSR, sgv_sysfs_stat_show,
++ sgv_sysfs_stat_reset);
++
++static struct attribute *sgv_attrs[] = {
++ &sgv_stat_attr.attr,
++ NULL,
++};
++
++static void sgv_kobj_release(struct kobject *kobj)
++{
++ struct sgv_pool *pool;
++
++ TRACE_ENTRY();
++
++ pool = container_of(kobj, struct sgv_pool, sgv_kobj);
++ complete_all(&pool->sgv_kobj_release_cmpl);
++
++ TRACE_EXIT();
++ return;
++}
++
++static struct kobj_type sgv_pool_ktype = {
++ .sysfs_ops = &scst_sysfs_ops,
++ .release = sgv_kobj_release,
++ .default_attrs = sgv_attrs,
++};
++
++int scst_sgv_sysfs_create(struct sgv_pool *pool)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ init_completion(&pool->sgv_kobj_release_cmpl);
++
++ res = kobject_init_and_add(&pool->sgv_kobj, &sgv_pool_ktype,
++ scst_sgv_kobj, pool->name);
++ if (res != 0) {
++ PRINT_ERROR("Can't add sgv pool %s to sysfs", pool->name);
++ goto out;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++void scst_sgv_sysfs_del(struct sgv_pool *pool)
++{
++ int rc;
++
++ TRACE_ENTRY();
++
++ kobject_del(&pool->sgv_kobj);
++ kobject_put(&pool->sgv_kobj);
++
++ rc = wait_for_completion_timeout(&pool->sgv_kobj_release_cmpl, HZ);
++ if (rc == 0) {
++ PRINT_INFO("Waiting for releasing sysfs entry "
++ "for SGV pool %s (%d refs)...", pool->name,
++ atomic_read(&pool->sgv_kobj.kref.refcount));
++ wait_for_completion(&pool->sgv_kobj_release_cmpl);
++ PRINT_INFO("Done waiting for releasing sysfs "
++ "entry for SGV pool %s", pool->name);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static struct kobj_attribute sgv_global_stat_attr =
++ __ATTR(global_stats, S_IRUGO | S_IWUSR, sgv_sysfs_global_stat_show,
++ sgv_sysfs_global_stat_reset);
++
++static struct attribute *sgv_default_attrs[] = {
++ &sgv_global_stat_attr.attr,
++ NULL,
++};
++
++static void scst_sysfs_release(struct kobject *kobj)
++{
++ kfree(kobj);
++}
++
++static struct kobj_type sgv_ktype = {
++ .sysfs_ops = &scst_sysfs_ops,
++ .release = scst_sysfs_release,
++ .default_attrs = sgv_default_attrs,
++};
++
++/**
++ ** SCST sysfs root directory implementation
++ **/
++
++static ssize_t scst_threads_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int count;
++
++ TRACE_ENTRY();
++
++ count = sprintf(buf, "%d\n%s", scst_main_cmd_threads.nr_threads,
++ (scst_main_cmd_threads.nr_threads != scst_threads) ?
++ SCST_SYSFS_KEY_MARK "\n" : "");
++
++ TRACE_EXIT();
++ return count;
++}
++
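++/*
++ * Resize the global command thread pool to newtn threads under scst_mutex,
++ * adding or removing the difference from the current thread count.
++ */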
++static int scst_process_threads_store(int newtn)
++{
++	int res = 0;
++ long oldtn, delta;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("newtn %d", newtn);
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ oldtn = scst_main_cmd_threads.nr_threads;
++
++ delta = newtn - oldtn;
++ if (delta < 0)
++ scst_del_threads(&scst_main_cmd_threads, -delta);
++ else {
++ res = scst_add_threads(&scst_main_cmd_threads, NULL, NULL, delta);
++ if (res != 0)
++ goto out_up;
++ }
++
++ PRINT_INFO("Changed cmd threads num: old %ld, new %d", oldtn, newtn);
++
++out_up:
++ mutex_unlock(&scst_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_threads_store_work_fn(struct scst_sysfs_work_item *work)
++{
++ return scst_process_threads_store(work->new_threads_num);
++}
++
++static ssize_t scst_threads_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ long newtn;
++ struct scst_sysfs_work_item *work;
++
++ TRACE_ENTRY();
++
++ res = strict_strtol(buf, 0, &newtn);
++ if (res != 0) {
++ PRINT_ERROR("strict_strtol() for %s failed: %d ", buf, res);
++ goto out;
++ }
++ if (newtn <= 0) {
++ PRINT_ERROR("Illegal threads num value %ld", newtn);
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = scst_alloc_sysfs_work(scst_threads_store_work_fn, false, &work);
++ if (res != 0)
++ goto out;
++
++ work->new_threads_num = newtn;
++
++ res = scst_sysfs_queue_wait_work(work);
++ if (res == 0)
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t scst_setup_id_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int count;
++
++ TRACE_ENTRY();
++
++ count = sprintf(buf, "0x%x\n%s\n", scst_setup_id,
++ (scst_setup_id == 0) ? "" : SCST_SYSFS_KEY_MARK);
++
++ TRACE_EXIT();
++ return count;
++}
++
++static ssize_t scst_setup_id_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ unsigned long val;
++
++ TRACE_ENTRY();
++
++ res = strict_strtoul(buf, 0, &val);
++ if (res != 0) {
++ PRINT_ERROR("strict_strtoul() for %s failed: %d ", buf, res);
++ goto out;
++ }
++
++ scst_setup_id = val;
++ PRINT_INFO("Changed scst_setup_id to %x", scst_setup_id);
++
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++
++static void scst_read_trace_tlb(const struct scst_trace_log *tbl, char *buf,
++ unsigned long log_level, int *pos)
++{
++ const struct scst_trace_log *t = tbl;
++
++ if (t == NULL)
++ goto out;
++
++ while (t->token) {
++ if (log_level & t->val) {
++ *pos += sprintf(&buf[*pos], "%s%s",
++ (*pos == 0) ? "" : " | ",
++ t->token);
++ }
++ t++;
++ }
++out:
++ return;
++}
++
++static ssize_t scst_trace_level_show(const struct scst_trace_log *local_tbl,
++ unsigned long log_level, char *buf, const char *help)
++{
++ int pos = 0;
++
++ scst_read_trace_tlb(scst_trace_tbl, buf, log_level, &pos);
++ scst_read_trace_tlb(local_tbl, buf, log_level, &pos);
++
++ pos += sprintf(&buf[pos], "\n\n\nUsage:\n"
++ " echo \"all|none|default\" >trace_level\n"
++ " echo \"value DEC|0xHEX|0OCT\" >trace_level\n"
++ " echo \"add|del TOKEN\" >trace_level\n"
++ "\nwhere TOKEN is one of [debug, function, line, pid,\n"
++#ifndef GENERATING_UPSTREAM_PATCH
++ " entryexit, buff, mem, sg, out_of_mem,\n"
++#else
++ " buff, mem, sg, out_of_mem,\n"
++#endif
++ " special, scsi, mgmt, minor,\n"
++ " mgmt_dbg, scsi_serializing,\n"
++ " retry, recv_bot, send_bot, recv_top, pr,\n"
++ " send_top%s]", help != NULL ? help : "");
++
++ return pos;
++}
++
++static ssize_t scst_main_trace_level_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ return scst_trace_level_show(scst_local_trace_tbl, trace_flag,
++ buf, NULL);
++}
++
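++/*
++ * Parse a trace_level command ("all", "none"/"null", "default",
++ * "add TOKEN", "del TOKEN" or "value NUM") and update *log_level
++ * accordingly. Tokens are looked up in the local table first and then in
++ * the global scst_trace_tbl.
++ */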
++static int scst_write_trace(const char *buf, size_t length,
++ unsigned long *log_level, unsigned long default_level,
++ const char *name, const struct scst_trace_log *tbl)
++{
++ int res = length;
++ int action;
++ unsigned long level = 0, oldlevel;
++ char *buffer, *p, *e;
++ const struct scst_trace_log *t;
++
++#define SCST_TRACE_ACTION_ALL 1
++#define SCST_TRACE_ACTION_NONE 2
++#define SCST_TRACE_ACTION_DEFAULT 3
++#define SCST_TRACE_ACTION_ADD 4
++#define SCST_TRACE_ACTION_DEL 5
++#define SCST_TRACE_ACTION_VALUE 6
++
++ TRACE_ENTRY();
++
++ if ((buf == NULL) || (length == 0)) {
++ res = -EINVAL;
++ goto out;
++ }
++
++ buffer = kmalloc(length+1, GFP_KERNEL);
++ if (buffer == NULL) {
++ PRINT_ERROR("Unable to alloc intermediate buffer (size %zd)",
++ length+1);
++ res = -ENOMEM;
++ goto out;
++ }
++ memcpy(buffer, buf, length);
++ buffer[length] = '\0';
++
++ TRACE_DBG("buffer %s", buffer);
++
++ p = buffer;
++ if (!strncasecmp("all", p, 3)) {
++ action = SCST_TRACE_ACTION_ALL;
++ } else if (!strncasecmp("none", p, 4) || !strncasecmp("null", p, 4)) {
++ action = SCST_TRACE_ACTION_NONE;
++ } else if (!strncasecmp("default", p, 7)) {
++ action = SCST_TRACE_ACTION_DEFAULT;
++ } else if (!strncasecmp("add", p, 3)) {
++ p += 3;
++ action = SCST_TRACE_ACTION_ADD;
++ } else if (!strncasecmp("del", p, 3)) {
++ p += 3;
++ action = SCST_TRACE_ACTION_DEL;
++ } else if (!strncasecmp("value", p, 5)) {
++ p += 5;
++ action = SCST_TRACE_ACTION_VALUE;
++ } else {
++ if (p[strlen(p) - 1] == '\n')
++ p[strlen(p) - 1] = '\0';
++ PRINT_ERROR("Unknown action \"%s\"", p);
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ switch (action) {
++ case SCST_TRACE_ACTION_ADD:
++ case SCST_TRACE_ACTION_DEL:
++ case SCST_TRACE_ACTION_VALUE:
++ if (!isspace(*p)) {
++ PRINT_ERROR("%s", "Syntax error");
++ res = -EINVAL;
++ goto out_free;
++ }
++ }
++
++ switch (action) {
++ case SCST_TRACE_ACTION_ALL:
++ level = TRACE_ALL;
++ break;
++ case SCST_TRACE_ACTION_DEFAULT:
++ level = default_level;
++ break;
++ case SCST_TRACE_ACTION_NONE:
++ level = TRACE_NULL;
++ break;
++ case SCST_TRACE_ACTION_ADD:
++ case SCST_TRACE_ACTION_DEL:
++ while (isspace(*p) && *p != '\0')
++ p++;
++ e = p;
++ while (!isspace(*e) && *e != '\0')
++ e++;
++ *e = 0;
++ if (tbl) {
++ t = tbl;
++ while (t->token) {
++ if (!strcasecmp(p, t->token)) {
++ level = t->val;
++ break;
++ }
++ t++;
++ }
++ }
++ if (level == 0) {
++ t = scst_trace_tbl;
++ while (t->token) {
++ if (!strcasecmp(p, t->token)) {
++ level = t->val;
++ break;
++ }
++ t++;
++ }
++ }
++ if (level == 0) {
++ PRINT_ERROR("Unknown token \"%s\"", p);
++ res = -EINVAL;
++ goto out_free;
++ }
++ break;
++ case SCST_TRACE_ACTION_VALUE:
++ while (isspace(*p) && *p != '\0')
++ p++;
++ res = strict_strtoul(p, 0, &level);
++ if (res != 0) {
++ PRINT_ERROR("Invalid trace value \"%s\"", p);
++ res = -EINVAL;
++ goto out_free;
++ }
++ break;
++ }
++
++ oldlevel = *log_level;
++
++ switch (action) {
++ case SCST_TRACE_ACTION_ADD:
++ *log_level |= level;
++ break;
++ case SCST_TRACE_ACTION_DEL:
++ *log_level &= ~level;
++ break;
++ default:
++ *log_level = level;
++ break;
++ }
++
++ PRINT_INFO("Changed trace level for \"%s\": old 0x%08lx, new 0x%08lx",
++ name, oldlevel, *log_level);
++
++out_free:
++ kfree(buffer);
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++#undef SCST_TRACE_ACTION_ALL
++#undef SCST_TRACE_ACTION_NONE
++#undef SCST_TRACE_ACTION_DEFAULT
++#undef SCST_TRACE_ACTION_ADD
++#undef SCST_TRACE_ACTION_DEL
++#undef SCST_TRACE_ACTION_VALUE
++}
++
++static ssize_t scst_main_trace_level_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_log_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ res = scst_write_trace(buf, count, &trace_flag,
++ SCST_DEFAULT_LOG_FLAGS, "scst", scst_local_trace_tbl);
++
++ mutex_unlock(&scst_log_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
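++
++/*
++ * Illustrative shell usage of the trace_level attribute (not part of SCST),
++ * based on the scst_write_trace() parser above; the sysfs root "scst_tgt"
++ * is registered under /sys/kernel by scst_sysfs_init() below:
++ *
++ *	echo "add mgmt_dbg" > /sys/kernel/scst_tgt/trace_level
++ *	echo "del mgmt_dbg" > /sys/kernel/scst_tgt/trace_level
++ *	echo "value 0x48"   > /sys/kernel/scst_tgt/trace_level
++ *	echo "default"      > /sys/kernel/scst_tgt/trace_level
++ */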
++
++#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
++
++static ssize_t scst_version_show(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ char *buf)
++{
++ TRACE_ENTRY();
++
++ sprintf(buf, "%s\n", SCST_VERSION_STRING);
++
++#ifdef CONFIG_SCST_STRICT_SERIALIZING
++ strcat(buf, "STRICT_SERIALIZING\n");
++#endif
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ strcat(buf, "EXTRACHECKS\n");
++#endif
++
++#ifdef CONFIG_SCST_TRACING
++ strcat(buf, "TRACING\n");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG
++ strcat(buf, "DEBUG\n");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG_TM
++ strcat(buf, "DEBUG_TM\n");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG_RETRY
++ strcat(buf, "DEBUG_RETRY\n");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG_OOM
++ strcat(buf, "DEBUG_OOM\n");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG_SN
++ strcat(buf, "DEBUG_SN\n");
++#endif
++
++#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
++ strcat(buf, "USE_EXPECTED_VALUES\n");
++#endif
++
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ strcat(buf, "TEST_IO_IN_SIRQ\n");
++#endif
++
++#ifdef CONFIG_SCST_STRICT_SECURITY
++ strcat(buf, "STRICT_SECURITY\n");
++#endif
++
++ TRACE_EXIT();
++ return strlen(buf);
++}
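++
++/*
++ * Illustrative only: reading this attribute ("cat /sys/kernel/scst_tgt/version")
++ * prints the SCST version string followed by one line per enabled build option.
++ */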
++
++static ssize_t scst_last_sysfs_mgmt_res_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ spin_lock(&sysfs_work_lock);
++ TRACE_DBG("active_sysfs_works %d", active_sysfs_works);
++ if (active_sysfs_works > 0)
++ res = -EAGAIN;
++ else
++ res = sprintf(buf, "%d\n", last_sysfs_work_res);
++ spin_unlock(&sysfs_work_lock);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct kobj_attribute scst_threads_attr =
++ __ATTR(threads, S_IRUGO | S_IWUSR, scst_threads_show,
++ scst_threads_store);
++
++static struct kobj_attribute scst_setup_id_attr =
++ __ATTR(setup_id, S_IRUGO | S_IWUSR, scst_setup_id_show,
++ scst_setup_id_store);
++
++static ssize_t scst_max_tasklet_cmd_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int count;
++
++ TRACE_ENTRY();
++
++ count = sprintf(buf, "%d\n%s\n", scst_max_tasklet_cmd,
++ (scst_max_tasklet_cmd == SCST_DEF_MAX_TASKLET_CMD)
++ ? "" : SCST_SYSFS_KEY_MARK);
++
++ TRACE_EXIT();
++ return count;
++}
++
++static ssize_t scst_max_tasklet_cmd_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ unsigned long val;
++
++ TRACE_ENTRY();
++
++ res = strict_strtoul(buf, 0, &val);
++ if (res != 0) {
++ PRINT_ERROR("strict_strtoul() for %s failed: %d ", buf, res);
++ goto out;
++ }
++
++ scst_max_tasklet_cmd = val;
++ PRINT_INFO("Changed scst_max_tasklet_cmd to %d", scst_max_tasklet_cmd);
++
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
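++
++/*
++ * Illustrative only (not part of SCST): the attribute can be tuned from the
++ * shell, e.g.
++ *
++ *	echo 20 > /sys/kernel/scst_tgt/max_tasklet_cmd
++ *
++ * Commands are queued to tasklet (SIRQ) context only while the number of
++ * outstanding commands stays at or below this value, see
++ * scst_schedule_tasklet() in scst_targ.c.
++ */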
++
++static struct kobj_attribute scst_max_tasklet_cmd_attr =
++ __ATTR(max_tasklet_cmd, S_IRUGO | S_IWUSR, scst_max_tasklet_cmd_show,
++ scst_max_tasklet_cmd_store);
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++static struct kobj_attribute scst_trace_level_attr =
++ __ATTR(trace_level, S_IRUGO | S_IWUSR, scst_main_trace_level_show,
++ scst_main_trace_level_store);
++#endif
++
++static struct kobj_attribute scst_version_attr =
++ __ATTR(version, S_IRUGO, scst_version_show, NULL);
++
++static struct kobj_attribute scst_last_sysfs_mgmt_res_attr =
++ __ATTR(last_sysfs_mgmt_res, S_IRUGO,
++ scst_last_sysfs_mgmt_res_show, NULL);
++
++static struct attribute *scst_sysfs_root_default_attrs[] = {
++ &scst_threads_attr.attr,
++ &scst_setup_id_attr.attr,
++ &scst_max_tasklet_cmd_attr.attr,
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ &scst_trace_level_attr.attr,
++#endif
++ &scst_version_attr.attr,
++ &scst_last_sysfs_mgmt_res_attr.attr,
++ NULL,
++};
++
++static void scst_sysfs_root_release(struct kobject *kobj)
++{
++ complete_all(&scst_sysfs_root_release_completion);
++}
++
++static struct kobj_type scst_sysfs_root_ktype = {
++ .sysfs_ops = &scst_sysfs_ops,
++ .release = scst_sysfs_root_release,
++ .default_attrs = scst_sysfs_root_default_attrs,
++};
++
++/**
++ ** Dev handlers
++ **/
++
++static void scst_devt_release(struct kobject *kobj)
++{
++ struct scst_dev_type *devt;
++
++ TRACE_ENTRY();
++
++ devt = container_of(kobj, struct scst_dev_type, devt_kobj);
++ complete_all(&devt->devt_kobj_release_compl);
++
++ TRACE_EXIT();
++ return;
++}
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++
++static ssize_t scst_devt_trace_level_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_dev_type *devt;
++
++ devt = container_of(kobj, struct scst_dev_type, devt_kobj);
++
++ return scst_trace_level_show(devt->trace_tbl,
++ devt->trace_flags ? *devt->trace_flags : 0, buf,
++ devt->trace_tbl_help);
++}
++
++static ssize_t scst_devt_trace_level_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ struct scst_dev_type *devt;
++
++ TRACE_ENTRY();
++
++ devt = container_of(kobj, struct scst_dev_type, devt_kobj);
++
++ if (mutex_lock_interruptible(&scst_log_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ res = scst_write_trace(buf, count, devt->trace_flags,
++ devt->default_trace_flags, devt->name, devt->trace_tbl);
++
++ mutex_unlock(&scst_log_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct kobj_attribute devt_trace_attr =
++ __ATTR(trace_level, S_IRUGO | S_IWUSR,
++ scst_devt_trace_level_show, scst_devt_trace_level_store);
++
++#endif /* #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
++
++static ssize_t scst_devt_type_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos;
++ struct scst_dev_type *devt;
++
++ devt = container_of(kobj, struct scst_dev_type, devt_kobj);
++
++ pos = sprintf(buf, "%d - %s\n", devt->type,
++		(unsigned)devt->type >= ARRAY_SIZE(scst_dev_handler_types) ?
++ "unknown" : scst_dev_handler_types[devt->type]);
++
++ return pos;
++}
++
++static struct kobj_attribute scst_devt_type_attr =
++ __ATTR(type, S_IRUGO, scst_devt_type_show, NULL);
++
++static struct attribute *scst_devt_default_attrs[] = {
++ &scst_devt_type_attr.attr,
++ NULL,
++};
++
++static struct kobj_type scst_devt_ktype = {
++ .sysfs_ops = &scst_sysfs_ops,
++ .release = scst_devt_release,
++ .default_attrs = scst_devt_default_attrs,
++};
++
++static ssize_t scst_devt_mgmt_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ char *help = "Usage: echo \"add_device device_name [parameters]\" "
++ ">mgmt\n"
++ " echo \"del_device device_name\" >mgmt\n"
++ "%s%s"
++ "%s"
++ "\n"
++ "where parameters are one or more "
++ "param_name=value pairs separated by ';'\n\n"
++ "%s%s%s%s%s%s%s%s\n";
++ struct scst_dev_type *devt;
++
++ devt = container_of(kobj, struct scst_dev_type, devt_kobj);
++
++ return scnprintf(buf, SCST_SYSFS_BLOCK_SIZE, help,
++ (devt->devt_optional_attributes != NULL) ?
++ " echo \"add_attribute <attribute> <value>\" >mgmt\n"
++ " echo \"del_attribute <attribute> <value>\" >mgmt\n" : "",
++ (devt->dev_optional_attributes != NULL) ?
++	    " echo \"add_device_attribute device_name <attribute> <value>\" >mgmt\n"
++ " echo \"del_device_attribute device_name <attribute> <value>\" >mgmt\n" : "",
++ (devt->mgmt_cmd_help) ? devt->mgmt_cmd_help : "",
++ (devt->add_device_parameters != NULL) ?
++	    "The following parameters are available: " : "",
++ (devt->add_device_parameters != NULL) ?
++ devt->add_device_parameters : "",
++ (devt->devt_optional_attributes != NULL) ?
++	    "The following dev handler attributes are available: " : "",
++ (devt->devt_optional_attributes != NULL) ?
++ devt->devt_optional_attributes : "",
++ (devt->devt_optional_attributes != NULL) ? "\n" : "",
++ (devt->dev_optional_attributes != NULL) ?
++	    "The following device attributes are available: " : "",
++ (devt->dev_optional_attributes != NULL) ?
++ devt->dev_optional_attributes : "",
++ (devt->dev_optional_attributes != NULL) ? "\n" : "");
++}
++
++static int scst_process_devt_mgmt_store(char *buffer,
++ struct scst_dev_type *devt)
++{
++ int res = 0;
++ char *p, *pp, *dev_name;
++
++ TRACE_ENTRY();
++
++ /* Check if our pointer is still alive and, if yes, grab it */
++ if (scst_check_grab_devt_ptr(devt, &scst_virtual_dev_type_list) != 0)
++ goto out;
++
++ TRACE_DBG("devt %p, buffer %s", devt, buffer);
++
++ pp = buffer;
++ if (pp[strlen(pp) - 1] == '\n')
++ pp[strlen(pp) - 1] = '\0';
++
++ p = scst_get_next_lexem(&pp);
++
++ if (strcasecmp("add_device", p) == 0) {
++ dev_name = scst_get_next_lexem(&pp);
++ if (*dev_name == '\0') {
++ PRINT_ERROR("%s", "Device name required");
++ res = -EINVAL;
++ goto out_ungrab;
++ }
++ res = devt->add_device(dev_name, pp);
++ } else if (strcasecmp("del_device", p) == 0) {
++ dev_name = scst_get_next_lexem(&pp);
++ if (*dev_name == '\0') {
++ PRINT_ERROR("%s", "Device name required");
++ res = -EINVAL;
++ goto out_ungrab;
++ }
++
++ p = scst_get_next_lexem(&pp);
++ if (*p != '\0')
++ goto out_syntax_err;
++
++ res = devt->del_device(dev_name);
++ } else if (devt->mgmt_cmd != NULL) {
++ scst_restore_token_str(p, pp);
++ res = devt->mgmt_cmd(buffer);
++ } else {
++ PRINT_ERROR("Unknown action \"%s\"", p);
++ res = -EINVAL;
++ goto out_ungrab;
++ }
++
++out_ungrab:
++ scst_ungrab_devt_ptr(devt);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_syntax_err:
++ PRINT_ERROR("Syntax error on \"%s\"", p);
++ res = -EINVAL;
++ goto out_ungrab;
++}
++
++static int scst_devt_mgmt_store_work_fn(struct scst_sysfs_work_item *work)
++{
++ return scst_process_devt_mgmt_store(work->buf, work->devt);
++}
++
++static ssize_t __scst_devt_mgmt_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count,
++ int (*sysfs_work_fn)(struct scst_sysfs_work_item *work))
++{
++ int res;
++ char *buffer;
++ struct scst_dev_type *devt;
++ struct scst_sysfs_work_item *work;
++
++ TRACE_ENTRY();
++
++ devt = container_of(kobj, struct scst_dev_type, devt_kobj);
++
++ buffer = kzalloc(count+1, GFP_KERNEL);
++ if (buffer == NULL) {
++ res = -ENOMEM;
++ goto out;
++ }
++ memcpy(buffer, buf, count);
++ buffer[count] = '\0';
++
++ res = scst_alloc_sysfs_work(sysfs_work_fn, false, &work);
++ if (res != 0)
++ goto out_free;
++
++ work->buf = buffer;
++ work->devt = devt;
++
++ res = scst_sysfs_queue_wait_work(work);
++ if (res == 0)
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free:
++ kfree(buffer);
++ goto out;
++}
++
++static ssize_t scst_devt_mgmt_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ return __scst_devt_mgmt_store(kobj, attr, buf, count,
++ scst_devt_mgmt_store_work_fn);
++}
++
++static struct kobj_attribute scst_devt_mgmt =
++ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_devt_mgmt_show,
++ scst_devt_mgmt_store);
++
++static ssize_t scst_devt_pass_through_mgmt_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ char *help = "Usage: echo \"add_device H:C:I:L\" >mgmt\n"
++ " echo \"del_device H:C:I:L\" >mgmt\n";
++ return sprintf(buf, "%s", help);
++}
++
++static int scst_process_devt_pass_through_mgmt_store(char *buffer,
++ struct scst_dev_type *devt)
++{
++ int res = 0;
++ char *p, *pp, *action;
++ unsigned long host, channel, id, lun;
++ struct scst_device *d, *dev = NULL;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("devt %p, buffer %s", devt, buffer);
++
++ pp = buffer;
++ if (pp[strlen(pp) - 1] == '\n')
++ pp[strlen(pp) - 1] = '\0';
++
++ action = scst_get_next_lexem(&pp);
++ p = scst_get_next_lexem(&pp);
++ if (*p == '\0') {
++ PRINT_ERROR("%s", "Device required");
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (*scst_get_next_lexem(&pp) != '\0') {
++ PRINT_ERROR("%s", "Too many parameters");
++ res = -EINVAL;
++ goto out_syntax_err;
++ }
++
++ host = simple_strtoul(p, &p, 0);
++ if ((host == ULONG_MAX) || (*p != ':'))
++ goto out_syntax_err;
++ p++;
++ channel = simple_strtoul(p, &p, 0);
++ if ((channel == ULONG_MAX) || (*p != ':'))
++ goto out_syntax_err;
++ p++;
++ id = simple_strtoul(p, &p, 0);
++	if ((id == ULONG_MAX) || (*p != ':'))
++ goto out_syntax_err;
++ p++;
++ lun = simple_strtoul(p, &p, 0);
++ if (lun == ULONG_MAX)
++ goto out_syntax_err;
++
++ TRACE_DBG("Dev %ld:%ld:%ld:%ld", host, channel, id, lun);
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++	/* Check that devt has not been freed while we were getting here */
++ if (scst_check_devt_ptr(devt, &scst_dev_type_list) != 0)
++ goto out_unlock;
++
++ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
++ if ((d->virt_id == 0) &&
++ d->scsi_dev->host->host_no == host &&
++ d->scsi_dev->channel == channel &&
++ d->scsi_dev->id == id &&
++ d->scsi_dev->lun == lun) {
++ dev = d;
++ TRACE_DBG("Dev %p (%ld:%ld:%ld:%ld) found",
++ dev, host, channel, id, lun);
++ break;
++ }
++ }
++ if (dev == NULL) {
++ PRINT_ERROR("Device %ld:%ld:%ld:%ld not found",
++ host, channel, id, lun);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ if (dev->scsi_dev->type != devt->type) {
++ PRINT_ERROR("Type %d of device %s differs from type "
++ "%d of dev handler %s", dev->type,
++ dev->virt_name, devt->type, devt->name);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ if (strcasecmp("add_device", action) == 0) {
++ res = scst_assign_dev_handler(dev, devt);
++ if (res == 0)
++ PRINT_INFO("Device %s assigned to dev handler %s",
++ dev->virt_name, devt->name);
++ } else if (strcasecmp("del_device", action) == 0) {
++ if (dev->handler != devt) {
++ PRINT_ERROR("Device %s is not assigned to handler %s",
++ dev->virt_name, devt->name);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++ res = scst_assign_dev_handler(dev, &scst_null_devtype);
++ if (res == 0)
++ PRINT_INFO("Device %s unassigned from dev handler %s",
++ dev->virt_name, devt->name);
++ } else {
++ PRINT_ERROR("Unknown action \"%s\"", action);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_syntax_err:
++ PRINT_ERROR("Syntax error on \"%s\"", p);
++ res = -EINVAL;
++ goto out;
++}
++
++static int scst_devt_pass_through_mgmt_store_work_fn(
++ struct scst_sysfs_work_item *work)
++{
++ return scst_process_devt_pass_through_mgmt_store(work->buf, work->devt);
++}
++
++static ssize_t scst_devt_pass_through_mgmt_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ return __scst_devt_mgmt_store(kobj, attr, buf, count,
++ scst_devt_pass_through_mgmt_store_work_fn);
++}
++
++static struct kobj_attribute scst_devt_pass_through_mgmt =
++ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_devt_pass_through_mgmt_show,
++ scst_devt_pass_through_mgmt_store);
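++
++/*
++ * Illustrative only (paths are assumptions based on the kobject layout set
++ * up in scst_devt_sysfs_create() and scst_sysfs_init()):
++ *
++ *	echo "add_device 2:0:1:0" > /sys/kernel/scst_tgt/handlers/<handler>/mgmt
++ *	echo "del_device 2:0:1:0" > /sys/kernel/scst_tgt/handlers/<handler>/mgmt
++ *
++ * For virtual dev handlers (those providing add_device()) the mgmt file
++ * instead takes "add_device device_name [parameters]" and
++ * "del_device device_name", as described in scst_devt_mgmt_show() above.
++ */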
++
++int scst_devt_sysfs_create(struct scst_dev_type *devt)
++{
++ int res;
++ struct kobject *parent;
++ const struct attribute **pattr;
++
++ TRACE_ENTRY();
++
++ init_completion(&devt->devt_kobj_release_compl);
++
++ if (devt->parent != NULL)
++ parent = &devt->parent->devt_kobj;
++ else
++ parent = scst_handlers_kobj;
++
++ res = kobject_init_and_add(&devt->devt_kobj, &scst_devt_ktype,
++ parent, devt->name);
++ if (res != 0) {
++ PRINT_ERROR("Can't add devt %s to sysfs", devt->name);
++ goto out;
++ }
++
++ if (devt->add_device != NULL) {
++ res = sysfs_create_file(&devt->devt_kobj,
++ &scst_devt_mgmt.attr);
++ } else {
++ res = sysfs_create_file(&devt->devt_kobj,
++ &scst_devt_pass_through_mgmt.attr);
++ }
++ if (res != 0) {
++ PRINT_ERROR("Can't add mgmt attr for dev handler %s",
++ devt->name);
++ goto out_err;
++ }
++
++ pattr = devt->devt_attrs;
++ if (pattr != NULL) {
++ while (*pattr != NULL) {
++ res = sysfs_create_file(&devt->devt_kobj, *pattr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add devt attr %s for dev "
++ "handler %s", (*pattr)->name,
++ devt->name);
++ goto out_err;
++ }
++ pattr++;
++ }
++ }
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ if (devt->trace_flags != NULL) {
++ res = sysfs_create_file(&devt->devt_kobj,
++ &devt_trace_attr.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add devt trace_flag for dev "
++ "handler %s", devt->name);
++ goto out_err;
++ }
++ }
++#endif
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_err:
++ scst_devt_sysfs_del(devt);
++ goto out;
++}
++
++void scst_devt_sysfs_del(struct scst_dev_type *devt)
++{
++ int rc;
++
++ TRACE_ENTRY();
++
++ kobject_del(&devt->devt_kobj);
++ kobject_put(&devt->devt_kobj);
++
++ rc = wait_for_completion_timeout(&devt->devt_kobj_release_compl, HZ);
++ if (rc == 0) {
++		PRINT_INFO("Waiting for release of sysfs entry "
++ "for dev handler template %s (%d refs)...", devt->name,
++ atomic_read(&devt->devt_kobj.kref.refcount));
++ wait_for_completion(&devt->devt_kobj_release_compl);
++		PRINT_INFO("Done waiting for release of sysfs entry "
++ "for dev handler template %s", devt->name);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ ** Sysfs user info
++ **/
++
++static DEFINE_MUTEX(scst_sysfs_user_info_mutex);
++
++/* All protected by scst_sysfs_user_info_mutex */
++static LIST_HEAD(scst_sysfs_user_info_list);
++static uint32_t scst_sysfs_info_cur_cookie;
++
++/* scst_sysfs_user_info_mutex supposed to be held */
++static struct scst_sysfs_user_info *scst_sysfs_user_find_info(uint32_t cookie)
++{
++ struct scst_sysfs_user_info *info, *res = NULL;
++
++ TRACE_ENTRY();
++
++ list_for_each_entry(info, &scst_sysfs_user_info_list,
++ info_list_entry) {
++ if (info->info_cookie == cookie) {
++ res = info;
++ break;
++ }
++ }
++
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++/**
++ * scst_sysfs_user_get_info() - get user_info
++ *
++ * Finds the user_info based on the cookie and marks it as having received
++ * the reply by setting its info_being_executed flag.
++ *
++ * Returns found entry or NULL.
++ */
++struct scst_sysfs_user_info *scst_sysfs_user_get_info(uint32_t cookie)
++{
++ struct scst_sysfs_user_info *res = NULL;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_sysfs_user_info_mutex);
++
++ res = scst_sysfs_user_find_info(cookie);
++ if (res != NULL) {
++ if (!res->info_being_executed)
++ res->info_being_executed = 1;
++ }
++
++ mutex_unlock(&scst_sysfs_user_info_mutex);
++
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++EXPORT_SYMBOL_GPL(scst_sysfs_user_get_info);
++
++/**
++ ** Helper functionality for target drivers and dev handlers to support
++ ** sending events to user space and waiting for their completion in a safe
++ ** manner. See iscsi-scst or scst_user for examples of how to use it.
++ **/
++
++/**
++ * scst_sysfs_user_add_info() - create and add user_info in the global list
++ *
++ * Creates an info structure and adds it in the info_list.
++ * Returns 0 and out_info on success, error code otherwise.
++ */
++int scst_sysfs_user_add_info(struct scst_sysfs_user_info **out_info)
++{
++ int res = 0;
++ struct scst_sysfs_user_info *info;
++
++ TRACE_ENTRY();
++
++ info = kzalloc(sizeof(*info), GFP_KERNEL);
++ if (info == NULL) {
++ PRINT_ERROR("Unable to allocate sysfs user info (size %zd)",
++ sizeof(*info));
++ res = -ENOMEM;
++ goto out;
++ }
++
++ mutex_lock(&scst_sysfs_user_info_mutex);
++
++ while ((info->info_cookie == 0) ||
++ (scst_sysfs_user_find_info(info->info_cookie) != NULL))
++ info->info_cookie = scst_sysfs_info_cur_cookie++;
++
++ init_completion(&info->info_completion);
++
++ list_add_tail(&info->info_list_entry, &scst_sysfs_user_info_list);
++ info->info_in_list = 1;
++
++ *out_info = info;
++
++ mutex_unlock(&scst_sysfs_user_info_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL_GPL(scst_sysfs_user_add_info);
++
++/**
++ * scst_sysfs_user_del_info - delete and free user_info
++ */
++void scst_sysfs_user_del_info(struct scst_sysfs_user_info *info)
++{
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_sysfs_user_info_mutex);
++
++ if (info->info_in_list)
++ list_del(&info->info_list_entry);
++
++ mutex_unlock(&scst_sysfs_user_info_mutex);
++
++ kfree(info);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_sysfs_user_del_info);
++
++/*
++ * Returns true if the reply has been received and is being processed by
++ * another part of the kernel, false otherwise. Also removes the user_info
++ * from the list to record that user space missed the timeout.
++ */
++static bool scst_sysfs_user_info_executing(struct scst_sysfs_user_info *info)
++{
++ bool res;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_sysfs_user_info_mutex);
++
++ res = info->info_being_executed;
++
++ if (info->info_in_list) {
++ list_del(&info->info_list_entry);
++ info->info_in_list = 0;
++ }
++
++ mutex_unlock(&scst_sysfs_user_info_mutex);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/**
++ * scst_wait_info_completion() - wait for a user space event's completion
++ *
++ * Waits for the info request to be completed by user space for at most
++ * timeout jiffies. If the reply is received before the timeout and is being
++ * processed by another part of the kernel, i.e. scst_sysfs_user_info_executing()
++ * returned true, waits indefinitely for it to complete.
++ *
++ * Returns status of the request completion.
++ */
++int scst_wait_info_completion(struct scst_sysfs_user_info *info,
++ unsigned long timeout)
++{
++ int res, rc;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Waiting for info %p completion", info);
++
++ while (1) {
++ rc = wait_for_completion_interruptible_timeout(
++ &info->info_completion, timeout);
++ if (rc > 0) {
++ TRACE_DBG("Waiting for info %p finished with %d",
++ info, rc);
++ break;
++ } else if (rc == 0) {
++ if (!scst_sysfs_user_info_executing(info)) {
++ PRINT_ERROR("Timeout waiting for user "
++ "space event %p", info);
++ res = -EBUSY;
++ goto out;
++ } else {
++ /* Req is being executed in the kernel */
++ TRACE_DBG("Keep waiting for info %p completion",
++ info);
++ wait_for_completion(&info->info_completion);
++ break;
++ }
++ } else if (rc != -ERESTARTSYS) {
++ res = rc;
++ PRINT_ERROR("wait_for_completion() failed: %d",
++ res);
++ goto out;
++ } else {
++ TRACE_DBG("Waiting for info %p finished with %d, "
++ "retrying", info, rc);
++ }
++ }
++
++ TRACE_DBG("info %p, status %d", info, info->info_status);
++ res = info->info_status;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL_GPL(scst_wait_info_completion);
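++
++/*
++ * Minimal usage sketch of the helpers above (hypothetical caller, not part
++ * of SCST; see iscsi-scst or scst_user for the real users):
++ *
++ *	struct scst_sysfs_user_info *info;
++ *	int rc = scst_sysfs_user_add_info(&info);
++ *	if (rc != 0)
++ *		return rc;
++ *	(pass info->info_cookie to user space here; the reply path is expected
++ *	 to look the request up with scst_sysfs_user_get_info(cookie), set
++ *	 info->info_status and complete info->info_completion)
++ *	rc = scst_wait_info_completion(info, 30 * HZ);
++ *	scst_sysfs_user_del_info(info);
++ *	return rc;
++ */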
++
++int __init scst_sysfs_init(void)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ sysfs_work_thread = kthread_run(sysfs_work_thread_fn,
++ NULL, "scst_uid");
++ if (IS_ERR(sysfs_work_thread)) {
++ res = PTR_ERR(sysfs_work_thread);
++ PRINT_ERROR("kthread_create() for user interface thread "
++ "failed: %d", res);
++ sysfs_work_thread = NULL;
++ goto out;
++ }
++
++ res = kobject_init_and_add(&scst_sysfs_root_kobj,
++ &scst_sysfs_root_ktype, kernel_kobj, "%s", "scst_tgt");
++ if (res != 0)
++ goto sysfs_root_add_error;
++
++ scst_targets_kobj = kobject_create_and_add("targets",
++ &scst_sysfs_root_kobj);
++ if (scst_targets_kobj == NULL)
++ goto targets_kobj_error;
++
++ scst_devices_kobj = kobject_create_and_add("devices",
++ &scst_sysfs_root_kobj);
++ if (scst_devices_kobj == NULL)
++ goto devices_kobj_error;
++
++ scst_sgv_kobj = kzalloc(sizeof(*scst_sgv_kobj), GFP_KERNEL);
++ if (scst_sgv_kobj == NULL)
++ goto sgv_kobj_error;
++
++ res = kobject_init_and_add(scst_sgv_kobj, &sgv_ktype,
++ &scst_sysfs_root_kobj, "%s", "sgv");
++ if (res != 0)
++ goto sgv_kobj_add_error;
++
++ scst_handlers_kobj = kobject_create_and_add("handlers",
++ &scst_sysfs_root_kobj);
++ if (scst_handlers_kobj == NULL)
++ goto handlers_kobj_error;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++handlers_kobj_error:
++ kobject_del(scst_sgv_kobj);
++
++sgv_kobj_add_error:
++ kobject_put(scst_sgv_kobj);
++
++sgv_kobj_error:
++ kobject_del(scst_devices_kobj);
++ kobject_put(scst_devices_kobj);
++
++devices_kobj_error:
++ kobject_del(scst_targets_kobj);
++ kobject_put(scst_targets_kobj);
++
++targets_kobj_error:
++ kobject_del(&scst_sysfs_root_kobj);
++
++sysfs_root_add_error:
++ kobject_put(&scst_sysfs_root_kobj);
++
++ kthread_stop(sysfs_work_thread);
++
++ if (res == 0)
++ res = -EINVAL;
++
++ goto out;
++}
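++
++/*
++ * For reference, the hierarchy created above looks roughly like this
++ * (an illustration, attribute lists abbreviated):
++ *
++ *	/sys/kernel/scst_tgt/
++ *	|-- targets/
++ *	|-- devices/
++ *	|-- sgv/
++ *	|-- handlers/
++ *	|-- threads, setup_id, max_tasklet_cmd, version, last_sysfs_mgmt_res
++ *	`-- trace_level	(only with CONFIG_SCST_DEBUG or CONFIG_SCST_TRACING)
++ */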
++
++void scst_sysfs_cleanup(void)
++{
++ TRACE_ENTRY();
++
++ PRINT_INFO("%s", "Exiting SCST sysfs hierarchy...");
++
++ kobject_del(scst_sgv_kobj);
++ kobject_put(scst_sgv_kobj);
++
++ kobject_del(scst_devices_kobj);
++ kobject_put(scst_devices_kobj);
++
++ kobject_del(scst_targets_kobj);
++ kobject_put(scst_targets_kobj);
++
++ kobject_del(scst_handlers_kobj);
++ kobject_put(scst_handlers_kobj);
++
++ kobject_del(&scst_sysfs_root_kobj);
++ kobject_put(&scst_sysfs_root_kobj);
++
++ wait_for_completion(&scst_sysfs_root_release_completion);
++ /*
++	 * There is a race when, in release(), a reschedule happens just after
++	 * calling complete(), so if we exit and unload the scst module
++	 * immediately, there will be an oops there. So let's give it a chance
++	 * to quit gracefully. Unfortunately, the current kobject implementation
++	 * doesn't allow better ways to handle it.
++ */
++ msleep(3000);
++
++ if (sysfs_work_thread)
++ kthread_stop(sysfs_work_thread);
++
++ PRINT_INFO("%s", "Exiting SCST sysfs hierarchy done");
++
++ TRACE_EXIT();
++ return;
++}
+diff -uprN orig/linux-2.6.36/drivers/scst/scst_targ.c linux-2.6.36/drivers/scst/scst_targ.c
+--- orig/linux-2.6.36/drivers/scst/scst_targ.c
++++ linux-2.6.36/drivers/scst/scst_targ.c
+@@ -0,0 +1,6654 @@
++/*
++ * scst_targ.c
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/smp_lock.h>
++#include <linux/unistd.h>
++#include <linux/string.h>
++#include <linux/kthread.h>
++#include <linux/delay.h>
++#include <linux/ktime.h>
++
++#include <scst/scst.h>
++#include "scst_priv.h"
++#include "scst_pres.h"
++
++#if 0 /* Temporarily left for future performance investigations */
++/* When deleting it, don't forget to delete write_cmd_count */
++#define CONFIG_SCST_ORDERED_READS
++#endif
++
++#if 0 /* Let's disable it for now to see if users will complain about it */
++/* When deleting it, don't forget to delete write_cmd_count */
++#define CONFIG_SCST_PER_DEVICE_CMD_COUNT_LIMIT
++#endif
++
++static void scst_cmd_set_sn(struct scst_cmd *cmd);
++static int __scst_init_cmd(struct scst_cmd *cmd);
++static void scst_finish_cmd_mgmt(struct scst_cmd *cmd);
++static struct scst_cmd *__scst_find_cmd_by_tag(struct scst_session *sess,
++ uint64_t tag, bool to_abort);
++static void scst_process_redirect_cmd(struct scst_cmd *cmd,
++ enum scst_exec_context context, int check_retries);
++
++/**
++ * scst_post_parse() - do post parse actions
++ *
++ * This function must be called by dev handler after its parse() callback
++ * returned SCST_CMD_STATE_STOP before calling scst_process_active_cmd().
++ */
++void scst_post_parse(struct scst_cmd *cmd)
++{
++ scst_set_parse_time(cmd);
++}
++EXPORT_SYMBOL_GPL(scst_post_parse);
++
++/**
++ * scst_post_alloc_data_buf() - do post alloc_data_buf actions
++ *
++ * This function must be called by dev handler after its alloc_data_buf()
++ * callback returned SCST_CMD_STATE_STOP before calling
++ * scst_process_active_cmd().
++ */
++void scst_post_alloc_data_buf(struct scst_cmd *cmd)
++{
++ scst_set_alloc_buf_time(cmd);
++}
++EXPORT_SYMBOL_GPL(scst_post_alloc_data_buf);
++
++static inline void scst_schedule_tasklet(struct scst_cmd *cmd)
++{
++ struct scst_tasklet *t = &scst_tasklets[smp_processor_id()];
++ unsigned long flags;
++
++ if (atomic_read(&scst_cmd_count) <= scst_max_tasklet_cmd) {
++ spin_lock_irqsave(&t->tasklet_lock, flags);
++ TRACE_DBG("Adding cmd %p to tasklet %d cmd list", cmd,
++ smp_processor_id());
++ list_add_tail(&cmd->cmd_list_entry, &t->tasklet_cmd_list);
++ spin_unlock_irqrestore(&t->tasklet_lock, flags);
++
++ tasklet_schedule(&t->tasklet);
++ } else {
++ spin_lock_irqsave(&cmd->cmd_threads->cmd_list_lock, flags);
++ TRACE_DBG("Too many tasklet commands (%d), adding cmd %p to "
++ "active cmd list", atomic_read(&scst_cmd_count), cmd);
++ list_add_tail(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
++ spin_unlock_irqrestore(&cmd->cmd_threads->cmd_list_lock, flags);
++ }
++ return;
++}
++
++/**
++ * scst_rx_cmd() - create new command
++ * @sess: SCST session
++ * @lun: LUN for the command
++ * @lun_len: length of the LUN in bytes
++ * @cdb: CDB of the command
++ * @cdb_len: length of the CDB in bytes
++ * @atomic: true, if current context is atomic
++ *
++ * Description:
++ *    Creates a new SCST command. Returns the new command on success or
++ *    NULL otherwise.
++ *
++ * Must not be called in parallel with scst_unregister_session() for the
++ * same session.
++ */
++struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
++ const uint8_t *lun, int lun_len, const uint8_t *cdb,
++ unsigned int cdb_len, int atomic)
++{
++ struct scst_cmd *cmd;
++
++ TRACE_ENTRY();
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ if (unlikely(sess->shut_phase != SCST_SESS_SPH_READY)) {
++ PRINT_CRIT_ERROR("%s",
++ "New cmd while shutting down the session");
++ BUG();
++ }
++#endif
++
++ cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
++ if (cmd == NULL)
++ goto out;
++
++ cmd->sess = sess;
++ cmd->tgt = sess->tgt;
++ cmd->tgtt = sess->tgt->tgtt;
++
++ cmd->lun = scst_unpack_lun(lun, lun_len);
++ if (unlikely(cmd->lun == NO_SUCH_LUN)) {
++ PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_lun_not_supported));
++ }
++
++ /*
++	 * For cdb_len 0, defer the error reporting until scst_cmd_init_done();
++	 * scst_set_cmd_error() supports nested calls.
++ */
++ if (unlikely(cdb_len > SCST_MAX_CDB_SIZE)) {
++ PRINT_ERROR("Too big CDB len %d, finishing cmd", cdb_len);
++ cdb_len = SCST_MAX_CDB_SIZE;
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_message));
++ }
++
++ memcpy(cmd->cdb, cdb, cdb_len);
++ cmd->cdb_len = cdb_len;
++
++ TRACE_DBG("cmd %p, sess %p", cmd, sess);
++ scst_sess_get(sess);
++
++out:
++ TRACE_EXIT();
++ return cmd;
++}
++EXPORT_SYMBOL(scst_rx_cmd);
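++
++/*
++ * Illustrative target driver flow (a hypothetical caller, not part of SCST;
++ * sess, lun_buf, cdb and atomic are the caller's values):
++ *
++ *	cmd = scst_rx_cmd(sess, lun_buf, lun_len, cdb, cdb_len, atomic);
++ *	if (cmd == NULL)
++ *		return -ENOMEM;
++ *	(fill in target-specific fields, such as the tag, here)
++ *	scst_cmd_init_done(cmd, SCST_CONTEXT_THREAD);
++ */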
++
++/*
++ * No locks, but might be on IRQ. Returns 0 on success, <0 if processing of
++ * this command should be stopped.
++ */
++static int scst_init_cmd(struct scst_cmd *cmd, enum scst_exec_context *context)
++{
++ int rc, res = 0;
++
++ TRACE_ENTRY();
++
++ /* See the comment in scst_do_job_init() */
++ if (unlikely(!list_empty(&scst_init_cmd_list))) {
++ TRACE_MGMT_DBG("%s", "init cmd list busy");
++ goto out_redirect;
++ }
++ /*
++	 * A memory barrier isn't necessary here, because the CPU appears to
++	 * be self-consistent and we don't care about the race described
++	 * in the comment in scst_do_job_init().
++ */
++
++ rc = __scst_init_cmd(cmd);
++ if (unlikely(rc > 0))
++ goto out_redirect;
++ else if (unlikely(rc != 0)) {
++ res = 1;
++ goto out;
++ }
++
++ EXTRACHECKS_BUG_ON(*context == SCST_CONTEXT_SAME);
++
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ scst_get_cdb_info(cmd);
++ if (cmd->op_flags & SCST_TEST_IO_IN_SIRQ_ALLOWED)
++ goto out;
++#endif
++
++ /* Small context optimization */
++ if ((*context == SCST_CONTEXT_TASKLET) ||
++ (*context == SCST_CONTEXT_DIRECT_ATOMIC)) {
++ /*
++ * If any data_direction not set, it's SCST_DATA_UNKNOWN,
++ * which is 0, so we can safely | them
++ */
++ BUILD_BUG_ON(SCST_DATA_UNKNOWN != 0);
++ if ((cmd->data_direction | cmd->expected_data_direction) & SCST_DATA_WRITE) {
++ if (!test_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
++ &cmd->tgt_dev->tgt_dev_flags))
++ *context = SCST_CONTEXT_THREAD;
++ } else
++ *context = SCST_CONTEXT_THREAD;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_redirect:
++ if (cmd->preprocessing_only) {
++ /*
++		 * Poor man's solution for single-threaded targets, where
++		 * blocking the receiver at least sometimes means blocking
++		 * everything. For instance, an iSCSI target won't be able
++		 * to receive Data-Out PDUs.
++ */
++ BUG_ON(*context != SCST_CONTEXT_DIRECT);
++ scst_set_busy(cmd);
++ scst_set_cmd_abnormal_done_state(cmd);
++ res = 1;
++ /* Keep initiator away from too many BUSY commands */
++ msleep(50);
++ } else {
++ unsigned long flags;
++ spin_lock_irqsave(&scst_init_lock, flags);
++ TRACE_MGMT_DBG("Adding cmd %p to init cmd list (scst_cmd_count "
++ "%d)", cmd, atomic_read(&scst_cmd_count));
++ list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
++ if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
++ scst_init_poll_cnt++;
++ spin_unlock_irqrestore(&scst_init_lock, flags);
++ wake_up(&scst_init_cmd_list_waitQ);
++ res = -1;
++ }
++ goto out;
++}
++
++/**
++ * scst_cmd_init_done() - the command's initialization done
++ * @cmd: SCST command
++ * @pref_context: preferred command execution context
++ *
++ * Description:
++ * Notifies SCST that the driver finished its part of the command
++ * initialization, and the command is ready for execution.
++ *    The second argument sets the preferred command execution context.
++ * See SCST_CONTEXT_* constants for details.
++ *
++ * !!IMPORTANT!!
++ *
++ * If cmd->set_sn_on_restart_cmd is not set, this function, as well as
++ * scst_cmd_init_stage1_done() and scst_restart_cmd(), must not be
++ * called simultaneously for the same session (more precisely,
++ * for the same session/LUN, i.e. tgt_dev), i.e. they must be
++ * somehow externally serialized. This is needed to have a lock-free fast
++ * path in scst_cmd_set_sn(). For the majority of targets those functions
++ * are naturally serialized by the single source of commands. Only iSCSI
++ * immediate commands with multiple connections per session seem to be an
++ * exception. For them, some mutex/lock shall be used for the serialization.
++ */
++void scst_cmd_init_done(struct scst_cmd *cmd,
++ enum scst_exec_context pref_context)
++{
++ unsigned long flags;
++ struct scst_session *sess = cmd->sess;
++ int rc;
++
++ TRACE_ENTRY();
++
++ scst_set_start_time(cmd);
++
++ TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
++ TRACE(TRACE_SCSI, "tag=%llu, lun=%lld, CDB len=%d, queue_type=%x "
++ "(cmd %p)", (long long unsigned int)cmd->tag,
++ (long long unsigned int)cmd->lun, cmd->cdb_len,
++ cmd->queue_type, cmd);
++	PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_RCV_BOT, "Receiving CDB",
++ cmd->cdb, cmd->cdb_len);
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ if (unlikely((in_irq() || irqs_disabled())) &&
++ ((pref_context == SCST_CONTEXT_DIRECT) ||
++ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
++ PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
++ "SCST_CONTEXT_THREAD instead", pref_context,
++ cmd->tgtt->name);
++ dump_stack();
++ pref_context = SCST_CONTEXT_THREAD;
++ }
++#endif
++
++ atomic_inc(&sess->sess_cmd_count);
++
++ spin_lock_irqsave(&sess->sess_list_lock, flags);
++
++ if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
++ /*
++ * We must always keep commands in the sess list from the
++ * very beginning, because otherwise they can be missed during
++ * TM processing. This check is needed because there might be
++		 * old, i.e. deferred, commands and new, i.e. just arriving, ones.
++ */
++ if (cmd->sess_cmd_list_entry.next == NULL)
++ list_add_tail(&cmd->sess_cmd_list_entry,
++ &sess->sess_cmd_list);
++ switch (sess->init_phase) {
++ case SCST_SESS_IPH_SUCCESS:
++ break;
++ case SCST_SESS_IPH_INITING:
++ TRACE_DBG("Adding cmd %p to init deferred cmd list",
++ cmd);
++ list_add_tail(&cmd->cmd_list_entry,
++ &sess->init_deferred_cmd_list);
++ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
++ goto out;
++ case SCST_SESS_IPH_FAILED:
++ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
++ scst_set_busy(cmd);
++ scst_set_cmd_abnormal_done_state(cmd);
++ goto active;
++ default:
++ BUG();
++ }
++ } else
++ list_add_tail(&cmd->sess_cmd_list_entry,
++ &sess->sess_cmd_list);
++
++ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
++
++ if (unlikely(cmd->cdb_len == 0)) {
++ PRINT_ERROR("%s", "Wrong CDB len 0, finishing cmd");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
++ scst_set_cmd_abnormal_done_state(cmd);
++ goto active;
++ }
++
++ if (unlikely(cmd->queue_type >= SCST_CMD_QUEUE_ACA)) {
++ PRINT_ERROR("Unsupported queue type %d", cmd->queue_type);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_message));
++ goto active;
++ }
++
++ /*
++	 * The cmd must be initialized here to preserve ordering. Even if the
++	 * cmd was already preliminarily completed by the target driver, we
++	 * need to init it anyway to find out in which format to return sense.
++ */
++ cmd->state = SCST_CMD_STATE_INIT;
++ rc = scst_init_cmd(cmd, &pref_context);
++ if (unlikely(rc < 0))
++ goto out;
++
++active:
++ /* Here cmd must not be in any cmd list, no locks */
++ switch (pref_context) {
++ case SCST_CONTEXT_TASKLET:
++ scst_schedule_tasklet(cmd);
++ break;
++
++ default:
++ PRINT_ERROR("Context %x is undefined, using the thread one",
++ pref_context);
++		/* fall through */
++ case SCST_CONTEXT_THREAD:
++ spin_lock_irqsave(&cmd->cmd_threads->cmd_list_lock, flags);
++ TRACE_DBG("Adding cmd %p to active cmd list", cmd);
++ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
++ list_add(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ else
++ list_add_tail(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
++ spin_unlock_irqrestore(&cmd->cmd_threads->cmd_list_lock, flags);
++ break;
++
++ case SCST_CONTEXT_DIRECT:
++ scst_process_active_cmd(cmd, false);
++ break;
++
++ case SCST_CONTEXT_DIRECT_ATOMIC:
++ scst_process_active_cmd(cmd, true);
++ break;
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL(scst_cmd_init_done);
++
++static int scst_pre_parse(struct scst_cmd *cmd)
++{
++ int res;
++ struct scst_device *dev = cmd->dev;
++ int rc;
++
++ TRACE_ENTRY();
++
++ /*
++ * Expected transfer data supplied by the SCSI transport via the
++ * target driver are untrusted, so we prefer to fetch them from CDB.
++ * Additionally, not all transports support supplying the expected
++ * transfer data.
++ */
++
++ rc = scst_get_cdb_info(cmd);
++ if (unlikely(rc != 0)) {
++ if (rc > 0) {
++ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
++ goto out_err;
++ }
++
++ EXTRACHECKS_BUG_ON(cmd->op_flags & SCST_INFO_VALID);
++
++ TRACE(TRACE_MINOR, "Unknown opcode 0x%02x for %s. "
++ "Should you update scst_scsi_op_table?",
++ cmd->cdb[0], dev->handler->name);
++ PRINT_BUFF_FLAG(TRACE_MINOR, "Failed CDB", cmd->cdb,
++ cmd->cdb_len);
++ } else
++ EXTRACHECKS_BUG_ON(!(cmd->op_flags & SCST_INFO_VALID));
++
++#ifdef CONFIG_SCST_STRICT_SERIALIZING
++ cmd->inc_expected_sn_on_done = 1;
++#else
++ cmd->inc_expected_sn_on_done = dev->handler->exec_sync ||
++ (!dev->has_own_order_mgmt &&
++ (dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER ||
++ cmd->queue_type == SCST_CMD_QUEUE_ORDERED));
++#endif
++
++ TRACE_DBG("op_name <%s> (cmd %p), direction=%d "
++ "(expected %d, set %s), bufflen=%d, out_bufflen=%d (expected "
++ "len %d, out expected len %d), flags=%d", cmd->op_name, cmd,
++ cmd->data_direction, cmd->expected_data_direction,
++ scst_cmd_is_expected_set(cmd) ? "yes" : "no",
++ cmd->bufflen, cmd->out_bufflen, cmd->expected_transfer_len,
++ cmd->expected_out_transfer_len, cmd->op_flags);
++
++ res = 0;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_err:
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ scst_set_cmd_abnormal_done_state(cmd);
++ res = -1;
++ goto out;
++}
++
++#ifndef CONFIG_SCST_USE_EXPECTED_VALUES
++static bool scst_is_allowed_to_mismatch_cmd(struct scst_cmd *cmd)
++{
++ bool res = false;
++
++ /* VERIFY commands with BYTCHK unset shouldn't fail here */
++ if ((cmd->op_flags & SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED) &&
++ (cmd->cdb[1] & BYTCHK) == 0) {
++ res = true;
++ goto out;
++ }
++
++ switch (cmd->cdb[0]) {
++ case TEST_UNIT_READY:
++ /* Crazy VMware people sometimes do TUR with READ direction */
++ if ((cmd->expected_data_direction == SCST_DATA_READ) ||
++ (cmd->expected_data_direction == SCST_DATA_NONE))
++ res = true;
++ break;
++ }
++
++out:
++ return res;
++}
++#endif
++
++static int scst_parse_cmd(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_RES_CONT_SAME;
++ int state;
++ struct scst_device *dev = cmd->dev;
++ int orig_bufflen = cmd->bufflen;
++
++ TRACE_ENTRY();
++
++ if (likely(!scst_is_cmd_fully_local(cmd))) {
++ if (unlikely(!dev->handler->parse_atomic &&
++ scst_cmd_atomic(cmd))) {
++ /*
++ * It shouldn't be because of the SCST_TGT_DEV_AFTER_*
++ * optimization.
++ */
++ TRACE_MGMT_DBG("Dev handler %s parse() needs thread "
++ "context, rescheduling", dev->handler->name);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ }
++
++ TRACE_DBG("Calling dev handler %s parse(%p)",
++ dev->handler->name, cmd);
++ TRACE_BUFF_FLAG(TRACE_SND_BOT, "Parsing: ",
++ cmd->cdb, cmd->cdb_len);
++ scst_set_cur_start(cmd);
++ state = dev->handler->parse(cmd);
++ /* Caution: cmd can be already dead here */
++ TRACE_DBG("Dev handler %s parse() returned %d",
++ dev->handler->name, state);
++
++ switch (state) {
++ case SCST_CMD_STATE_NEED_THREAD_CTX:
++ scst_set_parse_time(cmd);
++ TRACE_DBG("Dev handler %s parse() requested thread "
++ "context, rescheduling", dev->handler->name);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++
++ case SCST_CMD_STATE_STOP:
++ TRACE_DBG("Dev handler %s parse() requested stop "
++ "processing", dev->handler->name);
++ res = SCST_CMD_STATE_RES_CONT_NEXT;
++ goto out;
++ }
++
++ scst_set_parse_time(cmd);
++
++ if (state == SCST_CMD_STATE_DEFAULT)
++ state = SCST_CMD_STATE_PREPARE_SPACE;
++ } else
++ state = SCST_CMD_STATE_PREPARE_SPACE;
++
++ if (unlikely(state == SCST_CMD_STATE_PRE_XMIT_RESP))
++ goto set_res;
++
++ if (unlikely(!(cmd->op_flags & SCST_INFO_VALID))) {
++#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
++ if (scst_cmd_is_expected_set(cmd)) {
++ TRACE(TRACE_MINOR, "Using initiator supplied values: "
++ "direction %d, transfer_len %d/%d",
++ cmd->expected_data_direction,
++ cmd->expected_transfer_len,
++ cmd->expected_out_transfer_len);
++ cmd->data_direction = cmd->expected_data_direction;
++ cmd->bufflen = cmd->expected_transfer_len;
++ cmd->out_bufflen = cmd->expected_out_transfer_len;
++ } else {
++			PRINT_ERROR("Unknown opcode 0x%02x for %s and "
++			     "target %s did not supply expected values",
++ cmd->cdb[0], dev->handler->name, cmd->tgtt->name);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
++ goto out_done;
++ }
++#else
++ /*
++ * Let's ignore reporting T10/04-262r7 16-byte and 12-byte ATA
++ * pass-thru commands to not pollute logs (udev(?) checks them
++		 * for some reason). If somebody has their description, please
++		 * update scst_scsi_op_table.
++ */
++ if ((cmd->cdb[0] != 0x85) && (cmd->cdb[0] != 0xa1))
++ PRINT_ERROR("Refusing unknown opcode %x", cmd->cdb[0]);
++ else
++ TRACE(TRACE_MINOR, "Refusing unknown opcode %x",
++ cmd->cdb[0]);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
++ goto out_done;
++#endif
++ }
++
++ if (unlikely(cmd->cdb_len == 0)) {
++ PRINT_ERROR("Unable to get CDB length for "
++ "opcode 0x%02x. Returning INVALID "
++ "OPCODE", cmd->cdb[0]);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
++ goto out_done;
++ }
++
++ EXTRACHECKS_BUG_ON(cmd->cdb_len == 0);
++
++ TRACE(TRACE_SCSI, "op_name <%s> (cmd %p), direction=%d "
++ "(expected %d, set %s), bufflen=%d, out_bufflen=%d, (expected "
++ "len %d, out expected len %d), flags=%x", cmd->op_name, cmd,
++ cmd->data_direction, cmd->expected_data_direction,
++ scst_cmd_is_expected_set(cmd) ? "yes" : "no",
++ cmd->bufflen, cmd->out_bufflen, cmd->expected_transfer_len,
++ cmd->expected_out_transfer_len, cmd->op_flags);
++
++ if (unlikely((cmd->op_flags & SCST_UNKNOWN_LENGTH) != 0)) {
++ if (scst_cmd_is_expected_set(cmd)) {
++ /*
++ * Command data length can't be easily
++			 * determined from the CDB. ToDo: processing of all
++			 * such commands should be fixed. Until
++ * it's done, get the length from the supplied
++ * expected value, but limit it to some
++ * reasonable value (15MB).
++ */
++ cmd->bufflen = min(cmd->expected_transfer_len,
++ 15*1024*1024);
++ if (cmd->data_direction == SCST_DATA_BIDI)
++ cmd->out_bufflen = min(cmd->expected_out_transfer_len,
++ 15*1024*1024);
++ cmd->op_flags &= ~SCST_UNKNOWN_LENGTH;
++ } else {
++ PRINT_ERROR("Unknown data transfer length for opcode "
++ "0x%x (handler %s, target %s)", cmd->cdb[0],
++ dev->handler->name, cmd->tgtt->name);
++ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_message));
++ goto out_done;
++ }
++ }
++
++ if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
++ PRINT_ERROR("NACA bit in control byte CDB is not supported "
++ "(opcode 0x%02x)", cmd->cdb[0]);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_done;
++ }
++
++ if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
++ PRINT_ERROR("Linked commands are not supported "
++ "(opcode 0x%02x)", cmd->cdb[0]);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_done;
++ }
++
++ if (cmd->dh_data_buf_alloced &&
++ unlikely((orig_bufflen > cmd->bufflen))) {
++		PRINT_ERROR("Dev handler supplied data buffer (size %d) "
++			"is smaller than required (size %d)", cmd->bufflen,
++ orig_bufflen);
++ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
++ goto out_hw_error;
++ }
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ if ((cmd->bufflen != 0) &&
++ ((cmd->data_direction == SCST_DATA_NONE) ||
++ ((cmd->sg == NULL) && (state > SCST_CMD_STATE_PREPARE_SPACE)))) {
++ PRINT_ERROR("Dev handler %s parse() returned "
++ "invalid cmd data_direction %d, bufflen %d, state %d "
++ "or sg %p (opcode 0x%x)", dev->handler->name,
++ cmd->data_direction, cmd->bufflen, state, cmd->sg,
++ cmd->cdb[0]);
++ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
++ goto out_hw_error;
++ }
++#endif
++
++ if (scst_cmd_is_expected_set(cmd)) {
++#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
++ if (unlikely((cmd->data_direction != cmd->expected_data_direction) ||
++ (cmd->bufflen != cmd->expected_transfer_len) ||
++ (cmd->out_bufflen != cmd->expected_out_transfer_len))) {
++ TRACE(TRACE_MINOR, "Expected values don't match "
++ "decoded ones: data_direction %d, "
++ "expected_data_direction %d, "
++ "bufflen %d, expected_transfer_len %d, "
++ "out_bufflen %d, expected_out_transfer_len %d",
++ cmd->data_direction,
++ cmd->expected_data_direction,
++ cmd->bufflen, cmd->expected_transfer_len,
++ cmd->out_bufflen, cmd->expected_out_transfer_len);
++ PRINT_BUFF_FLAG(TRACE_MINOR, "Suspicious CDB",
++ cmd->cdb, cmd->cdb_len);
++ cmd->data_direction = cmd->expected_data_direction;
++ cmd->bufflen = cmd->expected_transfer_len;
++ cmd->out_bufflen = cmd->expected_out_transfer_len;
++ cmd->resid_possible = 1;
++ }
++#else
++ if (unlikely(cmd->data_direction !=
++ cmd->expected_data_direction)) {
++ if (((cmd->expected_data_direction != SCST_DATA_NONE) ||
++ (cmd->bufflen != 0)) &&
++ !scst_is_allowed_to_mismatch_cmd(cmd)) {
++ PRINT_ERROR("Expected data direction %d for "
++ "opcode 0x%02x (handler %s, target %s) "
++ "doesn't match decoded value %d",
++ cmd->expected_data_direction,
++ cmd->cdb[0], dev->handler->name,
++ cmd->tgtt->name, cmd->data_direction);
++ PRINT_BUFFER("Failed CDB", cmd->cdb,
++ cmd->cdb_len);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_message));
++ goto out_done;
++ }
++ }
++ if (unlikely(cmd->bufflen != cmd->expected_transfer_len)) {
++ TRACE(TRACE_MINOR, "Warning: expected "
++ "transfer length %d for opcode 0x%02x "
++ "(handler %s, target %s) doesn't match "
++ "decoded value %d",
++ cmd->expected_transfer_len, cmd->cdb[0],
++ dev->handler->name, cmd->tgtt->name,
++ cmd->bufflen);
++ PRINT_BUFF_FLAG(TRACE_MINOR, "Suspicious CDB",
++ cmd->cdb, cmd->cdb_len);
++ if ((cmd->data_direction & SCST_DATA_READ) ||
++ (cmd->data_direction & SCST_DATA_WRITE))
++ cmd->resid_possible = 1;
++ }
++ if (unlikely(cmd->out_bufflen != cmd->expected_out_transfer_len)) {
++ TRACE(TRACE_MINOR, "Warning: expected bidirectional OUT "
++ "transfer length %d for opcode 0x%02x "
++ "(handler %s, target %s) doesn't match "
++ "decoded value %d",
++ cmd->expected_out_transfer_len, cmd->cdb[0],
++ dev->handler->name, cmd->tgtt->name,
++ cmd->out_bufflen);
++ PRINT_BUFF_FLAG(TRACE_MINOR, "Suspicious CDB",
++ cmd->cdb, cmd->cdb_len);
++ cmd->resid_possible = 1;
++ }
++#endif
++ }
++
++ if (unlikely(cmd->data_direction == SCST_DATA_UNKNOWN)) {
++ PRINT_ERROR("Unknown data direction. Opcode 0x%x, handler %s, "
++ "target %s", cmd->cdb[0], dev->handler->name,
++ cmd->tgtt->name);
++ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
++ goto out_hw_error;
++ }
++
++set_res:
++ if (cmd->data_len == -1)
++ cmd->data_len = cmd->bufflen;
++
++ if (cmd->bufflen == 0) {
++ /*
++		 * According to SPC, bufflen 0 for data transfer commands isn't
++ * an error, so we need to fix the transfer direction.
++ */
++ cmd->data_direction = SCST_DATA_NONE;
++ }
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ switch (state) {
++ case SCST_CMD_STATE_PREPARE_SPACE:
++ case SCST_CMD_STATE_PARSE:
++ case SCST_CMD_STATE_RDY_TO_XFER:
++ case SCST_CMD_STATE_TGT_PRE_EXEC:
++ case SCST_CMD_STATE_SEND_FOR_EXEC:
++ case SCST_CMD_STATE_LOCAL_EXEC:
++ case SCST_CMD_STATE_REAL_EXEC:
++ case SCST_CMD_STATE_PRE_DEV_DONE:
++ case SCST_CMD_STATE_DEV_DONE:
++ case SCST_CMD_STATE_PRE_XMIT_RESP:
++ case SCST_CMD_STATE_XMIT_RESP:
++ case SCST_CMD_STATE_FINISHED:
++ case SCST_CMD_STATE_FINISHED_INTERNAL:
++#endif
++ cmd->state = state;
++ res = SCST_CMD_STATE_RES_CONT_SAME;
++#ifdef CONFIG_SCST_EXTRACHECKS
++ break;
++
++ default:
++ if (state >= 0) {
++ PRINT_ERROR("Dev handler %s parse() returned "
++ "invalid cmd state %d (opcode %d)",
++ dev->handler->name, state, cmd->cdb[0]);
++ } else {
++ PRINT_ERROR("Dev handler %s parse() returned "
++ "error %d (opcode %d)", dev->handler->name,
++ state, cmd->cdb[0]);
++ }
++ goto out_hw_error;
++ }
++#endif
++
++ if (cmd->resp_data_len == -1) {
++ if (cmd->data_direction & SCST_DATA_READ)
++ cmd->resp_data_len = cmd->bufflen;
++ else
++ cmd->resp_data_len = 0;
++ }
++
++ /* We already completed (with an error) */
++ if (unlikely(cmd->completed))
++ goto out_done;
++
++#ifndef CONFIG_SCST_TEST_IO_IN_SIRQ
++ /*
++ * We can't allow atomic command on the exec stages. It shouldn't
++ * be because of the SCST_TGT_DEV_AFTER_* optimization, but during
++ * parsing data_direction can change, so we need to recheck.
++ */
++ if (unlikely(scst_cmd_atomic(cmd) &&
++ !(cmd->data_direction & SCST_DATA_WRITE))) {
++ TRACE_DBG_FLAG(TRACE_DEBUG|TRACE_MINOR, "Atomic context and "
++ "non-WRITE data direction, rescheduling (cmd %p)", cmd);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ }
++#endif
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++
++out_hw_error:
++ /* dev_done() will be called as part of the regular cmd's finish */
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
++
++out_done:
++ scst_set_cmd_abnormal_done_state(cmd);
++ res = SCST_CMD_STATE_RES_CONT_SAME;
++ goto out;
++}
++
++static void scst_set_write_len(struct scst_cmd *cmd)
++{
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(!(cmd->data_direction & SCST_DATA_WRITE));
++
++ if (cmd->data_direction & SCST_DATA_READ) {
++ cmd->write_len = cmd->out_bufflen;
++ cmd->write_sg = &cmd->out_sg;
++ cmd->write_sg_cnt = &cmd->out_sg_cnt;
++ } else {
++ cmd->write_len = cmd->bufflen;
++ /* write_sg and write_sg_cnt already initialized correctly */
++ }
++
++ TRACE_MEM("cmd %p, write_len %d, write_sg %p, write_sg_cnt %d, "
++ "resid_possible %d", cmd, cmd->write_len, *cmd->write_sg,
++ *cmd->write_sg_cnt, cmd->resid_possible);
++
++ if (unlikely(cmd->resid_possible)) {
++ if (cmd->data_direction & SCST_DATA_READ) {
++ cmd->write_len = min(cmd->out_bufflen,
++ cmd->expected_out_transfer_len);
++ if (cmd->write_len == cmd->out_bufflen)
++ goto out;
++ } else {
++ cmd->write_len = min(cmd->bufflen,
++ cmd->expected_transfer_len);
++ if (cmd->write_len == cmd->bufflen)
++ goto out;
++ }
++ scst_limit_sg_write_len(cmd);
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static int scst_prepare_space(struct scst_cmd *cmd)
++{
++ int r = 0, res = SCST_CMD_STATE_RES_CONT_SAME;
++ struct scst_device *dev = cmd->dev;
++
++ TRACE_ENTRY();
++
++ if (cmd->data_direction == SCST_DATA_NONE)
++ goto done;
++
++ if (likely(!scst_is_cmd_fully_local(cmd)) &&
++ (dev->handler->alloc_data_buf != NULL)) {
++ int state;
++
++ if (unlikely(!dev->handler->alloc_data_buf_atomic &&
++ scst_cmd_atomic(cmd))) {
++ /*
++ * It shouldn't be because of the SCST_TGT_DEV_AFTER_*
++ * optimization.
++ */
++ TRACE_MGMT_DBG("Dev handler %s alloc_data_buf() needs "
++ "thread context, rescheduling",
++ dev->handler->name);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ }
++
++ TRACE_DBG("Calling dev handler %s alloc_data_buf(%p)",
++ dev->handler->name, cmd);
++ scst_set_cur_start(cmd);
++ state = dev->handler->alloc_data_buf(cmd);
++ /* Caution: cmd can be already dead here */
++ TRACE_DBG("Dev handler %s alloc_data_buf() returned %d",
++ dev->handler->name, state);
++
++ switch (state) {
++ case SCST_CMD_STATE_NEED_THREAD_CTX:
++ scst_set_alloc_buf_time(cmd);
++ TRACE_DBG("Dev handler %s alloc_data_buf() requested "
++ "thread context, rescheduling",
++ dev->handler->name);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++
++ case SCST_CMD_STATE_STOP:
++ TRACE_DBG("Dev handler %s alloc_data_buf() requested "
++ "stop processing", dev->handler->name);
++ res = SCST_CMD_STATE_RES_CONT_NEXT;
++ goto out;
++ }
++
++ scst_set_alloc_buf_time(cmd);
++
++ if (unlikely(state != SCST_CMD_STATE_DEFAULT)) {
++ cmd->state = state;
++ goto out;
++ }
++ }
++
++ if (cmd->tgt_need_alloc_data_buf) {
++ int orig_bufflen = cmd->bufflen;
++
++ TRACE_MEM("Custom tgt data buf allocation requested (cmd %p)",
++ cmd);
++
++ scst_set_cur_start(cmd);
++ r = cmd->tgtt->alloc_data_buf(cmd);
++ scst_set_alloc_buf_time(cmd);
++
++ if (r > 0)
++ goto alloc;
++ else if (r == 0) {
++ if (unlikely(cmd->bufflen == 0)) {
++ /* See comment in scst_alloc_space() */
++ if (cmd->sg == NULL)
++ goto alloc;
++ }
++
++ cmd->tgt_data_buf_alloced = 1;
++
++ if (unlikely(orig_bufflen < cmd->bufflen)) {
++ PRINT_ERROR("Target driver allocated data "
++				"buffer (size %d) is smaller than "
++				"required (size %d)", orig_bufflen,
++ cmd->bufflen);
++ goto out_error;
++ }
++ TRACE_MEM("tgt_data_buf_alloced (cmd %p)", cmd);
++ } else
++ goto check;
++ }
++
++alloc:
++ if (!cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
++ r = scst_alloc_space(cmd);
++ } else if (cmd->dh_data_buf_alloced && !cmd->tgt_data_buf_alloced) {
++ TRACE_MEM("dh_data_buf_alloced set (cmd %p)", cmd);
++ r = 0;
++ } else if (cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
++ TRACE_MEM("tgt_data_buf_alloced set (cmd %p)", cmd);
++ cmd->sg = cmd->tgt_sg;
++ cmd->sg_cnt = cmd->tgt_sg_cnt;
++ cmd->out_sg = cmd->tgt_out_sg;
++ cmd->out_sg_cnt = cmd->tgt_out_sg_cnt;
++ r = 0;
++ } else {
++ TRACE_MEM("Both *_data_buf_alloced set (cmd %p, sg %p, "
++ "sg_cnt %d, tgt_sg %p, tgt_sg_cnt %d)", cmd, cmd->sg,
++ cmd->sg_cnt, cmd->tgt_sg, cmd->tgt_sg_cnt);
++ r = 0;
++ }
++
++check:
++ if (r != 0) {
++ if (scst_cmd_atomic(cmd)) {
++ TRACE_MEM("%s", "Atomic memory allocation failed, "
++ "rescheduling to the thread");
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ } else
++ goto out_no_space;
++ }
++
++done:
++ if (cmd->preprocessing_only) {
++ cmd->state = SCST_CMD_STATE_PREPROCESSING_DONE;
++ if (cmd->data_direction & SCST_DATA_WRITE)
++ scst_set_write_len(cmd);
++ } else if (cmd->data_direction & SCST_DATA_WRITE) {
++ cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
++ scst_set_write_len(cmd);
++ } else
++ cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++
++out_no_space:
++ TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
++ "(size %d), sending BUSY or QUEUE FULL status", cmd->bufflen);
++ scst_set_busy(cmd);
++ scst_set_cmd_abnormal_done_state(cmd);
++ res = SCST_CMD_STATE_RES_CONT_SAME;
++ goto out;
++
++out_error:
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
++ scst_set_cmd_abnormal_done_state(cmd);
++ res = SCST_CMD_STATE_RES_CONT_SAME;
++ goto out;
++}
++
++static int scst_preprocessing_done(struct scst_cmd *cmd)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(!cmd->preprocessing_only);
++
++ cmd->preprocessing_only = 0;
++
++ res = SCST_CMD_STATE_RES_CONT_NEXT;
++ cmd->state = SCST_CMD_STATE_PREPROCESSING_DONE_CALLED;
++
++ TRACE_DBG("Calling preprocessing_done(cmd %p)", cmd);
++ scst_set_cur_start(cmd);
++ cmd->tgtt->preprocessing_done(cmd);
++ TRACE_DBG("%s", "preprocessing_done() returned");
++
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++/**
++ * scst_restart_cmd() - restart execution of the command
++ * @cmd:	SCST command
++ * @status:	completion status
++ * @pref_context: preferred command execution context
++ *
++ * Description:
++ * Notifies SCST that the driver finished its part of the command's
++ * preprocessing and it is ready for further processing.
++ *
++ * The second argument sets completion status
++ * (see SCST_PREPROCESS_STATUS_* constants for details)
++ *
++ * See also comment for scst_cmd_init_done() for the serialization
++ * requirements.
++ */
++void scst_restart_cmd(struct scst_cmd *cmd, int status,
++ enum scst_exec_context pref_context)
++{
++ TRACE_ENTRY();
++
++ scst_set_restart_waiting_time(cmd);
++
++ TRACE_DBG("Preferred context: %d", pref_context);
++ TRACE_DBG("tag=%llu, status=%#x",
++ (long long unsigned int)scst_cmd_get_tag(cmd),
++ status);
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ if ((in_irq() || irqs_disabled()) &&
++ ((pref_context == SCST_CONTEXT_DIRECT) ||
++ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
++ PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
++ "SCST_CONTEXT_THREAD instead", pref_context,
++ cmd->tgtt->name);
++ dump_stack();
++ pref_context = SCST_CONTEXT_THREAD;
++ }
++#endif
++
++ switch (status) {
++ case SCST_PREPROCESS_STATUS_SUCCESS:
++ if (cmd->data_direction & SCST_DATA_WRITE)
++ cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
++ else
++ cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
++ if (cmd->set_sn_on_restart_cmd)
++ scst_cmd_set_sn(cmd);
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ if (cmd->op_flags & SCST_TEST_IO_IN_SIRQ_ALLOWED)
++ break;
++#endif
++ /* Small context optimization */
++ if ((pref_context == SCST_CONTEXT_TASKLET) ||
++ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
++ ((pref_context == SCST_CONTEXT_SAME) &&
++ scst_cmd_atomic(cmd)))
++ pref_context = SCST_CONTEXT_THREAD;
++ break;
++
++ case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
++ scst_set_cmd_abnormal_done_state(cmd);
++ pref_context = SCST_CONTEXT_THREAD;
++ break;
++
++ case SCST_PREPROCESS_STATUS_ERROR_FATAL:
++ set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
++		/* fall through */
++ case SCST_PREPROCESS_STATUS_ERROR:
++ if (cmd->sense != NULL)
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ scst_set_cmd_abnormal_done_state(cmd);
++ pref_context = SCST_CONTEXT_THREAD;
++ break;
++
++ default:
++ PRINT_ERROR("%s() received unknown status %x", __func__,
++ status);
++ scst_set_cmd_abnormal_done_state(cmd);
++ pref_context = SCST_CONTEXT_THREAD;
++ break;
++ }
++
++ scst_process_redirect_cmd(cmd, pref_context, 1);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL(scst_restart_cmd);
++
++static int scst_rdy_to_xfer(struct scst_cmd *cmd)
++{
++ int res, rc;
++ struct scst_tgt_template *tgtt = cmd->tgtt;
++
++ TRACE_ENTRY();
++
++ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
++ TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
++ goto out_dev_done;
++ }
++
++ if ((tgtt->rdy_to_xfer == NULL) || unlikely(cmd->internal)) {
++ cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
++#ifndef CONFIG_SCST_TEST_IO_IN_SIRQ
++ /* We can't allow atomic command on the exec stages */
++ if (scst_cmd_atomic(cmd)) {
++ TRACE_DBG("NULL rdy_to_xfer() and atomic context, "
++ "rescheduling (cmd %p)", cmd);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ } else
++#endif
++ res = SCST_CMD_STATE_RES_CONT_SAME;
++ goto out;
++ }
++
++ if (unlikely(!tgtt->rdy_to_xfer_atomic && scst_cmd_atomic(cmd))) {
++ /*
++		 * This shouldn't happen because of the SCST_TGT_DEV_AFTER_*
++ * optimization.
++ */
++ TRACE_MGMT_DBG("Target driver %s rdy_to_xfer() needs thread "
++ "context, rescheduling", tgtt->name);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ }
++
++ while (1) {
++ int finished_cmds = atomic_read(&cmd->tgt->finished_cmds);
++
++ res = SCST_CMD_STATE_RES_CONT_NEXT;
++ cmd->state = SCST_CMD_STATE_DATA_WAIT;
++
++ if (tgtt->on_hw_pending_cmd_timeout != NULL) {
++ struct scst_session *sess = cmd->sess;
++ cmd->hw_pending_start = jiffies;
++ cmd->cmd_hw_pending = 1;
++ if (!test_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags)) {
++ TRACE_DBG("Sched HW pending work for sess %p "
++ "(max time %d)", sess,
++ tgtt->max_hw_pending_time);
++ set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED,
++ &sess->sess_aflags);
++ schedule_delayed_work(&sess->hw_pending_work,
++ tgtt->max_hw_pending_time * HZ);
++ }
++ }
++
++ scst_set_cur_start(cmd);
++
++ TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
++#ifdef CONFIG_SCST_DEBUG_RETRY
++ if (((scst_random() % 100) == 75))
++ rc = SCST_TGT_RES_QUEUE_FULL;
++ else
++#endif
++ rc = tgtt->rdy_to_xfer(cmd);
++ TRACE_DBG("rdy_to_xfer() returned %d", rc);
++
++ if (likely(rc == SCST_TGT_RES_SUCCESS))
++ goto out;
++
++ scst_set_rdy_to_xfer_time(cmd);
++
++ cmd->cmd_hw_pending = 0;
++
++ /* Restore the previous state */
++ cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
++
++ switch (rc) {
++ case SCST_TGT_RES_QUEUE_FULL:
++ if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
++ break;
++ else
++ continue;
++
++ case SCST_TGT_RES_NEED_THREAD_CTX:
++ TRACE_DBG("Target driver %s "
++ "rdy_to_xfer() requested thread "
++ "context, rescheduling", tgtt->name);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ break;
++
++ default:
++ goto out_error_rc;
++ }
++ break;
++ }
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++
++out_error_rc:
++ if (rc == SCST_TGT_RES_FATAL_ERROR) {
++ PRINT_ERROR("Target driver %s rdy_to_xfer() returned "
++ "fatal error", tgtt->name);
++ } else {
++ PRINT_ERROR("Target driver %s rdy_to_xfer() returned invalid "
++ "value %d", tgtt->name, rc);
++ }
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
++
++out_dev_done:
++ scst_set_cmd_abnormal_done_state(cmd);
++ res = SCST_CMD_STATE_RES_CONT_SAME;
++ goto out;
++}
++
++/* No locks, but might be in IRQ */
++static void scst_process_redirect_cmd(struct scst_cmd *cmd,
++ enum scst_exec_context context, int check_retries)
++{
++ struct scst_tgt *tgt = cmd->tgt;
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Context: %x", context);
++
++ if (check_retries)
++ scst_check_retries(tgt);
++
++ if (context == SCST_CONTEXT_SAME)
++ context = scst_cmd_atomic(cmd) ? SCST_CONTEXT_DIRECT_ATOMIC :
++ SCST_CONTEXT_DIRECT;
++
++ switch (context) {
++ case SCST_CONTEXT_DIRECT_ATOMIC:
++ scst_process_active_cmd(cmd, true);
++ break;
++
++ case SCST_CONTEXT_DIRECT:
++ scst_process_active_cmd(cmd, false);
++ break;
++
++ case SCST_CONTEXT_TASKLET:
++ scst_schedule_tasklet(cmd);
++ break;
++
++ default:
++ PRINT_ERROR("Context %x is unknown, using the thread one",
++ context);
++		/* fall through */
++ case SCST_CONTEXT_THREAD:
++ spin_lock_irqsave(&cmd->cmd_threads->cmd_list_lock, flags);
++ TRACE_DBG("Adding cmd %p to active cmd list", cmd);
++ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
++ list_add(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ else
++ list_add_tail(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
++ spin_unlock_irqrestore(&cmd->cmd_threads->cmd_list_lock, flags);
++ break;
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ * scst_rx_data() - the command's data received
++ * @cmd:	SCST command
++ * @status: data receiving completion status
++ * @pref_context: preferred command execution context
++ *
++ * Description:
++ * Notifies SCST that the driver received all the necessary data
++ * and the command is ready for further processing.
++ *
++ * The second argument sets data receiving completion status
++ * (see SCST_RX_STATUS_* constants for details)
++ */
++void scst_rx_data(struct scst_cmd *cmd, int status,
++ enum scst_exec_context pref_context)
++{
++ TRACE_ENTRY();
++
++ scst_set_rdy_to_xfer_time(cmd);
++
++ TRACE_DBG("Preferred context: %d", pref_context);
++ TRACE(TRACE_SCSI, "cmd %p, status %#x", cmd, status);
++
++ cmd->cmd_hw_pending = 0;
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ if ((in_irq() || irqs_disabled()) &&
++ ((pref_context == SCST_CONTEXT_DIRECT) ||
++ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
++ PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
++ "SCST_CONTEXT_THREAD instead", pref_context,
++ cmd->tgtt->name);
++ dump_stack();
++ pref_context = SCST_CONTEXT_THREAD;
++ }
++#endif
++
++ switch (status) {
++ case SCST_RX_STATUS_SUCCESS:
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ if (trace_flag & TRACE_RCV_BOT) {
++ int i;
++ struct scatterlist *sg;
++ if (cmd->out_sg != NULL)
++ sg = cmd->out_sg;
++ else if (cmd->tgt_out_sg != NULL)
++ sg = cmd->tgt_out_sg;
++ else if (cmd->tgt_sg != NULL)
++ sg = cmd->tgt_sg;
++ else
++ sg = cmd->sg;
++ if (sg != NULL) {
++ TRACE_RECV_BOT("RX data for cmd %p "
++ "(sg_cnt %d, sg %p, sg[0].page %p)",
++ cmd, cmd->tgt_sg_cnt, sg,
++ (void *)sg_page(&sg[0]));
++ for (i = 0; i < cmd->tgt_sg_cnt; ++i) {
++ PRINT_BUFF_FLAG(TRACE_RCV_BOT, "RX sg",
++ sg_virt(&sg[i]), sg[i].length);
++ }
++ }
++ }
++#endif
++ cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
++
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ if (cmd->op_flags & SCST_TEST_IO_IN_SIRQ_ALLOWED)
++ break;
++#endif
++
++ /* Small context optimization */
++ if ((pref_context == SCST_CONTEXT_TASKLET) ||
++ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
++ ((pref_context == SCST_CONTEXT_SAME) &&
++ scst_cmd_atomic(cmd)))
++ pref_context = SCST_CONTEXT_THREAD;
++ break;
++
++ case SCST_RX_STATUS_ERROR_SENSE_SET:
++ scst_set_cmd_abnormal_done_state(cmd);
++ pref_context = SCST_CONTEXT_THREAD;
++ break;
++
++ case SCST_RX_STATUS_ERROR_FATAL:
++ set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
++		/* fall through */
++ case SCST_RX_STATUS_ERROR:
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ scst_set_cmd_abnormal_done_state(cmd);
++ pref_context = SCST_CONTEXT_THREAD;
++ break;
++
++ default:
++ PRINT_ERROR("scst_rx_data() received unknown status %x",
++ status);
++ scst_set_cmd_abnormal_done_state(cmd);
++ pref_context = SCST_CONTEXT_THREAD;
++ break;
++ }
++
++ scst_process_redirect_cmd(cmd, pref_context, 1);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL(scst_rx_data);
++
++static int scst_tgt_pre_exec(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_RES_CONT_SAME, rc;
++
++ TRACE_ENTRY();
++
++ if (unlikely(cmd->resid_possible)) {
++ if (cmd->data_direction & SCST_DATA_WRITE) {
++ bool do_zero = false;
++ if (cmd->data_direction & SCST_DATA_READ) {
++ if (cmd->write_len != cmd->out_bufflen)
++ do_zero = true;
++ } else {
++ if (cmd->write_len != cmd->bufflen)
++ do_zero = true;
++ }
++ if (do_zero) {
++ scst_check_restore_sg_buff(cmd);
++ scst_zero_write_rest(cmd);
++ }
++ }
++ }
++
++ cmd->state = SCST_CMD_STATE_SEND_FOR_EXEC;
++
++ if ((cmd->tgtt->pre_exec == NULL) || unlikely(cmd->internal))
++ goto out;
++
++ TRACE_DBG("Calling pre_exec(%p)", cmd);
++ scst_set_cur_start(cmd);
++ rc = cmd->tgtt->pre_exec(cmd);
++ scst_set_pre_exec_time(cmd);
++ TRACE_DBG("pre_exec() returned %d", rc);
++
++ if (unlikely(rc != SCST_PREPROCESS_STATUS_SUCCESS)) {
++ switch (rc) {
++ case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
++ scst_set_cmd_abnormal_done_state(cmd);
++ break;
++ case SCST_PREPROCESS_STATUS_ERROR_FATAL:
++ set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
++			/* fall through */
++ case SCST_PREPROCESS_STATUS_ERROR:
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ scst_set_cmd_abnormal_done_state(cmd);
++ break;
++ default:
++ BUG();
++ break;
++ }
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
++ const uint8_t *rq_sense, int rq_sense_len, int resid)
++{
++ TRACE_ENTRY();
++
++ scst_set_exec_time(cmd);
++
++ cmd->status = result & 0xff;
++ cmd->msg_status = msg_byte(result);
++ cmd->host_status = host_byte(result);
++ cmd->driver_status = driver_byte(result);
++ if (unlikely(resid != 0)) {
++ if ((cmd->data_direction & SCST_DATA_READ) &&
++ (resid > 0) && (resid < cmd->resp_data_len))
++ scst_set_resp_data_len(cmd, cmd->resp_data_len - resid);
++ /*
++ * We ignore write direction residue, because from the
++ * initiator's POV we already transferred all the data.
++ */
++ }
++
++ if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION)) {
++ /* We might have double reset UA here */
++ cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
++ cmd->dbl_ua_orig_data_direction = cmd->data_direction;
++
++ scst_alloc_set_sense(cmd, 1, rq_sense, rq_sense_len);
++ }
++
++ TRACE(TRACE_SCSI, "cmd %p, result %x, cmd->status %x, resid %d, "
++ "cmd->msg_status %x, cmd->host_status %x, "
++ "cmd->driver_status %x", cmd, result, cmd->status, resid,
++ cmd->msg_status, cmd->host_status, cmd->driver_status);
++
++ cmd->completed = 1;
++
++ TRACE_EXIT();
++ return;
++}
++
++/* For small context optimization */
++static inline enum scst_exec_context scst_optimize_post_exec_context(
++ struct scst_cmd *cmd, enum scst_exec_context context)
++{
++ if (((context == SCST_CONTEXT_SAME) && scst_cmd_atomic(cmd)) ||
++ (context == SCST_CONTEXT_TASKLET) ||
++ (context == SCST_CONTEXT_DIRECT_ATOMIC)) {
++ if (!test_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
++ &cmd->tgt_dev->tgt_dev_flags))
++ context = SCST_CONTEXT_THREAD;
++ }
++ return context;
++}
++
++static void scst_cmd_done(void *data, char *sense, int result, int resid)
++{
++ struct scst_cmd *cmd;
++
++ TRACE_ENTRY();
++
++ cmd = (struct scst_cmd *)data;
++ if (cmd == NULL)
++ goto out;
++
++ scst_do_cmd_done(cmd, result, sense, SCSI_SENSE_BUFFERSIZE, resid);
++
++ cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
++
++ scst_process_redirect_cmd(cmd,
++ scst_optimize_post_exec_context(cmd, scst_estimate_context()), 0);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state,
++ enum scst_exec_context pref_context)
++{
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(cmd->pr_abort_counter != NULL);
++
++ scst_set_exec_time(cmd);
++
++ TRACE(TRACE_SCSI, "cmd %p, status %x, msg_status %x, host_status %x, "
++ "driver_status %x, resp_data_len %d", cmd, cmd->status,
++ cmd->msg_status, cmd->host_status, cmd->driver_status,
++ cmd->resp_data_len);
++
++ if (next_state == SCST_CMD_STATE_DEFAULT)
++ next_state = SCST_CMD_STATE_PRE_DEV_DONE;
++
++#if defined(CONFIG_SCST_DEBUG)
++ if (next_state == SCST_CMD_STATE_PRE_DEV_DONE) {
++ if ((trace_flag & TRACE_RCV_TOP) && (cmd->sg != NULL)) {
++ int i;
++ struct scatterlist *sg = cmd->sg;
++ TRACE_RECV_TOP("Exec'd %d S/G(s) at %p sg[0].page at "
++ "%p", cmd->sg_cnt, sg, (void *)sg_page(&sg[0]));
++ for (i = 0; i < cmd->sg_cnt; ++i) {
++ TRACE_BUFF_FLAG(TRACE_RCV_TOP,
++ "Exec'd sg", sg_virt(&sg[i]),
++ sg[i].length);
++ }
++ }
++ }
++#endif
++
++ cmd->state = next_state;
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ if ((next_state != SCST_CMD_STATE_PRE_DEV_DONE) &&
++ (next_state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
++ (next_state != SCST_CMD_STATE_FINISHED) &&
++ (next_state != SCST_CMD_STATE_FINISHED_INTERNAL)) {
++ PRINT_ERROR("%s() received invalid cmd state %d (opcode %d)",
++ __func__, next_state, cmd->cdb[0]);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ scst_set_cmd_abnormal_done_state(cmd);
++ }
++#endif
++ pref_context = scst_optimize_post_exec_context(cmd, pref_context);
++ scst_process_redirect_cmd(cmd, pref_context, 0);
++
++ TRACE_EXIT();
++ return;
++}
++
++static int scst_report_luns_local(struct scst_cmd *cmd)
++{
++ int res = SCST_EXEC_COMPLETED, rc;
++ int dev_cnt = 0;
++ int buffer_size;
++ int i;
++ struct scst_tgt_dev *tgt_dev = NULL;
++ uint8_t *buffer;
++ int offs, overflow = 0;
++
++ TRACE_ENTRY();
++
++ rc = scst_check_local_events(cmd);
++ if (unlikely(rc != 0))
++ goto out_done;
++
++ cmd->status = 0;
++ cmd->msg_status = 0;
++ cmd->host_status = DID_OK;
++ cmd->driver_status = 0;
++
++ if ((cmd->cdb[2] != 0) && (cmd->cdb[2] != 2)) {
++ PRINT_ERROR("Unsupported SELECT REPORT value %x in REPORT "
++ "LUNS command", cmd->cdb[2]);
++ goto out_err;
++ }
++
++ buffer_size = scst_get_buf_first(cmd, &buffer);
++ if (unlikely(buffer_size == 0))
++ goto out_compl;
++ else if (unlikely(buffer_size < 0))
++ goto out_hw_err;
++
++ if (buffer_size < 16)
++ goto out_put_err;
++
++ memset(buffer, 0, buffer_size);
++ offs = 8;
++
++ /*
++	 * cmd won't allow suspending activities, so we can access
++ * sess->sess_tgt_dev_list_hash without any additional protection.
++ */
++ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
++ struct list_head *sess_tgt_dev_list_head =
++ &cmd->sess->sess_tgt_dev_list_hash[i];
++ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
++ sess_tgt_dev_list_entry) {
++ if (!overflow) {
++ if (offs >= buffer_size) {
++ scst_put_buf(cmd, buffer);
++ buffer_size = scst_get_buf_next(cmd,
++ &buffer);
++ if (buffer_size > 0) {
++ memset(buffer, 0, buffer_size);
++ offs = 0;
++ } else {
++ overflow = 1;
++ goto inc_dev_cnt;
++ }
++ }
++ if ((buffer_size - offs) < 8) {
++ PRINT_ERROR("Buffer allocated for "
++ "REPORT LUNS command doesn't "
++						"REPORT LUNS command is too "
++						"small to fit an 8-byte entry "
++ buffer_size);
++ goto out_put_hw_err;
++ }
++ if ((cmd->sess->acg->addr_method == SCST_LUN_ADDR_METHOD_FLAT) &&
++ (tgt_dev->lun != 0)) {
++ buffer[offs] = (tgt_dev->lun >> 8) & 0x3f;
++ buffer[offs] = buffer[offs] | 0x40;
++ buffer[offs+1] = tgt_dev->lun & 0xff;
++ } else {
++ buffer[offs] = (tgt_dev->lun >> 8) & 0xff;
++ buffer[offs+1] = tgt_dev->lun & 0xff;
++ }
++ offs += 8;
++ }
++inc_dev_cnt:
++ dev_cnt++;
++ }
++ }
++ if (!overflow)
++ scst_put_buf(cmd, buffer);
++
++ /* Set the response header */
++ buffer_size = scst_get_buf_first(cmd, &buffer);
++ if (unlikely(buffer_size == 0))
++ goto out_compl;
++ else if (unlikely(buffer_size < 0))
++ goto out_hw_err;
++
++ dev_cnt *= 8;
++ buffer[0] = (dev_cnt >> 24) & 0xff;
++ buffer[1] = (dev_cnt >> 16) & 0xff;
++ buffer[2] = (dev_cnt >> 8) & 0xff;
++ buffer[3] = dev_cnt & 0xff;
++
++ scst_put_buf(cmd, buffer);
++
++ dev_cnt += 8;
++ if (dev_cnt < cmd->resp_data_len)
++ scst_set_resp_data_len(cmd, dev_cnt);
++
++out_compl:
++ cmd->completed = 1;
++
++ /* Clear left sense_reported_luns_data_changed UA, if any. */
++
++ /*
++	 * cmd won't allow suspending activities, so we can access
++ * sess->sess_tgt_dev_list_hash without any additional protection.
++ */
++ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
++ struct list_head *sess_tgt_dev_list_head =
++ &cmd->sess->sess_tgt_dev_list_hash[i];
++
++ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
++ sess_tgt_dev_list_entry) {
++ struct scst_tgt_dev_UA *ua;
++
++ spin_lock_bh(&tgt_dev->tgt_dev_lock);
++ list_for_each_entry(ua, &tgt_dev->UA_list,
++ UA_list_entry) {
++ if (scst_analyze_sense(ua->UA_sense_buffer,
++ ua->UA_valid_sense_len,
++ SCST_SENSE_ALL_VALID,
++ SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed))) {
++ TRACE_MGMT_DBG("Freeing not needed "
++ "REPORTED LUNS DATA CHANGED UA "
++ "%p", ua);
++ list_del(&ua->UA_list_entry);
++ mempool_free(ua, scst_ua_mempool);
++ break;
++ }
++ }
++ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
++ }
++ }
++
++out_done:
++ /* Report the result */
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
++
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_put_err:
++ scst_put_buf(cmd, buffer);
++
++out_err:
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_compl;
++
++out_put_hw_err:
++ scst_put_buf(cmd, buffer);
++
++out_hw_err:
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out_compl;
++}
++
++static int scst_request_sense_local(struct scst_cmd *cmd)
++{
++ int res = SCST_EXEC_COMPLETED, rc;
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++ uint8_t *buffer;
++ int buffer_size = 0, sl = 0;
++
++ TRACE_ENTRY();
++
++ rc = scst_check_local_events(cmd);
++ if (unlikely(rc != 0))
++ goto out_done;
++
++ cmd->status = 0;
++ cmd->msg_status = 0;
++ cmd->host_status = DID_OK;
++ cmd->driver_status = 0;
++
++ spin_lock_bh(&tgt_dev->tgt_dev_lock);
++
++ if (tgt_dev->tgt_dev_valid_sense_len == 0)
++ goto out_unlock_not_completed;
++
++ TRACE(TRACE_SCSI, "%s: Returning stored sense", cmd->op_name);
++
++ buffer_size = scst_get_buf_first(cmd, &buffer);
++ if (unlikely(buffer_size == 0))
++ goto out_unlock_compl;
++ else if (unlikely(buffer_size < 0))
++ goto out_unlock_hw_err;
++
++ memset(buffer, 0, buffer_size);
++
++ if (((tgt_dev->tgt_dev_sense[0] == 0x70) ||
++ (tgt_dev->tgt_dev_sense[0] == 0x71)) && (cmd->cdb[1] & 1)) {
++ PRINT_WARNING("%s: Fixed format of the saved sense, but "
++			"descriptor format requested. Conversion will "
++			"truncate data", cmd->op_name);
++ PRINT_BUFFER("Original sense", tgt_dev->tgt_dev_sense,
++ tgt_dev->tgt_dev_valid_sense_len);
++
++ buffer_size = min(SCST_STANDARD_SENSE_LEN, buffer_size);
++ sl = scst_set_sense(buffer, buffer_size, true,
++ tgt_dev->tgt_dev_sense[2], tgt_dev->tgt_dev_sense[12],
++ tgt_dev->tgt_dev_sense[13]);
++ } else if (((tgt_dev->tgt_dev_sense[0] == 0x72) ||
++ (tgt_dev->tgt_dev_sense[0] == 0x73)) && !(cmd->cdb[1] & 1)) {
++ PRINT_WARNING("%s: Descriptor format of the "
++			"saved sense, but fixed format requested. Conversion "
++			"will truncate data", cmd->op_name);
++ PRINT_BUFFER("Original sense", tgt_dev->tgt_dev_sense,
++ tgt_dev->tgt_dev_valid_sense_len);
++
++ buffer_size = min(SCST_STANDARD_SENSE_LEN, buffer_size);
++ sl = scst_set_sense(buffer, buffer_size, false,
++ tgt_dev->tgt_dev_sense[1], tgt_dev->tgt_dev_sense[2],
++ tgt_dev->tgt_dev_sense[3]);
++ } else {
++ if (buffer_size >= tgt_dev->tgt_dev_valid_sense_len)
++ sl = tgt_dev->tgt_dev_valid_sense_len;
++ else {
++ sl = buffer_size;
++			TRACE(TRACE_MINOR, "%s: Returning sense truncated "
++ "to size %d (needed %d)", cmd->op_name,
++ buffer_size, tgt_dev->tgt_dev_valid_sense_len);
++ }
++ memcpy(buffer, tgt_dev->tgt_dev_sense, sl);
++ }
++
++ scst_put_buf(cmd, buffer);
++
++ tgt_dev->tgt_dev_valid_sense_len = 0;
++
++ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
++
++ scst_set_resp_data_len(cmd, sl);
++
++out_compl:
++ cmd->completed = 1;
++
++out_done:
++ /* Report the result */
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_unlock_hw_err:
++ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out_compl;
++
++out_unlock_not_completed:
++ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
++ res = SCST_EXEC_NOT_COMPLETED;
++ goto out;
++
++out_unlock_compl:
++ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
++ goto out_compl;
++}
++
++static int scst_reserve_local(struct scst_cmd *cmd)
++{
++ int res = SCST_EXEC_NOT_COMPLETED, rc;
++ struct scst_device *dev;
++ struct scst_tgt_dev *tgt_dev_tmp;
++
++ TRACE_ENTRY();
++
++ if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
++ PRINT_ERROR("RESERVE_10: 3rdPty RESERVE not implemented "
++ "(lun=%lld)", (long long unsigned int)cmd->lun);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_done;
++ }
++
++ dev = cmd->dev;
++
++ /*
++ * There's no need to block this device, even for
++ * SCST_CONTR_MODE_ONE_TASK_SET, or anyhow else protect reservations
++ * changes, because:
++ *
++ * 1. The reservation changes are (rather) atomic, i.e., in contrast
++ * to persistent reservations, don't have any invalid intermediate
++ * states during being changed.
++ *
++	 * 2. It's the duty of initiators to ensure the order of regular
++	 * commands around the reservation command, either by the ORDERED
++	 * attribute, by queue draining, etc. In the case of
++	 * SCST_CONTR_MODE_ONE_TASK_SET there are no target drivers which can
++	 * ensure delivery order even for ORDERED commands, so, because
++	 * initiators know it, there's also no point in doing any extra
++	 * protection actions.
++ */
++
++ rc = scst_check_local_events(cmd);
++ if (unlikely(rc != 0))
++ goto out_done;
++
++ if (!list_empty(&dev->dev_registrants_list)) {
++ if (scst_pr_crh_case(cmd))
++ goto out_completed;
++ else {
++ scst_set_cmd_error_status(cmd,
++ SAM_STAT_RESERVATION_CONFLICT);
++ goto out_done;
++ }
++ }
++
++ spin_lock_bh(&dev->dev_lock);
++
++ if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
++ spin_unlock_bh(&dev->dev_lock);
++ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
++ goto out_done;
++ }
++
++ list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ if (cmd->tgt_dev != tgt_dev_tmp)
++ set_bit(SCST_TGT_DEV_RESERVED,
++ &tgt_dev_tmp->tgt_dev_flags);
++ }
++ dev->dev_reserved = 1;
++
++ spin_unlock_bh(&dev->dev_lock);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_completed:
++ cmd->completed = 1;
++
++out_done:
++ /* Report the result */
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
++ res = SCST_EXEC_COMPLETED;
++ goto out;
++}
++
++static int scst_release_local(struct scst_cmd *cmd)
++{
++ int res = SCST_EXEC_NOT_COMPLETED, rc;
++ struct scst_tgt_dev *tgt_dev_tmp;
++ struct scst_device *dev;
++
++ TRACE_ENTRY();
++
++ dev = cmd->dev;
++
++ /*
++ * See comment in scst_reserve_local() why no dev blocking or any
++ * other protection is needed here.
++ */
++
++ rc = scst_check_local_events(cmd);
++ if (unlikely(rc != 0))
++ goto out_done;
++
++ if (!list_empty(&dev->dev_registrants_list)) {
++ if (scst_pr_crh_case(cmd))
++ goto out_completed;
++ else {
++ scst_set_cmd_error_status(cmd,
++ SAM_STAT_RESERVATION_CONFLICT);
++ goto out_done;
++ }
++ }
++
++ spin_lock_bh(&dev->dev_lock);
++
++ /*
++ * The device could be RELEASED behind us, if RESERVING session
++ * is closed (see scst_free_tgt_dev()), but this actually doesn't
++ * matter, so use lock and no retest for DEV_RESERVED bits again
++ */
++ if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
++ res = SCST_EXEC_COMPLETED;
++ cmd->status = 0;
++ cmd->msg_status = 0;
++ cmd->host_status = DID_OK;
++ cmd->driver_status = 0;
++ cmd->completed = 1;
++ } else {
++ list_for_each_entry(tgt_dev_tmp,
++ &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ clear_bit(SCST_TGT_DEV_RESERVED,
++ &tgt_dev_tmp->tgt_dev_flags);
++ }
++ dev->dev_reserved = 0;
++ }
++
++ spin_unlock_bh(&dev->dev_lock);
++
++ if (res == SCST_EXEC_COMPLETED)
++ goto out_done;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_completed:
++ cmd->completed = 1;
++
++out_done:
++ res = SCST_EXEC_COMPLETED;
++ /* Report the result */
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
++ goto out;
++}
++
++/**
++ * scst_check_local_events() - check if there are any local SCSI events
++ *
++ * Description:
++ * Checks if the command can be executed or there are local events,
++ *    like reservations, pending UAs, etc. Returns < 0 if command must be
++ * aborted, > 0 if there is an event and command should be immediately
++ * completed, or 0 otherwise.
++ *
++ * !! Dev handlers implementing exec() callback must call this function there
++ * !! just before the actual command's execution!
++ *
++ * On call no locks, no IRQ or IRQ-disabled context allowed.
++ */
++static int scst_persistent_reserve_in_local(struct scst_cmd *cmd)
++{
++ int rc;
++ struct scst_device *dev;
++ struct scst_tgt_dev *tgt_dev;
++ struct scst_session *session;
++ int action;
++ uint8_t *buffer;
++ int buffer_size;
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(scst_cmd_atomic(cmd));
++
++ dev = cmd->dev;
++ tgt_dev = cmd->tgt_dev;
++ session = cmd->sess;
++
++ rc = scst_check_local_events(cmd);
++ if (unlikely(rc != 0))
++ goto out_done;
++
++ if (unlikely(dev->not_pr_supporting_tgt_devs_num != 0)) {
++ PRINT_WARNING("Persistent Reservation command %x refused for "
++			"device %s, because the device has connected transports "
++			"that do not support PR", cmd->cdb[0], dev->virt_name);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
++ goto out_done;
++ }
++
++ if (dev->dev_reserved) {
++ TRACE_PR("PR command rejected, because device %s holds regular "
++ "reservation", dev->virt_name);
++ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
++ goto out_done;
++ }
++
++ if (dev->scsi_dev != NULL) {
++ PRINT_WARNING("PR commands for pass-through devices not "
++ "supported (device %s)", dev->virt_name);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
++ goto out_done;
++ }
++
++ buffer_size = scst_get_full_buf(cmd, &buffer);
++ if (unlikely(buffer_size <= 0)) {
++ if (buffer_size < 0)
++ scst_set_busy(cmd);
++ goto out_done;
++ }
++
++ scst_pr_write_lock(dev);
++
++ /* We can be aborted by another PR command while waiting for the lock */
++ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
++ TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
++ goto out_unlock;
++ }
++
++ action = cmd->cdb[1] & 0x1f;
++
++ TRACE(TRACE_SCSI, "PR action %x for '%s' (LUN %llx) from '%s'", action,
++ dev->virt_name, tgt_dev->lun, session->initiator_name);
++
++ switch (action) {
++ case PR_READ_KEYS:
++ scst_pr_read_keys(cmd, buffer, buffer_size);
++ break;
++ case PR_READ_RESERVATION:
++ scst_pr_read_reservation(cmd, buffer, buffer_size);
++ break;
++ case PR_REPORT_CAPS:
++ scst_pr_report_caps(cmd, buffer, buffer_size);
++ break;
++ case PR_READ_FULL_STATUS:
++ scst_pr_read_full_status(cmd, buffer, buffer_size);
++ break;
++ default:
++ PRINT_ERROR("Unsupported action %x", action);
++ scst_pr_write_unlock(dev);
++ goto out_err;
++ }
++
++out_complete:
++ cmd->completed = 1;
++
++out_unlock:
++ scst_pr_write_unlock(dev);
++
++ scst_put_full_buf(cmd, buffer);
++
++out_done:
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
++
++ TRACE_EXIT_RES(SCST_EXEC_COMPLETED);
++ return SCST_EXEC_COMPLETED;
++
++out_err:
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_complete;
++}
++
++/* No locks, no IRQ or IRQ-disabled context allowed */
++static int scst_persistent_reserve_out_local(struct scst_cmd *cmd)
++{
++ int res = SCST_EXEC_COMPLETED;
++ int rc;
++ struct scst_device *dev;
++ struct scst_tgt_dev *tgt_dev;
++ struct scst_session *session;
++ int action;
++ uint8_t *buffer;
++ int buffer_size;
++ bool aborted = false;
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(scst_cmd_atomic(cmd));
++
++ dev = cmd->dev;
++ tgt_dev = cmd->tgt_dev;
++ session = cmd->sess;
++
++ rc = scst_check_local_events(cmd);
++ if (unlikely(rc != 0))
++ goto out_done;
++
++ if (unlikely(dev->not_pr_supporting_tgt_devs_num != 0)) {
++ PRINT_WARNING("Persistent Reservation command %x refused for "
++			"device %s, because the device has connected transports "
++			"that do not support PR", cmd->cdb[0], dev->virt_name);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
++ goto out_done;
++ }
++
++ action = cmd->cdb[1] & 0x1f;
++
++ TRACE(TRACE_SCSI, "PR action %x for '%s' (LUN %llx) from '%s'", action,
++ dev->virt_name, tgt_dev->lun, session->initiator_name);
++
++ if (dev->dev_reserved) {
++ TRACE_PR("PR command rejected, because device %s holds regular "
++ "reservation", dev->virt_name);
++ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
++ goto out_done;
++ }
++
++ /*
++	 * Check if tgt_dev is already registered. This check also makes
++	 * sure that the table "PERSISTENT RESERVE OUT service actions that are
++ * allowed in the presence of various reservations" is honored.
++ * REGISTER AND MOVE and RESERVE will be additionally checked for
++ * conflicts later.
++ */
++ if ((action != PR_REGISTER) && (action != PR_REGISTER_AND_IGNORE) &&
++ (tgt_dev->registrant == NULL)) {
++ TRACE_PR("'%s' not registered", cmd->sess->initiator_name);
++ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
++ goto out_done;
++ }
++
++ buffer_size = scst_get_full_buf(cmd, &buffer);
++ if (unlikely(buffer_size <= 0)) {
++ if (buffer_size < 0)
++ scst_set_busy(cmd);
++ goto out_done;
++ }
++
++ /* Check scope */
++ if ((action != PR_REGISTER) && (action != PR_REGISTER_AND_IGNORE) &&
++ (action != PR_CLEAR) && ((cmd->cdb[2] & 0x0f) >> 4) != SCOPE_LU) {
++ TRACE_PR("Scope must be SCOPE_LU for action %x", action);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_put_full_buf;
++ }
++
++ /* Check SPEC_I_PT (PR_REGISTER_AND_MOVE has another format) */
++ if ((action != PR_REGISTER) && (action != PR_REGISTER_AND_MOVE) &&
++ ((buffer[20] >> 3) & 0x01)) {
++ TRACE_PR("SPEC_I_PT must be zero for action %x", action);
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
++ scst_sense_invalid_field_in_cdb));
++ goto out_put_full_buf;
++ }
++
++ /* Check ALL_TG_PT (PR_REGISTER_AND_MOVE has another format) */
++ if ((action != PR_REGISTER) && (action != PR_REGISTER_AND_IGNORE) &&
++ (action != PR_REGISTER_AND_MOVE) && ((buffer[20] >> 2) & 0x01)) {
++ TRACE_PR("ALL_TG_PT must be zero for action %x", action);
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
++ scst_sense_invalid_field_in_cdb));
++ goto out_put_full_buf;
++ }
++
++ scst_pr_write_lock(dev);
++
++ /* We can be aborted by another PR command while waiting for the lock */
++ aborted = test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
++ if (unlikely(aborted)) {
++ TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
++ goto out_unlock;
++ }
++
++ switch (action) {
++ case PR_REGISTER:
++ scst_pr_register(cmd, buffer, buffer_size);
++ break;
++ case PR_RESERVE:
++ scst_pr_reserve(cmd, buffer, buffer_size);
++ break;
++ case PR_RELEASE:
++ scst_pr_release(cmd, buffer, buffer_size);
++ break;
++ case PR_CLEAR:
++ scst_pr_clear(cmd, buffer, buffer_size);
++ break;
++ case PR_PREEMPT:
++ scst_pr_preempt(cmd, buffer, buffer_size);
++ break;
++ case PR_PREEMPT_AND_ABORT:
++ scst_pr_preempt_and_abort(cmd, buffer, buffer_size);
++ break;
++ case PR_REGISTER_AND_IGNORE:
++ scst_pr_register_and_ignore(cmd, buffer, buffer_size);
++ break;
++ case PR_REGISTER_AND_MOVE:
++ scst_pr_register_and_move(cmd, buffer, buffer_size);
++ break;
++ default:
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_unlock;
++ }
++
++ if (cmd->status == SAM_STAT_GOOD)
++ scst_pr_sync_device_file(tgt_dev, cmd);
++
++ if ((dev->handler->pr_cmds_notifications) &&
++ (cmd->status == SAM_STAT_GOOD)) /* sync file may change status */
++ res = SCST_EXEC_NOT_COMPLETED;
++
++out_unlock:
++ scst_pr_write_unlock(dev);
++
++out_put_full_buf:
++ scst_put_full_buf(cmd, buffer);
++
++out_done:
++ if (SCST_EXEC_COMPLETED == res) {
++ if (!aborted)
++ cmd->completed = 1;
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT,
++ SCST_CONTEXT_SAME);
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* No locks, no IRQ or IRQ-disabled context allowed */
++int scst_check_local_events(struct scst_cmd *cmd)
++{
++ int res, rc;
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++ struct scst_device *dev = cmd->dev;
++
++ TRACE_ENTRY();
++
++ /*
++ * There's no race here, because we need to trace commands sent
++ * *after* dev_double_ua_possible flag was set.
++ */
++ if (unlikely(dev->dev_double_ua_possible))
++ cmd->double_ua_possible = 1;
++
++ /* Reserve check before Unit Attention */
++ if (unlikely(test_bit(SCST_TGT_DEV_RESERVED,
++ &tgt_dev->tgt_dev_flags))) {
++ if ((cmd->op_flags & SCST_REG_RESERVE_ALLOWED) == 0) {
++ scst_set_cmd_error_status(cmd,
++ SAM_STAT_RESERVATION_CONFLICT);
++ goto out_complete;
++ }
++ }
++
++ if (dev->pr_is_set) {
++ if (unlikely(!scst_pr_is_cmd_allowed(cmd))) {
++ scst_set_cmd_error_status(cmd,
++ SAM_STAT_RESERVATION_CONFLICT);
++ goto out_complete;
++ }
++ }
++
++ /*
++ * Let's check for ABORTED after scst_pr_is_cmd_allowed(), because
++ * we might sleep for a while there.
++ */
++ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
++ TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
++ goto out_uncomplete;
++ }
++
++ /* If we had internal bus reset, set the command error unit attention */
++ if ((dev->scsi_dev != NULL) &&
++ unlikely(dev->scsi_dev->was_reset)) {
++ if (scst_is_ua_command(cmd)) {
++ int done = 0;
++ /*
++			 * Prevent more than one cmd from being triggered by
++ * was_reset.
++ */
++ spin_lock_bh(&dev->dev_lock);
++ if (dev->scsi_dev->was_reset) {
++ TRACE(TRACE_MGMT, "was_reset is %d", 1);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_reset_UA));
++ /*
++ * It looks like it is safe to clear was_reset
++ * here.
++ */
++ dev->scsi_dev->was_reset = 0;
++ done = 1;
++ }
++ spin_unlock_bh(&dev->dev_lock);
++
++ if (done)
++ goto out_complete;
++ }
++ }
++
++ if (unlikely(test_bit(SCST_TGT_DEV_UA_PENDING,
++ &cmd->tgt_dev->tgt_dev_flags))) {
++ if (scst_is_ua_command(cmd)) {
++ rc = scst_set_pending_UA(cmd);
++ if (rc == 0)
++ goto out_complete;
++ }
++ }
++
++ res = 0;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_complete:
++ res = 1;
++ BUG_ON(!cmd->completed);
++ goto out;
++
++out_uncomplete:
++ res = -1;
++ goto out;
++}
++EXPORT_SYMBOL_GPL(scst_check_local_events);
++
++/* No locks */
++void scst_inc_expected_sn(struct scst_tgt_dev *tgt_dev, atomic_t *slot)
++{
++ if (slot == NULL)
++ goto inc;
++
++ /* Optimized for lockless fast path */
++
++ TRACE_SN("Slot %zd, *cur_sn_slot %d", slot - tgt_dev->sn_slots,
++ atomic_read(slot));
++
++ if (!atomic_dec_and_test(slot))
++ goto out;
++
++ TRACE_SN("Slot is 0 (num_free_sn_slots=%d)",
++ tgt_dev->num_free_sn_slots);
++ if (tgt_dev->num_free_sn_slots < (int)ARRAY_SIZE(tgt_dev->sn_slots)-1) {
++ spin_lock_irq(&tgt_dev->sn_lock);
++ if (likely(tgt_dev->num_free_sn_slots < (int)ARRAY_SIZE(tgt_dev->sn_slots)-1)) {
++ if (tgt_dev->num_free_sn_slots < 0)
++ tgt_dev->cur_sn_slot = slot;
++ /*
++ * To be in-sync with SIMPLE case in scst_cmd_set_sn()
++ */
++ smp_mb();
++ tgt_dev->num_free_sn_slots++;
++ TRACE_SN("Incremented num_free_sn_slots (%d)",
++ tgt_dev->num_free_sn_slots);
++
++ }
++ spin_unlock_irq(&tgt_dev->sn_lock);
++ }
++
++inc:
++ /*
++ * No protection of expected_sn is needed, because only one thread
++	 * at a time can be here (serialized by sn). It is also assumed that
++	 * there cannot be partially incremented values.
++ */
++ tgt_dev->expected_sn++;
++ /*
++ * Write must be before def_cmd_count read to be in sync. with
++ * scst_post_exec_sn(). See comment in scst_send_for_exec().
++ */
++ smp_mb();
++ TRACE_SN("Next expected_sn: %d", tgt_dev->expected_sn);
++
++out:
++ return;
++}
++
++/* No locks */
++static struct scst_cmd *scst_post_exec_sn(struct scst_cmd *cmd,
++ bool make_active)
++{
++ /* For HQ commands SN is not set */
++ bool inc_expected_sn = !cmd->inc_expected_sn_on_done &&
++ cmd->sn_set && !cmd->retry;
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++ struct scst_cmd *res;
++
++ TRACE_ENTRY();
++
++ if (inc_expected_sn)
++ scst_inc_expected_sn(tgt_dev, cmd->sn_slot);
++
++ if (make_active) {
++ scst_make_deferred_commands_active(tgt_dev);
++ res = NULL;
++ } else
++ res = scst_check_deferred_commands(tgt_dev);
++
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++/* cmd must be additionally referenced to not die inside */
++static int scst_do_real_exec(struct scst_cmd *cmd)
++{
++ int res = SCST_EXEC_NOT_COMPLETED;
++ int rc;
++ struct scst_device *dev = cmd->dev;
++ struct scst_dev_type *handler = dev->handler;
++ struct io_context *old_ctx = NULL;
++ bool ctx_changed = false;
++
++ TRACE_ENTRY();
++
++ ctx_changed = scst_set_io_context(cmd, &old_ctx);
++
++ cmd->state = SCST_CMD_STATE_REAL_EXECUTING;
++
++ if (handler->exec) {
++ TRACE_DBG("Calling dev handler %s exec(%p)",
++ handler->name, cmd);
++ TRACE_BUFF_FLAG(TRACE_SND_TOP, "Execing: ", cmd->cdb,
++ cmd->cdb_len);
++ scst_set_cur_start(cmd);
++ res = handler->exec(cmd);
++ TRACE_DBG("Dev handler %s exec() returned %d",
++ handler->name, res);
++
++ if (res == SCST_EXEC_COMPLETED)
++ goto out_complete;
++
++ scst_set_exec_time(cmd);
++
++ BUG_ON(res != SCST_EXEC_NOT_COMPLETED);
++ }
++
++ TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
++
++ if (unlikely(dev->scsi_dev == NULL)) {
++ PRINT_ERROR("Command for virtual device must be "
++ "processed by device handler (LUN %lld)!",
++ (long long unsigned int)cmd->lun);
++ goto out_error;
++ }
++
++ res = scst_check_local_events(cmd);
++ if (unlikely(res != 0))
++ goto out_done;
++
++ scst_set_cur_start(cmd);
++
++ rc = scst_scsi_exec_async(cmd, scst_cmd_done);
++ if (unlikely(rc != 0)) {
++ PRINT_ERROR("scst pass-through exec failed: %x", rc);
++ if ((int)rc == -EINVAL)
++ PRINT_ERROR("Do you have too low max_sectors on your "
++ "backend hardware? For success max_sectors must "
++ "be >= bufflen in sectors (max_sectors %d, "
++ "bufflen %db, CDB %x). See README for more "
++ "details.", dev->scsi_dev->host->max_sectors,
++ cmd->bufflen, cmd->cdb[0]);
++ goto out_error;
++ }
++
++out_complete:
++ res = SCST_EXEC_COMPLETED;
++
++ if (ctx_changed)
++ scst_reset_io_context(cmd->tgt_dev, old_ctx);
++
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_error:
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out_done;
++
++out_done:
++ res = SCST_EXEC_COMPLETED;
++ /* Report the result */
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
++ goto out_complete;
++}
++
++static inline int scst_real_exec(struct scst_cmd *cmd)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
++ BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
++
++ __scst_cmd_get(cmd);
++
++ res = scst_do_real_exec(cmd);
++ if (likely(res == SCST_EXEC_COMPLETED)) {
++ scst_post_exec_sn(cmd, true);
++ if (cmd->dev->scsi_dev != NULL)
++ generic_unplug_device(
++ cmd->dev->scsi_dev->request_queue);
++ } else
++ BUG();
++
++ __scst_cmd_put(cmd);
++
++ /* SCST_EXEC_* match SCST_CMD_STATE_RES_* */
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_do_local_exec(struct scst_cmd *cmd)
++{
++ int res;
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++
++ TRACE_ENTRY();
++
++ /* Check READ_ONLY device status */
++ if ((cmd->op_flags & SCST_WRITE_MEDIUM) &&
++ (tgt_dev->acg_dev->rd_only || cmd->dev->swp ||
++ cmd->dev->rd_only)) {
++		PRINT_WARNING("Attempted write access to read-only device: "
++ "initiator %s, LUN %lld, op %x",
++ cmd->sess->initiator_name, cmd->lun, cmd->cdb[0]);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_data_protect));
++ goto out_done;
++ }
++
++ if (!scst_is_cmd_local(cmd)) {
++ res = SCST_EXEC_NOT_COMPLETED;
++ goto out;
++ }
++
++ switch (cmd->cdb[0]) {
++ case RESERVE:
++ case RESERVE_10:
++ res = scst_reserve_local(cmd);
++ break;
++ case RELEASE:
++ case RELEASE_10:
++ res = scst_release_local(cmd);
++ break;
++ case PERSISTENT_RESERVE_IN:
++ res = scst_persistent_reserve_in_local(cmd);
++ break;
++ case PERSISTENT_RESERVE_OUT:
++ res = scst_persistent_reserve_out_local(cmd);
++ break;
++ case REPORT_LUNS:
++ res = scst_report_luns_local(cmd);
++ break;
++ case REQUEST_SENSE:
++ res = scst_request_sense_local(cmd);
++ break;
++ default:
++ res = SCST_EXEC_NOT_COMPLETED;
++ break;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_done:
++ /* Report the result */
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
++ res = SCST_EXEC_COMPLETED;
++ goto out;
++}
++
++static int scst_local_exec(struct scst_cmd *cmd)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
++ BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
++
++ __scst_cmd_get(cmd);
++
++ res = scst_do_local_exec(cmd);
++ if (likely(res == SCST_EXEC_NOT_COMPLETED))
++ cmd->state = SCST_CMD_STATE_REAL_EXEC;
++ else if (res == SCST_EXEC_COMPLETED)
++ scst_post_exec_sn(cmd, true);
++ else
++ BUG();
++
++ __scst_cmd_put(cmd);
++
++ /* SCST_EXEC_* match SCST_CMD_STATE_RES_* */
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_exec(struct scst_cmd **active_cmd)
++{
++ struct scst_cmd *cmd = *active_cmd;
++ struct scst_cmd *ref_cmd;
++ struct scst_device *dev = cmd->dev;
++ int res = SCST_CMD_STATE_RES_CONT_NEXT, count;
++
++ TRACE_ENTRY();
++
++ if (unlikely(scst_check_blocked_dev(cmd)))
++ goto out;
++
++ /* To protect tgt_dev */
++ ref_cmd = cmd;
++ __scst_cmd_get(ref_cmd);
++
++ count = 0;
++ while (1) {
++ int rc;
++
++ cmd->sent_for_exec = 1;
++ /*
++ * To sync with scst_abort_cmd(). The above assignment must
++ * be before SCST_CMD_ABORTED test, done later in
++ * scst_check_local_events(). It's far from here, so the order
++ * is virtually guaranteed, but let's have it just in case.
++ */
++ smp_mb();
++
++ cmd->scst_cmd_done = scst_cmd_done_local;
++ cmd->state = SCST_CMD_STATE_LOCAL_EXEC;
++
++ rc = scst_do_local_exec(cmd);
++ if (likely(rc == SCST_EXEC_NOT_COMPLETED))
++ /* Nothing to do */;
++ else {
++ BUG_ON(rc != SCST_EXEC_COMPLETED);
++ goto done;
++ }
++
++ cmd->state = SCST_CMD_STATE_REAL_EXEC;
++
++ rc = scst_do_real_exec(cmd);
++ BUG_ON(rc != SCST_EXEC_COMPLETED);
++
++done:
++ count++;
++
++ cmd = scst_post_exec_sn(cmd, false);
++ if (cmd == NULL)
++ break;
++
++ if (unlikely(scst_check_blocked_dev(cmd)))
++ break;
++
++ __scst_cmd_put(ref_cmd);
++ ref_cmd = cmd;
++ __scst_cmd_get(ref_cmd);
++ }
++
++ *active_cmd = cmd;
++
++ if (count == 0)
++ goto out_put;
++
++ if (dev->scsi_dev != NULL)
++ generic_unplug_device(dev->scsi_dev->request_queue);
++
++out_put:
++ __scst_cmd_put(ref_cmd);
++ /* !! At this point sess, dev and tgt_dev can be already freed !! */
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_send_for_exec(struct scst_cmd **active_cmd)
++{
++ int res;
++ struct scst_cmd *cmd = *active_cmd;
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++ typeof(tgt_dev->expected_sn) expected_sn;
++
++ TRACE_ENTRY();
++
++ if (unlikely(cmd->internal))
++ goto exec;
++
++ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
++ goto exec;
++
++ BUG_ON(!cmd->sn_set);
++
++ expected_sn = tgt_dev->expected_sn;
++ /* Optimized for lockless fast path */
++ if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
++ spin_lock_irq(&tgt_dev->sn_lock);
++
++ tgt_dev->def_cmd_count++;
++ /*
++ * Memory barrier is needed here to implement lockless fast
++ * path. We need the exact order of read and write between
++		 * def_cmd_count and expected_sn. Otherwise, we could miss the
++		 * case when expected_sn is changed to be equal to cmd->sn
++		 * while we are queuing cmd on the deferred list after the
++		 * expected_sn check below, which would lead to a forever
++		 * stuck command. But with the barrier, in such a case
++		 * __scst_check_deferred_commands() will be called and it
++		 * will take sn_lock, so we will be synchronized.
++ */
++ smp_mb();
++
++ expected_sn = tgt_dev->expected_sn;
++ if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
++ if (unlikely(test_bit(SCST_CMD_ABORTED,
++ &cmd->cmd_flags))) {
++ /* Necessary to allow aborting out of sn cmds */
++ TRACE_MGMT_DBG("Aborting out of sn cmd %p "
++ "(tag %llu, sn %u)", cmd,
++ (long long unsigned)cmd->tag, cmd->sn);
++ tgt_dev->def_cmd_count--;
++ scst_set_cmd_abnormal_done_state(cmd);
++ res = SCST_CMD_STATE_RES_CONT_SAME;
++ } else {
++ TRACE_SN("Deferring cmd %p (sn=%d, set %d, "
++ "expected_sn=%d)", cmd, cmd->sn,
++ cmd->sn_set, expected_sn);
++ list_add_tail(&cmd->sn_cmd_list_entry,
++ &tgt_dev->deferred_cmd_list);
++ res = SCST_CMD_STATE_RES_CONT_NEXT;
++ }
++ spin_unlock_irq(&tgt_dev->sn_lock);
++ goto out;
++ } else {
++ TRACE_SN("Somebody incremented expected_sn %d, "
++ "continuing", expected_sn);
++ tgt_dev->def_cmd_count--;
++ spin_unlock_irq(&tgt_dev->sn_lock);
++ }
++ }
++
++exec:
++ res = scst_exec(active_cmd);
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++/* No locks supposed to be held */
++static int scst_check_sense(struct scst_cmd *cmd)
++{
++ int res = 0;
++ struct scst_device *dev = cmd->dev;
++
++ TRACE_ENTRY();
++
++ if (unlikely(cmd->ua_ignore))
++ goto out;
++
++ /* If we had internal bus reset behind us, set the command error UA */
++ if ((dev->scsi_dev != NULL) &&
++ unlikely(cmd->host_status == DID_RESET) &&
++ scst_is_ua_command(cmd)) {
++ TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
++ dev->scsi_dev->was_reset, cmd->host_status);
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_reset_UA));
++ /* It looks like it is safe to clear was_reset here */
++ dev->scsi_dev->was_reset = 0;
++ }
++
++ if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
++ SCST_SENSE_VALID(cmd->sense)) {
++ PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
++ cmd->sense_valid_len);
++
++ /* Check Unit Attention Sense Key */
++ if (scst_is_ua_sense(cmd->sense, cmd->sense_valid_len)) {
++ if (scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
++ SCST_SENSE_ASC_VALID,
++ 0, SCST_SENSE_ASC_UA_RESET, 0)) {
++ if (cmd->double_ua_possible) {
++ TRACE_MGMT_DBG("Double UA "
++ "detected for device %p", dev);
++ TRACE_MGMT_DBG("Retrying cmd"
++ " %p (tag %llu)", cmd,
++ (long long unsigned)cmd->tag);
++
++ cmd->status = 0;
++ cmd->msg_status = 0;
++ cmd->host_status = DID_OK;
++ cmd->driver_status = 0;
++ cmd->completed = 0;
++
++ mempool_free(cmd->sense,
++ scst_sense_mempool);
++ cmd->sense = NULL;
++
++ scst_check_restore_sg_buff(cmd);
++
++ BUG_ON(cmd->dbl_ua_orig_resp_data_len < 0);
++ cmd->data_direction =
++ cmd->dbl_ua_orig_data_direction;
++ cmd->resp_data_len =
++ cmd->dbl_ua_orig_resp_data_len;
++
++ cmd->state = SCST_CMD_STATE_REAL_EXEC;
++ cmd->retry = 1;
++ res = 1;
++ goto out;
++ }
++ }
++ scst_dev_check_set_UA(dev, cmd, cmd->sense,
++ cmd->sense_valid_len);
++ }
++ }
++
++ if (unlikely(cmd->double_ua_possible)) {
++ if (scst_is_ua_command(cmd)) {
++ TRACE_MGMT_DBG("Clearing dbl_ua_possible flag (dev %p, "
++ "cmd %p)", dev, cmd);
++ /*
++ * Lock used to protect other flags in the bitfield
++ * (just in case, actually). Those flags can't be
++ * changed in parallel, because the device is
++ * serialized.
++ */
++ spin_lock_bh(&dev->dev_lock);
++ dev->dev_double_ua_possible = 0;
++ spin_unlock_bh(&dev->dev_lock);
++ }
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_check_auto_sense(struct scst_cmd *cmd)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
++ (!SCST_SENSE_VALID(cmd->sense) ||
++ SCST_NO_SENSE(cmd->sense))) {
++ TRACE(TRACE_SCSI|TRACE_MINOR_AND_MGMT_DBG, "CHECK_CONDITION, "
++ "but no sense: cmd->status=%x, cmd->msg_status=%x, "
++ "cmd->host_status=%x, cmd->driver_status=%x (cmd %p)",
++ cmd->status, cmd->msg_status, cmd->host_status,
++ cmd->driver_status, cmd);
++ res = 1;
++ } else if (unlikely(cmd->host_status)) {
++ if ((cmd->host_status == DID_REQUEUE) ||
++ (cmd->host_status == DID_IMM_RETRY) ||
++ (cmd->host_status == DID_SOFT_ERROR) ||
++ (cmd->host_status == DID_ABORT)) {
++ scst_set_busy(cmd);
++ } else {
++ TRACE(TRACE_SCSI|TRACE_MINOR_AND_MGMT_DBG, "Host "
++ "status %x received, returning HARDWARE ERROR "
++ "instead (cmd %p)", cmd->host_status, cmd);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ }
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_pre_dev_done(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_RES_CONT_SAME, rc;
++
++ TRACE_ENTRY();
++
++ if (unlikely(scst_check_auto_sense(cmd))) {
++ PRINT_INFO("Command finished with CHECK CONDITION, but "
++ "without sense data (opcode 0x%x), issuing "
++ "REQUEST SENSE", cmd->cdb[0]);
++ rc = scst_prepare_request_sense(cmd);
++ if (rc == 0)
++ res = SCST_CMD_STATE_RES_CONT_NEXT;
++ else {
++ PRINT_ERROR("%s", "Unable to issue REQUEST SENSE, "
++ "returning HARDWARE ERROR");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ }
++ goto out;
++ } else if (unlikely(scst_check_sense(cmd))) {
++ /*
++ * We can't allow atomic command on the exec stages, so
++ * restart to the thread
++ */
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ }
++
++ if (likely(scsi_status_is_good(cmd->status))) {
++ unsigned char type = cmd->dev->type;
++ if (unlikely((cmd->cdb[0] == MODE_SENSE ||
++ cmd->cdb[0] == MODE_SENSE_10)) &&
++ (cmd->tgt_dev->acg_dev->rd_only || cmd->dev->swp ||
++ cmd->dev->rd_only) &&
++ (type == TYPE_DISK ||
++ type == TYPE_WORM ||
++ type == TYPE_MOD ||
++ type == TYPE_TAPE)) {
++ int32_t length;
++ uint8_t *address;
++ bool err = false;
++
++ length = scst_get_buf_first(cmd, &address);
++ if (length < 0) {
++ PRINT_ERROR("%s", "Unable to get "
++ "MODE_SENSE buffer");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(
++ scst_sense_hardw_error));
++ err = true;
++ } else if (length > 2 && cmd->cdb[0] == MODE_SENSE)
++ address[2] |= 0x80; /* Write Protect*/
++ else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
++ address[3] |= 0x80; /* Write Protect*/
++ scst_put_buf(cmd, address);
++
++ if (err)
++ goto out;
++ }
++
++ /*
++ * Check and clear NormACA option for the device, if necessary,
++ * since we don't support ACA
++ */
++ if (unlikely((cmd->cdb[0] == INQUIRY)) &&
++ /* Std INQUIRY data (no EVPD) */
++ !(cmd->cdb[1] & SCST_INQ_EVPD) &&
++ (cmd->resp_data_len > SCST_INQ_BYTE3)) {
++ uint8_t *buffer;
++ int buflen;
++ bool err = false;
++
++ /* ToDo: all pages ?? */
++ buflen = scst_get_buf_first(cmd, &buffer);
++ if (buflen > SCST_INQ_BYTE3) {
++#ifdef CONFIG_SCST_EXTRACHECKS
++ if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
++ PRINT_INFO("NormACA set for device: "
++ "lun=%lld, type 0x%02x. Clear it, "
++ "since it's unsupported.",
++ (long long unsigned int)cmd->lun,
++ buffer[0]);
++ }
++#endif
++ buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
++ } else if (buflen != 0) {
++ PRINT_ERROR("%s", "Unable to get INQUIRY "
++ "buffer");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ err = true;
++ }
++ if (buflen > 0)
++ scst_put_buf(cmd, buffer);
++
++ if (err)
++ goto out;
++ }
++
++ if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
++ (cmd->cdb[0] == MODE_SELECT_10) ||
++ (cmd->cdb[0] == LOG_SELECT))) {
++ TRACE(TRACE_SCSI,
++ "MODE/LOG SELECT succeeded (LUN %lld)",
++ (long long unsigned int)cmd->lun);
++ cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
++ goto out;
++ }
++ } else {
++		TRACE(TRACE_SCSI, "cmd %p did not succeed, status %x",
++ cmd, cmd->status);
++
++ if ((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10)) {
++ if (!test_bit(SCST_TGT_DEV_RESERVED,
++ &cmd->tgt_dev->tgt_dev_flags)) {
++ struct scst_tgt_dev *tgt_dev_tmp;
++ struct scst_device *dev = cmd->dev;
++
++ TRACE(TRACE_SCSI, "RESERVE failed lun=%lld, "
++ "status=%x",
++ (long long unsigned int)cmd->lun,
++ cmd->status);
++ PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
++ cmd->sense_valid_len);
++
++ /* Clearing the reservation */
++ spin_lock_bh(&dev->dev_lock);
++ list_for_each_entry(tgt_dev_tmp,
++ &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ clear_bit(SCST_TGT_DEV_RESERVED,
++ &tgt_dev_tmp->tgt_dev_flags);
++ }
++ dev->dev_reserved = 0;
++ spin_unlock_bh(&dev->dev_lock);
++ }
++ }
++
++ /* Check for MODE PARAMETERS CHANGED UA */
++ if ((cmd->dev->scsi_dev != NULL) &&
++ (cmd->status == SAM_STAT_CHECK_CONDITION) &&
++ scst_is_ua_sense(cmd->sense, cmd->sense_valid_len) &&
++ scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
++ SCST_SENSE_ASCx_VALID,
++ 0, 0x2a, 0x01)) {
++ TRACE(TRACE_SCSI, "MODE PARAMETERS CHANGED UA (lun "
++ "%lld)", (long long unsigned int)cmd->lun);
++ cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
++ goto out;
++ }
++ }
++
++ cmd->state = SCST_CMD_STATE_DEV_DONE;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_mode_select_checks(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_RES_CONT_SAME;
++
++ TRACE_ENTRY();
++
++ if (likely(scsi_status_is_good(cmd->status))) {
++ int atomic = scst_cmd_atomic(cmd);
++ if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
++ (cmd->cdb[0] == MODE_SELECT_10) ||
++ (cmd->cdb[0] == LOG_SELECT))) {
++ struct scst_device *dev = cmd->dev;
++ int sl;
++ uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
++
++ if (atomic && (dev->scsi_dev != NULL)) {
++ TRACE_DBG("%s", "MODE/LOG SELECT: thread "
++ "context required");
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ }
++
++ TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
++ "setting the SELECT UA (lun=%lld)",
++ (long long unsigned int)cmd->lun);
++
++ spin_lock_bh(&dev->dev_lock);
++ if (cmd->cdb[0] == LOG_SELECT) {
++ sl = scst_set_sense(sense_buffer,
++ sizeof(sense_buffer),
++ dev->d_sense,
++ UNIT_ATTENTION, 0x2a, 0x02);
++ } else {
++ sl = scst_set_sense(sense_buffer,
++ sizeof(sense_buffer),
++ dev->d_sense,
++ UNIT_ATTENTION, 0x2a, 0x01);
++ }
++ scst_dev_check_set_local_UA(dev, cmd, sense_buffer, sl);
++ spin_unlock_bh(&dev->dev_lock);
++
++ if (dev->scsi_dev != NULL)
++ scst_obtain_device_parameters(dev);
++ }
++ } else if ((cmd->status == SAM_STAT_CHECK_CONDITION) &&
++ scst_is_ua_sense(cmd->sense, cmd->sense_valid_len) &&
++ /* mode parameters changed */
++ (scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
++ SCST_SENSE_ASCx_VALID,
++ 0, 0x2a, 0x01) ||
++ scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
++ SCST_SENSE_ASC_VALID,
++ 0, 0x29, 0) /* reset */ ||
++ scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
++ SCST_SENSE_ASC_VALID,
++ 0, 0x28, 0) /* medium changed */ ||
++ /* cleared by another ini (just in case) */
++ scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
++ SCST_SENSE_ASC_VALID,
++ 0, 0x2F, 0))) {
++ int atomic = scst_cmd_atomic(cmd);
++ if (atomic) {
++ TRACE_DBG("Possible parameters changed UA %x: "
++ "thread context required", cmd->sense[12]);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ }
++
++ TRACE(TRACE_SCSI, "Possible parameters changed UA %x "
++ "(LUN %lld): getting new parameters", cmd->sense[12],
++ (long long unsigned int)cmd->lun);
++
++ scst_obtain_device_parameters(cmd->dev);
++ } else
++ BUG();
++
++ cmd->state = SCST_CMD_STATE_DEV_DONE;
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++static void scst_inc_check_expected_sn(struct scst_cmd *cmd)
++{
++ if (likely(cmd->sn_set))
++ scst_inc_expected_sn(cmd->tgt_dev, cmd->sn_slot);
++
++ scst_make_deferred_commands_active(cmd->tgt_dev);
++}
++
++static int scst_dev_done(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_RES_CONT_SAME;
++ int state;
++ struct scst_device *dev = cmd->dev;
++
++ TRACE_ENTRY();
++
++ state = SCST_CMD_STATE_PRE_XMIT_RESP;
++
++ if (likely(!scst_is_cmd_fully_local(cmd)) &&
++ likely(dev->handler->dev_done != NULL)) {
++ int rc;
++
++ if (unlikely(!dev->handler->dev_done_atomic &&
++ scst_cmd_atomic(cmd))) {
++ /*
++ * It shouldn't be because of the SCST_TGT_DEV_AFTER_*
++ * optimization.
++ */
++ TRACE_MGMT_DBG("Dev handler %s dev_done() needs thread "
++ "context, rescheduling", dev->handler->name);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ }
++
++ TRACE_DBG("Calling dev handler %s dev_done(%p)",
++ dev->handler->name, cmd);
++ scst_set_cur_start(cmd);
++ rc = dev->handler->dev_done(cmd);
++ scst_set_dev_done_time(cmd);
++ TRACE_DBG("Dev handler %s dev_done() returned %d",
++ dev->handler->name, rc);
++ if (rc != SCST_CMD_STATE_DEFAULT)
++ state = rc;
++ }
++
++ switch (state) {
++#ifdef CONFIG_SCST_EXTRACHECKS
++ case SCST_CMD_STATE_PRE_XMIT_RESP:
++ case SCST_CMD_STATE_PARSE:
++ case SCST_CMD_STATE_PREPARE_SPACE:
++ case SCST_CMD_STATE_RDY_TO_XFER:
++ case SCST_CMD_STATE_TGT_PRE_EXEC:
++ case SCST_CMD_STATE_SEND_FOR_EXEC:
++ case SCST_CMD_STATE_LOCAL_EXEC:
++ case SCST_CMD_STATE_REAL_EXEC:
++ case SCST_CMD_STATE_PRE_DEV_DONE:
++ case SCST_CMD_STATE_MODE_SELECT_CHECKS:
++ case SCST_CMD_STATE_DEV_DONE:
++ case SCST_CMD_STATE_XMIT_RESP:
++ case SCST_CMD_STATE_FINISHED:
++ case SCST_CMD_STATE_FINISHED_INTERNAL:
++#else
++ default:
++#endif
++ cmd->state = state;
++ break;
++ case SCST_CMD_STATE_NEED_THREAD_CTX:
++ TRACE_DBG("Dev handler %s dev_done() requested "
++ "thread context, rescheduling",
++ dev->handler->name);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ break;
++#ifdef CONFIG_SCST_EXTRACHECKS
++ default:
++ if (state >= 0) {
++ PRINT_ERROR("Dev handler %s dev_done() returned "
++ "invalid cmd state %d",
++ dev->handler->name, state);
++ } else {
++ PRINT_ERROR("Dev handler %s dev_done() returned "
++ "error %d", dev->handler->name,
++ state);
++ }
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ scst_set_cmd_abnormal_done_state(cmd);
++ break;
++#endif
++ }
++
++ scst_check_unblock_dev(cmd);
++
++ if (cmd->inc_expected_sn_on_done && cmd->sent_for_exec)
++ scst_inc_check_expected_sn(cmd);
++
++ if (unlikely(cmd->internal))
++ cmd->state = SCST_CMD_STATE_FINISHED_INTERNAL;
++
++#ifndef CONFIG_SCST_TEST_IO_IN_SIRQ
++ if (cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) {
++ /* We can't allow atomic command on the exec stages */
++ if (scst_cmd_atomic(cmd)) {
++ switch (state) {
++ case SCST_CMD_STATE_TGT_PRE_EXEC:
++ case SCST_CMD_STATE_SEND_FOR_EXEC:
++ case SCST_CMD_STATE_LOCAL_EXEC:
++ case SCST_CMD_STATE_REAL_EXEC:
++ TRACE_DBG("Atomic context and redirect, "
++ "rescheduling (cmd %p)", cmd);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ break;
++ }
++ }
++ }
++#endif
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++static int scst_pre_xmit_response(struct scst_cmd *cmd)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(cmd->internal);
++
++#ifdef CONFIG_SCST_DEBUG_TM
++ if (cmd->tm_dbg_delayed &&
++ !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
++ if (scst_cmd_atomic(cmd)) {
++ TRACE_MGMT_DBG("%s",
++ "DEBUG_TM delayed cmd needs a thread");
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ return res;
++ }
++ TRACE_MGMT_DBG("Delaying cmd %p (tag %llu) for 1 second",
++ cmd, cmd->tag);
++ schedule_timeout_uninterruptible(HZ);
++ }
++#endif
++
++ if (likely(cmd->tgt_dev != NULL)) {
++ /*
++		 * These counters protect against excessively long processing
++		 * latency, so we should decrement them after the cmd has completed.
++ */
++ atomic_dec(&cmd->tgt_dev->tgt_dev_cmd_count);
++#ifdef CONFIG_SCST_PER_DEVICE_CMD_COUNT_LIMIT
++ atomic_dec(&cmd->dev->dev_cmd_count);
++#endif
++#ifdef CONFIG_SCST_ORDERED_READS
++ /* If expected values not set, expected direction is UNKNOWN */
++ if (cmd->expected_data_direction & SCST_DATA_WRITE)
++ atomic_dec(&cmd->dev->write_cmd_count);
++#endif
++ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
++ scst_on_hq_cmd_response(cmd);
++
++ if (unlikely(!cmd->sent_for_exec)) {
++ TRACE_SN("cmd %p was not sent to mid-lev"
++ " (sn %d, set %d)",
++ cmd, cmd->sn, cmd->sn_set);
++ scst_unblock_deferred(cmd->tgt_dev, cmd);
++ cmd->sent_for_exec = 1;
++ }
++ }
++
++ cmd->done = 1;
++ smp_mb(); /* to sync with scst_abort_cmd() */
++
++ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
++ scst_xmit_process_aborted_cmd(cmd);
++ else if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION))
++ scst_store_sense(cmd);
++
++ if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
++ TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %llu), "
++ "skipping", cmd, (long long unsigned int)cmd->tag);
++ cmd->state = SCST_CMD_STATE_FINISHED;
++ res = SCST_CMD_STATE_RES_CONT_SAME;
++ goto out;
++ }
++
++ if (unlikely(cmd->resid_possible))
++ scst_adjust_resp_data_len(cmd);
++ else
++ cmd->adjusted_resp_data_len = cmd->resp_data_len;
++
++ cmd->state = SCST_CMD_STATE_XMIT_RESP;
++ res = SCST_CMD_STATE_RES_CONT_SAME;
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++static int scst_xmit_response(struct scst_cmd *cmd)
++{
++ struct scst_tgt_template *tgtt = cmd->tgtt;
++ int res, rc;
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(cmd->internal);
++
++ if (unlikely(!tgtt->xmit_response_atomic &&
++ scst_cmd_atomic(cmd))) {
++ /*
++ * It shouldn't be because of the SCST_TGT_DEV_AFTER_*
++ * optimization.
++ */
++ TRACE_MGMT_DBG("Target driver %s xmit_response() needs thread "
++ "context, rescheduling", tgtt->name);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ }
++
++ while (1) {
++ int finished_cmds = atomic_read(&cmd->tgt->finished_cmds);
++
++ res = SCST_CMD_STATE_RES_CONT_NEXT;
++ cmd->state = SCST_CMD_STATE_XMIT_WAIT;
++
++ TRACE_DBG("Calling xmit_response(%p)", cmd);
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ if (trace_flag & TRACE_SND_BOT) {
++ int i;
++ struct scatterlist *sg;
++ if (cmd->tgt_sg != NULL)
++ sg = cmd->tgt_sg;
++ else
++ sg = cmd->sg;
++ if (sg != NULL) {
++ TRACE(TRACE_SND_BOT, "Xmitting data for cmd %p "
++ "(sg_cnt %d, sg %p, sg[0].page %p)",
++ cmd, cmd->tgt_sg_cnt, sg,
++ (void *)sg_page(&sg[0]));
++ for (i = 0; i < cmd->tgt_sg_cnt; ++i) {
++ PRINT_BUFF_FLAG(TRACE_SND_BOT,
++ "Xmitting sg", sg_virt(&sg[i]),
++ sg[i].length);
++ }
++ }
++ }
++#endif
++
++ if (tgtt->on_hw_pending_cmd_timeout != NULL) {
++ struct scst_session *sess = cmd->sess;
++ cmd->hw_pending_start = jiffies;
++ cmd->cmd_hw_pending = 1;
++ if (!test_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags)) {
++ TRACE_DBG("Sched HW pending work for sess %p "
++ "(max time %d)", sess,
++ tgtt->max_hw_pending_time);
++ set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED,
++ &sess->sess_aflags);
++ schedule_delayed_work(&sess->hw_pending_work,
++ tgtt->max_hw_pending_time * HZ);
++ }
++ }
++
++ scst_set_cur_start(cmd);
++
++#ifdef CONFIG_SCST_DEBUG_RETRY
++ if (((scst_random() % 100) == 77))
++ rc = SCST_TGT_RES_QUEUE_FULL;
++ else
++#endif
++ rc = tgtt->xmit_response(cmd);
++ TRACE_DBG("xmit_response() returned %d", rc);
++
++ if (likely(rc == SCST_TGT_RES_SUCCESS))
++ goto out;
++
++ scst_set_xmit_time(cmd);
++
++ cmd->cmd_hw_pending = 0;
++
++ /* Restore the previous state */
++ cmd->state = SCST_CMD_STATE_XMIT_RESP;
++
++ switch (rc) {
++ case SCST_TGT_RES_QUEUE_FULL:
++ if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
++ break;
++ else
++ continue;
++
++ case SCST_TGT_RES_NEED_THREAD_CTX:
++ TRACE_DBG("Target driver %s xmit_response() "
++ "requested thread context, rescheduling",
++ tgtt->name);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ break;
++
++ default:
++ goto out_error;
++ }
++ break;
++ }
++
++out:
++ /* Caution: cmd can be already dead here */
++ TRACE_EXIT_HRES(res);
++ return res;
++
++out_error:
++ if (rc == SCST_TGT_RES_FATAL_ERROR) {
++ PRINT_ERROR("Target driver %s xmit_response() returned "
++ "fatal error", tgtt->name);
++ } else {
++ PRINT_ERROR("Target driver %s xmit_response() returned "
++ "invalid value %d", tgtt->name, rc);
++ }
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
++ cmd->state = SCST_CMD_STATE_FINISHED;
++ res = SCST_CMD_STATE_RES_CONT_SAME;
++ goto out;
++}
++
++/**
++ * scst_tgt_cmd_done() - the command's processing done
++ * @cmd: SCST command
++ * @pref_context: preferred command execution context
++ *
++ * Description:
++ * Notifies SCST that the driver sent the response and the command
++ * can be freed now. Don't forget to set the delivery status, if it
++ * isn't success, using scst_set_delivery_status() before calling
++ *	  this function. The third argument sets preferred command execution
++ * context (see SCST_CONTEXT_* constants for details)
++ */
++void scst_tgt_cmd_done(struct scst_cmd *cmd,
++ enum scst_exec_context pref_context)
++{
++ TRACE_ENTRY();
++
++ BUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);
++
++ scst_set_xmit_time(cmd);
++
++ cmd->cmd_hw_pending = 0;
++
++ if (unlikely(cmd->tgt_dev == NULL))
++ pref_context = SCST_CONTEXT_THREAD;
++
++ cmd->state = SCST_CMD_STATE_FINISHED;
++
++ scst_process_redirect_cmd(cmd, pref_context, 1);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL(scst_tgt_cmd_done);
++
++static int scst_finish_cmd(struct scst_cmd *cmd)
++{
++ int res;
++ struct scst_session *sess = cmd->sess;
++
++ TRACE_ENTRY();
++
++ scst_update_lat_stats(cmd);
++
++ if (unlikely(cmd->delivery_status != SCST_CMD_DELIVERY_SUCCESS)) {
++ if ((cmd->tgt_dev != NULL) &&
++ scst_is_ua_sense(cmd->sense, cmd->sense_valid_len)) {
++ /* This UA delivery failed, so we need to requeue it */
++ if (scst_cmd_atomic(cmd) &&
++ scst_is_ua_global(cmd->sense, cmd->sense_valid_len)) {
++ TRACE_MGMT_DBG("Requeuing of global UA for "
++ "failed cmd %p needs a thread", cmd);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ }
++ scst_requeue_ua(cmd);
++ }
++ }
++
++ atomic_dec(&sess->sess_cmd_count);
++
++ spin_lock_irq(&sess->sess_list_lock);
++ list_del(&cmd->sess_cmd_list_entry);
++ spin_unlock_irq(&sess->sess_list_lock);
++
++ cmd->finished = 1;
++ smp_mb(); /* to sync with scst_abort_cmd() */
++
++ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
++ TRACE_MGMT_DBG("Aborted cmd %p finished (cmd_ref %d, "
++ "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
++ atomic_read(&scst_cmd_count));
++
++ scst_finish_cmd_mgmt(cmd);
++ }
++
++ __scst_cmd_put(cmd);
++
++ res = SCST_CMD_STATE_RES_CONT_NEXT;
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++/*
++ * No locks, but it must be externally serialized (see comment for
++ * scst_cmd_init_done() in scst.h)
++ */
++static void scst_cmd_set_sn(struct scst_cmd *cmd)
++{
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ if (scst_is_implicit_hq(cmd) &&
++ likely(cmd->queue_type == SCST_CMD_QUEUE_SIMPLE)) {
++ TRACE_SN("Implicit HQ cmd %p", cmd);
++ cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
++ }
++
++ EXTRACHECKS_BUG_ON(cmd->sn_set || cmd->hq_cmd_inced);
++
++ /* Optimized for lockless fast path */
++
++ scst_check_debug_sn(cmd);
++
++#ifdef CONFIG_SCST_STRICT_SERIALIZING
++ cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
++#endif
++
++ if (cmd->dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) {
++ /*
++ * Not the best way, but good enough until there is a
++ * possibility to specify queue type during pass-through
++ * commands submission.
++ */
++ cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
++ }
++
++ switch (cmd->queue_type) {
++ case SCST_CMD_QUEUE_SIMPLE:
++ case SCST_CMD_QUEUE_UNTAGGED:
++#ifdef CONFIG_SCST_ORDERED_READS
++ if (scst_cmd_is_expected_set(cmd)) {
++ if ((cmd->expected_data_direction == SCST_DATA_READ) &&
++ (atomic_read(&cmd->dev->write_cmd_count) == 0))
++ goto ordered;
++ } else
++ goto ordered;
++#endif
++ if (likely(tgt_dev->num_free_sn_slots >= 0)) {
++ /*
++ * atomic_inc_return() implies memory barrier to sync
++ * with scst_inc_expected_sn()
++ */
++ if (atomic_inc_return(tgt_dev->cur_sn_slot) == 1) {
++ tgt_dev->curr_sn++;
++ TRACE_SN("Incremented curr_sn %d",
++ tgt_dev->curr_sn);
++ }
++ cmd->sn_slot = tgt_dev->cur_sn_slot;
++ cmd->sn = tgt_dev->curr_sn;
++
++ tgt_dev->prev_cmd_ordered = 0;
++ } else {
++ TRACE(TRACE_MINOR, "***WARNING*** Not enough SN slots "
++ "%zd", ARRAY_SIZE(tgt_dev->sn_slots));
++ goto ordered;
++ }
++ break;
++
++ case SCST_CMD_QUEUE_ORDERED:
++ TRACE_SN("ORDERED cmd %p (op %x)", cmd, cmd->cdb[0]);
++ordered:
++ if (!tgt_dev->prev_cmd_ordered) {
++ spin_lock_irqsave(&tgt_dev->sn_lock, flags);
++ if (tgt_dev->num_free_sn_slots >= 0) {
++ tgt_dev->num_free_sn_slots--;
++ if (tgt_dev->num_free_sn_slots >= 0) {
++ int i = 0;
++ /* Commands can finish in any order, so
++ * we don't know which slot is empty.
++ */
++ while (1) {
++ tgt_dev->cur_sn_slot++;
++ if (tgt_dev->cur_sn_slot ==
++ tgt_dev->sn_slots + ARRAY_SIZE(tgt_dev->sn_slots))
++ tgt_dev->cur_sn_slot = tgt_dev->sn_slots;
++
++ if (atomic_read(tgt_dev->cur_sn_slot) == 0)
++ break;
++
++ i++;
++ BUG_ON(i == ARRAY_SIZE(tgt_dev->sn_slots));
++ }
++ TRACE_SN("New cur SN slot %zd",
++ tgt_dev->cur_sn_slot -
++ tgt_dev->sn_slots);
++ }
++ }
++ spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
++ }
++ tgt_dev->prev_cmd_ordered = 1;
++ tgt_dev->curr_sn++;
++ cmd->sn = tgt_dev->curr_sn;
++ break;
++
++ case SCST_CMD_QUEUE_HEAD_OF_QUEUE:
++ TRACE_SN("HQ cmd %p (op %x)", cmd, cmd->cdb[0]);
++ spin_lock_irqsave(&tgt_dev->sn_lock, flags);
++ tgt_dev->hq_cmd_count++;
++ spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
++ cmd->hq_cmd_inced = 1;
++ goto out;
++
++ default:
++ BUG();
++ }
++
++ TRACE_SN("cmd(%p)->sn: %d (tgt_dev %p, *cur_sn_slot %d, "
++ "num_free_sn_slots %d, prev_cmd_ordered %ld, "
++ "cur_sn_slot %zd)", cmd, cmd->sn, tgt_dev,
++ atomic_read(tgt_dev->cur_sn_slot),
++ tgt_dev->num_free_sn_slots, tgt_dev->prev_cmd_ordered,
++ tgt_dev->cur_sn_slot-tgt_dev->sn_slots);
++
++ cmd->sn_set = 1;
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * Returns 0 on success, > 0 when we need to wait for unblock,
++ * < 0 if there is no device (lun) or device type handler.
++ *
++ * No locks, but might be on IRQ, protection is done by the
++ * suspended activity.
++ */
++static int scst_translate_lun(struct scst_cmd *cmd)
++{
++ struct scst_tgt_dev *tgt_dev = NULL;
++ int res;
++
++ TRACE_ENTRY();
++
++ /* See comment about smp_mb() in scst_suspend_activity() */
++ __scst_get();
++
++ if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
++ struct list_head *sess_tgt_dev_list_head =
++ &cmd->sess->sess_tgt_dev_list_hash[HASH_VAL(cmd->lun)];
++ TRACE_DBG("Finding tgt_dev for cmd %p (lun %lld)", cmd,
++ (long long unsigned int)cmd->lun);
++ res = -1;
++ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
++ sess_tgt_dev_list_entry) {
++ if (tgt_dev->lun == cmd->lun) {
++ TRACE_DBG("tgt_dev %p found", tgt_dev);
++
++ if (unlikely(tgt_dev->dev->handler ==
++ &scst_null_devtype)) {
++ PRINT_INFO("Dev handler for device "
++ "%lld is NULL, the device will not "
++ "be visible remotely",
++ (long long unsigned int)cmd->lun);
++ break;
++ }
++
++ cmd->cmd_threads = tgt_dev->active_cmd_threads;
++ cmd->tgt_dev = tgt_dev;
++ cmd->dev = tgt_dev->dev;
++
++ res = 0;
++ break;
++ }
++ }
++ if (res != 0) {
++ TRACE(TRACE_MINOR,
++ "tgt_dev for LUN %lld not found, command to "
++				"nonexistent LUN?",
++ (long long unsigned int)cmd->lun);
++ __scst_put();
++ }
++ } else {
++ TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
++ __scst_put();
++ res = 1;
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/*
++ * No locks, but might be on IRQ.
++ *
++ * Returns 0 on success, > 0 when we need to wait for unblock,
++ * < 0 if there is no device (lun) or device type handler.
++ */
++static int __scst_init_cmd(struct scst_cmd *cmd)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ res = scst_translate_lun(cmd);
++ if (likely(res == 0)) {
++ int cnt;
++ bool failure = false;
++
++ cmd->state = SCST_CMD_STATE_PARSE;
++
++ cnt = atomic_inc_return(&cmd->tgt_dev->tgt_dev_cmd_count);
++ if (unlikely(cnt > SCST_MAX_TGT_DEV_COMMANDS)) {
++ TRACE(TRACE_FLOW_CONTROL,
++ "Too many pending commands (%d) in "
++ "session, returning BUSY to initiator \"%s\"",
++ cnt, (cmd->sess->initiator_name[0] == '\0') ?
++ "Anonymous" : cmd->sess->initiator_name);
++ failure = true;
++ }
++
++#ifdef CONFIG_SCST_PER_DEVICE_CMD_COUNT_LIMIT
++ cnt = atomic_inc_return(&cmd->dev->dev_cmd_count);
++ if (unlikely(cnt > SCST_MAX_DEV_COMMANDS)) {
++ if (!failure) {
++ TRACE(TRACE_FLOW_CONTROL,
++ "Too many pending device "
++ "commands (%d), returning BUSY to "
++ "initiator \"%s\"", cnt,
++ (cmd->sess->initiator_name[0] == '\0') ?
++ "Anonymous" :
++ cmd->sess->initiator_name);
++ failure = true;
++ }
++ }
++#endif
++
++#ifdef CONFIG_SCST_ORDERED_READS
++ /* If expected values not set, expected direction is UNKNOWN */
++ if (cmd->expected_data_direction & SCST_DATA_WRITE)
++ atomic_inc(&cmd->dev->write_cmd_count);
++#endif
++
++ if (unlikely(failure))
++ goto out_busy;
++
++ if (unlikely(scst_pre_parse(cmd) != 0))
++ goto out;
++
++ if (!cmd->set_sn_on_restart_cmd)
++ scst_cmd_set_sn(cmd);
++ } else if (res < 0) {
++ TRACE_DBG("Finishing cmd %p", cmd);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_lun_not_supported));
++ scst_set_cmd_abnormal_done_state(cmd);
++ } else
++ goto out;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_busy:
++ scst_set_busy(cmd);
++ scst_set_cmd_abnormal_done_state(cmd);
++ goto out;
++}
++
++/* Called under scst_init_lock and IRQs disabled */
++static void scst_do_job_init(void)
++ __releases(&scst_init_lock)
++ __acquires(&scst_init_lock)
++{
++ struct scst_cmd *cmd;
++ int susp;
++
++ TRACE_ENTRY();
++
++restart:
++ /*
++ * There is no need for read barrier here, because we don't care where
++ * this check will be done.
++ */
++ susp = test_bit(SCST_FLAG_SUSPENDED, &scst_flags);
++ if (scst_init_poll_cnt > 0)
++ scst_init_poll_cnt--;
++
++ list_for_each_entry(cmd, &scst_init_cmd_list, cmd_list_entry) {
++ int rc;
++ if (susp && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
++ continue;
++ if (!test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
++ spin_unlock_irq(&scst_init_lock);
++ rc = __scst_init_cmd(cmd);
++ spin_lock_irq(&scst_init_lock);
++ if (rc > 0) {
++ TRACE_MGMT_DBG("%s",
++ "FLAG SUSPENDED set, restarting");
++ goto restart;
++ }
++ } else {
++ TRACE_MGMT_DBG("Aborting not inited cmd %p (tag %llu)",
++ cmd, (long long unsigned int)cmd->tag);
++ scst_set_cmd_abnormal_done_state(cmd);
++ }
++
++ /*
++ * Deleting cmd from init cmd list after __scst_init_cmd()
++ * is necessary to keep the check in scst_init_cmd() correct
++ * to preserve the commands order.
++ *
++ * We don't care about the race, when init cmd list is empty
++ * and one command detected that it just was not empty, so
++ * it's inserting to it, but another command at the same time
++ * seeing init cmd list empty and goes directly, because it
++ * could affect only commands from the same initiator to the
++ * same tgt_dev, but scst_cmd_init_done*() doesn't guarantee
++ * the order in case of simultaneous such calls anyway.
++ */
++ TRACE_MGMT_DBG("Deleting cmd %p from init cmd list", cmd);
++ smp_wmb(); /* enforce the required order */
++ list_del(&cmd->cmd_list_entry);
++ spin_unlock(&scst_init_lock);
++
++ spin_lock(&cmd->cmd_threads->cmd_list_lock);
++ TRACE_MGMT_DBG("Adding cmd %p to active cmd list", cmd);
++ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
++ list_add(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ else
++ list_add_tail(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
++ spin_unlock(&cmd->cmd_threads->cmd_list_lock);
++
++ spin_lock(&scst_init_lock);
++ goto restart;
++ }
++
++ /* It isn't really needed, but let's keep it */
++ if (susp != test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
++ goto restart;
++
++ TRACE_EXIT();
++ return;
++}
++
++static inline int test_init_cmd_list(void)
++{
++ int res = (!list_empty(&scst_init_cmd_list) &&
++ !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
++ unlikely(kthread_should_stop()) ||
++ (scst_init_poll_cnt > 0);
++ return res;
++}
++
++int scst_init_thread(void *arg)
++{
++ TRACE_ENTRY();
++
++ PRINT_INFO("Init thread started, PID %d", current->pid);
++
++ current->flags |= PF_NOFREEZE;
++
++ set_user_nice(current, -10);
++
++ spin_lock_irq(&scst_init_lock);
++ while (!kthread_should_stop()) {
++ wait_queue_t wait;
++ init_waitqueue_entry(&wait, current);
++
++ if (!test_init_cmd_list()) {
++ add_wait_queue_exclusive(&scst_init_cmd_list_waitQ,
++ &wait);
++ for (;;) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ if (test_init_cmd_list())
++ break;
++ spin_unlock_irq(&scst_init_lock);
++ schedule();
++ spin_lock_irq(&scst_init_lock);
++ }
++ set_current_state(TASK_RUNNING);
++ remove_wait_queue(&scst_init_cmd_list_waitQ, &wait);
++ }
++ scst_do_job_init();
++ }
++ spin_unlock_irq(&scst_init_lock);
++
++ /*
++ * If kthread_should_stop() is true, we are guaranteed to be
++ * on the module unload, so scst_init_cmd_list must be empty.
++ */
++ BUG_ON(!list_empty(&scst_init_cmd_list));
++
++ PRINT_INFO("Init thread PID %d finished", current->pid);
++
++ TRACE_EXIT();
++ return 0;
++}
++
++/**
++ * scst_process_active_cmd() - process active command
++ *
++ * Description:
++ *    Main SCST command processing routine. Must be used only by dev handlers.
++ *
++ * The atomic argument is true if the function is called in atomic context.
++ *
++ * Must be called with no locks held.
++ */
++void scst_process_active_cmd(struct scst_cmd *cmd, bool atomic)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ /*
++ * Checkpatch will complain on the use of in_atomic() below. You
++ * can safely ignore this warning since in_atomic() is used here only
++ * for debugging purposes.
++ */
++ EXTRACHECKS_BUG_ON(in_irq() || irqs_disabled());
++ EXTRACHECKS_WARN_ON((in_atomic() || in_interrupt() || irqs_disabled()) &&
++ !atomic);
++
++ cmd->atomic = atomic;
++
++ TRACE_DBG("cmd %p, atomic %d", cmd, atomic);
++
++ do {
++ switch (cmd->state) {
++ case SCST_CMD_STATE_PARSE:
++ res = scst_parse_cmd(cmd);
++ break;
++
++ case SCST_CMD_STATE_PREPARE_SPACE:
++ res = scst_prepare_space(cmd);
++ break;
++
++ case SCST_CMD_STATE_PREPROCESSING_DONE:
++ res = scst_preprocessing_done(cmd);
++ break;
++
++ case SCST_CMD_STATE_RDY_TO_XFER:
++ res = scst_rdy_to_xfer(cmd);
++ break;
++
++ case SCST_CMD_STATE_TGT_PRE_EXEC:
++ res = scst_tgt_pre_exec(cmd);
++ break;
++
++ case SCST_CMD_STATE_SEND_FOR_EXEC:
++ if (tm_dbg_check_cmd(cmd) != 0) {
++ res = SCST_CMD_STATE_RES_CONT_NEXT;
++ TRACE_MGMT_DBG("Skipping cmd %p (tag %llu), "
++ "because of TM DBG delay", cmd,
++ (long long unsigned int)cmd->tag);
++ break;
++ }
++ res = scst_send_for_exec(&cmd);
++ /*
++ * !! At this point cmd, sess & tgt_dev can already be
++ * freed !!
++ */
++ break;
++
++ case SCST_CMD_STATE_LOCAL_EXEC:
++ res = scst_local_exec(cmd);
++ /*
++ * !! At this point cmd, sess & tgt_dev can already be
++ * freed !!
++ */
++ break;
++
++ case SCST_CMD_STATE_REAL_EXEC:
++ res = scst_real_exec(cmd);
++ /*
++ * !! At this point cmd, sess & tgt_dev can already be
++ * freed !!
++ */
++ break;
++
++ case SCST_CMD_STATE_PRE_DEV_DONE:
++ res = scst_pre_dev_done(cmd);
++ EXTRACHECKS_BUG_ON((res == SCST_CMD_STATE_RES_NEED_THREAD) &&
++ (cmd->state == SCST_CMD_STATE_PRE_DEV_DONE));
++ break;
++
++ case SCST_CMD_STATE_MODE_SELECT_CHECKS:
++ res = scst_mode_select_checks(cmd);
++ break;
++
++ case SCST_CMD_STATE_DEV_DONE:
++ res = scst_dev_done(cmd);
++ break;
++
++ case SCST_CMD_STATE_PRE_XMIT_RESP:
++ res = scst_pre_xmit_response(cmd);
++ EXTRACHECKS_BUG_ON(res ==
++ SCST_CMD_STATE_RES_NEED_THREAD);
++ break;
++
++ case SCST_CMD_STATE_XMIT_RESP:
++ res = scst_xmit_response(cmd);
++ break;
++
++ case SCST_CMD_STATE_FINISHED:
++ res = scst_finish_cmd(cmd);
++ break;
++
++ case SCST_CMD_STATE_FINISHED_INTERNAL:
++ res = scst_finish_internal_cmd(cmd);
++ EXTRACHECKS_BUG_ON(res ==
++ SCST_CMD_STATE_RES_NEED_THREAD);
++ break;
++
++ default:
++ PRINT_CRIT_ERROR("cmd (%p) in state %d, but shouldn't "
++ "be", cmd, cmd->state);
++ BUG();
++ res = SCST_CMD_STATE_RES_CONT_NEXT;
++ break;
++ }
++ } while (res == SCST_CMD_STATE_RES_CONT_SAME);
++
++ if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
++ /* None */
++ } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
++ spin_lock_irq(&cmd->cmd_threads->cmd_list_lock);
++#ifdef CONFIG_SCST_EXTRACHECKS
++ switch (cmd->state) {
++ case SCST_CMD_STATE_PARSE:
++ case SCST_CMD_STATE_PREPARE_SPACE:
++ case SCST_CMD_STATE_RDY_TO_XFER:
++ case SCST_CMD_STATE_TGT_PRE_EXEC:
++ case SCST_CMD_STATE_SEND_FOR_EXEC:
++ case SCST_CMD_STATE_LOCAL_EXEC:
++ case SCST_CMD_STATE_REAL_EXEC:
++ case SCST_CMD_STATE_DEV_DONE:
++ case SCST_CMD_STATE_XMIT_RESP:
++#endif
++ TRACE_DBG("Adding cmd %p to head of active cmd list",
++ cmd);
++ list_add(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++#ifdef CONFIG_SCST_EXTRACHECKS
++ break;
++ default:
++ PRINT_CRIT_ERROR("cmd %p is in invalid state %d)", cmd,
++ cmd->state);
++ spin_unlock_irq(&cmd->cmd_threads->cmd_list_lock);
++ BUG();
++ spin_lock_irq(&cmd->cmd_threads->cmd_list_lock);
++ break;
++ }
++#endif
++ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
++ spin_unlock_irq(&cmd->cmd_threads->cmd_list_lock);
++ } else
++ BUG();
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_process_active_cmd);
++
++/* Called under cmd_list_lock and IRQs disabled */
++static void scst_do_job_active(struct list_head *cmd_list,
++ spinlock_t *cmd_list_lock, bool atomic)
++ __releases(cmd_list_lock)
++ __acquires(cmd_list_lock)
++{
++ TRACE_ENTRY();
++
++ while (!list_empty(cmd_list)) {
++ struct scst_cmd *cmd = list_entry(cmd_list->next, typeof(*cmd),
++ cmd_list_entry);
++ TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
++ list_del(&cmd->cmd_list_entry);
++ spin_unlock_irq(cmd_list_lock);
++ scst_process_active_cmd(cmd, atomic);
++ spin_lock_irq(cmd_list_lock);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static inline int test_cmd_threads(struct scst_cmd_threads *p_cmd_threads)
++{
++ int res = !list_empty(&p_cmd_threads->active_cmd_list) ||
++ unlikely(kthread_should_stop()) ||
++ tm_dbg_is_release();
++ return res;
++}
++
++int scst_cmd_thread(void *arg)
++{
++ struct scst_cmd_threads *p_cmd_threads = arg;
++
++ TRACE_ENTRY();
++
++ PRINT_INFO("Processing thread %s (PID %d) started", current->comm,
++ current->pid);
++
++#if 0
++ set_user_nice(current, 10);
++#endif
++ current->flags |= PF_NOFREEZE;
++
++ mutex_lock(&p_cmd_threads->io_context_mutex);
++
++ WARN_ON(current->io_context);
++
++ if (p_cmd_threads != &scst_main_cmd_threads) {
++ /*
++ * For linked IO contexts io_context might be not NULL while
++ * io_context 0.
++ */
++ if (p_cmd_threads->io_context == NULL) {
++ p_cmd_threads->io_context = get_io_context(GFP_KERNEL, -1);
++ TRACE_MGMT_DBG("Alloced new IO context %p "
++ "(p_cmd_threads %p)",
++ p_cmd_threads->io_context,
++ p_cmd_threads);
++ /*
++ * Put the extra reference created by get_io_context()
++ * because we don't need it.
++ */
++ put_io_context(p_cmd_threads->io_context);
++ } else {
++ current->io_context = ioc_task_link(p_cmd_threads->io_context);
++ TRACE_MGMT_DBG("Linked IO context %p "
++ "(p_cmd_threads %p)", p_cmd_threads->io_context,
++ p_cmd_threads);
++ }
++ p_cmd_threads->io_context_refcnt++;
++ }
++
++ mutex_unlock(&p_cmd_threads->io_context_mutex);
++
++ p_cmd_threads->io_context_ready = true;
++
++ spin_lock_irq(&p_cmd_threads->cmd_list_lock);
++ while (!kthread_should_stop()) {
++ wait_queue_t wait;
++ init_waitqueue_entry(&wait, current);
++
++ if (!test_cmd_threads(p_cmd_threads)) {
++ add_wait_queue_exclusive_head(
++ &p_cmd_threads->cmd_list_waitQ,
++ &wait);
++ for (;;) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ if (test_cmd_threads(p_cmd_threads))
++ break;
++ spin_unlock_irq(&p_cmd_threads->cmd_list_lock);
++ schedule();
++ spin_lock_irq(&p_cmd_threads->cmd_list_lock);
++ }
++ set_current_state(TASK_RUNNING);
++ remove_wait_queue(&p_cmd_threads->cmd_list_waitQ, &wait);
++ }
++
++ if (tm_dbg_is_release()) {
++ spin_unlock_irq(&p_cmd_threads->cmd_list_lock);
++ tm_dbg_check_released_cmds();
++ spin_lock_irq(&p_cmd_threads->cmd_list_lock);
++ }
++
++ scst_do_job_active(&p_cmd_threads->active_cmd_list,
++ &p_cmd_threads->cmd_list_lock, false);
++ }
++ spin_unlock_irq(&p_cmd_threads->cmd_list_lock);
++
++ if (p_cmd_threads != &scst_main_cmd_threads) {
++ mutex_lock(&p_cmd_threads->io_context_mutex);
++ if (--p_cmd_threads->io_context_refcnt == 0)
++ p_cmd_threads->io_context = NULL;
++ mutex_unlock(&p_cmd_threads->io_context_mutex);
++ }
++
++ PRINT_INFO("Processing thread %s (PID %d) finished", current->comm,
++ current->pid);
++
++ TRACE_EXIT();
++ return 0;
++}
++
++void scst_cmd_tasklet(long p)
++{
++ struct scst_tasklet *t = (struct scst_tasklet *)p;
++
++ TRACE_ENTRY();
++
++ spin_lock_irq(&t->tasklet_lock);
++ scst_do_job_active(&t->tasklet_cmd_list, &t->tasklet_lock, true);
++ spin_unlock_irq(&t->tasklet_lock);
++
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * Returns 0 on success, < 0 if there is no device handler or
++ * > 0 if SCST_FLAG_SUSPENDED set and SCST_FLAG_SUSPENDING - not.
++ * No locks, protection is done by the suspended activity.
++ */
++static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
++{
++ struct scst_tgt_dev *tgt_dev = NULL;
++ struct list_head *sess_tgt_dev_list_head;
++ int res = -1;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Finding tgt_dev for mgmt cmd %p (lun %lld)", mcmd,
++ (long long unsigned int)mcmd->lun);
++
++ /* See comment about smp_mb() in scst_suspend_activity() */
++ __scst_get();
++
++ if (unlikely(test_bit(SCST_FLAG_SUSPENDED, &scst_flags) &&
++ !test_bit(SCST_FLAG_SUSPENDING, &scst_flags))) {
++ TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
++ __scst_put();
++ res = 1;
++ goto out;
++ }
++
++ sess_tgt_dev_list_head =
++ &mcmd->sess->sess_tgt_dev_list_hash[HASH_VAL(mcmd->lun)];
++ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
++ sess_tgt_dev_list_entry) {
++ if (tgt_dev->lun == mcmd->lun) {
++ TRACE_DBG("tgt_dev %p found", tgt_dev);
++ mcmd->mcmd_tgt_dev = tgt_dev;
++ res = 0;
++ break;
++ }
++ }
++ if (mcmd->mcmd_tgt_dev == NULL)
++ __scst_put();
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++/* No locks */
++void scst_done_cmd_mgmt(struct scst_cmd *cmd)
++{
++ struct scst_mgmt_cmd_stub *mstb, *t;
++ bool wake = 0;
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("cmd %p done (tag %llu)",
++ cmd, (long long unsigned int)cmd->tag);
++
++ spin_lock_irqsave(&scst_mcmd_lock, flags);
++
++ list_for_each_entry_safe(mstb, t, &cmd->mgmt_cmd_list,
++ cmd_mgmt_cmd_list_entry) {
++ struct scst_mgmt_cmd *mcmd;
++
++ if (!mstb->done_counted)
++ continue;
++
++ mcmd = mstb->mcmd;
++ TRACE_MGMT_DBG("mcmd %p, mcmd->cmd_done_wait_count %d",
++ mcmd, mcmd->cmd_done_wait_count);
++
++ mcmd->cmd_done_wait_count--;
++
++ BUG_ON(mcmd->cmd_done_wait_count < 0);
++
++ if (mcmd->cmd_done_wait_count > 0) {
++ TRACE_MGMT_DBG("cmd_done_wait_count(%d) not 0, "
++ "skipping", mcmd->cmd_done_wait_count);
++ goto check_free;
++ }
++
++ if (mcmd->state == SCST_MCMD_STATE_WAITING_AFFECTED_CMDS_DONE) {
++ mcmd->state = SCST_MCMD_STATE_AFFECTED_CMDS_DONE;
++ TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd "
++ "list", mcmd);
++ list_add_tail(&mcmd->mgmt_cmd_list_entry,
++ &scst_active_mgmt_cmd_list);
++ wake = 1;
++ }
++
++check_free:
++ if (!mstb->finish_counted) {
++ TRACE_DBG("Releasing mstb %p", mstb);
++ list_del(&mstb->cmd_mgmt_cmd_list_entry);
++ mempool_free(mstb, scst_mgmt_stub_mempool);
++ }
++ }
++
++ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
++
++ if (wake)
++ wake_up(&scst_mgmt_cmd_list_waitQ);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Called under scst_mcmd_lock and IRQs disabled */
++static void __scst_dec_finish_wait_count(struct scst_mgmt_cmd *mcmd, bool *wake)
++{
++ TRACE_ENTRY();
++
++ mcmd->cmd_finish_wait_count--;
++
++ BUG_ON(mcmd->cmd_finish_wait_count < 0);
++
++ if (mcmd->cmd_finish_wait_count > 0) {
++ TRACE_MGMT_DBG("cmd_finish_wait_count(%d) not 0, "
++ "skipping", mcmd->cmd_finish_wait_count);
++ goto out;
++ }
++
++ if (mcmd->cmd_done_wait_count > 0) {
++ TRACE_MGMT_DBG("cmd_done_wait_count(%d) not 0, "
++ "skipping", mcmd->cmd_done_wait_count);
++ goto out;
++ }
++
++ if (mcmd->state == SCST_MCMD_STATE_WAITING_AFFECTED_CMDS_FINISHED) {
++ mcmd->state = SCST_MCMD_STATE_DONE;
++ TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd "
++ "list", mcmd);
++ list_add_tail(&mcmd->mgmt_cmd_list_entry,
++ &scst_active_mgmt_cmd_list);
++ *wake = true;
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ * scst_prepare_async_mcmd() - prepare async management command
++ *
++ * Notifies SCST that the management command is going to be async, i.e.
++ * will be completed in another context.
++ *
++ * No SCST locks supposed to be held on entrance.
++ */
++void scst_prepare_async_mcmd(struct scst_mgmt_cmd *mcmd)
++{
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("Preparing mcmd %p for async execution "
++ "(cmd_finish_wait_count %d)", mcmd,
++ mcmd->cmd_finish_wait_count);
++
++ spin_lock_irqsave(&scst_mcmd_lock, flags);
++ mcmd->cmd_finish_wait_count++;
++ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_prepare_async_mcmd);
++
++/**
++ * scst_async_mcmd_completed() - async management command completed
++ *
++ * Notifies SCST that the async management command, prepared by
++ * scst_prepare_async_mcmd(), has completed.
++ *
++ * No SCST locks supposed to be held on entrance.
++ */
++void scst_async_mcmd_completed(struct scst_mgmt_cmd *mcmd, int status)
++{
++ unsigned long flags;
++ bool wake = false;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("Async mcmd %p completed (status %d)", mcmd, status);
++
++ spin_lock_irqsave(&scst_mcmd_lock, flags);
++
++ if (status != SCST_MGMT_STATUS_SUCCESS)
++ mcmd->status = status;
++
++ __scst_dec_finish_wait_count(mcmd, &wake);
++
++ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
++
++ if (wake)
++ wake_up(&scst_mgmt_cmd_list_waitQ);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_async_mcmd_completed);
++
++/* No locks */
++static void scst_finish_cmd_mgmt(struct scst_cmd *cmd)
++{
++ struct scst_mgmt_cmd_stub *mstb, *t;
++ bool wake = false;
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("cmd %p finished (tag %llu)",
++ cmd, (long long unsigned int)cmd->tag);
++
++ spin_lock_irqsave(&scst_mcmd_lock, flags);
++
++ list_for_each_entry_safe(mstb, t, &cmd->mgmt_cmd_list,
++ cmd_mgmt_cmd_list_entry) {
++ struct scst_mgmt_cmd *mcmd = mstb->mcmd;
++
++ TRACE_MGMT_DBG("mcmd %p, mcmd->cmd_finish_wait_count %d", mcmd,
++ mcmd->cmd_finish_wait_count);
++
++ BUG_ON(!mstb->finish_counted);
++
++ if (cmd->completed)
++ mcmd->completed_cmd_count++;
++
++ __scst_dec_finish_wait_count(mcmd, &wake);
++
++ TRACE_DBG("Releasing mstb %p", mstb);
++ list_del(&mstb->cmd_mgmt_cmd_list_entry);
++ mempool_free(mstb, scst_mgmt_stub_mempool);
++ }
++
++ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
++
++ if (wake)
++ wake_up(&scst_mgmt_cmd_list_waitQ);
++
++ TRACE_EXIT();
++ return;
++}
++
++static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
++ struct scst_tgt_dev *tgt_dev, int set_status)
++{
++ int res = SCST_DEV_TM_NOT_COMPLETED;
++ struct scst_dev_type *h = tgt_dev->dev->handler;
++
++ if (h->task_mgmt_fn) {
++ TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
++ h->name, mcmd->fn);
++ EXTRACHECKS_BUG_ON(in_irq() || irqs_disabled());
++ res = h->task_mgmt_fn(mcmd, tgt_dev);
++ TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
++ h->name, res);
++ if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED))
++ mcmd->status = res;
++ }
++ return res;
++}
++
++static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
++{
++ switch (mgmt_fn) {
++#ifdef CONFIG_SCST_ABORT_CONSIDER_FINISHED_TASKS_AS_NOT_EXISTING
++ case SCST_ABORT_TASK:
++#endif
++#if 0
++ case SCST_ABORT_TASK_SET:
++ case SCST_CLEAR_TASK_SET:
++#endif
++ return 1;
++ default:
++ return 0;
++ }
++}
++
++/* Might be called under sess_list_lock and IRQ off + BHs also off */
++void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
++ bool other_ini, bool call_dev_task_mgmt_fn)
++{
++ unsigned long flags;
++ static DEFINE_SPINLOCK(other_ini_lock);
++
++ TRACE_ENTRY();
++
++ TRACE(TRACE_SCSI|TRACE_MGMT_DEBUG, "Aborting cmd %p (tag %llu, op %x)",
++ cmd, (long long unsigned int)cmd->tag, cmd->cdb[0]);
++
++ /* To protect from concurrent aborts */
++ spin_lock_irqsave(&other_ini_lock, flags);
++
++ if (other_ini) {
++ struct scst_device *dev = NULL;
++
++ /* Might be necessary if command aborted several times */
++ if (!test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
++ set_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
++
++ /* Necessary for scst_xmit_process_aborted_cmd */
++ if (cmd->dev != NULL)
++ dev = cmd->dev;
++ else if ((mcmd != NULL) && (mcmd->mcmd_tgt_dev != NULL))
++ dev = mcmd->mcmd_tgt_dev->dev;
++
++ if (dev != NULL) {
++ if (dev->tas)
++ set_bit(SCST_CMD_DEVICE_TAS, &cmd->cmd_flags);
++ } else
++ PRINT_WARNING("Abort cmd %p from other initiator, but "
++ "neither cmd, nor mcmd %p have tgt_dev set, so "
++ "TAS information can be lost", cmd, mcmd);
++ } else {
++ /* Might be necessary if command aborted several times */
++ clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
++ }
++
++ set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
++
++ spin_unlock_irqrestore(&other_ini_lock, flags);
++
++ /*
++ * To sync with cmd->finished/done set in
++ * scst_finish_cmd()/scst_pre_xmit_response() and with setting UA for
++ * aborted cmd in scst_set_pending_UA().
++ */
++ smp_mb__after_set_bit();
++
++ if (cmd->tgt_dev == NULL) {
++ spin_lock_irqsave(&scst_init_lock, flags);
++ scst_init_poll_cnt++;
++ spin_unlock_irqrestore(&scst_init_lock, flags);
++ wake_up(&scst_init_cmd_list_waitQ);
++ }
++
++ if (call_dev_task_mgmt_fn && (cmd->tgt_dev != NULL)) {
++ EXTRACHECKS_BUG_ON(irqs_disabled());
++ scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 1);
++ }
++
++ spin_lock_irqsave(&scst_mcmd_lock, flags);
++ if ((mcmd != NULL) && !cmd->finished) {
++ struct scst_mgmt_cmd_stub *mstb;
++
++ mstb = mempool_alloc(scst_mgmt_stub_mempool, GFP_ATOMIC);
++ if (mstb == NULL) {
++ PRINT_CRIT_ERROR("Allocation of management command "
++ "stub failed (mcmd %p, cmd %p)", mcmd, cmd);
++ goto unlock;
++ }
++ memset(mstb, 0, sizeof(*mstb));
++
++ TRACE_DBG("mstb %p, mcmd %p", mstb, mcmd);
++
++ mstb->mcmd = mcmd;
++
++ /*
++ * Delay the response until the command's finish in order to
++ * guarantee that "no further responses from the task are sent
++ * to the SCSI initiator port" after response from the TM
++ * function is sent (SAM). Plus, we must wait here to be sure
++ * that we won't receive double commands with the same tag.
++ * Moreover, if we don't wait here, we might have a possibility
++ * for data corruption, when aborted and reported as completed
++ * command actually gets executed *after* new commands sent
++ * after this TM command completed.
++ */
++
++ if (cmd->sent_for_exec && !cmd->done) {
++ TRACE_MGMT_DBG("cmd %p (tag %llu) is being executed",
++ cmd, (long long unsigned int)cmd->tag);
++ mstb->done_counted = 1;
++ mcmd->cmd_done_wait_count++;
++ }
++
++ /*
++ * We don't have to wait the command's status delivery finish
++ * to other initiators + it can affect MPIO failover.
++ */
++ if (!other_ini) {
++ mstb->finish_counted = 1;
++ mcmd->cmd_finish_wait_count++;
++ }
++
++ if (mstb->done_counted || mstb->finish_counted) {
++ TRACE_MGMT_DBG("cmd %p (tag %llu, sn %u) being "
++ "executed/xmitted (state %d, op %x, proc time "
++ "%ld sec., timeout %d sec.), deferring ABORT "
++ "(cmd_done_wait_count %d, cmd_finish_wait_count "
++ "%d)", cmd, (long long unsigned int)cmd->tag,
++ cmd->sn, cmd->state, cmd->cdb[0],
++ (long)(jiffies - cmd->start_time) / HZ,
++ cmd->timeout / HZ, mcmd->cmd_done_wait_count,
++ mcmd->cmd_finish_wait_count);
++ /*
++ * cmd can't die here or sess_list_lock already taken
++ * and cmd is in the sess list
++ */
++ list_add_tail(&mstb->cmd_mgmt_cmd_list_entry,
++ &cmd->mgmt_cmd_list);
++ } else {
++ /* We don't need to wait for this cmd */
++ mempool_free(mstb, scst_mgmt_stub_mempool);
++ }
++ }
++
++unlock:
++ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
++
++ tm_dbg_release_cmd(cmd);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* No locks. Returns 0, if mcmd should be processed further. */
++static int scst_set_mcmd_next_state(struct scst_mgmt_cmd *mcmd)
++{
++ int res;
++
++ spin_lock_irq(&scst_mcmd_lock);
++
++ switch (mcmd->state) {
++ case SCST_MCMD_STATE_INIT:
++ case SCST_MCMD_STATE_EXEC:
++ if (mcmd->cmd_done_wait_count == 0) {
++ mcmd->state = SCST_MCMD_STATE_AFFECTED_CMDS_DONE;
++ res = 0;
++ } else {
++ TRACE_MGMT_DBG("cmd_done_wait_count(%d) not 0, "
++ "preparing to wait", mcmd->cmd_done_wait_count);
++ mcmd->state = SCST_MCMD_STATE_WAITING_AFFECTED_CMDS_DONE;
++ res = -1;
++ }
++ break;
++
++ case SCST_MCMD_STATE_AFFECTED_CMDS_DONE:
++ if (mcmd->cmd_finish_wait_count == 0) {
++ mcmd->state = SCST_MCMD_STATE_DONE;
++ res = 0;
++ } else {
++ TRACE_MGMT_DBG("cmd_finish_wait_count(%d) not 0, "
++ "preparing to wait",
++ mcmd->cmd_finish_wait_count);
++ mcmd->state = SCST_MCMD_STATE_WAITING_AFFECTED_CMDS_FINISHED;
++ res = -1;
++ }
++ break;
++
++ case SCST_MCMD_STATE_DONE:
++ mcmd->state = SCST_MCMD_STATE_FINISHED;
++ res = 0;
++ break;
++
++ default:
++ PRINT_CRIT_ERROR("Wrong mcmd %p state %d (fn %d, "
++ "cmd_finish_wait_count %d, cmd_done_wait_count %d)",
++ mcmd, mcmd->state, mcmd->fn,
++ mcmd->cmd_finish_wait_count, mcmd->cmd_done_wait_count);
++ spin_unlock_irq(&scst_mcmd_lock);
++ BUG();
++ goto out;
++ }
++
++ spin_unlock_irq(&scst_mcmd_lock);
++
++out:
++ return res;
++}
++
++/* IRQs supposed to be disabled */
++static bool __scst_check_unblock_aborted_cmd(struct scst_cmd *cmd,
++ struct list_head *list_entry)
++{
++ bool res;
++ if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
++ list_del(list_entry);
++ spin_lock(&cmd->cmd_threads->cmd_list_lock);
++ list_add_tail(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
++ spin_unlock(&cmd->cmd_threads->cmd_list_lock);
++ res = 1;
++ } else
++ res = 0;
++ return res;
++}
++
++static void scst_unblock_aborted_cmds(int scst_mutex_held)
++{
++ struct scst_device *dev;
++
++ TRACE_ENTRY();
++
++ if (!scst_mutex_held)
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
++ struct scst_cmd *cmd, *tcmd;
++ struct scst_tgt_dev *tgt_dev;
++ spin_lock_bh(&dev->dev_lock);
++ local_irq_disable();
++ list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
++ blocked_cmd_list_entry) {
++ if (__scst_check_unblock_aborted_cmd(cmd,
++ &cmd->blocked_cmd_list_entry)) {
++ TRACE_MGMT_DBG("Unblock aborted blocked cmd %p",
++ cmd);
++ }
++ }
++ local_irq_enable();
++ spin_unlock_bh(&dev->dev_lock);
++
++ local_irq_disable();
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ spin_lock(&tgt_dev->sn_lock);
++ list_for_each_entry_safe(cmd, tcmd,
++ &tgt_dev->deferred_cmd_list,
++ sn_cmd_list_entry) {
++ if (__scst_check_unblock_aborted_cmd(cmd,
++ &cmd->sn_cmd_list_entry)) {
++ TRACE_MGMT_DBG("Unblocked aborted SN "
++ "cmd %p (sn %u)",
++ cmd, cmd->sn);
++ tgt_dev->def_cmd_count--;
++ }
++ }
++ spin_unlock(&tgt_dev->sn_lock);
++ }
++ local_irq_enable();
++ }
++
++ if (!scst_mutex_held)
++ mutex_unlock(&scst_mutex);
++
++ TRACE_EXIT();
++ return;
++}
++
++static void __scst_abort_task_set(struct scst_mgmt_cmd *mcmd,
++ struct scst_tgt_dev *tgt_dev)
++{
++ struct scst_cmd *cmd;
++ struct scst_session *sess = tgt_dev->sess;
++ bool other_ini;
++
++ TRACE_ENTRY();
++
++ if ((mcmd->fn == SCST_PR_ABORT_ALL) &&
++ (mcmd->origin_pr_cmd->sess != sess))
++ other_ini = true;
++ else
++ other_ini = false;
++
++ spin_lock_irq(&sess->sess_list_lock);
++
++ TRACE_DBG("Searching in sess cmd list (sess=%p)", sess);
++ list_for_each_entry(cmd, &sess->sess_cmd_list,
++ sess_cmd_list_entry) {
++ if ((mcmd->fn == SCST_PR_ABORT_ALL) &&
++ (mcmd->origin_pr_cmd == cmd))
++ continue;
++ if ((cmd->tgt_dev == tgt_dev) ||
++ ((cmd->tgt_dev == NULL) &&
++ (cmd->lun == tgt_dev->lun))) {
++ if (mcmd->cmd_sn_set) {
++ BUG_ON(!cmd->tgt_sn_set);
++ if (scst_sn_before(mcmd->cmd_sn, cmd->tgt_sn) ||
++ (mcmd->cmd_sn == cmd->tgt_sn))
++ continue;
++ }
++ scst_abort_cmd(cmd, mcmd, other_ini, 0);
++ }
++ }
++ spin_unlock_irq(&sess->sess_list_lock);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Returns 0 if the command processing should be continued, <0 otherwise */
++static int scst_abort_task_set(struct scst_mgmt_cmd *mcmd)
++{
++ int res;
++ struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
++
++ TRACE(TRACE_MGMT, "Aborting task set (lun=%lld, mcmd=%p)",
++ (long long unsigned int)tgt_dev->lun, mcmd);
++
++ __scst_abort_task_set(mcmd, tgt_dev);
++
++ if (mcmd->fn == SCST_PR_ABORT_ALL) {
++ struct scst_pr_abort_all_pending_mgmt_cmds_counter *pr_cnt =
++ mcmd->origin_pr_cmd->pr_abort_counter;
++ if (atomic_dec_and_test(&pr_cnt->pr_aborting_cnt))
++ complete_all(&pr_cnt->pr_aborting_cmpl);
++ }
++
++ tm_dbg_task_mgmt(mcmd->mcmd_tgt_dev->dev, "ABORT TASK SET/PR ABORT", 0);
++
++ scst_unblock_aborted_cmds(0);
++
++ scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
++
++ res = scst_set_mcmd_next_state(mcmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_is_cmd_belongs_to_dev(struct scst_cmd *cmd,
++ struct scst_device *dev)
++{
++ struct scst_tgt_dev *tgt_dev = NULL;
++ struct list_head *sess_tgt_dev_list_head;
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Finding match for dev %p and cmd %p (lun %lld)", dev, cmd,
++ (long long unsigned int)cmd->lun);
++
++ sess_tgt_dev_list_head =
++ &cmd->sess->sess_tgt_dev_list_hash[HASH_VAL(cmd->lun)];
++ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
++ sess_tgt_dev_list_entry) {
++ if (tgt_dev->lun == cmd->lun) {
++ TRACE_DBG("dev %p found", tgt_dev->dev);
++ res = (tgt_dev->dev == dev);
++ goto out;
++ }
++ }
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++/* Returns 0 if the command processing should be continued, <0 otherwise */
++static int scst_clear_task_set(struct scst_mgmt_cmd *mcmd)
++{
++ int res;
++ struct scst_device *dev = mcmd->mcmd_tgt_dev->dev;
++ struct scst_tgt_dev *tgt_dev;
++ LIST_HEAD(UA_tgt_devs);
++
++ TRACE_ENTRY();
++
++ TRACE(TRACE_MGMT, "Clearing task set (lun=%lld, mcmd=%p)",
++ (long long unsigned int)mcmd->lun, mcmd);
++
++#if 0 /* we are SAM-3 */
++ /*
++ * When a logical unit is aborting one or more tasks from a SCSI
++ * initiator port with the TASK ABORTED status it should complete all
++ * of those tasks before entering additional tasks from that SCSI
++ * initiator port into the task set - SAM2
++ */
++ mcmd->needs_unblocking = 1;
++ spin_lock_bh(&dev->dev_lock);
++ scst_block_dev(dev);
++ spin_unlock_bh(&dev->dev_lock);
++#endif
++
++ __scst_abort_task_set(mcmd, mcmd->mcmd_tgt_dev);
++
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ struct scst_session *sess = tgt_dev->sess;
++ struct scst_cmd *cmd;
++ int aborted = 0;
++
++ if (tgt_dev == mcmd->mcmd_tgt_dev)
++ continue;
++
++ spin_lock_irq(&sess->sess_list_lock);
++
++ TRACE_DBG("Searching in sess cmd list (sess=%p)", sess);
++ list_for_each_entry(cmd, &sess->sess_cmd_list,
++ sess_cmd_list_entry) {
++ if ((cmd->dev == dev) ||
++ ((cmd->dev == NULL) &&
++ scst_is_cmd_belongs_to_dev(cmd, dev))) {
++ scst_abort_cmd(cmd, mcmd, 1, 0);
++ aborted = 1;
++ }
++ }
++ spin_unlock_irq(&sess->sess_list_lock);
++
++ if (aborted)
++ list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
++ &UA_tgt_devs);
++ }
++
++ tm_dbg_task_mgmt(mcmd->mcmd_tgt_dev->dev, "CLEAR TASK SET", 0);
++
++ scst_unblock_aborted_cmds(1);
++
++ mutex_unlock(&scst_mutex);
++
++ if (!dev->tas) {
++ uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
++ int sl;
++
++ sl = scst_set_sense(sense_buffer, sizeof(sense_buffer),
++ dev->d_sense,
++ SCST_LOAD_SENSE(scst_sense_cleared_by_another_ini_UA));
++
++ list_for_each_entry(tgt_dev, &UA_tgt_devs,
++ extra_tgt_dev_list_entry) {
++ scst_check_set_UA(tgt_dev, sense_buffer, sl, 0);
++ }
++ }
++
++ scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 0);
++
++ res = scst_set_mcmd_next_state(mcmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* Returns 0 if the command processing should be continued,
++ * >0, if it should be requeued, <0 otherwise */
++static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
++{
++ int res = 0, rc;
++
++ TRACE_ENTRY();
++
++ switch (mcmd->fn) {
++ case SCST_ABORT_TASK:
++ {
++ struct scst_session *sess = mcmd->sess;
++ struct scst_cmd *cmd;
++
++ spin_lock_irq(&sess->sess_list_lock);
++ cmd = __scst_find_cmd_by_tag(sess, mcmd->tag, true);
++ if (cmd == NULL) {
++ TRACE_MGMT_DBG("ABORT TASK: command "
++ "for tag %llu not found",
++ (long long unsigned int)mcmd->tag);
++ mcmd->status = SCST_MGMT_STATUS_TASK_NOT_EXIST;
++ spin_unlock_irq(&sess->sess_list_lock);
++ res = scst_set_mcmd_next_state(mcmd);
++ goto out;
++ }
++ __scst_cmd_get(cmd);
++ spin_unlock_irq(&sess->sess_list_lock);
++ TRACE_DBG("Cmd to abort %p for tag %llu found",
++ cmd, (long long unsigned int)mcmd->tag);
++ mcmd->cmd_to_abort = cmd;
++ mcmd->state = SCST_MCMD_STATE_EXEC;
++ break;
++ }
++
++ case SCST_TARGET_RESET:
++ case SCST_NEXUS_LOSS_SESS:
++ case SCST_ABORT_ALL_TASKS_SESS:
++ case SCST_NEXUS_LOSS:
++ case SCST_ABORT_ALL_TASKS:
++ case SCST_UNREG_SESS_TM:
++ mcmd->state = SCST_MCMD_STATE_EXEC;
++ break;
++
++ case SCST_ABORT_TASK_SET:
++ case SCST_CLEAR_ACA:
++ case SCST_CLEAR_TASK_SET:
++ case SCST_LUN_RESET:
++ case SCST_PR_ABORT_ALL:
++ rc = scst_mgmt_translate_lun(mcmd);
++ if (rc == 0)
++ mcmd->state = SCST_MCMD_STATE_EXEC;
++ else if (rc < 0) {
++ PRINT_ERROR("Corresponding device for LUN %lld not "
++ "found", (long long unsigned int)mcmd->lun);
++ mcmd->status = SCST_MGMT_STATUS_LUN_NOT_EXIST;
++ res = scst_set_mcmd_next_state(mcmd);
++ } else
++ res = rc;
++ break;
++
++ default:
++ BUG();
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* Returns 0 if the command processing should be continued, <0 otherwise */
++static int scst_target_reset(struct scst_mgmt_cmd *mcmd)
++{
++ int res, rc;
++ struct scst_device *dev;
++ struct scst_acg *acg = mcmd->sess->acg;
++ struct scst_acg_dev *acg_dev;
++ int cont, c;
++ LIST_HEAD(host_devs);
++
++ TRACE_ENTRY();
++
++ TRACE(TRACE_MGMT, "Target reset (mcmd %p, cmd count %d)",
++ mcmd, atomic_read(&mcmd->sess->sess_cmd_count));
++
++ mcmd->needs_unblocking = 1;
++
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
++ struct scst_device *d;
++ struct scst_tgt_dev *tgt_dev;
++ int found = 0;
++
++ dev = acg_dev->dev;
++
++ spin_lock_bh(&dev->dev_lock);
++ scst_block_dev(dev);
++ scst_process_reset(dev, mcmd->sess, NULL, mcmd, true);
++ spin_unlock_bh(&dev->dev_lock);
++
++ cont = 0;
++ c = 0;
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ cont = 1;
++ if (mcmd->sess == tgt_dev->sess) {
++ rc = scst_call_dev_task_mgmt_fn(mcmd,
++ tgt_dev, 0);
++ if (rc == SCST_DEV_TM_NOT_COMPLETED)
++ c = 1;
++ else if ((rc < 0) &&
++ (mcmd->status == SCST_MGMT_STATUS_SUCCESS))
++ mcmd->status = rc;
++ break;
++ }
++ }
++ if (cont && !c)
++ continue;
++
++ if (dev->scsi_dev == NULL)
++ continue;
++
++ list_for_each_entry(d, &host_devs, tm_dev_list_entry) {
++ if (dev->scsi_dev->host->host_no ==
++ d->scsi_dev->host->host_no) {
++ found = 1;
++ break;
++ }
++ }
++ if (!found)
++ list_add_tail(&dev->tm_dev_list_entry, &host_devs);
++
++ tm_dbg_task_mgmt(dev, "TARGET RESET", 0);
++ }
++
++ scst_unblock_aborted_cmds(1);
++
++ /*
++ * We suppose here that for all commands that already on devices
++ * on/after scsi_reset_provider() completion callbacks will be called.
++ */
++
++ list_for_each_entry(dev, &host_devs, tm_dev_list_entry) {
++ /* dev->scsi_dev must be non-NULL here */
++ TRACE(TRACE_MGMT, "Resetting host %d bus ",
++ dev->scsi_dev->host->host_no);
++ rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_TARGET);
++ TRACE(TRACE_MGMT, "Result of host %d target reset: %s",
++ dev->scsi_dev->host->host_no,
++ (rc == SUCCESS) ? "SUCCESS" : "FAILED");
++#if 0
++ if ((rc != SUCCESS) &&
++ (mcmd->status == SCST_MGMT_STATUS_SUCCESS)) {
++ /*
++ * SCSI_TRY_RESET_BUS is also done by
++ * scsi_reset_provider()
++ */
++ mcmd->status = SCST_MGMT_STATUS_FAILED;
++ }
++#else
++ /*
++ * scsi_reset_provider() returns very weird status, so let's
++ * always succeed
++ */
++#endif
++ }
++
++ list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
++ dev = acg_dev->dev;
++ if (dev->scsi_dev != NULL)
++ dev->scsi_dev->was_reset = 0;
++ }
++
++ mutex_unlock(&scst_mutex);
++
++ res = scst_set_mcmd_next_state(mcmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* Returns 0 if the command processing should be continued, <0 otherwise */
++static int scst_lun_reset(struct scst_mgmt_cmd *mcmd)
++{
++ int res, rc;
++ struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
++ struct scst_device *dev = tgt_dev->dev;
++
++ TRACE_ENTRY();
++
++ TRACE(TRACE_MGMT, "Resetting LUN %lld (mcmd %p)",
++ (long long unsigned int)tgt_dev->lun, mcmd);
++
++ mcmd->needs_unblocking = 1;
++
++ spin_lock_bh(&dev->dev_lock);
++ scst_block_dev(dev);
++ scst_process_reset(dev, mcmd->sess, NULL, mcmd, true);
++ spin_unlock_bh(&dev->dev_lock);
++
++ rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 1);
++ if (rc != SCST_DEV_TM_NOT_COMPLETED)
++ goto out_tm_dbg;
++
++ if (dev->scsi_dev != NULL) {
++ TRACE(TRACE_MGMT, "Resetting host %d bus ",
++ dev->scsi_dev->host->host_no);
++ rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_DEVICE);
++#if 0
++ if (rc != SUCCESS && mcmd->status == SCST_MGMT_STATUS_SUCCESS)
++ mcmd->status = SCST_MGMT_STATUS_FAILED;
++#else
++ /*
++ * scsi_reset_provider() returns very weird status, so let's
++ * always succeed
++ */
++#endif
++ dev->scsi_dev->was_reset = 0;
++ }
++
++ scst_unblock_aborted_cmds(0);
++
++out_tm_dbg:
++ tm_dbg_task_mgmt(mcmd->mcmd_tgt_dev->dev, "LUN RESET", 0);
++
++ res = scst_set_mcmd_next_state(mcmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* scst_mutex supposed to be held */
++static void scst_do_nexus_loss_sess(struct scst_mgmt_cmd *mcmd)
++{
++ int i;
++ struct scst_session *sess = mcmd->sess;
++ struct scst_tgt_dev *tgt_dev;
++
++ TRACE_ENTRY();
++
++ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
++ struct list_head *sess_tgt_dev_list_head =
++ &sess->sess_tgt_dev_list_hash[i];
++ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
++ sess_tgt_dev_list_entry) {
++ scst_nexus_loss(tgt_dev,
++ (mcmd->fn != SCST_UNREG_SESS_TM));
++ }
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Returns 0 if the command processing should be continued, <0 otherwise */
++static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
++ int nexus_loss)
++{
++ int res;
++ int i;
++ struct scst_session *sess = mcmd->sess;
++ struct scst_tgt_dev *tgt_dev;
++
++ TRACE_ENTRY();
++
++ if (nexus_loss) {
++ TRACE_MGMT_DBG("Nexus loss for sess %p (mcmd %p)",
++ sess, mcmd);
++ } else {
++ TRACE_MGMT_DBG("Aborting all from sess %p (mcmd %p)",
++ sess, mcmd);
++ }
++
++ mutex_lock(&scst_mutex);
++
++ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
++ struct list_head *sess_tgt_dev_list_head =
++ &sess->sess_tgt_dev_list_hash[i];
++ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
++ sess_tgt_dev_list_entry) {
++ int rc;
++
++ __scst_abort_task_set(mcmd, tgt_dev);
++
++ rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
++ if (rc < 0 && mcmd->status == SCST_MGMT_STATUS_SUCCESS)
++ mcmd->status = rc;
++
++ tm_dbg_task_mgmt(tgt_dev->dev, "NEXUS LOSS SESS or "
++ "ABORT ALL SESS or UNREG SESS",
++ (mcmd->fn == SCST_UNREG_SESS_TM));
++ }
++ }
++
++ scst_unblock_aborted_cmds(1);
++
++ mutex_unlock(&scst_mutex);
++
++ res = scst_set_mcmd_next_state(mcmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* scst_mutex supposed to be held */
++static void scst_do_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd)
++{
++ int i;
++ struct scst_tgt *tgt = mcmd->sess->tgt;
++ struct scst_session *sess;
++
++ TRACE_ENTRY();
++
++ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
++ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
++ struct list_head *sess_tgt_dev_list_head =
++ &sess->sess_tgt_dev_list_hash[i];
++ struct scst_tgt_dev *tgt_dev;
++ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
++ sess_tgt_dev_list_entry) {
++ scst_nexus_loss(tgt_dev, true);
++ }
++ }
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static int scst_abort_all_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd,
++ int nexus_loss)
++{
++ int res;
++ int i;
++ struct scst_tgt *tgt = mcmd->sess->tgt;
++ struct scst_session *sess;
++
++ TRACE_ENTRY();
++
++ if (nexus_loss) {
++ TRACE_MGMT_DBG("I_T Nexus loss (tgt %p, mcmd %p)",
++ tgt, mcmd);
++ } else {
++ TRACE_MGMT_DBG("Aborting all from tgt %p (mcmd %p)",
++ tgt, mcmd);
++ }
++
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
++ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
++ struct list_head *sess_tgt_dev_list_head =
++ &sess->sess_tgt_dev_list_hash[i];
++ struct scst_tgt_dev *tgt_dev;
++ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
++ sess_tgt_dev_list_entry) {
++ int rc;
++
++ __scst_abort_task_set(mcmd, tgt_dev);
++
++ if (nexus_loss)
++ scst_nexus_loss(tgt_dev, true);
++
++ if (mcmd->sess == tgt_dev->sess) {
++ rc = scst_call_dev_task_mgmt_fn(
++ mcmd, tgt_dev, 0);
++ if ((rc < 0) &&
++ (mcmd->status == SCST_MGMT_STATUS_SUCCESS))
++ mcmd->status = rc;
++ }
++
++ tm_dbg_task_mgmt(tgt_dev->dev, "NEXUS LOSS or "
++ "ABORT ALL", 0);
++ }
++ }
++ }
++
++ scst_unblock_aborted_cmds(1);
++
++ mutex_unlock(&scst_mutex);
++
++ res = scst_set_mcmd_next_state(mcmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_abort_task(struct scst_mgmt_cmd *mcmd)
++{
++ int res;
++ struct scst_cmd *cmd = mcmd->cmd_to_abort;
++
++ TRACE_ENTRY();
++
++	TRACE_MGMT_DBG("Aborting task (cmd %p, sn %d, set %d, tag %llu, "
++ "queue_type %x)", cmd, cmd->sn, cmd->sn_set,
++ (long long unsigned int)mcmd->tag, cmd->queue_type);
++
++ if (mcmd->lun_set && (mcmd->lun != cmd->lun)) {
++ PRINT_ERROR("ABORT TASK: LUN mismatch: mcmd LUN %llx, "
++ "cmd LUN %llx, cmd tag %llu",
++ (long long unsigned int)mcmd->lun,
++ (long long unsigned int)cmd->lun,
++ (long long unsigned int)mcmd->tag);
++ mcmd->status = SCST_MGMT_STATUS_REJECTED;
++ } else if (mcmd->cmd_sn_set &&
++ (scst_sn_before(mcmd->cmd_sn, cmd->tgt_sn) ||
++ (mcmd->cmd_sn == cmd->tgt_sn))) {
++ PRINT_ERROR("ABORT TASK: SN mismatch: mcmd SN %x, "
++ "cmd SN %x, cmd tag %llu", mcmd->cmd_sn,
++ cmd->tgt_sn, (long long unsigned int)mcmd->tag);
++ mcmd->status = SCST_MGMT_STATUS_REJECTED;
++ } else {
++ scst_abort_cmd(cmd, mcmd, 0, 1);
++ scst_unblock_aborted_cmds(0);
++ }
++
++ res = scst_set_mcmd_next_state(mcmd);
++
++ mcmd->cmd_to_abort = NULL; /* just in case */
++
++ __scst_cmd_put(cmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* Returns 0 if the command processing should be continued, <0 otherwise */
++static int scst_mgmt_cmd_exec(struct scst_mgmt_cmd *mcmd)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ mcmd->status = SCST_MGMT_STATUS_SUCCESS;
++
++ switch (mcmd->fn) {
++ case SCST_ABORT_TASK:
++ res = scst_abort_task(mcmd);
++ break;
++
++ case SCST_ABORT_TASK_SET:
++ case SCST_PR_ABORT_ALL:
++ res = scst_abort_task_set(mcmd);
++ break;
++
++ case SCST_CLEAR_TASK_SET:
++ if (mcmd->mcmd_tgt_dev->dev->tst ==
++ SCST_CONTR_MODE_SEP_TASK_SETS)
++ res = scst_abort_task_set(mcmd);
++ else
++ res = scst_clear_task_set(mcmd);
++ break;
++
++ case SCST_LUN_RESET:
++ res = scst_lun_reset(mcmd);
++ break;
++
++ case SCST_TARGET_RESET:
++ res = scst_target_reset(mcmd);
++ break;
++
++ case SCST_ABORT_ALL_TASKS_SESS:
++ res = scst_abort_all_nexus_loss_sess(mcmd, 0);
++ break;
++
++ case SCST_NEXUS_LOSS_SESS:
++ case SCST_UNREG_SESS_TM:
++ res = scst_abort_all_nexus_loss_sess(mcmd, 1);
++ break;
++
++ case SCST_ABORT_ALL_TASKS:
++ res = scst_abort_all_nexus_loss_tgt(mcmd, 0);
++ break;
++
++ case SCST_NEXUS_LOSS:
++ res = scst_abort_all_nexus_loss_tgt(mcmd, 1);
++ break;
++
++ case SCST_CLEAR_ACA:
++ if (scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 1) ==
++ SCST_DEV_TM_NOT_COMPLETED) {
++ mcmd->status = SCST_MGMT_STATUS_FN_NOT_SUPPORTED;
++ /* Nothing to do (yet) */
++ }
++ goto out_done;
++
++ default:
++ PRINT_ERROR("Unknown task management function %d", mcmd->fn);
++ mcmd->status = SCST_MGMT_STATUS_REJECTED;
++ goto out_done;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_done:
++ res = scst_set_mcmd_next_state(mcmd);
++ goto out;
++}
++
++static void scst_call_task_mgmt_affected_cmds_done(struct scst_mgmt_cmd *mcmd)
++{
++ struct scst_session *sess = mcmd->sess;
++
++ if ((sess->tgt->tgtt->task_mgmt_affected_cmds_done != NULL) &&
++ (mcmd->fn != SCST_UNREG_SESS_TM) &&
++ (mcmd->fn != SCST_PR_ABORT_ALL)) {
++ TRACE_DBG("Calling target %s task_mgmt_affected_cmds_done(%p)",
++ sess->tgt->tgtt->name, sess);
++ sess->tgt->tgtt->task_mgmt_affected_cmds_done(mcmd);
++ TRACE_MGMT_DBG("Target's %s task_mgmt_affected_cmds_done() "
++ "returned", sess->tgt->tgtt->name);
++ }
++ return;
++}
++
++static int scst_mgmt_affected_cmds_done(struct scst_mgmt_cmd *mcmd)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_mutex);
++
++ switch (mcmd->fn) {
++ case SCST_NEXUS_LOSS_SESS:
++ case SCST_UNREG_SESS_TM:
++ scst_do_nexus_loss_sess(mcmd);
++ break;
++
++ case SCST_NEXUS_LOSS:
++ scst_do_nexus_loss_tgt(mcmd);
++ break;
++ }
++
++ mutex_unlock(&scst_mutex);
++
++ scst_call_task_mgmt_affected_cmds_done(mcmd);
++
++ res = scst_set_mcmd_next_state(mcmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void scst_mgmt_cmd_send_done(struct scst_mgmt_cmd *mcmd)
++{
++ struct scst_device *dev;
++ struct scst_session *sess = mcmd->sess;
++
++ TRACE_ENTRY();
++
++ mcmd->state = SCST_MCMD_STATE_FINISHED;
++ if (scst_is_strict_mgmt_fn(mcmd->fn) && (mcmd->completed_cmd_count > 0))
++ mcmd->status = SCST_MGMT_STATUS_TASK_NOT_EXIST;
++
++ if (mcmd->fn < SCST_UNREG_SESS_TM)
++ TRACE(TRACE_MGMT, "TM fn %d finished, "
++ "status %x", mcmd->fn, mcmd->status);
++ else
++ TRACE_MGMT_DBG("TM fn %d finished, "
++ "status %x", mcmd->fn, mcmd->status);
++
++ if (mcmd->fn == SCST_PR_ABORT_ALL) {
++ mcmd->origin_pr_cmd->scst_cmd_done(mcmd->origin_pr_cmd,
++ SCST_CMD_STATE_DEFAULT,
++ SCST_CONTEXT_THREAD);
++ } else if ((sess->tgt->tgtt->task_mgmt_fn_done != NULL) &&
++ (mcmd->fn != SCST_UNREG_SESS_TM)) {
++ TRACE_DBG("Calling target %s task_mgmt_fn_done(%p)",
++ sess->tgt->tgtt->name, sess);
++ sess->tgt->tgtt->task_mgmt_fn_done(mcmd);
++ TRACE_MGMT_DBG("Target's %s task_mgmt_fn_done() "
++ "returned", sess->tgt->tgtt->name);
++ }
++
++ if (mcmd->needs_unblocking) {
++ switch (mcmd->fn) {
++ case SCST_LUN_RESET:
++ case SCST_CLEAR_TASK_SET:
++ scst_unblock_dev(mcmd->mcmd_tgt_dev->dev);
++ break;
++
++ case SCST_TARGET_RESET:
++ {
++ struct scst_acg *acg = mcmd->sess->acg;
++ struct scst_acg_dev *acg_dev;
++
++ mutex_lock(&scst_mutex);
++ list_for_each_entry(acg_dev, &acg->acg_dev_list,
++ acg_dev_list_entry) {
++ dev = acg_dev->dev;
++ scst_unblock_dev(dev);
++ }
++ mutex_unlock(&scst_mutex);
++ break;
++ }
++
++ default:
++ BUG();
++ break;
++ }
++ }
++
++ mcmd->tgt_priv = NULL;
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Returns >0, if cmd should be requeued */
++static int scst_process_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ /*
++	 * We are in the TM thread and mcmd->state is guaranteed not to be
++	 * changed behind us.
++ */
++
++ TRACE_DBG("mcmd %p, state %d", mcmd, mcmd->state);
++
++ while (1) {
++ switch (mcmd->state) {
++ case SCST_MCMD_STATE_INIT:
++ res = scst_mgmt_cmd_init(mcmd);
++ if (res)
++ goto out;
++ break;
++
++ case SCST_MCMD_STATE_EXEC:
++ if (scst_mgmt_cmd_exec(mcmd))
++ goto out;
++ break;
++
++ case SCST_MCMD_STATE_AFFECTED_CMDS_DONE:
++ if (scst_mgmt_affected_cmds_done(mcmd))
++ goto out;
++ break;
++
++ case SCST_MCMD_STATE_DONE:
++ scst_mgmt_cmd_send_done(mcmd);
++ break;
++
++ case SCST_MCMD_STATE_FINISHED:
++ scst_free_mgmt_cmd(mcmd);
++ /* mcmd is dead */
++ goto out;
++
++ default:
++ PRINT_CRIT_ERROR("Wrong mcmd %p state %d (fn %d, "
++ "cmd_finish_wait_count %d, cmd_done_wait_count "
++ "%d)", mcmd, mcmd->state, mcmd->fn,
++ mcmd->cmd_finish_wait_count,
++ mcmd->cmd_done_wait_count);
++ BUG();
++ res = -1;
++ goto out;
++ }
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static inline int test_mgmt_cmd_list(void)
++{
++ int res = !list_empty(&scst_active_mgmt_cmd_list) ||
++ unlikely(kthread_should_stop());
++ return res;
++}
++
++int scst_tm_thread(void *arg)
++{
++ TRACE_ENTRY();
++
++ PRINT_INFO("Task management thread started, PID %d", current->pid);
++
++ current->flags |= PF_NOFREEZE;
++
++ set_user_nice(current, -10);
++
++ spin_lock_irq(&scst_mcmd_lock);
++ while (!kthread_should_stop()) {
++ wait_queue_t wait;
++ init_waitqueue_entry(&wait, current);
++
++ if (!test_mgmt_cmd_list()) {
++ add_wait_queue_exclusive(&scst_mgmt_cmd_list_waitQ,
++ &wait);
++ for (;;) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ if (test_mgmt_cmd_list())
++ break;
++ spin_unlock_irq(&scst_mcmd_lock);
++ schedule();
++ spin_lock_irq(&scst_mcmd_lock);
++ }
++ set_current_state(TASK_RUNNING);
++ remove_wait_queue(&scst_mgmt_cmd_list_waitQ, &wait);
++ }
++
++ while (!list_empty(&scst_active_mgmt_cmd_list)) {
++ int rc;
++ struct scst_mgmt_cmd *mcmd;
++ mcmd = list_entry(scst_active_mgmt_cmd_list.next,
++ typeof(*mcmd), mgmt_cmd_list_entry);
++ TRACE_MGMT_DBG("Deleting mgmt cmd %p from active cmd "
++ "list", mcmd);
++ list_del(&mcmd->mgmt_cmd_list_entry);
++ spin_unlock_irq(&scst_mcmd_lock);
++ rc = scst_process_mgmt_cmd(mcmd);
++ spin_lock_irq(&scst_mcmd_lock);
++ if (rc > 0) {
++ if (test_bit(SCST_FLAG_SUSPENDED, &scst_flags) &&
++ !test_bit(SCST_FLAG_SUSPENDING,
++ &scst_flags)) {
++ TRACE_MGMT_DBG("Adding mgmt cmd %p to "
++ "head of delayed mgmt cmd list",
++ mcmd);
++ list_add(&mcmd->mgmt_cmd_list_entry,
++ &scst_delayed_mgmt_cmd_list);
++ } else {
++ TRACE_MGMT_DBG("Adding mgmt cmd %p to "
++ "head of active mgmt cmd list",
++ mcmd);
++ list_add(&mcmd->mgmt_cmd_list_entry,
++ &scst_active_mgmt_cmd_list);
++ }
++ }
++ }
++ }
++ spin_unlock_irq(&scst_mcmd_lock);
++
++ /*
++ * If kthread_should_stop() is true, we are guaranteed to be
++ * on the module unload, so scst_active_mgmt_cmd_list must be empty.
++ */
++ BUG_ON(!list_empty(&scst_active_mgmt_cmd_list));
++
++ PRINT_INFO("Task management thread PID %d finished", current->pid);
++
++ TRACE_EXIT();
++ return 0;
++}
++
++static struct scst_mgmt_cmd *scst_pre_rx_mgmt_cmd(struct scst_session
++ *sess, int fn, int atomic, void *tgt_priv)
++{
++ struct scst_mgmt_cmd *mcmd = NULL;
++
++ TRACE_ENTRY();
++
++ if (unlikely(sess->tgt->tgtt->task_mgmt_fn_done == NULL)) {
++ PRINT_ERROR("New mgmt cmd, but task_mgmt_fn_done() is NULL "
++ "(target %s)", sess->tgt->tgtt->name);
++ goto out;
++ }
++
++ mcmd = scst_alloc_mgmt_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
++ if (mcmd == NULL) {
++ PRINT_CRIT_ERROR("Lost TM fn %d, initiator %s", fn,
++ sess->initiator_name);
++ goto out;
++ }
++
++ mcmd->sess = sess;
++ mcmd->fn = fn;
++ mcmd->state = SCST_MCMD_STATE_INIT;
++ mcmd->tgt_priv = tgt_priv;
++
++ if (fn == SCST_PR_ABORT_ALL) {
++ atomic_inc(&mcmd->origin_pr_cmd->pr_abort_counter->pr_abort_pending_cnt);
++ atomic_inc(&mcmd->origin_pr_cmd->pr_abort_counter->pr_aborting_cnt);
++ }
++
++out:
++ TRACE_EXIT();
++ return mcmd;
++}
++
++static int scst_post_rx_mgmt_cmd(struct scst_session *sess,
++ struct scst_mgmt_cmd *mcmd)
++{
++ unsigned long flags;
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ scst_sess_get(sess);
++
++ if (unlikely(sess->shut_phase != SCST_SESS_SPH_READY)) {
++ PRINT_CRIT_ERROR("New mgmt cmd while shutting down the "
++ "session %p shut_phase %ld", sess, sess->shut_phase);
++ BUG();
++ }
++
++ local_irq_save(flags);
++
++ spin_lock(&sess->sess_list_lock);
++ atomic_inc(&sess->sess_cmd_count);
++
++ if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
++ switch (sess->init_phase) {
++ case SCST_SESS_IPH_INITING:
++ TRACE_DBG("Adding mcmd %p to init deferred mcmd list",
++ mcmd);
++ list_add_tail(&mcmd->mgmt_cmd_list_entry,
++ &sess->init_deferred_mcmd_list);
++ goto out_unlock;
++ case SCST_SESS_IPH_SUCCESS:
++ break;
++ case SCST_SESS_IPH_FAILED:
++ res = -1;
++ goto out_unlock;
++ default:
++ BUG();
++ }
++ }
++
++ spin_unlock(&sess->sess_list_lock);
++
++ TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd list", mcmd);
++ spin_lock(&scst_mcmd_lock);
++ list_add_tail(&mcmd->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
++ spin_unlock(&scst_mcmd_lock);
++
++ local_irq_restore(flags);
++
++ wake_up(&scst_mgmt_cmd_list_waitQ);
++
++out:
++ TRACE_EXIT();
++ return res;
++
++out_unlock:
++ spin_unlock(&sess->sess_list_lock);
++ local_irq_restore(flags);
++ goto out;
++}
++
++/**
++ * scst_rx_mgmt_fn() - create new management command and send it for execution
++ *
++ * Description:
++ * Creates a new management command and sends it for execution.
++ *
++ * Returns 0 for success, error code otherwise.
++ *
++ * Must not be called in parallel with scst_unregister_session() for the
++ * same sess.
++ */
++int scst_rx_mgmt_fn(struct scst_session *sess,
++ const struct scst_rx_mgmt_params *params)
++{
++ int res = -EFAULT;
++ struct scst_mgmt_cmd *mcmd = NULL;
++
++ TRACE_ENTRY();
++
++ switch (params->fn) {
++ case SCST_ABORT_TASK:
++ BUG_ON(!params->tag_set);
++ break;
++ case SCST_TARGET_RESET:
++ case SCST_ABORT_ALL_TASKS:
++ case SCST_NEXUS_LOSS:
++ break;
++ default:
++ BUG_ON(!params->lun_set);
++ }
++
++ mcmd = scst_pre_rx_mgmt_cmd(sess, params->fn, params->atomic,
++ params->tgt_priv);
++ if (mcmd == NULL)
++ goto out;
++
++ if (params->lun_set) {
++ mcmd->lun = scst_unpack_lun(params->lun, params->lun_len);
++ if (mcmd->lun == NO_SUCH_LUN)
++ goto out_free;
++ mcmd->lun_set = 1;
++ }
++
++ if (params->tag_set)
++ mcmd->tag = params->tag;
++
++ mcmd->cmd_sn_set = params->cmd_sn_set;
++ mcmd->cmd_sn = params->cmd_sn;
++
++ if (params->fn < SCST_UNREG_SESS_TM)
++ TRACE(TRACE_MGMT, "TM fn %d", params->fn);
++ else
++ TRACE_MGMT_DBG("TM fn %d", params->fn);
++
++ TRACE_MGMT_DBG("sess=%p, tag_set %d, tag %lld, lun_set %d, "
++ "lun=%lld, cmd_sn_set %d, cmd_sn %d, priv %p", sess,
++ params->tag_set,
++ (long long unsigned int)params->tag,
++ params->lun_set,
++ (long long unsigned int)mcmd->lun,
++ params->cmd_sn_set,
++ params->cmd_sn,
++ params->tgt_priv);
++
++ if (scst_post_rx_mgmt_cmd(sess, mcmd) != 0)
++ goto out_free;
++
++ res = 0;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free:
++ scst_free_mgmt_cmd(mcmd);
++ mcmd = NULL;
++ goto out;
++}
++EXPORT_SYMBOL(scst_rx_mgmt_fn);
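++
++/*
++ * Usage example: an illustrative sketch, not part of the original SCST
++ * sources. "my_sess" and "abort_tag" are placeholders a target driver would
++ * supply; SCST_ATOMIC is used as elsewhere in this file for submissions from
++ * atomic context.
++ *
++ *	struct scst_rx_mgmt_params params;
++ *
++ *	memset(&params, 0, sizeof(params));
++ *	params.fn = SCST_ABORT_TASK;
++ *	params.tag = abort_tag;
++ *	params.tag_set = 1;
++ *	params.atomic = SCST_ATOMIC;
++ *	if (scst_rx_mgmt_fn(my_sess, &params) != 0)
++ *		pr_err("Failed to submit ABORT TASK\n");
++ */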
++
++/*
++ * Written by Jack Handy - jakkhandy@hotmail.com
++ * Taken by Gennadiy Nerubayev <parakie@gmail.com> from
++ * http://www.codeproject.com/KB/string/wildcmp.aspx. No license attached
++ * to it, and it's posted on a free site; assumed to be free for use.
++ *
++ * Added the negative sign support - VLNB
++ *
++ * Also see comment for wildcmp().
++ *
++ * The user space part of iSCSI-SCST also has a copy of this code, so when
++ * fixing a bug here, don't forget to fix the copy too!
++ */
++static bool __wildcmp(const char *wild, const char *string, int recursion_level)
++{
++ const char *cp = NULL, *mp = NULL;
++
++ while ((*string) && (*wild != '*')) {
++ if ((*wild == '!') && (recursion_level == 0))
++ return !__wildcmp(++wild, string, ++recursion_level);
++
++ if ((*wild != *string) && (*wild != '?'))
++ return false;
++
++ wild++;
++ string++;
++ }
++
++ while (*string) {
++ if ((*wild == '!') && (recursion_level == 0))
++ return !__wildcmp(++wild, string, ++recursion_level);
++
++ if (*wild == '*') {
++ if (!*++wild)
++ return true;
++
++ mp = wild;
++ cp = string+1;
++ } else if ((*wild == *string) || (*wild == '?')) {
++ wild++;
++ string++;
++ } else {
++ wild = mp;
++ string = cp++;
++ }
++ }
++
++ while (*wild == '*')
++ wild++;
++
++ return !*wild;
++}
++
++/*
++ * Returns true if string "string" matches pattern "wild", false otherwise.
++ * Pattern is a regular DOS-type pattern, containing '*' and '?' symbols.
++ * '*' matches any sequence of symbols, '?' matches any single symbol.
++ *
++ * For instance:
++ * if (wildcmp("bl?h.*", "blah.jpg")) {
++ * // match
++ * } else {
++ * // no match
++ * }
++ *
++ * It also supports the boolean inversion sign '!', which inverts the match
++ * result of the rest of the pattern. Only one '!' is allowed in the pattern;
++ * other '!' characters are treated as regular symbols. For instance:
++ * if (wildcmp("bl!?h.*", "blah.jpg")) {
++ * // no match
++ * } else {
++ * // match
++ * }
++ *
++ * Also see comment for __wildcmp().
++ */
++static bool wildcmp(const char *wild, const char *string)
++{
++ return __wildcmp(wild, string, 0);
++}
++
++/* scst_mutex supposed to be held */
++static struct scst_acg *scst_find_tgt_acg_by_name_wild(struct scst_tgt *tgt,
++ const char *initiator_name)
++{
++ struct scst_acg *acg, *res = NULL;
++ struct scst_acn *n;
++
++ TRACE_ENTRY();
++
++ if (initiator_name == NULL)
++ goto out;
++
++ list_for_each_entry(acg, &tgt->tgt_acg_list, acg_list_entry) {
++ list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
++ if (wildcmp(n->name, initiator_name)) {
++ TRACE_DBG("Access control group %s found",
++ acg->acg_name);
++ res = acg;
++ goto out;
++ }
++ }
++ }
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++/* Must be called under scst_mutex */
++static struct scst_acg *__scst_find_acg(struct scst_tgt *tgt,
++ const char *initiator_name)
++{
++ struct scst_acg *acg = NULL;
++
++ TRACE_ENTRY();
++
++ acg = scst_find_tgt_acg_by_name_wild(tgt, initiator_name);
++ if (acg == NULL)
++ acg = tgt->default_acg;
++
++ TRACE_EXIT_HRES((unsigned long)acg);
++ return acg;
++}
++
++/* Must be called under scst_mutex */
++struct scst_acg *scst_find_acg(const struct scst_session *sess)
++{
++ return __scst_find_acg(sess->tgt, sess->initiator_name);
++}
++
++/**
++ * scst_initiator_has_luns() - check if this initiator will see any LUNs
++ *
++ * Checks if this initiator will see any LUNs upon connecting to this target.
++ * Returns true if yes and false otherwise.
++ */
++bool scst_initiator_has_luns(struct scst_tgt *tgt, const char *initiator_name)
++{
++ bool res;
++ struct scst_acg *acg;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_mutex);
++
++ acg = __scst_find_acg(tgt, initiator_name);
++
++ res = !list_empty(&acg->acg_dev_list);
++
++ mutex_unlock(&scst_mutex);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL_GPL(scst_initiator_has_luns);
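++
++/*
++ * Usage example: an illustrative sketch, not part of the original SCST
++ * sources. A target driver could use this helper to reject a login early
++ * when the initiator would see no LUNs ("my_tgt" is a placeholder):
++ *
++ *	if (!scst_initiator_has_luns(my_tgt, initiator_name))
++ *		return -EPERM;	(reject the login)
++ */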
++
++static int scst_init_session(struct scst_session *sess)
++{
++ int res = 0;
++ struct scst_cmd *cmd;
++ struct scst_mgmt_cmd *mcmd, *tm;
++ int mwake = 0;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_mutex);
++
++ sess->acg = scst_find_acg(sess);
++
++ PRINT_INFO("Using security group \"%s\" for initiator \"%s\"",
++ sess->acg->acg_name, sess->initiator_name);
++
++ list_add_tail(&sess->acg_sess_list_entry, &sess->acg->acg_sess_list);
++
++ TRACE_DBG("Adding sess %p to tgt->sess_list", sess);
++ list_add_tail(&sess->sess_list_entry, &sess->tgt->sess_list);
++
++ if (sess->tgt->tgtt->get_initiator_port_transport_id != NULL) {
++ res = sess->tgt->tgtt->get_initiator_port_transport_id(sess,
++ &sess->transport_id);
++ if (res != 0) {
++ PRINT_ERROR("Unable to make initiator %s port "
++ "transport id", sess->initiator_name);
++ goto failed;
++ }
++ TRACE_PR("sess %p (ini %s), transport id %s/%d", sess,
++ sess->initiator_name,
++ debug_transport_id_to_initiator_name(
++ sess->transport_id), sess->tgt->rel_tgt_id);
++ }
++
++ res = scst_sess_sysfs_create(sess);
++ if (res != 0)
++ goto failed;
++
++ /*
++	 * scst_sess_alloc_tgt_devs() must be called after the session has been
++	 * added to the sess_list, to avoid racing with scst_check_reassign_sess()!
++ */
++ res = scst_sess_alloc_tgt_devs(sess);
++
++failed:
++ mutex_unlock(&scst_mutex);
++
++ if (sess->init_result_fn) {
++ TRACE_DBG("Calling init_result_fn(%p)", sess);
++ sess->init_result_fn(sess, sess->reg_sess_data, res);
++ TRACE_DBG("%s", "init_result_fn() returned");
++ }
++
++ spin_lock_irq(&sess->sess_list_lock);
++
++ if (res == 0)
++ sess->init_phase = SCST_SESS_IPH_SUCCESS;
++ else
++ sess->init_phase = SCST_SESS_IPH_FAILED;
++
++restart:
++ list_for_each_entry(cmd, &sess->init_deferred_cmd_list,
++ cmd_list_entry) {
++ TRACE_DBG("Deleting cmd %p from init deferred cmd list", cmd);
++ list_del(&cmd->cmd_list_entry);
++ atomic_dec(&sess->sess_cmd_count);
++ spin_unlock_irq(&sess->sess_list_lock);
++ scst_cmd_init_done(cmd, SCST_CONTEXT_THREAD);
++ spin_lock_irq(&sess->sess_list_lock);
++ goto restart;
++ }
++
++ spin_lock(&scst_mcmd_lock);
++ list_for_each_entry_safe(mcmd, tm, &sess->init_deferred_mcmd_list,
++ mgmt_cmd_list_entry) {
++ TRACE_DBG("Moving mgmt command %p from init deferred mcmd list",
++ mcmd);
++ list_move_tail(&mcmd->mgmt_cmd_list_entry,
++ &scst_active_mgmt_cmd_list);
++ mwake = 1;
++ }
++
++ spin_unlock(&scst_mcmd_lock);
++ /*
++	 * In case of an error at this point, the calling target driver is
++	 * supposed to have already initiated this sess's unregistration.
++ */
++ sess->init_phase = SCST_SESS_IPH_READY;
++ spin_unlock_irq(&sess->sess_list_lock);
++
++ if (mwake)
++ wake_up(&scst_mgmt_cmd_list_waitQ);
++
++ scst_sess_put(sess);
++
++ TRACE_EXIT();
++ return res;
++}
++
++/**
++ * scst_register_session() - register session
++ * @tgt: target
++ * @atomic:	true, if the function is called in atomic context. If false,
++ * this function will block until the session registration is
++ * completed.
++ * @initiator_name: remote initiator's name, any NULL-terminated string,
++ *		    e.g. an iSCSI name, which is used as the key to find the
++ *		    appropriate access control group. Can be NULL, in which
++ *		    case the target's default LUNs are used.
++ * @tgt_priv: pointer to target driver's private data
++ * @result_fn_data: any target driver supplied data
++ * @result_fn: pointer to the function that will be asynchronously called
++ * when session initialization finishes.
++ * Can be NULL. Parameters:
++ * - sess - session
++ * - data - the data the target driver supplied to
++ * scst_register_session()
++ * - result - session initialization result, 0 on success or
++ * appropriate error code otherwise
++ *
++ * Description:
++ * Registers new session. Returns new session on success or NULL otherwise.
++ *
++ * Note: Session creation and initialization is a complex task
++ *       that requires a sleeping state, so it can't be fully done
++ *       in interrupt context. Therefore, if scst_register_session()
++ *       is called from atomic context, the "bottom half" of it will
++ *       be done in SCST thread context. In this case
++ *       scst_register_session() will return a not yet completely
++ *       initialized session, but the target driver can already
++ *       supply commands to this session via scst_rx_cmd().
++ *       Processing of those commands will be delayed inside SCST
++ *       until the session initialization is finished, then
++ *       restarted. The target driver will be notified about the
++ *       completion of the session initialization via result_fn().
++ *       On success the target driver need not do anything, but if
++ *       the initialization fails, the target driver must ensure
++ *       that no new commands are sent to SCST after result_fn()
++ *       returns. All commands already sent to SCST for the failed
++ *       session will be returned in xmit_response() with BUSY
++ *       status. In case of failure the driver shall call
++ *       scst_unregister_session() inside result_fn(); it will NOT
++ *       be called automatically.
++ */
++struct scst_session *scst_register_session(struct scst_tgt *tgt, int atomic,
++ const char *initiator_name, void *tgt_priv, void *result_fn_data,
++ void (*result_fn) (struct scst_session *sess, void *data, int result))
++{
++ struct scst_session *sess;
++ int res;
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ sess = scst_alloc_session(tgt, atomic ? GFP_ATOMIC : GFP_KERNEL,
++ initiator_name);
++ if (sess == NULL)
++ goto out;
++
++ scst_sess_set_tgt_priv(sess, tgt_priv);
++
++ scst_sess_get(sess); /* one for registered session */
++ scst_sess_get(sess); /* one held until sess is inited */
++
++ if (atomic) {
++ sess->reg_sess_data = result_fn_data;
++ sess->init_result_fn = result_fn;
++ spin_lock_irqsave(&scst_mgmt_lock, flags);
++ TRACE_DBG("Adding sess %p to scst_sess_init_list", sess);
++ list_add_tail(&sess->sess_init_list_entry,
++ &scst_sess_init_list);
++ spin_unlock_irqrestore(&scst_mgmt_lock, flags);
++ wake_up(&scst_mgmt_waitQ);
++ } else {
++ res = scst_init_session(sess);
++ if (res != 0)
++ goto out_free;
++ }
++
++out:
++ TRACE_EXIT();
++ return sess;
++
++out_free:
++ scst_free_session(sess);
++ sess = NULL;
++ goto out;
++}
++EXPORT_SYMBOL_GPL(scst_register_session);
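++
++/*
++ * Usage example: an illustrative sketch, not part of the original SCST
++ * sources. "my_conn", "my_tgt", my_sess_init_done() and my_conn_close() are
++ * hypothetical target driver entities; registration is done from atomic
++ * context, so the result is delivered asynchronously via the callback:
++ *
++ *	static void my_sess_init_done(struct scst_session *sess, void *data,
++ *		int result)
++ *	{
++ *		struct my_conn *conn = data;
++ *
++ *		if (result != 0) {
++ *			scst_unregister_session(sess, 0, NULL);
++ *			my_conn_close(conn);
++ *		}
++ *	}
++ *
++ *	conn->sess = scst_register_session(my_tgt, 1, conn->initiator_name,
++ *		conn, conn, my_sess_init_done);
++ *	if (conn->sess == NULL)
++ *		return -ENOMEM;
++ */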
++
++/**
++ * scst_register_session_non_gpl() - register session (non-GPL version)
++ * @tgt: target
++ * @initiator_name: remote initiator's name, any NULL-terminated string,
++ *		    e.g. an iSCSI name, which is used as the key to find the
++ *		    appropriate access control group. Can be NULL, in which
++ *		    case the target's default LUNs are used.
++ * @tgt_priv: pointer to target driver's private data
++ *
++ * Description:
++ * Registers new session. Returns new session on success or NULL otherwise.
++ */
++struct scst_session *scst_register_session_non_gpl(struct scst_tgt *tgt,
++ const char *initiator_name, void *tgt_priv)
++{
++ return scst_register_session(tgt, 0, initiator_name, tgt_priv,
++ NULL, NULL);
++}
++EXPORT_SYMBOL(scst_register_session_non_gpl);
++
++/**
++ * scst_unregister_session() - unregister session
++ * @sess: session to be unregistered
++ * @wait:	if true, instructs to wait until all commands that are
++ *		currently being executed and belong to the session have
++ *		finished. Otherwise, the target driver should be prepared
++ *		to receive xmit_response() for the session's commands after
++ *		scst_unregister_session() returns.
++ * @unreg_done_fn: pointer to the function that will be asynchronously called
++ * when the last session's command finishes and
++ * the session is about to be completely freed. Can be NULL.
++ * Parameter:
++ * - sess - session
++ *
++ * Unregisters session.
++ *
++ * Notes:
++ * - All outstanding commands will be finished regularly. After
++ *   scst_unregister_session() has returned, no new commands must be sent to
++ * SCST via scst_rx_cmd().
++ *
++ * - The caller must ensure that no scst_rx_cmd() or scst_rx_mgmt_fn_*() is
++ *   called in parallel with scst_unregister_session().
++ *
++ * - Can be called before result_fn() of scst_register_session() is called,
++ * i.e. during the session registration/initialization.
++ *
++ * - It is highly recommended to call scst_unregister_session() as soon as it
++ *   becomes clear that the session will be unregistered, rather than waiting
++ *   until all related commands have finished. This function provides the wait
++ *   functionality, but it also starts recovering stuck commands, if there are
++ *   any. Otherwise, your target driver could wait for those commands forever.
++ */
++void scst_unregister_session(struct scst_session *sess, int wait,
++ void (*unreg_done_fn) (struct scst_session *sess))
++{
++ unsigned long flags;
++ DECLARE_COMPLETION_ONSTACK(c);
++ int rc, lun;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("Unregistering session %p (wait %d)", sess, wait);
++
++ sess->unreg_done_fn = unreg_done_fn;
++
++ /* Abort all outstanding commands and clear reservation, if necessary */
++ lun = 0;
++ rc = scst_rx_mgmt_fn_lun(sess, SCST_UNREG_SESS_TM,
++ (uint8_t *)&lun, sizeof(lun), SCST_ATOMIC, NULL);
++ if (rc != 0) {
++ PRINT_ERROR("SCST_UNREG_SESS_TM failed %d (sess %p)",
++ rc, sess);
++ }
++
++ sess->shut_phase = SCST_SESS_SPH_SHUTDOWN;
++
++ spin_lock_irqsave(&scst_mgmt_lock, flags);
++
++ if (wait)
++ sess->shutdown_compl = &c;
++
++ spin_unlock_irqrestore(&scst_mgmt_lock, flags);
++
++ scst_sess_put(sess);
++
++ if (wait) {
++ TRACE_DBG("Waiting for session %p to complete", sess);
++ wait_for_completion(&c);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_unregister_session);
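++
++/*
++ * Usage example: an illustrative sketch, not part of the original SCST
++ * sources. "my_sess" and my_sess_unreg_done() are hypothetical target driver
++ * entities; the callback would release the driver's per-session resources:
++ *
++ *	scst_unregister_session(my_sess, 0, my_sess_unreg_done);
++ *
++ * or, when sleeping is allowed and the driver prefers to block until all
++ * session commands have finished:
++ *
++ *	scst_unregister_session(my_sess, 1, NULL);
++ */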
++
++/**
++ * scst_unregister_session_non_gpl() - unregister session, non-GPL version
++ * @sess: session to be unregistered
++ *
++ * Unregisters session.
++ *
++ * See notes for scst_unregister_session() above.
++ */
++void scst_unregister_session_non_gpl(struct scst_session *sess)
++{
++ TRACE_ENTRY();
++
++ scst_unregister_session(sess, 1, NULL);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL(scst_unregister_session_non_gpl);
++
++static inline int test_mgmt_list(void)
++{
++ int res = !list_empty(&scst_sess_init_list) ||
++ !list_empty(&scst_sess_shut_list) ||
++ unlikely(kthread_should_stop());
++ return res;
++}
++
++int scst_global_mgmt_thread(void *arg)
++{
++ struct scst_session *sess;
++
++ TRACE_ENTRY();
++
++ PRINT_INFO("Management thread started, PID %d", current->pid);
++
++ current->flags |= PF_NOFREEZE;
++
++ set_user_nice(current, -10);
++
++ spin_lock_irq(&scst_mgmt_lock);
++ while (!kthread_should_stop()) {
++ wait_queue_t wait;
++ init_waitqueue_entry(&wait, current);
++
++ if (!test_mgmt_list()) {
++ add_wait_queue_exclusive(&scst_mgmt_waitQ, &wait);
++ for (;;) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ if (test_mgmt_list())
++ break;
++ spin_unlock_irq(&scst_mgmt_lock);
++ schedule();
++ spin_lock_irq(&scst_mgmt_lock);
++ }
++ set_current_state(TASK_RUNNING);
++ remove_wait_queue(&scst_mgmt_waitQ, &wait);
++ }
++
++ while (!list_empty(&scst_sess_init_list)) {
++ sess = list_entry(scst_sess_init_list.next,
++ typeof(*sess), sess_init_list_entry);
++ TRACE_DBG("Removing sess %p from scst_sess_init_list",
++ sess);
++ list_del(&sess->sess_init_list_entry);
++ spin_unlock_irq(&scst_mgmt_lock);
++
++ if (sess->init_phase == SCST_SESS_IPH_INITING)
++ scst_init_session(sess);
++ else {
++ PRINT_CRIT_ERROR("session %p is in "
++ "scst_sess_init_list, but in unknown "
++ "init phase %x", sess,
++ sess->init_phase);
++ BUG();
++ }
++
++ spin_lock_irq(&scst_mgmt_lock);
++ }
++
++ while (!list_empty(&scst_sess_shut_list)) {
++ sess = list_entry(scst_sess_shut_list.next,
++ typeof(*sess), sess_shut_list_entry);
++ TRACE_DBG("Removing sess %p from scst_sess_shut_list",
++ sess);
++ list_del(&sess->sess_shut_list_entry);
++ spin_unlock_irq(&scst_mgmt_lock);
++
++ switch (sess->shut_phase) {
++ case SCST_SESS_SPH_SHUTDOWN:
++ BUG_ON(atomic_read(&sess->refcnt) != 0);
++ scst_free_session_callback(sess);
++ break;
++ default:
++ PRINT_CRIT_ERROR("session %p is in "
++ "scst_sess_shut_list, but in unknown "
++ "shut phase %lx", sess,
++ sess->shut_phase);
++ BUG();
++ break;
++ }
++
++ spin_lock_irq(&scst_mgmt_lock);
++ }
++ }
++ spin_unlock_irq(&scst_mgmt_lock);
++
++ /*
++ * If kthread_should_stop() is true, we are guaranteed to be
++ * on the module unload, so both lists must be empty.
++ */
++ BUG_ON(!list_empty(&scst_sess_init_list));
++ BUG_ON(!list_empty(&scst_sess_shut_list));
++
++ PRINT_INFO("Management thread PID %d finished", current->pid);
++
++ TRACE_EXIT();
++ return 0;
++}
++
++/* Called under sess->sess_list_lock */
++static struct scst_cmd *__scst_find_cmd_by_tag(struct scst_session *sess,
++ uint64_t tag, bool to_abort)
++{
++ struct scst_cmd *cmd, *res = NULL;
++
++ TRACE_ENTRY();
++
++ /* ToDo: hash list */
++
++ TRACE_DBG("%s (sess=%p, tag=%llu)", "Searching in sess cmd list",
++ sess, (long long unsigned int)tag);
++
++ list_for_each_entry(cmd, &sess->sess_cmd_list,
++ sess_cmd_list_entry) {
++ if (cmd->tag == tag) {
++ /*
++			 * We must not count done commands, because they
++			 * have already been submitted for transmission.
++			 * Otherwise we could have a race: if for some
++			 * reason a cmd's release is delayed after
++			 * transmission and the initiator sends a cmd with
++			 * the same tag, a wrong cmd could be returned.
++ */
++ if (cmd->done) {
++ if (to_abort) {
++ /*
++ * We should return the latest not
++ * aborted cmd with this tag.
++ */
++ if (res == NULL)
++ res = cmd;
++ else {
++ if (test_bit(SCST_CMD_ABORTED,
++ &res->cmd_flags)) {
++ res = cmd;
++ } else if (!test_bit(SCST_CMD_ABORTED,
++ &cmd->cmd_flags))
++ res = cmd;
++ }
++ }
++ continue;
++ } else {
++ res = cmd;
++ break;
++ }
++ }
++ }
++
++ TRACE_EXIT();
++ return res;
++}
++
++/**
++ * scst_find_cmd() - find command by custom comparison function
++ *
++ * Finds a command based on user-supplied data and a comparison
++ * callback function that should return true if the command is found.
++ * Returns the command on success or NULL otherwise.
++ */
++struct scst_cmd *scst_find_cmd(struct scst_session *sess, void *data,
++ int (*cmp_fn) (struct scst_cmd *cmd,
++ void *data))
++{
++ struct scst_cmd *cmd = NULL;
++ unsigned long flags = 0;
++
++ TRACE_ENTRY();
++
++ if (cmp_fn == NULL)
++ goto out;
++
++ spin_lock_irqsave(&sess->sess_list_lock, flags);
++
++ TRACE_DBG("Searching in sess cmd list (sess=%p)", sess);
++ list_for_each_entry(cmd, &sess->sess_cmd_list, sess_cmd_list_entry) {
++ /*
++		 * We must not count done commands, because they have already
++		 * been submitted for transmission. Otherwise we could have a
++		 * race: if for some reason a cmd's release is delayed after
++		 * transmission and the initiator sends a cmd with the same
++		 * tag, a wrong cmd could be returned.
++ */
++ if (cmd->done)
++ continue;
++ if (cmp_fn(cmd, data))
++ goto out_unlock;
++ }
++
++ cmd = NULL;
++
++out_unlock:
++ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
++
++out:
++ TRACE_EXIT();
++ return cmd;
++}
++EXPORT_SYMBOL(scst_find_cmd);
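++
++/*
++ * Usage example: an illustrative sketch, not part of the original SCST
++ * sources. my_match_tgt_sn() and "pdu" are hypothetical target driver
++ * entities; the comparison callback here matches on the target-assigned SN:
++ *
++ *	static int my_match_tgt_sn(struct scst_cmd *cmd, void *data)
++ *	{
++ *		return cmd->tgt_sn == *(uint32_t *)data;
++ *	}
++ *
++ *	cmd = scst_find_cmd(sess, &pdu->tgt_sn, my_match_tgt_sn);
++ *	if (cmd == NULL)
++ *		return -ENOENT;
++ */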
++
++/**
++ * scst_find_cmd_by_tag() - find command by tag
++ *
++ * Finds a command based on the supplied tag, comparing it with the one
++ * previously set by scst_cmd_set_tag(). Returns the found command on
++ * success or NULL otherwise.
++ */
++struct scst_cmd *scst_find_cmd_by_tag(struct scst_session *sess,
++ uint64_t tag)
++{
++ unsigned long flags;
++ struct scst_cmd *cmd;
++ spin_lock_irqsave(&sess->sess_list_lock, flags);
++ cmd = __scst_find_cmd_by_tag(sess, tag, false);
++ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
++ return cmd;
++}
++EXPORT_SYMBOL(scst_find_cmd_by_tag);
+diff -uprN orig/linux-2.6.36/include/scst/scst_debug.h linux-2.6.36/include/scst/scst_debug.h
+--- orig/linux-2.6.36/include/scst/scst_debug.h
++++ linux-2.6.36/include/scst/scst_debug.h
+@@ -0,0 +1,351 @@
++/*
++ * include/scst_debug.h
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * Contains macros for execution tracing and error reporting
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __SCST_DEBUG_H
++#define __SCST_DEBUG_H
++
++#include <generated/autoconf.h> /* for CONFIG_* */
++
++#include <linux/bug.h> /* for WARN_ON_ONCE */
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++#define EXTRACHECKS_BUG_ON(a) BUG_ON(a)
++#define EXTRACHECKS_WARN_ON(a) WARN_ON(a)
++#define EXTRACHECKS_WARN_ON_ONCE(a) WARN_ON_ONCE(a)
++#else
++#define EXTRACHECKS_BUG_ON(a) do { } while (0)
++#define EXTRACHECKS_WARN_ON(a) do { } while (0)
++#define EXTRACHECKS_WARN_ON_ONCE(a) do { } while (0)
++#endif
++
++#define TRACE_NULL 0x00000000
++#define TRACE_DEBUG 0x00000001
++#define TRACE_FUNCTION 0x00000002
++#define TRACE_LINE 0x00000004
++#define TRACE_PID 0x00000008
++#ifndef GENERATING_UPSTREAM_PATCH
++#define TRACE_ENTRYEXIT 0x00000010
++#endif
++#define TRACE_BUFF 0x00000020
++#define TRACE_MEMORY 0x00000040
++#define TRACE_SG_OP 0x00000080
++#define TRACE_OUT_OF_MEM 0x00000100
++#define TRACE_MINOR 0x00000200 /* less important events */
++#define TRACE_MGMT 0x00000400
++#define TRACE_MGMT_DEBUG 0x00000800
++#define TRACE_SCSI 0x00001000
++#define TRACE_SPECIAL 0x00002000 /* filtering debug, etc */
++#define TRACE_FLOW_CONTROL 0x00004000 /* flow control in action */
++#define TRACE_PRES 0x00008000
++#define TRACE_ALL 0xffffffff
++/* Flags 0xXXXX0000 are local for users */
++
++#define TRACE_MINOR_AND_MGMT_DBG (TRACE_MINOR|TRACE_MGMT_DEBUG)
++
++#ifndef KERN_CONT
++#define KERN_CONT ""
++#endif
++
++/*
++ * Note: in the next two printk() statements the KERN_CONT macro is only
++ * present to suppress a checkpatch warning (KERN_CONT is defined as "").
++ */
++#define PRINT(log_flag, format, args...) \
++ printk(log_flag format "\n", ## args)
++#define PRINTN(log_flag, format, args...) \
++ printk(log_flag format, ## args)
++
++#ifdef LOG_PREFIX
++#define __LOG_PREFIX LOG_PREFIX
++#else
++#define __LOG_PREFIX NULL
++#endif
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++
++#ifndef CONFIG_SCST_DEBUG
++#define ___unlikely(a) (a)
++#else
++#define ___unlikely(a) unlikely(a)
++#endif
++
++/*
++ * We don't print a prefix for debug traces, to avoid putting additional
++ * pressure on the logging system when there is a lot of logging.
++ */
++
++int debug_print_prefix(unsigned long trace_flag,
++ const char *prefix, const char *func, int line);
++void debug_print_buffer(const void *data, int len);
++const char *debug_transport_id_to_initiator_name(const uint8_t *transport_id);
++
++#define TRACING_MINOR() (trace_flag & TRACE_MINOR)
++
++#define TRACE(trace, format, args...) \
++do { \
++ if (___unlikely(trace_flag & (trace))) { \
++ debug_print_prefix(trace_flag, __LOG_PREFIX, \
++ __func__, __LINE__); \
++ PRINT(KERN_CONT, format, args); \
++ } \
++} while (0)
++
++#ifdef CONFIG_SCST_DEBUG
++
++#define PRINT_BUFFER(message, buff, len) \
++do { \
++ PRINT(KERN_INFO, "%s:%s:", __func__, message); \
++ debug_print_buffer(buff, len); \
++} while (0)
++
++#else
++
++#define PRINT_BUFFER(message, buff, len) \
++do { \
++ PRINT(KERN_INFO, "%s:", message); \
++ debug_print_buffer(buff, len); \
++} while (0)
++
++#endif
++
++#define PRINT_BUFF_FLAG(flag, message, buff, len) \
++do { \
++ if (___unlikely(trace_flag & (flag))) { \
++ debug_print_prefix(trace_flag, NULL, __func__, __LINE__);\
++ PRINT(KERN_CONT, "%s:", message); \
++ debug_print_buffer(buff, len); \
++ } \
++} while (0)
++
++#else /* CONFIG_SCST_DEBUG || CONFIG_SCST_TRACING */
++
++#define TRACING_MINOR() (false)
++
++#define TRACE(trace, args...) do {} while (0)
++#define PRINT_BUFFER(message, buff, len) do {} while (0)
++#define PRINT_BUFF_FLAG(flag, message, buff, len) do {} while (0)
++
++#endif /* CONFIG_SCST_DEBUG || CONFIG_SCST_TRACING */
++
++#ifdef CONFIG_SCST_DEBUG
++
++#define TRACE_DBG_FLAG(trace, format, args...) \
++do { \
++ if (trace_flag & (trace)) { \
++ debug_print_prefix(trace_flag, NULL, __func__, __LINE__);\
++ PRINT(KERN_CONT, format, args); \
++ } \
++} while (0)
++
++#define TRACE_MEM(args...) TRACE_DBG_FLAG(TRACE_MEMORY, args)
++#define TRACE_SG(args...) TRACE_DBG_FLAG(TRACE_SG_OP, args)
++#define TRACE_DBG(args...) TRACE_DBG_FLAG(TRACE_DEBUG, args)
++#define TRACE_DBG_SPECIAL(args...) TRACE_DBG_FLAG(TRACE_DEBUG|TRACE_SPECIAL, args)
++#define TRACE_MGMT_DBG(args...) TRACE_DBG_FLAG(TRACE_MGMT_DEBUG, args)
++#define TRACE_MGMT_DBG_SPECIAL(args...) \
++ TRACE_DBG_FLAG(TRACE_MGMT_DEBUG|TRACE_SPECIAL, args)
++#define TRACE_PR(args...) TRACE_DBG_FLAG(TRACE_PRES, args)
++
++#define TRACE_BUFFER(message, buff, len) \
++do { \
++ if (trace_flag & TRACE_BUFF) { \
++ debug_print_prefix(trace_flag, NULL, __func__, __LINE__);\
++ PRINT(KERN_CONT, "%s:", message); \
++ debug_print_buffer(buff, len); \
++ } \
++} while (0)
++
++#define TRACE_BUFF_FLAG(flag, message, buff, len) \
++do { \
++ if (trace_flag & (flag)) { \
++ debug_print_prefix(trace_flag, NULL, __func__, __LINE__);\
++ PRINT(KERN_CONT, "%s:", message); \
++ debug_print_buffer(buff, len); \
++ } \
++} while (0)
++
++#define PRINT_LOG_FLAG(log_flag, format, args...) \
++do { \
++ debug_print_prefix(trace_flag, __LOG_PREFIX, __func__, __LINE__);\
++ PRINT(KERN_CONT, format, args); \
++} while (0)
++
++#define PRINT_WARNING(format, args...) \
++do { \
++ debug_print_prefix(trace_flag, __LOG_PREFIX, __func__, __LINE__);\
++ PRINT(KERN_CONT, "***WARNING***: " format, args); \
++} while (0)
++
++#define PRINT_ERROR(format, args...) \
++do { \
++ debug_print_prefix(trace_flag, __LOG_PREFIX, __func__, __LINE__);\
++ PRINT(KERN_CONT, "***ERROR***: " format, args); \
++} while (0)
++
++#define PRINT_CRIT_ERROR(format, args...) \
++do { \
++ debug_print_prefix(trace_flag, __LOG_PREFIX, __func__, __LINE__);\
++ PRINT(KERN_CONT, "***CRITICAL ERROR***: " format, args); \
++} while (0)
++
++#define PRINT_INFO(format, args...) \
++do { \
++ debug_print_prefix(trace_flag, __LOG_PREFIX, __func__, __LINE__);\
++ PRINT(KERN_CONT, format, args); \
++} while (0)
++
++#ifndef GENERATING_UPSTREAM_PATCH
++#define TRACE_ENTRY() \
++do { \
++ if (trace_flag & TRACE_ENTRYEXIT) { \
++ if (trace_flag & TRACE_PID) { \
++ PRINT(KERN_INFO, "[%d]: ENTRY %s", current->pid, \
++ __func__); \
++ } \
++ else { \
++ PRINT(KERN_INFO, "ENTRY %s", __func__); \
++ } \
++ } \
++} while (0)
++
++#define TRACE_EXIT() \
++do { \
++ if (trace_flag & TRACE_ENTRYEXIT) { \
++ if (trace_flag & TRACE_PID) { \
++ PRINT(KERN_INFO, "[%d]: EXIT %s", current->pid, \
++ __func__); \
++ } \
++ else { \
++ PRINT(KERN_INFO, "EXIT %s", __func__); \
++ } \
++ } \
++} while (0)
++
++#define TRACE_EXIT_RES(res) \
++do { \
++ if (trace_flag & TRACE_ENTRYEXIT) { \
++ if (trace_flag & TRACE_PID) { \
++ PRINT(KERN_INFO, "[%d]: EXIT %s: %ld", current->pid, \
++ __func__, (long)(res)); \
++ } \
++ else { \
++ PRINT(KERN_INFO, "EXIT %s: %ld", \
++ __func__, (long)(res)); \
++ } \
++ } \
++} while (0)
++
++#define TRACE_EXIT_HRES(res) \
++do { \
++ if (trace_flag & TRACE_ENTRYEXIT) { \
++ if (trace_flag & TRACE_PID) { \
++ PRINT(KERN_INFO, "[%d]: EXIT %s: 0x%lx", current->pid, \
++ __func__, (long)(res)); \
++ } \
++ else { \
++ PRINT(KERN_INFO, "EXIT %s: %lx", \
++ __func__, (long)(res)); \
++ } \
++ } \
++} while (0)
++#endif
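++
++/*
++ * Usage example: an illustrative sketch, not part of the original SCST
++ * sources. A typical function wraps its body in entry/exit markers and uses
++ * the flag-gated macros for conditional debug output:
++ *
++ *	TRACE_ENTRY();
++ *	TRACE_DBG("cmd %p, state %d", cmd, cmd->state);
++ *	TRACE_MGMT_DBG("Aborting cmd %p (tag %llu)", cmd,
++ *		(long long unsigned int)cmd->tag);
++ *	TRACE_EXIT_RES(res);
++ */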
++
++#else /* CONFIG_SCST_DEBUG */
++
++#define TRACE_MEM(format, args...) do {} while (0)
++#define TRACE_SG(format, args...) do {} while (0)
++#define TRACE_DBG(format, args...) do {} while (0)
++#define TRACE_DBG_FLAG(format, args...) do {} while (0)
++#define TRACE_DBG_SPECIAL(format, args...) do {} while (0)
++#define TRACE_MGMT_DBG(format, args...) do {} while (0)
++#define TRACE_MGMT_DBG_SPECIAL(format, args...) do {} while (0)
++#define TRACE_PR(format, args...) do {} while (0)
++#define TRACE_BUFFER(message, buff, len) do {} while (0)
++#define TRACE_BUFF_FLAG(flag, message, buff, len) do {} while (0)
++
++#ifndef GENERATING_UPSTREAM_PATCH
++#define TRACE_ENTRY() do {} while (0)
++#define TRACE_EXIT() do {} while (0)
++#define TRACE_EXIT_RES(res) do {} while (0)
++#define TRACE_EXIT_HRES(res) do {} while (0)
++#endif
++
++#ifdef LOG_PREFIX
++
++#define PRINT_INFO(format, args...) \
++do { \
++ PRINT(KERN_INFO, "%s: " format, LOG_PREFIX, args); \
++} while (0)
++
++#define PRINT_WARNING(format, args...) \
++do { \
++ PRINT(KERN_INFO, "%s: ***WARNING***: " \
++ format, LOG_PREFIX, args); \
++} while (0)
++
++#define PRINT_ERROR(format, args...) \
++do { \
++ PRINT(KERN_INFO, "%s: ***ERROR***: " \
++ format, LOG_PREFIX, args); \
++} while (0)
++
++#define PRINT_CRIT_ERROR(format, args...) \
++do { \
++ PRINT(KERN_INFO, "%s: ***CRITICAL ERROR***: " \
++ format, LOG_PREFIX, args); \
++} while (0)
++
++#else
++
++#define PRINT_INFO(format, args...) \
++do { \
++ PRINT(KERN_INFO, format, args); \
++} while (0)
++
++#define PRINT_WARNING(format, args...) \
++do { \
++ PRINT(KERN_INFO, "***WARNING***: " \
++ format, args); \
++} while (0)
++
++#define PRINT_ERROR(format, args...) \
++do { \
++ PRINT(KERN_ERR, "***ERROR***: " \
++ format, args); \
++} while (0)
++
++#define PRINT_CRIT_ERROR(format, args...) \
++do { \
++ PRINT(KERN_CRIT, "***CRITICAL ERROR***: " \
++ format, args); \
++} while (0)
++
++#endif /* LOG_PREFIX */
++
++#endif /* CONFIG_SCST_DEBUG */
++
++#if defined(CONFIG_SCST_DEBUG) && defined(CONFIG_DEBUG_SLAB)
++#define SCST_SLAB_FLAGS (SLAB_RED_ZONE | SLAB_POISON)
++#else
++#define SCST_SLAB_FLAGS 0L
++#endif
++
++#endif /* __SCST_DEBUG_H */
+diff -uprN orig/linux-2.6.36/drivers/scst/scst_debug.c linux-2.6.36/drivers/scst/scst_debug.c
+--- orig/linux-2.6.36/drivers/scst/scst_debug.c
++++ linux-2.6.36/drivers/scst/scst_debug.c
+@@ -0,0 +1,224 @@
++/*
++ * scst_debug.c
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * Contains helper functions for execution tracing and error reporting.
++ * Intended to be included in main .c file.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <scst/scst.h>
++#include <scst/scst_debug.h>
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++
++#define TRACE_BUF_SIZE 512
++
++static char trace_buf[TRACE_BUF_SIZE];
++static DEFINE_SPINLOCK(trace_buf_lock);
++
++static inline int get_current_tid(void)
++{
++ /* Code should be the same as in sys_gettid() */
++ if (in_interrupt()) {
++ /*
++		 * Unfortunately, task_pid_vnr() isn't IRQ-safe, so it could
++		 * oops here. ToDo.
++ */
++ return 0;
++ }
++ return task_pid_vnr(current);
++}
++
++/**
++ * debug_print_prefix() - print debug prefix for a log line
++ *
++ * Prints, if requested by trace_flag, debug prefix for each log line
++ */
++int debug_print_prefix(unsigned long trace_flag,
++ const char *prefix, const char *func, int line)
++{
++ int i = 0;
++ unsigned long flags;
++ int pid = get_current_tid();
++
++ spin_lock_irqsave(&trace_buf_lock, flags);
++
++ trace_buf[0] = '\0';
++
++ if (trace_flag & TRACE_PID)
++ i += snprintf(&trace_buf[i], TRACE_BUF_SIZE, "[%d]: ", pid);
++ if (prefix != NULL)
++ i += snprintf(&trace_buf[i], TRACE_BUF_SIZE - i, "%s: ",
++ prefix);
++ if (trace_flag & TRACE_FUNCTION)
++ i += snprintf(&trace_buf[i], TRACE_BUF_SIZE - i, "%s:", func);
++ if (trace_flag & TRACE_LINE)
++ i += snprintf(&trace_buf[i], TRACE_BUF_SIZE - i, "%i:", line);
++
++ PRINTN(KERN_INFO, "%s", trace_buf);
++
++ spin_unlock_irqrestore(&trace_buf_lock, flags);
++
++ return i;
++}
++EXPORT_SYMBOL(debug_print_prefix);
++
++/**
++ * debug_print_buffer() - print a buffer
++ *
++ * Prints data from the given buffer into the log
++ */
++void debug_print_buffer(const void *data, int len)
++{
++ int z, z1, i;
++ const unsigned char *buf = (const unsigned char *) data;
++ unsigned long flags;
++
++ if (buf == NULL)
++ return;
++
++ spin_lock_irqsave(&trace_buf_lock, flags);
++
++ PRINT(KERN_INFO, " (h)___0__1__2__3__4__5__6__7__8__9__A__B__C__D__E__F");
++ for (z = 0, z1 = 0, i = 0; z < len; z++) {
++ if (z % 16 == 0) {
++ if (z != 0) {
++ i += snprintf(&trace_buf[i], TRACE_BUF_SIZE - i,
++ " ");
++ for (; (z1 < z) && (i < TRACE_BUF_SIZE - 1);
++ z1++) {
++ if ((buf[z1] >= 0x20) &&
++ (buf[z1] < 0x80))
++ trace_buf[i++] = buf[z1];
++ else
++ trace_buf[i++] = '.';
++ }
++ trace_buf[i] = '\0';
++ PRINT(KERN_INFO, "%s", trace_buf);
++ i = 0;
++ }
++ i += snprintf(&trace_buf[i], TRACE_BUF_SIZE - i,
++ "%4x: ", z);
++ }
++ i += snprintf(&trace_buf[i], TRACE_BUF_SIZE - i, "%02x ",
++ buf[z]);
++ }
++
++ i += snprintf(&trace_buf[i], TRACE_BUF_SIZE - i, " ");
++ for (; (z1 < z) && (i < TRACE_BUF_SIZE - 1); z1++) {
++ if ((buf[z1] > 0x20) && (buf[z1] < 0x80))
++ trace_buf[i++] = buf[z1];
++ else
++ trace_buf[i++] = '.';
++ }
++ trace_buf[i] = '\0';
++
++ PRINT(KERN_INFO, "%s", trace_buf);
++
++ spin_unlock_irqrestore(&trace_buf_lock, flags);
++ return;
++}
++EXPORT_SYMBOL(debug_print_buffer);
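++
++/*
++ * Usage example: an illustrative sketch, not part of the original SCST
++ * sources. Callers normally go through the flag-gated wrapper macros rather
++ * than calling this directly, e.g. (assuming cmd->cdb and cmd->cdb_len):
++ *
++ *	TRACE_BUFFER("Received CDB", cmd->cdb, cmd->cdb_len);
++ */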
++
++/*
++ * This function converts transport_id into a string form in an internal
++ * per-CPU static buffer. This buffer isn't protected in any way, because it's
++ * acceptable if the name gets corrupted in the debug logs due to the race for
++ * this buffer.
++ *
++ * Note! You can't call this function 2 or more times in a single logging
++ * (printk) statement, because then each new call of this function will
++ * overwrite the data written in this buffer by the previous call. You should
++ * instead split that logging statement into smaller statements, each calling
++ * debug_transport_id_to_initiator_name() only once.
++ */
++const char *debug_transport_id_to_initiator_name(const uint8_t *transport_id)
++{
++ /*
++	 * No external protection, because it's acceptable if the name
++	 * gets corrupted in the debug logs due to the race for this
++	 * buffer.
++ */
++#define SIZEOF_NAME_BUF 256
++ static char name_bufs[NR_CPUS][SIZEOF_NAME_BUF];
++ char *name_buf;
++ unsigned long flags;
++
++ BUG_ON(transport_id == NULL); /* better to catch it not under lock */
++
++ spin_lock_irqsave(&trace_buf_lock, flags);
++
++ name_buf = name_bufs[smp_processor_id()];
++
++ /*
++	 * To prevent users racing with us from accidentally
++	 * missing their NULL terminator.
++ */
++ memset(name_buf, 0, SIZEOF_NAME_BUF);
++ smp_mb();
++
++ switch (transport_id[0] & 0x0f) {
++ case SCSI_TRANSPORTID_PROTOCOLID_ISCSI:
++ scnprintf(name_buf, SIZEOF_NAME_BUF, "%s",
++ &transport_id[4]);
++ break;
++ case SCSI_TRANSPORTID_PROTOCOLID_FCP2:
++ scnprintf(name_buf, SIZEOF_NAME_BUF,
++ "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
++ transport_id[8], transport_id[9],
++ transport_id[10], transport_id[11],
++ transport_id[12], transport_id[13],
++ transport_id[14], transport_id[15]);
++ break;
++ case SCSI_TRANSPORTID_PROTOCOLID_SPI5:
++ scnprintf(name_buf, SIZEOF_NAME_BUF,
++ "%x:%x", be16_to_cpu((__force __be16)transport_id[2]),
++ be16_to_cpu((__force __be16)transport_id[6]));
++ break;
++ case SCSI_TRANSPORTID_PROTOCOLID_SRP:
++ scnprintf(name_buf, SIZEOF_NAME_BUF,
++ "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"
++ ":%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
++ transport_id[8], transport_id[9],
++ transport_id[10], transport_id[11],
++ transport_id[12], transport_id[13],
++ transport_id[14], transport_id[15],
++ transport_id[16], transport_id[17],
++ transport_id[18], transport_id[19],
++ transport_id[20], transport_id[21],
++ transport_id[22], transport_id[23]);
++ break;
++ case SCSI_TRANSPORTID_PROTOCOLID_SAS:
++ scnprintf(name_buf, SIZEOF_NAME_BUF,
++ "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
++ transport_id[4], transport_id[5],
++ transport_id[6], transport_id[7],
++ transport_id[8], transport_id[9],
++ transport_id[10], transport_id[11]);
++ break;
++ default:
++ scnprintf(name_buf, SIZEOF_NAME_BUF,
++ "(Not known protocol ID %x)", transport_id[0] & 0x0f);
++ break;
++ }
++
++ spin_unlock_irqrestore(&trace_buf_lock, flags);
++
++ return name_buf;
++#undef SIZEOF_NAME_BUF
++}
++
++#endif /* CONFIG_SCST_DEBUG || CONFIG_SCST_TRACING */
+diff -uprN orig/linux-2.6.36/drivers/scst/scst_proc.c linux-2.6.36/drivers/scst/scst_proc.c
+--- orig/linux-2.6.36/drivers/scst/scst_proc.c
++++ linux-2.6.36/drivers/scst/scst_proc.c
+@@ -0,0 +1,2704 @@
++/*
++ * scst_proc.c
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/module.h>
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/unistd.h>
++#include <linux/string.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++
++#include <scst/scst.h>
++#include "scst_priv.h"
++#include "scst_mem.h"
++#include "scst_pres.h"
++
++static int scst_proc_init_groups(void);
++static void scst_proc_cleanup_groups(void);
++static int scst_proc_assign_handler(char *buf);
++static int scst_proc_group_add(const char *p, unsigned int addr_method);
++static int scst_proc_del_free_acg(struct scst_acg *acg, int remove_proc);
++
++static struct scst_proc_data scst_version_proc_data;
++static struct scst_proc_data scst_help_proc_data;
++static struct scst_proc_data scst_sgv_proc_data;
++static struct scst_proc_data scst_groups_names_proc_data;
++static struct scst_proc_data scst_groups_devices_proc_data;
++static struct scst_proc_data scst_groups_addr_method_proc_data;
++static struct scst_proc_data scst_sessions_proc_data;
++static struct scst_proc_data scst_dev_handler_type_proc_data;
++static struct scst_proc_data scst_tgt_proc_data;
++static struct scst_proc_data scst_threads_proc_data;
++static struct scst_proc_data scst_scsi_tgt_proc_data;
++static struct scst_proc_data scst_dev_handler_proc_data;
++
++/*
++ * Must be less than 4K page size, since our output routines
++ * use some slack for overruns
++ */
++#define SCST_PROC_BLOCK_SIZE (PAGE_SIZE - 512)
++
++#define SCST_PROC_LOG_ENTRY_NAME "trace_level"
++#define SCST_PROC_DEV_HANDLER_TYPE_ENTRY_NAME "type"
++#define SCST_PROC_VERSION_NAME "version"
++#define SCST_PROC_SESSIONS_NAME "sessions"
++#define SCST_PROC_HELP_NAME "help"
++#define SCST_PROC_THREADS_NAME "threads"
++#define SCST_PROC_GROUPS_ENTRY_NAME "groups"
++#define SCST_PROC_GROUPS_DEVICES_ENTRY_NAME "devices"
++#define SCST_PROC_GROUPS_USERS_ENTRY_NAME "names"
++#define SCST_PROC_GROUPS_ADDR_METHOD_ENTRY_NAME "addr_method"
++
++#ifdef CONFIG_SCST_MEASURE_LATENCY
++#define SCST_PROC_LAT_ENTRY_NAME "latency"
++#endif
++
++#define SCST_PROC_ACTION_ALL 1
++#define SCST_PROC_ACTION_NONE 2
++#define SCST_PROC_ACTION_DEFAULT 3
++#define SCST_PROC_ACTION_ADD 4
++#define SCST_PROC_ACTION_CLEAR 5
++#define SCST_PROC_ACTION_MOVE 6
++#define SCST_PROC_ACTION_DEL 7
++#define SCST_PROC_ACTION_REPLACE 8
++#define SCST_PROC_ACTION_VALUE 9
++#define SCST_PROC_ACTION_ASSIGN 10
++#define SCST_PROC_ACTION_ADD_GROUP 11
++#define SCST_PROC_ACTION_DEL_GROUP 12
++#define SCST_PROC_ACTION_RENAME_GROUP 13
++#define SCST_PROC_ACTION_DUMP_PRS 14
++
++static struct proc_dir_entry *scst_proc_scsi_tgt;
++static struct proc_dir_entry *scst_proc_groups_root;
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++static struct scst_proc_data scst_log_proc_data;
++
++static struct scst_trace_log scst_proc_trace_tbl[] = {
++ { TRACE_OUT_OF_MEM, "out_of_mem" },
++ { TRACE_MINOR, "minor" },
++ { TRACE_SG_OP, "sg" },
++ { TRACE_MEMORY, "mem" },
++ { TRACE_BUFF, "buff" },
++#ifndef GENERATING_UPSTREAM_PATCH
++ { TRACE_ENTRYEXIT, "entryexit" },
++#endif
++ { TRACE_PID, "pid" },
++ { TRACE_LINE, "line" },
++ { TRACE_FUNCTION, "function" },
++ { TRACE_DEBUG, "debug" },
++ { TRACE_SPECIAL, "special" },
++ { TRACE_SCSI, "scsi" },
++ { TRACE_MGMT, "mgmt" },
++ { TRACE_MGMT_DEBUG, "mgmt_dbg" },
++ { TRACE_FLOW_CONTROL, "flow_control" },
++ { TRACE_PRES, "pr" },
++ { 0, NULL }
++};
++
++static struct scst_trace_log scst_proc_local_trace_tbl[] = {
++ { TRACE_RTRY, "retry" },
++ { TRACE_SCSI_SERIALIZING, "scsi_serializing" },
++ { TRACE_RCV_BOT, "recv_bot" },
++ { TRACE_SND_BOT, "send_bot" },
++ { TRACE_RCV_TOP, "recv_top" },
++ { TRACE_SND_TOP, "send_top" },
++ { 0, NULL }
++};
++#endif
++
++static char *scst_proc_help_string =
++" echo \"assign H:C:I:L HANDLER_NAME\" >/proc/scsi_tgt/scsi_tgt\n"
++"\n"
++" echo \"add_group GROUP_NAME [FLAT]\" >/proc/scsi_tgt/scsi_tgt\n"
++" echo \"del_group GROUP_NAME\" >/proc/scsi_tgt/scsi_tgt\n"
++" echo \"rename_group OLD_NAME NEW_NAME\" >/proc/scsi_tgt/scsi_tgt\n"
++"\n"
++" echo \"add|del H:C:I:L lun [READ_ONLY]\""
++" >/proc/scsi_tgt/groups/GROUP_NAME/devices\n"
++" echo \"replace H:C:I:L lun [READ_ONLY]\""
++" >/proc/scsi_tgt/groups/GROUP_NAME/devices\n"
++" echo \"add|del V_NAME lun [READ_ONLY]\""
++" >/proc/scsi_tgt/groups/GROUP_NAME/devices\n"
++" echo \"replace V_NAME lun [READ_ONLY]\""
++" >/proc/scsi_tgt/groups/GROUP_NAME/devices\n"
++" echo \"clear\" >/proc/scsi_tgt/groups/GROUP_NAME/devices\n"
++"\n"
++" echo \"add|del NAME\" >/proc/scsi_tgt/groups/GROUP_NAME/names\n"
++" echo \"move NAME NEW_GROUP_NAME\" >/proc/scsi_tgt/groups/OLD_GROUP_NAME/names\n"
++" echo \"clear\" >/proc/scsi_tgt/groups/GROUP_NAME/names\n"
++"\n"
++" echo \"DEC|0xHEX|0OCT\" >/proc/scsi_tgt/threads\n"
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++"\n"
++" echo \"all|none|default\" >/proc/scsi_tgt/[DEV_HANDLER_NAME/]trace_level\n"
++" echo \"value DEC|0xHEX|0OCT\""
++" >/proc/scsi_tgt/[DEV_HANDLER_NAME/]trace_level\n"
++" echo \"set|add|del TOKEN\""
++" >/proc/scsi_tgt/[DEV_HANDLER_NAME/]trace_level\n"
++" where TOKEN is one of [debug, function, line, pid, entryexit,\n"
++" buff, mem, sg, out_of_mem, special, scsi,\n"
++" mgmt, minor, mgmt_dbg]\n"
++" Additionally for /proc/scsi_tgt/trace_level there are these TOKENs\n"
++" [scsi_serializing, retry, recv_bot, send_bot, recv_top, send_top]\n"
++" echo \"dump_prs dev_name\" >/proc/scsi_tgt/trace_level\n"
++#endif
++;
++
++static char *scst_proc_dev_handler_type[] = {
++ "Direct-access device (e.g., magnetic disk)",
++ "Sequential-access device (e.g., magnetic tape)",
++ "Printer device",
++ "Processor device",
++ "Write-once device (e.g., some optical disks)",
++ "CD-ROM device",
++ "Scanner device (obsolete)",
++ "Optical memory device (e.g., some optical disks)",
++ "Medium changer device (e.g., jukeboxes)",
++ "Communications device (obsolete)",
++ "Defined by ASC IT8 (Graphic arts pre-press devices)",
++ "Defined by ASC IT8 (Graphic arts pre-press devices)",
++ "Storage array controller device (e.g., RAID)",
++ "Enclosure services device",
++ "Simplified direct-access device (e.g., magnetic disk)",
++ "Optical card reader/writer device"
++};
++
++static DEFINE_MUTEX(scst_proc_mutex);
++
++#include <linux/ctype.h>
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++
++static DEFINE_MUTEX(scst_log_mutex);
++
++int scst_proc_log_entry_write(struct file *file, const char __user *buf,
++ unsigned long length, unsigned long *log_level,
++ unsigned long default_level, const struct scst_trace_log *tbl)
++{
++ int res = length;
++ int action;
++ unsigned long level = 0, oldlevel;
++ char *buffer, *p, *e;
++ const struct scst_trace_log *t;
++ char *data = (char *)PDE(file->f_dentry->d_inode)->data;
++
++ TRACE_ENTRY();
++
++ if (length > SCST_PROC_BLOCK_SIZE) {
++ res = -EOVERFLOW;
++ goto out;
++ }
++ if (!buf) {
++ res = -EINVAL;
++ goto out;
++ }
++ buffer = (char *)__get_free_page(GFP_KERNEL);
++ if (!buffer) {
++ res = -ENOMEM;
++ goto out;
++ }
++ if (copy_from_user(buffer, buf, length)) {
++ res = -EFAULT;
++ goto out_free;
++ }
++ if (length < PAGE_SIZE) {
++ buffer[length] = '\0';
++ } else if (buffer[PAGE_SIZE-1]) {
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ /*
++ * Usage:
++ * echo "all|none|default" >/proc/scsi_tgt/trace_level
++ * echo "value DEC|0xHEX|0OCT" >/proc/scsi_tgt/trace_level
++ * echo "add|del TOKEN" >/proc/scsi_tgt/trace_level
++ */
++ p = buffer;
++ if (!strncasecmp("all", p, 3)) {
++ action = SCST_PROC_ACTION_ALL;
++ } else if (!strncasecmp("none", p, 4) || !strncasecmp("null", p, 4)) {
++ action = SCST_PROC_ACTION_NONE;
++ } else if (!strncasecmp("default", p, 7)) {
++ action = SCST_PROC_ACTION_DEFAULT;
++ } else if (!strncasecmp("add ", p, 4)) {
++ p += 4;
++ action = SCST_PROC_ACTION_ADD;
++ } else if (!strncasecmp("del ", p, 4)) {
++ p += 4;
++ action = SCST_PROC_ACTION_DEL;
++ } else if (!strncasecmp("value ", p, 6)) {
++ p += 6;
++ action = SCST_PROC_ACTION_VALUE;
++ } else if (!strncasecmp("dump_prs ", p, 9)) {
++ p += 9;
++ action = SCST_PROC_ACTION_DUMP_PRS;
++ } else {
++ if (p[strlen(p) - 1] == '\n')
++ p[strlen(p) - 1] = '\0';
++ PRINT_ERROR("Unknown action \"%s\"", p);
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ switch (action) {
++ case SCST_PROC_ACTION_ALL:
++ level = TRACE_ALL;
++ break;
++ case SCST_PROC_ACTION_DEFAULT:
++ level = default_level;
++ break;
++ case SCST_PROC_ACTION_NONE:
++ level = TRACE_NULL;
++ break;
++ case SCST_PROC_ACTION_ADD:
++ case SCST_PROC_ACTION_DEL:
++ while (isspace(*p) && *p != '\0')
++ p++;
++ e = p;
++ while (!isspace(*e) && *e != '\0')
++ e++;
++ *e = 0;
++ if (tbl) {
++ t = tbl;
++ while (t->token) {
++ if (!strcasecmp(p, t->token)) {
++ level = t->val;
++ break;
++ }
++ t++;
++ }
++ }
++ if (level == 0) {
++ t = scst_proc_trace_tbl;
++ while (t->token) {
++ if (!strcasecmp(p, t->token)) {
++ level = t->val;
++ break;
++ }
++ t++;
++ }
++ }
++ if (level == 0) {
++ PRINT_ERROR("Unknown token \"%s\"", p);
++ res = -EINVAL;
++ goto out_free;
++ }
++ break;
++ case SCST_PROC_ACTION_VALUE:
++ while (isspace(*p) && *p != '\0')
++ p++;
++ level = simple_strtoul(p, NULL, 0);
++ break;
++ case SCST_PROC_ACTION_DUMP_PRS:
++ {
++ struct scst_device *dev;
++
++ while (isspace(*p) && *p != '\0')
++ p++;
++ e = p;
++ while (!isspace(*e) && *e != '\0')
++ e++;
++ *e = '\0';
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out_free;
++ }
++
++ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
++ if (strcmp(dev->virt_name, p) == 0) {
++ scst_pr_dump_prs(dev, true);
++ goto out_up;
++ }
++ }
++
++ PRINT_ERROR("Device %s not found", p);
++ res = -ENOENT;
++out_up:
++ mutex_unlock(&scst_mutex);
++ goto out_free;
++ }
++ }
++
++ oldlevel = *log_level;
++
++ switch (action) {
++ case SCST_PROC_ACTION_ADD:
++ *log_level |= level;
++ break;
++ case SCST_PROC_ACTION_DEL:
++ *log_level &= ~level;
++ break;
++ default:
++ *log_level = level;
++ break;
++ }
++
++ PRINT_INFO("Changed trace level for \"%s\": "
++ "old 0x%08lx, new 0x%08lx",
++ (char *)data, oldlevel, *log_level);
++
++out_free:
++ free_page((unsigned long)buffer);
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL_GPL(scst_proc_log_entry_write);
++
++static ssize_t scst_proc_scsi_tgt_gen_write_log(struct file *file,
++ const char __user *buf,
++ size_t length, loff_t *off)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_log_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ res = scst_proc_log_entry_write(file, buf, length,
++ &trace_flag, SCST_DEFAULT_LOG_FLAGS,
++ scst_proc_local_trace_tbl);
++
++ mutex_unlock(&scst_log_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
++
++#ifdef CONFIG_SCST_MEASURE_LATENCY
++
++static char *scst_io_size_names[] = {
++ "<=8K ",
++ "<=32K ",
++ "<=128K",
++ "<=512K",
++ ">512K "
++};
++
++static int lat_info_show(struct seq_file *seq, void *v)
++{
++ int res = 0;
++ struct scst_acg *acg;
++ struct scst_session *sess;
++ char buf[50];
++
++ TRACE_ENTRY();
++
++ BUILD_BUG_ON(SCST_LATENCY_STATS_NUM != ARRAY_SIZE(scst_io_size_names));
++ BUILD_BUG_ON(SCST_LATENCY_STATS_NUM != ARRAY_SIZE(sess->sess_latency_stat));
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ list_for_each_entry(acg, &scst_acg_list, acg_list_entry) {
++ bool header_printed = false;
++
++ list_for_each_entry(sess, &acg->acg_sess_list,
++ acg_sess_list_entry) {
++ unsigned int i;
++ int t;
++ uint64_t scst_time, tgt_time, dev_time;
++ unsigned int processed_cmds;
++
++ if (!header_printed) {
++ seq_printf(seq, "%-15s %-15s %-46s %-46s %-46s\n",
++ "T-L names", "Total commands", "SCST latency",
++ "Target latency", "Dev latency (min/avg/max/all ns)");
++ header_printed = true;
++ }
++
++ seq_printf(seq, "Target name: %s\nInitiator name: %s\n",
++ sess->tgt->tgtt->name,
++ sess->initiator_name);
++
++ spin_lock_bh(&sess->lat_lock);
++
++ for (i = 0; i < SCST_LATENCY_STATS_NUM ; i++) {
++ uint64_t scst_time_wr, tgt_time_wr, dev_time_wr;
++ unsigned int processed_cmds_wr;
++ uint64_t scst_time_rd, tgt_time_rd, dev_time_rd;
++ unsigned int processed_cmds_rd;
++ struct scst_ext_latency_stat *latency_stat;
++
++ latency_stat = &sess->sess_latency_stat[i];
++ scst_time_wr = latency_stat->scst_time_wr;
++ scst_time_rd = latency_stat->scst_time_rd;
++ tgt_time_wr = latency_stat->tgt_time_wr;
++ tgt_time_rd = latency_stat->tgt_time_rd;
++ dev_time_wr = latency_stat->dev_time_wr;
++ dev_time_rd = latency_stat->dev_time_rd;
++ processed_cmds_wr = latency_stat->processed_cmds_wr;
++ processed_cmds_rd = latency_stat->processed_cmds_rd;
++
++ seq_printf(seq, "%-5s %-9s %-15lu ",
++ "Write", scst_io_size_names[i],
++ (unsigned long)processed_cmds_wr);
++ if (processed_cmds_wr == 0)
++ processed_cmds_wr = 1;
++
++ do_div(scst_time_wr, processed_cmds_wr);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_scst_time_wr,
++ (unsigned long)scst_time_wr,
++ (unsigned long)latency_stat->max_scst_time_wr,
++ (unsigned long)latency_stat->scst_time_wr);
++ seq_printf(seq, "%-47s", buf);
++
++ do_div(tgt_time_wr, processed_cmds_wr);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_tgt_time_wr,
++ (unsigned long)tgt_time_wr,
++ (unsigned long)latency_stat->max_tgt_time_wr,
++ (unsigned long)latency_stat->tgt_time_wr);
++ seq_printf(seq, "%-47s", buf);
++
++ do_div(dev_time_wr, processed_cmds_wr);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_dev_time_wr,
++ (unsigned long)dev_time_wr,
++ (unsigned long)latency_stat->max_dev_time_wr,
++ (unsigned long)latency_stat->dev_time_wr);
++ seq_printf(seq, "%-47s\n", buf);
++
++ seq_printf(seq, "%-5s %-9s %-15lu ",
++ "Read", scst_io_size_names[i],
++ (unsigned long)processed_cmds_rd);
++ if (processed_cmds_rd == 0)
++ processed_cmds_rd = 1;
++
++ do_div(scst_time_rd, processed_cmds_rd);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_scst_time_rd,
++ (unsigned long)scst_time_rd,
++ (unsigned long)latency_stat->max_scst_time_rd,
++ (unsigned long)latency_stat->scst_time_rd);
++ seq_printf(seq, "%-47s", buf);
++
++ do_div(tgt_time_rd, processed_cmds_rd);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_tgt_time_rd,
++ (unsigned long)tgt_time_rd,
++ (unsigned long)latency_stat->max_tgt_time_rd,
++ (unsigned long)latency_stat->tgt_time_rd);
++ seq_printf(seq, "%-47s", buf);
++
++ do_div(dev_time_rd, processed_cmds_rd);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_dev_time_rd,
++ (unsigned long)dev_time_rd,
++ (unsigned long)latency_stat->max_dev_time_rd,
++ (unsigned long)latency_stat->dev_time_rd);
++ seq_printf(seq, "%-47s\n", buf);
++ }
++
++ for (t = TGT_DEV_HASH_SIZE-1; t >= 0; t--) {
++ struct list_head *sess_tgt_dev_list_head =
++ &sess->sess_tgt_dev_list_hash[t];
++ struct scst_tgt_dev *tgt_dev;
++ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
++ sess_tgt_dev_list_entry) {
++
++ seq_printf(seq, "\nLUN: %llu\n", tgt_dev->lun);
++
++ for (i = 0; i < SCST_LATENCY_STATS_NUM ; i++) {
++ uint64_t scst_time_wr, tgt_time_wr, dev_time_wr;
++ unsigned int processed_cmds_wr;
++ uint64_t scst_time_rd, tgt_time_rd, dev_time_rd;
++ unsigned int processed_cmds_rd;
++ struct scst_ext_latency_stat *latency_stat;
++
++ latency_stat = &tgt_dev->dev_latency_stat[i];
++ scst_time_wr = latency_stat->scst_time_wr;
++ scst_time_rd = latency_stat->scst_time_rd;
++ tgt_time_wr = latency_stat->tgt_time_wr;
++ tgt_time_rd = latency_stat->tgt_time_rd;
++ dev_time_wr = latency_stat->dev_time_wr;
++ dev_time_rd = latency_stat->dev_time_rd;
++ processed_cmds_wr = latency_stat->processed_cmds_wr;
++ processed_cmds_rd = latency_stat->processed_cmds_rd;
++
++ seq_printf(seq, "%-5s %-9s %-15lu ",
++ "Write", scst_io_size_names[i],
++ (unsigned long)processed_cmds_wr);
++ if (processed_cmds_wr == 0)
++ processed_cmds_wr = 1;
++
++ do_div(scst_time_wr, processed_cmds_wr);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_scst_time_wr,
++ (unsigned long)scst_time_wr,
++ (unsigned long)latency_stat->max_scst_time_wr,
++ (unsigned long)latency_stat->scst_time_wr);
++ seq_printf(seq, "%-47s", buf);
++
++ do_div(tgt_time_wr, processed_cmds_wr);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_tgt_time_wr,
++ (unsigned long)tgt_time_wr,
++ (unsigned long)latency_stat->max_tgt_time_wr,
++ (unsigned long)latency_stat->tgt_time_wr);
++ seq_printf(seq, "%-47s", buf);
++
++ do_div(dev_time_wr, processed_cmds_wr);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_dev_time_wr,
++ (unsigned long)dev_time_wr,
++ (unsigned long)latency_stat->max_dev_time_wr,
++ (unsigned long)latency_stat->dev_time_wr);
++ seq_printf(seq, "%-47s\n", buf);
++
++ seq_printf(seq, "%-5s %-9s %-15lu ",
++ "Read", scst_io_size_names[i],
++ (unsigned long)processed_cmds_rd);
++ if (processed_cmds_rd == 0)
++ processed_cmds_rd = 1;
++
++ do_div(scst_time_rd, processed_cmds_rd);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_scst_time_rd,
++ (unsigned long)scst_time_rd,
++ (unsigned long)latency_stat->max_scst_time_rd,
++ (unsigned long)latency_stat->scst_time_rd);
++ seq_printf(seq, "%-47s", buf);
++
++ do_div(tgt_time_rd, processed_cmds_rd);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_tgt_time_rd,
++ (unsigned long)tgt_time_rd,
++ (unsigned long)latency_stat->max_tgt_time_rd,
++ (unsigned long)latency_stat->tgt_time_rd);
++ seq_printf(seq, "%-47s", buf);
++
++ do_div(dev_time_rd, processed_cmds_rd);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_dev_time_rd,
++ (unsigned long)dev_time_rd,
++ (unsigned long)latency_stat->max_dev_time_rd,
++ (unsigned long)latency_stat->dev_time_rd);
++ seq_printf(seq, "%-47s\n", buf);
++ }
++ }
++ }
++
++ scst_time = sess->scst_time;
++ tgt_time = sess->tgt_time;
++ dev_time = sess->dev_time;
++ processed_cmds = sess->processed_cmds;
++
++ seq_printf(seq, "\n%-15s %-16d", "Overall ",
++ processed_cmds);
++
++ if (processed_cmds == 0)
++ processed_cmds = 1;
++
++ do_div(scst_time, processed_cmds);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)sess->min_scst_time,
++ (unsigned long)scst_time,
++ (unsigned long)sess->max_scst_time,
++ (unsigned long)sess->scst_time);
++ seq_printf(seq, "%-47s", buf);
++
++ do_div(tgt_time, processed_cmds);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)sess->min_tgt_time,
++ (unsigned long)tgt_time,
++ (unsigned long)sess->max_tgt_time,
++ (unsigned long)sess->tgt_time);
++ seq_printf(seq, "%-47s", buf);
++
++ do_div(dev_time, processed_cmds);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)sess->min_dev_time,
++ (unsigned long)dev_time,
++ (unsigned long)sess->max_dev_time,
++ (unsigned long)sess->dev_time);
++ seq_printf(seq, "%-47s\n\n", buf);
++
++ spin_unlock_bh(&sess->lat_lock);
++ }
++ }
++
++ mutex_unlock(&scst_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t scst_proc_scsi_tgt_gen_write_lat(struct file *file,
++ const char __user *buf,
++ size_t length, loff_t *off)
++{
++ int res = length, t;
++ struct scst_acg *acg;
++ struct scst_session *sess;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ list_for_each_entry(acg, &scst_acg_list, acg_list_entry) {
++ list_for_each_entry(sess, &acg->acg_sess_list,
++ acg_sess_list_entry) {
++ PRINT_INFO("Zeroing latency statistics for initiator "
++ "%s", sess->initiator_name);
++ spin_lock_bh(&sess->lat_lock);
++
++ sess->scst_time = 0;
++ sess->tgt_time = 0;
++ sess->dev_time = 0;
++ sess->min_scst_time = 0;
++ sess->min_tgt_time = 0;
++ sess->min_dev_time = 0;
++ sess->max_scst_time = 0;
++ sess->max_tgt_time = 0;
++ sess->max_dev_time = 0;
++ sess->processed_cmds = 0;
++ memset(sess->sess_latency_stat, 0,
++ sizeof(sess->sess_latency_stat));
++
++ for (t = TGT_DEV_HASH_SIZE-1; t >= 0; t--) {
++ struct list_head *sess_tgt_dev_list_head =
++ &sess->sess_tgt_dev_list_hash[t];
++ struct scst_tgt_dev *tgt_dev;
++ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
++ sess_tgt_dev_list_entry) {
++ tgt_dev->scst_time = 0;
++ tgt_dev->tgt_time = 0;
++ tgt_dev->dev_time = 0;
++ tgt_dev->processed_cmds = 0;
++ memset(tgt_dev->dev_latency_stat, 0,
++ sizeof(tgt_dev->dev_latency_stat));
++ }
++ }
++
++ spin_unlock_bh(&sess->lat_lock);
++ }
++ }
++
++ mutex_unlock(&scst_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct scst_proc_data scst_lat_proc_data = {
++ SCST_DEF_RW_SEQ_OP(scst_proc_scsi_tgt_gen_write_lat)
++ .show = lat_info_show,
++ .data = "scsi_tgt",
++};
++
++#endif /* CONFIG_SCST_MEASURE_LATENCY */
++
++static int __init scst_proc_init_module_log(void)
++{
++ int res = 0;
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) || \
++ defined(CONFIG_SCST_MEASURE_LATENCY)
++ struct proc_dir_entry *generic;
++#endif
++
++ TRACE_ENTRY();
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
++ SCST_PROC_LOG_ENTRY_NAME,
++ &scst_log_proc_data);
++ if (!generic) {
++ PRINT_ERROR("cannot init /proc/%s/%s",
++ SCST_PROC_ENTRY_NAME, SCST_PROC_LOG_ENTRY_NAME);
++ res = -ENOMEM;
++ }
++#endif
++
++#ifdef CONFIG_SCST_MEASURE_LATENCY
++ if (res == 0) {
++ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
++ SCST_PROC_LAT_ENTRY_NAME,
++ &scst_lat_proc_data);
++ if (!generic) {
++ PRINT_ERROR("cannot init /proc/%s/%s",
++ SCST_PROC_ENTRY_NAME,
++ SCST_PROC_LAT_ENTRY_NAME);
++ res = -ENOMEM;
++ }
++ }
++#endif
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void scst_proc_cleanup_module_log(void)
++{
++ TRACE_ENTRY();
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ remove_proc_entry(SCST_PROC_LOG_ENTRY_NAME, scst_proc_scsi_tgt);
++#endif
++
++#ifdef CONFIG_SCST_MEASURE_LATENCY
++ remove_proc_entry(SCST_PROC_LAT_ENTRY_NAME, scst_proc_scsi_tgt);
++#endif
++
++ TRACE_EXIT();
++ return;
++}
++
++static int scst_proc_group_add_tree(struct scst_acg *acg, const char *name)
++{
++ int res = 0;
++ struct proc_dir_entry *generic;
++
++ TRACE_ENTRY();
++
++ acg->acg_proc_root = proc_mkdir(name, scst_proc_groups_root);
++ if (acg->acg_proc_root == NULL) {
++ PRINT_ERROR("Not enough memory to register %s entry in "
++ "/proc/%s/%s", name, SCST_PROC_ENTRY_NAME,
++ SCST_PROC_GROUPS_ENTRY_NAME);
++ goto out;
++ }
++
++ scst_groups_addr_method_proc_data.data = acg;
++ generic = scst_create_proc_entry(acg->acg_proc_root,
++ SCST_PROC_GROUPS_ADDR_METHOD_ENTRY_NAME,
++ &scst_groups_addr_method_proc_data);
++ if (!generic) {
++ PRINT_ERROR("Cannot init /proc/%s/%s/%s/%s",
++ SCST_PROC_ENTRY_NAME,
++ SCST_PROC_GROUPS_ENTRY_NAME,
++ name, SCST_PROC_GROUPS_ADDR_METHOD_ENTRY_NAME);
++ res = -ENOMEM;
++ goto out_remove;
++ }
++
++ scst_groups_devices_proc_data.data = acg;
++ generic = scst_create_proc_entry(acg->acg_proc_root,
++ SCST_PROC_GROUPS_DEVICES_ENTRY_NAME,
++ &scst_groups_devices_proc_data);
++ if (!generic) {
++ PRINT_ERROR("Cannot init /proc/%s/%s/%s/%s",
++ SCST_PROC_ENTRY_NAME,
++ SCST_PROC_GROUPS_ENTRY_NAME,
++ name, SCST_PROC_GROUPS_DEVICES_ENTRY_NAME);
++ res = -ENOMEM;
++ goto out_remove0;
++ }
++
++ scst_groups_names_proc_data.data = acg;
++ generic = scst_create_proc_entry(acg->acg_proc_root,
++ SCST_PROC_GROUPS_USERS_ENTRY_NAME,
++ &scst_groups_names_proc_data);
++ if (!generic) {
++ PRINT_ERROR("Cannot init /proc/%s/%s/%s/%s",
++ SCST_PROC_ENTRY_NAME,
++ SCST_PROC_GROUPS_ENTRY_NAME,
++ name, SCST_PROC_GROUPS_USERS_ENTRY_NAME);
++ res = -ENOMEM;
++ goto out_remove1;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_remove1:
++ remove_proc_entry(SCST_PROC_GROUPS_DEVICES_ENTRY_NAME,
++ acg->acg_proc_root);
++
++out_remove0:
++ remove_proc_entry(SCST_PROC_GROUPS_ADDR_METHOD_ENTRY_NAME,
++ acg->acg_proc_root);
++out_remove:
++ remove_proc_entry(name, scst_proc_groups_root);
++ goto out;
++}
++
++static void scst_proc_del_acg_tree(struct proc_dir_entry *acg_proc_root,
++ const char *name)
++{
++ TRACE_ENTRY();
++
++ remove_proc_entry(SCST_PROC_GROUPS_ADDR_METHOD_ENTRY_NAME, acg_proc_root);
++ remove_proc_entry(SCST_PROC_GROUPS_USERS_ENTRY_NAME, acg_proc_root);
++ remove_proc_entry(SCST_PROC_GROUPS_DEVICES_ENTRY_NAME, acg_proc_root);
++ remove_proc_entry(name, scst_proc_groups_root);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* The activity is supposed to be suspended and scst_mutex held */
++static int scst_proc_group_add(const char *p, unsigned int addr_method)
++{
++ int res = 0, len = strlen(p) + 1;
++ struct scst_acg *acg;
++ char *name = NULL;
++
++ TRACE_ENTRY();
++
++ name = kmalloc(len, GFP_KERNEL);
++ if (name == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of name failed");
++ goto out_nomem;
++ }
++ strlcpy(name, p, len);
++
++ acg = scst_alloc_add_acg(NULL, name, false);
++ if (acg == NULL) {
++ PRINT_ERROR("scst_alloc_add_acg() (name %s) failed", name);
++ goto out_free;
++ }
++
++ acg->addr_method = addr_method;
++
++ res = scst_proc_group_add_tree(acg, p);
++ if (res != 0)
++ goto out_free_acg;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free_acg:
++ scst_proc_del_free_acg(acg, 0);
++
++out_free:
++ kfree(name);
++
++out_nomem:
++ res = -ENOMEM;
++ goto out;
++}
++
++/* The activity is supposed to be suspended and scst_mutex held */
++static int scst_proc_del_free_acg(struct scst_acg *acg, int remove_proc)
++{
++ struct proc_dir_entry *acg_proc_root = acg->acg_proc_root;
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ if (acg != scst_default_acg) {
++ if (!scst_acg_sess_is_empty(acg)) {
++ PRINT_ERROR("%s", "Session is not empty");
++ res = -EBUSY;
++ goto out;
++ }
++ if (remove_proc)
++ scst_proc_del_acg_tree(acg_proc_root, acg->acg_name);
++ scst_del_free_acg(acg);
++ }
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* The activity is supposed to be suspended and scst_mutex held */
++static int scst_proc_rename_acg(struct scst_acg *acg, const char *new_name)
++{
++ int res = 0, len = strlen(new_name) + 1;
++ char *name;
++ struct proc_dir_entry *old_acg_proc_root = acg->acg_proc_root;
++
++ TRACE_ENTRY();
++
++ name = kmalloc(len, GFP_KERNEL);
++ if (name == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of new name failed");
++ goto out_nomem;
++ }
++ strlcpy(name, new_name, len);
++
++ res = scst_proc_group_add_tree(acg, new_name);
++ if (res != 0)
++ goto out_free;
++
++ scst_proc_del_acg_tree(old_acg_proc_root, acg->acg_name);
++
++ kfree(acg->acg_name);
++ acg->acg_name = name;
++
++ scst_check_reassign_sessions();
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free:
++ kfree(name);
++
++out_nomem:
++ res = -ENOMEM;
++ goto out;
++}
++
++static int __init scst_proc_init_groups(void)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ /* create the proc directory entry for the device */
++ scst_proc_groups_root = proc_mkdir(SCST_PROC_GROUPS_ENTRY_NAME,
++ scst_proc_scsi_tgt);
++ if (scst_proc_groups_root == NULL) {
++ PRINT_ERROR("Not enough memory to register %s entry in "
++ "/proc/%s", SCST_PROC_GROUPS_ENTRY_NAME,
++ SCST_PROC_ENTRY_NAME);
++ goto out_nomem;
++ }
++
++ res = scst_proc_group_add_tree(scst_default_acg,
++ SCST_DEFAULT_ACG_NAME);
++ if (res != 0)
++ goto out_remove;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_remove:
++ remove_proc_entry(SCST_PROC_GROUPS_ENTRY_NAME, scst_proc_scsi_tgt);
++
++out_nomem:
++ res = -ENOMEM;
++ goto out;
++}
++
++static void scst_proc_cleanup_groups(void)
++{
++ struct scst_acg *acg_tmp, *acg;
++
++ TRACE_ENTRY();
++
++ /* remove all groups (dir & entries) */
++ list_for_each_entry_safe(acg, acg_tmp, &scst_acg_list,
++ acg_list_entry) {
++ scst_proc_del_free_acg(acg, 1);
++ }
++
++ scst_proc_del_acg_tree(scst_default_acg->acg_proc_root,
++ SCST_DEFAULT_ACG_NAME);
++ TRACE_DBG("remove_proc_entry(%s, %p)",
++ SCST_PROC_GROUPS_ENTRY_NAME, scst_proc_scsi_tgt);
++ remove_proc_entry(SCST_PROC_GROUPS_ENTRY_NAME, scst_proc_scsi_tgt);
++
++ TRACE_EXIT();
++}
++
++static int __init scst_proc_init_sgv(void)
++{
++ int res = 0;
++ struct proc_dir_entry *pr;
++
++ TRACE_ENTRY();
++
++ pr = scst_create_proc_entry(scst_proc_scsi_tgt, "sgv",
++ &scst_sgv_proc_data);
++ if (pr == NULL) {
++ PRINT_ERROR("%s", "cannot create sgv /proc entry");
++ res = -ENOMEM;
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void __exit scst_proc_cleanup_sgv(void)
++{
++ TRACE_ENTRY();
++ remove_proc_entry("sgv", scst_proc_scsi_tgt);
++ TRACE_EXIT();
++}
++
++int __init scst_proc_init_module(void)
++{
++ int res = 0;
++ struct proc_dir_entry *generic;
++
++ TRACE_ENTRY();
++
++ scst_proc_scsi_tgt = proc_mkdir(SCST_PROC_ENTRY_NAME, NULL);
++ if (!scst_proc_scsi_tgt) {
++ PRINT_ERROR("cannot init /proc/%s", SCST_PROC_ENTRY_NAME);
++ goto out_nomem;
++ }
++
++ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
++ SCST_PROC_ENTRY_NAME,
++ &scst_tgt_proc_data);
++ if (!generic) {
++ PRINT_ERROR("cannot init /proc/%s/%s",
++ SCST_PROC_ENTRY_NAME, SCST_PROC_ENTRY_NAME);
++ goto out_remove;
++ }
++
++ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
++ SCST_PROC_VERSION_NAME,
++ &scst_version_proc_data);
++ if (!generic) {
++ PRINT_ERROR("cannot init /proc/%s/%s",
++ SCST_PROC_ENTRY_NAME, SCST_PROC_VERSION_NAME);
++ goto out_remove1;
++ }
++
++ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
++ SCST_PROC_SESSIONS_NAME,
++ &scst_sessions_proc_data);
++ if (!generic) {
++ PRINT_ERROR("cannot init /proc/%s/%s",
++ SCST_PROC_ENTRY_NAME, SCST_PROC_SESSIONS_NAME);
++ goto out_remove2;
++ }
++
++ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
++ SCST_PROC_HELP_NAME,
++ &scst_help_proc_data);
++ if (!generic) {
++ PRINT_ERROR("cannot init /proc/%s/%s",
++ SCST_PROC_ENTRY_NAME, SCST_PROC_HELP_NAME);
++ goto out_remove3;
++ }
++
++ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
++ SCST_PROC_THREADS_NAME,
++ &scst_threads_proc_data);
++ if (!generic) {
++ PRINT_ERROR("cannot init /proc/%s/%s",
++ SCST_PROC_ENTRY_NAME, SCST_PROC_THREADS_NAME);
++ goto out_remove4;
++ }
++
++ if (scst_proc_init_module_log() < 0)
++ goto out_remove5;
++
++ if (scst_proc_init_groups() < 0)
++ goto out_remove6;
++
++ if (scst_proc_init_sgv() < 0)
++ goto out_remove7;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_remove7:
++ scst_proc_cleanup_groups();
++
++out_remove6:
++ scst_proc_cleanup_module_log();
++
++out_remove5:
++ remove_proc_entry(SCST_PROC_THREADS_NAME, scst_proc_scsi_tgt);
++
++out_remove4:
++ remove_proc_entry(SCST_PROC_HELP_NAME, scst_proc_scsi_tgt);
++
++out_remove3:
++ remove_proc_entry(SCST_PROC_SESSIONS_NAME, scst_proc_scsi_tgt);
++
++out_remove2:
++ remove_proc_entry(SCST_PROC_VERSION_NAME, scst_proc_scsi_tgt);
++
++out_remove1:
++ remove_proc_entry(SCST_PROC_ENTRY_NAME, scst_proc_scsi_tgt);
++
++out_remove:
++ remove_proc_entry(SCST_PROC_ENTRY_NAME, NULL);
++
++out_nomem:
++ res = -ENOMEM;
++ goto out;
++}
++
++void __exit scst_proc_cleanup_module(void)
++{
++ TRACE_ENTRY();
++
++ /* No need to bother about locks here */
++ scst_proc_cleanup_sgv();
++ scst_proc_cleanup_groups();
++ scst_proc_cleanup_module_log();
++ remove_proc_entry(SCST_PROC_THREADS_NAME, scst_proc_scsi_tgt);
++ remove_proc_entry(SCST_PROC_HELP_NAME, scst_proc_scsi_tgt);
++ remove_proc_entry(SCST_PROC_SESSIONS_NAME, scst_proc_scsi_tgt);
++ remove_proc_entry(SCST_PROC_VERSION_NAME, scst_proc_scsi_tgt);
++ remove_proc_entry(SCST_PROC_ENTRY_NAME, scst_proc_scsi_tgt);
++ remove_proc_entry(SCST_PROC_ENTRY_NAME, NULL);
++
++ TRACE_EXIT();
++}
++
++static ssize_t scst_proc_threads_write(struct file *file,
++ const char __user *buf,
++ size_t length, loff_t *off)
++{
++ int res = length;
++ int oldtn, newtn, delta;
++ char *buffer;
++
++ TRACE_ENTRY();
++
++ if (length > SCST_PROC_BLOCK_SIZE) {
++ res = -EOVERFLOW;
++ goto out;
++ }
++ if (!buf) {
++ res = -EINVAL;
++ goto out;
++ }
++ buffer = (char *)__get_free_page(GFP_KERNEL);
++ if (!buffer) {
++ res = -ENOMEM;
++ goto out;
++ }
++ if (copy_from_user(buffer, buf, length)) {
++ res = -EFAULT;
++ goto out_free;
++ }
++ if (length < PAGE_SIZE) {
++ buffer[length] = '\0';
++ } else if (buffer[PAGE_SIZE-1]) {
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ if (mutex_lock_interruptible(&scst_proc_mutex) != 0) {
++ res = -EINTR;
++ goto out_free;
++ }
++
++ mutex_lock(&scst_mutex);
++
++ oldtn = scst_main_cmd_threads.nr_threads;
++ newtn = simple_strtoul(buffer, NULL, 0);
++ if (newtn <= 0) {
++ PRINT_ERROR("Illegal threads num value %d", newtn);
++ res = -EINVAL;
++ goto out_up_thr_free;
++ }
++ delta = newtn - oldtn;
++ if (delta < 0)
++ scst_del_threads(&scst_main_cmd_threads, -delta);
++ else {
++ int rc = scst_add_threads(&scst_main_cmd_threads, NULL, NULL,
++ delta);
++ if (rc != 0)
++ res = rc;
++ }
++
++ PRINT_INFO("Changed cmd threads num: old %d, new %d", oldtn, newtn);
++
++out_up_thr_free:
++ mutex_unlock(&scst_mutex);
++
++ mutex_unlock(&scst_proc_mutex);
++
++out_free:
++ free_page((unsigned long)buffer);
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++int scst_build_proc_target_dir_entries(struct scst_tgt_template *vtt)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ /* create the proc directory entry for the device */
++ vtt->proc_tgt_root = proc_mkdir(vtt->name, scst_proc_scsi_tgt);
++ if (vtt->proc_tgt_root == NULL) {
++ PRINT_ERROR("Not enough memory to register SCSI target %s "
++ "in /proc/%s", vtt->name, SCST_PROC_ENTRY_NAME);
++ goto out_nomem;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_nomem:
++ res = -ENOMEM;
++ goto out;
++}
++
++void scst_cleanup_proc_target_dir_entries(struct scst_tgt_template *vtt)
++{
++ TRACE_ENTRY();
++
++ remove_proc_entry(vtt->name, scst_proc_scsi_tgt);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Called under scst_mutex */
++int scst_build_proc_target_entries(struct scst_tgt *vtt)
++{
++ int res = 0;
++ struct proc_dir_entry *p;
++ char name[20];
++
++ TRACE_ENTRY();
++
++ if (vtt->tgtt->read_proc || vtt->tgtt->write_proc) {
++ /* create the proc file entry for the device */
++ scnprintf(name, sizeof(name), "%d", vtt->tgtt->proc_dev_num);
++ scst_scsi_tgt_proc_data.data = (void *)vtt;
++ p = scst_create_proc_entry(vtt->tgtt->proc_tgt_root,
++ name,
++ &scst_scsi_tgt_proc_data);
++ if (p == NULL) {
++ PRINT_ERROR("Not enough memory to register SCSI "
++ "target entry %s in /proc/%s/%s", name,
++ SCST_PROC_ENTRY_NAME, vtt->tgtt->name);
++ res = -ENOMEM;
++ goto out;
++ }
++ vtt->proc_num = vtt->tgtt->proc_dev_num;
++ vtt->tgtt->proc_dev_num++;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++void scst_cleanup_proc_target_entries(struct scst_tgt *vtt)
++{
++ char name[20];
++
++ TRACE_ENTRY();
++
++ if (vtt->tgtt->read_proc || vtt->tgtt->write_proc) {
++ scnprintf(name, sizeof(name), "%d", vtt->proc_num);
++ remove_proc_entry(name, vtt->tgtt->proc_tgt_root);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static ssize_t scst_proc_scsi_tgt_write(struct file *file,
++ const char __user *buf,
++ size_t length, loff_t *off)
++{
++ struct scst_tgt *vtt =
++ (struct scst_tgt *)PDE(file->f_dentry->d_inode)->data;
++ ssize_t res = 0;
++ char *buffer;
++ char *start;
++ int eof = 0;
++
++ TRACE_ENTRY();
++
++ if (vtt->tgtt->write_proc == NULL) {
++ res = -ENOSYS;
++ goto out;
++ }
++
++ if (length > SCST_PROC_BLOCK_SIZE) {
++ res = -EOVERFLOW;
++ goto out;
++ }
++ if (!buf) {
++ res = -EINVAL;
++ goto out;
++ }
++ buffer = (char *)__get_free_page(GFP_KERNEL);
++ if (!buffer) {
++ res = -ENOMEM;
++ goto out;
++ }
++ if (copy_from_user(buffer, buf, length)) {
++ res = -EFAULT;
++ goto out_free;
++ }
++ if (length < PAGE_SIZE) {
++ buffer[length] = '\0';
++ } else if (buffer[PAGE_SIZE-1]) {
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ TRACE_BUFFER("Buffer", buffer, length);
++
++ if (mutex_lock_interruptible(&scst_proc_mutex) != 0) {
++ res = -EINTR;
++ goto out_free;
++ }
++
++ res = vtt->tgtt->write_proc(buffer, &start, 0, length, &eof, vtt);
++
++ mutex_unlock(&scst_proc_mutex);
++
++out_free:
++ free_page((unsigned long)buffer);
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++int scst_build_proc_dev_handler_dir_entries(struct scst_dev_type *dev_type)
++{
++ int res = 0;
++ struct proc_dir_entry *p;
++ const char *name; /* workaround to keep /proc ABI intact */
++
++ TRACE_ENTRY();
++
++ BUG_ON(dev_type->proc_dev_type_root);
++
++ if (strcmp(dev_type->name, "vdisk_fileio") == 0)
++ name = "vdisk";
++ else
++ name = dev_type->name;
++
++ /* create the proc directory entry for the dev type handler */
++ dev_type->proc_dev_type_root = proc_mkdir(name,
++ scst_proc_scsi_tgt);
++ if (dev_type->proc_dev_type_root == NULL) {
++ PRINT_ERROR("Not enough memory to register dev handler dir "
++ "%s in /proc/%s", name, SCST_PROC_ENTRY_NAME);
++ goto out_nomem;
++ }
++
++ scst_dev_handler_type_proc_data.data = dev_type;
++ if (dev_type->type >= 0) {
++ p = scst_create_proc_entry(dev_type->proc_dev_type_root,
++ SCST_PROC_DEV_HANDLER_TYPE_ENTRY_NAME,
++ &scst_dev_handler_type_proc_data);
++ if (p == NULL) {
++ PRINT_ERROR("Not enough memory to register dev "
++ "handler entry %s in /proc/%s/%s",
++ SCST_PROC_DEV_HANDLER_TYPE_ENTRY_NAME,
++ SCST_PROC_ENTRY_NAME, name);
++ goto out_remove;
++ }
++ }
++
++ if (dev_type->read_proc || dev_type->write_proc) {
++ /* create the proc file entry for the dev type handler */
++ scst_dev_handler_proc_data.data = (void *)dev_type;
++ p = scst_create_proc_entry(dev_type->proc_dev_type_root,
++ name,
++ &scst_dev_handler_proc_data);
++ if (p == NULL) {
++ PRINT_ERROR("Not enough memory to register dev "
++ "handler entry %s in /proc/%s/%s", name,
++ SCST_PROC_ENTRY_NAME, name);
++ goto out_remove1;
++ }
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_remove1:
++ if (dev_type->type >= 0)
++ remove_proc_entry(SCST_PROC_DEV_HANDLER_TYPE_ENTRY_NAME,
++ dev_type->proc_dev_type_root);
++
++out_remove:
++ remove_proc_entry(name, scst_proc_scsi_tgt);
++
++out_nomem:
++ res = -ENOMEM;
++ goto out;
++}
++
++void scst_cleanup_proc_dev_handler_dir_entries(struct scst_dev_type *dev_type)
++{
++ /* Workaround to keep /proc ABI intact */
++ const char *name;
++
++ TRACE_ENTRY();
++
++ BUG_ON(dev_type->proc_dev_type_root == NULL);
++
++ if (strcmp(dev_type->name, "vdisk_fileio") == 0)
++ name = "vdisk";
++ else
++ name = dev_type->name;
++
++ if (dev_type->type >= 0) {
++ remove_proc_entry(SCST_PROC_DEV_HANDLER_TYPE_ENTRY_NAME,
++ dev_type->proc_dev_type_root);
++ }
++ if (dev_type->read_proc || dev_type->write_proc)
++ remove_proc_entry(name, dev_type->proc_dev_type_root);
++ remove_proc_entry(name, scst_proc_scsi_tgt);
++ dev_type->proc_dev_type_root = NULL;
++
++ TRACE_EXIT();
++ return;
++}
++
++static ssize_t scst_proc_scsi_dev_handler_write(struct file *file,
++ const char __user *buf,
++ size_t length, loff_t *off)
++{
++ struct scst_dev_type *dev_type =
++ (struct scst_dev_type *)PDE(file->f_dentry->d_inode)->data;
++ ssize_t res = 0;
++ char *buffer;
++ char *start;
++ int eof = 0;
++
++ TRACE_ENTRY();
++
++ if (dev_type->write_proc == NULL) {
++ res = -ENOSYS;
++ goto out;
++ }
++
++ if (length > SCST_PROC_BLOCK_SIZE) {
++ res = -EOVERFLOW;
++ goto out;
++ }
++ if (!buf) {
++ res = -EINVAL;
++ goto out;
++ }
++
++ buffer = (char *)__get_free_page(GFP_KERNEL);
++ if (!buffer) {
++ res = -ENOMEM;
++ goto out;
++ }
++
++ if (copy_from_user(buffer, buf, length)) {
++ res = -EFAULT;
++ goto out_free;
++ }
++ if (length < PAGE_SIZE) {
++ buffer[length] = '\0';
++ } else if (buffer[PAGE_SIZE-1]) {
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ TRACE_BUFFER("Buffer", buffer, length);
++
++ if (mutex_lock_interruptible(&scst_proc_mutex) != 0) {
++ res = -EINTR;
++ goto out_free;
++ }
++
++ res = dev_type->write_proc(buffer, &start, 0, length, &eof, dev_type);
++
++ mutex_unlock(&scst_proc_mutex);
++
++out_free:
++ free_page((unsigned long)buffer);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t scst_proc_scsi_tgt_gen_write(struct file *file,
++ const char __user *buf,
++ size_t length, loff_t *off)
++{
++ int res, rc = 0, action;
++ char *buffer, *p, *pp, *ppp;
++ struct scst_acg *a, *acg = NULL;
++ unsigned int addr_method = SCST_LUN_ADDR_METHOD_PERIPHERAL;
++
++ TRACE_ENTRY();
++
++ if (length > SCST_PROC_BLOCK_SIZE) {
++ res = -EOVERFLOW;
++ goto out;
++ }
++ if (!buf) {
++ res = -EINVAL;
++ goto out;
++ }
++ buffer = (char *)__get_free_page(GFP_KERNEL);
++ if (!buffer) {
++ res = -ENOMEM;
++ goto out;
++ }
++ if (copy_from_user(buffer, buf, length)) {
++ res = -EFAULT;
++ goto out_free;
++ }
++ if (length < PAGE_SIZE) {
++ buffer[length] = '\0';
++ } else if (buffer[PAGE_SIZE-1]) {
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ /*
++ * Usage: echo "add_group GROUP_NAME [FLAT]" >/proc/scsi_tgt/scsi_tgt
++ * or echo "del_group GROUP_NAME" >/proc/scsi_tgt/scsi_tgt
++ * or echo "rename_group OLD_NAME NEW_NAME" >/proc/scsi_tgt/scsi_tgt"
++ * or echo "assign H:C:I:L HANDLER_NAME" >/proc/scsi_tgt/scsi_tgt
++ */
++ p = buffer;
++ if (p[strlen(p) - 1] == '\n')
++ p[strlen(p) - 1] = '\0';
++ if (!strncasecmp("assign ", p, 7)) {
++ p += 7;
++ action = SCST_PROC_ACTION_ASSIGN;
++ } else if (!strncasecmp("add_group ", p, 10)) {
++ p += 10;
++ action = SCST_PROC_ACTION_ADD_GROUP;
++ } else if (!strncasecmp("del_group ", p, 10)) {
++ p += 10;
++ action = SCST_PROC_ACTION_DEL_GROUP;
++ } else if (!strncasecmp("rename_group ", p, 13)) {
++ p += 13;
++ action = SCST_PROC_ACTION_RENAME_GROUP;
++ } else {
++ PRINT_ERROR("Unknown action \"%s\"", p);
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ res = scst_suspend_activity(true);
++ if (res != 0)
++ goto out_free;
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out_free_resume;
++ }
++
++ res = length;
++
++ while (isspace(*p) && *p != '\0')
++ p++;
++
++ switch (action) {
++ case SCST_PROC_ACTION_ADD_GROUP:
++ case SCST_PROC_ACTION_DEL_GROUP:
++ case SCST_PROC_ACTION_RENAME_GROUP:
++ pp = p;
++ while (!isspace(*pp) && *pp != '\0')
++ pp++;
++ if (*pp != '\0') {
++ *pp = '\0';
++ pp++;
++ while (isspace(*pp) && *pp != '\0')
++ pp++;
++ if (*pp != '\0') {
++ switch (action) {
++ case SCST_PROC_ACTION_ADD_GROUP:
++ ppp = pp;
++ while (!isspace(*ppp) && *ppp != '\0')
++ ppp++;
++ if (*ppp != '\0') {
++ *ppp = '\0';
++ ppp++;
++ while (isspace(*ppp) && *ppp != '\0')
++ ppp++;
++ if (*ppp != '\0') {
++ PRINT_ERROR("%s", "Too many "
++ "arguments");
++ res = -EINVAL;
++ goto out_up_free;
++ }
++ }
++ if (strcasecmp(pp, "FLAT") != 0) {
++ PRINT_ERROR("Unexpected "
++ "argument %s", pp);
++ res = -EINVAL;
++ goto out_up_free;
++ } else
++ addr_method = SCST_LUN_ADDR_METHOD_FLAT;
++ break;
++ case SCST_PROC_ACTION_DEL_GROUP:
++ PRINT_ERROR("%s", "Too many "
++ "arguments");
++ res = -EINVAL;
++ goto out_up_free;
++ }
++ }
++ }
++
++ if (strcmp(p, SCST_DEFAULT_ACG_NAME) == 0) {
++ PRINT_ERROR("Attempt to add/delete/rename predefined "
++ "group \"%s\"", p);
++ res = -EINVAL;
++ goto out_up_free;
++ }
++
++ list_for_each_entry(a, &scst_acg_list, acg_list_entry) {
++ if (strcmp(a->acg_name, p) == 0) {
++ TRACE_DBG("group (acg) %p %s found",
++ a, a->acg_name);
++ acg = a;
++ break;
++ }
++ }
++
++ switch (action) {
++ case SCST_PROC_ACTION_ADD_GROUP:
++ if (acg) {
++ PRINT_ERROR("acg name %s exist", p);
++ res = -EINVAL;
++ goto out_up_free;
++ }
++ rc = scst_proc_group_add(p, addr_method);
++ break;
++ case SCST_PROC_ACTION_DEL_GROUP:
++ if (acg == NULL) {
++ PRINT_ERROR("acg name %s not found", p);
++ res = -EINVAL;
++ goto out_up_free;
++ }
++ rc = scst_proc_del_free_acg(acg, 1);
++ break;
++ case SCST_PROC_ACTION_RENAME_GROUP:
++ if (acg == NULL) {
++ PRINT_ERROR("acg name %s not found", p);
++ res = -EINVAL;
++ goto out_up_free;
++ }
++
++ p = pp;
++ while (!isspace(*pp) && *pp != '\0')
++ pp++;
++ if (*pp != '\0') {
++ *pp = '\0';
++ pp++;
++ while (isspace(*pp) && *pp != '\0')
++ pp++;
++ if (*pp != '\0') {
++ PRINT_ERROR("%s", "Too many arguments");
++ res = -EINVAL;
++ goto out_up_free;
++ }
++ }
++ rc = scst_proc_rename_acg(acg, p);
++ break;
++ }
++ break;
++ case SCST_PROC_ACTION_ASSIGN:
++ rc = scst_proc_assign_handler(p);
++ break;
++ }
++
++ if (rc != 0)
++ res = rc;
++
++out_up_free:
++ mutex_unlock(&scst_mutex);
++
++out_free_resume:
++ scst_resume_activity();
++
++out_free:
++ free_page((unsigned long)buffer);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* The activity is supposed to be suspended and scst_mutex held */
++static int scst_proc_assign_handler(char *buf)
++{
++ int res = 0;
++ char *p = buf, *e, *ee;
++ unsigned long host, channel = 0, id = 0, lun = 0;
++ struct scst_device *d, *dev = NULL;
++ struct scst_dev_type *dt, *handler = NULL;
++
++ TRACE_ENTRY();
++
++ while (isspace(*p) && *p != '\0')
++ p++;
++
++ host = simple_strtoul(p, &p, 0);
++ if ((host == ULONG_MAX) || (*p != ':'))
++ goto out_synt_err;
++ p++;
++ channel = simple_strtoul(p, &p, 0);
++ if ((channel == ULONG_MAX) || (*p != ':'))
++ goto out_synt_err;
++ p++;
++ id = simple_strtoul(p, &p, 0);
++ if ((id == ULONG_MAX) || (*p != ':'))
++ goto out_synt_err;
++ p++;
++ lun = simple_strtoul(p, &p, 0);
++ if (lun == ULONG_MAX)
++ goto out_synt_err;
++
++ e = p;
++ e++;
++ while (isspace(*e) && *e != '\0')
++ e++;
++ ee = e;
++ while (!isspace(*ee) && *ee != '\0')
++ ee++;
++ *ee = '\0';
++
++ TRACE_DBG("Dev %ld:%ld:%ld:%ld, handler %s", host, channel, id, lun, e);
++
++ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
++ if ((d->virt_id == 0) &&
++ d->scsi_dev->host->host_no == host &&
++ d->scsi_dev->channel == channel &&
++ d->scsi_dev->id == id &&
++ d->scsi_dev->lun == lun) {
++ dev = d;
++ TRACE_DBG("Dev %p (%ld:%ld:%ld:%ld) found",
++ dev, host, channel, id, lun);
++ break;
++ }
++ }
++
++ if (dev == NULL) {
++ PRINT_ERROR("Device %ld:%ld:%ld:%ld not found",
++ host, channel, id, lun);
++ res = -EINVAL;
++ goto out;
++ }
++
++ list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
++ if (!strcmp(dt->name, e)) {
++ handler = dt;
++ TRACE_DBG("Dev handler %p with name %s found",
++ dt, dt->name);
++ break;
++ }
++ }
++
++ if (handler == NULL) {
++ PRINT_ERROR("Handler %s not found", e);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (dev->scsi_dev->type != handler->type) {
++ PRINT_ERROR("Type %d of device %s differs from type "
++ "%d of dev handler %s", dev->type,
++ dev->handler->name, handler->type, handler->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = scst_assign_dev_handler(dev, handler);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_synt_err:
++ PRINT_ERROR("Syntax error on %s", p);
++ res = -EINVAL;
++ goto out;
++}
++
++static ssize_t scst_proc_groups_devices_write(struct file *file,
++ const char __user *buf,
++ size_t length, loff_t *off)
++{
++ int res, action, rc, read_only = 0;
++ char *buffer, *p, *e = NULL;
++ unsigned int virt_lun;
++ struct scst_acg *acg =
++ (struct scst_acg *)PDE(file->f_dentry->d_inode)->data;
++ struct scst_acg_dev *acg_dev = NULL, *acg_dev_tmp;
++ struct scst_device *d, *dev = NULL;
++
++ TRACE_ENTRY();
++
++ if (length > SCST_PROC_BLOCK_SIZE) {
++ res = -EOVERFLOW;
++ goto out;
++ }
++ if (!buf) {
++ res = -EINVAL;
++ goto out;
++ }
++ buffer = (char *)__get_free_page(GFP_KERNEL);
++ if (!buffer) {
++ res = -ENOMEM;
++ goto out;
++ }
++ if (copy_from_user(buffer, buf, length)) {
++ res = -EFAULT;
++ goto out_free;
++ }
++ if (length < PAGE_SIZE) {
++ buffer[length] = '\0';
++ } else if (buffer[PAGE_SIZE-1]) {
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ /*
++ * Usage: echo "add|del H:C:I:L lun [READ_ONLY]" \
++ * >/proc/scsi_tgt/groups/GROUP_NAME/devices
++ * or echo "replace H:C:I:L lun [READ_ONLY]" \
++ * >/proc/scsi_tgt/groups/GROUP_NAME/devices
++ * or echo "add|del V_NAME lun [READ_ONLY]" \
++ * >/proc/scsi_tgt/groups/GROUP_NAME/devices
++ * or echo "replace V_NAME lun [READ_ONLY]" \
++ * >/proc/scsi_tgt/groups/GROUP_NAME/devices
++ * or echo "clear" >/proc/scsi_tgt/groups/GROUP_NAME/devices
++ */
++ p = buffer;
++ if (p[strlen(p) - 1] == '\n')
++ p[strlen(p) - 1] = '\0';
++ if (!strncasecmp("clear", p, 5)) {
++ action = SCST_PROC_ACTION_CLEAR;
++ } else if (!strncasecmp("add ", p, 4)) {
++ p += 4;
++ action = SCST_PROC_ACTION_ADD;
++ } else if (!strncasecmp("del ", p, 4)) {
++ p += 4;
++ action = SCST_PROC_ACTION_DEL;
++ } else if (!strncasecmp("replace ", p, 8)) {
++ p += 8;
++ action = SCST_PROC_ACTION_REPLACE;
++ } else {
++ PRINT_ERROR("Unknown action \"%s\"", p);
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ res = scst_suspend_activity(true);
++ if (res != 0)
++ goto out_free;
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out_free_resume;
++ }
++
++ res = length;
++
++ switch (action) {
++ case SCST_PROC_ACTION_ADD:
++ case SCST_PROC_ACTION_DEL:
++ case SCST_PROC_ACTION_REPLACE:
++ while (isspace(*p) && *p != '\0')
++ p++;
++ e = p; /* save p */
++ while (!isspace(*e) && *e != '\0')
++ e++;
++ *e = 0;
++
++ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
++ if (!strcmp(d->virt_name, p)) {
++ dev = d;
++ TRACE_DBG("Device %p (%s) found", dev, p);
++ break;
++ }
++ }
++ if (dev == NULL) {
++ PRINT_ERROR("Device %s not found", p);
++ res = -EINVAL;
++ goto out_free_up;
++ }
++ break;
++ }
++
++ /* ToDo: create separate functions */
++
++ switch (action) {
++ case SCST_PROC_ACTION_ADD:
++ case SCST_PROC_ACTION_REPLACE:
++ {
++ bool dev_replaced = false;
++
++ e++;
++ while (isspace(*e) && *e != '\0')
++ e++;
++ virt_lun = simple_strtoul(e, &e, 0);
++
++ while (isspace(*e) && *e != '\0')
++ e++;
++
++ if (*e != '\0') {
++ if (!strncasecmp("READ_ONLY", e, 9))
++ read_only = 1;
++ else {
++ PRINT_ERROR("Unknown option \"%s\"", e);
++ res = -EINVAL;
++ goto out_free_up;
++ }
++ }
++
++ list_for_each_entry(acg_dev_tmp, &acg->acg_dev_list,
++ acg_dev_list_entry) {
++ if (acg_dev_tmp->lun == virt_lun) {
++ acg_dev = acg_dev_tmp;
++ break;
++ }
++ }
++ if (acg_dev != NULL) {
++ if (action == SCST_PROC_ACTION_ADD) {
++ PRINT_ERROR("virt lun %d already exists in "
++ "group %s", virt_lun, acg->acg_name);
++ res = -EEXIST;
++ goto out_free_up;
++ } else {
++ /* Replace */
++ rc = scst_acg_del_lun(acg, acg_dev->lun,
++ false);
++ if (rc) {
++ res = rc;
++ goto out_free_up;
++ }
++ dev_replaced = true;
++ }
++ }
++
++ rc = scst_acg_add_lun(acg, NULL, dev, virt_lun, read_only,
++ false, NULL);
++ if (rc) {
++ res = rc;
++ goto out_free_up;
++ }
++
++ if (action == SCST_PROC_ACTION_ADD)
++ scst_report_luns_changed(acg);
++
++ if (dev_replaced) {
++ struct scst_tgt_dev *tgt_dev;
++
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ if ((tgt_dev->acg_dev->acg == acg) &&
++ (tgt_dev->lun == virt_lun)) {
++ TRACE_MGMT_DBG("INQUIRY DATA HAS CHANGED"
++ " on tgt_dev %p", tgt_dev);
++ scst_gen_aen_or_ua(tgt_dev,
++ SCST_LOAD_SENSE(scst_sense_inquery_data_changed));
++ }
++ }
++ }
++ break;
++ }
++ case SCST_PROC_ACTION_DEL:
++ {
++ /*
++ * This code doesn't handle if there are >1 LUNs for the same
++ * device in the group. Instead, it always deletes the first
++ * entry. It wasn't fixed for compatibility reasons, because
++ * procfs is now obsoleted.
++ */
++ struct scst_acg_dev *a;
++ list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
++ if (a->dev == dev) {
++ rc = scst_acg_del_lun(acg, a->lun, true);
++ if (rc) {
++ res = rc;
++ goto out_free_up;
++ }
++ break;
++ }
++ }
++ PRINT_ERROR("Device is not found in group %s", acg->acg_name);
++ break;
++ }
++ case SCST_PROC_ACTION_CLEAR:
++ list_for_each_entry_safe(acg_dev, acg_dev_tmp,
++ &acg->acg_dev_list,
++ acg_dev_list_entry) {
++ rc = scst_acg_del_lun(acg, acg_dev->lun,
++ list_is_last(&acg_dev->acg_dev_list_entry,
++ &acg->acg_dev_list));
++ if (rc) {
++ res = rc;
++ goto out_free_up;
++ }
++ }
++ break;
++ }
++
++out_free_up:
++ mutex_unlock(&scst_mutex);
++
++out_free_resume:
++ scst_resume_activity();
++
++out_free:
++ free_page((unsigned long)buffer);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t scst_proc_groups_names_write(struct file *file,
++ const char __user *buf,
++ size_t length, loff_t *off)
++{
++ int res = length, rc = 0, action;
++ char *buffer, *p, *pp = NULL;
++ struct scst_acg *acg =
++ (struct scst_acg *)PDE(file->f_dentry->d_inode)->data;
++ struct scst_acn *n, *nn;
++
++ TRACE_ENTRY();
++
++ if (length > SCST_PROC_BLOCK_SIZE) {
++ res = -EOVERFLOW;
++ goto out;
++ }
++ if (!buf) {
++ res = -EINVAL;
++ goto out;
++ }
++ buffer = (char *)__get_free_page(GFP_KERNEL);
++ if (!buffer) {
++ res = -ENOMEM;
++ goto out;
++ }
++ if (copy_from_user(buffer, buf, length)) {
++ res = -EFAULT;
++ goto out_free;
++ }
++ if (length < PAGE_SIZE) {
++ buffer[length] = '\0';
++ } else if (buffer[PAGE_SIZE-1]) {
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ /*
++ * Usage: echo "add|del NAME" >/proc/scsi_tgt/groups/GROUP_NAME/names
++ * or echo "move NAME NEW_GROUP_NAME" >/proc/scsi_tgt/groups/OLD_GROUP_NAME/names"
++ * or echo "clear" >/proc/scsi_tgt/groups/GROUP_NAME/names
++ */
++ p = buffer;
++ if (p[strlen(p) - 1] == '\n')
++ p[strlen(p) - 1] = '\0';
++ if (!strncasecmp("clear", p, 5)) {
++ action = SCST_PROC_ACTION_CLEAR;
++ } else if (!strncasecmp("add ", p, 4)) {
++ p += 4;
++ action = SCST_PROC_ACTION_ADD;
++ } else if (!strncasecmp("del ", p, 4)) {
++ p += 4;
++ action = SCST_PROC_ACTION_DEL;
++ } else if (!strncasecmp("move ", p, 5)) {
++ p += 5;
++ action = SCST_PROC_ACTION_MOVE;
++ } else {
++ PRINT_ERROR("Unknown action \"%s\"", p);
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ switch (action) {
++ case SCST_PROC_ACTION_ADD:
++ case SCST_PROC_ACTION_DEL:
++ case SCST_PROC_ACTION_MOVE:
++ while (isspace(*p) && *p != '\0')
++ p++;
++ pp = p;
++ while (!isspace(*pp) && *pp != '\0')
++ pp++;
++ if (*pp != '\0') {
++ *pp = '\0';
++ pp++;
++ while (isspace(*pp) && *pp != '\0')
++ pp++;
++ if (*pp != '\0') {
++ switch (action) {
++ case SCST_PROC_ACTION_ADD:
++ case SCST_PROC_ACTION_DEL:
++ PRINT_ERROR("%s", "Too many "
++ "arguments");
++ res = -EINVAL;
++ goto out_free;
++ }
++ }
++ }
++ break;
++ }
++
++ rc = scst_suspend_activity(true);
++ if (rc != 0)
++ goto out_free;
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out_free_resume;
++ }
++
++ switch (action) {
++ case SCST_PROC_ACTION_ADD:
++ rc = scst_acg_add_acn(acg, p);
++ break;
++ case SCST_PROC_ACTION_DEL:
++ rc = scst_acg_remove_name(acg, p, true);
++ break;
++ case SCST_PROC_ACTION_MOVE:
++ {
++ struct scst_acg *a, *new_acg = NULL;
++ char *name = p;
++ p = pp;
++ while (!isspace(*pp) && *pp != '\0')
++ pp++;
++ if (*pp != '\0') {
++ *pp = '\0';
++ pp++;
++ while (isspace(*pp) && *pp != '\0')
++ pp++;
++ if (*pp != '\0') {
++ PRINT_ERROR("%s", "Too many arguments");
++ res = -EINVAL;
++ goto out_free_unlock;
++ }
++ }
++ list_for_each_entry(a, &scst_acg_list, acg_list_entry) {
++ if (strcmp(a->acg_name, p) == 0) {
++ TRACE_DBG("group (acg) %p %s found",
++ a, a->acg_name);
++ new_acg = a;
++ break;
++ }
++ }
++ if (new_acg == NULL) {
++ PRINT_ERROR("Group %s not found", p);
++ res = -EINVAL;
++ goto out_free_unlock;
++ }
++ rc = scst_acg_remove_name(acg, name, false);
++ if (rc != 0)
++ goto out_free_unlock;
++ rc = scst_acg_add_acn(new_acg, name);
++ if (rc != 0)
++ scst_acg_add_acn(acg, name);
++ break;
++ }
++ case SCST_PROC_ACTION_CLEAR:
++ list_for_each_entry_safe(n, nn, &acg->acn_list,
++ acn_list_entry) {
++ scst_del_free_acn(n, false);
++ }
++ scst_check_reassign_sessions();
++ break;
++ }
++
++out_free_unlock:
++ mutex_unlock(&scst_mutex);
++
++out_free_resume:
++ scst_resume_activity();
++
++out_free:
++ free_page((unsigned long)buffer);
++
++out:
++ if (rc < 0)
++ res = rc;
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_version_info_show(struct seq_file *seq, void *v)
++{
++ TRACE_ENTRY();
++
++ seq_printf(seq, "%s\n", SCST_VERSION_STRING);
++
++#ifdef CONFIG_SCST_STRICT_SERIALIZING
++ seq_printf(seq, "STRICT_SERIALIZING\n");
++#endif
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ seq_printf(seq, "EXTRACHECKS\n");
++#endif
++
++#ifdef CONFIG_SCST_TRACING
++ seq_printf(seq, "TRACING\n");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG
++ seq_printf(seq, "DEBUG\n");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG_TM
++ seq_printf(seq, "DEBUG_TM\n");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG_RETRY
++ seq_printf(seq, "DEBUG_RETRY\n");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG_OOM
++ seq_printf(seq, "DEBUG_OOM\n");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG_SN
++ seq_printf(seq, "DEBUG_SN\n");
++#endif
++
++#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
++ seq_printf(seq, "USE_EXPECTED_VALUES\n");
++#endif
++
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ seq_printf(seq, "TEST_IO_IN_SIRQ\n");
++#endif
++
++#ifdef CONFIG_SCST_STRICT_SECURITY
++ seq_printf(seq, "STRICT_SECURITY\n");
++#endif
++
++ TRACE_EXIT();
++ return 0;
++}
++
++static struct scst_proc_data scst_version_proc_data = {
++ SCST_DEF_RW_SEQ_OP(NULL)
++ .show = scst_version_info_show,
++};
++
++static int scst_help_info_show(struct seq_file *seq, void *v)
++{
++ TRACE_ENTRY();
++
++ seq_printf(seq, "%s\n", scst_proc_help_string);
++
++ TRACE_EXIT();
++ return 0;
++}
++
++static struct scst_proc_data scst_help_proc_data = {
++ SCST_DEF_RW_SEQ_OP(NULL)
++ .show = scst_help_info_show,
++};
++
++static int scst_dev_handler_type_info_show(struct seq_file *seq, void *v)
++{
++ struct scst_dev_type *dev_type = (struct scst_dev_type *)seq->private;
++
++ TRACE_ENTRY();
++
++ seq_printf(seq, "%d - %s\n", dev_type->type,
++ dev_type->type >= (int)ARRAY_SIZE(scst_proc_dev_handler_type)
++ ? "unknown" : scst_proc_dev_handler_type[dev_type->type]);
++
++ TRACE_EXIT();
++ return 0;
++}
++
++static struct scst_proc_data scst_dev_handler_type_proc_data = {
++ SCST_DEF_RW_SEQ_OP(NULL)
++ .show = scst_dev_handler_type_info_show,
++};
++
++static int scst_sessions_info_show(struct seq_file *seq, void *v)
++{
++ int res = 0;
++ struct scst_acg *acg;
++ struct scst_session *sess;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ seq_printf(seq, "%-20s %-45s %-35s %-15s\n",
++ "Target name", "Initiator name",
++ "Group name", "Active/All Commands Count");
++
++ list_for_each_entry(acg, &scst_acg_list, acg_list_entry) {
++ list_for_each_entry(sess, &acg->acg_sess_list,
++ acg_sess_list_entry) {
++ int active_cmds = 0, t;
++ for (t = TGT_DEV_HASH_SIZE-1; t >= 0; t--) {
++ struct list_head *sess_tgt_dev_list_head =
++ &sess->sess_tgt_dev_list_hash[t];
++ struct scst_tgt_dev *tgt_dev;
++ list_for_each_entry(tgt_dev,
++ sess_tgt_dev_list_head,
++ sess_tgt_dev_list_entry) {
++ active_cmds += atomic_read(&tgt_dev->tgt_dev_cmd_count);
++ }
++ }
++ seq_printf(seq, "%-20s %-45s %-35s %d/%d\n",
++ sess->tgt->tgtt->name,
++ sess->initiator_name,
++ acg->acg_name, active_cmds,
++ atomic_read(&sess->sess_cmd_count));
++ }
++ }
++
++ mutex_unlock(&scst_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct scst_proc_data scst_sessions_proc_data = {
++ SCST_DEF_RW_SEQ_OP(NULL)
++ .show = scst_sessions_info_show,
++};
++
++static struct scst_proc_data scst_sgv_proc_data = {
++ SCST_DEF_RW_SEQ_OP(NULL)
++ .show = sgv_procinfo_show,
++};
++
++static int scst_groups_names_show(struct seq_file *seq, void *v)
++{
++ int res = 0;
++ struct scst_acg *acg = (struct scst_acg *)seq->private;
++ struct scst_acn *name;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ list_for_each_entry(name, &acg->acn_list, acn_list_entry) {
++ seq_printf(seq, "%s\n", name->name);
++ }
++
++ mutex_unlock(&scst_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct scst_proc_data scst_groups_names_proc_data = {
++ SCST_DEF_RW_SEQ_OP(scst_proc_groups_names_write)
++ .show = scst_groups_names_show,
++};
++
++static int scst_groups_addr_method_show(struct seq_file *seq, void *v)
++{
++ int res = 0;
++ struct scst_acg *acg = (struct scst_acg *)seq->private;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ switch (acg->addr_method) {
++ case SCST_LUN_ADDR_METHOD_FLAT:
++ seq_printf(seq, "%s\n", "FLAT");
++ break;
++ case SCST_LUN_ADDR_METHOD_PERIPHERAL:
++ seq_printf(seq, "%s\n", "PERIPHERAL");
++ break;
++ default:
++ seq_printf(seq, "%s\n", "UNKNOWN");
++ break;
++ }
++
++ mutex_unlock(&scst_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++static struct scst_proc_data scst_groups_addr_method_proc_data = {
++ SCST_DEF_RW_SEQ_OP(NULL)
++ .show = scst_groups_addr_method_show,
++};
++static int scst_groups_devices_show(struct seq_file *seq, void *v)
++{
++ int res = 0;
++ struct scst_acg *acg = (struct scst_acg *)seq->private;
++ struct scst_acg_dev *acg_dev;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ seq_printf(seq, "%-60s%-13s%s\n", "Device (host:ch:id:lun or name)",
++ "LUN", "Options");
++
++ list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
++ seq_printf(seq, "%-60s%-13lld%s\n",
++ acg_dev->dev->virt_name,
++ (long long unsigned int)acg_dev->lun,
++ acg_dev->rd_only ? "RO" : "");
++ }
++ mutex_unlock(&scst_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct scst_proc_data scst_groups_devices_proc_data = {
++ SCST_DEF_RW_SEQ_OP(scst_proc_groups_devices_write)
++ .show = scst_groups_devices_show,
++};
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++
++static int scst_proc_read_tlb(const struct scst_trace_log *tbl,
++ struct seq_file *seq,
++ unsigned long log_level, int *first)
++{
++ const struct scst_trace_log *t = tbl;
++ int res = 0;
++
++ while (t->token) {
++ if (log_level & t->val) {
++ seq_printf(seq, "%s%s", *first ? "" : " | ", t->token);
++ *first = 0;
++ }
++ t++;
++ }
++ return res;
++}
++
++int scst_proc_log_entry_read(struct seq_file *seq, unsigned long log_level,
++ const struct scst_trace_log *tbl)
++{
++ int res = 0, first = 1;
++
++ TRACE_ENTRY();
++
++ scst_proc_read_tlb(scst_proc_trace_tbl, seq, log_level, &first);
++
++ if (tbl)
++ scst_proc_read_tlb(tbl, seq, log_level, &first);
++
++ seq_printf(seq, "%s\n", first ? "none" : "");
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL_GPL(scst_proc_log_entry_read);
++
++static int log_info_show(struct seq_file *seq, void *v)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_log_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ res = scst_proc_log_entry_read(seq, trace_flag,
++ scst_proc_local_trace_tbl);
++
++ mutex_unlock(&scst_log_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct scst_proc_data scst_log_proc_data = {
++ SCST_DEF_RW_SEQ_OP(scst_proc_scsi_tgt_gen_write_log)
++ .show = log_info_show,
++ .data = "scsi_tgt",
++};
++
++#endif
++
++static int scst_tgt_info_show(struct seq_file *seq, void *v)
++{
++ int res = 0;
++ struct scst_device *dev;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ seq_printf(seq, "%-60s%s\n", "Device (host:ch:id:lun or name)",
++ "Device handler");
++ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
++ seq_printf(seq, "%-60s%s\n",
++ dev->virt_name, dev->handler->name);
++ }
++
++ mutex_unlock(&scst_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct scst_proc_data scst_tgt_proc_data = {
++ SCST_DEF_RW_SEQ_OP(scst_proc_scsi_tgt_gen_write)
++ .show = scst_tgt_info_show,
++};
++
++static int scst_threads_info_show(struct seq_file *seq, void *v)
++{
++ TRACE_ENTRY();
++
++ seq_printf(seq, "%d\n", scst_main_cmd_threads.nr_threads);
++
++ TRACE_EXIT();
++ return 0;
++}
++
++static struct scst_proc_data scst_threads_proc_data = {
++ SCST_DEF_RW_SEQ_OP(scst_proc_threads_write)
++ .show = scst_threads_info_show,
++};
++
++static int scst_scsi_tgtinfo_show(struct seq_file *seq, void *v)
++{
++ struct scst_tgt *vtt = seq->private;
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_proc_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ if (vtt->tgtt->read_proc)
++ res = vtt->tgtt->read_proc(seq, vtt);
++
++ mutex_unlock(&scst_proc_mutex);
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct scst_proc_data scst_scsi_tgt_proc_data = {
++ SCST_DEF_RW_SEQ_OP(scst_proc_scsi_tgt_write)
++ .show = scst_scsi_tgtinfo_show,
++};
++
++static int scst_dev_handler_info_show(struct seq_file *seq, void *v)
++{
++ struct scst_dev_type *dev_type = seq->private;
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_proc_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ if (dev_type->read_proc)
++ res = dev_type->read_proc(seq, dev_type);
++
++ mutex_unlock(&scst_proc_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct scst_proc_data scst_dev_handler_proc_data = {
++ SCST_DEF_RW_SEQ_OP(scst_proc_scsi_dev_handler_write)
++ .show = scst_dev_handler_info_show,
++};
++
++struct proc_dir_entry *scst_create_proc_entry(struct proc_dir_entry *root,
++ const char *name, struct scst_proc_data *pdata)
++{
++ struct proc_dir_entry *p = NULL;
++
++ TRACE_ENTRY();
++
++ if (root) {
++ mode_t mode;
++
++ mode = S_IFREG | S_IRUGO | (pdata->seq_op.write ? S_IWUSR : 0);
++ p = create_proc_entry(name, mode, root);
++ if (p == NULL) {
++			PRINT_ERROR("Failed to create entry %s in /proc", name);
++ } else {
++ p->proc_fops = &pdata->seq_op;
++ p->data = pdata->data;
++ }
++ }
++
++ TRACE_EXIT();
++ return p;
++}
++EXPORT_SYMBOL_GPL(scst_create_proc_entry);
++
++int scst_single_seq_open(struct inode *inode, struct file *file)
++{
++ struct scst_proc_data *pdata = container_of(PDE(inode)->proc_fops,
++ struct scst_proc_data, seq_op);
++ return single_open(file, pdata->show, PDE(inode)->data);
++}
++EXPORT_SYMBOL_GPL(scst_single_seq_open);
++
++struct proc_dir_entry *scst_proc_get_tgt_root(
++ struct scst_tgt_template *vtt)
++{
++ return vtt->proc_tgt_root;
++}
++EXPORT_SYMBOL_GPL(scst_proc_get_tgt_root);
++
++struct proc_dir_entry *scst_proc_get_dev_type_root(
++ struct scst_dev_type *dtt)
++{
++ return dtt->proc_dev_type_root;
++}
++EXPORT_SYMBOL_GPL(scst_proc_get_dev_type_root);
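For orientation, the exported helpers above (scst_proc_get_tgt_root(), scst_create_proc_entry() and scst_single_seq_open() via SCST_DEF_RW_SEQ_OP) are what a target driver uses to publish its own /proc entries. A minimal, hypothetical caller could look like the sketch below; the my_tgt_* names are invented for illustration and are not part of this patch.

/* Hypothetical target-driver side: register a read-only /proc "info" entry. */
static int my_tgt_info_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%s\n", "my_tgt is alive");
	return 0;
}

static struct scst_proc_data my_tgt_proc_data = {
	SCST_DEF_RW_SEQ_OP(NULL)	/* NULL write handler -> read-only */
	.show = my_tgt_info_show,
};

static int my_tgt_register_proc(struct scst_tgt_template *tgtt)
{
	struct proc_dir_entry *root = scst_proc_get_tgt_root(tgtt);

	if (root == NULL)
		return -ENOENT;
	if (scst_create_proc_entry(root, "info", &my_tgt_proc_data) == NULL)
		return -ENOMEM;
	return 0;
}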
+diff -uprN orig/linux-2.6.36/include/scst/scst_sgv.h linux-2.6.36/include/scst/scst_sgv.h
+--- orig/linux-2.6.36/include/scst/scst_sgv.h
++++ linux-2.6.36/include/scst/scst_sgv.h
+@@ -0,0 +1,98 @@
++/*
++ * include/scst_sgv.h
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * Include file for SCST SGV cache.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++#ifndef __SCST_SGV_H
++#define __SCST_SGV_H
++
++/** SGV pool routines and flag bits **/
++
++/* Set if the allocated object must not come from the cache */
++#define SGV_POOL_ALLOC_NO_CACHED 1
++
++/* Set if there should not be any memory allocations on a cache miss */
++#define SGV_POOL_NO_ALLOC_ON_CACHE_MISS 2
++
++/* Set if an object should be returned even without a built SG vector */
++#define SGV_POOL_RETURN_OBJ_ON_ALLOC_FAIL 4
++
++/*
++ * Set if the allocated object must be a new one, i.e. allocated from the
++ * cache's slab but not taken from the already cached (recycled) entries
++ */
++#define SGV_POOL_ALLOC_GET_NEW 8
++
++struct sgv_pool_obj;
++struct sgv_pool;
++
++/*
++ * Structure to keep a memory limit for an SCST object
++ */
++struct scst_mem_lim {
++	/* How much memory is allocated under this object */
++ atomic_t alloced_pages;
++
++ /*
++	 * How much memory is allowed to be allocated under this object. Kept
++	 * here mostly to avoid a possible cache miss when accessing
++	 * scst_max_dev_cmd_mem.
++ */
++ int max_allowed_pages;
++};
++
++/* Types of clustering */
++enum sgv_clustering_types {
++ /* No clustering performed */
++ sgv_no_clustering = 0,
++
++ /*
++ * A page will only be merged with the latest previously allocated
++ * page, so the order of pages in the SG will be preserved.
++ */
++ sgv_tail_clustering,
++
++ /*
++ * Free merging of pages at any place in the SG is allowed. This mode
++ * usually provides the best merging rate.
++ */
++ sgv_full_clustering,
++};
++
++struct sgv_pool *sgv_pool_create(const char *name,
++ enum sgv_clustering_types clustered, int single_alloc_pages,
++ bool shared, int purge_interval);
++void sgv_pool_del(struct sgv_pool *pool);
++
++void sgv_pool_get(struct sgv_pool *pool);
++void sgv_pool_put(struct sgv_pool *pool);
++
++void sgv_pool_flush(struct sgv_pool *pool);
++
++void sgv_pool_set_allocator(struct sgv_pool *pool,
++ struct page *(*alloc_pages_fn)(struct scatterlist *, gfp_t, void *),
++ void (*free_pages_fn)(struct scatterlist *, int, void *));
++
++struct scatterlist *sgv_pool_alloc(struct sgv_pool *pool, unsigned int size,
++ gfp_t gfp_mask, int flags, int *count,
++ struct sgv_pool_obj **sgv, struct scst_mem_lim *mem_lim, void *priv);
++void sgv_pool_free(struct sgv_pool_obj *sgv, struct scst_mem_lim *mem_lim);
++
++void *sgv_get_priv(struct sgv_pool_obj *sgv);
++
++void scst_init_mem_lim(struct scst_mem_lim *mem_lim);
++
++#endif /* __SCST_SGV_H */
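The header above is the whole public SGV cache interface. Assuming a pool has already been created with sgv_pool_create() (defined later in this patch), a minimal allocation/free round trip by a hypothetical caller could look roughly as follows; all my_* names are illustrative only, and in real drivers the scst_mem_lim would normally be long-lived (per device) rather than on the stack.

/* Hypothetical sketch: one allocation cycle against an existing SGV pool. */
static int my_use_pool(struct sgv_pool *pool, unsigned int size)
{
	struct scst_mem_lim mem_lim;		/* normally per-device, long-lived */
	struct sgv_pool_obj *sgv = NULL;	/* must be NULL for a fresh alloc */
	struct scatterlist *sg;
	int count;

	scst_init_mem_lim(&mem_lim);

	sg = sgv_pool_alloc(pool, size, GFP_KERNEL, 0, &count,
			    &sgv, &mem_lim, NULL);
	if (sg == NULL)
		return -ENOMEM;

	/* ... use the 'count' entries of 'sg' for I/O ... */

	sgv_pool_free(sgv, &mem_lim);
	return 0;
}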
+diff -uprN orig/linux-2.6.36/drivers/scst/scst_mem.h linux-2.6.36/drivers/scst/scst_mem.h
+--- orig/linux-2.6.36/drivers/scst/scst_mem.h
++++ linux-2.6.36/drivers/scst/scst_mem.h
+@@ -0,0 +1,151 @@
++/*
++ * scst_mem.h
++ *
++ * Copyright (C) 2006 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/scatterlist.h>
++#include <linux/workqueue.h>
++
++#define SGV_POOL_ELEMENTS 11
++
++/*
++ * sg_num is indexed by the page number, pg_count is indexed by the sg number.
++ * Kept in one entry to simplify the code (e.g. all sizeof(*) parts) and to
++ * save some CPU cache in the non-clustered case.
++ */
++struct trans_tbl_ent {
++ unsigned short sg_num;
++ unsigned short pg_count;
++};
++
++/*
++ * SGV pool object
++ */
++struct sgv_pool_obj {
++ int cache_num;
++ int pages;
++
++ /* jiffies, protected by sgv_pool_lock */
++ unsigned long time_stamp;
++
++ struct list_head recycling_list_entry;
++ struct list_head sorted_recycling_list_entry;
++
++ struct sgv_pool *owner_pool;
++ int orig_sg;
++ int orig_length;
++ int sg_count;
++ void *allocator_priv;
++ struct trans_tbl_ent *trans_tbl;
++ struct scatterlist *sg_entries;
++ struct scatterlist sg_entries_data[0];
++};
++
++/*
++ * SGV pool statistics accounting structure
++ */
++struct sgv_pool_cache_acc {
++ atomic_t total_alloc, hit_alloc;
++ atomic_t merged;
++};
++
++/*
++ * SGV pool allocation functions
++ */
++struct sgv_pool_alloc_fns {
++ struct page *(*alloc_pages_fn)(struct scatterlist *sg, gfp_t gfp_mask,
++ void *priv);
++ void (*free_pages_fn)(struct scatterlist *sg, int sg_count,
++ void *priv);
++};
++
++/*
++ * SGV pool
++ */
++struct sgv_pool {
++ enum sgv_clustering_types clustering_type;
++ int single_alloc_pages;
++ int max_cached_pages;
++
++ struct sgv_pool_alloc_fns alloc_fns;
++
++	/* <=4K, <=8K, <=16K, <=32K, <=64K, <=128K, <=256K, <=512K, <=1M, <=2M, <=4M */
++ struct kmem_cache *caches[SGV_POOL_ELEMENTS];
++
++ spinlock_t sgv_pool_lock; /* outer lock for sgv_pools_lock! */
++
++ int purge_interval;
++
++ /* Protected by sgv_pool_lock, if necessary */
++ unsigned int purge_work_scheduled:1;
++
++ /* Protected by sgv_pool_lock */
++ struct list_head sorted_recycling_list;
++
++ int inactive_cached_pages; /* protected by sgv_pool_lock */
++
++ /* Protected by sgv_pool_lock */
++ struct list_head recycling_lists[SGV_POOL_ELEMENTS];
++
++ int cached_pages, cached_entries; /* protected by sgv_pool_lock */
++
++ struct sgv_pool_cache_acc cache_acc[SGV_POOL_ELEMENTS];
++
++ struct delayed_work sgv_purge_work;
++
++ struct list_head sgv_active_pools_list_entry;
++
++ atomic_t big_alloc, big_pages, big_merged;
++ atomic_t other_alloc, other_pages, other_merged;
++
++ atomic_t sgv_pool_ref;
++
++ int max_caches;
++
++ /* SCST_MAX_NAME + few more bytes to match scst_user expectations */
++	/* SCST_MAX_NAME + a few more bytes to match scst_user expectations */
++ char name[SCST_MAX_NAME + 10];
++
++ struct mm_struct *owner_mm;
++
++ struct list_head sgv_pools_list_entry;
++
++ struct kobject sgv_kobj;
++
++ /* sysfs release completion */
++ struct completion sgv_kobj_release_cmpl;
++};
++
++static inline struct scatterlist *sgv_pool_sg(struct sgv_pool_obj *obj)
++{
++ return obj->sg_entries;
++}
++
++int scst_sgv_pools_init(unsigned long mem_hwmark, unsigned long mem_lwmark);
++void scst_sgv_pools_deinit(void);
++
++ssize_t sgv_sysfs_stat_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf);
++ssize_t sgv_sysfs_stat_reset(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count);
++ssize_t sgv_sysfs_global_stat_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf);
++ssize_t sgv_sysfs_global_stat_reset(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count);
++
++void scst_sgv_pool_use_norm(struct scst_tgt_dev *tgt_dev);
++void scst_sgv_pool_use_norm_clust(struct scst_tgt_dev *tgt_dev);
++void scst_sgv_pool_use_dma(struct scst_tgt_dev *tgt_dev);
+diff -uprN orig/linux-2.6.36/drivers/scst/scst_mem.c linux-2.6.36/drivers/scst/scst_mem.c
+--- orig/linux-2.6.36/drivers/scst/scst_mem.c
++++ linux-2.6.36/drivers/scst/scst_mem.c
+@@ -0,0 +1,1880 @@
++/*
++ * scst_mem.c
++ *
++ * Copyright (C) 2006 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/unistd.h>
++#include <linux/string.h>
++
++#include <scst/scst.h>
++#include "scst_priv.h"
++#include "scst_mem.h"
++
++#define SGV_DEFAULT_PURGE_INTERVAL (60 * HZ)
++#define SGV_MIN_SHRINK_INTERVAL (1 * HZ)
++
++/* Max pages freed from a pool per shrinking iteration */
++#define MAX_PAGES_PER_POOL 50
++
++static struct sgv_pool *sgv_norm_clust_pool, *sgv_norm_pool, *sgv_dma_pool;
++
++static atomic_t sgv_pages_total = ATOMIC_INIT(0);
++
++/* Both read-only */
++static int sgv_hi_wmk;
++static int sgv_lo_wmk;
++
++static int sgv_max_local_pages, sgv_max_trans_pages;
++
++static DEFINE_SPINLOCK(sgv_pools_lock); /* inner lock for sgv_pool_lock! */
++static DEFINE_MUTEX(sgv_pools_mutex);
++
++/* Both protected by sgv_pools_lock */
++static struct sgv_pool *sgv_cur_purge_pool;
++static LIST_HEAD(sgv_active_pools_list);
++
++static atomic_t sgv_releases_on_hiwmk = ATOMIC_INIT(0);
++static atomic_t sgv_releases_on_hiwmk_failed = ATOMIC_INIT(0);
++
++static atomic_t sgv_other_total_alloc = ATOMIC_INIT(0);
++
++static struct shrinker sgv_shrinker;
++
++/*
++ * Protected by sgv_pools_mutex AND sgv_pools_lock for writes,
++ * either one for reads.
++ */
++static LIST_HEAD(sgv_pools_list);
++
++static inline bool sgv_pool_clustered(const struct sgv_pool *pool)
++{
++ return pool->clustering_type != sgv_no_clustering;
++}
++
++void scst_sgv_pool_use_norm(struct scst_tgt_dev *tgt_dev)
++{
++ tgt_dev->gfp_mask = __GFP_NOWARN;
++ tgt_dev->pool = sgv_norm_pool;
++ clear_bit(SCST_TGT_DEV_CLUST_POOL, &tgt_dev->tgt_dev_flags);
++}
++
++void scst_sgv_pool_use_norm_clust(struct scst_tgt_dev *tgt_dev)
++{
++ TRACE_MEM("%s", "Use clustering");
++ tgt_dev->gfp_mask = __GFP_NOWARN;
++ tgt_dev->pool = sgv_norm_clust_pool;
++ set_bit(SCST_TGT_DEV_CLUST_POOL, &tgt_dev->tgt_dev_flags);
++}
++
++void scst_sgv_pool_use_dma(struct scst_tgt_dev *tgt_dev)
++{
++ TRACE_MEM("%s", "Use ISA DMA memory");
++ tgt_dev->gfp_mask = __GFP_NOWARN | GFP_DMA;
++ tgt_dev->pool = sgv_dma_pool;
++ clear_bit(SCST_TGT_DEV_CLUST_POOL, &tgt_dev->tgt_dev_flags);
++}
++
++/* Must be called with no locks held */
++static void sgv_dtor_and_free(struct sgv_pool_obj *obj)
++{
++ struct sgv_pool *pool = obj->owner_pool;
++
++ TRACE_MEM("Destroying sgv obj %p", obj);
++
++ if (obj->sg_count != 0) {
++ pool->alloc_fns.free_pages_fn(obj->sg_entries,
++ obj->sg_count, obj->allocator_priv);
++ }
++ if (obj->sg_entries != obj->sg_entries_data) {
++ if (obj->trans_tbl !=
++ (struct trans_tbl_ent *)obj->sg_entries_data) {
++ /* kfree() handles NULL parameter */
++ kfree(obj->trans_tbl);
++ obj->trans_tbl = NULL;
++ }
++ kfree(obj->sg_entries);
++ }
++
++ kmem_cache_free(pool->caches[obj->cache_num], obj);
++ return;
++}
++
++/* Might be called under sgv_pool_lock */
++static inline void sgv_del_from_active(struct sgv_pool *pool)
++{
++ struct list_head *next;
++
++ TRACE_MEM("Deleting sgv pool %p from the active list", pool);
++
++ spin_lock_bh(&sgv_pools_lock);
++
++ next = pool->sgv_active_pools_list_entry.next;
++ list_del(&pool->sgv_active_pools_list_entry);
++
++ if (sgv_cur_purge_pool == pool) {
++ TRACE_MEM("Sgv pool %p is sgv cur purge pool", pool);
++
++ if (next == &sgv_active_pools_list)
++ next = next->next;
++
++ if (next == &sgv_active_pools_list) {
++ sgv_cur_purge_pool = NULL;
++ TRACE_MEM("%s", "Sgv active list now empty");
++ } else {
++ sgv_cur_purge_pool = list_entry(next, typeof(*pool),
++ sgv_active_pools_list_entry);
++ TRACE_MEM("New sgv cur purge pool %p",
++ sgv_cur_purge_pool);
++ }
++ }
++
++ spin_unlock_bh(&sgv_pools_lock);
++ return;
++}
++
++/* Must be called with sgv_pool_lock held */
++static void sgv_dec_cached_entries(struct sgv_pool *pool, int pages)
++{
++ pool->cached_entries--;
++ pool->cached_pages -= pages;
++
++ if (pool->cached_entries == 0)
++ sgv_del_from_active(pool);
++
++ return;
++}
++
++/* Must be called with sgv_pool_lock held */
++static void __sgv_purge_from_cache(struct sgv_pool_obj *obj)
++{
++ int pages = obj->pages;
++ struct sgv_pool *pool = obj->owner_pool;
++
++ TRACE_MEM("Purging sgv obj %p from pool %p (new cached_entries %d)",
++ obj, pool, pool->cached_entries-1);
++
++ list_del(&obj->sorted_recycling_list_entry);
++ list_del(&obj->recycling_list_entry);
++
++ pool->inactive_cached_pages -= pages;
++ sgv_dec_cached_entries(pool, pages);
++
++ atomic_sub(pages, &sgv_pages_total);
++
++ return;
++}
++
++/* Must be called with sgv_pool_lock held */
++static bool sgv_purge_from_cache(struct sgv_pool_obj *obj, int min_interval,
++ unsigned long cur_time)
++{
++ EXTRACHECKS_BUG_ON(min_interval < 0);
++
++ TRACE_MEM("Checking if sgv obj %p should be purged (cur time %ld, "
++ "obj time %ld, time to purge %ld)", obj, cur_time,
++ obj->time_stamp, obj->time_stamp + min_interval);
++
++ if (time_after_eq(cur_time, (obj->time_stamp + min_interval))) {
++ __sgv_purge_from_cache(obj);
++ return true;
++ }
++ return false;
++}
++
++/* No locks */
++static int sgv_shrink_pool(struct sgv_pool *pool, int nr, int min_interval,
++ unsigned long cur_time)
++{
++ int freed = 0;
++
++ TRACE_ENTRY();
++
++ TRACE_MEM("Trying to shrink pool %p (nr %d, min_interval %d)",
++ pool, nr, min_interval);
++
++ if (pool->purge_interval < 0) {
++ TRACE_MEM("Not shrinkable pool %p, skipping", pool);
++ goto out;
++ }
++
++ spin_lock_bh(&pool->sgv_pool_lock);
++
++ while (!list_empty(&pool->sorted_recycling_list) &&
++ (atomic_read(&sgv_pages_total) > sgv_lo_wmk)) {
++ struct sgv_pool_obj *obj = list_entry(
++ pool->sorted_recycling_list.next,
++ struct sgv_pool_obj, sorted_recycling_list_entry);
++
++ if (sgv_purge_from_cache(obj, min_interval, cur_time)) {
++ int pages = obj->pages;
++
++ freed += pages;
++ nr -= pages;
++
++ TRACE_MEM("%d pages purged from pool %p (nr left %d, "
++ "total freed %d)", pages, pool, nr, freed);
++
++ spin_unlock_bh(&pool->sgv_pool_lock);
++ sgv_dtor_and_free(obj);
++ spin_lock_bh(&pool->sgv_pool_lock);
++ } else
++ break;
++
++ if ((nr <= 0) || (freed >= MAX_PAGES_PER_POOL)) {
++ if (freed >= MAX_PAGES_PER_POOL)
++ TRACE_MEM("%d pages purged from pool %p, "
++ "leaving", freed, pool);
++ break;
++ }
++ }
++
++ spin_unlock_bh(&pool->sgv_pool_lock);
++
++out:
++ TRACE_EXIT_RES(nr);
++ return nr;
++}
++
++/* No locks */
++static int __sgv_shrink(int nr, int min_interval)
++{
++ struct sgv_pool *pool;
++ unsigned long cur_time = jiffies;
++ int prev_nr = nr;
++ bool circle = false;
++
++ TRACE_ENTRY();
++
++ TRACE_MEM("Trying to shrink %d pages from all sgv pools "
++ "(min_interval %d)", nr, min_interval);
++
++ while (nr > 0) {
++ struct list_head *next;
++
++ spin_lock_bh(&sgv_pools_lock);
++
++ pool = sgv_cur_purge_pool;
++ if (pool == NULL) {
++ if (list_empty(&sgv_active_pools_list)) {
++ TRACE_MEM("%s", "Active pools list is empty");
++ goto out_unlock;
++ }
++
++ pool = list_entry(sgv_active_pools_list.next,
++ typeof(*pool),
++ sgv_active_pools_list_entry);
++ }
++ sgv_pool_get(pool);
++
++ next = pool->sgv_active_pools_list_entry.next;
++ if (next == &sgv_active_pools_list) {
++ if (circle && (prev_nr == nr)) {
++ TRACE_MEM("Full circle done, but no progress, "
++ "leaving (nr %d)", nr);
++ goto out_unlock_put;
++ }
++ circle = true;
++ prev_nr = nr;
++
++ next = next->next;
++ }
++
++ sgv_cur_purge_pool = list_entry(next, typeof(*pool),
++ sgv_active_pools_list_entry);
++ TRACE_MEM("New cur purge pool %p", sgv_cur_purge_pool);
++
++ spin_unlock_bh(&sgv_pools_lock);
++
++ nr = sgv_shrink_pool(pool, nr, min_interval, cur_time);
++
++ sgv_pool_put(pool);
++ }
++
++out:
++ TRACE_EXIT_RES(nr);
++ return nr;
++
++out_unlock:
++ spin_unlock_bh(&sgv_pools_lock);
++ goto out;
++
++out_unlock_put:
++ spin_unlock_bh(&sgv_pools_lock);
++ sgv_pool_put(pool);
++ goto out;
++}
++
++static int sgv_shrink(struct shrinker *shrinker, int nr, gfp_t gfpm)
++{
++ TRACE_ENTRY();
++
++ if (nr > 0) {
++ nr = __sgv_shrink(nr, SGV_MIN_SHRINK_INTERVAL);
++ TRACE_MEM("Left %d", nr);
++ } else {
++ struct sgv_pool *pool;
++ int inactive_pages = 0;
++
++ spin_lock_bh(&sgv_pools_lock);
++ list_for_each_entry(pool, &sgv_active_pools_list,
++ sgv_active_pools_list_entry) {
++ if (pool->purge_interval > 0)
++ inactive_pages += pool->inactive_cached_pages;
++ }
++ spin_unlock_bh(&sgv_pools_lock);
++
++ nr = max((int)0, inactive_pages - sgv_lo_wmk);
++ TRACE_MEM("Can free %d (total %d)", nr,
++ atomic_read(&sgv_pages_total));
++ }
++
++ TRACE_EXIT_RES(nr);
++ return nr;
++}
++
++static void sgv_purge_work_fn(struct delayed_work *work)
++{
++ unsigned long cur_time = jiffies;
++ struct sgv_pool *pool = container_of(work, struct sgv_pool,
++ sgv_purge_work);
++
++ TRACE_ENTRY();
++
++ TRACE_MEM("Purge work for pool %p", pool);
++
++ spin_lock_bh(&pool->sgv_pool_lock);
++
++ pool->purge_work_scheduled = false;
++
++ while (!list_empty(&pool->sorted_recycling_list)) {
++ struct sgv_pool_obj *obj = list_entry(
++ pool->sorted_recycling_list.next,
++ struct sgv_pool_obj, sorted_recycling_list_entry);
++
++ if (sgv_purge_from_cache(obj, pool->purge_interval, cur_time)) {
++ spin_unlock_bh(&pool->sgv_pool_lock);
++ sgv_dtor_and_free(obj);
++ spin_lock_bh(&pool->sgv_pool_lock);
++ } else {
++ /*
++			 * Reschedule it for the full period so we don't get
++			 * here too often. In the worst case the shrinker will
++			 * reclaim buffers more quickly.
++ */
++ TRACE_MEM("Rescheduling purge work for pool %p (delay "
++ "%d HZ/%d sec)", pool, pool->purge_interval,
++ pool->purge_interval/HZ);
++ schedule_delayed_work(&pool->sgv_purge_work,
++ pool->purge_interval);
++ pool->purge_work_scheduled = true;
++ break;
++ }
++ }
++
++ spin_unlock_bh(&pool->sgv_pool_lock);
++
++ TRACE_MEM("Leaving purge work for pool %p", pool);
++
++ TRACE_EXIT();
++ return;
++}
++
++static int sgv_check_full_clustering(struct scatterlist *sg, int cur, int hint)
++{
++ int res = -1;
++ int i = hint;
++ unsigned long pfn_cur = page_to_pfn(sg_page(&sg[cur]));
++ int len_cur = sg[cur].length;
++ unsigned long pfn_cur_next = pfn_cur + (len_cur >> PAGE_SHIFT);
++ int full_page_cur = (len_cur & (PAGE_SIZE - 1)) == 0;
++ unsigned long pfn, pfn_next;
++ bool full_page;
++
++#if 0
++ TRACE_MEM("pfn_cur %ld, pfn_cur_next %ld, len_cur %d, full_page_cur %d",
++ pfn_cur, pfn_cur_next, len_cur, full_page_cur);
++#endif
++
++ /* check the hint first */
++ if (i >= 0) {
++ pfn = page_to_pfn(sg_page(&sg[i]));
++ pfn_next = pfn + (sg[i].length >> PAGE_SHIFT);
++ full_page = (sg[i].length & (PAGE_SIZE - 1)) == 0;
++
++ if ((pfn == pfn_cur_next) && full_page_cur)
++ goto out_head;
++
++ if ((pfn_next == pfn_cur) && full_page)
++ goto out_tail;
++ }
++
++ /* ToDo: implement more intelligent search */
++ for (i = cur - 1; i >= 0; i--) {
++ pfn = page_to_pfn(sg_page(&sg[i]));
++ pfn_next = pfn + (sg[i].length >> PAGE_SHIFT);
++ full_page = (sg[i].length & (PAGE_SIZE - 1)) == 0;
++
++ if ((pfn == pfn_cur_next) && full_page_cur)
++ goto out_head;
++
++ if ((pfn_next == pfn_cur) && full_page)
++ goto out_tail;
++ }
++
++out:
++ return res;
++
++out_tail:
++ TRACE_MEM("SG segment %d will be tail merged with segment %d", cur, i);
++ sg[i].length += len_cur;
++ sg_clear(&sg[cur]);
++ res = i;
++ goto out;
++
++out_head:
++ TRACE_MEM("SG segment %d will be head merged with segment %d", cur, i);
++ sg_assign_page(&sg[i], sg_page(&sg[cur]));
++ sg[i].length += len_cur;
++ sg_clear(&sg[cur]);
++ res = i;
++ goto out;
++}
++
++static int sgv_check_tail_clustering(struct scatterlist *sg, int cur, int hint)
++{
++ int res = -1;
++ unsigned long pfn_cur = page_to_pfn(sg_page(&sg[cur]));
++ int len_cur = sg[cur].length;
++ int prev;
++ unsigned long pfn_prev;
++ bool full_page;
++
++#ifdef SCST_HIGHMEM
++ if (page >= highmem_start_page) {
++		TRACE_MEM("%s", "HIGHMEM page allocated, no clustering");
++ goto out;
++ }
++#endif
++
++#if 0
++ TRACE_MEM("pfn_cur %ld, pfn_cur_next %ld, len_cur %d, full_page_cur %d",
++ pfn_cur, pfn_cur_next, len_cur, full_page_cur);
++#endif
++
++ if (cur == 0)
++ goto out;
++
++ prev = cur - 1;
++ pfn_prev = page_to_pfn(sg_page(&sg[prev])) +
++ (sg[prev].length >> PAGE_SHIFT);
++ full_page = (sg[prev].length & (PAGE_SIZE - 1)) == 0;
++
++ if ((pfn_prev == pfn_cur) && full_page) {
++ TRACE_MEM("SG segment %d will be tail merged with segment %d",
++ cur, prev);
++ sg[prev].length += len_cur;
++ sg_clear(&sg[cur]);
++ res = prev;
++ }
++
++out:
++ return res;
++}
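A concrete, hypothetical illustration of the tail-clustering check above (the pfn values are invented for the example):

/*
 * Suppose sg[0] holds a full page at pfn 100 (length PAGE_SIZE) and the
 * freshly added sg[1] holds the page at pfn 101. Then pfn_prev is
 * 100 + 1 = 101, which equals pfn_cur, and sg[0] is a full page, so the
 * two entries are merged: sg[0].length grows to 2 * PAGE_SIZE and sg[1]
 * is cleared, keeping the vector physically contiguous where possible.
 */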
++
++static void sgv_free_sys_sg_entries(struct scatterlist *sg, int sg_count,
++ void *priv)
++{
++ int i;
++
++ TRACE_MEM("sg=%p, sg_count=%d", sg, sg_count);
++
++ for (i = 0; i < sg_count; i++) {
++ struct page *p = sg_page(&sg[i]);
++ int len = sg[i].length;
++ int pages =
++ (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);
++
++ TRACE_MEM("page %lx, len %d, pages %d",
++ (unsigned long)p, len, pages);
++
++ while (pages > 0) {
++ int order = 0;
++
++/*
++ * __free_pages() doesn't like freeing pages with an order different from
++ * the one they were allocated with, so this small optimization is disabled.
++ */
++#if 0
++ if (len > 0) {
++ while (((1 << order) << PAGE_SHIFT) < len)
++ order++;
++ len = 0;
++ }
++#endif
++ TRACE_MEM("free_pages(): order %d, page %lx",
++ order, (unsigned long)p);
++
++ __free_pages(p, order);
++
++ pages -= 1 << order;
++ p += 1 << order;
++ }
++ }
++}
++
++static struct page *sgv_alloc_sys_pages(struct scatterlist *sg,
++ gfp_t gfp_mask, void *priv)
++{
++ struct page *page = alloc_pages(gfp_mask, 0);
++
++ sg_set_page(sg, page, PAGE_SIZE, 0);
++ TRACE_MEM("page=%p, sg=%p, priv=%p", page, sg, priv);
++ if (page == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of "
++ "sg page failed");
++ }
++ return page;
++}
++
++static int sgv_alloc_sg_entries(struct scatterlist *sg, int pages,
++ gfp_t gfp_mask, enum sgv_clustering_types clustering_type,
++ struct trans_tbl_ent *trans_tbl,
++ const struct sgv_pool_alloc_fns *alloc_fns, void *priv)
++{
++ int sg_count = 0;
++ int pg, i, j;
++ int merged = -1;
++
++ TRACE_MEM("pages=%d, clustering_type=%d", pages, clustering_type);
++
++#if 0
++ gfp_mask |= __GFP_COLD;
++#endif
++#ifdef CONFIG_SCST_STRICT_SECURITY
++ gfp_mask |= __GFP_ZERO;
++#endif
++
++ for (pg = 0; pg < pages; pg++) {
++ void *rc;
++#ifdef CONFIG_SCST_DEBUG_OOM
++ if (((gfp_mask & __GFP_NOFAIL) != __GFP_NOFAIL) &&
++ ((scst_random() % 10000) == 55))
++ rc = NULL;
++ else
++#endif
++ rc = alloc_fns->alloc_pages_fn(&sg[sg_count], gfp_mask,
++ priv);
++ if (rc == NULL)
++ goto out_no_mem;
++
++ /*
++		 * This code allows the compiler to see the full body of the
++		 * clustering functions and gives it a chance to generate
++		 * better code. At least, the resulting code is smaller
++		 * compared to calling them through a function pointer.
++ */
++ if (clustering_type == sgv_full_clustering)
++ merged = sgv_check_full_clustering(sg, sg_count, merged);
++ else if (clustering_type == sgv_tail_clustering)
++ merged = sgv_check_tail_clustering(sg, sg_count, merged);
++ else
++ merged = -1;
++
++ if (merged == -1)
++ sg_count++;
++
++ TRACE_MEM("pg=%d, merged=%d, sg_count=%d", pg, merged,
++ sg_count);
++ }
++
++ if ((clustering_type != sgv_no_clustering) && (trans_tbl != NULL)) {
++ pg = 0;
++ for (i = 0; i < pages; i++) {
++ int n = (sg[i].length >> PAGE_SHIFT) +
++ ((sg[i].length & ~PAGE_MASK) != 0);
++ trans_tbl[i].pg_count = pg;
++ for (j = 0; j < n; j++)
++ trans_tbl[pg++].sg_num = i+1;
++ TRACE_MEM("i=%d, n=%d, pg_count=%d", i, n,
++ trans_tbl[i].pg_count);
++ }
++ }
++
++out:
++ TRACE_MEM("sg_count=%d", sg_count);
++ return sg_count;
++
++out_no_mem:
++ alloc_fns->free_pages_fn(sg, sg_count, priv);
++ sg_count = 0;
++ goto out;
++}
++
++static int sgv_alloc_arrays(struct sgv_pool_obj *obj,
++ int pages_to_alloc, gfp_t gfp_mask)
++{
++ int sz, tsz = 0;
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ sz = pages_to_alloc * sizeof(obj->sg_entries[0]);
++
++ obj->sg_entries = kmalloc(sz, gfp_mask);
++ if (unlikely(obj->sg_entries == NULL)) {
++ TRACE(TRACE_OUT_OF_MEM, "Allocation of sgv_pool_obj "
++ "SG vector failed (size %d)", sz);
++ res = -ENOMEM;
++ goto out;
++ }
++
++ sg_init_table(obj->sg_entries, pages_to_alloc);
++
++ if (sgv_pool_clustered(obj->owner_pool)) {
++ if (pages_to_alloc <= sgv_max_trans_pages) {
++ obj->trans_tbl =
++ (struct trans_tbl_ent *)obj->sg_entries_data;
++ /*
++ * No need to clear trans_tbl, if needed, it will be
++ * fully rewritten in sgv_alloc_sg_entries()
++ */
++ } else {
++ tsz = pages_to_alloc * sizeof(obj->trans_tbl[0]);
++ obj->trans_tbl = kzalloc(tsz, gfp_mask);
++ if (unlikely(obj->trans_tbl == NULL)) {
++ TRACE(TRACE_OUT_OF_MEM, "Allocation of "
++ "trans_tbl failed (size %d)", tsz);
++ res = -ENOMEM;
++ goto out_free;
++ }
++ }
++ }
++
++ TRACE_MEM("pages_to_alloc %d, sz %d, tsz %d, obj %p, sg_entries %p, "
++ "trans_tbl %p", pages_to_alloc, sz, tsz, obj, obj->sg_entries,
++ obj->trans_tbl);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free:
++ kfree(obj->sg_entries);
++ obj->sg_entries = NULL;
++ goto out;
++}
++
++static struct sgv_pool_obj *sgv_get_obj(struct sgv_pool *pool, int cache_num,
++ int pages, gfp_t gfp_mask, bool get_new)
++{
++ struct sgv_pool_obj *obj;
++
++ spin_lock_bh(&pool->sgv_pool_lock);
++
++ if (unlikely(get_new)) {
++ /* Used only for buffers preallocation */
++ goto get_new;
++ }
++
++ if (likely(!list_empty(&pool->recycling_lists[cache_num]))) {
++ obj = list_entry(pool->recycling_lists[cache_num].next,
++ struct sgv_pool_obj, recycling_list_entry);
++
++ list_del(&obj->sorted_recycling_list_entry);
++ list_del(&obj->recycling_list_entry);
++
++ pool->inactive_cached_pages -= pages;
++
++ spin_unlock_bh(&pool->sgv_pool_lock);
++ goto out;
++ }
++
++get_new:
++ if (pool->cached_entries == 0) {
++ TRACE_MEM("Adding pool %p to the active list", pool);
++ spin_lock_bh(&sgv_pools_lock);
++ list_add_tail(&pool->sgv_active_pools_list_entry,
++ &sgv_active_pools_list);
++ spin_unlock_bh(&sgv_pools_lock);
++ }
++
++ pool->cached_entries++;
++ pool->cached_pages += pages;
++
++ spin_unlock_bh(&pool->sgv_pool_lock);
++
++ TRACE_MEM("New cached entries %d (pool %p)", pool->cached_entries,
++ pool);
++
++ obj = kmem_cache_alloc(pool->caches[cache_num],
++ gfp_mask & ~(__GFP_HIGHMEM|GFP_DMA));
++ if (likely(obj)) {
++ memset(obj, 0, sizeof(*obj));
++ obj->cache_num = cache_num;
++ obj->pages = pages;
++ obj->owner_pool = pool;
++ } else {
++ spin_lock_bh(&pool->sgv_pool_lock);
++ sgv_dec_cached_entries(pool, pages);
++ spin_unlock_bh(&pool->sgv_pool_lock);
++ }
++
++out:
++ return obj;
++}
++
++static void sgv_put_obj(struct sgv_pool_obj *obj)
++{
++ struct sgv_pool *pool = obj->owner_pool;
++ struct list_head *entry;
++ struct list_head *list = &pool->recycling_lists[obj->cache_num];
++ int pages = obj->pages;
++
++ spin_lock_bh(&pool->sgv_pool_lock);
++
++ TRACE_MEM("sgv %p, cache num %d, pages %d, sg_count %d", obj,
++ obj->cache_num, pages, obj->sg_count);
++
++ if (sgv_pool_clustered(pool)) {
++ /* Make objects with less entries more preferred */
++ __list_for_each(entry, list) {
++ struct sgv_pool_obj *tmp = list_entry(entry,
++ struct sgv_pool_obj, recycling_list_entry);
++
++ TRACE_MEM("tmp %p, cache num %d, pages %d, sg_count %d",
++ tmp, tmp->cache_num, tmp->pages, tmp->sg_count);
++
++ if (obj->sg_count <= tmp->sg_count)
++ break;
++ }
++ entry = entry->prev;
++ } else
++ entry = list;
++
++ TRACE_MEM("Adding in %p (list %p)", entry, list);
++ list_add(&obj->recycling_list_entry, entry);
++
++ list_add_tail(&obj->sorted_recycling_list_entry,
++ &pool->sorted_recycling_list);
++
++ obj->time_stamp = jiffies;
++
++ pool->inactive_cached_pages += pages;
++
++ if (!pool->purge_work_scheduled) {
++ TRACE_MEM("Scheduling purge work for pool %p", pool);
++ pool->purge_work_scheduled = true;
++ schedule_delayed_work(&pool->sgv_purge_work,
++ pool->purge_interval);
++ }
++
++ spin_unlock_bh(&pool->sgv_pool_lock);
++ return;
++}
++
++/* No locks */
++static int sgv_hiwmk_check(int pages_to_alloc)
++{
++ int res = 0;
++ int pages = pages_to_alloc;
++
++ pages += atomic_read(&sgv_pages_total);
++
++ if (unlikely(pages > sgv_hi_wmk)) {
++ pages -= sgv_hi_wmk;
++ atomic_inc(&sgv_releases_on_hiwmk);
++
++ pages = __sgv_shrink(pages, 0);
++ if (pages > 0) {
++			TRACE(TRACE_OUT_OF_MEM, "Requested amount of "
++				"memory (%d pages) for the commands being "
++				"executed, together with the already "
++				"allocated memory, exceeds the allowed "
++				"maximum %d. Consider increasing "
++				"scst_max_cmd_mem", pages_to_alloc,
++ sgv_hi_wmk);
++ atomic_inc(&sgv_releases_on_hiwmk_failed);
++ res = -ENOMEM;
++ goto out_unlock;
++ }
++ }
++
++ atomic_add(pages_to_alloc, &sgv_pages_total);
++
++out_unlock:
++ TRACE_MEM("pages_to_alloc %d, new total %d", pages_to_alloc,
++ atomic_read(&sgv_pages_total));
++
++ return res;
++}
++
++/* No locks */
++static void sgv_hiwmk_uncheck(int pages)
++{
++ atomic_sub(pages, &sgv_pages_total);
++ TRACE_MEM("pages %d, new total %d", pages,
++ atomic_read(&sgv_pages_total));
++ return;
++}
++
++/* No locks */
++static bool sgv_check_allowed_mem(struct scst_mem_lim *mem_lim, int pages)
++{
++ int alloced;
++ bool res = true;
++
++ alloced = atomic_add_return(pages, &mem_lim->alloced_pages);
++ if (unlikely(alloced > mem_lim->max_allowed_pages)) {
++ TRACE(TRACE_OUT_OF_MEM, "Requested amount of memory "
++			"(%d pages) for the commands being executed on a "
++			"device, together with the already allocated memory, "
++			"exceeds the allowed maximum %d. Consider increasing "
++			"scst_max_dev_cmd_mem", pages,
++ mem_lim->max_allowed_pages);
++ atomic_sub(pages, &mem_lim->alloced_pages);
++ res = false;
++ }
++
++ TRACE_MEM("mem_lim %p, pages %d, res %d, new alloced %d", mem_lim,
++ pages, res, atomic_read(&mem_lim->alloced_pages));
++
++ return res;
++}
++
++/* No locks */
++static void sgv_uncheck_allowed_mem(struct scst_mem_lim *mem_lim, int pages)
++{
++ atomic_sub(pages, &mem_lim->alloced_pages);
++
++ TRACE_MEM("mem_lim %p, pages %d, new alloced %d", mem_lim,
++ pages, atomic_read(&mem_lim->alloced_pages));
++ return;
++}
++
++/**
++ * sgv_pool_alloc - allocate an SG vector from the SGV pool
++ * @pool: the cache to alloc from
++ * @size: size of the resulting SG vector in bytes
++ * @gfp_mask: the allocation mask
++ * @flags: the allocation flags
++ * @count: the count of SG entries in the resulting SG vector
++ * @sgv: the resulting SGV object
++ * @mem_lim: memory limits
++ * @priv: pointer to private data for this allocation
++ *
++ * Description:
++ *    Allocates an SG vector from the SGV pool and returns a pointer to it,
++ *    or NULL in case of any error. See the SGV pool documentation for more
++ *    details.
++ */
++struct scatterlist *sgv_pool_alloc(struct sgv_pool *pool, unsigned int size,
++ gfp_t gfp_mask, int flags, int *count,
++ struct sgv_pool_obj **sgv, struct scst_mem_lim *mem_lim, void *priv)
++{
++ struct sgv_pool_obj *obj;
++ int cache_num, pages, cnt;
++ struct scatterlist *res = NULL;
++ int pages_to_alloc;
++ int no_cached = flags & SGV_POOL_ALLOC_NO_CACHED;
++ bool allowed_mem_checked = false, hiwmk_checked = false;
++
++ TRACE_ENTRY();
++
++ if (unlikely(size == 0))
++ goto out;
++
++ EXTRACHECKS_BUG_ON((gfp_mask & __GFP_NOFAIL) == __GFP_NOFAIL);
++
++ pages = ((size + PAGE_SIZE - 1) >> PAGE_SHIFT);
++ if (pool->single_alloc_pages == 0) {
++ int pages_order = get_order(size);
++ cache_num = pages_order;
++ pages_to_alloc = (1 << pages_order);
++ } else {
++ cache_num = 0;
++ pages_to_alloc = max(pool->single_alloc_pages, pages);
++ }
++
++ TRACE_MEM("size=%d, pages=%d, pages_to_alloc=%d, cache num=%d, "
++ "flags=%x, no_cached=%d, *sgv=%p", size, pages,
++ pages_to_alloc, cache_num, flags, no_cached, *sgv);
++
++ if (*sgv != NULL) {
++ obj = *sgv;
++
++ TRACE_MEM("Supplied obj %p, cache num %d", obj, obj->cache_num);
++
++ EXTRACHECKS_BUG_ON(obj->sg_count != 0);
++
++ if (unlikely(!sgv_check_allowed_mem(mem_lim, pages_to_alloc)))
++ goto out_fail_free_sg_entries;
++ allowed_mem_checked = true;
++
++ if (unlikely(sgv_hiwmk_check(pages_to_alloc) != 0))
++ goto out_fail_free_sg_entries;
++ hiwmk_checked = true;
++ } else if ((pages_to_alloc <= pool->max_cached_pages) && !no_cached) {
++ if (unlikely(!sgv_check_allowed_mem(mem_lim, pages_to_alloc)))
++ goto out_fail;
++ allowed_mem_checked = true;
++
++ obj = sgv_get_obj(pool, cache_num, pages_to_alloc, gfp_mask,
++ flags & SGV_POOL_ALLOC_GET_NEW);
++ if (unlikely(obj == NULL)) {
++ TRACE(TRACE_OUT_OF_MEM, "Allocation of "
++ "sgv_pool_obj failed (size %d)", size);
++ goto out_fail;
++ }
++
++ if (obj->sg_count != 0) {
++ TRACE_MEM("Cached obj %p", obj);
++ atomic_inc(&pool->cache_acc[cache_num].hit_alloc);
++ goto success;
++ }
++
++ if (flags & SGV_POOL_NO_ALLOC_ON_CACHE_MISS) {
++ if (!(flags & SGV_POOL_RETURN_OBJ_ON_ALLOC_FAIL))
++ goto out_fail_free;
++ }
++
++ TRACE_MEM("Brand new obj %p", obj);
++
++ if (pages_to_alloc <= sgv_max_local_pages) {
++ obj->sg_entries = obj->sg_entries_data;
++ sg_init_table(obj->sg_entries, pages_to_alloc);
++ TRACE_MEM("sg_entries %p", obj->sg_entries);
++ if (sgv_pool_clustered(pool)) {
++ obj->trans_tbl = (struct trans_tbl_ent *)
++ (obj->sg_entries + pages_to_alloc);
++ TRACE_MEM("trans_tbl %p", obj->trans_tbl);
++ /*
++ * No need to clear trans_tbl, if needed, it
++ * will be fully rewritten in
++ * sgv_alloc_sg_entries().
++ */
++ }
++ } else {
++ if (unlikely(sgv_alloc_arrays(obj, pages_to_alloc,
++ gfp_mask) != 0))
++ goto out_fail_free;
++ }
++
++ if ((flags & SGV_POOL_NO_ALLOC_ON_CACHE_MISS) &&
++ (flags & SGV_POOL_RETURN_OBJ_ON_ALLOC_FAIL))
++ goto out_return;
++
++ obj->allocator_priv = priv;
++
++ if (unlikely(sgv_hiwmk_check(pages_to_alloc) != 0))
++ goto out_fail_free_sg_entries;
++ hiwmk_checked = true;
++ } else {
++ int sz;
++
++ pages_to_alloc = pages;
++
++ if (unlikely(!sgv_check_allowed_mem(mem_lim, pages_to_alloc)))
++ goto out_fail;
++ allowed_mem_checked = true;
++
++ if (flags & SGV_POOL_NO_ALLOC_ON_CACHE_MISS)
++ goto out_return2;
++
++ sz = sizeof(*obj) + pages * sizeof(obj->sg_entries[0]);
++
++ obj = kmalloc(sz, gfp_mask);
++ if (unlikely(obj == NULL)) {
++ TRACE(TRACE_OUT_OF_MEM, "Allocation of "
++ "sgv_pool_obj failed (size %d)", size);
++ goto out_fail;
++ }
++ memset(obj, 0, sizeof(*obj));
++
++ obj->owner_pool = pool;
++ cache_num = -1;
++ obj->cache_num = cache_num;
++ obj->pages = pages_to_alloc;
++ obj->allocator_priv = priv;
++
++ obj->sg_entries = obj->sg_entries_data;
++ sg_init_table(obj->sg_entries, pages);
++
++ if (unlikely(sgv_hiwmk_check(pages_to_alloc) != 0))
++ goto out_fail_free_sg_entries;
++ hiwmk_checked = true;
++
++ TRACE_MEM("Big or no_cached obj %p (size %d)", obj, sz);
++ }
++
++ obj->sg_count = sgv_alloc_sg_entries(obj->sg_entries,
++ pages_to_alloc, gfp_mask, pool->clustering_type,
++ obj->trans_tbl, &pool->alloc_fns, priv);
++ if (unlikely(obj->sg_count <= 0)) {
++ obj->sg_count = 0;
++ if ((flags & SGV_POOL_RETURN_OBJ_ON_ALLOC_FAIL) &&
++ (cache_num >= 0))
++ goto out_return1;
++ else
++ goto out_fail_free_sg_entries;
++ }
++
++ if (cache_num >= 0) {
++ atomic_add(pages_to_alloc - obj->sg_count,
++ &pool->cache_acc[cache_num].merged);
++ } else {
++ if (no_cached) {
++ atomic_add(pages_to_alloc,
++ &pool->other_pages);
++ atomic_add(pages_to_alloc - obj->sg_count,
++ &pool->other_merged);
++ } else {
++ atomic_add(pages_to_alloc,
++ &pool->big_pages);
++ atomic_add(pages_to_alloc - obj->sg_count,
++ &pool->big_merged);
++ }
++ }
++
++success:
++ if (cache_num >= 0) {
++ int sg;
++ atomic_inc(&pool->cache_acc[cache_num].total_alloc);
++ if (sgv_pool_clustered(pool))
++ cnt = obj->trans_tbl[pages-1].sg_num;
++ else
++ cnt = pages;
++ sg = cnt-1;
++ obj->orig_sg = sg;
++ obj->orig_length = obj->sg_entries[sg].length;
++ if (sgv_pool_clustered(pool)) {
++ obj->sg_entries[sg].length =
++ (pages - obj->trans_tbl[sg].pg_count) << PAGE_SHIFT;
++ }
++ } else {
++ cnt = obj->sg_count;
++ if (no_cached)
++ atomic_inc(&pool->other_alloc);
++ else
++ atomic_inc(&pool->big_alloc);
++ }
++
++ *count = cnt;
++ res = obj->sg_entries;
++ *sgv = obj;
++
++ if (size & ~PAGE_MASK)
++ obj->sg_entries[cnt-1].length -=
++ PAGE_SIZE - (size & ~PAGE_MASK);
++
++ TRACE_MEM("obj=%p, sg_entries %p (size=%d, pages=%d, sg_count=%d, "
++ "count=%d, last_len=%d)", obj, obj->sg_entries, size, pages,
++ obj->sg_count, *count, obj->sg_entries[obj->orig_sg].length);
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++
++out_return:
++ obj->allocator_priv = priv;
++ obj->owner_pool = pool;
++
++out_return1:
++ *sgv = obj;
++ TRACE_MEM("Returning failed obj %p (count %d)", obj, *count);
++
++out_return2:
++ *count = pages_to_alloc;
++ res = NULL;
++ goto out_uncheck;
++
++out_fail_free_sg_entries:
++ if (obj->sg_entries != obj->sg_entries_data) {
++ if (obj->trans_tbl !=
++ (struct trans_tbl_ent *)obj->sg_entries_data) {
++ /* kfree() handles NULL parameter */
++ kfree(obj->trans_tbl);
++ obj->trans_tbl = NULL;
++ }
++ kfree(obj->sg_entries);
++ obj->sg_entries = NULL;
++ }
++
++out_fail_free:
++ if (cache_num >= 0) {
++ spin_lock_bh(&pool->sgv_pool_lock);
++ sgv_dec_cached_entries(pool, pages_to_alloc);
++ spin_unlock_bh(&pool->sgv_pool_lock);
++
++ kmem_cache_free(pool->caches[obj->cache_num], obj);
++ } else
++ kfree(obj);
++
++out_fail:
++ res = NULL;
++ *count = 0;
++ *sgv = NULL;
++ TRACE_MEM("%s", "Allocation failed");
++
++out_uncheck:
++ if (hiwmk_checked)
++ sgv_hiwmk_uncheck(pages_to_alloc);
++ if (allowed_mem_checked)
++ sgv_uncheck_allowed_mem(mem_lim, pages_to_alloc);
++ goto out;
++}
++EXPORT_SYMBOL_GPL(sgv_pool_alloc);
++
++/**
++ * sgv_get_priv - return the private allocation data
++ *
++ * Allows to get the allocation private data for this SGV
++ * cache object. The private data supposed to be set by sgv_pool_alloc().
++ */
++void *sgv_get_priv(struct sgv_pool_obj *obj)
++{
++ return obj->allocator_priv;
++}
++EXPORT_SYMBOL_GPL(sgv_get_priv);
++
++/**
++ * sgv_pool_free - free previously allocated SG vector
++ * @sgv: the SGV object to free
++ * @mem_lim: memory limits
++ *
++ * Description:
++ *    Frees a previously allocated SG vector and updates the memory limits
++ */
++void sgv_pool_free(struct sgv_pool_obj *obj, struct scst_mem_lim *mem_lim)
++{
++ int pages = (obj->sg_count != 0) ? obj->pages : 0;
++
++ TRACE_MEM("Freeing obj %p, cache num %d, pages %d, sg_entries %p, "
++ "sg_count %d, allocator_priv %p", obj, obj->cache_num, pages,
++ obj->sg_entries, obj->sg_count, obj->allocator_priv);
++
++/*
++ * Enable this if you are investigating data corruption and want to make
++ * sure that the target or dev handler didn't leave the pages mapped
++ * somewhere and thereby provoked the corruption.
++ *
++ * Make sure the check value for _count is set correctly. In most cases, 1 is
++ * correct, but, e.g., iSCSI-SCST can call it with value 2, because
++ * it frees the corresponding cmd before the last put_page() call from
++ * net_put_page() for the last page in the SG. Also, user space dev handlers
++ * usually have their memory mapped in their address space.
++ */
++#if 0
++ {
++ struct scatterlist *sg = obj->sg_entries;
++ int i;
++ for (i = 0; i < obj->sg_count; i++) {
++ struct page *p = sg_page(&sg[i]);
++ int len = sg[i].length;
++ int pages = (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);
++ while (pages > 0) {
++ if (atomic_read(&p->_count) != 1) {
++ PRINT_WARNING("Freeing page %p with "
++ "additional owners (_count %d). "
++ "Data corruption possible!",
++ p, atomic_read(&p->_count));
++ WARN_ON(1);
++ }
++ pages--;
++ p++;
++ }
++ }
++ }
++#endif
++
++ if (obj->cache_num >= 0) {
++ obj->sg_entries[obj->orig_sg].length = obj->orig_length;
++ sgv_put_obj(obj);
++ } else {
++ obj->owner_pool->alloc_fns.free_pages_fn(obj->sg_entries,
++ obj->sg_count, obj->allocator_priv);
++ kfree(obj);
++ sgv_hiwmk_uncheck(pages);
++ }
++
++ sgv_uncheck_allowed_mem(mem_lim, pages);
++ return;
++}
++EXPORT_SYMBOL_GPL(sgv_pool_free);
++
++/**
++ * scst_alloc() - allocates an SG vector
++ *
++ * Allocates and returns a pointer to an SG vector with data size "size".
++ * The number of entries in the vector is returned in *count.
++ * Returns NULL on failure.
++ */
++struct scatterlist *scst_alloc(int size, gfp_t gfp_mask, int *count)
++{
++ struct scatterlist *res;
++ int pages = (size >> PAGE_SHIFT) + ((size & ~PAGE_MASK) != 0);
++ struct sgv_pool_alloc_fns sys_alloc_fns = {
++ sgv_alloc_sys_pages, sgv_free_sys_sg_entries };
++ int no_fail = ((gfp_mask & __GFP_NOFAIL) == __GFP_NOFAIL);
++ int cnt;
++
++ TRACE_ENTRY();
++
++ atomic_inc(&sgv_other_total_alloc);
++
++ if (unlikely(sgv_hiwmk_check(pages) != 0)) {
++ if (!no_fail) {
++ res = NULL;
++ goto out;
++ } else {
++ /*
++			 * Update sgv_pages_total since the allocation can't
++			 * fail. If it wasn't updated, the counter would cross
++			 * 0 on the subsequent free.
++ */
++ sgv_hiwmk_uncheck(-pages);
++ }
++ }
++
++ res = kmalloc(pages*sizeof(*res), gfp_mask);
++ if (res == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "Unable to allocate sg for %d pages",
++ pages);
++ goto out_uncheck;
++ }
++
++ sg_init_table(res, pages);
++
++ /*
++	 * If we allowed clustering here, scst_free() would have trouble
++	 * figuring out how many pages are in the SG vector. So never use
++	 * clustering for these allocations.
++ */
++ cnt = sgv_alloc_sg_entries(res, pages, gfp_mask, sgv_no_clustering,
++ NULL, &sys_alloc_fns, NULL);
++ if (cnt <= 0)
++ goto out_free;
++
++ if (size & ~PAGE_MASK)
++ res[cnt-1].length -= PAGE_SIZE - (size & ~PAGE_MASK);
++
++ *count = cnt;
++
++out:
++ TRACE_MEM("Alloced sg %p (count %d, no_fail %d)", res, *count, no_fail);
++
++ TRACE_EXIT_HRES(res);
++ return res;
++
++out_free:
++ kfree(res);
++ res = NULL;
++
++out_uncheck:
++ if (!no_fail)
++ sgv_hiwmk_uncheck(pages);
++ goto out;
++}
++EXPORT_SYMBOL_GPL(scst_alloc);
++
++/**
++ * scst_free() - frees SG vector
++ *
++ * Frees SG vector returned by scst_alloc().
++ */
++void scst_free(struct scatterlist *sg, int count)
++{
++ TRACE_MEM("Freeing sg=%p", sg);
++
++ sgv_hiwmk_uncheck(count);
++
++ sgv_free_sys_sg_entries(sg, count, NULL);
++ kfree(sg);
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_free);
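A matching, hypothetical caller of the two exported helpers above, for code that needs a plain SG vector outside any SGV pool (my_plain_sg_example is an invented name):

/* Hypothetical sketch: allocate, use and free a plain SG vector. */
static int my_plain_sg_example(int size)
{
	struct scatterlist *sg;
	int count;

	sg = scst_alloc(size, GFP_KERNEL, &count);
	if (sg == NULL)
		return -ENOMEM;

	/* ... fill or read the 'count' SG entries ... */

	scst_free(sg, count);
	return 0;
}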
++
++/* Must be called under sgv_pools_mutex */
++static void sgv_pool_init_cache(struct sgv_pool *pool, int cache_num)
++{
++ int size;
++ int pages;
++ struct sgv_pool_obj *obj;
++
++ atomic_set(&pool->cache_acc[cache_num].total_alloc, 0);
++ atomic_set(&pool->cache_acc[cache_num].hit_alloc, 0);
++ atomic_set(&pool->cache_acc[cache_num].merged, 0);
++
++ if (pool->single_alloc_pages == 0)
++ pages = 1 << cache_num;
++ else
++ pages = pool->single_alloc_pages;
++
++ if (pages <= sgv_max_local_pages) {
++ size = sizeof(*obj) + pages *
++ (sizeof(obj->sg_entries[0]) +
++ ((pool->clustering_type != sgv_no_clustering) ?
++ sizeof(obj->trans_tbl[0]) : 0));
++ } else if (pages <= sgv_max_trans_pages) {
++ /*
++ * sg_entries is allocated outside object,
++ * but trans_tbl is still embedded.
++ */
++ size = sizeof(*obj) + pages *
++ (((pool->clustering_type != sgv_no_clustering) ?
++ sizeof(obj->trans_tbl[0]) : 0));
++ } else {
++ size = sizeof(*obj);
++ /* both sgv and trans_tbl are kmalloc'ed() */
++ }
++
++ TRACE_MEM("pages=%d, size=%d", pages, size);
++
++ scnprintf(pool->cache_names[cache_num],
++ sizeof(pool->cache_names[cache_num]),
++ "%s-%uK", pool->name, (pages << PAGE_SHIFT) >> 10);
++ pool->caches[cache_num] = kmem_cache_create(
++ pool->cache_names[cache_num], size, 0, SCST_SLAB_FLAGS, NULL
++ );
++ return;
++}
++
++/* Must be called under sgv_pools_mutex */
++static int sgv_pool_init(struct sgv_pool *pool, const char *name,
++ enum sgv_clustering_types clustering_type, int single_alloc_pages,
++ int purge_interval)
++{
++ int res = -ENOMEM;
++ int i;
++
++ TRACE_ENTRY();
++
++ if (single_alloc_pages < 0) {
++ PRINT_ERROR("Wrong single_alloc_pages value %d",
++ single_alloc_pages);
++ res = -EINVAL;
++ goto out;
++ }
++
++ memset(pool, 0, sizeof(*pool));
++
++ atomic_set(&pool->big_alloc, 0);
++ atomic_set(&pool->big_pages, 0);
++ atomic_set(&pool->big_merged, 0);
++ atomic_set(&pool->other_alloc, 0);
++ atomic_set(&pool->other_pages, 0);
++ atomic_set(&pool->other_merged, 0);
++
++ pool->clustering_type = clustering_type;
++ pool->single_alloc_pages = single_alloc_pages;
++ if (purge_interval != 0) {
++ pool->purge_interval = purge_interval;
++ if (purge_interval < 0) {
++ /* Let's pretend that it's always scheduled */
++ pool->purge_work_scheduled = 1;
++ }
++ } else
++ pool->purge_interval = SGV_DEFAULT_PURGE_INTERVAL;
++ if (single_alloc_pages == 0) {
++ pool->max_caches = SGV_POOL_ELEMENTS;
++ pool->max_cached_pages = 1 << (SGV_POOL_ELEMENTS - 1);
++ } else {
++ pool->max_caches = 1;
++ pool->max_cached_pages = single_alloc_pages;
++ }
++ pool->alloc_fns.alloc_pages_fn = sgv_alloc_sys_pages;
++ pool->alloc_fns.free_pages_fn = sgv_free_sys_sg_entries;
++
++ TRACE_MEM("name %s, sizeof(*obj)=%zd, clustering_type=%d, "
++ "single_alloc_pages=%d, max_caches=%d, max_cached_pages=%d",
++ name, sizeof(struct sgv_pool_obj), clustering_type,
++ single_alloc_pages, pool->max_caches, pool->max_cached_pages);
++
++ strlcpy(pool->name, name, sizeof(pool->name)-1);
++
++ pool->owner_mm = current->mm;
++
++ for (i = 0; i < pool->max_caches; i++) {
++ sgv_pool_init_cache(pool, i);
++ if (pool->caches[i] == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "Allocation of sgv_pool "
++ "cache %s(%d) failed", name, i);
++ goto out_free;
++ }
++ }
++
++ atomic_set(&pool->sgv_pool_ref, 1);
++ spin_lock_init(&pool->sgv_pool_lock);
++ INIT_LIST_HEAD(&pool->sorted_recycling_list);
++ for (i = 0; i < pool->max_caches; i++)
++ INIT_LIST_HEAD(&pool->recycling_lists[i]);
++
++ INIT_DELAYED_WORK(&pool->sgv_purge_work,
++ (void (*)(struct work_struct *))sgv_purge_work_fn);
++
++ spin_lock_bh(&sgv_pools_lock);
++ list_add_tail(&pool->sgv_pools_list_entry, &sgv_pools_list);
++ spin_unlock_bh(&sgv_pools_lock);
++
++ res = scst_sgv_sysfs_create(pool);
++ if (res != 0)
++ goto out_del;
++
++ res = 0;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_del:
++ spin_lock_bh(&sgv_pools_lock);
++ list_del(&pool->sgv_pools_list_entry);
++ spin_unlock_bh(&sgv_pools_lock);
++
++out_free:
++ for (i = 0; i < pool->max_caches; i++) {
++ if (pool->caches[i]) {
++ kmem_cache_destroy(pool->caches[i]);
++ pool->caches[i] = NULL;
++ } else
++ break;
++ }
++ goto out;
++}
++
++static void sgv_evaluate_local_max_pages(void)
++{
++ int space4sgv_ttbl = PAGE_SIZE - sizeof(struct sgv_pool_obj);
++
++ sgv_max_local_pages = space4sgv_ttbl /
++ (sizeof(struct trans_tbl_ent) + sizeof(struct scatterlist));
++
++ sgv_max_trans_pages = space4sgv_ttbl / sizeof(struct trans_tbl_ent);
++
++ TRACE_MEM("sgv_max_local_pages %d, sgv_max_trans_pages %d",
++ sgv_max_local_pages, sgv_max_trans_pages);
++ return;
++}
++
++/**
++ * sgv_pool_flush - flushes the SGV pool
++ *
++ * Flushes, i.e. frees, all the cached entries in the SGV pool.
++ */
++void sgv_pool_flush(struct sgv_pool *pool)
++{
++ int i;
++
++ TRACE_ENTRY();
++
++ for (i = 0; i < pool->max_caches; i++) {
++ struct sgv_pool_obj *obj;
++
++ spin_lock_bh(&pool->sgv_pool_lock);
++
++ while (!list_empty(&pool->recycling_lists[i])) {
++ obj = list_entry(pool->recycling_lists[i].next,
++ struct sgv_pool_obj, recycling_list_entry);
++
++ __sgv_purge_from_cache(obj);
++
++ spin_unlock_bh(&pool->sgv_pool_lock);
++
++ EXTRACHECKS_BUG_ON(obj->owner_pool != pool);
++ sgv_dtor_and_free(obj);
++
++ spin_lock_bh(&pool->sgv_pool_lock);
++ }
++ spin_unlock_bh(&pool->sgv_pool_lock);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(sgv_pool_flush);
++
++static void sgv_pool_destroy(struct sgv_pool *pool)
++{
++ int i;
++
++ TRACE_ENTRY();
++
++ cancel_delayed_work_sync(&pool->sgv_purge_work);
++
++ sgv_pool_flush(pool);
++
++ mutex_lock(&sgv_pools_mutex);
++ spin_lock_bh(&sgv_pools_lock);
++ list_del(&pool->sgv_pools_list_entry);
++ spin_unlock_bh(&sgv_pools_lock);
++ mutex_unlock(&sgv_pools_mutex);
++
++ scst_sgv_sysfs_del(pool);
++
++ for (i = 0; i < pool->max_caches; i++) {
++ if (pool->caches[i])
++ kmem_cache_destroy(pool->caches[i]);
++ pool->caches[i] = NULL;
++ }
++
++ kfree(pool);
++
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ * sgv_pool_set_allocator - set custom pages allocator
++ * @pool: the cache
++ * @alloc_pages_fn: pages allocation function
++ * @free_pages_fn: pages freeing function
++ *
++ * Description:
++ *    Allows setting a custom page allocator for the SGV pool.
++ * See the SGV pool documentation for more details.
++ */
++void sgv_pool_set_allocator(struct sgv_pool *pool,
++ struct page *(*alloc_pages_fn)(struct scatterlist *, gfp_t, void *),
++ void (*free_pages_fn)(struct scatterlist *, int, void *))
++{
++ pool->alloc_fns.alloc_pages_fn = alloc_pages_fn;
++ pool->alloc_fns.free_pages_fn = free_pages_fn;
++ return;
++}
++EXPORT_SYMBOL_GPL(sgv_pool_set_allocator);
++
++/**
++ * sgv_pool_create - creates and initializes an SGV pool
++ * @name: the name of the SGV pool
++ * @clustering_type: sets the type of pages clustering.
++ * @single_alloc_pages: if 0, then the SGV pool will work in the set of
++ * power 2 size buffers mode. If >0, then the SGV pool will
++ * work in the fixed size buffers mode. In this case
++ * single_alloc_pages sets the size of each buffer in pages.
++ * @shared: sets if the SGV pool can be shared between devices or not.
++ *		The cache sharing is allowed only between devices created inside
++ * the same address space. If an SGV pool is shared, each
++ * subsequent call of sgv_pool_create() with the same cache name
++ * will not create a new cache, but instead return a reference
++ * to it.
++ * @purge_interval: sets the cache purging interval. I.e., an SG buffer
++ *		    will be freed if it's unused for a time t, where
++ * purge_interval <= t < 2*purge_interval. If purge_interval
++ * is 0, then the default interval will be used (60 seconds).
++ * If purge_interval <0, then the automatic purging will be
++ * disabled.
++ *
++ * Description:
++ * Returns the resulting SGV pool or NULL in case of any error.
++ */
++struct sgv_pool *sgv_pool_create(const char *name,
++ enum sgv_clustering_types clustering_type,
++ int single_alloc_pages, bool shared, int purge_interval)
++{
++ struct sgv_pool *pool;
++ int rc;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&sgv_pools_mutex);
++
++ list_for_each_entry(pool, &sgv_pools_list, sgv_pools_list_entry) {
++ if (strcmp(pool->name, name) == 0) {
++ if (shared) {
++ if (pool->owner_mm != current->mm) {
++ PRINT_ERROR("Attempt of a shared use "
++ "of SGV pool %s with "
++ "different MM", name);
++ goto out_unlock;
++ }
++ sgv_pool_get(pool);
++ goto out_unlock;
++ } else {
++ PRINT_ERROR("SGV pool %s already exists", name);
++ pool = NULL;
++ goto out_unlock;
++ }
++ }
++ }
++
++ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
++ if (pool == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of sgv_pool failed");
++ goto out_unlock;
++ }
++
++ rc = sgv_pool_init(pool, name, clustering_type, single_alloc_pages,
++ purge_interval);
++ if (rc != 0)
++ goto out_free;
++
++out_unlock:
++ mutex_unlock(&sgv_pools_mutex);
++
++ TRACE_EXIT_RES(pool != NULL);
++ return pool;
++
++out_free:
++ kfree(pool);
++ goto out_unlock;
++}
++EXPORT_SYMBOL_GPL(sgv_pool_create);
++
++/**
++ * sgv_pool_get - increase ref counter for the corresponding SGV pool
++ *
++ * Increases ref counter for the corresponding SGV pool
++ */
++void sgv_pool_get(struct sgv_pool *pool)
++{
++ atomic_inc(&pool->sgv_pool_ref);
++ TRACE_MEM("Incrementing sgv pool %p ref (new value %d)",
++ pool, atomic_read(&pool->sgv_pool_ref));
++ return;
++}
++EXPORT_SYMBOL_GPL(sgv_pool_get);
++
++/**
++ * sgv_pool_put - decrease ref counter for the corresponding SGV pool
++ *
++ * Decreases ref counter for the corresponding SGV pool. If the ref
++ * counter reaches 0, the cache will be destroyed.
++ */
++void sgv_pool_put(struct sgv_pool *pool)
++{
++ TRACE_MEM("Decrementing sgv pool %p ref (new value %d)",
++ pool, atomic_read(&pool->sgv_pool_ref)-1);
++ if (atomic_dec_and_test(&pool->sgv_pool_ref))
++ sgv_pool_destroy(pool);
++ return;
++}
++EXPORT_SYMBOL_GPL(sgv_pool_put);
++
++/**
++ * sgv_pool_del - deletes the corresponding SGV pool
++ * @pool: the cache to delete.
++ *
++ * Description:
++ * If the cache is shared, it will decrease its reference counter.
++ * If the reference counter reaches 0, the cache will be destroyed.
++ */
++void sgv_pool_del(struct sgv_pool *pool)
++{
++ TRACE_ENTRY();
++
++ sgv_pool_put(pool);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(sgv_pool_del);
++
++/* Both parameters in pages */
++int scst_sgv_pools_init(unsigned long mem_hwmark, unsigned long mem_lwmark)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ sgv_hi_wmk = mem_hwmark;
++ sgv_lo_wmk = mem_lwmark;
++
++ sgv_evaluate_local_max_pages();
++
++ sgv_norm_pool = sgv_pool_create("sgv", sgv_no_clustering, 0, false, 0);
++ if (sgv_norm_pool == NULL)
++ goto out_err;
++
++ sgv_norm_clust_pool = sgv_pool_create("sgv-clust",
++ sgv_full_clustering, 0, false, 0);
++ if (sgv_norm_clust_pool == NULL)
++ goto out_free_norm;
++
++ sgv_dma_pool = sgv_pool_create("sgv-dma", sgv_no_clustering, 0,
++ false, 0);
++ if (sgv_dma_pool == NULL)
++ goto out_free_clust;
++
++ sgv_shrinker.shrink = sgv_shrink;
++ sgv_shrinker.seeks = DEFAULT_SEEKS;
++ register_shrinker(&sgv_shrinker);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free_clust:
++ sgv_pool_destroy(sgv_norm_clust_pool);
++
++out_free_norm:
++ sgv_pool_destroy(sgv_norm_pool);
++
++out_err:
++ res = -ENOMEM;
++ goto out;
++}
++
++void scst_sgv_pools_deinit(void)
++{
++ TRACE_ENTRY();
++
++ unregister_shrinker(&sgv_shrinker);
++
++ sgv_pool_destroy(sgv_dma_pool);
++ sgv_pool_destroy(sgv_norm_pool);
++ sgv_pool_destroy(sgv_norm_clust_pool);
++
++ flush_scheduled_work();
++
++ TRACE_EXIT();
++ return;
++}
++
++ssize_t sgv_sysfs_stat_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct sgv_pool *pool;
++ int i, total = 0, hit = 0, merged = 0, allocated = 0;
++ int oa, om, res;
++
++ pool = container_of(kobj, struct sgv_pool, sgv_kobj);
++
++ for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
++ int t;
++
++ hit += atomic_read(&pool->cache_acc[i].hit_alloc);
++ total += atomic_read(&pool->cache_acc[i].total_alloc);
++
++ t = atomic_read(&pool->cache_acc[i].total_alloc) -
++ atomic_read(&pool->cache_acc[i].hit_alloc);
++ allocated += t * (1 << i);
++ merged += atomic_read(&pool->cache_acc[i].merged);
++ }
++
++ res = sprintf(buf, "%-30s %-11s %-11s %-11s %-11s", "Name", "Hit", "Total",
++ "% merged", "Cached (P/I/O)");
++
++ res += sprintf(&buf[res], "\n%-30s %-11d %-11d %-11d %d/%d/%d\n",
++ pool->name, hit, total,
++ (allocated != 0) ? merged*100/allocated : 0,
++ pool->cached_pages, pool->inactive_cached_pages,
++ pool->cached_entries);
++
++ for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
++ int t = atomic_read(&pool->cache_acc[i].total_alloc) -
++ atomic_read(&pool->cache_acc[i].hit_alloc);
++ allocated = t * (1 << i);
++ merged = atomic_read(&pool->cache_acc[i].merged);
++
++ res += sprintf(&buf[res], " %-28s %-11d %-11d %d\n",
++ pool->cache_names[i],
++ atomic_read(&pool->cache_acc[i].hit_alloc),
++ atomic_read(&pool->cache_acc[i].total_alloc),
++ (allocated != 0) ? merged*100/allocated : 0);
++ }
++
++ allocated = atomic_read(&pool->big_pages);
++ merged = atomic_read(&pool->big_merged);
++ oa = atomic_read(&pool->other_pages);
++ om = atomic_read(&pool->other_merged);
++
++ res += sprintf(&buf[res], " %-40s %d/%-9d %d/%d\n", "big/other",
++ atomic_read(&pool->big_alloc), atomic_read(&pool->other_alloc),
++ (allocated != 0) ? merged*100/allocated : 0,
++ (oa != 0) ? om/oa : 0);
++
++ return res;
++}
++
++ssize_t sgv_sysfs_stat_reset(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ struct sgv_pool *pool;
++ int i;
++
++ TRACE_ENTRY();
++
++ pool = container_of(kobj, struct sgv_pool, sgv_kobj);
++
++ for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
++ atomic_set(&pool->cache_acc[i].hit_alloc, 0);
++ atomic_set(&pool->cache_acc[i].total_alloc, 0);
++ atomic_set(&pool->cache_acc[i].merged, 0);
++ }
++
++ atomic_set(&pool->big_pages, 0);
++ atomic_set(&pool->big_merged, 0);
++ atomic_set(&pool->big_alloc, 0);
++ atomic_set(&pool->other_pages, 0);
++ atomic_set(&pool->other_merged, 0);
++ atomic_set(&pool->other_alloc, 0);
++
++	PRINT_INFO("Statistics for SGV pool %s reset", pool->name);
++
++ TRACE_EXIT_RES(count);
++ return count;
++}
++
++ssize_t sgv_sysfs_global_stat_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct sgv_pool *pool;
++ int inactive_pages = 0, res;
++
++ TRACE_ENTRY();
++
++ spin_lock_bh(&sgv_pools_lock);
++ list_for_each_entry(pool, &sgv_active_pools_list,
++ sgv_active_pools_list_entry) {
++ inactive_pages += pool->inactive_cached_pages;
++ }
++ spin_unlock_bh(&sgv_pools_lock);
++
++ res = sprintf(buf, "%-42s %d/%d\n%-42s %d/%d\n%-42s %d/%d\n"
++ "%-42s %-11d\n",
++ "Inactive/active pages", inactive_pages,
++ atomic_read(&sgv_pages_total) - inactive_pages,
++ "Hi/lo watermarks [pages]", sgv_hi_wmk, sgv_lo_wmk,
++ "Hi watermark releases/failures",
++ atomic_read(&sgv_releases_on_hiwmk),
++ atomic_read(&sgv_releases_on_hiwmk_failed),
++ "Other allocs", atomic_read(&sgv_other_total_alloc));
++
++ TRACE_EXIT();
++ return res;
++}
++
++ssize_t sgv_sysfs_global_stat_reset(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ TRACE_ENTRY();
++
++ atomic_set(&sgv_releases_on_hiwmk, 0);
++ atomic_set(&sgv_releases_on_hiwmk_failed, 0);
++ atomic_set(&sgv_other_total_alloc, 0);
++
++	PRINT_INFO("%s", "Global SGV pool statistics reset");
++
++ TRACE_EXIT_RES(count);
++ return count;
++}
++
+diff -uprN orig/linux-2.6.36/Documentation/scst/sgv_cache.txt linux-2.6.36/Documentation/scst/sgv_cache.txt
+--- orig/linux-2.6.36/Documentation/scst/sgv_cache.txt
++++ linux-2.6.36/Documentation/scst/sgv_cache.txt
+@@ -0,0 +1,224 @@
++ SCST SGV CACHE.
++
++ PROGRAMMING INTERFACE DESCRIPTION.
++
++ For SCST version 1.0.2
++
++SCST SGV cache is a memory management subsystem in SCST. One could call
++it a "memory pool", but the Linux kernel already has a mempool
++interface, which serves different purposes. The SGV cache provides the
++SCST core, target drivers and backend dev handlers with facilities to
++allocate, build and cache SG vectors for data buffers. Its main
++advantage is the caching facility: a vector that is no longer used is
++not freed back to the system immediately, but is kept for a while
++(possibly indefinitely) so that it can be reused by the next
++consecutive command. This makes it possible to:
++
++ - Reduce command processing latencies and, hence, improve performance;
++
++ - Make command processing latencies predictable, which is essential
++   for RT applications.
++
++The freed SG vectors are kept by the SGV cache either for some (possibly
++indefinite) time, or, optionally, until the system needs more memory and
++asks to free some using the set_shrinker() interface. The SGV cache
++also makes it possible to:
++
++  - Cluster pages together. "Cluster" means merging adjacent pages into
++a single SG entry. This gives fewer SG entries in the resulting SG
++vector, which improves performance when handling it and allows working
++with bigger buffers on hardware with limited SG capabilities.
++
++  - Set custom page allocator functions. For instance, the scst_user
++device handler uses this facility to eliminate unneeded
++mapping/unmapping of user space pages and avoid unneeded IOCTL calls
++for buffer allocations. In the fileio_tgt application, which uses a
++regular malloc() function to allocate data buffers, this facility gives
++~30% less CPU load and a considerable performance increase.
++
++  - Prevent each initiator, or all initiators together, from allocating
++too much memory and DoS-ing the target. Consider 10 initiators, which
++can have access to 10 devices each. Any of them can queue up to 64
++commands, each of which can transfer up to 1MB of data. So, at peak,
++all of them together can allocate up to 10*10*64 commands, i.e. ~6.5GB
++of memory, for data buffers. This amount must be limited somehow, and
++the SGV cache performs this function.
++
++From an implementation POV the SGV cache is a simple extension of the
++kmem cache. It can work in 2 modes:
++
++1. With fixed size buffers.
++
++2. With a set of power 2 size buffers. In this mode each SGV cache
++(struct sgv_pool) has SGV_POOL_ELEMENTS (11 currently) kmem caches.
++Each of those kmem caches keeps SGV cache objects (struct sgv_pool_obj)
++corresponding to SG vectors with a size of order X pages. For instance,
++a request to allocate 4 pages will be served from kmem cache[2], since
++the order of the number of requested pages is 2. If a later request to
++allocate 11KB comes, the same SG vector with 4 pages will be reused (see
++below). On average, this mode allows less memory overhead compared with
++the fixed size buffers mode.
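++
++As a hypothetical illustration (this snippet is not from the cache code
++itself), the kernel's get_order() helper expresses the same mapping from
++a requested size to the kmem cache index, assuming 4KB pages:
++
++	/* 11KB rounds up to 4 pages, i.e. order 2 => kmem cache[2] */
++	int cache_idx = get_order(11 * 1024);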
++
++Consider how the SGV cache works in the set of buffers mode. When a
++request to allocate a new SG vector comes, sgv_pool_alloc() via
++sgv_get_obj() checks if there is already a cached vector with that
++order. If yes, then that vector will be reused and its length, if
++necessary, will be modified to match the requested size. In the above
++example of a request for an 11KB buffer, the 4-page vector will be
++reused and modified using trans_tbl to contain 3 pages, and the last
++entry will be modified to contain the requested length - 2*PAGE_SIZE.
++If there is no cached object, then a new sgv_pool_obj will be allocated
++from the corresponding kmem cache, chosen by the order of the number of
++requested pages. Then that vector will be filled with pages and
++returned.
++
++In the fixed size buffers mode the SGV cache works similarly, except
++that it always allocates a buffer with the predefined fixed size. I.e.
++even for a 4K request the whole buffer with the predefined size, say,
++1MB, will be used.
++
++In both modes, if the size of a request exceeds the maximum buffer size
++allowed for caching, the requested buffer will be allocated, but not
++cached.
++
++Freed cached sgv_pool_obj objects are actually released to the system
++either by the purge work, which is scheduled once every 60 seconds, or
++in sgv_shrink(), called by the system when it is asking for memory.
++
++ Interface.
++
++struct sgv_pool *sgv_pool_create(const char *name,
++ enum sgv_clustering_types clustered, int single_alloc_pages,
++ bool shared, int purge_interval)
++
++This function creates and initializes an SGV cache. It has the following
++arguments:
++
++ - name - the name of the SGV cache
++
++ - clustered - sets the type of pages clustering. The type can be:
++
++ * sgv_no_clustering - no clustering performed.
++
++ * sgv_tail_clustering - a page will only be merged with the latest
++ previously allocated page, so the order of pages in the SG will be
++    preserved.
++
++ * sgv_full_clustering - free merging of pages at any place in
++ the SG is allowed. This mode usually provides the best merging
++ rate.
++
++ - single_alloc_pages - if 0, then the SGV cache will work in the set of
++ power 2 size buffers mode. If >0, then the SGV cache will work in the
++ fixed size buffers mode. In this case single_alloc_pages sets the
++ size of each buffer in pages.
++
++ - shared - sets if the SGV cache can be shared between devices or not.
++  The cache sharing is allowed only between devices created inside the
++  same address space. If an SGV cache is shared, each subsequent call of
++ sgv_pool_create() with the same cache name will not create a new cache,
++ but instead return a reference to it.
++
++ - purge_interval - sets the cache purging interval. I.e. an SG buffer
++  will be freed if it's unused for a time t, where purge_interval <= t <
++ 2*purge_interval. If purge_interval is 0, then the default interval
++ will be used (60 seconds). If purge_interval <0, then the automatic
++ purging will be disabled. Shrinking by the system's demand will also
++ be disabled.
++
++Returns the resulting SGV cache or NULL in case of any error.
++
++void sgv_pool_del(struct sgv_pool *pool)
++
++This function deletes the corresponding SGV cache. If the cache is
++shared, it will decrease its reference counter. If the reference counter
++reaches 0, the cache will be destroyed.
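++
++For example, a minimal consumer could create and later delete a private
++cache like this (the cache name "my_dev_sgv" and the error handling
++below are purely illustrative, not taken from an existing driver):
++
++	struct sgv_pool *pool;
++
++	pool = sgv_pool_create("my_dev_sgv", sgv_no_clustering, 0,
++			       false, 0);
++	if (pool == NULL)
++		return -ENOMEM;
++
++	/* ... allocate buffers via sgv_pool_alloc(), see below ... */
++
++	sgv_pool_del(pool);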
++
++void sgv_pool_flush(struct sgv_pool *pool)
++
++This function flushes, i.e. frees, all the cached entries in the SGV
++cache.
++
++void sgv_pool_set_allocator(struct sgv_pool *pool,
++ struct page *(*alloc_pages_fn)(struct scatterlist *sg, gfp_t gfp, void *priv),
++ void (*free_pages_fn)(struct scatterlist *sg, int sg_count, void *priv));
++
++This function allows setting a custom pages allocator for the SGV cache.
++For instance, scst_user uses such a function to supply the cache with
++pages mapped from user space.
++
++alloc_pages_fn() has the following parameters:
++
++ - sg - SG entry, to which the allocated page should be added.
++
++ - gfp - the allocation GFP flags
++
++ - priv - pointer to private data supplied to sgv_pool_alloc()
++
++This function should return the allocated page or NULL, if no page was
++allocated.
++
++free_pages_fn() has the following parameters:
++
++ - sg - SG vector to free
++
++ - sg_count - number of SG entries in the sg
++
++ - priv - pointer to private data supplied to the corresponding sgv_pool_alloc()
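++
++As a sketch (my_alloc_fn and my_free_fn are hypothetical names, not part
++of SCST), a simple allocator pair built on the standard page allocator
++could look like:
++
++	static struct page *my_alloc_fn(struct scatterlist *sg, gfp_t gfp,
++					void *priv)
++	{
++		/* allocate one page and attach it to the given SG entry */
++		struct page *page = alloc_page(gfp);
++
++		if (page != NULL)
++			sg_set_page(sg, page, PAGE_SIZE, 0);
++		return page;
++	}
++
++	static void my_free_fn(struct scatterlist *sg, int sg_count,
++			       void *priv)
++	{
++		int i;
++
++		/* release every page previously attached by my_alloc_fn() */
++		for (i = 0; i < sg_count; i++)
++			__free_page(sg_page(&sg[i]));
++	}
++
++	sgv_pool_set_allocator(pool, my_alloc_fn, my_free_fn);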
++
++struct scatterlist *sgv_pool_alloc(struct sgv_pool *pool, unsigned int size,
++ gfp_t gfp_mask, int flags, int *count,
++ struct sgv_pool_obj **sgv, struct scst_mem_lim *mem_lim, void *priv)
++
++This function allocates an SG vector from the SGV cache. It has the
++following parameters:
++
++ - pool - the cache to alloc from
++
++ - size - size of the resulting SG vector in bytes
++
++ - gfp_mask - the allocation mask
++
++ - flags - the allocation flags. The following flags are possible and
++ can be set using OR operation:
++
++ * SGV_POOL_ALLOC_NO_CACHED - the SG vector must not be cached.
++
++ * SGV_POOL_NO_ALLOC_ON_CACHE_MISS - don't do an allocation on a
++ cache miss.
++
++ * SGV_POOL_RETURN_OBJ_ON_ALLOC_FAIL - return an empty SGV object,
++ i.e. without the SG vector, if the allocation can't be completed.
++    For instance, because the SGV_POOL_NO_ALLOC_ON_CACHE_MISS flag is set.
++
++ - count - the resulting count of SG entries in the resulting SG vector.
++
++ - sgv - the resulting SGV object. It should be used to free the
++ resulting SG vector.
++
++ - mem_lim - memory limits, see below.
++
++ - priv - pointer to private data for this allocation. This pointer will
++ be supplied to alloc_pages_fn() and free_pages_fn() and can be
++ retrieved by sgv_get_priv().
++
++This function returns a pointer to the resulting SG vector or NULL in case
++of any error.
++
++void sgv_pool_free(struct sgv_pool_obj *sgv, struct scst_mem_lim *mem_lim)
++
++This function frees previously allocated SG vector, referenced by SGV
++cache object sgv.
++
++void *sgv_get_priv(struct sgv_pool_obj *sgv)
++
++This function allows getting the allocation private data for this SGV
++cache object sgv. The private data is set by sgv_pool_alloc().
++
++void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
++
++This function initializes the memory limits structure mem_lim according
++to the current system configuration. This structure should later be
++used to track and limit the memory allocated by one or more SGV caches.
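++
++Putting these together, a hypothetical caller (the 64KB size and the
++GFP_KERNEL mask are example values only) could allocate and free a
++buffer like this:
++
++	struct scst_mem_lim mem_lim;
++	struct sgv_pool_obj *sgv;
++	struct scatterlist *sg;
++	int sg_cnt;
++
++	scst_init_mem_lim(&mem_lim);
++
++	sg = sgv_pool_alloc(pool, 64 * 1024, GFP_KERNEL, 0, &sg_cnt,
++			    &sgv, &mem_lim, NULL);
++	if (sg == NULL)
++		return -ENOMEM;
++
++	/* ... transfer data using sg / sg_cnt ... */
++
++	sgv_pool_free(sgv, &mem_lim);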
++
++ Runtime information and statistics.
++
++Runtime information and statistics are available in /sys/kernel/scst_tgt/sgv.
++
+diff -uprN orig/linux-2.6.36/include/scst/scst_user.h linux-2.6.36/include/scst/scst_user.h
+--- orig/linux-2.6.36/include/scst/scst_user.h
++++ linux-2.6.36/include/scst/scst_user.h
+@@ -0,0 +1,322 @@
++/*
++ * include/scst_user.h
++ *
++ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * Contains constants and data structures for scst_user module.
++ * See http://scst.sourceforge.net/doc/scst_user_spec.txt or
++ * scst_user_spec.txt for description.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __SCST_USER_H
++#define __SCST_USER_H
++
++#include <scst/scst_const.h>
++
++#define DEV_USER_NAME "scst_user"
++#define DEV_USER_PATH "/dev/"
++#define DEV_USER_VERSION_NAME "2.0.0.1"
++#define DEV_USER_VERSION \
++ DEV_USER_VERSION_NAME "$Revision: 3165 $" SCST_CONST_VERSION
++
++#define SCST_USER_PARSE_STANDARD 0
++#define SCST_USER_PARSE_CALL 1
++#define SCST_USER_PARSE_EXCEPTION 2
++#define SCST_USER_MAX_PARSE_OPT SCST_USER_PARSE_EXCEPTION
++
++#define SCST_USER_ON_FREE_CMD_CALL 0
++#define SCST_USER_ON_FREE_CMD_IGNORE 1
++#define SCST_USER_MAX_ON_FREE_CMD_OPT SCST_USER_ON_FREE_CMD_IGNORE
++
++#define SCST_USER_MEM_NO_REUSE 0
++#define SCST_USER_MEM_REUSE_READ 1
++#define SCST_USER_MEM_REUSE_WRITE 2
++#define SCST_USER_MEM_REUSE_ALL 3
++#define SCST_USER_MAX_MEM_REUSE_OPT SCST_USER_MEM_REUSE_ALL
++
++#define SCST_USER_PARTIAL_TRANSFERS_NOT_SUPPORTED 0
++#define SCST_USER_PARTIAL_TRANSFERS_SUPPORTED_ORDERED 1
++#define SCST_USER_PARTIAL_TRANSFERS_SUPPORTED 2
++#define SCST_USER_MAX_PARTIAL_TRANSFERS_OPT \
++ SCST_USER_PARTIAL_TRANSFERS_SUPPORTED
++
++#ifndef aligned_u64
++#define aligned_u64 uint64_t __attribute__((aligned(8)))
++#endif
++
++/*************************************************************
++ ** Private ucmd states
++ *************************************************************/
++#define UCMD_STATE_NEW 0
++#define UCMD_STATE_PARSING 1
++#define UCMD_STATE_BUF_ALLOCING 2
++#define UCMD_STATE_EXECING 3
++#define UCMD_STATE_ON_FREEING 4
++#define UCMD_STATE_ON_FREE_SKIPPED 5
++#define UCMD_STATE_ON_CACHE_FREEING 6
++#define UCMD_STATE_TM_EXECING 7
++
++#define UCMD_STATE_ATTACH_SESS 0x20
++#define UCMD_STATE_DETACH_SESS 0x21
++
++struct scst_user_opt {
++ uint8_t parse_type;
++ uint8_t on_free_cmd_type;
++ uint8_t memory_reuse_type;
++ uint8_t partial_transfers_type;
++ int32_t partial_len;
++
++ /* SCSI control mode page parameters, see SPC */
++ uint8_t tst;
++ uint8_t queue_alg;
++ uint8_t tas;
++ uint8_t swp;
++ uint8_t d_sense;
++
++ uint8_t has_own_order_mgmt;
++};
++
++struct scst_user_dev_desc {
++ aligned_u64 version_str;
++ aligned_u64 license_str;
++ uint8_t type;
++ uint8_t sgv_shared;
++ uint8_t sgv_disable_clustered_pool;
++ int32_t sgv_single_alloc_pages;
++ int32_t sgv_purge_interval;
++ struct scst_user_opt opt;
++ uint32_t block_size;
++ uint8_t enable_pr_cmds_notifications;
++ char name[SCST_MAX_NAME];
++ char sgv_name[SCST_MAX_NAME];
++};
++
++struct scst_user_sess {
++ aligned_u64 sess_h;
++ aligned_u64 lun;
++ uint16_t threads_num;
++ uint8_t rd_only;
++ uint16_t scsi_transport_version;
++ uint16_t phys_transport_version;
++ char initiator_name[SCST_MAX_EXTERNAL_NAME];
++ char target_name[SCST_MAX_EXTERNAL_NAME];
++};
++
++struct scst_user_scsi_cmd_parse {
++ aligned_u64 sess_h;
++
++ uint8_t cdb[SCST_MAX_CDB_SIZE];
++ uint16_t cdb_len;
++ uint16_t ext_cdb_len;
++
++ int32_t timeout;
++ int32_t bufflen;
++ int32_t out_bufflen;
++
++ uint32_t op_flags;
++
++ uint8_t queue_type;
++ uint8_t data_direction;
++
++ uint8_t expected_values_set;
++ uint8_t expected_data_direction;
++ int32_t expected_transfer_len;
++ int32_t expected_out_transfer_len;
++
++ uint32_t sn;
++};
++
++struct scst_user_scsi_cmd_alloc_mem {
++ aligned_u64 sess_h;
++
++ uint8_t cdb[SCST_MAX_CDB_SIZE];
++ uint16_t cdb_len;
++ uint16_t ext_cdb_len;
++
++ int32_t alloc_len;
++
++ uint8_t queue_type;
++ uint8_t data_direction;
++
++ uint32_t sn;
++};
++
++struct scst_user_scsi_cmd_exec {
++ aligned_u64 sess_h;
++
++ uint8_t cdb[SCST_MAX_CDB_SIZE];
++ uint16_t cdb_len;
++ uint16_t ext_cdb_len;
++
++ int32_t data_len;
++ int32_t bufflen;
++ int32_t alloc_len;
++ aligned_u64 pbuf;
++ uint8_t queue_type;
++ uint8_t data_direction;
++ uint8_t partial;
++ int32_t timeout;
++
++ aligned_u64 p_out_buf;
++ int32_t out_bufflen;
++
++ uint32_t sn;
++
++ uint32_t parent_cmd_h;
++ int32_t parent_cmd_data_len;
++ uint32_t partial_offset;
++};
++
++struct scst_user_scsi_on_free_cmd {
++ aligned_u64 pbuf;
++ int32_t resp_data_len;
++ uint8_t buffer_cached;
++ uint8_t aborted;
++ uint8_t status;
++ uint8_t delivery_status;
++};
++
++struct scst_user_on_cached_mem_free {
++ aligned_u64 pbuf;
++};
++
++struct scst_user_tm {
++ aligned_u64 sess_h;
++ uint32_t fn;
++ uint32_t cmd_h_to_abort;
++ uint32_t cmd_sn;
++ uint8_t cmd_sn_set;
++};
++
++struct scst_user_get_cmd {
++ uint32_t cmd_h;
++ uint32_t subcode;
++ union {
++ aligned_u64 preply;
++ struct scst_user_sess sess;
++ struct scst_user_scsi_cmd_parse parse_cmd;
++ struct scst_user_scsi_cmd_alloc_mem alloc_cmd;
++ struct scst_user_scsi_cmd_exec exec_cmd;
++ struct scst_user_scsi_on_free_cmd on_free_cmd;
++ struct scst_user_on_cached_mem_free on_cached_mem_free;
++ struct scst_user_tm tm_cmd;
++ };
++};
++
++/* Be careful adding new members here, this structure is allocated on stack! */
++struct scst_user_scsi_cmd_reply_parse {
++ uint8_t status;
++ union {
++ struct {
++ uint8_t queue_type;
++ uint8_t data_direction;
++ uint16_t cdb_len;
++ uint32_t op_flags;
++ int32_t data_len;
++ int32_t bufflen;
++ };
++ struct {
++ uint8_t sense_len;
++ aligned_u64 psense_buffer;
++ };
++ };
++};
++
++/* Be careful adding new members here, this structure is allocated on stack! */
++struct scst_user_scsi_cmd_reply_alloc_mem {
++ aligned_u64 pbuf;
++};
++
++/* Be careful adding new members here, this structure is allocated on stack! */
++struct scst_user_scsi_cmd_reply_exec {
++ int32_t resp_data_len;
++ aligned_u64 pbuf;
++
++#define SCST_EXEC_REPLY_BACKGROUND 0
++#define SCST_EXEC_REPLY_COMPLETED 1
++ uint8_t reply_type;
++
++ uint8_t status;
++ uint8_t sense_len;
++ aligned_u64 psense_buffer;
++};
++
++/* Be careful adding new members here, this structure is allocated on stack! */
++struct scst_user_reply_cmd {
++ uint32_t cmd_h;
++ uint32_t subcode;
++ union {
++ int32_t result;
++ struct scst_user_scsi_cmd_reply_parse parse_reply;
++ struct scst_user_scsi_cmd_reply_alloc_mem alloc_reply;
++ struct scst_user_scsi_cmd_reply_exec exec_reply;
++ };
++};
++
++/* Be careful adding new members here, this structure is allocated on stack! */
++struct scst_user_get_ext_cdb {
++ uint32_t cmd_h;
++ aligned_u64 ext_cdb_buffer;
++};
++
++/* Be careful adding new members here, this structure is allocated on stack! */
++struct scst_user_prealloc_buffer_in {
++ aligned_u64 pbuf;
++ uint32_t bufflen;
++ uint8_t for_clust_pool;
++};
++
++/* Be careful adding new members here, this structure is allocated on stack! */
++struct scst_user_prealloc_buffer_out {
++ uint32_t cmd_h;
++};
++
++/* Be careful adding new members here, this structure is allocated on stack! */
++union scst_user_prealloc_buffer {
++ struct scst_user_prealloc_buffer_in in;
++ struct scst_user_prealloc_buffer_out out;
++};
++
++#define SCST_USER_REGISTER_DEVICE _IOW('u', 1, struct scst_user_dev_desc)
++#define SCST_USER_UNREGISTER_DEVICE _IO('u', 2)
++#define SCST_USER_SET_OPTIONS _IOW('u', 3, struct scst_user_opt)
++#define SCST_USER_GET_OPTIONS _IOR('u', 4, struct scst_user_opt)
++#define SCST_USER_REPLY_AND_GET_CMD _IOWR('u', 5, struct scst_user_get_cmd)
++#define SCST_USER_REPLY_CMD _IOW('u', 6, struct scst_user_reply_cmd)
++#define SCST_USER_FLUSH_CACHE _IO('u', 7)
++#define SCST_USER_DEVICE_CAPACITY_CHANGED _IO('u', 8)
++#define SCST_USER_GET_EXTENDED_CDB _IOWR('u', 9, struct scst_user_get_ext_cdb)
++#define SCST_USER_PREALLOC_BUFFER _IOWR('u', 10, union scst_user_prealloc_buffer)
++
++/* Values for scst_user_get_cmd.subcode */
++#define SCST_USER_ATTACH_SESS \
++ _IOR('s', UCMD_STATE_ATTACH_SESS, struct scst_user_sess)
++#define SCST_USER_DETACH_SESS \
++ _IOR('s', UCMD_STATE_DETACH_SESS, struct scst_user_sess)
++#define SCST_USER_PARSE \
++ _IOWR('s', UCMD_STATE_PARSING, struct scst_user_scsi_cmd_parse)
++#define SCST_USER_ALLOC_MEM \
++ _IOWR('s', UCMD_STATE_BUF_ALLOCING, struct scst_user_scsi_cmd_alloc_mem)
++#define SCST_USER_EXEC \
++ _IOWR('s', UCMD_STATE_EXECING, struct scst_user_scsi_cmd_exec)
++#define SCST_USER_ON_FREE_CMD \
++ _IOR('s', UCMD_STATE_ON_FREEING, struct scst_user_scsi_on_free_cmd)
++#define SCST_USER_ON_CACHED_MEM_FREE \
++ _IOR('s', UCMD_STATE_ON_CACHE_FREEING, \
++ struct scst_user_on_cached_mem_free)
++#define SCST_USER_TASK_MGMT \
++ _IOWR('s', UCMD_STATE_TM_EXECING, struct scst_user_tm)
++
++#endif /* __SCST_USER_H */
+diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/drivers/scst/dev_handlers/scst_user.c
+--- orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c
++++ linux-2.6.36/drivers/scst/dev_handlers/scst_user.c
+@@ -0,0 +1,3739 @@
++/*
++ * scst_user.c
++ *
++ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * SCSI virtual user space device handler
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/kthread.h>
++#include <linux/delay.h>
++#include <linux/poll.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++
++#define LOG_PREFIX DEV_USER_NAME
++
++#include <scst/scst.h>
++#include <scst/scst_user.h>
++#include "scst_dev_handler.h"
++
++#define DEV_USER_CMD_HASH_ORDER 6
++#define DEV_USER_ATTACH_TIMEOUT (5*HZ)
++
++struct scst_user_dev {
++ struct rw_semaphore dev_rwsem;
++
++ /*
++ * Must be kept here, because it's needed on the cleanup time,
++ * when corresponding scst_dev is already dead.
++ */
++ struct scst_cmd_threads udev_cmd_threads;
++
++ /* Protected by udev_cmd_threads.cmd_list_lock */
++ struct list_head ready_cmd_list;
++
++ /* Protected by dev_rwsem or don't need any protection */
++ unsigned int blocking:1;
++ unsigned int cleanup_done:1;
++ unsigned int tst:3;
++ unsigned int queue_alg:4;
++ unsigned int tas:1;
++ unsigned int swp:1;
++ unsigned int d_sense:1;
++ unsigned int has_own_order_mgmt:1;
++
++ int (*generic_parse)(struct scst_cmd *cmd,
++ int (*get_block)(struct scst_cmd *cmd));
++
++ int block;
++ int def_block;
++
++ struct scst_mem_lim udev_mem_lim;
++ struct sgv_pool *pool;
++ struct sgv_pool *pool_clust;
++
++ uint8_t parse_type;
++ uint8_t on_free_cmd_type;
++ uint8_t memory_reuse_type;
++ uint8_t partial_transfers_type;
++ uint32_t partial_len;
++
++ struct scst_dev_type devtype;
++
++ /* Both protected by udev_cmd_threads.cmd_list_lock */
++ unsigned int handle_counter;
++ struct list_head ucmd_hash[1 << DEV_USER_CMD_HASH_ORDER];
++
++ struct scst_device *sdev;
++
++ int virt_id;
++ struct list_head dev_list_entry;
++ char name[SCST_MAX_NAME];
++
++ struct list_head cleanup_list_entry;
++ struct completion cleanup_cmpl;
++};
++
++/* Most fields are unprotected, since only one thread at time can access them */
++struct scst_user_cmd {
++ struct scst_cmd *cmd;
++ struct scst_user_dev *dev;
++
++ atomic_t ucmd_ref;
++
++ unsigned int buff_cached:1;
++ unsigned int buf_dirty:1;
++ unsigned int background_exec:1;
++ unsigned int aborted:1;
++
++ struct scst_user_cmd *buf_ucmd;
++
++ int cur_data_page;
++ int num_data_pages;
++ int first_page_offset;
++ unsigned long ubuff;
++ struct page **data_pages;
++ struct sgv_pool_obj *sgv;
++
++ /*
++ * Special flags, which can be accessed asynchronously (hence "long").
++ * Protected by udev_cmd_threads.cmd_list_lock.
++ */
++ unsigned long sent_to_user:1;
++ unsigned long jammed:1;
++ unsigned long this_state_unjammed:1;
++ unsigned long seen_by_user:1; /* here only as a small optimization */
++
++ unsigned int state;
++
++ struct list_head ready_cmd_list_entry;
++
++ unsigned int h;
++ struct list_head hash_list_entry;
++
++ int user_cmd_payload_len;
++ struct scst_user_get_cmd user_cmd;
++
++ /* cmpl used only by ATTACH_SESS, mcmd used only by TM */
++ union {
++ struct completion *cmpl;
++ struct scst_mgmt_cmd *mcmd;
++ };
++ int result;
++};
++
++static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
++ gfp_t gfp_mask);
++static void dev_user_free_ucmd(struct scst_user_cmd *ucmd);
++
++static int dev_user_parse(struct scst_cmd *cmd);
++static int dev_user_alloc_data_buf(struct scst_cmd *cmd);
++static int dev_user_exec(struct scst_cmd *cmd);
++static void dev_user_on_free_cmd(struct scst_cmd *cmd);
++static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
++ struct scst_tgt_dev *tgt_dev);
++
++static int dev_user_disk_done(struct scst_cmd *cmd);
++static int dev_user_tape_done(struct scst_cmd *cmd);
++
++static struct page *dev_user_alloc_pages(struct scatterlist *sg,
++ gfp_t gfp_mask, void *priv);
++static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
++ void *priv);
++
++static void dev_user_add_to_ready(struct scst_user_cmd *ucmd);
++
++static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
++ unsigned long *flags);
++
++static int dev_user_process_reply_on_free(struct scst_user_cmd *ucmd);
++static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
++ int status);
++static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status);
++static int dev_user_register_dev(struct file *file,
++ const struct scst_user_dev_desc *dev_desc);
++static int dev_user_unregister_dev(struct file *file);
++static int dev_user_flush_cache(struct file *file);
++static int dev_user_capacity_changed(struct file *file);
++static int dev_user_prealloc_buffer(struct file *file, void __user *arg);
++static int __dev_user_set_opt(struct scst_user_dev *dev,
++ const struct scst_user_opt *opt);
++static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt);
++static int dev_user_get_opt(struct file *file, void __user *arg);
++
++static unsigned int dev_user_poll(struct file *filp, poll_table *wait);
++static long dev_user_ioctl(struct file *file, unsigned int cmd,
++ unsigned long arg);
++static int dev_user_release(struct inode *inode, struct file *file);
++static int dev_user_exit_dev(struct scst_user_dev *dev);
++
++static ssize_t dev_user_sysfs_commands_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf);
++
++static struct kobj_attribute dev_user_commands_attr =
++ __ATTR(commands, S_IRUGO, dev_user_sysfs_commands_show, NULL);
++
++static const struct attribute *dev_user_dev_attrs[] = {
++ &dev_user_commands_attr.attr,
++ NULL,
++};
++
++static int dev_usr_parse(struct scst_cmd *cmd);
++
++/** Data **/
++
++static struct kmem_cache *user_cmd_cachep;
++static struct kmem_cache *user_get_cmd_cachep;
++
++static DEFINE_MUTEX(dev_priv_mutex);
++
++static const struct file_operations dev_user_fops = {
++ .poll = dev_user_poll,
++ .unlocked_ioctl = dev_user_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = dev_user_ioctl,
++#endif
++ .release = dev_user_release,
++};
++
++static struct scst_dev_type dev_user_devtype = {
++ .name = DEV_USER_NAME,
++ .type = -1,
++ .parse = dev_usr_parse,
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
++ .trace_flags = &trace_flag,
++#endif
++};
++
++static int dev_user_major;
++
++static struct class *dev_user_sysfs_class;
++
++static DEFINE_SPINLOCK(dev_list_lock);
++static LIST_HEAD(dev_list);
++
++static DEFINE_SPINLOCK(cleanup_lock);
++static LIST_HEAD(cleanup_list);
++static DECLARE_WAIT_QUEUE_HEAD(cleanup_list_waitQ);
++static struct task_struct *cleanup_thread;
++
++/*
++ * Skip this command if result is not 0. Must be called under
++ * udev_cmd_threads.cmd_list_lock and IRQ off.
++ */
++static inline bool ucmd_get_check(struct scst_user_cmd *ucmd)
++{
++ int r = atomic_inc_return(&ucmd->ucmd_ref);
++ int res;
++ if (unlikely(r == 1)) {
++ TRACE_DBG("ucmd %p is being destroyed", ucmd);
++ atomic_dec(&ucmd->ucmd_ref);
++ res = true;
++ /*
++ * Necessary code is serialized by cmd_list_lock in
++ * cmd_remove_hash()
++ */
++ } else {
++ TRACE_DBG("ucmd %p, new ref_cnt %d", ucmd,
++ atomic_read(&ucmd->ucmd_ref));
++ res = false;
++ }
++ return res;
++}
++
++static inline void ucmd_get(struct scst_user_cmd *ucmd)
++{
++ TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
++ atomic_inc(&ucmd->ucmd_ref);
++ /*
++ * For the same reason as in kref_get(). Let's be safe and
++ * always do it.
++ */
++ smp_mb__after_atomic_inc();
++}
++
++/* Must not be called under cmd_list_lock!! */
++static inline void ucmd_put(struct scst_user_cmd *ucmd)
++{
++ TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
++
++ EXTRACHECKS_BUG_ON(atomic_read(&ucmd->ucmd_ref) == 0);
++
++ if (atomic_dec_and_test(&ucmd->ucmd_ref))
++ dev_user_free_ucmd(ucmd);
++}
++
++static inline int calc_num_pg(unsigned long buf, int len)
++{
++ len += buf & ~PAGE_MASK;
++ return (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);
++}
++
++static void __dev_user_not_reg(void)
++{
++ TRACE_MGMT_DBG("%s", "Device not registered");
++ return;
++}
++
++static inline int dev_user_check_reg(struct scst_user_dev *dev)
++{
++ if (dev == NULL) {
++ __dev_user_not_reg();
++ return -ENODEV;
++ }
++ return 0;
++}
++
++static inline int scst_user_cmd_hashfn(int h)
++{
++ return h & ((1 << DEV_USER_CMD_HASH_ORDER) - 1);
++}
++
++static inline struct scst_user_cmd *__ucmd_find_hash(struct scst_user_dev *dev,
++ unsigned int h)
++{
++ struct list_head *head;
++ struct scst_user_cmd *ucmd;
++
++ head = &dev->ucmd_hash[scst_user_cmd_hashfn(h)];
++ list_for_each_entry(ucmd, head, hash_list_entry) {
++ if (ucmd->h == h) {
++ TRACE_DBG("Found ucmd %p", ucmd);
++ return ucmd;
++ }
++ }
++ return NULL;
++}
++
++static void cmd_insert_hash(struct scst_user_cmd *ucmd)
++{
++ struct list_head *head;
++ struct scst_user_dev *dev = ucmd->dev;
++ struct scst_user_cmd *u;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dev->udev_cmd_threads.cmd_list_lock, flags);
++ do {
++ ucmd->h = dev->handle_counter++;
++ u = __ucmd_find_hash(dev, ucmd->h);
++ } while (u != NULL);
++ head = &dev->ucmd_hash[scst_user_cmd_hashfn(ucmd->h)];
++ list_add_tail(&ucmd->hash_list_entry, head);
++ spin_unlock_irqrestore(&dev->udev_cmd_threads.cmd_list_lock, flags);
++
++ TRACE_DBG("Inserted ucmd %p, h=%d (dev %s)", ucmd, ucmd->h, dev->name);
++ return;
++}
++
++static inline void cmd_remove_hash(struct scst_user_cmd *ucmd)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ucmd->dev->udev_cmd_threads.cmd_list_lock, flags);
++ list_del(&ucmd->hash_list_entry);
++ spin_unlock_irqrestore(&ucmd->dev->udev_cmd_threads.cmd_list_lock, flags);
++
++ TRACE_DBG("Removed ucmd %p, h=%d", ucmd, ucmd->h);
++ return;
++}
++
++static void dev_user_free_ucmd(struct scst_user_cmd *ucmd)
++{
++ TRACE_ENTRY();
++
++ TRACE_MEM("Freeing ucmd %p", ucmd);
++
++ cmd_remove_hash(ucmd);
++ EXTRACHECKS_BUG_ON(ucmd->cmd != NULL);
++
++ kmem_cache_free(user_cmd_cachep, ucmd);
++
++ TRACE_EXIT();
++ return;
++}
++
++static struct page *dev_user_alloc_pages(struct scatterlist *sg,
++ gfp_t gfp_mask, void *priv)
++{
++ struct scst_user_cmd *ucmd = (struct scst_user_cmd *)priv;
++ int offset = 0;
++
++ TRACE_ENTRY();
++
++ /* *sg supposed to be zeroed */
++
++ TRACE_MEM("ucmd %p, ubuff %lx, ucmd->cur_data_page %d", ucmd,
++ ucmd->ubuff, ucmd->cur_data_page);
++
++ if (ucmd->cur_data_page == 0) {
++ TRACE_MEM("ucmd->first_page_offset %d",
++ ucmd->first_page_offset);
++ offset = ucmd->first_page_offset;
++ ucmd_get(ucmd);
++ }
++
++ if (ucmd->cur_data_page >= ucmd->num_data_pages)
++ goto out;
++
++ sg_set_page(sg, ucmd->data_pages[ucmd->cur_data_page],
++ PAGE_SIZE - offset, offset);
++ ucmd->cur_data_page++;
++
++ TRACE_MEM("page=%p, length=%d, offset=%d", sg_page(sg), sg->length,
++ sg->offset);
++ TRACE_BUFFER("Page data", sg_virt(sg), sg->length);
++
++out:
++ TRACE_EXIT();
++ return sg_page(sg);
++}
++
++static void dev_user_on_cached_mem_free(struct scst_user_cmd *ucmd)
++{
++ TRACE_ENTRY();
++
++ TRACE_MEM("Preparing ON_CACHED_MEM_FREE (ucmd %p, h %d, ubuff %lx)",
++ ucmd, ucmd->h, ucmd->ubuff);
++
++ ucmd->user_cmd_payload_len =
++ offsetof(struct scst_user_get_cmd, on_cached_mem_free) +
++ sizeof(ucmd->user_cmd.on_cached_mem_free);
++ ucmd->user_cmd.cmd_h = ucmd->h;
++ ucmd->user_cmd.subcode = SCST_USER_ON_CACHED_MEM_FREE;
++ ucmd->user_cmd.on_cached_mem_free.pbuf = ucmd->ubuff;
++
++ ucmd->state = UCMD_STATE_ON_CACHE_FREEING;
++
++ dev_user_add_to_ready(ucmd);
++
++ TRACE_EXIT();
++ return;
++}
++
++static void dev_user_unmap_buf(struct scst_user_cmd *ucmd)
++{
++ int i;
++
++ TRACE_ENTRY();
++
++ TRACE_MEM("Unmapping data pages (ucmd %p, ubuff %lx, num %d)", ucmd,
++ ucmd->ubuff, ucmd->num_data_pages);
++
++ for (i = 0; i < ucmd->num_data_pages; i++) {
++ struct page *page = ucmd->data_pages[i];
++
++ if (ucmd->buf_dirty)
++ SetPageDirty(page);
++
++ page_cache_release(page);
++ }
++
++ kfree(ucmd->data_pages);
++ ucmd->data_pages = NULL;
++
++ TRACE_EXIT();
++ return;
++}
++
++static void __dev_user_free_sg_entries(struct scst_user_cmd *ucmd)
++{
++ TRACE_ENTRY();
++
++ BUG_ON(ucmd->data_pages == NULL);
++
++ TRACE_MEM("Freeing data pages (ucmd=%p, ubuff=%lx, buff_cached=%d)",
++ ucmd, ucmd->ubuff, ucmd->buff_cached);
++
++ dev_user_unmap_buf(ucmd);
++
++ if (ucmd->buff_cached)
++ dev_user_on_cached_mem_free(ucmd);
++ else
++ ucmd_put(ucmd);
++
++ TRACE_EXIT();
++ return;
++}
++
++static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
++ void *priv)
++{
++ struct scst_user_cmd *ucmd = (struct scst_user_cmd *)priv;
++
++ TRACE_MEM("Freeing data pages (sg=%p, sg_count=%d, priv %p)", sg,
++ sg_count, ucmd);
++
++ __dev_user_free_sg_entries(ucmd);
++
++ return;
++}
++
++static inline int is_buff_cached(struct scst_user_cmd *ucmd)
++{
++ int mem_reuse_type = ucmd->dev->memory_reuse_type;
++
++ if ((mem_reuse_type == SCST_USER_MEM_REUSE_ALL) ||
++ ((ucmd->cmd->data_direction == SCST_DATA_READ) &&
++ (mem_reuse_type == SCST_USER_MEM_REUSE_READ)) ||
++ ((ucmd->cmd->data_direction == SCST_DATA_WRITE) &&
++ (mem_reuse_type == SCST_USER_MEM_REUSE_WRITE)))
++ return 1;
++ else
++ return 0;
++}
++
++static inline int is_need_offs_page(unsigned long buf, int len)
++{
++ return ((buf & ~PAGE_MASK) != 0) &&
++ ((buf & PAGE_MASK) != ((buf+len-1) & PAGE_MASK));
++}
++
++/*
++ * Returns 0 for success, <0 for fatal failure, >0 - need pages.
++ * Unmaps the buffer, if needed in case of error
++ */
++static int dev_user_alloc_sg(struct scst_user_cmd *ucmd, int cached_buff)
++{
++ int res = 0;
++ struct scst_cmd *cmd = ucmd->cmd;
++ struct scst_user_dev *dev = ucmd->dev;
++ struct sgv_pool *pool;
++ gfp_t gfp_mask;
++ int flags = 0;
++ int bufflen, orig_bufflen;
++ int last_len = 0;
++ int out_sg_pages = 0;
++
++ TRACE_ENTRY();
++
++ gfp_mask = __GFP_NOWARN;
++ gfp_mask |= (scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);
++
++ if (cmd->data_direction != SCST_DATA_BIDI) {
++ orig_bufflen = cmd->bufflen;
++ pool = (struct sgv_pool *)cmd->tgt_dev->dh_priv;
++ } else {
++ /* Make out_sg->offset 0 */
++ int len = cmd->bufflen + ucmd->first_page_offset;
++ out_sg_pages = (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);
++ orig_bufflen = (out_sg_pages << PAGE_SHIFT) + cmd->out_bufflen;
++ pool = dev->pool;
++ }
++ bufflen = orig_bufflen;
++
++ EXTRACHECKS_BUG_ON(bufflen == 0);
++
++ if (cached_buff) {
++ flags |= SGV_POOL_RETURN_OBJ_ON_ALLOC_FAIL;
++ if (ucmd->ubuff == 0)
++ flags |= SGV_POOL_NO_ALLOC_ON_CACHE_MISS;
++ } else {
++ TRACE_MEM("%s", "Not cached buff");
++ flags |= SGV_POOL_ALLOC_NO_CACHED;
++ if (ucmd->ubuff == 0) {
++ res = 1;
++ goto out;
++ }
++ bufflen += ucmd->first_page_offset;
++ if (is_need_offs_page(ucmd->ubuff, orig_bufflen))
++ last_len = bufflen & ~PAGE_MASK;
++ else
++ last_len = orig_bufflen & ~PAGE_MASK;
++ }
++ ucmd->buff_cached = cached_buff;
++
++ cmd->sg = sgv_pool_alloc(pool, bufflen, gfp_mask, flags, &cmd->sg_cnt,
++ &ucmd->sgv, &dev->udev_mem_lim, ucmd);
++ if (cmd->sg != NULL) {
++ struct scst_user_cmd *buf_ucmd =
++ (struct scst_user_cmd *)sgv_get_priv(ucmd->sgv);
++
++ TRACE_MEM("Buf ucmd %p (cmd->sg_cnt %d, last seg len %d, "
++ "last_len %d, bufflen %d)", buf_ucmd, cmd->sg_cnt,
++ cmd->sg[cmd->sg_cnt-1].length, last_len, bufflen);
++
++ ucmd->ubuff = buf_ucmd->ubuff;
++ ucmd->buf_ucmd = buf_ucmd;
++
++ EXTRACHECKS_BUG_ON((ucmd->data_pages != NULL) &&
++ (ucmd != buf_ucmd));
++
++ if (last_len != 0) {
++ cmd->sg[cmd->sg_cnt-1].length &= PAGE_MASK;
++ cmd->sg[cmd->sg_cnt-1].length += last_len;
++ }
++
++ TRACE_MEM("Buf alloced (ucmd %p, cached_buff %d, ubuff %lx, "
++ "last seg len %d)", ucmd, cached_buff, ucmd->ubuff,
++ cmd->sg[cmd->sg_cnt-1].length);
++
++ if (cmd->data_direction == SCST_DATA_BIDI) {
++ cmd->out_sg = &cmd->sg[out_sg_pages];
++ cmd->out_sg_cnt = cmd->sg_cnt - out_sg_pages;
++ cmd->sg_cnt = out_sg_pages;
++ TRACE_MEM("cmd %p, out_sg %p, out_sg_cnt %d, sg_cnt %d",
++ cmd, cmd->out_sg, cmd->out_sg_cnt, cmd->sg_cnt);
++ }
++
++ if (unlikely(cmd->sg_cnt > cmd->tgt_dev->max_sg_cnt)) {
++ static int ll;
++ if ((ll < 10) || TRACING_MINOR()) {
++ PRINT_INFO("Unable to complete command due to "
++ "SG IO count limitation (requested %d, "
++ "available %d, tgt lim %d)",
++ cmd->sg_cnt, cmd->tgt_dev->max_sg_cnt,
++ cmd->tgt->sg_tablesize);
++ ll++;
++ }
++ cmd->sg = NULL;
++ /* sgv will be freed in dev_user_free_sgv() */
++ res = -1;
++ }
++ } else {
++ TRACE_MEM("Buf not alloced (ucmd %p, h %d, buff_cached, %d, "
++ "sg_cnt %d, ubuff %lx, sgv %p", ucmd, ucmd->h,
++ ucmd->buff_cached, cmd->sg_cnt, ucmd->ubuff, ucmd->sgv);
++ if (unlikely(cmd->sg_cnt == 0)) {
++ TRACE_MEM("Refused allocation (ucmd %p)", ucmd);
++ BUG_ON(ucmd->sgv != NULL);
++ res = -1;
++ } else {
++ switch (ucmd->state) {
++ case UCMD_STATE_BUF_ALLOCING:
++ res = 1;
++ break;
++ case UCMD_STATE_EXECING:
++ res = -1;
++ break;
++ default:
++ BUG();
++ break;
++ }
++ }
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int dev_user_alloc_space(struct scst_user_cmd *ucmd)
++{
++ int rc, res = SCST_CMD_STATE_DEFAULT;
++ struct scst_cmd *cmd = ucmd->cmd;
++
++ TRACE_ENTRY();
++
++ ucmd->state = UCMD_STATE_BUF_ALLOCING;
++ scst_cmd_set_dh_data_buff_alloced(cmd);
++
++ rc = dev_user_alloc_sg(ucmd, is_buff_cached(ucmd));
++ if (rc == 0)
++ goto out;
++ else if (rc < 0) {
++ scst_set_busy(cmd);
++ res = scst_set_cmd_abnormal_done_state(cmd);
++ goto out;
++ }
++
++ if (!(cmd->data_direction & SCST_DATA_WRITE) &&
++ !scst_is_cmd_local(cmd)) {
++ TRACE_DBG("Delayed alloc, ucmd %p", ucmd);
++ goto out;
++ }
++
++ ucmd->user_cmd_payload_len =
++ offsetof(struct scst_user_get_cmd, alloc_cmd) +
++ sizeof(ucmd->user_cmd.alloc_cmd);
++ ucmd->user_cmd.cmd_h = ucmd->h;
++ ucmd->user_cmd.subcode = SCST_USER_ALLOC_MEM;
++ ucmd->user_cmd.alloc_cmd.sess_h = (unsigned long)cmd->tgt_dev;
++ memcpy(ucmd->user_cmd.alloc_cmd.cdb, cmd->cdb, cmd->cdb_len);
++ ucmd->user_cmd.alloc_cmd.cdb_len = cmd->cdb_len;
++ ucmd->user_cmd.alloc_cmd.ext_cdb_len = cmd->ext_cdb_len;
++ ucmd->user_cmd.alloc_cmd.alloc_len = ucmd->buff_cached ?
++ (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
++ ucmd->user_cmd.alloc_cmd.queue_type = cmd->queue_type;
++ ucmd->user_cmd.alloc_cmd.data_direction = cmd->data_direction;
++ ucmd->user_cmd.alloc_cmd.sn = cmd->tgt_sn;
++
++ dev_user_add_to_ready(ucmd);
++
++ res = SCST_CMD_STATE_STOP;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
++ gfp_t gfp_mask)
++{
++ struct scst_user_cmd *ucmd = NULL;
++
++ TRACE_ENTRY();
++
++ ucmd = kmem_cache_zalloc(user_cmd_cachep, gfp_mask);
++ if (unlikely(ucmd == NULL)) {
++ TRACE(TRACE_OUT_OF_MEM, "Unable to allocate "
++ "user cmd (gfp_mask %x)", gfp_mask);
++ goto out;
++ }
++ ucmd->dev = dev;
++ atomic_set(&ucmd->ucmd_ref, 1);
++
++ cmd_insert_hash(ucmd);
++
++ TRACE_MEM("ucmd %p allocated", ucmd);
++
++out:
++ TRACE_EXIT_HRES((unsigned long)ucmd);
++ return ucmd;
++}
++
++static int dev_user_get_block(struct scst_cmd *cmd)
++{
++ struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
++ /*
++ * No need for locks here, since *_detach() can not be
++ * called, when there are existing commands.
++ */
++ TRACE_EXIT_RES(dev->block);
++ return dev->block;
++}
++
++static int dev_user_parse(struct scst_cmd *cmd)
++{
++ int rc, res = SCST_CMD_STATE_DEFAULT;
++ struct scst_user_cmd *ucmd;
++ int atomic = scst_cmd_atomic(cmd);
++ struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
++ gfp_t gfp_mask = atomic ? GFP_ATOMIC : GFP_KERNEL;
++
++ TRACE_ENTRY();
++
++ if (cmd->dh_priv == NULL) {
++ ucmd = dev_user_alloc_ucmd(dev, gfp_mask);
++ if (unlikely(ucmd == NULL)) {
++ if (atomic) {
++ res = SCST_CMD_STATE_NEED_THREAD_CTX;
++ goto out;
++ } else {
++ scst_set_busy(cmd);
++ goto out_error;
++ }
++ }
++ ucmd->cmd = cmd;
++ cmd->dh_priv = ucmd;
++ } else {
++ ucmd = (struct scst_user_cmd *)cmd->dh_priv;
++ TRACE_DBG("Used ucmd %p, state %x", ucmd, ucmd->state);
++ }
++
++ TRACE_DBG("ucmd %p, cmd %p, state %x", ucmd, cmd, ucmd->state);
++
++ if (ucmd->state == UCMD_STATE_PARSING) {
++ /* We've already done */
++ goto done;
++ }
++
++ EXTRACHECKS_BUG_ON(ucmd->state != UCMD_STATE_NEW);
++
++ switch (dev->parse_type) {
++ case SCST_USER_PARSE_STANDARD:
++ TRACE_DBG("PARSE STANDARD: ucmd %p", ucmd);
++ rc = dev->generic_parse(cmd, dev_user_get_block);
++ if (rc != 0)
++ goto out_invalid;
++ break;
++
++ case SCST_USER_PARSE_EXCEPTION:
++ TRACE_DBG("PARSE EXCEPTION: ucmd %p", ucmd);
++ rc = dev->generic_parse(cmd, dev_user_get_block);
++ if ((rc == 0) && (cmd->op_flags & SCST_INFO_VALID))
++ break;
++ else if (rc == SCST_CMD_STATE_NEED_THREAD_CTX) {
++ TRACE_MEM("Restarting PARSE to thread context "
++ "(ucmd %p)", ucmd);
++ res = SCST_CMD_STATE_NEED_THREAD_CTX;
++ goto out;
++ }
++			/* else fall through */
++
++ case SCST_USER_PARSE_CALL:
++ TRACE_DBG("Preparing PARSE for user space (ucmd=%p, h=%d, "
++ "bufflen %d)", ucmd, ucmd->h, cmd->bufflen);
++ ucmd->user_cmd_payload_len =
++ offsetof(struct scst_user_get_cmd, parse_cmd) +
++ sizeof(ucmd->user_cmd.parse_cmd);
++ ucmd->user_cmd.cmd_h = ucmd->h;
++ ucmd->user_cmd.subcode = SCST_USER_PARSE;
++ ucmd->user_cmd.parse_cmd.sess_h = (unsigned long)cmd->tgt_dev;
++ memcpy(ucmd->user_cmd.parse_cmd.cdb, cmd->cdb, cmd->cdb_len);
++ ucmd->user_cmd.parse_cmd.cdb_len = cmd->cdb_len;
++ ucmd->user_cmd.parse_cmd.ext_cdb_len = cmd->ext_cdb_len;
++ ucmd->user_cmd.parse_cmd.timeout = cmd->timeout / HZ;
++ ucmd->user_cmd.parse_cmd.bufflen = cmd->bufflen;
++ ucmd->user_cmd.parse_cmd.out_bufflen = cmd->out_bufflen;
++ ucmd->user_cmd.parse_cmd.queue_type = cmd->queue_type;
++ ucmd->user_cmd.parse_cmd.data_direction = cmd->data_direction;
++ ucmd->user_cmd.parse_cmd.expected_values_set =
++ cmd->expected_values_set;
++ ucmd->user_cmd.parse_cmd.expected_data_direction =
++ cmd->expected_data_direction;
++ ucmd->user_cmd.parse_cmd.expected_transfer_len =
++ cmd->expected_transfer_len;
++ ucmd->user_cmd.parse_cmd.expected_out_transfer_len =
++ cmd->expected_out_transfer_len;
++ ucmd->user_cmd.parse_cmd.sn = cmd->tgt_sn;
++ ucmd->user_cmd.parse_cmd.op_flags = cmd->op_flags;
++ ucmd->state = UCMD_STATE_PARSING;
++ dev_user_add_to_ready(ucmd);
++ res = SCST_CMD_STATE_STOP;
++ goto out;
++
++ default:
++ BUG();
++ goto out;
++ }
++
++done:
++ if (cmd->bufflen == 0) {
++ /*
++ * According to SPC bufflen 0 for data transfer commands isn't
++ * an error, so we need to fix the transfer direction.
++ */
++ cmd->data_direction = SCST_DATA_NONE;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_invalid:
++ PRINT_ERROR("PARSE failed (ucmd %p, rc %d)", ucmd, rc);
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_opcode));
++
++out_error:
++ res = scst_set_cmd_abnormal_done_state(cmd);
++ goto out;
++}
++
++static int dev_user_alloc_data_buf(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_DEFAULT;
++ struct scst_user_cmd *ucmd = (struct scst_user_cmd *)cmd->dh_priv;
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON((ucmd->state != UCMD_STATE_NEW) &&
++ (ucmd->state != UCMD_STATE_PARSING) &&
++ (ucmd->state != UCMD_STATE_BUF_ALLOCING));
++
++ res = dev_user_alloc_space(ucmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void dev_user_flush_dcache(struct scst_user_cmd *ucmd)
++{
++ struct scst_user_cmd *buf_ucmd = ucmd->buf_ucmd;
++ unsigned long start = buf_ucmd->ubuff;
++ int i, bufflen = ucmd->cmd->bufflen;
++
++ TRACE_ENTRY();
++
++ if (start == 0)
++ goto out;
++
++ /*
++ * Possibly, flushing of all the pages from ucmd->cmd->sg can be
++	 * faster, since it should be cache hot, while ucmd->buf_ucmd and
++	 * buf_ucmd->data_pages are cache cold. On the other hand,
++	 * sizeof(buf_ucmd->data_pages[0]) is considerably smaller than
++	 * sizeof(ucmd->cmd->sg[0]), so on big buffers going over the
++	 * data_pages array can lead to fewer cache misses. So, real numbers
++	 * are needed. ToDo.
++ */
++
++ for (i = 0; (bufflen > 0) && (i < buf_ucmd->num_data_pages); i++) {
++ struct page *page;
++ page = buf_ucmd->data_pages[i];
++#ifdef ARCH_HAS_FLUSH_ANON_PAGE
++ struct vm_area_struct *vma = find_vma(current->mm, start);
++ if (vma != NULL)
++ flush_anon_page(vma, page, start);
++#endif
++ flush_dcache_page(page);
++ start += PAGE_SIZE;
++ bufflen -= PAGE_SIZE;
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static int dev_user_exec(struct scst_cmd *cmd)
++{
++ struct scst_user_cmd *ucmd = (struct scst_user_cmd *)cmd->dh_priv;
++ int res = SCST_EXEC_COMPLETED;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Preparing EXEC for user space (ucmd=%p, h=%d, "
++ "bufflen %d, data_len %d, ubuff %lx)", ucmd, ucmd->h,
++ cmd->bufflen, cmd->data_len, ucmd->ubuff);
++
++ if (cmd->data_direction & SCST_DATA_WRITE)
++ dev_user_flush_dcache(ucmd);
++
++ BUILD_BUG_ON(sizeof(ucmd->user_cmd.exec_cmd.cdb) != sizeof(cmd->cdb));
++
++ ucmd->user_cmd_payload_len =
++ offsetof(struct scst_user_get_cmd, exec_cmd) +
++ sizeof(ucmd->user_cmd.exec_cmd);
++ ucmd->user_cmd.cmd_h = ucmd->h;
++ ucmd->user_cmd.subcode = SCST_USER_EXEC;
++ ucmd->user_cmd.exec_cmd.sess_h = (unsigned long)cmd->tgt_dev;
++ memcpy(ucmd->user_cmd.exec_cmd.cdb, cmd->cdb, cmd->cdb_len);
++ ucmd->user_cmd.exec_cmd.cdb_len = cmd->cdb_len;
++ ucmd->user_cmd.exec_cmd.ext_cdb_len = cmd->ext_cdb_len;
++ ucmd->user_cmd.exec_cmd.bufflen = cmd->bufflen;
++ ucmd->user_cmd.exec_cmd.data_len = cmd->data_len;
++ ucmd->user_cmd.exec_cmd.pbuf = ucmd->ubuff;
++ if ((ucmd->ubuff == 0) && (cmd->data_direction != SCST_DATA_NONE)) {
++ ucmd->user_cmd.exec_cmd.alloc_len = ucmd->buff_cached ?
++ (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
++ }
++ ucmd->user_cmd.exec_cmd.queue_type = cmd->queue_type;
++ ucmd->user_cmd.exec_cmd.data_direction = cmd->data_direction;
++ ucmd->user_cmd.exec_cmd.partial = 0;
++ ucmd->user_cmd.exec_cmd.timeout = cmd->timeout / HZ;
++ ucmd->user_cmd.exec_cmd.p_out_buf = ucmd->ubuff +
++ (cmd->sg_cnt << PAGE_SHIFT);
++ ucmd->user_cmd.exec_cmd.out_bufflen = cmd->out_bufflen;
++ ucmd->user_cmd.exec_cmd.sn = cmd->tgt_sn;
++
++ ucmd->state = UCMD_STATE_EXECING;
++
++ dev_user_add_to_ready(ucmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void dev_user_free_sgv(struct scst_user_cmd *ucmd)
++{
++ if (ucmd->sgv != NULL) {
++ sgv_pool_free(ucmd->sgv, &ucmd->dev->udev_mem_lim);
++ ucmd->sgv = NULL;
++ } else if (ucmd->data_pages != NULL) {
++ /* We mapped pages, but for some reason didn't allocate them */
++ ucmd_get(ucmd);
++ __dev_user_free_sg_entries(ucmd);
++ }
++ return;
++}
++
++static void dev_user_on_free_cmd(struct scst_cmd *cmd)
++{
++ struct scst_user_cmd *ucmd = (struct scst_user_cmd *)cmd->dh_priv;
++
++ TRACE_ENTRY();
++
++ if (unlikely(ucmd == NULL))
++ goto out;
++
++ TRACE_MEM("ucmd %p, cmd %p, buff_cached %d, ubuff %lx", ucmd, ucmd->cmd,
++ ucmd->buff_cached, ucmd->ubuff);
++
++ ucmd->cmd = NULL;
++ if ((cmd->data_direction & SCST_DATA_WRITE) && ucmd->buf_ucmd != NULL)
++ ucmd->buf_ucmd->buf_dirty = 1;
++
++ if (ucmd->dev->on_free_cmd_type == SCST_USER_ON_FREE_CMD_IGNORE) {
++ ucmd->state = UCMD_STATE_ON_FREE_SKIPPED;
++ /* The state assignment must be before freeing sgv! */
++ goto out_reply;
++ }
++
++ if (unlikely(!ucmd->seen_by_user)) {
++ TRACE_MGMT_DBG("Not seen by user ucmd %p", ucmd);
++ goto out_reply;
++ }
++
++ ucmd->user_cmd_payload_len =
++ offsetof(struct scst_user_get_cmd, on_free_cmd) +
++ sizeof(ucmd->user_cmd.on_free_cmd);
++ ucmd->user_cmd.cmd_h = ucmd->h;
++ ucmd->user_cmd.subcode = SCST_USER_ON_FREE_CMD;
++ ucmd->user_cmd.on_free_cmd.pbuf = ucmd->ubuff;
++ ucmd->user_cmd.on_free_cmd.resp_data_len = cmd->resp_data_len;
++ ucmd->user_cmd.on_free_cmd.buffer_cached = ucmd->buff_cached;
++ ucmd->user_cmd.on_free_cmd.aborted = ucmd->aborted;
++ ucmd->user_cmd.on_free_cmd.status = cmd->status;
++ ucmd->user_cmd.on_free_cmd.delivery_status = cmd->delivery_status;
++
++ ucmd->state = UCMD_STATE_ON_FREEING;
++
++ dev_user_add_to_ready(ucmd);
++
++out:
++ TRACE_EXIT();
++ return;
++
++out_reply:
++ dev_user_process_reply_on_free(ucmd);
++ goto out;
++}
++
++static void dev_user_set_block(struct scst_cmd *cmd, int block)
++{
++ struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
++ /*
++	 * No need for locks here, since *_detach() cannot be
++	 * called while there are existing commands.
++ */
++ TRACE_DBG("dev %p, new block %d", dev, block);
++ if (block != 0)
++ dev->block = block;
++ else
++ dev->block = dev->def_block;
++ return;
++}
++
++static int dev_user_disk_done(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_DEFAULT;
++
++ TRACE_ENTRY();
++
++ res = scst_block_generic_dev_done(cmd, dev_user_set_block);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int dev_user_tape_done(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_DEFAULT;
++
++ TRACE_ENTRY();
++
++ res = scst_tape_generic_dev_done(cmd, dev_user_set_block);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void dev_user_add_to_ready(struct scst_user_cmd *ucmd)
++{
++ struct scst_user_dev *dev = ucmd->dev;
++ unsigned long flags;
++ int do_wake = in_interrupt();
++
++ TRACE_ENTRY();
++
++ if (ucmd->cmd)
++ do_wake |= ucmd->cmd->preprocessing_only;
++
++ spin_lock_irqsave(&dev->udev_cmd_threads.cmd_list_lock, flags);
++
++ ucmd->this_state_unjammed = 0;
++
++ if ((ucmd->state == UCMD_STATE_PARSING) ||
++ (ucmd->state == UCMD_STATE_BUF_ALLOCING)) {
++ /*
++		 * If we don't put such commands at the queue head, then under
++		 * high load we might delay threads waiting for memory
++		 * allocations for too long and start losing NOPs, which
++		 * would lead remote initiators to consider us unresponsive
++		 * and stuck => broken connections, etc. If none of our
++		 * commands completes within the NOP timeout to let the head
++		 * commands go, then we are really overloaded and/or stuck.
++ */
++ TRACE_DBG("Adding ucmd %p (state %d) to head of ready "
++ "cmd list", ucmd, ucmd->state);
++ list_add(&ucmd->ready_cmd_list_entry,
++ &dev->ready_cmd_list);
++ } else if (unlikely(ucmd->state == UCMD_STATE_TM_EXECING) ||
++ unlikely(ucmd->state == UCMD_STATE_ATTACH_SESS) ||
++ unlikely(ucmd->state == UCMD_STATE_DETACH_SESS)) {
++ TRACE_MGMT_DBG("Adding mgmt ucmd %p (state %d) to head of "
++ "ready cmd list", ucmd, ucmd->state);
++ list_add(&ucmd->ready_cmd_list_entry,
++ &dev->ready_cmd_list);
++ do_wake = 1;
++ } else {
++ if ((ucmd->cmd != NULL) &&
++ unlikely((ucmd->cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))) {
++ TRACE_DBG("Adding HQ ucmd %p to head of ready cmd list",
++ ucmd);
++ list_add(&ucmd->ready_cmd_list_entry,
++ &dev->ready_cmd_list);
++ } else {
++ TRACE_DBG("Adding ucmd %p to ready cmd list", ucmd);
++ list_add_tail(&ucmd->ready_cmd_list_entry,
++ &dev->ready_cmd_list);
++ }
++ do_wake |= ((ucmd->state == UCMD_STATE_ON_CACHE_FREEING) ||
++ (ucmd->state == UCMD_STATE_ON_FREEING));
++ }
++
++ if (do_wake) {
++ TRACE_DBG("Waking up dev %p", dev);
++ wake_up(&dev->udev_cmd_threads.cmd_list_waitQ);
++ }
++
++ spin_unlock_irqrestore(&dev->udev_cmd_threads.cmd_list_lock, flags);
++
++ TRACE_EXIT();
++ return;
++}
++
++static int dev_user_map_buf(struct scst_user_cmd *ucmd, unsigned long ubuff,
++ int num_pg)
++{
++ int res = 0, rc;
++ int i;
++ struct task_struct *tsk = current;
++
++ TRACE_ENTRY();
++
++ if (unlikely(ubuff == 0))
++ goto out_nomem;
++
++ BUG_ON(ucmd->data_pages != NULL);
++
++ ucmd->num_data_pages = num_pg;
++
++ ucmd->data_pages =
++ kmalloc(sizeof(*ucmd->data_pages) * ucmd->num_data_pages,
++ GFP_KERNEL);
++ if (ucmd->data_pages == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "Unable to allocate data_pages array "
++ "(num_data_pages=%d)", ucmd->num_data_pages);
++ res = -ENOMEM;
++ goto out_nomem;
++ }
++
++ TRACE_MEM("Mapping buffer (ucmd %p, ubuff %lx, ucmd->num_data_pages %d,"
++ " first_page_offset %d, len %d)", ucmd, ubuff,
++ ucmd->num_data_pages, (int)(ubuff & ~PAGE_MASK),
++ (ucmd->cmd != NULL) ? ucmd->cmd->bufflen : -1);
++
++ down_read(&tsk->mm->mmap_sem);
++ rc = get_user_pages(tsk, tsk->mm, ubuff, ucmd->num_data_pages,
++ 1/*writable*/, 0/*don't force*/, ucmd->data_pages, NULL);
++ up_read(&tsk->mm->mmap_sem);
++
++ /* get_user_pages() flushes dcache */
++
++ if (rc < ucmd->num_data_pages)
++ goto out_unmap;
++
++ ucmd->ubuff = ubuff;
++ ucmd->first_page_offset = (ubuff & ~PAGE_MASK);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_nomem:
++ if (ucmd->cmd != NULL)
++ scst_set_busy(ucmd->cmd);
++	/* fall through to out_err */
++
++out_err:
++ if (ucmd->cmd != NULL)
++ scst_set_cmd_abnormal_done_state(ucmd->cmd);
++ goto out;
++
++out_unmap:
++ PRINT_ERROR("Failed to get %d user pages (rc %d)",
++ ucmd->num_data_pages, rc);
++ if (rc > 0) {
++ for (i = 0; i < rc; i++)
++ page_cache_release(ucmd->data_pages[i]);
++ }
++ kfree(ucmd->data_pages);
++ ucmd->data_pages = NULL;
++ res = -EFAULT;
++ if (ucmd->cmd != NULL)
++ scst_set_cmd_error(ucmd->cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out_err;
++}
++
++static int dev_user_process_reply_alloc(struct scst_user_cmd *ucmd,
++ struct scst_user_reply_cmd *reply)
++{
++ int res = 0;
++ struct scst_cmd *cmd = ucmd->cmd;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("ucmd %p, pbuf %llx", ucmd, reply->alloc_reply.pbuf);
++
++ if (likely(reply->alloc_reply.pbuf != 0)) {
++ int pages;
++ if (ucmd->buff_cached) {
++ if (unlikely((reply->alloc_reply.pbuf & ~PAGE_MASK) != 0)) {
++ PRINT_ERROR("Supplied pbuf %llx isn't "
++ "page aligned",
++ reply->alloc_reply.pbuf);
++ goto out_hwerr;
++ }
++ pages = cmd->sg_cnt;
++ } else
++ pages = calc_num_pg(reply->alloc_reply.pbuf,
++ cmd->bufflen);
++ res = dev_user_map_buf(ucmd, reply->alloc_reply.pbuf, pages);
++ } else {
++ scst_set_busy(ucmd->cmd);
++ scst_set_cmd_abnormal_done_state(ucmd->cmd);
++ }
++
++out_process:
++ scst_post_alloc_data_buf(cmd);
++ scst_process_active_cmd(cmd, false);
++
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_hwerr:
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
++ scst_set_cmd_abnormal_done_state(ucmd->cmd);
++ res = -EINVAL;
++ goto out_process;
++}
++
++static int dev_user_process_reply_parse(struct scst_user_cmd *ucmd,
++ struct scst_user_reply_cmd *reply)
++{
++ int res = 0, rc;
++ struct scst_user_scsi_cmd_reply_parse *preply =
++ &reply->parse_reply;
++ struct scst_cmd *cmd = ucmd->cmd;
++
++ TRACE_ENTRY();
++
++ if (preply->status != 0)
++ goto out_status;
++
++ if (unlikely(preply->queue_type > SCST_CMD_QUEUE_ACA))
++ goto out_inval;
++
++ if (unlikely((preply->data_direction != SCST_DATA_WRITE) &&
++ (preply->data_direction != SCST_DATA_READ) &&
++ (preply->data_direction != SCST_DATA_BIDI) &&
++ (preply->data_direction != SCST_DATA_NONE)))
++ goto out_inval;
++
++ if (unlikely((preply->data_direction != SCST_DATA_NONE) &&
++ (preply->bufflen == 0)))
++ goto out_inval;
++
++ if (unlikely((preply->bufflen < 0) || (preply->data_len < 0)))
++ goto out_inval;
++
++ if (unlikely(preply->cdb_len > SCST_MAX_CDB_SIZE))
++ goto out_inval;
++
++ TRACE_DBG("ucmd %p, queue_type %x, data_direction, %x, bufflen %d, "
++ "data_len %d, pbuf %llx, cdb_len %d, op_flags %x", ucmd,
++ preply->queue_type, preply->data_direction, preply->bufflen,
++ preply->data_len, reply->alloc_reply.pbuf, preply->cdb_len,
++ preply->op_flags);
++
++ cmd->queue_type = preply->queue_type;
++ cmd->data_direction = preply->data_direction;
++ cmd->bufflen = preply->bufflen;
++ cmd->data_len = preply->data_len;
++ if (preply->cdb_len > 0)
++ cmd->cdb_len = preply->cdb_len;
++ if (preply->op_flags & SCST_INFO_VALID)
++ cmd->op_flags = preply->op_flags;
++
++out_process:
++ scst_post_parse(cmd);
++ scst_process_active_cmd(cmd, false);
++
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_inval:
++ PRINT_ERROR("Invalid parse_reply parameters (LUN %lld, op %x, cmd %p)",
++ (long long unsigned int)cmd->lun, cmd->cdb[0], cmd);
++ PRINT_BUFFER("Invalid parse_reply", reply, sizeof(*reply));
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
++ res = -EINVAL;
++ goto out_abnormal;
++
++out_hwerr_res_set:
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
++
++out_abnormal:
++ scst_set_cmd_abnormal_done_state(cmd);
++ goto out_process;
++
++out_status:
++ TRACE_DBG("ucmd %p returned with error from user status %x",
++ ucmd, preply->status);
++
++ if (preply->sense_len != 0) {
++ int sense_len;
++
++ res = scst_alloc_sense(cmd, 0);
++ if (res != 0)
++ goto out_hwerr_res_set;
++
++ sense_len = min_t(int, cmd->sense_buflen, preply->sense_len);
++
++ rc = copy_from_user(cmd->sense,
++ (void __user *)(unsigned long)preply->psense_buffer,
++ sense_len);
++ if (rc != 0) {
++ PRINT_ERROR("Failed to copy %d sense's bytes", rc);
++ res = -EFAULT;
++ goto out_hwerr_res_set;
++ }
++ cmd->sense_valid_len = sense_len;
++ }
++ scst_set_cmd_error_status(cmd, preply->status);
++ goto out_abnormal;
++}
++
++static int dev_user_process_reply_on_free(struct scst_user_cmd *ucmd)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("ON FREE ucmd %p", ucmd);
++
++ dev_user_free_sgv(ucmd);
++ ucmd_put(ucmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int dev_user_process_reply_on_cache_free(struct scst_user_cmd *ucmd)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("ON CACHE FREE ucmd %p", ucmd);
++
++ ucmd_put(ucmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int dev_user_process_reply_exec(struct scst_user_cmd *ucmd,
++ struct scst_user_reply_cmd *reply)
++{
++ int res = 0;
++ struct scst_user_scsi_cmd_reply_exec *ereply =
++ &reply->exec_reply;
++ struct scst_cmd *cmd = ucmd->cmd;
++
++ TRACE_ENTRY();
++
++ if (ereply->reply_type == SCST_EXEC_REPLY_COMPLETED) {
++ if (ucmd->background_exec) {
++ TRACE_DBG("Background ucmd %p finished", ucmd);
++ ucmd_put(ucmd);
++ goto out;
++ }
++ if (unlikely(ereply->resp_data_len > cmd->bufflen))
++ goto out_inval;
++ if (unlikely((cmd->data_direction != SCST_DATA_READ) &&
++ (ereply->resp_data_len != 0)))
++ goto out_inval;
++ } else if (ereply->reply_type == SCST_EXEC_REPLY_BACKGROUND) {
++ if (unlikely(ucmd->background_exec))
++ goto out_inval;
++ if (unlikely((cmd->data_direction & SCST_DATA_READ) ||
++ (cmd->resp_data_len != 0)))
++ goto out_inval;
++ /*
++		 * The background_exec assignment must come after the ucmd get.
++		 * Otherwise, due to reordering, dev_user_process_reply()
++		 * could see it set and destroy the ucmd before it is "got" here.
++ */
++ ucmd_get(ucmd);
++ ucmd->background_exec = 1;
++ TRACE_DBG("Background ucmd %p", ucmd);
++ goto out_compl;
++ } else
++ goto out_inval;
++
++ TRACE_DBG("ucmd %p, status %d, resp_data_len %d", ucmd,
++ ereply->status, ereply->resp_data_len);
++
++ cmd->atomic = 0;
++
++ if (ereply->resp_data_len != 0) {
++ if (ucmd->ubuff == 0) {
++ int pages, rc;
++ if (unlikely(ereply->pbuf == 0))
++ goto out_busy;
++ if (ucmd->buff_cached) {
++ if (unlikely((ereply->pbuf & ~PAGE_MASK) != 0)) {
++ PRINT_ERROR("Supplied pbuf %llx isn't "
++ "page aligned", ereply->pbuf);
++ goto out_hwerr;
++ }
++ pages = cmd->sg_cnt;
++ } else
++ pages = calc_num_pg(ereply->pbuf, cmd->bufflen);
++ rc = dev_user_map_buf(ucmd, ereply->pbuf, pages);
++ if ((rc != 0) || (ucmd->ubuff == 0))
++ goto out_compl;
++
++ rc = dev_user_alloc_sg(ucmd, ucmd->buff_cached);
++ if (unlikely(rc != 0))
++ goto out_busy;
++ } else
++ dev_user_flush_dcache(ucmd);
++ cmd->may_need_dma_sync = 1;
++ scst_set_resp_data_len(cmd, ereply->resp_data_len);
++ } else if (cmd->resp_data_len != ereply->resp_data_len) {
++ if (ucmd->ubuff == 0) {
++ /*
++ * We have an empty SG, so can't call
++ * scst_set_resp_data_len()
++ */
++ cmd->resp_data_len = ereply->resp_data_len;
++ cmd->resid_possible = 1;
++ } else
++ scst_set_resp_data_len(cmd, ereply->resp_data_len);
++ }
++
++ cmd->status = ereply->status;
++ if (ereply->sense_len != 0) {
++ int sense_len, rc;
++
++ res = scst_alloc_sense(cmd, 0);
++ if (res != 0)
++ goto out_compl;
++
++ sense_len = min((int)cmd->sense_buflen, (int)ereply->sense_len);
++
++ rc = copy_from_user(cmd->sense,
++ (void __user *)(unsigned long)ereply->psense_buffer,
++ sense_len);
++ if (rc != 0) {
++ PRINT_ERROR("Failed to copy %d sense's bytes", rc);
++ res = -EFAULT;
++ goto out_hwerr_res_set;
++ }
++ cmd->sense_valid_len = sense_len;
++ }
++
++out_compl:
++ cmd->completed = 1;
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_DIRECT);
++ /* !! At this point cmd can be already freed !! */
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_inval:
++ PRINT_ERROR("Invalid exec_reply parameters (LUN %lld, op %x, cmd %p)",
++ (long long unsigned int)cmd->lun, cmd->cdb[0], cmd);
++ PRINT_BUFFER("Invalid exec_reply", reply, sizeof(*reply));
++
++out_hwerr:
++ res = -EINVAL;
++
++out_hwerr_res_set:
++ if (ucmd->background_exec) {
++ ucmd_put(ucmd);
++ goto out;
++ } else {
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out_compl;
++ }
++
++out_busy:
++ scst_set_busy(cmd);
++ goto out_compl;
++}
++
++static int dev_user_process_reply(struct scst_user_dev *dev,
++ struct scst_user_reply_cmd *reply)
++{
++ int res = 0;
++ struct scst_user_cmd *ucmd;
++ int state;
++
++ TRACE_ENTRY();
++
++ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++
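++	/* Look up the ucmd this reply refers to by its handle */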
++ ucmd = __ucmd_find_hash(dev, reply->cmd_h);
++ if (unlikely(ucmd == NULL)) {
++ TRACE_MGMT_DBG("cmd_h %d not found", reply->cmd_h);
++ res = -ESRCH;
++ goto out_unlock;
++ }
++
++ if (unlikely(ucmd_get_check(ucmd))) {
++ TRACE_MGMT_DBG("Found being destroyed cmd_h %d", reply->cmd_h);
++ res = -ESRCH;
++ goto out_unlock;
++ }
++
++ /* To sync. with dev_user_process_reply_exec(). See comment there. */
++ smp_mb();
++ if (ucmd->background_exec) {
++ state = UCMD_STATE_EXECING;
++ goto unlock_process;
++ }
++
++ if (unlikely(ucmd->this_state_unjammed)) {
++ TRACE_MGMT_DBG("Reply on unjammed ucmd %p, ignoring",
++ ucmd);
++ goto out_unlock_put;
++ }
++
++ if (unlikely(!ucmd->sent_to_user)) {
++ TRACE_MGMT_DBG("Ucmd %p isn't in the sent to user "
++ "state %x", ucmd, ucmd->state);
++ res = -EINVAL;
++ goto out_unlock_put;
++ }
++
++ if (unlikely(reply->subcode != ucmd->user_cmd.subcode))
++ goto out_wrong_state;
++
++ if (unlikely(_IOC_NR(reply->subcode) != ucmd->state))
++ goto out_wrong_state;
++
++ state = ucmd->state;
++ ucmd->sent_to_user = 0;
++
++unlock_process:
++ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++
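++	/* Dispatch on the state the ucmd was in when it was sent to user space */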
++ switch (state) {
++ case UCMD_STATE_PARSING:
++ res = dev_user_process_reply_parse(ucmd, reply);
++ break;
++
++ case UCMD_STATE_BUF_ALLOCING:
++ res = dev_user_process_reply_alloc(ucmd, reply);
++ break;
++
++ case UCMD_STATE_EXECING:
++ res = dev_user_process_reply_exec(ucmd, reply);
++ break;
++
++ case UCMD_STATE_ON_FREEING:
++ res = dev_user_process_reply_on_free(ucmd);
++ break;
++
++ case UCMD_STATE_ON_CACHE_FREEING:
++ res = dev_user_process_reply_on_cache_free(ucmd);
++ break;
++
++ case UCMD_STATE_TM_EXECING:
++ res = dev_user_process_reply_tm_exec(ucmd, reply->result);
++ break;
++
++ case UCMD_STATE_ATTACH_SESS:
++ case UCMD_STATE_DETACH_SESS:
++ res = dev_user_process_reply_sess(ucmd, reply->result);
++ break;
++
++ default:
++ BUG();
++ break;
++ }
++
++out_put:
++ ucmd_put(ucmd);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_wrong_state:
++ PRINT_ERROR("Command's %p subcode %x doesn't match internal "
++ "command's state %x or reply->subcode (%x) != ucmd->subcode "
++ "(%x)", ucmd, _IOC_NR(reply->subcode), ucmd->state,
++ reply->subcode, ucmd->user_cmd.subcode);
++ res = -EINVAL;
++ dev_user_unjam_cmd(ucmd, 0, NULL);
++
++out_unlock_put:
++ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++ goto out_put;
++
++out_unlock:
++ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++ goto out;
++}
++
++static int dev_user_reply_cmd(struct file *file, void __user *arg)
++{
++ int res = 0, rc;
++ struct scst_user_dev *dev;
++ struct scst_user_reply_cmd reply;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&dev_priv_mutex);
++ dev = (struct scst_user_dev *)file->private_data;
++ res = dev_user_check_reg(dev);
++ if (unlikely(res != 0)) {
++ mutex_unlock(&dev_priv_mutex);
++ goto out;
++ }
++ down_read(&dev->dev_rwsem);
++ mutex_unlock(&dev_priv_mutex);
++
++ rc = copy_from_user(&reply, arg, sizeof(reply));
++ if (unlikely(rc != 0)) {
++ PRINT_ERROR("Failed to copy %d user's bytes", rc);
++ res = -EFAULT;
++ goto out_up;
++ }
++
++ TRACE_DBG("Reply for dev %s", dev->name);
++
++ TRACE_BUFFER("Reply", &reply, sizeof(reply));
++
++ res = dev_user_process_reply(dev, &reply);
++ if (unlikely(res < 0))
++ goto out_up;
++
++out_up:
++ up_read(&dev->dev_rwsem);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int dev_user_get_ext_cdb(struct file *file, void __user *arg)
++{
++ int res = 0, rc;
++ struct scst_user_dev *dev;
++ struct scst_user_cmd *ucmd;
++ struct scst_cmd *cmd = NULL;
++ struct scst_user_get_ext_cdb get;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&dev_priv_mutex);
++ dev = (struct scst_user_dev *)file->private_data;
++ res = dev_user_check_reg(dev);
++ if (unlikely(res != 0)) {
++ mutex_unlock(&dev_priv_mutex);
++ goto out;
++ }
++ down_read(&dev->dev_rwsem);
++ mutex_unlock(&dev_priv_mutex);
++
++ rc = copy_from_user(&get, arg, sizeof(get));
++ if (unlikely(rc != 0)) {
++ PRINT_ERROR("Failed to copy %d user's bytes", rc);
++ res = -EFAULT;
++ goto out_up;
++ }
++
++ TRACE_MGMT_DBG("Get ext cdb for dev %s", dev->name);
++
++ TRACE_BUFFER("Get ext cdb", &get, sizeof(get));
++
++ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++
++ ucmd = __ucmd_find_hash(dev, get.cmd_h);
++ if (unlikely(ucmd == NULL)) {
++ TRACE_MGMT_DBG("cmd_h %d not found", get.cmd_h);
++ res = -ESRCH;
++ goto out_unlock;
++ }
++
++ if (unlikely(ucmd_get_check(ucmd))) {
++ TRACE_MGMT_DBG("Found being destroyed cmd_h %d", get.cmd_h);
++ res = -ESRCH;
++ goto out_unlock;
++ }
++
++ if ((ucmd->cmd != NULL) && (ucmd->state <= UCMD_STATE_EXECING) &&
++ (ucmd->sent_to_user || ucmd->background_exec)) {
++ cmd = ucmd->cmd;
++ scst_cmd_get(cmd);
++ } else {
++ TRACE_MGMT_DBG("Invalid ucmd state %d for cmd_h %d",
++ ucmd->state, get.cmd_h);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++
++ if (cmd == NULL)
++ goto out_put;
++
++ if (cmd->ext_cdb == NULL)
++ goto out_cmd_put;
++
++ TRACE_BUFFER("EXT CDB", cmd->ext_cdb, cmd->ext_cdb_len);
++ rc = copy_to_user((void __user *)(unsigned long)get.ext_cdb_buffer,
++ cmd->ext_cdb, cmd->ext_cdb_len);
++ if (unlikely(rc != 0)) {
++ PRINT_ERROR("Failed to copy to user %d bytes", rc);
++ res = -EFAULT;
++ goto out_cmd_put;
++ }
++
++out_cmd_put:
++ scst_cmd_put(cmd);
++
++out_put:
++ ucmd_put(ucmd);
++
++out_up:
++ up_read(&dev->dev_rwsem);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_unlock:
++ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++ goto out_up;
++}
++
++static int dev_user_process_scst_commands(struct scst_user_dev *dev)
++ __releases(&dev->udev_cmd_threads.cmd_list_lock)
++ __acquires(&dev->udev_cmd_threads.cmd_list_lock)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
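++	/* Drain the active cmd list, dropping the lock around each command's processing */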
++ while (!list_empty(&dev->udev_cmd_threads.active_cmd_list)) {
++ struct scst_cmd *cmd = list_entry(
++ dev->udev_cmd_threads.active_cmd_list.next, typeof(*cmd),
++ cmd_list_entry);
++ TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
++ list_del(&cmd->cmd_list_entry);
++ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++ scst_process_active_cmd(cmd, false);
++ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++ res++;
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* Called under udev_cmd_threads.cmd_list_lock and IRQ off */
++static struct scst_user_cmd *__dev_user_get_next_cmd(struct list_head *cmd_list)
++ __releases(&dev->udev_cmd_threads.cmd_list_lock)
++ __acquires(&dev->udev_cmd_threads.cmd_list_lock)
++{
++ struct scst_user_cmd *u;
++
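++	/* Take the first ready ucmd; commands failing local checks or already aborted restart the scan */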
++again:
++ u = NULL;
++ if (!list_empty(cmd_list)) {
++ u = list_entry(cmd_list->next, typeof(*u),
++ ready_cmd_list_entry);
++
++ TRACE_DBG("Found ready ucmd %p", u);
++ list_del(&u->ready_cmd_list_entry);
++
++ EXTRACHECKS_BUG_ON(u->this_state_unjammed);
++
++ if (u->cmd != NULL) {
++ if (u->state == UCMD_STATE_EXECING) {
++ struct scst_user_dev *dev = u->dev;
++ int rc;
++
++ EXTRACHECKS_BUG_ON(u->jammed);
++
++ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++
++ rc = scst_check_local_events(u->cmd);
++ if (unlikely(rc != 0)) {
++ u->cmd->scst_cmd_done(u->cmd,
++ SCST_CMD_STATE_DEFAULT,
++ SCST_CONTEXT_DIRECT);
++ /*
++ * !! At this point cmd & u can be !!
++ * !! already freed !!
++ */
++ spin_lock_irq(
++ &dev->udev_cmd_threads.cmd_list_lock);
++ goto again;
++ }
++
++ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++ } else if (unlikely(test_bit(SCST_CMD_ABORTED,
++ &u->cmd->cmd_flags))) {
++ switch (u->state) {
++ case UCMD_STATE_PARSING:
++ case UCMD_STATE_BUF_ALLOCING:
++ TRACE_MGMT_DBG("Aborting ucmd %p", u);
++ dev_user_unjam_cmd(u, 0, NULL);
++ goto again;
++ case UCMD_STATE_EXECING:
++ EXTRACHECKS_BUG_ON(1);
++ }
++ }
++ }
++ u->sent_to_user = 1;
++ u->seen_by_user = 1;
++ }
++ return u;
++}
++
++static inline int test_cmd_threads(struct scst_user_dev *dev)
++{
++ int res = !list_empty(&dev->udev_cmd_threads.active_cmd_list) ||
++ !list_empty(&dev->ready_cmd_list) ||
++ !dev->blocking || dev->cleanup_done ||
++ signal_pending(current);
++ return res;
++}
++
++/* Called under udev_cmd_threads.cmd_list_lock and IRQ off */
++static int dev_user_get_next_cmd(struct scst_user_dev *dev,
++ struct scst_user_cmd **ucmd)
++{
++ int res = 0;
++ wait_queue_t wait;
++
++ TRACE_ENTRY();
++
++ init_waitqueue_entry(&wait, current);
++
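++	/* Wait until a ready ucmd appears, the device stops blocking, or a signal arrives */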
++ while (1) {
++ if (!test_cmd_threads(dev)) {
++ add_wait_queue_exclusive_head(
++ &dev->udev_cmd_threads.cmd_list_waitQ,
++ &wait);
++ for (;;) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ if (test_cmd_threads(dev))
++ break;
++ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++ schedule();
++ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++ }
++ set_current_state(TASK_RUNNING);
++ remove_wait_queue(&dev->udev_cmd_threads.cmd_list_waitQ,
++ &wait);
++ }
++
++ dev_user_process_scst_commands(dev);
++
++ *ucmd = __dev_user_get_next_cmd(&dev->ready_cmd_list);
++ if (*ucmd != NULL)
++ break;
++
++ if (!dev->blocking || dev->cleanup_done) {
++ res = -EAGAIN;
++ TRACE_DBG("No ready commands, returning %d", res);
++ break;
++ }
++
++ if (signal_pending(current)) {
++ res = -EINTR;
++ TRACE_DBG("Signal pending, returning %d", res);
++ break;
++ }
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int dev_user_reply_get_cmd(struct file *file, void __user *arg)
++{
++ int res = 0, rc;
++ struct scst_user_dev *dev;
++ struct scst_user_get_cmd *cmd;
++ struct scst_user_reply_cmd *reply;
++ struct scst_user_cmd *ucmd;
++ uint64_t ureply;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&dev_priv_mutex);
++ dev = (struct scst_user_dev *)file->private_data;
++ res = dev_user_check_reg(dev);
++ if (unlikely(res != 0)) {
++ mutex_unlock(&dev_priv_mutex);
++ goto out;
++ }
++ down_read(&dev->dev_rwsem);
++ mutex_unlock(&dev_priv_mutex);
++
++ /* get_user() can't be used with 64-bit values on x86_32 */
++ rc = copy_from_user(&ureply, (uint64_t __user *)
++ &((struct scst_user_get_cmd __user *)arg)->preply,
++ sizeof(ureply));
++ if (unlikely(rc != 0)) {
++ PRINT_ERROR("Failed to copy %d user's bytes", rc);
++ res = -EFAULT;
++ goto out_up;
++ }
++
++ TRACE_DBG("ureply %lld (dev %s)", (long long unsigned int)ureply,
++ dev->name);
++
++ cmd = kmem_cache_alloc(user_get_cmd_cachep, GFP_KERNEL);
++ if (unlikely(cmd == NULL)) {
++ res = -ENOMEM;
++ goto out_up;
++ }
++
++ if (ureply != 0) {
++ unsigned long u = (unsigned long)ureply;
++ reply = (struct scst_user_reply_cmd *)cmd;
++ rc = copy_from_user(reply, (void __user *)u, sizeof(*reply));
++ if (unlikely(rc != 0)) {
++ PRINT_ERROR("Failed to copy %d user's bytes", rc);
++ res = -EFAULT;
++ goto out_free;
++ }
++
++ TRACE_BUFFER("Reply", reply, sizeof(*reply));
++
++ res = dev_user_process_reply(dev, reply);
++ if (unlikely(res < 0))
++ goto out_free;
++ }
++
++ kmem_cache_free(user_get_cmd_cachep, cmd);
++
++ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++again:
++ res = dev_user_get_next_cmd(dev, &ucmd);
++ if (res == 0) {
++ int len;
++ /*
++		 * A misbehaving user space handler can cause the ucmd to die
++		 * immediately after we release the lock, which could result in
++		 * copying dead data to user space and, in turn, a leak of
++		 * sensitive information.
++ */
++ if (unlikely(ucmd_get_check(ucmd))) {
++ /* Oops, this ucmd is already being destroyed. Retry. */
++ goto again;
++ }
++ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++
++ EXTRACHECKS_BUG_ON(ucmd->user_cmd_payload_len == 0);
++
++ len = ucmd->user_cmd_payload_len;
++ TRACE_DBG("ucmd %p (user_cmd %p), payload_len %d (len %d)",
++ ucmd, &ucmd->user_cmd, ucmd->user_cmd_payload_len, len);
++ TRACE_BUFFER("UCMD", &ucmd->user_cmd, len);
++ rc = copy_to_user(arg, &ucmd->user_cmd, len);
++ if (unlikely(rc != 0)) {
++ PRINT_ERROR("Copy to user failed (%d), requeuing ucmd "
++ "%p back to head of ready cmd list", rc, ucmd);
++ res = -EFAULT;
++ /* Requeue ucmd back */
++ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++ list_add(&ucmd->ready_cmd_list_entry,
++ &dev->ready_cmd_list);
++ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++ }
++#ifdef CONFIG_SCST_EXTRACHECKS
++ else
++ ucmd->user_cmd_payload_len = 0;
++#endif
++ ucmd_put(ucmd);
++ } else
++ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++
++out_up:
++ up_read(&dev->dev_rwsem);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free:
++ kmem_cache_free(user_get_cmd_cachep, cmd);
++ goto out_up;
++}
++
++static long dev_user_ioctl(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ long res, rc;
++
++ TRACE_ENTRY();
++
++ switch (cmd) {
++ case SCST_USER_REPLY_AND_GET_CMD:
++ TRACE_DBG("%s", "REPLY_AND_GET_CMD");
++ res = dev_user_reply_get_cmd(file, (void __user *)arg);
++ break;
++
++ case SCST_USER_REPLY_CMD:
++ TRACE_DBG("%s", "REPLY_CMD");
++ res = dev_user_reply_cmd(file, (void __user *)arg);
++ break;
++
++ case SCST_USER_GET_EXTENDED_CDB:
++ TRACE_DBG("%s", "GET_EXTENDED_CDB");
++ res = dev_user_get_ext_cdb(file, (void __user *)arg);
++ break;
++
++ case SCST_USER_REGISTER_DEVICE:
++ {
++ struct scst_user_dev_desc *dev_desc;
++ TRACE_DBG("%s", "REGISTER_DEVICE");
++ dev_desc = kmalloc(sizeof(*dev_desc), GFP_KERNEL);
++ if (dev_desc == NULL) {
++ res = -ENOMEM;
++ goto out;
++ }
++ rc = copy_from_user(dev_desc, (void __user *)arg,
++ sizeof(*dev_desc));
++ if (rc != 0) {
++ PRINT_ERROR("Failed to copy %ld user's bytes", rc);
++ res = -EFAULT;
++ kfree(dev_desc);
++ goto out;
++ }
++ TRACE_BUFFER("dev_desc", dev_desc, sizeof(*dev_desc));
++ dev_desc->name[sizeof(dev_desc->name)-1] = '\0';
++ dev_desc->sgv_name[sizeof(dev_desc->sgv_name)-1] = '\0';
++ res = dev_user_register_dev(file, dev_desc);
++ kfree(dev_desc);
++ break;
++ }
++
++ case SCST_USER_UNREGISTER_DEVICE:
++ TRACE_DBG("%s", "UNREGISTER_DEVICE");
++ res = dev_user_unregister_dev(file);
++ break;
++
++ case SCST_USER_FLUSH_CACHE:
++ TRACE_DBG("%s", "FLUSH_CACHE");
++ res = dev_user_flush_cache(file);
++ break;
++
++ case SCST_USER_SET_OPTIONS:
++ {
++ struct scst_user_opt opt;
++ TRACE_DBG("%s", "SET_OPTIONS");
++ rc = copy_from_user(&opt, (void __user *)arg, sizeof(opt));
++ if (rc != 0) {
++ PRINT_ERROR("Failed to copy %ld user's bytes", rc);
++ res = -EFAULT;
++ goto out;
++ }
++ TRACE_BUFFER("opt", &opt, sizeof(opt));
++ res = dev_user_set_opt(file, &opt);
++ break;
++ }
++
++ case SCST_USER_GET_OPTIONS:
++ TRACE_DBG("%s", "GET_OPTIONS");
++ res = dev_user_get_opt(file, (void __user *)arg);
++ break;
++
++ case SCST_USER_DEVICE_CAPACITY_CHANGED:
++ TRACE_DBG("%s", "CAPACITY_CHANGED");
++ res = dev_user_capacity_changed(file);
++ break;
++
++ case SCST_USER_PREALLOC_BUFFER:
++ TRACE_DBG("%s", "PREALLOC_BUFFER");
++ res = dev_user_prealloc_buffer(file, (void __user *)arg);
++ break;
++
++ default:
++ PRINT_ERROR("Invalid ioctl cmd %x", cmd);
++ res = -EINVAL;
++ goto out;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static unsigned int dev_user_poll(struct file *file, poll_table *wait)
++{
++ int res = 0;
++ struct scst_user_dev *dev;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&dev_priv_mutex);
++ dev = (struct scst_user_dev *)file->private_data;
++ res = dev_user_check_reg(dev);
++ if (unlikely(res != 0)) {
++ mutex_unlock(&dev_priv_mutex);
++ goto out;
++ }
++ down_read(&dev->dev_rwsem);
++ mutex_unlock(&dev_priv_mutex);
++
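++	/* Readable means there are commands waiting to be fetched by user space */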
++ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++
++ if (!list_empty(&dev->ready_cmd_list) ||
++ !list_empty(&dev->udev_cmd_threads.active_cmd_list)) {
++ res |= POLLIN | POLLRDNORM;
++ goto out_unlock;
++ }
++
++ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++
++ TRACE_DBG("Before poll_wait() (dev %s)", dev->name);
++ poll_wait(file, &dev->udev_cmd_threads.cmd_list_waitQ, wait);
++ TRACE_DBG("After poll_wait() (dev %s)", dev->name);
++
++ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++
++ if (!list_empty(&dev->ready_cmd_list) ||
++ !list_empty(&dev->udev_cmd_threads.active_cmd_list)) {
++ res |= POLLIN | POLLRDNORM;
++ goto out_unlock;
++ }
++
++out_unlock:
++ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++
++ up_read(&dev->dev_rwsem);
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++/*
++ * Called under udev_cmd_threads.cmd_list_lock, but can drop it inside,
++ * then reacquire.
++ */
++static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
++ unsigned long *flags)
++ __releases(&dev->udev_cmd_threads.cmd_list_lock)
++ __acquires(&dev->udev_cmd_threads.cmd_list_lock)
++{
++ int state = ucmd->state;
++ struct scst_user_dev *dev = ucmd->dev;
++
++ TRACE_ENTRY();
++
++ if (ucmd->this_state_unjammed)
++ goto out;
++
++ TRACE_MGMT_DBG("Unjamming ucmd %p (busy %d, state %x)", ucmd, busy,
++ state);
++
++ ucmd->jammed = 1;
++ ucmd->this_state_unjammed = 1;
++ ucmd->sent_to_user = 0;
++
++ switch (state) {
++ case UCMD_STATE_PARSING:
++ case UCMD_STATE_BUF_ALLOCING:
++ if (test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags))
++ ucmd->aborted = 1;
++ else {
++ if (busy)
++ scst_set_busy(ucmd->cmd);
++ else
++ scst_set_cmd_error(ucmd->cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ }
++ scst_set_cmd_abnormal_done_state(ucmd->cmd);
++
++ if (state == UCMD_STATE_PARSING)
++ scst_post_parse(ucmd->cmd);
++ else
++ scst_post_alloc_data_buf(ucmd->cmd);
++
++ TRACE_MGMT_DBG("Adding ucmd %p to active list", ucmd);
++ list_add(&ucmd->cmd->cmd_list_entry,
++ &ucmd->cmd->cmd_threads->active_cmd_list);
++ wake_up(&ucmd->cmd->cmd_threads->cmd_list_waitQ);
++ break;
++
++ case UCMD_STATE_EXECING:
++ if (flags != NULL)
++ spin_unlock_irqrestore(&dev->udev_cmd_threads.cmd_list_lock,
++ *flags);
++ else
++ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++
++ TRACE_MGMT_DBG("EXEC: unjamming ucmd %p", ucmd);
++
++ if (test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags))
++ ucmd->aborted = 1;
++ else {
++ if (busy)
++ scst_set_busy(ucmd->cmd);
++ else
++ scst_set_cmd_error(ucmd->cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ }
++
++ ucmd->cmd->scst_cmd_done(ucmd->cmd, SCST_CMD_STATE_DEFAULT,
++ SCST_CONTEXT_DIRECT);
++ /* !! At this point cmd and ucmd can be already freed !! */
++
++ if (flags != NULL)
++ spin_lock_irqsave(&dev->udev_cmd_threads.cmd_list_lock,
++ *flags);
++ else
++ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++ break;
++
++ case UCMD_STATE_ON_FREEING:
++ case UCMD_STATE_ON_CACHE_FREEING:
++ case UCMD_STATE_TM_EXECING:
++ case UCMD_STATE_ATTACH_SESS:
++ case UCMD_STATE_DETACH_SESS:
++ if (flags != NULL)
++ spin_unlock_irqrestore(&dev->udev_cmd_threads.cmd_list_lock,
++ *flags);
++ else
++ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++
++ switch (state) {
++ case UCMD_STATE_ON_FREEING:
++ dev_user_process_reply_on_free(ucmd);
++ break;
++
++ case UCMD_STATE_ON_CACHE_FREEING:
++ dev_user_process_reply_on_cache_free(ucmd);
++ break;
++
++ case UCMD_STATE_TM_EXECING:
++ dev_user_process_reply_tm_exec(ucmd,
++ SCST_MGMT_STATUS_FAILED);
++ break;
++
++ case UCMD_STATE_ATTACH_SESS:
++ case UCMD_STATE_DETACH_SESS:
++ dev_user_process_reply_sess(ucmd, -EFAULT);
++ break;
++ }
++
++ if (flags != NULL)
++ spin_lock_irqsave(&dev->udev_cmd_threads.cmd_list_lock,
++ *flags);
++ else
++ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++ break;
++
++ default:
++ PRINT_CRIT_ERROR("Wrong ucmd state %x", state);
++ BUG();
++ break;
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static int dev_user_unjam_dev(struct scst_user_dev *dev)
++ __releases(&dev->udev_cmd_threads.cmd_list_lock)
++ __acquires(&dev->udev_cmd_threads.cmd_list_lock)
++{
++ int i, res = 0;
++ struct scst_user_cmd *ucmd;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("Unjamming dev %p", dev);
++
++ sgv_pool_flush(dev->pool);
++ sgv_pool_flush(dev->pool_clust);
++
++ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++
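++	/* Rescan the whole hash after each unjam, since the lock is dropped to put the ucmd */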
++repeat:
++ for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
++ struct list_head *head = &dev->ucmd_hash[i];
++
++ list_for_each_entry(ucmd, head, hash_list_entry) {
++ res++;
++
++ if (!ucmd->sent_to_user)
++ continue;
++
++ if (ucmd_get_check(ucmd))
++ continue;
++
++ TRACE_MGMT_DBG("ucmd %p, state %x, scst_cmd %p", ucmd,
++ ucmd->state, ucmd->cmd);
++
++ dev_user_unjam_cmd(ucmd, 0, NULL);
++
++ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++ ucmd_put(ucmd);
++ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++
++ goto repeat;
++ }
++ }
++
++ if (dev_user_process_scst_commands(dev) != 0)
++ goto repeat;
++
++ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
++ int status)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("TM reply (ucmd %p, fn %d, status %d)", ucmd,
++ ucmd->user_cmd.tm_cmd.fn, status);
++
++ if (status == SCST_MGMT_STATUS_TASK_NOT_EXIST) {
++ /*
++		 * It is possible that user space saw the TM cmd before the cmd
++		 * to abort, or will never see the latter at all, because it
++		 * was aborted on the way there. So it is safe to return
++		 * success instead: if the TM cmd made it to this point, the
++		 * cmd to abort apparently did exist.
++ */
++ status = SCST_MGMT_STATUS_SUCCESS;
++ }
++
++ scst_async_mcmd_completed(ucmd->mcmd, status);
++
++ ucmd_put(ucmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void dev_user_abort_ready_commands(struct scst_user_dev *dev)
++{
++ struct scst_user_cmd *ucmd;
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ spin_lock_irqsave(&dev->udev_cmd_threads.cmd_list_lock, flags);
++again:
++ list_for_each_entry(ucmd, &dev->ready_cmd_list, ready_cmd_list_entry) {
++ if ((ucmd->cmd != NULL) && !ucmd->seen_by_user &&
++ test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags)) {
++ switch (ucmd->state) {
++ case UCMD_STATE_PARSING:
++ case UCMD_STATE_BUF_ALLOCING:
++ case UCMD_STATE_EXECING:
++ TRACE_MGMT_DBG("Aborting ready ucmd %p", ucmd);
++ list_del(&ucmd->ready_cmd_list_entry);
++ dev_user_unjam_cmd(ucmd, 0, &flags);
++ goto again;
++ }
++ }
++ }
++
++ spin_unlock_irqrestore(&dev->udev_cmd_threads.cmd_list_lock, flags);
++
++ TRACE_EXIT();
++ return;
++}
++
++static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
++ struct scst_tgt_dev *tgt_dev)
++{
++ struct scst_user_cmd *ucmd;
++ struct scst_user_dev *dev =
++ (struct scst_user_dev *)tgt_dev->dev->dh_priv;
++ struct scst_user_cmd *ucmd_to_abort = NULL;
++
++ TRACE_ENTRY();
++
++ /*
++	 * With this approach we don't do anything with hung devices that have
++	 * stopped responding and/or have stuck commands. We forcibly abort such
++	 * commands only if they have not yet been sent to user space or if the
++	 * device is being unloaded, e.g. if its handler program gets killed.
++	 * This is because it's pretty hard to distinguish a stuck device from a
++	 * temporarily overloaded one. There are several reasons for that:
++	 *
++	 * 1. Some commands need a lot of time to complete (several hours),
++	 * so to an impatient user such command(s) will always look
++	 * stuck.
++	 *
++	 * 2. If we forcibly abort just one command, i.e. abort it before it has
++	 * actually completed in user space, we will have to put the whole
++	 * device offline until we are sure that no previously aborted
++	 * commands will get executed. Otherwise, we risk data corruption if a
++	 * command that was aborted and reported as completed actually gets
++	 * executed *after* new commands sent following the forced abort. Many
++	 * journaling file systems and databases enforce the required command
++	 * ordering by draining the queue, and not putting the whole device
++	 * offline after the forced abort would break that. This makes the
++	 * decision whether a command is stuck or not very costly.
++	 *
++	 * So, we leave the "is the device stuck?" policy to user space and
++	 * simply let all commands live until they are completed or their
++	 * devices get closed/killed. This approach is generally fine, but it
++	 * can affect management commands that need activity suspending via
++	 * scst_suspend_activity(), such as device or target
++	 * registration/removal. During normal operation such commands should
++	 * be rare. Plus, when possible, scst_suspend_activity() will return
++	 * EBUSY after a timeout so that the caller doesn't get stuck forever
++	 * either.
++	 *
++	 * Anyway, ToDo: we should reimplement this in the SCST core, so that
++	 * stuck commands would affect only the related devices.
++ */
++
++ dev_user_abort_ready_commands(dev);
++
++	/* We can't afford to miss a TM command due to memory shortage */
++ ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL|__GFP_NOFAIL);
++
++ ucmd->user_cmd_payload_len =
++ offsetof(struct scst_user_get_cmd, tm_cmd) +
++ sizeof(ucmd->user_cmd.tm_cmd);
++ ucmd->user_cmd.cmd_h = ucmd->h;
++ ucmd->user_cmd.subcode = SCST_USER_TASK_MGMT;
++ ucmd->user_cmd.tm_cmd.sess_h = (unsigned long)tgt_dev;
++ ucmd->user_cmd.tm_cmd.fn = mcmd->fn;
++ ucmd->user_cmd.tm_cmd.cmd_sn = mcmd->cmd_sn;
++ ucmd->user_cmd.tm_cmd.cmd_sn_set = mcmd->cmd_sn_set;
++
++ if (mcmd->cmd_to_abort != NULL) {
++ ucmd_to_abort =
++ (struct scst_user_cmd *)mcmd->cmd_to_abort->dh_priv;
++ if (ucmd_to_abort != NULL)
++ ucmd->user_cmd.tm_cmd.cmd_h_to_abort = ucmd_to_abort->h;
++ }
++
++ TRACE_MGMT_DBG("Preparing TM ucmd %p (h %d, fn %d, cmd_to_abort %p, "
++ "ucmd_to_abort %p, cmd_h_to_abort %d, mcmd %p)", ucmd, ucmd->h,
++ mcmd->fn, mcmd->cmd_to_abort, ucmd_to_abort,
++ ucmd->user_cmd.tm_cmd.cmd_h_to_abort, mcmd);
++
++ ucmd->mcmd = mcmd;
++ ucmd->state = UCMD_STATE_TM_EXECING;
++
++ scst_prepare_async_mcmd(mcmd);
++
++ dev_user_add_to_ready(ucmd);
++
++ TRACE_EXIT();
++ return SCST_DEV_TM_NOT_COMPLETED;
++}
++
++static int dev_user_attach(struct scst_device *sdev)
++{
++ int res = 0;
++ struct scst_user_dev *dev = NULL, *d;
++
++ TRACE_ENTRY();
++
++ spin_lock(&dev_list_lock);
++ list_for_each_entry(d, &dev_list, dev_list_entry) {
++ if (strcmp(d->name, sdev->virt_name) == 0) {
++ dev = d;
++ break;
++ }
++ }
++ spin_unlock(&dev_list_lock);
++ if (dev == NULL) {
++ PRINT_ERROR("Device %s not found", sdev->virt_name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ sdev->dh_priv = dev;
++ sdev->tst = dev->tst;
++ sdev->queue_alg = dev->queue_alg;
++ sdev->swp = dev->swp;
++ sdev->tas = dev->tas;
++ sdev->d_sense = dev->d_sense;
++ sdev->has_own_order_mgmt = dev->has_own_order_mgmt;
++
++ dev->sdev = sdev;
++
++ PRINT_INFO("Attached user space virtual device \"%s\"",
++ dev->name);
++
++out:
++ TRACE_EXIT();
++ return res;
++}
++
++static void dev_user_detach(struct scst_device *sdev)
++{
++ struct scst_user_dev *dev = (struct scst_user_dev *)sdev->dh_priv;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("virt_id %d", sdev->virt_id);
++
++ PRINT_INFO("Detached user space virtual device \"%s\"",
++ dev->name);
++
++ /* dev will be freed by the caller */
++ sdev->dh_priv = NULL;
++ dev->sdev = NULL;
++
++ TRACE_EXIT();
++ return;
++}
++
++static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status)
++{
++ int res = 0;
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("ucmd %p, cmpl %p, status %d", ucmd, ucmd->cmpl, status);
++
++ spin_lock_irqsave(&ucmd->dev->udev_cmd_threads.cmd_list_lock, flags);
++
++ if (ucmd->state == UCMD_STATE_ATTACH_SESS) {
++ TRACE_MGMT_DBG("%s", "ATTACH_SESS finished");
++ ucmd->result = status;
++ } else if (ucmd->state == UCMD_STATE_DETACH_SESS) {
++ TRACE_MGMT_DBG("%s", "DETACH_SESS finished");
++ } else
++ BUG();
++
++ if (ucmd->cmpl != NULL)
++ complete_all(ucmd->cmpl);
++
++ spin_unlock_irqrestore(&ucmd->dev->udev_cmd_threads.cmd_list_lock, flags);
++
++ ucmd_put(ucmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int dev_user_attach_tgt(struct scst_tgt_dev *tgt_dev)
++{
++ struct scst_user_dev *dev =
++ (struct scst_user_dev *)tgt_dev->dev->dh_priv;
++ int res = 0, rc;
++ struct scst_user_cmd *ucmd;
++ DECLARE_COMPLETION_ONSTACK(cmpl);
++ struct scst_tgt_template *tgtt = tgt_dev->sess->tgt->tgtt;
++ struct scst_tgt *tgt = tgt_dev->sess->tgt;
++
++ TRACE_ENTRY();
++
++ tgt_dev->active_cmd_threads = &dev->udev_cmd_threads;
++
++ /*
++ * We can't replace tgt_dev->pool, because it can be used to allocate
++ * memory for SCST local commands, like REPORT LUNS, where there is no
++ * corresponding ucmd. Otherwise we will crash in dev_user_alloc_sg().
++ */
++ if (test_bit(SCST_TGT_DEV_CLUST_POOL, &tgt_dev->tgt_dev_flags))
++ tgt_dev->dh_priv = dev->pool_clust;
++ else
++ tgt_dev->dh_priv = dev->pool;
++
++ ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL);
++ if (ucmd == NULL)
++ goto out_nomem;
++
++ ucmd->cmpl = &cmpl;
++
++ ucmd->user_cmd_payload_len = offsetof(struct scst_user_get_cmd, sess) +
++ sizeof(ucmd->user_cmd.sess);
++ ucmd->user_cmd.cmd_h = ucmd->h;
++ ucmd->user_cmd.subcode = SCST_USER_ATTACH_SESS;
++ ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
++ ucmd->user_cmd.sess.lun = (uint64_t)tgt_dev->lun;
++ ucmd->user_cmd.sess.threads_num = tgt_dev->sess->tgt->tgtt->threads_num;
++ ucmd->user_cmd.sess.rd_only = tgt_dev->acg_dev->rd_only;
++ if (tgtt->get_phys_transport_version != NULL)
++ ucmd->user_cmd.sess.phys_transport_version =
++ tgtt->get_phys_transport_version(tgt);
++ if (tgtt->get_scsi_transport_version != NULL)
++ ucmd->user_cmd.sess.scsi_transport_version =
++ tgtt->get_scsi_transport_version(tgt);
++ strlcpy(ucmd->user_cmd.sess.initiator_name,
++ tgt_dev->sess->initiator_name,
++ sizeof(ucmd->user_cmd.sess.initiator_name)-1);
++ strlcpy(ucmd->user_cmd.sess.target_name,
++ tgt_dev->sess->tgt->tgt_name,
++ sizeof(ucmd->user_cmd.sess.target_name)-1);
++
++ TRACE_MGMT_DBG("Preparing ATTACH_SESS %p (h %d, sess_h %llx, LUN %llx, "
++ "threads_num %d, rd_only %d, initiator %s, target %s)",
++ ucmd, ucmd->h, ucmd->user_cmd.sess.sess_h,
++ ucmd->user_cmd.sess.lun, ucmd->user_cmd.sess.threads_num,
++ ucmd->user_cmd.sess.rd_only, ucmd->user_cmd.sess.initiator_name,
++ ucmd->user_cmd.sess.target_name);
++
++ ucmd->state = UCMD_STATE_ATTACH_SESS;
++
++ ucmd_get(ucmd);
++
++ dev_user_add_to_ready(ucmd);
++
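++	/* Wait for the user space handler to acknowledge the ATTACH_SESS notification */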
++ rc = wait_for_completion_timeout(ucmd->cmpl, DEV_USER_ATTACH_TIMEOUT);
++ if (rc > 0)
++ res = ucmd->result;
++ else {
++ PRINT_ERROR("%s", "ATTACH_SESS command timeout");
++ res = -EFAULT;
++ }
++
++ BUG_ON(irqs_disabled());
++
++ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++ ucmd->cmpl = NULL;
++ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++
++ ucmd_put(ucmd);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_nomem:
++ res = -ENOMEM;
++ goto out;
++}
++
++static void dev_user_detach_tgt(struct scst_tgt_dev *tgt_dev)
++{
++ struct scst_user_dev *dev =
++ (struct scst_user_dev *)tgt_dev->dev->dh_priv;
++ struct scst_user_cmd *ucmd;
++
++ TRACE_ENTRY();
++
++ /*
++	 * We can't afford to miss this command due to memory shortage, because
++	 * that might lead to a memory leak in the user space handler.
++ */
++ ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL|__GFP_NOFAIL);
++ if (ucmd == NULL)
++ goto out;
++
++ TRACE_MGMT_DBG("Preparing DETACH_SESS %p (h %d, sess_h %llx)", ucmd,
++ ucmd->h, ucmd->user_cmd.sess.sess_h);
++
++ ucmd->user_cmd_payload_len = offsetof(struct scst_user_get_cmd, sess) +
++ sizeof(ucmd->user_cmd.sess);
++ ucmd->user_cmd.cmd_h = ucmd->h;
++ ucmd->user_cmd.subcode = SCST_USER_DETACH_SESS;
++ ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
++
++ ucmd->state = UCMD_STATE_DETACH_SESS;
++
++ dev_user_add_to_ready(ucmd);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* No locks are needed, but the activity must be suspended */
++static void dev_user_setup_functions(struct scst_user_dev *dev)
++{
++ TRACE_ENTRY();
++
++ dev->devtype.parse = dev_user_parse;
++ dev->devtype.alloc_data_buf = dev_user_alloc_data_buf;
++ dev->devtype.dev_done = NULL;
++
++ if (dev->parse_type != SCST_USER_PARSE_CALL) {
++ switch (dev->devtype.type) {
++ case TYPE_DISK:
++ dev->generic_parse = scst_sbc_generic_parse;
++ dev->devtype.dev_done = dev_user_disk_done;
++ break;
++
++ case TYPE_TAPE:
++ dev->generic_parse = scst_tape_generic_parse;
++ dev->devtype.dev_done = dev_user_tape_done;
++ break;
++
++ case TYPE_MOD:
++ dev->generic_parse = scst_modisk_generic_parse;
++ dev->devtype.dev_done = dev_user_disk_done;
++ break;
++
++ case TYPE_ROM:
++ dev->generic_parse = scst_cdrom_generic_parse;
++ dev->devtype.dev_done = dev_user_disk_done;
++ break;
++
++ case TYPE_MEDIUM_CHANGER:
++ dev->generic_parse = scst_changer_generic_parse;
++ break;
++
++ case TYPE_PROCESSOR:
++ dev->generic_parse = scst_processor_generic_parse;
++ break;
++
++ case TYPE_RAID:
++ dev->generic_parse = scst_raid_generic_parse;
++ break;
++
++ default:
++ PRINT_INFO("Unknown SCSI type %x, using PARSE_CALL "
++ "for it", dev->devtype.type);
++ dev->parse_type = SCST_USER_PARSE_CALL;
++ break;
++ }
++ } else {
++ dev->generic_parse = NULL;
++ dev->devtype.dev_done = NULL;
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static int dev_user_check_version(const struct scst_user_dev_desc *dev_desc)
++{
++ char str[sizeof(DEV_USER_VERSION) > 20 ? sizeof(DEV_USER_VERSION) : 20];
++ int res = 0, rc;
++
++ rc = copy_from_user(str,
++ (void __user *)(unsigned long)dev_desc->license_str,
++ sizeof(str));
++ if (rc != 0) {
++ PRINT_ERROR("%s", "Unable to get license string");
++ res = -EFAULT;
++ goto out;
++ }
++ str[sizeof(str)-1] = '\0';
++
++ if ((strcmp(str, "GPL") != 0) &&
++ (strcmp(str, "GPL v2") != 0) &&
++ (strcmp(str, "Dual BSD/GPL") != 0) &&
++ (strcmp(str, "Dual MIT/GPL") != 0) &&
++ (strcmp(str, "Dual MPL/GPL") != 0)) {
++ /* ->name already 0-terminated in dev_user_ioctl() */
++ PRINT_ERROR("Unsupported license of user device %s (%s). "
++ "Ask license@scst-tgt.com for more info.",
++ dev_desc->name, str);
++ res = -EPERM;
++ goto out;
++ }
++
++ rc = copy_from_user(str,
++ (void __user *)(unsigned long)dev_desc->version_str,
++ sizeof(str));
++ if (rc != 0) {
++ PRINT_ERROR("%s", "Unable to get version string");
++ res = -EFAULT;
++ goto out;
++ }
++ str[sizeof(str)-1] = '\0';
++
++ if (strcmp(str, DEV_USER_VERSION) != 0) {
++ /* ->name already 0-terminated in dev_user_ioctl() */
++ PRINT_ERROR("Incorrect version of user device %s (%s). "
++ "Expected: %s", dev_desc->name, str,
++ DEV_USER_VERSION);
++ res = -EINVAL;
++ goto out;
++ }
++
++out:
++ return res;
++}
++
++static int dev_user_register_dev(struct file *file,
++ const struct scst_user_dev_desc *dev_desc)
++{
++ int res, i;
++ struct scst_user_dev *dev, *d;
++ int block;
++
++ TRACE_ENTRY();
++
++ res = dev_user_check_version(dev_desc);
++ if (res != 0)
++ goto out;
++
++ switch (dev_desc->type) {
++ case TYPE_DISK:
++ case TYPE_ROM:
++ case TYPE_MOD:
++ if (dev_desc->block_size == 0) {
++ PRINT_ERROR("Wrong block size %d",
++ dev_desc->block_size);
++ res = -EINVAL;
++ goto out;
++ }
++ block = scst_calc_block_shift(dev_desc->block_size);
++ if (block == -1) {
++ res = -EINVAL;
++ goto out;
++ }
++ break;
++ default:
++ block = dev_desc->block_size;
++ break;
++ }
++
++ if (!try_module_get(THIS_MODULE)) {
++ PRINT_ERROR("%s", "Fail to get module");
++ res = -ETXTBSY;
++ goto out;
++ }
++
++ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
++ if (dev == NULL) {
++ res = -ENOMEM;
++ goto out_put;
++ }
++
++ init_rwsem(&dev->dev_rwsem);
++ INIT_LIST_HEAD(&dev->ready_cmd_list);
++ if (file->f_flags & O_NONBLOCK) {
++ TRACE_DBG("%s", "Non-blocking operations");
++ dev->blocking = 0;
++ } else
++ dev->blocking = 1;
++ for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++)
++ INIT_LIST_HEAD(&dev->ucmd_hash[i]);
++
++ scst_init_threads(&dev->udev_cmd_threads);
++
++ strlcpy(dev->name, dev_desc->name, sizeof(dev->name)-1);
++
++ scst_init_mem_lim(&dev->udev_mem_lim);
++
++ scnprintf(dev->devtype.name, sizeof(dev->devtype.name), "%s",
++ (dev_desc->sgv_name[0] == '\0') ? dev->name :
++ dev_desc->sgv_name);
++ dev->pool = sgv_pool_create(dev->devtype.name, sgv_no_clustering,
++ dev_desc->sgv_single_alloc_pages,
++ dev_desc->sgv_shared,
++ dev_desc->sgv_purge_interval);
++ if (dev->pool == NULL) {
++ res = -ENOMEM;
++ goto out_deinit_threads;
++ }
++ sgv_pool_set_allocator(dev->pool, dev_user_alloc_pages,
++ dev_user_free_sg_entries);
++
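++	/* Optionally create a second, tail-clustering SGV pool; otherwise share the plain one */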
++ if (!dev_desc->sgv_disable_clustered_pool) {
++ scnprintf(dev->devtype.name, sizeof(dev->devtype.name),
++ "%s-clust",
++ (dev_desc->sgv_name[0] == '\0') ? dev->name :
++ dev_desc->sgv_name);
++ dev->pool_clust = sgv_pool_create(dev->devtype.name,
++ sgv_tail_clustering,
++ dev_desc->sgv_single_alloc_pages,
++ dev_desc->sgv_shared,
++ dev_desc->sgv_purge_interval);
++ if (dev->pool_clust == NULL) {
++ res = -ENOMEM;
++ goto out_free0;
++ }
++ sgv_pool_set_allocator(dev->pool_clust, dev_user_alloc_pages,
++ dev_user_free_sg_entries);
++ } else {
++ dev->pool_clust = dev->pool;
++ sgv_pool_get(dev->pool_clust);
++ }
++
++ scnprintf(dev->devtype.name, sizeof(dev->devtype.name), "%s",
++ dev->name);
++ dev->devtype.type = dev_desc->type;
++ dev->devtype.threads_num = -1;
++ dev->devtype.parse_atomic = 1;
++ dev->devtype.alloc_data_buf_atomic = 1;
++ dev->devtype.dev_done_atomic = 1;
++ dev->devtype.dev_attrs = dev_user_dev_attrs;
++ dev->devtype.attach = dev_user_attach;
++ dev->devtype.detach = dev_user_detach;
++ dev->devtype.attach_tgt = dev_user_attach_tgt;
++ dev->devtype.detach_tgt = dev_user_detach_tgt;
++ dev->devtype.exec = dev_user_exec;
++ dev->devtype.on_free_cmd = dev_user_on_free_cmd;
++ dev->devtype.task_mgmt_fn = dev_user_task_mgmt_fn;
++ if (dev_desc->enable_pr_cmds_notifications)
++ dev->devtype.pr_cmds_notifications = 1;
++
++ init_completion(&dev->cleanup_cmpl);
++ dev->block = block;
++ dev->def_block = block;
++
++ res = __dev_user_set_opt(dev, &dev_desc->opt);
++ if (res != 0)
++ goto out_free;
++
++ TRACE_MEM("dev %p, name %s", dev, dev->name);
++
++ spin_lock(&dev_list_lock);
++
++ list_for_each_entry(d, &dev_list, dev_list_entry) {
++ if (strcmp(d->name, dev->name) == 0) {
++ PRINT_ERROR("Device %s already exist",
++ dev->name);
++ res = -EEXIST;
++ spin_unlock(&dev_list_lock);
++ goto out_free;
++ }
++ }
++
++ list_add_tail(&dev->dev_list_entry, &dev_list);
++
++ spin_unlock(&dev_list_lock);
++
++ res = scst_register_virtual_dev_driver(&dev->devtype);
++ if (res < 0)
++ goto out_del_free;
++
++ dev->virt_id = scst_register_virtual_device(&dev->devtype, dev->name);
++ if (dev->virt_id < 0) {
++ res = dev->virt_id;
++ goto out_unreg_handler;
++ }
++
++ mutex_lock(&dev_priv_mutex);
++ if (file->private_data != NULL) {
++ mutex_unlock(&dev_priv_mutex);
++ PRINT_ERROR("%s", "Device already registered");
++ res = -EINVAL;
++ goto out_unreg_drv;
++ }
++ file->private_data = dev;
++ mutex_unlock(&dev_priv_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_unreg_drv:
++ scst_unregister_virtual_device(dev->virt_id);
++
++out_unreg_handler:
++ scst_unregister_virtual_dev_driver(&dev->devtype);
++
++out_del_free:
++ spin_lock(&dev_list_lock);
++ list_del(&dev->dev_list_entry);
++ spin_unlock(&dev_list_lock);
++
++out_free:
++ sgv_pool_del(dev->pool_clust);
++
++out_free0:
++ sgv_pool_del(dev->pool);
++
++out_deinit_threads:
++ scst_deinit_threads(&dev->udev_cmd_threads);
++
++ kfree(dev);
++
++out_put:
++ module_put(THIS_MODULE);
++ goto out;
++}
++
++static int dev_user_unregister_dev(struct file *file)
++{
++ int res;
++ struct scst_user_dev *dev;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&dev_priv_mutex);
++ dev = (struct scst_user_dev *)file->private_data;
++ res = dev_user_check_reg(dev);
++ if (res != 0) {
++ mutex_unlock(&dev_priv_mutex);
++ goto out;
++ }
++ down_read(&dev->dev_rwsem);
++ mutex_unlock(&dev_priv_mutex);
++
++ res = scst_suspend_activity(true);
++ if (res != 0)
++ goto out_up;
++
++ up_read(&dev->dev_rwsem);
++
++ mutex_lock(&dev_priv_mutex);
++ dev = (struct scst_user_dev *)file->private_data;
++ if (dev == NULL) {
++ mutex_unlock(&dev_priv_mutex);
++ goto out_resume;
++ }
++
++ dev->blocking = 0;
++ wake_up_all(&dev->udev_cmd_threads.cmd_list_waitQ);
++
++ down_write(&dev->dev_rwsem);
++ file->private_data = NULL;
++ mutex_unlock(&dev_priv_mutex);
++
++ dev_user_exit_dev(dev);
++
++ up_write(&dev->dev_rwsem); /* to make lockdep happy */
++
++ kfree(dev);
++
++out_resume:
++ scst_resume_activity();
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_up:
++ up_read(&dev->dev_rwsem);
++ goto out;
++}
++
++static int dev_user_flush_cache(struct file *file)
++{
++ int res;
++ struct scst_user_dev *dev;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&dev_priv_mutex);
++ dev = (struct scst_user_dev *)file->private_data;
++ res = dev_user_check_reg(dev);
++ if (res != 0) {
++ mutex_unlock(&dev_priv_mutex);
++ goto out;
++ }
++ down_read(&dev->dev_rwsem);
++ mutex_unlock(&dev_priv_mutex);
++
++ res = scst_suspend_activity(true);
++ if (res != 0)
++ goto out_up;
++
++ sgv_pool_flush(dev->pool);
++ sgv_pool_flush(dev->pool_clust);
++
++ scst_resume_activity();
++
++out_up:
++ up_read(&dev->dev_rwsem);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int dev_user_capacity_changed(struct file *file)
++{
++ int res;
++ struct scst_user_dev *dev;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&dev_priv_mutex);
++ dev = (struct scst_user_dev *)file->private_data;
++ res = dev_user_check_reg(dev);
++ if (res != 0) {
++ mutex_unlock(&dev_priv_mutex);
++ goto out;
++ }
++ down_read(&dev->dev_rwsem);
++ mutex_unlock(&dev_priv_mutex);
++
++ scst_capacity_data_changed(dev->sdev);
++
++ up_read(&dev->dev_rwsem);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int dev_user_prealloc_buffer(struct file *file, void __user *arg)
++{
++ int res = 0, rc;
++ struct scst_user_dev *dev;
++ union scst_user_prealloc_buffer pre;
++ aligned_u64 pbuf;
++ uint32_t bufflen;
++ struct scst_user_cmd *ucmd;
++ int pages, sg_cnt;
++ struct sgv_pool *pool;
++ struct scatterlist *sg;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&dev_priv_mutex);
++ dev = (struct scst_user_dev *)file->private_data;
++ res = dev_user_check_reg(dev);
++ if (unlikely(res != 0)) {
++ mutex_unlock(&dev_priv_mutex);
++ goto out;
++ }
++ down_read(&dev->dev_rwsem);
++ mutex_unlock(&dev_priv_mutex);
++
++ rc = copy_from_user(&pre.in, arg, sizeof(pre.in));
++ if (unlikely(rc != 0)) {
++ PRINT_ERROR("Failed to copy %d user's bytes", rc);
++ res = -EFAULT;
++ goto out_up;
++ }
++
++ TRACE_MEM("Prealloc buffer with size %dKB for dev %s",
++ pre.in.bufflen / 1024, dev->name);
++ TRACE_BUFFER("Input param", &pre.in, sizeof(pre.in));
++
++ pbuf = pre.in.pbuf;
++ bufflen = pre.in.bufflen;
++
++ ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL);
++ if (ucmd == NULL) {
++ res = -ENOMEM;
++ goto out_up;
++ }
++
++ ucmd->buff_cached = 1;
++
++ TRACE_MEM("ucmd %p, pbuf %llx", ucmd, pbuf);
++
++ if (unlikely((pbuf & ~PAGE_MASK) != 0)) {
++ PRINT_ERROR("Supplied pbuf %llx isn't page aligned", pbuf);
++ res = -EINVAL;
++ goto out_put;
++ }
++
++ pages = calc_num_pg(pbuf, bufflen);
++ res = dev_user_map_buf(ucmd, pbuf, pages);
++ if (res != 0)
++ goto out_put;
++
++ if (pre.in.for_clust_pool)
++ pool = dev->pool_clust;
++ else
++ pool = dev->pool;
++
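++	/* Cache the freshly mapped user pages in the SGV pool as a new entry */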
++ sg = sgv_pool_alloc(pool, bufflen, GFP_KERNEL, SGV_POOL_ALLOC_GET_NEW,
++ &sg_cnt, &ucmd->sgv, &dev->udev_mem_lim, ucmd);
++ if (sg != NULL) {
++ struct scst_user_cmd *buf_ucmd =
++ (struct scst_user_cmd *)sgv_get_priv(ucmd->sgv);
++
++ TRACE_MEM("Buf ucmd %p (sg_cnt %d, last seg len %d, "
++ "bufflen %d)", buf_ucmd, sg_cnt,
++ sg[sg_cnt-1].length, bufflen);
++
++ EXTRACHECKS_BUG_ON(ucmd != buf_ucmd);
++
++ ucmd->buf_ucmd = buf_ucmd;
++ } else {
++ res = -ENOMEM;
++ goto out_put;
++ }
++
++ dev_user_free_sgv(ucmd);
++
++ pre.out.cmd_h = ucmd->h;
++ rc = copy_to_user(arg, &pre.out, sizeof(pre.out));
++ if (unlikely(rc != 0)) {
++ PRINT_ERROR("Failed to copy to user %d bytes", rc);
++ res = -EFAULT;
++ goto out_put;
++ }
++
++out_put:
++ ucmd_put(ucmd);
++
++out_up:
++ up_read(&dev->dev_rwsem);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int __dev_user_set_opt(struct scst_user_dev *dev,
++ const struct scst_user_opt *opt)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("dev %s, parse_type %x, on_free_cmd_type %x, "
++ "memory_reuse_type %x, partial_transfers_type %x, "
++ "partial_len %d", dev->name, opt->parse_type,
++ opt->on_free_cmd_type, opt->memory_reuse_type,
++ opt->partial_transfers_type, opt->partial_len);
++
++ if (opt->parse_type > SCST_USER_MAX_PARSE_OPT ||
++ opt->on_free_cmd_type > SCST_USER_MAX_ON_FREE_CMD_OPT ||
++ opt->memory_reuse_type > SCST_USER_MAX_MEM_REUSE_OPT ||
++ opt->partial_transfers_type > SCST_USER_MAX_PARTIAL_TRANSFERS_OPT) {
++ PRINT_ERROR("%s", "Invalid option");
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (((opt->tst != SCST_CONTR_MODE_ONE_TASK_SET) &&
++ (opt->tst != SCST_CONTR_MODE_SEP_TASK_SETS)) ||
++ ((opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) &&
++ (opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER)) ||
++ (opt->swp > 1) || (opt->tas > 1) || (opt->has_own_order_mgmt > 1) ||
++ (opt->d_sense > 1)) {
++ PRINT_ERROR("Invalid SCSI option (tst %x, queue_alg %x, swp %x,"
++ " tas %x, d_sense %d, has_own_order_mgmt %x)", opt->tst,
++ opt->queue_alg, opt->swp, opt->tas, opt->d_sense,
++ opt->has_own_order_mgmt);
++ res = -EINVAL;
++ goto out;
++ }
++
++ dev->parse_type = opt->parse_type;
++ dev->on_free_cmd_type = opt->on_free_cmd_type;
++ dev->memory_reuse_type = opt->memory_reuse_type;
++ dev->partial_transfers_type = opt->partial_transfers_type;
++ dev->partial_len = opt->partial_len;
++
++ dev->tst = opt->tst;
++ dev->queue_alg = opt->queue_alg;
++ dev->swp = opt->swp;
++ dev->tas = opt->tas;
++ dev->d_sense = opt->d_sense;
++ dev->has_own_order_mgmt = opt->has_own_order_mgmt;
++ if (dev->sdev != NULL) {
++ dev->sdev->tst = opt->tst;
++ dev->sdev->queue_alg = opt->queue_alg;
++ dev->sdev->swp = opt->swp;
++ dev->sdev->tas = opt->tas;
++ dev->sdev->d_sense = opt->d_sense;
++ dev->sdev->has_own_order_mgmt = opt->has_own_order_mgmt;
++ }
++
++ dev_user_setup_functions(dev);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt)
++{
++ int res;
++ struct scst_user_dev *dev;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&dev_priv_mutex);
++ dev = (struct scst_user_dev *)file->private_data;
++ res = dev_user_check_reg(dev);
++ if (res != 0) {
++ mutex_unlock(&dev_priv_mutex);
++ goto out;
++ }
++ down_read(&dev->dev_rwsem);
++ mutex_unlock(&dev_priv_mutex);
++
++ res = scst_suspend_activity(true);
++ if (res != 0)
++ goto out_up;
++
++ res = __dev_user_set_opt(dev, opt);
++
++ scst_resume_activity();
++
++out_up:
++ up_read(&dev->dev_rwsem);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int dev_user_get_opt(struct file *file, void __user *arg)
++{
++ int res, rc;
++ struct scst_user_dev *dev;
++ struct scst_user_opt opt;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&dev_priv_mutex);
++ dev = (struct scst_user_dev *)file->private_data;
++ res = dev_user_check_reg(dev);
++ if (res != 0) {
++ mutex_unlock(&dev_priv_mutex);
++ goto out;
++ }
++ down_read(&dev->dev_rwsem);
++ mutex_unlock(&dev_priv_mutex);
++
++ opt.parse_type = dev->parse_type;
++ opt.on_free_cmd_type = dev->on_free_cmd_type;
++ opt.memory_reuse_type = dev->memory_reuse_type;
++ opt.partial_transfers_type = dev->partial_transfers_type;
++ opt.partial_len = dev->partial_len;
++ opt.tst = dev->tst;
++ opt.queue_alg = dev->queue_alg;
++ opt.tas = dev->tas;
++ opt.swp = dev->swp;
++ opt.d_sense = dev->d_sense;
++ opt.has_own_order_mgmt = dev->has_own_order_mgmt;
++
++ TRACE_DBG("dev %s, parse_type %x, on_free_cmd_type %x, "
++ "memory_reuse_type %x, partial_transfers_type %x, "
++ "partial_len %d", dev->name, opt.parse_type,
++ opt.on_free_cmd_type, opt.memory_reuse_type,
++ opt.partial_transfers_type, opt.partial_len);
++
++ rc = copy_to_user(arg, &opt, sizeof(opt));
++ if (unlikely(rc != 0)) {
++		PRINT_ERROR("Failed to copy %d bytes to user space", rc);
++ res = -EFAULT;
++ goto out_up;
++ }
++
++out_up:
++ up_read(&dev->dev_rwsem);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int dev_usr_parse(struct scst_cmd *cmd)
++{
++ BUG();
++ return SCST_CMD_STATE_DEFAULT;
++}
++
++static int dev_user_exit_dev(struct scst_user_dev *dev)
++{
++ TRACE_ENTRY();
++
++ TRACE(TRACE_MGMT, "Releasing dev %s", dev->name);
++
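++	/*
++	 * Unhook the device, hand it over to the cleanup thread and wait
++	 * until all of its outstanding commands have been drained.
++	 */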
++ spin_lock(&dev_list_lock);
++ list_del(&dev->dev_list_entry);
++ spin_unlock(&dev_list_lock);
++
++ dev->blocking = 0;
++ wake_up_all(&dev->udev_cmd_threads.cmd_list_waitQ);
++
++ spin_lock(&cleanup_lock);
++ list_add_tail(&dev->cleanup_list_entry, &cleanup_list);
++ spin_unlock(&cleanup_lock);
++
++ wake_up(&cleanup_list_waitQ);
++
++ scst_unregister_virtual_device(dev->virt_id);
++ scst_unregister_virtual_dev_driver(&dev->devtype);
++
++ sgv_pool_flush(dev->pool_clust);
++ sgv_pool_flush(dev->pool);
++
++ TRACE_MGMT_DBG("Unregistering finished (dev %p)", dev);
++
++ dev->cleanup_done = 1;
++
++ wake_up(&cleanup_list_waitQ);
++ wake_up(&dev->udev_cmd_threads.cmd_list_waitQ);
++
++ wait_for_completion(&dev->cleanup_cmpl);
++
++ sgv_pool_del(dev->pool_clust);
++ sgv_pool_del(dev->pool);
++
++ scst_deinit_threads(&dev->udev_cmd_threads);
++
++ TRACE_MGMT_DBG("Releasing completed (dev %p)", dev);
++
++ module_put(THIS_MODULE);
++
++ TRACE_EXIT();
++ return 0;
++}
++
++static int __dev_user_release(void *arg)
++{
++ struct scst_user_dev *dev = (struct scst_user_dev *)arg;
++ dev_user_exit_dev(dev);
++ kfree(dev);
++ return 0;
++}
++
++static int dev_user_release(struct inode *inode, struct file *file)
++{
++ struct scst_user_dev *dev;
++ struct task_struct *t;
++
++ TRACE_ENTRY();
++
++ dev = (struct scst_user_dev *)file->private_data;
++ if (dev == NULL)
++ goto out;
++ file->private_data = NULL;
++
++ TRACE_MGMT_DBG("Going to release dev %s", dev->name);
++
++ t = kthread_run(__dev_user_release, dev, "scst_usr_released");
++ if (IS_ERR(t)) {
++ PRINT_CRIT_ERROR("kthread_run() failed (%ld), releasing device "
++ "%p directly. If you have several devices under load "
++ "it might deadlock!", PTR_ERR(t), dev);
++ __dev_user_release(dev);
++ }
++
++out:
++ TRACE_EXIT();
++ return 0;
++}
++
++static int dev_user_process_cleanup(struct scst_user_dev *dev)
++{
++ struct scst_user_cmd *ucmd;
++ int rc = 0, res = 1;
++
++ TRACE_ENTRY();
++
++ BUG_ON(dev->blocking);
++ wake_up_all(&dev->udev_cmd_threads.cmd_list_waitQ); /* just in case */
++
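++	/*
++	 * Keep unjamming the device and draining queued commands until
++	 * cleanup_done is set and no commands are left.
++	 */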
++ while (1) {
++ int rc1;
++
++		TRACE_DBG("Cleaning up dev %p", dev);
++
++ rc1 = dev_user_unjam_dev(dev);
++ if ((rc1 == 0) && (rc == -EAGAIN) && dev->cleanup_done)
++ break;
++
++ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++
++ rc = dev_user_get_next_cmd(dev, &ucmd);
++ if (rc == 0)
++ dev_user_unjam_cmd(ucmd, 1, NULL);
++
++ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
++
++ if (rc == -EAGAIN) {
++ if (!dev->cleanup_done) {
++ TRACE_DBG("No more commands (dev %p)", dev);
++ goto out;
++ }
++ }
++ }
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++{
++ int i;
++ for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
++ struct list_head *head = &dev->ucmd_hash[i];
++ struct scst_user_cmd *ucmd2;
++again:
++ list_for_each_entry(ucmd2, head, hash_list_entry) {
++ PRINT_ERROR("Lost ucmd %p (state %x, ref %d)", ucmd2,
++ ucmd2->state, atomic_read(&ucmd2->ucmd_ref));
++ ucmd_put(ucmd2);
++ goto again;
++ }
++ }
++}
++#endif
++
++	TRACE_DBG("Cleanup done (dev %p)", dev);
++ complete_all(&dev->cleanup_cmpl);
++ res = 0;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t dev_user_sysfs_commands_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos = 0, ppos, i;
++ struct scst_device *dev;
++ struct scst_user_dev *udev;
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++ udev = (struct scst_user_dev *)dev->dh_priv;
++
++ spin_lock_irqsave(&udev->udev_cmd_threads.cmd_list_lock, flags);
++ for (i = 0; i < (int)ARRAY_SIZE(udev->ucmd_hash); i++) {
++ struct list_head *head = &udev->ucmd_hash[i];
++ struct scst_user_cmd *ucmd;
++ list_for_each_entry(ucmd, head, hash_list_entry) {
++ ppos = pos;
++ pos += scnprintf(&buf[pos],
++ SCST_SYSFS_BLOCK_SIZE - pos,
++ "ucmd %p (state %x, ref %d), "
++ "sent_to_user %d, seen_by_user %d, "
++ "aborted %d, jammed %d, scst_cmd %p\n",
++ ucmd, ucmd->state,
++ atomic_read(&ucmd->ucmd_ref),
++ ucmd->sent_to_user, ucmd->seen_by_user,
++ ucmd->aborted, ucmd->jammed, ucmd->cmd);
++ if (pos >= SCST_SYSFS_BLOCK_SIZE-1) {
++ ppos += scnprintf(&buf[ppos],
++ SCST_SYSFS_BLOCK_SIZE - ppos, "...\n");
++ pos = ppos;
++ break;
++ }
++ }
++ }
++ spin_unlock_irqrestore(&udev->udev_cmd_threads.cmd_list_lock, flags);
++
++ TRACE_EXIT_RES(pos);
++ return pos;
++}
++
++static inline int test_cleanup_list(void)
++{
++ int res = !list_empty(&cleanup_list) ||
++ unlikely(kthread_should_stop());
++ return res;
++}
++
++static int dev_user_cleanup_thread(void *arg)
++{
++ TRACE_ENTRY();
++
++ PRINT_INFO("Cleanup thread started, PID %d", current->pid);
++
++ current->flags |= PF_NOFREEZE;
++
++ spin_lock(&cleanup_lock);
++ while (!kthread_should_stop()) {
++ wait_queue_t wait;
++ init_waitqueue_entry(&wait, current);
++
++ if (!test_cleanup_list()) {
++ add_wait_queue_exclusive(&cleanup_list_waitQ, &wait);
++ for (;;) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ if (test_cleanup_list())
++ break;
++ spin_unlock(&cleanup_lock);
++ schedule();
++ spin_lock(&cleanup_lock);
++ }
++ set_current_state(TASK_RUNNING);
++ remove_wait_queue(&cleanup_list_waitQ, &wait);
++ }
++
++ /*
++ * We have to poll devices, because commands can go from SCST
++ * core on cmd_list_waitQ and we have no practical way to
++ * detect them.
++ */
++
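++		/*
++		 * Devices whose cleanup is not finished yet are parked on
++		 * cl_devs and retried after a 100 ms pause.
++		 */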
++ while (1) {
++ struct scst_user_dev *dev;
++ LIST_HEAD(cl_devs);
++
++ while (!list_empty(&cleanup_list)) {
++ int rc;
++
++ dev = list_entry(cleanup_list.next,
++ typeof(*dev), cleanup_list_entry);
++ list_del(&dev->cleanup_list_entry);
++
++ spin_unlock(&cleanup_lock);
++ rc = dev_user_process_cleanup(dev);
++ spin_lock(&cleanup_lock);
++
++ if (rc != 0)
++ list_add_tail(&dev->cleanup_list_entry,
++ &cl_devs);
++ }
++
++ if (list_empty(&cl_devs))
++ break;
++
++ spin_unlock(&cleanup_lock);
++ msleep(100);
++ spin_lock(&cleanup_lock);
++
++ while (!list_empty(&cl_devs)) {
++ dev = list_entry(cl_devs.next, typeof(*dev),
++ cleanup_list_entry);
++ list_move_tail(&dev->cleanup_list_entry,
++ &cleanup_list);
++ }
++ }
++ }
++ spin_unlock(&cleanup_lock);
++
++ /*
++ * If kthread_should_stop() is true, we are guaranteed to be
++ * on the module unload, so cleanup_list must be empty.
++ */
++ BUG_ON(!list_empty(&cleanup_list));
++
++ PRINT_INFO("Cleanup thread PID %d finished", current->pid);
++
++ TRACE_EXIT();
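++	/*
++	 * Decode the starting LBA from the CDB; the field layout depends on
++	 * the 6/10/12/16-byte CDB variant.
++	 */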
++ return 0;
++}
++
++static int __init init_scst_user(void)
++{
++ int res = 0;
++ struct max_get_reply {
++ union {
++ struct scst_user_get_cmd g;
++ struct scst_user_reply_cmd r;
++ };
++ };
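++	/*
++	 * max_get_reply above is sized as the larger of the two structures
++	 * and is only used to size user_get_cmd_cachep.
++	 */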
++ struct device *dev;
++
++ TRACE_ENTRY();
++
++ user_cmd_cachep = KMEM_CACHE(scst_user_cmd, SCST_SLAB_FLAGS);
++ if (user_cmd_cachep == NULL) {
++ res = -ENOMEM;
++ goto out;
++ }
++
++ user_get_cmd_cachep = KMEM_CACHE(max_get_reply, SCST_SLAB_FLAGS);
++ if (user_get_cmd_cachep == NULL) {
++ res = -ENOMEM;
++ goto out_cache;
++ }
++
++ dev_user_devtype.module = THIS_MODULE;
++
++ res = scst_register_virtual_dev_driver(&dev_user_devtype);
++ if (res < 0)
++ goto out_cache1;
++
++ dev_user_sysfs_class = class_create(THIS_MODULE, DEV_USER_NAME);
++ if (IS_ERR(dev_user_sysfs_class)) {
++		PRINT_ERROR("%s", "Unable to create sysfs class for SCST user "
++ "space handler");
++ res = PTR_ERR(dev_user_sysfs_class);
++ goto out_unreg;
++ }
++
++ dev_user_major = register_chrdev(0, DEV_USER_NAME, &dev_user_fops);
++ if (dev_user_major < 0) {
++		PRINT_ERROR("register_chrdev() failed: %d", dev_user_major);
++ res = dev_user_major;
++ goto out_class;
++ }
++
++ dev = device_create(dev_user_sysfs_class, NULL,
++ MKDEV(dev_user_major, 0),
++ NULL,
++ DEV_USER_NAME);
++ if (IS_ERR(dev)) {
++ res = PTR_ERR(dev);
++ goto out_chrdev;
++ }
++
++ cleanup_thread = kthread_run(dev_user_cleanup_thread, NULL,
++ "scst_usr_cleanupd");
++ if (IS_ERR(cleanup_thread)) {
++ res = PTR_ERR(cleanup_thread);
++		PRINT_ERROR("kthread_run() failed: %d", res);
++ goto out_dev;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_dev:
++ device_destroy(dev_user_sysfs_class, MKDEV(dev_user_major, 0));
++
++out_chrdev:
++ unregister_chrdev(dev_user_major, DEV_USER_NAME);
++
++out_class:
++ class_destroy(dev_user_sysfs_class);
++
++out_unreg:
++	scst_unregister_virtual_dev_driver(&dev_user_devtype);
++
++out_cache1:
++ kmem_cache_destroy(user_get_cmd_cachep);
++
++out_cache:
++ kmem_cache_destroy(user_cmd_cachep);
++ goto out;
++}
++
++static void __exit exit_scst_user(void)
++{
++ int rc;
++
++ TRACE_ENTRY();
++
++ rc = kthread_stop(cleanup_thread);
++ if (rc < 0)
++ TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
++
++ unregister_chrdev(dev_user_major, DEV_USER_NAME);
++ device_destroy(dev_user_sysfs_class, MKDEV(dev_user_major, 0));
++ class_destroy(dev_user_sysfs_class);
++
++ scst_unregister_virtual_dev_driver(&dev_user_devtype);
++
++ kmem_cache_destroy(user_get_cmd_cachep);
++ kmem_cache_destroy(user_cmd_cachep);
++
++ TRACE_EXIT();
++ return;
++}
++
++module_init(init_scst_user);
++module_exit(exit_scst_user);
++
++MODULE_AUTHOR("Vladislav Bolkhovitin");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("User space device handler for SCST");
++MODULE_VERSION(SCST_VERSION_STRING);
+diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c
+--- orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c
++++ linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c
+@@ -0,0 +1,4228 @@
++/*
++ * scst_vdisk.c
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 Ming Zhang <blackmagic02881 at gmail dot com>
++ * Copyright (C) 2007 Ross Walker <rswwalker at hotmail dot com>
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * SCSI disk (type 0) and CDROM (type 5) dev handler using files
++ * on file systems or block devices (VDISK)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/unistd.h>
++#include <linux/smp_lock.h>
++#include <linux/spinlock.h>
++#include <linux/init.h>
++#include <linux/uio.h>
++#include <linux/list.h>
++#include <linux/ctype.h>
++#include <linux/writeback.h>
++#include <linux/vmalloc.h>
++#include <asm/atomic.h>
++#include <linux/kthread.h>
++#include <linux/sched.h>
++#include <linux/version.h>
++#include <asm/div64.h>
++#include <asm/unaligned.h>
++#include <linux/slab.h>
++#include <linux/bio.h>
++
++#define LOG_PREFIX "dev_vdisk"
++
++#include <scst/scst.h>
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++
++#define TRACE_ORDER 0x80000000
++
++static struct scst_trace_log vdisk_local_trace_tbl[] = {
++ { TRACE_ORDER, "order" },
++ { 0, NULL }
++};
++#define trace_log_tbl vdisk_local_trace_tbl
++
++#define VDISK_TRACE_TLB_HELP ", order"
++
++#endif
++
++#include "scst_dev_handler.h"
++
++/* 8 byte ASCII Vendor */
++#define SCST_FIO_VENDOR "SCST_FIO"
++#define SCST_BIO_VENDOR "SCST_BIO"
++/* 4 byte ASCII Product Revision Level - left aligned */
++#define SCST_FIO_REV " 200"
++
++#define MAX_USN_LEN (20+1) /* For '\0' */
++
++#define INQ_BUF_SZ 128
++#define EVPD 0x01
++#define CMDDT 0x02
++
++#define MSENSE_BUF_SZ 256
++#define DBD 0x08 /* disable block descriptor */
++#define WP 0x80 /* write protect */
++#define DPOFUA 0x10 /* DPOFUA bit */
++#define WCE 0x04 /* write cache enable */
++
++#define PF 0x10 /* page format */
++#define SP 0x01 /* save pages */
++#define PS 0x80 /* parameter saveable */
++
++#define BYTE 8
++#define DEF_DISK_BLOCKSIZE_SHIFT 9
++#define DEF_DISK_BLOCKSIZE (1 << DEF_DISK_BLOCKSIZE_SHIFT)
++#define DEF_CDROM_BLOCKSIZE_SHIFT 11
++#define DEF_CDROM_BLOCKSIZE (1 << DEF_CDROM_BLOCKSIZE_SHIFT)
++#define DEF_SECTORS 56
++#define DEF_HEADS 255
++#define LEN_MEM (32 * 1024)
++#define DEF_RD_ONLY 0
++#define DEF_WRITE_THROUGH 0
++#define DEF_NV_CACHE 0
++#define DEF_O_DIRECT 0
++#define DEF_REMOVABLE 0
++
++#define VDISK_NULLIO_SIZE (3LL*1024*1024*1024*1024/2)
++
++#define DEF_TST SCST_CONTR_MODE_SEP_TASK_SETS
++/*
++ * Since we cannot control the backing storage device's reordering, we always
++ * have to report unrestricted reordering.
++ */
++#define DEF_QUEUE_ALG_WT SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER
++#define DEF_QUEUE_ALG SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER
++#define DEF_SWP 0
++#define DEF_TAS 0
++
++#define DEF_DSENSE SCST_CONTR_MODE_FIXED_SENSE
++
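++/*
++ * Per-character pseudo-random values used by vdisk_gen_dev_id_num() to hash
++ * a device name into a device ID number.
++ */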
++static unsigned int random_values[256] = {
++ 9862592UL, 3744545211UL, 2348289082UL, 4036111983UL,
++ 435574201UL, 3110343764UL, 2383055570UL, 1826499182UL,
++ 4076766377UL, 1549935812UL, 3696752161UL, 1200276050UL,
++ 3878162706UL, 1783530428UL, 2291072214UL, 125807985UL,
++ 3407668966UL, 547437109UL, 3961389597UL, 969093968UL,
++ 56006179UL, 2591023451UL, 1849465UL, 1614540336UL,
++ 3699757935UL, 479961779UL, 3768703953UL, 2529621525UL,
++ 4157893312UL, 3673555386UL, 4091110867UL, 2193909423UL,
++ 2800464448UL, 3052113233UL, 450394455UL, 3424338713UL,
++ 2113709130UL, 4082064373UL, 3708640918UL, 3841182218UL,
++ 3141803315UL, 1032476030UL, 1166423150UL, 1169646901UL,
++ 2686611738UL, 575517645UL, 2829331065UL, 1351103339UL,
++ 2856560215UL, 2402488288UL, 867847666UL, 8524618UL,
++ 704790297UL, 2228765657UL, 231508411UL, 1425523814UL,
++ 2146764591UL, 1287631730UL, 4142687914UL, 3879884598UL,
++ 729945311UL, 310596427UL, 2263511876UL, 1983091134UL,
++ 3500916580UL, 1642490324UL, 3858376049UL, 695342182UL,
++ 780528366UL, 1372613640UL, 1100993200UL, 1314818946UL,
++ 572029783UL, 3775573540UL, 776262915UL, 2684520905UL,
++ 1007252738UL, 3505856396UL, 1974886670UL, 3115856627UL,
++ 4194842288UL, 2135793908UL, 3566210707UL, 7929775UL,
++ 1321130213UL, 2627281746UL, 3587067247UL, 2025159890UL,
++ 2587032000UL, 3098513342UL, 3289360258UL, 130594898UL,
++ 2258149812UL, 2275857755UL, 3966929942UL, 1521739999UL,
++ 4191192765UL, 958953550UL, 4153558347UL, 1011030335UL,
++ 524382185UL, 4099757640UL, 498828115UL, 2396978754UL,
++ 328688935UL, 826399828UL, 3174103611UL, 3921966365UL,
++ 2187456284UL, 2631406787UL, 3930669674UL, 4282803915UL,
++ 1776755417UL, 374959755UL, 2483763076UL, 844956392UL,
++ 2209187588UL, 3647277868UL, 291047860UL, 3485867047UL,
++ 2223103546UL, 2526736133UL, 3153407604UL, 3828961796UL,
++ 3355731910UL, 2322269798UL, 2752144379UL, 519897942UL,
++ 3430536488UL, 1801511593UL, 1953975728UL, 3286944283UL,
++ 1511612621UL, 1050133852UL, 409321604UL, 1037601109UL,
++ 3352316843UL, 4198371381UL, 617863284UL, 994672213UL,
++ 1540735436UL, 2337363549UL, 1242368492UL, 665473059UL,
++ 2330728163UL, 3443103219UL, 2291025133UL, 3420108120UL,
++ 2663305280UL, 1608969839UL, 2278959931UL, 1389747794UL,
++ 2226946970UL, 2131266900UL, 3856979144UL, 1894169043UL,
++ 2692697628UL, 3797290626UL, 3248126844UL, 3922786277UL,
++ 343705271UL, 3739749888UL, 2191310783UL, 2962488787UL,
++ 4119364141UL, 1403351302UL, 2984008923UL, 3822407178UL,
++ 1932139782UL, 2323869332UL, 2793574182UL, 1852626483UL,
++ 2722460269UL, 1136097522UL, 1005121083UL, 1805201184UL,
++ 2212824936UL, 2979547931UL, 4133075915UL, 2585731003UL,
++ 2431626071UL, 134370235UL, 3763236829UL, 1171434827UL,
++ 2251806994UL, 1289341038UL, 3616320525UL, 392218563UL,
++ 1544502546UL, 2993937212UL, 1957503701UL, 3579140080UL,
++ 4270846116UL, 2030149142UL, 1792286022UL, 366604999UL,
++ 2625579499UL, 790898158UL, 770833822UL, 815540197UL,
++ 2747711781UL, 3570468835UL, 3976195842UL, 1257621341UL,
++ 1198342980UL, 1860626190UL, 3247856686UL, 351473955UL,
++ 993440563UL, 340807146UL, 1041994520UL, 3573925241UL,
++ 480246395UL, 2104806831UL, 1020782793UL, 3362132583UL,
++ 2272911358UL, 3440096248UL, 2356596804UL, 259492703UL,
++ 3899500740UL, 252071876UL, 2177024041UL, 4284810959UL,
++ 2775999888UL, 2653420445UL, 2876046047UL, 1025771859UL,
++ 1994475651UL, 3564987377UL, 4112956647UL, 1821511719UL,
++ 3113447247UL, 455315102UL, 1585273189UL, 2311494568UL,
++ 774051541UL, 1898115372UL, 2637499516UL, 247231365UL,
++ 1475014417UL, 803585727UL, 3911097303UL, 1714292230UL,
++ 476579326UL, 2496900974UL, 3397613314UL, 341202244UL,
++ 807790202UL, 4221326173UL, 499979741UL, 1301488547UL,
++ 1056807896UL, 3525009458UL, 1174811641UL, 3049738746UL,
++};
++
++struct scst_vdisk_dev {
++ uint32_t block_size;
++ uint64_t nblocks;
++ int block_shift;
++ loff_t file_size; /* in bytes */
++
++ /*
++ * This lock can be taken on both SIRQ and thread context, but in
++	 * all cases for each particular instance it's taken consistently either
++ * on SIRQ or thread context. Mix of them is forbidden.
++ */
++ spinlock_t flags_lock;
++
++ /*
++ * Below flags are protected by flags_lock or suspended activity
++ * with scst_vdisk_mutex.
++ */
++ unsigned int rd_only:1;
++ unsigned int wt_flag:1;
++ unsigned int nv_cache:1;
++ unsigned int o_direct_flag:1;
++ unsigned int media_changed:1;
++ unsigned int prevent_allow_medium_removal:1;
++ unsigned int nullio:1;
++ unsigned int blockio:1;
++ unsigned int cdrom_empty:1;
++ unsigned int removable:1;
++
++ int virt_id;
++ char name[16+1]; /* Name of the virtual device,
++ must be <= SCSI Model + 1 */
++ char *filename; /* File name, protected by
++ scst_mutex and suspended activities */
++ uint16_t command_set_version;
++
++ /* All 4 protected by vdisk_serial_rwlock */
++ unsigned int t10_dev_id_set:1; /* true if t10_dev_id manually set */
++ unsigned int usn_set:1; /* true if usn manually set */
++ char t10_dev_id[16+8+2]; /* T10 device ID */
++ char usn[MAX_USN_LEN];
++
++ struct scst_device *dev;
++ struct list_head vdev_list_entry;
++
++ struct scst_dev_type *vdev_devt;
++};
++
++struct scst_vdisk_thr {
++ struct scst_thr_data_hdr hdr;
++ struct file *fd;
++ struct block_device *bdev;
++ struct iovec *iv;
++ int iv_count;
++};
++
++/* The context RA patch is supposed to be applied to the kernel */
++#define DEF_NUM_THREADS 8
++static int num_threads = DEF_NUM_THREADS;
++
++module_param_named(num_threads, num_threads, int, S_IRUGO);
++MODULE_PARM_DESC(num_threads, "vdisk threads count");
++
++static int vdisk_attach(struct scst_device *dev);
++static void vdisk_detach(struct scst_device *dev);
++static int vdisk_attach_tgt(struct scst_tgt_dev *tgt_dev);
++static void vdisk_detach_tgt(struct scst_tgt_dev *tgt_dev);
++static int vdisk_parse(struct scst_cmd *);
++static int vdisk_do_job(struct scst_cmd *cmd);
++static int vcdrom_parse(struct scst_cmd *);
++static int vcdrom_exec(struct scst_cmd *cmd);
++static void vdisk_exec_read(struct scst_cmd *cmd,
++ struct scst_vdisk_thr *thr, loff_t loff);
++static void vdisk_exec_write(struct scst_cmd *cmd,
++ struct scst_vdisk_thr *thr, loff_t loff);
++static void blockio_exec_rw(struct scst_cmd *cmd, struct scst_vdisk_thr *thr,
++ u64 lba_start, int write);
++static int blockio_flush(struct block_device *bdev);
++static void vdisk_exec_verify(struct scst_cmd *cmd,
++ struct scst_vdisk_thr *thr, loff_t loff);
++static void vdisk_exec_read_capacity(struct scst_cmd *cmd);
++static void vdisk_exec_read_capacity16(struct scst_cmd *cmd);
++static void vdisk_exec_inquiry(struct scst_cmd *cmd);
++static void vdisk_exec_request_sense(struct scst_cmd *cmd);
++static void vdisk_exec_mode_sense(struct scst_cmd *cmd);
++static void vdisk_exec_mode_select(struct scst_cmd *cmd);
++static void vdisk_exec_log(struct scst_cmd *cmd);
++static void vdisk_exec_read_toc(struct scst_cmd *cmd);
++static void vdisk_exec_prevent_allow_medium_removal(struct scst_cmd *cmd);
++static int vdisk_fsync(struct scst_vdisk_thr *thr, loff_t loff,
++ loff_t len, struct scst_cmd *cmd, struct scst_device *dev);
++static ssize_t vdisk_add_fileio_device(const char *device_name, char *params);
++static ssize_t vdisk_add_blockio_device(const char *device_name, char *params);
++static ssize_t vdisk_add_nullio_device(const char *device_name, char *params);
++static ssize_t vdisk_del_device(const char *device_name);
++static ssize_t vcdrom_add_device(const char *device_name, char *params);
++static ssize_t vcdrom_del_device(const char *device_name);
++static int vdisk_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
++ struct scst_tgt_dev *tgt_dev);
++static uint64_t vdisk_gen_dev_id_num(const char *virt_dev_name);
++
++/** SYSFS **/
++
++static ssize_t vdev_sysfs_size_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf);
++static ssize_t vdisk_sysfs_blocksize_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf);
++static ssize_t vdisk_sysfs_rd_only_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf);
++static ssize_t vdisk_sysfs_wt_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf);
++static ssize_t vdisk_sysfs_nv_cache_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf);
++static ssize_t vdisk_sysfs_o_direct_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf);
++static ssize_t vdisk_sysfs_removable_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf);
++static ssize_t vdev_sysfs_filename_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf);
++static ssize_t vdisk_sysfs_resync_size_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count);
++static ssize_t vdev_sysfs_t10_dev_id_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count);
++static ssize_t vdev_sysfs_t10_dev_id_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf);
++static ssize_t vdev_sysfs_usn_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count);
++static ssize_t vdev_sysfs_usn_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf);
++
++static ssize_t vcdrom_sysfs_filename_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count);
++
++static struct kobj_attribute vdev_size_attr =
++ __ATTR(size_mb, S_IRUGO, vdev_sysfs_size_show, NULL);
++static struct kobj_attribute vdisk_blocksize_attr =
++ __ATTR(blocksize, S_IRUGO, vdisk_sysfs_blocksize_show, NULL);
++static struct kobj_attribute vdisk_rd_only_attr =
++ __ATTR(read_only, S_IRUGO, vdisk_sysfs_rd_only_show, NULL);
++static struct kobj_attribute vdisk_wt_attr =
++ __ATTR(write_through, S_IRUGO, vdisk_sysfs_wt_show, NULL);
++static struct kobj_attribute vdisk_nv_cache_attr =
++ __ATTR(nv_cache, S_IRUGO, vdisk_sysfs_nv_cache_show, NULL);
++static struct kobj_attribute vdisk_o_direct_attr =
++ __ATTR(o_direct, S_IRUGO, vdisk_sysfs_o_direct_show, NULL);
++static struct kobj_attribute vdisk_removable_attr =
++ __ATTR(removable, S_IRUGO, vdisk_sysfs_removable_show, NULL);
++static struct kobj_attribute vdisk_filename_attr =
++ __ATTR(filename, S_IRUGO, vdev_sysfs_filename_show, NULL);
++static struct kobj_attribute vdisk_resync_size_attr =
++ __ATTR(resync_size, S_IWUSR, NULL, vdisk_sysfs_resync_size_store);
++static struct kobj_attribute vdev_t10_dev_id_attr =
++ __ATTR(t10_dev_id, S_IWUSR|S_IRUGO, vdev_sysfs_t10_dev_id_show,
++ vdev_sysfs_t10_dev_id_store);
++static struct kobj_attribute vdev_usn_attr =
++ __ATTR(usn, S_IWUSR|S_IRUGO, vdev_sysfs_usn_show, vdev_sysfs_usn_store);
++
++static struct kobj_attribute vcdrom_filename_attr =
++ __ATTR(filename, S_IRUGO|S_IWUSR, vdev_sysfs_filename_show,
++ vcdrom_sysfs_filename_store);
++
++static const struct attribute *vdisk_fileio_attrs[] = {
++ &vdev_size_attr.attr,
++ &vdisk_blocksize_attr.attr,
++ &vdisk_rd_only_attr.attr,
++ &vdisk_wt_attr.attr,
++ &vdisk_nv_cache_attr.attr,
++ &vdisk_o_direct_attr.attr,
++ &vdisk_removable_attr.attr,
++ &vdisk_filename_attr.attr,
++ &vdisk_resync_size_attr.attr,
++ &vdev_t10_dev_id_attr.attr,
++ &vdev_usn_attr.attr,
++ NULL,
++};
++
++static const struct attribute *vdisk_blockio_attrs[] = {
++ &vdev_size_attr.attr,
++ &vdisk_blocksize_attr.attr,
++ &vdisk_rd_only_attr.attr,
++ &vdisk_nv_cache_attr.attr,
++ &vdisk_removable_attr.attr,
++ &vdisk_filename_attr.attr,
++ &vdisk_resync_size_attr.attr,
++ &vdev_t10_dev_id_attr.attr,
++ &vdev_usn_attr.attr,
++ NULL,
++};
++
++static const struct attribute *vdisk_nullio_attrs[] = {
++ &vdev_size_attr.attr,
++ &vdisk_blocksize_attr.attr,
++ &vdisk_rd_only_attr.attr,
++ &vdisk_removable_attr.attr,
++ &vdev_t10_dev_id_attr.attr,
++ &vdev_usn_attr.attr,
++ NULL,
++};
++
++static const struct attribute *vcdrom_attrs[] = {
++ &vdev_size_attr.attr,
++ &vcdrom_filename_attr.attr,
++ &vdev_t10_dev_id_attr.attr,
++ &vdev_usn_attr.attr,
++ NULL,
++};
++
++/* Protects vdisks addition/deletion and related activities, like search */
++static DEFINE_MUTEX(scst_vdisk_mutex);
++
++/* Protects devices t10_dev_id and usn */
++static DEFINE_RWLOCK(vdisk_serial_rwlock);
++
++/* Protected by scst_vdisk_mutex */
++static LIST_HEAD(vdev_list);
++
++static struct kmem_cache *vdisk_thr_cachep;
++
++/*
++ * Be careful when changing the "name" field, since it is the name of the
++ * corresponding /sys/kernel/scst_tgt entry, hence part of the user-space ABI.
++ */
++
++static struct scst_dev_type vdisk_file_devtype = {
++ .name = "vdisk_fileio",
++ .type = TYPE_DISK,
++ .exec_sync = 1,
++ .threads_num = -1,
++ .parse_atomic = 1,
++ .dev_done_atomic = 1,
++ .attach = vdisk_attach,
++ .detach = vdisk_detach,
++ .attach_tgt = vdisk_attach_tgt,
++ .detach_tgt = vdisk_detach_tgt,
++ .parse = vdisk_parse,
++ .exec = vdisk_do_job,
++ .task_mgmt_fn = vdisk_task_mgmt_fn,
++ .add_device = vdisk_add_fileio_device,
++ .del_device = vdisk_del_device,
++ .dev_attrs = vdisk_fileio_attrs,
++ .add_device_parameters = "filename, blocksize, write_through, "
++ "nv_cache, o_direct, read_only, removable",
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
++ .trace_flags = &trace_flag,
++ .trace_tbl = vdisk_local_trace_tbl,
++ .trace_tbl_help = VDISK_TRACE_TLB_HELP,
++#endif
++};
++
++static struct kmem_cache *blockio_work_cachep;
++
++static struct scst_dev_type vdisk_blk_devtype = {
++ .name = "vdisk_blockio",
++ .type = TYPE_DISK,
++ .threads_num = 1,
++ .parse_atomic = 1,
++ .dev_done_atomic = 1,
++ .attach = vdisk_attach,
++ .detach = vdisk_detach,
++ .attach_tgt = vdisk_attach_tgt,
++ .detach_tgt = vdisk_detach_tgt,
++ .parse = vdisk_parse,
++ .exec = vdisk_do_job,
++ .task_mgmt_fn = vdisk_task_mgmt_fn,
++ .add_device = vdisk_add_blockio_device,
++ .del_device = vdisk_del_device,
++ .dev_attrs = vdisk_blockio_attrs,
++ .add_device_parameters = "filename, blocksize, nv_cache, read_only, "
++ "removable",
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
++ .trace_flags = &trace_flag,
++ .trace_tbl = vdisk_local_trace_tbl,
++ .trace_tbl_help = VDISK_TRACE_TLB_HELP,
++#endif
++};
++
++static struct scst_dev_type vdisk_null_devtype = {
++ .name = "vdisk_nullio",
++ .type = TYPE_DISK,
++ .threads_num = 0,
++ .parse_atomic = 1,
++ .dev_done_atomic = 1,
++ .attach = vdisk_attach,
++ .detach = vdisk_detach,
++ .attach_tgt = vdisk_attach_tgt,
++ .detach_tgt = vdisk_detach_tgt,
++ .parse = vdisk_parse,
++ .exec = vdisk_do_job,
++ .task_mgmt_fn = vdisk_task_mgmt_fn,
++ .add_device = vdisk_add_nullio_device,
++ .del_device = vdisk_del_device,
++ .dev_attrs = vdisk_nullio_attrs,
++ .add_device_parameters = "blocksize, read_only, removable",
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
++ .trace_flags = &trace_flag,
++ .trace_tbl = vdisk_local_trace_tbl,
++ .trace_tbl_help = VDISK_TRACE_TLB_HELP,
++#endif
++};
++
++static struct scst_dev_type vcdrom_devtype = {
++ .name = "vcdrom",
++ .type = TYPE_ROM,
++ .exec_sync = 1,
++ .threads_num = -1,
++ .parse_atomic = 1,
++ .dev_done_atomic = 1,
++ .attach = vdisk_attach,
++ .detach = vdisk_detach,
++ .attach_tgt = vdisk_attach_tgt,
++ .detach_tgt = vdisk_detach_tgt,
++ .parse = vcdrom_parse,
++ .exec = vcdrom_exec,
++ .task_mgmt_fn = vdisk_task_mgmt_fn,
++ .add_device = vcdrom_add_device,
++ .del_device = vcdrom_del_device,
++ .dev_attrs = vcdrom_attrs,
++ .add_device_parameters = NULL,
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
++ .trace_flags = &trace_flag,
++ .trace_tbl = vdisk_local_trace_tbl,
++ .trace_tbl_help = VDISK_TRACE_TLB_HELP,
++#endif
++};
++
++static struct scst_vdisk_thr nullio_thr_data;
++
++static const char *vdev_get_filename(const struct scst_vdisk_dev *virt_dev)
++{
++ if (virt_dev->filename != NULL)
++ return virt_dev->filename;
++ else
++ return "none";
++}
++
++/* Returns fd, use IS_ERR(fd) to get error status */
++static struct file *vdev_open_fd(const struct scst_vdisk_dev *virt_dev)
++{
++ int open_flags = 0;
++ struct file *fd;
++
++ TRACE_ENTRY();
++
++ if (virt_dev->dev->rd_only)
++ open_flags |= O_RDONLY;
++ else
++ open_flags |= O_RDWR;
++ if (virt_dev->o_direct_flag)
++ open_flags |= O_DIRECT;
++ if (virt_dev->wt_flag && !virt_dev->nv_cache)
++ open_flags |= O_SYNC;
++ TRACE_DBG("Opening file %s, flags 0x%x",
++ virt_dev->filename, open_flags);
++ fd = filp_open(virt_dev->filename, O_LARGEFILE | open_flags, 0600);
++
++ TRACE_EXIT();
++ return fd;
++}
++
++static void vdisk_blockio_check_flush_support(struct scst_vdisk_dev *virt_dev)
++{
++ struct inode *inode;
++ struct file *fd;
++
++ TRACE_ENTRY();
++
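++	/*
++	 * Flush support only matters for writable BLOCKIO devices without
++	 * NV_CACHE; if a test flush fails, fall back to NV_CACHE mode.
++	 */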
++ if (!virt_dev->blockio || virt_dev->rd_only || virt_dev->nv_cache)
++ goto out;
++
++ fd = filp_open(virt_dev->filename, O_LARGEFILE, 0600);
++ if (IS_ERR(fd)) {
++ PRINT_ERROR("filp_open(%s) returned error %ld",
++ virt_dev->filename, PTR_ERR(fd));
++ goto out;
++ }
++
++ inode = fd->f_dentry->d_inode;
++
++ if (!S_ISBLK(inode->i_mode)) {
++ PRINT_ERROR("%s is NOT a block device", virt_dev->filename);
++ goto out_close;
++ }
++
++ if (blockio_flush(inode->i_bdev) != 0) {
++ PRINT_WARNING("Device %s doesn't support barriers, switching "
++ "to NV_CACHE mode. Read README for more details.",
++ virt_dev->filename);
++ virt_dev->nv_cache = 1;
++ }
++
++out_close:
++ filp_close(fd, NULL);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* Returns 0 on success and file size in *file_size, error code otherwise */
++static int vdisk_get_file_size(const char *filename, bool blockio,
++ loff_t *file_size)
++{
++ struct inode *inode;
++ int res = 0;
++ struct file *fd;
++
++ TRACE_ENTRY();
++
++ *file_size = 0;
++
++ fd = filp_open(filename, O_LARGEFILE | O_RDONLY, 0600);
++ if (IS_ERR(fd)) {
++ res = PTR_ERR(fd);
++ PRINT_ERROR("filp_open(%s) returned error %d", filename, res);
++ goto out;
++ }
++
++ inode = fd->f_dentry->d_inode;
++
++ if (blockio && !S_ISBLK(inode->i_mode)) {
++ PRINT_ERROR("File %s is NOT a block device", filename);
++ res = -EINVAL;
++ goto out_close;
++ }
++
++ if (S_ISREG(inode->i_mode))
++ /* Nothing to do */;
++ else if (S_ISBLK(inode->i_mode))
++ inode = inode->i_bdev->bd_inode;
++ else {
++ res = -EINVAL;
++ goto out_close;
++ }
++
++ *file_size = inode->i_size;
++
++out_close:
++ filp_close(fd, NULL);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int vdisk_attach(struct scst_device *dev)
++{
++ int res = 0;
++ loff_t err;
++ struct scst_vdisk_dev *virt_dev = NULL, *vv;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("virt_id %d (%s)", dev->virt_id, dev->virt_name);
++
++ if (dev->virt_id == 0) {
++ PRINT_ERROR("%s", "Not a virtual device");
++ res = -EINVAL;
++ goto out;
++ }
++
++ /*
++ * scst_vdisk_mutex must be already taken before
++ * scst_register_virtual_device()
++ */
++ list_for_each_entry(vv, &vdev_list, vdev_list_entry) {
++ if (strcmp(vv->name, dev->virt_name) == 0) {
++ virt_dev = vv;
++ break;
++ }
++ }
++
++ if (virt_dev == NULL) {
++ PRINT_ERROR("Device %s not found", dev->virt_name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ virt_dev->dev = dev;
++
++ dev->rd_only = virt_dev->rd_only;
++
++ if (!virt_dev->cdrom_empty) {
++ if (virt_dev->nullio)
++ err = VDISK_NULLIO_SIZE;
++ else {
++ res = vdisk_get_file_size(virt_dev->filename,
++ virt_dev->blockio, &err);
++ if (res != 0)
++ goto out;
++ }
++ virt_dev->file_size = err;
++
++ TRACE_DBG("size of file: %lld", (long long unsigned int)err);
++
++ vdisk_blockio_check_flush_support(virt_dev);
++ } else
++ virt_dev->file_size = 0;
++
++ virt_dev->nblocks = virt_dev->file_size >> virt_dev->block_shift;
++
++ if (!virt_dev->cdrom_empty) {
++ PRINT_INFO("Attached SCSI target virtual %s %s "
++ "(file=\"%s\", fs=%lldMB, bs=%d, nblocks=%lld,"
++ " cyln=%lld%s)",
++ (dev->type == TYPE_DISK) ? "disk" : "cdrom",
++ virt_dev->name, vdev_get_filename(virt_dev),
++ virt_dev->file_size >> 20, virt_dev->block_size,
++ (long long unsigned int)virt_dev->nblocks,
++ (long long unsigned int)virt_dev->nblocks/64/32,
++ virt_dev->nblocks < 64*32
++ ? " !WARNING! cyln less than 1" : "");
++ } else {
++ PRINT_INFO("Attached empty SCSI target virtual cdrom %s",
++ virt_dev->name);
++ }
++
++ dev->dh_priv = virt_dev;
++
++ dev->tst = DEF_TST;
++ dev->d_sense = DEF_DSENSE;
++ if (virt_dev->wt_flag && !virt_dev->nv_cache)
++ dev->queue_alg = DEF_QUEUE_ALG_WT;
++ else
++ dev->queue_alg = DEF_QUEUE_ALG;
++ dev->swp = DEF_SWP;
++ dev->tas = DEF_TAS;
++
++out:
++ TRACE_EXIT();
++ return res;
++}
++
++/* scst_mutex supposed to be held */
++static void vdisk_detach(struct scst_device *dev)
++{
++ struct scst_vdisk_dev *virt_dev =
++ (struct scst_vdisk_dev *)dev->dh_priv;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("virt_id %d", dev->virt_id);
++
++ PRINT_INFO("Detached virtual device %s (\"%s\")",
++ virt_dev->name, vdev_get_filename(virt_dev));
++
++ /* virt_dev will be freed by the caller */
++ dev->dh_priv = NULL;
++
++ TRACE_EXIT();
++ return;
++}
++
++static void vdisk_free_thr_data(struct scst_thr_data_hdr *d)
++{
++ struct scst_vdisk_thr *thr =
++ container_of(d, struct scst_vdisk_thr, hdr);
++
++ TRACE_ENTRY();
++
++ if (thr->fd)
++ filp_close(thr->fd, NULL);
++
++ kfree(thr->iv);
++
++ kmem_cache_free(vdisk_thr_cachep, thr);
++
++ TRACE_EXIT();
++ return;
++}
++
++static struct scst_vdisk_thr *vdisk_init_thr_data(
++ struct scst_tgt_dev *tgt_dev)
++{
++ struct scst_vdisk_thr *res;
++ struct scst_vdisk_dev *virt_dev =
++ (struct scst_vdisk_dev *)tgt_dev->dev->dh_priv;
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(virt_dev->nullio);
++
++ res = kmem_cache_zalloc(vdisk_thr_cachep, GFP_KERNEL);
++ if (res == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "%s", "Unable to allocate struct "
++ "scst_vdisk_thr");
++ goto out;
++ }
++
++ if (!virt_dev->cdrom_empty) {
++ res->fd = vdev_open_fd(virt_dev);
++ if (IS_ERR(res->fd)) {
++ PRINT_ERROR("filp_open(%s) returned an error %ld",
++ virt_dev->filename, PTR_ERR(res->fd));
++ goto out_free;
++ }
++ if (virt_dev->blockio)
++ res->bdev = res->fd->f_dentry->d_inode->i_bdev;
++ else
++ res->bdev = NULL;
++ } else
++ res->fd = NULL;
++
++ scst_add_thr_data(tgt_dev, &res->hdr, vdisk_free_thr_data);
++
++out:
++ TRACE_EXIT_HRES((unsigned long)res);
++ return res;
++
++out_free:
++ kmem_cache_free(vdisk_thr_cachep, res);
++ res = NULL;
++ goto out;
++}
++
++static int vdisk_attach_tgt(struct scst_tgt_dev *tgt_dev)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ /* Nothing to do */
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void vdisk_detach_tgt(struct scst_tgt_dev *tgt_dev)
++{
++ TRACE_ENTRY();
++
++ scst_del_all_thr_data(tgt_dev);
++
++ TRACE_EXIT();
++ return;
++}
++
++static int vdisk_do_job(struct scst_cmd *cmd)
++{
++ int rc, res;
++ uint64_t lba_start = 0;
++ loff_t data_len = 0;
++ uint8_t *cdb = cmd->cdb;
++ int opcode = cdb[0];
++ loff_t loff;
++ struct scst_device *dev = cmd->dev;
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++ struct scst_vdisk_dev *virt_dev =
++ (struct scst_vdisk_dev *)dev->dh_priv;
++ struct scst_thr_data_hdr *d;
++ struct scst_vdisk_thr *thr = NULL;
++ int fua = 0;
++
++ TRACE_ENTRY();
++
++ switch (cmd->queue_type) {
++ case SCST_CMD_QUEUE_ORDERED:
++ TRACE(TRACE_ORDER, "ORDERED cmd %p (op %x)", cmd, cmd->cdb[0]);
++ break;
++ case SCST_CMD_QUEUE_HEAD_OF_QUEUE:
++ TRACE(TRACE_ORDER, "HQ cmd %p (op %x)", cmd, cmd->cdb[0]);
++ break;
++ default:
++ break;
++ }
++
++ rc = scst_check_local_events(cmd);
++ if (unlikely(rc != 0))
++ goto out_done;
++
++ cmd->status = 0;
++ cmd->msg_status = 0;
++ cmd->host_status = DID_OK;
++ cmd->driver_status = 0;
++
++ if (!virt_dev->nullio) {
++ d = scst_find_thr_data(tgt_dev);
++ if (unlikely(d == NULL)) {
++ thr = vdisk_init_thr_data(tgt_dev);
++ if (thr == NULL) {
++ scst_set_busy(cmd);
++ goto out_compl;
++ }
++ scst_thr_data_get(&thr->hdr);
++ } else
++ thr = container_of(d, struct scst_vdisk_thr, hdr);
++ } else {
++ thr = &nullio_thr_data;
++ scst_thr_data_get(&thr->hdr);
++ }
++
++ switch (opcode) {
++ case READ_6:
++ case WRITE_6:
++ case VERIFY_6:
++ lba_start = (((cdb[1] & 0x1f) << (BYTE * 2)) +
++ (cdb[2] << (BYTE * 1)) +
++ (cdb[3] << (BYTE * 0)));
++ data_len = cmd->bufflen;
++ break;
++ case READ_10:
++ case READ_12:
++ case WRITE_10:
++ case WRITE_12:
++ case VERIFY:
++ case WRITE_VERIFY:
++ case WRITE_VERIFY_12:
++ case VERIFY_12:
++ lba_start |= ((u64)cdb[2]) << 24;
++ lba_start |= ((u64)cdb[3]) << 16;
++ lba_start |= ((u64)cdb[4]) << 8;
++ lba_start |= ((u64)cdb[5]);
++ data_len = cmd->bufflen;
++ break;
++ case READ_16:
++ case WRITE_16:
++ case WRITE_VERIFY_16:
++ case VERIFY_16:
++ lba_start |= ((u64)cdb[2]) << 56;
++ lba_start |= ((u64)cdb[3]) << 48;
++ lba_start |= ((u64)cdb[4]) << 40;
++ lba_start |= ((u64)cdb[5]) << 32;
++ lba_start |= ((u64)cdb[6]) << 24;
++ lba_start |= ((u64)cdb[7]) << 16;
++ lba_start |= ((u64)cdb[8]) << 8;
++ lba_start |= ((u64)cdb[9]);
++ data_len = cmd->bufflen;
++ break;
++ case SYNCHRONIZE_CACHE:
++ lba_start |= ((u64)cdb[2]) << 24;
++ lba_start |= ((u64)cdb[3]) << 16;
++ lba_start |= ((u64)cdb[4]) << 8;
++ lba_start |= ((u64)cdb[5]);
++ data_len = ((cdb[7] << (BYTE * 1)) + (cdb[8] << (BYTE * 0)))
++ << virt_dev->block_shift;
++ if (data_len == 0)
++ data_len = virt_dev->file_size -
++ ((loff_t)lba_start << virt_dev->block_shift);
++ break;
++ }
++
++ loff = (loff_t)lba_start << virt_dev->block_shift;
++ TRACE_DBG("cmd %p, lba_start %lld, loff %lld, data_len %lld", cmd,
++ (long long unsigned int)lba_start,
++ (long long unsigned int)loff,
++ (long long unsigned int)data_len);
++ if (unlikely(loff < 0) || unlikely(data_len < 0) ||
++ unlikely((loff + data_len) > virt_dev->file_size)) {
++ PRINT_INFO("Access beyond the end of the device "
++ "(%lld of %lld, len %lld)",
++ (long long unsigned int)loff,
++ (long long unsigned int)virt_dev->file_size,
++ (long long unsigned int)data_len);
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
++ scst_sense_block_out_range_error));
++ goto out_compl;
++ }
++
++ switch (opcode) {
++ case WRITE_10:
++ case WRITE_12:
++ case WRITE_16:
++ fua = (cdb[1] & 0x8);
++ if (fua) {
++ TRACE(TRACE_ORDER, "FUA: loff=%lld, "
++ "data_len=%lld", (long long unsigned int)loff,
++ (long long unsigned int)data_len);
++ }
++ break;
++ }
++
++ switch (opcode) {
++ case READ_6:
++ case READ_10:
++ case READ_12:
++ case READ_16:
++ if (virt_dev->blockio) {
++ blockio_exec_rw(cmd, thr, lba_start, 0);
++ goto out_thr;
++ } else
++ vdisk_exec_read(cmd, thr, loff);
++ break;
++ case WRITE_6:
++ case WRITE_10:
++ case WRITE_12:
++ case WRITE_16:
++ {
++ if (virt_dev->blockio) {
++ blockio_exec_rw(cmd, thr, lba_start, 1);
++ goto out_thr;
++ } else
++ vdisk_exec_write(cmd, thr, loff);
++ /* O_SYNC flag is used for WT devices */
++ if (fua)
++ vdisk_fsync(thr, loff, data_len, cmd, dev);
++ break;
++ }
++ case WRITE_VERIFY:
++ case WRITE_VERIFY_12:
++ case WRITE_VERIFY_16:
++ {
++ /* ToDo: BLOCKIO VERIFY */
++ vdisk_exec_write(cmd, thr, loff);
++ /* O_SYNC flag is used for WT devices */
++ if (scsi_status_is_good(cmd->status))
++ vdisk_exec_verify(cmd, thr, loff);
++ break;
++ }
++ case SYNCHRONIZE_CACHE:
++ {
++ int immed = cdb[1] & 0x2;
++ TRACE(TRACE_ORDER, "SYNCHRONIZE_CACHE: "
++ "loff=%lld, data_len=%lld, immed=%d",
++ (long long unsigned int)loff,
++ (long long unsigned int)data_len, immed);
++ if (immed) {
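++			/* IMMED set: complete the command first, then flush */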
++ scst_cmd_get(cmd); /* to protect dev */
++ cmd->completed = 1;
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT,
++ SCST_CONTEXT_SAME);
++ vdisk_fsync(thr, loff, data_len, NULL, dev);
++ /* ToDo: vdisk_fsync() error processing */
++ scst_cmd_put(cmd);
++ goto out_thr;
++ } else {
++ vdisk_fsync(thr, loff, data_len, cmd, dev);
++ break;
++ }
++ }
++ case VERIFY_6:
++ case VERIFY:
++ case VERIFY_12:
++ case VERIFY_16:
++ vdisk_exec_verify(cmd, thr, loff);
++ break;
++ case MODE_SENSE:
++ case MODE_SENSE_10:
++ vdisk_exec_mode_sense(cmd);
++ break;
++ case MODE_SELECT:
++ case MODE_SELECT_10:
++ vdisk_exec_mode_select(cmd);
++ break;
++ case LOG_SELECT:
++ case LOG_SENSE:
++ vdisk_exec_log(cmd);
++ break;
++ case ALLOW_MEDIUM_REMOVAL:
++ vdisk_exec_prevent_allow_medium_removal(cmd);
++ break;
++ case READ_TOC:
++ vdisk_exec_read_toc(cmd);
++ break;
++ case START_STOP:
++ vdisk_fsync(thr, 0, virt_dev->file_size, cmd, dev);
++ break;
++ case RESERVE:
++ case RESERVE_10:
++ case RELEASE:
++ case RELEASE_10:
++ case TEST_UNIT_READY:
++ break;
++ case INQUIRY:
++ vdisk_exec_inquiry(cmd);
++ break;
++ case REQUEST_SENSE:
++ vdisk_exec_request_sense(cmd);
++ break;
++ case READ_CAPACITY:
++ vdisk_exec_read_capacity(cmd);
++ break;
++ case SERVICE_ACTION_IN:
++ if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
++ vdisk_exec_read_capacity16(cmd);
++ break;
++ }
++		/* else fall through */
++ case REPORT_LUNS:
++ default:
++ TRACE_DBG("Invalid opcode %d", opcode);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
++ }
++
++out_compl:
++ cmd->completed = 1;
++
++out_done:
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
++
++out_thr:
++ if (likely(thr != NULL))
++ scst_thr_data_put(&thr->hdr);
++
++ res = SCST_EXEC_COMPLETED;
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int vdisk_get_block_shift(struct scst_cmd *cmd)
++{
++ struct scst_vdisk_dev *virt_dev =
++ (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ return virt_dev->block_shift;
++}
++
++static int vdisk_parse(struct scst_cmd *cmd)
++{
++ scst_sbc_generic_parse(cmd, vdisk_get_block_shift);
++ return SCST_CMD_STATE_DEFAULT;
++}
++
++static int vcdrom_parse(struct scst_cmd *cmd)
++{
++ scst_cdrom_generic_parse(cmd, vdisk_get_block_shift);
++ return SCST_CMD_STATE_DEFAULT;
++}
++
++static int vcdrom_exec(struct scst_cmd *cmd)
++{
++ int res = SCST_EXEC_COMPLETED;
++ int opcode = cmd->cdb[0];
++ struct scst_vdisk_dev *virt_dev =
++ (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++
++ TRACE_ENTRY();
++
++ cmd->status = 0;
++ cmd->msg_status = 0;
++ cmd->host_status = DID_OK;
++ cmd->driver_status = 0;
++
++ if (virt_dev->cdrom_empty && (opcode != INQUIRY)) {
++ TRACE_DBG("%s", "CDROM empty");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_not_ready));
++ goto out_done;
++ }
++
++ if (virt_dev->media_changed && scst_is_ua_command(cmd)) {
++ spin_lock(&virt_dev->flags_lock);
++ if (virt_dev->media_changed) {
++ virt_dev->media_changed = 0;
++ TRACE_DBG("%s", "Reporting media changed");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_medium_changed_UA));
++ spin_unlock(&virt_dev->flags_lock);
++ goto out_done;
++ }
++ spin_unlock(&virt_dev->flags_lock);
++ }
++
++ res = vdisk_do_job(cmd);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_done:
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
++ goto out;
++}
++
++static uint64_t vdisk_gen_dev_id_num(const char *virt_dev_name)
++{
++ unsigned int dev_id_num, i;
++
++ for (dev_id_num = 0, i = 0; i < strlen(virt_dev_name); i++) {
++ unsigned int rv = random_values[(int)(virt_dev_name[i])];
++ /* Do some rotating of the bits */
++ dev_id_num ^= ((rv << i) | (rv >> (32 - i)));
++ }
++
++ return ((uint64_t)scst_get_setup_id() << 32) | dev_id_num;
++}
++
++static void vdisk_exec_inquiry(struct scst_cmd *cmd)
++{
++ int32_t length, i, resp_len = 0;
++ uint8_t *address;
++ uint8_t *buf;
++ struct scst_vdisk_dev *virt_dev =
++ (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++
++ /* ToDo: Performance Boost:
++ * 1. remove kzalloc, buf
++ * 2. do all checks before touching *address
++ * 3. zero *address
++ * 4. write directly to *address
++ */
++
++ TRACE_ENTRY();
++
++ buf = kzalloc(INQ_BUF_SZ, GFP_KERNEL);
++ if (buf == NULL) {
++ scst_set_busy(cmd);
++ goto out;
++ }
++
++ length = scst_get_buf_first(cmd, &address);
++ TRACE_DBG("length %d", length);
++ if (unlikely(length <= 0)) {
++ if (length < 0) {
++ PRINT_ERROR("scst_get_buf_first() failed: %d", length);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ }
++ goto out_free;
++ }
++
++ if (cmd->cdb[1] & CMDDT) {
++ TRACE_DBG("%s", "INQUIRY: CMDDT is unsupported");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_put;
++ }
++
++ buf[0] = cmd->dev->type; /* type dev */
++ if (virt_dev->removable)
++ buf[1] = 0x80; /* removable */
++ /* Vital Product */
++ if (cmd->cdb[1] & EVPD) {
++ if (0 == cmd->cdb[2]) {
++ /* supported vital product data pages */
++ buf[3] = 3;
++ buf[4] = 0x0; /* this page */
++ buf[5] = 0x80; /* unit serial number */
++ buf[6] = 0x83; /* device identification */
++ if (virt_dev->dev->type == TYPE_DISK) {
++ buf[3] += 1;
++ buf[7] = 0xB0; /* block limits */
++ }
++ resp_len = buf[3] + 4;
++ } else if (0x80 == cmd->cdb[2]) {
++ /* unit serial number */
++ int usn_len;
++ read_lock(&vdisk_serial_rwlock);
++ usn_len = strlen(virt_dev->usn);
++ buf[1] = 0x80;
++ buf[3] = usn_len;
++ strncpy(&buf[4], virt_dev->usn, usn_len);
++ read_unlock(&vdisk_serial_rwlock);
++ resp_len = buf[3] + 4;
++ } else if (0x83 == cmd->cdb[2]) {
++ /* device identification */
++ int num = 4;
++
++ buf[1] = 0x83;
++ /* T10 vendor identifier field format (faked) */
++ buf[num + 0] = 0x2; /* ASCII */
++ buf[num + 1] = 0x1; /* Vendor ID */
++ if (virt_dev->blockio)
++ memcpy(&buf[num + 4], SCST_BIO_VENDOR, 8);
++ else
++ memcpy(&buf[num + 4], SCST_FIO_VENDOR, 8);
++
++ read_lock(&vdisk_serial_rwlock);
++ i = strlen(virt_dev->t10_dev_id);
++ memcpy(&buf[num + 12], virt_dev->t10_dev_id, i);
++ read_unlock(&vdisk_serial_rwlock);
++
++ buf[num + 3] = 8 + i;
++ num += buf[num + 3];
++
++ num += 4;
++
++ /*
++ * Relative target port identifier
++ */
++ buf[num + 0] = 0x01; /* binary */
++ /* Relative target port id */
++ buf[num + 1] = 0x10 | 0x04;
++
++ put_unaligned(cpu_to_be16(cmd->tgt->rel_tgt_id),
++ (__be16 *)&buf[num + 4 + 2]);
++
++ buf[num + 3] = 4;
++ num += buf[num + 3];
++
++ num += 4;
++
++ /*
++ * IEEE id
++ */
++ buf[num + 0] = 0x01; /* binary */
++
++ /* EUI-64 */
++ buf[num + 1] = 0x02;
++ buf[num + 2] = 0x00;
++ buf[num + 3] = 0x08;
++
++ /* IEEE id */
++ buf[num + 4] = virt_dev->t10_dev_id[0];
++ buf[num + 5] = virt_dev->t10_dev_id[1];
++ buf[num + 6] = virt_dev->t10_dev_id[2];
++
++ /* IEEE ext id */
++ buf[num + 7] = virt_dev->t10_dev_id[3];
++ buf[num + 8] = virt_dev->t10_dev_id[4];
++ buf[num + 9] = virt_dev->t10_dev_id[5];
++ buf[num + 10] = virt_dev->t10_dev_id[6];
++ buf[num + 11] = virt_dev->t10_dev_id[7];
++ num += buf[num + 3];
++
++ resp_len = num;
++ buf[2] = (resp_len >> 8) & 0xFF;
++ buf[3] = resp_len & 0xFF;
++ resp_len += 4;
++ } else if ((0xB0 == cmd->cdb[2]) &&
++ (virt_dev->dev->type == TYPE_DISK)) {
++ /* block limits */
++ int max_transfer;
++ buf[1] = 0xB0;
++ buf[3] = 0x1C;
++			/* Optimal transfer granularity is PAGE_SIZE */
++ put_unaligned(cpu_to_be16(max_t(int,
++ PAGE_SIZE/virt_dev->block_size, 1)),
++ (uint16_t *)&buf[6]);
++ /* Max transfer len is min of sg limit and 8M */
++ max_transfer = min_t(int,
++ cmd->tgt_dev->max_sg_cnt << PAGE_SHIFT,
++ 8*1024*1024) / virt_dev->block_size;
++ put_unaligned(cpu_to_be32(max_transfer),
++ (uint32_t *)&buf[8]);
++ /*
++			 * Report an optimal transfer length of 1 MB. It would
++			 * be better not to set it at all, since we have no such
++			 * limit, but some initiators may not understand that (?).
++			 * On the other hand, too large transfers are not optimal
++			 * either, because the SGV cache supports only <4M buffers.
++ */
++ put_unaligned(cpu_to_be32(min_t(int,
++ max_transfer,
++ 1*1024*1024 / virt_dev->block_size)),
++ (uint32_t *)&buf[12]);
++ resp_len = buf[3] + 4;
++ } else {
++ TRACE_DBG("INQUIRY: Unsupported EVPD page %x",
++ cmd->cdb[2]);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_put;
++ }
++ } else {
++ int len, num;
++
++ if (cmd->cdb[2] != 0) {
++ TRACE_DBG("INQUIRY: Unsupported page %x", cmd->cdb[2]);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_put;
++ }
++
++ buf[2] = 5; /* Device complies to SPC-3 */
++ buf[3] = 0x12; /* HiSup + data in format specified in SPC */
++		buf[4] = 31; /* n - 4 = 35 - 4 = 31 for full 36 byte data */
++ buf[6] = 1; /* MultiP 1 */
++ buf[7] = 2; /* CMDQUE 1, BQue 0 => commands queuing supported */
++
++ /*
++ * 8 byte ASCII Vendor Identification of the target
++ * - left aligned.
++ */
++ if (virt_dev->blockio)
++ memcpy(&buf[8], SCST_BIO_VENDOR, 8);
++ else
++ memcpy(&buf[8], SCST_FIO_VENDOR, 8);
++
++ /*
++ * 16 byte ASCII Product Identification of the target - left
++ * aligned.
++ */
++ memset(&buf[16], ' ', 16);
++ len = min(strlen(virt_dev->name), (size_t)16);
++ memcpy(&buf[16], virt_dev->name, len);
++
++ /*
++ * 4 byte ASCII Product Revision Level of the target - left
++ * aligned.
++ */
++ memcpy(&buf[32], SCST_FIO_REV, 4);
++
++ /** Version descriptors **/
++
++ buf[4] += 58 - 36;
++ num = 0;
++
++ /* SAM-3 T10/1561-D revision 14 */
++ buf[58 + num] = 0x0;
++ buf[58 + num + 1] = 0x76;
++ num += 2;
++
++ /* Physical transport */
++ if (cmd->tgtt->get_phys_transport_version != NULL) {
++ uint16_t v = cmd->tgtt->get_phys_transport_version(cmd->tgt);
++ if (v != 0) {
++ *((__be16 *)&buf[58 + num]) = cpu_to_be16(v);
++ num += 2;
++ }
++ }
++
++ /* SCSI transport */
++ if (cmd->tgtt->get_scsi_transport_version != NULL) {
++ *((__be16 *)&buf[58 + num]) =
++ cpu_to_be16(cmd->tgtt->get_scsi_transport_version(cmd->tgt));
++ num += 2;
++ }
++
++ /* SPC-3 T10/1416-D revision 23 */
++ buf[58 + num] = 0x3;
++ buf[58 + num + 1] = 0x12;
++ num += 2;
++
++ /* Device command set */
++ if (virt_dev->command_set_version != 0) {
++ *((__be16 *)&buf[58 + num]) =
++ cpu_to_be16(virt_dev->command_set_version);
++ num += 2;
++ }
++
++ buf[4] += num;
++ resp_len = buf[4] + 5;
++ }
++
++ BUG_ON(resp_len >= INQ_BUF_SZ);
++
++ if (length > resp_len)
++ length = resp_len;
++ memcpy(address, buf, length);
++
++out_put:
++ scst_put_buf(cmd, address);
++ if (length < cmd->resp_data_len)
++ scst_set_resp_data_len(cmd, length);
++
++out_free:
++ kfree(buf);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static void vdisk_exec_request_sense(struct scst_cmd *cmd)
++{
++ int32_t length, sl;
++ uint8_t *address;
++ uint8_t b[SCST_STANDARD_SENSE_LEN];
++
++ TRACE_ENTRY();
++
++ sl = scst_set_sense(b, sizeof(b), cmd->dev->d_sense,
++ SCST_LOAD_SENSE(scst_sense_no_sense));
++
++ length = scst_get_buf_first(cmd, &address);
++ TRACE_DBG("length %d", length);
++ if (length < 0) {
++		PRINT_ERROR("scst_get_buf_first() failed: %d", length);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out;
++ }
++
++ length = min(sl, length);
++ memcpy(address, b, length);
++ scst_set_resp_data_len(cmd, length);
++
++ scst_put_buf(cmd, address);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * <<Following mode pages info copied from ST318451LW with some corrections>>
++ *
++ * ToDo: revise them
++ */
++static int vdisk_err_recov_pg(unsigned char *p, int pcontrol,
++ struct scst_vdisk_dev *virt_dev)
++{ /* Read-Write Error Recovery page for mode_sense */
++ const unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
++ 5, 0, 0xff, 0xff};
++
++ memcpy(p, err_recov_pg, sizeof(err_recov_pg));
++ if (1 == pcontrol)
++ memset(p + 2, 0, sizeof(err_recov_pg) - 2);
++ return sizeof(err_recov_pg);
++}
++
++static int vdisk_disconnect_pg(unsigned char *p, int pcontrol,
++ struct scst_vdisk_dev *virt_dev)
++{ /* Disconnect-Reconnect page for mode_sense */
++ const unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
++ 0, 0, 0, 0, 0, 0, 0, 0};
++
++ memcpy(p, disconnect_pg, sizeof(disconnect_pg));
++ if (1 == pcontrol)
++ memset(p + 2, 0, sizeof(disconnect_pg) - 2);
++ return sizeof(disconnect_pg);
++}
++
++static int vdisk_rigid_geo_pg(unsigned char *p, int pcontrol,
++ struct scst_vdisk_dev *virt_dev)
++{
++ unsigned char geo_m_pg[] = {0x04, 0x16, 0, 0, 0, DEF_HEADS, 0, 0,
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++ 0x3a, 0x98/* 15K RPM */, 0, 0};
++ int32_t ncyl, n, rem;
++ uint64_t dividend;
++
++ memcpy(p, geo_m_pg, sizeof(geo_m_pg));
++ /*
++ * Divide virt_dev->nblocks by (DEF_HEADS * DEF_SECTORS) and store
++ * the quotient in ncyl and the remainder in rem.
++ */
++ dividend = virt_dev->nblocks;
++ rem = do_div(dividend, DEF_HEADS * DEF_SECTORS);
++ ncyl = dividend;
++ if (rem != 0)
++ ncyl++;
++ memcpy(&n, p + 2, sizeof(u32));
++ n = n | ((__force u32)cpu_to_be32(ncyl) >> 8);
++ memcpy(p + 2, &n, sizeof(u32));
++ if (1 == pcontrol)
++ memset(p + 2, 0, sizeof(geo_m_pg) - 2);
++ return sizeof(geo_m_pg);
++}
++
++static int vdisk_format_pg(unsigned char *p, int pcontrol,
++ struct scst_vdisk_dev *virt_dev)
++{ /* Format device page for mode_sense */
++ const unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
++ 0, 0, 0, 0, 0, 0, 0, 0,
++ 0, 0, 0, 0, 0x40, 0, 0, 0};
++
++ memcpy(p, format_pg, sizeof(format_pg));
++ p[10] = (DEF_SECTORS >> 8) & 0xff;
++ p[11] = DEF_SECTORS & 0xff;
++ p[12] = (virt_dev->block_size >> 8) & 0xff;
++ p[13] = virt_dev->block_size & 0xff;
++ if (1 == pcontrol)
++ memset(p + 2, 0, sizeof(format_pg) - 2);
++ return sizeof(format_pg);
++}
++
++static int vdisk_caching_pg(unsigned char *p, int pcontrol,
++ struct scst_vdisk_dev *virt_dev)
++{ /* Caching page for mode_sense */
++ const unsigned char caching_pg[] = {0x8, 18, 0x10, 0, 0xff, 0xff, 0, 0,
++ 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
++
++ memcpy(p, caching_pg, sizeof(caching_pg));
++ p[2] |= !(virt_dev->wt_flag || virt_dev->nv_cache) ? WCE : 0;
++ if (1 == pcontrol)
++ memset(p + 2, 0, sizeof(caching_pg) - 2);
++ return sizeof(caching_pg);
++}
++
++static int vdisk_ctrl_m_pg(unsigned char *p, int pcontrol,
++ struct scst_vdisk_dev *virt_dev)
++{ /* Control mode page for mode_sense */
++ const unsigned char ctrl_m_pg[] = {0xa, 0xa, 0, 0, 0, 0, 0, 0,
++ 0, 0, 0x2, 0x4b};
++
++ memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
++ switch (pcontrol) {
++ case 0:
++ p[2] |= virt_dev->dev->tst << 5;
++ p[2] |= virt_dev->dev->d_sense << 2;
++ p[3] |= virt_dev->dev->queue_alg << 4;
++ p[4] |= virt_dev->dev->swp << 3;
++ p[5] |= virt_dev->dev->tas << 6;
++ break;
++ case 1:
++ memset(p + 2, 0, sizeof(ctrl_m_pg) - 2);
++#if 0 /*
++ * It's too early to implement it, since we can't control the
++ * backstorage device parameters. ToDo
++ */
++ p[2] |= 7 << 5; /* TST */
++ p[3] |= 0xF << 4; /* QUEUE ALGORITHM MODIFIER */
++#endif
++ p[2] |= 1 << 2; /* D_SENSE */
++ p[4] |= 1 << 3; /* SWP */
++ p[5] |= 1 << 6; /* TAS */
++ break;
++ case 2:
++ p[2] |= DEF_TST << 5;
++ p[2] |= DEF_DSENSE << 2;
++ if (virt_dev->wt_flag || virt_dev->nv_cache)
++ p[3] |= DEF_QUEUE_ALG_WT << 4;
++ else
++ p[3] |= DEF_QUEUE_ALG << 4;
++ p[4] |= DEF_SWP << 3;
++ p[5] |= DEF_TAS << 6;
++ break;
++ default:
++ BUG();
++ }
++ return sizeof(ctrl_m_pg);
++}
++
++static int vdisk_iec_m_pg(unsigned char *p, int pcontrol,
++ struct scst_vdisk_dev *virt_dev)
++{ /* Informational Exceptions control mode page for mode_sense */
++ const unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
++ 0, 0, 0x0, 0x0};
++ memcpy(p, iec_m_pg, sizeof(iec_m_pg));
++ if (1 == pcontrol)
++ memset(p + 2, 0, sizeof(iec_m_pg) - 2);
++ return sizeof(iec_m_pg);
++}
++
++static void vdisk_exec_mode_sense(struct scst_cmd *cmd)
++{
++ int32_t length;
++ uint8_t *address;
++ uint8_t *buf;
++ struct scst_vdisk_dev *virt_dev;
++ uint32_t blocksize;
++ uint64_t nblocks;
++ unsigned char dbd, type;
++ int pcontrol, pcode, subpcode;
++ unsigned char dev_spec;
++ int msense_6, offset = 0, len;
++ unsigned char *bp;
++
++ TRACE_ENTRY();
++
++ buf = kzalloc(MSENSE_BUF_SZ, GFP_KERNEL);
++ if (buf == NULL) {
++ scst_set_busy(cmd);
++ goto out;
++ }
++
++ virt_dev = (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ blocksize = virt_dev->block_size;
++ nblocks = virt_dev->nblocks;
++
++ type = cmd->dev->type; /* type dev */
++ dbd = cmd->cdb[1] & DBD;
++ pcontrol = (cmd->cdb[2] & 0xc0) >> 6;
++ pcode = cmd->cdb[2] & 0x3f;
++ subpcode = cmd->cdb[3];
++ msense_6 = (MODE_SENSE == cmd->cdb[0]);
++ dev_spec = (virt_dev->dev->rd_only ||
++ cmd->tgt_dev->acg_dev->rd_only) ? WP : 0;
++
++ if (!virt_dev->blockio)
++ dev_spec |= DPOFUA;
++
++ length = scst_get_buf_first(cmd, &address);
++ if (unlikely(length <= 0)) {
++ if (length < 0) {
++ PRINT_ERROR("scst_get_buf_first() failed: %d", length);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ }
++ goto out_free;
++ }
++
++ if (0x3 == pcontrol) {
++ TRACE_DBG("%s", "MODE SENSE: Saving values not supported");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_saving_params_unsup));
++ goto out_put;
++ }
++
++ if (msense_6) {
++ buf[1] = type;
++ buf[2] = dev_spec;
++ offset = 4;
++ } else {
++ buf[2] = type;
++ buf[3] = dev_spec;
++ offset = 8;
++ }
++
++ if (0 != subpcode) {
++ /* TODO: Control Extension page */
++ TRACE_DBG("%s", "MODE SENSE: Only subpage 0 is supported");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_put;
++ }
++
++ if (!dbd) {
++ /* Create block descriptor */
++ buf[offset - 1] = 0x08; /* block descriptor length */
++ if (nblocks >> 32) {
++ buf[offset + 0] = 0xFF;
++ buf[offset + 1] = 0xFF;
++ buf[offset + 2] = 0xFF;
++ buf[offset + 3] = 0xFF;
++ } else {
++ /* num blks */
++ buf[offset + 0] = (nblocks >> (BYTE * 3)) & 0xFF;
++ buf[offset + 1] = (nblocks >> (BYTE * 2)) & 0xFF;
++ buf[offset + 2] = (nblocks >> (BYTE * 1)) & 0xFF;
++ buf[offset + 3] = (nblocks >> (BYTE * 0)) & 0xFF;
++ }
++ buf[offset + 4] = 0; /* density code */
++ buf[offset + 5] = (blocksize >> (BYTE * 2)) & 0xFF;/* blklen */
++ buf[offset + 6] = (blocksize >> (BYTE * 1)) & 0xFF;
++ buf[offset + 7] = (blocksize >> (BYTE * 0)) & 0xFF;
++
++ offset += 8; /* increment offset */
++ }
++
++ bp = buf + offset;
++
++ switch (pcode) {
++ case 0x1: /* Read-Write error recovery page, direct access */
++ len = vdisk_err_recov_pg(bp, pcontrol, virt_dev);
++ break;
++ case 0x2: /* Disconnect-Reconnect page, all devices */
++ len = vdisk_disconnect_pg(bp, pcontrol, virt_dev);
++ break;
++ case 0x3: /* Format device page, direct access */
++ len = vdisk_format_pg(bp, pcontrol, virt_dev);
++ break;
++ case 0x4: /* Rigid disk geometry */
++ len = vdisk_rigid_geo_pg(bp, pcontrol, virt_dev);
++ break;
++ case 0x8: /* Caching page, direct access */
++ len = vdisk_caching_pg(bp, pcontrol, virt_dev);
++ break;
++ case 0xa: /* Control Mode page, all devices */
++ len = vdisk_ctrl_m_pg(bp, pcontrol, virt_dev);
++ break;
++ case 0x1c: /* Informational Exceptions Mode page, all devices */
++ len = vdisk_iec_m_pg(bp, pcontrol, virt_dev);
++ break;
++ case 0x3f: /* Read all Mode pages */
++ len = vdisk_err_recov_pg(bp, pcontrol, virt_dev);
++ len += vdisk_disconnect_pg(bp + len, pcontrol, virt_dev);
++ len += vdisk_format_pg(bp + len, pcontrol, virt_dev);
++ len += vdisk_caching_pg(bp + len, pcontrol, virt_dev);
++ len += vdisk_ctrl_m_pg(bp + len, pcontrol, virt_dev);
++ len += vdisk_iec_m_pg(bp + len, pcontrol, virt_dev);
++ len += vdisk_rigid_geo_pg(bp + len, pcontrol, virt_dev);
++ break;
++ default:
++ TRACE_DBG("MODE SENSE: Unsupported page %x", pcode);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_put;
++ }
++
++ offset += len;
++
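++ /* Fill MODE DATA LENGTH; it excludes itself: 1 byte for MODE SENSE(6), 2 bytes for MODE SENSE(10) */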
++ if (msense_6)
++ buf[0] = offset - 1;
++ else {
++ buf[0] = ((offset - 2) >> 8) & 0xff;
++ buf[1] = (offset - 2) & 0xff;
++ }
++
++ if (offset > length)
++ offset = length;
++ memcpy(address, buf, offset);
++
++out_put:
++ scst_put_buf(cmd, address);
++ if (offset < cmd->resp_data_len)
++ scst_set_resp_data_len(cmd, offset);
++
++out_free:
++ kfree(buf);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static int vdisk_set_wt(struct scst_vdisk_dev *virt_dev, int wt)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ if ((virt_dev->wt_flag == wt) || virt_dev->nullio || virt_dev->nv_cache)
++ goto out;
++
++ spin_lock(&virt_dev->flags_lock);
++ virt_dev->wt_flag = wt;
++ spin_unlock(&virt_dev->flags_lock);
++
++ scst_dev_del_all_thr_data(virt_dev->dev);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void vdisk_ctrl_m_pg_select(unsigned char *p,
++ struct scst_vdisk_dev *virt_dev)
++{
++ struct scst_device *dev = virt_dev->dev;
++ int old_swp = dev->swp, old_tas = dev->tas, old_dsense = dev->d_sense;
++
++#if 0
++ /* Not implemented yet, see comment in vdisk_ctrl_m_pg() */
++ dev->tst = p[2] >> 5;
++ dev->queue_alg = p[3] >> 4;
++#endif
++ dev->swp = (p[4] & 0x8) >> 3;
++ dev->tas = (p[5] & 0x40) >> 6;
++ dev->d_sense = (p[2] & 0x4) >> 2;
++
++ PRINT_INFO("Device %s: new control mode page parameters: SWP %x "
++ "(was %x), TAS %x (was %x), D_SENSE %d (was %d)",
++ virt_dev->name, dev->swp, old_swp, dev->tas, old_tas,
++ dev->d_sense, old_dsense);
++ return;
++}
++
++static void vdisk_exec_mode_select(struct scst_cmd *cmd)
++{
++ int32_t length;
++ uint8_t *address;
++ struct scst_vdisk_dev *virt_dev;
++ int mselect_6, offset;
++
++ TRACE_ENTRY();
++
++ virt_dev = (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ mselect_6 = (MODE_SELECT == cmd->cdb[0]);
++
++ length = scst_get_buf_first(cmd, &address);
++ if (unlikely(length <= 0)) {
++ if (length < 0) {
++ PRINT_ERROR("scst_get_buf_first() failed: %d", length);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ }
++ goto out;
++ }
++
++ if (!(cmd->cdb[1] & PF) || (cmd->cdb[1] & SP)) {
++ TRACE(TRACE_MINOR|TRACE_SCSI, "MODE SELECT: Unsupported "
++ "value(s) of PF and/or SP bits (cdb[1]=%x)",
++ cmd->cdb[1]);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_put;
++ }
++
++ if (mselect_6)
++ offset = 4;
++ else
++ offset = 8;
++
++ if (address[offset - 1] == 8) {
++ offset += 8;
++ } else if (address[offset - 1] != 0) {
++ PRINT_ERROR("%s", "MODE SELECT: Wrong parameters list "
++ "length");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_parm_list));
++ goto out_put;
++ }
++
++ while (length > offset + 2) {
++ if (address[offset] & PS) {
++ PRINT_ERROR("%s", "MODE SELECT: Illegal PS bit");
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
++ scst_sense_invalid_field_in_parm_list));
++ goto out_put;
++ }
++ if ((address[offset] & 0x3f) == 0x8) {
++ /* Caching page */
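++ /* WCE set requests write-back caching, so the write-through flag is cleared (and vice versa) */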
++ if (address[offset + 1] != 18) {
++ PRINT_ERROR("%s", "MODE SELECT: Invalid "
++ "caching page request");
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
++ scst_sense_invalid_field_in_parm_list));
++ goto out_put;
++ }
++ if (vdisk_set_wt(virt_dev,
++ (address[offset + 2] & WCE) ? 0 : 1) != 0) {
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out_put;
++ }
++ break;
++ } else if ((address[offset] & 0x3f) == 0xA) {
++ /* Control page */
++ if (address[offset + 1] != 0xA) {
++ PRINT_ERROR("%s", "MODE SELECT: Invalid "
++ "control page request");
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
++ scst_sense_invalid_field_in_parm_list));
++ goto out_put;
++ }
++ vdisk_ctrl_m_pg_select(&address[offset], virt_dev);
++ } else {
++ PRINT_ERROR("MODE SELECT: Invalid request %x",
++ address[offset] & 0x3f);
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
++ scst_sense_invalid_field_in_parm_list));
++ goto out_put;
++ }
++ offset += address[offset + 1];
++ }
++
++out_put:
++ scst_put_buf(cmd, address);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static void vdisk_exec_log(struct scst_cmd *cmd)
++{
++ TRACE_ENTRY();
++
++ /* No log pages are supported */
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++
++ TRACE_EXIT();
++ return;
++}
++
++static void vdisk_exec_read_capacity(struct scst_cmd *cmd)
++{
++ int32_t length;
++ uint8_t *address;
++ struct scst_vdisk_dev *virt_dev;
++ uint32_t blocksize;
++ uint64_t nblocks;
++ uint8_t buffer[8];
++
++ TRACE_ENTRY();
++
++ virt_dev = (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ blocksize = virt_dev->block_size;
++ nblocks = virt_dev->nblocks;
++
++ if ((cmd->cdb[8] & 1) == 0) {
++ uint64_t lba = be64_to_cpu(get_unaligned((__be64 *)&cmd->cdb[2]));
++ if (lba != 0) {
++ TRACE_DBG("PMI zero and LBA not zero (cmd %p)", cmd);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out;
++ }
++ }
++
++ /* Last block on the virt_dev is (nblocks-1) */
++ memset(buffer, 0, sizeof(buffer));
++ if (nblocks >> 32) {
++ buffer[0] = 0xFF;
++ buffer[1] = 0xFF;
++ buffer[2] = 0xFF;
++ buffer[3] = 0xFF;
++ } else {
++ buffer[0] = ((nblocks - 1) >> (BYTE * 3)) & 0xFF;
++ buffer[1] = ((nblocks - 1) >> (BYTE * 2)) & 0xFF;
++ buffer[2] = ((nblocks - 1) >> (BYTE * 1)) & 0xFF;
++ buffer[3] = ((nblocks - 1) >> (BYTE * 0)) & 0xFF;
++ }
++ buffer[4] = (blocksize >> (BYTE * 3)) & 0xFF;
++ buffer[5] = (blocksize >> (BYTE * 2)) & 0xFF;
++ buffer[6] = (blocksize >> (BYTE * 1)) & 0xFF;
++ buffer[7] = (blocksize >> (BYTE * 0)) & 0xFF;
++
++ length = scst_get_buf_first(cmd, &address);
++ if (unlikely(length <= 0)) {
++ if (length < 0) {
++ PRINT_ERROR("scst_get_buf_first() failed: %d", length);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ }
++ goto out;
++ }
++
++ length = min_t(int, length, sizeof(buffer));
++
++ memcpy(address, buffer, length);
++
++ scst_put_buf(cmd, address);
++
++ if (length < cmd->resp_data_len)
++ scst_set_resp_data_len(cmd, length);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static void vdisk_exec_read_capacity16(struct scst_cmd *cmd)
++{
++ int32_t length;
++ uint8_t *address;
++ struct scst_vdisk_dev *virt_dev;
++ uint32_t blocksize;
++ uint64_t nblocks;
++ uint8_t buffer[32];
++
++ TRACE_ENTRY();
++
++ virt_dev = (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ blocksize = virt_dev->block_size;
++ nblocks = virt_dev->nblocks - 1;
++
++ if ((cmd->cdb[14] & 1) == 0) {
++ uint64_t lba = be64_to_cpu(get_unaligned((__be64 *)&cmd->cdb[2]));
++ if (lba != 0) {
++ TRACE_DBG("PMI zero and LBA not zero (cmd %p)", cmd);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out;
++ }
++ }
++
++ memset(buffer, 0, sizeof(buffer));
++
++ buffer[0] = nblocks >> 56;
++ buffer[1] = (nblocks >> 48) & 0xFF;
++ buffer[2] = (nblocks >> 40) & 0xFF;
++ buffer[3] = (nblocks >> 32) & 0xFF;
++ buffer[4] = (nblocks >> 24) & 0xFF;
++ buffer[5] = (nblocks >> 16) & 0xFF;
++ buffer[6] = (nblocks >> 8) & 0xFF;
++ buffer[7] = nblocks & 0xFF;
++
++ buffer[8] = (blocksize >> (BYTE * 3)) & 0xFF;
++ buffer[9] = (blocksize >> (BYTE * 2)) & 0xFF;
++ buffer[10] = (blocksize >> (BYTE * 1)) & 0xFF;
++ buffer[11] = (blocksize >> (BYTE * 0)) & 0xFF;
++
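++ /* Logical-blocks-per-physical-block exponent, assuming a 4096-byte physical block */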
++ switch (blocksize) {
++ case 512:
++ buffer[13] = 3;
++ break;
++ case 1024:
++ buffer[13] = 2;
++ break;
++ case 2048:
++ buffer[13] = 1;
++ break;
++ case 4096:
++ default:
++ buffer[13] = 0;
++ break;
++ }
++
++ length = scst_get_buf_first(cmd, &address);
++ if (unlikely(length <= 0)) {
++ if (length < 0) {
++ PRINT_ERROR("scst_get_buf_first() failed: %d", length);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ }
++ goto out;
++ }
++
++ length = min_t(int, length, sizeof(buffer));
++
++ memcpy(address, buffer, length);
++
++ scst_put_buf(cmd, address);
++
++ if (length < cmd->resp_data_len)
++ scst_set_resp_data_len(cmd, length);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static void vdisk_exec_read_toc(struct scst_cmd *cmd)
++{
++ int32_t length, off = 0;
++ uint8_t *address;
++ struct scst_vdisk_dev *virt_dev;
++ uint32_t nblocks;
++ uint8_t buffer[4+8+8] = { 0x00, 0x0a, 0x01, 0x01, 0x00, 0x14,
++ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
++
++ TRACE_ENTRY();
++
++ if (cmd->dev->type != TYPE_ROM) {
++ PRINT_ERROR("%s", "READ TOC for non-CDROM device");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
++ goto out;
++ }
++
++ if (cmd->cdb[2] & 0x0e/*Format*/) {
++ PRINT_ERROR("%s", "READ TOC: invalid requested data format");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out;
++ }
++
++ if ((cmd->cdb[6] != 0 && (cmd->cdb[2] & 0x01)) ||
++ (cmd->cdb[6] > 1 && cmd->cdb[6] != 0xAA)) {
++ PRINT_ERROR("READ TOC: invalid requested track number %x",
++ cmd->cdb[6]);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out;
++ }
++
++ length = scst_get_buf_first(cmd, &address);
++ if (unlikely(length <= 0)) {
++ if (length < 0) {
++ PRINT_ERROR("scst_get_buf_first() failed: %d", length);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ }
++ goto out;
++ }
++
++ virt_dev = (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ /* ToDo when you have > 8TB ROM device. */
++ nblocks = (uint32_t)virt_dev->nblocks;
++
++ /* Header */
++ memset(buffer, 0, sizeof(buffer));
++ buffer[2] = 0x01; /* First Track/Session */
++ buffer[3] = 0x01; /* Last Track/Session */
++ off = 4;
++ if (cmd->cdb[6] <= 1) {
++ /* First TOC Track Descriptor */
++ /* ADDR 0x10 - Q Sub-channel encodes current position data
++ CONTROL 0x04 - Data track, recorded uninterrupted */
++ buffer[off+1] = 0x14;
++ /* Track Number */
++ buffer[off+2] = 0x01;
++ off += 8;
++ }
++ if (!(cmd->cdb[2] & 0x01)) {
++ /* Lead-out area TOC Track Descriptor */
++ buffer[off+1] = 0x14;
++ /* Track Number */
++ buffer[off+2] = 0xAA;
++ /* Track Start Address */
++ buffer[off+4] = (nblocks >> (BYTE * 3)) & 0xFF;
++ buffer[off+5] = (nblocks >> (BYTE * 2)) & 0xFF;
++ buffer[off+6] = (nblocks >> (BYTE * 1)) & 0xFF;
++ buffer[off+7] = (nblocks >> (BYTE * 0)) & 0xFF;
++ off += 8;
++ }
++
++ buffer[1] = off - 2; /* Data Length */
++
++ if (off > length)
++ off = length;
++ memcpy(address, buffer, off);
++
++ scst_put_buf(cmd, address);
++
++ if (off < cmd->resp_data_len)
++ scst_set_resp_data_len(cmd, off);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static void vdisk_exec_prevent_allow_medium_removal(struct scst_cmd *cmd)
++{
++ struct scst_vdisk_dev *virt_dev =
++ (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++
++ TRACE_DBG("PERSIST/PREVENT 0x%02x", cmd->cdb[4]);
++
++ spin_lock(&virt_dev->flags_lock);
++ virt_dev->prevent_allow_medium_removal = cmd->cdb[4] & 0x01 ? 1 : 0;
++ spin_unlock(&virt_dev->flags_lock);
++
++ return;
++}
++
++static int vdisk_fsync(struct scst_vdisk_thr *thr, loff_t loff,
++ loff_t len, struct scst_cmd *cmd, struct scst_device *dev)
++{
++ int res = 0;
++ struct scst_vdisk_dev *virt_dev =
++ (struct scst_vdisk_dev *)dev->dh_priv;
++ struct file *file;
++
++ TRACE_ENTRY();
++
++ /* Hopefully, the compiler will generate a single comparison */
++ if (virt_dev->nv_cache || virt_dev->wt_flag ||
++ virt_dev->o_direct_flag || virt_dev->nullio)
++ goto out;
++
++ if (virt_dev->blockio) {
++ res = blockio_flush(thr->bdev);
++ goto out;
++ }
++
++ file = thr->fd;
++
++#if 0 /* For sparse files we might need to sync metadata as well */
++ res = generic_write_sync(file, loff, len);
++#else
++ res = filemap_write_and_wait_range(file->f_mapping, loff, len);
++#endif
++ if (unlikely(res != 0)) {
++ PRINT_ERROR("sync range failed (%d)", res);
++ if (cmd != NULL) {
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_write_error));
++ }
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct iovec *vdisk_alloc_iv(struct scst_cmd *cmd,
++ struct scst_vdisk_thr *thr)
++{
++ int iv_count;
++
++ iv_count = min_t(int, scst_get_buf_count(cmd), UIO_MAXIOV);
++ if (iv_count > thr->iv_count) {
++ kfree(thr->iv);
++ /* It can't be called in atomic context */
++ thr->iv = kmalloc(sizeof(*thr->iv) * iv_count, GFP_KERNEL);
++ if (thr->iv == NULL) {
++ PRINT_ERROR("Unable to allocate iv (%d)", iv_count);
++ scst_set_busy(cmd);
++ goto out;
++ }
++ thr->iv_count = iv_count;
++ }
++
++out:
++ return thr->iv;
++}
++
++static void vdisk_exec_read(struct scst_cmd *cmd,
++ struct scst_vdisk_thr *thr, loff_t loff)
++{
++ mm_segment_t old_fs;
++ loff_t err;
++ ssize_t length, full_len;
++ uint8_t __user *address;
++ struct scst_vdisk_dev *virt_dev =
++ (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ struct file *fd = thr->fd;
++ struct iovec *iv;
++ int iv_count, i;
++ bool finished = false;
++
++ TRACE_ENTRY();
++
++ if (virt_dev->nullio)
++ goto out;
++
++ iv = vdisk_alloc_iv(cmd, thr);
++ if (iv == NULL)
++ goto out;
++
++ length = scst_get_buf_first(cmd, (uint8_t __force **)&address);
++ if (unlikely(length < 0)) {
++ PRINT_ERROR("scst_get_buf_first() failed: %zd", length);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out;
++ }
++
++ old_fs = get_fs();
++ set_fs(get_ds());
++
++ while (1) {
++ iv_count = 0;
++ full_len = 0;
++ i = -1;
++ while (length > 0) {
++ full_len += length;
++ i++;
++ iv_count++;
++ iv[i].iov_base = address;
++ iv[i].iov_len = length;
++ if (iv_count == UIO_MAXIOV)
++ break;
++ length = scst_get_buf_next(cmd,
++ (uint8_t __force **)&address);
++ }
++ if (length == 0) {
++ finished = true;
++ if (unlikely(iv_count == 0))
++ break;
++ } else if (unlikely(length < 0)) {
++ PRINT_ERROR("scst_get_buf_next() failed: %zd", length);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out_set_fs;
++ }
++
++ TRACE_DBG("(iv_count %d, full_len %zd)", iv_count, full_len);
++ /* SEEK */
++ if (fd->f_op->llseek)
++ err = fd->f_op->llseek(fd, loff, 0/*SEEK_SET*/);
++ else
++ err = default_llseek(fd, loff, 0/*SEEK_SET*/);
++ if (err != loff) {
++ PRINT_ERROR("lseek trouble %lld != %lld",
++ (long long unsigned int)err,
++ (long long unsigned int)loff);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out_set_fs;
++ }
++
++ /* READ */
++ err = vfs_readv(fd, (struct iovec __force __user *)iv, iv_count,
++ &fd->f_pos);
++
++ if ((err < 0) || (err < full_len)) {
++ PRINT_ERROR("readv() returned %lld from %zd",
++ (long long unsigned int)err,
++ full_len);
++ if (err == -EAGAIN)
++ scst_set_busy(cmd);
++ else {
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_read_error));
++ }
++ goto out_set_fs;
++ }
++
++ for (i = 0; i < iv_count; i++)
++ scst_put_buf(cmd, (void __force *)(iv[i].iov_base));
++
++ if (finished)
++ break;
++
++ loff += full_len;
++ length = scst_get_buf_next(cmd, (uint8_t __force **)&address);
++ };
++
++ set_fs(old_fs);
++
++out:
++ TRACE_EXIT();
++ return;
++
++out_set_fs:
++ set_fs(old_fs);
++ for (i = 0; i < iv_count; i++)
++ scst_put_buf(cmd, (void __force *)(iv[i].iov_base));
++ goto out;
++}
++
++static void vdisk_exec_write(struct scst_cmd *cmd,
++ struct scst_vdisk_thr *thr, loff_t loff)
++{
++ mm_segment_t old_fs;
++ loff_t err;
++ ssize_t length, full_len, saved_full_len;
++ uint8_t __user *address;
++ struct scst_vdisk_dev *virt_dev =
++ (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ struct file *fd = thr->fd;
++ struct iovec *iv, *eiv;
++ int i, iv_count, eiv_count;
++ bool finished = false;
++
++ TRACE_ENTRY();
++
++ if (virt_dev->nullio)
++ goto out;
++
++ iv = vdisk_alloc_iv(cmd, thr);
++ if (iv == NULL)
++ goto out;
++
++ length = scst_get_buf_first(cmd, (uint8_t __force **)&address);
++ if (unlikely(length < 0)) {
++ PRINT_ERROR("scst_get_buf_first() failed: %zd", length);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out;
++ }
++
++ old_fs = get_fs();
++ set_fs(get_ds());
++
++ while (1) {
++ iv_count = 0;
++ full_len = 0;
++ i = -1;
++ while (length > 0) {
++ full_len += length;
++ i++;
++ iv_count++;
++ iv[i].iov_base = address;
++ iv[i].iov_len = length;
++ if (iv_count == UIO_MAXIOV)
++ break;
++ length = scst_get_buf_next(cmd,
++ (uint8_t __force **)&address);
++ }
++ if (length == 0) {
++ finished = true;
++ if (unlikely(iv_count == 0))
++ break;
++ } else if (unlikely(length < 0)) {
++ PRINT_ERROR("scst_get_buf_next() failed: %zd", length);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out_set_fs;
++ }
++
++ saved_full_len = full_len;
++ eiv = iv;
++ eiv_count = iv_count;
++restart:
++ TRACE_DBG("writing(eiv_count %d, full_len %zd)", eiv_count, full_len);
++
++ /* SEEK */
++ if (fd->f_op->llseek)
++ err = fd->f_op->llseek(fd, loff, 0 /*SEEK_SET */);
++ else
++ err = default_llseek(fd, loff, 0 /*SEEK_SET */);
++ if (err != loff) {
++ PRINT_ERROR("lseek trouble %lld != %lld",
++ (long long unsigned int)err,
++ (long long unsigned int)loff);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out_set_fs;
++ }
++
++ /* WRITE */
++ err = vfs_writev(fd, (struct iovec __force __user *)eiv, eiv_count,
++ &fd->f_pos);
++
++ if (err < 0) {
++ PRINT_ERROR("write() returned %lld from %zd",
++ (long long unsigned int)err,
++ full_len);
++ if (err == -EAGAIN)
++ scst_set_busy(cmd);
++ else {
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_write_error));
++ }
++ goto out_set_fs;
++ } else if (err < full_len) {
++ /*
++ * Probably that's wrong, but sometimes write() returns a
++ * value less than requested. Let's restart.
++ */
++ int e = eiv_count;
++ TRACE_MGMT_DBG("write() returned %d from %zd "
++ "(iv_count=%d)", (int)err, full_len,
++ eiv_count);
++ if (err == 0) {
++ PRINT_INFO("Suspicious: write() returned 0 from "
++ "%zd (iv_count=%d)", full_len, eiv_count);
++ }
++ full_len -= err;
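++ /* Skip the iovec entries already written in full and trim the partial one, then retry */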
++ for (i = 0; i < e; i++) {
++ if ((long long)eiv->iov_len < err) {
++ err -= eiv->iov_len;
++ eiv++;
++ eiv_count--;
++ } else {
++ eiv->iov_base =
++ (uint8_t __force __user *)eiv->iov_base + err;
++ eiv->iov_len -= err;
++ break;
++ }
++ }
++ goto restart;
++ }
++
++ for (i = 0; i < iv_count; i++)
++ scst_put_buf(cmd, (void __force *)(iv[i].iov_base));
++
++ if (finished)
++ break;
++
++ loff += saved_full_len;
++ length = scst_get_buf_next(cmd, (uint8_t __force **)&address);
++ }
++
++ set_fs(old_fs);
++
++out:
++ TRACE_EXIT();
++ return;
++
++out_set_fs:
++ set_fs(old_fs);
++ for (i = 0; i < iv_count; i++)
++ scst_put_buf(cmd, (void __force *)(iv[i].iov_base));
++ goto out;
++}
++
++struct scst_blockio_work {
++ atomic_t bios_inflight;
++ struct scst_cmd *cmd;
++};
++
++static inline void blockio_check_finish(struct scst_blockio_work *blockio_work)
++{
++ /* Decrement the count of bios in flight; when it reaches zero, complete the command */
++ if (atomic_dec_and_test(&blockio_work->bios_inflight)) {
++ blockio_work->cmd->completed = 1;
++ blockio_work->cmd->scst_cmd_done(blockio_work->cmd,
++ SCST_CMD_STATE_DEFAULT, scst_estimate_context());
++ kmem_cache_free(blockio_work_cachep, blockio_work);
++ }
++ return;
++}
++
++static void blockio_endio(struct bio *bio, int error)
++{
++ struct scst_blockio_work *blockio_work = bio->bi_private;
++
++ if (unlikely(!bio_flagged(bio, BIO_UPTODATE))) {
++ if (error == 0) {
++ PRINT_ERROR("bio not up to date, but error is 0 for "
++ "cmd %p, returning -EIO", blockio_work->cmd);
++ error = -EIO;
++ }
++ }
++
++ if (unlikely(error != 0)) {
++ static DEFINE_SPINLOCK(blockio_endio_lock);
++ unsigned long flags;
++
++ PRINT_ERROR("cmd %p returned error %d", blockio_work->cmd,
++ error);
++
++ /* To protect from several bios finishing simultaneously */
++ spin_lock_irqsave(&blockio_endio_lock, flags);
++
++ if (bio->bi_rw & REQ_WRITE)
++ scst_set_cmd_error(blockio_work->cmd,
++ SCST_LOAD_SENSE(scst_sense_write_error));
++ else
++ scst_set_cmd_error(blockio_work->cmd,
++ SCST_LOAD_SENSE(scst_sense_read_error));
++
++ spin_unlock_irqrestore(&blockio_endio_lock, flags);
++ }
++
++ blockio_check_finish(blockio_work);
++
++ bio_put(bio);
++ return;
++}
++
++static void blockio_exec_rw(struct scst_cmd *cmd, struct scst_vdisk_thr *thr,
++ u64 lba_start, int write)
++{
++ struct scst_vdisk_dev *virt_dev =
++ (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ struct block_device *bdev = thr->bdev;
++ struct request_queue *q = bdev_get_queue(bdev);
++ int length, max_nr_vecs = 0;
++ uint8_t *address;
++ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
++ int need_new_bio;
++ struct scst_blockio_work *blockio_work;
++ int bios = 0;
++
++ TRACE_ENTRY();
++
++ if (virt_dev->nullio)
++ goto out;
++
++ /* Allocate and initialize blockio_work struct */
++ blockio_work = kmem_cache_alloc(blockio_work_cachep, GFP_KERNEL);
++ if (blockio_work == NULL)
++ goto out_no_mem;
++
++ blockio_work->cmd = cmd;
++
++ if (q)
++ max_nr_vecs = min(bio_get_nr_vecs(bdev), BIO_MAX_PAGES);
++ else
++ max_nr_vecs = 1;
++
++ need_new_bio = 1;
++
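++ /* Map each data segment into bios, chained via bi_next and submitted further below */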
++ length = scst_get_buf_first(cmd, &address);
++ while (length > 0) {
++ int len, bytes, off, thislen;
++ uint8_t *addr;
++ u64 lba_start0;
++
++ addr = address;
++ off = offset_in_page(addr);
++ len = length;
++ thislen = 0;
++ lba_start0 = lba_start;
++
++ while (len > 0) {
++ int rc;
++ struct page *page = virt_to_page(addr);
++
++ if (need_new_bio) {
++ bio = bio_kmalloc(GFP_KERNEL, max_nr_vecs);
++ if (!bio) {
++ PRINT_ERROR("Failed to create bio "
++ "for data segment %d (cmd %p)",
++ cmd->get_sg_buf_entry_num, cmd);
++ goto out_no_bio;
++ }
++
++ bios++;
++ need_new_bio = 0;
++ bio->bi_end_io = blockio_endio;
++ bio->bi_sector = lba_start0 <<
++ (virt_dev->block_shift - 9);
++ bio->bi_bdev = bdev;
++ bio->bi_private = blockio_work;
++ /*
++ * Better to fail fast w/o any local recovery
++ * and retries.
++ */
++ bio->bi_rw |= REQ_FAILFAST_DEV |
++ REQ_FAILFAST_TRANSPORT |
++ REQ_FAILFAST_DRIVER;
++#if 0 /* It could be a win, but might not be, so a performance study is needed */
++ bio->bi_rw |= REQ_SYNC;
++#endif
++ if (!hbio)
++ hbio = tbio = bio;
++ else
++ tbio = tbio->bi_next = bio;
++ }
++
++ bytes = min_t(unsigned int, len, PAGE_SIZE - off);
++
++ rc = bio_add_page(bio, page, bytes, off);
++ if (rc < bytes) {
++ BUG_ON(rc != 0);
++ need_new_bio = 1;
++ lba_start0 += thislen >> virt_dev->block_shift;
++ thislen = 0;
++ continue;
++ }
++
++ addr += PAGE_SIZE;
++ thislen += bytes;
++ len -= bytes;
++ off = 0;
++ }
++
++ lba_start += length >> virt_dev->block_shift;
++
++ scst_put_buf(cmd, address);
++ length = scst_get_buf_next(cmd, &address);
++ }
++
++ /* +1 to prevent erroneously early command completion */
++ atomic_set(&blockio_work->bios_inflight, bios+1);
++
++ while (hbio) {
++ bio = hbio;
++ hbio = hbio->bi_next;
++ bio->bi_next = NULL;
++ submit_bio((write != 0), bio);
++ }
++
++ if (q && q->unplug_fn)
++ q->unplug_fn(q);
++
++ blockio_check_finish(blockio_work);
++
++out:
++ TRACE_EXIT();
++ return;
++
++out_no_bio:
++ while (hbio) {
++ bio = hbio;
++ hbio = hbio->bi_next;
++ bio_put(bio);
++ }
++ kmem_cache_free(blockio_work_cachep, blockio_work);
++
++out_no_mem:
++ scst_set_busy(cmd);
++ goto out;
++}
++
++static int blockio_flush(struct block_device *bdev)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ res = blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT);
++ if (res != 0)
++ PRINT_ERROR("blkdev_issue_flush() failed: %d", res);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void vdisk_exec_verify(struct scst_cmd *cmd,
++ struct scst_vdisk_thr *thr, loff_t loff)
++{
++ mm_segment_t old_fs;
++ loff_t err;
++ ssize_t length, len_mem = 0;
++ uint8_t *address_sav, *address;
++ int compare;
++ struct scst_vdisk_dev *virt_dev =
++ (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ struct file *fd = thr->fd;
++ uint8_t *mem_verify = NULL;
++
++ TRACE_ENTRY();
++
++ if (vdisk_fsync(thr, loff, cmd->bufflen, cmd, cmd->dev) != 0)
++ goto out;
++
++ /*
++ * Until the cache is cleared prior to verifying, there is not
++ * much point in this code. ToDo.
++ *
++ * Nevertheless, this code is valuable if the data have not been
++ * read from the file/disk yet.
++ */
++
++ /* SEEK */
++ old_fs = get_fs();
++ set_fs(get_ds());
++
++ if (!virt_dev->nullio) {
++ if (fd->f_op->llseek)
++ err = fd->f_op->llseek(fd, loff, 0/*SEEK_SET*/);
++ else
++ err = default_llseek(fd, loff, 0/*SEEK_SET*/);
++ if (err != loff) {
++ PRINT_ERROR("lseek trouble %lld != %lld",
++ (long long unsigned int)err,
++ (long long unsigned int)loff);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out_set_fs;
++ }
++ }
++
++ mem_verify = vmalloc(LEN_MEM);
++ if (mem_verify == NULL) {
++ PRINT_ERROR("Unable to allocate %d bytes for verify",
++ LEN_MEM);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out_set_fs;
++ }
++
++ length = scst_get_buf_first(cmd, &address);
++ address_sav = address;
++ if (!length && cmd->data_len) {
++ length = cmd->data_len;
++ compare = 0;
++ } else
++ compare = 1;
++
++ while (length > 0) {
++ len_mem = (length > LEN_MEM) ? LEN_MEM : length;
++ TRACE_DBG("Verify: length %zd - len_mem %zd", length, len_mem);
++
++ if (!virt_dev->nullio)
++ err = vfs_read(fd, (char __force __user *)mem_verify,
++ len_mem, &fd->f_pos);
++ else
++ err = len_mem;
++ if ((err < 0) || (err < len_mem)) {
++ PRINT_ERROR("verify() returned %lld from %zd",
++ (long long unsigned int)err, len_mem);
++ if (err == -EAGAIN)
++ scst_set_busy(cmd);
++ else {
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_read_error));
++ }
++ if (compare)
++ scst_put_buf(cmd, address_sav);
++ goto out_set_fs;
++ }
++ if (compare && memcmp(address, mem_verify, len_mem) != 0) {
++ TRACE_DBG("Verify: error memcmp length %zd", length);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_miscompare_error));
++ scst_put_buf(cmd, address_sav);
++ goto out_set_fs;
++ }
++ length -= len_mem;
++ address += len_mem;
++ if (compare && length <= 0) {
++ scst_put_buf(cmd, address_sav);
++ length = scst_get_buf_next(cmd, &address);
++ address_sav = address;
++ }
++ }
++
++ if (length < 0) {
++ PRINT_ERROR("scst_get_buf_() failed: %zd", length);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ }
++
++out_set_fs:
++ set_fs(old_fs);
++ if (mem_verify)
++ vfree(mem_verify);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static int vdisk_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
++ struct scst_tgt_dev *tgt_dev)
++{
++ TRACE_ENTRY();
++
++ if ((mcmd->fn == SCST_LUN_RESET) || (mcmd->fn == SCST_TARGET_RESET)) {
++ /* Restore default values */
++ struct scst_device *dev = tgt_dev->dev;
++ struct scst_vdisk_dev *virt_dev =
++ (struct scst_vdisk_dev *)dev->dh_priv;
++
++ dev->tst = DEF_TST;
++ dev->d_sense = DEF_DSENSE;
++ if (virt_dev->wt_flag && !virt_dev->nv_cache)
++ dev->queue_alg = DEF_QUEUE_ALG_WT;
++ else
++ dev->queue_alg = DEF_QUEUE_ALG;
++ dev->swp = DEF_SWP;
++ dev->tas = DEF_TAS;
++
++ spin_lock(&virt_dev->flags_lock);
++ virt_dev->prevent_allow_medium_removal = 0;
++ spin_unlock(&virt_dev->flags_lock);
++ } else if (mcmd->fn == SCST_PR_ABORT_ALL) {
++ struct scst_device *dev = tgt_dev->dev;
++ struct scst_vdisk_dev *virt_dev =
++ (struct scst_vdisk_dev *)dev->dh_priv;
++ spin_lock(&virt_dev->flags_lock);
++ virt_dev->prevent_allow_medium_removal = 0;
++ spin_unlock(&virt_dev->flags_lock);
++ }
++
++ TRACE_EXIT();
++ return SCST_DEV_TM_NOT_COMPLETED;
++}
++
++static void vdisk_report_registering(const struct scst_vdisk_dev *virt_dev)
++{
++ char buf[128];
++ int i, j;
++
++ i = snprintf(buf, sizeof(buf), "Registering virtual %s device %s ",
++ virt_dev->vdev_devt->name, virt_dev->name);
++ j = i;
++
++ if (virt_dev->wt_flag)
++ i += snprintf(&buf[i], sizeof(buf) - i, "(WRITE_THROUGH");
++
++ if (virt_dev->nv_cache)
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sNV_CACHE",
++ (j == i) ? "(" : ", ");
++
++ if (virt_dev->rd_only)
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sREAD_ONLY",
++ (j == i) ? "(" : ", ");
++
++ if (virt_dev->o_direct_flag)
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sO_DIRECT",
++ (j == i) ? "(" : ", ");
++
++ if (virt_dev->nullio)
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sNULLIO",
++ (j == i) ? "(" : ", ");
++
++ if (virt_dev->blockio)
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sBLOCKIO",
++ (j == i) ? "(" : ", ");
++
++ if (virt_dev->removable)
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sREMOVABLE",
++ (j == i) ? "(" : ", ");
++
++ if (j == i)
++ PRINT_INFO("%s", buf);
++ else
++ PRINT_INFO("%s)", buf);
++
++ return;
++}
++
++static int vdisk_resync_size(struct scst_vdisk_dev *virt_dev)
++{
++ loff_t file_size;
++ int res = 0;
++
++ BUG_ON(virt_dev->nullio);
++
++ res = vdisk_get_file_size(virt_dev->filename,
++ virt_dev->blockio, &file_size);
++ if (res != 0)
++ goto out;
++
++ if (file_size == virt_dev->file_size) {
++ PRINT_INFO("Size of virtual disk %s remained the same",
++ virt_dev->name);
++ goto out;
++ }
++
++ res = scst_suspend_activity(true);
++ if (res != 0)
++ goto out;
++
++ virt_dev->file_size = file_size;
++ virt_dev->nblocks = virt_dev->file_size >> virt_dev->block_shift;
++
++ scst_dev_del_all_thr_data(virt_dev->dev);
++
++ PRINT_INFO("New size of SCSI target virtual disk %s "
++ "(fs=%lldMB, bs=%d, nblocks=%lld, cyln=%lld%s)",
++ virt_dev->name, virt_dev->file_size >> 20,
++ virt_dev->block_size,
++ (long long unsigned int)virt_dev->nblocks,
++ (long long unsigned int)virt_dev->nblocks/64/32,
++ virt_dev->nblocks < 64*32 ? " !WARNING! cyln less "
++ "than 1" : "");
++
++ scst_capacity_data_changed(virt_dev->dev);
++
++ scst_resume_activity();
++
++out:
++ return res;
++}
++
++static int vdev_create(struct scst_dev_type *devt,
++ const char *name, struct scst_vdisk_dev **res_virt_dev)
++{
++ int res = 0;
++ struct scst_vdisk_dev *virt_dev;
++ uint64_t dev_id_num;
++ int dev_id_len;
++ char dev_id_str[17];
++ int32_t i;
++
++ virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
++ if (virt_dev == NULL) {
++ PRINT_ERROR("Allocation of virtual device %s failed",
++ devt->name);
++ res = -ENOMEM;
++ goto out;
++ }
++
++ spin_lock_init(&virt_dev->flags_lock);
++ virt_dev->vdev_devt = devt;
++
++ virt_dev->rd_only = DEF_RD_ONLY;
++ virt_dev->removable = DEF_REMOVABLE;
++
++ virt_dev->block_size = DEF_DISK_BLOCKSIZE;
++ virt_dev->block_shift = DEF_DISK_BLOCKSIZE_SHIFT;
++
++ if (strlen(name) >= sizeof(virt_dev->name)) {
++ PRINT_ERROR("Name %s is too long (max allowed %zd)", name,
++ sizeof(virt_dev->name)-1);
++ res = -EINVAL;
++ goto out_free;
++ }
++ strcpy(virt_dev->name, name);
++
++ dev_id_num = vdisk_gen_dev_id_num(virt_dev->name);
++ dev_id_len = scnprintf(dev_id_str, sizeof(dev_id_str), "%llx",
++ dev_id_num);
++
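++ /* T10 device ID: the device name, a space, then the name-derived numeric ID in hex */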
++ i = strlen(virt_dev->name) + 1; /* for ' ' */
++ memset(virt_dev->t10_dev_id, ' ', i + dev_id_len);
++ memcpy(virt_dev->t10_dev_id, virt_dev->name, i-1);
++ memcpy(virt_dev->t10_dev_id + i, dev_id_str, dev_id_len);
++ TRACE_DBG("t10_dev_id %s", virt_dev->t10_dev_id);
++
++ virt_dev->t10_dev_id_set = 1; /* temporary */
++
++ scnprintf(virt_dev->usn, sizeof(virt_dev->usn), "%llx", dev_id_num);
++ TRACE_DBG("usn %s", virt_dev->usn);
++
++ virt_dev->usn_set = 1; /* temporary */
++
++ *res_virt_dev = virt_dev;
++
++out:
++ return res;
++
++out_free:
++ kfree(virt_dev);
++ goto out;
++}
++
++static void vdev_destroy(struct scst_vdisk_dev *virt_dev)
++{
++ kfree(virt_dev->filename);
++ kfree(virt_dev);
++ return;
++}
++
++/* scst_vdisk_mutex supposed to be held */
++static struct scst_vdisk_dev *vdev_find(const char *name)
++{
++ struct scst_vdisk_dev *res, *vv;
++
++ TRACE_ENTRY();
++
++ res = NULL;
++ list_for_each_entry(vv, &vdev_list, vdev_list_entry) {
++ if (strcmp(vv->name, name) == 0) {
++ res = vv;
++ break;
++ }
++ }
++
++ TRACE_EXIT_HRES((unsigned long)res);
++ return res;
++}
++
++static int vdev_parse_add_dev_params(struct scst_vdisk_dev *virt_dev,
++ char *params, const char *allowed_params[])
++{
++ int res = 0;
++ unsigned long val;
++ char *param, *p, *pp;
++
++ TRACE_ENTRY();
++
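++ /* Walk the parameter string: each token is a parameter name followed by exactly one value */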
++ while (1) {
++ param = scst_get_next_token_str(&params);
++ if (param == NULL)
++ break;
++
++ p = scst_get_next_lexem(&param);
++ if (*p == '\0') {
++ PRINT_ERROR("Syntax error at %s (device %s)",
++ param, virt_dev->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (allowed_params != NULL) {
++ const char **a = allowed_params;
++ bool allowed = false;
++
++ while (*a != NULL) {
++ if (!strcasecmp(*a, p)) {
++ allowed = true;
++ break;
++ }
++ a++;
++ }
++
++ if (!allowed) {
++ PRINT_ERROR("Unknown parameter %s (device %s)", p,
++ virt_dev->name);
++ res = -EINVAL;
++ goto out;
++ }
++ }
++
++ pp = scst_get_next_lexem(&param);
++ if (*pp == '\0') {
++ PRINT_ERROR("Parameter %s value missed for device %s",
++ p, virt_dev->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (scst_get_next_lexem(&param)[0] != '\0') {
++ PRINT_ERROR("Too many values for parameter %s (device %s)",
++ p, virt_dev->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (!strcasecmp("filename", p)) {
++ if (*pp != '/') {
++ PRINT_ERROR("Filename %s must be global "
++ "(device %s)", pp, virt_dev->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ virt_dev->filename = kstrdup(pp, GFP_KERNEL);
++ if (virt_dev->filename == NULL) {
++ PRINT_ERROR("Unable to duplicate file name %s "
++ "(device %s)", pp, virt_dev->name);
++ res = -ENOMEM;
++ goto out;
++ }
++ continue;
++ }
++
++ res = strict_strtoul(pp, 0, &val);
++ if (res != 0) {
++ PRINT_ERROR("strict_strtoul() for %s failed: %d "
++ "(device %s)", pp, res, virt_dev->name);
++ goto out;
++ }
++
++ if (!strcasecmp("write_through", p)) {
++ virt_dev->wt_flag = val;
++ TRACE_DBG("WRITE THROUGH %d", virt_dev->wt_flag);
++ } else if (!strcasecmp("nv_cache", p)) {
++ virt_dev->nv_cache = val;
++ TRACE_DBG("NON-VOLATILE CACHE %d", virt_dev->nv_cache);
++ } else if (!strcasecmp("o_direct", p)) {
++#if 0
++ virt_dev->o_direct_flag = val;
++ TRACE_DBG("O_DIRECT %d", virt_dev->o_direct_flag);
++#else
++ PRINT_INFO("O_DIRECT flag doesn't currently"
++ " work, ignoring it, use fileio_tgt "
++ "in O_DIRECT mode instead (device %s)", virt_dev->name);
++#endif
++ } else if (!strcasecmp("read_only", p)) {
++ virt_dev->rd_only = val;
++ TRACE_DBG("READ ONLY %d", virt_dev->rd_only);
++ } else if (!strcasecmp("removable", p)) {
++ virt_dev->removable = val;
++ TRACE_DBG("REMOVABLE %d", virt_dev->removable);
++ } else if (!strcasecmp("blocksize", p)) {
++ virt_dev->block_size = val;
++ virt_dev->block_shift = scst_calc_block_shift(
++ virt_dev->block_size);
++ if (virt_dev->block_shift < 9) {
++ res = -EINVAL;
++ goto out;
++ }
++ TRACE_DBG("block_size %d, block_shift %d",
++ virt_dev->block_size,
++ virt_dev->block_shift);
++ } else {
++ PRINT_ERROR("Unknown parameter %s (device %s)", p,
++ virt_dev->name);
++ res = -EINVAL;
++ goto out;
++ }
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* scst_vdisk_mutex supposed to be held */
++static int vdev_fileio_add_device(const char *device_name, char *params)
++{
++ int res = 0;
++ struct scst_vdisk_dev *virt_dev;
++
++ TRACE_ENTRY();
++
++ res = vdev_create(&vdisk_file_devtype, device_name, &virt_dev);
++ if (res != 0)
++ goto out;
++
++ virt_dev->command_set_version = 0x04C0; /* SBC-3 */
++
++ virt_dev->wt_flag = DEF_WRITE_THROUGH;
++ virt_dev->nv_cache = DEF_NV_CACHE;
++ virt_dev->o_direct_flag = DEF_O_DIRECT;
++
++ res = vdev_parse_add_dev_params(virt_dev, params, NULL);
++ if (res != 0)
++ goto out_destroy;
++
++ if (virt_dev->rd_only && (virt_dev->wt_flag || virt_dev->nv_cache)) {
++ PRINT_ERROR("Write options on read only device %s",
++ virt_dev->name);
++ res = -EINVAL;
++ goto out_destroy;
++ }
++
++ if (virt_dev->filename == NULL) {
++ PRINT_ERROR("File name required (device %s)", virt_dev->name);
++ res = -EINVAL;
++ goto out_destroy;
++ }
++
++ list_add_tail(&virt_dev->vdev_list_entry, &vdev_list);
++
++ vdisk_report_registering(virt_dev);
++
++ virt_dev->virt_id = scst_register_virtual_device(virt_dev->vdev_devt,
++ virt_dev->name);
++ if (virt_dev->virt_id < 0) {
++ res = virt_dev->virt_id;
++ goto out_del;
++ }
++
++ TRACE_DBG("Registered virt_dev %s with id %d", virt_dev->name,
++ virt_dev->virt_id);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_del:
++ list_del(&virt_dev->vdev_list_entry);
++
++out_destroy:
++ vdev_destroy(virt_dev);
++ goto out;
++}
++
++/* scst_vdisk_mutex supposed to be held */
++static int vdev_blockio_add_device(const char *device_name, char *params)
++{
++ int res = 0;
++ const char *allowed_params[] = { "filename", "read_only", "removable",
++ "blocksize", "nv_cache", NULL };
++ struct scst_vdisk_dev *virt_dev;
++
++ TRACE_ENTRY();
++
++ res = vdev_create(&vdisk_blk_devtype, device_name, &virt_dev);
++ if (res != 0)
++ goto out;
++
++ virt_dev->command_set_version = 0x04C0; /* SBC-3 */
++
++ virt_dev->blockio = 1;
++
++ res = vdev_parse_add_dev_params(virt_dev, params, allowed_params);
++ if (res != 0)
++ goto out_destroy;
++
++ if (virt_dev->filename == NULL) {
++ PRINT_ERROR("File name required (device %s)", virt_dev->name);
++ res = -EINVAL;
++ goto out_destroy;
++ }
++
++ list_add_tail(&virt_dev->vdev_list_entry, &vdev_list);
++
++ vdisk_report_registering(virt_dev);
++
++ virt_dev->virt_id = scst_register_virtual_device(virt_dev->vdev_devt,
++ virt_dev->name);
++ if (virt_dev->virt_id < 0) {
++ res = virt_dev->virt_id;
++ goto out_del;
++ }
++
++ TRACE_DBG("Registered virt_dev %s with id %d", virt_dev->name,
++ virt_dev->virt_id);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_del:
++ list_del(&virt_dev->vdev_list_entry);
++
++out_destroy:
++ vdev_destroy(virt_dev);
++ goto out;
++}
++
++/* scst_vdisk_mutex supposed to be held */
++static int vdev_nullio_add_device(const char *device_name, char *params)
++{
++ int res = 0;
++ const char *allowed_params[] = { "read_only", "removable",
++ "blocksize", NULL };
++ struct scst_vdisk_dev *virt_dev;
++
++ TRACE_ENTRY();
++
++ res = vdev_create(&vdisk_null_devtype, device_name, &virt_dev);
++ if (res != 0)
++ goto out;
++
++ virt_dev->command_set_version = 0x04C0; /* SBC-3 */
++
++ virt_dev->nullio = 1;
++
++ res = vdev_parse_add_dev_params(virt_dev, params, allowed_params);
++ if (res != 0)
++ goto out_destroy;
++
++ list_add_tail(&virt_dev->vdev_list_entry, &vdev_list);
++
++ vdisk_report_registering(virt_dev);
++
++ virt_dev->virt_id = scst_register_virtual_device(virt_dev->vdev_devt,
++ virt_dev->name);
++ if (virt_dev->virt_id < 0) {
++ res = virt_dev->virt_id;
++ goto out_del;
++ }
++
++ TRACE_DBG("Registered virt_dev %s with id %d", virt_dev->name,
++ virt_dev->virt_id);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_del:
++ list_del(&virt_dev->vdev_list_entry);
++
++out_destroy:
++ vdev_destroy(virt_dev);
++ goto out;
++}
++
++static ssize_t vdisk_add_fileio_device(const char *device_name, char *params)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_vdisk_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ res = vdev_fileio_add_device(device_name, params);
++
++ mutex_unlock(&scst_vdisk_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t vdisk_add_blockio_device(const char *device_name, char *params)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_vdisk_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ res = vdev_blockio_add_device(device_name, params);
++
++ mutex_unlock(&scst_vdisk_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++}
++
++static ssize_t vdisk_add_nullio_device(const char *device_name, char *params)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_vdisk_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ res = vdev_nullio_add_device(device_name, params);
++
++ mutex_unlock(&scst_vdisk_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++}
++
++/* scst_vdisk_mutex supposed to be held */
++static void vdev_del_device(struct scst_vdisk_dev *virt_dev)
++{
++ TRACE_ENTRY();
++
++ scst_unregister_virtual_device(virt_dev->virt_id);
++
++ list_del(&virt_dev->vdev_list_entry);
++
++ PRINT_INFO("Virtual device %s unregistered", virt_dev->name);
++ TRACE_DBG("virt_id %d unregistered", virt_dev->virt_id);
++
++ vdev_destroy(virt_dev);
++
++ return;
++}
++
++static ssize_t vdisk_del_device(const char *device_name)
++{
++ int res = 0;
++ struct scst_vdisk_dev *virt_dev;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_vdisk_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ virt_dev = vdev_find(device_name);
++ if (virt_dev == NULL) {
++ PRINT_ERROR("Device %s not found", device_name);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ vdev_del_device(virt_dev);
++
++out_unlock:
++ mutex_unlock(&scst_vdisk_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* scst_vdisk_mutex supposed to be held */
++static ssize_t __vcdrom_add_device(const char *device_name, char *params)
++{
++ int res = 0;
++ const char *allowed_params[] = { NULL }; /* no params */
++ struct scst_vdisk_dev *virt_dev;
++
++ TRACE_ENTRY();
++
++ res = vdev_create(&vcdrom_devtype, device_name, &virt_dev);
++ if (res != 0)
++ goto out;
++
++ virt_dev->command_set_version = 0x02A0; /* MMC-3 */
++
++ virt_dev->rd_only = 1;
++ virt_dev->removable = 1;
++ virt_dev->cdrom_empty = 1;
++
++ virt_dev->block_size = DEF_CDROM_BLOCKSIZE;
++ virt_dev->block_shift = DEF_CDROM_BLOCKSIZE_SHIFT;
++
++ res = vdev_parse_add_dev_params(virt_dev, params, allowed_params);
++ if (res != 0)
++ goto out_destroy;
++
++ list_add_tail(&virt_dev->vdev_list_entry, &vdev_list);
++
++ vdisk_report_registering(virt_dev);
++
++ virt_dev->virt_id = scst_register_virtual_device(virt_dev->vdev_devt,
++ virt_dev->name);
++ if (virt_dev->virt_id < 0) {
++ res = virt_dev->virt_id;
++ goto out_del;
++ }
++
++ TRACE_DBG("Registered virt_dev %s with id %d", virt_dev->name,
++ virt_dev->virt_id);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_del:
++ list_del(&virt_dev->vdev_list_entry);
++
++out_destroy:
++ vdev_destroy(virt_dev);
++ goto out;
++}
++
++static ssize_t vcdrom_add_device(const char *device_name, char *params)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_vdisk_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ res = __vcdrom_add_device(device_name, params);
++
++ mutex_unlock(&scst_vdisk_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++}
++
++static ssize_t vcdrom_del_device(const char *device_name)
++{
++ int res = 0;
++ struct scst_vdisk_dev *virt_dev;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_vdisk_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ virt_dev = vdev_find(device_name);
++ if (virt_dev == NULL) {
++ PRINT_ERROR("Device %s not found", device_name);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ vdev_del_device(virt_dev);
++
++out_unlock:
++ mutex_unlock(&scst_vdisk_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int vcdrom_change(struct scst_vdisk_dev *virt_dev,
++ char *buffer)
++{
++ loff_t err;
++ char *old_fn, *p, *pp;
++ const char *filename = NULL;
++ int length = strlen(buffer);
++ int res = 0;
++
++ TRACE_ENTRY();
++
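++ /* Strip leading and trailing whitespace from the supplied filename */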
++ p = buffer;
++
++ while (isspace(*p) && *p != '\0')
++ p++;
++ filename = p;
++ p = &buffer[length-1];
++ pp = &buffer[length];
++ while (isspace(*p) && (*p != '\0')) {
++ pp = p;
++ p--;
++ }
++ *pp = '\0';
++
++ res = scst_suspend_activity(true);
++ if (res != 0)
++ goto out;
++
++ /* To sync with detach*() functions */
++ mutex_lock(&scst_mutex);
++
++ if (*filename == '\0') {
++ virt_dev->cdrom_empty = 1;
++ TRACE_DBG("%s", "No media");
++ } else if (*filename != '/') {
++ PRINT_ERROR("File path \"%s\" is not absolute", filename);
++ res = -EINVAL;
++ goto out_unlock;
++ } else
++ virt_dev->cdrom_empty = 0;
++
++ old_fn = virt_dev->filename;
++
++ if (!virt_dev->cdrom_empty) {
++ int len = strlen(filename) + 1;
++ char *fn = kmalloc(len, GFP_KERNEL);
++ if (fn == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "%s",
++ "Allocation of filename failed");
++ res = -ENOMEM;
++ goto out_unlock;
++ }
++
++ strlcpy(fn, filename, len);
++ virt_dev->filename = fn;
++
++ res = vdisk_get_file_size(virt_dev->filename,
++ virt_dev->blockio, &err);
++ if (res != 0)
++ goto out_free_fn;
++ } else {
++ err = 0;
++ virt_dev->filename = NULL;
++ }
++
++ if (virt_dev->prevent_allow_medium_removal) {
++ PRINT_ERROR("Medium removal is prevented for "
++ "virtual device %s", virt_dev->name);
++ res = -EINVAL;
++ goto out_free_fn;
++ }
++
++ virt_dev->file_size = err;
++ virt_dev->nblocks = virt_dev->file_size >> virt_dev->block_shift;
++ if (!virt_dev->cdrom_empty)
++ virt_dev->media_changed = 1;
++
++ mutex_unlock(&scst_mutex);
++
++ scst_dev_del_all_thr_data(virt_dev->dev);
++
++ if (!virt_dev->cdrom_empty) {
++ PRINT_INFO("Changed SCSI target virtual cdrom %s "
++ "(file=\"%s\", fs=%lldMB, bs=%d, nblocks=%lld,"
++ " cyln=%lld%s)", virt_dev->name,
++ vdev_get_filename(virt_dev),
++ virt_dev->file_size >> 20, virt_dev->block_size,
++ (long long unsigned int)virt_dev->nblocks,
++ (long long unsigned int)virt_dev->nblocks/64/32,
++ virt_dev->nblocks < 64*32 ? " !WARNING! cyln less "
++ "than 1" : "");
++ } else {
++ PRINT_INFO("Removed media from SCSI target virtual cdrom %s",
++ virt_dev->name);
++ }
++
++ kfree(old_fn);
++
++out_resume:
++ scst_resume_activity();
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free_fn:
++ kfree(virt_dev->filename);
++ virt_dev->filename = old_fn;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++ goto out_resume;
++}
++
++static int vcdrom_sysfs_process_filename_store(struct scst_sysfs_work_item *work)
++{
++ int res;
++ struct scst_device *dev = work->dev;
++ struct scst_vdisk_dev *virt_dev;
++
++ TRACE_ENTRY();
++
++ /* It's safe, since we have taken dev_kobj and dh_priv is NULLed in attach() */
++ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++
++ res = vcdrom_change(virt_dev, work->buf);
++
++ kobject_put(&dev->dev_kobj);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t vcdrom_sysfs_filename_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ char *i_buf;
++ struct scst_sysfs_work_item *work;
++ struct scst_device *dev;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++
++ i_buf = kmalloc(count+1, GFP_KERNEL);
++ if (i_buf == NULL) {
++ PRINT_ERROR("Unable to alloc intermediate buffer with size %zd",
++ count+1);
++ res = -ENOMEM;
++ goto out;
++ }
++ memcpy(i_buf, buf, count);
++ i_buf[count] = '\0';
++
++ res = scst_alloc_sysfs_work(vcdrom_sysfs_process_filename_store,
++ false, &work);
++ if (res != 0)
++ goto out_free;
++
++ work->buf = i_buf;
++ work->dev = dev;
++
++ kobject_get(&dev->dev_kobj);
++
++ res = scst_sysfs_queue_wait_work(work);
++ if (res == 0)
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free:
++ kfree(i_buf);
++ goto out;
++}
++
++static ssize_t vdev_sysfs_size_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos = 0;
++ struct scst_device *dev;
++ struct scst_vdisk_dev *virt_dev;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++
++ pos = sprintf(buf, "%lld\n", virt_dev->file_size / 1024 / 1024);
++
++ TRACE_EXIT_RES(pos);
++ return pos;
++}
++
++static ssize_t vdisk_sysfs_blocksize_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos = 0;
++ struct scst_device *dev;
++ struct scst_vdisk_dev *virt_dev;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++
++ pos = sprintf(buf, "%d\n%s", (int)virt_dev->block_size,
++ (virt_dev->block_size == DEF_DISK_BLOCKSIZE) ? "" :
++ SCST_SYSFS_KEY_MARK "\n");
++
++ TRACE_EXIT_RES(pos);
++ return pos;
++}
++
++static ssize_t vdisk_sysfs_rd_only_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos = 0;
++ struct scst_device *dev;
++ struct scst_vdisk_dev *virt_dev;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++
++ pos = sprintf(buf, "%d\n%s", virt_dev->rd_only ? 1 : 0,
++ (virt_dev->rd_only == DEF_RD_ONLY) ? "" :
++ SCST_SYSFS_KEY_MARK "");
++
++ TRACE_EXIT_RES(pos);
++ return pos;
++}
++
++static ssize_t vdisk_sysfs_wt_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos = 0;
++ struct scst_device *dev;
++ struct scst_vdisk_dev *virt_dev;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++
++ pos = sprintf(buf, "%d\n%s", virt_dev->wt_flag ? 1 : 0,
++ (virt_dev->wt_flag == DEF_WRITE_THROUGH) ? "" :
++ SCST_SYSFS_KEY_MARK "");
++
++ TRACE_EXIT_RES(pos);
++ return pos;
++}
++
++static ssize_t vdisk_sysfs_nv_cache_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos = 0;
++ struct scst_device *dev;
++ struct scst_vdisk_dev *virt_dev;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++
++ pos = sprintf(buf, "%d\n%s", virt_dev->nv_cache ? 1 : 0,
++ (virt_dev->nv_cache == DEF_NV_CACHE) ? "" :
++ SCST_SYSFS_KEY_MARK "");
++
++ TRACE_EXIT_RES(pos);
++ return pos;
++}
++
++static ssize_t vdisk_sysfs_o_direct_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos = 0;
++ struct scst_device *dev;
++ struct scst_vdisk_dev *virt_dev;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++
++ pos = sprintf(buf, "%d\n%s", virt_dev->o_direct_flag ? 1 : 0,
++ (virt_dev->o_direct_flag == DEF_O_DIRECT) ? "" :
++ SCST_SYSFS_KEY_MARK "");
++
++ TRACE_EXIT_RES(pos);
++ return pos;
++}
++
++static ssize_t vdisk_sysfs_removable_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos = 0;
++ struct scst_device *dev;
++ struct scst_vdisk_dev *virt_dev;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++
++ pos = sprintf(buf, "%d\n", virt_dev->removable ? 1 : 0);
++
++ if ((virt_dev->dev->type != TYPE_ROM) &&
++ (virt_dev->removable != DEF_REMOVABLE))
++ pos += sprintf(&buf[pos], "%s\n", SCST_SYSFS_KEY_MARK);
++
++ TRACE_EXIT_RES(pos);
++ return pos;
++}
++
++static int vdev_sysfs_process_get_filename(struct scst_sysfs_work_item *work)
++{
++ int res = 0;
++ struct scst_device *dev;
++ struct scst_vdisk_dev *virt_dev;
++
++ TRACE_ENTRY();
++
++ dev = work->dev;
++
++ if (mutex_lock_interruptible(&scst_vdisk_mutex) != 0) {
++ res = -EINTR;
++ goto out_put;
++ }
++
++ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++
++ if (virt_dev == NULL)
++ goto out_unlock;
++
++ if (virt_dev->filename != NULL)
++ work->res_buf = kasprintf(GFP_KERNEL, "%s\n%s\n",
++ vdev_get_filename(virt_dev), SCST_SYSFS_KEY_MARK);
++ else
++ work->res_buf = kasprintf(GFP_KERNEL, "%s\n",
++ vdev_get_filename(virt_dev));
++
++out_unlock:
++ mutex_unlock(&scst_vdisk_mutex);
++
++out_put:
++ kobject_put(&dev->dev_kobj);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t vdev_sysfs_filename_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int res = 0;
++ struct scst_device *dev;
++ struct scst_sysfs_work_item *work;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++
++ res = scst_alloc_sysfs_work(vdev_sysfs_process_get_filename,
++ true, &work);
++ if (res != 0)
++ goto out;
++
++ work->dev = dev;
++
++ kobject_get(&dev->dev_kobj);
++
++ scst_sysfs_work_get(work);
++
++ res = scst_sysfs_queue_wait_work(work);
++ if (res != 0)
++ goto out_put;
++
++ res = snprintf(buf, SCST_SYSFS_BLOCK_SIZE, "%s\n", work->res_buf);
++
++out_put:
++ scst_sysfs_work_put(work);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int vdisk_sysfs_process_resync_size_store(
++ struct scst_sysfs_work_item *work)
++{
++ int res;
++ struct scst_device *dev = work->dev;
++ struct scst_vdisk_dev *virt_dev;
++
++ TRACE_ENTRY();
++
++ /* It's safe, since we taken dev_kobj and dh_priv NULLed in attach() */
++ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++
++ res = vdisk_resync_size(virt_dev);
++
++ kobject_put(&dev->dev_kobj);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t vdisk_sysfs_resync_size_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ struct scst_device *dev;
++ struct scst_sysfs_work_item *work;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++
++ res = scst_alloc_sysfs_work(vdisk_sysfs_process_resync_size_store,
++ false, &work);
++ if (res != 0)
++ goto out;
++
++ work->dev = dev;
++
++ kobject_get(&dev->dev_kobj);
++
++ res = scst_sysfs_queue_wait_work(work);
++ if (res == 0)
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t vdev_sysfs_t10_dev_id_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res, i;
++ struct scst_device *dev;
++ struct scst_vdisk_dev *virt_dev;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++
++ write_lock(&vdisk_serial_rwlock);
++
++ if ((count > sizeof(virt_dev->t10_dev_id)) ||
++ ((count == sizeof(virt_dev->t10_dev_id)) &&
++ (buf[count-1] != '\n'))) {
++ PRINT_ERROR("T10 device id is too long (max %zd "
++ "characters)", sizeof(virt_dev->t10_dev_id)-1);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ memset(virt_dev->t10_dev_id, 0, sizeof(virt_dev->t10_dev_id));
++ memcpy(virt_dev->t10_dev_id, buf, count);
++
++ i = 0;
++ while (i < sizeof(virt_dev->t10_dev_id)) {
++ if (virt_dev->t10_dev_id[i] == '\n') {
++ virt_dev->t10_dev_id[i] = '\0';
++ break;
++ }
++ i++;
++ }
++
++ virt_dev->t10_dev_id_set = 1;
++
++ res = count;
++
++ PRINT_INFO("T10 device id for device %s changed to %s", virt_dev->name,
++ virt_dev->t10_dev_id);
++
++out_unlock:
++ write_unlock(&vdisk_serial_rwlock);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t vdev_sysfs_t10_dev_id_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos = 0;
++ struct scst_device *dev;
++ struct scst_vdisk_dev *virt_dev;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++
++ read_lock(&vdisk_serial_rwlock);
++ pos = sprintf(buf, "%s\n%s", virt_dev->t10_dev_id,
++ virt_dev->t10_dev_id_set ? SCST_SYSFS_KEY_MARK "\n" : "");
++ read_unlock(&vdisk_serial_rwlock);
++
++ TRACE_EXIT_RES(pos);
++ return pos;
++}
++
++static ssize_t vdev_sysfs_usn_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res, i;
++ struct scst_device *dev;
++ struct scst_vdisk_dev *virt_dev;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++ virt_dev = dev->dh_priv;
++
++ write_lock(&vdisk_serial_rwlock);
++
++ if ((count > sizeof(virt_dev->usn)) ||
++ ((count == sizeof(virt_dev->usn)) &&
++ (buf[count-1] != '\n'))) {
++ PRINT_ERROR("USN is too long (max %zd "
++ "characters)", sizeof(virt_dev->usn)-1);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ memset(virt_dev->usn, 0, sizeof(virt_dev->usn));
++ memcpy(virt_dev->usn, buf, count);
++
++ i = 0;
++ while (i < sizeof(virt_dev->usn)) {
++ if (virt_dev->usn[i] == '\n') {
++ virt_dev->usn[i] = '\0';
++ break;
++ }
++ i++;
++ }
++
++ virt_dev->usn_set = 1;
++
++ res = count;
++
++ PRINT_INFO("USN for device %s changed to %s", virt_dev->name,
++ virt_dev->usn);
++
++out_unlock:
++ write_unlock(&vdisk_serial_rwlock);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t vdev_sysfs_usn_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos = 0;
++ struct scst_device *dev;
++ struct scst_vdisk_dev *virt_dev;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++
++ read_lock(&vdisk_serial_rwlock);
++ pos = sprintf(buf, "%s\n%s", virt_dev->usn,
++ virt_dev->usn_set ? SCST_SYSFS_KEY_MARK "\n" : "");
++ read_unlock(&vdisk_serial_rwlock);
++
++ TRACE_EXIT_RES(pos);
++ return pos;
++}
++
++static int __init init_scst_vdisk(struct scst_dev_type *devtype)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ devtype->module = THIS_MODULE;
++
++ res = scst_register_virtual_dev_driver(devtype);
++ if (res < 0)
++ goto out;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++}
++
++static void exit_scst_vdisk(struct scst_dev_type *devtype)
++{
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_vdisk_mutex);
++ while (1) {
++ struct scst_vdisk_dev *virt_dev;
++
++ if (list_empty(&vdev_list))
++ break;
++
++ virt_dev = list_entry(vdev_list.next, typeof(*virt_dev),
++ vdev_list_entry);
++
++ vdev_del_device(virt_dev);
++ }
++ mutex_unlock(&scst_vdisk_mutex);
++
++ scst_unregister_virtual_dev_driver(devtype);
++
++ TRACE_EXIT();
++ return;
++}
++
++static int __init init_scst_vdisk_driver(void)
++{
++ int res;
++
++ vdisk_thr_cachep = KMEM_CACHE(scst_vdisk_thr, SCST_SLAB_FLAGS);
++ if (vdisk_thr_cachep == NULL) {
++ res = -ENOMEM;
++ goto out;
++ }
++
++ blockio_work_cachep = KMEM_CACHE(scst_blockio_work, SCST_SLAB_FLAGS);
++ if (blockio_work_cachep == NULL) {
++ res = -ENOMEM;
++ goto out_free_vdisk_cache;
++ }
++
++ if (num_threads < 1) {
++ PRINT_ERROR("num_threads can not be less than 1, use "
++ "default %d", DEF_NUM_THREADS);
++ num_threads = DEF_NUM_THREADS;
++ }
++
++ vdisk_file_devtype.threads_num = num_threads;
++ vcdrom_devtype.threads_num = num_threads;
++
++ atomic_set(&nullio_thr_data.hdr.ref, 1); /* never destroy it */
++
++ res = init_scst_vdisk(&vdisk_file_devtype);
++ if (res != 0)
++ goto out_free_slab;
++
++ res = init_scst_vdisk(&vdisk_blk_devtype);
++ if (res != 0)
++ goto out_free_vdisk;
++
++ res = init_scst_vdisk(&vdisk_null_devtype);
++ if (res != 0)
++ goto out_free_blk;
++
++ res = init_scst_vdisk(&vcdrom_devtype);
++ if (res != 0)
++ goto out_free_null;
++
++out:
++ return res;
++
++out_free_null:
++ exit_scst_vdisk(&vdisk_null_devtype);
++
++out_free_blk:
++ exit_scst_vdisk(&vdisk_blk_devtype);
++
++out_free_vdisk:
++ exit_scst_vdisk(&vdisk_file_devtype);
++
++out_free_slab:
++ kmem_cache_destroy(blockio_work_cachep);
++
++out_free_vdisk_cache:
++ kmem_cache_destroy(vdisk_thr_cachep);
++ goto out;
++}
++
++static void __exit exit_scst_vdisk_driver(void)
++{
++ exit_scst_vdisk(&vdisk_null_devtype);
++ exit_scst_vdisk(&vdisk_blk_devtype);
++ exit_scst_vdisk(&vdisk_file_devtype);
++ exit_scst_vdisk(&vcdrom_devtype);
++
++ kmem_cache_destroy(blockio_work_cachep);
++ kmem_cache_destroy(vdisk_thr_cachep);
++}
++
++module_init(init_scst_vdisk_driver);
++module_exit(exit_scst_vdisk_driver);
++
++MODULE_AUTHOR("Vladislav Bolkhovitin & Leonid Stoljar");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("SCSI disk (type 0) and CDROM (type 5) dev handler for "
++ "SCST using files on file systems or block devices");
++MODULE_VERSION(SCST_VERSION_STRING);
+diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documentation/scst/README.scst
+--- orig/linux-2.6.36/Documentation/scst/README.scst
++++ linux-2.6.36/Documentation/scst/README.scst
+@@ -0,0 +1,1453 @@
++Generic SCSI target mid-level for Linux (SCST)
++==============================================
++
++SCST is designed to provide a unified, consistent interface between SCSI
++target drivers and the Linux kernel and to simplify target driver
++development as much as possible. A detailed description of SCST's features
++and internals can be found on its Internet page http://scst.sourceforge.net.
++
++SCST supports the following I/O modes:
++
++ * Pass-through mode with one to many relationship, i.e. when multiple
++ initiators can connect to the exported pass-through devices, for
++ the following SCSI devices types: disks (type 0), tapes (type 1),
++ processors (type 3), CDROMs (type 5), MO disks (type 7), medium
++ changers (type 8) and RAID controllers (type 0xC).
++
++ * FILEIO mode, which allows using files on file systems or block
++   devices as virtual, remotely available SCSI disks or CDROMs with
++   the benefits of the Linux page cache.
++
++ * BLOCKIO mode, which performs direct block IO with a block device,
++ bypassing page-cache for all operations. This mode works ideally with
++ high-end storage HBAs and for applications that either do not need
++ caching between application and disk or need the large block
++ throughput.
++
++ * "Performance" device handlers, which provide in pseudo pass-through
++ mode a way for direct performance measurements without overhead of
++ actual data transferring from/to underlying SCSI device.
++
++In addition, SCST supports advanced per-initiator access and devices
++visibility management, so different initiators could see different set
++of devices with different access permissions. See below for details.
++
++A full list of SCST features and a comparison with other Linux targets
++can be found at http://scst.sourceforge.net/comparison.html.
++
++Installation
++------------
++
++To see your devices remotely, you need to add a corresponding LUN for
++them (see below how). By default, no local devices are seen remotely.
++There must be a LUN 0 in each LUN set (security group), i.e. LUN
++numbering must not start from, e.g., 1. Otherwise you will see no
++devices on remote initiators and the SCST core will write the following
++message into the kernel log: "tgt_dev for LUN 0 not found, command to
++unexisting LU?"
++
++It is highly recommended to use scstadmin utility for configuring
++devices and security groups.
++
++The flow of SCST initialization should be as follows:
++
++1. Load of SCST modules with necessary module parameters, if needed.
++
++2. Configure targets, devices, LUNs, etc. using either scstadmin
++(recommended), or the sysfs interface directly as described below.
++
++If you experience problems during module load or operation, check your
++kernel logs (or run the dmesg command for the most recent messages).
++
++IMPORTANT: Without loading the appropriate device handler, the corresponding
++========= devices will be invisible to remote initiators, which could lead
++          to holes in the LUN addressing, so automatic device scanning by
++          the remote SCSI mid-level may not notice the devices. Therefore
++          you will have to add them manually via
++          'echo "- - -" >/sys/class/scsi_host/hostX/scan',
++          where X is the host number.
++
++IMPORTANT: Running a target and an initiator on the same host is
++========= supported, except in the following 2 cases: swap over a
++          target-exported device and using a writable mmap over a file
++          from a target-exported device. The latter means you can't mount
++          a file system over a target-exported device. In other words, you
++          can freely use any sg, sd, st, etc. devices imported from the
++          target on the same host, but you can't mount file systems or put
++          swap on them. This is a limitation of the Linux memory/cache
++          manager, because in this case an OOM deadlock like the following
++          can occur: the system needs some memory -> it decides to clear
++          some cache -> the cache needs to write to a target-exported
++          device -> the initiator sends a request to the target -> the
++          target needs memory -> the system needs even more memory ->
++          deadlock.
++
++IMPORTANT: In the current version simultaneous access to local SCSI devices
++========= via standard high-level SCSI drivers (sd, st, sg, etc.) and
++          SCST's target drivers is unsupported. This is especially
++          important for commands executed via sg and st that change
++          the state of devices and their parameters, because that could
++          lead to data corruption. If any such command is issued, at
++          least the related device handler(s) must be restarted. For block
++          devices, READ/WRITE commands using the direct disk handler are
++          generally safe.
++
++Usage in failover mode
++----------------------
++
++It is recommended to use TEST UNIT READY ("tur") command to check if
++SCST target is alive in MPIO configurations.
++
++Device handlers
++---------------
++
++Device-specific drivers (device handlers) are plugins for SCST which
++help SCST analyze incoming requests and determine parameters specific
++to various types of devices. If an appropriate device handler
++for a SCSI device type isn't loaded, SCST doesn't know how to handle
++devices of this type, so they will be invisible to remote initiators
++(more precisely, a "LUN not supported" sense code will be returned).
++
++In addition to device handlers for real devices, there are VDISK, user
++space and "performance" device handlers.
++
++The VDISK device handler works over files on file systems and makes from
++them virtual, remotely available SCSI disks or CDROMs. In addition, it
++allows working directly over a block device, e.g. a local IDE or SCSI disk
++or even a disk partition, where there is no file system overhead. Using
++block devices, compared to sending SCSI commands directly to the SCSI
++mid-level via scsi_do_req()/scsi_execute_async(), has the advantage that
++data are transferred via the system cache, so it is possible to fully
++benefit from the caching and read ahead performed by Linux's VM subsystem.
++The only disadvantage here is that in FILEIO mode there is superfluous data
++copying between the cache and SCST's buffers. This issue is going to be
++addressed in the next release. Virtual CDROMs are useful for remote
++installation. See below for details on how to set up and use the VDISK
++device handler.
++
++"Performance" device handlers for disks, MO disks and tapes in their
++exec() method skip (pretend to execute) all READ and WRITE operations
++and thus provide a way for direct link performance measurements without
++overhead of actual data transferring from/to underlying SCSI device.
++
++NOTE: Since "perf" device handlers on READ operations don't touch the
++==== commands' data buffer, it is returned to remote initiators as it
++ was allocated, without even being zeroed. Thus, "perf" device
++ handlers impose some security risk, so use them with caution.
++
++Compilation options
++-------------------
++
++There are the following compilation options, which can be changed using
++your favorite kernel configuration Makefile target, e.g. "make xconfig":
++
++ - CONFIG_SCST_DEBUG - if defined, turns on some debugging code,
++ including some logging. Makes the driver considerably bigger and slower,
++ producing large amount of log data.
++
++ - CONFIG_SCST_TRACING - if defined, turns on ability to log events. Makes the
++ driver considerably bigger and leads to some performance loss.
++
++ - CONFIG_SCST_EXTRACHECKS - if defined, adds extra validity checks in
++ the various places.
++
++ - CONFIG_SCST_USE_EXPECTED_VALUES - if not defined (default), initiator
++ supplied expected data transfer length and direction will be used
++ only for verification purposes to return error or warn in case if one
++ of them is invalid. Instead, locally decoded from SCSI command values
++ will be used. This is necessary for security reasons, because
++ otherwise a faulty initiator can crash target by supplying invalid
++ value in one of those parameters. This is especially important in
++ case of pass-through mode. If CONFIG_SCST_USE_EXPECTED_VALUES is
++ defined, initiator supplied expected data transfer length and
++ direction will override the locally decoded values. This might be
++ necessary if internal SCST commands translation table doesn't contain
++ SCSI command, which is used in your environment. You can know that if
++ you enable "minor" trace level and have messages like "Unknown
++ opcode XX for YY. Should you update scst_scsi_op_table?" in your
++ kernel log and your initiator returns an error. Also report those
++ messages in the SCST mailing list scst-devel@lists.sourceforge.net.
++ Note, that not all SCSI transports support supplying expected values.
++
++ - CONFIG_SCST_DEBUG_TM - if defined, turns on task management functions
++ debugging, when on LUN 6 some of the commands will be delayed for
++ about 60 sec., so making the remote initiator send TM functions, eg
++ ABORT TASK and TARGET RESET. Also define
++ CONFIG_SCST_TM_DBG_GO_OFFLINE symbol in the Makefile if you want that
++ the device eventually become completely unresponsive, or otherwise to
++ circle around ABORTs and RESETs code. Needs CONFIG_SCST_DEBUG turned
++ on.
++
++ - CONFIG_SCST_STRICT_SERIALIZING - if defined, makes SCST send all commands to
++ underlying SCSI device synchronously, one after one. This makes task
++ management more reliable, with cost of some performance penalty. This
++ is mostly actual for stateful SCSI devices like tapes, where the
++ result of command's execution depends from device's settings defined
++ by previous commands. Disk and RAID devices are stateless in the most
++ cases. The current SCSI core in Linux doesn't allow to abort all
++ commands reliably if they sent asynchronously to a stateful device.
++ Turned off by default, turn it on if you use stateful device(s) and
++ need as much error recovery reliability as possible. As a side effect
++ of CONFIG_SCST_STRICT_SERIALIZING, on kernels below 2.6.30 no kernel
++ patching is necessary for pass-through device handlers (scst_disk,
++ etc.).
++
++ - CONFIG_SCST_TEST_IO_IN_SIRQ - if defined, allows SCST to submit selected
++ SCSI commands (TUR and READ/WRITE) from soft-IRQ context (tasklets).
++ Enabling it will decrease amount of context switches and slightly
++ improve performance. The goal of this option is to be able to measure
++ overhead of the context switches. If after enabling this option you
++ don't see under load in vmstat output on the target significant
++ decrease of amount of context switches, then your target driver
++ doesn't submit commands to SCST in IRQ context. For instance,
++ iSCSI-SCST doesn't do that, but qla2x00t with
++ CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD disabled - does. This option is
++ designed to be used with vdisk NULLIO backend.
++
++ WARNING! Using this option enabled with other backend than vdisk
++ NULLIO is unsafe and can lead you to a kernel crash!
++
++ - CONFIG_SCST_STRICT_SECURITY - if defined, makes SCST zero allocated data
++ buffers. Undefining it (default) considerably improves performance
++ and eases CPU load, but could create a security hole (information
++ leakage), so enable it, if you have strict security requirements.
++
++ - CONFIG_SCST_ABORT_CONSIDER_FINISHED_TASKS_AS_NOT_EXISTING - if defined,
++ in case when TASK MANAGEMENT function ABORT TASK is trying to abort a
++ command, which has already finished, remote initiator, which sent the
++ ABORT TASK request, will receive TASK NOT EXIST (or ABORT FAILED)
++ response for the ABORT TASK request. This is more logical response,
++ since, because the command finished, attempt to abort it failed, but
++ some initiators, particularly VMware iSCSI initiator, consider TASK
++ NOT EXIST response as if the target got crazy and try to RESET it.
++ Then sometimes get crazy itself. So, this option is disabled by
++ default.
++
++ - CONFIG_SCST_MEASURE_LATENCY - if defined, provides in "latency" files
++ global and per-LUN average commands processing latency statistic. You
++ can clear already measured results by writing 0 in each file. Note,
++ you need a non-preemptible kernel to have correct results.
++
++HIGHMEM kernel configurations are fully supported, but not recommended
++for performance reasons.
++
++Module parameters
++-----------------
++
++Module scst supports the following parameters:
++
++ - scst_threads - sets the number of SCST threads. By default it
++   is the CPU count.
++
++ - scst_max_cmd_mem - sets the maximum amount of memory in MB allowed to be
++   consumed by SCST commands for data buffers at any given time. By
++   default it is approximately TotalMem/4.
++
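++For example (the values are purely illustrative), the module could be
++loaded with these parameters like this:
++
++modprobe scst scst_threads=8 scst_max_cmd_mem=512
++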
++SCST sysfs interface
++--------------------
++
++Root of SCST sysfs interface is /sys/kernel/scst_tgt. It has the
++following entries:
++
++ - devices - this is a root subdirectory for all SCST devices
++
++ - handlers - this is a root subdirectory for all SCST dev handlers
++
++ - max_tasklet_cmd - specifies the maximum number of commands that can be
++   queued in the SCST core simultaneously from all connected initiators
++   while still allowing commands to be processed in soft-IRQ context in
++   tasklets. If the count of the commands exceeds this value, then all of
++   them will be processed only in threads. This is to prevent possible
++   starvation under heavy load and in some cases to improve performance
++   by more evenly spreading the load over the available CPUs.
++
++ - sgv - this is a root subdirectory for all SCST SGV caches
++
++ - targets - this is a root subdirectory for all SCST targets
++
++ - setup_id - allows reading and writing the SCST setup ID. This ID can be
++   used in cases when the same SCST configuration should be installed
++   on several targets, but the devices exported from those targets should
++   have different IDs and SNs. For instance, the VDISK dev handler uses
++   this ID to generate the T10 vendor specific identifier and SN of the
++   devices.
++
++ - threads - allows reading and setting the number of global SCST I/O
++   threads. Those threads are used with async dev handlers, for instance,
++   vdisk BLOCKIO or NULLIO.
++
++ - trace_level - allows to enable and disable various tracing
++ facilities. See content of this file for help how to use it.
++
++ - version - read-only attribute, which allows to see version of
++ SCST and enabled optional features.
++
++ - last_sysfs_mgmt_res - read-only attribute returning the completion
++   status of the last management command. In the sysfs implementation
++   there are some problems between internal sysfs and internal SCST
++   locking. To avoid them, in some cases sysfs calls can return an error
++   with errno EAGAIN. This doesn't mean the operation failed. It only
++   means that the operation was queued and not yet completed. To wait for
++   it to complete, a management tool should poll this file (see the sketch
++   below). If the operation hasn't yet completed, it will also return
++   EAGAIN. But after it has completed, it will return the result of this
++   operation (0 for success or -errno for error).
++
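++A rough polling sketch (purely illustrative; reading fails with EAGAIN
++while the operation is still queued) could look like:
++
++# retry until the queued management operation has completed
++until cat /sys/kernel/scst_tgt/last_sysfs_mgmt_res 2>/dev/null; do
++    sleep 1
++done
++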
++Each SCST sysfs file (attribute) can contain the mark "[key]" in its last
++line. This automatically added mark is used to allow scstadmin to see
++which attributes it should save in the config file. You can ignore it.
++
++"Devices" subdirectory contains subdirectories for each SCST devices.
++
++Content of each device's subdirectory is dev handler specific. See
++documentation for your dev handlers for more info about it as well as
++SysfsRules file for more info about common to all dev handlers rules.
++SCST dev handlers can have the following common entries:
++
++ - exported - subdirectory containing links to all LUNs where this
++ device was exported.
++
++ - handler - if dev handler determined for this device, this link points
++ to it. The handler can be not set for pass-through devices.
++
++ - threads_num - shows and allows setting the number of threads in this
++   device's threads pool. If 0, no threads will be created, and the global
++   SCST threads pool will be used. If <0, creation of the threads pool is
++   prohibited.
++
++ - threads_pool_type - shows and allows setting the threads pool type.
++ Possible values: "per_initiator" and "shared". When the value is
++ "per_initiator" (default), each session from each initiator will use
++ separate dedicated pool of threads. When the value is "shared", all
++ sessions from all initiators will share the same per-device pool of
++ threads. Valid only if threads_num attribute >0.
++
++ - dump_prs - allows to dump persistent reservations information in the
++ kernel log.
++
++ - type - SCSI type of this device
++
++See below for more information about other entries of this subdirectory
++of the standard SCST dev handlers.
++
++"Handlers" subdirectory contains subdirectories for each SCST dev
++handler.
++
++Content of each handler's subdirectory is dev handler specific. See
++documentation for your dev handlers for more info about it as well as
++SysfsRules file for more info about common to all dev handlers rules.
++SCST dev handlers can have the following common entries:
++
++ - mgmt - this entry allows to create virtual devices and their
++ attributes (for virtual devices dev handlers) or assign/unassign real
++ SCSI devices to/from this dev handler (for pass-through dev
++ handlers).
++
++ - trace_level - allows to enable and disable various tracing
++ facilities. See content of this file for help how to use it.
++
++ - type - SCSI type of devices served by this dev handler.
++
++See below for more information about other entries of this subdirectory
++of the standard SCST dev handlers.
++
++"Sgv" subdirectory contains statistic information of SCST SGV caches. It
++has the following entries:
++
++ - None, one or more subdirectories for each existing SGV cache.
++
++ - global_stats - file containing global SGV caches statistics.
++
++Each SGV cache's subdirectory has the following item:
++
++ - stats - file containing statistics for this SGV caches.
++
++"Targets" subdirectory contains subdirectories for each SCST target.
++
++Content of each target's subdirectory is target specific. See
++documentation for your target for more info about it as well as
++SysfsRules file for more info about common to all targets rules.
++Every target should have at least the following entries:
++
++ - ini_groups - subdirectory, which contains and allows to define
++ initiator-oriented access control information, see below.
++
++ - luns - subdirectory, which contains list of available LUNs in the
++ target-oriented access control and allows to define it, see below.
++
++ - sessions - subdirectory containing connected to this target sessions.
++
++ - enabled - using this attribute you can enable or disable this target.
++   It allows you to finish configuring it before it starts accepting new
++   connections (see the example after this list). 0 by default.
++
++ - addr_method - used LUNs addressing method. Possible values:
++ "Peripheral" and "Flat". Most initiators work well with Peripheral
++ addressing method (default), but some (HP-UX, for instance) may
++ require Flat method. This attribute is also available in the
++ initiators security groups, so you can assign the addressing method
++ on per-initiator basis.
++
++ - io_grouping_type - defines how I/O from sessions to this target are
++ grouped together. This I/O grouping is very important for
++ performance. By setting this attribute in a right value, you can
++ considerably increase performance of your setup. This grouping is
++ performed only if you use CFQ I/O scheduler on the target and for
++ devices with threads_num >= 0 and, if threads_num > 0, with
++ threads_pool_type "per_initiator". Possible values:
++ "this_group_only", "never", "auto", or I/O group number >0. When the
++ value is "this_group_only" all I/O from all sessions in this target
++ will be grouped together. When the value is "never", I/O from
++ different sessions will not be grouped together, i.e. all sessions in
++ this target will have separate dedicated I/O groups. When the value
++ is "auto" (default), all I/O from initiators with the same name
++ (iSCSI initiator name, for instance) in all targets will be grouped
++ together with a separate dedicated I/O group for each initiator name.
++ For iSCSI this mode works well, but other transports usually use
++ different initiator names for different sessions, so using such
++ transports in MPIO configurations you should either use value
++ "this_group_only", or an explicit I/O group number. This attribute is
++ also available in the initiators security groups, so you can assign
++ the I/O grouping on per-initiator basis. See below for more info how
++ to use this attribute.
++
++ - rel_tgt_id - allows to read or write SCSI Relative Target Port
++ Identifier attribute. This identifier is used to identify SCSI Target
++ Ports by some SCSI commands, mainly by Persistent Reservations
++ commands. This identifier must be unique among all SCST targets, but
++ for convenience SCST allows disabled targets to have not unique
++ rel_tgt_id. In this case SCST will not allow to enable this target
++   until rel_tgt_id becomes unique. By default this attribute is
++   initialized by SCST to a unique value.
++
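++For example (the target driver and target name are illustrative only), a
++fully configured target could be enabled with:
++
++echo 1 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/enabled
++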
++A target driver may have also the following entries:
++
++ - "hw_target" - if the target driver supports both hardware and virtual
++ targets (for instance, an FC adapter supporting NPIV, which has
++ hardware targets for its physical ports as well as virtual NPIV
++ targets), this read only attribute for all hardware targets will
++ exist and contain value 1.
++
++Subdirectory "sessions" contains one subdirectory for each connected
++session with name equal to name of the connected initiator.
++
++Each session subdirectory contains the following entries:
++
++ - initiator_name - contains initiator name
++
++ - force_close - optional write-only attribute, which allows to force
++ close this session.
++
++ - active_commands - contains number of active, i.e. not yet or being
++ executed, SCSI commands in this session.
++
++ - commands - contains overall number of SCSI commands in this session.
++
++ - latency - if CONFIG_SCST_MEASURE_LATENCY enabled, contains latency
++ statistics for this session.
++
++ - luns - a link pointing out to the corresponding LUNs set (security
++ group) where this session was attached to.
++
++ - One or more "lunX" subdirectories, where 'X' is a number, for each LUN
++ this session has (see below).
++
++ - other target driver specific attributes and subdirectories.
++
++See below description of the VDISK's sysfs interface for samples.
++
++Access and devices visibility management (LUN masking)
++------------------------------------------------------
++
++Access and devices visibility management allows for an initiator or
++group of initiators to see different devices with different LUNs
++with necessary access permissions.
++
++SCST supports two modes of access control:
++
++1. Target-oriented. In this mode you define for each target a default
++set of LUNs, which are accessible to all initiators, connected to that
++target. This is a regular access control mode, which people usually mean
++thinking about access control in general. For instance, in IET this is
++the only supported mode.
++
++2. Initiator-oriented. In this mode you define which LUNs are accessible
++for each initiator. In this mode you should create, for each set of one
++or more initiators which should access the same set of devices with
++the same LUNs, a separate security group, then add to it the devices and
++the names of the allowed initiator(s).
++
++Both modes can be used simultaneously. In this case the
++initiator-oriented mode has higher priority, than the target-oriented,
++i.e. initiators are at first searched in all defined security groups for
++this target and, if none matches, the default target's set of LUNs is
++used. This set of LUNs might be empty, then the initiator will not see
++any LUNs from the target.
++
++You can at any time find out which set of LUNs each session is assigned
++to by looking where link
++/sys/kernel/scst_tgt/targets/target_driver/target_name/sessions/initiator_name/luns
++points to.
++
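++For example (the names in the path are placeholders):
++
++readlink /sys/kernel/scst_tgt/targets/target_driver/target_name/sessions/initiator_name/luns
++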
++To configure the target-oriented access control SCST provides the
++following interface. Each target's sysfs subdirectory
++(/sys/kernel/scst_tgt/targets/target_driver/target_name) has "luns"
++subdirectory. This subdirectory contains the list of already defined
++target-oriented access control LUNs for this target as well as file
++"mgmt". This file has the following commands, which you can send to it,
++for instance, using "echo" shell command. You can always get a small
++help about supported commands by looking inside this file. "Parameters"
++are one or more param_name=value pairs separated by ';'.
++
++ - "add H:C:I:L lun [parameters]" - adds a pass-through device with
++ host:channel:id:lun with LUN "lun". Optionally, the device could be
++ marked as read only by using parameter "read_only". The recommended
++ way to find out H:C:I:L numbers is use of lsscsi utility.
++
++ - "replace H:C:I:L lun [parameters]" - replaces by pass-through device
++ with host:channel:id:lun existing with LUN "lun" device with
++ generation of INQUIRY DATA HAS CHANGED Unit Attention. If the old
++ device doesn't exist, this command acts as the "add" command.
++ Optionally, the device could be marked as read only by using
++ parameter "read_only". The recommended way to find out H:C:I:L
++ numbers is use of lsscsi utility.
++
++ - "add VNAME lun [parameters]" - adds a virtual device with name VNAME
++ with LUN "lun". Optionally, the device could be marked as read only
++ by using parameter "read_only".
++
++ - "replace VNAME lun [parameters]" - replaces by virtual device
++ with name VNAME existing with LUN "lun" device with generation of
++ INQUIRY DATA HAS CHANGED Unit Attention. If the old device doesn't
++ exist, this command acts as the "add" command. Optionally, the device
++ could be marked as read only by using parameter "read_only".
++
++ - "del lun" - deletes LUN lun
++
++ - "clear" - clears the list of devices
++
++To configure the initiator-oriented access control SCST provides the
++following interface. Each target's sysfs subdirectory
++(/sys/kernel/scst_tgt/targets/target_driver/target_name) has "ini_groups"
++subdirectory. This subdirectory contains the list of already defined
++security groups for this target as well as file "mgmt". This file has
++the following commands, which you can send to it, for instance, using
++"echo" shell command. You can always get a small help about supported
++commands by looking inside this file.
++
++ - "create GROUP_NAME" - creates a new security group.
++
++ - "del GROUP_NAME" - deletes a new security group.
++
++Each security group's subdirectory contains 2 subdirectories: initiators
++and luns.
++
++Each "initiators" subdirectory contains list of added to this groups
++initiator as well as as well as file "mgmt". This file has the following
++commands, which you can send to it, for instance, using "echo" shell
++command. You can always get a small help about supported commands by
++looking inside this file.
++
++ - "add INITIATOR_NAME" - adds initiator with name INITIATOR_NAME to the
++ group.
++
++ - "del INITIATOR_NAME" - deletes initiator with name INITIATOR_NAME
++ from the group.
++
++ - "move INITIATOR_NAME DEST_GROUP_NAME" moves initiator with name
++ INITIATOR_NAME from the current group to group with name
++ DEST_GROUP_NAME.
++
++ - "clear" - deletes all initiators from this group.
++
++For "add" and "del" commands INITIATOR_NAME can be a simple DOS-type
++patterns, containing '*' and '?' symbols. '*' means match all any
++symbols, '?' means match only any single symbol. For instance,
++"blah.xxx" will match "bl?h.*". Additionally, you can use negative sign
++'!' to revert the value of the pattern. For instance, "ah.xxx" will
++match "!bl?h.*".
++
++Each "luns" subdirectory contains the list of already defined LUNs for
++this group as well as file "mgmt". Content of this file as well as list
++of available in it commands is fully identical to the "luns"
++subdirectory of the target-oriented access control.
++
++Examples:
++
++ - echo "create INI" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/ini_groups/mgmt -
++ creates security group INI for target iqn.2006-10.net.vlnb:tgt1.
++
++ - echo "add 2:0:1:0 11" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/ini_groups/INI/luns/mgmt -
++ adds a pass-through device sitting on host 2, channel 0, ID 1, LUN 0
++ to group with name INI as LUN 11.
++
++ - echo "add disk1 0" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/ini_groups/INI/luns/mgmt -
++ adds a virtual disk with name disk1 to group with name INI as LUN 0.
++
++ - echo "add 21:*:e0:?b:83:*" >/sys/kernel/scst_tgt/targets/21:00:00:a0:8c:54:52:12/ini_groups/INI/initiators/mgmt -
++ adds a pattern to group with name INI to Fibre Channel target with
++ WWN 21:00:00:a0:8c:54:52:12, which matches WWNs of Fibre Channel
++ initiator ports.
++
++Consider you need to have an iSCSI target with name
++"iqn.2007-05.com.example:storage.disk1.sys1.xyz", which should export
++virtual device "dev1" with LUN 0 and virtual device "dev2" with LUN 1,
++but initiator with name
++"iqn.2007-05.com.example:storage.disk1.spec_ini.xyz" should see only
++virtual device "dev2" read only with LUN 0. To achieve that you should
++do the following commands:
++
++# echo "iqn.2007-05.com.example:storage.disk1.sys1.xyz" >/sys/kernel/scst_tgt/targets/iscsi/mgmt
++# echo "add dev1 0" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2007-05.com.example:storage.disk1.sys1.xyz/luns/mgmt
++# echo "add dev2 1" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2007-05.com.example:storage.disk1.sys1.xyz/luns/mgmt
++# echo "create SPEC_INI" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2007-05.com.example:storage.disk1.sys1.xyz/ini_groups/mgmt
++# echo "add dev2 0 read_only=1" \
++ >/sys/kernel/scst_tgt/targets/iscsi/iqn.2007-05.com.example:storage.disk1.sys1.xyz/ini_groups/SPEC_INI/luns/mgmt
++# echo "iqn.2007-05.com.example:storage.disk1.spec_ini.xyz" \
++ >/sys/kernel/scst_tgt/targets/iscsi/iqn.2007-05.com.example:storage.disk1.sys1.xyz/ini_groups/SPEC_INI/initiators/mgmt
++
++For Fibre Channel or SAS in the above example you should use target's
++and initiator ports WWNs instead of iSCSI names.
++
++It is highly recommended to use the scstadmin utility instead of the
++low level interface described in this section.
++
++IMPORTANT
++=========
++
++There must be a LUN 0 in each set of LUNs, i.e. LUN numbering must not
++start from, e.g., 1. Otherwise you will see no devices on remote
++initiators and the SCST core will write the following message into the
++kernel log: "tgt_dev for LUN 0 not found, command to unexisting LU?"
++
++IMPORTANT
++=========
++
++All the access control must be fully configured BEFORE the corresponding
++target is enabled. When you enable a target, it will immediately start
++accepting new connections, hence creating new sessions, and those new
++sessions will be assigned to security groups according to the
++*currently* configured access control settings. For instance, a session
++created before its security group exists will be assigned to the default
++target's set of LUNs, instead of the "HOST004" group as you may
++need, because "HOST004" doesn't exist yet. So, you must configure all
++the security groups before new connections from the initiators are
++created, i.e. before the target is enabled.
++
++VDISK device handler
++--------------------
++
++VDISK has 4 built-in dev handlers: vdisk_fileio, vdisk_blockio,
++vdisk_nullio and vcdrom. Roots of their sysfs interface are
++/sys/kernel/scst_tgt/handlers/handler_name, e.g. for vdisk_fileio:
++/sys/kernel/scst_tgt/handlers/vdisk_fileio. Each root has the following
++entries:
++
++ - None, one or more links to devices with name equal to names
++ of the corresponding devices.
++
++ - trace_level - allows to enable and disable various tracing
++ facilities. See content of this file for help how to use it.
++
++ - mgmt - main management entry, which allows to add/delete VDISK
++ devices with the corresponding type.
++
++The "mgmt" file has the following commands, which you can send to it,
++for instance, using "echo" shell command. You can always get a small
++help about supported commands by looking inside this file. "Parameters"
++are one or more param_name=value pairs separated by ';'.
++
++ - echo "add_device device_name [parameters]" - adds a virtual device
++ with name device_name and specified parameters (see below)
++
++ - echo "del_device device_name" - deletes a virtual device with name
++ device_name.
++
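++For example (the device name is illustrative), a previously created
++virtual device could be removed with:
++
++echo "del_device disk1" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
++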
++Handler vdisk_fileio provides FILEIO mode to create virtual devices.
++This mode uses files as the backend and accesses them using regular
++read()/write() file calls. This allows using the full power of the Linux
++page cache. The following parameters are possible for vdisk_fileio:
++
++ - filename - specifies path and file name of the backend file. The path
++ must be absolute.
++
++ - blocksize - specifies block size used by this virtual device. The
++ block size must be power of 2 and >= 512 bytes. Default is 512.
++
++ - write_through - disables write back caching. Note, this option
++   makes sense only if you also *manually* disable the write-back cache in
++ *all* your backstorage devices and make sure it's actually disabled,
++ since many devices are known to lie about this mode to get better
++ benchmark results. Default is 0.
++
++ - read_only - read only. Default is 0.
++
++ - o_direct - disables both read and write caching. This mode isn't
++ currently fully implemented, you should use user space fileio_tgt
++ program in O_DIRECT mode instead (see below).
++
++ - nv_cache - enables "non-volatile cache" mode. In this mode it is
++ assumed that the target has a GOOD UPS with ability to cleanly
++ shutdown target in case of power failure and it is software/hardware
++ bugs free, i.e. all data from the target's cache are guaranteed
++ sooner or later to go to the media. Hence all data synchronization
++ with media operations, like SYNCHRONIZE_CACHE, are ignored in order
++ to bring more performance. Also in this mode target reports to
++ initiators that the corresponding device has write-through cache to
++ disable all write-back cache workarounds used by initiators. Use with
++ extreme caution, since in this mode after a crash of the target
++   journaled file systems don't guarantee consistency after journal
++   recovery, therefore a manual fsck MUST be run. Note that, since the
++   journal barrier protection (see the "IMPORTANT" note below) is usually
++   turned off, enabling NV_CACHE could change nothing from the data
++   protection point of view, since no data synchronization with media
++   operations will come from the initiator. This option overrides the
++   "write_through"
++ option. Disabled by default.
++
++ - removable - with this flag set the device is reported to remote
++ initiators as removable.
++
++Handler vdisk_blockio provides BLOCKIO mode to create virtual devices.
++This mode performs direct block I/O with a block device, bypassing the
++page cache for all operations. This mode works ideally with high-end
++storage HBAs and for applications that either do not need caching
++between application and disk or need the large block throughput. See
++below for more info.
++
++The following parameters are possible for vdisk_blockio: filename,
++blocksize, nv_cache, read_only, removable. See vdisk_fileio above for
++a description of those parameters.
++
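++For example (the device name and path are illustrative), a BLOCKIO device
++over a whole block device could be created with:
++
++echo "add_device disk2 filename=/dev/sdb" >/sys/kernel/scst_tgt/handlers/vdisk_blockio/mgmt
++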
++Handler vdisk_nullio provides NULLIO mode to create virtual devices. In
++this mode no real I/O is done, but success is returned to initiators.
++It is intended to be used for performance measurements in the same way as
++the "*_perf" handlers. The following parameters are possible for
++vdisk_nullio: blocksize, read_only, removable. See vdisk_fileio above for
++a description of those parameters.
++
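++For example (the device name is illustrative), a NULLIO device for
++performance measurements could be created with:
++
++echo "add_device nullio0 blocksize=4096" >/sys/kernel/scst_tgt/handlers/vdisk_nullio/mgmt
++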
++Handler vcdrom allows emulation of a virtual CDROM device using an ISO
++file as backend. It doesn't have any parameters.
++
++For example:
++
++echo "add_device disk1 filename=/disk1; blocksize=4096; nv_cache=1" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
++
++will create a FILEIO virtual device disk1 with backend file /disk1
++with block size 4K and NV_CACHE enabled.
++
++Each vdisk_fileio's device has the following attributes in
++/sys/kernel/scst_tgt/devices/device_name:
++
++ - filename - contains path and file name of the backend file.
++
++ - blocksize - contains block size used by this virtual device.
++
++ - write_through - contains status of write back caching of this virtual
++ device.
++
++ - read_only - contains read only status of this virtual device.
++
++ - o_direct - contains O_DIRECT status of this virtual device.
++
++ - nv_cache - contains NV_CACHE status of this virtual device.
++
++ - removable - contains removable status of this virtual device.
++
++ - size_mb - contains size of this virtual device in MB.
++
++ - t10_dev_id - contains and allows to set T10 vendor specific
++ identifier for Device Identification VPD page (0x83) of INQUIRY data.
++ By default VDISK handler always generates t10_dev_id for every new
++ created device at creation time based on the device name and
++ scst_vdisk_ID scst_vdisk.ko module parameter (see below).
++
++ - usn - contains the virtual device's serial number of INQUIRY data. It
++ is created at the device creation time based on the device name and
++ scst_vdisk_ID scst_vdisk.ko module parameter (see below).
++
++ - type - contains SCSI type of this virtual device.
++
++ - resync_size - write-only attribute, which makes vdisk_fileio re-scan
++   the size of the backend file. It is useful if you changed it, for
++   instance, if you resized it (see the example after the listing below).
++
++For example:
++
++/sys/kernel/scst_tgt/devices/disk1
++|-- blocksize
++|-- exported
++| |-- export0 -> ../../../targets/iscsi/iqn.2006-10.net.vlnb:tgt/luns/0
++| |-- export1 -> ../../../targets/iscsi/iqn.2006-10.net.vlnb:tgt/ini_groups/INI/luns/0
++| |-- export2 -> ../../../targets/iscsi/iqn.2006-10.net.vlnb:tgt1/luns/0
++| |-- export3 -> ../../../targets/iscsi/iqn.2006-10.net.vlnb:tgt1/ini_groups/INI1/luns/0
++| |-- export4 -> ../../../targets/iscsi/iqn.2006-10.net.vlnb:tgt1/ini_groups/INI2/luns/0
++|-- filename
++|-- handler -> ../../handlers/vdisk_fileio
++|-- nv_cache
++|-- o_direct
++|-- read_only
++|-- removable
++|-- resync_size
++|-- size_mb
++|-- t10_dev_id
++|-- threads_num
++|-- threads_pool_type
++|-- type
++|-- usn
++`-- write_through
++
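++For example (the device name is illustrative), after resizing the backend
++file of disk1 its size could be re-scanned with (any written value
++triggers the re-scan):
++
++echo 1 >/sys/kernel/scst_tgt/devices/disk1/resync_size
++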
++Each vdisk_blockio's device has the following attributes in
++/sys/kernel/scst_tgt/devices/device_name: blocksize, filename, nv_cache,
++read_only, removable, resync_size, size_mb, t10_dev_id, threads_num,
++threads_pool_type, type, usn. See above description of those parameters.
++
++Each vdisk_nullio's device has the following attributes in
++/sys/kernel/scst_tgt/devices/device_name: blocksize, read_only,
++removable, size_mb, t10_dev_id, threads_num, threads_pool_type, type,
++usn. See above description of those parameters.
++
++Each vcdrom's device has the following attributes in
++/sys/kernel/scst_tgt/devices/device_name: filename, size_mb,
++t10_dev_id, threads_num, threads_pool_type, type, usn. See above
++description of those parameters. The exception is the filename attribute.
++For vcdrom it is writable. Writing to it allows virtually inserting or
++changing the virtual CD media in the virtual CDROM device. For example:
++
++ - echo "/image.iso" >/sys/kernel/scst_tgt/devices/cdrom/filename - will
++ insert file /image.iso as virtual media to the virtual CDROM cdrom.
++
++ - echo "" >/sys/kernel/scst_tgt/devices/cdrom/filename - will remove
++ "media" from the virtual CDROM cdrom.
++
++Additionally, the VDISK handler has the module parameter "num_threads",
++which specifies the count of I/O threads for each FILEIO VDISK or VCDROM
++device.
++If you have a workload, which tends to produce rather random accesses
++(e.g. DB-like), you should increase this count to a bigger value, like
++32. If you have a rather sequential workload, you should decrease it to
++a lower value, like number of CPUs on the target or even 1. Due to some
++limitations of Linux I/O subsystem, increasing number of I/O threads too
++much leads to sequential performance drop, especially with deadline
++scheduler, so decreasing it can improve sequential performance. The
++default provides a good compromise between random and sequential
++accesses.
++
++You shouldn't be afraid to have too many VDISK I/O threads if you have
++many VDISK devices. Kernel threads consume very little amount of
++resources (several KBs) and only necessary threads will be used by SCST,
++so the threads will not trash your system.
++
++CAUTION: If you partitioned/formatted your device with block size X, *NEVER*
++======== ever try to export and then mount it (even accidentally) with another
++ block size. Otherwise you can *instantly* damage it pretty
++         badly as well as all your data on it. Messages on the initiator
++         like "attempt to access beyond end of device" are a sign of
++         such damage.
++
++ Moreover, if you want to compare how well different block sizes
++ work for you, you **MUST** EVERY TIME AFTER CHANGING BLOCK SIZE
++ **COMPLETELY** **WIPE OFF** ALL THE DATA FROM THE DEVICE. In
++ other words, THE **WHOLE** DEVICE **MUST** HAVE ONLY **ZEROS**
++ AS THE DATA AFTER YOU SWITCH TO NEW BLOCK SIZE. Switching block
++ sizes isn't like switching between FILEIO and BLOCKIO, after
++ changing block size all previously written with another block
++ size data MUST BE ERASED. Otherwise you will have a full set of
++ very weird behaviors, because blocks addressing will be
++ changed, but initiators in most cases will not have a
++ possibility to detect that old addresses written on the device
++ in, e.g., partition table, don't refer anymore to what they are
++ intended to refer.
++
++IMPORTANT: Some disk and partition table management utilities don't support
++========= block sizes >512 bytes, therefore make sure that your favorite one
++          supports them. Currently only cfdisk is known to work only with
++          512 byte blocks; other utilities like fdisk on Linux or the
++          standard disk manager on Windows have proved to work well with
++          non-512 byte blocks. Note, if you export a disk file or
++          device with a block size different from the one with which
++          it was already partitioned, you could get various weird
++          things like utilities hanging up or other unexpected behavior.
++          Hence, to be sure, zero the exported file or device before
++          the first access to it from the remote initiator with another
++          block size. On a Windows initiator make sure you "Set Signature"
++          in the disk manager on the drive imported from the target
++          before doing any other partitioning on it. After you have
++          successfully mounted a file system over a non-512 byte block
++          size device, the block size stops mattering; any program will
++          work with files on such a file system.
++
++Persistent Reservations
++-----------------------
++
++SCST implements Persistent Reservations with full set of capabilities,
++including "Persistence Through Power Loss".
++
++The "Persistence Through Power Loss" data are saved in /var/lib/scst/pr
++with files with names the same as the names of the corresponding
++devices. Also this directory contains backup versions of those files
++with suffix ".1". Those backup files are used in case of power or other
++failure to prevent Persistent Reservation information from corruption
++during update.
++
++Persistent Reservations are available on all transports implementing
++the get_initiator_port_transport_id() callback. Transports not implementing
++this callback will act in one of 2 possible scenarios ("all or
++nothing"):
++
++1. If a device has such transport connected and doesn't have persistent
++reservations, it will refuse Persistent Reservations commands as if it
++doesn't support them.
++
++2. If a device has persistent reservations, all initiators newly
++connecting via such transports will not see this device. After all
++persistent reservations from this device are released, upon reconnect
++the initiators will see it.
++
++Caching
++-------
++
++By default for performance reasons VDISK FILEIO devices use write back
++caching policy.
++
++Generally, write back caching is safe for use and danger of it is
++greatly overestimated, because most modern (especially, Enterprise
++level) applications are well prepared to work with write back cached
++storage. Particularly, such are all transactions-based applications.
++Those applications flush cache to completely avoid ANY data loss on a
++crash or power failure. For instance, journaled file systems flush cache
++on each meta data update, so they survive power/hardware/software
++failures pretty well.
++
++Since locally on initiators write back caching is always on, if an
++application cares about its data consistency, it flushes the cache
++when necessary or on any write, if it opens files with O_SYNC. If it
++doesn't care, it doesn't flush the cache. As soon as the cache flushes
++are propagated to the storage, write back caching on it doesn't make any
++difference. If an application doesn't flush the cache, it's doomed to lose
++data in case of a crash or power failure, no matter where this cache is
++located, locally or on the storage.
++
++To illustrate that consider, for example, a user who wants to copy /src
++directory to /dst directory reliably, i.e. after the copy finished no
++power failure or software/hardware crash could lead to a loss of the
++data in /dst. There are 2 ways to achieve this. Let's suppose for
++simplicity cp opens files for writing with O_SYNC flag, hence bypassing
++the local cache.
++
++1. Slow. Make the device behind /dst work in write through caching
++mode and then run "cp -a /src /dst".
++
++2. Fast. Let the device behind /dst work in write back caching mode
++and then run "cp -a /src /dst; sync". The reliability of the result is
++the same, but it's much faster than (1). Nobody would care if a crash
++happens during the copy, because after recovery simply leftovers from
++the not completed attempt would be deleted and the operation would be
++restarted from the very beginning.
++
++So, you can see in (2) there is no danger of ANY data loss from the
++write back caching. Moreover, since in practice cp doesn't open files
++for writing with the O_SYNC flag, to get the copy done reliably, the sync
++command must be called after cp anyway, so enabling write back caching
++wouldn't make any difference for reliability.
++
++Also you can consider it from another side. Modern HDDs have at least
++16MB of cache working in write back mode by default, so for a 10 drive
++RAID it is 160MB of write back cache. How many people are happy with
++it and how many have disabled the write back cache of their HDDs? Almost
++all and almost nobody, respectively. Moreover, many HDDs lie about the
++state of their cache and report write through while working in write back
++mode. They are also used successfully.
++
++Note that the Linux I/O subsystem guarantees to propagate cache
++flushes to the storage only when data protection barriers are used,
++and they are usually turned off by default (see
++http://lwn.net/Articles/283161). Without barriers enabled, Linux
++doesn't guarantee that after sync()/fsync() all written data have
++really hit permanent storage. The data can be stored in the cache of
++your backstorage devices and, hence, lost on a power failure event.
++Thus, even with write-through cache mode, you still either need to
++enable barriers on your backend file system on the target (for direct
++/dev/sdX devices this is, indeed, impossible), or need a good UPS to
++protect yourself from loss of uncommitted data. Some info about
++barriers from the XFS point of view can be found at
++http://oss.sgi.com/projects/xfs/faq.html#wcache. On Linux initiators,
++for Ext3 and ReiserFS file systems the barrier protection can be
++turned on using the "barrier=1" and "barrier=flush" mount options
++correspondingly. You can check whether the barriers are on or off by
++looking in /proc/mounts. Windows and, AFAIK, other UNIXes don't need
++any special explicit options and do the necessary barrier actions on
++write-back caching devices by default.
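++
++For example, on a Linux initiator with an Ext3 filesystem (the device
++and mount point names are only illustrative) you can enable barriers
++and then verify them like this:
++
++mount -o remount,barrier=1 /dev/sdb1 /mnt/data
++grep /mnt/data /proc/mounts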
++
++To limit this data loss with write back caching you can use the files
++in /proc/sys/vm to limit the amount of unflushed data in the system
++cache.
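++
++For example (the values are only illustrative, tune them for your
++system and workload):
++
++echo 5 >/proc/sys/vm/dirty_background_ratio
++echo 10 >/proc/sys/vm/dirty_ratio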
++
++If for some reason you have to use VDISK FILEIO devices in write
++through caching mode, don't forget to disable internal caching on
++their backend devices, or make sure they have an additional battery or
++supercapacitor power supply on board. Otherwise, on a power failure
++you would still lose all the not yet saved data in the devices'
++internal cache.
++
++Note that on some real-life workloads write through caching might
++perform better than write back caching with the barrier protection
++turned on.
++
++BLOCKIO VDISK mode
++------------------
++
++This module works best for these types of scenarios:
++
++1) Data that are not aligned to 4K sector boundaries and <4K block
++sizes are used, which is normally found in virtualization environments
++where operating systems start partitions on odd sectors (Windows and
++its sector 63).
++
++2) Large block data transfers normally found in database loads/dumps
++and streaming media.
++
++3) Advanced relational database systems that perform their own
++caching, which prefer or demand direct IO access and, because of the
++nature of their data access, can actually see worse performance with
++indiscriminate caching.
++
++4) Multiple layers of targets where the secondary and above layers
++need to have a consistent view of the primary targets in order to
++preserve data integrity, which a page cache backed IO type might not
++provide reliably.
++
++Also, it has an advantage over FILEIO in that it doesn't copy data
++between the system cache and the commands' data buffers, so it saves a
++considerable amount of CPU power and memory bandwidth.
++
++IMPORTANT: Since data in BLOCKIO and FILEIO modes are not consistent between
++========= each other, if you try to use a device in both those modes
++ simultaneously, you will almost instantly corrupt your data
++ on that device.
++
++IMPORTANT: In SCST 1.x BLOCKIO worked by default in NV_CACHE mode, where
++=========  each device was reported to remote initiators as having write
++           through caching. But if your backend block device has internal
++           write back caching, this creates a possibility of losing the
++           data cached in that internal cache in case of a power failure.
++           Starting from SCST 2.0, BLOCKIO works by default in
++           non-NV_CACHE mode, where each device is reported to remote
++           initiators as having write back caching, and the internal
++           device's cache is synchronized on each SYNCHRONIZE_CACHE
++           command from the initiators. It might lead to some PERFORMANCE
++           LOSS, so if you are sure about your power supply and want to
++           restore the 1.x behavior, you should recreate your BLOCKIO
++           devices in NV_CACHE mode.
++
++Pass-through mode
++-----------------
++
++In the pass-through mode (i.e. using the pass-through device handlers
++scst_disk, scst_tape, etc.) SCSI commands coming from remote
++initiators are passed to the local SCSI devices on the target as is,
++without any modifications.
++
++SCST supports 1-to-many pass-through, where several initiators can
++safely connect to a single pass-through device (a tape, for instance).
++For such cases SCST emulates all the necessary functionality.
++
++In the sysfs interface all real SCSI devices are listed in
++/sys/kernel/scst_tgt/devices in the form of host:channel:id:lun
++numbers, for instance 1:0:0:0. The recommended way to match those
++numbers to your devices is to use the lsscsi utility.
++
++Each pass-through dev handler has a "mgmt" file in its root
++subdirectory /sys/kernel/scst_tgt/handlers/handler_name, e.g.
++/sys/kernel/scst_tgt/handlers/dev_disk. It accepts the following
++commands, which can be sent to it using, e.g., the echo command.
++
++ - "add_device" - this command assigns SCSI device with
++host:channel:id:lun numbers to this dev handler.
++
++echo "add_device 1:0:0:0" >/sys/kernel/scst_tgt/handlers/dev_disk/mgmt
++
++will assign SCSI device 1:0:0:0 to this dev handler.
++
++ - "del_device" - this command unassigns SCSI device with
++host:channel:id:lun numbers from this dev handler.
++
++As usual, on read the "mgmt" file returns a short help text about the
++available commands.
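++
++For example, to find the H:C:I:L numbers of your local SCSI devices
++and then read that help text for the dev_disk handler, you can run:
++
++lsscsi
++cat /sys/kernel/scst_tgt/handlers/dev_disk/mgmt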
++
++You need to manually assign each of your real SCSI devices to the
++corresponding pass-through dev handler using the "add_device" command,
++otherwise the real SCSI devices will not be visible remotely. The
++assignment isn't done automatically, because it could lead to problems
++loading and initializing the pass-through dev handlers if any of the
++local real SCSI devices are malfunctioning.
++
++Like any other hardware, the local SCSI hardware cannot handle
++commands whose amount of data and/or number of scatter-gather segments
++exceeds some limits. Therefore, when using the pass-through mode you
++should make sure that the values for the maximum number of segments
++and the maximum amount of transferred data (max_sectors) for each SCSI
++command on the initiators' devices are not bigger than the
++corresponding values of the corresponding SCSI devices on the target.
++Otherwise you will see symptoms like small transfers working well
++while large ones stall, and messages like "Unable to complete command
++due to SG IO count limitation" being printed in the kernel logs.
++
++You can't control the scatter-gather segments limit from user space,
++but for block devices it is usually sufficient to set
++/sys/block/DEVICE_NAME/queue/max_sectors_kb on the initiators to the
++same or a lower value than /sys/block/DEVICE_NAME/queue/max_hw_sectors_kb
++of the corresponding devices on the target.
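++
++For example, if on the target
++
++cat /sys/block/sdb/queue/max_hw_sectors_kb
++
++reports 512, then on the initiator you can run
++
++echo 512 >/sys/block/sdc/queue/max_sectors_kb
++
++(sdb and sdc are only illustrative names for the same exported device
++on the target and initiator sides).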
++
++For non-block devices SCSI commands are usually generated directly by
++applications, so if you experience stalls of large transfers, you
++should check your application's documentation for how to limit the
++transfer sizes.
++
++Another way to solve this issue is to build SG entries with more than 1
++page each. See the following patch as an example:
++http://scst.sourceforge.net/sgv_big_order_alloc.diff
++
++Performance
++-----------
++
++SCST from the very beginning has been designed and implemented to
++provide the best possible performance. Since there is no "one size
++fits all" best performance configuration for different setups and
++loads, SCST provides an extensive set of settings to allow tuning it
++for the best performance in each particular case. You don't
++necessarily have to use those settings. If you don't, SCST will do a
++very good job of autotuning for you, so the resulting performance
++will, on average, be better (sometimes much better) than with other
++SCSI targets. But in some cases you can improve it even more by manual
++tuning.
++
++Before doing any performance measurements note that performance
++results very much depend on your type of load, so it is crucial that
++you choose the access mode (FILEIO, BLOCKIO, O_DIRECT, pass-through)
++which suits your needs best.
++
++In order to get the maximum performance you should:
++
++1. For SCST:
++
++ - Disable in Makefile CONFIG_SCST_STRICT_SERIALIZING, CONFIG_SCST_EXTRACHECKS,
++ CONFIG_SCST_TRACING, CONFIG_SCST_DEBUG*, CONFIG_SCST_STRICT_SECURITY,
++ CONFIG_SCST_MEASURE_LATENCY
++
++2. For target drivers:
++
++ - Disable in Makefiles CONFIG_SCST_EXTRACHECKS, CONFIG_SCST_TRACING,
++ CONFIG_SCST_DEBUG*
++
++3. For device handlers, including VDISK:
++
++ - Disable in Makefile CONFIG_SCST_TRACING and CONFIG_SCST_DEBUG.
++
++4. Make sure you have io_grouping_type option set correctly, especially
++in the following cases:
++
++ - Several initiators share your target's backstorage. It can be a
++   shared LU using some cluster FS, like VMFS, as well as different
++   LUs located on the same backstorage (RAID array). For instance,
++   this is the case if you have 3 initiators and each of them uses its
++   own dedicated FILEIO device file from the same RAID-6 array on the
++   target.
++
++   In this case, for the best performance you should have the
++   io_grouping_type option set to the value "never" in all the LUNs'
++   targets and security groups (see the example below).
++
++ - Your initiator is connected to your target in MPIO mode. In this
++   case, for the best performance you should:
++
++   * Either connect all the sessions from the initiator to a single
++     target or security group and have the io_grouping_type option set
++     to the value "this_group_only" in the target or security group,
++
++   * Or, if it isn't possible to connect all the sessions from the
++     initiator to a single target or security group, assign the same
++     numeric io_grouping_type value to each target/security group this
++     initiator is connected to. The exact value itself doesn't matter,
++     it is only important that all the targets/security groups use the
++     same value.
++
++Don't forget, io_grouping_type makes sense only if you use the CFQ I/O
++scheduler on the target, and only for devices with threads_num >= 0
++and, if threads_num > 0, with threads_pool_type "per_initiator".
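++
++For example, assuming an iSCSI target named iqn.2006-10.net.vlnb:tgt
++(the name is only illustrative) and assuming that io_grouping_type is
++exposed as a writable attribute in the target's sysfs directory,
++setting it to "never" could look like:
++
++echo never >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/io_grouping_type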
++
++You can check whether io_grouping_type is set correctly in your setup,
++as well as whether the "auto" io_grouping_type value works for you, by
++tests like the following:
++
++ - For the non-MPIO case you can run single thread sequential reading,
++   e.g. using buffered dd, from one initiator, then run the same
++   single thread sequential reading from the second initiator in
++   parallel (see the dd sketch after this list). If io_grouping_type
++   is set correctly, the aggregate throughput measured on the target
++   should only slightly decrease, and all initiators should have a
++   nearly equal share of it. If io_grouping_type is not set correctly,
++   the aggregate throughput and/or the throughput on any initiator
++   will decrease significantly, by a factor of 2 or even more. For
++   instance, suppose you have 80MB/s single thread sequential reading
++   from the target on any initiator. When both initiators are reading
++   in parallel, you should see an aggregate throughput on the target
++   of something like 70-75MB/s with a correct io_grouping_type, and
++   something like 35-40MB/s aggregate, or 8-10MB/s on one of the
++   initiators, with an incorrect one.
++
++ - For the MPIO case it's even easier. With an incorrect
++   io_grouping_type you simply won't see a performance increase from
++   adding the second session (assuming your hardware is capable of
++   transferring data through both sessions in parallel), or you can
++   even see a performance decrease.
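++
++A minimal sketch of the dd test mentioned above (the device name and
++transfer size are only illustrative): on the first initiator run
++
++dd if=/dev/sdb of=/dev/null bs=1M count=2000
++
++and, while it is running, start the same command on the second
++initiator, then compare the reported throughputs with the single
++initiator case.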
++
++5. If you are going to use your target in a VM environment, for
++instance as a shared storage with VMware, make sure all your VMs are
++connected to the target via *separate* sessions. For instance, for
++iSCSI it means that each VM has its own connection to the target, not
++that all VMs are connected through a single connection. You can check
++it using the SCST sysfs interface. For other transports you should use
++the available facilities, like NPIV for Fibre Channel, to make
++separate sessions for each VM. If you miss this, you can greatly lose
++performance of parallel access to your target from different VMs. This
++doesn't apply to the case where your VMs are using the same shared
++storage, like with VMFS, for instance. In this case all your VM hosts
++will be connected to the target via separate sessions, which is enough.
++
++6. For other target and initiator software parts:
++
++ - Make sure you have applied all available SCST patches to your
++   kernel. If such a patch doesn't exist for your kernel version, it
++   is strongly recommended to upgrade your kernel to a version for
++   which it exists.
++
++ - Don't enable debug/hacking features in the kernel, i.e. use them as
++ they are by default.
++
++ - The default kernel read-ahead and queuing settings are optimized
++   for locally attached disks, therefore they are not optimal if the
++   disks are attached remotely (the SCSI target case), which can
++   sometimes lead to unexpectedly low throughput. You should increase
++   the read-ahead size to at least 512KB or even more on all
++   initiators and on the target.
++
++   You should also limit on all initiators the maximum number of
++   sectors per SCSI command. This tuning is also recommended on
++   targets with large read-ahead values. To do it on Linux, run:
++
++   echo "64" > /sys/block/sdX/queue/max_sectors_kb
++
++   where instead of X you specify the letter of the device imported
++   from the target, like 'b', i.e. sdb.
++
++ To increase read-ahead size on Linux, run:
++
++ blockdev --setra N /dev/sdX
++
++ where N is a read-ahead number in 512-byte sectors and X is a device
++ letter like above.
++
++   Note: you need to set the read-ahead setting for device sdX again
++   after you change the maximum number of sectors per SCSI command for
++   that device.
++
++   Note2: you need to restart SCST after you change the read-ahead
++   settings on the target.
++
++ - You may need to increase the number of requests that the OS on the
++   initiator sends to the target device. To do it on Linux initiators,
++   run
++
++   echo "64" > /sys/block/sdX/queue/nr_requests
++
++   where X is a device letter like above.
++
++ You may also experiment with other parameters in /sys/block/sdX
++ directory, they also affect performance. If you find the best values,
++ please share them with us.
++
++ - On the target use the CFQ IO scheduler (see the example after this
++   list). In most cases it has a performance advantage over other IO
++   schedulers, sometimes a huge one (2+ times aggregate throughput
++   increase).
++
++ - It is recommended to turn the kernel preemption off, i.e. set
++ the kernel preemption model to "No Forced Preemption (Server)".
++
++ - It looks like XFS is the best filesystem on the target to store
++   device files, because it allows considerably better linear write
++   throughput than ext3.
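++
++For example, to check and then set the IO scheduler of a backstorage
++disk sdb on the target (the device name is only illustrative):
++
++cat /sys/block/sdb/queue/scheduler
++echo cfq >/sys/block/sdb/queue/scheduler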
++
++7. For hardware on the target:
++
++ - Make sure that your target hardware (e.g. target FC or network
++   card) and underlying IO hardware (e.g. IO card, like SATA, SCSI or
++   RAID, to which your disks are connected) don't share the same PCI
++   bus. You can check it using the lspci utility. They have to work in
++   parallel, so it will be better if they don't compete for the bus.
++   The problem is not only in the bandwidth which they have to share,
++   but also in the interaction between the cards during that
++   competition. This is very important, because in some cases, if the
++   target and backend storage controllers share the same PCI bus, it
++   can lead to up to 5-10 times lower performance than expected.
++   Moreover, some motherboards (particularly by Supermicro) have
++   serious stability issues if there are several high speed devices on
++   the same bus working in parallel. If you have no choice but PCI bus
++   sharing, set the PCI latency in the BIOS as low as possible.
++
++8. If you use the VDISK IO module in FILEIO mode, the NV_CACHE option
++will provide you the best performance. But when using it, make sure
++you use a good UPS with the ability to shut down the target on a power
++failure.
++
++Baseline performance numbers can be found in these measurements:
++http://lkml.org/lkml/2009/3/30/283.
++
++IMPORTANT: If you use some versions of Windows (at least W2K) on the
++========= initiator, you can't get good write performance for VDISK
++          FILEIO devices with the default 512 byte block size. You could
++          get about 10% of the expected performance. This is because of
++          the partition alignment, which is (simplifying) incompatible
++          with how the Linux page cache works, so for each write the
++          corresponding block must be read first. Use a 4096 byte block
++          size for VDISK devices and you will have the expected write
++          performance. Actually, any OS on the initiators, not only
++          Windows, will benefit from a block size of
++          max(PAGE_SIZE, BLOCK_SIZE_ON_UNDERLYING_FS), where PAGE_SIZE
++          is the page size and BLOCK_SIZE_ON_UNDERLYING_FS is the block
++          size of the underlying FS, on which the device file is
++          located, or 0 if a device node is used. Both values are from
++          the target. See also the important notes about setting block
++          sizes >512 bytes for VDISK FILEIO devices above.
++
++9. In some cases, for instance when working with SSD devices, which
++consume 100% of a single CPU for data transfers in their internal
++threads, to maximize IOPS it can be necessary to assign dedicated CPUs
++to those threads using the Linux CPU affinity facilities. No IRQ
++processing should be done on those CPUs. Check that using
++/proc/interrupts. See the taskset command and
++Documentation/IRQ-affinity.txt in your kernel's source tree for how to
++assign CPU affinity to tasks and IRQs.
++
++The reason is that processing of incoming commands in SIRQ context
++might be done on the same CPUs as the SSD devices' threads doing data
++transfers. As a result, those threads won't receive all the processing
++power of those CPUs and will perform worse.
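++
++For example, to pin such a thread to CPUs 2-3 and keep IRQ 17 on CPUs
++0-1 (the PID 12345, the CPU numbers and the IRQ number are only
++illustrative):
++
++taskset -pc 2,3 12345
++echo 3 >/proc/irq/17/smp_affinity
++
++Then check /proc/interrupts to verify that no interrupts are serviced
++on CPUs 2 and 3.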
++
++Work if target's backstorage or link is too slow
++------------------------------------------------
++
++Under high I/O load, when your target's backstorage gets overloaded,
++or when working over a slow link between initiator and target, where
++the link can't serve all the queued commands on time, you can
++experience I/O stalls or see abort or reset messages in the kernel
++log.
++
++First, consider the case of a too slow target backstorage. On some
++seek intensive workloads even fast disks or RAIDs, which are able to
++serve a continuous data stream at 500+ MB/s, can be as slow as 0.3
++MB/s. Another possible cause can be MD/LVM/RAID on your target, as in
++http://lkml.org/lkml/2008/2/27/96 (check the whole thread as well).
++
++Thus, in such situations processing of one or more commands simply
++takes too long, hence the initiator decides that they are stuck on the
++target and tries to recover. In particular, it is known that the
++default number of simultaneously queued commands (48) is sometimes too
++high if you do intensive writes from VMware to a target disk which
++uses LVM in snapshot mode. In this case a value like 16, or even 8-10,
++depending on your backstorage speed, could be more appropriate.
++
++Unfortunately, SCST currently lacks dynamic I/O flow control, where
++the queue depth on the target is dynamically decreased/increased based
++on how slow/fast the backstorage is compared to the target link. So,
++there are 6 possible actions which you can take to work around or fix
++this issue in this case:
++
++1. Ignore incoming task management (TM) commands. It's fine if there
++are not too many of them, so that average performance isn't hurt and
++the corresponding device isn't put offline, i.e. if the backstorage
++isn't too slow.
++
++2. Decrease /sys/block/sdX/device/queue_depth on the initiator, if
++it's Linux (see below how), and/or the SCST_MAX_TGT_DEV_COMMANDS
++constant in the scst_priv.h file until you stop seeing incoming TM
++commands. The iSCSI-SCST driver also has its own iSCSI specific
++parameter for that, see its README file.
++
++To decrease the device queue depth on Linux initiators you can run the
++command:
++
++# echo Y >/sys/block/sdX/device/queue_depth
++
++where Y is the new number of simultaneously queued commands and X is
++your imported device letter, like 'a' for the sda device. There are no
++special limitations for the Y value, it can be any value from 1 to the
++possible maximum (usually 32), so start by dividing the current value
++by 2, i.e. set 16 if /sys/block/sdX/device/queue_depth contains 32.
++
++3. Increase the corresponding timeout on the initiator. For Linux it
++is located in
++/sys/devices/platform/host*/session*/target*:0:0/*:0:0:1/timeout. It
++can be done automatically by a udev rule. For instance, the following
++rule will increase it to 300 seconds:
++
++SUBSYSTEM=="scsi", KERNEL=="[0-9]*:[0-9]*", ACTION=="add", ATTR{type}=="0|7|14", ATTR{timeout}="300"
++
++By default, this timeout is 30 or 60 seconds, depending on your distribution.
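++
++For example, the rule can be put into a file like
++/etc/udev/rules.d/99-scsi-timeout.rules (the file name is only
++illustrative) and activated without a reboot by running:
++
++udevadm control --reload-rules
++udevadm trigger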
++
++4. Try to avoid such seek intensive workloads.
++
++5. Increase the speed of the target's backstorage.
++
++6. Implement dynamic I/O flow control in SCST. This would be the
++ultimate solution. See the "Dynamic I/O flow control" section on the
++http://scst.sourceforge.net/contributing.html page for a possible
++implementation idea.
++
++Next, consider the case of a too slow link between initiator and
++target, where the initiator tries to simultaneously push N commands
++over it. In this case the time to serve those commands, i.e. to send
++or receive their data over the link, can be longer than the timeout of
++any single command, hence one or more commands in the tail of the
++queue cannot be served within the timeout, so the initiator will
++decide that they are stuck on the target and will try to recover.
++
++To work around/fix this issue in this case you can use ways 1, 2, 3, 6
++above or (7): increase the speed of the link between target and
++initiator. But with some initiator implementations there might be
++cases for WRITE commands when the target has no way to detect the
++issue, so dynamic I/O flow control will not be able to help. In those
++cases you may also need to either decrease the queue depth (way 2) or
++increase the corresponding timeout (way 3) on the initiator(s).
++
++Note that logged messages about QUEUE_FULL status are quite different
++in nature. This is normal operation, just SCSI flow control in action.
++Simply don't enable the "mgmt_minor" logging level, or, alternatively,
++if you are confident in the worst case performance of your back-end
++storage or initiator-target link, you can increase
++SCST_MAX_TGT_DEV_COMMANDS in scst_priv.h to 64. Usually initiators
++don't try to push more commands to the target.
++
++Credits
++-------
++
++Thanks to:
++
++ * Mark Buechler <mark.buechler@gmail.com> for a lot of useful
++ suggestions, bug reports and help in debugging.
++
++ * Ming Zhang <mingz@ele.uri.edu> for fixes and comments.
++
++ * Nathaniel Clark <nate@misrule.us> for fixes and comments.
++
++ * Calvin Morrow <calvin.morrow@comcast.net> for testing and useful
++ suggestions.
++
++ * Hu Gang <hugang@soulinfo.com> for the original version of the
++ LSI target driver.
++
++ * Erik Habbinga <erikhabbinga@inphase-tech.com> for fixes and support
++ of the LSI target driver.
++
++ * Ross S. W. Walker <rswwalker@hotmail.com> for the original block IO
++ code and Vu Pham <huongvp@yahoo.com> who updated it for the VDISK dev
++ handler.
++
++ * Michael G. Byrnes <michael.byrnes@hp.com> for fixes.
++
++ * Alessandro Premoli <a.premoli@andxor.it> for fixes
++
++ * Nathan Bullock <nbullock@yottayotta.com> for fixes.
++
++ * Terry Greeniaus <tgreeniaus@yottayotta.com> for fixes.
++
++ * Krzysztof Blaszkowski <kb@sysmikro.com.pl> for many fixes and bug reports.
++
++ * Jianxi Chen <pacers@users.sourceforge.net> for fixing problem with
++ devices >2TB in size
++
++ * Bart Van Assche <bvanassche@acm.org> for a lot of help
++
++ * Daniel Debonzi <debonzi@linux.vnet.ibm.com> for a big part of the
++ initial SCST sysfs tree implementation
++
++Vladislav Bolkhovitin <vst@vlnb.net>, http://scst.sourceforge.net
+diff -uprN orig/linux-2.6.36/Documentation/scst/SysfsRules linux-2.6.36/Documentation/scst/SysfsRules
+--- orig/linux-2.6.36/Documentation/scst/SysfsRules
++++ linux-2.6.36/Documentation/scst/SysfsRules
+@@ -0,0 +1,933 @@
++ SCST SYSFS interface rules
++ ==========================
++
++This file describes the SYSFS interface rules, which all SCST target
++drivers, dev handlers and management utilities MUST follow. This
++allows having a simple, self-documented management interface that is
++independent of target drivers and dev handlers.
++
++Words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
++"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
++document are to be interpreted as described in RFC 2119.
++
++In this document "key attribute" means a configuration attribute with
++not default value, which must be configured during the target driver's
++initialization. A key attribute MUST have in the last line keyword
++"[key]". If a default value set to a key attribute, it becomes a regular
++none-key attribute. For instance, iSCSI target has attribute DataDigest.
++Default value for this attribute is "None". It value "CRC32C" is set to
++this attribute, it will become a key attribute. If value "None" is again
++set, this attribute will become back to a none-key attribute.
++
++Each user configurable attribute with a not default value MUST be marked
++as key attribute.
++
++Key attributes SHOULD NOT have sysfs names finished on digits, because
++such names SHOULD be used to store several attributes with the same name
++on the sysfs tree where duplicated names are not allowed. For instance,
++iSCSI targets can have several incoming user names, so the corresponding
++attribute should have sysfs name "IncomingUser". If there are 2 user
++names, they should have sysfs names "IncomingUser" and "IncomingUser1".
++In other words, all "IncomingUser[0-9]*" names should be considered as
++different instances of the same "IncomingUser" attribute.
++
++I. Rules for target drivers
++===========================
++
++SCST core for each target driver (struct scst_tgt_template) creates a
++root subdirectory in /sys/kernel/scst_tgt/targets with name
++scst_tgt_template.name (called "target_driver_name" further in this
++document).
++
++For each target (struct scst_tgt) SCST core creates a root subdirectory
++in /sys/kernel/scst_tgt/targets/target_driver_name with name
++scst_tgt.tgt_name (called "target_name" further in this document).
++
++There are 2 types of targets possible: hardware and virtual targets.
++Hardware targets are targets corresponding to real hardware, for
++instance, a Fibre Channel adapter's port. Virtual targets are hardware
++independent targets, which can be dynamically added or removed, for
++instance, an iSCSI target or an NPIV Fibre Channel target.
++
++A target driver supporting virtual targets MUST support "mgmt" attribute
++and "add_target"/"del_target" commands.
++
++If target driver supports both hardware and virtual targets (for
++instance, an FC adapter supporting NPIV, which has hardware targets for
++its physical ports as well as virtual NPIV targets), it MUST create each
++hardware target with hw_target mark to make SCST core create "hw_target"
++attribute (see below).
++
++Attributes for target drivers
++-----------------------------
++
++A target driver MAY support in its root subdirectory the following
++optional attributes. Target drivers MAY also support there other
++read-only or read-writable attributes.
++
++1. "enabled" - this attribute MUST allow to enable and disable target
++driver as a whole, i.e. if disabled, the target driver MUST NOT accept
++new connections. The goal of this attribute is to allow the target
++driver's initial configuration. For instance, iSCSI target may need to
++have discovery user names and passwords set before it starts serving
++discovery connections.
++
++This attribute MUST have read and write permissions for superuser and be
++read-only for other users.
++
++On read it MUST return 0, if the target driver is disabled, and 1, if it
++is enabled.
++
++On write it MUST accept '0' character as request to disable and '1' as
++request to enable, but MAY also accept other driver specific commands.
++
++During disabling the target driver MAY close already connected sessions
++in all targets, but this is OPTIONAL.
++
++MUST be 0 by default.
++
++2. "trace_level" - this attribute SHOULD allow to change log level of this
++driver.
++
++This attribute SHOULD have read and write permissions for superuser and be
++read-only for other users.
++
++On read it SHOULD return a help text about available command and log levels.
++
++On write it SHOULD accept commands to change log levels according to the
++help text.
++
++For example:
++
++out_of_mem | minor | pid | line | function | special | mgmt | mgmt_dbg | flow_control | conn
++
++Usage:
++ echo "all|none|default" >trace_level
++ echo "value DEC|0xHEX|0OCT" >trace_level
++ echo "add|del TOKEN" >trace_level
++
++where TOKEN is one of [debug, function, line, pid,
++ entryexit, buff, mem, sg, out_of_mem,
++ special, scsi, mgmt, minor,
++ mgmt_dbg, scsi_serializing,
++ retry, recv_bot, send_bot, recv_top,
++ send_top, d_read, d_write, conn, conn_dbg, iov, pdu, net_page]
++
++3. "version" - this read-only for all attribute SHOULD return version of
++the target driver and some info about its enabled compile time facilities.
++
++For example:
++
++2.0.0
++EXTRACHECKS
++DEBUG
++
++4. "mgmt" - if supported this attribute MUST allow to add and delete
++targets, if virtual targets are supported by this driver, as well as it
++MAY allow to add and delete the target driver's or its targets'
++attributes.
++
++This attribute MUST have read and write permissions for superuser and be
++read-only for other users.
++
++On read it MUST return a help string describing available commands,
++parameters and attributes.
++
++To achieve that, the target driver should just correctly set the
++following fields in its struct scst_tgt_template: mgmt_cmd_help,
++add_target_parameters, tgtt_optional_attributes and
++tgt_optional_attributes.
++
++For example:
++
++Usage: echo "add_target target_name [parameters]" >mgmt
++ echo "del_target target_name" >mgmt
++ echo "add_attribute <attribute> <value>" >mgmt
++ echo "del_attribute <attribute> <value>" >mgmt
++ echo "add_target_attribute target_name <attribute> <value>" >mgmt
++ echo "del_target_attribute target_name <attribute> <value>" >mgmt
++
++where parameters are one or more param_name=value pairs separated by ';'
++
++The following target driver attributes available: IncomingUser, OutgoingUser
++The following target attributes available: IncomingUser, OutgoingUser, allowed_portal
++
++4.1. "add_target" - if supported, this command MUST add new target with
++name "target_name" and specified optional or required parameters. Each
++parameter MUST be in form "parameter=value". All parameters MUST be
++separated by ';' symbol.
++
++All target drivers supporting creation of virtual targets MUST support
++this command.
++
++All target drivers supporting "add_target" command MUST support all
++read-only targets' key attributes as parameters to "add_target" command
++with the attributes' names as parameters' names and the attributes'
++values as parameters' values.
++
++For example:
++
++echo "add_target TARGET1 parameter1=1; parameter2=2" >mgmt
++
++will add target with name "TARGET1" and parameters with names
++"parameter1" and "parameter2" with values 1 and 2 correspondingly.
++
++4.2. "del_target" - if supported, this command MUST delete target with
++name "target_name". If "add_target" command is supported "del_target"
++MUST also be supported.
++
++4.3. "add_attribute" - if supported, this command MUST add a target
++driver's attribute with the specified name and one or more values.
++
++All target drivers supporting run time creation of the target driver's
++key attributes MUST support this command.
++
++For example, for iSCSI target:
++
++echo "add_attribute IncomingUser name password" >mgmt
++
++will add for discovery sessions an incoming user (attribute
++/sys/kernel/scst_tgt/targets/iscsi/IncomingUser) with name "name" and
++password "password".
++
++4.4. "del_attribute" - if supported, this command MUST delete target
++driver's attribute with the specified name and values. The values MUST
++be specified, because in some cases attributes MAY internally be
++distinguished by values. For instance, iSCSI target might have several
++incoming users. If not needed, target driver might ignore the values.
++
++If "add_attribute" command is supported "del_attribute" MUST
++also be supported.
++
++4.5. "add_target_attribute" - if supported, this command MUST add new
++attribute for the specified target with the specified name and one or
++more values.
++
++All target drivers supporting run time creation of targets' key
++attributes MUST support this command.
++
++For example:
++
++echo "add_target_attribute iqn.2006-10.net.vlnb:tgt IncomingUser name password" >mgmt
++
++will add for target with name "iqn.2006-10.net.vlnb:tgt" an incoming
++user (attribute
++/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/IncomingUser)
++with name "name" and password "password".
++
++4.6. "del_target_attribute" - if supported, this command MUST delete
++target's attribute with the specified name and values. The values MUST
++be specified, because in some cases attributes MAY internally be
++distinguished by values. For instance, iSCSI target might have several
++incoming users. If not needed, target driver might ignore the values.
++
++If "add_target_attribute" command is supported "del_target_attribute"
++MUST also be supported.
++
++Attributes for targets
++----------------------
++
++Each target MAY support in its root subdirectory the following optional
++attributes. Target drivers MAY also support there other read-only or
++read-writable attributes.
++
++1. "enabled" - this attribute MUST allow to enable and disable the
++corresponding target, i.e. if disabled, the target MUST NOT accept new
++connections. The goal of this attribute is to allow the target's initial
++configuration. For instance, each target needs to have its LUNs setup
++before it starts serving initiators. Another example is iSCSI target,
++which may need to have initialized a number of iSCSI parameters before
++it starts accepting new iSCSI connections.
++
++This attribute MUST have read and write permissions for superuser and be
++read-only for other users.
++
++On read it MUST return 0, if the target is disabled, and 1, if it is
++enabled.
++
++On write it MUST accept '0' character as request to disable and '1' as
++request to enable. Other requests MUST be rejected.
++
++SCST core provides some facilities, which MUST be used to implement this
++attribute.
++
++During disabling the target driver MAY close already connected sessions
++to the target, but this is OPTIONAL.
++
++MUST be 0 by default.
++
++SCST core will automatically create for all targets the following
++attributes:
++
++1. "rel_tgt_id" - allows to read or write SCSI Relative Target Port
++Identifier attribute.
++
++2. "hw_target" - allows to distinguish hardware and virtual targets, if
++the target driver supports both.
++
++To provide the OPTIONAL force close session functionality, target
++drivers MUST implement it using a write-only "force_close" session
++attribute, which on write MUST close the corresponding session.
++
++See SCST core's README for more info about those attributes.
++
++II. Rules for dev handlers
++==========================
++
++There are 2 types of dev handlers: parent dev handlers and children
++dev handlers. The children dev handlers depend on the parent dev
++handlers.
++
++SCST core for each parent dev handler (struct scst_dev_type with
++parent member with value NULL) creates a root subdirectory in
++/sys/kernel/scst_tgt/handlers with name scst_dev_type.name (called
++"dev_handler_name" further in this document).
++
++Parent dev handlers can have one or more subdirectories for children
++dev handlers, named after the children's scst_dev_type.name.
++
++Only one level of the dev handlers' parent/children hierarchy is
++allowed. Parent dev handlers, which support children dev handlers, MUST
++NOT handle devices and MUST be only placeholders for the children dev
++handlers.
++
++Further in this document children dev handlers or parent dev handlers,
++which don't support children, will be called "end level dev handlers".
++
++End level dev handlers can be recognized by existence of the "mgmt"
++attribute.
++
++For each device (struct scst_device) SCST core creates a root
++subdirectory in /sys/kernel/scst_tgt/devices/device_name with name
++scst_device.virt_name (called "device_name" further in this document).
++
++Attributes for dev handlers
++---------------------------
++
++Each dev handler MUST have in its root subdirectory a "mgmt"
++attribute, which MUST support the "add_device" and "del_device"
++commands as described below.
++
++Parent dev handlers and end level dev handlers without parents MAY
++support in their root subdirectories the following optional
++attributes. They MAY also support other read-only or read-writable
++attributes there.
++
++1. "trace_level" - this attribute SHOULD allow to change log level of this
++driver.
++
++This attribute SHOULD have read and write permissions for superuser and be
++read-only for other users.
++
++On read it SHOULD return a help text about available command and log levels.
++
++On write it SHOULD accept commands to change log levels according to the
++help text.
++
++For example:
++
++out_of_mem | minor | pid | line | function | special | mgmt | mgmt_dbg
++
++Usage:
++ echo "all|none|default" >trace_level
++ echo "value DEC|0xHEX|0OCT" >trace_level
++ echo "add|del TOKEN" >trace_level
++
++where TOKEN is one of [debug, function, line, pid,
++ entryexit, buff, mem, sg, out_of_mem,
++ special, scsi, mgmt, minor,
++ mgmt_dbg, scsi_serializing,
++ retry, recv_bot, send_bot, recv_top,
++ send_top]
++
++2. "version" - this read-only for all attribute SHOULD return version of
++the dev handler and some info about its enabled compile time facilities.
++
++For example:
++
++2.0.0
++EXTRACHECKS
++DEBUG
++
++End level dev handlers in their root subdirectories MUST support "mgmt"
++attribute and MAY support other read-only or read-writable attributes.
++This attribute MUST have read and write permissions for superuser and be
++read-only for other users.
++
++Attribute "mgmt" for virtual devices dev handlers
++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++
++For virtual devices dev handlers "mgmt" attribute MUST allow to add and
++delete devices as well as it MAY allow to add and delete the dev
++handler's or its devices' attributes.
++
++On read it MUST return a help string describing available commands and
++parameters.
++
++To achieve that, the dev handler should just correctly set the
++following fields in its struct scst_dev_type: mgmt_cmd_help,
++add_device_parameters, devt_optional_attributes and
++dev_optional_attributes.
++
++For example:
++
++Usage: echo "add_device device_name [parameters]" >mgmt
++ echo "del_device device_name" >mgmt
++ echo "add_attribute <attribute> <value>" >mgmt
++ echo "del_attribute <attribute> <value>" >mgmt
++ echo "add_device_attribute device_name <attribute> <value>" >mgmt
++ echo "del_device_attribute device_name <attribute> <value>" >mgmt
++
++where parameters are one or more param_name=value pairs separated by ';'
++
++The following parameters available: filename, blocksize, write_through, nv_cache, o_direct, read_only, removable
++The following device driver attributes available: AttributeX, AttributeY
++The following device attributes available: AttributeDX, AttributeDY
++
++1. "add_device" - this command MUST add new device with name
++"device_name" and specified optional or required parameters. Each
++parameter MUST be in form "parameter=value". All parameters MUST be
++separated by ';' symbol.
++
++All dev handlers supporting "add_device" command MUST support all
++read-only devices' key attributes as parameters to "add_device" command
++with the attributes' names as parameters' names and the attributes'
++values as parameters' values.
++
++For example:
++
++echo "add_device device1 parameter1=1; parameter2=2" >mgmt
++
++will add device with name "device1" and parameters with names
++"parameter1" and "parameter2" with values 1 and 2 correspondingly.
++
++2. "del_device" - this command MUST delete device with name
++"device_name".
++
++3. "add_attribute" - if supported, this command MUST add a device
++driver's attribute with the specified name and one or more values.
++
++All dev handlers supporting run time creation of the dev handler's
++key attributes MUST support this command.
++
++For example:
++
++echo "add_attribute AttributeX ValueX" >mgmt
++
++will add attribute
++/sys/kernel/scst_tgt/handlers/dev_handler_name/AttributeX with value ValueX.
++
++4. "del_attribute" - if supported, this command MUST delete device
++driver's attribute with the specified name and values. The values MUST
++be specified, because in some cases attributes MAY internally be
++distinguished by values. If not needed, dev handler might ignore the
++values.
++
++If "add_attribute" command is supported "del_attribute" MUST also be
++supported.
++
++5. "add_device_attribute" - if supported, this command MUST add new
++attribute for the specified device with the specified name and one or
++more values.
++
++All dev handlers supporting run time creation of devices' key attributes
++MUST support this command.
++
++For example:
++
++echo "add_device_attribute device1 AttributeDX ValueDX" >mgmt
++
++will add for device with name "device1" attribute
++/sys/kernel/scst_tgt/devices/device_name/AttributeDX) with value
++ValueDX.
++
++6. "del_device_attribute" - if supported, this command MUST delete
++device's attribute with the specified name and values. The values MUST
++be specified, because in some cases attributes MAY internally be
++distinguished by values. If not needed, dev handler might ignore the
++values.
++
++If "add_device_attribute" command is supported "del_device_attribute"
++MUST also be supported.
++
++Attribute "mgmt" for pass-through devices dev handlers
++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++
++For pass-through device dev handlers the "mgmt" attribute MUST allow
++assigning this dev handler to existing SCSI devices and unassigning it
++from them via the "add_device" and "del_device" commands
++correspondingly.
++
++On read it MUST return a help string describing available commands and
++parameters.
++
++For example:
++
++Usage: echo "add_device H:C:I:L" >mgmt
++ echo "del_device H:C:I:L" >mgmt
++
++1. "add_device" - this command MUST assign SCSI device with
++host:channel:id:lun numbers to this dev handler.
++
++All pass-through dev handlers MUST support this command.
++
++For example:
++
++echo "add_device 1:0:0:0" >mgmt
++
++will assign SCSI device 1:0:0:0 to this dev handler.
++
++2. "del_device" - this command MUST unassign SCSI device with
++host:channel:id:lun numbers from this dev handler.
++
++SCST core will automatically create for all dev handlers the following
++attributes:
++
++1. "type" - SCSI type of device this dev handler can handle.
++
++See SCST core's README for more info about those attributes.
++
++Attributes for devices
++----------------------
++
++Each device MAY support in its root subdirectory any read-only or
++read-writable attributes.
++
++SCST core will automatically create for all devices the following
++attributes:
++
++1. "type" - SCSI type of this device
++
++See SCST core's README for more info about those attributes.
++
++III. Rules for management utilities
++===================================
++
++Rules summary
++-------------
++
++A management utility (scstadmin) SHOULD NOT keep any knowledge specific
++to any device, dev handler, target or target driver. It SHOULD only know
++the common SCST SYSFS rules, which all dev handlers and target drivers
++MUST follow. Namely:
++
++Common rules:
++~~~~~~~~~~~~~
++
++1. All key attributes MUST be marked by the "[key]" mark in the last
++line of the attribute.
++
++2. All non-key attributes don't matter and SHOULD be ignored (a sketch
++of how key attributes can be detected follows below).
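++
++For illustration, a minimal shell sketch of how a utility could detect
++the key attributes of the SCST core (the same approach works for any
++other SCST sysfs directory):
++
++for attr in /sys/kernel/scst_tgt/*; do
++    [ -f "$attr" ] || continue
++    if [ "$(tail -n 1 "$attr" 2>/dev/null)" = "[key]" ]; then
++        echo "key attribute: $attr"
++    fi
++done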
++
++For target drivers and targets:
++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++
++1. If target driver supports adding new targets, it MUST have "mgmt"
++attribute, which MUST support "add_target" and "del_target" commands as
++specified above.
++
++2. If target driver supports run time adding new key attributes, it MUST
++have "mgmt" attribute, which MUST support "add_attribute" and
++"del_attribute" commands as specified above.
++
++3. If target driver supports both hardware and virtual targets, all its
++hardware targets MUST have "hw_target" attribute with value 1.
++
++4. If target has read-only key attributes, the add_target command MUST
++support them as parameters.
++
++5. If target supports run time adding new key attributes, the target
++driver MUST have "mgmt" attribute, which MUST support
++"add_target_attribute" and "del_target_attribute" commands as specified
++above.
++
++6. Both target drivers and targets MAY support the "enabled"
++attribute. If supported, after configuring the corresponding target
++driver or target, "1" MUST be written to this attribute in the
++following order: first for all targets of the target driver, then for
++the target driver itself.
++
++For devices and dev handlers:
++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++
++1. Each dev handler in its root subdirectory MUST have "mgmt" attribute.
++
++2. Each dev handler MUST support "add_device" and "del_device" commands
++to the "mgmt" attribute as specified above.
++
++3. If dev handler driver supports run time adding new key attributes, it
++MUST support "add_attribute" and "del_attribute" commands to the "mgmt"
++attribute as specified above.
++
++4. All device handlers have links in the root subdirectory pointing to
++their devices.
++
++5. If device has read-only key attributes, the "add_device" command MUST
++support them as parameters.
++
++6. If device supports run time adding new key attributes, its dev
++handler MUST support "add_device_attribute" and "del_device_attribute"
++commands to the "mgmt" attribute as specified above.
++
++7. Each device has "handler" link to its dev handler's root
++subdirectory.
++
++How to distinguish and process different types of attributes
++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++
++Since management utilities are only interested in key attributes, they
++should simply ignore all non-key attributes, like
++devices/device_name/type or targets/target_driver/target_name/version,
++no matter whether they are read-only or writable. So, the word "key"
++will be omitted later in this section.
++
++First of all, any attribute can be a key attribute, no matter how it
++is created.
++
++All attributes existing at configuration save time should be treated
++the same. Management utilities shouldn't try to separate them in any
++way in config files.
++
++1. Always existing attributes
++-----------------------------
++
++There are 2 types of them:
++
++1.1. Writable, like devices/device_name/t10_dev_id or
++targets/qla2x00tgt/target_name/explicit_confirmation. They are the
++simplest and all the values can just be read and written from/to them.
++
++On the configuration save time they can be distinguished as existing.
++
++On the write configuration time they can be distinguished as existing
++and writable.
++
++1.2. Read-only, like devices/fileio_device_name/filename or
++devices/fileio_device_name/block_size. They are also easy to distinguish
++looking at the permissions.
++
++On the configuration save time they can be distinguished the same as for
++(1.1) as existing.
++
++On the write configuration time they can be distinguished as existing
++and read-only. They all should be passed to "add_target" or
++"add_device" commands for virtual targets and devices correspondingly.
++To apply changes to them, the whole corresponding object
++(fileio_device_name in this example) should be removed then recreated.
++
++2. Optional
++-----------
++
++For instance, targets/iscsi/IncomingUser or
++targets/iscsi/target_name/IncomingUser. There are 4 types of them:
++
++2.1. Global for target drivers and dev handlers
++-----------------------------------------------
++
++For instance, targets/iscsi/IncomingUser or handlers/vdisk_fileio/XX
++(none at the moment).
++
++On the configuration save time they can be distinguished the same as for
++(1.1).
++
++On the write configuration time they can be distinguished as one of 4
++choices:
++
++2.1.1. Existing and writable. In this case they should be treated as
++(1.1)
++
++2.1.2. Existing and read-only. In this case they should be treated as
++(1.2).
++
++2.1.3. Not existing. In this case they should be added using
++"add_attribute" command.
++
++2.1.4. Existing in the sysfs tree and not existing in the config file.
++In this case they should be deleted using "del_attribute" command.
++
++2.2. Global for targets
++-----------------------
++
++For instance, targets/iscsi/target_name/IncomingUser.
++
++On the configuration save time they can be distinguished the same as (1.1).
++
++On the write configuration time they can be distinguished as one of 4
++choices:
++
++2.2.1. Existing and writable. In this case they should be treated as
++(1.1).
++
++2.2.2. Existing and read-only. In this case they should be treated as
++(1.2).
++
++2.2.3. Not existing. In this case they should be added using
++"add_target_attribute" command.
++
++2.2.4. Existing in the sysfs tree and not existing in the config file.
++In this case they should be deleted using "del_target_attribute"
++command.
++
++2.3. Global for devices
++-----------------------
++
++For instance, devices/nullio/t10_dev_id.
++
++On the configuration save time they can be distinguished the same as (1.1).
++
++On the write configuration time they can be distinguished as one of 4
++choices:
++
++2.3.1. Existing and writable. In this case they should be treated as
++(1.1)
++
++2.3.2. Existing and read-only. In this case they should be treated as
++(1.2).
++
++2.3.3. Not existing. In this case they should be added using
++"add_device_attribute" command for the corresponding handler, e.g.
++devices/nullio/handler/.
++
++2.3.4. Existing in the sysfs tree and not existing in the config file.
++In this case they should be deleted using "del_device_attribute"
++command for the corresponding handler, e.g. devices/nullio/handler/.
++
++Thus, management utility should implement only 8 procedures: (1.1),
++(1.2), (2.1.3), (2.1.4), (2.2.3), (2.2.4), (2.3.3), (2.3.4).
++
++How to distinguish hardware and virtual targets
++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++
++A target is a hardware target:
++
++ * if both the "hw_target" attribute and the "mgmt" management file
++   exist,
++
++ * or if neither of them exists.
++
++A target is a virtual target if the "mgmt" file exists and the
++"hw_target" attribute doesn't.
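++
++For illustration, a minimal shell sketch of this check (the driver and
++target names are only illustrative):
++
++drv=/sys/kernel/scst_tgt/targets/iscsi
++tgt=$drv/iqn.2006-10.net.vlnb:tgt
++if [ -e "$drv/mgmt" ] && [ ! -e "$tgt/hw_target" ]; then
++    echo "virtual target"
++else
++    echo "hardware target"
++fi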
++
++Algorithm to convert current SCST configuration to config file
++--------------------------------------------------------------
++
++A management utility SHOULD use the following algorithm when
++converting the current SCST configuration to a config file.
++
++For all attributes with digits at the end of the name, the digits part
++should be omitted from the attributes' names during the store. For
++instance, "IncomingUser1" should be stored as "IncomingUser".
++
++1. Scan all attributes in /sys/kernel/scst_tgt (not recursive) and store
++all found key attributes.
++
++2. Scan all subdirectories of /sys/kernel/scst_tgt/handlers. Each
++subdirectory with "mgmt" attribute is a root subdirectory of a dev
++handler with name the name of the subdirectory. For each found dev
++handler do the following:
++
++2.1. Store the dev handler's name. Store also its path to the root
++subdirectory, if it isn't default (/sys/kernel/scst_tgt/handlers/handler_name).
++
++2.2. Store all dev handler's key attributes.
++
++2.3. Go through all links in the root subdirectory pointing to
++/sys/kernel/scst_tgt/devices and for each device:
++
++2.3.1. For virtual devices dev handlers:
++
++2.3.1.1. Store the name of the device.
++
++2.3.1.2. Store all key attributes. Mark all read only key attributes
++during storing, they will be parameters for the device's creation.
++
++2.3.2. For pass-through devices dev handlers:
++
++2.3.2.1. Store the H:C:I:L name of the device. Optionally, instead of
++that name, the unique T10 vendor device ID found using the command:
++
++sg_inq -p 0x83 /dev/sdX
++
++can be stored. It will allow reliably finding this device again if on
++the next reboot it has different host:channel:id:lun numbers. The sdX
++device can be found as the last letters after ':' in
++/sys/kernel/scst_tgt/devices/H:C:I:L/scsi_device/device/block:sdX.
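++
++For illustration, a minimal shell sketch of this lookup (the H:C:I:L
++numbers are only illustrative):
++
++dev=1:0:0:0
++for b in /sys/kernel/scst_tgt/devices/$dev/scsi_device/device/block:*; do
++    sg_inq -p 0x83 /dev/${b##*block:}
++done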
++
++3. Go through all subdirectories in /sys/kernel/scst_tgt/targets. For
++each target driver:
++
++3.1. Store the name of the target driver.
++
++3.2. Store all its key attributes.
++
++3.3. Go through all target's subdirectories. For each target:
++
++3.3.1. Store the name of the target.
++
++3.3.2. Mark if the target is hardware or virtual target. The target is a
++hardware target if it has "hw_target" attribute or its target driver
++doesn't have "mgmt" attribute.
++
++3.3.3. Store all key attributes. Mark all read only key attributes
++during storing, they will be parameters for the target's creation.
++
++3.3.4. Scan all "luns" subdirectory and store:
++
++ - LUN.
++
++ - LU's device name.
++
++ - Key attributes.
++
++3.3.5. Scan all "ini_groups" subdirectories. For each group store the following:
++
++ - The group's name.
++
++ - The group's LUNs (the same info as for 3.3.4).
++
++ - The group's initiators.
++
++3.3.6. Store value of "enabled" attribute, if it exists.
++
++3.4. Store value of "enabled" attribute, if it exists.
++
++Algorithm to initialize SCST from config file
++---------------------------------------------
++
++A management utility SHOULD use the following algorithm when doing the
++initial SCST configuration from a config file. All necessary kernel
++modules and user space programs are supposed to be already loaded,
++hence all dev handlers' entries in /sys/kernel/scst_tgt/handlers, as
++well as all entries for hardware targets, are already created.
++
++1. Set stored values for all stored global (/sys/kernel/scst_tgt)
++attributes.
++
++2. For each dev driver:
++
++2.1. Set stored values for all already existing stored attributes.
++
++2.2. Create not existing stored attributes using "add_attribute" command.
++
++2.3. For virtual devices dev handlers for each stored device:
++
++2.3.1. Create the device using "add_device" command using marked read
++only attributes as parameters.
++
++2.3.2. Set stored values for all already existing stored attributes.
++
++2.3.3. Create not existing stored attributes using
++"add_device_attribute" command.
++
++2.4. For pass-through dev handlers, for each stored device:
++
++2.4.1. Assign the corresponding pass-through device to this dev handler
++using "add_device" command.
++
++3. For each target driver:
++
++3.1. Set stored values for all already existing stored attributes.
++
++3.2. Create not existing stored attributes using "add_attribute" command.
++
++3.3. For each target:
++
++3.3.1. For virtual targets:
++
++3.3.1.1. Create the target using "add_target" command using marked read
++only attributes as parameters.
++
++3.3.1.2. Set stored values for all already existing stored attributes.
++
++3.3.1.3. Create not existing stored attributes using
++"add_target_attribute" command.
++
++3.3.2. For hardware targets for each target:
++
++3.3.2.1. Set stored values for all already existing stored attributes.
++
++3.3.2.2. Create not existing stored attributes using
++"add_target_attribute" command.
++
++3.3.3. Setup LUNs
++
++3.3.4. Setup ini_groups, their LUNs and initiators' names.
++
++3.3.5. If this target supports enabling, enable it.
++
++3.4. If this target driver supports enabling, enable it.
++
++Algorithm to apply changes in config file to currently running SCST
++-------------------------------------------------------------------
++
++A management utility SHOULD use the following algorithm when applying
++changes in config file to currently running SCST.
++
++Not all changes can be applied to enabled targets or enabled target
++drivers. On the other hand, for some target drivers enabling/disabling
++is a very long and disruptive operation, which should be performed as
++rarely as possible. Thus, the management utility SHOULD support an
++additional option which, if set, will make it disable all affected
++targets before doing any change to them.
++
++1. Scan all attributes in /sys/kernel/scst_tgt (not recursive) and
++compare stored and actual key attributes. Apply all changes.
++
++2. Scan all subdirectories of /sys/kernel/scst_tgt/handlers. Each
++subdirectory with a "mgmt" attribute is the root subdirectory of a dev
++handler whose name is the name of the subdirectory. For each dev
++handler found, do the following:
++
++2.1. Compare stored and actual key attributes. Apply all changes.
++Create new attributes using "add_attribute" commands and delete
++attributes that are no longer needed using the "del_attribute" command.
++
++2.2. Compare existing devices (links in the root subdirectory pointing
++to /sys/kernel/scst_tgt/devices) and stored devices in the config file.
++Delete all devices that are no longer needed and create the new ones.
++
++2.3. For all existing devices:
++
++2.3.1. Compare stored and actual key attributes. Apply all changes.
++Create new attributes using "add_device_attribute" commands and delete
++attributes that are no longer needed using the "del_device_attribute"
++command.
++
++2.3.2. If any read-only key attribute of a virtual device should be
++changed, delete the device and recreate it.
++
++3. Go through all subdirectories in /sys/kernel/scst_tgt/targets. For
++each target driver:
++
++3.1. If this target driver should be disabled, disable it.
++
++3.2. Compare stored and actual key attributes. Apply all changes.
++Create new attributes using "add_attribute" commands and delete
++attributes that are no longer needed using the "del_attribute" command.
++
++3.3. Go through all the target driver's subdirectories. Compare
++existing and stored targets. Delete all targets that are no longer
++needed and create the new ones.
++
++3.4. For all existing targets:
++
++3.4.1. If this target should be disabled, disable it.
++
++3.4.2. Compare stored and actual key attributes. Apply all changes.
++Create new attributes using "add_target_attribute" commands and delete
++attributes that are no longer needed using the "del_target_attribute"
++command.
++
++3.4.3. If any read-only key attribute of a virtual target should be
++changed, delete the target and recreate it.
++
++3.4.4. Scan the "luns" subdirectory and apply the necessary changes,
++using "replace" commands to replace one LUN with another, if needed.
++
++3.4.5. Scan all "ini_groups" subdirectories and apply the necessary
++changes, using "replace" commands to replace one LUN with another and
++the "move" command to move an initiator from one group to another, if
++needed. This MUST be done in the following order:
++
++ - Necessary initiators deleted, if they aren't going to be moved
++
++ - LUNs updated
++
++ - Necessary initiators added or moved
++
++3.4.6. If this target should be enabled, enable it.
++
++3.5. If this target driver should be enabled, enable it.
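++
++For illustration only, the compare-and-apply step used throughout this
++algorithm (for example in 2.1, 3.2 and 3.4.2) can be sketched in C as
++below. Only the first line of the attribute file is treated as the
++value; the example attribute path and value in main() are purely
++illustrative.
++
++	#include <stdio.h>
++	#include <string.h>
++
++	/* Rewrite one attribute only if its current value differs from
++	 * the value stored in the config file */
++	static int sync_attribute(const char *attr_path, const char *stored_value)
++	{
++		char current[256] = "";
++		FILE *f;
++
++		f = fopen(attr_path, "r");
++		if (f != NULL) {
++			if (fgets(current, sizeof(current), f) != NULL)
++				current[strcspn(current, "\n")] = '\0';
++			fclose(f);
++		}
++
++		if (strcmp(current, stored_value) == 0)
++			return 0;	/* already in sync, nothing to apply */
++
++		f = fopen(attr_path, "w");
++		if (f == NULL)
++			return -1;
++		if (fprintf(f, "%s\n", stored_value) < 0) {
++			fclose(f);
++			return -1;
++		}
++		return fclose(f);
++	}
++
++	int main(void)
++	{
++		/* Illustrative target attribute and value only */
++		return sync_attribute("/sys/kernel/scst_tgt/targets/iscsi/"
++				      "iqn.2011-03.org.example:tgt1/"
++				      "MaxRecvDataSegmentLength", "1048576");
++	}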
++
+diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/Makefile linux-2.6.36/drivers/scst/dev_handlers/Makefile
+--- orig/linux-2.6.36/drivers/scst/dev_handlers/Makefile
++++ linux-2.6.36/drivers/scst/dev_handlers/Makefile
+@@ -0,0 +1,14 @@
++ccflags-y += -Wno-unused-parameter
++
++obj-m := scst_cdrom.o scst_changer.o scst_disk.o scst_modisk.o scst_tape.o \
++ scst_vdisk.o scst_raid.o scst_processor.o scst_user.o
++
++obj-$(CONFIG_SCST_DISK) += scst_disk.o
++obj-$(CONFIG_SCST_TAPE) += scst_tape.o
++obj-$(CONFIG_SCST_CDROM) += scst_cdrom.o
++obj-$(CONFIG_SCST_MODISK) += scst_modisk.o
++obj-$(CONFIG_SCST_CHANGER) += scst_changer.o
++obj-$(CONFIG_SCST_RAID) += scst_raid.o
++obj-$(CONFIG_SCST_PROCESSOR) += scst_processor.o
++obj-$(CONFIG_SCST_VDISK) += scst_vdisk.o
++obj-$(CONFIG_SCST_USER) += scst_user.o
+diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_cdrom.c linux-2.6.36/drivers/scst/dev_handlers/scst_cdrom.c
+--- orig/linux-2.6.36/drivers/scst/dev_handlers/scst_cdrom.c
++++ linux-2.6.36/drivers/scst/dev_handlers/scst_cdrom.c
+@@ -0,0 +1,302 @@
++/*
++ * scst_cdrom.c
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * SCSI CDROM (type 5) dev handler
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/cdrom.h>
++#include <scsi/scsi_host.h>
++#include <linux/slab.h>
++
++#define LOG_PREFIX "dev_cdrom"
++
++#include <scst/scst.h>
++#include "scst_dev_handler.h"
++
++#define CDROM_NAME "dev_cdrom"
++
++#define CDROM_DEF_BLOCK_SHIFT 11
++
++struct cdrom_params {
++ int block_shift;
++};
++
++static int cdrom_attach(struct scst_device *);
++static void cdrom_detach(struct scst_device *);
++static int cdrom_parse(struct scst_cmd *);
++static int cdrom_done(struct scst_cmd *);
++
++static struct scst_dev_type cdrom_devtype = {
++ .name = CDROM_NAME,
++ .type = TYPE_ROM,
++ .threads_num = 1,
++ .parse_atomic = 1,
++ .dev_done_atomic = 1,
++ .attach = cdrom_attach,
++ .detach = cdrom_detach,
++ .parse = cdrom_parse,
++ .dev_done = cdrom_done,
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
++ .trace_flags = &trace_flag,
++#endif
++};
++
++/**************************************************************
++ * Function: cdrom_attach
++ *
++ * Argument:
++ *
++ * Returns : 1 if attached, error code otherwise
++ *
++ * Description:
++ *************************************************************/
++static int cdrom_attach(struct scst_device *dev)
++{
++ int res, rc;
++ uint8_t cmd[10];
++ const int buffer_size = 512;
++ uint8_t *buffer = NULL;
++ int retries;
++ unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
++ enum dma_data_direction data_dir;
++ struct cdrom_params *params;
++
++ TRACE_ENTRY();
++
++ if (dev->scsi_dev == NULL ||
++ dev->scsi_dev->type != dev->type) {
++ PRINT_ERROR("%s", "SCSI device not defined or illegal type");
++ res = -ENODEV;
++ goto out;
++ }
++
++ params = kzalloc(sizeof(*params), GFP_KERNEL);
++ if (params == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "%s",
++ "Unable to allocate struct cdrom_params");
++ res = -ENOMEM;
++ goto out;
++ }
++
++ buffer = kmalloc(buffer_size, GFP_KERNEL);
++ if (!buffer) {
++ TRACE(TRACE_OUT_OF_MEM, "%s", "Memory allocation failure");
++ res = -ENOMEM;
++ goto out_free_params;
++ }
++
++ /* Clear any existing UA's and get cdrom capacity (cdrom block size) */
++ memset(cmd, 0, sizeof(cmd));
++ cmd[0] = READ_CAPACITY;
++ cmd[1] = (dev->scsi_dev->scsi_level <= SCSI_2) ?
++ ((dev->scsi_dev->lun << 5) & 0xe0) : 0;
++ retries = SCST_DEV_UA_RETRIES;
++ while (1) {
++ memset(buffer, 0, buffer_size);
++ memset(sense_buffer, 0, sizeof(sense_buffer));
++ data_dir = SCST_DATA_READ;
++
++ TRACE_DBG("%s", "Doing READ_CAPACITY");
++ rc = scsi_execute(dev->scsi_dev, cmd, data_dir, buffer,
++ buffer_size, sense_buffer,
++ SCST_GENERIC_CDROM_REG_TIMEOUT, 3, 0
++ , NULL
++ );
++
++ TRACE_DBG("READ_CAPACITY done: %x", rc);
++
++ if ((rc == 0) ||
++ !scst_analyze_sense(sense_buffer,
++ sizeof(sense_buffer), SCST_SENSE_KEY_VALID,
++ UNIT_ATTENTION, 0, 0))
++ break;
++
++ if (!--retries) {
++ PRINT_ERROR("UA not cleared after %d retries",
++ SCST_DEV_UA_RETRIES);
++ params->block_shift = CDROM_DEF_BLOCK_SHIFT;
++ res = -ENODEV;
++ goto out_free_buf;
++ }
++ }
++
++ if (rc == 0) {
++ int sector_size = ((buffer[4] << 24) | (buffer[5] << 16) |
++ (buffer[6] << 8) | (buffer[7] << 0));
++ if (sector_size == 0)
++ params->block_shift = CDROM_DEF_BLOCK_SHIFT;
++ else
++ params->block_shift =
++ scst_calc_block_shift(sector_size);
++ TRACE_DBG("Sector size is %i scsi_level %d(SCSI_2 %d)",
++ sector_size, dev->scsi_dev->scsi_level, SCSI_2);
++ } else {
++ params->block_shift = CDROM_DEF_BLOCK_SHIFT;
++ TRACE(TRACE_MINOR, "Read capacity failed: %x, using default "
++ "sector size %d", rc, params->block_shift);
++ PRINT_BUFF_FLAG(TRACE_MINOR, "Returned sense", sense_buffer,
++ sizeof(sense_buffer));
++ }
++
++ res = scst_obtain_device_parameters(dev);
++ if (res != 0) {
++ PRINT_ERROR("Failed to obtain control parameters for device "
++ "%s", dev->virt_name);
++ goto out_free_buf;
++ }
++
++out_free_buf:
++ kfree(buffer);
++
++out_free_params:
++ if (res == 0)
++ dev->dh_priv = params;
++ else
++ kfree(params);
++
++out:
++ TRACE_EXIT();
++ return res;
++}
++
++/************************************************************
++ * Function: cdrom_detach
++ *
++ * Argument:
++ *
++ * Returns : None
++ *
++ * Description: Called to detach this device type driver
++ ************************************************************/
++static void cdrom_detach(struct scst_device *dev)
++{
++ struct cdrom_params *params =
++ (struct cdrom_params *)dev->dh_priv;
++
++ TRACE_ENTRY();
++
++ kfree(params);
++ dev->dh_priv = NULL;
++
++ TRACE_EXIT();
++ return;
++}
++
++static int cdrom_get_block_shift(struct scst_cmd *cmd)
++{
++ struct cdrom_params *params = (struct cdrom_params *)cmd->dev->dh_priv;
++ /*
++ * No need for locks here, since *_detach() can not be
++ * called, when there are existing commands.
++ */
++ return params->block_shift;
++}
++
++/********************************************************************
++ * Function: cdrom_parse
++ *
++ * Argument:
++ *
++ * Returns : The state of the command
++ *
++ * Description: This does the parsing of the command
++ *
++ * Note: Not all states are allowed on return
++ ********************************************************************/
++static int cdrom_parse(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_DEFAULT;
++
++ scst_cdrom_generic_parse(cmd, cdrom_get_block_shift);
++
++ cmd->retries = SCST_PASSTHROUGH_RETRIES;
++
++ return res;
++}
++
++static void cdrom_set_block_shift(struct scst_cmd *cmd, int block_shift)
++{
++ struct cdrom_params *params = (struct cdrom_params *)cmd->dev->dh_priv;
++ /*
++ * No need for locks here, since *_detach() can not be
++ * called, when there are existing commands.
++ */
++ if (block_shift != 0)
++ params->block_shift = block_shift;
++ else
++ params->block_shift = CDROM_DEF_BLOCK_SHIFT;
++ return;
++}
++
++/********************************************************************
++ * Function: cdrom_done
++ *
++ * Argument:
++ *
++ * Returns :
++ *
++ * Description: This is the completion routine for the command,
++ * it is used to extract any necessary information
++ * about a command.
++ ********************************************************************/
++static int cdrom_done(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_DEFAULT;
++
++ TRACE_ENTRY();
++
++ res = scst_block_generic_dev_done(cmd, cdrom_set_block_shift);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int __init cdrom_init(void)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ cdrom_devtype.module = THIS_MODULE;
++
++ res = scst_register_dev_driver(&cdrom_devtype);
++ if (res < 0)
++ goto out;
++
++out:
++ TRACE_EXIT();
++ return res;
++
++}
++
++static void __exit cdrom_exit(void)
++{
++ TRACE_ENTRY();
++ scst_unregister_dev_driver(&cdrom_devtype);
++ TRACE_EXIT();
++ return;
++}
++
++module_init(cdrom_init);
++module_exit(cdrom_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Vladislav Bolkhovitin & Leonid Stoljar");
++MODULE_DESCRIPTION("SCSI CDROM (type 5) dev handler for SCST");
++MODULE_VERSION(SCST_VERSION_STRING);
+diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_changer.c linux-2.6.36/drivers/scst/dev_handlers/scst_changer.c
+--- orig/linux-2.6.36/drivers/scst/dev_handlers/scst_changer.c
++++ linux-2.6.36/drivers/scst/dev_handlers/scst_changer.c
+@@ -0,0 +1,223 @@
++/*
++ * scst_changer.c
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * SCSI medium changer (type 8) dev handler
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <scsi/scsi_host.h>
++#include <linux/slab.h>
++
++#define LOG_PREFIX "dev_changer"
++
++#include <scst/scst.h>
++#include "scst_dev_handler.h"
++
++#define CHANGER_NAME "dev_changer"
++
++#define CHANGER_RETRIES 2
++
++static int changer_attach(struct scst_device *);
++/* static void changer_detach(struct scst_device *); */
++static int changer_parse(struct scst_cmd *);
++/* static int changer_done(struct scst_cmd *); */
++
++static struct scst_dev_type changer_devtype = {
++ .name = CHANGER_NAME,
++ .type = TYPE_MEDIUM_CHANGER,
++ .threads_num = 1,
++ .parse_atomic = 1,
++/* .dev_done_atomic = 1, */
++ .attach = changer_attach,
++/* .detach = changer_detach, */
++ .parse = changer_parse,
++/* .dev_done = changer_done */
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
++ .trace_flags = &trace_flag,
++#endif
++};
++
++/**************************************************************
++ * Function: changer_attach
++ *
++ * Argument:
++ *
++ * Returns : 1 if attached, error code otherwise
++ *
++ * Description:
++ *************************************************************/
++static int changer_attach(struct scst_device *dev)
++{
++ int res, rc;
++ int retries;
++
++ TRACE_ENTRY();
++
++ if (dev->scsi_dev == NULL ||
++ dev->scsi_dev->type != dev->type) {
++ PRINT_ERROR("%s", "SCSI device not defined or illegal type");
++ res = -ENODEV;
++ goto out;
++ }
++
++ /*
++ * If the device is offline, don't try to read capacity or any
++ * of the other stuff
++ */
++ if (dev->scsi_dev->sdev_state == SDEV_OFFLINE) {
++ TRACE_DBG("%s", "Device is offline");
++ res = -ENODEV;
++ goto out;
++ }
++
++ retries = SCST_DEV_UA_RETRIES;
++ do {
++ TRACE_DBG("%s", "Doing TEST_UNIT_READY");
++ rc = scsi_test_unit_ready(dev->scsi_dev,
++ SCST_GENERIC_CHANGER_TIMEOUT, CHANGER_RETRIES
++ , NULL);
++ TRACE_DBG("TEST_UNIT_READY done: %x", rc);
++ } while ((--retries > 0) && rc);
++
++ if (rc) {
++ PRINT_WARNING("Unit not ready: %x", rc);
++ /* Let's try not to be too smart and continue processing */
++ }
++
++ res = scst_obtain_device_parameters(dev);
++ if (res != 0) {
++ PRINT_ERROR("Failed to obtain control parameters for device "
++ "%s", dev->virt_name);
++ goto out;
++ }
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++/************************************************************
++ * Function: changer_detach
++ *
++ * Argument:
++ *
++ * Returns : None
++ *
++ * Description: Called to detach this device type driver
++ ************************************************************/
++#if 0
++void changer_detach(struct scst_device *dev)
++{
++ TRACE_ENTRY();
++
++ TRACE_EXIT();
++ return;
++}
++#endif
++
++/********************************************************************
++ * Function: changer_parse
++ *
++ * Argument:
++ *
++ * Returns : The state of the command
++ *
++ * Description: This does the parsing of the command
++ *
++ * Note: Not all states are allowed on return
++ ********************************************************************/
++static int changer_parse(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_DEFAULT;
++
++ scst_changer_generic_parse(cmd, NULL);
++
++ cmd->retries = SCST_PASSTHROUGH_RETRIES;
++
++ return res;
++}
++
++/********************************************************************
++ * Function: changer_done
++ *
++ * Argument:
++ *
++ * Returns :
++ *
++ * Description: This is the completion routine for the command,
++ * it is used to extract any necessary information
++ * about a command.
++ ********************************************************************/
++#if 0
++int changer_done(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_DEFAULT;
++
++ TRACE_ENTRY();
++
++ /*
++ * SCST sets good defaults for cmd->is_send_status and
++ * cmd->resp_data_len based on cmd->status and cmd->data_direction,
++ * therefore change them only if necessary
++ */
++
++#if 0
++ switch (cmd->cdb[0]) {
++ default:
++ /* It's all good */
++ break;
++ }
++#endif
++
++ TRACE_EXIT();
++ return res;
++}
++#endif
++
++static int __init changer_init(void)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ changer_devtype.module = THIS_MODULE;
++
++ res = scst_register_dev_driver(&changer_devtype);
++ if (res < 0)
++ goto out;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void __exit changer_exit(void)
++{
++ TRACE_ENTRY();
++ scst_unregister_dev_driver(&changer_devtype);
++ TRACE_EXIT();
++ return;
++}
++
++module_init(changer_init);
++module_exit(changer_exit);
++
++MODULE_AUTHOR("Vladislav Bolkhovitin & Leonid Stoljar");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("SCSI medium changer (type 8) dev handler for SCST");
++MODULE_VERSION(SCST_VERSION_STRING);
+diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_dev_handler.h linux-2.6.36/drivers/scst/dev_handlers/scst_dev_handler.h
+--- orig/linux-2.6.36/drivers/scst/dev_handlers/scst_dev_handler.h
++++ linux-2.6.36/drivers/scst/dev_handlers/scst_dev_handler.h
+@@ -0,0 +1,27 @@
++#ifndef __SCST_DEV_HANDLER_H
++#define __SCST_DEV_HANDLER_H
++
++#include <linux/module.h>
++#include <scsi/scsi_eh.h>
++#include <scst/scst_debug.h>
++
++#define SCST_DEV_UA_RETRIES 5
++#define SCST_PASSTHROUGH_RETRIES 0
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++
++#ifdef CONFIG_SCST_DEBUG
++#define SCST_DEFAULT_DEV_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_PID | \
++ TRACE_LINE | TRACE_FUNCTION | TRACE_MGMT | TRACE_MINOR | \
++ TRACE_MGMT_DEBUG | TRACE_SPECIAL)
++#else
++#define SCST_DEFAULT_DEV_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MGMT | \
++ TRACE_SPECIAL)
++#endif
++
++static unsigned long dh_trace_flag = SCST_DEFAULT_DEV_LOG_FLAGS;
++#define trace_flag dh_trace_flag
++
++#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
++
++#endif /* __SCST_DEV_HANDLER_H */
+diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_disk.c linux-2.6.36/drivers/scst/dev_handlers/scst_disk.c
+--- orig/linux-2.6.36/drivers/scst/dev_handlers/scst_disk.c
++++ linux-2.6.36/drivers/scst/dev_handlers/scst_disk.c
+@@ -0,0 +1,380 @@
++/*
++ * scst_disk.c
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * SCSI disk (type 0) dev handler
++ * &
++ * SCSI disk (type 0) "performance" device handler (skip all READ and WRITE
++ * operations).
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <scsi/scsi_host.h>
++#include <linux/slab.h>
++
++#define LOG_PREFIX "dev_disk"
++
++#include <scst/scst.h>
++#include "scst_dev_handler.h"
++
++# define DISK_NAME "dev_disk"
++# define DISK_PERF_NAME "dev_disk_perf"
++
++#define DISK_DEF_BLOCK_SHIFT 9
++
++struct disk_params {
++ int block_shift;
++};
++
++static int disk_attach(struct scst_device *dev);
++static void disk_detach(struct scst_device *dev);
++static int disk_parse(struct scst_cmd *cmd);
++static int disk_done(struct scst_cmd *cmd);
++static int disk_exec(struct scst_cmd *cmd);
++
++static struct scst_dev_type disk_devtype = {
++ .name = DISK_NAME,
++ .type = TYPE_DISK,
++ .threads_num = 1,
++ .parse_atomic = 1,
++ .dev_done_atomic = 1,
++ .attach = disk_attach,
++ .detach = disk_detach,
++ .parse = disk_parse,
++ .dev_done = disk_done,
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
++ .trace_flags = &trace_flag,
++#endif
++};
++
++static struct scst_dev_type disk_devtype_perf = {
++ .name = DISK_PERF_NAME,
++ .type = TYPE_DISK,
++ .parse_atomic = 1,
++ .dev_done_atomic = 1,
++ .attach = disk_attach,
++ .detach = disk_detach,
++ .parse = disk_parse,
++ .dev_done = disk_done,
++ .exec = disk_exec,
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
++ .trace_flags = &trace_flag,
++#endif
++};
++
++static int __init init_scst_disk_driver(void)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ disk_devtype.module = THIS_MODULE;
++
++ res = scst_register_dev_driver(&disk_devtype);
++ if (res < 0)
++ goto out;
++
++ disk_devtype_perf.module = THIS_MODULE;
++
++ res = scst_register_dev_driver(&disk_devtype_perf);
++ if (res < 0)
++ goto out_unreg;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_unreg:
++ scst_unregister_dev_driver(&disk_devtype);
++ goto out;
++}
++
++static void __exit exit_scst_disk_driver(void)
++{
++ TRACE_ENTRY();
++
++ scst_unregister_dev_driver(&disk_devtype_perf);
++ scst_unregister_dev_driver(&disk_devtype);
++
++ TRACE_EXIT();
++ return;
++}
++
++module_init(init_scst_disk_driver);
++module_exit(exit_scst_disk_driver);
++
++/**************************************************************
++ * Function: disk_attach
++ *
++ * Argument:
++ *
++ * Returns : 1 if attached, error code otherwise
++ *
++ * Description:
++ *************************************************************/
++static int disk_attach(struct scst_device *dev)
++{
++ int res, rc;
++ uint8_t cmd[10];
++ const int buffer_size = 512;
++ uint8_t *buffer = NULL;
++ int retries;
++ unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
++ enum dma_data_direction data_dir;
++ struct disk_params *params;
++
++ TRACE_ENTRY();
++
++ if (dev->scsi_dev == NULL ||
++ dev->scsi_dev->type != dev->type) {
++ PRINT_ERROR("%s", "SCSI device not defined or illegal type");
++ res = -ENODEV;
++ goto out;
++ }
++
++ params = kzalloc(sizeof(*params), GFP_KERNEL);
++ if (params == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "%s",
++ "Unable to allocate struct disk_params");
++ res = -ENOMEM;
++ goto out;
++ }
++
++ buffer = kmalloc(buffer_size, GFP_KERNEL);
++ if (!buffer) {
++ TRACE(TRACE_OUT_OF_MEM, "%s", "Memory allocation failure");
++ res = -ENOMEM;
++ goto out_free_params;
++ }
++
++ /* Clear any existing UA's and get disk capacity (disk block size) */
++ memset(cmd, 0, sizeof(cmd));
++ cmd[0] = READ_CAPACITY;
++ cmd[1] = (dev->scsi_dev->scsi_level <= SCSI_2) ?
++ ((dev->scsi_dev->lun << 5) & 0xe0) : 0;
++ retries = SCST_DEV_UA_RETRIES;
++ while (1) {
++ memset(buffer, 0, buffer_size);
++ memset(sense_buffer, 0, sizeof(sense_buffer));
++ data_dir = SCST_DATA_READ;
++
++ TRACE_DBG("%s", "Doing READ_CAPACITY");
++ rc = scsi_execute(dev->scsi_dev, cmd, data_dir, buffer,
++ buffer_size, sense_buffer,
++ SCST_GENERIC_DISK_REG_TIMEOUT, 3, 0
++ , NULL
++ );
++
++ TRACE_DBG("READ_CAPACITY done: %x", rc);
++
++ if ((rc == 0) ||
++ !scst_analyze_sense(sense_buffer,
++ sizeof(sense_buffer), SCST_SENSE_KEY_VALID,
++ UNIT_ATTENTION, 0, 0))
++ break;
++ if (!--retries) {
++ PRINT_ERROR("UA not cleared after %d retries",
++ SCST_DEV_UA_RETRIES);
++ res = -ENODEV;
++ goto out_free_buf;
++ }
++ }
++ if (rc == 0) {
++ int sector_size = ((buffer[4] << 24) | (buffer[5] << 16) |
++ (buffer[6] << 8) | (buffer[7] << 0));
++ if (sector_size == 0)
++ params->block_shift = DISK_DEF_BLOCK_SHIFT;
++ else
++ params->block_shift =
++ scst_calc_block_shift(sector_size);
++ } else {
++ params->block_shift = DISK_DEF_BLOCK_SHIFT;
++ TRACE(TRACE_MINOR, "Read capacity failed: %x, using default "
++ "sector size %d", rc, params->block_shift);
++ PRINT_BUFF_FLAG(TRACE_MINOR, "Returned sense", sense_buffer,
++ sizeof(sense_buffer));
++ }
++
++ res = scst_obtain_device_parameters(dev);
++ if (res != 0) {
++ PRINT_ERROR("Failed to obtain control parameters for device "
++ "%s", dev->virt_name);
++ goto out_free_buf;
++ }
++
++out_free_buf:
++ kfree(buffer);
++
++out_free_params:
++ if (res == 0)
++ dev->dh_priv = params;
++ else
++ kfree(params);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/************************************************************
++ * Function: disk_detach
++ *
++ * Argument:
++ *
++ * Returns : None
++ *
++ * Description: Called to detach this device type driver
++ ************************************************************/
++static void disk_detach(struct scst_device *dev)
++{
++ struct disk_params *params =
++ (struct disk_params *)dev->dh_priv;
++
++ TRACE_ENTRY();
++
++ kfree(params);
++ dev->dh_priv = NULL;
++
++ TRACE_EXIT();
++ return;
++}
++
++static int disk_get_block_shift(struct scst_cmd *cmd)
++{
++ struct disk_params *params = (struct disk_params *)cmd->dev->dh_priv;
++ /*
++ * No need for locks here, since *_detach() can not be
++ * called, when there are existing commands.
++ */
++ return params->block_shift;
++}
++
++/********************************************************************
++ * Function: disk_parse
++ *
++ * Argument:
++ *
++ * Returns : The state of the command
++ *
++ * Description: This does the parsing of the command
++ *
++ * Note: Not all states are allowed on return
++ ********************************************************************/
++static int disk_parse(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_DEFAULT;
++
++ scst_sbc_generic_parse(cmd, disk_get_block_shift);
++
++ cmd->retries = SCST_PASSTHROUGH_RETRIES;
++
++ return res;
++}
++
++static void disk_set_block_shift(struct scst_cmd *cmd, int block_shift)
++{
++ struct disk_params *params = (struct disk_params *)cmd->dev->dh_priv;
++ /*
++ * No need for locks here, since *_detach() can not be
++ * called, when there are existing commands.
++ */
++ if (block_shift != 0)
++ params->block_shift = block_shift;
++ else
++ params->block_shift = DISK_DEF_BLOCK_SHIFT;
++ return;
++}
++
++/********************************************************************
++ * Function: disk_done
++ *
++ * Argument:
++ *
++ * Returns :
++ *
++ * Description: This is the completion routine for the command,
++ * it is used to extract any necessary information
++ * about a command.
++ ********************************************************************/
++static int disk_done(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_DEFAULT;
++
++ TRACE_ENTRY();
++
++ res = scst_block_generic_dev_done(cmd, disk_set_block_shift);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/********************************************************************
++ * Function: disk_exec
++ *
++ * Argument:
++ *
++ * Returns :
++ *
++ * Description: Make SCST do nothing for data READs and WRITES.
++ * Intended for raw line performance testing
++ ********************************************************************/
++static int disk_exec(struct scst_cmd *cmd)
++{
++ int res = SCST_EXEC_NOT_COMPLETED, rc;
++ int opcode = cmd->cdb[0];
++
++ TRACE_ENTRY();
++
++ rc = scst_check_local_events(cmd);
++ if (unlikely(rc != 0))
++ goto out_done;
++
++ cmd->status = 0;
++ cmd->msg_status = 0;
++ cmd->host_status = DID_OK;
++ cmd->driver_status = 0;
++
++ switch (opcode) {
++ case WRITE_6:
++ case WRITE_10:
++ case WRITE_12:
++ case WRITE_16:
++ case READ_6:
++ case READ_10:
++ case READ_12:
++ case READ_16:
++ cmd->completed = 1;
++ goto out_done;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_done:
++ res = SCST_EXEC_COMPLETED;
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
++ goto out;
++}
++
++MODULE_AUTHOR("Vladislav Bolkhovitin & Leonid Stoljar");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("SCSI disk (type 0) dev handler for SCST");
++MODULE_VERSION(SCST_VERSION_STRING);
+diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_modisk.c linux-2.6.36/drivers/scst/dev_handlers/scst_modisk.c
+--- orig/linux-2.6.36/drivers/scst/dev_handlers/scst_modisk.c
++++ linux-2.6.36/drivers/scst/dev_handlers/scst_modisk.c
+@@ -0,0 +1,399 @@
++/*
++ * scst_modisk.c
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * SCSI MO disk (type 7) dev handler
++ * &
++ * SCSI MO disk (type 7) "performance" device handler (skip all READ and WRITE
++ * operations).
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <scsi/scsi_host.h>
++#include <linux/slab.h>
++
++#define LOG_PREFIX "dev_modisk"
++
++#include <scst/scst.h>
++#include "scst_dev_handler.h"
++
++# define MODISK_NAME "dev_modisk"
++# define MODISK_PERF_NAME "dev_modisk_perf"
++
++#define MODISK_DEF_BLOCK_SHIFT 10
++
++struct modisk_params {
++ int block_shift;
++};
++
++static int modisk_attach(struct scst_device *);
++static void modisk_detach(struct scst_device *);
++static int modisk_parse(struct scst_cmd *);
++static int modisk_done(struct scst_cmd *);
++static int modisk_exec(struct scst_cmd *);
++
++static struct scst_dev_type modisk_devtype = {
++ .name = MODISK_NAME,
++ .type = TYPE_MOD,
++ .threads_num = 1,
++ .parse_atomic = 1,
++ .dev_done_atomic = 1,
++ .attach = modisk_attach,
++ .detach = modisk_detach,
++ .parse = modisk_parse,
++ .dev_done = modisk_done,
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
++ .trace_flags = &trace_flag,
++#endif
++};
++
++static struct scst_dev_type modisk_devtype_perf = {
++ .name = MODISK_PERF_NAME,
++ .type = TYPE_MOD,
++ .parse_atomic = 1,
++ .dev_done_atomic = 1,
++ .attach = modisk_attach,
++ .detach = modisk_detach,
++ .parse = modisk_parse,
++ .dev_done = modisk_done,
++ .exec = modisk_exec,
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
++ .trace_flags = &trace_flag,
++#endif
++};
++
++static int __init init_scst_modisk_driver(void)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ modisk_devtype.module = THIS_MODULE;
++
++ res = scst_register_dev_driver(&modisk_devtype);
++ if (res < 0)
++ goto out;
++
++ modisk_devtype_perf.module = THIS_MODULE;
++
++ res = scst_register_dev_driver(&modisk_devtype_perf);
++ if (res < 0)
++ goto out_unreg;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_unreg:
++ scst_unregister_dev_driver(&modisk_devtype);
++ goto out;
++}
++
++static void __exit exit_scst_modisk_driver(void)
++{
++ TRACE_ENTRY();
++
++ scst_unregister_dev_driver(&modisk_devtype_perf);
++ scst_unregister_dev_driver(&modisk_devtype);
++
++ TRACE_EXIT();
++ return;
++}
++
++module_init(init_scst_modisk_driver);
++module_exit(exit_scst_modisk_driver);
++
++/**************************************************************
++ * Function: modisk_attach
++ *
++ * Argument:
++ *
++ * Returns : 1 if attached, error code otherwise
++ *
++ * Description:
++ *************************************************************/
++static int modisk_attach(struct scst_device *dev)
++{
++ int res, rc;
++ uint8_t cmd[10];
++ const int buffer_size = 512;
++ uint8_t *buffer = NULL;
++ int retries;
++ unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
++ enum dma_data_direction data_dir;
++ struct modisk_params *params;
++
++ TRACE_ENTRY();
++
++ if (dev->scsi_dev == NULL ||
++ dev->scsi_dev->type != dev->type) {
++ PRINT_ERROR("%s", "SCSI device not defined or illegal type");
++ res = -ENODEV;
++ goto out;
++ }
++
++ params = kzalloc(sizeof(*params), GFP_KERNEL);
++ if (params == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "%s",
++ "Unable to allocate struct modisk_params");
++ res = -ENOMEM;
++ goto out;
++ }
++ params->block_shift = MODISK_DEF_BLOCK_SHIFT;
++
++ /*
++ * If the device is offline, don't try to read capacity or any
++ * of the other stuff
++ */
++ if (dev->scsi_dev->sdev_state == SDEV_OFFLINE) {
++ TRACE_DBG("%s", "Device is offline");
++ res = -ENODEV;
++ goto out_free_params;
++ }
++
++ buffer = kmalloc(buffer_size, GFP_KERNEL);
++ if (!buffer) {
++ TRACE(TRACE_OUT_OF_MEM, "%s", "Memory allocation failure");
++ res = -ENOMEM;
++ goto out_free_params;
++ }
++
++ /*
++ * Clear any existing UA's and get modisk capacity (modisk block
++ * size).
++ */
++ memset(cmd, 0, sizeof(cmd));
++ cmd[0] = READ_CAPACITY;
++ cmd[1] = (dev->scsi_dev->scsi_level <= SCSI_2) ?
++ ((dev->scsi_dev->lun << 5) & 0xe0) : 0;
++ retries = SCST_DEV_UA_RETRIES;
++ while (1) {
++ memset(buffer, 0, buffer_size);
++ memset(sense_buffer, 0, sizeof(sense_buffer));
++ data_dir = SCST_DATA_READ;
++
++ TRACE_DBG("%s", "Doing READ_CAPACITY");
++ rc = scsi_execute(dev->scsi_dev, cmd, data_dir, buffer,
++ buffer_size, sense_buffer,
++ SCST_GENERIC_MODISK_REG_TIMEOUT, 3, 0
++ , NULL
++ );
++
++ TRACE_DBG("READ_CAPACITY done: %x", rc);
++
++ if (!rc || !scst_analyze_sense(sense_buffer,
++ sizeof(sense_buffer), SCST_SENSE_KEY_VALID,
++ UNIT_ATTENTION, 0, 0))
++ break;
++
++ if (!--retries) {
++ PRINT_ERROR("UA not cleared after %d retries",
++ SCST_DEV_UA_RETRIES);
++ res = -ENODEV;
++ goto out_free_buf;
++ }
++ }
++
++ if (rc == 0) {
++ int sector_size = ((buffer[4] << 24) | (buffer[5] << 16) |
++ (buffer[6] << 8) | (buffer[7] << 0));
++ if (sector_size == 0)
++ params->block_shift = MODISK_DEF_BLOCK_SHIFT;
++ else
++ params->block_shift =
++ scst_calc_block_shift(sector_size);
++ TRACE_DBG("Sector size is %i scsi_level %d(SCSI_2 %d)",
++ sector_size, dev->scsi_dev->scsi_level, SCSI_2);
++ } else {
++ params->block_shift = MODISK_DEF_BLOCK_SHIFT;
++ TRACE(TRACE_MINOR, "Read capacity failed: %x, using default "
++ "sector size %d", rc, params->block_shift);
++ PRINT_BUFF_FLAG(TRACE_MINOR, "Returned sense", sense_buffer,
++ sizeof(sense_buffer));
++ }
++
++ res = scst_obtain_device_parameters(dev);
++ if (res != 0) {
++ PRINT_ERROR("Failed to obtain control parameters for device "
++ "%s: %x", dev->virt_name, res);
++ goto out_free_buf;
++ }
++
++out_free_buf:
++ kfree(buffer);
++
++out_free_params:
++ if (res == 0)
++ dev->dh_priv = params;
++ else
++ kfree(params);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/************************************************************
++ * Function: modisk_detach
++ *
++ * Argument:
++ *
++ * Returns : None
++ *
++ * Description: Called to detach this device type driver
++ ************************************************************/
++static void modisk_detach(struct scst_device *dev)
++{
++ struct modisk_params *params =
++ (struct modisk_params *)dev->dh_priv;
++
++ TRACE_ENTRY();
++
++ kfree(params);
++ dev->dh_priv = NULL;
++
++ TRACE_EXIT();
++ return;
++}
++
++static int modisk_get_block_shift(struct scst_cmd *cmd)
++{
++ struct modisk_params *params =
++ (struct modisk_params *)cmd->dev->dh_priv;
++ /*
++ * No need for locks here, since *_detach() can not be
++ * called, when there are existing commands.
++ */
++ return params->block_shift;
++}
++
++/********************************************************************
++ * Function: modisk_parse
++ *
++ * Argument:
++ *
++ * Returns : The state of the command
++ *
++ * Description: This does the parsing of the command
++ *
++ * Note: Not all states are allowed on return
++ ********************************************************************/
++static int modisk_parse(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_DEFAULT;
++
++ scst_modisk_generic_parse(cmd, modisk_get_block_shift);
++
++ cmd->retries = SCST_PASSTHROUGH_RETRIES;
++
++ return res;
++}
++
++static void modisk_set_block_shift(struct scst_cmd *cmd, int block_shift)
++{
++ struct modisk_params *params =
++ (struct modisk_params *)cmd->dev->dh_priv;
++ /*
++ * No need for locks here, since *_detach() can not be
++ * called, when there are existing commands.
++ */
++ if (block_shift != 0)
++ params->block_shift = block_shift;
++ else
++ params->block_shift = MODISK_DEF_BLOCK_SHIFT;
++ return;
++}
++
++/********************************************************************
++ * Function: modisk_done
++ *
++ * Argument:
++ *
++ * Returns :
++ *
++ * Description: This is the completion routine for the command,
++ * it is used to extract any necessary information
++ * about a command.
++ ********************************************************************/
++static int modisk_done(struct scst_cmd *cmd)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ res = scst_block_generic_dev_done(cmd, modisk_set_block_shift);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/********************************************************************
++ * Function: modisk_exec
++ *
++ * Argument:
++ *
++ * Returns :
++ *
++ * Description: Make SCST do nothing for data READs and WRITES.
++ * Intended for raw line performance testing
++ ********************************************************************/
++static int modisk_exec(struct scst_cmd *cmd)
++{
++ int res = SCST_EXEC_NOT_COMPLETED, rc;
++ int opcode = cmd->cdb[0];
++
++ TRACE_ENTRY();
++
++ rc = scst_check_local_events(cmd);
++ if (unlikely(rc != 0))
++ goto out_done;
++
++ cmd->status = 0;
++ cmd->msg_status = 0;
++ cmd->host_status = DID_OK;
++ cmd->driver_status = 0;
++
++ switch (opcode) {
++ case WRITE_6:
++ case WRITE_10:
++ case WRITE_12:
++ case WRITE_16:
++ case READ_6:
++ case READ_10:
++ case READ_12:
++ case READ_16:
++ cmd->completed = 1;
++ goto out_done;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_done:
++ res = SCST_EXEC_COMPLETED;
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
++ goto out;
++}
++
++MODULE_AUTHOR("Vladislav Bolkhovitin & Leonid Stoljar");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("SCSI MO disk (type 7) dev handler for SCST");
++MODULE_VERSION(SCST_VERSION_STRING);
+diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_processor.c linux-2.6.36/drivers/scst/dev_handlers/scst_processor.c
+--- orig/linux-2.6.36/drivers/scst/dev_handlers/scst_processor.c
++++ linux-2.6.36/drivers/scst/dev_handlers/scst_processor.c
+@@ -0,0 +1,223 @@
++/*
++ * scst_processor.c
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * SCSI medium processor (type 3) dev handler
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <scsi/scsi_host.h>
++#include <linux/slab.h>
++
++#define LOG_PREFIX "dev_processor"
++
++#include <scst/scst.h>
++#include "scst_dev_handler.h"
++
++#define PROCESSOR_NAME "dev_processor"
++
++#define PROCESSOR_RETRIES 2
++
++static int processor_attach(struct scst_device *);
++/*static void processor_detach(struct scst_device *);*/
++static int processor_parse(struct scst_cmd *);
++/*static int processor_done(struct scst_cmd *);*/
++
++static struct scst_dev_type processor_devtype = {
++ .name = PROCESSOR_NAME,
++ .type = TYPE_PROCESSOR,
++ .threads_num = 1,
++ .parse_atomic = 1,
++/* .dev_done_atomic = 1,*/
++ .attach = processor_attach,
++/* .detach = processor_detach,*/
++ .parse = processor_parse,
++/* .dev_done = processor_done*/
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
++ .trace_flags = &trace_flag,
++#endif
++};
++
++/**************************************************************
++ * Function: processor_attach
++ *
++ * Argument:
++ *
++ * Returns : 1 if attached, error code otherwise
++ *
++ * Description:
++ *************************************************************/
++static int processor_attach(struct scst_device *dev)
++{
++ int res, rc;
++ int retries;
++
++ TRACE_ENTRY();
++
++ if (dev->scsi_dev == NULL ||
++ dev->scsi_dev->type != dev->type) {
++ PRINT_ERROR("%s", "SCSI device not defined or illegal type");
++ res = -ENODEV;
++ goto out;
++ }
++
++ /*
++ * If the device is offline, don't try to read capacity or any
++ * of the other stuff
++ */
++ if (dev->scsi_dev->sdev_state == SDEV_OFFLINE) {
++ TRACE_DBG("%s", "Device is offline");
++ res = -ENODEV;
++ goto out;
++ }
++
++ retries = SCST_DEV_UA_RETRIES;
++ do {
++ TRACE_DBG("%s", "Doing TEST_UNIT_READY");
++ rc = scsi_test_unit_ready(dev->scsi_dev,
++ SCST_GENERIC_PROCESSOR_TIMEOUT, PROCESSOR_RETRIES
++ , NULL);
++ TRACE_DBG("TEST_UNIT_READY done: %x", rc);
++ } while ((--retries > 0) && rc);
++
++ if (rc) {
++ PRINT_WARNING("Unit not ready: %x", rc);
++ /* Let's try not to be too smart and continue processing */
++ }
++
++ res = scst_obtain_device_parameters(dev);
++ if (res != 0) {
++ PRINT_ERROR("Failed to obtain control parameters for device "
++ "%s", dev->virt_name);
++ goto out;
++ }
++
++out:
++ TRACE_EXIT();
++ return res;
++}
++
++/************************************************************
++ * Function: processor_detach
++ *
++ * Argument:
++ *
++ * Returns : None
++ *
++ * Description: Called to detach this device type driver
++ ************************************************************/
++#if 0
++void processor_detach(struct scst_device *dev)
++{
++ TRACE_ENTRY();
++
++ TRACE_EXIT();
++ return;
++}
++#endif
++
++/********************************************************************
++ * Function: processor_parse
++ *
++ * Argument:
++ *
++ * Returns : The state of the command
++ *
++ * Description: This does the parsing of the command
++ *
++ * Note: Not all states are allowed on return
++ ********************************************************************/
++static int processor_parse(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_DEFAULT;
++
++ scst_processor_generic_parse(cmd, NULL);
++
++ cmd->retries = SCST_PASSTHROUGH_RETRIES;
++
++ return res;
++}
++
++/********************************************************************
++ * Function: processor_done
++ *
++ * Argument:
++ *
++ * Returns :
++ *
++ * Description: This is the completion routine for the command,
++ * it is used to extract any necessary information
++ * about a command.
++ ********************************************************************/
++#if 0
++int processor_done(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_DEFAULT;
++
++ TRACE_ENTRY();
++
++ /*
++ * SCST sets good defaults for cmd->is_send_status and
++ * cmd->resp_data_len based on cmd->status and cmd->data_direction,
++ * therefore change them only if necessary.
++ */
++
++#if 0
++ switch (cmd->cdb[0]) {
++ default:
++ /* It's all good */
++ break;
++ }
++#endif
++
++ TRACE_EXIT();
++ return res;
++}
++#endif
++
++static int __init processor_init(void)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ processor_devtype.module = THIS_MODULE;
++
++ res = scst_register_dev_driver(&processor_devtype);
++ if (res < 0)
++ goto out;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void __exit processor_exit(void)
++{
++ TRACE_ENTRY();
++ scst_unregister_dev_driver(&processor_devtype);
++ TRACE_EXIT();
++ return;
++}
++
++module_init(processor_init);
++module_exit(processor_exit);
++
++MODULE_AUTHOR("Vladislav Bolkhovitin & Leonid Stoljar");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("SCSI medium processor (type 3) dev handler for SCST");
++MODULE_VERSION(SCST_VERSION_STRING);
+diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_raid.c linux-2.6.36/drivers/scst/dev_handlers/scst_raid.c
+--- orig/linux-2.6.36/drivers/scst/dev_handlers/scst_raid.c
++++ linux-2.6.36/drivers/scst/dev_handlers/scst_raid.c
+@@ -0,0 +1,224 @@
++/*
++ * scst_raid.c
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * SCSI raid(controller) (type 0xC) dev handler
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#define LOG_PREFIX "dev_raid"
++
++#include <scsi/scsi_host.h>
++#include <linux/slab.h>
++
++#include <scst/scst.h>
++#include "scst_dev_handler.h"
++
++#define RAID_NAME "dev_raid"
++
++#define RAID_RETRIES 2
++
++static int raid_attach(struct scst_device *);
++/* static void raid_detach(struct scst_device *); */
++static int raid_parse(struct scst_cmd *);
++/* static int raid_done(struct scst_cmd *); */
++
++static struct scst_dev_type raid_devtype = {
++ .name = RAID_NAME,
++ .type = TYPE_RAID,
++ .threads_num = 1,
++ .parse_atomic = 1,
++/* .dev_done_atomic = 1,*/
++ .attach = raid_attach,
++/* .detach = raid_detach,*/
++ .parse = raid_parse,
++/* .dev_done = raid_done,*/
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
++ .trace_flags = &trace_flag,
++#endif
++};
++
++/**************************************************************
++ * Function: raid_attach
++ *
++ * Argument:
++ *
++ * Returns : 1 if attached, error code otherwise
++ *
++ * Description:
++ *************************************************************/
++static int raid_attach(struct scst_device *dev)
++{
++ int res, rc;
++ int retries;
++
++ TRACE_ENTRY();
++
++ if (dev->scsi_dev == NULL ||
++ dev->scsi_dev->type != dev->type) {
++ PRINT_ERROR("%s", "SCSI device not defined or illegal type");
++ res = -ENODEV;
++ goto out;
++ }
++
++ /*
++ * If the device is offline, don't try to read capacity or any
++ * of the other stuff
++ */
++ if (dev->scsi_dev->sdev_state == SDEV_OFFLINE) {
++ TRACE_DBG("%s", "Device is offline");
++ res = -ENODEV;
++ goto out;
++ }
++
++ retries = SCST_DEV_UA_RETRIES;
++ do {
++ TRACE_DBG("%s", "Doing TEST_UNIT_READY");
++ rc = scsi_test_unit_ready(dev->scsi_dev,
++ SCST_GENERIC_RAID_TIMEOUT, RAID_RETRIES
++ , NULL);
++ TRACE_DBG("TEST_UNIT_READY done: %x", rc);
++ } while ((--retries > 0) && rc);
++
++ if (rc) {
++ PRINT_WARNING("Unit not ready: %x", rc);
++ /* Let's try not to be too smart and continue processing */
++ }
++
++ res = scst_obtain_device_parameters(dev);
++ if (res != 0) {
++ PRINT_ERROR("Failed to obtain control parameters for device "
++ "%s", dev->virt_name);
++ goto out;
++ }
++
++out:
++ TRACE_EXIT();
++ return res;
++}
++
++/************************************************************
++ * Function: raid_detach
++ *
++ * Argument:
++ *
++ * Returns : None
++ *
++ * Description: Called to detach this device type driver
++ ************************************************************/
++#if 0
++void raid_detach(struct scst_device *dev)
++{
++ TRACE_ENTRY();
++
++ TRACE_EXIT();
++ return;
++}
++#endif
++
++/********************************************************************
++ * Function: raid_parse
++ *
++ * Argument:
++ *
++ * Returns : The state of the command
++ *
++ * Description: This does the parsing of the command
++ *
++ * Note: Not all states are allowed on return
++ ********************************************************************/
++static int raid_parse(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_DEFAULT;
++
++ scst_raid_generic_parse(cmd, NULL);
++
++ cmd->retries = SCST_PASSTHROUGH_RETRIES;
++
++ return res;
++}
++
++/********************************************************************
++ * Function: raid_done
++ *
++ * Argument:
++ *
++ * Returns :
++ *
++ * Description: This is the completion routine for the command,
++ * it is used to extract any necessary information
++ * about a command.
++ ********************************************************************/
++#if 0
++int raid_done(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_DEFAULT;
++
++ TRACE_ENTRY();
++
++ /*
++ * SCST sets good defaults for cmd->is_send_status and
++ * cmd->resp_data_len based on cmd->status and cmd->data_direction,
++ * therefore change them only if necessary.
++ */
++
++#if 0
++ switch (cmd->cdb[0]) {
++ default:
++ /* It's all good */
++ break;
++ }
++#endif
++
++ TRACE_EXIT();
++ return res;
++}
++#endif
++
++static int __init raid_init(void)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ raid_devtype.module = THIS_MODULE;
++
++ res = scst_register_dev_driver(&raid_devtype);
++ if (res < 0)
++ goto out;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++}
++
++static void __exit raid_exit(void)
++{
++ TRACE_ENTRY();
++ scst_unregister_dev_driver(&raid_devtype);
++ TRACE_EXIT();
++ return;
++}
++
++module_init(raid_init);
++module_exit(raid_exit);
++
++MODULE_AUTHOR("Vladislav Bolkhovitin & Leonid Stoljar");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("SCSI raid(controller) (type 0xC) dev handler for SCST");
++MODULE_VERSION(SCST_VERSION_STRING);
+diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_tape.c linux-2.6.36/drivers/scst/dev_handlers/scst_tape.c
+--- orig/linux-2.6.36/drivers/scst/dev_handlers/scst_tape.c
++++ linux-2.6.36/drivers/scst/dev_handlers/scst_tape.c
+@@ -0,0 +1,432 @@
++/*
++ * scst_tape.c
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * SCSI tape (type 1) dev handler
++ * &
++ * SCSI tape (type 1) "performance" device handler (skip all READ and WRITE
++ * operations).
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <scsi/scsi_host.h>
++#include <linux/slab.h>
++
++#define LOG_PREFIX "dev_tape"
++
++#include <scst/scst.h>
++#include "scst_dev_handler.h"
++
++# define TAPE_NAME "dev_tape"
++# define TAPE_PERF_NAME "dev_tape_perf"
++
++#define TAPE_RETRIES 2
++
++#define TAPE_DEF_BLOCK_SIZE 512
++
++/* The SILI bit in READ/VERIFY CDBs */
++#define SILI_BIT 2
++
++struct tape_params {
++ int block_size;
++};
++
++static int tape_attach(struct scst_device *);
++static void tape_detach(struct scst_device *);
++static int tape_parse(struct scst_cmd *);
++static int tape_done(struct scst_cmd *);
++static int tape_exec(struct scst_cmd *);
++
++static struct scst_dev_type tape_devtype = {
++ .name = TAPE_NAME,
++ .type = TYPE_TAPE,
++ .threads_num = 1,
++ .parse_atomic = 1,
++ .dev_done_atomic = 1,
++ .attach = tape_attach,
++ .detach = tape_detach,
++ .parse = tape_parse,
++ .dev_done = tape_done,
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
++ .trace_flags = &trace_flag,
++#endif
++};
++
++static struct scst_dev_type tape_devtype_perf = {
++ .name = TAPE_PERF_NAME,
++ .type = TYPE_TAPE,
++ .parse_atomic = 1,
++ .dev_done_atomic = 1,
++ .attach = tape_attach,
++ .detach = tape_detach,
++ .parse = tape_parse,
++ .dev_done = tape_done,
++ .exec = tape_exec,
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
++ .trace_flags = &trace_flag,
++#endif
++};
++
++static int __init init_scst_tape_driver(void)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ tape_devtype.module = THIS_MODULE;
++
++ res = scst_register_dev_driver(&tape_devtype);
++ if (res < 0)
++ goto out;
++
++ tape_devtype_perf.module = THIS_MODULE;
++
++ res = scst_register_dev_driver(&tape_devtype_perf);
++ if (res < 0)
++ goto out_unreg;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_unreg:
++ scst_unregister_dev_driver(&tape_devtype);
++ goto out;
++}
++
++static void __exit exit_scst_tape_driver(void)
++{
++ TRACE_ENTRY();
++
++ scst_unregister_dev_driver(&tape_devtype_perf);
++ scst_unregister_dev_driver(&tape_devtype);
++
++ TRACE_EXIT();
++ return;
++}
++
++module_init(init_scst_tape_driver);
++module_exit(exit_scst_tape_driver);
++
++/**************************************************************
++ * Function: tape_attach
++ *
++ * Argument:
++ *
++ * Returns : 1 if attached, error code otherwise
++ *
++ * Description:
++ *************************************************************/
++static int tape_attach(struct scst_device *dev)
++{
++ int res, rc;
++ int retries;
++ struct scsi_mode_data data;
++ const int buffer_size = 512;
++ uint8_t *buffer = NULL;
++ struct tape_params *params;
++
++ TRACE_ENTRY();
++
++ if (dev->scsi_dev == NULL ||
++ dev->scsi_dev->type != dev->type) {
++ PRINT_ERROR("%s", "SCSI device not defined or illegal type");
++ res = -ENODEV;
++ goto out;
++ }
++
++ params = kzalloc(sizeof(*params), GFP_KERNEL);
++ if (params == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "%s",
++ "Unable to allocate struct tape_params");
++ res = -ENOMEM;
++ goto out;
++ }
++
++ params->block_size = TAPE_DEF_BLOCK_SIZE;
++
++ buffer = kmalloc(buffer_size, GFP_KERNEL);
++ if (!buffer) {
++ TRACE(TRACE_OUT_OF_MEM, "%s", "Memory allocation failure");
++ res = -ENOMEM;
++ goto out_free_req;
++ }
++
++ retries = SCST_DEV_UA_RETRIES;
++ do {
++ TRACE_DBG("%s", "Doing TEST_UNIT_READY");
++ rc = scsi_test_unit_ready(dev->scsi_dev,
++ SCST_GENERIC_TAPE_SMALL_TIMEOUT, TAPE_RETRIES
++ , NULL);
++ TRACE_DBG("TEST_UNIT_READY done: %x", rc);
++ } while ((--retries > 0) && rc);
++
++ if (rc) {
++ PRINT_WARNING("Unit not ready: %x", rc);
++ /* Let's try not to be too smart and continue processing */
++ goto obtain;
++ }
++
++ TRACE_DBG("%s", "Doing MODE_SENSE");
++ rc = scsi_mode_sense(dev->scsi_dev,
++ ((dev->scsi_dev->scsi_level <= SCSI_2) ?
++ ((dev->scsi_dev->lun << 5) & 0xe0) : 0),
++ 0 /* Mode Page 0 */,
++ buffer, buffer_size,
++ SCST_GENERIC_TAPE_SMALL_TIMEOUT, TAPE_RETRIES,
++ &data, NULL);
++ TRACE_DBG("MODE_SENSE done: %x", rc);
++
++ if (rc == 0) {
++ int medium_type, mode, speed, density;
++ if (buffer[3] == 8) {
++ params->block_size = ((buffer[9] << 16) |
++ (buffer[10] << 8) |
++ (buffer[11] << 0));
++ } else
++ params->block_size = TAPE_DEF_BLOCK_SIZE;
++ medium_type = buffer[1];
++ mode = (buffer[2] & 0x70) >> 4;
++ speed = buffer[2] & 0x0f;
++ density = buffer[4];
++ TRACE_DBG("Tape: lun %d. bs %d. type 0x%02x mode 0x%02x "
++ "speed 0x%02x dens 0x%02x", dev->scsi_dev->lun,
++ params->block_size, medium_type, mode, speed, density);
++ } else {
++ PRINT_ERROR("MODE_SENSE failed: %x", rc);
++ res = -ENODEV;
++ goto out_free_buf;
++ }
++
++obtain:
++ res = scst_obtain_device_parameters(dev);
++ if (res != 0) {
++ PRINT_ERROR("Failed to obtain control parameters for device "
++ "%s", dev->virt_name);
++ goto out_free_buf;
++ }
++
++out_free_buf:
++ kfree(buffer);
++
++out_free_req:
++ if (res == 0)
++ dev->dh_priv = params;
++ else
++ kfree(params);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/************************************************************
++ * Function: tape_detach
++ *
++ * Argument:
++ *
++ * Returns : None
++ *
++ * Description: Called to detach this device type driver
++ ************************************************************/
++static void tape_detach(struct scst_device *dev)
++{
++ struct tape_params *params =
++ (struct tape_params *)dev->dh_priv;
++
++ TRACE_ENTRY();
++
++ kfree(params);
++ dev->dh_priv = NULL;
++
++ TRACE_EXIT();
++ return;
++}
++
++static int tape_get_block_size(struct scst_cmd *cmd)
++{
++ struct tape_params *params = (struct tape_params *)cmd->dev->dh_priv;
++ /*
++ * No need for locks here, since *_detach() cannot be called
++ * when there are existing commands.
++ */
++ return params->block_size;
++}
++
++/********************************************************************
++ * Function: tape_parse
++ *
++ * Argument:
++ *
++ * Returns : The state of the command
++ *
++ * Description: This does the parsing of the command
++ *
++ * Note: Not all states are allowed on return
++ ********************************************************************/
++static int tape_parse(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_DEFAULT;
++
++ scst_tape_generic_parse(cmd, tape_get_block_size);
++
++ cmd->retries = SCST_PASSTHROUGH_RETRIES;
++
++ return res;
++}
++
++static void tape_set_block_size(struct scst_cmd *cmd, int block_size)
++{
++ struct tape_params *params = (struct tape_params *)cmd->dev->dh_priv;
++ /*
++ * No need for locks here, since *_detach() cannot be called when
++ * there are existing commands.
++ */
++ params->block_size = block_size;
++ return;
++}
++
++/********************************************************************
++ * Function: tape_done
++ *
++ * Argument:
++ *
++ * Returns :
++ *
++ * Description: This is the completion routine for the command,
++ * it is used to extract any necessary information
++ * about a command.
++ ********************************************************************/
++static int tape_done(struct scst_cmd *cmd)
++{
++ int opcode = cmd->cdb[0];
++ int status = cmd->status;
++ int res = SCST_CMD_STATE_DEFAULT;
++
++ TRACE_ENTRY();
++
++ if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET))
++ res = scst_tape_generic_dev_done(cmd, tape_set_block_size);
++ else if ((status == SAM_STAT_CHECK_CONDITION) &&
++ SCST_SENSE_VALID(cmd->sense)) {
++ struct tape_params *params;
++
++ TRACE_DBG("Extended sense %x", cmd->sense[0] & 0x7F);
++
++ if ((cmd->sense[0] & 0x7F) != 0x70) {
++ PRINT_ERROR("Sense format 0x%x is not supported",
++ cmd->sense[0] & 0x7F);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out;
++ }
++
++ if (opcode == READ_6 && !(cmd->cdb[1] & SILI_BIT) &&
++ (cmd->sense[2] & 0xe0)) {
++ /* EOF, EOM, or ILI */
++ int TransferLength, Residue = 0;
++ if ((cmd->sense[2] & 0x0f) == BLANK_CHECK)
++ /* No need for EOM in this case */
++ cmd->sense[2] &= 0xcf;
++ TransferLength = ((cmd->cdb[2] << 16) |
++ (cmd->cdb[3] << 8) | cmd->cdb[4]);
++ /* Compute the residual count */
++ if ((cmd->sense[0] & 0x80) != 0) {
++ Residue = ((cmd->sense[3] << 24) |
++ (cmd->sense[4] << 16) |
++ (cmd->sense[5] << 8) |
++ cmd->sense[6]);
++ }
++ TRACE_DBG("Checking the sense key "
++ "sn[2]=%x cmd->cdb[0,1]=%x,%x TransLen/Resid"
++ " %d/%d", (int)cmd->sense[2], cmd->cdb[0],
++ cmd->cdb[1], TransferLength, Residue);
++ if (TransferLength > Residue) {
++ int resp_data_len = TransferLength - Residue;
++ if (cmd->cdb[1] & SCST_TRANSFER_LEN_TYPE_FIXED) {
++ /*
++ * No need for locks here, since
++ * *_detach() cannot be called when
++ * there are existing commands.
++ */
++ params = (struct tape_params *)
++ cmd->dev->dh_priv;
++ resp_data_len *= params->block_size;
++ }
++ scst_set_resp_data_len(cmd, resp_data_len);
++ }
++ }
++ }
++
++out:
++ TRACE_DBG("cmd->is_send_status=%x, cmd->resp_data_len=%d, "
++ "res=%d", cmd->is_send_status, cmd->resp_data_len, res);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/********************************************************************
++ * Function: tape_exec
++ *
++ * Argument:
++ *
++ * Returns :
++ *
++ * Description: Make SCST do nothing for data READs and WRITES.
++ * Intended for raw line performance testing
++ ********************************************************************/
++static int tape_exec(struct scst_cmd *cmd)
++{
++ int res = SCST_EXEC_NOT_COMPLETED, rc;
++ int opcode = cmd->cdb[0];
++
++ TRACE_ENTRY();
++
++ rc = scst_check_local_events(cmd);
++ if (unlikely(rc != 0))
++ goto out_done;
++
++ cmd->status = 0;
++ cmd->msg_status = 0;
++ cmd->host_status = DID_OK;
++ cmd->driver_status = 0;
++
++ switch (opcode) {
++ case WRITE_6:
++ case READ_6:
++ cmd->completed = 1;
++ goto out_done;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_done:
++ res = SCST_EXEC_COMPLETED;
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
++ goto out;
++}
++
++MODULE_AUTHOR("Vladislav Bolkhovitin & Leonid Stoljar");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("SCSI tape (type 1) dev handler for SCST");
++MODULE_VERSION(SCST_VERSION_STRING);
+diff -uprN orig/linux-2.6.36/drivers/scst/fcst/Makefile linux-2.6.36/drivers/scst/fcst/Makefile
+--- orig/linux-2.6.36/drivers/scst/fcst/Makefile
++++ linux-2.6.36/drivers/scst/fcst/Makefile
+@@ -0,0 +1,7 @@
++obj-$(CONFIG_FCST) += fcst.o
++
++fcst-objs := \
++ ft_cmd.o \
++ ft_io.o \
++ ft_scst.o \
++ ft_sess.o
+diff -uprN orig/linux-2.6.36/drivers/scst/fcst/Kconfig linux-2.6.36/drivers/scst/fcst/Kconfig
+--- orig/linux-2.6.36/drivers/scst/fcst/Kconfig
++++ linux-2.6.36/drivers/scst/fcst/Kconfig
+@@ -0,0 +1,5 @@
++config FCST
++ tristate "SCST target module for Fibre Channel using libfc"
++ depends on LIBFC && SCST
++ ---help---
++ Supports using libfc HBAs as target adapters with SCST
+diff -uprN orig/linux-2.6.36/drivers/scst/fcst/fcst.h linux-2.6.36/drivers/scst/fcst/fcst.h
+--- orig/linux-2.6.36/drivers/scst/fcst/fcst.h
++++ linux-2.6.36/drivers/scst/fcst/fcst.h
+@@ -0,0 +1,151 @@
++/*
++ * Copyright (c) 2010 Cisco Systems, Inc.
++ *
++ * This program is free software; you may redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
++ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ * $Id$
++ */
++#ifndef __SCSI_FCST_H__
++#define __SCSI_FCST_H__
++
++#include <scst/scst.h>
++
++#define FT_VERSION "0.3"
++#define FT_MODULE "fcst"
++
++#define FT_MAX_HW_PENDING_TIME 20 /* max I/O time in seconds */
++
++/*
++ * Debug options.
++ */
++#define FT_DEBUG_CONF 0x01 /* configuration messages */
++#define FT_DEBUG_SESS 0x02 /* session messages */
++#define FT_DEBUG_IO 0x04 /* I/O operations */
++
++extern unsigned int ft_debug_logging; /* debug options */
++
++#define FT_ERR(fmt, args...) \
++ printk(KERN_ERR FT_MODULE ": %s: " fmt, __func__, ##args)
++
++#define FT_DEBUG(mask, fmt, args...) \
++ do { \
++ if (ft_debug_logging & (mask)) \
++ printk(KERN_INFO FT_MODULE ": %s: " fmt, \
++ __func__, ##args); \
++ } while (0)
++
++#define FT_CONF_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_CONF, fmt, ##args)
++#define FT_SESS_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_SESS, fmt, ##args)
++#define FT_IO_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_IO, fmt, ##args)
++
++#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */
++
++/*
++ * Session (remote port).
++ */
++struct ft_sess {
++ u32 port_id; /* for hash lookup use only */
++ u32 params;
++ u16 max_payload; /* max transmitted payload size */
++ u32 max_lso_payload; /* max offloaded payload size */
++ u64 port_name; /* port name for transport ID */
++ struct ft_tport *tport;
++ struct hlist_node hash; /* linkage in ft_sess_hash table */
++ struct rcu_head rcu;
++ struct kref kref; /* ref for hash and outstanding I/Os */
++ struct scst_session *scst_sess;
++};
++
++/*
++ * Hash table of sessions per local port.
++ * Hash lookup by remote port FC_ID.
++ */
++#define FT_SESS_HASH_BITS 6
++#define FT_SESS_HASH_SIZE (1 << FT_SESS_HASH_BITS)
++
++/*
++ * Per local port data.
++ * This is created when the first session logs into the local port.
++ * Deleted when tpg is deleted or last session is logged off.
++ */
++struct ft_tport {
++ u32 sess_count; /* number of sessions in hash */
++ u8 enabled:1;
++ struct rcu_head rcu;
++ struct hlist_head hash[FT_SESS_HASH_SIZE]; /* list of sessions */
++ struct fc_lport *lport;
++ struct scst_tgt *tgt;
++};
++
++/*
++ * Commands
++ */
++struct ft_cmd {
++ int serial; /* order received, for debugging */
++ struct fc_seq *seq; /* sequence in exchange mgr */
++ struct fc_frame *req_frame; /* original request frame */
++ u32 write_data_len; /* data received from initiator */
++ u32 read_data_len; /* data sent to initiator */
++ u32 xfer_rdy_len; /* max xfer ready offset */
++ u32 max_lso_payload; /* max offloaded (LSO) data payload */
++ u16 max_payload; /* max transmitted data payload */
++ struct scst_cmd *scst_cmd;
++};
++
++extern struct list_head ft_lport_list;
++extern struct mutex ft_lport_lock;
++extern struct scst_tgt_template ft_scst_template;
++
++/*
++ * libfc interface.
++ */
++int ft_prli(struct fc_rport_priv *, u32 spp_len,
++ const struct fc_els_spp *, struct fc_els_spp *);
++void ft_prlo(struct fc_rport_priv *);
++void ft_recv(struct fc_lport *, struct fc_seq *, struct fc_frame *);
++
++/*
++ * SCST interface.
++ */
++int ft_send_response(struct scst_cmd *);
++int ft_send_xfer_rdy(struct scst_cmd *);
++void ft_cmd_timeout(struct scst_cmd *);
++void ft_cmd_free(struct scst_cmd *);
++void ft_cmd_tm_done(struct scst_mgmt_cmd *);
++int ft_tgt_detect(struct scst_tgt_template *);
++int ft_tgt_release(struct scst_tgt *);
++int ft_tgt_enable(struct scst_tgt *, bool);
++bool ft_tgt_enabled(struct scst_tgt *);
++int ft_report_aen(struct scst_aen *);
++int ft_get_transport_id(struct scst_session *, uint8_t **);
++
++/*
++ * Session interface.
++ */
++int ft_lport_notify(struct notifier_block *, unsigned long, void *);
++void ft_lport_add(struct fc_lport *, void *);
++void ft_lport_del(struct fc_lport *, void *);
++
++/*
++ * other internal functions.
++ */
++int ft_thread(void *);
++void ft_recv_req(struct ft_sess *, struct fc_seq *, struct fc_frame *);
++void ft_recv_write_data(struct scst_cmd *, struct fc_frame *);
++int ft_send_read_data(struct scst_cmd *);
++struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
++struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
++void ft_cmd_dump(struct scst_cmd *, const char *);
++
++#endif /* __SCSI_FCST_H__ */
+diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_cmd.c linux-2.6.36/drivers/scst/fcst/ft_cmd.c
+--- orig/linux-2.6.36/drivers/scst/fcst/ft_cmd.c
++++ linux-2.6.36/drivers/scst/fcst/ft_cmd.c
+@@ -0,0 +1,686 @@
++/*
++ * Copyright (c) 2010 Cisco Systems, Inc.
++ *
++ * This program is free software; you may redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
++ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ */
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <scsi/libfc.h>
++#include <scsi/fc_encode.h>
++#include "fcst.h"
++
++/*
++ * Append string to buffer safely.
++ * Also prepends a space if there's already something in the buf.
++ */
++static void ft_cmd_flag(char *buf, size_t len, const char *desc)
++{
++ if (buf[0])
++ strlcat(buf, " ", len);
++ strlcat(buf, desc, len);
++}
++
++/*
++ * Debug: dump command.
++ */
++void ft_cmd_dump(struct scst_cmd *cmd, const char *caller)
++{
++ static atomic_t serial;
++ struct ft_cmd *fcmd;
++ struct fc_exch *ep;
++ char prefix[30];
++ char buf[150];
++
++ if (!(ft_debug_logging & FT_DEBUG_IO))
++ return;
++
++ fcmd = scst_cmd_get_tgt_priv(cmd);
++ ep = fc_seq_exch(fcmd->seq);
++ snprintf(prefix, sizeof(prefix), FT_MODULE ": cmd %2x",
++ atomic_inc_return(&serial) & 0xff);
++
++ printk(KERN_INFO "%s %s oid %x oxid %x resp_len %u\n",
++ prefix, caller, ep->oid, ep->oxid,
++ scst_cmd_get_resp_data_len(cmd));
++ printk(KERN_INFO "%s scst_cmd %p wlen %u rlen %u\n",
++ prefix, cmd, fcmd->write_data_len, fcmd->read_data_len);
++ printk(KERN_INFO "%s exp_dir %x exp_xfer_len %d exp_in_len %d\n",
++ prefix, cmd->expected_data_direction,
++ cmd->expected_transfer_len, cmd->expected_out_transfer_len);
++ printk(KERN_INFO "%s dir %x data_len %d bufflen %d out_bufflen %d\n",
++ prefix, cmd->data_direction, cmd->data_len,
++ cmd->bufflen, cmd->out_bufflen);
++ printk(KERN_INFO "%s sg_cnt reg %d in %d tgt %d tgt_in %d\n",
++ prefix, cmd->sg_cnt, cmd->out_sg_cnt,
++ cmd->tgt_sg_cnt, cmd->tgt_out_sg_cnt);
++
++ buf[0] = '\0';
++ if (cmd->sent_for_exec)
++ ft_cmd_flag(buf, sizeof(buf), "sent");
++ if (cmd->completed)
++ ft_cmd_flag(buf, sizeof(buf), "comp");
++ if (cmd->ua_ignore)
++ ft_cmd_flag(buf, sizeof(buf), "ua_ign");
++ if (cmd->atomic)
++ ft_cmd_flag(buf, sizeof(buf), "atom");
++ if (cmd->double_ua_possible)
++ ft_cmd_flag(buf, sizeof(buf), "dbl_ua_poss");
++ if (cmd->is_send_status)
++ ft_cmd_flag(buf, sizeof(buf), "send_stat");
++ if (cmd->retry)
++ ft_cmd_flag(buf, sizeof(buf), "retry");
++ if (cmd->internal)
++ ft_cmd_flag(buf, sizeof(buf), "internal");
++ if (cmd->unblock_dev)
++ ft_cmd_flag(buf, sizeof(buf), "unblock_dev");
++ if (cmd->cmd_hw_pending)
++ ft_cmd_flag(buf, sizeof(buf), "hw_pend");
++ if (cmd->tgt_need_alloc_data_buf)
++ ft_cmd_flag(buf, sizeof(buf), "tgt_need_alloc");
++ if (cmd->tgt_data_buf_alloced)
++ ft_cmd_flag(buf, sizeof(buf), "tgt_alloced");
++ if (cmd->dh_data_buf_alloced)
++ ft_cmd_flag(buf, sizeof(buf), "dh_alloced");
++ if (cmd->expected_values_set)
++ ft_cmd_flag(buf, sizeof(buf), "exp_val");
++ if (cmd->sg_buff_modified)
++ ft_cmd_flag(buf, sizeof(buf), "sg_buf_mod");
++ if (cmd->preprocessing_only)
++ ft_cmd_flag(buf, sizeof(buf), "pre_only");
++ if (cmd->sn_set)
++ ft_cmd_flag(buf, sizeof(buf), "sn_set");
++ if (cmd->hq_cmd_inced)
++ ft_cmd_flag(buf, sizeof(buf), "hq_cmd_inc");
++ if (cmd->set_sn_on_restart_cmd)
++ ft_cmd_flag(buf, sizeof(buf), "set_sn_on_restart");
++ if (cmd->no_sgv)
++ ft_cmd_flag(buf, sizeof(buf), "no_sgv");
++ if (cmd->may_need_dma_sync)
++ ft_cmd_flag(buf, sizeof(buf), "dma_sync");
++ if (cmd->out_of_sn)
++ ft_cmd_flag(buf, sizeof(buf), "oo_sn");
++ if (cmd->inc_expected_sn_on_done)
++ ft_cmd_flag(buf, sizeof(buf), "inc_sn_exp");
++ if (cmd->done)
++ ft_cmd_flag(buf, sizeof(buf), "done");
++ if (cmd->finished)
++ ft_cmd_flag(buf, sizeof(buf), "fin");
++
++ printk(KERN_INFO "%s flags %s\n", prefix, buf);
++ printk(KERN_INFO "%s lun %lld sn %d tag %lld cmd_flags %lx\n",
++ prefix, cmd->lun, cmd->sn, cmd->tag, cmd->cmd_flags);
++ printk(KERN_INFO "%s tgt_sn %d op_flags %x op %s\n",
++ prefix, cmd->tgt_sn, cmd->op_flags, cmd->op_name);
++ printk(KERN_INFO "%s status %x msg_status %x "
++ "host_status %x driver_status %x\n",
++ prefix, cmd->status, cmd->msg_status,
++ cmd->host_status, cmd->driver_status);
++ printk(KERN_INFO "%s cdb_len %d ext_cdb_len %u\n",
++ prefix, cmd->cdb_len, cmd->ext_cdb_len);
++ snprintf(buf, sizeof(buf), "%s cdb ", prefix);
++ print_hex_dump(KERN_INFO, buf, DUMP_PREFIX_NONE,
++ 16, 4, cmd->cdb, SCST_MAX_CDB_SIZE, 0);
++}
++
++/*
++ * Debug: dump mgmt command.
++ */
++static void ft_cmd_tm_dump(struct scst_mgmt_cmd *mcmd, const char *caller)
++{
++ struct ft_cmd *fcmd;
++ struct fc_exch *ep;
++ char prefix[30];
++ char buf[150];
++
++ if (!(ft_debug_logging & FT_DEBUG_IO))
++ return;
++ fcmd = scst_mgmt_cmd_get_tgt_priv(mcmd);
++ ep = fc_seq_exch(fcmd->seq);
++
++ snprintf(prefix, sizeof(prefix), FT_MODULE ": mcmd");
++
++ printk(KERN_INFO "%s %s oid %x oxid %x lun %lld\n",
++ prefix, caller, ep->oid, ep->oxid,
++ (unsigned long long)mcmd->lun);
++ printk(KERN_INFO "%s state %d fn %d fin_wait %d done_wait %d comp %d\n",
++ prefix, mcmd->state, mcmd->fn,
++ mcmd->cmd_finish_wait_count, mcmd->cmd_done_wait_count,
++ mcmd->completed_cmd_count);
++ buf[0] = '\0';
++ if (mcmd->needs_unblocking)
++ ft_cmd_flag(buf, sizeof(buf), "needs_unblock");
++ if (mcmd->lun_set)
++ ft_cmd_flag(buf, sizeof(buf), "lun_set");
++ if (mcmd->cmd_sn_set)
++ ft_cmd_flag(buf, sizeof(buf), "cmd_sn_set");
++ printk(KERN_INFO "%s flags %s\n", prefix, buf);
++ if (mcmd->cmd_to_abort)
++ ft_cmd_dump(mcmd->cmd_to_abort, caller);
++}
++
++/*
++ * Free command.
++ */
++void ft_cmd_free(struct scst_cmd *cmd)
++{
++ struct ft_cmd *fcmd;
++
++ fcmd = scst_cmd_get_tgt_priv(cmd);
++ if (fcmd) {
++ scst_cmd_set_tgt_priv(cmd, NULL);
++ fc_frame_free(fcmd->req_frame);
++ kfree(fcmd);
++ }
++}
++
++/*
++ * Send response, after data if applicable.
++ */
++int ft_send_response(struct scst_cmd *cmd)
++{
++ struct ft_cmd *fcmd;
++ struct fc_frame *fp;
++ struct fcp_resp_with_ext *fcp;
++ struct fc_lport *lport;
++ struct fc_exch *ep;
++ unsigned int slen;
++ size_t len;
++ int resid = 0;
++ int bi_resid = 0;
++ int error;
++ int dir;
++ u32 status;
++
++ ft_cmd_dump(cmd, __func__);
++ fcmd = scst_cmd_get_tgt_priv(cmd);
++ ep = fc_seq_exch(fcmd->seq);
++ lport = ep->lp;
++
++ if (scst_cmd_aborted(cmd)) {
++ FT_IO_DBG("cmd aborted did %x oxid %x\n", ep->did, ep->oxid);
++ scst_set_delivery_status(cmd, SCST_CMD_DELIVERY_ABORTED);
++ goto done;
++ }
++
++ if (!scst_cmd_get_is_send_status(cmd)) {
++ FT_IO_DBG("send status not set. feature not implemented\n");
++ return SCST_TGT_RES_FATAL_ERROR;
++ }
++
++ status = scst_cmd_get_status(cmd);
++ dir = scst_cmd_get_data_direction(cmd);
++
++ slen = scst_cmd_get_sense_buffer_len(cmd);
++ len = sizeof(*fcp) + slen;
++
++ /*
++ * Send read data and set underflow/overflow residual count.
++ * For bi-directional commands, the bi_resid is for the read direction.
++ */
++ if (dir & SCST_DATA_WRITE)
++ resid = (signed)scst_cmd_get_bufflen(cmd) -
++ fcmd->write_data_len;
++ if (dir & SCST_DATA_READ) {
++ error = ft_send_read_data(cmd);
++ if (error) {
++ FT_ERR("ft_send_read_data returned %d\n", error);
++ return error;
++ }
++
++ if (dir == SCST_DATA_BIDI) {
++ bi_resid = (signed)scst_cmd_get_out_bufflen(cmd) -
++ scst_cmd_get_resp_data_len(cmd);
++ if (bi_resid)
++ len += sizeof(__be32);
++ } else
++ resid = (signed)scst_cmd_get_bufflen(cmd) -
++ scst_cmd_get_resp_data_len(cmd);
++ }
++
++ fp = fc_frame_alloc(lport, len);
++ if (!fp)
++ return SCST_TGT_RES_QUEUE_FULL;
++
++ fcp = fc_frame_payload_get(fp, len);
++ memset(fcp, 0, sizeof(*fcp));
++ fcp->resp.fr_status = status;
++
++ if (slen) {
++ fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
++ fcp->ext.fr_sns_len = htonl(slen);
++ memcpy(fcp + 1, scst_cmd_get_sense_buffer(cmd), slen);
++ }
++ if (bi_resid) {
++ if (bi_resid < 0) {
++ fcp->resp.fr_flags |= FCP_BIDI_READ_OVER;
++ bi_resid = -bi_resid;
++ } else
++ fcp->resp.fr_flags |= FCP_BIDI_READ_UNDER;
++ *(__be32 *)((u8 *)(fcp + 1) + slen) = htonl(bi_resid);
++ }
++ if (resid) {
++ if (resid < 0) {
++ resid = -resid;
++ fcp->resp.fr_flags |= FCP_RESID_OVER;
++ } else
++ fcp->resp.fr_flags |= FCP_RESID_UNDER;
++ fcp->ext.fr_resid = htonl(resid);
++ }
++ FT_IO_DBG("response did %x oxid %x\n", ep->did, ep->oxid);
++
++ /*
++ * Send response.
++ */
++ fcmd->seq = lport->tt.seq_start_next(fcmd->seq);
++ fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
++ FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
++
++ lport->tt.seq_send(lport, fcmd->seq, fp);
++done:
++ lport->tt.exch_done(fcmd->seq);
++ scst_tgt_cmd_done(cmd, SCST_CONTEXT_SAME);
++ return SCST_TGT_RES_SUCCESS;
++}
++
++/*
++ * FC sequence response handler for follow-on sequences (data) and aborts.
++ */
++static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
++{
++ struct scst_cmd *cmd = arg;
++ struct fc_frame_header *fh;
++
++ /*
++ * If an error is being reported, it must be FC_EX_CLOSED.
++ * Timeouts don't occur on incoming requests, and there are
++ * currently no other errors.
++ * The PRLO handler will also be called by libfc to delete
++ * the session and all pending commands, so we ignore this response.
++ */
++ if (IS_ERR(fp)) {
++ FT_IO_DBG("exchange error %ld - not handled\n", -PTR_ERR(fp));
++ return;
++ }
++
++ fh = fc_frame_header_get(fp);
++ switch (fh->fh_r_ctl) {
++ case FC_RCTL_DD_SOL_DATA: /* write data */
++ ft_recv_write_data(cmd, fp);
++ break;
++ case FC_RCTL_DD_UNSOL_CTL: /* command */
++ case FC_RCTL_DD_SOL_CTL: /* transfer ready */
++ case FC_RCTL_DD_DATA_DESC: /* transfer ready */
++ default:
++ printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
++ __func__, fh->fh_r_ctl);
++ fc_frame_free(fp);
++ break;
++ }
++}
++
++/*
++ * Command timeout.
++ * SCST calls this when the command has taken too long in the device handler.
++ */
++void ft_cmd_timeout(struct scst_cmd *cmd)
++{
++ FT_IO_DBG("timeout not implemented\n"); /* XXX TBD */
++}
++
++/*
++ * Send TX_RDY (transfer ready).
++ */
++static int ft_send_xfer_rdy_off(struct scst_cmd *cmd, u32 offset, u32 len)
++{
++ struct ft_cmd *fcmd;
++ struct fc_frame *fp;
++ struct fcp_txrdy *txrdy;
++ struct fc_lport *lport;
++ struct fc_exch *ep;
++
++ fcmd = scst_cmd_get_tgt_priv(cmd);
++ if (fcmd->xfer_rdy_len < len + offset)
++ fcmd->xfer_rdy_len = len + offset;
++
++ ep = fc_seq_exch(fcmd->seq);
++ lport = ep->lp;
++ fp = fc_frame_alloc(lport, sizeof(*txrdy));
++ if (!fp)
++ return SCST_TGT_RES_QUEUE_FULL;
++
++ txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
++ memset(txrdy, 0, sizeof(*txrdy));
++ txrdy->ft_data_ro = htonl(offset);
++ txrdy->ft_burst_len = htonl(len);
++
++ fcmd->seq = lport->tt.seq_start_next(fcmd->seq);
++ fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
++ FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
++ lport->tt.seq_send(lport, fcmd->seq, fp);
++ return SCST_TGT_RES_SUCCESS;
++}
++
++/*
++ * Send TX_RDY (transfer ready).
++ */
++int ft_send_xfer_rdy(struct scst_cmd *cmd)
++{
++ return ft_send_xfer_rdy_off(cmd, 0, scst_cmd_get_bufflen(cmd));
++}
++
++/*
++ * Send a FCP response including SCSI status and optional FCP rsp_code.
++ * status is SAM_STAT_GOOD (zero) if code is valid.
++ * This is used in error cases, such as allocation failures.
++ */
++static void ft_send_resp_status(struct fc_seq *sp, u32 status,
++ enum fcp_resp_rsp_codes code)
++{
++ struct fc_frame *fp;
++ size_t len;
++ struct fcp_resp_with_ext *fcp;
++ struct fcp_resp_rsp_info *info;
++ struct fc_lport *lport;
++ struct fc_exch *ep;
++
++ ep = fc_seq_exch(sp);
++
++ FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n",
++ ep->did, ep->oxid, status, code);
++ lport = ep->lp;
++ len = sizeof(*fcp);
++ if (status == SAM_STAT_GOOD)
++ len += sizeof(*info);
++ fp = fc_frame_alloc(lport, len);
++ if (!fp)
++ goto out;
++ fcp = fc_frame_payload_get(fp, len);
++ memset(fcp, 0, len);
++ fcp->resp.fr_status = status;
++ if (status == SAM_STAT_GOOD) {
++ fcp->ext.fr_rsp_len = htonl(sizeof(*info));
++ fcp->resp.fr_flags |= FCP_RSP_LEN_VAL;
++ info = (struct fcp_resp_rsp_info *)(fcp + 1);
++ info->rsp_code = code;
++ }
++
++ sp = lport->tt.seq_start_next(sp);
++ fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
++ FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
++
++ lport->tt.seq_send(lport, sp, fp);
++out:
++ lport->tt.exch_done(sp);
++}
++
++/*
++ * Send error or task management response.
++ * Always frees the fcmd and associated state.
++ */
++static void ft_send_resp_code(struct ft_cmd *fcmd, enum fcp_resp_rsp_codes code)
++{
++ ft_send_resp_status(fcmd->seq, SAM_STAT_GOOD, code);
++ fc_frame_free(fcmd->req_frame);
++ kfree(fcmd);
++}
++
++void ft_cmd_tm_done(struct scst_mgmt_cmd *mcmd)
++{
++ struct ft_cmd *fcmd;
++ enum fcp_resp_rsp_codes code;
++
++ ft_cmd_tm_dump(mcmd, __func__);
++ fcmd = scst_mgmt_cmd_get_tgt_priv(mcmd);
++ switch (scst_mgmt_cmd_get_status(mcmd)) {
++ case SCST_MGMT_STATUS_SUCCESS:
++ code = FCP_TMF_CMPL;
++ break;
++ case SCST_MGMT_STATUS_REJECTED:
++ code = FCP_TMF_REJECTED;
++ break;
++ case SCST_MGMT_STATUS_LUN_NOT_EXIST:
++ code = FCP_TMF_INVALID_LUN;
++ break;
++ case SCST_MGMT_STATUS_TASK_NOT_EXIST:
++ case SCST_MGMT_STATUS_FN_NOT_SUPPORTED:
++ case SCST_MGMT_STATUS_FAILED:
++ default:
++ code = FCP_TMF_FAILED;
++ break;
++ }
++ FT_IO_DBG("tm cmd done fn %d code %d\n", mcmd->fn, code);
++ ft_send_resp_code(fcmd, code);
++}
++
++/*
++ * Handle an incoming FCP task management command frame.
++ * Note that this may be called directly from the softirq context.
++ */
++static void ft_recv_tm(struct scst_session *scst_sess,
++ struct ft_cmd *fcmd, struct fcp_cmnd *fcp)
++{
++ struct scst_rx_mgmt_params params;
++ int ret;
++
++ memset(&params, 0, sizeof(params));
++ params.lun = fcp->fc_lun;
++ params.lun_len = sizeof(fcp->fc_lun);
++ params.lun_set = 1;
++ params.atomic = SCST_ATOMIC;
++ params.tgt_priv = fcmd;
++
++ switch (fcp->fc_tm_flags) {
++ case FCP_TMF_LUN_RESET:
++ params.fn = SCST_LUN_RESET;
++ break;
++ case FCP_TMF_TGT_RESET:
++ params.fn = SCST_TARGET_RESET;
++ params.lun_set = 0;
++ break;
++ case FCP_TMF_CLR_TASK_SET:
++ params.fn = SCST_CLEAR_TASK_SET;
++ break;
++ case FCP_TMF_ABT_TASK_SET:
++ params.fn = SCST_ABORT_TASK_SET;
++ break;
++ case FCP_TMF_CLR_ACA:
++ params.fn = SCST_CLEAR_ACA;
++ break;
++ default:
++ /*
++ * FCP4r01 indicates having a combination of
++ * tm_flags set is invalid.
++ */
++ FT_IO_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
++ ft_send_resp_code(fcmd, FCP_CMND_FIELDS_INVALID);
++ return;
++ }
++ FT_IO_DBG("submit tm cmd fn %d\n", params.fn);
++ ret = scst_rx_mgmt_fn(scst_sess, &params);
++ FT_IO_DBG("scst_rx_mgmt_fn ret %d\n", ret);
++ if (ret)
++ ft_send_resp_code(fcmd, FCP_TMF_FAILED);
++}
++
++/*
++ * Handle an incoming FCP command frame.
++ * Note that this may be called directly from the softirq context.
++ */
++static void ft_recv_cmd(struct ft_sess *sess, struct fc_seq *sp,
++ struct fc_frame *fp)
++{
++ static atomic_t serial;
++ struct scst_cmd *cmd;
++ struct ft_cmd *fcmd;
++ struct fcp_cmnd *fcp;
++ struct fc_lport *lport;
++ int data_dir;
++ u32 data_len;
++ int cdb_len;
++
++ lport = fc_seq_exch(sp)->lp;
++ fcmd = kzalloc(sizeof(*fcmd), GFP_ATOMIC);
++ if (!fcmd)
++ goto busy;
++ fcmd->serial = atomic_inc_return(&serial); /* debug only */
++ fcmd->seq = sp;
++ fcmd->max_payload = sess->max_payload;
++ fcmd->max_lso_payload = sess->max_lso_payload;
++ fcmd->req_frame = fp;
++
++ fcp = fc_frame_payload_get(fp, sizeof(*fcp));
++ if (!fcp)
++ goto err;
++ if (fcp->fc_tm_flags) {
++ ft_recv_tm(sess->scst_sess, fcmd, fcp);
++ return;
++ }
++
++ /*
++ * re-check length including specified CDB length.
++ * data_len is just after the CDB.
++ */
++ cdb_len = fcp->fc_flags & FCP_CFL_LEN_MASK;
++ fcp = fc_frame_payload_get(fp, sizeof(*fcp) + cdb_len);
++ if (!fcp)
++ goto err;
++ cdb_len += sizeof(fcp->fc_cdb);
++ data_len = ntohl(*(__be32 *)(fcp->fc_cdb + cdb_len));
++
++ cmd = scst_rx_cmd(sess->scst_sess, fcp->fc_lun, sizeof(fcp->fc_lun),
++ fcp->fc_cdb, cdb_len, SCST_ATOMIC);
++ if (!cmd) {
++ kfree(fcmd);
++ goto busy;
++ }
++ fcmd->scst_cmd = cmd;
++ scst_cmd_set_tgt_priv(cmd, fcmd);
++
++ switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
++ case 0:
++ data_dir = SCST_DATA_NONE;
++ break;
++ case FCP_CFL_RDDATA:
++ data_dir = SCST_DATA_READ;
++ break;
++ case FCP_CFL_WRDATA:
++ data_dir = SCST_DATA_WRITE;
++ break;
++ case FCP_CFL_RDDATA | FCP_CFL_WRDATA:
++ data_dir = SCST_DATA_BIDI;
++ break;
++ }
++ scst_cmd_set_expected(cmd, data_dir, data_len);
++
++ switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
++ case FCP_PTA_SIMPLE:
++ scst_cmd_set_queue_type(cmd, SCST_CMD_QUEUE_SIMPLE);
++ break;
++ case FCP_PTA_HEADQ:
++ scst_cmd_set_queue_type(cmd, SCST_CMD_QUEUE_HEAD_OF_QUEUE);
++ break;
++ case FCP_PTA_ACA:
++ scst_cmd_set_queue_type(cmd, SCST_CMD_QUEUE_ACA);
++ break;
++ case FCP_PTA_ORDERED:
++ default:
++ scst_cmd_set_queue_type(cmd, SCST_CMD_QUEUE_ORDERED);
++ break;
++ }
++
++ lport->tt.seq_set_resp(sp, ft_recv_seq, cmd);
++ scst_cmd_init_done(cmd, SCST_CONTEXT_THREAD);
++ return;
++
++err:
++ ft_send_resp_code(fcmd, FCP_CMND_FIELDS_INVALID);
++ return;
++
++busy:
++ FT_IO_DBG("cmd allocation failure - sending BUSY\n");
++ ft_send_resp_status(sp, SAM_STAT_BUSY, 0);
++ fc_frame_free(fp);
++}
++
++/*
++ * Send FCP ELS-4 Reject.
++ */
++static void ft_cmd_ls_rjt(struct fc_seq *sp, enum fc_els_rjt_reason reason,
++ enum fc_els_rjt_explan explan)
++{
++ struct fc_frame *fp;
++ struct fc_els_ls_rjt *rjt;
++ struct fc_lport *lport;
++ struct fc_exch *ep;
++
++ ep = fc_seq_exch(sp);
++ lport = ep->lp;
++ fp = fc_frame_alloc(lport, sizeof(*rjt));
++ if (!fp)
++ return;
++
++ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
++ memset(rjt, 0, sizeof(*rjt));
++ rjt->er_cmd = ELS_LS_RJT;
++ rjt->er_reason = reason;
++ rjt->er_explan = explan;
++
++ sp = lport->tt.seq_start_next(sp);
++ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, FC_TYPE_FCP,
++ FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_LAST_SEQ, 0);
++ lport->tt.seq_send(lport, sp, fp);
++}
++
++/*
++ * Handle an incoming FCP ELS-4 command frame.
++ * Note that this may be called directly from the softirq context.
++ */
++static void ft_recv_els4(struct ft_sess *sess, struct fc_seq *sp,
++ struct fc_frame *fp)
++{
++ u8 op = fc_frame_payload_op(fp);
++
++ switch (op) {
++ case ELS_SRR: /* TBD */
++ default:
++ FT_IO_DBG("unsupported ELS-4 op %x\n", op);
++ ft_cmd_ls_rjt(sp, ELS_RJT_INVAL, ELS_EXPL_NONE);
++ fc_frame_free(fp);
++ break;
++ }
++}
++
++/*
++ * Handle an incoming FCP frame.
++ * Note that this may be called directly from the softirq context.
++ */
++void ft_recv_req(struct ft_sess *sess, struct fc_seq *sp, struct fc_frame *fp)
++{
++ struct fc_frame_header *fh = fc_frame_header_get(fp);
++
++ switch (fh->fh_r_ctl) {
++ case FC_RCTL_DD_UNSOL_CMD:
++ ft_recv_cmd(sess, sp, fp);
++ break;
++ case FC_RCTL_ELS4_REQ:
++ ft_recv_els4(sess, sp, fp);
++ break;
++ default:
++ printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
++ __func__, fh->fh_r_ctl);
++ fc_frame_free(fp);
++ sess->tport->lport->tt.exch_done(sp);
++ break;
++ }
++}
+diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_io.c linux-2.6.36/drivers/scst/fcst/ft_io.c
+--- orig/linux-2.6.36/drivers/scst/fcst/ft_io.c
++++ linux-2.6.36/drivers/scst/fcst/ft_io.c
+@@ -0,0 +1,272 @@
++/*
++ * Copyright (c) 2010 Cisco Systems, Inc.
++ *
++ * Portions based on drivers/scsi/libfc/fc_fcp.c and subject to the following:
++ *
++ * Copyright (c) 2007 Intel Corporation. All rights reserved.
++ * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
++ * Copyright (c) 2008 Mike Christie
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <scsi/libfc.h>
++#include <scsi/fc_encode.h>
++#include "fcst.h"
++
++/*
++ * Receive write data frame.
++ */
++void ft_recv_write_data(struct scst_cmd *cmd, struct fc_frame *fp)
++{
++ struct ft_cmd *fcmd;
++ struct fc_frame_header *fh;
++ unsigned int bufflen;
++ u32 rel_off;
++ size_t frame_len;
++ size_t mem_len;
++ size_t tlen;
++ void *from;
++ void *to;
++ int dir;
++ u8 *buf;
++
++ dir = scst_cmd_get_data_direction(cmd);
++ if (dir == SCST_DATA_BIDI) {
++ mem_len = scst_get_out_buf_first(cmd, &buf);
++ bufflen = scst_cmd_get_out_bufflen(cmd);
++ } else {
++ mem_len = scst_get_buf_first(cmd, &buf);
++ bufflen = scst_cmd_get_bufflen(cmd);
++ }
++ to = buf;
++
++ fcmd = scst_cmd_get_tgt_priv(cmd);
++ fh = fc_frame_header_get(fp);
++ frame_len = fr_len(fp);
++ rel_off = ntohl(fh->fh_parm_offset);
++
++ FT_IO_DBG("sid %x oxid %x payload_len %zd rel_off %x\n",
++ ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id),
++ frame_len - sizeof(*fh), rel_off);
++
++ if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
++ goto drop;
++ if (frame_len <= sizeof(*fh))
++ goto drop;
++ frame_len -= sizeof(*fh);
++ from = fc_frame_payload_get(fp, 0);
++
++ if (rel_off >= bufflen)
++ goto drop;
++ if (frame_len + rel_off > bufflen)
++ frame_len = bufflen - rel_off;
++
++ while (frame_len) {
++ if (!mem_len) {
++ if (dir == SCST_DATA_BIDI) {
++ scst_put_out_buf(cmd, buf);
++ mem_len = scst_get_out_buf_next(cmd, &buf);
++ } else {
++ scst_put_buf(cmd, buf);
++ mem_len = scst_get_buf_next(cmd, &buf);
++ }
++ to = buf;
++ if (!mem_len)
++ break;
++ }
++ if (rel_off) {
++ if (rel_off >= mem_len) {
++ rel_off -= mem_len;
++ mem_len = 0;
++ continue;
++ }
++ mem_len -= rel_off;
++ to += rel_off;
++ rel_off = 0;
++ }
++
++ tlen = min(mem_len, frame_len);
++ memcpy(to, from, tlen);
++
++ from += tlen;
++ frame_len -= tlen;
++ mem_len -= tlen;
++ to += tlen;
++ fcmd->write_data_len += tlen;
++ }
++ if (mem_len) {
++ if (dir == SCST_DATA_BIDI)
++ scst_put_out_buf(cmd, buf);
++ else
++ scst_put_buf(cmd, buf);
++ }
++ if (fcmd->write_data_len == cmd->data_len)
++ scst_rx_data(cmd, SCST_RX_STATUS_SUCCESS, SCST_CONTEXT_THREAD);
++drop:
++ fc_frame_free(fp);
++}
++
++/*
++ * Send read data back to initiator.
++ */
++int ft_send_read_data(struct scst_cmd *cmd)
++{
++ struct ft_cmd *fcmd;
++ struct fc_frame *fp = NULL;
++ struct fc_exch *ep;
++ struct fc_lport *lport;
++ size_t remaining;
++ u32 fh_off = 0;
++ u32 frame_off;
++ size_t frame_len = 0;
++ size_t mem_len;
++ u32 mem_off;
++ size_t tlen;
++ struct page *page;
++ int use_sg;
++ int error;
++ void *to = NULL;
++ u8 *from = NULL;
++ int loop_limit = 10000;
++
++ fcmd = scst_cmd_get_tgt_priv(cmd);
++ ep = fc_seq_exch(fcmd->seq);
++ lport = ep->lp;
++
++ frame_off = fcmd->read_data_len;
++ tlen = scst_cmd_get_resp_data_len(cmd);
++ FT_IO_DBG("oid %x oxid %x resp_len %zd frame_off %u\n",
++ ep->oid, ep->oxid, tlen, frame_off);
++ if (tlen <= frame_off)
++ return SCST_TGT_RES_SUCCESS;
++ remaining = tlen - frame_off;
++ if (remaining > UINT_MAX)
++ FT_ERR("oid %x oxid %x resp_len %zd frame_off %u\n",
++ ep->oid, ep->oxid, tlen, frame_off);
++
++ mem_len = scst_get_buf_first(cmd, &from);
++ mem_off = 0;
++ if (!mem_len) {
++ FT_IO_DBG("mem_len 0\n");
++ return SCST_TGT_RES_SUCCESS;
++ }
++ FT_IO_DBG("sid %x oxid %x mem_len %zd frame_off %u remaining %zd\n",
++ ep->sid, ep->oxid, mem_len, frame_off, remaining);
++
++ /*
++ * If we've already transferred some of the data, skip through
++ * the buffer over the data already sent and continue with the
++ * same sequence. Otherwise, get a new sequence for the data.
++ */
++ if (frame_off) {
++ tlen = frame_off;
++ while (mem_len <= tlen) {
++ tlen -= mem_len;
++ scst_put_buf(cmd, from);
++ mem_len = scst_get_buf_next(cmd, &from);
++ if (!mem_len)
++ return SCST_TGT_RES_SUCCESS;
++ }
++ mem_len -= tlen;
++ mem_off = tlen;
++ } else
++ fcmd->seq = lport->tt.seq_start_next(fcmd->seq);
++
++ /* no scatter/gather in skb for odd word length due to fc_seq_send() */
++ use_sg = !(remaining % 4) && lport->sg_supp;
++
++ while (remaining) {
++ if (!loop_limit) {
++ FT_ERR("hit loop limit. remaining %zx mem_len %zx "
++ "frame_len %zx tlen %zx\n",
++ remaining, mem_len, frame_len, tlen);
++ break;
++ }
++ loop_limit--;
++ if (!mem_len) {
++ scst_put_buf(cmd, from);
++ mem_len = scst_get_buf_next(cmd, &from);
++ mem_off = 0;
++ if (!mem_len) {
++ FT_ERR("mem_len 0 from get_buf_next\n");
++ break;
++ }
++ }
++ if (!frame_len) {
++ frame_len = fcmd->max_lso_payload;
++ frame_len = min(frame_len, remaining);
++ fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
++ if (!fp) {
++ FT_IO_DBG("frame_alloc failed. "
++ "use_sg %d frame_len %zd\n",
++ use_sg, frame_len);
++ break;
++ }
++ fr_max_payload(fp) = fcmd->max_payload;
++ to = fc_frame_payload_get(fp, 0);
++ fh_off = frame_off;
++ frame_off += frame_len;
++ }
++ tlen = min(mem_len, frame_len);
++ BUG_ON(!tlen);
++ BUG_ON(tlen > remaining);
++ BUG_ON(tlen > mem_len);
++ BUG_ON(tlen > frame_len);
++
++ if (use_sg) {
++ page = virt_to_page(from + mem_off);
++ get_page(page);
++ tlen = min_t(size_t, tlen,
++ PAGE_SIZE - (mem_off & ~PAGE_MASK));
++ skb_fill_page_desc(fp_skb(fp),
++ skb_shinfo(fp_skb(fp))->nr_frags,
++ page, mem_off, tlen);
++ fr_len(fp) += tlen;
++ fp_skb(fp)->data_len += tlen;
++ fp_skb(fp)->truesize +=
++ PAGE_SIZE << compound_order(page);
++ } else {
++ memcpy(to, from + mem_off, tlen);
++ to += tlen;
++ }
++
++ mem_len -= tlen;
++ mem_off += tlen;
++ frame_len -= tlen;
++ remaining -= tlen;
++
++ if (frame_len)
++ continue;
++ fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
++ FC_TYPE_FCP,
++ remaining ? (FC_FC_EX_CTX | FC_FC_REL_OFF) :
++ (FC_FC_EX_CTX | FC_FC_REL_OFF | FC_FC_END_SEQ),
++ fh_off);
++ error = lport->tt.seq_send(lport, fcmd->seq, fp);
++ if (error) {
++ WARN_ON(1);
++ /* XXX For now, initiator will retry */
++ } else
++ fcmd->read_data_len = frame_off;
++ }
++ if (mem_len)
++ scst_put_buf(cmd, from);
++ if (remaining) {
++ FT_IO_DBG("remaining read data %zd\n", remaining);
++ return SCST_TGT_RES_QUEUE_FULL;
++ }
++ return SCST_TGT_RES_SUCCESS;
++}
+diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_scst.c linux-2.6.36/drivers/scst/fcst/ft_scst.c
+--- orig/linux-2.6.36/drivers/scst/fcst/ft_scst.c
++++ linux-2.6.36/drivers/scst/fcst/ft_scst.c
+@@ -0,0 +1,96 @@
++/*
++ * Copyright (c) 2010 Cisco Systems, Inc.
++ *
++ * This program is free software; you may redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
++ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ */
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <scsi/libfc.h>
++#include "fcst.h"
++
++MODULE_AUTHOR("Joe Eykholt <jeykholt@cisco.com>");
++MODULE_DESCRIPTION("Fibre-Channel SCST target");
++MODULE_LICENSE("GPL v2");
++
++unsigned int ft_debug_logging;
++module_param_named(debug_logging, ft_debug_logging, int, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(debug_logging, "log levels bitmask");
++
++DEFINE_MUTEX(ft_lport_lock);
++
++/*
++ * Provider ops for libfc.
++ */
++static struct fc4_prov ft_prov = {
++ .prli = ft_prli,
++ .prlo = ft_prlo,
++ .recv = ft_recv,
++ .module = THIS_MODULE,
++};
++
++static struct notifier_block ft_notifier = {
++ .notifier_call = ft_lport_notify
++};
++
++/*
++ * SCST target ops and configuration.
++ * XXX - re-check uninitialized fields
++ */
++struct scst_tgt_template ft_scst_template = {
++ .sg_tablesize = 128, /* XXX get true limit from libfc */
++ .xmit_response_atomic = 1,
++ .rdy_to_xfer_atomic = 1,
++ .xmit_response = ft_send_response,
++ .rdy_to_xfer = ft_send_xfer_rdy,
++ .on_hw_pending_cmd_timeout = ft_cmd_timeout,
++ .on_free_cmd = ft_cmd_free,
++ .task_mgmt_fn_done = ft_cmd_tm_done,
++ .detect = ft_tgt_detect,
++ .release = ft_tgt_release,
++ .report_aen = ft_report_aen,
++ .enable_target = ft_tgt_enable,
++ .is_target_enabled = ft_tgt_enabled,
++ .get_initiator_port_transport_id = ft_get_transport_id,
++ .max_hw_pending_time = FT_MAX_HW_PENDING_TIME,
++ .name = FT_MODULE,
++};
++
++static int __init ft_module_init(void)
++{
++ int err;
++
++ err = scst_register_target_template(&ft_scst_template);
++ if (err)
++ return err;
++ err = fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov);
++ if (err) {
++ scst_unregister_target_template(&ft_scst_template);
++ return err;
++ }
++ blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier);
++ fc_lport_iterate(ft_lport_add, NULL);
++ return 0;
++}
++module_init(ft_module_init);
++
++static void __exit ft_module_exit(void)
++{
++ blocking_notifier_chain_unregister(&fc_lport_notifier_head,
++ &ft_notifier);
++ fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov);
++ fc_lport_iterate(ft_lport_del, NULL);
++ scst_unregister_target_template(&ft_scst_template);
++ synchronize_rcu();
++}
++module_exit(ft_module_exit);
+diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_sess.c linux-2.6.36/drivers/scst/fcst/ft_sess.c
+--- orig/linux-2.6.36/drivers/scst/fcst/ft_sess.c
++++ linux-2.6.36/drivers/scst/fcst/ft_sess.c
+@@ -0,0 +1,570 @@
++/*
++ * Copyright (c) 2010 Cisco Systems, Inc.
++ *
++ * This program is free software; you may redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
++ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ */
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/mutex.h>
++#include <linux/hash.h>
++#include <asm/unaligned.h>
++#include <scsi/libfc.h>
++#include <scsi/fc/fc_els.h>
++#include "fcst.h"
++
++static int ft_tport_count;
++
++static ssize_t ft_format_wwn(char *buf, size_t len, u64 wwn)
++{
++ u8 b[8];
++
++ put_unaligned_be64(wwn, b);
++ return snprintf(buf, len,
++ "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
++ b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
++}
++
++/*
++ * Lookup or allocate target local port.
++ * Caller holds ft_lport_lock.
++ */
++static struct ft_tport *ft_tport_create(struct fc_lport *lport)
++{
++ struct ft_tport *tport;
++ char name[FT_NAMELEN];
++ int i;
++
++ ft_format_wwn(name, sizeof(name), lport->wwpn);
++ FT_SESS_DBG("create %s\n", name);
++
++ tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
++ if (tport)
++ return tport;
++
++ tport = kzalloc(sizeof(*tport), GFP_KERNEL);
++ if (!tport)
++ return NULL;
++
++ tport->tgt = scst_register_target(&ft_scst_template, name);
++ if (!tport->tgt) {
++ kfree(tport);
++ return NULL;
++ }
++ scst_tgt_set_tgt_priv(tport->tgt, tport);
++ ft_tport_count++;
++
++ tport->lport = lport;
++ for (i = 0; i < FT_SESS_HASH_SIZE; i++)
++ INIT_HLIST_HEAD(&tport->hash[i]);
++
++ rcu_assign_pointer(lport->prov[FC_TYPE_FCP], tport);
++ return tport;
++}
++
++/*
++ * Free tport via RCU.
++ */
++static void ft_tport_rcu_free(struct rcu_head *rcu)
++{
++ struct ft_tport *tport = container_of(rcu, struct ft_tport, rcu);
++
++ kfree(tport);
++}
++
++/*
++ * Delete target local port, if any, associated with the local port.
++ * Caller holds ft_lport_lock.
++ */
++static void ft_tport_delete(struct ft_tport *tport)
++{
++ struct fc_lport *lport;
++ struct scst_tgt *tgt;
++
++ tgt = tport->tgt;
++ BUG_ON(!tgt);
++ FT_SESS_DBG("delete %s\n", scst_get_tgt_name(tgt));
++ scst_unregister_target(tgt);
++ lport = tport->lport;
++ BUG_ON(tport != lport->prov[FC_TYPE_FCP]);
++ rcu_assign_pointer(lport->prov[FC_TYPE_FCP], NULL);
++ tport->lport = NULL;
++ call_rcu(&tport->rcu, ft_tport_rcu_free);
++ ft_tport_count--;
++}
++
++/*
++ * Add local port.
++ * Called thru fc_lport_iterate().
++ */
++void ft_lport_add(struct fc_lport *lport, void *arg)
++{
++ mutex_lock(&ft_lport_lock);
++ ft_tport_create(lport);
++ mutex_unlock(&ft_lport_lock);
++}
++
++/*
++ * Delete local port.
++ * Called thru fc_lport_iterate().
++ */
++void ft_lport_del(struct fc_lport *lport, void *arg)
++{
++ struct ft_tport *tport;
++
++ mutex_lock(&ft_lport_lock);
++ tport = lport->prov[FC_TYPE_FCP];
++ if (tport)
++ ft_tport_delete(tport);
++ mutex_unlock(&ft_lport_lock);
++}
++
++/*
++ * Notification of local port change from libfc.
++ * Create or delete local port and associated tport.
++ */
++int ft_lport_notify(struct notifier_block *nb, unsigned long event, void *arg)
++{
++ struct fc_lport *lport = arg;
++
++ switch (event) {
++ case FC_LPORT_EV_ADD:
++ ft_lport_add(lport, NULL);
++ break;
++ case FC_LPORT_EV_DEL:
++ ft_lport_del(lport, NULL);
++ break;
++ }
++ return NOTIFY_DONE;
++}
++
++/*
++ * Find session in local port.
++ * Sessions and hash lists are RCU-protected.
++ * A reference is taken which must be eventually freed.
++ */
++static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
++{
++ struct ft_tport *tport;
++ struct hlist_head *head;
++ struct hlist_node *pos;
++ struct ft_sess *sess = NULL;
++
++ rcu_read_lock();
++ tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
++ if (!tport)
++ goto out;
++
++ head = &tport->hash[hash_32(port_id, FT_SESS_HASH_BITS)];
++ hlist_for_each_entry_rcu(sess, pos, head, hash) {
++ if (sess->port_id == port_id) {
++ kref_get(&sess->kref);
++ rcu_read_unlock();
++ FT_SESS_DBG("port_id %x found %p\n", port_id, sess);
++ return sess;
++ }
++ }
++out:
++ rcu_read_unlock();
++ FT_SESS_DBG("port_id %x not found\n", port_id);
++ return NULL;
++}
++
++/*
++ * Allocate session and enter it in the hash for the local port.
++ * Caller holds ft_lport_lock.
++ */
++static int ft_sess_create(struct ft_tport *tport, struct fc_rport_priv *rdata,
++ u32 fcp_parm)
++{
++ struct ft_sess *sess;
++ struct scst_session *scst_sess;
++ struct hlist_head *head;
++ struct hlist_node *pos;
++ u32 port_id;
++ char name[FT_NAMELEN];
++
++ port_id = rdata->ids.port_id;
++ if (!rdata->maxframe_size) {
++ FT_SESS_DBG("port_id %x maxframe_size 0\n", port_id);
++ return FC_SPP_RESP_CONF;
++ }
++
++ head = &tport->hash[hash_32(port_id, FT_SESS_HASH_BITS)];
++ hlist_for_each_entry_rcu(sess, pos, head, hash) {
++ if (sess->port_id == port_id) {
++ sess->params = fcp_parm;
++ return 0;
++ }
++ }
++
++ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
++ if (!sess)
++ return FC_SPP_RESP_RES; /* out of resources */
++
++ sess->port_name = rdata->ids.port_name;
++ sess->max_payload = rdata->maxframe_size;
++ sess->max_lso_payload = rdata->maxframe_size;
++ if (tport->lport->seq_offload)
++ sess->max_lso_payload = tport->lport->lso_max;
++ sess->params = fcp_parm;
++ sess->tport = tport;
++ sess->port_id = port_id;
++ kref_init(&sess->kref); /* ref for table entry */
++
++ ft_format_wwn(name, sizeof(name), rdata->ids.port_name);
++ FT_SESS_DBG("register %s\n", name);
++ scst_sess = scst_register_session(tport->tgt, 0, name, sess, NULL,
++ NULL);
++ if (!scst_sess) {
++ kfree(sess);
++ return FC_SPP_RESP_RES; /* out of resources */
++ }
++ sess->scst_sess = scst_sess;
++ hlist_add_head_rcu(&sess->hash, head);
++ tport->sess_count++;
++
++ FT_SESS_DBG("port_id %x sess %p\n", port_id, sess);
++
++ rdata->prli_count++;
++ return 0;
++}
++
++/*
++ * Unhash the session.
++ * Caller holds ft_lport_lock.
++ */
++static void ft_sess_unhash(struct ft_sess *sess)
++{
++ struct ft_tport *tport = sess->tport;
++
++ hlist_del_rcu(&sess->hash);
++ BUG_ON(!tport->sess_count);
++ tport->sess_count--;
++ sess->port_id = -1;
++ sess->params = 0;
++}
++
++/*
++ * Delete session from hash.
++ * Caller holds ft_lport_lock.
++ */
++static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
++{
++ struct hlist_head *head;
++ struct hlist_node *pos;
++ struct ft_sess *sess;
++
++ head = &tport->hash[hash_32(port_id, FT_SESS_HASH_BITS)];
++ hlist_for_each_entry_rcu(sess, pos, head, hash) {
++ if (sess->port_id == port_id) {
++ ft_sess_unhash(sess);
++ return sess;
++ }
++ }
++ return NULL;
++}
++
++/*
++ * Remove session and send PRLO.
++ * This is called when the target is being deleted.
++ * Caller holds ft_lport_lock.
++ */
++static void ft_sess_close(struct ft_sess *sess)
++{
++ struct fc_lport *lport;
++ u32 port_id;
++
++ lport = sess->tport->lport;
++ port_id = sess->port_id;
++ if (port_id == -1)
++ return;
++ FT_SESS_DBG("port_id %x\n", port_id);
++ ft_sess_unhash(sess);
++ /* XXX should send LOGO or PRLO to rport */
++}
++
++/*
++ * Allocate and fill in the SPC Transport ID for persistent reservations.
++ */
++int ft_get_transport_id(struct scst_session *scst_sess, uint8_t **result)
++{
++ struct ft_sess *sess;
++ struct {
++ u8 format_proto; /* format and protocol ID (0 for FC) */
++ u8 __resv1[7];
++ __be64 port_name; /* N_Port Name */
++ u8 __resv2[8];
++ } __attribute__((__packed__)) *id;
++
++ if (!scst_sess)
++ return SCSI_TRANSPORTID_PROTOCOLID_FCP2;
++
++ id = kzalloc(sizeof(*id), GFP_KERNEL);
++ if (!id)
++ return -ENOMEM;
++
++ sess = scst_sess_get_tgt_priv(scst_sess);
++ id->port_name = cpu_to_be64(sess->port_name);
++ id->format_proto = SCSI_TRANSPORTID_PROTOCOLID_FCP2;
++ *result = (uint8_t *)id;
++ return 0;
++}
++
++/*
++ * libfc ops involving sessions.
++ */
++
++/*
++ * Handle PRLI (process login) request.
++ * This could be a PRLI we're sending or receiving.
++ * Caller holds ft_lport_lock.
++ */
++static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
++ const struct fc_els_spp *rspp, struct fc_els_spp *spp)
++{
++ struct ft_tport *tport;
++ u32 fcp_parm;
++ int ret;
++
++ if (rspp->spp_flags & (FC_SPP_OPA_VAL | FC_SPP_RPA_VAL))
++ return FC_SPP_RESP_NO_PA;
++
++ /*
++ * If both target and initiator bits are off, the SPP is invalid.
++ */
++ fcp_parm = ntohl(rspp->spp_params); /* requested parameters */
++ if (!(fcp_parm & (FCP_SPPF_INIT_FCN | FCP_SPPF_TARG_FCN)))
++ return FC_SPP_RESP_INVL;
++
++ /*
++ * Create session (image pair) only if requested by
++ * EST_IMG_PAIR flag and if the requestor is an initiator.
++ */
++ if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR) {
++ spp->spp_flags |= FC_SPP_EST_IMG_PAIR;
++
++ if (!(fcp_parm & FCP_SPPF_INIT_FCN))
++ return FC_SPP_RESP_CONF;
++ tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]);
++ if (!tport || !tport->enabled)
++ return 0; /* not a target for this local port */
++
++ ret = ft_sess_create(tport, rdata, fcp_parm);
++ if (ret)
++ return ret;
++ }
++
++ /*
++ * OR in our service parameters with the other provider (initiator), if any.
++ * If the initiator indicates RETRY, we must support that, too.
++ * Don't force RETRY on the initiator, though.
++ */
++ fcp_parm = ntohl(spp->spp_params); /* response parameters */
++ spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
++ return FC_SPP_RESP_ACK;
++}
++
++/**
++ * ft_prli() - Handle incoming or outgoing PRLI for the FCP target
++ * @rdata: remote port private
++ * @spp_len: service parameter page length
++ * @rspp: received service parameter page (NULL for outgoing PRLI)
++ * @spp: response service parameter page
++ *
++ * Returns spp response code.
++ */
++int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
++ const struct fc_els_spp *rspp, struct fc_els_spp *spp)
++{
++ int ret;
++
++ FT_SESS_DBG("starting PRLI port_id %x\n", rdata->ids.port_id);
++ mutex_lock(&ft_lport_lock);
++ ret = ft_prli_locked(rdata, spp_len, rspp, spp);
++ mutex_unlock(&ft_lport_lock);
++ FT_SESS_DBG("port_id %x flags %x parms %x ret %x\n",
++ rdata->ids.port_id,
++ rspp->spp_flags,
++ ntohl(spp->spp_params), ret);
++ return ret;
++}
++
++static void ft_sess_rcu_free(struct rcu_head *rcu)
++{
++ struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu);
++
++ kfree(sess);
++}
++
++static void ft_sess_free(struct kref *kref)
++{
++ struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
++ struct scst_session *scst_sess;
++
++ scst_sess = sess->scst_sess;
++ FT_SESS_DBG("unregister %s\n", scst_sess->initiator_name);
++ scst_unregister_session(scst_sess, 0, NULL);
++ call_rcu(&sess->rcu, ft_sess_rcu_free);
++}
++
++static void ft_sess_put(struct ft_sess *sess)
++{
++ int sess_held = atomic_read(&sess->kref.refcount);
++
++ BUG_ON(!sess_held);
++ kref_put(&sess->kref, ft_sess_free);
++}
++
++/*
++ * Delete ft_sess for PRLO.
++ * Called with ft_lport_lock held.
++ */
++static struct ft_sess *ft_sess_lookup_delete(struct fc_rport_priv *rdata)
++{
++ struct ft_sess *sess;
++ struct ft_tport *tport;
++
++ tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]);
++ if (!tport)
++ return NULL;
++ sess = ft_sess_delete(tport, rdata->ids.port_id);
++ if (sess)
++ sess->params = 0;
++ return sess;
++}
++
++/*
++ * Handle PRLO.
++ */
++void ft_prlo(struct fc_rport_priv *rdata)
++{
++ struct ft_sess *sess;
++
++ mutex_lock(&ft_lport_lock);
++ sess = ft_sess_lookup_delete(rdata);
++ mutex_unlock(&ft_lport_lock);
++ if (!sess)
++ return;
++
++ /*
++ * Release the session hold from the table.
++ * When all command-starting threads have returned,
++ * kref will call ft_sess_free which will unregister
++ * the session.
++ * fcmds referencing the session are safe.
++ */
++ ft_sess_put(sess); /* release from table */
++ rdata->prli_count--;
++}
++
++/*
++ * Handle incoming FCP request.
++ *
++ * Caller has verified that the frame is type FCP.
++ * Note that this may be called directly from the softirq context.
++ */
++void ft_recv(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp)
++{
++ struct ft_sess *sess;
++ struct fc_frame_header *fh;
++ u32 sid;
++
++ fh = fc_frame_header_get(fp);
++ sid = ntoh24(fh->fh_s_id);
++
++ FT_SESS_DBG("sid %x preempt %x\n", sid, preempt_count());
++
++ sess = ft_sess_get(lport, sid);
++ if (!sess) {
++ FT_SESS_DBG("sid %x sess lookup failed\n", sid);
++ lport->tt.exch_done(sp);
++ /* TBD XXX - if FCP_CMND, send LOGO */
++ fc_frame_free(fp);
++ return;
++ }
++ FT_SESS_DBG("sid %x sess lookup returned %p preempt %x\n",
++ sid, sess, preempt_count());
++ ft_recv_req(sess, sp, fp);
++ ft_sess_put(sess);
++}
++
++/*
++ * Release all sessions for a target.
++ * Called through scst_unregister_target() as well as directly.
++ * Caller holds ft_lport_lock.
++ */
++int ft_tgt_release(struct scst_tgt *tgt)
++{
++ struct ft_tport *tport;
++ struct hlist_head *head;
++ struct hlist_node *pos;
++ struct ft_sess *sess;
++
++ tport = scst_tgt_get_tgt_priv(tgt);
++ tport->enabled = 0;
++ tport->lport->service_params &= ~FCP_SPPF_TARG_FCN;
++
++ for (head = tport->hash; head < &tport->hash[FT_SESS_HASH_SIZE]; head++)
++ hlist_for_each_entry_rcu(sess, pos, head, hash)
++ ft_sess_close(sess);
++
++ synchronize_rcu();
++ return 0;
++}
++
++int ft_tgt_enable(struct scst_tgt *tgt, bool enable)
++{
++ struct ft_tport *tport;
++ int ret = 0;
++
++ mutex_lock(&ft_lport_lock);
++ if (enable) {
++ FT_SESS_DBG("enable tgt %s\n", tgt->tgt_name);
++ tport = scst_tgt_get_tgt_priv(tgt);
++ tport->enabled = 1;
++ tport->lport->service_params |= FCP_SPPF_TARG_FCN;
++ } else {
++ FT_SESS_DBG("disable tgt %s\n", tgt->tgt_name);
++ ft_tgt_release(tgt);
++ }
++ mutex_unlock(&ft_lport_lock);
++ return ret;
++}
++
++bool ft_tgt_enabled(struct scst_tgt *tgt)
++{
++ struct ft_tport *tport;
++
++ tport = scst_tgt_get_tgt_priv(tgt);
++ return tport->enabled;
++}
++
++int ft_tgt_detect(struct scst_tgt_template *tt)
++{
++ return ft_tport_count;
++}
++
++/*
++ * Report AEN (Asynchronous Event Notification) from device to initiator.
++ * See notes in scst.h.
++ */
++int ft_report_aen(struct scst_aen *aen)
++{
++ struct ft_sess *sess;
++
++ sess = scst_sess_get_tgt_priv(scst_aen_get_sess(aen));
++ FT_SESS_DBG("AEN event %d sess to %x lun %lld\n",
++ aen->event_fn, sess->port_id, scst_aen_get_lun(aen));
++ return SCST_AEN_RES_FAILED; /* XXX TBD */
++}
+diff -uprN orig/linux-2.6.36/Documentation/scst/README.fcst linux-2.6.36/Documentation/scst/README.fcst
+--- orig/linux-2.6.36/Documentation/scst/README.fcst
++++ linux-2.6.36/Documentation/scst/README.fcst
+@@ -0,0 +1,99 @@
++fcst README v1.0 06/10/2010
++
++$Id$
++
++FCST is a module that depends on libfc and SCST to provide FC target support.
++
++To build for linux-2.6.34, do:
++
++1. Get the kernel source:
++
++ KERNEL=linux-2.6.34
++
++ cd /usr/src/kernels
++ URL_DIR=http://www.kernel.org/pub/linux/kernel/v2.6
++ TARFILE=$KERNEL.tar.bz2
++ wget -O $TARFILE $URL_DIR/$TARFILE
++ tar xfj $TARFILE
++ cd $KERNEL
++
++2. Apply patches needed for libfc target hooks and point-to-point fixes:
++
++ KDIR=/usr/src/kernels/$KERNEL
++ PDIR=/usr/src/scst/trunk/fcst/linux-patches # use your dir here
++
++ cd $PDIR
++ for patch in `grep -v '^#' series-2.6.34`
++ do
++ (cd $KDIR; patch -p1) < $patch
++ done
++
++3. Apply SCST patches to the kernel
++ See trunk/scst/README
++ The readahead patches are not needed in 2.6.33 or later.
++
++4. Configure, make, and install your kernel
++
++5. Install SCST
++ See trunk/scst/README. Make sure you build SCST with the sysfs
++ interface, because FCST supports only the sysfs build. You need to do
++
++ cd trunk/scst
++ make
++ make install
++
++6. Make FCST
++ In the directory containing this README, just do
++ make
++ make install
++
++7. Install the FCoE admin tools, including dcbd and fcoeadm.
++ Some distros may have these.
++ You should be able to use the source at
++ http://www.open-fcoe.org/openfc/downloads/2.6.34/open-fcoe-2.6.34.tar.gz
++
++8. Bring up SCST and configure the devices.
++
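++ For example, a file-backed vdisk device can be created through the
++ SCST sysfs interface (the device name and backing file below are only
++ illustrations, adjust them to your setup):
++
++ modprobe scst
++ modprobe scst_vdisk
++ echo "add_device disk-name filename=/var/scst/disk-name.img" \
++      > /sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
++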
++9. Bring up an FCoE initiator (we'll enable target mode on it later):
++ modprobe fcoe
++ fcoeadm -c eth3
++
++ The other end can be an initiator as well, in point-to-point mode
++ over a full-duplex loss-less link (enable pause on both sides).
++ Alternatively, the other end can be an FCoE switch.
++
++10. Use fcc (part of the open-fcoe contrib tools in step 7) to see the
++ initiator setup. To get the FCoE port name for eth3
++
++ # fcc
++ FC HBAs:
++ HBA Port Name Port ID State Device
++ host4 20:00:00:1b:21:06:58:21 01:01:02 Online eth3
++
++ host4 Remote Ports:
++ Path Port Name Port ID State Roles
++ 4:0-0 10:00:50:41:4c:4f:3b:00 01:01:01 Online FCP Initiator
++
++ In the above example, there's one local host on eth3, and it's in
++ a point-to-point connection with the remote initiator with Port_id 010101.
++
++11. Load fcst
++
++ modprobe fcst
++
++12. Add any disks (configured in step 8) you want to export.
++ Note that you must have a LUN 0.
++
++ LPORT=20:00:00:1b:21:06:58:21 # the local Port_Name
++
++ cd /sys/kernel/scst_tgt/targets/fcst/$LPORT
++ echo add disk-name 0 > luns/mgmt
++ echo add disk-name 1 > luns/mgmt
++
++13. Enable the target:
++
++ echo 1 > $LPORT/enabled
++
++14. As a temporary workaround, you may need to reset the interface
++ on the initiator side so it sees the SCST device as a target and
++ discovers LUNs. You can avoid this by bringing up the initiator last.
+diff -uprN orig/linux-2.6.36/include/scst/iscsi_scst.h linux-2.6.36/include/scst/iscsi_scst.h
+--- orig/linux-2.6.36/include/scst/iscsi_scst.h
++++ linux-2.6.36/include/scst/iscsi_scst.h
+@@ -0,0 +1,220 @@
++/*
++ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef _ISCSI_SCST_U_H
++#define _ISCSI_SCST_U_H
++
++#ifndef __KERNEL__
++#include <sys/uio.h>
++#endif
++
++#include "iscsi_scst_ver.h"
++#include "iscsi_scst_itf_ver.h"
++
++/* RFC 3720 limits an iSCSI name to 223 bytes. */
++#define ISCSI_NAME_LEN 256
++
++#define ISCSI_PORTAL_LEN 64
++
++/* Full name is iSCSI name + connected portal */
++#define ISCSI_FULL_NAME_LEN (ISCSI_NAME_LEN + ISCSI_PORTAL_LEN)
++
++#define ISCSI_LISTEN_PORT 3260
++
++#define SCSI_ID_LEN 24
++
++#ifndef aligned_u64
++#define aligned_u64 uint64_t __attribute__((aligned(8)))
++#endif
++
++#define ISCSI_MAX_ATTR_NAME_LEN 50
++#define ISCSI_MAX_ATTR_VALUE_LEN 512
++
++enum {
++ key_initial_r2t,
++ key_immediate_data,
++ key_max_connections,
++ key_max_recv_data_length,
++ key_max_xmit_data_length,
++ key_max_burst_length,
++ key_first_burst_length,
++ key_default_wait_time,
++ key_default_retain_time,
++ key_max_outstanding_r2t,
++ key_data_pdu_inorder,
++ key_data_sequence_inorder,
++ key_error_recovery_level,
++ key_header_digest,
++ key_data_digest,
++ key_ofmarker,
++ key_ifmarker,
++ key_ofmarkint,
++ key_ifmarkint,
++ session_key_last,
++};
++
++enum {
++ key_queued_cmnds,
++ key_rsp_timeout,
++ key_nop_in_interval,
++ key_max_sessions,
++ target_key_last,
++};
++
++enum {
++ key_session,
++ key_target,
++};
++
++struct iscsi_kern_target_info {
++ u32 tid;
++ u32 cookie;
++ char name[ISCSI_NAME_LEN];
++ u32 attrs_num;
++ aligned_u64 attrs_ptr;
++};
++
++struct iscsi_kern_session_info {
++ u32 tid;
++ aligned_u64 sid;
++ char initiator_name[ISCSI_NAME_LEN];
++ char full_initiator_name[ISCSI_FULL_NAME_LEN];
++ u32 exp_cmd_sn;
++ s32 session_params[session_key_last];
++ s32 target_params[target_key_last];
++};
++
++#define DIGEST_ALL (DIGEST_NONE | DIGEST_CRC32C)
++#define DIGEST_NONE (1 << 0)
++#define DIGEST_CRC32C (1 << 1)
++
++struct iscsi_kern_conn_info {
++ u32 tid;
++ aligned_u64 sid;
++
++ u32 cid;
++ u32 stat_sn;
++ u32 exp_stat_sn;
++ int fd;
++};
++
++struct iscsi_kern_attr {
++ u32 mode;
++ char name[ISCSI_MAX_ATTR_NAME_LEN];
++};
++
++struct iscsi_kern_mgmt_cmd_res_info {
++ u32 tid;
++ u32 cookie;
++ u32 req_cmd;
++ u32 result;
++ char value[ISCSI_MAX_ATTR_VALUE_LEN];
++};
++
++struct iscsi_kern_params_info {
++ u32 tid;
++ aligned_u64 sid;
++
++ u32 params_type;
++ u32 partial;
++
++ s32 session_params[session_key_last];
++ s32 target_params[target_key_last];
++};
++
++enum iscsi_kern_event_code {
++ E_ADD_TARGET,
++ E_DEL_TARGET,
++ E_MGMT_CMD,
++ E_ENABLE_TARGET,
++ E_DISABLE_TARGET,
++ E_GET_ATTR_VALUE,
++ E_SET_ATTR_VALUE,
++ E_CONN_CLOSE,
++};
++
++struct iscsi_kern_event {
++ u32 tid;
++ aligned_u64 sid;
++ u32 cid;
++ u32 code;
++ u32 cookie;
++ char target_name[ISCSI_NAME_LEN];
++ u32 param1_size;
++ u32 param2_size;
++};
++
++struct iscsi_kern_register_info {
++ union {
++ aligned_u64 version;
++ struct {
++ int max_data_seg_len;
++ int max_queued_cmds;
++ };
++ };
++};
++
++struct iscsi_kern_attr_info {
++ u32 tid;
++ u32 cookie;
++ struct iscsi_kern_attr attr;
++};
++
++struct iscsi_kern_initiator_info {
++ u32 tid;
++ char full_initiator_name[ISCSI_FULL_NAME_LEN];
++};
++
++#define DEFAULT_NR_QUEUED_CMNDS 32
++#define MIN_NR_QUEUED_CMNDS 1
++#define MAX_NR_QUEUED_CMNDS 256
++
++#define DEFAULT_RSP_TIMEOUT 30
++#define MIN_RSP_TIMEOUT 2
++#define MAX_RSP_TIMEOUT 65535
++
++#define DEFAULT_NOP_IN_INTERVAL 30
++#define MIN_NOP_IN_INTERVAL 0
++#define MAX_NOP_IN_INTERVAL 65535
++
++#define NETLINK_ISCSI_SCST 25
++
++#define REGISTER_USERD _IOWR('s', 0, struct iscsi_kern_register_info)
++#define ADD_TARGET _IOW('s', 1, struct iscsi_kern_target_info)
++#define DEL_TARGET _IOW('s', 2, struct iscsi_kern_target_info)
++#define ADD_SESSION _IOW('s', 3, struct iscsi_kern_session_info)
++#define DEL_SESSION _IOW('s', 4, struct iscsi_kern_session_info)
++#define ADD_CONN _IOW('s', 5, struct iscsi_kern_conn_info)
++#define DEL_CONN _IOW('s', 6, struct iscsi_kern_conn_info)
++#define ISCSI_PARAM_SET _IOW('s', 7, struct iscsi_kern_params_info)
++#define ISCSI_PARAM_GET _IOWR('s', 8, struct iscsi_kern_params_info)
++
++#define ISCSI_ATTR_ADD _IOW('s', 9, struct iscsi_kern_attr_info)
++#define ISCSI_ATTR_DEL _IOW('s', 10, struct iscsi_kern_attr_info)
++#define MGMT_CMD_CALLBACK _IOW('s', 11, struct iscsi_kern_mgmt_cmd_res_info)
++
++#define ISCSI_INITIATOR_ALLOWED _IOW('s', 12, struct iscsi_kern_initiator_info)
++
++static inline int iscsi_is_key_internal(int key)
++{
++ switch (key) {
++ case key_max_xmit_data_length:
++ return 1;
++ default:
++ return 0;
++ }
++}
++
++#endif
+diff -uprN orig/linux-2.6.36/include/scst/iscsi_scst_ver.h linux-2.6.36/include/scst/iscsi_scst_ver.h
+--- orig/linux-2.6.36/include/scst/iscsi_scst_ver.h
++++ linux-2.6.36/include/scst/iscsi_scst_ver.h
+@@ -0,0 +1,20 @@
++/*
++ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++/* #define CONFIG_SCST_PROC */
++
++#define ISCSI_VERSION_STRING_SUFFIX
++
++#define ISCSI_VERSION_STRING "2.0.0" ISCSI_VERSION_STRING_SUFFIX
+diff -uprN orig/linux-2.6.36/include/scst/iscsi_scst_itf_ver.h linux-2.6.36/include/scst/iscsi_scst_itf_ver.h
+--- orig/linux-2.6.36/include/scst/iscsi_scst_itf_ver.h
++++ linux-2.6.36/include/scst/iscsi_scst_itf_ver.h
+@@ -0,0 +1,3 @@
++/* Autogenerated, don't edit */
++
++#define ISCSI_SCST_INTERFACE_VERSION ISCSI_VERSION_STRING "_" "31815603fdea2196eb9774eac0e41bf15c9a9130"
+diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/Makefile linux-2.6.36/drivers/scst/iscsi-scst/Makefile
+--- orig/linux-2.6.36/drivers/scst/iscsi-scst/Makefile
++++ linux-2.6.36/drivers/scst/iscsi-scst/Makefile
+@@ -0,0 +1,4 @@
++iscsi-scst-y := iscsi.o nthread.o config.o digest.o \
++ conn.o session.o target.o event.o param.o
++
++obj-$(CONFIG_SCST_ISCSI) += iscsi-scst.o
+diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/Kconfig linux-2.6.36/drivers/scst/iscsi-scst/Kconfig
+--- orig/linux-2.6.36/drivers/scst/iscsi-scst/Kconfig
++++ linux-2.6.36/drivers/scst/iscsi-scst/Kconfig
+@@ -0,0 +1,25 @@
++config SCST_ISCSI
++ tristate "ISCSI Target"
++ depends on SCST && INET
++ default SCST
++ help
++	  iSCSI target driver for the SCST framework. The iSCSI protocol is
++	  defined in RFC 3720. To use this driver you also need its user
++	  space part, which can be downloaded from http://scst.sourceforge.net.
++
++config SCST_ISCSI_DEBUG_DIGEST_FAILURES
++ bool "Simulate iSCSI digest failures"
++ depends on SCST_ISCSI
++ help
++ Simulates iSCSI digest failures in random places. Even when iSCSI
++ traffic is sent over a TCP connection, the 16-bit TCP checksum is too
++ weak for the requirements of a storage protocol. Furthermore, there
++ are also instances where the TCP checksum does not protect iSCSI
++ data, as when data is corrupted while being transferred on a PCI bus
++ or while in memory. The iSCSI protocol therefore defines a 32-bit CRC
++ digest on iSCSI packets in order to detect data corruption on an
++ end-to-end basis. CRCs can be used on iSCSI PDU headers and/or data.
++	  Enabling this option allows testing digest failure recovery in the
++	  iSCSI initiator that is talking to SCST.
++
++ If unsure, say "N".
+diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/config.c linux-2.6.36/drivers/scst/iscsi-scst/config.c
+--- orig/linux-2.6.36/drivers/scst/iscsi-scst/config.c
++++ linux-2.6.36/drivers/scst/iscsi-scst/config.c
+@@ -0,0 +1,1032 @@
++/*
++ * Copyright (C) 2004 - 2005 FUJITA Tomonori <tomof@acm.org>
++ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include "iscsi.h"
++
++/* Protected by target_mgmt_mutex */
++int ctr_open_state;
++
++/* Protected by target_mgmt_mutex */
++static LIST_HEAD(iscsi_attrs_list);
++
++static ssize_t iscsi_version_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ TRACE_ENTRY();
++
++ sprintf(buf, "%s\n", ISCSI_VERSION_STRING);
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ strcat(buf, "EXTRACHECKS\n");
++#endif
++
++#ifdef CONFIG_SCST_TRACING
++ strcat(buf, "TRACING\n");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG
++ strcat(buf, "DEBUG\n");
++#endif
++
++#ifdef CONFIG_SCST_ISCSI_DEBUG_DIGEST_FAILURES
++ strcat(buf, "DEBUG_DIGEST_FAILURES\n");
++#endif
++
++ TRACE_EXIT();
++ return strlen(buf);
++}
++
++static struct kobj_attribute iscsi_version_attr =
++ __ATTR(version, S_IRUGO, iscsi_version_show, NULL);
++
++static ssize_t iscsi_open_state_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ switch (ctr_open_state) {
++ case ISCSI_CTR_OPEN_STATE_CLOSED:
++ sprintf(buf, "%s\n", "closed");
++ break;
++ case ISCSI_CTR_OPEN_STATE_OPEN:
++ sprintf(buf, "%s\n", "open");
++ break;
++ case ISCSI_CTR_OPEN_STATE_CLOSING:
++ sprintf(buf, "%s\n", "closing");
++ break;
++ default:
++ sprintf(buf, "%s\n", "unknown");
++ break;
++ }
++
++ return strlen(buf);
++}
++
++static struct kobj_attribute iscsi_open_state_attr =
++ __ATTR(open_state, S_IRUGO, iscsi_open_state_show, NULL);
++
++const struct attribute *iscsi_attrs[] = {
++ &iscsi_version_attr.attr,
++ &iscsi_open_state_attr.attr,
++ NULL,
++};
++
++/* target_mgmt_mutex supposed to be locked */
++static int add_conn(void __user *ptr)
++{
++ int err, rc;
++ struct iscsi_session *session;
++ struct iscsi_kern_conn_info info;
++ struct iscsi_target *target;
++
++ TRACE_ENTRY();
++
++ rc = copy_from_user(&info, ptr, sizeof(info));
++ if (rc != 0) {
++ PRINT_ERROR("Failed to copy %d user's bytes", rc);
++ err = -EFAULT;
++ goto out;
++ }
++
++ target = target_lookup_by_id(info.tid);
++ if (target == NULL) {
++ PRINT_ERROR("Target %d not found", info.tid);
++ err = -ENOENT;
++ goto out;
++ }
++
++ mutex_lock(&target->target_mutex);
++
++ session = session_lookup(target, info.sid);
++ if (!session) {
++		PRINT_ERROR("Session %llx not found",
++			(long long unsigned int)info.sid);
++ err = -ENOENT;
++ goto out_unlock;
++ }
++
++ err = __add_conn(session, &info);
++
++out_unlock:
++ mutex_unlock(&target->target_mutex);
++
++out:
++ TRACE_EXIT_RES(err);
++ return err;
++}
++
++/* target_mgmt_mutex supposed to be locked */
++static int del_conn(void __user *ptr)
++{
++ int err, rc;
++ struct iscsi_session *session;
++ struct iscsi_kern_conn_info info;
++ struct iscsi_target *target;
++
++ TRACE_ENTRY();
++
++ rc = copy_from_user(&info, ptr, sizeof(info));
++ if (rc != 0) {
++ PRINT_ERROR("Failed to copy %d user's bytes", rc);
++ err = -EFAULT;
++ goto out;
++ }
++
++ target = target_lookup_by_id(info.tid);
++ if (target == NULL) {
++ PRINT_ERROR("Target %d not found", info.tid);
++ err = -ENOENT;
++ goto out;
++ }
++
++ mutex_lock(&target->target_mutex);
++
++ session = session_lookup(target, info.sid);
++ if (!session) {
++ PRINT_ERROR("Session %llx not found",
++ (long long unsigned int)info.sid);
++ err = -ENOENT;
++ goto out_unlock;
++ }
++
++ err = __del_conn(session, &info);
++
++out_unlock:
++ mutex_unlock(&target->target_mutex);
++
++out:
++ TRACE_EXIT_RES(err);
++ return err;
++}
++
++/* target_mgmt_mutex supposed to be locked */
++static int add_session(void __user *ptr)
++{
++ int err, rc;
++ struct iscsi_kern_session_info *info;
++ struct iscsi_target *target;
++
++ TRACE_ENTRY();
++
++ info = kzalloc(sizeof(*info), GFP_KERNEL);
++ if (info == NULL) {
++ PRINT_ERROR("Can't alloc info (size %zd)", sizeof(*info));
++ err = -ENOMEM;
++ goto out;
++ }
++
++ rc = copy_from_user(info, ptr, sizeof(*info));
++ if (rc != 0) {
++ PRINT_ERROR("Failed to copy %d user's bytes", rc);
++ err = -EFAULT;
++ goto out_free;
++ }
++
++ info->initiator_name[sizeof(info->initiator_name)-1] = '\0';
++ info->full_initiator_name[sizeof(info->full_initiator_name)-1] = '\0';
++
++ target = target_lookup_by_id(info->tid);
++ if (target == NULL) {
++ PRINT_ERROR("Target %d not found", info->tid);
++ err = -ENOENT;
++ goto out_free;
++ }
++
++ err = __add_session(target, info);
++
++out_free:
++ kfree(info);
++
++out:
++ TRACE_EXIT_RES(err);
++ return err;
++}
++
++/* target_mgmt_mutex supposed to be locked */
++static int del_session(void __user *ptr)
++{
++ int err, rc;
++ struct iscsi_kern_session_info *info;
++ struct iscsi_target *target;
++
++ TRACE_ENTRY();
++
++ info = kzalloc(sizeof(*info), GFP_KERNEL);
++ if (info == NULL) {
++ PRINT_ERROR("Can't alloc info (size %zd)", sizeof(*info));
++ err = -ENOMEM;
++ goto out;
++ }
++
++ rc = copy_from_user(info, ptr, sizeof(*info));
++ if (rc != 0) {
++ PRINT_ERROR("Failed to copy %d user's bytes", rc);
++ err = -EFAULT;
++ goto out_free;
++ }
++
++ info->initiator_name[sizeof(info->initiator_name)-1] = '\0';
++
++ target = target_lookup_by_id(info->tid);
++ if (target == NULL) {
++ PRINT_ERROR("Target %d not found", info->tid);
++ err = -ENOENT;
++ goto out_free;
++ }
++
++ mutex_lock(&target->target_mutex);
++ err = __del_session(target, info->sid);
++ mutex_unlock(&target->target_mutex);
++
++out_free:
++ kfree(info);
++
++out:
++ TRACE_EXIT_RES(err);
++ return err;
++}
++
++/* target_mgmt_mutex supposed to be locked */
++static int iscsi_params_config(void __user *ptr, int set)
++{
++ int err, rc;
++ struct iscsi_kern_params_info info;
++ struct iscsi_target *target;
++
++ TRACE_ENTRY();
++
++ rc = copy_from_user(&info, ptr, sizeof(info));
++ if (rc != 0) {
++ PRINT_ERROR("Failed to copy %d user's bytes", rc);
++ err = -EFAULT;
++ goto out;
++ }
++
++ target = target_lookup_by_id(info.tid);
++ if (target == NULL) {
++ PRINT_ERROR("Target %d not found", info.tid);
++ err = -ENOENT;
++ goto out;
++ }
++
++ mutex_lock(&target->target_mutex);
++ err = iscsi_params_set(target, &info, set);
++ mutex_unlock(&target->target_mutex);
++
++ if (err < 0)
++ goto out;
++
++ if (!set) {
++ rc = copy_to_user(ptr, &info, sizeof(info));
++ if (rc != 0) {
++ PRINT_ERROR("Failed to copy to user %d bytes", rc);
++ err = -EFAULT;
++ goto out;
++ }
++ }
++
++out:
++ TRACE_EXIT_RES(err);
++ return err;
++}
++
++/* target_mgmt_mutex supposed to be locked */
++static int iscsi_initiator_allowed(void __user *ptr)
++{
++ int err = 0, rc;
++ struct iscsi_kern_initiator_info cinfo;
++ struct iscsi_target *target;
++
++ TRACE_ENTRY();
++
++ rc = copy_from_user(&cinfo, ptr, sizeof(cinfo));
++ if (rc != 0) {
++ PRINT_ERROR("Failed to copy %d user's bytes", rc);
++ err = -EFAULT;
++ goto out;
++ }
++
++ cinfo.full_initiator_name[sizeof(cinfo.full_initiator_name)-1] = '\0';
++
++ target = target_lookup_by_id(cinfo.tid);
++ if (target == NULL) {
++ PRINT_ERROR("Target %d not found", cinfo.tid);
++ err = -ENOENT;
++ goto out;
++ }
++
++ err = scst_initiator_has_luns(target->scst_tgt,
++ cinfo.full_initiator_name);
++
++out:
++ TRACE_EXIT_RES(err);
++ return err;
++}
++
++/* target_mgmt_mutex supposed to be locked */
++static int mgmt_cmd_callback(void __user *ptr)
++{
++ int err = 0, rc;
++ struct iscsi_kern_mgmt_cmd_res_info cinfo;
++ struct scst_sysfs_user_info *info;
++
++ TRACE_ENTRY();
++
++ rc = copy_from_user(&cinfo, ptr, sizeof(cinfo));
++ if (rc != 0) {
++ PRINT_ERROR("Failed to copy %d user's bytes", rc);
++ err = -EFAULT;
++ goto out;
++ }
++
++ cinfo.value[sizeof(cinfo.value)-1] = '\0';
++
++ info = scst_sysfs_user_get_info(cinfo.cookie);
++ TRACE_DBG("cookie %u, info %p, result %d", cinfo.cookie, info,
++ cinfo.result);
++ if (info == NULL) {
++ err = -EINVAL;
++ goto out;
++ }
++
++ info->info_status = 0;
++
++ if (cinfo.result != 0) {
++ info->info_status = cinfo.result;
++ goto out_complete;
++ }
++
++ switch (cinfo.req_cmd) {
++ case E_ENABLE_TARGET:
++ case E_DISABLE_TARGET:
++ {
++ struct iscsi_target *target;
++
++ target = target_lookup_by_id(cinfo.tid);
++ if (target == NULL) {
++ PRINT_ERROR("Target %d not found", cinfo.tid);
++ err = -ENOENT;
++ goto out_status;
++ }
++
++ target->tgt_enabled = (cinfo.req_cmd == E_ENABLE_TARGET) ? 1 : 0;
++ break;
++ }
++
++ case E_GET_ATTR_VALUE:
++ info->data = kstrdup(cinfo.value, GFP_KERNEL);
++ if (info->data == NULL) {
++			PRINT_ERROR("Can't duplicate value %s", cinfo.value);
++ info->info_status = -ENOMEM;
++ goto out_complete;
++ }
++ break;
++ }
++
++out_complete:
++ complete(&info->info_completion);
++
++out:
++ TRACE_EXIT_RES(err);
++ return err;
++
++out_status:
++ info->info_status = err;
++ goto out_complete;
++}
++
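++/*
++ * User-defined attributes have no value stored in the kernel: reads are
++ * forwarded to the user space daemon as an E_GET_ATTR_VALUE event and the
++ * string it returns is copied into the sysfs buffer.
++ */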
++static ssize_t iscsi_attr_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos;
++ struct iscsi_attr *tgt_attr;
++ void *value;
++
++ TRACE_ENTRY();
++
++ tgt_attr = container_of(attr, struct iscsi_attr, attr);
++
++ pos = iscsi_sysfs_send_event(
++ (tgt_attr->target != NULL) ? tgt_attr->target->tid : 0,
++ E_GET_ATTR_VALUE, tgt_attr->name, NULL, &value);
++
++ if (pos != 0)
++ goto out;
++
++ pos = scnprintf(buf, SCST_SYSFS_BLOCK_SIZE, "%s\n", (char *)value);
++
++ kfree(value);
++
++out:
++ TRACE_EXIT_RES(pos);
++ return pos;
++}
++
++static ssize_t iscsi_attr_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ char *buffer;
++ struct iscsi_attr *tgt_attr;
++
++ TRACE_ENTRY();
++
++ buffer = kzalloc(count+1, GFP_KERNEL);
++ if (buffer == NULL) {
++ res = -ENOMEM;
++ goto out;
++ }
++ memcpy(buffer, buf, count);
++ buffer[count] = '\0';
++
++ tgt_attr = container_of(attr, struct iscsi_attr, attr);
++
++ TRACE_DBG("attr %s, buffer %s", tgt_attr->attr.attr.name, buffer);
++
++ res = iscsi_sysfs_send_event(
++ (tgt_attr->target != NULL) ? tgt_attr->target->tid : 0,
++ E_SET_ATTR_VALUE, tgt_attr->name, buffer, NULL);
++
++ kfree(buffer);
++
++ if (res == 0)
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/*
++ * target_mgmt_mutex supposed to be locked. If target != 0, target_mutex
++ * supposed to be locked as well.
++ */
++int iscsi_add_attr(struct iscsi_target *target,
++ const struct iscsi_kern_attr *attr_info)
++{
++ int res = 0;
++ struct iscsi_attr *tgt_attr;
++ struct list_head *attrs_list;
++ const char *name;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ static struct lock_class_key __key;
++#endif
++
++ TRACE_ENTRY();
++
++ if (target != NULL) {
++ attrs_list = &target->attrs_list;
++ name = target->name;
++ } else {
++ attrs_list = &iscsi_attrs_list;
++ name = "global";
++ }
++
++ list_for_each_entry(tgt_attr, attrs_list, attrs_list_entry) {
++ /* Both for sure NULL-terminated */
++ if (strcmp(tgt_attr->name, attr_info->name) == 0) {
++			PRINT_ERROR("Attribute %s for %s already exists",
++ attr_info->name, name);
++ res = -EEXIST;
++ goto out;
++ }
++ }
++
++ TRACE_DBG("Adding %s's attr %s with mode %x", name,
++ attr_info->name, attr_info->mode);
++
++ tgt_attr = kzalloc(sizeof(*tgt_attr), GFP_KERNEL);
++ if (tgt_attr == NULL) {
++		PRINT_ERROR("Unable to allocate attribute (size %zd)",
++ sizeof(*tgt_attr));
++ res = -ENOMEM;
++ goto out;
++ }
++
++ tgt_attr->target = target;
++
++ tgt_attr->name = kstrdup(attr_info->name, GFP_KERNEL);
++ if (tgt_attr->name == NULL) {
++ PRINT_ERROR("Unable to allocate attr %s name/value (target %s)",
++ attr_info->name, name);
++ res = -ENOMEM;
++ goto out_free;
++ }
++
++ list_add(&tgt_attr->attrs_list_entry, attrs_list);
++
++ tgt_attr->attr.attr.name = tgt_attr->name;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ tgt_attr->attr.attr.key = &__key;
++#endif
++ tgt_attr->attr.attr.mode = attr_info->mode & (S_IRUGO | S_IWUGO);
++ tgt_attr->attr.show = iscsi_attr_show;
++ tgt_attr->attr.store = iscsi_attr_store;
++
++ TRACE_DBG("tgt_attr %p, attr %p", tgt_attr, &tgt_attr->attr.attr);
++
++ res = sysfs_create_file(
++ (target != NULL) ? scst_sysfs_get_tgt_kobj(target->scst_tgt) :
++ scst_sysfs_get_tgtt_kobj(&iscsi_template),
++ &tgt_attr->attr.attr);
++ if (res != 0) {
++ PRINT_ERROR("Unable to create file '%s' for target '%s'",
++ tgt_attr->attr.attr.name, name);
++ goto out_del;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_del:
++ list_del(&tgt_attr->attrs_list_entry);
++
++out_free:
++ kfree(tgt_attr->name);
++ kfree(tgt_attr);
++ goto out;
++}
++
++void __iscsi_del_attr(struct iscsi_target *target,
++ struct iscsi_attr *tgt_attr)
++{
++ TRACE_ENTRY();
++
++ TRACE_DBG("Deleting attr %s (target %s, tgt_attr %p, attr %p)",
++ tgt_attr->name, (target != NULL) ? target->name : "global",
++ tgt_attr, &tgt_attr->attr.attr);
++
++ list_del(&tgt_attr->attrs_list_entry);
++
++ sysfs_remove_file((target != NULL) ?
++ scst_sysfs_get_tgt_kobj(target->scst_tgt) :
++ scst_sysfs_get_tgtt_kobj(&iscsi_template),
++ &tgt_attr->attr.attr);
++
++ kfree(tgt_attr->name);
++ kfree(tgt_attr);
++
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * target_mgmt_mutex supposed to be locked. If target != 0, target_mutex
++ * supposed to be locked as well.
++ */
++static int iscsi_del_attr(struct iscsi_target *target,
++ const char *attr_name)
++{
++ int res = 0;
++ struct iscsi_attr *tgt_attr, *a;
++ struct list_head *attrs_list;
++
++ TRACE_ENTRY();
++
++ if (target != NULL)
++ attrs_list = &target->attrs_list;
++ else
++ attrs_list = &iscsi_attrs_list;
++
++ tgt_attr = NULL;
++ list_for_each_entry(a, attrs_list, attrs_list_entry) {
++ /* Both for sure NULL-terminated */
++ if (strcmp(a->name, attr_name) == 0) {
++ tgt_attr = a;
++ break;
++ }
++ }
++
++ if (tgt_attr == NULL) {
++ PRINT_ERROR("attr %s not found (target %s)", attr_name,
++ (target != NULL) ? target->name : "global");
++ res = -ENOENT;
++ goto out;
++ }
++
++ __iscsi_del_attr(target, tgt_attr);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* target_mgmt_mutex supposed to be locked */
++static int iscsi_attr_cmd(void __user *ptr, unsigned int cmd)
++{
++ int rc, err = 0;
++ struct iscsi_kern_attr_info info;
++ struct iscsi_target *target;
++ struct scst_sysfs_user_info *i = NULL;
++
++ TRACE_ENTRY();
++
++ rc = copy_from_user(&info, ptr, sizeof(info));
++ if (rc != 0) {
++ PRINT_ERROR("Failed to copy %d user's bytes", rc);
++ err = -EFAULT;
++ goto out;
++ }
++
++ info.attr.name[sizeof(info.attr.name)-1] = '\0';
++
++ if (info.cookie != 0) {
++ i = scst_sysfs_user_get_info(info.cookie);
++ TRACE_DBG("cookie %u, uinfo %p", info.cookie, i);
++ if (i == NULL) {
++ err = -EINVAL;
++ goto out;
++ }
++ }
++
++ target = target_lookup_by_id(info.tid);
++
++ if (target != NULL)
++ mutex_lock(&target->target_mutex);
++
++ switch (cmd) {
++ case ISCSI_ATTR_ADD:
++ err = iscsi_add_attr(target, &info.attr);
++ break;
++ case ISCSI_ATTR_DEL:
++ err = iscsi_del_attr(target, info.attr.name);
++ break;
++ default:
++ BUG();
++ }
++
++ if (target != NULL)
++ mutex_unlock(&target->target_mutex);
++
++ if (i != NULL) {
++ i->info_status = err;
++ complete(&i->info_completion);
++ }
++
++out:
++ TRACE_EXIT_RES(err);
++ return err;
++}
++
++/* target_mgmt_mutex supposed to be locked */
++static int add_target(void __user *ptr)
++{
++ int err, rc;
++ struct iscsi_kern_target_info *info;
++ struct scst_sysfs_user_info *uinfo;
++
++ TRACE_ENTRY();
++
++ info = kzalloc(sizeof(*info), GFP_KERNEL);
++ if (info == NULL) {
++ PRINT_ERROR("Can't alloc info (size %zd)", sizeof(*info));
++ err = -ENOMEM;
++ goto out;
++ }
++
++ rc = copy_from_user(info, ptr, sizeof(*info));
++ if (rc != 0) {
++ PRINT_ERROR("Failed to copy %d user's bytes", rc);
++ err = -EFAULT;
++ goto out_free;
++ }
++
++ if (target_lookup_by_id(info->tid) != NULL) {
++		PRINT_ERROR("Target %u already exists!", info->tid);
++ err = -EEXIST;
++ goto out_free;
++ }
++
++ info->name[sizeof(info->name)-1] = '\0';
++
++ if (info->cookie != 0) {
++ uinfo = scst_sysfs_user_get_info(info->cookie);
++ TRACE_DBG("cookie %u, uinfo %p", info->cookie, uinfo);
++ if (uinfo == NULL) {
++ err = -EINVAL;
++ goto out_free;
++ }
++ } else
++ uinfo = NULL;
++
++ err = __add_target(info);
++
++ if (uinfo != NULL) {
++ uinfo->info_status = err;
++ complete(&uinfo->info_completion);
++ }
++
++out_free:
++ kfree(info);
++
++out:
++ TRACE_EXIT_RES(err);
++ return err;
++}
++
++/* target_mgmt_mutex supposed to be locked */
++static int del_target(void __user *ptr)
++{
++ int err, rc;
++ struct iscsi_kern_target_info info;
++ struct scst_sysfs_user_info *uinfo;
++
++ TRACE_ENTRY();
++
++ rc = copy_from_user(&info, ptr, sizeof(info));
++ if (rc != 0) {
++ PRINT_ERROR("Failed to copy %d user's bytes", rc);
++ err = -EFAULT;
++ goto out;
++ }
++
++ info.name[sizeof(info.name)-1] = '\0';
++
++ if (info.cookie != 0) {
++ uinfo = scst_sysfs_user_get_info(info.cookie);
++ TRACE_DBG("cookie %u, uinfo %p", info.cookie, uinfo);
++ if (uinfo == NULL) {
++ err = -EINVAL;
++ goto out;
++ }
++ } else
++ uinfo = NULL;
++
++ err = __del_target(info.tid);
++
++ if (uinfo != NULL) {
++ uinfo->info_status = err;
++ complete(&uinfo->info_completion);
++ }
++
++out:
++ TRACE_EXIT_RES(err);
++ return err;
++}
++
++static int iscsi_register(void __user *arg)
++{
++ struct iscsi_kern_register_info reg;
++ char ver[sizeof(ISCSI_SCST_INTERFACE_VERSION)+1];
++ int res, rc;
++
++ TRACE_ENTRY();
++
++ rc = copy_from_user(&reg, arg, sizeof(reg));
++ if (rc != 0) {
++ PRINT_ERROR("%s", "Unable to get register info");
++ res = -EFAULT;
++ goto out;
++ }
++
++ rc = copy_from_user(ver, (void __user *)(unsigned long)reg.version,
++ sizeof(ver));
++ if (rc != 0) {
++ PRINT_ERROR("%s", "Unable to get version string");
++ res = -EFAULT;
++ goto out;
++ }
++ ver[sizeof(ver)-1] = '\0';
++
++ if (strcmp(ver, ISCSI_SCST_INTERFACE_VERSION) != 0) {
++ PRINT_ERROR("Incorrect version of user space %s (expected %s)",
++ ver, ISCSI_SCST_INTERFACE_VERSION);
++ res = -EINVAL;
++ goto out;
++ }
++
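++	/* Report the kernel-side limits back to the user space daemon */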
++ memset(&reg, 0, sizeof(reg));
++ reg.max_data_seg_len = ISCSI_CONN_IOV_MAX << PAGE_SHIFT;
++ reg.max_queued_cmds = scst_get_max_lun_commands(NULL, NO_SUCH_LUN);
++
++ res = 0;
++
++ rc = copy_to_user(arg, &reg, sizeof(reg));
++ if (rc != 0) {
++ PRINT_ERROR("Failed to copy to user %d bytes", rc);
++ res = -EFAULT;
++ goto out;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static long ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ long err;
++
++ TRACE_ENTRY();
++
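++	/*
++	 * REGISTER_USERD is handled without target_mgmt_mutex; all other
++	 * commands below are serialized by it.
++	 */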
++ if (cmd == REGISTER_USERD) {
++ err = iscsi_register((void __user *)arg);
++ goto out;
++ }
++
++ err = mutex_lock_interruptible(&target_mgmt_mutex);
++ if (err < 0)
++ goto out;
++
++ switch (cmd) {
++ case ADD_TARGET:
++ err = add_target((void __user *)arg);
++ break;
++
++ case DEL_TARGET:
++ err = del_target((void __user *)arg);
++ break;
++
++ case ISCSI_ATTR_ADD:
++ case ISCSI_ATTR_DEL:
++ err = iscsi_attr_cmd((void __user *)arg, cmd);
++ break;
++
++ case MGMT_CMD_CALLBACK:
++ err = mgmt_cmd_callback((void __user *)arg);
++ break;
++
++ case ISCSI_INITIATOR_ALLOWED:
++ err = iscsi_initiator_allowed((void __user *)arg);
++ break;
++
++ case ADD_SESSION:
++ err = add_session((void __user *)arg);
++ break;
++
++ case DEL_SESSION:
++ err = del_session((void __user *)arg);
++ break;
++
++ case ISCSI_PARAM_SET:
++ err = iscsi_params_config((void __user *)arg, 1);
++ break;
++
++ case ISCSI_PARAM_GET:
++ err = iscsi_params_config((void __user *)arg, 0);
++ break;
++
++ case ADD_CONN:
++ err = add_conn((void __user *)arg);
++ break;
++
++ case DEL_CONN:
++ err = del_conn((void __user *)arg);
++ break;
++
++ default:
++ PRINT_ERROR("Invalid ioctl cmd %x", cmd);
++ err = -EINVAL;
++ goto out_unlock;
++ }
++
++out_unlock:
++ mutex_unlock(&target_mgmt_mutex);
++
++out:
++ TRACE_EXIT_RES(err);
++ return err;
++}
++
++static int open(struct inode *inode, struct file *file)
++{
++ bool already;
++
++ mutex_lock(&target_mgmt_mutex);
++ already = (ctr_open_state != ISCSI_CTR_OPEN_STATE_CLOSED);
++ if (!already)
++ ctr_open_state = ISCSI_CTR_OPEN_STATE_OPEN;
++ mutex_unlock(&target_mgmt_mutex);
++
++ if (already) {
++		PRINT_WARNING("%s", "Attempt to open the control device "
++			"a second time!");
++ return -EBUSY;
++ } else
++ return 0;
++}
++
++static int release(struct inode *inode, struct file *filp)
++{
++ struct iscsi_attr *attr, *t;
++
++ TRACE(TRACE_MGMT, "%s", "Releasing allocated resources");
++
++ mutex_lock(&target_mgmt_mutex);
++ ctr_open_state = ISCSI_CTR_OPEN_STATE_CLOSING;
++ mutex_unlock(&target_mgmt_mutex);
++
++ target_del_all();
++
++ mutex_lock(&target_mgmt_mutex);
++
++ list_for_each_entry_safe(attr, t, &iscsi_attrs_list,
++ attrs_list_entry) {
++ __iscsi_del_attr(NULL, attr);
++ }
++
++ ctr_open_state = ISCSI_CTR_OPEN_STATE_CLOSED;
++
++ mutex_unlock(&target_mgmt_mutex);
++
++ return 0;
++}
++
++const struct file_operations ctr_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = ioctl,
++ .compat_ioctl = ioctl,
++ .open = open,
++ .release = release,
++};
++
++#ifdef CONFIG_SCST_DEBUG
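++/*
++ * Dump one byte of a PDU in a 16-bytes-per-line hex + ASCII layout.
++ * A negative ch flushes the pending (possibly partial) line.
++ */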
++static void iscsi_dump_char(int ch, unsigned char *text, int *pos)
++{
++ int i = *pos;
++
++ if (ch < 0) {
++ while ((i % 16) != 0) {
++ printk(KERN_CONT " ");
++ text[i] = ' ';
++ i++;
++ if ((i % 16) == 0)
++ printk(KERN_CONT " | %.16s |\n", text);
++ else if ((i % 4) == 0)
++ printk(KERN_CONT " |");
++ }
++ i = 0;
++ goto out;
++ }
++
++ text[i] = (ch < 0x20 || (ch >= 0x80 && ch <= 0xa0)) ? ' ' : ch;
++ printk(KERN_CONT " %02x", ch);
++ i++;
++ if ((i % 16) == 0) {
++ printk(KERN_CONT " | %.16s |\n", text);
++ i = 0;
++ } else if ((i % 4) == 0)
++ printk(KERN_CONT " |");
++
++out:
++ *pos = i;
++ return;
++}
++
++void iscsi_dump_pdu(struct iscsi_pdu *pdu)
++{
++ unsigned char text[16];
++ int pos = 0;
++
++ if (trace_flag & TRACE_D_DUMP_PDU) {
++ unsigned char *buf;
++ int i;
++
++ buf = (void *)&pdu->bhs;
++ printk(KERN_DEBUG "BHS: (%p,%zd)\n", buf, sizeof(pdu->bhs));
++ for (i = 0; i < (int)sizeof(pdu->bhs); i++)
++ iscsi_dump_char(*buf++, text, &pos);
++ iscsi_dump_char(-1, text, &pos);
++
++ buf = (void *)pdu->ahs;
++ printk(KERN_DEBUG "AHS: (%p,%d)\n", buf, pdu->ahssize);
++ for (i = 0; i < pdu->ahssize; i++)
++ iscsi_dump_char(*buf++, text, &pos);
++ iscsi_dump_char(-1, text, &pos);
++
++ printk(KERN_DEBUG "Data: (%d)\n", pdu->datasize);
++ }
++}
++
++unsigned long iscsi_get_flow_ctrl_or_mgmt_dbg_log_flag(struct iscsi_cmnd *cmnd)
++{
++ unsigned long flag;
++
++ if (cmnd->cmd_req != NULL)
++ cmnd = cmnd->cmd_req;
++
++ if (cmnd->scst_cmd == NULL)
++ flag = TRACE_MGMT_DEBUG;
++ else {
++ int status = scst_cmd_get_status(cmnd->scst_cmd);
++ if ((status == SAM_STAT_TASK_SET_FULL) ||
++ (status == SAM_STAT_BUSY))
++ flag = TRACE_FLOW_CONTROL;
++ else
++ flag = TRACE_MGMT_DEBUG;
++ }
++ return flag;
++}
++
++#endif /* CONFIG_SCST_DEBUG */
+diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/conn.c linux-2.6.36/drivers/scst/iscsi-scst/conn.c
+--- orig/linux-2.6.36/drivers/scst/iscsi-scst/conn.c
++++ linux-2.6.36/drivers/scst/iscsi-scst/conn.c
+@@ -0,0 +1,910 @@
++/*
++ * Copyright (C) 2002 - 2003 Ardis Technolgies <roman@ardistech.com>
++ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/file.h>
++#include <linux/ip.h>
++#include <net/tcp.h>
++
++#include "iscsi.h"
++#include "digest.h"
++
++static int print_conn_state(char *p, size_t size, struct iscsi_conn *conn)
++{
++ int pos = 0;
++
++ if (conn->closing) {
++ pos += scnprintf(p, size, "%s", "closing");
++ goto out;
++ }
++
++ switch (conn->rd_state) {
++ case ISCSI_CONN_RD_STATE_PROCESSING:
++ pos += scnprintf(&p[pos], size - pos, "%s", "read_processing ");
++ break;
++ case ISCSI_CONN_RD_STATE_IN_LIST:
++ pos += scnprintf(&p[pos], size - pos, "%s", "in_read_list ");
++ break;
++ }
++
++ switch (conn->wr_state) {
++ case ISCSI_CONN_WR_STATE_PROCESSING:
++ pos += scnprintf(&p[pos], size - pos, "%s", "write_processing ");
++ break;
++ case ISCSI_CONN_WR_STATE_IN_LIST:
++ pos += scnprintf(&p[pos], size - pos, "%s", "in_write_list ");
++ break;
++ case ISCSI_CONN_WR_STATE_SPACE_WAIT:
++ pos += scnprintf(&p[pos], size - pos, "%s", "space_waiting ");
++ break;
++ }
++
++ if (test_bit(ISCSI_CONN_REINSTATING, &conn->conn_aflags))
++ pos += scnprintf(&p[pos], size - pos, "%s", "reinstating ");
++ else if (pos == 0)
++ pos += scnprintf(&p[pos], size - pos, "%s", "established idle ");
++
++out:
++ return pos;
++}
++
++static void iscsi_conn_release(struct kobject *kobj)
++{
++ struct iscsi_conn *conn;
++
++ TRACE_ENTRY();
++
++ conn = container_of(kobj, struct iscsi_conn, conn_kobj);
++ complete_all(&conn->conn_kobj_release_cmpl);
++
++ TRACE_EXIT();
++ return;
++}
++
++struct kobj_type iscsi_conn_ktype = {
++ .release = iscsi_conn_release,
++};
++
++static ssize_t iscsi_get_initiator_ip(struct iscsi_conn *conn,
++ char *buf, int size)
++{
++ int pos;
++ struct sock *sk;
++
++ TRACE_ENTRY();
++
++ sk = conn->sock->sk;
++ switch (sk->sk_family) {
++ case AF_INET:
++ pos = scnprintf(buf, size,
++ "%pI4", &inet_sk(sk)->inet_daddr);
++ break;
++ case AF_INET6:
++		pos = scnprintf(buf, size, "[%pI6]",
++ &inet6_sk(sk)->daddr);
++ break;
++ default:
++ pos = scnprintf(buf, size, "Unknown family %d",
++ sk->sk_family);
++ break;
++ }
++
++ TRACE_EXIT_RES(pos);
++ return pos;
++}
++
++static ssize_t iscsi_conn_ip_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos;
++ struct iscsi_conn *conn;
++
++ TRACE_ENTRY();
++
++ conn = container_of(kobj, struct iscsi_conn, conn_kobj);
++
++ pos = iscsi_get_initiator_ip(conn, buf, SCST_SYSFS_BLOCK_SIZE);
++
++ TRACE_EXIT_RES(pos);
++ return pos;
++}
++
++static struct kobj_attribute iscsi_conn_ip_attr =
++ __ATTR(ip, S_IRUGO, iscsi_conn_ip_show, NULL);
++
++static ssize_t iscsi_conn_cid_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos;
++ struct iscsi_conn *conn;
++
++ TRACE_ENTRY();
++
++ conn = container_of(kobj, struct iscsi_conn, conn_kobj);
++
++ pos = sprintf(buf, "%u", conn->cid);
++
++ TRACE_EXIT_RES(pos);
++ return pos;
++}
++
++static struct kobj_attribute iscsi_conn_cid_attr =
++ __ATTR(cid, S_IRUGO, iscsi_conn_cid_show, NULL);
++
++static ssize_t iscsi_conn_state_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos;
++ struct iscsi_conn *conn;
++
++ TRACE_ENTRY();
++
++ conn = container_of(kobj, struct iscsi_conn, conn_kobj);
++
++ pos = print_conn_state(buf, SCST_SYSFS_BLOCK_SIZE, conn);
++
++ TRACE_EXIT_RES(pos);
++ return pos;
++}
++
++static struct kobj_attribute iscsi_conn_state_attr =
++ __ATTR(state, S_IRUGO, iscsi_conn_state_show, NULL);
++
++static void conn_sysfs_del(struct iscsi_conn *conn)
++{
++ int rc;
++
++ TRACE_ENTRY();
++
++ kobject_del(&conn->conn_kobj);
++ kobject_put(&conn->conn_kobj);
++
++ rc = wait_for_completion_timeout(&conn->conn_kobj_release_cmpl, HZ);
++ if (rc == 0) {
++		PRINT_INFO("Waiting for the sysfs entry of conn %p "
++			"to be released (%d refs)...", conn,
++ atomic_read(&conn->conn_kobj.kref.refcount));
++ wait_for_completion(&conn->conn_kobj_release_cmpl);
++		PRINT_INFO("Done waiting for the release of the sysfs "
++			"entry of conn %p", conn);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static int conn_sysfs_add(struct iscsi_conn *conn)
++{
++ int res;
++ struct iscsi_session *session = conn->session;
++ struct iscsi_conn *c;
++ int n = 1;
++ char addr[64];
++
++ TRACE_ENTRY();
++
++ iscsi_get_initiator_ip(conn, addr, sizeof(addr));
++
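++	/*
++	 * The connection's sysfs entry is named after the initiator IP. If
++	 * another connection of this session already uses that name, append
++	 * a _<n> suffix until the name becomes unique.
++	 */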
++restart:
++ list_for_each_entry(c, &session->conn_list, conn_list_entry) {
++		if (strcmp(addr, kobject_name(&c->conn_kobj)) == 0) {
++ char c_addr[64];
++
++ iscsi_get_initiator_ip(conn, c_addr, sizeof(c_addr));
++
++ TRACE_DBG("Duplicated conn from the same initiator "
++ "%s found", c_addr);
++
++ snprintf(addr, sizeof(addr), "%s_%d", c_addr, n);
++ n++;
++ goto restart;
++ }
++ }
++
++ init_completion(&conn->conn_kobj_release_cmpl);
++
++ res = kobject_init_and_add(&conn->conn_kobj, &iscsi_conn_ktype,
++ scst_sysfs_get_sess_kobj(session->scst_sess), addr);
++ if (res != 0) {
++		PRINT_ERROR("Unable to create sysfs entries for conn %s",
++ addr);
++ goto out;
++ }
++
++ TRACE_DBG("conn %p, conn_kobj %p", conn, &conn->conn_kobj);
++
++ res = sysfs_create_file(&conn->conn_kobj,
++ &iscsi_conn_state_attr.attr);
++ if (res != 0) {
++		PRINT_ERROR("Unable to create sysfs attribute %s for conn %s",
++ iscsi_conn_state_attr.attr.name, addr);
++ goto out_err;
++ }
++
++ res = sysfs_create_file(&conn->conn_kobj,
++ &iscsi_conn_cid_attr.attr);
++ if (res != 0) {
++		PRINT_ERROR("Unable to create sysfs attribute %s for conn %s",
++ iscsi_conn_cid_attr.attr.name, addr);
++ goto out_err;
++ }
++
++ res = sysfs_create_file(&conn->conn_kobj,
++ &iscsi_conn_ip_attr.attr);
++ if (res != 0) {
++		PRINT_ERROR("Unable to create sysfs attribute %s for conn %s",
++ iscsi_conn_ip_attr.attr.name, addr);
++ goto out_err;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_err:
++ conn_sysfs_del(conn);
++ goto out;
++}
++
++/* target_mutex supposed to be locked */
++struct iscsi_conn *conn_lookup(struct iscsi_session *session, u16 cid)
++{
++ struct iscsi_conn *conn;
++
++ /*
++ * We need to find the latest conn to correctly handle
++ * multi-reinstatements
++ */
++ list_for_each_entry_reverse(conn, &session->conn_list,
++ conn_list_entry) {
++ if (conn->cid == cid)
++ return conn;
++ }
++ return NULL;
++}
++
++void iscsi_make_conn_rd_active(struct iscsi_conn *conn)
++{
++ TRACE_ENTRY();
++
++ spin_lock_bh(&iscsi_rd_lock);
++
++ TRACE_DBG("conn %p, rd_state %x, rd_data_ready %d", conn,
++ conn->rd_state, conn->rd_data_ready);
++
++ /*
++	 * Start processing ASAP instead of waiting for all the expected data
++	 * to be received, even if several wakeup iterations are needed to
++	 * receive it all, because starting early, i.e. in parallel, is better
++	 * for performance, especially on multi-CPU/core systems.
++ */
++
++ conn->rd_data_ready = 1;
++
++ if (conn->rd_state == ISCSI_CONN_RD_STATE_IDLE) {
++ list_add_tail(&conn->rd_list_entry, &iscsi_rd_list);
++ conn->rd_state = ISCSI_CONN_RD_STATE_IN_LIST;
++ wake_up(&iscsi_rd_waitQ);
++ }
++
++ spin_unlock_bh(&iscsi_rd_lock);
++
++ TRACE_EXIT();
++ return;
++}
++
++void iscsi_make_conn_wr_active(struct iscsi_conn *conn)
++{
++ TRACE_ENTRY();
++
++ spin_lock_bh(&iscsi_wr_lock);
++
++ TRACE_DBG("conn %p, wr_state %x, wr_space_ready %d", conn,
++ conn->wr_state, conn->wr_space_ready);
++
++ /*
++	 * Start sending the queued data ASAP, even if not all the needed
++	 * buffers are ready yet and several wakeup iterations are needed to
++	 * send everything, because starting early, i.e. in parallel, is
++	 * better for performance, especially on multi-CPU/core systems.
++ */
++
++ if (conn->wr_state == ISCSI_CONN_WR_STATE_IDLE) {
++ list_add_tail(&conn->wr_list_entry, &iscsi_wr_list);
++ conn->wr_state = ISCSI_CONN_WR_STATE_IN_LIST;
++ wake_up(&iscsi_wr_waitQ);
++ }
++
++ spin_unlock_bh(&iscsi_wr_lock);
++
++ TRACE_EXIT();
++ return;
++}
++
++void __mark_conn_closed(struct iscsi_conn *conn, int flags)
++{
++ spin_lock_bh(&iscsi_rd_lock);
++ conn->closing = 1;
++ if (flags & ISCSI_CONN_ACTIVE_CLOSE)
++ conn->active_close = 1;
++ if (flags & ISCSI_CONN_DELETING)
++ conn->deleting = 1;
++ spin_unlock_bh(&iscsi_rd_lock);
++
++ iscsi_make_conn_rd_active(conn);
++}
++
++void mark_conn_closed(struct iscsi_conn *conn)
++{
++ __mark_conn_closed(conn, ISCSI_CONN_ACTIVE_CLOSE);
++}
++
++static void __iscsi_state_change(struct sock *sk)
++{
++ struct iscsi_conn *conn = sk->sk_user_data;
++
++ TRACE_ENTRY();
++
++ if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
++ if (!conn->closing) {
++ PRINT_ERROR("Connection with initiator %s "
++ "unexpectedly closed!",
++ conn->session->initiator_name);
++ TRACE_MGMT_DBG("conn %p, sk state %d", conn,
++ sk->sk_state);
++ __mark_conn_closed(conn, 0);
++ }
++ } else
++ iscsi_make_conn_rd_active(conn);
++
++ TRACE_EXIT();
++ return;
++}
++
++static void iscsi_state_change(struct sock *sk)
++{
++ struct iscsi_conn *conn = sk->sk_user_data;
++
++ __iscsi_state_change(sk);
++ conn->old_state_change(sk);
++
++ return;
++}
++
++static void iscsi_data_ready(struct sock *sk, int len)
++{
++ struct iscsi_conn *conn = sk->sk_user_data;
++
++ TRACE_ENTRY();
++
++ iscsi_make_conn_rd_active(conn);
++
++ conn->old_data_ready(sk, len);
++
++ TRACE_EXIT();
++ return;
++}
++
++void __iscsi_write_space_ready(struct iscsi_conn *conn)
++{
++ TRACE_ENTRY();
++
++ spin_lock_bh(&iscsi_wr_lock);
++ conn->wr_space_ready = 1;
++	if (conn->wr_state == ISCSI_CONN_WR_STATE_SPACE_WAIT) {
++ TRACE_DBG("wr space ready (conn %p)", conn);
++ list_add_tail(&conn->wr_list_entry, &iscsi_wr_list);
++ conn->wr_state = ISCSI_CONN_WR_STATE_IN_LIST;
++ wake_up(&iscsi_wr_waitQ);
++ }
++ spin_unlock_bh(&iscsi_wr_lock);
++
++ TRACE_EXIT();
++ return;
++}
++
++static void iscsi_write_space_ready(struct sock *sk)
++{
++ struct iscsi_conn *conn = sk->sk_user_data;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Write space ready for conn %p", conn);
++
++ __iscsi_write_space_ready(conn);
++
++ conn->old_write_space(sk);
++
++ TRACE_EXIT();
++ return;
++}
++
++static void conn_rsp_timer_fn(unsigned long arg)
++{
++ struct iscsi_conn *conn = (struct iscsi_conn *)arg;
++ struct iscsi_cmnd *cmnd;
++ unsigned long j = jiffies;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Timer (conn %p)", conn);
++
++ spin_lock_bh(&conn->write_list_lock);
++
++ if (!list_empty(&conn->write_timeout_list)) {
++ unsigned long timeout_time;
++ cmnd = list_entry(conn->write_timeout_list.next,
++ struct iscsi_cmnd, write_timeout_list_entry);
++
++ timeout_time = j + conn->rsp_timeout + ISCSI_ADD_SCHED_TIME;
++
++ if (unlikely(time_after_eq(j, cmnd->write_start +
++ conn->rsp_timeout))) {
++ if (!conn->closing) {
++ PRINT_ERROR("Timeout sending data/waiting "
++ "for reply to/from initiator "
++ "%s (SID %llx), closing connection",
++ conn->session->initiator_name,
++ (long long unsigned int)
++ conn->session->sid);
++ /*
++ * We must call mark_conn_closed() outside of
++ * write_list_lock or we will have a circular
++ * locking dependency with iscsi_rd_lock.
++ */
++ spin_unlock_bh(&conn->write_list_lock);
++ mark_conn_closed(conn);
++ goto out;
++ }
++ } else if (!timer_pending(&conn->rsp_timer) ||
++ time_after(conn->rsp_timer.expires, timeout_time)) {
++ TRACE_DBG("Restarting timer on %ld (conn %p)",
++ timeout_time, conn);
++ /*
++ * Timer might have been restarted while we were
++ * entering here.
++ *
++			 * Since write_timeout_list is not empty, it is safe to
++			 * restart the timer: we cannot race with
++			 * del_timer_sync() in conn_free().
++ */
++ mod_timer(&conn->rsp_timer, timeout_time);
++ }
++ }
++
++ spin_unlock_bh(&conn->write_list_lock);
++
++ if (unlikely(conn->conn_tm_active)) {
++ TRACE_MGMT_DBG("TM active: making conn %p RD active", conn);
++ iscsi_make_conn_rd_active(conn);
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static void conn_nop_in_delayed_work_fn(struct delayed_work *work)
++{
++ struct iscsi_conn *conn = container_of(work, struct iscsi_conn,
++ nop_in_delayed_work);
++
++ TRACE_ENTRY();
++
++ if (time_after_eq(jiffies, conn->last_rcv_time +
++ conn->nop_in_interval)) {
++ iscsi_send_nop_in(conn);
++ }
++
++ if ((conn->nop_in_interval > 0) &&
++ !test_bit(ISCSI_CONN_SHUTTINGDOWN, &conn->conn_aflags)) {
++ TRACE_DBG("Reschedule Nop-In work for conn %p", conn);
++ schedule_delayed_work(&conn->nop_in_delayed_work,
++ conn->nop_in_interval + ISCSI_ADD_SCHED_TIME);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Must be called from rd thread only */
++void iscsi_check_tm_data_wait_timeouts(struct iscsi_conn *conn, bool force)
++{
++ struct iscsi_cmnd *cmnd;
++ unsigned long j = jiffies;
++ bool aborted_cmds_pending;
++ unsigned long timeout_time = j + ISCSI_TM_DATA_WAIT_TIMEOUT +
++ ISCSI_ADD_SCHED_TIME;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG_FLAG(force ? TRACE_CONN_OC_DBG : TRACE_MGMT_DEBUG,
++ "j %ld (TIMEOUT %d, force %d)", j,
++ ISCSI_TM_DATA_WAIT_TIMEOUT + ISCSI_ADD_SCHED_TIME, force);
++
++ iscsi_extracheck_is_rd_thread(conn);
++
++again:
++ spin_lock_bh(&iscsi_rd_lock);
++ spin_lock(&conn->write_list_lock);
++
++ aborted_cmds_pending = false;
++ list_for_each_entry(cmnd, &conn->write_timeout_list,
++ write_timeout_list_entry) {
++ if (test_bit(ISCSI_CMD_ABORTED, &cmnd->prelim_compl_flags)) {
++ TRACE_DBG_FLAG(force ? TRACE_CONN_OC_DBG : TRACE_MGMT_DEBUG,
++ "Checking aborted cmnd %p (scst_state %d, "
++ "on_write_timeout_list %d, write_start %ld, "
++ "r2t_len_to_receive %d)", cmnd,
++ cmnd->scst_state, cmnd->on_write_timeout_list,
++ cmnd->write_start, cmnd->r2t_len_to_receive);
++ if ((cmnd->r2t_len_to_receive != 0) &&
++ (time_after_eq(j, cmnd->write_start + ISCSI_TM_DATA_WAIT_TIMEOUT) ||
++ force)) {
++ spin_unlock(&conn->write_list_lock);
++ spin_unlock_bh(&iscsi_rd_lock);
++ iscsi_fail_data_waiting_cmnd(cmnd);
++ goto again;
++ }
++ aborted_cmds_pending = true;
++ }
++ }
++
++ if (aborted_cmds_pending) {
++ if (!force &&
++ (!timer_pending(&conn->rsp_timer) ||
++ time_after(conn->rsp_timer.expires, timeout_time))) {
++ TRACE_MGMT_DBG("Mod timer on %ld (conn %p)",
++ timeout_time, conn);
++ mod_timer(&conn->rsp_timer, timeout_time);
++ }
++ } else {
++ TRACE_MGMT_DBG("Clearing conn_tm_active for conn %p", conn);
++ conn->conn_tm_active = 0;
++ }
++
++ spin_unlock(&conn->write_list_lock);
++ spin_unlock_bh(&iscsi_rd_lock);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* target_mutex supposed to be locked */
++void conn_reinst_finished(struct iscsi_conn *conn)
++{
++ struct iscsi_cmnd *cmnd, *t;
++
++ TRACE_ENTRY();
++
++ clear_bit(ISCSI_CONN_REINSTATING, &conn->conn_aflags);
++
++ list_for_each_entry_safe(cmnd, t, &conn->reinst_pending_cmd_list,
++ reinst_pending_cmd_list_entry) {
++ TRACE_MGMT_DBG("Restarting reinst pending cmnd %p",
++ cmnd);
++
++ list_del(&cmnd->reinst_pending_cmd_list_entry);
++
++ /* Restore the state for preliminary completion/cmnd_done() */
++ cmnd->scst_state = ISCSI_CMD_STATE_AFTER_PREPROC;
++
++ iscsi_restart_cmnd(cmnd);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static void conn_activate(struct iscsi_conn *conn)
++{
++ TRACE_MGMT_DBG("Enabling conn %p", conn);
++
++ /* Catch double bind */
++ BUG_ON(conn->sock->sk->sk_state_change == iscsi_state_change);
++
++ write_lock_bh(&conn->sock->sk->sk_callback_lock);
++
++ conn->old_state_change = conn->sock->sk->sk_state_change;
++ conn->sock->sk->sk_state_change = iscsi_state_change;
++
++ conn->old_data_ready = conn->sock->sk->sk_data_ready;
++ conn->sock->sk->sk_data_ready = iscsi_data_ready;
++
++ conn->old_write_space = conn->sock->sk->sk_write_space;
++ conn->sock->sk->sk_write_space = iscsi_write_space_ready;
++
++ write_unlock_bh(&conn->sock->sk->sk_callback_lock);
++
++ /*
++ * Check, if conn was closed while we were initializing it.
++ * This function will make conn rd_active, if necessary.
++ */
++ __iscsi_state_change(conn->sock->sk);
++
++ return;
++}
++
++/*
++ * Note: the code below passes a kernel space pointer (&opt) to setsockopt()
++ * while the declaration of setsockopt specifies that it expects a user space
++ * pointer. This seems to work fine, and this approach is also used in some
++ * other parts of the Linux kernel (see e.g. fs/ocfs2/cluster/tcp.c).
++ */
++static int conn_setup_sock(struct iscsi_conn *conn)
++{
++ int res = 0;
++ int opt = 1;
++ mm_segment_t oldfs;
++ struct iscsi_session *session = conn->session;
++
++ TRACE_DBG("%llx", (long long unsigned int)session->sid);
++
++ conn->sock = SOCKET_I(conn->file->f_dentry->d_inode);
++
++ if (conn->sock->ops->sendpage == NULL) {
++ PRINT_ERROR("Socket for sid %llx doesn't support sendpage()",
++ (long long unsigned int)session->sid);
++ res = -EINVAL;
++ goto out;
++ }
++
++#if 0
++ conn->sock->sk->sk_allocation = GFP_NOIO;
++#endif
++ conn->sock->sk->sk_user_data = conn;
++
++ oldfs = get_fs();
++ set_fs(get_ds());
++ conn->sock->ops->setsockopt(conn->sock, SOL_TCP, TCP_NODELAY,
++ (void __force __user *)&opt, sizeof(opt));
++ set_fs(oldfs);
++
++out:
++ return res;
++}
++
++/* target_mutex supposed to be locked */
++int conn_free(struct iscsi_conn *conn)
++{
++ struct iscsi_session *session = conn->session;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("Freeing conn %p (sess=%p, %#Lx %u)", conn,
++ session, (long long unsigned int)session->sid, conn->cid);
++
++ del_timer_sync(&conn->rsp_timer);
++
++ conn_sysfs_del(conn);
++
++ BUG_ON(atomic_read(&conn->conn_ref_cnt) != 0);
++ BUG_ON(!list_empty(&conn->cmd_list));
++ BUG_ON(!list_empty(&conn->write_list));
++ BUG_ON(!list_empty(&conn->write_timeout_list));
++ BUG_ON(conn->conn_reinst_successor != NULL);
++ BUG_ON(!test_bit(ISCSI_CONN_SHUTTINGDOWN, &conn->conn_aflags));
++
++ /* Just in case if new conn gets freed before the old one */
++ if (test_bit(ISCSI_CONN_REINSTATING, &conn->conn_aflags)) {
++ struct iscsi_conn *c;
++ TRACE_MGMT_DBG("Freeing being reinstated conn %p", conn);
++ list_for_each_entry(c, &session->conn_list,
++ conn_list_entry) {
++ if (c->conn_reinst_successor == conn) {
++ c->conn_reinst_successor = NULL;
++ break;
++ }
++ }
++ }
++
++ list_del(&conn->conn_list_entry);
++
++ fput(conn->file);
++ conn->file = NULL;
++ conn->sock = NULL;
++
++ free_page((unsigned long)conn->read_iov);
++
++ kfree(conn);
++
++ if (list_empty(&session->conn_list)) {
++ BUG_ON(session->sess_reinst_successor != NULL);
++ session_free(session, true);
++ }
++
++ return 0;
++}
++
++/* target_mutex supposed to be locked */
++static int iscsi_conn_alloc(struct iscsi_session *session,
++ struct iscsi_kern_conn_info *info, struct iscsi_conn **new_conn)
++{
++ struct iscsi_conn *conn;
++ int res = 0;
++
++ conn = kzalloc(sizeof(*conn), GFP_KERNEL);
++ if (!conn) {
++ res = -ENOMEM;
++ goto out_err;
++ }
++
++ TRACE_MGMT_DBG("Creating connection %p for sid %#Lx, cid %u", conn,
++ (long long unsigned int)session->sid, info->cid);
++
++ /* Changing it, change ISCSI_CONN_IOV_MAX as well !! */
++ conn->read_iov = (struct iovec *)get_zeroed_page(GFP_KERNEL);
++ if (conn->read_iov == NULL) {
++ res = -ENOMEM;
++ goto out_err_free_conn;
++ }
++
++ atomic_set(&conn->conn_ref_cnt, 0);
++ conn->session = session;
++ if (session->sess_reinstating)
++ __set_bit(ISCSI_CONN_REINSTATING, &conn->conn_aflags);
++ conn->cid = info->cid;
++ conn->stat_sn = info->stat_sn;
++ conn->exp_stat_sn = info->exp_stat_sn;
++ conn->rd_state = ISCSI_CONN_RD_STATE_IDLE;
++ conn->wr_state = ISCSI_CONN_WR_STATE_IDLE;
++
++ conn->hdigest_type = session->sess_params.header_digest;
++ conn->ddigest_type = session->sess_params.data_digest;
++ res = digest_init(conn);
++ if (res != 0)
++ goto out_free_iov;
++
++ conn->target = session->target;
++ spin_lock_init(&conn->cmd_list_lock);
++ INIT_LIST_HEAD(&conn->cmd_list);
++ spin_lock_init(&conn->write_list_lock);
++ INIT_LIST_HEAD(&conn->write_list);
++ INIT_LIST_HEAD(&conn->write_timeout_list);
++ setup_timer(&conn->rsp_timer, conn_rsp_timer_fn, (unsigned long)conn);
++ init_waitqueue_head(&conn->read_state_waitQ);
++ init_completion(&conn->ready_to_free);
++ INIT_LIST_HEAD(&conn->reinst_pending_cmd_list);
++ INIT_LIST_HEAD(&conn->nop_req_list);
++ spin_lock_init(&conn->nop_req_list_lock);
++
++ conn->nop_in_ttt = 0;
++ INIT_DELAYED_WORK(&conn->nop_in_delayed_work,
++ (void (*)(struct work_struct *))conn_nop_in_delayed_work_fn);
++ conn->last_rcv_time = jiffies;
++ conn->rsp_timeout = session->tgt_params.rsp_timeout * HZ;
++ conn->nop_in_interval = session->tgt_params.nop_in_interval * HZ;
++ if (conn->nop_in_interval > 0) {
++ TRACE_DBG("Schedule Nop-In work for conn %p", conn);
++ schedule_delayed_work(&conn->nop_in_delayed_work,
++ conn->nop_in_interval + ISCSI_ADD_SCHED_TIME);
++ }
++
++ conn->file = fget(info->fd);
++
++ res = conn_setup_sock(conn);
++ if (res != 0)
++ goto out_fput;
++
++ res = conn_sysfs_add(conn);
++ if (res != 0)
++ goto out_fput;
++
++ list_add_tail(&conn->conn_list_entry, &session->conn_list);
++
++ *new_conn = conn;
++
++out:
++ return res;
++
++out_fput:
++ fput(conn->file);
++
++out_free_iov:
++ free_page((unsigned long)conn->read_iov);
++
++out_err_free_conn:
++ kfree(conn);
++
++out_err:
++ goto out;
++}
++
++/* target_mutex supposed to be locked */
++int __add_conn(struct iscsi_session *session, struct iscsi_kern_conn_info *info)
++{
++ struct iscsi_conn *conn, *new_conn = NULL;
++ int err;
++ bool reinstatement = false;
++
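++	/*
++	 * An existing, still active connection with the same CID means the
++	 * initiator is performing connection reinstatement (RFC 3720): the
++	 * old connection is closed and replaced by the new one.
++	 */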
++ conn = conn_lookup(session, info->cid);
++ if ((conn != NULL) &&
++ !test_bit(ISCSI_CONN_SHUTTINGDOWN, &conn->conn_aflags)) {
++ /* conn reinstatement */
++ reinstatement = true;
++ } else if (!list_empty(&session->conn_list)) {
++ err = -EEXIST;
++ goto out;
++ }
++
++ err = iscsi_conn_alloc(session, info, &new_conn);
++ if (err != 0)
++ goto out;
++
++ if (reinstatement) {
++ TRACE_MGMT_DBG("Reinstating conn (old %p, new %p)", conn,
++ new_conn);
++ conn->conn_reinst_successor = new_conn;
++ __set_bit(ISCSI_CONN_REINSTATING, &new_conn->conn_aflags);
++ __mark_conn_closed(conn, 0);
++ }
++
++ conn_activate(new_conn);
++
++out:
++ return err;
++}
++
++/* target_mutex supposed to be locked */
++int __del_conn(struct iscsi_session *session, struct iscsi_kern_conn_info *info)
++{
++ struct iscsi_conn *conn;
++ int err = -EEXIST;
++
++ conn = conn_lookup(session, info->cid);
++ if (!conn) {
++ PRINT_WARNING("Connection %d not found", info->cid);
++ return err;
++ }
++
++ PRINT_INFO("Deleting connection with initiator %s (%p)",
++ conn->session->initiator_name, conn);
++
++ __mark_conn_closed(conn, ISCSI_CONN_ACTIVE_CLOSE|ISCSI_CONN_DELETING);
++
++ return 0;
++}
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++
++void iscsi_extracheck_is_rd_thread(struct iscsi_conn *conn)
++{
++ if (unlikely(current != conn->rd_task)) {
++ printk(KERN_EMERG "conn %p rd_task != current %p (pid %d)\n",
++ conn, current, current->pid);
++ while (in_softirq())
++ local_bh_enable();
++ printk(KERN_EMERG "rd_state %x\n", conn->rd_state);
++ printk(KERN_EMERG "rd_task %p\n", conn->rd_task);
++ printk(KERN_EMERG "rd_task->pid %d\n", conn->rd_task->pid);
++ BUG();
++ }
++}
++
++void iscsi_extracheck_is_wr_thread(struct iscsi_conn *conn)
++{
++ if (unlikely(current != conn->wr_task)) {
++ printk(KERN_EMERG "conn %p wr_task != current %p (pid %d)\n",
++ conn, current, current->pid);
++ while (in_softirq())
++ local_bh_enable();
++ printk(KERN_EMERG "wr_state %x\n", conn->wr_state);
++ printk(KERN_EMERG "wr_task %p\n", conn->wr_task);
++ printk(KERN_EMERG "wr_task->pid %d\n", conn->wr_task->pid);
++ BUG();
++ }
++}
++
++#endif /* CONFIG_SCST_EXTRACHECKS */
+diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/digest.c linux-2.6.36/drivers/scst/iscsi-scst/digest.c
+--- orig/linux-2.6.36/drivers/scst/iscsi-scst/digest.c
++++ linux-2.6.36/drivers/scst/iscsi-scst/digest.c
+@@ -0,0 +1,244 @@
++/*
++ * iSCSI digest handling.
++ *
++ * Copyright (C) 2004 - 2006 Xiranet Communications GmbH
++ * <arne.redlich@xiranet.com>
++ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/types.h>
++#include <linux/scatterlist.h>
++
++#include "iscsi.h"
++#include "digest.h"
++#include <linux/crc32c.h>
++
++void digest_alg_available(int *val)
++{
++#if defined(CONFIG_LIBCRC32C_MODULE) || defined(CONFIG_LIBCRC32C)
++ int crc32c = 1;
++#else
++ int crc32c = 0;
++#endif
++
++ if ((*val & DIGEST_CRC32C) && !crc32c) {
++ PRINT_ERROR("%s", "CRC32C digest algorithm not available "
++ "in kernel");
++ *val &= ~DIGEST_CRC32C;
++ }
++}
++
++/**
++ * digest_init - initialize support for digest calculation
++ * @conn: ptr to connection to make use of digests
++ *
++ * Return: 0 on success, < 0 on error
++ */
++int digest_init(struct iscsi_conn *conn)
++{
++ if (!(conn->hdigest_type & DIGEST_ALL))
++ conn->hdigest_type = DIGEST_NONE;
++
++ if (!(conn->ddigest_type & DIGEST_ALL))
++ conn->ddigest_type = DIGEST_NONE;
++
++ return 0;
++}
++
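++/* Computes CRC32C over an sg list, padded up to a 4-byte boundary */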
++static __be32 evaluate_crc32_from_sg(struct scatterlist *sg, int nbytes,
++ uint32_t padding)
++{
++ u32 crc = ~0;
++ int pad_bytes = ((nbytes + 3) & -4) - nbytes;
++
++#ifdef CONFIG_SCST_ISCSI_DEBUG_DIGEST_FAILURES
++ if (((scst_random() % 100000) == 752)) {
++ PRINT_INFO("%s", "Simulating digest failure");
++ return 0;
++ }
++#endif
++
++#if defined(CONFIG_LIBCRC32C_MODULE) || defined(CONFIG_LIBCRC32C)
++ while (nbytes > 0) {
++ int d = min(nbytes, (int)(sg->length));
++ crc = crc32c(crc, sg_virt(sg), d);
++ nbytes -= d;
++ sg++;
++ }
++
++ if (pad_bytes)
++ crc = crc32c(crc, (u8 *)&padding, pad_bytes);
++#endif
++
++ return (__force __be32)~cpu_to_le32(crc);
++}
++
++static __be32 digest_header(struct iscsi_pdu *pdu)
++{
++ struct scatterlist sg[2];
++ unsigned int nbytes = sizeof(struct iscsi_hdr);
++ int asize = (pdu->ahssize + 3) & -4;
++
++ sg_init_table(sg, 2);
++
++ sg_set_buf(&sg[0], &pdu->bhs, nbytes);
++ if (pdu->ahssize) {
++ sg_set_buf(&sg[1], pdu->ahs, asize);
++ nbytes += asize;
++ }
++ EXTRACHECKS_BUG_ON((nbytes & 3) != 0);
++ return evaluate_crc32_from_sg(sg, nbytes, 0);
++}
++
++static __be32 digest_data(struct iscsi_cmnd *cmd, u32 size, u32 offset,
++ uint32_t padding)
++{
++ struct scatterlist *sg = cmd->sg;
++ int idx, count;
++ struct scatterlist saved_sg;
++ __be32 crc;
++
++ offset += sg[0].offset;
++ idx = offset >> PAGE_SHIFT;
++ offset &= ~PAGE_MASK;
++
++ count = get_pgcnt(size, offset);
++
++ TRACE_DBG("req %p, idx %d, count %d, sg_cnt %d, size %d, "
++ "offset %d", cmd, idx, count, cmd->sg_cnt, size, offset);
++ BUG_ON(idx + count > cmd->sg_cnt);
++
++ saved_sg = sg[idx];
++ sg[idx].offset = offset;
++ sg[idx].length -= offset - saved_sg.offset;
++
++ crc = evaluate_crc32_from_sg(sg + idx, size, padding);
++
++ sg[idx] = saved_sg;
++ return crc;
++}
++
++int digest_rx_header(struct iscsi_cmnd *cmnd)
++{
++ __be32 crc;
++
++ crc = digest_header(&cmnd->pdu);
++ if (unlikely(crc != cmnd->hdigest)) {
++ PRINT_ERROR("%s", "RX header digest failed");
++ return -EIO;
++ } else
++ TRACE_DBG("RX header digest OK for cmd %p", cmnd);
++
++ return 0;
++}
++
++void digest_tx_header(struct iscsi_cmnd *cmnd)
++{
++ cmnd->hdigest = digest_header(&cmnd->pdu);
++ TRACE_DBG("TX header digest for cmd %p: %x", cmnd, cmnd->hdigest);
++}
++
++int digest_rx_data(struct iscsi_cmnd *cmnd)
++{
++ struct iscsi_cmnd *req;
++ struct iscsi_data_out_hdr *req_hdr;
++ u32 offset;
++ __be32 crc;
++ int res = 0;
++
++ switch (cmnd_opcode(cmnd)) {
++ case ISCSI_OP_SCSI_DATA_OUT:
++ req = cmnd->cmd_req;
++ if (unlikely(req == NULL)) {
++ /* It can be for prelim completed commands */
++ req = cmnd;
++ goto out;
++ }
++ req_hdr = (struct iscsi_data_out_hdr *)&cmnd->pdu.bhs;
++ offset = be32_to_cpu(req_hdr->buffer_offset);
++ break;
++
++ default:
++ req = cmnd;
++ offset = 0;
++ }
++
++ /*
++ * We need to skip the digest check for prelim completed commands,
++ * because we use a shared data buffer for them, so the check will
++ * most likely fail. Plus, for such commands we sometimes don't have
++ * sg_cnt set correctly (cmnd_prepare_get_rejected_cmd_data() doesn't
++ * do it).
++ */
++ if (unlikely(req->prelim_compl_flags != 0))
++ goto out;
++
++ /*
++ * Temporary workaround to not crash on write residual overflows. ToDo:
++ * until that is fixed, always treat data digests for such overflows as
++ * successful. Ideally, we should allocate one or more additional sg's
++ * for the overflowed data and free them here or on req release. That
++ * isn't trivial for such a virtually never used case, so let's do it
++ * when it becomes needed.
++ */
++ if (unlikely(offset + cmnd->pdu.datasize > req->bufflen)) {
++ PRINT_WARNING("Skipping RX data digest check for residual "
++ "overflow command op %x (data size %d, buffer size %d)",
++ cmnd_hdr(req)->scb[0], offset + cmnd->pdu.datasize,
++ req->bufflen);
++ goto out;
++ }
++
++ crc = digest_data(req, cmnd->pdu.datasize, offset,
++ cmnd->conn->rpadding);
++
++ if (unlikely(crc != cmnd->ddigest)) {
++ TRACE(TRACE_MINOR|TRACE_MGMT_DEBUG, "%s", "RX data digest "
++ "failed");
++ TRACE_MGMT_DBG("Calculated crc %x, ddigest %x, offset %d", crc,
++ cmnd->ddigest, offset);
++ iscsi_dump_pdu(&cmnd->pdu);
++ res = -EIO;
++ } else
++ TRACE_DBG("RX data digest OK for cmd %p", cmnd);
++
++out:
++ return res;
++}
++
++void digest_tx_data(struct iscsi_cmnd *cmnd)
++{
++ struct iscsi_data_in_hdr *hdr;
++ u32 offset;
++
++ TRACE_DBG("%s:%d req %p, own_sg %d, sg %p, sgcnt %d cmnd %p, "
++ "own_sg %d, sg %p, sgcnt %d", __func__, __LINE__,
++ cmnd->parent_req, cmnd->parent_req->own_sg,
++ cmnd->parent_req->sg, cmnd->parent_req->sg_cnt,
++ cmnd, cmnd->own_sg, cmnd->sg, cmnd->sg_cnt);
++
++ switch (cmnd_opcode(cmnd)) {
++ case ISCSI_OP_SCSI_DATA_IN:
++ hdr = (struct iscsi_data_in_hdr *)&cmnd->pdu.bhs;
++ offset = be32_to_cpu(hdr->buffer_offset);
++ break;
++ default:
++ offset = 0;
++ }
++
++ cmnd->ddigest = digest_data(cmnd, cmnd->pdu.datasize, offset, 0);
++ TRACE_DBG("TX data digest for cmd %p: %x (offset %d, opcode %x)", cmnd,
++ cmnd->ddigest, offset, cmnd_opcode(cmnd));
++}
+diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/digest.h linux-2.6.36/drivers/scst/iscsi-scst/digest.h
+--- orig/linux-2.6.36/drivers/scst/iscsi-scst/digest.h
++++ linux-2.6.36/drivers/scst/iscsi-scst/digest.h
+@@ -0,0 +1,31 @@
++/*
++ * iSCSI digest handling.
++ *
++ * Copyright (C) 2004 Xiranet Communications GmbH <arne.redlich@xiranet.com>
++ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __ISCSI_DIGEST_H__
++#define __ISCSI_DIGEST_H__
++
++extern void digest_alg_available(int *val);
++
++extern int digest_init(struct iscsi_conn *conn);
++
++extern int digest_rx_header(struct iscsi_cmnd *cmnd);
++extern int digest_rx_data(struct iscsi_cmnd *cmnd);
++
++extern void digest_tx_header(struct iscsi_cmnd *cmnd);
++extern void digest_tx_data(struct iscsi_cmnd *cmnd);
++
++#endif /* __ISCSI_DIGEST_H__ */
+diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/event.c linux-2.6.36/drivers/scst/iscsi-scst/event.c
+--- orig/linux-2.6.36/drivers/scst/iscsi-scst/event.c
++++ linux-2.6.36/drivers/scst/iscsi-scst/event.c
+@@ -0,0 +1,165 @@
++/*
++ * Event notification code.
++ *
++ * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
++ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ */
++
++#include <net/tcp.h>
++#include <scst/iscsi_scst.h>
++#include "iscsi.h"
++
++static struct sock *nl;
++static u32 iscsid_pid;
++
++static int event_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
++{
++ u32 uid, pid, seq;
++ char *data;
++
++ pid = NETLINK_CREDS(skb)->pid;
++ uid = NETLINK_CREDS(skb)->uid;
++ seq = nlh->nlmsg_seq;
++ data = NLMSG_DATA(nlh);
++
++ iscsid_pid = pid;
++
++ return 0;
++}
++
++static void event_recv_skb(struct sk_buff *skb)
++{
++ int err;
++ struct nlmsghdr *nlh;
++ u32 rlen;
++
++ while (skb->len >= NLMSG_SPACE(0)) {
++ nlh = (struct nlmsghdr *)skb->data;
++ if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
++ goto out;
++ rlen = NLMSG_ALIGN(nlh->nlmsg_len);
++ if (rlen > skb->len)
++ rlen = skb->len;
++ err = event_recv_msg(skb, nlh);
++ if (err)
++ netlink_ack(skb, nlh, -err);
++ else if (nlh->nlmsg_flags & NLM_F_ACK)
++ netlink_ack(skb, nlh, 0);
++ skb_pull(skb, rlen);
++ }
++
++out:
++ return;
++}
++
++/* event_mutex supposed to be held */
++static int __event_send(const void *buf, int buf_len)
++{
++ int res = 0, len;
++ struct sk_buff *skb;
++ struct nlmsghdr *nlh;
++ static u32 seq; /* protected by event_mutex */
++
++ TRACE_ENTRY();
++
++ if (ctr_open_state != ISCSI_CTR_OPEN_STATE_OPEN)
++ goto out;
++
++ len = NLMSG_SPACE(buf_len);
++
++ skb = alloc_skb(NLMSG_SPACE(len), GFP_KERNEL);
++ if (skb == NULL) {
++ PRINT_ERROR("alloc_skb() failed (len %d)", len);
++ res = -ENOMEM;
++ goto out;
++ }
++
++ nlh = __nlmsg_put(skb, iscsid_pid, seq++, NLMSG_DONE,
++ len - sizeof(*nlh), 0);
++
++ memcpy(NLMSG_DATA(nlh), buf, buf_len);
++ res = netlink_unicast(nl, skb, iscsid_pid, 0);
++ if (res <= 0) {
++ if (res != -ECONNREFUSED)
++ PRINT_ERROR("netlink_unicast() failed: %d", res);
++ else
++ TRACE(TRACE_MINOR, "netlink_unicast() failed: %s. "
++ "Not functioning user space?",
++ "Connection refused");
++ goto out;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++int event_send(u32 tid, u64 sid, u32 cid, u32 cookie,
++ enum iscsi_kern_event_code code,
++ const char *param1, const char *param2)
++{
++ int err;
++ static DEFINE_MUTEX(event_mutex);
++ struct iscsi_kern_event event;
++ int param1_size, param2_size;
++
++ param1_size = (param1 != NULL) ? strlen(param1) : 0;
++ param2_size = (param2 != NULL) ? strlen(param2) : 0;
++
++ event.tid = tid;
++ event.sid = sid;
++ event.cid = cid;
++ event.code = code;
++ event.cookie = cookie;
++ event.param1_size = param1_size;
++ event.param2_size = param2_size;
++
++ mutex_lock(&event_mutex);
++
++ err = __event_send(&event, sizeof(event));
++ if (err <= 0)
++ goto out_unlock;
++
++ if (param1_size > 0) {
++ err = __event_send(param1, param1_size);
++ if (err <= 0)
++ goto out_unlock;
++ }
++
++ if (param2_size > 0) {
++ err = __event_send(param2, param2_size);
++ if (err <= 0)
++ goto out_unlock;
++ }
++
++out_unlock:
++ mutex_unlock(&event_mutex);
++ return err;
++}
++
++int __init event_init(void)
++{
++ nl = netlink_kernel_create(&init_net, NETLINK_ISCSI_SCST, 1,
++ event_recv_skb, NULL, THIS_MODULE);
++ if (!nl) {
++ PRINT_ERROR("%s", "netlink_kernel_create() failed");
++ return -ENOMEM;
++ } else
++ return 0;
++}
++
++void event_exit(void)
++{
++ netlink_kernel_release(nl);
++}
+diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c
+--- orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c
++++ linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c
+@@ -0,0 +1,3956 @@
++/*
++ * Copyright (C) 2002 - 2003 Ardis Technolgies <roman@ardistech.com>
++ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/module.h>
++#include <linux/hash.h>
++#include <linux/kthread.h>
++#include <linux/scatterlist.h>
++#include <linux/ctype.h>
++#include <net/tcp.h>
++#include <scsi/scsi.h>
++#include <asm/byteorder.h>
++#include <asm/unaligned.h>
++
++#include "iscsi.h"
++#include "digest.h"
++
++#ifndef GENERATING_UPSTREAM_PATCH
++#if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++#warning "Patch put_page_callback-<kernel-version>.patch not applied on your\
++ kernel or CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION\
++ config option not set. ISCSI-SCST will not work at its best\
++ performance. Refer to the README file for details."
++#endif
++#endif
++
++#define ISCSI_INIT_WRITE_WAKE 0x1
++
++static int ctr_major;
++static char ctr_name[] = "iscsi-scst-ctl";
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++unsigned long iscsi_trace_flag = ISCSI_DEFAULT_LOG_FLAGS;
++#endif
++
++static struct kmem_cache *iscsi_cmnd_cache;
++
++DEFINE_SPINLOCK(iscsi_rd_lock);
++LIST_HEAD(iscsi_rd_list);
++DECLARE_WAIT_QUEUE_HEAD(iscsi_rd_waitQ);
++
++DEFINE_SPINLOCK(iscsi_wr_lock);
++LIST_HEAD(iscsi_wr_list);
++DECLARE_WAIT_QUEUE_HEAD(iscsi_wr_waitQ);
++
++static struct page *dummy_page;
++static struct scatterlist dummy_sg;
++
++struct iscsi_thread_t {
++ struct task_struct *thr;
++ struct list_head threads_list_entry;
++};
++
++static LIST_HEAD(iscsi_threads_list);
++
++static void cmnd_remove_data_wait_hash(struct iscsi_cmnd *cmnd);
++static void iscsi_send_task_mgmt_resp(struct iscsi_cmnd *req, int status);
++static void iscsi_check_send_delayed_tm_resp(struct iscsi_session *sess);
++static void req_cmnd_release(struct iscsi_cmnd *req);
++static int cmnd_insert_data_wait_hash(struct iscsi_cmnd *cmnd);
++static void __cmnd_abort(struct iscsi_cmnd *cmnd);
++static void iscsi_cmnd_init_write(struct iscsi_cmnd *rsp, int flags);
++static void iscsi_set_resid_no_scst_cmd(struct iscsi_cmnd *rsp);
++static void iscsi_set_resid(struct iscsi_cmnd *rsp);
++
++static void iscsi_set_not_received_data_len(struct iscsi_cmnd *req,
++ unsigned int not_received)
++{
++ req->not_received_data_len = not_received;
++ if (req->scst_cmd != NULL)
++ scst_cmd_set_write_not_received_data_len(req->scst_cmd,
++ not_received);
++ return;
++}
++
++static void req_del_from_write_timeout_list(struct iscsi_cmnd *req)
++{
++ struct iscsi_conn *conn;
++
++ TRACE_ENTRY();
++
++ if (!req->on_write_timeout_list)
++ goto out;
++
++ conn = req->conn;
++
++ TRACE_DBG("Deleting cmd %p from conn %p write_timeout_list",
++ req, conn);
++
++ spin_lock_bh(&conn->write_list_lock);
++
++ /* Recheck, since it can be changed behind us */
++ if (unlikely(!req->on_write_timeout_list))
++ goto out_unlock;
++
++ list_del(&req->write_timeout_list_entry);
++ req->on_write_timeout_list = 0;
++
++out_unlock:
++ spin_unlock_bh(&conn->write_list_lock);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static inline u32 cmnd_write_size(struct iscsi_cmnd *cmnd)
++{
++ struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
++
++ if (hdr->flags & ISCSI_CMD_WRITE)
++ return be32_to_cpu(hdr->data_length);
++ return 0;
++}
++
++static inline int cmnd_read_size(struct iscsi_cmnd *cmnd)
++{
++ struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
++
++ if (hdr->flags & ISCSI_CMD_READ) {
++ struct iscsi_ahs_hdr *ahdr;
++
++ if (!(hdr->flags & ISCSI_CMD_WRITE))
++ return be32_to_cpu(hdr->data_length);
++
++ ahdr = (struct iscsi_ahs_hdr *)cmnd->pdu.ahs;
++ if (ahdr != NULL) {
++ uint8_t *p = (uint8_t *)ahdr;
++ unsigned int size = 0;
++ do {
++ int s;
++
++ ahdr = (struct iscsi_ahs_hdr *)p;
++
++ if (ahdr->ahstype == ISCSI_AHSTYPE_RLENGTH) {
++ struct iscsi_rlength_ahdr *rh =
++ (struct iscsi_rlength_ahdr *)ahdr;
++ return be32_to_cpu(rh->read_length);
++ }
++
++ s = 3 + be16_to_cpu(ahdr->ahslength);
++ s = (s + 3) & -4;
++ size += s;
++ p += s;
++ } while (size < cmnd->pdu.ahssize);
++ }
++ return -1;
++ }
++ return 0;
++}
++
++void iscsi_restart_cmnd(struct iscsi_cmnd *cmnd)
++{
++ int status;
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(cmnd->r2t_len_to_receive != 0);
++ EXTRACHECKS_BUG_ON(cmnd->r2t_len_to_send != 0);
++
++ req_del_from_write_timeout_list(cmnd);
++
++ /*
++ * Let's remove cmnd from the hash earlier to keep it smaller.
++ * Also we have to remove hashed req from the hash before sending
++ * response. Otherwise we can have a race where, for some reason, the
++ * cmd's release (and, hence, removal from the hash) is delayed until
++ * after the transmission and the initiator sends a cmd with the same
++ * ITT, so the new command would be erroneously rejected as a duplicate.
++ */
++ if (cmnd->hashed)
++ cmnd_remove_data_wait_hash(cmnd);
++
++ if (unlikely(test_bit(ISCSI_CONN_REINSTATING,
++ &cmnd->conn->conn_aflags))) {
++ struct iscsi_target *target = cmnd->conn->session->target;
++ bool get_out;
++
++ mutex_lock(&target->target_mutex);
++
++ get_out = test_bit(ISCSI_CONN_REINSTATING,
++ &cmnd->conn->conn_aflags);
++ /* Let's not look dead */
++ if (scst_cmd_get_cdb(cmnd->scst_cmd)[0] == TEST_UNIT_READY)
++ get_out = false;
++
++ if (!get_out)
++ goto unlock_cont;
++
++ TRACE_MGMT_DBG("Pending cmnd %p, because conn %p is "
++ "reinstated", cmnd, cmnd->conn);
++
++ cmnd->scst_state = ISCSI_CMD_STATE_REINST_PENDING;
++ list_add_tail(&cmnd->reinst_pending_cmd_list_entry,
++ &cmnd->conn->reinst_pending_cmd_list);
++
++unlock_cont:
++ mutex_unlock(&target->target_mutex);
++
++ if (get_out)
++ goto out;
++ }
++
++ if (unlikely(cmnd->prelim_compl_flags != 0)) {
++ if (test_bit(ISCSI_CMD_ABORTED, &cmnd->prelim_compl_flags)) {
++ TRACE_MGMT_DBG("cmnd %p (scst_cmd %p) aborted", cmnd,
++ cmnd->scst_cmd);
++ req_cmnd_release_force(cmnd);
++ goto out;
++ }
++
++ if (cmnd->scst_cmd == NULL) {
++ TRACE_MGMT_DBG("Finishing preliminary completed cmd %p "
++ "with NULL scst_cmd", cmnd);
++ req_cmnd_release(cmnd);
++ goto out;
++ }
++
++ status = SCST_PREPROCESS_STATUS_ERROR_SENSE_SET;
++ } else
++ status = SCST_PREPROCESS_STATUS_SUCCESS;
++
++ cmnd->scst_state = ISCSI_CMD_STATE_RESTARTED;
++
++ scst_restart_cmd(cmnd->scst_cmd, status, SCST_CONTEXT_THREAD);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static struct iscsi_cmnd *iscsi_create_tm_clone(struct iscsi_cmnd *cmnd)
++{
++ struct iscsi_cmnd *tm_clone;
++
++ TRACE_ENTRY();
++
++ tm_clone = cmnd_alloc(cmnd->conn, NULL);
++ if (tm_clone != NULL) {
++ set_bit(ISCSI_CMD_ABORTED, &tm_clone->prelim_compl_flags);
++ tm_clone->pdu = cmnd->pdu;
++
++ TRACE_MGMT_DBG("TM clone %p for cmnd %p created",
++ tm_clone, cmnd);
++ } else
++ PRINT_ERROR("Failed to create TM clone for cmnd %p", cmnd);
++
++ TRACE_EXIT_HRES((unsigned long)tm_clone);
++ return tm_clone;
++}
++
++void iscsi_fail_data_waiting_cmnd(struct iscsi_cmnd *cmnd)
++{
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("Failing data waiting cmnd %p", cmnd);
++
++ /*
++ * There is no race with conn_abort(), since all these functions
++ * are called from a single read thread
++ */
++ iscsi_extracheck_is_rd_thread(cmnd->conn);
++
++ /* This cmnd is going to die without response */
++ cmnd->r2t_len_to_receive = 0;
++ cmnd->r2t_len_to_send = 0;
++
++ if (cmnd->pending) {
++ struct iscsi_session *session = cmnd->conn->session;
++ struct iscsi_cmnd *tm_clone;
++
++ TRACE_MGMT_DBG("Unpending cmnd %p (sn %u, exp_cmd_sn %u)", cmnd,
++ cmnd->pdu.bhs.sn, session->exp_cmd_sn);
++
++ /*
++ * If cmnd is pending, then the next command, if any, must be
++ * pending too. So, just insert a clone instead of cmnd to
++ * fill the hole in SNs. Then we can release cmnd.
++ */
++
++ tm_clone = iscsi_create_tm_clone(cmnd);
++
++ spin_lock(&session->sn_lock);
++
++ if (tm_clone != NULL) {
++ TRACE_MGMT_DBG("Adding tm_clone %p after its cmnd",
++ tm_clone);
++ list_add(&tm_clone->pending_list_entry,
++ &cmnd->pending_list_entry);
++ }
++
++ list_del(&cmnd->pending_list_entry);
++ cmnd->pending = 0;
++
++ spin_unlock(&session->sn_lock);
++ }
++
++ req_cmnd_release_force(cmnd);
++
++ TRACE_EXIT();
++ return;
++}
++
++struct iscsi_cmnd *cmnd_alloc(struct iscsi_conn *conn,
++ struct iscsi_cmnd *parent)
++{
++ struct iscsi_cmnd *cmnd;
++
++ /* ToDo: __GFP_NOFAIL?? */
++ cmnd = kmem_cache_zalloc(iscsi_cmnd_cache, GFP_KERNEL|__GFP_NOFAIL);
++
++ atomic_set(&cmnd->ref_cnt, 1);
++ cmnd->scst_state = ISCSI_CMD_STATE_NEW;
++ cmnd->conn = conn;
++ cmnd->parent_req = parent;
++
++ if (parent == NULL) {
++ conn_get(conn);
++
++#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++ atomic_set(&cmnd->net_ref_cnt, 0);
++#endif
++ INIT_LIST_HEAD(&cmnd->rsp_cmd_list);
++ INIT_LIST_HEAD(&cmnd->rx_ddigest_cmd_list);
++ cmnd->target_task_tag = ISCSI_RESERVED_TAG_CPU32;
++
++ spin_lock_bh(&conn->cmd_list_lock);
++ list_add_tail(&cmnd->cmd_list_entry, &conn->cmd_list);
++ spin_unlock_bh(&conn->cmd_list_lock);
++ }
++
++ TRACE_DBG("conn %p, parent %p, cmnd %p", conn, parent, cmnd);
++ return cmnd;
++}
++
++/* Frees a command. Also frees the additional header. */
++static void cmnd_free(struct iscsi_cmnd *cmnd)
++{
++ TRACE_ENTRY();
++
++ TRACE_DBG("cmnd %p", cmnd);
++
++ if (unlikely(test_bit(ISCSI_CMD_ABORTED, &cmnd->prelim_compl_flags))) {
++ TRACE_MGMT_DBG("Free aborted cmd %p (scst cmd %p, state %d, "
++ "parent_req %p)", cmnd, cmnd->scst_cmd,
++ cmnd->scst_state, cmnd->parent_req);
++ }
++
++ /* Catch users from cmd_list or rsp_cmd_list */
++ EXTRACHECKS_BUG_ON(atomic_read(&cmnd->ref_cnt) != 0);
++
++ kfree(cmnd->pdu.ahs);
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ if (unlikely(cmnd->on_write_list || cmnd->on_write_timeout_list)) {
++ struct iscsi_scsi_cmd_hdr *req = cmnd_hdr(cmnd);
++
++ PRINT_CRIT_ERROR("cmnd %p still on some list?, %x, %x, %x, "
++ "%x, %x, %x, %x", cmnd, req->opcode, req->scb[0],
++ req->flags, req->itt, be32_to_cpu(req->data_length),
++ req->cmd_sn, be32_to_cpu((__force __be32)(cmnd->pdu.datasize)));
++
++ if (unlikely(cmnd->parent_req)) {
++ struct iscsi_scsi_cmd_hdr *preq =
++ cmnd_hdr(cmnd->parent_req);
++ PRINT_CRIT_ERROR("%p %x %u", preq, preq->opcode,
++ preq->scb[0]);
++ }
++ BUG();
++ }
++#endif
++
++ kmem_cache_free(iscsi_cmnd_cache, cmnd);
++
++ TRACE_EXIT();
++ return;
++}
++
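++/* Decrements the session's active_cmds counter for this request */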
++static void iscsi_dec_active_cmds(struct iscsi_cmnd *req)
++{
++ struct iscsi_session *sess = req->conn->session;
++
++ TRACE_DBG("Decrementing active_cmds (req %p, sess %p, "
++ "new value %d)", req, sess,
++ atomic_read(&sess->active_cmds)-1);
++
++ EXTRACHECKS_BUG_ON(!req->dec_active_cmds);
++
++ atomic_dec(&sess->active_cmds);
++ smp_mb__after_atomic_dec();
++ req->dec_active_cmds = 0;
++#ifdef CONFIG_SCST_EXTRACHECKS
++ if (unlikely(atomic_read(&sess->active_cmds) < 0)) {
++ PRINT_CRIT_ERROR("active_cmds < 0 (%d)!!",
++ atomic_read(&sess->active_cmds));
++ BUG();
++ }
++#endif
++ return;
++}
++
++/* Might be called under some lock and on SIRQ */
++void cmnd_done(struct iscsi_cmnd *cmnd)
++{
++ TRACE_ENTRY();
++
++ TRACE_DBG("cmnd %p", cmnd);
++
++ if (unlikely(test_bit(ISCSI_CMD_ABORTED, &cmnd->prelim_compl_flags))) {
++ TRACE_MGMT_DBG("Done aborted cmd %p (scst cmd %p, state %d, "
++ "parent_req %p)", cmnd, cmnd->scst_cmd,
++ cmnd->scst_state, cmnd->parent_req);
++ }
++
++ EXTRACHECKS_BUG_ON(cmnd->on_rx_digest_list);
++ EXTRACHECKS_BUG_ON(cmnd->hashed);
++
++ req_del_from_write_timeout_list(cmnd);
++
++ if (cmnd->parent_req == NULL) {
++ struct iscsi_conn *conn = cmnd->conn;
++ struct iscsi_cmnd *rsp, *t;
++
++ TRACE_DBG("Deleting req %p from conn %p", cmnd, conn);
++
++ spin_lock_bh(&conn->cmd_list_lock);
++ list_del(&cmnd->cmd_list_entry);
++ spin_unlock_bh(&conn->cmd_list_lock);
++
++ conn_put(conn);
++
++ EXTRACHECKS_BUG_ON(!list_empty(&cmnd->rx_ddigest_cmd_list));
++
++ /* Order between above and below code is important! */
++
++ if ((cmnd->scst_cmd != NULL) || (cmnd->scst_aen != NULL)) {
++ switch (cmnd->scst_state) {
++ case ISCSI_CMD_STATE_PROCESSED:
++ TRACE_DBG("cmd %p PROCESSED", cmnd);
++ scst_tgt_cmd_done(cmnd->scst_cmd,
++ SCST_CONTEXT_DIRECT_ATOMIC);
++ break;
++
++ case ISCSI_CMD_STATE_AFTER_PREPROC:
++ {
++ /* It can be for some aborted commands */
++ struct scst_cmd *scst_cmd = cmnd->scst_cmd;
++ TRACE_DBG("cmd %p AFTER_PREPROC", cmnd);
++ cmnd->scst_state = ISCSI_CMD_STATE_RESTARTED;
++ cmnd->scst_cmd = NULL;
++ scst_restart_cmd(scst_cmd,
++ SCST_PREPROCESS_STATUS_ERROR_FATAL,
++ SCST_CONTEXT_THREAD);
++ break;
++ }
++
++ case ISCSI_CMD_STATE_AEN:
++ TRACE_DBG("cmd %p AEN PROCESSED", cmnd);
++ scst_aen_done(cmnd->scst_aen);
++ break;
++
++ case ISCSI_CMD_STATE_OUT_OF_SCST_PRELIM_COMPL:
++ break;
++
++ default:
++ PRINT_CRIT_ERROR("Unexpected cmnd scst state "
++ "%d", cmnd->scst_state);
++ BUG();
++ break;
++ }
++ }
++
++ if (cmnd->own_sg) {
++ TRACE_DBG("own_sg for req %p", cmnd);
++ if (cmnd->sg != &dummy_sg)
++ scst_free(cmnd->sg, cmnd->sg_cnt);
++#ifdef CONFIG_SCST_DEBUG
++ cmnd->own_sg = 0;
++ cmnd->sg = NULL;
++ cmnd->sg_cnt = -1;
++#endif
++ }
++
++ if (unlikely(cmnd->dec_active_cmds))
++ iscsi_dec_active_cmds(cmnd);
++
++ list_for_each_entry_safe(rsp, t, &cmnd->rsp_cmd_list,
++ rsp_cmd_list_entry) {
++ cmnd_free(rsp);
++ }
++
++ cmnd_free(cmnd);
++ } else {
++ struct iscsi_cmnd *parent = cmnd->parent_req;
++
++ if (cmnd->own_sg) {
++ TRACE_DBG("own_sg for rsp %p", cmnd);
++ if ((cmnd->sg != &dummy_sg) && (cmnd->sg != cmnd->rsp_sg))
++ scst_free(cmnd->sg, cmnd->sg_cnt);
++#ifdef CONFIG_SCST_DEBUG
++ cmnd->own_sg = 0;
++ cmnd->sg = NULL;
++ cmnd->sg_cnt = -1;
++#endif
++ }
++
++ EXTRACHECKS_BUG_ON(cmnd->dec_active_cmds);
++
++ if (cmnd == parent->main_rsp) {
++ TRACE_DBG("Finishing main rsp %p (req %p)", cmnd,
++ parent);
++ parent->main_rsp = NULL;
++ }
++
++ cmnd_put(parent);
++ /*
++ * cmnd will be freed on the last parent's put and can already
++ * be freed!!
++ */
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * The corresponding conn may also get destroyed after this function, except
++ * when it's called from the read thread!
++ *
++ * It can't be called in parallel with iscsi_cmnds_init_write()!
++ */
++void req_cmnd_release_force(struct iscsi_cmnd *req)
++{
++ struct iscsi_cmnd *rsp, *t;
++ struct iscsi_conn *conn = req->conn;
++ LIST_HEAD(cmds_list);
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("req %p", req);
++
++ BUG_ON(req == conn->read_cmnd);
++
++ spin_lock_bh(&conn->write_list_lock);
++ list_for_each_entry_safe(rsp, t, &conn->write_list, write_list_entry) {
++ if (rsp->parent_req != req)
++ continue;
++
++ cmd_del_from_write_list(rsp);
++
++ list_add_tail(&rsp->write_list_entry, &cmds_list);
++ }
++ spin_unlock_bh(&conn->write_list_lock);
++
++ list_for_each_entry_safe(rsp, t, &cmds_list, write_list_entry) {
++ TRACE_MGMT_DBG("Putting write rsp %p", rsp);
++ list_del(&rsp->write_list_entry);
++ cmnd_put(rsp);
++ }
++
++ /* It is assumed nobody can add responses to the list anymore */
++ list_for_each_entry_reverse(rsp, &req->rsp_cmd_list,
++ rsp_cmd_list_entry) {
++ bool r;
++
++ if (rsp->force_cleanup_done)
++ continue;
++
++ rsp->force_cleanup_done = 1;
++
++ if (cmnd_get_check(rsp))
++ continue;
++
++ spin_lock_bh(&conn->write_list_lock);
++ r = rsp->on_write_list || rsp->write_processing_started;
++ spin_unlock_bh(&conn->write_list_lock);
++
++ cmnd_put(rsp);
++
++ if (r)
++ continue;
++
++ /*
++ * If neither on_write_list nor write_processing_started is set,
++ * we can safely put() rsp.
++ */
++ TRACE_MGMT_DBG("Putting rsp %p", rsp);
++ cmnd_put(rsp);
++ }
++
++ if (req->main_rsp != NULL) {
++ TRACE_MGMT_DBG("Putting main rsp %p", req->main_rsp);
++ cmnd_put(req->main_rsp);
++ req->main_rsp = NULL;
++ }
++
++ req_cmnd_release(req);
++
++ TRACE_EXIT();
++ return;
++}
++
++static void req_cmnd_pre_release(struct iscsi_cmnd *req)
++{
++ struct iscsi_cmnd *c, *t;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("req %p", req);
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ BUG_ON(req->release_called);
++ req->release_called = 1;
++#endif
++
++ if (unlikely(test_bit(ISCSI_CMD_ABORTED, &req->prelim_compl_flags))) {
++ TRACE_MGMT_DBG("Release aborted req cmd %p (scst cmd %p, "
++ "state %d)", req, req->scst_cmd, req->scst_state);
++ }
++
++ BUG_ON(req->parent_req != NULL);
++
++ if (unlikely(req->hashed)) {
++ /* It can sometimes happen during error recovery */
++ cmnd_remove_data_wait_hash(req);
++ }
++
++ if (unlikely(req->main_rsp != NULL)) {
++ TRACE_DBG("Sending main rsp %p", req->main_rsp);
++ if (cmnd_opcode(req) == ISCSI_OP_SCSI_CMD) {
++ if (req->scst_cmd != NULL)
++ iscsi_set_resid(req->main_rsp);
++ else
++ iscsi_set_resid_no_scst_cmd(req->main_rsp);
++ }
++ iscsi_cmnd_init_write(req->main_rsp, ISCSI_INIT_WRITE_WAKE);
++ req->main_rsp = NULL;
++ }
++
++ list_for_each_entry_safe(c, t, &req->rx_ddigest_cmd_list,
++ rx_ddigest_cmd_list_entry) {
++ cmd_del_from_rx_ddigest_list(c);
++ cmnd_put(c);
++ }
++
++ EXTRACHECKS_BUG_ON(req->pending);
++
++ if (unlikely(req->dec_active_cmds))
++ iscsi_dec_active_cmds(req);
++
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * The corresponding conn may also get destroyed after this function, except
++ * when it's called from the read thread!
++ */
++static void req_cmnd_release(struct iscsi_cmnd *req)
++{
++ TRACE_ENTRY();
++
++ req_cmnd_pre_release(req);
++ cmnd_put(req);
++
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * The corresponding conn may also get destroyed after this function, except
++ * when it's called from the read thread!
++ */
++void rsp_cmnd_release(struct iscsi_cmnd *cmnd)
++{
++ TRACE_DBG("%p", cmnd);
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ BUG_ON(cmnd->release_called);
++ cmnd->release_called = 1;
++#endif
++
++ EXTRACHECKS_BUG_ON(cmnd->parent_req == NULL);
++
++ cmnd_put(cmnd);
++ return;
++}
++
++static struct iscsi_cmnd *iscsi_alloc_rsp(struct iscsi_cmnd *parent)
++{
++ struct iscsi_cmnd *rsp;
++
++ TRACE_ENTRY();
++
++ rsp = cmnd_alloc(parent->conn, parent);
++
++ TRACE_DBG("Adding rsp %p to parent %p", rsp, parent);
++ list_add_tail(&rsp->rsp_cmd_list_entry, &parent->rsp_cmd_list);
++
++ cmnd_get(parent);
++
++ TRACE_EXIT_HRES((unsigned long)rsp);
++ return rsp;
++}
++
++static inline struct iscsi_cmnd *iscsi_alloc_main_rsp(struct iscsi_cmnd *parent)
++{
++ struct iscsi_cmnd *rsp;
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(parent->main_rsp != NULL);
++
++ rsp = iscsi_alloc_rsp(parent);
++ parent->main_rsp = rsp;
++
++ TRACE_EXIT_HRES((unsigned long)rsp);
++ return rsp;
++}
++
++static void iscsi_cmnds_init_write(struct list_head *send, int flags)
++{
++ struct iscsi_cmnd *rsp = list_entry(send->next, struct iscsi_cmnd,
++ write_list_entry);
++ struct iscsi_conn *conn = rsp->conn;
++ struct list_head *pos, *next;
++
++ BUG_ON(list_empty(send));
++
++ if (!(conn->ddigest_type & DIGEST_NONE)) {
++ list_for_each(pos, send) {
++ rsp = list_entry(pos, struct iscsi_cmnd,
++ write_list_entry);
++
++ if (rsp->pdu.datasize != 0) {
++ TRACE_DBG("Doing data digest (%p:%x)", rsp,
++ cmnd_opcode(rsp));
++ digest_tx_data(rsp);
++ }
++ }
++ }
++
++ spin_lock_bh(&conn->write_list_lock);
++ list_for_each_safe(pos, next, send) {
++ rsp = list_entry(pos, struct iscsi_cmnd, write_list_entry);
++
++ TRACE_DBG("%p:%x", rsp, cmnd_opcode(rsp));
++
++ BUG_ON(conn != rsp->conn);
++
++ list_del(&rsp->write_list_entry);
++ cmd_add_on_write_list(conn, rsp);
++ }
++ spin_unlock_bh(&conn->write_list_lock);
++
++ if (flags & ISCSI_INIT_WRITE_WAKE)
++ iscsi_make_conn_wr_active(conn);
++
++ return;
++}
++
++static void iscsi_cmnd_init_write(struct iscsi_cmnd *rsp, int flags)
++{
++ LIST_HEAD(head);
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ if (unlikely(rsp->on_write_list)) {
++ PRINT_CRIT_ERROR("cmd already on write list (%x %x %x "
++ "%u %u %d %d", rsp->pdu.bhs.itt,
++ cmnd_opcode(rsp), cmnd_scsicode(rsp),
++ rsp->hdigest, rsp->ddigest,
++ list_empty(&rsp->rsp_cmd_list), rsp->hashed);
++ BUG();
++ }
++#endif
++ list_add_tail(&rsp->write_list_entry, &head);
++ iscsi_cmnds_init_write(&head, flags);
++ return;
++}
++
++static void iscsi_set_resid_no_scst_cmd(struct iscsi_cmnd *rsp)
++{
++ struct iscsi_cmnd *req = rsp->parent_req;
++ struct iscsi_scsi_cmd_hdr *req_hdr = cmnd_hdr(req);
++ struct iscsi_scsi_rsp_hdr *rsp_hdr = (struct iscsi_scsi_rsp_hdr *)&rsp->pdu.bhs;
++ int resid, out_resid;
++
++ TRACE_ENTRY();
++
++ BUG_ON(req->scst_cmd != NULL);
++
++ TRACE_DBG("req %p, rsp %p, outstanding_r2t %d, r2t_len_to_receive %d, "
++ "r2t_len_to_send %d, not_received_data_len %d", req, rsp,
++ req->outstanding_r2t, req->r2t_len_to_receive,
++ req->r2t_len_to_send, req->not_received_data_len);
++
++ if ((req_hdr->flags & ISCSI_CMD_READ) &&
++ (req_hdr->flags & ISCSI_CMD_WRITE)) {
++ out_resid = req->not_received_data_len;
++ if (out_resid > 0) {
++ rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
++ rsp_hdr->residual_count = cpu_to_be32(out_resid);
++ } else if (out_resid < 0) {
++ out_resid = -out_resid;
++ rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_OVERFLOW;
++ rsp_hdr->residual_count = cpu_to_be32(out_resid);
++ }
++
++ resid = cmnd_read_size(req);
++ if (resid > 0) {
++ rsp_hdr->flags |= ISCSI_FLG_BIRESIDUAL_UNDERFLOW;
++ rsp_hdr->bi_residual_count = cpu_to_be32(resid);
++ } else if (resid < 0) {
++ resid = -resid;
++ rsp_hdr->flags |= ISCSI_FLG_BIRESIDUAL_OVERFLOW;
++ rsp_hdr->bi_residual_count = cpu_to_be32(resid);
++ }
++ } else if (req_hdr->flags & ISCSI_CMD_READ) {
++ resid = be32_to_cpu(req_hdr->data_length);
++ if (resid > 0) {
++ rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
++ rsp_hdr->residual_count = cpu_to_be32(resid);
++ }
++ } else if (req_hdr->flags & ISCSI_CMD_WRITE) {
++ resid = req->not_received_data_len;
++ if (resid > 0) {
++ rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
++ rsp_hdr->residual_count = cpu_to_be32(resid);
++ }
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static void iscsi_set_resid(struct iscsi_cmnd *rsp)
++{
++ struct iscsi_cmnd *req = rsp->parent_req;
++ struct scst_cmd *scst_cmd = req->scst_cmd;
++ struct iscsi_scsi_cmd_hdr *req_hdr;
++ struct iscsi_scsi_rsp_hdr *rsp_hdr;
++ int resid, out_resid;
++
++ TRACE_ENTRY();
++
++ if (likely(!scst_get_resid(scst_cmd, &resid, &out_resid))) {
++ TRACE_DBG("No residuals for req %p", req);
++ goto out;
++ }
++
++ TRACE_DBG("req %p, resid %d, out_resid %d", req, resid, out_resid);
++
++ req_hdr = cmnd_hdr(req);
++ rsp_hdr = (struct iscsi_scsi_rsp_hdr *)&rsp->pdu.bhs;
++
++ if ((req_hdr->flags & ISCSI_CMD_READ) &&
++ (req_hdr->flags & ISCSI_CMD_WRITE)) {
++ if (out_resid > 0) {
++ rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
++ rsp_hdr->residual_count = cpu_to_be32(out_resid);
++ } else if (out_resid < 0) {
++ out_resid = -out_resid;
++ rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_OVERFLOW;
++ rsp_hdr->residual_count = cpu_to_be32(out_resid);
++ }
++
++ if (resid > 0) {
++ rsp_hdr->flags |= ISCSI_FLG_BIRESIDUAL_UNDERFLOW;
++ rsp_hdr->bi_residual_count = cpu_to_be32(resid);
++ } else if (resid < 0) {
++ resid = -resid;
++ rsp_hdr->flags |= ISCSI_FLG_BIRESIDUAL_OVERFLOW;
++ rsp_hdr->bi_residual_count = cpu_to_be32(resid);
++ }
++ } else {
++ if (resid > 0) {
++ rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
++ rsp_hdr->residual_count = cpu_to_be32(resid);
++ } else if (resid < 0) {
++ resid = -resid;
++ rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_OVERFLOW;
++ rsp_hdr->residual_count = cpu_to_be32(resid);
++ }
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static void send_data_rsp(struct iscsi_cmnd *req, u8 status, int send_status)
++{
++ struct iscsi_cmnd *rsp;
++ struct iscsi_scsi_cmd_hdr *req_hdr = cmnd_hdr(req);
++ struct iscsi_data_in_hdr *rsp_hdr;
++ u32 pdusize, size, offset, sn;
++ LIST_HEAD(send);
++
++ TRACE_DBG("req %p", req);
++
++ pdusize = req->conn->session->sess_params.max_xmit_data_length;
++ size = req->bufflen;
++ offset = 0;
++ sn = 0;
++
++ while (1) {
++ rsp = iscsi_alloc_rsp(req);
++ TRACE_DBG("rsp %p", rsp);
++ rsp->sg = req->sg;
++ rsp->sg_cnt = req->sg_cnt;
++ rsp->bufflen = req->bufflen;
++ rsp_hdr = (struct iscsi_data_in_hdr *)&rsp->pdu.bhs;
++
++ rsp_hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
++ rsp_hdr->itt = req_hdr->itt;
++ rsp_hdr->ttt = ISCSI_RESERVED_TAG;
++ rsp_hdr->buffer_offset = cpu_to_be32(offset);
++ rsp_hdr->data_sn = cpu_to_be32(sn);
++
++ if (size <= pdusize) {
++ TRACE_DBG("offset %d, size %d", offset, size);
++ rsp->pdu.datasize = size;
++ if (send_status) {
++ TRACE_DBG("status %x", status);
++
++ EXTRACHECKS_BUG_ON((cmnd_hdr(req)->flags & ISCSI_CMD_WRITE) != 0);
++
++ rsp_hdr->flags = ISCSI_FLG_FINAL | ISCSI_FLG_STATUS;
++ rsp_hdr->cmd_status = status;
++
++ iscsi_set_resid(rsp);
++ }
++ list_add_tail(&rsp->write_list_entry, &send);
++ break;
++ }
++
++ TRACE_DBG("pdusize %d, offset %d, size %d", pdusize, offset,
++ size);
++
++ rsp->pdu.datasize = pdusize;
++
++ size -= pdusize;
++ offset += pdusize;
++ sn++;
++
++ list_add_tail(&rsp->write_list_entry, &send);
++ }
++ iscsi_cmnds_init_write(&send, 0);
++ return;
++}
++
++static void iscsi_init_status_rsp(struct iscsi_cmnd *rsp,
++ int status, const u8 *sense_buf, int sense_len)
++{
++ struct iscsi_cmnd *req = rsp->parent_req;
++ struct iscsi_scsi_rsp_hdr *rsp_hdr;
++ struct scatterlist *sg;
++
++ TRACE_ENTRY();
++
++ rsp_hdr = (struct iscsi_scsi_rsp_hdr *)&rsp->pdu.bhs;
++ rsp_hdr->opcode = ISCSI_OP_SCSI_RSP;
++ rsp_hdr->flags = ISCSI_FLG_FINAL;
++ rsp_hdr->response = ISCSI_RESPONSE_COMMAND_COMPLETED;
++ rsp_hdr->cmd_status = status;
++ rsp_hdr->itt = cmnd_hdr(req)->itt;
++
++ if (SCST_SENSE_VALID(sense_buf)) {
++ TRACE_DBG("%s", "SENSE VALID");
++
++ sg = rsp->sg = rsp->rsp_sg;
++ rsp->sg_cnt = 2;
++ rsp->own_sg = 1;
++
++ sg_init_table(sg, 2);
++ sg_set_buf(&sg[0], &rsp->sense_hdr, sizeof(rsp->sense_hdr));
++ sg_set_buf(&sg[1], sense_buf, sense_len);
++
++ rsp->sense_hdr.length = cpu_to_be16(sense_len);
++
++ rsp->pdu.datasize = sizeof(rsp->sense_hdr) + sense_len;
++ rsp->bufflen = rsp->pdu.datasize;
++ } else {
++ rsp->pdu.datasize = 0;
++ rsp->bufflen = 0;
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static inline struct iscsi_cmnd *create_status_rsp(struct iscsi_cmnd *req,
++ int status, const u8 *sense_buf, int sense_len)
++{
++ struct iscsi_cmnd *rsp;
++
++ TRACE_ENTRY();
++
++ rsp = iscsi_alloc_rsp(req);
++ TRACE_DBG("rsp %p", rsp);
++
++ iscsi_init_status_rsp(rsp, status, sense_buf, sense_len);
++ iscsi_set_resid(rsp);
++
++ TRACE_EXIT_HRES((unsigned long)rsp);
++ return rsp;
++}
++
++/*
++ * Initializes data receive fields. Can be called only when they have not been
++ * initialized yet.
++ */
++static int iscsi_set_prelim_r2t_len_to_receive(struct iscsi_cmnd *req)
++{
++ struct iscsi_scsi_cmd_hdr *req_hdr = (struct iscsi_scsi_cmd_hdr *)&req->pdu.bhs;
++ int res = 0;
++ unsigned int not_received;
++
++ TRACE_ENTRY();
++
++ if (req_hdr->flags & ISCSI_CMD_FINAL) {
++ if (req_hdr->flags & ISCSI_CMD_WRITE)
++ iscsi_set_not_received_data_len(req,
++ be32_to_cpu(req_hdr->data_length) -
++ req->pdu.datasize);
++ goto out;
++ }
++
++ BUG_ON(req->outstanding_r2t != 0);
++
++ res = cmnd_insert_data_wait_hash(req);
++ if (res != 0) {
++ /*
++ * We have to close the connection, because otherwise data
++ * corruption is possible if we allow data for this request
++ * to be received in another request with a duplicated ITT.
++ */
++ mark_conn_closed(req->conn);
++ goto out;
++ }
++
++ /*
++ * We need to wait for one or more PDUs. Let's simplify
++ * other code and pretend we need to receive 1 byte.
++ * In data_out_start() we will correct it.
++ */
++ req->outstanding_r2t = 1;
++ req_add_to_write_timeout_list(req);
++ req->r2t_len_to_receive = 1;
++ req->r2t_len_to_send = 0;
++
++ not_received = be32_to_cpu(req_hdr->data_length) - req->pdu.datasize;
++ not_received -= min_t(unsigned int, not_received,
++ req->conn->session->sess_params.first_burst_length);
++ iscsi_set_not_received_data_len(req, not_received);
++
++ TRACE_DBG("req %p, op %x, outstanding_r2t %d, r2t_len_to_receive %d, "
++ "r2t_len_to_send %d, not_received_data_len %d", req,
++ cmnd_opcode(req), req->outstanding_r2t, req->r2t_len_to_receive,
++ req->r2t_len_to_send, req->not_received_data_len);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int create_preliminary_no_scst_rsp(struct iscsi_cmnd *req,
++ int status, const u8 *sense_buf, int sense_len)
++{
++ struct iscsi_cmnd *rsp;
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ if (req->prelim_compl_flags != 0) {
++ TRACE_MGMT_DBG("req %p already prelim completed", req);
++ goto out;
++ }
++
++ req->scst_state = ISCSI_CMD_STATE_OUT_OF_SCST_PRELIM_COMPL;
++
++ BUG_ON(req->scst_cmd != NULL);
++
++ res = iscsi_preliminary_complete(req, req, true);
++
++ rsp = iscsi_alloc_main_rsp(req);
++ TRACE_DBG("main rsp %p", rsp);
++
++ iscsi_init_status_rsp(rsp, status, sense_buf, sense_len);
++
++ /* Resid will be set in req_cmnd_release() */
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++int set_scst_preliminary_status_rsp(struct iscsi_cmnd *req,
++ bool get_data, int key, int asc, int ascq)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ if (req->scst_cmd == NULL) {
++ /* There must already be an error set */
++ goto complete;
++ }
++
++ scst_set_cmd_error(req->scst_cmd, key, asc, ascq);
++
++complete:
++ res = iscsi_preliminary_complete(req, req, get_data);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int create_reject_rsp(struct iscsi_cmnd *req, int reason, bool get_data)
++{
++ int res = 0;
++ struct iscsi_cmnd *rsp;
++ struct iscsi_reject_hdr *rsp_hdr;
++ struct scatterlist *sg;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("Reject: req %p, reason %x", req, reason);
++
++ if (cmnd_opcode(req) == ISCSI_OP_SCSI_CMD) {
++ if (req->scst_cmd == NULL) {
++ /* BUSY status must already be set */
++ struct iscsi_scsi_rsp_hdr *rsp_hdr1;
++ rsp_hdr1 = (struct iscsi_scsi_rsp_hdr *)&req->main_rsp->pdu.bhs;
++ BUG_ON(rsp_hdr1->cmd_status == 0);
++ /*
++ * Let's not send REJECT here. The initiator will retry
++ * and, hopefully, next time we will not fail allocating
++ * scst_cmd, so we will then send the REJECT.
++ */
++ goto out;
++ } else {
++ /*
++ * "In all the cases in which a pre-instantiated SCSI
++ * task is terminated because of the reject, the target
++ * MUST issue a proper SCSI command response with CHECK
++ * CONDITION as described in Section 10.4.3 Response" -
++ * RFC 3720.
++ */
++ set_scst_preliminary_status_rsp(req, get_data,
++ SCST_LOAD_SENSE(scst_sense_invalid_message));
++ }
++ }
++
++ rsp = iscsi_alloc_main_rsp(req);
++ rsp_hdr = (struct iscsi_reject_hdr *)&rsp->pdu.bhs;
++
++ rsp_hdr->opcode = ISCSI_OP_REJECT;
++ rsp_hdr->ffffffff = ISCSI_RESERVED_TAG;
++ rsp_hdr->reason = reason;
++
++ sg = rsp->sg = rsp->rsp_sg;
++ rsp->sg_cnt = 1;
++ rsp->own_sg = 1;
++ sg_init_one(sg, &req->pdu.bhs, sizeof(struct iscsi_hdr));
++ rsp->bufflen = rsp->pdu.datasize = sizeof(struct iscsi_hdr);
++
++ res = iscsi_preliminary_complete(req, req, true);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static inline int iscsi_get_allowed_cmds(struct iscsi_session *sess)
++{
++ int res = max(-1, (int)sess->tgt_params.queued_cmnds -
++ atomic_read(&sess->active_cmds)-1);
++ TRACE_DBG("allowed cmds %d (sess %p, active_cmds %d)", res,
++ sess, atomic_read(&sess->active_cmds));
++ return res;
++}
++
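++/* Fills ExpCmdSN/MaxCmdSN (and optionally StatSN) in the PDU header */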
++static __be32 cmnd_set_sn(struct iscsi_cmnd *cmnd, int set_stat_sn)
++{
++ struct iscsi_conn *conn = cmnd->conn;
++ struct iscsi_session *sess = conn->session;
++ __be32 res;
++
++ spin_lock(&sess->sn_lock);
++
++ if (set_stat_sn)
++ cmnd->pdu.bhs.sn = (__force u32)cpu_to_be32(conn->stat_sn++);
++ cmnd->pdu.bhs.exp_sn = (__force u32)cpu_to_be32(sess->exp_cmd_sn);
++ cmnd->pdu.bhs.max_sn = (__force u32)cpu_to_be32(sess->exp_cmd_sn +
++ iscsi_get_allowed_cmds(sess));
++
++ res = cpu_to_be32(conn->stat_sn);
++
++ spin_unlock(&sess->sn_lock);
++ return res;
++}
++
++/* Called under sn_lock */
++static void update_stat_sn(struct iscsi_cmnd *cmnd)
++{
++ struct iscsi_conn *conn = cmnd->conn;
++ u32 exp_stat_sn;
++
++ cmnd->pdu.bhs.exp_sn = exp_stat_sn = be32_to_cpu((__force __be32)cmnd->pdu.bhs.exp_sn);
++ TRACE_DBG("%x,%x", cmnd_opcode(cmnd), exp_stat_sn);
++ if ((int)(exp_stat_sn - conn->exp_stat_sn) > 0 &&
++ (int)(exp_stat_sn - conn->stat_sn) <= 0) {
++ /* free pdu resources */
++ cmnd->conn->exp_stat_sn = exp_stat_sn;
++ }
++ return;
++}
++
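++/* Finds a command by ITT in the conn's cmd_list and gets a reference */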
++static struct iscsi_cmnd *cmnd_find_itt_get(struct iscsi_conn *conn, __be32 itt)
++{
++ struct iscsi_cmnd *cmnd, *found_cmnd = NULL;
++
++ spin_lock_bh(&conn->cmd_list_lock);
++ list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
++ if ((cmnd->pdu.bhs.itt == itt) && !cmnd_get_check(cmnd)) {
++ found_cmnd = cmnd;
++ break;
++ }
++ }
++ spin_unlock_bh(&conn->cmd_list_lock);
++
++ return found_cmnd;
++}
++
++/**
++ ** We use the ITT hash only to find the original request PDU for subsequent
++ ** Data-Out PDUs.
++ **/
++
++/* Must be called under cmnd_data_wait_hash_lock */
++static struct iscsi_cmnd *__cmnd_find_data_wait_hash(struct iscsi_conn *conn,
++ __be32 itt)
++{
++ struct list_head *head;
++ struct iscsi_cmnd *cmnd;
++
++ head = &conn->session->cmnd_data_wait_hash[cmnd_hashfn(itt)];
++
++ list_for_each_entry(cmnd, head, hash_list_entry) {
++ if (cmnd->pdu.bhs.itt == itt)
++ return cmnd;
++ }
++ return NULL;
++}
++
++static struct iscsi_cmnd *cmnd_find_data_wait_hash(struct iscsi_conn *conn,
++ __be32 itt)
++{
++ struct iscsi_cmnd *res;
++ struct iscsi_session *session = conn->session;
++
++ spin_lock(&session->cmnd_data_wait_hash_lock);
++ res = __cmnd_find_data_wait_hash(conn, itt);
++ spin_unlock(&session->cmnd_data_wait_hash_lock);
++
++ return res;
++}
++
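++/* Returns the next target transfer tag, skipping the reserved value */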
++static inline u32 get_next_ttt(struct iscsi_conn *conn)
++{
++ u32 ttt;
++ struct iscsi_session *session = conn->session;
++
++ /* Not compatible with MC/S! */
++
++ iscsi_extracheck_is_rd_thread(conn);
++
++ if (unlikely(session->next_ttt == ISCSI_RESERVED_TAG_CPU32))
++ session->next_ttt++;
++ ttt = session->next_ttt++;
++
++ return ttt;
++}
++
++static int cmnd_insert_data_wait_hash(struct iscsi_cmnd *cmnd)
++{
++ struct iscsi_session *session = cmnd->conn->session;
++ struct iscsi_cmnd *tmp;
++ struct list_head *head;
++ int err = 0;
++ __be32 itt = cmnd->pdu.bhs.itt;
++
++ if (unlikely(cmnd->hashed)) {
++ /*
++ * It can happen for preliminarily completed commands, when this
++ * function has already failed.
++ */
++ goto out;
++ }
++
++ /*
++ * We don't need a TTT, because the ITT/buffer_offset pair is sufficient
++ * to find the original request and buffer for Data-Out PDUs, but the
++ * crazy iSCSI spec requires us to send this superfluous field in
++ * R2T PDUs and some initiators may rely on it.
++ */
++ cmnd->target_task_tag = get_next_ttt(cmnd->conn);
++
++ TRACE_DBG("%p:%x", cmnd, itt);
++ if (unlikely(itt == ISCSI_RESERVED_TAG)) {
++ PRINT_ERROR("%s", "ITT is RESERVED_TAG");
++ PRINT_BUFFER("Incorrect BHS", &cmnd->pdu.bhs,
++ sizeof(cmnd->pdu.bhs));
++ err = -ISCSI_REASON_PROTOCOL_ERROR;
++ goto out;
++ }
++
++ spin_lock(&session->cmnd_data_wait_hash_lock);
++
++ head = &session->cmnd_data_wait_hash[cmnd_hashfn(itt)];
++
++ tmp = __cmnd_find_data_wait_hash(cmnd->conn, itt);
++ if (likely(!tmp)) {
++ TRACE_DBG("Adding cmnd %p to the hash (ITT %x)", cmnd,
++ cmnd->pdu.bhs.itt);
++ list_add_tail(&cmnd->hash_list_entry, head);
++ cmnd->hashed = 1;
++ } else {
++ PRINT_ERROR("Task %x in progress, cmnd %p", itt, cmnd);
++ err = -ISCSI_REASON_TASK_IN_PROGRESS;
++ }
++
++ spin_unlock(&session->cmnd_data_wait_hash_lock);
++
++out:
++ return err;
++}
++
++static void cmnd_remove_data_wait_hash(struct iscsi_cmnd *cmnd)
++{
++ struct iscsi_session *session = cmnd->conn->session;
++ struct iscsi_cmnd *tmp;
++
++ spin_lock(&session->cmnd_data_wait_hash_lock);
++
++ tmp = __cmnd_find_data_wait_hash(cmnd->conn, cmnd->pdu.bhs.itt);
++
++ if (likely(tmp && tmp == cmnd)) {
++ TRACE_DBG("Deleting cmnd %p from the hash (ITT %x)", cmnd,
++ cmnd->pdu.bhs.itt);
++ list_del(&cmnd->hash_list_entry);
++ cmnd->hashed = 0;
++ } else
++ PRINT_ERROR("%p:%x not found", cmnd, cmnd->pdu.bhs.itt);
++
++ spin_unlock(&session->cmnd_data_wait_hash_lock);
++
++ return;
++}
++
++static void cmnd_prepare_get_rejected_immed_data(struct iscsi_cmnd *cmnd)
++{
++ struct iscsi_conn *conn = cmnd->conn;
++ struct scatterlist *sg = cmnd->sg;
++ char __user *addr;
++ u32 size;
++ unsigned int i;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG_FLAG(iscsi_get_flow_ctrl_or_mgmt_dbg_log_flag(cmnd),
++ "Skipping (cmnd %p, ITT %x, op %x, cmd op %x, "
++ "datasize %u, scst_cmd %p, scst state %d)", cmnd,
++ cmnd->pdu.bhs.itt, cmnd_opcode(cmnd), cmnd_hdr(cmnd)->scb[0],
++ cmnd->pdu.datasize, cmnd->scst_cmd, cmnd->scst_state);
++
++ iscsi_extracheck_is_rd_thread(conn);
++
++ size = cmnd->pdu.datasize;
++ if (!size)
++ goto out;
++
++ /* We already checked pdu.datasize in check_segment_length() */
++
++ /*
++ * There are no safety problems with concurrent
++ * accesses to dummy_page in dummy_sg, since the data
++ * will only be read and then discarded.
++ */
++ sg = &dummy_sg;
++ if (cmnd->sg == NULL) {
++ /* just in case */
++ cmnd->sg = sg;
++ cmnd->bufflen = PAGE_SIZE;
++ cmnd->own_sg = 1;
++ }
++
++ addr = (char __force __user *)(page_address(sg_page(&sg[0])));
++ conn->read_size = size;
++ for (i = 0; size > PAGE_SIZE; i++, size -= PAGE_SIZE) {
++ /* We already checked pdu.datasize in check_segment_length() */
++ BUG_ON(i >= ISCSI_CONN_IOV_MAX);
++ conn->read_iov[i].iov_base = addr;
++ conn->read_iov[i].iov_len = PAGE_SIZE;
++ }
++ conn->read_iov[i].iov_base = addr;
++ conn->read_iov[i].iov_len = size;
++ conn->read_msg.msg_iov = conn->read_iov;
++ conn->read_msg.msg_iovlen = ++i;
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++int iscsi_preliminary_complete(struct iscsi_cmnd *req,
++ struct iscsi_cmnd *orig_req, bool get_data)
++{
++ int res = 0;
++ bool set_r2t_len;
++ struct iscsi_hdr *orig_req_hdr = &orig_req->pdu.bhs;
++
++ TRACE_ENTRY();
++
++#ifdef CONFIG_SCST_DEBUG
++ {
++ struct iscsi_hdr *req_hdr = &req->pdu.bhs;
++ TRACE_DBG_FLAG(iscsi_get_flow_ctrl_or_mgmt_dbg_log_flag(orig_req),
++ "Prelim completed req %p, orig_req %p (FINAL %x, "
++ "outstanding_r2t %d)", req, orig_req,
++ (req_hdr->flags & ISCSI_CMD_FINAL),
++ orig_req->outstanding_r2t);
++ }
++#endif
++
++ iscsi_extracheck_is_rd_thread(req->conn);
++ BUG_ON(req->parent_req != NULL);
++
++ if (test_bit(ISCSI_CMD_PRELIM_COMPLETED, &req->prelim_compl_flags)) {
++ TRACE_MGMT_DBG("req %p already prelim completed", req);
++ /* So that we don't try to get data twice */
++ get_data = false;
++ }
++
++ /*
++ * We need to receive all outstanding PDUs, even if the direction isn't
++ * WRITE. The PRELIM_COMPLETED test is needed, because
++ * iscsi_set_prelim_r2t_len_to_receive() could also have failed earlier.
++ */
++ set_r2t_len = !orig_req->hashed &&
++ (cmnd_opcode(orig_req) == ISCSI_OP_SCSI_CMD) &&
++ !test_bit(ISCSI_CMD_PRELIM_COMPLETED,
++ &orig_req->prelim_compl_flags);
++
++ TRACE_DBG("get_data %d, set_r2t_len %d", get_data, set_r2t_len);
++
++ if (get_data)
++ cmnd_prepare_get_rejected_immed_data(req);
++
++ if (test_bit(ISCSI_CMD_PRELIM_COMPLETED, &orig_req->prelim_compl_flags))
++ goto out_set;
++
++ if (set_r2t_len)
++ res = iscsi_set_prelim_r2t_len_to_receive(orig_req);
++ else if (orig_req_hdr->flags & ISCSI_CMD_WRITE) {
++ /*
++ * We get here if orig_req was preliminarily completed in the
++ * middle of receiving data. We won't send more R2Ts, so
++ * r2t_len_to_send is final and won't be updated anymore in
++ * the future.
++ */
++ iscsi_set_not_received_data_len(orig_req,
++ orig_req->r2t_len_to_send);
++ }
++
++out_set:
++ set_bit(ISCSI_CMD_PRELIM_COMPLETED, &orig_req->prelim_compl_flags);
++ set_bit(ISCSI_CMD_PRELIM_COMPLETED, &req->prelim_compl_flags);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
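++/* Maps the given range of the cmd's sg into the conn's read iovec */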
++static int cmnd_prepare_recv_pdu(struct iscsi_conn *conn,
++ struct iscsi_cmnd *cmd, u32 offset, u32 size)
++{
++ struct scatterlist *sg = cmd->sg;
++ unsigned int bufflen = cmd->bufflen;
++ unsigned int idx, i, buff_offs;
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("cmd %p, sg %p, offset %u, size %u", cmd, cmd->sg,
++ offset, size);
++
++ iscsi_extracheck_is_rd_thread(conn);
++
++ buff_offs = offset;
++ idx = (offset + sg[0].offset) >> PAGE_SHIFT;
++ offset &= ~PAGE_MASK;
++
++ conn->read_msg.msg_iov = conn->read_iov;
++ conn->read_size = size;
++
++ i = 0;
++ while (1) {
++ unsigned int sg_len;
++ char __user *addr;
++
++ if (unlikely(buff_offs >= bufflen)) {
++ TRACE_DBG("Residual overflow (cmd %p, buff_offs %d, "
++ "bufflen %d)", cmd, buff_offs, bufflen);
++ idx = 0;
++ sg = &dummy_sg;
++ offset = 0;
++ }
++
++ addr = (char __force __user *)(sg_virt(&sg[idx]));
++ EXTRACHECKS_BUG_ON(addr == NULL);
++ sg_len = sg[idx].length - offset;
++
++ conn->read_iov[i].iov_base = addr + offset;
++
++ if (size <= sg_len) {
++ TRACE_DBG("idx=%d, i=%d, offset=%u, size=%d, addr=%p",
++ idx, i, offset, size, addr);
++ conn->read_iov[i].iov_len = size;
++ conn->read_msg.msg_iovlen = i+1;
++ break;
++ }
++ conn->read_iov[i].iov_len = sg_len;
++
++ TRACE_DBG("idx=%d, i=%d, offset=%u, size=%d, sg_len=%u, "
++ "addr=%p", idx, i, offset, size, sg_len, addr);
++
++ size -= sg_len;
++ buff_offs += sg_len;
++
++ i++;
++ if (unlikely(i >= ISCSI_CONN_IOV_MAX)) {
++ PRINT_ERROR("Initiator %s violated negotiated "
++ "parameters by sending too much data (size "
++ "left %d)", conn->session->initiator_name,
++ size);
++ mark_conn_closed(conn);
++ res = -EINVAL;
++ break;
++ }
++
++ idx++;
++ offset = 0;
++ }
++
++ TRACE_DBG("msg_iov=%p, msg_iovlen=%zd",
++ conn->read_msg.msg_iov, conn->read_msg.msg_iovlen);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
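++/* Builds and queues R2T PDUs within the negotiated R2T/burst limits */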
++static void send_r2t(struct iscsi_cmnd *req)
++{
++ struct iscsi_session *sess = req->conn->session;
++ struct iscsi_cmnd *rsp;
++ struct iscsi_r2t_hdr *rsp_hdr;
++ u32 offset, burst;
++ LIST_HEAD(send);
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(req->r2t_len_to_send == 0);
++
++ /*
++ * There is no race with data_out_start() and conn_abort(), since
++ * all these functions are called from a single read thread
++ */
++ iscsi_extracheck_is_rd_thread(req->conn);
++
++ /*
++ * We don't need to check for PRELIM_COMPLETED here, because for such
++ * commands we set r2t_len_to_send = 0, which ensures we won't be
++ * called here.
++ */
++
++ EXTRACHECKS_BUG_ON(req->outstanding_r2t >
++ sess->sess_params.max_outstanding_r2t);
++
++ if (req->outstanding_r2t == sess->sess_params.max_outstanding_r2t)
++ goto out;
++
++ burst = sess->sess_params.max_burst_length;
++ offset = be32_to_cpu(cmnd_hdr(req)->data_length) -
++ req->r2t_len_to_send;
++
++ do {
++ rsp = iscsi_alloc_rsp(req);
++ rsp->pdu.bhs.ttt = (__force __be32)req->target_task_tag;
++ rsp_hdr = (struct iscsi_r2t_hdr *)&rsp->pdu.bhs;
++ rsp_hdr->opcode = ISCSI_OP_R2T;
++ rsp_hdr->flags = ISCSI_FLG_FINAL;
++ rsp_hdr->lun = cmnd_hdr(req)->lun;
++ rsp_hdr->itt = cmnd_hdr(req)->itt;
++ rsp_hdr->r2t_sn = (__force u32)cpu_to_be32(req->r2t_sn++);
++ rsp_hdr->buffer_offset = cpu_to_be32(offset);
++ if (req->r2t_len_to_send > burst) {
++ rsp_hdr->data_length = cpu_to_be32(burst);
++ req->r2t_len_to_send -= burst;
++ offset += burst;
++ } else {
++ rsp_hdr->data_length = cpu_to_be32(req->r2t_len_to_send);
++ req->r2t_len_to_send = 0;
++ }
++
++ TRACE_WRITE("req %p, data_length %u, buffer_offset %u, "
++ "r2t_sn %u, outstanding_r2t %u", req,
++ be32_to_cpu(rsp_hdr->data_length),
++ be32_to_cpu(rsp_hdr->buffer_offset),
++ be32_to_cpu((__force __be32)rsp_hdr->r2t_sn), req->outstanding_r2t);
++
++ list_add_tail(&rsp->write_list_entry, &send);
++ req->outstanding_r2t++;
++
++ } while ((req->outstanding_r2t < sess->sess_params.max_outstanding_r2t) &&
++ (req->r2t_len_to_send != 0));
++
++ iscsi_cmnds_init_write(&send, ISCSI_INIT_WRITE_WAKE);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static int iscsi_pre_exec(struct scst_cmd *scst_cmd)
++{
++ int res = SCST_PREPROCESS_STATUS_SUCCESS;
++ struct iscsi_cmnd *req = (struct iscsi_cmnd *)
++ scst_cmd_get_tgt_priv(scst_cmd);
++ struct iscsi_cmnd *c, *t;
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(scst_cmd_atomic(scst_cmd));
++
++ /* If data digest isn't used this list will be empty */
++ list_for_each_entry_safe(c, t, &req->rx_ddigest_cmd_list,
++ rx_ddigest_cmd_list_entry) {
++ TRACE_DBG("Checking digest of RX ddigest cmd %p", c);
++ if (digest_rx_data(c) != 0) {
++ scst_set_cmd_error(scst_cmd,
++ SCST_LOAD_SENSE(iscsi_sense_crc_error));
++ res = SCST_PREPROCESS_STATUS_ERROR_SENSE_SET;
++ /*
++ * The rest of rx_ddigest_cmd_list will be freed
++ * in req_cmnd_release()
++ */
++ goto out;
++ }
++ cmd_del_from_rx_ddigest_list(c);
++ cmnd_put(c);
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int nop_out_start(struct iscsi_cmnd *cmnd)
++{
++ struct iscsi_conn *conn = cmnd->conn;
++ struct iscsi_hdr *req_hdr = &cmnd->pdu.bhs;
++ u32 size, tmp;
++ int i, err = 0;
++
++ TRACE_DBG("%p", cmnd);
++
++ iscsi_extracheck_is_rd_thread(conn);
++
++ if (!(req_hdr->flags & ISCSI_FLG_FINAL)) {
++		PRINT_ERROR("%s", "Initiator sent Nop-Out that is not a single "
++			"PDU");
++ err = -ISCSI_REASON_PROTOCOL_ERROR;
++ goto out;
++ }
++
++ if (cmnd->pdu.bhs.itt == ISCSI_RESERVED_TAG) {
++ if (unlikely(!(cmnd->pdu.bhs.opcode & ISCSI_OP_IMMEDIATE)))
++ PRINT_ERROR("%s", "Initiator sent RESERVED tag for "
++ "non-immediate Nop-Out command");
++ }
++
++ update_stat_sn(cmnd);
++
++ size = cmnd->pdu.datasize;
++
++ if (size) {
++ conn->read_msg.msg_iov = conn->read_iov;
++ if (cmnd->pdu.bhs.itt != ISCSI_RESERVED_TAG) {
++ struct scatterlist *sg;
++
++ cmnd->sg = sg = scst_alloc(size, GFP_KERNEL,
++ &cmnd->sg_cnt);
++ if (sg == NULL) {
++				TRACE(TRACE_OUT_OF_MEM, "Allocating buffer of"
++					" %d bytes for Nop-Out payload failed", size);
++ err = -ISCSI_REASON_OUT_OF_RESOURCES;
++ goto out;
++ }
++
++ /* We already checked it in check_segment_length() */
++ BUG_ON(cmnd->sg_cnt > (signed)ISCSI_CONN_IOV_MAX);
++
++ cmnd->own_sg = 1;
++ cmnd->bufflen = size;
++
++ for (i = 0; i < cmnd->sg_cnt; i++) {
++ conn->read_iov[i].iov_base =
++ (void __force __user *)(page_address(sg_page(&sg[i])));
++ tmp = min_t(u32, size, PAGE_SIZE);
++ conn->read_iov[i].iov_len = tmp;
++ conn->read_size += tmp;
++ size -= tmp;
++ }
++ BUG_ON(size != 0);
++ } else {
++ /*
++			 * There are no problems with concurrent accesses to
++			 * dummy_page, since for ISCSI_RESERVED_TAG the data is
++			 * only read and then discarded.
++ */
++ for (i = 0; i < (signed)ISCSI_CONN_IOV_MAX; i++) {
++ conn->read_iov[i].iov_base =
++ (void __force __user *)(page_address(dummy_page));
++ tmp = min_t(u32, size, PAGE_SIZE);
++ conn->read_iov[i].iov_len = tmp;
++ conn->read_size += tmp;
++ size -= tmp;
++ }
++
++ /* We already checked size in check_segment_length() */
++ BUG_ON(size != 0);
++ }
++
++ conn->read_msg.msg_iovlen = i;
++ TRACE_DBG("msg_iov=%p, msg_iovlen=%zd", conn->read_msg.msg_iov,
++ conn->read_msg.msg_iovlen);
++ }
++
++out:
++ return err;
++}
++
++int cmnd_rx_continue(struct iscsi_cmnd *req)
++{
++ struct iscsi_conn *conn = req->conn;
++ struct iscsi_session *session = conn->session;
++ struct iscsi_scsi_cmd_hdr *req_hdr = cmnd_hdr(req);
++ struct scst_cmd *scst_cmd = req->scst_cmd;
++ scst_data_direction dir;
++ bool unsolicited_data_expected = false;
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("scsi command: %x", req_hdr->scb[0]);
++
++ EXTRACHECKS_BUG_ON(req->scst_state != ISCSI_CMD_STATE_AFTER_PREPROC);
++
++ dir = scst_cmd_get_data_direction(scst_cmd);
++
++ /*
++ * Check for preliminary completion here to save R2Ts. For TASK QUEUE
++ * FULL statuses that might be a big performance win.
++ */
++ if (unlikely(scst_cmd_prelim_completed(scst_cmd) ||
++ unlikely(req->prelim_compl_flags != 0))) {
++ /*
++ * If necessary, ISCSI_CMD_ABORTED will be set by
++ * iscsi_xmit_response().
++ */
++ res = iscsi_preliminary_complete(req, req, true);
++ goto trace;
++ }
++
++ /* For prelim completed commands sg & K can be already set! */
++
++ if (dir & SCST_DATA_WRITE) {
++ req->bufflen = scst_cmd_get_write_fields(scst_cmd, &req->sg,
++ &req->sg_cnt);
++ unsolicited_data_expected = !(req_hdr->flags & ISCSI_CMD_FINAL);
++
++ if (unlikely(session->sess_params.initial_r2t &&
++ unsolicited_data_expected)) {
++ PRINT_ERROR("Initiator %s violated negotiated "
++ "parameters: initial R2T is required (ITT %x, "
++ "op %x)", session->initiator_name,
++ req->pdu.bhs.itt, req_hdr->scb[0]);
++ goto out_close;
++ }
++
++ if (unlikely(!session->sess_params.immediate_data &&
++ req->pdu.datasize)) {
++ PRINT_ERROR("Initiator %s violated negotiated "
++ "parameters: forbidden immediate data sent "
++ "(ITT %x, op %x)", session->initiator_name,
++ req->pdu.bhs.itt, req_hdr->scb[0]);
++ goto out_close;
++ }
++
++ if (unlikely(session->sess_params.first_burst_length < req->pdu.datasize)) {
++ PRINT_ERROR("Initiator %s violated negotiated "
++ "parameters: immediate data len (%d) > "
++ "first_burst_length (%d) (ITT %x, op %x)",
++ session->initiator_name,
++ req->pdu.datasize,
++ session->sess_params.first_burst_length,
++ req->pdu.bhs.itt, req_hdr->scb[0]);
++ goto out_close;
++ }
++
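++		/*
++		 * Everything beyond the immediate data that came with the
++		 * command still has to be received, solicited or unsolicited.
++		 */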
++ req->r2t_len_to_receive = be32_to_cpu(req_hdr->data_length) -
++ req->pdu.datasize;
++
++ /*
++ * In case of residual overflow req->r2t_len_to_receive and
++ * req->pdu.datasize might be > req->bufflen
++ */
++
++ res = cmnd_insert_data_wait_hash(req);
++ if (unlikely(res != 0)) {
++ /*
++			 * We have to close the connection, because otherwise
++			 * data corruption is possible if we allow data for this
++			 * request to be received in another request with a
++			 * duplicated ITT.
++ */
++ goto out_close;
++ }
++
++ if (unsolicited_data_expected) {
++ req->outstanding_r2t = 1;
++ req->r2t_len_to_send = req->r2t_len_to_receive -
++ min_t(unsigned int,
++ session->sess_params.first_burst_length -
++ req->pdu.datasize,
++ req->r2t_len_to_receive);
++ } else
++ req->r2t_len_to_send = req->r2t_len_to_receive;
++
++ req_add_to_write_timeout_list(req);
++
++ if (req->pdu.datasize) {
++ res = cmnd_prepare_recv_pdu(conn, req, 0, req->pdu.datasize);
++ /* For performance better to send R2Ts ASAP */
++ if (likely(res == 0) && (req->r2t_len_to_send != 0))
++ send_r2t(req);
++ }
++ } else {
++ req->sg = scst_cmd_get_sg(scst_cmd);
++ req->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
++ req->bufflen = scst_cmd_get_bufflen(scst_cmd);
++
++ if (unlikely(!(req_hdr->flags & ISCSI_CMD_FINAL) ||
++ req->pdu.datasize)) {
++ PRINT_ERROR("Unexpected unsolicited data (ITT %x "
++ "CDB %x)", req->pdu.bhs.itt, req_hdr->scb[0]);
++ set_scst_preliminary_status_rsp(req, true,
++ SCST_LOAD_SENSE(iscsi_sense_unexpected_unsolicited_data));
++ }
++ }
++
++trace:
++ TRACE_DBG("req=%p, dir=%d, unsolicited_data_expected=%d, "
++ "r2t_len_to_receive=%d, r2t_len_to_send=%d, bufflen=%d, "
++ "own_sg %d", req, dir, unsolicited_data_expected,
++ req->r2t_len_to_receive, req->r2t_len_to_send, req->bufflen,
++ req->own_sg);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_close:
++ mark_conn_closed(conn);
++ res = -EINVAL;
++ goto out;
++}
++
++static int scsi_cmnd_start(struct iscsi_cmnd *req)
++{
++ struct iscsi_conn *conn = req->conn;
++ struct iscsi_session *session = conn->session;
++ struct iscsi_scsi_cmd_hdr *req_hdr = cmnd_hdr(req);
++ struct scst_cmd *scst_cmd;
++ scst_data_direction dir;
++ struct iscsi_ahs_hdr *ahdr;
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("scsi command: %x", req_hdr->scb[0]);
++
++ TRACE_DBG("Incrementing active_cmds (cmd %p, sess %p, "
++ "new value %d)", req, session,
++ atomic_read(&session->active_cmds)+1);
++ atomic_inc(&session->active_cmds);
++ req->dec_active_cmds = 1;
++
++ scst_cmd = scst_rx_cmd(session->scst_sess,
++ (uint8_t *)&req_hdr->lun, sizeof(req_hdr->lun),
++ req_hdr->scb, sizeof(req_hdr->scb), SCST_NON_ATOMIC);
++ if (scst_cmd == NULL) {
++ res = create_preliminary_no_scst_rsp(req, SAM_STAT_BUSY,
++ NULL, 0);
++ goto out;
++ }
++
++ req->scst_cmd = scst_cmd;
++ scst_cmd_set_tag(scst_cmd, (__force u32)req_hdr->itt);
++ scst_cmd_set_tgt_priv(scst_cmd, req);
++
++ if ((req_hdr->flags & ISCSI_CMD_READ) &&
++ (req_hdr->flags & ISCSI_CMD_WRITE)) {
++ int sz = cmnd_read_size(req);
++ if (unlikely(sz < 0)) {
++ PRINT_ERROR("%s", "BIDI data transfer, but initiator "
++				"did not supply Bidirectional Read Expected Data "
++ "Transfer Length AHS");
++ set_scst_preliminary_status_rsp(req, true,
++ SCST_LOAD_SENSE(scst_sense_parameter_value_invalid));
++ } else {
++ dir = SCST_DATA_BIDI;
++ scst_cmd_set_expected(scst_cmd, dir, sz);
++ scst_cmd_set_expected_out_transfer_len(scst_cmd,
++ be32_to_cpu(req_hdr->data_length));
++#if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++ scst_cmd_set_tgt_need_alloc_data_buf(scst_cmd);
++#endif
++ }
++ } else if (req_hdr->flags & ISCSI_CMD_READ) {
++ dir = SCST_DATA_READ;
++ scst_cmd_set_expected(scst_cmd, dir,
++ be32_to_cpu(req_hdr->data_length));
++#if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++ scst_cmd_set_tgt_need_alloc_data_buf(scst_cmd);
++#endif
++ } else if (req_hdr->flags & ISCSI_CMD_WRITE) {
++ dir = SCST_DATA_WRITE;
++ scst_cmd_set_expected(scst_cmd, dir,
++ be32_to_cpu(req_hdr->data_length));
++ } else {
++ dir = SCST_DATA_NONE;
++ scst_cmd_set_expected(scst_cmd, dir, 0);
++ }
++
++ switch (req_hdr->flags & ISCSI_CMD_ATTR_MASK) {
++ case ISCSI_CMD_SIMPLE:
++ scst_cmd_set_queue_type(scst_cmd, SCST_CMD_QUEUE_SIMPLE);
++ break;
++ case ISCSI_CMD_HEAD_OF_QUEUE:
++ scst_cmd_set_queue_type(scst_cmd, SCST_CMD_QUEUE_HEAD_OF_QUEUE);
++ break;
++ case ISCSI_CMD_ORDERED:
++ scst_cmd_set_queue_type(scst_cmd, SCST_CMD_QUEUE_ORDERED);
++ break;
++ case ISCSI_CMD_ACA:
++ scst_cmd_set_queue_type(scst_cmd, SCST_CMD_QUEUE_ACA);
++ break;
++ case ISCSI_CMD_UNTAGGED:
++ scst_cmd_set_queue_type(scst_cmd, SCST_CMD_QUEUE_UNTAGGED);
++ break;
++ default:
++		PRINT_ERROR("Unknown task code %x, using ORDERED instead",
++ req_hdr->flags & ISCSI_CMD_ATTR_MASK);
++ scst_cmd_set_queue_type(scst_cmd, SCST_CMD_QUEUE_ORDERED);
++ break;
++ }
++
++ scst_cmd_set_tgt_sn(scst_cmd, req_hdr->cmd_sn);
++
++ ahdr = (struct iscsi_ahs_hdr *)req->pdu.ahs;
++ if (ahdr != NULL) {
++ uint8_t *p = (uint8_t *)ahdr;
++ unsigned int size = 0;
++ do {
++ int s;
++
++ ahdr = (struct iscsi_ahs_hdr *)p;
++
++ if (ahdr->ahstype == ISCSI_AHSTYPE_CDB) {
++ struct iscsi_cdb_ahdr *eca =
++ (struct iscsi_cdb_ahdr *)ahdr;
++ scst_cmd_set_ext_cdb(scst_cmd, eca->cdb,
++ be16_to_cpu(ahdr->ahslength) - 1);
++ break;
++ }
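++			/*
++			 * Advance to the next AHS: 3 header bytes plus the AHS
++			 * length, rounded up to a 4-byte boundary.
++			 */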
++ s = 3 + be16_to_cpu(ahdr->ahslength);
++ s = (s + 3) & -4;
++ size += s;
++ p += s;
++ } while (size < req->pdu.ahssize);
++ }
++
++ TRACE_DBG("START Command (itt %x, queue_type %d)",
++ req_hdr->itt, scst_cmd_get_queue_type(scst_cmd));
++ req->scst_state = ISCSI_CMD_STATE_RX_CMD;
++ conn->rx_task = current;
++ scst_cmd_init_stage1_done(scst_cmd, SCST_CONTEXT_DIRECT, 0);
++
++ if (req->scst_state != ISCSI_CMD_STATE_RX_CMD)
++ res = cmnd_rx_continue(req);
++ else {
++ TRACE_DBG("Delaying req %p post processing (scst_state %d)",
++ req, req->scst_state);
++ res = 1;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int data_out_start(struct iscsi_cmnd *cmnd)
++{
++ struct iscsi_conn *conn = cmnd->conn;
++ struct iscsi_data_out_hdr *req_hdr =
++ (struct iscsi_data_out_hdr *)&cmnd->pdu.bhs;
++ struct iscsi_cmnd *orig_req;
++#if 0
++ struct iscsi_hdr *orig_req_hdr;
++#endif
++ u32 offset = be32_to_cpu(req_hdr->buffer_offset);
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ /*
++ * There is no race with send_r2t() and conn_abort(), since
++	 * all these functions are called from a single read thread
++ */
++ iscsi_extracheck_is_rd_thread(cmnd->conn);
++
++ update_stat_sn(cmnd);
++
++ orig_req = cmnd_find_data_wait_hash(conn, req_hdr->itt);
++ cmnd->cmd_req = orig_req;
++ if (unlikely(orig_req == NULL)) {
++ /*
++ * It shouldn't happen, since we don't abort any request until
++		 * we have received all related PDUs from the initiator or timed
++		 * them out. Let's quietly drop such PDUs.
++ */
++ TRACE_MGMT_DBG("Unable to find scsi task ITT %x",
++ cmnd->pdu.bhs.itt);
++ res = iscsi_preliminary_complete(cmnd, cmnd, true);
++ goto out;
++ }
++
++ if (unlikely(orig_req->r2t_len_to_receive < cmnd->pdu.datasize)) {
++ if (orig_req->prelim_compl_flags != 0) {
++ /* We can have fake r2t_len_to_receive */
++ goto go;
++ }
++ PRINT_ERROR("Data size (%d) > R2T length to receive (%d)",
++ cmnd->pdu.datasize, orig_req->r2t_len_to_receive);
++ set_scst_preliminary_status_rsp(orig_req, false,
++ SCST_LOAD_SENSE(iscsi_sense_incorrect_amount_of_data));
++ goto go;
++ }
++
++ /* Crazy iSCSI spec requires us to make this unneeded check */
++#if 0 /* ...but some initiators (Windows) don't care to correctly set it */
++ orig_req_hdr = &orig_req->pdu.bhs;
++ if (unlikely(orig_req_hdr->lun != req_hdr->lun)) {
++ PRINT_ERROR("Wrong LUN (%lld) in Data-Out PDU (expected %lld), "
++ "orig_req %p, cmnd %p", (unsigned long long)req_hdr->lun,
++ (unsigned long long)orig_req_hdr->lun, orig_req, cmnd);
++ create_reject_rsp(orig_req, ISCSI_REASON_PROTOCOL_ERROR, false);
++ goto go;
++ }
++#endif
++
++go:
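++	/*
++	 * A Data-Out PDU with the F bit set completes one outstanding R2T
++	 * (or the unsolicited data burst).
++	 */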
++ if (req_hdr->flags & ISCSI_FLG_FINAL)
++ orig_req->outstanding_r2t--;
++
++ if (unlikely(orig_req->prelim_compl_flags != 0)) {
++ res = iscsi_preliminary_complete(cmnd, orig_req, true);
++ goto out;
++ }
++
++ TRACE_WRITE("cmnd %p, orig_req %p, offset %u, datasize %u", cmnd,
++ orig_req, offset, cmnd->pdu.datasize);
++
++ res = cmnd_prepare_recv_pdu(conn, orig_req, offset, cmnd->pdu.datasize);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void data_out_end(struct iscsi_cmnd *cmnd)
++{
++ struct iscsi_data_out_hdr *req_hdr =
++ (struct iscsi_data_out_hdr *)&cmnd->pdu.bhs;
++ struct iscsi_cmnd *req;
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(cmnd == NULL);
++ req = cmnd->cmd_req;
++ if (unlikely(req == NULL))
++ goto out;
++
++ TRACE_DBG("cmnd %p, req %p", cmnd, req);
++
++ iscsi_extracheck_is_rd_thread(cmnd->conn);
++
++ if (!(cmnd->conn->ddigest_type & DIGEST_NONE) &&
++ !cmnd->ddigest_checked) {
++ cmd_add_on_rx_ddigest_list(req, cmnd);
++ cmnd_get(cmnd);
++ }
++
++ /*
++	 * Now that we have received the data, we can adjust r2t_len_to_receive
++	 * of the orig req. We couldn't do it earlier, because that would break
++	 * recovery from data receiving errors (calls of
++	 * iscsi_fail_data_waiting_cmnd()).
++ */
++ req->r2t_len_to_receive -= cmnd->pdu.datasize;
++
++ if (unlikely(req->prelim_compl_flags != 0)) {
++ /*
++		 * We need to call iscsi_preliminary_complete() again to handle
++		 * the case where we have just been aborted. This call must be
++		 * done before zeroing r2t_len_to_send to correctly calculate
++		 * the residual.
++ */
++ iscsi_preliminary_complete(cmnd, req, false);
++
++ /*
++ * We might need to wait for one or more PDUs. Let's simplify
++ * other code and not perform exact r2t_len_to_receive
++ * calculation.
++ */
++ req->r2t_len_to_receive = req->outstanding_r2t;
++ req->r2t_len_to_send = 0;
++ }
++
++ TRACE_DBG("req %p, FINAL %x, outstanding_r2t %d, r2t_len_to_receive %d,"
++ " r2t_len_to_send %d", req, req_hdr->flags & ISCSI_FLG_FINAL,
++ req->outstanding_r2t, req->r2t_len_to_receive,
++ req->r2t_len_to_send);
++
++ if (!(req_hdr->flags & ISCSI_FLG_FINAL))
++ goto out;
++
++ if (req->r2t_len_to_receive == 0) {
++ if (!req->pending)
++ iscsi_restart_cmnd(req);
++ } else if (req->r2t_len_to_send != 0)
++ send_r2t(req);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* Might be called under target_mutex and cmd_list_lock */
++static void __cmnd_abort(struct iscsi_cmnd *cmnd)
++{
++ unsigned long timeout_time = jiffies + ISCSI_TM_DATA_WAIT_TIMEOUT +
++ ISCSI_ADD_SCHED_TIME;
++ struct iscsi_conn *conn = cmnd->conn;
++
++ TRACE_MGMT_DBG("Aborting cmd %p, scst_cmd %p (scst state %x, "
++ "ref_cnt %d, on_write_timeout_list %d, write_start %ld, ITT %x, "
++ "sn %u, op %x, r2t_len_to_receive %d, r2t_len_to_send %d, "
++ "CDB op %x, size to write %u, outstanding_r2t %d, "
++ "sess->exp_cmd_sn %u, conn %p, rd_task %p)",
++ cmnd, cmnd->scst_cmd, cmnd->scst_state,
++ atomic_read(&cmnd->ref_cnt), cmnd->on_write_timeout_list,
++ cmnd->write_start, cmnd->pdu.bhs.itt, cmnd->pdu.bhs.sn,
++ cmnd_opcode(cmnd), cmnd->r2t_len_to_receive,
++ cmnd->r2t_len_to_send, cmnd_scsicode(cmnd),
++ cmnd_write_size(cmnd), cmnd->outstanding_r2t,
++ cmnd->conn->session->exp_cmd_sn, cmnd->conn,
++ cmnd->conn->rd_task);
++
++#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++ TRACE_MGMT_DBG("net_ref_cnt %d", atomic_read(&cmnd->net_ref_cnt));
++#endif
++
++ /*
++ * Lock to sync with iscsi_check_tm_data_wait_timeouts(), including
++	 * the setting of the CMD_ABORTED bit.
++ */
++ spin_lock_bh(&iscsi_rd_lock);
++
++ /*
++	 * We assume that preliminary command completion is tested by comparing
++	 * prelim_compl_flags with 0. Otherwise a race is possible: a command
++	 * could be sent to the SCST core as PRELIM_COMPLETED while it hadn't
++	 * been aborted there yet, resulting in a wrong success status being
++	 * sent to the initiator.
++ */
++ set_bit(ISCSI_CMD_ABORTED, &cmnd->prelim_compl_flags);
++
++ TRACE_MGMT_DBG("Setting conn_tm_active for conn %p", conn);
++ conn->conn_tm_active = 1;
++
++ spin_unlock_bh(&iscsi_rd_lock);
++
++ /*
++ * We need the lock to sync with req_add_to_write_timeout_list() and
++ * close races for rsp_timer.expires.
++ */
++ spin_lock_bh(&conn->write_list_lock);
++ if (!timer_pending(&conn->rsp_timer) ||
++ time_after(conn->rsp_timer.expires, timeout_time)) {
++ TRACE_MGMT_DBG("Mod timer on %ld (conn %p)", timeout_time,
++ conn);
++ mod_timer(&conn->rsp_timer, timeout_time);
++ } else
++ TRACE_MGMT_DBG("Timer for conn %p is going to fire on %ld "
++ "(timeout time %ld)", conn, conn->rsp_timer.expires,
++ timeout_time);
++ spin_unlock_bh(&conn->write_list_lock);
++
++ return;
++}
++
++/* Must be called from the read or conn close thread */
++static int cmnd_abort(struct iscsi_cmnd *req, int *status)
++{
++ struct iscsi_task_mgt_hdr *req_hdr =
++ (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
++ struct iscsi_cmnd *cmnd;
++ int res = -1;
++
++ req_hdr->ref_cmd_sn = be32_to_cpu((__force __be32)req_hdr->ref_cmd_sn);
++
++ if (!before(req_hdr->ref_cmd_sn, req_hdr->cmd_sn)) {
++ TRACE(TRACE_MGMT, "ABORT TASK: RefCmdSN(%u) > CmdSN(%u)",
++ req_hdr->ref_cmd_sn, req_hdr->cmd_sn);
++ *status = ISCSI_RESPONSE_UNKNOWN_TASK;
++ goto out;
++ }
++
++ cmnd = cmnd_find_itt_get(req->conn, req_hdr->rtt);
++ if (cmnd) {
++ struct iscsi_conn *conn = cmnd->conn;
++ struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
++
++ if (req_hdr->lun != hdr->lun) {
++ PRINT_ERROR("ABORT TASK: LUN mismatch: req LUN "
++ "%llx, cmd LUN %llx, rtt %u",
++ (long long unsigned)be64_to_cpu(req_hdr->lun),
++ (long long unsigned)be64_to_cpu(hdr->lun),
++ req_hdr->rtt);
++ *status = ISCSI_RESPONSE_FUNCTION_REJECTED;
++ goto out_put;
++ }
++
++ if (cmnd->pdu.bhs.opcode & ISCSI_OP_IMMEDIATE) {
++ if (req_hdr->ref_cmd_sn != req_hdr->cmd_sn) {
++ PRINT_ERROR("ABORT TASK: RefCmdSN(%u) != TM "
++ "cmd CmdSN(%u) for immediate command "
++ "%p", req_hdr->ref_cmd_sn,
++ req_hdr->cmd_sn, cmnd);
++ *status = ISCSI_RESPONSE_FUNCTION_REJECTED;
++ goto out_put;
++ }
++ } else {
++ if (req_hdr->ref_cmd_sn != hdr->cmd_sn) {
++ PRINT_ERROR("ABORT TASK: RefCmdSN(%u) != "
++ "CmdSN(%u) for command %p",
++ req_hdr->ref_cmd_sn, req_hdr->cmd_sn,
++ cmnd);
++ *status = ISCSI_RESPONSE_FUNCTION_REJECTED;
++ goto out_put;
++ }
++ }
++
++ if (before(req_hdr->cmd_sn, hdr->cmd_sn) ||
++ (req_hdr->cmd_sn == hdr->cmd_sn)) {
++ PRINT_ERROR("ABORT TASK: SN mismatch: req SN %x, "
++ "cmd SN %x, rtt %u", req_hdr->cmd_sn,
++ hdr->cmd_sn, req_hdr->rtt);
++ *status = ISCSI_RESPONSE_FUNCTION_REJECTED;
++ goto out_put;
++ }
++
++ spin_lock_bh(&conn->cmd_list_lock);
++ __cmnd_abort(cmnd);
++ spin_unlock_bh(&conn->cmd_list_lock);
++
++ cmnd_put(cmnd);
++ res = 0;
++ } else {
++ TRACE_MGMT_DBG("cmd RTT %x not found", req_hdr->rtt);
++ /*
++ * iSCSI RFC:
++ *
++ * b) If the Referenced Task Tag does not identify an existing task,
++ * but if the CmdSN indicated by the RefCmdSN field in the Task
++ * Management function request is within the valid CmdSN window
++ * and less than the CmdSN of the Task Management function
++ * request itself, then targets must consider the CmdSN received
++ * and return the "Function complete" response.
++ *
++ * c) If the Referenced Task Tag does not identify an existing task
++ * and if the CmdSN indicated by the RefCmdSN field in the Task
++ * Management function request is outside the valid CmdSN window,
++ * then targets must return the "Task does not exist" response.
++ *
++ * 128 seems to be a good "window".
++ */
++ if (between(req_hdr->ref_cmd_sn, req_hdr->cmd_sn - 128,
++ req_hdr->cmd_sn)) {
++ *status = ISCSI_RESPONSE_FUNCTION_COMPLETE;
++ res = 0;
++ } else
++ *status = ISCSI_RESPONSE_UNKNOWN_TASK;
++ }
++
++out:
++ return res;
++
++out_put:
++ cmnd_put(cmnd);
++ goto out;
++}
++
++/* Must be called from the read or conn close thread */
++static int target_abort(struct iscsi_cmnd *req, int all)
++{
++ struct iscsi_target *target = req->conn->session->target;
++ struct iscsi_task_mgt_hdr *req_hdr =
++ (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
++ struct iscsi_session *session;
++ struct iscsi_conn *conn;
++ struct iscsi_cmnd *cmnd;
++
++ mutex_lock(&target->target_mutex);
++
++ list_for_each_entry(session, &target->session_list,
++ session_list_entry) {
++ list_for_each_entry(conn, &session->conn_list,
++ conn_list_entry) {
++ spin_lock_bh(&conn->cmd_list_lock);
++ list_for_each_entry(cmnd, &conn->cmd_list,
++ cmd_list_entry) {
++ if (cmnd == req)
++ continue;
++ if (all)
++ __cmnd_abort(cmnd);
++ else if (req_hdr->lun == cmnd_hdr(cmnd)->lun)
++ __cmnd_abort(cmnd);
++ }
++ spin_unlock_bh(&conn->cmd_list_lock);
++ }
++ }
++
++ mutex_unlock(&target->target_mutex);
++ return 0;
++}
++
++/* Must be called from the read or conn close thread */
++static void task_set_abort(struct iscsi_cmnd *req)
++{
++ struct iscsi_session *session = req->conn->session;
++ struct iscsi_task_mgt_hdr *req_hdr =
++ (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
++ struct iscsi_target *target = session->target;
++ struct iscsi_conn *conn;
++ struct iscsi_cmnd *cmnd;
++
++ mutex_lock(&target->target_mutex);
++
++ list_for_each_entry(conn, &session->conn_list, conn_list_entry) {
++ spin_lock_bh(&conn->cmd_list_lock);
++ list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
++ struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
++ if (cmnd == req)
++ continue;
++ if (req_hdr->lun != hdr->lun)
++ continue;
++ if (before(req_hdr->cmd_sn, hdr->cmd_sn) ||
++ req_hdr->cmd_sn == hdr->cmd_sn)
++ continue;
++ __cmnd_abort(cmnd);
++ }
++ spin_unlock_bh(&conn->cmd_list_lock);
++ }
++
++ mutex_unlock(&target->target_mutex);
++ return;
++}
++
++/* Must be called from the read or conn close thread */
++void conn_abort(struct iscsi_conn *conn)
++{
++ struct iscsi_cmnd *cmnd, *r, *t;
++
++ TRACE_MGMT_DBG("Aborting conn %p", conn);
++
++ iscsi_extracheck_is_rd_thread(conn);
++
++ cancel_delayed_work_sync(&conn->nop_in_delayed_work);
++
++ /* No locks, we are the only user */
++ list_for_each_entry_safe(r, t, &conn->nop_req_list,
++ nop_req_list_entry) {
++ list_del(&r->nop_req_list_entry);
++ cmnd_put(r);
++ }
++
++ spin_lock_bh(&conn->cmd_list_lock);
++again:
++ list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
++ __cmnd_abort(cmnd);
++ if (cmnd->r2t_len_to_receive != 0) {
++ if (!cmnd_get_check(cmnd)) {
++ spin_unlock_bh(&conn->cmd_list_lock);
++
++ /* ToDo: this is racy for MC/S */
++ iscsi_fail_data_waiting_cmnd(cmnd);
++
++ cmnd_put(cmnd);
++
++ /*
++				 * We are in the read thread, so we need not
++				 * worry that the conn gets released right
++				 * after the cmnd is released.
++ */
++ spin_lock_bh(&conn->cmd_list_lock);
++ goto again;
++ }
++ }
++ }
++ spin_unlock_bh(&conn->cmd_list_lock);
++
++ return;
++}
++
++static void execute_task_management(struct iscsi_cmnd *req)
++{
++ struct iscsi_conn *conn = req->conn;
++ struct iscsi_session *sess = conn->session;
++ struct iscsi_task_mgt_hdr *req_hdr =
++ (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
++ int rc, status = ISCSI_RESPONSE_FUNCTION_REJECTED;
++ int function = req_hdr->function & ISCSI_FUNCTION_MASK;
++ struct scst_rx_mgmt_params params;
++
++ TRACE(TRACE_MGMT, "iSCSI TM fn %d", function);
++
++ TRACE_MGMT_DBG("TM req %p, ITT %x, RTT %x, sn %u, con %p", req,
++ req->pdu.bhs.itt, req_hdr->rtt, req_hdr->cmd_sn, conn);
++
++ iscsi_extracheck_is_rd_thread(conn);
++
++ spin_lock(&sess->sn_lock);
++ sess->tm_active++;
++ sess->tm_sn = req_hdr->cmd_sn;
++ if (sess->tm_rsp != NULL) {
++ struct iscsi_cmnd *tm_rsp = sess->tm_rsp;
++
++ TRACE_MGMT_DBG("Dropping delayed TM rsp %p", tm_rsp);
++
++ sess->tm_rsp = NULL;
++ sess->tm_active--;
++
++ spin_unlock(&sess->sn_lock);
++
++ BUG_ON(sess->tm_active < 0);
++
++ rsp_cmnd_release(tm_rsp);
++ } else
++ spin_unlock(&sess->sn_lock);
++
++ memset(&params, 0, sizeof(params));
++ params.atomic = SCST_NON_ATOMIC;
++ params.tgt_priv = req;
++
++ if ((function != ISCSI_FUNCTION_ABORT_TASK) &&
++ (req_hdr->rtt != ISCSI_RESERVED_TAG)) {
++ PRINT_ERROR("Invalid RTT %x (TM fn %d)", req_hdr->rtt,
++ function);
++ rc = -1;
++ status = ISCSI_RESPONSE_FUNCTION_REJECTED;
++ goto reject;
++ }
++
++ /* cmd_sn is already in CPU format converted in cmnd_rx_start() */
++
++ switch (function) {
++ case ISCSI_FUNCTION_ABORT_TASK:
++ rc = cmnd_abort(req, &status);
++ if (rc == 0) {
++ params.fn = SCST_ABORT_TASK;
++ params.tag = (__force u32)req_hdr->rtt;
++ params.tag_set = 1;
++ params.lun = (uint8_t *)&req_hdr->lun;
++ params.lun_len = sizeof(req_hdr->lun);
++ params.lun_set = 1;
++ params.cmd_sn = req_hdr->cmd_sn;
++ params.cmd_sn_set = 1;
++ rc = scst_rx_mgmt_fn(conn->session->scst_sess,
++ &params);
++ status = ISCSI_RESPONSE_FUNCTION_REJECTED;
++ }
++ break;
++ case ISCSI_FUNCTION_ABORT_TASK_SET:
++ task_set_abort(req);
++ params.fn = SCST_ABORT_TASK_SET;
++ params.lun = (uint8_t *)&req_hdr->lun;
++ params.lun_len = sizeof(req_hdr->lun);
++ params.lun_set = 1;
++ params.cmd_sn = req_hdr->cmd_sn;
++ params.cmd_sn_set = 1;
++ rc = scst_rx_mgmt_fn(conn->session->scst_sess,
++ &params);
++ status = ISCSI_RESPONSE_FUNCTION_REJECTED;
++ break;
++ case ISCSI_FUNCTION_CLEAR_TASK_SET:
++ task_set_abort(req);
++ params.fn = SCST_CLEAR_TASK_SET;
++ params.lun = (uint8_t *)&req_hdr->lun;
++ params.lun_len = sizeof(req_hdr->lun);
++ params.lun_set = 1;
++ params.cmd_sn = req_hdr->cmd_sn;
++ params.cmd_sn_set = 1;
++ rc = scst_rx_mgmt_fn(conn->session->scst_sess,
++ &params);
++ status = ISCSI_RESPONSE_FUNCTION_REJECTED;
++ break;
++ case ISCSI_FUNCTION_CLEAR_ACA:
++ params.fn = SCST_CLEAR_ACA;
++ params.lun = (uint8_t *)&req_hdr->lun;
++ params.lun_len = sizeof(req_hdr->lun);
++ params.lun_set = 1;
++ params.cmd_sn = req_hdr->cmd_sn;
++ params.cmd_sn_set = 1;
++ rc = scst_rx_mgmt_fn(conn->session->scst_sess,
++ &params);
++ status = ISCSI_RESPONSE_FUNCTION_REJECTED;
++ break;
++ case ISCSI_FUNCTION_TARGET_COLD_RESET:
++ case ISCSI_FUNCTION_TARGET_WARM_RESET:
++ target_abort(req, 1);
++ params.fn = SCST_TARGET_RESET;
++ params.cmd_sn = req_hdr->cmd_sn;
++ params.cmd_sn_set = 1;
++ rc = scst_rx_mgmt_fn(conn->session->scst_sess,
++ &params);
++ status = ISCSI_RESPONSE_FUNCTION_REJECTED;
++ break;
++ case ISCSI_FUNCTION_LOGICAL_UNIT_RESET:
++ target_abort(req, 0);
++ params.fn = SCST_LUN_RESET;
++ params.lun = (uint8_t *)&req_hdr->lun;
++ params.lun_len = sizeof(req_hdr->lun);
++ params.lun_set = 1;
++ params.cmd_sn = req_hdr->cmd_sn;
++ params.cmd_sn_set = 1;
++ rc = scst_rx_mgmt_fn(conn->session->scst_sess,
++ &params);
++ status = ISCSI_RESPONSE_FUNCTION_REJECTED;
++ break;
++ case ISCSI_FUNCTION_TASK_REASSIGN:
++ rc = -1;
++ status = ISCSI_RESPONSE_ALLEGIANCE_REASSIGNMENT_UNSUPPORTED;
++ break;
++ default:
++ PRINT_ERROR("Unknown TM function %d", function);
++ rc = -1;
++ status = ISCSI_RESPONSE_FUNCTION_REJECTED;
++ break;
++ }
++
++reject:
++ if (rc != 0)
++ iscsi_send_task_mgmt_resp(req, status);
++
++ return;
++}
++
++static void nop_out_exec(struct iscsi_cmnd *req)
++{
++ struct iscsi_cmnd *rsp;
++ struct iscsi_nop_in_hdr *rsp_hdr;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("%p", req);
++
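++	/*
++	 * A Nop-Out with a valid ITT requires a Nop-In response echoing any
++	 * ping data; a reserved ITT means this PDU answers one of our own
++	 * Nop-In requests.
++	 */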
++ if (req->pdu.bhs.itt != ISCSI_RESERVED_TAG) {
++ rsp = iscsi_alloc_main_rsp(req);
++
++ rsp_hdr = (struct iscsi_nop_in_hdr *)&rsp->pdu.bhs;
++ rsp_hdr->opcode = ISCSI_OP_NOP_IN;
++ rsp_hdr->flags = ISCSI_FLG_FINAL;
++ rsp_hdr->itt = req->pdu.bhs.itt;
++ rsp_hdr->ttt = ISCSI_RESERVED_TAG;
++
++ if (req->pdu.datasize)
++ BUG_ON(req->sg == NULL);
++ else
++ BUG_ON(req->sg != NULL);
++
++ if (req->sg) {
++ rsp->sg = req->sg;
++ rsp->sg_cnt = req->sg_cnt;
++ rsp->bufflen = req->bufflen;
++ }
++
++ /* We already checked it in check_segment_length() */
++ BUG_ON(get_pgcnt(req->pdu.datasize, 0) > ISCSI_CONN_IOV_MAX);
++
++ rsp->pdu.datasize = req->pdu.datasize;
++ } else {
++ bool found = false;
++ struct iscsi_cmnd *r;
++ struct iscsi_conn *conn = req->conn;
++
++ TRACE_DBG("Receive Nop-In response (ttt 0x%08x)",
++ be32_to_cpu(req->pdu.bhs.ttt));
++
++ spin_lock_bh(&conn->nop_req_list_lock);
++ list_for_each_entry(r, &conn->nop_req_list,
++ nop_req_list_entry) {
++ if (req->pdu.bhs.ttt == r->pdu.bhs.ttt) {
++ list_del(&r->nop_req_list_entry);
++ found = true;
++ break;
++ }
++ }
++ spin_unlock_bh(&conn->nop_req_list_lock);
++
++ if (found)
++ cmnd_put(r);
++ else
++			TRACE_MGMT_DBG("%s", "Got Nop-Out response without "
++				"a corresponding Nop-In request");
++ }
++
++ req_cmnd_release(req);
++
++ TRACE_EXIT();
++ return;
++}
++
++static void logout_exec(struct iscsi_cmnd *req)
++{
++ struct iscsi_logout_req_hdr *req_hdr;
++ struct iscsi_cmnd *rsp;
++ struct iscsi_logout_rsp_hdr *rsp_hdr;
++
++ PRINT_INFO("Logout received from initiator %s",
++ req->conn->session->initiator_name);
++ TRACE_DBG("%p", req);
++
++ req_hdr = (struct iscsi_logout_req_hdr *)&req->pdu.bhs;
++ rsp = iscsi_alloc_main_rsp(req);
++ rsp_hdr = (struct iscsi_logout_rsp_hdr *)&rsp->pdu.bhs;
++ rsp_hdr->opcode = ISCSI_OP_LOGOUT_RSP;
++ rsp_hdr->flags = ISCSI_FLG_FINAL;
++ rsp_hdr->itt = req_hdr->itt;
++ rsp->should_close_conn = 1;
++
++ req_cmnd_release(req);
++
++ return;
++}
++
++static void iscsi_cmnd_exec(struct iscsi_cmnd *cmnd)
++{
++ TRACE_ENTRY();
++
++ TRACE_DBG("cmnd %p, op %x, SN %u", cmnd, cmnd_opcode(cmnd),
++ cmnd->pdu.bhs.sn);
++
++ iscsi_extracheck_is_rd_thread(cmnd->conn);
++
++ if (cmnd_opcode(cmnd) == ISCSI_OP_SCSI_CMD) {
++ if (cmnd->r2t_len_to_receive == 0)
++ iscsi_restart_cmnd(cmnd);
++ else if (cmnd->r2t_len_to_send != 0)
++ send_r2t(cmnd);
++ goto out;
++ }
++
++ if (cmnd->prelim_compl_flags != 0) {
++ TRACE_MGMT_DBG("Terminating prelim completed non-SCSI cmnd %p "
++ "(op %x)", cmnd, cmnd_opcode(cmnd));
++ req_cmnd_release(cmnd);
++ goto out;
++ }
++
++ switch (cmnd_opcode(cmnd)) {
++ case ISCSI_OP_NOP_OUT:
++ nop_out_exec(cmnd);
++ break;
++ case ISCSI_OP_SCSI_TASK_MGT_MSG:
++ execute_task_management(cmnd);
++ break;
++ case ISCSI_OP_LOGOUT_CMD:
++ logout_exec(cmnd);
++ break;
++ default:
++ PRINT_CRIT_ERROR("Unexpected cmnd op %x", cmnd_opcode(cmnd));
++ BUG();
++ break;
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
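++/*
++ * Toggle TCP_CORK on the connection socket so that the PDU header and its
++ * data are coalesced into full-sized TCP segments while a response is
++ * being written out.
++ */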
++static void set_cork(struct socket *sock, int on)
++{
++ int opt = on;
++ mm_segment_t oldfs;
++
++ oldfs = get_fs();
++ set_fs(get_ds());
++ sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK,
++ (void __force __user *)&opt, sizeof(opt));
++ set_fs(oldfs);
++ return;
++}
++
++void cmnd_tx_start(struct iscsi_cmnd *cmnd)
++{
++ struct iscsi_conn *conn = cmnd->conn;
++
++ TRACE_DBG("conn %p, cmnd %p, opcode %x", conn, cmnd, cmnd_opcode(cmnd));
++ iscsi_cmnd_set_length(&cmnd->pdu);
++
++ iscsi_extracheck_is_wr_thread(conn);
++
++ set_cork(conn->sock, 1);
++
++ conn->write_iop = conn->write_iov;
++ conn->write_iop->iov_base = (void __force __user *)(&cmnd->pdu.bhs);
++ conn->write_iop->iov_len = sizeof(cmnd->pdu.bhs);
++ conn->write_iop_used = 1;
++ conn->write_size = sizeof(cmnd->pdu.bhs) + cmnd->pdu.datasize;
++ conn->write_offset = 0;
++
++ switch (cmnd_opcode(cmnd)) {
++ case ISCSI_OP_NOP_IN:
++ if (cmnd->pdu.bhs.itt == ISCSI_RESERVED_TAG)
++ cmnd->pdu.bhs.sn = (__force u32)cmnd_set_sn(cmnd, 0);
++ else
++ cmnd_set_sn(cmnd, 1);
++ break;
++ case ISCSI_OP_SCSI_RSP:
++ cmnd_set_sn(cmnd, 1);
++ break;
++ case ISCSI_OP_SCSI_TASK_MGT_RSP:
++ cmnd_set_sn(cmnd, 1);
++ break;
++ case ISCSI_OP_TEXT_RSP:
++ cmnd_set_sn(cmnd, 1);
++ break;
++ case ISCSI_OP_SCSI_DATA_IN:
++ {
++ struct iscsi_data_in_hdr *rsp =
++ (struct iscsi_data_in_hdr *)&cmnd->pdu.bhs;
++ u32 offset = be32_to_cpu(rsp->buffer_offset);
++
++ TRACE_DBG("cmnd %p, offset %u, datasize %u, bufflen %u", cmnd,
++ offset, cmnd->pdu.datasize, cmnd->bufflen);
++
++ BUG_ON(offset > cmnd->bufflen);
++ BUG_ON(offset + cmnd->pdu.datasize > cmnd->bufflen);
++
++ conn->write_offset = offset;
++
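++		/*
++		 * Advance StatSN only for the final Data-In PDU; intermediate
++		 * Data-In PDUs do not consume a StatSN.
++		 */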
++ cmnd_set_sn(cmnd, (rsp->flags & ISCSI_FLG_FINAL) ? 1 : 0);
++ break;
++ }
++ case ISCSI_OP_LOGOUT_RSP:
++ cmnd_set_sn(cmnd, 1);
++ break;
++ case ISCSI_OP_R2T:
++ cmnd->pdu.bhs.sn = (__force u32)cmnd_set_sn(cmnd, 0);
++ break;
++ case ISCSI_OP_ASYNC_MSG:
++ cmnd_set_sn(cmnd, 1);
++ break;
++ case ISCSI_OP_REJECT:
++ cmnd_set_sn(cmnd, 1);
++ break;
++ default:
++ PRINT_ERROR("Unexpected cmnd op %x", cmnd_opcode(cmnd));
++ break;
++ }
++
++ iscsi_dump_pdu(&cmnd->pdu);
++ return;
++}
++
++void cmnd_tx_end(struct iscsi_cmnd *cmnd)
++{
++ struct iscsi_conn *conn = cmnd->conn;
++
++ TRACE_DBG("%p:%x (should_close_conn %d, should_close_all_conn %d)",
++ cmnd, cmnd_opcode(cmnd), cmnd->should_close_conn,
++ cmnd->should_close_all_conn);
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ switch (cmnd_opcode(cmnd)) {
++ case ISCSI_OP_NOP_IN:
++ case ISCSI_OP_SCSI_RSP:
++ case ISCSI_OP_SCSI_TASK_MGT_RSP:
++ case ISCSI_OP_TEXT_RSP:
++ case ISCSI_OP_R2T:
++ case ISCSI_OP_ASYNC_MSG:
++ case ISCSI_OP_REJECT:
++ case ISCSI_OP_SCSI_DATA_IN:
++ case ISCSI_OP_LOGOUT_RSP:
++ break;
++ default:
++ PRINT_CRIT_ERROR("unexpected cmnd op %x", cmnd_opcode(cmnd));
++ BUG();
++ break;
++ }
++#endif
++
++ if (unlikely(cmnd->should_close_conn)) {
++ if (cmnd->should_close_all_conn) {
++ PRINT_INFO("Closing all connections for target %x at "
++ "initiator's %s request",
++ cmnd->conn->session->target->tid,
++ conn->session->initiator_name);
++ target_del_all_sess(cmnd->conn->session->target, 0);
++ } else {
++ PRINT_INFO("Closing connection at initiator's %s "
++ "request", conn->session->initiator_name);
++ mark_conn_closed(conn);
++ }
++ }
++
++ set_cork(cmnd->conn->sock, 0);
++ return;
++}
++
++/*
++ * Push the command for execution. This function reorders the commands.
++ * Called from the read thread.
++ *
++ * Basically, since we don't support MC/S and TCP guarantees data delivery
++ * order, all the SN handling isn't needed at all (command delivery order is
++ * the natural command execution order), but the insane iSCSI spec requires
++ * us to check it and we have to, because some crazy initiators can rely
++ * on the SN-based order and reorder requests during sending. For all other
++ * normal initiators all that code is a NOP.
++ */
++static void iscsi_push_cmnd(struct iscsi_cmnd *cmnd)
++{
++ struct iscsi_session *session = cmnd->conn->session;
++ struct list_head *entry;
++ u32 cmd_sn;
++
++ TRACE_DBG("cmnd %p, iSCSI opcode %x, sn %u, exp sn %u", cmnd,
++ cmnd_opcode(cmnd), cmnd->pdu.bhs.sn, session->exp_cmd_sn);
++
++ iscsi_extracheck_is_rd_thread(cmnd->conn);
++
++ BUG_ON(cmnd->parent_req != NULL);
++
++ if (cmnd->pdu.bhs.opcode & ISCSI_OP_IMMEDIATE) {
++ TRACE_DBG("Immediate cmd %p (cmd_sn %u)", cmnd,
++ cmnd->pdu.bhs.sn);
++ iscsi_cmnd_exec(cmnd);
++ goto out;
++ }
++
++ spin_lock(&session->sn_lock);
++
++ cmd_sn = cmnd->pdu.bhs.sn;
++ if (cmd_sn == session->exp_cmd_sn) {
++ while (1) {
++ session->exp_cmd_sn = ++cmd_sn;
++
++ if (unlikely(session->tm_active > 0)) {
++ if (before(cmd_sn, session->tm_sn)) {
++ struct iscsi_conn *conn = cmnd->conn;
++
++ spin_unlock(&session->sn_lock);
++
++ spin_lock_bh(&conn->cmd_list_lock);
++ __cmnd_abort(cmnd);
++ spin_unlock_bh(&conn->cmd_list_lock);
++
++ spin_lock(&session->sn_lock);
++ }
++ iscsi_check_send_delayed_tm_resp(session);
++ }
++
++ spin_unlock(&session->sn_lock);
++
++ iscsi_cmnd_exec(cmnd);
++
++ spin_lock(&session->sn_lock);
++
++ if (list_empty(&session->pending_list))
++ break;
++ cmnd = list_entry(session->pending_list.next,
++ struct iscsi_cmnd,
++ pending_list_entry);
++ if (cmnd->pdu.bhs.sn != cmd_sn)
++ break;
++
++ list_del(&cmnd->pending_list_entry);
++ cmnd->pending = 0;
++
++ TRACE_MGMT_DBG("Processing pending cmd %p (cmd_sn %u)",
++ cmnd, cmd_sn);
++ }
++ } else {
++ int drop = 0;
++
++ TRACE_DBG("Pending cmd %p (cmd_sn %u, exp_cmd_sn %u)",
++ cmnd, cmd_sn, session->exp_cmd_sn);
++
++ /*
++ * iSCSI RFC 3720: "The target MUST silently ignore any
++ * non-immediate command outside of [from ExpCmdSN to MaxCmdSN
++		 * inclusive] range". But we won't honor the MaxCmdSN
++		 * requirement, because we adjust MaxCmdSN from a separate
++		 * write thread, so in rare cases the initiator can legally
++		 * send a command with CmdSN > MaxCmdSN. It won't hurt
++		 * anything; in the worst case it will lead to an additional
++		 * QUEUE FULL status.
++ */
++
++ if (unlikely(before(cmd_sn, session->exp_cmd_sn))) {
++ TRACE_MGMT_DBG("Ignoring out of expected range cmd_sn "
++ "(sn %u, exp_sn %u, op %x, CDB op %x)", cmd_sn,
++ session->exp_cmd_sn, cmnd_opcode(cmnd),
++ cmnd_scsicode(cmnd));
++ drop = 1;
++ }
++
++#if 0
++ if (unlikely(after(cmd_sn, session->exp_cmd_sn +
++ iscsi_get_allowed_cmds(session)))) {
++ TRACE_MGMT_DBG("Too large cmd_sn %u (exp_cmd_sn %u, "
++ "max_sn %u)", cmd_sn, session->exp_cmd_sn,
++ iscsi_get_allowed_cmds(session));
++ drop = 1;
++ }
++#endif
++
++ spin_unlock(&session->sn_lock);
++
++ if (unlikely(drop)) {
++ req_cmnd_release_force(cmnd);
++ goto out;
++ }
++
++ if (unlikely(test_bit(ISCSI_CMD_ABORTED,
++ &cmnd->prelim_compl_flags))) {
++ struct iscsi_cmnd *tm_clone;
++
++ TRACE_MGMT_DBG("Aborted pending cmnd %p, creating TM "
++ "clone (scst cmd %p, state %d)", cmnd,
++ cmnd->scst_cmd, cmnd->scst_state);
++
++ tm_clone = iscsi_create_tm_clone(cmnd);
++ if (tm_clone != NULL) {
++ iscsi_cmnd_exec(cmnd);
++ cmnd = tm_clone;
++ }
++ }
++
++ TRACE_MGMT_DBG("Pending cmnd %p (op %x, sn %u, exp sn %u)",
++ cmnd, cmnd_opcode(cmnd), cmd_sn, session->exp_cmd_sn);
++
++ spin_lock(&session->sn_lock);
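++		/*
++		 * Keep pending_list ordered by CmdSN: insert before the first
++		 * entry with a greater SN.
++		 */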
++ list_for_each(entry, &session->pending_list) {
++ struct iscsi_cmnd *tmp =
++ list_entry(entry, struct iscsi_cmnd,
++ pending_list_entry);
++ if (before(cmd_sn, tmp->pdu.bhs.sn))
++ break;
++ }
++ list_add_tail(&cmnd->pending_list_entry, entry);
++ cmnd->pending = 1;
++ }
++
++ spin_unlock(&session->sn_lock);
++out:
++ return;
++}
++
++static int check_segment_length(struct iscsi_cmnd *cmnd)
++{
++ struct iscsi_conn *conn = cmnd->conn;
++ struct iscsi_session *session = conn->session;
++
++ if (unlikely(cmnd->pdu.datasize > session->sess_params.max_recv_data_length)) {
++ PRINT_ERROR("Initiator %s violated negotiated parameters: "
++ "data too long (ITT %x, datasize %u, "
++			"max_recv_data_length %u)", session->initiator_name,
++ cmnd->pdu.bhs.itt, cmnd->pdu.datasize,
++ session->sess_params.max_recv_data_length);
++ mark_conn_closed(conn);
++ return -EINVAL;
++ }
++ return 0;
++}
++
++int cmnd_rx_start(struct iscsi_cmnd *cmnd)
++{
++ int res, rc = 0;
++
++ iscsi_dump_pdu(&cmnd->pdu);
++
++ res = check_segment_length(cmnd);
++ if (res != 0)
++ goto out;
++
++ cmnd->pdu.bhs.sn = be32_to_cpu((__force __be32)cmnd->pdu.bhs.sn);
++
++ switch (cmnd_opcode(cmnd)) {
++ case ISCSI_OP_SCSI_CMD:
++ res = scsi_cmnd_start(cmnd);
++ if (unlikely(res < 0))
++ goto out;
++ update_stat_sn(cmnd);
++ break;
++ case ISCSI_OP_SCSI_DATA_OUT:
++ res = data_out_start(cmnd);
++ goto out;
++ case ISCSI_OP_NOP_OUT:
++ rc = nop_out_start(cmnd);
++ break;
++ case ISCSI_OP_SCSI_TASK_MGT_MSG:
++ case ISCSI_OP_LOGOUT_CMD:
++ update_stat_sn(cmnd);
++ break;
++ case ISCSI_OP_TEXT_CMD:
++ case ISCSI_OP_SNACK_CMD:
++ default:
++ rc = -ISCSI_REASON_UNSUPPORTED_COMMAND;
++ break;
++ }
++
++ if (unlikely(rc < 0)) {
++ PRINT_ERROR("Error %d (iSCSI opcode %x, ITT %x)", rc,
++ cmnd_opcode(cmnd), cmnd->pdu.bhs.itt);
++ res = create_reject_rsp(cmnd, -rc, true);
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++void cmnd_rx_end(struct iscsi_cmnd *cmnd)
++{
++ TRACE_ENTRY();
++
++ TRACE_DBG("cmnd %p, opcode %x", cmnd, cmnd_opcode(cmnd));
++
++ cmnd->conn->last_rcv_time = jiffies;
++ TRACE_DBG("Updated last_rcv_time %ld", cmnd->conn->last_rcv_time);
++
++ switch (cmnd_opcode(cmnd)) {
++ case ISCSI_OP_SCSI_CMD:
++ case ISCSI_OP_NOP_OUT:
++ case ISCSI_OP_SCSI_TASK_MGT_MSG:
++ case ISCSI_OP_LOGOUT_CMD:
++ iscsi_push_cmnd(cmnd);
++ goto out;
++ case ISCSI_OP_SCSI_DATA_OUT:
++ data_out_end(cmnd);
++ break;
++ default:
++ PRINT_ERROR("Unexpected cmnd op %x", cmnd_opcode(cmnd));
++ break;
++ }
++
++ req_cmnd_release(cmnd);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++#if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++static int iscsi_alloc_data_buf(struct scst_cmd *cmd)
++{
++ /*
++	 * sock->ops->sendpage() is an async zero-copy operation,
++	 * so we must be sure not to free and reuse
++	 * the command's buffer before the sending has been completed
++	 * by the network layers. That is only possible if we
++	 * don't use the SGV cache.
++ */
++ EXTRACHECKS_BUG_ON(!(scst_cmd_get_data_direction(cmd) & SCST_DATA_READ));
++ scst_cmd_set_no_sgv(cmd);
++ return 1;
++}
++#endif
++
++static void iscsi_preprocessing_done(struct scst_cmd *scst_cmd)
++{
++ struct iscsi_cmnd *req = (struct iscsi_cmnd *)
++ scst_cmd_get_tgt_priv(scst_cmd);
++
++ TRACE_DBG("req %p", req);
++
++ if (req->conn->rx_task == current)
++ req->scst_state = ISCSI_CMD_STATE_AFTER_PREPROC;
++ else {
++ /*
++ * We wait for the state change without any protection, so
++ * without cmnd_get() it is possible that req will die
++ * "immediately" after the state assignment and
++ * iscsi_make_conn_rd_active() will operate on dead data.
++ * We use the ordered version of cmnd_get(), because "get"
++ * must be done before the state assignment.
++ *
++		 * We are protected from the race on calling cmnd_rx_continue(),
++		 * because there can be only one read thread processing a
++		 * connection.
++ */
++ cmnd_get(req);
++ req->scst_state = ISCSI_CMD_STATE_AFTER_PREPROC;
++ iscsi_make_conn_rd_active(req->conn);
++ if (unlikely(req->conn->closing)) {
++ TRACE_DBG("Waking up closing conn %p", req->conn);
++ wake_up(&req->conn->read_state_waitQ);
++ }
++ cmnd_put(req);
++ }
++
++ return;
++}
++
++/* No locks */
++static void iscsi_try_local_processing(struct iscsi_cmnd *req)
++{
++ struct iscsi_conn *conn = req->conn;
++ bool local;
++
++ TRACE_ENTRY();
++
++ spin_lock_bh(&iscsi_wr_lock);
++ switch (conn->wr_state) {
++ case ISCSI_CONN_WR_STATE_IN_LIST:
++ list_del(&conn->wr_list_entry);
++		/* fall through */
++ case ISCSI_CONN_WR_STATE_IDLE:
++#ifdef CONFIG_SCST_EXTRACHECKS
++ conn->wr_task = current;
++#endif
++ conn->wr_state = ISCSI_CONN_WR_STATE_PROCESSING;
++ conn->wr_space_ready = 0;
++ local = true;
++ break;
++ default:
++ local = false;
++ break;
++ }
++ spin_unlock_bh(&iscsi_wr_lock);
++
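++	/*
++	 * If we took over the write state, send from this connection's write
++	 * queue inline until our responses are out or the socket would block.
++	 */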
++ if (local) {
++ int rc = 1;
++
++ do {
++ rc = iscsi_send(conn);
++ if (rc <= 0)
++ break;
++ } while (req->not_processed_rsp_cnt != 0);
++
++ spin_lock_bh(&iscsi_wr_lock);
++#ifdef CONFIG_SCST_EXTRACHECKS
++ conn->wr_task = NULL;
++#endif
++ if ((rc == -EAGAIN) && !conn->wr_space_ready) {
++ TRACE_DBG("EAGAIN, setting WR_STATE_SPACE_WAIT "
++ "(conn %p)", conn);
++ conn->wr_state = ISCSI_CONN_WR_STATE_SPACE_WAIT;
++ } else if (test_write_ready(conn)) {
++ list_add_tail(&conn->wr_list_entry, &iscsi_wr_list);
++ conn->wr_state = ISCSI_CONN_WR_STATE_IN_LIST;
++ wake_up(&iscsi_wr_waitQ);
++ } else
++ conn->wr_state = ISCSI_CONN_WR_STATE_IDLE;
++ spin_unlock_bh(&iscsi_wr_lock);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static int iscsi_xmit_response(struct scst_cmd *scst_cmd)
++{
++ int is_send_status = scst_cmd_get_is_send_status(scst_cmd);
++ struct iscsi_cmnd *req = (struct iscsi_cmnd *)
++ scst_cmd_get_tgt_priv(scst_cmd);
++ struct iscsi_conn *conn = req->conn;
++ int status = scst_cmd_get_status(scst_cmd);
++ u8 *sense = scst_cmd_get_sense_buffer(scst_cmd);
++ int sense_len = scst_cmd_get_sense_buffer_len(scst_cmd);
++ struct iscsi_cmnd *wr_rsp, *our_rsp;
++
++ EXTRACHECKS_BUG_ON(scst_cmd_atomic(scst_cmd));
++
++ scst_cmd_set_tgt_priv(scst_cmd, NULL);
++
++ EXTRACHECKS_BUG_ON(req->scst_state != ISCSI_CMD_STATE_RESTARTED);
++
++ if (unlikely(scst_cmd_aborted(scst_cmd)))
++ set_bit(ISCSI_CMD_ABORTED, &req->prelim_compl_flags);
++
++ if (unlikely(req->prelim_compl_flags != 0)) {
++ if (test_bit(ISCSI_CMD_ABORTED, &req->prelim_compl_flags)) {
++ TRACE_MGMT_DBG("req %p (scst_cmd %p) aborted", req,
++ req->scst_cmd);
++ scst_set_delivery_status(req->scst_cmd,
++ SCST_CMD_DELIVERY_ABORTED);
++ req->scst_state = ISCSI_CMD_STATE_PROCESSED;
++ req_cmnd_release_force(req);
++ goto out;
++ }
++
++ TRACE_DBG("Prelim completed req %p", req);
++
++ /*
++ * We could preliminary have finished req before we
++ * knew its device, so check if we return correct sense
++ * format.
++ */
++ scst_check_convert_sense(scst_cmd);
++
++ if (!req->own_sg) {
++ req->sg = scst_cmd_get_sg(scst_cmd);
++ req->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
++ }
++ } else {
++ EXTRACHECKS_BUG_ON(req->own_sg);
++ req->sg = scst_cmd_get_sg(scst_cmd);
++ req->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
++ }
++
++ req->bufflen = scst_cmd_get_adjusted_resp_data_len(scst_cmd);
++
++ req->scst_state = ISCSI_CMD_STATE_PROCESSED;
++
++ TRACE_DBG("req %p, is_send_status=%x, req->bufflen=%d, req->sg=%p, "
++ "req->sg_cnt %d", req, is_send_status, req->bufflen, req->sg,
++ req->sg_cnt);
++
++ EXTRACHECKS_BUG_ON(req->hashed);
++ if (req->main_rsp != NULL)
++ EXTRACHECKS_BUG_ON(cmnd_opcode(req->main_rsp) != ISCSI_OP_REJECT);
++
++ if (unlikely((req->bufflen != 0) && !is_send_status)) {
++ PRINT_CRIT_ERROR("%s", "Sending DATA without STATUS is "
++ "unsupported");
++ scst_set_cmd_error(scst_cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ BUG(); /* ToDo */
++ }
++
++ /*
++ * We need to decrement active_cmds before adding any responses into
++	 * the write queue to eliminate a race where responses could be sent
++	 * with a wrong MaxCmdSN.
++ */
++ if (likely(req->dec_active_cmds))
++ iscsi_dec_active_cmds(req);
++
++ if (req->bufflen != 0) {
++ /*
++ * Check above makes sure that is_send_status is set,
++ * so status is valid here, but in future that could change.
++ * ToDo
++ */
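++		/*
++		 * For a non-bidirectional command without CHECK CONDITION the
++		 * status can be piggy-backed on the last Data-In PDU;
++		 * otherwise send the data first and the status in a separate
++		 * SCSI Response PDU.
++		 */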
++ if ((status != SAM_STAT_CHECK_CONDITION) &&
++ ((cmnd_hdr(req)->flags & (ISCSI_CMD_WRITE|ISCSI_CMD_READ)) !=
++ (ISCSI_CMD_WRITE|ISCSI_CMD_READ))) {
++ send_data_rsp(req, status, is_send_status);
++ } else {
++ struct iscsi_cmnd *rsp;
++ send_data_rsp(req, 0, 0);
++ if (is_send_status) {
++ rsp = create_status_rsp(req, status, sense,
++ sense_len);
++ iscsi_cmnd_init_write(rsp, 0);
++ }
++ }
++ } else if (is_send_status) {
++ struct iscsi_cmnd *rsp;
++ rsp = create_status_rsp(req, status, sense, sense_len);
++ iscsi_cmnd_init_write(rsp, 0);
++ }
++#ifdef CONFIG_SCST_EXTRACHECKS
++ else
++ BUG();
++#endif
++
++ /*
++ * There's no need for protection, since we are not going to
++ * dereference them.
++ */
++ wr_rsp = list_entry(conn->write_list.next, struct iscsi_cmnd,
++ write_list_entry);
++ our_rsp = list_entry(req->rsp_cmd_list.next, struct iscsi_cmnd,
++ rsp_cmd_list_entry);
++ if (wr_rsp == our_rsp) {
++ /*
++ * This is our rsp, so let's try to process it locally to
++ * decrease latency. We need to call pre_release before
++ * processing to handle some error recovery cases.
++ */
++ if (scst_get_active_cmd_count(scst_cmd) <= 2) {
++ req_cmnd_pre_release(req);
++ iscsi_try_local_processing(req);
++ cmnd_put(req);
++ } else {
++ /*
++ * There's too much backend activity, so it could be
++ * better to push it to the write thread.
++ */
++ goto out_push_to_wr_thread;
++ }
++ } else
++ goto out_push_to_wr_thread;
++
++out:
++ return SCST_TGT_RES_SUCCESS;
++
++out_push_to_wr_thread:
++ TRACE_DBG("Waking up write thread (conn %p)", conn);
++ req_cmnd_release(req);
++ iscsi_make_conn_wr_active(conn);
++ goto out;
++}
++
++/* Called under sn_lock */
++static bool iscsi_is_delay_tm_resp(struct iscsi_cmnd *rsp)
++{
++ bool res = 0;
++ struct iscsi_task_mgt_hdr *req_hdr =
++ (struct iscsi_task_mgt_hdr *)&rsp->parent_req->pdu.bhs;
++ int function = req_hdr->function & ISCSI_FUNCTION_MASK;
++ struct iscsi_session *sess = rsp->conn->session;
++
++ TRACE_ENTRY();
++
++ /* This should be checked for immediate TM commands as well */
++
++ switch (function) {
++ default:
++ if (before(sess->exp_cmd_sn, req_hdr->cmd_sn))
++ res = 1;
++ break;
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* Called under sn_lock, but might drop it inside, then reacquire it */
++static void iscsi_check_send_delayed_tm_resp(struct iscsi_session *sess)
++ __acquires(&sn_lock)
++ __releases(&sn_lock)
++{
++ struct iscsi_cmnd *tm_rsp = sess->tm_rsp;
++
++ TRACE_ENTRY();
++
++ if (tm_rsp == NULL)
++ goto out;
++
++ if (iscsi_is_delay_tm_resp(tm_rsp))
++ goto out;
++
++ TRACE_MGMT_DBG("Sending delayed rsp %p", tm_rsp);
++
++ sess->tm_rsp = NULL;
++ sess->tm_active--;
++
++ spin_unlock(&sess->sn_lock);
++
++ BUG_ON(sess->tm_active < 0);
++
++ iscsi_cmnd_init_write(tm_rsp, ISCSI_INIT_WRITE_WAKE);
++
++ spin_lock(&sess->sn_lock);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static void iscsi_send_task_mgmt_resp(struct iscsi_cmnd *req, int status)
++{
++ struct iscsi_cmnd *rsp;
++ struct iscsi_task_mgt_hdr *req_hdr =
++ (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
++ struct iscsi_task_rsp_hdr *rsp_hdr;
++ struct iscsi_session *sess = req->conn->session;
++ int fn = req_hdr->function & ISCSI_FUNCTION_MASK;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("TM req %p finished", req);
++ TRACE(TRACE_MGMT, "iSCSI TM fn %d finished, status %d", fn, status);
++
++ rsp = iscsi_alloc_rsp(req);
++ rsp_hdr = (struct iscsi_task_rsp_hdr *)&rsp->pdu.bhs;
++
++ rsp_hdr->opcode = ISCSI_OP_SCSI_TASK_MGT_RSP;
++ rsp_hdr->flags = ISCSI_FLG_FINAL;
++ rsp_hdr->itt = req_hdr->itt;
++ rsp_hdr->response = status;
++
++ if (fn == ISCSI_FUNCTION_TARGET_COLD_RESET) {
++ rsp->should_close_conn = 1;
++ rsp->should_close_all_conn = 1;
++ }
++
++ BUG_ON(sess->tm_rsp != NULL);
++
++ spin_lock(&sess->sn_lock);
++ if (iscsi_is_delay_tm_resp(rsp)) {
++ TRACE_MGMT_DBG("Delaying TM fn %d response %p "
++ "(req %p), because not all affected commands "
++			"have been received (TM cmd sn %u, exp sn %u)",
++ req_hdr->function & ISCSI_FUNCTION_MASK, rsp, req,
++ req_hdr->cmd_sn, sess->exp_cmd_sn);
++ sess->tm_rsp = rsp;
++ spin_unlock(&sess->sn_lock);
++ goto out_release;
++ }
++ sess->tm_active--;
++ spin_unlock(&sess->sn_lock);
++
++ BUG_ON(sess->tm_active < 0);
++
++ iscsi_cmnd_init_write(rsp, ISCSI_INIT_WRITE_WAKE);
++
++out_release:
++ req_cmnd_release(req);
++
++ TRACE_EXIT();
++ return;
++}
++
++static inline int iscsi_get_mgmt_response(int status)
++{
++ switch (status) {
++ case SCST_MGMT_STATUS_SUCCESS:
++ return ISCSI_RESPONSE_FUNCTION_COMPLETE;
++
++ case SCST_MGMT_STATUS_TASK_NOT_EXIST:
++ return ISCSI_RESPONSE_UNKNOWN_TASK;
++
++ case SCST_MGMT_STATUS_LUN_NOT_EXIST:
++ return ISCSI_RESPONSE_UNKNOWN_LUN;
++
++ case SCST_MGMT_STATUS_FN_NOT_SUPPORTED:
++ return ISCSI_RESPONSE_FUNCTION_UNSUPPORTED;
++
++ case SCST_MGMT_STATUS_REJECTED:
++ case SCST_MGMT_STATUS_FAILED:
++ default:
++ return ISCSI_RESPONSE_FUNCTION_REJECTED;
++ }
++}
++
++static void iscsi_task_mgmt_fn_done(struct scst_mgmt_cmd *scst_mcmd)
++{
++ int fn = scst_mgmt_cmd_get_fn(scst_mcmd);
++ struct iscsi_cmnd *req = (struct iscsi_cmnd *)
++ scst_mgmt_cmd_get_tgt_priv(scst_mcmd);
++ int status =
++ iscsi_get_mgmt_response(scst_mgmt_cmd_get_status(scst_mcmd));
++
++ if ((status == ISCSI_RESPONSE_UNKNOWN_TASK) &&
++ (fn == SCST_ABORT_TASK)) {
++ /* If we are here, we found the task, so must succeed */
++ status = ISCSI_RESPONSE_FUNCTION_COMPLETE;
++ }
++
++ TRACE_MGMT_DBG("req %p, scst_mcmd %p, fn %d, scst status %d, status %d",
++ req, scst_mcmd, fn, scst_mgmt_cmd_get_status(scst_mcmd),
++ status);
++
++ switch (fn) {
++ case SCST_NEXUS_LOSS_SESS:
++ case SCST_ABORT_ALL_TASKS_SESS:
++ /* They are internal */
++ break;
++ default:
++ iscsi_send_task_mgmt_resp(req, status);
++ scst_mgmt_cmd_set_tgt_priv(scst_mcmd, NULL);
++ break;
++ }
++ return;
++}
++
++static int iscsi_scsi_aen(struct scst_aen *aen)
++{
++ int res = SCST_AEN_RES_SUCCESS;
++ __be64 lun = scst_aen_get_lun(aen);
++ const uint8_t *sense = scst_aen_get_sense(aen);
++ int sense_len = scst_aen_get_sense_len(aen);
++ struct iscsi_session *sess = scst_sess_get_tgt_priv(
++ scst_aen_get_sess(aen));
++ struct iscsi_conn *conn;
++ bool found;
++ struct iscsi_cmnd *fake_req, *rsp;
++ struct iscsi_async_msg_hdr *rsp_hdr;
++ struct scatterlist *sg;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("SCSI AEN to sess %p (initiator %s)", sess,
++ sess->initiator_name);
++
++ mutex_lock(&sess->target->target_mutex);
++
++ found = false;
++ list_for_each_entry_reverse(conn, &sess->conn_list, conn_list_entry) {
++ if (!test_bit(ISCSI_CONN_SHUTTINGDOWN, &conn->conn_aflags) &&
++ (conn->conn_reinst_successor == NULL)) {
++ found = true;
++ break;
++ }
++ }
++ if (!found) {
++ TRACE_MGMT_DBG("Unable to find alive conn for sess %p", sess);
++ goto out_err;
++ }
++
++ /* Create a fake request */
++ fake_req = cmnd_alloc(conn, NULL);
++ if (fake_req == NULL) {
++ PRINT_ERROR("%s", "Unable to alloc fake AEN request");
++ goto out_err;
++ }
++
++ mutex_unlock(&sess->target->target_mutex);
++
++ rsp = iscsi_alloc_main_rsp(fake_req);
++ if (rsp == NULL) {
++ PRINT_ERROR("%s", "Unable to alloc AEN rsp");
++ goto out_err_free_req;
++ }
++
++ fake_req->scst_state = ISCSI_CMD_STATE_AEN;
++ fake_req->scst_aen = aen;
++
++ rsp_hdr = (struct iscsi_async_msg_hdr *)&rsp->pdu.bhs;
++
++ rsp_hdr->opcode = ISCSI_OP_ASYNC_MSG;
++ rsp_hdr->flags = ISCSI_FLG_FINAL;
++ rsp_hdr->lun = lun; /* it's already in SCSI form */
++ rsp_hdr->ffffffff = __constant_cpu_to_be32(0xffffffff);
++ rsp_hdr->async_event = ISCSI_ASYNC_SCSI;
++
++ sg = rsp->sg = rsp->rsp_sg;
++ rsp->sg_cnt = 2;
++ rsp->own_sg = 1;
++
++ sg_init_table(sg, 2);
++ sg_set_buf(&sg[0], &rsp->sense_hdr, sizeof(rsp->sense_hdr));
++ sg_set_buf(&sg[1], sense, sense_len);
++
++ rsp->sense_hdr.length = cpu_to_be16(sense_len);
++ rsp->pdu.datasize = sizeof(rsp->sense_hdr) + sense_len;
++ rsp->bufflen = rsp->pdu.datasize;
++
++ req_cmnd_release(fake_req);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_err_free_req:
++ req_cmnd_release(fake_req);
++
++out_err:
++ mutex_unlock(&sess->target->target_mutex);
++ res = SCST_AEN_RES_FAILED;
++ goto out;
++}
++
++static int iscsi_report_aen(struct scst_aen *aen)
++{
++ int res;
++ int event_fn = scst_aen_get_event_fn(aen);
++
++ TRACE_ENTRY();
++
++ switch (event_fn) {
++ case SCST_AEN_SCSI:
++ res = iscsi_scsi_aen(aen);
++ break;
++ default:
++ TRACE_MGMT_DBG("Unsupported AEN %d", event_fn);
++ res = SCST_AEN_RES_NOT_SUPPORTED;
++ break;
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int iscsi_get_initiator_port_transport_id(struct scst_session *scst_sess,
++ uint8_t **transport_id)
++{
++ struct iscsi_session *sess;
++ int res = 0;
++ union iscsi_sid sid;
++ int tr_id_size;
++ uint8_t *tr_id;
++ uint8_t q;
++
++ TRACE_ENTRY();
++
++ if (scst_sess == NULL) {
++ res = SCSI_TRANSPORTID_PROTOCOLID_ISCSI;
++ goto out;
++ }
++
++ sess = (struct iscsi_session *)scst_sess_get_tgt_priv(scst_sess);
++
++ sid = *(union iscsi_sid *)&sess->sid;
++ sid.id.tsih = 0;
++
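++	/*
++	 * TransportID format: a 4-byte header followed by the string
++	 * "<initiator name>,i,0x<ISID>", NUL-terminated and padded to a
++	 * 4-byte boundary.
++	 */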
++ tr_id_size = 4 + strlen(sess->initiator_name) + 5 +
++ snprintf(&q, sizeof(q), "%llx", sid.id64) + 1;
++ tr_id_size = (tr_id_size + 3) & -4;
++
++ tr_id = kzalloc(tr_id_size, GFP_KERNEL);
++ if (tr_id == NULL) {
++ PRINT_ERROR("Allocation of TransportID (size %d) failed",
++ tr_id_size);
++ res = -ENOMEM;
++ goto out;
++ }
++
++ tr_id[0] = 0x40 | SCSI_TRANSPORTID_PROTOCOLID_ISCSI;
++ sprintf(&tr_id[4], "%s,i,0x%llx", sess->initiator_name, sid.id64);
++
++ put_unaligned(cpu_to_be16(tr_id_size - 4),
++ (__be16 *)&tr_id[2]);
++
++ *transport_id = tr_id;
++
++ TRACE_DBG("Created tid '%s'", &tr_id[4]);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++void iscsi_send_nop_in(struct iscsi_conn *conn)
++{
++ struct iscsi_cmnd *req, *rsp;
++ struct iscsi_nop_in_hdr *rsp_hdr;
++
++ TRACE_ENTRY();
++
++ req = cmnd_alloc(conn, NULL);
++ if (req == NULL) {
++ PRINT_ERROR("%s", "Unable to alloc fake Nop-In request");
++ goto out_err;
++ }
++
++ rsp = iscsi_alloc_main_rsp(req);
++ if (rsp == NULL) {
++ PRINT_ERROR("%s", "Unable to alloc Nop-In rsp");
++ goto out_err_free_req;
++ }
++
++ cmnd_get(rsp);
++
++ rsp_hdr = (struct iscsi_nop_in_hdr *)&rsp->pdu.bhs;
++ rsp_hdr->opcode = ISCSI_OP_NOP_IN;
++ rsp_hdr->flags = ISCSI_FLG_FINAL;
++ rsp_hdr->itt = ISCSI_RESERVED_TAG;
++ rsp_hdr->ttt = (__force __be32)conn->nop_in_ttt++;
++
++ if (conn->nop_in_ttt == ISCSI_RESERVED_TAG_CPU32)
++ conn->nop_in_ttt = 0;
++
++ /* Supposed that all other fields are zeroed */
++
++ TRACE_DBG("Sending Nop-In request (ttt 0x%08x)", rsp_hdr->ttt);
++ spin_lock_bh(&conn->nop_req_list_lock);
++ list_add_tail(&rsp->nop_req_list_entry, &conn->nop_req_list);
++ spin_unlock_bh(&conn->nop_req_list_lock);
++
++out_err_free_req:
++ req_cmnd_release(req);
++
++out_err:
++ TRACE_EXIT();
++ return;
++}
++
++static int iscsi_target_detect(struct scst_tgt_template *templ)
++{
++ /* Nothing to do */
++ return 0;
++}
++
++static int iscsi_target_release(struct scst_tgt *scst_tgt)
++{
++ /* Nothing to do */
++ return 0;
++}
++
++static struct scst_trace_log iscsi_local_trace_tbl[] = {
++ { TRACE_D_WRITE, "d_write" },
++ { TRACE_CONN_OC, "conn" },
++ { TRACE_CONN_OC_DBG, "conn_dbg" },
++ { TRACE_D_IOV, "iov" },
++ { TRACE_D_DUMP_PDU, "pdu" },
++ { TRACE_NET_PG, "net_page" },
++ { 0, NULL }
++};
++
++#define ISCSI_TRACE_TBL_HELP ", d_write, conn, conn_dbg, iov, pdu, net_page"
++
++static uint16_t iscsi_get_scsi_transport_version(struct scst_tgt *scst_tgt)
++{
++ return 0x0960; /* iSCSI */
++}
++
++struct scst_tgt_template iscsi_template = {
++ .name = "iscsi",
++ .sg_tablesize = 0xFFFF /* no limit */,
++ .threads_num = 0,
++ .no_clustering = 1,
++ .xmit_response_atomic = 0,
++ .tgtt_attrs = iscsi_attrs,
++ .tgt_attrs = iscsi_tgt_attrs,
++ .sess_attrs = iscsi_sess_attrs,
++ .enable_target = iscsi_enable_target,
++ .is_target_enabled = iscsi_is_target_enabled,
++ .add_target = iscsi_sysfs_add_target,
++ .del_target = iscsi_sysfs_del_target,
++ .mgmt_cmd = iscsi_sysfs_mgmt_cmd,
++ .tgtt_optional_attributes = "IncomingUser, OutgoingUser",
++ .tgt_optional_attributes = "IncomingUser, OutgoingUser, allowed_portal",
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ .default_trace_flags = ISCSI_DEFAULT_LOG_FLAGS,
++ .trace_flags = &trace_flag,
++ .trace_tbl = iscsi_local_trace_tbl,
++ .trace_tbl_help = ISCSI_TRACE_TBL_HELP,
++#endif
++ .detect = iscsi_target_detect,
++ .release = iscsi_target_release,
++ .xmit_response = iscsi_xmit_response,
++#if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++ .alloc_data_buf = iscsi_alloc_data_buf,
++#endif
++ .preprocessing_done = iscsi_preprocessing_done,
++ .pre_exec = iscsi_pre_exec,
++ .task_mgmt_affected_cmds_done = iscsi_task_mgmt_affected_cmds_done,
++ .task_mgmt_fn_done = iscsi_task_mgmt_fn_done,
++ .report_aen = iscsi_report_aen,
++ .get_initiator_port_transport_id = iscsi_get_initiator_port_transport_id,
++ .get_scsi_transport_version = iscsi_get_scsi_transport_version,
++};
++
++static __init int iscsi_run_threads(int count, char *name, int (*fn)(void *))
++{
++ int res = 0;
++ int i;
++ struct iscsi_thread_t *thr;
++
++ for (i = 0; i < count; i++) {
++ thr = kmalloc(sizeof(*thr), GFP_KERNEL);
++ if (!thr) {
++ res = -ENOMEM;
++ PRINT_ERROR("Failed to allocate thr %d", res);
++ goto out;
++ }
++ thr->thr = kthread_run(fn, NULL, "%s%d", name, i);
++ if (IS_ERR(thr->thr)) {
++ res = PTR_ERR(thr->thr);
++ PRINT_ERROR("kthread_create() failed: %d", res);
++ kfree(thr);
++ goto out;
++ }
++ list_add_tail(&thr->threads_list_entry, &iscsi_threads_list);
++ }
++
++out:
++ return res;
++}
++
++static void iscsi_stop_threads(void)
++{
++ struct iscsi_thread_t *t, *tmp;
++
++ list_for_each_entry_safe(t, tmp, &iscsi_threads_list,
++ threads_list_entry) {
++ int rc = kthread_stop(t->thr);
++ if (rc < 0)
++ TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
++ list_del(&t->threads_list_entry);
++ kfree(t);
++ }
++ return;
++}
++
++static int __init iscsi_init(void)
++{
++ int err = 0;
++ int num;
++
++ PRINT_INFO("iSCSI SCST Target - version %s", ISCSI_VERSION_STRING);
++
++ dummy_page = alloc_pages(GFP_KERNEL, 0);
++ if (dummy_page == NULL) {
++ PRINT_ERROR("%s", "Dummy page allocation failed");
++ goto out;
++ }
++
++ sg_init_table(&dummy_sg, 1);
++ sg_set_page(&dummy_sg, dummy_page, PAGE_SIZE, 0);
++
++#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++ err = net_set_get_put_page_callbacks(iscsi_get_page_callback,
++ iscsi_put_page_callback);
++ if (err != 0) {
++		PRINT_INFO("Unable to set page callbacks: %d", err);
++ goto out_free_dummy;
++ }
++#else
++#ifndef GENERATING_UPSTREAM_PATCH
++	PRINT_WARNING("%s",
++		"CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION "
++		"not enabled in your kernel. ISCSI-SCST will not achieve "
++		"the best performance. Refer to the README file for details.");
++#endif
++#endif
++
++ ctr_major = register_chrdev(0, ctr_name, &ctr_fops);
++ if (ctr_major < 0) {
++ PRINT_ERROR("failed to register the control device %d",
++ ctr_major);
++ err = ctr_major;
++ goto out_callb;
++ }
++
++ err = event_init();
++ if (err < 0)
++ goto out_reg;
++
++ iscsi_cmnd_cache = KMEM_CACHE(iscsi_cmnd, SCST_SLAB_FLAGS);
++ if (!iscsi_cmnd_cache) {
++ err = -ENOMEM;
++ goto out_event;
++ }
++
++ err = scst_register_target_template(&iscsi_template);
++ if (err < 0)
++ goto out_kmem;
++
++ iscsi_conn_ktype.sysfs_ops = scst_sysfs_get_sysfs_ops();
++
++ num = max((int)num_online_cpus(), 2);
++
++ err = iscsi_run_threads(num, "iscsird", istrd);
++ if (err != 0)
++ goto out_thr;
++
++ err = iscsi_run_threads(num, "iscsiwr", istwr);
++ if (err != 0)
++ goto out_thr;
++
++out:
++ return err;
++
++out_thr:
++ iscsi_stop_threads();
++
++ scst_unregister_target_template(&iscsi_template);
++
++out_kmem:
++ kmem_cache_destroy(iscsi_cmnd_cache);
++
++out_event:
++ event_exit();
++
++out_reg:
++ unregister_chrdev(ctr_major, ctr_name);
++
++out_callb:
++#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++ net_set_get_put_page_callbacks(NULL, NULL);
++
++out_free_dummy:
++#endif
++ __free_pages(dummy_page, 0);
++ goto out;
++}
++
++static void __exit iscsi_exit(void)
++{
++ iscsi_stop_threads();
++
++ unregister_chrdev(ctr_major, ctr_name);
++
++ event_exit();
++
++ kmem_cache_destroy(iscsi_cmnd_cache);
++
++ scst_unregister_target_template(&iscsi_template);
++
++#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++ net_set_get_put_page_callbacks(NULL, NULL);
++#endif
++
++ __free_pages(dummy_page, 0);
++ return;
++}
++
++module_init(iscsi_init);
++module_exit(iscsi_exit);
++
++MODULE_VERSION(ISCSI_VERSION_STRING);
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("SCST iSCSI Target");
+diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi_dbg.h linux-2.6.36/drivers/scst/iscsi-scst/iscsi_dbg.h
+--- orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi_dbg.h
++++ linux-2.6.36/drivers/scst/iscsi-scst/iscsi_dbg.h
+@@ -0,0 +1,60 @@
++/*
++ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef ISCSI_DBG_H
++#define ISCSI_DBG_H
++
++#define LOG_PREFIX "iscsi-scst"
++
++#include <scst/scst_debug.h>
++
++#define TRACE_D_WRITE 0x80000000
++#define TRACE_CONN_OC 0x40000000
++#define TRACE_D_IOV 0x20000000
++#define TRACE_D_DUMP_PDU 0x10000000
++#define TRACE_NET_PG 0x08000000
++#define TRACE_CONN_OC_DBG 0x04000000
++
++#ifdef CONFIG_SCST_DEBUG
++#define ISCSI_DEFAULT_LOG_FLAGS (TRACE_FUNCTION | TRACE_LINE | TRACE_PID | \
++ TRACE_OUT_OF_MEM | TRACE_MGMT | TRACE_MGMT_DEBUG | \
++ TRACE_MINOR | TRACE_SPECIAL | TRACE_CONN_OC)
++#else
++#define ISCSI_DEFAULT_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MGMT | \
++ TRACE_SPECIAL)
++#endif
++
++#ifdef CONFIG_SCST_DEBUG
++struct iscsi_pdu;
++struct iscsi_cmnd;
++extern void iscsi_dump_pdu(struct iscsi_pdu *pdu);
++extern unsigned long iscsi_get_flow_ctrl_or_mgmt_dbg_log_flag(
++ struct iscsi_cmnd *cmnd);
++#else
++#define iscsi_dump_pdu(x) do {} while (0)
++#define iscsi_get_flow_ctrl_or_mgmt_dbg_log_flag(x) do {} while (0)
++#endif
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++extern unsigned long iscsi_trace_flag;
++#define trace_flag iscsi_trace_flag
++#endif
++
++#define TRACE_CONN_CLOSE(args...) TRACE_DBG_FLAG(TRACE_DEBUG|TRACE_CONN_OC, args)
++#define TRACE_CONN_CLOSE_DBG(args...) TRACE(TRACE_CONN_OC_DBG, args)
++#define TRACE_NET_PAGE(args...) TRACE_DBG_FLAG(TRACE_NET_PG, args)
++#define TRACE_WRITE(args...) TRACE_DBG_FLAG(TRACE_DEBUG|TRACE_D_WRITE, args)
++
++#endif
+diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.h linux-2.6.36/drivers/scst/iscsi-scst/iscsi.h
+--- orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.h
++++ linux-2.6.36/drivers/scst/iscsi-scst/iscsi.h
+@@ -0,0 +1,743 @@
++/*
++ * Copyright (C) 2002 - 2003 Ardis Technolgies <roman@ardistech.com>
++ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __ISCSI_H__
++#define __ISCSI_H__
++
++#include <linux/pagemap.h>
++#include <linux/mm.h>
++#include <linux/net.h>
++#include <net/sock.h>
++
++#include <scst/scst.h>
++#include <scst/iscsi_scst.h>
++#include "iscsi_hdr.h"
++#include "iscsi_dbg.h"
++
++#define iscsi_sense_crc_error ABORTED_COMMAND, 0x47, 0x05
++#define iscsi_sense_unexpected_unsolicited_data ABORTED_COMMAND, 0x0C, 0x0C
++#define iscsi_sense_incorrect_amount_of_data ABORTED_COMMAND, 0x0C, 0x0D
++
++struct iscsi_sess_params {
++ int initial_r2t;
++ int immediate_data;
++ int max_connections;
++ unsigned int max_recv_data_length;
++ unsigned int max_xmit_data_length;
++ unsigned int max_burst_length;
++ unsigned int first_burst_length;
++ int default_wait_time;
++ int default_retain_time;
++ unsigned int max_outstanding_r2t;
++ int data_pdu_inorder;
++ int data_sequence_inorder;
++ int error_recovery_level;
++ int header_digest;
++ int data_digest;
++ int ofmarker;
++ int ifmarker;
++ int ofmarkint;
++ int ifmarkint;
++};
++
++struct iscsi_tgt_params {
++ int queued_cmnds;
++ unsigned int rsp_timeout;
++ unsigned int nop_in_interval;
++};
++
++struct network_thread_info {
++ struct task_struct *task;
++ unsigned int ready;
++};
++
++struct iscsi_target;
++struct iscsi_cmnd;
++
++struct iscsi_attr {
++ struct list_head attrs_list_entry;
++ struct kobj_attribute attr;
++ struct iscsi_target *target;
++ const char *name;
++};
++
++struct iscsi_target {
++ struct scst_tgt *scst_tgt;
++
++ struct mutex target_mutex;
++
++ struct list_head session_list; /* protected by target_mutex */
++
++ struct list_head target_list_entry;
++ u32 tid;
++
++ unsigned int tgt_enabled:1;
++
++ /* Protected by target_mutex */
++ struct list_head attrs_list;
++
++ char name[ISCSI_NAME_LEN];
++};
++
++#define ISCSI_HASH_ORDER 8
++#define cmnd_hashfn(itt) (BUILD_BUG_ON(!__same_type(itt, __be32)), \
++ hash_long((__force u32)(itt), ISCSI_HASH_ORDER))
++
++struct iscsi_session {
++ struct iscsi_target *target;
++ struct scst_session *scst_sess;
++
++ struct list_head pending_list; /* protected by sn_lock */
++
++ /* Unprotected, since accessed only from a single read thread */
++ u32 next_ttt;
++
++ /* Read only, if there are connection(s) */
++ struct iscsi_tgt_params tgt_params;
++ atomic_t active_cmds;
++
++ spinlock_t sn_lock;
++ u32 exp_cmd_sn; /* protected by sn_lock */
++
++ /* All 3 protected by sn_lock */
++ int tm_active;
++ u32 tm_sn;
++ struct iscsi_cmnd *tm_rsp;
++
++ /* Read only, if there are connection(s) */
++ struct iscsi_sess_params sess_params;
++
++	/*
++	 * In some corner cases commands can be deleted from the hash
++	 * by a thread other than the corresponding read thread. So, to
++	 * simplify error recovery, we have this lock.
++	 */
++ spinlock_t cmnd_data_wait_hash_lock;
++ struct list_head cmnd_data_wait_hash[1 << ISCSI_HASH_ORDER];
++
++ struct list_head conn_list; /* protected by target_mutex */
++
++ struct list_head session_list_entry;
++
++ /* All protected by target_mutex, where necessary */
++ struct iscsi_session *sess_reinst_successor;
++ unsigned int sess_reinstating:1;
++ unsigned int sess_shutting_down:1;
++
++ /* All don't need any protection */
++ char *initiator_name;
++ u64 sid;
++};
++
++#define ISCSI_CONN_IOV_MAX (PAGE_SIZE/sizeof(struct iovec))
++
++#define ISCSI_CONN_RD_STATE_IDLE 0
++#define ISCSI_CONN_RD_STATE_IN_LIST 1
++#define ISCSI_CONN_RD_STATE_PROCESSING 2
++
++#define ISCSI_CONN_WR_STATE_IDLE 0
++#define ISCSI_CONN_WR_STATE_IN_LIST 1
++#define ISCSI_CONN_WR_STATE_SPACE_WAIT 2
++#define ISCSI_CONN_WR_STATE_PROCESSING 3
++
++struct iscsi_conn {
++ struct iscsi_session *session; /* owning session */
++
++ /* Both protected by session->sn_lock */
++ u32 stat_sn;
++ u32 exp_stat_sn;
++
++#define ISCSI_CONN_REINSTATING 1
++#define ISCSI_CONN_SHUTTINGDOWN 2
++ unsigned long conn_aflags;
++
++ spinlock_t cmd_list_lock; /* BH lock */
++
++ /* Protected by cmd_list_lock */
++	struct list_head cmd_list; /* incoming/outgoing PDUs */
++
++ atomic_t conn_ref_cnt;
++
++ spinlock_t write_list_lock;
++ /* List of data pdus to be sent. Protected by write_list_lock */
++ struct list_head write_list;
++ /* List of data pdus being sent. Protected by write_list_lock */
++ struct list_head write_timeout_list;
++
++ /* Protected by write_list_lock */
++ struct timer_list rsp_timer;
++ unsigned int rsp_timeout; /* in jiffies */
++
++	/*
++	 * Both protected by iscsi_wr_lock. Modified independently of the
++	 * above field, hence the alignment.
++	 */
++ unsigned short wr_state __attribute__((aligned(sizeof(long))));
++ unsigned short wr_space_ready:1;
++
++ struct list_head wr_list_entry;
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ struct task_struct *wr_task;
++#endif
++
++ /*
++ * All are unprotected, since accessed only from a single write
++ * thread.
++ */
++ struct iscsi_cmnd *write_cmnd;
++ struct iovec *write_iop;
++ int write_iop_used;
++ struct iovec write_iov[2];
++ u32 write_size;
++ u32 write_offset;
++ int write_state;
++
++ /* Both don't need any protection */
++ struct file *file;
++ struct socket *sock;
++
++ void (*old_state_change)(struct sock *);
++ void (*old_data_ready)(struct sock *, int);
++ void (*old_write_space)(struct sock *);
++
++ /* Both read only. Stay here for better CPU cache locality. */
++ int hdigest_type;
++ int ddigest_type;
++
++ /* All 6 protected by iscsi_rd_lock */
++ unsigned short rd_state;
++ unsigned short rd_data_ready:1;
++ /* Let's save some cache footprint by putting them here */
++ unsigned short closing:1;
++ unsigned short active_close:1;
++ unsigned short deleting:1;
++ unsigned short conn_tm_active:1;
++
++ struct list_head rd_list_entry;
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ struct task_struct *rd_task;
++#endif
++
++ unsigned long last_rcv_time;
++
++ /*
++ * All are unprotected, since accessed only from a single read
++ * thread.
++ */
++ struct iscsi_cmnd *read_cmnd;
++ struct msghdr read_msg;
++ u32 read_size;
++ int read_state;
++ struct iovec *read_iov;
++ struct task_struct *rx_task;
++ uint32_t rpadding;
++
++ struct iscsi_target *target;
++
++ struct list_head conn_list_entry; /* list entry in session conn_list */
++
++ /* All protected by target_mutex, where necessary */
++ struct iscsi_conn *conn_reinst_successor;
++ struct list_head reinst_pending_cmd_list;
++
++ wait_queue_head_t read_state_waitQ;
++ struct completion ready_to_free;
++
++ /* Doesn't need any protection */
++ u16 cid;
++
++ struct delayed_work nop_in_delayed_work;
++ unsigned int nop_in_interval; /* in jiffies */
++ struct list_head nop_req_list;
++ spinlock_t nop_req_list_lock;
++ u32 nop_in_ttt;
++
++ /* Don't need any protection */
++ struct kobject conn_kobj;
++ struct completion conn_kobj_release_cmpl;
++};
++
++struct iscsi_pdu {
++ struct iscsi_hdr bhs;
++ void *ahs;
++ unsigned int ahssize;
++ unsigned int datasize;
++};
++
++typedef void (iscsi_show_info_t)(struct seq_file *seq,
++ struct iscsi_target *target);
++
++/** Commands' states **/
++
++/* New command and SCST processes it */
++#define ISCSI_CMD_STATE_NEW 0
++
++/* SCST processes cmd after scst_rx_cmd() */
++#define ISCSI_CMD_STATE_RX_CMD 1
++
++/* The command returned from preprocessing_done() */
++#define ISCSI_CMD_STATE_AFTER_PREPROC 2
++
++/* The command is waiting for session or connection reinstatement finished */
++#define ISCSI_CMD_STATE_REINST_PENDING 3
++
++/* scst_restart_cmd() called and SCST processing it */
++#define ISCSI_CMD_STATE_RESTARTED 4
++
++/* SCST done processing */
++#define ISCSI_CMD_STATE_PROCESSED 5
++
++/* AEN processing */
++#define ISCSI_CMD_STATE_AEN 6
++
++/* Out of SCST core preliminary completed */
++#define ISCSI_CMD_STATE_OUT_OF_SCST_PRELIM_COMPL 7
++
++/*
++ * Most of the fields don't need any protection, since accessed from only a
++ * single thread, except where noted.
++ *
++ * ToDo: Eventually split the request and response structures into 2 separate
++ * structures and stop this IET-derived garbage.
++ */
++struct iscsi_cmnd {
++ struct iscsi_conn *conn;
++
++ /*
++ * Some flags used under conn->write_list_lock, but all modified only
++ * from single read thread or when there are no references to cmd.
++ */
++ unsigned int hashed:1;
++ unsigned int should_close_conn:1;
++ unsigned int should_close_all_conn:1;
++ unsigned int pending:1;
++ unsigned int own_sg:1;
++ unsigned int on_write_list:1;
++ unsigned int write_processing_started:1;
++ unsigned int force_cleanup_done:1;
++ unsigned int dec_active_cmds:1;
++ unsigned int ddigest_checked:1;
++#ifdef CONFIG_SCST_EXTRACHECKS
++ unsigned int on_rx_digest_list:1;
++ unsigned int release_called:1;
++#endif
++
++	/*
++	 * We assume that preliminary command completion is tested by
++	 * comparing prelim_compl_flags with 0. Otherwise, because of the
++	 * gap between setting different flags, a race is possible, e.g.
++	 * a command could be sent to the SCST core as PRELIM_COMPLETED
++	 * before it has been aborted there, with the result that a wrong
++	 * success status is sent to the initiator.
++	 */
++#define ISCSI_CMD_ABORTED 0
++#define ISCSI_CMD_PRELIM_COMPLETED 1
++ unsigned long prelim_compl_flags;
++
++ struct list_head hash_list_entry;
++
++ /*
++ * Unions are for readability and grepability and to save some
++ * cache footprint.
++ */
++
++ union {
++		/*
++		 * Used only to abort responses not yet sent. Usage in
++		 * cmnd_done() is only a side effect to allow lockless
++		 * access to this list from only a single thread at any
++		 * time. So, all responses live in the parent until its
++		 * last reference is put.
++		 */
++ struct list_head rsp_cmd_list;
++ struct list_head rsp_cmd_list_entry;
++ };
++
++ union {
++ struct list_head pending_list_entry;
++ struct list_head reinst_pending_cmd_list_entry;
++ };
++
++ union {
++ struct list_head write_list_entry;
++ struct list_head write_timeout_list_entry;
++ };
++
++ /* Both protected by conn->write_list_lock */
++ unsigned int on_write_timeout_list:1;
++ unsigned long write_start;
++
++ /*
++ * All unprotected, since could be accessed from only a single
++	 * thread at a time
++ */
++ struct iscsi_cmnd *parent_req;
++ struct iscsi_cmnd *cmd_req;
++
++ /*
++ * All unprotected, since could be accessed from only a single
++	 * thread at a time
++ */
++ union {
++ /* Request only fields */
++ struct {
++ struct list_head rx_ddigest_cmd_list;
++ struct list_head rx_ddigest_cmd_list_entry;
++
++ int scst_state;
++ union {
++ struct scst_cmd *scst_cmd;
++ struct scst_aen *scst_aen;
++ };
++
++ struct iscsi_cmnd *main_rsp;
++
++			/*
++			 * Protected on modification by conn->write_list_lock and
++			 * modified independently of the above field, hence the
++			 * alignment.
++			 */
++ int not_processed_rsp_cnt
++ __attribute__((aligned(sizeof(long))));
++ };
++
++ /* Response only fields */
++ struct {
++ struct scatterlist rsp_sg[2];
++ struct iscsi_sense_data sense_hdr;
++ };
++ };
++
++ atomic_t ref_cnt;
++#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++ atomic_t net_ref_cnt;
++#endif
++
++ struct iscsi_pdu pdu;
++
++ struct scatterlist *sg;
++ int sg_cnt;
++ unsigned int bufflen;
++ u32 r2t_sn;
++ unsigned int r2t_len_to_receive;
++ unsigned int r2t_len_to_send;
++ unsigned int outstanding_r2t;
++ u32 target_task_tag;
++ __be32 hdigest;
++ __be32 ddigest;
++
++ struct list_head cmd_list_entry;
++ struct list_head nop_req_list_entry;
++
++ unsigned int not_received_data_len;
++};
++
++/* Max time to wait for our response satisfied for aborted commands */
++#define ISCSI_TM_DATA_WAIT_TIMEOUT (10 * HZ)
++
++/*
++ * Needed addition to all timeouts to complete a burst of commands at once.
++ * Otherwise, part of the burst could only time out after double the timeout.
++ */
++#define ISCSI_ADD_SCHED_TIME HZ
++
++#define ISCSI_CTR_OPEN_STATE_CLOSED 0
++#define ISCSI_CTR_OPEN_STATE_OPEN 1
++#define ISCSI_CTR_OPEN_STATE_CLOSING 2
++
++extern struct mutex target_mgmt_mutex;
++
++extern int ctr_open_state;
++extern const struct file_operations ctr_fops;
++
++extern spinlock_t iscsi_rd_lock;
++extern struct list_head iscsi_rd_list;
++extern wait_queue_head_t iscsi_rd_waitQ;
++
++extern spinlock_t iscsi_wr_lock;
++extern struct list_head iscsi_wr_list;
++extern wait_queue_head_t iscsi_wr_waitQ;
++
++/* iscsi.c */
++extern struct iscsi_cmnd *cmnd_alloc(struct iscsi_conn *,
++ struct iscsi_cmnd *parent);
++extern int cmnd_rx_start(struct iscsi_cmnd *);
++extern int cmnd_rx_continue(struct iscsi_cmnd *req);
++extern void cmnd_rx_end(struct iscsi_cmnd *);
++extern void cmnd_tx_start(struct iscsi_cmnd *);
++extern void cmnd_tx_end(struct iscsi_cmnd *);
++extern void req_cmnd_release_force(struct iscsi_cmnd *req);
++extern void rsp_cmnd_release(struct iscsi_cmnd *);
++extern void cmnd_done(struct iscsi_cmnd *cmnd);
++extern void conn_abort(struct iscsi_conn *conn);
++extern void iscsi_restart_cmnd(struct iscsi_cmnd *cmnd);
++extern void iscsi_fail_data_waiting_cmnd(struct iscsi_cmnd *cmnd);
++extern void iscsi_send_nop_in(struct iscsi_conn *conn);
++extern int iscsi_preliminary_complete(struct iscsi_cmnd *req,
++ struct iscsi_cmnd *orig_req, bool get_data);
++extern int set_scst_preliminary_status_rsp(struct iscsi_cmnd *req,
++ bool get_data, int key, int asc, int ascq);
++
++/* conn.c */
++extern struct kobj_type iscsi_conn_ktype;
++extern struct iscsi_conn *conn_lookup(struct iscsi_session *, u16);
++extern void conn_reinst_finished(struct iscsi_conn *);
++extern int __add_conn(struct iscsi_session *, struct iscsi_kern_conn_info *);
++extern int __del_conn(struct iscsi_session *, struct iscsi_kern_conn_info *);
++extern int conn_free(struct iscsi_conn *);
++extern void iscsi_make_conn_rd_active(struct iscsi_conn *conn);
++#define ISCSI_CONN_ACTIVE_CLOSE 1
++#define ISCSI_CONN_DELETING 2
++extern void __mark_conn_closed(struct iscsi_conn *, int);
++extern void mark_conn_closed(struct iscsi_conn *);
++extern void iscsi_make_conn_wr_active(struct iscsi_conn *);
++extern void iscsi_check_tm_data_wait_timeouts(struct iscsi_conn *conn,
++ bool force);
++extern void __iscsi_write_space_ready(struct iscsi_conn *conn);
++
++/* nthread.c */
++extern int iscsi_send(struct iscsi_conn *conn);
++#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++extern void iscsi_get_page_callback(struct page *page);
++extern void iscsi_put_page_callback(struct page *page);
++#endif
++extern int istrd(void *arg);
++extern int istwr(void *arg);
++extern void iscsi_task_mgmt_affected_cmds_done(struct scst_mgmt_cmd *scst_mcmd);
++extern void req_add_to_write_timeout_list(struct iscsi_cmnd *req);
++
++/* target.c */
++extern const struct attribute *iscsi_tgt_attrs[];
++extern int iscsi_enable_target(struct scst_tgt *scst_tgt, bool enable);
++extern bool iscsi_is_target_enabled(struct scst_tgt *scst_tgt);
++extern ssize_t iscsi_sysfs_send_event(uint32_t tid,
++ enum iscsi_kern_event_code code,
++ const char *param1, const char *param2, void **data);
++extern struct iscsi_target *target_lookup_by_id(u32);
++extern int __add_target(struct iscsi_kern_target_info *);
++extern int __del_target(u32 id);
++extern ssize_t iscsi_sysfs_add_target(const char *target_name, char *params);
++extern ssize_t iscsi_sysfs_del_target(const char *target_name);
++extern ssize_t iscsi_sysfs_mgmt_cmd(char *cmd);
++extern void target_del_session(struct iscsi_target *target,
++ struct iscsi_session *session, int flags);
++extern void target_del_all_sess(struct iscsi_target *target, int flags);
++extern void target_del_all(void);
++
++/* config.c */
++extern const struct attribute *iscsi_attrs[];
++extern int iscsi_add_attr(struct iscsi_target *target,
++ const struct iscsi_kern_attr *user_info);
++extern void __iscsi_del_attr(struct iscsi_target *target,
++ struct iscsi_attr *tgt_attr);
++
++/* session.c */
++extern const struct attribute *iscsi_sess_attrs[];
++extern const struct file_operations session_seq_fops;
++extern struct iscsi_session *session_lookup(struct iscsi_target *, u64);
++extern void sess_reinst_finished(struct iscsi_session *);
++extern int __add_session(struct iscsi_target *,
++ struct iscsi_kern_session_info *);
++extern int __del_session(struct iscsi_target *, u64);
++extern int session_free(struct iscsi_session *session, bool del);
++
++/* params.c */
++extern const char *iscsi_get_digest_name(int val, char *res);
++extern const char *iscsi_get_bool_value(int val);
++extern int iscsi_params_set(struct iscsi_target *,
++ struct iscsi_kern_params_info *, int);
++
++/* event.c */
++extern int event_send(u32, u64, u32, u32, enum iscsi_kern_event_code,
++ const char *param1, const char *param2);
++extern int event_init(void);
++extern void event_exit(void);
++
++#define get_pgcnt(size, offset) \
++ ((((size) + ((offset) & ~PAGE_MASK)) + PAGE_SIZE - 1) >> PAGE_SHIFT)
++
++static inline void iscsi_cmnd_get_length(struct iscsi_pdu *pdu)
++{
++#if defined(__BIG_ENDIAN)
++ pdu->ahssize = pdu->bhs.length.ahslength * 4;
++ pdu->datasize = pdu->bhs.length.datalength;
++#elif defined(__LITTLE_ENDIAN)
++ pdu->ahssize = ((__force __u32)pdu->bhs.length & 0xff) * 4;
++ pdu->datasize = be32_to_cpu((__force __be32)((__force __u32)pdu->bhs.length & ~0xff));
++#else
++#error
++#endif
++}
++
++static inline void iscsi_cmnd_set_length(struct iscsi_pdu *pdu)
++{
++#if defined(__BIG_ENDIAN)
++ pdu->bhs.length.ahslength = pdu->ahssize / 4;
++ pdu->bhs.length.datalength = pdu->datasize;
++#elif defined(__LITTLE_ENDIAN)
++ pdu->bhs.length = cpu_to_be32(pdu->datasize) | (__force __be32)(pdu->ahssize / 4);
++#else
++#error
++#endif
++}
++
++extern struct scst_tgt_template iscsi_template;
++
++/*
++ * Skip this command if result is not 0. Must be called under
++ * corresponding lock.
++ */
++static inline bool cmnd_get_check(struct iscsi_cmnd *cmnd)
++{
++ int r = atomic_inc_return(&cmnd->ref_cnt);
++ int res;
++ if (unlikely(r == 1)) {
++ TRACE_DBG("cmnd %p is being destroyed", cmnd);
++ atomic_dec(&cmnd->ref_cnt);
++ res = 1;
++ /* Necessary code is serialized by locks in cmnd_done() */
++ } else {
++ TRACE_DBG("cmnd %p, new ref_cnt %d", cmnd,
++ atomic_read(&cmnd->ref_cnt));
++ res = 0;
++ }
++ return res;
++}
++
++static inline void cmnd_get(struct iscsi_cmnd *cmnd)
++{
++ atomic_inc(&cmnd->ref_cnt);
++ TRACE_DBG("cmnd %p, new cmnd->ref_cnt %d", cmnd,
++ atomic_read(&cmnd->ref_cnt));
++ /*
++ * For the same reason as in kref_get(). Let's be safe and
++ * always do it.
++ */
++ smp_mb__after_atomic_inc();
++}
++
++static inline void cmnd_put(struct iscsi_cmnd *cmnd)
++{
++ TRACE_DBG("cmnd %p, new ref_cnt %d", cmnd,
++ atomic_read(&cmnd->ref_cnt)-1);
++
++ EXTRACHECKS_BUG_ON(atomic_read(&cmnd->ref_cnt) == 0);
++
++ if (atomic_dec_and_test(&cmnd->ref_cnt))
++ cmnd_done(cmnd);
++}
++
++/* conn->write_list_lock supposed to be locked and BHs off */
++static inline void cmd_add_on_write_list(struct iscsi_conn *conn,
++ struct iscsi_cmnd *cmnd)
++{
++ struct iscsi_cmnd *parent = cmnd->parent_req;
++
++ TRACE_DBG("cmnd %p", cmnd);
++ /* See comment in iscsi_restart_cmnd() */
++ EXTRACHECKS_BUG_ON(cmnd->parent_req->hashed &&
++ (cmnd_opcode(cmnd) != ISCSI_OP_R2T));
++ list_add_tail(&cmnd->write_list_entry, &conn->write_list);
++ cmnd->on_write_list = 1;
++
++ parent->not_processed_rsp_cnt++;
++ TRACE_DBG("not processed rsp cnt %d (parent %p)",
++ parent->not_processed_rsp_cnt, parent);
++}
++
++/* conn->write_list_lock supposed to be locked and BHs off */
++static inline void cmd_del_from_write_list(struct iscsi_cmnd *cmnd)
++{
++ struct iscsi_cmnd *parent = cmnd->parent_req;
++
++ TRACE_DBG("%p", cmnd);
++ list_del(&cmnd->write_list_entry);
++ cmnd->on_write_list = 0;
++
++ parent->not_processed_rsp_cnt--;
++ TRACE_DBG("not processed rsp cnt %d (parent %p)",
++ parent->not_processed_rsp_cnt, parent);
++ EXTRACHECKS_BUG_ON(parent->not_processed_rsp_cnt < 0);
++}
++
++static inline void cmd_add_on_rx_ddigest_list(struct iscsi_cmnd *req,
++ struct iscsi_cmnd *cmnd)
++{
++ TRACE_DBG("Adding RX ddigest cmd %p to digest list "
++ "of req %p", cmnd, req);
++ list_add_tail(&cmnd->rx_ddigest_cmd_list_entry,
++ &req->rx_ddigest_cmd_list);
++#ifdef CONFIG_SCST_EXTRACHECKS
++ cmnd->on_rx_digest_list = 1;
++#endif
++}
++
++static inline void cmd_del_from_rx_ddigest_list(struct iscsi_cmnd *cmnd)
++{
++ TRACE_DBG("Deleting RX digest cmd %p from digest list", cmnd);
++ list_del(&cmnd->rx_ddigest_cmd_list_entry);
++#ifdef CONFIG_SCST_EXTRACHECKS
++ cmnd->on_rx_digest_list = 0;
++#endif
++}
++
++static inline int test_write_ready(struct iscsi_conn *conn)
++{
++ /*
++	 * No need for write_list protection; in the worst case we will be
++ * restarted again.
++ */
++ return !list_empty(&conn->write_list) || conn->write_cmnd;
++}
++
++static inline void conn_get(struct iscsi_conn *conn)
++{
++ atomic_inc(&conn->conn_ref_cnt);
++ TRACE_DBG("conn %p, new conn_ref_cnt %d", conn,
++ atomic_read(&conn->conn_ref_cnt));
++ /*
++ * For the same reason as in kref_get(). Let's be safe and
++ * always do it.
++ */
++ smp_mb__after_atomic_inc();
++}
++
++static inline void conn_put(struct iscsi_conn *conn)
++{
++ TRACE_DBG("conn %p, new conn_ref_cnt %d", conn,
++ atomic_read(&conn->conn_ref_cnt)-1);
++ BUG_ON(atomic_read(&conn->conn_ref_cnt) == 0);
++
++ /*
++ * Make it always ordered to protect from undesired side effects like
++	 * accessing a conn just destroyed by close_conn(), caused by reordering
++ * of this atomic_dec().
++ */
++ smp_mb__before_atomic_dec();
++ atomic_dec(&conn->conn_ref_cnt);
++}
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++extern void iscsi_extracheck_is_rd_thread(struct iscsi_conn *conn);
++extern void iscsi_extracheck_is_wr_thread(struct iscsi_conn *conn);
++#else
++static inline void iscsi_extracheck_is_rd_thread(struct iscsi_conn *conn) {}
++static inline void iscsi_extracheck_is_wr_thread(struct iscsi_conn *conn) {}
++#endif
++
++#endif /* __ISCSI_H__ */
+diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi_hdr.h linux-2.6.36/drivers/scst/iscsi-scst/iscsi_hdr.h
+--- orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi_hdr.h
++++ linux-2.6.36/drivers/scst/iscsi-scst/iscsi_hdr.h
+@@ -0,0 +1,525 @@
++/*
++ * Copyright (C) 2002 - 2003 Ardis Technolgies <roman@ardistech.com>
++ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __ISCSI_HDR_H__
++#define __ISCSI_HDR_H__
++
++#include <linux/types.h>
++#include <asm/byteorder.h>
++
++#define ISCSI_VERSION 0
++
++#ifndef __packed
++#define __packed __attribute__ ((packed))
++#endif
++
++/* iSCSI command PDU header. See also section 10.3 in RFC 3720. */
++struct iscsi_hdr {
++ u8 opcode; /* 0 */
++ u8 flags;
++ u8 spec1[2];
++#if defined(__BIG_ENDIAN_BITFIELD)
++ struct { /* 4 */
++ unsigned ahslength:8;
++ unsigned datalength:24;
++ } length;
++#elif defined(__LITTLE_ENDIAN_BITFIELD)
++ __be32 length; /* 4 */
++#endif
++ __be64 lun; /* 8 */
++ __be32 itt; /* 16 */
++ __be32 ttt; /* 20 */
++
++ /*
++	 * SN fields stay converted to CPU form most of the time and are only
++	 * received and sent in BE form.
++ */
++ u32 sn; /* 24 */
++ u32 exp_sn; /* 28 */
++ u32 max_sn; /* 32 */
++
++ __be32 spec3[3]; /* 36 */
++} __packed; /* 48 */
++
++/* Opcode encoding bits */
++#define ISCSI_OP_RETRY 0x80
++#define ISCSI_OP_IMMEDIATE 0x40
++#define ISCSI_OPCODE_MASK 0x3F
++
++/* Client to Server Message Opcode values */
++#define ISCSI_OP_NOP_OUT 0x00
++#define ISCSI_OP_SCSI_CMD 0x01
++#define ISCSI_OP_SCSI_TASK_MGT_MSG 0x02
++#define ISCSI_OP_LOGIN_CMD 0x03
++#define ISCSI_OP_TEXT_CMD 0x04
++#define ISCSI_OP_SCSI_DATA_OUT 0x05
++#define ISCSI_OP_LOGOUT_CMD 0x06
++#define ISCSI_OP_SNACK_CMD 0x10
++
++/* Server to Client Message Opcode values */
++#define ISCSI_OP_NOP_IN 0x20
++#define ISCSI_OP_SCSI_RSP 0x21
++#define ISCSI_OP_SCSI_TASK_MGT_RSP 0x22
++#define ISCSI_OP_LOGIN_RSP 0x23
++#define ISCSI_OP_TEXT_RSP 0x24
++#define ISCSI_OP_SCSI_DATA_IN 0x25
++#define ISCSI_OP_LOGOUT_RSP 0x26
++#define ISCSI_OP_R2T 0x31
++#define ISCSI_OP_ASYNC_MSG 0x32
++#define ISCSI_OP_REJECT 0x3f
++
++struct iscsi_ahs_hdr {
++ __be16 ahslength;
++ u8 ahstype;
++} __packed;
++
++#define ISCSI_AHSTYPE_CDB 1
++#define ISCSI_AHSTYPE_RLENGTH 2
++
++union iscsi_sid {
++ struct {
++ u8 isid[6]; /* Initiator Session ID */
++ __be16 tsih; /* Target Session ID */
++ } id;
++ __be64 id64;
++} __packed;
++
++struct iscsi_scsi_cmd_hdr {
++ u8 opcode;
++ u8 flags;
++ __be16 rsvd1;
++ u8 ahslength;
++ u8 datalength[3];
++ __be64 lun;
++ __be32 itt;
++ __be32 data_length;
++ u32 cmd_sn;
++ u32 exp_stat_sn;
++ u8 scb[16];
++} __packed;
++
++#define ISCSI_CMD_FINAL 0x80
++#define ISCSI_CMD_READ 0x40
++#define ISCSI_CMD_WRITE 0x20
++#define ISCSI_CMD_ATTR_MASK 0x07
++#define ISCSI_CMD_UNTAGGED 0x00
++#define ISCSI_CMD_SIMPLE 0x01
++#define ISCSI_CMD_ORDERED 0x02
++#define ISCSI_CMD_HEAD_OF_QUEUE 0x03
++#define ISCSI_CMD_ACA 0x04
++
++struct iscsi_cdb_ahdr {
++ __be16 ahslength;
++ u8 ahstype;
++ u8 reserved;
++ u8 cdb[0];
++} __packed;
++
++struct iscsi_rlength_ahdr {
++ __be16 ahslength;
++ u8 ahstype;
++ u8 reserved;
++ __be32 read_length;
++} __packed;
++
++struct iscsi_scsi_rsp_hdr {
++ u8 opcode;
++ u8 flags;
++ u8 response;
++ u8 cmd_status;
++ u8 ahslength;
++ u8 datalength[3];
++ u32 rsvd1[2];
++ __be32 itt;
++ __be32 snack;
++ u32 stat_sn;
++ u32 exp_cmd_sn;
++ u32 max_cmd_sn;
++ u32 exp_data_sn;
++ __be32 bi_residual_count;
++ __be32 residual_count;
++} __packed;
++
++#define ISCSI_FLG_RESIDUAL_UNDERFLOW 0x02
++#define ISCSI_FLG_RESIDUAL_OVERFLOW 0x04
++#define ISCSI_FLG_BIRESIDUAL_UNDERFLOW 0x08
++#define ISCSI_FLG_BIRESIDUAL_OVERFLOW 0x10
++
++#define ISCSI_RESPONSE_COMMAND_COMPLETED 0x00
++#define ISCSI_RESPONSE_TARGET_FAILURE 0x01
++
++struct iscsi_sense_data {
++ __be16 length;
++ u8 data[0];
++} __packed;
++
++struct iscsi_task_mgt_hdr {
++ u8 opcode;
++ u8 function;
++ __be16 rsvd1;
++ u8 ahslength;
++ u8 datalength[3];
++ __be64 lun;
++ __be32 itt;
++ __be32 rtt;
++ u32 cmd_sn;
++ u32 exp_stat_sn;
++ u32 ref_cmd_sn;
++ u32 exp_data_sn;
++ u32 rsvd2[2];
++} __packed;
++
++#define ISCSI_FUNCTION_MASK 0x7f
++
++#define ISCSI_FUNCTION_ABORT_TASK 1
++#define ISCSI_FUNCTION_ABORT_TASK_SET 2
++#define ISCSI_FUNCTION_CLEAR_ACA 3
++#define ISCSI_FUNCTION_CLEAR_TASK_SET 4
++#define ISCSI_FUNCTION_LOGICAL_UNIT_RESET 5
++#define ISCSI_FUNCTION_TARGET_WARM_RESET 6
++#define ISCSI_FUNCTION_TARGET_COLD_RESET 7
++#define ISCSI_FUNCTION_TASK_REASSIGN 8
++
++struct iscsi_task_rsp_hdr {
++ u8 opcode;
++ u8 flags;
++ u8 response;
++ u8 rsvd1;
++ u8 ahslength;
++ u8 datalength[3];
++ u32 rsvd2[2];
++ __be32 itt;
++ u32 rsvd3;
++ u32 stat_sn;
++ u32 exp_cmd_sn;
++ u32 max_cmd_sn;
++ u32 rsvd4[3];
++} __packed;
++
++#define ISCSI_RESPONSE_FUNCTION_COMPLETE 0
++#define ISCSI_RESPONSE_UNKNOWN_TASK 1
++#define ISCSI_RESPONSE_UNKNOWN_LUN 2
++#define ISCSI_RESPONSE_TASK_ALLEGIANT 3
++#define ISCSI_RESPONSE_ALLEGIANCE_REASSIGNMENT_UNSUPPORTED 4
++#define ISCSI_RESPONSE_FUNCTION_UNSUPPORTED 5
++#define ISCSI_RESPONSE_NO_AUTHORIZATION 6
++#define ISCSI_RESPONSE_FUNCTION_REJECTED 255
++
++struct iscsi_data_out_hdr {
++ u8 opcode;
++ u8 flags;
++ u16 rsvd1;
++ u8 ahslength;
++ u8 datalength[3];
++ __be64 lun;
++ __be32 itt;
++ __be32 ttt;
++ u32 rsvd2;
++ u32 exp_stat_sn;
++ u32 rsvd3;
++ __be32 data_sn;
++ __be32 buffer_offset;
++ u32 rsvd4;
++} __packed;
++
++struct iscsi_data_in_hdr {
++ u8 opcode;
++ u8 flags;
++ u8 rsvd1;
++ u8 cmd_status;
++ u8 ahslength;
++ u8 datalength[3];
++ u32 rsvd2[2];
++ __be32 itt;
++ __be32 ttt;
++ u32 stat_sn;
++ u32 exp_cmd_sn;
++ u32 max_cmd_sn;
++ __be32 data_sn;
++ __be32 buffer_offset;
++ __be32 residual_count;
++} __packed;
++
++#define ISCSI_FLG_STATUS 0x01
++
++struct iscsi_r2t_hdr {
++ u8 opcode;
++ u8 flags;
++ u16 rsvd1;
++ u8 ahslength;
++ u8 datalength[3];
++ __be64 lun;
++ __be32 itt;
++ __be32 ttt;
++ u32 stat_sn;
++ u32 exp_cmd_sn;
++ u32 max_cmd_sn;
++ u32 r2t_sn;
++ __be32 buffer_offset;
++ __be32 data_length;
++} __packed;
++
++struct iscsi_async_msg_hdr {
++ u8 opcode;
++ u8 flags;
++ u16 rsvd1;
++ u8 ahslength;
++ u8 datalength[3];
++ __be64 lun;
++ __be32 ffffffff;
++ u32 rsvd2;
++ u32 stat_sn;
++ u32 exp_cmd_sn;
++ u32 max_cmd_sn;
++ u8 async_event;
++ u8 async_vcode;
++ __be16 param1;
++ __be16 param2;
++ __be16 param3;
++ u32 rsvd3;
++} __packed;
++
++#define ISCSI_ASYNC_SCSI 0
++#define ISCSI_ASYNC_LOGOUT 1
++#define ISCSI_ASYNC_DROP_CONNECTION 2
++#define ISCSI_ASYNC_DROP_SESSION 3
++#define ISCSI_ASYNC_PARAM_REQUEST 4
++#define ISCSI_ASYNC_VENDOR 255
++
++struct iscsi_text_req_hdr {
++ u8 opcode;
++ u8 flags;
++ u16 rsvd1;
++ u8 ahslength;
++ u8 datalength[3];
++ u32 rsvd2[2];
++ __be32 itt;
++ __be32 ttt;
++ u32 cmd_sn;
++ u32 exp_stat_sn;
++ u32 rsvd3[4];
++} __packed;
++
++struct iscsi_text_rsp_hdr {
++ u8 opcode;
++ u8 flags;
++ u16 rsvd1;
++ u8 ahslength;
++ u8 datalength[3];
++ u32 rsvd2[2];
++ __be32 itt;
++ __be32 ttt;
++ u32 stat_sn;
++ u32 exp_cmd_sn;
++ u32 max_cmd_sn;
++ u32 rsvd3[3];
++} __packed;
++
++struct iscsi_login_req_hdr {
++ u8 opcode;
++ u8 flags;
++ u8 max_version; /* Max. version supported */
++ u8 min_version; /* Min. version supported */
++ u8 ahslength;
++ u8 datalength[3];
++ union iscsi_sid sid;
++ __be32 itt; /* Initiator Task Tag */
++ __be16 cid; /* Connection ID */
++ u16 rsvd1;
++ u32 cmd_sn;
++ u32 exp_stat_sn;
++ u32 rsvd2[4];
++} __packed;
++
++struct iscsi_login_rsp_hdr {
++ u8 opcode;
++ u8 flags;
++ u8 max_version; /* Max. version supported */
++ u8 active_version; /* Active version */
++ u8 ahslength;
++ u8 datalength[3];
++ union iscsi_sid sid;
++ __be32 itt; /* Initiator Task Tag */
++ u32 rsvd1;
++ u32 stat_sn;
++ u32 exp_cmd_sn;
++ u32 max_cmd_sn;
++ u8 status_class; /* see Login RSP Status classes below */
++ u8 status_detail; /* see Login RSP Status details below */
++ u8 rsvd2[10];
++} __packed;
++
++#define ISCSI_FLG_FINAL 0x80
++#define ISCSI_FLG_TRANSIT 0x80
++#define ISCSI_FLG_CSG_SECURITY 0x00
++#define ISCSI_FLG_CSG_LOGIN 0x04
++#define ISCSI_FLG_CSG_FULL_FEATURE 0x0c
++#define ISCSI_FLG_CSG_MASK 0x0c
++#define ISCSI_FLG_NSG_SECURITY 0x00
++#define ISCSI_FLG_NSG_LOGIN 0x01
++#define ISCSI_FLG_NSG_FULL_FEATURE 0x03
++#define ISCSI_FLG_NSG_MASK 0x03
++
++/* Login Status response classes */
++#define ISCSI_STATUS_SUCCESS 0x00
++#define ISCSI_STATUS_REDIRECT 0x01
++#define ISCSI_STATUS_INITIATOR_ERR 0x02
++#define ISCSI_STATUS_TARGET_ERR 0x03
++
++/* Login Status response detail codes */
++/* Class-0 (Success) */
++#define ISCSI_STATUS_ACCEPT 0x00
++
++/* Class-1 (Redirection) */
++#define ISCSI_STATUS_TGT_MOVED_TEMP 0x01
++#define ISCSI_STATUS_TGT_MOVED_PERM 0x02
++
++/* Class-2 (Initiator Error) */
++#define ISCSI_STATUS_INIT_ERR 0x00
++#define ISCSI_STATUS_AUTH_FAILED 0x01
++#define ISCSI_STATUS_TGT_FORBIDDEN 0x02
++#define ISCSI_STATUS_TGT_NOT_FOUND 0x03
++#define ISCSI_STATUS_TGT_REMOVED 0x04
++#define ISCSI_STATUS_NO_VERSION 0x05
++#define ISCSI_STATUS_TOO_MANY_CONN 0x06
++#define ISCSI_STATUS_MISSING_FIELDS 0x07
++#define ISCSI_STATUS_CONN_ADD_FAILED 0x08
++#define ISCSI_STATUS_INV_SESSION_TYPE 0x09
++#define ISCSI_STATUS_SESSION_NOT_FOUND 0x0a
++#define ISCSI_STATUS_INV_REQ_TYPE 0x0b
++
++/* Class-3 (Target Error) */
++#define ISCSI_STATUS_TARGET_ERROR 0x00
++#define ISCSI_STATUS_SVC_UNAVAILABLE 0x01
++#define ISCSI_STATUS_NO_RESOURCES 0x02
++
++struct iscsi_logout_req_hdr {
++ u8 opcode;
++ u8 flags;
++ u16 rsvd1;
++ u8 ahslength;
++ u8 datalength[3];
++ u32 rsvd2[2];
++ __be32 itt;
++ __be16 cid;
++ u16 rsvd3;
++ u32 cmd_sn;
++ u32 exp_stat_sn;
++ u32 rsvd4[4];
++} __packed;
++
++struct iscsi_logout_rsp_hdr {
++ u8 opcode;
++ u8 flags;
++ u8 response;
++ u8 rsvd1;
++ u8 ahslength;
++ u8 datalength[3];
++ u32 rsvd2[2];
++ __be32 itt;
++ u32 rsvd3;
++ u32 stat_sn;
++ u32 exp_cmd_sn;
++ u32 max_cmd_sn;
++ u32 rsvd4;
++ __be16 time2wait;
++ __be16 time2retain;
++ u32 rsvd5;
++} __packed;
++
++struct iscsi_snack_req_hdr {
++ u8 opcode;
++ u8 flags;
++ u16 rsvd1;
++ u8 ahslength;
++ u8 datalength[3];
++ u32 rsvd2[2];
++ __be32 itt;
++ __be32 ttt;
++ u32 rsvd3;
++ u32 exp_stat_sn;
++ u32 rsvd4[2];
++ __be32 beg_run;
++ __be32 run_length;
++} __packed;
++
++struct iscsi_reject_hdr {
++ u8 opcode;
++ u8 flags;
++ u8 reason;
++ u8 rsvd1;
++ u8 ahslength;
++ u8 datalength[3];
++ u32 rsvd2[2];
++ __be32 ffffffff;
++ __be32 rsvd3;
++ u32 stat_sn;
++ u32 exp_cmd_sn;
++ u32 max_cmd_sn;
++ __be32 data_sn;
++ u32 rsvd4[2];
++} __packed;
++
++#define ISCSI_REASON_RESERVED 0x01
++#define ISCSI_REASON_DATA_DIGEST_ERROR 0x02
++#define ISCSI_REASON_DATA_SNACK_REJECT 0x03
++#define ISCSI_REASON_PROTOCOL_ERROR 0x04
++#define ISCSI_REASON_UNSUPPORTED_COMMAND 0x05
++#define ISCSI_REASON_IMMEDIATE_COMMAND_REJECT 0x06
++#define ISCSI_REASON_TASK_IN_PROGRESS 0x07
++#define ISCSI_REASON_INVALID_DATA_ACK 0x08
++#define ISCSI_REASON_INVALID_PDU_FIELD 0x09
++#define ISCSI_REASON_OUT_OF_RESOURCES 0x0a
++#define ISCSI_REASON_NEGOTIATION_RESET 0x0b
++#define ISCSI_REASON_WAITING_LOGOUT 0x0c
++
++struct iscsi_nop_out_hdr {
++ u8 opcode;
++ u8 flags;
++ u16 rsvd1;
++ u8 ahslength;
++ u8 datalength[3];
++ __be64 lun;
++ __be32 itt;
++ __be32 ttt;
++ u32 cmd_sn;
++ u32 exp_stat_sn;
++ u32 rsvd2[4];
++} __packed;
++
++struct iscsi_nop_in_hdr {
++ u8 opcode;
++ u8 flags;
++ u16 rsvd1;
++ u8 ahslength;
++ u8 datalength[3];
++ __be64 lun;
++ __be32 itt;
++ __be32 ttt;
++ u32 stat_sn;
++ u32 exp_cmd_sn;
++ u32 max_cmd_sn;
++ u32 rsvd2[3];
++} __packed;
++
++#define ISCSI_RESERVED_TAG_CPU32 (0xffffffffU)
++#define ISCSI_RESERVED_TAG (__constant_cpu_to_be32(ISCSI_RESERVED_TAG_CPU32))
++
++#define cmnd_hdr(cmnd) ((struct iscsi_scsi_cmd_hdr *) (&((cmnd)->pdu.bhs)))
++#define cmnd_opcode(cmnd) ((cmnd)->pdu.bhs.opcode & ISCSI_OPCODE_MASK)
++#define cmnd_scsicode(cmnd) (cmnd_hdr((cmnd))->scb[0])
++
++#endif /* __ISCSI_HDR_H__ */
+diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/nthread.c linux-2.6.36/drivers/scst/iscsi-scst/nthread.c
+--- orig/linux-2.6.36/drivers/scst/iscsi-scst/nthread.c
++++ linux-2.6.36/drivers/scst/iscsi-scst/nthread.c
+@@ -0,0 +1,1838 @@
++/*
++ * Network threads.
++ *
++ * Copyright (C) 2004 - 2005 FUJITA Tomonori <tomof@acm.org>
++ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/kthread.h>
++#include <asm/ioctls.h>
++#include <linux/delay.h>
++#include <net/tcp.h>
++
++#include "iscsi.h"
++#include "digest.h"
++
++enum rx_state {
++ RX_INIT_BHS, /* Must be zero for better "switch" optimization. */
++ RX_BHS,
++ RX_CMD_START,
++ RX_DATA,
++ RX_END,
++
++ RX_CMD_CONTINUE,
++ RX_INIT_HDIGEST,
++ RX_CHECK_HDIGEST,
++ RX_INIT_DDIGEST,
++ RX_CHECK_DDIGEST,
++ RX_AHS,
++ RX_PADDING,
++};
++
++enum tx_state {
++ TX_INIT = 0, /* Must be zero for better "switch" optimization. */
++ TX_BHS_DATA,
++ TX_INIT_PADDING,
++ TX_PADDING,
++ TX_INIT_DDIGEST,
++ TX_DDIGEST,
++ TX_END,
++};
++
++#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++static void iscsi_check_closewait(struct iscsi_conn *conn)
++{
++ struct iscsi_cmnd *cmnd;
++
++ TRACE_ENTRY();
++
++ TRACE_CONN_CLOSE_DBG("conn %p, sk_state %d", conn,
++ conn->sock->sk->sk_state);
++
++ if (conn->sock->sk->sk_state != TCP_CLOSE) {
++ TRACE_CONN_CLOSE_DBG("conn %p, skipping", conn);
++ goto out;
++ }
++
++ /*
++ * No data are going to be sent, so all queued buffers can be freed
++ * now. In many cases TCP does that only in close(), but we can't rely
++	 * on user space to call it.
++ */
++
++again:
++ spin_lock_bh(&conn->cmd_list_lock);
++ list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
++ struct iscsi_cmnd *rsp;
++ int restart = 0;
++
++ TRACE_CONN_CLOSE_DBG("cmd %p, scst_state %x, "
++ "r2t_len_to_receive %d, ref_cnt %d, parent_req %p, "
++ "net_ref_cnt %d, sg %p", cmnd, cmnd->scst_state,
++ cmnd->r2t_len_to_receive, atomic_read(&cmnd->ref_cnt),
++ cmnd->parent_req, atomic_read(&cmnd->net_ref_cnt),
++ cmnd->sg);
++
++ BUG_ON(cmnd->parent_req != NULL);
++
++ if (cmnd->sg != NULL) {
++ int i;
++
++ if (cmnd_get_check(cmnd))
++ continue;
++
++ for (i = 0; i < cmnd->sg_cnt; i++) {
++ struct page *page = sg_page(&cmnd->sg[i]);
++ TRACE_CONN_CLOSE_DBG("page %p, net_priv %p, "
++ "_count %d", page, page->net_priv,
++ atomic_read(&page->_count));
++
++ if (page->net_priv != NULL) {
++ if (restart == 0) {
++ spin_unlock_bh(&conn->cmd_list_lock);
++ restart = 1;
++ }
++ while (page->net_priv != NULL)
++ iscsi_put_page_callback(page);
++ }
++ }
++ cmnd_put(cmnd);
++
++ if (restart)
++ goto again;
++ }
++
++ list_for_each_entry(rsp, &cmnd->rsp_cmd_list,
++ rsp_cmd_list_entry) {
++ TRACE_CONN_CLOSE_DBG(" rsp %p, ref_cnt %d, "
++ "net_ref_cnt %d, sg %p",
++ rsp, atomic_read(&rsp->ref_cnt),
++ atomic_read(&rsp->net_ref_cnt), rsp->sg);
++
++ if ((rsp->sg != cmnd->sg) && (rsp->sg != NULL)) {
++ int i;
++
++ if (cmnd_get_check(rsp))
++ continue;
++
++ for (i = 0; i < rsp->sg_cnt; i++) {
++ struct page *page =
++ sg_page(&rsp->sg[i]);
++ TRACE_CONN_CLOSE_DBG(
++ " page %p, net_priv %p, "
++ "_count %d",
++ page, page->net_priv,
++ atomic_read(&page->_count));
++
++ if (page->net_priv != NULL) {
++ if (restart == 0) {
++ spin_unlock_bh(&conn->cmd_list_lock);
++ restart = 1;
++ }
++ while (page->net_priv != NULL)
++ iscsi_put_page_callback(page);
++ }
++ }
++ cmnd_put(rsp);
++
++ if (restart)
++ goto again;
++ }
++ }
++ }
++ spin_unlock_bh(&conn->cmd_list_lock);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++#else
++static inline void iscsi_check_closewait(struct iscsi_conn *conn) {};
++#endif
++
++static void free_pending_commands(struct iscsi_conn *conn)
++{
++ struct iscsi_session *session = conn->session;
++ struct list_head *pending_list = &session->pending_list;
++ int req_freed;
++ struct iscsi_cmnd *cmnd;
++
++ spin_lock(&session->sn_lock);
++ do {
++ req_freed = 0;
++ list_for_each_entry(cmnd, pending_list, pending_list_entry) {
++ TRACE_CONN_CLOSE_DBG("Pending cmd %p"
++ "(conn %p, cmd_sn %u, exp_cmd_sn %u)",
++ cmnd, conn, cmnd->pdu.bhs.sn,
++ session->exp_cmd_sn);
++ if ((cmnd->conn == conn) &&
++ (session->exp_cmd_sn == cmnd->pdu.bhs.sn)) {
++ TRACE_MGMT_DBG("Freeing pending cmd %p "
++ "(cmd_sn %u, exp_cmd_sn %u)",
++ cmnd, cmnd->pdu.bhs.sn,
++ session->exp_cmd_sn);
++
++ list_del(&cmnd->pending_list_entry);
++ cmnd->pending = 0;
++
++ session->exp_cmd_sn++;
++
++ spin_unlock(&session->sn_lock);
++
++ req_cmnd_release_force(cmnd);
++
++ req_freed = 1;
++ spin_lock(&session->sn_lock);
++ break;
++ }
++ }
++ } while (req_freed);
++ spin_unlock(&session->sn_lock);
++
++ return;
++}
++
++static void free_orphaned_pending_commands(struct iscsi_conn *conn)
++{
++ struct iscsi_session *session = conn->session;
++ struct list_head *pending_list = &session->pending_list;
++ int req_freed;
++ struct iscsi_cmnd *cmnd;
++
++ spin_lock(&session->sn_lock);
++ do {
++ req_freed = 0;
++ list_for_each_entry(cmnd, pending_list, pending_list_entry) {
++ TRACE_CONN_CLOSE_DBG("Pending cmd %p"
++ "(conn %p, cmd_sn %u, exp_cmd_sn %u)",
++ cmnd, conn, cmnd->pdu.bhs.sn,
++ session->exp_cmd_sn);
++ if (cmnd->conn == conn) {
++ TRACE_MGMT_DBG("Freeing orphaned pending "
++ "cmnd %p (cmd_sn %u, exp_cmd_sn %u)",
++ cmnd, cmnd->pdu.bhs.sn,
++ session->exp_cmd_sn);
++
++ list_del(&cmnd->pending_list_entry);
++ cmnd->pending = 0;
++
++ if (session->exp_cmd_sn == cmnd->pdu.bhs.sn)
++ session->exp_cmd_sn++;
++
++ spin_unlock(&session->sn_lock);
++
++ req_cmnd_release_force(cmnd);
++
++ req_freed = 1;
++ spin_lock(&session->sn_lock);
++ break;
++ }
++ }
++ } while (req_freed);
++ spin_unlock(&session->sn_lock);
++
++ return;
++}
++
++#ifdef CONFIG_SCST_DEBUG
++static void trace_conn_close(struct iscsi_conn *conn)
++{
++ struct iscsi_cmnd *cmnd;
++#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++ struct iscsi_cmnd *rsp;
++#endif
++
++#if 0
++ if (time_after(jiffies, start_waiting + 10*HZ))
++ trace_flag |= TRACE_CONN_OC_DBG;
++#endif
++
++ spin_lock_bh(&conn->cmd_list_lock);
++ list_for_each_entry(cmnd, &conn->cmd_list,
++ cmd_list_entry) {
++ TRACE_CONN_CLOSE_DBG(
++ "cmd %p, scst_cmd %p, scst_state %x, scst_cmd state "
++ "%d, r2t_len_to_receive %d, ref_cnt %d, sn %u, "
++ "parent_req %p, pending %d",
++ cmnd, cmnd->scst_cmd, cmnd->scst_state,
++ ((cmnd->parent_req == NULL) && cmnd->scst_cmd) ?
++ cmnd->scst_cmd->state : -1,
++ cmnd->r2t_len_to_receive, atomic_read(&cmnd->ref_cnt),
++ cmnd->pdu.bhs.sn, cmnd->parent_req, cmnd->pending);
++#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++ TRACE_CONN_CLOSE_DBG("net_ref_cnt %d, sg %p",
++ atomic_read(&cmnd->net_ref_cnt),
++ cmnd->sg);
++ if (cmnd->sg != NULL) {
++ int i;
++ for (i = 0; i < cmnd->sg_cnt; i++) {
++ struct page *page = sg_page(&cmnd->sg[i]);
++ TRACE_CONN_CLOSE_DBG("page %p, "
++ "net_priv %p, _count %d",
++ page, page->net_priv,
++ atomic_read(&page->_count));
++ }
++ }
++
++ BUG_ON(cmnd->parent_req != NULL);
++
++ list_for_each_entry(rsp, &cmnd->rsp_cmd_list,
++ rsp_cmd_list_entry) {
++ TRACE_CONN_CLOSE_DBG(" rsp %p, "
++ "ref_cnt %d, net_ref_cnt %d, sg %p",
++ rsp, atomic_read(&rsp->ref_cnt),
++ atomic_read(&rsp->net_ref_cnt), rsp->sg);
++ if (rsp->sg != cmnd->sg && rsp->sg) {
++ int i;
++ for (i = 0; i < rsp->sg_cnt; i++) {
++ TRACE_CONN_CLOSE_DBG(" page %p, "
++ "net_priv %p, _count %d",
++ sg_page(&rsp->sg[i]),
++ sg_page(&rsp->sg[i])->net_priv,
++ atomic_read(&sg_page(&rsp->sg[i])->
++ _count));
++ }
++ }
++ }
++#endif /* CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION */
++ }
++ spin_unlock_bh(&conn->cmd_list_lock);
++ return;
++}
++#else /* CONFIG_SCST_DEBUG */
++static void trace_conn_close(struct iscsi_conn *conn) {}
++#endif /* CONFIG_SCST_DEBUG */
++
++void iscsi_task_mgmt_affected_cmds_done(struct scst_mgmt_cmd *scst_mcmd)
++{
++ int fn = scst_mgmt_cmd_get_fn(scst_mcmd);
++ void *priv = scst_mgmt_cmd_get_tgt_priv(scst_mcmd);
++
++ TRACE_MGMT_DBG("scst_mcmd %p, fn %d, priv %p", scst_mcmd, fn, priv);
++
++ switch (fn) {
++ case SCST_NEXUS_LOSS_SESS:
++ case SCST_ABORT_ALL_TASKS_SESS:
++ {
++ struct iscsi_conn *conn = (struct iscsi_conn *)priv;
++ struct iscsi_session *sess = conn->session;
++ struct iscsi_conn *c;
++
++ if (sess->sess_reinst_successor != NULL)
++ scst_reassign_persistent_sess_states(
++ sess->sess_reinst_successor->scst_sess,
++ sess->scst_sess);
++
++ mutex_lock(&sess->target->target_mutex);
++
++ /*
++ * We can't mark sess as shutting down earlier, because until
++ * now it might have pending commands. Otherwise, in case of
++ * reinstatement, it might lead to data corruption, because
++		 * commands in the session being reinstated could be executed
++ * after commands in the new session.
++ */
++ sess->sess_shutting_down = 1;
++ list_for_each_entry(c, &sess->conn_list, conn_list_entry) {
++ if (!test_bit(ISCSI_CONN_SHUTTINGDOWN, &c->conn_aflags)) {
++ sess->sess_shutting_down = 0;
++ break;
++ }
++ }
++
++ if (conn->conn_reinst_successor != NULL) {
++ BUG_ON(!test_bit(ISCSI_CONN_REINSTATING,
++ &conn->conn_reinst_successor->conn_aflags));
++ conn_reinst_finished(conn->conn_reinst_successor);
++ conn->conn_reinst_successor = NULL;
++ } else if (sess->sess_reinst_successor != NULL) {
++ sess_reinst_finished(sess->sess_reinst_successor);
++ sess->sess_reinst_successor = NULL;
++ }
++ mutex_unlock(&sess->target->target_mutex);
++
++ complete_all(&conn->ready_to_free);
++ break;
++ }
++ default:
++ /* Nothing to do */
++ break;
++ }
++
++ return;
++}
++
++/* No locks */
++static void close_conn(struct iscsi_conn *conn)
++{
++ struct iscsi_session *session = conn->session;
++ struct iscsi_target *target = conn->target;
++ typeof(jiffies) start_waiting = jiffies;
++ typeof(jiffies) shut_start_waiting = start_waiting;
++ bool pending_reported = 0, wait_expired = 0, shut_expired = 0;
++ bool reinst;
++ uint32_t tid, cid;
++ uint64_t sid;
++
++#define CONN_PENDING_TIMEOUT ((typeof(jiffies))10*HZ)
++#define CONN_WAIT_TIMEOUT ((typeof(jiffies))10*HZ)
++#define CONN_REG_SHUT_TIMEOUT ((typeof(jiffies))125*HZ)
++#define CONN_DEL_SHUT_TIMEOUT ((typeof(jiffies))10*HZ)
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("Closing connection %p (conn_ref_cnt=%d)", conn,
++ atomic_read(&conn->conn_ref_cnt));
++
++ iscsi_extracheck_is_rd_thread(conn);
++
++ BUG_ON(!conn->closing);
++
++ if (conn->active_close) {
++		/* We want all our already sent operations to complete */
++ conn->sock->ops->shutdown(conn->sock, RCV_SHUTDOWN);
++ } else {
++ conn->sock->ops->shutdown(conn->sock,
++ RCV_SHUTDOWN|SEND_SHUTDOWN);
++ }
++
++ mutex_lock(&session->target->target_mutex);
++
++ set_bit(ISCSI_CONN_SHUTTINGDOWN, &conn->conn_aflags);
++ reinst = (conn->conn_reinst_successor != NULL);
++
++ mutex_unlock(&session->target->target_mutex);
++
++ if (reinst) {
++ int rc;
++ int lun = 0;
++
++ /* Abort all outstanding commands */
++ rc = scst_rx_mgmt_fn_lun(session->scst_sess,
++ SCST_ABORT_ALL_TASKS_SESS, (uint8_t *)&lun, sizeof(lun),
++ SCST_NON_ATOMIC, conn);
++ if (rc != 0)
++ PRINT_ERROR("SCST_ABORT_ALL_TASKS_SESS failed %d", rc);
++ } else {
++ int rc;
++ int lun = 0;
++
++ rc = scst_rx_mgmt_fn_lun(session->scst_sess,
++ SCST_NEXUS_LOSS_SESS, (uint8_t *)&lun, sizeof(lun),
++ SCST_NON_ATOMIC, conn);
++ if (rc != 0)
++ PRINT_ERROR("SCST_NEXUS_LOSS_SESS failed %d", rc);
++ }
++
++ if (conn->read_state != RX_INIT_BHS) {
++ struct iscsi_cmnd *cmnd = conn->read_cmnd;
++
++ if (cmnd->scst_state == ISCSI_CMD_STATE_RX_CMD) {
++ TRACE_CONN_CLOSE_DBG("Going to wait for cmnd %p to "
++ "change state from RX_CMD", cmnd);
++ }
++ wait_event(conn->read_state_waitQ,
++ cmnd->scst_state != ISCSI_CMD_STATE_RX_CMD);
++
++ TRACE_CONN_CLOSE_DBG("Releasing conn->read_cmnd %p (conn %p)",
++ conn->read_cmnd, conn);
++
++ conn->read_cmnd = NULL;
++ conn->read_state = RX_INIT_BHS;
++ req_cmnd_release_force(cmnd);
++ }
++
++ conn_abort(conn);
++
++ /* ToDo: not the best way to wait */
++ while (atomic_read(&conn->conn_ref_cnt) != 0) {
++ if (conn->conn_tm_active)
++ iscsi_check_tm_data_wait_timeouts(conn, true);
++
++ mutex_lock(&target->target_mutex);
++ spin_lock(&session->sn_lock);
++ if (session->tm_rsp && session->tm_rsp->conn == conn) {
++ struct iscsi_cmnd *tm_rsp = session->tm_rsp;
++ TRACE_MGMT_DBG("Dropping delayed TM rsp %p", tm_rsp);
++ session->tm_rsp = NULL;
++ session->tm_active--;
++ WARN_ON(session->tm_active < 0);
++ spin_unlock(&session->sn_lock);
++ mutex_unlock(&target->target_mutex);
++
++ rsp_cmnd_release(tm_rsp);
++ } else {
++ spin_unlock(&session->sn_lock);
++ mutex_unlock(&target->target_mutex);
++ }
++
++ /* It's safe to check it without sn_lock */
++ if (!list_empty(&session->pending_list)) {
++ TRACE_CONN_CLOSE_DBG("Disposing pending commands on "
++ "connection %p (conn_ref_cnt=%d)", conn,
++ atomic_read(&conn->conn_ref_cnt));
++
++ free_pending_commands(conn);
++
++ if (time_after(jiffies,
++ start_waiting + CONN_PENDING_TIMEOUT)) {
++ if (!pending_reported) {
++ TRACE_CONN_CLOSE("%s",
++ "Pending wait time expired");
++ pending_reported = 1;
++ }
++ free_orphaned_pending_commands(conn);
++ }
++ }
++
++ iscsi_make_conn_wr_active(conn);
++
++ /* That's for active close only, actually */
++ if (time_after(jiffies, start_waiting + CONN_WAIT_TIMEOUT) &&
++ !wait_expired) {
++ TRACE_CONN_CLOSE("Wait time expired (conn %p, "
++ "sk_state %d)",
++ conn, conn->sock->sk->sk_state);
++ conn->sock->ops->shutdown(conn->sock, SEND_SHUTDOWN);
++ wait_expired = 1;
++ shut_start_waiting = jiffies;
++ }
++
++ if (wait_expired && !shut_expired &&
++ time_after(jiffies, shut_start_waiting +
++				(conn->deleting ? CONN_DEL_SHUT_TIMEOUT :
++				CONN_REG_SHUT_TIMEOUT))) {
++ TRACE_CONN_CLOSE("Wait time after shutdown expired "
++ "(conn %p, sk_state %d)", conn,
++ conn->sock->sk->sk_state);
++ conn->sock->sk->sk_prot->disconnect(conn->sock->sk, 0);
++ shut_expired = 1;
++ }
++
++ if (conn->deleting)
++ msleep(200);
++ else
++ msleep(1000);
++
++ TRACE_CONN_CLOSE_DBG("conn %p, conn_ref_cnt %d left, "
++ "wr_state %d, exp_cmd_sn %u",
++ conn, atomic_read(&conn->conn_ref_cnt),
++ conn->wr_state, session->exp_cmd_sn);
++
++ trace_conn_close(conn);
++
++		/* It might never be called for a conn being closed */
++ __iscsi_write_space_ready(conn);
++
++ iscsi_check_closewait(conn);
++ }
++
++ write_lock_bh(&conn->sock->sk->sk_callback_lock);
++ conn->sock->sk->sk_state_change = conn->old_state_change;
++ conn->sock->sk->sk_data_ready = conn->old_data_ready;
++ conn->sock->sk->sk_write_space = conn->old_write_space;
++ write_unlock_bh(&conn->sock->sk->sk_callback_lock);
++
++ while (1) {
++ bool t;
++
++ spin_lock_bh(&iscsi_wr_lock);
++ t = (conn->wr_state == ISCSI_CONN_WR_STATE_IDLE);
++ spin_unlock_bh(&iscsi_wr_lock);
++
++ if (t && (atomic_read(&conn->conn_ref_cnt) == 0))
++ break;
++
++ TRACE_CONN_CLOSE_DBG("Waiting for wr thread (conn %p), "
++ "wr_state %x", conn, conn->wr_state);
++ msleep(50);
++ }
++
++ wait_for_completion(&conn->ready_to_free);
++
++ tid = target->tid;
++ sid = session->sid;
++ cid = conn->cid;
++
++ mutex_lock(&target->target_mutex);
++ conn_free(conn);
++ mutex_unlock(&target->target_mutex);
++
++ /*
++	 * We can't send E_CONN_CLOSE earlier, because otherwise we would
++	 * race with user space trying to destroy a session that still
++	 * has connections.
++ *
++ * !! All target, session and conn can be already dead here !!
++ */
++ TRACE_CONN_CLOSE("Notifying user space about closing connection %p",
++ conn);
++ event_send(tid, sid, cid, 0, E_CONN_CLOSE, NULL, NULL);
++
++ TRACE_EXIT();
++ return;
++}
++
++static int close_conn_thr(void *arg)
++{
++ struct iscsi_conn *conn = (struct iscsi_conn *)arg;
++
++ TRACE_ENTRY();
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ /*
++ * To satisfy iscsi_extracheck_is_rd_thread() in functions called
++ * on the connection close. It is safe, because at this point conn
++ * can't be used by any other thread.
++ */
++ conn->rd_task = current;
++#endif
++ close_conn(conn);
++
++ TRACE_EXIT();
++ return 0;
++}
++
++/* No locks */
++static void start_close_conn(struct iscsi_conn *conn)
++{
++ struct task_struct *t;
++
++ TRACE_ENTRY();
++
++ t = kthread_run(close_conn_thr, conn, "iscsi_conn_cleanup");
++ if (IS_ERR(t)) {
++ PRINT_ERROR("kthread_run() failed (%ld), closing conn %p "
++ "directly", PTR_ERR(t), conn);
++ close_conn(conn);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static inline void iscsi_conn_init_read(struct iscsi_conn *conn,
++ void __user *data, size_t len)
++{
++ conn->read_iov[0].iov_base = data;
++ conn->read_iov[0].iov_len = len;
++ conn->read_msg.msg_iov = conn->read_iov;
++ conn->read_msg.msg_iovlen = 1;
++ conn->read_size = len;
++ return;
++}
++
++static void iscsi_conn_prepare_read_ahs(struct iscsi_conn *conn,
++ struct iscsi_cmnd *cmnd)
++{
++ int asize = (cmnd->pdu.ahssize + 3) & -4;
++
++ /* ToDo: __GFP_NOFAIL ?? */
++ cmnd->pdu.ahs = kmalloc(asize, __GFP_NOFAIL|GFP_KERNEL);
++ BUG_ON(cmnd->pdu.ahs == NULL);
++ iscsi_conn_init_read(conn, (void __force __user *)cmnd->pdu.ahs, asize);
++ return;
++}
++
++static struct iscsi_cmnd *iscsi_get_send_cmnd(struct iscsi_conn *conn)
++{
++ struct iscsi_cmnd *cmnd = NULL;
++
++ spin_lock_bh(&conn->write_list_lock);
++ if (!list_empty(&conn->write_list)) {
++ cmnd = list_entry(conn->write_list.next, struct iscsi_cmnd,
++ write_list_entry);
++ cmd_del_from_write_list(cmnd);
++ cmnd->write_processing_started = 1;
++ } else {
++ spin_unlock_bh(&conn->write_list_lock);
++ goto out;
++ }
++ spin_unlock_bh(&conn->write_list_lock);
++
++ if (unlikely(test_bit(ISCSI_CMD_ABORTED,
++ &cmnd->parent_req->prelim_compl_flags))) {
++ TRACE_MGMT_DBG("Going to send acmd %p (scst cmd %p, "
++ "state %d, parent_req %p)", cmnd, cmnd->scst_cmd,
++ cmnd->scst_state, cmnd->parent_req);
++ }
++
++ if (unlikely(cmnd_opcode(cmnd) == ISCSI_OP_SCSI_TASK_MGT_RSP)) {
++#ifdef CONFIG_SCST_DEBUG
++ struct iscsi_task_mgt_hdr *req_hdr =
++ (struct iscsi_task_mgt_hdr *)&cmnd->parent_req->pdu.bhs;
++ struct iscsi_task_rsp_hdr *rsp_hdr =
++ (struct iscsi_task_rsp_hdr *)&cmnd->pdu.bhs;
++ TRACE_MGMT_DBG("Going to send TM response %p (status %d, "
++ "fn %d, parent_req %p)", cmnd, rsp_hdr->response,
++ req_hdr->function & ISCSI_FUNCTION_MASK,
++ cmnd->parent_req);
++#endif
++ }
++
++out:
++ return cmnd;
++}
++
++/* Returns number of bytes left to receive or <0 for error */
++static int do_recv(struct iscsi_conn *conn)
++{
++ int res;
++ mm_segment_t oldfs;
++ struct msghdr msg;
++ int first_len;
++
++ EXTRACHECKS_BUG_ON(conn->read_cmnd == NULL);
++
++ if (unlikely(conn->closing)) {
++ res = -EIO;
++ goto out;
++ }
++
++ /*
++ * We suppose that if sock_recvmsg() returned less data than requested,
++	 * then next time it will return -EAGAIN, so there's no point in
++	 * calling it again.
++ */
++
++restart:
++ memset(&msg, 0, sizeof(msg));
++ msg.msg_iov = conn->read_msg.msg_iov;
++ msg.msg_iovlen = conn->read_msg.msg_iovlen;
++ first_len = msg.msg_iov->iov_len;
++
++ oldfs = get_fs();
++ set_fs(get_ds());
++ res = sock_recvmsg(conn->sock, &msg, conn->read_size,
++ MSG_DONTWAIT | MSG_NOSIGNAL);
++ set_fs(oldfs);
++
++ TRACE_DBG("msg_iovlen %zd, first_len %d, read_size %d, res %d",
++ msg.msg_iovlen, first_len, conn->read_size, res);
++
++ if (res > 0) {
++ /*
++ * To save some considerable effort and CPU power we
++ * suppose that TCP functions adjust
++ * conn->read_msg.msg_iov and conn->read_msg.msg_iovlen
++ * on amount of copied data. This BUG_ON is intended
++ * to catch if it is changed in the future.
++ */
++ BUG_ON((res >= first_len) &&
++ (conn->read_msg.msg_iov->iov_len != 0));
++ conn->read_size -= res;
++ if (conn->read_size != 0) {
++ if (res >= first_len) {
++ int done = 1 + ((res - first_len) >> PAGE_SHIFT);
++ TRACE_DBG("done %d", done);
++ conn->read_msg.msg_iov += done;
++ conn->read_msg.msg_iovlen -= done;
++ }
++ }
++ res = conn->read_size;
++ } else {
++ switch (res) {
++ case -EAGAIN:
++ TRACE_DBG("EAGAIN received for conn %p", conn);
++ res = conn->read_size;
++ break;
++ case -ERESTARTSYS:
++ TRACE_DBG("ERESTARTSYS received for conn %p", conn);
++ goto restart;
++ default:
++ if (!conn->closing) {
++ PRINT_ERROR("sock_recvmsg() failed: %d", res);
++ mark_conn_closed(conn);
++ }
++ if (res == 0)
++ res = -EIO;
++ break;
++ }
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int iscsi_rx_check_ddigest(struct iscsi_conn *conn)
++{
++ struct iscsi_cmnd *cmnd = conn->read_cmnd;
++ int res;
++
++ res = do_recv(conn);
++ if (res == 0) {
++ conn->read_state = RX_END;
++
++ if (cmnd->pdu.datasize <= 16*1024) {
++ /*
++ * It's cache hot, so let's compute it inline. The
++			 * choice here is about what will expose more latency:
++ * possible cache misses or the digest calculation.
++ */
++ TRACE_DBG("cmnd %p, opcode %x: checking RX "
++ "ddigest inline", cmnd, cmnd_opcode(cmnd));
++ cmnd->ddigest_checked = 1;
++ res = digest_rx_data(cmnd);
++ if (unlikely(res != 0)) {
++ struct iscsi_cmnd *orig_req;
++ if (cmnd_opcode(cmnd) == ISCSI_OP_SCSI_DATA_OUT)
++ orig_req = cmnd->cmd_req;
++ else
++ orig_req = cmnd;
++ if (unlikely(orig_req->scst_cmd == NULL)) {
++ /* Just drop it */
++ iscsi_preliminary_complete(cmnd, orig_req, false);
++ } else {
++ set_scst_preliminary_status_rsp(orig_req, false,
++ SCST_LOAD_SENSE(iscsi_sense_crc_error));
++ /*
++ * Let's prelim complete cmnd too to
++ * handle the DATA OUT case
++ */
++ iscsi_preliminary_complete(cmnd, orig_req, false);
++ }
++ res = 0;
++ }
++ } else if (cmnd_opcode(cmnd) == ISCSI_OP_SCSI_CMD) {
++ cmd_add_on_rx_ddigest_list(cmnd, cmnd);
++ cmnd_get(cmnd);
++ } else if (cmnd_opcode(cmnd) != ISCSI_OP_SCSI_DATA_OUT) {
++ /*
++ * We could get here only for Nop-Out. ISCSI RFC
++ * doesn't specify how to deal with digest errors in
++ * this case. Let's just drop the command.
++ */
++ TRACE_DBG("cmnd %p, opcode %x: checking NOP RX "
++ "ddigest", cmnd, cmnd_opcode(cmnd));
++ res = digest_rx_data(cmnd);
++ if (unlikely(res != 0)) {
++ iscsi_preliminary_complete(cmnd, cmnd, false);
++ res = 0;
++ }
++ }
++ }
++
++ return res;
++}
++
++/* No locks, conn is rd processing */
++static int process_read_io(struct iscsi_conn *conn, int *closed)
++{
++ struct iscsi_cmnd *cmnd = conn->read_cmnd;
++ int res;
++
++ TRACE_ENTRY();
++
++ /* In case of error cmnd will be freed in close_conn() */
++
++ do {
++ switch (conn->read_state) {
++ case RX_INIT_BHS:
++ EXTRACHECKS_BUG_ON(conn->read_cmnd != NULL);
++ cmnd = cmnd_alloc(conn, NULL);
++ conn->read_cmnd = cmnd;
++ iscsi_conn_init_read(cmnd->conn,
++ (void __force __user *)&cmnd->pdu.bhs,
++ sizeof(cmnd->pdu.bhs));
++ conn->read_state = RX_BHS;
++ /* go through */
++
++ case RX_BHS:
++ res = do_recv(conn);
++ if (res == 0) {
++ iscsi_cmnd_get_length(&cmnd->pdu);
++ if (cmnd->pdu.ahssize == 0) {
++ if ((conn->hdigest_type & DIGEST_NONE) == 0)
++ conn->read_state = RX_INIT_HDIGEST;
++ else
++ conn->read_state = RX_CMD_START;
++ } else {
++ iscsi_conn_prepare_read_ahs(conn, cmnd);
++ conn->read_state = RX_AHS;
++ }
++ }
++ break;
++
++ case RX_CMD_START:
++ res = cmnd_rx_start(cmnd);
++ if (res == 0) {
++ if (cmnd->pdu.datasize == 0)
++ conn->read_state = RX_END;
++ else
++ conn->read_state = RX_DATA;
++ } else if (res > 0)
++ conn->read_state = RX_CMD_CONTINUE;
++ else
++ BUG_ON(!conn->closing);
++ break;
++
++ case RX_CMD_CONTINUE:
++ if (cmnd->scst_state == ISCSI_CMD_STATE_RX_CMD) {
++ TRACE_DBG("cmnd %p is still in RX_CMD state",
++ cmnd);
++ res = 1;
++ break;
++ }
++ res = cmnd_rx_continue(cmnd);
++ if (unlikely(res != 0))
++ BUG_ON(!conn->closing);
++ else {
++ if (cmnd->pdu.datasize == 0)
++ conn->read_state = RX_END;
++ else
++ conn->read_state = RX_DATA;
++ }
++ break;
++
++ case RX_DATA:
++ res = do_recv(conn);
++ if (res == 0) {
++ int psz = ((cmnd->pdu.datasize + 3) & -4) - cmnd->pdu.datasize;
++ if (psz != 0) {
++ TRACE_DBG("padding %d bytes", psz);
++ iscsi_conn_init_read(conn,
++ (void __force __user *)&conn->rpadding, psz);
++ conn->read_state = RX_PADDING;
++ } else if ((conn->ddigest_type & DIGEST_NONE) != 0)
++ conn->read_state = RX_END;
++ else
++ conn->read_state = RX_INIT_DDIGEST;
++ }
++ break;
++
++ case RX_END:
++ if (unlikely(conn->read_size != 0)) {
++ PRINT_CRIT_ERROR("conn read_size !=0 on RX_END "
++ "(conn %p, op %x, read_size %d)", conn,
++ cmnd_opcode(cmnd), conn->read_size);
++ BUG();
++ }
++ conn->read_cmnd = NULL;
++ conn->read_state = RX_INIT_BHS;
++
++ cmnd_rx_end(cmnd);
++
++ EXTRACHECKS_BUG_ON(conn->read_size != 0);
++
++ /*
++			 * To maintain fairness. Res must be 0 here anyway; the
++			 * assignment is only to silence a compiler warning about
++			 * an uninitialized variable.
++ */
++ res = 0;
++ goto out;
++
++ case RX_INIT_HDIGEST:
++ iscsi_conn_init_read(conn,
++ (void __force __user *)&cmnd->hdigest, sizeof(u32));
++ conn->read_state = RX_CHECK_HDIGEST;
++ /* go through */
++
++ case RX_CHECK_HDIGEST:
++ res = do_recv(conn);
++ if (res == 0) {
++ res = digest_rx_header(cmnd);
++ if (unlikely(res != 0)) {
++ PRINT_ERROR("rx header digest for "
++ "initiator %s failed (%d)",
++ conn->session->initiator_name,
++ res);
++ mark_conn_closed(conn);
++ } else
++ conn->read_state = RX_CMD_START;
++ }
++ break;
++
++ case RX_INIT_DDIGEST:
++ iscsi_conn_init_read(conn,
++ (void __force __user *)&cmnd->ddigest,
++ sizeof(u32));
++ conn->read_state = RX_CHECK_DDIGEST;
++ /* go through */
++
++ case RX_CHECK_DDIGEST:
++ res = iscsi_rx_check_ddigest(conn);
++ break;
++
++ case RX_AHS:
++ res = do_recv(conn);
++ if (res == 0) {
++ if ((conn->hdigest_type & DIGEST_NONE) == 0)
++ conn->read_state = RX_INIT_HDIGEST;
++ else
++ conn->read_state = RX_CMD_START;
++ }
++ break;
++
++ case RX_PADDING:
++ res = do_recv(conn);
++ if (res == 0) {
++ if ((conn->ddigest_type & DIGEST_NONE) == 0)
++ conn->read_state = RX_INIT_DDIGEST;
++ else
++ conn->read_state = RX_END;
++ }
++ break;
++
++ default:
++ PRINT_CRIT_ERROR("%d %x", conn->read_state, cmnd_opcode(cmnd));
++ res = -1; /* to keep compiler happy */
++ BUG();
++ }
++ } while (res == 0);
++
++ if (unlikely(conn->closing)) {
++ start_close_conn(conn);
++ *closed = 1;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/*
++ * Called under iscsi_rd_lock and BHs disabled, but will drop it inside,
++ * then reacquire it.
++ */
++static void scst_do_job_rd(void)
++ __acquires(&iscsi_rd_lock)
++ __releases(&iscsi_rd_lock)
++{
++ TRACE_ENTRY();
++
++ /*
++ * We delete/add to tail connections to maintain fairness between them.
++ */
++
++ while (!list_empty(&iscsi_rd_list)) {
++ int closed = 0, rc;
++ struct iscsi_conn *conn = list_entry(iscsi_rd_list.next,
++ typeof(*conn), rd_list_entry);
++
++ list_del(&conn->rd_list_entry);
++
++ BUG_ON(conn->rd_state == ISCSI_CONN_RD_STATE_PROCESSING);
++ conn->rd_data_ready = 0;
++ conn->rd_state = ISCSI_CONN_RD_STATE_PROCESSING;
++#ifdef CONFIG_SCST_EXTRACHECKS
++ conn->rd_task = current;
++#endif
++ spin_unlock_bh(&iscsi_rd_lock);
++
++ rc = process_read_io(conn, &closed);
++
++ spin_lock_bh(&iscsi_rd_lock);
++
++ if (unlikely(closed))
++ continue;
++
++ if (unlikely(conn->conn_tm_active)) {
++ spin_unlock_bh(&iscsi_rd_lock);
++ iscsi_check_tm_data_wait_timeouts(conn, false);
++ spin_lock_bh(&iscsi_rd_lock);
++ }
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ conn->rd_task = NULL;
++#endif
++ if ((rc == 0) || conn->rd_data_ready) {
++ list_add_tail(&conn->rd_list_entry, &iscsi_rd_list);
++ conn->rd_state = ISCSI_CONN_RD_STATE_IN_LIST;
++ } else
++ conn->rd_state = ISCSI_CONN_RD_STATE_IDLE;
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static inline int test_rd_list(void)
++{
++ int res = !list_empty(&iscsi_rd_list) ||
++ unlikely(kthread_should_stop());
++ return res;
++}
++
++int istrd(void *arg)
++{
++ TRACE_ENTRY();
++
++ PRINT_INFO("Read thread started, PID %d", current->pid);
++
++ current->flags |= PF_NOFREEZE;
++
++ spin_lock_bh(&iscsi_rd_lock);
++ while (!kthread_should_stop()) {
++ wait_queue_t wait;
++ init_waitqueue_entry(&wait, current);
++
++ if (!test_rd_list()) {
++ add_wait_queue_exclusive_head(&iscsi_rd_waitQ, &wait);
++ for (;;) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ if (test_rd_list())
++ break;
++ spin_unlock_bh(&iscsi_rd_lock);
++ schedule();
++ spin_lock_bh(&iscsi_rd_lock);
++ }
++ set_current_state(TASK_RUNNING);
++ remove_wait_queue(&iscsi_rd_waitQ, &wait);
++ }
++ scst_do_job_rd();
++ }
++ spin_unlock_bh(&iscsi_rd_lock);
++
++ /*
++ * If kthread_should_stop() is true, we are guaranteed to be
++	 * in the module unload path, so iscsi_rd_list must be empty.
++ */
++ BUG_ON(!list_empty(&iscsi_rd_list));
++
++ PRINT_INFO("Read thread PID %d finished", current->pid);
++
++ TRACE_EXIT();
++ return 0;
++}
++
++#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++static inline void __iscsi_get_page_callback(struct iscsi_cmnd *cmd)
++{
++ int v;
++
++ TRACE_NET_PAGE("cmd %p, new net_ref_cnt %d",
++ cmd, atomic_read(&cmd->net_ref_cnt)+1);
++
++ v = atomic_inc_return(&cmd->net_ref_cnt);
++ if (v == 1) {
++ TRACE_NET_PAGE("getting cmd %p", cmd);
++ cmnd_get(cmd);
++ }
++ return;
++}
++
++void iscsi_get_page_callback(struct page *page)
++{
++ struct iscsi_cmnd *cmd = (struct iscsi_cmnd *)page->net_priv;
++
++ TRACE_NET_PAGE("page %p, _count %d", page,
++ atomic_read(&page->_count));
++
++ __iscsi_get_page_callback(cmd);
++ return;
++}
++
++static inline void __iscsi_put_page_callback(struct iscsi_cmnd *cmd)
++{
++ TRACE_NET_PAGE("cmd %p, new net_ref_cnt %d", cmd,
++ atomic_read(&cmd->net_ref_cnt)-1);
++
++ if (atomic_dec_and_test(&cmd->net_ref_cnt)) {
++ int i, sg_cnt = cmd->sg_cnt;
++ for (i = 0; i < sg_cnt; i++) {
++ struct page *page = sg_page(&cmd->sg[i]);
++ TRACE_NET_PAGE("Clearing page %p", page);
++ if (page->net_priv == cmd)
++ page->net_priv = NULL;
++ }
++ cmnd_put(cmd);
++ }
++ return;
++}
++
++void iscsi_put_page_callback(struct page *page)
++{
++ struct iscsi_cmnd *cmd = (struct iscsi_cmnd *)page->net_priv;
++
++ TRACE_NET_PAGE("page %p, _count %d", page,
++ atomic_read(&page->_count));
++
++ __iscsi_put_page_callback(cmd);
++ return;
++}
++
++static void check_net_priv(struct iscsi_cmnd *cmd, struct page *page)
++{
++ if ((atomic_read(&cmd->net_ref_cnt) == 1) && (page->net_priv == cmd)) {
++ TRACE_DBG("sendpage() not called get_page(), zeroing net_priv "
++ "%p (page %p)", page->net_priv, page);
++ page->net_priv = NULL;
++ }
++ return;
++}
++#else
++static inline void check_net_priv(struct iscsi_cmnd *cmd, struct page *page) {}
++static inline void __iscsi_get_page_callback(struct iscsi_cmnd *cmd) {}
++static inline void __iscsi_put_page_callback(struct iscsi_cmnd *cmd) {}
++#endif
++
++void req_add_to_write_timeout_list(struct iscsi_cmnd *req)
++{
++ struct iscsi_conn *conn;
++ bool set_conn_tm_active = false;
++
++ TRACE_ENTRY();
++
++ if (req->on_write_timeout_list)
++ goto out;
++
++ conn = req->conn;
++
++ TRACE_DBG("Adding req %p to conn %p write_timeout_list",
++ req, conn);
++
++ spin_lock_bh(&conn->write_list_lock);
++
++ /* Recheck, since it can be changed behind us */
++ if (unlikely(req->on_write_timeout_list)) {
++ spin_unlock_bh(&conn->write_list_lock);
++ goto out;
++ }
++
++ req->on_write_timeout_list = 1;
++ req->write_start = jiffies;
++
++ list_add_tail(&req->write_timeout_list_entry,
++ &conn->write_timeout_list);
++
++ if (!timer_pending(&conn->rsp_timer)) {
++ unsigned long timeout_time;
++ if (unlikely(conn->conn_tm_active ||
++ test_bit(ISCSI_CMD_ABORTED,
++ &req->prelim_compl_flags))) {
++ set_conn_tm_active = true;
++ timeout_time = req->write_start +
++ ISCSI_TM_DATA_WAIT_TIMEOUT +
++ ISCSI_ADD_SCHED_TIME;
++ } else
++ timeout_time = req->write_start +
++ conn->rsp_timeout + ISCSI_ADD_SCHED_TIME;
++
++ TRACE_DBG("Starting timer on %ld (con %p, write_start %ld)",
++ timeout_time, conn, req->write_start);
++
++ conn->rsp_timer.expires = timeout_time;
++ add_timer(&conn->rsp_timer);
++ } else if (unlikely(test_bit(ISCSI_CMD_ABORTED,
++ &req->prelim_compl_flags))) {
++ unsigned long timeout_time = jiffies +
++ ISCSI_TM_DATA_WAIT_TIMEOUT + ISCSI_ADD_SCHED_TIME;
++ set_conn_tm_active = true;
++ if (time_after(conn->rsp_timer.expires, timeout_time)) {
++ TRACE_MGMT_DBG("Mod timer on %ld (conn %p)",
++ timeout_time, conn);
++ mod_timer(&conn->rsp_timer, timeout_time);
++ }
++ }
++
++ spin_unlock_bh(&conn->write_list_lock);
++
++ /*
++ * conn_tm_active can be already cleared by
++ * iscsi_check_tm_data_wait_timeouts(). write_list_lock is an inner
++ * lock for iscsi_rd_lock.
++ */
++ if (unlikely(set_conn_tm_active)) {
++ spin_lock_bh(&iscsi_rd_lock);
++ TRACE_MGMT_DBG("Setting conn_tm_active for conn %p", conn);
++ conn->conn_tm_active = 1;
++ spin_unlock_bh(&iscsi_rd_lock);
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static int write_data(struct iscsi_conn *conn)
++{
++ mm_segment_t oldfs;
++ struct file *file;
++ struct iovec *iop;
++ struct socket *sock;
++ ssize_t (*sock_sendpage)(struct socket *, struct page *, int, size_t,
++ int);
++ ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
++ struct iscsi_cmnd *write_cmnd = conn->write_cmnd;
++ struct iscsi_cmnd *ref_cmd;
++ struct page *page;
++ struct scatterlist *sg;
++ int saved_size, size, sendsize;
++ int length, offset, idx;
++ int flags, res, count, sg_size;
++ bool do_put = false, ref_cmd_to_parent;
++
++ TRACE_ENTRY();
++
++ iscsi_extracheck_is_wr_thread(conn);
++
++ if (!write_cmnd->own_sg) {
++ ref_cmd = write_cmnd->parent_req;
++ ref_cmd_to_parent = true;
++ } else {
++ ref_cmd = write_cmnd;
++ ref_cmd_to_parent = false;
++ }
++
++ req_add_to_write_timeout_list(write_cmnd->parent_req);
++
++ file = conn->file;
++ size = conn->write_size;
++ saved_size = size;
++ iop = conn->write_iop;
++ count = conn->write_iop_used;
++
++ if (iop) {
++ while (1) {
++ loff_t off = 0;
++ int rest;
++
++ BUG_ON(count > (signed)(sizeof(conn->write_iov) /
++ sizeof(conn->write_iov[0])));
++retry:
++ oldfs = get_fs();
++ set_fs(KERNEL_DS);
++ res = vfs_writev(file,
++ (struct iovec __force __user *)iop,
++ count, &off);
++ set_fs(oldfs);
++ TRACE_WRITE("sid %#Lx, cid %u, res %d, iov_len %ld",
++ (long long unsigned int)conn->session->sid,
++ conn->cid, res, (long)iop->iov_len);
++ if (unlikely(res <= 0)) {
++ if (res == -EAGAIN) {
++ conn->write_iop = iop;
++ conn->write_iop_used = count;
++ goto out_iov;
++ } else if (res == -EINTR)
++ goto retry;
++ goto out_err;
++ }
++
++ rest = res;
++ size -= res;
++ while ((typeof(rest))iop->iov_len <= rest && rest) {
++ rest -= iop->iov_len;
++ iop++;
++ count--;
++ }
++ if (count == 0) {
++ conn->write_iop = NULL;
++ conn->write_iop_used = 0;
++ if (size)
++ break;
++ goto out_iov;
++ }
++ BUG_ON(iop > conn->write_iov + sizeof(conn->write_iov)
++ /sizeof(conn->write_iov[0]));
++ iop->iov_base += rest;
++ iop->iov_len -= rest;
++ }
++ }
++
++ sg = write_cmnd->sg;
++ if (unlikely(sg == NULL)) {
++ PRINT_INFO("WARNING: Data missed (cmd %p)!", write_cmnd);
++ res = 0;
++ goto out;
++ }
++
++ /* To protect from too early transfer completion race */
++ __iscsi_get_page_callback(ref_cmd);
++ do_put = true;
++
++ sock = conn->sock;
++
++#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++ sock_sendpage = sock->ops->sendpage;
++#else
++ if ((write_cmnd->parent_req->scst_cmd != NULL) &&
++ scst_cmd_get_dh_data_buff_alloced(write_cmnd->parent_req->scst_cmd))
++ sock_sendpage = sock_no_sendpage;
++ else
++ sock_sendpage = sock->ops->sendpage;
++#endif
++
++ flags = MSG_DONTWAIT;
++ sg_size = size;
++
++ if (sg != write_cmnd->rsp_sg) {
++ offset = conn->write_offset + sg[0].offset;
++ idx = offset >> PAGE_SHIFT;
++ offset &= ~PAGE_MASK;
++ length = min(size, (int)PAGE_SIZE - offset);
++ TRACE_WRITE("write_offset %d, sg_size %d, idx %d, offset %d, "
++ "length %d", conn->write_offset, sg_size, idx, offset,
++ length);
++ } else {
++ idx = 0;
++ offset = conn->write_offset;
++ while (offset >= sg[idx].length) {
++ offset -= sg[idx].length;
++ idx++;
++ }
++ length = sg[idx].length - offset;
++ offset += sg[idx].offset;
++ sock_sendpage = sock_no_sendpage;
++ TRACE_WRITE("rsp_sg: write_offset %d, sg_size %d, idx %d, "
++ "offset %d, length %d", conn->write_offset, sg_size,
++ idx, offset, length);
++ }
++ page = sg_page(&sg[idx]);
++
++ while (1) {
++ sendpage = sock_sendpage;
++
++#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
++ {
++ static DEFINE_SPINLOCK(net_priv_lock);
++ spin_lock(&net_priv_lock);
++ if (unlikely(page->net_priv != NULL)) {
++ if (page->net_priv != ref_cmd) {
++ /*
++ * This might happen if user space
++ * supplies to scst_user the same
++ * pages in different commands or in
++ * case of zero-copy FILEIO, when
++ * several initiators request the same
++ * data simultaneously.
++ */
++ TRACE_DBG("net_priv isn't NULL and != "
++ "ref_cmd (write_cmnd %p, ref_cmd "
++ "%p, sg %p, idx %d, page %p, "
++ "net_priv %p)",
++ write_cmnd, ref_cmd, sg, idx,
++ page, page->net_priv);
++ sendpage = sock_no_sendpage;
++ }
++ } else
++ page->net_priv = ref_cmd;
++ spin_unlock(&net_priv_lock);
++ }
++#endif
++ sendsize = min(size, length);
++ if (size <= sendsize) {
++retry2:
++ res = sendpage(sock, page, offset, size, flags);
++ TRACE_WRITE("Final %s sid %#Lx, cid %u, res %d (page "
++ "index %lu, offset %u, size %u, cmd %p, "
++ "page %p)", (sendpage != sock_no_sendpage) ?
++ "sendpage" : "sock_no_sendpage",
++ (long long unsigned int)conn->session->sid,
++ conn->cid, res, page->index,
++ offset, size, write_cmnd, page);
++ if (unlikely(res <= 0)) {
++ if (res == -EINTR)
++ goto retry2;
++ else
++ goto out_res;
++ }
++
++ check_net_priv(ref_cmd, page);
++ if (res == size) {
++ conn->write_size = 0;
++ res = saved_size;
++ goto out_put;
++ }
++
++ offset += res;
++ size -= res;
++ goto retry2;
++ }
++
++retry1:
++ res = sendpage(sock, page, offset, sendsize, flags | MSG_MORE);
++ TRACE_WRITE("%s sid %#Lx, cid %u, res %d (page index %lu, "
++ "offset %u, sendsize %u, size %u, cmd %p, page %p)",
++ (sendpage != sock_no_sendpage) ? "sendpage" :
++ "sock_no_sendpage",
++ (unsigned long long)conn->session->sid, conn->cid,
++ res, page->index, offset, sendsize, size,
++ write_cmnd, page);
++ if (unlikely(res <= 0)) {
++ if (res == -EINTR)
++ goto retry1;
++ else
++ goto out_res;
++ }
++
++ check_net_priv(ref_cmd, page);
++
++ size -= res;
++
++ if (res == sendsize) {
++ idx++;
++ EXTRACHECKS_BUG_ON(idx >= ref_cmd->sg_cnt);
++ page = sg_page(&sg[idx]);
++ length = sg[idx].length;
++ offset = sg[idx].offset;
++ } else {
++ offset += res;
++ sendsize -= res;
++ goto retry1;
++ }
++ }
++
++out_off:
++ conn->write_offset += sg_size - size;
++
++out_iov:
++ conn->write_size = size;
++ if ((saved_size == size) && res == -EAGAIN)
++ goto out_put;
++
++ res = saved_size - size;
++
++out_put:
++ if (do_put)
++ __iscsi_put_page_callback(ref_cmd);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_res:
++ check_net_priv(ref_cmd, page);
++ if (res == -EAGAIN)
++ goto out_off;
++ /* else go through */
++
++out_err:
++#ifndef CONFIG_SCST_DEBUG
++ if (!conn->closing)
++#endif
++ {
++ PRINT_ERROR("error %d at sid:cid %#Lx:%u, cmnd %p", res,
++ (long long unsigned int)conn->session->sid,
++ conn->cid, conn->write_cmnd);
++ }
++ if (ref_cmd_to_parent &&
++ ((ref_cmd->scst_cmd != NULL) || (ref_cmd->scst_aen != NULL))) {
++ if (ref_cmd->scst_state == ISCSI_CMD_STATE_AEN)
++ scst_set_aen_delivery_status(ref_cmd->scst_aen,
++ SCST_AEN_RES_FAILED);
++ else
++ scst_set_delivery_status(ref_cmd->scst_cmd,
++ SCST_CMD_DELIVERY_FAILED);
++ }
++ goto out_put;
++}
++
++static int exit_tx(struct iscsi_conn *conn, int res)
++{
++ iscsi_extracheck_is_wr_thread(conn);
++
++ switch (res) {
++ case -EAGAIN:
++ case -ERESTARTSYS:
++ break;
++ default:
++#ifndef CONFIG_SCST_DEBUG
++ if (!conn->closing)
++#endif
++ {
++ PRINT_ERROR("Sending data failed: initiator %s, "
++ "write_size %d, write_state %d, res %d",
++ conn->session->initiator_name,
++ conn->write_size,
++ conn->write_state, res);
++ }
++ conn->write_state = TX_END;
++ conn->write_size = 0;
++ mark_conn_closed(conn);
++ break;
++ }
++ return res;
++}
++
++static int tx_ddigest(struct iscsi_cmnd *cmnd, int state)
++{
++ int res, rest = cmnd->conn->write_size;
++ struct msghdr msg = {.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT};
++ struct kvec iov;
++
++ iscsi_extracheck_is_wr_thread(cmnd->conn);
++
++ TRACE_DBG("Sending data digest %x (cmd %p)", cmnd->ddigest, cmnd);
++
++ iov.iov_base = (char *)(&cmnd->ddigest) + (sizeof(u32) - rest);
++ iov.iov_len = rest;
++
++ res = kernel_sendmsg(cmnd->conn->sock, &msg, &iov, 1, rest);
++ if (res > 0) {
++ cmnd->conn->write_size -= res;
++ if (!cmnd->conn->write_size)
++ cmnd->conn->write_state = state;
++ } else
++ res = exit_tx(cmnd->conn, res);
++
++ return res;
++}
++
++static void init_tx_hdigest(struct iscsi_cmnd *cmnd)
++{
++ struct iscsi_conn *conn = cmnd->conn;
++ struct iovec *iop;
++
++ iscsi_extracheck_is_wr_thread(conn);
++
++ digest_tx_header(cmnd);
++
++ BUG_ON(conn->write_iop_used >=
++ (signed)(sizeof(conn->write_iov)/sizeof(conn->write_iov[0])));
++
++ iop = &conn->write_iop[conn->write_iop_used];
++ conn->write_iop_used++;
++ iop->iov_base = (void __force __user *)&(cmnd->hdigest);
++ iop->iov_len = sizeof(u32);
++ conn->write_size += sizeof(u32);
++
++ return;
++}
++
++static int tx_padding(struct iscsi_cmnd *cmnd, int state)
++{
++ int res, rest = cmnd->conn->write_size;
++ struct msghdr msg = {.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT};
++ struct kvec iov;
++ static const uint32_t padding;
++
++ iscsi_extracheck_is_wr_thread(cmnd->conn);
++
++ TRACE_DBG("Sending %d padding bytes (cmd %p)", rest, cmnd);
++
++ iov.iov_base = (char *)(&padding) + (sizeof(uint32_t) - rest);
++ iov.iov_len = rest;
++
++ res = kernel_sendmsg(cmnd->conn->sock, &msg, &iov, 1, rest);
++ if (res > 0) {
++ cmnd->conn->write_size -= res;
++ if (!cmnd->conn->write_size)
++ cmnd->conn->write_state = state;
++ } else
++ res = exit_tx(cmnd->conn, res);
++
++ return res;
++}
++
++static int iscsi_do_send(struct iscsi_conn *conn, int state)
++{
++ int res;
++
++ iscsi_extracheck_is_wr_thread(conn);
++
++ res = write_data(conn);
++ if (res > 0) {
++ if (!conn->write_size)
++ conn->write_state = state;
++ } else
++ res = exit_tx(conn, res);
++
++ return res;
++}
++
++/*
++ * No locks, conn is wr processing.
++ *
++ * IMPORTANT! Connection conn must be protected by additional conn_get()
++ * on entry to this function, because otherwise it could be destroyed
++ * inside as a result of cmnd release.
++ */
++int iscsi_send(struct iscsi_conn *conn)
++{
++ struct iscsi_cmnd *cmnd = conn->write_cmnd;
++ int ddigest, res = 0;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("conn %p, write_cmnd %p", conn, cmnd);
++
++ iscsi_extracheck_is_wr_thread(conn);
++
++ ddigest = conn->ddigest_type != DIGEST_NONE ? 1 : 0;
++
++ switch (conn->write_state) {
++ case TX_INIT:
++ BUG_ON(cmnd != NULL);
++ cmnd = conn->write_cmnd = iscsi_get_send_cmnd(conn);
++ if (!cmnd)
++ goto out;
++ cmnd_tx_start(cmnd);
++ if (!(conn->hdigest_type & DIGEST_NONE))
++ init_tx_hdigest(cmnd);
++ conn->write_state = TX_BHS_DATA;
++ case TX_BHS_DATA:
++ res = iscsi_do_send(conn, cmnd->pdu.datasize ?
++ TX_INIT_PADDING : TX_END);
++ if (res <= 0 || conn->write_state != TX_INIT_PADDING)
++ break;
++ case TX_INIT_PADDING:
++ cmnd->conn->write_size = ((cmnd->pdu.datasize + 3) & -4) -
++ cmnd->pdu.datasize;
++ if (cmnd->conn->write_size != 0)
++ conn->write_state = TX_PADDING;
++ else if (ddigest)
++ conn->write_state = TX_INIT_DDIGEST;
++ else
++ conn->write_state = TX_END;
++ break;
++ case TX_PADDING:
++ res = tx_padding(cmnd, ddigest ? TX_INIT_DDIGEST : TX_END);
++ if (res <= 0 || conn->write_state != TX_INIT_DDIGEST)
++ break;
++ case TX_INIT_DDIGEST:
++ cmnd->conn->write_size = sizeof(u32);
++ conn->write_state = TX_DDIGEST;
++ case TX_DDIGEST:
++ res = tx_ddigest(cmnd, TX_END);
++ break;
++ default:
++ PRINT_CRIT_ERROR("%d %d %x", res, conn->write_state,
++ cmnd_opcode(cmnd));
++ BUG();
++ }
++
++ if (res == 0)
++ goto out;
++
++ if (conn->write_state != TX_END)
++ goto out;
++
++ if (unlikely(conn->write_size)) {
++ PRINT_CRIT_ERROR("%d %x %u", res, cmnd_opcode(cmnd),
++ conn->write_size);
++ BUG();
++ }
++ cmnd_tx_end(cmnd);
++
++ rsp_cmnd_release(cmnd);
++
++ conn->write_cmnd = NULL;
++ conn->write_state = TX_INIT;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/*
++ * Called under iscsi_wr_lock and BHs disabled, but will drop it inside,
++ * then reacquire it.
++ */
++static void scst_do_job_wr(void)
++ __acquires(&iscsi_wr_lock)
++ __releases(&iscsi_wr_lock)
++{
++ TRACE_ENTRY();
++
++ /*
++ * We delete/add to tail connections to maintain fairness between them.
++ */
++
++ while (!list_empty(&iscsi_wr_list)) {
++ int rc;
++ struct iscsi_conn *conn = list_entry(iscsi_wr_list.next,
++ typeof(*conn), wr_list_entry);
++
++ TRACE_DBG("conn %p, wr_state %x, wr_space_ready %d, "
++ "write ready %d", conn, conn->wr_state,
++ conn->wr_space_ready, test_write_ready(conn));
++
++ list_del(&conn->wr_list_entry);
++
++ BUG_ON(conn->wr_state == ISCSI_CONN_WR_STATE_PROCESSING);
++
++ conn->wr_state = ISCSI_CONN_WR_STATE_PROCESSING;
++ conn->wr_space_ready = 0;
++#ifdef CONFIG_SCST_EXTRACHECKS
++ conn->wr_task = current;
++#endif
++ spin_unlock_bh(&iscsi_wr_lock);
++
++ conn_get(conn);
++
++ rc = iscsi_send(conn);
++
++ spin_lock_bh(&iscsi_wr_lock);
++#ifdef CONFIG_SCST_EXTRACHECKS
++ conn->wr_task = NULL;
++#endif
++ if ((rc == -EAGAIN) && !conn->wr_space_ready) {
++ TRACE_DBG("EAGAIN, setting WR_STATE_SPACE_WAIT "
++ "(conn %p)", conn);
++ conn->wr_state = ISCSI_CONN_WR_STATE_SPACE_WAIT;
++ } else if (test_write_ready(conn)) {
++ list_add_tail(&conn->wr_list_entry, &iscsi_wr_list);
++ conn->wr_state = ISCSI_CONN_WR_STATE_IN_LIST;
++ } else
++ conn->wr_state = ISCSI_CONN_WR_STATE_IDLE;
++
++ conn_put(conn);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static inline int test_wr_list(void)
++{
++ int res = !list_empty(&iscsi_wr_list) ||
++ unlikely(kthread_should_stop());
++ return res;
++}
++
++int istwr(void *arg)
++{
++ TRACE_ENTRY();
++
++ PRINT_INFO("Write thread started, PID %d", current->pid);
++
++ current->flags |= PF_NOFREEZE;
++
++ spin_lock_bh(&iscsi_wr_lock);
++ while (!kthread_should_stop()) {
++ wait_queue_t wait;
++ init_waitqueue_entry(&wait, current);
++
++ if (!test_wr_list()) {
++ add_wait_queue_exclusive_head(&iscsi_wr_waitQ, &wait);
++ for (;;) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ if (test_wr_list())
++ break;
++ spin_unlock_bh(&iscsi_wr_lock);
++ schedule();
++ spin_lock_bh(&iscsi_wr_lock);
++ }
++ set_current_state(TASK_RUNNING);
++ remove_wait_queue(&iscsi_wr_waitQ, &wait);
++ }
++ scst_do_job_wr();
++ }
++ spin_unlock_bh(&iscsi_wr_lock);
++
++ /*
++ * If kthread_should_stop() is true, we are guaranteed to be
++	 * in the module unload path, so iscsi_wr_list must be empty.
++ */
++ BUG_ON(!list_empty(&iscsi_wr_list));
++
++ PRINT_INFO("Write thread PID %d finished", current->pid);
++
++ TRACE_EXIT();
++ return 0;
++}
+diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/param.c linux-2.6.36/drivers/scst/iscsi-scst/param.c
+--- orig/linux-2.6.36/drivers/scst/iscsi-scst/param.c
++++ linux-2.6.36/drivers/scst/iscsi-scst/param.c
+@@ -0,0 +1,306 @@
++/*
++ * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
++ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include "iscsi.h"
++#include "digest.h"
++
++#define CHECK_PARAM(info, iparams, word, min, max) \
++do { \
++ if (!(info)->partial || ((info)->partial & 1 << key_##word)) { \
++ TRACE_DBG("%s: %u", #word, (iparams)[key_##word]); \
++ if ((iparams)[key_##word] < (min) || \
++ (iparams)[key_##word] > (max)) { \
++ if ((iparams)[key_##word] < (min)) { \
++ (iparams)[key_##word] = (min); \
++ PRINT_WARNING("%s: %u is too small, resetting " \
++ "it to allowed min %u", \
++ #word, (iparams)[key_##word], (min)); \
++ } else { \
++ PRINT_WARNING("%s: %u is too big, resetting " \
++ "it to allowed max %u", \
++ #word, (iparams)[key_##word], (max)); \
++ (iparams)[key_##word] = (max); \
++ } \
++ } \
++ } \
++} while (0)
++
++#define SET_PARAM(params, info, iparams, word) \
++({ \
++ int changed = 0; \
++ if (!(info)->partial || ((info)->partial & 1 << key_##word)) { \
++ if ((params)->word != (iparams)[key_##word]) \
++ changed = 1; \
++ (params)->word = (iparams)[key_##word]; \
++ TRACE_DBG("%s set to %u", #word, (params)->word); \
++ } \
++ changed; \
++})
++
++#define GET_PARAM(params, info, iparams, word) \
++do { \
++ (iparams)[key_##word] = (params)->word; \
++} while (0)
++
++const char *iscsi_get_bool_value(int val)
++{
++ if (val)
++ return "Yes";
++ else
++ return "No";
++}
++
++const char *iscsi_get_digest_name(int val, char *res)
++{
++ int pos = 0;
++
++ if (val & DIGEST_NONE)
++ pos = sprintf(&res[pos], "%s", "None");
++
++ if (val & DIGEST_CRC32C)
++ pos += sprintf(&res[pos], "%s%s", (pos != 0) ? ", " : "",
++ "CRC32C");
++
++ if (pos == 0)
++ sprintf(&res[pos], "%s", "Unknown");
++
++ return res;
++}
++
++static void log_params(struct iscsi_sess_params *params)
++{
++ char digest_name[64];
++
++ PRINT_INFO("Negotiated parameters: InitialR2T %s, ImmediateData %s, "
++ "MaxConnections %d, MaxRecvDataSegmentLength %d, "
++ "MaxXmitDataSegmentLength %d, ",
++ iscsi_get_bool_value(params->initial_r2t),
++ iscsi_get_bool_value(params->immediate_data), params->max_connections,
++ params->max_recv_data_length, params->max_xmit_data_length);
++ PRINT_INFO(" MaxBurstLength %d, FirstBurstLength %d, "
++ "DefaultTime2Wait %d, DefaultTime2Retain %d, ",
++ params->max_burst_length, params->first_burst_length,
++ params->default_wait_time, params->default_retain_time);
++ PRINT_INFO(" MaxOutstandingR2T %d, DataPDUInOrder %s, "
++ "DataSequenceInOrder %s, ErrorRecoveryLevel %d, ",
++ params->max_outstanding_r2t,
++ iscsi_get_bool_value(params->data_pdu_inorder),
++ iscsi_get_bool_value(params->data_sequence_inorder),
++ params->error_recovery_level);
++ PRINT_INFO(" HeaderDigest %s, DataDigest %s, OFMarker %s, "
++ "IFMarker %s, OFMarkInt %d, IFMarkInt %d",
++ iscsi_get_digest_name(params->header_digest, digest_name),
++ iscsi_get_digest_name(params->data_digest, digest_name),
++ iscsi_get_bool_value(params->ofmarker),
++ iscsi_get_bool_value(params->ifmarker),
++ params->ofmarkint, params->ifmarkint);
++}
++
++/* target_mutex supposed to be locked */
++static void sess_params_check(struct iscsi_kern_params_info *info)
++{
++ int32_t *iparams = info->session_params;
++ const int max_len = ISCSI_CONN_IOV_MAX * PAGE_SIZE;
++
++ CHECK_PARAM(info, iparams, initial_r2t, 0, 1);
++ CHECK_PARAM(info, iparams, immediate_data, 0, 1);
++ CHECK_PARAM(info, iparams, max_connections, 1, 1);
++ CHECK_PARAM(info, iparams, max_recv_data_length, 512, max_len);
++ CHECK_PARAM(info, iparams, max_xmit_data_length, 512, max_len);
++ CHECK_PARAM(info, iparams, max_burst_length, 512, max_len);
++ CHECK_PARAM(info, iparams, first_burst_length, 512, max_len);
++ CHECK_PARAM(info, iparams, max_outstanding_r2t, 1, 65535);
++ CHECK_PARAM(info, iparams, error_recovery_level, 0, 0);
++ CHECK_PARAM(info, iparams, data_pdu_inorder, 0, 1);
++ CHECK_PARAM(info, iparams, data_sequence_inorder, 0, 1);
++
++ digest_alg_available(&iparams[key_header_digest]);
++ digest_alg_available(&iparams[key_data_digest]);
++
++ CHECK_PARAM(info, iparams, ofmarker, 0, 0);
++ CHECK_PARAM(info, iparams, ifmarker, 0, 0);
++
++ return;
++}
++
++/* target_mutex supposed to be locked */
++static void sess_params_set(struct iscsi_sess_params *params,
++ struct iscsi_kern_params_info *info)
++{
++ int32_t *iparams = info->session_params;
++
++ SET_PARAM(params, info, iparams, initial_r2t);
++ SET_PARAM(params, info, iparams, immediate_data);
++ SET_PARAM(params, info, iparams, max_connections);
++ SET_PARAM(params, info, iparams, max_recv_data_length);
++ SET_PARAM(params, info, iparams, max_xmit_data_length);
++ SET_PARAM(params, info, iparams, max_burst_length);
++ SET_PARAM(params, info, iparams, first_burst_length);
++ SET_PARAM(params, info, iparams, default_wait_time);
++ SET_PARAM(params, info, iparams, default_retain_time);
++ SET_PARAM(params, info, iparams, max_outstanding_r2t);
++ SET_PARAM(params, info, iparams, data_pdu_inorder);
++ SET_PARAM(params, info, iparams, data_sequence_inorder);
++ SET_PARAM(params, info, iparams, error_recovery_level);
++ SET_PARAM(params, info, iparams, header_digest);
++ SET_PARAM(params, info, iparams, data_digest);
++ SET_PARAM(params, info, iparams, ofmarker);
++ SET_PARAM(params, info, iparams, ifmarker);
++ SET_PARAM(params, info, iparams, ofmarkint);
++ SET_PARAM(params, info, iparams, ifmarkint);
++ return;
++}
++
++static void sess_params_get(struct iscsi_sess_params *params,
++ struct iscsi_kern_params_info *info)
++{
++ int32_t *iparams = info->session_params;
++
++ GET_PARAM(params, info, iparams, initial_r2t);
++ GET_PARAM(params, info, iparams, immediate_data);
++ GET_PARAM(params, info, iparams, max_connections);
++ GET_PARAM(params, info, iparams, max_recv_data_length);
++ GET_PARAM(params, info, iparams, max_xmit_data_length);
++ GET_PARAM(params, info, iparams, max_burst_length);
++ GET_PARAM(params, info, iparams, first_burst_length);
++ GET_PARAM(params, info, iparams, default_wait_time);
++ GET_PARAM(params, info, iparams, default_retain_time);
++ GET_PARAM(params, info, iparams, max_outstanding_r2t);
++ GET_PARAM(params, info, iparams, data_pdu_inorder);
++ GET_PARAM(params, info, iparams, data_sequence_inorder);
++ GET_PARAM(params, info, iparams, error_recovery_level);
++ GET_PARAM(params, info, iparams, header_digest);
++ GET_PARAM(params, info, iparams, data_digest);
++ GET_PARAM(params, info, iparams, ofmarker);
++ GET_PARAM(params, info, iparams, ifmarker);
++ GET_PARAM(params, info, iparams, ofmarkint);
++ GET_PARAM(params, info, iparams, ifmarkint);
++ return;
++}
++
++/* target_mutex supposed to be locked */
++static void tgt_params_check(struct iscsi_session *session,
++ struct iscsi_kern_params_info *info)
++{
++ int32_t *iparams = info->target_params;
++
++ CHECK_PARAM(info, iparams, queued_cmnds, MIN_NR_QUEUED_CMNDS,
++ min_t(int, MAX_NR_QUEUED_CMNDS,
++ scst_get_max_lun_commands(session->scst_sess, NO_SUCH_LUN)));
++ CHECK_PARAM(info, iparams, rsp_timeout, MIN_RSP_TIMEOUT,
++ MAX_RSP_TIMEOUT);
++ CHECK_PARAM(info, iparams, nop_in_interval, MIN_NOP_IN_INTERVAL,
++ MAX_NOP_IN_INTERVAL);
++ return;
++}
++
++/* target_mutex supposed to be locked */
++static int iscsi_tgt_params_set(struct iscsi_session *session,
++ struct iscsi_kern_params_info *info, int set)
++{
++ struct iscsi_tgt_params *params = &session->tgt_params;
++ int32_t *iparams = info->target_params;
++
++ if (set) {
++ struct iscsi_conn *conn;
++
++ tgt_params_check(session, info);
++
++ SET_PARAM(params, info, iparams, queued_cmnds);
++ SET_PARAM(params, info, iparams, rsp_timeout);
++ SET_PARAM(params, info, iparams, nop_in_interval);
++
++ PRINT_INFO("Target parameters set for session %llx: "
++ "QueuedCommands %d, Response timeout %d, Nop-In "
++ "interval %d", session->sid, params->queued_cmnds,
++ params->rsp_timeout, params->nop_in_interval);
++
++ list_for_each_entry(conn, &session->conn_list,
++ conn_list_entry) {
++ conn->rsp_timeout = session->tgt_params.rsp_timeout * HZ;
++ conn->nop_in_interval = session->tgt_params.nop_in_interval * HZ;
++ spin_lock_bh(&iscsi_rd_lock);
++ if (!conn->closing && (conn->nop_in_interval > 0)) {
++ TRACE_DBG("Schedule Nop-In work for conn %p", conn);
++ schedule_delayed_work(&conn->nop_in_delayed_work,
++ conn->nop_in_interval + ISCSI_ADD_SCHED_TIME);
++ }
++ spin_unlock_bh(&iscsi_rd_lock);
++ }
++ } else {
++ GET_PARAM(params, info, iparams, queued_cmnds);
++ GET_PARAM(params, info, iparams, rsp_timeout);
++ GET_PARAM(params, info, iparams, nop_in_interval);
++ }
++
++ return 0;
++}
++
++/* target_mutex supposed to be locked */
++static int iscsi_sess_params_set(struct iscsi_session *session,
++ struct iscsi_kern_params_info *info, int set)
++{
++ struct iscsi_sess_params *params;
++
++ if (set)
++ sess_params_check(info);
++
++ params = &session->sess_params;
++
++ if (set) {
++ sess_params_set(params, info);
++ log_params(params);
++ } else
++ sess_params_get(params, info);
++
++ return 0;
++}
++
++/* target_mutex supposed to be locked */
++int iscsi_params_set(struct iscsi_target *target,
++ struct iscsi_kern_params_info *info, int set)
++{
++ int err;
++ struct iscsi_session *session;
++
++ if (info->sid == 0) {
++ PRINT_ERROR("sid must not be %d", 0);
++ err = -EINVAL;
++ goto out;
++ }
++
++ session = session_lookup(target, info->sid);
++ if (session == NULL) {
++ PRINT_ERROR("Session for sid %llx not found", info->sid);
++ err = -ENOENT;
++ goto out;
++ }
++
++ if (set && !list_empty(&session->conn_list) &&
++ (info->params_type != key_target)) {
++ err = -EBUSY;
++ goto out;
++ }
++
++ if (info->params_type == key_session)
++ err = iscsi_sess_params_set(session, info, set);
++ else if (info->params_type == key_target)
++ err = iscsi_tgt_params_set(session, info, set);
++ else
++ err = -EINVAL;
++
++out:
++ return err;
++}
+diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/session.c linux-2.6.36/drivers/scst/iscsi-scst/session.c
+--- orig/linux-2.6.36/drivers/scst/iscsi-scst/session.c
++++ linux-2.6.36/drivers/scst/iscsi-scst/session.c
+@@ -0,0 +1,499 @@
++/*
++ * Copyright (C) 2002 - 2003 Ardis Technolgies <roman@ardistech.com>
++ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include "iscsi.h"
++
++/* target_mutex supposed to be locked */
++struct iscsi_session *session_lookup(struct iscsi_target *target, u64 sid)
++{
++ struct iscsi_session *session;
++
++ list_for_each_entry(session, &target->session_list,
++ session_list_entry) {
++ if (session->sid == sid)
++ return session;
++ }
++ return NULL;
++}
++
++/* target_mgmt_mutex supposed to be locked */
++static int iscsi_session_alloc(struct iscsi_target *target,
++ struct iscsi_kern_session_info *info, struct iscsi_session **result)
++{
++ int err;
++ unsigned int i;
++ struct iscsi_session *session;
++ char *name = NULL;
++
++ session = kzalloc(sizeof(*session), GFP_KERNEL);
++ if (!session)
++ return -ENOMEM;
++
++ session->target = target;
++ session->sid = info->sid;
++ atomic_set(&session->active_cmds, 0);
++ session->exp_cmd_sn = info->exp_cmd_sn;
++
++ session->initiator_name = kstrdup(info->initiator_name, GFP_KERNEL);
++ if (!session->initiator_name) {
++ err = -ENOMEM;
++ goto err;
++ }
++
++ name = info->full_initiator_name;
++
++ INIT_LIST_HEAD(&session->conn_list);
++ INIT_LIST_HEAD(&session->pending_list);
++
++ spin_lock_init(&session->sn_lock);
++
++ spin_lock_init(&session->cmnd_data_wait_hash_lock);
++ for (i = 0; i < ARRAY_SIZE(session->cmnd_data_wait_hash); i++)
++ INIT_LIST_HEAD(&session->cmnd_data_wait_hash[i]);
++
++ session->next_ttt = 1;
++
++ session->scst_sess = scst_register_session(target->scst_tgt, 0,
++ name, session, NULL, NULL);
++ if (session->scst_sess == NULL) {
++ PRINT_ERROR("%s", "scst_register_session() failed");
++ err = -ENOMEM;
++ goto err;
++ }
++
++ TRACE_MGMT_DBG("Session %p created: target %p, tid %u, sid %#Lx",
++ session, target, target->tid, info->sid);
++
++ *result = session;
++ return 0;
++
++err:
++ if (session) {
++ kfree(session->initiator_name);
++ kfree(session);
++ }
++ return err;
++}
++
++/* target_mutex supposed to be locked */
++void sess_reinst_finished(struct iscsi_session *sess)
++{
++ struct iscsi_conn *c;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("Enabling reinstate successor sess %p", sess);
++
++ BUG_ON(!sess->sess_reinstating);
++
++ list_for_each_entry(c, &sess->conn_list, conn_list_entry) {
++ conn_reinst_finished(c);
++ }
++ sess->sess_reinstating = 0;
++
++ TRACE_EXIT();
++ return;
++}
++
++/* target_mgmt_mutex supposed to be locked */
++int __add_session(struct iscsi_target *target,
++ struct iscsi_kern_session_info *info)
++{
++ struct iscsi_session *new_sess = NULL, *sess, *old_sess;
++ int err = 0, i;
++ union iscsi_sid sid;
++ bool reinstatement = false;
++ struct iscsi_kern_params_info *params_info;
++
++ TRACE_MGMT_DBG("Adding session SID %llx", info->sid);
++
++ err = iscsi_session_alloc(target, info, &new_sess);
++ if (err != 0)
++ goto out;
++
++ mutex_lock(&target->target_mutex);
++
++ sess = session_lookup(target, info->sid);
++ if (sess != NULL) {
++ PRINT_ERROR("Attempt to add session with existing SID %llx",
++ info->sid);
++ err = -EEXIST;
++ goto out_err_unlock;
++ }
++
++ params_info = kmalloc(sizeof(*params_info), GFP_KERNEL);
++ if (params_info == NULL) {
++ PRINT_ERROR("Unable to allocate params info (size %zd)",
++ sizeof(*params_info));
++ err = -ENOMEM;
++ goto out_err_unlock;
++ }
++
++ sid = *(union iscsi_sid *)&info->sid;
++ sid.id.tsih = 0;
++ old_sess = NULL;
++
++ /*
++ * We need to find the latest session to correctly handle
++ * multi-reinstatements
++ */
++ list_for_each_entry_reverse(sess, &target->session_list,
++ session_list_entry) {
++ union iscsi_sid s = *(union iscsi_sid *)&sess->sid;
++ s.id.tsih = 0;
++ if ((sid.id64 == s.id64) &&
++ (strcmp(info->initiator_name, sess->initiator_name) == 0)) {
++ if (!sess->sess_shutting_down) {
++ /* session reinstatement */
++ old_sess = sess;
++ }
++ break;
++ }
++ }
++ sess = NULL;
++
++ list_add_tail(&new_sess->session_list_entry, &target->session_list);
++
++ memset(params_info, 0, sizeof(*params_info));
++ params_info->tid = target->tid;
++ params_info->sid = info->sid;
++ params_info->params_type = key_session;
++ for (i = 0; i < session_key_last; i++)
++ params_info->session_params[i] = info->session_params[i];
++
++ err = iscsi_params_set(target, params_info, 1);
++ if (err != 0)
++ goto out_del;
++
++ memset(params_info, 0, sizeof(*params_info));
++ params_info->tid = target->tid;
++ params_info->sid = info->sid;
++ params_info->params_type = key_target;
++ for (i = 0; i < target_key_last; i++)
++ params_info->target_params[i] = info->target_params[i];
++
++ err = iscsi_params_set(target, params_info, 1);
++ if (err != 0)
++ goto out_del;
++
++ kfree(params_info);
++ params_info = NULL;
++
++ if (old_sess != NULL) {
++ reinstatement = true;
++
++ TRACE_MGMT_DBG("Reinstating sess %p with SID %llx (old %p, "
++ "SID %llx)", new_sess, new_sess->sid, old_sess,
++ old_sess->sid);
++
++ new_sess->sess_reinstating = 1;
++ old_sess->sess_reinst_successor = new_sess;
++
++ target_del_session(old_sess->target, old_sess, 0);
++ }
++
++ mutex_unlock(&target->target_mutex);
++
++ if (reinstatement) {
++ /*
++ * Mutex target_mgmt_mutex won't allow to add connections to
++ * the new session after target_mutex was dropped, so it's safe
++ * to replace the initial UA without it. We can't do it under
++ * target_mutex, because otherwise we can establish a
++ * circular locking dependency between target_mutex and
++ * scst_mutex in SCST core (iscsi_report_aen() called by
++ * SCST core under scst_mutex).
++ */
++ scst_set_initial_UA(new_sess->scst_sess,
++ SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
++ }
++
++out:
++ return err;
++
++out_del:
++ list_del(&new_sess->session_list_entry);
++ kfree(params_info);
++
++out_err_unlock:
++ mutex_unlock(&target->target_mutex);
++
++ scst_unregister_session(new_sess->scst_sess, 1, NULL);
++ new_sess->scst_sess = NULL;
++
++ mutex_lock(&target->target_mutex);
++ session_free(new_sess, false);
++ mutex_unlock(&target->target_mutex);
++ goto out;
++}
++
++static void __session_free(struct iscsi_session *session)
++{
++ kfree(session->initiator_name);
++ kfree(session);
++}
++
++static void iscsi_unreg_sess_done(struct scst_session *scst_sess)
++{
++ struct iscsi_session *session;
++
++ TRACE_ENTRY();
++
++ session = (struct iscsi_session *)scst_sess_get_tgt_priv(scst_sess);
++
++ session->scst_sess = NULL;
++ __session_free(session);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* target_mutex supposed to be locked */
++int session_free(struct iscsi_session *session, bool del)
++{
++ unsigned int i;
++
++ TRACE_MGMT_DBG("Freeing session %p (SID %llx)",
++ session, session->sid);
++
++ BUG_ON(!list_empty(&session->conn_list));
++ if (unlikely(atomic_read(&session->active_cmds) != 0)) {
++ PRINT_CRIT_ERROR("active_cmds not 0 (%d)!!",
++ atomic_read(&session->active_cmds));
++ BUG();
++ }
++
++ for (i = 0; i < ARRAY_SIZE(session->cmnd_data_wait_hash); i++)
++ BUG_ON(!list_empty(&session->cmnd_data_wait_hash[i]));
++
++ if (session->sess_reinst_successor != NULL)
++ sess_reinst_finished(session->sess_reinst_successor);
++
++ if (session->sess_reinstating) {
++ struct iscsi_session *s;
++ TRACE_MGMT_DBG("Freeing being reinstated sess %p", session);
++ list_for_each_entry(s, &session->target->session_list,
++ session_list_entry) {
++ if (s->sess_reinst_successor == session) {
++ s->sess_reinst_successor = NULL;
++ break;
++ }
++ }
++ }
++
++ if (del)
++ list_del(&session->session_list_entry);
++
++ if (session->scst_sess != NULL) {
++ /*
++ * We must NOT call scst_unregister_session() in the waiting
++ * mode, since we are under target_mutex. Otherwise we can
++ * establish a circular locking dependency between target_mutex
++ * and scst_mutex in SCST core (iscsi_report_aen() called by
++ * SCST core under scst_mutex).
++ */
++ scst_unregister_session(session->scst_sess, 0,
++ iscsi_unreg_sess_done);
++ } else
++ __session_free(session);
++
++ return 0;
++}
++
++/* target_mutex supposed to be locked */
++int __del_session(struct iscsi_target *target, u64 sid)
++{
++ struct iscsi_session *session;
++
++ session = session_lookup(target, sid);
++ if (!session)
++ return -ENOENT;
++
++ if (!list_empty(&session->conn_list)) {
++ PRINT_ERROR("%llx still have connections",
++ (long long unsigned int)session->sid);
++ return -EBUSY;
++ }
++
++ return session_free(session, true);
++}
++
++#define ISCSI_SESS_BOOL_PARAM_ATTR(name, exported_name) \
++static ssize_t iscsi_sess_show_##name(struct kobject *kobj, \
++ struct kobj_attribute *attr, char *buf) \
++{ \
++ int pos; \
++ struct scst_session *scst_sess; \
++ struct iscsi_session *sess; \
++ \
++ scst_sess = container_of(kobj, struct scst_session, sess_kobj); \
++ sess = (struct iscsi_session *)scst_sess_get_tgt_priv(scst_sess); \
++ \
++ pos = sprintf(buf, "%s\n", \
++ iscsi_get_bool_value(sess->sess_params.name)); \
++ \
++ return pos; \
++} \
++ \
++static struct kobj_attribute iscsi_sess_attr_##name = \
++ __ATTR(exported_name, S_IRUGO, iscsi_sess_show_##name, NULL);
++
++#define ISCSI_SESS_INT_PARAM_ATTR(name, exported_name) \
++static ssize_t iscsi_sess_show_##name(struct kobject *kobj, \
++ struct kobj_attribute *attr, char *buf) \
++{ \
++ int pos; \
++ struct scst_session *scst_sess; \
++ struct iscsi_session *sess; \
++ \
++ scst_sess = container_of(kobj, struct scst_session, sess_kobj); \
++ sess = (struct iscsi_session *)scst_sess_get_tgt_priv(scst_sess); \
++ \
++ pos = sprintf(buf, "%d\n", sess->sess_params.name); \
++ \
++ return pos; \
++} \
++ \
++static struct kobj_attribute iscsi_sess_attr_##name = \
++ __ATTR(exported_name, S_IRUGO, iscsi_sess_show_##name, NULL);
++
++#define ISCSI_SESS_DIGEST_PARAM_ATTR(name, exported_name) \
++static ssize_t iscsi_sess_show_##name(struct kobject *kobj, \
++ struct kobj_attribute *attr, char *buf) \
++{ \
++ int pos; \
++ struct scst_session *scst_sess; \
++ struct iscsi_session *sess; \
++ char digest_name[64]; \
++ \
++ scst_sess = container_of(kobj, struct scst_session, sess_kobj); \
++ sess = (struct iscsi_session *)scst_sess_get_tgt_priv(scst_sess); \
++ \
++ pos = sprintf(buf, "%s\n", iscsi_get_digest_name( \
++ sess->sess_params.name, digest_name)); \
++ \
++ return pos; \
++} \
++ \
++static struct kobj_attribute iscsi_sess_attr_##name = \
++ __ATTR(exported_name, S_IRUGO, iscsi_sess_show_##name, NULL);
++
++ISCSI_SESS_BOOL_PARAM_ATTR(initial_r2t, InitialR2T);
++ISCSI_SESS_BOOL_PARAM_ATTR(immediate_data, ImmediateData);
++ISCSI_SESS_INT_PARAM_ATTR(max_recv_data_length, MaxRecvDataSegmentLength);
++ISCSI_SESS_INT_PARAM_ATTR(max_xmit_data_length, MaxXmitDataSegmentLength);
++ISCSI_SESS_INT_PARAM_ATTR(max_burst_length, MaxBurstLength);
++ISCSI_SESS_INT_PARAM_ATTR(first_burst_length, FirstBurstLength);
++ISCSI_SESS_INT_PARAM_ATTR(max_outstanding_r2t, MaxOutstandingR2T);
++ISCSI_SESS_DIGEST_PARAM_ATTR(header_digest, HeaderDigest);
++ISCSI_SESS_DIGEST_PARAM_ATTR(data_digest, DataDigest);
++
++static ssize_t iscsi_sess_sid_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos;
++ struct scst_session *scst_sess;
++ struct iscsi_session *sess;
++
++ TRACE_ENTRY();
++
++ scst_sess = container_of(kobj, struct scst_session, sess_kobj);
++ sess = (struct iscsi_session *)scst_sess_get_tgt_priv(scst_sess);
++
++ pos = sprintf(buf, "%llx\n", sess->sid);
++
++ TRACE_EXIT_RES(pos);
++ return pos;
++}
++
++static struct kobj_attribute iscsi_attr_sess_sid =
++ __ATTR(sid, S_IRUGO, iscsi_sess_sid_show, NULL);
++
++static ssize_t iscsi_sess_reinstating_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos;
++ struct scst_session *scst_sess;
++ struct iscsi_session *sess;
++
++ TRACE_ENTRY();
++
++ scst_sess = container_of(kobj, struct scst_session, sess_kobj);
++ sess = (struct iscsi_session *)scst_sess_get_tgt_priv(scst_sess);
++
++ pos = sprintf(buf, "%d\n", sess->sess_reinstating ? 1 : 0);
++
++ TRACE_EXIT_RES(pos);
++ return pos;
++}
++
++static struct kobj_attribute iscsi_sess_attr_reinstating =
++ __ATTR(reinstating, S_IRUGO, iscsi_sess_reinstating_show, NULL);
++
++static ssize_t iscsi_sess_force_close_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ struct scst_session *scst_sess;
++ struct iscsi_session *sess;
++ struct iscsi_conn *conn;
++
++ TRACE_ENTRY();
++
++ scst_sess = container_of(kobj, struct scst_session, sess_kobj);
++ sess = (struct iscsi_session *)scst_sess_get_tgt_priv(scst_sess);
++
++ if (mutex_lock_interruptible(&sess->target->target_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ PRINT_INFO("Deleting session %llx with initiator %s (%p)",
++ (long long unsigned int)sess->sid, sess->initiator_name, sess);
++
++ list_for_each_entry(conn, &sess->conn_list, conn_list_entry) {
++ TRACE_MGMT_DBG("Deleting connection with initiator %p", conn);
++ __mark_conn_closed(conn, ISCSI_CONN_ACTIVE_CLOSE|ISCSI_CONN_DELETING);
++ }
++
++ mutex_unlock(&sess->target->target_mutex);
++
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct kobj_attribute iscsi_sess_attr_force_close =
++ __ATTR(force_close, S_IWUSR, NULL, iscsi_sess_force_close_store);
++
++const struct attribute *iscsi_sess_attrs[] = {
++ &iscsi_sess_attr_initial_r2t.attr,
++ &iscsi_sess_attr_immediate_data.attr,
++ &iscsi_sess_attr_max_recv_data_length.attr,
++ &iscsi_sess_attr_max_xmit_data_length.attr,
++ &iscsi_sess_attr_max_burst_length.attr,
++ &iscsi_sess_attr_first_burst_length.attr,
++ &iscsi_sess_attr_max_outstanding_r2t.attr,
++ &iscsi_sess_attr_header_digest.attr,
++ &iscsi_sess_attr_data_digest.attr,
++ &iscsi_attr_sess_sid.attr,
++ &iscsi_sess_attr_reinstating.attr,
++ &iscsi_sess_attr_force_close.attr,
++ NULL,
++};
++
+diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/target.c linux-2.6.36/drivers/scst/iscsi-scst/target.c
+--- orig/linux-2.6.36/drivers/scst/iscsi-scst/target.c
++++ linux-2.6.36/drivers/scst/iscsi-scst/target.c
+@@ -0,0 +1,533 @@
++/*
++ * Copyright (C) 2002 - 2003 Ardis Technologies <roman@ardistech.com>
++ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/delay.h>
++
++#include "iscsi.h"
++#include "digest.h"
++
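++/* Sanity limit on the number of configured targets, checked in __add_target() */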
++#define MAX_NR_TARGETS (1UL << 30)
++
++DEFINE_MUTEX(target_mgmt_mutex);
++
++/* All 3 protected by target_mgmt_mutex */
++static LIST_HEAD(target_list);
++static u32 next_target_id;
++static u32 nr_targets;
++
++/* target_mgmt_mutex supposed to be locked */
++struct iscsi_target *target_lookup_by_id(u32 id)
++{
++ struct iscsi_target *target;
++
++ list_for_each_entry(target, &target_list, target_list_entry) {
++ if (target->tid == id)
++ return target;
++ }
++ return NULL;
++}
++
++/* target_mgmt_mutex supposed to be locked */
++static struct iscsi_target *target_lookup_by_name(const char *name)
++{
++ struct iscsi_target *target;
++
++ list_for_each_entry(target, &target_list, target_list_entry) {
++ if (!strcmp(target->name, name))
++ return target;
++ }
++ return NULL;
++}
++
++/* target_mgmt_mutex supposed to be locked */
++static int iscsi_target_create(struct iscsi_kern_target_info *info, u32 tid,
++ struct iscsi_target **out_target)
++{
++ int err = -EINVAL, len;
++ char *name = info->name;
++ struct iscsi_target *target;
++
++ TRACE_MGMT_DBG("Creating target tid %u, name %s", tid, name);
++
++ len = strlen(name);
++ if (!len) {
++		PRINT_ERROR("Target name is empty (tid %u)", tid);
++ goto out;
++ }
++
++ if (!try_module_get(THIS_MODULE)) {
++		PRINT_ERROR("Failed to get module reference (tid %u)", tid);
++ goto out;
++ }
++
++ target = kzalloc(sizeof(*target), GFP_KERNEL);
++ if (!target) {
++ err = -ENOMEM;
++ goto out_put;
++ }
++
++ target->tid = info->tid = tid;
++
++ strlcpy(target->name, name, sizeof(target->name));
++
++ mutex_init(&target->target_mutex);
++ INIT_LIST_HEAD(&target->session_list);
++ INIT_LIST_HEAD(&target->attrs_list);
++
++ target->scst_tgt = scst_register_target(&iscsi_template, target->name);
++ if (!target->scst_tgt) {
++ PRINT_ERROR("%s", "scst_register_target() failed");
++ err = -EBUSY;
++ goto out_free;
++ }
++
++ scst_tgt_set_tgt_priv(target->scst_tgt, target);
++
++ list_add_tail(&target->target_list_entry, &target_list);
++
++ *out_target = target;
++
++ return 0;
++
++out_free:
++ kfree(target);
++
++out_put:
++ module_put(THIS_MODULE);
++
++out:
++ return err;
++}
++
++/* target_mgmt_mutex supposed to be locked */
++int __add_target(struct iscsi_kern_target_info *info)
++{
++ int err;
++ u32 tid = info->tid;
++ struct iscsi_target *target = NULL; /* to calm down sparse */
++ struct iscsi_kern_params_info *params_info;
++ struct iscsi_kern_attr *attr_info;
++ union add_info_union {
++ struct iscsi_kern_params_info params_info;
++ struct iscsi_kern_attr attr_info;
++ } *add_info;
++ int i, rc;
++ unsigned long attrs_ptr_long;
++ struct iscsi_kern_attr __user *attrs_ptr;
++
++ if (nr_targets > MAX_NR_TARGETS) {
++ err = -EBUSY;
++ goto out;
++ }
++
++ if (target_lookup_by_name(info->name)) {
++		PRINT_ERROR("Target %s already exists!", info->name);
++ err = -EEXIST;
++ goto out;
++ }
++
++ if (tid && target_lookup_by_id(tid)) {
++		PRINT_ERROR("Target %u already exists!", tid);
++ err = -EEXIST;
++ goto out;
++ }
++
++ add_info = kmalloc(sizeof(*add_info), GFP_KERNEL);
++ if (add_info == NULL) {
++ PRINT_ERROR("Unable to allocate additional info (size %zd)",
++ sizeof(*add_info));
++ err = -ENOMEM;
++ goto out;
++ }
++ params_info = (struct iscsi_kern_params_info *)add_info;
++ attr_info = (struct iscsi_kern_attr *)add_info;
++
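++	/* tid 0 means auto-assign: pick the next unused non-zero target id */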
++ if (tid == 0) {
++ do {
++ if (!++next_target_id)
++ ++next_target_id;
++ } while (target_lookup_by_id(next_target_id));
++
++ tid = next_target_id;
++ }
++
++ err = iscsi_target_create(info, tid, &target);
++ if (err != 0)
++ goto out_free;
++
++ nr_targets++;
++
++ mutex_lock(&target->target_mutex);
++
++ attrs_ptr_long = info->attrs_ptr;
++ attrs_ptr = (struct iscsi_kern_attr __user *)attrs_ptr_long;
++ for (i = 0; i < info->attrs_num; i++) {
++ memset(attr_info, 0, sizeof(*attr_info));
++
++ rc = copy_from_user(attr_info, attrs_ptr, sizeof(*attr_info));
++ if (rc != 0) {
++			PRINT_ERROR("Failed to copy attribute of target %s "
++				"from user space", info->name);
++ err = -EFAULT;
++ goto out_del_unlock;
++ }
++
++ attr_info->name[sizeof(attr_info->name)-1] = '\0';
++
++ err = iscsi_add_attr(target, attr_info);
++ if (err != 0)
++ goto out_del_unlock;
++
++ attrs_ptr++;
++ }
++
++ mutex_unlock(&target->target_mutex);
++
++ err = tid;
++
++out_free:
++ kfree(add_info);
++
++out:
++ return err;
++
++out_del_unlock:
++ mutex_unlock(&target->target_mutex);
++ __del_target(tid);
++ goto out_free;
++}
++
++static void target_destroy(struct iscsi_target *target)
++{
++ struct iscsi_attr *attr, *t;
++
++ TRACE_MGMT_DBG("Destroying target tid %u", target->tid);
++
++ list_for_each_entry_safe(attr, t, &target->attrs_list,
++ attrs_list_entry) {
++ __iscsi_del_attr(target, attr);
++ }
++
++ scst_unregister_target(target->scst_tgt);
++
++ kfree(target);
++
++ module_put(THIS_MODULE);
++ return;
++}
++
++/* target_mgmt_mutex supposed to be locked */
++int __del_target(u32 id)
++{
++ struct iscsi_target *target;
++ int err;
++
++ target = target_lookup_by_id(id);
++ if (!target) {
++ err = -ENOENT;
++ goto out;
++ }
++
++ mutex_lock(&target->target_mutex);
++
++ if (!list_empty(&target->session_list)) {
++ err = -EBUSY;
++ goto out_unlock;
++ }
++
++ list_del(&target->target_list_entry);
++ nr_targets--;
++
++ mutex_unlock(&target->target_mutex);
++
++ target_destroy(target);
++ return 0;
++
++out_unlock:
++ mutex_unlock(&target->target_mutex);
++
++out:
++ return err;
++}
++
++/* target_mutex supposed to be locked */
++void target_del_session(struct iscsi_target *target,
++ struct iscsi_session *session, int flags)
++{
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("Deleting session %p", session);
++
++ if (!list_empty(&session->conn_list)) {
++ struct iscsi_conn *conn, *tc;
++ list_for_each_entry_safe(conn, tc, &session->conn_list,
++ conn_list_entry) {
++ TRACE_MGMT_DBG("Mark conn %p closing", conn);
++ __mark_conn_closed(conn, flags);
++ }
++ } else {
++ TRACE_MGMT_DBG("Freeing session %p without connections",
++ session);
++ __del_session(target, session->sid);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/* target_mutex supposed to be locked */
++void target_del_all_sess(struct iscsi_target *target, int flags)
++{
++ struct iscsi_session *session, *ts;
++
++ TRACE_ENTRY();
++
++ if (!list_empty(&target->session_list)) {
++ TRACE_MGMT_DBG("Deleting all sessions from target %p", target);
++ list_for_each_entry_safe(session, ts, &target->session_list,
++ session_list_entry) {
++ target_del_session(target, session, flags);
++ }
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++void target_del_all(void)
++{
++ struct iscsi_target *target, *t;
++ bool first = true;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("%s", "Deleting all targets");
++
++ /* Not the best, ToDo */
++ while (1) {
++ mutex_lock(&target_mgmt_mutex);
++
++ if (list_empty(&target_list))
++ break;
++
++ /*
++		 * In the first iteration we don't delete targets; instead we
++		 * first walk all sessions of all targets and close their
++		 * connections. Otherwise we could get stuck for a noticeable
++		 * time during a target's unregistration, waiting for its
++		 * activities to be suspended over an active connection. This
++		 * gets especially bad if such a connection is itself stuck
++		 * waiting for something and can only be recovered by closing
++		 * it. For such cases, don't wait for the connection to
++		 * recover by itself, but act in advance.
++ */
++
++ list_for_each_entry_safe(target, t, &target_list,
++ target_list_entry) {
++ mutex_lock(&target->target_mutex);
++
++ if (!list_empty(&target->session_list)) {
++ target_del_all_sess(target,
++ ISCSI_CONN_ACTIVE_CLOSE |
++ ISCSI_CONN_DELETING);
++ } else if (!first) {
++ TRACE_MGMT_DBG("Deleting target %p", target);
++ list_del(&target->target_list_entry);
++ nr_targets--;
++ mutex_unlock(&target->target_mutex);
++ target_destroy(target);
++ continue;
++ }
++
++ mutex_unlock(&target->target_mutex);
++ }
++ mutex_unlock(&target_mgmt_mutex);
++ msleep(100);
++
++ first = false;
++ }
++
++ mutex_unlock(&target_mgmt_mutex);
++
++ TRACE_MGMT_DBG("%s", "Deleting all targets finished");
++
++ TRACE_EXIT();
++ return;
++}
++
++static ssize_t iscsi_tgt_tid_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos;
++ struct scst_tgt *scst_tgt;
++ struct iscsi_target *tgt;
++
++ TRACE_ENTRY();
++
++ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ tgt = (struct iscsi_target *)scst_tgt_get_tgt_priv(scst_tgt);
++
++ pos = sprintf(buf, "%u\n", tgt->tid);
++
++ TRACE_EXIT_RES(pos);
++ return pos;
++}
++
++static struct kobj_attribute iscsi_tgt_attr_tid =
++ __ATTR(tid, S_IRUGO, iscsi_tgt_tid_show, NULL);
++
++const struct attribute *iscsi_tgt_attrs[] = {
++ &iscsi_tgt_attr_tid.attr,
++ NULL,
++};
++
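++/*
++ * Sends an event to the user space part (iscsi-scstd) and waits for it to
++ * be processed; optionally returns data supplied by user space through
++ * *data.
++ */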
++ssize_t iscsi_sysfs_send_event(uint32_t tid, enum iscsi_kern_event_code code,
++ const char *param1, const char *param2, void **data)
++{
++ int res;
++ struct scst_sysfs_user_info *info;
++
++ TRACE_ENTRY();
++
++ if (ctr_open_state != ISCSI_CTR_OPEN_STATE_OPEN) {
++ PRINT_ERROR("%s", "User space process not connected");
++ res = -EPERM;
++ goto out;
++ }
++
++ res = scst_sysfs_user_add_info(&info);
++ if (res != 0)
++ goto out;
++
++ TRACE_DBG("Sending event %d (tid %d, param1 %s, param2 %s, cookie %d, "
++ "info %p)", tid, code, param1, param2, info->info_cookie, info);
++
++ res = event_send(tid, 0, 0, info->info_cookie, code, param1, param2);
++ if (res <= 0) {
++ PRINT_ERROR("event_send() failed: %d", res);
++ if (res == 0)
++ res = -EFAULT;
++ goto out_free;
++ }
++
++ /*
++	 * A blocking connect to an unreachable iSNS server may take up to
++	 * 30 seconds. This must be fixed, but not now. ToDo.
++ */
++ res = scst_wait_info_completion(info, 31 * HZ);
++
++ if (data != NULL)
++ *data = info->data;
++
++out_free:
++ scst_sysfs_user_del_info(info);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++int iscsi_enable_target(struct scst_tgt *scst_tgt, bool enable)
++{
++ struct iscsi_target *tgt =
++ (struct iscsi_target *)scst_tgt_get_tgt_priv(scst_tgt);
++ int res;
++ uint32_t type;
++
++ TRACE_ENTRY();
++
++ if (enable)
++ type = E_ENABLE_TARGET;
++ else
++ type = E_DISABLE_TARGET;
++
++ TRACE_DBG("%s target %d", enable ? "Enabling" : "Disabling", tgt->tid);
++
++ res = iscsi_sysfs_send_event(tgt->tid, type, NULL, NULL, NULL);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++bool iscsi_is_target_enabled(struct scst_tgt *scst_tgt)
++{
++ struct iscsi_target *tgt =
++ (struct iscsi_target *)scst_tgt_get_tgt_priv(scst_tgt);
++
++ return tgt->tgt_enabled;
++}
++
++ssize_t iscsi_sysfs_add_target(const char *target_name, char *params)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ res = iscsi_sysfs_send_event(0, E_ADD_TARGET, target_name,
++ params, NULL);
++ if (res > 0) {
++ /* It's tid */
++ res = 0;
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++ssize_t iscsi_sysfs_del_target(const char *target_name)
++{
++ int res = 0, tid;
++
++ TRACE_ENTRY();
++
++ /* We don't want to have tgt visible after the mutex unlock */
++ {
++ struct iscsi_target *tgt;
++ mutex_lock(&target_mgmt_mutex);
++ tgt = target_lookup_by_name(target_name);
++ if (tgt == NULL) {
++ PRINT_ERROR("Target %s not found", target_name);
++ mutex_unlock(&target_mgmt_mutex);
++ res = -ENOENT;
++ goto out;
++ }
++ tid = tgt->tid;
++ mutex_unlock(&target_mgmt_mutex);
++ }
++
++ TRACE_DBG("Deleting target %s (tid %d)", target_name, tid);
++
++ res = iscsi_sysfs_send_event(tid, E_DEL_TARGET, NULL, NULL, NULL);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++ssize_t iscsi_sysfs_mgmt_cmd(char *cmd)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Sending mgmt cmd %s", cmd);
++
++ res = iscsi_sysfs_send_event(0, E_MGMT_CMD, cmd, NULL, NULL);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
+diff -uprN orig/linux-2.6.36/Documentation/scst/README.iscsi linux-2.6.36/Documentation/scst/README.iscsi
+--- orig/linux-2.6.36/Documentation/scst/README.iscsi
++++ linux-2.6.36/Documentation/scst/README.iscsi
+@@ -0,0 +1,741 @@
++iSCSI SCST target driver
++========================
++
++ISCSI-SCST is a deeply reworked fork of the iSCSI Enterprise Target (IET)
++(http://iscsitarget.sourceforge.net). The reasons for the fork were:
++
++ - To be able to use the full power of the SCST core.
++
++ - To fix all the problems, corner case issues and iSCSI standard
++   violations which IET has.
++
++See http://iscsi-scst.sourceforge.net for more info.
++
++Usage
++-----
++
++See http://iscsi-scst.sourceforge.net/iscsi-scst-howto.txt for how to
++configure iSCSI-SCST.
++
++If you want to use Intel CRC32 offload and have the corresponding hardware,
++you should load the crc32c-intel module. iSCSI-SCST will then do all digest
++calculations using this facility.
++
++As of 2.0.0, usage of iscsi-scstd.conf as well as the iscsi-scst-adm utility
++is obsolete. Use the sysfs interface facilities instead.
++
++The flow of iSCSI-SCST initialization should be as follows:
++
++1. Load the SCST and iSCSI-SCST kernel modules with the necessary module
++parameters, if any.
++
++2. Start the iSCSI-SCST service.
++
++3. Configure targets, devices, LUNs, etc. either using scstadmin
++(recommended), or using the sysfs interface directly as described below.
++
++It is recommended to use the TEST UNIT READY ("tur") command to check if an
++iSCSI-SCST target is alive in MPIO configurations.
++
++Also see the SCST README file for how to tune for the best performance.
++
++CAUTION: Running the target and an initiator on the same host isn't fully
++======= supported. See the SCST README file for details.
++
++Sysfs interface
++---------------
++
++The root of the SCST sysfs interface is /sys/kernel/scst_tgt. The root of
++iSCSI-SCST is /sys/kernel/scst_tgt/targets/iscsi. It has the following entries:
++
++ - Zero or more subdirectories for targets, each named after the
++   corresponding target.
++
++ - IncomingUser[num] - one or more optional attributes containing the user
++   name and password for incoming discovery authentication. They do not
++   exist by default and can be added through the "mgmt" entry, see below.
++
++ - OutgoingUser - optional attribute containing the user name and password
++   for outgoing discovery authentication. It does not exist by default and
++   can be added through the "mgmt" entry, see below.
++
++ - iSNSServer - contains the name or IP address of the iSNS server, with an
++   optional "AccessControl" attribute which enables iSNS access control.
++   Empty by default.
++
++ - allowed_portal[num] - optional attribute which specifies on which
++   portals (target's IP addresses) this target will be available. If not
++   specified (the default), the target will be available on all portals.
++   As soon as at least one allowed_portal is specified, the target will be
++   accessible for initiators only on the specified portals. There can be
++   any number of allowed_portal attributes. The portal specification in an
++   allowed_portal attribute can be a simple DOS-type pattern, containing
++   the '*' and '?' symbols: '*' matches any number of any symbols, '?'
++   matches exactly one symbol. For instance, "10.170.77.2" will match
++   "10.170.7?.*". Additionally, you can use the negation sign '!' to invert
++   the pattern. For instance, "10.170.67.2" will match "!10.170.7?.*". See
++   the examples below.
++
++ - enabled - using this attribute you can enable or disable acceptance of
++   new connections by iSCSI-SCST. It allows you to finish configuring the
++   global iSCSI-SCST attributes before it starts accepting new connections.
++   0 by default.
++
++ - open_state - read-only attribute, which shows whether the user space
++   part of iSCSI-SCST is connected to the kernel part.
++
++ - per_portal_acl - if set, makes iSCSI-SCST work in the per-portal
++   access control mode. In this mode iSCSI-SCST registers all initiators
++   in the SCST core using the "initiator_name#portal_IP_address" pattern,
++   like "iqn.2006-10.net.vlnb:ini#10.170.77.2" for initiator
++   iqn.2006-10.net.vlnb:ini connected through portal 10.170.77.2. This mode
++   allows restricting particular initiators to particular portals on the
++   target, so they don't see and can't connect through the others. See
++   below for more details.
++
++ - trace_level - allows enabling and disabling various tracing
++   facilities. See the content of this file for help on how to use it.
++
++ - version - read-only attribute, which shows the version of iSCSI-SCST
++   and the enabled optional features.
++
++ - mgmt - the main management entry, which allows configuring iSCSI-SCST,
++   namely adding/deleting targets as well as adding/deleting optional
++   global and per-target attributes. See the content of this file for help
++   on how to use it.
++
++Each iSCSI-SCST sysfs file (attribute) can contain the mark "[key]" in its
++last line. It is an automatically added mark used to let scstadmin see
++which attributes it should save in the config file. You can ignore it.
++
++Each target subdirectory contains the following entries:
++
++ - ini_groups - subdirectory defining initiator groups for this target,
++ used to define per-initiator access control. See SCST core README for
++ more details.
++
++ - luns - subdirectory defining LUNs of this target. See SCST core
++ README for more details.
++
++ - sessions - subdirectory containing the sessions connected to this target.
++
++ - IncomingUser[num] - one or more optional attributes containing the user
++   name and password for incoming authentication. They do not exist by
++   default and can be added through the "mgmt" entry, see above.
++
++ - OutgoingUser - optional attribute containing the user name and password
++   for outgoing authentication. It does not exist by default and can be
++   added through the "mgmt" entry, see above.
++
++ - Entries defining the default iSCSI parameter values used during iSCSI
++   parameter negotiation. Only entries which can be changed or make
++   sense are listed there.
++
++ - QueuedCommands - defines the maximum number of commands queued to any
++   session of this target. The default is 32 commands.
++
++ - RspTimeout - defines the maximum time in seconds a command can wait for
++   a response from the initiator, otherwise the corresponding connection
++   will be closed. For performance reasons it is implemented as a timer,
++   which once per RspTimeout checks the oldest command waiting for a
++   response and, if it's older than RspTimeout, closes the connection.
++   Hence, a stalled connection will be closed somewhere between RspTimeout
++   and 2*RspTimeout. The default is 30 seconds. See the example after this
++   list.
++
++ - NopInInterval - defines the interval between NOP-In requests, which the
++   target will send on idle connections to check if the initiator is
++   still alive. If there is no NOP-Out reply from the initiator within
++   RspTimeout, the corresponding connection will be closed. The default
++   is 30 seconds. If it's set to 0, NOP-In requests are disabled.
++
++ - enabled - using this attribute you can enable or disable acceptance of
++   new connections to this target. It allows you to finish configuring it
++   before it starts accepting new connections. 0 by default.
++
++ - rel_tgt_id - allows reading or writing the SCSI Relative Target Port
++   Identifier attribute. This identifier is used to identify SCSI Target
++   Ports by some SCSI commands, mainly by Persistent Reservations
++   commands. This identifier must be unique among all SCST targets, but
++   for convenience SCST allows disabled targets to have a non-unique
++   rel_tgt_id. In this case SCST will not allow enabling this target
++   until its rel_tgt_id becomes unique. This attribute is initialized to
++   a unique value by SCST by default.
++
++ - redirect - allows temporarily or permanently redirecting login to the
++   target to another portal. Discovery sessions will not be impacted,
++   but normal sessions will be redirected before security negotiation.
++   The destination should be specified using the format "<ip_addr>[:port] temp|perm".
++   IPv6 addresses need to be enclosed in [] brackets. To remove the
++   redirection, provide an empty string. For example:
++   echo "10.170.77.2:32600 temp" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/redirect
++   will temporarily redirect login to portal 10.170.77.2 and port 32600.
++
++ - tid - TID of this target.
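++
++For example, the RspTimeout and NopInInterval timers described above can be
++tuned like this (a sketch assuming a target named iqn.2006-10.net.vlnb:tgt
++and that these attributes are writable in your build; adjust the name and
++values to your setup):
++
++echo 90 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/RspTimeout
++echo 10 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/NopInInterval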
++
++The "sessions" subdirectory contains one subdirectory for each connected
++session, named after the connected initiator.
++
++Each session subdirectory contains the following entries:
++
++ - One subdirectory for each TCP connection in this session. ISCSI-SCST
++   supports 1 connection per session, but the session subdirectory can
++   contain several connections: one active and the others being closed.
++
++ - Entries defining negotiated iSCSI parameters. Only parameters which
++ can be changed or make sense are listed there.
++
++ - initiator_name - contains initiator name
++
++ - sid - contains SID of this session
++
++ - reinstating - contains the reinstatement state of this session
++
++ - force_close - write-only attribute, which allows forcing this session
++   closed. This is the only writable session attribute. See the example
++   after this list.
++
++ - active_commands - contains the number of active SCSI commands in this
++   session, i.e. those not yet executed or still being executed.
++
++ - commands - contains the overall number of SCSI commands in this session.
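++
++For example, such a session can be forcibly closed like this (a sketch
++assuming initiator iqn.2005-03.org.open-iscsi:cacdcd2520 is logged in to
++target iqn.2006-10.net.vlnb:tgt; adjust the names to your setup):
++
++echo 1 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/sessions/iqn.2005-03.org.open-iscsi:cacdcd2520/force_close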
++
++Each connection subdirectory contains the following entries:
++
++ - cid - contains CID of this connection.
++
++ - ip - contains IP address of the connected initiator.
++
++ - state - contains processing state of this connection.
++
++Below is a sample script, which configures 1 virtual disk "disk1" using
++the /disk1 image and one target iqn.2006-10.net.vlnb:tgt with all default
++parameters:
++
++#!/bin/bash
++
++modprobe scst
++modprobe scst_vdisk
++
++echo "add_device disk1 filename=/disk1; nv_cache=1" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
++
++service iscsi-scst start
++
++echo "add_target iqn.2006-10.net.vlnb:tgt" >/sys/kernel/scst_tgt/targets/iscsi/mgmt
++echo "add disk1 0" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/luns/mgmt
++
++echo 1 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/enabled
++echo 1 >/sys/kernel/scst_tgt/targets/iscsi/enabled
++
++Below is another sample script, which configures 1 real local SCSI disk
++0:0:1:0 and one target iqn.2006-10.net.vlnb:tgt with all default parameters:
++
++#!/bin/bash
++
++modprobe scst
++modprobe scst_disk
++
++echo "add_device 0:0:1:0" >/sys/kernel/scst_tgt/handlers/dev_disk/mgmt
++
++service iscsi-scst start
++
++echo "add_target iqn.2006-10.net.vlnb:tgt" >/sys/kernel/scst_tgt/targets/iscsi/mgmt
++echo "add 0:0:1:0 0" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/luns/mgmt
++
++echo 1 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/enabled
++echo 1 >/sys/kernel/scst_tgt/targets/iscsi/enabled
++
++Below is an advanced sample script, which configures more virtual
++devices of various types, including a virtual CDROM, and 2 targets, one
++with all default parameters, the other with some non-default
++parameters, incoming and outgoing user names for CHAP authentication,
++and special permissions for initiator iqn.2005-03.org.open-iscsi:cacdcd2520,
++which will see another set of devices. This sample also configures CHAP
++authentication for discovery sessions and an iSNS server with access
++control.
++
++#!/bin/bash
++
++modprobe scst
++modprobe scst_vdisk
++
++echo "add_device disk1 filename=/disk1; nv_cache=1" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
++echo "add_device disk2 filename=/disk2; blocksize=4096; nv_cache=1" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
++echo "add_device blockio filename=/dev/sda5" >/sys/kernel/scst_tgt/handlers/vdisk_blockio/mgmt
++echo "add_device nullio" >/sys/kernel/scst_tgt/handlers/vdisk_nullio/mgmt
++echo "add_device cdrom" >/sys/kernel/scst_tgt/handlers/vcdrom/mgmt
++
++service iscsi-scst start
++
++echo "192.168.1.16 AccessControl" >/sys/kernel/scst_tgt/targets/iscsi/iSNSServer
++echo "add_attribute IncomingUser joeD 12charsecret" >/sys/kernel/scst_tgt/targets/iscsi/mgmt
++echo "add_attribute OutgoingUser jackD 12charsecret1" >/sys/kernel/scst_tgt/targets/iscsi/mgmt
++
++echo "add_target iqn.2006-10.net.vlnb:tgt" >/sys/kernel/scst_tgt/targets/iscsi/mgmt
++
++echo "add disk1 0" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/luns/mgmt
++echo "add cdrom 1" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/luns/mgmt
++
++echo "add_target iqn.2006-10.net.vlnb:tgt1" >/sys/kernel/scst_tgt/targets/iscsi/mgmt
++echo "add_target_attribute iqn.2006-10.net.vlnb:tgt1 IncomingUser1 joe2 12charsecret2" >/sys/kernel/scst_tgt/targets/iscsi/mgmt
++echo "add_target_attribute iqn.2006-10.net.vlnb:tgt1 IncomingUser joe 12charsecret" >/sys/kernel/scst_tgt/targets/iscsi/mgmt
++echo "add_target_attribute iqn.2006-10.net.vlnb:tgt1 OutgoingUser jim1 12charpasswd" >/sys/kernel/scst_tgt/targets/iscsi/mgmt
++echo "No" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/InitialR2T
++echo "Yes" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/ImmediateData
++echo "8192" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/MaxRecvDataSegmentLength
++echo "8192" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/MaxXmitDataSegmentLength
++echo "131072" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/MaxBurstLength
++echo "32768" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/FirstBurstLength
++echo "1" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/MaxOutstandingR2T
++echo "CRC32C,None" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/HeaderDigest
++echo "CRC32C,None" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/DataDigest
++echo "32" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/QueuedCommands
++
++echo "add disk2 0" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/luns/mgmt
++echo "add nullio 26" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/luns/mgmt
++
++echo "create special_ini" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/ini_groups/mgmt
++echo "add blockio 0 read_only=1" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/ini_groups/special_ini/luns/mgmt
++echo "add iqn.2005-03.org.open-iscsi:cacdcd2520" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/ini_groups/special_ini/initiators/mgmt
++
++echo 1 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/enabled
++echo 1 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/enabled
++
++echo 1 >/sys/kernel/scst_tgt/targets/iscsi/enabled
++
++The resulting overall SCST sysfs hierarchy with an initiator connected to
++both iSCSI-SCST targets will look like:
++
++/sys/kernel/scst_tgt
++|-- devices
++| |-- blockio
++| | |-- blocksize
++| | |-- exported
++| | | `-- export0 -> ../../../targets/iscsi/iqn.2006-10.net.vlnb:tgt1/ini_groups/special_ini/luns/0
++| | |-- filename
++| | |-- handler -> ../../handlers/vdisk_blockio
++| | |-- nv_cache
++| | |-- read_only
++| | |-- removable
++| | |-- resync_size
++| | |-- size_mb
++| | |-- t10_dev_id
++| | |-- threads_num
++| | |-- threads_pool_type
++| | |-- type
++| | `-- usn
++| |-- cdrom
++| | |-- exported
++| | | `-- export0 -> ../../../targets/iscsi/iqn.2006-10.net.vlnb:tgt/luns/1
++| | |-- filename
++| | |-- handler -> ../../handlers/vcdrom
++| | |-- size_mb
++| | |-- t10_dev_id
++| | |-- threads_num
++| | |-- threads_pool_type
++| | |-- type
++| | `-- usn
++| |-- disk1
++| | |-- blocksize
++| | |-- exported
++| | | `-- export0 -> ../../../targets/iscsi/iqn.2006-10.net.vlnb:tgt/luns/0
++| | |-- filename
++| | |-- handler -> ../../handlers/vdisk_fileio
++| | |-- nv_cache
++| | |-- o_direct
++| | |-- read_only
++| | |-- removable
++| | |-- resync_size
++| | |-- size_mb
++| | |-- t10_dev_id
++| | |-- type
++| | |-- usn
++| | `-- write_through
++| |-- disk2
++| | |-- blocksize
++| | |-- exported
++| | | `-- export0 -> ../../../targets/iscsi/iqn.2006-10.net.vlnb:tgt1/luns/0
++| | |-- filename
++| | |-- handler -> ../../handlers/vdisk_fileio
++| | |-- nv_cache
++| | |-- o_direct
++| | |-- read_only
++| | |-- removable
++| | |-- resync_size
++| | |-- size_mb
++| | |-- t10_dev_id
++| | |-- threads_num
++| | |-- threads_pool_type
++| | |-- type
++| | |-- usn
++| | `-- write_through
++| `-- nullio
++| |-- blocksize
++| |-- exported
++| | `-- export0 -> ../../../targets/iscsi/iqn.2006-10.net.vlnb:tgt1/luns/26
++| |-- handler -> ../../handlers/vdisk_nullio
++| |-- read_only
++| |-- removable
++| |-- size_mb
++| |-- t10_dev_id
++| |-- threads_num
++| |-- threads_pool_type
++| |-- type
++| `-- usn
++|-- handlers
++| |-- vcdrom
++| | |-- cdrom -> ../../devices/cdrom
++| | |-- mgmt
++| | |-- trace_level
++| | `-- type
++| |-- vdisk_blockio
++| | |-- blockio -> ../../devices/blockio
++| | |-- mgmt
++| | |-- trace_level
++| | `-- type
++| |-- vdisk_fileio
++| | |-- disk1 -> ../../devices/disk1
++| | |-- disk2 -> ../../devices/disk2
++| | |-- mgmt
++| | |-- trace_level
++| | `-- type
++| `-- vdisk_nullio
++| |-- mgmt
++| |-- nullio -> ../../devices/nullio
++| |-- trace_level
++| `-- type
++|-- sgv
++| |-- global_stats
++| |-- sgv
++| | `-- stats
++| |-- sgv-clust
++| | `-- stats
++| `-- sgv-dma
++| `-- stats
++|-- targets
++| `-- iscsi
++| |-- IncomingUser
++| |-- OutgoingUser
++| |-- enabled
++| |-- iSNSServer
++| |-- iqn.2006-10.net.vlnb:tgt
++| | |-- DataDigest
++| | |-- FirstBurstLength
++| | |-- HeaderDigest
++| | |-- ImmediateData
++| | |-- InitialR2T
++| | |-- MaxBurstLength
++| | |-- MaxOutstandingR2T
++| | |-- MaxRecvDataSegmentLength
++| | |-- MaxXmitDataSegmentLength
++| | |-- NopInInterval
++| | |-- QueuedCommands
++| | |-- RspTimeout
++| | |-- enabled
++| | |-- ini_groups
++| | | `-- mgmt
++| | |-- luns
++| | | |-- 0
++| | | | |-- device -> ../../../../../devices/disk1
++| | | | `-- read_only
++| | | |-- 1
++| | | | |-- device -> ../../../../../devices/cdrom
++| | | | `-- read_only
++| | | `-- mgmt
++| | |-- per_portal_acl
++| | |-- redirect
++| | |-- rel_tgt_id
++| | |-- sessions
++| | | `-- iqn.2005-03.org.open-iscsi:cacdcd2520
++| | | |-- 10.170.75.2
++| | | | |-- cid
++| | | | |-- ip
++| | | | `-- state
++| | | |-- DataDigest
++| | | |-- FirstBurstLength
++| | | |-- HeaderDigest
++| | | |-- ImmediateData
++| | | |-- InitialR2T
++| | | |-- MaxBurstLength
++| | | |-- MaxOutstandingR2T
++| | | |-- MaxRecvDataSegmentLength
++| | | |-- MaxXmitDataSegmentLength
++| | | |-- active_commands
++| | | |-- commands
++| | | |-- force_close
++| | | |-- initiator_name
++| | | |-- luns -> ../../luns
++| | | |-- reinstating
++| | | `-- sid
++| | `-- tid
++| |-- iqn.2006-10.net.vlnb:tgt1
++| | |-- DataDigest
++| | |-- FirstBurstLength
++| | |-- HeaderDigest
++| | |-- ImmediateData
++| | |-- IncomingUser
++| | |-- IncomingUser1
++| | |-- InitialR2T
++| | |-- MaxBurstLength
++| | |-- MaxOutstandingR2T
++| | |-- MaxRecvDataSegmentLength
++| | |-- MaxXmitDataSegmentLength
++| | |-- OutgoingUser
++| | |-- NopInInterval
++| | |-- QueuedCommands
++| | |-- RspTimeout
++| | |-- enabled
++| | |-- ini_groups
++| | | |-- mgmt
++| | | `-- special_ini
++| | | |-- initiators
++| | | | |-- iqn.2005-03.org.open-iscsi:cacdcd2520
++| | | | `-- mgmt
++| | | `-- luns
++| | | |-- 0
++| | | | |-- device -> ../../../../../../../devices/blockio
++| | | | `-- read_only
++| | | `-- mgmt
++| | |-- luns
++| | | |-- 0
++| | | | |-- device -> ../../../../../devices/disk2
++| | | | `-- read_only
++| | | |-- 26
++| | | | |-- device -> ../../../../../devices/nullio
++| | | | `-- read_only
++| | | `-- mgmt
++| | |-- per_portal_acl
++| | |-- redirect
++| | |-- rel_tgt_id
++| | |-- sessions
++| | | `-- iqn.2005-03.org.open-iscsi:cacdcd2520
++| | | |-- 10.170.75.2
++| | | | |-- cid
++| | | | |-- ip
++| | | | `-- state
++| | | |-- DataDigest
++| | | |-- FirstBurstLength
++| | | |-- HeaderDigest
++| | | |-- ImmediateData
++| | | |-- InitialR2T
++| | | |-- MaxBurstLength
++| | | |-- MaxOutstandingR2T
++| | | |-- MaxRecvDataSegmentLength
++| | | |-- MaxXmitDataSegmentLength
++| | | |-- active_commands
++| | | |-- commands
++| | | |-- force_close
++| | | |-- initiator_name
++| | | |-- luns -> ../../ini_groups/special_ini/luns
++| | | |-- reinstating
++| | | `-- sid
++| | `-- tid
++| |-- mgmt
++| |-- open_state
++| |-- trace_level
++| `-- version
++|-- threads
++|-- trace_level
++`-- version
++
++Advanced initiators access control
++----------------------------------
++
++ISCSI-SCST allows you to optionally control the visibility and accessibility
++of your target and its portals (IP addresses) to remote initiators. This
++control covers both SendTargets discovery of the target's portals and
++regular LUN access.
++
++This facility supersedes the obsolete initiators.[allow,deny] method,
++which is going to be removed in one of the future versions.
++
++This facility is available only in the sysfs build of iSCSI-SCST.
++
++By default, all portals are available for the initiators.
++
++1. If you want to enable/disable one or more of the target's portals for
++all initiators, you should define one or more allowed_portal attributes.
++For example:
++
++echo 'add_target_attribute iqn.2006-10.net.vlnb:tgt allowed_portal 10.170.77.2' >/sys/kernel/scst_tgt/targets/iscsi/mgmt
++
++will enable only portal 10.170.77.2 and disable all other portals
++
++echo 'add_target_attribute iqn.2006-10.net.vlnb:tgt allowed_portal 10.170.77.2' >/sys/kernel/scst_tgt/targets/iscsi/mgmt
++echo 'add_target_attribute iqn.2006-10.net.vlnb:tgt allowed_portal 10.170.75.2' >/sys/kernel/scst_tgt/targets/iscsi/mgmt
++
++will enable only portals 10.170.77.2 and 10.170.75.2 and disable all
++other portals.
++
++echo 'add_target_attribute iqn.2006-10.net.vlnb:tgt allowed_portal 10.170.7?.2' >/sys/kernel/scst_tgt/targets/iscsi/mgmt
++
++will enable only portals 10.170.7x.2 and disable all other portals.
++
++echo 'add_target_attribute iqn.2006-10.net.vlnb:tgt allowed_portal !*' >/sys/kernel/scst_tgt/targets/iscsi/mgmt
++
++will disable all portals.
++
++2. If you want to allow only a specific set of initiators to connect to
++your target, you should not add any default LUNs for the target, and
++instead create a security group to which the allowed initiators will be
++assigned.
++
++For example, suppose we want initiator iqn.2005-03.org.vlnb:cacdcd2520, and
++only it, to be able to access target iqn.2006-10.net.vlnb:tgt:
++
++echo 'add_target iqn.2006-10.net.vlnb:tgt' >/sys/kernel/scst_tgt/targets/iscsi/mgmt
++echo 'create allowed_ini' >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/ini_groups/mgmt
++echo 'add dev1 0' >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/ini_groups/allowed_ini/luns/mgmt
++echo 'add iqn.2005-03.org.vlnb:cacdcd2520' >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/ini_groups/allowed_ini/initiators/mgmt
++echo 1 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/enabled
++
++Since there will be no default LUNs for the target, all initiators other
++than iqn.2005-03.org.vlnb:cacdcd2520 will be blocked from accessing it.
++
++Alternatively, you can create an empty security group and filter out in
++it all initiators except the allowed one:
++
++echo 'add_target iqn.2006-10.net.vlnb:tgt' >/sys/kernel/scst_tgt/targets/iscsi/mgmt
++echo 'add dev1 0' >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/luns/mgmt
++echo 'create denied_inis' >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/ini_groups/mgmt
++echo 'add !iqn.2005-03.org.vlnb:cacdcd2520' >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/ini_groups/denied_inis/initiators/mgmt
++echo 1 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/enabled
++
++3. If you want to enable/disable one or more of the target's portals for
++particular initiators, you should set the per_portal_acl attribute to 1 and
++configure SCST access control for those initiators. If an SCST security
++group doesn't have any LUNs, the initiators assigned to it will not see
++this target and/or its portals. For example:
++
++(We assume that an empty group "BLOCKING_GROUP" has already been created for
++target iqn.2006-10.net.vlnb:tgt by the command (see above for more information):
++"echo 'create BLOCKING_GROUP' >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/ini_groups/mgmt)
++
++echo 'add iqn.2005-03.org.vlnb:cacdcd2520#10.170.77.2' >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/ini_groups/BLOCKING_GROUP/initiators/mgmt
++
++will block access of initiator iqn.2005-03.org.vlnb:cacdcd2520 to
++target iqn.2006-10.net.vlnb:tgt portal 10.170.77.2.
++
++Another example:
++
++echo 'add iqn.2005-03.org.vlnb:cacdcd2520*' >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/ini_groups/BLOCKING_GROUP/initiators/mgmt
++
++will block access of initiator iqn.2005-03.org.vlnb:cacdcd2520 to
++all target iqn.2006-10.net.vlnb:tgt portals.
++
++Troubleshooting
++---------------
++
++If you have any problems, start troubleshooting by looking at the
++kernel and system logs. iSCSI-SCST and the SCST core send their messages
++to the kernel log, and iscsi-scstd sends its messages to the system log.
++In most Linux distributions both of those logs go to the /var/log/messages
++file.
++
++Then, it might be helpful to increase the level of logging. For the kernel
++modules you should make a debug build by enabling CONFIG_SCST_DEBUG.
++
++If after looking at the logs the reason for your problem is still unclear,
++report it to the SCST mailing list scst-devel@lists.sourceforge.net.
++
++Work if target's backstorage or link is too slow
++------------------------------------------------
++
++In some cases you can experience I/O stalls or see abort or reset messages
++in the kernel log. This can happen under high I/O load, when your target's
++backstorage gets overloaded, or when working over a slow link, when the
++link can't serve all the queued commands in time.
++
++To work around it you can reduce the QueuedCommands parameter for the
++corresponding target to a lower value, like 8 (the default is 32).
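++
++For example (a sketch assuming a target named iqn.2006-10.net.vlnb:tgt;
++adjust the name to your setup):
++
++echo 8 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/QueuedCommands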
++
++Also see the SCST README file for more details about this issue and ways
++to prevent it.
++
++Performance advice
++------------------
++
++1. If you use Windows XP or Windows 2003+ as initiators, you can
++consider decreasing the TcpAckFrequency parameter to 1. See
++http://support.microsoft.com/kb/328890/ or google for "TcpAckFrequency"
++for more details.
++
++2. See how to get the maximum throughput from iSCSI, for instance, at
++http://virtualgeek.typepad.com/virtual_geek/2009/01/a-multivendor-post-to-help-our-mutual-iscsi-customers-using-vmware.html.
++It's about VMware, but its recommendations apply to other environments
++as well.
++
++3. The iSCSI initiators shipped in pre-CentOS/RHEL 5 releases are reported
++to have some performance problems. If you use one, it is strongly advised
++to upgrade.
++
++4. If you are going to use your target in a VM environment, for
++instance as shared storage with VMware, make sure all your VMs are
++connected to the target via *separate* sessions, i.e. each VM has its own
++connection to the target, rather than all VMs being connected through a
++single connection. You can check this using the SCST sysfs interface. If
++you get this wrong, parallel access to your target from different VMs can
++lose a lot of performance. This doesn't apply if your VMs are using the
++same shared storage, like VMFS, for instance; in that case all your VM
++hosts will be connected to the target via separate sessions, which is
++enough.
++
++5. Many dual port network adapters are not able to transfer data
++simultaneously on both ports, i.e. they transfer data via both ports at
++the same speed as via any single port. Thus, using such adapters in an
++MPIO configuration can't improve performance. To allow MPIO to have double
++performance you should either use separate network adapters, or find a
++dual-port adapter capable of transferring data simultaneously on both
++ports. You can check it by running two iperf instances through both ports
++in parallel.
++
++6. Since network offload works much better in the write direction than
++for reading (simplifying, in the read direction there is often an
++additional data copy), in many cases with 10GbE in a single
++initiator-target pair the initiator's CPU is the bottleneck, so you may
++see the initiator reading data at a much slower rate than it writes. You
++can check this by watching *each particular* CPU's load, including IRQ
++processing load, to find out if any of them is close to 100%. Note that
++many tools like vmstat give the aggregate load across all CPUs, so with 4
++cores 25% corresponds to 100% load of a single CPU.
++
++7. See the SCST core's README for more advice. In particular, pay attention
++to having the io_grouping_type option set correctly.
++
++Compilation options
++-------------------
++
++The following compilation options can be commented in/out in the kernel
++module's Makefile:
++
++ - CONFIG_SCST_DEBUG - turns on some debugging code, including some logging.
++   Makes the driver considerably bigger and slower, producing a large
++   amount of log data.
++
++ - CONFIG_SCST_TRACING - turns on ability to log events. Makes the driver
++ considerably bigger and leads to some performance loss.
++
++ - CONFIG_SCST_EXTRACHECKS - adds extra validity checks in various places.
++
++ - CONFIG_SCST_ISCSI_DEBUG_DIGEST_FAILURES - simulates digest failures in
++ random places.
++
++Credits
++-------
++
++Thanks to:
++
++ * Ming Zhang <blackmagic02881@gmail.com> for fixes
++
++ * Krzysztof Blaszkowski <kb@sysmikro.com.pl> for many fixes
++
++ * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> for comments and help in
++ debugging
++
++ * Tomasz Chmielewski <mangoo@wpkg.org> for testing and suggestions
++
++ * Bart Van Assche <bart.vanassche@gmail.com> for a lot of help
++
++Vladislav Bolkhovitin <vst@vlnb.net>, http://scst.sourceforge.net
++
+diff -uprN orig/linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt.h linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt.h
+--- orig/linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt.h
++++ linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt.h
+@@ -0,0 +1,131 @@
++/*
++ * qla2x_tgt.h
++ *
++ * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ *
++ * Additional file for the target driver support.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2
++ * of the License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++/*
++ * This should be included only from within qla2xxx module.
++ */
++
++#ifndef __QLA2X_TGT_H
++#define __QLA2X_TGT_H
++
++extern request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);
++
++#ifdef CONFIG_SCSI_QLA2XXX_TARGET
++
++#include "qla2x_tgt_def.h"
++
++extern struct qla_tgt_data qla_target;
++
++void qla_set_tgt_mode(scsi_qla_host_t *ha);
++void qla_clear_tgt_mode(scsi_qla_host_t *ha);
++
++static inline bool qla_tgt_mode_enabled(scsi_qla_host_t *ha)
++{
++ return ha->host->active_mode & MODE_TARGET;
++}
++
++static inline bool qla_ini_mode_enabled(scsi_qla_host_t *ha)
++{
++ return ha->host->active_mode & MODE_INITIATOR;
++}
++
++static inline void qla_reverse_ini_mode(scsi_qla_host_t *ha)
++{
++ if (ha->host->active_mode & MODE_INITIATOR)
++ ha->host->active_mode &= ~MODE_INITIATOR;
++ else
++ ha->host->active_mode |= MODE_INITIATOR;
++}
++
++/********************************************************************\
++ * ISP Queue types left out of new QLogic driver (from old version)
++\********************************************************************/
++
++/*
++ * qla2x00_do_en_dis_lun
++ * Issue enable or disable LUN entry IOCB.
++ *
++ * Input:
++ * ha = adapter block pointer.
++ *
++ * Caller MUST have hardware lock held. This function might release it,
++ * then reacquire it.
++ */
++static inline void
++__qla2x00_send_enable_lun(scsi_qla_host_t *ha, int enable)
++{
++ elun_entry_t *pkt;
++
++ BUG_ON(IS_FWI2_CAPABLE(ha));
++
++ pkt = (elun_entry_t *)qla2x00_req_pkt(ha);
++ if (pkt != NULL) {
++ pkt->entry_type = ENABLE_LUN_TYPE;
++ if (enable) {
++ pkt->command_count = QLA2X00_COMMAND_COUNT_INIT;
++ pkt->immed_notify_count = QLA2X00_IMMED_NOTIFY_COUNT_INIT;
++ pkt->timeout = 0xffff;
++ } else {
++ pkt->command_count = 0;
++ pkt->immed_notify_count = 0;
++ pkt->timeout = 0;
++ }
++ DEBUG2(printk(KERN_DEBUG
++ "scsi%lu:ENABLE_LUN IOCB imm %u cmd %u timeout %u\n",
++ ha->host_no, pkt->immed_notify_count,
++ pkt->command_count, pkt->timeout));
++
++ /* Issue command to ISP */
++ qla2x00_isp_cmd(ha);
++
++ } else
++ qla_clear_tgt_mode(ha);
++#if defined(QL_DEBUG_LEVEL_2) || defined(QL_DEBUG_LEVEL_3)
++ if (!pkt)
++ printk(KERN_ERR "%s: **** FAILED ****\n", __func__);
++#endif
++
++ return;
++}
++
++/*
++ * qla2x00_send_enable_lun
++ * Issue enable LUN entry IOCB.
++ *
++ * Input:
++ * ha = adapter block pointer.
++ * enable = enable/disable flag.
++ */
++static inline void
++qla2x00_send_enable_lun(scsi_qla_host_t *ha, bool enable)
++{
++ if (!IS_FWI2_CAPABLE(ha)) {
++ unsigned long flags;
++ spin_lock_irqsave(&ha->hardware_lock, flags);
++ __qla2x00_send_enable_lun(ha, enable);
++ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++ }
++}
++
++extern void qla2xxx_add_targets(void);
++
++#endif /* CONFIG_SCSI_QLA2XXX_TARGET */
++
++#endif /* __QLA2X_TGT_H */
+diff -uprN orig/linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt_def.h linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt_def.h
+--- orig/linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt_def.h
++++ linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt_def.h
+@@ -0,0 +1,729 @@
++/*
++ * qla2x_tgt_def.h
++ *
++ * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ *
++ * Additional file for the target driver support.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2
++ * of the License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++/*
++ * This is the global def file that is useful for including from the
++ * target portion.
++ */
++
++#ifndef __QLA2X_TGT_DEF_H
++#define __QLA2X_TGT_DEF_H
++
++#include "qla_def.h"
++
++#ifndef CONFIG_SCSI_QLA2XXX_TARGET
++#error __FILE__ " included without CONFIG_SCSI_QLA2XXX_TARGET"
++#endif
++
++#ifndef ENTER
++#define ENTER(a)
++#endif
++
++#ifndef LEAVE
++#define LEAVE(a)
++#endif
++
++/*
++ * Must be changed on any change in any initiator visible interfaces or
++ * data in the target add-on
++ */
++#define QLA2X_TARGET_MAGIC 267
++
++/*
++ * Must be changed on any change in any target visible interfaces or
++ * data in the initiator
++ */
++#define QLA2X_INITIATOR_MAGIC 57319
++
++#define QLA2X_INI_MODE_STR_EXCLUSIVE "exclusive"
++#define QLA2X_INI_MODE_STR_DISABLED "disabled"
++#define QLA2X_INI_MODE_STR_ENABLED "enabled"
++
++#define QLA2X_INI_MODE_EXCLUSIVE 0
++#define QLA2X_INI_MODE_DISABLED 1
++#define QLA2X_INI_MODE_ENABLED 2
++
++#define QLA2X00_COMMAND_COUNT_INIT 250
++#define QLA2X00_IMMED_NOTIFY_COUNT_INIT 250
++
++/*
++ * Used to mark which completion handles (for RIO Status's) are for CTIO's
++ * vs. regular (non-target) info. This is checked for in
++ * qla2x00_process_response_queue() to see if a handle coming back in a
++ * multi-complete should come to the tgt driver or be handled there by qla2xxx
++ */
++#define CTIO_COMPLETION_HANDLE_MARK BIT_29
++#if (CTIO_COMPLETION_HANDLE_MARK <= MAX_OUTSTANDING_COMMANDS)
++#error "Hackish CTIO_COMPLETION_HANDLE_MARK no longer larger than MAX_OUTSTANDING_COMMANDS"
++#endif
++#define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK)
++
++/* Used to mark CTIO as intermediate */
++#define CTIO_INTERMEDIATE_HANDLE_MARK BIT_30
++
++#ifndef OF_SS_MODE_0
++/*
++ * ISP target entries - Flags bit definitions.
++ */
++#define OF_SS_MODE_0 0
++#define OF_SS_MODE_1 1
++#define OF_SS_MODE_2 2
++#define OF_SS_MODE_3 3
++
++#define OF_EXPL_CONF BIT_5 /* Explicit Confirmation Requested */
++#define OF_DATA_IN BIT_6 /* Data in to initiator */
++ /* (data from target to initiator) */
++#define OF_DATA_OUT BIT_7 /* Data out from initiator */
++ /* (data from initiator to target) */
++#define OF_NO_DATA (BIT_7 | BIT_6)
++#define OF_INC_RC BIT_8 /* Increment command resource count */
++#define OF_FAST_POST BIT_9 /* Enable mailbox fast posting. */
++#define OF_CONF_REQ BIT_13 /* Confirmation Requested */
++#define OF_TERM_EXCH BIT_14 /* Terminate exchange */
++#define OF_SSTS BIT_15 /* Send SCSI status */
++#endif
++
++#ifndef DATASEGS_PER_COMMAND32
++#define DATASEGS_PER_COMMAND32 3
++#define DATASEGS_PER_CONT32 7
++#define QLA_MAX_SG32(ql) \
++ (((ql) > 0) ? (DATASEGS_PER_COMMAND32 + DATASEGS_PER_CONT32*((ql) - 1)) : 0)
++
++#define DATASEGS_PER_COMMAND64 2
++#define DATASEGS_PER_CONT64 5
++#define QLA_MAX_SG64(ql) \
++ (((ql) > 0) ? (DATASEGS_PER_COMMAND64 + DATASEGS_PER_CONT64*((ql) - 1)) : 0)
++#endif
++
++#ifndef DATASEGS_PER_COMMAND_24XX
++#define DATASEGS_PER_COMMAND_24XX 1
++#define DATASEGS_PER_CONT_24XX 5
++#define QLA_MAX_SG_24XX(ql) \
++ (min(1270, ((ql) > 0) ? (DATASEGS_PER_COMMAND_24XX + DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0))
++#endif
++
++/********************************************************************\
++ * ISP Queue types left out of new QLogic driver (from old version)
++\********************************************************************/
++
++#ifndef ENABLE_LUN_TYPE
++#define ENABLE_LUN_TYPE 0x0B /* Enable LUN entry. */
++/*
++ * ISP queue - enable LUN entry structure definition.
++ */
++typedef struct {
++ uint8_t entry_type; /* Entry type. */
++ uint8_t entry_count; /* Entry count. */
++ uint8_t sys_define; /* System defined. */
++ uint8_t entry_status; /* Entry Status. */
++ uint32_t sys_define_2; /* System defined. */
++ uint8_t reserved_8;
++ uint8_t reserved_1;
++ uint16_t reserved_2;
++ uint32_t reserved_3;
++ uint8_t status;
++ uint8_t reserved_4;
++ uint8_t command_count; /* Number of ATIOs allocated. */
++ uint8_t immed_notify_count; /* Number of Immediate Notify entries allocated. */
++ uint16_t reserved_5;
++ uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */
++ uint16_t reserved_6[20];
++} __attribute__((packed)) elun_entry_t;
++#define ENABLE_LUN_SUCCESS 0x01
++#define ENABLE_LUN_RC_NONZERO 0x04
++#define ENABLE_LUN_INVALID_REQUEST 0x06
++#define ENABLE_LUN_ALREADY_ENABLED 0x3E
++#endif
++
++#ifndef MODIFY_LUN_TYPE
++#define MODIFY_LUN_TYPE 0x0C /* Modify LUN entry. */
++/*
++ * ISP queue - modify LUN entry structure definition.
++ */
++typedef struct {
++ uint8_t entry_type; /* Entry type. */
++ uint8_t entry_count; /* Entry count. */
++ uint8_t sys_define; /* System defined. */
++ uint8_t entry_status; /* Entry Status. */
++ uint32_t sys_define_2; /* System defined. */
++ uint8_t reserved_8;
++ uint8_t reserved_1;
++ uint8_t operators;
++ uint8_t reserved_2;
++ uint32_t reserved_3;
++ uint8_t status;
++ uint8_t reserved_4;
++ uint8_t command_count; /* Number of ATIOs allocated. */
++ uint8_t immed_notify_count; /* Number of Immediate Notify */
++ /* entries allocated. */
++ uint16_t reserved_5;
++ uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */
++ uint16_t reserved_7[20];
++} __attribute__((packed)) modify_lun_entry_t;
++#define MODIFY_LUN_SUCCESS 0x01
++#define MODIFY_LUN_CMD_ADD BIT_0
++#define MODIFY_LUN_CMD_SUB BIT_1
++#define MODIFY_LUN_IMM_ADD BIT_2
++#define MODIFY_LUN_IMM_SUB BIT_3
++#endif
++
++#define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \
++ ? le16_to_cpu((iocb)->target.extended) \
++ : (uint16_t)(iocb)->target.id.standard)
++
++#ifndef IMMED_NOTIFY_TYPE
++#define IMMED_NOTIFY_TYPE 0x0D /* Immediate notify entry. */
++/*
++ * ISP queue - immediate notify entry structure definition.
++ */
++typedef struct {
++ uint8_t entry_type; /* Entry type. */
++ uint8_t entry_count; /* Entry count. */
++ uint8_t sys_define; /* System defined. */
++ uint8_t entry_status; /* Entry Status. */
++ uint32_t sys_define_2; /* System defined. */
++ target_id_t target;
++ uint16_t lun;
++ uint8_t target_id;
++ uint8_t reserved_1;
++ uint16_t status_modifier;
++ uint16_t status;
++ uint16_t task_flags;
++ uint16_t seq_id;
++ uint16_t srr_rx_id;
++ uint32_t srr_rel_offs;
++ uint16_t srr_ui;
++#define SRR_IU_DATA_IN 0x1
++#define SRR_IU_DATA_OUT 0x5
++#define SRR_IU_STATUS 0x7
++ uint16_t srr_ox_id;
++ uint8_t reserved_2[30];
++ uint16_t ox_id;
++} __attribute__((packed)) notify_entry_t;
++#endif
++
++#ifndef NOTIFY_ACK_TYPE
++#define NOTIFY_ACK_TYPE 0x0E /* Notify acknowledge entry. */
++/*
++ * ISP queue - notify acknowledge entry structure definition.
++ */
++typedef struct {
++ uint8_t entry_type; /* Entry type. */
++ uint8_t entry_count; /* Entry count. */
++ uint8_t sys_define; /* System defined. */
++ uint8_t entry_status; /* Entry Status. */
++ uint32_t sys_define_2; /* System defined. */
++ target_id_t target;
++ uint8_t target_id;
++ uint8_t reserved_1;
++ uint16_t flags;
++ uint16_t resp_code;
++ uint16_t status;
++ uint16_t task_flags;
++ uint16_t seq_id;
++ uint16_t srr_rx_id;
++ uint32_t srr_rel_offs;
++ uint16_t srr_ui;
++ uint16_t srr_flags;
++ uint16_t srr_reject_code;
++ uint8_t srr_reject_vendor_uniq;
++ uint8_t srr_reject_code_expl;
++ uint8_t reserved_2[26];
++ uint16_t ox_id;
++} __attribute__((packed)) nack_entry_t;
++#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
++#define NOTIFY_ACK_SRR_FLAGS_REJECT 1
++
++#define NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM 0x9
++
++#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL 0
++#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_UNABLE_TO_SUPPLY_DATA 0x2a
++
++#define NOTIFY_ACK_SUCCESS 0x01
++#endif
++
++#ifndef ACCEPT_TGT_IO_TYPE
++#define ACCEPT_TGT_IO_TYPE 0x16 /* Accept target I/O entry. */
++/*
++ * ISP queue - Accept Target I/O (ATIO) entry structure definition.
++ */
++typedef struct {
++ uint8_t entry_type; /* Entry type. */
++ uint8_t entry_count; /* Entry count. */
++ uint8_t sys_define; /* System defined. */
++ uint8_t entry_status; /* Entry Status. */
++ uint32_t sys_define_2; /* System defined. */
++ target_id_t target;
++ uint16_t rx_id;
++ uint16_t flags;
++ uint16_t status;
++ uint8_t command_ref;
++ uint8_t task_codes;
++ uint8_t task_flags;
++ uint8_t execution_codes;
++ uint8_t cdb[MAX_CMDSZ];
++ uint32_t data_length;
++ uint16_t lun;
++ uint8_t initiator_port_name[WWN_SIZE]; /* on qla23xx */
++ uint16_t reserved_32[6];
++ uint16_t ox_id;
++} __attribute__((packed)) atio_entry_t;
++#endif
++
++#ifndef CONTINUE_TGT_IO_TYPE
++#define CONTINUE_TGT_IO_TYPE 0x17
++/*
++ * ISP queue - Continue Target I/O (CTIO) entry for status mode 0
++ * structure definition.
++ */
++typedef struct {
++ uint8_t entry_type; /* Entry type. */
++ uint8_t entry_count; /* Entry count. */
++ uint8_t sys_define; /* System defined. */
++ uint8_t entry_status; /* Entry Status. */
++ uint32_t handle; /* System defined handle */
++ target_id_t target;
++ uint16_t rx_id;
++ uint16_t flags;
++ uint16_t status;
++ uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */
++ uint16_t dseg_count; /* Data segment count. */
++ uint32_t relative_offset;
++ uint32_t residual;
++ uint16_t reserved_1[3];
++ uint16_t scsi_status;
++ uint32_t transfer_length;
++ uint32_t dseg_0_address[0];
++} __attribute__((packed)) ctio_common_entry_t;
++#define ATIO_PATH_INVALID 0x07
++#define ATIO_CANT_PROV_CAP 0x16
++#define ATIO_CDB_VALID 0x3D
++
++#define ATIO_EXEC_READ BIT_1
++#define ATIO_EXEC_WRITE BIT_0
++#endif
++
++#ifndef CTIO_A64_TYPE
++#define CTIO_A64_TYPE 0x1F
++typedef struct {
++ ctio_common_entry_t common;
++ uint32_t dseg_0_address; /* Data segment 0 address. */
++ uint32_t dseg_0_length; /* Data segment 0 length. */
++ uint32_t dseg_1_address; /* Data segment 1 address. */
++ uint32_t dseg_1_length; /* Data segment 1 length. */
++ uint32_t dseg_2_address; /* Data segment 2 address. */
++ uint32_t dseg_2_length; /* Data segment 2 length. */
++} __attribute__((packed)) ctio_entry_t;
++#define CTIO_SUCCESS 0x01
++#define CTIO_ABORTED 0x02
++#define CTIO_INVALID_RX_ID 0x08
++#define CTIO_TIMEOUT 0x0B
++#define CTIO_LIP_RESET 0x0E
++#define CTIO_TARGET_RESET 0x17
++#define CTIO_PORT_UNAVAILABLE 0x28
++#define CTIO_PORT_LOGGED_OUT 0x29
++#define CTIO_PORT_CONF_CHANGED 0x2A
++#define CTIO_SRR_RECEIVED 0x45
++
++#endif
++
++#ifndef CTIO_RET_TYPE
++#define CTIO_RET_TYPE 0x17 /* CTIO return entry */
++/*
++ * ISP queue - CTIO returned entry structure definition.
++ */
++typedef struct {
++ uint8_t entry_type; /* Entry type. */
++ uint8_t entry_count; /* Entry count. */
++ uint8_t sys_define; /* System defined. */
++ uint8_t entry_status; /* Entry Status. */
++ uint32_t handle; /* System defined handle. */
++ target_id_t target;
++ uint16_t rx_id;
++ uint16_t flags;
++ uint16_t status;
++ uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */
++ uint16_t dseg_count; /* Data segment count. */
++ uint32_t relative_offset;
++ uint32_t residual;
++ uint16_t reserved_1[2];
++ uint16_t sense_length;
++ uint16_t scsi_status;
++ uint16_t response_length;
++ uint8_t sense_data[26];
++} __attribute__((packed)) ctio_ret_entry_t;
++#endif
++
++#define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */
++
++typedef struct {
++ uint8_t r_ctl;
++ uint8_t d_id[3];
++ uint8_t cs_ctl;
++ uint8_t s_id[3];
++ uint8_t type;
++ uint8_t f_ctl[3];
++ uint8_t seq_id;
++ uint8_t df_ctl;
++ uint16_t seq_cnt;
++ uint16_t ox_id;
++ uint16_t rx_id;
++ uint32_t parameter;
++} __attribute__((packed)) fcp_hdr_t;
++
++typedef struct {
++ uint8_t d_id[3];
++ uint8_t r_ctl;
++ uint8_t s_id[3];
++ uint8_t cs_ctl;
++ uint8_t f_ctl[3];
++ uint8_t type;
++ uint16_t seq_cnt;
++ uint8_t df_ctl;
++ uint8_t seq_id;
++ uint16_t rx_id;
++ uint16_t ox_id;
++ uint32_t parameter;
++} __attribute__((packed)) fcp_hdr_le_t;
++
++#define F_CTL_EXCH_CONTEXT_RESP BIT_23
++#define F_CTL_SEQ_CONTEXT_RESIP BIT_22
++#define F_CTL_LAST_SEQ BIT_20
++#define F_CTL_END_SEQ BIT_19
++#define F_CTL_SEQ_INITIATIVE BIT_16
++
++#define R_CTL_BASIC_LINK_SERV 0x80
++#define R_CTL_B_ACC 0x4
++#define R_CTL_B_RJT 0x5
++
++typedef struct {
++ uint64_t lun;
++ uint8_t cmnd_ref;
++ uint8_t task_attr:3;
++ uint8_t reserved:5;
++ uint8_t task_mgmt_flags;
++#define FCP_CMND_TASK_MGMT_CLEAR_ACA 6
++#define FCP_CMND_TASK_MGMT_TARGET_RESET 5
++#define FCP_CMND_TASK_MGMT_LU_RESET 4
++#define FCP_CMND_TASK_MGMT_CLEAR_TASK_SET 2
++#define FCP_CMND_TASK_MGMT_ABORT_TASK_SET 1
++ uint8_t wrdata:1;
++ uint8_t rddata:1;
++ uint8_t add_cdb_len:6;
++ uint8_t cdb[16];
++ /* Valid only if add_cdb_len=0, otherwise this is additional CDB data */
++ uint32_t data_length;
++} __attribute__((packed)) fcp_cmnd_t;
++
++/*
++ * ISP queue - Accept Target I/O (ATIO) type 7 entry for 24xx structure
++ * definition.
++ */
++typedef struct {
++ uint8_t entry_type; /* Entry type. */
++ uint8_t entry_count; /* Entry count. */
++ uint8_t fcp_cmnd_len_low;
++ uint8_t fcp_cmnd_len_high:4;
++ uint8_t attr:4;
++ uint32_t exchange_addr;
++#define ATIO_EXCHANGE_ADDRESS_UNKNOWN 0xFFFFFFFF
++ fcp_hdr_t fcp_hdr;
++ fcp_cmnd_t fcp_cmnd;
++} __attribute__((packed)) atio7_entry_t;
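++
++/*
++ * Illustrative note: the FCP_CMND length in atio7_entry_t is a 12-bit value
++ * split across fcp_cmnd_len_low and the fcp_cmnd_len_high nibble, so a
++ * consumer would presumably reconstruct it as
++ *
++ *	len = ((uint16_t)atio->fcp_cmnd_len_high << 8) | atio->fcp_cmnd_len_low;
++ *
++ * This matters when add_cdb_len != 0 and the command carries an extended CDB.
++ */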
++
++#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
++
++/*
++ * ISP queue - Continue Target I/O (CTIO) type 7 entry (for 24xx) structure
++ * definition.
++ */
++
++typedef struct {
++ uint8_t entry_type; /* Entry type. */
++ uint8_t entry_count; /* Entry count. */
++ uint8_t sys_define; /* System defined. */
++ uint8_t entry_status; /* Entry Status. */
++ uint32_t handle; /* System defined handle */
++ uint16_t nport_handle;
++#define CTIO7_NHANDLE_UNRECOGNIZED 0xFFFF
++ uint16_t timeout;
++ uint16_t dseg_count; /* Data segment count. */
++ uint8_t vp_index;
++ uint8_t add_flags;
++ uint8_t initiator_id[3];
++ uint8_t reserved;
++ uint32_t exchange_addr;
++} __attribute__((packed)) ctio7_common_entry_t;
++
++typedef struct {
++ ctio7_common_entry_t common;
++ uint16_t reserved1;
++ uint16_t flags;
++ uint32_t residual;
++ uint16_t ox_id;
++ uint16_t scsi_status;
++ uint32_t relative_offset;
++ uint32_t reserved2;
++ uint32_t transfer_length;
++ uint32_t reserved3;
++ uint32_t dseg_0_address[2]; /* Data segment 0 address. */
++ uint32_t dseg_0_length; /* Data segment 0 length. */
++} __attribute__((packed)) ctio7_status0_entry_t;
++
++typedef struct {
++ ctio7_common_entry_t common;
++ uint16_t sense_length;
++ uint16_t flags;
++ uint32_t residual;
++ uint16_t ox_id;
++ uint16_t scsi_status;
++ uint16_t response_len;
++ uint16_t reserved;
++ uint8_t sense_data[24];
++} __attribute__((packed)) ctio7_status1_entry_t;
++
++typedef struct {
++ uint8_t entry_type; /* Entry type. */
++ uint8_t entry_count; /* Entry count. */
++ uint8_t sys_define; /* System defined. */
++ uint8_t entry_status; /* Entry Status. */
++ uint32_t handle; /* System defined handle */
++ uint16_t status;
++ uint16_t timeout;
++ uint16_t dseg_count; /* Data segment count. */
++ uint8_t reserved1[6];
++ uint32_t exchange_address;
++ uint16_t reserved2;
++ uint16_t flags;
++ uint32_t residual;
++ uint16_t ox_id;
++ uint16_t reserved3;
++ uint32_t relative_offset;
++ uint8_t reserved4[24];
++} __attribute__((packed)) ctio7_fw_entry_t;
++
++/* CTIO7 flags values */
++#define CTIO7_FLAGS_SEND_STATUS BIT_15
++#define CTIO7_FLAGS_TERMINATE BIT_14
++#define CTIO7_FLAGS_CONFORM_REQ BIT_13
++#define CTIO7_FLAGS_DONT_RET_CTIO BIT_8
++#define CTIO7_FLAGS_STATUS_MODE_0 0
++#define CTIO7_FLAGS_STATUS_MODE_1 BIT_6
++#define CTIO7_FLAGS_EXPLICIT_CONFORM BIT_5
++#define CTIO7_FLAGS_CONFIRM_SATISF BIT_4
++#define CTIO7_FLAGS_DSD_PTR BIT_2
++#define CTIO7_FLAGS_DATA_IN BIT_1
++#define CTIO7_FLAGS_DATA_OUT BIT_0
++
++/*
++ * ISP queue - immediate notify entry structure definition for 24xx.
++ */
++typedef struct {
++ uint8_t entry_type; /* Entry type. */
++ uint8_t entry_count; /* Entry count. */
++ uint8_t sys_define; /* System defined. */
++ uint8_t entry_status; /* Entry Status. */
++ uint32_t reserved;
++ uint16_t nport_handle;
++ uint16_t reserved_2;
++ uint16_t flags;
++#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1
++#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0
++ uint16_t srr_rx_id;
++ uint16_t status;
++ uint8_t status_subcode;
++ uint8_t reserved_3;
++ uint32_t exchange_address;
++ uint32_t srr_rel_offs;
++ uint16_t srr_ui;
++ uint16_t srr_ox_id;
++ uint8_t reserved_4[19];
++ uint8_t vp_index;
++ uint32_t reserved_5;
++ uint8_t port_id[3];
++ uint8_t reserved_6;
++ uint16_t reserved_7;
++ uint16_t ox_id;
++} __attribute__((packed)) notify24xx_entry_t;
++
++#define ELS_PLOGI 0x3
++#define ELS_FLOGI 0x4
++#define ELS_LOGO 0x5
++#define ELS_PRLI 0x20
++#define ELS_PRLO 0x21
++#define ELS_TPRLO 0x24
++#define ELS_PDISC 0x50
++#define ELS_ADISC 0x52
++
++/*
++ * ISP queue - notify acknowledge entry structure definition for 24xx.
++ */
++typedef struct {
++ uint8_t entry_type; /* Entry type. */
++ uint8_t entry_count; /* Entry count. */
++ uint8_t sys_define; /* System defined. */
++ uint8_t entry_status; /* Entry Status. */
++ uint32_t handle;
++ uint16_t nport_handle;
++ uint16_t reserved_1;
++ uint16_t flags;
++ uint16_t srr_rx_id;
++ uint16_t status;
++ uint8_t status_subcode;
++ uint8_t reserved_3;
++ uint32_t exchange_address;
++ uint32_t srr_rel_offs;
++ uint16_t srr_ui;
++ uint16_t srr_flags;
++ uint8_t reserved_4[19];
++ uint8_t vp_index;
++ uint8_t srr_reject_vendor_uniq;
++ uint8_t srr_reject_code_expl;
++ uint8_t srr_reject_code;
++ uint8_t reserved_5[7];
++ uint16_t ox_id;
++} __attribute__((packed)) nack24xx_entry_t;
++
++/*
++ * ISP queue - ABTS received/response entries structure definition for 24xx.
++ */
++#define ABTS_RECV_24XX 0x54 /* ABTS received (for 24xx) */
++#define ABTS_RESP_24XX	0x55	/* ABTS response (for 24xx) */
++
++typedef struct {
++ uint8_t entry_type; /* Entry type. */
++ uint8_t entry_count; /* Entry count. */
++ uint8_t sys_define; /* System defined. */
++ uint8_t entry_status; /* Entry Status. */
++ uint8_t reserved_1[6];
++ uint16_t nport_handle;
++ uint8_t reserved_2[3];
++ uint8_t reserved_3:4;
++ uint8_t sof_type:4;
++ uint32_t exchange_address;
++ fcp_hdr_le_t fcp_hdr_le;
++ uint8_t reserved_4[16];
++ uint32_t exchange_addr_to_abort;
++} __attribute__((packed)) abts24_recv_entry_t;
++
++#define ABTS_PARAM_ABORT_SEQ BIT_0
++
++typedef struct {
++ uint16_t reserved;
++ uint8_t seq_id_last;
++ uint8_t seq_id_valid;
++#define SEQ_ID_VALID 0x80
++#define SEQ_ID_INVALID 0x00
++ uint16_t rx_id;
++ uint16_t ox_id;
++ uint16_t high_seq_cnt;
++ uint16_t low_seq_cnt;
++} __attribute__((packed)) ba_acc_le_t;
++
++typedef struct {
++ uint8_t vendor_uniq;
++ uint8_t reason_expl;
++ uint8_t reason_code;
++#define BA_RJT_REASON_CODE_INVALID_COMMAND 0x1
++#define BA_RJT_REASON_CODE_UNABLE_TO_PERFORM 0x9
++ uint8_t reserved;
++} __attribute__((packed)) ba_rjt_le_t;
++
++typedef struct {
++ uint8_t entry_type; /* Entry type. */
++ uint8_t entry_count; /* Entry count. */
++ uint8_t sys_define; /* System defined. */
++ uint8_t entry_status; /* Entry Status. */
++ uint32_t handle;
++ uint16_t reserved_1;
++ uint16_t nport_handle;
++ uint16_t control_flags;
++#define ABTS_CONTR_FLG_TERM_EXCHG BIT_0
++ uint8_t reserved_2;
++ uint8_t reserved_3:4;
++ uint8_t sof_type:4;
++ uint32_t exchange_address;
++ fcp_hdr_le_t fcp_hdr_le;
++ union {
++ ba_acc_le_t ba_acct;
++ ba_rjt_le_t ba_rjt;
++ } __attribute__((packed)) payload;
++ uint32_t reserved_4;
++ uint32_t exchange_addr_to_abort;
++} __attribute__((packed)) abts24_resp_entry_t;
++
++typedef struct {
++ uint8_t entry_type; /* Entry type. */
++ uint8_t entry_count; /* Entry count. */
++ uint8_t sys_define; /* System defined. */
++ uint8_t entry_status; /* Entry Status. */
++ uint32_t handle;
++ uint16_t compl_status;
++#define ABTS_RESP_COMPL_SUCCESS 0
++#define ABTS_RESP_COMPL_SUBCODE_ERROR 0x31
++ uint16_t nport_handle;
++ uint16_t reserved_1;
++ uint8_t reserved_2;
++ uint8_t reserved_3:4;
++ uint8_t sof_type:4;
++ uint32_t exchange_address;
++ fcp_hdr_le_t fcp_hdr_le;
++ uint8_t reserved_4[8];
++ uint32_t error_subcode1;
++#define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM 0x1E
++ uint32_t error_subcode2;
++ uint32_t exchange_addr_to_abort;
++} __attribute__((packed)) abts24_resp_fw_entry_t;
++
++/********************************************************************\
++ * Type Definitions used by initiator & target halves
++\********************************************************************/
++
++typedef enum {
++ ADD_TARGET = 0,
++ REMOVE_TARGET,
++ DISABLE_TARGET_MODE,
++ ENABLE_TARGET_MODE,
++} qla2x_tgt_host_action_t;
++
++/* When changing it, don't forget to change QLA2X_TARGET_MAGIC! */
++struct qla_tgt_data {
++ int magic;
++
++ /* Callbacks */
++ void (*tgt24_atio_pkt)(scsi_qla_host_t *ha, atio7_entry_t *pkt);
++ void (*tgt_response_pkt)(scsi_qla_host_t *ha, response_t *pkt);
++ void (*tgt2x_ctio_completion)(scsi_qla_host_t *ha, uint32_t handle);
++ void (*tgt_async_event)(uint16_t code, scsi_qla_host_t *ha,
++ uint16_t *mailbox);
++ int (*tgt_host_action)(scsi_qla_host_t *ha, qla2x_tgt_host_action_t
++ action);
++ void (*tgt_fc_port_added)(scsi_qla_host_t *ha, fc_port_t *fcport);
++ void (*tgt_fc_port_deleted)(scsi_qla_host_t *ha, fc_port_t *fcport);
++};
++
++int qla2xxx_tgt_register_driver(struct qla_tgt_data *tgt);
++
++void qla2xxx_tgt_unregister_driver(void);
++
++int qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha);
++int qla2x00_wait_for_hba_online(scsi_qla_host_t *ha);
++
++#endif /* __QLA2X_TGT_DEF_H */
+diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/Makefile linux-2.6.36/drivers/scst/qla2xxx-target/Makefile
+--- orig/linux-2.6.36/drivers/scst/qla2xxx-target/Makefile
++++ linux-2.6.36/drivers/scst/qla2xxx-target/Makefile
+@@ -0,0 +1,5 @@
++ccflags-y += -Idrivers/scsi/qla2xxx
++
++qla2x00tgt-y := qla2x00t.o
++
++obj-$(CONFIG_SCST_QLA_TGT_ADDON) += qla2x00tgt.o
+diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/Kconfig linux-2.6.36/drivers/scst/qla2xxx-target/Kconfig
+--- orig/linux-2.6.36/drivers/scst/qla2xxx-target/Kconfig
++++ linux-2.6.36/drivers/scst/qla2xxx-target/Kconfig
+@@ -0,0 +1,30 @@
++config SCST_QLA_TGT_ADDON
++ tristate "QLogic 2XXX Target Mode Add-On"
++ depends on SCST && SCSI_QLA_FC && SCSI_QLA2XXX_TARGET
++ default SCST
++ help
++ Target mode add-on driver for QLogic 2xxx Fibre Channel host adapters.
++ Visit http://scst.sourceforge.net for more info about this driver.
++
++config QLA_TGT_DEBUG_WORK_IN_THREAD
++ bool "Use threads context only"
++ depends on SCST_QLA_TGT_ADDON
++ help
++ Makes SCST process incoming commands from the qla2x00t target
++ driver and call the driver's callbacks in internal SCST
++ threads context instead of SIRQ context, where thise commands
++ were received. Useful for debugging and lead to some
++ performance loss.
++
++ If unsure, say "N".
++
++config QLA_TGT_DEBUG_SRR
++ bool "SRR debugging"
++ depends on SCST_QLA_TGT_ADDON
++ help
++	  Turns on debugging of packet retransmissions (SRR).
++	  In this mode some CTIOs will be "broken" to force the
++	  initiator to issue a retransmit request. Useful for debugging,
++	  but leads to a big performance loss.
++
++ If unsure, say "N".
+diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c
+--- orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c
++++ linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c
+@@ -0,0 +1,5486 @@
++/*
++ * qla2x00t.c
++ *
++ * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
++ * Copyright (C) 2006 - 2010 ID7 Ltd.
++ *
++ * QLogic 22xx/23xx/24xx/25xx FC target driver.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/version.h>
++#include <linux/blkdev.h>
++#include <linux/interrupt.h>
++#include <scsi/scsi.h>
++#include <scsi/scsi_host.h>
++#include <linux/pci.h>
++#include <linux/delay.h>
++#include <linux/list.h>
++
++#include <scst/scst.h>
++
++#include "qla2x00t.h"
++
++/*
++ * This driver calls qla2x00_req_pkt() and qla2x00_issue_marker(), which
++ * must be called under HW lock and could unlock/lock it inside.
++ * It isn't an issue, since in the current implementation, at the time when
++ * those functions are called:
++ *
++ *  - Either the context is IRQ and only the IRQ handler can modify HW data,
++ *    including ring-related fields,
++ *
++ *  - Or access to target mode variables from struct q2t_tgt doesn't
++ *    cross those functions' boundaries, except tgt_stop, which is
++ *    additionally protected by irq_cmd_count.
++ */
++
++#ifndef CONFIG_SCSI_QLA2XXX_TARGET
++#error "CONFIG_SCSI_QLA2XXX_TARGET is NOT DEFINED"
++#endif
++
++#ifdef CONFIG_SCST_DEBUG
++#define Q2T_DEFAULT_LOG_FLAGS (TRACE_FUNCTION | TRACE_LINE | TRACE_PID | \
++ TRACE_OUT_OF_MEM | TRACE_MGMT | TRACE_MGMT_DEBUG | \
++ TRACE_MINOR | TRACE_SPECIAL)
++#else
++# ifdef CONFIG_SCST_TRACING
++#define Q2T_DEFAULT_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MGMT | \
++ TRACE_SPECIAL)
++# endif
++#endif
++
++static int q2t_target_detect(struct scst_tgt_template *templ);
++static int q2t_target_release(struct scst_tgt *scst_tgt);
++static int q2x_xmit_response(struct scst_cmd *scst_cmd);
++static int __q24_xmit_response(struct q2t_cmd *cmd, int xmit_type);
++static int q2t_rdy_to_xfer(struct scst_cmd *scst_cmd);
++static void q2t_on_free_cmd(struct scst_cmd *scst_cmd);
++static void q2t_task_mgmt_fn_done(struct scst_mgmt_cmd *mcmd);
++static int q2t_get_initiator_port_transport_id(struct scst_session *scst_sess,
++ uint8_t **transport_id);
++
++/* Predefs for callbacks handed to qla2xxx(target) */
++static void q24_atio_pkt(scsi_qla_host_t *ha, atio7_entry_t *pkt);
++static void q2t_response_pkt(scsi_qla_host_t *ha, response_t *pkt);
++static void q2t_async_event(uint16_t code, scsi_qla_host_t *ha,
++ uint16_t *mailbox);
++static void q2x_ctio_completion(scsi_qla_host_t *ha, uint32_t handle);
++static int q2t_host_action(scsi_qla_host_t *ha,
++ qla2x_tgt_host_action_t action);
++static void q2t_fc_port_added(scsi_qla_host_t *ha, fc_port_t *fcport);
++static void q2t_fc_port_deleted(scsi_qla_host_t *ha, fc_port_t *fcport);
++static int q2t_issue_task_mgmt(struct q2t_sess *sess, uint8_t *lun,
++ int lun_size, int fn, void *iocb, int flags);
++static void q2x_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
++ atio_entry_t *atio, int ha_locked);
++static void q24_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
++ atio7_entry_t *atio, int ha_locked);
++static void q2t_reject_free_srr_imm(scsi_qla_host_t *ha, struct srr_imm *imm,
++ int ha_lock);
++static int q2t_cut_cmd_data_head(struct q2t_cmd *cmd, unsigned int offset);
++static void q2t_clear_tgt_db(struct q2t_tgt *tgt, bool local_only);
++static void q2t_on_hw_pending_cmd_timeout(struct scst_cmd *scst_cmd);
++static int q2t_unreg_sess(struct q2t_sess *sess);
++static uint16_t q2t_get_scsi_transport_version(struct scst_tgt *scst_tgt);
++static uint16_t q2t_get_phys_transport_version(struct scst_tgt *scst_tgt);
++
++/** SYSFS **/
++
++static ssize_t q2t_version_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf);
++
++struct kobj_attribute q2t_version_attr =
++ __ATTR(version, S_IRUGO, q2t_version_show, NULL);
++
++static const struct attribute *q2t_attrs[] = {
++ &q2t_version_attr.attr,
++ NULL,
++};
++
++static ssize_t q2t_show_expl_conf_enabled(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buffer);
++static ssize_t q2t_store_expl_conf_enabled(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buffer, size_t size);
++
++struct kobj_attribute q2t_expl_conf_attr =
++ __ATTR(explicit_confirmation, S_IRUGO|S_IWUSR,
++ q2t_show_expl_conf_enabled, q2t_store_expl_conf_enabled);
++
++static ssize_t q2t_abort_isp_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buffer, size_t size);
++
++struct kobj_attribute q2t_abort_isp_attr =
++ __ATTR(abort_isp, S_IWUSR, NULL, q2t_abort_isp_store);
++
++static const struct attribute *q2t_tgt_attrs[] = {
++ &q2t_expl_conf_attr.attr,
++ &q2t_abort_isp_attr.attr,
++ NULL,
++};
++
++static int q2t_enable_tgt(struct scst_tgt *tgt, bool enable);
++static bool q2t_is_tgt_enabled(struct scst_tgt *tgt);
++
++/*
++ * Global Variables
++ */
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++#define trace_flag q2t_trace_flag
++static unsigned long q2t_trace_flag = Q2T_DEFAULT_LOG_FLAGS;
++#endif
++
++static struct scst_tgt_template tgt2x_template = {
++ .name = "qla2x00t",
++ .sg_tablesize = 0,
++ .use_clustering = 1,
++#ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
++ .xmit_response_atomic = 0,
++ .rdy_to_xfer_atomic = 0,
++#else
++ .xmit_response_atomic = 1,
++ .rdy_to_xfer_atomic = 1,
++#endif
++ .max_hw_pending_time = Q2T_MAX_HW_PENDING_TIME,
++ .detect = q2t_target_detect,
++ .release = q2t_target_release,
++ .xmit_response = q2x_xmit_response,
++ .rdy_to_xfer = q2t_rdy_to_xfer,
++ .on_free_cmd = q2t_on_free_cmd,
++ .task_mgmt_fn_done = q2t_task_mgmt_fn_done,
++ .get_initiator_port_transport_id = q2t_get_initiator_port_transport_id,
++ .get_scsi_transport_version = q2t_get_scsi_transport_version,
++ .get_phys_transport_version = q2t_get_phys_transport_version,
++ .on_hw_pending_cmd_timeout = q2t_on_hw_pending_cmd_timeout,
++ .enable_target = q2t_enable_tgt,
++ .is_target_enabled = q2t_is_tgt_enabled,
++ .tgtt_attrs = q2t_attrs,
++ .tgt_attrs = q2t_tgt_attrs,
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ .default_trace_flags = Q2T_DEFAULT_LOG_FLAGS,
++ .trace_flags = &trace_flag,
++#endif
++};
++
++static struct kmem_cache *q2t_cmd_cachep;
++static struct kmem_cache *q2t_mgmt_cmd_cachep;
++static mempool_t *q2t_mgmt_cmd_mempool;
++
++static DECLARE_RWSEM(q2t_unreg_rwsem);
++
++/* It's not yet supported */
++static inline int scst_cmd_get_ppl_offset(struct scst_cmd *scst_cmd)
++{
++ return 0;
++}
++
++/* ha->hardware_lock supposed to be held on entry */
++static inline void q2t_sess_get(struct q2t_sess *sess)
++{
++ sess->sess_ref++;
++ TRACE_DBG("sess %p, new sess_ref %d", sess, sess->sess_ref);
++}
++
++/* ha->hardware_lock supposed to be held on entry */
++static inline void q2t_sess_put(struct q2t_sess *sess)
++{
++ TRACE_DBG("sess %p, new sess_ref %d", sess, sess->sess_ref-1);
++ BUG_ON(sess->sess_ref == 0);
++
++ sess->sess_ref--;
++ if (sess->sess_ref == 0)
++ q2t_unreg_sess(sess);
++}
++
++/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
++static inline struct q2t_sess *q2t_find_sess_by_loop_id(struct q2t_tgt *tgt,
++ uint16_t lid)
++{
++ struct q2t_sess *sess;
++ BUG_ON(tgt == NULL);
++ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
++ if (lid == (sess->loop_id))
++ return sess;
++ }
++ return NULL;
++}
++
++/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
++static inline struct q2t_sess *q2t_find_sess_by_s_id(struct q2t_tgt *tgt,
++ const uint8_t *s_id)
++{
++ struct q2t_sess *sess;
++ BUG_ON(tgt == NULL);
++ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
++ if ((sess->s_id.b.al_pa == s_id[2]) &&
++ (sess->s_id.b.area == s_id[1]) &&
++ (sess->s_id.b.domain == s_id[0]))
++ return sess;
++ }
++ return NULL;
++}
++
++/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
++static inline struct q2t_sess *q2t_find_sess_by_s_id_le(struct q2t_tgt *tgt,
++ const uint8_t *s_id)
++{
++ struct q2t_sess *sess;
++ BUG_ON(tgt == NULL);
++ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
++ if ((sess->s_id.b.al_pa == s_id[0]) &&
++ (sess->s_id.b.area == s_id[1]) &&
++ (sess->s_id.b.domain == s_id[2]))
++ return sess;
++ }
++ return NULL;
++}
++
++/* ha->hardware_lock supposed to be held on entry */
++static inline void q2t_exec_queue(scsi_qla_host_t *ha)
++{
++ qla2x00_isp_cmd(ha);
++}
++
++/* Might release the hw lock, then reacquire it!! */
++static inline int q2t_issue_marker(scsi_qla_host_t *ha, int ha_locked)
++{
++ /* Send marker if required */
++ if (unlikely(ha->marker_needed != 0)) {
++ int rc = qla2x00_issue_marker(ha, ha_locked);
++ if (rc != QLA_SUCCESS) {
++ PRINT_ERROR("qla2x00t(%ld): issue_marker() "
++ "failed", ha->instance);
++ }
++ return rc;
++ }
++ return QLA_SUCCESS;
++}
++
++/*
++ * Registers with the initiator driver (but target mode isn't enabled until
++ * it's turned on via sysfs)
++ */
++static int q2t_target_detect(struct scst_tgt_template *tgtt)
++{
++ int res, rc;
++ struct qla_tgt_data t = {
++ .magic = QLA2X_TARGET_MAGIC,
++ .tgt24_atio_pkt = q24_atio_pkt,
++ .tgt_response_pkt = q2t_response_pkt,
++ .tgt2x_ctio_completion = q2x_ctio_completion,
++ .tgt_async_event = q2t_async_event,
++ .tgt_host_action = q2t_host_action,
++ .tgt_fc_port_added = q2t_fc_port_added,
++ .tgt_fc_port_deleted = q2t_fc_port_deleted,
++ };
++
++ TRACE_ENTRY();
++
++ rc = qla2xxx_tgt_register_driver(&t);
++ if (rc < 0) {
++ res = rc;
++ PRINT_ERROR("qla2x00t: Unable to register driver: %d", res);
++ goto out;
++ }
++
++ if (rc != QLA2X_INITIATOR_MAGIC) {
++ PRINT_ERROR("qla2x00t: Wrong version of the initiator part: "
++ "%d", rc);
++ res = -EINVAL;
++ goto out;
++ }
++
++ qla2xxx_add_targets();
++
++ res = 0;
++
++ PRINT_INFO("qla2x00t: %s", "Target mode driver for QLogic 2x00 controller "
++ "registered successfully");
++
++out:
++ TRACE_EXIT();
++ return res;
++}
++
++static void q2t_free_session_done(struct scst_session *scst_sess)
++{
++ struct q2t_sess *sess;
++ struct q2t_tgt *tgt;
++ scsi_qla_host_t *ha;
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ BUG_ON(scst_sess == NULL);
++ sess = (struct q2t_sess *)scst_sess_get_tgt_priv(scst_sess);
++ BUG_ON(sess == NULL);
++ tgt = sess->tgt;
++
++ TRACE_MGMT_DBG("Unregistration of sess %p finished", sess);
++
++ kfree(sess);
++
++ if (tgt == NULL)
++ goto out;
++
++ TRACE_DBG("empty(sess_list) %d sess_count %d",
++ list_empty(&tgt->sess_list), tgt->sess_count);
++
++ ha = tgt->ha;
++
++ /*
++	 * We need to protect against a race where tgt is freed before or
++ * inside wake_up()
++ */
++ spin_lock_irqsave(&ha->hardware_lock, flags);
++ tgt->sess_count--;
++ if (tgt->sess_count == 0)
++ wake_up_all(&tgt->waitQ);
++ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* ha->hardware_lock supposed to be held on entry */
++static int q2t_unreg_sess(struct q2t_sess *sess)
++{
++ int res = 1;
++
++ TRACE_ENTRY();
++
++ BUG_ON(sess == NULL);
++ BUG_ON(sess->sess_ref != 0);
++
++ TRACE_MGMT_DBG("Deleting sess %p from tgt %p", sess, sess->tgt);
++ list_del(&sess->sess_list_entry);
++
++ if (sess->deleted)
++ list_del(&sess->del_list_entry);
++
++ PRINT_INFO("qla2x00t(%ld): %ssession for loop_id %d deleted",
++ sess->tgt->ha->instance, sess->local ? "local " : "",
++ sess->loop_id);
++
++ scst_unregister_session(sess->scst_sess, 0, q2t_free_session_done);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* ha->hardware_lock supposed to be held on entry */
++static int q2t_reset(scsi_qla_host_t *ha, void *iocb, int mcmd)
++{
++ struct q2t_sess *sess;
++ int loop_id;
++ uint16_t lun = 0;
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ if (IS_FWI2_CAPABLE(ha)) {
++ notify24xx_entry_t *n = (notify24xx_entry_t *)iocb;
++ loop_id = le16_to_cpu(n->nport_handle);
++ } else
++ loop_id = GET_TARGET_ID(ha, (notify_entry_t *)iocb);
++
++ if (loop_id == 0xFFFF) {
++ /* Global event */
++ q2t_clear_tgt_db(ha->tgt, 1);
++ if (!list_empty(&ha->tgt->sess_list)) {
++ sess = list_entry(ha->tgt->sess_list.next,
++ typeof(*sess), sess_list_entry);
++ switch (mcmd) {
++ case Q2T_NEXUS_LOSS_SESS:
++ mcmd = Q2T_NEXUS_LOSS;
++ break;
++
++ case Q2T_ABORT_ALL_SESS:
++ mcmd = Q2T_ABORT_ALL;
++ break;
++
++ case Q2T_NEXUS_LOSS:
++ case Q2T_ABORT_ALL:
++ break;
++
++ default:
++ PRINT_ERROR("qla2x00t(%ld): Not allowed "
++ "command %x in %s", ha->instance,
++ mcmd, __func__);
++ sess = NULL;
++ break;
++ }
++ } else
++ sess = NULL;
++ } else
++ sess = q2t_find_sess_by_loop_id(ha->tgt, loop_id);
++
++ if (sess == NULL) {
++ res = -ESRCH;
++ ha->tgt->tm_to_unknown = 1;
++ goto out;
++ }
++
++ TRACE_MGMT_DBG("scsi(%ld): resetting (session %p from port "
++ "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
++ "mcmd %x, loop_id %d)", ha->host_no, sess,
++ sess->port_name[0], sess->port_name[1],
++ sess->port_name[2], sess->port_name[3],
++ sess->port_name[4], sess->port_name[5],
++ sess->port_name[6], sess->port_name[7],
++ mcmd, loop_id);
++
++ res = q2t_issue_task_mgmt(sess, (uint8_t *)&lun, sizeof(lun),
++ mcmd, iocb, Q24_MGMT_SEND_NACK);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* ha->hardware_lock supposed to be held on entry */
++static void q2t_clear_tgt_db(struct q2t_tgt *tgt, bool local_only)
++{
++ struct q2t_sess *sess, *sess_tmp;
++
++ TRACE_ENTRY();
++
++ TRACE(TRACE_MGMT, "qla2x00t: Clearing targets DB %p", tgt);
++
++ list_for_each_entry_safe(sess, sess_tmp, &tgt->sess_list,
++ sess_list_entry) {
++ if (local_only && !sess->local)
++ continue;
++ if (local_only && sess->local)
++ TRACE_MGMT_DBG("Putting local session %p from port "
++ "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
++ sess, sess->port_name[0], sess->port_name[1],
++ sess->port_name[2], sess->port_name[3],
++ sess->port_name[4], sess->port_name[5],
++ sess->port_name[6], sess->port_name[7]);
++ q2t_sess_put(sess);
++ }
++
++ /* At this point tgt could be already dead */
++
++ TRACE_MGMT_DBG("Finished clearing tgt %p DB", tgt);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Called in a thread context */
++static void q2t_alloc_session_done(struct scst_session *scst_sess,
++ void *data, int result)
++{
++ TRACE_ENTRY();
++
++ if (result != 0) {
++ struct q2t_sess *sess = (struct q2t_sess *)data;
++ struct q2t_tgt *tgt = sess->tgt;
++ scsi_qla_host_t *ha = tgt->ha;
++ unsigned long flags;
++
++ PRINT_INFO("qla2x00t(%ld): Session initialization failed",
++ ha->instance);
++
++ spin_lock_irqsave(&ha->hardware_lock, flags);
++ q2t_sess_put(sess);
++ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static void q2t_del_sess_timer_fn(unsigned long arg)
++{
++ struct q2t_tgt *tgt = (struct q2t_tgt *)arg;
++ scsi_qla_host_t *ha = tgt->ha;
++ struct q2t_sess *sess;
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ spin_lock_irqsave(&ha->hardware_lock, flags);
++ while (!list_empty(&tgt->del_sess_list)) {
++ sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
++ del_list_entry);
++ if (time_after_eq(jiffies, sess->expires)) {
++ /*
++ * sess will be deleted from del_sess_list in
++ * q2t_unreg_sess()
++ */
++ TRACE_MGMT_DBG("Timeout: sess %p about to be deleted",
++ sess);
++ q2t_sess_put(sess);
++ } else {
++ tgt->sess_del_timer.expires = sess->expires;
++ add_timer(&tgt->sess_del_timer);
++ break;
++ }
++ }
++ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* pha->hardware_lock supposed to be held on entry */
++static void q2t_undelete_sess(struct q2t_sess *sess)
++{
++ list_del(&sess->del_list_entry);
++ sess->deleted = 0;
++}
++
++/*
++ * Must be called under tgt_mutex.
++ *
++ * Adds an extra ref to allow dropping the hw lock after adding sess to the list.
++ * Caller must put it.
++ */
++static struct q2t_sess *q2t_create_sess(scsi_qla_host_t *ha, fc_port_t *fcport,
++ bool local)
++{
++ char *wwn_str;
++ const int wwn_str_len = 3*WWN_SIZE+2;
++ struct q2t_tgt *tgt = ha->tgt;
++ struct q2t_sess *sess;
++
++ TRACE_ENTRY();
++
++ /* Check to avoid double sessions */
++ spin_lock_irq(&ha->hardware_lock);
++ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
++ if ((sess->port_name[0] == fcport->port_name[0]) &&
++ (sess->port_name[1] == fcport->port_name[1]) &&
++ (sess->port_name[2] == fcport->port_name[2]) &&
++ (sess->port_name[3] == fcport->port_name[3]) &&
++ (sess->port_name[4] == fcport->port_name[4]) &&
++ (sess->port_name[5] == fcport->port_name[5]) &&
++ (sess->port_name[6] == fcport->port_name[6]) &&
++ (sess->port_name[7] == fcport->port_name[7])) {
++ TRACE_MGMT_DBG("Double sess %p found (s_id %x:%x:%x, "
++ "loop_id %d), updating to d_id %x:%x:%x, "
++ "loop_id %d", sess, sess->s_id.b.domain,
++ sess->s_id.b.al_pa, sess->s_id.b.area,
++ sess->loop_id, fcport->d_id.b.domain,
++ fcport->d_id.b.al_pa, fcport->d_id.b.area,
++ fcport->loop_id);
++
++ if (sess->deleted)
++ q2t_undelete_sess(sess);
++
++ q2t_sess_get(sess);
++ sess->s_id = fcport->d_id;
++ sess->loop_id = fcport->loop_id;
++ sess->conf_compl_supported = fcport->conf_compl_supported;
++ if (sess->local && !local)
++ sess->local = 0;
++ spin_unlock_irq(&ha->hardware_lock);
++ goto out;
++ }
++ }
++ spin_unlock_irq(&ha->hardware_lock);
++
++ /* We are under tgt_mutex, so a new sess can't be added behind us */
++
++ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
++ if (sess == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): session allocation failed, "
++ "all commands from port %02x:%02x:%02x:%02x:"
++ "%02x:%02x:%02x:%02x will be refused", ha->instance,
++ fcport->port_name[0], fcport->port_name[1],
++ fcport->port_name[2], fcport->port_name[3],
++ fcport->port_name[4], fcport->port_name[5],
++ fcport->port_name[6], fcport->port_name[7]);
++ goto out;
++ }
++
++ sess->sess_ref = 2; /* plus 1 extra ref, see above */
++ sess->tgt = ha->tgt;
++ sess->s_id = fcport->d_id;
++ sess->loop_id = fcport->loop_id;
++ sess->conf_compl_supported = fcport->conf_compl_supported;
++ sess->local = local;
++ BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
++ memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
++
++ wwn_str = kmalloc(wwn_str_len, GFP_KERNEL);
++ if (wwn_str == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): Allocation of wwn_str failed. "
++ "All commands from port %02x:%02x:%02x:%02x:%02x:%02x:"
++ "%02x:%02x will be refused", ha->instance,
++ fcport->port_name[0], fcport->port_name[1],
++ fcport->port_name[2], fcport->port_name[3],
++ fcport->port_name[4], fcport->port_name[5],
++ fcport->port_name[6], fcport->port_name[7]);
++ goto out_free_sess;
++ }
++
++ sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
++ fcport->port_name[0], fcport->port_name[1],
++ fcport->port_name[2], fcport->port_name[3],
++ fcport->port_name[4], fcport->port_name[5],
++ fcport->port_name[6], fcport->port_name[7]);
++
++ /* Let's do the session creation async'ly */
++ sess->scst_sess = scst_register_session(tgt->scst_tgt, 1, wwn_str,
++ sess, sess, q2t_alloc_session_done);
++ if (sess->scst_sess == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): scst_register_session() "
++ "failed for host %ld (wwn %s, loop_id %d), all "
++ "commands from it will be refused", ha->instance,
++ ha->host_no, wwn_str, fcport->loop_id);
++ goto out_free_sess_wwn;
++ }
++
++ spin_lock_irq(&ha->hardware_lock);
++ TRACE_MGMT_DBG("Adding sess %p to tgt %p", sess, tgt);
++ list_add_tail(&sess->sess_list_entry, &tgt->sess_list);
++ tgt->sess_count++;
++ spin_unlock_irq(&ha->hardware_lock);
++
++ PRINT_INFO("qla2x00t(%ld): %ssession for wwn %s (loop_id %d, "
++ "s_id %x:%x:%x, confirmed completion %ssupported) added",
++ ha->instance, local ? "local " : "", wwn_str, fcport->loop_id,
++ sess->s_id.b.domain, sess->s_id.b.al_pa, sess->s_id.b.area,
++ sess->conf_compl_supported ? "" : "not ");
++
++ kfree(wwn_str);
++
++out:
++ TRACE_EXIT_HRES(sess);
++ return sess;
++
++out_free_sess_wwn:
++ kfree(wwn_str);
++	/* fall through */
++
++out_free_sess:
++ kfree(sess);
++ sess = NULL;
++ goto out;
++}
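++
++/*
++ * Illustrative summary of the session reference counting used above:
++ *
++ *   q2t_create_sess()      - starts sess_ref at 2: one reference held while
++ *                            the session is on tgt->sess_list, plus one extra
++ *                            creation reference for the caller;
++ *   q2t_sess_put()         - the caller drops its creation reference once it
++ *                            has re-taken the hardware lock (see
++ *                            q2t_fc_port_added());
++ *   final q2t_sess_put()   - when sess_ref reaches 0, q2t_unreg_sess() runs
++ *                            and scst_unregister_session() frees the session
++ *                            via q2t_free_session_done().
++ */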
++
++/* pha->hardware_lock supposed to be held on entry */
++static void q2t_reappear_sess(struct q2t_sess *sess, const char *reason)
++{
++ q2t_undelete_sess(sess);
++
++ PRINT_INFO("qla2x00t(%ld): %ssession for port %02x:"
++ "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
++ "reappeared%s", sess->tgt->ha->instance,
++ sess->local ? "local " : "", sess->port_name[0],
++ sess->port_name[1], sess->port_name[2], sess->port_name[3],
++ sess->port_name[4], sess->port_name[5], sess->port_name[6],
++ sess->port_name[7], sess->loop_id, reason);
++ TRACE_MGMT_DBG("Appeared sess %p", sess);
++}
++
++static void q2t_fc_port_added(scsi_qla_host_t *ha, fc_port_t *fcport)
++{
++ struct q2t_tgt *tgt;
++ struct q2t_sess *sess;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&ha->tgt_mutex);
++
++ tgt = ha->tgt;
++
++ if ((tgt == NULL) || (fcport->port_type != FCT_INITIATOR))
++ goto out_unlock;
++
++ if (tgt->tgt_stop)
++ goto out_unlock;
++
++ spin_lock_irq(&ha->hardware_lock);
++
++ sess = q2t_find_sess_by_loop_id(tgt, fcport->loop_id);
++ if (sess == NULL) {
++ spin_unlock_irq(&ha->hardware_lock);
++ sess = q2t_create_sess(ha, fcport, false);
++ spin_lock_irq(&ha->hardware_lock);
++ if (sess != NULL)
++ q2t_sess_put(sess); /* put the extra creation ref */
++ } else {
++ if (sess->deleted)
++ q2t_reappear_sess(sess, "");
++ }
++
++ if (sess->local) {
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): local session for "
++ "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
++ "(loop ID %d) became global", ha->instance,
++ fcport->port_name[0], fcport->port_name[1],
++ fcport->port_name[2], fcport->port_name[3],
++ fcport->port_name[4], fcport->port_name[5],
++ fcport->port_name[6], fcport->port_name[7],
++ sess->loop_id);
++ sess->local = 0;
++ }
++
++ spin_unlock_irq(&ha->hardware_lock);
++
++out_unlock:
++ mutex_unlock(&ha->tgt_mutex);
++
++ TRACE_EXIT();
++ return;
++}
++
++static void q2t_fc_port_deleted(scsi_qla_host_t *ha, fc_port_t *fcport)
++{
++ struct q2t_tgt *tgt;
++ struct q2t_sess *sess;
++ uint32_t dev_loss_tmo;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&ha->tgt_mutex);
++
++ tgt = ha->tgt;
++
++ if ((tgt == NULL) || (fcport->port_type != FCT_INITIATOR))
++ goto out_unlock;
++
++ dev_loss_tmo = ha->port_down_retry_count + 5;
++
++ if (tgt->tgt_stop)
++ goto out_unlock;
++
++ spin_lock_irq(&ha->hardware_lock);
++
++ sess = q2t_find_sess_by_loop_id(tgt, fcport->loop_id);
++ if (sess == NULL)
++ goto out_unlock_ha;
++
++ if (!sess->deleted) {
++ int add_tmr;
++
++ add_tmr = list_empty(&tgt->del_sess_list);
++
++ TRACE_MGMT_DBG("Scheduling sess %p to deletion", sess);
++ list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
++ sess->deleted = 1;
++
++ PRINT_INFO("qla2x00t(%ld): %ssession for port %02x:%02x:%02x:"
++ "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
++ "deletion in %d secs", ha->instance,
++ sess->local ? "local " : "",
++ fcport->port_name[0], fcport->port_name[1],
++ fcport->port_name[2], fcport->port_name[3],
++ fcport->port_name[4], fcport->port_name[5],
++ fcport->port_name[6], fcport->port_name[7],
++ sess->loop_id, dev_loss_tmo);
++
++ sess->expires = jiffies + dev_loss_tmo * HZ;
++ if (add_tmr)
++ mod_timer(&tgt->sess_del_timer, sess->expires);
++ }
++
++out_unlock_ha:
++ spin_unlock_irq(&ha->hardware_lock);
++
++out_unlock:
++ mutex_unlock(&ha->tgt_mutex);
++
++ TRACE_EXIT();
++ return;
++}
++
++static inline int test_tgt_sess_count(struct q2t_tgt *tgt)
++{
++ unsigned long flags;
++ int res;
++
++ /*
++	 * We need to protect against a race where tgt is freed before or
++ * inside wake_up()
++ */
++ spin_lock_irqsave(&tgt->ha->hardware_lock, flags);
++ TRACE_DBG("tgt %p, empty(sess_list)=%d sess_count=%d",
++ tgt, list_empty(&tgt->sess_list), tgt->sess_count);
++ res = (tgt->sess_count == 0);
++ spin_unlock_irqrestore(&tgt->ha->hardware_lock, flags);
++
++ return res;
++}
++
++/* Must be called under tgt_host_action_mutex or q2t_unreg_rwsem write locked */
++static void q2t_target_stop(struct scst_tgt *scst_tgt)
++{
++ struct q2t_tgt *tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
++ scsi_qla_host_t *ha = tgt->ha;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Stopping target for host %ld(%p)", ha->host_no, ha);
++
++ /*
++ * Mutex needed to sync with q2t_fc_port_[added,deleted].
++	 * The lock is needed because we can still get an incoming packet.
++ */
++
++ mutex_lock(&ha->tgt_mutex);
++ spin_lock_irq(&ha->hardware_lock);
++ tgt->tgt_stop = 1;
++ q2t_clear_tgt_db(tgt, false);
++ spin_unlock_irq(&ha->hardware_lock);
++ mutex_unlock(&ha->tgt_mutex);
++
++ del_timer_sync(&tgt->sess_del_timer);
++
++ TRACE_MGMT_DBG("Waiting for sess works (tgt %p)", tgt);
++ spin_lock_irq(&tgt->sess_work_lock);
++ while (!list_empty(&tgt->sess_works_list)) {
++ spin_unlock_irq(&tgt->sess_work_lock);
++ flush_scheduled_work();
++ spin_lock_irq(&tgt->sess_work_lock);
++ }
++ spin_unlock_irq(&tgt->sess_work_lock);
++
++ TRACE_MGMT_DBG("Waiting for tgt %p: list_empty(sess_list)=%d "
++ "sess_count=%d", tgt, list_empty(&tgt->sess_list),
++ tgt->sess_count);
++
++ wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
++
++ /* Big hammer */
++ if (!ha->host_shutting_down && qla_tgt_mode_enabled(ha))
++ qla2x00_disable_tgt_mode(ha);
++
++ /* Wait for sessions to clear out (just in case) */
++ wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
++
++ TRACE_MGMT_DBG("Waiting for %d IRQ commands to complete (tgt %p)",
++ tgt->irq_cmd_count, tgt);
++
++ mutex_lock(&ha->tgt_mutex);
++ spin_lock_irq(&ha->hardware_lock);
++ while (tgt->irq_cmd_count != 0) {
++ spin_unlock_irq(&ha->hardware_lock);
++ udelay(2);
++ spin_lock_irq(&ha->hardware_lock);
++ }
++ ha->tgt = NULL;
++ spin_unlock_irq(&ha->hardware_lock);
++ mutex_unlock(&ha->tgt_mutex);
++
++ TRACE_MGMT_DBG("Stop of tgt %p finished", tgt);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Must be called under tgt_host_action_mutex or q2t_unreg_rwsem write locked */
++static int q2t_target_release(struct scst_tgt *scst_tgt)
++{
++ struct q2t_tgt *tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
++ scsi_qla_host_t *ha = tgt->ha;
++
++ TRACE_ENTRY();
++
++ q2t_target_stop(scst_tgt);
++
++ ha->q2t_tgt = NULL;
++ scst_tgt_set_tgt_priv(scst_tgt, NULL);
++
++ TRACE_MGMT_DBG("Release of tgt %p finished", tgt);
++
++ kfree(tgt);
++
++ TRACE_EXIT();
++ return 0;
++}
++
++/*
++ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire it
++ */
++static void q2x_modify_command_count(scsi_qla_host_t *ha, int cmd_count,
++ int imm_count)
++{
++ modify_lun_entry_t *pkt;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Sending MODIFY_LUN (ha=%p, cmd=%d, imm=%d)",
++ ha, cmd_count, imm_count);
++
++	/* Sending marker isn't necessary, since we are called from ISR */
++
++ pkt = (modify_lun_entry_t *)qla2x00_req_pkt(ha);
++ if (pkt == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
++ "request packet", ha->instance, __func__);
++ goto out;
++ }
++
++ ha->tgt->modify_lun_expected++;
++
++ pkt->entry_type = MODIFY_LUN_TYPE;
++ pkt->entry_count = 1;
++ if (cmd_count < 0) {
++ pkt->operators = MODIFY_LUN_CMD_SUB; /* Subtract from command count */
++ pkt->command_count = -cmd_count;
++ } else if (cmd_count > 0) {
++ pkt->operators = MODIFY_LUN_CMD_ADD; /* Add to command count */
++ pkt->command_count = cmd_count;
++ }
++
++ if (imm_count < 0) {
++ pkt->operators |= MODIFY_LUN_IMM_SUB;
++ pkt->immed_notify_count = -imm_count;
++ } else if (imm_count > 0) {
++ pkt->operators |= MODIFY_LUN_IMM_ADD;
++ pkt->immed_notify_count = imm_count;
++ }
++
++ pkt->timeout = 0; /* Use default */
++
++ TRACE_BUFFER("MODIFY LUN packet data", pkt, REQUEST_ENTRY_SIZE);
++
++ q2t_exec_queue(ha);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire it
++ */
++static void q2x_send_notify_ack(scsi_qla_host_t *ha, notify_entry_t *iocb,
++ uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
++ uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
++{
++ nack_entry_t *ntfy;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Sending NOTIFY_ACK (ha=%p)", ha);
++
++ /* Send marker if required */
++ if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
++ goto out;
++
++ ntfy = (nack_entry_t *)qla2x00_req_pkt(ha);
++ if (ntfy == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
++ "request packet", ha->instance, __func__);
++ goto out;
++ }
++
++ if (ha->tgt != NULL)
++ ha->tgt->notify_ack_expected++;
++
++ ntfy->entry_type = NOTIFY_ACK_TYPE;
++ ntfy->entry_count = 1;
++ SET_TARGET_ID(ha, ntfy->target, GET_TARGET_ID(ha, iocb));
++ ntfy->status = iocb->status;
++ ntfy->task_flags = iocb->task_flags;
++ ntfy->seq_id = iocb->seq_id;
++ /* Do not increment here, the chip isn't decrementing */
++ /* ntfy->flags = __constant_cpu_to_le16(NOTIFY_ACK_RES_COUNT); */
++ ntfy->flags |= cpu_to_le16(add_flags);
++ ntfy->srr_rx_id = iocb->srr_rx_id;
++ ntfy->srr_rel_offs = iocb->srr_rel_offs;
++ ntfy->srr_ui = iocb->srr_ui;
++ ntfy->srr_flags = cpu_to_le16(srr_flags);
++ ntfy->srr_reject_code = cpu_to_le16(srr_reject_code);
++ ntfy->srr_reject_code_expl = srr_explan;
++ ntfy->ox_id = iocb->ox_id;
++
++ if (resp_code_valid) {
++ ntfy->resp_code = cpu_to_le16(resp_code);
++ ntfy->flags |= __constant_cpu_to_le16(
++ NOTIFY_ACK_TM_RESP_CODE_VALID);
++ }
++
++ TRACE(TRACE_SCSI, "qla2x00t(%ld): Sending Notify Ack Seq %#x -> I %#x "
++ "St %#x RC %#x", ha->instance,
++ le16_to_cpu(iocb->seq_id), GET_TARGET_ID(ha, iocb),
++ le16_to_cpu(iocb->status), le16_to_cpu(ntfy->resp_code));
++ TRACE_BUFFER("Notify Ack packet data", ntfy, REQUEST_ENTRY_SIZE);
++
++ q2t_exec_queue(ha);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire it
++ */
++static void q24_send_abts_resp(scsi_qla_host_t *ha,
++ const abts24_recv_entry_t *abts, uint32_t status, bool ids_reversed)
++{
++ abts24_resp_entry_t *resp;
++ uint32_t f_ctl;
++ uint8_t *p;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Sending task mgmt ABTS response (ha=%p, atio=%p, "
++ "status=%x", ha, abts, status);
++
++ /* Send marker if required */
++ if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
++ goto out;
++
++ resp = (abts24_resp_entry_t *)qla2x00_req_pkt(ha);
++ if (resp == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
++ "request packet", ha->instance, __func__);
++ goto out;
++ }
++
++ resp->entry_type = ABTS_RESP_24XX;
++ resp->entry_count = 1;
++ resp->nport_handle = abts->nport_handle;
++ resp->sof_type = abts->sof_type;
++ resp->exchange_address = abts->exchange_address;
++ resp->fcp_hdr_le = abts->fcp_hdr_le;
++ f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
++ F_CTL_LAST_SEQ | F_CTL_END_SEQ |
++ F_CTL_SEQ_INITIATIVE);
++ p = (uint8_t *)&f_ctl;
++ resp->fcp_hdr_le.f_ctl[0] = *p++;
++ resp->fcp_hdr_le.f_ctl[1] = *p++;
++ resp->fcp_hdr_le.f_ctl[2] = *p;
++ if (ids_reversed) {
++ resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
++ resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
++ resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
++ resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
++ resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
++ resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
++ } else {
++ resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
++ resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
++ resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
++ resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
++ resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
++ resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
++ }
++ resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
++ if (status == SCST_MGMT_STATUS_SUCCESS) {
++ resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
++ resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
++ resp->payload.ba_acct.low_seq_cnt = 0x0000;
++ resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
++ resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
++ resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
++ } else {
++ resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
++ resp->payload.ba_rjt.reason_code =
++ BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
++ /* Other bytes are zero */
++ }
++
++ TRACE_BUFFER("ABTS RESP packet data", resp, REQUEST_ENTRY_SIZE);
++
++ ha->tgt->abts_resp_expected++;
++
++ q2t_exec_queue(ha);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire it
++ */
++static void q24_retry_term_exchange(scsi_qla_host_t *ha,
++ abts24_resp_fw_entry_t *entry)
++{
++ ctio7_status1_entry_t *ctio;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Sending retry TERM EXCH CTIO7 (ha=%p)", ha);
++
++ /* Send marker if required */
++ if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
++ goto out;
++
++ ctio = (ctio7_status1_entry_t *)qla2x00_req_pkt(ha);
++ if (ctio == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
++ "request packet", ha->instance, __func__);
++ goto out;
++ }
++
++ /*
++ * We've got on entrance firmware's response on by us generated
++ * ABTS response. So, in it ID fields are reversed.
++ */
++
++ ctio->common.entry_type = CTIO_TYPE7;
++ ctio->common.entry_count = 1;
++ ctio->common.nport_handle = entry->nport_handle;
++ ctio->common.handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
++ ctio->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
++ ctio->common.initiator_id[0] = entry->fcp_hdr_le.d_id[0];
++ ctio->common.initiator_id[1] = entry->fcp_hdr_le.d_id[1];
++ ctio->common.initiator_id[2] = entry->fcp_hdr_le.d_id[2];
++ ctio->common.exchange_addr = entry->exchange_addr_to_abort;
++ ctio->flags = __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
++ ctio->ox_id = entry->fcp_hdr_le.ox_id;
++
++ TRACE_BUFFER("CTIO7 retry TERM EXCH packet data", ctio, REQUEST_ENTRY_SIZE);
++
++ q2t_exec_queue(ha);
++
++ q24_send_abts_resp(ha, (abts24_recv_entry_t *)entry,
++ SCST_MGMT_STATUS_SUCCESS, true);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire it
++ */
++static void q24_handle_abts(scsi_qla_host_t *ha, abts24_recv_entry_t *abts)
++{
++ uint32_t tag;
++ int rc;
++ struct q2t_mgmt_cmd *mcmd;
++ struct q2t_sess *sess;
++
++ TRACE_ENTRY();
++
++ if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
++ PRINT_ERROR("qla2x00t(%ld): ABTS: Abort Sequence not "
++ "supported", ha->instance);
++ goto out_err;
++ }
++
++ tag = abts->exchange_addr_to_abort;
++
++ if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
++ TRACE_MGMT_DBG("qla2x00t(%ld): ABTS: Unknown Exchange "
++ "Address received", ha->instance);
++ goto out_err;
++ }
++
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): task abort (s_id=%x:%x:%x, "
++ "tag=%d, param=%x)", ha->instance, abts->fcp_hdr_le.s_id[2],
++ abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
++ le32_to_cpu(abts->fcp_hdr_le.parameter));
++
++ sess = q2t_find_sess_by_s_id_le(ha->tgt, abts->fcp_hdr_le.s_id);
++ if (sess == NULL) {
++		TRACE(TRACE_MGMT, "qla2x00t(%ld): task abort for a non-existing "
++ "session", ha->instance);
++ ha->tgt->tm_to_unknown = 1;
++ goto out_err;
++ }
++
++ mcmd = mempool_alloc(q2t_mgmt_cmd_mempool, GFP_ATOMIC);
++ if (mcmd == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): %s: Allocation of ABORT cmd failed",
++ ha->instance, __func__);
++ goto out_err;
++ }
++ memset(mcmd, 0, sizeof(*mcmd));
++
++ mcmd->sess = sess;
++ memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
++
++ rc = scst_rx_mgmt_fn_tag(sess->scst_sess, SCST_ABORT_TASK, tag,
++ SCST_ATOMIC, mcmd);
++ if (rc != 0) {
++ PRINT_ERROR("qla2x00t(%ld): scst_rx_mgmt_fn_tag() failed: %d",
++ ha->instance, rc);
++ goto out_err_free;
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++
++out_err_free:
++ mempool_free(mcmd, q2t_mgmt_cmd_mempool);
++
++out_err:
++ q24_send_abts_resp(ha, abts, SCST_MGMT_STATUS_REJECTED, false);
++ goto out;
++}
++
++/*
++ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire it
++ */
++static void q24_send_task_mgmt_ctio(scsi_qla_host_t *ha,
++ struct q2t_mgmt_cmd *mcmd, uint32_t resp_code)
++{
++ const atio7_entry_t *atio = &mcmd->orig_iocb.atio7;
++ ctio7_status1_entry_t *ctio;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x",
++ ha, atio, resp_code);
++
++ /* Send marker if required */
++ if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
++ goto out;
++
++ ctio = (ctio7_status1_entry_t *)qla2x00_req_pkt(ha);
++ if (ctio == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
++ "request packet", ha->instance, __func__);
++ goto out;
++ }
++
++ ctio->common.entry_type = CTIO_TYPE7;
++ ctio->common.entry_count = 1;
++ ctio->common.handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
++ ctio->common.nport_handle = mcmd->sess->loop_id;
++ ctio->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
++ ctio->common.initiator_id[0] = atio->fcp_hdr.s_id[2];
++ ctio->common.initiator_id[1] = atio->fcp_hdr.s_id[1];
++ ctio->common.initiator_id[2] = atio->fcp_hdr.s_id[0];
++ ctio->common.exchange_addr = atio->exchange_addr;
++ ctio->flags = (atio->attr << 9) | __constant_cpu_to_le16(
++ CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
++ ctio->ox_id = swab16(atio->fcp_hdr.ox_id);
++ ctio->scsi_status = __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
++ ctio->response_len = __constant_cpu_to_le16(8);
++ ((uint32_t *)ctio->sense_data)[0] = cpu_to_be32(resp_code);
++
++ TRACE_BUFFER("CTIO7 TASK MGMT packet data", ctio, REQUEST_ENTRY_SIZE);
++
++ q2t_exec_queue(ha);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire it
++ */
++static void q24_send_notify_ack(scsi_qla_host_t *ha,
++ notify24xx_entry_t *iocb, uint16_t srr_flags,
++ uint8_t srr_reject_code, uint8_t srr_explan)
++{
++ nack24xx_entry_t *nack;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Sending NOTIFY_ACK24 (ha=%p)", ha);
++
++ /* Send marker if required */
++ if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
++ goto out;
++
++ if (ha->tgt != NULL)
++ ha->tgt->notify_ack_expected++;
++
++ nack = (nack24xx_entry_t *)qla2x00_req_pkt(ha);
++ if (nack == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
++ "request packet", ha->instance, __func__);
++ goto out;
++ }
++
++ nack->entry_type = NOTIFY_ACK_TYPE;
++ nack->entry_count = 1;
++ nack->nport_handle = iocb->nport_handle;
++ if (le16_to_cpu(iocb->status) == IMM_NTFY_ELS) {
++ nack->flags = iocb->flags &
++ __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
++ }
++ nack->srr_rx_id = iocb->srr_rx_id;
++ nack->status = iocb->status;
++ nack->status_subcode = iocb->status_subcode;
++ nack->exchange_address = iocb->exchange_address;
++ nack->srr_rel_offs = iocb->srr_rel_offs;
++ nack->srr_ui = iocb->srr_ui;
++ nack->srr_flags = cpu_to_le16(srr_flags);
++ nack->srr_reject_code = srr_reject_code;
++ nack->srr_reject_code_expl = srr_explan;
++ nack->ox_id = iocb->ox_id;
++
++ TRACE(TRACE_SCSI, "qla2x00t(%ld): Sending 24xx Notify Ack %d",
++ ha->instance, nack->status);
++ TRACE_BUFFER("24xx Notify Ack packet data", nack, sizeof(*nack));
++
++ q2t_exec_queue(ha);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static uint32_t q2t_convert_to_fc_tm_status(int scst_mstatus)
++{
++ uint32_t res;
++
++ switch (scst_mstatus) {
++ case SCST_MGMT_STATUS_SUCCESS:
++ res = FC_TM_SUCCESS;
++ break;
++ case SCST_MGMT_STATUS_TASK_NOT_EXIST:
++ res = FC_TM_BAD_CMD;
++ break;
++ case SCST_MGMT_STATUS_FN_NOT_SUPPORTED:
++ case SCST_MGMT_STATUS_REJECTED:
++ res = FC_TM_REJECT;
++ break;
++ case SCST_MGMT_STATUS_LUN_NOT_EXIST:
++ case SCST_MGMT_STATUS_FAILED:
++ default:
++ res = FC_TM_FAILED;
++ break;
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* SCST Callback */
++static void q2t_task_mgmt_fn_done(struct scst_mgmt_cmd *scst_mcmd)
++{
++ struct q2t_mgmt_cmd *mcmd;
++ unsigned long flags;
++ scsi_qla_host_t *ha;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("scst_mcmd (%p) status %#x state %#x", scst_mcmd,
++ scst_mcmd->status, scst_mcmd->state);
++
++ mcmd = scst_mgmt_cmd_get_tgt_priv(scst_mcmd);
++ if (unlikely(mcmd == NULL)) {
++ PRINT_ERROR("qla2x00t: scst_mcmd %p tgt_spec is NULL", mcmd);
++ goto out;
++ }
++
++ ha = mcmd->sess->tgt->ha;
++
++ spin_lock_irqsave(&ha->hardware_lock, flags);
++ if (IS_FWI2_CAPABLE(ha)) {
++ if (mcmd->flags == Q24_MGMT_SEND_NACK) {
++ q24_send_notify_ack(ha,
++ &mcmd->orig_iocb.notify_entry24, 0, 0, 0);
++ } else {
++ if (scst_mcmd->fn == SCST_ABORT_TASK)
++ q24_send_abts_resp(ha, &mcmd->orig_iocb.abts,
++ scst_mgmt_cmd_get_status(scst_mcmd),
++ false);
++ else
++ q24_send_task_mgmt_ctio(ha, mcmd,
++ q2t_convert_to_fc_tm_status(
++ scst_mgmt_cmd_get_status(scst_mcmd)));
++ }
++ } else {
++ uint32_t resp_code = q2t_convert_to_fc_tm_status(
++ scst_mgmt_cmd_get_status(scst_mcmd));
++ q2x_send_notify_ack(ha, &mcmd->orig_iocb.notify_entry, 0,
++ resp_code, 1, 0, 0, 0);
++ }
++ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++
++ scst_mgmt_cmd_set_tgt_priv(scst_mcmd, NULL);
++ mempool_free(mcmd, q2t_mgmt_cmd_mempool);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* No locks */
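++/*
++ * Map the command's scatterlist for DMA with pci_map_sg() and add to
++ * prm->req_cnt the number of continuation IOCBs needed for the mapped
++ * segments (on top of the command IOCB itself).
++ */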
++static int q2t_pci_map_calc_cnt(struct q2t_prm *prm)
++{
++ int res = 0;
++
++ BUG_ON(prm->cmd->sg_cnt == 0);
++
++ prm->sg = (struct scatterlist *)prm->cmd->sg;
++ prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, prm->cmd->sg,
++ prm->cmd->sg_cnt, prm->cmd->dma_data_direction);
++ if (unlikely(prm->seg_cnt == 0))
++ goto out_err;
++
++ prm->cmd->sg_mapped = 1;
++
++ /*
++	 * If there are more sg entries than fit into the command IOCB
++	 * (datasegs_per_cmd), continuation entries need to be allocated
++ */
++ if (prm->seg_cnt > prm->tgt->datasegs_per_cmd) {
++ prm->req_cnt += (uint16_t)(prm->seg_cnt -
++ prm->tgt->datasegs_per_cmd) /
++ prm->tgt->datasegs_per_cont;
++ if (((uint16_t)(prm->seg_cnt - prm->tgt->datasegs_per_cmd)) %
++ prm->tgt->datasegs_per_cont)
++ prm->req_cnt++;
++ }
++
++out:
++ TRACE_DBG("seg_cnt=%d, req_cnt=%d, res=%d", prm->seg_cnt,
++ prm->req_cnt, res);
++ return res;
++
++out_err:
++ PRINT_ERROR("qla2x00t(%ld): PCI mapping failed: sg_cnt=%d",
++ prm->tgt->ha->instance, prm->cmd->sg_cnt);
++ res = -1;
++ goto out;
++}
++
++static inline void q2t_unmap_sg(scsi_qla_host_t *ha, struct q2t_cmd *cmd)
++{
++ EXTRACHECKS_BUG_ON(!cmd->sg_mapped);
++ pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
++ cmd->sg_mapped = 0;
++}
++
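++/*
++ * Check whether the request ring has room for req_cnt entries plus two
++ * spare slots. If the cached free count looks too small, it is refreshed
++ * from the hardware request-out pointer before deciding. Returns
++ * SCST_TGT_RES_QUEUE_FULL if there still is not enough room.
++ */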
++static int q2t_check_reserve_free_req(scsi_qla_host_t *ha, uint32_t req_cnt)
++{
++ int res = SCST_TGT_RES_SUCCESS;
++ device_reg_t __iomem *reg = ha->iobase;
++ uint32_t cnt;
++
++ TRACE_ENTRY();
++
++ if (ha->req_q_cnt < (req_cnt + 2)) {
++ if (IS_FWI2_CAPABLE(ha))
++ cnt = (uint16_t)RD_REG_DWORD(
++ &reg->isp24.req_q_out);
++ else
++ cnt = qla2x00_debounce_register(
++ ISP_REQ_Q_OUT(ha, &reg->isp));
++ TRACE_DBG("Request ring circled: cnt=%d, "
++ "ha->req_ring_index=%d, ha->req_q_cnt=%d, req_cnt=%d",
++ cnt, ha->req_ring_index, ha->req_q_cnt, req_cnt);
++ if (ha->req_ring_index < cnt)
++ ha->req_q_cnt = cnt - ha->req_ring_index;
++ else
++ ha->req_q_cnt = ha->request_q_length -
++ (ha->req_ring_index - cnt);
++ }
++
++ if (unlikely(ha->req_q_cnt < (req_cnt + 2))) {
++ TRACE(TRACE_OUT_OF_MEM, "qla2x00t(%ld): There is no room in the "
++ "request ring: ha->req_ring_index=%d, ha->req_q_cnt=%d, "
++ "req_cnt=%d", ha->instance, ha->req_ring_index,
++ ha->req_q_cnt, req_cnt);
++ res = SCST_TGT_RES_QUEUE_FULL;
++ goto out;
++ }
++
++ ha->req_q_cnt -= req_cnt;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/*
++ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
++ */
++static inline void *q2t_get_req_pkt(scsi_qla_host_t *ha)
++{
++ /* Adjust ring index. */
++ ha->req_ring_index++;
++ if (ha->req_ring_index == ha->request_q_length) {
++ ha->req_ring_index = 0;
++ ha->request_ring_ptr = ha->request_ring;
++ } else {
++ ha->request_ring_ptr++;
++ }
++ return (cont_entry_t *)ha->request_ring_ptr;
++}
++
++/* ha->hardware_lock supposed to be held on entry */
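++/*
++ * Allocate a free command-handle slot by scanning ha->cmds[]. Valid handles
++ * are 1..MAX_OUTSTANDING_COMMANDS (0 is reserved as Q2T_NULL_HANDLE and
++ * Q2T_SKIP_HANDLE is never handed out). Q2T_NULL_HANDLE is returned when
++ * every slot is already in use.
++ */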
++static inline uint32_t q2t_make_handle(scsi_qla_host_t *ha)
++{
++ uint32_t h;
++
++ h = ha->current_handle;
++ /* always increment cmd handle */
++ do {
++ ++h;
++ if (h > MAX_OUTSTANDING_COMMANDS)
++ h = 1; /* 0 is Q2T_NULL_HANDLE */
++ if (h == ha->current_handle) {
++ TRACE(TRACE_OUT_OF_MEM, "qla2x00t(%ld): Ran out of "
++ "empty cmd slots in ha %p", ha->instance, ha);
++ h = Q2T_NULL_HANDLE;
++ break;
++ }
++ } while ((h == Q2T_NULL_HANDLE) ||
++ (h == Q2T_SKIP_HANDLE) ||
++ (ha->cmds[h-1] != NULL));
++
++ if (h != Q2T_NULL_HANDLE)
++ ha->current_handle = h;
++
++ return h;
++}
++
++/* ha->hardware_lock supposed to be held on entry */
++static void q2x_build_ctio_pkt(struct q2t_prm *prm)
++{
++ uint32_t h;
++ ctio_entry_t *pkt;
++ scsi_qla_host_t *ha = prm->tgt->ha;
++
++ pkt = (ctio_entry_t *)ha->request_ring_ptr;
++ prm->pkt = pkt;
++ memset(pkt, 0, sizeof(*pkt));
++
++ if (prm->tgt->tgt_enable_64bit_addr)
++ pkt->common.entry_type = CTIO_A64_TYPE;
++ else
++ pkt->common.entry_type = CONTINUE_TGT_IO_TYPE;
++
++ pkt->common.entry_count = (uint8_t)prm->req_cnt;
++
++ h = q2t_make_handle(ha);
++ if (h != Q2T_NULL_HANDLE)
++ ha->cmds[h-1] = prm->cmd;
++
++ pkt->common.handle = h | CTIO_COMPLETION_HANDLE_MARK;
++ pkt->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
++
++ /* Set initiator ID */
++ h = GET_TARGET_ID(ha, &prm->cmd->atio.atio2x);
++ SET_TARGET_ID(ha, pkt->common.target, h);
++
++ pkt->common.rx_id = prm->cmd->atio.atio2x.rx_id;
++ pkt->common.relative_offset = cpu_to_le32(prm->cmd->offset);
++
++ TRACE(TRACE_DEBUG|TRACE_SCSI, "qla2x00t(%ld): handle(scst_cmd) -> %08x, "
++ "timeout %d L %#x -> I %#x E %#x", ha->instance,
++ pkt->common.handle, Q2T_TIMEOUT,
++ le16_to_cpu(prm->cmd->atio.atio2x.lun),
++ GET_TARGET_ID(ha, &pkt->common), pkt->common.rx_id);
++}
++
++/* ha->hardware_lock supposed to be held on entry */
++static int q24_build_ctio_pkt(struct q2t_prm *prm)
++{
++ uint32_t h;
++ ctio7_status0_entry_t *pkt;
++ scsi_qla_host_t *ha = prm->tgt->ha;
++ atio7_entry_t *atio = &prm->cmd->atio.atio7;
++ int res = SCST_TGT_RES_SUCCESS;
++
++ TRACE_ENTRY();
++
++ pkt = (ctio7_status0_entry_t *)ha->request_ring_ptr;
++ prm->pkt = pkt;
++ memset(pkt, 0, sizeof(*pkt));
++
++ pkt->common.entry_type = CTIO_TYPE7;
++ pkt->common.entry_count = (uint8_t)prm->req_cnt;
++
++ h = q2t_make_handle(ha);
++ if (unlikely(h == Q2T_NULL_HANDLE)) {
++ /*
++ * CTIO type 7 from the firmware doesn't provide a way to
++ * know the initiator's LOOP ID, hence we can't find
++		 * the session and, therefore, the command.
++ */
++ res = SCST_TGT_RES_QUEUE_FULL;
++ goto out;
++ } else
++ ha->cmds[h-1] = prm->cmd;
++
++ pkt->common.handle = h | CTIO_COMPLETION_HANDLE_MARK;
++ pkt->common.nport_handle = prm->cmd->loop_id;
++ pkt->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
++ pkt->common.initiator_id[0] = atio->fcp_hdr.s_id[2];
++ pkt->common.initiator_id[1] = atio->fcp_hdr.s_id[1];
++ pkt->common.initiator_id[2] = atio->fcp_hdr.s_id[0];
++ pkt->common.exchange_addr = atio->exchange_addr;
++ pkt->flags |= (atio->attr << 9);
++ pkt->ox_id = swab16(atio->fcp_hdr.ox_id);
++ pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
++
++out:
++ TRACE(TRACE_DEBUG|TRACE_SCSI, "qla2x00t(%ld): handle(scst_cmd) -> %08x, "
++ "timeout %d, ox_id %#x", ha->instance, pkt->common.handle,
++ Q2T_TIMEOUT, le16_to_cpu(pkt->ox_id));
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/*
++ * ha->hardware_lock supposed to be held on entry. We have already made sure
++ * that there is a sufficient number of request entries to avoid dropping it.
++ */
++static void q2t_load_cont_data_segments(struct q2t_prm *prm)
++{
++ int cnt;
++ uint32_t *dword_ptr;
++ int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
++
++ TRACE_ENTRY();
++
++ /* Build continuation packets */
++ while (prm->seg_cnt > 0) {
++ cont_a64_entry_t *cont_pkt64 =
++ (cont_a64_entry_t *)q2t_get_req_pkt(prm->tgt->ha);
++
++ /*
++		 * Make sure that none of the 64-bit specific fields of
++		 * cont_pkt64 are used for 32-bit addressing. Cast to
++		 * (cont_entry_t *) for that.
++ */
++
++ memset(cont_pkt64, 0, sizeof(*cont_pkt64));
++
++ cont_pkt64->entry_count = 1;
++ cont_pkt64->sys_define = 0;
++
++ if (enable_64bit_addressing) {
++ cont_pkt64->entry_type = CONTINUE_A64_TYPE;
++ dword_ptr =
++ (uint32_t *)&cont_pkt64->dseg_0_address;
++ } else {
++ cont_pkt64->entry_type = CONTINUE_TYPE;
++ dword_ptr =
++ (uint32_t *)&((cont_entry_t *)
++ cont_pkt64)->dseg_0_address;
++ }
++
++ /* Load continuation entry data segments */
++ for (cnt = 0;
++ cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
++ cnt++, prm->seg_cnt--) {
++ *dword_ptr++ =
++ cpu_to_le32(pci_dma_lo32
++ (sg_dma_address(prm->sg)));
++ if (enable_64bit_addressing) {
++ *dword_ptr++ =
++ cpu_to_le32(pci_dma_hi32
++ (sg_dma_address
++ (prm->sg)));
++ }
++ *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
++
++ TRACE_SG("S/G Segment Cont. phys_addr=%llx:%llx, len=%d",
++ (long long unsigned int)pci_dma_hi32(sg_dma_address(prm->sg)),
++ (long long unsigned int)pci_dma_lo32(sg_dma_address(prm->sg)),
++ (int)sg_dma_len(prm->sg));
++
++ prm->sg++;
++ }
++
++ TRACE_BUFFER("Continuation packet data",
++ cont_pkt64, REQUEST_ENTRY_SIZE);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * ha->hardware_lock supposed to be held on entry. We have already made sure
++ * that there is a sufficient number of request entries to avoid dropping it.
++ */
++static void q2x_load_data_segments(struct q2t_prm *prm)
++{
++ int cnt;
++ uint32_t *dword_ptr;
++ int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
++ ctio_common_entry_t *pkt = (ctio_common_entry_t *)prm->pkt;
++
++ TRACE_DBG("iocb->scsi_status=%x, iocb->flags=%x",
++ le16_to_cpu(pkt->scsi_status), le16_to_cpu(pkt->flags));
++
++ pkt->transfer_length = cpu_to_le32(prm->cmd->bufflen);
++
++ /* Setup packet address segment pointer */
++ dword_ptr = pkt->dseg_0_address;
++
++ if (prm->seg_cnt == 0) {
++ /* No data transfer */
++ *dword_ptr++ = 0;
++ *dword_ptr = 0;
++
++ TRACE_BUFFER("No data, CTIO packet data", pkt,
++ REQUEST_ENTRY_SIZE);
++ goto out;
++ }
++
++ /* Set total data segment count */
++ pkt->dseg_count = cpu_to_le16(prm->seg_cnt);
++
++ /* If scatter gather */
++ TRACE_SG("%s", "Building S/G data segments...");
++ /* Load command entry data segments */
++ for (cnt = 0;
++ (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
++ cnt++, prm->seg_cnt--) {
++ *dword_ptr++ =
++ cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
++ if (enable_64bit_addressing) {
++ *dword_ptr++ =
++ cpu_to_le32(pci_dma_hi32
++ (sg_dma_address(prm->sg)));
++ }
++ *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
++
++ TRACE_SG("S/G Segment phys_addr=%llx:%llx, len=%d",
++ (long long unsigned int)pci_dma_hi32(sg_dma_address(prm->sg)),
++ (long long unsigned int)pci_dma_lo32(sg_dma_address(prm->sg)),
++ (int)sg_dma_len(prm->sg));
++
++ prm->sg++;
++ }
++
++ TRACE_BUFFER("Scatter/gather, CTIO packet data", pkt,
++ REQUEST_ENTRY_SIZE);
++
++ q2t_load_cont_data_segments(prm);
++
++out:
++ return;
++}
++
++/*
++ * ha->hardware_lock supposed to be held on entry. We have already made sure
++ * that there is a sufficient number of request entries to avoid dropping it.
++ */
++static void q24_load_data_segments(struct q2t_prm *prm)
++{
++ int cnt;
++ uint32_t *dword_ptr;
++ int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
++ ctio7_status0_entry_t *pkt = (ctio7_status0_entry_t *)prm->pkt;
++
++ TRACE_DBG("iocb->scsi_status=%x, iocb->flags=%x",
++ le16_to_cpu(pkt->scsi_status), le16_to_cpu(pkt->flags));
++
++ pkt->transfer_length = cpu_to_le32(prm->cmd->bufflen);
++
++ /* Setup packet address segment pointer */
++ dword_ptr = pkt->dseg_0_address;
++
++ if (prm->seg_cnt == 0) {
++ /* No data transfer */
++ *dword_ptr++ = 0;
++ *dword_ptr = 0;
++
++ TRACE_BUFFER("No data, CTIO7 packet data", pkt,
++ REQUEST_ENTRY_SIZE);
++ goto out;
++ }
++
++ /* Set total data segment count */
++ pkt->common.dseg_count = cpu_to_le16(prm->seg_cnt);
++
++ /* If scatter gather */
++ TRACE_SG("%s", "Building S/G data segments...");
++ /* Load command entry data segments */
++ for (cnt = 0;
++ (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
++ cnt++, prm->seg_cnt--) {
++ *dword_ptr++ =
++ cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
++ if (enable_64bit_addressing) {
++ *dword_ptr++ =
++ cpu_to_le32(pci_dma_hi32(
++ sg_dma_address(prm->sg)));
++ }
++ *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
++
++ TRACE_SG("S/G Segment phys_addr=%llx:%llx, len=%d",
++ (long long unsigned int)pci_dma_hi32(sg_dma_address(
++ prm->sg)),
++ (long long unsigned int)pci_dma_lo32(sg_dma_address(
++ prm->sg)),
++ (int)sg_dma_len(prm->sg));
++
++ prm->sg++;
++ }
++
++ q2t_load_cont_data_segments(prm);
++
++out:
++ return;
++}
++
++static inline int q2t_has_data(struct q2t_cmd *cmd)
++{
++ return cmd->bufflen > 0;
++}
++
++static int q2t_pre_xmit_response(struct q2t_cmd *cmd,
++ struct q2t_prm *prm, int xmit_type, unsigned long *flags)
++{
++ int res;
++ struct q2t_tgt *tgt = cmd->tgt;
++ scsi_qla_host_t *ha = tgt->ha;
++ uint16_t full_req_cnt;
++ struct scst_cmd *scst_cmd = cmd->scst_cmd;
++
++ TRACE_ENTRY();
++
++ if (unlikely(cmd->aborted)) {
++ TRACE_MGMT_DBG("qla2x00t(%ld): terminating exchange "
++ "for aborted cmd=%p (scst_cmd=%p, tag=%d)",
++ ha->instance, cmd, scst_cmd, cmd->tag);
++
++ cmd->state = Q2T_STATE_ABORTED;
++ scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_ABORTED);
++
++ if (IS_FWI2_CAPABLE(ha))
++ q24_send_term_exchange(ha, cmd, &cmd->atio.atio7, 0);
++ else
++ q2x_send_term_exchange(ha, cmd, &cmd->atio.atio2x, 0);
++ /* !! At this point cmd could be already freed !! */
++ res = Q2T_PRE_XMIT_RESP_CMD_ABORTED;
++ goto out;
++ }
++
++ TRACE(TRACE_SCSI, "qla2x00t(%ld): tag=%lld", ha->instance,
++ scst_cmd_get_tag(scst_cmd));
++
++ prm->cmd = cmd;
++ prm->tgt = tgt;
++ prm->rq_result = scst_cmd_get_status(scst_cmd);
++ prm->sense_buffer = scst_cmd_get_sense_buffer(scst_cmd);
++ prm->sense_buffer_len = scst_cmd_get_sense_buffer_len(scst_cmd);
++ prm->sg = NULL;
++ prm->seg_cnt = -1;
++ prm->req_cnt = 1;
++ prm->add_status_pkt = 0;
++
++ TRACE_DBG("rq_result=%x, xmit_type=%x", prm->rq_result, xmit_type);
++ if (prm->rq_result != 0)
++ TRACE_BUFFER("Sense", prm->sense_buffer, prm->sense_buffer_len);
++
++ /* Send marker if required */
++ if (q2t_issue_marker(ha, 0) != QLA_SUCCESS) {
++ res = SCST_TGT_RES_FATAL_ERROR;
++ goto out;
++ }
++
++ TRACE_DBG("CTIO start: ha(%d)", (int)ha->instance);
++
++ if ((xmit_type & Q2T_XMIT_DATA) && q2t_has_data(cmd)) {
++ if (q2t_pci_map_calc_cnt(prm) != 0) {
++ res = SCST_TGT_RES_QUEUE_FULL;
++ goto out;
++ }
++ }
++
++ full_req_cnt = prm->req_cnt;
++
++ if (xmit_type & Q2T_XMIT_STATUS) {
++ /* Bidirectional transfers not supported (yet) */
++ if (unlikely(scst_get_resid(scst_cmd, &prm->residual, NULL))) {
++ if (prm->residual > 0) {
++ TRACE_DBG("Residual underflow: %d (tag %lld, "
++ "op %x, bufflen %d, rq_result %x)",
++ prm->residual, scst_cmd->tag,
++ scst_cmd->cdb[0], cmd->bufflen,
++ prm->rq_result);
++ prm->rq_result |= SS_RESIDUAL_UNDER;
++ } else if (prm->residual < 0) {
++ TRACE_DBG("Residual overflow: %d (tag %lld, "
++ "op %x, bufflen %d, rq_result %x)",
++ prm->residual, scst_cmd->tag,
++ scst_cmd->cdb[0], cmd->bufflen,
++ prm->rq_result);
++ prm->rq_result |= SS_RESIDUAL_OVER;
++ prm->residual = -prm->residual;
++ }
++ }
++
++ /*
++ * If Q2T_XMIT_DATA is not set, add_status_pkt will be ignored
++ * in *xmit_response() below
++ */
++ if (q2t_has_data(cmd)) {
++ if (SCST_SENSE_VALID(prm->sense_buffer) ||
++ (IS_FWI2_CAPABLE(ha) &&
++ (prm->rq_result != 0))) {
++ prm->add_status_pkt = 1;
++ full_req_cnt++;
++ }
++ }
++ }
++
++ TRACE_DBG("req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d",
++ prm->req_cnt, full_req_cnt, prm->add_status_pkt);
++
++ /* Acquire ring specific lock */
++ spin_lock_irqsave(&ha->hardware_lock, *flags);
++
++	/* Does the F/W have enough IOCBs for this request? */
++ res = q2t_check_reserve_free_req(ha, full_req_cnt);
++ if (unlikely(res != SCST_TGT_RES_SUCCESS) &&
++ (xmit_type & Q2T_XMIT_DATA))
++ goto out_unlock_free_unmap;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_unlock_free_unmap:
++ if (cmd->sg_mapped)
++ q2t_unmap_sg(ha, cmd);
++
++ /* Release ring specific lock */
++ spin_unlock_irqrestore(&ha->hardware_lock, *flags);
++ goto out;
++}
++
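++/*
++ * Explicit confirmation is never requested when class 2 is enabled. When
++ * sending sense, only the initiator's confirmed completion support matters;
++ * otherwise explicit confirmation must also be enabled on the adapter.
++ */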
++static inline int q2t_need_explicit_conf(scsi_qla_host_t *ha,
++ struct q2t_cmd *cmd, int sending_sense)
++{
++ if (ha->enable_class_2)
++ return 0;
++
++ if (sending_sense)
++ return cmd->conf_compl_supported;
++ else
++ return ha->enable_explicit_conf && cmd->conf_compl_supported;
++}
++
++static void q2x_init_ctio_ret_entry(ctio_ret_entry_t *ctio_m1,
++ struct q2t_prm *prm)
++{
++ TRACE_ENTRY();
++
++ prm->sense_buffer_len = min((uint32_t)prm->sense_buffer_len,
++ (uint32_t)sizeof(ctio_m1->sense_data));
++
++ ctio_m1->flags = __constant_cpu_to_le16(OF_SSTS | OF_FAST_POST |
++ OF_NO_DATA | OF_SS_MODE_1);
++ ctio_m1->flags |= __constant_cpu_to_le16(OF_INC_RC);
++ if (q2t_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
++ ctio_m1->flags |= __constant_cpu_to_le16(OF_EXPL_CONF |
++ OF_CONF_REQ);
++ }
++ ctio_m1->scsi_status = cpu_to_le16(prm->rq_result);
++ ctio_m1->residual = cpu_to_le32(prm->residual);
++ if (SCST_SENSE_VALID(prm->sense_buffer)) {
++ if (q2t_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
++ ctio_m1->flags |= __constant_cpu_to_le16(OF_EXPL_CONF |
++ OF_CONF_REQ);
++ }
++ ctio_m1->scsi_status |= __constant_cpu_to_le16(
++ SS_SENSE_LEN_VALID);
++ ctio_m1->sense_length = cpu_to_le16(prm->sense_buffer_len);
++ memcpy(ctio_m1->sense_data, prm->sense_buffer,
++ prm->sense_buffer_len);
++ } else {
++ memset(ctio_m1->sense_data, 0, sizeof(ctio_m1->sense_data));
++ ctio_m1->sense_length = 0;
++ }
++
++ /* Sense with len > 26, is it possible ??? */
++
++ TRACE_EXIT();
++ return;
++}
++
++static int __q2x_xmit_response(struct q2t_cmd *cmd, int xmit_type)
++{
++ int res;
++ unsigned long flags;
++ scsi_qla_host_t *ha;
++ struct q2t_prm prm;
++ ctio_common_entry_t *pkt;
++
++ TRACE_ENTRY();
++
++ memset(&prm, 0, sizeof(prm));
++
++ res = q2t_pre_xmit_response(cmd, &prm, xmit_type, &flags);
++ if (unlikely(res != SCST_TGT_RES_SUCCESS)) {
++ if (res == Q2T_PRE_XMIT_RESP_CMD_ABORTED)
++ res = SCST_TGT_RES_SUCCESS;
++ goto out;
++ }
++
++ /* Here ha->hardware_lock already locked */
++
++ ha = prm.tgt->ha;
++
++ q2x_build_ctio_pkt(&prm);
++ pkt = (ctio_common_entry_t *)prm.pkt;
++
++ if (q2t_has_data(cmd) && (xmit_type & Q2T_XMIT_DATA)) {
++ pkt->flags |= __constant_cpu_to_le16(OF_FAST_POST | OF_DATA_IN);
++ pkt->flags |= __constant_cpu_to_le16(OF_INC_RC);
++
++ q2x_load_data_segments(&prm);
++
++ if (prm.add_status_pkt == 0) {
++ if (xmit_type & Q2T_XMIT_STATUS) {
++ pkt->scsi_status = cpu_to_le16(prm.rq_result);
++ pkt->residual = cpu_to_le32(prm.residual);
++ pkt->flags |= __constant_cpu_to_le16(OF_SSTS);
++ if (q2t_need_explicit_conf(ha, cmd, 0)) {
++ pkt->flags |= __constant_cpu_to_le16(
++ OF_EXPL_CONF |
++ OF_CONF_REQ);
++ }
++ }
++ } else {
++ /*
++			 * We have already made sure that there is a sufficient
++			 * number of request entries to avoid dropping the HW lock in
++ * req_pkt().
++ */
++ ctio_ret_entry_t *ctio_m1 =
++ (ctio_ret_entry_t *)q2t_get_req_pkt(ha);
++
++ TRACE_DBG("%s", "Building additional status packet");
++
++ memcpy(ctio_m1, pkt, sizeof(*ctio_m1));
++ ctio_m1->entry_count = 1;
++ ctio_m1->dseg_count = 0;
++
++ /* Real finish is ctio_m1's finish */
++ pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
++ pkt->flags &= ~__constant_cpu_to_le16(OF_INC_RC);
++
++ q2x_init_ctio_ret_entry(ctio_m1, &prm);
++ TRACE_BUFFER("Status CTIO packet data", ctio_m1,
++ REQUEST_ENTRY_SIZE);
++ }
++ } else
++ q2x_init_ctio_ret_entry((ctio_ret_entry_t *)pkt, &prm);
++
++ cmd->state = Q2T_STATE_PROCESSED; /* Mid-level is done processing */
++
++ TRACE_BUFFER("Xmitting", pkt, REQUEST_ENTRY_SIZE);
++
++ q2t_exec_queue(ha);
++
++ /* Release ring specific lock */
++ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++#ifdef CONFIG_QLA_TGT_DEBUG_SRR
++static void q2t_check_srr_debug(struct q2t_cmd *cmd, int *xmit_type)
++{
++#if 0 /* This is not a real lost status packet, so it won't lead to an SRR */
++ if ((*xmit_type & Q2T_XMIT_STATUS) && (scst_random() % 200) == 50) {
++ *xmit_type &= ~Q2T_XMIT_STATUS;
++ TRACE_MGMT_DBG("Dropping cmd %p (tag %d) status", cmd,
++ cmd->tag);
++ }
++#endif
++
++ if (q2t_has_data(cmd) && (cmd->sg_cnt > 1) &&
++ ((scst_random() % 100) == 20)) {
++ int i, leave = 0;
++ unsigned int tot_len = 0;
++
++ while (leave == 0)
++ leave = scst_random() % cmd->sg_cnt;
++
++ for (i = 0; i < leave; i++)
++ tot_len += cmd->sg[i].length;
++
++ TRACE_MGMT_DBG("Cutting cmd %p (tag %d) buffer tail to len %d, "
++ "sg_cnt %d (cmd->bufflen %d, cmd->sg_cnt %d)", cmd,
++ cmd->tag, tot_len, leave, cmd->bufflen, cmd->sg_cnt);
++
++ cmd->bufflen = tot_len;
++ cmd->sg_cnt = leave;
++ }
++
++ if (q2t_has_data(cmd) && ((scst_random() % 100) == 70)) {
++ unsigned int offset = scst_random() % cmd->bufflen;
++
++ TRACE_MGMT_DBG("Cutting cmd %p (tag %d) buffer head "
++ "to offset %d (cmd->bufflen %d)", cmd, cmd->tag,
++ offset, cmd->bufflen);
++ if (offset == 0)
++ *xmit_type &= ~Q2T_XMIT_DATA;
++ else if (q2t_cut_cmd_data_head(cmd, offset)) {
++ TRACE_MGMT_DBG("q2t_cut_cmd_data_head() failed (tag %d)",
++ cmd->tag);
++ }
++ }
++}
++#else
++static inline void q2t_check_srr_debug(struct q2t_cmd *cmd, int *xmit_type) {}
++#endif
++
++static int q2x_xmit_response(struct scst_cmd *scst_cmd)
++{
++ int xmit_type = Q2T_XMIT_DATA, res;
++ int is_send_status = scst_cmd_get_is_send_status(scst_cmd);
++ struct q2t_cmd *cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ BUG_ON(!q2t_has_data(cmd) && !is_send_status);
++#endif
++
++#ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
++ EXTRACHECKS_BUG_ON(scst_cmd_atomic(scst_cmd));
++#endif
++
++ if (is_send_status)
++ xmit_type |= Q2T_XMIT_STATUS;
++
++ cmd->bufflen = scst_cmd_get_adjusted_resp_data_len(scst_cmd);
++ cmd->sg = scst_cmd_get_sg(scst_cmd);
++ cmd->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
++ cmd->data_direction = scst_cmd_get_data_direction(scst_cmd);
++ cmd->dma_data_direction = scst_to_tgt_dma_dir(cmd->data_direction);
++ cmd->offset = scst_cmd_get_ppl_offset(scst_cmd);
++ cmd->aborted = scst_cmd_aborted(scst_cmd);
++
++ q2t_check_srr_debug(cmd, &xmit_type);
++
++ TRACE_DBG("is_send_status=%x, cmd->bufflen=%d, cmd->sg_cnt=%d, "
++ "cmd->data_direction=%d", is_send_status, cmd->bufflen,
++ cmd->sg_cnt, cmd->data_direction);
++
++ if (IS_FWI2_CAPABLE(cmd->tgt->ha))
++ res = __q24_xmit_response(cmd, xmit_type);
++ else
++ res = __q2x_xmit_response(cmd, xmit_type);
++
++ return res;
++}
++
++static void q24_init_ctio_ret_entry(ctio7_status0_entry_t *ctio,
++ struct q2t_prm *prm)
++{
++ ctio7_status1_entry_t *ctio1;
++
++ TRACE_ENTRY();
++
++ prm->sense_buffer_len = min((uint32_t)prm->sense_buffer_len,
++ (uint32_t)sizeof(ctio1->sense_data));
++ ctio->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
++ if (q2t_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
++ ctio->flags |= __constant_cpu_to_le16(
++ CTIO7_FLAGS_EXPLICIT_CONFORM |
++ CTIO7_FLAGS_CONFORM_REQ);
++ }
++ ctio->residual = cpu_to_le32(prm->residual);
++ ctio->scsi_status = cpu_to_le16(prm->rq_result);
++ if (SCST_SENSE_VALID(prm->sense_buffer)) {
++ int i;
++ ctio1 = (ctio7_status1_entry_t *)ctio;
++ if (q2t_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
++ ctio1->flags |= __constant_cpu_to_le16(
++ CTIO7_FLAGS_EXPLICIT_CONFORM |
++ CTIO7_FLAGS_CONFORM_REQ);
++ }
++ ctio1->flags &= ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
++ ctio1->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
++ ctio1->scsi_status |= __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
++ ctio1->sense_length = cpu_to_le16(prm->sense_buffer_len);
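++		/* Copy the sense data into the CTIO as big-endian 32-bit words */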
++ for (i = 0; i < prm->sense_buffer_len/4; i++)
++ ((uint32_t *)ctio1->sense_data)[i] =
++ cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
++#if 0
++ if (unlikely((prm->sense_buffer_len % 4) != 0)) {
++ static int q;
++ if (q < 10) {
++ PRINT_INFO("qla2x00t(%ld): %d bytes of sense "
++ "lost", prm->tgt->ha->instance,
++ prm->sense_buffer_len % 4);
++ q++;
++ }
++ }
++#endif
++ } else {
++ ctio1 = (ctio7_status1_entry_t *)ctio;
++ ctio1->flags &= ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
++ ctio1->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
++ ctio1->sense_length = 0;
++ memset(ctio1->sense_data, 0, sizeof(ctio1->sense_data));
++ }
++
++ /* Sense with len > 24, is it possible ??? */
++
++ TRACE_EXIT();
++ return;
++}
++
++static int __q24_xmit_response(struct q2t_cmd *cmd, int xmit_type)
++{
++ int res;
++ unsigned long flags;
++ scsi_qla_host_t *ha;
++ struct q2t_prm prm;
++ ctio7_status0_entry_t *pkt;
++
++ TRACE_ENTRY();
++
++ memset(&prm, 0, sizeof(prm));
++
++ res = q2t_pre_xmit_response(cmd, &prm, xmit_type, &flags);
++ if (unlikely(res != SCST_TGT_RES_SUCCESS)) {
++ if (res == Q2T_PRE_XMIT_RESP_CMD_ABORTED)
++ res = SCST_TGT_RES_SUCCESS;
++ goto out;
++ }
++
++ /* Here ha->hardware_lock already locked */
++
++ ha = prm.tgt->ha;
++
++ res = q24_build_ctio_pkt(&prm);
++ if (unlikely(res != SCST_TGT_RES_SUCCESS))
++ goto out_unmap_unlock;
++
++ pkt = (ctio7_status0_entry_t *)prm.pkt;
++
++ if (q2t_has_data(cmd) && (xmit_type & Q2T_XMIT_DATA)) {
++ pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
++ CTIO7_FLAGS_STATUS_MODE_0);
++
++ q24_load_data_segments(&prm);
++
++ if (prm.add_status_pkt == 0) {
++ if (xmit_type & Q2T_XMIT_STATUS) {
++ pkt->scsi_status = cpu_to_le16(prm.rq_result);
++ pkt->residual = cpu_to_le32(prm.residual);
++ pkt->flags |= __constant_cpu_to_le16(
++ CTIO7_FLAGS_SEND_STATUS);
++ if (q2t_need_explicit_conf(ha, cmd, 0)) {
++ pkt->flags |= __constant_cpu_to_le16(
++ CTIO7_FLAGS_EXPLICIT_CONFORM |
++ CTIO7_FLAGS_CONFORM_REQ);
++ }
++ }
++ } else {
++ /*
++			 * We have already made sure that there is a sufficient
++			 * number of request entries to avoid dropping the HW lock in
++ * req_pkt().
++ */
++ ctio7_status1_entry_t *ctio =
++ (ctio7_status1_entry_t *)q2t_get_req_pkt(ha);
++
++ TRACE_DBG("%s", "Building additional status packet");
++
++ memcpy(ctio, pkt, sizeof(*ctio));
++ ctio->common.entry_count = 1;
++ ctio->common.dseg_count = 0;
++ ctio->flags &= ~__constant_cpu_to_le16(
++ CTIO7_FLAGS_DATA_IN);
++
++ /* Real finish is ctio_m1's finish */
++ pkt->common.handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
++ pkt->flags |= __constant_cpu_to_le16(
++ CTIO7_FLAGS_DONT_RET_CTIO);
++ q24_init_ctio_ret_entry((ctio7_status0_entry_t *)ctio,
++ &prm);
++ TRACE_BUFFER("Status CTIO7", ctio, REQUEST_ENTRY_SIZE);
++ }
++ } else
++ q24_init_ctio_ret_entry(pkt, &prm);
++
++ cmd->state = Q2T_STATE_PROCESSED; /* Mid-level is done processing */
++
++ TRACE_BUFFER("Xmitting CTIO7", pkt, REQUEST_ENTRY_SIZE);
++
++ q2t_exec_queue(ha);
++
++out_unlock:
++ /* Release ring specific lock */
++ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_unmap_unlock:
++ if (cmd->sg_mapped)
++ q2t_unmap_sg(ha, cmd);
++ goto out_unlock;
++}
++
++static int __q2t_rdy_to_xfer(struct q2t_cmd *cmd)
++{
++ int res = SCST_TGT_RES_SUCCESS;
++ unsigned long flags;
++ scsi_qla_host_t *ha;
++ struct q2t_tgt *tgt = cmd->tgt;
++ struct q2t_prm prm;
++ void *p;
++
++ TRACE_ENTRY();
++
++ memset(&prm, 0, sizeof(prm));
++ prm.cmd = cmd;
++ prm.tgt = tgt;
++ prm.sg = NULL;
++ prm.req_cnt = 1;
++ ha = tgt->ha;
++
++ /* Send marker if required */
++ if (q2t_issue_marker(ha, 0) != QLA_SUCCESS) {
++ res = SCST_TGT_RES_FATAL_ERROR;
++ goto out;
++ }
++
++ TRACE_DBG("CTIO_start: ha(%d)", (int)ha->instance);
++
++ /* Calculate number of entries and segments required */
++ if (q2t_pci_map_calc_cnt(&prm) != 0) {
++ res = SCST_TGT_RES_QUEUE_FULL;
++ goto out;
++ }
++
++ /* Acquire ring specific lock */
++ spin_lock_irqsave(&ha->hardware_lock, flags);
++
++	/* Does the F/W have enough IOCBs for this request? */
++ res = q2t_check_reserve_free_req(ha, prm.req_cnt);
++ if (res != SCST_TGT_RES_SUCCESS)
++ goto out_unlock_free_unmap;
++
++ if (IS_FWI2_CAPABLE(ha)) {
++ ctio7_status0_entry_t *pkt;
++ res = q24_build_ctio_pkt(&prm);
++ if (unlikely(res != SCST_TGT_RES_SUCCESS))
++ goto out_unlock_free_unmap;
++ pkt = (ctio7_status0_entry_t *)prm.pkt;
++ pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
++ CTIO7_FLAGS_STATUS_MODE_0);
++ q24_load_data_segments(&prm);
++ p = pkt;
++ } else {
++ ctio_common_entry_t *pkt;
++ q2x_build_ctio_pkt(&prm);
++ pkt = (ctio_common_entry_t *)prm.pkt;
++ pkt->flags = __constant_cpu_to_le16(OF_FAST_POST | OF_DATA_OUT);
++ q2x_load_data_segments(&prm);
++ p = pkt;
++ }
++
++ cmd->state = Q2T_STATE_NEED_DATA;
++
++ TRACE_BUFFER("Xfering", p, REQUEST_ENTRY_SIZE);
++
++ q2t_exec_queue(ha);
++
++out_unlock:
++ /* Release ring specific lock */
++ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_unlock_free_unmap:
++ if (cmd->sg_mapped)
++ q2t_unmap_sg(ha, cmd);
++ goto out_unlock;
++}
++
++static int q2t_rdy_to_xfer(struct scst_cmd *scst_cmd)
++{
++ int res;
++ struct q2t_cmd *cmd;
++
++ TRACE_ENTRY();
++
++ TRACE(TRACE_SCSI, "qla2x00t: tag=%lld", scst_cmd_get_tag(scst_cmd));
++
++ cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
++ cmd->bufflen = scst_cmd_get_write_fields(scst_cmd, &cmd->sg,
++ &cmd->sg_cnt);
++ cmd->data_direction = scst_cmd_get_data_direction(scst_cmd);
++ cmd->dma_data_direction = scst_to_tgt_dma_dir(cmd->data_direction);
++
++ res = __q2t_rdy_to_xfer(cmd);
++
++ TRACE_EXIT();
++ return res;
++}
++
++/* If hardware_lock held on entry, might drop it, then reacquire */
++static void q2x_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
++ atio_entry_t *atio, int ha_locked)
++{
++ ctio_ret_entry_t *ctio;
++ unsigned long flags = 0; /* to stop compiler's warning */
++ int do_tgt_cmd_done = 0;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Sending TERM EXCH CTIO (ha=%p)", ha);
++
++ /* Send marker if required */
++ if (q2t_issue_marker(ha, ha_locked) != QLA_SUCCESS)
++ goto out;
++
++ if (!ha_locked)
++ spin_lock_irqsave(&ha->hardware_lock, flags);
++
++ ctio = (ctio_ret_entry_t *)qla2x00_req_pkt(ha);
++ if (ctio == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
++ "request packet", ha->instance, __func__);
++ goto out_unlock;
++ }
++
++ ctio->entry_type = CTIO_RET_TYPE;
++ ctio->entry_count = 1;
++ if (cmd != NULL) {
++ if (cmd->state < Q2T_STATE_PROCESSED) {
++ PRINT_ERROR("qla2x00t(%ld): Terminating cmd %p with "
++ "incorrect state %d", ha->instance, cmd,
++ cmd->state);
++ } else
++ do_tgt_cmd_done = 1;
++ }
++ ctio->handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
++
++ /* Set IDs */
++ SET_TARGET_ID(ha, ctio->target, GET_TARGET_ID(ha, atio));
++ ctio->rx_id = atio->rx_id;
++
++ /* Most likely, it isn't needed */
++ ctio->residual = atio->data_length;
++ if (ctio->residual != 0)
++ ctio->scsi_status |= SS_RESIDUAL_UNDER;
++
++ ctio->flags = __constant_cpu_to_le16(OF_FAST_POST | OF_TERM_EXCH |
++ OF_NO_DATA | OF_SS_MODE_1);
++ ctio->flags |= __constant_cpu_to_le16(OF_INC_RC);
++
++ TRACE_BUFFER("CTIO TERM EXCH packet data", ctio, REQUEST_ENTRY_SIZE);
++
++ q2t_exec_queue(ha);
++
++out_unlock:
++ if (!ha_locked)
++ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++
++ if (do_tgt_cmd_done) {
++ if (!ha_locked && !in_interrupt()) {
++ msleep(250); /* just in case */
++ scst_tgt_cmd_done(cmd->scst_cmd, SCST_CONTEXT_DIRECT);
++ } else
++ scst_tgt_cmd_done(cmd->scst_cmd, SCST_CONTEXT_TASKLET);
++ /* !! At this point cmd could be already freed !! */
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* If hardware_lock held on entry, might drop it, then reacquire */
++static void q24_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
++ atio7_entry_t *atio, int ha_locked)
++{
++ ctio7_status1_entry_t *ctio;
++ unsigned long flags = 0; /* to stop compiler's warning */
++ int do_tgt_cmd_done = 0;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Sending TERM EXCH CTIO7 (ha=%p)", ha);
++
++ /* Send marker if required */
++ if (q2t_issue_marker(ha, ha_locked) != QLA_SUCCESS)
++ goto out;
++
++ if (!ha_locked)
++ spin_lock_irqsave(&ha->hardware_lock, flags);
++
++ ctio = (ctio7_status1_entry_t *)qla2x00_req_pkt(ha);
++ if (ctio == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
++ "request packet", ha->instance, __func__);
++ goto out_unlock;
++ }
++
++ ctio->common.entry_type = CTIO_TYPE7;
++ ctio->common.entry_count = 1;
++ if (cmd != NULL) {
++ ctio->common.nport_handle = cmd->loop_id;
++ if (cmd->state < Q2T_STATE_PROCESSED) {
++ PRINT_ERROR("qla2x00t(%ld): Terminating cmd %p with "
++ "incorrect state %d", ha->instance, cmd,
++ cmd->state);
++ } else
++ do_tgt_cmd_done = 1;
++ } else
++ ctio->common.nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
++ ctio->common.handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
++ ctio->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
++ ctio->common.initiator_id[0] = atio->fcp_hdr.s_id[2];
++ ctio->common.initiator_id[1] = atio->fcp_hdr.s_id[1];
++ ctio->common.initiator_id[2] = atio->fcp_hdr.s_id[0];
++ ctio->common.exchange_addr = atio->exchange_addr;
++ ctio->flags = (atio->attr << 9) | __constant_cpu_to_le16(
++ CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
++ ctio->ox_id = swab16(atio->fcp_hdr.ox_id);
++
++ /* Most likely, it isn't needed */
++ ctio->residual = atio->fcp_cmnd.data_length;
++ if (ctio->residual != 0)
++ ctio->scsi_status |= SS_RESIDUAL_UNDER;
++
++ TRACE_BUFFER("CTIO7 TERM EXCH packet data", ctio, REQUEST_ENTRY_SIZE);
++
++ q2t_exec_queue(ha);
++
++out_unlock:
++ if (!ha_locked)
++ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++
++ if (do_tgt_cmd_done) {
++ if (!ha_locked && !in_interrupt()) {
++ msleep(250); /* just in case */
++ scst_tgt_cmd_done(cmd->scst_cmd, SCST_CONTEXT_DIRECT);
++ } else
++ scst_tgt_cmd_done(cmd->scst_cmd, SCST_CONTEXT_TASKLET);
++ /* !! At this point cmd could be already freed !! */
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static inline void q2t_free_cmd(struct q2t_cmd *cmd)
++{
++ EXTRACHECKS_BUG_ON(cmd->sg_mapped);
++
++ if (unlikely(cmd->free_sg))
++ kfree(cmd->sg);
++ kmem_cache_free(q2t_cmd_cachep, cmd);
++}
++
++static void q2t_on_free_cmd(struct scst_cmd *scst_cmd)
++{
++ struct q2t_cmd *cmd;
++
++ TRACE_ENTRY();
++
++ TRACE(TRACE_SCSI, "qla2x00t: Freeing command %p, tag %lld",
++ scst_cmd, scst_cmd_get_tag(scst_cmd));
++
++ cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
++ scst_cmd_set_tgt_priv(scst_cmd, NULL);
++
++ q2t_free_cmd(cmd);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* ha->hardware_lock supposed to be held on entry */
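++/*
++ * Queue a CTIO that completed with SRR status on the target's srr_ctio_list.
++ * If the matching immediate notify SRR has already arrived (imm_srr_id equals
++ * ctio_srr_id), srr_work is scheduled to process the pair; on allocation
++ * failure the corresponding IMM SRR entries are rejected and freed.
++ */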
++static int q2t_prepare_srr_ctio(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
++ void *ctio)
++{
++ struct srr_ctio *sc;
++ struct q2t_tgt *tgt = ha->tgt;
++ int res = 0;
++ struct srr_imm *imm;
++
++ tgt->ctio_srr_id++;
++
++ TRACE_MGMT_DBG("qla2x00t(%ld): CTIO with SRR "
++ "status received", ha->instance);
++
++ if (ctio == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): SRR CTIO, "
++ "but ctio is NULL", ha->instance);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (cmd->scst_cmd != NULL)
++ scst_update_hw_pending_start(cmd->scst_cmd);
++
++ sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
++ if (sc != NULL) {
++ sc->cmd = cmd;
++ /* IRQ is already OFF */
++ spin_lock(&tgt->srr_lock);
++ sc->srr_id = tgt->ctio_srr_id;
++ list_add_tail(&sc->srr_list_entry,
++ &tgt->srr_ctio_list);
++ TRACE_MGMT_DBG("CTIO SRR %p added (id %d)",
++ sc, sc->srr_id);
++ if (tgt->imm_srr_id == tgt->ctio_srr_id) {
++ int found = 0;
++ list_for_each_entry(imm, &tgt->srr_imm_list,
++ srr_list_entry) {
++ if (imm->srr_id == sc->srr_id) {
++ found = 1;
++ break;
++ }
++ }
++ if (found) {
++ TRACE_MGMT_DBG("%s", "Scheduling srr work");
++ schedule_work(&tgt->srr_work);
++ } else {
++ PRINT_ERROR("qla2x00t(%ld): imm_srr_id "
++ "== ctio_srr_id (%d), but there is no "
++ "corresponding SRR IMM, deleting CTIO "
++ "SRR %p", ha->instance, tgt->ctio_srr_id,
++ sc);
++ list_del(&sc->srr_list_entry);
++ spin_unlock(&tgt->srr_lock);
++
++ kfree(sc);
++ res = -EINVAL;
++ goto out;
++ }
++ }
++ spin_unlock(&tgt->srr_lock);
++ } else {
++ struct srr_imm *ti;
++ PRINT_ERROR("qla2x00t(%ld): Unable to allocate SRR CTIO entry",
++ ha->instance);
++ spin_lock(&tgt->srr_lock);
++ list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
++ srr_list_entry) {
++ if (imm->srr_id == tgt->ctio_srr_id) {
++ TRACE_MGMT_DBG("IMM SRR %p deleted "
++ "(id %d)", imm, imm->srr_id);
++ list_del(&imm->srr_list_entry);
++ q2t_reject_free_srr_imm(ha, imm, 1);
++ }
++ }
++ spin_unlock(&tgt->srr_lock);
++ res = -ENOMEM;
++ goto out;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/*
++ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
++ */
++static int q2t_term_ctio_exchange(scsi_qla_host_t *ha, void *ctio,
++ struct q2t_cmd *cmd, uint32_t status)
++{
++ int term = 0;
++
++ if (IS_FWI2_CAPABLE(ha)) {
++ if (ctio != NULL) {
++ ctio7_fw_entry_t *c = (ctio7_fw_entry_t *)ctio;
++ term = !(c->flags &
++ __constant_cpu_to_le16(OF_TERM_EXCH));
++ } else
++ term = 1;
++ if (term) {
++ q24_send_term_exchange(ha, cmd,
++ &cmd->atio.atio7, 1);
++ }
++ } else {
++ if (status != CTIO_SUCCESS)
++ q2x_modify_command_count(ha, 1, 0);
++#if 0 /* this does not seem to be needed */
++ if (ctio != NULL) {
++ ctio_common_entry_t *c = (ctio_common_entry_t *)ctio;
++ term = !(c->flags &
++ __constant_cpu_to_le16(
++ CTIO7_FLAGS_TERMINATE));
++ } else
++ term = 1;
++ if (term) {
++ q2x_send_term_exchange(ha, cmd,
++ &cmd->atio.atio2x, 1);
++ }
++#endif
++ }
++ return term;
++}
++
++/* ha->hardware_lock supposed to be held on entry */
++static inline struct q2t_cmd *q2t_get_cmd(scsi_qla_host_t *ha, uint32_t handle)
++{
++ handle--;
++ if (ha->cmds[handle] != NULL) {
++ struct q2t_cmd *cmd = ha->cmds[handle];
++ ha->cmds[handle] = NULL;
++ return cmd;
++ } else
++ return NULL;
++}
++
++/* ha->hardware_lock supposed to be held on entry */
++static struct q2t_cmd *q2t_ctio_to_cmd(scsi_qla_host_t *ha, uint32_t handle,
++ void *ctio)
++{
++ struct q2t_cmd *cmd = NULL;
++
++ /* Clear out internal marks */
++ handle &= ~(CTIO_COMPLETION_HANDLE_MARK | CTIO_INTERMEDIATE_HANDLE_MARK);
++
++ if (handle != Q2T_NULL_HANDLE) {
++ if (unlikely(handle == Q2T_SKIP_HANDLE)) {
++ TRACE_DBG("%s", "SKIP_HANDLE CTIO");
++ goto out;
++ }
++ /* handle-1 is actually used */
++ if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
++ PRINT_ERROR("qla2x00t(%ld): Wrong handle %x "
++ "received", ha->instance, handle);
++ goto out;
++ }
++ cmd = q2t_get_cmd(ha, handle);
++ if (unlikely(cmd == NULL)) {
++ PRINT_WARNING("qla2x00t(%ld): Suspicious: unable to "
++ "find the command with handle %x",
++ ha->instance, handle);
++ goto out;
++ }
++ } else if (ctio != NULL) {
++ uint16_t loop_id;
++ int tag;
++ struct q2t_sess *sess;
++ struct scst_cmd *scst_cmd;
++
++ if (IS_FWI2_CAPABLE(ha)) {
++ /* We can't get loop ID from CTIO7 */
++ PRINT_ERROR("qla2x00t(%ld): Wrong CTIO received: "
++ "QLA24xx doesn't support NULL handles",
++ ha->instance);
++ goto out;
++ } else {
++ ctio_common_entry_t *c = (ctio_common_entry_t *)ctio;
++ loop_id = GET_TARGET_ID(ha, c);
++ tag = c->rx_id;
++ }
++
++ sess = q2t_find_sess_by_loop_id(ha->tgt, loop_id);
++ if (sess == NULL) {
++ PRINT_WARNING("qla2x00t(%ld): Suspicious: "
++ "ctio_completion for non-existing session "
++ "(loop_id %d, tag %d)",
++ ha->instance, loop_id, tag);
++ goto out;
++ }
++
++ scst_cmd = scst_find_cmd_by_tag(sess->scst_sess, tag);
++ if (scst_cmd == NULL) {
++ PRINT_WARNING("qla2x00t(%ld): Suspicious: unable to "
++ "find the command with tag %d (loop_id %d)",
++ ha->instance, tag, loop_id);
++ goto out;
++ }
++
++ cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
++ TRACE_DBG("Found q2t_cmd %p (tag %d)", cmd, tag);
++ }
++
++out:
++ return cmd;
++}
++
++/*
++ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
++ */
++static void q2t_do_ctio_completion(scsi_qla_host_t *ha, uint32_t handle,
++ uint32_t status, void *ctio)
++{
++ struct scst_cmd *scst_cmd;
++ struct q2t_cmd *cmd;
++ enum scst_exec_context context;
++
++ TRACE_ENTRY();
++
++#ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
++ context = SCST_CONTEXT_THREAD;
++#else
++ context = SCST_CONTEXT_TASKLET;
++#endif
++
++ TRACE(TRACE_DEBUG|TRACE_SCSI, "qla2x00t(%ld): handle(ctio %p "
++ "status %#x) <- %08x", ha->instance, ctio, status, handle);
++
++ if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
++ /* That could happen only in case of an error/reset/abort */
++ if (status != CTIO_SUCCESS) {
++ TRACE_MGMT_DBG("Intermediate CTIO received (status %x)",
++ status);
++ }
++ goto out;
++ }
++
++ cmd = q2t_ctio_to_cmd(ha, handle, ctio);
++ if (cmd == NULL) {
++ if (status != CTIO_SUCCESS)
++ q2t_term_ctio_exchange(ha, ctio, NULL, status);
++ goto out;
++ }
++
++ scst_cmd = cmd->scst_cmd;
++
++ if (cmd->sg_mapped)
++ q2t_unmap_sg(ha, cmd);
++
++ if (unlikely(status != CTIO_SUCCESS)) {
++ switch (status & 0xFFFF) {
++ case CTIO_LIP_RESET:
++ case CTIO_TARGET_RESET:
++ case CTIO_ABORTED:
++ case CTIO_TIMEOUT:
++ case CTIO_INVALID_RX_ID:
++ /* They are OK */
++ TRACE(TRACE_MINOR_AND_MGMT_DBG,
++ "qla2x00t(%ld): CTIO with "
++ "status %#x received, state %x, scst_cmd %p, "
++ "op %x (LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
++ "TIMEOUT=b, INVALID_RX_ID=8)", ha->instance,
++ status, cmd->state, scst_cmd, scst_cmd->cdb[0]);
++ break;
++
++ case CTIO_PORT_LOGGED_OUT:
++ case CTIO_PORT_UNAVAILABLE:
++ PRINT_INFO("qla2x00t(%ld): CTIO with PORT LOGGED "
++ "OUT (29) or PORT UNAVAILABLE (28) status %x "
++ "received (state %x, scst_cmd %p, op %x)",
++ ha->instance, status, cmd->state, scst_cmd,
++ scst_cmd->cdb[0]);
++ break;
++
++ case CTIO_SRR_RECEIVED:
++ if (q2t_prepare_srr_ctio(ha, cmd, ctio) != 0)
++ break;
++ else
++ goto out;
++
++ default:
++ PRINT_ERROR("qla2x00t(%ld): CTIO with error status "
++ "0x%x received (state %x, scst_cmd %p, op %x)",
++ ha->instance, status, cmd->state, scst_cmd,
++ scst_cmd->cdb[0]);
++ break;
++ }
++
++ if (cmd->state != Q2T_STATE_NEED_DATA)
++ if (q2t_term_ctio_exchange(ha, ctio, cmd, status))
++ goto out;
++ }
++
++ if (cmd->state == Q2T_STATE_PROCESSED) {
++ TRACE_DBG("Command %p finished", cmd);
++ } else if (cmd->state == Q2T_STATE_NEED_DATA) {
++ int rx_status = SCST_RX_STATUS_SUCCESS;
++
++ cmd->state = Q2T_STATE_DATA_IN;
++
++ if (unlikely(status != CTIO_SUCCESS))
++ rx_status = SCST_RX_STATUS_ERROR;
++ else
++ cmd->write_data_transferred = 1;
++
++ TRACE_DBG("Data received, context %x, rx_status %d",
++ context, rx_status);
++
++ scst_rx_data(scst_cmd, rx_status, context);
++ goto out;
++ } else if (cmd->state == Q2T_STATE_ABORTED) {
++ TRACE_MGMT_DBG("Aborted command %p (tag %d) finished", cmd,
++ cmd->tag);
++ } else {
++ PRINT_ERROR("qla2x00t(%ld): A command in state (%d) should "
++ "not return a CTIO complete", ha->instance, cmd->state);
++ }
++
++ if (unlikely(status != CTIO_SUCCESS)) {
++ TRACE_MGMT_DBG("%s", "Finishing failed CTIO");
++ scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_FAILED);
++ }
++
++ scst_tgt_cmd_done(scst_cmd, context);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* ha->hardware_lock supposed to be held on entry */
++/* called via callback from qla2xxx */
++static void q2x_ctio_completion(scsi_qla_host_t *ha, uint32_t handle)
++{
++ struct q2t_tgt *tgt = ha->tgt;
++
++ TRACE_ENTRY();
++
++ if (likely(tgt != NULL)) {
++ tgt->irq_cmd_count++;
++ q2t_do_ctio_completion(ha, handle, CTIO_SUCCESS, NULL);
++ tgt->irq_cmd_count--;
++ } else {
++ TRACE_DBG("CTIO, but target mode not enabled (ha %p handle "
++ "%#x)", ha, handle);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/* ha->hardware_lock is supposed to be held on entry */
++static int q2x_do_send_cmd_to_scst(struct q2t_cmd *cmd)
++{
++ int res = 0;
++ struct q2t_sess *sess = cmd->sess;
++ uint16_t lun;
++ atio_entry_t *atio = &cmd->atio.atio2x;
++ scst_data_direction dir;
++ int context;
++
++ TRACE_ENTRY();
++
++	/* convert it to network byte order */
++ lun = swab16(le16_to_cpu(atio->lun));
++ cmd->scst_cmd = scst_rx_cmd(sess->scst_sess, (uint8_t *)&lun,
++ sizeof(lun), atio->cdb, Q2T_MAX_CDB_LEN,
++ SCST_ATOMIC);
++
++ if (cmd->scst_cmd == NULL) {
++ PRINT_ERROR("%s", "qla2x00t: scst_rx_cmd() failed");
++ res = -EFAULT;
++ goto out;
++ }
++
++ cmd->tag = atio->rx_id;
++ scst_cmd_set_tag(cmd->scst_cmd, cmd->tag);
++ scst_cmd_set_tgt_priv(cmd->scst_cmd, cmd);
++
++ if ((atio->execution_codes & (ATIO_EXEC_READ | ATIO_EXEC_WRITE)) ==
++ (ATIO_EXEC_READ | ATIO_EXEC_WRITE))
++ dir = SCST_DATA_BIDI;
++ else if (atio->execution_codes & ATIO_EXEC_READ)
++ dir = SCST_DATA_READ;
++ else if (atio->execution_codes & ATIO_EXEC_WRITE)
++ dir = SCST_DATA_WRITE;
++ else
++ dir = SCST_DATA_NONE;
++ scst_cmd_set_expected(cmd->scst_cmd, dir,
++ le32_to_cpu(atio->data_length));
++
++ switch (atio->task_codes) {
++ case ATIO_SIMPLE_QUEUE:
++ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_SIMPLE);
++ break;
++ case ATIO_HEAD_OF_QUEUE:
++ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_HEAD_OF_QUEUE);
++ break;
++ case ATIO_ORDERED_QUEUE:
++ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_ORDERED);
++ break;
++ case ATIO_ACA_QUEUE:
++ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_ACA);
++ break;
++ case ATIO_UNTAGGED:
++ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_UNTAGGED);
++ break;
++ default:
++		PRINT_ERROR("qla2x00t: unknown task code %x, using "
++ "ORDERED instead", atio->task_codes);
++ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_ORDERED);
++ break;
++ }
++
++#ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
++ context = SCST_CONTEXT_THREAD;
++#else
++ context = SCST_CONTEXT_TASKLET;
++#endif
++
++ TRACE_DBG("Context %x", context);
++ TRACE(TRACE_SCSI, "qla2x00t: START Command (tag %d, queue_type %d)",
++ cmd->tag, scst_cmd_get_queue_type(cmd->scst_cmd));
++ scst_cmd_init_done(cmd->scst_cmd, context);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* ha->hardware_lock is supposed to be held on entry */
++static int q24_do_send_cmd_to_scst(struct q2t_cmd *cmd)
++{
++ int res = 0;
++ struct q2t_sess *sess = cmd->sess;
++ atio7_entry_t *atio = &cmd->atio.atio7;
++ scst_data_direction dir;
++ int context;
++
++ TRACE_ENTRY();
++
++ cmd->scst_cmd = scst_rx_cmd(sess->scst_sess,
++ (uint8_t *)&atio->fcp_cmnd.lun, sizeof(atio->fcp_cmnd.lun),
++ atio->fcp_cmnd.cdb, Q2T_MAX_CDB_LEN, SCST_ATOMIC);
++
++ if (cmd->scst_cmd == NULL) {
++ PRINT_ERROR("%s", "qla2x00t: scst_rx_cmd() failed");
++ res = -EFAULT;
++ goto out;
++ }
++
++ cmd->tag = atio->exchange_addr;
++ scst_cmd_set_tag(cmd->scst_cmd, cmd->tag);
++ scst_cmd_set_tgt_priv(cmd->scst_cmd, cmd);
++
++ if (atio->fcp_cmnd.rddata && atio->fcp_cmnd.wrdata)
++ dir = SCST_DATA_BIDI;
++ else if (atio->fcp_cmnd.rddata)
++ dir = SCST_DATA_READ;
++ else if (atio->fcp_cmnd.wrdata)
++ dir = SCST_DATA_WRITE;
++ else
++ dir = SCST_DATA_NONE;
++ scst_cmd_set_expected(cmd->scst_cmd, dir,
++ be32_to_cpu(atio->fcp_cmnd.data_length));
++
++ switch (atio->fcp_cmnd.task_attr) {
++ case ATIO_SIMPLE_QUEUE:
++ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_SIMPLE);
++ break;
++ case ATIO_HEAD_OF_QUEUE:
++ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_HEAD_OF_QUEUE);
++ break;
++ case ATIO_ORDERED_QUEUE:
++ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_ORDERED);
++ break;
++ case ATIO_ACA_QUEUE:
++ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_ACA);
++ break;
++ case ATIO_UNTAGGED:
++ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_UNTAGGED);
++ break;
++ default:
++		PRINT_ERROR("qla2x00t: unknown task code %x, using "
++ "ORDERED instead", atio->fcp_cmnd.task_attr);
++ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_ORDERED);
++ break;
++ }
++
++#ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
++ context = SCST_CONTEXT_THREAD;
++#else
++ context = SCST_CONTEXT_TASKLET;
++#endif
++
++ TRACE_DBG("Context %x", context);
++ TRACE(TRACE_SCSI, "qla2x00t: START Command %p (tag %d, queue type %x)",
++ cmd, cmd->tag, scst_cmd_get_queue_type(cmd->scst_cmd));
++ scst_cmd_init_done(cmd->scst_cmd, context);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* ha->hardware_lock supposed to be held on entry */
++static int q2t_do_send_cmd_to_scst(scsi_qla_host_t *ha,
++ struct q2t_cmd *cmd, struct q2t_sess *sess)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ cmd->sess = sess;
++ cmd->loop_id = sess->loop_id;
++ cmd->conf_compl_supported = sess->conf_compl_supported;
++
++ if (IS_FWI2_CAPABLE(ha))
++ res = q24_do_send_cmd_to_scst(cmd);
++ else
++ res = q2x_do_send_cmd_to_scst(cmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* ha->hardware_lock supposed to be held on entry */
++static int q2t_send_cmd_to_scst(scsi_qla_host_t *ha, atio_t *atio)
++{
++ int res = 0;
++ struct q2t_tgt *tgt = ha->tgt;
++ struct q2t_sess *sess;
++ struct q2t_cmd *cmd;
++
++ TRACE_ENTRY();
++
++ if (unlikely(tgt->tgt_stop)) {
++ TRACE_MGMT_DBG("New command while device %p is shutting "
++ "down", tgt);
++ res = -EFAULT;
++ goto out;
++ }
++
++ cmd = kmem_cache_zalloc(q2t_cmd_cachep, GFP_ATOMIC);
++ if (cmd == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "qla2x00t(%ld): Allocation of cmd "
++ "failed", ha->instance);
++ res = -ENOMEM;
++ goto out;
++ }
++
++ memcpy(&cmd->atio.atio2x, atio, sizeof(*atio));
++ cmd->state = Q2T_STATE_NEW;
++ cmd->tgt = ha->tgt;
++
++ if (IS_FWI2_CAPABLE(ha)) {
++ atio7_entry_t *a = (atio7_entry_t *)atio;
++ sess = q2t_find_sess_by_s_id(tgt, a->fcp_hdr.s_id);
++ if (unlikely(sess == NULL)) {
++ TRACE_MGMT_DBG("qla2x00t(%ld): Unable to find "
++ "wwn login (s_id %x:%x:%x), trying to create "
++ "it manually", ha->instance,
++ a->fcp_hdr.s_id[0], a->fcp_hdr.s_id[1],
++ a->fcp_hdr.s_id[2]);
++ goto out_sched;
++ }
++ } else {
++ sess = q2t_find_sess_by_loop_id(tgt,
++ GET_TARGET_ID(ha, (atio_entry_t *)atio));
++ if (unlikely(sess == NULL)) {
++ TRACE_MGMT_DBG("qla2x00t(%ld): Unable to find "
++ "wwn login (loop_id=%d), trying to create it "
++ "manually", ha->instance,
++ GET_TARGET_ID(ha, (atio_entry_t *)atio));
++ goto out_sched;
++ }
++ }
++
++ if (unlikely(sess->deleted))
++ q2t_reappear_sess(sess, " by new commands");
++
++ res = q2t_do_send_cmd_to_scst(ha, cmd, sess);
++ if (unlikely(res != 0))
++ goto out_free_cmd;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free_cmd:
++ q2t_free_cmd(cmd);
++ goto out;
++
++out_sched:
++ {
++ struct q2t_sess_work_param *prm;
++ unsigned long flags;
++
++ prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
++ if (prm == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): Unable to create session "
++ "work, command will be refused", ha->instance);
++ res = -1;
++ goto out_free_cmd;
++ }
++
++ TRACE_MGMT_DBG("Scheduling work to find session for cmd %p",
++ cmd);
++
++ prm->cmd = cmd;
++
++ spin_lock_irqsave(&tgt->sess_work_lock, flags);
++ if (!tgt->sess_works_pending)
++ tgt->tm_to_unknown = 0;
++ list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
++ tgt->sess_works_pending = 1;
++ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
++
++ schedule_work(&tgt->sess_work);
++ }
++ goto out;
++}
++
++/* ha->hardware_lock supposed to be held on entry */
++static int q2t_issue_task_mgmt(struct q2t_sess *sess, uint8_t *lun,
++ int lun_size, int fn, void *iocb, int flags)
++{
++ int res = 0, rc = -1;
++ struct q2t_mgmt_cmd *mcmd;
++
++ TRACE_ENTRY();
++
++ mcmd = mempool_alloc(q2t_mgmt_cmd_mempool, GFP_ATOMIC);
++ if (mcmd == NULL) {
++ PRINT_CRIT_ERROR("qla2x00t(%ld): Allocation of management "
++ "command failed, some commands and their data could "
++ "leak", sess->tgt->ha->instance);
++ res = -ENOMEM;
++ goto out;
++ }
++ memset(mcmd, 0, sizeof(*mcmd));
++
++ mcmd->sess = sess;
++ if (iocb) {
++ memcpy(&mcmd->orig_iocb.notify_entry, iocb,
++ sizeof(mcmd->orig_iocb.notify_entry));
++ }
++ mcmd->flags = flags;
++
++ switch (fn) {
++ case Q2T_CLEAR_ACA:
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): CLEAR_ACA received",
++ sess->tgt->ha->instance);
++ rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_CLEAR_ACA,
++ lun, lun_size, SCST_ATOMIC, mcmd);
++ break;
++
++ case Q2T_TARGET_RESET:
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): TARGET_RESET received",
++ sess->tgt->ha->instance);
++ rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_TARGET_RESET,
++ lun, lun_size, SCST_ATOMIC, mcmd);
++ break;
++
++ case Q2T_LUN_RESET:
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): LUN_RESET received",
++ sess->tgt->ha->instance);
++ rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_LUN_RESET,
++ lun, lun_size, SCST_ATOMIC, mcmd);
++ break;
++
++ case Q2T_CLEAR_TS:
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): CLEAR_TS received",
++ sess->tgt->ha->instance);
++ rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_CLEAR_TASK_SET,
++ lun, lun_size, SCST_ATOMIC, mcmd);
++ break;
++
++ case Q2T_ABORT_TS:
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): ABORT_TS received",
++ sess->tgt->ha->instance);
++ rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_ABORT_TASK_SET,
++ lun, lun_size, SCST_ATOMIC, mcmd);
++ break;
++
++ case Q2T_ABORT_ALL:
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): Doing ABORT_ALL_TASKS",
++ sess->tgt->ha->instance);
++ rc = scst_rx_mgmt_fn_lun(sess->scst_sess,
++ SCST_ABORT_ALL_TASKS,
++ lun, lun_size, SCST_ATOMIC, mcmd);
++ break;
++
++ case Q2T_ABORT_ALL_SESS:
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): Doing ABORT_ALL_TASKS_SESS",
++ sess->tgt->ha->instance);
++ rc = scst_rx_mgmt_fn_lun(sess->scst_sess,
++ SCST_ABORT_ALL_TASKS_SESS,
++ lun, lun_size, SCST_ATOMIC, mcmd);
++ break;
++
++ case Q2T_NEXUS_LOSS_SESS:
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): Doing NEXUS_LOSS_SESS",
++ sess->tgt->ha->instance);
++ rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_NEXUS_LOSS_SESS,
++ lun, lun_size, SCST_ATOMIC, mcmd);
++ break;
++
++ case Q2T_NEXUS_LOSS:
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): Doing NEXUS_LOSS",
++ sess->tgt->ha->instance);
++ rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_NEXUS_LOSS,
++ lun, lun_size, SCST_ATOMIC, mcmd);
++ break;
++
++ default:
++ PRINT_ERROR("qla2x00t(%ld): Unknown task mgmt fn 0x%x",
++ sess->tgt->ha->instance, fn);
++ rc = -1;
++ break;
++ }
++
++ if (rc != 0) {
++ PRINT_ERROR("qla2x00t(%ld): scst_rx_mgmt_fn_lun() failed: %d",
++ sess->tgt->ha->instance, rc);
++ res = -EFAULT;
++ goto out_free;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free:
++ mempool_free(mcmd, q2t_mgmt_cmd_mempool);
++ goto out;
++}
++
++/* ha->hardware_lock supposed to be held on entry */
++static int q2t_handle_task_mgmt(scsi_qla_host_t *ha, void *iocb)
++{
++ int res = 0;
++ struct q2t_tgt *tgt;
++ struct q2t_sess *sess;
++ uint8_t *lun;
++ uint16_t lun_data;
++ int lun_size;
++ int fn;
++
++ TRACE_ENTRY();
++
++ tgt = ha->tgt;
++ if (IS_FWI2_CAPABLE(ha)) {
++ atio7_entry_t *a = (atio7_entry_t *)iocb;
++ lun = (uint8_t *)&a->fcp_cmnd.lun;
++ lun_size = sizeof(a->fcp_cmnd.lun);
++ fn = a->fcp_cmnd.task_mgmt_flags;
++ sess = q2t_find_sess_by_s_id(tgt, a->fcp_hdr.s_id);
++ if (sess != NULL) {
++ sess->s_id.b.al_pa = a->fcp_hdr.s_id[2];
++ sess->s_id.b.area = a->fcp_hdr.s_id[1];
++ sess->s_id.b.domain = a->fcp_hdr.s_id[0];
++ }
++ } else {
++ notify_entry_t *n = (notify_entry_t *)iocb;
++		/* convert it to network byte order */
++ lun_data = swab16(le16_to_cpu(n->lun));
++ lun = (uint8_t *)&lun_data;
++ lun_size = sizeof(lun_data);
++ fn = n->task_flags >> IMM_NTFY_TASK_MGMT_SHIFT;
++ sess = q2t_find_sess_by_loop_id(tgt, GET_TARGET_ID(ha, n));
++ }
++
++ if (sess == NULL) {
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): task mgmt fn 0x%x for "
++			"non-existent session", ha->instance, fn);
++ tgt->tm_to_unknown = 1;
++ res = -ESRCH;
++ goto out;
++ }
++
++ res = q2t_issue_task_mgmt(sess, lun, lun_size, fn, iocb, 0);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* ha->hardware_lock supposed to be held on entry */
++static int q2t_abort_task(scsi_qla_host_t *ha, notify_entry_t *iocb)
++{
++ int res = 0, rc;
++ struct q2t_mgmt_cmd *mcmd;
++ struct q2t_sess *sess;
++ int loop_id;
++ uint32_t tag;
++
++ TRACE_ENTRY();
++
++ loop_id = GET_TARGET_ID(ha, iocb);
++ tag = le16_to_cpu(iocb->seq_id);
++
++ sess = q2t_find_sess_by_loop_id(ha->tgt, loop_id);
++ if (sess == NULL) {
++		TRACE(TRACE_MGMT, "qla2x00t(%ld): task abort for nonexistent "
++ "session", ha->instance);
++ ha->tgt->tm_to_unknown = 1;
++ res = -EFAULT;
++ goto out;
++ }
++
++ mcmd = mempool_alloc(q2t_mgmt_cmd_mempool, GFP_ATOMIC);
++ if (mcmd == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): %s: Allocation of ABORT cmd failed",
++ ha->instance, __func__);
++ res = -ENOMEM;
++ goto out;
++ }
++ memset(mcmd, 0, sizeof(*mcmd));
++
++ mcmd->sess = sess;
++ memcpy(&mcmd->orig_iocb.notify_entry, iocb,
++ sizeof(mcmd->orig_iocb.notify_entry));
++
++ rc = scst_rx_mgmt_fn_tag(sess->scst_sess, SCST_ABORT_TASK, tag,
++ SCST_ATOMIC, mcmd);
++ if (rc != 0) {
++ PRINT_ERROR("qla2x00t(%ld): scst_rx_mgmt_fn_tag() failed: %d",
++ ha->instance, rc);
++ res = -EFAULT;
++ goto out_free;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free:
++ mempool_free(mcmd, q2t_mgmt_cmd_mempool);
++ goto out;
++}
++
++/*
++ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
++ */
++static int q24_handle_els(scsi_qla_host_t *ha, notify24xx_entry_t *iocb)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): ELS opcode %x", ha->instance,
++ iocb->status_subcode);
++
++ switch (iocb->status_subcode) {
++ case ELS_PLOGI:
++ case ELS_FLOGI:
++ case ELS_PRLI:
++ case ELS_LOGO:
++ case ELS_PRLO:
++ res = q2t_reset(ha, iocb, Q2T_NEXUS_LOSS_SESS);
++ break;
++
++ case ELS_PDISC:
++ case ELS_ADISC:
++ {
++ struct q2t_tgt *tgt = ha->tgt;
++ if (tgt->link_reinit_iocb_pending) {
++ q24_send_notify_ack(ha, &tgt->link_reinit_iocb, 0, 0, 0);
++ tgt->link_reinit_iocb_pending = 0;
++ }
++ res = 1; /* send notify ack */
++ break;
++ }
++
++ default:
++ PRINT_ERROR("qla2x00t(%ld): Unsupported ELS command %x "
++ "received", ha->instance, iocb->status_subcode);
++ res = q2t_reset(ha, iocb, Q2T_NEXUS_LOSS_SESS);
++ break;
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int q2t_cut_cmd_data_head(struct q2t_cmd *cmd, unsigned int offset)
++{
++ int res = 0;
++ int cnt, first_sg, first_page = 0, first_page_offs = 0, i;
++ unsigned int l;
++ int cur_dst, cur_src;
++ struct scatterlist *sg;
++ size_t bufflen = 0;
++
++ TRACE_ENTRY();
++
++ first_sg = -1;
++ cnt = 0;
++ l = 0;
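++	/*
++	 * Scan the SG list for the element (and the page within it) that
++	 * holds byte 'offset'; everything before that point will be dropped.
++	 */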
++ for (i = 0; i < cmd->sg_cnt; i++) {
++ l += cmd->sg[i].length;
++ if (l > offset) {
++ int sg_offs = l - cmd->sg[i].length;
++ first_sg = i;
++ if (cmd->sg[i].offset == 0) {
++ first_page_offs = offset % PAGE_SIZE;
++ first_page = (offset - sg_offs) >> PAGE_SHIFT;
++ } else {
++ TRACE_SG("i=%d, sg[i].offset=%d, "
++ "sg_offs=%d", i, cmd->sg[i].offset, sg_offs);
++ if ((cmd->sg[i].offset + sg_offs) > offset) {
++ first_page_offs = offset - sg_offs;
++ first_page = 0;
++ } else {
++ int sec_page_offs = sg_offs +
++ (PAGE_SIZE - cmd->sg[i].offset);
++ first_page_offs = sec_page_offs % PAGE_SIZE;
++ first_page = 1 +
++ ((offset - sec_page_offs) >>
++ PAGE_SHIFT);
++ }
++ }
++ cnt = cmd->sg_cnt - i + (first_page_offs != 0);
++ break;
++ }
++ }
++ if (first_sg == -1) {
++ PRINT_ERROR("qla2x00t(%ld): Wrong offset %d, buf length %d",
++ cmd->tgt->ha->instance, offset, cmd->bufflen);
++ res = -EINVAL;
++ goto out;
++ }
++
++ TRACE_SG("offset=%d, first_sg=%d, first_page=%d, "
++ "first_page_offs=%d, cmd->bufflen=%d, cmd->sg_cnt=%d", offset,
++ first_sg, first_page, first_page_offs, cmd->bufflen,
++ cmd->sg_cnt);
++
++ sg = kmalloc(cnt * sizeof(sg[0]), GFP_KERNEL);
++ if (sg == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): Unable to allocate cut "
++ "SG (len %zd)", cmd->tgt->ha->instance,
++ cnt * sizeof(sg[0]));
++ res = -ENOMEM;
++ goto out;
++ }
++ sg_init_table(sg, cnt);
++
++ cur_dst = 0;
++ cur_src = first_sg;
++ if (first_page_offs != 0) {
++ int fpgs;
++ sg_set_page(&sg[cur_dst], &sg_page(&cmd->sg[cur_src])[first_page],
++ PAGE_SIZE - first_page_offs, first_page_offs);
++ bufflen += sg[cur_dst].length;
++ TRACE_SG("cur_dst=%d, cur_src=%d, sg[].page=%p, "
++ "sg[].offset=%d, sg[].length=%d, bufflen=%zu",
++ cur_dst, cur_src, sg_page(&sg[cur_dst]), sg[cur_dst].offset,
++ sg[cur_dst].length, bufflen);
++ cur_dst++;
++
++ fpgs = (cmd->sg[cur_src].length >> PAGE_SHIFT) +
++ ((cmd->sg[cur_src].length & ~PAGE_MASK) != 0);
++ first_page++;
++ if (fpgs > first_page) {
++ sg_set_page(&sg[cur_dst],
++ &sg_page(&cmd->sg[cur_src])[first_page],
++ cmd->sg[cur_src].length - PAGE_SIZE*first_page,
++ 0);
++ TRACE_SG("fpgs=%d, cur_dst=%d, cur_src=%d, "
++ "sg[].page=%p, sg[].length=%d, bufflen=%zu",
++ fpgs, cur_dst, cur_src, sg_page(&sg[cur_dst]),
++ sg[cur_dst].length, bufflen);
++ bufflen += sg[cur_dst].length;
++ cur_dst++;
++ }
++ cur_src++;
++ }
++
++ while (cur_src < cmd->sg_cnt) {
++ sg_set_page(&sg[cur_dst], sg_page(&cmd->sg[cur_src]),
++ cmd->sg[cur_src].length, cmd->sg[cur_src].offset);
++ TRACE_SG("cur_dst=%d, cur_src=%d, "
++ "sg[].page=%p, sg[].length=%d, sg[].offset=%d, "
++ "bufflen=%zu", cur_dst, cur_src, sg_page(&sg[cur_dst]),
++ sg[cur_dst].length, sg[cur_dst].offset, bufflen);
++ bufflen += sg[cur_dst].length;
++ cur_dst++;
++ cur_src++;
++ }
++
++ if (cmd->free_sg)
++ kfree(cmd->sg);
++
++ cmd->sg = sg;
++ cmd->free_sg = 1;
++ cmd->sg_cnt = cur_dst;
++ cmd->bufflen = bufflen;
++ cmd->offset += offset;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static inline int q2t_srr_adjust_data(struct q2t_cmd *cmd,
++ uint32_t srr_rel_offs, int *xmit_type)
++{
++ int res = 0;
++ int rel_offs;
++
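++	/*
++	 * cmd->offset tracks data already cut from the head of this cmd, so
++	 * convert the absolute SRR offset into an offset within what is left.
++	 */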
++ rel_offs = srr_rel_offs - cmd->offset;
++ TRACE_MGMT_DBG("srr_rel_offs=%d, rel_offs=%d", srr_rel_offs, rel_offs);
++
++ *xmit_type = Q2T_XMIT_ALL;
++
++ if (rel_offs < 0) {
++ PRINT_ERROR("qla2x00t(%ld): SRR rel_offs (%d) "
++ "< 0", cmd->tgt->ha->instance, rel_offs);
++ res = -1;
++ } else if (rel_offs == cmd->bufflen)
++ *xmit_type = Q2T_XMIT_STATUS;
++ else if (rel_offs > 0)
++ res = q2t_cut_cmd_data_head(cmd, rel_offs);
++
++ return res;
++}
++
++/* No locks, thread context */
++static void q24_handle_srr(scsi_qla_host_t *ha, struct srr_ctio *sctio,
++ struct srr_imm *imm)
++{
++ notify24xx_entry_t *ntfy = &imm->imm.notify_entry24;
++ struct q2t_cmd *cmd = sctio->cmd;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("SRR cmd %p, srr_ui %x", cmd, ntfy->srr_ui);
++
++ switch (ntfy->srr_ui) {
++ case SRR_IU_STATUS:
++ spin_lock_irq(&ha->hardware_lock);
++ q24_send_notify_ack(ha, ntfy,
++ NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
++ spin_unlock_irq(&ha->hardware_lock);
++ __q24_xmit_response(cmd, Q2T_XMIT_STATUS);
++ break;
++ case SRR_IU_DATA_IN:
++ cmd->bufflen = scst_cmd_get_adjusted_resp_data_len(cmd->scst_cmd);
++ if (q2t_has_data(cmd)) {
++ uint32_t offset;
++ int xmit_type;
++ offset = le32_to_cpu(imm->imm.notify_entry24.srr_rel_offs);
++ if (q2t_srr_adjust_data(cmd, offset, &xmit_type) != 0)
++ goto out_reject;
++ spin_lock_irq(&ha->hardware_lock);
++ q24_send_notify_ack(ha, ntfy,
++ NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
++ spin_unlock_irq(&ha->hardware_lock);
++ __q24_xmit_response(cmd, xmit_type);
++ } else {
++ PRINT_ERROR("qla2x00t(%ld): SRR for in data for cmd "
++ "without them (tag %d, SCSI status %d), "
++ "reject", ha->instance, cmd->tag,
++ scst_cmd_get_status(cmd->scst_cmd));
++ goto out_reject;
++ }
++ break;
++ case SRR_IU_DATA_OUT:
++ cmd->bufflen = scst_cmd_get_write_fields(cmd->scst_cmd,
++ &cmd->sg, &cmd->sg_cnt);
++ if (q2t_has_data(cmd)) {
++ uint32_t offset;
++ int xmit_type;
++ offset = le32_to_cpu(imm->imm.notify_entry24.srr_rel_offs);
++ if (q2t_srr_adjust_data(cmd, offset, &xmit_type) != 0)
++ goto out_reject;
++ spin_lock_irq(&ha->hardware_lock);
++ q24_send_notify_ack(ha, ntfy,
++ NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
++ spin_unlock_irq(&ha->hardware_lock);
++ if (xmit_type & Q2T_XMIT_DATA)
++ __q2t_rdy_to_xfer(cmd);
++ } else {
++ PRINT_ERROR("qla2x00t(%ld): SRR for out data for cmd "
++ "without them (tag %d, SCSI status %d), "
++ "reject", ha->instance, cmd->tag,
++ scst_cmd_get_status(cmd->scst_cmd));
++ goto out_reject;
++ }
++ break;
++ default:
++ PRINT_ERROR("qla2x00t(%ld): Unknown srr_ui value %x",
++ ha->instance, ntfy->srr_ui);
++ goto out_reject;
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++
++out_reject:
++ spin_lock_irq(&ha->hardware_lock);
++ q24_send_notify_ack(ha, ntfy, NOTIFY_ACK_SRR_FLAGS_REJECT,
++ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
++ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
++ if (cmd->state == Q2T_STATE_NEED_DATA) {
++ cmd->state = Q2T_STATE_DATA_IN;
++ scst_rx_data(cmd->scst_cmd, SCST_RX_STATUS_ERROR,
++ SCST_CONTEXT_THREAD);
++ } else
++ q24_send_term_exchange(ha, cmd, &cmd->atio.atio7, 1);
++ spin_unlock_irq(&ha->hardware_lock);
++ goto out;
++}
++
++/* No locks, thread context */
++static void q2x_handle_srr(scsi_qla_host_t *ha, struct srr_ctio *sctio,
++ struct srr_imm *imm)
++{
++ notify_entry_t *ntfy = &imm->imm.notify_entry;
++ struct q2t_cmd *cmd = sctio->cmd;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("SRR cmd %p, srr_ui %x", cmd, ntfy->srr_ui);
++
++ switch (ntfy->srr_ui) {
++ case SRR_IU_STATUS:
++ spin_lock_irq(&ha->hardware_lock);
++ q2x_send_notify_ack(ha, ntfy, 0, 0, 0,
++ NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
++ spin_unlock_irq(&ha->hardware_lock);
++ __q2x_xmit_response(cmd, Q2T_XMIT_STATUS);
++ break;
++ case SRR_IU_DATA_IN:
++ cmd->bufflen = scst_cmd_get_adjusted_resp_data_len(cmd->scst_cmd);
++ if (q2t_has_data(cmd)) {
++ uint32_t offset;
++ int xmit_type;
++ offset = le32_to_cpu(imm->imm.notify_entry.srr_rel_offs);
++ if (q2t_srr_adjust_data(cmd, offset, &xmit_type) != 0)
++ goto out_reject;
++ spin_lock_irq(&ha->hardware_lock);
++ q2x_send_notify_ack(ha, ntfy, 0, 0, 0,
++ NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
++ spin_unlock_irq(&ha->hardware_lock);
++ __q2x_xmit_response(cmd, xmit_type);
++ } else {
++ PRINT_ERROR("qla2x00t(%ld): SRR for in data for cmd "
++ "without them (tag %d, SCSI status %d), "
++ "reject", ha->instance, cmd->tag,
++ scst_cmd_get_status(cmd->scst_cmd));
++ goto out_reject;
++ }
++ break;
++ case SRR_IU_DATA_OUT:
++ cmd->bufflen = scst_cmd_get_write_fields(cmd->scst_cmd,
++ &cmd->sg, &cmd->sg_cnt);
++ if (q2t_has_data(cmd)) {
++ uint32_t offset;
++ int xmit_type;
++ offset = le32_to_cpu(imm->imm.notify_entry.srr_rel_offs);
++ if (q2t_srr_adjust_data(cmd, offset, &xmit_type) != 0)
++ goto out_reject;
++ spin_lock_irq(&ha->hardware_lock);
++ q2x_send_notify_ack(ha, ntfy, 0, 0, 0,
++ NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
++ spin_unlock_irq(&ha->hardware_lock);
++ if (xmit_type & Q2T_XMIT_DATA)
++ __q2t_rdy_to_xfer(cmd);
++ } else {
++ PRINT_ERROR("qla2x00t(%ld): SRR for out data for cmd "
++ "without them (tag %d, SCSI status %d), "
++ "reject", ha->instance, cmd->tag,
++ scst_cmd_get_status(cmd->scst_cmd));
++ goto out_reject;
++ }
++ break;
++ default:
++ PRINT_ERROR("qla2x00t(%ld): Unknown srr_ui value %x",
++ ha->instance, ntfy->srr_ui);
++ goto out_reject;
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++
++out_reject:
++ spin_lock_irq(&ha->hardware_lock);
++ q2x_send_notify_ack(ha, ntfy, 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_REJECT,
++ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
++ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
++ if (cmd->state == Q2T_STATE_NEED_DATA) {
++ cmd->state = Q2T_STATE_DATA_IN;
++ scst_rx_data(cmd->scst_cmd, SCST_RX_STATUS_ERROR,
++ SCST_CONTEXT_THREAD);
++ } else
++ q2x_send_term_exchange(ha, cmd, &cmd->atio.atio2x, 1);
++ spin_unlock_irq(&ha->hardware_lock);
++ goto out;
++}
++
++static void q2t_reject_free_srr_imm(scsi_qla_host_t *ha, struct srr_imm *imm,
++ int ha_locked)
++{
++ if (!ha_locked)
++ spin_lock_irq(&ha->hardware_lock);
++
++ if (IS_FWI2_CAPABLE(ha)) {
++ q24_send_notify_ack(ha, &imm->imm.notify_entry24,
++ NOTIFY_ACK_SRR_FLAGS_REJECT,
++ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
++ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
++ } else {
++ q2x_send_notify_ack(ha, &imm->imm.notify_entry,
++ 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_REJECT,
++ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
++ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
++ }
++
++ if (!ha_locked)
++ spin_unlock_irq(&ha->hardware_lock);
++
++ kfree(imm);
++ return;
++}
++
++static void q2t_handle_srr_work(struct work_struct *work)
++{
++ struct q2t_tgt *tgt = container_of(work, struct q2t_tgt, srr_work);
++ scsi_qla_host_t *ha = tgt->ha;
++ struct srr_ctio *sctio;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("SRR work (tgt %p)", tgt);
++
++restart:
++ spin_lock_irq(&tgt->srr_lock);
++ list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
++ struct srr_imm *imm;
++ struct q2t_cmd *cmd;
++ struct srr_imm *i, *ti;
++
++ imm = NULL;
++ list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
++ srr_list_entry) {
++ if (i->srr_id == sctio->srr_id) {
++ list_del(&i->srr_list_entry);
++ if (imm) {
++ PRINT_ERROR("qla2x00t(%ld): There must "
++ "be only one IMM SRR per CTIO SRR "
++					"(IMM SRR %p, id %d, CTIO %p)",
++ ha->instance, i, i->srr_id, sctio);
++ q2t_reject_free_srr_imm(ha, i, 0);
++ } else
++ imm = i;
++ }
++ }
++
++ TRACE_MGMT_DBG("IMM SRR %p, CTIO SRR %p (id %d)", imm, sctio,
++ sctio->srr_id);
++
++ if (imm == NULL) {
++			TRACE_MGMT_DBG("No matching IMM found for SRR CTIO "
++ "(id %d)", sctio->srr_id);
++ continue;
++ } else
++ list_del(&sctio->srr_list_entry);
++
++ spin_unlock_irq(&tgt->srr_lock);
++
++ cmd = sctio->cmd;
++
++ /* Restore the originals, except bufflen */
++ cmd->offset = scst_cmd_get_ppl_offset(cmd->scst_cmd);
++ if (cmd->free_sg) {
++ kfree(cmd->sg);
++ cmd->free_sg = 0;
++ }
++ cmd->sg = scst_cmd_get_sg(cmd->scst_cmd);
++ cmd->sg_cnt = scst_cmd_get_sg_cnt(cmd->scst_cmd);
++
++ TRACE_MGMT_DBG("SRR cmd %p (scst_cmd %p, tag %d, op %x), "
++ "sg_cnt=%d, offset=%d", cmd, cmd->scst_cmd,
++ cmd->tag, cmd->scst_cmd->cdb[0], cmd->sg_cnt,
++ cmd->offset);
++
++ if (IS_FWI2_CAPABLE(ha))
++ q24_handle_srr(ha, sctio, imm);
++ else
++ q2x_handle_srr(ha, sctio, imm);
++
++ kfree(imm);
++ kfree(sctio);
++ goto restart;
++ }
++ spin_unlock_irq(&tgt->srr_lock);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* ha->hardware_lock supposed to be held on entry */
++static void q2t_prepare_srr_imm(scsi_qla_host_t *ha, void *iocb)
++{
++ struct srr_imm *imm;
++ struct q2t_tgt *tgt = ha->tgt;
++ notify_entry_t *iocb2x = (notify_entry_t *)iocb;
++ notify24xx_entry_t *iocb24 = (notify24xx_entry_t *)iocb;
++ struct srr_ctio *sctio;
++
++ tgt->imm_srr_id++;
++
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): SRR received", ha->instance);
++
++ imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
++ if (imm != NULL) {
++ memcpy(&imm->imm.notify_entry, iocb,
++ sizeof(imm->imm.notify_entry));
++
++ /* IRQ is already OFF */
++ spin_lock(&tgt->srr_lock);
++ imm->srr_id = tgt->imm_srr_id;
++ list_add_tail(&imm->srr_list_entry,
++ &tgt->srr_imm_list);
++ TRACE_MGMT_DBG("IMM NTFY SRR %p added (id %d, ui %x)", imm,
++ imm->srr_id, iocb24->srr_ui);
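++		/*
++		 * IMM and CTIO SRRs are paired by srr_id: when the two
++		 * counters match, the matching CTIO SRR should already be
++		 * queued and the SRR work can be scheduled.
++		 */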
++ if (tgt->imm_srr_id == tgt->ctio_srr_id) {
++ int found = 0;
++ list_for_each_entry(sctio, &tgt->srr_ctio_list,
++ srr_list_entry) {
++ if (sctio->srr_id == imm->srr_id) {
++ found = 1;
++ break;
++ }
++ }
++ if (found) {
++ TRACE_MGMT_DBG("%s", "Scheduling srr work");
++ schedule_work(&tgt->srr_work);
++ } else {
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): imm_srr_id "
++ "== ctio_srr_id (%d), but there is no "
++ "corresponding SRR CTIO, deleting IMM "
++ "SRR %p", ha->instance, tgt->ctio_srr_id,
++ imm);
++ list_del(&imm->srr_list_entry);
++
++ kfree(imm);
++
++ spin_unlock(&tgt->srr_lock);
++ goto out_reject;
++ }
++ }
++ spin_unlock(&tgt->srr_lock);
++ } else {
++ struct srr_ctio *ts;
++
++ PRINT_ERROR("qla2x00t(%ld): Unable to allocate SRR IMM "
++ "entry, SRR request will be rejected", ha->instance);
++
++ /* IRQ is already OFF */
++ spin_lock(&tgt->srr_lock);
++ list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
++ srr_list_entry) {
++ if (sctio->srr_id == tgt->imm_srr_id) {
++ TRACE_MGMT_DBG("CTIO SRR %p deleted "
++ "(id %d)", sctio, sctio->srr_id);
++ list_del(&sctio->srr_list_entry);
++ if (IS_FWI2_CAPABLE(ha)) {
++ q24_send_term_exchange(ha, sctio->cmd,
++ &sctio->cmd->atio.atio7, 1);
++ } else {
++ q2x_send_term_exchange(ha, sctio->cmd,
++ &sctio->cmd->atio.atio2x, 1);
++ }
++ kfree(sctio);
++ }
++ }
++ spin_unlock(&tgt->srr_lock);
++ goto out_reject;
++ }
++
++out:
++ return;
++
++out_reject:
++ if (IS_FWI2_CAPABLE(ha)) {
++ q24_send_notify_ack(ha, iocb24,
++ NOTIFY_ACK_SRR_FLAGS_REJECT,
++ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
++ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
++ } else {
++ q2x_send_notify_ack(ha, iocb2x,
++ 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_REJECT,
++ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
++ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
++ }
++ goto out;
++}
++
++/*
++ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
++ */
++static void q2t_handle_imm_notify(scsi_qla_host_t *ha, void *iocb)
++{
++ uint16_t status;
++ uint32_t add_flags = 0;
++ int send_notify_ack = 1;
++ notify_entry_t *iocb2x = (notify_entry_t *)iocb;
++ notify24xx_entry_t *iocb24 = (notify24xx_entry_t *)iocb;
++
++ TRACE_ENTRY();
++
++ status = le16_to_cpu(iocb2x->status);
++
++ TRACE_BUFF_FLAG(TRACE_BUFF, "IMMED Notify Coming Up",
++ iocb, sizeof(*iocb2x));
++
++ switch (status) {
++ case IMM_NTFY_LIP_RESET:
++ {
++ if (IS_FWI2_CAPABLE(ha)) {
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): LIP reset (loop %#x), "
++ "subcode %x", ha->instance,
++ le16_to_cpu(iocb24->nport_handle),
++ iocb24->status_subcode);
++ } else {
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): LIP reset (I %#x)",
++ ha->instance, GET_TARGET_ID(ha, iocb2x));
++ /* set the Clear LIP reset event flag */
++ add_flags |= NOTIFY_ACK_CLEAR_LIP_RESET;
++ }
++ if (q2t_reset(ha, iocb, Q2T_ABORT_ALL) == 0)
++ send_notify_ack = 0;
++ break;
++ }
++
++ case IMM_NTFY_LIP_LINK_REINIT:
++ {
++ struct q2t_tgt *tgt = ha->tgt;
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): LINK REINIT (loop %#x, "
++ "subcode %x)", ha->instance,
++ le16_to_cpu(iocb24->nport_handle),
++ iocb24->status_subcode);
++ if (tgt->link_reinit_iocb_pending)
++ q24_send_notify_ack(ha, &tgt->link_reinit_iocb, 0, 0, 0);
++ memcpy(&tgt->link_reinit_iocb, iocb24, sizeof(*iocb24));
++ tgt->link_reinit_iocb_pending = 1;
++ /*
++		 * QLogic requires waiting after LINK REINIT for possible
++ * PDISC or ADISC ELS commands
++ */
++ send_notify_ack = 0;
++ break;
++ }
++
++ case IMM_NTFY_PORT_LOGOUT:
++ if (IS_FWI2_CAPABLE(ha)) {
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): Port logout (loop "
++ "%#x, subcode %x)", ha->instance,
++ le16_to_cpu(iocb24->nport_handle),
++ iocb24->status_subcode);
++ } else {
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): Port logout (S "
++ "%08x -> L %#x)", ha->instance,
++ le16_to_cpu(iocb2x->seq_id),
++ le16_to_cpu(iocb2x->lun));
++ }
++ if (q2t_reset(ha, iocb, Q2T_NEXUS_LOSS_SESS) == 0)
++ send_notify_ack = 0;
++ /* The sessions will be cleared in the callback, if needed */
++ break;
++
++ case IMM_NTFY_GLBL_TPRLO:
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): Global TPRLO (%x)",
++ ha->instance, status);
++ if (q2t_reset(ha, iocb, Q2T_NEXUS_LOSS) == 0)
++ send_notify_ack = 0;
++ /* The sessions will be cleared in the callback, if needed */
++ break;
++
++ case IMM_NTFY_PORT_CONFIG:
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): Port config changed (%x)",
++ ha->instance, status);
++ if (q2t_reset(ha, iocb, Q2T_ABORT_ALL) == 0)
++ send_notify_ack = 0;
++ /* The sessions will be cleared in the callback, if needed */
++ break;
++
++ case IMM_NTFY_GLBL_LOGO:
++ PRINT_WARNING("qla2x00t(%ld): Link failure detected",
++ ha->instance);
++ /* I_T nexus loss */
++ if (q2t_reset(ha, iocb, Q2T_NEXUS_LOSS) == 0)
++ send_notify_ack = 0;
++ break;
++
++ case IMM_NTFY_IOCB_OVERFLOW:
++ PRINT_ERROR("qla2x00t(%ld): Cannot provide requested "
++ "capability (IOCB overflowed the immediate notify "
++ "resource count)", ha->instance);
++ break;
++
++ case IMM_NTFY_ABORT_TASK:
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): Abort Task (S %08x I %#x -> "
++ "L %#x)", ha->instance, le16_to_cpu(iocb2x->seq_id),
++ GET_TARGET_ID(ha, iocb2x), le16_to_cpu(iocb2x->lun));
++ if (q2t_abort_task(ha, iocb2x) == 0)
++ send_notify_ack = 0;
++ break;
++
++ case IMM_NTFY_RESOURCE:
++ PRINT_ERROR("qla2x00t(%ld): Out of resources, host %ld",
++ ha->instance, ha->host_no);
++ break;
++
++ case IMM_NTFY_MSG_RX:
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): Immediate notify task %x",
++ ha->instance, iocb2x->task_flags);
++ if (q2t_handle_task_mgmt(ha, iocb2x) == 0)
++ send_notify_ack = 0;
++ break;
++
++ case IMM_NTFY_ELS:
++ if (q24_handle_els(ha, iocb24) == 0)
++ send_notify_ack = 0;
++ break;
++
++ case IMM_NTFY_SRR:
++ q2t_prepare_srr_imm(ha, iocb);
++ send_notify_ack = 0;
++ break;
++
++ default:
++ PRINT_ERROR("qla2x00t(%ld): Received unknown immediate "
++ "notify status %x", ha->instance, status);
++ break;
++ }
++
++ if (send_notify_ack) {
++ if (IS_FWI2_CAPABLE(ha))
++ q24_send_notify_ack(ha, iocb24, 0, 0, 0);
++ else
++ q2x_send_notify_ack(ha, iocb2x, add_flags, 0, 0, 0,
++ 0, 0);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
++ */
++static void q2x_send_busy(scsi_qla_host_t *ha, atio_entry_t *atio)
++{
++ ctio_ret_entry_t *ctio;
++
++ TRACE_ENTRY();
++
++ /* Sending marker isn't necessary, since we called from ISR */
++
++ ctio = (ctio_ret_entry_t *)qla2x00_req_pkt(ha);
++ if (ctio == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
++ "request packet", ha->instance, __func__);
++ goto out;
++ }
++
++ ctio->entry_type = CTIO_RET_TYPE;
++ ctio->entry_count = 1;
++ ctio->handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
++ ctio->scsi_status = __constant_cpu_to_le16(SAM_STAT_BUSY);
++ ctio->residual = atio->data_length;
++ if (ctio->residual != 0)
++ ctio->scsi_status |= SS_RESIDUAL_UNDER;
++
++ /* Set IDs */
++ SET_TARGET_ID(ha, ctio->target, GET_TARGET_ID(ha, atio));
++ ctio->rx_id = atio->rx_id;
++
++ ctio->flags = __constant_cpu_to_le16(OF_SSTS | OF_FAST_POST |
++ OF_NO_DATA | OF_SS_MODE_1);
++ ctio->flags |= __constant_cpu_to_le16(OF_INC_RC);
++ /*
++ * CTIO from fw w/o scst_cmd doesn't provide enough info to retry it,
++	 * if the explicit confirmation is used.
++ */
++
++ TRACE_BUFFER("CTIO BUSY packet data", ctio, REQUEST_ENTRY_SIZE);
++
++ q2t_exec_queue(ha);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
++ */
++static void q24_send_busy(scsi_qla_host_t *ha, atio7_entry_t *atio,
++ uint16_t status)
++{
++ ctio7_status1_entry_t *ctio;
++ struct q2t_sess *sess;
++
++ TRACE_ENTRY();
++
++ sess = q2t_find_sess_by_s_id(ha->tgt, atio->fcp_hdr.s_id);
++ if (sess == NULL) {
++ q24_send_term_exchange(ha, NULL, atio, 1);
++ goto out;
++ }
++
++ /* Sending marker isn't necessary, since we called from ISR */
++
++ ctio = (ctio7_status1_entry_t *)qla2x00_req_pkt(ha);
++ if (ctio == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
++ "request packet", ha->instance, __func__);
++ goto out;
++ }
++
++ ctio->common.entry_type = CTIO_TYPE7;
++ ctio->common.entry_count = 1;
++ ctio->common.handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
++ ctio->common.nport_handle = sess->loop_id;
++ ctio->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
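++	/* CTIO7 takes the initiator S_ID bytes in reverse order (al_pa first) */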
++ ctio->common.initiator_id[0] = atio->fcp_hdr.s_id[2];
++ ctio->common.initiator_id[1] = atio->fcp_hdr.s_id[1];
++ ctio->common.initiator_id[2] = atio->fcp_hdr.s_id[0];
++ ctio->common.exchange_addr = atio->exchange_addr;
++ ctio->flags = (atio->attr << 9) | __constant_cpu_to_le16(
++ CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
++ CTIO7_FLAGS_DONT_RET_CTIO);
++ /*
++ * CTIO from fw w/o scst_cmd doesn't provide enough info to retry it,
++	 * if the explicit confirmation is used.
++ */
++ ctio->ox_id = swab16(atio->fcp_hdr.ox_id);
++ ctio->scsi_status = cpu_to_le16(status);
++ ctio->residual = atio->fcp_cmnd.data_length;
++ if (ctio->residual != 0)
++ ctio->scsi_status |= SS_RESIDUAL_UNDER;
++
++ TRACE_BUFFER("CTIO7 BUSY packet data", ctio, REQUEST_ENTRY_SIZE);
++
++ q2t_exec_queue(ha);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* ha->hardware_lock supposed to be held on entry */
++/* called via callback from qla2xxx */
++static void q24_atio_pkt(scsi_qla_host_t *ha, atio7_entry_t *atio)
++{
++ int rc;
++ struct q2t_tgt *tgt = ha->tgt;
++
++ TRACE_ENTRY();
++
++ if (unlikely(tgt == NULL)) {
++ TRACE_MGMT_DBG("ATIO pkt, but no tgt (ha %p)", ha);
++ goto out;
++ }
++
++ TRACE(TRACE_SCSI, "qla2x00t(%ld): ATIO pkt %p: type %02x count %02x",
++ ha->instance, atio, atio->entry_type, atio->entry_count);
++
++ /*
++ * In tgt_stop mode we also should allow all requests to pass.
++	 * In tgt_stop mode we should also allow all requests to pass.
++	 * Otherwise, some commands would get stuck.
++
++ tgt->irq_cmd_count++;
++
++ switch (atio->entry_type) {
++ case ATIO_TYPE7:
++ if (unlikely(atio->entry_count > 1) ||
++ unlikely(atio->fcp_cmnd.add_cdb_len != 0)) {
++ PRINT_ERROR("qla2x00t(%ld): Multi entry ATIO7 IOCBs "
++				"(%d), i.e. with CDBs > 16 bytes (%d), are not "
++ "supported", ha->instance, atio->entry_count,
++ atio->fcp_cmnd.add_cdb_len);
++ break;
++ }
++ TRACE_DBG("ATIO_TYPE7 instance %ld, lun %Lx, read/write %d/%d, "
++ "data_length %04x, s_id %x:%x:%x", ha->instance,
++ atio->fcp_cmnd.lun, atio->fcp_cmnd.rddata,
++ atio->fcp_cmnd.wrdata,
++ be32_to_cpu(atio->fcp_cmnd.data_length),
++ atio->fcp_hdr.s_id[0], atio->fcp_hdr.s_id[1],
++ atio->fcp_hdr.s_id[2]);
++ TRACE_BUFFER("Incoming ATIO7 packet data", atio,
++ REQUEST_ENTRY_SIZE);
++ PRINT_BUFF_FLAG(TRACE_SCSI, "FCP CDB", atio->fcp_cmnd.cdb,
++ sizeof(atio->fcp_cmnd.cdb));
++ if (unlikely(atio->exchange_addr ==
++ ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
++ TRACE(TRACE_OUT_OF_MEM, "qla2x00t(%ld): ATIO_TYPE7 "
++ "received with UNKNOWN exchange address, "
++ "sending QUEUE_FULL", ha->instance);
++ q24_send_busy(ha, atio, SAM_STAT_TASK_SET_FULL);
++ break;
++ }
++ if (likely(atio->fcp_cmnd.task_mgmt_flags == 0))
++ rc = q2t_send_cmd_to_scst(ha, (atio_t *)atio);
++ else
++ rc = q2t_handle_task_mgmt(ha, atio);
++ if (unlikely(rc != 0)) {
++ if (rc == -ESRCH) {
++#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
++ q24_send_busy(ha, atio, SAM_STAT_BUSY);
++#else
++ q24_send_term_exchange(ha, NULL, atio, 1);
++#endif
++ } else {
++ PRINT_INFO("qla2x00t(%ld): Unable to send "
++ "command to SCST, sending BUSY status",
++ ha->instance);
++ q24_send_busy(ha, atio, SAM_STAT_BUSY);
++ }
++ }
++ break;
++
++ case IMMED_NOTIFY_TYPE:
++ {
++ notify_entry_t *pkt = (notify_entry_t *)atio;
++ if (unlikely(pkt->entry_status != 0)) {
++ PRINT_ERROR("qla2x00t(%ld): Received ATIO packet %x "
++ "with error status %x", ha->instance,
++ pkt->entry_type, pkt->entry_status);
++ break;
++ }
++ TRACE_DBG("%s", "IMMED_NOTIFY ATIO");
++ q2t_handle_imm_notify(ha, pkt);
++ break;
++ }
++
++ default:
++		PRINT_ERROR("qla2x00t(%ld): Received unknown ATIO packet "
++ "type %x", ha->instance, atio->entry_type);
++ break;
++ }
++
++ tgt->irq_cmd_count--;
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* ha->hardware_lock supposed to be held on entry */
++/* called via callback from qla2xxx */
++static void q2t_response_pkt(scsi_qla_host_t *ha, response_t *pkt)
++{
++ struct q2t_tgt *tgt = ha->tgt;
++
++ TRACE_ENTRY();
++
++ if (unlikely(tgt == NULL)) {
++ PRINT_ERROR("qla2x00t(%ld): Response pkt %x received, but no "
++ "tgt (ha %p)", ha->instance, pkt->entry_type, ha);
++ goto out;
++ }
++
++ TRACE(TRACE_SCSI, "qla2x00t(%ld): pkt %p: T %02x C %02x S %02x "
++ "handle %#x", ha->instance, pkt, pkt->entry_type,
++ pkt->entry_count, pkt->entry_status, pkt->handle);
++
++ /*
++	 * In tgt_stop mode we should also allow all requests to pass.
++	 * Otherwise, some commands would get stuck.
++ */
++
++ if (unlikely(pkt->entry_status != 0)) {
++ PRINT_ERROR("qla2x00t(%ld): Received response packet %x "
++ "with error status %x", ha->instance, pkt->entry_type,
++ pkt->entry_status);
++ switch (pkt->entry_type) {
++ case ACCEPT_TGT_IO_TYPE:
++ case IMMED_NOTIFY_TYPE:
++ case ABTS_RECV_24XX:
++ goto out;
++ default:
++ break;
++ }
++ }
++
++ tgt->irq_cmd_count++;
++
++ switch (pkt->entry_type) {
++ case CTIO_TYPE7:
++ {
++ ctio7_fw_entry_t *entry = (ctio7_fw_entry_t *)pkt;
++ TRACE_DBG("CTIO_TYPE7: instance %ld",
++ ha->instance);
++ TRACE_BUFFER("Incoming CTIO7 packet data", entry,
++ REQUEST_ENTRY_SIZE);
++ q2t_do_ctio_completion(ha, entry->handle,
++ le16_to_cpu(entry->status)|(pkt->entry_status << 16),
++ entry);
++ break;
++ }
++
++ case ACCEPT_TGT_IO_TYPE:
++ {
++ atio_entry_t *atio;
++ int rc;
++ atio = (atio_entry_t *)pkt;
++ TRACE_DBG("ACCEPT_TGT_IO instance %ld status %04x "
++ "lun %04x read/write %d data_length %04x "
++ "target_id %02x rx_id %04x ",
++ ha->instance, le16_to_cpu(atio->status),
++ le16_to_cpu(atio->lun),
++ atio->execution_codes,
++ le32_to_cpu(atio->data_length),
++ GET_TARGET_ID(ha, atio), atio->rx_id);
++ TRACE_BUFFER("Incoming ATIO packet data", atio,
++ REQUEST_ENTRY_SIZE);
++ if (atio->status != __constant_cpu_to_le16(ATIO_CDB_VALID)) {
++ PRINT_ERROR("qla2x00t(%ld): ATIO with error "
++ "status %x received", ha->instance,
++ le16_to_cpu(atio->status));
++ break;
++ }
++ TRACE_BUFFER("Incoming ATIO packet data", atio, REQUEST_ENTRY_SIZE);
++ PRINT_BUFF_FLAG(TRACE_SCSI, "FCP CDB", atio->cdb,
++ sizeof(atio->cdb));
++ rc = q2t_send_cmd_to_scst(ha, (atio_t *)atio);
++ if (unlikely(rc != 0)) {
++ if (rc == -ESRCH) {
++#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
++ q2x_send_busy(ha, atio);
++#else
++ q2x_send_term_exchange(ha, NULL, atio, 1);
++#endif
++ } else {
++ PRINT_INFO("qla2x00t(%ld): Unable to send "
++ "command to SCST, sending BUSY status",
++ ha->instance);
++ q2x_send_busy(ha, atio);
++ }
++ }
++ }
++ break;
++
++ case CONTINUE_TGT_IO_TYPE:
++ {
++ ctio_common_entry_t *entry = (ctio_common_entry_t *)pkt;
++ TRACE_DBG("CONTINUE_TGT_IO: instance %ld", ha->instance);
++ TRACE_BUFFER("Incoming CTIO packet data", entry,
++ REQUEST_ENTRY_SIZE);
++ q2t_do_ctio_completion(ha, entry->handle,
++ le16_to_cpu(entry->status)|(pkt->entry_status << 16),
++ entry);
++ break;
++ }
++
++ case CTIO_A64_TYPE:
++ {
++ ctio_common_entry_t *entry = (ctio_common_entry_t *)pkt;
++ TRACE_DBG("CTIO_A64: instance %ld", ha->instance);
++ TRACE_BUFFER("Incoming CTIO_A64 packet data", entry,
++ REQUEST_ENTRY_SIZE);
++ q2t_do_ctio_completion(ha, entry->handle,
++ le16_to_cpu(entry->status)|(pkt->entry_status << 16),
++ entry);
++ break;
++ }
++
++ case IMMED_NOTIFY_TYPE:
++ TRACE_DBG("%s", "IMMED_NOTIFY");
++ q2t_handle_imm_notify(ha, (notify_entry_t *)pkt);
++ break;
++
++ case NOTIFY_ACK_TYPE:
++ if (tgt->notify_ack_expected > 0) {
++ nack_entry_t *entry = (nack_entry_t *)pkt;
++ TRACE_DBG("NOTIFY_ACK seq %08x status %x",
++ le16_to_cpu(entry->seq_id),
++ le16_to_cpu(entry->status));
++ TRACE_BUFFER("Incoming NOTIFY_ACK packet data", pkt,
++ RESPONSE_ENTRY_SIZE);
++ tgt->notify_ack_expected--;
++ if (entry->status != __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
++ PRINT_ERROR("qla2x00t(%ld): NOTIFY_ACK "
++ "failed %x", ha->instance,
++ le16_to_cpu(entry->status));
++ }
++ } else {
++ PRINT_ERROR("qla2x00t(%ld): Unexpected NOTIFY_ACK "
++ "received", ha->instance);
++ }
++ break;
++
++ case ABTS_RECV_24XX:
++ TRACE_DBG("ABTS_RECV_24XX: instance %ld", ha->instance);
++ TRACE_BUFF_FLAG(TRACE_BUFF, "Incoming ABTS_RECV "
++ "packet data", pkt, REQUEST_ENTRY_SIZE);
++ q24_handle_abts(ha, (abts24_recv_entry_t *)pkt);
++ break;
++
++ case ABTS_RESP_24XX:
++ if (tgt->abts_resp_expected > 0) {
++ abts24_resp_fw_entry_t *entry =
++ (abts24_resp_fw_entry_t *)pkt;
++ TRACE_DBG("ABTS_RESP_24XX: compl_status %x",
++ entry->compl_status);
++ TRACE_BUFF_FLAG(TRACE_BUFF, "Incoming ABTS_RESP "
++ "packet data", pkt, REQUEST_ENTRY_SIZE);
++ tgt->abts_resp_expected--;
++ if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
++ if ((entry->error_subcode1 == 0x1E) &&
++ (entry->error_subcode2 == 0)) {
++ /*
++				 * We've got a race here: the aborted exchange was
++				 * not terminated, i.e. the response for the aborted
++				 * command was sent between the time the abort
++				 * request was received and when it was processed.
++				 * Unfortunately, the firmware has a silly
++				 * requirement that all aborted exchanges must be
++				 * explicitly terminated, otherwise it refuses to
++				 * send responses for the abort requests. So, we
++				 * have to (re)terminate the exchange and retry
++				 * the abort response.
++ */
++ q24_retry_term_exchange(ha, entry);
++ } else
++ PRINT_ERROR("qla2x00t(%ld): ABTS_RESP_24XX "
++ "failed %x (subcode %x:%x)", ha->instance,
++ entry->compl_status, entry->error_subcode1,
++ entry->error_subcode2);
++ }
++ } else {
++ PRINT_ERROR("qla2x00t(%ld): Unexpected ABTS_RESP_24XX "
++ "received", ha->instance);
++ }
++ break;
++
++ case MODIFY_LUN_TYPE:
++ if (tgt->modify_lun_expected > 0) {
++ modify_lun_entry_t *entry = (modify_lun_entry_t *)pkt;
++ TRACE_DBG("MODIFY_LUN %x, imm %c%d, cmd %c%d",
++ entry->status,
++ (entry->operators & MODIFY_LUN_IMM_ADD) ? '+'
++ : (entry->operators & MODIFY_LUN_IMM_SUB) ? '-'
++ : ' ',
++ entry->immed_notify_count,
++ (entry->operators & MODIFY_LUN_CMD_ADD) ? '+'
++ : (entry->operators & MODIFY_LUN_CMD_SUB) ? '-'
++ : ' ',
++ entry->command_count);
++ tgt->modify_lun_expected--;
++ if (entry->status != MODIFY_LUN_SUCCESS) {
++ PRINT_ERROR("qla2x00t(%ld): MODIFY_LUN "
++ "failed %x", ha->instance,
++ entry->status);
++ }
++ } else {
++ PRINT_ERROR("qla2x00t(%ld): Unexpected MODIFY_LUN "
++ "received", (ha != NULL) ? (long)ha->instance : -1);
++ }
++ break;
++
++ case ENABLE_LUN_TYPE:
++ {
++ elun_entry_t *entry = (elun_entry_t *)pkt;
++ TRACE_DBG("ENABLE_LUN %x imm %u cmd %u ",
++ entry->status, entry->immed_notify_count,
++ entry->command_count);
++ if (entry->status == ENABLE_LUN_ALREADY_ENABLED) {
++ TRACE_DBG("LUN is already enabled: %#x",
++ entry->status);
++ entry->status = ENABLE_LUN_SUCCESS;
++ } else if (entry->status == ENABLE_LUN_RC_NONZERO) {
++ TRACE_DBG("ENABLE_LUN succeeded, but with "
++ "error: %#x", entry->status);
++ entry->status = ENABLE_LUN_SUCCESS;
++ } else if (entry->status != ENABLE_LUN_SUCCESS) {
++ PRINT_ERROR("qla2x00t(%ld): ENABLE_LUN "
++ "failed %x", ha->instance, entry->status);
++ qla_clear_tgt_mode(ha);
++ } /* else success */
++ break;
++ }
++
++ default:
++ PRINT_ERROR("qla2x00t(%ld): Received unknown response pkt "
++ "type %x", ha->instance, pkt->entry_type);
++ break;
++ }
++
++ tgt->irq_cmd_count--;
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
++ */
++static void q2t_async_event(uint16_t code, scsi_qla_host_t *ha,
++ uint16_t *mailbox)
++{
++ struct q2t_tgt *tgt = ha->tgt;
++
++ TRACE_ENTRY();
++
++ if (unlikely(tgt == NULL)) {
++ TRACE_DBG("ASYNC EVENT %#x, but no tgt (ha %p)", code, ha);
++ goto out;
++ }
++
++ /*
++	 * In tgt_stop mode we should also allow all requests to pass.
++	 * Otherwise, some commands would get stuck.
++ */
++
++ tgt->irq_cmd_count++;
++
++ switch (code) {
++ case MBA_RESET: /* Reset */
++ case MBA_SYSTEM_ERR: /* System Error */
++ case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
++ case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
++ case MBA_ATIO_TRANSFER_ERR: /* ATIO Queue Transfer Error */
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): System error async event %#x "
++			"occurred", ha->instance, code);
++ break;
++
++ case MBA_LOOP_UP:
++ {
++		TRACE(TRACE_MGMT, "qla2x00t(%ld): Async LOOP_UP occurred "
++ "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", ha->instance,
++ le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
++ le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
++ if (tgt->link_reinit_iocb_pending) {
++ q24_send_notify_ack(ha, &tgt->link_reinit_iocb, 0, 0, 0);
++ tgt->link_reinit_iocb_pending = 0;
++ }
++ break;
++ }
++
++ case MBA_LIP_OCCURRED:
++ case MBA_LOOP_DOWN:
++ case MBA_LIP_RESET:
++		TRACE(TRACE_MGMT, "qla2x00t(%ld): Async event %#x occurred "
++ "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", ha->instance,
++ code, le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
++ le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
++ break;
++
++ case MBA_PORT_UPDATE:
++ case MBA_RSCN_UPDATE:
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): Port update async event %#x "
++			"occurred: updating the ports database (m[1]=%x, m[2]=%x, "
++ "m[3]=%x, m[4]=%x)", ha->instance, code,
++ le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
++ le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
++ /* .mark_all_devices_lost() is handled by the initiator driver */
++ break;
++
++ default:
++		TRACE(TRACE_MGMT, "qla2x00t(%ld): Async event %#x occurred: "
++ "ignore (m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)",
++ ha->instance, code,
++ le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
++ le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
++ break;
++ }
++
++ tgt->irq_cmd_count--;
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static int q2t_get_target_name(scsi_qla_host_t *ha, char **wwn)
++{
++ const int wwn_len = 3*WWN_SIZE+2;
++ int res = 0;
++ char *name;
++
++ name = kmalloc(wwn_len, GFP_KERNEL);
++ if (name == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "%s", "qla2x00t: Allocation of tgt "
++ "name failed");
++ res = -ENOMEM;
++ goto out;
++ }
++
++ sprintf(name, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
++ ha->port_name[0], ha->port_name[1],
++ ha->port_name[2], ha->port_name[3],
++ ha->port_name[4], ha->port_name[5],
++ ha->port_name[6], ha->port_name[7]);
++
++ *wwn = name;
++
++out:
++ return res;
++}
++
++static int q24_get_loop_id(scsi_qla_host_t *ha, atio7_entry_t *atio7,
++ uint16_t *loop_id)
++{
++ dma_addr_t gid_list_dma;
++ struct gid_list_info *gid_list;
++ char *id_iter;
++ int res, rc, i;
++ uint16_t entries;
++
++ TRACE_ENTRY();
++
++ gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
++ &gid_list_dma, GFP_KERNEL);
++ if (gid_list == NULL) {
++		PRINT_ERROR("qla2x00t(%ld): DMA allocation of %zd bytes failed",
++ ha->instance, GID_LIST_SIZE);
++ res = -ENOMEM;
++ goto out;
++ }
++
++ /* Get list of logged in devices */
++ rc = qla2x00_get_id_list(ha, gid_list, gid_list_dma, &entries);
++ if (rc != QLA_SUCCESS) {
++ PRINT_ERROR("qla2x00t(%ld): get_id_list() failed: %x",
++ ha->instance, rc);
++ res = -1;
++ goto out_free_id_list;
++ }
++
++ id_iter = (char *)gid_list;
++ res = -1;
++ for (i = 0; i < entries; i++) {
++ struct gid_list_info *gid = (struct gid_list_info *)id_iter;
++ if ((gid->al_pa == atio7->fcp_hdr.s_id[2]) &&
++ (gid->area == atio7->fcp_hdr.s_id[1]) &&
++ (gid->domain == atio7->fcp_hdr.s_id[0])) {
++ *loop_id = le16_to_cpu(gid->loop_id);
++ res = 0;
++ break;
++ }
++ id_iter += ha->gid_list_info_size;
++ }
++
++ if (res != 0) {
++ if ((atio7->fcp_hdr.s_id[0] == 0xFF) &&
++ (atio7->fcp_hdr.s_id[1] == 0xFC)) {
++ /*
++			 * This is the Domain Controller, so it should be OK to drop
++ * SCSI commands from it.
++ */
++ TRACE_MGMT_DBG("Unable to find initiator with S_ID "
++ "%x:%x:%x", atio7->fcp_hdr.s_id[0],
++ atio7->fcp_hdr.s_id[1], atio7->fcp_hdr.s_id[2]);
++ } else
++ PRINT_ERROR("qla2x00t(%ld): Unable to find initiator with "
++ "S_ID %x:%x:%x", ha->instance,
++ atio7->fcp_hdr.s_id[0], atio7->fcp_hdr.s_id[1],
++ atio7->fcp_hdr.s_id[2]);
++ }
++
++out_free_id_list:
++ dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, gid_list, gid_list_dma);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* Must be called under tgt_mutex */
++static struct q2t_sess *q2t_make_local_sess(scsi_qla_host_t *ha, atio_t *atio)
++{
++ struct q2t_sess *sess = NULL;
++ fc_port_t *fcport = NULL;
++ uint16_t loop_id = 0xFFFF; /* to remove warning */
++ int rc;
++
++ TRACE_ENTRY();
++
++ if (IS_FWI2_CAPABLE(ha)) {
++ rc = q24_get_loop_id(ha, (atio7_entry_t *)atio, &loop_id);
++ if (rc != 0)
++ goto out;
++ } else
++ loop_id = GET_TARGET_ID(ha, (atio_entry_t *)atio);
++
++ fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
++ if (fcport == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): Allocation of tmp FC port failed",
++ ha->instance);
++ goto out;
++ }
++
++ TRACE_MGMT_DBG("loop_id %d", loop_id);
++
++ fcport->loop_id = loop_id;
++
++ rc = qla2x00_get_port_database(ha, fcport, 0);
++ if (rc != QLA_SUCCESS) {
++ PRINT_ERROR("qla2x00t(%ld): Failed to retrieve fcport "
++ "information -- get_port_database() returned %x "
++ "(loop_id=0x%04x)", ha->instance, rc, loop_id);
++ goto out_free_fcport;
++ }
++
++ sess = q2t_create_sess(ha, fcport, true);
++
++out_free_fcport:
++ kfree(fcport);
++
++out:
++ TRACE_EXIT_HRES((unsigned long)sess);
++ return sess;
++}
++
++static int q2t_exec_sess_work(struct q2t_tgt *tgt,
++ struct q2t_sess_work_param *prm)
++{
++ scsi_qla_host_t *ha = tgt->ha;
++ int res = 0;
++ struct q2t_sess *sess = NULL;
++ struct q2t_cmd *cmd = prm->cmd;
++ atio_t *atio = (atio_t *)&cmd->atio;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("cmd %p", cmd);
++
++ mutex_lock(&ha->tgt_mutex);
++ spin_lock_irq(&ha->hardware_lock);
++
++ if (tgt->tgt_stop)
++ goto send;
++
++ if (IS_FWI2_CAPABLE(ha)) {
++ atio7_entry_t *a = (atio7_entry_t *)atio;
++ sess = q2t_find_sess_by_s_id(tgt, a->fcp_hdr.s_id);
++ } else
++ sess = q2t_find_sess_by_loop_id(tgt,
++ GET_TARGET_ID(ha, (atio_entry_t *)atio));
++
++ if (sess != NULL) {
++ TRACE_MGMT_DBG("sess %p found", sess);
++ q2t_sess_get(sess);
++ } else {
++ /*
++ * We are under tgt_mutex, so a new sess can't be added
++ * behind us.
++ */
++ spin_unlock_irq(&ha->hardware_lock);
++ sess = q2t_make_local_sess(ha, atio);
++ spin_lock_irq(&ha->hardware_lock);
++ /* sess has got an extra creation ref */
++ }
++
++send:
++ if (!tgt->tm_to_unknown && !tgt->tgt_stop && (sess != NULL)) {
++ TRACE_MGMT_DBG("Sending work cmd %p to SCST", cmd);
++ res = q2t_do_send_cmd_to_scst(ha, cmd, sess);
++ } else {
++ /*
++		 * The cmd might already have been aborted behind us, so be
++		 * safe and abort it. It should be OK; the initiator will retry
++		 * it. It has not been sent to SCST yet, so pass NULL as the
++		 * second argument.
++ */
++ TRACE_MGMT_DBG("Terminating work cmd %p", cmd);
++ if (IS_FWI2_CAPABLE(ha))
++ q24_send_term_exchange(ha, NULL , &cmd->atio.atio7, 1);
++ else
++ q2x_send_term_exchange(ha, NULL, &cmd->atio.atio2x, 1);
++ q2t_free_cmd(cmd);
++ }
++
++ if (sess != NULL)
++ q2t_sess_put(sess);
++
++ spin_unlock_irq(&ha->hardware_lock);
++ mutex_unlock(&ha->tgt_mutex);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void q2t_sess_work_fn(struct work_struct *work)
++{
++ struct q2t_tgt *tgt = container_of(work, struct q2t_tgt, sess_work);
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("Sess work (tgt %p)", tgt);
++
++ spin_lock_irq(&tgt->sess_work_lock);
++ while (!list_empty(&tgt->sess_works_list)) {
++ int rc;
++ struct q2t_sess_work_param *prm = list_entry(
++ tgt->sess_works_list.next, typeof(*prm),
++ sess_works_list_entry);
++
++ /*
++		 * This work can be scheduled on several CPUs at a time, so we
++		 * must delete the entry to avoid double processing.
++ */
++ list_del(&prm->sess_works_list_entry);
++
++ spin_unlock_irq(&tgt->sess_work_lock);
++
++ rc = q2t_exec_sess_work(tgt, prm);
++
++ spin_lock_irq(&tgt->sess_work_lock);
++
++ if (rc != 0) {
++ TRACE_MGMT_DBG("Unable to complete sess work (tgt %p), "
++ "freeing cmd %p", tgt, prm->cmd);
++ q2t_free_cmd(prm->cmd);
++ }
++
++ kfree(prm);
++ }
++ spin_unlock_irq(&tgt->sess_work_lock);
++
++ spin_lock_irq(&tgt->ha->hardware_lock);
++ spin_lock(&tgt->sess_work_lock);
++ if (list_empty(&tgt->sess_works_list)) {
++ tgt->sess_works_pending = 0;
++ tgt->tm_to_unknown = 0;
++ }
++ spin_unlock(&tgt->sess_work_lock);
++ spin_unlock_irq(&tgt->ha->hardware_lock);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* ha->hardware_lock supposed to be held and IRQs off */
++static void q2t_cleanup_hw_pending_cmd(scsi_qla_host_t *ha, struct q2t_cmd *cmd)
++{
++ uint32_t h;
++
++ for (h = 0; h < MAX_OUTSTANDING_COMMANDS; h++) {
++ if (ha->cmds[h] == cmd) {
++ TRACE_DBG("Clearing handle %d for cmd %p", h, cmd);
++ ha->cmds[h] = NULL;
++ break;
++ }
++ }
++ return;
++}
++
++static void q2t_on_hw_pending_cmd_timeout(struct scst_cmd *scst_cmd)
++{
++ struct q2t_cmd *cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
++ struct q2t_tgt *tgt = cmd->tgt;
++ scsi_qla_host_t *ha = tgt->ha;
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("Cmd %p HW pending for too long (state %x)", cmd,
++ cmd->state);
++
++ spin_lock_irqsave(&ha->hardware_lock, flags);
++
++ if (cmd->sg_mapped)
++ q2t_unmap_sg(ha, cmd);
++
++ if (cmd->state == Q2T_STATE_PROCESSED) {
++ TRACE_MGMT_DBG("Force finishing cmd %p", cmd);
++ } else if (cmd->state == Q2T_STATE_NEED_DATA) {
++ TRACE_MGMT_DBG("Force rx_data cmd %p", cmd);
++
++ q2t_cleanup_hw_pending_cmd(ha, cmd);
++
++ scst_rx_data(scst_cmd, SCST_RX_STATUS_ERROR_FATAL,
++ SCST_CONTEXT_THREAD);
++ goto out_unlock;
++ } else if (cmd->state == Q2T_STATE_ABORTED) {
++ TRACE_MGMT_DBG("Force finishing aborted cmd %p (tag %d)",
++ cmd, cmd->tag);
++ } else {
++ PRINT_ERROR("qla2x00t(%ld): A command in state (%d) should "
++ "not be HW pending", ha->instance, cmd->state);
++ goto out_unlock;
++ }
++
++ q2t_cleanup_hw_pending_cmd(ha, cmd);
++
++ scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_FAILED);
++ scst_tgt_cmd_done(scst_cmd, SCST_CONTEXT_THREAD);
++
++out_unlock:
++ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++ TRACE_EXIT();
++ return;
++}
++
++/* Must be called under tgt_host_action_mutex */
++static int q2t_add_target(scsi_qla_host_t *ha)
++{
++ int res, rc;
++ char *wwn;
++ int sg_tablesize;
++ struct q2t_tgt *tgt;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Registering target for host %ld(%p)", ha->host_no, ha);
++
++ BUG_ON((ha->q2t_tgt != NULL) || (ha->tgt != NULL));
++
++ tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
++ if (tgt == NULL) {
++ TRACE(TRACE_OUT_OF_MEM, "qla2x00t: %s", "Allocation of tgt "
++ "failed");
++ res = -ENOMEM;
++ goto out;
++ }
++
++ tgt->ha = ha;
++ init_waitqueue_head(&tgt->waitQ);
++ INIT_LIST_HEAD(&tgt->sess_list);
++ INIT_LIST_HEAD(&tgt->del_sess_list);
++ init_timer(&tgt->sess_del_timer);
++ tgt->sess_del_timer.data = (unsigned long)tgt;
++ tgt->sess_del_timer.function = q2t_del_sess_timer_fn;
++ spin_lock_init(&tgt->sess_work_lock);
++ INIT_WORK(&tgt->sess_work, q2t_sess_work_fn);
++ INIT_LIST_HEAD(&tgt->sess_works_list);
++ spin_lock_init(&tgt->srr_lock);
++ INIT_LIST_HEAD(&tgt->srr_ctio_list);
++ INIT_LIST_HEAD(&tgt->srr_imm_list);
++ INIT_WORK(&tgt->srr_work, q2t_handle_srr_work);
++
++ ha->q2t_tgt = tgt;
++
++	res = q2t_get_target_name(ha, &wwn);
++	if (res != 0)
++		goto out_free;
++
++ tgt->scst_tgt = scst_register_target(&tgt2x_template, wwn);
++
++ kfree(wwn);
++
++ if (!tgt->scst_tgt) {
++ PRINT_ERROR("qla2x00t(%ld): scst_register_target() "
++ "failed for host %ld(%p)", ha->instance,
++ ha->host_no, ha);
++ res = -ENOMEM;
++ goto out_free;
++ }
++
++ if (IS_FWI2_CAPABLE(ha)) {
++ PRINT_INFO("qla2x00t(%ld): using 64 Bit PCI "
++ "addressing", ha->instance);
++ tgt->tgt_enable_64bit_addr = 1;
++ /* 3 is reserved */
++ sg_tablesize =
++ QLA_MAX_SG_24XX(ha->request_q_length - 3);
++ tgt->datasegs_per_cmd = DATASEGS_PER_COMMAND_24XX;
++ tgt->datasegs_per_cont = DATASEGS_PER_CONT_24XX;
++ } else {
++ if (ha->flags.enable_64bit_addressing) {
++ PRINT_INFO("qla2x00t(%ld): 64 Bit PCI "
++ "addressing enabled", ha->instance);
++ tgt->tgt_enable_64bit_addr = 1;
++ /* 3 is reserved */
++ sg_tablesize =
++ QLA_MAX_SG64(ha->request_q_length - 3);
++ tgt->datasegs_per_cmd = DATASEGS_PER_COMMAND64;
++ tgt->datasegs_per_cont = DATASEGS_PER_CONT64;
++ } else {
++ PRINT_INFO("qla2x00t(%ld): Using 32 Bit "
++ "PCI addressing", ha->instance);
++ sg_tablesize =
++ QLA_MAX_SG32(ha->request_q_length - 3);
++ tgt->datasegs_per_cmd = DATASEGS_PER_COMMAND32;
++ tgt->datasegs_per_cont = DATASEGS_PER_CONT32;
++ }
++ }
++
++ rc = sysfs_create_link(scst_sysfs_get_tgt_kobj(tgt->scst_tgt),
++ &ha->host->shost_dev.kobj, "host");
++ if (rc != 0)
++ PRINT_ERROR("qla2x00t(%ld): Unable to create \"host\" link for "
++ "target %s", ha->instance,
++ scst_get_tgt_name(tgt->scst_tgt));
++
++ scst_tgt_set_sg_tablesize(tgt->scst_tgt, sg_tablesize);
++ scst_tgt_set_tgt_priv(tgt->scst_tgt, tgt);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free:
++ ha->q2t_tgt = NULL;
++ kfree(tgt);
++ goto out;
++}
++
++/* Must be called under tgt_host_action_mutex */
++static int q2t_remove_target(scsi_qla_host_t *ha)
++{
++ TRACE_ENTRY();
++
++ if ((ha->q2t_tgt == NULL) || (ha->tgt != NULL)) {
++ PRINT_ERROR("qla2x00t(%ld): Can't remove "
++ "existing target", ha->instance);
++ }
++
++ TRACE_DBG("Unregistering target for host %ld(%p)", ha->host_no, ha);
++	scst_unregister_target(ha->q2t_tgt->scst_tgt);
++ /*
++ * Free of tgt happens via callback q2t_target_release
++ * called from scst_unregister_target, so we shouldn't touch
++ * it again.
++ */
++
++ TRACE_EXIT();
++ return 0;
++}
++
++static int q2t_host_action(scsi_qla_host_t *ha,
++ qla2x_tgt_host_action_t action)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ BUG_ON(ha == NULL);
++
++ /* To sync with q2t_exit() */
++ if (down_read_trylock(&q2t_unreg_rwsem) == 0)
++ goto out;
++
++ mutex_lock(&ha->tgt_host_action_mutex);
++
++ switch (action) {
++ case ADD_TARGET:
++ res = q2t_add_target(ha);
++ break;
++ case REMOVE_TARGET:
++ res = q2t_remove_target(ha);
++ break;
++ case ENABLE_TARGET_MODE:
++ {
++ fc_port_t *fcport;
++
++ if (qla_tgt_mode_enabled(ha)) {
++ PRINT_INFO("qla2x00t(%ld): Target mode already "
++ "enabled", ha->instance);
++ break;
++ }
++
++ if ((ha->q2t_tgt == NULL) || (ha->tgt != NULL)) {
++ PRINT_ERROR("qla2x00t(%ld): Can't enable target mode "
++				"for a non-existent target", ha->instance);
++ break;
++ }
++
++ PRINT_INFO("qla2x00t(%ld): Enabling target mode",
++ ha->instance);
++
++ spin_lock_irq(&ha->hardware_lock);
++ ha->tgt = ha->q2t_tgt;
++ ha->tgt->tgt_stop = 0;
++ spin_unlock_irq(&ha->hardware_lock);
++ list_for_each_entry_rcu(fcport, &ha->fcports, list) {
++ q2t_fc_port_added(ha, fcport);
++ }
++ TRACE_DBG("Enable tgt mode for host %ld(%ld,%p)",
++ ha->host_no, ha->instance, ha);
++ qla2x00_enable_tgt_mode(ha);
++ break;
++ }
++
++ case DISABLE_TARGET_MODE:
++ if (!qla_tgt_mode_enabled(ha)) {
++ PRINT_INFO("qla2x00t(%ld): Target mode already "
++ "disabled", ha->instance);
++ break;
++ }
++
++ PRINT_INFO("qla2x00t(%ld): Disabling target mode",
++ ha->instance);
++
++ BUG_ON(ha->tgt == NULL);
++
++ q2t_target_stop(ha->tgt->scst_tgt);
++ break;
++
++ default:
++ PRINT_ERROR("qla2x00t(%ld): %s: unsupported action %d",
++ ha->instance, __func__, action);
++ res = -EINVAL;
++ break;
++ }
++
++ mutex_unlock(&ha->tgt_host_action_mutex);
++
++ up_read(&q2t_unreg_rwsem);
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int q2t_enable_tgt(struct scst_tgt *scst_tgt, bool enable)
++{
++ struct q2t_tgt *tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
++ scsi_qla_host_t *ha = tgt->ha;
++ int res;
++
++ if (enable)
++ res = q2t_host_action(ha, ENABLE_TARGET_MODE);
++ else
++ res = q2t_host_action(ha, DISABLE_TARGET_MODE);
++
++ return res;
++}
++
++static bool q2t_is_tgt_enabled(struct scst_tgt *scst_tgt)
++{
++ struct q2t_tgt *tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
++ scsi_qla_host_t *ha = tgt->ha;
++
++ return qla_tgt_mode_enabled(ha);
++}
++
++static int q2t_get_initiator_port_transport_id(struct scst_session *scst_sess,
++ uint8_t **transport_id)
++{
++ struct q2t_sess *sess;
++ int res = 0;
++ int tr_id_size;
++ uint8_t *tr_id;
++
++ TRACE_ENTRY();
++
++ if (scst_sess == NULL) {
++ res = SCSI_TRANSPORTID_PROTOCOLID_FCP2;
++ goto out;
++ }
++
++ sess = (struct q2t_sess *)scst_sess_get_tgt_priv(scst_sess);
++
++ tr_id_size = 24;
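++	/* FCP TransportID is 24 bytes: protocol ID in byte 0, WWPN at bytes 8-15 */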
++
++ tr_id = kzalloc(tr_id_size, GFP_KERNEL);
++ if (tr_id == NULL) {
++ PRINT_ERROR("qla2x00t: Allocation of TransportID (size %d) "
++ "failed", tr_id_size);
++ res = -ENOMEM;
++ goto out;
++ }
++
++ tr_id[0] = SCSI_TRANSPORTID_PROTOCOLID_FCP2;
++
++ BUILD_BUG_ON(sizeof(sess->port_name) != 8);
++ memcpy(&tr_id[8], sess->port_name, 8);
++
++ *transport_id = tr_id;
++
++ TRACE_BUFF_FLAG(TRACE_DEBUG, "Created tid", tr_id, tr_id_size);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t q2t_show_expl_conf_enabled(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buffer)
++{
++ struct scst_tgt *scst_tgt;
++ struct q2t_tgt *tgt;
++ scsi_qla_host_t *ha;
++ ssize_t size;
++
++ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
++ ha = tgt->ha;
++
++ size = scnprintf(buffer, PAGE_SIZE, "%d\n%s", ha->enable_explicit_conf,
++ ha->enable_explicit_conf ? SCST_SYSFS_KEY_MARK "\n" : "");
++
++ return size;
++}
++
++static ssize_t q2t_store_expl_conf_enabled(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buffer, size_t size)
++{
++ struct scst_tgt *scst_tgt;
++ struct q2t_tgt *tgt;
++ scsi_qla_host_t *ha;
++ unsigned long flags;
++
++ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
++ ha = tgt->ha;
++
++ spin_lock_irqsave(&ha->hardware_lock, flags);
++
++ switch (buffer[0]) {
++ case '0':
++ ha->enable_explicit_conf = 0;
++		PRINT_INFO("qla2x00t(%ld): explicit confirmations disabled",
++ ha->instance);
++ break;
++ case '1':
++ ha->enable_explicit_conf = 1;
++		PRINT_INFO("qla2x00t(%ld): explicit confirmations enabled",
++ ha->instance);
++ break;
++ default:
++ PRINT_ERROR("%s: qla2x00t(%ld): Requested action not "
++ "understood: %s", __func__, ha->instance, buffer);
++ break;
++ }
++
++ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++
++ return size;
++}
++
++static ssize_t q2t_abort_isp_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buffer, size_t size)
++{
++ struct scst_tgt *scst_tgt;
++ struct q2t_tgt *tgt;
++ scsi_qla_host_t *ha;
++
++ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
++ ha = tgt->ha;
++
++ PRINT_INFO("qla2x00t(%ld): Aborting ISP", ha->instance);
++
++ set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
++ qla2x00_wait_for_hba_online(ha);
++
++ return size;
++}
++
++static ssize_t q2t_version_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ sprintf(buf, "%s\n", Q2T_VERSION_STRING);
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ strcat(buf, "EXTRACHECKS\n");
++#endif
++
++#ifdef CONFIG_SCST_TRACING
++ strcat(buf, "TRACING\n");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG
++ strcat(buf, "DEBUG\n");
++#endif
++
++#ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
++ strcat(buf, "QLA_TGT_DEBUG_WORK_IN_THREAD\n");
++#endif
++
++ TRACE_EXIT();
++ return strlen(buf);
++}
++
++static uint16_t q2t_get_scsi_transport_version(struct scst_tgt *scst_tgt)
++{
++ /* FCP-2 */
++ return 0x0900;
++}
++
++static uint16_t q2t_get_phys_transport_version(struct scst_tgt *scst_tgt)
++{
++ return 0x0DA0; /* FC-FS */
++}
++
++static int __init q2t_init(void)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ BUILD_BUG_ON(sizeof(atio7_entry_t) != sizeof(atio_entry_t));
++
++ PRINT_INFO("qla2x00t: Initializing QLogic Fibre Channel HBA Driver "
++ "target mode addon version %s", Q2T_VERSION_STRING);
++
++ q2t_cmd_cachep = KMEM_CACHE(q2t_cmd, SCST_SLAB_FLAGS);
++ if (q2t_cmd_cachep == NULL) {
++ res = -ENOMEM;
++ goto out;
++ }
++
++ q2t_mgmt_cmd_cachep = KMEM_CACHE(q2t_mgmt_cmd, SCST_SLAB_FLAGS);
++ if (q2t_mgmt_cmd_cachep == NULL) {
++ res = -ENOMEM;
++ goto out_cmd_free;
++ }
++
++ q2t_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
++ mempool_free_slab, q2t_mgmt_cmd_cachep);
++ if (q2t_mgmt_cmd_mempool == NULL) {
++ res = -ENOMEM;
++ goto out_kmem_free;
++ }
++
++ res = scst_register_target_template(&tgt2x_template);
++ if (res < 0)
++ goto out_mempool_free;
++
++ /*
++ * qla2xxx_tgt_register_driver() happens in q2t_target_detect
++ * called via scst_register_target_template()
++ */
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++ scst_unregister_target_template(&tgt2x_template);
++ qla2xxx_tgt_unregister_driver();
++
++out_mempool_free:
++ mempool_destroy(q2t_mgmt_cmd_mempool);
++
++out_kmem_free:
++ kmem_cache_destroy(q2t_mgmt_cmd_cachep);
++
++out_cmd_free:
++ kmem_cache_destroy(q2t_cmd_cachep);
++ goto out;
++}
++
++static void __exit q2t_exit(void)
++{
++ TRACE_ENTRY();
++
++ PRINT_INFO("qla2x00t: %s", "Unloading QLogic Fibre Channel HBA Driver "
++ "target mode addon driver");
++
++ /* To sync with q2t_host_action() */
++ down_write(&q2t_unreg_rwsem);
++
++ scst_unregister_target_template(&tgt2x_template);
++
++ /*
++ * Now we have everywhere target mode disabled and no possibilities
++ * to call us through sysfs, so we can safely remove all the references
++ * to our functions.
++ */
++ qla2xxx_tgt_unregister_driver();
++
++ mempool_destroy(q2t_mgmt_cmd_mempool);
++ kmem_cache_destroy(q2t_mgmt_cmd_cachep);
++ kmem_cache_destroy(q2t_cmd_cachep);
++
++ /* Let's make lockdep happy */
++ up_write(&q2t_unreg_rwsem);
++
++ TRACE_EXIT();
++ return;
++}
++
++module_init(q2t_init);
++module_exit(q2t_exit);
++
++MODULE_AUTHOR("Vladislav Bolkhovitin and others");
++MODULE_DESCRIPTION("Target mode addon for qla2[2,3,4,5+]xx");
++MODULE_LICENSE("GPL");
++MODULE_VERSION(Q2T_VERSION_STRING);
+diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.h linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.h
+--- orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.h
++++ linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.h
+@@ -0,0 +1,273 @@
++/*
++ * qla2x00t.h
++ *
++ * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
++ * Copyright (C) 2006 - 2010 ID7 Ltd.
++ *
++ * QLogic 22xx/23xx/24xx/25xx FC target driver.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __QLA2X00T_H
++#define __QLA2X00T_H
++
++#include <qla_def.h>
++#include <qla2x_tgt.h>
++#include <qla2x_tgt_def.h>
++
++#include <scst_debug.h>
++
++/* Version numbers, the same as for the kernel */
++#define Q2T_VERSION(a, b, c, d) (((a) << 030) + ((b) << 020) + ((c) << 010) + (d))
++#define Q2T_VERSION_CODE Q2T_VERSION(1, 0, 2, 0)
++#define Q2T_VERSION_STRING "2.0.0"
++#define Q2T_PROC_VERSION_NAME "version"
++
++#define Q2T_MAX_CDB_LEN 16
++#define Q2T_TIMEOUT 10 /* in seconds */
++
++#define Q2T_MAX_HW_PENDING_TIME 60 /* in seconds */
++
++/* Immediate notify status constants */
++#define IMM_NTFY_LIP_RESET 0x000E
++#define IMM_NTFY_LIP_LINK_REINIT 0x000F
++#define IMM_NTFY_IOCB_OVERFLOW 0x0016
++#define IMM_NTFY_ABORT_TASK 0x0020
++#define IMM_NTFY_PORT_LOGOUT 0x0029
++#define IMM_NTFY_PORT_CONFIG 0x002A
++#define IMM_NTFY_GLBL_TPRLO 0x002D
++#define IMM_NTFY_GLBL_LOGO 0x002E
++#define IMM_NTFY_RESOURCE 0x0034
++#define IMM_NTFY_MSG_RX 0x0036
++#define IMM_NTFY_SRR 0x0045
++#define IMM_NTFY_ELS 0x0046
++
++/* Immediate notify task flags */
++#define IMM_NTFY_TASK_MGMT_SHIFT 8
++
++#define Q2T_CLEAR_ACA 0x40
++#define Q2T_TARGET_RESET 0x20
++#define Q2T_LUN_RESET 0x10
++#define Q2T_CLEAR_TS 0x04
++#define Q2T_ABORT_TS 0x02
++#define Q2T_ABORT_ALL_SESS 0xFFFF
++#define Q2T_ABORT_ALL 0xFFFE
++#define Q2T_NEXUS_LOSS_SESS 0xFFFD
++#define Q2T_NEXUS_LOSS 0xFFFC
++
++/* Notify Acknowledge flags */
++#define NOTIFY_ACK_RES_COUNT BIT_8
++#define NOTIFY_ACK_CLEAR_LIP_RESET BIT_5
++#define NOTIFY_ACK_TM_RESP_CODE_VALID BIT_4
++
++/* Command's states */
++#define Q2T_STATE_NEW 0 /* New command and SCST processing it */
++#define Q2T_STATE_NEED_DATA 1 /* SCST needs data to continue */
++#define Q2T_STATE_DATA_IN 2 /* Data arrived and SCST processing it */
++#define Q2T_STATE_PROCESSED 3 /* SCST done processing */
++#define Q2T_STATE_ABORTED 4 /* Command aborted */
++
++/* Special handles */
++#define Q2T_NULL_HANDLE 0
++#define Q2T_SKIP_HANDLE (0xFFFFFFFF & ~CTIO_COMPLETION_HANDLE_MARK)
++
++/* ATIO task_codes field */
++#define ATIO_SIMPLE_QUEUE 0
++#define ATIO_HEAD_OF_QUEUE 1
++#define ATIO_ORDERED_QUEUE 2
++#define ATIO_ACA_QUEUE 4
++#define ATIO_UNTAGGED 5
++
++/* TM failed response codes, see FCP (9.4.11 FCP_RSP_INFO) */
++#define FC_TM_SUCCESS 0
++#define FC_TM_BAD_FCP_DATA 1
++#define FC_TM_BAD_CMD 2
++#define FC_TM_FCP_DATA_MISMATCH 3
++#define FC_TM_REJECT 4
++#define FC_TM_FAILED 5
++
++/*
++ * Error code of q2t_pre_xmit_response() meaning that cmd's exchange was
++ * terminated, so no more actions is needed and success should be returned
++ * to SCST. Must be different from any SCST_TGT_RES_* codes.
++ */
++#define Q2T_PRE_XMIT_RESP_CMD_ABORTED 0x1717
++
++#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
++#define pci_dma_lo32(a) (a & 0xffffffff)
++#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
++#else
++#define pci_dma_lo32(a) (a & 0xffffffff)
++#define pci_dma_hi32(a) 0
++#endif
++
++struct q2t_tgt {
++ struct scst_tgt *scst_tgt;
++ scsi_qla_host_t *ha;
++
++ /*
++ * To sync between IRQ handlers and q2t_target_release(). Needed,
++ * because req_pkt() can drop/reacquire HW lock inside. Protected by
++ * HW lock.
++ */
++ int irq_cmd_count;
++
++ int datasegs_per_cmd, datasegs_per_cont;
++
++ /* Target's flags, serialized by ha->hardware_lock */
++ unsigned int tgt_enable_64bit_addr:1; /* 64-bits PCI addressing enabled */
++ unsigned int link_reinit_iocb_pending:1;
++ unsigned int tm_to_unknown:1; /* TM to unknown session was sent */
++ unsigned int sess_works_pending:1; /* there are sess_work entries */
++
++ /*
++ * Protected by tgt_mutex AND hardware_lock for writing and tgt_mutex
++ * OR hardware_lock for reading.
++ */
++ unsigned long tgt_stop; /* the driver is being stopped */
++
++	/* Count of sessions referring to q2t_tgt. Protected by hardware_lock. */
++ int sess_count;
++
++ /* Protected by hardware_lock. Addition also protected by tgt_mutex. */
++ struct list_head sess_list;
++
++ /* Protected by hardware_lock */
++ struct list_head del_sess_list;
++ struct timer_list sess_del_timer;
++
++ spinlock_t sess_work_lock;
++ struct list_head sess_works_list;
++ struct work_struct sess_work;
++
++ notify24xx_entry_t link_reinit_iocb;
++ wait_queue_head_t waitQ;
++ int notify_ack_expected;
++ int abts_resp_expected;
++ int modify_lun_expected;
++
++ int ctio_srr_id;
++ int imm_srr_id;
++ spinlock_t srr_lock;
++ struct list_head srr_ctio_list;
++ struct list_head srr_imm_list;
++ struct work_struct srr_work;
++
++ struct list_head tgt_list_entry;
++};
++
++/*
++ * Equivalent to IT Nexus (Initiator-Target)
++ */
++struct q2t_sess {
++ uint16_t loop_id;
++ port_id_t s_id;
++
++ unsigned int conf_compl_supported:1;
++ unsigned int deleted:1;
++ unsigned int local:1;
++
++ struct scst_session *scst_sess;
++ struct q2t_tgt *tgt;
++
++ int sess_ref; /* protected by hardware_lock */
++
++ struct list_head sess_list_entry;
++ unsigned long expires;
++ struct list_head del_list_entry;
++
++ uint8_t port_name[WWN_SIZE];
++};
++
++struct q2t_cmd {
++ struct q2t_sess *sess;
++ int state;
++ struct scst_cmd *scst_cmd;
++
++ unsigned int conf_compl_supported:1;/* to save extra sess dereferences */
++ unsigned int sg_mapped:1;
++ unsigned int free_sg:1;
++ unsigned int aborted:1; /* Needed in case of SRR */
++ unsigned int write_data_transferred:1;
++
++ struct scatterlist *sg; /* cmd data buffer SG vector */
++ int sg_cnt; /* SG segments count */
++ int bufflen; /* cmd buffer length */
++ int offset;
++ scst_data_direction data_direction;
++ uint32_t tag;
++ dma_addr_t dma_handle;
++ enum dma_data_direction dma_data_direction;
++
++ uint16_t loop_id; /* to save extra sess dereferences */
++ struct q2t_tgt *tgt; /* to save extra sess dereferences */
++
++ union {
++ atio7_entry_t atio7;
++ atio_entry_t atio2x;
++ } __attribute__((packed)) atio;
++};
++
++struct q2t_sess_work_param {
++ struct list_head sess_works_list_entry;
++ struct q2t_cmd *cmd;
++};
++
++struct q2t_mgmt_cmd {
++ struct q2t_sess *sess;
++ unsigned int flags;
++#define Q24_MGMT_SEND_NACK 1
++ union {
++ atio7_entry_t atio7;
++ notify_entry_t notify_entry;
++ notify24xx_entry_t notify_entry24;
++ abts24_recv_entry_t abts;
++ } __attribute__((packed)) orig_iocb;
++};
++
++struct q2t_prm {
++ struct q2t_cmd *cmd;
++ struct q2t_tgt *tgt;
++ void *pkt;
++ struct scatterlist *sg; /* cmd data buffer SG vector */
++ int seg_cnt;
++ int req_cnt;
++ uint16_t rq_result;
++ uint16_t scsi_status;
++ unsigned char *sense_buffer;
++ int sense_buffer_len;
++ int residual;
++ int add_status_pkt;
++};
++
++struct srr_imm {
++ struct list_head srr_list_entry;
++ int srr_id;
++ union {
++ notify_entry_t notify_entry;
++ notify24xx_entry_t notify_entry24;
++ } __attribute__((packed)) imm;
++};
++
++struct srr_ctio {
++ struct list_head srr_list_entry;
++ int srr_id;
++ struct q2t_cmd *cmd;
++};
++
++#define Q2T_XMIT_DATA 1
++#define Q2T_XMIT_STATUS 2
++#define Q2T_XMIT_ALL (Q2T_XMIT_STATUS|Q2T_XMIT_DATA)
++
++#endif /* __QLA2X00T_H */
+diff -uprN orig/linux-2.6.36/Documentation/scst/README.qla2x00t linux-2.6.36/Documentation/scst/README.qla2x00t
+--- orig/linux-2.6.36/Documentation/scst/README.qla2x00t
++++ linux-2.6.36/Documentation/scst/README.qla2x00t
+@@ -0,0 +1,526 @@
++Target driver for Qlogic 22xx/23xx/24xx/25xx Fibre Channel cards
++================================================================
++
++Version 2.0.0, XX XXXXX 2010
++----------------------------
++
++This driver consists of two parts: the target mode driver itself and a
++modified initiator driver from the Linux kernel, which, in particular,
++performs all the initialization and shutdown tasks. The initiator
++driver was changed to provide target mode support and all necessary
++callbacks, but it is still capable of working as an initiator only. A
++mode in which a host acts as the initiator and the target
++simultaneously is supported as well.
++
++This version is compatible with SCST core version 2.0.0 and higher and
++Linux kernel 2.6.26 and higher. Sorry, kernels below 2.6.26 are not
++supported, because it is too hard to backport the initiator driver used
++here to older kernels.
++
++NPIV is partially supported by this driver. You can create virtual
++targets using the standard Linux interface by echoing wwpn:wwnn into
++/sys/class/fc_host/hostX/vport_create and work with them, but SCST core
++will not see those virtual targets and, hence, will not provide
++target-oriented access control for them. However, the initiator-oriented
++access control will still work very well. Note that you need
++NPIV-supporting firmware as well as NPIV-supporting switches to use
++NPIV.
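++
++For example (the WWPN:WWNN pair and host number below are only an
++illustration; the exact WWN format accepted by vport_create depends on
++your kernel version):
++
++echo "2101001b32a90001:2001001b32a90001" >/sys/class/fc_host/host4/vport_create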
++
++The original initiator driver was taken from the kernel 2.6.26. Also the
++following 2.6.26.x commits have been applied to it (upstream ID):
++048feec5548c0582ee96148c61b87cccbcb5f9be,
++031e134e5f95233d80fb1b62fdaf5e1be587597c,
++5f3a9a207f1fccde476dd31b4c63ead2967d934f,
++85821c906cf3563a00a3d98fa380a2581a7a5ff1,
++3c01b4f9fbb43fc911acd33ea7a14ea7a4f9866b,
++8eca3f39c4b11320787f7b216f63214aee8415a9.
++
++See also "ToDo" file for list of known issues and unimplemented
++features.
++
++Installation
++------------
++
++Only vanilla kernels from kernel.org and RHEL/CentOS 5.2 kernels are
++supported, but SCST should work on other (vendors') kernels, if you
++manage to successfully compile it on them. The main problem with
++vendors' kernels is that they often contain patches, which will appear
++only in the next version of the vanilla kernel, therefore it's quite
++hard to track such changes. Thus, if during compilation for some vendor
++kernel your compiler complains about redefinition of some symbol, you
++should either switch to a vanilla kernel, or add or change as necessary
++the "#if LINUX_VERSION_CODE" statement corresponding to that symbol.
++
++Before installation make sure that the link
++"/lib/modules/`your_kernel_version`/build" points to the source code for
++your currently running kernel.
++
++Then you should replace (or symlink) the "qla2xxx" subdirectory in
++kernel_source/drivers/scsi/ of the currently running kernel with the
++initiator driver from this package and, using your favorite kernel
++configuration tool, enable target mode support in the QLogic QLA2XXX
++Fibre Channel driver (CONFIG_SCSI_QLA2XXX_TARGET). Then rebuild the
++kernel and its modules. During this step you will compile the initiator
++driver. To install it, install the built kernel and its modules.
++
++Then edit qla2x00-target/Makefile and set SCST_INC_DIR variable to point
++to the directory, where SCST's public include files are located. If you
++install QLA2x00 target driver's source code in the SCST's directory,
++then SCST_INC_DIR will be set correctly for you.
++
++Also you can set SCST_DIR variable to the directory, where SCST was
++built, but this is optional. If you don't set it or set incorrectly,
++during the compilation you will get a bunch of harmless warnings like
++"WARNING: "scst_rx_data" [/XXX/qla2x00tgt.ko] undefined!"
++
++To compile the target driver, type 'make' in qla2x00-target/
++subdirectory. It will build qla2x00tgt.ko module.
++
++To install the target driver, type 'make install' in qla2x00-target/
++subdirectory. The target driver will be installed in
++/lib/modules/`your_kernel_version`/extra. To uninstall it, type 'make
++uninstall'.
++
++Usage
++-----
++
++After the drivers are loaded and the adapters are successfully
++initialized by the initiator driver, including the firmware image load,
++you should configure exported devices using the corresponding interface
++of the SCST core. It is highly recommended to use the scstadmin utility
++for that purpose.
++
++Then target mode should be enabled via the sysfs interface on a per-card
++basis, like:
++
++echo "1" >/sys/kernel/scst_tgt/targets/qla2x00t/target/enabled
++
++See below for full description of the driver's sysfs interface.
++
++With the obsolete proc interface you should instead use
++target_mode_enabled under the appropriate scsi_host entry, like:
++
++echo "1" >/sys/class/scsi_host/host0/target_mode_enabled
++
++You can find some installation and configuration HOWTOs in
++http://scst.sourceforge.net/qla2x00t-howto.html and
++https://forums.openfiler.com/viewtopic.php?id=3422.
++
++IMPORTANT USAGE NOTES
++---------------------
++
++1. It is strongly recommended to use firmware version 5.x or higher
++for 24xx/25xx adapters. See
++http://sourceforge.net/mailarchive/forum.php?thread_name=4B4CD39F.6020401%40vlnb.net&forum_name=scst-devel
++for more details why.
++
++2. If you reload the qla2x00tgt module, you should also reload the
++qla2xxx module, otherwise your initiators may not see the target when it
++is enabled after the qla2x00tgt module is loaded (see the example at the
++end of these notes).
++
++3. If you delete an NPIV initiator on the initiator side and then add it
++back with the same WWN, make sure it has the same port_id as well. In
++Fibre Channel, initiators are identified by port_id (s_id in FC terms),
++so if the recreated NPIV initiator gets another port_id, one that was
++already used by another (NPIV) initiator, those initiators could be
++confused by the target and assigned to incorrect security groups, hence
++they could see incorrect LUNs.
++
++If you can't ensure the same port_ids for recreated initiators, it is
++safer to restart the qla2x00tgt and qla2xxx modules on the target to
++make sure the target doesn't have any initiator port_id cached.
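++
++For note 2, a possible reload sequence is the following sketch (it
++assumes nothing else keeps the modules loaded or busy):
++
++rmmod qla2x00tgt
++rmmod qla2xxx
++modprobe qla2xxx
++modprobe qla2x00tgt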
++
++Initiator and target modes
++--------------------------
++
++When qla2xxx is compiled with CONFIG_SCSI_QLA2XXX_TARGET enabled, it has
++a parameter "qlini_mode", which determines when initiator mode will be
++enabled. Possible values:
++
++ - "exclusive" (default) - initiator mode will be enabled on load,
++disabled when target mode is enabled, and enabled back when target mode
++is disabled.
++
++ - "disabled" - initiator mode will never be enabled.
++
++ - "enabled" - initiator mode will always stay enabled.
++
++Use of mode "disabled" is recommended if your target's initiators
++misbehave in such a way that, once they have seen a port in initiator
++mode, they later refuse to see it as a target. Although this mode does
++make a noticeable difference, it isn't an absolute guarantee, since the
++firmware, once initialized, requires an HBA to be in either initiator or
++target mode, so until you enable target mode on a port, your initiators
++will report this port as working in initiator mode. If you need an
++absolutely strong assurance that initiator mode is never enabled, you
++can consider using the patch
++unsupported-patches/qla_delayed_hw_init_tgt_mode_from_the_beginning.diff.
++See its description inside the patch.
++
++Use mode "enabled" if you need your QLA adapters to work in both
++initiator and target modes at the same time.
++
++You can always see which modes are currently active in the active_mode
++sysfs attribute.
++
++In all modes you can at any time use the sysfs attribute
++ini_mode_force_reverse to force enabling or disabling of initiator mode
++on any particular port. Setting this attribute to 1 will reverse the
++current status of the initiator mode from enabled to disabled and vice
++versa.
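++
++For example, to load the initiator driver with initiator mode
++permanently disabled (the "disabled" value described above), you can
++pass the parameter at load time or set it in a modprobe configuration
++file (the file name below is arbitrary):
++
++modprobe qla2xxx qlini_mode=disabled
++
++or, persistently, in /etc/modprobe.d/qla2xxx.conf:
++
++options qla2xxx qlini_mode=disabled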
++
++Explicit confirmation
++---------------------
++
++This option should (actually, almost always must) be enabled by echoing
++"1" into /sys/kernel/scst_tgt/targets/qla2x00t/target/host/explicit_conform_enabled
++if a target card exports at least one stateful SCSI device, like a tape
++drive, and class 2 isn't used; otherwise link-level errors could lead to
++loss of the target/initiator state synchronization. Also check whether
++the initiator supports this feature; it is reported in the kernel logs
++("confirmed completion supported" or not). No major performance
++degradation was noticed when it is enabled. Supported only for 23xx+.
++Disabled by default.
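++
++For example, for the sample target used later in this file (substitute
++your own target's port name):
++
++echo 1 >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/host/explicit_conform_enabled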
++
++Class 2
++-------
++
++Class 2 is the close equivalent of TCP in the IP world. If you enable
++it, all Fibre Channel packets will be acknowledged. By default, class 3
++is used, which is UDP-like. Enable it by echoing "1" into
++/sys/kernel/scst_tgt/targets/qla2x00t/target/host/class2_enabled. This
++option needs special firmware with class 2 support. Disabled by
++default.
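++
++For example (again, substitute your own target's port name):
++
++echo 1 >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/host/class2_enabled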
++
++Compilation options
++-------------------
++
++There are the following compilation options, that could be commented
++in/out in Makefile:
++
++ - CONFIG_SCST_DEBUG - turns on some debugging code, including some logging.
++ Makes the driver considerably bigger and slower, producing a large
++ amount of log data.
++
++ - CONFIG_SCST_TRACING - turns on the ability to log events. Makes the
++ driver considerably bigger and leads to some performance loss.
++
++ - CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD - makes SCST process incoming
++ commands from the qla2x00t target driver and call the driver's
++ callbacks in internal SCST threads context instead of SIRQ context,
++ where those commands were received. Useful for debugging and leads to
++ some performance loss.
++
++ - CONFIG_QLA_TGT_DEBUG_SRR - turns on retransmitting packets (SRR)
++ debugging. In this mode some CTIOs will be "broken" to force the
++ initiator to issue a retransmit request.
++
++Sysfs interface
++---------------
++
++Starting from version 2.0.0 this driver has a sysfs interface. As of
++version 2.0.0 the procfs interface is obsolete and will be removed in
++one of the next versions.
++
++The root of the SCST sysfs interface is /sys/kernel/scst_tgt. The root
++of this driver is /sys/kernel/scst_tgt/targets/qla2x00t. It has the
++following entries:
++
++ - Zero or more subdirectories for targets, with names equal to the
++ port names of the corresponding targets.
++
++ - trace_level - allows enabling and disabling various tracing
++ facilities. See the content of this file for help on how to use it.
++
++ - version - read-only attribute, which shows the version of
++ this driver and the enabled optional features.
++
++Each target subdirectory contains the following entries:
++
++ - host - link pointing to the corresponding scsi_host of the initiator
++ driver
++
++ - ini_groups - subdirectory defining initiator groups for this target,
++ used to define per-initiator access control. See SCST core README for
++ more details.
++
++ - luns - subdirectory defining LUNs of this target. See SCST core
++ README for more details.
++
++ - sessions - subdirectory containing the sessions connected to this target.
++
++ - enabled - using this attribute you can enable or disable target mode
++ of this FC port. It allows you to finish configuring the target before
++ it starts accepting new connections. 0 by default.
++
++ - explicit_confirmation - allows enabling explicit confirmations, see
++ above.
++
++ - rel_tgt_id - allows reading or writing the SCSI Relative Target Port
++ Identifier attribute. This identifier is used to identify SCSI Target
++ Ports by some SCSI commands, mainly by Persistent Reservations
++ commands. This identifier must be unique among all SCST targets, but
++ for convenience SCST allows disabled targets to have a non-unique
++ rel_tgt_id. In this case SCST will not allow enabling this target
++ until its rel_tgt_id becomes unique. This attribute is initialized to a
++ unique value by SCST by default (see the example below).
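++
++For example (hypothetical target port name and identifier; adjust to
++your setup):
++
++cat /sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/rel_tgt_id
++echo 2 >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/rel_tgt_id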
++
++Subdirectory "sessions" contains one subdirectory for each connected
++session, with a name equal to the port name of the connected initiator.
++
++Each session subdirectory contains the following entries:
++
++ - initiator_name - contains the initiator's port name.
++
++ - active_commands - contains the number of active SCSI commands in
++ this session, i.e. commands not yet executed or currently being
++ executed.
++
++ - commands - contains the overall number of SCSI commands in this
++ session (see the examples below).
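++
++For example, to inspect a connected session (the port names below are
++just an illustration, taken from the sample hierarchy later in this
++file):
++
++cat /sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/sessions/25:00:00:f0:99:87:94:a3/initiator_name
++cat /sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/sessions/25:00:00:f0:99:87:94:a3/active_commands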
++
++Below is a sample script which configures one virtual disk "disk1",
++using the /disk1 image, for use with the 25:00:00:f0:98:87:92:f3 target.
++All initiators connected to this target will see this device.
++
++#!/bin/bash
++
++modprobe scst
++modprobe scst_vdisk
++
++echo "add_device disk1 filename=/disk1; nv_cache=1" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
++
++modprobe qla2x00tgt
++
++echo "add disk1 0" >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/luns/mgmt
++echo 1 >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/enabled
++
++Below is another sample script, which configures one real local SCSI
++disk, 0:0:1:0, for use with the 25:00:00:f0:98:87:92:f3 target:
++
++#!/bin/bash
++
++modprobe scst
++modprobe scst_disk
++
++echo "add_device 0:0:1:0" >/sys/kernel/scst_tgt/handlers/dev_disk/mgmt
++
++modprobe qla2x00tgt
++
++echo "add 0:0:1:0 0" >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/luns/mgmt
++echo 1 >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/enabled
++
++Below is a more advanced sample script, which configures several virtual
++devices of various types, including a virtual CDROM. In this script
++initiator 25:00:00:f0:99:87:94:a3 will see the disk1 and disk2 devices;
++all other initiators will see the read-only blockio, nullio and cdrom
++devices.
++
++#!/bin/bash
++
++modprobe scst
++modprobe scst_vdisk
++
++echo "add_device disk1 filename=/disk1; nv_cache=1" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
++echo "add_device disk2 filename=/disk2; blocksize=4096; nv_cache=1" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
++echo "add_device blockio filename=/dev/sda5" >/sys/kernel/scst_tgt/handlers/vdisk_blockio/mgmt
++echo "add_device nullio" >/sys/kernel/scst_tgt/handlers/vdisk_nullio/mgmt
++echo "add_device cdrom" >/sys/kernel/scst_tgt/handlers/vcdrom/mgmt
++
++modprobe qla2x00tgt
++
++echo "add blockio 0 read_only=1" >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/luns/mgmt
++echo "add nullio 1" >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/luns/mgmt
++echo "add cdrom 2" >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/luns/mgmt
++
++echo "create 25:00:00:f0:99:87:94:a3" >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/ini_groups/mgmt
++echo "add disk1 0" >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/ini_groups/25:00:00:f0:99:87:94:a3/luns/mgmt
++echo "add disk2 1" >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/ini_groups/25:00:00:f0:99:87:94:a3/luns/mgmt
++echo "add 25:00:00:f0:99:87:94:a3" >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/ini_groups/25:00:00:f0:99:87:94:a3/initiators/mgmt
++
++echo 1 >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/enabled
++
++The resulting overall SCST sysfs hierarchy with initiator
++25:00:00:f0:99:87:94:a3 connected will look like:
++
++/sys/kernel/scst_tgt
++|-- devices
++| |-- blockio
++| | |-- blocksize
++| | |-- exported
++| | | `-- export0 -> ../../../targets/qla2x00t/25:00:00:f0:98:87:92:f3/luns/0
++| | |-- filename
++| | |-- handler -> ../../handlers/vdisk_blockio
++| | |-- nv_cache
++| | |-- read_only
++| | |-- removable
++| | |-- resync_size
++| | |-- size_mb
++| | |-- t10_dev_id
++| | |-- threads_num
++| | |-- threads_pool_type
++| | |-- type
++| | `-- usn
++| |-- cdrom
++| | |-- exported
++| | | `-- export0 -> ../../../targets/qla2x00t/25:00:00:f0:98:87:92:f3/luns/2
++| | |-- filename
++| | |-- handler -> ../../handlers/vcdrom
++| | |-- size_mb
++| | |-- t10_dev_id
++| | |-- threads_num
++| | |-- threads_pool_type
++| | |-- type
++| | `-- usn
++| |-- disk1
++| | |-- blocksize
++| | |-- exported
++| | | `-- export0 -> ../../../targets/qla2x00t/25:00:00:f0:98:87:92:f3/ini_groups/25:00:00:f0:99:87:94:a3/luns/0
++| | |-- filename
++| | |-- handler -> ../../handlers/vdisk_fileio
++| | |-- nv_cache
++| | |-- o_direct
++| | |-- read_only
++| | |-- removable
++| | |-- resync_size
++| | |-- size_mb
++| | |-- t10_dev_id
++| | |-- threads_num
++| | |-- threads_pool_type
++| | |-- type
++| | |-- usn
++| | `-- write_through
++| |-- disk2
++| | |-- blocksize
++| | |-- exported
++| | | `-- export0 -> ../../../targets/qla2x00t/25:00:00:f0:98:87:92:f3/ini_groups/25:00:00:f0:99:87:94:a3/luns/1
++| | |-- filename
++| | |-- handler -> ../../handlers/vdisk_fileio
++| | |-- nv_cache
++| | |-- o_direct
++| | |-- read_only
++| | |-- removable
++| | |-- resync_size
++| | |-- size_mb
++| | |-- t10_dev_id
++| | |-- threads_num
++| | |-- threads_pool_type
++| | |-- type
++| | |-- usn
++| | `-- write_through
++| `-- nullio
++| |-- blocksize
++| |-- exported
++| | `-- export0 -> ../../../targets/qla2x00t/25:00:00:f0:98:87:92:f3/luns/1
++| |-- handler -> ../../handlers/vdisk_nullio
++| |-- read_only
++| |-- removable
++| |-- size_mb
++| |-- t10_dev_id
++| |-- threads_num
++| |-- threads_pool_type
++| |-- type
++| `-- usn
++|-- handlers
++| |-- vcdrom
++| | |-- cdrom -> ../../devices/cdrom
++| | |-- mgmt
++| | |-- trace_level
++| | `-- type
++| |-- vdisk_blockio
++| | |-- blockio -> ../../devices/blockio
++| | |-- mgmt
++| | |-- trace_level
++| | `-- type
++| |-- vdisk_fileio
++| | |-- disk1 -> ../../devices/disk1
++| | |-- disk2 -> ../../devices/disk2
++| | |-- mgmt
++| | |-- trace_level
++| | `-- type
++| `-- vdisk_nullio
++| |-- mgmt
++| |-- nullio -> ../../devices/nullio
++| |-- trace_level
++| `-- type
++|-- sgv
++| |-- global_stats
++| |-- sgv
++| | `-- stats
++| |-- sgv-clust
++| | `-- stats
++| `-- sgv-dma
++| `-- stats
++|-- targets
++| `-- qla2x00t
++| |-- 25:00:00:f0:98:87:92:f3
++| | |-- enabled
++| | |-- explicit_confirmation
++| | |-- host -> ../../../../../class/scsi_host/host4
++| | |-- ini_groups
++| | | |-- 25:00:00:f0:99:87:94:a3
++| | | | |-- initiators
++| | | | | |-- 25:00:00:f0:99:87:94:a3
++| | | | | `-- mgmt
++| | | | `-- luns
++| | | | |-- 0
++| | | | | |-- device -> ../../../../../../../devices/disk1
++| | | | | `-- read_only
++| | | | |-- 1
++| | | | | |-- device -> ../../../../../../../devices/disk2
++| | | | | `-- read_only
++| | | | `-- mgmt
++| | | `-- mgmt
++| | |-- luns
++| | | |-- 0
++| | | | |-- device -> ../../../../../devices/blockio
++| | | | `-- read_only
++| | | |-- 1
++| | | | |-- device -> ../../../../../devices/nullio
++| | | | `-- read_only
++| | | |-- 2
++| | | | |-- device -> ../../../../../devices/cdrom
++| | | | `-- read_only
++| | | `-- mgmt
++| | |-- rel_tgt_id
++| | `-- sessions
++| | `-- 25:00:00:f0:99:87:94:a3
++| | |-- active_commands
++| | |-- commands
++| | |-- initiator_name
++| | `-- luns -> ../../ini_groups/25:00:00:f0:99:87:94:a3/luns
++| |-- trace_level
++| `-- version
++|-- threads
++|-- trace_level
++`-- version
++
++Performance advice
++------------------
++
++1. If you are going to use your target in a VM environment, for
++instance as shared storage with VMware, make sure all your VMs are
++connected to the target via *separate* sessions. You can check this
++using the SCST proc or sysfs interface (see the example below). You
++should use the available facilities, like NPIV, to create a separate
++session for each VM. If you miss this, you can lose a great deal of
++performance for parallel access to your target from different VMs. This
++doesn't apply if your VMs use the same shared storage, like VMFS, for
++instance. In this case all your VM hosts will be connected to the
++target via separate sessions, which is enough.
++
++2. See the SCST core's README for more advice. Especially pay attention
++to having the io_grouping_type option set correctly.
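++
++For advice 1, you can check how many sessions your VMs have established
++by listing the sessions directory of the target, for instance
++(hypothetical port name):
++
++ls /sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/sessions/
++
++Each connected initiator port (e.g. each NPIV port of a VM) should show
++up as a separate subdirectory there.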
++
++Credits
++-------
++
++Thanks to:
++
++ * QLogic support for their invaluable help.
++
++ * Nathaniel Clark <nate@misrule.us> for porting the initiator driver
++to the new 2.6 kernel.
++
++ * Mark Buechler <mark.buechler@gmail.com> for the original
++WWN-based authentication, a lot of useful suggestions, bug reports and
++help in debugging.
++
++ * Ming Zhang <mingz@ele.uri.edu> for fixes.
++
++Vladislav Bolkhovitin <vst@vlnb.net>, http://scst.sourceforge.net
+diff -uprN orig/linux-2.6.36/drivers/scst/srpt/Kconfig linux-2.6.36/drivers/scst/srpt/Kconfig
+--- orig/linux-2.6.36/drivers/scst/srpt/Kconfig
++++ linux-2.6.36/drivers/scst/srpt/Kconfig
+@@ -0,0 +1,12 @@
++config SCST_SRPT
++ tristate "InfiniBand SCSI RDMA Protocol target support"
++ depends on INFINIBAND && SCST
++ ---help---
++
++ Support for the SCSI RDMA Protocol (SRP) Target driver. The
++ SRP protocol is a protocol that allows an initiator to access
++ a block storage device on another host (target) over a network
++ that supports the RDMA protocol. Currently the RDMA protocol is
++ supported by InfiniBand and by iWarp network hardware. More
++ information about the SRP protocol can be found on the website
++ of the INCITS T10 technical committee (http://www.t10.org/).
+diff -uprN orig/linux-2.6.36/drivers/scst/srpt/Makefile linux-2.6.36/drivers/scst/srpt/Makefile
+--- orig/linux-2.6.36/drivers/scst/srpt/Makefile
++++ linux-2.6.36/drivers/scst/srpt/Makefile
+@@ -0,0 +1,1 @@
++obj-$(CONFIG_SCST_SRPT) += ib_srpt.o
+diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_dm_mad.h linux-2.6.36/drivers/scst/srpt/ib_dm_mad.h
+--- orig/linux-2.6.36/drivers/scst/srpt/ib_dm_mad.h
++++ linux-2.6.36/drivers/scst/srpt/ib_dm_mad.h
+@@ -0,0 +1,139 @@
++/*
++ * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
++ *
++ * This software is available to you under a choice of one of two
++ * licenses. You may choose to be licensed under the terms of the GNU
++ * General Public License (GPL) Version 2, available from the file
++ * COPYING in the main directory of this source tree, or the
++ * OpenIB.org BSD license below:
++ *
++ * Redistribution and use in source and binary forms, with or
++ * without modification, are permitted provided that the following
++ * conditions are met:
++ *
++ * - Redistributions of source code must retain the above
++ * copyright notice, this list of conditions and the following
++ * disclaimer.
++ *
++ * - Redistributions in binary form must reproduce the above
++ * copyright notice, this list of conditions and the following
++ * disclaimer in the documentation and/or other materials
++ * provided with the distribution.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
++ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ */
++
++#ifndef IB_DM_MAD_H
++#define IB_DM_MAD_H
++
++#include <linux/types.h>
++
++#include <rdma/ib_mad.h>
++
++enum {
++ /*
++ * See also section 13.4.7 Status Field, table 115 MAD Common Status
++ * Field Bit Values and also section 16.3.1.1 Status Field in the
++ * InfiniBand Architecture Specification.
++ */
++ DM_MAD_STATUS_UNSUP_METHOD = 0x0008,
++ DM_MAD_STATUS_UNSUP_METHOD_ATTR = 0x000c,
++ DM_MAD_STATUS_INVALID_FIELD = 0x001c,
++ DM_MAD_STATUS_NO_IOC = 0x0100,
++
++ /*
++ * See also the Device Management chapter, section 16.3.3 Attributes,
++ * table 279 Device Management Attributes in the InfiniBand
++ * Architecture Specification.
++ */
++ DM_ATTR_CLASS_PORT_INFO = 0x01,
++ DM_ATTR_IOU_INFO = 0x10,
++ DM_ATTR_IOC_PROFILE = 0x11,
++ DM_ATTR_SVC_ENTRIES = 0x12
++};
++
++struct ib_dm_hdr {
++ u8 reserved[28];
++};
++
++/*
++ * Structure of management datagram sent by the SRP target implementation.
++ * Contains a management datagram header, reliable multi-packet transaction
++ * protocol (RMPP) header and ib_dm_hdr. Notes:
++ * - The SRP target implementation does not use RMPP or ib_dm_hdr when sending
++ * management datagrams.
++ * - The header size must be exactly 64 bytes (IB_MGMT_DEVICE_HDR), since this
++ * is the header size that is passed to ib_create_send_mad() in ib_srpt.c.
++ * - The maximum supported size for a management datagram when not using RMPP
++ * is 256 bytes -- 64 bytes header and 192 (IB_MGMT_DEVICE_DATA) bytes data.
++ */
++struct ib_dm_mad {
++ struct ib_mad_hdr mad_hdr;
++ struct ib_rmpp_hdr rmpp_hdr;
++ struct ib_dm_hdr dm_hdr;
++ u8 data[IB_MGMT_DEVICE_DATA];
++};
++
++/*
++ * IOUnitInfo as defined in section 16.3.3.3 IOUnitInfo of the InfiniBand
++ * Architecture Specification.
++ */
++struct ib_dm_iou_info {
++ __be16 change_id;
++ u8 max_controllers;
++ u8 op_rom;
++ u8 controller_list[128];
++};
++
++/*
++ * IOControllerprofile as defined in section 16.3.3.4 IOControllerProfile of
++ * the InfiniBand Architecture Specification.
++ */
++struct ib_dm_ioc_profile {
++ __be64 guid;
++ __be32 vendor_id;
++ __be32 device_id;
++ __be16 device_version;
++ __be16 reserved1;
++ __be32 subsys_vendor_id;
++ __be32 subsys_device_id;
++ __be16 io_class;
++ __be16 io_subclass;
++ __be16 protocol;
++ __be16 protocol_version;
++ __be16 service_conn;
++ __be16 initiators_supported;
++ __be16 send_queue_depth;
++ u8 reserved2;
++ u8 rdma_read_depth;
++ __be32 send_size;
++ __be32 rdma_size;
++ u8 op_cap_mask;
++ u8 svc_cap_mask;
++ u8 num_svc_entries;
++ u8 reserved3[9];
++ u8 id_string[64];
++};
++
++struct ib_dm_svc_entry {
++ u8 name[40];
++ __be64 id;
++};
++
++/*
++ * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
++ * Specification. See also section B.7, table B.8 in the T10 SRP r16a document.
++ */
++struct ib_dm_svc_entries {
++ struct ib_dm_svc_entry service_entries[4];
++};
++
++#endif
+diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/scst/srpt/ib_srpt.c
+--- orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c
++++ linux-2.6.36/drivers/scst/srpt/ib_srpt.c
+@@ -0,0 +1,3698 @@
++/*
++ * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
++ * Copyright (C) 2008 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2008 - 2010 Bart Van Assche <bart.vanassche@gmail.com>
++ *
++ * This software is available to you under a choice of one of two
++ * licenses. You may choose to be licensed under the terms of the GNU
++ * General Public License (GPL) Version 2, available from the file
++ * COPYING in the main directory of this source tree, or the
++ * OpenIB.org BSD license below:
++ *
++ * Redistribution and use in source and binary forms, with or
++ * without modification, are permitted provided that the following
++ * conditions are met:
++ *
++ * - Redistributions of source code must retain the above
++ * copyright notice, this list of conditions and the following
++ * disclaimer.
++ *
++ * - Redistributions in binary form must reproduce the above
++ * copyright notice, this list of conditions and the following
++ * disclaimer in the documentation and/or other materials
++ * provided with the distribution.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
++ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/err.h>
++#include <linux/ctype.h>
++#include <linux/kthread.h>
++#include <linux/string.h>
++#include <linux/delay.h>
++#include <asm/atomic.h>
++#include "ib_srpt.h"
++#define LOG_PREFIX "ib_srpt" /* Prefix for SCST tracing macros. */
++#include <scst/scst_debug.h>
++
++/* Name of this kernel module. */
++#define DRV_NAME "ib_srpt"
++#define DRV_VERSION "2.0.0"
++#define DRV_RELDATE "October 25, 2010"
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++/* Flags to be used in SCST debug tracing statements. */
++#define DEFAULT_SRPT_TRACE_FLAGS (TRACE_OUT_OF_MEM | TRACE_MINOR \
++ | TRACE_MGMT | TRACE_SPECIAL)
++/* Name of the entry that will be created under /proc/scsi_tgt/ib_srpt. */
++#define SRPT_PROC_TRACE_LEVEL_NAME "trace_level"
++#endif
++
++#define MELLANOX_SRPT_ID_STRING "SCST SRP target"
++
++MODULE_AUTHOR("Vu Pham");
++MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
++ "v" DRV_VERSION " (" DRV_RELDATE ")");
++MODULE_LICENSE("Dual BSD/GPL");
++
++/*
++ * Local data types.
++ */
++
++enum threading_mode {
++ MODE_ALL_IN_SIRQ = 0,
++ MODE_IB_COMPLETION_IN_THREAD = 1,
++ MODE_IB_COMPLETION_IN_SIRQ = 2,
++};
++
++/*
++ * Global Variables
++ */
++
++static u64 srpt_service_guid;
++/* List of srpt_device structures. */
++static atomic_t srpt_device_count;
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++static unsigned long trace_flag = DEFAULT_SRPT_TRACE_FLAGS;
++module_param(trace_flag, long, 0644);
++MODULE_PARM_DESC(trace_flag, "SCST trace flags.");
++#endif
++
++static int thread = 1;
++module_param(thread, int, 0444);
++MODULE_PARM_DESC(thread,
++ "IB completion and SCSI command processing context. Defaults"
++ " to one, i.e. process IB completions and SCSI commands in"
++ " kernel thread context. 0 means soft IRQ whenever possible"
++ " and 2 means process IB completions in soft IRQ context and"
++ " SCSI commands in kernel thread context.");
++
++static unsigned srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
++module_param(srp_max_rdma_size, int, 0744);
++MODULE_PARM_DESC(srp_max_rdma_size,
++ "Maximum size of SRP RDMA transfers for new connections.");
++
++static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
++module_param(srp_max_req_size, int, 0444);
++MODULE_PARM_DESC(srp_max_req_size,
++ "Maximum size of SRP request messages in bytes.");
++
++static unsigned int srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
++module_param(srp_max_rsp_size, int, 0444);
++MODULE_PARM_DESC(srp_max_rsp_size,
++ "Maximum size of SRP response messages in bytes.");
++
++static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
++module_param(srpt_srq_size, int, 0444);
++MODULE_PARM_DESC(srpt_srq_size,
++ "Shared receive queue (SRQ) size.");
++
++static int srpt_sq_size = DEF_SRPT_SQ_SIZE;
++module_param(srpt_sq_size, int, 0444);
++MODULE_PARM_DESC(srpt_sq_size,
++ "Per-channel send queue (SQ) size.");
++
++static bool use_port_guid_in_session_name;
++module_param(use_port_guid_in_session_name, bool, 0444);
++MODULE_PARM_DESC(use_port_guid_in_session_name,
++ "Use target port ID in the SCST session name such that"
++ " redundant paths between multiport systems can be masked.");
++
++static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
++{
++ return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
++}
++module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
++ 0444);
++MODULE_PARM_DESC(srpt_service_guid,
++ "Using this value for ioc_guid, id_ext, and cm_listen_id"
++ " instead of using the node_guid of the first HCA.");
++
++static void srpt_add_one(struct ib_device *device);
++static void srpt_remove_one(struct ib_device *device);
++static void srpt_unregister_mad_agent(struct srpt_device *sdev);
++static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
++ struct srpt_send_ioctx *ioctx);
++static void srpt_release_channel(struct scst_session *scst_sess);
++
++static struct ib_client srpt_client = {
++ .name = DRV_NAME,
++ .add = srpt_add_one,
++ .remove = srpt_remove_one
++};
++
++/**
++ * srpt_test_and_set_channel_state() - Test and set the channel state.
++ *
++ * @ch: RDMA channel.
++ * @old: channel state to compare with.
++ * @new: state to change the channel state to if the current state matches the
++ * argument 'old'.
++ *
++ * Returns the previous channel state.
++ */
++static enum rdma_ch_state
++srpt_test_and_set_channel_state(struct srpt_rdma_ch *ch,
++ enum rdma_ch_state old,
++ enum rdma_ch_state new)
++{
++ return atomic_cmpxchg(&ch->state, old, new);
++}
++
++/**
++ * srpt_event_handler() - Asynchronous IB event callback function.
++ *
++ * Callback function called by the InfiniBand core when an asynchronous IB
++ * event occurs. This callback may occur in interrupt context. See also
++ * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
++ * Architecture Specification.
++ */
++static void srpt_event_handler(struct ib_event_handler *handler,
++ struct ib_event *event)
++{
++ struct srpt_device *sdev;
++ struct srpt_port *sport;
++
++ TRACE_ENTRY();
++
++ sdev = ib_get_client_data(event->device, &srpt_client);
++ if (!sdev || sdev->device != event->device)
++ return;
++
++ TRACE_DBG("ASYNC event= %d on device= %s",
++ event->event, sdev->device->name);
++
++ switch (event->event) {
++ case IB_EVENT_PORT_ERR:
++ if (event->element.port_num <= sdev->device->phys_port_cnt) {
++ sport = &sdev->port[event->element.port_num - 1];
++ sport->lid = 0;
++ sport->sm_lid = 0;
++ }
++ break;
++ case IB_EVENT_PORT_ACTIVE:
++ case IB_EVENT_LID_CHANGE:
++ case IB_EVENT_PKEY_CHANGE:
++ case IB_EVENT_SM_CHANGE:
++ case IB_EVENT_CLIENT_REREGISTER:
++ /*
++ * Refresh port data asynchronously. Note: it is safe to call
++ * schedule_work() even if &sport->work is already on the
++ * global workqueue because schedule_work() tests for the
++ * work_pending() condition before adding &sport->work to the
++ * global work queue.
++ */
++ if (event->element.port_num <= sdev->device->phys_port_cnt) {
++ sport = &sdev->port[event->element.port_num - 1];
++ if (!sport->lid && !sport->sm_lid)
++ schedule_work(&sport->work);
++ }
++ break;
++ default:
++ PRINT_ERROR("received unrecognized IB event %d", event->event);
++ break;
++ }
++
++ TRACE_EXIT();
++}
++
++/**
++ * srpt_srq_event() - SRQ event callback function.
++ */
++static void srpt_srq_event(struct ib_event *event, void *ctx)
++{
++ PRINT_INFO("SRQ event %d", event->event);
++}
++
++/**
++ * srpt_qp_event() - QP event callback function.
++ */
++static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
++{
++ TRACE_DBG("QP event %d on cm_id=%p sess_name=%s state=%d",
++ event->event, ch->cm_id, ch->sess_name,
++ atomic_read(&ch->state));
++
++ switch (event->event) {
++ case IB_EVENT_COMM_EST:
++ ib_cm_notify(ch->cm_id, event->event);
++ break;
++ case IB_EVENT_QP_LAST_WQE_REACHED:
++ if (srpt_test_and_set_channel_state(ch, RDMA_CHANNEL_LIVE,
++ RDMA_CHANNEL_DISCONNECTING) == RDMA_CHANNEL_LIVE) {
++ PRINT_INFO("disconnected session %s.", ch->sess_name);
++ ib_send_cm_dreq(ch->cm_id, NULL, 0);
++ }
++ break;
++ default:
++ PRINT_ERROR("received unrecognized IB QP event %d",
++ event->event);
++ break;
++ }
++}
++
++/**
++ * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
++ *
++ * @slot: one-based slot number.
++ * @value: four-bit value.
++ *
++ * Copies the lowest four bits of value in element slot of the array of four
++ * bit elements called c_list (controller list). The index slot is one-based.
++ */
++static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
++{
++ u16 id;
++ u8 tmp;
++
++ id = (slot - 1) / 2;
++ if (slot & 0x1) {
++ tmp = c_list[id] & 0xf;
++ c_list[id] = (value << 4) | tmp;
++ } else {
++ tmp = c_list[id] & 0xf0;
++ c_list[id] = (value & 0xf) | tmp;
++ }
++}
++
++/**
++ * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
++ *
++ * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
++ * Specification.
++ */
++static void srpt_get_class_port_info(struct ib_dm_mad *mad)
++{
++ struct ib_class_port_info *cif;
++
++ cif = (struct ib_class_port_info *)mad->data;
++ memset(cif, 0, sizeof *cif);
++ cif->base_version = 1;
++ cif->class_version = 1;
++ cif->resp_time_value = 20;
++
++ mad->mad_hdr.status = 0;
++}
++
++/**
++ * srpt_get_iou() - Write IOUnitInfo to a management datagram.
++ *
++ * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
++ * Specification. See also section B.7, table B.6 in the SRP r16a document.
++ */
++static void srpt_get_iou(struct ib_dm_mad *mad)
++{
++ struct ib_dm_iou_info *ioui;
++ u8 slot;
++ int i;
++
++ ioui = (struct ib_dm_iou_info *)mad->data;
++ ioui->change_id = __constant_cpu_to_be16(1);
++ ioui->max_controllers = 16;
++
++ /* set present for slot 1 and empty for the rest */
++ srpt_set_ioc(ioui->controller_list, 1, 1);
++ for (i = 1, slot = 2; i < 16; i++, slot++)
++ srpt_set_ioc(ioui->controller_list, slot, 0);
++
++ mad->mad_hdr.status = 0;
++}
++
++/**
++ * srpt_get_ioc() - Write IOControllerprofile to a management datagram.
++ *
++ * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
++ * Architecture Specification. See also section B.7, table B.7 in the SRP
++ * r16a document.
++ */
++static void srpt_get_ioc(struct srpt_device *sdev, u32 slot,
++ struct ib_dm_mad *mad)
++{
++ struct ib_dm_ioc_profile *iocp;
++
++ iocp = (struct ib_dm_ioc_profile *)mad->data;
++
++ if (!slot || slot > 16) {
++ mad->mad_hdr.status
++ = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
++ return;
++ }
++
++ if (slot > 2) {
++ mad->mad_hdr.status
++ = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
++ return;
++ }
++
++ memset(iocp, 0, sizeof *iocp);
++ strcpy(iocp->id_string, MELLANOX_SRPT_ID_STRING);
++ iocp->guid = cpu_to_be64(srpt_service_guid);
++ iocp->vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
++ iocp->device_id = cpu_to_be32(sdev->dev_attr.vendor_part_id);
++ iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
++ iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
++ iocp->subsys_device_id = 0x0;
++ iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
++ iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS);
++ iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL);
++ iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION);
++ iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
++ iocp->rdma_read_depth = 4;
++ iocp->send_size = cpu_to_be32(srp_max_req_size);
++ iocp->rdma_size = cpu_to_be32(min(max(srp_max_rdma_size, 256U),
++ 1U << 24));
++ iocp->num_svc_entries = 1;
++ iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
++ SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
++
++ mad->mad_hdr.status = 0;
++}
++
++/**
++ * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
++ *
++ * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
++ * Specification. See also section B.7, table B.8 in the SRP r16a document.
++ */
++static void srpt_get_svc_entries(u64 ioc_guid,
++ u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
++{
++ struct ib_dm_svc_entries *svc_entries;
++
++ WARN_ON(!ioc_guid);
++
++ if (!slot || slot > 16) {
++ mad->mad_hdr.status
++ = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
++ return;
++ }
++
++ if (slot > 2 || lo > hi || hi > 1) {
++ mad->mad_hdr.status
++ = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
++ return;
++ }
++
++ svc_entries = (struct ib_dm_svc_entries *)mad->data;
++ memset(svc_entries, 0, sizeof *svc_entries);
++ svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
++ snprintf(svc_entries->service_entries[0].name,
++ sizeof(svc_entries->service_entries[0].name),
++ "%s%016llx",
++ SRP_SERVICE_NAME_PREFIX,
++ ioc_guid);
++
++ mad->mad_hdr.status = 0;
++}
++
++/**
++ * srpt_mgmt_method_get() - Process a received management datagram.
++ * @sp: source port through which the MAD has been received.
++ * @rq_mad: received MAD.
++ * @rsp_mad: response MAD.
++ */
++static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
++ struct ib_dm_mad *rsp_mad)
++{
++ u16 attr_id;
++ u32 slot;
++ u8 hi, lo;
++
++ attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
++ switch (attr_id) {
++ case DM_ATTR_CLASS_PORT_INFO:
++ srpt_get_class_port_info(rsp_mad);
++ break;
++ case DM_ATTR_IOU_INFO:
++ srpt_get_iou(rsp_mad);
++ break;
++ case DM_ATTR_IOC_PROFILE:
++ slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
++ srpt_get_ioc(sp->sdev, slot, rsp_mad);
++ break;
++ case DM_ATTR_SVC_ENTRIES:
++ slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
++ hi = (u8) ((slot >> 8) & 0xff);
++ lo = (u8) (slot & 0xff);
++ slot = (u16) ((slot >> 16) & 0xffff);
++ srpt_get_svc_entries(srpt_service_guid,
++ slot, hi, lo, rsp_mad);
++ break;
++ default:
++ rsp_mad->mad_hdr.status =
++ __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
++ break;
++ }
++}
++
++/**
++ * srpt_mad_send_handler() - Post MAD-send callback function.
++ */
++static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
++ struct ib_mad_send_wc *mad_wc)
++{
++ ib_destroy_ah(mad_wc->send_buf->ah);
++ ib_free_send_mad(mad_wc->send_buf);
++}
++
++/**
++ * srpt_mad_recv_handler() - MAD reception callback function.
++ */
++static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
++ struct ib_mad_recv_wc *mad_wc)
++{
++ struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
++ struct ib_ah *ah;
++ struct ib_mad_send_buf *rsp;
++ struct ib_dm_mad *dm_mad;
++
++ if (!mad_wc || !mad_wc->recv_buf.mad)
++ return;
++
++ ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
++ mad_wc->recv_buf.grh, mad_agent->port_num);
++ if (IS_ERR(ah))
++ goto err;
++
++ BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
++
++ rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
++ mad_wc->wc->pkey_index, 0,
++ IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
++ GFP_KERNEL);
++ if (IS_ERR(rsp))
++ goto err_rsp;
++
++ rsp->ah = ah;
++
++ dm_mad = rsp->mad;
++ memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
++ dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
++ dm_mad->mad_hdr.status = 0;
++
++ switch (mad_wc->recv_buf.mad->mad_hdr.method) {
++ case IB_MGMT_METHOD_GET:
++ srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
++ break;
++ case IB_MGMT_METHOD_SET:
++ dm_mad->mad_hdr.status =
++ __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
++ break;
++ default:
++ dm_mad->mad_hdr.status =
++ __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
++ break;
++ }
++
++ if (!ib_post_send_mad(rsp, NULL)) {
++ ib_free_recv_mad(mad_wc);
++ /* will destroy_ah & free_send_mad in send completion */
++ return;
++ }
++
++ ib_free_send_mad(rsp);
++
++err_rsp:
++ ib_destroy_ah(ah);
++err:
++ ib_free_recv_mad(mad_wc);
++}
++
++/**
++ * srpt_refresh_port() - Configure a HCA port.
++ *
++ * Enable InfiniBand management datagram processing, update the cached sm_lid,
++ * lid and gid values, and register a callback function for processing MADs
++ * on the specified port.
++ *
++ * Note: It is safe to call this function more than once for the same port.
++ */
++static int srpt_refresh_port(struct srpt_port *sport)
++{
++ struct ib_mad_reg_req reg_req;
++ struct ib_port_modify port_modify;
++ struct ib_port_attr port_attr;
++ int ret;
++
++ TRACE_ENTRY();
++
++ memset(&port_modify, 0, sizeof port_modify);
++ port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
++ port_modify.clr_port_cap_mask = 0;
++
++ ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
++ if (ret)
++ goto err_mod_port;
++
++ ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
++ if (ret)
++ goto err_query_port;
++
++ sport->sm_lid = port_attr.sm_lid;
++ sport->lid = port_attr.lid;
++
++ ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
++ if (ret)
++ goto err_query_port;
++
++ if (!sport->mad_agent) {
++ memset(&reg_req, 0, sizeof reg_req);
++ reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
++ reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
++ set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
++ set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
++
++ sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
++ sport->port,
++ IB_QPT_GSI,
++ &reg_req, 0,
++ srpt_mad_send_handler,
++ srpt_mad_recv_handler,
++ sport);
++ if (IS_ERR(sport->mad_agent)) {
++ ret = PTR_ERR(sport->mad_agent);
++ sport->mad_agent = NULL;
++ goto err_query_port;
++ }
++ }
++
++ TRACE_EXIT_RES(0);
++
++ return 0;
++
++err_query_port:
++
++ port_modify.set_port_cap_mask = 0;
++ port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
++ ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
++
++err_mod_port:
++
++ TRACE_EXIT_RES(ret);
++
++ return ret;
++}
++
++/**
++ * srpt_unregister_mad_agent() - Unregister MAD callback functions.
++ *
++ * Note: It is safe to call this function more than once for the same device.
++ */
++static void srpt_unregister_mad_agent(struct srpt_device *sdev)
++{
++ struct ib_port_modify port_modify = {
++ .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
++ };
++ struct srpt_port *sport;
++ int i;
++
++ for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
++ sport = &sdev->port[i - 1];
++ WARN_ON(sport->port != i);
++ if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
++ PRINT_ERROR("%s", "disabling MAD processing failed.");
++ if (sport->mad_agent) {
++ ib_unregister_mad_agent(sport->mad_agent);
++ sport->mad_agent = NULL;
++ }
++ }
++}
++
++/**
++ * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
++ */
++static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
++ int ioctx_size, int dma_size,
++ enum dma_data_direction dir)
++{
++ struct srpt_ioctx *ioctx;
++
++ ioctx = kmalloc(ioctx_size, GFP_KERNEL);
++ if (!ioctx)
++ goto err;
++
++ ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
++ if (!ioctx->buf)
++ goto err_free_ioctx;
++
++ ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
++ if (ib_dma_mapping_error(sdev->device, ioctx->dma))
++ goto err_free_buf;
++
++ return ioctx;
++
++err_free_buf:
++ kfree(ioctx->buf);
++err_free_ioctx:
++ kfree(ioctx);
++err:
++ return NULL;
++}
++
++/**
++ * srpt_free_ioctx() - Free an SRPT I/O context structure.
++ */
++static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
++ int dma_size, enum dma_data_direction dir)
++{
++ if (!ioctx)
++ return;
++
++ ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
++ kfree(ioctx->buf);
++ kfree(ioctx);
++}
++
++/**
++ * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
++ * @sdev: Device to allocate the I/O context ring for.
++ * @ring_size: Number of elements in the I/O context ring.
++ * @ioctx_size: I/O context size.
++ * @dma_size: DMA buffer size.
++ * @dir: DMA data direction.
++ */
++static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
++ int ring_size, int ioctx_size,
++ int dma_size, enum dma_data_direction dir)
++{
++ struct srpt_ioctx **ring;
++ int i;
++
++ TRACE_ENTRY();
++
++ WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
++ && ioctx_size != sizeof(struct srpt_send_ioctx));
++ WARN_ON(dma_size != srp_max_req_size && dma_size != srp_max_rsp_size);
++
++ ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
++ if (!ring)
++ goto out;
++ for (i = 0; i < ring_size; ++i) {
++ ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
++ if (!ring[i])
++ goto err;
++ ring[i]->index = i;
++ }
++ goto out;
++
++err:
++ while (--i >= 0)
++ srpt_free_ioctx(sdev, ring[i], dma_size, dir);
++ kfree(ring);
++out:
++ TRACE_EXIT_RES(ring);
++ return ring;
++}
++
++/**
++ * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
++ */
++static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
++ struct srpt_device *sdev, int ring_size,
++ int dma_size, enum dma_data_direction dir)
++{
++ int i;
++
++ WARN_ON(dma_size != srp_max_req_size && dma_size != srp_max_rsp_size);
++
++ for (i = 0; i < ring_size; ++i)
++ srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
++ kfree(ioctx_ring);
++}
++
++/**
++ * srpt_get_cmd_state() - Get the state of a SCSI command.
++ */
++static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
++{
++ BUG_ON(!ioctx);
++
++ return atomic_read(&ioctx->state);
++}
++
++/**
++ * srpt_set_cmd_state() - Set the state of a SCSI command.
++ * @new: New state to be set.
++ *
++ * Does not modify the state of aborted commands. Returns the previous command
++ * state.
++ */
++static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
++ enum srpt_command_state new)
++{
++ enum srpt_command_state previous;
++
++ BUG_ON(!ioctx);
++
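++	/*
++	 * Atomically replace the current state unless the command has
++	 * already reached SRPT_STATE_DONE; in that case the state is left
++	 * untouched and SRPT_STATE_DONE is returned.
++	 */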
++ do {
++ previous = atomic_read(&ioctx->state);
++ } while (previous != SRPT_STATE_DONE
++ && atomic_cmpxchg(&ioctx->state, previous, new) != previous);
++
++ return previous;
++}
++
++/**
++ * srpt_test_and_set_cmd_state() - Test and set the state of a command.
++ * @old: State to compare against.
++ * @new: New state to be set if the current state matches 'old'.
++ *
++ * Returns the previous command state.
++ */
++static enum srpt_command_state
++srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
++ enum srpt_command_state old,
++ enum srpt_command_state new)
++{
++ WARN_ON(!ioctx);
++ WARN_ON(old == SRPT_STATE_DONE);
++ WARN_ON(new == SRPT_STATE_NEW);
++
++ return atomic_cmpxchg(&ioctx->state, old, new);
++}
++
++/**
++ * srpt_post_recv() - Post an IB receive request.
++ */
++static int srpt_post_recv(struct srpt_device *sdev,
++ struct srpt_recv_ioctx *ioctx)
++{
++ struct ib_sge list;
++ struct ib_recv_wr wr, *bad_wr;
++
++ BUG_ON(!sdev);
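++	/*
++	 * The 64-bit work request ID packs the completion opcode and the
++	 * I/O context ring index, so that the completion handlers can
++	 * recover both via opcode_from_wr_id() and idx_from_wr_id().
++	 */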
++ wr.wr_id = encode_wr_id(IB_WC_RECV, ioctx->ioctx.index);
++
++ list.addr = ioctx->ioctx.dma;
++ list.length = srp_max_req_size;
++ list.lkey = sdev->mr->lkey;
++
++ wr.next = NULL;
++ wr.sg_list = &list;
++ wr.num_sge = 1;
++
++ return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
++}
++
++/**
++ * srpt_post_send() - Post an IB send request.
++ * @ch: RDMA channel to post the send request on.
++ * @ioctx: I/O context of the send request.
++ * @len: length of the request to be sent in bytes.
++ *
++ * Returns zero upon success and a non-zero value upon failure.
++ */
++static int srpt_post_send(struct srpt_rdma_ch *ch,
++ struct srpt_send_ioctx *ioctx, int len)
++{
++ struct ib_sge list;
++ struct ib_send_wr wr, *bad_wr;
++ struct srpt_device *sdev = ch->sport->sdev;
++ int ret;
++
++ ret = -ENOMEM;
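++	/*
++	 * Reserve one send queue slot. The slot is released again in the
++	 * 'out' path below if posting fails, and by the send completion
++	 * handlers otherwise.
++	 */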
++ if (atomic_dec_return(&ch->sq_wr_avail) < 0) {
++ PRINT_WARNING("%s", "IB send queue full (needed 1)");
++ goto out;
++ }
++
++ ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
++ DMA_TO_DEVICE);
++
++ list.addr = ioctx->ioctx.dma;
++ list.length = len;
++ list.lkey = sdev->mr->lkey;
++
++ wr.next = NULL;
++ wr.wr_id = encode_wr_id(IB_WC_SEND, ioctx->ioctx.index);
++ wr.sg_list = &list;
++ wr.num_sge = 1;
++ wr.opcode = IB_WR_SEND;
++ wr.send_flags = IB_SEND_SIGNALED;
++
++ ret = ib_post_send(ch->qp, &wr, &bad_wr);
++
++out:
++ if (ret < 0)
++ atomic_inc(&ch->sq_wr_avail);
++ return ret;
++}
++
++/**
++ * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
++ * @ioctx: Pointer to the I/O context associated with the request.
++ * @srp_cmd: Pointer to the SRP_CMD request data.
++ * @dir: Pointer to the variable to which the transfer direction will be
++ * written.
++ * @data_len: Pointer to the variable to which the total data length of all
++ * descriptors in the SRP_CMD request will be written.
++ *
++ * This function initializes ioctx->nrbuf and ioctx->r_bufs.
++ *
++ * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
++ * -ENOMEM when memory allocation fails and zero upon success.
++ */
++static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
++ struct srp_cmd *srp_cmd,
++ scst_data_direction *dir, u64 *data_len)
++{
++ struct srp_indirect_buf *idb;
++ struct srp_direct_buf *db;
++ unsigned add_cdb_offset;
++ int ret;
++
++ /*
++ * The pointer computations below will only be compiled correctly
++ * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
++ * whether srp_cmd::add_data has been declared as a byte pointer.
++ */
++ BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
++ && !__same_type(srp_cmd->add_data[0], (u8)0));
++
++ BUG_ON(!dir);
++ BUG_ON(!data_len);
++
++ ret = 0;
++ *data_len = 0;
++
++ /*
++ * The lower four bits of the buffer format field contain the DATA-IN
++ * buffer descriptor format, and the highest four bits contain the
++ * DATA-OUT buffer descriptor format.
++ */
++ *dir = SCST_DATA_NONE;
++ if (srp_cmd->buf_fmt & 0xf)
++ /* DATA-IN: transfer data from target to initiator. */
++ *dir = SCST_DATA_READ;
++ else if (srp_cmd->buf_fmt >> 4)
++ /* DATA-OUT: transfer data from initiator to target. */
++ *dir = SCST_DATA_WRITE;
++
++ /*
++ * According to the SRP spec, the lower two bits of the 'ADDITIONAL
++ * CDB LENGTH' field are reserved and the size in bytes of this field
++ * is four times the value specified in bits 3..7. Hence the "& ~3".
++ */
++ add_cdb_offset = srp_cmd->add_cdb_len & ~3;
++ if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
++ ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
++ ioctx->n_rbuf = 1;
++ ioctx->rbufs = &ioctx->single_rbuf;
++
++ db = (struct srp_direct_buf *)(srp_cmd->add_data
++ + add_cdb_offset);
++ memcpy(ioctx->rbufs, db, sizeof *db);
++ *data_len = be32_to_cpu(db->len);
++ } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
++ ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
++ idb = (struct srp_indirect_buf *)(srp_cmd->add_data
++ + add_cdb_offset);
++
++ ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;
++
++ if (ioctx->n_rbuf >
++ (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
++ PRINT_ERROR("received unsupported SRP_CMD request type"
++ " (%u out + %u in != %u / %zu)",
++ srp_cmd->data_out_desc_cnt,
++ srp_cmd->data_in_desc_cnt,
++ be32_to_cpu(idb->table_desc.len),
++ sizeof(*db));
++ ioctx->n_rbuf = 0;
++ ret = -EINVAL;
++ goto out;
++ }
++
++ if (ioctx->n_rbuf == 1)
++ ioctx->rbufs = &ioctx->single_rbuf;
++ else {
++ ioctx->rbufs =
++ kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
++ if (!ioctx->rbufs) {
++ ioctx->n_rbuf = 0;
++ ret = -ENOMEM;
++ goto out;
++ }
++ }
++
++ db = idb->desc_list;
++ memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
++ *data_len = be32_to_cpu(idb->len);
++ }
++out:
++ return ret;
++}
++
++/**
++ * srpt_init_ch_qp() - Initialize queue pair attributes.
++ *
++ * Initializes the attributes of queue pair 'qp' by allowing local write,
++ * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
++ */
++static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
++{
++ struct ib_qp_attr *attr;
++ int ret;
++
++ attr = kzalloc(sizeof *attr, GFP_KERNEL);
++ if (!attr)
++ return -ENOMEM;
++
++ attr->qp_state = IB_QPS_INIT;
++ attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
++ IB_ACCESS_REMOTE_WRITE;
++ attr->port_num = ch->sport->port;
++ attr->pkey_index = 0;
++
++ ret = ib_modify_qp(qp, attr,
++ IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
++ IB_QP_PKEY_INDEX);
++
++ kfree(attr);
++ return ret;
++}
++
++/**
++ * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
++ * @ch: channel of the queue pair.
++ * @qp: queue pair to change the state of.
++ *
++ * Returns zero upon success and a negative value upon failure.
++ *
++ * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
++ * If this structure ever becomes larger, it might be necessary to allocate
++ * it dynamically instead of on the stack.
++ */
++static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
++{
++ struct ib_qp_attr qp_attr;
++ int attr_mask;
++ int ret;
++
++ qp_attr.qp_state = IB_QPS_RTR;
++ ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
++ if (ret)
++ goto out;
++
++ qp_attr.max_dest_rd_atomic = 4;
++
++ ret = ib_modify_qp(qp, &qp_attr, attr_mask);
++
++out:
++ return ret;
++}
++
++/**
++ * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
++ * @ch: channel of the queue pair.
++ * @qp: queue pair to change the state of.
++ *
++ * Returns zero upon success and a negative value upon failure.
++ *
++ * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
++ * If this structure ever becomes larger, it might be necessary to allocate
++ * it dynamically instead of on the stack.
++ */
++static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
++{
++ struct ib_qp_attr qp_attr;
++ int attr_mask;
++ int ret;
++
++ qp_attr.qp_state = IB_QPS_RTS;
++ ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
++ if (ret)
++ goto out;
++
++ qp_attr.max_rd_atomic = 4;
++
++ ret = ib_modify_qp(qp, &qp_attr, attr_mask);
++
++out:
++ return ret;
++}
++
++/**
++ * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
++ */
++static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
++{
++ struct srpt_send_ioctx *ioctx;
++ unsigned long flags;
++
++ BUG_ON(!ch);
++
++ ioctx = NULL;
++ spin_lock_irqsave(&ch->spinlock, flags);
++ if (!list_empty(&ch->free_list)) {
++ ioctx = list_first_entry(&ch->free_list,
++ struct srpt_send_ioctx, free_list);
++ list_del(&ioctx->free_list);
++ }
++ spin_unlock_irqrestore(&ch->spinlock, flags);
++
++ if (!ioctx)
++ return ioctx;
++
++ BUG_ON(ioctx->ch != ch);
++ atomic_set(&ioctx->state, SRPT_STATE_NEW);
++ ioctx->n_rbuf = 0;
++ ioctx->rbufs = NULL;
++ ioctx->n_rdma = 0;
++ ioctx->n_rdma_ius = 0;
++ ioctx->rdma_ius = NULL;
++ ioctx->mapped_sg_count = 0;
++ ioctx->scmnd = NULL;
++
++ return ioctx;
++}
++
++/**
++ * srpt_put_send_ioctx() - Free up resources.
++ */
++static void srpt_put_send_ioctx(struct srpt_send_ioctx *ioctx)
++{
++ struct srpt_rdma_ch *ch;
++ unsigned long flags;
++
++ BUG_ON(!ioctx);
++ ch = ioctx->ch;
++ BUG_ON(!ch);
++
++ WARN_ON(srpt_get_cmd_state(ioctx) != SRPT_STATE_DONE);
++
++ ioctx->scmnd = NULL;
++
++ /*
++ * If the WARN_ON() below gets triggered this means that
++ * srpt_unmap_sg_to_ib_sge() has not been called before
++ * scst_tgt_cmd_done().
++ */
++ WARN_ON(ioctx->mapped_sg_count);
++
++ if (ioctx->n_rbuf > 1) {
++ kfree(ioctx->rbufs);
++ ioctx->rbufs = NULL;
++ ioctx->n_rbuf = 0;
++ }
++
++ spin_lock_irqsave(&ch->spinlock, flags);
++ list_add(&ioctx->free_list, &ch->free_list);
++ spin_unlock_irqrestore(&ch->spinlock, flags);
++}
++
++/**
++ * srpt_abort_scst_cmd() - Abort a SCSI command.
++ * @ioctx: I/O context associated with the SCSI command.
++ * @context: Preferred execution context.
++ */
++static void srpt_abort_scst_cmd(struct srpt_send_ioctx *ioctx,
++ enum scst_exec_context context)
++{
++ struct scst_cmd *scmnd;
++ enum srpt_command_state state;
++
++ TRACE_ENTRY();
++
++ BUG_ON(!ioctx);
++
++ /*
++ * If the command is in a state where the SCST core is waiting for the
++ * ib_srpt driver, change the state to the next state. Changing the
++ * state of the command from SRPT_STATE_NEED_DATA to SRPT_STATE_DATA_IN
++ * ensures that srpt_xmit_response() will call this function a second
++ * time.
++ */
++ state = srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
++ SRPT_STATE_DATA_IN);
++ if (state != SRPT_STATE_NEED_DATA) {
++ state = srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_DATA_IN,
++ SRPT_STATE_DONE);
++ if (state != SRPT_STATE_DATA_IN) {
++ state = srpt_test_and_set_cmd_state(ioctx,
++ SRPT_STATE_CMD_RSP_SENT, SRPT_STATE_DONE);
++ }
++ }
++ if (state == SRPT_STATE_DONE)
++ goto out;
++
++ scmnd = ioctx->scmnd;
++ WARN_ON(!scmnd);
++ if (!scmnd)
++ goto out;
++
++ WARN_ON(ioctx != scst_cmd_get_tgt_priv(scmnd));
++
++ TRACE_DBG("Aborting cmd with state %d and tag %lld",
++ state, scst_cmd_get_tag(scmnd));
++
++ switch (state) {
++ case SRPT_STATE_NEW:
++ case SRPT_STATE_DATA_IN:
++ /*
++ * Do nothing - defer abort processing until
++ * srpt_xmit_response() is invoked.
++ */
++ WARN_ON(!scst_cmd_aborted(scmnd));
++ break;
++ case SRPT_STATE_NEED_DATA:
++ /* SCST_DATA_WRITE - RDMA read error or RDMA read timeout. */
++ scst_rx_data(ioctx->scmnd, SCST_RX_STATUS_ERROR, context);
++ break;
++ case SRPT_STATE_CMD_RSP_SENT:
++ /*
++ * SRP_RSP sending failed or the SRP_RSP send completion has
++ * not been received in time.
++ */
++ srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
++ srpt_put_send_ioctx(ioctx);
++ scst_set_delivery_status(scmnd, SCST_CMD_DELIVERY_ABORTED);
++ scst_tgt_cmd_done(scmnd, context);
++ break;
++ case SRPT_STATE_MGMT_RSP_SENT:
++ /*
++ * Management command response sending failed. This state is
++ * never reached since there is no scmnd associated with
++ * management commands. Note: the SCST core frees these
++ * commands immediately after srpt_tsk_mgmt_done() returned.
++ */
++ WARN_ON("ERROR: unexpected command state");
++ break;
++ default:
++ WARN_ON("ERROR: unexpected command state");
++ break;
++ }
++
++out:
++ ;
++
++ TRACE_EXIT();
++}
++
++/**
++ * srpt_handle_send_err_comp() - Process an IB_WC_SEND error completion.
++ */
++static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id,
++ enum scst_exec_context context)
++{
++ struct srpt_send_ioctx *ioctx;
++ enum srpt_command_state state;
++ struct scst_cmd *scmnd;
++ u32 index;
++
++ atomic_inc(&ch->sq_wr_avail);
++
++ index = idx_from_wr_id(wr_id);
++ ioctx = ch->ioctx_ring[index];
++ state = srpt_get_cmd_state(ioctx);
++ scmnd = ioctx->scmnd;
++
++ EXTRACHECKS_WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
++ && state != SRPT_STATE_MGMT_RSP_SENT
++ && state != SRPT_STATE_NEED_DATA
++ && state != SRPT_STATE_DONE);
++
++ /* If SRP_RSP sending failed, undo the ch->req_lim change. */
++ if (state == SRPT_STATE_CMD_RSP_SENT
++ || state == SRPT_STATE_MGMT_RSP_SENT)
++ atomic_dec(&ch->req_lim);
++ if (state != SRPT_STATE_DONE) {
++ if (scmnd)
++ srpt_abort_scst_cmd(ioctx, context);
++ else {
++ srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
++ srpt_put_send_ioctx(ioctx);
++ }
++ } else
++ PRINT_ERROR("Received more than one IB error completion"
++ " for wr_id = %u.", (unsigned)index);
++}
++
++/**
++ * srpt_handle_send_comp() - Process an IB send completion notification.
++ */
++static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
++ struct srpt_send_ioctx *ioctx,
++ enum scst_exec_context context)
++{
++ enum srpt_command_state state;
++
++ atomic_inc(&ch->sq_wr_avail);
++
++ state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
++
++ EXTRACHECKS_WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
++ && state != SRPT_STATE_MGMT_RSP_SENT
++ && state != SRPT_STATE_DONE);
++
++ if (state != SRPT_STATE_DONE) {
++ struct scst_cmd *scmnd;
++
++ scmnd = ioctx->scmnd;
++ EXTRACHECKS_WARN_ON((state == SRPT_STATE_MGMT_RSP_SENT)
++ != (scmnd == NULL));
++ if (scmnd) {
++ srpt_unmap_sg_to_ib_sge(ch, ioctx);
++ srpt_put_send_ioctx(ioctx);
++ scst_tgt_cmd_done(scmnd, context);
++ } else
++ srpt_put_send_ioctx(ioctx);
++ } else {
++ PRINT_ERROR("IB completion has been received too late for"
++ " wr_id = %u.", ioctx->ioctx.index);
++ }
++}
++
++/**
++ * srpt_handle_rdma_comp() - Process an IB RDMA completion notification.
++ */
++static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
++ struct srpt_send_ioctx *ioctx,
++ enum scst_exec_context context)
++{
++ enum srpt_command_state state;
++ struct scst_cmd *scmnd;
++
++ EXTRACHECKS_WARN_ON(ioctx->n_rdma <= 0);
++ atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
++
++ scmnd = ioctx->scmnd;
++ if (scmnd) {
++ state = srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
++ SRPT_STATE_DATA_IN);
++ if (state == SRPT_STATE_NEED_DATA)
++ scst_rx_data(ioctx->scmnd, SCST_RX_STATUS_SUCCESS,
++ context);
++ else
++ PRINT_ERROR("%s[%d]: wrong state = %d", __func__,
++ __LINE__, state);
++ } else
++ PRINT_ERROR("%s[%d]: scmnd == NULL", __func__, __LINE__);
++}
++
++/**
++ * srpt_handle_rdma_err_comp() - Process an IB RDMA error completion.
++ */
++static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
++ struct srpt_send_ioctx *ioctx,
++ u8 opcode,
++ enum scst_exec_context context)
++{
++ struct scst_cmd *scmnd;
++ enum srpt_command_state state;
++
++ scmnd = ioctx->scmnd;
++ state = srpt_get_cmd_state(ioctx);
++ if (scmnd) {
++ switch (opcode) {
++ case IB_WC_RDMA_READ:
++ if (ioctx->n_rdma <= 0) {
++ PRINT_ERROR("Received invalid RDMA read error"
++ " completion with idx %d",
++ ioctx->ioctx.index);
++ break;
++ }
++ atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
++ if (state == SRPT_STATE_NEED_DATA)
++ srpt_abort_scst_cmd(ioctx, context);
++ else
++ PRINT_ERROR("%s[%d]: wrong state = %d",
++ __func__, __LINE__, state);
++ break;
++ case IB_WC_RDMA_WRITE:
++ scst_set_delivery_status(scmnd,
++ SCST_CMD_DELIVERY_ABORTED);
++ break;
++ default:
++ PRINT_ERROR("%s[%d]: opcode = %u", __func__, __LINE__,
++ opcode);
++ break;
++ }
++ } else
++ PRINT_ERROR("%s[%d]: scmnd == NULL", __func__, __LINE__);
++}
++
++/**
++ * srpt_build_cmd_rsp() - Build an SRP_RSP response.
++ * @ch: RDMA channel through which the request has been received.
++ * @ioctx: I/O context associated with the SRP_CMD request. The response will
++ * be built in the buffer ioctx->buf points at and hence this function will
++ * overwrite the request data.
++ * @tag: tag of the request for which this response is being generated.
++ * @status: value for the STATUS field of the SRP_RSP information unit.
++ * @sense_data: pointer to sense data to be included in the response.
++ * @sense_data_len: length in bytes of the sense data.
++ *
++ * Returns the size in bytes of the SRP_RSP response.
++ *
++ * An SRP_RSP response contains a SCSI status or service response. See also
++ * section 6.9 in the SRP r16a document for the format of an SRP_RSP
++ * response. See also SPC-2 for more information about sense data.
++ */
++static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
++ struct srpt_send_ioctx *ioctx, u64 tag,
++ int status, const u8 *sense_data,
++ int sense_data_len)
++{
++ struct srp_rsp *srp_rsp;
++ int max_sense_len;
++
++ /*
++ * The lowest bit of all SAM-3 status codes is zero (see also
++ * paragraph 5.3 in SAM-3).
++ */
++ EXTRACHECKS_WARN_ON(status & 1);
++
++ srp_rsp = ioctx->ioctx.buf;
++ BUG_ON(!srp_rsp);
++ memset(srp_rsp, 0, sizeof *srp_rsp);
++
++ srp_rsp->opcode = SRP_RSP;
++ srp_rsp->req_lim_delta = __constant_cpu_to_be32(1
++ + atomic_xchg(&ch->req_lim_delta, 0));
++ srp_rsp->tag = tag;
++ srp_rsp->status = status;
++
++ if (!SCST_SENSE_VALID(sense_data))
++ sense_data_len = 0;
++ else {
++ BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
++ max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
++ if (sense_data_len > max_sense_len) {
++ PRINT_WARNING("truncated sense data from %d to %d"
++ " bytes", sense_data_len, max_sense_len);
++ sense_data_len = max_sense_len;
++ }
++
++ srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
++ srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
++ memcpy(srp_rsp + 1, sense_data, sense_data_len);
++ }
++
++ return sizeof(*srp_rsp) + sense_data_len;
++}
++
++/**
++ * srpt_build_tskmgmt_rsp() - Build a task management response.
++ * @ch: RDMA channel through which the request has been received.
++ * @ioctx: I/O context in which the SRP_RSP response will be built.
++ * @rsp_code: RSP_CODE that will be stored in the response.
++ * @tag: Tag of the request for which this response is being generated.
++ *
++ * Returns the size in bytes of the SRP_RSP response.
++ *
++ * An SRP_RSP response contains a SCSI status or service response. See also
++ * section 6.9 in the SRP r16a document for the format of an SRP_RSP
++ * response.
++ */
++static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
++ struct srpt_send_ioctx *ioctx,
++ u8 rsp_code, u64 tag)
++{
++ struct srp_rsp *srp_rsp;
++ int resp_data_len;
++ int resp_len;
++
++ resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4;
++ resp_len = sizeof(*srp_rsp) + resp_data_len;
++
++ srp_rsp = ioctx->ioctx.buf;
++ BUG_ON(!srp_rsp);
++ memset(srp_rsp, 0, sizeof *srp_rsp);
++
++ srp_rsp->opcode = SRP_RSP;
++ srp_rsp->req_lim_delta = __constant_cpu_to_be32(1
++ + atomic_xchg(&ch->req_lim_delta, 0));
++ srp_rsp->tag = tag;
++
++ if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
++ srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
++ srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
++ srp_rsp->data[3] = rsp_code;
++ }
++
++ return resp_len;
++}
++
++/**
++ * srpt_handle_cmd() - Process SRP_CMD.
++ */
++static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
++ struct srpt_recv_ioctx *recv_ioctx,
++ struct srpt_send_ioctx *send_ioctx,
++ enum scst_exec_context context)
++{
++ struct scst_cmd *scmnd;
++ struct srp_cmd *srp_cmd;
++ scst_data_direction dir;
++ u64 data_len;
++ int ret;
++ int atomic;
++
++ BUG_ON(!send_ioctx);
++
++ srp_cmd = recv_ioctx->ioctx.buf;
++
++ atomic = context == SCST_CONTEXT_TASKLET ? SCST_ATOMIC
++ : SCST_NON_ATOMIC;
++ scmnd = scst_rx_cmd(ch->scst_sess, (u8 *) &srp_cmd->lun,
++ sizeof srp_cmd->lun, srp_cmd->cdb,
++ sizeof srp_cmd->cdb, atomic);
++ if (!scmnd) {
++ PRINT_ERROR("0x%llx: allocation of an SCST command failed",
++ srp_cmd->tag);
++ goto err;
++ }
++
++ send_ioctx->scmnd = scmnd;
++
++ ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
++ if (ret) {
++ PRINT_ERROR("0x%llx: parsing SRP descriptor table failed.",
++ srp_cmd->tag);
++ scst_set_cmd_error(scmnd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ }
++
++ switch (srp_cmd->task_attr) {
++ case SRP_CMD_HEAD_OF_Q:
++ scst_cmd_set_queue_type(scmnd, SCST_CMD_QUEUE_HEAD_OF_QUEUE);
++ break;
++ case SRP_CMD_ORDERED_Q:
++ scst_cmd_set_queue_type(scmnd, SCST_CMD_QUEUE_ORDERED);
++ break;
++ case SRP_CMD_SIMPLE_Q:
++ scst_cmd_set_queue_type(scmnd, SCST_CMD_QUEUE_SIMPLE);
++ break;
++ case SRP_CMD_ACA:
++ scst_cmd_set_queue_type(scmnd, SCST_CMD_QUEUE_ACA);
++ break;
++ default:
++ scst_cmd_set_queue_type(scmnd, SCST_CMD_QUEUE_ORDERED);
++ break;
++ }
++
++ scst_cmd_set_tag(scmnd, srp_cmd->tag);
++ scst_cmd_set_tgt_priv(scmnd, send_ioctx);
++ scst_cmd_set_expected(scmnd, dir, data_len);
++ scst_cmd_init_done(scmnd, context);
++
++ return 0;
++
++err:
++ srpt_put_send_ioctx(send_ioctx);
++ return -1;
++}
++
++/**
++ * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
++ *
++ * Returns SCST_MGMT_STATUS_SUCCESS upon success.
++ *
++ * Each task management function is performed by calling one of the
++ * scst_rx_mgmt_fn*() functions. These functions will either report failure
++ * or process the task management function asynchronously. The function
++ * srpt_tsk_mgmt_done() will be called by the SCST core upon completion of the
++ * task management function. When srpt_handle_tsk_mgmt() reports failure
++ * (i.e. returns -1) a response will have been built in ioctx->buf. This
++ * information unit has to be sent back by the caller.
++ *
++ * For more information about SRP_TSK_MGMT information units, see also section
++ * 6.7 in the SRP r16a document.
++ */
++static u8 srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
++ struct srpt_recv_ioctx *recv_ioctx,
++ struct srpt_send_ioctx *send_ioctx)
++{
++ struct srp_tsk_mgmt *srp_tsk;
++ struct srpt_mgmt_ioctx *mgmt_ioctx;
++ int ret;
++
++ ret = SCST_MGMT_STATUS_FAILED;
++
++ BUG_ON(!send_ioctx);
++
++ srp_tsk = recv_ioctx->ioctx.buf;
++
++ TRACE_DBG("recv_tsk_mgmt= %d for task_tag= %lld"
++ " using tag= %lld cm_id= %p sess= %p",
++ srp_tsk->tsk_mgmt_func, srp_tsk->task_tag, srp_tsk->tag,
++ ch->cm_id, ch->scst_sess);
++
++ mgmt_ioctx = kmalloc(sizeof *mgmt_ioctx, GFP_ATOMIC);
++ if (!mgmt_ioctx) {
++ PRINT_ERROR("tag 0x%llx: memory allocation for task management"
++ " function failed. Ignoring task management request"
++ " (func %d).", srp_tsk->task_tag,
++ srp_tsk->tsk_mgmt_func);
++ goto err;
++ }
++
++ mgmt_ioctx->ioctx = send_ioctx;
++ BUG_ON(mgmt_ioctx->ioctx->ch != ch);
++ mgmt_ioctx->tag = srp_tsk->tag;
++
++ switch (srp_tsk->tsk_mgmt_func) {
++ case SRP_TSK_ABORT_TASK:
++ TRACE_DBG("%s", "Processing SRP_TSK_ABORT_TASK");
++ ret = scst_rx_mgmt_fn_tag(ch->scst_sess,
++ SCST_ABORT_TASK,
++ srp_tsk->task_tag,
++ SCST_ATOMIC, mgmt_ioctx);
++ break;
++ case SRP_TSK_ABORT_TASK_SET:
++ TRACE_DBG("%s", "Processing SRP_TSK_ABORT_TASK_SET");
++ ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
++ SCST_ABORT_TASK_SET,
++ (u8 *) &srp_tsk->lun,
++ sizeof srp_tsk->lun,
++ SCST_ATOMIC, mgmt_ioctx);
++ break;
++ case SRP_TSK_CLEAR_TASK_SET:
++ TRACE_DBG("%s", "Processing SRP_TSK_CLEAR_TASK_SET");
++ ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
++ SCST_CLEAR_TASK_SET,
++ (u8 *) &srp_tsk->lun,
++ sizeof srp_tsk->lun,
++ SCST_ATOMIC, mgmt_ioctx);
++ break;
++ case SRP_TSK_LUN_RESET:
++ TRACE_DBG("%s", "Processing SRP_TSK_LUN_RESET");
++ ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
++ SCST_LUN_RESET,
++ (u8 *) &srp_tsk->lun,
++ sizeof srp_tsk->lun,
++ SCST_ATOMIC, mgmt_ioctx);
++ break;
++ case SRP_TSK_CLEAR_ACA:
++ TRACE_DBG("%s", "Processing SRP_TSK_CLEAR_ACA");
++ ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
++ SCST_CLEAR_ACA,
++ (u8 *) &srp_tsk->lun,
++ sizeof srp_tsk->lun,
++ SCST_ATOMIC, mgmt_ioctx);
++ break;
++ default:
++ TRACE_DBG("%s", "Unsupported task management function.");
++ ret = SCST_MGMT_STATUS_FN_NOT_SUPPORTED;
++ }
++
++ if (ret != SCST_MGMT_STATUS_SUCCESS)
++ goto err;
++ return ret;
++
++err:
++ kfree(mgmt_ioctx);
++ return ret;
++}
++
++static u8 scst_to_srp_tsk_mgmt_status(const int scst_mgmt_status)
++{
++ switch (scst_mgmt_status) {
++ case SCST_MGMT_STATUS_SUCCESS:
++ return SRP_TSK_MGMT_SUCCESS;
++ case SCST_MGMT_STATUS_FN_NOT_SUPPORTED:
++ return SRP_TSK_MGMT_FUNC_NOT_SUPP;
++ case SCST_MGMT_STATUS_TASK_NOT_EXIST:
++ case SCST_MGMT_STATUS_LUN_NOT_EXIST:
++ case SCST_MGMT_STATUS_REJECTED:
++ case SCST_MGMT_STATUS_FAILED:
++ default:
++ break;
++ }
++ return SRP_TSK_MGMT_FAILED;
++}
++
++/**
++ * srpt_handle_new_iu() - Process a newly received information unit.
++ * @ch: RDMA channel through which the information unit has been received.
++ * @ioctx: SRPT I/O context associated with the information unit.
++ */
++static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
++ struct srpt_recv_ioctx *recv_ioctx,
++ struct srpt_send_ioctx *send_ioctx,
++ enum scst_exec_context context)
++{
++ struct srp_cmd *srp_cmd;
++ enum rdma_ch_state ch_state;
++
++ BUG_ON(!ch);
++ BUG_ON(!recv_ioctx);
++
++ ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
++ recv_ioctx->ioctx.dma, srp_max_req_size,
++ DMA_FROM_DEVICE);
++
++ ch_state = atomic_read(&ch->state);
++ srp_cmd = recv_ioctx->ioctx.buf;
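++	/*
++	 * Information units that arrive before the channel has reached the
++	 * LIVE state, or for which no send I/O context is available, are
++	 * queued on cmd_wait_list and processed later (see
++	 * srpt_cm_rtu_recv() and srpt_process_send_completion()).
++	 */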
++ if (unlikely(ch_state == RDMA_CHANNEL_CONNECTING)) {
++ list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
++ goto out;
++ }
++
++ if (unlikely(ch_state == RDMA_CHANNEL_DISCONNECTING))
++ goto post_recv;
++
++ if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
++ if (!send_ioctx)
++ send_ioctx = srpt_get_send_ioctx(ch);
++ if (unlikely(!send_ioctx)) {
++ list_add_tail(&recv_ioctx->wait_list,
++ &ch->cmd_wait_list);
++ goto out;
++ }
++ }
++
++ WARN_ON(ch_state != RDMA_CHANNEL_LIVE);
++
++ switch (srp_cmd->opcode) {
++ case SRP_CMD:
++ srpt_handle_cmd(ch, recv_ioctx, send_ioctx, context);
++ break;
++ case SRP_TSK_MGMT:
++ srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
++ break;
++ case SRP_I_LOGOUT:
++ PRINT_ERROR("%s", "Not yet implemented: SRP_I_LOGOUT");
++ break;
++ case SRP_CRED_RSP:
++ TRACE_DBG("%s", "received SRP_CRED_RSP");
++ break;
++ case SRP_AER_RSP:
++ TRACE_DBG("%s", "received SRP_AER_RSP");
++ break;
++ case SRP_RSP:
++ PRINT_ERROR("%s", "Received SRP_RSP");
++ break;
++ default:
++ PRINT_ERROR("received IU with unknown opcode 0x%x",
++ srp_cmd->opcode);
++ break;
++ }
++
++post_recv:
++ srpt_post_recv(ch->sport->sdev, recv_ioctx);
++out:
++ return;
++}
++
++static void srpt_process_rcv_completion(struct ib_cq *cq,
++ struct srpt_rdma_ch *ch,
++ enum scst_exec_context context,
++ struct ib_wc *wc)
++{
++ struct srpt_device *sdev = ch->sport->sdev;
++ struct srpt_recv_ioctx *ioctx;
++ u32 index;
++
++ index = idx_from_wr_id(wc->wr_id);
++ if (wc->status == IB_WC_SUCCESS) {
++ int req_lim;
++
++ req_lim = atomic_dec_return(&ch->req_lim);
++ if (unlikely(req_lim < 0))
++ PRINT_ERROR("req_lim = %d < 0", req_lim);
++ ioctx = sdev->ioctx_ring[index];
++ srpt_handle_new_iu(ch, ioctx, NULL, context);
++ } else {
++ PRINT_INFO("receiving failed for idx %u with status %d",
++ index, wc->status);
++ }
++}
++
++/**
++ * srpt_process_send_completion() - Process an IB send completion.
++ *
++ * Note: Although this has not yet been observed during tests, at least in
++ * theory it is possible that the srpt_get_send_ioctx() call invoked by
++ * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
++ * value in each response is set to one, and it is possible that this response
++ * makes the initiator send a new request before the send completion for that
++ * response has been processed. This could e.g. happen if the call to
++ * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
++ * if IB retransmission causes generation of the send completion to be
++ * delayed. Incoming information units for which srpt_get_send_ioctx() fails
++ * are queued on cmd_wait_list. The code below processes these delayed
++ * requests one at a time.
++ */
++static void srpt_process_send_completion(struct ib_cq *cq,
++ struct srpt_rdma_ch *ch,
++ enum scst_exec_context context,
++ struct ib_wc *wc)
++{
++ struct srpt_send_ioctx *send_ioctx;
++ uint32_t index;
++ u8 opcode;
++
++ index = idx_from_wr_id(wc->wr_id);
++ opcode = opcode_from_wr_id(wc->wr_id);
++ send_ioctx = ch->ioctx_ring[index];
++ if (wc->status == IB_WC_SUCCESS) {
++ if (opcode == IB_WC_SEND)
++ srpt_handle_send_comp(ch, send_ioctx, context);
++ else {
++ EXTRACHECKS_WARN_ON(wc->opcode != IB_WC_RDMA_READ);
++ srpt_handle_rdma_comp(ch, send_ioctx, context);
++ }
++ } else {
++ if (opcode == IB_WC_SEND) {
++ PRINT_INFO("sending response for idx %u failed with"
++ " status %d", index, wc->status);
++ srpt_handle_send_err_comp(ch, wc->wr_id, context);
++ } else {
++ PRINT_INFO("RDMA %s for idx %u failed with status %d",
++ opcode == IB_WC_RDMA_READ ? "read"
++ : opcode == IB_WC_RDMA_WRITE ? "write"
++ : "???", index, wc->status);
++ srpt_handle_rdma_err_comp(ch, send_ioctx, opcode,
++ context);
++ }
++ }
++
++ while (unlikely(opcode == IB_WC_SEND
++ && !list_empty(&ch->cmd_wait_list)
++ && atomic_read(&ch->state) == RDMA_CHANNEL_LIVE
++ && (send_ioctx = srpt_get_send_ioctx(ch)) != NULL)) {
++ struct srpt_recv_ioctx *recv_ioctx;
++
++ recv_ioctx = list_first_entry(&ch->cmd_wait_list,
++ struct srpt_recv_ioctx,
++ wait_list);
++ list_del(&recv_ioctx->wait_list);
++ srpt_handle_new_iu(ch, recv_ioctx, send_ioctx, context);
++ }
++}
++
++static void srpt_process_completion(struct ib_cq *cq,
++ struct srpt_rdma_ch *ch,
++ enum scst_exec_context context)
++{
++ struct ib_wc *const wc = ch->wc;
++ int i, n;
++
++ EXTRACHECKS_WARN_ON(cq != ch->cq);
++
++ ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
++ while ((n = ib_poll_cq(cq, ARRAY_SIZE(ch->wc), wc)) > 0) {
++ for (i = 0; i < n; i++) {
++ if (opcode_from_wr_id(wc[i].wr_id) & IB_WC_RECV)
++ srpt_process_rcv_completion(cq, ch, context,
++ &wc[i]);
++ else
++ srpt_process_send_completion(cq, ch, context,
++ &wc[i]);
++ }
++ }
++}
++
++/**
++ * srpt_completion() - IB completion queue callback function.
++ *
++ * Notes:
++ * - It is guaranteed that a completion handler will never be invoked
++ * concurrently on two different CPUs for the same completion queue. See also
++ * Documentation/infiniband/core_locking.txt and the implementation of
++ * handle_edge_irq() in kernel/irq/chip.c.
++ * - When threaded IRQs are enabled, completion handlers are invoked in thread
++ * context instead of interrupt context.
++ */
++static void srpt_completion(struct ib_cq *cq, void *ctx)
++{
++ struct srpt_rdma_ch *ch = ctx;
++
++ BUG_ON(!ch);
++ atomic_inc(&ch->processing_compl);
++ switch (thread) {
++ case MODE_IB_COMPLETION_IN_THREAD:
++ wake_up_interruptible(&ch->wait_queue);
++ break;
++ case MODE_IB_COMPLETION_IN_SIRQ:
++ srpt_process_completion(cq, ch, SCST_CONTEXT_THREAD);
++ break;
++ case MODE_ALL_IN_SIRQ:
++ srpt_process_completion(cq, ch, SCST_CONTEXT_TASKLET);
++ break;
++ }
++ atomic_dec(&ch->processing_compl);
++}
++
++static int srpt_compl_thread(void *arg)
++{
++ struct srpt_rdma_ch *ch;
++
++ /* Hibernation / freezing of the SRPT kernel thread is not supported. */
++ current->flags |= PF_NOFREEZE;
++
++ ch = arg;
++ BUG_ON(!ch);
++ PRINT_INFO("Session %s: kernel thread %s (PID %d) started",
++ ch->sess_name, ch->thread->comm, current->pid);
++ while (!kthread_should_stop()) {
++ wait_event_interruptible(ch->wait_queue,
++ (srpt_process_completion(ch->cq, ch,
++ SCST_CONTEXT_THREAD),
++ kthread_should_stop()));
++ }
++ PRINT_INFO("Session %s: kernel thread %s (PID %d) stopped",
++ ch->sess_name, ch->thread->comm, current->pid);
++ return 0;
++}
++
++/**
++ * srpt_create_ch_ib() - Create receive and send completion queues.
++ */
++static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
++{
++ struct ib_qp_init_attr *qp_init;
++ struct srpt_device *sdev = ch->sport->sdev;
++ int ret;
++
++ EXTRACHECKS_WARN_ON(ch->rq_size < 1);
++
++ ret = -ENOMEM;
++ qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
++ if (!qp_init)
++ goto out;
++
++ ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
++ ch->rq_size + srpt_sq_size, 0);
++ if (IS_ERR(ch->cq)) {
++ ret = PTR_ERR(ch->cq);
++ PRINT_ERROR("failed to create CQ cqe= %d ret= %d",
++ ch->rq_size + srpt_sq_size, ret);
++ goto out;
++ }
++
++ qp_init->qp_context = (void *)ch;
++ qp_init->event_handler
++ = (void(*)(struct ib_event *, void*))srpt_qp_event;
++ qp_init->send_cq = ch->cq;
++ qp_init->recv_cq = ch->cq;
++ qp_init->srq = sdev->srq;
++ qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
++ qp_init->qp_type = IB_QPT_RC;
++ qp_init->cap.max_send_wr = srpt_sq_size;
++ qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
++
++ ch->qp = ib_create_qp(sdev->pd, qp_init);
++ if (IS_ERR(ch->qp)) {
++ ret = PTR_ERR(ch->qp);
++ PRINT_ERROR("failed to create_qp ret= %d", ret);
++ goto err_destroy_cq;
++ }
++
++ atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
++
++ TRACE_DBG("%s: max_cqe= %d max_sge= %d sq_size = %d"
++ " cm_id= %p", __func__, ch->cq->cqe,
++ qp_init->cap.max_send_sge, qp_init->cap.max_send_wr,
++ ch->cm_id);
++
++ ret = srpt_init_ch_qp(ch, ch->qp);
++ if (ret)
++ goto err_destroy_qp;
++
++ if (thread == MODE_IB_COMPLETION_IN_THREAD) {
++ init_waitqueue_head(&ch->wait_queue);
++
++ TRACE_DBG("creating IB completion thread for session %s",
++ ch->sess_name);
++
++ ch->thread = kthread_run(srpt_compl_thread, ch,
++ "ib_srpt_compl");
++ if (IS_ERR(ch->thread)) {
++ PRINT_ERROR("failed to create kernel thread %ld",
++ PTR_ERR(ch->thread));
++ ch->thread = NULL;
++ goto err_destroy_qp;
++ }
++ } else
++ ib_req_notify_cq(ch->cq, IB_CQ_NEXT_COMP);
++
++out:
++ kfree(qp_init);
++ return ret;
++
++err_destroy_qp:
++ ib_destroy_qp(ch->qp);
++err_destroy_cq:
++ ib_destroy_cq(ch->cq);
++ goto out;
++}
++
++static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
++{
++ if (ch->thread)
++ kthread_stop(ch->thread);
++
++ ib_destroy_qp(ch->qp);
++ ib_destroy_cq(ch->cq);
++}
++
++/**
++ * srpt_unregister_channel() - Start RDMA channel disconnection.
++ *
++ * Note: The caller must hold ch->sdev->spinlock.
++ */
++static void srpt_unregister_channel(struct srpt_rdma_ch *ch)
++ __acquires(&ch->sport->sdev->spinlock)
++ __releases(&ch->sport->sdev->spinlock)
++{
++ struct srpt_device *sdev;
++ struct ib_qp_attr qp_attr;
++ int ret;
++
++ sdev = ch->sport->sdev;
++ list_del(&ch->list);
++ atomic_set(&ch->state, RDMA_CHANNEL_DISCONNECTING);
++ spin_unlock_irq(&sdev->spinlock);
++
++ qp_attr.qp_state = IB_QPS_ERR;
++ ret = ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
++ if (ret < 0)
++ PRINT_ERROR("Setting queue pair in error state failed: %d",
++ ret);
++
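++	/*
++	 * Wait until all srpt_completion() invocations that are still
++	 * running for this channel have finished; srpt_completion()
++	 * brackets its work with ch->processing_compl.
++	 */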
++ while (atomic_read(&ch->processing_compl))
++ ;
++
++ /*
++ * At this point it is guaranteed that no new commands will be sent to
++ * the SCST core for channel ch, which is a requirement for
++ * scst_unregister_session().
++ */
++
++ TRACE_DBG("unregistering session %p", ch->scst_sess);
++ scst_unregister_session(ch->scst_sess, 0, srpt_release_channel);
++ spin_lock_irq(&sdev->spinlock);
++}
++
++/**
++ * srpt_release_channel_by_cmid() - Release a channel.
++ * @cm_id: Pointer to the CM ID of the channel to be released.
++ *
++ * Note: Must be called from inside srpt_cm_handler to avoid a race between
++ * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
++ * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
++ * waits until all SCST sessions for the associated IB device have been
++ * unregistered and SCST session unregistration involves a call to
++ * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
++ * this function has finished).
++ */
++static void srpt_release_channel_by_cmid(struct ib_cm_id *cm_id)
++{
++ struct srpt_device *sdev;
++ struct srpt_rdma_ch *ch;
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_WARN_ON_ONCE(irqs_disabled());
++
++ sdev = cm_id->context;
++ BUG_ON(!sdev);
++ spin_lock_irq(&sdev->spinlock);
++ list_for_each_entry(ch, &sdev->rch_list, list) {
++ if (ch->cm_id == cm_id) {
++ srpt_unregister_channel(ch);
++ break;
++ }
++ }
++ spin_unlock_irq(&sdev->spinlock);
++
++ TRACE_EXIT();
++}
++
++/**
++ * srpt_find_channel() - Look up an RDMA channel.
++ * @cm_id: Pointer to the CM ID of the channel to be looked up.
++ *
++ * Returns NULL if no matching RDMA channel has been found.
++ */
++static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev,
++ struct ib_cm_id *cm_id)
++{
++ struct srpt_rdma_ch *ch;
++ bool found;
++
++ EXTRACHECKS_WARN_ON_ONCE(irqs_disabled());
++ BUG_ON(!sdev);
++
++ found = false;
++ spin_lock_irq(&sdev->spinlock);
++ list_for_each_entry(ch, &sdev->rch_list, list) {
++ if (ch->cm_id == cm_id) {
++ found = true;
++ break;
++ }
++ }
++ spin_unlock_irq(&sdev->spinlock);
++
++ return found ? ch : NULL;
++}
++
++/**
++ * srpt_release_channel() - Release all resources associated with an RDMA channel.
++ *
++ * Notes:
++ * - The caller must have removed the channel from the channel list before
++ * calling this function.
++ * - Must be called as a callback function via scst_unregister_session(). Never
++ * call this function directly because doing so would trigger several race
++ * conditions.
++ * - Do not access ch->sport or ch->sport->sdev in this function because the
++ * memory that was allocated for the sport and/or sdev data structures may
++ * already have been freed at the time this function is called.
++ */
++static void srpt_release_channel(struct scst_session *scst_sess)
++{
++ struct srpt_rdma_ch *ch;
++
++ TRACE_ENTRY();
++
++ ch = scst_sess_get_tgt_priv(scst_sess);
++ BUG_ON(!ch);
++ WARN_ON(atomic_read(&ch->state) != RDMA_CHANNEL_DISCONNECTING);
++
++ TRACE_DBG("destroying cm_id %p", ch->cm_id);
++ BUG_ON(!ch->cm_id);
++ ib_destroy_cm_id(ch->cm_id);
++
++ srpt_destroy_ch_ib(ch);
++
++ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
++ ch->sport->sdev, ch->rq_size,
++ srp_max_rsp_size, DMA_TO_DEVICE);
++
++ kfree(ch);
++
++ TRACE_EXIT();
++}
++
++/**
++ * srpt_enable_target() - Allows enabling a target via sysfs.
++ */
++static int srpt_enable_target(struct scst_tgt *scst_tgt, bool enable)
++{
++ struct srpt_device *sdev = scst_tgt_get_tgt_priv(scst_tgt);
++
++ EXTRACHECKS_WARN_ON_ONCE(irqs_disabled());
++
++ if (!sdev)
++ return -ENOENT;
++
++ TRACE_DBG("%s target %s", enable ? "Enabling" : "Disabling",
++ sdev->device->name);
++
++ spin_lock_irq(&sdev->spinlock);
++ sdev->enabled = enable;
++ spin_unlock_irq(&sdev->spinlock);
++
++ return 0;
++}
++
++/**
++ * srpt_is_target_enabled() - Allows querying a target's status via sysfs.
++ */
++static bool srpt_is_target_enabled(struct scst_tgt *scst_tgt)
++{
++ struct srpt_device *sdev = scst_tgt_get_tgt_priv(scst_tgt);
++ bool res;
++
++ EXTRACHECKS_WARN_ON_ONCE(irqs_disabled());
++
++ if (!sdev)
++ return false;
++
++ spin_lock_irq(&sdev->spinlock);
++ res = sdev->enabled;
++ spin_unlock_irq(&sdev->spinlock);
++ return res;
++}
++
++/**
++ * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
++ *
++ * Ownership of the cm_id is transferred to the SCST session if this function
++ * returns zero. Otherwise the caller remains the owner of cm_id.
++ */
++static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
++ struct ib_cm_req_event_param *param,
++ void *private_data)
++{
++ struct srpt_device *sdev = cm_id->context;
++ struct srp_login_req *req;
++ struct srp_login_rsp *rsp;
++ struct srp_login_rej *rej;
++ struct ib_cm_rep_param *rep_param;
++ struct srpt_rdma_ch *ch, *tmp_ch;
++ u32 it_iu_len;
++ int i;
++ int ret = 0;
++
++ EXTRACHECKS_WARN_ON_ONCE(irqs_disabled());
++
++ if (WARN_ON(!sdev || !private_data))
++ return -EINVAL;
++
++ req = (struct srp_login_req *)private_data;
++
++ it_iu_len = be32_to_cpu(req->req_it_iu_len);
++
++ PRINT_INFO("Received SRP_LOGIN_REQ with"
++ " i_port_id 0x%llx:0x%llx, t_port_id 0x%llx:0x%llx and it_iu_len %d"
++ " on port %d (guid=0x%llx:0x%llx)",
++ be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
++ be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
++ be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
++ be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
++ it_iu_len,
++ param->port,
++ be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
++ be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
++
++ rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
++ rej = kzalloc(sizeof *rej, GFP_KERNEL);
++ rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
++
++ if (!rsp || !rej || !rep_param) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
++ rej->reason = __constant_cpu_to_be32(
++ SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
++ ret = -EINVAL;
++ PRINT_ERROR("rejected SRP_LOGIN_REQ because its"
++ " length (%d bytes) is out of range (%d .. %d)",
++ it_iu_len, 64, srp_max_req_size);
++ goto reject;
++ }
++
++ if (!srpt_is_target_enabled(sdev->scst_tgt)) {
++ rej->reason = __constant_cpu_to_be32(
++ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
++ ret = -EINVAL;
++ PRINT_ERROR("rejected SRP_LOGIN_REQ because the target %s"
++ " has not yet been enabled", sdev->device->name);
++ goto reject;
++ }
++
++ if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
++ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
++
++ spin_lock_irq(&sdev->spinlock);
++
++ list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
++ if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
++ && !memcmp(ch->t_port_id, req->target_port_id, 16)
++ && param->port == ch->sport->port
++ && param->listen_id == ch->sport->sdev->cm_id
++ && ch->cm_id) {
++ enum rdma_ch_state prev_state;
++
++ /* found an existing channel */
++ TRACE_DBG("Found existing channel name= %s"
++ " cm_id= %p state= %d",
++ ch->sess_name, ch->cm_id,
++ atomic_read(&ch->state));
++
++ prev_state = atomic_xchg(&ch->state,
++ RDMA_CHANNEL_DISCONNECTING);
++ if (prev_state == RDMA_CHANNEL_CONNECTING)
++ srpt_unregister_channel(ch);
++
++ spin_unlock_irq(&sdev->spinlock);
++
++ rsp->rsp_flags =
++ SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
++
++ if (prev_state == RDMA_CHANNEL_LIVE) {
++ ib_send_cm_dreq(ch->cm_id, NULL, 0);
++ PRINT_INFO("disconnected"
++ " session %s because a new"
++ " SRP_LOGIN_REQ has been received.",
++ ch->sess_name);
++ } else if (prev_state ==
++ RDMA_CHANNEL_CONNECTING) {
++ PRINT_ERROR("%s", "rejected"
++ " SRP_LOGIN_REQ because another login"
++ " request is being processed.");
++ ib_send_cm_rej(ch->cm_id,
++ IB_CM_REJ_NO_RESOURCES,
++ NULL, 0, NULL, 0);
++ }
++
++ spin_lock_irq(&sdev->spinlock);
++ }
++ }
++
++ spin_unlock_irq(&sdev->spinlock);
++
++ } else
++ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
++
++ if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
++ || *(__be64 *)(req->target_port_id + 8) !=
++ cpu_to_be64(srpt_service_guid)) {
++ rej->reason = __constant_cpu_to_be32(
++ SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
++ ret = -ENOMEM;
++ PRINT_ERROR("%s", "rejected SRP_LOGIN_REQ because it"
++ " has an invalid target port identifier.");
++ goto reject;
++ }
++
++ ch = kzalloc(sizeof *ch, GFP_KERNEL);
++ if (!ch) {
++ rej->reason = __constant_cpu_to_be32(
++ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
++ PRINT_ERROR("%s",
++ "rejected SRP_LOGIN_REQ because out of memory.");
++ ret = -ENOMEM;
++ goto reject;
++ }
++
++ memcpy(ch->i_port_id, req->initiator_port_id, 16);
++ memcpy(ch->t_port_id, req->target_port_id, 16);
++ ch->sport = &sdev->port[param->port - 1];
++ ch->cm_id = cm_id;
++ /*
++ * Avoid QUEUE_FULL conditions by limiting the number of buffers used
++ * for the SRP protocol to the SCST SCSI command queue size.
++ */
++ ch->rq_size = min(SRPT_RQ_SIZE, scst_get_max_lun_commands(NULL, 0));
++ atomic_set(&ch->processing_compl, 0);
++ atomic_set(&ch->state, RDMA_CHANNEL_CONNECTING);
++ INIT_LIST_HEAD(&ch->cmd_wait_list);
++
++ spin_lock_init(&ch->spinlock);
++ ch->ioctx_ring = (struct srpt_send_ioctx **)
++ srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
++ sizeof(*ch->ioctx_ring[0]),
++ srp_max_rsp_size, DMA_TO_DEVICE);
++ if (!ch->ioctx_ring)
++ goto free_ch;
++
++ INIT_LIST_HEAD(&ch->free_list);
++ for (i = 0; i < ch->rq_size; i++) {
++ ch->ioctx_ring[i]->ch = ch;
++ list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
++ }
++
++ ret = srpt_create_ch_ib(ch);
++ if (ret) {
++ rej->reason = __constant_cpu_to_be32(
++ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
++ PRINT_ERROR("%s", "rejected SRP_LOGIN_REQ because creating"
++ " a new RDMA channel failed.");
++ goto free_ring;
++ }
++
++ ret = srpt_ch_qp_rtr(ch, ch->qp);
++ if (ret) {
++ rej->reason = __constant_cpu_to_be32(
++ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
++ PRINT_ERROR("rejected SRP_LOGIN_REQ because enabling"
++ " RTR failed (error code = %d)", ret);
++ goto destroy_ib;
++ }
++
++ if (use_port_guid_in_session_name) {
++ /*
++ * If the kernel module parameter use_port_guid_in_session_name
++ * has been specified, use a combination of the target port
++ * GUID and the initiator port ID as the session name. This
++ * was the original behavior of the SRP target implementation
++ * (i.e. before the SRPT was included in OFED 1.3).
++ */
++ snprintf(ch->sess_name, sizeof(ch->sess_name),
++ "0x%016llx%016llx",
++ be64_to_cpu(*(__be64 *)
++ &sdev->port[param->port - 1].gid.raw[8]),
++ be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
++ } else {
++ /*
++		 * Default behavior: use the initiator port identifier as the
++ * session name.
++ */
++ snprintf(ch->sess_name, sizeof(ch->sess_name),
++ "0x%016llx%016llx",
++ be64_to_cpu(*(__be64 *)ch->i_port_id),
++ be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
++ }
++
++ TRACE_DBG("registering session %s", ch->sess_name);
++
++ BUG_ON(!sdev->scst_tgt);
++ ch->scst_sess = scst_register_session(sdev->scst_tgt, 0, ch->sess_name,
++ ch, NULL, NULL);
++ if (!ch->scst_sess) {
++ rej->reason = __constant_cpu_to_be32(
++ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
++ TRACE_DBG("%s", "Failed to create SCST session");
++ goto release_channel;
++ }
++
++ TRACE_DBG("Establish connection sess=%p name=%s cm_id=%p",
++ ch->scst_sess, ch->sess_name, ch->cm_id);
++
++ /* create srp_login_response */
++ rsp->opcode = SRP_LOGIN_RSP;
++ rsp->tag = req->tag;
++ rsp->max_it_iu_len = req->req_it_iu_len;
++ rsp->max_ti_iu_len = req->req_it_iu_len;
++ ch->max_ti_iu_len = it_iu_len;
++ rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
++ | SRP_BUF_FORMAT_INDIRECT);
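++	/*
++	 * Advertise an initial credit equal to the ring size in the login
++	 * response; ch->req_lim tracks the same request limit on the
++	 * target side.
++	 */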
++ rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
++ atomic_set(&ch->req_lim, ch->rq_size);
++ atomic_set(&ch->req_lim_delta, 0);
++
++ /* create cm reply */
++ rep_param->qp_num = ch->qp->qp_num;
++ rep_param->private_data = (void *)rsp;
++ rep_param->private_data_len = sizeof *rsp;
++ rep_param->rnr_retry_count = 7;
++ rep_param->flow_control = 1;
++ rep_param->failover_accepted = 0;
++ rep_param->srq = 1;
++ rep_param->responder_resources = 4;
++ rep_param->initiator_depth = 4;
++
++ ret = ib_send_cm_rep(cm_id, rep_param);
++ if (ret) {
++ PRINT_ERROR("sending SRP_LOGIN_REQ response failed"
++ " (error code = %d)", ret);
++ goto release_channel;
++ }
++
++ spin_lock_irq(&sdev->spinlock);
++ list_add_tail(&ch->list, &sdev->rch_list);
++ spin_unlock_irq(&sdev->spinlock);
++
++ goto out;
++
++release_channel:
++ atomic_set(&ch->state, RDMA_CHANNEL_DISCONNECTING);
++ scst_unregister_session(ch->scst_sess, 0, NULL);
++ ch->scst_sess = NULL;
++
++destroy_ib:
++ srpt_destroy_ch_ib(ch);
++
++free_ring:
++ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
++ ch->sport->sdev, ch->rq_size,
++ srp_max_rsp_size, DMA_TO_DEVICE);
++
++free_ch:
++ kfree(ch);
++
++reject:
++ rej->opcode = SRP_LOGIN_REJ;
++ rej->tag = req->tag;
++ rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
++ | SRP_BUF_FORMAT_INDIRECT);
++
++ ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
++ (void *)rej, sizeof *rej);
++
++out:
++ kfree(rep_param);
++ kfree(rsp);
++ kfree(rej);
++
++ return ret;
++}
++
++static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
++{
++ PRINT_INFO("Received InfiniBand REJ packet for cm_id %p.", cm_id);
++ srpt_release_channel_by_cmid(cm_id);
++}
++
++/**
++ * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or IB_CM_USER_ESTABLISHED event.
++ *
++ * An IB_CM_RTU_RECEIVED message indicates that the connection is established
++ * and that the recipient may begin transmitting (RTU = ready to use).
++ */
++static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
++{
++ struct srpt_rdma_ch *ch;
++ int ret;
++
++ ch = srpt_find_channel(cm_id->context, cm_id);
++ WARN_ON(!ch);
++ if (!ch)
++ goto out;
++
++ if (srpt_test_and_set_channel_state(ch, RDMA_CHANNEL_CONNECTING,
++ RDMA_CHANNEL_LIVE) == RDMA_CHANNEL_CONNECTING) {
++ struct srpt_recv_ioctx *ioctx, *ioctx_tmp;
++
++ ret = srpt_ch_qp_rts(ch, ch->qp);
++
++ list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
++ wait_list) {
++ list_del(&ioctx->wait_list);
++ srpt_handle_new_iu(ch, ioctx, NULL,
++ SCST_CONTEXT_THREAD);
++ }
++ if (ret && srpt_test_and_set_channel_state(ch,
++ RDMA_CHANNEL_LIVE,
++ RDMA_CHANNEL_DISCONNECTING) == RDMA_CHANNEL_LIVE) {
++ TRACE_DBG("cm_id=%p sess_name=%s state=%d",
++ cm_id, ch->sess_name,
++ atomic_read(&ch->state));
++ ib_send_cm_dreq(ch->cm_id, NULL, 0);
++ }
++ }
++
++out:
++ ;
++}
++
++static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
++{
++ PRINT_INFO("Received InfiniBand TimeWait exit for cm_id %p.", cm_id);
++ srpt_release_channel_by_cmid(cm_id);
++}
++
++static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
++{
++ PRINT_INFO("Received InfiniBand REP error for cm_id %p.", cm_id);
++ srpt_release_channel_by_cmid(cm_id);
++}
++
++/**
++ * srpt_cm_dreq_recv() - Process reception of a DREQ message.
++ */
++static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
++{
++ struct srpt_rdma_ch *ch;
++
++ ch = srpt_find_channel(cm_id->context, cm_id);
++ if (!ch) {
++ TRACE_DBG("Received DREQ for channel %p which is already"
++ " being unregistered.", cm_id);
++ goto out;
++ }
++
++ TRACE_DBG("cm_id= %p ch->state= %d", cm_id, atomic_read(&ch->state));
++
++ switch (atomic_read(&ch->state)) {
++ case RDMA_CHANNEL_LIVE:
++ case RDMA_CHANNEL_CONNECTING:
++ ib_send_cm_drep(ch->cm_id, NULL, 0);
++ PRINT_INFO("Received DREQ and sent DREP for session %s.",
++ ch->sess_name);
++ break;
++ case RDMA_CHANNEL_DISCONNECTING:
++ default:
++ break;
++ }
++
++out:
++ ;
++}
++
++/**
++ * srpt_cm_drep_recv() - Process reception of a DREP message.
++ */
++static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
++{
++ PRINT_INFO("Received InfiniBand DREP message for cm_id %p.", cm_id);
++ srpt_release_channel_by_cmid(cm_id);
++}
++
++/**
++ * srpt_cm_handler() - IB connection manager callback function.
++ *
++ * A non-zero return value will cause the caller to destroy the CM ID.
++ *
++ * Note: srpt_cm_handler() must only return a non-zero value when transferring
++ * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning
++ * a non-zero value in any other case will trigger a race with the
++ * ib_destroy_cm_id() call in srpt_release_channel().
++ */
++static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
++{
++ int ret;
++
++ ret = 0;
++ switch (event->event) {
++ case IB_CM_REQ_RECEIVED:
++ ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
++ event->private_data);
++ break;
++ case IB_CM_REJ_RECEIVED:
++ srpt_cm_rej_recv(cm_id);
++ break;
++ case IB_CM_RTU_RECEIVED:
++ case IB_CM_USER_ESTABLISHED:
++ srpt_cm_rtu_recv(cm_id);
++ break;
++ case IB_CM_DREQ_RECEIVED:
++ srpt_cm_dreq_recv(cm_id);
++ break;
++ case IB_CM_DREP_RECEIVED:
++ srpt_cm_drep_recv(cm_id);
++ break;
++ case IB_CM_TIMEWAIT_EXIT:
++ srpt_cm_timewait_exit(cm_id);
++ break;
++ case IB_CM_REP_ERROR:
++ srpt_cm_rep_error(cm_id);
++ break;
++ case IB_CM_DREQ_ERROR:
++ PRINT_INFO("%s", "Received IB DREQ ERROR event.");
++ break;
++ case IB_CM_MRA_RECEIVED:
++ PRINT_INFO("%s", "Received IB MRA event");
++ break;
++ default:
++ PRINT_ERROR("received unrecognized IB CM event %d",
++ event->event);
++ break;
++ }
++
++ return ret;
++}
++
++/**
++ * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
++ */
++static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
++ struct srpt_send_ioctx *ioctx,
++ struct scst_cmd *scmnd)
++{
++ struct scatterlist *sg;
++ int sg_cnt;
++ scst_data_direction dir;
++ struct rdma_iu *riu;
++ struct srp_direct_buf *db;
++ dma_addr_t dma_addr;
++ struct ib_sge *sge;
++ u64 raddr;
++ u32 rsize;
++ u32 tsize;
++ u32 dma_len;
++ int count, nrdma;
++ int i, j, k;
++
++ BUG_ON(!ch);
++ BUG_ON(!ioctx);
++ BUG_ON(!scmnd);
++ dir = scst_cmd_get_data_direction(scmnd);
++ BUG_ON(dir == SCST_DATA_NONE);
++ /*
++ * Cache 'dir' because it is needed in srpt_unmap_sg_to_ib_sge()
++ * and because scst_set_cmd_error_status() resets scmnd->data_direction.
++ */
++ ioctx->dir = dir;
++ if (dir == SCST_DATA_WRITE) {
++ scst_cmd_get_write_fields(scmnd, &sg, &sg_cnt);
++ WARN_ON(!sg);
++ } else {
++ sg = scst_cmd_get_sg(scmnd);
++ sg_cnt = scst_cmd_get_sg_cnt(scmnd);
++ WARN_ON(!sg);
++ }
++ ioctx->sg = sg;
++ ioctx->sg_cnt = sg_cnt;
++ count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
++ scst_to_tgt_dma_dir(dir));
++ if (unlikely(!count))
++ return -EBUSY;
++
++ ioctx->mapped_sg_count = count;
++
++ if (ioctx->rdma_ius && ioctx->n_rdma_ius)
++ nrdma = ioctx->n_rdma_ius;
++ else {
++ nrdma = count / SRPT_DEF_SG_PER_WQE + ioctx->n_rbuf;
++
++ ioctx->rdma_ius = kzalloc(nrdma * sizeof *riu,
++ scst_cmd_atomic(scmnd)
++ ? GFP_ATOMIC : GFP_KERNEL);
++ if (!ioctx->rdma_ius)
++ goto free_mem;
++
++ ioctx->n_rdma_ius = nrdma;
++ }
++
++ db = ioctx->rbufs;
++ tsize = (dir == SCST_DATA_READ)
++ ? scst_cmd_get_adjusted_resp_data_len(scmnd)
++ : scst_cmd_get_bufflen(scmnd);
++ dma_len = sg_dma_len(&sg[0]);
++ riu = ioctx->rdma_ius;
++
++ /*
++	 * For each remote descriptor, calculate the number of ib_sge
++	 * entries needed. If that number stays below SRPT_DEF_SG_PER_WQE
++	 * per RDMA operation, one rdma_iu (i.e. one RDMA work request) per
++	 * remote descriptor is sufficient; otherwise extra rdma_iu entries
++	 * are allocated to carry the additional ib_sge entries in further
++	 * RDMA work requests.
++ */
++ for (i = 0, j = 0;
++ j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
++ rsize = be32_to_cpu(db->len);
++ raddr = be64_to_cpu(db->va);
++ riu->raddr = raddr;
++ riu->rkey = be32_to_cpu(db->key);
++ riu->sge_cnt = 0;
++
++ /* calculate how many sge required for this remote_buf */
++ while (rsize > 0 && tsize > 0) {
++
++ if (rsize >= dma_len) {
++ tsize -= dma_len;
++ rsize -= dma_len;
++ raddr += dma_len;
++
++ if (tsize > 0) {
++ ++j;
++ if (j < count)
++ dma_len = sg_dma_len(&sg[j]);
++ }
++ } else {
++ tsize -= rsize;
++ dma_len -= rsize;
++ rsize = 0;
++ }
++
++ ++riu->sge_cnt;
++
++ if (rsize > 0 && riu->sge_cnt == SRPT_DEF_SG_PER_WQE) {
++ ++ioctx->n_rdma;
++ riu->sge =
++ kmalloc(riu->sge_cnt * sizeof *riu->sge,
++ scst_cmd_atomic(scmnd)
++ ? GFP_ATOMIC : GFP_KERNEL);
++ if (!riu->sge)
++ goto free_mem;
++
++ ++riu;
++ riu->sge_cnt = 0;
++ riu->raddr = raddr;
++ riu->rkey = be32_to_cpu(db->key);
++ }
++ }
++
++ ++ioctx->n_rdma;
++ riu->sge = kmalloc(riu->sge_cnt * sizeof *riu->sge,
++ scst_cmd_atomic(scmnd)
++ ? GFP_ATOMIC : GFP_KERNEL);
++ if (!riu->sge)
++ goto free_mem;
++ }
++
++ db = ioctx->rbufs;
++ tsize = (dir == SCST_DATA_READ)
++ ? scst_cmd_get_adjusted_resp_data_len(scmnd)
++ : scst_cmd_get_bufflen(scmnd);
++ riu = ioctx->rdma_ius;
++ dma_len = sg_dma_len(&sg[0]);
++ dma_addr = sg_dma_address(&sg[0]);
++
++	/*
++	 * The second pass fills the rdma_iu->sge entries with the DMA
++	 * addresses and lengths of the mapped SG list.
++	 */
++ for (i = 0, j = 0;
++ j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
++ rsize = be32_to_cpu(db->len);
++ sge = riu->sge;
++ k = 0;
++
++ while (rsize > 0 && tsize > 0) {
++ sge->addr = dma_addr;
++ sge->lkey = ch->sport->sdev->mr->lkey;
++
++ if (rsize >= dma_len) {
++ sge->length =
++ (tsize < dma_len) ? tsize : dma_len;
++ tsize -= dma_len;
++ rsize -= dma_len;
++
++ if (tsize > 0) {
++ ++j;
++ if (j < count) {
++ dma_len = sg_dma_len(&sg[j]);
++ dma_addr =
++ sg_dma_address(&sg[j]);
++ }
++ }
++ } else {
++ sge->length = (tsize < rsize) ? tsize : rsize;
++ tsize -= rsize;
++ dma_len -= rsize;
++ dma_addr += rsize;
++ rsize = 0;
++ }
++
++ ++k;
++ if (k == riu->sge_cnt && rsize > 0) {
++ ++riu;
++ sge = riu->sge;
++ k = 0;
++ } else if (rsize > 0)
++ ++sge;
++ }
++ }
++
++ return 0;
++
++free_mem:
++ srpt_unmap_sg_to_ib_sge(ch, ioctx);
++
++ return -ENOMEM;
++}
++
++/**
++ * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
++ */
++static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
++ struct srpt_send_ioctx *ioctx)
++{
++ struct scst_cmd *scmnd;
++ struct scatterlist *sg;
++ scst_data_direction dir;
++
++ EXTRACHECKS_BUG_ON(!ch);
++ EXTRACHECKS_BUG_ON(!ioctx);
++ EXTRACHECKS_BUG_ON(ioctx->n_rdma && !ioctx->rdma_ius);
++
++ while (ioctx->n_rdma)
++ kfree(ioctx->rdma_ius[--ioctx->n_rdma].sge);
++
++ kfree(ioctx->rdma_ius);
++ ioctx->rdma_ius = NULL;
++
++ if (ioctx->mapped_sg_count) {
++ scmnd = ioctx->scmnd;
++ EXTRACHECKS_BUG_ON(!scmnd);
++ EXTRACHECKS_WARN_ON(ioctx->scmnd != scmnd);
++ EXTRACHECKS_WARN_ON(ioctx != scst_cmd_get_tgt_priv(scmnd));
++ sg = ioctx->sg;
++ EXTRACHECKS_WARN_ON(!sg);
++ dir = ioctx->dir;
++ EXTRACHECKS_BUG_ON(dir == SCST_DATA_NONE);
++ ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
++ scst_to_tgt_dma_dir(dir));
++ ioctx->mapped_sg_count = 0;
++ }
++}
++
++/**
++ * srpt_perform_rdmas() - Perform IB RDMA.
++ *
++ * Returns zero upon success or a negative number upon failure.
++ */
++static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
++ struct srpt_send_ioctx *ioctx,
++ scst_data_direction dir)
++{
++ struct ib_send_wr wr;
++ struct ib_send_wr *bad_wr;
++ struct rdma_iu *riu;
++ int i;
++ int ret;
++ int sq_wr_avail;
++
++ if (dir == SCST_DATA_WRITE) {
++ ret = -ENOMEM;
++ sq_wr_avail = atomic_sub_return(ioctx->n_rdma,
++ &ch->sq_wr_avail);
++ if (sq_wr_avail < 0) {
++ PRINT_WARNING("IB send queue full (needed %d)",
++ ioctx->n_rdma);
++ goto out;
++ }
++ }
++
++ ret = 0;
++ riu = ioctx->rdma_ius;
++ memset(&wr, 0, sizeof wr);
++
++ for (i = 0; i < ioctx->n_rdma; ++i, ++riu) {
++ if (dir == SCST_DATA_READ) {
++ wr.opcode = IB_WR_RDMA_WRITE;
++ wr.wr_id = encode_wr_id(IB_WC_RDMA_WRITE,
++ ioctx->ioctx.index);
++ } else {
++ wr.opcode = IB_WR_RDMA_READ;
++ wr.wr_id = encode_wr_id(IB_WC_RDMA_READ,
++ ioctx->ioctx.index);
++ }
++ wr.next = NULL;
++ wr.wr.rdma.remote_addr = riu->raddr;
++ wr.wr.rdma.rkey = riu->rkey;
++ wr.num_sge = riu->sge_cnt;
++ wr.sg_list = riu->sge;
++
++		/* Only request a completion event for the last RDMA work request. */
++ if (i == (ioctx->n_rdma - 1) && dir == SCST_DATA_WRITE)
++ wr.send_flags = IB_SEND_SIGNALED;
++
++ ret = ib_post_send(ch->qp, &wr, &bad_wr);
++ if (ret)
++ goto out;
++ }
++
++out:
++ if (unlikely(dir == SCST_DATA_WRITE && ret < 0))
++ atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
++ return ret;
++}
++
++/**
++ * srpt_xfer_data() - Start data transfer from initiator to target.
++ *
++ * Returns an SCST_TGT_RES_... status code.
++ *
++ * Note: Must not block.
++ */
++static int srpt_xfer_data(struct srpt_rdma_ch *ch,
++ struct srpt_send_ioctx *ioctx,
++ struct scst_cmd *scmnd)
++{
++ int ret;
++
++ ret = srpt_map_sg_to_ib_sge(ch, ioctx, scmnd);
++ if (ret) {
++ PRINT_ERROR("%s[%d] ret=%d", __func__, __LINE__, ret);
++ ret = SCST_TGT_RES_QUEUE_FULL;
++ goto out;
++ }
++
++ ret = srpt_perform_rdmas(ch, ioctx, scst_cmd_get_data_direction(scmnd));
++ if (ret) {
++ if (ret == -EAGAIN || ret == -ENOMEM) {
++ PRINT_INFO("%s[%d] queue full -- ret=%d",
++ __func__, __LINE__, ret);
++ ret = SCST_TGT_RES_QUEUE_FULL;
++ } else {
++ PRINT_ERROR("%s[%d] fatal error -- ret=%d",
++ __func__, __LINE__, ret);
++ ret = SCST_TGT_RES_FATAL_ERROR;
++ }
++ goto out_unmap;
++ }
++
++ ret = SCST_TGT_RES_SUCCESS;
++
++out:
++ return ret;
++out_unmap:
++ srpt_unmap_sg_to_ib_sge(ch, ioctx);
++ goto out;
++}
++
++/**
++ * srpt_pending_cmd_timeout() - SCST command HCA processing timeout callback.
++ *
++ * Called by the SCST core if no IB completion notification has been received
++ * within max_hw_pending_time seconds.
++ */
++static void srpt_pending_cmd_timeout(struct scst_cmd *scmnd)
++{
++ struct srpt_send_ioctx *ioctx;
++ enum srpt_command_state state;
++
++ ioctx = scst_cmd_get_tgt_priv(scmnd);
++ BUG_ON(!ioctx);
++
++ state = srpt_get_cmd_state(ioctx);
++ switch (state) {
++ case SRPT_STATE_NEW:
++ case SRPT_STATE_DATA_IN:
++ case SRPT_STATE_DONE:
++ /*
++ * srpt_pending_cmd_timeout() should never be invoked for
++ * commands in this state.
++ */
++ PRINT_ERROR("Processing SCST command %p (SRPT state %d) took"
++ " too long -- aborting", scmnd, state);
++ break;
++ case SRPT_STATE_NEED_DATA:
++ case SRPT_STATE_CMD_RSP_SENT:
++ case SRPT_STATE_MGMT_RSP_SENT:
++ default:
++ PRINT_ERROR("Command %p: IB completion for idx %u has not"
++ " been received in time (SRPT command state %d)",
++ scmnd, ioctx->ioctx.index, state);
++ break;
++ }
++
++ srpt_abort_scst_cmd(ioctx, SCST_CONTEXT_SAME);
++}
++
++/**
++ * srpt_rdy_to_xfer() - Transfers data from initiator to target.
++ *
++ * Called by the SCST core to transfer data from the initiator to the target
++ * (SCST_DATA_WRITE). Must not block.
++ */
++static int srpt_rdy_to_xfer(struct scst_cmd *scmnd)
++{
++ struct srpt_rdma_ch *ch;
++ struct srpt_send_ioctx *ioctx;
++ enum srpt_command_state new_state;
++ enum rdma_ch_state ch_state;
++ int ret;
++
++ ioctx = scst_cmd_get_tgt_priv(scmnd);
++ BUG_ON(!ioctx);
++
++ new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
++ WARN_ON(new_state == SRPT_STATE_DONE);
++
++ ch = ioctx->ch;
++ WARN_ON(ch != scst_sess_get_tgt_priv(scst_cmd_get_session(scmnd)));
++ BUG_ON(!ch);
++
++ ch_state = atomic_read(&ch->state);
++ if (ch_state == RDMA_CHANNEL_DISCONNECTING) {
++ TRACE_DBG("cmd with tag %lld: channel disconnecting",
++ scst_cmd_get_tag(scmnd));
++ srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
++ ret = SCST_TGT_RES_FATAL_ERROR;
++ goto out;
++ } else if (ch_state == RDMA_CHANNEL_CONNECTING) {
++ ret = SCST_TGT_RES_QUEUE_FULL;
++ goto out;
++ }
++ ret = srpt_xfer_data(ch, ioctx, scmnd);
++
++out:
++ return ret;
++}
++
++/**
++ * srpt_xmit_response() - Transmits the response to a SCSI command.
++ *
++ * Callback function called by the SCST core. Must not block. Must ensure that
++ * scst_tgt_cmd_done() will get invoked when returning SCST_TGT_RES_SUCCESS.
++ */
++static int srpt_xmit_response(struct scst_cmd *scmnd)
++{
++ struct srpt_rdma_ch *ch;
++ struct srpt_send_ioctx *ioctx;
++ enum srpt_command_state state;
++ int ret;
++ scst_data_direction dir;
++ int resp_len;
++
++ ret = SCST_TGT_RES_SUCCESS;
++
++ ioctx = scst_cmd_get_tgt_priv(scmnd);
++ BUG_ON(!ioctx);
++
++ ch = scst_sess_get_tgt_priv(scst_cmd_get_session(scmnd));
++ BUG_ON(!ch);
++
++ state = srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEW,
++ SRPT_STATE_CMD_RSP_SENT);
++ if (state != SRPT_STATE_NEW) {
++ state = srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_DATA_IN,
++ SRPT_STATE_CMD_RSP_SENT);
++ if (state != SRPT_STATE_DATA_IN)
++ PRINT_ERROR("Unexpected command state %d",
++ srpt_get_cmd_state(ioctx));
++ }
++
++ if (unlikely(scst_cmd_aborted(scmnd))) {
++ atomic_inc(&ch->req_lim_delta);
++ srpt_abort_scst_cmd(ioctx, SCST_CONTEXT_SAME);
++ goto out;
++ }
++
++ EXTRACHECKS_BUG_ON(scst_cmd_atomic(scmnd));
++
++ dir = scst_cmd_get_data_direction(scmnd);
++
++ /* For read commands, transfer the data to the initiator. */
++ if (dir == SCST_DATA_READ
++ && scst_cmd_get_adjusted_resp_data_len(scmnd)) {
++ ret = srpt_xfer_data(ch, ioctx, scmnd);
++ if (ret == SCST_TGT_RES_QUEUE_FULL) {
++ srpt_set_cmd_state(ioctx, state);
++ PRINT_WARNING("xfer_data failed for tag %llu"
++ " - retrying", scst_cmd_get_tag(scmnd));
++ goto out;
++ } else if (ret != SCST_TGT_RES_SUCCESS) {
++ PRINT_ERROR("xfer_data failed for tag %llu",
++ scst_cmd_get_tag(scmnd));
++ goto out;
++ }
++ }
++
++ atomic_inc(&ch->req_lim);
++
++ resp_len = srpt_build_cmd_rsp(ch, ioctx,
++ scst_cmd_get_tag(scmnd),
++ scst_cmd_get_status(scmnd),
++ scst_cmd_get_sense_buffer(scmnd),
++ scst_cmd_get_sense_buffer_len(scmnd));
++
++ if (srpt_post_send(ch, ioctx, resp_len)) {
++ srpt_unmap_sg_to_ib_sge(ch, ioctx);
++ srpt_set_cmd_state(ioctx, state);
++ atomic_dec(&ch->req_lim);
++ PRINT_WARNING("sending response failed for tag %llu - retrying",
++ scst_cmd_get_tag(scmnd));
++ ret = SCST_TGT_RES_QUEUE_FULL;
++ }
++
++out:
++ return ret;
++}
++
++/**
++ * srpt_tsk_mgmt_done() - SCST callback function that sends back the response
++ * for a task management request.
++ *
++ * Must not block.
++ */
++static void srpt_tsk_mgmt_done(struct scst_mgmt_cmd *mcmnd)
++{
++ struct srpt_rdma_ch *ch;
++ struct srpt_mgmt_ioctx *mgmt_ioctx;
++ struct srpt_send_ioctx *ioctx;
++ enum srpt_command_state new_state;
++ int rsp_len;
++
++ mgmt_ioctx = scst_mgmt_cmd_get_tgt_priv(mcmnd);
++ BUG_ON(!mgmt_ioctx);
++
++ ioctx = mgmt_ioctx->ioctx;
++ BUG_ON(!ioctx);
++
++ ch = ioctx->ch;
++ BUG_ON(!ch);
++
++ TRACE_DBG("%s: tsk_mgmt_done for tag= %lld status=%d",
++ __func__, mgmt_ioctx->tag, scst_mgmt_cmd_get_status(mcmnd));
++
++ WARN_ON(in_irq());
++
++ new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_MGMT_RSP_SENT);
++ WARN_ON(new_state == SRPT_STATE_DONE);
++
++ atomic_inc(&ch->req_lim);
++
++ rsp_len = srpt_build_tskmgmt_rsp(ch, ioctx,
++ scst_to_srp_tsk_mgmt_status(
++ scst_mgmt_cmd_get_status(mcmnd)),
++ mgmt_ioctx->tag);
++ /*
++ * Note: the srpt_post_send() call below sends the task management
++ * response asynchronously. It is possible that the SCST core has
++ * already freed the struct scst_mgmt_cmd structure before the
++ * response is sent. This is fine however.
++ */
++ if (srpt_post_send(ch, ioctx, rsp_len)) {
++ PRINT_ERROR("%s", "Sending SRP_RSP response failed.");
++ srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
++ srpt_put_send_ioctx(ioctx);
++ atomic_dec(&ch->req_lim);
++ }
++
++ scst_mgmt_cmd_set_tgt_priv(mcmnd, NULL);
++
++ kfree(mgmt_ioctx);
++}
++
++/**
++ * srpt_get_initiator_port_transport_id() - SCST TransportID callback function.
++ *
++ * See also SPC-3, section 7.5.4.5, TransportID for initiator ports using SRP.
++ */
++static int srpt_get_initiator_port_transport_id(struct scst_session *scst_sess,
++ uint8_t **transport_id)
++{
++ struct srpt_rdma_ch *ch;
++ struct spc_rdma_transport_id {
++ uint8_t protocol_identifier;
++ uint8_t reserved[7];
++ uint8_t i_port_id[16];
++ };
++ struct spc_rdma_transport_id *tr_id;
++ int res;
++
++ TRACE_ENTRY();
++
++ if (!scst_sess) {
++ res = SCSI_TRANSPORTID_PROTOCOLID_SRP;
++ goto out;
++ }
++
++ ch = scst_sess_get_tgt_priv(scst_sess);
++ BUG_ON(!ch);
++
++ BUILD_BUG_ON(sizeof(*tr_id) != 24);
++
++ tr_id = kzalloc(sizeof(struct spc_rdma_transport_id), GFP_KERNEL);
++ if (!tr_id) {
++ PRINT_ERROR("%s", "Allocation of TransportID failed");
++ res = -ENOMEM;
++ goto out;
++ }
++
++ res = 0;
++ tr_id->protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP;
++ memcpy(tr_id->i_port_id, ch->i_port_id, sizeof(ch->i_port_id));
++
++ *transport_id = (uint8_t *)tr_id;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/**
++ * srpt_on_free_cmd() - Free command-private data.
++ *
++ * Called by the SCST core. May be called in IRQ context.
++ */
++static void srpt_on_free_cmd(struct scst_cmd *scmnd)
++{
++}
++
++static void srpt_refresh_port_work(struct work_struct *work)
++{
++ struct srpt_port *sport = container_of(work, struct srpt_port, work);
++
++ srpt_refresh_port(sport);
++}
++
++/**
++ * srpt_detect() - Returns the number of target adapters.
++ *
++ * Callback function called by the SCST core.
++ */
++static int srpt_detect(struct scst_tgt_template *tp)
++{
++ int device_count;
++
++ TRACE_ENTRY();
++
++ device_count = atomic_read(&srpt_device_count);
++
++ TRACE_EXIT_RES(device_count);
++
++ return device_count;
++}
++
++/**
++ * srpt_release() - Free the resources associated with an SCST target.
++ *
++ * Callback function called by the SCST core from scst_unregister_target().
++ */
++static int srpt_release(struct scst_tgt *scst_tgt)
++{
++ struct srpt_device *sdev = scst_tgt_get_tgt_priv(scst_tgt);
++ struct srpt_rdma_ch *ch;
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_WARN_ON_ONCE(irqs_disabled());
++
++ BUG_ON(!scst_tgt);
++ if (WARN_ON(!sdev))
++ return -ENODEV;
++
++ spin_lock_irq(&sdev->spinlock);
++ while (!list_empty(&sdev->rch_list)) {
++ ch = list_first_entry(&sdev->rch_list, typeof(*ch), list);
++ srpt_unregister_channel(ch);
++ }
++ spin_unlock_irq(&sdev->spinlock);
++
++ scst_tgt_set_tgt_priv(scst_tgt, NULL);
++
++ TRACE_EXIT();
++
++ return 0;
++}
++
++/**
++ * srpt_get_scsi_transport_version() - Returns the SCSI transport version.
++ * This function is called from scst_pres.c, the code that implements
++ * persistent reservation support.
++ */
++static uint16_t srpt_get_scsi_transport_version(struct scst_tgt *scst_tgt)
++{
++ return 0x0940; /* SRP */
++}
++
++static ssize_t show_req_lim(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_session *scst_sess;
++ struct srpt_rdma_ch *ch;
++
++ scst_sess = container_of(kobj, struct scst_session, sess_kobj);
++ ch = scst_sess_get_tgt_priv(scst_sess);
++ if (!ch)
++ return -ENOENT;
++ return sprintf(buf, "%d\n", atomic_read(&ch->req_lim));
++}
++
++static ssize_t show_req_lim_delta(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_session *scst_sess;
++ struct srpt_rdma_ch *ch;
++
++ scst_sess = container_of(kobj, struct scst_session, sess_kobj);
++ ch = scst_sess_get_tgt_priv(scst_sess);
++ if (!ch)
++ return -ENOENT;
++ return sprintf(buf, "%d\n", atomic_read(&ch->req_lim_delta));
++}
++
++static const struct kobj_attribute srpt_req_lim_attr =
++ __ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
++static const struct kobj_attribute srpt_req_lim_delta_attr =
++ __ATTR(req_lim_delta, S_IRUGO, show_req_lim_delta, NULL);
++
++static const struct attribute *srpt_sess_attrs[] = {
++ &srpt_req_lim_attr.attr,
++ &srpt_req_lim_delta_attr.attr,
++ NULL
++};
++
++/* SCST target template for the SRP target implementation. */
++static struct scst_tgt_template srpt_template = {
++ .name = DRV_NAME,
++ .sg_tablesize = SRPT_DEF_SG_TABLESIZE,
++ .max_hw_pending_time = 60/*seconds*/,
++ .enable_target = srpt_enable_target,
++ .is_target_enabled = srpt_is_target_enabled,
++ .sess_attrs = srpt_sess_attrs,
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ .default_trace_flags = DEFAULT_SRPT_TRACE_FLAGS,
++ .trace_flags = &trace_flag,
++#endif
++ .detect = srpt_detect,
++ .release = srpt_release,
++ .xmit_response = srpt_xmit_response,
++ .rdy_to_xfer = srpt_rdy_to_xfer,
++ .on_hw_pending_cmd_timeout = srpt_pending_cmd_timeout,
++ .on_free_cmd = srpt_on_free_cmd,
++ .task_mgmt_fn_done = srpt_tsk_mgmt_done,
++ .get_initiator_port_transport_id = srpt_get_initiator_port_transport_id,
++ .get_scsi_transport_version = srpt_get_scsi_transport_version,
++};
++
++/**
++ * srpt_dev_release() - Device release callback function.
++ *
++ * The callback function srpt_dev_release() is called whenever a
++ * device is removed from the /sys/class/infiniband_srpt device class.
++ * Although this function has been left empty, a release function has been
++ * defined such that upon module removal no complaint is logged about a
++ * missing release function.
++ */
++static void srpt_dev_release(struct device *dev)
++{
++}
++
++static ssize_t show_login_info(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct srpt_device *sdev;
++ struct srpt_port *sport;
++ int i;
++ int len;
++
++ sdev = container_of(dev, struct srpt_device, dev);
++ len = 0;
++ for (i = 0; i < sdev->device->phys_port_cnt; i++) {
++ sport = &sdev->port[i];
++
++ len += sprintf(buf + len,
++ "tid_ext=%016llx,ioc_guid=%016llx,pkey=ffff,"
++ "dgid=%04x%04x%04x%04x%04x%04x%04x%04x,"
++ "service_id=%016llx\n",
++ srpt_service_guid,
++ srpt_service_guid,
++ be16_to_cpu(((__be16 *) sport->gid.raw)[0]),
++ be16_to_cpu(((__be16 *) sport->gid.raw)[1]),
++ be16_to_cpu(((__be16 *) sport->gid.raw)[2]),
++ be16_to_cpu(((__be16 *) sport->gid.raw)[3]),
++ be16_to_cpu(((__be16 *) sport->gid.raw)[4]),
++ be16_to_cpu(((__be16 *) sport->gid.raw)[5]),
++ be16_to_cpu(((__be16 *) sport->gid.raw)[6]),
++ be16_to_cpu(((__be16 *) sport->gid.raw)[7]),
++ srpt_service_guid);
++ }
++
++ return len;
++}
++
++static struct class_attribute srpt_class_attrs[] = {
++ __ATTR_NULL,
++};
++
++static struct device_attribute srpt_dev_attrs[] = {
++ __ATTR(login_info, S_IRUGO, show_login_info, NULL),
++ __ATTR_NULL,
++};
++
++static struct class srpt_class = {
++ .name = "infiniband_srpt",
++ .dev_release = srpt_dev_release,
++ .class_attrs = srpt_class_attrs,
++ .dev_attrs = srpt_dev_attrs,
++};
++
++/**
++ * srpt_add_one() - Infiniband device addition callback function.
++ */
++static void srpt_add_one(struct ib_device *device)
++{
++ struct srpt_device *sdev;
++ struct srpt_port *sport;
++ struct ib_srq_init_attr srq_attr;
++ int i;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("device = %p, device->dma_ops = %p", device, device->dma_ops);
++
++ sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
++ if (!sdev)
++ goto err;
++
++ sdev->device = device;
++ INIT_LIST_HEAD(&sdev->rch_list);
++ spin_lock_init(&sdev->spinlock);
++
++ sdev->scst_tgt = scst_register_target(&srpt_template, NULL);
++ if (!sdev->scst_tgt) {
++ PRINT_ERROR("SCST registration failed for %s.",
++ sdev->device->name);
++ goto free_dev;
++ }
++
++ scst_tgt_set_tgt_priv(sdev->scst_tgt, sdev);
++
++ sdev->dev.class = &srpt_class;
++ sdev->dev.parent = device->dma_device;
++ dev_set_name(&sdev->dev, "srpt-%s", device->name);
++
++ if (device_register(&sdev->dev))
++ goto unregister_tgt;
++
++ if (ib_query_device(device, &sdev->dev_attr))
++ goto err_dev;
++
++ sdev->pd = ib_alloc_pd(device);
++ if (IS_ERR(sdev->pd))
++ goto err_dev;
++
++ sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE);
++ if (IS_ERR(sdev->mr))
++ goto err_pd;
++
++ sdev->srq_size = min(srpt_srq_size, sdev->dev_attr.max_srq_wr);
++
++ srq_attr.event_handler = srpt_srq_event;
++ srq_attr.srq_context = (void *)sdev;
++ srq_attr.attr.max_wr = sdev->srq_size;
++ srq_attr.attr.max_sge = 1;
++ srq_attr.attr.srq_limit = 0;
++
++ sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
++ if (IS_ERR(sdev->srq))
++ goto err_mr;
++
++ TRACE_DBG("%s: create SRQ #wr= %d max_allow=%d dev= %s", __func__,
++ sdev->srq_size, sdev->dev_attr.max_srq_wr, device->name);
++
++ if (!srpt_service_guid)
++ srpt_service_guid = be64_to_cpu(device->node_guid);
++
++ sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
++ if (IS_ERR(sdev->cm_id))
++ goto err_srq;
++
++ /* print out target login information */
++ TRACE_DBG("Target login info: id_ext=%016llx,"
++ "ioc_guid=%016llx,pkey=ffff,service_id=%016llx",
++ srpt_service_guid, srpt_service_guid, srpt_service_guid);
++
++ /*
++	 * We do not have a consistent service_id (i.e. also the id_ext part
++	 * of the target_id) to identify this target. We currently use the
++	 * GUID of the first HCA in the system as the service_id; therefore,
++	 * the target_id will change if this HCA goes bad and is replaced by
++	 * a different HCA.
++ */
++ if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0, NULL))
++ goto err_cm;
++
++ INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
++ srpt_event_handler);
++ if (ib_register_event_handler(&sdev->event_handler))
++ goto err_cm;
++
++ sdev->ioctx_ring = (struct srpt_recv_ioctx **)
++ srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
++ sizeof(*sdev->ioctx_ring[0]),
++ srp_max_req_size, DMA_FROM_DEVICE);
++ if (!sdev->ioctx_ring)
++ goto err_event;
++
++ for (i = 0; i < sdev->srq_size; ++i)
++ srpt_post_recv(sdev, sdev->ioctx_ring[i]);
++
++ WARN_ON(sdev->device->phys_port_cnt
++ > sizeof(sdev->port)/sizeof(sdev->port[0]));
++
++ for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
++ sport = &sdev->port[i - 1];
++ sport->sdev = sdev;
++ sport->port = i;
++ INIT_WORK(&sport->work, srpt_refresh_port_work);
++ if (srpt_refresh_port(sport)) {
++ PRINT_ERROR("MAD registration failed for %s-%d.",
++ sdev->device->name, i);
++ goto err_ring;
++ }
++ }
++
++ atomic_inc(&srpt_device_count);
++out:
++ ib_set_client_data(device, &srpt_client, sdev);
++
++ TRACE_EXIT();
++ return;
++
++err_ring:
++ srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
++ sdev->srq_size, srp_max_req_size,
++ DMA_FROM_DEVICE);
++err_event:
++ ib_unregister_event_handler(&sdev->event_handler);
++err_cm:
++ ib_destroy_cm_id(sdev->cm_id);
++err_srq:
++ ib_destroy_srq(sdev->srq);
++err_mr:
++ ib_dereg_mr(sdev->mr);
++err_pd:
++ ib_dealloc_pd(sdev->pd);
++err_dev:
++ device_unregister(&sdev->dev);
++unregister_tgt:
++ scst_unregister_target(sdev->scst_tgt);
++free_dev:
++ kfree(sdev);
++err:
++ sdev = NULL;
++ PRINT_INFO("%s(%s) failed.", __func__, device->name);
++ goto out;
++}
++
++/**
++ * srpt_remove_one() - InfiniBand device removal callback function.
++ */
++static void srpt_remove_one(struct ib_device *device)
++{
++ int i;
++ struct srpt_device *sdev;
++
++ TRACE_ENTRY();
++
++ sdev = ib_get_client_data(device, &srpt_client);
++ if (!sdev) {
++ PRINT_INFO("%s(%s): nothing to do.", __func__, device->name);
++ return;
++ }
++
++ srpt_unregister_mad_agent(sdev);
++
++ ib_unregister_event_handler(&sdev->event_handler);
++
++ /* Cancel any work queued by the just unregistered IB event handler. */
++ for (i = 0; i < sdev->device->phys_port_cnt; i++)
++ cancel_work_sync(&sdev->port[i].work);
++
++ ib_destroy_cm_id(sdev->cm_id);
++ ib_destroy_srq(sdev->srq);
++ ib_dereg_mr(sdev->mr);
++ ib_dealloc_pd(sdev->pd);
++
++ device_unregister(&sdev->dev);
++
++ /*
++ * Unregistering an SCST target must happen after destroying sdev->cm_id
++ * such that no new SRP_LOGIN_REQ information units can arrive while
++ * destroying the SCST target.
++ */
++ scst_unregister_target(sdev->scst_tgt);
++ sdev->scst_tgt = NULL;
++
++ srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
++ sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
++ sdev->ioctx_ring = NULL;
++ kfree(sdev);
++
++ TRACE_EXIT();
++}
++
++/**
++ * srpt_init_module() - Kernel module initialization.
++ *
++ * Note: Since ib_register_client() registers callback functions, and since at
++ * least one of these callback functions (srpt_add_one()) calls SCST functions,
++ * the SCST target template must be registered before ib_register_client() is
++ * called.
++ */
++static int __init srpt_init_module(void)
++{
++ int ret;
++
++ ret = -EINVAL;
++ if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
++ PRINT_ERROR("invalid value %d for kernel module parameter"
++ " srp_max_req_size -- must be at least %d.",
++ srp_max_req_size,
++ MIN_MAX_REQ_SIZE);
++ goto out;
++ }
++
++ if (srp_max_rsp_size < MIN_MAX_RSP_SIZE) {
++ PRINT_ERROR("invalid value %d for kernel module parameter"
++ " srp_max_rsp_size -- must be at least %d.",
++ srp_max_rsp_size,
++ MIN_MAX_RSP_SIZE);
++ goto out;
++ }
++
++ if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
++ || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
++ PRINT_ERROR("invalid value %d for kernel module parameter"
++ " srpt_srq_size -- must be in the range [%d..%d].",
++ srpt_srq_size, MIN_SRPT_SRQ_SIZE,
++ MAX_SRPT_SRQ_SIZE);
++ goto out;
++ }
++
++ if (srpt_sq_size < MIN_SRPT_SQ_SIZE) {
++ PRINT_ERROR("invalid value %d for kernel module parameter"
++ " srpt_sq_size -- must be at least %d.",
++			    srpt_sq_size, MIN_SRPT_SQ_SIZE);
++ goto out;
++ }
++
++ ret = class_register(&srpt_class);
++ if (ret) {
++ PRINT_ERROR("%s", "couldn't register class ib_srpt");
++ goto out;
++ }
++
++ switch (thread) {
++ case MODE_ALL_IN_SIRQ:
++ /*
++ * Process both IB completions and SCST commands in SIRQ
++ * context. May lead to soft lockups and other scary behavior
++ * under sufficient load.
++ */
++ srpt_template.rdy_to_xfer_atomic = true;
++ break;
++ case MODE_IB_COMPLETION_IN_THREAD:
++ /*
++ * Process IB completions in the kernel thread associated with
++ * the RDMA channel, and process SCST commands in the kernel
++ * threads created by the SCST core.
++ */
++ srpt_template.rdy_to_xfer_atomic = false;
++ break;
++ case MODE_IB_COMPLETION_IN_SIRQ:
++ default:
++ /*
++ * Process IB completions in SIRQ context and SCST commands in
++ * the kernel threads created by the SCST core.
++ */
++ srpt_template.rdy_to_xfer_atomic = false;
++ break;
++ }
++
++ ret = scst_register_target_template(&srpt_template);
++ if (ret < 0) {
++ PRINT_ERROR("%s", "couldn't register with scst");
++ ret = -ENODEV;
++ goto out_unregister_class;
++ }
++
++ ret = ib_register_client(&srpt_client);
++ if (ret) {
++ PRINT_ERROR("%s", "couldn't register IB client");
++ goto out_unregister_procfs;
++ }
++
++ return 0;
++
++out_unregister_procfs:
++ scst_unregister_target_template(&srpt_template);
++out_unregister_class:
++ class_unregister(&srpt_class);
++out:
++ return ret;
++}
++
++static void __exit srpt_cleanup_module(void)
++{
++ TRACE_ENTRY();
++
++ ib_unregister_client(&srpt_client);
++ scst_unregister_target_template(&srpt_template);
++ class_unregister(&srpt_class);
++
++ TRACE_EXIT();
++}
++
++module_init(srpt_init_module);
++module_exit(srpt_cleanup_module);
++
++/*
++ * Local variables:
++ * c-basic-offset: 8
++ * indent-tabs-mode: t
++ * End:
++ */
+diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.h linux-2.6.36/drivers/scst/srpt/ib_srpt.h
+--- orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.h
++++ linux-2.6.36/drivers/scst/srpt/ib_srpt.h
+@@ -0,0 +1,353 @@
++/*
++ * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
++ * Copyright (C) 2009 - 2010 Bart Van Assche <bart.vanassche@gmail.com>
++ *
++ * This software is available to you under a choice of one of two
++ * licenses. You may choose to be licensed under the terms of the GNU
++ * General Public License (GPL) Version 2, available from the file
++ * COPYING in the main directory of this source tree, or the
++ * OpenIB.org BSD license below:
++ *
++ * Redistribution and use in source and binary forms, with or
++ * without modification, are permitted provided that the following
++ * conditions are met:
++ *
++ * - Redistributions of source code must retain the above
++ * copyright notice, this list of conditions and the following
++ * disclaimer.
++ *
++ * - Redistributions in binary form must reproduce the above
++ * copyright notice, this list of conditions and the following
++ * disclaimer in the documentation and/or other materials
++ * provided with the distribution.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
++ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ */
++
++#ifndef IB_SRPT_H
++#define IB_SRPT_H
++
++#include <linux/version.h>
++#include <linux/types.h>
++#include <linux/list.h>
++
++#include <rdma/ib_verbs.h>
++#include <rdma/ib_sa.h>
++#include <rdma/ib_cm.h>
++
++#include <scsi/srp.h>
++
++#include <scst/scst.h>
++
++#include "ib_dm_mad.h"
++
++/*
++ * The prefix the ServiceName field must start with in the device management
++ * ServiceEntries attribute pair. See also the SRP r16a document.
++ */
++#define SRP_SERVICE_NAME_PREFIX "SRP.T10:"
++
++enum {
++ /*
++ * SRP IOControllerProfile attributes for SRP target ports that have
++ * not been defined in <scsi/srp.h>. Source: section B.7, table B.7
++ * in the SRP r16a document.
++ */
++ SRP_PROTOCOL = 0x0108,
++ SRP_PROTOCOL_VERSION = 0x0001,
++ SRP_IO_SUBCLASS = 0x609e,
++ SRP_SEND_TO_IOC = 0x01,
++ SRP_SEND_FROM_IOC = 0x02,
++ SRP_RDMA_READ_FROM_IOC = 0x08,
++ SRP_RDMA_WRITE_FROM_IOC = 0x20,
++
++ /*
++ * srp_login_cmd.req_flags bitmasks. See also table 9 in the SRP r16a
++ * document.
++ */
++ SRP_MTCH_ACTION = 0x03, /* MULTI-CHANNEL ACTION */
++ SRP_LOSOLNT = 0x10, /* logout solicited notification */
++ SRP_CRSOLNT = 0x20, /* credit request solicited notification */
++ SRP_AESOLNT = 0x40, /* asynchronous event solicited notification */
++
++ /*
++ * srp_cmd.sol_nt / srp_tsk_mgmt.sol_not bitmasks. See also tables
++ * 18 and 20 in the T10 r16a document.
++ */
++ SRP_SCSOLNT = 0x02, /* SCSOLNT = successful solicited notification */
++ SRP_UCSOLNT = 0x04, /* UCSOLNT = unsuccessful solicited notification */
++
++ /*
++ * srp_rsp.sol_not / srp_t_logout.sol_not bitmasks. See also tables
++ * 16 and 22 in the T10 r16a document.
++ */
++ SRP_SOLNT = 0x01, /* SOLNT = solicited notification */
++
++ /* See also table 24 in the T10 r16a document. */
++ SRP_TSK_MGMT_SUCCESS = 0x00,
++ SRP_TSK_MGMT_FUNC_NOT_SUPP = 0x04,
++ SRP_TSK_MGMT_FAILED = 0x05,
++
++ /* See also table 21 in the T10 r16a document. */
++ SRP_CMD_SIMPLE_Q = 0x0,
++ SRP_CMD_HEAD_OF_Q = 0x1,
++ SRP_CMD_ORDERED_Q = 0x2,
++ SRP_CMD_ACA = 0x4,
++
++ SRP_LOGIN_RSP_MULTICHAN_NO_CHAN = 0x0,
++ SRP_LOGIN_RSP_MULTICHAN_TERMINATED = 0x1,
++ SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
++
++ SRPT_DEF_SG_TABLESIZE = 128,
++ SRPT_DEF_SG_PER_WQE = 16,
++
++ MIN_SRPT_SQ_SIZE = 16,
++ DEF_SRPT_SQ_SIZE = 4096,
++ SRPT_RQ_SIZE = 128,
++ MIN_SRPT_SRQ_SIZE = 4,
++ DEFAULT_SRPT_SRQ_SIZE = 4095,
++ MAX_SRPT_SRQ_SIZE = 65535,
++
++ MIN_MAX_REQ_SIZE = 996,
++ DEFAULT_MAX_REQ_SIZE
++ = sizeof(struct srp_cmd)/*48*/
++ + sizeof(struct srp_indirect_buf)/*20*/
++ + 128 * sizeof(struct srp_direct_buf)/*16*/,
++
++ MIN_MAX_RSP_SIZE = sizeof(struct srp_rsp)/*36*/ + 4,
++ DEFAULT_MAX_RSP_SIZE = 256, /* leaves 220 bytes for sense data */
++
++ DEFAULT_MAX_RDMA_SIZE = 65536,
++};
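++
++/*
++ * Worked example, using the sizes noted in the comments above:
++ * DEFAULT_MAX_REQ_SIZE = 48 + 20 + 128 * 16 = 2116 bytes, i.e. enough
++ * room for an SRP_CMD carrying an indirect descriptor table with 128
++ * direct buffer descriptors.
++ */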
++
++static inline u64 encode_wr_id(u8 opcode, u32 idx)
++{ return ((u64)opcode << 32) | idx; }
++static inline u8 opcode_from_wr_id(u64 wr_id)
++{ return wr_id >> 32; }
++static inline u32 idx_from_wr_id(u64 wr_id)
++{ return (u32)wr_id; }
++
++struct rdma_iu {
++ u64 raddr;
++ u32 rkey;
++ struct ib_sge *sge;
++ u32 sge_cnt;
++ int mem_id;
++};
++
++/**
++ * enum srpt_command_state - SCSI command state managed by SRPT.
++ * @SRPT_STATE_NEW: New command arrived and is being processed.
++ * @SRPT_STATE_NEED_DATA: Processing a write or bidir command and waiting
++ * for data arrival.
++ * @SRPT_STATE_DATA_IN: Data for the write or bidir command arrived and is
++ * being processed.
++ * @SRPT_STATE_CMD_RSP_SENT: SRP_RSP for SRP_CMD has been sent.
++ * @SRPT_STATE_MGMT_RSP_SENT: SRP_RSP for SRP_TSK_MGMT has been sent.
++ * @SRPT_STATE_DONE: Command processing finished successfully, command
++ * processing has been aborted or command processing
++ * failed.
++ */
++enum srpt_command_state {
++ SRPT_STATE_NEW = 0,
++ SRPT_STATE_NEED_DATA = 1,
++ SRPT_STATE_DATA_IN = 2,
++ SRPT_STATE_CMD_RSP_SENT = 3,
++ SRPT_STATE_MGMT_RSP_SENT = 4,
++ SRPT_STATE_DONE = 5,
++};
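++
++/*
++ * Illustrative state sequences, as suggested by the descriptions above:
++ * a read command typically goes NEW -> CMD_RSP_SENT -> DONE, while a
++ * write command typically goes NEW -> NEED_DATA -> DATA_IN ->
++ * CMD_RSP_SENT -> DONE.
++ */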
++
++/**
++ * struct srpt_ioctx - Shared SRPT I/O context information.
++ * @buf: Pointer to the buffer.
++ * @dma: DMA address of the buffer.
++ * @index: Index of the I/O context in its ioctx_ring array.
++ */
++struct srpt_ioctx {
++ void *buf;
++ dma_addr_t dma;
++ uint32_t index;
++};
++
++/**
++ * struct srpt_recv_ioctx - SRPT receive I/O context.
++ * @ioctx: See above.
++ * @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list.
++ */
++struct srpt_recv_ioctx {
++ struct srpt_ioctx ioctx;
++ struct list_head wait_list;
++};
++
++/**
++ * struct srpt_send_ioctx - SRPT send I/O context.
++ * @ioctx: See above.
++ * @free_list: Node for linking this struct into srpt_rdma_ch.free_list.
++ * @state: I/O context state. See also enum srpt_command_state.
++ */
++struct srpt_send_ioctx {
++ struct srpt_ioctx ioctx;
++ struct srpt_rdma_ch *ch;
++ struct rdma_iu *rdma_ius;
++ struct srp_direct_buf *rbufs;
++ struct srp_direct_buf single_rbuf;
++ struct scatterlist *sg;
++ struct list_head free_list;
++ int sg_cnt;
++ int mapped_sg_count;
++ u16 n_rdma_ius;
++ u8 n_rdma;
++ u8 n_rbuf;
++
++ struct scst_cmd *scmnd;
++ scst_data_direction dir;
++ atomic_t state;
++};
++
++/**
++ * struct srpt_mgmt_ioctx - SCST management command context information.
++ * @ioctx: SRPT I/O context associated with the management command.
++ * @tag: SCSI tag of the management command.
++ */
++struct srpt_mgmt_ioctx {
++ struct srpt_send_ioctx *ioctx;
++ u64 tag;
++};
++
++/**
++ * enum rdma_ch_state - SRP channel state.
++ */
++enum rdma_ch_state {
++ RDMA_CHANNEL_CONNECTING,
++ RDMA_CHANNEL_LIVE,
++ RDMA_CHANNEL_DISCONNECTING
++};
++
++/**
++ * struct srpt_rdma_ch - RDMA channel.
++ * @wait_queue: Allows the kernel thread to wait for more work.
++ * @thread: Kernel thread that processes the IB queues associated with
++ * the channel.
++ * @cm_id: IB CM ID associated with the channel.
++ * @rq_size: IB receive queue size.
++ * @processing_compl: whether or not an IB completion is being processed.
++ * @qp: IB queue pair used for communicating over this channel.
++ * @sq_wr_avail: number of work requests available in the send queue.
++ * @cq: IB completion queue for this channel.
++ * @sport: pointer to the information of the HCA port used by this
++ * channel.
++ * @i_port_id: 128-bit initiator port identifier copied from SRP_LOGIN_REQ.
++ * @t_port_id: 128-bit target port identifier copied from SRP_LOGIN_REQ.
++ * @max_ti_iu_len: maximum target-to-initiator information unit length.
++ * @supports_cred_req: whether or not the initiator supports SRP_CRED_REQ.
++ * @req_lim: request limit: maximum number of requests that may be sent
++ * by the initiator without having received a response.
++ * @state: channel state. See also enum rdma_ch_state.
++ * @list: node for insertion in the srpt_device.rch_list list.
++ * @cmd_wait_list: list of SCST commands that arrived before the RTU event. This
++ * list contains struct srpt_ioctx elements and is protected
++ * against concurrent modification by the cm_id spinlock.
++ * @spinlock: Protects free_list.
++ * @free_list: Head of list with free send I/O contexts.
++ * @scst_sess: SCST session information associated with this SRP channel.
++ * @sess_name: SCST session name.
++ */
++struct srpt_rdma_ch {
++ wait_queue_head_t wait_queue;
++ struct task_struct *thread;
++ struct ib_cm_id *cm_id;
++ struct ib_qp *qp;
++ int rq_size;
++ atomic_t processing_compl;
++ struct ib_cq *cq;
++ atomic_t sq_wr_avail;
++ struct srpt_port *sport;
++ u8 i_port_id[16];
++ u8 t_port_id[16];
++ int max_ti_iu_len;
++ atomic_t req_lim;
++ atomic_t req_lim_delta;
++ spinlock_t spinlock;
++ struct list_head free_list;
++ struct srpt_send_ioctx **ioctx_ring;
++ struct ib_wc wc[16];
++ atomic_t state;
++ struct list_head list;
++ struct list_head cmd_wait_list;
++
++ struct scst_session *scst_sess;
++ u8 sess_name[36];
++};
++
++/**
++ * struct srpt_port - Information associated by SRPT with a single IB port.
++ * @sdev: backpointer to the HCA information.
++ * @mad_agent: per-port management datagram processing information.
++ * @port: one-based port number.
++ * @sm_lid: cached value of the port's sm_lid.
++ * @lid: cached value of the port's lid.
++ * @gid: cached value of the port's gid.
++ * @work: work structure for refreshing the aforementioned cached values.
++ */
++struct srpt_port {
++ struct srpt_device *sdev;
++ struct ib_mad_agent *mad_agent;
++ u8 port;
++ u16 sm_lid;
++ u16 lid;
++ union ib_gid gid;
++ struct work_struct work;
++};
++
++/**
++ * struct srpt_device - Information associated by SRPT with a single HCA.
++ * @device: backpointer to the struct ib_device managed by the IB core.
++ * @pd: IB protection domain.
++ * @mr: L_Key (local key) with write access to all local memory.
++ * @srq: Per-HCA SRQ (shared receive queue).
++ * @cm_id: connection identifier.
++ * @dev_attr: attributes of the InfiniBand device as obtained during the
++ * ib_client.add() callback.
++ * @ioctx_ring: Per-HCA I/O context ring.
++ * @rch_list: per-device channel list -- see also srpt_rdma_ch.list.
++ * @spinlock: protects rch_list.
++ * @srpt_port: information about the ports owned by this HCA.
++ * @event_handler: per-HCA asynchronous IB event handler.
++ * @dev: per-port srpt-<portname> device instance.
++ * @scst_tgt: SCST target information associated with this HCA.
++ * @enabled: Whether or not this SCST target is enabled.
++ */
++struct srpt_device {
++ struct ib_device *device;
++ struct ib_pd *pd;
++ struct ib_mr *mr;
++ struct ib_srq *srq;
++ struct ib_cm_id *cm_id;
++ struct ib_device_attr dev_attr;
++ int srq_size;
++ struct srpt_recv_ioctx **ioctx_ring;
++ struct list_head rch_list;
++ spinlock_t spinlock;
++ struct srpt_port port[2];
++ struct ib_event_handler event_handler;
++ struct device dev;
++ struct scst_tgt *scst_tgt;
++ bool enabled;
++};
++
++#endif /* IB_SRPT_H */
++
++/*
++ * Local variables:
++ * c-basic-offset: 8
++ * indent-tabs-mode: t
++ * End:
++ */
+diff -uprN orig/linux-2.6.36/Documentation/scst/README.srpt linux-2.6.36/Documentation/scst/README.srpt
+--- orig/linux-2.6.36/Documentation/scst/README.srpt
++++ linux-2.6.36/Documentation/scst/README.srpt
+@@ -0,0 +1,109 @@
++SCSI RDMA Protocol (SRP) Target driver for Linux
++=================================================
++
++The SRP Target driver is designed to work directly on top of the
++OpenFabrics OFED-1.x software stack (http://www.openfabrics.org) or
++the Infiniband drivers in the Linux kernel tree
++(http://www.kernel.org). The SRP target driver also interfaces with
++the generic SCSI target mid-level driver called SCST
++(http://scst.sourceforge.net).
++
++How-to run
++-----------
++
++A. On srp target machine
++1. Please refer to SCST's README for loading scst driver and its
++dev_handlers drivers (scst_disk, scst_vdisk block or file IO mode, nullio, ...)
++
++Example 1: working with real back-end scsi disks
++a. modprobe scst
++b. modprobe scst_disk
++c. cat /proc/scsi_tgt/scsi_tgt
++
++ibstor00:~ # cat /proc/scsi_tgt/scsi_tgt
++Device (host:ch:id:lun or name) Device handler
++0:0:0:0 dev_disk
++4:0:0:0 dev_disk
++5:0:0:0 dev_disk
++6:0:0:0 dev_disk
++7:0:0:0 dev_disk
++
++Now you want to exclude the first scsi disk and expose the last 4 scsi disks as
++IB/SRP luns for I/O
++echo "add 4:0:0:0 0" >/proc/scsi_tgt/groups/Default/devices
++echo "add 5:0:0:0 1" >/proc/scsi_tgt/groups/Default/devices
++echo "add 6:0:0:0 2" >/proc/scsi_tgt/groups/Default/devices
++echo "add 7:0:0:0 3" >/proc/scsi_tgt/groups/Default/devices
++
++Example 2: working with VDISK FILEIO mode (using md0 device and file 10G-file)
++a. modprobe scst
++b. modprobe scst_vdisk
++c. echo "open vdisk0 /dev/md0" > /proc/scsi_tgt/vdisk/vdisk
++d. echo "open vdisk1 /10G-file" > /proc/scsi_tgt/vdisk/vdisk
++e. echo "add vdisk0 0" >/proc/scsi_tgt/groups/Default/devices
++f. echo "add vdisk1 1" >/proc/scsi_tgt/groups/Default/devices
++
++Example 3: working with VDISK BLOCKIO mode (using md0 device, sda, and cciss/c1d0)
++a. modprobe scst
++b. modprobe scst_vdisk
++c. echo "open vdisk0 /dev/md0 BLOCKIO" > /proc/scsi_tgt/vdisk/vdisk
++d. echo "open vdisk1 /dev/sda BLOCKIO" > /proc/scsi_tgt/vdisk/vdisk
++e. echo "open vdisk2 /dev/cciss/c1d0 BLOCKIO" > /proc/scsi_tgt/vdisk/vdisk
++f. echo "add vdisk0 0" >/proc/scsi_tgt/groups/Default/devices
++g. echo "add vdisk1 1" >/proc/scsi_tgt/groups/Default/devices
++h. echo "add vdisk2 2" >/proc/scsi_tgt/groups/Default/devices
++
++2. modprobe ib_srpt
++
++B. On initiator machines you can manually do the following steps:
++1. modprobe ib_srp
++2. ibsrpdm -c (to discover new SRP target)
++3. echo <new target info> > /sys/class/infiniband_srp/srp-mthca0-1/add_target
++4. fdisk -l (will show new discovered scsi disks)
++
++Example:
++Assume that you use port 1 of the first HCA in the system, i.e. mthca0
++
++[root@lab104 ~]# ibsrpdm -c -d /dev/infiniband/umad0
++id_ext=0002c90200226cf4,ioc_guid=0002c90200226cf4,
++dgid=fe800000000000000002c90200226cf5,pkey=ffff,service_id=0002c90200226cf4
++[root@lab104 ~]# echo id_ext=0002c90200226cf4,ioc_guid=0002c90200226cf4,
++dgid=fe800000000000000002c90200226cf5,pkey=ffff,service_id=0002c90200226cf4 >
++/sys/class/infiniband_srp/srp-mthca0-1/add_target
++
++OR
++
+++ You can edit /etc/infiniband/openib.conf to load the srp driver and the srp
++HA daemon automatically, i.e. set SRP_LOAD=yes and SRPHA_ENABLE=yes
+++ To set up and use the high availability feature you need the dm-multipath
++driver and the multipath tool
+++ Please refer to the OFED-1.x SRP user manual for more detailed instructions
++on how to enable and use the HA feature
++
++To minimize QUEUE_FULL conditions, you can apply the scst_increase_max_tgt_cmds
++patch from the SRPT package at http://sourceforge.net/project/showfiles.php?group_id=110471
++
++Performance notes
++-----------------
++
++In some cases, for instance when working with SSD devices whose internal
++threads consume 100% of a single CPU for data transfers, it can be
++necessary to assign dedicated CPUs to those threads using the Linux CPU
++affinity facilities in order to maximize IOPS. No IRQ processing should
++be done on those CPUs; check this using /proc/interrupts. See the taskset
++command and Documentation/IRQ-affinity.txt in your kernel's source tree
++for how to assign CPU affinity to tasks and IRQs.
++
++The reason is that processing of incoming commands in SIRQ context can
++happen on the same CPUs as the SSD devices' threads that do the data
++transfers. As a result, those threads do not get the full CPU power and
++perform worse.
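++
++For example, to dedicate CPU 2 to such a thread (the PID and IRQ numbers
++below are only illustrative):
++
++  taskset -p -c 2 12345                # pin the device's I/O thread to CPU 2
++  echo 1 > /proc/irq/42/smp_affinity   # route IRQ 42 to CPU 0, away from CPU 2
++  cat /proc/interrupts                 # verify that no interrupts land on CPU 2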
++
++As an alternative to CPU affinity assignment, you can try enabling the
++SRP target's internal thread. This allows the Linux CPU scheduler to
++distribute the load better among the available CPUs. To enable the SRP
++target driver's internal thread, load the ib_srpt module with the
++parameter "thread=1".
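++
++For example:
++
++  modprobe ib_srpt thread=1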
++
++Send questions about this driver to scst-devel@lists.sourceforge.net, CC:
++Vu Pham <vuhuong@mellanox.com> and Bart Van Assche <bart.vanassche@gmail.com>.
+diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/Kconfig linux-2.6.36/drivers/scst/scst_local/Kconfig
+--- orig/linux-2.6.36/drivers/scst/scst_local/Kconfig
++++ linux-2.6.36/drivers/scst/scst_local/Kconfig
+@@ -0,0 +1,22 @@
++config SCST_LOCAL
++ tristate "SCST Local driver"
++ depends on SCST && !HIGHMEM4G && !HIGHMEM64G
++ ---help---
++ This module provides a LLD SCSI driver that connects to
++ the SCST target mode subsystem in a loop-back manner.
++ It allows you to test target-mode device-handlers locally.
++ You will need the SCST subsystem as well.
++
++ If unsure whether you really want or need this, say N.
++
++config SCST_LOCAL_FORCE_DIRECT_PROCESSING
++ bool "Force local processing"
++ depends on SCST_LOCAL
++ help
++ This experimental option forces scst_local to make SCST process
++ SCSI commands in the same context, in which they was submitted.
++	  SCSI commands in the same context in which they were submitted.
++ option to "Y" will give some performance increase, but might be
++ unsafe.
++
++ If unsure, say "N".
+diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/Makefile linux-2.6.36/drivers/scst/scst_local/Makefile
+--- orig/linux-2.6.36/drivers/scst/scst_local/Makefile
++++ linux-2.6.36/drivers/scst/scst_local/Makefile
+@@ -0,0 +1,2 @@
++obj-$(CONFIG_SCST_LOCAL) += scst_local.o
++
+diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/drivers/scst/scst_local/scst_local.c
+--- orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c
++++ linux-2.6.36/drivers/scst/scst_local/scst_local.c
+@@ -0,0 +1,1563 @@
++/*
++ * Copyright (C) 2008 - 2010 Richard Sharpe
++ * Copyright (C) 1992 Eric Youngdale
++ * Copyright (C) 2008 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
++ *
++ * Simulate a host adapter and an SCST target adapter back to back
++ *
++ * Based on the scsi_debug.c driver originally by Eric Youngdale and
++ * others, including D Gilbert et al
++ *
++ */
++
++#include <linux/module.h>
++
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/moduleparam.h>
++#include <linux/scatterlist.h>
++#include <linux/slab.h>
++#include <linux/completion.h>
++#include <linux/spinlock.h>
++
++#include <scsi/scsi.h>
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_host.h>
++#include <scsi/scsi_tcq.h>
++
++#define LOG_PREFIX "scst_local"
++
++/* SCST includes ... */
++#include <scst/scst_const.h>
++#include <scst/scst.h>
++#include <scst/scst_debug.h>
++
++#ifdef CONFIG_SCST_DEBUG
++#define SCST_LOCAL_DEFAULT_LOG_FLAGS (TRACE_FUNCTION | TRACE_PID | \
++ TRACE_LINE | TRACE_OUT_OF_MEM | TRACE_MGMT | TRACE_MGMT_DEBUG | \
++ TRACE_MINOR | TRACE_SPECIAL)
++#else
++# ifdef CONFIG_SCST_TRACING
++#define SCST_LOCAL_DEFAULT_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MGMT | \
++ TRACE_SPECIAL)
++# endif
++#endif
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++#define trace_flag scst_local_trace_flag
++static unsigned long scst_local_trace_flag = SCST_LOCAL_DEFAULT_LOG_FLAGS;
++#endif
++
++#define TRUE 1
++#define FALSE 0
++
++#define SCST_LOCAL_VERSION "1.0.0"
++static const char *scst_local_version_date = "20100910";
++
++/* Some statistics */
++static atomic_t num_aborts = ATOMIC_INIT(0);
++static atomic_t num_dev_resets = ATOMIC_INIT(0);
++static atomic_t num_target_resets = ATOMIC_INIT(0);
++
++static bool scst_local_add_default_tgt = true;
++module_param_named(add_default_tgt, scst_local_add_default_tgt, bool, S_IRUGO);
++MODULE_PARM_DESC(add_default_tgt, "whether to add, at startup, the default "
++	"target scst_local_tgt with the default session scst_local_host (default: true)");
++
++struct scst_aen_work_item {
++ struct list_head work_list_entry;
++ struct scst_aen *aen;
++};
++
++struct scst_local_tgt {
++ struct scst_tgt *scst_tgt;
++ struct list_head sessions_list; /* protected by scst_local_mutex */
++ struct list_head tgts_list_entry;
++
++ /* SCSI version descriptors */
++ uint16_t scsi_transport_version;
++ uint16_t phys_transport_version;
++};
++
++struct scst_local_sess {
++ struct scst_session *scst_sess;
++
++ unsigned int unregistering:1;
++
++ struct device dev;
++ struct Scsi_Host *shost;
++ struct scst_local_tgt *tgt;
++
++ int number;
++
++ struct mutex tr_id_mutex;
++ uint8_t *transport_id;
++ int transport_id_len;
++
++ struct work_struct aen_work;
++ spinlock_t aen_lock;
++ struct list_head aen_work_list; /* protected by aen_lock */
++
++ struct list_head sessions_list_entry;
++};
++
++#define to_scst_lcl_sess(d) \
++ container_of(d, struct scst_local_sess, dev)
++
++static int __scst_local_add_adapter(struct scst_local_tgt *tgt,
++ const char *initiator_name, struct scst_local_sess **out_sess,
++ bool locked);
++static int scst_local_add_adapter(struct scst_local_tgt *tgt,
++ const char *initiator_name, struct scst_local_sess **out_sess);
++static void scst_local_remove_adapter(struct scst_local_sess *sess);
++static int scst_local_add_target(const char *target_name,
++ struct scst_local_tgt **out_tgt);
++static void __scst_local_remove_target(struct scst_local_tgt *tgt);
++static void scst_local_remove_target(struct scst_local_tgt *tgt);
++
++static atomic_t scst_local_sess_num = ATOMIC_INIT(0);
++
++static LIST_HEAD(scst_local_tgts_list);
++static DEFINE_MUTEX(scst_local_mutex);
++
++static DECLARE_RWSEM(scst_local_exit_rwsem);
++
++MODULE_AUTHOR("Richard Sharpe, Vladislav Bolkhovitin + ideas from SCSI_DEBUG");
++MODULE_DESCRIPTION("SCSI+SCST local adapter driver");
++MODULE_LICENSE("GPL");
++MODULE_VERSION(SCST_LOCAL_VERSION);
++
++static int scst_local_get_sas_transport_id(struct scst_local_sess *sess,
++ uint8_t **transport_id, int *len)
++{
++ int res = 0;
++ int tr_id_size = 0;
++ uint8_t *tr_id = NULL;
++
++ TRACE_ENTRY();
++
++ tr_id_size = 24; /* A SAS TransportID */
++
++ tr_id = kzalloc(tr_id_size, GFP_KERNEL);
++ if (tr_id == NULL) {
++ PRINT_ERROR("Allocation of TransportID (size %d) failed",
++ tr_id_size);
++ res = -ENOMEM;
++ goto out;
++ }
++
++ tr_id[0] = 0x00 | SCSI_TRANSPORTID_PROTOCOLID_SAS;
++
++ /*
++ * Assemble a valid SAS address = 0x5OOUUIIR12345678 ... Does SCST
++ * have one?
++ */
++
++ tr_id[4] = 0x5F;
++ tr_id[5] = 0xEE;
++ tr_id[6] = 0xDE;
++ tr_id[7] = 0x40 | ((sess->number >> 4) & 0x0F);
++ tr_id[8] = 0x0F | (sess->number & 0xF0);
++ tr_id[9] = 0xAD;
++ tr_id[10] = 0xE0;
++ tr_id[11] = 0x50;
++
++ *transport_id = tr_id;
++ *len = tr_id_size;
++
++ TRACE_DBG("Created tid '%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X'",
++ tr_id[4], tr_id[5], tr_id[6], tr_id[7],
++ tr_id[8], tr_id[9], tr_id[10], tr_id[11]);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_local_get_initiator_port_transport_id(
++ struct scst_session *scst_sess, uint8_t **transport_id)
++{
++ int res = 0;
++ int tr_id_size = 0;
++ uint8_t *tr_id = NULL;
++ struct scst_local_sess *sess;
++
++ TRACE_ENTRY();
++
++ if (scst_sess == NULL) {
++ res = SCSI_TRANSPORTID_PROTOCOLID_SAS;
++ goto out;
++ }
++
++ sess = (struct scst_local_sess *)scst_sess_get_tgt_priv(scst_sess);
++
++ mutex_lock(&sess->tr_id_mutex);
++
++ if (sess->transport_id == NULL) {
++ res = scst_local_get_sas_transport_id(sess,
++ transport_id, &tr_id_size);
++ goto out_unlock;
++ }
++
++ tr_id_size = sess->transport_id_len;
++ BUG_ON(tr_id_size == 0);
++
++ tr_id = kzalloc(tr_id_size, GFP_KERNEL);
++ if (tr_id == NULL) {
++ PRINT_ERROR("Allocation of TransportID (size %d) failed",
++ tr_id_size);
++ res = -ENOMEM;
++ goto out;
++ }
++
++ memcpy(tr_id, sess->transport_id, sess->transport_id_len);
++
++out_unlock:
++ mutex_unlock(&sess->tr_id_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/**
++ ** Tgtt attributes
++ **/
++
++static ssize_t scst_local_version_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ sprintf(buf, "%s/%s\n", SCST_LOCAL_VERSION, scst_local_version_date);
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ strcat(buf, "EXTRACHECKS\n");
++#endif
++
++#ifdef CONFIG_SCST_TRACING
++ strcat(buf, "TRACING\n");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG
++ strcat(buf, "DEBUG\n");
++#endif
++
++ TRACE_EXIT();
++ return strlen(buf);
++}
++
++static struct kobj_attribute scst_local_version_attr =
++ __ATTR(version, S_IRUGO, scst_local_version_show, NULL);
++
++static ssize_t scst_local_stats_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++
++{
++ return sprintf(buf, "Aborts: %d, Device Resets: %d, Target Resets: %d",
++ atomic_read(&num_aborts), atomic_read(&num_dev_resets),
++ atomic_read(&num_target_resets));
++}
++
++static struct kobj_attribute scst_local_stats_attr =
++ __ATTR(stats, S_IRUGO, scst_local_stats_show, NULL);
++
++static const struct attribute *scst_local_tgtt_attrs[] = {
++ &scst_local_version_attr.attr,
++ &scst_local_stats_attr.attr,
++ NULL,
++};
++
++/**
++ ** Tgt attributes
++ **/
++
++static ssize_t scst_local_scsi_transport_version_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_tgt *scst_tgt;
++ struct scst_local_tgt *tgt;
++ ssize_t res;
++
++ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
++ return -ENOENT;
++
++ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ tgt = (struct scst_local_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
++
++ if (tgt->scsi_transport_version != 0)
++ res = sprintf(buf, "0x%x\n%s", tgt->scsi_transport_version,
++ SCST_SYSFS_KEY_MARK "\n");
++ else
++ res = sprintf(buf, "0x%x\n", 0x0BE0); /* SAS */
++
++ up_read(&scst_local_exit_rwsem);
++ return res;
++}
++
++static ssize_t scst_local_scsi_transport_version_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buffer, size_t size)
++{
++ ssize_t res;
++ struct scst_tgt *scst_tgt;
++ struct scst_local_tgt *tgt;
++ unsigned long val;
++
++ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
++ return -ENOENT;
++
++ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ tgt = (struct scst_local_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
++
++ res = strict_strtoul(buffer, 0, &val);
++ if (res != 0) {
++ PRINT_ERROR("strict_strtoul() for %s failed: %zd", buffer, res);
++ goto out_up;
++ }
++
++ tgt->scsi_transport_version = val;
++
++ res = size;
++
++out_up:
++ up_read(&scst_local_exit_rwsem);
++ return res;
++}
++
++static struct kobj_attribute scst_local_scsi_transport_version_attr =
++ __ATTR(scsi_transport_version, S_IRUGO | S_IWUSR,
++ scst_local_scsi_transport_version_show,
++ scst_local_scsi_transport_version_store);
++
++static ssize_t scst_local_phys_transport_version_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_tgt *scst_tgt;
++ struct scst_local_tgt *tgt;
++ ssize_t res;
++
++ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
++ return -ENOENT;
++
++ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ tgt = (struct scst_local_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
++
++ res = sprintf(buf, "0x%x\n%s", tgt->phys_transport_version,
++ (tgt->phys_transport_version != 0) ?
++ SCST_SYSFS_KEY_MARK "\n" : "");
++
++ up_read(&scst_local_exit_rwsem);
++ return res;
++}
++
++static ssize_t scst_local_phys_transport_version_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buffer, size_t size)
++{
++ ssize_t res;
++ struct scst_tgt *scst_tgt;
++ struct scst_local_tgt *tgt;
++ unsigned long val;
++
++ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
++ return -ENOENT;
++
++ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ tgt = (struct scst_local_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
++
++ res = strict_strtoul(buffer, 0, &val);
++ if (res != 0) {
++ PRINT_ERROR("strict_strtoul() for %s failed: %zd", buffer, res);
++ goto out_up;
++ }
++
++ tgt->phys_transport_version = val;
++
++ res = size;
++
++out_up:
++ up_read(&scst_local_exit_rwsem);
++ return res;
++}
++
++static struct kobj_attribute scst_local_phys_transport_version_attr =
++ __ATTR(phys_transport_version, S_IRUGO | S_IWUSR,
++ scst_local_phys_transport_version_show,
++ scst_local_phys_transport_version_store);
++
++static const struct attribute *scst_local_tgt_attrs[] = {
++ &scst_local_scsi_transport_version_attr.attr,
++ &scst_local_phys_transport_version_attr.attr,
++ NULL,
++};
++
++/**
++ ** Session attributes
++ **/
++
++static ssize_t scst_local_transport_id_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ ssize_t res;
++ struct scst_session *scst_sess;
++ struct scst_local_sess *sess;
++ uint8_t *tr_id;
++ int tr_id_len, i;
++
++ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
++ return -ENOENT;
++
++ scst_sess = container_of(kobj, struct scst_session, sess_kobj);
++ sess = (struct scst_local_sess *)scst_sess_get_tgt_priv(scst_sess);
++
++ mutex_lock(&sess->tr_id_mutex);
++
++ if (sess->transport_id != NULL) {
++ tr_id = sess->transport_id;
++ tr_id_len = sess->transport_id_len;
++ } else {
++ res = scst_local_get_sas_transport_id(sess, &tr_id, &tr_id_len);
++ if (res != 0)
++ goto out_unlock;
++ }
++
++ res = 0;
++ for (i = 0; i < tr_id_len; i++)
++ res += sprintf(&buf[res], "%c", tr_id[i]);
++
++ if (sess->transport_id == NULL)
++ kfree(tr_id);
++
++out_unlock:
++ mutex_unlock(&sess->tr_id_mutex);
++ up_read(&scst_local_exit_rwsem);
++ return res;
++}
++
++static ssize_t scst_local_transport_id_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buffer, size_t size)
++{
++ ssize_t res;
++ struct scst_session *scst_sess;
++ struct scst_local_sess *sess;
++
++ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
++ return -ENOENT;
++
++ scst_sess = container_of(kobj, struct scst_session, sess_kobj);
++ sess = (struct scst_local_sess *)scst_sess_get_tgt_priv(scst_sess);
++
++ mutex_lock(&sess->tr_id_mutex);
++
++ if (sess->transport_id != NULL) {
++ kfree(sess->transport_id);
++ sess->transport_id = NULL;
++ sess->transport_id_len = 0;
++ }
++
++ if (size == 0)
++ goto out_res;
++
++ sess->transport_id = kzalloc(size, GFP_KERNEL);
++ if (sess->transport_id == NULL) {
++ PRINT_ERROR("Allocation of transport_id (size %zd) failed",
++ size);
++ res = -ENOMEM;
++ goto out_unlock;
++ }
++
++ sess->transport_id_len = size;
++
++ memcpy(sess->transport_id, buffer, sess->transport_id_len);
++
++out_res:
++ res = size;
++
++out_unlock:
++ mutex_unlock(&sess->tr_id_mutex);
++ up_read(&scst_local_exit_rwsem);
++ return res;
++}
++
++static struct kobj_attribute scst_local_transport_id_attr =
++ __ATTR(transport_id, S_IRUGO | S_IWUSR,
++ scst_local_transport_id_show,
++ scst_local_transport_id_store);
++
++static const struct attribute *scst_local_sess_attrs[] = {
++ &scst_local_transport_id_attr.attr,
++ NULL,
++};
++
++static ssize_t scst_local_sysfs_add_target(const char *target_name, char *params)
++{
++ int res;
++ struct scst_local_tgt *tgt;
++ char *param, *p;
++
++ TRACE_ENTRY();
++
++ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
++ return -ENOENT;
++
++ res = scst_local_add_target(target_name, &tgt);
++ if (res != 0)
++ goto out_up;
++
++ while (1) {
++ param = scst_get_next_token_str(&params);
++ if (param == NULL)
++ break;
++
++ p = scst_get_next_lexem(&param);
++ if (*p == '\0')
++ break;
++
++ if (strcasecmp("session_name", p) != 0) {
++ PRINT_ERROR("Unknown parameter %s", p);
++ res = -EINVAL;
++ goto out_remove;
++ }
++
++ p = scst_get_next_lexem(&param);
++ if (*p == '\0') {
++ PRINT_ERROR("Wrong session name %s", p);
++ res = -EINVAL;
++ goto out_remove;
++ }
++
++ res = scst_local_add_adapter(tgt, p, NULL);
++ if (res != 0)
++ goto out_remove;
++ }
++
++out_up:
++ up_read(&scst_local_exit_rwsem);
++
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_remove:
++ scst_local_remove_target(tgt);
++ goto out_up;
++}
++
++static ssize_t scst_local_sysfs_del_target(const char *target_name)
++{
++ int res;
++ struct scst_local_tgt *tgt;
++ bool deleted = false;
++
++ TRACE_ENTRY();
++
++ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
++ return -ENOENT;
++
++ mutex_lock(&scst_local_mutex);
++ list_for_each_entry(tgt, &scst_local_tgts_list, tgts_list_entry) {
++ if (strcmp(target_name, tgt->scst_tgt->tgt_name) == 0) {
++ __scst_local_remove_target(tgt);
++ deleted = true;
++ break;
++ }
++ }
++ mutex_unlock(&scst_local_mutex);
++
++ if (!deleted) {
++ PRINT_ERROR("Target %s not found", target_name);
++ res = -ENOENT;
++ goto out_up;
++ }
++
++ res = 0;
++
++out_up:
++ up_read(&scst_local_exit_rwsem);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t scst_local_sysfs_mgmt_cmd(char *buf)
++{
++ ssize_t res;
++ char *command, *target_name, *session_name;
++ struct scst_local_tgt *t, *tgt;
++
++ TRACE_ENTRY();
++
++ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
++ return -ENOENT;
++
++ command = scst_get_next_lexem(&buf);
++
++ target_name = scst_get_next_lexem(&buf);
++ if (*target_name == '\0') {
++ PRINT_ERROR("%s", "Target name required");
++ res = -EINVAL;
++ goto out_up;
++ }
++
++ mutex_lock(&scst_local_mutex);
++
++ tgt = NULL;
++ list_for_each_entry(t, &scst_local_tgts_list, tgts_list_entry) {
++ if (strcmp(t->scst_tgt->tgt_name, target_name) == 0) {
++ tgt = t;
++ break;
++ }
++ }
++ if (tgt == NULL) {
++ PRINT_ERROR("Target %s not found", target_name);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ session_name = scst_get_next_lexem(&buf);
++ if (*session_name == '\0') {
++ PRINT_ERROR("%s", "Session name required");
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ if (strcasecmp("add_session", command) == 0) {
++ res = __scst_local_add_adapter(tgt, session_name, NULL, true);
++ } else if (strcasecmp("del_session", command) == 0) {
++ struct scst_local_sess *s, *sess = NULL;
++ list_for_each_entry(s, &tgt->sessions_list,
++ sessions_list_entry) {
++ if (strcmp(s->scst_sess->initiator_name, session_name) == 0) {
++ sess = s;
++ break;
++ }
++ }
++ if (sess == NULL) {
++ PRINT_ERROR("Session %s not found (target %s)",
++ session_name, target_name);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++ scst_local_remove_adapter(sess);
++ }
++
++ res = 0;
++
++out_unlock:
++ mutex_unlock(&scst_local_mutex);
++
++out_up:
++ up_read(&scst_local_exit_rwsem);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_local_abort(struct scsi_cmnd *SCpnt)
++{
++ struct scst_local_sess *sess;
++ int ret;
++ DECLARE_COMPLETION_ONSTACK(dev_reset_completion);
++
++ TRACE_ENTRY();
++
++ sess = to_scst_lcl_sess(scsi_get_device(SCpnt->device->host));
++
++ ret = scst_rx_mgmt_fn_tag(sess->scst_sess, SCST_ABORT_TASK, SCpnt->tag,
++ FALSE, &dev_reset_completion);
++
++ /* Now wait for the completion ... */
++ wait_for_completion_interruptible(&dev_reset_completion);
++
++ atomic_inc(&num_aborts);
++
++ if (ret == 0)
++ ret = SUCCESS;
++
++ TRACE_EXIT_RES(ret);
++ return ret;
++}
++
++static int scst_local_device_reset(struct scsi_cmnd *SCpnt)
++{
++ struct scst_local_sess *sess;
++ uint16_t lun;
++ int ret;
++ DECLARE_COMPLETION_ONSTACK(dev_reset_completion);
++
++ TRACE_ENTRY();
++
++ sess = to_scst_lcl_sess(scsi_get_device(SCpnt->device->host));
++
++ lun = SCpnt->device->lun;
++ lun = cpu_to_be16(lun);
++
++ ret = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_LUN_RESET,
++ (const uint8_t *)&lun, sizeof(lun), FALSE,
++ &dev_reset_completion);
++
++ /* Now wait for the completion ... */
++ wait_for_completion_interruptible(&dev_reset_completion);
++
++ atomic_inc(&num_dev_resets);
++
++ if (ret == 0)
++ ret = SUCCESS;
++
++ TRACE_EXIT_RES(ret);
++ return ret;
++}
++
++static int scst_local_target_reset(struct scsi_cmnd *SCpnt)
++{
++ struct scst_local_sess *sess;
++ uint16_t lun;
++ int ret;
++ DECLARE_COMPLETION_ONSTACK(dev_reset_completion);
++
++ TRACE_ENTRY();
++
++ sess = to_scst_lcl_sess(scsi_get_device(SCpnt->device->host));
++
++ lun = SCpnt->device->lun;
++ lun = cpu_to_be16(lun);
++
++ ret = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_TARGET_RESET,
++ (const uint8_t *)&lun, sizeof(lun), FALSE,
++ &dev_reset_completion);
++
++ /* Now wait for the completion ... */
++ wait_for_completion_interruptible(&dev_reset_completion);
++
++ atomic_inc(&num_target_resets);
++
++ if (ret == 0)
++ ret = SUCCESS;
++
++ TRACE_EXIT_RES(ret);
++ return ret;
++}
++
++static void copy_sense(struct scsi_cmnd *cmnd, struct scst_cmd *scst_cmnd)
++{
++ int scst_cmnd_sense_len = scst_cmd_get_sense_buffer_len(scst_cmnd);
++
++ TRACE_ENTRY();
++
++ scst_cmnd_sense_len = (SCSI_SENSE_BUFFERSIZE < scst_cmnd_sense_len ?
++ SCSI_SENSE_BUFFERSIZE : scst_cmnd_sense_len);
++ memcpy(cmnd->sense_buffer, scst_cmd_get_sense_buffer(scst_cmnd),
++ scst_cmnd_sense_len);
++
++ TRACE_BUFFER("Sense set", cmnd->sense_buffer, scst_cmnd_sense_len);
++
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * Utility function to handle processing of done and allow
++ * easy insertion of error injection if desired
++ */
++static int scst_local_send_resp(struct scsi_cmnd *cmnd,
++ struct scst_cmd *scst_cmnd,
++ void (*done)(struct scsi_cmnd *),
++ int scsi_result)
++{
++ int ret = 0;
++
++ TRACE_ENTRY();
++
++ if (scst_cmnd) {
++ /* The buffer isn't ours, so let's be safe and restore it */
++ scst_check_restore_sg_buff(scst_cmnd);
++
++ /* Simulate autosense by this driver */
++ if (unlikely(SCST_SENSE_VALID(scst_cmnd->sense)))
++ copy_sense(cmnd, scst_cmnd);
++ }
++
++ cmnd->result = scsi_result;
++
++ done(cmnd);
++
++ TRACE_EXIT_RES(ret);
++ return ret;
++}
++
++/*
++ * This does the heavy lifting ... we pass all the commands on to the
++ * target driver and have it do its magic ...
++ */
++static int scst_local_queuecommand(struct scsi_cmnd *SCpnt,
++ void (*done)(struct scsi_cmnd *))
++ __acquires(&h->host_lock)
++ __releases(&h->host_lock)
++{
++ struct scst_local_sess *sess;
++ struct scatterlist *sgl = NULL;
++ int sgl_count = 0;
++ uint16_t lun;
++ struct scst_cmd *scst_cmd = NULL;
++ scst_data_direction dir;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("lun %d, cmd: 0x%02X", SCpnt->device->lun, SCpnt->cmnd[0]);
++
++ sess = to_scst_lcl_sess(scsi_get_device(SCpnt->device->host));
++
++ scsi_set_resid(SCpnt, 0);
++
++ /*
++ * We save a pointer to the done routine in SCpnt->scsi_done and
++ * we save that as tgt specific stuff below.
++ */
++ SCpnt->scsi_done = done;
++
++ /*
++ * Tell the target that we have a command ... but first we need
++ * to get the LUN into a format that SCST understand
++ */
++ lun = SCpnt->device->lun;
++ lun = cpu_to_be16(lun);
++ scst_cmd = scst_rx_cmd(sess->scst_sess, (const uint8_t *)&lun,
++ sizeof(lun), SCpnt->cmnd, SCpnt->cmd_len, TRUE);
++ if (!scst_cmd) {
++ PRINT_ERROR("%s", "scst_rx_cmd() failed");
++ return -ENOMEM;
++ }
++
++ scst_cmd_set_tag(scst_cmd, SCpnt->tag);
++ switch (scsi_get_tag_type(SCpnt->device)) {
++ case MSG_SIMPLE_TAG:
++ scst_cmd_set_queue_type(scst_cmd, SCST_CMD_QUEUE_SIMPLE);
++ break;
++ case MSG_HEAD_TAG:
++ scst_cmd_set_queue_type(scst_cmd, SCST_CMD_QUEUE_HEAD_OF_QUEUE);
++ break;
++ case MSG_ORDERED_TAG:
++ scst_cmd_set_queue_type(scst_cmd, SCST_CMD_QUEUE_ORDERED);
++ break;
++ case SCSI_NO_TAG:
++ default:
++ scst_cmd_set_queue_type(scst_cmd, SCST_CMD_QUEUE_UNTAGGED);
++ break;
++ }
++
++ sgl = scsi_sglist(SCpnt);
++ sgl_count = scsi_sg_count(SCpnt);
++
++ dir = SCST_DATA_NONE;
++ switch (SCpnt->sc_data_direction) {
++ case DMA_TO_DEVICE:
++ dir = SCST_DATA_WRITE;
++ scst_cmd_set_expected(scst_cmd, dir, scsi_bufflen(SCpnt));
++ scst_cmd_set_tgt_sg(scst_cmd, sgl, sgl_count);
++ break;
++ case DMA_FROM_DEVICE:
++ dir = SCST_DATA_READ;
++ scst_cmd_set_expected(scst_cmd, dir, scsi_bufflen(SCpnt));
++ scst_cmd_set_tgt_sg(scst_cmd, sgl, sgl_count);
++ break;
++ case DMA_BIDIRECTIONAL:
++ /* Some of these symbols are only defined after 2.6.24 */
++ dir = SCST_DATA_BIDI;
++ scst_cmd_set_expected(scst_cmd, dir, scsi_bufflen(SCpnt));
++ scst_cmd_set_expected_out_transfer_len(scst_cmd,
++ scsi_in(SCpnt)->length);
++ scst_cmd_set_tgt_sg(scst_cmd, scsi_in(SCpnt)->table.sgl,
++ scsi_in(SCpnt)->table.nents);
++ scst_cmd_set_tgt_out_sg(scst_cmd, sgl, sgl_count);
++ break;
++ case DMA_NONE:
++ default:
++ dir = SCST_DATA_NONE;
++ scst_cmd_set_expected(scst_cmd, dir, 0);
++ break;
++ }
++
++ /* Save the correct thing below depending on version */
++ scst_cmd_set_tgt_priv(scst_cmd, SCpnt);
++
++#ifdef CONFIG_SCST_LOCAL_FORCE_DIRECT_PROCESSING
++ {
++ struct Scsi_Host *h = SCpnt->device->host;
++ spin_unlock_irq(h->host_lock);
++ scst_cmd_init_done(scst_cmd, scst_estimate_context_direct());
++ spin_lock_irq(h->host_lock);
++ }
++#else
++ /*
++ * Unfortunately, we called with IRQs disabled, so have no choice,
++ * except to pass to the thread context.
++ */
++ scst_cmd_init_done(scst_cmd, SCST_CONTEXT_THREAD);
++#endif
++
++ TRACE_EXIT();
++ return 0;
++}
++
++static int scst_local_targ_pre_exec(struct scst_cmd *scst_cmd)
++{
++ int res = SCST_PREPROCESS_STATUS_SUCCESS;
++
++ TRACE_ENTRY();
++
++ if (scst_cmd_get_dh_data_buff_alloced(scst_cmd) &&
++ (scst_cmd_get_data_direction(scst_cmd) & SCST_DATA_WRITE))
++ scst_copy_sg(scst_cmd, SCST_SG_COPY_FROM_TARGET);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* Must be called under sess->aen_lock. Drops then reacquires it inside. */
++static void scst_process_aens(struct scst_local_sess *sess,
++ bool cleanup_only)
++{
++ struct scst_aen_work_item *work_item = NULL;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Target work sess %p", sess);
++
++ while (!list_empty(&sess->aen_work_list)) {
++ work_item = list_entry(sess->aen_work_list.next,
++ struct scst_aen_work_item, work_list_entry);
++ list_del(&work_item->work_list_entry);
++
++ spin_unlock(&sess->aen_lock);
++
++ if (cleanup_only)
++ goto done;
++
++ BUG_ON(work_item->aen->event_fn != SCST_AEN_SCSI);
++
++ /* Let's always rescan */
++ scsi_scan_target(&sess->shost->shost_gendev, 0, 0,
++ SCAN_WILD_CARD, 1);
++
++done:
++ scst_aen_done(work_item->aen);
++ kfree(work_item);
++
++ spin_lock(&sess->aen_lock);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static void scst_aen_work_fn(struct work_struct *work)
++{
++ struct scst_local_sess *sess =
++ container_of(work, struct scst_local_sess, aen_work);
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("Target work %p)", sess);
++
++ spin_lock(&sess->aen_lock);
++ scst_process_aens(sess, false);
++ spin_unlock(&sess->aen_lock);
++
++ TRACE_EXIT();
++ return;
++}
++
++static int scst_local_report_aen(struct scst_aen *aen)
++{
++ int res = 0;
++ int event_fn = scst_aen_get_event_fn(aen);
++ struct scst_local_sess *sess;
++ struct scst_aen_work_item *work_item = NULL;
++
++ TRACE_ENTRY();
++
++ sess = (struct scst_local_sess *)scst_sess_get_tgt_priv(
++ scst_aen_get_sess(aen));
++ switch (event_fn) {
++ case SCST_AEN_SCSI:
++ /*
++ * Allocate a work item and place it on the queue
++ */
++ work_item = kzalloc(sizeof(*work_item), GFP_KERNEL);
++ if (!work_item) {
++ PRINT_ERROR("%s", "Unable to allocate work item "
++ "to handle AEN!");
++ return -ENOMEM;
++ }
++
++ spin_lock(&sess->aen_lock);
++
++ if (unlikely(sess->unregistering)) {
++ spin_unlock(&sess->aen_lock);
++ kfree(work_item);
++ res = SCST_AEN_RES_NOT_SUPPORTED;
++ goto out;
++ }
++
++ list_add_tail(&work_item->work_list_entry, &sess->aen_work_list);
++ work_item->aen = aen;
++
++ spin_unlock(&sess->aen_lock);
++
++ schedule_work(&sess->aen_work);
++ break;
++
++ default:
++ TRACE_MGMT_DBG("Unsupported AEN %d", event_fn);
++ res = SCST_AEN_RES_NOT_SUPPORTED;
++ break;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_local_targ_detect(struct scst_tgt_template *tgt_template)
++{
++ TRACE_ENTRY();
++
++ TRACE_EXIT();
++ return 0;
++};
++
++static int scst_local_targ_release(struct scst_tgt *tgt)
++{
++ TRACE_ENTRY();
++
++ TRACE_EXIT();
++ return 0;
++}
++
++static int scst_local_targ_xmit_response(struct scst_cmd *scst_cmd)
++{
++ struct scsi_cmnd *SCpnt = NULL;
++ void (*done)(struct scsi_cmnd *);
++
++ TRACE_ENTRY();
++
++ if (unlikely(scst_cmd_aborted(scst_cmd))) {
++ scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_ABORTED);
++ scst_tgt_cmd_done(scst_cmd, SCST_CONTEXT_SAME);
++ return SCST_TGT_RES_SUCCESS;
++ }
++
++ if (scst_cmd_get_dh_data_buff_alloced(scst_cmd) &&
++ (scst_cmd_get_data_direction(scst_cmd) & SCST_DATA_READ))
++ scst_copy_sg(scst_cmd, SCST_SG_COPY_TO_TARGET);
++
++ SCpnt = scst_cmd_get_tgt_priv(scst_cmd);
++ done = SCpnt->scsi_done;
++
++ /*
++ * This might have to change to use the two status flags
++ */
++ if (scst_cmd_get_is_send_status(scst_cmd)) {
++ int resid = 0, out_resid = 0;
++
++ /* Calculate the residual ... */
++ if (likely(!scst_get_resid(scst_cmd, &resid, &out_resid))) {
++ TRACE_DBG("No residuals for request %p", SCpnt);
++ } else {
++ if (out_resid != 0)
++ PRINT_ERROR("Unable to return OUT residual %d "
++ "(op %02x)", out_resid, SCpnt->cmnd[0]);
++ }
++
++ scsi_set_resid(SCpnt, resid);
++
++ /*
++ * It seems like there is no way to set out_resid ...
++ */
++
++ (void)scst_local_send_resp(SCpnt, scst_cmd, done,
++ scst_cmd_get_status(scst_cmd));
++ }
++
++ /* Now tell SCST that the command is done ... */
++ scst_tgt_cmd_done(scst_cmd, SCST_CONTEXT_SAME);
++
++ TRACE_EXIT();
++ return SCST_TGT_RES_SUCCESS;
++}
++
++static void scst_local_targ_task_mgmt_done(struct scst_mgmt_cmd *mgmt_cmd)
++{
++ struct completion *compl;
++
++ TRACE_ENTRY();
++
++ compl = (struct completion *)scst_mgmt_cmd_get_tgt_priv(mgmt_cmd);
++ if (compl)
++ complete(compl);
++
++ TRACE_EXIT();
++ return;
++}
++
++static uint16_t scst_local_get_scsi_transport_version(struct scst_tgt *scst_tgt)
++{
++ struct scst_local_tgt *tgt;
++
++ tgt = (struct scst_local_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
++
++ if (tgt->scsi_transport_version == 0)
++ return 0x0BE0; /* SAS */
++ else
++ return tgt->scsi_transport_version;
++}
++
++static uint16_t scst_local_get_phys_transport_version(struct scst_tgt *scst_tgt)
++{
++ struct scst_local_tgt *tgt;
++
++ tgt = (struct scst_local_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
++
++ return tgt->phys_transport_version;
++}
++
++static struct scst_tgt_template scst_local_targ_tmpl = {
++ .name = "scst_local",
++ .sg_tablesize = 0xffff,
++ .xmit_response_atomic = 1,
++ .enabled_attr_not_needed = 1,
++ .tgtt_attrs = scst_local_tgtt_attrs,
++ .tgt_attrs = scst_local_tgt_attrs,
++ .sess_attrs = scst_local_sess_attrs,
++ .add_target = scst_local_sysfs_add_target,
++ .del_target = scst_local_sysfs_del_target,
++ .mgmt_cmd = scst_local_sysfs_mgmt_cmd,
++ .add_target_parameters = "session_name",
++ .mgmt_cmd_help = " echo \"add_session target_name session_name\" >mgmt\n"
++ " echo \"del_session target_name session_name\" >mgmt\n",
++ .detect = scst_local_targ_detect,
++ .release = scst_local_targ_release,
++ .pre_exec = scst_local_targ_pre_exec,
++ .xmit_response = scst_local_targ_xmit_response,
++ .task_mgmt_fn_done = scst_local_targ_task_mgmt_done,
++ .report_aen = scst_local_report_aen,
++ .get_initiator_port_transport_id = scst_local_get_initiator_port_transport_id,
++ .get_scsi_transport_version = scst_local_get_scsi_transport_version,
++ .get_phys_transport_version = scst_local_get_phys_transport_version,
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ .default_trace_flags = SCST_LOCAL_DEFAULT_LOG_FLAGS,
++ .trace_flags = &trace_flag,
++#endif
++};
++
++static struct scsi_host_template scst_lcl_ini_driver_template = {
++ .name = SCST_LOCAL_NAME,
++ .queuecommand = scst_local_queuecommand,
++ .eh_abort_handler = scst_local_abort,
++ .eh_device_reset_handler = scst_local_device_reset,
++ .eh_target_reset_handler = scst_local_target_reset,
++ .can_queue = 256,
++ .this_id = -1,
++ /* SCST doesn't support sg chaining */
++ .sg_tablesize = SG_MAX_SINGLE_ALLOC,
++ .cmd_per_lun = 32,
++ .max_sectors = 0xffff,
++ /* SCST doesn't support sg chaining */
++ .use_clustering = ENABLE_CLUSTERING,
++ .skip_settle_delay = 1,
++ .module = THIS_MODULE,
++};
++
++/*
++ * LLD Bus and functions
++ */
++
++static int scst_local_driver_probe(struct device *dev)
++{
++ int ret;
++ struct scst_local_sess *sess;
++ struct Scsi_Host *hpnt;
++
++ TRACE_ENTRY();
++
++ sess = to_scst_lcl_sess(dev);
++
++ TRACE_DBG("sess %p", sess);
++
++ hpnt = scsi_host_alloc(&scst_lcl_ini_driver_template, sizeof(*sess));
++ if (NULL == hpnt) {
++ PRINT_ERROR("%s", "scsi_register() failed");
++ ret = -ENODEV;
++ goto out;
++ }
++
++ sess->shost = hpnt;
++
++ hpnt->max_id = 0; /* Don't want more than one id */
++ hpnt->max_lun = 0xFFFF;
++
++ /*
++ * Because of a change in the size of this field at 2.6.26
++ * we use this check ... it allows us to work on earlier
++ * kernels. If we don't, max_cmd_size gets set to 4 (and we get
++ * a compiler warning) so a scan never occurs.
++ */
++ hpnt->max_cmd_len = 260;
++
++ ret = scsi_add_host(hpnt, &sess->dev);
++ if (ret) {
++ PRINT_ERROR("%s", "scsi_add_host() failed");
++ ret = -ENODEV;
++ scsi_host_put(hpnt);
++ goto out;
++ }
++
++out:
++ TRACE_EXIT_RES(ret);
++ return ret;
++}
++
++static int scst_local_driver_remove(struct device *dev)
++{
++ struct scst_local_sess *sess;
++
++ TRACE_ENTRY();
++
++ sess = to_scst_lcl_sess(dev);
++ if (!sess) {
++ PRINT_ERROR("%s", "Unable to locate sess info");
++ return -ENODEV;
++ }
++
++ scsi_remove_host(sess->shost);
++ scsi_host_put(sess->shost);
++
++ TRACE_EXIT();
++ return 0;
++}
++
++static int scst_local_bus_match(struct device *dev,
++ struct device_driver *dev_driver)
++{
++ TRACE_ENTRY();
++
++ TRACE_EXIT();
++ return 1;
++}
++
++static struct bus_type scst_local_lld_bus = {
++ .name = "scst_local_bus",
++ .match = scst_local_bus_match,
++ .probe = scst_local_driver_probe,
++ .remove = scst_local_driver_remove,
++};
++
++static struct device_driver scst_local_driver = {
++ .name = SCST_LOCAL_NAME,
++ .bus = &scst_local_lld_bus,
++};
++
++static struct device *scst_local_root;
++
++static void scst_local_release_adapter(struct device *dev)
++{
++ struct scst_local_sess *sess;
++
++ TRACE_ENTRY();
++
++ sess = to_scst_lcl_sess(dev);
++ if (sess == NULL)
++ goto out;
++
++ spin_lock(&sess->aen_lock);
++ sess->unregistering = 1;
++ scst_process_aens(sess, true);
++ spin_unlock(&sess->aen_lock);
++
++ cancel_work_sync(&sess->aen_work);
++
++ scst_unregister_session(sess->scst_sess, TRUE, NULL);
++
++ kfree(sess);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static int __scst_local_add_adapter(struct scst_local_tgt *tgt,
++ const char *initiator_name, struct scst_local_sess **out_sess,
++ bool locked)
++{
++ int res;
++ struct scst_local_sess *sess;
++
++ TRACE_ENTRY();
++
++ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
++ if (NULL == sess) {
++ PRINT_ERROR("Unable to alloc scst_lcl_host (size %zu)",
++ sizeof(*sess));
++ res = -ENOMEM;
++ goto out;
++ }
++
++ sess->tgt = tgt;
++ sess->number = atomic_inc_return(&scst_local_sess_num);
++ mutex_init(&sess->tr_id_mutex);
++
++ /*
++ * Init this stuff we need for scheduling AEN work
++ */
++ INIT_WORK(&sess->aen_work, scst_aen_work_fn);
++ spin_lock_init(&sess->aen_lock);
++ INIT_LIST_HEAD(&sess->aen_work_list);
++
++ sess->scst_sess = scst_register_session(tgt->scst_tgt, 0,
++ initiator_name, (void *)sess, NULL, NULL);
++ if (sess->scst_sess == NULL) {
++ PRINT_ERROR("%s", "scst_register_session failed");
++ res = -EFAULT;
++ goto out_free;
++ }
++
++ sess->dev.bus = &scst_local_lld_bus;
++ sess->dev.parent = scst_local_root;
++ sess->dev.release = &scst_local_release_adapter;
++ sess->dev.init_name = kobject_name(&sess->scst_sess->sess_kobj);
++
++ res = device_register(&sess->dev);
++ if (res != 0)
++ goto unregister_session;
++
++ res = sysfs_create_link(scst_sysfs_get_sess_kobj(sess->scst_sess),
++ &sess->shost->shost_dev.kobj, "host");
++ if (res != 0) {
++ PRINT_ERROR("Unable to create \"host\" link for target "
++ "%s", scst_get_tgt_name(tgt->scst_tgt));
++ goto unregister_dev;
++ }
++
++ if (!locked)
++ mutex_lock(&scst_local_mutex);
++ list_add_tail(&sess->sessions_list_entry, &tgt->sessions_list);
++ if (!locked)
++ mutex_unlock(&scst_local_mutex);
++
++ if (scst_initiator_has_luns(tgt->scst_tgt, initiator_name))
++ scsi_scan_target(&sess->shost->shost_gendev, 0, 0,
++ SCAN_WILD_CARD, 1);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++unregister_dev:
++ device_unregister(&sess->dev);
++
++unregister_session:
++ scst_unregister_session(sess->scst_sess, TRUE, NULL);
++
++out_free:
++ kfree(sess);
++ goto out;
++}
++
++static int scst_local_add_adapter(struct scst_local_tgt *tgt,
++ const char *initiator_name, struct scst_local_sess **out_sess)
++{
++ return __scst_local_add_adapter(tgt, initiator_name, out_sess, false);
++}
++
++/* Must be called under scst_local_mutex */
++static void scst_local_remove_adapter(struct scst_local_sess *sess)
++{
++ TRACE_ENTRY();
++
++ list_del(&sess->sessions_list_entry);
++
++ device_unregister(&sess->dev);
++
++ TRACE_EXIT();
++ return;
++}
++
++static int scst_local_add_target(const char *target_name,
++ struct scst_local_tgt **out_tgt)
++{
++ int res;
++ struct scst_local_tgt *tgt;
++
++ TRACE_ENTRY();
++
++ tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
++ if (NULL == tgt) {
++ PRINT_ERROR("Unable to alloc tgt (size %zu)", sizeof(*tgt));
++ res = -ENOMEM;
++ goto out;
++ }
++
++ INIT_LIST_HEAD(&tgt->sessions_list);
++
++ tgt->scst_tgt = scst_register_target(&scst_local_targ_tmpl, target_name);
++ if (tgt->scst_tgt == NULL) {
++ PRINT_ERROR("%s", "scst_register_target() failed:");
++ res = -EFAULT;
++ goto out_free;
++ }
++
++ scst_tgt_set_tgt_priv(tgt->scst_tgt, tgt);
++
++ mutex_lock(&scst_local_mutex);
++ list_add_tail(&tgt->tgts_list_entry, &scst_local_tgts_list);
++ mutex_unlock(&scst_local_mutex);
++
++ if (out_tgt != NULL)
++ *out_tgt = tgt;
++
++ res = 0;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free:
++ kfree(tgt);
++ goto out;
++}
++
++/* Must be called under scst_local_mutex */
++static void __scst_local_remove_target(struct scst_local_tgt *tgt)
++{
++ struct scst_local_sess *sess, *ts;
++
++ TRACE_ENTRY();
++
++ list_for_each_entry_safe(sess, ts, &tgt->sessions_list,
++ sessions_list_entry) {
++ scst_local_remove_adapter(sess);
++ }
++
++ list_del(&tgt->tgts_list_entry);
++
++ scst_unregister_target(tgt->scst_tgt);
++
++ kfree(tgt);
++
++ TRACE_EXIT();
++ return;
++}
++
++static void scst_local_remove_target(struct scst_local_tgt *tgt)
++{
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_local_mutex);
++ __scst_local_remove_target(tgt);
++ mutex_unlock(&scst_local_mutex);
++
++ TRACE_EXIT();
++ return;
++}
++
++static int __init scst_local_init(void)
++{
++ int ret;
++ struct scst_local_tgt *tgt;
++
++ TRACE_ENTRY();
++
++ scst_local_root = root_device_register(SCST_LOCAL_NAME);
++ if (IS_ERR(scst_local_root)) {
++ ret = PTR_ERR(scst_local_root);
++ goto out;
++ }
++
++ ret = bus_register(&scst_local_lld_bus);
++ if (ret < 0) {
++ PRINT_ERROR("bus_register() error: %d", ret);
++ goto dev_unreg;
++ }
++
++ ret = driver_register(&scst_local_driver);
++ if (ret < 0) {
++ PRINT_ERROR("driver_register() error: %d", ret);
++ goto bus_unreg;
++ }
++
++ ret = scst_register_target_template(&scst_local_targ_tmpl);
++ if (ret != 0) {
++ PRINT_ERROR("Unable to register target template: %d", ret);
++ goto driver_unreg;
++ }
++
++ /*
++ * If we are using sysfs, then don't add a default target unless
++ * we are told to do so. When using procfs, we always add a default
++ * target because that was what the earliest versions did. Just
++ * remove the preprocessor directives when no longer needed.
++ */
++ if (!scst_local_add_default_tgt)
++ goto out;
++
++ ret = scst_local_add_target("scst_local_tgt", &tgt);
++ if (ret != 0)
++ goto tgt_templ_unreg;
++
++ ret = scst_local_add_adapter(tgt, "scst_local_host", NULL);
++ if (ret != 0)
++ goto tgt_unreg;
++
++out:
++ TRACE_EXIT_RES(ret);
++ return ret;
++
++tgt_unreg:
++ scst_local_remove_target(tgt);
++
++tgt_templ_unreg:
++ scst_unregister_target_template(&scst_local_targ_tmpl);
++
++driver_unreg:
++ driver_unregister(&scst_local_driver);
++
++bus_unreg:
++ bus_unregister(&scst_local_lld_bus);
++
++dev_unreg:
++ root_device_unregister(scst_local_root);
++
++ goto out;
++}
++
++static void __exit scst_local_exit(void)
++{
++ struct scst_local_tgt *tgt, *tt;
++
++ TRACE_ENTRY();
++
++ down_write(&scst_local_exit_rwsem);
++
++ mutex_lock(&scst_local_mutex);
++ list_for_each_entry_safe(tgt, tt, &scst_local_tgts_list,
++ tgts_list_entry) {
++ __scst_local_remove_target(tgt);
++ }
++ mutex_unlock(&scst_local_mutex);
++
++ driver_unregister(&scst_local_driver);
++ bus_unregister(&scst_local_lld_bus);
++ root_device_unregister(scst_local_root);
++
++ /* Now unregister the target template */
++ scst_unregister_target_template(&scst_local_targ_tmpl);
++
++ /* To make lockdep happy */
++ up_write(&scst_local_exit_rwsem);
++
++ TRACE_EXIT();
++ return;
++}
++
++device_initcall(scst_local_init);
++module_exit(scst_local_exit);
++
+diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst_local linux-2.6.36/Documentation/scst/README.scst_local
+--- orig/linux-2.6.36/Documentation/scst/README.scst_local
++++ linux-2.6.36/Documentation/scst/README.scst_local
+@@ -0,0 +1,259 @@
++SCST Local ...
++Richard Sharpe, 30-Nov-2008
++
++This is the SCST Local driver. Its function is to allow you to access devices
++that are exported via SCST directly on the same Linux system that they are
++exported from.
++
++No assumptions are made in the code about the device types on the target, so
++any device handlers that you load in SCST should be visible, including tapes
++and so forth.
++
++You can freely use any sg, sd, st, etc. devices imported from the target,
++except the following: you can't mount file systems or put swap on them.
++This is a limitation of the Linux memory/cache manager. See the SCST README
++file for details.
++
++To build, simply issue 'make' in the scst_local directory.
++
++Try 'modinfo scst_local' for a listing of module parameters so far.
++
++Here is how I have used it so far:
++
++1. Load up scst:
++
++ modprobe scst
++ modprobe scst_vdisk
++
++2. Create a virtual disk (or your own device handler):
++
++ dd if=/dev/zero of=/some/path/vdisk1.img bs=16384 count=1000000
++ echo "add_device vm_disk1 filename=/some/path/vdisk1.img" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
++
++3. Load the scst_local driver:
++
++ insmod scst_local
++ echo "add vm_disk1 0" >/sys/kernel/scst_tgt/targets/scst_local/scst_local_tgt/luns/mgmt
++
++4. Check what you have
++
++ cat /proc/scsi/scsi
++ Attached devices:
++ Host: scsi0 Channel: 00 Id: 00 Lun: 00
++ Vendor: ATA Model: ST9320320AS Rev: 0303
++ Type: Direct-Access ANSI SCSI revision: 05
++ Host: scsi4 Channel: 00 Id: 00 Lun: 00
++ Vendor: TSSTcorp Model: CD/DVDW TS-L632D Rev: TO04
++ Type: CD-ROM ANSI SCSI revision: 05
++ Host: scsi7 Channel: 00 Id: 00 Lun: 00
++ Vendor: SCST_FIO Model: vm_disk1 Rev: 200
++ Type: Direct-Access ANSI SCSI revision: 04
++
++Or, instead of manually running "add_device" in step (2) and doing step (3),
++write an scstadmin config:
++
++HANDLER vdisk_fileio {
++ DEVICE vm_disk1 {
++ filename /some/path/vdisk1.img
++ }
++}
++
++TARGET_DRIVER scst_local {
++ TARGET scst_local_tgt {
++ LUN 0 vm_disk1
++ }
++}
++
++then:
++
++ insmod scst_local
++ scstadmin -config conf_file.cfg
++
++More advanced examples:
++
++For (3) you can:
++
++ insmod scst_local add_default_tgt=0
++ echo "add_target scst_local_tgt session_name=scst_local_host" >/sys/kernel/scst_tgt/targets/scst_local//mgmt
++ echo "add vm_disk1 0" >/sys/kernel/scst_tgt/targets/scst_local/scst_local_tgt/luns/mgmt
++
++The scst_local module parameter add_default_tgt=0 disables creation of the
++default target "scst_local_tgt" and session "scst_local_host", so you need
++to create them manually.
++
++Any number of targets and sessions can be created. Each SCST session
++corresponds to a SCSI host. You can change which LUNs are assigned to
++each session by using SCST access control. This mode is intended for
++user space target drivers (see below).
++
++Alternatively, you can write an scstadmin config file conf_file.cfg:
++
++HANDLER vdisk_fileio {
++ DEVICE vm_disk1 {
++ filename /some/path/vdisk1.img
++ }
++}
++
++TARGET_DRIVER scst_local {
++ TARGET scst_local_tgt {
++ session_name scst_local_host
++
++ LUN 0 vm_disk1
++ }
++}
++
++then:
++
++ insmod scst_local add_default_tgt=0
++ scstadmin -config conf_file.cfg
++
++NOTE! Although scstadmin allows creating scst_local sessions using the
++"session_name" expression, it doesn't save existing sessions when writing
++the config file with the "write_config" command. If you need this
++functionality, feel free to send a request for it to the SCST development
++mailing list.
++
++5. Have fun.
++
++Some of this was coded while in Santa Clara, some in Bangalore, and some in
++Hyderabad. No doubt some will be coded on the way back to Santa Clara.
++
++The code still has bugs, so if you encounter any, email me the fixes at:
++
++ realrichardsharpe@gmail.com
++
++I am thinking of renaming this to something more interesting.
++
++Sysfs interface
++===============
++
++See SCST's README for a common SCST sysfs description.
++
++The root of this driver is /sys/kernel/scst_tgt/targets/scst_local. It has
++the following additional entry:
++
++ - stats - read-only attribute with some statistical information.
++
++Each target subdirectory contains the following additional entries:
++
++ - phys_transport_version - contains and allows changing the physical
++   transport version descriptor. It determines which physical interface
++   this target will appear as. See SPC for more details. By default, it
++   is not defined (0).
++
++ - scsi_transport_version - contains and allows changing the SCSI
++   transport version descriptor. It determines which SCSI transport
++   this target will appear as. See SPC for more details. By default,
++   it is SAS.
++
++Each session subdirectory contains the following additional entries:
++
++ - transport_id - contains this host's TransportID. This TransportID is
++   used to identify the initiator in Persistent Reservation commands. If
++   you change scsi_transport_version for a target, make sure you set the
++   correct TransportID for all its sessions. See SPC for more details.
++   A combined example of reading and setting these attributes follows
++   the "host" entry below.
++
++ - host - links to the corresponding SCSI host. Using it you can find the
++   local sg/bsg/sd/etc. devices of this session. For instance, if this
++   link points to host12, you can find your sg devices by:
++
++$ lsscsi -g|grep "\[12:"
++[12:0:0:0] disk SCST_FIO rd1 200 /dev/sdc /dev/sg2
++[12:0:0:1] disk SCST_FIO nullio 200 /dev/sdd /dev/sg3
++
++They are /dev/sg2 and /dev/sg3.
++
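++For example, assuming a target named "tgt1" with a session "sess1" already
++exists (these names and the value written are only illustrative; 0x0BE0 is
++the SAS descriptor this driver reports by default), the attributes above
++can be inspected and changed like this:
++
++  cat /sys/kernel/scst_tgt/targets/scst_local/tgt1/scsi_transport_version
++  echo 0x0BE0 >/sys/kernel/scst_tgt/targets/scst_local/tgt1/scsi_transport_version
++  cat /sys/kernel/scst_tgt/targets/scst_local/tgt1/sessions/sess1/transport_id
++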
++The following management commands are available via
++/sys/kernel/scst_tgt/targets/scst_local/mgmt (a combined example follows
++this list):
++
++ - add_target target_name [session_name=sess_name; [session_name=sess_name1;] [...]] -
++ creates a target with optionally one or more sessions.
++
++ - del_target target_name - deletes a target.
++
++ - add_session target_name session_name - adds to target target_name a
++ session (SCSI host) with name session_name.
++
++ - del_session target_name session_name - deletes session session_name
++ from target target_name.
++
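++For example (the target and session names below are only placeholders):
++
++  echo "add_target tgt_a session_name=host_a" >/sys/kernel/scst_tgt/targets/scst_local/mgmt
++  echo "add_session tgt_a host_b" >/sys/kernel/scst_tgt/targets/scst_local/mgmt
++  echo "del_session tgt_a host_b" >/sys/kernel/scst_tgt/targets/scst_local/mgmt
++  echo "del_target tgt_a" >/sys/kernel/scst_tgt/targets/scst_local/mgmt
++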
++Note on performance
++===================
++
++Although this driver is implemented in the most performance-effective way,
++including zero-copy passing of data between the SCSI/block subsystems and
++SCST, in many cases it is NOT suited to measuring performance as a NULL
++link. For example, it is not suited for max IOPS measurements. This is
++because in such cases the bottleneck is not the performance of the link
++between the target and initiator, but the CPU or memory speed on the target
++or initiator. With scst_local both the initiator and the target run on the
++same system, which means each of them has considerably less CPU/memory
++power available.
++
++User space target drivers
++=========================
++
++Scst_local can be used to write full featured SCST target drivers in
++user space:
++
++1. For each SCSI target a user space target driver should create an
++   scst_local target using the "add_target" command.
++
++2. Then the user space target driver should, if needed, set its SCSI and
++   physical transport version descriptors using the attributes
++   scsi_transport_version and phys_transport_version respectively in the
++   /sys/kernel/scst_tgt/targets/scst_local/target_name directory.
++
++3. For each incoming session (I_T nexus) from an initiator the user space
++   target driver should create an scst_local session using the
++   "add_session" command.
++
++4. Then, if needed, the user space target driver should set TransportID
++ for this session (I_T nexus) using attribute
++ /sys/kernel/scst_tgt/targets/scst_local/target_name/sessions/session_name/transport_id
++
++5. Then the user space target driver should find the sg/bsg devices for
++   the LUNs of the created session using the link
++   /sys/kernel/scst_tgt/targets/scst_local/target_name/sessions/session_name/host
++   as described above.
++
++6. Then the user space target driver can start serving the initiator using
++   the sg/bsg devices it found.
++
++For other connected initiators, steps 3-6 should be repeated. A minimal
++shell sketch of this sequence is shown below.
++
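++A minimal sketch of steps 1-5 (all names below are placeholders; the
++TransportID of step 4 would be written to the transport_id attribute in
++the same way):
++
++  echo "add_target ut_tgt" >/sys/kernel/scst_tgt/targets/scst_local/mgmt
++  echo 0x0BE0 >/sys/kernel/scst_tgt/targets/scst_local/ut_tgt/scsi_transport_version
++  echo "add_session ut_tgt ut_nexus0" >/sys/kernel/scst_tgt/targets/scst_local/mgmt
++  ls -l /sys/kernel/scst_tgt/targets/scst_local/ut_tgt/sessions/ut_nexus0/host
++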
++Change log
++==========
++
++V0.1 24-Sep-2008 (Hyderabad) Initial coding, pretty chatty and messy,
++ but worked.
++
++V0.2 25-Sep-2008 (Hong Kong) Cleaned up the code a lot, reduced the log
++ chatter, fixed a bug where multiple LUNs did not
++ work. Also, added logging control. Tested with
++ five virtual disks. They all came up as /dev/sdb
++ through /dev/sdf and I could dd to them. Also
++ fixed a bug preventing multiple adapters.
++
++V0.3 26-Sep-2008 (Santa Clara) Added back a copyright plus cleaned up some
++ unused functions and structures.
++
++V0.4 5-Oct-2008 (Santa Clara) Changed name to scst_local as suggested, cleaned
++ up some unused variables (made them used) and
++ change allocation to a kmem_cache pool.
++
++V0.5 5-Oct-2008 (Santa Clara) Added mgmt commands to handle dev reset and
++ aborts. Not sure if aborts works. Also corrected
++ the version info and renamed readme to README.
++
++V0.6 7-Oct-2008 (Santa Clara) Removed some redundant code and made some
++ changes suggested by Vladislav.
++
++V0.7 11-Oct-2008 (Santa Clara) Moved into the scst tree. Cleaned up some
++ unused functions, used TRACE macros etc.
++
++V0.9 30-Nov-2008 (Mtn View) Cleaned up an additional problem with symbols not
++ being defined in older version of the kernel. Also
++ fixed some English and cleaned up this doc.
++
++V1.0 10-Sep-2010 (Moscow) Sysfs management added. Reviewed and cleaned up.
++
diff --git a/main/linux-scst/setlocalversion.patch b/main/linux-scst/setlocalversion.patch
new file mode 100644
index 000000000..d82eb170a
--- /dev/null
+++ b/main/linux-scst/setlocalversion.patch
@@ -0,0 +1,11 @@
+--- ./scripts/setlocalversion.orig
++++ ./scripts/setlocalversion
+@@ -43,7 +43,7 @@
+ fi
+
+ # Check for git and a git repo.
+- if head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
++ if [ -d "$srctree"/.git ] && head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
+
+ # If we are at a tagged commit (like "v2.6.30-rc6"), we ignore
+ # it, because this version is defined in the top level Makefile.
diff --git a/main/linux-scst/unionfs-2.5.7_for_2.6.36.diff b/main/linux-scst/unionfs-2.5.7_for_2.6.36.diff
new file mode 100644
index 000000000..fabe75809
--- /dev/null
+++ b/main/linux-scst/unionfs-2.5.7_for_2.6.36.diff
@@ -0,0 +1,11253 @@
+diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
+index 4303614..5ade4a8 100644
+--- a/Documentation/filesystems/00-INDEX
++++ b/Documentation/filesystems/00-INDEX
+@@ -112,6 +112,8 @@ udf.txt
+ - info and mount options for the UDF filesystem.
+ ufs.txt
+ - info on the ufs filesystem.
++unionfs/
++ - info on the unionfs filesystem
+ vfat.txt
+ - info on using the VFAT filesystem used in Windows NT and Windows 95
+ vfs.txt
+diff --git a/Documentation/filesystems/unionfs/00-INDEX b/Documentation/filesystems/unionfs/00-INDEX
+new file mode 100644
+index 0000000..96fdf67
+--- /dev/null
++++ b/Documentation/filesystems/unionfs/00-INDEX
+@@ -0,0 +1,10 @@
++00-INDEX
++ - this file.
++concepts.txt
++ - A brief introduction of concepts.
++issues.txt
++ - A summary of known issues with unionfs.
++rename.txt
++ - Information regarding rename operations.
++usage.txt
++ - Usage information and examples.
+diff --git a/Documentation/filesystems/unionfs/concepts.txt b/Documentation/filesystems/unionfs/concepts.txt
+new file mode 100644
+index 0000000..b853788
+--- /dev/null
++++ b/Documentation/filesystems/unionfs/concepts.txt
+@@ -0,0 +1,287 @@
++Unionfs 2.x CONCEPTS:
++=====================
++
++This file describes the concepts needed by a namespace unification file
++system.
++
++
++Branch Priority:
++================
++
++Each branch is assigned a unique priority - starting from 0 (highest
++priority). No two branches can have the same priority.
++
++
++Branch Mode:
++============
++
++Each branch is assigned a mode - read-write or read-only. This allows
++directories on media mounted read-write to be used in a read-only manner.
++
++
++Whiteouts:
++==========
++
++A whiteout removes a file name from the namespace. Whiteouts are needed when
++one attempts to remove a file on a read-only branch.
++
++Suppose we have a two-branch union, where branch 0 is read-write and branch
++1 is read-only. And a file 'foo' on branch 1:
++
++./b0/
++./b1/
++./b1/foo
++
++The unified view would simply be:
++
++./union/
++./union/foo
++
++Since 'foo' is stored on a read-only branch, it cannot be removed. A
++whiteout is used to remove the name 'foo' from the unified namespace. Again,
++since branch 1 is read-only, the whiteout cannot be created there. So, we
++try on a higher priority (lower numerically) branch and create the whiteout
++there.
++
++./b0/
++./b0/.wh.foo
++./b1/
++./b1/foo
++
++Later, when Unionfs traverses branches (due to lookup or readdir), it
++eliminates 'foo' from the namespace (as well as the whiteout itself).
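++
++For example, with the two branches above mounted as a union (paths are
++illustrative, using the "dirs=" syntax described in usage.txt):
++
++  # mount -t unionfs -o dirs=/b0=rw:/b1=ro none /union
++  # rm /union/foo
++  # ls -a /b0
++  .  ..  .wh.foo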
++
++
++Opaque Directories:
++===================
++
++Assume we have a unionfs mount comprising two branches. Branch 0 is
++empty; branch 1 has the directory /a and file /a/f. Let's say we mount a
++union of branch 0 as read-write and branch 1 as read-only. Now, let's say
++we try to perform the following operation in the union:
++
++ rm -fr a
++
++Because branch 1 is not writable, we cannot physically remove the file /a/f
++or the directory /a. So instead, we will create a whiteout in branch 0
++named /.wh.a, masking out the name "a" from branch 1. Next, let's say we
++try to create a directory named "a" as follows:
++
++ mkdir a
++
++Because we have a whiteout for "a" already, Unionfs behaves as if "a"
++doesn't exist, and thus will delete the whiteout and replace it with an
++actual directory named "a".
++
++The problem now is that if you try to "ls" in the union, Unionfs will
++perform its normal directory name unification, for *all* directories named
++"a" in all branches. This will cause the file /a/f from branch 1 to
++re-appear in the union's namespace, which violates Unix semantics.
++
++To avoid this problem, we have a different form of whiteouts for
++directories, called "opaque directories" (same as BSD Union Mount does).
++Whenever we replace a whiteout with a directory, that directory is marked as
++opaque. In Unionfs 2.x, it means that we create a file named
++/a/.wh.__dir_opaque in branch 0, after having created directory /a there.
++When unionfs notices that a directory is opaque, it stops all namespace
++operations (including merging readdir contents) at that opaque directory.
++This prevents re-exposing names from masked out directories.
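++
++As an illustrative shell session matching the example above (branch 0
++mounted at /b0 read-write, branch 1 at /b1 read-only):
++
++  # rm -fr /union/a        # creates the whiteout /b0/.wh.a
++  # mkdir /union/a         # replaces the whiteout with an opaque directory
++  # ls -a /b0/a
++  .  ..  .wh.__dir_opaque
++  # ls /union/a            # /b1/a/f remains hidden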
++
++
++Duplicate Elimination:
++======================
++
++It is possible for files on different branches to have the same name.
++Unionfs then has to select which instance of the file to show to the user.
++Given the fact that each branch has a priority associated with it, the
++simplest solution is to take the instance from the highest priority
++(numerically lowest value) and "hide" the others.
++
++
++Unlinking:
++=========
++
++The unlink operation on non-directory instances is optimized to remove the
++maximum possible number of objects when multiple underlying branches have
++the same file name. The unlink operation will first try to delete file
++instances from the highest priority branch and then move on to delete from
++the remaining branches in order of decreasing priority. Consider a case
++(F..D..F), where F is a file and D is a directory of the same name; here,
++some intermediate branch could have an empty directory instance with the
++same name, so this operation also tries to delete that directory instance
++and then proceeds to delete from the next possible lower priority branch.
++The unionfs unlink operation will thus delete files with the same name from
++all possible underlying branches. If an error occurs, it creates a whiteout
++in the highest priority branch, which hides the file instances in the rest
++of the branches. An error can occur either if an unlink operation in any of
++the underlying branches failed or if a branch has no write permission.
++
++This unlinking policy is known as "delete all" and it has the benefit of
++overall reducing the number of inodes used by duplicate files, and further
++reducing the total number of inodes consumed by whiteouts. The cost is of
++extra processing, but testing shows this extra processing is well worth the
++savings.
++
++
++Copyup:
++=======
++
++When a change is made to the contents of a file's data or meta-data, they
++have to be stored somewhere. The best way is to create a copy of the
++original file on a branch that is writable, and then redirect the write
++through to this copy. The copy must be made on a higher priority branch so
++that lookup and readdir return this newer "version" of the file rather than
++the original (see duplicate elimination).
++
++An entire unionfs mount can be read-only or read-write. If it's read-only,
++then none of the branches will be written to, even if some of the branches
++are physically writeable. If the unionfs mount is read-write, then the
++leftmost (highest priority) branch must be writeable (for copyup to take
++place); the remaining branches can be any mix of read-write and read-only.
++
++In a writeable mount, unionfs will create new files/dir in the leftmost
++branch. If one tries to modify a file in a read-only branch/media, unionfs
++will copyup the file to the leftmost branch and modify it there. If you try
++to modify a file from a writeable branch which is not the leftmost branch,
++then unionfs will modify it in that branch; this is useful if you, say,
++unify different packages (e.g., apache, sendmail, ftpd, etc.) and you want
++changes to specific package files to remain logically in the directory where
++they came from.
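++
++A small illustrative example: with dirs=/top=rw:/pkg=ro and an existing
++file /pkg/etc/app.conf, modifying the file through the union copies it up
++to the writable branch first:
++
++  # echo "option = 1" >>/union/etc/app.conf
++  # ls /top/etc
++  app.conf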
++
++Cache Coherency:
++================
++
++Unionfs users often want to be able to modify files and directories directly
++on the lower branches, and have those changes be visible at the Unionfs
++level. This means that data (e.g., pages) and meta-data (dentries, inodes,
++open files, etc.) have to be synchronized between the upper and lower
++layers. In other words, the newest changes from a layer below have to be
++propagated to the Unionfs layer above. If the two layers are not in sync, a
++cache incoherency ensues, which could lead to application failures and even
++oopses. The Linux kernel, however, has a rather limited set of mechanisms
++to ensure this inter-layer cache coherency---so Unionfs has to do most of
++the hard work on its own.
++
++Maintaining Invariants:
++
++The way Unionfs ensures cache coherency is as follows. At each entry point
++to a Unionfs file system method, we call a utility function to validate the
++primary objects of this method. Generally, we call unionfs_file_revalidate
++on open files, and __unionfs_d_revalidate_chain on dentries (which also
++validates inodes). These utility functions check to see whether the upper
++Unionfs object is in sync with any of the lower objects that it represents.
++The checks we perform include whether the Unionfs superblock has a newer
++generation number, or if any of the lower objects mtime's or ctime's are
++newer. (Note: generation numbers change when branch-management commands are
++issued, so in a way, maintaining cache coherency is also very important for
++branch-management.) If indeed we determine that any Unionfs object is no
++longer in sync with its lower counterparts, then we rebuild that object
++similarly to how we do so for branch-management.
++
++While rebuilding Unionfs's objects, we also purge any page mappings and
++truncate inode pages (see fs/unionfs/dentry.c:purge_inode_data). This is to
++ensure that Unionfs will re-get the newer data from the lower branches. We
++perform this purging only if the Unionfs operation in question is a reading
++operation; if Unionfs is performing a data writing operation (e.g., ->write,
++->commit_write, etc.) then we do NOT flush the lower mappings/pages: this is
++because (1) a self-deadlock could occur and (2) the upper Unionfs pages are
++considered more authoritative anyway, as they are newer and will overwrite
++any lower pages.
++
++Unionfs maintains the following important invariant regarding mtime's,
++ctime's, and atime's: the upper inode object's times are the max() of all of
++the lower ones. For non-directory objects, there's only one object below,
++so the mapping is simple; for directory objects, there could be multiple
++lower objects and we have to sync up with the newest one of all the lower
++ones. This invariant is important to maintain, especially for directories
++(besides, we need this to be POSIX compliant). A union could comprise
++multiple writable branches, each of which could change. If we don't reflect
++the newest possible mtime/ctime, some applications could fail. For example,
++NFSv2/v3 exports check for newer directory mtimes on the server to determine
++if the client-side attribute cache should be purged.
++
++To maintain these important invariants, of course, Unionfs carefully
++synchronizes upper and lower times in various places. For example, if we
++copy-up a file to a top-level branch, the parent directory where the file
++was copied up to will now have a new mtime: so after a successful copy-up,
++we sync up with the new top-level branch's parent directory mtime.
++
++Implementation:
++
++This cache-coherency implementation is efficient because it defers any
++synchronizing between the upper and lower layers until absolutely needed.
++Consider, for example, a common situation where users perform a lot of lower
++changes, such as untarring a whole package. While these take place,
++typically the user doesn't access the files via Unionfs; only after the
++lower changes are done, does the user try to access the lower files. With
++our cache-coherency implementation, the entirety of the changes to the lower
++branches will not result in a single CPU cycle spent at the Unionfs level
++until the user invokes a system call that goes through Unionfs.
++
++We have considered two alternate cache-coherency designs. (1) Using the
++dentry/inode notify functionality to register interest in finding out about
++any lower changes. This is a somewhat limited and also a heavy-handed
++approach which could result in many notifications to the Unionfs layer upon
++each small change at the lower layer (imagine a file being modified multiple
++times in rapid succession). (2) Rewriting the VFS to support explicit
++callbacks from lower objects to upper objects. We began exploring such an
++implementation, but found it to be very complicated--it would have resulted
++in massive VFS/MM changes which are unlikely to be accepted by the LKML
++community. We therefore believe that our current cache-coherency design and
++implementation represent the best approach at this time.
++
++Limitations:
++
++Our implementation works as follows: as long as a user process has caused
++Unionfs to be called, directly or indirectly, even just to do
++->d_revalidate, we will have purged the stale Unionfs data and the
++process will see the new data. For example, a process that continually
++re-reads the same file's data will see the NEW data as soon as the lower
++file had changed, upon the next read(2) syscall (even if the file is still
++open!) However, this doesn't work when the process re-reads the open file's
++data via mmap(2) (unless the user unmaps/closes the file and remaps/reopens
++it). Once we respond to ->readpage(s), then the kernel maps the page into
++the process's address space and there doesn't appear to be a way to force
++the kernel to invalidate those pages/mappings, and force the process to
++re-issue ->readpage. If there's a way to invalidate active mappings and
++force a ->readpage, let us know please (invalidate_inode_pages2 doesn't do
++the trick).
++
++Our current Unionfs code has to perform many file-revalidation calls. It
++would be really nice if the VFS would export an optional file system hook
++->file_revalidate (similarly to dentry->d_revalidate) that will be called
++before each VFS op that has a "struct file" in it.
++
++Certain file systems have micro-second granularity (or better) for inode
++times, and asynchronous actions could cause those times to change with some
++small delay. In such cases, Unionfs may see a changed inode time that only
++differs by a tiny fraction of a second: such a change may be a false
++positive indication that the lower object has changed, whereas if unionfs
++waits a little longer, that false indication will not be seen. (These false
++positives are harmless, because they would at most cause unionfs to
++re-validate an object that may need no revalidation, and print a debugging
++message that clutters the console/logs.) Therefore, to minimize the chances
++of these situations, we delay the detection of changed times by a small
++factor of a few seconds, called UNIONFS_MIN_CC_TIME (which defaults to 3
++seconds, as does NFS). This means that we will detect the change, only a
++couple of seconds later, if indeed the time change persists in the lower
++file object. This delayed detection has an added performance benefit: we
++reduce the number of times that unionfs has to revalidate objects, in case
++there's a lot of concurrent activity on both the upper and lower objects,
++for the same file(s). Lastly, this delayed time attribute detection is
++similar to how NFS clients operate (e.g., acregmin).
++
++Finally, there is no way currently in Linux to prevent lower directories
++from being moved around (i.e., topology changes); there's no way to prevent
++modifications to directory sub-trees of whole file systems which are mounted
++read-write. It is therefore possible for in-flight operations in unionfs to
++take place, while a lower directory is being moved around. Therefore, if
++you try to, say, create a new file in a directory through unionfs, while the
++directory is being moved around directly, then the new file may get created
++in the new location where that directory was moved to. This is a somewhat
++similar behaviour in NFS: an NFS client could be creating a new file while
++the NFS server is moving the directory around; the file will get successfully
++created in the new location. (The one exception in unionfs is that if the
++branch is marked read-only by unionfs, then a copyup will take place.)
++
++For more information, see <http://unionfs.filesystems.org/>.
+diff --git a/Documentation/filesystems/unionfs/issues.txt b/Documentation/filesystems/unionfs/issues.txt
+new file mode 100644
+index 0000000..f4b7e7e
+--- /dev/null
++++ b/Documentation/filesystems/unionfs/issues.txt
+@@ -0,0 +1,28 @@
++KNOWN Unionfs 2.x ISSUES:
++=========================
++
++1. Unionfs should not use lookup_one_len() on the underlying f/s as it
++ confuses NFSv4. Currently, unionfs_lookup() passes lookup intents to the
++ lower file-system, this eliminates part of the problem. The remaining
++ calls to lookup_one_len may need to be changed to pass an intent. We are
++ currently introducing VFS changes to fs/namei.c's do_path_lookup() to
++ allow proper file lookup and opening in stackable file systems.
++
++2. Lockdep (a debugging feature) isn't aware of stacking, and so it
++ incorrectly complains about locking problems. The problem boils down to
++ this: Lockdep considers all objects of a certain type to be in the same
++ class, for example, all inodes. Lockdep doesn't like to see a lock held
++ on two inodes within the same task, and warns that it could lead to a
++ deadlock. However, stackable file systems do precisely that: they lock
++ an upper object, and then a lower object, in a strict order to avoid
++ locking problems; in addition, Unionfs, as a fan-out file system, may
++ have to lock several lower inodes. We are currently looking into Lockdep
++ to see how to make it aware of stackable file systems. For now, we
++ temporarily disable lockdep when calling vfs methods on lower objects,
++ but only for those places where lockdep complained. While this solution
++ may seem unclean, it is not without precedent: other places in the kernel
++ also do similar temporary disabling, of course after carefully having
++   checked that it is the right thing to do. Anyway, if you get any warnings
++ from Lockdep, please report them to the Unionfs maintainers.
++
++For more information, see <http://unionfs.filesystems.org/>.
+diff --git a/Documentation/filesystems/unionfs/rename.txt b/Documentation/filesystems/unionfs/rename.txt
+new file mode 100644
+index 0000000..e20bb82
+--- /dev/null
++++ b/Documentation/filesystems/unionfs/rename.txt
+@@ -0,0 +1,31 @@
++Rename is a complex beast. The following table shows which rename(2) operations
++should succeed and which should fail.
++
++o: success
++E: error (either unionfs or vfs)
++X: EXDEV
++
++none = file does not exist
++file = file is a file
++dir = file is an empty directory
++child= file is a non-empty directory
++wh = file is a directory containing only whiteouts; this makes it logically
++ empty
++
++ none file dir child wh
++file o o E E E
++dir o E o E o
++child X E X E X
++wh o E o E o
++
++
++Renaming directories:
++=====================
++
++Whenever an empty (either physically or logically) directory is being renamed,
++the following sequence of events should take place:
++
++1) Remove whiteouts from both source and destination directory
++2) Rename source to destination
++3) Make destination opaque to prevent anything under it from showing up
++
+diff --git a/Documentation/filesystems/unionfs/usage.txt b/Documentation/filesystems/unionfs/usage.txt
+new file mode 100644
+index 0000000..1adde69
+--- /dev/null
++++ b/Documentation/filesystems/unionfs/usage.txt
+@@ -0,0 +1,134 @@
++Unionfs is a stackable unification file system, which can appear to merge
++the contents of several directories (branches), while keeping their physical
++content separate. Unionfs is useful for unified source tree management,
++merged contents of split CD-ROM, merged separate software package
++directories, data grids, and more. Unionfs allows any mix of read-only and
++read-write branches, as well as insertion and deletion of branches anywhere
++in the fan-out. To maintain Unix semantics, Unionfs handles elimination of
++duplicates, partial-error conditions, and more.
++
++GENERAL SYNTAX
++==============
++
++# mount -t unionfs -o <OPTIONS>,<BRANCH-OPTIONS> none MOUNTPOINT
++
++OPTIONS can be any legal combination of:
++
++- ro # mount file system read-only
++- rw # mount file system read-write
++- remount # remount the file system (see Branch Management below)
++- incgen # increment generation no. (see Cache Consistency below)
++
++BRANCH-OPTIONS can be either (1) a list of branches given to the "dirs="
++option, or (2) a list of individual branch manipulation commands, combined
++with the "remount" option, and is further described in the "Branch
++Management" section below.
++
++The syntax for the "dirs=" mount option is:
++
++ dirs=branch[=ro|=rw][:...]
++
++The "dirs=" option takes a colon-delimited list of directories to compose
++the union, with an optional branch mode for each of those directories.
++Directories that come earlier (specified first, on the left) in the list
++have a higher precedence than those which come later. Additionally,
++read-only or read-write permissions of the branch can be specified by
++appending =ro or =rw (default) to each directory. See the Copyup section in
++concepts.txt, for a description of Unionfs's behavior when mixing read-only
++and read-write branches and mounts.
++
++Syntax:
++
++ dirs=/branch1[=ro|=rw]:/branch2[=ro|=rw]:...:/branchN[=ro|=rw]
++
++Example:
++
++ dirs=/writable_branch=rw:/read-only_branch=ro
++
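As a hypothetical illustration only (this is not the kernel's actual mount
option parser), the colon-delimited "dirs=" value described above can be
split into branches and per-branch modes like this, with "rw" as the default
mode and leftmost branches taking precedence:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char opt[] = "/writable_branch=rw:/read-only_branch=ro:/third_branch";
	char *saveptr = NULL;
	char *branch;
	int idx = 0;

	for (branch = strtok_r(opt, ":", &saveptr); branch;
	     branch = strtok_r(NULL, ":", &saveptr), idx++) {
		char *eq = strrchr(branch, '=');
		const char *mode = "rw";	/* rw is the default */

		if (eq && (!strcmp(eq + 1, "ro") || !strcmp(eq + 1, "rw"))) {
			mode = eq + 1;
			*eq = '\0';		/* strip "=ro" / "=rw" */
		}
		/* earlier (leftmost) branches have higher precedence */
		printf("branch %d: %s (%s)\n", idx, branch, mode);
	}
	return 0;
}
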
++
++BRANCH MANAGEMENT
++=================
++
++Once you mount your union for the first time, using the "dirs=" option, you
++can then change the union's overall mode or reconfigure the branches, using
++the remount option, as follows.
++
++To downgrade a union from read-write to read-only:
++
++# mount -t unionfs -o remount,ro none MOUNTPOINT
++
++To upgrade a union from read-only to read-write:
++
++# mount -t unionfs -o remount,rw none MOUNTPOINT
++
++To delete a branch /foo, regardless where it is in the current union:
++
++# mount -t unionfs -o remount,del=/foo none MOUNTPOINT
++
++To insert (add) a branch /foo before /bar:
++
++# mount -t unionfs -o remount,add=/bar:/foo none MOUNTPOINT
++
++To insert (add) a branch /foo (with the "rw" mode flag) before /bar:
++
++# mount -t unionfs -o remount,add=/bar:/foo=rw none MOUNTPOINT
++
++To insert (add) a branch /foo (in "rw" mode) at the very beginning (i.e., a
++new highest-priority branch), you can use the above syntax, or use a short
++hand version as follows:
++
++# mount -t unionfs -o remount,add=/foo none MOUNTPOINT
++
++To append a branch to the very end (new lowest-priority branch):
++
++# mount -t unionfs -o remount,add=:/foo none MOUNTPOINT
++
++To append a branch to the very end (new lowest-priority branch), in
++read-only mode:
++
++# mount -t unionfs -o remount,add=:/foo=ro none MOUNTPOINT
++
++Finally, to change the mode of one existing branch, say /foo, from read-only
++to read-write, and change /bar from read-write to read-only:
++
++# mount -t unionfs -o remount,mode=/foo=rw,mode=/bar=ro none MOUNTPOINT
++
++Note: in Unionfs 2.x, you cannot set the leftmost branch to readonly because
++then Unionfs won't have any writable place for copyups to take place.
++Moreover, the VFS can get confused when it tries to modify something in a
++file system mounted read-write, but isn't permitted to write to it.
++Instead, you should set the whole union as readonly, as described above.
++If, however, you must set the leftmost branch as readonly, perhaps so you
++can get a snapshot of it at a point in time, then you should insert a new
++writable top-level branch, and mark the one you want as readonly. This can
++be accomplished as follows, assuming that /foo is your current leftmost
++branch:
++
++# mount -t tmpfs -o size=NNN /new
++# mount -t unionfs -o remount,add=/new,mode=/foo=ro none MOUNTPOINT
++<do what you want safely in /foo>
++# mount -t unionfs -o remount,del=/new,mode=/foo=rw none MOUNTPOINT
++<check if there's anything in /new you want to preserve>
++# umount /new
++
++CACHE CONSISTENCY
++=================
++
++If you modify any file on any of the lower branches directly, while there is
++a Unionfs 2.x mounted above any of those branches, you should tell Unionfs
++to purge its caches and re-get the objects. To do that, you have to
++increment the generation number of the superblock using the following
++command:
++
++# mount -t unionfs -o remount,incgen none MOUNTPOINT
++
++Note that the older way of incrementing the generation number using an
++ioctl is no longer supported in Unionfs 2.0 and newer. Ioctls in general
++are not encouraged. Plus, an ioctl is a per-file concept, whereas the
++generation number is a per-file-system concept. Worse, such an ioctl
++requires an open file, which then has to be invalidated by the very nature
++of the generation number increase (read: the old generation increase ioctl
++was pretty racy).
++
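A minimal user-space model (illustration only, not the kernel code) of how
the generation number drives cache consistency: the superblock carries a
generation counter, each cached object records the value it was built
against, and anything carrying an older value is treated as stale the next
time it is used:

#include <stdio.h>

struct sb_model    { int generation; };		/* per-superblock counter  */
struct cache_model { int generation; };		/* per-dentry/file copy    */

static int is_stale(const struct sb_model *sb, const struct cache_model *obj)
{
	return obj->generation < sb->generation;
}

int main(void)
{
	struct sb_model sb = { .generation = 1 };
	struct cache_model dentry = { .generation = sb.generation };

	printf("before incgen: stale=%d\n", is_stale(&sb, &dentry));
	sb.generation++;	/* what "mount -o remount,incgen" triggers */
	printf("after  incgen: stale=%d\n", is_stale(&sb, &dentry));
	return 0;
}
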
++
++For more information, see <http://unionfs.filesystems.org/>.
+diff --git a/MAINTAINERS b/MAINTAINERS
+index f2a2b8e..11d7f45 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -5917,6 +5917,14 @@ F: Documentation/cdrom/
+ F: drivers/cdrom/cdrom.c
+ F: include/linux/cdrom.h
+
++UNIONFS
++P: Erez Zadok
++M: ezk@cs.sunysb.edu
++L: unionfs@filesystems.org
++W: http://unionfs.filesystems.org/
++T: git git.kernel.org/pub/scm/linux/kernel/git/ezk/unionfs.git
++S: Maintained
++
+ UNSORTED BLOCK IMAGES (UBI)
+ M: Artem Bityutskiy <dedekind1@gmail.com>
+ W: http://www.linux-mtd.infradead.org/
+diff --git a/fs/Kconfig b/fs/Kconfig
+index 3d18530..65b6aa1 100644
+--- a/fs/Kconfig
++++ b/fs/Kconfig
+@@ -169,6 +169,7 @@ if MISC_FILESYSTEMS
+ source "fs/adfs/Kconfig"
+ source "fs/affs/Kconfig"
+ source "fs/ecryptfs/Kconfig"
++source "fs/unionfs/Kconfig"
+ source "fs/hfs/Kconfig"
+ source "fs/hfsplus/Kconfig"
+ source "fs/befs/Kconfig"
+diff --git a/fs/Makefile b/fs/Makefile
+index e6ec1d3..787332e 100644
+--- a/fs/Makefile
++++ b/fs/Makefile
+@@ -84,6 +84,7 @@ obj-$(CONFIG_ISO9660_FS) += isofs/
+ obj-$(CONFIG_HFSPLUS_FS) += hfsplus/ # Before hfs to find wrapped HFS+
+ obj-$(CONFIG_HFS_FS) += hfs/
+ obj-$(CONFIG_ECRYPT_FS) += ecryptfs/
++obj-$(CONFIG_UNION_FS) += unionfs/
+ obj-$(CONFIG_VXFS_FS) += freevxfs/
+ obj-$(CONFIG_NFS_FS) += nfs/
+ obj-$(CONFIG_EXPORTFS) += exportfs/
+diff --git a/fs/namei.c b/fs/namei.c
+index 24896e8..db22420 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -385,6 +385,7 @@ void release_open_intent(struct nameidata *nd)
+ else
+ fput(nd->intent.open.file);
+ }
++EXPORT_SYMBOL_GPL(release_open_intent);
+
+ static inline struct dentry *
+ do_revalidate(struct dentry *dentry, struct nameidata *nd)
+diff --git a/fs/splice.c b/fs/splice.c
+index 8f1dfae..7a57fab 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -1092,8 +1092,8 @@ EXPORT_SYMBOL(generic_splice_sendpage);
+ /*
+ * Attempt to initiate a splice from pipe to file.
+ */
+-static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
+- loff_t *ppos, size_t len, unsigned int flags)
++long vfs_splice_from(struct pipe_inode_info *pipe, struct file *out,
++ loff_t *ppos, size_t len, unsigned int flags)
+ {
+ ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
+ loff_t *, size_t, unsigned int);
+@@ -1116,13 +1116,14 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
+
+ return splice_write(pipe, out, ppos, len, flags);
+ }
++EXPORT_SYMBOL_GPL(vfs_splice_from);
+
+ /*
+ * Attempt to initiate a splice from a file to a pipe.
+ */
+-static long do_splice_to(struct file *in, loff_t *ppos,
+- struct pipe_inode_info *pipe, size_t len,
+- unsigned int flags)
++long vfs_splice_to(struct file *in, loff_t *ppos,
++ struct pipe_inode_info *pipe, size_t len,
++ unsigned int flags)
+ {
+ ssize_t (*splice_read)(struct file *, loff_t *,
+ struct pipe_inode_info *, size_t, unsigned int);
+@@ -1142,6 +1143,7 @@ static long do_splice_to(struct file *in, loff_t *ppos,
+
+ return splice_read(in, ppos, pipe, len, flags);
+ }
++EXPORT_SYMBOL_GPL(vfs_splice_to);
+
+ /**
+ * splice_direct_to_actor - splices data directly between two non-pipes
+@@ -1211,7 +1213,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+ size_t read_len;
+ loff_t pos = sd->pos, prev_pos = pos;
+
+- ret = do_splice_to(in, &pos, pipe, len, flags);
++ ret = vfs_splice_to(in, &pos, pipe, len, flags);
+ if (unlikely(ret <= 0))
+ goto out_release;
+
+@@ -1270,8 +1272,8 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
+ {
+ struct file *file = sd->u.file;
+
+- return do_splice_from(pipe, file, &file->f_pos, sd->total_len,
+- sd->flags);
++ return vfs_splice_from(pipe, file, &file->f_pos, sd->total_len,
++ sd->flags);
+ }
+
+ /**
+@@ -1368,7 +1370,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
+ } else
+ off = &out->f_pos;
+
+- ret = do_splice_from(ipipe, out, off, len, flags);
++ ret = vfs_splice_from(ipipe, out, off, len, flags);
+
+ if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
+ ret = -EFAULT;
+@@ -1388,7 +1390,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
+ } else
+ off = &in->f_pos;
+
+- ret = do_splice_to(in, off, opipe, len, flags);
++ ret = vfs_splice_to(in, off, opipe, len, flags);
+
+ if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
+ ret = -EFAULT;
+diff --git a/fs/stack.c b/fs/stack.c
+index 4a6f7f4..7eeef12 100644
+--- a/fs/stack.c
++++ b/fs/stack.c
+@@ -1,8 +1,20 @@
++/*
++ * Copyright (c) 2006-2009 Erez Zadok
++ * Copyright (c) 2006-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2006-2009 Stony Brook University
++ * Copyright (c) 2006-2009 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
+ #include <linux/module.h>
+ #include <linux/fs.h>
+ #include <linux/fs_stack.h>
+
+-/* does _NOT_ require i_mutex to be held.
++/*
++ * does _NOT_ require i_mutex to be held.
+ *
+ * This function cannot be inlined since i_size_{read,write} is rather
+ * heavy-weight on 32-bit systems
+diff --git a/fs/unionfs/Kconfig b/fs/unionfs/Kconfig
+new file mode 100644
+index 0000000..f3c1ac4
+--- /dev/null
++++ b/fs/unionfs/Kconfig
+@@ -0,0 +1,24 @@
++config UNION_FS
++ tristate "Union file system (EXPERIMENTAL)"
++ depends on EXPERIMENTAL
++ help
++ Unionfs is a stackable unification file system, which appears to
++ merge the contents of several directories (branches), while keeping
++ their physical content separate.
++
++ See <http://unionfs.filesystems.org> for details
++
++config UNION_FS_XATTR
++ bool "Unionfs extended attributes"
++ depends on UNION_FS
++ help
++ Extended attributes are name:value pairs associated with inodes by
++ the kernel or by users (see the attr(5) manual page).
++
++ If unsure, say N.
++
++config UNION_FS_DEBUG
++ bool "Debug Unionfs"
++ depends on UNION_FS
++ help
++ If you say Y here, you can turn on debugging output from Unionfs.
+diff --git a/fs/unionfs/Makefile b/fs/unionfs/Makefile
+new file mode 100644
+index 0000000..86c32ba
+--- /dev/null
++++ b/fs/unionfs/Makefile
+@@ -0,0 +1,17 @@
++UNIONFS_VERSION="2.5.7 (for 2.6.36)"
++
++EXTRA_CFLAGS += -DUNIONFS_VERSION=\"$(UNIONFS_VERSION)\"
++
++obj-$(CONFIG_UNION_FS) += unionfs.o
++
++unionfs-y := subr.o dentry.o file.o inode.o main.o super.o \
++ rdstate.o copyup.o dirhelper.o rename.o unlink.o \
++ lookup.o commonfops.o dirfops.o sioq.o mmap.o whiteout.o
++
++unionfs-$(CONFIG_UNION_FS_XATTR) += xattr.o
++
++unionfs-$(CONFIG_UNION_FS_DEBUG) += debug.o
++
++ifeq ($(CONFIG_UNION_FS_DEBUG),y)
++EXTRA_CFLAGS += -DDEBUG
++endif
+diff --git a/fs/unionfs/commonfops.c b/fs/unionfs/commonfops.c
+new file mode 100644
+index 0000000..51ea65e
+--- /dev/null
++++ b/fs/unionfs/commonfops.c
+@@ -0,0 +1,896 @@
++/*
++ * Copyright (c) 2003-2010 Erez Zadok
++ * Copyright (c) 2003-2006 Charles P. Wright
++ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2005-2006 Junjiro Okajima
++ * Copyright (c) 2005 Arun M. Krishnakumar
++ * Copyright (c) 2004-2006 David P. Quigley
++ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
++ * Copyright (c) 2003 Puja Gupta
++ * Copyright (c) 2003 Harikesavan Krishnan
++ * Copyright (c) 2003-2010 Stony Brook University
++ * Copyright (c) 2003-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "union.h"
++
++/*
++ * 1) Copyup the file
++ * 2) Rename the file to '.unionfs<original inode#><counter>' - obviously
++ * stolen from NFS's silly rename
++ */
++static int copyup_deleted_file(struct file *file, struct dentry *dentry,
++ struct dentry *parent, int bstart, int bindex)
++{
++ static unsigned int counter;
++ const int i_inosize = sizeof(dentry->d_inode->i_ino) * 2;
++ const int countersize = sizeof(counter) * 2;
++ const int nlen = sizeof(".unionfs") + i_inosize + countersize - 1;
++ char name[nlen + 1];
++ int err;
++ struct dentry *tmp_dentry = NULL;
++ struct dentry *lower_dentry;
++ struct dentry *lower_dir_dentry = NULL;
++
++ lower_dentry = unionfs_lower_dentry_idx(dentry, bstart);
++
++ sprintf(name, ".unionfs%*.*lx",
++ i_inosize, i_inosize, lower_dentry->d_inode->i_ino);
++
++ /*
++ * Loop, looking for an unused temp name to copyup to.
++ *
++ * It's somewhat silly that we look for a free temp tmp name in the
++	 * It's somewhat silly that we look for a free temp name in the
++ * the final name will be created. We _will_ catch it if somehow
++ * the name exists in the dest branch, but it'd be nice to catch it
++ * sooner than later.
++ */
++retry:
++ tmp_dentry = NULL;
++ do {
++ char *suffix = name + nlen - countersize;
++
++ dput(tmp_dentry);
++ counter++;
++ sprintf(suffix, "%*.*x", countersize, countersize, counter);
++
++ pr_debug("unionfs: trying to rename %s to %s\n",
++ dentry->d_name.name, name);
++
++ tmp_dentry = lookup_lck_len(name, lower_dentry->d_parent,
++ nlen);
++ if (IS_ERR(tmp_dentry)) {
++ err = PTR_ERR(tmp_dentry);
++ goto out;
++ }
++ } while (tmp_dentry->d_inode != NULL); /* need negative dentry */
++ dput(tmp_dentry);
++
++ err = copyup_named_file(parent->d_inode, file, name, bstart, bindex,
++ i_size_read(file->f_path.dentry->d_inode));
++ if (err) {
++ if (unlikely(err == -EEXIST))
++ goto retry;
++ goto out;
++ }
++
++ /* bring it to the same state as an unlinked file */
++ lower_dentry = unionfs_lower_dentry_idx(dentry, dbstart(dentry));
++ if (!unionfs_lower_inode_idx(dentry->d_inode, bindex)) {
++ atomic_inc(&lower_dentry->d_inode->i_count);
++ unionfs_set_lower_inode_idx(dentry->d_inode, bindex,
++ lower_dentry->d_inode);
++ }
++ lower_dir_dentry = lock_parent(lower_dentry);
++ err = vfs_unlink(lower_dir_dentry->d_inode, lower_dentry);
++ unlock_dir(lower_dir_dentry);
++
++out:
++ if (!err)
++ unionfs_check_dentry(dentry);
++ return err;
++}
++
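As a user-space sketch (illustration only) of the temporary-name scheme used
above: the name is ".unionfs" followed by a zero-padded hex inode number and
a zero-padded hex counter, and only the counter suffix is rewritten on each
retry until an unused name is found:

#include <stdio.h>

int main(void)
{
	unsigned long ino = 0x1a2b3c;		/* example inode number */
	unsigned int counter;
	const int i_inosize = sizeof(ino) * 2;	/* hex digits for the inode */
	const int countersize = sizeof(counter) * 2;
	const int nlen = sizeof(".unionfs") + i_inosize + countersize - 1;
	char name[nlen + 1];

	/* base part: ".unionfs" plus the inode number */
	sprintf(name, ".unionfs%*.*lx", i_inosize, i_inosize, ino);

	/* retry loop: overwrite only the counter suffix each time */
	for (counter = 1; counter <= 3; counter++) {
		sprintf(name + nlen - countersize, "%*.*x",
			countersize, countersize, counter);
		printf("candidate: %s\n", name);
	}
	return 0;
}
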
++/*
++ * put all references held by upper struct file and free lower file pointer
++ * array
++ */
++static void cleanup_file(struct file *file)
++{
++ int bindex, bstart, bend;
++ struct file **lower_files;
++ struct file *lower_file;
++ struct super_block *sb = file->f_path.dentry->d_sb;
++
++ lower_files = UNIONFS_F(file)->lower_files;
++ bstart = fbstart(file);
++ bend = fbend(file);
++
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ int i; /* holds (possibly) updated branch index */
++ int old_bid;
++
++ lower_file = unionfs_lower_file_idx(file, bindex);
++ if (!lower_file)
++ continue;
++
++ /*
++ * Find new index of matching branch with an open
++ * file, since branches could have been added or
++ * deleted causing the one with open files to shift.
++ */
++ old_bid = UNIONFS_F(file)->saved_branch_ids[bindex];
++ i = branch_id_to_idx(sb, old_bid);
++ if (unlikely(i < 0)) {
++ printk(KERN_ERR "unionfs: no superblock for "
++ "file %p\n", file);
++ continue;
++ }
++
++ /* decrement count of open files */
++ branchput(sb, i);
++ /*
++ * fput will perform an mntput for us on the correct branch.
++ * Although we're using the file's old branch configuration,
++ * bindex, which is the old index, correctly points to the
++ * right branch in the file's branch list. In other words,
++ * we're going to mntput the correct branch even if branches
++ * have been added/removed.
++ */
++ fput(lower_file);
++ UNIONFS_F(file)->lower_files[bindex] = NULL;
++ UNIONFS_F(file)->saved_branch_ids[bindex] = -1;
++ }
++
++ UNIONFS_F(file)->lower_files = NULL;
++ kfree(lower_files);
++ kfree(UNIONFS_F(file)->saved_branch_ids);
++	/* set to NULL because caller needs to know whether to kfree on error */
++ UNIONFS_F(file)->saved_branch_ids = NULL;
++}
++
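A hedged user-space model (illustration only) of why the code above maps
saved branch ids back to indices: each open lower file remembers a stable
branch id, and when branches are later inserted or deleted, the id is looked
up again to find the branch's current index before its reference count is
dropped:

#include <stdio.h>

static int branch_ids[8];	/* current branch table: index -> stable id */
static int nbranches;

static int branch_id_to_idx_model(int id)
{
	int i;

	for (i = 0; i < nbranches; i++)
		if (branch_ids[i] == id)
			return i;
	return -1;		/* branch no longer exists */
}

int main(void)
{
	int saved_id;

	/* union starts with two branches, ids 10 and 11 */
	branch_ids[0] = 10;
	branch_ids[1] = 11;
	nbranches = 2;
	saved_id = branch_ids[1];	/* a file was opened on branch index 1 */

	/* a new highest-priority branch (id 12) is inserted at index 0 */
	branch_ids[2] = branch_ids[1];
	branch_ids[1] = branch_ids[0];
	branch_ids[0] = 12;
	nbranches = 3;

	printf("saved id %d now lives at index %d\n",
	       saved_id, branch_id_to_idx_model(saved_id));
	return 0;
}
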
++/* open all lower files for a given file */
++static int open_all_files(struct file *file)
++{
++ int bindex, bstart, bend, err = 0;
++ struct file *lower_file;
++ struct dentry *lower_dentry;
++ struct dentry *dentry = file->f_path.dentry;
++ struct super_block *sb = dentry->d_sb;
++
++ bstart = dbstart(dentry);
++ bend = dbend(dentry);
++
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
++ if (!lower_dentry)
++ continue;
++
++ dget(lower_dentry);
++ unionfs_mntget(dentry, bindex);
++ branchget(sb, bindex);
++
++ lower_file =
++ dentry_open(lower_dentry,
++ unionfs_lower_mnt_idx(dentry, bindex),
++ file->f_flags, current_cred());
++ if (IS_ERR(lower_file)) {
++ branchput(sb, bindex);
++ err = PTR_ERR(lower_file);
++ goto out;
++ } else {
++ unionfs_set_lower_file_idx(file, bindex, lower_file);
++ }
++ }
++out:
++ return err;
++}
++
++/* open the highest priority file for a given upper file */
++static int open_highest_file(struct file *file, bool willwrite)
++{
++ int bindex, bstart, bend, err = 0;
++ struct file *lower_file;
++ struct dentry *lower_dentry;
++ struct dentry *dentry = file->f_path.dentry;
++ struct dentry *parent = dget_parent(dentry);
++ struct inode *parent_inode = parent->d_inode;
++ struct super_block *sb = dentry->d_sb;
++
++ bstart = dbstart(dentry);
++ bend = dbend(dentry);
++
++ lower_dentry = unionfs_lower_dentry(dentry);
++ if (willwrite && IS_WRITE_FLAG(file->f_flags) && is_robranch(dentry)) {
++ for (bindex = bstart - 1; bindex >= 0; bindex--) {
++ err = copyup_file(parent_inode, file, bstart, bindex,
++ i_size_read(dentry->d_inode));
++ if (!err)
++ break;
++ }
++ atomic_set(&UNIONFS_F(file)->generation,
++ atomic_read(&UNIONFS_I(dentry->d_inode)->
++ generation));
++ goto out;
++ }
++
++ dget(lower_dentry);
++ unionfs_mntget(dentry, bstart);
++ lower_file = dentry_open(lower_dentry,
++ unionfs_lower_mnt_idx(dentry, bstart),
++ file->f_flags, current_cred());
++ if (IS_ERR(lower_file)) {
++ err = PTR_ERR(lower_file);
++ goto out;
++ }
++ branchget(sb, bstart);
++ unionfs_set_lower_file(file, lower_file);
++ /* Fix up the position. */
++ lower_file->f_pos = file->f_pos;
++
++ memcpy(&lower_file->f_ra, &file->f_ra, sizeof(struct file_ra_state));
++out:
++ dput(parent);
++ return err;
++}
++
++/* perform a delayed copyup of a read-write file on a read-only branch */
++static int do_delayed_copyup(struct file *file, struct dentry *parent)
++{
++ int bindex, bstart, bend, err = 0;
++ struct dentry *dentry = file->f_path.dentry;
++ struct inode *parent_inode = parent->d_inode;
++
++ bstart = fbstart(file);
++ bend = fbend(file);
++
++ BUG_ON(!S_ISREG(dentry->d_inode->i_mode));
++
++ unionfs_check_file(file);
++ for (bindex = bstart - 1; bindex >= 0; bindex--) {
++ if (!d_deleted(dentry))
++ err = copyup_file(parent_inode, file, bstart,
++ bindex,
++ i_size_read(dentry->d_inode));
++ else
++ err = copyup_deleted_file(file, dentry, parent,
++ bstart, bindex);
++ /* if succeeded, set lower open-file flags and break */
++ if (!err) {
++ struct file *lower_file;
++ lower_file = unionfs_lower_file_idx(file, bindex);
++ lower_file->f_flags = file->f_flags;
++ break;
++ }
++ }
++ if (err || (bstart <= fbstart(file)))
++ goto out;
++ bend = fbend(file);
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ if (unionfs_lower_file_idx(file, bindex)) {
++ branchput(dentry->d_sb, bindex);
++ fput(unionfs_lower_file_idx(file, bindex));
++ unionfs_set_lower_file_idx(file, bindex, NULL);
++ }
++ }
++ path_put_lowers(dentry, bstart, bend, false);
++ iput_lowers(dentry->d_inode, bstart, bend, false);
++ /* for reg file, we only open it "once" */
++ fbend(file) = fbstart(file);
++ dbend(dentry) = dbstart(dentry);
++ ibend(dentry->d_inode) = ibstart(dentry->d_inode);
++
++out:
++ unionfs_check_file(file);
++ return err;
++}
++
++/*
++ * Helper function for unionfs_file_revalidate/locked.
++ * Expects dentry/parent to be locked already, and revalidated.
++ */
++static int __unionfs_file_revalidate(struct file *file, struct dentry *dentry,
++ struct dentry *parent,
++ struct super_block *sb, int sbgen,
++ int dgen, bool willwrite)
++{
++ int fgen;
++ int bstart, bend, orig_brid;
++ int size;
++ int err = 0;
++
++ fgen = atomic_read(&UNIONFS_F(file)->generation);
++
++ /*
++ * There are two cases we are interested in. The first is if the
++ * generation is lower than the super-block. The second is if
++	 * someone has copied up this file from underneath us; in that case
++	 * we also need to refresh things.
++ */
++ if (d_deleted(dentry) ||
++ (sbgen <= fgen &&
++ dbstart(dentry) == fbstart(file) &&
++ unionfs_lower_file(file)))
++ goto out_may_copyup;
++
++ /* save orig branch ID */
++ orig_brid = UNIONFS_F(file)->saved_branch_ids[fbstart(file)];
++
++ /* First we throw out the existing files. */
++ cleanup_file(file);
++
++ /* Now we reopen the file(s) as in unionfs_open. */
++ bstart = fbstart(file) = dbstart(dentry);
++ bend = fbend(file) = dbend(dentry);
++
++ size = sizeof(struct file *) * sbmax(sb);
++ UNIONFS_F(file)->lower_files = kzalloc(size, GFP_KERNEL);
++ if (unlikely(!UNIONFS_F(file)->lower_files)) {
++ err = -ENOMEM;
++ goto out;
++ }
++ size = sizeof(int) * sbmax(sb);
++ UNIONFS_F(file)->saved_branch_ids = kzalloc(size, GFP_KERNEL);
++ if (unlikely(!UNIONFS_F(file)->saved_branch_ids)) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ if (S_ISDIR(dentry->d_inode->i_mode)) {
++ /* We need to open all the files. */
++ err = open_all_files(file);
++ if (err)
++ goto out;
++ } else {
++ int new_brid;
++ /* We only open the highest priority branch. */
++ err = open_highest_file(file, willwrite);
++ if (err)
++ goto out;
++ new_brid = UNIONFS_F(file)->saved_branch_ids[fbstart(file)];
++ if (unlikely(new_brid != orig_brid && sbgen > fgen)) {
++ /*
++ * If we re-opened the file on a different branch
++ * than the original one, and this was due to a new
++ * branch inserted, then update the mnt counts of
++ * the old and new branches accordingly.
++ */
++ unionfs_mntget(dentry, bstart);
++ unionfs_mntput(sb->s_root,
++ branch_id_to_idx(sb, orig_brid));
++ }
++ /* regular files have only one open lower file */
++ fbend(file) = fbstart(file);
++ }
++ atomic_set(&UNIONFS_F(file)->generation,
++ atomic_read(&UNIONFS_I(dentry->d_inode)->generation));
++
++out_may_copyup:
++ /* Copyup on the first write to a file on a readonly branch. */
++ if (willwrite && IS_WRITE_FLAG(file->f_flags) &&
++ !IS_WRITE_FLAG(unionfs_lower_file(file)->f_flags) &&
++ is_robranch(dentry)) {
++ pr_debug("unionfs: do delay copyup of \"%s\"\n",
++ dentry->d_name.name);
++ err = do_delayed_copyup(file, parent);
++ /* regular files have only one open lower file */
++ if (!err && !S_ISDIR(dentry->d_inode->i_mode))
++ fbend(file) = fbstart(file);
++ }
++
++out:
++ if (err) {
++ kfree(UNIONFS_F(file)->lower_files);
++ kfree(UNIONFS_F(file)->saved_branch_ids);
++ }
++ return err;
++}
++
++/*
++ * Revalidate the struct file
++ * @file: file to revalidate
++ * @parent: parent dentry (locked by caller)
++ * @willwrite: true if caller may cause changes to the file; false otherwise.
++ * Caller must lock/unlock dentry's branch configuration.
++ */
++int unionfs_file_revalidate(struct file *file, struct dentry *parent,
++ bool willwrite)
++{
++ struct super_block *sb;
++ struct dentry *dentry;
++ int sbgen, dgen;
++ int err = 0;
++
++ dentry = file->f_path.dentry;
++ sb = dentry->d_sb;
++ verify_locked(dentry);
++ verify_locked(parent);
++
++ /*
++ * First revalidate the dentry inside struct file,
++ * but not unhashed dentries.
++ */
++ if (!d_deleted(dentry) &&
++ !__unionfs_d_revalidate(dentry, parent, willwrite)) {
++ err = -ESTALE;
++ goto out;
++ }
++
++ sbgen = atomic_read(&UNIONFS_SB(sb)->generation);
++ dgen = atomic_read(&UNIONFS_D(dentry)->generation);
++
++ if (unlikely(sbgen > dgen)) { /* XXX: should never happen */
++ pr_debug("unionfs: failed to revalidate dentry (%s)\n",
++ dentry->d_name.name);
++ err = -ESTALE;
++ goto out;
++ }
++
++ err = __unionfs_file_revalidate(file, dentry, parent, sb,
++ sbgen, dgen, willwrite);
++out:
++ return err;
++}
++
++/* unionfs_open helper function: open a directory */
++static int __open_dir(struct inode *inode, struct file *file)
++{
++ struct dentry *lower_dentry;
++ struct file *lower_file;
++ int bindex, bstart, bend;
++ struct vfsmount *mnt;
++
++ bstart = fbstart(file) = dbstart(file->f_path.dentry);
++ bend = fbend(file) = dbend(file->f_path.dentry);
++
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ lower_dentry =
++ unionfs_lower_dentry_idx(file->f_path.dentry, bindex);
++ if (!lower_dentry)
++ continue;
++
++ dget(lower_dentry);
++ unionfs_mntget(file->f_path.dentry, bindex);
++ mnt = unionfs_lower_mnt_idx(file->f_path.dentry, bindex);
++ lower_file = dentry_open(lower_dentry, mnt, file->f_flags,
++ current_cred());
++ if (IS_ERR(lower_file))
++ return PTR_ERR(lower_file);
++
++ unionfs_set_lower_file_idx(file, bindex, lower_file);
++
++ /*
++ * The branchget goes after the open, because otherwise
++ * we would miss the reference on release.
++ */
++ branchget(inode->i_sb, bindex);
++ }
++
++ return 0;
++}
++
++/* unionfs_open helper function: open a file */
++static int __open_file(struct inode *inode, struct file *file,
++ struct dentry *parent)
++{
++ struct dentry *lower_dentry;
++ struct file *lower_file;
++ int lower_flags;
++ int bindex, bstart, bend;
++
++ lower_dentry = unionfs_lower_dentry(file->f_path.dentry);
++ lower_flags = file->f_flags;
++
++ bstart = fbstart(file) = dbstart(file->f_path.dentry);
++ bend = fbend(file) = dbend(file->f_path.dentry);
++
++ /*
++ * check for the permission for lower file. If the error is
++ * COPYUP_ERR, copyup the file.
++ */
++ if (lower_dentry->d_inode && is_robranch(file->f_path.dentry)) {
++ /*
++ * if the open will change the file, copy it up otherwise
++ * defer it.
++ */
++ if (lower_flags & O_TRUNC) {
++ int size = 0;
++ int err = -EROFS;
++
++ /* copyup the file */
++ for (bindex = bstart - 1; bindex >= 0; bindex--) {
++ err = copyup_file(parent->d_inode, file,
++ bstart, bindex, size);
++ if (!err)
++ break;
++ }
++ return err;
++ } else {
++ /*
++ * turn off writeable flags, to force delayed copyup
++ * by caller.
++ */
++ lower_flags &= ~(OPEN_WRITE_FLAGS);
++ }
++ }
++
++ dget(lower_dentry);
++
++ /*
++ * dentry_open will decrement mnt refcnt if err.
++ * otherwise fput() will do an mntput() for us upon file close.
++ */
++ unionfs_mntget(file->f_path.dentry, bstart);
++ lower_file =
++ dentry_open(lower_dentry,
++ unionfs_lower_mnt_idx(file->f_path.dentry, bstart),
++ lower_flags, current_cred());
++ if (IS_ERR(lower_file))
++ return PTR_ERR(lower_file);
++
++ unionfs_set_lower_file(file, lower_file);
++ branchget(inode->i_sb, bstart);
++
++ return 0;
++}
++
++int unionfs_open(struct inode *inode, struct file *file)
++{
++ int err = 0;
++ struct file *lower_file = NULL;
++ struct dentry *dentry = file->f_path.dentry;
++ struct dentry *parent;
++ int bindex = 0, bstart = 0, bend = 0;
++ int size;
++ int valid = 0;
++
++ unionfs_read_lock(inode->i_sb, UNIONFS_SMUTEX_PARENT);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ /* don't open unhashed/deleted files */
++ if (d_deleted(dentry)) {
++ err = -ENOENT;
++ goto out_nofree;
++ }
++
++ /* XXX: should I change 'false' below to the 'willwrite' flag? */
++ valid = __unionfs_d_revalidate(dentry, parent, false);
++ if (unlikely(!valid)) {
++ err = -ESTALE;
++ goto out_nofree;
++ }
++
++ file->private_data =
++ kzalloc(sizeof(struct unionfs_file_info), GFP_KERNEL);
++ if (unlikely(!UNIONFS_F(file))) {
++ err = -ENOMEM;
++ goto out_nofree;
++ }
++ fbstart(file) = -1;
++ fbend(file) = -1;
++ atomic_set(&UNIONFS_F(file)->generation,
++ atomic_read(&UNIONFS_I(inode)->generation));
++
++ size = sizeof(struct file *) * sbmax(inode->i_sb);
++ UNIONFS_F(file)->lower_files = kzalloc(size, GFP_KERNEL);
++ if (unlikely(!UNIONFS_F(file)->lower_files)) {
++ err = -ENOMEM;
++ goto out;
++ }
++ size = sizeof(int) * sbmax(inode->i_sb);
++ UNIONFS_F(file)->saved_branch_ids = kzalloc(size, GFP_KERNEL);
++ if (unlikely(!UNIONFS_F(file)->saved_branch_ids)) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ bstart = fbstart(file) = dbstart(dentry);
++ bend = fbend(file) = dbend(dentry);
++
++ /*
++ * open all directories and make the unionfs file struct point to
++ * these lower file structs
++ */
++ if (S_ISDIR(inode->i_mode))
++ err = __open_dir(inode, file); /* open a dir */
++ else
++ err = __open_file(inode, file, parent); /* open a file */
++
++ /* freeing the allocated resources, and fput the opened files */
++ if (err) {
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ lower_file = unionfs_lower_file_idx(file, bindex);
++ if (!lower_file)
++ continue;
++
++ branchput(dentry->d_sb, bindex);
++ /* fput calls dput for lower_dentry */
++ fput(lower_file);
++ }
++ }
++
++out:
++ if (err) {
++ kfree(UNIONFS_F(file)->lower_files);
++ kfree(UNIONFS_F(file)->saved_branch_ids);
++ kfree(UNIONFS_F(file));
++ }
++out_nofree:
++ if (!err) {
++ unionfs_postcopyup_setmnt(dentry);
++ unionfs_copy_attr_times(inode);
++ unionfs_check_file(file);
++ unionfs_check_inode(inode);
++ }
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(inode->i_sb);
++ return err;
++}
++
++/*
++ * release all lower object references & free the file info structure
++ *
++ * No need to grab sb info's rwsem.
++ */
++int unionfs_file_release(struct inode *inode, struct file *file)
++{
++ struct file *lower_file = NULL;
++ struct unionfs_file_info *fileinfo;
++ struct unionfs_inode_info *inodeinfo;
++ struct super_block *sb = inode->i_sb;
++ struct dentry *dentry = file->f_path.dentry;
++ struct dentry *parent;
++ int bindex, bstart, bend;
++ int fgen, err = 0;
++
++ /*
++ * Since mm/memory.c:might_fault() (under PROVE_LOCKING) was
++ * modified in 2.6.29-rc1 to call might_lock_read on mmap_sem, this
++ * has been causing false positives in file system stacking layers.
++ * In particular, our ->mmap is called after sys_mmap2 already holds
++ * mmap_sem, then we lock our own mutexes; but earlier, it's
++ * possible for lockdep to have locked our mutexes first, and then
++ * we call a lower ->readdir which could call might_fault. The
++ * different ordering of the locks is what lockdep complains about
++ * -- unnecessarily. Therefore, we have no choice but to tell
++ * lockdep to temporarily turn off lockdep here. Note: the comments
++ * inside might_sleep also suggest that it would have been
++	 * nicer to only annotate paths that need that might_lock_read.
++ */
++ lockdep_off();
++ unionfs_read_lock(sb, UNIONFS_SMUTEX_PARENT);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ /*
++	 * We try to revalidate, but the VFS ignores return values
++	 * from file->release, so we must always try to succeed here,
++	 * including doing the kfree and dput below. So if revalidation
++ * failed, all we can do is print some message and keep going.
++ */
++ err = unionfs_file_revalidate(file, parent,
++ UNIONFS_F(file)->wrote_to_file);
++ if (!err)
++ unionfs_check_file(file);
++ fileinfo = UNIONFS_F(file);
++ BUG_ON(file->f_path.dentry->d_inode != inode);
++ inodeinfo = UNIONFS_I(inode);
++
++ /* fput all the lower files */
++ fgen = atomic_read(&fileinfo->generation);
++ bstart = fbstart(file);
++ bend = fbend(file);
++
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ lower_file = unionfs_lower_file_idx(file, bindex);
++
++ if (lower_file) {
++ unionfs_set_lower_file_idx(file, bindex, NULL);
++ fput(lower_file);
++ branchput(sb, bindex);
++ }
++
++ /* if there are no more refs to the dentry, dput it */
++ if (d_deleted(dentry)) {
++ dput(unionfs_lower_dentry_idx(dentry, bindex));
++ unionfs_set_lower_dentry_idx(dentry, bindex, NULL);
++ }
++ }
++
++ kfree(fileinfo->lower_files);
++ kfree(fileinfo->saved_branch_ids);
++
++ if (fileinfo->rdstate) {
++ fileinfo->rdstate->access = jiffies;
++ spin_lock(&inodeinfo->rdlock);
++ inodeinfo->rdcount++;
++ list_add_tail(&fileinfo->rdstate->cache,
++ &inodeinfo->readdircache);
++ mark_inode_dirty(inode);
++ spin_unlock(&inodeinfo->rdlock);
++ fileinfo->rdstate = NULL;
++ }
++ kfree(fileinfo);
++
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(sb);
++ lockdep_on();
++ return err;
++}
++
++/* pass the ioctl to the lower fs */
++static long do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ struct file *lower_file;
++ int err;
++
++ lower_file = unionfs_lower_file(file);
++
++ err = -ENOTTY;
++ if (!lower_file || !lower_file->f_op)
++ goto out;
++ if (lower_file->f_op->unlocked_ioctl) {
++ err = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
++#ifdef CONFIG_COMPAT
++	} else if (lower_file->f_op->compat_ioctl) {
++		err = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
++#endif
++ }
++
++out:
++ return err;
++}
++
++/*
++ * return to user-space the branch indices containing the file in question
++ *
++ * We use fd_set and therefore we are limited to FD_SETSIZE branches,
++ * which is currently 1024 - plenty for most people
++ */
++static int unionfs_ioctl_queryfile(struct file *file, struct dentry *parent,
++ unsigned int cmd, unsigned long arg)
++{
++ int err = 0;
++ fd_set branchlist;
++ int bstart = 0, bend = 0, bindex = 0;
++ int orig_bstart, orig_bend;
++ struct dentry *dentry, *lower_dentry;
++ struct vfsmount *mnt;
++
++ dentry = file->f_path.dentry;
++ orig_bstart = dbstart(dentry);
++ orig_bend = dbend(dentry);
++ err = unionfs_partial_lookup(dentry, parent);
++ if (err)
++ goto out;
++ bstart = dbstart(dentry);
++ bend = dbend(dentry);
++
++ FD_ZERO(&branchlist);
++
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
++ if (!lower_dentry)
++ continue;
++ if (likely(lower_dentry->d_inode))
++ FD_SET(bindex, &branchlist);
++ /* purge any lower objects after partial_lookup */
++ if (bindex < orig_bstart || bindex > orig_bend) {
++ dput(lower_dentry);
++ unionfs_set_lower_dentry_idx(dentry, bindex, NULL);
++ iput(unionfs_lower_inode_idx(dentry->d_inode, bindex));
++ unionfs_set_lower_inode_idx(dentry->d_inode, bindex,
++ NULL);
++ mnt = unionfs_lower_mnt_idx(dentry, bindex);
++ if (!mnt)
++ continue;
++ unionfs_mntput(dentry, bindex);
++ unionfs_set_lower_mnt_idx(dentry, bindex, NULL);
++ }
++ }
++ /* restore original dentry's offsets */
++ dbstart(dentry) = orig_bstart;
++ dbend(dentry) = orig_bend;
++ ibstart(dentry->d_inode) = orig_bstart;
++ ibend(dentry->d_inode) = orig_bend;
++
++ err = copy_to_user((void __user *)arg, &branchlist, sizeof(fd_set));
++ if (unlikely(err))
++ err = -EFAULT;
++
++out:
++ return err < 0 ? err : bend;
++}
++
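A hypothetical user-space caller (illustration only) of the queryfile ioctl
implemented above: the call fills an fd_set whose set bits are the branch
indices containing the file and returns the highest branch index.  The
header name below is an assumption; UNIONFS_IOCTL_QUERYFILE is defined in
unionfs's user-visible header, which is not part of this excerpt:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/select.h>
#include <sys/ioctl.h>
#include <linux/union_fs.h>	/* assumed header defining UNIONFS_IOCTL_QUERYFILE */

int main(int argc, char *argv[])
{
	fd_set branches;
	int fd, bend, bindex;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;

	FD_ZERO(&branches);
	bend = ioctl(fd, UNIONFS_IOCTL_QUERYFILE, &branches);
	if (bend < 0) {
		close(fd);
		return 1;
	}
	for (bindex = 0; bindex <= bend; bindex++)
		if (FD_ISSET(bindex, &branches))
			printf("present in branch %d\n", bindex);
	close(fd);
	return 0;
}
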
++long unionfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ long err;
++ struct dentry *dentry = file->f_path.dentry;
++ struct dentry *parent;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ err = unionfs_file_revalidate(file, parent, true);
++ if (unlikely(err))
++ goto out;
++
++ /* check if asked for local commands */
++ switch (cmd) {
++ case UNIONFS_IOCTL_INCGEN:
++ /* Increment the superblock generation count */
++ pr_info("unionfs: incgen ioctl deprecated; "
++ "use \"-o remount,incgen\"\n");
++ err = -ENOSYS;
++ break;
++
++ case UNIONFS_IOCTL_QUERYFILE:
++ /* Return list of branches containing the given file */
++ err = unionfs_ioctl_queryfile(file, parent, cmd, arg);
++ break;
++
++ default:
++ /* pass the ioctl down */
++ err = do_ioctl(file, cmd, arg);
++ break;
++ }
++
++out:
++ unionfs_check_file(file);
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++ return err;
++}
++
++int unionfs_flush(struct file *file, fl_owner_t id)
++{
++ int err = 0;
++ struct file *lower_file = NULL;
++ struct dentry *dentry = file->f_path.dentry;
++ struct dentry *parent;
++ int bindex, bstart, bend;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ err = unionfs_file_revalidate(file, parent,
++ UNIONFS_F(file)->wrote_to_file);
++ if (unlikely(err))
++ goto out;
++ unionfs_check_file(file);
++
++ bstart = fbstart(file);
++ bend = fbend(file);
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ lower_file = unionfs_lower_file_idx(file, bindex);
++
++ if (lower_file && lower_file->f_op &&
++ lower_file->f_op->flush) {
++ err = lower_file->f_op->flush(lower_file, id);
++ if (err)
++ goto out;
++ }
++
++ }
++
++out:
++ if (!err)
++ unionfs_check_file(file);
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++ return err;
++}
+diff --git a/fs/unionfs/copyup.c b/fs/unionfs/copyup.c
+new file mode 100644
+index 0000000..bba3a75
+--- /dev/null
++++ b/fs/unionfs/copyup.c
+@@ -0,0 +1,896 @@
++/*
++ * Copyright (c) 2003-2010 Erez Zadok
++ * Copyright (c) 2003-2006 Charles P. Wright
++ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2005-2006 Junjiro Okajima
++ * Copyright (c) 2005 Arun M. Krishnakumar
++ * Copyright (c) 2004-2006 David P. Quigley
++ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
++ * Copyright (c) 2003 Puja Gupta
++ * Copyright (c) 2003 Harikesavan Krishnan
++ * Copyright (c) 2003-2010 Stony Brook University
++ * Copyright (c) 2003-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "union.h"
++
++/*
++ * For detailed explanation of copyup see:
++ * Documentation/filesystems/unionfs/concepts.txt
++ */
++
++#ifdef CONFIG_UNION_FS_XATTR
++/* copyup all extended attrs for a given dentry */
++static int copyup_xattrs(struct dentry *old_lower_dentry,
++ struct dentry *new_lower_dentry)
++{
++ int err = 0;
++ ssize_t list_size = -1;
++ char *name_list = NULL;
++ char *attr_value = NULL;
++ char *name_list_buf = NULL;
++
++ /* query the actual size of the xattr list */
++ list_size = vfs_listxattr(old_lower_dentry, NULL, 0);
++ if (list_size <= 0) {
++ err = list_size;
++ goto out;
++ }
++
++ /* allocate space for the actual list */
++ name_list = unionfs_xattr_alloc(list_size + 1, XATTR_LIST_MAX);
++ if (unlikely(!name_list || IS_ERR(name_list))) {
++ err = PTR_ERR(name_list);
++ goto out;
++ }
++
++ name_list_buf = name_list; /* save for kfree at end */
++
++ /* now get the actual xattr list of the source file */
++ list_size = vfs_listxattr(old_lower_dentry, name_list, list_size);
++ if (list_size <= 0) {
++ err = list_size;
++ goto out;
++ }
++
++ /* allocate space to hold each xattr's value */
++ attr_value = unionfs_xattr_alloc(XATTR_SIZE_MAX, XATTR_SIZE_MAX);
++ if (unlikely(!attr_value || IS_ERR(attr_value))) {
++		err = PTR_ERR(attr_value);
++ goto out;
++ }
++
++ /* in a loop, get and set each xattr from src to dst file */
++ while (*name_list) {
++ ssize_t size;
++
++ /* Lock here since vfs_getxattr doesn't lock for us */
++ mutex_lock(&old_lower_dentry->d_inode->i_mutex);
++ size = vfs_getxattr(old_lower_dentry, name_list,
++ attr_value, XATTR_SIZE_MAX);
++ mutex_unlock(&old_lower_dentry->d_inode->i_mutex);
++ if (size < 0) {
++ err = size;
++ goto out;
++ }
++ if (size > XATTR_SIZE_MAX) {
++ err = -E2BIG;
++ goto out;
++ }
++ /* Don't lock here since vfs_setxattr does it for us. */
++ err = vfs_setxattr(new_lower_dentry, name_list, attr_value,
++ size, 0);
++ /*
++ * Selinux depends on "security.*" xattrs, so to maintain
++ * the security of copied-up files, if Selinux is active,
++ * then we must copy these xattrs as well. So we need to
++ * temporarily get FOWNER privileges.
++ * XXX: move entire copyup code to SIOQ.
++ */
++ if (err == -EPERM && !capable(CAP_FOWNER)) {
++ const struct cred *old_creds;
++ struct cred *new_creds;
++
++ new_creds = prepare_creds();
++ if (unlikely(!new_creds)) {
++ err = -ENOMEM;
++ goto out;
++ }
++ cap_raise(new_creds->cap_effective, CAP_FOWNER);
++ old_creds = override_creds(new_creds);
++ err = vfs_setxattr(new_lower_dentry, name_list,
++ attr_value, size, 0);
++ revert_creds(old_creds);
++ }
++ if (err < 0)
++ goto out;
++ name_list += strlen(name_list) + 1;
++ }
++out:
++ unionfs_xattr_kfree(name_list_buf);
++ unionfs_xattr_kfree(attr_value);
++ /* Ignore if xattr isn't supported */
++ if (err == -ENOTSUPP || err == -EOPNOTSUPP)
++ err = 0;
++ return err;
++}
++#endif /* CONFIG_UNION_FS_XATTR */
++
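A user-space analogue (illustration only, using the libc xattr syscalls
rather than the in-kernel vfs_* helpers above): query the size of the xattr
name list, fetch it, then copy each name:value pair from a source path to a
destination path:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/xattr.h>

#define VALUE_MAX (64 * 1024)	/* upper bound on a single xattr value */

static int copy_xattrs(const char *src, const char *dst)
{
	ssize_t list_size, val_size;
	char *names = NULL, *value = NULL, *name;
	int err = 0;

	list_size = listxattr(src, NULL, 0);	/* query required size */
	if (list_size <= 0)
		return (int)list_size;		/* no xattrs, or error */

	names = malloc(list_size);
	value = malloc(VALUE_MAX);
	if (!names || !value) {
		err = -1;
		goto out;
	}
	list_size = listxattr(src, names, list_size);
	if (list_size < 0) {
		err = -1;
		goto out;
	}
	/* the list is a sequence of NUL-terminated attribute names */
	for (name = names; name < names + list_size; name += strlen(name) + 1) {
		val_size = getxattr(src, name, value, VALUE_MAX);
		if (val_size < 0 ||
		    setxattr(dst, name, value, val_size, 0) < 0) {
			err = -1;
			break;
		}
	}
out:
	free(names);
	free(value);
	return err;
}

int main(int argc, char *argv[])
{
	if (argc != 3)
		return 1;
	return copy_xattrs(argv[1], argv[2]) ? 1 : 0;
}
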
++/*
++ * Determine the mode based on the copyup flags, and the existing dentry.
++ *
++ * Handle file systems which may not support certain options. For example
++ * jffs2 doesn't allow one to chmod a symlink. So we ignore such harmless
++ * errors, rather than propagating them up, which results in copyup errors
++ * and errors returned back to users.
++ */
++static int copyup_permissions(struct super_block *sb,
++ struct dentry *old_lower_dentry,
++ struct dentry *new_lower_dentry)
++{
++ struct inode *i = old_lower_dentry->d_inode;
++ struct iattr newattrs;
++ int err;
++
++ newattrs.ia_atime = i->i_atime;
++ newattrs.ia_mtime = i->i_mtime;
++ newattrs.ia_ctime = i->i_ctime;
++ newattrs.ia_gid = i->i_gid;
++ newattrs.ia_uid = i->i_uid;
++ newattrs.ia_valid = ATTR_CTIME | ATTR_ATIME | ATTR_MTIME |
++ ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_FORCE |
++ ATTR_GID | ATTR_UID;
++ mutex_lock(&new_lower_dentry->d_inode->i_mutex);
++ err = notify_change(new_lower_dentry, &newattrs);
++ if (err)
++ goto out;
++
++ /* now try to change the mode and ignore EOPNOTSUPP on symlinks */
++ newattrs.ia_mode = i->i_mode;
++ newattrs.ia_valid = ATTR_MODE | ATTR_FORCE;
++ err = notify_change(new_lower_dentry, &newattrs);
++ if (err == -EOPNOTSUPP &&
++ S_ISLNK(new_lower_dentry->d_inode->i_mode)) {
++ printk(KERN_WARNING
++ "unionfs: changing \"%s\" symlink mode unsupported\n",
++ new_lower_dentry->d_name.name);
++ err = 0;
++ }
++
++out:
++ mutex_unlock(&new_lower_dentry->d_inode->i_mutex);
++ return err;
++}
++
++/*
++ * create the new device/file/directory - use copyup_permissions to copy up
++ * times and mode
++ *
++ * if the object being copied up is a regular file, the file is only created;
++ * the contents have to be copied up separately
++ */
++static int __copyup_ndentry(struct dentry *old_lower_dentry,
++ struct dentry *new_lower_dentry,
++ struct dentry *new_lower_parent_dentry,
++ char *symbuf)
++{
++ int err = 0;
++ umode_t old_mode = old_lower_dentry->d_inode->i_mode;
++ struct sioq_args args;
++
++ if (S_ISDIR(old_mode)) {
++ args.mkdir.parent = new_lower_parent_dentry->d_inode;
++ args.mkdir.dentry = new_lower_dentry;
++ args.mkdir.mode = old_mode;
++
++ run_sioq(__unionfs_mkdir, &args);
++ err = args.err;
++ } else if (S_ISLNK(old_mode)) {
++ args.symlink.parent = new_lower_parent_dentry->d_inode;
++ args.symlink.dentry = new_lower_dentry;
++ args.symlink.symbuf = symbuf;
++
++ run_sioq(__unionfs_symlink, &args);
++ err = args.err;
++ } else if (S_ISBLK(old_mode) || S_ISCHR(old_mode) ||
++ S_ISFIFO(old_mode) || S_ISSOCK(old_mode)) {
++ args.mknod.parent = new_lower_parent_dentry->d_inode;
++ args.mknod.dentry = new_lower_dentry;
++ args.mknod.mode = old_mode;
++ args.mknod.dev = old_lower_dentry->d_inode->i_rdev;
++
++ run_sioq(__unionfs_mknod, &args);
++ err = args.err;
++ } else if (S_ISREG(old_mode)) {
++ struct nameidata nd;
++ err = init_lower_nd(&nd, LOOKUP_CREATE);
++ if (unlikely(err < 0))
++ goto out;
++ args.create.nd = &nd;
++ args.create.parent = new_lower_parent_dentry->d_inode;
++ args.create.dentry = new_lower_dentry;
++ args.create.mode = old_mode;
++
++ run_sioq(__unionfs_create, &args);
++ err = args.err;
++ release_lower_nd(&nd, err);
++ } else {
++ printk(KERN_CRIT "unionfs: unknown inode type %d\n",
++ old_mode);
++ BUG();
++ }
++
++out:
++ return err;
++}
++
++static int __copyup_reg_data(struct dentry *dentry,
++ struct dentry *new_lower_dentry, int new_bindex,
++ struct dentry *old_lower_dentry, int old_bindex,
++ struct file **copyup_file, loff_t len)
++{
++ struct super_block *sb = dentry->d_sb;
++ struct file *input_file;
++ struct file *output_file;
++ struct vfsmount *output_mnt;
++ mm_segment_t old_fs;
++ char *buf = NULL;
++ ssize_t read_bytes, write_bytes;
++ loff_t size;
++ int err = 0;
++
++ /* open old file */
++ unionfs_mntget(dentry, old_bindex);
++ branchget(sb, old_bindex);
++ /* dentry_open calls dput and mntput if it returns an error */
++ input_file = dentry_open(old_lower_dentry,
++ unionfs_lower_mnt_idx(dentry, old_bindex),
++ O_RDONLY | O_LARGEFILE, current_cred());
++ if (IS_ERR(input_file)) {
++ dput(old_lower_dentry);
++ err = PTR_ERR(input_file);
++ goto out;
++ }
++ if (unlikely(!input_file->f_op || !input_file->f_op->read)) {
++ err = -EINVAL;
++ goto out_close_in;
++ }
++
++ /* open new file */
++ dget(new_lower_dentry);
++ output_mnt = unionfs_mntget(sb->s_root, new_bindex);
++ branchget(sb, new_bindex);
++ output_file = dentry_open(new_lower_dentry, output_mnt,
++ O_RDWR | O_LARGEFILE, current_cred());
++ if (IS_ERR(output_file)) {
++ err = PTR_ERR(output_file);
++ goto out_close_in2;
++ }
++ if (unlikely(!output_file->f_op || !output_file->f_op->write)) {
++ err = -EINVAL;
++ goto out_close_out;
++ }
++
++ /* allocating a buffer */
++ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
++ if (unlikely(!buf)) {
++ err = -ENOMEM;
++ goto out_close_out;
++ }
++
++ input_file->f_pos = 0;
++ output_file->f_pos = 0;
++
++ old_fs = get_fs();
++ set_fs(KERNEL_DS);
++
++ size = len;
++ err = 0;
++ do {
++ if (len >= PAGE_SIZE)
++ size = PAGE_SIZE;
++ else if ((len < PAGE_SIZE) && (len > 0))
++ size = len;
++
++ len -= PAGE_SIZE;
++
++ read_bytes =
++ input_file->f_op->read(input_file,
++ (char __user *)buf, size,
++ &input_file->f_pos);
++ if (read_bytes <= 0) {
++ err = read_bytes;
++ break;
++ }
++
++ /* see Documentation/filesystems/unionfs/issues.txt */
++ lockdep_off();
++ write_bytes =
++ output_file->f_op->write(output_file,
++ (char __user *)buf,
++ read_bytes,
++ &output_file->f_pos);
++ lockdep_on();
++ if ((write_bytes < 0) || (write_bytes < read_bytes)) {
++ err = write_bytes;
++ break;
++ }
++ } while ((read_bytes > 0) && (len > 0));
++
++ set_fs(old_fs);
++
++ kfree(buf);
++
++ if (!err)
++ err = output_file->f_op->fsync(output_file, 0);
++
++ if (err)
++ goto out_close_out;
++
++ if (copyup_file) {
++ *copyup_file = output_file;
++ goto out_close_in;
++ }
++
++out_close_out:
++ fput(output_file);
++
++out_close_in2:
++ branchput(sb, new_bindex);
++
++out_close_in:
++ fput(input_file);
++
++out:
++ branchput(sb, old_bindex);
++
++ return err;
++}
++
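A user-space analogue (illustration only) of the copy loop above: read the
source in page-sized chunks, write each chunk to the destination, stop once
'len' bytes have been transferred, and fsync the result, mirroring the
f_op->fsync() call at the end of __copyup_reg_data:

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>

#define CHUNK 4096			/* stands in for PAGE_SIZE */

static int copy_data(int in_fd, int out_fd, off_t len)
{
	char buf[CHUNK];
	ssize_t rd, wr;

	while (len > 0) {
		size_t want = len >= CHUNK ? CHUNK : (size_t)len;

		rd = read(in_fd, buf, want);
		if (rd <= 0)
			return (int)rd;	/* 0 = short source, <0 = error */
		wr = write(out_fd, buf, rd);
		if (wr < rd)
			return -1;	/* short or failed write */
		len -= rd;
	}
	return fsync(out_fd);
}

int main(int argc, char *argv[])
{
	int in_fd, out_fd, err;

	if (argc != 4)
		return 1;
	in_fd = open(argv[1], O_RDONLY);
	out_fd = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (in_fd < 0 || out_fd < 0)
		return 1;
	err = copy_data(in_fd, out_fd, (off_t)atoll(argv[3]));
	close(in_fd);
	close(out_fd);
	return err ? 1 : 0;
}
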
++/*
++ * dput the lower references for old and new dentry & clear a lower dentry
++ * pointer
++ */
++static void __clear(struct dentry *dentry, struct dentry *old_lower_dentry,
++ int old_bstart, int old_bend,
++ struct dentry *new_lower_dentry, int new_bindex)
++{
++ /* get rid of the lower dentry and all its traces */
++ unionfs_set_lower_dentry_idx(dentry, new_bindex, NULL);
++ dbstart(dentry) = old_bstart;
++ dbend(dentry) = old_bend;
++
++ dput(new_lower_dentry);
++ dput(old_lower_dentry);
++}
++
++/*
++ * Copy up a dentry to a file of specified name.
++ *
++ * @dir: used to pull the ->i_sb to access other branches
++ * @dentry: the non-negative dentry whose lower_inode we should copy
++ * @bstart: the branch of the lower_inode to copy from
++ * @new_bindex: the branch to create the new file in
++ * @name: the name of the file to create
++ * @namelen: length of @name
++ * @copyup_file: the "struct file" to return (optional)
++ * @len: how many bytes to copy-up?
++ */
++int copyup_dentry(struct inode *dir, struct dentry *dentry, int bstart,
++ int new_bindex, const char *name, int namelen,
++ struct file **copyup_file, loff_t len)
++{
++ struct dentry *new_lower_dentry;
++ struct dentry *old_lower_dentry = NULL;
++ struct super_block *sb;
++ int err = 0;
++ int old_bindex;
++ int old_bstart;
++ int old_bend;
++ struct dentry *new_lower_parent_dentry = NULL;
++ mm_segment_t oldfs;
++ char *symbuf = NULL;
++
++ verify_locked(dentry);
++
++ old_bindex = bstart;
++ old_bstart = dbstart(dentry);
++ old_bend = dbend(dentry);
++
++ BUG_ON(new_bindex < 0);
++ BUG_ON(new_bindex >= old_bindex);
++
++ sb = dir->i_sb;
++
++ err = is_robranch_super(sb, new_bindex);
++ if (err)
++ goto out;
++
++ /* Create the directory structure above this dentry. */
++ new_lower_dentry = create_parents(dir, dentry, name, new_bindex);
++ if (IS_ERR(new_lower_dentry)) {
++ err = PTR_ERR(new_lower_dentry);
++ goto out;
++ }
++
++ old_lower_dentry = unionfs_lower_dentry_idx(dentry, old_bindex);
++ /* we conditionally dput this old_lower_dentry at end of function */
++ dget(old_lower_dentry);
++
++ /* For symlinks, we must read the link before we lock the directory. */
++ if (S_ISLNK(old_lower_dentry->d_inode->i_mode)) {
++
++ symbuf = kmalloc(PATH_MAX, GFP_KERNEL);
++ if (unlikely(!symbuf)) {
++ __clear(dentry, old_lower_dentry,
++ old_bstart, old_bend,
++ new_lower_dentry, new_bindex);
++ err = -ENOMEM;
++ goto out_free;
++ }
++
++ oldfs = get_fs();
++ set_fs(KERNEL_DS);
++ err = old_lower_dentry->d_inode->i_op->readlink(
++ old_lower_dentry,
++ (char __user *)symbuf,
++ PATH_MAX);
++ set_fs(oldfs);
++ if (err < 0) {
++ __clear(dentry, old_lower_dentry,
++ old_bstart, old_bend,
++ new_lower_dentry, new_bindex);
++ goto out_free;
++ }
++ symbuf[err] = '\0';
++ }
++
++ /* Now we lock the parent, and create the object in the new branch. */
++ new_lower_parent_dentry = lock_parent(new_lower_dentry);
++
++ /* create the new inode */
++ err = __copyup_ndentry(old_lower_dentry, new_lower_dentry,
++ new_lower_parent_dentry, symbuf);
++
++ if (err) {
++ __clear(dentry, old_lower_dentry,
++ old_bstart, old_bend,
++ new_lower_dentry, new_bindex);
++ goto out_unlock;
++ }
++
++ /* We actually copyup the file here. */
++ if (S_ISREG(old_lower_dentry->d_inode->i_mode))
++ err = __copyup_reg_data(dentry, new_lower_dentry, new_bindex,
++ old_lower_dentry, old_bindex,
++ copyup_file, len);
++ if (err)
++ goto out_unlink;
++
++ /* Set permissions. */
++ err = copyup_permissions(sb, old_lower_dentry, new_lower_dentry);
++ if (err)
++ goto out_unlink;
++
++#ifdef CONFIG_UNION_FS_XATTR
++ /* Selinux uses extended attributes for permissions. */
++ err = copyup_xattrs(old_lower_dentry, new_lower_dentry);
++ if (err)
++ goto out_unlink;
++#endif /* CONFIG_UNION_FS_XATTR */
++
++	/* do not allow files being deleted to be re-interposed */
++ if (!d_deleted(dentry))
++ unionfs_reinterpose(dentry);
++
++ goto out_unlock;
++
++out_unlink:
++ /*
++ * copyup failed, because we possibly ran out of space or
++ * quota, or something else happened so let's unlink; we don't
++ * really care about the return value of vfs_unlink
++ */
++ vfs_unlink(new_lower_parent_dentry->d_inode, new_lower_dentry);
++
++ if (copyup_file) {
++ /* need to close the file */
++
++ fput(*copyup_file);
++ branchput(sb, new_bindex);
++ }
++
++ /*
++ * TODO: should we reset the error to something like -EIO?
++ *
++ * If we don't reset, the user may get some nonsensical errors, but
++ * on the other hand, if we reset to EIO, we guarantee that the user
++ * will get a "confusing" error message.
++ */
++
++out_unlock:
++ unlock_dir(new_lower_parent_dentry);
++
++out_free:
++ /*
++ * If old_lower_dentry was not a file, then we need to dput it. If
++ * it was a file, then it was already dput indirectly by other
++ * functions we call above which operate on regular files.
++ */
++ if (old_lower_dentry && old_lower_dentry->d_inode &&
++ !S_ISREG(old_lower_dentry->d_inode->i_mode))
++ dput(old_lower_dentry);
++ kfree(symbuf);
++
++ if (err) {
++ /*
++ * if directory creation succeeded, but inode copyup failed,
++ * then purge new dentries.
++ */
++ if (dbstart(dentry) < old_bstart &&
++ ibstart(dentry->d_inode) > dbstart(dentry))
++ __clear(dentry, NULL, old_bstart, old_bend,
++ unionfs_lower_dentry(dentry), dbstart(dentry));
++ goto out;
++ }
++ if (!S_ISDIR(dentry->d_inode->i_mode)) {
++ unionfs_postcopyup_release(dentry);
++ if (!unionfs_lower_inode(dentry->d_inode)) {
++ /*
++ * If we got here, then we copied up to an
++ * unlinked-open file, whose name is .unionfsXXXXX.
++ */
++ struct inode *inode = new_lower_dentry->d_inode;
++ atomic_inc(&inode->i_count);
++ unionfs_set_lower_inode_idx(dentry->d_inode,
++ ibstart(dentry->d_inode),
++ inode);
++ }
++ }
++ unionfs_postcopyup_setmnt(dentry);
++ /* sync inode times from copied-up inode to our inode */
++ unionfs_copy_attr_times(dentry->d_inode);
++ unionfs_check_inode(dir);
++ unionfs_check_dentry(dentry);
++out:
++ return err;
++}
++
++/*
++ * This function creates a copy of a file represented by 'file' which
++ * currently resides in branch 'bstart' to branch 'new_bindex.' The copy
++ * will be named "name".
++ */
++int copyup_named_file(struct inode *dir, struct file *file, char *name,
++ int bstart, int new_bindex, loff_t len)
++{
++ int err = 0;
++ struct file *output_file = NULL;
++
++ err = copyup_dentry(dir, file->f_path.dentry, bstart, new_bindex,
++ name, strlen(name), &output_file, len);
++ if (!err) {
++ fbstart(file) = new_bindex;
++ unionfs_set_lower_file_idx(file, new_bindex, output_file);
++ }
++
++ return err;
++}
++
++/*
++ * This function creates a copy of a file represented by 'file' which
++ * currently resides in branch 'bstart' to branch 'new_bindex'.
++ */
++int copyup_file(struct inode *dir, struct file *file, int bstart,
++ int new_bindex, loff_t len)
++{
++ int err = 0;
++ struct file *output_file = NULL;
++ struct dentry *dentry = file->f_path.dentry;
++
++ err = copyup_dentry(dir, dentry, bstart, new_bindex,
++ dentry->d_name.name, dentry->d_name.len,
++ &output_file, len);
++ if (!err) {
++ fbstart(file) = new_bindex;
++ unionfs_set_lower_file_idx(file, new_bindex, output_file);
++ }
++
++ return err;
++}
++
++/* purge a dentry's lower-branch states (dput/mntput, etc.) */
++static void __cleanup_dentry(struct dentry *dentry, int bindex,
++ int old_bstart, int old_bend)
++{
++ int loop_start;
++ int loop_end;
++ int new_bstart = -1;
++ int new_bend = -1;
++ int i;
++
++ loop_start = min(old_bstart, bindex);
++ loop_end = max(old_bend, bindex);
++
++ /*
++ * This loop sets the bstart and bend for the new dentry by
++ * traversing from left to right. It also dputs all negative
++ * dentries except bindex
++ */
++ for (i = loop_start; i <= loop_end; i++) {
++ if (!unionfs_lower_dentry_idx(dentry, i))
++ continue;
++
++ if (i == bindex) {
++ new_bend = i;
++ if (new_bstart < 0)
++ new_bstart = i;
++ continue;
++ }
++
++ if (!unionfs_lower_dentry_idx(dentry, i)->d_inode) {
++ dput(unionfs_lower_dentry_idx(dentry, i));
++ unionfs_set_lower_dentry_idx(dentry, i, NULL);
++
++ unionfs_mntput(dentry, i);
++ unionfs_set_lower_mnt_idx(dentry, i, NULL);
++ } else {
++ if (new_bstart < 0)
++ new_bstart = i;
++ new_bend = i;
++ }
++ }
++
++ if (new_bstart < 0)
++ new_bstart = bindex;
++ if (new_bend < 0)
++ new_bend = bindex;
++ dbstart(dentry) = new_bstart;
++ dbend(dentry) = new_bend;
++
++}
++
++/* set lower inode ptr and update bstart & bend if necessary */
++static void __set_inode(struct dentry *upper, struct dentry *lower,
++ int bindex)
++{
++ unionfs_set_lower_inode_idx(upper->d_inode, bindex,
++ igrab(lower->d_inode));
++ if (likely(ibstart(upper->d_inode) > bindex))
++ ibstart(upper->d_inode) = bindex;
++ if (likely(ibend(upper->d_inode) < bindex))
++ ibend(upper->d_inode) = bindex;
++
++}
++
++/* set lower dentry ptr and update bstart & bend if necessary */
++static void __set_dentry(struct dentry *upper, struct dentry *lower,
++ int bindex)
++{
++ unionfs_set_lower_dentry_idx(upper, bindex, lower);
++ if (likely(dbstart(upper) > bindex))
++ dbstart(upper) = bindex;
++ if (likely(dbend(upper) < bindex))
++ dbend(upper) = bindex;
++}
++
++/*
++ * This function replicates the directory structure up to the given dentry
++ * in the bindex branch.
++ */
++struct dentry *create_parents(struct inode *dir, struct dentry *dentry,
++ const char *name, int bindex)
++{
++ int err;
++ struct dentry *child_dentry;
++ struct dentry *parent_dentry;
++ struct dentry *lower_parent_dentry = NULL;
++ struct dentry *lower_dentry = NULL;
++ const char *childname;
++ unsigned int childnamelen;
++ int nr_dentry;
++ int count = 0;
++ int old_bstart;
++ int old_bend;
++ struct dentry **path = NULL;
++ struct super_block *sb;
++
++ verify_locked(dentry);
++
++ err = is_robranch_super(dir->i_sb, bindex);
++ if (err) {
++ lower_dentry = ERR_PTR(err);
++ goto out;
++ }
++
++ old_bstart = dbstart(dentry);
++ old_bend = dbend(dentry);
++
++ lower_dentry = ERR_PTR(-ENOMEM);
++
++ /* There is no sense allocating any less than the minimum. */
++ nr_dentry = 1;
++ path = kmalloc(nr_dentry * sizeof(struct dentry *), GFP_KERNEL);
++ if (unlikely(!path))
++ goto out;
++
++ /* assume the negative dentry of unionfs as the parent dentry */
++ parent_dentry = dentry;
++
++ /*
++ * This loop finds the first parent that exists in the given branch.
++ * We start building the directory structure from there. At the end
++ * of the loop, the following should hold:
++ * - child_dentry is the first nonexistent child
++ * - parent_dentry is the first existent parent
++ * - path[0] is the deepest child
++ * - path[count] is the first child to create
++ */
++ do {
++ child_dentry = parent_dentry;
++
++ /* find the parent directory dentry in unionfs */
++ parent_dentry = dget_parent(child_dentry);
++
++ /* find out the lower_parent_dentry in the given branch */
++ lower_parent_dentry =
++ unionfs_lower_dentry_idx(parent_dentry, bindex);
++
++ /* grow path table */
++ if (count == nr_dentry) {
++ void *p;
++
++ nr_dentry *= 2;
++ p = krealloc(path, nr_dentry * sizeof(struct dentry *),
++ GFP_KERNEL);
++ if (unlikely(!p)) {
++ lower_dentry = ERR_PTR(-ENOMEM);
++ goto out;
++ }
++ path = p;
++ }
++
++ /* store the child dentry */
++ path[count++] = child_dentry;
++ } while (!lower_parent_dentry);
++ count--;
++
++ sb = dentry->d_sb;
++
++ /*
++ * This code goes between the begin/end labels and basically
++ * emulates a while(child_dentry != dentry), only cleaner and
++ * shorter than what would be a much longer while loop.
++ */
++begin:
++ /* get lower parent dir in the current branch */
++ lower_parent_dentry = unionfs_lower_dentry_idx(parent_dentry, bindex);
++ dput(parent_dentry);
++
++ /* init the values to lookup */
++ childname = child_dentry->d_name.name;
++ childnamelen = child_dentry->d_name.len;
++
++ if (child_dentry != dentry) {
++ /* lookup child in the underlying file system */
++ lower_dentry = lookup_lck_len(childname, lower_parent_dentry,
++ childnamelen);
++ if (IS_ERR(lower_dentry))
++ goto out;
++ } else {
++ /*
++ * Is the name a whiteout of the child name? Look up the
++ * whiteout child in the underlying file system.
++ */
++ lower_dentry = lookup_lck_len(name, lower_parent_dentry,
++ strlen(name));
++ if (IS_ERR(lower_dentry))
++ goto out;
++
++ /* Replace the current dentry (if any) with the new one */
++ dput(unionfs_lower_dentry_idx(dentry, bindex));
++ unionfs_set_lower_dentry_idx(dentry, bindex,
++ lower_dentry);
++
++ __cleanup_dentry(dentry, bindex, old_bstart, old_bend);
++ goto out;
++ }
++
++ if (lower_dentry->d_inode) {
++ /*
++ * since this already exists we dput to avoid
++ * multiple references on the same dentry
++ */
++ dput(lower_dentry);
++ } else {
++ struct sioq_args args;
++
++ /* it's a negative dentry, create a new dir */
++ lower_parent_dentry = lock_parent(lower_dentry);
++
++ args.mkdir.parent = lower_parent_dentry->d_inode;
++ args.mkdir.dentry = lower_dentry;
++ args.mkdir.mode = child_dentry->d_inode->i_mode;
++
++ run_sioq(__unionfs_mkdir, &args);
++ err = args.err;
++
++ if (!err)
++ err = copyup_permissions(dir->i_sb, child_dentry,
++ lower_dentry);
++ unlock_dir(lower_parent_dentry);
++ if (err) {
++ dput(lower_dentry);
++ lower_dentry = ERR_PTR(err);
++ goto out;
++ }
++
++ }
++
++ __set_inode(child_dentry, lower_dentry, bindex);
++ __set_dentry(child_dentry, lower_dentry, bindex);
++ /*
++ * update times of this dentry, but also the parent, because if
++ * we changed, the parent may have changed too.
++ */
++ fsstack_copy_attr_times(parent_dentry->d_inode,
++ lower_parent_dentry->d_inode);
++ unionfs_copy_attr_times(child_dentry->d_inode);
++
++ parent_dentry = child_dentry;
++ child_dentry = path[--count];
++ goto begin;
++out:
++ /* clean up any leftover dentry references from the do/while loop above */
++ if (IS_ERR(lower_dentry))
++ while (count)
++ dput(path[count--]);
++ kfree(path);
++ return lower_dentry;
++}
++
++/*
++ * Post-copyup helper to ensure we have valid mnts: set lower mnt of
++ * dentry+parents to the first parent node that has an mnt.
++ */
++void unionfs_postcopyup_setmnt(struct dentry *dentry)
++{
++ struct dentry *parent, *hasone;
++ int bindex = dbstart(dentry);
++
++ if (unionfs_lower_mnt_idx(dentry, bindex))
++ return;
++ hasone = dentry->d_parent;
++ /* this loop should stop at root dentry */
++ while (!unionfs_lower_mnt_idx(hasone, bindex))
++ hasone = hasone->d_parent;
++ parent = dentry;
++ while (!unionfs_lower_mnt_idx(parent, bindex)) {
++ unionfs_set_lower_mnt_idx(parent, bindex,
++ unionfs_mntget(hasone, bindex));
++ parent = parent->d_parent;
++ }
++}
++
++/*
++ * Post-copyup helper to release all non-directory source objects of a
++ * copied-up file. Regular files should have only one lower object.
++ */
++void unionfs_postcopyup_release(struct dentry *dentry)
++{
++ int bstart, bend;
++
++ BUG_ON(S_ISDIR(dentry->d_inode->i_mode));
++ bstart = dbstart(dentry);
++ bend = dbend(dentry);
++
++ path_put_lowers(dentry, bstart + 1, bend, false);
++ iput_lowers(dentry->d_inode, bstart + 1, bend, false);
++
++ dbend(dentry) = bstart;
++ ibend(dentry->d_inode) = ibstart(dentry->d_inode) = bstart;
++}
+diff --git a/fs/unionfs/debug.c b/fs/unionfs/debug.c
+new file mode 100644
+index 0000000..100d2c6
+--- /dev/null
++++ b/fs/unionfs/debug.c
+@@ -0,0 +1,532 @@
++/*
++ * Copyright (c) 2003-2010 Erez Zadok
++ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2003-2010 Stony Brook University
++ * Copyright (c) 2003-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "union.h"
++
++/*
++ * Helper debugging functions for maintainers (and for users to report
++ * useful information back to maintainers)
++ */
++
++/* it's always useful to know what part of the code called us */
++#define PRINT_CALLER(fname, fxn, line) \
++ do { \
++ if (!printed_caller) { \
++ pr_debug("PC:%s:%s:%d\n", (fname), (fxn), (line)); \
++ printed_caller = 1; \
++ } \
++ } while (0)
++
++/*
++ * __unionfs_check_{inode,dentry,file} perform exhaustive sanity checking on
++ * the fan-out of various Unionfs objects. We check that no lower objects
++ * exist outside the start/end branch range; that all objects within are
++ * non-NULL (with some allowed exceptions); that for every lower file
++ * there's a lower dentry+inode; that the start/end ranges match for all
++ * corresponding lower objects; that open files/symlinks have only one lower
++ * object, but directories can have several; and more.
++ */
++void __unionfs_check_inode(const struct inode *inode,
++ const char *fname, const char *fxn, int line)
++{
++ int bindex;
++ int istart, iend;
++ struct inode *lower_inode;
++ struct super_block *sb;
++ int printed_caller = 0;
++ void *poison_ptr;
++
++ /* for inodes now */
++ BUG_ON(!inode);
++ sb = inode->i_sb;
++ istart = ibstart(inode);
++ iend = ibend(inode);
++ /* don't check inode if no lower branches */
++ if (istart < 0 && iend < 0)
++ return;
++ if (unlikely(istart > iend)) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" Ci0: inode=%p istart/end=%d:%d\n",
++ inode, istart, iend);
++ }
++ if (unlikely((istart == -1 && iend != -1) ||
++ (istart != -1 && iend == -1))) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" Ci1: inode=%p istart/end=%d:%d\n",
++ inode, istart, iend);
++ }
++ if (!S_ISDIR(inode->i_mode)) {
++ if (unlikely(iend != istart)) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" Ci2: inode=%p istart=%d iend=%d\n",
++ inode, istart, iend);
++ }
++ }
++
++ for (bindex = sbstart(sb); bindex < sbmax(sb); bindex++) {
++ if (unlikely(!UNIONFS_I(inode))) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" Ci3: no inode_info %p\n", inode);
++ return;
++ }
++ if (unlikely(!UNIONFS_I(inode)->lower_inodes)) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" Ci4: no lower_inodes %p\n", inode);
++ return;
++ }
++ lower_inode = unionfs_lower_inode_idx(inode, bindex);
++ if (lower_inode) {
++ memset(&poison_ptr, POISON_INUSE, sizeof(void *));
++ if (unlikely(bindex < istart || bindex > iend)) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" Ci5: inode/linode=%p:%p bindex=%d "
++ "istart/end=%d:%d\n", inode,
++ lower_inode, bindex, istart, iend);
++ } else if (unlikely(lower_inode == poison_ptr)) {
++ /* freed inode! */
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" Ci6: inode/linode=%p:%p bindex=%d "
++ "istart/end=%d:%d\n", inode,
++ lower_inode, bindex, istart, iend);
++ }
++ continue;
++ }
++ /* if we get here, then lower_inode == NULL */
++ if (bindex < istart || bindex > iend)
++ continue;
++ /*
++ * directories can have NULL lower inodes in b/t start/end,
++ * but NOT if at the start/end range.
++ */
++ if (unlikely(S_ISDIR(inode->i_mode) &&
++ bindex > istart && bindex < iend))
++ continue;
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" Ci7: inode/linode=%p:%p "
++ "bindex=%d istart/end=%d:%d\n",
++ inode, lower_inode, bindex, istart, iend);
++ }
++}
++
++void __unionfs_check_dentry(const struct dentry *dentry,
++ const char *fname, const char *fxn, int line)
++{
++ int bindex;
++ int dstart, dend, istart, iend;
++ struct dentry *lower_dentry;
++ struct inode *inode, *lower_inode;
++ struct super_block *sb;
++ struct vfsmount *lower_mnt;
++ int printed_caller = 0;
++ void *poison_ptr;
++
++ BUG_ON(!dentry);
++ sb = dentry->d_sb;
++ inode = dentry->d_inode;
++ dstart = dbstart(dentry);
++ dend = dbend(dentry);
++ /* don't check dentry/mnt if no lower branches */
++ if (dstart < 0 && dend < 0)
++ goto check_inode;
++ BUG_ON(dstart > dend);
++
++ if (unlikely((dstart == -1 && dend != -1) ||
++ (dstart != -1 && dend == -1))) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" CD0: dentry=%p dstart/end=%d:%d\n",
++ dentry, dstart, dend);
++ }
++ /*
++ * check for NULL dentries inside the start/end range, or
++ * non-NULL dentries outside the start/end range.
++ */
++ for (bindex = sbstart(sb); bindex < sbmax(sb); bindex++) {
++ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
++ if (lower_dentry) {
++ if (unlikely(bindex < dstart || bindex > dend)) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" CD1: dentry/lower=%p:%p(%p) "
++ "bindex=%d dstart/end=%d:%d\n",
++ dentry, lower_dentry,
++ (lower_dentry ? lower_dentry->d_inode :
++ (void *) -1L),
++ bindex, dstart, dend);
++ }
++ } else { /* lower_dentry == NULL */
++ if (bindex < dstart || bindex > dend)
++ continue;
++ /*
++ * Directories can have NULL lower inodes in b/t
++ * start/end, but NOT if at the start/end range.
++ * Ignore this rule, however, if this is a NULL
++ * dentry or a deleted dentry.
++ */
++ if (unlikely(!d_deleted((struct dentry *) dentry) &&
++ inode &&
++ !(inode && S_ISDIR(inode->i_mode) &&
++ bindex > dstart && bindex < dend))) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" CD2: dentry/lower=%p:%p(%p) "
++ "bindex=%d dstart/end=%d:%d\n",
++ dentry, lower_dentry,
++ (lower_dentry ?
++ lower_dentry->d_inode :
++ (void *) -1L),
++ bindex, dstart, dend);
++ }
++ }
++ }
++
++ /* check for vfsmounts same as for dentries */
++ for (bindex = sbstart(sb); bindex < sbmax(sb); bindex++) {
++ lower_mnt = unionfs_lower_mnt_idx(dentry, bindex);
++ if (lower_mnt) {
++ if (unlikely(bindex < dstart || bindex > dend)) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" CM0: dentry/lmnt=%p:%p bindex=%d "
++ "dstart/end=%d:%d\n", dentry,
++ lower_mnt, bindex, dstart, dend);
++ }
++ } else { /* lower_mnt == NULL */
++ if (bindex < dstart || bindex > dend)
++ continue;
++ /*
++ * Directories can have NULL lower inodes in b/t
++ * start/end, but NOT if at the start/end range.
++ * Ignore this rule, however, if this is a NULL
++ * dentry.
++ */
++ if (unlikely(inode &&
++ !(inode && S_ISDIR(inode->i_mode) &&
++ bindex > dstart && bindex < dend))) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" CM1: dentry/lmnt=%p:%p "
++ "bindex=%d dstart/end=%d:%d\n",
++ dentry, lower_mnt, bindex,
++ dstart, dend);
++ }
++ }
++ }
++
++check_inode:
++ /* for inodes now */
++ if (!inode)
++ return;
++ istart = ibstart(inode);
++ iend = ibend(inode);
++ /* don't check inode if no lower branches */
++ if (istart < 0 && iend < 0)
++ return;
++ BUG_ON(istart > iend);
++ if (unlikely((istart == -1 && iend != -1) ||
++ (istart != -1 && iend == -1))) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" CI0: dentry/inode=%p:%p istart/end=%d:%d\n",
++ dentry, inode, istart, iend);
++ }
++ if (unlikely(istart != dstart)) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" CI1: dentry/inode=%p:%p istart=%d dstart=%d\n",
++ dentry, inode, istart, dstart);
++ }
++ if (unlikely(iend != dend)) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" CI2: dentry/inode=%p:%p iend=%d dend=%d\n",
++ dentry, inode, iend, dend);
++ }
++
++ if (!S_ISDIR(inode->i_mode)) {
++ if (unlikely(dend != dstart)) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" CI3: dentry/inode=%p:%p dstart=%d dend=%d\n",
++ dentry, inode, dstart, dend);
++ }
++ if (unlikely(iend != istart)) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" CI4: dentry/inode=%p:%p istart=%d iend=%d\n",
++ dentry, inode, istart, iend);
++ }
++ }
++
++ for (bindex = sbstart(sb); bindex < sbmax(sb); bindex++) {
++ lower_inode = unionfs_lower_inode_idx(inode, bindex);
++ if (lower_inode) {
++ memset(&poison_ptr, POISON_INUSE, sizeof(void *));
++ if (unlikely(bindex < istart || bindex > iend)) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" CI5: dentry/linode=%p:%p bindex=%d "
++ "istart/end=%d:%d\n", dentry,
++ lower_inode, bindex, istart, iend);
++ } else if (unlikely(lower_inode == poison_ptr)) {
++ /* freed inode! */
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" CI6: dentry/linode=%p:%p bindex=%d "
++ "istart/end=%d:%d\n", dentry,
++ lower_inode, bindex, istart, iend);
++ }
++ continue;
++ }
++ /* if we get here, then lower_inode == NULL */
++ if (bindex < istart || bindex > iend)
++ continue;
++ /*
++ * directories can have NULL lower inodes in b/t start/end,
++ * but NOT if at the start/end range.
++ */
++ if (unlikely(S_ISDIR(inode->i_mode) &&
++ bindex > istart && bindex < iend))
++ continue;
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" CI7: dentry/linode=%p:%p "
++ "bindex=%d istart/end=%d:%d\n",
++ dentry, lower_inode, bindex, istart, iend);
++ }
++
++ /*
++ * If it's a directory, then intermediate objects b/t start/end can
++ * be NULL. But, check that all three are NULL: lower dentry, mnt,
++ * and inode.
++ */
++ if (dstart >= 0 && dend >= 0 && S_ISDIR(inode->i_mode))
++ for (bindex = dstart+1; bindex < dend; bindex++) {
++ lower_inode = unionfs_lower_inode_idx(inode, bindex);
++ lower_dentry = unionfs_lower_dentry_idx(dentry,
++ bindex);
++ lower_mnt = unionfs_lower_mnt_idx(dentry, bindex);
++ if (unlikely(!((lower_inode && lower_dentry &&
++ lower_mnt) ||
++ (!lower_inode &&
++ !lower_dentry && !lower_mnt)))) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" Cx: lmnt/ldentry/linode=%p:%p:%p "
++ "bindex=%d dstart/end=%d:%d\n",
++ lower_mnt, lower_dentry, lower_inode,
++ bindex, dstart, dend);
++ }
++ }
++ /* check if the lower inode is newer than the upper one (it shouldn't be) */
++ if (unlikely(is_newer_lower(dentry) && !is_negative_lower(dentry))) {
++ PRINT_CALLER(fname, fxn, line);
++ for (bindex = ibstart(inode); bindex <= ibend(inode);
++ bindex++) {
++ lower_inode = unionfs_lower_inode_idx(inode, bindex);
++ if (unlikely(!lower_inode))
++ continue;
++ pr_debug(" CI8: bindex=%d mtime/lmtime=%lu.%lu/%lu.%lu "
++ "ctime/lctime=%lu.%lu/%lu.%lu\n",
++ bindex,
++ inode->i_mtime.tv_sec,
++ inode->i_mtime.tv_nsec,
++ lower_inode->i_mtime.tv_sec,
++ lower_inode->i_mtime.tv_nsec,
++ inode->i_ctime.tv_sec,
++ inode->i_ctime.tv_nsec,
++ lower_inode->i_ctime.tv_sec,
++ lower_inode->i_ctime.tv_nsec);
++ }
++ }
++}
++
++void __unionfs_check_file(const struct file *file,
++ const char *fname, const char *fxn, int line)
++{
++ int bindex;
++ int dstart, dend, fstart, fend;
++ struct dentry *dentry;
++ struct file *lower_file;
++ struct inode *inode;
++ struct super_block *sb;
++ int printed_caller = 0;
++
++ BUG_ON(!file);
++ dentry = file->f_path.dentry;
++ sb = dentry->d_sb;
++ dstart = dbstart(dentry);
++ dend = dbend(dentry);
++ BUG_ON(dstart > dend);
++ fstart = fbstart(file);
++ fend = fbend(file);
++ BUG_ON(fstart > fend);
++
++ if (unlikely((fstart == -1 && fend != -1) ||
++ (fstart != -1 && fend == -1))) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" CF0: file/dentry=%p:%p fstart/end=%d:%d\n",
++ file, dentry, fstart, fend);
++ }
++ if (unlikely(fstart != dstart)) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" CF1: file/dentry=%p:%p fstart=%d dstart=%d\n",
++ file, dentry, fstart, dstart);
++ }
++ if (unlikely(fend != dend)) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" CF2: file/dentry=%p:%p fend=%d dend=%d\n",
++ file, dentry, fend, dend);
++ }
++ inode = dentry->d_inode;
++ if (!S_ISDIR(inode->i_mode)) {
++ if (unlikely(fend != fstart)) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" CF3: file/inode=%p:%p fstart=%d fend=%d\n",
++ file, inode, fstart, fend);
++ }
++ if (unlikely(dend != dstart)) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" CF4: file/dentry=%p:%p dstart=%d dend=%d\n",
++ file, dentry, dstart, dend);
++ }
++ }
++
++ /*
++ * check for NULL dentries inside the start/end range, or
++ * non-NULL dentries outside the start/end range.
++ */
++ for (bindex = sbstart(sb); bindex < sbmax(sb); bindex++) {
++ lower_file = unionfs_lower_file_idx(file, bindex);
++ if (lower_file) {
++ if (unlikely(bindex < fstart || bindex > fend)) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" CF5: file/lower=%p:%p bindex=%d "
++ "fstart/end=%d:%d\n", file,
++ lower_file, bindex, fstart, fend);
++ }
++ } else { /* lower_file == NULL */
++ if (bindex >= fstart && bindex <= fend) {
++ /*
++ * directories can have NULL lower inodes in
++ * b/t start/end, but NOT if at the
++ * start/end range.
++ */
++ if (unlikely(!(S_ISDIR(inode->i_mode) &&
++ bindex > fstart &&
++ bindex < fend))) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" CF6: file/lower=%p:%p "
++ "bindex=%d fstart/end=%d:%d\n",
++ file, lower_file, bindex,
++ fstart, fend);
++ }
++ }
++ }
++ }
++
++ __unionfs_check_dentry(dentry, fname, fxn, line);
++}
++
++void __unionfs_check_nd(const struct nameidata *nd,
++ const char *fname, const char *fxn, int line)
++{
++ struct file *file;
++ int printed_caller = 0;
++
++ if (unlikely(!nd))
++ return;
++ if (nd->flags & LOOKUP_OPEN) {
++ file = nd->intent.open.file;
++ if (unlikely(file->f_path.dentry &&
++ strcmp(file->f_path.dentry->d_sb->s_type->name,
++ UNIONFS_NAME))) {
++ PRINT_CALLER(fname, fxn, line);
++ pr_debug(" CND1: lower_file of type %s\n",
++ file->f_path.dentry->d_sb->s_type->name);
++ }
++ }
++}
++
++/* useful to track vfsmount leaks that could cause EBUSY on unmount */
++void __show_branch_counts(const struct super_block *sb,
++ const char *file, const char *fxn, int line)
++{
++ int i;
++ struct vfsmount *mnt;
++
++ pr_debug("BC:");
++ for (i = 0; i < sbmax(sb); i++) {
++ if (likely(sb->s_root))
++ mnt = UNIONFS_D(sb->s_root)->lower_paths[i].mnt;
++ else
++ mnt = NULL;
++ printk(KERN_CONT "%d:",
++ (mnt ? atomic_read(&mnt->mnt_count) : -99));
++ }
++ printk(KERN_CONT "%s:%s:%d\n", file, fxn, line);
++}
++
++void __show_inode_times(const struct inode *inode,
++ const char *file, const char *fxn, int line)
++{
++ struct inode *lower_inode;
++ int bindex;
++
++ for (bindex = ibstart(inode); bindex <= ibend(inode); bindex++) {
++ lower_inode = unionfs_lower_inode_idx(inode, bindex);
++ if (unlikely(!lower_inode))
++ continue;
++ pr_debug("IT(%lu:%d): %s:%s:%d "
++ "um=%lu/%lu lm=%lu/%lu uc=%lu/%lu lc=%lu/%lu\n",
++ inode->i_ino, bindex,
++ file, fxn, line,
++ inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
++ lower_inode->i_mtime.tv_sec,
++ lower_inode->i_mtime.tv_nsec,
++ inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
++ lower_inode->i_ctime.tv_sec,
++ lower_inode->i_ctime.tv_nsec);
++ }
++}
++
++void __show_dinode_times(const struct dentry *dentry,
++ const char *file, const char *fxn, int line)
++{
++ struct inode *inode = dentry->d_inode;
++ struct inode *lower_inode;
++ int bindex;
++
++ for (bindex = ibstart(inode); bindex <= ibend(inode); bindex++) {
++ lower_inode = unionfs_lower_inode_idx(inode, bindex);
++ if (!lower_inode)
++ continue;
++ pr_debug("DT(%s:%lu:%d): %s:%s:%d "
++ "um=%lu/%lu lm=%lu/%lu uc=%lu/%lu lc=%lu/%lu\n",
++ dentry->d_name.name, inode->i_ino, bindex,
++ file, fxn, line,
++ inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
++ lower_inode->i_mtime.tv_sec,
++ lower_inode->i_mtime.tv_nsec,
++ inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
++ lower_inode->i_ctime.tv_sec,
++ lower_inode->i_ctime.tv_nsec);
++ }
++}
++
++void __show_inode_counts(const struct inode *inode,
++ const char *file, const char *fxn, int line)
++{
++ struct inode *lower_inode;
++ int bindex;
++
++ if (unlikely(!inode)) {
++ pr_debug("SiC: Null inode\n");
++ return;
++ }
++ for (bindex = sbstart(inode->i_sb); bindex <= sbend(inode->i_sb);
++ bindex++) {
++ lower_inode = unionfs_lower_inode_idx(inode, bindex);
++ if (unlikely(!lower_inode))
++ continue;
++ pr_debug("SIC(%lu:%d:%d): lc=%d %s:%s:%d\n",
++ inode->i_ino, bindex,
++ atomic_read(&(inode)->i_count),
++ atomic_read(&(lower_inode)->i_count),
++ file, fxn, line);
++ }
++}
+diff --git a/fs/unionfs/dentry.c b/fs/unionfs/dentry.c
+new file mode 100644
+index 0000000..a0c3bba
+--- /dev/null
++++ b/fs/unionfs/dentry.c
+@@ -0,0 +1,397 @@
++/*
++ * Copyright (c) 2003-2010 Erez Zadok
++ * Copyright (c) 2003-2006 Charles P. Wright
++ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2005-2006 Junjiro Okajima
++ * Copyright (c) 2005 Arun M. Krishnakumar
++ * Copyright (c) 2004-2006 David P. Quigley
++ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
++ * Copyright (c) 2003 Puja Gupta
++ * Copyright (c) 2003 Harikesavan Krishnan
++ * Copyright (c) 2003-2010 Stony Brook University
++ * Copyright (c) 2003-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "union.h"
++
++bool is_negative_lower(const struct dentry *dentry)
++{
++ int bindex;
++ struct dentry *lower_dentry;
++
++ BUG_ON(!dentry);
++ /* cache coherency: check if file was deleted on lower branch */
++ if (dbstart(dentry) < 0)
++ return true;
++ for (bindex = dbstart(dentry); bindex <= dbend(dentry); bindex++) {
++ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
++ /* unhashed (i.e., unlinked) lower dentries don't count */
++ if (lower_dentry && lower_dentry->d_inode &&
++ !d_deleted(lower_dentry) &&
++ !(lower_dentry->d_flags & DCACHE_NFSFS_RENAMED))
++ return false;
++ }
++ return true;
++}
++
++static inline void __dput_lowers(struct dentry *dentry, int start, int end)
++{
++ struct dentry *lower_dentry;
++ int bindex;
++
++ if (start < 0)
++ return;
++ for (bindex = start; bindex <= end; bindex++) {
++ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
++ if (!lower_dentry)
++ continue;
++ unionfs_set_lower_dentry_idx(dentry, bindex, NULL);
++ dput(lower_dentry);
++ }
++}
++
++/*
++ * Purge and invalidate as many data pages of a unionfs inode as possible.
++ * This is called when the lower inode has changed, and we want to force
++ * processes to re-get the new data.
++ */
++static inline void purge_inode_data(struct inode *inode)
++{
++ /* remove all non-private mappings */
++ unmap_mapping_range(inode->i_mapping, 0, 0, 0);
++ /* invalidate as many pages as possible */
++ invalidate_mapping_pages(inode->i_mapping, 0, -1);
++ /*
++ * Don't try to truncate_inode_pages here, because this could lead
++ * to a deadlock between some of address_space ops and dentry
++ * revalidation: the address space op is invoked with a lock on our
++ * own page, and truncate_inode_pages will block on locked pages.
++ */
++}
++
++/*
++ * Revalidate a single file/symlink/special dentry. Assume that info nodes
++ * of the @dentry and its @parent are locked. Assume parent is valid,
++ * otherwise return false (and let's hope the VFS will try to re-lookup this
++ * dentry). Returns true if valid, false otherwise.
++ */
++bool __unionfs_d_revalidate(struct dentry *dentry, struct dentry *parent,
++ bool willwrite)
++{
++ bool valid = true; /* default is valid */
++ struct dentry *lower_dentry;
++ struct dentry *result;
++ int bindex, bstart, bend;
++ int sbgen, dgen, pdgen;
++ int positive = 0;
++ int interpose_flag;
++
++ verify_locked(dentry);
++ verify_locked(parent);
++
++ /* if the dentry is unhashed, do NOT revalidate */
++ if (d_deleted(dentry))
++ goto out;
++
++ dgen = atomic_read(&UNIONFS_D(dentry)->generation);
++
++ if (is_newer_lower(dentry)) {
++ /* root dentry is always valid */
++ if (IS_ROOT(dentry)) {
++ unionfs_copy_attr_times(dentry->d_inode);
++ } else {
++ /*
++ * reset generation number to zero, guaranteed to be
++ * "old"
++ */
++ dgen = 0;
++ atomic_set(&UNIONFS_D(dentry)->generation, dgen);
++ }
++ if (!willwrite)
++ purge_inode_data(dentry->d_inode);
++ }
++
++ sbgen = atomic_read(&UNIONFS_SB(dentry->d_sb)->generation);
++
++ BUG_ON(dbstart(dentry) == -1);
++ if (dentry->d_inode)
++ positive = 1;
++
++ /* if our dentry is valid, then validate all lower ones */
++ if (sbgen == dgen)
++ goto validate_lowers;
++
++ /* The root entry should always be valid */
++ BUG_ON(IS_ROOT(dentry));
++
++ /* We can't work correctly if our parent isn't valid. */
++ pdgen = atomic_read(&UNIONFS_D(parent)->generation);
++
++ /* Free the pointers for our inodes and this dentry. */
++ path_put_lowers_all(dentry, false);
++
++ interpose_flag = INTERPOSE_REVAL_NEG;
++ if (positive) {
++ interpose_flag = INTERPOSE_REVAL;
++ iput_lowers_all(dentry->d_inode, true);
++ }
++
++ if (realloc_dentry_private_data(dentry) != 0) {
++ valid = false;
++ goto out;
++ }
++
++ result = unionfs_lookup_full(dentry, parent, interpose_flag);
++ if (result) {
++ if (IS_ERR(result)) {
++ valid = false;
++ goto out;
++ }
++ /*
++ * current unionfs_lookup_backend() doesn't return
++ * a valid dentry
++ */
++ dput(dentry);
++ dentry = result;
++ }
++
++ if (unlikely(positive && is_negative_lower(dentry))) {
++ /* call make_bad_inode here ? */
++ d_drop(dentry);
++ valid = false;
++ goto out;
++ }
++
++ /*
++ * if we got here then we have revalidated our dentry and all lower
++ * ones, so we can return safely.
++ */
++ if (!valid) /* lower dentry revalidation failed */
++ goto out;
++
++ /*
++ * If the parent's gen no. matches the superblock's gen no., then
++ * we can update our dentry's gen no. If they didn't match, then it
++ * was OK to revalidate this dentry with a stale parent, but we'll
++ * purposely not update our dentry's gen no. (so it can be redone);
++ * and, we'll mark our parent dentry as invalid so it'll force it
++ * (and our dentry) to be revalidated.
++ */
++ if (pdgen == sbgen)
++ atomic_set(&UNIONFS_D(dentry)->generation, sbgen);
++ goto out;
++
++validate_lowers:
++
++ /* The revalidation must occur across all branches */
++ bstart = dbstart(dentry);
++ bend = dbend(dentry);
++ BUG_ON(bstart == -1);
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
++ if (!lower_dentry || !lower_dentry->d_op
++ || !lower_dentry->d_op->d_revalidate)
++ continue;
++ /*
++ * Don't pass nameidata to lower file system, because we
++ * don't want an arbitrary lower file being opened or
++ * returned to us: it may be useless to us because of the
++ * fanout nature of unionfs (cf. file/directory open-file
++ * invariants). We will open lower files as and when needed
++ * later on.
++ */
++ if (!lower_dentry->d_op->d_revalidate(lower_dentry, NULL))
++ valid = false;
++ }
++
++ if (!dentry->d_inode ||
++ ibstart(dentry->d_inode) < 0 ||
++ ibend(dentry->d_inode) < 0) {
++ valid = false;
++ goto out;
++ }
++
++ if (valid) {
++ /*
++ * If we get here, and we copy the meta-data from the lower
++ * inode to our inode, then it is vital that we have already
++ * purged all unionfs-level file data. We do that in the
++ * caller (__unionfs_d_revalidate) by calling
++ * purge_inode_data.
++ */
++ unionfs_copy_attr_all(dentry->d_inode,
++ unionfs_lower_inode(dentry->d_inode));
++ fsstack_copy_inode_size(dentry->d_inode,
++ unionfs_lower_inode(dentry->d_inode));
++ }
++
++out:
++ return valid;
++}
++
++/*
++ * Determine if the lower inode objects have changed from below the unionfs
++ * inode. Return true if changed, false otherwise.
++ *
++ * We check if the mtime or ctime have changed. However, the inode times
++ * can be changed by anyone without much protection, including
++ * asynchronously. This can sometimes cause unionfs to find that the lower
++ * file system doesn't change its inode times quickly enough, resulting in a
++ * false positive indication (which is harmless, it just makes unionfs do
++ * extra work in re-validating the objects). To minimize the chances of
++ * these situations, we still consider such small time changes valid, but we
++ * don't print debugging messages unless the time changes are greater than
++ * UNIONFS_MIN_CC_TIME (which defaults to 3 seconds, as with NFS's acregmin)
++ * because significant changes are more likely due to users manually
++ * touching lower files.
++ */
++bool is_newer_lower(const struct dentry *dentry)
++{
++ int bindex;
++ struct inode *inode;
++ struct inode *lower_inode;
++
++ /* ignore if we're called on semi-initialized dentries/inodes */
++ if (!dentry || !UNIONFS_D(dentry))
++ return false;
++ inode = dentry->d_inode;
++ if (!inode || !UNIONFS_I(inode)->lower_inodes ||
++ ibstart(inode) < 0 || ibend(inode) < 0)
++ return false;
++
++ for (bindex = ibstart(inode); bindex <= ibend(inode); bindex++) {
++ lower_inode = unionfs_lower_inode_idx(inode, bindex);
++ if (!lower_inode)
++ continue;
++
++ /* check if mtime/ctime have changed */
++ if (unlikely(timespec_compare(&inode->i_mtime,
++ &lower_inode->i_mtime) < 0)) {
++ if ((lower_inode->i_mtime.tv_sec -
++ inode->i_mtime.tv_sec) > UNIONFS_MIN_CC_TIME) {
++ pr_info("unionfs: new lower inode mtime "
++ "(bindex=%d, name=%s)\n", bindex,
++ dentry->d_name.name);
++ show_dinode_times(dentry);
++ }
++ return true;
++ }
++ if (unlikely(timespec_compare(&inode->i_ctime,
++ &lower_inode->i_ctime) < 0)) {
++ if ((lower_inode->i_ctime.tv_sec -
++ inode->i_ctime.tv_sec) > UNIONFS_MIN_CC_TIME) {
++ pr_info("unionfs: new lower inode ctime "
++ "(bindex=%d, name=%s)\n", bindex,
++ dentry->d_name.name);
++ show_dinode_times(dentry);
++ }
++ return true;
++ }
++ }
++
++ /*
++ * Last check: if this is a positive dentry, but somehow all lower
++ * dentries are negative or unhashed, then this dentry needs to be
++ * revalidated, because someone probably deleted the objects from
++ * the lower branches directly.
++ */
++ if (is_negative_lower(dentry))
++ return true;
++
++ return false; /* default: lower is not newer */
++}
++
++static int unionfs_d_revalidate(struct dentry *dentry,
++ struct nameidata *nd_unused)
++{
++ bool valid = true;
++ int err = 1; /* 1 means valid for the VFS */
++ struct dentry *parent;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ valid = __unionfs_d_revalidate(dentry, parent, false);
++ if (valid) {
++ unionfs_postcopyup_setmnt(dentry);
++ unionfs_check_dentry(dentry);
++ } else {
++ d_drop(dentry);
++ err = valid;
++ }
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++
++ return err;
++}
++
++static void unionfs_d_release(struct dentry *dentry)
++{
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
++ if (unlikely(!UNIONFS_D(dentry)))
++ goto out; /* skip if no lower branches */
++ /* must lock our branch configuration here */
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ unionfs_check_dentry(dentry);
++ /* this could be a negative dentry, so check first */
++ if (dbstart(dentry) < 0) {
++ unionfs_unlock_dentry(dentry);
++ goto out; /* due to a (normal) failed lookup */
++ }
++
++ /* Release all the lower dentries */
++ path_put_lowers_all(dentry, true);
++
++ unionfs_unlock_dentry(dentry);
++
++out:
++ free_dentry_private_data(dentry);
++ unionfs_read_unlock(dentry->d_sb);
++ return;
++}
++
++/*
++ * Called when we're removing the last reference to our dentry. So we
++ * should drop all lower references too.
++ */
++static void unionfs_d_iput(struct dentry *dentry, struct inode *inode)
++{
++ int rc;
++
++ BUG_ON(!dentry);
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ if (!UNIONFS_D(dentry) || dbstart(dentry) < 0)
++ goto drop_lower_inodes;
++ path_put_lowers_all(dentry, false);
++
++drop_lower_inodes:
++ rc = atomic_read(&inode->i_count);
++ if (rc == 1 && inode->i_nlink == 1 && ibstart(inode) >= 0) {
++ /* see Documentation/filesystems/unionfs/issues.txt */
++ lockdep_off();
++ iput(unionfs_lower_inode(inode));
++ lockdep_on();
++ unionfs_set_lower_inode(inode, NULL);
++ /* XXX: may need to set start/end to -1? */
++ }
++
++ iput(inode);
++
++ unionfs_unlock_dentry(dentry);
++ unionfs_read_unlock(dentry->d_sb);
++}
++
++struct dentry_operations unionfs_dops = {
++ .d_revalidate = unionfs_d_revalidate,
++ .d_release = unionfs_d_release,
++ .d_iput = unionfs_d_iput,
++};
+diff --git a/fs/unionfs/dirfops.c b/fs/unionfs/dirfops.c
+new file mode 100644
+index 0000000..7da0ff0
+--- /dev/null
++++ b/fs/unionfs/dirfops.c
+@@ -0,0 +1,302 @@
++/*
++ * Copyright (c) 2003-2010 Erez Zadok
++ * Copyright (c) 2003-2006 Charles P. Wright
++ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2005-2006 Junjiro Okajima
++ * Copyright (c) 2005 Arun M. Krishnakumar
++ * Copyright (c) 2004-2006 David P. Quigley
++ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
++ * Copyright (c) 2003 Puja Gupta
++ * Copyright (c) 2003 Harikesavan Krishnan
++ * Copyright (c) 2003-2010 Stony Brook University
++ * Copyright (c) 2003-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "union.h"
++
++/* Make sure our rdstate is playing by the rules. */
++static void verify_rdstate_offset(struct unionfs_dir_state *rdstate)
++{
++ BUG_ON(rdstate->offset >= DIREOF);
++ BUG_ON(rdstate->cookie >= MAXRDCOOKIE);
++}
++
++struct unionfs_getdents_callback {
++ struct unionfs_dir_state *rdstate;
++ void *dirent;
++ int entries_written;
++ int filldir_called;
++ int filldir_error;
++ filldir_t filldir;
++ struct super_block *sb;
++};
++
++/* based on the generic filldir in fs/readdir.c */
++static int unionfs_filldir(void *dirent, const char *oname, int namelen,
++ loff_t offset, u64 ino, unsigned int d_type)
++{
++ struct unionfs_getdents_callback *buf = dirent;
++ struct filldir_node *found = NULL;
++ int err = 0;
++ int is_whiteout;
++ char *name = (char *) oname;
++
++ buf->filldir_called++;
++
++ is_whiteout = is_whiteout_name(&name, &namelen);
++
++ found = find_filldir_node(buf->rdstate, name, namelen, is_whiteout);
++
++ if (found) {
++ /*
++ * If we had a non-whiteout entry in the dir cache, then mark it
++ * as a whiteout but leave it in the dir cache.
++ */
++ if (is_whiteout && !found->whiteout)
++ found->whiteout = is_whiteout;
++ goto out;
++ }
++
++ /* if 'name' isn't a whiteout, filldir it. */
++ if (!is_whiteout) {
++ off_t pos = rdstate2offset(buf->rdstate);
++ u64 unionfs_ino = ino;
++
++ err = buf->filldir(buf->dirent, name, namelen, pos,
++ unionfs_ino, d_type);
++ buf->rdstate->offset++;
++ verify_rdstate_offset(buf->rdstate);
++ }
++ /*
++ * If we did fill it, stuff it in our hash, otherwise return an
++ * error.
++ */
++ if (err) {
++ buf->filldir_error = err;
++ goto out;
++ }
++ buf->entries_written++;
++ err = add_filldir_node(buf->rdstate, name, namelen,
++ buf->rdstate->bindex, is_whiteout);
++ if (err)
++ buf->filldir_error = err;
++
++out:
++ return err;
++}
++
++static int unionfs_readdir(struct file *file, void *dirent, filldir_t filldir)
++{
++ int err = 0;
++ struct file *lower_file = NULL;
++ struct dentry *dentry = file->f_path.dentry;
++ struct dentry *parent;
++ struct inode *inode = NULL;
++ struct unionfs_getdents_callback buf;
++ struct unionfs_dir_state *uds;
++ int bend;
++ loff_t offset;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ err = unionfs_file_revalidate(file, parent, false);
++ if (unlikely(err))
++ goto out;
++
++ inode = dentry->d_inode;
++
++ uds = UNIONFS_F(file)->rdstate;
++ if (!uds) {
++ if (file->f_pos == DIREOF) {
++ goto out;
++ } else if (file->f_pos > 0) {
++ uds = find_rdstate(inode, file->f_pos);
++ if (unlikely(!uds)) {
++ err = -ESTALE;
++ goto out;
++ }
++ UNIONFS_F(file)->rdstate = uds;
++ } else {
++ init_rdstate(file);
++ uds = UNIONFS_F(file)->rdstate;
++ }
++ }
++ bend = fbend(file);
++
++ while (uds->bindex <= bend) {
++ lower_file = unionfs_lower_file_idx(file, uds->bindex);
++ if (!lower_file) {
++ uds->bindex++;
++ uds->dirpos = 0;
++ continue;
++ }
++
++ /* prepare callback buffer */
++ buf.filldir_called = 0;
++ buf.filldir_error = 0;
++ buf.entries_written = 0;
++ buf.dirent = dirent;
++ buf.filldir = filldir;
++ buf.rdstate = uds;
++ buf.sb = inode->i_sb;
++
++ /* Read starting from where we last left off. */
++ offset = vfs_llseek(lower_file, uds->dirpos, SEEK_SET);
++ if (offset < 0) {
++ err = offset;
++ goto out;
++ }
++ err = vfs_readdir(lower_file, unionfs_filldir, &buf);
++
++ /* Save the position for when we continue. */
++ offset = vfs_llseek(lower_file, 0, SEEK_CUR);
++ if (offset < 0) {
++ err = offset;
++ goto out;
++ }
++ uds->dirpos = offset;
++
++ /* Copy the atime. */
++ fsstack_copy_attr_atime(inode,
++ lower_file->f_path.dentry->d_inode);
++
++ if (err < 0)
++ goto out;
++
++ if (buf.filldir_error)
++ break;
++
++ if (!buf.entries_written) {
++ uds->bindex++;
++ uds->dirpos = 0;
++ }
++ }
++
++ if (!buf.filldir_error && uds->bindex >= bend) {
++ /* Save the number of hash entries for next time. */
++ UNIONFS_I(inode)->hashsize = uds->hashentries;
++ free_rdstate(uds);
++ UNIONFS_F(file)->rdstate = NULL;
++ file->f_pos = DIREOF;
++ } else {
++ file->f_pos = rdstate2offset(uds);
++ }
++
++out:
++ if (!err)
++ unionfs_check_file(file);
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++ return err;
++}
++
++/*
++ * This is not meant to be a generic repositioning function. If you do
++ * things that aren't supported, then we return EINVAL.
++ *
++ * What is allowed:
++ * (1) seeking to the same position that you are currently at
++ * This really has no effect, but returns where you are.
++ * (2) seeking to the beginning of the file
++ * This throws out all state, and lets you begin again.
++ */
++static loff_t unionfs_dir_llseek(struct file *file, loff_t offset, int origin)
++{
++ struct unionfs_dir_state *rdstate;
++ struct dentry *dentry = file->f_path.dentry;
++ struct dentry *parent;
++ loff_t err;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ err = unionfs_file_revalidate(file, parent, false);
++ if (unlikely(err))
++ goto out;
++
++ rdstate = UNIONFS_F(file)->rdstate;
++
++ /*
++ * we let users seek to their current position, but not anywhere
++ * else.
++ */
++ if (!offset) {
++ switch (origin) {
++ case SEEK_SET:
++ if (rdstate) {
++ free_rdstate(rdstate);
++ UNIONFS_F(file)->rdstate = NULL;
++ }
++ init_rdstate(file);
++ err = 0;
++ break;
++ case SEEK_CUR:
++ err = file->f_pos;
++ break;
++ case SEEK_END:
++ /* Unsupported, because we would break everything. */
++ err = -EINVAL;
++ break;
++ }
++ } else {
++ switch (origin) {
++ case SEEK_SET:
++ if (rdstate) {
++ if (offset == rdstate2offset(rdstate))
++ err = offset;
++ else if (file->f_pos == DIREOF)
++ err = DIREOF;
++ else
++ err = -EINVAL;
++ } else {
++ struct inode *inode;
++ inode = dentry->d_inode;
++ rdstate = find_rdstate(inode, offset);
++ if (rdstate) {
++ UNIONFS_F(file)->rdstate = rdstate;
++ err = rdstate->offset;
++ } else {
++ err = -EINVAL;
++ }
++ }
++ break;
++ case SEEK_CUR:
++ case SEEK_END:
++ /* Unsupported, because we would break everything. */
++ err = -EINVAL;
++ break;
++ }
++ }
++
++out:
++ if (!err)
++ unionfs_check_file(file);
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++ return err;
++}
++
++/*
++ * Trimmed directory operations; we shouldn't pass everything down since
++ * we don't want to operate on partial directories.
++ */
++struct file_operations unionfs_dir_fops = {
++ .llseek = unionfs_dir_llseek,
++ .read = generic_read_dir,
++ .readdir = unionfs_readdir,
++ .unlocked_ioctl = unionfs_ioctl,
++ .open = unionfs_open,
++ .release = unionfs_file_release,
++ .flush = unionfs_flush,
++ .fsync = unionfs_fsync,
++ .fasync = unionfs_fasync,
++};
+diff --git a/fs/unionfs/dirhelper.c b/fs/unionfs/dirhelper.c
+new file mode 100644
+index 0000000..033343b
+--- /dev/null
++++ b/fs/unionfs/dirhelper.c
+@@ -0,0 +1,158 @@
++/*
++ * Copyright (c) 2003-2010 Erez Zadok
++ * Copyright (c) 2003-2006 Charles P. Wright
++ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2005-2006 Junjiro Okajima
++ * Copyright (c) 2005 Arun M. Krishnakumar
++ * Copyright (c) 2004-2006 David P. Quigley
++ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
++ * Copyright (c) 2003 Puja Gupta
++ * Copyright (c) 2003 Harikesavan Krishnan
++ * Copyright (c) 2003-2010 Stony Brook University
++ * Copyright (c) 2003-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "union.h"
++
++#define RD_NONE 0
++#define RD_CHECK_EMPTY 1
++/* The callback structure for check_empty. */
++struct unionfs_rdutil_callback {
++ int err;
++ int filldir_called;
++ struct unionfs_dir_state *rdstate;
++ int mode;
++};
++
++/* This filldir function makes sure only whiteouts exist within a directory. */
++static int readdir_util_callback(void *dirent, const char *oname, int namelen,
++ loff_t offset, u64 ino, unsigned int d_type)
++{
++ int err = 0;
++ struct unionfs_rdutil_callback *buf = dirent;
++ int is_whiteout;
++ struct filldir_node *found;
++ char *name = (char *) oname;
++
++ buf->filldir_called = 1;
++
++ if (name[0] == '.' && (namelen == 1 ||
++ (name[1] == '.' && namelen == 2)))
++ goto out;
++
++ is_whiteout = is_whiteout_name(&name, &namelen);
++
++ found = find_filldir_node(buf->rdstate, name, namelen, is_whiteout);
++ /* If it was found in the table there was a previous whiteout. */
++ if (found)
++ goto out;
++
++ /*
++ * if it wasn't found and isn't a whiteout, the directory isn't
++ * empty.
++ */
++ err = -ENOTEMPTY;
++ if ((buf->mode == RD_CHECK_EMPTY) && !is_whiteout)
++ goto out;
++
++ err = add_filldir_node(buf->rdstate, name, namelen,
++ buf->rdstate->bindex, is_whiteout);
++
++out:
++ buf->err = err;
++ return err;
++}
++
++/* Is a directory logically empty? */
++int check_empty(struct dentry *dentry, struct dentry *parent,
++ struct unionfs_dir_state **namelist)
++{
++ int err = 0;
++ struct dentry *lower_dentry = NULL;
++ struct vfsmount *mnt;
++ struct super_block *sb;
++ struct file *lower_file;
++ struct unionfs_rdutil_callback *buf = NULL;
++ int bindex, bstart, bend, bopaque;
++
++ sb = dentry->d_sb;
++
++
++ BUG_ON(!S_ISDIR(dentry->d_inode->i_mode));
++
++ err = unionfs_partial_lookup(dentry, parent);
++ if (err)
++ goto out;
++
++ bstart = dbstart(dentry);
++ bend = dbend(dentry);
++ bopaque = dbopaque(dentry);
++ if (0 <= bopaque && bopaque < bend)
++ bend = bopaque;
++
++ buf = kmalloc(sizeof(struct unionfs_rdutil_callback), GFP_KERNEL);
++ if (unlikely(!buf)) {
++ err = -ENOMEM;
++ goto out;
++ }
++ buf->err = 0;
++ buf->mode = RD_CHECK_EMPTY;
++ buf->rdstate = alloc_rdstate(dentry->d_inode, bstart);
++ if (unlikely(!buf->rdstate)) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ /* Process the lower directories with rdutil_callback as a filldir. */
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
++ if (!lower_dentry)
++ continue;
++ if (!lower_dentry->d_inode)
++ continue;
++ if (!S_ISDIR(lower_dentry->d_inode->i_mode))
++ continue;
++
++ dget(lower_dentry);
++ mnt = unionfs_mntget(dentry, bindex);
++ branchget(sb, bindex);
++ lower_file = dentry_open(lower_dentry, mnt, O_RDONLY, current_cred());
++ if (IS_ERR(lower_file)) {
++ err = PTR_ERR(lower_file);
++ branchput(sb, bindex);
++ goto out;
++ }
++
++ do {
++ buf->filldir_called = 0;
++ buf->rdstate->bindex = bindex;
++ err = vfs_readdir(lower_file,
++ readdir_util_callback, buf);
++ if (buf->err)
++ err = buf->err;
++ } while ((err >= 0) && buf->filldir_called);
++
++ /* fput calls dput for lower_dentry */
++ fput(lower_file);
++ branchput(sb, bindex);
++
++ if (err < 0)
++ goto out;
++ }
++
++out:
++ if (buf) {
++ if (namelist && !err)
++ *namelist = buf->rdstate;
++ else if (buf->rdstate)
++ free_rdstate(buf->rdstate);
++ kfree(buf);
++ }
++
++
++ return err;
++}
+diff --git a/fs/unionfs/fanout.h b/fs/unionfs/fanout.h
+new file mode 100644
+index 0000000..5b77eac
+--- /dev/null
++++ b/fs/unionfs/fanout.h
+@@ -0,0 +1,407 @@
++/*
++ * Copyright (c) 2003-2010 Erez Zadok
++ * Copyright (c) 2003-2006 Charles P. Wright
++ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2005 Arun M. Krishnakumar
++ * Copyright (c) 2004-2006 David P. Quigley
++ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
++ * Copyright (c) 2003 Puja Gupta
++ * Copyright (c) 2003 Harikesavan Krishnan
++ * Copyright (c) 2003-2010 Stony Brook University
++ * Copyright (c) 2003-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef _FANOUT_H_
++#define _FANOUT_H_
++
++/*
++ * Inode to private data
++ *
++ * Since we use containers and the struct inode is _inside_ the
++ * unionfs_inode_info structure, UNIONFS_I will always (given a non-NULL
++ * inode pointer) return a valid non-NULL pointer.
++ */
++static inline struct unionfs_inode_info *UNIONFS_I(const struct inode *inode)
++{
++ return container_of(inode, struct unionfs_inode_info, vfs_inode);
++}
++
++#define ibstart(ino) (UNIONFS_I(ino)->bstart)
++#define ibend(ino) (UNIONFS_I(ino)->bend)
++
++/* Dentry to private data */
++#define UNIONFS_D(dent) ((struct unionfs_dentry_info *)(dent)->d_fsdata)
++#define dbstart(dent) (UNIONFS_D(dent)->bstart)
++#define dbend(dent) (UNIONFS_D(dent)->bend)
++#define dbopaque(dent) (UNIONFS_D(dent)->bopaque)
++
++/* Superblock to private data */
++#define UNIONFS_SB(super) ((struct unionfs_sb_info *)(super)->s_fs_info)
++#define sbstart(sb) 0
++#define sbend(sb) (UNIONFS_SB(sb)->bend)
++#define sbmax(sb) (UNIONFS_SB(sb)->bend + 1)
++#define sbhbid(sb) (UNIONFS_SB(sb)->high_branch_id)
++
++/* File to private Data */
++#define UNIONFS_F(file) ((struct unionfs_file_info *)((file)->private_data))
++#define fbstart(file) (UNIONFS_F(file)->bstart)
++#define fbend(file) (UNIONFS_F(file)->bend)
++
++/* macros to manipulate branch IDs stored in our superblock */
++static inline int branch_id(struct super_block *sb, int index)
++{
++ BUG_ON(!sb || index < 0);
++ return UNIONFS_SB(sb)->data[index].branch_id;
++}
++
++static inline void set_branch_id(struct super_block *sb, int index, int val)
++{
++ BUG_ON(!sb || index < 0);
++ UNIONFS_SB(sb)->data[index].branch_id = val;
++}
++
++static inline void new_branch_id(struct super_block *sb, int index)
++{
++ BUG_ON(!sb || index < 0);
++ set_branch_id(sb, index, ++UNIONFS_SB(sb)->high_branch_id);
++}
++
++/*
++ * Find new index of matching branch with an existing superblock of a known
++ * (possibly old) id. This is needed because branches could have been
++ * added/deleted causing the branches of any open files to shift.
++ *
++ * @sb: the new superblock which may have new/different branch IDs
++ * @id: the old/existing id we're looking for
++ * Returns index of newly found branch (0 or greater), -1 otherwise.
++ */
++static inline int branch_id_to_idx(struct super_block *sb, int id)
++{
++ int i;
++ for (i = 0; i < sbmax(sb); i++) {
++ if (branch_id(sb, i) == id)
++ return i;
++ }
++ /* in the non-ODF code, this should really never happen */
++ printk(KERN_WARNING "unionfs: cannot find branch with id %d\n", id);
++ return -1;
++}
++
++/* File to lower file. */
++static inline struct file *unionfs_lower_file(const struct file *f)
++{
++ BUG_ON(!f);
++ return UNIONFS_F(f)->lower_files[fbstart(f)];
++}
++
++static inline struct file *unionfs_lower_file_idx(const struct file *f,
++ int index)
++{
++ BUG_ON(!f || index < 0);
++ return UNIONFS_F(f)->lower_files[index];
++}
++
++static inline void unionfs_set_lower_file_idx(struct file *f, int index,
++ struct file *val)
++{
++ BUG_ON(!f || index < 0);
++ UNIONFS_F(f)->lower_files[index] = val;
++ /* save branch ID (may be redundant?) */
++ UNIONFS_F(f)->saved_branch_ids[index] =
++ branch_id((f)->f_path.dentry->d_sb, index);
++}
++
++static inline void unionfs_set_lower_file(struct file *f, struct file *val)
++{
++ BUG_ON(!f);
++ unionfs_set_lower_file_idx((f), fbstart(f), (val));
++}
++
++/* Inode to lower inode. */
++static inline struct inode *unionfs_lower_inode(const struct inode *i)
++{
++ BUG_ON(!i);
++ return UNIONFS_I(i)->lower_inodes[ibstart(i)];
++}
++
++static inline struct inode *unionfs_lower_inode_idx(const struct inode *i,
++ int index)
++{
++ BUG_ON(!i || index < 0);
++ return UNIONFS_I(i)->lower_inodes[index];
++}
++
++static inline void unionfs_set_lower_inode_idx(struct inode *i, int index,
++ struct inode *val)
++{
++ BUG_ON(!i || index < 0);
++ UNIONFS_I(i)->lower_inodes[index] = val;
++}
++
++static inline void unionfs_set_lower_inode(struct inode *i, struct inode *val)
++{
++ BUG_ON(!i);
++ UNIONFS_I(i)->lower_inodes[ibstart(i)] = val;
++}
++
++/* Superblock to lower superblock. */
++static inline struct super_block *unionfs_lower_super(
++ const struct super_block *sb)
++{
++ BUG_ON(!sb);
++ return UNIONFS_SB(sb)->data[sbstart(sb)].sb;
++}
++
++static inline struct super_block *unionfs_lower_super_idx(
++ const struct super_block *sb,
++ int index)
++{
++ BUG_ON(!sb || index < 0);
++ return UNIONFS_SB(sb)->data[index].sb;
++}
++
++static inline void unionfs_set_lower_super_idx(struct super_block *sb,
++ int index,
++ struct super_block *val)
++{
++ BUG_ON(!sb || index < 0);
++ UNIONFS_SB(sb)->data[index].sb = val;
++}
++
++static inline void unionfs_set_lower_super(struct super_block *sb,
++ struct super_block *val)
++{
++ BUG_ON(!sb);
++ UNIONFS_SB(sb)->data[sbstart(sb)].sb = val;
++}
++
++/* Branch count macros. */
++static inline int branch_count(const struct super_block *sb, int index)
++{
++ BUG_ON(!sb || index < 0);
++ return atomic_read(&UNIONFS_SB(sb)->data[index].open_files);
++}
++
++static inline void set_branch_count(struct super_block *sb, int index, int val)
++{
++ BUG_ON(!sb || index < 0);
++ atomic_set(&UNIONFS_SB(sb)->data[index].open_files, val);
++}
++
++static inline void branchget(struct super_block *sb, int index)
++{
++ BUG_ON(!sb || index < 0);
++ atomic_inc(&UNIONFS_SB(sb)->data[index].open_files);
++}
++
++static inline void branchput(struct super_block *sb, int index)
++{
++ BUG_ON(!sb || index < 0);
++ atomic_dec(&UNIONFS_SB(sb)->data[index].open_files);
++}
++
++/* Dentry macros */
++static inline void unionfs_set_lower_dentry_idx(struct dentry *dent, int index,
++ struct dentry *val)
++{
++ BUG_ON(!dent || index < 0);
++ UNIONFS_D(dent)->lower_paths[index].dentry = val;
++}
++
++static inline struct dentry *unionfs_lower_dentry_idx(
++ const struct dentry *dent,
++ int index)
++{
++ BUG_ON(!dent || index < 0);
++ return UNIONFS_D(dent)->lower_paths[index].dentry;
++}
++
++static inline struct dentry *unionfs_lower_dentry(const struct dentry *dent)
++{
++ BUG_ON(!dent);
++ return unionfs_lower_dentry_idx(dent, dbstart(dent));
++}
++
++static inline void unionfs_set_lower_mnt_idx(struct dentry *dent, int index,
++ struct vfsmount *mnt)
++{
++ BUG_ON(!dent || index < 0);
++ UNIONFS_D(dent)->lower_paths[index].mnt = mnt;
++}
++
++static inline struct vfsmount *unionfs_lower_mnt_idx(
++ const struct dentry *dent,
++ int index)
++{
++ BUG_ON(!dent || index < 0);
++ return UNIONFS_D(dent)->lower_paths[index].mnt;
++}
++
++static inline struct vfsmount *unionfs_lower_mnt(const struct dentry *dent)
++{
++ BUG_ON(!dent);
++ return unionfs_lower_mnt_idx(dent, dbstart(dent));
++}
++
++/* Macros for locking a dentry. */
++enum unionfs_dentry_lock_class {
++ UNIONFS_DMUTEX_NORMAL,
++ UNIONFS_DMUTEX_ROOT,
++ UNIONFS_DMUTEX_PARENT,
++ UNIONFS_DMUTEX_CHILD,
++ UNIONFS_DMUTEX_WHITEOUT,
++ UNIONFS_DMUTEX_REVAL_PARENT, /* for file/dentry revalidate */
++ UNIONFS_DMUTEX_REVAL_CHILD, /* for file/dentry revalidate */
++};
++
++static inline void unionfs_lock_dentry(struct dentry *d,
++ unsigned int subclass)
++{
++ BUG_ON(!d);
++ mutex_lock_nested(&UNIONFS_D(d)->lock, subclass);
++}
++
++static inline void unionfs_unlock_dentry(struct dentry *d)
++{
++ BUG_ON(!d);
++ mutex_unlock(&UNIONFS_D(d)->lock);
++}
++
++static inline struct dentry *unionfs_lock_parent(struct dentry *d,
++ unsigned int subclass)
++{
++ struct dentry *p;
++
++ BUG_ON(!d);
++ p = dget_parent(d);
++ if (p != d)
++ mutex_lock_nested(&UNIONFS_D(p)->lock, subclass);
++ return p;
++}
++
++static inline void unionfs_unlock_parent(struct dentry *d, struct dentry *p)
++{
++ BUG_ON(!d);
++ BUG_ON(!p);
++ if (p != d) {
++ BUG_ON(!mutex_is_locked(&UNIONFS_D(p)->lock));
++ mutex_unlock(&UNIONFS_D(p)->lock);
++ }
++ dput(p);
++}
++
++static inline void verify_locked(struct dentry *d)
++{
++ BUG_ON(!d);
++ BUG_ON(!mutex_is_locked(&UNIONFS_D(d)->lock));
++}
++
++/* macros to put lower objects */
++
++/*
++ * iput the lower inodes of a unionfs inode, from bstart to bend. If
++ * @free_lower is true, then also kfree the memory used to hold the lower
++ * object pointers.
++ */
++static inline void iput_lowers(struct inode *inode,
++ int bstart, int bend, bool free_lower)
++{
++ struct inode *lower_inode;
++ int bindex;
++
++ BUG_ON(!inode);
++ BUG_ON(!UNIONFS_I(inode));
++ BUG_ON(bstart < 0);
++
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ lower_inode = unionfs_lower_inode_idx(inode, bindex);
++ if (lower_inode) {
++ unionfs_set_lower_inode_idx(inode, bindex, NULL);
++ /* see Documentation/filesystems/unionfs/issues.txt */
++ lockdep_off();
++ iput(lower_inode);
++ lockdep_on();
++ }
++ }
++
++ if (free_lower) {
++ kfree(UNIONFS_I(inode)->lower_inodes);
++ UNIONFS_I(inode)->lower_inodes = NULL;
++ }
++}
++
++/* iput all lower inodes, and reset start/end branch indices to -1 */
++static inline void iput_lowers_all(struct inode *inode, bool free_lower)
++{
++ int bstart, bend;
++
++ BUG_ON(!inode);
++ BUG_ON(!UNIONFS_I(inode));
++ bstart = ibstart(inode);
++ bend = ibend(inode);
++ BUG_ON(bstart < 0);
++
++ iput_lowers(inode, bstart, bend, free_lower);
++ ibstart(inode) = ibend(inode) = -1;
++}
++
++/*
++ * dput/mntput all lower dentries and vfsmounts of a unionfs dentry, from
++ * bstart to bend. If @free_lower is true, then also kfree the memory used
++ * to hold the lower object pointers.
++ *
++ * XXX: implement using path_put VFS macros
++ */
++static inline void path_put_lowers(struct dentry *dentry,
++ int bstart, int bend, bool free_lower)
++{
++ struct dentry *lower_dentry;
++ struct vfsmount *lower_mnt;
++ int bindex;
++
++ BUG_ON(!dentry);
++ BUG_ON(!UNIONFS_D(dentry));
++ BUG_ON(bstart < 0);
++
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
++ if (lower_dentry) {
++ unionfs_set_lower_dentry_idx(dentry, bindex, NULL);
++ dput(lower_dentry);
++ }
++ lower_mnt = unionfs_lower_mnt_idx(dentry, bindex);
++ if (lower_mnt) {
++ unionfs_set_lower_mnt_idx(dentry, bindex, NULL);
++ mntput(lower_mnt);
++ }
++ }
++
++ if (free_lower) {
++ kfree(UNIONFS_D(dentry)->lower_paths);
++ UNIONFS_D(dentry)->lower_paths = NULL;
++ }
++}
++
++/*
++ * dput/mntput all lower dentries and vfsmounts, and reset start/end branch
++ * indices to -1.
++ */
++static inline void path_put_lowers_all(struct dentry *dentry, bool free_lower)
++{
++ int bstart, bend;
++
++ BUG_ON(!dentry);
++ BUG_ON(!UNIONFS_D(dentry));
++ bstart = dbstart(dentry);
++ bend = dbend(dentry);
++ BUG_ON(bstart < 0);
++
++ path_put_lowers(dentry, bstart, bend, free_lower);
++ dbstart(dentry) = dbend(dentry) = -1;
++}
++
++#endif /* not _FANOUT_H */
+diff --git a/fs/unionfs/file.c b/fs/unionfs/file.c
+new file mode 100644
+index 0000000..1c694c3
+--- /dev/null
++++ b/fs/unionfs/file.c
+@@ -0,0 +1,382 @@
++/*
++ * Copyright (c) 2003-2010 Erez Zadok
++ * Copyright (c) 2003-2006 Charles P. Wright
++ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2005-2006 Junjiro Okajima
++ * Copyright (c) 2005 Arun M. Krishnakumar
++ * Copyright (c) 2004-2006 David P. Quigley
++ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
++ * Copyright (c) 2003 Puja Gupta
++ * Copyright (c) 2003 Harikesavan Krishnan
++ * Copyright (c) 2003-2010 Stony Brook University
++ * Copyright (c) 2003-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "union.h"
++
++static ssize_t unionfs_read(struct file *file, char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ int err;
++ struct file *lower_file;
++ struct dentry *dentry = file->f_path.dentry;
++ struct dentry *parent;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ err = unionfs_file_revalidate(file, parent, false);
++ if (unlikely(err))
++ goto out;
++
++ lower_file = unionfs_lower_file(file);
++ err = vfs_read(lower_file, buf, count, ppos);
++ /* update our inode atime upon a successful lower read */
++ if (err >= 0) {
++ fsstack_copy_attr_atime(dentry->d_inode,
++ lower_file->f_path.dentry->d_inode);
++ unionfs_check_file(file);
++ }
++
++out:
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++ return err;
++}
++
++static ssize_t unionfs_write(struct file *file, const char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ int err = 0;
++ struct file *lower_file;
++ struct dentry *dentry = file->f_path.dentry;
++ struct dentry *parent;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ err = unionfs_file_revalidate(file, parent, true);
++ if (unlikely(err))
++ goto out;
++
++ lower_file = unionfs_lower_file(file);
++ err = vfs_write(lower_file, buf, count, ppos);
++ /* update our inode times+sizes upon a successful lower write */
++ if (err >= 0) {
++ fsstack_copy_inode_size(dentry->d_inode,
++ lower_file->f_path.dentry->d_inode);
++ fsstack_copy_attr_times(dentry->d_inode,
++ lower_file->f_path.dentry->d_inode);
++ UNIONFS_F(file)->wrote_to_file = true; /* for delayed copyup */
++ unionfs_check_file(file);
++ }
++
++out:
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++ return err;
++}
++
++static int unionfs_file_readdir(struct file *file, void *dirent,
++ filldir_t filldir)
++{
++ return -ENOTDIR;
++}
++
++static int unionfs_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ int err = 0;
++ bool willwrite;
++ struct file *lower_file;
++ struct dentry *dentry = file->f_path.dentry;
++ struct dentry *parent;
++ const struct vm_operations_struct *saved_vm_ops = NULL;
++
++ /*
++ * Since mm/memory.c:might_fault() (under PROVE_LOCKING) was
++ * modified in 2.6.29-rc1 to call might_lock_read on mmap_sem, this
++ * has been causing false positives in file system stacking layers.
++ * In particular, our ->mmap is called after sys_mmap2 already holds
++ * mmap_sem, then we lock our own mutexes; but earlier, it's
++ * possible for lockdep to have locked our mutexes first, and then
++ * we call a lower ->readdir which could call might_fault. The
++ * different ordering of the locks is what lockdep complains about
++ * -- unnecessarily. Therefore, we have no choice but to tell
++ * lockdep to temporarily turn off lockdep here. Note: the comments
++ * inside might_sleep also suggest that it would have been
++	 * nicer to only annotate paths that actually need might_lock_read.
++ */
++ lockdep_off();
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ /* This might be deferred to mmap's writepage */
++ willwrite = ((vma->vm_flags | VM_SHARED | VM_WRITE) == vma->vm_flags);
++ err = unionfs_file_revalidate(file, parent, willwrite);
++ if (unlikely(err))
++ goto out;
++ unionfs_check_file(file);
++
++ /*
++ * File systems which do not implement ->writepage may use
++ * generic_file_readonly_mmap as their ->mmap op. If you call
++ * generic_file_readonly_mmap with VM_WRITE, you'd get an -EINVAL.
++ * But we cannot call the lower ->mmap op, so we can't tell that
++ * writeable mappings won't work. Therefore, our only choice is to
++ * check if the lower file system supports the ->writepage, and if
++ * not, return EINVAL (the same error that
++ * generic_file_readonly_mmap returns in that case).
++ */
++ lower_file = unionfs_lower_file(file);
++ if (willwrite && !lower_file->f_mapping->a_ops->writepage) {
++ err = -EINVAL;
++ printk(KERN_ERR "unionfs: branch %d file system does not "
++ "support writeable mmap\n", fbstart(file));
++ goto out;
++ }
++
++ /*
++ * find and save lower vm_ops.
++ *
++ * XXX: the VFS should have a cleaner way of finding the lower vm_ops
++ */
++ if (!UNIONFS_F(file)->lower_vm_ops) {
++ err = lower_file->f_op->mmap(lower_file, vma);
++ if (err) {
++ printk(KERN_ERR "unionfs: lower mmap failed %d\n", err);
++ goto out;
++ }
++ saved_vm_ops = vma->vm_ops;
++ err = do_munmap(current->mm, vma->vm_start,
++ vma->vm_end - vma->vm_start);
++ if (err) {
++ printk(KERN_ERR "unionfs: do_munmap failed %d\n", err);
++ goto out;
++ }
++ }
++
++ file->f_mapping->a_ops = &unionfs_dummy_aops;
++ err = generic_file_mmap(file, vma);
++ file->f_mapping->a_ops = &unionfs_aops;
++ if (err) {
++ printk(KERN_ERR "unionfs: generic_file_mmap failed %d\n", err);
++ goto out;
++ }
++ vma->vm_ops = &unionfs_vm_ops;
++ if (!UNIONFS_F(file)->lower_vm_ops)
++ UNIONFS_F(file)->lower_vm_ops = saved_vm_ops;
++
++out:
++ if (!err) {
++ /* copyup could cause parent dir times to change */
++ unionfs_copy_attr_times(parent->d_inode);
++ unionfs_check_file(file);
++ }
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++ lockdep_on();
++ return err;
++}
++
++int unionfs_fsync(struct file *file, int datasync)
++{
++ int bindex, bstart, bend;
++ struct file *lower_file;
++ struct dentry *dentry = file->f_path.dentry;
++ struct dentry *lower_dentry;
++ struct dentry *parent;
++ struct inode *lower_inode, *inode;
++ int err = -EINVAL;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ err = unionfs_file_revalidate(file, parent, true);
++ if (unlikely(err))
++ goto out;
++ unionfs_check_file(file);
++
++ bstart = fbstart(file);
++ bend = fbend(file);
++ if (bstart < 0 || bend < 0)
++ goto out;
++
++ inode = dentry->d_inode;
++ if (unlikely(!inode)) {
++ printk(KERN_ERR
++ "unionfs: null lower inode in unionfs_fsync\n");
++ goto out;
++ }
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ lower_inode = unionfs_lower_inode_idx(inode, bindex);
++ if (!lower_inode || !lower_inode->i_fop->fsync)
++ continue;
++ lower_file = unionfs_lower_file_idx(file, bindex);
++ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
++ mutex_lock(&lower_inode->i_mutex);
++ err = lower_inode->i_fop->fsync(lower_file, datasync);
++ if (!err && bindex == bstart)
++ fsstack_copy_attr_times(inode, lower_inode);
++ mutex_unlock(&lower_inode->i_mutex);
++ if (err)
++ goto out;
++ }
++
++out:
++ if (!err)
++ unionfs_check_file(file);
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++ return err;
++}
++
++int unionfs_fasync(int fd, struct file *file, int flag)
++{
++ int bindex, bstart, bend;
++ struct file *lower_file;
++ struct dentry *dentry = file->f_path.dentry;
++ struct dentry *parent;
++ struct inode *lower_inode, *inode;
++ int err = 0;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ err = unionfs_file_revalidate(file, parent, true);
++ if (unlikely(err))
++ goto out;
++ unionfs_check_file(file);
++
++ bstart = fbstart(file);
++ bend = fbend(file);
++ if (bstart < 0 || bend < 0)
++ goto out;
++
++ inode = dentry->d_inode;
++ if (unlikely(!inode)) {
++ printk(KERN_ERR
++ "unionfs: null lower inode in unionfs_fasync\n");
++ goto out;
++ }
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ lower_inode = unionfs_lower_inode_idx(inode, bindex);
++ if (!lower_inode || !lower_inode->i_fop->fasync)
++ continue;
++ lower_file = unionfs_lower_file_idx(file, bindex);
++ mutex_lock(&lower_inode->i_mutex);
++ err = lower_inode->i_fop->fasync(fd, lower_file, flag);
++ if (!err && bindex == bstart)
++ fsstack_copy_attr_times(inode, lower_inode);
++ mutex_unlock(&lower_inode->i_mutex);
++ if (err)
++ goto out;
++ }
++
++out:
++ if (!err)
++ unionfs_check_file(file);
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++ return err;
++}
++
++static ssize_t unionfs_splice_read(struct file *file, loff_t *ppos,
++ struct pipe_inode_info *pipe, size_t len,
++ unsigned int flags)
++{
++ ssize_t err;
++ struct file *lower_file;
++ struct dentry *dentry = file->f_path.dentry;
++ struct dentry *parent;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ err = unionfs_file_revalidate(file, parent, false);
++ if (unlikely(err))
++ goto out;
++
++ lower_file = unionfs_lower_file(file);
++ err = vfs_splice_to(lower_file, ppos, pipe, len, flags);
++ /* update our inode atime upon a successful lower splice-read */
++ if (err >= 0) {
++ fsstack_copy_attr_atime(dentry->d_inode,
++ lower_file->f_path.dentry->d_inode);
++ unionfs_check_file(file);
++ }
++
++out:
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++ return err;
++}
++
++static ssize_t unionfs_splice_write(struct pipe_inode_info *pipe,
++ struct file *file, loff_t *ppos,
++ size_t len, unsigned int flags)
++{
++ ssize_t err = 0;
++ struct file *lower_file;
++ struct dentry *dentry = file->f_path.dentry;
++ struct dentry *parent;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ err = unionfs_file_revalidate(file, parent, true);
++ if (unlikely(err))
++ goto out;
++
++ lower_file = unionfs_lower_file(file);
++ err = vfs_splice_from(pipe, lower_file, ppos, len, flags);
++ /* update our inode times+sizes upon a successful lower write */
++ if (err >= 0) {
++ fsstack_copy_inode_size(dentry->d_inode,
++ lower_file->f_path.dentry->d_inode);
++ fsstack_copy_attr_times(dentry->d_inode,
++ lower_file->f_path.dentry->d_inode);
++ unionfs_check_file(file);
++ }
++
++out:
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++ return err;
++}
++
++struct file_operations unionfs_main_fops = {
++ .llseek = generic_file_llseek,
++ .read = unionfs_read,
++ .write = unionfs_write,
++ .readdir = unionfs_file_readdir,
++ .unlocked_ioctl = unionfs_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = unionfs_ioctl,
++#endif
++ .mmap = unionfs_mmap,
++ .open = unionfs_open,
++ .flush = unionfs_flush,
++ .release = unionfs_file_release,
++ .fsync = unionfs_fsync,
++ .fasync = unionfs_fasync,
++ .splice_read = unionfs_splice_read,
++ .splice_write = unionfs_splice_write,
++};
+diff --git a/fs/unionfs/inode.c b/fs/unionfs/inode.c
+new file mode 100644
+index 0000000..4c36f16
+--- /dev/null
++++ b/fs/unionfs/inode.c
+@@ -0,0 +1,1061 @@
++/*
++ * Copyright (c) 2003-2010 Erez Zadok
++ * Copyright (c) 2003-2006 Charles P. Wright
++ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2005-2006 Junjiro Okajima
++ * Copyright (c) 2005 Arun M. Krishnakumar
++ * Copyright (c) 2004-2006 David P. Quigley
++ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
++ * Copyright (c) 2003 Puja Gupta
++ * Copyright (c) 2003 Harikesavan Krishnan
++ * Copyright (c) 2003-2010 Stony Brook University
++ * Copyright (c) 2003-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "union.h"
++
++/*
++ * Find a writeable branch to create a new object in. Checks all writeable
++ * branches of the parent inode, in istart-to-iend order; if none are
++ * suitable, also tries branch 0 (which may require a copyup).
++ *
++ * Return a lower_dentry we can use to create the object in, or an ERR_PTR.
++ */
++static struct dentry *find_writeable_branch(struct inode *parent,
++ struct dentry *dentry)
++{
++ int err = -EINVAL;
++ int bindex, istart, iend;
++ struct dentry *lower_dentry = NULL;
++
++ istart = ibstart(parent);
++ iend = ibend(parent);
++ if (istart < 0)
++ goto out;
++
++begin:
++ for (bindex = istart; bindex <= iend; bindex++) {
++ /* skip non-writeable branches */
++ err = is_robranch_super(dentry->d_sb, bindex);
++ if (err) {
++ err = -EROFS;
++ continue;
++ }
++ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
++ if (!lower_dentry)
++ continue;
++ /*
++ * check for whiteouts in writeable branch, and remove them
++ * if necessary.
++ */
++ err = check_unlink_whiteout(dentry, lower_dentry, bindex);
++ if (err > 0) /* ignore if whiteout found and removed */
++ err = 0;
++ if (err)
++ continue;
++		/* if we get here, we can write to the branch */
++ break;
++ }
++ /*
++ * If istart wasn't already branch 0, and we got any error, then try
++ * branch 0 (which may require copyup)
++ */
++ if (err && istart > 0) {
++ istart = iend = 0;
++ goto begin;
++ }
++
++ /*
++ * If we tried even branch 0, and still got an error, abort. But if
++ * the error was an EROFS, then we should try to copyup.
++ */
++ if (err && err != -EROFS)
++ goto out;
++
++ /*
++ * If we get here, then check if copyup needed. If lower_dentry is
++ * NULL, create the entire dentry directory structure in branch 0.
++ */
++ if (!lower_dentry) {
++ bindex = 0;
++ lower_dentry = create_parents(parent, dentry,
++ dentry->d_name.name, bindex);
++ if (IS_ERR(lower_dentry)) {
++ err = PTR_ERR(lower_dentry);
++ goto out;
++ }
++ }
++ err = 0; /* all's well */
++out:
++ if (err)
++ return ERR_PTR(err);
++ return lower_dentry;
++}
++
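++/*
++ * Usage sketch: the create/symlink/mknod methods below all follow the same
++ * pattern around find_writeable_branch():
++ *
++ *	lower_dentry = find_writeable_branch(dir, dentry);
++ *	if (IS_ERR(lower_dentry))
++ *		err = PTR_ERR(lower_dentry);	(then bail out)
++ *	lower_parent_dentry = lock_parent(lower_dentry);
++ *	... call vfs_create/vfs_symlink/vfs_mknod on the lower objects ...
++ *	unlock_dir(lower_parent_dentry);
++ */
++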
++static int unionfs_create(struct inode *dir, struct dentry *dentry,
++ int mode, struct nameidata *nd_unused)
++{
++ int err = 0;
++ struct dentry *lower_dentry = NULL;
++ struct dentry *lower_parent_dentry = NULL;
++ struct dentry *parent;
++ int valid = 0;
++ struct nameidata lower_nd;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ valid = __unionfs_d_revalidate(dentry, parent, false);
++ if (unlikely(!valid)) {
++ err = -ESTALE; /* same as what real_lookup does */
++ goto out;
++ }
++
++ lower_dentry = find_writeable_branch(dir, dentry);
++ if (IS_ERR(lower_dentry)) {
++ err = PTR_ERR(lower_dentry);
++ goto out;
++ }
++
++ lower_parent_dentry = lock_parent(lower_dentry);
++ if (IS_ERR(lower_parent_dentry)) {
++ err = PTR_ERR(lower_parent_dentry);
++ goto out_unlock;
++ }
++
++ err = init_lower_nd(&lower_nd, LOOKUP_CREATE);
++ if (unlikely(err < 0))
++ goto out_unlock;
++ err = vfs_create(lower_parent_dentry->d_inode, lower_dentry, mode,
++ &lower_nd);
++ release_lower_nd(&lower_nd, err);
++
++ if (!err) {
++ err = PTR_ERR(unionfs_interpose(dentry, dir->i_sb, 0));
++ if (!err) {
++ unionfs_copy_attr_times(dir);
++ fsstack_copy_inode_size(dir,
++ lower_parent_dentry->d_inode);
++ /* update no. of links on parent directory */
++ dir->i_nlink = unionfs_get_nlinks(dir);
++ }
++ }
++
++out_unlock:
++ unlock_dir(lower_parent_dentry);
++out:
++ if (!err) {
++ unionfs_postcopyup_setmnt(dentry);
++ unionfs_check_inode(dir);
++ unionfs_check_dentry(dentry);
++ }
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++ return err;
++}
++
++/*
++ * unionfs_lookup is the only special function which takes a dentry, yet we
++ * do NOT want to call __unionfs_d_revalidate because, by definition,
++ * we don't have a valid dentry here yet.
++ */
++static struct dentry *unionfs_lookup(struct inode *dir,
++ struct dentry *dentry,
++ struct nameidata *nd_unused)
++{
++ struct dentry *ret, *parent;
++ int err = 0;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++
++ /*
++	 * As long as we lock/dget the parent, we can skip validating the
++ * parent now; we may have to rebuild this dentry on the next
++ * ->d_revalidate, however.
++ */
++
++ /* allocate dentry private data. We free it in ->d_release */
++ err = new_dentry_private_data(dentry, UNIONFS_DMUTEX_CHILD);
++ if (unlikely(err)) {
++ ret = ERR_PTR(err);
++ goto out;
++ }
++
++ ret = unionfs_lookup_full(dentry, parent, INTERPOSE_LOOKUP);
++
++ if (!IS_ERR(ret)) {
++ if (ret)
++ dentry = ret;
++ /* lookup_full can return multiple positive dentries */
++ if (dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode)) {
++ BUG_ON(dbstart(dentry) < 0);
++ unionfs_postcopyup_release(dentry);
++ }
++ unionfs_copy_attr_times(dentry->d_inode);
++ }
++
++ unionfs_check_inode(dir);
++ if (!IS_ERR(ret))
++ unionfs_check_dentry(dentry);
++ unionfs_check_dentry(parent);
++ unionfs_unlock_dentry(dentry); /* locked in new_dentry_private data */
++
++out:
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++
++ return ret;
++}
++
++static int unionfs_link(struct dentry *old_dentry, struct inode *dir,
++ struct dentry *new_dentry)
++{
++ int err = 0;
++ struct dentry *lower_old_dentry = NULL;
++ struct dentry *lower_new_dentry = NULL;
++ struct dentry *lower_dir_dentry = NULL;
++ struct dentry *old_parent, *new_parent;
++ char *name = NULL;
++ bool valid;
++
++ unionfs_read_lock(old_dentry->d_sb, UNIONFS_SMUTEX_CHILD);
++ old_parent = dget_parent(old_dentry);
++ new_parent = dget_parent(new_dentry);
++ unionfs_double_lock_parents(old_parent, new_parent);
++ unionfs_double_lock_dentry(old_dentry, new_dentry);
++
++ valid = __unionfs_d_revalidate(old_dentry, old_parent, false);
++ if (unlikely(!valid)) {
++ err = -ESTALE;
++ goto out;
++ }
++ if (new_dentry->d_inode) {
++ valid = __unionfs_d_revalidate(new_dentry, new_parent, false);
++ if (unlikely(!valid)) {
++ err = -ESTALE;
++ goto out;
++ }
++ }
++
++ lower_new_dentry = unionfs_lower_dentry(new_dentry);
++
++ /* check for a whiteout in new dentry branch, and delete it */
++ err = check_unlink_whiteout(new_dentry, lower_new_dentry,
++ dbstart(new_dentry));
++ if (err > 0) { /* whiteout found and removed successfully */
++ lower_dir_dentry = dget_parent(lower_new_dentry);
++ fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
++ dput(lower_dir_dentry);
++ dir->i_nlink = unionfs_get_nlinks(dir);
++ err = 0;
++ }
++ if (err)
++ goto out;
++
++	/* check if the parent hierarchy is needed, then link in the same branch */
++ if (dbstart(old_dentry) != dbstart(new_dentry)) {
++ lower_new_dentry = create_parents(dir, new_dentry,
++ new_dentry->d_name.name,
++ dbstart(old_dentry));
++ err = PTR_ERR(lower_new_dentry);
++ if (IS_COPYUP_ERR(err))
++ goto docopyup;
++ if (!lower_new_dentry || IS_ERR(lower_new_dentry))
++ goto out;
++ }
++ lower_new_dentry = unionfs_lower_dentry(new_dentry);
++ lower_old_dentry = unionfs_lower_dentry(old_dentry);
++
++ BUG_ON(dbstart(old_dentry) != dbstart(new_dentry));
++ lower_dir_dentry = lock_parent(lower_new_dentry);
++ err = is_robranch(old_dentry);
++ if (!err) {
++ /* see Documentation/filesystems/unionfs/issues.txt */
++ lockdep_off();
++ err = vfs_link(lower_old_dentry, lower_dir_dentry->d_inode,
++ lower_new_dentry);
++ lockdep_on();
++ }
++ unlock_dir(lower_dir_dentry);
++
++docopyup:
++ if (IS_COPYUP_ERR(err)) {
++ int old_bstart = dbstart(old_dentry);
++ int bindex;
++
++ for (bindex = old_bstart - 1; bindex >= 0; bindex--) {
++ err = copyup_dentry(old_parent->d_inode,
++ old_dentry, old_bstart,
++ bindex, old_dentry->d_name.name,
++ old_dentry->d_name.len, NULL,
++ i_size_read(old_dentry->d_inode));
++ if (err)
++ continue;
++ lower_new_dentry =
++ create_parents(dir, new_dentry,
++ new_dentry->d_name.name,
++ bindex);
++ lower_old_dentry = unionfs_lower_dentry(old_dentry);
++ lower_dir_dentry = lock_parent(lower_new_dentry);
++ /* see Documentation/filesystems/unionfs/issues.txt */
++ lockdep_off();
++ /* do vfs_link */
++ err = vfs_link(lower_old_dentry,
++ lower_dir_dentry->d_inode,
++ lower_new_dentry);
++ lockdep_on();
++ unlock_dir(lower_dir_dentry);
++ goto check_link;
++ }
++ goto out;
++ }
++
++check_link:
++ if (err || !lower_new_dentry->d_inode)
++ goto out;
++
++	/* It's a hard link, so use the same inode */
++ new_dentry->d_inode = igrab(old_dentry->d_inode);
++ d_add(new_dentry, new_dentry->d_inode);
++ unionfs_copy_attr_all(dir, lower_new_dentry->d_parent->d_inode);
++ fsstack_copy_inode_size(dir, lower_new_dentry->d_parent->d_inode);
++
++ /* propagate number of hard-links */
++ old_dentry->d_inode->i_nlink = unionfs_get_nlinks(old_dentry->d_inode);
++ /* new dentry's ctime may have changed due to hard-link counts */
++ unionfs_copy_attr_times(new_dentry->d_inode);
++
++out:
++ if (!new_dentry->d_inode)
++ d_drop(new_dentry);
++
++ kfree(name);
++ if (!err)
++ unionfs_postcopyup_setmnt(new_dentry);
++
++ unionfs_check_inode(dir);
++ unionfs_check_dentry(new_dentry);
++ unionfs_check_dentry(old_dentry);
++
++ unionfs_double_unlock_dentry(old_dentry, new_dentry);
++ unionfs_double_unlock_parents(old_parent, new_parent);
++ dput(new_parent);
++ dput(old_parent);
++ unionfs_read_unlock(old_dentry->d_sb);
++
++ return err;
++}
++
++static int unionfs_symlink(struct inode *dir, struct dentry *dentry,
++ const char *symname)
++{
++ int err = 0;
++ struct dentry *lower_dentry = NULL;
++ struct dentry *wh_dentry = NULL;
++ struct dentry *lower_parent_dentry = NULL;
++ struct dentry *parent;
++ char *name = NULL;
++ int valid = 0;
++ umode_t mode;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ valid = __unionfs_d_revalidate(dentry, parent, false);
++ if (unlikely(!valid)) {
++ err = -ESTALE;
++ goto out;
++ }
++
++ /*
++ * It's only a bug if this dentry was not negative and couldn't be
++ * revalidated (shouldn't happen).
++ */
++ BUG_ON(!valid && dentry->d_inode);
++
++ lower_dentry = find_writeable_branch(dir, dentry);
++ if (IS_ERR(lower_dentry)) {
++ err = PTR_ERR(lower_dentry);
++ goto out;
++ }
++
++ lower_parent_dentry = lock_parent(lower_dentry);
++ if (IS_ERR(lower_parent_dentry)) {
++ err = PTR_ERR(lower_parent_dentry);
++ goto out_unlock;
++ }
++
++ mode = S_IALLUGO;
++ err = vfs_symlink(lower_parent_dentry->d_inode, lower_dentry, symname);
++ if (!err) {
++ err = PTR_ERR(unionfs_interpose(dentry, dir->i_sb, 0));
++ if (!err) {
++ unionfs_copy_attr_times(dir);
++ fsstack_copy_inode_size(dir,
++ lower_parent_dentry->d_inode);
++ /* update no. of links on parent directory */
++ dir->i_nlink = unionfs_get_nlinks(dir);
++ }
++ }
++
++out_unlock:
++ unlock_dir(lower_parent_dentry);
++out:
++ dput(wh_dentry);
++ kfree(name);
++
++ if (!err) {
++ unionfs_postcopyup_setmnt(dentry);
++ unionfs_check_inode(dir);
++ unionfs_check_dentry(dentry);
++ }
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++ return err;
++}
++
++static int unionfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
++{
++ int err = 0;
++ struct dentry *lower_dentry = NULL;
++ struct dentry *lower_parent_dentry = NULL;
++ struct dentry *parent;
++ int bindex = 0, bstart;
++ char *name = NULL;
++ int valid;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ valid = __unionfs_d_revalidate(dentry, parent, false);
++ if (unlikely(!valid)) {
++ err = -ESTALE; /* same as what real_lookup does */
++ goto out;
++ }
++
++ bstart = dbstart(dentry);
++
++ lower_dentry = unionfs_lower_dentry(dentry);
++
++ /* check for a whiteout in new dentry branch, and delete it */
++ err = check_unlink_whiteout(dentry, lower_dentry, bstart);
++ if (err > 0) /* whiteout found and removed successfully */
++ err = 0;
++ if (err) {
++ /* exit if the error returned was NOT -EROFS */
++ if (!IS_COPYUP_ERR(err))
++ goto out;
++ bstart--;
++ }
++
++ /* check if copyup's needed, and mkdir */
++ for (bindex = bstart; bindex >= 0; bindex--) {
++ int i;
++ int bend = dbend(dentry);
++
++ if (is_robranch_super(dentry->d_sb, bindex))
++ continue;
++
++ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
++ if (!lower_dentry) {
++ lower_dentry = create_parents(dir, dentry,
++ dentry->d_name.name,
++ bindex);
++ if (!lower_dentry || IS_ERR(lower_dentry)) {
++ printk(KERN_ERR "unionfs: lower dentry "
++				       "NULL for bindex = %d\n", bindex);
++ continue;
++ }
++ }
++
++ lower_parent_dentry = lock_parent(lower_dentry);
++
++ if (IS_ERR(lower_parent_dentry)) {
++ err = PTR_ERR(lower_parent_dentry);
++ goto out;
++ }
++
++ err = vfs_mkdir(lower_parent_dentry->d_inode, lower_dentry,
++ mode);
++
++ unlock_dir(lower_parent_dentry);
++
++ /* did the mkdir succeed? */
++ if (err)
++ break;
++
++ for (i = bindex + 1; i <= bend; i++) {
++ /* XXX: use path_put_lowers? */
++ if (unionfs_lower_dentry_idx(dentry, i)) {
++ dput(unionfs_lower_dentry_idx(dentry, i));
++ unionfs_set_lower_dentry_idx(dentry, i, NULL);
++ }
++ }
++ dbend(dentry) = bindex;
++
++ /*
++ * Only INTERPOSE_LOOKUP can return a value other than 0 on
++ * err.
++ */
++ err = PTR_ERR(unionfs_interpose(dentry, dir->i_sb, 0));
++ if (!err) {
++ unionfs_copy_attr_times(dir);
++ fsstack_copy_inode_size(dir,
++ lower_parent_dentry->d_inode);
++
++ /* update number of links on parent directory */
++ dir->i_nlink = unionfs_get_nlinks(dir);
++ }
++
++ err = make_dir_opaque(dentry, dbstart(dentry));
++ if (err) {
++ printk(KERN_ERR "unionfs: mkdir: error creating "
++ ".wh.__dir_opaque: %d\n", err);
++ goto out;
++ }
++
++ /* we are done! */
++ break;
++ }
++
++out:
++ if (!dentry->d_inode)
++ d_drop(dentry);
++
++ kfree(name);
++
++ if (!err) {
++ unionfs_copy_attr_times(dentry->d_inode);
++ unionfs_postcopyup_setmnt(dentry);
++ }
++ unionfs_check_inode(dir);
++ unionfs_check_dentry(dentry);
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++
++ return err;
++}
++
++static int unionfs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++ dev_t dev)
++{
++ int err = 0;
++ struct dentry *lower_dentry = NULL;
++ struct dentry *wh_dentry = NULL;
++ struct dentry *lower_parent_dentry = NULL;
++ struct dentry *parent;
++ char *name = NULL;
++ int valid = 0;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ valid = __unionfs_d_revalidate(dentry, parent, false);
++ if (unlikely(!valid)) {
++ err = -ESTALE;
++ goto out;
++ }
++
++ /*
++ * It's only a bug if this dentry was not negative and couldn't be
++ * revalidated (shouldn't happen).
++ */
++ BUG_ON(!valid && dentry->d_inode);
++
++ lower_dentry = find_writeable_branch(dir, dentry);
++ if (IS_ERR(lower_dentry)) {
++ err = PTR_ERR(lower_dentry);
++ goto out;
++ }
++
++ lower_parent_dentry = lock_parent(lower_dentry);
++ if (IS_ERR(lower_parent_dentry)) {
++ err = PTR_ERR(lower_parent_dentry);
++ goto out_unlock;
++ }
++
++ err = vfs_mknod(lower_parent_dentry->d_inode, lower_dentry, mode, dev);
++ if (!err) {
++ err = PTR_ERR(unionfs_interpose(dentry, dir->i_sb, 0));
++ if (!err) {
++ unionfs_copy_attr_times(dir);
++ fsstack_copy_inode_size(dir,
++ lower_parent_dentry->d_inode);
++ /* update no. of links on parent directory */
++ dir->i_nlink = unionfs_get_nlinks(dir);
++ }
++ }
++
++out_unlock:
++ unlock_dir(lower_parent_dentry);
++out:
++ dput(wh_dentry);
++ kfree(name);
++
++ if (!err) {
++ unionfs_postcopyup_setmnt(dentry);
++ unionfs_check_inode(dir);
++ unionfs_check_dentry(dentry);
++ }
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++ return err;
++}
++
++/* requires sb, dentry, and parent to already be locked */
++static int __unionfs_readlink(struct dentry *dentry, char __user *buf,
++ int bufsiz)
++{
++ int err;
++ struct dentry *lower_dentry;
++
++ lower_dentry = unionfs_lower_dentry(dentry);
++
++ if (!lower_dentry->d_inode->i_op ||
++ !lower_dentry->d_inode->i_op->readlink) {
++ err = -EINVAL;
++ goto out;
++ }
++
++ err = lower_dentry->d_inode->i_op->readlink(lower_dentry,
++ buf, bufsiz);
++ if (err >= 0)
++ fsstack_copy_attr_atime(dentry->d_inode,
++ lower_dentry->d_inode);
++
++out:
++ return err;
++}
++
++static int unionfs_readlink(struct dentry *dentry, char __user *buf,
++ int bufsiz)
++{
++ int err;
++ struct dentry *parent;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ if (unlikely(!__unionfs_d_revalidate(dentry, parent, false))) {
++ err = -ESTALE;
++ goto out;
++ }
++
++ err = __unionfs_readlink(dentry, buf, bufsiz);
++
++out:
++ unionfs_check_dentry(dentry);
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++
++ return err;
++}
++
++static void *unionfs_follow_link(struct dentry *dentry, struct nameidata *nd)
++{
++ char *buf;
++ int len = PAGE_SIZE, err;
++ mm_segment_t old_fs;
++ struct dentry *parent;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ /* This is freed by the put_link method assuming a successful call. */
++ buf = kmalloc(len, GFP_KERNEL);
++ if (unlikely(!buf)) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ /* read the symlink, and then we will follow it */
++ old_fs = get_fs();
++ set_fs(KERNEL_DS);
++ err = __unionfs_readlink(dentry, buf, len);
++ set_fs(old_fs);
++ if (err < 0) {
++ kfree(buf);
++ buf = NULL;
++ goto out;
++ }
++ buf[err] = 0;
++ nd_set_link(nd, buf);
++ err = 0;
++
++out:
++ if (err >= 0) {
++ unionfs_check_nd(nd);
++ unionfs_check_dentry(dentry);
++ }
++
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++
++ return ERR_PTR(err);
++}
++
++/* this @nd *IS* still used */
++static void unionfs_put_link(struct dentry *dentry, struct nameidata *nd,
++ void *cookie)
++{
++ struct dentry *parent;
++ char *buf;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ if (unlikely(!__unionfs_d_revalidate(dentry, parent, false)))
++ printk(KERN_ERR
++ "unionfs: put_link failed to revalidate dentry\n");
++
++ unionfs_check_dentry(dentry);
++#if 0
++ /* XXX: can't run this check b/c this fxn can receive a poisoned 'nd' PTR */
++ unionfs_check_nd(nd);
++#endif
++ buf = nd_get_link(nd);
++ if (!IS_ERR(buf))
++ kfree(buf);
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++}
++
++/*
++ * This is a variant of fs/namei.c:permission() or inode_permission() which
++ * skips over EROFS tests (because we perform copyup on EROFS).
++ */
++static int __inode_permission(struct inode *inode, int mask)
++{
++ int retval;
++
++ /* nobody gets write access to an immutable file */
++ if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode))
++ return -EACCES;
++
++ /* Ordinary permission routines do not understand MAY_APPEND. */
++ if (inode->i_op && inode->i_op->permission) {
++ retval = inode->i_op->permission(inode, mask);
++ if (!retval) {
++ /*
++ * Exec permission on a regular file is denied if none
++ * of the execute bits are set.
++ *
++ * This check should be done by the ->permission()
++ * method.
++ */
++ if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode) &&
++ !(inode->i_mode & S_IXUGO))
++ return -EACCES;
++ }
++ } else {
++ retval = generic_permission(inode, mask, NULL);
++ }
++ if (retval)
++ return retval;
++
++ return security_inode_permission(inode,
++ mask & (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND));
++}
++
++/*
++ * Don't grab the superblock read-lock in unionfs_permission, which prevents
++ * a deadlock with the branch-management "add branch" code (which grabbed
++ * the write lock). It is safe to not grab the read lock here, because even
++ * with branch management taking place, there is no chance that
++ * unionfs_permission, or anything it calls, will use stale branch
++ * information.
++ */
++static int unionfs_permission(struct inode *inode, int mask)
++{
++ struct inode *lower_inode = NULL;
++ int err = 0;
++ int bindex, bstart, bend;
++ const int is_file = !S_ISDIR(inode->i_mode);
++ const int write_mask = (mask & MAY_WRITE) && !(mask & MAY_READ);
++ struct inode *inode_grabbed = igrab(inode);
++ struct dentry *dentry = d_find_alias(inode);
++
++ if (dentry)
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ if (!UNIONFS_I(inode)->lower_inodes) {
++ if (is_file) /* dirs can be unlinked but chdir'ed to */
++ err = -ESTALE; /* force revalidate */
++ goto out;
++ }
++ bstart = ibstart(inode);
++ bend = ibend(inode);
++ if (unlikely(bstart < 0 || bend < 0)) {
++ /*
++ * With branch-management, we can get a stale inode here.
++ * If so, we return ESTALE back to link_path_walk, which
++ * would discard the dcache entry and re-lookup the
++ * dentry+inode. This should be equivalent to issuing
++ * __unionfs_d_revalidate_chain on nd.dentry here.
++ */
++ if (is_file) /* dirs can be unlinked but chdir'ed to */
++ err = -ESTALE; /* force revalidate */
++ goto out;
++ }
++
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ lower_inode = unionfs_lower_inode_idx(inode, bindex);
++ if (!lower_inode)
++ continue;
++
++ /*
++		 * check the condition for D-F-D underlying files/directories;
++		 * we don't have to check for files if we are checking for
++ * directories.
++ */
++ if (!is_file && !S_ISDIR(lower_inode->i_mode))
++ continue;
++
++ /*
++ * We check basic permissions, but we ignore any conditions
++ * such as readonly file systems or branches marked as
++ * readonly, because those conditions should lead to a
++ * copyup taking place later on. However, if user never had
++ * access to the file, then no copyup could ever take place.
++ */
++ err = __inode_permission(lower_inode, mask);
++		if (err && err != -EACCES && err != -EPERM && bindex > 0) {
++ umode_t mode = lower_inode->i_mode;
++ if ((is_robranch_super(inode->i_sb, bindex) ||
++ __is_rdonly(lower_inode)) &&
++ (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
++ err = 0;
++ if (IS_COPYUP_ERR(err))
++ err = 0;
++ }
++
++ /*
++ * NFS HACK: NFSv2/3 return EACCES on readonly-exported,
++ * locally readonly-mounted file systems, instead of EROFS
++ * like other file systems do. So we have no choice here
++ * but to intercept this and ignore it for NFS branches
++ * marked readonly. Specifically, we avoid using NFS's own
++ * "broken" ->permission method, and rely on
++ * generic_permission() to do basic checking for us.
++ */
++ if (err && err == -EACCES &&
++ is_robranch_super(inode->i_sb, bindex) &&
++ lower_inode->i_sb->s_magic == NFS_SUPER_MAGIC)
++ err = generic_permission(lower_inode, mask, NULL);
++
++ /*
++ * The permissions are an intersection of the overall directory
++ * permissions, so we fail if one fails.
++ */
++ if (err)
++ goto out;
++
++ /* only the leftmost file matters. */
++ if (is_file || write_mask) {
++ if (is_file && write_mask) {
++ err = get_write_access(lower_inode);
++ if (!err)
++ put_write_access(lower_inode);
++ }
++ break;
++ }
++ }
++ /* sync times which may have changed (asynchronously) below */
++ unionfs_copy_attr_times(inode);
++
++out:
++ unionfs_check_inode(inode);
++ if (dentry) {
++ unionfs_unlock_dentry(dentry);
++ dput(dentry);
++ }
++ iput(inode_grabbed);
++ return err;
++}
++
++static int unionfs_setattr(struct dentry *dentry, struct iattr *ia)
++{
++ int err = 0;
++ struct dentry *lower_dentry;
++ struct dentry *parent;
++ struct inode *inode;
++ struct inode *lower_inode;
++ int bstart, bend, bindex;
++ loff_t size;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ if (unlikely(!__unionfs_d_revalidate(dentry, parent, false))) {
++ err = -ESTALE;
++ goto out;
++ }
++
++ bstart = dbstart(dentry);
++ bend = dbend(dentry);
++ inode = dentry->d_inode;
++
++ /*
++ * mode change is for clearing setuid/setgid. Allow lower filesystem
++ * to reinterpret it in its own way.
++ */
++ if (ia->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
++ ia->ia_valid &= ~ATTR_MODE;
++
++ lower_dentry = unionfs_lower_dentry(dentry);
++ if (!lower_dentry) { /* should never happen after above revalidate */
++ err = -EINVAL;
++ goto out;
++ }
++ lower_inode = unionfs_lower_inode(inode);
++
++ /* check if user has permission to change lower inode */
++ err = inode_change_ok(lower_inode, ia);
++ if (err)
++ goto out;
++
++ /* copyup if the file is on a read only branch */
++ if (is_robranch_super(dentry->d_sb, bstart)
++ || __is_rdonly(lower_inode)) {
++ /* check if we have a branch to copy up to */
++ if (bstart <= 0) {
++ err = -EACCES;
++ goto out;
++ }
++
++ if (ia->ia_valid & ATTR_SIZE)
++ size = ia->ia_size;
++ else
++ size = i_size_read(inode);
++ /* copyup to next available branch */
++ for (bindex = bstart - 1; bindex >= 0; bindex--) {
++ err = copyup_dentry(parent->d_inode,
++ dentry, bstart, bindex,
++ dentry->d_name.name,
++ dentry->d_name.len,
++ NULL, size);
++ if (!err)
++ break;
++ }
++ if (err)
++ goto out;
++ /* get updated lower_dentry/inode after copyup */
++ lower_dentry = unionfs_lower_dentry(dentry);
++ lower_inode = unionfs_lower_inode(inode);
++ }
++
++ /*
++ * If shrinking, first truncate upper level to cancel writing dirty
++	 * pages beyond the new eof; and also if its maxbytes is more
++ * limiting (fail with -EFBIG before making any change to the lower
++ * level). There is no need to vmtruncate the upper level
++ * afterwards in the other cases: we fsstack_copy_inode_size from
++ * the lower level.
++ */
++ if (ia->ia_valid & ATTR_SIZE) {
++ size = i_size_read(inode);
++ if (ia->ia_size < size || (ia->ia_size > size &&
++ inode->i_sb->s_maxbytes < lower_inode->i_sb->s_maxbytes)) {
++ err = vmtruncate(inode, ia->ia_size);
++ if (err)
++ goto out;
++ }
++ }
++
++ /* notify the (possibly copied-up) lower inode */
++ /*
++ * Note: we use lower_dentry->d_inode, because lower_inode may be
++	 * unlinked (no inode->i_sb and i_ino==0). This happens if someone
++ * tries to open(), unlink(), then ftruncate() a file.
++ */
++ mutex_lock(&lower_dentry->d_inode->i_mutex);
++ err = notify_change(lower_dentry, ia);
++ mutex_unlock(&lower_dentry->d_inode->i_mutex);
++ if (err)
++ goto out;
++
++ /* get attributes from the first lower inode */
++ if (ibstart(inode) >= 0)
++ unionfs_copy_attr_all(inode, lower_inode);
++ /*
++ * unionfs_copy_attr_all will copy the lower times to our inode if
++ * the lower ones are newer (useful for cache coherency). However,
++ * ->setattr is the only place in which we may have to copy the
++ * lower inode times absolutely, to support utimes(2).
++ */
++ if (ia->ia_valid & ATTR_MTIME_SET)
++ inode->i_mtime = lower_inode->i_mtime;
++ if (ia->ia_valid & ATTR_CTIME)
++ inode->i_ctime = lower_inode->i_ctime;
++ if (ia->ia_valid & ATTR_ATIME_SET)
++ inode->i_atime = lower_inode->i_atime;
++ fsstack_copy_inode_size(inode, lower_inode);
++
++out:
++ if (!err)
++ unionfs_check_dentry(dentry);
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++
++ return err;
++}
++
++struct inode_operations unionfs_symlink_iops = {
++ .readlink = unionfs_readlink,
++ .permission = unionfs_permission,
++ .follow_link = unionfs_follow_link,
++ .setattr = unionfs_setattr,
++ .put_link = unionfs_put_link,
++};
++
++struct inode_operations unionfs_dir_iops = {
++ .create = unionfs_create,
++ .lookup = unionfs_lookup,
++ .link = unionfs_link,
++ .unlink = unionfs_unlink,
++ .symlink = unionfs_symlink,
++ .mkdir = unionfs_mkdir,
++ .rmdir = unionfs_rmdir,
++ .mknod = unionfs_mknod,
++ .rename = unionfs_rename,
++ .permission = unionfs_permission,
++ .setattr = unionfs_setattr,
++#ifdef CONFIG_UNION_FS_XATTR
++ .setxattr = unionfs_setxattr,
++ .getxattr = unionfs_getxattr,
++ .removexattr = unionfs_removexattr,
++ .listxattr = unionfs_listxattr,
++#endif /* CONFIG_UNION_FS_XATTR */
++};
++
++struct inode_operations unionfs_main_iops = {
++ .permission = unionfs_permission,
++ .setattr = unionfs_setattr,
++#ifdef CONFIG_UNION_FS_XATTR
++ .setxattr = unionfs_setxattr,
++ .getxattr = unionfs_getxattr,
++ .removexattr = unionfs_removexattr,
++ .listxattr = unionfs_listxattr,
++#endif /* CONFIG_UNION_FS_XATTR */
++};
+diff --git a/fs/unionfs/lookup.c b/fs/unionfs/lookup.c
+new file mode 100644
+index 0000000..b63c17e
+--- /dev/null
++++ b/fs/unionfs/lookup.c
+@@ -0,0 +1,569 @@
++/*
++ * Copyright (c) 2003-2010 Erez Zadok
++ * Copyright (c) 2003-2006 Charles P. Wright
++ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2005-2006 Junjiro Okajima
++ * Copyright (c) 2005 Arun M. Krishnakumar
++ * Copyright (c) 2004-2006 David P. Quigley
++ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
++ * Copyright (c) 2003 Puja Gupta
++ * Copyright (c) 2003 Harikesavan Krishnan
++ * Copyright (c) 2003-2010 Stony Brook University
++ * Copyright (c) 2003-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "union.h"
++
++/*
++ * Lookup one path component @name relative to a <base,mnt> path pair.
++ * Behaves nearly the same as lookup_one_len (i.e., returns a negative dentry
++ * on ENOENT), but uses the @mnt passed, so it can cross bind mounts and
++ * other lower mounts properly. If @new_mnt is non-null, will fill in the
++ * new mnt there. The caller is responsible for dput/mntput/path_put of the
++ * returned @dentry and @new_mnt.
++ */
++struct dentry *__lookup_one(struct dentry *base, struct vfsmount *mnt,
++ const char *name, struct vfsmount **new_mnt)
++{
++ struct dentry *dentry = NULL;
++ struct nameidata lower_nd;
++ int err;
++
++ /* we use flags=0 to get basic lookup */
++ err = vfs_path_lookup(base, mnt, name, 0, &lower_nd);
++
++ switch (err) {
++ case 0: /* no error */
++ dentry = lower_nd.path.dentry;
++ if (new_mnt)
++ *new_mnt = lower_nd.path.mnt; /* rc already inc'ed */
++ break;
++ case -ENOENT:
++ /*
++ * We don't consider ENOENT an error, and we want to return
++ * a negative dentry (ala lookup_one_len). As we know
++ * there was no inode for this name before (-ENOENT), then
++ * it's safe to call lookup_one_len (which doesn't take a
++ * vfsmount).
++ */
++ dentry = lookup_lck_len(name, base, strlen(name));
++ if (new_mnt)
++ *new_mnt = mntget(lower_nd.path.mnt);
++ break;
++ default: /* all other real errors */
++ dentry = ERR_PTR(err);
++ break;
++ }
++
++ return dentry;
++}
++
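++/*
++ * Usage sketch for __lookup_one(), as done in unionfs_lookup_full() below:
++ *
++ *	lower_mnt = NULL;
++ *	lower_dentry = __lookup_one(lower_dir_dentry, lower_dir_mnt,
++ *				    name, &lower_mnt);
++ *	if (IS_ERR(lower_dentry))
++ *		err = PTR_ERR(lower_dentry);	(then bail out)
++ *	unionfs_set_lower_dentry_idx(dentry, bindex, lower_dentry);
++ *	unionfs_set_lower_mnt_idx(dentry, bindex, lower_mnt);
++ */
++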
++/*
++ * This is a utility function that fills in a unionfs dentry.
++ * Caller must lock this dentry with unionfs_lock_dentry.
++ *
++ * Returns: 0 (ok), or -ERRNO if an error occurred.
++ * XXX: get rid of _partial_lookup and make callers call _lookup_full directly
++ */
++int unionfs_partial_lookup(struct dentry *dentry, struct dentry *parent)
++{
++ struct dentry *tmp;
++ int err = -ENOSYS;
++
++ tmp = unionfs_lookup_full(dentry, parent, INTERPOSE_PARTIAL);
++
++ if (!tmp) {
++ err = 0;
++ goto out;
++ }
++ if (IS_ERR(tmp)) {
++ err = PTR_ERR(tmp);
++ goto out;
++ }
++ /* XXX: need to change the interface */
++ BUG_ON(tmp != dentry);
++out:
++ return err;
++}
++
++/* The dentry cache is just so we have properly sized dentries. */
++static struct kmem_cache *unionfs_dentry_cachep;
++int unionfs_init_dentry_cache(void)
++{
++ unionfs_dentry_cachep =
++ kmem_cache_create("unionfs_dentry",
++ sizeof(struct unionfs_dentry_info),
++ 0, SLAB_RECLAIM_ACCOUNT, NULL);
++
++ return (unionfs_dentry_cachep ? 0 : -ENOMEM);
++}
++
++void unionfs_destroy_dentry_cache(void)
++{
++ if (unionfs_dentry_cachep)
++ kmem_cache_destroy(unionfs_dentry_cachep);
++}
++
++void free_dentry_private_data(struct dentry *dentry)
++{
++ if (!dentry || !dentry->d_fsdata)
++ return;
++ kfree(UNIONFS_D(dentry)->lower_paths);
++ UNIONFS_D(dentry)->lower_paths = NULL;
++ kmem_cache_free(unionfs_dentry_cachep, dentry->d_fsdata);
++ dentry->d_fsdata = NULL;
++}
++
++static inline int __realloc_dentry_private_data(struct dentry *dentry)
++{
++ struct unionfs_dentry_info *info = UNIONFS_D(dentry);
++ void *p;
++ int size;
++
++ BUG_ON(!info);
++
++ size = sizeof(struct path) * sbmax(dentry->d_sb);
++ p = krealloc(info->lower_paths, size, GFP_ATOMIC);
++ if (unlikely(!p))
++ return -ENOMEM;
++
++ info->lower_paths = p;
++
++ info->bstart = -1;
++ info->bend = -1;
++ info->bopaque = -1;
++ info->bcount = sbmax(dentry->d_sb);
++ atomic_set(&info->generation,
++ atomic_read(&UNIONFS_SB(dentry->d_sb)->generation));
++
++ memset(info->lower_paths, 0, size);
++
++ return 0;
++}
++
++/* UNIONFS_D(dentry)->lock must be locked */
++int realloc_dentry_private_data(struct dentry *dentry)
++{
++ if (!__realloc_dentry_private_data(dentry))
++ return 0;
++
++ kfree(UNIONFS_D(dentry)->lower_paths);
++ free_dentry_private_data(dentry);
++ return -ENOMEM;
++}
++
++/* allocate new dentry private data */
++int new_dentry_private_data(struct dentry *dentry, int subclass)
++{
++ struct unionfs_dentry_info *info = UNIONFS_D(dentry);
++
++ BUG_ON(info);
++
++ info = kmem_cache_alloc(unionfs_dentry_cachep, GFP_ATOMIC);
++ if (unlikely(!info))
++ return -ENOMEM;
++
++ mutex_init(&info->lock);
++ mutex_lock_nested(&info->lock, subclass);
++
++ info->lower_paths = NULL;
++
++ dentry->d_fsdata = info;
++
++ if (!__realloc_dentry_private_data(dentry))
++ return 0;
++
++ mutex_unlock(&info->lock);
++ free_dentry_private_data(dentry);
++ return -ENOMEM;
++}
++
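++/*
++ * Allocation sketch: ->lookup allocates the private data (which also leaves
++ * the dentry locked) and ->d_release frees it, as in unionfs_lookup():
++ *
++ *	err = new_dentry_private_data(dentry, UNIONFS_DMUTEX_CHILD);
++ *	if (unlikely(err))
++ *		return ERR_PTR(err);
++ *	...
++ *	unionfs_unlock_dentry(dentry);	(locked in new_dentry_private_data)
++ */
++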
++/*
++ * scan through the lower dentry objects, and set bstart to reflect the
++ * starting branch
++ */
++void update_bstart(struct dentry *dentry)
++{
++ int bindex;
++ int bstart = dbstart(dentry);
++ int bend = dbend(dentry);
++ struct dentry *lower_dentry;
++
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
++ if (!lower_dentry)
++ continue;
++ if (lower_dentry->d_inode) {
++ dbstart(dentry) = bindex;
++ break;
++ }
++ dput(lower_dentry);
++ unionfs_set_lower_dentry_idx(dentry, bindex, NULL);
++ }
++}
++
++
++/*
++ * Initialize a nameidata structure (the intent part) we can pass to a lower
++ * file system. Returns 0 on success or -error (only -ENOMEM possible).
++ * Inside that nd structure, this function may also return an allocated
++ * struct file (for open intents). The caller, when done with this nd, must
++ * kfree the intent file (using release_lower_nd).
++ *
++ * XXX: this code, and the callers of this code, should be redone using
++ * vfs_path_lookup() when (1) the nameidata structure is refactored into a
++ * separate intent-structure, and (2) open_namei() is broken into a VFS-only
++ * function and a method that other file systems can call.
++ */
++int init_lower_nd(struct nameidata *nd, unsigned int flags)
++{
++ int err = 0;
++#ifdef ALLOC_LOWER_ND_FILE
++ /*
++ * XXX: one day we may need to have the lower return an open file
++ * for us. It is not needed in 2.6.23-rc1 for nfs2/nfs3, but may
++ * very well be needed for nfs4.
++ */
++ struct file *file;
++#endif /* ALLOC_LOWER_ND_FILE */
++
++ memset(nd, 0, sizeof(struct nameidata));
++ if (!flags)
++ return err;
++
++ switch (flags) {
++ case LOOKUP_CREATE:
++ nd->intent.open.flags |= O_CREAT;
++ /* fall through: shared code for create/open cases */
++ case LOOKUP_OPEN:
++ nd->flags = flags;
++ nd->intent.open.flags |= (FMODE_READ | FMODE_WRITE);
++#ifdef ALLOC_LOWER_ND_FILE
++ file = kzalloc(sizeof(struct file), GFP_KERNEL);
++ if (unlikely(!file)) {
++ err = -ENOMEM;
++ break; /* exit switch statement and thus return */
++ }
++ nd->intent.open.file = file;
++#endif /* ALLOC_LOWER_ND_FILE */
++ break;
++ default:
++ /*
++ * We should never get here, for now.
++ * We can add new cases here later on.
++ */
++ pr_debug("unionfs: unknown nameidata flag 0x%x\n", flags);
++ BUG();
++ break;
++ }
++
++ return err;
++}
++
++void release_lower_nd(struct nameidata *nd, int err)
++{
++ if (!nd->intent.open.file)
++ return;
++ else if (!err)
++ release_open_intent(nd);
++#ifdef ALLOC_LOWER_ND_FILE
++ kfree(nd->intent.open.file);
++#endif /* ALLOC_LOWER_ND_FILE */
++}
++
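++/*
++ * Usage sketch for init_lower_nd()/release_lower_nd(), as in
++ * unionfs_create() in inode.c:
++ *
++ *	err = init_lower_nd(&lower_nd, LOOKUP_CREATE);
++ *	if (unlikely(err < 0))
++ *		goto out_unlock;
++ *	err = vfs_create(lower_parent_dentry->d_inode, lower_dentry, mode,
++ *			 &lower_nd);
++ *	release_lower_nd(&lower_nd, err);
++ */
++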
++/*
++ * Main (and complex) driver function for Unionfs's lookup
++ *
++ * Returns: NULL (ok), ERR_PTR if an error occurred, or a non-null non-error
++ * PTR if d_splice returned a different dentry.
++ *
++ * If lookupmode is INTERPOSE_PARTIAL/REVAL/REVAL_NEG, the passed dentry's
++ * inode info must be locked. If lookupmode is INTERPOSE_LOOKUP (i.e., a
++ * newly looked-up dentry), then unionfs_lookup_full will return a locked
++ * dentry's info, which the caller must unlock.
++ */
++struct dentry *unionfs_lookup_full(struct dentry *dentry,
++ struct dentry *parent, int lookupmode)
++{
++ int err = 0;
++ struct dentry *lower_dentry = NULL;
++ struct vfsmount *lower_mnt;
++ struct vfsmount *lower_dir_mnt;
++ struct dentry *wh_lower_dentry = NULL;
++ struct dentry *lower_dir_dentry = NULL;
++ struct dentry *d_interposed = NULL;
++ int bindex, bstart, bend, bopaque;
++ int opaque, num_positive = 0;
++ const char *name;
++ int namelen;
++ int pos_start, pos_end;
++
++ /*
++ * We should already have a lock on this dentry in the case of a
++ * partial lookup, or a revalidation. Otherwise it is returned from
++ * new_dentry_private_data already locked.
++ */
++ verify_locked(dentry);
++ verify_locked(parent);
++
++ /* must initialize dentry operations */
++ dentry->d_op = &unionfs_dops;
++
++ /* We never partial lookup the root directory. */
++ if (IS_ROOT(dentry))
++ goto out;
++
++ name = dentry->d_name.name;
++ namelen = dentry->d_name.len;
++
++ /* No dentries should get created for possible whiteout names. */
++ if (!is_validname(name)) {
++ err = -EPERM;
++ goto out_free;
++ }
++
++ /* Now start the actual lookup procedure. */
++ bstart = dbstart(parent);
++ bend = dbend(parent);
++ bopaque = dbopaque(parent);
++ BUG_ON(bstart < 0);
++
++ /* adjust bend to bopaque if needed */
++ if ((bopaque >= 0) && (bopaque < bend))
++ bend = bopaque;
++
++ /* lookup all possible dentries */
++ for (bindex = bstart; bindex <= bend; bindex++) {
++
++ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
++ lower_mnt = unionfs_lower_mnt_idx(dentry, bindex);
++
++ /* skip if we already have a positive lower dentry */
++ if (lower_dentry) {
++ if (dbstart(dentry) < 0)
++ dbstart(dentry) = bindex;
++ if (bindex > dbend(dentry))
++ dbend(dentry) = bindex;
++ if (lower_dentry->d_inode)
++ num_positive++;
++ continue;
++ }
++
++ lower_dir_dentry =
++ unionfs_lower_dentry_idx(parent, bindex);
++ /* if the lower dentry's parent does not exist, skip this */
++ if (!lower_dir_dentry || !lower_dir_dentry->d_inode)
++ continue;
++
++ /* also skip it if the parent isn't a directory. */
++ if (!S_ISDIR(lower_dir_dentry->d_inode->i_mode))
++ continue; /* XXX: should be BUG_ON */
++
++ /* check for whiteouts: stop lookup if found */
++ wh_lower_dentry = lookup_whiteout(name, lower_dir_dentry);
++ if (IS_ERR(wh_lower_dentry)) {
++ err = PTR_ERR(wh_lower_dentry);
++ goto out_free;
++ }
++ if (wh_lower_dentry->d_inode) {
++ dbend(dentry) = dbopaque(dentry) = bindex;
++ if (dbstart(dentry) < 0)
++ dbstart(dentry) = bindex;
++ dput(wh_lower_dentry);
++ break;
++ }
++ dput(wh_lower_dentry);
++
++ /* Now do regular lookup; lookup @name */
++ lower_dir_mnt = unionfs_lower_mnt_idx(parent, bindex);
++ lower_mnt = NULL; /* XXX: needed? */
++
++ lower_dentry = __lookup_one(lower_dir_dentry, lower_dir_mnt,
++ name, &lower_mnt);
++
++ if (IS_ERR(lower_dentry)) {
++ err = PTR_ERR(lower_dentry);
++ goto out_free;
++ }
++ unionfs_set_lower_dentry_idx(dentry, bindex, lower_dentry);
++ if (!lower_mnt)
++ lower_mnt = unionfs_mntget(dentry->d_sb->s_root,
++ bindex);
++ unionfs_set_lower_mnt_idx(dentry, bindex, lower_mnt);
++
++ /* adjust dbstart/end */
++ if (dbstart(dentry) < 0)
++ dbstart(dentry) = bindex;
++ if (bindex > dbend(dentry))
++ dbend(dentry) = bindex;
++ /*
++ * We always store the lower dentries above, and update
++ * dbstart/dbend, even if the whole unionfs dentry is
++ * negative (i.e., no lower inodes).
++ */
++ if (!lower_dentry->d_inode)
++ continue;
++ num_positive++;
++
++ /*
++ * check if we just found an opaque directory, if so, stop
++ * lookups here.
++ */
++ if (!S_ISDIR(lower_dentry->d_inode->i_mode))
++ continue;
++ opaque = is_opaque_dir(dentry, bindex);
++ if (opaque < 0) {
++ err = opaque;
++ goto out_free;
++ } else if (opaque) {
++ dbend(dentry) = dbopaque(dentry) = bindex;
++ break;
++ }
++ dbend(dentry) = bindex;
++
++ /* update parent directory's atime with the bindex */
++ fsstack_copy_attr_atime(parent->d_inode,
++ lower_dir_dentry->d_inode);
++ }
++
++ /* sanity checks, then decide if to process a negative dentry */
++ BUG_ON(dbstart(dentry) < 0 && dbend(dentry) >= 0);
++ BUG_ON(dbstart(dentry) >= 0 && dbend(dentry) < 0);
++
++ if (num_positive > 0)
++ goto out_positive;
++
++ /*** handle NEGATIVE dentries ***/
++
++ /*
++ * If negative, keep only first lower negative dentry, to save on
++ * memory.
++ */
++ if (dbstart(dentry) < dbend(dentry)) {
++ path_put_lowers(dentry, dbstart(dentry) + 1,
++ dbend(dentry), false);
++ dbend(dentry) = dbstart(dentry);
++ }
++ if (lookupmode == INTERPOSE_PARTIAL)
++ goto out;
++ if (lookupmode == INTERPOSE_LOOKUP) {
++ /*
++ * If all we found was a whiteout in the first available
++ * branch, then create a negative dentry for a possibly new
++ * file to be created.
++ */
++ if (dbopaque(dentry) < 0)
++ goto out;
++ /* XXX: need to get mnt here */
++ bindex = dbstart(dentry);
++ if (unionfs_lower_dentry_idx(dentry, bindex))
++ goto out;
++ lower_dir_dentry =
++ unionfs_lower_dentry_idx(parent, bindex);
++ if (!lower_dir_dentry || !lower_dir_dentry->d_inode)
++ goto out;
++ if (!S_ISDIR(lower_dir_dentry->d_inode->i_mode))
++ goto out; /* XXX: should be BUG_ON */
++ /* XXX: do we need to cross bind mounts here? */
++ lower_dentry = lookup_lck_len(name, lower_dir_dentry, namelen);
++ if (IS_ERR(lower_dentry)) {
++ err = PTR_ERR(lower_dentry);
++ goto out;
++ }
++ /* XXX: need to mntget/mntput as needed too! */
++ unionfs_set_lower_dentry_idx(dentry, bindex, lower_dentry);
++ /* XXX: wrong mnt for crossing bind mounts! */
++ lower_mnt = unionfs_mntget(dentry->d_sb->s_root, bindex);
++ unionfs_set_lower_mnt_idx(dentry, bindex, lower_mnt);
++
++ goto out;
++ }
++
++ /* if we're revalidating a positive dentry, don't make it negative */
++ if (lookupmode != INTERPOSE_REVAL)
++ d_add(dentry, NULL);
++
++ goto out;
++
++out_positive:
++ /*** handle POSITIVE dentries ***/
++
++ /*
++ * This unionfs dentry is positive (at least one lower inode
++	 * exists), so scan the entire dentry from beginning to end, and
++	 * remove any negative lower dentries. Then, update dbstart/dbend
++ * to reflect the start/end of positive dentries.
++ */
++ pos_start = pos_end = -1;
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ lower_dentry = unionfs_lower_dentry_idx(dentry,
++ bindex);
++ if (lower_dentry && lower_dentry->d_inode) {
++ if (pos_start < 0)
++ pos_start = bindex;
++ if (bindex > pos_end)
++ pos_end = bindex;
++ continue;
++ }
++ path_put_lowers(dentry, bindex, bindex, false);
++ }
++ if (pos_start >= 0)
++ dbstart(dentry) = pos_start;
++ if (pos_end >= 0)
++ dbend(dentry) = pos_end;
++
++ /* Partial lookups need to re-interpose, or throw away older negs. */
++ if (lookupmode == INTERPOSE_PARTIAL) {
++ if (dentry->d_inode) {
++ unionfs_reinterpose(dentry);
++ goto out;
++ }
++
++ /*
++ * This dentry was positive, so it is as if we had a
++ * negative revalidation.
++ */
++ lookupmode = INTERPOSE_REVAL_NEG;
++ update_bstart(dentry);
++ }
++
++ /*
++ * Interpose can return a dentry if d_splice returned a different
++ * dentry.
++ */
++ d_interposed = unionfs_interpose(dentry, dentry->d_sb, lookupmode);
++ if (IS_ERR(d_interposed))
++ err = PTR_ERR(d_interposed);
++ else if (d_interposed)
++ dentry = d_interposed;
++
++ if (!err)
++ goto out;
++ d_drop(dentry);
++
++out_free:
++ /* should dput/mntput all the underlying dentries on error condition */
++ if (dbstart(dentry) >= 0)
++ path_put_lowers_all(dentry, false);
++ /* free lower_paths unconditionally */
++ kfree(UNIONFS_D(dentry)->lower_paths);
++ UNIONFS_D(dentry)->lower_paths = NULL;
++
++out:
++ if (dentry && UNIONFS_D(dentry)) {
++ BUG_ON(dbstart(dentry) < 0 && dbend(dentry) >= 0);
++ BUG_ON(dbstart(dentry) >= 0 && dbend(dentry) < 0);
++ }
++ if (d_interposed && UNIONFS_D(d_interposed)) {
++ BUG_ON(dbstart(d_interposed) < 0 && dbend(d_interposed) >= 0);
++ BUG_ON(dbstart(d_interposed) >= 0 && dbend(d_interposed) < 0);
++ }
++
++ if (!err && d_interposed)
++ return d_interposed;
++ return ERR_PTR(err);
++}
+diff --git a/fs/unionfs/main.c b/fs/unionfs/main.c
+new file mode 100644
+index 0000000..258386e
+--- /dev/null
++++ b/fs/unionfs/main.c
+@@ -0,0 +1,758 @@
++/*
++ * Copyright (c) 2003-2010 Erez Zadok
++ * Copyright (c) 2003-2006 Charles P. Wright
++ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2005-2006 Junjiro Okajima
++ * Copyright (c) 2005 Arun M. Krishnakumar
++ * Copyright (c) 2004-2006 David P. Quigley
++ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
++ * Copyright (c) 2003 Puja Gupta
++ * Copyright (c) 2003 Harikesavan Krishnan
++ * Copyright (c) 2003-2010 Stony Brook University
++ * Copyright (c) 2003-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "union.h"
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++
++static void unionfs_fill_inode(struct dentry *dentry,
++ struct inode *inode)
++{
++ struct inode *lower_inode;
++ struct dentry *lower_dentry;
++ int bindex, bstart, bend;
++
++ bstart = dbstart(dentry);
++ bend = dbend(dentry);
++
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
++ if (!lower_dentry) {
++ unionfs_set_lower_inode_idx(inode, bindex, NULL);
++ continue;
++ }
++
++ /* Initialize the lower inode to the new lower inode. */
++ if (!lower_dentry->d_inode)
++ continue;
++
++ unionfs_set_lower_inode_idx(inode, bindex,
++ igrab(lower_dentry->d_inode));
++ }
++
++ ibstart(inode) = dbstart(dentry);
++ ibend(inode) = dbend(dentry);
++
++ /* Use attributes from the first branch. */
++ lower_inode = unionfs_lower_inode(inode);
++
++ /* Use different set of inode ops for symlinks & directories */
++ if (S_ISLNK(lower_inode->i_mode))
++ inode->i_op = &unionfs_symlink_iops;
++ else if (S_ISDIR(lower_inode->i_mode))
++ inode->i_op = &unionfs_dir_iops;
++
++ /* Use different set of file ops for directories */
++ if (S_ISDIR(lower_inode->i_mode))
++ inode->i_fop = &unionfs_dir_fops;
++
++ /* properly initialize special inodes */
++ if (S_ISBLK(lower_inode->i_mode) || S_ISCHR(lower_inode->i_mode) ||
++ S_ISFIFO(lower_inode->i_mode) || S_ISSOCK(lower_inode->i_mode))
++ init_special_inode(inode, lower_inode->i_mode,
++ lower_inode->i_rdev);
++
++ /* all well, copy inode attributes */
++ unionfs_copy_attr_all(inode, lower_inode);
++ fsstack_copy_inode_size(inode, lower_inode);
++}
++
++/*
++ * Connect a unionfs dentry/inode with several lower ones. This is
++ * the classic stackable file system "vnode interposition" action.
++ *
++ * @sb: unionfs's super_block
++ */
++struct dentry *unionfs_interpose(struct dentry *dentry, struct super_block *sb,
++ int flag)
++{
++ int err = 0;
++ struct inode *inode;
++ int need_fill_inode = 1;
++ struct dentry *spliced = NULL;
++
++ verify_locked(dentry);
++
++ /*
++ * We allocate our new inode below by calling unionfs_iget,
++ * which will initialize some of the new inode's fields
++ */
++
++ /*
++ * On revalidate we've already got our own inode and just need
++ * to fix it up.
++ */
++ if (flag == INTERPOSE_REVAL) {
++ inode = dentry->d_inode;
++ UNIONFS_I(inode)->bstart = -1;
++ UNIONFS_I(inode)->bend = -1;
++ atomic_set(&UNIONFS_I(inode)->generation,
++ atomic_read(&UNIONFS_SB(sb)->generation));
++
++ UNIONFS_I(inode)->lower_inodes =
++ kcalloc(sbmax(sb), sizeof(struct inode *), GFP_KERNEL);
++ if (unlikely(!UNIONFS_I(inode)->lower_inodes)) {
++ err = -ENOMEM;
++ goto out;
++ }
++ } else {
++ /* get unique inode number for unionfs */
++ inode = unionfs_iget(sb, iunique(sb, UNIONFS_ROOT_INO));
++ if (IS_ERR(inode)) {
++ err = PTR_ERR(inode);
++ goto out;
++ }
++ if (atomic_read(&inode->i_count) > 1)
++ goto skip;
++ }
++
++ need_fill_inode = 0;
++ unionfs_fill_inode(dentry, inode);
++
++skip:
++ /* only (our) lookup wants to do a d_add */
++ switch (flag) {
++ case INTERPOSE_DEFAULT:
++ /* for operations which create new inodes */
++ d_add(dentry, inode);
++ break;
++ case INTERPOSE_REVAL_NEG:
++ d_instantiate(dentry, inode);
++ break;
++ case INTERPOSE_LOOKUP:
++ spliced = d_splice_alias(inode, dentry);
++ if (spliced && spliced != dentry) {
++ /*
++ * d_splice can return a dentry if it was
++ * disconnected and had to be moved. We must ensure
++ * that the private data of the new dentry is
++ * correct and that the inode info was filled
++ * properly. Finally we must return this new
++ * dentry.
++ */
++ spliced->d_op = &unionfs_dops;
++ spliced->d_fsdata = dentry->d_fsdata;
++ dentry->d_fsdata = NULL;
++ dentry = spliced;
++ if (need_fill_inode) {
++ need_fill_inode = 0;
++ unionfs_fill_inode(dentry, inode);
++ }
++ goto out_spliced;
++ } else if (!spliced) {
++ if (need_fill_inode) {
++ need_fill_inode = 0;
++ unionfs_fill_inode(dentry, inode);
++ goto out_spliced;
++ }
++ }
++ break;
++ case INTERPOSE_REVAL:
++ /* Do nothing. */
++ break;
++ default:
++ printk(KERN_CRIT "unionfs: invalid interpose flag passed!\n");
++ BUG();
++ }
++ goto out;
++
++out_spliced:
++ if (!err)
++ return spliced;
++out:
++ return ERR_PTR(err);
++}
++
++/* like interpose above, but for an already existing dentry */
++void unionfs_reinterpose(struct dentry *dentry)
++{
++ struct dentry *lower_dentry;
++ struct inode *inode;
++ int bindex, bstart, bend;
++
++ verify_locked(dentry);
++
++ /* This is pre-allocated inode */
++ inode = dentry->d_inode;
++
++ bstart = dbstart(dentry);
++ bend = dbend(dentry);
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
++ if (!lower_dentry)
++ continue;
++
++ if (!lower_dentry->d_inode)
++ continue;
++ if (unionfs_lower_inode_idx(inode, bindex))
++ continue;
++ unionfs_set_lower_inode_idx(inode, bindex,
++ igrab(lower_dentry->d_inode));
++ }
++ ibstart(inode) = dbstart(dentry);
++ ibend(inode) = dbend(dentry);
++}
++
++/*
++ * make sure the branch we just looked up (nd) makes sense:
++ *
++ * 1) we're not trying to stack unionfs on top of unionfs
++ * 2) it exists
++ * 3) is a directory
++ */
++int check_branch(struct nameidata *nd)
++{
++ /* XXX: remove in ODF code -- stacking unions allowed there */
++ if (!strcmp(nd->path.dentry->d_sb->s_type->name, UNIONFS_NAME))
++ return -EINVAL;
++ if (!nd->path.dentry->d_inode)
++ return -ENOENT;
++ if (!S_ISDIR(nd->path.dentry->d_inode->i_mode))
++ return -ENOTDIR;
++ return 0;
++}
++
++/* checks if two lower_dentries have overlapping branches */
++static int is_branch_overlap(struct dentry *dent1, struct dentry *dent2)
++{
++ struct dentry *dent = NULL;
++
++ dent = dent1;
++ while ((dent != dent2) && (dent->d_parent != dent))
++ dent = dent->d_parent;
++
++ if (dent == dent2)
++ return 1;
++
++ dent = dent2;
++ while ((dent != dent1) && (dent->d_parent != dent))
++ dent = dent->d_parent;
++
++ return (dent == dent1);
++}
++
++/*
++ * Parse "ro" or "rw" options, but default to "rw" if no mode option was
++ * specified. Fill the mode bits in @perms. If we encounter an unknown
++ * string, return -EINVAL; otherwise return 0.
++ */
++int parse_branch_mode(const char *name, int *perms)
++{
++ if (!name || !strcmp(name, "rw")) {
++ *perms = MAY_READ | MAY_WRITE;
++ return 0;
++ }
++ if (!strcmp(name, "ro")) {
++ *perms = MAY_READ;
++ return 0;
++ }
++ return -EINVAL;
++}
++
++/*
++ * parse the dirs= mount argument
++ *
++ * We don't need to lock the superblock private data's rwsem, as we get
++ * called only by unionfs_read_super - it is still a long time before anyone
++ * can even get a reference to us.
++ */
++static int parse_dirs_option(struct super_block *sb, struct unionfs_dentry_info
++ *lower_root_info, char *options)
++{
++ struct nameidata nd;
++ char *name;
++ int err = 0;
++ int branches = 1;
++ int bindex = 0;
++ int i = 0;
++ int j = 0;
++ struct dentry *dent1;
++ struct dentry *dent2;
++
++ if (options[0] == '\0') {
++ printk(KERN_ERR "unionfs: no branches specified\n");
++ err = -EINVAL;
++ goto out;
++ }
++
++ /*
++	 * Each colon means we have a separator; this is really just a rough
++	 * guess, since strsep will handle empty fields for us.
++ */
++ for (i = 0; options[i]; i++)
++ if (options[i] == ':')
++ branches++;
++
++ /* allocate space for underlying pointers to lower dentry */
++ UNIONFS_SB(sb)->data =
++ kcalloc(branches, sizeof(struct unionfs_data), GFP_KERNEL);
++ if (unlikely(!UNIONFS_SB(sb)->data)) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ lower_root_info->lower_paths =
++ kcalloc(branches, sizeof(struct path), GFP_KERNEL);
++ if (unlikely(!lower_root_info->lower_paths)) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ /* now parsing a string such as "b1:b2=rw:b3=ro:b4" */
++ branches = 0;
++ while ((name = strsep(&options, ":")) != NULL) {
++ int perms;
++ char *mode = strchr(name, '=');
++
++ if (!name)
++ continue;
++ if (!*name) { /* bad use of ':' (extra colons) */
++ err = -EINVAL;
++ goto out;
++ }
++
++ branches++;
++
++ /* strip off '=' if any */
++ if (mode)
++ *mode++ = '\0';
++
++ err = parse_branch_mode(mode, &perms);
++ if (err) {
++ printk(KERN_ERR "unionfs: invalid mode \"%s\" for "
++ "branch %d\n", mode, bindex);
++ goto out;
++ }
++ /* ensure that leftmost branch is writeable */
++ if (!bindex && !(perms & MAY_WRITE)) {
++ printk(KERN_ERR "unionfs: leftmost branch cannot be "
++ "read-only (use \"-o ro\" to create a "
++ "read-only union)\n");
++ err = -EINVAL;
++ goto out;
++ }
++
++ err = path_lookup(name, LOOKUP_FOLLOW, &nd);
++ if (err) {
++ printk(KERN_ERR "unionfs: error accessing "
++ "lower directory '%s' (error %d)\n",
++ name, err);
++ goto out;
++ }
++
++ err = check_branch(&nd);
++ if (err) {
++ printk(KERN_ERR "unionfs: lower directory "
++ "'%s' is not a valid branch\n", name);
++ path_put(&nd.path);
++ goto out;
++ }
++
++ lower_root_info->lower_paths[bindex].dentry = nd.path.dentry;
++ lower_root_info->lower_paths[bindex].mnt = nd.path.mnt;
++
++ set_branchperms(sb, bindex, perms);
++ set_branch_count(sb, bindex, 0);
++ new_branch_id(sb, bindex);
++
++ if (lower_root_info->bstart < 0)
++ lower_root_info->bstart = bindex;
++ lower_root_info->bend = bindex;
++ bindex++;
++ }
++
++ if (branches == 0) {
++ printk(KERN_ERR "unionfs: no branches specified\n");
++ err = -EINVAL;
++ goto out;
++ }
++
++ BUG_ON(branches != (lower_root_info->bend + 1));
++
++ /*
++ * Ensure that no overlaps exist in the branches.
++ *
++ * This test is required because the Linux kernel has no support
++ * currently for ensuring coherency between stackable layers and
++ * branches. If we were to allow overlapping branches, it would be
++ * possible, for example, to delete a file via one branch, which
++ * would not be reflected in another branch. Such incoherency could
++ * lead to inconsistencies and even kernel oopses. Rather than
++ * implement hacks to work around some of these cache-coherency
++ * problems, we prevent branch overlapping, for now. A complete
++ * solution will involve proper kernel/VFS support for cache
++ * coherency, at which time we could safely remove this
++ * branch-overlapping test.
++ */
++ for (i = 0; i < branches; i++) {
++ dent1 = lower_root_info->lower_paths[i].dentry;
++ for (j = i + 1; j < branches; j++) {
++ dent2 = lower_root_info->lower_paths[j].dentry;
++ if (is_branch_overlap(dent1, dent2)) {
++ printk(KERN_ERR "unionfs: branches %d and "
++ "%d overlap\n", i, j);
++ err = -EINVAL;
++ goto out;
++ }
++ }
++ }
++
++out:
++ if (err) {
++ for (i = 0; i < branches; i++)
++ path_put(&lower_root_info->lower_paths[i]);
++
++ kfree(lower_root_info->lower_paths);
++ kfree(UNIONFS_SB(sb)->data);
++
++ /*
++ * MUST clear the pointers to prevent potential double free if
++ * the caller dies later on
++ */
++ lower_root_info->lower_paths = NULL;
++ UNIONFS_SB(sb)->data = NULL;
++ }
++ return err;
++}
++
++/*
++ * Parse mount options. See the manual page for usage instructions.
++ *
++ * Returns the dentry object of the lower-level (lower) directory;
++ * we want to mount our stackable file system on top of that lower directory.
++ */
++static struct unionfs_dentry_info *unionfs_parse_options(
++ struct super_block *sb,
++ char *options)
++{
++ struct unionfs_dentry_info *lower_root_info;
++ char *optname;
++ int err = 0;
++ int bindex;
++ int dirsfound = 0;
++
++ /* allocate private data area */
++ err = -ENOMEM;
++ lower_root_info =
++ kzalloc(sizeof(struct unionfs_dentry_info), GFP_KERNEL);
++ if (unlikely(!lower_root_info))
++ goto out_error;
++ lower_root_info->bstart = -1;
++ lower_root_info->bend = -1;
++ lower_root_info->bopaque = -1;
++
++ while ((optname = strsep(&options, ",")) != NULL) {
++ char *optarg;
++
++ if (!optname || !*optname)
++ continue;
++
++ optarg = strchr(optname, '=');
++ if (optarg)
++ *optarg++ = '\0';
++
++ /*
++ * All of our options take an argument now. Insert ones that
++ * don't, above this check.
++ */
++ if (!optarg) {
++ printk(KERN_ERR "unionfs: %s requires an argument\n",
++ optname);
++ err = -EINVAL;
++ goto out_error;
++ }
++
++ if (!strcmp("dirs", optname)) {
++ if (++dirsfound > 1) {
++ printk(KERN_ERR
++ "unionfs: multiple dirs specified\n");
++ err = -EINVAL;
++ goto out_error;
++ }
++ err = parse_dirs_option(sb, lower_root_info, optarg);
++ if (err)
++ goto out_error;
++ continue;
++ }
++
++ err = -EINVAL;
++ printk(KERN_ERR
++ "unionfs: unrecognized option '%s'\n", optname);
++ goto out_error;
++ }
++ if (dirsfound != 1) {
++ printk(KERN_ERR "unionfs: dirs option required\n");
++ err = -EINVAL;
++ goto out_error;
++ }
++ goto out;
++
++out_error:
++ if (lower_root_info && lower_root_info->lower_paths) {
++ for (bindex = lower_root_info->bstart;
++ bindex >= 0 && bindex <= lower_root_info->bend;
++ bindex++)
++ path_put(&lower_root_info->lower_paths[bindex]);
++ }
++
++ kfree(lower_root_info->lower_paths);
++ kfree(lower_root_info);
++
++ kfree(UNIONFS_SB(sb)->data);
++ UNIONFS_SB(sb)->data = NULL;
++
++ lower_root_info = ERR_PTR(err);
++out:
++ return lower_root_info;
++}
++
++/*
++ * our custom d_alloc_root work-alike
++ *
++ * we can't use d_alloc_root if we want to use our own interpose function
++ * unchanged, so we simply call our own "fake" d_alloc_root
++ */
++static struct dentry *unionfs_d_alloc_root(struct super_block *sb)
++{
++ struct dentry *ret = NULL;
++
++ if (sb) {
++ static const struct qstr name = {
++ .name = "/",
++ .len = 1
++ };
++
++ ret = d_alloc(NULL, &name);
++ if (likely(ret)) {
++ ret->d_op = &unionfs_dops;
++ ret->d_sb = sb;
++ ret->d_parent = ret;
++ }
++ }
++ return ret;
++}
++
++/*
++ * There is no need to lock the unionfs_super_info's rwsem as there is no
++ * way anyone can have a reference to the superblock at this point in time.
++ */
++static int unionfs_read_super(struct super_block *sb, void *raw_data,
++ int silent)
++{
++ int err = 0;
++ struct unionfs_dentry_info *lower_root_info = NULL;
++ int bindex, bstart, bend;
++
++ if (!raw_data) {
++ printk(KERN_ERR
++ "unionfs: read_super: missing data argument\n");
++ err = -EINVAL;
++ goto out;
++ }
++
++ /* Allocate superblock private data */
++ sb->s_fs_info = kzalloc(sizeof(struct unionfs_sb_info), GFP_KERNEL);
++ if (unlikely(!UNIONFS_SB(sb))) {
++ printk(KERN_CRIT "unionfs: read_super: out of memory\n");
++ err = -ENOMEM;
++ goto out;
++ }
++
++ UNIONFS_SB(sb)->bend = -1;
++ atomic_set(&UNIONFS_SB(sb)->generation, 1);
++ init_rwsem(&UNIONFS_SB(sb)->rwsem);
++ UNIONFS_SB(sb)->high_branch_id = -1; /* -1 == invalid branch ID */
++
++ lower_root_info = unionfs_parse_options(sb, raw_data);
++ if (IS_ERR(lower_root_info)) {
++ printk(KERN_ERR
++ "unionfs: read_super: error while parsing options "
++ "(err = %ld)\n", PTR_ERR(lower_root_info));
++ err = PTR_ERR(lower_root_info);
++ lower_root_info = NULL;
++ goto out_free;
++ }
++ if (lower_root_info->bstart == -1) {
++ err = -ENOENT;
++ goto out_free;
++ }
++
++ /* set the lower superblock field of upper superblock */
++ bstart = lower_root_info->bstart;
++ BUG_ON(bstart != 0);
++ sbend(sb) = bend = lower_root_info->bend;
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ struct dentry *d = lower_root_info->lower_paths[bindex].dentry;
++ atomic_inc(&d->d_sb->s_active);
++ unionfs_set_lower_super_idx(sb, bindex, d->d_sb);
++ }
++
++	/* s_maxbytes is the maximum bytes from the highest priority branch */
++ sb->s_maxbytes = unionfs_lower_super_idx(sb, 0)->s_maxbytes;
++
++ /*
++ * Our c/m/atime granularity is 1 ns because we may stack on file
++ * systems whose granularity is as good. This is important for our
++ * time-based cache coherency.
++ */
++ sb->s_time_gran = 1;
++
++ sb->s_op = &unionfs_sops;
++
++ /* See comment next to the definition of unionfs_d_alloc_root */
++ sb->s_root = unionfs_d_alloc_root(sb);
++ if (unlikely(!sb->s_root)) {
++ err = -ENOMEM;
++ goto out_dput;
++ }
++
++ /* link the upper and lower dentries */
++ sb->s_root->d_fsdata = NULL;
++ err = new_dentry_private_data(sb->s_root, UNIONFS_DMUTEX_ROOT);
++ if (unlikely(err))
++ goto out_freedpd;
++
++ /* Set the lower dentries for s_root */
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ struct dentry *d;
++ struct vfsmount *m;
++
++ d = lower_root_info->lower_paths[bindex].dentry;
++ m = lower_root_info->lower_paths[bindex].mnt;
++
++ unionfs_set_lower_dentry_idx(sb->s_root, bindex, d);
++ unionfs_set_lower_mnt_idx(sb->s_root, bindex, m);
++ }
++ dbstart(sb->s_root) = bstart;
++ dbend(sb->s_root) = bend;
++
++ /* Set the generation number to one, since this is for the mount. */
++ atomic_set(&UNIONFS_D(sb->s_root)->generation, 1);
++
++ /*
++ * Call interpose to create the upper level inode. Only
++ * INTERPOSE_LOOKUP can return a value other than 0 on err.
++ */
++ err = PTR_ERR(unionfs_interpose(sb->s_root, sb, 0));
++ unionfs_unlock_dentry(sb->s_root);
++ if (!err)
++ goto out;
++ /* else fall through */
++
++out_freedpd:
++ if (UNIONFS_D(sb->s_root)) {
++ kfree(UNIONFS_D(sb->s_root)->lower_paths);
++ free_dentry_private_data(sb->s_root);
++ }
++ dput(sb->s_root);
++
++out_dput:
++ if (lower_root_info && !IS_ERR(lower_root_info)) {
++ for (bindex = lower_root_info->bstart;
++ bindex <= lower_root_info->bend; bindex++) {
++ struct dentry *d;
++ d = lower_root_info->lower_paths[bindex].dentry;
++ /* drop refs we took earlier */
++ atomic_dec(&d->d_sb->s_active);
++ path_put(&lower_root_info->lower_paths[bindex]);
++ }
++ kfree(lower_root_info->lower_paths);
++ kfree(lower_root_info);
++ lower_root_info = NULL;
++ }
++
++out_free:
++ kfree(UNIONFS_SB(sb)->data);
++ kfree(UNIONFS_SB(sb));
++ sb->s_fs_info = NULL;
++
++out:
++ if (lower_root_info && !IS_ERR(lower_root_info)) {
++ kfree(lower_root_info->lower_paths);
++ kfree(lower_root_info);
++ }
++ return err;
++}
++
++static int unionfs_get_sb(struct file_system_type *fs_type,
++ int flags, const char *dev_name,
++ void *raw_data, struct vfsmount *mnt)
++{
++ int err;
++ err = get_sb_nodev(fs_type, flags, raw_data, unionfs_read_super, mnt);
++ if (!err)
++ UNIONFS_SB(mnt->mnt_sb)->dev_name =
++ kstrdup(dev_name, GFP_KERNEL);
++ return err;
++}
++
++static struct file_system_type unionfs_fs_type = {
++ .owner = THIS_MODULE,
++ .name = UNIONFS_NAME,
++ .get_sb = unionfs_get_sb,
++ .kill_sb = generic_shutdown_super,
++ .fs_flags = FS_REVAL_DOT,
++};
++
++static int __init init_unionfs_fs(void)
++{
++ int err;
++
++ pr_info("Registering unionfs " UNIONFS_VERSION "\n");
++
++ err = unionfs_init_filldir_cache();
++ if (unlikely(err))
++ goto out;
++ err = unionfs_init_inode_cache();
++ if (unlikely(err))
++ goto out;
++ err = unionfs_init_dentry_cache();
++ if (unlikely(err))
++ goto out;
++ err = init_sioq();
++ if (unlikely(err))
++ goto out;
++ err = register_filesystem(&unionfs_fs_type);
++out:
++ if (unlikely(err)) {
++ stop_sioq();
++ unionfs_destroy_filldir_cache();
++ unionfs_destroy_inode_cache();
++ unionfs_destroy_dentry_cache();
++ }
++ return err;
++}
++
++static void __exit exit_unionfs_fs(void)
++{
++ stop_sioq();
++ unionfs_destroy_filldir_cache();
++ unionfs_destroy_inode_cache();
++ unionfs_destroy_dentry_cache();
++ unregister_filesystem(&unionfs_fs_type);
++ pr_info("Completed unionfs module unload\n");
++}
++
++MODULE_AUTHOR("Erez Zadok, Filesystems and Storage Lab, Stony Brook University"
++ " (http://www.fsl.cs.sunysb.edu)");
++MODULE_DESCRIPTION("Unionfs " UNIONFS_VERSION
++ " (http://unionfs.filesystems.org)");
++MODULE_LICENSE("GPL");
++
++module_init(init_unionfs_fs);
++module_exit(exit_unionfs_fs);
+diff --git a/fs/unionfs/mmap.c b/fs/unionfs/mmap.c
+new file mode 100644
+index 0000000..1f70535
+--- /dev/null
++++ b/fs/unionfs/mmap.c
+@@ -0,0 +1,89 @@
++/*
++ * Copyright (c) 2003-2010 Erez Zadok
++ * Copyright (c) 2003-2006 Charles P. Wright
++ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2005-2006 Junjiro Okajima
++ * Copyright (c) 2006 Shaya Potter
++ * Copyright (c) 2005 Arun M. Krishnakumar
++ * Copyright (c) 2004-2006 David P. Quigley
++ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
++ * Copyright (c) 2003 Puja Gupta
++ * Copyright (c) 2003 Harikesavan Krishnan
++ * Copyright (c) 2003-2010 Stony Brook University
++ * Copyright (c) 2003-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "union.h"
++
++
++/*
++ * XXX: we need a dummy readpage handler because generic_file_mmap (which we
++ * use in unionfs_mmap) checks for the existence of
++ * mapping->a_ops->readpage, else it returns -ENOEXEC. The VFS will need to
++ * be fixed to allow a file system to define vm_ops->fault without any
++ * address_space_ops whatsoever.
++ *
++ * Otherwise, we don't want to use our readpage method at all.
++ */
++static int unionfs_readpage(struct file *file, struct page *page)
++{
++ BUG();
++ return -EINVAL;
++}
++
++static int unionfs_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ int err;
++ struct file *file, *lower_file;
++ const struct vm_operations_struct *lower_vm_ops;
++ struct vm_area_struct lower_vma;
++
++ BUG_ON(!vma);
++ memcpy(&lower_vma, vma, sizeof(struct vm_area_struct));
++ file = lower_vma.vm_file;
++ lower_vm_ops = UNIONFS_F(file)->lower_vm_ops;
++ BUG_ON(!lower_vm_ops);
++
++ lower_file = unionfs_lower_file(file);
++ BUG_ON(!lower_file);
++ /*
++ * XXX: vm_ops->fault may be called in parallel. Because we have to
++ * resort to temporarily changing the vma->vm_file to point to the
++ * lower file, a concurrent invocation of unionfs_fault could see a
++ * different value. In this workaround, we keep a different copy of
++	 * the vma structure on our stack, so we never expose a different
++	 * value of vma->vm_file than the one passed to us, even temporarily.
++	 * A better fix would be to change the calling semantics of ->fault to
++ * take an explicit file pointer.
++ */
++ lower_vma.vm_file = lower_file;
++ err = lower_vm_ops->fault(&lower_vma, vmf);
++ return err;
++}
++
++/*
++ * XXX: the default address_space_ops for unionfs is empty. We cannot set
++ * our inode->i_mapping->a_ops to NULL because too many code paths expect
++ * the a_ops vector to be non-NULL.
++ */
++struct address_space_operations unionfs_aops = {
++ /* empty on purpose */
++};
++
++/*
++ * XXX: we need a second, dummy address_space_ops vector, to be used
++ * temporarily during unionfs_mmap, because the latter calls
++ * generic_file_mmap, which checks if ->readpage exists, else returns
++ * -ENOEXEC.
++ */
++struct address_space_operations unionfs_dummy_aops = {
++ .readpage = unionfs_readpage,
++};
++
++struct vm_operations_struct unionfs_vm_ops = {
++ .fault = unionfs_fault,
++};
+diff --git a/fs/unionfs/rdstate.c b/fs/unionfs/rdstate.c
+new file mode 100644
+index 0000000..f745fbc
+--- /dev/null
++++ b/fs/unionfs/rdstate.c
+@@ -0,0 +1,285 @@
++/*
++ * Copyright (c) 2003-2010 Erez Zadok
++ * Copyright (c) 2003-2006 Charles P. Wright
++ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2005-2006 Junjiro Okajima
++ * Copyright (c) 2005 Arun M. Krishnakumar
++ * Copyright (c) 2004-2006 David P. Quigley
++ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
++ * Copyright (c) 2003 Puja Gupta
++ * Copyright (c) 2003 Harikesavan Krishnan
++ * Copyright (c) 2003-2010 Stony Brook University
++ * Copyright (c) 2003-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "union.h"
++
++/* This file contains the routines for maintaining readdir state. */
++
++/*
++ * There are two structures here: rdstate, which is a hash table
++ * of the second structure, the filldir_node.
++ */
++
++/*
++ * This is a struct kmem_cache for filldir nodes, because we allocate a lot
++ * of them and they shouldn't waste memory. If the node has a small name
++ * (as defined by the dentry structure), then we use an inline name to
++ * preserve kmalloc space.
++ */
++static struct kmem_cache *unionfs_filldir_cachep;
++
++int unionfs_init_filldir_cache(void)
++{
++ unionfs_filldir_cachep =
++ kmem_cache_create("unionfs_filldir",
++ sizeof(struct filldir_node), 0,
++ SLAB_RECLAIM_ACCOUNT, NULL);
++
++ return (unionfs_filldir_cachep ? 0 : -ENOMEM);
++}
++
++void unionfs_destroy_filldir_cache(void)
++{
++ if (unionfs_filldir_cachep)
++ kmem_cache_destroy(unionfs_filldir_cachep);
++}
++
++/*
++ * This is a tuning parameter that tells us roughly how big to make the
++ * hash table in directory entries per page. This isn't perfect, but
++ * at least we get a hash table size that shouldn't be too overloaded.
++ * The following averages are based on my home directory.
++ * 14.44693 Overall
++ * 12.29 Single Page Directories
++ * 117.93 Multi-page directories
++ */
++#define DENTPAGE 4096
++#define DENTPERONEPAGE 12
++#define DENTPERPAGE 118
++#define MINHASHSIZE 1
++static int guesstimate_hash_size(struct inode *inode)
++{
++ struct inode *lower_inode;
++ int bindex;
++ int hashsize = MINHASHSIZE;
++
++ if (UNIONFS_I(inode)->hashsize > 0)
++ return UNIONFS_I(inode)->hashsize;
++
++ for (bindex = ibstart(inode); bindex <= ibend(inode); bindex++) {
++ lower_inode = unionfs_lower_inode_idx(inode, bindex);
++ if (!lower_inode)
++ continue;
++
++ if (i_size_read(lower_inode) == DENTPAGE)
++ hashsize += DENTPERONEPAGE;
++ else
++ hashsize += (i_size_read(lower_inode) / DENTPAGE) *
++ DENTPERPAGE;
++ }
++
++ return hashsize;
++}
++
++int init_rdstate(struct file *file)
++{
++ BUG_ON(sizeof(loff_t) !=
++ (sizeof(unsigned int) + sizeof(unsigned int)));
++ BUG_ON(UNIONFS_F(file)->rdstate != NULL);
++
++ UNIONFS_F(file)->rdstate = alloc_rdstate(file->f_path.dentry->d_inode,
++ fbstart(file));
++
++ return (UNIONFS_F(file)->rdstate ? 0 : -ENOMEM);
++}
++
++struct unionfs_dir_state *find_rdstate(struct inode *inode, loff_t fpos)
++{
++ struct unionfs_dir_state *rdstate = NULL;
++ struct list_head *pos;
++
++ spin_lock(&UNIONFS_I(inode)->rdlock);
++ list_for_each(pos, &UNIONFS_I(inode)->readdircache) {
++ struct unionfs_dir_state *r =
++ list_entry(pos, struct unionfs_dir_state, cache);
++ if (fpos == rdstate2offset(r)) {
++ UNIONFS_I(inode)->rdcount--;
++ list_del(&r->cache);
++ rdstate = r;
++ break;
++ }
++ }
++ spin_unlock(&UNIONFS_I(inode)->rdlock);
++ return rdstate;
++}
++
++struct unionfs_dir_state *alloc_rdstate(struct inode *inode, int bindex)
++{
++ int i = 0;
++ int hashsize;
++ unsigned long mallocsize = sizeof(struct unionfs_dir_state);
++ struct unionfs_dir_state *rdstate;
++
++ hashsize = guesstimate_hash_size(inode);
++ mallocsize += hashsize * sizeof(struct list_head);
++ mallocsize = __roundup_pow_of_two(mallocsize);
++
++ /* This should give us about 500 entries anyway. */
++ if (mallocsize > PAGE_SIZE)
++ mallocsize = PAGE_SIZE;
++
++ hashsize = (mallocsize - sizeof(struct unionfs_dir_state)) /
++ sizeof(struct list_head);
++
++ rdstate = kmalloc(mallocsize, GFP_KERNEL);
++ if (unlikely(!rdstate))
++ return NULL;
++
++ spin_lock(&UNIONFS_I(inode)->rdlock);
++ if (UNIONFS_I(inode)->cookie >= (MAXRDCOOKIE - 1))
++ UNIONFS_I(inode)->cookie = 1;
++ else
++ UNIONFS_I(inode)->cookie++;
++
++ rdstate->cookie = UNIONFS_I(inode)->cookie;
++ spin_unlock(&UNIONFS_I(inode)->rdlock);
++ rdstate->offset = 1;
++ rdstate->access = jiffies;
++ rdstate->bindex = bindex;
++ rdstate->dirpos = 0;
++ rdstate->hashentries = 0;
++ rdstate->size = hashsize;
++ for (i = 0; i < rdstate->size; i++)
++ INIT_LIST_HEAD(&rdstate->list[i]);
++
++ return rdstate;
++}
++
++static void free_filldir_node(struct filldir_node *node)
++{
++ if (node->namelen >= DNAME_INLINE_LEN_MIN)
++ kfree(node->name);
++ kmem_cache_free(unionfs_filldir_cachep, node);
++}
++
++void free_rdstate(struct unionfs_dir_state *state)
++{
++ struct filldir_node *tmp;
++ int i;
++
++ for (i = 0; i < state->size; i++) {
++ struct list_head *head = &(state->list[i]);
++ struct list_head *pos, *n;
++
++ /* traverse the list and deallocate space */
++ list_for_each_safe(pos, n, head) {
++ tmp = list_entry(pos, struct filldir_node, file_list);
++ list_del(&tmp->file_list);
++ free_filldir_node(tmp);
++ }
++ }
++
++ kfree(state);
++}
++
++struct filldir_node *find_filldir_node(struct unionfs_dir_state *rdstate,
++ const char *name, int namelen,
++ int is_whiteout)
++{
++ int index;
++ unsigned int hash;
++ struct list_head *head;
++ struct list_head *pos;
++ struct filldir_node *cursor = NULL;
++ int found = 0;
++
++ BUG_ON(namelen <= 0);
++
++ hash = full_name_hash(name, namelen);
++ index = hash % rdstate->size;
++
++ head = &(rdstate->list[index]);
++ list_for_each(pos, head) {
++ cursor = list_entry(pos, struct filldir_node, file_list);
++
++ if (cursor->namelen == namelen && cursor->hash == hash &&
++ !strncmp(cursor->name, name, namelen)) {
++ /*
++			 * a duplicate exists, and hence there is no need to
++			 * add an entry to the list
++ */
++ found = 1;
++
++ /*
++ * if a duplicate is found in this branch, and is
++ * not due to the caller looking for an entry to
++ * whiteout, then the file system may be corrupted.
++ */
++ if (unlikely(!is_whiteout &&
++ cursor->bindex == rdstate->bindex))
++ printk(KERN_ERR "unionfs: filldir: possible "
++ "I/O error: a file is duplicated "
++ "in the same branch %d: %s\n",
++ rdstate->bindex, cursor->name);
++ break;
++ }
++ }
++
++ if (!found)
++ cursor = NULL;
++
++ return cursor;
++}
++
++int add_filldir_node(struct unionfs_dir_state *rdstate, const char *name,
++ int namelen, int bindex, int whiteout)
++{
++ struct filldir_node *new;
++ unsigned int hash;
++ int index;
++ int err = 0;
++ struct list_head *head;
++
++ BUG_ON(namelen <= 0);
++
++ hash = full_name_hash(name, namelen);
++ index = hash % rdstate->size;
++ head = &(rdstate->list[index]);
++
++ new = kmem_cache_alloc(unionfs_filldir_cachep, GFP_KERNEL);
++ if (unlikely(!new)) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ INIT_LIST_HEAD(&new->file_list);
++ new->namelen = namelen;
++ new->hash = hash;
++ new->bindex = bindex;
++ new->whiteout = whiteout;
++
++ if (namelen < DNAME_INLINE_LEN_MIN) {
++ new->name = new->iname;
++ } else {
++ new->name = kmalloc(namelen + 1, GFP_KERNEL);
++ if (unlikely(!new->name)) {
++ kmem_cache_free(unionfs_filldir_cachep, new);
++ new = NULL;
++ goto out;
++ }
++ }
++
++ memcpy(new->name, name, namelen);
++ new->name[namelen] = '\0';
++
++ rdstate->hashentries++;
++
++ list_add(&(new->file_list), head);
++out:
++ return err;
++}
+diff --git a/fs/unionfs/rename.c b/fs/unionfs/rename.c
+new file mode 100644
+index 0000000..936700e
+--- /dev/null
++++ b/fs/unionfs/rename.c
+@@ -0,0 +1,517 @@
++/*
++ * Copyright (c) 2003-2010 Erez Zadok
++ * Copyright (c) 2003-2006 Charles P. Wright
++ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2005-2006 Junjiro Okajima
++ * Copyright (c) 2005 Arun M. Krishnakumar
++ * Copyright (c) 2004-2006 David P. Quigley
++ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
++ * Copyright (c) 2003 Puja Gupta
++ * Copyright (c) 2003 Harikesavan Krishnan
++ * Copyright (c) 2003-2010 Stony Brook University
++ * Copyright (c) 2003-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "union.h"
++
++/*
++ * This is a helper function for rename, used when rename ends up with hosed
++ * over dentries and we need to revert.
++ */
++static int unionfs_refresh_lower_dentry(struct dentry *dentry,
++ struct dentry *parent, int bindex)
++{
++ struct dentry *lower_dentry;
++ struct dentry *lower_parent;
++ int err = 0;
++
++ verify_locked(dentry);
++
++ lower_parent = unionfs_lower_dentry_idx(parent, bindex);
++
++ BUG_ON(!S_ISDIR(lower_parent->d_inode->i_mode));
++
++ lower_dentry = lookup_one_len(dentry->d_name.name, lower_parent,
++ dentry->d_name.len);
++ if (IS_ERR(lower_dentry)) {
++ err = PTR_ERR(lower_dentry);
++ goto out;
++ }
++
++ dput(unionfs_lower_dentry_idx(dentry, bindex));
++ iput(unionfs_lower_inode_idx(dentry->d_inode, bindex));
++ unionfs_set_lower_inode_idx(dentry->d_inode, bindex, NULL);
++
++ if (!lower_dentry->d_inode) {
++ dput(lower_dentry);
++ unionfs_set_lower_dentry_idx(dentry, bindex, NULL);
++ } else {
++ unionfs_set_lower_dentry_idx(dentry, bindex, lower_dentry);
++ unionfs_set_lower_inode_idx(dentry->d_inode, bindex,
++ igrab(lower_dentry->d_inode));
++ }
++
++out:
++ return err;
++}
++
++static int __unionfs_rename(struct inode *old_dir, struct dentry *old_dentry,
++ struct dentry *old_parent,
++ struct inode *new_dir, struct dentry *new_dentry,
++ struct dentry *new_parent,
++ int bindex)
++{
++ int err = 0;
++ struct dentry *lower_old_dentry;
++ struct dentry *lower_new_dentry;
++ struct dentry *lower_old_dir_dentry;
++ struct dentry *lower_new_dir_dentry;
++ struct dentry *trap;
++
++ lower_new_dentry = unionfs_lower_dentry_idx(new_dentry, bindex);
++ lower_old_dentry = unionfs_lower_dentry_idx(old_dentry, bindex);
++
++ if (!lower_new_dentry) {
++ lower_new_dentry =
++ create_parents(new_parent->d_inode,
++ new_dentry, new_dentry->d_name.name,
++ bindex);
++ if (IS_ERR(lower_new_dentry)) {
++ err = PTR_ERR(lower_new_dentry);
++ if (IS_COPYUP_ERR(err))
++ goto out;
++ printk(KERN_ERR "unionfs: error creating directory "
++ "tree for rename, bindex=%d err=%d\n",
++ bindex, err);
++ goto out;
++ }
++ }
++
++ /* check for and remove whiteout, if any */
++ err = check_unlink_whiteout(new_dentry, lower_new_dentry, bindex);
++ if (err > 0) /* ignore if whiteout found and successfully removed */
++ err = 0;
++ if (err)
++ goto out;
++
++	/* check if old_dentry's branch is writable */
++ err = is_robranch_super(old_dentry->d_sb, bindex);
++ if (err)
++ goto out;
++
++ dget(lower_old_dentry);
++ dget(lower_new_dentry);
++ lower_old_dir_dentry = dget_parent(lower_old_dentry);
++ lower_new_dir_dentry = dget_parent(lower_new_dentry);
++
++ trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
++	/* source should not be an ancestor of target */
++ if (trap == lower_old_dentry) {
++ err = -EINVAL;
++ goto out_err_unlock;
++ }
++	/* target should not be an ancestor of source */
++ if (trap == lower_new_dentry) {
++ err = -ENOTEMPTY;
++ goto out_err_unlock;
++ }
++ err = vfs_rename(lower_old_dir_dentry->d_inode, lower_old_dentry,
++ lower_new_dir_dentry->d_inode, lower_new_dentry);
++out_err_unlock:
++ if (!err) {
++ /* update parent dir times */
++ fsstack_copy_attr_times(old_dir, lower_old_dir_dentry->d_inode);
++ fsstack_copy_attr_times(new_dir, lower_new_dir_dentry->d_inode);
++ }
++ unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
++
++ dput(lower_old_dir_dentry);
++ dput(lower_new_dir_dentry);
++ dput(lower_old_dentry);
++ dput(lower_new_dentry);
++
++out:
++ if (!err) {
++ /* Fixup the new_dentry. */
++ if (bindex < dbstart(new_dentry))
++ dbstart(new_dentry) = bindex;
++ else if (bindex > dbend(new_dentry))
++ dbend(new_dentry) = bindex;
++ }
++
++ return err;
++}
++
++/*
++ * Main rename code. This is sufficiently complex that it's documented in
++ * Documentation/filesystems/unionfs/rename.txt. This routine calls
++ * __unionfs_rename() above to perform some of the work.
++ */
++static int do_unionfs_rename(struct inode *old_dir,
++ struct dentry *old_dentry,
++ struct dentry *old_parent,
++ struct inode *new_dir,
++ struct dentry *new_dentry,
++ struct dentry *new_parent)
++{
++ int err = 0;
++ int bindex;
++ int old_bstart, old_bend;
++ int new_bstart, new_bend;
++ int do_copyup = -1;
++ int local_err = 0;
++ int eio = 0;
++ int revert = 0;
++
++ old_bstart = dbstart(old_dentry);
++ old_bend = dbend(old_dentry);
++
++ new_bstart = dbstart(new_dentry);
++ new_bend = dbend(new_dentry);
++
++ /* Rename source to destination. */
++ err = __unionfs_rename(old_dir, old_dentry, old_parent,
++ new_dir, new_dentry, new_parent,
++ old_bstart);
++ if (err) {
++ if (!IS_COPYUP_ERR(err))
++ goto out;
++ do_copyup = old_bstart - 1;
++ } else {
++ revert = 1;
++ }
++
++ /*
++ * Unlink all instances of destination that exist to the left of
++ * bstart of source. On error, revert back, goto out.
++ */
++ for (bindex = old_bstart - 1; bindex >= new_bstart; bindex--) {
++ struct dentry *unlink_dentry;
++ struct dentry *unlink_dir_dentry;
++
++ BUG_ON(bindex < 0);
++ unlink_dentry = unionfs_lower_dentry_idx(new_dentry, bindex);
++ if (!unlink_dentry)
++ continue;
++
++ unlink_dir_dentry = lock_parent(unlink_dentry);
++ err = is_robranch_super(old_dir->i_sb, bindex);
++ if (!err)
++ err = vfs_unlink(unlink_dir_dentry->d_inode,
++ unlink_dentry);
++
++ fsstack_copy_attr_times(new_parent->d_inode,
++ unlink_dir_dentry->d_inode);
++ /* propagate number of hard-links */
++ new_parent->d_inode->i_nlink =
++ unionfs_get_nlinks(new_parent->d_inode);
++
++ unlock_dir(unlink_dir_dentry);
++ if (!err) {
++ if (bindex != new_bstart) {
++ dput(unlink_dentry);
++ unionfs_set_lower_dentry_idx(new_dentry,
++ bindex, NULL);
++ }
++ } else if (IS_COPYUP_ERR(err)) {
++ do_copyup = bindex - 1;
++ } else if (revert) {
++ goto revert;
++ }
++ }
++
++ if (do_copyup != -1) {
++ for (bindex = do_copyup; bindex >= 0; bindex--) {
++ /*
++			 * copy up the file into some branch to the left, so
++			 * that we can rename it
++ */
++ err = copyup_dentry(old_parent->d_inode,
++ old_dentry, old_bstart, bindex,
++ old_dentry->d_name.name,
++ old_dentry->d_name.len, NULL,
++ i_size_read(old_dentry->d_inode));
++ /* if copyup failed, try next branch to the left */
++ if (err)
++ continue;
++ /*
++ * create whiteout before calling __unionfs_rename
++ * because the latter will change the old_dentry's
++ * lower name and parent dir, resulting in the
++ * whiteout getting created in the wrong dir.
++ */
++ err = create_whiteout(old_dentry, bindex);
++ if (err) {
++ printk(KERN_ERR "unionfs: can't create a "
++ "whiteout for %s in rename (err=%d)\n",
++ old_dentry->d_name.name, err);
++ continue;
++ }
++ err = __unionfs_rename(old_dir, old_dentry, old_parent,
++ new_dir, new_dentry, new_parent,
++ bindex);
++ break;
++ }
++ }
++
++ /* make it opaque */
++ if (S_ISDIR(old_dentry->d_inode->i_mode)) {
++ err = make_dir_opaque(old_dentry, dbstart(old_dentry));
++ if (err)
++ goto revert;
++ }
++
++ /*
++ * Create whiteout for source, only if:
++ * (1) There is more than one underlying instance of source.
++	 * (The case where we did a copy_up is taken care of above.)
++ */
++ if ((old_bstart != old_bend) && (do_copyup == -1)) {
++ err = create_whiteout(old_dentry, old_bstart);
++ if (err) {
++ /* can't fix anything now, so we exit with -EIO */
++ printk(KERN_ERR "unionfs: can't create a whiteout for "
++ "%s in rename!\n", old_dentry->d_name.name);
++ err = -EIO;
++ }
++ }
++
++out:
++ return err;
++
++revert:
++ /* Do revert here. */
++ local_err = unionfs_refresh_lower_dentry(new_dentry, new_parent,
++ old_bstart);
++ if (local_err) {
++ printk(KERN_ERR "unionfs: revert failed in rename: "
++ "the new refresh failed\n");
++ eio = -EIO;
++ }
++
++ local_err = unionfs_refresh_lower_dentry(old_dentry, old_parent,
++ old_bstart);
++ if (local_err) {
++ printk(KERN_ERR "unionfs: revert failed in rename: "
++ "the old refresh failed\n");
++ eio = -EIO;
++ goto revert_out;
++ }
++
++ if (!unionfs_lower_dentry_idx(new_dentry, bindex) ||
++ !unionfs_lower_dentry_idx(new_dentry, bindex)->d_inode) {
++ printk(KERN_ERR "unionfs: revert failed in rename: "
++ "the object disappeared from under us!\n");
++ eio = -EIO;
++ goto revert_out;
++ }
++
++ if (unionfs_lower_dentry_idx(old_dentry, bindex) &&
++ unionfs_lower_dentry_idx(old_dentry, bindex)->d_inode) {
++ printk(KERN_ERR "unionfs: revert failed in rename: "
++ "the object was created underneath us!\n");
++ eio = -EIO;
++ goto revert_out;
++ }
++
++ local_err = __unionfs_rename(new_dir, new_dentry, new_parent,
++ old_dir, old_dentry, old_parent,
++ old_bstart);
++
++	/* If we can't fix it, then we cop out with -EIO. */
++ if (local_err) {
++ printk(KERN_ERR "unionfs: revert failed in rename!\n");
++ eio = -EIO;
++ }
++
++ local_err = unionfs_refresh_lower_dentry(new_dentry, new_parent,
++ bindex);
++ if (local_err)
++ eio = -EIO;
++ local_err = unionfs_refresh_lower_dentry(old_dentry, old_parent,
++ bindex);
++ if (local_err)
++ eio = -EIO;
++
++revert_out:
++ if (eio)
++ err = eio;
++ return err;
++}
++
++/*
++ * We can't copyup a directory, because it may involve huge numbers of
++ * children, etc. Doing that in the kernel would be bad, so instead we
++ * return EXDEV to the user-space utility that caused this, and let
++ * user space recurse and ask us to copy up each file separately.
++ */
++static int may_rename_dir(struct dentry *dentry, struct dentry *parent)
++{
++ int err, bstart;
++
++ err = check_empty(dentry, parent, NULL);
++ if (err == -ENOTEMPTY) {
++ if (is_robranch(dentry))
++ return -EXDEV;
++ } else if (err) {
++ return err;
++ }
++
++ bstart = dbstart(dentry);
++ if (dbend(dentry) == bstart || dbopaque(dentry) == bstart)
++ return 0;
++
++ dbstart(dentry) = bstart + 1;
++ err = check_empty(dentry, parent, NULL);
++ dbstart(dentry) = bstart;
++ if (err == -ENOTEMPTY)
++ err = -EXDEV;
++ return err;
++}
++
++/*
++ * The locking rules in unionfs_rename are complex. We could use a simpler
++ * superblock-level name-space lock for renames and copy-ups.
++ */
++int unionfs_rename(struct inode *old_dir, struct dentry *old_dentry,
++ struct inode *new_dir, struct dentry *new_dentry)
++{
++ int err = 0;
++ struct dentry *wh_dentry;
++ struct dentry *old_parent, *new_parent;
++ int valid = true;
++
++ unionfs_read_lock(old_dentry->d_sb, UNIONFS_SMUTEX_CHILD);
++ old_parent = dget_parent(old_dentry);
++ new_parent = dget_parent(new_dentry);
++ /* un/lock parent dentries only if they differ from old/new_dentry */
++ if (old_parent != old_dentry &&
++ old_parent != new_dentry)
++ unionfs_lock_dentry(old_parent, UNIONFS_DMUTEX_REVAL_PARENT);
++ if (new_parent != old_dentry &&
++ new_parent != new_dentry &&
++ new_parent != old_parent)
++ unionfs_lock_dentry(new_parent, UNIONFS_DMUTEX_REVAL_CHILD);
++ unionfs_double_lock_dentry(old_dentry, new_dentry);
++
++ valid = __unionfs_d_revalidate(old_dentry, old_parent, false);
++ if (!valid) {
++ err = -ESTALE;
++ goto out;
++ }
++ if (!d_deleted(new_dentry) && new_dentry->d_inode) {
++ valid = __unionfs_d_revalidate(new_dentry, new_parent, false);
++ if (!valid) {
++ err = -ESTALE;
++ goto out;
++ }
++ }
++
++ if (!S_ISDIR(old_dentry->d_inode->i_mode))
++ err = unionfs_partial_lookup(old_dentry, old_parent);
++ else
++ err = may_rename_dir(old_dentry, old_parent);
++
++ if (err)
++ goto out;
++
++ err = unionfs_partial_lookup(new_dentry, new_parent);
++ if (err)
++ goto out;
++
++ /*
++ * if new_dentry is already lower because of whiteout,
++ * simply override it even if the whited-out dir is not empty.
++ */
++ wh_dentry = find_first_whiteout(new_dentry);
++ if (!IS_ERR(wh_dentry)) {
++ dput(wh_dentry);
++ } else if (new_dentry->d_inode) {
++ if (S_ISDIR(old_dentry->d_inode->i_mode) !=
++ S_ISDIR(new_dentry->d_inode->i_mode)) {
++ err = S_ISDIR(old_dentry->d_inode->i_mode) ?
++ -ENOTDIR : -EISDIR;
++ goto out;
++ }
++
++ if (S_ISDIR(new_dentry->d_inode->i_mode)) {
++ struct unionfs_dir_state *namelist = NULL;
++ /* check if this unionfs directory is empty or not */
++ err = check_empty(new_dentry, new_parent, &namelist);
++ if (err)
++ goto out;
++
++ if (!is_robranch(new_dentry))
++ err = delete_whiteouts(new_dentry,
++ dbstart(new_dentry),
++ namelist);
++
++ free_rdstate(namelist);
++
++ if (err)
++ goto out;
++ }
++ }
++
++ err = do_unionfs_rename(old_dir, old_dentry, old_parent,
++ new_dir, new_dentry, new_parent);
++ if (err)
++ goto out;
++
++ /*
++ * force re-lookup since the dir on ro branch is not renamed, and
++ * lower dentries still indicate the un-renamed ones.
++ */
++ if (S_ISDIR(old_dentry->d_inode->i_mode))
++ atomic_dec(&UNIONFS_D(old_dentry)->generation);
++ else
++ unionfs_postcopyup_release(old_dentry);
++ if (new_dentry->d_inode && !S_ISDIR(new_dentry->d_inode->i_mode)) {
++ unionfs_postcopyup_release(new_dentry);
++ unionfs_postcopyup_setmnt(new_dentry);
++ if (!unionfs_lower_inode(new_dentry->d_inode)) {
++ /*
++ * If we get here, it means that no copyup was
++			 * needed, and that a file by the old name already
++			 * existed on the destination branch; that file got
++ * renamed earlier in this function, so all we need
++ * to do here is set the lower inode.
++ */
++ struct inode *inode;
++ inode = unionfs_lower_inode(old_dentry->d_inode);
++ igrab(inode);
++ unionfs_set_lower_inode_idx(new_dentry->d_inode,
++ dbstart(new_dentry),
++ inode);
++ }
++ }
++ /* if all of this renaming succeeded, update our times */
++ unionfs_copy_attr_times(old_dentry->d_inode);
++ unionfs_copy_attr_times(new_dentry->d_inode);
++ unionfs_check_inode(old_dir);
++ unionfs_check_inode(new_dir);
++ unionfs_check_dentry(old_dentry);
++ unionfs_check_dentry(new_dentry);
++
++out:
++ if (err) /* clear the new_dentry stuff created */
++ d_drop(new_dentry);
++
++ unionfs_double_unlock_dentry(old_dentry, new_dentry);
++ if (new_parent != old_dentry &&
++ new_parent != new_dentry &&
++ new_parent != old_parent)
++ unionfs_unlock_dentry(new_parent);
++ if (old_parent != old_dentry &&
++ old_parent != new_dentry)
++ unionfs_unlock_dentry(old_parent);
++ dput(new_parent);
++ dput(old_parent);
++ unionfs_read_unlock(old_dentry->d_sb);
++
++ return err;
++}
+diff --git a/fs/unionfs/sioq.c b/fs/unionfs/sioq.c
+new file mode 100644
+index 0000000..760c580
+--- /dev/null
++++ b/fs/unionfs/sioq.c
+@@ -0,0 +1,101 @@
++/*
++ * Copyright (c) 2006-2010 Erez Zadok
++ * Copyright (c) 2006 Charles P. Wright
++ * Copyright (c) 2006-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2006 Junjiro Okajima
++ * Copyright (c) 2006 David P. Quigley
++ * Copyright (c) 2006-2010 Stony Brook University
++ * Copyright (c) 2006-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "union.h"
++
++/*
++ * Super-user IO work Queue - sometimes we need to perform actions which
++ * would fail due to the unix permissions on the parent directory (e.g.,
++ * rmdir a directory which appears empty, but in reality contains
++ * whiteouts).
++ */
++
++static struct workqueue_struct *superio_workqueue;
++
++int __init init_sioq(void)
++{
++ int err;
++
++ superio_workqueue = create_workqueue("unionfs_siod");
++ if (!IS_ERR(superio_workqueue))
++ return 0;
++
++ err = PTR_ERR(superio_workqueue);
++ printk(KERN_ERR "unionfs: create_workqueue failed %d\n", err);
++ superio_workqueue = NULL;
++ return err;
++}
++
++void stop_sioq(void)
++{
++ if (superio_workqueue)
++ destroy_workqueue(superio_workqueue);
++}
++
++void run_sioq(work_func_t func, struct sioq_args *args)
++{
++ INIT_WORK(&args->work, func);
++
++ init_completion(&args->comp);
++ while (!queue_work(superio_workqueue, &args->work)) {
++ /* TODO: do accounting if needed */
++ schedule();
++ }
++ wait_for_completion(&args->comp);
++}
++
++void __unionfs_create(struct work_struct *work)
++{
++ struct sioq_args *args = container_of(work, struct sioq_args, work);
++ struct create_args *c = &args->create;
++
++ args->err = vfs_create(c->parent, c->dentry, c->mode, c->nd);
++ complete(&args->comp);
++}
++
++void __unionfs_mkdir(struct work_struct *work)
++{
++ struct sioq_args *args = container_of(work, struct sioq_args, work);
++ struct mkdir_args *m = &args->mkdir;
++
++ args->err = vfs_mkdir(m->parent, m->dentry, m->mode);
++ complete(&args->comp);
++}
++
++void __unionfs_mknod(struct work_struct *work)
++{
++ struct sioq_args *args = container_of(work, struct sioq_args, work);
++ struct mknod_args *m = &args->mknod;
++
++ args->err = vfs_mknod(m->parent, m->dentry, m->mode, m->dev);
++ complete(&args->comp);
++}
++
++void __unionfs_symlink(struct work_struct *work)
++{
++ struct sioq_args *args = container_of(work, struct sioq_args, work);
++ struct symlink_args *s = &args->symlink;
++
++ args->err = vfs_symlink(s->parent, s->dentry, s->symbuf);
++ complete(&args->comp);
++}
++
++void __unionfs_unlink(struct work_struct *work)
++{
++ struct sioq_args *args = container_of(work, struct sioq_args, work);
++ struct unlink_args *u = &args->unlink;
++
++ args->err = vfs_unlink(u->parent, u->dentry);
++ complete(&args->comp);
++}
+diff --git a/fs/unionfs/sioq.h b/fs/unionfs/sioq.h
+new file mode 100644
+index 0000000..b26d248
+--- /dev/null
++++ b/fs/unionfs/sioq.h
+@@ -0,0 +1,91 @@
++/*
++ * Copyright (c) 2006-2010 Erez Zadok
++ * Copyright (c) 2006 Charles P. Wright
++ * Copyright (c) 2006-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2006 Junjiro Okajima
++ * Copyright (c) 2006 David P. Quigley
++ * Copyright (c) 2006-2010 Stony Brook University
++ * Copyright (c) 2006-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef _SIOQ_H
++#define _SIOQ_H
++
++struct deletewh_args {
++ struct unionfs_dir_state *namelist;
++ struct dentry *dentry;
++ int bindex;
++};
++
++struct is_opaque_args {
++ struct dentry *dentry;
++};
++
++struct create_args {
++ struct inode *parent;
++ struct dentry *dentry;
++ umode_t mode;
++ struct nameidata *nd;
++};
++
++struct mkdir_args {
++ struct inode *parent;
++ struct dentry *dentry;
++ umode_t mode;
++};
++
++struct mknod_args {
++ struct inode *parent;
++ struct dentry *dentry;
++ umode_t mode;
++ dev_t dev;
++};
++
++struct symlink_args {
++ struct inode *parent;
++ struct dentry *dentry;
++ char *symbuf;
++};
++
++struct unlink_args {
++ struct inode *parent;
++ struct dentry *dentry;
++};
++
++
++struct sioq_args {
++ struct completion comp;
++ struct work_struct work;
++ int err;
++ void *ret;
++
++ union {
++ struct deletewh_args deletewh;
++ struct is_opaque_args is_opaque;
++ struct create_args create;
++ struct mkdir_args mkdir;
++ struct mknod_args mknod;
++ struct symlink_args symlink;
++ struct unlink_args unlink;
++ };
++};
++
++/* Extern definitions for SIOQ functions */
++extern int __init init_sioq(void);
++extern void stop_sioq(void);
++extern void run_sioq(work_func_t func, struct sioq_args *args);
++
++/* Extern definitions for our privilege escalation helpers */
++extern void __unionfs_create(struct work_struct *work);
++extern void __unionfs_mkdir(struct work_struct *work);
++extern void __unionfs_mknod(struct work_struct *work);
++extern void __unionfs_symlink(struct work_struct *work);
++extern void __unionfs_unlink(struct work_struct *work);
++extern void __delete_whiteouts(struct work_struct *work);
++extern void __is_opaque_dir(struct work_struct *work);
++
++#endif /* not _SIOQ_H */
+diff --git a/fs/unionfs/subr.c b/fs/unionfs/subr.c
+new file mode 100644
+index 0000000..570a344
+--- /dev/null
++++ b/fs/unionfs/subr.c
+@@ -0,0 +1,95 @@
++/*
++ * Copyright (c) 2003-2010 Erez Zadok
++ * Copyright (c) 2003-2006 Charles P. Wright
++ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2005-2006 Junjiro Okajima
++ * Copyright (c) 2005 Arun M. Krishnakumar
++ * Copyright (c) 2004-2006 David P. Quigley
++ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
++ * Copyright (c) 2003 Puja Gupta
++ * Copyright (c) 2003 Harikesavan Krishnan
++ * Copyright (c) 2003-2010 Stony Brook University
++ * Copyright (c) 2003-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "union.h"
++
++/*
++ * returns the right nlink value based on the inode type
++ */
++int unionfs_get_nlinks(const struct inode *inode)
++{
++ /* don't bother to do all the work since we're unlinked */
++ if (inode->i_nlink == 0)
++ return 0;
++
++ if (!S_ISDIR(inode->i_mode))
++ return unionfs_lower_inode(inode)->i_nlink;
++
++ /*
++	 * For directories, we return 1. The only place that could care
++ * about links is readdir, and there's d_type there so even that
++ * doesn't matter.
++ */
++ return 1;
++}
++
++/* copy a/m/ctime from the lower branch with the newest times */
++void unionfs_copy_attr_times(struct inode *upper)
++{
++ int bindex;
++ struct inode *lower;
++
++ if (!upper)
++ return;
++ if (ibstart(upper) < 0) {
++#ifdef CONFIG_UNION_FS_DEBUG
++ WARN_ON(ibstart(upper) < 0);
++#endif /* CONFIG_UNION_FS_DEBUG */
++ return;
++ }
++ for (bindex = ibstart(upper); bindex <= ibend(upper); bindex++) {
++ lower = unionfs_lower_inode_idx(upper, bindex);
++ if (!lower)
++ continue; /* not all lower dir objects may exist */
++ if (unlikely(timespec_compare(&upper->i_mtime,
++ &lower->i_mtime) < 0))
++ upper->i_mtime = lower->i_mtime;
++ if (unlikely(timespec_compare(&upper->i_ctime,
++ &lower->i_ctime) < 0))
++ upper->i_ctime = lower->i_ctime;
++ if (unlikely(timespec_compare(&upper->i_atime,
++ &lower->i_atime) < 0))
++ upper->i_atime = lower->i_atime;
++ }
++}
++
++/*
++ * A unionfs/fanout version of fsstack_copy_attr_all. Uses
++ * unionfs_get_nlinks to properly calculate the number of links to a file.
++ * Also copies the max() of all a/m/ctimes for all lower inodes (which is
++ * important if the lower inode is a directory).
++ */
++void unionfs_copy_attr_all(struct inode *dest,
++ const struct inode *src)
++{
++ dest->i_mode = src->i_mode;
++ dest->i_uid = src->i_uid;
++ dest->i_gid = src->i_gid;
++ dest->i_rdev = src->i_rdev;
++
++ unionfs_copy_attr_times(dest);
++
++ dest->i_blkbits = src->i_blkbits;
++ dest->i_flags = src->i_flags;
++
++ /*
++ * Update the nlinks AFTER updating the above fields, because the
++ * get_links callback may depend on them.
++ */
++ dest->i_nlink = unionfs_get_nlinks(dest);
++}
+diff --git a/fs/unionfs/super.c b/fs/unionfs/super.c
+new file mode 100644
+index 0000000..45bb9bf
+--- /dev/null
++++ b/fs/unionfs/super.c
+@@ -0,0 +1,1029 @@
++/*
++ * Copyright (c) 2003-2010 Erez Zadok
++ * Copyright (c) 2003-2006 Charles P. Wright
++ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2005-2006 Junjiro Okajima
++ * Copyright (c) 2005 Arun M. Krishnakumar
++ * Copyright (c) 2004-2006 David P. Quigley
++ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
++ * Copyright (c) 2003 Puja Gupta
++ * Copyright (c) 2003 Harikesavan Krishnan
++ * Copyright (c) 2003-2010 Stony Brook University
++ * Copyright (c) 2003-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "union.h"
++
++/*
++ * The inode cache is used with alloc_inode for both our inode info and the
++ * vfs inode.
++ */
++static struct kmem_cache *unionfs_inode_cachep;
++
++struct inode *unionfs_iget(struct super_block *sb, unsigned long ino)
++{
++ int size;
++ struct unionfs_inode_info *info;
++ struct inode *inode;
++
++ inode = iget_locked(sb, ino);
++ if (!inode)
++ return ERR_PTR(-ENOMEM);
++ if (!(inode->i_state & I_NEW))
++ return inode;
++
++ info = UNIONFS_I(inode);
++ memset(info, 0, offsetof(struct unionfs_inode_info, vfs_inode));
++ info->bstart = -1;
++ info->bend = -1;
++ atomic_set(&info->generation,
++ atomic_read(&UNIONFS_SB(inode->i_sb)->generation));
++ spin_lock_init(&info->rdlock);
++ info->rdcount = 1;
++ info->hashsize = -1;
++ INIT_LIST_HEAD(&info->readdircache);
++
++ size = sbmax(inode->i_sb) * sizeof(struct inode *);
++ info->lower_inodes = kzalloc(size, GFP_KERNEL);
++ if (unlikely(!info->lower_inodes)) {
++ printk(KERN_CRIT "unionfs: no kernel memory when allocating "
++ "lower-pointer array!\n");
++ iget_failed(inode);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ inode->i_version++;
++ inode->i_op = &unionfs_main_iops;
++ inode->i_fop = &unionfs_main_fops;
++
++ inode->i_mapping->a_ops = &unionfs_aops;
++
++ /*
++	 * reset times so unionfs_copy_attr_all can keep our time invariants
++ * right (upper inode time being the max of all lower ones).
++ */
++ inode->i_atime.tv_sec = inode->i_atime.tv_nsec = 0;
++ inode->i_mtime.tv_sec = inode->i_mtime.tv_nsec = 0;
++ inode->i_ctime.tv_sec = inode->i_ctime.tv_nsec = 0;
++ unlock_new_inode(inode);
++ return inode;
++}
++
++/*
++ * final actions when unmounting a file system
++ *
++ * No need to lock rwsem.
++ */
++static void unionfs_put_super(struct super_block *sb)
++{
++ int bindex, bstart, bend;
++ struct unionfs_sb_info *spd;
++ int leaks = 0;
++
++ spd = UNIONFS_SB(sb);
++ if (!spd)
++ return;
++
++ bstart = sbstart(sb);
++ bend = sbend(sb);
++
++ /* Make sure we have no leaks of branchget/branchput. */
++ for (bindex = bstart; bindex <= bend; bindex++)
++ if (unlikely(branch_count(sb, bindex) != 0)) {
++ printk(KERN_CRIT
++ "unionfs: branch %d has %d references left!\n",
++ bindex, branch_count(sb, bindex));
++ leaks = 1;
++ }
++ WARN_ON(leaks != 0);
++
++ /* decrement lower super references */
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ struct super_block *s;
++ s = unionfs_lower_super_idx(sb, bindex);
++ unionfs_set_lower_super_idx(sb, bindex, NULL);
++ atomic_dec(&s->s_active);
++ }
++
++ kfree(spd->dev_name);
++ kfree(spd->data);
++ kfree(spd);
++ sb->s_fs_info = NULL;
++}
++
++/*
++ * Since people use this to answer the "How big of a file can I write?"
++ * question, we report the size of the highest priority branch as the size of
++ * the union.
++ */
++static int unionfs_statfs(struct dentry *dentry, struct kstatfs *buf)
++{
++ int err = 0;
++ struct super_block *sb;
++ struct dentry *lower_dentry;
++ struct dentry *parent;
++ struct path lower_path;
++ bool valid;
++
++ sb = dentry->d_sb;
++
++ unionfs_read_lock(sb, UNIONFS_SMUTEX_CHILD);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ valid = __unionfs_d_revalidate(dentry, parent, false);
++ if (unlikely(!valid)) {
++ err = -ESTALE;
++ goto out;
++ }
++ unionfs_check_dentry(dentry);
++
++ lower_dentry = unionfs_lower_dentry(sb->s_root);
++ lower_path.dentry = lower_dentry;
++ lower_path.mnt = unionfs_mntget(sb->s_root, 0);
++ err = vfs_statfs(&lower_path, buf);
++ mntput(lower_path.mnt);
++
++ /* set return buf to our f/s to avoid confusing user-level utils */
++ buf->f_type = UNIONFS_SUPER_MAGIC;
++ /*
++	 * Our maximum file name length is shorter by a few bytes because every
++ * file name could potentially be whited-out.
++ *
++ * XXX: this restriction goes away with ODF.
++ */
++ unionfs_set_max_namelen(&buf->f_namelen);
++
++ /*
++ * reset two fields to avoid confusing user-land.
++ * XXX: is this still necessary?
++ */
++ memset(&buf->f_fsid, 0, sizeof(__kernel_fsid_t));
++ memset(&buf->f_spare, 0, sizeof(buf->f_spare));
++
++out:
++ unionfs_check_dentry(dentry);
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(sb);
++ return err;
++}
++
++/* handle mode changing during remount */
++static noinline_for_stack int do_remount_mode_option(
++ char *optarg,
++ int cur_branches,
++ struct unionfs_data *new_data,
++ struct path *new_lower_paths)
++{
++ int err = -EINVAL;
++ int perms, idx;
++ char *modename = strchr(optarg, '=');
++ struct nameidata nd;
++
++ /* by now, optarg contains the branch name */
++ if (!*optarg) {
++ printk(KERN_ERR
++ "unionfs: no branch specified for mode change\n");
++ goto out;
++ }
++ if (!modename) {
++ printk(KERN_ERR "unionfs: branch \"%s\" requires a mode\n",
++ optarg);
++ goto out;
++ }
++ *modename++ = '\0';
++ err = parse_branch_mode(modename, &perms);
++ if (err) {
++ printk(KERN_ERR "unionfs: invalid mode \"%s\" for \"%s\"\n",
++ modename, optarg);
++ goto out;
++ }
++
++ /*
++ * Find matching branch index. For now, this assumes that nothing
++ * has been mounted on top of this Unionfs stack. Once we have /odf
++ * and cache-coherency resolved, we'll address the branch-path
++ * uniqueness.
++ */
++ err = path_lookup(optarg, LOOKUP_FOLLOW, &nd);
++ if (err) {
++ printk(KERN_ERR "unionfs: error accessing "
++ "lower directory \"%s\" (error %d)\n",
++ optarg, err);
++ goto out;
++ }
++ for (idx = 0; idx < cur_branches; idx++)
++ if (nd.path.mnt == new_lower_paths[idx].mnt &&
++ nd.path.dentry == new_lower_paths[idx].dentry)
++ break;
++ path_put(&nd.path); /* no longer needed */
++ if (idx == cur_branches) {
++ err = -ENOENT; /* err may have been reset above */
++ printk(KERN_ERR "unionfs: branch \"%s\" "
++ "not found\n", optarg);
++ goto out;
++ }
++ /* check/change mode for existing branch */
++ /* we don't warn if perms==branchperms */
++ new_data[idx].branchperms = perms;
++ err = 0;
++out:
++ return err;
++}
++
++/* handle branch deletion during remount */
++static noinline_for_stack int do_remount_del_option(
++ char *optarg, int cur_branches,
++ struct unionfs_data *new_data,
++ struct path *new_lower_paths)
++{
++ int err = -EINVAL;
++ int idx;
++ struct nameidata nd;
++
++ /* optarg contains the branch name to delete */
++
++ /*
++ * Find matching branch index. For now, this assumes that nothing
++ * has been mounted on top of this Unionfs stack. Once we have /odf
++ * and cache-coherency resolved, we'll address the branch-path
++ * uniqueness.
++ */
++ err = path_lookup(optarg, LOOKUP_FOLLOW, &nd);
++ if (err) {
++ printk(KERN_ERR "unionfs: error accessing "
++ "lower directory \"%s\" (error %d)\n",
++ optarg, err);
++ goto out;
++ }
++ for (idx = 0; idx < cur_branches; idx++)
++ if (nd.path.mnt == new_lower_paths[idx].mnt &&
++ nd.path.dentry == new_lower_paths[idx].dentry)
++ break;
++ path_put(&nd.path); /* no longer needed */
++ if (idx == cur_branches) {
++ printk(KERN_ERR "unionfs: branch \"%s\" "
++ "not found\n", optarg);
++ err = -ENOENT;
++ goto out;
++ }
++ /* check if there are any open files on the branch to be deleted */
++ if (atomic_read(&new_data[idx].open_files) > 0) {
++ err = -EBUSY;
++ goto out;
++ }
++
++ /*
++ * Now we have to delete the branch. First, release any handles it
++ * has. Then, move the remaining array indexes past "idx" in
++ * new_data and new_lower_paths one to the left. Finally, adjust
++ * cur_branches.
++ */
++ path_put(&new_lower_paths[idx]);
++
++ if (idx < cur_branches - 1) {
++ /* if idx==cur_branches-1, we delete last branch: easy */
++ memmove(&new_data[idx], &new_data[idx+1],
++ (cur_branches - 1 - idx) *
++ sizeof(struct unionfs_data));
++ memmove(&new_lower_paths[idx], &new_lower_paths[idx+1],
++ (cur_branches - 1 - idx) * sizeof(struct path));
++ }
++
++ err = 0;
++out:
++ return err;
++}
++
++/* handle branch insertion during remount */
++static noinline_for_stack int do_remount_add_option(
++ char *optarg, int cur_branches,
++ struct unionfs_data *new_data,
++ struct path *new_lower_paths,
++ int *high_branch_id)
++{
++ int err = -EINVAL;
++ int perms;
++ int idx = 0; /* default: insert at beginning */
++	char *new_branch, *modename = NULL;
++ struct nameidata nd;
++
++ /*
++ * optarg can be of several forms:
++ *
++ * /bar:/foo insert /foo before /bar
++ * /bar:/foo=ro insert /foo in ro mode before /bar
++ * /foo insert /foo in the beginning (prepend)
++ * :/foo insert /foo at the end (append)
++ */
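++	/*
++	 * For example (hypothetical paths): a remount such as
++	 *   mount -o remount,add=/bar:/foo=ro /mnt/union
++	 * reaches this helper with optarg == "/bar:/foo=ro", i.e. insert
++	 * the read-only branch /foo before the existing branch /bar.
++	 */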
++ if (*optarg == ':') { /* append? */
++ new_branch = optarg + 1; /* skip ':' */
++ idx = cur_branches;
++ goto found_insertion_point;
++ }
++ new_branch = strchr(optarg, ':');
++ if (!new_branch) { /* prepend? */
++ new_branch = optarg;
++ goto found_insertion_point;
++ }
++ *new_branch++ = '\0'; /* holds path+mode of new branch */
++
++ /*
++ * Find matching branch index. For now, this assumes that nothing
++ * has been mounted on top of this Unionfs stack. Once we have /odf
++ * and cache-coherency resolved, we'll address the branch-path
++ * uniqueness.
++ */
++ err = path_lookup(optarg, LOOKUP_FOLLOW, &nd);
++ if (err) {
++ printk(KERN_ERR "unionfs: error accessing "
++ "lower directory \"%s\" (error %d)\n",
++ optarg, err);
++ goto out;
++ }
++ for (idx = 0; idx < cur_branches; idx++)
++ if (nd.path.mnt == new_lower_paths[idx].mnt &&
++ nd.path.dentry == new_lower_paths[idx].dentry)
++ break;
++ path_put(&nd.path); /* no longer needed */
++ if (idx == cur_branches) {
++ printk(KERN_ERR "unionfs: branch \"%s\" "
++ "not found\n", optarg);
++ err = -ENOENT;
++ goto out;
++ }
++
++ /*
++ * At this point idx will hold the index where the new branch should
++ * be inserted before.
++ */
++found_insertion_point:
++ /* find the mode for the new branch */
++ if (new_branch)
++ modename = strchr(new_branch, '=');
++ if (modename)
++ *modename++ = '\0';
++ if (!new_branch || !*new_branch) {
++ printk(KERN_ERR "unionfs: null new branch\n");
++ err = -EINVAL;
++ goto out;
++ }
++ err = parse_branch_mode(modename, &perms);
++ if (err) {
++ printk(KERN_ERR "unionfs: invalid mode \"%s\" for "
++ "branch \"%s\"\n", modename, new_branch);
++ goto out;
++ }
++ err = path_lookup(new_branch, LOOKUP_FOLLOW, &nd);
++ if (err) {
++ printk(KERN_ERR "unionfs: error accessing "
++ "lower directory \"%s\" (error %d)\n",
++ new_branch, err);
++ goto out;
++ }
++ /*
++ * It's probably safe to check_mode the new branch to insert. Note:
++ * we don't allow inserting branches which are unionfs's by
++ * themselves (check_branch returns EINVAL in that case). This is
++ * because this code base doesn't support stacking unionfs: the ODF
++ * code base supports that correctly.
++ */
++ err = check_branch(&nd);
++ if (err) {
++ printk(KERN_ERR "unionfs: lower directory "
++ "\"%s\" is not a valid branch\n", optarg);
++ path_put(&nd.path);
++ goto out;
++ }
++
++ /*
++ * Now we have to insert the new branch. But first, move the bits
++ * to make space for the new branch, if needed. Finally, adjust
++ * cur_branches.
++ * We don't release nd here; it's kept until umount/remount.
++ */
++ if (idx < cur_branches) {
++ /* if idx==cur_branches, we append: easy */
++ memmove(&new_data[idx+1], &new_data[idx],
++ (cur_branches - idx) * sizeof(struct unionfs_data));
++ memmove(&new_lower_paths[idx+1], &new_lower_paths[idx],
++ (cur_branches - idx) * sizeof(struct path));
++ }
++ new_lower_paths[idx].dentry = nd.path.dentry;
++ new_lower_paths[idx].mnt = nd.path.mnt;
++
++ new_data[idx].sb = nd.path.dentry->d_sb;
++ atomic_set(&new_data[idx].open_files, 0);
++ new_data[idx].branchperms = perms;
++ new_data[idx].branch_id = ++*high_branch_id; /* assign new branch ID */
++
++ err = 0;
++out:
++ return err;
++}
++
++
++/*
++ * Support branch management options on remount.
++ *
++ * See Documentation/filesystems/unionfs/ for details.
++ *
++ * @flags: numeric mount options
++ * @options: mount options string
++ *
++ * This function can rearrange a mounted union dynamically, adding and
++ * removing branches, including changing branch modes. Clearly this has to
++ * be done safely and atomically. Luckily, the VFS already calls this
++ * function with lock_super(sb) and lock_kernel() held, preventing
++ * concurrent mixing of new mounts, remounts, and unmounts. Moreover,
++ * do_remount_sb(), our caller function, already called shrink_dcache_sb(sb)
++ * to purge dentries/inodes from our superblock, and also called
++ * fsync_super(sb) to purge any dirty pages. So we're good.
++ *
++ * XXX: however, our remount code may also need to invalidate mapped pages
++ * so as to force them to be re-gotten from the (newly reconfigured) lower
++ * branches. This has to wait for proper mmap and cache coherency support
++ * in the VFS.
++ *
++ */
++static int unionfs_remount_fs(struct super_block *sb, int *flags,
++ char *options)
++{
++ int err = 0;
++ int i;
++ char *optionstmp, *tmp_to_free; /* kstrdup'ed of "options" */
++ char *optname;
++ int cur_branches = 0; /* no. of current branches */
++ int new_branches = 0; /* no. of branches actually left in the end */
++ int add_branches; /* est. no. of branches to add */
++ int del_branches; /* est. no. of branches to del */
++ int max_branches; /* max possible no. of branches */
++ struct unionfs_data *new_data = NULL, *tmp_data = NULL;
++ struct path *new_lower_paths = NULL, *tmp_lower_paths = NULL;
++ struct inode **new_lower_inodes = NULL;
++ int new_high_branch_id; /* new high branch ID */
++ int size; /* memory allocation size, temp var */
++ int old_ibstart, old_ibend;
++
++ unionfs_write_lock(sb);
++
++ /*
++ * The VFS will take care of "ro" and "rw" flags, and we can safely
++ * ignore MS_SILENT, but anything else left over is an error. So we
++ * need to check if any other flags may have been passed (none are
++ * allowed/supported as of now).
++ */
++ if ((*flags & ~(MS_RDONLY | MS_SILENT)) != 0) {
++ printk(KERN_ERR
++ "unionfs: remount flags 0x%x unsupported\n", *flags);
++ err = -EINVAL;
++ goto out_error;
++ }
++
++ /*
++ * If 'options' is NULL, it's probably because the user just changed
++ * the union to a "ro" or "rw" and the VFS took care of it. So
++ * nothing to do and we're done.
++ */
++ if (!options || options[0] == '\0')
++ goto out_error;
++
++ /*
++ * Find out how many branches we will have in the end, counting
++ * "add" and "del" commands. Copy the "options" string because
++ * strsep modifies the string and we need it later.
++ */
++ tmp_to_free = kstrdup(options, GFP_KERNEL);
++ optionstmp = tmp_to_free;
++ if (unlikely(!optionstmp)) {
++ err = -ENOMEM;
++ goto out_free;
++ }
++ cur_branches = sbmax(sb); /* current no. branches */
++ new_branches = sbmax(sb);
++ del_branches = 0;
++ add_branches = 0;
++ new_high_branch_id = sbhbid(sb); /* save current high_branch_id */
++ while ((optname = strsep(&optionstmp, ",")) != NULL) {
++ char *optarg;
++
++ if (!optname || !*optname)
++ continue;
++
++ optarg = strchr(optname, '=');
++ if (optarg)
++ *optarg++ = '\0';
++
++ if (!strcmp("add", optname))
++ add_branches++;
++ else if (!strcmp("del", optname))
++ del_branches++;
++ }
++ kfree(tmp_to_free);
++ /* after all changes, will we have at least one branch left? */
++ if ((new_branches + add_branches - del_branches) < 1) {
++ printk(KERN_ERR
++ "unionfs: no branches left after remount\n");
++ err = -EINVAL;
++ goto out_free;
++ }
++
++ /*
++ * Since we haven't actually parsed all the add/del options, nor
++ * have we checked them for errors, we don't know for sure how many
++ * branches we will have after all changes have taken place. In
++ * fact, the total number of branches left could be less than what
++ * we have now. So we need to allocate space for a temporary
++ * placeholder that is at least as large as the maximum number of
++ * branches we *could* have, which is the current number plus all
++ * the additions. Once we're done with these temp placeholders, we
++ * may have to re-allocate the final size, copy over from the temp,
++ * and then free the temps (done near the end of this function).
++ */
++ max_branches = cur_branches + add_branches;
++ /* allocate space for new pointers to lower dentry */
++ tmp_data = kcalloc(max_branches,
++ sizeof(struct unionfs_data), GFP_KERNEL);
++ if (unlikely(!tmp_data)) {
++ err = -ENOMEM;
++ goto out_free;
++ }
++ /* allocate space for new pointers to lower paths */
++ tmp_lower_paths = kcalloc(max_branches,
++ sizeof(struct path), GFP_KERNEL);
++ if (unlikely(!tmp_lower_paths)) {
++ err = -ENOMEM;
++ goto out_free;
++ }
++ /* copy current info into new placeholders, incrementing refcnts */
++ memcpy(tmp_data, UNIONFS_SB(sb)->data,
++ cur_branches * sizeof(struct unionfs_data));
++ memcpy(tmp_lower_paths, UNIONFS_D(sb->s_root)->lower_paths,
++ cur_branches * sizeof(struct path));
++ for (i = 0; i < cur_branches; i++)
++ path_get(&tmp_lower_paths[i]); /* drop refs at end of fxn */
++
++ /*******************************************************************
++ * For each branch command, do path_lookup on the requested branch,
++ * and apply the change to a temp branch list. To handle errors, we
++ * already dup'ed the old arrays (above), and increased the refcnts
++ * on various f/s objects. So now we can do all the path_lookups
++	 * and branch-management commands on the new arrays.  If it fails
++	 * midway, we free the tmp arrays and *put all objects.  If we succeed,
++ * then we free old arrays and *put its objects, and then replace
++ * the arrays with the new tmp list (we may have to re-allocate the
++ * memory because the temp lists could have been larger than what we
++ * actually needed).
++ *******************************************************************/
++
++ while ((optname = strsep(&options, ",")) != NULL) {
++ char *optarg;
++
++ if (!optname || !*optname)
++ continue;
++ /*
++ * At this stage optname holds a comma-delimited option, but
++ * without the commas. Next, we need to break the string on
++ * the '=' symbol to separate CMD=ARG, where ARG itself can
++ * be KEY=VAL. For example, in mode=/foo=rw, CMD is "mode",
++ * KEY is "/foo", and VAL is "rw".
++ */
++ optarg = strchr(optname, '=');
++ if (optarg)
++ *optarg++ = '\0';
++ /* incgen remount option (instead of old ioctl) */
++ if (!strcmp("incgen", optname)) {
++ err = 0;
++ goto out_no_change;
++ }
++
++ /*
++ * All of our options take an argument now. (Insert ones
++ * that don't above this check.) So at this stage optname
++ * contains the CMD part and optarg contains the ARG part.
++ */
++ if (!optarg || !*optarg) {
++ printk(KERN_ERR "unionfs: all remount options require "
++ "an argument (%s)\n", optname);
++ err = -EINVAL;
++ goto out_release;
++ }
++
++ if (!strcmp("add", optname)) {
++ err = do_remount_add_option(optarg, new_branches,
++ tmp_data,
++ tmp_lower_paths,
++ &new_high_branch_id);
++ if (err)
++ goto out_release;
++ new_branches++;
++ if (new_branches > UNIONFS_MAX_BRANCHES) {
++ printk(KERN_ERR "unionfs: command exceeds "
++ "%d branches\n", UNIONFS_MAX_BRANCHES);
++ err = -E2BIG;
++ goto out_release;
++ }
++ continue;
++ }
++ if (!strcmp("del", optname)) {
++ err = do_remount_del_option(optarg, new_branches,
++ tmp_data,
++ tmp_lower_paths);
++ if (err)
++ goto out_release;
++ new_branches--;
++ continue;
++ }
++ if (!strcmp("mode", optname)) {
++ err = do_remount_mode_option(optarg, new_branches,
++ tmp_data,
++ tmp_lower_paths);
++ if (err)
++ goto out_release;
++ continue;
++ }
++
++ /*
++ * When you use "mount -o remount,ro", mount(8) will
++ * reportedly pass the original dirs= string from
++ * /proc/mounts. So for now, we have to ignore dirs= and
++ * not consider it an error, unless we want to allow users
++ * to pass dirs= in remount. Note that to allow the VFS to
++ * actually process the ro/rw remount options, we have to
++ * return 0 from this function.
++ */
++ if (!strcmp("dirs", optname)) {
++ printk(KERN_WARNING
++ "unionfs: remount ignoring option \"%s\"\n",
++ optname);
++ continue;
++ }
++
++ err = -EINVAL;
++ printk(KERN_ERR
++ "unionfs: unrecognized option \"%s\"\n", optname);
++ goto out_release;
++ }
++
++out_no_change:
++
++ /******************************************************************
++ * WE'RE ALMOST DONE: check if leftmost branch might be read-only,
++ * see if we need to allocate a small-sized new vector, copy the
++ * vectors to their correct place, release the refcnt of the older
++ * ones, and return. Also handle invalidating any pages that will
++ * have to be re-read.
++ *******************************************************************/
++
++ if (!(tmp_data[0].branchperms & MAY_WRITE)) {
++ printk(KERN_ERR "unionfs: leftmost branch cannot be read-only "
++ "(use \"remount,ro\" to create a read-only union)\n");
++ err = -EINVAL;
++ goto out_release;
++ }
++
++ /* (re)allocate space for new pointers to lower dentry */
++ size = new_branches * sizeof(struct unionfs_data);
++ new_data = krealloc(tmp_data, size, GFP_KERNEL);
++ if (unlikely(!new_data)) {
++ err = -ENOMEM;
++ goto out_release;
++ }
++
++ /* allocate space for new pointers to lower paths */
++ size = new_branches * sizeof(struct path);
++ new_lower_paths = krealloc(tmp_lower_paths, size, GFP_KERNEL);
++ if (unlikely(!new_lower_paths)) {
++ err = -ENOMEM;
++ goto out_release;
++ }
++
++ /* allocate space for new pointers to lower inodes */
++ new_lower_inodes = kcalloc(new_branches,
++ sizeof(struct inode *), GFP_KERNEL);
++ if (unlikely(!new_lower_inodes)) {
++ err = -ENOMEM;
++ goto out_release;
++ }
++
++ /*
++ * OK, just before we actually put the new set of branches in place,
++ * we need to ensure that our own f/s has no dirty objects left.
++ * Luckily, do_remount_sb() already calls shrink_dcache_sb(sb) and
++ * fsync_super(sb), taking care of dentries, inodes, and dirty
++ * pages. So all that's left is for us to invalidate any leftover
++ * (non-dirty) pages to ensure that they will be re-read from the
++ * new lower branches (and to support mmap).
++ */
++
++ /*
++ * Once we finish the remounting successfully, our superblock
++ * generation number will have increased. This will be detected by
++ * our dentry-revalidation code upon subsequent f/s operations
++ * through unionfs. The revalidation code will rebuild the union of
++ * lower inodes for a given unionfs inode and invalidate any pages
++ * of such "stale" inodes (by calling our purge_inode_data
++ * function). This revalidation will happen lazily and
++ * incrementally, as users perform operations on cached inodes. We
++ * would like to encourage this revalidation to happen sooner if
++	 * possible, so we try to invalidate as many other pages in
++ * our superblock as we can. We used to call drop_pagecache_sb() or
++ * a variant thereof, but either method was racy (drop_caches alone
++ * is known to be racy). So now we let the revalidation happen on a
++ * per file basis in ->d_revalidate.
++ */
++
++ /* grab new lower super references; release old ones */
++ for (i = 0; i < new_branches; i++)
++ atomic_inc(&new_data[i].sb->s_active);
++ for (i = 0; i < sbmax(sb); i++)
++ atomic_dec(&UNIONFS_SB(sb)->data[i].sb->s_active);
++
++ /* copy new vectors into their correct place */
++ tmp_data = UNIONFS_SB(sb)->data;
++ UNIONFS_SB(sb)->data = new_data;
++ new_data = NULL; /* so don't free good pointers below */
++ tmp_lower_paths = UNIONFS_D(sb->s_root)->lower_paths;
++ UNIONFS_D(sb->s_root)->lower_paths = new_lower_paths;
++ new_lower_paths = NULL; /* so don't free good pointers below */
++
++ /* update our unionfs_sb_info and root dentry index of last branch */
++ i = sbmax(sb); /* save no. of branches to release at end */
++ sbend(sb) = new_branches - 1;
++ dbend(sb->s_root) = new_branches - 1;
++ old_ibstart = ibstart(sb->s_root->d_inode);
++ old_ibend = ibend(sb->s_root->d_inode);
++ ibend(sb->s_root->d_inode) = new_branches - 1;
++ UNIONFS_D(sb->s_root)->bcount = new_branches;
++ new_branches = i; /* no. of branches to release below */
++
++ /*
++ * Update lower inodes: 3 steps
++ * 1. grab ref on all new lower inodes
++ */
++ for (i = dbstart(sb->s_root); i <= dbend(sb->s_root); i++) {
++ struct dentry *lower_dentry =
++ unionfs_lower_dentry_idx(sb->s_root, i);
++ igrab(lower_dentry->d_inode);
++ new_lower_inodes[i] = lower_dentry->d_inode;
++ }
++ /* 2. release reference on all older lower inodes */
++ iput_lowers(sb->s_root->d_inode, old_ibstart, old_ibend, true);
++ /* 3. update root dentry's inode to new lower_inodes array */
++ UNIONFS_I(sb->s_root->d_inode)->lower_inodes = new_lower_inodes;
++ new_lower_inodes = NULL;
++
++ /* maxbytes may have changed */
++ sb->s_maxbytes = unionfs_lower_super_idx(sb, 0)->s_maxbytes;
++ /* update high branch ID */
++ sbhbid(sb) = new_high_branch_id;
++
++ /* update our sb->generation for revalidating objects */
++ i = atomic_inc_return(&UNIONFS_SB(sb)->generation);
++ atomic_set(&UNIONFS_D(sb->s_root)->generation, i);
++ atomic_set(&UNIONFS_I(sb->s_root->d_inode)->generation, i);
++ if (!(*flags & MS_SILENT))
++ pr_info("unionfs: %s: new generation number %d\n",
++ UNIONFS_SB(sb)->dev_name, i);
++ /* finally, update the root dentry's times */
++ unionfs_copy_attr_times(sb->s_root->d_inode);
++ err = 0; /* reset to success */
++
++ /*
++ * The code above falls through to the next label, and releases the
++ * refcnts of the older ones (stored in tmp_*): if we fell through
++ * here, it means success. However, if we jump directly to this
++ * label from any error above, then an error occurred after we
++ * grabbed various refcnts, and so we have to release the
++ * temporarily constructed structures.
++ */
++out_release:
++ /* no need to cleanup/release anything in tmp_data */
++ if (tmp_lower_paths)
++ for (i = 0; i < new_branches; i++)
++ path_put(&tmp_lower_paths[i]);
++out_free:
++ kfree(tmp_lower_paths);
++ kfree(tmp_data);
++ kfree(new_lower_paths);
++ kfree(new_data);
++ kfree(new_lower_inodes);
++out_error:
++ unionfs_check_dentry(sb->s_root);
++ unionfs_write_unlock(sb);
++ return err;
++}
++
++/*
++ * Called by iput() when the inode reference count reached zero
++ * and the inode is not hashed anywhere. Used to clear anything
++ * that needs to be, before the inode is completely destroyed and put
++ * on the inode free list.
++ *
++ * No need to lock sb info's rwsem.
++ */
++static void unionfs_evict_inode(struct inode *inode)
++{
++ int bindex, bstart, bend;
++ struct inode *lower_inode;
++ struct list_head *pos, *n;
++ struct unionfs_dir_state *rdstate;
++
++ truncate_inode_pages(&inode->i_data, 0);
++ end_writeback(inode);
++
++ list_for_each_safe(pos, n, &UNIONFS_I(inode)->readdircache) {
++ rdstate = list_entry(pos, struct unionfs_dir_state, cache);
++ list_del(&rdstate->cache);
++ free_rdstate(rdstate);
++ }
++
++ /*
++ * Decrement a reference to a lower_inode, which was incremented
++ * by our read_inode when it was created initially.
++ */
++ bstart = ibstart(inode);
++ bend = ibend(inode);
++ if (bstart >= 0) {
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ lower_inode = unionfs_lower_inode_idx(inode, bindex);
++ if (!lower_inode)
++ continue;
++ unionfs_set_lower_inode_idx(inode, bindex, NULL);
++ /* see Documentation/filesystems/unionfs/issues.txt */
++ lockdep_off();
++ iput(lower_inode);
++ lockdep_on();
++ }
++ }
++
++ kfree(UNIONFS_I(inode)->lower_inodes);
++ UNIONFS_I(inode)->lower_inodes = NULL;
++}
++
++static struct inode *unionfs_alloc_inode(struct super_block *sb)
++{
++ struct unionfs_inode_info *i;
++
++ i = kmem_cache_alloc(unionfs_inode_cachep, GFP_KERNEL);
++ if (unlikely(!i))
++ return NULL;
++
++ /* memset everything up to the inode to 0 */
++ memset(i, 0, offsetof(struct unionfs_inode_info, vfs_inode));
++
++ i->vfs_inode.i_version = 1;
++ return &i->vfs_inode;
++}
++
++static void unionfs_destroy_inode(struct inode *inode)
++{
++ kmem_cache_free(unionfs_inode_cachep, UNIONFS_I(inode));
++}
++
++/* unionfs inode cache constructor */
++static void init_once(void *obj)
++{
++ struct unionfs_inode_info *i = obj;
++
++ inode_init_once(&i->vfs_inode);
++}
++
++int unionfs_init_inode_cache(void)
++{
++ int err = 0;
++
++ unionfs_inode_cachep =
++ kmem_cache_create("unionfs_inode_cache",
++ sizeof(struct unionfs_inode_info), 0,
++ SLAB_RECLAIM_ACCOUNT, init_once);
++ if (unlikely(!unionfs_inode_cachep))
++ err = -ENOMEM;
++ return err;
++}
++
++/* unionfs inode cache destructor */
++void unionfs_destroy_inode_cache(void)
++{
++ if (unionfs_inode_cachep)
++ kmem_cache_destroy(unionfs_inode_cachep);
++}
++
++/*
++ * Called when we have a dirty inode; here we only throw out
++ * parts of our readdir list that are too old.
++ *
++ * No need to grab sb info's rwsem.
++ */
++static int unionfs_write_inode(struct inode *inode,
++ struct writeback_control *wbc)
++{
++ struct list_head *pos, *n;
++ struct unionfs_dir_state *rdstate;
++
++ spin_lock(&UNIONFS_I(inode)->rdlock);
++ list_for_each_safe(pos, n, &UNIONFS_I(inode)->readdircache) {
++ rdstate = list_entry(pos, struct unionfs_dir_state, cache);
++ /* We keep this list in LRU order. */
++ if ((rdstate->access + RDCACHE_JIFFIES) > jiffies)
++ break;
++ UNIONFS_I(inode)->rdcount--;
++ list_del(&rdstate->cache);
++ free_rdstate(rdstate);
++ }
++ spin_unlock(&UNIONFS_I(inode)->rdlock);
++
++ return 0;
++}
++
++/*
++ * Used only in nfs, to kill any pending RPC tasks, so that subsequent
++ * code can actually succeed and won't leave tasks that need handling.
++ */
++static void unionfs_umount_begin(struct super_block *sb)
++{
++ struct super_block *lower_sb;
++ int bindex, bstart, bend;
++
++ unionfs_read_lock(sb, UNIONFS_SMUTEX_CHILD);
++
++ bstart = sbstart(sb);
++ bend = sbend(sb);
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ lower_sb = unionfs_lower_super_idx(sb, bindex);
++
++ if (lower_sb && lower_sb->s_op &&
++ lower_sb->s_op->umount_begin)
++ lower_sb->s_op->umount_begin(lower_sb);
++ }
++
++ unionfs_read_unlock(sb);
++}
++
++static int unionfs_show_options(struct seq_file *m, struct vfsmount *mnt)
++{
++ struct super_block *sb = mnt->mnt_sb;
++ int ret = 0;
++ char *tmp_page;
++ char *path;
++ int bindex, bstart, bend;
++ int perms;
++
++ unionfs_read_lock(sb, UNIONFS_SMUTEX_CHILD);
++
++ unionfs_lock_dentry(sb->s_root, UNIONFS_DMUTEX_CHILD);
++
++ tmp_page = (char *) __get_free_page(GFP_KERNEL);
++ if (unlikely(!tmp_page)) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ bstart = sbstart(sb);
++ bend = sbend(sb);
++
++ seq_printf(m, ",dirs=");
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ struct path p;
++ p.dentry = unionfs_lower_dentry_idx(sb->s_root, bindex);
++ p.mnt = unionfs_lower_mnt_idx(sb->s_root, bindex);
++ path = d_path(&p, tmp_page, PAGE_SIZE);
++ if (IS_ERR(path)) {
++ ret = PTR_ERR(path);
++ goto out;
++ }
++
++ perms = branchperms(sb, bindex);
++
++ seq_printf(m, "%s=%s", path,
++ perms & MAY_WRITE ? "rw" : "ro");
++ if (bindex != bend)
++ seq_printf(m, ":");
++ }
++
++out:
++ free_page((unsigned long) tmp_page);
++
++ unionfs_unlock_dentry(sb->s_root);
++
++ unionfs_read_unlock(sb);
++
++ return ret;
++}
++
++struct super_operations unionfs_sops = {
++ .put_super = unionfs_put_super,
++ .statfs = unionfs_statfs,
++ .remount_fs = unionfs_remount_fs,
++ .evict_inode = unionfs_evict_inode,
++ .umount_begin = unionfs_umount_begin,
++ .show_options = unionfs_show_options,
++ .write_inode = unionfs_write_inode,
++ .alloc_inode = unionfs_alloc_inode,
++ .destroy_inode = unionfs_destroy_inode,
++};
+diff --git a/fs/unionfs/union.h b/fs/unionfs/union.h
+new file mode 100644
+index 0000000..d49c834
+--- /dev/null
++++ b/fs/unionfs/union.h
+@@ -0,0 +1,669 @@
++/*
++ * Copyright (c) 2003-2010 Erez Zadok
++ * Copyright (c) 2003-2006 Charles P. Wright
++ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2005 Arun M. Krishnakumar
++ * Copyright (c) 2004-2006 David P. Quigley
++ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
++ * Copyright (c) 2003 Puja Gupta
++ * Copyright (c) 2003 Harikesavan Krishnan
++ * Copyright (c) 2003-2010 Stony Brook University
++ * Copyright (c) 2003-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef _UNION_H_
++#define _UNION_H_
++
++#include <linux/dcache.h>
++#include <linux/file.h>
++#include <linux/list.h>
++#include <linux/fs.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/mount.h>
++#include <linux/namei.h>
++#include <linux/page-flags.h>
++#include <linux/pagemap.h>
++#include <linux/poll.h>
++#include <linux/security.h>
++#include <linux/seq_file.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/smp_lock.h>
++#include <linux/statfs.h>
++#include <linux/string.h>
++#include <linux/vmalloc.h>
++#include <linux/writeback.h>
++#include <linux/buffer_head.h>
++#include <linux/xattr.h>
++#include <linux/fs_stack.h>
++#include <linux/magic.h>
++#include <linux/log2.h>
++#include <linux/poison.h>
++#include <linux/mman.h>
++#include <linux/backing-dev.h>
++#include <linux/splice.h>
++
++#include <asm/system.h>
++
++#include <linux/union_fs.h>
++
++/* the file system name */
++#define UNIONFS_NAME "unionfs"
++
++/* unionfs root inode number */
++#define UNIONFS_ROOT_INO 1
++
++/* number of times we try to get a unique temporary file name */
++#define GET_TMPNAM_MAX_RETRY 5
++
++/* maximum number of branches we support, to avoid memory blowup */
++#define UNIONFS_MAX_BRANCHES 128
++
++/* minimum time (seconds) required for time-based cache-coherency */
++#define UNIONFS_MIN_CC_TIME 3
++
++/* Operations vectors defined in specific files. */
++extern struct file_operations unionfs_main_fops;
++extern struct file_operations unionfs_dir_fops;
++extern struct inode_operations unionfs_main_iops;
++extern struct inode_operations unionfs_dir_iops;
++extern struct inode_operations unionfs_symlink_iops;
++extern struct super_operations unionfs_sops;
++extern struct dentry_operations unionfs_dops;
++extern struct address_space_operations unionfs_aops, unionfs_dummy_aops;
++extern struct vm_operations_struct unionfs_vm_ops;
++
++/* How long should an entry be allowed to persist */
++#define RDCACHE_JIFFIES (5*HZ)
++
++/* compatibility with Real-Time patches */
++#ifdef CONFIG_PREEMPT_RT
++# define unionfs_rw_semaphore compat_rw_semaphore
++#else /* not CONFIG_PREEMPT_RT */
++# define unionfs_rw_semaphore rw_semaphore
++#endif /* not CONFIG_PREEMPT_RT */
++
++/* file private data. */
++struct unionfs_file_info {
++ int bstart;
++ int bend;
++ atomic_t generation;
++
++ struct unionfs_dir_state *rdstate;
++ struct file **lower_files;
++ int *saved_branch_ids; /* IDs of branches when file was opened */
++ const struct vm_operations_struct *lower_vm_ops;
++ bool wrote_to_file; /* for delayed copyup */
++};
++
++/* unionfs inode data in memory */
++struct unionfs_inode_info {
++ int bstart;
++ int bend;
++ atomic_t generation;
++ /* Stuff for readdir over NFS. */
++ spinlock_t rdlock;
++ struct list_head readdircache;
++ int rdcount;
++ int hashsize;
++ int cookie;
++
++ /* The lower inodes */
++ struct inode **lower_inodes;
++
++ struct inode vfs_inode;
++};
++
++/* unionfs dentry data in memory */
++struct unionfs_dentry_info {
++ /*
++ * The semaphore is used to lock the dentry as soon as we get into a
++ * unionfs function from the VFS. Our lock ordering is that children
++ * go before their parents.
++ */
++ struct mutex lock;
++ int bstart;
++ int bend;
++ int bopaque;
++ int bcount;
++ atomic_t generation;
++ struct path *lower_paths;
++};
++
++/* These are the pointers to our various objects. */
++struct unionfs_data {
++ struct super_block *sb; /* lower super_block */
++ atomic_t open_files; /* number of open files on branch */
++ int branchperms;
++ int branch_id; /* unique branch ID at re/mount time */
++};
++
++/* unionfs super-block data in memory */
++struct unionfs_sb_info {
++ int bend;
++
++ atomic_t generation;
++
++ /*
++ * This rwsem is used to make sure that a branch management
++ * operation...
++ * 1) will not begin before all currently in-flight operations
++ * complete.
++ * 2) any new operations do not execute until the currently
++ * running branch management operation completes.
++ *
++ * The write_lock_owner records the PID of the task which grabbed
++ * the rw_sem for writing. If the same task also tries to grab the
++ * read lock, we allow it. This prevents a self-deadlock when
++ * branch-management is used on a pivot_root'ed union, because we
++ * have to ->lookup paths which belong to the same union.
++ */
++ struct unionfs_rw_semaphore rwsem;
++ pid_t write_lock_owner; /* PID of rw_sem owner (write lock) */
++ int high_branch_id; /* last unique branch ID given */
++ char *dev_name; /* to identify different unions in pr_debug */
++ struct unionfs_data *data;
++};
++
++/*
++ * structure for making the linked list of entries by readdir on left branch
++ * to compare with entries on right branch
++ */
++struct filldir_node {
++ struct list_head file_list; /* list for directory entries */
++ char *name; /* name entry */
++ int hash; /* name hash */
++ int namelen; /* name len since name is not 0 terminated */
++
++ /*
++ * we can check for duplicate whiteouts and files in the same branch
++ * in order to return -EIO.
++ */
++ int bindex;
++
++ /* is this a whiteout entry? */
++ int whiteout;
++
++ /* Inline name, so we don't need to separately kmalloc small ones */
++ char iname[DNAME_INLINE_LEN_MIN];
++};
++
++/* Directory hash table. */
++struct unionfs_dir_state {
++ unsigned int cookie; /* the cookie, based off of rdversion */
++ unsigned int offset; /* The entry we have returned. */
++ int bindex;
++ loff_t dirpos; /* offset within the lower level directory */
++ int size; /* How big is the hash table? */
++ int hashentries; /* How many entries have been inserted? */
++ unsigned long access;
++
++ /* This cache list is used when the inode keeps us around. */
++ struct list_head cache;
++ struct list_head list[0];
++};
++
++/* externs needed for fanout.h or sioq.h */
++extern int unionfs_get_nlinks(const struct inode *inode);
++extern void unionfs_copy_attr_times(struct inode *upper);
++extern void unionfs_copy_attr_all(struct inode *dest, const struct inode *src);
++
++/* include miscellaneous macros */
++#include "fanout.h"
++#include "sioq.h"
++
++/* externs for cache creation/deletion routines */
++extern void unionfs_destroy_filldir_cache(void);
++extern int unionfs_init_filldir_cache(void);
++extern int unionfs_init_inode_cache(void);
++extern void unionfs_destroy_inode_cache(void);
++extern int unionfs_init_dentry_cache(void);
++extern void unionfs_destroy_dentry_cache(void);
++
++/* Initialize and free readdir-specific state. */
++extern int init_rdstate(struct file *file);
++extern struct unionfs_dir_state *alloc_rdstate(struct inode *inode,
++ int bindex);
++extern struct unionfs_dir_state *find_rdstate(struct inode *inode,
++ loff_t fpos);
++extern void free_rdstate(struct unionfs_dir_state *state);
++extern int add_filldir_node(struct unionfs_dir_state *rdstate,
++ const char *name, int namelen, int bindex,
++ int whiteout);
++extern struct filldir_node *find_filldir_node(struct unionfs_dir_state *rdstate,
++ const char *name, int namelen,
++ int is_whiteout);
++
++extern struct dentry **alloc_new_dentries(int objs);
++extern struct unionfs_data *alloc_new_data(int objs);
++
++/* We can only use 32-bits of offset for rdstate --- blech! */
++#define DIREOF (0xfffff)
++#define RDOFFBITS 20 /* This is the number of bits in DIREOF. */
++#define MAXRDCOOKIE (0xfff)
++/* Turn an rdstate into an offset. */
++static inline off_t rdstate2offset(struct unionfs_dir_state *buf)
++{
++ off_t tmp;
++
++ tmp = ((buf->cookie & MAXRDCOOKIE) << RDOFFBITS)
++ | (buf->offset & DIREOF);
++ return tmp;
++}
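++
++/*
++ * Worked example (illustrative values): cookie 0x002 and offset 0x010
++ * pack into (0x002 << 20) | 0x010 == 0x200010; the inverse split is
++ * cookie = (pos >> RDOFFBITS) & MAXRDCOOKIE and offset = pos & DIREOF.
++ */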
++
++/* Macros for locking a super_block. */
++enum unionfs_super_lock_class {
++ UNIONFS_SMUTEX_NORMAL,
++ UNIONFS_SMUTEX_PARENT, /* when locking on behalf of file */
++ UNIONFS_SMUTEX_CHILD, /* when locking on behalf of dentry */
++};
++static inline void unionfs_read_lock(struct super_block *sb, int subclass)
++{
++ if (UNIONFS_SB(sb)->write_lock_owner &&
++ UNIONFS_SB(sb)->write_lock_owner == current->pid)
++ return;
++ down_read_nested(&UNIONFS_SB(sb)->rwsem, subclass);
++}
++static inline void unionfs_read_unlock(struct super_block *sb)
++{
++ if (UNIONFS_SB(sb)->write_lock_owner &&
++ UNIONFS_SB(sb)->write_lock_owner == current->pid)
++ return;
++ up_read(&UNIONFS_SB(sb)->rwsem);
++}
++static inline void unionfs_write_lock(struct super_block *sb)
++{
++ down_write(&UNIONFS_SB(sb)->rwsem);
++ UNIONFS_SB(sb)->write_lock_owner = current->pid;
++}
++static inline void unionfs_write_unlock(struct super_block *sb)
++{
++ up_write(&UNIONFS_SB(sb)->rwsem);
++ UNIONFS_SB(sb)->write_lock_owner = 0;
++}
++
++static inline void unionfs_double_lock_dentry(struct dentry *d1,
++ struct dentry *d2)
++{
++ BUG_ON(d1 == d2);
++ if (d1 < d2) {
++ unionfs_lock_dentry(d1, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(d2, UNIONFS_DMUTEX_CHILD);
++ } else {
++ unionfs_lock_dentry(d2, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(d1, UNIONFS_DMUTEX_CHILD);
++ }
++}
++
++static inline void unionfs_double_unlock_dentry(struct dentry *d1,
++ struct dentry *d2)
++{
++ BUG_ON(d1 == d2);
++ if (d1 < d2) { /* unlock in reverse order than double_lock_dentry */
++ unionfs_unlock_dentry(d1);
++ unionfs_unlock_dentry(d2);
++ } else {
++ unionfs_unlock_dentry(d2);
++ unionfs_unlock_dentry(d1);
++ }
++}
++
++static inline void unionfs_double_lock_parents(struct dentry *p1,
++ struct dentry *p2)
++{
++ if (p1 == p2) {
++ unionfs_lock_dentry(p1, UNIONFS_DMUTEX_REVAL_PARENT);
++ return;
++ }
++ if (p1 < p2) {
++ unionfs_lock_dentry(p1, UNIONFS_DMUTEX_REVAL_PARENT);
++ unionfs_lock_dentry(p2, UNIONFS_DMUTEX_REVAL_CHILD);
++ } else {
++ unionfs_lock_dentry(p2, UNIONFS_DMUTEX_REVAL_PARENT);
++ unionfs_lock_dentry(p1, UNIONFS_DMUTEX_REVAL_CHILD);
++ }
++}
++
++static inline void unionfs_double_unlock_parents(struct dentry *p1,
++ struct dentry *p2)
++{
++ if (p1 == p2) {
++ unionfs_unlock_dentry(p1);
++ return;
++ }
++ if (p1 < p2) { /* unlock in reverse order of double_lock_parents */
++ unionfs_unlock_dentry(p1);
++ unionfs_unlock_dentry(p2);
++ } else {
++ unionfs_unlock_dentry(p2);
++ unionfs_unlock_dentry(p1);
++ }
++}
++
++extern int new_dentry_private_data(struct dentry *dentry, int subclass);
++extern int realloc_dentry_private_data(struct dentry *dentry);
++extern void free_dentry_private_data(struct dentry *dentry);
++extern void update_bstart(struct dentry *dentry);
++extern int init_lower_nd(struct nameidata *nd, unsigned int flags);
++extern void release_lower_nd(struct nameidata *nd, int err);
++
++/*
++ * EXTERNALS:
++ */
++
++/* replicates the directory structure up to given dentry in given branch */
++extern struct dentry *create_parents(struct inode *dir, struct dentry *dentry,
++ const char *name, int bindex);
++
++/* partial lookup */
++extern int unionfs_partial_lookup(struct dentry *dentry,
++ struct dentry *parent);
++extern struct dentry *unionfs_lookup_full(struct dentry *dentry,
++ struct dentry *parent,
++ int lookupmode);
++
++/* copies a file from dbstart to newbindex branch */
++extern int copyup_file(struct inode *dir, struct file *file, int bstart,
++ int newbindex, loff_t size);
++extern int copyup_named_file(struct inode *dir, struct file *file,
++ char *name, int bstart, int new_bindex,
++ loff_t len);
++/* copies a dentry from dbstart to newbindex branch */
++extern int copyup_dentry(struct inode *dir, struct dentry *dentry,
++ int bstart, int new_bindex, const char *name,
++ int namelen, struct file **copyup_file, loff_t len);
++/* helper functions for post-copyup actions */
++extern void unionfs_postcopyup_setmnt(struct dentry *dentry);
++extern void unionfs_postcopyup_release(struct dentry *dentry);
++
++/* Is this directory empty: 0 if it is empty, -ENOTEMPTY if not. */
++extern int check_empty(struct dentry *dentry, struct dentry *parent,
++ struct unionfs_dir_state **namelist);
++/* whiteout and opaque directory helpers */
++extern char *alloc_whname(const char *name, int len);
++extern bool is_whiteout_name(char **namep, int *namelenp);
++extern bool is_validname(const char *name);
++extern struct dentry *lookup_whiteout(const char *name,
++ struct dentry *lower_parent);
++extern struct dentry *find_first_whiteout(struct dentry *dentry);
++extern int unlink_whiteout(struct dentry *wh_dentry);
++extern int check_unlink_whiteout(struct dentry *dentry,
++ struct dentry *lower_dentry, int bindex);
++extern int create_whiteout(struct dentry *dentry, int start);
++extern int delete_whiteouts(struct dentry *dentry, int bindex,
++ struct unionfs_dir_state *namelist);
++extern int is_opaque_dir(struct dentry *dentry, int bindex);
++extern int make_dir_opaque(struct dentry *dir, int bindex);
++extern void unionfs_set_max_namelen(long *namelen);
++
++extern void unionfs_reinterpose(struct dentry *this_dentry);
++extern struct super_block *unionfs_duplicate_super(struct super_block *sb);
++
++/* Locking functions. */
++extern int unionfs_setlk(struct file *file, int cmd, struct file_lock *fl);
++extern int unionfs_getlk(struct file *file, struct file_lock *fl);
++
++/* Common file operations. */
++extern int unionfs_file_revalidate(struct file *file, struct dentry *parent,
++ bool willwrite);
++extern int unionfs_open(struct inode *inode, struct file *file);
++extern int unionfs_file_release(struct inode *inode, struct file *file);
++extern int unionfs_flush(struct file *file, fl_owner_t id);
++extern long unionfs_ioctl(struct file *file, unsigned int cmd,
++ unsigned long arg);
++extern int unionfs_fsync(struct file *file, int datasync);
++extern int unionfs_fasync(int fd, struct file *file, int flag);
++
++/* Inode operations */
++extern struct inode *unionfs_iget(struct super_block *sb, unsigned long ino);
++extern int unionfs_rename(struct inode *old_dir, struct dentry *old_dentry,
++ struct inode *new_dir, struct dentry *new_dentry);
++extern int unionfs_unlink(struct inode *dir, struct dentry *dentry);
++extern int unionfs_rmdir(struct inode *dir, struct dentry *dentry);
++
++extern bool __unionfs_d_revalidate(struct dentry *dentry,
++ struct dentry *parent, bool willwrite);
++extern bool is_negative_lower(const struct dentry *dentry);
++extern bool is_newer_lower(const struct dentry *dentry);
++extern void purge_sb_data(struct super_block *sb);
++
++/* The values for unionfs_interpose's flag. */
++#define INTERPOSE_DEFAULT 0
++#define INTERPOSE_LOOKUP 1
++#define INTERPOSE_REVAL 2
++#define INTERPOSE_REVAL_NEG 3
++#define INTERPOSE_PARTIAL 4
++
++extern struct dentry *unionfs_interpose(struct dentry *this_dentry,
++ struct super_block *sb, int flag);
++
++#ifdef CONFIG_UNION_FS_XATTR
++/* Extended attribute functions. */
++extern void *unionfs_xattr_alloc(size_t size, size_t limit);
++static inline void unionfs_xattr_kfree(const void *p)
++{
++ kfree(p);
++}
++extern ssize_t unionfs_getxattr(struct dentry *dentry, const char *name,
++ void *value, size_t size);
++extern int unionfs_removexattr(struct dentry *dentry, const char *name);
++extern ssize_t unionfs_listxattr(struct dentry *dentry, char *list,
++ size_t size);
++extern int unionfs_setxattr(struct dentry *dentry, const char *name,
++ const void *value, size_t size, int flags);
++#endif /* CONFIG_UNION_FS_XATTR */
++
++/* The root directory is unhashed, but isn't deleted. */
++static inline int d_deleted(struct dentry *d)
++{
++ return d_unhashed(d) && (d != d->d_sb->s_root);
++}
++
++/* unionfs_permission, check if we should bypass error to facilitate copyup */
++#define IS_COPYUP_ERR(err) ((err) == -EROFS)
++
++/* unionfs_open, check if we need to copyup the file */
++#define OPEN_WRITE_FLAGS (O_WRONLY | O_RDWR | O_APPEND)
++#define IS_WRITE_FLAG(flag) ((flag) & OPEN_WRITE_FLAGS)
++
++static inline int branchperms(const struct super_block *sb, int index)
++{
++ BUG_ON(index < 0);
++ return UNIONFS_SB(sb)->data[index].branchperms;
++}
++
++static inline int set_branchperms(struct super_block *sb, int index, int perms)
++{
++ BUG_ON(index < 0);
++ UNIONFS_SB(sb)->data[index].branchperms = perms;
++ return perms;
++}
++
++/* check if readonly lower inode, but possibly unlinked (no inode->i_sb) */
++static inline int __is_rdonly(const struct inode *inode)
++{
++ /* if unlinked, can't be readonly (?) */
++ if (!inode->i_sb)
++ return 0;
++ return IS_RDONLY(inode);
++
++}
++/* Is this file on a read-only branch? */
++static inline int is_robranch_super(const struct super_block *sb, int index)
++{
++ int ret;
++
++ ret = (!(branchperms(sb, index) & MAY_WRITE)) ? -EROFS : 0;
++ return ret;
++}
++
++/* Is this file on a read-only branch? */
++static inline int is_robranch_idx(const struct dentry *dentry, int index)
++{
++ struct super_block *lower_sb;
++
++ BUG_ON(index < 0);
++
++ if (!(branchperms(dentry->d_sb, index) & MAY_WRITE))
++ return -EROFS;
++
++ lower_sb = unionfs_lower_super_idx(dentry->d_sb, index);
++ BUG_ON(lower_sb == NULL);
++ /*
++ * test sb flags directly, not IS_RDONLY(lower_inode) because the
++ * lower_dentry could be a negative.
++ */
++ if (lower_sb->s_flags & MS_RDONLY)
++ return -EROFS;
++
++ return 0;
++}
++
++static inline int is_robranch(const struct dentry *dentry)
++{
++ int index;
++
++ index = UNIONFS_D(dentry)->bstart;
++ BUG_ON(index < 0);
++
++ return is_robranch_idx(dentry, index);
++}
++
++/*
++ * EXTERNALS:
++ */
++extern int check_branch(struct nameidata *nd);
++extern int parse_branch_mode(const char *name, int *perms);
++
++/* locking helpers */
++static inline struct dentry *lock_parent(struct dentry *dentry)
++{
++ struct dentry *dir = dget_parent(dentry);
++ mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
++ return dir;
++}
++static inline struct dentry *lock_parent_wh(struct dentry *dentry)
++{
++ struct dentry *dir = dget_parent(dentry);
++
++ mutex_lock_nested(&dir->d_inode->i_mutex, UNIONFS_DMUTEX_WHITEOUT);
++ return dir;
++}
++
++static inline void unlock_dir(struct dentry *dir)
++{
++ mutex_unlock(&dir->d_inode->i_mutex);
++ dput(dir);
++}
++
++/* lock base inode mutex before calling lookup_one_len */
++static inline struct dentry *lookup_lck_len(const char *name,
++ struct dentry *base, int len)
++{
++ struct dentry *d;
++ mutex_lock(&base->d_inode->i_mutex);
++ d = lookup_one_len(name, base, len);
++ mutex_unlock(&base->d_inode->i_mutex);
++ return d;
++}
++
++static inline struct vfsmount *unionfs_mntget(struct dentry *dentry,
++ int bindex)
++{
++ struct vfsmount *mnt;
++
++ BUG_ON(!dentry || bindex < 0);
++
++ mnt = mntget(unionfs_lower_mnt_idx(dentry, bindex));
++#ifdef CONFIG_UNION_FS_DEBUG
++ if (!mnt)
++ pr_debug("unionfs: mntget: mnt=%p bindex=%d\n",
++ mnt, bindex);
++#endif /* CONFIG_UNION_FS_DEBUG */
++
++ return mnt;
++}
++
++static inline void unionfs_mntput(struct dentry *dentry, int bindex)
++{
++ struct vfsmount *mnt;
++
++ if (!dentry && bindex < 0)
++ return;
++ BUG_ON(!dentry || bindex < 0);
++
++ mnt = unionfs_lower_mnt_idx(dentry, bindex);
++#ifdef CONFIG_UNION_FS_DEBUG
++ /*
++ * Directories can have NULL lower objects in between start/end, but
++ * NOT if at the start/end range. We cannot verify that this dentry
++	 * NOT at the start/end of the range.  We cannot verify that this dentry
++ * if dbstart is greater than dbend, we know that this couldn't have
++ * been a regular file: it had to have been a directory.
++ */
++ if (!mnt && !(bindex > dbstart(dentry) && bindex < dbend(dentry)))
++ pr_debug("unionfs: mntput: mnt=%p bindex=%d\n", mnt, bindex);
++#endif /* CONFIG_UNION_FS_DEBUG */
++ mntput(mnt);
++}
++
++#ifdef CONFIG_UNION_FS_DEBUG
++
++/* useful for tracking code reachability */
++#define UDBG pr_debug("DBG:%s:%s:%d\n", __FILE__, __func__, __LINE__)
++
++#define unionfs_check_inode(i) __unionfs_check_inode((i), \
++ __FILE__, __func__, __LINE__)
++#define unionfs_check_dentry(d) __unionfs_check_dentry((d), \
++ __FILE__, __func__, __LINE__)
++#define unionfs_check_file(f) __unionfs_check_file((f), \
++ __FILE__, __func__, __LINE__)
++#define unionfs_check_nd(n) __unionfs_check_nd((n), \
++ __FILE__, __func__, __LINE__)
++#define show_branch_counts(sb) __show_branch_counts((sb), \
++ __FILE__, __func__, __LINE__)
++#define show_inode_times(i) __show_inode_times((i), \
++ __FILE__, __func__, __LINE__)
++#define show_dinode_times(d) __show_dinode_times((d), \
++ __FILE__, __func__, __LINE__)
++#define show_inode_counts(i) __show_inode_counts((i), \
++ __FILE__, __func__, __LINE__)
++
++extern void __unionfs_check_inode(const struct inode *inode, const char *fname,
++ const char *fxn, int line);
++extern void __unionfs_check_dentry(const struct dentry *dentry,
++ const char *fname, const char *fxn,
++ int line);
++extern void __unionfs_check_file(const struct file *file,
++ const char *fname, const char *fxn, int line);
++extern void __unionfs_check_nd(const struct nameidata *nd,
++ const char *fname, const char *fxn, int line);
++extern void __show_branch_counts(const struct super_block *sb,
++ const char *file, const char *fxn, int line);
++extern void __show_inode_times(const struct inode *inode,
++ const char *file, const char *fxn, int line);
++extern void __show_dinode_times(const struct dentry *dentry,
++ const char *file, const char *fxn, int line);
++extern void __show_inode_counts(const struct inode *inode,
++ const char *file, const char *fxn, int line);
++
++#else /* not CONFIG_UNION_FS_DEBUG */
++
++/* we leave useful hooks for these check functions throughout the code */
++#define unionfs_check_inode(i) do { } while (0)
++#define unionfs_check_dentry(d) do { } while (0)
++#define unionfs_check_file(f) do { } while (0)
++#define unionfs_check_nd(n) do { } while (0)
++#define show_branch_counts(sb) do { } while (0)
++#define show_inode_times(i) do { } while (0)
++#define show_dinode_times(d) do { } while (0)
++#define show_inode_counts(i) do { } while (0)
++
++#endif /* not CONFIG_UNION_FS_DEBUG */
++
++#endif /* not _UNION_H_ */
+diff --git a/fs/unionfs/unlink.c b/fs/unionfs/unlink.c
+new file mode 100644
+index 0000000..542c513
+--- /dev/null
++++ b/fs/unionfs/unlink.c
+@@ -0,0 +1,278 @@
++/*
++ * Copyright (c) 2003-2010 Erez Zadok
++ * Copyright (c) 2003-2006 Charles P. Wright
++ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2005-2006 Junjiro Okajima
++ * Copyright (c) 2005 Arun M. Krishnakumar
++ * Copyright (c) 2004-2006 David P. Quigley
++ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
++ * Copyright (c) 2003 Puja Gupta
++ * Copyright (c) 2003 Harikesavan Krishnan
++ * Copyright (c) 2003-2010 Stony Brook University
++ * Copyright (c) 2003-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "union.h"
++
++/*
++ * Helper function for Unionfs's unlink operation.
++ *
++ * The main goal of this function is to optimize the unlinking of non-dir
++ * objects in unionfs by deleting all possible lower inode objects from the
++ * underlying branches that have the same dentry name as the non-dir dentry on
++ * which this unlink operation is called. This way we delete as many lower
++ * inodes as possible, and save space. Whiteouts need to be created in
++ * branch0 only if unlinking fails on any of the lower branch other than
++ * branch0, or if a lower branch is marked read-only.
++ *
++ * Also, while unlinking a file, if we encounter any dir type entry in any
++ * intermediate branch, then we remove the directory by calling vfs_rmdir.
++ * The following special cases are also handled:
++ *
++ * (1) If an error occurs in branch0 during vfs_unlink, then we return
++ * appropriate error.
++ *
++ * (2) If we get an error during unlink in any of other lower branch other
++ * than branch0, then we create a whiteout in branch0.
++ *
++ * (3) If a whiteout already exists in any intermediate branch, we delete
++ * all possible inodes only up to that branch (this is an "opaqueness"
++ * as per Documentation/filesystems/unionfs/concepts.txt).
++ *
++ */
++static int unionfs_unlink_whiteout(struct inode *dir, struct dentry *dentry,
++ struct dentry *parent)
++{
++ struct dentry *lower_dentry;
++ struct dentry *lower_dir_dentry;
++ int bindex;
++ int err = 0;
++
++ err = unionfs_partial_lookup(dentry, parent);
++ if (err)
++ goto out;
++
++ /* trying to unlink all possible valid instances */
++ for (bindex = dbstart(dentry); bindex <= dbend(dentry); bindex++) {
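++ /* walk all branches with a lower object, highest-priority (leftmost) first */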
++ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
++ if (!lower_dentry || !lower_dentry->d_inode)
++ continue;
++
++ lower_dir_dentry = lock_parent(lower_dentry);
++
++ /* avoid destroying the lower inode if the object is in use */
++ dget(lower_dentry);
++ err = is_robranch_super(dentry->d_sb, bindex);
++ if (!err) {
++ /* see Documentation/filesystems/unionfs/issues.txt */
++ lockdep_off();
++ if (!S_ISDIR(lower_dentry->d_inode->i_mode))
++ err = vfs_unlink(lower_dir_dentry->d_inode,
++ lower_dentry);
++ else
++ err = vfs_rmdir(lower_dir_dentry->d_inode,
++ lower_dentry);
++ lockdep_on();
++ }
++
++ /* if lower object deletion succeeds, update inode's times */
++ if (!err)
++ unionfs_copy_attr_times(dentry->d_inode);
++ dput(lower_dentry);
++ fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
++ unlock_dir(lower_dir_dentry);
++
++ if (err)
++ break;
++ }
++
++ /*
++ * Create the whiteout in branch 0 (highest priority) only if (a)
++ * there was an error in any intermediate branch other than branch 0
++ * due to failure of vfs_unlink/vfs_rmdir or (b) a branch marked or
++ * mounted read-only.
++ */
++ if (err) {
++ if ((bindex == 0) ||
++ ((bindex == dbstart(dentry)) &&
++ (!IS_COPYUP_ERR(err))))
++ goto out;
++ else {
++ if (!IS_COPYUP_ERR(err))
++ pr_debug("unionfs: lower object deletion "
++ "failed in branch:%d\n", bindex);
++ err = create_whiteout(dentry, sbstart(dentry->d_sb));
++ }
++ }
++
++out:
++ if (!err)
++ inode_dec_link_count(dentry->d_inode);
++
++ /* We don't want to leave negative leftover dentries for revalidate. */
++ if (!err && (dbopaque(dentry) != -1))
++ update_bstart(dentry);
++
++ return err;
++}
++
++int unionfs_unlink(struct inode *dir, struct dentry *dentry)
++{
++ int err = 0;
++ struct inode *inode = dentry->d_inode;
++ struct dentry *parent;
++ int valid;
++
++ BUG_ON(S_ISDIR(inode->i_mode));
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ valid = __unionfs_d_revalidate(dentry, parent, false);
++ if (unlikely(!valid)) {
++ err = -ESTALE;
++ goto out;
++ }
++ unionfs_check_dentry(dentry);
++
++ err = unionfs_unlink_whiteout(dir, dentry, parent);
++ /* call d_drop so the system "forgets" about us */
++ if (!err) {
++ unionfs_postcopyup_release(dentry);
++ unionfs_postcopyup_setmnt(parent);
++ if (inode->i_nlink == 0) /* drop lower inodes */
++ iput_lowers_all(inode, false);
++ d_drop(dentry);
++ /*
++ * if unlink/whiteout succeeded, parent dir mtime has
++ * changed
++ */
++ unionfs_copy_attr_times(dir);
++ }
++
++out:
++ if (!err) {
++ unionfs_check_dentry(dentry);
++ unionfs_check_inode(dir);
++ }
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++ return err;
++}
++
++static int unionfs_rmdir_first(struct inode *dir, struct dentry *dentry,
++ struct unionfs_dir_state *namelist)
++{
++ int err;
++ struct dentry *lower_dentry;
++ struct dentry *lower_dir_dentry = NULL;
++
++ /* Here we need to remove whiteout entries. */
++ err = delete_whiteouts(dentry, dbstart(dentry), namelist);
++ if (err)
++ goto out;
++
++ lower_dentry = unionfs_lower_dentry(dentry);
++
++ lower_dir_dentry = lock_parent(lower_dentry);
++
++ /* avoid destroying the lower inode if the file is in use */
++ dget(lower_dentry);
++ err = is_robranch(dentry);
++ if (!err)
++ err = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry);
++ dput(lower_dentry);
++
++ fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
++ /* propagate number of hard-links */
++ dentry->d_inode->i_nlink = unionfs_get_nlinks(dentry->d_inode);
++
++out:
++ if (lower_dir_dentry)
++ unlock_dir(lower_dir_dentry);
++ return err;
++}
++
++int unionfs_rmdir(struct inode *dir, struct dentry *dentry)
++{
++ int err = 0;
++ struct unionfs_dir_state *namelist = NULL;
++ struct dentry *parent;
++ int dstart, dend;
++ bool valid;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ valid = __unionfs_d_revalidate(dentry, parent, false);
++ if (unlikely(!valid)) {
++ err = -ESTALE;
++ goto out;
++ }
++ unionfs_check_dentry(dentry);
++
++ /* check if this unionfs directory is empty or not */
++ err = check_empty(dentry, parent, &namelist);
++ if (err)
++ goto out;
++
++ err = unionfs_rmdir_first(dir, dentry, namelist);
++ dstart = dbstart(dentry);
++ dend = dbend(dentry);
++ /*
++ * We create a whiteout for the directory if there was an error to
++ * rmdir the first directory entry in the union. Otherwise, we
++ * create a whiteout only if there is no chance that a lower
++ * priority branch might also have the same named directory. IOW,
++ * if there is not another same-named directory at a lower priority
++ * branch, then we don't need to create a whiteout for it.
++ */
++ if (!err) {
++ if (dstart < dend)
++ err = create_whiteout(dentry, dstart);
++ } else {
++ int new_err;
++
++ if (dstart == 0)
++ goto out;
++
++ /* exit if the error returned was NOT -EROFS */
++ if (!IS_COPYUP_ERR(err))
++ goto out;
++
++ new_err = create_whiteout(dentry, dstart - 1);
++ if (new_err != -EEXIST)
++ err = new_err;
++ }
++
++out:
++ /*
++ * Drop references to lower dentry/inode so storage space for them
++ * can be reclaimed. Then, call d_drop so the system "forgets"
++ * about us.
++ */
++ if (!err) {
++ iput_lowers_all(dentry->d_inode, false);
++ dput(unionfs_lower_dentry_idx(dentry, dstart));
++ unionfs_set_lower_dentry_idx(dentry, dstart, NULL);
++ d_drop(dentry);
++ /* update our lower vfsmnts, in case a copyup took place */
++ unionfs_postcopyup_setmnt(dentry);
++ unionfs_check_dentry(dentry);
++ unionfs_check_inode(dir);
++ }
++
++ if (namelist)
++ free_rdstate(namelist);
++
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++ return err;
++}
+diff --git a/fs/unionfs/whiteout.c b/fs/unionfs/whiteout.c
+new file mode 100644
+index 0000000..405073a
+--- /dev/null
++++ b/fs/unionfs/whiteout.c
+@@ -0,0 +1,584 @@
++/*
++ * Copyright (c) 2003-2010 Erez Zadok
++ * Copyright (c) 2003-2006 Charles P. Wright
++ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2005-2006 Junjiro Okajima
++ * Copyright (c) 2005 Arun M. Krishnakumar
++ * Copyright (c) 2004-2006 David P. Quigley
++ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
++ * Copyright (c) 2003 Puja Gupta
++ * Copyright (c) 2003 Harikesavan Krishnan
++ * Copyright (c) 2003-2010 Stony Brook University
++ * Copyright (c) 2003-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "union.h"
++
++/*
++ * whiteout and opaque directory helpers
++ */
++
++/* The prefix we use to mark whiteout files. */
++#define UNIONFS_WHPFX ".wh."
++#define UNIONFS_WHLEN 4
++/*
++ * If a directory contains this file, then it is opaque. We start with the
++ * .wh. flag so that it is blocked by lookup.
++ */
++#define UNIONFS_DIR_OPAQUE_NAME "__dir_opaque"
++#define UNIONFS_DIR_OPAQUE UNIONFS_WHPFX UNIONFS_DIR_OPAQUE_NAME
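++/* i.e. the full opaque marker filename is ".wh.__dir_opaque" */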
++
++/* construct whiteout filename */
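++/* e.g. "foo" becomes ".wh.foo"; the caller must kfree() the returned buffer */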
++char *alloc_whname(const char *name, int len)
++{
++ char *buf;
++
++ buf = kmalloc(len + UNIONFS_WHLEN + 1, GFP_KERNEL);
++ if (unlikely(!buf))
++ return ERR_PTR(-ENOMEM);
++
++ strcpy(buf, UNIONFS_WHPFX);
++ strlcat(buf, name, len + UNIONFS_WHLEN + 1);
++
++ return buf;
++}
++
++/*
++ * XXX: this can be inline or CPP macro, but is here to keep all whiteout
++ * code in one place.
++ */
++void unionfs_set_max_namelen(long *namelen)
++{
++ *namelen -= UNIONFS_WHLEN;
++}
++
++/* check if @namep is a whiteout, update @namep and @namelenp accordingly */
++bool is_whiteout_name(char **namep, int *namelenp)
++{
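++ /* e.g. ".wh.foo" (len 7) yields "foo" (len 3); only pointer and length are adjusted */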
++ if (*namelenp > UNIONFS_WHLEN &&
++ !strncmp(*namep, UNIONFS_WHPFX, UNIONFS_WHLEN)) {
++ *namep += UNIONFS_WHLEN;
++ *namelenp -= UNIONFS_WHLEN;
++ return true;
++ }
++ return false;
++}
++
++/* a filename is valid iff it is neither a whiteout nor the opaque-dir marker */
++bool is_validname(const char *name)
++{
++ if (!strncmp(name, UNIONFS_WHPFX, UNIONFS_WHLEN))
++ return false;
++ if (!strncmp(name, UNIONFS_DIR_OPAQUE_NAME,
++ sizeof(UNIONFS_DIR_OPAQUE_NAME) - 1))
++ return false;
++ return true;
++}
++
++/*
++ * Look for a whiteout @name in @lower_parent directory. If error, return
++ * ERR_PTR. Caller must dput() the returned dentry if not an error.
++ *
++ * XXX: some callers can reuse the whname allocated buffer to avoid repeated
++ * free then re-malloc calls. Need to provide a different API for those
++ * callers.
++ */
++struct dentry *lookup_whiteout(const char *name, struct dentry *lower_parent)
++{
++ char *whname = NULL;
++ int err = 0, namelen;
++ struct dentry *wh_dentry = NULL;
++
++ namelen = strlen(name);
++ whname = alloc_whname(name, namelen);
++ if (unlikely(IS_ERR(whname))) {
++ err = PTR_ERR(whname);
++ goto out;
++ }
++
++ /* check if whiteout exists in this branch: lookup .wh.foo */
++ wh_dentry = lookup_lck_len(whname, lower_parent, strlen(whname));
++ if (IS_ERR(wh_dentry)) {
++ err = PTR_ERR(wh_dentry);
++ goto out;
++ }
++
++ /* check if negative dentry (ENOENT) */
++ if (!wh_dentry->d_inode)
++ goto out;
++
++ /* whiteout found: check if valid type */
++ if (!S_ISREG(wh_dentry->d_inode->i_mode)) {
++ printk(KERN_ERR "unionfs: invalid whiteout %s entry type %d\n",
++ whname, wh_dentry->d_inode->i_mode);
++ dput(wh_dentry);
++ err = -EIO;
++ goto out;
++ }
++
++out:
++ kfree(whname);
++ if (err)
++ wh_dentry = ERR_PTR(err);
++ return wh_dentry;
++}
++
++/* find and return first whiteout in parent directory, else ENOENT */
++struct dentry *find_first_whiteout(struct dentry *dentry)
++{
++ int bindex, bstart, bend;
++ struct dentry *parent, *lower_parent, *wh_dentry;
++
++ parent = dget_parent(dentry);
++
++ bstart = dbstart(parent);
++ bend = dbend(parent);
++ wh_dentry = ERR_PTR(-ENOENT);
++
++ for (bindex = bstart; bindex <= bend; bindex++) {
++ lower_parent = unionfs_lower_dentry_idx(parent, bindex);
++ if (!lower_parent)
++ continue;
++ wh_dentry = lookup_whiteout(dentry->d_name.name, lower_parent);
++ if (IS_ERR(wh_dentry))
++ continue;
++ if (wh_dentry->d_inode)
++ break;
++ dput(wh_dentry);
++ wh_dentry = ERR_PTR(-ENOENT);
++ }
++
++ dput(parent);
++
++ return wh_dentry;
++}
++
++/*
++ * Unlink a whiteout dentry. Returns 0 or -errno. Caller must hold and
++ * release dentry reference.
++ */
++int unlink_whiteout(struct dentry *wh_dentry)
++{
++ int err;
++ struct dentry *lower_dir_dentry;
++
++ /* dget and lock parent dentry */
++ lower_dir_dentry = lock_parent_wh(wh_dentry);
++
++ /* see Documentation/filesystems/unionfs/issues.txt */
++ lockdep_off();
++ err = vfs_unlink(lower_dir_dentry->d_inode, wh_dentry);
++ lockdep_on();
++ unlock_dir(lower_dir_dentry);
++
++ /*
++ * Whiteouts are special files and should be deleted no matter what
++ * (as if they never existed), in order to allow this create
++ * operation to succeed. This is especially important in sticky
++ * directories: a whiteout may have been created by one user, but
++ * the newly created file may be created by another user.
++ * Therefore, in order to maintain Unix semantics, if the vfs_unlink
++ * above failed, then we have to try to directly unlink the
++ * whiteout. Note: in the ODF version of unionfs, whiteouts are
++ * handled much more cleanly.
++ */
++ if (err == -EPERM) {
++ struct inode *inode = lower_dir_dentry->d_inode;
++ err = inode->i_op->unlink(inode, wh_dentry);
++ }
++ if (err)
++ printk(KERN_ERR "unionfs: could not unlink whiteout %s, "
++ "err = %d\n", wh_dentry->d_name.name, err);
++
++ return err;
++
++}
++
++/*
++ * Helper function when creating new objects (create, symlink, mknod, etc.).
++ * Checks to see if there's a whiteout in @lower_dentry's parent directory,
++ * whose name is taken from @dentry. Then tries to remove that whiteout, if
++ * found. If <dentry,bindex> is a branch marked readonly, return -EROFS.
++ * If it finds both a regular file and a whiteout, return -EIO (this should
++ * never happen).
++ *
++ * Return 0 if no whiteout was found. Return 1 if one was found and
++ * successfully removed. Therefore a value >= 0 tells the caller that
++ * @lower_dentry belongs to a good branch to create the new object in.
++ * Return -ERRNO if an error occurred during whiteout lookup or in trying to
++ * unlink the whiteout.
++ */
++int check_unlink_whiteout(struct dentry *dentry, struct dentry *lower_dentry,
++ int bindex)
++{
++ int err;
++ struct dentry *wh_dentry = NULL;
++ struct dentry *lower_dir_dentry = NULL;
++
++ /* look for whiteout dentry first */
++ lower_dir_dentry = dget_parent(lower_dentry);
++ wh_dentry = lookup_whiteout(dentry->d_name.name, lower_dir_dentry);
++ dput(lower_dir_dentry);
++ if (IS_ERR(wh_dentry)) {
++ err = PTR_ERR(wh_dentry);
++ goto out;
++ }
++
++ if (!wh_dentry->d_inode) { /* no whiteout exists */
++ err = 0;
++ goto out_dput;
++ }
++
++ /* check if regular file and whiteout were both found */
++ if (unlikely(lower_dentry->d_inode)) {
++ err = -EIO;
++ printk(KERN_ERR "unionfs: found both whiteout and regular "
++ "file in directory %s (branch %d)\n",
++ lower_dir_dentry->d_name.name, bindex);
++ goto out_dput;
++ }
++
++ /* check if branch is writeable */
++ err = is_robranch_super(dentry->d_sb, bindex);
++ if (err)
++ goto out_dput;
++
++ /* .wh.foo has been found, so let's unlink it */
++ err = unlink_whiteout(wh_dentry);
++ if (!err)
++ err = 1; /* a whiteout was found and successfully removed */
++out_dput:
++ dput(wh_dentry);
++out:
++ return err;
++}
++
++/*
++ * Pass a unionfs dentry and an index. It will try to create a whiteout
++ * for the filename in dentry, and will try in branch 'index'. On error,
++ * it will proceed to a branch to the left.
++ */
++int create_whiteout(struct dentry *dentry, int start)
++{
++ int bstart, bend, bindex;
++ struct dentry *lower_dir_dentry;
++ struct dentry *lower_dentry;
++ struct dentry *lower_wh_dentry;
++ struct nameidata nd;
++ char *name = NULL;
++ int err = -EINVAL;
++
++ verify_locked(dentry);
++
++ bstart = dbstart(dentry);
++ bend = dbend(dentry);
++
++ /* create dentry's whiteout equivalent */
++ name = alloc_whname(dentry->d_name.name, dentry->d_name.len);
++ if (unlikely(IS_ERR(name))) {
++ err = PTR_ERR(name);
++ goto out;
++ }
++
++ for (bindex = start; bindex >= 0; bindex--) {
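++ /* try branch 'start' first; on a copyup-type error (-EROFS) fall back toward branch 0 */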
++ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
++
++ if (!lower_dentry) {
++ /*
++ * if lower dentry is not present, create the
++ * entire lower dentry directory structure and go
++ * ahead. Since we want to just create whiteout, we
++ * only want the parent dentry, and hence get rid of
++ * this dentry.
++ */
++ lower_dentry = create_parents(dentry->d_inode,
++ dentry,
++ dentry->d_name.name,
++ bindex);
++ if (!lower_dentry || IS_ERR(lower_dentry)) {
++ int ret = PTR_ERR(lower_dentry);
++ if (!IS_COPYUP_ERR(ret))
++ printk(KERN_ERR
++ "unionfs: create_parents for "
++ "whiteout failed: bindex=%d "
++ "err=%d\n", bindex, ret);
++ continue;
++ }
++ }
++
++ lower_wh_dentry =
++ lookup_lck_len(name, lower_dentry->d_parent,
++ dentry->d_name.len + UNIONFS_WHLEN);
++ if (IS_ERR(lower_wh_dentry))
++ continue;
++
++ /*
++ * The whiteout already exists. This used to be impossible,
++ * but now is possible because of opaqueness.
++ */
++ if (lower_wh_dentry->d_inode) {
++ dput(lower_wh_dentry);
++ err = 0;
++ goto out;
++ }
++
++ err = init_lower_nd(&nd, LOOKUP_CREATE);
++ if (unlikely(err < 0))
++ goto out;
++ lower_dir_dentry = lock_parent_wh(lower_wh_dentry);
++ err = is_robranch_super(dentry->d_sb, bindex);
++ if (!err)
++ err = vfs_create(lower_dir_dentry->d_inode,
++ lower_wh_dentry,
++ current_umask() & S_IRUGO,
++ &nd);
++ unlock_dir(lower_dir_dentry);
++ dput(lower_wh_dentry);
++ release_lower_nd(&nd, err);
++
++ if (!err || !IS_COPYUP_ERR(err))
++ break;
++ }
++
++ /* set dbopaque so that lookup will not proceed after this branch */
++ if (!err)
++ dbopaque(dentry) = bindex;
++
++out:
++ kfree(name);
++ return err;
++}
++
++/*
++ * Delete all of the whiteouts in a given directory for rmdir.
++ *
++ * lower directory inode should be locked
++ */
++static int do_delete_whiteouts(struct dentry *dentry, int bindex,
++ struct unionfs_dir_state *namelist)
++{
++ int err = 0;
++ struct dentry *lower_dir_dentry = NULL;
++ struct dentry *lower_dentry;
++ char *name = NULL, *p;
++ struct inode *lower_dir;
++ int i;
++ struct list_head *pos;
++ struct filldir_node *cursor;
++
++ /* Find out lower parent dentry */
++ lower_dir_dentry = unionfs_lower_dentry_idx(dentry, bindex);
++ BUG_ON(!S_ISDIR(lower_dir_dentry->d_inode->i_mode));
++ lower_dir = lower_dir_dentry->d_inode;
++ BUG_ON(!S_ISDIR(lower_dir->i_mode));
++
++ err = -ENOMEM;
++ name = __getname();
++ if (unlikely(!name))
++ goto out;
++ strcpy(name, UNIONFS_WHPFX);
++ p = name + UNIONFS_WHLEN;
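++ /* 'name' begins with ".wh."; each pass copies an entry's plain name at 'p' to rebuild ".wh.<name>" */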
++
++ err = 0;
++ for (i = 0; !err && i < namelist->size; i++) {
++ list_for_each(pos, &namelist->list[i]) {
++ cursor =
++ list_entry(pos, struct filldir_node,
++ file_list);
++ /* Only operate on whiteouts in this branch. */
++ if (cursor->bindex != bindex)
++ continue;
++ if (!cursor->whiteout)
++ continue;
++
++ strlcpy(p, cursor->name, PATH_MAX - UNIONFS_WHLEN);
++ lower_dentry =
++ lookup_lck_len(name, lower_dir_dentry,
++ cursor->namelen +
++ UNIONFS_WHLEN);
++ if (IS_ERR(lower_dentry)) {
++ err = PTR_ERR(lower_dentry);
++ break;
++ }
++ if (lower_dentry->d_inode)
++ err = vfs_unlink(lower_dir, lower_dentry);
++ dput(lower_dentry);
++ if (err)
++ break;
++ }
++ }
++
++ __putname(name);
++
++ /* After all of the removals, we should copy the attributes once. */
++ fsstack_copy_attr_times(dentry->d_inode, lower_dir_dentry->d_inode);
++
++out:
++ return err;
++}
++
++
++void __delete_whiteouts(struct work_struct *work)
++{
++ struct sioq_args *args = container_of(work, struct sioq_args, work);
++ struct deletewh_args *d = &args->deletewh;
++
++ args->err = do_delete_whiteouts(d->dentry, d->bindex, d->namelist);
++ complete(&args->comp);
++}
++
++/* delete whiteouts in a dir (for rmdir operation) using sioq if necessary */
++int delete_whiteouts(struct dentry *dentry, int bindex,
++ struct unionfs_dir_state *namelist)
++{
++ int err;
++ struct super_block *sb;
++ struct dentry *lower_dir_dentry;
++ struct inode *lower_dir;
++ struct sioq_args args;
++
++ sb = dentry->d_sb;
++
++ BUG_ON(!S_ISDIR(dentry->d_inode->i_mode));
++ BUG_ON(bindex < dbstart(dentry));
++ BUG_ON(bindex > dbend(dentry));
++ err = is_robranch_super(sb, bindex);
++ if (err)
++ goto out;
++
++ lower_dir_dentry = unionfs_lower_dentry_idx(dentry, bindex);
++ BUG_ON(!S_ISDIR(lower_dir_dentry->d_inode->i_mode));
++ lower_dir = lower_dir_dentry->d_inode;
++ BUG_ON(!S_ISDIR(lower_dir->i_mode));
++
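++ /* delete directly if we have write+exec on the lower dir, otherwise hand the work to the sioq helper */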
++ if (!inode_permission(lower_dir, MAY_WRITE | MAY_EXEC)) {
++ err = do_delete_whiteouts(dentry, bindex, namelist);
++ } else {
++ args.deletewh.namelist = namelist;
++ args.deletewh.dentry = dentry;
++ args.deletewh.bindex = bindex;
++ run_sioq(__delete_whiteouts, &args);
++ err = args.err;
++ }
++
++out:
++ return err;
++}
++
++/****************************************************************************
++ * Opaque directory helpers *
++ ****************************************************************************/
++
++/*
++ * is_opaque_dir: returns 0 if it is NOT an opaque dir, 1 if it is, and
++ * -errno if an error occurred trying to figure this out.
++ */
++int is_opaque_dir(struct dentry *dentry, int bindex)
++{
++ int err = 0;
++ struct dentry *lower_dentry;
++ struct dentry *wh_lower_dentry;
++ struct inode *lower_inode;
++ struct sioq_args args;
++
++ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
++ lower_inode = lower_dentry->d_inode;
++
++ BUG_ON(!S_ISDIR(lower_inode->i_mode));
++
++ mutex_lock(&lower_inode->i_mutex);
++
++ if (!inode_permission(lower_inode, MAY_EXEC)) {
++ wh_lower_dentry =
++ lookup_one_len(UNIONFS_DIR_OPAQUE, lower_dentry,
++ sizeof(UNIONFS_DIR_OPAQUE) - 1);
++ } else {
++ args.is_opaque.dentry = lower_dentry;
++ run_sioq(__is_opaque_dir, &args);
++ wh_lower_dentry = args.ret;
++ }
++
++ mutex_unlock(&lower_inode->i_mutex);
++
++ if (IS_ERR(wh_lower_dentry)) {
++ err = PTR_ERR(wh_lower_dentry);
++ goto out;
++ }
++
++ /* This is an opaque dir iff wh_lower_dentry is positive */
++ err = !!wh_lower_dentry->d_inode;
++
++ dput(wh_lower_dentry);
++out:
++ return err;
++}
++
++void __is_opaque_dir(struct work_struct *work)
++{
++ struct sioq_args *args = container_of(work, struct sioq_args, work);
++
++ args->ret = lookup_one_len(UNIONFS_DIR_OPAQUE, args->is_opaque.dentry,
++ sizeof(UNIONFS_DIR_OPAQUE) - 1);
++ complete(&args->comp);
++}
++
++int make_dir_opaque(struct dentry *dentry, int bindex)
++{
++ int err = 0;
++ struct dentry *lower_dentry, *diropq;
++ struct inode *lower_dir;
++ struct nameidata nd;
++ const struct cred *old_creds;
++ struct cred *new_creds;
++
++ /*
++ * Opaque directory whiteout markers are special files (like regular
++ * whiteouts), and should appear to the users as if they don't
++ * exist. They should be created/deleted regardless of directory
++ * search/create permissions, but only for the duration of this
++ * creation of the .wh.__dir_opaque file. Note, this does not
++ * circumvent normal ->permission() checks.
++ */
++ new_creds = prepare_creds();
++ if (unlikely(!new_creds)) {
++ err = -ENOMEM;
++ goto out_err;
++ }
++ cap_raise(new_creds->cap_effective, CAP_DAC_READ_SEARCH);
++ cap_raise(new_creds->cap_effective, CAP_DAC_OVERRIDE);
++ old_creds = override_creds(new_creds);
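++ /* the raised capabilities stay in effect until revert_creds() below */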
++
++ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
++ lower_dir = lower_dentry->d_inode;
++ BUG_ON(!S_ISDIR(dentry->d_inode->i_mode) ||
++ !S_ISDIR(lower_dir->i_mode));
++
++ mutex_lock(&lower_dir->i_mutex);
++ diropq = lookup_one_len(UNIONFS_DIR_OPAQUE, lower_dentry,
++ sizeof(UNIONFS_DIR_OPAQUE) - 1);
++ if (IS_ERR(diropq)) {
++ err = PTR_ERR(diropq);
++ goto out;
++ }
++
++ err = init_lower_nd(&nd, LOOKUP_CREATE);
++ if (unlikely(err < 0))
++ goto out;
++ if (!diropq->d_inode)
++ err = vfs_create(lower_dir, diropq, S_IRUGO, &nd);
++ if (!err)
++ dbopaque(dentry) = bindex;
++ release_lower_nd(&nd, err);
++
++ dput(diropq);
++
++out:
++ mutex_unlock(&lower_dir->i_mutex);
++ revert_creds(old_creds);
++out_err:
++ return err;
++}
+diff --git a/fs/unionfs/xattr.c b/fs/unionfs/xattr.c
+new file mode 100644
+index 0000000..9002e06
+--- /dev/null
++++ b/fs/unionfs/xattr.c
+@@ -0,0 +1,173 @@
++/*
++ * Copyright (c) 2003-2010 Erez Zadok
++ * Copyright (c) 2003-2006 Charles P. Wright
++ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2005-2006 Junjiro Okajima
++ * Copyright (c) 2005 Arun M. Krishnakumar
++ * Copyright (c) 2004-2006 David P. Quigley
++ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
++ * Copyright (c) 2003 Puja Gupta
++ * Copyright (c) 2003 Harikesavan Krishnan
++ * Copyright (c) 2003-2010 Stony Brook University
++ * Copyright (c) 2003-2010 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "union.h"
++
++/* This is lifted from fs/xattr.c */
++void *unionfs_xattr_alloc(size_t size, size_t limit)
++{
++ void *ptr;
++
++ if (size > limit)
++ return ERR_PTR(-E2BIG);
++
++ if (!size) /* size request, no buffer is needed */
++ return NULL;
++
++ ptr = kmalloc(size, GFP_KERNEL);
++ if (unlikely(!ptr))
++ return ERR_PTR(-ENOMEM);
++ return ptr;
++}
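++
++/*
++ * The four operations below share one pattern: take the unionfs locks,
++ * revalidate the dentry, then forward the call to the dentry's lower
++ * counterpart via the matching vfs_*xattr() helper.
++ */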
++
++/*
++ * BKL held by caller.
++ * dentry->d_inode->i_mutex locked
++ */
++ssize_t unionfs_getxattr(struct dentry *dentry, const char *name, void *value,
++ size_t size)
++{
++ struct dentry *lower_dentry = NULL;
++ struct dentry *parent;
++ int err = -EOPNOTSUPP;
++ bool valid;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ valid = __unionfs_d_revalidate(dentry, parent, false);
++ if (unlikely(!valid)) {
++ err = -ESTALE;
++ goto out;
++ }
++
++ lower_dentry = unionfs_lower_dentry(dentry);
++
++ err = vfs_getxattr(lower_dentry, (char *) name, value, size);
++
++out:
++ unionfs_check_dentry(dentry);
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++ return err;
++}
++
++/*
++ * BKL held by caller.
++ * dentry->d_inode->i_mutex locked
++ */
++int unionfs_setxattr(struct dentry *dentry, const char *name,
++ const void *value, size_t size, int flags)
++{
++ struct dentry *lower_dentry = NULL;
++ struct dentry *parent;
++ int err = -EOPNOTSUPP;
++ bool valid;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ valid = __unionfs_d_revalidate(dentry, parent, false);
++ if (unlikely(!valid)) {
++ err = -ESTALE;
++ goto out;
++ }
++
++ lower_dentry = unionfs_lower_dentry(dentry);
++
++ err = vfs_setxattr(lower_dentry, (char *) name, (void *) value,
++ size, flags);
++
++out:
++ unionfs_check_dentry(dentry);
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++ return err;
++}
++
++/*
++ * BKL held by caller.
++ * dentry->d_inode->i_mutex locked
++ */
++int unionfs_removexattr(struct dentry *dentry, const char *name)
++{
++ struct dentry *lower_dentry = NULL;
++ struct dentry *parent;
++ int err = -EOPNOTSUPP;
++ bool valid;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ valid = __unionfs_d_revalidate(dentry, parent, false);
++ if (unlikely(!valid)) {
++ err = -ESTALE;
++ goto out;
++ }
++
++ lower_dentry = unionfs_lower_dentry(dentry);
++
++ err = vfs_removexattr(lower_dentry, (char *) name);
++
++out:
++ unionfs_check_dentry(dentry);
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++ return err;
++}
++
++/*
++ * BKL held by caller.
++ * dentry->d_inode->i_mutex locked
++ */
++ssize_t unionfs_listxattr(struct dentry *dentry, char *list, size_t size)
++{
++ struct dentry *lower_dentry = NULL;
++ struct dentry *parent;
++ int err = -EOPNOTSUPP;
++ char *encoded_list = NULL;
++ bool valid;
++
++ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
++ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
++ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
++
++ valid = __unionfs_d_revalidate(dentry, parent, false);
++ if (unlikely(!valid)) {
++ err = -ESTALE;
++ goto out;
++ }
++
++ lower_dentry = unionfs_lower_dentry(dentry);
++
++ encoded_list = list;
++ err = vfs_listxattr(lower_dentry, encoded_list, size);
++
++out:
++ unionfs_check_dentry(dentry);
++ unionfs_unlock_dentry(dentry);
++ unionfs_unlock_parent(dentry, parent);
++ unionfs_read_unlock(dentry->d_sb);
++ return err;
++}
+diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h
+index da317c7..64f1ced 100644
+--- a/include/linux/fs_stack.h
++++ b/include/linux/fs_stack.h
+@@ -1,7 +1,19 @@
++/*
++ * Copyright (c) 2006-2009 Erez Zadok
++ * Copyright (c) 2006-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2006-2009 Stony Brook University
++ * Copyright (c) 2006-2009 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
+ #ifndef _LINUX_FS_STACK_H
+ #define _LINUX_FS_STACK_H
+
+-/* This file defines generic functions used primarily by stackable
++/*
++ * This file defines generic functions used primarily by stackable
+ * filesystems; none of these functions require i_mutex to be held.
+ */
+
+diff --git a/include/linux/magic.h b/include/linux/magic.h
+index eb9800f..9770154 100644
+--- a/include/linux/magic.h
++++ b/include/linux/magic.h
+@@ -47,6 +47,8 @@
+ #define REISER2FS_SUPER_MAGIC_STRING "ReIsEr2Fs"
+ #define REISER2FS_JR_SUPER_MAGIC_STRING "ReIsEr3Fs"
+
++#define UNIONFS_SUPER_MAGIC 0xf15f083d
++
+ #define SMB_SUPER_MAGIC 0x517B
+ #define USBDEVICE_SUPER_MAGIC 0x9fa2
+ #define CGROUP_SUPER_MAGIC 0x27e0eb
+diff --git a/include/linux/namei.h b/include/linux/namei.h
+index 05b441d..dca6f9a 100644
+--- a/include/linux/namei.h
++++ b/include/linux/namei.h
+@@ -72,6 +72,7 @@ extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
+
+ extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
+ int (*open)(struct inode *, struct file *));
++extern void release_open_intent(struct nameidata *);
+
+ extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
+
+diff --git a/include/linux/splice.h b/include/linux/splice.h
+index 997c3b4..54f5501 100644
+--- a/include/linux/splice.h
++++ b/include/linux/splice.h
+@@ -81,6 +81,11 @@ extern ssize_t splice_to_pipe(struct pipe_inode_info *,
+ struct splice_pipe_desc *);
+ extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
+ splice_direct_actor *);
++extern long vfs_splice_from(struct pipe_inode_info *pipe, struct file *out,
++ loff_t *ppos, size_t len, unsigned int flags);
++extern long vfs_splice_to(struct file *in, loff_t *ppos,
++ struct pipe_inode_info *pipe, size_t len,
++ unsigned int flags);
+
+ /*
+ * for dynamic pipe sizing
+diff --git a/include/linux/union_fs.h b/include/linux/union_fs.h
+new file mode 100644
+index 0000000..c84d97e
+--- /dev/null
++++ b/include/linux/union_fs.h
+@@ -0,0 +1,22 @@
++/*
++ * Copyright (c) 2003-2009 Erez Zadok
++ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
++ * Copyright (c) 2003-2009 Stony Brook University
++ * Copyright (c) 2003-2009 The Research Foundation of SUNY
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef _LINUX_UNION_FS_H
++#define _LINUX_UNION_FS_H
++
++/*
++ * DEFINITIONS FOR USER AND KERNEL CODE:
++ */
++# define UNIONFS_IOCTL_INCGEN _IOR(0x15, 11, int)
++# define UNIONFS_IOCTL_QUERYFILE _IOR(0x15, 15, int)
++
++#endif /* _LINUX_UNION_FS_H */
++
+diff --git a/security/security.c b/security/security.c
+index c53949f..eb71394 100644
+--- a/security/security.c
++++ b/security/security.c
+@@ -528,6 +528,7 @@ int security_inode_permission(struct inode *inode, int mask)
+ return 0;
+ return security_ops->inode_permission(inode, mask);
+ }
++EXPORT_SYMBOL(security_inode_permission);
+
+ int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
+ {
diff --git a/main/scstadmin/APKBUILD b/main/scstadmin/APKBUILD
new file mode 100644
index 000000000..6a694e27b
--- /dev/null
+++ b/main/scstadmin/APKBUILD
@@ -0,0 +1,38 @@
+# Contributor: Carlo Landmeter
+# Maintainer:
+pkgname=scstadmin
+pkgver=2.0.0
+pkgrel=2
+pkgdesc="SCST administration tool written in perl"
+url="http://scst.sourceforge.net"
+arch="x86_64"
+license="GPL-2"
+depends="perl"
+makedepends="perl-dev"
+install=
+subpackages="$pkgname-doc"
+source="http://downloads.sourceforge.net/scst/$pkgname-$pkgver.tar.gz
+ scst-init-ash-comapt.patch
+ "
+
+_builddir="$srcdir/$pkgname-$pkgver"
+
+prepare() {
+ cd "$_builddir"/scstadmin.sysfs/scst-0.9.00
+ PERL_MM_USE_DEFAULT=1 perl Makefile.PL INSTALLDIRS=vendor || return 1
+ cd "$_builddir"
+ patch -p1 < "$srcdir"/scst-init-ash-comapt.patch
+}
+
+package() {
+ cd "$_builddir"/scstadmin.sysfs/scst-0.9.00
+ make DESTDIR="$pkgdir" install || return 1
+
+ cd "$_builddir"
+ mkdir -p "$pkgdir"/var/lib/scst/pr
+ install -Dm755 scstadmin.sysfs/scstadmin "$pkgdir"/usr/sbin/scstadmin || return 1
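+ # scst.gentoo is an OpenRC-style script; the ash-compat patch applied in
+ # prepare() renames its hyphenated functions, which busybox ash rejects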
+ install -Dm755 init.d/scst.gentoo "$pkgdir"/etc/init.d/scstadmin || return 1
+}
+
+md5sums="ae94761148cc4eaade2973ba84387825 scstadmin-2.0.0.tar.gz
+061580b8ec84b5f7da0b1332601f505a scst-init-ash-comapt.patch"
diff --git a/main/scstadmin/scst-init-ash-comapt.patch b/main/scstadmin/scst-init-ash-comapt.patch
new file mode 100644
index 000000000..67cd7adf1
--- /dev/null
+++ b/main/scstadmin/scst-init-ash-comapt.patch
@@ -0,0 +1,35 @@
+--- scstadmin/init.d/scst.gentoo
++++ scstadmin/init.d/scst.gentoo
+@@ -12,13 +12,11 @@
+ # Note: on most Linux distributions /bin/sh is a soft link to /bin/bash, while
+ # on a default Ubuntu setup /bin/sh is a soft link to /bin/dash !
+
+-opts="${opts} try-restart reload force-reload"
++opts="${opts} try_restart reload force_reload"
+ depend() {
+ use logger
+ }
+
+-PATH=/bin:/usr/bin:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin
+-
+ DEFAULTFILE="/etc/conf.d/scst"
+ SCST_CFG=/etc/scst.conf
+ MODPROBE="/sbin/modprobe"
+@@ -121,7 +119,7 @@
+ start
+ }
+
+-try-restart() {
++try_restart() {
+ ## Restart the service if the service is already running.
+ status >/dev/null 2>&1 && restart
+ }
+@@ -140,7 +138,7 @@
+ fi
+ }
+
+-force-reload() {
++force_reload() {
+ ## Cause the configuration to be reloaded if the service supports this,
+ ## otherwise restart the service if it is running.
+ einfo "Reloading SCST configuration"