summaryrefslogtreecommitdiffstats
path: root/main
diff options
context:
space:
mode:
authorCarlo Landmeter <clandmeter@gmail.com>2011-10-05 10:00:22 +0000
committerCarlo Landmeter <clandmeter@gmail.com>2011-10-05 10:06:30 +0000
commitad1c2d730e7d9bf73112bae8b8935e1c1327181e (patch)
tree1aa06d7cb6f32f662a0c5ce123d053c92bf6700c /main
parent3bef980d7c43dff9d82349aeb708cb179fc2ebed (diff)
downloadaports-ad1c2d730e7d9bf73112bae8b8935e1c1327181e.tar.bz2
aports-ad1c2d730e7d9bf73112bae8b8935e1c1327181e.tar.xz
main/linux-scst: update scst to 2.1.0 and kernel to 2.6.39.4
Diffstat (limited to 'main')
-rw-r--r--main/linux-scst/0004-arp-flush-arp-cache-on-device-change.patch29
-rw-r--r--main/linux-scst/APKBUILD77
-rw-r--r--main/linux-scst/kernelconfig.x86_64797
-rw-r--r--main/linux-scst/scst-2.1.0-2.6.39.patch (renamed from main/linux-scst/scst-2.0.0.1-2.6.36.patch)41218
-rw-r--r--main/linux-scst/setlocalversion.patch11
-rw-r--r--main/linux-scst/unionfs-2.5.7_for_2.6.36.diff11253
6 files changed, 23605 insertions, 29780 deletions
diff --git a/main/linux-scst/0004-arp-flush-arp-cache-on-device-change.patch b/main/linux-scst/0004-arp-flush-arp-cache-on-device-change.patch
deleted file mode 100644
index 85161ea3a..000000000
--- a/main/linux-scst/0004-arp-flush-arp-cache-on-device-change.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From 8a0e3ea4924059a7268446177d6869e3399adbb2 Mon Sep 17 00:00:00 2001
-From: Timo Teras <timo.teras@iki.fi>
-Date: Mon, 12 Apr 2010 13:46:45 +0000
-Subject: [PATCH 04/18] arp: flush arp cache on device change
-
-If IFF_NOARP is changed, we must flush the arp cache.
-
-Signed-off-by: Timo Teras <timo.teras@iki.fi>
----
- net/ipv4/arp.c | 3 +++
- 1 files changed, 3 insertions(+), 0 deletions(-)
-
-diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
-index 4e80f33..580bfc3 100644
---- a/net/ipv4/arp.c
-+++ b/net/ipv4/arp.c
-@@ -1200,6 +1200,9 @@ static int arp_netdev_event(struct notifier_block *this, unsigned long event, vo
- neigh_changeaddr(&arp_tbl, dev);
- rt_cache_flush(dev_net(dev), 0);
- break;
-+ case NETDEV_CHANGE:
-+ neigh_changeaddr(&arp_tbl, dev);
-+ break;
- default:
- break;
- }
---
-1.7.0.2
-
diff --git a/main/linux-scst/APKBUILD b/main/linux-scst/APKBUILD
index 6caf4f285..5b61125c5 100644
--- a/main/linux-scst/APKBUILD
+++ b/main/linux-scst/APKBUILD
@@ -2,31 +2,36 @@
_flavor=scst
pkgname=linux-${_flavor}
-pkgver=2.6.36.3
-_kernver=2.6.36
+pkgver=2.6.39.4
+
+_scstver=2.1.0
+
+if [ "${pkgver##*.*.*.*}" = "$pkgver" ]; then
+ _kernver=$pkgver
+else
+ _kernver=${pkgver%.*}
+fi
+
pkgrel=0
-pkgdesc="Linux kernel optimised for scst"
+pkgdesc="Linux kernel with SCST"
url="http://scst.sourceforge.net"
depends="mkinitfs linux-firmware"
-makedepends="perl installkernel bash"
+makedepends="perl installkernel"
options="!strip"
_config=${config:-kernelconfig.${CARCH}}
install=
-source="ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-$_kernver.tar.bz2
- ftp://ftp.kernel.org/pub/linux/kernel/v2.6/patch-$pkgver.bz2
+source="http://download.xs4all.nl/ftp.kernel.org/pub/linux/kernel/v2.6/linux-$_kernver.tar.bz2
+ http://download.xs4all.nl/ftp.kernel.org/pub/linux/kernel/v2.6/patch-$pkgver.bz2
kernelconfig.x86_64
- scst-2.0.0.1-2.6.36.patch
- unionfs-2.5.7_for_$_kernver.diff
- 0004-arp-flush-arp-cache-on-device-change.patch
+ scst-2.1.0-2.6.39.patch
"
-subpackages="$pkgname-dev linux-firmware:firmware"
+subpackages="$pkgname-dev"
arch="x86_64"
license="GPL-2"
-_abi_release=${pkgver}-${_flavor}
+_abi_release=${pkgver}-${_scstver}
prepare() {
- local _patch_failed=
cd "$srcdir"/linux-$_kernver
if [ "$_kernver" != "$pkgver" ]; then
bunzip2 -c < ../patch-$pkgver.bz2 | patch -p1 -N || return 1
@@ -34,25 +39,15 @@ prepare() {
# first apply patches in specified order
for i in $source; do
+ i=${i##*/}
case $i in
- *.patch)
+ *.patch|*.diff)
msg "Applying $i..."
- if ! patch -s -p1 -N -i "$srcdir"/$i; then
- echo $i >>failed
- _patch_failed=1
- fi
+ patch -s -p1 -i "$srcdir"/$i || return 1
;;
esac
done
- if ! [ -z "$_patch_failed" ]; then
- error "The following patches failed:"
- cat failed
- return 1
- fi
-
- echo "-scst" > "$srcdir"/linux-$_kernver/localversion-scst
-
mkdir -p "$srcdir"/build
cp "$srcdir"/$_config "$srcdir"/build/.config || return 1
make -C "$srcdir"/linux-$_kernver O="$srcdir"/build HOSTCC="${CC:-gcc}" \
@@ -61,13 +56,14 @@ prepare() {
# this is so we can do: 'abuild menuconfig' to reconfigure kernel
menuconfig() {
- cd "$srcdir"/build || return 1
+ cd "$srcdir"/build
make menuconfig
- cp .config "$startdir"/$_config
+ cp .config "$startdir"/$_config || return 1
}
build() {
cd "$srcdir"/build
+ export GCC_SPECS=/usr/share/gcc/hardenednopie.specs
make CC="${CC:-gcc}" \
KBUILD_BUILD_VERSION="$((pkgrel + 1 ))-Alpine" \
|| return 1
@@ -76,13 +72,15 @@ build() {
package() {
cd "$srcdir"/build
mkdir -p "$pkgdir"/boot "$pkgdir"/lib/modules
- make -j1 modules_install firmware_install install \
+ make -j1 modules_install install \
INSTALL_MOD_PATH="$pkgdir" \
INSTALL_PATH="$pkgdir"/boot \
|| return 1
- rm -f "$pkgdir"/lib/modules/${_abi_release}/build \
- "$pkgdir"/lib/modules/${_abi_release}/source
+ rm -rf "$pkgdir"/lib/modules/*/build \
+ "$pkgdir"/lib/modules/*/source
+ rm -rf "$pkgdir"/lib/firmware
+
install -D include/config/kernel.release \
"$pkgdir"/usr/share/kernel/$_flavor/kernel.release
}
@@ -95,7 +93,7 @@ dev() {
# this way you dont need to install the 300-400 kernel sources to
# build a tiny kernel module
#
- pkgdesc="Headers and script for third party modules for grsec kernel"
+ pkgdesc="Headers and script for third party modules for kernel"
local dir="$subpkgdir"/usr/src/linux-headers-${_abi_release}
# first we import config, run prepare to set up for building
@@ -133,16 +131,7 @@ dev() {
"$subpkgdir"/lib/modules/${_abi_release}/build
}
-firmware() {
- pkgdesc="Firmware for linux kernel"
- replaces="linux-grsec linux-vserver"
- mkdir -p "$subpkgdir"/lib
- mv "$pkgdir"/lib/firmware "$subpkgdir"/lib/
-}
-
-md5sums="61f3739a73afb6914cb007f37fb09b62 linux-2.6.36.tar.bz2
-33f51375d4baa343502b39acf94d5a6c patch-2.6.36.3.bz2
-68d4cbd30411aca485293117bd98ec38 kernelconfig.x86_64
-e62cd51e9452633821e4457564a094f3 scst-2.0.0.1-2.6.36.patch
-fec281a4e03fed560ce309ad8fc5a592 unionfs-2.5.7_for_2.6.36.diff
-776adeeb5272093574f8836c5037dd7d 0004-arp-flush-arp-cache-on-device-change.patch"
+md5sums="1aab7a741abe08d42e8eccf20de61e05 linux-2.6.39.tar.bz2
+ff5eb7323c054a128d2922bde3297ed5 patch-2.6.39.4.bz2
+4fea1df4f5f8358c521d88b94af6c704 kernelconfig.x86_64
+456cd9f0a71d9b2ca15d207f2d2a59a1 scst-2.1.0-2.6.39.patch"
diff --git a/main/linux-scst/kernelconfig.x86_64 b/main/linux-scst/kernelconfig.x86_64
index d98eb305a..5482bee6e 100644
--- a/main/linux-scst/kernelconfig.x86_64
+++ b/main/linux-scst/kernelconfig.x86_64
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.36.2
-# Thu Dec 23 12:32:35 2010
+# Linux/x86_64 2.6.39.4 Kernel Configuration
+# Tue Oct 4 16:04:36 2011
#
CONFIG_64BIT=y
# CONFIG_X86_32 is not set
@@ -47,26 +47,20 @@ CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_AUDIT_ARCH=y
CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
-CONFIG_HAVE_EARLY_RES=y
-CONFIG_GENERIC_HARDIRQS=y
-CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
-CONFIG_GENERIC_PENDING_IRQ=y
-CONFIG_USE_GENERIC_SMP_HELPERS=y
CONFIG_X86_64_SMP=y
CONFIG_X86_HT=y
-CONFIG_X86_TRAMPOLINE=y
CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11"
# CONFIG_KTIME_SCALAR is not set
CONFIG_ARCH_CPU_PROBE_RELEASE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
+CONFIG_HAVE_IRQ_WORK=y
+CONFIG_IRQ_WORK=y
#
# General setup
#
CONFIG_EXPERIMENTAL=y
-CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_CROSS_COMPILE=""
CONFIG_LOCALVERSION=""
@@ -74,10 +68,12 @@ CONFIG_LOCALVERSION=""
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_XZ=y
CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_XZ is not set
# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
@@ -85,13 +81,27 @@ CONFIG_SYSVIPC_SYSCTL=y
# CONFIG_POSIX_MQUEUE is not set
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
+# CONFIG_FHANDLE is not set
# CONFIG_TASKSTATS is not set
# CONFIG_AUDIT is not set
+CONFIG_HAVE_GENERIC_HARDIRQS=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_HAVE_SPARSE_IRQ=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_PENDING_IRQ=y
+CONFIG_IRQ_FORCED_THREADING=y
+# CONFIG_SPARSE_IRQ is not set
#
# RCU Subsystem
#
CONFIG_TREE_RCU=y
+# CONFIG_PREEMPT_RCU is not set
# CONFIG_RCU_TRACE is not set
CONFIG_RCU_FANOUT=32
# CONFIG_RCU_FANOUT_EXACT is not set
@@ -101,22 +111,46 @@ CONFIG_IKCONFIG=m
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
-# CONFIG_CGROUPS is not set
-# CONFIG_SYSFS_DEPRECATED_V2 is not set
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+CONFIG_CGROUP_NS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+# CONFIG_PROC_PID_CPUSET is not set
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+# CONFIG_CGROUP_MEM_RES_CTLR is not set
+# CONFIG_CGROUP_PERF is not set
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_CGROUP=y
+# CONFIG_DEBUG_BLK_CGROUP is not set
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+CONFIG_USER_NS=y
+CONFIG_PID_NS=y
+CONFIG_NET_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+# CONFIG_SYSFS_DEPRECATED is not set
# CONFIG_RELAY is not set
-# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
CONFIG_RD_BZIP2=y
CONFIG_RD_LZMA=y
+CONFIG_RD_XZ=y
CONFIG_RD_LZO=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
+CONFIG_UID16=y
CONFIG_SYSCTL_SYSCALL=y
-# CONFIG_KALLSYMS is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
@@ -130,6 +164,7 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_EMBEDDED=y
CONFIG_HAVE_PERF_EVENTS=y
#
@@ -148,8 +183,11 @@ CONFIG_PROFILING=y
CONFIG_OPROFILE=m
# CONFIG_OPROFILE_EVENT_MULTIPLEX is not set
CONFIG_HAVE_OPROFILE=y
-# CONFIG_KPROBES is not set
+CONFIG_KPROBES=y
+# CONFIG_JUMP_LABEL is not set
+CONFIG_OPTPROBES=y
CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_KRETPROBES=y
CONFIG_USER_RETURN_NOTIFIER=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
@@ -157,12 +195,14 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_OPTPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
CONFIG_HAVE_DMA_API_DEBUG=y
CONFIG_HAVE_HW_BREAKPOINT=y
CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
CONFIG_HAVE_USER_RETURN_NOTIFIER=y
CONFIG_HAVE_PERF_EVENTS_NMI=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
#
# GCOV-based kernel profiling
@@ -181,6 +221,8 @@ CONFIG_STOP_MACHINE=y
CONFIG_BLOCK=y
CONFIG_BLK_DEV_BSG=y
# CONFIG_BLK_DEV_INTEGRITY is not set
+# CONFIG_BLK_DEV_THROTTLING is not set
+CONFIG_BLOCK_COMPAT=y
#
# IO Schedulers
@@ -188,6 +230,7 @@ CONFIG_BLK_DEV_BSG=y
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_DEADLINE=m
CONFIG_IOSCHED_CFQ=y
+# CONFIG_CFQ_GROUP_IOSCHED is not set
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
@@ -232,13 +275,19 @@ CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
CONFIG_SMP=y
-# CONFIG_SPARSE_IRQ is not set
CONFIG_X86_MPPARSE=y
CONFIG_X86_EXTENDED_PLATFORM=y
# CONFIG_X86_VSMP is not set
CONFIG_SCHED_OMIT_FRAME_POINTER=y
CONFIG_PARAVIRT_GUEST=y
-# CONFIG_XEN is not set
+CONFIG_XEN=y
+CONFIG_XEN_DOM0=y
+CONFIG_XEN_PRIVILEGED_GUEST=y
+CONFIG_XEN_PVHVM=y
+CONFIG_XEN_MAX_DOMAIN_MEMORY=128
+CONFIG_XEN_SAVE_RESTORE=y
+# CONFIG_XEN_DEBUG_FS is not set
+# CONFIG_XEN_DEBUG is not set
CONFIG_KVM_CLOCK=y
CONFIG_KVM_GUEST=y
CONFIG_PARAVIRT=y
@@ -251,9 +300,9 @@ CONFIG_NO_BOOTMEM=y
# CONFIG_MCORE2 is not set
# CONFIG_MATOM is not set
CONFIG_GENERIC_CPU=y
-CONFIG_X86_CPU=y
CONFIG_X86_INTERNODE_CACHE_SHIFT=6
CONFIG_X86_CMPXCHG=y
+CONFIG_CMPXCHG_LOCAL=y
CONFIG_X86_L1_CACHE_SHIFT=6
CONFIG_X86_XADD=y
CONFIG_X86_WP_WORKS_OK=y
@@ -270,16 +319,15 @@ CONFIG_HPET_TIMER=y
CONFIG_HPET_EMULATE_RTC=y
CONFIG_DMI=y
CONFIG_GART_IOMMU=y
-CONFIG_CALGARY_IOMMU=y
-CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y
-CONFIG_AMD_IOMMU=y
-# CONFIG_AMD_IOMMU_STATS is not set
+# CONFIG_CALGARY_IOMMU is not set
+# CONFIG_AMD_IOMMU is not set
CONFIG_SWIOTLB=y
CONFIG_IOMMU_HELPER=y
-CONFIG_IOMMU_API=y
+# CONFIG_IOMMU_API is not set
CONFIG_NR_CPUS=8
CONFIG_SCHED_SMT=y
CONFIG_SCHED_MC=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
# CONFIG_PREEMPT_NONE is not set
CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_PREEMPT is not set
@@ -295,6 +343,7 @@ CONFIG_MICROCODE_OLD_INTERFACE=y
CONFIG_X86_MSR=m
CONFIG_X86_CPUID=m
CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y
CONFIG_DIRECT_GBPAGES=y
# CONFIG_NUMA is not set
CONFIG_ARCH_SPARSEMEM_DEFAULT=y
@@ -309,9 +358,12 @@ CONFIG_SPARSEMEM_EXTREME=y
CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y
CONFIG_SPARSEMEM_VMEMMAP=y
+CONFIG_HAVE_MEMBLOCK=y
# CONFIG_MEMORY_HOTPLUG is not set
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_COMPACTION=y
+CONFIG_MIGRATION=y
CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
@@ -319,8 +371,11 @@ CONFIG_VIRT_TO_BUS=y
CONFIG_MMU_NOTIFIER=y
CONFIG_KSM=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
+# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set
# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
-CONFIG_X86_RESERVE_LOW_64K=y
+CONFIG_X86_RESERVE_LOW=64
CONFIG_MTRR=y
CONFIG_MTRR_SANITIZER=y
CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=0
@@ -342,29 +397,28 @@ CONFIG_PHYSICAL_START=0x1000000
# CONFIG_RELOCATABLE is not set
CONFIG_PHYSICAL_ALIGN=0x1000000
CONFIG_HOTPLUG_CPU=y
+# CONFIG_COMPAT_VDSO is not set
# CONFIG_CMDLINE_BOOL is not set
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
#
# Power management and ACPI options
#
-CONFIG_PM=y
-# CONFIG_PM_DEBUG is not set
-CONFIG_PM_SLEEP_SMP=y
-CONFIG_PM_SLEEP=y
-CONFIG_SUSPEND_NVS=y
CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
+CONFIG_HIBERNATE_CALLBACKS=y
# CONFIG_HIBERNATION is not set
+CONFIG_PM_SLEEP=y
+CONFIG_PM_SLEEP_SMP=y
# CONFIG_PM_RUNTIME is not set
-CONFIG_PM_OPS=y
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
CONFIG_ACPI=y
CONFIG_ACPI_SLEEP=y
CONFIG_ACPI_PROCFS=y
CONFIG_ACPI_PROCFS_POWER=y
# CONFIG_ACPI_POWER_METER is not set
-CONFIG_ACPI_SYSFS_POWER=y
-CONFIG_ACPI_EC_DEBUGFS=m
+CONFIG_ACPI_EC_DEBUGFS=y
CONFIG_ACPI_PROC_EVENT=y
CONFIG_ACPI_AC=m
CONFIG_ACPI_BATTERY=m
@@ -373,6 +427,7 @@ CONFIG_ACPI_VIDEO=m
CONFIG_ACPI_FAN=m
CONFIG_ACPI_DOCK=y
CONFIG_ACPI_PROCESSOR=m
+CONFIG_ACPI_IPMI=m
CONFIG_ACPI_HOTPLUG_CPU=y
# CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set
CONFIG_ACPI_THERMAL=m
@@ -387,7 +442,7 @@ CONFIG_ACPI_HED=m
CONFIG_ACPI_APEI=y
CONFIG_ACPI_APEI_GHES=m
CONFIG_ACPI_APEI_EINJ=m
-CONFIG_ACPI_APEI_ERST_DEBUG=m
+CONFIG_ACPI_APEI_ERST_DEBUG=y
# CONFIG_SFI is not set
#
@@ -425,13 +480,12 @@ CONFIG_X86_SPEEDSTEP_LIB=m
CONFIG_CPU_IDLE=y
CONFIG_CPU_IDLE_GOV_LADDER=y
CONFIG_CPU_IDLE_GOV_MENU=y
-# CONFIG_INTEL_IDLE is not set
+CONFIG_INTEL_IDLE=y
#
# Memory power savings
#
-CONFIG_I7300_IDLE_IOAT_CHANNEL=y
-CONFIG_I7300_IDLE=m
+# CONFIG_I7300_IDLE is not set
#
# Bus options (PCI etc.)
@@ -439,6 +493,7 @@ CONFIG_I7300_IDLE=m
CONFIG_PCI=y
CONFIG_PCI_DIRECT=y
CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_XEN=y
CONFIG_PCI_DOMAINS=y
CONFIG_PCI_CNB20LE_QUIRK=y
# CONFIG_DMAR is not set
@@ -451,11 +506,13 @@ CONFIG_PCIEASPM=y
CONFIG_ARCH_SUPPORTS_MSI=y
CONFIG_PCI_MSI=y
CONFIG_PCI_STUB=m
+CONFIG_XEN_PCIDEV_FRONTEND=y
CONFIG_HT_IRQ=y
# CONFIG_PCI_IOV is not set
CONFIG_PCI_IOAPIC=y
+CONFIG_PCI_LABEL=y
CONFIG_ISA_DMA_API=y
-CONFIG_K8_NB=y
+CONFIG_AMD_NB=y
CONFIG_PCCARD=m
CONFIG_PCMCIA=m
CONFIG_PCMCIA_LOAD_CIS=y
@@ -481,17 +538,25 @@ CONFIG_HOTPLUG_PCI_CPCI=y
CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
CONFIG_HOTPLUG_PCI_SHPC=m
+# CONFIG_RAPIDIO is not set
#
# Executable file formats / Emulations
#
CONFIG_BINFMT_ELF=y
+CONFIG_COMPAT_BINFMT_ELF=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_HAVE_AOUT is not set
CONFIG_BINFMT_MISC=m
-# CONFIG_IA32_EMULATION is not set
-# CONFIG_COMPAT_FOR_U64_ALIGNMENT is not set
+CONFIG_IA32_EMULATION=y
+# CONFIG_IA32_AOUT is not set
+CONFIG_COMPAT=y
+CONFIG_COMPAT_FOR_U64_ALIGNMENT=y
+CONFIG_SYSVIPC_COMPAT=y
+CONFIG_KEYS_COMPAT=y
+CONFIG_HAVE_TEXT_POKE_SMP=y
CONFIG_NET=y
+CONFIG_COMPAT_NETLINK_MESSAGES=y
#
# Networking options
@@ -510,17 +575,17 @@ CONFIG_INET=y
CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_ASK_IP_FIB_HASH=y
-# CONFIG_IP_FIB_TRIE is not set
-CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE_STATS is not set
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_ROUTE_CLASSID=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=y
CONFIG_NET_IPGRE=m
CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE=y
@@ -557,7 +622,7 @@ CONFIG_DEFAULT_CUBIC=y
# CONFIG_DEFAULT_RENO is not set
CONFIG_DEFAULT_TCP_CONG="cubic"
CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6=m
+CONFIG_IPV6=y
CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -583,7 +648,7 @@ CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
CONFIG_IPV6_PIMSM_V2=y
CONFIG_NETLABEL=y
CONFIG_NETWORK_SECMARK=y
-# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+CONFIG_NETWORK_PHY_TIMESTAMPING=y
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_DEBUG is not set
CONFIG_NETFILTER_ADVANCED=y
@@ -600,6 +665,7 @@ CONFIG_NF_CONNTRACK_MARK=y
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_ZONES=y
CONFIG_NF_CONNTRACK_EVENTS=y
+# CONFIG_NF_CONNTRACK_TIMESTAMP is not set
CONFIG_NF_CT_PROTO_DCCP=m
CONFIG_NF_CT_PROTO_GRE=m
CONFIG_NF_CT_PROTO_SCTP=m
@@ -608,7 +674,9 @@ CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_BROADCAST=m
CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
@@ -622,6 +690,7 @@ CONFIG_NETFILTER_XTABLES=m
#
CONFIG_NETFILTER_XT_MARK=m
CONFIG_NETFILTER_XT_CONNMARK=m
+CONFIG_NETFILTER_XT_SET=m
#
# Xtables targets
@@ -650,6 +719,7 @@ CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
#
# Xtables matches
#
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
@@ -658,6 +728,7 @@ CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_CPU=m
CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -687,6 +758,18 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_MAX=256
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_LIST_SET=m
CONFIG_IP_VS=m
CONFIG_IP_VS_IPV6=y
# CONFIG_IP_VS_DEBUG is not set
@@ -720,6 +803,8 @@ CONFIG_IP_VS_NQ=m
# IPVS application helper
#
CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_NFCT=y
+CONFIG_IP_VS_PE_SIP=m
#
# IP: Netfilter Configuration
@@ -729,7 +814,6 @@ CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_CONNTRACK_PROC_COMPAT=y
CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
@@ -767,6 +851,7 @@ CONFIG_IP_NF_ARP_MANGLE=m
#
# IPv6: Netfilter Configuration
#
+CONFIG_NF_DEFRAG_IPV6=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
@@ -821,9 +906,9 @@ CONFIG_INET_DCCP_DIAG=m
# CONFIG_IP_DCCP_CCID2_DEBUG is not set
CONFIG_IP_DCCP_CCID3=y
# CONFIG_IP_DCCP_CCID3_DEBUG is not set
-CONFIG_IP_DCCP_CCID3_RTO=100
CONFIG_IP_DCCP_TFRC_LIB=y
CONFIG_IP_SCTP=m
+CONFIG_NET_SCTPPROBE=m
# CONFIG_SCTP_DBG_MSG is not set
# CONFIG_SCTP_DBG_OBJCNT is not set
# CONFIG_SCTP_HMAC_NONE is not set
@@ -867,9 +952,7 @@ CONFIG_IPDDP_ENCAP=y
CONFIG_IPDDP_DECAP=y
CONFIG_X25=m
CONFIG_LAPB=m
-CONFIG_ECONET=m
-CONFIG_ECONET_AUNUDP=y
-CONFIG_ECONET_NATIVE=y
+# CONFIG_ECONET is not set
CONFIG_WAN_ROUTER=m
CONFIG_PHONET=m
CONFIG_IEEE802154=m
@@ -885,6 +968,7 @@ CONFIG_NET_SCH_ATM=m
CONFIG_NET_SCH_PRIO=m
CONFIG_NET_SCH_MULTIQ=m
CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TEQL=m
CONFIG_NET_SCH_TBF=m
@@ -892,6 +976,8 @@ CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
CONFIG_NET_SCH_NETEM=m
CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_CHOKE=m
CONFIG_NET_SCH_INGRESS=m
#
@@ -901,7 +987,6 @@ CONFIG_NET_CLS=y
CONFIG_NET_CLS_BASIC=m
CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
-CONFIG_NET_CLS_ROUTE=y
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
CONFIG_CLS_U32_PERF=y
@@ -909,6 +994,7 @@ CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
CONFIG_NET_CLS_FLOW=m
+# CONFIG_NET_CLS_CGROUP is not set
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_STACK=32
CONFIG_NET_EMATCH_CMP=m
@@ -926,16 +1012,21 @@ CONFIG_NET_ACT_NAT=m
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
# CONFIG_NET_CLS_IND is not set
CONFIG_NET_SCH_FIFO=y
# CONFIG_DCB is not set
CONFIG_DNS_RESOLVER=y
+# CONFIG_BATMAN_ADV is not set
CONFIG_RPS=y
+CONFIG_RFS_ACCEL=y
+CONFIG_XPS=y
#
# Network testing
#
CONFIG_NET_PKTGEN=m
+CONFIG_NET_TCPPROBE=m
# CONFIG_HAMRADIO is not set
CONFIG_CAN=m
CONFIG_CAN_RAW=m
@@ -945,21 +1036,27 @@ CONFIG_CAN_BCM=m
# CAN Device Drivers
#
CONFIG_CAN_VCAN=m
+CONFIG_CAN_SLCAN=m
CONFIG_CAN_DEV=m
# CONFIG_CAN_CALC_BITTIMING is not set
CONFIG_CAN_MCP251X=m
CONFIG_CAN_JANZ_ICAN3=m
+# CONFIG_PCH_CAN is not set
CONFIG_CAN_SJA1000=m
CONFIG_CAN_SJA1000_PLATFORM=m
CONFIG_CAN_EMS_PCI=m
CONFIG_CAN_KVASER_PCI=m
CONFIG_CAN_PLX_PCI=m
+CONFIG_CAN_C_CAN=m
+CONFIG_CAN_C_CAN_PLATFORM=m
#
# CAN USB interfaces
#
# CONFIG_CAN_EMS_USB is not set
-CONFIG_CAN_ESD_USB2=m
+# CONFIG_CAN_ESD_USB2 is not set
+CONFIG_CAN_SOFTING=m
+CONFIG_CAN_SOFTING_CS=m
# CONFIG_CAN_DEBUG_DEVICES is not set
CONFIG_IRDA=m
@@ -1018,15 +1115,8 @@ CONFIG_VLSI_FIR=m
CONFIG_VIA_FIR=m
CONFIG_MCS_FIR=m
CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
-CONFIG_BT_RFCOMM=m
-CONFIG_BT_RFCOMM_TTY=y
-CONFIG_BT_BNEP=m
-CONFIG_BT_BNEP_MC_FILTER=y
-CONFIG_BT_BNEP_PROTO_FILTER=y
-CONFIG_BT_CMTP=m
-CONFIG_BT_HIDP=m
+# CONFIG_BT_L2CAP is not set
+# CONFIG_BT_SCO is not set
#
# Bluetooth device drivers
@@ -1036,7 +1126,7 @@ CONFIG_BT_HCIBTSDIO=m
CONFIG_BT_HCIUART=m
CONFIG_BT_HCIUART_H4=y
CONFIG_BT_HCIUART_BCSP=y
-# CONFIG_BT_HCIUART_ATH3K is not set
+CONFIG_BT_HCIUART_ATH3K=y
CONFIG_BT_HCIUART_LL=y
CONFIG_BT_HCIBCM203X=m
CONFIG_BT_HCIBPA10X=m
@@ -1048,6 +1138,7 @@ CONFIG_BT_HCIBTUART=m
CONFIG_BT_HCIVHCI=m
# CONFIG_BT_MRVL is not set
CONFIG_BT_ATH3K=m
+CONFIG_BT_WILINK=m
CONFIG_AF_RXRPC=m
# CONFIG_AF_RXRPC_DEBUG is not set
CONFIG_RXKAD=m
@@ -1096,6 +1187,8 @@ CONFIG_NET_9P_RDMA=m
CONFIG_CAIF=m
# CONFIG_CAIF_DEBUG is not set
CONFIG_CAIF_NETDEV=m
+CONFIG_CEPH_LIB=m
+# CONFIG_CEPH_LIB_PRETTYDEBUG is not set
#
# Device Drivers
@@ -1111,12 +1204,12 @@ CONFIG_STANDALONE=y
CONFIG_FW_LOADER=m
# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_EXTRA_FIRMWARE=""
-# CONFIG_SYS_HYPERVISOR is not set
+CONFIG_SYS_HYPERVISOR=y
+CONFIG_ARCH_NO_SYSDEV_OPS=y
CONFIG_CONNECTOR=m
CONFIG_MTD=m
# CONFIG_MTD_DEBUG is not set
CONFIG_MTD_TESTS=m
-CONFIG_MTD_CONCAT=m
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_REDBOOT_PARTS=m
CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
@@ -1140,6 +1233,7 @@ CONFIG_RFD_FTL=m
CONFIG_SSFDC=m
CONFIG_SM_FTL=m
CONFIG_MTD_OOPS=m
+CONFIG_MTD_SWAP=m
#
# RAM/ROM/Flash chip drivers
@@ -1189,6 +1283,7 @@ CONFIG_MTD_PCMCIA=m
# CONFIG_MTD_GPIO_ADDR is not set
CONFIG_MTD_INTEL_VR_NOR=m
CONFIG_MTD_PLATRAM=m
+CONFIG_MTD_LATCH_ADDR=m
#
# Self-contained MTD device drivers
@@ -1225,6 +1320,7 @@ CONFIG_MTD_NAND_ECC=m
CONFIG_MTD_NAND_ECC_SMC=y
CONFIG_MTD_NAND=m
# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_BCH is not set
CONFIG_MTD_SM_COMMON=m
# CONFIG_MTD_NAND_MUSEUM_IDS is not set
CONFIG_MTD_NAND_DENALI=m
@@ -1251,18 +1347,10 @@ CONFIG_MTD_ONENAND_SIM=m
#
CONFIG_MTD_LPDDR=m
CONFIG_MTD_QINFO_PROBE=m
-
-#
-# UBI - Unsorted block images
-#
CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_WL_THRESHOLD=4096
CONFIG_MTD_UBI_BEB_RESERVE=1
# CONFIG_MTD_UBI_GLUEBI is not set
-
-#
-# UBI debugging options
-#
# CONFIG_MTD_UBI_DEBUG is not set
CONFIG_PARPORT=m
CONFIG_PARPORT_PC=m
@@ -1305,8 +1393,11 @@ CONFIG_CDROM_PKTCDVD=m
CONFIG_CDROM_PKTCDVD_BUFFERS=8
# CONFIG_CDROM_PKTCDVD_WCACHE is not set
CONFIG_ATA_OVER_ETH=m
+CONFIG_XEN_BLKDEV_FRONTEND=y
CONFIG_VIRTIO_BLK=m
# CONFIG_BLK_DEV_HD is not set
+# CONFIG_BLK_DEV_RBD is not set
+CONFIG_SENSORS_LIS3LV02D=m
CONFIG_MISC_DEVICES=y
CONFIG_AD525X_DPOT=m
CONFIG_AD525X_DPOT_I2C=m
@@ -1322,14 +1413,19 @@ CONFIG_CS5535_MFGPT=m
CONFIG_CS5535_MFGPT_DEFAULT_IRQ=7
CONFIG_CS5535_CLOCK_EVENT_SRC=m
CONFIG_HP_ILO=m
+CONFIG_APDS9802ALS=m
CONFIG_ISL29003=m
+CONFIG_ISL29020=m
CONFIG_SENSORS_TSL2550=m
CONFIG_SENSORS_BH1780=m
+CONFIG_SENSORS_BH1770=m
+CONFIG_SENSORS_APDS990X=m
CONFIG_HMC6352=m
CONFIG_DS1682=m
CONFIG_TI_DAC7512=m
CONFIG_VMWARE_BALLOON=m
CONFIG_BMP085=m
+CONFIG_PCH_PHUB=m
CONFIG_C2PORT=m
CONFIG_C2PORT_DURAMAR_2150=m
@@ -1347,6 +1443,12 @@ CONFIG_CB710_DEBUG_ASSUMPTIONS=y
CONFIG_IWMC3200TOP=m
# CONFIG_IWMC3200TOP_DEBUG is not set
# CONFIG_IWMC3200TOP_DEBUGFS is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+CONFIG_TI_ST=m
+CONFIG_SENSORS_LIS3_I2C=m
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
@@ -1389,14 +1491,15 @@ CONFIG_SCSI_SAS_ATTRS=m
CONFIG_SCSI_SAS_LIBSAS=m
CONFIG_SCSI_SAS_ATA=y
CONFIG_SCSI_SAS_HOST_SMP=y
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
CONFIG_SCSI_SRP_ATTRS=m
CONFIG_SCSI_SRP_TGT_ATTRS=y
CONFIG_SCSI_LOWLEVEL=y
CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_SCSI_CXGB3_ISCSI=m
+CONFIG_SCSI_CXGB4_ISCSI=m
CONFIG_SCSI_BNX2_ISCSI=m
+CONFIG_SCSI_BNX2X_FCOE=m
# CONFIG_BE2ISCSI is not set
CONFIG_BLK_DEV_3W_XXXX_RAID=m
CONFIG_SCSI_HPSA=m
@@ -1477,6 +1580,7 @@ CONFIG_SCSI_PM8001=m
CONFIG_SCSI_SRP=m
# CONFIG_SCSI_BFA_FC is not set
CONFIG_SCSI_LOWLEVEL_PCMCIA=y
+CONFIG_PCMCIA_AHA152X=m
CONFIG_PCMCIA_FDOMAIN=m
CONFIG_PCMCIA_QLOGIC=m
CONFIG_PCMCIA_SYM53C500=m
@@ -1517,6 +1621,8 @@ CONFIG_SCST_TRACING=y
# CONFIG_SCST_MEASURE_LATENCY is not set
CONFIG_SCST_ISCSI=m
# CONFIG_SCST_ISCSI_DEBUG_DIGEST_FAILURES is not set
+CONFIG_SCST_LOCAL=m
+# CONFIG_SCST_LOCAL_FORCE_DIRECT_PROCESSING is not set
CONFIG_SCST_SRPT=m
CONFIG_ATA=m
# CONFIG_ATA_NONSTANDARD is not set
@@ -1530,6 +1636,7 @@ CONFIG_SATA_PMP=y
CONFIG_SATA_AHCI=m
CONFIG_SATA_AHCI_PLATFORM=m
CONFIG_SATA_INIC162X=m
+CONFIG_SATA_ACARD_AHCI=m
CONFIG_SATA_SIL24=m
CONFIG_ATA_SFF=y
@@ -1560,12 +1667,14 @@ CONFIG_SATA_VITESSE=m
#
CONFIG_PATA_ALI=m
CONFIG_PATA_AMD=m
+CONFIG_PATA_ARASAN_CF=m
CONFIG_PATA_ARTOP=m
CONFIG_PATA_ATIIXP=m
CONFIG_PATA_ATP867X=m
CONFIG_PATA_CMD64X=m
CONFIG_PATA_CS5520=m
CONFIG_PATA_CS5530=m
+CONFIG_PATA_CS5536=m
CONFIG_PATA_CYPRESS=m
CONFIG_PATA_EFAR=m
CONFIG_PATA_HPT366=m
@@ -1629,6 +1738,7 @@ CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
CONFIG_DM_LOG_USERSPACE=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
@@ -1636,6 +1746,13 @@ CONFIG_DM_MULTIPATH_QL=m
CONFIG_DM_MULTIPATH_ST=m
CONFIG_DM_DELAY=m
# CONFIG_DM_UEVENT is not set
+CONFIG_DM_FLAKEY=m
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
+CONFIG_LOOPBACK_TARGET=m
+# CONFIG_LOOPBACK_TARGET_CDB_DEBUG is not set
CONFIG_FUSION=y
CONFIG_FUSION_SPI=m
CONFIG_FUSION_FC=m
@@ -1647,30 +1764,11 @@ CONFIG_FUSION_CTL=m
#
# IEEE 1394 (FireWire) support
#
-
-#
-# You can enable one or both FireWire driver stacks.
-#
-
-#
-# The newer stack is recommended.
-#
CONFIG_FIREWIRE=m
CONFIG_FIREWIRE_OHCI=m
CONFIG_FIREWIRE_OHCI_DEBUG=y
CONFIG_FIREWIRE_SBP2=m
CONFIG_FIREWIRE_NET=m
-CONFIG_IEEE1394=m
-CONFIG_IEEE1394_OHCI1394=m
-CONFIG_IEEE1394_PCILYNX=m
-CONFIG_IEEE1394_SBP2=m
-# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
-CONFIG_IEEE1394_ETH1394_ROM_ENTRY=y
-CONFIG_IEEE1394_ETH1394=m
-CONFIG_IEEE1394_RAWIO=m
-CONFIG_IEEE1394_VIDEO1394=m
-CONFIG_IEEE1394_DV1394=m
-# CONFIG_IEEE1394_VERBOSEDEBUG is not set
CONFIG_FIREWIRE_NOSY=m
CONFIG_I2O=m
CONFIG_I2O_LCT_NOTIFY_ON_CHANGES=y
@@ -1703,6 +1801,7 @@ CONFIG_ARCNET_COM90xxIO=m
CONFIG_ARCNET_RIM_I=m
CONFIG_ARCNET_COM20020=m
CONFIG_ARCNET_COM20020_PCI=m
+CONFIG_MII=m
CONFIG_PHYLIB=m
#
@@ -1716,6 +1815,7 @@ CONFIG_CICADA_PHY=m
CONFIG_VITESSE_PHY=m
CONFIG_SMSC_PHY=m
CONFIG_BROADCOM_PHY=m
+CONFIG_BCM63XX_PHY=m
CONFIG_ICPLUS_PHY=m
CONFIG_REALTEK_PHY=m
CONFIG_NATIONAL_PHY=m
@@ -1725,7 +1825,6 @@ CONFIG_MICREL_PHY=m
CONFIG_MDIO_BITBANG=m
CONFIG_MDIO_GPIO=m
CONFIG_NET_ETHERNET=y
-CONFIG_MII=m
CONFIG_HAPPYMEAL=m
CONFIG_SUNGEM=m
CONFIG_CASSINI=m
@@ -1808,7 +1907,6 @@ CONFIG_NS83820=m
CONFIG_HAMACHI=m
CONFIG_YELLOWFIN=m
CONFIG_R8169=m
-CONFIG_R8169_VLAN=y
CONFIG_SIS190=m
CONFIG_SKGE=m
# CONFIG_SKGE_DEBUG is not set
@@ -1823,15 +1921,16 @@ CONFIG_ATL1=m
CONFIG_ATL1E=m
CONFIG_ATL1C=m
CONFIG_JME=m
+CONFIG_STMMAC_ETH=m
+# CONFIG_STMMAC_DA is not set
+# CONFIG_STMMAC_DUAL_MAC is not set
+CONFIG_PCH_GBE=m
CONFIG_NETDEV_10000=y
CONFIG_MDIO=m
CONFIG_CHELSIO_T1=m
CONFIG_CHELSIO_T1_1G=y
-CONFIG_CHELSIO_T3_DEPENDS=y
CONFIG_CHELSIO_T3=m
-CONFIG_CHELSIO_T4_DEPENDS=y
CONFIG_CHELSIO_T4=m
-CONFIG_CHELSIO_T4VF_DEPENDS=y
CONFIG_CHELSIO_T4VF=m
CONFIG_ENIC=m
CONFIG_IXGBE=m
@@ -1852,6 +1951,7 @@ CONFIG_TEHUTI=m
CONFIG_BNX2X=m
CONFIG_QLCNIC=m
CONFIG_QLGE=m
+CONFIG_BNA=m
CONFIG_SFC=m
CONFIG_SFC_MTD=y
CONFIG_BE2NET=m
@@ -1881,20 +1981,26 @@ CONFIG_ATH_COMMON=m
# CONFIG_ATH_DEBUG is not set
CONFIG_ATH5K=m
# CONFIG_ATH5K_DEBUG is not set
+CONFIG_ATH5K_PCI=y
CONFIG_ATH9K_HW=m
CONFIG_ATH9K_COMMON=m
CONFIG_ATH9K=m
# CONFIG_ATH9K_DEBUGFS is not set
+CONFIG_ATH9K_RATE_CONTROL=y
CONFIG_ATH9K_HTC=m
# CONFIG_ATH9K_HTC_DEBUGFS is not set
CONFIG_AR9170_USB=m
CONFIG_AR9170_LEDS=y
+CONFIG_CARL9170=m
+CONFIG_CARL9170_LEDS=y
+CONFIG_CARL9170_WPC=y
CONFIG_B43=m
CONFIG_B43_PCI_AUTOSELECT=y
CONFIG_B43_PCICORE_AUTOSELECT=y
CONFIG_B43_PCMCIA=y
CONFIG_B43_SDIO=y
CONFIG_B43_PIO=y
+CONFIG_B43_PHY_N=y
CONFIG_B43_PHY_LP=y
CONFIG_B43_LEDS=y
CONFIG_B43_HWRNG=y
@@ -1927,11 +2033,20 @@ CONFIG_IPW2200_QOS=y
# CONFIG_IPW2200_DEBUG is not set
CONFIG_LIBIPW=m
# CONFIG_LIBIPW_DEBUG is not set
-CONFIG_IWLWIFI=m
-# CONFIG_IWLWIFI_DEBUG is not set
CONFIG_IWLAGN=m
-CONFIG_IWL4965=y
-CONFIG_IWL5000=y
+
+#
+# Debugging Options
+#
+# CONFIG_IWLWIFI_DEBUG is not set
+# CONFIG_IWL_P2P is not set
+CONFIG_IWLWIFI_LEGACY=m
+
+#
+# Debugging Options
+#
+# CONFIG_IWLWIFI_LEGACY_DEBUG is not set
+CONFIG_IWL4965=m
CONFIG_IWL3945=m
CONFIG_IWM=m
# CONFIG_IWM_DEBUG is not set
@@ -1955,19 +2070,20 @@ CONFIG_P54_COMMON=m
CONFIG_P54_USB=m
CONFIG_P54_PCI=m
CONFIG_P54_SPI=m
+# CONFIG_P54_SPI_DEFAULT_EEPROM is not set
CONFIG_P54_LEDS=y
CONFIG_RT2X00=m
CONFIG_RT2400PCI=m
CONFIG_RT2500PCI=m
CONFIG_RT61PCI=m
-CONFIG_RT2800PCI_PCI=y
CONFIG_RT2800PCI=m
-CONFIG_RT2800PCI_RT30XX=y
+CONFIG_RT2800PCI_RT33XX=y
# CONFIG_RT2800PCI_RT35XX is not set
+# CONFIG_RT2800PCI_RT53XX is not set
CONFIG_RT2500USB=m
CONFIG_RT73USB=m
CONFIG_RT2800USB=m
-CONFIG_RT2800USB_RT30XX=y
+CONFIG_RT2800USB_RT33XX=y
# CONFIG_RT2800USB_RT35XX is not set
CONFIG_RT2800USB_UNKNOWN=y
CONFIG_RT2800_LIB=m
@@ -1979,13 +2095,20 @@ CONFIG_RT2X00_LIB_FIRMWARE=y
CONFIG_RT2X00_LIB_CRYPTO=y
CONFIG_RT2X00_LIB_LEDS=y
# CONFIG_RT2X00_DEBUG is not set
-CONFIG_WL12XX=m
+CONFIG_RTL8192CE=m
+CONFIG_RTL8192CU=m
+CONFIG_RTLWIFI=m
+CONFIG_RTL8192C_COMMON=m
CONFIG_WL1251=m
CONFIG_WL1251_SPI=m
CONFIG_WL1251_SDIO=m
-CONFIG_WL1271=m
-CONFIG_WL1271_SPI=m
-CONFIG_WL1271_SDIO=m
+CONFIG_WL12XX_MENU=m
+CONFIG_WL12XX=m
+CONFIG_WL12XX_HT=y
+CONFIG_WL12XX_SPI=m
+CONFIG_WL12XX_SDIO=m
+CONFIG_WL12XX_SDIO_TEST=m
+CONFIG_WL12XX_PLATFORM_DATA=y
CONFIG_ZD1211RW=m
# CONFIG_ZD1211RW_DEBUG is not set
@@ -2009,6 +2132,7 @@ CONFIG_USB_USBNET=m
CONFIG_USB_NET_AX8817X=m
CONFIG_USB_NET_CDCETHER=m
CONFIG_USB_NET_CDC_EEM=m
+CONFIG_USB_NET_CDC_NCM=m
CONFIG_USB_NET_DM9601=m
CONFIG_USB_NET_SMSC75XX=m
CONFIG_USB_NET_SMSC95XX=m
@@ -2025,11 +2149,13 @@ CONFIG_USB_ARMLINUX=y
CONFIG_USB_EPSON2888=y
CONFIG_USB_KC2190=y
CONFIG_USB_NET_ZAURUS=m
+CONFIG_USB_NET_CX82310_ETH=m
CONFIG_USB_HSO=m
CONFIG_USB_NET_INT51X1=m
CONFIG_USB_CDC_PHONET=m
CONFIG_USB_IPHETH=m
CONFIG_USB_SIERRA_NET=m
+CONFIG_USB_VL600=m
CONFIG_NET_PCMCIA=y
CONFIG_PCMCIA_3C589=m
CONFIG_PCMCIA_3C574=m
@@ -2105,6 +2231,8 @@ CONFIG_IEEE802154_FAKEHARD=m
CONFIG_CAIF_TTY=m
CONFIG_CAIF_SPI_SLAVE=m
# CONFIG_CAIF_SPI_SYNC is not set
+CONFIG_XEN_NETDEV_FRONTEND=y
+CONFIG_XEN_NETDEV_BACKEND=m
CONFIG_FDDI=y
CONFIG_DEFXX=m
# CONFIG_DEFXX_MMIO is not set
@@ -2122,6 +2250,7 @@ CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+CONFIG_PPTP=m
CONFIG_PPPOATM=m
CONFIG_PPPOL2TP=m
CONFIG_SLIP=m
@@ -2216,9 +2345,11 @@ CONFIG_INPUT_EVBUG=m
CONFIG_INPUT_KEYBOARD=y
# CONFIG_KEYBOARD_ADP5588 is not set
CONFIG_KEYBOARD_ATKBD=y
+CONFIG_KEYBOARD_QT1070=m
# CONFIG_KEYBOARD_QT2160 is not set
CONFIG_KEYBOARD_LKKBD=m
CONFIG_KEYBOARD_GPIO=m
+CONFIG_KEYBOARD_GPIO_POLLED=m
CONFIG_KEYBOARD_TCA6416=m
CONFIG_KEYBOARD_MATRIX=m
CONFIG_KEYBOARD_LM8323=m
@@ -2253,6 +2384,8 @@ CONFIG_TOUCHSCREEN_AD7877=m
CONFIG_TOUCHSCREEN_AD7879=m
CONFIG_TOUCHSCREEN_AD7879_I2C=m
CONFIG_TOUCHSCREEN_AD7879_SPI=m
+CONFIG_TOUCHSCREEN_ATMEL_MXT=m
+CONFIG_TOUCHSCREEN_BU21013=m
CONFIG_TOUCHSCREEN_CY8CTMG110=m
CONFIG_TOUCHSCREEN_DYNAPRO=m
CONFIG_TOUCHSCREEN_HAMPSHIRE=m
@@ -2266,7 +2399,6 @@ CONFIG_TOUCHSCREEN_MTOUCH=m
CONFIG_TOUCHSCREEN_INEXIO=m
CONFIG_TOUCHSCREEN_MK712=m
CONFIG_TOUCHSCREEN_PENMOUNT=m
-CONFIG_TOUCHSCREEN_QT602240=m
CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
CONFIG_TOUCHSCREEN_TOUCHWIN=m
CONFIG_TOUCHSCREEN_UCB1400=m
@@ -2275,6 +2407,7 @@ CONFIG_TOUCHSCREEN_WM9705=y
CONFIG_TOUCHSCREEN_WM9712=y
CONFIG_TOUCHSCREEN_WM9713=y
CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
+# CONFIG_TOUCHSCREEN_MC13783 is not set
CONFIG_TOUCHSCREEN_USB_EGALAX=y
CONFIG_TOUCHSCREEN_USB_PANJIT=y
CONFIG_TOUCHSCREEN_USB_3M=y
@@ -2292,7 +2425,9 @@ CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y
CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y
CONFIG_TOUCHSCREEN_USB_NEXIO=y
CONFIG_TOUCHSCREEN_TOUCHIT213=m
+CONFIG_TOUCHSCREEN_TSC2005=m
CONFIG_TOUCHSCREEN_TSC2007=m
+CONFIG_TOUCHSCREEN_ST1232=m
CONFIG_TOUCHSCREEN_TPS6507X=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_AD714X=m
@@ -2308,13 +2443,15 @@ CONFIG_INPUT_POWERMATE=m
CONFIG_INPUT_YEALINK=m
CONFIG_INPUT_CM109=m
CONFIG_INPUT_UINPUT=m
-CONFIG_INPUT_WINBOND_CIR=m
CONFIG_INPUT_PCF50633_PMU=m
CONFIG_INPUT_PCF8574=m
CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
CONFIG_INPUT_ADXL34X=m
CONFIG_INPUT_ADXL34X_I2C=m
CONFIG_INPUT_ADXL34X_SPI=m
+CONFIG_INPUT_CMA3000=m
+CONFIG_INPUT_CMA3000_I2C=m
+CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m
#
# Hardware I/O ports
@@ -2328,6 +2465,7 @@ CONFIG_SERIO_PCIPS2=m
CONFIG_SERIO_LIBPS2=y
CONFIG_SERIO_RAW=m
CONFIG_SERIO_ALTERA_PS2=m
+CONFIG_SERIO_PS2MULT=m
# CONFIG_GAMEPORT is not set
#
@@ -2338,27 +2476,24 @@ CONFIG_CONSOLE_TRANSLATIONS=y
CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
# CONFIG_VT_HW_CONSOLE_BINDING is not set
-# CONFIG_DEVKMEM is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_NONSTANDARD=y
-CONFIG_COMPUTONE=m
CONFIG_ROCKETPORT=m
CONFIG_CYCLADES=m
# CONFIG_CYZ_INTR is not set
-CONFIG_DIGIEPCA=m
CONFIG_MOXA_INTELLIO=m
CONFIG_MOXA_SMARTIO=m
-CONFIG_ISI=m
CONFIG_SYNCLINK=m
CONFIG_SYNCLINKMP=m
CONFIG_SYNCLINK_GT=m
+CONFIG_NOZOMI=m
+CONFIG_ISI=m
CONFIG_N_HDLC=m
# CONFIG_N_GSM is not set
-CONFIG_RISCOM8=m
-CONFIG_SPECIALIX=m
+# CONFIG_DEVKMEM is not set
CONFIG_STALDRV=y
-CONFIG_STALLION=m
-CONFIG_ISTALLION=m
-CONFIG_NOZOMI=m
#
# Serial drivers
@@ -2393,13 +2528,15 @@ CONFIG_SERIAL_ALTERA_JTAGUART=m
CONFIG_SERIAL_ALTERA_UART=m
CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4
CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200
-CONFIG_UNIX98_PTYS=y
-# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
-# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_IFX6X60=m
+CONFIG_SERIAL_PCH_UART=m
+# CONFIG_TTY_PRINTK is not set
CONFIG_PRINTER=m
# CONFIG_LP_CONSOLE is not set
CONFIG_PPDEV=m
CONFIG_HVC_DRIVER=y
+CONFIG_HVC_IRQ=y
+CONFIG_HVC_XEN=y
CONFIG_VIRTIO_CONSOLE=m
CONFIG_IPMI_HANDLER=m
# CONFIG_IPMI_PANIC_EVENT is not set
@@ -2447,6 +2584,8 @@ CONFIG_I2C_MUX=m
#
# Multiplexer I2C Chip support
#
+CONFIG_I2C_MUX_GPIO=m
+CONFIG_I2C_MUX_PCA9541=m
CONFIG_I2C_MUX_PCA954x=m
CONFIG_I2C_HELPER_AUTO=y
CONFIG_I2C_SMBUS=m
@@ -2486,14 +2625,18 @@ CONFIG_I2C_SCMI=m
# I2C system bus drivers (mostly embedded / system-on-chip)
#
CONFIG_I2C_GPIO=m
+CONFIG_I2C_INTEL_MID=m
CONFIG_I2C_OCORES=m
CONFIG_I2C_PCA_PLATFORM=m
+# CONFIG_I2C_PXA_PCI is not set
CONFIG_I2C_SIMTEC=m
CONFIG_I2C_XILINX=m
+CONFIG_I2C_EG20T=m
#
# External I2C/SMBus adapter drivers
#
+CONFIG_I2C_DIOLAN_U2C=m
CONFIG_I2C_PARPORT=m
CONFIG_I2C_PARPORT_LIGHT=m
CONFIG_I2C_TAOS_EVM=m
@@ -2512,10 +2655,14 @@ CONFIG_SPI_MASTER=y
#
# SPI Master Controller Drivers
#
+CONFIG_SPI_ALTERA=m
CONFIG_SPI_BITBANG=m
CONFIG_SPI_BUTTERFLY=m
CONFIG_SPI_GPIO=m
CONFIG_SPI_LM70_LLP=m
+CONFIG_SPI_OC_TINY=m
+# CONFIG_SPI_PXA2XX_PCI is not set
+CONFIG_SPI_TOPCLIFF_PCH=m
# CONFIG_SPI_XILINX is not set
CONFIG_SPI_DESIGNWARE=m
CONFIG_SPI_DW_PCI=m
@@ -2530,6 +2677,10 @@ CONFIG_SPI_TLE62X0=m
# PPS support
#
# CONFIG_PPS is not set
+
+#
+# PPS generators support
+#
CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_GPIO_SYSFS is not set
@@ -2538,8 +2689,10 @@ CONFIG_GPIO_MAX730X=m
#
# Memory mapped GPIO expanders:
#
+CONFIG_GPIO_BASIC_MMIO=m
CONFIG_GPIO_IT8761E=m
CONFIG_GPIO_SCH=m
+CONFIG_GPIO_VX855=m
#
# I2C GPIO expanders:
@@ -2555,6 +2708,8 @@ CONFIG_GPIO_ADP5588=m
#
CONFIG_GPIO_CS5535=m
# CONFIG_GPIO_LANGWELL is not set
+CONFIG_GPIO_PCH=m
+CONFIG_GPIO_ML_IOH=m
CONFIG_GPIO_TIMBERDALE=y
CONFIG_GPIO_RDC321X=m
@@ -2564,6 +2719,7 @@ CONFIG_GPIO_RDC321X=m
CONFIG_GPIO_MAX7301=m
CONFIG_GPIO_MCP23S08=m
# CONFIG_GPIO_MC33880 is not set
+CONFIG_GPIO_74X164=m
#
# AC97 GPIO expanders:
@@ -2590,6 +2746,7 @@ CONFIG_W1_MASTER_GPIO=m
#
CONFIG_W1_SLAVE_THERM=m
CONFIG_W1_SLAVE_SMEM=m
+CONFIG_W1_SLAVE_DS2423=m
CONFIG_W1_SLAVE_DS2431=m
CONFIG_W1_SLAVE_DS2433=m
# CONFIG_W1_SLAVE_DS2433_CRC is not set
@@ -2601,9 +2758,15 @@ CONFIG_PDA_POWER=m
CONFIG_TEST_POWER=m
CONFIG_BATTERY_DS2760=m
CONFIG_BATTERY_DS2782=m
+CONFIG_BATTERY_BQ20Z75=m
CONFIG_BATTERY_BQ27x00=m
+CONFIG_BATTERY_BQ27X00_I2C=y
+CONFIG_BATTERY_BQ27X00_PLATFORM=y
CONFIG_BATTERY_MAX17040=m
+CONFIG_BATTERY_MAX17042=m
CONFIG_CHARGER_PCF50633=m
+CONFIG_CHARGER_ISP1704=m
+CONFIG_CHARGER_GPIO=m
CONFIG_HWMON=m
CONFIG_HWMON_VID=m
# CONFIG_HWMON_DEBUG_CHIP is not set
@@ -2631,6 +2794,7 @@ CONFIG_SENSORS_K8TEMP=m
CONFIG_SENSORS_K10TEMP=m
CONFIG_SENSORS_ASB100=m
CONFIG_SENSORS_ATXP1=m
+CONFIG_SENSORS_DS620=m
CONFIG_SENSORS_DS1621=m
CONFIG_SENSORS_I5K_AMB=m
CONFIG_SENSORS_F71805F=m
@@ -2640,12 +2804,14 @@ CONFIG_SENSORS_FSCHMD=m
CONFIG_SENSORS_G760A=m
CONFIG_SENSORS_GL518SM=m
CONFIG_SENSORS_GL520SM=m
+CONFIG_SENSORS_GPIO_FAN=m
CONFIG_SENSORS_CORETEMP=m
CONFIG_SENSORS_PKGTEMP=m
CONFIG_SENSORS_IBMAEM=m
CONFIG_SENSORS_IBMPEX=m
CONFIG_SENSORS_IT87=m
CONFIG_SENSORS_JC42=m
+CONFIG_SENSORS_LINEAGE=m
CONFIG_SENSORS_LM63=m
CONFIG_SENSORS_LM70=m
CONFIG_SENSORS_LM73=m
@@ -2659,16 +2825,25 @@ CONFIG_SENSORS_LM87=m
CONFIG_SENSORS_LM90=m
CONFIG_SENSORS_LM92=m
CONFIG_SENSORS_LM93=m
+CONFIG_SENSORS_LTC4151=m
CONFIG_SENSORS_LTC4215=m
CONFIG_SENSORS_LTC4245=m
+CONFIG_SENSORS_LTC4261=m
CONFIG_SENSORS_LM95241=m
CONFIG_SENSORS_MAX1111=m
CONFIG_SENSORS_MAX1619=m
+CONFIG_SENSORS_MAX6639=m
CONFIG_SENSORS_MAX6650=m
CONFIG_SENSORS_PC87360=m
CONFIG_SENSORS_PC87427=m
CONFIG_SENSORS_PCF8591=m
+CONFIG_PMBUS=m
+CONFIG_SENSORS_PMBUS=m
+CONFIG_SENSORS_MAX16064=m
+CONFIG_SENSORS_MAX34440=m
+CONFIG_SENSORS_MAX8688=m
CONFIG_SENSORS_SHT15=m
+CONFIG_SENSORS_SHT21=m
CONFIG_SENSORS_SIS5595=m
CONFIG_SENSORS_SMM665=m
CONFIG_SENSORS_DME1737=m
@@ -2677,6 +2852,8 @@ CONFIG_SENSORS_EMC2103=m
CONFIG_SENSORS_SMSC47M1=m
CONFIG_SENSORS_SMSC47M192=m
CONFIG_SENSORS_SMSC47B397=m
+CONFIG_SENSORS_SCH5627=m
+CONFIG_SENSORS_ADS1015=m
CONFIG_SENSORS_ADS7828=m
CONFIG_SENSORS_ADS7871=m
CONFIG_SENSORS_AMC6821=m
@@ -2692,19 +2869,19 @@ CONFIG_SENSORS_W83781D=m
CONFIG_SENSORS_W83791D=m
CONFIG_SENSORS_W83792D=m
CONFIG_SENSORS_W83793=m
+CONFIG_SENSORS_W83795=m
+CONFIG_SENSORS_W83795_FANCTRL=y
CONFIG_SENSORS_W83L785TS=m
CONFIG_SENSORS_W83L786NG=m
CONFIG_SENSORS_W83627HF=m
CONFIG_SENSORS_W83627EHF=m
-CONFIG_SENSORS_HDAPS=m
-CONFIG_SENSORS_LIS3_I2C=m
CONFIG_SENSORS_APPLESMC=m
+# CONFIG_SENSORS_MC13783_ADC is not set
#
# ACPI drivers
#
CONFIG_SENSORS_ATK0110=m
-CONFIG_SENSORS_LIS3LV02D=m
CONFIG_THERMAL=y
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -2718,6 +2895,7 @@ CONFIG_ADVANTECH_WDT=m
CONFIG_ALIM1535_WDT=m
CONFIG_ALIM7101_WDT=m
CONFIG_F71808E_WDT=m
+CONFIG_SP5100_TCO=m
CONFIG_GEODE_WDT=m
CONFIG_SC520_WDT=m
# CONFIG_SBC_FITPC2_WATCHDOG is not set
@@ -2733,6 +2911,7 @@ CONFIG_IT87_WDT=m
# CONFIG_HP_WATCHDOG is not set
CONFIG_SC1200_WDT=m
CONFIG_PC87413_WDT=m
+CONFIG_NV_TCO=m
CONFIG_60XX_WDT=m
CONFIG_SBC8360_WDT=m
CONFIG_CPU5_WDT=m
@@ -2745,6 +2924,7 @@ CONFIG_W83877F_WDT=m
CONFIG_W83977F_WDT=m
CONFIG_MACHZ_WDT=m
CONFIG_SBC_EPX_C3_WATCHDOG=m
+CONFIG_XEN_WDT=m
#
# PCI-based Watchdog Cards
@@ -2776,55 +2956,64 @@ CONFIG_SSB_SDIOHOST=y
CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
CONFIG_SSB_DRIVER_PCICORE=y
CONFIG_MFD_SUPPORT=y
-CONFIG_MFD_CORE=y
+CONFIG_MFD_CORE=m
CONFIG_MFD_SM501=m
# CONFIG_MFD_SM501_GPIO is not set
CONFIG_HTC_PASIC3=m
CONFIG_UCB1400_CORE=m
+CONFIG_TPS6105X=m
CONFIG_TPS65010=m
CONFIG_TPS6507X=m
# CONFIG_MFD_TMIO is not set
CONFIG_MFD_WM8400=m
+# CONFIG_MFD_WM831X_SPI is not set
CONFIG_MFD_PCF50633=m
-# CONFIG_MFD_MC13783 is not set
+CONFIG_MFD_MC13783=m
+CONFIG_MFD_MC13XXX=m
CONFIG_PCF50633_ADC=m
CONFIG_PCF50633_GPIO=m
CONFIG_ABX500_CORE=y
# CONFIG_EZX_PCAP is not set
-CONFIG_AB8500_CORE=y
+# CONFIG_AB8500_CORE is not set
+CONFIG_MFD_CS5535=m
CONFIG_MFD_TIMBERDALE=m
CONFIG_LPC_SCH=m
CONFIG_MFD_RDC321X=m
CONFIG_MFD_JANZ_CMODIO=m
-CONFIG_MFD_TPS6586X=m
+CONFIG_MFD_VX855=m
+CONFIG_MFD_WL1273_CORE=m
CONFIG_REGULATOR=y
# CONFIG_REGULATOR_DEBUG is not set
# CONFIG_REGULATOR_DUMMY is not set
-# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
+CONFIG_REGULATOR_FIXED_VOLTAGE=m
CONFIG_REGULATOR_VIRTUAL_CONSUMER=m
CONFIG_REGULATOR_USERSPACE_CONSUMER=m
CONFIG_REGULATOR_BQ24022=m
CONFIG_REGULATOR_MAX1586=m
CONFIG_REGULATOR_MAX8649=m
CONFIG_REGULATOR_MAX8660=m
+CONFIG_REGULATOR_MAX8952=m
CONFIG_REGULATOR_WM8400=m
CONFIG_REGULATOR_PCF50633=m
CONFIG_REGULATOR_LP3971=m
+CONFIG_REGULATOR_LP3972=m
+CONFIG_REGULATOR_MC13XXX_CORE=m
+CONFIG_REGULATOR_MC13783=m
+CONFIG_REGULATOR_MC13892=m
+CONFIG_REGULATOR_TPS6105X=m
# CONFIG_REGULATOR_TPS65023 is not set
# CONFIG_REGULATOR_TPS6507X is not set
CONFIG_REGULATOR_ISL6271A=m
-CONFIG_REGULATOR_AD5398=m
-# CONFIG_REGULATOR_AB8500 is not set
-CONFIG_REGULATOR_TPS6586X=m
+# CONFIG_REGULATOR_AD5398 is not set
+CONFIG_REGULATOR_TPS6524X=m
CONFIG_MEDIA_SUPPORT=m
#
# Multimedia core support
#
+# CONFIG_MEDIA_CONTROLLER is not set
CONFIG_VIDEO_DEV=m
CONFIG_VIDEO_V4L2_COMMON=m
-# CONFIG_VIDEO_ALLOW_V4L1 is not set
-CONFIG_VIDEO_V4L1_COMPAT=y
CONFIG_DVB_CORE=m
CONFIG_VIDEO_MEDIA=m
@@ -2833,8 +3022,7 @@ CONFIG_VIDEO_MEDIA=m
#
CONFIG_VIDEO_SAA7146=m
CONFIG_VIDEO_SAA7146_VV=m
-CONFIG_IR_CORE=m
-CONFIG_VIDEO_IR=m
+CONFIG_RC_CORE=m
CONFIG_LIRC=m
CONFIG_RC_MAP=m
CONFIG_IR_NEC_DECODER=m
@@ -2842,11 +3030,16 @@ CONFIG_IR_RC5_DECODER=m
CONFIG_IR_RC6_DECODER=m
CONFIG_IR_JVC_DECODER=m
CONFIG_IR_SONY_DECODER=m
+CONFIG_IR_RC5_SZ_DECODER=m
CONFIG_IR_LIRC_CODEC=m
+# CONFIG_IR_ENE is not set
CONFIG_IR_IMON=m
-CONFIG_IR_MCEUSB=m
-CONFIG_IR_ENE=m
-CONFIG_IR_STREAMZAP=m
+# CONFIG_IR_MCEUSB is not set
+CONFIG_IR_ITE_CIR=m
+# CONFIG_IR_NUVOTON is not set
+# CONFIG_IR_STREAMZAP is not set
+CONFIG_IR_WINBOND_CIR=m
+CONFIG_RC_LOOPBACK=m
# CONFIG_MEDIA_ATTACH is not set
CONFIG_MEDIA_TUNER=m
# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
@@ -2868,6 +3061,7 @@ CONFIG_MEDIA_TUNER_MXL5005S=m
CONFIG_MEDIA_TUNER_MXL5007T=m
CONFIG_MEDIA_TUNER_MC44S803=m
CONFIG_MEDIA_TUNER_MAX2165=m
+CONFIG_MEDIA_TUNER_TDA18218=m
CONFIG_VIDEO_V4L2=m
CONFIG_VIDEOBUF_GEN=m
CONFIG_VIDEOBUF_DMA_SG=m
@@ -2878,6 +3072,9 @@ CONFIG_VIDEO_BTCX=m
CONFIG_VIDEO_TVEEPROM=m
CONFIG_VIDEO_TUNER=m
CONFIG_V4L2_MEM2MEM_DEV=m
+CONFIG_VIDEOBUF2_CORE=m
+CONFIG_VIDEOBUF2_MEMOPS=m
+CONFIG_VIDEOBUF2_VMALLOC=m
CONFIG_VIDEO_CAPTURE_DRIVERS=y
# CONFIG_VIDEO_ADV_DEBUG is not set
# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
@@ -2894,7 +3091,6 @@ CONFIG_VIDEO_IR_I2C=m
CONFIG_VIDEO_TVAUDIO=m
CONFIG_VIDEO_TDA7432=m
CONFIG_VIDEO_TDA9840=m
-CONFIG_VIDEO_TDA9875=m
CONFIG_VIDEO_TEA6415C=m
CONFIG_VIDEO_TEA6420=m
CONFIG_VIDEO_MSP3400=m
@@ -2914,7 +3110,7 @@ CONFIG_VIDEO_SAA6588=m
#
# Video decoders
#
-# CONFIG_VIDEO_ADV7180 is not set
+CONFIG_VIDEO_ADV7180=m
CONFIG_VIDEO_BT819=m
CONFIG_VIDEO_BT856=m
CONFIG_VIDEO_BT866=m
@@ -2957,12 +3153,12 @@ CONFIG_VIDEO_AK881X=m
#
CONFIG_VIDEO_UPD64031A=m
CONFIG_VIDEO_UPD64083=m
+# CONFIG_VIDEO_VIVI is not set
CONFIG_VIDEO_BT848=m
CONFIG_VIDEO_BT848_DVB=y
CONFIG_VIDEO_BWQCAM=m
CONFIG_VIDEO_CQCAM=m
-CONFIG_VIDEO_SAA5246A=m
-CONFIG_VIDEO_SAA5249=m
+# CONFIG_VIDEO_CPIA2 is not set
CONFIG_VIDEO_ZORAN=m
CONFIG_VIDEO_ZORAN_DC30=m
CONFIG_VIDEO_ZORAN_ZR36060=m
@@ -2974,10 +3170,12 @@ CONFIG_VIDEO_ZORAN_AVS6EYES=m
CONFIG_VIDEO_MEYE=m
CONFIG_VIDEO_SAA7134=m
CONFIG_VIDEO_SAA7134_ALSA=m
+CONFIG_VIDEO_SAA7134_RC=y
CONFIG_VIDEO_SAA7134_DVB=m
CONFIG_VIDEO_MXB=m
CONFIG_VIDEO_HEXIUM_ORION=m
CONFIG_VIDEO_HEXIUM_GEMINI=m
+CONFIG_VIDEO_TIMBERDALE=m
CONFIG_VIDEO_CX88=m
CONFIG_VIDEO_CX88_ALSA=m
CONFIG_VIDEO_CX88_BLACKBIRD=m
@@ -2985,6 +3183,7 @@ CONFIG_VIDEO_CX88_DVB=m
CONFIG_VIDEO_CX88_MPEG=m
CONFIG_VIDEO_CX88_VP3054=m
CONFIG_VIDEO_CX23885=m
+CONFIG_MEDIA_ALTERA_CI=m
CONFIG_VIDEO_AU0828=m
CONFIG_VIDEO_IVTV=m
CONFIG_VIDEO_FB_IVTV=m
@@ -2992,7 +3191,11 @@ CONFIG_VIDEO_CX18=m
CONFIG_VIDEO_CX18_ALSA=m
CONFIG_VIDEO_SAA7164=m
CONFIG_VIDEO_CAFE_CCIC=m
+# CONFIG_VIDEO_SR030PC30 is not set
+# CONFIG_VIDEO_VIA_CAMERA is not set
+CONFIG_VIDEO_NOON010PC30=m
CONFIG_SOC_CAMERA=m
+# CONFIG_SOC_CAMERA_IMX074 is not set
CONFIG_SOC_CAMERA_MT9M001=m
CONFIG_SOC_CAMERA_MT9M111=m
CONFIG_SOC_CAMERA_MT9T031=m
@@ -3001,8 +3204,11 @@ CONFIG_SOC_CAMERA_MT9V022=m
CONFIG_SOC_CAMERA_RJ54N1=m
CONFIG_SOC_CAMERA_TW9910=m
CONFIG_SOC_CAMERA_PLATFORM=m
+CONFIG_SOC_CAMERA_OV2640=m
+# CONFIG_SOC_CAMERA_OV6650 is not set
CONFIG_SOC_CAMERA_OV772X=m
CONFIG_SOC_CAMERA_OV9640=m
+CONFIG_SOC_CAMERA_OV9740=m
CONFIG_V4L_USB_DRIVERS=y
CONFIG_USB_VIDEO_CLASS=m
CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
@@ -3016,8 +3222,10 @@ CONFIG_USB_GSPCA_CPIA1=m
CONFIG_USB_GSPCA_ETOMS=m
CONFIG_USB_GSPCA_FINEPIX=m
CONFIG_USB_GSPCA_JEILINJ=m
+# CONFIG_USB_GSPCA_KONICA is not set
CONFIG_USB_GSPCA_MARS=m
CONFIG_USB_GSPCA_MR97310A=m
+CONFIG_USB_GSPCA_NW80X=m
CONFIG_USB_GSPCA_OV519=m
CONFIG_USB_GSPCA_OV534=m
CONFIG_USB_GSPCA_OV534_9=m
@@ -3034,16 +3242,18 @@ CONFIG_USB_GSPCA_SPCA505=m
CONFIG_USB_GSPCA_SPCA506=m
CONFIG_USB_GSPCA_SPCA508=m
CONFIG_USB_GSPCA_SPCA561=m
-CONFIG_USB_GSPCA_SPCA1528=m
+# CONFIG_USB_GSPCA_SPCA1528 is not set
CONFIG_USB_GSPCA_SQ905=m
CONFIG_USB_GSPCA_SQ905C=m
-CONFIG_USB_GSPCA_SQ930X=m
+# CONFIG_USB_GSPCA_SQ930X is not set
CONFIG_USB_GSPCA_STK014=m
CONFIG_USB_GSPCA_STV0680=m
CONFIG_USB_GSPCA_SUNPLUS=m
CONFIG_USB_GSPCA_T613=m
CONFIG_USB_GSPCA_TV8532=m
CONFIG_USB_GSPCA_VC032X=m
+CONFIG_USB_GSPCA_VICAM=m
+# CONFIG_USB_GSPCA_XIRLINK_CIT is not set
CONFIG_USB_GSPCA_ZC3XX=m
CONFIG_VIDEO_PVRUSB2=m
CONFIG_VIDEO_PVRUSB2_SYSFS=y
@@ -3055,18 +3265,19 @@ CONFIG_VIDEO_EM28XX_ALSA=m
CONFIG_VIDEO_EM28XX_DVB=m
CONFIG_VIDEO_TLG2300=m
CONFIG_VIDEO_CX231XX=m
+CONFIG_VIDEO_CX231XX_RC=y
CONFIG_VIDEO_CX231XX_ALSA=m
CONFIG_VIDEO_CX231XX_DVB=m
CONFIG_VIDEO_USBVISION=m
CONFIG_USB_ET61X251=m
CONFIG_USB_SN9C102=m
+# CONFIG_USB_PWC is not set
CONFIG_USB_ZR364XX=m
CONFIG_USB_STKWEBCAM=m
CONFIG_USB_S2255=m
CONFIG_V4L_MEM2MEM_DRIVERS=y
CONFIG_VIDEO_MEM2MEM_TESTDEV=m
CONFIG_RADIO_ADAPTERS=y
-CONFIG_RADIO_GEMTEK_PCI=m
CONFIG_RADIO_MAXIRADIO=m
CONFIG_RADIO_MAESTRO=m
# CONFIG_I2C_SI4713 is not set
@@ -3078,6 +3289,12 @@ CONFIG_RADIO_TEA5764=m
CONFIG_RADIO_SAA7706H=m
CONFIG_RADIO_TEF6862=m
CONFIG_RADIO_TIMBERDALE=m
+CONFIG_RADIO_WL1273=m
+
+#
+# Texas Instruments WL128x FM driver (ST based)
+#
+CONFIG_RADIO_WL128X=m
CONFIG_DVB_MAX_ADAPTERS=8
# CONFIG_DVB_DYNAMIC_MINORS is not set
CONFIG_DVB_CAPTURE_DRIVERS=y
@@ -3128,6 +3345,8 @@ CONFIG_DVB_USB_CE6230=m
# CONFIG_DVB_USB_FRIIO is not set
CONFIG_DVB_USB_EC168=m
CONFIG_DVB_USB_AZ6027=m
+# CONFIG_DVB_USB_LME2510 is not set
+CONFIG_DVB_USB_TECHNISAT_USB2=m
CONFIG_DVB_TTUSB_BUDGET=m
CONFIG_DVB_TTUSB_DEC=m
CONFIG_SMS_SIANO_MDTV=m
@@ -3165,8 +3384,6 @@ CONFIG_DVB_DM1105=m
# Supported FireWire (IEEE 1394) Adapters
#
CONFIG_DVB_FIREDTV=m
-CONFIG_DVB_FIREDTV_FIREWIRE=y
-CONFIG_DVB_FIREDTV_IEEE1394=y
CONFIG_DVB_FIREDTV_INPUT=y
#
@@ -3190,10 +3407,18 @@ CONFIG_DVB_NGENE=m
# Supported DVB Frontends
#
# CONFIG_DVB_FE_CUSTOMISE is not set
+
+#
+# Multistandard (satellite) frontends
+#
CONFIG_DVB_STB0899=m
CONFIG_DVB_STB6100=m
CONFIG_DVB_STV090x=m
CONFIG_DVB_STV6110x=m
+
+#
+# DVB-S (satellite) frontends
+#
CONFIG_DVB_CX24110=m
CONFIG_DVB_CX24123=m
CONFIG_DVB_MT312=m
@@ -3217,6 +3442,10 @@ CONFIG_DVB_CX24116=m
CONFIG_DVB_SI21XX=m
CONFIG_DVB_DS3000=m
CONFIG_DVB_MB86A16=m
+
+#
+# DVB-T (terrestrial) frontends
+#
CONFIG_DVB_SP8870=m
CONFIG_DVB_SP887X=m
CONFIG_DVB_CX22700=m
@@ -3233,10 +3462,19 @@ CONFIG_DVB_DIB7000P=m
CONFIG_DVB_TDA10048=m
CONFIG_DVB_AF9013=m
CONFIG_DVB_EC100=m
+CONFIG_DVB_STV0367=m
+
+#
+# DVB-C (cable) frontends
+#
CONFIG_DVB_VES1820=m
CONFIG_DVB_TDA10021=m
CONFIG_DVB_TDA10023=m
CONFIG_DVB_STV0297=m
+
+#
+# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
+#
CONFIG_DVB_NXT200X=m
CONFIG_DVB_OR51211=m
CONFIG_DVB_OR51132=m
@@ -3246,10 +3484,24 @@ CONFIG_DVB_LGDT3305=m
CONFIG_DVB_S5H1409=m
CONFIG_DVB_AU8522=m
CONFIG_DVB_S5H1411=m
+
+#
+# ISDB-T (terrestrial) frontends
+#
+CONFIG_DVB_S921=m
CONFIG_DVB_DIB8000=m
+CONFIG_DVB_MB86A20S=m
+
+#
+# Digital terrestrial only tuners/PLL
+#
CONFIG_DVB_PLL=m
CONFIG_DVB_TUNER_DIB0070=m
CONFIG_DVB_TUNER_DIB0090=m
+
+#
+# SEC control devices for DVB-S
+#
CONFIG_DVB_LNBP21=m
CONFIG_DVB_ISL6405=m
CONFIG_DVB_ISL6421=m
@@ -3257,8 +3509,11 @@ CONFIG_DVB_ISL6423=m
CONFIG_DVB_LGS8GXX=m
CONFIG_DVB_ATBM8830=m
CONFIG_DVB_TDA665x=m
-CONFIG_DAB=y
-CONFIG_USB_DABUSB=m
+
+#
+# Tools to develop new frontends
+#
+# CONFIG_DVB_DUMMY_FE is not set
#
# Graphics support
@@ -3278,13 +3533,13 @@ CONFIG_DRM_R128=m
CONFIG_DRM_RADEON=m
# CONFIG_DRM_RADEON_KMS is not set
CONFIG_DRM_I810=m
-CONFIG_DRM_I830=m
CONFIG_DRM_I915=m
# CONFIG_DRM_I915_KMS is not set
CONFIG_DRM_MGA=m
CONFIG_DRM_SIS=m
CONFIG_DRM_VIA=m
CONFIG_DRM_SAVAGE=m
+# CONFIG_STUB_POULSBO is not set
CONFIG_VGASTATE=m
CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=m
@@ -3300,6 +3555,7 @@ CONFIG_FB_SYS_COPYAREA=m
CONFIG_FB_SYS_IMAGEBLIT=m
# CONFIG_FB_FOREIGN_ENDIAN is not set
CONFIG_FB_SYS_FOPS=m
+# CONFIG_FB_WMT_GE_ROPS is not set
CONFIG_FB_DEFERRED_IO=y
CONFIG_FB_HECUBA=m
CONFIG_FB_SVGALIB=m
@@ -3315,12 +3571,12 @@ CONFIG_FB_CIRRUS=m
CONFIG_FB_PM2=m
CONFIG_FB_PM2_FIFO_DISCONNECT=y
CONFIG_FB_CYBER2000=m
+CONFIG_FB_CYBER2000_DDC=y
CONFIG_FB_ARC=m
CONFIG_FB_VGA16=m
CONFIG_FB_UVESA=m
CONFIG_FB_N411=m
CONFIG_FB_HGA=m
-# CONFIG_FB_HGA_ACCEL is not set
CONFIG_FB_S1D13XXX=m
CONFIG_FB_NVIDIA=m
CONFIG_FB_NVIDIA_I2C=y
@@ -3381,7 +3637,9 @@ CONFIG_FB_GEODE_GX1=m
CONFIG_FB_TMIO=m
CONFIG_FB_TMIO_ACCELL=y
CONFIG_FB_SM501=m
+# CONFIG_FB_UDL is not set
# CONFIG_FB_VIRTUAL is not set
+CONFIG_XEN_FBDEV_FRONTEND=m
CONFIG_FB_METRONOME=m
CONFIG_FB_MB862XX=m
# CONFIG_FB_MB862XX_PCI_GDC is not set
@@ -3396,11 +3654,12 @@ CONFIG_LCD_TDO24M=m
CONFIG_LCD_VGG2432A4=m
CONFIG_LCD_PLATFORM=m
CONFIG_LCD_S6E63M0=m
+CONFIG_LCD_LD9040=m
CONFIG_BACKLIGHT_CLASS_DEVICE=m
CONFIG_BACKLIGHT_GENERIC=m
CONFIG_BACKLIGHT_PROGEAR=m
CONFIG_BACKLIGHT_CARILLO_RANCH=m
-CONFIG_BACKLIGHT_MBP_NVIDIA=m
+CONFIG_BACKLIGHT_APPLE=m
CONFIG_BACKLIGHT_SAHARA=m
CONFIG_BACKLIGHT_ADP8860=m
CONFIG_BACKLIGHT_PCF50633=m
@@ -3464,6 +3723,7 @@ CONFIG_SND_AC97_CODEC=m
CONFIG_SND_DRIVERS=y
CONFIG_SND_PCSP=m
CONFIG_SND_DUMMY=m
+# CONFIG_SND_ALOOP is not set
CONFIG_SND_VIRMIDI=m
CONFIG_SND_MTPAV=m
CONFIG_SND_MTS64=m
@@ -3533,10 +3793,7 @@ CONFIG_SND_HDA_CODEC_REALTEK=y
CONFIG_SND_HDA_CODEC_ANALOG=y
CONFIG_SND_HDA_CODEC_SIGMATEL=y
CONFIG_SND_HDA_CODEC_VIA=y
-CONFIG_SND_HDA_CODEC_ATIHDMI=y
-CONFIG_SND_HDA_CODEC_NVHDMI=y
-CONFIG_SND_HDA_CODEC_INTELHDMI=y
-CONFIG_SND_HDA_ELD=y
+CONFIG_SND_HDA_CODEC_HDMI=y
CONFIG_SND_HDA_CODEC_CIRRUS=y
CONFIG_SND_HDA_CODEC_CONEXANT=y
CONFIG_SND_HDA_CODEC_CA0110=y
@@ -3546,7 +3803,6 @@ CONFIG_SND_HDA_GENERIC=y
# CONFIG_SND_HDA_POWER_SAVE is not set
CONFIG_SND_HDSP=m
CONFIG_SND_HDSPM=m
-CONFIG_SND_HIFIER=m
CONFIG_SND_ICE1712=m
CONFIG_SND_ICE1724=m
CONFIG_SND_INTEL8X0=m
@@ -3577,10 +3833,15 @@ CONFIG_SND_USB_USX2Y=m
CONFIG_SND_USB_CAIAQ=m
# CONFIG_SND_USB_CAIAQ_INPUT is not set
CONFIG_SND_USB_US122L=m
+CONFIG_SND_USB_6FIRE=m
+CONFIG_SND_FIREWIRE=y
+CONFIG_SND_FIREWIRE_LIB=m
+CONFIG_SND_FIREWIRE_SPEAKERS=m
CONFIG_SND_PCMCIA=y
CONFIG_SND_VXPOCKET=m
CONFIG_SND_PDAUDIOCF=m
CONFIG_SND_SOC=m
+# CONFIG_SND_SOC_CACHE_LZO is not set
CONFIG_SND_SOC_I2C_AND_SPI=m
CONFIG_SND_SOC_ALL_CODECS=m
CONFIG_SND_SOC_WM_HUBS=m
@@ -3592,19 +3853,28 @@ CONFIG_SND_SOC_AK4104=m
CONFIG_SND_SOC_AK4535=m
CONFIG_SND_SOC_AK4642=m
CONFIG_SND_SOC_AK4671=m
+CONFIG_SND_SOC_ALC5623=m
CONFIG_SND_SOC_CS42L51=m
CONFIG_SND_SOC_CS4270=m
+CONFIG_SND_SOC_CS4271=m
+CONFIG_SND_SOC_CX20442=m
CONFIG_SND_SOC_L3=m
CONFIG_SND_SOC_DA7210=m
+CONFIG_SND_SOC_DFBMCS320=m
+CONFIG_SND_SOC_MAX98088=m
+CONFIG_SND_SOC_MAX9850=m
CONFIG_SND_SOC_PCM3008=m
+CONFIG_SND_SOC_SGTL5000=m
CONFIG_SND_SOC_SPDIF=m
CONFIG_SND_SOC_SSM2602=m
CONFIG_SND_SOC_TLV320AIC23=m
CONFIG_SND_SOC_TLV320AIC26=m
+CONFIG_SND_SOC_TVL320AIC32X4=m
CONFIG_SND_SOC_TLV320AIC3X=m
CONFIG_SND_SOC_TLV320DAC33=m
CONFIG_SND_SOC_UDA134X=m
CONFIG_SND_SOC_UDA1380=m
+CONFIG_SND_SOC_WL1273=m
CONFIG_SND_SOC_WM8400=m
CONFIG_SND_SOC_WM8510=m
CONFIG_SND_SOC_WM8523=m
@@ -3613,10 +3883,13 @@ CONFIG_SND_SOC_WM8711=m
CONFIG_SND_SOC_WM8727=m
CONFIG_SND_SOC_WM8728=m
CONFIG_SND_SOC_WM8731=m
+CONFIG_SND_SOC_WM8737=m
CONFIG_SND_SOC_WM8741=m
CONFIG_SND_SOC_WM8750=m
CONFIG_SND_SOC_WM8753=m
+CONFIG_SND_SOC_WM8770=m
CONFIG_SND_SOC_WM8776=m
+CONFIG_SND_SOC_WM8804=m
CONFIG_SND_SOC_WM8900=m
CONFIG_SND_SOC_WM8903=m
CONFIG_SND_SOC_WM8904=m
@@ -3624,13 +3897,18 @@ CONFIG_SND_SOC_WM8940=m
CONFIG_SND_SOC_WM8955=m
CONFIG_SND_SOC_WM8960=m
CONFIG_SND_SOC_WM8961=m
+CONFIG_SND_SOC_WM8962=m
CONFIG_SND_SOC_WM8971=m
CONFIG_SND_SOC_WM8974=m
CONFIG_SND_SOC_WM8978=m
+CONFIG_SND_SOC_WM8985=m
CONFIG_SND_SOC_WM8988=m
CONFIG_SND_SOC_WM8990=m
+CONFIG_SND_SOC_WM8991=m
CONFIG_SND_SOC_WM8993=m
+CONFIG_SND_SOC_WM8995=m
CONFIG_SND_SOC_WM9081=m
+CONFIG_SND_SOC_LM4857=m
CONFIG_SND_SOC_MAX9877=m
CONFIG_SND_SOC_TPA6130A2=m
CONFIG_SND_SOC_WM2000=m
@@ -3659,7 +3937,7 @@ CONFIG_USB_MOUSE=m
#
CONFIG_HID_3M_PCT=m
# CONFIG_HID_A4TECH is not set
-CONFIG_HID_ACRUX_FF=m
+# CONFIG_HID_ACRUX is not set
# CONFIG_HID_APPLE is not set
# CONFIG_HID_BELKIN is not set
CONFIG_HID_CANDO=m
@@ -3668,18 +3946,21 @@ CONFIG_HID_CANDO=m
CONFIG_HID_PRODIKEYS=m
# CONFIG_HID_CYPRESS is not set
# CONFIG_HID_DRAGONRISE is not set
-CONFIG_HID_EGALAX=m
-CONFIG_HID_ELECOM=m
+# CONFIG_HID_EMS_FF is not set
# CONFIG_HID_EZKEY is not set
+CONFIG_HID_KEYTOUCH=m
# CONFIG_HID_KYE is not set
+# CONFIG_HID_UCLOGIC is not set
+# CONFIG_HID_WALTOP is not set
# CONFIG_HID_GYRATION is not set
# CONFIG_HID_TWINHAN is not set
# CONFIG_HID_KENSINGTON is not set
+CONFIG_HID_LCPOWER=m
# CONFIG_HID_LOGITECH is not set
-CONFIG_HID_MAGICMOUSE=m
# CONFIG_HID_MICROSOFT is not set
CONFIG_HID_MOSART=m
# CONFIG_HID_MONTEREY is not set
+CONFIG_HID_MULTITOUCH=m
# CONFIG_HID_NTRIG is not set
CONFIG_HID_ORTEK=m
# CONFIG_HID_PANTHERLORD is not set
@@ -3691,16 +3972,20 @@ CONFIG_HID_PICOLCD_LCD=y
CONFIG_HID_PICOLCD_LEDS=y
CONFIG_HID_QUANTA=m
CONFIG_HID_ROCCAT=m
+CONFIG_HID_ROCCAT_COMMON=m
+CONFIG_HID_ROCCAT_ARVO=m
CONFIG_HID_ROCCAT_KONE=m
+CONFIG_HID_ROCCAT_KONEPLUS=m
+CONFIG_HID_ROCCAT_KOVAPLUS=m
+# CONFIG_HID_ROCCAT_PYRA is not set
# CONFIG_HID_SAMSUNG is not set
-# CONFIG_HID_SONY is not set
+CONFIG_HID_SONY=m
CONFIG_HID_STANTUM=m
# CONFIG_HID_SUNPLUS is not set
# CONFIG_HID_GREENASIA is not set
# CONFIG_HID_SMARTJOYPLUS is not set
# CONFIG_HID_TOPSEED is not set
# CONFIG_HID_THRUSTMASTER is not set
-# CONFIG_HID_WACOM is not set
# CONFIG_HID_ZEROPLUS is not set
CONFIG_HID_ZYDACRON=m
CONFIG_USB_SUPPORT=y
@@ -3771,6 +4056,7 @@ CONFIG_USB_TMC=m
#
CONFIG_USB_STORAGE=m
# CONFIG_USB_STORAGE_DEBUG is not set
+CONFIG_USB_STORAGE_REALTEK=m
CONFIG_USB_STORAGE_DATAFAB=m
CONFIG_USB_STORAGE_FREECOM=m
CONFIG_USB_STORAGE_ISD200=m
@@ -3782,6 +4068,8 @@ CONFIG_USB_STORAGE_ALAUDA=m
CONFIG_USB_STORAGE_ONETOUCH=m
CONFIG_USB_STORAGE_KARMA=m
CONFIG_USB_STORAGE_CYPRESS_ATACB=m
+CONFIG_USB_STORAGE_ENE_UB6250=m
+# CONFIG_USB_UAS is not set
CONFIG_USB_LIBUSUAL=y
#
@@ -3834,6 +4122,7 @@ CONFIG_USB_SERIAL_SPCP8X5=m
CONFIG_USB_SERIAL_HP4X=m
CONFIG_USB_SERIAL_SAFE=m
CONFIG_USB_SERIAL_SAFE_PADDED=y
+# CONFIG_USB_SERIAL_SAMBA is not set
CONFIG_USB_SERIAL_SIEMENS_MPI=m
CONFIG_USB_SERIAL_SIERRAWIRELESS=m
CONFIG_USB_SERIAL_SYMBOL=m
@@ -3846,7 +4135,7 @@ CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_SERIAL_OPTICON=m
CONFIG_USB_SERIAL_VIVOPAY_SERIAL=m
CONFIG_USB_SERIAL_ZIO=m
-CONFIG_USB_SERIAL_SSU100=m
+# CONFIG_USB_SERIAL_SSU100 is not set
CONFIG_USB_SERIAL_DEBUG=m
#
@@ -3872,6 +4161,7 @@ CONFIG_USB_LD=m
CONFIG_USB_IOWARRIOR=m
CONFIG_USB_TEST=m
CONFIG_USB_ISIGHTFW=m
+# CONFIG_USB_YUREX is not set
CONFIG_USB_ATM=m
CONFIG_USB_SPEEDTOUCH=m
CONFIG_USB_CXACRU=m
@@ -3888,17 +4178,17 @@ CONFIG_NOP_USB_XCEIV=m
CONFIG_UWB=m
CONFIG_UWB_HWA=m
CONFIG_UWB_WHCI=m
-CONFIG_UWB_WLP=m
CONFIG_UWB_I1480U=m
-CONFIG_UWB_I1480U_WLP=m
CONFIG_MMC=m
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_UNSAFE_RESUME is not set
+# CONFIG_MMC_CLKGATE is not set
#
# MMC/SD/SDIO Card Drivers
#
CONFIG_MMC_BLOCK=m
+CONFIG_MMC_BLOCK_MINORS=8
CONFIG_MMC_BLOCK_BOUNCE=y
CONFIG_SDIO_UART=m
CONFIG_MMC_TEST=m
@@ -3916,6 +4206,7 @@ CONFIG_MMC_TIFM_SD=m
CONFIG_MMC_SDRICOH_CS=m
CONFIG_MMC_CB710=m
CONFIG_MMC_VIA_SDMMC=m
+# CONFIG_MMC_USHC is not set
CONFIG_MEMSTICK=m
# CONFIG_MEMSTICK_DEBUG is not set
@@ -3930,18 +4221,22 @@ CONFIG_MSPRO_BLOCK=m
#
CONFIG_MEMSTICK_TIFM_MS=m
CONFIG_MEMSTICK_JMICRON_38X=m
+CONFIG_MEMSTICK_R592=m
CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_CLASS=y
#
# LED drivers
#
+CONFIG_LEDS_LM3530=m
CONFIG_LEDS_NET5501=m
CONFIG_LEDS_ALIX2=m
CONFIG_LEDS_PCA9532=m
CONFIG_LEDS_GPIO=m
CONFIG_LEDS_GPIO_PLATFORM=y
CONFIG_LEDS_LP3944=m
+# CONFIG_LEDS_LP5521 is not set
+# CONFIG_LEDS_LP5523 is not set
CONFIG_LEDS_CLEVO_MAIL=m
CONFIG_LEDS_PCA955X=m
CONFIG_LEDS_DAC124S085=m
@@ -3950,6 +4245,7 @@ CONFIG_LEDS_BD2802=m
CONFIG_LEDS_INTEL_SS4200=m
CONFIG_LEDS_LT3593=m
CONFIG_LEDS_DELL_NETBOOKS=m
+# CONFIG_LEDS_MC13783 is not set
CONFIG_LEDS_TRIGGERS=y
#
@@ -3964,6 +4260,7 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
#
# iptables trigger is under Netfilter config (LED target)
#
+# CONFIG_NFC_DEVICES is not set
CONFIG_ACCESSIBILITY=y
# CONFIG_A11Y_BRAILLE_CONSOLE is not set
CONFIG_INFINIBAND=m
@@ -4007,11 +4304,11 @@ CONFIG_RTC_DRV_TEST=m
CONFIG_RTC_DRV_DS1307=m
CONFIG_RTC_DRV_DS1374=m
CONFIG_RTC_DRV_DS1672=m
-CONFIG_RTC_DRV_DS3232=m
+# CONFIG_RTC_DRV_DS3232 is not set
CONFIG_RTC_DRV_MAX6900=m
CONFIG_RTC_DRV_RS5C372=m
CONFIG_RTC_DRV_ISL1208=m
-CONFIG_RTC_DRV_ISL12022=m
+# CONFIG_RTC_DRV_ISL12022 is not set
CONFIG_RTC_DRV_X1205=m
CONFIG_RTC_DRV_PCF8563=m
CONFIG_RTC_DRV_PCF8583=m
@@ -4052,19 +4349,18 @@ CONFIG_RTC_DRV_BQ4802=m
CONFIG_RTC_DRV_RP5C01=m
CONFIG_RTC_DRV_V3020=m
CONFIG_RTC_DRV_PCF50633=m
-CONFIG_RTC_DRV_AB8500=m
#
# on-CPU RTC drivers
#
+# CONFIG_RTC_DRV_MC13XXX is not set
CONFIG_DMADEVICES=y
# CONFIG_DMADEVICES_DEBUG is not set
#
# DMA Devices
#
-CONFIG_INTEL_MID_DMAC=m
-CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y
+# CONFIG_INTEL_MID_DMAC is not set
CONFIG_INTEL_IOATDMA=m
CONFIG_TIMB_DMA=m
CONFIG_PCH_DMA=m
@@ -4091,33 +4387,63 @@ CONFIG_UIO_AEC=m
CONFIG_UIO_SERCOS3=m
# CONFIG_UIO_PCI_GENERIC is not set
CONFIG_UIO_NETX=m
+
+#
+# Xen driver support
+#
+CONFIG_XEN_BALLOON=y
+CONFIG_XEN_SCRUB_PAGES=y
+CONFIG_XEN_DEV_EVTCHN=y
+CONFIG_XEN_BACKEND=y
+CONFIG_XENFS=y
+CONFIG_XEN_COMPAT_XENFS=y
+CONFIG_XEN_SYS_HYPERVISOR=y
+CONFIG_XEN_XENBUS_FRONTEND=y
+CONFIG_XEN_GNTDEV=m
+CONFIG_XEN_GRANT_DEV_ALLOC=m
+CONFIG_XEN_PLATFORM_PCI=m
+CONFIG_SWIOTLB_XEN=y
CONFIG_STAGING=y
# CONFIG_STAGING_EXCLUDE_BUILD is not set
+CONFIG_STALLION=m
+CONFIG_ISTALLION=m
+CONFIG_DIGIEPCA=m
+CONFIG_RISCOM8=m
+CONFIG_SPECIALIX=m
+CONFIG_COMPUTONE=m
# CONFIG_ET131X is not set
# CONFIG_SLICOSS is not set
# CONFIG_VIDEO_GO7007 is not set
# CONFIG_VIDEO_CX25821 is not set
# CONFIG_VIDEO_TM6000 is not set
+CONFIG_DVB_CXD2099=m
# CONFIG_USB_IP_COMMON is not set
# CONFIG_W35UND is not set
# CONFIG_PRISM2_USB is not set
# CONFIG_ECHO is not set
-# CONFIG_OTUS is not set
+CONFIG_BRCM80211=m
+# CONFIG_BRCMSMAC is not set
+# CONFIG_BRCMFMAC is not set
+# CONFIG_BRCMDBG is not set
# CONFIG_RT2860 is not set
# CONFIG_RT2870 is not set
# CONFIG_COMEDI is not set
# CONFIG_ASUS_OLED is not set
# CONFIG_PANEL is not set
# CONFIG_R8187SE is not set
-# CONFIG_RTL8192SU is not set
# CONFIG_RTL8192U is not set
# CONFIG_RTL8192E is not set
+# CONFIG_R8712U is not set
+CONFIG_RTS_PSTOR=m
+# CONFIG_RTS_PSTOR_DEBUG is not set
# CONFIG_TRANZPORT is not set
# CONFIG_POHMELFS is not set
# CONFIG_IDE_PHISON is not set
# CONFIG_LINE6_USB is not set
-# CONFIG_DRM_VMWGFX is not set
-# CONFIG_DRM_NOUVEAU is not set
+CONFIG_DRM_VMWGFX=m
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_NOUVEAU_BACKLIGHT=y
+CONFIG_DRM_NOUVEAU_DEBUG=y
#
# I2C encoder or helper chips
@@ -4128,50 +4454,63 @@ CONFIG_DRM_I2C_SIL164=m
# CONFIG_USB_SERIAL_QUATECH_USB2 is not set
# CONFIG_VT6655 is not set
# CONFIG_VT6656 is not set
-# CONFIG_FB_UDL is not set
CONFIG_HYPERV=m
CONFIG_HYPERV_STORAGE=m
CONFIG_HYPERV_BLOCK=m
CONFIG_HYPERV_NET=m
CONFIG_HYPERV_UTILS=m
+CONFIG_HYPERV_MOUSE=m
# CONFIG_VME_BUS is not set
+# CONFIG_DX_SEP is not set
# CONFIG_IIO is not set
-CONFIG_ZRAM=m
-CONFIG_ZRAM_STATS=y
+# CONFIG_XVMALLOC is not set
+# CONFIG_ZRAM is not set
# CONFIG_WLAGS49_H2 is not set
# CONFIG_WLAGS49_H25 is not set
-# CONFIG_BATMAN_ADV is not set
-# CONFIG_SAMSUNG_LAPTOP is not set
# CONFIG_FB_SM7XX is not set
# CONFIG_VIDEO_DT3155 is not set
# CONFIG_CRYSTALHD is not set
# CONFIG_CXT1E1 is not set
+# CONFIG_FB_XGI is not set
+# CONFIG_LIRC_STAGING is not set
+# CONFIG_EASYCAP is not set
+# CONFIG_SOLO6X10 is not set
+# CONFIG_ACPI_QUICKSTART is not set
+CONFIG_MACH_NO_WESTBRIDGE=y
+# CONFIG_SBE_2T3E3 is not set
+# CONFIG_ATH6K_LEGACY is not set
+# CONFIG_USB_ENESTORAGE is not set
+# CONFIG_BCM_WIMAX is not set
+# CONFIG_FT1000 is not set
#
-# Texas Instruments shared transport line discipline
+# Speakup console speech
#
-# CONFIG_TI_ST is not set
-# CONFIG_ST_BT is not set
-# CONFIG_ADIS16255 is not set
-# CONFIG_FB_XGI is not set
-# CONFIG_LIRC_STAGING is not set
-CONFIG_EASYCAP=m
-CONFIG_SOLO6X10=m
-CONFIG_ACPI_QUICKSTART=m
+# CONFIG_SPEAKUP is not set
+# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set
+# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set
+CONFIG_DRM_PSB=m
+
+#
+# Altera FPGA firmware download module
+#
+CONFIG_ALTERA_STAPL=m
CONFIG_X86_PLATFORM_DEVICES=y
CONFIG_ACER_WMI=m
CONFIG_ASUS_LAPTOP=m
CONFIG_DELL_LAPTOP=m
CONFIG_DELL_WMI=m
+CONFIG_DELL_WMI_AIO=m
CONFIG_FUJITSU_LAPTOP=m
# CONFIG_FUJITSU_LAPTOP_DEBUG is not set
+CONFIG_HP_ACCEL=m
CONFIG_HP_WMI=m
CONFIG_MSI_LAPTOP=m
CONFIG_PANASONIC_LAPTOP=m
CONFIG_COMPAL_LAPTOP=m
CONFIG_SONY_LAPTOP=m
# CONFIG_SONYPI_COMPAT is not set
-CONFIG_IDEAPAD_ACPI=m
+# CONFIG_IDEAPAD_LAPTOP is not set
CONFIG_THINKPAD_ACPI=m
CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y
# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set
@@ -4179,8 +4518,11 @@ CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y
# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set
CONFIG_THINKPAD_ACPI_VIDEO=y
CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
+CONFIG_SENSORS_HDAPS=m
CONFIG_INTEL_MENLOW=m
CONFIG_EEEPC_LAPTOP=m
+CONFIG_ASUS_WMI=m
+CONFIG_ASUS_NB_WMI=m
CONFIG_EEEPC_WMI=m
CONFIG_ACPI_WMI=m
CONFIG_MSI_WMI=m
@@ -4189,7 +4531,10 @@ CONFIG_ACPI_ASUS=m
CONFIG_ACPI_TOSHIBA=m
CONFIG_TOSHIBA_BT_RFKILL=m
CONFIG_ACPI_CMPC=m
-CONFIG_INTEL_IPS=m
+# CONFIG_INTEL_IPS is not set
+# CONFIG_IBM_RTL is not set
+CONFIG_XO15_EBOOK=m
+# CONFIG_SAMSUNG_LAPTOP is not set
#
# Firmware Drivers
@@ -4200,7 +4545,9 @@ CONFIG_FIRMWARE_MEMMAP=y
CONFIG_DELL_RBU=m
CONFIG_DCDBAS=m
CONFIG_DMIID=y
+CONFIG_DMI_SYSFS=m
# CONFIG_ISCSI_IBFT_FIND is not set
+CONFIG_SIGMA=m
#
# File systems
@@ -4237,7 +4584,6 @@ CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
# CONFIG_JFS_DEBUG is not set
CONFIG_JFS_STATISTICS=y
-CONFIG_FS_POSIX_ACL=y
CONFIG_XFS_FS=m
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
@@ -4254,10 +4600,13 @@ CONFIG_OCFS2_DEBUG_MASKLOG=y
CONFIG_BTRFS_FS=m
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
+CONFIG_FS_POSIX_ACL=y
+CONFIG_EXPORTFS=y
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY_USER=y
+# CONFIG_FANOTIFY is not set
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -4266,7 +4615,7 @@ CONFIG_QUOTA_TREE=m
CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
CONFIG_QUOTACTL=y
-CONFIG_AUTOFS_FS=m
+CONFIG_QUOTACTL_COMPAT=y
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
# CONFIG_CUSE is not set
@@ -4310,7 +4659,7 @@ CONFIG_NTFS_RW=y
CONFIG_PROC_FS=y
# CONFIG_PROC_KCORE is not set
CONFIG_PROC_SYSCTL=y
-CONFIG_PROC_PAGE_MONITOR=y
+# CONFIG_PROC_PAGE_MONITOR is not set
CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
@@ -4321,9 +4670,6 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
CONFIG_ECRYPT_FS=m
-CONFIG_UNION_FS=m
-# CONFIG_UNION_FS_XATTR is not set
-# CONFIG_UNION_FS_DEBUG is not set
CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
# CONFIG_BEFS_FS is not set
@@ -4357,6 +4703,7 @@ CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
# CONFIG_SQUASHFS_XATTR is not set
# CONFIG_SQUASHFS_LZO is not set
+CONFIG_SQUASHFS_XZ=y
# CONFIG_SQUASHFS_EMBEDDED is not set
CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
# CONFIG_VXFS_FS is not set
@@ -4369,6 +4716,7 @@ CONFIG_ROMFS_BACKED_BY_BLOCK=y
# CONFIG_ROMFS_BACKED_BY_MTD is not set
# CONFIG_ROMFS_BACKED_BY_BOTH is not set
CONFIG_ROMFS_ON_BLOCK=y
+CONFIG_PSTORE=y
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
# CONFIG_UFS_FS_WRITE is not set
@@ -4384,22 +4732,20 @@ CONFIG_NFS_V4=y
# CONFIG_NFS_FSCACHE is not set
# CONFIG_NFS_USE_LEGACY_DNS is not set
CONFIG_NFS_USE_KERNEL_DNS=y
+# CONFIG_NFS_USE_NEW_IDMAPPER is not set
CONFIG_NFSD=m
+CONFIG_NFSD_DEPRECATED=y
CONFIG_NFSD_V3=y
# CONFIG_NFSD_V3_ACL is not set
CONFIG_NFSD_V4=y
CONFIG_LOCKD=m
CONFIG_LOCKD_V4=y
-CONFIG_EXPORTFS=m
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=m
CONFIG_SUNRPC_GSS=m
CONFIG_SUNRPC_XPRT_RDMA=m
CONFIG_RPCSEC_GSS_KRB5=m
-# CONFIG_RPCSEC_GSS_SPKM3 is not set
-# CONFIG_SMB_FS is not set
CONFIG_CEPH_FS=m
-# CONFIG_CEPH_FS_PRETTYDEBUG is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
# CONFIG_CIFS_WEAK_PW_HASH is not set
@@ -4409,6 +4755,7 @@ CONFIG_CIFS_POSIX=y
# CONFIG_CIFS_DEBUG2 is not set
CONFIG_CIFS_DFS_UPCALL=y
# CONFIG_CIFS_FSCACHE is not set
+# CONFIG_CIFS_ACL is not set
CONFIG_CIFS_EXPERIMENTAL=y
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -4436,7 +4783,7 @@ CONFIG_MSDOS_PARTITION=y
# CONFIG_KARMA_PARTITION is not set
CONFIG_EFI_PARTITION=y
# CONFIG_SYSV68_PARTITION is not set
-CONFIG_NLS=m
+CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_737=m
@@ -4484,6 +4831,7 @@ CONFIG_DLM=m
#
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_PRINTK_TIME=y
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
@@ -4492,8 +4840,11 @@ CONFIG_FRAME_WARN=1024
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_SECTION_MISMATCH is not set
# CONFIG_DEBUG_KERNEL is not set
# CONFIG_HARDLOCKUP_DETECTOR is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_SPARSE_RCU_POINTER is not set
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
CONFIG_ARCH_WANT_FRAME_POINTERS=y
@@ -4509,6 +4860,7 @@ CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
CONFIG_RING_BUFFER=y
CONFIG_RING_BUFFER_ALLOW_SWAP=y
CONFIG_TRACING_SUPPORT=y
@@ -4518,12 +4870,15 @@ CONFIG_TRACING_SUPPORT=y
# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_DMA_API_DEBUG is not set
# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_ASYNC_RAID6_TEST is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
CONFIG_HAVE_ARCH_KMEMCHECK=y
+CONFIG_TEST_KSTRTOX=m
CONFIG_STRICT_DEVMEM=y
# CONFIG_X86_VERBOSE_BOOTUP is not set
# CONFIG_EARLY_PRINTK is not set
+CONFIG_DEBUG_SET_MODULE_RONX=y
# CONFIG_IOMMU_STRESS is not set
CONFIG_HAVE_MMIOTRACE_SUPPORT=y
CONFIG_IO_DELAY_TYPE_0X80=0
@@ -4541,7 +4896,10 @@ CONFIG_DEFAULT_IO_DELAY_TYPE=0
# Security options
#
CONFIG_KEYS=y
+CONFIG_TRUSTED_KEYS=m
+CONFIG_ENCRYPTED_KEYS=m
# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
CONFIG_SECURITY=y
CONFIG_SECURITYFS=y
# CONFIG_SECURITY_NETWORK is not set
@@ -4557,7 +4915,6 @@ CONFIG_ASYNC_MEMCPY=m
CONFIG_ASYNC_XOR=m
CONFIG_ASYNC_PQ=m
CONFIG_ASYNC_RAID6_RECOV=m
-# CONFIG_ASYNC_RAID6_TEST is not set
CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA=y
CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA=y
CONFIG_CRYPTO=y
@@ -4565,7 +4922,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-CONFIG_CRYPTO_FIPS=y
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=m
@@ -4606,6 +4962,7 @@ CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_FPU=m
#
# Hash modes
@@ -4632,14 +4989,14 @@ CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-# CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL is not set
+CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m
#
# Ciphers
#
CONFIG_CRYPTO_AES=m
-# CONFIG_CRYPTO_AES_X86_64 is not set
-# CONFIG_CRYPTO_AES_NI_INTEL is not set
+CONFIG_CRYPTO_AES_X86_64=m
+CONFIG_CRYPTO_AES_NI_INTEL=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
@@ -4650,13 +5007,13 @@ CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
-# CONFIG_CRYPTO_SALSA20_X86_64 is not set
+CONFIG_CRYPTO_SALSA20_X86_64=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_TWOFISH_COMMON=m
-# CONFIG_CRYPTO_TWOFISH_X86_64 is not set
+CONFIG_CRYPTO_TWOFISH_X86_64=m
#
# Compression
@@ -4669,6 +5026,9 @@ CONFIG_CRYPTO_LZO=m
# Random Number Generation
#
CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_USER_API=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_HW=y
CONFIG_CRYPTO_DEV_PADLOCK=m
CONFIG_CRYPTO_DEV_PADLOCK_AES=m
@@ -4680,6 +5040,7 @@ CONFIG_HAVE_KVM_IRQCHIP=y
CONFIG_HAVE_KVM_EVENTFD=y
CONFIG_KVM_APIC_ARCHITECTURE=y
CONFIG_KVM_MMIO=y
+CONFIG_KVM_ASYNC_PF=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
CONFIG_KVM_INTEL=m
@@ -4710,9 +5071,19 @@ CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=m
CONFIG_LZO_COMPRESS=m
CONFIG_LZO_DECOMPRESS=y
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
CONFIG_DECOMPRESS_GZIP=y
CONFIG_DECOMPRESS_BZIP2=y
CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_XZ=y
CONFIG_DECOMPRESS_LZO=y
CONFIG_GENERIC_ALLOCATOR=y
CONFIG_REED_SOLOMON=m
@@ -4726,4 +5097,6 @@ CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
CONFIG_CHECK_SIGNATURE=y
+CONFIG_CPU_RMAP=y
CONFIG_NLATTR=y
+CONFIG_AVERAGE=y
diff --git a/main/linux-scst/scst-2.0.0.1-2.6.36.patch b/main/linux-scst/scst-2.1.0-2.6.39.patch
index c8699d826..264fda4b7 100644
--- a/main/linux-scst/scst-2.0.0.1-2.6.36.patch
+++ b/main/linux-scst/scst-2.1.0-2.6.39.patch
@@ -1,8 +1,6 @@
-Signed-off-by:
-
-diff -upkr linux-2.6.36/block/blk-map.c linux-2.6.36/block/blk-map.c
---- linux-2.6.36/block/blk-map.c 2010-10-21 00:30:22.000000000 +0400
-+++ linux-2.6.36/block/blk-map.c 2010-11-26 17:52:19.467689539 +0300
+diff -upkr linux-2.6.39/block/blk-map.c linux-2.6.39/block/blk-map.c
+--- linux-2.6.39/block/blk-map.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39/block/blk-map.c 2011-05-19 10:49:02.753812997 -0400
@@ -5,6 +5,8 @@
#include <linux/module.h>
#include <linux/bio.h>
@@ -12,7 +10,7 @@ diff -upkr linux-2.6.36/block/blk-map.c linux-2.6.36/block/blk-map.c
#include <scsi/sg.h> /* for struct sg_iovec */
#include "blk.h"
-@@ -271,6 +273,337 @@ int blk_rq_unmap_user(struct bio *bio)
+@@ -274,6 +276,339 @@ int blk_rq_unmap_user(struct bio *bio)
}
EXPORT_SYMBOL(blk_rq_unmap_user);
@@ -24,7 +22,18 @@ diff -upkr linux-2.6.36/block/blk-map.c linux-2.6.36/block/blk-map.c
+
+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
+{
-+ sg_free_table(&bw->sg_table);
++ struct sg_table *sgt = &bw->sg_table;
++ struct scatterlist *sg;
++ int i;
++
++ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
++ struct page *pg = sg_page(sg);
++ if (pg == NULL)
++ break;
++ __free_page(pg);
++ }
++
++ sg_free_table(sgt);
+ kfree(bw);
+ return;
+}
@@ -78,7 +87,7 @@ diff -upkr linux-2.6.36/block/blk-map.c linux-2.6.36/block/blk-map.c
+
+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
+ if (res != 0)
-+ goto out_free_bw;
++ goto err_free;
+
+ new_sgl = bw->sg_table.sgl;
+
@@ -87,7 +96,7 @@ diff -upkr linux-2.6.36/block/blk-map.c linux-2.6.36/block/blk-map.c
+
+ pg = alloc_page(page_gfp);
+ if (pg == NULL)
-+ goto err_free_new_sgl;
++ goto err_free;
+
+ sg_assign_page(sg, pg);
+ sg->length = min_t(size_t, PAGE_SIZE, len);
@@ -115,17 +124,8 @@ diff -upkr linux-2.6.36/block/blk-map.c linux-2.6.36/block/blk-map.c
+out:
+ return res;
+
-+err_free_new_sgl:
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+ sg_free_table(&bw->sg_table);
-+
-+out_free_bw:
-+ kfree(bw);
++err_free:
++ blk_free_kern_sg_work(bw);
+ res = -ENOMEM;
+ goto out;
+}
@@ -350,10 +350,10 @@ diff -upkr linux-2.6.36/block/blk-map.c linux-2.6.36/block/blk-map.c
/**
* blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
* @q: request queue where request should be inserted
-diff -upkr linux-2.6.36/include/linux/blkdev.h linux-2.6.36/include/linux/blkdev.h
---- linux-2.6.36/include/linux/blkdev.h 2010-10-21 00:30:22.000000000 +0400
-+++ linux-2.6.36/include/linux/blkdev.h 2010-10-26 12:00:15.899759399 +0400
-@@ -746,6 +748,9 @@ extern int blk_rq_map_kern(struct reques
+diff -upkr linux-2.6.39/include/linux/blkdev.h linux-2.6.39/include/linux/blkdev.h
+--- linux-2.6.39/include/linux/blkdev.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39/include/linux/blkdev.h 2011-05-19 10:49:02.753812997 -0400
+@@ -707,6 +709,9 @@ extern int blk_rq_map_kern(struct reques
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
struct rq_map_data *, struct sg_iovec *, int,
unsigned int, gfp_t);
@@ -363,9 +363,9 @@ diff -upkr linux-2.6.36/include/linux/blkdev.h linux-2.6.36/include/linux/blkdev
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-diff -upkr linux-2.6.36/include/linux/scatterlist.h linux-2.6.36/include/linux/scatterlist.h
---- linux-2.6.36/include/linux/scatterlist.h 2010-10-21 00:30:22.000000000 +0400
-+++ linux-2.6.36/include/linux/scatterlist.h 2010-10-26 12:00:15.899759399 +0400
+diff -upkr linux-2.6.39/include/linux/scatterlist.h linux-2.6.39/include/linux/scatterlist.h
+--- linux-2.6.39/include/linux/scatterlist.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39/include/linux/scatterlist.h 2011-05-19 10:49:02.753812997 -0400
@@ -3,6 +3,7 @@
#include <asm/types.h>
@@ -385,9 +385,9 @@ diff -upkr linux-2.6.36/include/linux/scatterlist.h linux-2.6.36/include/linux/s
/*
* Maximum number of entries that will be allocated in one piece, if
* a list larger than this is required then chaining will be utilized.
-diff -upkr linux-2.6.36/lib/scatterlist.c linux-2.6.36/lib/scatterlist.c
---- linux-2.6.36/lib/scatterlist.c 2010-10-21 00:30:22.000000000 +0400
-+++ linux-2.6.36/lib/scatterlist.c 2010-10-26 12:00:15.899759399 +0400
+diff -upkr linux-2.6.39/lib/scatterlist.c linux-2.6.39/lib/scatterlist.c
+--- linux-2.6.39/lib/scatterlist.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39/lib/scatterlist.c 2011-05-19 10:49:02.753812997 -0400
@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
return sg_copy_buffer(sgl, nents, buf, buflen, 1);
}
@@ -521,10 +521,9 @@ diff -upkr linux-2.6.36/lib/scatterlist.c linux-2.6.36/lib/scatterlist.c
+ return res;
+}
+EXPORT_SYMBOL(sg_copy);
-
-diff -upkr linux-2.6.36/include/linux/mm_types.h linux-2.6.36/include/linux/mm_types.h
---- linux-2.6.36/include/linux/mm_types.h 2010-10-21 00:30:22.000000000 +0400
-+++ linux-2.6.36/include/linux/mm_types.h 2010-10-26 12:01:40.651752329 +0400
+diff -upkr linux-2.6.39/include/linux/mm_types.h linux-2.6.39/include/linux/mm_types.h
+--- linux-2.6.39/include/linux/mm_types.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39/include/linux/mm_types.h 2011-05-19 10:46:24.669812999 -0400
@@ -100,6 +100,18 @@ struct page {
*/
void *shadow;
@@ -544,18 +543,18 @@ diff -upkr linux-2.6.36/include/linux/mm_types.h linux-2.6.36/include/linux/mm_t
};
/*
-diff -upkr linux-2.6.36/include/linux/net.h linux-2.6.36/include/linux/net.h
---- linux-2.6.36/include/linux/net.h 2010-10-21 00:30:22.000000000 +0400
-+++ linux-2.6.36/include/linux/net.h 2010-10-26 12:01:40.651752329 +0400
-@@ -20,6 +20,7 @@
-
- #include <linux/socket.h>
- #include <asm/socket.h>
+diff -upkr linux-2.6.39/include/linux/net.h linux-2.6.39/include/linux/net.h
+--- linux-2.6.39/include/linux/net.h 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39/include/linux/net.h 2011-05-19 10:46:24.669812999 -0400
+@@ -60,6 +60,7 @@ typedef enum {
+ #include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */
+ #include <linux/kmemcheck.h>
+ #include <linux/rcupdate.h>
+#include <linux/mm.h>
- #define NPROTO AF_MAX
-
-@@ -291,5 +292,44 @@ extern int kernel_sock_shutdown(struct s
+ struct poll_table_struct;
+ struct pipe_inode_info;
+@@ -294,5 +295,44 @@ extern int kernel_sock_shutdown(struct s
extern struct ratelimit_state net_ratelimit_state;
#endif
@@ -600,10 +599,10 @@ diff -upkr linux-2.6.36/include/linux/net.h linux-2.6.36/include/linux/net.h
+
#endif /* __KERNEL__ */
#endif /* _LINUX_NET_H */
-diff -upkr linux-2.6.36/net/core/dev.c linux-2.6.36/net/core/dev.c
---- linux-2.6.36/net/core/dev.c 2010-10-21 00:30:22.000000000 +0400
-+++ linux-2.6.36/net/core/dev.c 2010-10-26 12:01:40.651752329 +0400
-@@ -3140,7 +3140,7 @@ pull:
+diff -upkr linux-2.6.39/net/core/dev.c linux-2.6.39/net/core/dev.c
+--- linux-2.6.39/net/core/dev.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39/net/core/dev.c 2011-05-19 10:46:24.669812999 -0400
+@@ -3418,7 +3418,7 @@ pull:
skb_shinfo(skb)->frags[0].size -= grow;
if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
@@ -612,9 +611,9 @@ diff -upkr linux-2.6.36/net/core/dev.c linux-2.6.36/net/core/dev.c
memmove(skb_shinfo(skb)->frags,
skb_shinfo(skb)->frags + 1,
--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
-diff -upkr linux-2.6.36/net/core/skbuff.c linux-2.6.36/net/core/skbuff.c
---- linux-2.6.36/net/core/skbuff.c 2010-10-21 00:30:22.000000000 +0400
-+++ linux-2.6.36/net/core/skbuff.c 2010-10-26 12:01:40.655752708 +0400
+diff -upkr linux-2.6.39/net/core/skbuff.c linux-2.6.39/net/core/skbuff.c
+--- linux-2.6.39/net/core/skbuff.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39/net/core/skbuff.c 2011-05-19 10:46:24.669812999 -0400
@@ -76,13 +76,13 @@ static struct kmem_cache *skbuff_fclone_
static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
@@ -631,7 +630,7 @@ diff -upkr linux-2.6.36/net/core/skbuff.c linux-2.6.36/net/core/skbuff.c
}
static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
-@@ -337,7 +337,7 @@ static void skb_release_data(struct sk_b
+@@ -325,7 +325,7 @@ static void skb_release_data(struct sk_b
if (skb_shinfo(skb)->nr_frags) {
int i;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
@@ -639,8 +638,8 @@ diff -upkr linux-2.6.36/net/core/skbuff.c linux-2.6.36/net/core/skbuff.c
+ net_put_page(skb_shinfo(skb)->frags[i].page);
}
- if (skb_has_frags(skb))
-@@ -754,7 +754,7 @@ struct sk_buff *pskb_copy(struct sk_buff
+ if (skb_has_frag_list(skb))
+@@ -732,7 +732,7 @@ struct sk_buff *pskb_copy(struct sk_buff
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
@@ -649,15 +648,15 @@ diff -upkr linux-2.6.36/net/core/skbuff.c linux-2.6.36/net/core/skbuff.c
}
skb_shinfo(n)->nr_frags = i;
}
-@@ -820,7 +820,7 @@ int pskb_expand_head(struct sk_buff *skb
- offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-- get_page(skb_shinfo(skb)->frags[i].page);
-+ net_get_page(skb_shinfo(skb)->frags[i].page);
+@@ -819,7 +819,7 @@ int pskb_expand_head(struct sk_buff *skb
+ kfree(skb->head);
+ } else {
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+- get_page(skb_shinfo(skb)->frags[i].page);
++ net_get_page(skb_shinfo(skb)->frags[i].page);
- if (skb_has_frags(skb))
- skb_clone_fraglist(skb);
+ if (skb_has_frag_list(skb))
+ skb_clone_fraglist(skb);
@@ -1097,7 +1097,7 @@ drop_pages:
skb_shinfo(skb)->nr_frags = i;
@@ -665,7 +664,7 @@ diff -upkr linux-2.6.36/net/core/skbuff.c linux-2.6.36/net/core/skbuff.c
- put_page(skb_shinfo(skb)->frags[i].page);
+ net_put_page(skb_shinfo(skb)->frags[i].page);
- if (skb_has_frags(skb))
+ if (skb_has_frag_list(skb))
skb_drop_fraglist(skb);
@@ -1266,7 +1266,7 @@ pull_pages:
k = 0;
@@ -739,7 +738,7 @@ diff -upkr linux-2.6.36/net/core/skbuff.c linux-2.6.36/net/core/skbuff.c
}
/* Reposition in the original skb */
-@@ -2601,7 +2601,7 @@ struct sk_buff *skb_segment(struct sk_bu
+@@ -2598,7 +2598,7 @@ struct sk_buff *skb_segment(struct sk_bu
while (pos < offset + len && i < nfrags) {
*frag = skb_shinfo(skb)->frags[i];
@@ -748,19 +747,19 @@ diff -upkr linux-2.6.36/net/core/skbuff.c linux-2.6.36/net/core/skbuff.c
size = frag->size;
if (pos < offset) {
-diff -upkr linux-2.6.36/net/ipv4/ip_output.c linux-2.6.36/net/ipv4/ip_output.c
---- linux-2.6.36/net/ipv4/ip_output.c 2010-10-21 00:30:22.000000000 +0400
-+++ linux-2.6.36/net/ipv4/ip_output.c 2010-10-26 12:01:40.655752708 +0400
-@@ -1040,7 +1040,7 @@ alloc_new_skb:
+diff -upkr linux-2.6.39/net/ipv4/ip_output.c linux-2.6.39/net/ipv4/ip_output.c
+--- linux-2.6.39/net/ipv4/ip_output.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39/net/ipv4/ip_output.c 2011-05-19 10:47:39.565813000 -0400
+@@ -985,7 +985,7 @@ alloc_new_skb:
err = -EMSGSIZE;
goto error;
}
- get_page(page);
+ net_get_page(page);
- skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
+ skb_fill_page_desc(skb, i, page, off, 0);
frag = &skb_shinfo(skb)->frags[i];
}
-@@ -1199,7 +1199,7 @@ ssize_t ip_append_page(struct sock *sk,
+@@ -1220,7 +1220,7 @@ ssize_t ip_append_page(struct sock *sk,
if (skb_can_coalesce(skb, i, page, offset)) {
skb_shinfo(skb)->frags[i-1].size += len;
} else if (i < MAX_SKB_FRAGS) {
@@ -769,10 +768,10 @@ diff -upkr linux-2.6.36/net/ipv4/ip_output.c linux-2.6.36/net/ipv4/ip_output.c
skb_fill_page_desc(skb, i, page, offset, len);
} else {
err = -EMSGSIZE;
-diff -upkr linux-2.6.36/net/ipv4/Makefile linux-2.6.36/net/ipv4/Makefile
---- linux-2.6.36/net/ipv4/Makefile 2010-10-21 00:30:22.000000000 +0400
-+++ linux-2.6.36/net/ipv4/Makefile 2010-10-26 12:01:40.655752708 +0400
-@@ -49,6 +49,7 @@ obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
+diff -upkr linux-2.6.39/net/ipv4/Makefile linux-2.6.39/net/ipv4/Makefile
+--- linux-2.6.39/net/ipv4/Makefile 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39/net/ipv4/Makefile 2011-05-19 10:46:24.669812999 -0400
+@@ -48,6 +48,7 @@ obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
@@ -780,10 +779,10 @@ diff -upkr linux-2.6.36/net/ipv4/Makefile linux-2.6.36/net/ipv4/Makefile
obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
xfrm4_output.o
-diff -upkr linux-2.6.36/net/ipv4/tcp.c linux-2.6.36/net/ipv4/tcp.c
---- linux-2.6.36/net/ipv4/tcp.c 2010-10-21 00:30:22.000000000 +0400
-+++ linux-2.6.36/net/ipv4/tcp.c 2010-10-26 12:01:40.659752056 +0400
-@@ -806,7 +806,7 @@ new_segment:
+diff -upkr linux-2.6.39/net/ipv4/tcp.c linux-2.6.39/net/ipv4/tcp.c
+--- linux-2.6.39/net/ipv4/tcp.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39/net/ipv4/tcp.c 2011-05-19 10:46:24.673813002 -0400
+@@ -815,7 +815,7 @@ new_segment:
if (can_coalesce) {
skb_shinfo(skb)->frags[i - 1].size += copy;
} else {
@@ -792,7 +791,7 @@ diff -upkr linux-2.6.36/net/ipv4/tcp.c linux-2.6.36/net/ipv4/tcp.c
skb_fill_page_desc(skb, i, page, offset, copy);
}
-@@ -1015,7 +1015,7 @@ new_segment:
+@@ -1021,7 +1021,7 @@ new_segment:
goto new_segment;
} else if (page) {
if (off == PAGE_SIZE) {
@@ -801,7 +800,7 @@ diff -upkr linux-2.6.36/net/ipv4/tcp.c linux-2.6.36/net/ipv4/tcp.c
TCP_PAGE(sk) = page = NULL;
off = 0;
}
-@@ -1056,9 +1056,9 @@ new_segment:
+@@ -1062,9 +1062,9 @@ new_segment:
} else {
skb_fill_page_desc(skb, i, page, off, copy);
if (TCP_PAGE(sk)) {
@@ -813,10 +812,10 @@ diff -upkr linux-2.6.36/net/ipv4/tcp.c linux-2.6.36/net/ipv4/tcp.c
TCP_PAGE(sk) = page;
}
}
-diff -upkr linux-2.6.36/net/ipv4/tcp_output.c linux-2.6.36/net/ipv4/tcp_output.c
---- linux-2.6.36/net/ipv4/tcp_output.c 2010-10-21 00:30:22.000000000 +0400
-+++ linux-2.6.36/net/ipv4/tcp_output.c 2010-10-26 12:01:40.659752056 +0400
-@@ -1086,7 +1086,7 @@ static void __pskb_trim_head(struct sk_b
+diff -upkr linux-2.6.39/net/ipv4/tcp_output.c linux-2.6.39/net/ipv4/tcp_output.c
+--- linux-2.6.39/net/ipv4/tcp_output.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39/net/ipv4/tcp_output.c 2011-05-19 10:46:24.673813002 -0400
+@@ -1095,7 +1095,7 @@ static void __pskb_trim_head(struct sk_b
k = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
if (skb_shinfo(skb)->frags[i].size <= eat) {
@@ -825,9 +824,9 @@ diff -upkr linux-2.6.36/net/ipv4/tcp_output.c linux-2.6.36/net/ipv4/tcp_output.c
eat -= skb_shinfo(skb)->frags[i].size;
} else {
skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
-diff -upkr linux-2.6.36/net/ipv4/tcp_zero_copy.c linux-2.6.36/net/ipv4/tcp_zero_copy.c
---- linux-2.6.36/net/ipv4/tcp_zero_copy.c 2010-10-26 12:02:24.519252006 +0400
-+++ linux-2.6.36/net/ipv4/tcp_zero_copy.c 2010-10-26 12:01:40.659752056 +0400
+diff -upkr linux-2.6.39/net/ipv4/tcp_zero_copy.c linux-2.6.39/net/ipv4/tcp_zero_copy.c
+--- linux-2.6.39/net/ipv4/tcp_zero_copy.c 2011-05-19 10:44:53.685813002 -0400
++++ linux-2.6.39/net/ipv4/tcp_zero_copy.c 2011-05-19 10:46:24.673813002 -0400
@@ -0,0 +1,49 @@
+/*
+ * Support routines for TCP zero copy transmit
@@ -838,7 +837,7 @@ diff -upkr linux-2.6.36/net/ipv4/tcp_zero_copy.c linux-2.6.36/net/ipv4/tcp_zero_
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
-+
++
+#include <linux/skbuff.h>
+
+net_get_page_callback_t net_get_page_callback __read_mostly;
@@ -878,10 +877,10 @@ diff -upkr linux-2.6.36/net/ipv4/tcp_zero_copy.c linux-2.6.36/net/ipv4/tcp_zero_
+ return res;
+}
+EXPORT_SYMBOL(net_set_get_put_page_callbacks);
-diff -upkr linux-2.6.36/net/ipv6/ip6_output.c linux-2.6.36/net/ipv6/ip6_output.c
---- linux-2.6.36/net/ipv6/ip6_output.c 2010-10-21 00:30:22.000000000 +0400
-+++ linux-2.6.36/net/ipv6/ip6_output.c 2010-10-26 12:01:40.659752056 +0400
-@@ -1391,7 +1391,7 @@ alloc_new_skb:
+diff -upkr linux-2.6.39/net/ipv6/ip6_output.c linux-2.6.39/net/ipv6/ip6_output.c
+--- linux-2.6.39/net/ipv6/ip6_output.c 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39/net/ipv6/ip6_output.c 2011-05-19 10:46:24.673813002 -0400
+@@ -1444,7 +1444,7 @@ alloc_new_skb:
err = -EMSGSIZE;
goto error;
}
@@ -890,9 +889,9 @@ diff -upkr linux-2.6.36/net/ipv6/ip6_output.c linux-2.6.36/net/ipv6/ip6_output.c
skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
frag = &skb_shinfo(skb)->frags[i];
}
-diff -upkr linux-2.6.36/net/Kconfig linux-2.6.36/net/Kconfig
---- linux-2.6.36/net/Kconfig 2010-10-21 00:30:22.000000000 +0400
-+++ linux-2.6.36/net/Kconfig 2010-10-26 12:01:40.659752056 +0400
+diff -upkr linux-2.6.39/net/Kconfig linux-2.6.39/net/Kconfig
+--- linux-2.6.39/net/Kconfig 2011-05-19 00:06:34.000000000 -0400
++++ linux-2.6.39/net/Kconfig 2011-05-19 10:46:24.673813002 -0400
@@ -72,6 +72,18 @@ config INET
Short answer: say Y.
@@ -912,427 +911,310 @@ diff -upkr linux-2.6.36/net/Kconfig linux-2.6.36/net/Kconfig
if INET
source "net/ipv4/Kconfig"
source "net/ipv6/Kconfig"
-diff -uprN orig/linux-2.6.36/include/scst/scst_const.h linux-2.6.36/include/scst/scst_const.h
---- orig/linux-2.6.36/include/scst/scst_const.h
-+++ linux-2.6.36/include/scst/scst_const.h
-@@ -0,0 +1,413 @@
-+/*
-+ * include/scst_const.h
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * Contains common SCST constants.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#ifndef __SCST_CONST_H
-+#define __SCST_CONST_H
-+
-+#ifndef GENERATING_UPSTREAM_PATCH
-+/*
-+ * Include <linux/version.h> only when not converting this header file into
-+ * a patch for upstream review because only then the symbol LINUX_VERSION_CODE
-+ * is needed.
-+ */
-+#include <linux/version.h>
-+#endif
-+#include <scsi/scsi.h>
-+
-+#define SCST_CONST_VERSION "$Revision: 2605 $"
-+
-+/*** Shared constants between user and kernel spaces ***/
-+
-+/* Max size of CDB */
-+#define SCST_MAX_CDB_SIZE 16
-+
-+/* Max size of various names */
-+#define SCST_MAX_NAME 50
-+
-+/* Max size of external names, like initiator name */
-+#define SCST_MAX_EXTERNAL_NAME 256
-+
-+/*
-+ * Size of sense sufficient to carry standard sense data.
-+ * Warning! It's allocated on stack!
-+ */
-+#define SCST_STANDARD_SENSE_LEN 18
-+
-+/* Max size of sense */
-+#define SCST_SENSE_BUFFERSIZE 96
-+
-+/*************************************************************
-+ ** Allowed delivery statuses for cmd's delivery_status
-+ *************************************************************/
-+
-+#define SCST_CMD_DELIVERY_SUCCESS 0
-+#define SCST_CMD_DELIVERY_FAILED -1
-+#define SCST_CMD_DELIVERY_ABORTED -2
-+
-+/*************************************************************
-+ ** Values for task management functions
-+ *************************************************************/
-+#define SCST_ABORT_TASK 0
-+#define SCST_ABORT_TASK_SET 1
-+#define SCST_CLEAR_ACA 2
-+#define SCST_CLEAR_TASK_SET 3
-+#define SCST_LUN_RESET 4
-+#define SCST_TARGET_RESET 5
-+
-+/** SCST extensions **/
-+
-+/*
-+ * Notifies about I_T nexus loss event in the corresponding session.
-+ * Aborts all tasks there, resets the reservation, if any, and sets
-+ * up the I_T Nexus loss UA.
-+ */
-+#define SCST_NEXUS_LOSS_SESS 6
+diff --git a/drivers/Kconfig b/drivers/Kconfig
+index a2b902f..92e3d67 100644
+--- orig/linux-2.6.39/drivers/Kconfig
++++ linux-2.6.39/drivers/Kconfig
+@@ -22,6 +22,8 @@ source "drivers/ide/Kconfig"
+
+ source "drivers/scsi/Kconfig"
+
++source "drivers/scst/Kconfig"
+
-+/* Aborts all tasks in the corresponding session */
-+#define SCST_ABORT_ALL_TASKS_SESS 7
+ source "drivers/ata/Kconfig"
+
+ source "drivers/md/Kconfig"
+diff --git a/drivers/Makefile b/drivers/Makefile
+index b423bb1..f780114 100644
+--- orig/linux-2.6.39/drivers/Makefile
++++ linux-2.6.39/drivers/Makefile
+@@ -115,5 +115,6 @@ obj-$(CONFIG_VLYNQ) += vlynq/
+ obj-$(CONFIG_STAGING) += staging/
+ obj-y += platform/
+ obj-y += ieee802154/
++obj-$(CONFIG_SCST) += scst/
+ #common clk code
+ obj-y += clk/
+diff -uprN orig/linux-2.6.39/drivers/scst/Kconfig linux-2.6.39/drivers/scst/Kconfig
+--- orig/linux-2.6.39/drivers/scst/Kconfig
++++ linux-2.6.39/drivers/scst/Kconfig
+@@ -0,0 +1,255 @@
++menu "SCSI target (SCST) support"
+
-+/*
-+ * Notifies about I_T nexus loss event. Aborts all tasks in all sessions
-+ * of the tgt, resets the reservations, if any, and sets up the I_T Nexus
-+ * loss UA.
-+ */
-+#define SCST_NEXUS_LOSS 8
++config SCST
++ tristate "SCSI target (SCST) support"
++ depends on SCSI
++ help
++ SCSI target (SCST) is designed to provide unified, consistent
++ interface between SCSI target drivers and Linux kernel and
++ simplify target drivers development as much as possible. Visit
++ http://scst.sourceforge.net for more info about it.
+
-+/* Aborts all tasks in all sessions of the tgt */
-+#define SCST_ABORT_ALL_TASKS 9
++config SCST_DISK
++ tristate "SCSI target disk support"
++ default SCST
++ depends on SCSI && SCST
++ help
++ SCST pass-through device handler for disk device.
+
-+/*
-+ * Internal TM command issued by SCST in scst_unregister_session(). It is the
-+ * same as SCST_NEXUS_LOSS_SESS, except:
-+ * - it doesn't call task_mgmt_affected_cmds_done()
-+ * - it doesn't call task_mgmt_fn_done()
-+ * - it doesn't queue NEXUS LOSS UA.
-+ *
-+ * Target drivers must NEVER use it!!
-+ */
-+#define SCST_UNREG_SESS_TM 10
++config SCST_TAPE
++ tristate "SCSI target tape support"
++ default SCST
++ depends on SCSI && SCST
++ help
++ SCST pass-through device handler for tape device.
+
-+/*
-+ * Internal TM command issued by SCST in scst_pr_abort_reg(). It aborts all
-+ * tasks from mcmd->origin_pr_cmd->tgt_dev, except mcmd->origin_pr_cmd.
-+ * Additionally:
-+ * - it signals pr_aborting_cmpl completion when all affected
-+ * commands marked as aborted.
-+ * - it doesn't call task_mgmt_affected_cmds_done()
-+ * - it doesn't call task_mgmt_fn_done()
-+ * - it calls mcmd->origin_pr_cmd->scst_cmd_done() when all affected
-+ * commands aborted.
-+ *
-+ * Target drivers must NEVER use it!!
-+ */
-+#define SCST_PR_ABORT_ALL 11
++config SCST_CDROM
++ tristate "SCSI target CDROM support"
++ default SCST
++ depends on SCSI && SCST
++ help
++ SCST pass-through device handler for CDROM device.
+
-+/*************************************************************
-+ ** Values for mgmt cmd's status field. Codes taken from iSCSI
-+ *************************************************************/
-+#define SCST_MGMT_STATUS_SUCCESS 0
-+#define SCST_MGMT_STATUS_TASK_NOT_EXIST -1
-+#define SCST_MGMT_STATUS_LUN_NOT_EXIST -2
-+#define SCST_MGMT_STATUS_FN_NOT_SUPPORTED -5
-+#define SCST_MGMT_STATUS_REJECTED -255
-+#define SCST_MGMT_STATUS_FAILED -129
++config SCST_MODISK
++ tristate "SCSI target MO disk support"
++ default SCST
++ depends on SCSI && SCST
++ help
++ SCST pass-through device handler for MO disk device.
+
-+/*************************************************************
-+ ** SCSI task attribute queue types
-+ *************************************************************/
-+enum scst_cmd_queue_type {
-+ SCST_CMD_QUEUE_UNTAGGED = 0,
-+ SCST_CMD_QUEUE_SIMPLE,
-+ SCST_CMD_QUEUE_ORDERED,
-+ SCST_CMD_QUEUE_HEAD_OF_QUEUE,
-+ SCST_CMD_QUEUE_ACA
-+};
++config SCST_CHANGER
++ tristate "SCSI target changer support"
++ default SCST
++ depends on SCSI && SCST
++ help
++ SCST pass-through device handler for changer device.
+
-+/*************************************************************
-+ ** CDB flags
-+ **
-+ ** Implicit ordered used for commands which need calm environment
-+ ** without any simultaneous activities. For instance, for MODE
-+ ** SELECT it is needed to correctly generate its UA.
-+ *************************************************************/
-+enum scst_cdb_flags {
-+ SCST_TRANSFER_LEN_TYPE_FIXED = 0x0001,
-+ SCST_SMALL_TIMEOUT = 0x0002,
-+ SCST_LONG_TIMEOUT = 0x0004,
-+ SCST_UNKNOWN_LENGTH = 0x0008,
-+ SCST_INFO_VALID = 0x0010, /* must be single bit */
-+ SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED = 0x0020,
-+ SCST_IMPLICIT_HQ = 0x0040,
-+ SCST_IMPLICIT_ORDERED = 0x0080, /* ToDo: remove it's nonsense */
-+ SCST_SKIP_UA = 0x0100,
-+ SCST_WRITE_MEDIUM = 0x0200,
-+ SCST_LOCAL_CMD = 0x0400,
-+ SCST_FULLY_LOCAL_CMD = 0x0800,
-+ SCST_REG_RESERVE_ALLOWED = 0x1000,
-+ SCST_WRITE_EXCL_ALLOWED = 0x2000,
-+ SCST_EXCL_ACCESS_ALLOWED = 0x4000,
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ SCST_TEST_IO_IN_SIRQ_ALLOWED = 0x8000,
-+#endif
-+};
++config SCST_PROCESSOR
++ tristate "SCSI target processor support"
++ default SCST
++ depends on SCSI && SCST
++ help
++ SCST pass-through device handler for processor device.
+
-+/*************************************************************
-+ ** Data direction aliases. Changing it don't forget to change
-+ ** scst_to_tgt_dma_dir as well!!
-+ *************************************************************/
-+#define SCST_DATA_UNKNOWN 0
-+#define SCST_DATA_WRITE 1
-+#define SCST_DATA_READ 2
-+#define SCST_DATA_BIDI (SCST_DATA_WRITE | SCST_DATA_READ)
-+#define SCST_DATA_NONE 4
++config SCST_RAID
++ tristate "SCSI target storage array controller (RAID) support"
++ default SCST
++ depends on SCSI && SCST
++ help
++ SCST pass-through device handler for storage array controller (RAID) device.
+
-+/*************************************************************
-+ ** Default suffix for targets with NULL names
-+ *************************************************************/
-+#define SCST_DEFAULT_TGT_NAME_SUFFIX "_target_"
++config SCST_VDISK
++ tristate "SCSI target virtual disk and/or CDROM support"
++ default SCST
++ depends on SCSI && SCST
++ help
++ SCST device handler for virtual disk and/or CDROM device.
+
-+/*************************************************************
-+ ** Sense manipulation and examination
-+ *************************************************************/
-+#define SCST_LOAD_SENSE(key_asc_ascq) key_asc_ascq
++config SCST_USER
++ tristate "User-space SCSI target driver support"
++ default SCST
++ depends on SCSI && SCST && !HIGHMEM4G && !HIGHMEM64G
++ help
++ The SCST device handler scst_user allows implementing full-featured
++ SCSI target devices in user space.
+
-+#define SCST_SENSE_VALID(sense) ((sense != NULL) && \
-+ ((((const uint8_t *)(sense))[0] & 0x70) == 0x70))
++ If unsure, say "N".
+
-+#define SCST_NO_SENSE(sense) ((sense != NULL) && \
-+ (((const uint8_t *)(sense))[2] == 0))
++config SCST_STRICT_SERIALIZING
++ bool "Strict serialization"
++ depends on SCST
++ help
++ Enable strict SCSI command serialization. When enabled, SCST sends
++ all SCSI commands to the underlying SCSI device synchronously, one
++ after one. This makes task management more reliable, at the cost of
++ a performance penalty. This is most useful for stateful SCSI devices
++ like tapes, where the result of the execution of a command
++ depends on the device settings configured by previous commands. Disk
++ and RAID devices are stateless in most cases. The current SCSI core
++ in Linux doesn't allow aborting all commands reliably if they have
++ been sent asynchronously to a stateful device.
++ Enable this option if you use stateful device(s) and need as much
++ error recovery reliability as possible.
+
-+/*************************************************************
-+ ** Sense data for the appropriate errors. Can be used with
-+ ** scst_set_cmd_error()
-+ *************************************************************/
-+#define scst_sense_no_sense NO_SENSE, 0x00, 0
-+#define scst_sense_hardw_error HARDWARE_ERROR, 0x44, 0
-+#define scst_sense_aborted_command ABORTED_COMMAND, 0x00, 0
-+#define scst_sense_invalid_opcode ILLEGAL_REQUEST, 0x20, 0
-+#define scst_sense_invalid_field_in_cdb ILLEGAL_REQUEST, 0x24, 0
-+#define scst_sense_invalid_field_in_parm_list ILLEGAL_REQUEST, 0x26, 0
-+#define scst_sense_parameter_value_invalid ILLEGAL_REQUEST, 0x26, 2
-+#define scst_sense_invalid_release ILLEGAL_REQUEST, 0x26, 4
-+#define scst_sense_parameter_list_length_invalid \
-+ ILLEGAL_REQUEST, 0x1A, 0
-+#define scst_sense_reset_UA UNIT_ATTENTION, 0x29, 0
-+#define scst_sense_nexus_loss_UA UNIT_ATTENTION, 0x29, 0x7
-+#define scst_sense_saving_params_unsup ILLEGAL_REQUEST, 0x39, 0
-+#define scst_sense_lun_not_supported ILLEGAL_REQUEST, 0x25, 0
-+#define scst_sense_data_protect DATA_PROTECT, 0x00, 0
-+#define scst_sense_miscompare_error MISCOMPARE, 0x1D, 0
-+#define scst_sense_block_out_range_error ILLEGAL_REQUEST, 0x21, 0
-+#define scst_sense_medium_changed_UA UNIT_ATTENTION, 0x28, 0
-+#define scst_sense_read_error MEDIUM_ERROR, 0x11, 0
-+#define scst_sense_write_error MEDIUM_ERROR, 0x03, 0
-+#define scst_sense_not_ready NOT_READY, 0x04, 0x10
-+#define scst_sense_invalid_message ILLEGAL_REQUEST, 0x49, 0
-+#define scst_sense_cleared_by_another_ini_UA UNIT_ATTENTION, 0x2F, 0
-+#define scst_sense_capacity_data_changed UNIT_ATTENTION, 0x2A, 0x9
-+#define scst_sense_reservation_preempted UNIT_ATTENTION, 0x2A, 0x03
-+#define scst_sense_reservation_released UNIT_ATTENTION, 0x2A, 0x04
-+#define scst_sense_registrations_preempted UNIT_ATTENTION, 0x2A, 0x05
-+#define scst_sense_reported_luns_data_changed UNIT_ATTENTION, 0x3F, 0xE
-+#define scst_sense_inquery_data_changed UNIT_ATTENTION, 0x3F, 0x3
++ If unsure, say "N".
+
-+/*************************************************************
-+ * SCSI opcodes not listed anywhere else
-+ *************************************************************/
-+#define REPORT_DEVICE_IDENTIFIER 0xA3
-+#define INIT_ELEMENT_STATUS 0x07
-+#define INIT_ELEMENT_STATUS_RANGE 0x37
-+#define PREVENT_ALLOW_MEDIUM 0x1E
-+#define READ_ATTRIBUTE 0x8C
-+#define REQUEST_VOLUME_ADDRESS 0xB5
-+#define WRITE_ATTRIBUTE 0x8D
-+#define WRITE_VERIFY_16 0x8E
-+#define VERIFY_6 0x13
-+#ifndef VERIFY_12
-+#define VERIFY_12 0xAF
-+#endif
-+#ifndef GENERATING_UPSTREAM_PATCH
-+/*
-+ * The constants below have been defined in the kernel header <scsi/scsi.h>
-+ * and hence are not needed when this header file is included in kernel code.
-+ * The definitions below are only used when this header file is included during
-+ * compilation of SCST's user space components.
-+ */
-+#ifndef READ_16
-+#define READ_16 0x88
-+#endif
-+#ifndef WRITE_16
-+#define WRITE_16 0x8a
-+#endif
-+#ifndef VERIFY_16
-+#define VERIFY_16 0x8f
-+#endif
-+#ifndef SERVICE_ACTION_IN
-+#define SERVICE_ACTION_IN 0x9e
-+#endif
-+#ifndef SAI_READ_CAPACITY_16
-+/* values for service action in */
-+#define SAI_READ_CAPACITY_16 0x10
-+#endif
-+#endif
-+#ifndef GENERATING_UPSTREAM_PATCH
-+#ifndef REPORT_LUNS
-+#define REPORT_LUNS 0xa0
-+#endif
-+#endif
++config SCST_STRICT_SECURITY
++ bool "Strict security"
++ depends on SCST
++ help
++ Makes SCST clear (zero-fill) allocated data buffers. Note: this has a
++ significant performance penalty.
+
-+/*************************************************************
-+ ** SCSI Architecture Model (SAM) Status codes. Taken from SAM-3 draft
-+ ** T10/1561-D Revision 4 Draft dated 7th November 2002.
-+ *************************************************************/
-+#define SAM_STAT_GOOD 0x00
-+#define SAM_STAT_CHECK_CONDITION 0x02
-+#define SAM_STAT_CONDITION_MET 0x04
-+#define SAM_STAT_BUSY 0x08
-+#define SAM_STAT_INTERMEDIATE 0x10
-+#define SAM_STAT_INTERMEDIATE_CONDITION_MET 0x14
-+#define SAM_STAT_RESERVATION_CONFLICT 0x18
-+#define SAM_STAT_COMMAND_TERMINATED 0x22 /* obsolete in SAM-3 */
-+#define SAM_STAT_TASK_SET_FULL 0x28
-+#define SAM_STAT_ACA_ACTIVE 0x30
-+#define SAM_STAT_TASK_ABORTED 0x40
++ If unsure, say "N".
+
-+/*************************************************************
-+ ** Control byte field in CDB
-+ *************************************************************/
-+#define CONTROL_BYTE_LINK_BIT 0x01
-+#define CONTROL_BYTE_NACA_BIT 0x04
++config SCST_TEST_IO_IN_SIRQ
++ bool "Allow test I/O from soft-IRQ context"
++ depends on SCST
++ help
++ Allows SCST to submit selected SCSI commands (TUR and
++ READ/WRITE) from soft-IRQ context (tasklets). Enabling it will
++ decrease the number of context switches and slightly improve
++ performance. The goal of this option is to be able to measure
++ overhead of the context switches. See more info about it in
++ README.scst.
+
-+/*************************************************************
-+ ** Byte 1 in INQUIRY CDB
-+ *************************************************************/
-+#define SCST_INQ_EVPD 0x01
++ WARNING! Improperly used, this option can lead you to a kernel crash!
+
-+/*************************************************************
-+ ** Byte 3 in Standard INQUIRY data
-+ *************************************************************/
-+#define SCST_INQ_BYTE3 3
++ If unsure, say "N".
+
-+#define SCST_INQ_NORMACA_BIT 0x20
++config SCST_ABORT_CONSIDER_FINISHED_TASKS_AS_NOT_EXISTING
++ bool "Send back UNKNOWN TASK when an already finished task is aborted"
++ depends on SCST
++ help
++ Controls which response is sent by SCST to the initiator in case
++ the initiator attempts to abort (ABORT TASK) an already finished
++ request. If this option is enabled, the response UNKNOWN TASK is
++ sent back to the initiator. However, some initiators, particularly
++ the VMware iSCSI initiator, interpret the UNKNOWN TASK response as
++ if the target got crazy and try to RESET it. Then sometimes the
++ initiator gets crazy itself.
+
-+/*************************************************************
-+ ** Byte 2 in RESERVE_10 CDB
-+ *************************************************************/
-+#define SCST_RES_3RDPTY 0x10
-+#define SCST_RES_LONGID 0x02
++ If unsure, say "N".
+
-+/*************************************************************
-+ ** Values for the control mode page TST field
-+ *************************************************************/
-+#define SCST_CONTR_MODE_ONE_TASK_SET 0
-+#define SCST_CONTR_MODE_SEP_TASK_SETS 1
++config SCST_USE_EXPECTED_VALUES
++ bool "Prefer initiator-supplied SCSI command attributes"
++ depends on SCST
++ help
++ When SCST receives a SCSI command from an initiator, such a SCSI
++ command has both data transfer length and direction attributes.
++ There are two possible sources for these attributes: either the
++ values computed by SCST from its internal command translation table
++ or the values supplied by the initiator. The former are used by
++ default because of security reasons. Invalid initiator-supplied
++ attributes can crash the target, especially in pass-through mode.
++ Only consider enabling this option when SCST logs the following
++ message: "Unknown opcode XX for YY. Should you update
++ scst_scsi_op_table?" and when the initiator complains. Please
++ report any unrecognized commands to scst-devel@lists.sourceforge.net.
+
-+/*******************************************************************
-+ ** Values for the control mode page QUEUE ALGORITHM MODIFIER field
-+ *******************************************************************/
-+#define SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER 0
-+#define SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER 1
++ If unsure, say "N".
+
-+/*************************************************************
-+ ** Values for the control mode page D_SENSE field
-+ *************************************************************/
-+#define SCST_CONTR_MODE_FIXED_SENSE 0
-+#define SCST_CONTR_MODE_DESCR_SENSE 1
++config SCST_EXTRACHECKS
++ bool "Extra consistency checks"
++ depends on SCST
++ help
++ Enable additional consistency checks in the SCSI middle level target
++ code. This may be helpful for SCST developers. Enable it if you have
++ any problems.
+
-+/*************************************************************
-+ ** TransportID protocol identifiers
-+ *************************************************************/
++ If unsure, say "N".
+
-+#define SCSI_TRANSPORTID_PROTOCOLID_FCP2 0
-+#define SCSI_TRANSPORTID_PROTOCOLID_SPI5 1
-+#define SCSI_TRANSPORTID_PROTOCOLID_SRP 4
-+#define SCSI_TRANSPORTID_PROTOCOLID_ISCSI 5
-+#define SCSI_TRANSPORTID_PROTOCOLID_SAS 6
++config SCST_TRACING
++ bool "Tracing support"
++ depends on SCST
++ default y
++ help
++ Enable SCSI middle level tracing support. Tracing can be controlled
++ dynamically via sysfs interface. The traced information
++ is sent to the kernel log and may be very helpful when analyzing
++ the cause of a communication problem between initiator and target.
+
-+/*************************************************************
-+ ** Misc SCSI constants
-+ *************************************************************/
-+#define SCST_SENSE_ASC_UA_RESET 0x29
-+#define BYTCHK 0x02
-+#define POSITION_LEN_SHORT 20
-+#define POSITION_LEN_LONG 32
++ If unsure, say "Y".
+
-+/*************************************************************
-+ ** Various timeouts
-+ *************************************************************/
-+#define SCST_DEFAULT_TIMEOUT (60 * HZ)
++config SCST_DEBUG
++ bool "Debugging support"
++ depends on SCST
++ select DEBUG_BUGVERBOSE
++ help
++ Enables support for debugging SCST. This may be helpful for SCST
++ developers.
+
-+#define SCST_GENERIC_CHANGER_TIMEOUT (3 * HZ)
-+#define SCST_GENERIC_CHANGER_LONG_TIMEOUT (14000 * HZ)
++ If unsure, say "N".
+
-+#define SCST_GENERIC_PROCESSOR_TIMEOUT (3 * HZ)
-+#define SCST_GENERIC_PROCESSOR_LONG_TIMEOUT (14000 * HZ)
++config SCST_DEBUG_OOM
++ bool "Out-of-memory debugging support"
++ depends on SCST
++ help
++ Let SCST's internal memory allocation function
++ (scst_alloc_sg_entries()) fail about once in every 10000 calls, at
++ least if the flag __GFP_NOFAIL has not been set. This allows SCST
++ developers to test the behavior of SCST in out-of-memory conditions.
++ This may be helpful for SCST developers.
+
-+#define SCST_GENERIC_TAPE_SMALL_TIMEOUT (3 * HZ)
-+#define SCST_GENERIC_TAPE_REG_TIMEOUT (900 * HZ)
-+#define SCST_GENERIC_TAPE_LONG_TIMEOUT (14000 * HZ)
++ If unsure, say "N".
+
-+#define SCST_GENERIC_MODISK_SMALL_TIMEOUT (3 * HZ)
-+#define SCST_GENERIC_MODISK_REG_TIMEOUT (900 * HZ)
-+#define SCST_GENERIC_MODISK_LONG_TIMEOUT (14000 * HZ)
++config SCST_DEBUG_RETRY
++ bool "SCSI command retry debugging support"
++ depends on SCST
++ help
++ Let SCST's internal SCSI command transfer function
++ (scst_rdy_to_xfer()) fail about once in every 100 calls. This allows
++ SCST developers to test the behavior of SCST when SCSI queues fill
++ up. This may be helpful for SCST developers.
+
-+#define SCST_GENERIC_DISK_SMALL_TIMEOUT (3 * HZ)
-+#define SCST_GENERIC_DISK_REG_TIMEOUT (60 * HZ)
-+#define SCST_GENERIC_DISK_LONG_TIMEOUT (3600 * HZ)
++ If unsure, say "N".
+
-+#define SCST_GENERIC_RAID_TIMEOUT (3 * HZ)
-+#define SCST_GENERIC_RAID_LONG_TIMEOUT (14000 * HZ)
++config SCST_DEBUG_SN
++ bool "SCSI sequence number debugging support"
++ depends on SCST
++ help
++ Allows to test SCSI command ordering via sequence numbers by
++ randomly changing the type of SCSI commands into
++ SCST_CMD_QUEUE_ORDERED, SCST_CMD_QUEUE_HEAD_OF_QUEUE or
++ SCST_CMD_QUEUE_SIMPLE for about one in 300 SCSI commands.
++ This may be helpful for SCST developers.
+
-+#define SCST_GENERIC_CDROM_SMALL_TIMEOUT (3 * HZ)
-+#define SCST_GENERIC_CDROM_REG_TIMEOUT (900 * HZ)
-+#define SCST_GENERIC_CDROM_LONG_TIMEOUT (14000 * HZ)
++ If unsure, say "N".
+
-+#define SCST_MAX_OTHER_TIMEOUT (14000 * HZ)
++config SCST_DEBUG_TM
++ bool "Task management debugging support"
++ depends on SCST_DEBUG
++ help
++ Enables support for debugging of SCST's task management functions.
++ When enabled, some of the commands on LUN 0 in the default access
++ control group will be delayed for about 60 seconds. This will
++ cause the remote initiator to send SCSI task management functions,
++ e.g. ABORT TASK and TARGET RESET.
+
-+/*************************************************************
-+ ** I/O grouping attribute string values. Must match constants
-+ ** w/o '_STR' suffix!
-+ *************************************************************/
-+#define SCST_IO_GROUPING_AUTO_STR "auto"
-+#define SCST_IO_GROUPING_THIS_GROUP_ONLY_STR "this_group_only"
-+#define SCST_IO_GROUPING_NEVER_STR "never"
++ If unsure, say "N".
+
-+/*************************************************************
-+ ** Threads pool type attribute string values.
-+ ** Must match scst_dev_type_threads_pool_type!
-+ *************************************************************/
-+#define SCST_THREADS_POOL_PER_INITIATOR_STR "per_initiator"
-+#define SCST_THREADS_POOL_SHARED_STR "shared"
++config SCST_TM_DBG_GO_OFFLINE
++ bool "Let devices become completely unresponsive"
++ depends on SCST_DEBUG_TM
++ help
++ Enable this option if you want the device to eventually become
++ completely unresponsive. When disabled, the device will receive
++ ABORT and RESET commands.
+
-+/*************************************************************
-+ ** Misc constants
-+ *************************************************************/
-+#define SCST_SYSFS_BLOCK_SIZE PAGE_SIZE
++config SCST_MEASURE_LATENCY
++ bool "Commands processing latency measurement facility"
++ depends on SCST
++ help
++ This option enables commands processing latency measurement
++ facility in SCST. It will provide in the sysfs interface
++ average commands processing latency statistics. You can clear
++ already measured results by writing 0 in the corresponding sysfs file.
++ Note, you need a non-preemptible kernel to have correct results.
+
-+#define SCST_PR_DIR "/var/lib/scst/pr"
++ If unsure, say "N".
+
-+#define TID_COMMON_SIZE 24
++source "drivers/scst/iscsi-scst/Kconfig"
++source "drivers/scst/scst_local/Kconfig"
++source "drivers/scst/srpt/Kconfig"
+
-+#define SCST_SYSFS_KEY_MARK "[key]"
++endmenu
+diff -uprN orig/linux-2.6.39/drivers/scst/Makefile linux-2.6.39/drivers/scst/Makefile
+--- orig/linux-2.6.39/drivers/scst/Makefile
++++ linux-2.6.39/drivers/scst/Makefile
+@@ -0,0 +1,13 @@
++ccflags-y += -Wno-unused-parameter
+
-+#define SCST_MIN_REL_TGT_ID 1
-+#define SCST_MAX_REL_TGT_ID 65535
++scst-y += scst_main.o
++scst-y += scst_pres.o
++scst-y += scst_targ.o
++scst-y += scst_lib.o
++scst-y += scst_sysfs.o
++scst-y += scst_mem.o
++scst-y += scst_tg.o
++scst-y += scst_debug.o
+
-+#endif /* __SCST_CONST_H */
-diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.h
---- orig/linux-2.6.36/include/scst/scst.h
-+++ linux-2.6.36/include/scst/scst.h
-@@ -0,0 +1,3524 @@
++obj-$(CONFIG_SCST) += scst.o dev_handlers/ iscsi-scst/ qla2xxx-target/ \
++ srpt/ scst_local/
+diff -uprN orig/linux-2.6.39/include/scst/scst.h linux-2.6.39/include/scst/scst.h
+--- orig/linux-2.6.39/include/scst/scst.h
++++ linux-2.6.39/include/scst/scst.h
+@@ -0,0 +1,3868 @@
+/*
+ * include/scst.h
+ *
@@ -1340,6 +1222,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ * Copyright (C) 2004 - 2005 Leonid Stoljar
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
+ * Copyright (C) 2010 - 2011 SCST Ltd.
++ * Copyright (C) 2010 - 2011 Bart Van Assche <bvanassche@acm.org>.
+ *
+ * Main SCSI target mid-level include file.
+ *
@@ -1362,8 +1245,8 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
++#include <linux/cpumask.h>
+
-+/* #define CONFIG_SCST_PROC */
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
@@ -1374,18 +1257,8 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+
+#include <scst/scst_sgv.h>
+
-+/*
-+ * Version numbers, the same as for the kernel.
-+ *
-+ * Changing it don't forget to change SCST_FIO_REV in scst_vdisk.c
-+ * and FIO_REV in usr/fileio/common.h as well.
-+ */
-+#define SCST_VERSION(a, b, c, d) (((a) << 24) + ((b) << 16) + ((c) << 8) + d)
-+#define SCST_VERSION_CODE SCST_VERSION(2, 0, 0, 1)
-+#define SCST_VERSION_STRING_SUFFIX
-+#define SCST_VERSION_STRING "2.0.0.1" SCST_VERSION_STRING_SUFFIX
+#define SCST_INTERFACE_VERSION \
-+ SCST_VERSION_STRING "$Revision: 3165 $" SCST_CONST_VERSION
++ SCST_VERSION_STRING "$Revision: 3836 $" SCST_CONST_VERSION
+
+#define SCST_LOCAL_NAME "scst_local"
+
@@ -1414,32 +1287,26 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+/* Cmd is going to be sent for execution */
+#define SCST_CMD_STATE_SEND_FOR_EXEC 5
+
-+/* Cmd is being checked if it should be executed locally */
-+#define SCST_CMD_STATE_LOCAL_EXEC 6
-+
-+/* Cmd is ready for execution */
-+#define SCST_CMD_STATE_REAL_EXEC 7
-+
+/* Internal post-exec checks */
-+#define SCST_CMD_STATE_PRE_DEV_DONE 8
++#define SCST_CMD_STATE_PRE_DEV_DONE 6
+
+/* Internal MODE SELECT pages related checks */
-+#define SCST_CMD_STATE_MODE_SELECT_CHECKS 9
++#define SCST_CMD_STATE_MODE_SELECT_CHECKS 7
+
+/* Dev handler's dev_done() is going to be called */
-+#define SCST_CMD_STATE_DEV_DONE 10
++#define SCST_CMD_STATE_DEV_DONE 8
+
-+/* Target driver's xmit_response() is going to be called */
-+#define SCST_CMD_STATE_PRE_XMIT_RESP 11
++/* Checks before target driver's xmit_response() is called */
++#define SCST_CMD_STATE_PRE_XMIT_RESP 9
+
+/* Target driver's xmit_response() is going to be called */
-+#define SCST_CMD_STATE_XMIT_RESP 12
++#define SCST_CMD_STATE_XMIT_RESP 10
+
+/* Cmd finished */
-+#define SCST_CMD_STATE_FINISHED 13
++#define SCST_CMD_STATE_FINISHED 11
+
+/* Internal cmd finished */
-+#define SCST_CMD_STATE_FINISHED_INTERNAL 14
++#define SCST_CMD_STATE_FINISHED_INTERNAL 12
+
+#define SCST_CMD_STATE_LAST_ACTIVE (SCST_CMD_STATE_FINISHED_INTERNAL+100)
+
@@ -1455,20 +1322,32 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+/* Waiting for data from the initiator (until scst_rx_data() called) */
+#define SCST_CMD_STATE_DATA_WAIT (SCST_CMD_STATE_LAST_ACTIVE+4)
+
++/*
++ * Cmd is ready for exec (after check if its device is blocked or should
++ * be blocked)
++ */
++#define SCST_CMD_STATE_START_EXEC (SCST_CMD_STATE_LAST_ACTIVE+5)
++
++/* Cmd is being checked if it should be executed locally */
++#define SCST_CMD_STATE_LOCAL_EXEC (SCST_CMD_STATE_LAST_ACTIVE+6)
++
++/* Cmd is ready for execution */
++#define SCST_CMD_STATE_REAL_EXEC (SCST_CMD_STATE_LAST_ACTIVE+7)
++
+/* Waiting for CDB's execution finish */
-+#define SCST_CMD_STATE_REAL_EXECUTING (SCST_CMD_STATE_LAST_ACTIVE+5)
++#define SCST_CMD_STATE_REAL_EXECUTING (SCST_CMD_STATE_LAST_ACTIVE+8)
+
+/* Waiting for response's transmission finish */
-+#define SCST_CMD_STATE_XMIT_WAIT (SCST_CMD_STATE_LAST_ACTIVE+6)
++#define SCST_CMD_STATE_XMIT_WAIT (SCST_CMD_STATE_LAST_ACTIVE+9)
+
+/*************************************************************
-+ * Can be retuned instead of cmd's state by dev handlers'
++ * Can be returned instead of cmd's state by dev handlers'
+ * functions, if the command's state should be set by default
+ *************************************************************/
+#define SCST_CMD_STATE_DEFAULT 500
+
+/*************************************************************
-+ * Can be retuned instead of cmd's state by dev handlers'
++ * Can be returned instead of cmd's state by dev handlers'
+ * functions, if it is impossible to complete requested
+ * task in atomic context. The cmd will be restarted in thread
+ * context.
@@ -1476,7 +1355,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+#define SCST_CMD_STATE_NEED_THREAD_CTX 1000
+
+/*************************************************************
-+ * Can be retuned instead of cmd's state by dev handlers'
++ * Can be returned instead of cmd's state by dev handlers'
+ * parse function, if the cmd processing should be stopped
+ * for now. The cmd will be restarted by dev handlers itself.
+ *************************************************************/
@@ -1614,6 +1493,11 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ */
+#define SCST_AEN_SCSI 0
+
++/*
++ * Notifies that CPU affinity mask on the corresponding session changed
++ */
++#define SCST_AEN_CPU_MASK_CHANGED 1
++
+/*************************************************************
+ ** Allowed return/status codes for report_aen() callback and
+ ** scst_set_aen_delivery_status() function
@@ -1652,7 +1536,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+#define SCST_TGT_RES_FATAL_ERROR -3
+
+/*************************************************************
-+ ** Allowed return codes for dev handler's exec()
++ ** Return codes for dev handler's exec()
+ *************************************************************/
+
+/* The cmd is done, go to other ones */
@@ -1661,15 +1545,6 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+/* The cmd should be sent to SCSI mid-level */
+#define SCST_EXEC_NOT_COMPLETED 1
+
-+/*
-+ * Set if cmd is finished and there is status/sense to be sent.
-+ * The status should be not sent (i.e. the flag not set) if the
-+ * possibility to perform a command in "chunks" (i.e. with multiple
-+ * xmit_response()/rdy_to_xfer()) is used (not implemented yet).
-+ * Obsolete, use scst_cmd_get_is_send_status() instead.
-+ */
-+#define SCST_TSC_FLAG_STATUS 0x2
-+
+/*************************************************************
+ ** Additional return code for dev handler's task_mgmt_fn()
+ *************************************************************/
@@ -1745,14 +1620,14 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+/* Set if tgt_dev is RESERVED by another session */
+#define SCST_TGT_DEV_RESERVED 1
+
-+/* Set if the corresponding context is atomic */
++/* Set if the corresponding context should be atomic */
+#define SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC 5
+#define SCST_TGT_DEV_AFTER_EXEC_ATOMIC 6
+
+#define SCST_TGT_DEV_CLUST_POOL 11
+
+/*************************************************************
-+ ** I/O groupping types. Changing them don't forget to change
++ ** I/O grouping types. Changing them don't forget to change
+ ** the corresponding *_STR values in scst_const.h!
+ *************************************************************/
+
@@ -1772,14 +1647,9 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+/*************************************************************
+ ** Kernel cache creation helper
+ *************************************************************/
-+#ifndef KMEM_CACHE
-+#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
-+ sizeof(struct __struct), __alignof__(struct __struct),\
-+ (__flags), NULL, NULL)
-+#endif
+
+/*************************************************************
-+ ** Vlaid_mask constants for scst_analyze_sense()
++ ** Valid_mask constants for scst_analyze_sense()
+ *************************************************************/
+
+#define SCST_SENSE_KEY_VALID 1
@@ -1860,6 +1730,18 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ unsigned enabled_attr_not_needed:1;
+
+ /*
++ * True if SCST should report that it supports ACA although it does
++ * not yet support ACA. Necessary for the IBM virtual SCSI target
++ * driver.
++ */
++ unsigned fake_aca:1;
++
++ /*
++ * Preferred SCSI LUN addressing method.
++ */
++ enum scst_lun_addr_method preferred_addr_method;
++
++ /*
+ * The maximum time in seconds cmd can stay inside the target
+ * hardware, i.e. after rdy_to_xfer() and xmit_response(), before
+ * on_hw_pending_cmd_timeout() will be called, if defined.
@@ -1942,7 +1824,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ * double buffer allocation and memory leaks alloc_data_buf() shall
+ * fail.
+ *
-+ * Shall return 0 in case of success or < 0 (preferrably -ENOMEM)
++ * Shall return 0 in case of success or < 0 (preferably -ENOMEM)
+ * in case of error, or > 0 if the regular SCST allocation should be
+ * done. In case of returning successfully,
+ * scst_cmd->tgt_data_buf_alloced will be set by SCST.
@@ -2030,6 +1912,15 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ void (*task_mgmt_fn_done) (struct scst_mgmt_cmd *mgmt_cmd);
+
+ /*
++ * Called to notify target driver that the command is being aborted.
++ * If target driver wants to redirect processing to some outside
++ * processing, it should get it using scst_cmd_get().
++ *
++ * OPTIONAL
++ */
++ void (*on_abort_cmd) (struct scst_cmd *cmd);
++
++ /*
+ * This function should detect the target adapters that
+ * are present in the system. The function should return a value
+ * >= 0 to signify the number of detected target adapters.
@@ -2071,20 +1962,20 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+
+ /*
+ * This function returns in tr_id the corresponding to sess initiator
-+ * port TransporID in the form as it's used by PR commands, see
++ * port TransportID in the form as it's used by PR commands, see
+ * "Transport Identifiers" in SPC. Space for the initiator port
-+ * TransporID must be allocated via kmalloc(). Caller supposed to
++ * TransportID must be allocated via kmalloc(). Caller supposed to
+ * kfree() it, when it isn't needed anymore.
+ *
+ * If sess is NULL, this function must return TransportID PROTOCOL
-+ * IDENTIFIER of this transport.
++ * IDENTIFIER for the requested target.
+ *
+ * Returns 0 on success or negative error code otherwise.
+ *
+ * SHOULD HAVE, because it's required for Persistent Reservations.
+ */
-+ int (*get_initiator_port_transport_id) (struct scst_session *sess,
-+ uint8_t **transport_id);
++ int (*get_initiator_port_transport_id) (struct scst_tgt *tgt,
++ struct scst_session *sess, uint8_t **transport_id);
+
+ /*
+ * This function allows to enable or disable particular target.
@@ -2229,8 +2120,51 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ int tgtt_active_sysfs_works_count;
+
+ /* sysfs release completion */
-+ struct completion tgtt_kobj_release_cmpl;
++ struct completion *tgtt_kobj_release_cmpl;
++
++ /*
++ * Optional vendor to be reported via the SCSI inquiry data. If NULL,
++ * an SCST device handler specific default value will be used, e.g.
++ * "SCST_FIO" for scst_vdisk file I/O.
++ */
++ const char *vendor;
++
++ /*
++ * Optional method that sets the product ID in [buf, buf+size) based
++ * on the device type (byte 0 of the SCSI inquiry data, which contains
++ * the peripheral qualifier in the highest three bits and the
++ * peripheral device type in the lower five bits).
++ */
++ void (*get_product_id)(const struct scst_tgt_dev *tgt_dev,
++ char *buf, int size);
++
++ /*
++ * Optional revision to be reported in the SCSI inquiry response. If
++ * NULL, an SCST device handler specific default value will be used,
++ * e.g. " 210" for scst_vdisk file I/O.
++ */
++ const char *revision;
++
++ /*
++ * Optional method that writes the serial number of a target device in
++ * [buf, buf+size) and returns the number of bytes written.
++ *
++ * Note: SCST can be configured such that a device can be accessed
++ * from several different transports at the same time. It is important
++ * that all clients see the same USN for proper operation. Overriding
++ * the serial number can lead to subtle misbehavior. Particularly,
++ * "usn" sysfs attribute of the corresponding devices will still show
++ * the devices generated or assigned serial numbers.
++ */
++ int (*get_serial)(const struct scst_tgt_dev *tgt_dev, char *buf,
++ int size);
+
++ /*
++ * Optional method that writes the SCSI inquiry vendor-specific data in
++ * [buf, buf+size) and returns the number of bytes written.
++ */
++ int (*get_vend_specific)(const struct scst_tgt_dev *tgt_dev, char *buf,
++ int size);
+};
+
+/*
@@ -2356,12 +2290,17 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ * Pay attention to "atomic" attribute of the cmd, which can be get
+ * by scst_cmd_atomic(): it is true if the function called in the
+ * atomic (non-sleeping) context.
++ *
++ * OPTIONAL
+ */
+ int (*dev_done) (struct scst_cmd *cmd);
+
+ /*
+ * Called to notify dev hander that the command is about to be freed.
++ *
+ * Could be called on IRQ context.
++ *
++ * OPTIONAL
+ */
+ void (*on_free_cmd) (struct scst_cmd *cmd);
+
@@ -2369,33 +2308,65 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ * Called to execute a task management command.
+ * Returns:
+ * - SCST_MGMT_STATUS_SUCCESS - the command is done with success,
-+ * no firther actions required
++ * no further actions required
+ * - The SCST_MGMT_STATUS_* error code if the command is failed and
+ * no further actions required
+ * - SCST_DEV_TM_NOT_COMPLETED - regular standard actions for the
+ * command should be done
+ *
-+ * Called without any locks held from a thread context.
++ * Can be called under many internal SCST locks, including under
++ * disabled IRQs, so dev handler should be careful with locking and,
++ * if necessary, pass processing somewhere outside (in a work, e.g.)
++ *
++ * But at the moment it's called under disabled IRQs only for
++ * SCST_ABORT_TASK, however dev handler using it should add a BUG_ON
++ * trap to catch if it's changed in future.
++ *
++ * OPTIONAL
+ */
+ int (*task_mgmt_fn) (struct scst_mgmt_cmd *mgmt_cmd,
+ struct scst_tgt_dev *tgt_dev);
+
+ /*
++ * Called to notify dev handler that its sg_tablesize is too low to
++ * satisfy this command's data transfer requirements. Should return
++ * true if exec() callback will split this command's CDB on smaller
++ * transfers, false otherwise.
++ *
++ * Could be called on SIRQ context.
++ *
++ * MUST HAVE, if dev handler supports CDB splitting.
++ */
++ bool (*on_sg_tablesize_low) (struct scst_cmd *cmd);
++
++ /*
+ * Called when new device is attaching to the dev handler
+ * Returns 0 on success, error code otherwise.
++ *
++ * OPTIONAL
+ */
+ int (*attach) (struct scst_device *dev);
+
-+ /* Called when a device is detaching from the dev handler */
++ /*
++ * Called when a device is detaching from the dev handler.
++ *
++ * OPTIONAL
++ */
+ void (*detach) (struct scst_device *dev);
+
+ /*
+ * Called when new tgt_dev (session) is attaching to the dev handler.
+ * Returns 0 on success, error code otherwise.
++ *
++ * OPTIONAL
+ */
+ int (*attach_tgt) (struct scst_tgt_dev *tgt_dev);
+
-+ /* Called when tgt_dev (session) is detaching from the dev handler */
++ /*
++ * Called when tgt_dev (session) is detaching from the dev handler.
++ *
++ * OPTIONAL
++ */
+ void (*detach_tgt) (struct scst_tgt_dev *tgt_dev);
+
+ /*
@@ -2509,7 +2480,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ int devt_active_sysfs_works_count;
+
+ /* To wait until devt_kobj released */
-+ struct completion devt_kobj_release_compl;
++ struct completion *devt_kobj_release_compl;
+};
+
+/*
@@ -2556,10 +2527,13 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ /* Name of the target */
+ char *tgt_name;
+
++ /* User comment to it to let easier distinguish targets */
++ char *tgt_comment;
++
+ uint16_t rel_tgt_id;
+
+ /* sysfs release completion */
-+ struct completion tgt_kobj_release_cmpl;
++ struct completion *tgt_kobj_release_cmpl;
+
+ struct kobject tgt_kobj; /* main targets/target kobject */
+ struct kobject *tgt_sess_kobj; /* target/sessions/ */
@@ -2567,11 +2541,6 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ struct kobject *tgt_ini_grp_kobj; /* target/ini_groups/ */
+};
+
-+/* Hash size and hash fn for hash based lun translation */
-+#define TGT_DEV_HASH_SHIFT 5
-+#define TGT_DEV_HASH_SIZE (1 << TGT_DEV_HASH_SHIFT)
-+#define HASH_VAL(_val) (_val & (TGT_DEV_HASH_SIZE - 1))
-+
+#ifdef CONFIG_SCST_MEASURE_LATENCY
+
+/* Defines extended latency statistics */
@@ -2601,6 +2570,11 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+
+#endif /* CONFIG_SCST_MEASURE_LATENCY */
+
++struct scst_io_stat_entry {
++ uint64_t cmd_count;
++ uint64_t io_byte_count;
++};
++
+/*
+ * SCST session, analog of SCSI I_T nexus
+ */
@@ -2620,10 +2594,13 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ unsigned long sess_aflags;
+
+ /*
-+ * Hash list of tgt_dev's for this session, protected by scst_mutex
-+ * and suspended activity
++ * Hash list for tgt_dev's for this session with size and fn. It isn't
++ * hlist_entry, because we need ability to go over the list in the
++ * reverse order. Protected by scst_mutex and suspended activity.
+ */
-+ struct list_head sess_tgt_dev_list_hash[TGT_DEV_HASH_SIZE];
++#define SESS_TGT_DEV_LIST_HASH_SIZE (1 << 5)
++#define SESS_TGT_DEV_LIST_HASH_FN(val) ((val) & (SESS_TGT_DEV_LIST_HASH_SIZE - 1))
++ struct list_head sess_tgt_dev_list[SESS_TGT_DEV_LIST_HASH_SIZE];
+
+ /*
+ * List of cmds in this session. Protected by sess_list_lock.
@@ -2644,6 +2621,9 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ */
+ atomic_t sess_cmd_count;
+
++ /* Some statistics. Protected by sess_list_lock. */
++ struct scst_io_stat_entry io_stats[SCST_DATA_DIR_MAX];
++
+ /* Access control for this session and list entry there */
+ struct scst_acg *acg;
+
@@ -2687,7 +2667,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ struct completion *shutdown_compl;
+
+ /* sysfs release completion */
-+ struct completion sess_kobj_release_cmpl;
++ struct completion *sess_kobj_release_cmpl;
+
+ unsigned int sess_kobj_ready:1;
+
@@ -2726,7 +2706,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ */
+ atomic_t pr_abort_pending_cnt;
+
-+ /* Saved completition routine */
++ /* Saved completion routine */
+ void (*saved_cmd_done) (struct scst_cmd *cmd, int next_state,
+ enum scst_exec_context pref_context);
+
@@ -2762,6 +2742,34 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+};
+
+/*
++ * Used to execute cmd's in order of arrival, honoring SCSI task attributes
++ */
++struct scst_order_data {
++ /*
++ * Protected by sn_lock, except expected_sn, which is protected by
++ * itself. Curr_sn must have the same size as expected_sn to
++ * overflow simultaneously.
++ */
++ int def_cmd_count;
++ spinlock_t sn_lock;
++ unsigned int expected_sn;
++ unsigned int curr_sn;
++ int hq_cmd_count;
++ struct list_head deferred_cmd_list;
++ struct list_head skipped_sn_list;
++
++ /*
++ * Set if the prev cmd was ORDERED. Size and, hence, alignment must
++ * allow unprotected modifications independently to the neighbour fields.
++ */
++ unsigned long prev_cmd_ordered;
++
++ int num_free_sn_slots; /* if it's <0, then all slots are busy */
++ atomic_t *cur_sn_slot;
++ atomic_t sn_slots[15];
++};
++
++/*
+ * SCST command, analog of I_T_L_Q nexus or task
+ */
+struct scst_cmd {
@@ -2775,6 +2783,8 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+
+ struct scst_session *sess; /* corresponding session */
+
++ atomic_t *cpu_cmd_counter;
++
+ /* Cmd state, one of SCST_CMD_STATE_* constants */
+ int state;
+
@@ -2812,10 +2822,24 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ /* Set if the device was blocked by scst_check_blocked_dev() */
+ unsigned int unblock_dev:1;
+
++ /* Set if this cmd incremented dev->pr_readers_count */
++ unsigned int dec_pr_readers_count_needed:1;
++
++ /* Set if scst_dec_on_dev_cmd() call is needed on the cmd's finish */
++ unsigned int dec_on_dev_needed:1;
++
+ /* Set if cmd is queued as hw pending */
+ unsigned int cmd_hw_pending:1;
+
+ /*
++ * Set, if for this cmd required to not have any IO or FS calls on
++ * memory buffers allocations, at least for READ and WRITE commands.
++ * Needed for cases like file systems mounted over scst_local's
++ * devices.
++ */
++ unsigned noio_mem_alloc:1;
++
++ /*
+ * Set if the target driver wants to alloc data buffers on its own.
+ * In this case alloc_data_buf() must be provided in the target driver
+ * template.
@@ -2888,9 +2912,18 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ /* Set if cmd is done */
+ unsigned int done:1;
+
-+ /* Set if cmd is finished */
++ /*
++ * Set if cmd is finished. Used under sess_list_lock to sync
++ * between scst_finish_cmd() and scst_abort_cmd()
++ */
+ unsigned int finished:1;
+
++ /*
++ * Set if scst_check_local_events() can be called more than once. Set by
++ * scst_pre_check_local_events().
++ */
++ unsigned int check_local_events_once_done:1;
++
+#ifdef CONFIG_SCST_DEBUG_TM
+ /* Set if the cmd was delayed by task management debugging code */
+ unsigned int tm_dbg_delayed:1;
@@ -2911,7 +2944,10 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ struct scst_tgt *tgt; /* to save extra dereferences */
+ struct scst_device *dev; /* to save extra dereferences */
+
-+ struct scst_tgt_dev *tgt_dev; /* corresponding device for this cmd */
++ /* corresponding I_T_L device for this cmd */
++ struct scst_tgt_dev *tgt_dev;
++
++ struct scst_order_data *cur_order_data; /* to save extra dereferences */
+
+ uint64_t lun; /* LUN for this cmd */
+
@@ -2937,11 +2973,9 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+
+ uint32_t tgt_sn; /* SN set by target driver (for TM purposes) */
+
-+ /* CDB and its len */
-+ uint8_t cdb[SCST_MAX_CDB_SIZE];
++ uint8_t *cdb; /* Pointer on CDB. Points on cdb_buf for small CDBs. */
+ unsigned short cdb_len;
-+ unsigned short ext_cdb_len;
-+ uint8_t *ext_cdb;
++ uint8_t cdb_buf[SCST_MAX_CDB_SIZE];
+
+ enum scst_cdb_flags op_flags;
+ const char *op_name;
@@ -2966,7 +3000,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ */
+ int data_len;
+
-+ /* Completition routine */
++ /* Completion routine */
+ void (*scst_cmd_done) (struct scst_cmd *cmd, int next_state,
+ enum scst_exec_context pref_context);
+
@@ -3002,6 +3036,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ int *write_sg_cnt;
+
+ /* scst_get_sg_buf_[first,next]() support */
++ struct scatterlist *get_sg_buf_cur_sg_entry;
+ int get_sg_buf_entry_num;
+
+ /* Bidirectional transfers support */
@@ -3121,6 +3156,8 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+
+ struct scst_session *sess;
+
++ atomic_t *cpu_cmd_counter;
++
+ /* Mgmt cmd state, one of SCST_MCMD_STATE_* constants */
+ int state;
+
@@ -3158,7 +3195,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ /* corresponding device for this mgmt cmd (found by lun) */
+ struct scst_tgt_dev *mcmd_tgt_dev;
+
-+ /* completition status, one of the SCST_MGMT_STATUS_* constants */
++ /* completion status, one of the SCST_MGMT_STATUS_* constants */
+ int status;
+
+ /* Used for storage of target driver private stuff or origin PR cmd */
@@ -3207,6 +3244,17 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ /* If set, dev is read only */
+ unsigned short rd_only:1;
+
++ /* Set, if a strictly serialized cmd is waiting blocked */
++ unsigned short strictly_serialized_cmd_waiting:1;
++
++ /*
++ * Set, if this device is being unregistered. Useful to let sysfs
++ * attributes know when they should exit immediatelly to prevent
++ * possible deadlocks with their device unregistration waiting for
++ * their kobj last put.
++ */
++ unsigned short dev_unregistering:1;
++
+ /**************************************************************/
+
+ /*************************************************************
@@ -3230,14 +3278,28 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+
+ /**************************************************************/
+
++ /* How many cmds alive on this dev */
++ atomic_t dev_cmd_count;
++
++ spinlock_t dev_lock; /* device lock */
++
+ /*
+ * How many times device was blocked for new cmds execution.
-+ * Protected by dev_lock
++ * Protected by dev_lock.
+ */
+ int block_count;
+
-+ /* How many cmds alive on this dev */
-+ atomic_t dev_cmd_count;
++ /*
++ * How many there are "on_dev" commands, i.e. ones who passed
++ * scst_check_blocked_dev(). Protected by dev_lock.
++ */
++ int on_dev_cmd_count;
++
++ /*
++ * How many threads are checking commands for PR allowance.
++ * Protected by dev_lock.
++ */
++ int pr_readers_count;
+
+ /*
+ * Set if dev is persistently reserved. Protected by dev_pr_mutex.
@@ -3251,12 +3313,6 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ */
+ unsigned int pr_writer_active:1;
+
-+ /*
-+ * How many threads are checking commands for PR allowance. Used to
-+ * implement lockless read-only fast path.
-+ */
-+ atomic_t pr_readers_count;
-+
+ struct scst_dev_type *handler; /* corresponding dev handler */
+
+ /* Used for storage of dev handler private stuff */
@@ -3271,9 +3327,6 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ /* Memory limits for this device */
+ struct scst_mem_lim dev_mem_lim;
+
-+ /* How many write cmds alive on this dev. Temporary, ToDo */
-+ atomic_t write_cmd_count;
-+
+ /*************************************************************
+ ** Persistent reservation fields. Protected by dev_pr_mutex.
+ *************************************************************/
@@ -3309,27 +3362,26 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ */
+ int not_pr_supporting_tgt_devs_num;
+
++ struct scst_order_data dev_order_data;
++
+ /* Persist through power loss files */
+ char *pr_file_name;
+ char *pr_file_name1;
+
+ /**************************************************************/
+
-+ spinlock_t dev_lock; /* device lock */
-+
-+ struct list_head blocked_cmd_list; /* protected by dev_lock */
++ /* List of blocked commands, protected by dev_lock. */
++ struct list_head blocked_cmd_list;
+
+ /* A list entry used during TM, protected by scst_mutex */
+ struct list_head tm_dev_list_entry;
+
-+ /* Virtual device internal ID */
-+ int virt_id;
++ int virt_id; /* virtual device internal ID */
+
+ /* Pointer to virtual device name, for convenience only */
+ char *virt_name;
+
-+ /* List entry in global devices list */
-+ struct list_head dev_list_entry;
++ struct list_head dev_list_entry; /* list entry in global devices list */
+
+ /*
+ * List of tgt_dev's, one per session, protected by scst_mutex or
@@ -3347,7 +3399,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ enum scst_dev_type_threads_pool_type threads_pool_type;
+
+ /* sysfs release completion */
-+ struct completion dev_kobj_release_cmpl;
++ struct completion *dev_kobj_release_cmpl;
+
+ struct kobject dev_kobj; /* kobject for this struct */
+ struct kobject *dev_exp_kobj; /* exported groups */
@@ -3384,7 +3436,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ * SCSI I_T_L nexus.
+ */
+struct scst_tgt_dev {
-+ /* List entry in sess->sess_tgt_dev_list_hash */
++ /* List entry in sess->sess_tgt_dev_list */
+ struct list_head sess_tgt_dev_list_entry;
+
+ struct scst_device *dev; /* to save extra dereferences */
@@ -3406,31 +3458,8 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ /* How many cmds alive on this dev in this session */
+ atomic_t tgt_dev_cmd_count;
+
-+ /*
-+ * Used to execute cmd's in order of arrival, honoring SCSI task
-+ * attributes.
-+ *
-+ * Protected by sn_lock, except expected_sn, which is protected by
-+ * itself. Curr_sn must have the same size as expected_sn to
-+ * overflow simultaneously.
-+ */
-+ int def_cmd_count;
-+ spinlock_t sn_lock;
-+ unsigned int expected_sn;
-+ unsigned int curr_sn;
-+ int hq_cmd_count;
-+ struct list_head deferred_cmd_list;
-+ struct list_head skipped_sn_list;
-+
-+ /*
-+ * Set if the prev cmd was ORDERED. Size and, hence, alignment must
-+ * allow unprotected modifications independently to the neighbour fields.
-+ */
-+ unsigned long prev_cmd_ordered;
-+
-+ int num_free_sn_slots; /* if it's <0, then all slots are busy */
-+ atomic_t *cur_sn_slot;
-+ atomic_t sn_slots[15];
++ struct scst_order_data *curr_order_data;
++ struct scst_order_data tgt_dev_order_data;
+
+ /* List of scst_thr_data_hdr and lock */
+ spinlock_t thr_data_lock;
@@ -3480,7 +3509,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ uint8_t tgt_dev_sense[SCST_SENSE_BUFFERSIZE];
+
+ /* sysfs release completion */
-+ struct completion tgt_dev_kobj_release_cmpl;
++ struct completion *tgt_dev_kobj_release_cmpl;
+
+ struct kobject tgt_dev_kobj; /* kobject for this struct */
+
@@ -3520,7 +3549,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ struct kobject acg_dev_kobj;
+
+ /* sysfs release completion */
-+ struct completion acg_dev_kobj_release_cmpl;
++ struct completion *acg_dev_kobj_release_cmpl;
+
+ /* Name of the link to the corresponding LUN */
+ char acg_dev_link_name[20];
@@ -3549,13 +3578,16 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ /* Name of this acg */
+ const char *acg_name;
+
-+ /* Type of I/O initiators groupping */
++ /* Type of I/O initiators grouping */
+ int acg_io_grouping_type;
+
++ /* CPU affinity for threads in this ACG */
++ cpumask_t acg_cpu_mask;
++
+ unsigned int tgt_acg:1;
+
+ /* sysfs release completion */
-+ struct completion acg_kobj_release_cmpl;
++ struct completion *acg_kobj_release_cmpl;
+
+ /* kobject for this structure */
+ struct kobject acg_kobj;
@@ -3563,7 +3595,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ struct kobject *luns_kobj;
+ struct kobject *initiators_kobj;
+
-+ unsigned int addr_method;
++ enum scst_lun_addr_method addr_method;
+};
+
+/*
@@ -3582,6 +3614,64 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ struct kobj_attribute *acn_attr;
+};
+
++/**
++ * struct scst_dev_group - A group of SCST devices (struct scst_device).
++ *
++ * Each device is member of zero or one device groups. With each device group
++ * there are zero or more target groups associated.
++ */
++struct scst_dev_group {
++ char *name;
++ struct list_head entry;
++ struct list_head dev_list;
++ struct list_head tg_list;
++ struct kobject kobj;
++ struct kobject *dev_kobj;
++ struct kobject *tg_kobj;
++};
++
++/**
++ * struct scst_dg_dev - A node in scst_dev_group.dev_list.
++ */
++struct scst_dg_dev {
++ struct list_head entry;
++ struct scst_device *dev;
++};
++
++/**
++ * struct scst_target_group - A group of SCSI targets (struct scst_tgt).
++ *
++ * Such a group is either a primary target port group or a secondary
++ * port group. See also SPC-4 for more information.
++ */
++struct scst_target_group {
++ struct scst_dev_group *dg;
++ char *name;
++ uint16_t group_id;
++ enum scst_tg_state state;
++ bool preferred;
++ struct list_head entry;
++ struct list_head tgt_list;
++ struct kobject kobj;
++};
++
++/**
++ * struct scst_tg_tgt - A node in scst_target_group.tgt_list.
++ *
++ * Such a node can either represent a local storage target (struct scst_tgt)
++ * or a storage target on another system running SCST. In the former case tgt
++ * != NULL and rel_tgt_id is ignored. In the latter case tgt == NULL and
++ * rel_tgt_id is relevant.
++ */
++struct scst_tg_tgt {
++ struct list_head entry;
++ struct scst_target_group *tg;
++ struct kobject kobj;
++ struct scst_tgt *tgt;
++ char *name;
++ uint16_t rel_tgt_id;
++};
++
+/*
+ * Used to store per-session UNIT ATTENTIONs
+ */
@@ -3698,7 +3788,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ * Notifies SCST that the driver finished the first stage of the command
+ * initialization, and the command is ready for execution, but after
+ * SCST done the command's preprocessing preprocessing_done() function
-+ * should be called. The second argument sets preferred command execition
++ * should be called. The second argument sets preferred command execution
+ * context. See SCST_CONTEXT_* constants for details.
+ *
+ * See comment for scst_cmd_init_done() for the serialization requirements.
@@ -3862,20 +3952,29 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ sess->tgt_priv = val;
+}
+
++uint16_t scst_lookup_tg_id(struct scst_device *dev, struct scst_tgt *tgt);
++bool scst_impl_alua_configured(struct scst_device *dev);
++int scst_tg_get_group_info(void **buf, uint32_t *response_length,
++ struct scst_device *dev, uint8_t data_format);
++
+/**
+ * Returns TRUE if cmd is being executed in atomic context.
+ *
-+ * Note: checkpatch will complain on the use of in_atomic() below. You can
-+ * safely ignore this warning since in_atomic() is used here only for debugging
-+ * purposes.
++ * This function must be used outside of spinlocks and preempt/BH/IRQ
++ * disabled sections, because of the EXTRACHECK in it.
+ */
+static inline bool scst_cmd_atomic(struct scst_cmd *cmd)
+{
+ int res = cmd->atomic;
+#ifdef CONFIG_SCST_EXTRACHECKS
++ /*
++ * Checkpatch will complain on the use of in_atomic() below. You
++ * can safely ignore this warning since in_atomic() is used here
++ * only for debugging purposes.
++ */
+ if (unlikely((in_atomic() || in_interrupt() || irqs_disabled()) &&
+ !res)) {
-+ printk(KERN_ERR "ERROR: atomic context and non-atomic cmd\n");
++ printk(KERN_ERR "ERROR: atomic context and non-atomic cmd!\n");
+ dump_stack();
+ cmd->atomic = 1;
+ res = 1;
@@ -3938,25 +4037,8 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ return cmd->cdb_len;
+}
+
-+/* Returns cmd's extended CDB */
-+static inline const uint8_t *scst_cmd_get_ext_cdb(struct scst_cmd *cmd)
-+{
-+ return cmd->ext_cdb;
-+}
-+
-+/* Returns cmd's extended CDB length */
-+static inline unsigned int scst_cmd_get_ext_cdb_len(struct scst_cmd *cmd)
-+{
-+ return cmd->ext_cdb_len;
-+}
-+
-+/* Sets cmd's extended CDB and its length */
-+static inline void scst_cmd_set_ext_cdb(struct scst_cmd *cmd,
-+ uint8_t *ext_cdb, unsigned int ext_cdb_len)
-+{
-+ cmd->ext_cdb = ext_cdb;
-+ cmd->ext_cdb_len = ext_cdb_len;
-+}
++void scst_cmd_set_ext_cdb(struct scst_cmd *cmd,
++ uint8_t *ext_cdb, unsigned int ext_cdb_len, gfp_t gfp_mask);
+
+/* Returns cmd's session */
+static inline struct scst_session *scst_cmd_get_session(struct scst_cmd *cmd)
@@ -4283,6 +4365,19 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+}
+
+/*
++ * Get/Set functions for noio_mem_alloc
++ */
++static inline bool scst_cmd_get_noio_mem_alloc(struct scst_cmd *cmd)
++{
++ return cmd->noio_mem_alloc;
++}
++
++static inline void scst_cmd_set_noio_mem_alloc(struct scst_cmd *cmd)
++{
++ cmd->noio_mem_alloc = 1;
++}
++
++/*
+ * Returns 1 if the cmd was aborted, so its status is invalid and no
+ * reply shall be sent to the remote initiator. A target driver should
+ * only clear internal resources, associated with cmd.
@@ -4393,7 +4488,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ mcmd->tgt_priv = val;
+}
+
-+/* Returns mgmt cmd's completition status (SCST_MGMT_STATUS_* constants) */
++/* Returns mgmt cmd's completion status (SCST_MGMT_STATUS_* constants) */
+static inline int scst_mgmt_cmd_get_status(struct scst_mgmt_cmd *mcmd)
+{
+ return mcmd->status;
@@ -4481,31 +4576,39 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir);
+
+/*
-+ * Functions for access to the commands data (SG) buffer,
-+ * including HIGHMEM environment. Should be used instead of direct
-+ * access. Returns the mapped buffer length for success, 0 for EOD,
++ * Functions for access to the commands data (SG) buffer. Should be used
++ * instead of direct access. Returns the buffer length for success, 0 for EOD,
+ * negative error code otherwise.
+ *
++ * Never EVER use this function to process only "the first page" of the buffer.
++ * The first SG entry can be as low as few bytes long. Use scst_get_buf_full()
++ * instead for such cases.
++ *
+ * "Buf" argument returns the mapped buffer
+ *
+ * The "put" function unmaps the buffer.
+ */
-+static inline int __scst_get_buf(struct scst_cmd *cmd, struct scatterlist *sg,
-+ int sg_cnt, uint8_t **buf)
++static inline int __scst_get_buf(struct scst_cmd *cmd, int sg_cnt,
++ uint8_t **buf)
+{
+ int res = 0;
-+ int i = cmd->get_sg_buf_entry_num;
-+
-+ *buf = NULL;
++ struct scatterlist *sg = cmd->get_sg_buf_cur_sg_entry;
+
-+ if ((i >= sg_cnt) || unlikely(sg == NULL))
++ if (cmd->get_sg_buf_entry_num >= sg_cnt) {
++ *buf = NULL;
+ goto out;
++ }
++
++ if (unlikely(sg_is_chain(sg)))
++ sg = sg_chain_ptr(sg);
+
-+ *buf = page_address(sg_page(&sg[i]));
-+ *buf += sg[i].offset;
++ *buf = page_address(sg_page(sg));
++ *buf += sg->offset;
++
++ res = sg->length;
+
-+ res = sg[i].length;
+ cmd->get_sg_buf_entry_num++;
++ cmd->get_sg_buf_cur_sg_entry = ++sg;
+
+out:
+ return res;
@@ -4513,14 +4616,19 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+
+static inline int scst_get_buf_first(struct scst_cmd *cmd, uint8_t **buf)
+{
++ if (unlikely(cmd->sg == NULL)) {
++ *buf = NULL;
++ return 0;
++ }
+ cmd->get_sg_buf_entry_num = 0;
++ cmd->get_sg_buf_cur_sg_entry = cmd->sg;
+ cmd->may_need_dma_sync = 1;
-+ return __scst_get_buf(cmd, cmd->sg, cmd->sg_cnt, buf);
++ return __scst_get_buf(cmd, cmd->sg_cnt, buf);
+}
+
+static inline int scst_get_buf_next(struct scst_cmd *cmd, uint8_t **buf)
+{
-+ return __scst_get_buf(cmd, cmd->sg, cmd->sg_cnt, buf);
++ return __scst_get_buf(cmd, cmd->sg_cnt, buf);
+}
+
+static inline void scst_put_buf(struct scst_cmd *cmd, void *buf)
@@ -4530,14 +4638,19 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+
+static inline int scst_get_out_buf_first(struct scst_cmd *cmd, uint8_t **buf)
+{
++ if (unlikely(cmd->out_sg == NULL)) {
++ *buf = NULL;
++ return 0;
++ }
+ cmd->get_sg_buf_entry_num = 0;
++ cmd->get_sg_buf_cur_sg_entry = cmd->out_sg;
+ cmd->may_need_dma_sync = 1;
-+ return __scst_get_buf(cmd, cmd->out_sg, cmd->out_sg_cnt, buf);
++ return __scst_get_buf(cmd, cmd->out_sg_cnt, buf);
+}
+
+static inline int scst_get_out_buf_next(struct scst_cmd *cmd, uint8_t **buf)
+{
-+ return __scst_get_buf(cmd, cmd->out_sg, cmd->out_sg_cnt, buf);
++ return __scst_get_buf(cmd, cmd->out_sg_cnt, buf);
+}
+
+static inline void scst_put_out_buf(struct scst_cmd *cmd, void *buf)
@@ -4548,15 +4661,20 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+static inline int scst_get_sg_buf_first(struct scst_cmd *cmd, uint8_t **buf,
+ struct scatterlist *sg, int sg_cnt)
+{
++ if (unlikely(sg == NULL)) {
++ *buf = NULL;
++ return 0;
++ }
+ cmd->get_sg_buf_entry_num = 0;
++ cmd->get_sg_buf_cur_sg_entry = cmd->sg;
+ cmd->may_need_dma_sync = 1;
-+ return __scst_get_buf(cmd, sg, sg_cnt, buf);
++ return __scst_get_buf(cmd, sg_cnt, buf);
+}
+
+static inline int scst_get_sg_buf_next(struct scst_cmd *cmd, uint8_t **buf,
+ struct scatterlist *sg, int sg_cnt)
+{
-+ return __scst_get_buf(cmd, sg, sg_cnt, buf);
++ return __scst_get_buf(cmd, sg_cnt, buf);
+}
+
+static inline void scst_put_sg_buf(struct scst_cmd *cmd, void *buf,
@@ -4566,6 +4684,92 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+}
+
+/*
++ * Functions for access to the commands data (SG) page. Should be used
++ * instead of direct access. Returns the buffer length for success, 0 for EOD,
++ * negative error code otherwise.
++ *
++ * "Page" argument returns the starting page, "offset" - offset in it.
++ *
++ * The "put" function "puts" the buffer. It should be always be used, because
++ * in future may need to do some additional operations.
++ */
++static inline int __scst_get_sg_page(struct scst_cmd *cmd, int sg_cnt,
++ struct page **page, int *offset)
++{
++ int res = 0;
++ struct scatterlist *sg = cmd->get_sg_buf_cur_sg_entry;
++
++ if (cmd->get_sg_buf_entry_num >= sg_cnt) {
++ *page = NULL;
++ *offset = 0;
++ goto out;
++ }
++
++ if (unlikely(sg_is_chain(sg)))
++ sg = sg_chain_ptr(sg);
++
++ *page = sg_page(sg);
++ *offset = sg->offset;
++ res = sg->length;
++
++ cmd->get_sg_buf_entry_num++;
++ cmd->get_sg_buf_cur_sg_entry = ++sg;
++
++out:
++ return res;
++}
++
++static inline int scst_get_sg_page_first(struct scst_cmd *cmd,
++ struct page **page, int *offset)
++{
++ if (unlikely(cmd->sg == NULL)) {
++ *page = NULL;
++ *offset = 0;
++ return 0;
++ }
++ cmd->get_sg_buf_entry_num = 0;
++ cmd->get_sg_buf_cur_sg_entry = cmd->sg;
++ return __scst_get_sg_page(cmd, cmd->sg_cnt, page, offset);
++}
++
++static inline int scst_get_sg_page_next(struct scst_cmd *cmd,
++ struct page **page, int *offset)
++{
++ return __scst_get_sg_page(cmd, cmd->sg_cnt, page, offset);
++}
++
++static inline void scst_put_sg_page(struct scst_cmd *cmd,
++ struct page *page, int offset)
++{
++ /* Nothing to do */
++}
++
++static inline int scst_get_out_sg_page_first(struct scst_cmd *cmd,
++ struct page **page, int *offset)
++{
++ if (unlikely(cmd->out_sg == NULL)) {
++ *page = NULL;
++ *offset = 0;
++ return 0;
++ }
++ cmd->get_sg_buf_entry_num = 0;
++ cmd->get_sg_buf_cur_sg_entry = cmd->out_sg;
++ return __scst_get_sg_page(cmd, cmd->out_sg_cnt, page, offset);
++}
++
++static inline int scst_get_out_sg_page_next(struct scst_cmd *cmd,
++ struct page **page, int *offset)
++{
++ return __scst_get_sg_page(cmd, cmd->out_sg_cnt, page, offset);
++}
++
++static inline void scst_put_out_sg_page(struct scst_cmd *cmd,
++ struct page *page, int offset)
++{
++ /* Nothing to do */
++}
++
++/*
+ * Returns approximate higher rounded buffers count that
+ * scst_get_buf_[first|next]() return.
+ */
@@ -4583,6 +4787,16 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ return (cmd->out_sg_cnt == 0) ? 1 : cmd->out_sg_cnt;
+}
+
++int scst_get_buf_full(struct scst_cmd *cmd, uint8_t **buf);
++void scst_put_buf_full(struct scst_cmd *cmd, uint8_t *buf);
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++extern struct lockdep_map scst_suspend_dep_map;
++#define scst_assert_activity_suspended() \
++ WARN_ON(debug_locks && !lock_is_held(&scst_suspend_dep_map));
++#else
++#define scst_assert_activity_suspended() do { } while (0)
++#endif
+int scst_suspend_activity(bool interruptible);
+void scst_resume_activity(void);
+
@@ -4593,6 +4807,13 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+
+int scst_check_local_events(struct scst_cmd *cmd);
+
++static inline int scst_pre_check_local_events(struct scst_cmd *cmd)
++{
++ int res = scst_check_local_events(cmd);
++ cmd->check_local_events_once_done = 1;
++ return res;
++}
++
+int scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd);
+
+struct scst_trace_log {
@@ -4676,9 +4897,6 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+
+void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len);
+
-+void scst_get(void);
-+void scst_put(void);
-+
+void scst_cmd_get(struct scst_cmd *cmd);
+void scst_cmd_put(struct scst_cmd *cmd);
+
@@ -4805,7 +5023,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ * needed to allow read only sysfs monitoring during management actions.
+ * All management actions are supposed to be externally serialized,
+ * so then last_sysfs_mgmt_res automatically serialized too.
-+ * Othewrwise a monitoring action can overwrite value of simultaneous
++ * Otherwise a monitoring action can overwrite value of simultaneous
+ * management action's last_sysfs_mgmt_res.
+ */
+ bool read_only_action;
@@ -4826,6 +5044,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ bool is_tgt_kobj;
+ int io_grouping_type;
+ bool enable;
++ cpumask_t cpu_mask;
+ };
+ };
+ struct {
@@ -4835,8 +5054,11 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+ };
+ struct scst_session *sess;
+ struct {
-+ struct scst_tgt *tgt;
-+ unsigned long l;
++ struct scst_tgt *tgt_r;
++ unsigned long rel_tgt_id;
++ };
++ struct {
++ struct kobject *kobj;
+ };
+ };
+ int work_res;
@@ -4856,305 +5078,10167 @@ diff -uprN orig/linux-2.6.36/include/scst/scst.h linux-2.6.36/include/scst/scst.
+void scst_init_threads(struct scst_cmd_threads *cmd_threads);
+void scst_deinit_threads(struct scst_cmd_threads *cmd_threads);
+
++void scst_pass_through_cmd_done(void *data, char *sense, int result, int resid);
++int scst_scsi_exec_async(struct scst_cmd *cmd, void *data,
++ void (*done)(void *data, char *sense, int result, int resid));
++
+#endif /* __SCST_H */
-diff -upkr -X linux-2.6.36/Documentation/dontdiff linux-2.6.36/drivers/Kconfig linux-2.6.36/drivers/Kconfig
---- orig/linux-2.6.36/drivers/Kconfig 01:51:29.000000000 +0400
-+++ linux-2.6.36/drivers/Kconfig 14:14:46.000000000 +0400
-@@ -22,6 +22,8 @@ source "drivers/ide/Kconfig"
-
- source "drivers/scsi/Kconfig"
-
-+source "drivers/scst/Kconfig"
+diff -uprN orig/linux-2.6.39/include/scst/scst_const.h linux-2.6.39/include/scst/scst_const.h
+--- orig/linux-2.6.39/include/scst/scst_const.h
++++ linux-2.6.39/include/scst/scst_const.h
+@@ -0,0 +1,487 @@
++/*
++ * include/scst_const.h
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * Contains common SCST constants.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
+
- source "drivers/ata/Kconfig"
-
- source "drivers/md/Kconfig"
-diff -upkr -X linux-2.6.36/Documentation/dontdiff linux-2.6.36/drivers/Makefile linux-2.6.36/drivers/Makefile
---- orig/linux-2.6.36/drivers/Makefile 15:40:04.000000000 +0200
-+++ linux-2.6.36/drivers/Makefile 15:40:20.000000000 +0200
-@@ -113,3 +113,4 @@ obj-$(CONFIG_VLYNQ) += vlynq/
- obj-$(CONFIG_STAGING) += staging/
- obj-y += platform/
- obj-y += ieee802154/
-+obj-$(CONFIG_SCST) += scst/
-diff -uprN orig/linux-2.6.36/drivers/scst/Kconfig linux-2.6.36/drivers/scst/Kconfig
---- orig/linux-2.6.36/drivers/scst/Kconfig
-+++ linux-2.6.36/drivers/scst/Kconfig
-@@ -0,0 +1,254 @@
-+menu "SCSI target (SCST) support"
++#ifndef __SCST_CONST_H
++#define __SCST_CONST_H
+
-+config SCST
-+ tristate "SCSI target (SCST) support"
-+ depends on SCSI
-+ help
-+ SCSI target (SCST) is designed to provide unified, consistent
-+ interface between SCSI target drivers and Linux kernel and
-+ simplify target drivers development as much as possible. Visit
-+ http://scst.sourceforge.net for more info about it.
++#ifndef GENERATING_UPSTREAM_PATCH
++/*
++ * Include <linux/version.h> only when not converting this header file into
++ * a patch for upstream review because only then the symbol LINUX_VERSION_CODE
++ * is needed.
++ */
++#include <linux/version.h>
++#endif
++#include <scsi/scsi.h>
+
-+config SCST_DISK
-+ tristate "SCSI target disk support"
-+ default SCST
-+ depends on SCSI && SCST
-+ help
-+ SCST pass-through device handler for disk device.
++/*
++ * Version numbers, the same as for the kernel.
++ *
++ * Changing it don't forget to change SCST_FIO_REV in scst_vdisk.c
++ * and FIO_REV in usr/fileio/common.h as well.
++ */
++#define SCST_VERSION(a, b, c, d) (((a) << 24) + ((b) << 16) + ((c) << 8) + d)
++#define SCST_VERSION_CODE SCST_VERSION(2, 1, 0, 0)
++#define SCST_VERSION_STRING_SUFFIX
++#define SCST_VERSION_NAME "2.1.0"
++#define SCST_VERSION_STRING SCST_VERSION_NAME SCST_VERSION_STRING_SUFFIX
+
-+config SCST_TAPE
-+ tristate "SCSI target tape support"
-+ default SCST
-+ depends on SCSI && SCST
-+ help
-+ SCST pass-through device handler for tape device.
++#define SCST_CONST_VERSION "$Revision: 3837 $"
+
-+config SCST_CDROM
-+ tristate "SCSI target CDROM support"
-+ default SCST
-+ depends on SCSI && SCST
-+ help
-+ SCST pass-through device handler for CDROM device.
++/*** Shared constants between user and kernel spaces ***/
+
-+config SCST_MODISK
-+ tristate "SCSI target MO disk support"
-+ default SCST
-+ depends on SCSI && SCST
-+ help
-+ SCST pass-through device handler for MO disk device.
++/* Max size of CDB */
++#define SCST_MAX_CDB_SIZE 16
+
-+config SCST_CHANGER
-+ tristate "SCSI target changer support"
-+ default SCST
-+ depends on SCSI && SCST
-+ help
-+ SCST pass-through device handler for changer device.
++/* Max size of long CDB */
++#define SCST_MAX_LONG_CDB_SIZE 65536
+
-+config SCST_PROCESSOR
-+ tristate "SCSI target processor support"
-+ default SCST
-+ depends on SCSI && SCST
-+ help
-+ SCST pass-through device handler for processor device.
++/* Max size of various names */
++#define SCST_MAX_NAME 50
+
-+config SCST_RAID
-+ tristate "SCSI target storage array controller (RAID) support"
-+ default SCST
-+ depends on SCSI && SCST
-+ help
-+ SCST pass-through device handler for raid storage array controller (RAID) device.
++/* Max size of external names, like initiator name */
++#define SCST_MAX_EXTERNAL_NAME 256
+
-+config SCST_VDISK
-+ tristate "SCSI target virtual disk and/or CDROM support"
-+ default SCST
-+ depends on SCSI && SCST
-+ help
-+ SCST device handler for virtual disk and/or CDROM device.
++/* Max LUN. 2 bits are used for addressing method. */
++#define SCST_MAX_LUN ((1 << (16-2)) - 1)
+
-+config SCST_USER
-+ tristate "User-space SCSI target driver support"
-+ default SCST
-+ depends on SCSI && SCST && !HIGHMEM4G && !HIGHMEM64G
-+ help
-+ The SCST device handler scst_user allows to implement full-feature
-+ SCSI target devices in user space.
++/*
++ * Size of sense sufficient to carry standard sense data.
++ * Warning! It's allocated on stack!
++ */
++#define SCST_STANDARD_SENSE_LEN 18
+
-+ If unsure, say "N".
++/* Max size of sense */
++#define SCST_SENSE_BUFFERSIZE 96
+
-+config SCST_STRICT_SERIALIZING
-+ bool "Strict serialization"
-+ depends on SCST
-+ help
-+ Enable strict SCSI command serialization. When enabled, SCST sends
-+ all SCSI commands to the underlying SCSI device synchronously, one
-+ after one. This makes task management more reliable, at the cost of
-+ a performance penalty. This is most useful for stateful SCSI devices
-+ like tapes, where the result of the execution of a command
-+ depends on the device settings configured by previous commands. Disk
-+ and RAID devices are stateless in most cases. The current SCSI core
-+ in Linux doesn't allow to abort all commands reliably if they have
-+ been sent asynchronously to a stateful device.
-+ Enable this option if you use stateful device(s) and need as much
-+ error recovery reliability as possible.
++/*************************************************************
++ ** Allowed delivery statuses for cmd's delivery_status
++ *************************************************************/
+
-+ If unsure, say "N".
++#define SCST_CMD_DELIVERY_SUCCESS 0
++#define SCST_CMD_DELIVERY_FAILED -1
++#define SCST_CMD_DELIVERY_ABORTED -2
+
-+config SCST_STRICT_SECURITY
-+ bool "Strict security"
-+ depends on SCST
-+ help
-+ Makes SCST clear (zero-fill) allocated data buffers. Note: this has a
-+ significant performance penalty.
++/*************************************************************
++ ** Values for task management functions
++ *************************************************************/
++#define SCST_ABORT_TASK 0
++#define SCST_ABORT_TASK_SET 1
++#define SCST_CLEAR_ACA 2
++#define SCST_CLEAR_TASK_SET 3
++#define SCST_LUN_RESET 4
++#define SCST_TARGET_RESET 5
+
-+ If unsure, say "N".
++/** SCST extensions **/
+
-+config SCST_TEST_IO_IN_SIRQ
-+ bool "Allow test I/O from soft-IRQ context"
-+ depends on SCST
-+ help
-+ Allows SCST to submit selected SCSI commands (TUR and
-+ READ/WRITE) from soft-IRQ context (tasklets). Enabling it will
-+ decrease amount of context switches and slightly improve
-+ performance. The goal of this option is to be able to measure
-+ overhead of the context switches. See more info about it in
-+ README.scst.
++/*
++ * Notifies about I_T nexus loss event in the corresponding session.
++ * Aborts all tasks there, resets the reservation, if any, and sets
++ * up the I_T Nexus loss UA.
++ */
++#define SCST_NEXUS_LOSS_SESS 6
+
-+ WARNING! Improperly used, this option can lead you to a kernel crash!
++/* Aborts all tasks in the corresponding session */
++#define SCST_ABORT_ALL_TASKS_SESS 7
+
-+ If unsure, say "N".
++/*
++ * Notifies about I_T nexus loss event. Aborts all tasks in all sessions
++ * of the tgt, resets the reservations, if any, and sets up the I_T Nexus
++ * loss UA.
++ */
++#define SCST_NEXUS_LOSS 8
+
-+config SCST_ABORT_CONSIDER_FINISHED_TASKS_AS_NOT_EXISTING
-+ bool "Send back UNKNOWN TASK when an already finished task is aborted"
-+ depends on SCST
-+ help
-+ Controls which response is sent by SCST to the initiator in case
-+ the initiator attempts to abort (ABORT TASK) an already finished
-+ request. If this option is enabled, the response UNKNOWN TASK is
-+ sent back to the initiator. However, some initiators, particularly
-+ the VMware iSCSI initiator, interpret the UNKNOWN TASK response as
-+ if the target got crazy and try to RESET it. Then sometimes the
-+ initiator gets crazy itself.
++/* Aborts all tasks in all sessions of the tgt */
++#define SCST_ABORT_ALL_TASKS 9
+
-+ If unsure, say "N".
++/*
++ * Internal TM command issued by SCST in scst_unregister_session(). It is the
++ * same as SCST_NEXUS_LOSS_SESS, except:
++ * - it doesn't call task_mgmt_affected_cmds_done()
++ * - it doesn't call task_mgmt_fn_done()
++ * - it doesn't queue NEXUS LOSS UA.
++ *
++ * Target drivers must NEVER use it!!
++ */
++#define SCST_UNREG_SESS_TM 10
+
-+config SCST_USE_EXPECTED_VALUES
-+ bool "Prefer initiator-supplied SCSI command attributes"
-+ depends on SCST
-+ help
-+ When SCST receives a SCSI command from an initiator, such a SCSI
-+ command has both data transfer length and direction attributes.
-+ There are two possible sources for these attributes: either the
-+ values computed by SCST from its internal command translation table
-+ or the values supplied by the initiator. The former are used by
-+ default because of security reasons. Invalid initiator-supplied
-+ attributes can crash the target, especially in pass-through mode.
-+ Only consider enabling this option when SCST logs the following
-+ message: "Unknown opcode XX for YY. Should you update
-+ scst_scsi_op_table?" and when the initiator complains. Please
-+ report any unrecognized commands to scst-devel@lists.sourceforge.net.
++/*
++ * Internal TM command issued by SCST in scst_pr_abort_reg(). It aborts all
++ * tasks from mcmd->origin_pr_cmd->tgt_dev, except mcmd->origin_pr_cmd.
++ * Additionally:
++ * - it signals pr_aborting_cmpl completion when all affected
++ * commands marked as aborted.
++ * - it doesn't call task_mgmt_affected_cmds_done()
++ * - it doesn't call task_mgmt_fn_done()
++ * - it calls mcmd->origin_pr_cmd->scst_cmd_done() when all affected
++ * commands aborted.
++ *
++ * Target drivers must NEVER use it!!
++ */
++#define SCST_PR_ABORT_ALL 11
+
-+ If unsure, say "N".
++/*************************************************************
++ ** Values for mgmt cmd's status field. Codes taken from iSCSI
++ *************************************************************/
++#define SCST_MGMT_STATUS_SUCCESS 0
++#define SCST_MGMT_STATUS_TASK_NOT_EXIST -1
++#define SCST_MGMT_STATUS_LUN_NOT_EXIST -2
++#define SCST_MGMT_STATUS_FN_NOT_SUPPORTED -5
++#define SCST_MGMT_STATUS_REJECTED -255
++#define SCST_MGMT_STATUS_FAILED -129
+
-+config SCST_EXTRACHECKS
-+ bool "Extra consistency checks"
-+ depends on SCST
-+ help
-+ Enable additional consistency checks in the SCSI middle level target
-+ code. This may be helpful for SCST developers. Enable it if you have
-+ any problems.
++/*************************************************************
++ ** SCSI LUN addressing methods. See also SAM-2 and the
++ ** section about eight byte LUNs.
++ *************************************************************/
++enum scst_lun_addr_method {
++ SCST_LUN_ADDR_METHOD_PERIPHERAL = 0,
++ SCST_LUN_ADDR_METHOD_FLAT = 1,
++ SCST_LUN_ADDR_METHOD_LUN = 2,
++ SCST_LUN_ADDR_METHOD_EXTENDED_LUN = 3,
++};
+
-+ If unsure, say "N".
++/*************************************************************
++ ** SCSI task attribute queue types
++ *************************************************************/
++enum scst_cmd_queue_type {
++ SCST_CMD_QUEUE_UNTAGGED = 0,
++ SCST_CMD_QUEUE_SIMPLE,
++ SCST_CMD_QUEUE_ORDERED,
++ SCST_CMD_QUEUE_HEAD_OF_QUEUE,
++ SCST_CMD_QUEUE_ACA
++};
+
-+config SCST_TRACING
-+ bool "Tracing support"
-+ depends on SCST
-+ default y
-+ help
-+ Enable SCSI middle level tracing support. Tracing can be controlled
-+ dynamically via sysfs interface. The traced information
-+ is sent to the kernel log and may be very helpful when analyzing
-+ the cause of a communication problem between initiator and target.
++/*************************************************************
++ ** CDB flags
++ *************************************************************/
++enum scst_cdb_flags {
++ SCST_TRANSFER_LEN_TYPE_FIXED = 0x0001,
++ SCST_SMALL_TIMEOUT = 0x0002,
++ SCST_LONG_TIMEOUT = 0x0004,
++ SCST_UNKNOWN_LENGTH = 0x0008,
++ SCST_INFO_VALID = 0x0010, /* must be single bit */
++ SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED = 0x0020,
++ SCST_IMPLICIT_HQ = 0x0040,
++ SCST_SKIP_UA = 0x0080,
++ SCST_WRITE_MEDIUM = 0x0100,
++ SCST_LOCAL_CMD = 0x0200,
++ SCST_FULLY_LOCAL_CMD = 0x0400,
++ SCST_REG_RESERVE_ALLOWED = 0x0800,
++ SCST_WRITE_EXCL_ALLOWED = 0x1000,
++ SCST_EXCL_ACCESS_ALLOWED = 0x2000,
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ SCST_TEST_IO_IN_SIRQ_ALLOWED = 0x4000,
++#endif
++ SCST_SERIALIZED = 0x8000,
++ SCST_STRICTLY_SERIALIZED = 0x10000|SCST_SERIALIZED,
++};
+
-+ If unsure, say "Y".
++/*************************************************************
++ ** Data direction aliases. Changing it don't forget to change
++ ** scst_to_tgt_dma_dir and SCST_DATA_DIR_MAX as well!!
++ *************************************************************/
++#define SCST_DATA_UNKNOWN 0
++#define SCST_DATA_WRITE 1
++#define SCST_DATA_READ 2
++#define SCST_DATA_BIDI (SCST_DATA_WRITE | SCST_DATA_READ)
++#define SCST_DATA_NONE 4
+
-+config SCST_DEBUG
-+ bool "Debugging support"
-+ depends on SCST
-+ select DEBUG_BUGVERBOSE
-+ help
-+ Enables support for debugging SCST. This may be helpful for SCST
-+ developers.
++#define SCST_DATA_DIR_MAX (SCST_DATA_NONE+1)
+
-+ If unsure, say "N".
++/*************************************************************
++ ** Default suffix for targets with NULL names
++ *************************************************************/
++#define SCST_DEFAULT_TGT_NAME_SUFFIX "_target_"
+
-+config SCST_DEBUG_OOM
-+ bool "Out-of-memory debugging support"
-+ depends on SCST
-+ help
-+ Let SCST's internal memory allocation function
-+ (scst_alloc_sg_entries()) fail about once in every 10000 calls, at
-+ least if the flag __GFP_NOFAIL has not been set. This allows SCST
-+ developers to test the behavior of SCST in out-of-memory conditions.
-+ This may be helpful for SCST developers.
++/*************************************************************
++ ** Sense manipulation and examination
++ *************************************************************/
++#define SCST_LOAD_SENSE(key_asc_ascq) key_asc_ascq
+
-+ If unsure, say "N".
++#define SCST_SENSE_VALID(sense) ((sense != NULL) && \
++ ((((const uint8_t *)(sense))[0] & 0x70) == 0x70))
+
-+config SCST_DEBUG_RETRY
-+ bool "SCSI command retry debugging support"
-+ depends on SCST
-+ help
-+ Let SCST's internal SCSI command transfer function
-+ (scst_rdy_to_xfer()) fail about once in every 100 calls. This allows
-+ SCST developers to test the behavior of SCST when SCSI queues fill
-+ up. This may be helpful for SCST developers.
++#define SCST_NO_SENSE(sense) ((sense != NULL) && \
++ (((const uint8_t *)(sense))[2] == 0))
+
-+ If unsure, say "N".
++/*************************************************************
++ ** Sense data for the appropriate errors. Can be used with
++ ** scst_set_cmd_error()
++ *************************************************************/
++#define scst_sense_no_sense NO_SENSE, 0x00, 0
++#define scst_sense_hardw_error HARDWARE_ERROR, 0x44, 0
++#define scst_sense_aborted_command ABORTED_COMMAND, 0x00, 0
++#define scst_sense_invalid_opcode ILLEGAL_REQUEST, 0x20, 0
++#define scst_sense_invalid_field_in_cdb ILLEGAL_REQUEST, 0x24, 0
++#define scst_sense_invalid_field_in_parm_list ILLEGAL_REQUEST, 0x26, 0
++#define scst_sense_parameter_value_invalid ILLEGAL_REQUEST, 0x26, 2
++#define scst_sense_invalid_release ILLEGAL_REQUEST, 0x26, 4
++#define scst_sense_parameter_list_length_invalid \
++ ILLEGAL_REQUEST, 0x1A, 0
++#define scst_sense_reset_UA UNIT_ATTENTION, 0x29, 0
++#define scst_sense_nexus_loss_UA UNIT_ATTENTION, 0x29, 0x7
++#define scst_sense_saving_params_unsup ILLEGAL_REQUEST, 0x39, 0
++#define scst_sense_lun_not_supported ILLEGAL_REQUEST, 0x25, 0
++#define scst_sense_data_protect DATA_PROTECT, 0x00, 0
++#define scst_sense_miscompare_error MISCOMPARE, 0x1D, 0
++#define scst_sense_block_out_range_error ILLEGAL_REQUEST, 0x21, 0
++#define scst_sense_medium_changed_UA UNIT_ATTENTION, 0x28, 0
++#define scst_sense_read_error MEDIUM_ERROR, 0x11, 0
++#define scst_sense_write_error MEDIUM_ERROR, 0x03, 0
++#define scst_sense_not_ready NOT_READY, 0x04, 0x10
++#define scst_sense_invalid_message ILLEGAL_REQUEST, 0x49, 0
++#define scst_sense_cleared_by_another_ini_UA UNIT_ATTENTION, 0x2F, 0
++#define scst_sense_capacity_data_changed UNIT_ATTENTION, 0x2A, 0x9
++#define scst_sense_reservation_preempted UNIT_ATTENTION, 0x2A, 0x03
++#define scst_sense_reservation_released UNIT_ATTENTION, 0x2A, 0x04
++#define scst_sense_registrations_preempted UNIT_ATTENTION, 0x2A, 0x05
++#define scst_sense_asym_access_state_changed UNIT_ATTENTION, 0x2A, 0x06
++#define scst_sense_reported_luns_data_changed UNIT_ATTENTION, 0x3F, 0xE
++#define scst_sense_inquery_data_changed UNIT_ATTENTION, 0x3F, 0x3
+
-+config SCST_DEBUG_SN
-+ bool "SCSI sequence number debugging support"
-+ depends on SCST
-+ help
-+ Allows to test SCSI command ordering via sequence numbers by
-+ randomly changing the type of SCSI commands into
-+ SCST_CMD_QUEUE_ORDERED, SCST_CMD_QUEUE_HEAD_OF_QUEUE or
-+ SCST_CMD_QUEUE_SIMPLE for about one in 300 SCSI commands.
-+ This may be helpful for SCST developers.
++/*************************************************************
++ * SCSI opcodes not listed anywhere else
++ *************************************************************/
++#define INIT_ELEMENT_STATUS 0x07
++#define INIT_ELEMENT_STATUS_RANGE 0x37
++#define PREVENT_ALLOW_MEDIUM 0x1E
++#define REQUEST_VOLUME_ADDRESS 0xB5
++#define WRITE_VERIFY_16 0x8E
++#define VERIFY_6 0x13
++#ifndef VERIFY_12
++#define VERIFY_12 0xAF
++#endif
++#ifndef GENERATING_UPSTREAM_PATCH
++/*
++ * The constants below have been defined in the kernel header <scsi/scsi.h>
++ * and hence are not needed when this header file is included in kernel code.
++ * The definitions below are only used when this header file is included during
++ * compilation of SCST's user space components.
++ */
++#ifndef READ_16
++#define READ_16 0x88
++#endif
++#ifndef WRITE_16
++#define WRITE_16 0x8a
++#endif
++#ifndef VERIFY_16
++#define VERIFY_16 0x8f
++#endif
++#ifndef SERVICE_ACTION_IN
++#define SERVICE_ACTION_IN 0x9e
++#endif
++#ifndef SAI_READ_CAPACITY_16
++/* values for service action in */
++#define SAI_READ_CAPACITY_16 0x10
++#endif
++#endif
++#ifndef GENERATING_UPSTREAM_PATCH
++#ifndef REPORT_LUNS
++#define REPORT_LUNS 0xa0
++#endif
++#endif
+
-+ If unsure, say "N".
+
-+config SCST_DEBUG_TM
-+ bool "Task management debugging support"
-+ depends on SCST_DEBUG
-+ help
-+ Enables support for debugging of SCST's task management functions.
-+ When enabled, some of the commands on LUN 0 in the default access
-+ control group will be delayed for about 60 seconds. This will
-+ cause the remote initiator send SCSI task management functions,
-+ e.g. ABORT TASK and TARGET RESET.
++/*************************************************************
++ ** SCSI Architecture Model (SAM) Status codes. Taken from SAM-3 draft
++ ** T10/1561-D Revision 4 Draft dated 7th November 2002.
++ *************************************************************/
++#define SAM_STAT_GOOD 0x00
++#define SAM_STAT_CHECK_CONDITION 0x02
++#define SAM_STAT_CONDITION_MET 0x04
++#define SAM_STAT_BUSY 0x08
++#define SAM_STAT_INTERMEDIATE 0x10
++#define SAM_STAT_INTERMEDIATE_CONDITION_MET 0x14
++#define SAM_STAT_RESERVATION_CONFLICT 0x18
++#define SAM_STAT_COMMAND_TERMINATED 0x22 /* obsolete in SAM-3 */
++#define SAM_STAT_TASK_SET_FULL 0x28
++#define SAM_STAT_ACA_ACTIVE 0x30
++#define SAM_STAT_TASK_ABORTED 0x40
+
-+ If unsure, say "N".
++/*************************************************************
++ ** Control byte field in CDB
++ *************************************************************/
++#define CONTROL_BYTE_LINK_BIT 0x01
++#define CONTROL_BYTE_NACA_BIT 0x04
+
-+config SCST_TM_DBG_GO_OFFLINE
-+ bool "Let devices become completely unresponsive"
-+ depends on SCST_DEBUG_TM
-+ help
-+ Enable this option if you want that the device eventually becomes
-+ completely unresponsive. When disabled, the device will receive
-+ ABORT and RESET commands.
++/*************************************************************
++ ** Byte 1 in INQUIRY CDB
++ *************************************************************/
++#define SCST_INQ_EVPD 0x01
+
-+config SCST_MEASURE_LATENCY
-+ bool "Commands processing latency measurement facility"
-+ depends on SCST
-+ help
-+ This option enables commands processing latency measurement
-+ facility in SCST. It will provide in the sysfs interface
-+ average commands processing latency statistics. You can clear
-+ already measured results by writing 0 in the corresponding sysfs file.
-+ Note, you need a non-preemtible kernel to have correct results.
++/*************************************************************
++ ** Byte 3 in Standard INQUIRY data
++ *************************************************************/
++#define SCST_INQ_BYTE3 3
+
-+ If unsure, say "N".
++#define SCST_INQ_NORMACA_BIT 0x20
+
-+source "drivers/scst/iscsi-scst/Kconfig"
-+source "drivers/scst/srpt/Kconfig"
++/*************************************************************
++ ** TPGS field in byte 5 of the INQUIRY response (SPC-4).
++ *************************************************************/
++enum {
++ SCST_INQ_TPGS_MODE_IMPLICIT = 0x10,
++ SCST_INQ_TPGS_MODE_EXPLICIT = 0x20,
++};
+
-+endmenu
-diff -uprN orig/linux-2.6.36/drivers/scst/Makefile linux-2.6.36/drivers/scst/Makefile
---- orig/linux-2.6.36/drivers/scst/Makefile
-+++ linux-2.6.36/drivers/scst/Makefile
-@@ -0,0 +1,12 @@
-+ccflags-y += -Wno-unused-parameter
++/*************************************************************
++ ** Byte 2 in RESERVE_10 CDB
++ *************************************************************/
++#define SCST_RES_3RDPTY 0x10
++#define SCST_RES_LONGID 0x02
+
-+scst-y += scst_main.o
-+scst-y += scst_pres.o
-+scst-y += scst_targ.o
-+scst-y += scst_lib.o
-+scst-y += scst_sysfs.o
-+scst-y += scst_mem.o
-+scst-y += scst_debug.o
++/*************************************************************
++ ** Values for the control mode page TST field
++ *************************************************************/
++#define SCST_CONTR_MODE_ONE_TASK_SET 0
++#define SCST_CONTR_MODE_SEP_TASK_SETS 1
++
++/*******************************************************************
++ ** Values for the control mode page QUEUE ALGORITHM MODIFIER field
++ *******************************************************************/
++#define SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER 0
++#define SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER 1
++
++/*************************************************************
++ ** Values for the control mode page D_SENSE field
++ *************************************************************/
++#define SCST_CONTR_MODE_FIXED_SENSE 0
++#define SCST_CONTR_MODE_DESCR_SENSE 1
++
++/*************************************************************
++ ** TransportID protocol identifiers
++ *************************************************************/
++
++#define SCSI_TRANSPORTID_PROTOCOLID_FCP2 0
++#define SCSI_TRANSPORTID_PROTOCOLID_SPI5 1
++#define SCSI_TRANSPORTID_PROTOCOLID_SRP 4
++#define SCSI_TRANSPORTID_PROTOCOLID_ISCSI 5
++#define SCSI_TRANSPORTID_PROTOCOLID_SAS 6
++
++/**
++ * enum scst_tg_state - SCSI target port group asymmetric access state.
++ *
++ * See also the documentation of the REPORT TARGET PORT GROUPS command in SPC-4.
++ */
++enum scst_tg_state {
++ SCST_TG_STATE_OPTIMIZED = 0x0,
++ SCST_TG_STATE_NONOPTIMIZED = 0x1,
++ SCST_TG_STATE_STANDBY = 0x2,
++ SCST_TG_STATE_UNAVAILABLE = 0x3,
++ SCST_TG_STATE_LBA_DEPENDENT = 0x4,
++ SCST_TG_STATE_OFFLINE = 0xe,
++ SCST_TG_STATE_TRANSITIONING = 0xf,
++};
++
++/**
++ * Target port group preferred bit.
++ *
++ * See also the documentation of the REPORT TARGET PORT GROUPS command in SPC-4.
++ */
++enum {
++ SCST_TG_PREFERRED = 0x80,
++};
++
++/**
++ * enum scst_tg_sup - Supported SCSI target port group states.
++ *
++ * See also the documentation of the REPORT TARGET PORT GROUPS command in SPC-4.
++ */
++enum scst_tg_sup {
++ SCST_TG_SUP_OPTIMIZED = 0x01,
++ SCST_TG_SUP_NONOPTIMIZED = 0x02,
++ SCST_TG_SUP_STANDBY = 0x04,
++ SCST_TG_SUP_UNAVAILABLE = 0x08,
++ SCST_TG_SUP_LBA_DEPENDENT = 0x10,
++ SCST_TG_SUP_OFFLINE = 0x40,
++ SCST_TG_SUP_TRANSITION = 0x80,
++};
++
++/*************************************************************
++ ** Misc SCSI constants
++ *************************************************************/
++#define SCST_SENSE_ASC_UA_RESET 0x29
++#define BYTCHK 0x02
++#define POSITION_LEN_SHORT 20
++#define POSITION_LEN_LONG 32
++
++/*************************************************************
++ ** Various timeouts
++ *************************************************************/
++#define SCST_DEFAULT_TIMEOUT (60 * HZ)
++
++#define SCST_GENERIC_CHANGER_TIMEOUT (3 * HZ)
++#define SCST_GENERIC_CHANGER_LONG_TIMEOUT (14000 * HZ)
++
++#define SCST_GENERIC_PROCESSOR_TIMEOUT (3 * HZ)
++#define SCST_GENERIC_PROCESSOR_LONG_TIMEOUT (14000 * HZ)
++
++#define SCST_GENERIC_TAPE_SMALL_TIMEOUT (3 * HZ)
++#define SCST_GENERIC_TAPE_REG_TIMEOUT (900 * HZ)
++#define SCST_GENERIC_TAPE_LONG_TIMEOUT (14000 * HZ)
++
++#define SCST_GENERIC_MODISK_SMALL_TIMEOUT (3 * HZ)
++#define SCST_GENERIC_MODISK_REG_TIMEOUT (900 * HZ)
++#define SCST_GENERIC_MODISK_LONG_TIMEOUT (14000 * HZ)
++
++#define SCST_GENERIC_DISK_SMALL_TIMEOUT (3 * HZ)
++#define SCST_GENERIC_DISK_REG_TIMEOUT (60 * HZ)
++#define SCST_GENERIC_DISK_LONG_TIMEOUT (3600 * HZ)
++
++#define SCST_GENERIC_RAID_TIMEOUT (3 * HZ)
++#define SCST_GENERIC_RAID_LONG_TIMEOUT (14000 * HZ)
++
++#define SCST_GENERIC_CDROM_SMALL_TIMEOUT (3 * HZ)
++#define SCST_GENERIC_CDROM_REG_TIMEOUT (900 * HZ)
++#define SCST_GENERIC_CDROM_LONG_TIMEOUT (14000 * HZ)
++
++#define SCST_MAX_OTHER_TIMEOUT (14000 * HZ)
++
++/*************************************************************
++ ** I/O grouping attribute string values. Must match constants
++ ** w/o '_STR' suffix!
++ *************************************************************/
++#define SCST_IO_GROUPING_AUTO_STR "auto"
++#define SCST_IO_GROUPING_THIS_GROUP_ONLY_STR "this_group_only"
++#define SCST_IO_GROUPING_NEVER_STR "never"
++
++/*************************************************************
++ ** Threads pool type attribute string values.
++ ** Must match scst_dev_type_threads_pool_type!
++ *************************************************************/
++#define SCST_THREADS_POOL_PER_INITIATOR_STR "per_initiator"
++#define SCST_THREADS_POOL_SHARED_STR "shared"
++
++/*************************************************************
++ ** Misc constants
++ *************************************************************/
++#define SCST_SYSFS_BLOCK_SIZE PAGE_SIZE
++
++#define SCST_PR_DIR "/var/lib/scst/pr"
++
++#define TID_COMMON_SIZE 24
++
++#define SCST_SYSFS_KEY_MARK "[key]"
++
++#define SCST_MIN_REL_TGT_ID 1
++#define SCST_MAX_REL_TGT_ID 65535
++
++#endif /* __SCST_CONST_H */
+diff -uprN orig/linux-2.6.39/drivers/scst/scst_main.c linux-2.6.39/drivers/scst/scst_main.c
+--- orig/linux-2.6.39/drivers/scst/scst_main.c
++++ linux-2.6.39/drivers/scst/scst_main.c
+@@ -0,0 +1,2229 @@
++/*
++ * scst_main.c
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/module.h>
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/unistd.h>
++#include <linux/string.h>
++#include <linux/kthread.h>
++#include <linux/delay.h>
++#include <linux/lockdep.h>
++
++#include <scst/scst.h>
++#include "scst_priv.h"
++#include "scst_mem.h"
++#include "scst_pres.h"
++
++#if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
++#warning "HIGHMEM kernel configurations are fully supported, but not\
++ recommended for performance reasons. Consider changing VMSPLIT\
++ option or use a 64-bit configuration instead. See README file for\
++ details."
++#endif
++
++/**
++ ** SCST global variables. They are all uninitialized to have their layout in
++ ** memory be exactly as specified. Otherwise compiler puts zero-initialized
++ ** variable separately from nonzero-initialized ones.
++ **/
++
++/*
++ * Main SCST mutex. All targets, devices and dev_types management is done
++ * under this mutex.
++ *
++ * It must NOT be used in any works (schedule_work(), etc.), because
++ * otherwise a deadlock (double lock, actually) is possible, e.g., with
++ * scst_user detach_tgt(), which is called under scst_mutex and calls
++ * flush_scheduled_work().
++ */
++struct mutex scst_mutex;
++EXPORT_SYMBOL_GPL(scst_mutex);
++
++/*
++ * Secondary level main mutex, inner for scst_mutex. Needed for
++ * __scst_pr_register_all_tg_pt(), since we can't use scst_mutex there,
++ * because of the circular locking dependency with dev_pr_mutex.
++ */
++struct mutex scst_mutex2;
++
++/* Both protected by scst_mutex or scst_mutex2 on read and both on write */
++struct list_head scst_template_list;
++struct list_head scst_dev_list;
++
++/* Protected by scst_mutex */
++struct list_head scst_dev_type_list;
++struct list_head scst_virtual_dev_type_list;
++
++spinlock_t scst_main_lock;
++
++static struct kmem_cache *scst_mgmt_cachep;
++mempool_t *scst_mgmt_mempool;
++static struct kmem_cache *scst_mgmt_stub_cachep;
++mempool_t *scst_mgmt_stub_mempool;
++static struct kmem_cache *scst_ua_cachep;
++mempool_t *scst_ua_mempool;
++static struct kmem_cache *scst_sense_cachep;
++mempool_t *scst_sense_mempool;
++static struct kmem_cache *scst_aen_cachep;
++mempool_t *scst_aen_mempool;
++struct kmem_cache *scst_tgtd_cachep;
++struct kmem_cache *scst_sess_cachep;
++struct kmem_cache *scst_acgd_cachep;
++
++unsigned int scst_setup_id;
++
++spinlock_t scst_init_lock;
++wait_queue_head_t scst_init_cmd_list_waitQ;
++struct list_head scst_init_cmd_list;
++unsigned int scst_init_poll_cnt;
++
++struct kmem_cache *scst_cmd_cachep;
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++unsigned long scst_trace_flag;
++#endif
++
++int scst_max_tasklet_cmd = SCST_DEF_MAX_TASKLET_CMD;
++
++unsigned long scst_flags;
++
++struct scst_cmd_threads scst_main_cmd_threads;
++
++struct scst_percpu_info scst_percpu_infos[NR_CPUS];
++
++spinlock_t scst_mcmd_lock;
++struct list_head scst_active_mgmt_cmd_list;
++struct list_head scst_delayed_mgmt_cmd_list;
++wait_queue_head_t scst_mgmt_cmd_list_waitQ;
++
++wait_queue_head_t scst_mgmt_waitQ;
++spinlock_t scst_mgmt_lock;
++struct list_head scst_sess_init_list;
++struct list_head scst_sess_shut_list;
++
++wait_queue_head_t scst_dev_cmd_waitQ;
++
++#ifdef CONFIG_LOCKDEP
++static struct lock_class_key scst_suspend_key;
++struct lockdep_map scst_suspend_dep_map =
++ STATIC_LOCKDEP_MAP_INIT("scst_suspend_activity", &scst_suspend_key);
++#endif
++static struct mutex scst_suspend_mutex;
++/* protected by scst_suspend_mutex */
++static struct list_head scst_cmd_threads_list;
++
++int scst_threads;
++static struct task_struct *scst_init_cmd_thread;
++static struct task_struct *scst_mgmt_thread;
++static struct task_struct *scst_mgmt_cmd_thread;
++
++static int suspend_count;
++
++static int scst_virt_dev_last_id; /* protected by scst_mutex */
++
++cpumask_t default_cpu_mask;
++
++static unsigned int scst_max_cmd_mem;
++unsigned int scst_max_dev_cmd_mem;
++
++module_param_named(scst_threads, scst_threads, int, 0);
++MODULE_PARM_DESC(scst_threads, "SCSI target threads count");
++
++module_param_named(scst_max_cmd_mem, scst_max_cmd_mem, int, S_IRUGO);
++MODULE_PARM_DESC(scst_max_cmd_mem, "Maximum memory allowed to be consumed by "
++ "all SCSI commands of all devices at any given time in MB");
++
++module_param_named(scst_max_dev_cmd_mem, scst_max_dev_cmd_mem, int, S_IRUGO);
++MODULE_PARM_DESC(scst_max_dev_cmd_mem, "Maximum memory allowed to be consumed "
++ "by all SCSI commands of a device at any given time in MB");
++
++struct scst_dev_type scst_null_devtype = {
++ .name = "none",
++ .threads_num = -1,
++};
++
++static void __scst_resume_activity(void);
++
++/**
++ * __scst_register_target_template() - register target template.
++ * @vtt: target template
++ * @version: SCST_INTERFACE_VERSION version string to ensure that
++ * SCST core and the target driver use the same version of
++ * the SCST interface
++ *
++ * Description:
++ * Registers a target template and returns 0 on success or appropriate
++ * error code otherwise.
++ *
++ * Target drivers supposed to behave sanely and not call register()
++ * and unregister() randomly simultaneously.
++ */
++int __scst_register_target_template(struct scst_tgt_template *vtt,
++ const char *version)
++{
++ int res = 0;
++ struct scst_tgt_template *t;
++
++ TRACE_ENTRY();
++
++ INIT_LIST_HEAD(&vtt->tgt_list);
++
++ if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
++ PRINT_ERROR("Incorrect version of target %s", vtt->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (!vtt->detect) {
++ PRINT_ERROR("Target driver %s must have "
++ "detect() method.", vtt->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (!vtt->release) {
++ PRINT_ERROR("Target driver %s must have "
++ "release() method.", vtt->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (!vtt->xmit_response) {
++ PRINT_ERROR("Target driver %s must have "
++ "xmit_response() method.", vtt->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (vtt->get_initiator_port_transport_id == NULL)
++ PRINT_WARNING("Target driver %s doesn't support Persistent "
++ "Reservations", vtt->name);
++
++ if (vtt->threads_num < 0) {
++ PRINT_ERROR("Wrong threads_num value %d for "
++ "target \"%s\"", vtt->threads_num,
++ vtt->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if ((!vtt->enable_target || !vtt->is_target_enabled) &&
++ !vtt->enabled_attr_not_needed)
++ PRINT_WARNING("Target driver %s doesn't have enable_target() "
++ "and/or is_target_enabled() method(s). This is unsafe "
++ "and can lead that initiators connected on the "
++ "initialization time can see an unexpected set of "
++ "devices or no devices at all!", vtt->name);
++
++ if (((vtt->add_target != NULL) && (vtt->del_target == NULL)) ||
++ ((vtt->add_target == NULL) && (vtt->del_target != NULL))) {
++ PRINT_ERROR("Target driver %s must either define both "
++ "add_target() and del_target(), or none.", vtt->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (vtt->rdy_to_xfer == NULL)
++ vtt->rdy_to_xfer_atomic = 1;
++
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res != 0)
++ goto out;
++ list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
++ if (strcmp(t->name, vtt->name) == 0) {
++ PRINT_ERROR("Target driver %s already registered",
++ vtt->name);
++ mutex_unlock(&scst_mutex);
++ goto out_unlock;
++ }
++ }
++ mutex_unlock(&scst_mutex);
++
++ res = scst_tgtt_sysfs_create(vtt);
++ if (res != 0)
++ goto out;
++
++ mutex_lock(&scst_mutex);
++ mutex_lock(&scst_mutex2);
++ list_add_tail(&vtt->scst_template_list_entry, &scst_template_list);
++ mutex_unlock(&scst_mutex2);
++ mutex_unlock(&scst_mutex);
++
++ TRACE_DBG("%s", "Calling target driver's detect()");
++ res = vtt->detect(vtt);
++ TRACE_DBG("Target driver's detect() returned %d", res);
++ if (res < 0) {
++ PRINT_ERROR("%s", "The detect() routine failed");
++ res = -EINVAL;
++ goto out_del;
++ }
++
++ PRINT_INFO("Target template %s registered successfully", vtt->name);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_del:
++ scst_tgtt_sysfs_del(vtt);
++
++ mutex_lock(&scst_mutex);
++
++ mutex_lock(&scst_mutex2);
++ list_del(&vtt->scst_template_list_entry);
++ mutex_unlock(&scst_mutex2);
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++ goto out;
++}
++EXPORT_SYMBOL_GPL(__scst_register_target_template);
++
++static int scst_check_non_gpl_target_template(struct scst_tgt_template *vtt)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ if (vtt->task_mgmt_affected_cmds_done || vtt->threads_num ||
++ vtt->on_hw_pending_cmd_timeout) {
++ PRINT_ERROR("Not allowed functionality in non-GPL version for "
++ "target template %s", vtt->name);
++ res = -EPERM;
++ goto out;
++ }
++
++ res = 0;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/**
++ * __scst_register_target_template_non_gpl() - register target template,
++ * non-GPL version
++ * @vtt: target template
++ * @version: SCST_INTERFACE_VERSION version string to ensure that
++ * SCST core and the target driver use the same version of
++ * the SCST interface
++ *
++ * Description:
++ * Registers a target template and returns 0 on success or appropriate
++ * error code otherwise.
++ *
++ * Note: *vtt must be static!
++ */
++int __scst_register_target_template_non_gpl(struct scst_tgt_template *vtt,
++ const char *version)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ res = scst_check_non_gpl_target_template(vtt);
++ if (res != 0)
++ goto out;
++
++ res = __scst_register_target_template(vtt, version);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL(__scst_register_target_template_non_gpl);
++
++/**
++ * scst_unregister_target_template() - unregister target template
++ *
++ * Target drivers supposed to behave sanely and not call register()
++ * and unregister() randomly simultaneously. Also it is supposed that
++ * no attempts to create new targets for this vtt will be done in a race
++ * with this function.
++ */
++void scst_unregister_target_template(struct scst_tgt_template *vtt)
++{
++ struct scst_tgt *tgt;
++ struct scst_tgt_template *t;
++ int found = 0;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
++ if (strcmp(t->name, vtt->name) == 0) {
++ found = 1;
++ break;
++ }
++ }
++ if (!found) {
++ PRINT_ERROR("Target driver %s isn't registered", vtt->name);
++ goto out_err_up;
++ }
++
++ mutex_lock(&scst_mutex2);
++ list_del(&vtt->scst_template_list_entry);
++ mutex_unlock(&scst_mutex2);
++
++ /* Wait for outstanding sysfs mgmt calls completed */
++ while (vtt->tgtt_active_sysfs_works_count > 0) {
++ mutex_unlock(&scst_mutex);
++ msleep(100);
++ mutex_lock(&scst_mutex);
++ }
++
++restart:
++ list_for_each_entry(tgt, &vtt->tgt_list, tgt_list_entry) {
++ mutex_unlock(&scst_mutex);
++ scst_unregister_target(tgt);
++ mutex_lock(&scst_mutex);
++ goto restart;
++ }
++
++ mutex_unlock(&scst_mutex);
++
++ scst_tgtt_sysfs_del(vtt);
++
++ PRINT_INFO("Target template %s unregistered successfully", vtt->name);
++
++out:
++ TRACE_EXIT();
++ return;
++
++out_err_up:
++ mutex_unlock(&scst_mutex);
++ goto out;
++}
++EXPORT_SYMBOL(scst_unregister_target_template);
++
++/**
++ * scst_register_target() - register target
++ *
++ * Registers a target for template vtt and returns new target structure on
++ * success or NULL otherwise.
++ */
++struct scst_tgt *scst_register_target(struct scst_tgt_template *vtt,
++ const char *target_name)
++{
++ struct scst_tgt *tgt, *t;
++ int rc = 0;
++
++ TRACE_ENTRY();
++
++ rc = scst_alloc_tgt(vtt, &tgt);
++ if (rc != 0)
++ goto out;
++
++ if (target_name != NULL) {
++
++ tgt->tgt_name = kstrdup(target_name, GFP_KERNEL);
++ if (tgt->tgt_name == NULL) {
++ PRINT_ERROR("Allocation of tgt name %s failed",
++ target_name);
++ rc = -ENOMEM;
++ goto out_free_tgt;
++ }
++ } else {
++ static int tgt_num; /* protected by scst_mutex */
++
++ PRINT_WARNING("Usage of autogenerated SCST target names "
++ "is deprecated and will be removed in one of the next "
++ "versions. It is strongly recommended to update target "
++ "driver %s to use hardware related persistent target "
++ "names instead", vtt->name);
++
++ tgt->tgt_name = kasprintf(GFP_KERNEL, "%s%s%d", vtt->name,
++ SCST_DEFAULT_TGT_NAME_SUFFIX, tgt_num);
++ if (tgt->tgt_name == NULL) {
++ PRINT_ERROR("Allocation of tgt name failed "
++ "(template name %s)", vtt->name);
++ rc = -ENOMEM;
++ goto out_free_tgt;
++ }
++ tgt_num++;
++ }
++
++ rc = mutex_lock_interruptible(&scst_mutex);
++ if (rc != 0)
++ goto out_free_tgt;
++
++ list_for_each_entry(t, &vtt->tgt_list, tgt_list_entry) {
++ if (strcmp(t->tgt_name, tgt->tgt_name) == 0) {
++ PRINT_ERROR("target %s already exists", tgt->tgt_name);
++ rc = -EEXIST;
++ goto out_unlock;
++ }
++ }
++
++ rc = scst_tgt_sysfs_create(tgt);
++ if (rc < 0)
++ goto out_unlock;
++
++ tgt->default_acg = scst_alloc_add_acg(tgt, tgt->tgt_name, false);
++ if (tgt->default_acg == NULL)
++ goto out_sysfs_del;
++
++ mutex_lock(&scst_mutex2);
++ list_add_tail(&tgt->tgt_list_entry, &vtt->tgt_list);
++ mutex_unlock(&scst_mutex2);
++
++ mutex_unlock(&scst_mutex);
++
++ PRINT_INFO("Target %s for template %s registered successfully",
++ tgt->tgt_name, vtt->name);
++
++ TRACE_DBG("tgt %p", tgt);
++
++out:
++ TRACE_EXIT();
++ return tgt;
++
++out_sysfs_del:
++ mutex_unlock(&scst_mutex);
++ scst_tgt_sysfs_del(tgt);
++ goto out_free_tgt;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++
++out_free_tgt:
++ /* In case of error tgt_name will be freed in scst_free_tgt() */
++ scst_free_tgt(tgt);
++ tgt = NULL;
++ goto out;
++}
++EXPORT_SYMBOL(scst_register_target);
++
++static inline int test_sess_list(struct scst_tgt *tgt)
++{
++ int res;
++ mutex_lock(&scst_mutex);
++ res = list_empty(&tgt->sess_list);
++ mutex_unlock(&scst_mutex);
++ return res;
++}
++
++/**
++ * scst_unregister_target() - unregister target.
++ *
++ * It is supposed that no attempts to create new sessions for this
++ * target will be done in a race with this function.
++ */
++void scst_unregister_target(struct scst_tgt *tgt)
++{
++ struct scst_session *sess;
++ struct scst_tgt_template *vtt = tgt->tgtt;
++ struct scst_acg *acg, *acg_tmp;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("%s", "Calling target driver's release()");
++ tgt->tgtt->release(tgt);
++ TRACE_DBG("%s", "Target driver's release() returned");
++
++ mutex_lock(&scst_mutex);
++again:
++ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
++ if (sess->shut_phase == SCST_SESS_SPH_READY) {
++ /*
++ * Sometimes it's hard for target driver to track all
++ * its sessions (see scst_local, eg), so let's help it.
++ */
++ mutex_unlock(&scst_mutex);
++ scst_unregister_session(sess, 0, NULL);
++ mutex_lock(&scst_mutex);
++ goto again;
++ }
++ }
++ mutex_unlock(&scst_mutex);
++
++ TRACE_DBG("%s", "Waiting for sessions shutdown");
++ wait_event(tgt->unreg_waitQ, test_sess_list(tgt));
++ TRACE_DBG("%s", "wait_event() returned");
++
++ scst_suspend_activity(false);
++ mutex_lock(&scst_mutex);
++
++ mutex_lock(&scst_mutex2);
++ list_del(&tgt->tgt_list_entry);
++ mutex_unlock(&scst_mutex2);
++
++ del_timer_sync(&tgt->retry_timer);
++
++ scst_tg_tgt_remove_by_tgt(tgt);
++
++ scst_del_free_acg(tgt->default_acg);
++
++ list_for_each_entry_safe(acg, acg_tmp, &tgt->tgt_acg_list,
++ acg_list_entry) {
++ scst_del_free_acg(acg);
++ }
++
++ mutex_unlock(&scst_mutex);
++ scst_resume_activity();
++
++ scst_tgt_sysfs_del(tgt);
++
++ PRINT_INFO("Target %s for template %s unregistered successfully",
++ tgt->tgt_name, vtt->name);
++
++ scst_free_tgt(tgt);
++
++ TRACE_DBG("Unregistering tgt %p finished", tgt);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL(scst_unregister_target);
++
++int scst_get_cmd_counter(void)
++{
++ int i, res = 0;
++ for (i = 0; i < (int)ARRAY_SIZE(scst_percpu_infos); i++)
++ res += atomic_read(&scst_percpu_infos[i].cpu_cmd_count);
++ return res;
++}
++
++static int scst_susp_wait(bool interruptible)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ if (interruptible) {
++ res = wait_event_interruptible_timeout(scst_dev_cmd_waitQ,
++ (scst_get_cmd_counter() == 0),
++ SCST_SUSPENDING_TIMEOUT);
++ if (res <= 0) {
++ __scst_resume_activity();
++ if (res == 0)
++ res = -EBUSY;
++ } else
++ res = 0;
++ } else
++ wait_event(scst_dev_cmd_waitQ, scst_get_cmd_counter() == 0);
++
++ TRACE_MGMT_DBG("wait_event() returned %d", res);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/**
++ * scst_suspend_activity() - globally suspend any activity
++ *
++ * Description:
++ * Globally suspends any activity and doesn't return, until there are any
++ * active commands (state after SCST_CMD_STATE_INIT). If "interruptible"
++ * is true, it returns after SCST_SUSPENDING_TIMEOUT or if it was interrupted
++ * by a signal with the corresponding error status < 0. If "interruptible"
++ * is false, it will wait virtually forever. On success returns 0.
++ *
++ * New arriving commands stay in the suspended state until
++ * scst_resume_activity() is called.
++ */
++int scst_suspend_activity(bool interruptible)
++{
++ int res = 0;
++ bool rep = false;
++
++ TRACE_ENTRY();
++
++ rwlock_acquire_read(&scst_suspend_dep_map, 0, 0, _RET_IP_);
++
++ if (interruptible) {
++ if (mutex_lock_interruptible(&scst_suspend_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++ } else
++ mutex_lock(&scst_suspend_mutex);
++
++ TRACE_MGMT_DBG("suspend_count %d", suspend_count);
++ suspend_count++;
++ if (suspend_count > 1)
++ goto out_up;
++
++ set_bit(SCST_FLAG_SUSPENDING, &scst_flags);
++ set_bit(SCST_FLAG_SUSPENDED, &scst_flags);
++ /*
++ * Assignment of SCST_FLAG_SUSPENDING and SCST_FLAG_SUSPENDED must be
++ * ordered with cpu_cmd_count in scst_get(). Otherwise lockless logic in
++ * scst_translate_lun() and scst_mgmt_translate_lun() won't work.
++ */
++ smp_mb__after_set_bit();
++
++ /*
++ * See comment in scst_user.c::dev_user_task_mgmt_fn() for more
++ * information about scst_user behavior.
++ *
++ * ToDo: make the global suspending unneeded (switch to per-device
++ * reference counting? That would mean to switch off from lockless
++ * implementation of scst_translate_lun().. )
++ */
++
++ if (scst_get_cmd_counter() != 0) {
++ PRINT_INFO("Waiting for %d active commands to complete... This "
++ "might take few minutes for disks or few hours for "
++ "tapes, if you use long executed commands, like "
++ "REWIND or FORMAT. In case, if you have a hung user "
++ "space device (i.e. made using scst_user module) not "
++ "responding to any commands, if might take virtually "
++ "forever until the corresponding user space "
++ "program recovers and starts responding or gets "
++ "killed.", scst_get_cmd_counter());
++ rep = true;
++
++ lock_contended(&scst_suspend_dep_map, _RET_IP_);
++ }
++
++ res = scst_susp_wait(interruptible);
++ if (res != 0)
++ goto out_clear;
++
++ clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
++ /* See comment about smp_mb() above */
++ smp_mb__after_clear_bit();
++
++ TRACE_MGMT_DBG("Waiting for %d active commands finally to complete",
++ scst_get_cmd_counter());
++
++ res = scst_susp_wait(interruptible);
++ if (res != 0)
++ goto out_clear;
++
++ if (rep)
++ PRINT_INFO("%s", "All active commands completed");
++
++out_up:
++ mutex_unlock(&scst_suspend_mutex);
++
++out:
++ if (res == 0)
++ lock_acquired(&scst_suspend_dep_map, _RET_IP_);
++ else
++ rwlock_release(&scst_suspend_dep_map, 1, _RET_IP_);
++
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_clear:
++ clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
++ /* See comment about smp_mb() above */
++ smp_mb__after_clear_bit();
++ goto out_up;
++}
++EXPORT_SYMBOL_GPL(scst_suspend_activity);
++
++static void __scst_resume_activity(void)
++{
++ struct scst_cmd_threads *l;
++
++ TRACE_ENTRY();
++
++ suspend_count--;
++ TRACE_MGMT_DBG("suspend_count %d left", suspend_count);
++ if (suspend_count > 0)
++ goto out;
++
++ clear_bit(SCST_FLAG_SUSPENDED, &scst_flags);
++ /*
++ * The barrier is needed to make sure all woken up threads see the
++ * cleared flag. Not sure if it's really needed, but let's be safe.
++ */
++ smp_mb__after_clear_bit();
++
++ list_for_each_entry(l, &scst_cmd_threads_list, lists_list_entry) {
++ wake_up_all(&l->cmd_list_waitQ);
++ }
++ wake_up_all(&scst_init_cmd_list_waitQ);
++
++ spin_lock_irq(&scst_mcmd_lock);
++ if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
++ struct scst_mgmt_cmd *m;
++ m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
++ mgmt_cmd_list_entry);
++ TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to head of active "
++ "mgmt cmd list", m);
++ list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
++ }
++ spin_unlock_irq(&scst_mcmd_lock);
++ wake_up_all(&scst_mgmt_cmd_list_waitQ);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ * scst_resume_activity() - globally resume all activities
++ *
++ * Resumes suspended by scst_suspend_activity() activities.
++ */
++void scst_resume_activity(void)
++{
++ TRACE_ENTRY();
++
++ rwlock_release(&scst_suspend_dep_map, 1, _RET_IP_);
++
++ mutex_lock(&scst_suspend_mutex);
++ __scst_resume_activity();
++ mutex_unlock(&scst_suspend_mutex);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_resume_activity);
++
++static int scst_register_device(struct scsi_device *scsidp)
++{
++ int res;
++ struct scst_device *dev, *d;
++
++ TRACE_ENTRY();
++
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res != 0)
++ goto out;
++
++ res = scst_alloc_device(GFP_KERNEL, &dev);
++ if (res != 0)
++ goto out_unlock;
++
++ dev->type = scsidp->type;
++
++ dev->virt_name = kasprintf(GFP_KERNEL, "%d:%d:%d:%d",
++ scsidp->host->host_no,
++ scsidp->channel, scsidp->id, scsidp->lun);
++ if (dev->virt_name == NULL) {
++ PRINT_ERROR("%s", "Unable to alloc device name");
++ res = -ENOMEM;
++ goto out_free_dev;
++ }
++
++ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
++ if (strcmp(d->virt_name, dev->virt_name) == 0) {
++ PRINT_ERROR("Device %s already exists", dev->virt_name);
++ res = -EEXIST;
++ goto out_free_dev;
++ }
++ }
++
++ dev->scsi_dev = scsidp;
++
++ list_add_tail(&dev->dev_list_entry, &scst_dev_list);
++
++ mutex_unlock(&scst_mutex);
++
++ res = scst_dev_sysfs_create(dev);
++ if (res != 0)
++ goto out_del;
++
++ PRINT_INFO("Attached to scsi%d, channel %d, id %d, lun %d, "
++ "type %d", scsidp->host->host_no, scsidp->channel,
++ scsidp->id, scsidp->lun, scsidp->type);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_del:
++ list_del(&dev->dev_list_entry);
++
++out_free_dev:
++ scst_free_device(dev);
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++ goto out;
++}
++
++static void scst_unregister_device(struct scsi_device *scsidp)
++{
++ struct scst_device *d, *dev = NULL;
++ struct scst_acg_dev *acg_dev, *aa;
++
++ TRACE_ENTRY();
++
++ scst_suspend_activity(false);
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
++ if (d->scsi_dev == scsidp) {
++ dev = d;
++ TRACE_DBG("Device %p found", dev);
++ break;
++ }
++ }
++ if (dev == NULL) {
++ PRINT_ERROR("SCST device for SCSI device %d:%d:%d:%d not found",
++ scsidp->host->host_no, scsidp->channel, scsidp->id,
++ scsidp->lun);
++ goto out_unlock;
++ }
++
++ dev->dev_unregistering = 1;
++
++ list_del(&dev->dev_list_entry);
++
++ scst_dg_dev_remove_by_dev(dev);
++
++ scst_assign_dev_handler(dev, &scst_null_devtype);
++
++ list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
++ dev_acg_dev_list_entry) {
++ scst_acg_del_lun(acg_dev->acg, acg_dev->lun, true);
++ }
++
++ mutex_unlock(&scst_mutex);
++
++ scst_resume_activity();
++
++ scst_dev_sysfs_del(dev);
++
++ PRINT_INFO("Detached from scsi%d, channel %d, id %d, lun %d, type %d",
++ scsidp->host->host_no, scsidp->channel, scsidp->id,
++ scsidp->lun, scsidp->type);
++
++ scst_free_device(dev);
++
++out:
++ TRACE_EXIT();
++ return;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++ scst_resume_activity();
++ goto out;
++}
++
++static int scst_dev_handler_check(struct scst_dev_type *dev_handler)
++{
++ int res = 0;
++
++ if (dev_handler->parse == NULL) {
++ PRINT_ERROR("scst dev handler %s must have "
++ "parse() method.", dev_handler->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (((dev_handler->add_device != NULL) &&
++ (dev_handler->del_device == NULL)) ||
++ ((dev_handler->add_device == NULL) &&
++ (dev_handler->del_device != NULL))) {
++ PRINT_ERROR("Dev handler %s must either define both "
++ "add_device() and del_device(), or none.",
++ dev_handler->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (dev_handler->alloc_data_buf == NULL)
++ dev_handler->alloc_data_buf_atomic = 1;
++
++ if (dev_handler->dev_done == NULL)
++ dev_handler->dev_done_atomic = 1;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_check_device_name(const char *dev_name)
++{
++ int res = 0;
++
++ if (strchr(dev_name, '/') != NULL) {
++ PRINT_ERROR("Dev name %s contains illegal character '/'",
++ dev_name);
++ res = -EINVAL;
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/**
++ * scst_register_virtual_device() - register a virtual device.
++ * @dev_handler: the device's device handler
++ * @dev_name: the new device name, NULL-terminated string. Must be uniq
++ * among all virtual devices in the system.
++ *
++ * Registers a virtual device and returns ID assigned to the device on
++ * success, or negative value otherwise
++ */
++int scst_register_virtual_device(struct scst_dev_type *dev_handler,
++ const char *dev_name)
++{
++ int res;
++ struct scst_device *dev, *d;
++ bool sysfs_del = false;
++
++ TRACE_ENTRY();
++
++ if (dev_handler == NULL) {
++ PRINT_ERROR("%s: valid device handler must be supplied",
++ __func__);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (dev_name == NULL) {
++ PRINT_ERROR("%s: device name must be non-NULL", __func__);
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = scst_check_device_name(dev_name);
++ if (res != 0)
++ goto out;
++
++ res = scst_dev_handler_check(dev_handler);
++ if (res != 0)
++ goto out;
++
++ res = scst_suspend_activity(true);
++ if (res != 0)
++ goto out;
++
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res != 0)
++ goto out_resume;
++
++ res = scst_alloc_device(GFP_KERNEL, &dev);
++ if (res != 0)
++ goto out_unlock;
++
++ dev->type = dev_handler->type;
++ dev->scsi_dev = NULL;
++ dev->virt_name = kstrdup(dev_name, GFP_KERNEL);
++ if (dev->virt_name == NULL) {
++ PRINT_ERROR("Unable to allocate virt_name for dev %s",
++ dev_name);
++ res = -ENOMEM;
++ goto out_free_dev;
++ }
++
++ while (1) {
++ dev->virt_id = scst_virt_dev_last_id++;
++ if (dev->virt_id > 0)
++ break;
++ scst_virt_dev_last_id = 1;
++ }
++
++ res = dev->virt_id;
++
++ res = scst_pr_init_dev(dev);
++ if (res != 0)
++ goto out_free_dev;
++
++ /*
++ * We can drop scst_mutex, because we have not yet added the dev in
++ * scst_dev_list, so it "doesn't exist" yet.
++ */
++ mutex_unlock(&scst_mutex);
++
++ res = scst_dev_sysfs_create(dev);
++ if (res != 0)
++ goto out_lock_pr_clear_dev;
++
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
++ if (strcmp(d->virt_name, dev_name) == 0) {
++ PRINT_ERROR("Device %s already exists", dev_name);
++ res = -EEXIST;
++ sysfs_del = true;
++ goto out_pr_clear_dev;
++ }
++ }
++
++ res = scst_assign_dev_handler(dev, dev_handler);
++ if (res != 0) {
++ sysfs_del = true;
++ goto out_pr_clear_dev;
++ }
++
++ list_add_tail(&dev->dev_list_entry, &scst_dev_list);
++
++ mutex_unlock(&scst_mutex);
++ scst_resume_activity();
++
++ res = dev->virt_id;
++
++ PRINT_INFO("Attached to virtual device %s (id %d)", dev_name, res);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_lock_pr_clear_dev:
++ mutex_lock(&scst_mutex);
++
++out_pr_clear_dev:
++ scst_pr_clear_dev(dev);
++
++out_free_dev:
++ mutex_unlock(&scst_mutex);
++ if (sysfs_del)
++ scst_dev_sysfs_del(dev);
++ scst_free_device(dev);
++ goto out_resume;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++
++out_resume:
++ scst_resume_activity();
++ goto out;
++}
++EXPORT_SYMBOL_GPL(scst_register_virtual_device);
++
++/**
++ * scst_unregister_virtual_device() - unegister a virtual device.
++ * @id: the device's ID, returned by the registration function
++ */
++void scst_unregister_virtual_device(int id)
++{
++ struct scst_device *d, *dev = NULL;
++ struct scst_acg_dev *acg_dev, *aa;
++
++ TRACE_ENTRY();
++
++ scst_suspend_activity(false);
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
++ if (d->virt_id == id) {
++ dev = d;
++ TRACE_DBG("Virtual device %p (id %d) found", dev, id);
++ break;
++ }
++ }
++ if (dev == NULL) {
++ PRINT_ERROR("Virtual device (id %d) not found", id);
++ goto out_unlock;
++ }
++
++ dev->dev_unregistering = 1;
++
++ list_del(&dev->dev_list_entry);
++
++ scst_pr_clear_dev(dev);
++
++ scst_dg_dev_remove_by_dev(dev);
++
++ scst_assign_dev_handler(dev, &scst_null_devtype);
++
++ list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
++ dev_acg_dev_list_entry) {
++ scst_acg_del_lun(acg_dev->acg, acg_dev->lun, true);
++ }
++
++ mutex_unlock(&scst_mutex);
++ scst_resume_activity();
++
++ scst_dev_sysfs_del(dev);
++
++ PRINT_INFO("Detached from virtual device %s (id %d)",
++ dev->virt_name, dev->virt_id);
++
++ scst_free_device(dev);
++
++out:
++ TRACE_EXIT();
++ return;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++ scst_resume_activity();
++ goto out;
++}
++EXPORT_SYMBOL_GPL(scst_unregister_virtual_device);
++
++/**
++ * __scst_register_dev_driver() - register pass-through dev handler driver
++ * @dev_type: dev handler template
++ * @version: SCST_INTERFACE_VERSION version string to ensure that
++ * SCST core and the dev handler use the same version of
++ * the SCST interface
++ *
++ * Description:
++ * Registers a pass-through dev handler driver. Returns 0 on success
++ * or appropriate error code otherwise.
++ */
++int __scst_register_dev_driver(struct scst_dev_type *dev_type,
++ const char *version)
++{
++ int res, exist;
++ struct scst_dev_type *dt;
++
++ TRACE_ENTRY();
++
++ res = -EINVAL;
++ if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
++ PRINT_ERROR("Incorrect version of dev handler %s",
++ dev_type->name);
++ goto out;
++ }
++
++ res = scst_dev_handler_check(dev_type);
++ if (res != 0)
++ goto out;
++
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res != 0)
++ goto out;
++
++ exist = 0;
++ list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
++ if (strcmp(dt->name, dev_type->name) == 0) {
++ PRINT_ERROR("Device type handler \"%s\" already "
++ "exists", dt->name);
++ exist = 1;
++ break;
++ }
++ }
++ if (exist)
++ goto out_unlock;
++
++ list_add_tail(&dev_type->dev_type_list_entry, &scst_dev_type_list);
++
++ mutex_unlock(&scst_mutex);
++
++ res = scst_devt_sysfs_create(dev_type);
++ if (res < 0)
++ goto out;
++
++ PRINT_INFO("Device handler \"%s\" for type %d registered "
++ "successfully", dev_type->name, dev_type->type);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++ goto out;
++}
++EXPORT_SYMBOL_GPL(__scst_register_dev_driver);
++
++/**
++ * scst_unregister_dev_driver() - unregister pass-through dev handler driver
++ */
++void scst_unregister_dev_driver(struct scst_dev_type *dev_type)
++{
++ struct scst_device *dev;
++ struct scst_dev_type *dt;
++ int found = 0;
++
++ TRACE_ENTRY();
++
++ scst_suspend_activity(false);
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
++ if (strcmp(dt->name, dev_type->name) == 0) {
++ found = 1;
++ break;
++ }
++ }
++ if (!found) {
++ PRINT_ERROR("Dev handler \"%s\" isn't registered",
++ dev_type->name);
++ goto out_up;
++ }
++
++ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
++ if (dev->handler == dev_type) {
++ scst_assign_dev_handler(dev, &scst_null_devtype);
++ TRACE_DBG("Dev handler removed from device %p", dev);
++ }
++ }
++
++ list_del(&dev_type->dev_type_list_entry);
++
++ mutex_unlock(&scst_mutex);
++ scst_resume_activity();
++
++ scst_devt_sysfs_del(dev_type);
++
++ PRINT_INFO("Device handler \"%s\" for type %d unloaded",
++ dev_type->name, dev_type->type);
++
++out:
++ TRACE_EXIT();
++ return;
++
++out_up:
++ mutex_unlock(&scst_mutex);
++ scst_resume_activity();
++ goto out;
++}
++EXPORT_SYMBOL_GPL(scst_unregister_dev_driver);
++
++/**
++ * __scst_register_virtual_dev_driver() - register virtual dev handler driver
++ * @dev_type: dev handler template
++ * @version: SCST_INTERFACE_VERSION version string to ensure that
++ * SCST core and the dev handler use the same version of
++ * the SCST interface
++ *
++ * Description:
++ * Registers a virtual dev handler driver. Returns 0 on success or
++ * appropriate error code otherwise.
++ */
++int __scst_register_virtual_dev_driver(struct scst_dev_type *dev_type,
++ const char *version)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
++ PRINT_ERROR("Incorrect version of virtual dev handler %s",
++ dev_type->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = scst_dev_handler_check(dev_type);
++ if (res != 0)
++ goto out;
++
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res != 0)
++ goto out;
++ list_add_tail(&dev_type->dev_type_list_entry, &scst_virtual_dev_type_list);
++ mutex_unlock(&scst_mutex);
++
++ res = scst_devt_sysfs_create(dev_type);
++ if (res < 0)
++ goto out;
++
++ if (dev_type->type != -1) {
++ PRINT_INFO("Virtual device handler %s for type %d "
++ "registered successfully", dev_type->name,
++ dev_type->type);
++ } else {
++ PRINT_INFO("Virtual device handler \"%s\" registered "
++ "successfully", dev_type->name);
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL_GPL(__scst_register_virtual_dev_driver);
++
++/**
++ * scst_unregister_virtual_dev_driver() - unregister virtual dev driver
++ */
++void scst_unregister_virtual_dev_driver(struct scst_dev_type *dev_type)
++{
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_mutex);
++
++ /* Disable sysfs mgmt calls (e.g. addition of new devices) */
++ list_del(&dev_type->dev_type_list_entry);
++
++ /* Wait for outstanding sysfs mgmt calls completed */
++ while (dev_type->devt_active_sysfs_works_count > 0) {
++ mutex_unlock(&scst_mutex);
++ msleep(100);
++ mutex_lock(&scst_mutex);
++ }
++
++ mutex_unlock(&scst_mutex);
++
++ scst_devt_sysfs_del(dev_type);
++
++ PRINT_INFO("Device handler \"%s\" unloaded", dev_type->name);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_unregister_virtual_dev_driver);
++
++/* scst_mutex supposed to be held */
++int scst_add_threads(struct scst_cmd_threads *cmd_threads,
++ struct scst_device *dev, struct scst_tgt_dev *tgt_dev, int num)
++{
++ int res = 0, i;
++ struct scst_cmd_thread_t *thr;
++ int n = 0, tgt_dev_num = 0;
++
++ TRACE_ENTRY();
++
++ if (num == 0) {
++ res = 0;
++ goto out;
++ }
++
++ list_for_each_entry(thr, &cmd_threads->threads_list, thread_list_entry) {
++ n++;
++ }
++
++ TRACE_DBG("cmd_threads %p, dev %s, tgt_dev %p, num %d, n %d",
++ cmd_threads, dev ? dev->virt_name : NULL, tgt_dev, num, n);
++
++ if (tgt_dev != NULL) {
++ struct scst_tgt_dev *t;
++ list_for_each_entry(t, &tgt_dev->dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ if (t == tgt_dev)
++ break;
++ tgt_dev_num++;
++ }
++ }
++
++ for (i = 0; i < num; i++) {
++ thr = kmalloc(sizeof(*thr), GFP_KERNEL);
++ if (!thr) {
++ res = -ENOMEM;
++ PRINT_ERROR("Fail to allocate thr %d", res);
++ goto out_wait;
++ }
++
++ if (dev != NULL) {
++ thr->cmd_thread = kthread_create(scst_cmd_thread,
++ cmd_threads, "%.13s%d", dev->virt_name, n++);
++ } else if (tgt_dev != NULL) {
++ thr->cmd_thread = kthread_create(scst_cmd_thread,
++ cmd_threads, "%.10s%d_%d",
++ tgt_dev->dev->virt_name, tgt_dev_num, n++);
++ } else
++ thr->cmd_thread = kthread_create(scst_cmd_thread,
++ cmd_threads, "scstd%d", n++);
++
++ if (IS_ERR(thr->cmd_thread)) {
++ res = PTR_ERR(thr->cmd_thread);
++ PRINT_ERROR("kthread_create() failed: %d", res);
++ kfree(thr);
++ goto out_wait;
++ }
++
++ if (tgt_dev != NULL) {
++ int rc;
++ /*
++ * sess->acg can be NULL here, if called from
++ * scst_check_reassign_sess()!
++ */
++ rc = set_cpus_allowed_ptr(thr->cmd_thread,
++ &tgt_dev->acg_dev->acg->acg_cpu_mask);
++ if (rc != 0)
++ PRINT_ERROR("Setting CPU affinity failed: "
++ "%d", rc);
++ }
++
++ list_add(&thr->thread_list_entry, &cmd_threads->threads_list);
++ cmd_threads->nr_threads++;
++
++ TRACE_DBG("Added thr %p to threads list (nr_threads %d, n %d)",
++ thr, cmd_threads->nr_threads, n);
++
++ wake_up_process(thr->cmd_thread);
++ }
++
++out_wait:
++ if (i > 0 && cmd_threads != &scst_main_cmd_threads) {
++ /*
++ * Wait for io_context gets initialized to avoid possible races
++ * for it from the sharing it tgt_devs.
++ */
++ while (!*(volatile bool*)&cmd_threads->io_context_ready) {
++ TRACE_DBG("Waiting for io_context for cmd_threads %p "
++ "initialized", cmd_threads);
++ msleep(50);
++ }
++ smp_rmb();
++ }
++
++ if (res != 0)
++ scst_del_threads(cmd_threads, i);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* scst_mutex supposed to be held */
++void scst_del_threads(struct scst_cmd_threads *cmd_threads, int num)
++{
++ struct scst_cmd_thread_t *ct, *tmp;
++
++ TRACE_ENTRY();
++
++ if (num == 0)
++ goto out;
++
++ list_for_each_entry_safe_reverse(ct, tmp, &cmd_threads->threads_list,
++ thread_list_entry) {
++ int rc;
++ struct scst_device *dev;
++
++ rc = kthread_stop(ct->cmd_thread);
++ if (rc != 0 && rc != -EINTR)
++ TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
++
++ list_del(&ct->thread_list_entry);
++
++ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
++ struct scst_tgt_dev *tgt_dev;
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ scst_del_thr_data(tgt_dev, ct->cmd_thread);
++ }
++ }
++
++ kfree(ct);
++
++ cmd_threads->nr_threads--;
++
++ --num;
++ if (num == 0)
++ break;
++ }
++
++ EXTRACHECKS_BUG_ON((cmd_threads->nr_threads == 0) &&
++ (cmd_threads->io_context != NULL));
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* The activity supposed to be suspended and scst_mutex held */
++void scst_stop_dev_threads(struct scst_device *dev)
++{
++ struct scst_tgt_dev *tgt_dev;
++
++ TRACE_ENTRY();
++
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ scst_tgt_dev_stop_threads(tgt_dev);
++ }
++
++ if ((dev->threads_num > 0) &&
++ (dev->threads_pool_type == SCST_THREADS_POOL_SHARED))
++ scst_del_threads(&dev->dev_cmd_threads, -1);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* The activity supposed to be suspended and scst_mutex held */
++int scst_create_dev_threads(struct scst_device *dev)
++{
++ int res = 0;
++ struct scst_tgt_dev *tgt_dev;
++
++ TRACE_ENTRY();
++
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ res = scst_tgt_dev_setup_threads(tgt_dev);
++ if (res != 0)
++ goto out_err;
++ }
++
++ if ((dev->threads_num > 0) &&
++ (dev->threads_pool_type == SCST_THREADS_POOL_SHARED)) {
++ res = scst_add_threads(&dev->dev_cmd_threads, dev, NULL,
++ dev->threads_num);
++ if (res != 0)
++ goto out_err;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_err:
++ scst_stop_dev_threads(dev);
++ goto out;
++}
++
++/* The activity supposed to be suspended and scst_mutex held */
++int scst_assign_dev_handler(struct scst_device *dev,
++ struct scst_dev_type *handler)
++{
++ int res = 0;
++ struct scst_tgt_dev *tgt_dev;
++ LIST_HEAD(attached_tgt_devs);
++
++ TRACE_ENTRY();
++
++ BUG_ON(handler == NULL);
++
++ if (dev->handler == handler)
++ goto out;
++
++ if (dev->handler == NULL)
++ goto assign;
++
++ if (dev->handler->detach_tgt) {
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ TRACE_DBG("Calling dev handler's detach_tgt(%p)",
++ tgt_dev);
++ dev->handler->detach_tgt(tgt_dev);
++ TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
++ }
++ }
++
++ /*
++ * devt_dev sysfs must be created AFTER attach() and deleted BEFORE
++ * detach() to avoid calls from sysfs for not yet ready or already dead
++ * objects.
++ */
++ scst_devt_dev_sysfs_del(dev);
++
++ if (dev->handler->detach) {
++ TRACE_DBG("%s", "Calling dev handler's detach()");
++ dev->handler->detach(dev);
++ TRACE_DBG("%s", "Old handler's detach() returned");
++ }
++
++ scst_stop_dev_threads(dev);
++
++assign:
++ dev->handler = handler;
++
++ if (handler == NULL)
++ goto out;
++
++ dev->threads_num = handler->threads_num;
++ dev->threads_pool_type = handler->threads_pool_type;
++
++ if (handler->attach) {
++ TRACE_DBG("Calling new dev handler's attach(%p)", dev);
++ res = handler->attach(dev);
++ TRACE_DBG("New dev handler's attach() returned %d", res);
++ if (res != 0) {
++ PRINT_ERROR("New device handler's %s attach() "
++ "failed: %d", handler->name, res);
++ goto out;
++ }
++ }
++
++ res = scst_devt_dev_sysfs_create(dev);
++ if (res != 0)
++ goto out_detach;
++
++ if (handler->attach_tgt) {
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ TRACE_DBG("Calling dev handler's attach_tgt(%p)",
++ tgt_dev);
++ res = handler->attach_tgt(tgt_dev);
++ TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
++ if (res != 0) {
++ PRINT_ERROR("Device handler's %s attach_tgt() "
++ "failed: %d", handler->name, res);
++ goto out_err_remove_sysfs;
++ }
++ list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
++ &attached_tgt_devs);
++ }
++ }
++
++ res = scst_create_dev_threads(dev);
++ if (res != 0)
++ goto out_err_detach_tgt;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_err_detach_tgt:
++ if (handler && handler->detach_tgt) {
++ list_for_each_entry(tgt_dev, &attached_tgt_devs,
++ extra_tgt_dev_list_entry) {
++ TRACE_DBG("Calling handler's detach_tgt(%p)",
++ tgt_dev);
++ handler->detach_tgt(tgt_dev);
++ TRACE_DBG("%s", "Handler's detach_tgt() returned");
++ }
++ }
++
++out_err_remove_sysfs:
++ scst_devt_dev_sysfs_del(dev);
++
++out_detach:
++ if (handler && handler->detach) {
++ TRACE_DBG("%s", "Calling handler's detach()");
++ handler->detach(dev);
++ TRACE_DBG("%s", "Handler's detach() returned");
++ }
++
++ dev->handler = &scst_null_devtype;
++ dev->threads_num = scst_null_devtype.threads_num;
++ dev->threads_pool_type = scst_null_devtype.threads_pool_type;
++ goto out;
++}
++
++/**
++ * scst_init_threads() - initialize SCST processing threads pool
++ *
++ * Initializes scst_cmd_threads structure
++ */
++void scst_init_threads(struct scst_cmd_threads *cmd_threads)
++{
++ TRACE_ENTRY();
++
++ spin_lock_init(&cmd_threads->cmd_list_lock);
++ INIT_LIST_HEAD(&cmd_threads->active_cmd_list);
++ init_waitqueue_head(&cmd_threads->cmd_list_waitQ);
++ INIT_LIST_HEAD(&cmd_threads->threads_list);
++ mutex_init(&cmd_threads->io_context_mutex);
++
++ mutex_lock(&scst_suspend_mutex);
++ list_add_tail(&cmd_threads->lists_list_entry,
++ &scst_cmd_threads_list);
++ mutex_unlock(&scst_suspend_mutex);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_init_threads);
++
++/**
++ * scst_deinit_threads() - deinitialize SCST processing threads pool
++ *
++ * Deinitializes scst_cmd_threads structure
++ */
++void scst_deinit_threads(struct scst_cmd_threads *cmd_threads)
++{
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_suspend_mutex);
++ list_del(&cmd_threads->lists_list_entry);
++ mutex_unlock(&scst_suspend_mutex);
++
++ BUG_ON(cmd_threads->io_context);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_deinit_threads);
++
++static void scst_stop_global_threads(void)
++{
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_mutex);
++
++ scst_del_threads(&scst_main_cmd_threads, -1);
++
++ if (scst_mgmt_cmd_thread)
++ kthread_stop(scst_mgmt_cmd_thread);
++ if (scst_mgmt_thread)
++ kthread_stop(scst_mgmt_thread);
++ if (scst_init_cmd_thread)
++ kthread_stop(scst_init_cmd_thread);
++
++ mutex_unlock(&scst_mutex);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* It does NOT stop ran threads on error! */
++static int scst_start_global_threads(int num)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_mutex);
++
++ res = scst_add_threads(&scst_main_cmd_threads, NULL, NULL, num);
++ if (res < 0)
++ goto out_unlock;
++
++ scst_init_cmd_thread = kthread_run(scst_init_thread,
++ NULL, "scst_initd");
++ if (IS_ERR(scst_init_cmd_thread)) {
++ res = PTR_ERR(scst_init_cmd_thread);
++ PRINT_ERROR("kthread_create() for init cmd failed: %d", res);
++ scst_init_cmd_thread = NULL;
++ goto out_unlock;
++ }
++
++ scst_mgmt_cmd_thread = kthread_run(scst_tm_thread,
++ NULL, "scsi_tm");
++ if (IS_ERR(scst_mgmt_cmd_thread)) {
++ res = PTR_ERR(scst_mgmt_cmd_thread);
++ PRINT_ERROR("kthread_create() for TM failed: %d", res);
++ scst_mgmt_cmd_thread = NULL;
++ goto out_unlock;
++ }
++
++ scst_mgmt_thread = kthread_run(scst_global_mgmt_thread,
++ NULL, "scst_mgmtd");
++ if (IS_ERR(scst_mgmt_thread)) {
++ res = PTR_ERR(scst_mgmt_thread);
++ PRINT_ERROR("kthread_create() for mgmt failed: %d", res);
++ scst_mgmt_thread = NULL;
++ goto out_unlock;
++ }
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/**
++ * scst_get_setup_id() - return SCST setup ID
++ *
++ * Returns SCST setup ID. This ID can be used for multiple
++ * setups with the same configuration.
++ */
++unsigned int scst_get_setup_id(void)
++{
++ return scst_setup_id;
++}
++EXPORT_SYMBOL_GPL(scst_get_setup_id);
++
++static int scst_add(struct device *cdev, struct class_interface *intf)
++{
++ struct scsi_device *scsidp;
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ scsidp = to_scsi_device(cdev->parent);
++
++ if ((scsidp->host->hostt->name == NULL) ||
++ (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0))
++ res = scst_register_device(scsidp);
++
++ TRACE_EXIT();
++ return res;
++}
++
++static void scst_remove(struct device *cdev, struct class_interface *intf)
++{
++ struct scsi_device *scsidp;
++
++ TRACE_ENTRY();
++
++ scsidp = to_scsi_device(cdev->parent);
++
++ if ((scsidp->host->hostt->name == NULL) ||
++ (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0))
++ scst_unregister_device(scsidp);
++
++ TRACE_EXIT();
++ return;
++}
++
++static struct class_interface scst_interface = {
++ .add_dev = scst_add,
++ .remove_dev = scst_remove,
++};
++
++static void __init scst_print_config(void)
++{
++ char buf[128];
++ int i, j;
++
++ i = snprintf(buf, sizeof(buf), "Enabled features: ");
++ j = i;
++
++#ifdef CONFIG_SCST_STRICT_SERIALIZING
++ i += snprintf(&buf[i], sizeof(buf) - i, "STRICT_SERIALIZING");
++#endif
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sEXTRACHECKS",
++ (j == i) ? "" : ", ");
++#endif
++
++#ifdef CONFIG_SCST_TRACING
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sTRACING",
++ (j == i) ? "" : ", ");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG",
++ (j == i) ? "" : ", ");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG_TM
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_TM",
++ (j == i) ? "" : ", ");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG_RETRY
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_RETRY",
++ (j == i) ? "" : ", ");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG_OOM
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_OOM",
++ (j == i) ? "" : ", ");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG_SN
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_SN",
++ (j == i) ? "" : ", ");
++#endif
++
++#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sUSE_EXPECTED_VALUES",
++ (j == i) ? "" : ", ");
++#endif
++
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ i += snprintf(&buf[i], sizeof(buf) - i,
++ "%sTEST_IO_IN_SIRQ",
++ (j == i) ? "" : ", ");
++#endif
++
++#ifdef CONFIG_SCST_STRICT_SECURITY
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sSTRICT_SECURITY",
++ (j == i) ? "" : ", ");
++#endif
++
++ if (j != i)
++ PRINT_INFO("%s", buf);
++}
++
++static int __init init_scst(void)
++{
++ int res, i;
++ int scst_num_cpus;
++
++ TRACE_ENTRY();
++
++ {
++ struct scsi_sense_hdr *shdr;
++ struct scst_order_data *o;
++ struct scst_cmd *c;
++ BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE < sizeof(*shdr));
++ BUILD_BUG_ON(sizeof(o->curr_sn) != sizeof(o->expected_sn));
++ BUILD_BUG_ON(sizeof(c->sn) != sizeof(o->expected_sn));
++ }
++
++ mutex_init(&scst_mutex);
++ mutex_init(&scst_mutex2);
++ INIT_LIST_HEAD(&scst_template_list);
++ INIT_LIST_HEAD(&scst_dev_list);
++ INIT_LIST_HEAD(&scst_dev_type_list);
++ INIT_LIST_HEAD(&scst_virtual_dev_type_list);
++ spin_lock_init(&scst_main_lock);
++ spin_lock_init(&scst_init_lock);
++ init_waitqueue_head(&scst_init_cmd_list_waitQ);
++ INIT_LIST_HEAD(&scst_init_cmd_list);
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ scst_trace_flag = SCST_DEFAULT_LOG_FLAGS;
++#endif
++ spin_lock_init(&scst_mcmd_lock);
++ INIT_LIST_HEAD(&scst_active_mgmt_cmd_list);
++ INIT_LIST_HEAD(&scst_delayed_mgmt_cmd_list);
++ init_waitqueue_head(&scst_mgmt_cmd_list_waitQ);
++ init_waitqueue_head(&scst_mgmt_waitQ);
++ spin_lock_init(&scst_mgmt_lock);
++ INIT_LIST_HEAD(&scst_sess_init_list);
++ INIT_LIST_HEAD(&scst_sess_shut_list);
++ init_waitqueue_head(&scst_dev_cmd_waitQ);
++ mutex_init(&scst_suspend_mutex);
++ INIT_LIST_HEAD(&scst_cmd_threads_list);
++ cpus_setall(default_cpu_mask);
++
++ scst_init_threads(&scst_main_cmd_threads);
++
++ res = scst_lib_init();
++ if (res != 0)
++ goto out_deinit_threads;
++
++ scst_num_cpus = num_online_cpus();
++
++ /* ToDo: register_cpu_notifier() */
++
++ if (scst_threads == 0)
++ scst_threads = scst_num_cpus;
++
++ if (scst_threads < 1) {
++ PRINT_ERROR("%s", "scst_threads can not be less than 1");
++ scst_threads = scst_num_cpus;
++ }
++
++#define INIT_CACHEP(p, s, o) do { \
++ p = KMEM_CACHE(s, SCST_SLAB_FLAGS); \
++ TRACE_MEM("Slab create: %s at %p size %zd", #s, p, \
++ sizeof(struct s)); \
++ if (p == NULL) { \
++ res = -ENOMEM; \
++ goto o; \
++ } \
++ } while (0)
++
++ INIT_CACHEP(scst_mgmt_cachep, scst_mgmt_cmd, out_lib_exit);
++ INIT_CACHEP(scst_mgmt_stub_cachep, scst_mgmt_cmd_stub,
++ out_destroy_mgmt_cache);
++ INIT_CACHEP(scst_ua_cachep, scst_tgt_dev_UA,
++ out_destroy_mgmt_stub_cache);
++ {
++ struct scst_sense { uint8_t s[SCST_SENSE_BUFFERSIZE]; };
++ INIT_CACHEP(scst_sense_cachep, scst_sense,
++ out_destroy_ua_cache);
++ }
++ INIT_CACHEP(scst_aen_cachep, scst_aen, out_destroy_sense_cache);
++ INIT_CACHEP(scst_cmd_cachep, scst_cmd, out_destroy_aen_cache);
++ INIT_CACHEP(scst_sess_cachep, scst_session, out_destroy_cmd_cache);
++ INIT_CACHEP(scst_tgtd_cachep, scst_tgt_dev, out_destroy_sess_cache);
++ INIT_CACHEP(scst_acgd_cachep, scst_acg_dev, out_destroy_tgt_cache);
++
++ scst_mgmt_mempool = mempool_create(64, mempool_alloc_slab,
++ mempool_free_slab, scst_mgmt_cachep);
++ if (scst_mgmt_mempool == NULL) {
++ res = -ENOMEM;
++ goto out_destroy_acg_cache;
++ }
++
++ /*
++ * All mgmt stubs, UAs and sense buffers are bursty and loosing them
++ * may have fatal consequences, so let's have big pools for them.
++ */
++
++ scst_mgmt_stub_mempool = mempool_create(1024, mempool_alloc_slab,
++ mempool_free_slab, scst_mgmt_stub_cachep);
++ if (scst_mgmt_stub_mempool == NULL) {
++ res = -ENOMEM;
++ goto out_destroy_mgmt_mempool;
++ }
++
++ scst_ua_mempool = mempool_create(512, mempool_alloc_slab,
++ mempool_free_slab, scst_ua_cachep);
++ if (scst_ua_mempool == NULL) {
++ res = -ENOMEM;
++ goto out_destroy_mgmt_stub_mempool;
++ }
++
++ scst_sense_mempool = mempool_create(1024, mempool_alloc_slab,
++ mempool_free_slab, scst_sense_cachep);
++ if (scst_sense_mempool == NULL) {
++ res = -ENOMEM;
++ goto out_destroy_ua_mempool;
++ }
++
++ scst_aen_mempool = mempool_create(100, mempool_alloc_slab,
++ mempool_free_slab, scst_aen_cachep);
++ if (scst_aen_mempool == NULL) {
++ res = -ENOMEM;
++ goto out_destroy_sense_mempool;
++ }
++
++ res = scst_sysfs_init();
++ if (res != 0)
++ goto out_destroy_aen_mempool;
++
++ scst_tg_init();
++
++ if (scst_max_cmd_mem == 0) {
++ struct sysinfo si;
++ si_meminfo(&si);
++#if BITS_PER_LONG == 32
++ scst_max_cmd_mem = min(
++ (((uint64_t)(si.totalram - si.totalhigh) << PAGE_SHIFT)
++ >> 20) >> 2, (uint64_t)1 << 30);
++#else
++ scst_max_cmd_mem = (((si.totalram - si.totalhigh) << PAGE_SHIFT)
++ >> 20) >> 2;
++#endif
++ }
++
++ if (scst_max_dev_cmd_mem != 0) {
++ if (scst_max_dev_cmd_mem > scst_max_cmd_mem) {
++ PRINT_ERROR("scst_max_dev_cmd_mem (%d) > "
++ "scst_max_cmd_mem (%d)",
++ scst_max_dev_cmd_mem,
++ scst_max_cmd_mem);
++ scst_max_dev_cmd_mem = scst_max_cmd_mem;
++ }
++ } else
++ scst_max_dev_cmd_mem = scst_max_cmd_mem * 2 / 5;
++
++ res = scst_sgv_pools_init(
++ ((uint64_t)scst_max_cmd_mem << 10) >> (PAGE_SHIFT - 10), 0);
++ if (res != 0)
++ goto out_sysfs_cleanup;
++
++ res = scsi_register_interface(&scst_interface);
++ if (res != 0)
++ goto out_destroy_sgv_pool;
++
++ for (i = 0; i < (int)ARRAY_SIZE(scst_percpu_infos); i++) {
++ atomic_set(&scst_percpu_infos[i].cpu_cmd_count, 0);
++ spin_lock_init(&scst_percpu_infos[i].tasklet_lock);
++ INIT_LIST_HEAD(&scst_percpu_infos[i].tasklet_cmd_list);
++ tasklet_init(&scst_percpu_infos[i].tasklet,
++ (void *)scst_cmd_tasklet,
++ (unsigned long)&scst_percpu_infos[i]);
++ }
++
++ TRACE_DBG("%d CPUs found, starting %d threads", scst_num_cpus,
++ scst_threads);
++
++ res = scst_start_global_threads(scst_threads);
++ if (res < 0)
++ goto out_thread_free;
++
++ PRINT_INFO("SCST version %s loaded successfully (max mem for "
++ "commands %dMB, per device %dMB)", SCST_VERSION_STRING,
++ scst_max_cmd_mem, scst_max_dev_cmd_mem);
++
++ scst_print_config();
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_thread_free:
++ scst_stop_global_threads();
++
++ scsi_unregister_interface(&scst_interface);
++
++out_destroy_sgv_pool:
++ scst_sgv_pools_deinit();
++ scst_tg_cleanup();
++
++out_sysfs_cleanup:
++ scst_sysfs_cleanup();
++
++out_destroy_aen_mempool:
++ mempool_destroy(scst_aen_mempool);
++
++out_destroy_sense_mempool:
++ mempool_destroy(scst_sense_mempool);
++
++out_destroy_ua_mempool:
++ mempool_destroy(scst_ua_mempool);
++
++out_destroy_mgmt_stub_mempool:
++ mempool_destroy(scst_mgmt_stub_mempool);
++
++out_destroy_mgmt_mempool:
++ mempool_destroy(scst_mgmt_mempool);
++
++out_destroy_acg_cache:
++ kmem_cache_destroy(scst_acgd_cachep);
++
++out_destroy_tgt_cache:
++ kmem_cache_destroy(scst_tgtd_cachep);
++
++out_destroy_sess_cache:
++ kmem_cache_destroy(scst_sess_cachep);
++
++out_destroy_cmd_cache:
++ kmem_cache_destroy(scst_cmd_cachep);
++
++out_destroy_aen_cache:
++ kmem_cache_destroy(scst_aen_cachep);
++
++out_destroy_sense_cache:
++ kmem_cache_destroy(scst_sense_cachep);
++
++out_destroy_ua_cache:
++ kmem_cache_destroy(scst_ua_cachep);
++
++out_destroy_mgmt_stub_cache:
++ kmem_cache_destroy(scst_mgmt_stub_cachep);
++
++out_destroy_mgmt_cache:
++ kmem_cache_destroy(scst_mgmt_cachep);
++
++out_lib_exit:
++ scst_lib_exit();
++
++out_deinit_threads:
++ scst_deinit_threads(&scst_main_cmd_threads);
++ goto out;
++}
++
++static void __exit exit_scst(void)
++{
++ TRACE_ENTRY();
++
++ /* ToDo: unregister_cpu_notifier() */
++
++ scst_stop_global_threads();
++
++ scst_deinit_threads(&scst_main_cmd_threads);
++
++ scsi_unregister_interface(&scst_interface);
++
++ scst_sgv_pools_deinit();
++
++ scst_tg_cleanup();
++
++ scst_sysfs_cleanup();
++
++#define DEINIT_CACHEP(p) do { \
++ kmem_cache_destroy(p); \
++ p = NULL; \
++ } while (0)
++
++ mempool_destroy(scst_mgmt_mempool);
++ mempool_destroy(scst_mgmt_stub_mempool);
++ mempool_destroy(scst_ua_mempool);
++ mempool_destroy(scst_sense_mempool);
++ mempool_destroy(scst_aen_mempool);
++
++ DEINIT_CACHEP(scst_mgmt_cachep);
++ DEINIT_CACHEP(scst_mgmt_stub_cachep);
++ DEINIT_CACHEP(scst_ua_cachep);
++ DEINIT_CACHEP(scst_sense_cachep);
++ DEINIT_CACHEP(scst_aen_cachep);
++ DEINIT_CACHEP(scst_cmd_cachep);
++ DEINIT_CACHEP(scst_sess_cachep);
++ DEINIT_CACHEP(scst_tgtd_cachep);
++ DEINIT_CACHEP(scst_acgd_cachep);
++
++ scst_lib_exit();
++
++ PRINT_INFO("%s", "SCST unloaded");
++
++ TRACE_EXIT();
++ return;
++}
++
++module_init(init_scst);
++module_exit(exit_scst);
++
++MODULE_AUTHOR("Vladislav Bolkhovitin");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("SCSI target core");
++MODULE_VERSION(SCST_VERSION_STRING);
+diff -uprN orig/linux-2.6.39/drivers/scst/scst_module.c linux-2.6.39/drivers/scst/scst_module.c
+--- orig/linux-2.6.39/drivers/scst/scst_module.c
++++ linux-2.6.39/drivers/scst/scst_module.c
+@@ -0,0 +1,70 @@
++/*
++ * scst_module.c
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * Support for loading target modules. The usage is similar to scsi_module.c
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++
++#include <scst.h>
++
++static int __init init_this_scst_driver(void)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ res = scst_register_target_template(&driver_target_template);
++ TRACE_DBG("scst_register_target_template() returned %d", res);
++ if (res < 0)
++ goto out;
++
++#ifdef SCST_REGISTER_INITIATOR_DRIVER
++ driver_template.module = THIS_MODULE;
++ scsi_register_module(MODULE_SCSI_HA, &driver_template);
++ TRACE_DBG("driver_template.present=%d",
++ driver_template.present);
++ if (driver_template.present == 0) {
++ res = -ENODEV;
++ MOD_DEC_USE_COUNT;
++ goto out;
++ }
++#endif
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void __exit exit_this_scst_driver(void)
++{
++ TRACE_ENTRY();
++
++#ifdef SCST_REGISTER_INITIATOR_DRIVER
++ scsi_unregister_module(MODULE_SCSI_HA, &driver_template);
++#endif
++
++ scst_unregister_target_template(&driver_target_template);
++
++ TRACE_EXIT();
++ return;
++}
++
++module_init(init_this_scst_driver);
++module_exit(exit_this_scst_driver);
+diff -uprN orig/linux-2.6.39/drivers/scst/scst_priv.h linux-2.6.39/drivers/scst/scst_priv.h
+--- orig/linux-2.6.39/drivers/scst/scst_priv.h
++++ linux-2.6.39/drivers/scst/scst_priv.h
+@@ -0,0 +1,645 @@
++/*
++ * scst_priv.h
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef __SCST_PRIV_H
++#define __SCST_PRIV_H
++
++#include <linux/types.h>
++#include <linux/slab.h>
++#include <scsi/scsi.h>
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_driver.h>
++#include <scsi/scsi_device.h>
++#include <scsi/scsi_host.h>
++
++#define LOG_PREFIX "scst"
++
++#include <scst/scst_debug.h>
++
++#define TRACE_RTRY 0x80000000
++#define TRACE_SCSI_SERIALIZING 0x40000000
++/** top being the edge away from the interrupt */
++#define TRACE_SND_TOP 0x20000000
++#define TRACE_RCV_TOP 0x01000000
++/** bottom being the edge toward the interrupt */
++#define TRACE_SND_BOT 0x08000000
++#define TRACE_RCV_BOT 0x04000000
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++#define trace_flag scst_trace_flag
++extern unsigned long scst_trace_flag;
++#endif
++
++#ifdef CONFIG_SCST_DEBUG
++
++#define SCST_DEFAULT_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MINOR | TRACE_PID | \
++ TRACE_LINE | TRACE_FUNCTION | TRACE_SPECIAL | TRACE_MGMT | \
++ TRACE_MGMT_DEBUG | TRACE_RTRY)
++
++#define TRACE_RETRY(args...) TRACE_DBG_FLAG(TRACE_RTRY, args)
++#define TRACE_SN(args...) TRACE_DBG_FLAG(TRACE_SCSI_SERIALIZING, args)
++#define TRACE_SEND_TOP(args...) TRACE_DBG_FLAG(TRACE_SND_TOP, args)
++#define TRACE_RECV_TOP(args...) TRACE_DBG_FLAG(TRACE_RCV_TOP, args)
++#define TRACE_SEND_BOT(args...) TRACE_DBG_FLAG(TRACE_SND_BOT, args)
++#define TRACE_RECV_BOT(args...) TRACE_DBG_FLAG(TRACE_RCV_BOT, args)
++
++#else /* CONFIG_SCST_DEBUG */
++
++# ifdef CONFIG_SCST_TRACING
++#define SCST_DEFAULT_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MGMT | \
++ TRACE_SPECIAL)
++# else
++#define SCST_DEFAULT_LOG_FLAGS 0
++# endif
++
++#define TRACE_RETRY(args...)
++#define TRACE_SN(args...)
++#define TRACE_SEND_TOP(args...)
++#define TRACE_RECV_TOP(args...)
++#define TRACE_SEND_BOT(args...)
++#define TRACE_RECV_BOT(args...)
++
++#endif
++
++/**
++ ** Bits for scst_flags
++ **/
++
++/*
++ * Set if new commands initialization is being suspended for a while.
++ * Used to let TM commands execute while preparing the suspend, since
++ * RESET or ABORT could be necessary to free SCSI commands.
++ */
++#define SCST_FLAG_SUSPENDING 0
++
++/* Set if new commands initialization is suspended for a while */
++#define SCST_FLAG_SUSPENDED 1
++
++/**
++ ** Return codes for cmd state process functions. Codes are the same as
++ ** for SCST_EXEC_* to avoid translation to them and, hence, have better code.
++ **/
++#define SCST_CMD_STATE_RES_CONT_NEXT SCST_EXEC_COMPLETED
++#define SCST_CMD_STATE_RES_CONT_SAME SCST_EXEC_NOT_COMPLETED
++#define SCST_CMD_STATE_RES_NEED_THREAD (SCST_EXEC_NOT_COMPLETED+1)
++
++/**
++ ** Maximum count of uncompleted commands that an initiator could
++ ** queue on any device. Then it will start getting TASK QUEUE FULL status.
++ **/
++#define SCST_MAX_TGT_DEV_COMMANDS 48
++
++/**
++ ** Maximum count of uncompleted commands that could be queued on any device.
++ ** Then initiators sending commands to this device will start getting
++ ** TASK QUEUE FULL status.
++ **/
++#define SCST_MAX_DEV_COMMANDS 256
++
++#define SCST_TGT_RETRY_TIMEOUT (3/2*HZ)
++
++/* Activities suspending timeout */
++#define SCST_SUSPENDING_TIMEOUT (90 * HZ)
++
++extern struct mutex scst_mutex2;
++
++extern int scst_threads;
++
++extern unsigned int scst_max_dev_cmd_mem;
++
++extern mempool_t *scst_mgmt_mempool;
++extern mempool_t *scst_mgmt_stub_mempool;
++extern mempool_t *scst_ua_mempool;
++extern mempool_t *scst_sense_mempool;
++extern mempool_t *scst_aen_mempool;
++
++extern struct kmem_cache *scst_cmd_cachep;
++extern struct kmem_cache *scst_sess_cachep;
++extern struct kmem_cache *scst_tgtd_cachep;
++extern struct kmem_cache *scst_acgd_cachep;
++
++extern spinlock_t scst_main_lock;
++
++extern unsigned long scst_flags;
++extern struct list_head scst_template_list;
++extern struct list_head scst_dev_list;
++extern struct list_head scst_dev_type_list;
++extern struct list_head scst_virtual_dev_type_list;
++extern wait_queue_head_t scst_dev_cmd_waitQ;
++
++extern unsigned int scst_setup_id;
++
++#define SCST_DEF_MAX_TASKLET_CMD 10
++extern int scst_max_tasklet_cmd;
++
++extern spinlock_t scst_init_lock;
++extern struct list_head scst_init_cmd_list;
++extern wait_queue_head_t scst_init_cmd_list_waitQ;
++extern unsigned int scst_init_poll_cnt;
++
++extern struct scst_cmd_threads scst_main_cmd_threads;
++
++extern spinlock_t scst_mcmd_lock;
++/* The following lists protected by scst_mcmd_lock */
++extern struct list_head scst_active_mgmt_cmd_list;
++extern struct list_head scst_delayed_mgmt_cmd_list;
++extern wait_queue_head_t scst_mgmt_cmd_list_waitQ;
++
++struct scst_percpu_info {
++ atomic_t cpu_cmd_count;
++ spinlock_t tasklet_lock;
++ struct list_head tasklet_cmd_list;
++ struct tasklet_struct tasklet;
++} ____cacheline_aligned_in_smp;
++extern struct scst_percpu_info scst_percpu_infos[NR_CPUS];
++
++extern wait_queue_head_t scst_mgmt_waitQ;
++extern spinlock_t scst_mgmt_lock;
++extern struct list_head scst_sess_init_list;
++extern struct list_head scst_sess_shut_list;
++
++extern cpumask_t default_cpu_mask;
++
++struct scst_cmd_thread_t {
++ struct task_struct *cmd_thread;
++ struct list_head thread_list_entry;
++};
++
++static inline bool scst_set_io_context(struct scst_cmd *cmd,
++ struct io_context **old)
++{
++ bool res;
++
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ return false;
++#endif
++
++ if (cmd->cmd_threads == &scst_main_cmd_threads) {
++ EXTRACHECKS_BUG_ON(in_interrupt());
++ /*
++ * No need for any ref counting action, because io_context
++ * supposed to be cleared in the end of the caller function.
++ */
++ current->io_context = cmd->tgt_dev->async_io_context;
++ res = true;
++ TRACE_DBG("io_context %p (tgt_dev %p)", current->io_context,
++ cmd->tgt_dev);
++ EXTRACHECKS_BUG_ON(current->io_context == NULL);
++ } else
++ res = false;
++
++ return res;
++}
++
++static inline void scst_reset_io_context(struct scst_tgt_dev *tgt_dev,
++ struct io_context *old)
++{
++ current->io_context = old;
++ TRACE_DBG("io_context %p reset", current->io_context);
++ return;
++}
++
++/*
++ * Converts string presentation of threads pool type to enum.
++ * Returns SCST_THREADS_POOL_TYPE_INVALID if the string is invalid.
++ */
++extern enum scst_dev_type_threads_pool_type scst_parse_threads_pool_type(
++ const char *p, int len);
++
++extern int scst_add_threads(struct scst_cmd_threads *cmd_threads,
++ struct scst_device *dev, struct scst_tgt_dev *tgt_dev, int num);
++extern void scst_del_threads(struct scst_cmd_threads *cmd_threads, int num);
++
++extern int scst_create_dev_threads(struct scst_device *dev);
++extern void scst_stop_dev_threads(struct scst_device *dev);
++
++extern int scst_tgt_dev_setup_threads(struct scst_tgt_dev *tgt_dev);
++extern void scst_tgt_dev_stop_threads(struct scst_tgt_dev *tgt_dev);
++
++extern bool scst_del_thr_data(struct scst_tgt_dev *tgt_dev,
++ struct task_struct *tsk);
++
++extern struct scst_dev_type scst_null_devtype;
++
++extern struct scst_cmd *__scst_check_deferred_commands(
++ struct scst_order_data *order_data);
++
++/* Used to save the function call on the fast path */
++static inline struct scst_cmd *scst_check_deferred_commands(
++ struct scst_order_data *order_data)
++{
++ if (order_data->def_cmd_count == 0)
++ return NULL;
++ else
++ return __scst_check_deferred_commands(order_data);
++}
++
++static inline void scst_make_deferred_commands_active(
++ struct scst_order_data *order_data)
++{
++ struct scst_cmd *c;
++
++ c = __scst_check_deferred_commands(order_data);
++ if (c != NULL) {
++ TRACE_SN("Adding cmd %p to active cmd list", c);
++ spin_lock_irq(&c->cmd_threads->cmd_list_lock);
++ list_add_tail(&c->cmd_list_entry,
++ &c->cmd_threads->active_cmd_list);
++ wake_up(&c->cmd_threads->cmd_list_waitQ);
++ spin_unlock_irq(&c->cmd_threads->cmd_list_lock);
++ }
++
++ return;
++}
++
++void scst_inc_expected_sn(struct scst_order_data *order_data, atomic_t *slot);
++int scst_check_hq_cmd(struct scst_cmd *cmd);
++
++void scst_unblock_deferred(struct scst_order_data *order_data,
++ struct scst_cmd *cmd_sn);
++
++void scst_on_hq_cmd_response(struct scst_cmd *cmd);
++void scst_xmit_process_aborted_cmd(struct scst_cmd *cmd);
++
++int scst_pre_parse(struct scst_cmd *cmd);
++
++int scst_cmd_thread(void *arg);
++void scst_cmd_tasklet(long p);
++int scst_init_thread(void *arg);
++int scst_tm_thread(void *arg);
++int scst_global_mgmt_thread(void *arg);
++
++void scst_zero_write_rest(struct scst_cmd *cmd);
++void scst_limit_sg_write_len(struct scst_cmd *cmd);
++void scst_adjust_resp_data_len(struct scst_cmd *cmd);
++
++int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds);
++
++int scst_alloc_tgt(struct scst_tgt_template *tgtt, struct scst_tgt **tgt);
++void scst_free_tgt(struct scst_tgt *tgt);
++
++int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev);
++void scst_free_device(struct scst_device *dev);
++
++struct scst_acg *scst_alloc_add_acg(struct scst_tgt *tgt,
++ const char *acg_name, bool tgt_acg);
++void scst_del_free_acg(struct scst_acg *acg);
++
++struct scst_acg *scst_tgt_find_acg(struct scst_tgt *tgt, const char *name);
++struct scst_acg *scst_find_acg(const struct scst_session *sess);
++
++void scst_check_reassign_sessions(void);
++
++int scst_sess_alloc_tgt_devs(struct scst_session *sess);
++void scst_sess_free_tgt_devs(struct scst_session *sess);
++void scst_nexus_loss(struct scst_tgt_dev *tgt_dev, bool queue_UA);
++
++int scst_acg_add_lun(struct scst_acg *acg, struct kobject *parent,
++ struct scst_device *dev, uint64_t lun, int read_only,
++ bool gen_scst_report_luns_changed, struct scst_acg_dev **out_acg_dev);
++int scst_acg_del_lun(struct scst_acg *acg, uint64_t lun,
++ bool gen_scst_report_luns_changed);
++
++int scst_acg_add_acn(struct scst_acg *acg, const char *name);
++void scst_del_free_acn(struct scst_acn *acn, bool reassign);
++struct scst_acn *scst_find_acn(struct scst_acg *acg, const char *name);
++
++/* The activity supposed to be suspended and scst_mutex held */
++static inline bool scst_acg_sess_is_empty(struct scst_acg *acg)
++{
++ return list_empty(&acg->acg_sess_list);
++}
++
++int scst_prepare_request_sense(struct scst_cmd *orig_cmd);
++int scst_finish_internal_cmd(struct scst_cmd *cmd);
++
++void scst_store_sense(struct scst_cmd *cmd);
++
++int scst_assign_dev_handler(struct scst_device *dev,
++ struct scst_dev_type *handler);
++
++struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
++ const char *initiator_name);
++void scst_free_session(struct scst_session *sess);
++void scst_free_session_callback(struct scst_session *sess);
++
++struct scst_cmd *scst_alloc_cmd(const uint8_t *cdb,
++ unsigned int cdb_len, gfp_t gfp_mask);
++void scst_free_cmd(struct scst_cmd *cmd);
++static inline void scst_destroy_cmd(struct scst_cmd *cmd)
++{
++ kmem_cache_free(scst_cmd_cachep, cmd);
++ return;
++}
++
++void scst_check_retries(struct scst_tgt *tgt);
++
++int scst_alloc_space(struct scst_cmd *cmd);
++
++int scst_lib_init(void);
++void scst_lib_exit(void);
++
++__be64 scst_pack_lun(const uint64_t lun, enum scst_lun_addr_method addr_method);
++uint64_t scst_unpack_lun(const uint8_t *lun, int len);
++
++struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask);
++void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd);
++void scst_done_cmd_mgmt(struct scst_cmd *cmd);
++
++static inline void scst_devt_cleanup(struct scst_dev_type *devt) { }
++
++void scst_tg_init(void);
++void scst_tg_cleanup(void);
++int scst_dg_add(struct kobject *parent, const char *name);
++int scst_dg_remove(const char *name);
++struct scst_dev_group *scst_lookup_dg_by_kobj(struct kobject *kobj);
++int scst_dg_dev_add(struct scst_dev_group *dg, const char *name);
++int scst_dg_dev_remove_by_name(struct scst_dev_group *dg, const char *name);
++int scst_dg_dev_remove_by_dev(struct scst_device *dev);
++int scst_tg_add(struct scst_dev_group *dg, const char *name);
++int scst_tg_remove_by_name(struct scst_dev_group *dg, const char *name);
++int scst_tg_set_state(struct scst_target_group *tg, enum scst_tg_state state);
++int scst_tg_tgt_add(struct scst_target_group *tg, const char *name);
++int scst_tg_tgt_remove_by_name(struct scst_target_group *tg, const char *name);
++void scst_tg_tgt_remove_by_tgt(struct scst_tgt *tgt);
++int scst_dg_sysfs_add(struct kobject *parent, struct scst_dev_group *dg);
++void scst_dg_sysfs_del(struct scst_dev_group *dg);
++int scst_dg_dev_sysfs_add(struct scst_dev_group *dg, struct scst_dg_dev *dgdev);
++void scst_dg_dev_sysfs_del(struct scst_dev_group *dg,
++ struct scst_dg_dev *dgdev);
++int scst_tg_sysfs_add(struct scst_dev_group *dg,
++ struct scst_target_group *tg);
++void scst_tg_sysfs_del(struct scst_target_group *tg);
++int scst_tg_tgt_sysfs_add(struct scst_target_group *tg,
++ struct scst_tg_tgt *tg_tgt);
++void scst_tg_tgt_sysfs_del(struct scst_target_group *tg,
++ struct scst_tg_tgt *tg_tgt);
++
++extern const struct sysfs_ops scst_sysfs_ops;
++int scst_sysfs_init(void);
++void scst_sysfs_cleanup(void);
++int scst_tgtt_sysfs_create(struct scst_tgt_template *tgtt);
++void scst_tgtt_sysfs_del(struct scst_tgt_template *tgtt);
++int scst_tgt_sysfs_create(struct scst_tgt *tgt);
++void scst_tgt_sysfs_prepare_put(struct scst_tgt *tgt);
++void scst_tgt_sysfs_del(struct scst_tgt *tgt);
++int scst_sess_sysfs_create(struct scst_session *sess);
++void scst_sess_sysfs_del(struct scst_session *sess);
++int scst_recreate_sess_luns_link(struct scst_session *sess);
++int scst_add_sgv_kobj(struct kobject *parent, const char *name);
++void scst_del_put_sgv_kobj(void);
++int scst_devt_sysfs_create(struct scst_dev_type *devt);
++void scst_devt_sysfs_del(struct scst_dev_type *devt);
++int scst_dev_sysfs_create(struct scst_device *dev);
++void scst_dev_sysfs_del(struct scst_device *dev);
++int scst_tgt_dev_sysfs_create(struct scst_tgt_dev *tgt_dev);
++void scst_tgt_dev_sysfs_del(struct scst_tgt_dev *tgt_dev);
++int scst_devt_dev_sysfs_create(struct scst_device *dev);
++void scst_devt_dev_sysfs_del(struct scst_device *dev);
++int scst_acg_sysfs_create(struct scst_tgt *tgt,
++ struct scst_acg *acg);
++void scst_acg_sysfs_del(struct scst_acg *acg);
++int scst_acg_dev_sysfs_create(struct scst_acg_dev *acg_dev,
++ struct kobject *parent);
++void scst_acg_dev_sysfs_del(struct scst_acg_dev *acg_dev);
++int scst_acn_sysfs_create(struct scst_acn *acn);
++void scst_acn_sysfs_del(struct scst_acn *acn);
++
++void __scst_dev_check_set_UA(struct scst_device *dev, struct scst_cmd *exclude,
++ const uint8_t *sense, int sense_len);
++void scst_tgt_dev_del_free_UA(struct scst_tgt_dev *tgt_dev,
++ struct scst_tgt_dev_UA *ua);
++static inline void scst_dev_check_set_UA(struct scst_device *dev,
++ struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
++{
++ spin_lock_bh(&dev->dev_lock);
++ __scst_dev_check_set_UA(dev, exclude, sense, sense_len);
++ spin_unlock_bh(&dev->dev_lock);
++ return;
++}
++void scst_dev_check_set_local_UA(struct scst_device *dev,
++ struct scst_cmd *exclude, const uint8_t *sense, int sense_len);
++
++#define SCST_SET_UA_FLAG_AT_HEAD 1
++#define SCST_SET_UA_FLAG_GLOBAL 2
++
++void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
++ const uint8_t *sense, int sense_len, int flags);
++int scst_set_pending_UA(struct scst_cmd *cmd);
++
++void scst_report_luns_changed(struct scst_acg *acg);
++
++void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
++ bool other_ini, bool call_dev_task_mgmt_fn);
++void scst_process_reset(struct scst_device *dev,
++ struct scst_session *originator, struct scst_cmd *exclude_cmd,
++ struct scst_mgmt_cmd *mcmd, bool setUA);
++
++bool scst_is_ua_global(const uint8_t *sense, int len);
++void scst_requeue_ua(struct scst_cmd *cmd);
++
++struct scst_aen *scst_alloc_aen(struct scst_session *sess,
++ uint64_t unpacked_lun);
++void scst_free_aen(struct scst_aen *aen);
++
++void scst_gen_aen_or_ua(struct scst_tgt_dev *tgt_dev,
++ int key, int asc, int ascq);
++
++static inline bool scst_is_implicit_hq_cmd(struct scst_cmd *cmd)
++{
++ return (cmd->op_flags & SCST_IMPLICIT_HQ) != 0;
++}
++
++static inline bool scst_is_serialized_cmd(struct scst_cmd *cmd)
++{
++ return (cmd->op_flags & SCST_SERIALIZED) != 0;
++}
++
++static inline bool scst_is_strictly_serialized_cmd(struct scst_cmd *cmd)
++{
++ return (cmd->op_flags & SCST_STRICTLY_SERIALIZED) == SCST_STRICTLY_SERIALIZED;
++}
++
++/*
++ * Some notes on devices "blocking". Blocking means that no
++ * commands will go from SCST to underlying SCSI device until it
++ * is unblocked. But, except for strictly serialized commands,
++ * we don't care about all commands that already on the device.
++ */
++
++extern void scst_block_dev(struct scst_device *dev);
++extern void scst_unblock_dev(struct scst_device *dev);
++
++bool __scst_check_blocked_dev(struct scst_cmd *cmd);
++
++/*
++ * Increases global SCST ref counters which prevent from entering into suspended
++ * activities stage, so protects from any global management operations.
++ */
++static inline atomic_t *scst_get(void)
++{
++ atomic_t *a;
++ /*
++ * We don't mind if we because of preemption inc counter from another
++ * CPU as soon in the majority cases we will the correct one. So, let's
++ * have preempt_disable/enable only in the debug build to avoid warning.
++ */
++#ifdef CONFIG_DEBUG_PREEMPT
++ preempt_disable();
++#endif
++ a = &scst_percpu_infos[smp_processor_id()].cpu_cmd_count;
++ atomic_inc(a);
++#ifdef CONFIG_DEBUG_PREEMPT
++ preempt_enable();
++#endif
++ TRACE_DBG("Incrementing cpu_cmd_count %p (new value %d)",
++ a, atomic_read(a));
++ /* See comment about smp_mb() in scst_suspend_activity() */
++ smp_mb__after_atomic_inc();
++
++ return a;
++}
++
++/*
++ * Decreases global SCST ref counters which prevent from entering into suspended
++ * activities stage, so protects from any global management operations. On
++ * all them zero, if suspending activities is waiting, it will be proceed.
++ */
++static inline void scst_put(atomic_t *a)
++{
++ int f;
++ f = atomic_dec_and_test(a);
++ /* See comment about smp_mb() in scst_suspend_activity() */
++ if (unlikely(test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) && f) {
++ TRACE_MGMT_DBG("%s", "Waking up scst_dev_cmd_waitQ");
++ wake_up_all(&scst_dev_cmd_waitQ);
++ }
++ TRACE_DBG("Decrementing cpu_cmd_count %p (new value %d)",
++ a, atomic_read(a));
++}
++
++int scst_get_cmd_counter(void);
++
++void scst_sched_session_free(struct scst_session *sess);
++
++static inline void scst_sess_get(struct scst_session *sess)
++{
++ atomic_inc(&sess->refcnt);
++ TRACE_DBG("Incrementing sess %p refcnt (new value %d)",
++ sess, atomic_read(&sess->refcnt));
++}
++
++static inline void scst_sess_put(struct scst_session *sess)
++{
++ TRACE_DBG("Decrementing sess %p refcnt (new value %d)",
++ sess, atomic_read(&sess->refcnt)-1);
++ if (atomic_dec_and_test(&sess->refcnt))
++ scst_sched_session_free(sess);
++}
++
++static inline void __scst_cmd_get(struct scst_cmd *cmd)
++{
++ atomic_inc(&cmd->cmd_ref);
++ TRACE_DBG("Incrementing cmd %p ref (new value %d)",
++ cmd, atomic_read(&cmd->cmd_ref));
++}
++
++static inline void __scst_cmd_put(struct scst_cmd *cmd)
++{
++ TRACE_DBG("Decrementing cmd %p ref (new value %d)",
++ cmd, atomic_read(&cmd->cmd_ref)-1);
++ if (atomic_dec_and_test(&cmd->cmd_ref))
++ scst_free_cmd(cmd);
++}
++
++extern void scst_throttle_cmd(struct scst_cmd *cmd);
++extern void scst_unthrottle_cmd(struct scst_cmd *cmd);
++
++#ifdef CONFIG_SCST_DEBUG_TM
++extern void tm_dbg_check_released_cmds(void);
++extern int tm_dbg_check_cmd(struct scst_cmd *cmd);
++extern void tm_dbg_release_cmd(struct scst_cmd *cmd);
++extern void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn,
++ int force);
++extern int tm_dbg_is_release(void);
++#else
++static inline void tm_dbg_check_released_cmds(void) {}
++static inline int tm_dbg_check_cmd(struct scst_cmd *cmd)
++{
++ return 0;
++}
++static inline void tm_dbg_release_cmd(struct scst_cmd *cmd) {}
++static inline void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn,
++ int force) {}
++static inline int tm_dbg_is_release(void)
++{
++ return 0;
++}
++#endif /* CONFIG_SCST_DEBUG_TM */
++
++#ifdef CONFIG_SCST_DEBUG_SN
++void scst_check_debug_sn(struct scst_cmd *cmd);
++#else
++static inline void scst_check_debug_sn(struct scst_cmd *cmd) {}
++#endif
++
++static inline int scst_sn_before(uint32_t seq1, uint32_t seq2)
++{
++ return (int32_t)(seq1-seq2) < 0;
++}
++
++int gen_relative_target_port_id(uint16_t *id);
++bool scst_is_relative_target_port_id_unique(uint16_t id,
++ const struct scst_tgt *t);
++
++#ifdef CONFIG_SCST_MEASURE_LATENCY
++
++void scst_set_start_time(struct scst_cmd *cmd);
++void scst_set_cur_start(struct scst_cmd *cmd);
++void scst_set_parse_time(struct scst_cmd *cmd);
++void scst_set_alloc_buf_time(struct scst_cmd *cmd);
++void scst_set_restart_waiting_time(struct scst_cmd *cmd);
++void scst_set_rdy_to_xfer_time(struct scst_cmd *cmd);
++void scst_set_pre_exec_time(struct scst_cmd *cmd);
++void scst_set_exec_time(struct scst_cmd *cmd);
++void scst_set_dev_done_time(struct scst_cmd *cmd);
++void scst_set_xmit_time(struct scst_cmd *cmd);
++void scst_set_tgt_on_free_time(struct scst_cmd *cmd);
++void scst_set_dev_on_free_time(struct scst_cmd *cmd);
++void scst_update_lat_stats(struct scst_cmd *cmd);
++
++#else
++
++static inline void scst_set_start_time(struct scst_cmd *cmd) {}
++static inline void scst_set_cur_start(struct scst_cmd *cmd) {}
++static inline void scst_set_parse_time(struct scst_cmd *cmd) {}
++static inline void scst_set_alloc_buf_time(struct scst_cmd *cmd) {}
++static inline void scst_set_restart_waiting_time(struct scst_cmd *cmd) {}
++static inline void scst_set_rdy_to_xfer_time(struct scst_cmd *cmd) {}
++static inline void scst_set_pre_exec_time(struct scst_cmd *cmd) {}
++static inline void scst_set_exec_time(struct scst_cmd *cmd) {}
++static inline void scst_set_dev_done_time(struct scst_cmd *cmd) {}
++static inline void scst_set_xmit_time(struct scst_cmd *cmd) {}
++static inline void scst_set_tgt_on_free_time(struct scst_cmd *cmd) {}
++static inline void scst_set_dev_on_free_time(struct scst_cmd *cmd) {}
++static inline void scst_update_lat_stats(struct scst_cmd *cmd) {}
++
++#endif /* CONFIG_SCST_MEASURE_LATENCY */
++
++#endif /* __SCST_PRIV_H */
+diff -uprN orig/linux-2.6.39/drivers/scst/scst_targ.c linux-2.6.39/drivers/scst/scst_targ.c
+--- orig/linux-2.6.39/drivers/scst/scst_targ.c
++++ linux-2.6.39/drivers/scst/scst_targ.c
+@@ -0,0 +1,6701 @@
++/*
++ * scst_targ.c
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/unistd.h>
++#include <linux/string.h>
++#include <linux/kthread.h>
++#include <linux/delay.h>
++#include <linux/ktime.h>
++
++#include <scst/scst.h>
++#include "scst_priv.h"
++#include "scst_pres.h"
++
++#if 0 /* Let's disable it for now to see if users will complain about it */
++/* Deleting it don't forget to delete dev_cmd_count */
++#define CONFIG_SCST_PER_DEVICE_CMD_COUNT_LIMIT
++#endif
++
++static void scst_cmd_set_sn(struct scst_cmd *cmd);
++static int __scst_init_cmd(struct scst_cmd *cmd);
++static void scst_finish_cmd_mgmt(struct scst_cmd *cmd);
++static struct scst_cmd *__scst_find_cmd_by_tag(struct scst_session *sess,
++ uint64_t tag, bool to_abort);
++static void scst_process_redirect_cmd(struct scst_cmd *cmd,
++ enum scst_exec_context context, int check_retries);
++
++/**
++ * scst_post_parse() - do post parse actions
++ *
++ * This function must be called by dev handler after its parse() callback
++ * returned SCST_CMD_STATE_STOP before calling scst_process_active_cmd().
++ */
++void scst_post_parse(struct scst_cmd *cmd)
++{
++ scst_set_parse_time(cmd);
++}
++EXPORT_SYMBOL_GPL(scst_post_parse);
++
++/**
++ * scst_post_alloc_data_buf() - do post alloc_data_buf actions
++ *
++ * This function must be called by dev handler after its alloc_data_buf()
++ * callback returned SCST_CMD_STATE_STOP before calling
++ * scst_process_active_cmd().
++ */
++void scst_post_alloc_data_buf(struct scst_cmd *cmd)
++{
++ scst_set_alloc_buf_time(cmd);
++}
++EXPORT_SYMBOL_GPL(scst_post_alloc_data_buf);
++
++static inline void scst_schedule_tasklet(struct scst_cmd *cmd)
++{
++ struct scst_percpu_info *i = &scst_percpu_infos[smp_processor_id()];
++ unsigned long flags;
++
++ if (atomic_read(&i->cpu_cmd_count) <= scst_max_tasklet_cmd) {
++ spin_lock_irqsave(&i->tasklet_lock, flags);
++ TRACE_DBG("Adding cmd %p to tasklet %d cmd list", cmd,
++ smp_processor_id());
++ list_add_tail(&cmd->cmd_list_entry, &i->tasklet_cmd_list);
++ spin_unlock_irqrestore(&i->tasklet_lock, flags);
++
++ tasklet_schedule(&i->tasklet);
++ } else {
++ spin_lock_irqsave(&cmd->cmd_threads->cmd_list_lock, flags);
++ TRACE_DBG("Too many tasklet commands (%d), adding cmd %p to "
++ "active cmd list", atomic_read(&i->cpu_cmd_count), cmd);
++ list_add_tail(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
++ spin_unlock_irqrestore(&cmd->cmd_threads->cmd_list_lock, flags);
++ }
++ return;
++}
++
++/* No locks */
++static bool scst_check_blocked_dev(struct scst_cmd *cmd)
++{
++ bool res;
++ struct scst_device *dev = cmd->dev;
++
++ spin_lock_bh(&dev->dev_lock);
++
++ dev->on_dev_cmd_count++;
++ cmd->dec_on_dev_needed = 1;
++ TRACE_DBG("New inc on_dev_count %d (cmd %p)", dev->on_dev_cmd_count,
++ cmd);
++
++ scst_inc_pr_readers_count(cmd, true);
++
++ if (unlikely(dev->block_count > 0) ||
++ unlikely(dev->dev_double_ua_possible) ||
++ unlikely(scst_is_serialized_cmd(cmd)))
++ res = __scst_check_blocked_dev(cmd);
++ else
++ res = false;
++
++ if (unlikely(res)) {
++ /* Undo increments */
++ dev->on_dev_cmd_count--;
++ cmd->dec_on_dev_needed = 0;
++ TRACE_DBG("New dec on_dev_count %d (cmd %p)",
++ dev->on_dev_cmd_count, cmd);
++
++ scst_dec_pr_readers_count(cmd, true);
++ }
++
++ spin_unlock_bh(&dev->dev_lock);
++
++ return res;
++}
++
++/* No locks */
++static void scst_check_unblock_dev(struct scst_cmd *cmd)
++{
++ struct scst_device *dev = cmd->dev;
++
++ spin_lock_bh(&dev->dev_lock);
++
++ if (likely(cmd->dec_on_dev_needed)) {
++ dev->on_dev_cmd_count--;
++ cmd->dec_on_dev_needed = 0;
++ TRACE_DBG("New dec on_dev_count %d (cmd %p)",
++ dev->on_dev_cmd_count, cmd);
++ }
++
++ if (unlikely(cmd->dec_pr_readers_count_needed))
++ scst_dec_pr_readers_count(cmd, true);
++
++ if (unlikely(cmd->unblock_dev)) {
++ TRACE_MGMT_DBG("cmd %p (tag %llu): unblocking dev %s", cmd,
++ (long long unsigned int)cmd->tag, dev->virt_name);
++ cmd->unblock_dev = 0;
++ scst_unblock_dev(dev);
++ } else if (unlikely(dev->strictly_serialized_cmd_waiting)) {
++ if (dev->on_dev_cmd_count == 0) {
++ TRACE_MGMT_DBG("Strictly serialized cmd waiting: "
++ "unblocking dev %s", dev->virt_name);
++ scst_unblock_dev(dev);
++ }
++ }
++
++ spin_unlock_bh(&dev->dev_lock);
++ return;
++}
++
++/**
++ * scst_rx_cmd() - create new command
++ * @sess: SCST session
++ * @lun: LUN for the command
++ * @lun_len: length of the LUN in bytes
++ * @cdb: CDB of the command
++ * @cdb_len: length of the CDB in bytes
++ * @atomic: true, if current context is atomic
++ *
++ * Description:
++ * Creates new SCST command. Returns new command on success or
++ * NULL otherwise.
++ *
++ * Must not be called in parallel with scst_unregister_session() for the
++ * same session.
++ */
++struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
++ const uint8_t *lun, int lun_len, const uint8_t *cdb,
++ unsigned int cdb_len, int atomic)
++{
++ struct scst_cmd *cmd;
++
++ TRACE_ENTRY();
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ if (unlikely(sess->shut_phase != SCST_SESS_SPH_READY)) {
++ PRINT_CRIT_ERROR("%s",
++ "New cmd while shutting down the session");
++ BUG();
++ }
++#endif
++
++ cmd = scst_alloc_cmd(cdb, cdb_len, atomic ? GFP_ATOMIC : GFP_KERNEL);
++ if (unlikely(cmd == NULL))
++ goto out;
++
++ cmd->sess = sess;
++ cmd->tgt = sess->tgt;
++ cmd->tgtt = sess->tgt->tgtt;
++
++ cmd->lun = scst_unpack_lun(lun, lun_len);
++ if (unlikely(cmd->lun == NO_SUCH_LUN))
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_lun_not_supported));
++
++ TRACE_DBG("cmd %p, sess %p", cmd, sess);
++ scst_sess_get(sess);
++
++out:
++ TRACE_EXIT();
++ return cmd;
++}
++EXPORT_SYMBOL(scst_rx_cmd);
++
++/*
++ * No locks, but might be on IRQ. Returns 0 on success, <0 if processing of
++ * this command should be stopped.
++ */
++static int scst_init_cmd(struct scst_cmd *cmd, enum scst_exec_context *context)
++{
++ int rc, res = 0;
++
++ TRACE_ENTRY();
++
++ /* See the comment in scst_do_job_init() */
++ if (unlikely(!list_empty(&scst_init_cmd_list))) {
++ TRACE_MGMT_DBG("%s", "init cmd list busy");
++ goto out_redirect;
++ }
++ /*
++ * Memory barrier isn't necessary here, because CPU appears to
++ * be self-consistent and we don't care about the race, described
++ * in comment in scst_do_job_init().
++ */
++
++ rc = __scst_init_cmd(cmd);
++ if (unlikely(rc > 0))
++ goto out_redirect;
++ else if (unlikely(rc != 0)) {
++ res = 1;
++ goto out;
++ }
++
++ EXTRACHECKS_BUG_ON(*context == SCST_CONTEXT_SAME);
++
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ if (cmd->op_flags & SCST_TEST_IO_IN_SIRQ_ALLOWED)
++ goto out;
++#endif
++
++ /* Small context optimization */
++ if ((*context == SCST_CONTEXT_TASKLET) ||
++ (*context == SCST_CONTEXT_DIRECT_ATOMIC)) {
++ /*
++ * If any data_direction not set, it's SCST_DATA_UNKNOWN,
++ * which is 0, so we can safely | them
++ */
++ BUILD_BUG_ON(SCST_DATA_UNKNOWN != 0);
++ if ((cmd->data_direction | cmd->expected_data_direction) & SCST_DATA_WRITE) {
++ if (!test_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
++ &cmd->tgt_dev->tgt_dev_flags))
++ *context = SCST_CONTEXT_THREAD;
++ } else
++ *context = SCST_CONTEXT_THREAD;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_redirect:
++ if (cmd->preprocessing_only) {
++ /*
++ * Poor man solution for single threaded targets, where
++ * blocking receiver at least sometimes means blocking all.
++ * For instance, iSCSI target won't be able to receive
++ * Data-Out PDUs.
++ */
++ BUG_ON(*context != SCST_CONTEXT_DIRECT);
++ scst_set_busy(cmd);
++ scst_set_cmd_abnormal_done_state(cmd);
++ res = 1;
++ /* Keep initiator away from too many BUSY commands */
++ msleep(50);
++ } else {
++ unsigned long flags;
++ spin_lock_irqsave(&scst_init_lock, flags);
++ TRACE_MGMT_DBG("Adding cmd %p to init cmd list)", cmd);
++ list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
++ if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
++ scst_init_poll_cnt++;
++ spin_unlock_irqrestore(&scst_init_lock, flags);
++ wake_up(&scst_init_cmd_list_waitQ);
++ res = -1;
++ }
++ goto out;
++}
++
++/**
++ * scst_cmd_init_done() - the command's initialization done
++ * @cmd: SCST command
++ * @pref_context: preferred command execution context
++ *
++ * Description:
++ * Notifies SCST that the driver finished its part of the command
++ * initialization, and the command is ready for execution.
++ * The second argument sets preferred command execution context.
++ * See SCST_CONTEXT_* constants for details.
++ *
++ * !!IMPORTANT!!
++ *
++ * If cmd->set_sn_on_restart_cmd not set, this function, as well as
++ * scst_cmd_init_stage1_done() and scst_restart_cmd(), must not be
++ * called simultaneously for the same session (more precisely,
++ * for the same session/LUN, i.e. tgt_dev), i.e. they must be
++ * somehow externally serialized. This is needed to have lock free fast
++ * path in scst_cmd_set_sn(). For majority of targets those functions are
++ * naturally serialized by the single source of commands. Only iSCSI
++ * immediate commands with multiple connections per session seems to be an
++ * exception. For it, some mutex/lock shall be used for the serialization.
++ */
++void scst_cmd_init_done(struct scst_cmd *cmd,
++ enum scst_exec_context pref_context)
++{
++ unsigned long flags;
++ struct scst_session *sess = cmd->sess;
++ int rc;
++
++ TRACE_ENTRY();
++
++ scst_set_start_time(cmd);
++
++ TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
++ TRACE(TRACE_SCSI, "tag=%llu, lun=%lld, CDB len=%d, queue_type=%x "
++ "(cmd %p, sess %p)", (long long unsigned int)cmd->tag,
++ (long long unsigned int)cmd->lun, cmd->cdb_len,
++ cmd->queue_type, cmd, sess);
++ PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_RCV_BOT, "Receiving CDB",
++ cmd->cdb, cmd->cdb_len);
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ if (unlikely((in_irq() || irqs_disabled())) &&
++ ((pref_context == SCST_CONTEXT_DIRECT) ||
++ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
++ PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
++ "SCST_CONTEXT_THREAD instead", pref_context,
++ cmd->tgtt->name);
++ dump_stack();
++ pref_context = SCST_CONTEXT_THREAD;
++ }
++#endif
++
++ atomic_inc(&sess->sess_cmd_count);
++
++ spin_lock_irqsave(&sess->sess_list_lock, flags);
++
++ if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
++ /*
++ * We must always keep commands in the sess list from the
++ * very beginning, because otherwise they can be missed during
++ * TM processing. This check is needed because there might be
++ * old, i.e. deferred, commands and new, i.e. just coming, ones.
++ */
++ if (cmd->sess_cmd_list_entry.next == NULL)
++ list_add_tail(&cmd->sess_cmd_list_entry,
++ &sess->sess_cmd_list);
++ switch (sess->init_phase) {
++ case SCST_SESS_IPH_SUCCESS:
++ break;
++ case SCST_SESS_IPH_INITING:
++ TRACE_DBG("Adding cmd %p to init deferred cmd list",
++ cmd);
++ list_add_tail(&cmd->cmd_list_entry,
++ &sess->init_deferred_cmd_list);
++ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
++ goto out;
++ case SCST_SESS_IPH_FAILED:
++ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
++ scst_set_busy(cmd);
++ goto set_state;
++ default:
++ BUG();
++ }
++ } else
++ list_add_tail(&cmd->sess_cmd_list_entry,
++ &sess->sess_cmd_list);
++
++ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
++
++ if (unlikely(cmd->queue_type >= SCST_CMD_QUEUE_ACA)) {
++ PRINT_ERROR("Unsupported queue type %d", cmd->queue_type);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_message));
++ }
++
++set_state:
++ if (unlikely(cmd->status != SAM_STAT_GOOD)) {
++ scst_set_cmd_abnormal_done_state(cmd);
++ goto active;
++ }
++
++ /*
++ * Cmd must be inited here to preserve the order. In case if cmd
++ * already preliminary completed by target driver we need to init
++ * cmd anyway to find out in which format we should return sense.
++ */
++ cmd->state = SCST_CMD_STATE_INIT;
++ rc = scst_init_cmd(cmd, &pref_context);
++ if (unlikely(rc < 0))
++ goto out;
++
++active:
++ /* Here cmd must not be in any cmd list, no locks */
++ switch (pref_context) {
++ case SCST_CONTEXT_TASKLET:
++ scst_schedule_tasklet(cmd);
++ break;
++
++ default:
++ PRINT_ERROR("Context %x is undefined, using the thread one",
++ pref_context);
++ /* go through */
++ case SCST_CONTEXT_THREAD:
++ spin_lock_irqsave(&cmd->cmd_threads->cmd_list_lock, flags);
++ TRACE_DBG("Adding cmd %p to active cmd list", cmd);
++ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
++ list_add(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ else
++ list_add_tail(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
++ spin_unlock_irqrestore(&cmd->cmd_threads->cmd_list_lock, flags);
++ break;
++
++ case SCST_CONTEXT_DIRECT:
++ scst_process_active_cmd(cmd, false);
++ break;
++
++ case SCST_CONTEXT_DIRECT_ATOMIC:
++ scst_process_active_cmd(cmd, true);
++ break;
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL(scst_cmd_init_done);
++
++int scst_pre_parse(struct scst_cmd *cmd)
++{
++ int res;
++ struct scst_device *dev = cmd->dev;
++ int rc;
++
++ TRACE_ENTRY();
++
++ /*
++ * Expected transfer data supplied by the SCSI transport via the
++ * target driver are untrusted, so we prefer to fetch them from CDB.
++ * Additionally, not all transports support supplying the expected
++ * transfer data.
++ */
++
++ rc = scst_get_cdb_info(cmd);
++ if (unlikely(rc != 0)) {
++ if (rc > 0) {
++ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
++ goto out_err;
++ }
++
++ EXTRACHECKS_BUG_ON(cmd->op_flags & SCST_INFO_VALID);
++
++ TRACE(TRACE_MINOR, "Unknown opcode 0x%02x for %s. "
++ "Should you update scst_scsi_op_table?",
++ cmd->cdb[0], dev->handler->name);
++ PRINT_BUFF_FLAG(TRACE_MINOR, "Failed CDB", cmd->cdb,
++ cmd->cdb_len);
++ } else
++ EXTRACHECKS_BUG_ON(!(cmd->op_flags & SCST_INFO_VALID));
++
++#ifdef CONFIG_SCST_STRICT_SERIALIZING
++ cmd->inc_expected_sn_on_done = 1;
++#else
++ cmd->inc_expected_sn_on_done = dev->handler->exec_sync ||
++ (!dev->has_own_order_mgmt &&
++ (dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER ||
++ cmd->queue_type == SCST_CMD_QUEUE_ORDERED));
++#endif
++
++ TRACE_DBG("op_name <%s> (cmd %p), direction=%d "
++ "(expected %d, set %s), bufflen=%d, out_bufflen=%d (expected "
++ "len %d, out expected len %d), flags=0x%x", cmd->op_name, cmd,
++ cmd->data_direction, cmd->expected_data_direction,
++ scst_cmd_is_expected_set(cmd) ? "yes" : "no",
++ cmd->bufflen, cmd->out_bufflen, cmd->expected_transfer_len,
++ cmd->expected_out_transfer_len, cmd->op_flags);
++
++ res = 0;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_err:
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ scst_set_cmd_abnormal_done_state(cmd);
++ res = -1;
++ goto out;
++}
++
++#ifndef CONFIG_SCST_USE_EXPECTED_VALUES
++static bool scst_is_allowed_to_mismatch_cmd(struct scst_cmd *cmd)
++{
++ bool res = false;
++
++ /* VERIFY commands with BYTCHK unset shouldn't fail here */
++ if ((cmd->op_flags & SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED) &&
++ (cmd->cdb[1] & BYTCHK) == 0) {
++ res = true;
++ goto out;
++ }
++
++ switch (cmd->cdb[0]) {
++ case TEST_UNIT_READY:
++ /* Crazy VMware people sometimes do TUR with READ direction */
++ if ((cmd->expected_data_direction == SCST_DATA_READ) ||
++ (cmd->expected_data_direction == SCST_DATA_NONE))
++ res = true;
++ break;
++ }
++
++out:
++ return res;
++}
++#endif
++
++static int scst_parse_cmd(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_RES_CONT_SAME;
++ int state;
++ struct scst_device *dev = cmd->dev;
++ int orig_bufflen = cmd->bufflen;
++
++ TRACE_ENTRY();
++
++ if (likely(!scst_is_cmd_fully_local(cmd))) {
++ if (unlikely(!dev->handler->parse_atomic &&
++ scst_cmd_atomic(cmd))) {
++ /*
++ * It shouldn't be because of the SCST_TGT_DEV_AFTER_*
++ * optimization.
++ */
++ TRACE_MGMT_DBG("Dev handler %s parse() needs thread "
++ "context, rescheduling", dev->handler->name);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ }
++
++ TRACE_DBG("Calling dev handler %s parse(%p)",
++ dev->handler->name, cmd);
++ TRACE_BUFF_FLAG(TRACE_SND_BOT, "Parsing: ",
++ cmd->cdb, cmd->cdb_len);
++ scst_set_cur_start(cmd);
++ state = dev->handler->parse(cmd);
++ /* Caution: cmd can be already dead here */
++ TRACE_DBG("Dev handler %s parse() returned %d",
++ dev->handler->name, state);
++
++ switch (state) {
++ case SCST_CMD_STATE_NEED_THREAD_CTX:
++ scst_set_parse_time(cmd);
++ TRACE_DBG("Dev handler %s parse() requested thread "
++ "context, rescheduling", dev->handler->name);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++
++ case SCST_CMD_STATE_STOP:
++ TRACE_DBG("Dev handler %s parse() requested stop "
++ "processing", dev->handler->name);
++ res = SCST_CMD_STATE_RES_CONT_NEXT;
++ goto out;
++ }
++
++ scst_set_parse_time(cmd);
++
++ if (state == SCST_CMD_STATE_DEFAULT)
++ state = SCST_CMD_STATE_PREPARE_SPACE;
++ } else
++ state = SCST_CMD_STATE_PREPARE_SPACE;
++
++ if (unlikely(state == SCST_CMD_STATE_PRE_XMIT_RESP))
++ goto set_res;
++
++ if (unlikely(!(cmd->op_flags & SCST_INFO_VALID))) {
++#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
++ if (scst_cmd_is_expected_set(cmd)) {
++ TRACE(TRACE_MINOR, "Using initiator supplied values: "
++ "direction %d, transfer_len %d/%d",
++ cmd->expected_data_direction,
++ cmd->expected_transfer_len,
++ cmd->expected_out_transfer_len);
++ cmd->data_direction = cmd->expected_data_direction;
++ cmd->bufflen = cmd->expected_transfer_len;
++ cmd->out_bufflen = cmd->expected_out_transfer_len;
++ } else {
++ PRINT_ERROR("Unknown opcode 0x%02x for %s and "
++ "target %s not supplied expected values",
++ cmd->cdb[0], dev->handler->name, cmd->tgtt->name);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
++ goto out_done;
++ }
++#else
++ /*
++ * Let's ignore reporting T10/04-262r7 16-byte and 12-byte ATA
++ * pass-thru commands to not pollute logs (udev(?) checks them
++ * for some reason). If somebody has their description, please,
++ * update scst_scsi_op_table.
++ */
++ if ((cmd->cdb[0] != 0x85) && (cmd->cdb[0] != 0xa1))
++ PRINT_ERROR("Refusing unknown opcode %x", cmd->cdb[0]);
++ else
++ TRACE(TRACE_MINOR, "Refusing unknown opcode %x",
++ cmd->cdb[0]);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
++ goto out_done;
++#endif
++ }
++
++ if (unlikely(cmd->cdb_len == 0)) {
++ PRINT_ERROR("Unable to get CDB length for "
++ "opcode 0x%02x. Returning INVALID "
++ "OPCODE", cmd->cdb[0]);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
++ goto out_done;
++ }
++
++ EXTRACHECKS_BUG_ON(cmd->cdb_len == 0);
++
++ TRACE(TRACE_SCSI, "op_name <%s> (cmd %p), direction=%d "
++ "(expected %d, set %s), bufflen=%d, out_bufflen=%d, (expected "
++ "len %d, out expected len %d), flags=%x", cmd->op_name, cmd,
++ cmd->data_direction, cmd->expected_data_direction,
++ scst_cmd_is_expected_set(cmd) ? "yes" : "no",
++ cmd->bufflen, cmd->out_bufflen, cmd->expected_transfer_len,
++ cmd->expected_out_transfer_len, cmd->op_flags);
++
++ if (unlikely((cmd->op_flags & SCST_UNKNOWN_LENGTH) != 0)) {
++ if (scst_cmd_is_expected_set(cmd)) {
++ /*
++ * Command data length can't be easily
++ * determined from the CDB. ToDo, all such
++ * commands processing should be fixed. Until
++ * it's done, get the length from the supplied
++ * expected value, but limit it to some
++ * reasonable value (15MB).
++ */
++ cmd->bufflen = min(cmd->expected_transfer_len,
++ 15*1024*1024);
++ if (cmd->data_direction == SCST_DATA_BIDI)
++ cmd->out_bufflen = min(cmd->expected_out_transfer_len,
++ 15*1024*1024);
++ cmd->op_flags &= ~SCST_UNKNOWN_LENGTH;
++ } else {
++ PRINT_ERROR("Unknown data transfer length for opcode "
++ "0x%x (handler %s, target %s)", cmd->cdb[0],
++ dev->handler->name, cmd->tgtt->name);
++ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_message));
++ goto out_done;
++ }
++ }
++
++ if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
++ PRINT_ERROR("NACA bit in control byte CDB is not supported "
++ "(opcode 0x%02x)", cmd->cdb[0]);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_done;
++ }
++
++ if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
++ PRINT_ERROR("Linked commands are not supported "
++ "(opcode 0x%02x)", cmd->cdb[0]);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_done;
++ }
++
++ if (cmd->dh_data_buf_alloced &&
++ unlikely((orig_bufflen > cmd->bufflen))) {
++ PRINT_ERROR("Dev handler supplied data buffer (size %d), "
++ "is less, than required (size %d)", cmd->bufflen,
++ orig_bufflen);
++ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
++ goto out_hw_error;
++ }
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ if ((cmd->bufflen != 0) &&
++ ((cmd->data_direction == SCST_DATA_NONE) ||
++ ((cmd->sg == NULL) && (state > SCST_CMD_STATE_PREPARE_SPACE)))) {
++ PRINT_ERROR("Dev handler %s parse() returned "
++ "invalid cmd data_direction %d, bufflen %d, state %d "
++ "or sg %p (opcode 0x%x)", dev->handler->name,
++ cmd->data_direction, cmd->bufflen, state, cmd->sg,
++ cmd->cdb[0]);
++ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
++ goto out_hw_error;
++ }
++#endif
++
++ if (scst_cmd_is_expected_set(cmd)) {
++#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
++ if (unlikely((cmd->data_direction != cmd->expected_data_direction) ||
++ (cmd->bufflen != cmd->expected_transfer_len) ||
++ (cmd->out_bufflen != cmd->expected_out_transfer_len))) {
++ TRACE(TRACE_MINOR, "Expected values don't match "
++ "decoded ones: data_direction %d, "
++ "expected_data_direction %d, "
++ "bufflen %d, expected_transfer_len %d, "
++ "out_bufflen %d, expected_out_transfer_len %d",
++ cmd->data_direction,
++ cmd->expected_data_direction,
++ cmd->bufflen, cmd->expected_transfer_len,
++ cmd->out_bufflen, cmd->expected_out_transfer_len);
++ PRINT_BUFF_FLAG(TRACE_MINOR, "Suspicious CDB",
++ cmd->cdb, cmd->cdb_len);
++ cmd->data_direction = cmd->expected_data_direction;
++ cmd->bufflen = cmd->expected_transfer_len;
++ cmd->out_bufflen = cmd->expected_out_transfer_len;
++ cmd->resid_possible = 1;
++ }
++#else
++ if (unlikely(cmd->data_direction !=
++ cmd->expected_data_direction)) {
++ if (((cmd->expected_data_direction != SCST_DATA_NONE) ||
++ (cmd->bufflen != 0)) &&
++ !scst_is_allowed_to_mismatch_cmd(cmd)) {
++ PRINT_ERROR("Expected data direction %d for "
++ "opcode 0x%02x (handler %s, target %s) "
++ "doesn't match decoded value %d",
++ cmd->expected_data_direction,
++ cmd->cdb[0], dev->handler->name,
++ cmd->tgtt->name, cmd->data_direction);
++ PRINT_BUFFER("Failed CDB", cmd->cdb,
++ cmd->cdb_len);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_message));
++ goto out_done;
++ }
++ }
++ if (unlikely(cmd->bufflen != cmd->expected_transfer_len)) {
++ TRACE(TRACE_MINOR, "Warning: expected "
++ "transfer length %d for opcode 0x%02x "
++ "(handler %s, target %s) doesn't match "
++ "decoded value %d",
++ cmd->expected_transfer_len, cmd->cdb[0],
++ dev->handler->name, cmd->tgtt->name,
++ cmd->bufflen);
++ PRINT_BUFF_FLAG(TRACE_MINOR, "Suspicious CDB",
++ cmd->cdb, cmd->cdb_len);
++ if ((cmd->data_direction & SCST_DATA_READ) ||
++ (cmd->data_direction & SCST_DATA_WRITE))
++ cmd->resid_possible = 1;
++ }
++ if (unlikely(cmd->out_bufflen != cmd->expected_out_transfer_len)) {
++ TRACE(TRACE_MINOR, "Warning: expected bidirectional OUT "
++ "transfer length %d for opcode 0x%02x "
++ "(handler %s, target %s) doesn't match "
++ "decoded value %d",
++ cmd->expected_out_transfer_len, cmd->cdb[0],
++ dev->handler->name, cmd->tgtt->name,
++ cmd->out_bufflen);
++ PRINT_BUFF_FLAG(TRACE_MINOR, "Suspicious CDB",
++ cmd->cdb, cmd->cdb_len);
++ cmd->resid_possible = 1;
++ }
++#endif
++ }
++
++ if (unlikely(cmd->data_direction == SCST_DATA_UNKNOWN)) {
++ PRINT_ERROR("Unknown data direction. Opcode 0x%x, handler %s, "
++ "target %s", cmd->cdb[0], dev->handler->name,
++ cmd->tgtt->name);
++ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
++ goto out_hw_error;
++ }
++
++set_res:
++ if (cmd->data_len == -1)
++ cmd->data_len = cmd->bufflen;
++
++ if (cmd->bufflen == 0) {
++ /*
++ * According to SPC bufflen 0 for data transfer commands isn't
++ * an error, so we need to fix the transfer direction.
++ */
++ cmd->data_direction = SCST_DATA_NONE;
++ }
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ switch (state) {
++ case SCST_CMD_STATE_PREPARE_SPACE:
++ case SCST_CMD_STATE_PARSE:
++ case SCST_CMD_STATE_RDY_TO_XFER:
++ case SCST_CMD_STATE_TGT_PRE_EXEC:
++ case SCST_CMD_STATE_SEND_FOR_EXEC:
++ case SCST_CMD_STATE_START_EXEC:
++ case SCST_CMD_STATE_LOCAL_EXEC:
++ case SCST_CMD_STATE_REAL_EXEC:
++ case SCST_CMD_STATE_PRE_DEV_DONE:
++ case SCST_CMD_STATE_DEV_DONE:
++ case SCST_CMD_STATE_PRE_XMIT_RESP:
++ case SCST_CMD_STATE_XMIT_RESP:
++ case SCST_CMD_STATE_FINISHED:
++ case SCST_CMD_STATE_FINISHED_INTERNAL:
++#endif
++ cmd->state = state;
++ res = SCST_CMD_STATE_RES_CONT_SAME;
++#ifdef CONFIG_SCST_EXTRACHECKS
++ break;
++
++ default:
++ if (state >= 0) {
++ PRINT_ERROR("Dev handler %s parse() returned "
++ "invalid cmd state %d (opcode %d)",
++ dev->handler->name, state, cmd->cdb[0]);
++ } else {
++ PRINT_ERROR("Dev handler %s parse() returned "
++ "error %d (opcode %d)", dev->handler->name,
++ state, cmd->cdb[0]);
++ }
++ goto out_hw_error;
++ }
++#endif
++
++ if (cmd->resp_data_len == -1) {
++ if (cmd->data_direction & SCST_DATA_READ)
++ cmd->resp_data_len = cmd->bufflen;
++ else
++ cmd->resp_data_len = 0;
++ }
++
++ /* We already completed (with an error) */
++ if (unlikely(cmd->completed))
++ goto out_done;
++
++#ifndef CONFIG_SCST_TEST_IO_IN_SIRQ
++ /*
++ * We can't allow atomic command on the exec stages. It shouldn't
++ * be because of the SCST_TGT_DEV_AFTER_* optimization, but during
++ * parsing data_direction can change, so we need to recheck.
++ */
++ if (unlikely(scst_cmd_atomic(cmd) &&
++ !(cmd->data_direction & SCST_DATA_WRITE))) {
++ TRACE_DBG_FLAG(TRACE_DEBUG|TRACE_MINOR, "Atomic context and "
++ "non-WRITE data direction, rescheduling (cmd %p)", cmd);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ }
++#endif
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++
++out_hw_error:
++ /* dev_done() will be called as part of the regular cmd's finish */
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
++
++out_done:
++ scst_set_cmd_abnormal_done_state(cmd);
++ res = SCST_CMD_STATE_RES_CONT_SAME;
++ goto out;
++}
++
++static void scst_set_write_len(struct scst_cmd *cmd)
++{
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(!(cmd->data_direction & SCST_DATA_WRITE));
++
++ if (cmd->data_direction & SCST_DATA_READ) {
++ cmd->write_len = cmd->out_bufflen;
++ cmd->write_sg = &cmd->out_sg;
++ cmd->write_sg_cnt = &cmd->out_sg_cnt;
++ } else {
++ cmd->write_len = cmd->bufflen;
++ /* write_sg and write_sg_cnt already initialized correctly */
++ }
++
++ TRACE_MEM("cmd %p, write_len %d, write_sg %p, write_sg_cnt %d, "
++ "resid_possible %d", cmd, cmd->write_len, *cmd->write_sg,
++ *cmd->write_sg_cnt, cmd->resid_possible);
++
++ if (unlikely(cmd->resid_possible)) {
++ if (cmd->data_direction & SCST_DATA_READ) {
++ cmd->write_len = min(cmd->out_bufflen,
++ cmd->expected_out_transfer_len);
++ if (cmd->write_len == cmd->out_bufflen)
++ goto out;
++ } else {
++ cmd->write_len = min(cmd->bufflen,
++ cmd->expected_transfer_len);
++ if (cmd->write_len == cmd->bufflen)
++ goto out;
++ }
++ scst_limit_sg_write_len(cmd);
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++static int scst_prepare_space(struct scst_cmd *cmd)
++{
++ int r = 0, res = SCST_CMD_STATE_RES_CONT_SAME;
++ struct scst_device *dev = cmd->dev;
++
++ TRACE_ENTRY();
++
++ if (cmd->data_direction == SCST_DATA_NONE)
++ goto done;
++
++ if (likely(!scst_is_cmd_fully_local(cmd)) &&
++ (dev->handler->alloc_data_buf != NULL)) {
++ int state;
++
++ if (unlikely(!dev->handler->alloc_data_buf_atomic &&
++ scst_cmd_atomic(cmd))) {
++ /*
++ * It shouldn't be because of the SCST_TGT_DEV_AFTER_*
++ * optimization.
++ */
++ TRACE_MGMT_DBG("Dev handler %s alloc_data_buf() needs "
++ "thread context, rescheduling",
++ dev->handler->name);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ }
++
++ TRACE_DBG("Calling dev handler %s alloc_data_buf(%p)",
++ dev->handler->name, cmd);
++ scst_set_cur_start(cmd);
++ state = dev->handler->alloc_data_buf(cmd);
++ /* Caution: cmd can be already dead here */
++ TRACE_DBG("Dev handler %s alloc_data_buf() returned %d",
++ dev->handler->name, state);
++
++ switch (state) {
++ case SCST_CMD_STATE_NEED_THREAD_CTX:
++ scst_set_alloc_buf_time(cmd);
++ TRACE_DBG("Dev handler %s alloc_data_buf() requested "
++ "thread context, rescheduling",
++ dev->handler->name);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++
++ case SCST_CMD_STATE_STOP:
++ TRACE_DBG("Dev handler %s alloc_data_buf() requested "
++ "stop processing", dev->handler->name);
++ res = SCST_CMD_STATE_RES_CONT_NEXT;
++ goto out;
++ }
++
++ scst_set_alloc_buf_time(cmd);
++
++ if (unlikely(state != SCST_CMD_STATE_DEFAULT)) {
++ cmd->state = state;
++ goto out;
++ }
++ }
++
++ if (cmd->tgt_need_alloc_data_buf) {
++ int orig_bufflen = cmd->bufflen;
++
++ TRACE_MEM("Custom tgt data buf allocation requested (cmd %p)",
++ cmd);
++
++ scst_set_cur_start(cmd);
++ r = cmd->tgtt->alloc_data_buf(cmd);
++ scst_set_alloc_buf_time(cmd);
++
++ if (r > 0)
++ goto alloc;
++ else if (r == 0) {
++ if (unlikely(cmd->bufflen == 0)) {
++ /* See comment in scst_alloc_space() */
++ if (cmd->sg == NULL)
++ goto alloc;
++ }
++
++ cmd->tgt_data_buf_alloced = 1;
++
++ if (unlikely(orig_bufflen < cmd->bufflen)) {
++ PRINT_ERROR("Target driver allocated data "
++ "buffer (size %d), is less, than "
++ "required (size %d)", orig_bufflen,
++ cmd->bufflen);
++ goto out_error;
++ }
++ TRACE_MEM("tgt_data_buf_alloced (cmd %p)", cmd);
++ } else
++ goto check;
++ }
++
++alloc:
++ if (!cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
++ r = scst_alloc_space(cmd);
++ } else if (cmd->dh_data_buf_alloced && !cmd->tgt_data_buf_alloced) {
++ TRACE_MEM("dh_data_buf_alloced set (cmd %p)", cmd);
++ r = 0;
++ } else if (cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
++ TRACE_MEM("tgt_data_buf_alloced set (cmd %p)", cmd);
++ cmd->sg = cmd->tgt_sg;
++ cmd->sg_cnt = cmd->tgt_sg_cnt;
++ cmd->out_sg = cmd->tgt_out_sg;
++ cmd->out_sg_cnt = cmd->tgt_out_sg_cnt;
++ r = 0;
++ } else {
++ TRACE_MEM("Both *_data_buf_alloced set (cmd %p, sg %p, "
++ "sg_cnt %d, tgt_sg %p, tgt_sg_cnt %d)", cmd, cmd->sg,
++ cmd->sg_cnt, cmd->tgt_sg, cmd->tgt_sg_cnt);
++ r = 0;
++ }
++
++check:
++ if (r != 0) {
++ if (scst_cmd_atomic(cmd)) {
++ TRACE_MEM("%s", "Atomic memory allocation failed, "
++ "rescheduling to the thread");
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ } else
++ goto out_no_space;
++ }
++
++done:
++ if (cmd->preprocessing_only) {
++ cmd->state = SCST_CMD_STATE_PREPROCESSING_DONE;
++ if (cmd->data_direction & SCST_DATA_WRITE)
++ scst_set_write_len(cmd);
++ } else if (cmd->data_direction & SCST_DATA_WRITE) {
++ cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
++ scst_set_write_len(cmd);
++ } else
++ cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++
++out_no_space:
++ TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
++ "(size %d), sending BUSY or QUEUE FULL status", cmd->bufflen);
++ scst_set_busy(cmd);
++ scst_set_cmd_abnormal_done_state(cmd);
++ res = SCST_CMD_STATE_RES_CONT_SAME;
++ goto out;
++
++out_error:
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
++ scst_set_cmd_abnormal_done_state(cmd);
++ res = SCST_CMD_STATE_RES_CONT_SAME;
++ goto out;
++}
++
++static int scst_preprocessing_done(struct scst_cmd *cmd)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(!cmd->preprocessing_only);
++
++ cmd->preprocessing_only = 0;
++
++ res = SCST_CMD_STATE_RES_CONT_NEXT;
++ cmd->state = SCST_CMD_STATE_PREPROCESSING_DONE_CALLED;
++
++ TRACE_DBG("Calling preprocessing_done(cmd %p)", cmd);
++ scst_set_cur_start(cmd);
++ cmd->tgtt->preprocessing_done(cmd);
++ TRACE_DBG("%s", "preprocessing_done() returned");
++
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++/**
++ * scst_restart_cmd() - restart execution of the command
++ * @cmd: SCST commands
++ * @status: completion status
++ * @pref_context: preferred command execution context
++ *
++ * Description:
++ * Notifies SCST that the driver finished its part of the command's
++ * preprocessing and it is ready for further processing.
++ *
++ * The second argument sets completion status
++ * (see SCST_PREPROCESS_STATUS_* constants for details)
++ *
++ * See also comment for scst_cmd_init_done() for the serialization
++ * requirements.
++ */
++void scst_restart_cmd(struct scst_cmd *cmd, int status,
++ enum scst_exec_context pref_context)
++{
++ TRACE_ENTRY();
++
++ scst_set_restart_waiting_time(cmd);
++
++ TRACE_DBG("Preferred context: %d", pref_context);
++ TRACE_DBG("tag=%llu, status=%#x",
++ (long long unsigned int)scst_cmd_get_tag(cmd),
++ status);
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ if ((in_irq() || irqs_disabled()) &&
++ ((pref_context == SCST_CONTEXT_DIRECT) ||
++ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
++ PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
++ "SCST_CONTEXT_THREAD instead", pref_context,
++ cmd->tgtt->name);
++ dump_stack();
++ pref_context = SCST_CONTEXT_THREAD;
++ }
++#endif
++
++ switch (status) {
++ case SCST_PREPROCESS_STATUS_SUCCESS:
++ if (cmd->data_direction & SCST_DATA_WRITE)
++ cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
++ else
++ cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
++ if (cmd->set_sn_on_restart_cmd)
++ scst_cmd_set_sn(cmd);
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ if (cmd->op_flags & SCST_TEST_IO_IN_SIRQ_ALLOWED)
++ break;
++#endif
++ /* Small context optimization */
++ if ((pref_context == SCST_CONTEXT_TASKLET) ||
++ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
++ ((pref_context == SCST_CONTEXT_SAME) &&
++ scst_cmd_atomic(cmd)))
++ pref_context = SCST_CONTEXT_THREAD;
++ break;
++
++ case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
++ scst_set_cmd_abnormal_done_state(cmd);
++ pref_context = SCST_CONTEXT_THREAD;
++ break;
++
++ case SCST_PREPROCESS_STATUS_ERROR_FATAL:
++ set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
++ /* go through */
++ case SCST_PREPROCESS_STATUS_ERROR:
++ if (cmd->sense != NULL)
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ scst_set_cmd_abnormal_done_state(cmd);
++ pref_context = SCST_CONTEXT_THREAD;
++ break;
++
++ default:
++ PRINT_ERROR("%s() received unknown status %x", __func__,
++ status);
++ scst_set_cmd_abnormal_done_state(cmd);
++ pref_context = SCST_CONTEXT_THREAD;
++ break;
++ }
++
++ scst_process_redirect_cmd(cmd, pref_context, 1);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL(scst_restart_cmd);
++
++static int scst_rdy_to_xfer(struct scst_cmd *cmd)
++{
++ int res, rc;
++ struct scst_tgt_template *tgtt = cmd->tgtt;
++
++ TRACE_ENTRY();
++
++ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
++ TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
++ goto out_dev_done;
++ }
++
++ if ((tgtt->rdy_to_xfer == NULL) || unlikely(cmd->internal)) {
++ cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
++#ifndef CONFIG_SCST_TEST_IO_IN_SIRQ
++ /* We can't allow atomic command on the exec stages */
++ if (scst_cmd_atomic(cmd)) {
++ TRACE_DBG("NULL rdy_to_xfer() and atomic context, "
++ "rescheduling (cmd %p)", cmd);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ } else
++#endif
++ res = SCST_CMD_STATE_RES_CONT_SAME;
++ goto out;
++ }
++
++ if (unlikely(!tgtt->rdy_to_xfer_atomic && scst_cmd_atomic(cmd))) {
++ /*
++ * It shouldn't be because of the SCST_TGT_DEV_AFTER_*
++ * optimization.
++ */
++ TRACE_MGMT_DBG("Target driver %s rdy_to_xfer() needs thread "
++ "context, rescheduling", tgtt->name);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ }
++
++ while (1) {
++ int finished_cmds = atomic_read(&cmd->tgt->finished_cmds);
++
++ res = SCST_CMD_STATE_RES_CONT_NEXT;
++ cmd->state = SCST_CMD_STATE_DATA_WAIT;
++
++ if (tgtt->on_hw_pending_cmd_timeout != NULL) {
++ struct scst_session *sess = cmd->sess;
++ cmd->hw_pending_start = jiffies;
++ cmd->cmd_hw_pending = 1;
++ if (!test_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags)) {
++ TRACE_DBG("Sched HW pending work for sess %p "
++ "(max time %d)", sess,
++ tgtt->max_hw_pending_time);
++ set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED,
++ &sess->sess_aflags);
++ schedule_delayed_work(&sess->hw_pending_work,
++ tgtt->max_hw_pending_time * HZ);
++ }
++ }
++
++ scst_set_cur_start(cmd);
++
++ TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
++#ifdef CONFIG_SCST_DEBUG_RETRY
++ if (((scst_random() % 100) == 75))
++ rc = SCST_TGT_RES_QUEUE_FULL;
++ else
++#endif
++ rc = tgtt->rdy_to_xfer(cmd);
++ TRACE_DBG("rdy_to_xfer() returned %d", rc);
++
++ if (likely(rc == SCST_TGT_RES_SUCCESS))
++ goto out;
++
++ scst_set_rdy_to_xfer_time(cmd);
++
++ cmd->cmd_hw_pending = 0;
++
++ /* Restore the previous state */
++ cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
++
++ switch (rc) {
++ case SCST_TGT_RES_QUEUE_FULL:
++ if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
++ break;
++ else
++ continue;
++
++ case SCST_TGT_RES_NEED_THREAD_CTX:
++ TRACE_DBG("Target driver %s "
++ "rdy_to_xfer() requested thread "
++ "context, rescheduling", tgtt->name);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++
++ default:
++ goto out_error_rc;
++ }
++ break;
++ }
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++
++out_error_rc:
++ if (rc == SCST_TGT_RES_FATAL_ERROR) {
++ PRINT_ERROR("Target driver %s rdy_to_xfer() returned "
++ "fatal error", tgtt->name);
++ } else {
++ PRINT_ERROR("Target driver %s rdy_to_xfer() returned invalid "
++ "value %d", tgtt->name, rc);
++ }
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
++
++out_dev_done:
++ scst_set_cmd_abnormal_done_state(cmd);
++ res = SCST_CMD_STATE_RES_CONT_SAME;
++ goto out;
++}
++
++/* No locks, but might be in IRQ */
++static void scst_process_redirect_cmd(struct scst_cmd *cmd,
++ enum scst_exec_context context, int check_retries)
++{
++ struct scst_tgt *tgt = cmd->tgt;
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Context: %x", context);
++
++ if (check_retries)
++ scst_check_retries(tgt);
++
++ if (context == SCST_CONTEXT_SAME)
++ context = scst_cmd_atomic(cmd) ? SCST_CONTEXT_DIRECT_ATOMIC :
++ SCST_CONTEXT_DIRECT;
++
++ switch (context) {
++ case SCST_CONTEXT_DIRECT_ATOMIC:
++ scst_process_active_cmd(cmd, true);
++ break;
++
++ case SCST_CONTEXT_DIRECT:
++ scst_process_active_cmd(cmd, false);
++ break;
++
++ case SCST_CONTEXT_TASKLET:
++ scst_schedule_tasklet(cmd);
++ break;
++
++ default:
++ PRINT_ERROR("Context %x is unknown, using the thread one",
++ context);
++ /* go through */
++ case SCST_CONTEXT_THREAD:
++ spin_lock_irqsave(&cmd->cmd_threads->cmd_list_lock, flags);
++ TRACE_DBG("Adding cmd %p to active cmd list", cmd);
++ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
++ list_add(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ else
++ list_add_tail(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
++ spin_unlock_irqrestore(&cmd->cmd_threads->cmd_list_lock, flags);
++ break;
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ * scst_rx_data() - the command's data received
++ * @cmd: SCST commands
++ * @status: data receiving completion status
++ * @pref_context: preferred command execution context
++ *
++ * Description:
++ * Notifies SCST that the driver received all the necessary data
++ * and the command is ready for further processing.
++ *
++ * The second argument sets data receiving completion status
++ * (see SCST_RX_STATUS_* constants for details)
++ */
++void scst_rx_data(struct scst_cmd *cmd, int status,
++ enum scst_exec_context pref_context)
++{
++ TRACE_ENTRY();
++
++ scst_set_rdy_to_xfer_time(cmd);
++
++ TRACE_DBG("Preferred context: %d", pref_context);
++ TRACE(TRACE_SCSI, "cmd %p, status %#x", cmd, status);
++
++ cmd->cmd_hw_pending = 0;
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ if ((in_irq() || irqs_disabled()) &&
++ ((pref_context == SCST_CONTEXT_DIRECT) ||
++ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
++ PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
++ "SCST_CONTEXT_THREAD instead", pref_context,
++ cmd->tgtt->name);
++ dump_stack();
++ pref_context = SCST_CONTEXT_THREAD;
++ }
++#endif
++
++ switch (status) {
++ case SCST_RX_STATUS_SUCCESS:
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ if (trace_flag & TRACE_RCV_BOT) {
++ int i, j;
++ struct scatterlist *sg;
++ if (cmd->out_sg != NULL)
++ sg = cmd->out_sg;
++ else if (cmd->tgt_out_sg != NULL)
++ sg = cmd->tgt_out_sg;
++ else if (cmd->tgt_sg != NULL)
++ sg = cmd->tgt_sg;
++ else
++ sg = cmd->sg;
++ if (sg != NULL) {
++ TRACE_RECV_BOT("RX data for cmd %p "
++ "(sg_cnt %d, sg %p, sg[0].page %p)",
++ cmd, cmd->tgt_sg_cnt, sg,
++ (void *)sg_page(&sg[0]));
++ for (i = 0, j = 0; i < cmd->tgt_sg_cnt; ++i, ++j) {
++ if (unlikely(sg_is_chain(&sg[j]))) {
++ sg = sg_chain_ptr(&sg[j]);
++ j = 0;
++ }
++ PRINT_BUFF_FLAG(TRACE_RCV_BOT, "RX sg",
++ sg_virt(&sg[j]), sg[j].length);
++ }
++ }
++ }
++#endif
++ cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
++
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ if (cmd->op_flags & SCST_TEST_IO_IN_SIRQ_ALLOWED)
++ break;
++#endif
++
++ /* Small context optimization */
++ if ((pref_context == SCST_CONTEXT_TASKLET) ||
++ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
++ ((pref_context == SCST_CONTEXT_SAME) &&
++ scst_cmd_atomic(cmd)))
++ pref_context = SCST_CONTEXT_THREAD;
++ break;
++
++ case SCST_RX_STATUS_ERROR_SENSE_SET:
++ scst_set_cmd_abnormal_done_state(cmd);
++ pref_context = SCST_CONTEXT_THREAD;
++ break;
++
++ case SCST_RX_STATUS_ERROR_FATAL:
++ set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
++ /* go through */
++ case SCST_RX_STATUS_ERROR:
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ scst_set_cmd_abnormal_done_state(cmd);
++ pref_context = SCST_CONTEXT_THREAD;
++ break;
++
++ default:
++ PRINT_ERROR("scst_rx_data() received unknown status %x",
++ status);
++ scst_set_cmd_abnormal_done_state(cmd);
++ pref_context = SCST_CONTEXT_THREAD;
++ break;
++ }
++
++ scst_process_redirect_cmd(cmd, pref_context, 1);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL(scst_rx_data);
++
++static int scst_tgt_pre_exec(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_RES_CONT_SAME, rc;
++
++ TRACE_ENTRY();
++
++ if (unlikely(cmd->resid_possible)) {
++ if (cmd->data_direction & SCST_DATA_WRITE) {
++ bool do_zero = false;
++ if (cmd->data_direction & SCST_DATA_READ) {
++ if (cmd->write_len != cmd->out_bufflen)
++ do_zero = true;
++ } else {
++ if (cmd->write_len != cmd->bufflen)
++ do_zero = true;
++ }
++ if (do_zero) {
++ scst_check_restore_sg_buff(cmd);
++ scst_zero_write_rest(cmd);
++ }
++ }
++ }
++
++ cmd->state = SCST_CMD_STATE_SEND_FOR_EXEC;
++
++ if ((cmd->tgtt->pre_exec == NULL) || unlikely(cmd->internal))
++ goto out;
++
++ TRACE_DBG("Calling pre_exec(%p)", cmd);
++ scst_set_cur_start(cmd);
++ rc = cmd->tgtt->pre_exec(cmd);
++ scst_set_pre_exec_time(cmd);
++ TRACE_DBG("pre_exec() returned %d", rc);
++
++ if (unlikely(rc != SCST_PREPROCESS_STATUS_SUCCESS)) {
++ switch (rc) {
++ case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
++ scst_set_cmd_abnormal_done_state(cmd);
++ break;
++ case SCST_PREPROCESS_STATUS_ERROR_FATAL:
++ set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
++ /* go through */
++ case SCST_PREPROCESS_STATUS_ERROR:
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ scst_set_cmd_abnormal_done_state(cmd);
++ break;
++ default:
++ BUG();
++ break;
++ }
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
++ const uint8_t *rq_sense, int rq_sense_len, int resid)
++{
++ TRACE_ENTRY();
++
++ scst_set_exec_time(cmd);
++
++ cmd->status = result & 0xff;
++ cmd->msg_status = msg_byte(result);
++ cmd->host_status = host_byte(result);
++ cmd->driver_status = driver_byte(result);
++ if (unlikely(resid != 0)) {
++ if ((cmd->data_direction & SCST_DATA_READ) &&
++ (resid > 0) && (resid < cmd->resp_data_len))
++ scst_set_resp_data_len(cmd, cmd->resp_data_len - resid);
++ /*
++ * We ignore write direction residue, because from the
++ * initiator's POV we already transferred all the data.
++ */
++ }
++
++ if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION)) {
++ /* We might have double reset UA here */
++ cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
++ cmd->dbl_ua_orig_data_direction = cmd->data_direction;
++
++ scst_alloc_set_sense(cmd, 1, rq_sense, rq_sense_len);
++ }
++
++ TRACE(TRACE_SCSI, "cmd %p, result %x, cmd->status %x, resid %d, "
++ "cmd->msg_status %x, cmd->host_status %x, "
++ "cmd->driver_status %x", cmd, result, cmd->status, resid,
++ cmd->msg_status, cmd->host_status, cmd->driver_status);
++
++ cmd->completed = 1;
++
++ TRACE_EXIT();
++ return;
++}
++
++/* For small context optimization */
++static inline enum scst_exec_context scst_optimize_post_exec_context(
++ struct scst_cmd *cmd, enum scst_exec_context context)
++{
++ if (((context == SCST_CONTEXT_SAME) && scst_cmd_atomic(cmd)) ||
++ (context == SCST_CONTEXT_TASKLET) ||
++ (context == SCST_CONTEXT_DIRECT_ATOMIC)) {
++ if (!test_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
++ &cmd->tgt_dev->tgt_dev_flags))
++ context = SCST_CONTEXT_THREAD;
++ }
++ return context;
++}
++
++/**
++ * scst_pass_through_cmd_done - done callback for pass-through commands
++ * @data: private opaque data
++ * @sense: pointer to the sense data, if any
++ * @result: command's execution result
++ * @resid: residual, if any
++ */
++void scst_pass_through_cmd_done(void *data, char *sense, int result, int resid)
++{
++ struct scst_cmd *cmd;
++
++ TRACE_ENTRY();
++
++ cmd = (struct scst_cmd *)data;
++ if (cmd == NULL)
++ goto out;
++
++ scst_do_cmd_done(cmd, result, sense, SCSI_SENSE_BUFFERSIZE, resid);
++
++ cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
++
++ scst_process_redirect_cmd(cmd,
++ scst_optimize_post_exec_context(cmd, scst_estimate_context()), 0);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_pass_through_cmd_done);
++
++static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state,
++ enum scst_exec_context pref_context)
++{
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(cmd->pr_abort_counter != NULL);
++
++ scst_set_exec_time(cmd);
++
++ TRACE(TRACE_SCSI, "cmd %p, status %x, msg_status %x, host_status %x, "
++ "driver_status %x, resp_data_len %d", cmd, cmd->status,
++ cmd->msg_status, cmd->host_status, cmd->driver_status,
++ cmd->resp_data_len);
++
++ if (next_state == SCST_CMD_STATE_DEFAULT)
++ next_state = SCST_CMD_STATE_PRE_DEV_DONE;
++
++#if defined(CONFIG_SCST_DEBUG)
++ if (next_state == SCST_CMD_STATE_PRE_DEV_DONE) {
++ if ((trace_flag & TRACE_RCV_TOP) && (cmd->sg != NULL)) {
++ int i, j;
++ struct scatterlist *sg = cmd->sg;
++ TRACE_RECV_TOP("Exec'd %d S/G(s) at %p sg[0].page at "
++ "%p", cmd->sg_cnt, sg, (void *)sg_page(&sg[0]));
++ for (i = 0, j = 0; i < cmd->sg_cnt; ++i, ++j) {
++ if (unlikely(sg_is_chain(&sg[j]))) {
++ sg = sg_chain_ptr(&sg[j]);
++ j = 0;
++ }
++ TRACE_BUFF_FLAG(TRACE_RCV_TOP,
++ "Exec'd sg", sg_virt(&sg[j]),
++ sg[j].length);
++ }
++ }
++ }
++#endif
++
++ cmd->state = next_state;
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ if ((next_state != SCST_CMD_STATE_PRE_DEV_DONE) &&
++ (next_state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
++ (next_state != SCST_CMD_STATE_FINISHED) &&
++ (next_state != SCST_CMD_STATE_FINISHED_INTERNAL)) {
++ PRINT_ERROR("%s() received invalid cmd state %d (opcode %d)",
++ __func__, next_state, cmd->cdb[0]);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ scst_set_cmd_abnormal_done_state(cmd);
++ }
++#endif
++ pref_context = scst_optimize_post_exec_context(cmd, pref_context);
++ scst_process_redirect_cmd(cmd, pref_context, 0);
++
++ TRACE_EXIT();
++ return;
++}
++
++static int scst_report_luns_local(struct scst_cmd *cmd)
++{
++ int res = SCST_EXEC_COMPLETED, rc;
++ int dev_cnt = 0;
++ int buffer_size;
++ int i;
++ struct scst_tgt_dev *tgt_dev = NULL;
++ uint8_t *buffer;
++ int offs, overflow = 0;
++
++ TRACE_ENTRY();
++
++ rc = scst_check_local_events(cmd);
++ if (unlikely(rc != 0))
++ goto out_done;
++
++ cmd->status = 0;
++ cmd->msg_status = 0;
++ cmd->host_status = DID_OK;
++ cmd->driver_status = 0;
++
++ if ((cmd->cdb[2] != 0) && (cmd->cdb[2] != 2)) {
++ PRINT_ERROR("Unsupported SELECT REPORT value %x in REPORT "
++ "LUNS command", cmd->cdb[2]);
++ goto out_err;
++ }
++
++ buffer_size = scst_get_buf_full(cmd, &buffer);
++ if (unlikely(buffer_size == 0))
++ goto out_compl;
++ else if (unlikely(buffer_size < 0))
++ goto out_hw_err;
++
++ if (buffer_size < 16)
++ goto out_put_err;
++
++ memset(buffer, 0, buffer_size);
++ offs = 8;
++
++ /*
++ * cmd won't allow to suspend activities, so we can access
++ * sess->sess_tgt_dev_list without any additional protection.
++ */
++ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
++ struct list_head *head = &cmd->sess->sess_tgt_dev_list[i];
++ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
++ if (!overflow) {
++ if ((buffer_size - offs) < 8) {
++ overflow = 1;
++ goto inc_dev_cnt;
++ }
++ *(__force __be64 *)&buffer[offs]
++ = scst_pack_lun(tgt_dev->lun,
++ cmd->sess->acg->addr_method);
++ offs += 8;
++ }
++inc_dev_cnt:
++ dev_cnt++;
++ }
++ }
++
++ /* Set the response header */
++ dev_cnt *= 8;
++ buffer[0] = (dev_cnt >> 24) & 0xff;
++ buffer[1] = (dev_cnt >> 16) & 0xff;
++ buffer[2] = (dev_cnt >> 8) & 0xff;
++ buffer[3] = dev_cnt & 0xff;
++
++ scst_put_buf_full(cmd, buffer);
++
++ dev_cnt += 8;
++ if (dev_cnt < cmd->resp_data_len)
++ scst_set_resp_data_len(cmd, dev_cnt);
++
++out_compl:
++ cmd->completed = 1;
++
++ /* Clear left sense_reported_luns_data_changed UA, if any. */
++
++ /*
++ * cmd won't allow to suspend activities, so we can access
++ * sess->sess_tgt_dev_list without any additional protection.
++ */
++ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
++ struct list_head *head = &cmd->sess->sess_tgt_dev_list[i];
++
++ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
++ struct scst_tgt_dev_UA *ua;
++
++ spin_lock_bh(&tgt_dev->tgt_dev_lock);
++ list_for_each_entry(ua, &tgt_dev->UA_list,
++ UA_list_entry) {
++ if (scst_analyze_sense(ua->UA_sense_buffer,
++ ua->UA_valid_sense_len,
++ SCST_SENSE_ALL_VALID,
++ SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed))) {
++ TRACE_MGMT_DBG("Freeing not needed "
++ "REPORTED LUNS DATA CHANGED UA "
++ "%p", ua);
++ scst_tgt_dev_del_free_UA(tgt_dev, ua);
++ break;
++ }
++ }
++ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
++ }
++ }
++
++out_done:
++ /* Report the result */
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
++
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_put_err:
++ scst_put_buf_full(cmd, buffer);
++
++out_err:
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_compl;
++
++out_hw_err:
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out_compl;
++}
++
++static int scst_request_sense_local(struct scst_cmd *cmd)
++{
++ int res = SCST_EXEC_COMPLETED, rc;
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++ uint8_t *buffer;
++ int buffer_size = 0, sl = 0;
++
++ TRACE_ENTRY();
++
++ rc = scst_check_local_events(cmd);
++ if (unlikely(rc != 0))
++ goto out_done;
++
++ cmd->status = 0;
++ cmd->msg_status = 0;
++ cmd->host_status = DID_OK;
++ cmd->driver_status = 0;
++
++ spin_lock_bh(&tgt_dev->tgt_dev_lock);
++
++ if (tgt_dev->tgt_dev_valid_sense_len == 0)
++ goto out_unlock_not_completed;
++
++ TRACE(TRACE_SCSI, "%s: Returning stored sense", cmd->op_name);
++
++ buffer_size = scst_get_buf_full(cmd, &buffer);
++ if (unlikely(buffer_size == 0))
++ goto out_unlock_compl;
++ else if (unlikely(buffer_size < 0))
++ goto out_unlock_hw_err;
++
++ memset(buffer, 0, buffer_size);
++
++ if (((tgt_dev->tgt_dev_sense[0] == 0x70) ||
++ (tgt_dev->tgt_dev_sense[0] == 0x71)) && (cmd->cdb[1] & 1)) {
++ PRINT_WARNING("%s: Fixed format of the saved sense, but "
++ "descriptor format requested. Conversion will "
++ "truncated data", cmd->op_name);
++ PRINT_BUFFER("Original sense", tgt_dev->tgt_dev_sense,
++ tgt_dev->tgt_dev_valid_sense_len);
++
++ buffer_size = min(SCST_STANDARD_SENSE_LEN, buffer_size);
++ sl = scst_set_sense(buffer, buffer_size, true,
++ tgt_dev->tgt_dev_sense[2], tgt_dev->tgt_dev_sense[12],
++ tgt_dev->tgt_dev_sense[13]);
++ } else if (((tgt_dev->tgt_dev_sense[0] == 0x72) ||
++ (tgt_dev->tgt_dev_sense[0] == 0x73)) && !(cmd->cdb[1] & 1)) {
++ PRINT_WARNING("%s: Descriptor format of the "
++ "saved sense, but fixed format requested. Conversion "
++ "will truncated data", cmd->op_name);
++ PRINT_BUFFER("Original sense", tgt_dev->tgt_dev_sense,
++ tgt_dev->tgt_dev_valid_sense_len);
++
++ buffer_size = min(SCST_STANDARD_SENSE_LEN, buffer_size);
++ sl = scst_set_sense(buffer, buffer_size, false,
++ tgt_dev->tgt_dev_sense[1], tgt_dev->tgt_dev_sense[2],
++ tgt_dev->tgt_dev_sense[3]);
++ } else {
++ if (buffer_size >= tgt_dev->tgt_dev_valid_sense_len)
++ sl = tgt_dev->tgt_dev_valid_sense_len;
++ else {
++ sl = buffer_size;
++ TRACE(TRACE_MINOR, "%s: Being returned sense truncated "
++ "to size %d (needed %d)", cmd->op_name,
++ buffer_size, tgt_dev->tgt_dev_valid_sense_len);
++ }
++ memcpy(buffer, tgt_dev->tgt_dev_sense, sl);
++ }
++
++ scst_put_buf_full(cmd, buffer);
++
++ tgt_dev->tgt_dev_valid_sense_len = 0;
++
++ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
++
++ scst_set_resp_data_len(cmd, sl);
++
++out_compl:
++ cmd->completed = 1;
++
++out_done:
++ /* Report the result */
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_unlock_hw_err:
++ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out_compl;
++
++out_unlock_not_completed:
++ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
++ res = SCST_EXEC_NOT_COMPLETED;
++ goto out;
++
++out_unlock_compl:
++ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
++ goto out_compl;
++}
++
++static int scst_reserve_local(struct scst_cmd *cmd)
++{
++ int res = SCST_EXEC_NOT_COMPLETED, rc;
++ struct scst_device *dev;
++ struct scst_tgt_dev *tgt_dev_tmp;
++
++ TRACE_ENTRY();
++
++ if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
++ PRINT_ERROR("RESERVE_10: 3rdPty RESERVE not implemented "
++ "(lun=%lld)", (long long unsigned int)cmd->lun);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_done;
++ }
++
++ dev = cmd->dev;
++
++ /*
++ * There's no need to block this device, even for
++ * SCST_CONTR_MODE_ONE_TASK_SET, or anyhow else protect reservations
++ * changes, because:
++ *
++ * 1. The reservation changes are (rather) atomic, i.e., in contrast
++ * to persistent reservations, don't have any invalid intermediate
++ * states during being changed.
++ *
++ * 2. It's a duty of initiators to ensure order of regular commands
++ * around the reservation command either by ORDERED attribute, or by
++ * queue draining, or etc. For case of SCST_CONTR_MODE_ONE_TASK_SET
++ * there are no target drivers which can ensure even for ORDERED
++ * commands order of their delivery, so, because initiators know
++ * it, also there's no point to do any extra protection actions.
++ */
++
++ rc = scst_pre_check_local_events(cmd);
++ if (unlikely(rc != 0))
++ goto out_done;
++
++ if (!list_empty(&dev->dev_registrants_list)) {
++ if (scst_pr_crh_case(cmd))
++ goto out_completed;
++ else {
++ scst_set_cmd_error_status(cmd,
++ SAM_STAT_RESERVATION_CONFLICT);
++ goto out_done;
++ }
++ }
++
++ spin_lock_bh(&dev->dev_lock);
++
++ if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
++ spin_unlock_bh(&dev->dev_lock);
++ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
++ goto out_done;
++ }
++
++ list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ if (cmd->tgt_dev != tgt_dev_tmp)
++ set_bit(SCST_TGT_DEV_RESERVED,
++ &tgt_dev_tmp->tgt_dev_flags);
++ }
++ dev->dev_reserved = 1;
++
++ spin_unlock_bh(&dev->dev_lock);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_completed:
++ cmd->completed = 1;
++
++out_done:
++ /* Report the result */
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
++ res = SCST_EXEC_COMPLETED;
++ goto out;
++}
++
++static int scst_release_local(struct scst_cmd *cmd)
++{
++ int res = SCST_EXEC_NOT_COMPLETED, rc;
++ struct scst_tgt_dev *tgt_dev_tmp;
++ struct scst_device *dev;
++
++ TRACE_ENTRY();
++
++ dev = cmd->dev;
++
++ /*
++ * See comment in scst_reserve_local() why no dev blocking or any
++ * other protection is needed here.
++ */
++
++ rc = scst_pre_check_local_events(cmd);
++ if (unlikely(rc != 0))
++ goto out_done;
++
++ if (!list_empty(&dev->dev_registrants_list)) {
++ if (scst_pr_crh_case(cmd))
++ goto out_completed;
++ else {
++ scst_set_cmd_error_status(cmd,
++ SAM_STAT_RESERVATION_CONFLICT);
++ goto out_done;
++ }
++ }
++
++ spin_lock_bh(&dev->dev_lock);
++
++ /*
++ * The device could be RELEASED behind us, if RESERVING session
++ * is closed (see scst_free_tgt_dev()), but this actually doesn't
++ * matter, so use lock and no retest for DEV_RESERVED bits again
++ */
++ if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
++ res = SCST_EXEC_COMPLETED;
++ cmd->status = 0;
++ cmd->msg_status = 0;
++ cmd->host_status = DID_OK;
++ cmd->driver_status = 0;
++ cmd->completed = 1;
++ } else {
++ list_for_each_entry(tgt_dev_tmp,
++ &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ clear_bit(SCST_TGT_DEV_RESERVED,
++ &tgt_dev_tmp->tgt_dev_flags);
++ }
++ dev->dev_reserved = 0;
++ }
++
++ spin_unlock_bh(&dev->dev_lock);
++
++ if (res == SCST_EXEC_COMPLETED)
++ goto out_done;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_completed:
++ cmd->completed = 1;
++
++out_done:
++ res = SCST_EXEC_COMPLETED;
++ /* Report the result */
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
++ goto out;
++}
++
++/* No locks, no IRQ or IRQ-disabled context allowed */
++static int scst_persistent_reserve_in_local(struct scst_cmd *cmd)
++{
++ int rc;
++ struct scst_device *dev;
++ struct scst_tgt_dev *tgt_dev;
++ struct scst_session *session;
++ int action;
++ uint8_t *buffer;
++ int buffer_size;
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(scst_cmd_atomic(cmd));
++
++ dev = cmd->dev;
++ tgt_dev = cmd->tgt_dev;
++ session = cmd->sess;
++
++ rc = scst_check_local_events(cmd);
++ if (unlikely(rc != 0))
++ goto out_done;
++
++ if (unlikely(dev->not_pr_supporting_tgt_devs_num != 0)) {
++ PRINT_WARNING("Persistent Reservation command %x refused for "
++ "device %s, because the device has not supporting PR "
++ "transports connected", cmd->cdb[0], dev->virt_name);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
++ goto out_done;
++ }
++
++ if (dev->dev_reserved) {
++ TRACE_PR("PR command rejected, because device %s holds regular "
++ "reservation", dev->virt_name);
++ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
++ goto out_done;
++ }
++
++ if (dev->scsi_dev != NULL) {
++ PRINT_WARNING("PR commands for pass-through devices not "
++ "supported (device %s)", dev->virt_name);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
++ goto out_done;
++ }
++
++ buffer_size = scst_get_buf_full(cmd, &buffer);
++ if (unlikely(buffer_size <= 0)) {
++ if (buffer_size < 0)
++ scst_set_busy(cmd);
++ goto out_done;
++ }
++
++ scst_pr_write_lock(dev);
++
++ /* We can be aborted by another PR command while waiting for the lock */
++ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
++ TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
++ goto out_unlock;
++ }
++
++ action = cmd->cdb[1] & 0x1f;
++
++ TRACE(TRACE_SCSI, "PR action %x for '%s' (LUN %llx) from '%s'", action,
++ dev->virt_name, tgt_dev->lun, session->initiator_name);
++
++ switch (action) {
++ case PR_READ_KEYS:
++ scst_pr_read_keys(cmd, buffer, buffer_size);
++ break;
++ case PR_READ_RESERVATION:
++ scst_pr_read_reservation(cmd, buffer, buffer_size);
++ break;
++ case PR_REPORT_CAPS:
++ scst_pr_report_caps(cmd, buffer, buffer_size);
++ break;
++ case PR_READ_FULL_STATUS:
++ scst_pr_read_full_status(cmd, buffer, buffer_size);
++ break;
++ default:
++ PRINT_ERROR("Unsupported action %x", action);
++ scst_pr_write_unlock(dev);
++ goto out_err;
++ }
++
++out_complete:
++ cmd->completed = 1;
++
++out_unlock:
++ scst_pr_write_unlock(dev);
++
++ scst_put_buf_full(cmd, buffer);
++
++out_done:
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
++
++ TRACE_EXIT_RES(SCST_EXEC_COMPLETED);
++ return SCST_EXEC_COMPLETED;
++
++out_err:
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_complete;
++}
++
++/* No locks, no IRQ or IRQ-disabled context allowed */
++static int scst_persistent_reserve_out_local(struct scst_cmd *cmd)
++{
++ int res = SCST_EXEC_COMPLETED;
++ int rc;
++ struct scst_device *dev;
++ struct scst_tgt_dev *tgt_dev;
++ struct scst_session *session;
++ int action;
++ uint8_t *buffer;
++ int buffer_size;
++ bool aborted = false;
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(scst_cmd_atomic(cmd));
++
++ dev = cmd->dev;
++ tgt_dev = cmd->tgt_dev;
++ session = cmd->sess;
++
++ rc = scst_check_local_events(cmd);
++ if (unlikely(rc != 0))
++ goto out_done;
++
++ if (unlikely(dev->not_pr_supporting_tgt_devs_num != 0)) {
++ PRINT_WARNING("Persistent Reservation command %x refused for "
++ "device %s, because the device has not supporting PR "
++ "transports connected", cmd->cdb[0], dev->virt_name);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
++ goto out_done;
++ }
++
++ action = cmd->cdb[1] & 0x1f;
++
++ TRACE(TRACE_SCSI, "PR action %x for '%s' (LUN %llx) from '%s'", action,
++ dev->virt_name, tgt_dev->lun, session->initiator_name);
++
++ if (dev->dev_reserved) {
++ TRACE_PR("PR command rejected, because device %s holds regular "
++ "reservation", dev->virt_name);
++ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
++ goto out_done;
++ }
++
++ /*
++ * Check if tgt_dev already registered. Also by this check we make
++ * sure that table "PERSISTENT RESERVE OUT service actions that are
++ * allowed in the presence of various reservations" is honored.
++ * REGISTER AND MOVE and RESERVE will be additionally checked for
++ * conflicts later.
++ */
++ if ((action != PR_REGISTER) && (action != PR_REGISTER_AND_IGNORE) &&
++ (tgt_dev->registrant == NULL)) {
++ TRACE_PR("'%s' not registered", cmd->sess->initiator_name);
++ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
++ goto out_done;
++ }
++
++ buffer_size = scst_get_buf_full(cmd, &buffer);
++ if (unlikely(buffer_size <= 0)) {
++ if (buffer_size < 0)
++ scst_set_busy(cmd);
++ goto out_done;
++ }
++
++ /* Check scope */
++ if ((action != PR_REGISTER) && (action != PR_REGISTER_AND_IGNORE) &&
++ (action != PR_CLEAR) && ((cmd->cdb[2] & 0x0f) >> 4) != SCOPE_LU) {
++ TRACE_PR("Scope must be SCOPE_LU for action %x", action);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_put_buf_full;
++ }
++
++ /* Check SPEC_I_PT (PR_REGISTER_AND_MOVE has another format) */
++ if ((action != PR_REGISTER) && (action != PR_REGISTER_AND_MOVE) &&
++ ((buffer[20] >> 3) & 0x01)) {
++ TRACE_PR("SPEC_I_PT must be zero for action %x", action);
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
++ scst_sense_invalid_field_in_cdb));
++ goto out_put_buf_full;
++ }
++
++ /* Check ALL_TG_PT (PR_REGISTER_AND_MOVE has another format) */
++ if ((action != PR_REGISTER) && (action != PR_REGISTER_AND_IGNORE) &&
++ (action != PR_REGISTER_AND_MOVE) && ((buffer[20] >> 2) & 0x01)) {
++ TRACE_PR("ALL_TG_PT must be zero for action %x", action);
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
++ scst_sense_invalid_field_in_cdb));
++ goto out_put_buf_full;
++ }
++
++ scst_pr_write_lock(dev);
++
++ /* We can be aborted by another PR command while waiting for the lock */
++ aborted = test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
++ if (unlikely(aborted)) {
++ TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
++ goto out_unlock;
++ }
++
++ switch (action) {
++ case PR_REGISTER:
++ scst_pr_register(cmd, buffer, buffer_size);
++ break;
++ case PR_RESERVE:
++ scst_pr_reserve(cmd, buffer, buffer_size);
++ break;
++ case PR_RELEASE:
++ scst_pr_release(cmd, buffer, buffer_size);
++ break;
++ case PR_CLEAR:
++ scst_pr_clear(cmd, buffer, buffer_size);
++ break;
++ case PR_PREEMPT:
++ scst_pr_preempt(cmd, buffer, buffer_size);
++ break;
++ case PR_PREEMPT_AND_ABORT:
++ scst_pr_preempt_and_abort(cmd, buffer, buffer_size);
++ break;
++ case PR_REGISTER_AND_IGNORE:
++ scst_pr_register_and_ignore(cmd, buffer, buffer_size);
++ break;
++ case PR_REGISTER_AND_MOVE:
++ scst_pr_register_and_move(cmd, buffer, buffer_size);
++ break;
++ default:
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_unlock;
++ }
++
++ if (cmd->status == SAM_STAT_GOOD)
++ scst_pr_sync_device_file(tgt_dev, cmd);
++
++ if ((dev->handler->pr_cmds_notifications) &&
++ (cmd->status == SAM_STAT_GOOD)) /* sync file may change status */
++ res = SCST_EXEC_NOT_COMPLETED;
++
++out_unlock:
++ scst_pr_write_unlock(dev);
++
++out_put_buf_full:
++ scst_put_buf_full(cmd, buffer);
++
++out_done:
++ if (SCST_EXEC_COMPLETED == res) {
++ if (!aborted)
++ cmd->completed = 1;
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT,
++ SCST_CONTEXT_SAME);
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/**
++ * scst_check_local_events() - check if there are any local SCSI events
++ *
++ * Description:
++ * Checks if the command can be executed or there are local events,
++ * like reservations, pending UAs, etc. Returns < 0 if command must be
++ * aborted, > 0 if there is an event and command should be immediately
++ * completed, or 0 otherwise.
++ *
++ * !! 1.Dev handlers implementing exec() callback must call this function there
++ * !! just before the actual command's execution!
++ * !!
++ * !! 2. If this function can be called more than once on the processing path
++ * !! scst_pre_check_local_events() should be used for the first call!
++ *
++ * On call no locks, no IRQ or IRQ-disabled context allowed.
++ */
++int scst_check_local_events(struct scst_cmd *cmd)
++{
++ int res, rc;
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++ struct scst_device *dev = cmd->dev;
++
++ TRACE_ENTRY();
++
++ /*
++ * There's no race here, because we need to trace commands sent
++ * *after* dev_double_ua_possible flag was set.
++ */
++ if (unlikely(dev->dev_double_ua_possible))
++ cmd->double_ua_possible = 1;
++
++ /* Reserve check before Unit Attention */
++ if (unlikely(test_bit(SCST_TGT_DEV_RESERVED,
++ &tgt_dev->tgt_dev_flags))) {
++ if ((cmd->op_flags & SCST_REG_RESERVE_ALLOWED) == 0) {
++ scst_set_cmd_error_status(cmd,
++ SAM_STAT_RESERVATION_CONFLICT);
++ goto out_dec_pr_readers_count;
++ }
++ }
++
++ if (likely(!cmd->check_local_events_once_done)) {
++ if (dev->pr_is_set) {
++ if (unlikely(!scst_pr_is_cmd_allowed(cmd))) {
++ scst_set_cmd_error_status(cmd,
++ SAM_STAT_RESERVATION_CONFLICT);
++ goto out_complete;
++ }
++ } else
++ scst_dec_pr_readers_count(cmd, false);
++ }
++
++ /*
++ * Let's check for ABORTED after scst_pr_is_cmd_allowed(), because
++ * we might sleep for a while there.
++ */
++ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
++ TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
++ goto out_uncomplete;
++ }
++
++ /* If we had internal bus reset, set the command error unit attention */
++ if ((dev->scsi_dev != NULL) &&
++ unlikely(dev->scsi_dev->was_reset)) {
++ if (scst_is_ua_command(cmd)) {
++ int done = 0;
++ /*
++ * Prevent more than 1 cmd to be triggered by was_reset
++ */
++ spin_lock_bh(&dev->dev_lock);
++ if (dev->scsi_dev->was_reset) {
++ TRACE(TRACE_MGMT, "was_reset is %d", 1);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_reset_UA));
++ /*
++ * It looks like it is safe to clear was_reset
++ * here
++ */
++ dev->scsi_dev->was_reset = 0;
++ done = 1;
++ }
++ spin_unlock_bh(&dev->dev_lock);
++
++ if (done)
++ goto out_complete;
++ }
++ }
++
++ if (unlikely(test_bit(SCST_TGT_DEV_UA_PENDING,
++ &cmd->tgt_dev->tgt_dev_flags))) {
++ if (scst_is_ua_command(cmd)) {
++ rc = scst_set_pending_UA(cmd);
++ if (rc == 0)
++ goto out_complete;
++ }
++ }
++
++ res = 0;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_dec_pr_readers_count:
++ if (cmd->dec_pr_readers_count_needed)
++ scst_dec_pr_readers_count(cmd, false);
++
++out_complete:
++ res = 1;
++ BUG_ON(!cmd->completed);
++ goto out;
++
++out_uncomplete:
++ res = -1;
++ goto out;
++}
++EXPORT_SYMBOL_GPL(scst_check_local_events);
++
++/* No locks */
++void scst_inc_expected_sn(struct scst_order_data *order_data, atomic_t *slot)
++{
++ if (slot == NULL)
++ goto inc;
++
++ /* Optimized for lockless fast path */
++
++ TRACE_SN("Slot %zd, *cur_sn_slot %d", slot - order_data->sn_slots,
++ atomic_read(slot));
++
++ if (!atomic_dec_and_test(slot))
++ goto out;
++
++ TRACE_SN("Slot is 0 (num_free_sn_slots=%d)",
++ order_data->num_free_sn_slots);
++ if (order_data->num_free_sn_slots < (int)ARRAY_SIZE(order_data->sn_slots)-1) {
++ spin_lock_irq(&order_data->sn_lock);
++ if (likely(order_data->num_free_sn_slots < (int)ARRAY_SIZE(order_data->sn_slots)-1)) {
++ if (order_data->num_free_sn_slots < 0)
++ order_data->cur_sn_slot = slot;
++ /* To be in-sync with SIMPLE case in scst_cmd_set_sn() */
++ smp_mb();
++ order_data->num_free_sn_slots++;
++ TRACE_SN("Incremented num_free_sn_slots (%d)",
++ order_data->num_free_sn_slots);
++
++ }
++ spin_unlock_irq(&order_data->sn_lock);
++ }
++
++inc:
++ /*
++ * No protection of expected_sn is needed, because only one thread
++ * at time can be here (serialized by sn). Also it is supposed that
++ * there could not be half-incremented halves.
++ */
++ order_data->expected_sn++;
++ /*
++ * Write must be before def_cmd_count read to be in sync. with
++ * scst_post_exec_sn(). See comment in scst_send_for_exec().
++ */
++ smp_mb();
++ TRACE_SN("Next expected_sn: %d", order_data->expected_sn);
++
++out:
++ return;
++}
++
++/* No locks */
++static struct scst_cmd *scst_post_exec_sn(struct scst_cmd *cmd,
++ bool make_active)
++{
++ /* For HQ commands SN is not set */
++ bool inc_expected_sn = !cmd->inc_expected_sn_on_done &&
++ cmd->sn_set && !cmd->retry;
++ struct scst_order_data *order_data = cmd->cur_order_data;
++ struct scst_cmd *res;
++
++ TRACE_ENTRY();
++
++ if (inc_expected_sn)
++ scst_inc_expected_sn(order_data, cmd->sn_slot);
++
++ if (make_active) {
++ scst_make_deferred_commands_active(order_data);
++ res = NULL;
++ } else
++ res = scst_check_deferred_commands(order_data);
++
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++/* cmd must be additionally referenced to not die inside */
++static int scst_do_real_exec(struct scst_cmd *cmd)
++{
++ int res = SCST_EXEC_NOT_COMPLETED;
++ int rc;
++ struct scst_device *dev = cmd->dev;
++ struct scst_dev_type *handler = dev->handler;
++ struct io_context *old_ctx = NULL;
++ bool ctx_changed = false;
++ struct scsi_device *scsi_dev;
++
++ TRACE_ENTRY();
++
++ ctx_changed = scst_set_io_context(cmd, &old_ctx);
++
++ cmd->state = SCST_CMD_STATE_REAL_EXECUTING;
++
++ if (handler->exec) {
++ TRACE_DBG("Calling dev handler %s exec(%p)",
++ handler->name, cmd);
++ TRACE_BUFF_FLAG(TRACE_SND_TOP, "Execing: ", cmd->cdb,
++ cmd->cdb_len);
++ scst_set_cur_start(cmd);
++ res = handler->exec(cmd);
++ TRACE_DBG("Dev handler %s exec() returned %d",
++ handler->name, res);
++
++ if (res == SCST_EXEC_COMPLETED)
++ goto out_complete;
++
++ scst_set_exec_time(cmd);
++
++ BUG_ON(res != SCST_EXEC_NOT_COMPLETED);
++ }
++
++ TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
++
++ scsi_dev = dev->scsi_dev;
++
++ if (unlikely(scsi_dev == NULL)) {
++ PRINT_ERROR("Command for virtual device must be "
++ "processed by device handler (LUN %lld)!",
++ (long long unsigned int)cmd->lun);
++ goto out_error;
++ }
++
++ res = scst_check_local_events(cmd);
++ if (unlikely(res != 0))
++ goto out_done;
++
++ scst_set_cur_start(cmd);
++
++ rc = scst_scsi_exec_async(cmd, cmd, scst_pass_through_cmd_done);
++ if (unlikely(rc != 0)) {
++ PRINT_ERROR("scst pass-through exec failed: %x", rc);
++ if (((int)rc == -EINVAL) &&
++ (cmd->bufflen > queue_max_hw_sectors(scsi_dev->request_queue)))
++ PRINT_ERROR("Too low max_hw_sectors %d sectors on %s "
++ "to serve command %x with bufflen %db."
++ "See README for more details.",
++ queue_max_hw_sectors(scsi_dev->request_queue),
++ dev->virt_name, cmd->cdb[0], cmd->bufflen);
++ goto out_error;
++ }
++
++out_complete:
++ res = SCST_EXEC_COMPLETED;
++
++ if (ctx_changed)
++ scst_reset_io_context(cmd->tgt_dev, old_ctx);
++
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_error:
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out_done;
++
++out_done:
++ res = SCST_EXEC_COMPLETED;
++ /* Report the result */
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
++ goto out_complete;
++}
++
++static inline int scst_real_exec(struct scst_cmd *cmd)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
++ BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
++
++ __scst_cmd_get(cmd);
++
++ res = scst_do_real_exec(cmd);
++ if (likely(res == SCST_EXEC_COMPLETED)) {
++ scst_post_exec_sn(cmd, true);
++ } else
++ BUG();
++
++ __scst_cmd_put(cmd);
++
++ /* SCST_EXEC_* match SCST_CMD_STATE_RES_* */
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_do_local_exec(struct scst_cmd *cmd)
++{
++ int res;
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++
++ TRACE_ENTRY();
++
++ /* Check READ_ONLY device status */
++ if ((cmd->op_flags & SCST_WRITE_MEDIUM) &&
++ (tgt_dev->acg_dev->rd_only || cmd->dev->swp ||
++ cmd->dev->rd_only)) {
++ PRINT_WARNING("Attempt of write access to read-only device: "
++ "initiator %s, LUN %lld, op %x",
++ cmd->sess->initiator_name, cmd->lun, cmd->cdb[0]);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_data_protect));
++ goto out_done;
++ }
++
++ if (!scst_is_cmd_local(cmd)) {
++ res = SCST_EXEC_NOT_COMPLETED;
++ goto out;
++ }
++
++ switch (cmd->cdb[0]) {
++ case RESERVE:
++ case RESERVE_10:
++ res = scst_reserve_local(cmd);
++ break;
++ case RELEASE:
++ case RELEASE_10:
++ res = scst_release_local(cmd);
++ break;
++ case PERSISTENT_RESERVE_IN:
++ res = scst_persistent_reserve_in_local(cmd);
++ break;
++ case PERSISTENT_RESERVE_OUT:
++ res = scst_persistent_reserve_out_local(cmd);
++ break;
++ case REPORT_LUNS:
++ res = scst_report_luns_local(cmd);
++ break;
++ case REQUEST_SENSE:
++ res = scst_request_sense_local(cmd);
++ break;
++ default:
++ res = SCST_EXEC_NOT_COMPLETED;
++ break;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_done:
++ /* Report the result */
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
++ res = SCST_EXEC_COMPLETED;
++ goto out;
++}
++
++static int scst_local_exec(struct scst_cmd *cmd)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
++ BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
++
++ __scst_cmd_get(cmd);
++
++ res = scst_do_local_exec(cmd);
++ if (likely(res == SCST_EXEC_NOT_COMPLETED))
++ cmd->state = SCST_CMD_STATE_REAL_EXEC;
++ else if (res == SCST_EXEC_COMPLETED)
++ scst_post_exec_sn(cmd, true);
++ else
++ BUG();
++
++ __scst_cmd_put(cmd);
++
++ /* SCST_EXEC_* match SCST_CMD_STATE_RES_* */
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_exec(struct scst_cmd **active_cmd)
++{
++ struct scst_cmd *cmd = *active_cmd;
++ struct scst_cmd *ref_cmd;
++ int res = SCST_CMD_STATE_RES_CONT_NEXT, count = 0;
++
++ TRACE_ENTRY();
++
++ cmd->state = SCST_CMD_STATE_START_EXEC;
++
++ if (unlikely(scst_check_blocked_dev(cmd)))
++ goto out;
++
++ /* To protect tgt_dev */
++ ref_cmd = cmd;
++ __scst_cmd_get(ref_cmd);
++
++ while (1) {
++ int rc;
++
++ cmd->sent_for_exec = 1;
++ /*
++ * To sync with scst_abort_cmd(). The above assignment must
++ * be before SCST_CMD_ABORTED test, done later in
++ * scst_check_local_events(). It's far from here, so the order
++ * is virtually guaranteed, but let's have it just in case.
++ */
++ smp_mb();
++
++ cmd->scst_cmd_done = scst_cmd_done_local;
++ cmd->state = SCST_CMD_STATE_LOCAL_EXEC;
++
++ rc = scst_do_local_exec(cmd);
++ if (likely(rc == SCST_EXEC_NOT_COMPLETED))
++ /* Nothing to do */;
++ else {
++ BUG_ON(rc != SCST_EXEC_COMPLETED);
++ goto done;
++ }
++
++ cmd->state = SCST_CMD_STATE_REAL_EXEC;
++
++ rc = scst_do_real_exec(cmd);
++ BUG_ON(rc != SCST_EXEC_COMPLETED);
++
++done:
++ count++;
++
++ cmd = scst_post_exec_sn(cmd, false);
++ if (cmd == NULL)
++ break;
++
++ cmd->state = SCST_CMD_STATE_START_EXEC;
++
++ if (unlikely(scst_check_blocked_dev(cmd)))
++ break;
++
++ __scst_cmd_put(ref_cmd);
++ ref_cmd = cmd;
++ __scst_cmd_get(ref_cmd);
++
++ }
++
++ *active_cmd = cmd;
++
++ if (count == 0)
++ goto out_put;
++
++out_put:
++ __scst_cmd_put(ref_cmd);
++ /* !! At this point sess, dev and tgt_dev can be already freed !! */
++
++out:
++ EXTRACHECKS_BUG_ON(res == SCST_CMD_STATE_RES_NEED_THREAD);
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_send_for_exec(struct scst_cmd **active_cmd)
++{
++ int res;
++ struct scst_cmd *cmd = *active_cmd;
++ struct scst_order_data *order_data = cmd->cur_order_data;
++ typeof(order_data->expected_sn) expected_sn;
++
++ TRACE_ENTRY();
++
++ if (unlikely(cmd->internal))
++ goto exec;
++
++ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
++ goto exec;
++
++ BUG_ON(!cmd->sn_set);
++
++ expected_sn = order_data->expected_sn;
++ /* Optimized for lockless fast path */
++ if ((cmd->sn != expected_sn) || (order_data->hq_cmd_count > 0)) {
++ spin_lock_irq(&order_data->sn_lock);
++
++ order_data->def_cmd_count++;
++ /*
++ * Memory barrier is needed here to implement lockless fast
++ * path. We need the exact order of read and write between
++ * def_cmd_count and expected_sn. Otherwise, we can miss case,
++ * when expected_sn was changed to be equal to cmd->sn while
++ * we are queueing cmd the deferred list after the expected_sn
++ * below. It will lead to a forever stuck command. But with
++ * the barrier in such case __scst_check_deferred_commands()
++ * will be called and it will take sn_lock, so we will be
++ * synchronized.
++ */
++ smp_mb();
++
++ expected_sn = order_data->expected_sn;
++ if ((cmd->sn != expected_sn) || (order_data->hq_cmd_count > 0)) {
++ if (unlikely(test_bit(SCST_CMD_ABORTED,
++ &cmd->cmd_flags))) {
++ /* Necessary to allow aborting out of sn cmds */
++ TRACE_MGMT_DBG("Aborting out of sn cmd %p "
++ "(tag %llu, sn %u)", cmd,
++ (long long unsigned)cmd->tag, cmd->sn);
++ order_data->def_cmd_count--;
++ scst_set_cmd_abnormal_done_state(cmd);
++ res = SCST_CMD_STATE_RES_CONT_SAME;
++ } else {
++ TRACE_SN("Deferring cmd %p (sn=%d, set %d, "
++ "expected_sn=%d)", cmd, cmd->sn,
++ cmd->sn_set, expected_sn);
++ list_add_tail(&cmd->sn_cmd_list_entry,
++ &order_data->deferred_cmd_list);
++ res = SCST_CMD_STATE_RES_CONT_NEXT;
++ }
++ spin_unlock_irq(&order_data->sn_lock);
++ goto out;
++ } else {
++ TRACE_SN("Somebody incremented expected_sn %d, "
++ "continuing", expected_sn);
++ order_data->def_cmd_count--;
++ spin_unlock_irq(&order_data->sn_lock);
++ }
++ }
++
++exec:
++ res = scst_exec(active_cmd);
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++/* No locks supposed to be held */
++static int scst_check_sense(struct scst_cmd *cmd)
++{
++ int res = 0;
++ struct scst_device *dev = cmd->dev;
++
++ TRACE_ENTRY();
++
++ if (unlikely(cmd->ua_ignore))
++ goto out;
++
++ /* If we had internal bus reset behind us, set the command error UA */
++ if ((dev->scsi_dev != NULL) &&
++ unlikely(cmd->host_status == DID_RESET) &&
++ scst_is_ua_command(cmd)) {
++ TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
++ dev->scsi_dev->was_reset, cmd->host_status);
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_reset_UA));
++ /* It looks like it is safe to clear was_reset here */
++ dev->scsi_dev->was_reset = 0;
++ }
++
++ if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
++ SCST_SENSE_VALID(cmd->sense)) {
++ PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
++ cmd->sense_valid_len);
++
++ /* Check Unit Attention Sense Key */
++ if (scst_is_ua_sense(cmd->sense, cmd->sense_valid_len)) {
++ if (scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
++ SCST_SENSE_ASC_VALID,
++ 0, SCST_SENSE_ASC_UA_RESET, 0)) {
++ if (cmd->double_ua_possible) {
++ TRACE_MGMT_DBG("Double UA "
++ "detected for device %p", dev);
++ TRACE_MGMT_DBG("Retrying cmd"
++ " %p (tag %llu)", cmd,
++ (long long unsigned)cmd->tag);
++
++ cmd->status = 0;
++ cmd->msg_status = 0;
++ cmd->host_status = DID_OK;
++ cmd->driver_status = 0;
++ cmd->completed = 0;
++
++ mempool_free(cmd->sense,
++ scst_sense_mempool);
++ cmd->sense = NULL;
++
++ scst_check_restore_sg_buff(cmd);
++
++ BUG_ON(cmd->dbl_ua_orig_resp_data_len < 0);
++ cmd->data_direction =
++ cmd->dbl_ua_orig_data_direction;
++ cmd->resp_data_len =
++ cmd->dbl_ua_orig_resp_data_len;
++
++ cmd->state = SCST_CMD_STATE_REAL_EXEC;
++ cmd->retry = 1;
++ scst_reset_requeued_cmd(cmd);
++ res = 1;
++ goto out;
++ }
++ }
++ scst_dev_check_set_UA(dev, cmd, cmd->sense,
++ cmd->sense_valid_len);
++ }
++ }
++
++ if (unlikely(cmd->double_ua_possible)) {
++ if (scst_is_ua_command(cmd)) {
++ TRACE_MGMT_DBG("Clearing dbl_ua_possible flag (dev %p, "
++ "cmd %p)", dev, cmd);
++ /*
++ * Lock used to protect other flags in the bitfield
++ * (just in case, actually). Those flags can't be
++ * changed in parallel, because the device is
++ * serialized.
++ */
++ spin_lock_bh(&dev->dev_lock);
++ dev->dev_double_ua_possible = 0;
++ spin_unlock_bh(&dev->dev_lock);
++ }
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_check_auto_sense(struct scst_cmd *cmd)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
++ (!SCST_SENSE_VALID(cmd->sense) ||
++ SCST_NO_SENSE(cmd->sense))) {
++ TRACE(TRACE_SCSI|TRACE_MINOR_AND_MGMT_DBG, "CHECK_CONDITION, "
++ "but no sense: cmd->status=%x, cmd->msg_status=%x, "
++ "cmd->host_status=%x, cmd->driver_status=%x (cmd %p)",
++ cmd->status, cmd->msg_status, cmd->host_status,
++ cmd->driver_status, cmd);
++ res = 1;
++ } else if (unlikely(cmd->host_status)) {
++ if ((cmd->host_status == DID_REQUEUE) ||
++ (cmd->host_status == DID_IMM_RETRY) ||
++ (cmd->host_status == DID_SOFT_ERROR) ||
++ (cmd->host_status == DID_ABORT)) {
++ scst_set_busy(cmd);
++ } else {
++ TRACE(TRACE_SCSI|TRACE_MINOR_AND_MGMT_DBG, "Host "
++ "status %x received, returning HARDWARE ERROR "
++ "instead (cmd %p)", cmd->host_status, cmd);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ }
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_pre_dev_done(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_RES_CONT_SAME, rc;
++
++ TRACE_ENTRY();
++
++ if (unlikely(scst_check_auto_sense(cmd))) {
++ PRINT_INFO("Command finished with CHECK CONDITION, but "
++ "without sense data (opcode 0x%x), issuing "
++ "REQUEST SENSE", cmd->cdb[0]);
++ rc = scst_prepare_request_sense(cmd);
++ if (rc == 0)
++ res = SCST_CMD_STATE_RES_CONT_NEXT;
++ else {
++ PRINT_ERROR("%s", "Unable to issue REQUEST SENSE, "
++ "returning HARDWARE ERROR");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ }
++ goto out;
++ } else if (unlikely(scst_check_sense(cmd))) {
++ /*
++ * We can't allow atomic command on the exec stages, so
++ * restart to the thread
++ */
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ }
++
++ if (likely(scsi_status_is_good(cmd->status))) {
++ unsigned char type = cmd->dev->type;
++ if (unlikely((cmd->cdb[0] == MODE_SENSE ||
++ cmd->cdb[0] == MODE_SENSE_10)) &&
++ (cmd->tgt_dev->acg_dev->rd_only || cmd->dev->swp ||
++ cmd->dev->rd_only) &&
++ (type == TYPE_DISK ||
++ type == TYPE_WORM ||
++ type == TYPE_MOD ||
++ type == TYPE_TAPE)) {
++ int32_t length;
++ uint8_t *address;
++ bool err = false;
++
++ length = scst_get_buf_full(cmd, &address);
++ if (length < 0) {
++ PRINT_ERROR("%s", "Unable to get "
++ "MODE_SENSE buffer");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(
++ scst_sense_hardw_error));
++ err = true;
++ } else if (length > 2 && cmd->cdb[0] == MODE_SENSE)
++ address[2] |= 0x80; /* Write Protect*/
++ else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
++ address[3] |= 0x80; /* Write Protect*/
++ scst_put_buf_full(cmd, address);
++
++ if (err)
++ goto out;
++ }
++
++ /*
++ * Check and clear NormACA option for the device, if necessary,
++ * since we don't support ACA
++ */
++ if (unlikely((cmd->cdb[0] == INQUIRY)) &&
++ /* Std INQUIRY data (no EVPD) */
++ !(cmd->cdb[1] & SCST_INQ_EVPD) &&
++ (cmd->resp_data_len > SCST_INQ_BYTE3)) {
++ uint8_t *buffer;
++ int buflen;
++ bool err = false;
++
++ buflen = scst_get_buf_full(cmd, &buffer);
++ if (buflen > SCST_INQ_BYTE3 && !cmd->tgtt->fake_aca) {
++#ifdef CONFIG_SCST_EXTRACHECKS
++ if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
++ PRINT_INFO("NormACA set for device: "
++ "lun=%lld, type 0x%02x. Clear it, "
++ "since it's unsupported.",
++ (long long unsigned int)cmd->lun,
++ buffer[0]);
++ }
++#endif
++ buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
++ } else if (buflen <= SCST_INQ_BYTE3 && buflen != 0) {
++ PRINT_ERROR("%s", "Unable to get INQUIRY "
++ "buffer");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ err = true;
++ }
++ if (buflen > 0)
++ scst_put_buf_full(cmd, buffer);
++
++ if (err)
++ goto out;
++ }
++
++ if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
++ (cmd->cdb[0] == MODE_SELECT_10) ||
++ (cmd->cdb[0] == LOG_SELECT))) {
++ TRACE(TRACE_SCSI,
++ "MODE/LOG SELECT succeeded (LUN %lld)",
++ (long long unsigned int)cmd->lun);
++ cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
++ goto out;
++ }
++ } else {
++ TRACE(TRACE_SCSI, "cmd %p not succeeded with status %x",
++ cmd, cmd->status);
++
++ if ((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10)) {
++ if (!test_bit(SCST_TGT_DEV_RESERVED,
++ &cmd->tgt_dev->tgt_dev_flags)) {
++ struct scst_tgt_dev *tgt_dev_tmp;
++ struct scst_device *dev = cmd->dev;
++
++ TRACE(TRACE_SCSI, "RESERVE failed lun=%lld, "
++ "status=%x",
++ (long long unsigned int)cmd->lun,
++ cmd->status);
++ PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
++ cmd->sense_valid_len);
++
++ /* Clearing the reservation */
++ spin_lock_bh(&dev->dev_lock);
++ list_for_each_entry(tgt_dev_tmp,
++ &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ clear_bit(SCST_TGT_DEV_RESERVED,
++ &tgt_dev_tmp->tgt_dev_flags);
++ }
++ dev->dev_reserved = 0;
++ spin_unlock_bh(&dev->dev_lock);
++ }
++ }
++
++ /* Check for MODE PARAMETERS CHANGED UA */
++ if ((cmd->dev->scsi_dev != NULL) &&
++ (cmd->status == SAM_STAT_CHECK_CONDITION) &&
++ scst_is_ua_sense(cmd->sense, cmd->sense_valid_len) &&
++ scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
++ SCST_SENSE_ASCx_VALID,
++ 0, 0x2a, 0x01)) {
++ TRACE(TRACE_SCSI, "MODE PARAMETERS CHANGED UA (lun "
++ "%lld)", (long long unsigned int)cmd->lun);
++ cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
++ goto out;
++ }
++ }
++
++ cmd->state = SCST_CMD_STATE_DEV_DONE;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_mode_select_checks(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_RES_CONT_SAME;
++
++ TRACE_ENTRY();
++
++ if (likely(scsi_status_is_good(cmd->status))) {
++ int atomic = scst_cmd_atomic(cmd);
++ if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
++ (cmd->cdb[0] == MODE_SELECT_10) ||
++ (cmd->cdb[0] == LOG_SELECT))) {
++ struct scst_device *dev = cmd->dev;
++ int sl;
++ uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
++
++ if (atomic && (dev->scsi_dev != NULL)) {
++ TRACE_DBG("%s", "MODE/LOG SELECT: thread "
++ "context required");
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ }
++
++ TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
++ "setting the SELECT UA (lun=%lld)",
++ (long long unsigned int)cmd->lun);
++
++ spin_lock_bh(&dev->dev_lock);
++ if (cmd->cdb[0] == LOG_SELECT) {
++ sl = scst_set_sense(sense_buffer,
++ sizeof(sense_buffer),
++ dev->d_sense,
++ UNIT_ATTENTION, 0x2a, 0x02);
++ } else {
++ sl = scst_set_sense(sense_buffer,
++ sizeof(sense_buffer),
++ dev->d_sense,
++ UNIT_ATTENTION, 0x2a, 0x01);
++ }
++ scst_dev_check_set_local_UA(dev, cmd, sense_buffer, sl);
++ spin_unlock_bh(&dev->dev_lock);
++
++ if (dev->scsi_dev != NULL)
++ scst_obtain_device_parameters(dev);
++ }
++ } else if ((cmd->status == SAM_STAT_CHECK_CONDITION) &&
++ scst_is_ua_sense(cmd->sense, cmd->sense_valid_len) &&
++ /* mode parameters changed */
++ (scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
++ SCST_SENSE_ASCx_VALID,
++ 0, 0x2a, 0x01) ||
++ scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
++ SCST_SENSE_ASC_VALID,
++ 0, 0x29, 0) /* reset */ ||
++ scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
++ SCST_SENSE_ASC_VALID,
++ 0, 0x28, 0) /* medium changed */ ||
++ /* cleared by another ini (just in case) */
++ scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
++ SCST_SENSE_ASC_VALID,
++ 0, 0x2F, 0))) {
++ int atomic = scst_cmd_atomic(cmd);
++ if (atomic) {
++ TRACE_DBG("Possible parameters changed UA %x: "
++ "thread context required", cmd->sense[12]);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ }
++
++ TRACE(TRACE_SCSI, "Possible parameters changed UA %x "
++ "(LUN %lld): getting new parameters", cmd->sense[12],
++ (long long unsigned int)cmd->lun);
++
++ scst_obtain_device_parameters(cmd->dev);
++ } else
++ BUG();
++
++ cmd->state = SCST_CMD_STATE_DEV_DONE;
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++static void scst_inc_check_expected_sn(struct scst_cmd *cmd)
++{
++ if (likely(cmd->sn_set))
++ scst_inc_expected_sn(cmd->cur_order_data, cmd->sn_slot);
++
++ scst_make_deferred_commands_active(cmd->cur_order_data);
++}
++
++static int scst_dev_done(struct scst_cmd *cmd)
++{
++ int res = SCST_CMD_STATE_RES_CONT_SAME;
++ int state;
++ struct scst_device *dev = cmd->dev;
++
++ TRACE_ENTRY();
++
++ state = SCST_CMD_STATE_PRE_XMIT_RESP;
++
++ if (likely(!scst_is_cmd_fully_local(cmd)) &&
++ likely(dev->handler->dev_done != NULL)) {
++ int rc;
++
++ if (unlikely(!dev->handler->dev_done_atomic &&
++ scst_cmd_atomic(cmd))) {
++ /*
++ * It shouldn't be because of the SCST_TGT_DEV_AFTER_*
++ * optimization.
++ */
++ TRACE_MGMT_DBG("Dev handler %s dev_done() needs thread "
++ "context, rescheduling", dev->handler->name);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ }
++
++ TRACE_DBG("Calling dev handler %s dev_done(%p)",
++ dev->handler->name, cmd);
++ scst_set_cur_start(cmd);
++ rc = dev->handler->dev_done(cmd);
++ scst_set_dev_done_time(cmd);
++ TRACE_DBG("Dev handler %s dev_done() returned %d",
++ dev->handler->name, rc);
++ if (rc != SCST_CMD_STATE_DEFAULT)
++ state = rc;
++ }
++
++ switch (state) {
++#ifdef CONFIG_SCST_EXTRACHECKS
++ case SCST_CMD_STATE_PRE_XMIT_RESP:
++ case SCST_CMD_STATE_PARSE:
++ case SCST_CMD_STATE_PREPARE_SPACE:
++ case SCST_CMD_STATE_RDY_TO_XFER:
++ case SCST_CMD_STATE_TGT_PRE_EXEC:
++ case SCST_CMD_STATE_SEND_FOR_EXEC:
++ case SCST_CMD_STATE_START_EXEC:
++ case SCST_CMD_STATE_LOCAL_EXEC:
++ case SCST_CMD_STATE_REAL_EXEC:
++ case SCST_CMD_STATE_PRE_DEV_DONE:
++ case SCST_CMD_STATE_MODE_SELECT_CHECKS:
++ case SCST_CMD_STATE_DEV_DONE:
++ case SCST_CMD_STATE_XMIT_RESP:
++ case SCST_CMD_STATE_FINISHED:
++ case SCST_CMD_STATE_FINISHED_INTERNAL:
++#else
++ default:
++#endif
++ cmd->state = state;
++ break;
++ case SCST_CMD_STATE_NEED_THREAD_CTX:
++ TRACE_DBG("Dev handler %s dev_done() requested "
++ "thread context, rescheduling",
++ dev->handler->name);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++#ifdef CONFIG_SCST_EXTRACHECKS
++ default:
++ if (state >= 0) {
++ PRINT_ERROR("Dev handler %s dev_done() returned "
++ "invalid cmd state %d",
++ dev->handler->name, state);
++ } else {
++ PRINT_ERROR("Dev handler %s dev_done() returned "
++ "error %d", dev->handler->name,
++ state);
++ }
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ scst_set_cmd_abnormal_done_state(cmd);
++ break;
++#endif
++ }
++
++ scst_check_unblock_dev(cmd);
++
++ if (cmd->inc_expected_sn_on_done && cmd->sent_for_exec)
++ scst_inc_check_expected_sn(cmd);
++
++ if (unlikely(cmd->internal))
++ cmd->state = SCST_CMD_STATE_FINISHED_INTERNAL;
++
++#ifndef CONFIG_SCST_TEST_IO_IN_SIRQ
++ if (cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) {
++ /* We can't allow atomic command on the exec stages */
++ if (scst_cmd_atomic(cmd)) {
++ switch (state) {
++ case SCST_CMD_STATE_TGT_PRE_EXEC:
++ case SCST_CMD_STATE_SEND_FOR_EXEC:
++ case SCST_CMD_STATE_START_EXEC:
++ case SCST_CMD_STATE_LOCAL_EXEC:
++ case SCST_CMD_STATE_REAL_EXEC:
++ TRACE_DBG("Atomic context and redirect, "
++ "rescheduling (cmd %p)", cmd);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ break;
++ }
++ }
++ }
++#endif
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++static int scst_pre_xmit_response(struct scst_cmd *cmd)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(cmd->internal);
++
++#ifdef CONFIG_SCST_DEBUG_TM
++ if (cmd->tm_dbg_delayed &&
++ !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
++ if (scst_cmd_atomic(cmd)) {
++ TRACE_MGMT_DBG("%s",
++ "DEBUG_TM delayed cmd needs a thread");
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ return res;
++ }
++ TRACE_MGMT_DBG("Delaying cmd %p (tag %llu) for 1 second",
++ cmd, cmd->tag);
++ schedule_timeout_uninterruptible(HZ);
++ }
++#endif
++
++ if (likely(cmd->tgt_dev != NULL)) {
++ /*
++ * Those counters protect from not getting too long processing
++ * latency, so we should decrement them after cmd completed.
++ */
++ atomic_dec(&cmd->tgt_dev->tgt_dev_cmd_count);
++#ifdef CONFIG_SCST_PER_DEVICE_CMD_COUNT_LIMIT
++ atomic_dec(&cmd->dev->dev_cmd_count);
++#endif
++ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
++ scst_on_hq_cmd_response(cmd);
++
++ if (unlikely(!cmd->sent_for_exec)) {
++ TRACE_SN("cmd %p was not sent to mid-lev"
++ " (sn %d, set %d)",
++ cmd, cmd->sn, cmd->sn_set);
++ scst_unblock_deferred(cmd->cur_order_data, cmd);
++ cmd->sent_for_exec = 1;
++ }
++ }
++
++ cmd->done = 1;
++ smp_mb(); /* to sync with scst_abort_cmd() */
++
++ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
++ scst_xmit_process_aborted_cmd(cmd);
++ else if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION))
++ scst_store_sense(cmd);
++
++ if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
++ TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %llu), "
++ "skipping", cmd, (long long unsigned int)cmd->tag);
++ cmd->state = SCST_CMD_STATE_FINISHED;
++ res = SCST_CMD_STATE_RES_CONT_SAME;
++ goto out;
++ }
++
++ if (unlikely(cmd->resid_possible))
++ scst_adjust_resp_data_len(cmd);
++ else
++ cmd->adjusted_resp_data_len = cmd->resp_data_len;
++
++ cmd->state = SCST_CMD_STATE_XMIT_RESP;
++ res = SCST_CMD_STATE_RES_CONT_SAME;
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++static int scst_xmit_response(struct scst_cmd *cmd)
++{
++ struct scst_tgt_template *tgtt = cmd->tgtt;
++ int res, rc;
++
++ TRACE_ENTRY();
++
++ EXTRACHECKS_BUG_ON(cmd->internal);
++
++ if (unlikely(!tgtt->xmit_response_atomic &&
++ scst_cmd_atomic(cmd))) {
++ /*
++ * It shouldn't be because of the SCST_TGT_DEV_AFTER_*
++ * optimization.
++ */
++ TRACE_MGMT_DBG("Target driver %s xmit_response() needs thread "
++ "context, rescheduling", tgtt->name);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ }
++
++ while (1) {
++ int finished_cmds = atomic_read(&cmd->tgt->finished_cmds);
++
++ res = SCST_CMD_STATE_RES_CONT_NEXT;
++ cmd->state = SCST_CMD_STATE_XMIT_WAIT;
++
++ TRACE_DBG("Calling xmit_response(%p)", cmd);
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ if (trace_flag & TRACE_SND_BOT) {
++ int i, j;
++ struct scatterlist *sg;
++ if (cmd->tgt_sg != NULL)
++ sg = cmd->tgt_sg;
++ else
++ sg = cmd->sg;
++ if (sg != NULL) {
++ TRACE(TRACE_SND_BOT, "Xmitting data for cmd %p "
++ "(sg_cnt %d, sg %p, sg[0].page %p, buf %p, "
++ "resp len %d)", cmd, cmd->tgt_sg_cnt,
++ sg, (void *)sg_page(&sg[0]), sg_virt(sg),
++ cmd->resp_data_len);
++ for (i = 0, j = 0; i < cmd->tgt_sg_cnt; ++i, ++j) {
++ if (unlikely(sg_is_chain(&sg[j]))) {
++ sg = sg_chain_ptr(&sg[j]);
++ j = 0;
++ }
++ TRACE(TRACE_SND_BOT, "sg %d", j);
++ PRINT_BUFF_FLAG(TRACE_SND_BOT,
++ "Xmitting sg", sg_virt(&sg[j]),
++ sg[j].length);
++ }
++ }
++ }
++#endif
++
++ if (tgtt->on_hw_pending_cmd_timeout != NULL) {
++ struct scst_session *sess = cmd->sess;
++ cmd->hw_pending_start = jiffies;
++ cmd->cmd_hw_pending = 1;
++ if (!test_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags)) {
++ TRACE_DBG("Sched HW pending work for sess %p "
++ "(max time %d)", sess,
++ tgtt->max_hw_pending_time);
++ set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED,
++ &sess->sess_aflags);
++ schedule_delayed_work(&sess->hw_pending_work,
++ tgtt->max_hw_pending_time * HZ);
++ }
++ }
++
++ scst_set_cur_start(cmd);
++
++#ifdef CONFIG_SCST_DEBUG_RETRY
++ if (((scst_random() % 100) == 77))
++ rc = SCST_TGT_RES_QUEUE_FULL;
++ else
++#endif
++ rc = tgtt->xmit_response(cmd);
++ TRACE_DBG("xmit_response() returned %d", rc);
++
++ if (likely(rc == SCST_TGT_RES_SUCCESS))
++ goto out;
++
++ scst_set_xmit_time(cmd);
++
++ cmd->cmd_hw_pending = 0;
++
++ /* Restore the previous state */
++ cmd->state = SCST_CMD_STATE_XMIT_RESP;
++
++ switch (rc) {
++ case SCST_TGT_RES_QUEUE_FULL:
++ if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
++ break;
++ else
++ continue;
++
++ case SCST_TGT_RES_NEED_THREAD_CTX:
++ TRACE_DBG("Target driver %s xmit_response() "
++ "requested thread context, rescheduling",
++ tgtt->name);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++
++ default:
++ goto out_error;
++ }
++ break;
++ }
++
++out:
++ /* Caution: cmd can be already dead here */
++ TRACE_EXIT_HRES(res);
++ return res;
++
++out_error:
++ if (rc == SCST_TGT_RES_FATAL_ERROR) {
++ PRINT_ERROR("Target driver %s xmit_response() returned "
++ "fatal error", tgtt->name);
++ } else {
++ PRINT_ERROR("Target driver %s xmit_response() returned "
++ "invalid value %d", tgtt->name, rc);
++ }
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
++ cmd->state = SCST_CMD_STATE_FINISHED;
++ res = SCST_CMD_STATE_RES_CONT_SAME;
++ goto out;
++}
++
++/**
++ * scst_tgt_cmd_done() - the command's processing done
++ * @cmd: SCST command
++ * @pref_context: preferred command execution context
++ *
++ * Description:
++ * Notifies SCST that the driver sent the response and the command
++ * can be freed now. Don't forget to set the delivery status, if it
++ * isn't success, using scst_set_delivery_status() before calling
++ * this function. The third argument sets preferred command execution
++ * context (see SCST_CONTEXT_* constants for details)
++ */
++void scst_tgt_cmd_done(struct scst_cmd *cmd,
++ enum scst_exec_context pref_context)
++{
++ TRACE_ENTRY();
++
++ BUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);
++
++ scst_set_xmit_time(cmd);
++
++ cmd->cmd_hw_pending = 0;
++
++ if (unlikely(cmd->tgt_dev == NULL))
++ pref_context = SCST_CONTEXT_THREAD;
++
++ cmd->state = SCST_CMD_STATE_FINISHED;
++
++ scst_process_redirect_cmd(cmd, pref_context, 1);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL(scst_tgt_cmd_done);
++
++static int scst_finish_cmd(struct scst_cmd *cmd)
++{
++ int res;
++ struct scst_session *sess = cmd->sess;
++ struct scst_io_stat_entry *stat;
++
++ TRACE_ENTRY();
++
++ scst_update_lat_stats(cmd);
++
++ if (unlikely(cmd->delivery_status != SCST_CMD_DELIVERY_SUCCESS)) {
++ if ((cmd->tgt_dev != NULL) &&
++ scst_is_ua_sense(cmd->sense, cmd->sense_valid_len)) {
++ /* This UA delivery failed, so we need to requeue it */
++ if (scst_cmd_atomic(cmd) &&
++ scst_is_ua_global(cmd->sense, cmd->sense_valid_len)) {
++ TRACE_MGMT_DBG("Requeuing of global UA for "
++ "failed cmd %p needs a thread", cmd);
++ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ goto out;
++ }
++ scst_requeue_ua(cmd);
++ }
++ }
++
++ atomic_dec(&sess->sess_cmd_count);
++
++ spin_lock_irq(&sess->sess_list_lock);
++
++ stat = &sess->io_stats[cmd->data_direction];
++ stat->cmd_count++;
++ stat->io_byte_count += cmd->bufflen + cmd->out_bufflen;
++
++ list_del(&cmd->sess_cmd_list_entry);
++
++ /*
++ * Done under sess_list_lock to sync with scst_abort_cmd() without
++ * using extra barrier.
++ */
++ cmd->finished = 1;
++
++ spin_unlock_irq(&sess->sess_list_lock);
++
++ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
++ TRACE_MGMT_DBG("Aborted cmd %p finished (cmd_ref %d)",
++ cmd, atomic_read(&cmd->cmd_ref));
++
++ scst_finish_cmd_mgmt(cmd);
++ }
++
++ __scst_cmd_put(cmd);
++
++ res = SCST_CMD_STATE_RES_CONT_NEXT;
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++/*
++ * No locks, but it must be externally serialized (see comment for
++ * scst_cmd_init_done() in scst.h)
++ */
++static void scst_cmd_set_sn(struct scst_cmd *cmd)
++{
++ struct scst_order_data *order_data = cmd->cur_order_data;
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ if (scst_is_implicit_hq_cmd(cmd) &&
++ likely(cmd->queue_type == SCST_CMD_QUEUE_SIMPLE)) {
++ TRACE_SN("Implicit HQ cmd %p", cmd);
++ cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
++ }
++
++ EXTRACHECKS_BUG_ON(cmd->sn_set || cmd->hq_cmd_inced);
++
++ /* Optimized for lockless fast path */
++
++ scst_check_debug_sn(cmd);
++
++#ifdef CONFIG_SCST_STRICT_SERIALIZING
++ cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
++#endif
++
++ if (cmd->dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) {
++ /*
++ * Not the best way, but good enough until there is a
++ * possibility to specify queue type during pass-through
++ * commands submission.
++ */
++ cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
++ }
++
++ switch (cmd->queue_type) {
++ case SCST_CMD_QUEUE_SIMPLE:
++ case SCST_CMD_QUEUE_UNTAGGED:
++ if (likely(order_data->num_free_sn_slots >= 0)) {
++ /*
++ * atomic_inc_return() implies memory barrier to sync
++ * with scst_inc_expected_sn()
++ */
++ if (atomic_inc_return(order_data->cur_sn_slot) == 1) {
++ order_data->curr_sn++;
++ TRACE_SN("Incremented curr_sn %d",
++ order_data->curr_sn);
++ }
++ cmd->sn_slot = order_data->cur_sn_slot;
++ cmd->sn = order_data->curr_sn;
++
++ order_data->prev_cmd_ordered = 0;
++ } else {
++ TRACE(TRACE_MINOR, "***WARNING*** Not enough SN slots "
++ "%zd", ARRAY_SIZE(order_data->sn_slots));
++ goto ordered;
++ }
++ break;
++
++ case SCST_CMD_QUEUE_ORDERED:
++ TRACE_SN("ORDERED cmd %p (op %x)", cmd, cmd->cdb[0]);
++ordered:
++ if (!order_data->prev_cmd_ordered) {
++ spin_lock_irqsave(&order_data->sn_lock, flags);
++ if (order_data->num_free_sn_slots >= 0) {
++ order_data->num_free_sn_slots--;
++ if (order_data->num_free_sn_slots >= 0) {
++ int i = 0;
++ /* Commands can finish in any order, so
++ * we don't know which slot is empty.
++ */
++ while (1) {
++ order_data->cur_sn_slot++;
++ if (order_data->cur_sn_slot ==
++ order_data->sn_slots + ARRAY_SIZE(order_data->sn_slots))
++ order_data->cur_sn_slot = order_data->sn_slots;
++
++ if (atomic_read(order_data->cur_sn_slot) == 0)
++ break;
++
++ i++;
++ BUG_ON(i == ARRAY_SIZE(order_data->sn_slots));
++ }
++ TRACE_SN("New cur SN slot %zd",
++ order_data->cur_sn_slot -
++ order_data->sn_slots);
++ }
++ }
++ spin_unlock_irqrestore(&order_data->sn_lock, flags);
++ }
++ order_data->prev_cmd_ordered = 1;
++ order_data->curr_sn++;
++ cmd->sn = order_data->curr_sn;
++ break;
++
++ case SCST_CMD_QUEUE_HEAD_OF_QUEUE:
++ TRACE_SN("HQ cmd %p (op %x)", cmd, cmd->cdb[0]);
++ spin_lock_irqsave(&order_data->sn_lock, flags);
++ order_data->hq_cmd_count++;
++ spin_unlock_irqrestore(&order_data->sn_lock, flags);
++ cmd->hq_cmd_inced = 1;
++ goto out;
++
++ default:
++ BUG();
++ }
++
++ TRACE_SN("cmd(%p)->sn: %d (order_data %p, *cur_sn_slot %d, "
++ "num_free_sn_slots %d, prev_cmd_ordered %ld, "
++ "cur_sn_slot %zd)", cmd, cmd->sn, order_data,
++ atomic_read(order_data->cur_sn_slot),
++ order_data->num_free_sn_slots, order_data->prev_cmd_ordered,
++ order_data->cur_sn_slot - order_data->sn_slots);
++
++ cmd->sn_set = 1;
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * Returns 0 on success, > 0 when we need to wait for unblock,
++ * < 0 if there is no device (lun) or device type handler.
++ *
++ * No locks, but might be on IRQ, protection is done by the
++ * suspended activity.
++ */
++static int scst_translate_lun(struct scst_cmd *cmd)
++{
++ struct scst_tgt_dev *tgt_dev = NULL;
++ int res;
++
++ TRACE_ENTRY();
++
++ cmd->cpu_cmd_counter = scst_get();
++
++ if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
++ struct list_head *head =
++ &cmd->sess->sess_tgt_dev_list[SESS_TGT_DEV_LIST_HASH_FN(cmd->lun)];
++ TRACE_DBG("Finding tgt_dev for cmd %p (lun %lld)", cmd,
++ (long long unsigned int)cmd->lun);
++ res = -1;
++ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
++ if (tgt_dev->lun == cmd->lun) {
++ TRACE_DBG("tgt_dev %p found", tgt_dev);
++
++ if (unlikely(tgt_dev->dev->handler ==
++ &scst_null_devtype)) {
++ PRINT_INFO("Dev handler for device "
++ "%lld is NULL, the device will not "
++ "be visible remotely",
++ (long long unsigned int)cmd->lun);
++ break;
++ }
++
++ cmd->cmd_threads = tgt_dev->active_cmd_threads;
++ cmd->tgt_dev = tgt_dev;
++ cmd->cur_order_data = tgt_dev->curr_order_data;
++ cmd->dev = tgt_dev->dev;
++
++ res = 0;
++ break;
++ }
++ }
++ if (res != 0) {
++ TRACE(TRACE_MINOR,
++ "tgt_dev for LUN %lld not found, command to "
++ "unexisting LU (initiator %s, target %s)?",
++ (long long unsigned int)cmd->lun,
++ cmd->sess->initiator_name, cmd->tgt->tgt_name);
++ scst_put(cmd->cpu_cmd_counter);
++ }
++ } else {
++ TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
++ scst_put(cmd->cpu_cmd_counter);
++ res = 1;
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/*
++ * No locks, but might be on IRQ.
++ *
++ * Returns 0 on success, > 0 when we need to wait for unblock,
++ * < 0 if there is no device (lun) or device type handler.
++ */
++static int __scst_init_cmd(struct scst_cmd *cmd)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ res = scst_translate_lun(cmd);
++ if (likely(res == 0)) {
++ int cnt;
++ bool failure = false;
++
++ cmd->state = SCST_CMD_STATE_PARSE;
++
++ cnt = atomic_inc_return(&cmd->tgt_dev->tgt_dev_cmd_count);
++ if (unlikely(cnt > SCST_MAX_TGT_DEV_COMMANDS)) {
++ TRACE(TRACE_FLOW_CONTROL,
++ "Too many pending commands (%d) in "
++ "session, returning BUSY to initiator \"%s\"",
++ cnt, (cmd->sess->initiator_name[0] == '\0') ?
++ "Anonymous" : cmd->sess->initiator_name);
++ failure = true;
++ }
++
++#ifdef CONFIG_SCST_PER_DEVICE_CMD_COUNT_LIMIT
++ cnt = atomic_inc_return(&cmd->dev->dev_cmd_count);
++ if (unlikely(cnt > SCST_MAX_DEV_COMMANDS)) {
++ if (!failure) {
++ TRACE(TRACE_FLOW_CONTROL,
++ "Too many pending device "
++ "commands (%d), returning BUSY to "
++ "initiator \"%s\"", cnt,
++ (cmd->sess->initiator_name[0] == '\0') ?
++ "Anonymous" :
++ cmd->sess->initiator_name);
++ failure = true;
++ }
++ }
++#endif
++
++ if (unlikely(failure))
++ goto out_busy;
++
++ /*
++ * SCST_IMPLICIT_HQ for unknown commands not implemented for
++ * case when set_sn_on_restart_cmd not set, because custom parse
++ * can reorder commands due to multithreaded processing. To
++ * implement it we need to implement all unknown commands as
++ * ORDERED in the beginning and post parse reprocess of
++ * queue_type to change it if needed. ToDo.
++ */
++ scst_pre_parse(cmd);
++
++ if (!cmd->set_sn_on_restart_cmd)
++ scst_cmd_set_sn(cmd);
++ } else if (res < 0) {
++ TRACE_DBG("Finishing cmd %p", cmd);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_lun_not_supported));
++ scst_set_cmd_abnormal_done_state(cmd);
++ } else
++ goto out;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_busy:
++ scst_set_busy(cmd);
++ scst_set_cmd_abnormal_done_state(cmd);
++ goto out;
++}
++
++/* Called under scst_init_lock and IRQs disabled */
++static void scst_do_job_init(void)
++ __releases(&scst_init_lock)
++ __acquires(&scst_init_lock)
++{
++ struct scst_cmd *cmd;
++ int susp;
++
++ TRACE_ENTRY();
++
++restart:
++ /*
++ * There is no need for read barrier here, because we don't care where
++ * this check will be done.
++ */
++ susp = test_bit(SCST_FLAG_SUSPENDED, &scst_flags);
++ if (scst_init_poll_cnt > 0)
++ scst_init_poll_cnt--;
++
++ list_for_each_entry(cmd, &scst_init_cmd_list, cmd_list_entry) {
++ int rc;
++ if (susp && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
++ continue;
++ if (!test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
++ spin_unlock_irq(&scst_init_lock);
++ rc = __scst_init_cmd(cmd);
++ spin_lock_irq(&scst_init_lock);
++ if (rc > 0) {
++ TRACE_MGMT_DBG("%s",
++ "FLAG SUSPENDED set, restarting");
++ goto restart;
++ }
++ } else {
++ TRACE_MGMT_DBG("Aborting not inited cmd %p (tag %llu)",
++ cmd, (long long unsigned int)cmd->tag);
++ scst_set_cmd_abnormal_done_state(cmd);
++ }
++
++ /*
++ * Deleting cmd from init cmd list after __scst_init_cmd()
++ * is necessary to keep the check in scst_init_cmd() correct
++ * to preserve the commands order.
++ *
++ * We don't care about the race, when init cmd list is empty
++ * and one command detected that it just was not empty, so
++ * it's inserting to it, but another command at the same time
++ * seeing init cmd list empty and goes directly, because it
++ * could affect only commands from the same initiator to the
++ * same tgt_dev, but scst_cmd_init_done*() doesn't guarantee
++ * the order in case of simultaneous such calls anyway.
++ */
++ TRACE_MGMT_DBG("Deleting cmd %p from init cmd list", cmd);
++ smp_wmb(); /* enforce the required order */
++ list_del(&cmd->cmd_list_entry);
++ spin_unlock(&scst_init_lock);
++
++ spin_lock(&cmd->cmd_threads->cmd_list_lock);
++ TRACE_MGMT_DBG("Adding cmd %p to active cmd list", cmd);
++ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
++ list_add(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ else
++ list_add_tail(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
++ spin_unlock(&cmd->cmd_threads->cmd_list_lock);
++
++ spin_lock(&scst_init_lock);
++ goto restart;
++ }
++
++ /* It isn't really needed, but let's keep it */
++ if (susp != test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
++ goto restart;
++
++ TRACE_EXIT();
++ return;
++}
++
++static inline int test_init_cmd_list(void)
++{
++ int res = (!list_empty(&scst_init_cmd_list) &&
++ !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
++ unlikely(kthread_should_stop()) ||
++ (scst_init_poll_cnt > 0);
++ return res;
++}
++
++int scst_init_thread(void *arg)
++{
++ TRACE_ENTRY();
++
++ PRINT_INFO("Init thread started, PID %d", current->pid);
++
++ current->flags |= PF_NOFREEZE;
++
++ set_user_nice(current, -10);
++
++ spin_lock_irq(&scst_init_lock);
++ while (!kthread_should_stop()) {
++ wait_queue_t wait;
++ init_waitqueue_entry(&wait, current);
++
++ if (!test_init_cmd_list()) {
++ add_wait_queue_exclusive(&scst_init_cmd_list_waitQ,
++ &wait);
++ for (;;) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ if (test_init_cmd_list())
++ break;
++ spin_unlock_irq(&scst_init_lock);
++ schedule();
++ spin_lock_irq(&scst_init_lock);
++ }
++ set_current_state(TASK_RUNNING);
++ remove_wait_queue(&scst_init_cmd_list_waitQ, &wait);
++ }
++ scst_do_job_init();
++ }
++ spin_unlock_irq(&scst_init_lock);
++
++ /*
++ * If kthread_should_stop() is true, we are guaranteed to be
++ * on the module unload, so scst_init_cmd_list must be empty.
++ */
++ BUG_ON(!list_empty(&scst_init_cmd_list));
++
++ PRINT_INFO("Init thread PID %d finished", current->pid);
++
++ TRACE_EXIT();
++ return 0;
++}
++
++/**
++ * scst_process_active_cmd() - process active command
++ *
++ * Description:
++ * Main SCST commands processing routing. Must be used only by dev handlers.
++ *
++ * Argument atomic is true, if function called in atomic context.
++ *
++ * Must be called with no locks held.
++ */
++void scst_process_active_cmd(struct scst_cmd *cmd, bool atomic)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ /*
++ * Checkpatch will complain on the use of in_atomic() below. You
++ * can safely ignore this warning since in_atomic() is used here only
++ * for debugging purposes.
++ */
++ EXTRACHECKS_BUG_ON(in_irq() || irqs_disabled());
++ EXTRACHECKS_WARN_ON((in_atomic() || in_interrupt()) && !atomic);
++
++ cmd->atomic = atomic;
++
++ TRACE_DBG("cmd %p, atomic %d", cmd, atomic);
++
++ do {
++ switch (cmd->state) {
++ case SCST_CMD_STATE_PARSE:
++ res = scst_parse_cmd(cmd);
++ break;
++
++ case SCST_CMD_STATE_PREPARE_SPACE:
++ res = scst_prepare_space(cmd);
++ break;
++
++ case SCST_CMD_STATE_PREPROCESSING_DONE:
++ res = scst_preprocessing_done(cmd);
++ break;
++
++ case SCST_CMD_STATE_RDY_TO_XFER:
++ res = scst_rdy_to_xfer(cmd);
++ break;
++
++ case SCST_CMD_STATE_TGT_PRE_EXEC:
++ res = scst_tgt_pre_exec(cmd);
++ break;
++
++ case SCST_CMD_STATE_SEND_FOR_EXEC:
++ if (tm_dbg_check_cmd(cmd) != 0) {
++ res = SCST_CMD_STATE_RES_CONT_NEXT;
++ TRACE_MGMT_DBG("Skipping cmd %p (tag %llu), "
++ "because of TM DBG delay", cmd,
++ (long long unsigned int)cmd->tag);
++ break;
++ }
++ res = scst_send_for_exec(&cmd);
++ EXTRACHECKS_BUG_ON(res == SCST_CMD_STATE_RES_NEED_THREAD);
++ /*
++ * !! At this point cmd, sess & tgt_dev can already be
++ * freed !!
++ */
++ break;
++
++ case SCST_CMD_STATE_START_EXEC:
++ res = scst_exec(&cmd);
++ EXTRACHECKS_BUG_ON(res == SCST_CMD_STATE_RES_NEED_THREAD);
++ /*
++ * !! At this point cmd, sess & tgt_dev can already be
++ * freed !!
++ */
++ break;
++
++ case SCST_CMD_STATE_LOCAL_EXEC:
++ res = scst_local_exec(cmd);
++ EXTRACHECKS_BUG_ON(res == SCST_CMD_STATE_RES_NEED_THREAD);
++ /*
++ * !! At this point cmd, sess & tgt_dev can already be
++ * freed !!
++ */
++ break;
++
++ case SCST_CMD_STATE_REAL_EXEC:
++ res = scst_real_exec(cmd);
++ EXTRACHECKS_BUG_ON(res == SCST_CMD_STATE_RES_NEED_THREAD);
++ /*
++ * !! At this point cmd, sess & tgt_dev can already be
++ * freed !!
++ */
++ break;
++
++ case SCST_CMD_STATE_PRE_DEV_DONE:
++ res = scst_pre_dev_done(cmd);
++ EXTRACHECKS_BUG_ON((res == SCST_CMD_STATE_RES_NEED_THREAD) &&
++ (cmd->state == SCST_CMD_STATE_PRE_DEV_DONE));
++ break;
++
++ case SCST_CMD_STATE_MODE_SELECT_CHECKS:
++ res = scst_mode_select_checks(cmd);
++ break;
++
++ case SCST_CMD_STATE_DEV_DONE:
++ res = scst_dev_done(cmd);
++ break;
++
++ case SCST_CMD_STATE_PRE_XMIT_RESP:
++ res = scst_pre_xmit_response(cmd);
++ EXTRACHECKS_BUG_ON(res ==
++ SCST_CMD_STATE_RES_NEED_THREAD);
++ break;
++
++ case SCST_CMD_STATE_XMIT_RESP:
++ res = scst_xmit_response(cmd);
++ break;
++
++ case SCST_CMD_STATE_FINISHED:
++ res = scst_finish_cmd(cmd);
++ break;
++
++ case SCST_CMD_STATE_FINISHED_INTERNAL:
++ res = scst_finish_internal_cmd(cmd);
++ EXTRACHECKS_BUG_ON(res ==
++ SCST_CMD_STATE_RES_NEED_THREAD);
++ break;
++
++ default:
++ PRINT_CRIT_ERROR("cmd (%p) in state %d, but shouldn't "
++ "be", cmd, cmd->state);
++ BUG();
++ res = SCST_CMD_STATE_RES_CONT_NEXT;
++ break;
++ }
++ } while (res == SCST_CMD_STATE_RES_CONT_SAME);
++
++ if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
++ /* None */
++ } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
++ spin_lock_irq(&cmd->cmd_threads->cmd_list_lock);
++#ifdef CONFIG_SCST_EXTRACHECKS
++ switch (cmd->state) {
++ case SCST_CMD_STATE_PARSE:
++ case SCST_CMD_STATE_PREPARE_SPACE:
++ case SCST_CMD_STATE_RDY_TO_XFER:
++ case SCST_CMD_STATE_TGT_PRE_EXEC:
++ case SCST_CMD_STATE_SEND_FOR_EXEC:
++ case SCST_CMD_STATE_START_EXEC:
++ case SCST_CMD_STATE_LOCAL_EXEC:
++ case SCST_CMD_STATE_REAL_EXEC:
++ case SCST_CMD_STATE_DEV_DONE:
++ case SCST_CMD_STATE_XMIT_RESP:
++#endif
++ TRACE_DBG("Adding cmd %p to head of active cmd list",
++ cmd);
++ list_add(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++#ifdef CONFIG_SCST_EXTRACHECKS
++ break;
++ default:
++ PRINT_CRIT_ERROR("cmd %p is in invalid state %d)", cmd,
++ cmd->state);
++ spin_unlock_irq(&cmd->cmd_threads->cmd_list_lock);
++ BUG();
++ spin_lock_irq(&cmd->cmd_threads->cmd_list_lock);
++ break;
++ }
++#endif
++ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
++ spin_unlock_irq(&cmd->cmd_threads->cmd_list_lock);
++ } else
++ BUG();
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_process_active_cmd);
++
++/* Called under cmd_list_lock and IRQs disabled */
++static void scst_do_job_active(struct list_head *cmd_list,
++ spinlock_t *cmd_list_lock, bool atomic)
++ __releases(cmd_list_lock)
++ __acquires(cmd_list_lock)
++{
++ TRACE_ENTRY();
++
++ while (!list_empty(cmd_list)) {
++ struct scst_cmd *cmd = list_entry(cmd_list->next, typeof(*cmd),
++ cmd_list_entry);
++ TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
++ list_del(&cmd->cmd_list_entry);
++ spin_unlock_irq(cmd_list_lock);
++ scst_process_active_cmd(cmd, atomic);
++ spin_lock_irq(cmd_list_lock);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static inline int test_cmd_threads(struct scst_cmd_threads *p_cmd_threads)
++{
++ int res = !list_empty(&p_cmd_threads->active_cmd_list) ||
++ unlikely(kthread_should_stop()) ||
++ tm_dbg_is_release();
++ return res;
++}
++
++int scst_cmd_thread(void *arg)
++{
++ struct scst_cmd_threads *p_cmd_threads = arg;
++
++ TRACE_ENTRY();
++
++ PRINT_INFO("Processing thread %s (PID %d) started", current->comm,
++ current->pid);
++
++#if 0
++ set_user_nice(current, 10);
++#endif
++ current->flags |= PF_NOFREEZE;
++
++ mutex_lock(&p_cmd_threads->io_context_mutex);
++
++ WARN_ON(current->io_context);
++
++ if (p_cmd_threads != &scst_main_cmd_threads) {
++ /*
++ * For linked IO contexts io_context might be not NULL while
++ * io_context 0.
++ */
++ if (p_cmd_threads->io_context == NULL) {
++ p_cmd_threads->io_context = get_io_context(GFP_KERNEL, -1);
++ TRACE_MGMT_DBG("Alloced new IO context %p "
++ "(p_cmd_threads %p)",
++ p_cmd_threads->io_context,
++ p_cmd_threads);
++ /*
++ * Put the extra reference created by get_io_context()
++ * because we don't need it.
++ */
++ put_io_context(p_cmd_threads->io_context);
++ } else {
++ current->io_context = ioc_task_link(p_cmd_threads->io_context);
++ TRACE_MGMT_DBG("Linked IO context %p "
++ "(p_cmd_threads %p)", p_cmd_threads->io_context,
++ p_cmd_threads);
++ }
++ p_cmd_threads->io_context_refcnt++;
++ }
++
++ mutex_unlock(&p_cmd_threads->io_context_mutex);
++
++ smp_wmb();
++ p_cmd_threads->io_context_ready = true;
++
++ spin_lock_irq(&p_cmd_threads->cmd_list_lock);
++ while (!kthread_should_stop()) {
++ wait_queue_t wait;
++ init_waitqueue_entry(&wait, current);
++
++ if (!test_cmd_threads(p_cmd_threads)) {
++ add_wait_queue_exclusive_head(
++ &p_cmd_threads->cmd_list_waitQ,
++ &wait);
++ for (;;) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ if (test_cmd_threads(p_cmd_threads))
++ break;
++ spin_unlock_irq(&p_cmd_threads->cmd_list_lock);
++ schedule();
++ spin_lock_irq(&p_cmd_threads->cmd_list_lock);
++ }
++ set_current_state(TASK_RUNNING);
++ remove_wait_queue(&p_cmd_threads->cmd_list_waitQ, &wait);
++ }
++
++ if (tm_dbg_is_release()) {
++ spin_unlock_irq(&p_cmd_threads->cmd_list_lock);
++ tm_dbg_check_released_cmds();
++ spin_lock_irq(&p_cmd_threads->cmd_list_lock);
++ }
++
++ scst_do_job_active(&p_cmd_threads->active_cmd_list,
++ &p_cmd_threads->cmd_list_lock, false);
++ }
++ spin_unlock_irq(&p_cmd_threads->cmd_list_lock);
++
++ if (p_cmd_threads != &scst_main_cmd_threads) {
++ mutex_lock(&p_cmd_threads->io_context_mutex);
++ if (--p_cmd_threads->io_context_refcnt == 0)
++ p_cmd_threads->io_context = NULL;
++ mutex_unlock(&p_cmd_threads->io_context_mutex);
++ }
++
++ PRINT_INFO("Processing thread %s (PID %d) finished", current->comm,
++ current->pid);
++
++ TRACE_EXIT();
++ return 0;
++}
++
++void scst_cmd_tasklet(long p)
++{
++ struct scst_percpu_info *i = (struct scst_percpu_info *)p;
++
++ TRACE_ENTRY();
++
++ spin_lock_irq(&i->tasklet_lock);
++ scst_do_job_active(&i->tasklet_cmd_list, &i->tasklet_lock, true);
++ spin_unlock_irq(&i->tasklet_lock);
++
++ TRACE_EXIT();
++ return;
++}
++
++/*
++ * Returns 0 on success, < 0 if there is no device handler or
++ * > 0 if SCST_FLAG_SUSPENDED set and SCST_FLAG_SUSPENDING - not.
++ * No locks, protection is done by the suspended activity.
++ */
++static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
++{
++ struct scst_tgt_dev *tgt_dev = NULL;
++ struct list_head *head;
++ int res = -1;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Finding tgt_dev for mgmt cmd %p (lun %lld)", mcmd,
++ (long long unsigned int)mcmd->lun);
++
++ mcmd->cpu_cmd_counter = scst_get();
++
++ if (unlikely(test_bit(SCST_FLAG_SUSPENDED, &scst_flags) &&
++ !test_bit(SCST_FLAG_SUSPENDING, &scst_flags))) {
++ TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
++ scst_put(mcmd->cpu_cmd_counter);
++ res = 1;
++ goto out;
++ }
++
++ head = &mcmd->sess->sess_tgt_dev_list[SESS_TGT_DEV_LIST_HASH_FN(mcmd->lun)];
++ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
++ if (tgt_dev->lun == mcmd->lun) {
++ TRACE_DBG("tgt_dev %p found", tgt_dev);
++ mcmd->mcmd_tgt_dev = tgt_dev;
++ res = 0;
++ break;
++ }
++ }
++ if (mcmd->mcmd_tgt_dev == NULL)
++ scst_put(mcmd->cpu_cmd_counter);
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++/* No locks */
++void scst_done_cmd_mgmt(struct scst_cmd *cmd)
++{
++ struct scst_mgmt_cmd_stub *mstb, *t;
++ bool wake = 0;
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("cmd %p done (tag %llu)",
++ cmd, (long long unsigned int)cmd->tag);
++
++ spin_lock_irqsave(&scst_mcmd_lock, flags);
++
++ list_for_each_entry_safe(mstb, t, &cmd->mgmt_cmd_list,
++ cmd_mgmt_cmd_list_entry) {
++ struct scst_mgmt_cmd *mcmd;
++
++ if (!mstb->done_counted)
++ continue;
++
++ mcmd = mstb->mcmd;
++ TRACE_MGMT_DBG("mcmd %p, mcmd->cmd_done_wait_count %d",
++ mcmd, mcmd->cmd_done_wait_count);
++
++ mcmd->cmd_done_wait_count--;
++
++ BUG_ON(mcmd->cmd_done_wait_count < 0);
++
++ if (mcmd->cmd_done_wait_count > 0) {
++ TRACE_MGMT_DBG("cmd_done_wait_count(%d) not 0, "
++ "skipping", mcmd->cmd_done_wait_count);
++ goto check_free;
++ }
++
++ if (mcmd->state == SCST_MCMD_STATE_WAITING_AFFECTED_CMDS_DONE) {
++ mcmd->state = SCST_MCMD_STATE_AFFECTED_CMDS_DONE;
++ TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd "
++ "list", mcmd);
++ list_add_tail(&mcmd->mgmt_cmd_list_entry,
++ &scst_active_mgmt_cmd_list);
++ wake = 1;
++ }
++
++check_free:
++ if (!mstb->finish_counted) {
++ TRACE_DBG("Releasing mstb %p", mstb);
++ list_del(&mstb->cmd_mgmt_cmd_list_entry);
++ mempool_free(mstb, scst_mgmt_stub_mempool);
++ }
++ }
++
++ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
++
++ if (wake)
++ wake_up(&scst_mgmt_cmd_list_waitQ);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Called under scst_mcmd_lock and IRQs disabled */
++static void __scst_dec_finish_wait_count(struct scst_mgmt_cmd *mcmd, bool *wake)
++{
++ TRACE_ENTRY();
++
++ mcmd->cmd_finish_wait_count--;
++
++ BUG_ON(mcmd->cmd_finish_wait_count < 0);
++
++ if (mcmd->cmd_finish_wait_count > 0) {
++ TRACE_MGMT_DBG("cmd_finish_wait_count(%d) not 0, "
++ "skipping", mcmd->cmd_finish_wait_count);
++ goto out;
++ }
++
++ if (mcmd->cmd_done_wait_count > 0) {
++ TRACE_MGMT_DBG("cmd_done_wait_count(%d) not 0, "
++ "skipping", mcmd->cmd_done_wait_count);
++ goto out;
++ }
++
++ if (mcmd->state == SCST_MCMD_STATE_WAITING_AFFECTED_CMDS_FINISHED) {
++ mcmd->state = SCST_MCMD_STATE_DONE;
++ TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd "
++ "list", mcmd);
++ list_add_tail(&mcmd->mgmt_cmd_list_entry,
++ &scst_active_mgmt_cmd_list);
++ *wake = true;
++ }
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/**
++ * scst_prepare_async_mcmd() - prepare async management command
++ *
++ * Notifies SCST that management command is going to be async, i.e.
++ * will be completed in another context.
++ *
++ * No SCST locks supposed to be held on entrance.
++ */
++void scst_prepare_async_mcmd(struct scst_mgmt_cmd *mcmd)
++{
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("Preparing mcmd %p for async execution "
++ "(cmd_finish_wait_count %d)", mcmd,
++ mcmd->cmd_finish_wait_count);
++
++ spin_lock_irqsave(&scst_mcmd_lock, flags);
++ mcmd->cmd_finish_wait_count++;
++ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_prepare_async_mcmd);
++
++/**
++ * scst_async_mcmd_completed() - async management command completed
++ *
++ * Notifies SCST that async management command, prepared by
++ * scst_prepare_async_mcmd(), completed.
++ *
++ * No SCST locks supposed to be held on entrance.
++ */
++void scst_async_mcmd_completed(struct scst_mgmt_cmd *mcmd, int status)
++{
++ unsigned long flags;
++ bool wake = false;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("Async mcmd %p completed (status %d)", mcmd, status);
++
++ spin_lock_irqsave(&scst_mcmd_lock, flags);
++
++ if (status != SCST_MGMT_STATUS_SUCCESS)
++ mcmd->status = status;
++
++ __scst_dec_finish_wait_count(mcmd, &wake);
++
++ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
++
++ if (wake)
++ wake_up(&scst_mgmt_cmd_list_waitQ);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_async_mcmd_completed);
++
++/* No locks */
++static void scst_finish_cmd_mgmt(struct scst_cmd *cmd)
++{
++ struct scst_mgmt_cmd_stub *mstb, *t;
++ bool wake = false;
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("cmd %p finished (tag %llu)",
++ cmd, (long long unsigned int)cmd->tag);
++
++ spin_lock_irqsave(&scst_mcmd_lock, flags);
++
++ list_for_each_entry_safe(mstb, t, &cmd->mgmt_cmd_list,
++ cmd_mgmt_cmd_list_entry) {
++ struct scst_mgmt_cmd *mcmd = mstb->mcmd;
++
++ TRACE_MGMT_DBG("mcmd %p, mcmd->cmd_finish_wait_count %d", mcmd,
++ mcmd->cmd_finish_wait_count);
++
++ BUG_ON(!mstb->finish_counted);
++
++ if (cmd->completed)
++ mcmd->completed_cmd_count++;
++
++ __scst_dec_finish_wait_count(mcmd, &wake);
++
++ TRACE_DBG("Releasing mstb %p", mstb);
++ list_del(&mstb->cmd_mgmt_cmd_list_entry);
++ mempool_free(mstb, scst_mgmt_stub_mempool);
++ }
++
++ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
++
++ if (wake)
++ wake_up(&scst_mgmt_cmd_list_waitQ);
++
++ TRACE_EXIT();
++ return;
++}
++
++static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
++ struct scst_tgt_dev *tgt_dev, int set_status)
++{
++ int res = SCST_DEV_TM_NOT_COMPLETED;
++ struct scst_dev_type *h = tgt_dev->dev->handler;
++
++ if (h->task_mgmt_fn) {
++ TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
++ h->name, mcmd->fn);
++ res = h->task_mgmt_fn(mcmd, tgt_dev);
++ TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
++ h->name, res);
++ if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED))
++ mcmd->status = res;
++ }
++ return res;
++}
++
++static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
++{
++ switch (mgmt_fn) {
++#ifdef CONFIG_SCST_ABORT_CONSIDER_FINISHED_TASKS_AS_NOT_EXISTING
++ case SCST_ABORT_TASK:
++#endif
++#if 0
++ case SCST_ABORT_TASK_SET:
++ case SCST_CLEAR_TASK_SET:
++#endif
++ return 1;
++ default:
++ return 0;
++ }
++}
++
++/*
++ * Must be called under sess_list_lock to sync with finished flag assignment in
++ * scst_finish_cmd()
++ */
++void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
++ bool other_ini, bool call_dev_task_mgmt_fn)
++{
++ unsigned long flags;
++ static DEFINE_SPINLOCK(other_ini_lock);
++
++ TRACE_ENTRY();
++
++ TRACE(TRACE_SCSI|TRACE_MGMT_DEBUG, "Aborting cmd %p (tag %llu, op %x)",
++ cmd, (long long unsigned int)cmd->tag, cmd->cdb[0]);
++
++ /* To protect from concurrent aborts */
++ spin_lock_irqsave(&other_ini_lock, flags);
++
++ if (other_ini) {
++ struct scst_device *dev = NULL;
++
++ /* Might be necessary if command aborted several times */
++ if (!test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
++ set_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
++
++ /* Necessary for scst_xmit_process_aborted_cmd */
++ if (cmd->dev != NULL)
++ dev = cmd->dev;
++ else if ((mcmd != NULL) && (mcmd->mcmd_tgt_dev != NULL))
++ dev = mcmd->mcmd_tgt_dev->dev;
++
++ if (dev != NULL) {
++ if (dev->tas)
++ set_bit(SCST_CMD_DEVICE_TAS, &cmd->cmd_flags);
++ } else
++ PRINT_WARNING("Abort cmd %p from other initiator, but "
++ "neither cmd, nor mcmd %p have tgt_dev set, so "
++ "TAS information can be lost", cmd, mcmd);
++ } else {
++ /* Might be necessary if command aborted several times */
++ clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
++ }
++
++ set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
++
++ spin_unlock_irqrestore(&other_ini_lock, flags);
++
++ /*
++ * To sync with setting cmd->done in scst_pre_xmit_response() (with
++ * scst_finish_cmd() we synced by using sess_list_lock) and with
++ * setting UA for aborted cmd in scst_set_pending_UA().
++ */
++ smp_mb__after_set_bit();
++
++ if (cmd->tgt_dev == NULL) {
++ spin_lock_irqsave(&scst_init_lock, flags);
++ scst_init_poll_cnt++;
++ spin_unlock_irqrestore(&scst_init_lock, flags);
++ wake_up(&scst_init_cmd_list_waitQ);
++ }
++
++ if (!cmd->finished && call_dev_task_mgmt_fn && (cmd->tgt_dev != NULL))
++ scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 1);
++
++ spin_lock_irqsave(&scst_mcmd_lock, flags);
++ if ((mcmd != NULL) && !cmd->finished) {
++ struct scst_mgmt_cmd_stub *mstb;
++
++ mstb = mempool_alloc(scst_mgmt_stub_mempool, GFP_ATOMIC);
++ if (mstb == NULL) {
++ PRINT_CRIT_ERROR("Allocation of management command "
++ "stub failed (mcmd %p, cmd %p)", mcmd, cmd);
++ goto unlock;
++ }
++ memset(mstb, 0, sizeof(*mstb));
++
++ TRACE_DBG("mstb %p, mcmd %p", mstb, mcmd);
++
++ mstb->mcmd = mcmd;
++
++ /*
++ * Delay the response until the command's finish in order to
++ * guarantee that "no further responses from the task are sent
++ * to the SCSI initiator port" after response from the TM
++ * function is sent (SAM). Plus, we must wait here to be sure
++ * that we won't receive double commands with the same tag.
++ * Moreover, if we don't wait here, we might have a possibility
++ * for data corruption, when aborted and reported as completed
++ * command actually gets executed *after* new commands sent
++ * after this TM command completed.
++ */
++
++ if (cmd->sent_for_exec && !cmd->done) {
++ TRACE_MGMT_DBG("cmd %p (tag %llu) is being executed",
++ cmd, (long long unsigned int)cmd->tag);
++ mstb->done_counted = 1;
++ mcmd->cmd_done_wait_count++;
++ }
++
++ /*
++ * We don't have to wait the command's status delivery finish
++ * to other initiators + it can affect MPIO failover.
++ */
++ if (!other_ini) {
++ mstb->finish_counted = 1;
++ mcmd->cmd_finish_wait_count++;
++ }
++
++ if (mstb->done_counted || mstb->finish_counted) {
++ TRACE_MGMT_DBG("cmd %p (tag %llu, sn %u) being "
++ "executed/xmitted (state %d, op %x, proc time "
++ "%ld sec., timeout %d sec.), deferring ABORT "
++ "(cmd_done_wait_count %d, cmd_finish_wait_count "
++ "%d)", cmd, (long long unsigned int)cmd->tag,
++ cmd->sn, cmd->state, cmd->cdb[0],
++ (long)(jiffies - cmd->start_time) / HZ,
++ cmd->timeout / HZ, mcmd->cmd_done_wait_count,
++ mcmd->cmd_finish_wait_count);
++ /*
++ * cmd can't die here or sess_list_lock already taken
++ * and cmd is in the sess list
++ */
++ list_add_tail(&mstb->cmd_mgmt_cmd_list_entry,
++ &cmd->mgmt_cmd_list);
++ } else {
++ /* We don't need to wait for this cmd */
++ mempool_free(mstb, scst_mgmt_stub_mempool);
++ }
++
++ if (cmd->tgtt->on_abort_cmd)
++ cmd->tgtt->on_abort_cmd(cmd);
++ }
++
++unlock:
++ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
++
++ tm_dbg_release_cmd(cmd);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* No locks. Returns 0, if mcmd should be processed further. */
++static int scst_set_mcmd_next_state(struct scst_mgmt_cmd *mcmd)
++{
++ int res;
++
++ spin_lock_irq(&scst_mcmd_lock);
++
++ switch (mcmd->state) {
++ case SCST_MCMD_STATE_INIT:
++ case SCST_MCMD_STATE_EXEC:
++ if (mcmd->cmd_done_wait_count == 0) {
++ mcmd->state = SCST_MCMD_STATE_AFFECTED_CMDS_DONE;
++ res = 0;
++ } else {
++ TRACE_MGMT_DBG("cmd_done_wait_count(%d) not 0, "
++ "preparing to wait", mcmd->cmd_done_wait_count);
++ mcmd->state = SCST_MCMD_STATE_WAITING_AFFECTED_CMDS_DONE;
++ res = -1;
++ }
++ break;
++
++ case SCST_MCMD_STATE_AFFECTED_CMDS_DONE:
++ if (mcmd->cmd_finish_wait_count == 0) {
++ mcmd->state = SCST_MCMD_STATE_DONE;
++ res = 0;
++ } else {
++ TRACE_MGMT_DBG("cmd_finish_wait_count(%d) not 0, "
++ "preparing to wait",
++ mcmd->cmd_finish_wait_count);
++ mcmd->state = SCST_MCMD_STATE_WAITING_AFFECTED_CMDS_FINISHED;
++ res = -1;
++ }
++ break;
++
++ case SCST_MCMD_STATE_DONE:
++ mcmd->state = SCST_MCMD_STATE_FINISHED;
++ res = 0;
++ break;
++
++ default:
++ PRINT_CRIT_ERROR("Wrong mcmd %p state %d (fn %d, "
++ "cmd_finish_wait_count %d, cmd_done_wait_count %d)",
++ mcmd, mcmd->state, mcmd->fn,
++ mcmd->cmd_finish_wait_count, mcmd->cmd_done_wait_count);
++ spin_unlock_irq(&scst_mcmd_lock);
++ res = -1;
++ BUG();
++ goto out;
++ }
++
++ spin_unlock_irq(&scst_mcmd_lock);
++
++out:
++ return res;
++}
++
++/* IRQs supposed to be disabled */
++static bool __scst_check_unblock_aborted_cmd(struct scst_cmd *cmd,
++ struct list_head *list_entry)
++{
++ bool res;
++ if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
++ list_del(list_entry);
++ spin_lock(&cmd->cmd_threads->cmd_list_lock);
++ list_add_tail(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
++ spin_unlock(&cmd->cmd_threads->cmd_list_lock);
++ res = 1;
++ } else
++ res = 0;
++ return res;
++}
++
++static void scst_unblock_aborted_cmds(int scst_mutex_held)
++{
++ struct scst_device *dev;
++
++ TRACE_ENTRY();
++
++ if (!scst_mutex_held)
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
++ struct scst_cmd *cmd, *tcmd;
++ struct scst_tgt_dev *tgt_dev;
++ spin_lock_bh(&dev->dev_lock);
++ local_irq_disable();
++ list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
++ blocked_cmd_list_entry) {
++ if (__scst_check_unblock_aborted_cmd(cmd,
++ &cmd->blocked_cmd_list_entry)) {
++ TRACE_MGMT_DBG("Unblock aborted blocked cmd %p",
++ cmd);
++ }
++ }
++ local_irq_enable();
++ spin_unlock_bh(&dev->dev_lock);
++
++ local_irq_disable();
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ struct scst_order_data *order_data = tgt_dev->curr_order_data;
++ spin_lock(&order_data->sn_lock);
++ list_for_each_entry_safe(cmd, tcmd,
++ &order_data->deferred_cmd_list,
++ sn_cmd_list_entry) {
++ if (__scst_check_unblock_aborted_cmd(cmd,
++ &cmd->sn_cmd_list_entry)) {
++ TRACE_MGMT_DBG("Unblocked aborted SN "
++ "cmd %p (sn %u)",
++ cmd, cmd->sn);
++ order_data->def_cmd_count--;
++ }
++ }
++ spin_unlock(&order_data->sn_lock);
++ }
++ local_irq_enable();
++ }
++
++ if (!scst_mutex_held)
++ mutex_unlock(&scst_mutex);
++
++ TRACE_EXIT();
++ return;
++}
++
++static void __scst_abort_task_set(struct scst_mgmt_cmd *mcmd,
++ struct scst_tgt_dev *tgt_dev)
++{
++ struct scst_cmd *cmd;
++ struct scst_session *sess = tgt_dev->sess;
++ bool other_ini;
++
++ TRACE_ENTRY();
++
++ if ((mcmd->fn == SCST_PR_ABORT_ALL) &&
++ (mcmd->origin_pr_cmd->sess != sess))
++ other_ini = true;
++ else
++ other_ini = false;
++
++ spin_lock_irq(&sess->sess_list_lock);
++
++ TRACE_DBG("Searching in sess cmd list (sess=%p)", sess);
++ list_for_each_entry(cmd, &sess->sess_cmd_list,
++ sess_cmd_list_entry) {
++ if ((mcmd->fn == SCST_PR_ABORT_ALL) &&
++ (mcmd->origin_pr_cmd == cmd))
++ continue;
++ if ((cmd->tgt_dev == tgt_dev) ||
++ ((cmd->tgt_dev == NULL) &&
++ (cmd->lun == tgt_dev->lun))) {
++ if (mcmd->cmd_sn_set) {
++ BUG_ON(!cmd->tgt_sn_set);
++ if (scst_sn_before(mcmd->cmd_sn, cmd->tgt_sn) ||
++ (mcmd->cmd_sn == cmd->tgt_sn))
++ continue;
++ }
++ scst_abort_cmd(cmd, mcmd, other_ini, 0);
++ }
++ }
++ spin_unlock_irq(&sess->sess_list_lock);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Returns 0 if the command processing should be continued, <0 otherwise */
++static int scst_abort_task_set(struct scst_mgmt_cmd *mcmd)
++{
++ int res;
++ struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
++
++ TRACE(TRACE_MGMT, "Aborting task set (lun=%lld, mcmd=%p)",
++ (long long unsigned int)tgt_dev->lun, mcmd);
++
++ __scst_abort_task_set(mcmd, tgt_dev);
++
++ if (mcmd->fn == SCST_PR_ABORT_ALL) {
++ struct scst_pr_abort_all_pending_mgmt_cmds_counter *pr_cnt =
++ mcmd->origin_pr_cmd->pr_abort_counter;
++ if (atomic_dec_and_test(&pr_cnt->pr_aborting_cnt))
++ complete_all(&pr_cnt->pr_aborting_cmpl);
++ }
++
++ tm_dbg_task_mgmt(mcmd->mcmd_tgt_dev->dev, "ABORT TASK SET/PR ABORT", 0);
++
++ scst_unblock_aborted_cmds(0);
++
++ scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
++
++ res = scst_set_mcmd_next_state(mcmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_is_cmd_belongs_to_dev(struct scst_cmd *cmd,
++ struct scst_device *dev)
++{
++ struct scst_tgt_dev *tgt_dev = NULL;
++ struct list_head *head;
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("Finding match for dev %s and cmd %p (lun %lld)", dev->virt_name,
++ cmd, (long long unsigned int)cmd->lun);
++
++ head = &cmd->sess->sess_tgt_dev_list[SESS_TGT_DEV_LIST_HASH_FN(cmd->lun)];
++ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
++ if (tgt_dev->lun == cmd->lun) {
++ TRACE_DBG("dev %s found", tgt_dev->dev->virt_name);
++ res = (tgt_dev->dev == dev);
++ goto out;
++ }
++ }
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++/* Returns 0 if the command processing should be continued, <0 otherwise */
++static int scst_clear_task_set(struct scst_mgmt_cmd *mcmd)
++{
++ int res;
++ struct scst_device *dev = mcmd->mcmd_tgt_dev->dev;
++ struct scst_tgt_dev *tgt_dev;
++ LIST_HEAD(UA_tgt_devs);
++
++ TRACE_ENTRY();
++
++ TRACE(TRACE_MGMT, "Clearing task set (lun=%lld, mcmd=%p)",
++ (long long unsigned int)mcmd->lun, mcmd);
++
++#if 0 /* we are SAM-3 */
++ /*
++ * When a logical unit is aborting one or more tasks from a SCSI
++ * initiator port with the TASK ABORTED status it should complete all
++ * of those tasks before entering additional tasks from that SCSI
++ * initiator port into the task set - SAM2
++ */
++ mcmd->needs_unblocking = 1;
++ spin_lock_bh(&dev->dev_lock);
++ scst_block_dev(dev);
++ spin_unlock_bh(&dev->dev_lock);
++#endif
++
++ __scst_abort_task_set(mcmd, mcmd->mcmd_tgt_dev);
++
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ struct scst_session *sess = tgt_dev->sess;
++ struct scst_cmd *cmd;
++ int aborted = 0;
++
++ if (tgt_dev == mcmd->mcmd_tgt_dev)
++ continue;
++
++ spin_lock_irq(&sess->sess_list_lock);
++
++ TRACE_DBG("Searching in sess cmd list (sess=%p)", sess);
++ list_for_each_entry(cmd, &sess->sess_cmd_list,
++ sess_cmd_list_entry) {
++ if ((cmd->dev == dev) ||
++ ((cmd->dev == NULL) &&
++ scst_is_cmd_belongs_to_dev(cmd, dev))) {
++ scst_abort_cmd(cmd, mcmd, 1, 0);
++ aborted = 1;
++ }
++ }
++ spin_unlock_irq(&sess->sess_list_lock);
++
++ if (aborted)
++ list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
++ &UA_tgt_devs);
++ }
++
++ tm_dbg_task_mgmt(mcmd->mcmd_tgt_dev->dev, "CLEAR TASK SET", 0);
++
++ scst_unblock_aborted_cmds(1);
++
++ mutex_unlock(&scst_mutex);
++
++ if (!dev->tas) {
++ uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
++ int sl;
++
++ sl = scst_set_sense(sense_buffer, sizeof(sense_buffer),
++ dev->d_sense,
++ SCST_LOAD_SENSE(scst_sense_cleared_by_another_ini_UA));
++
++ list_for_each_entry(tgt_dev, &UA_tgt_devs,
++ extra_tgt_dev_list_entry) {
++ scst_check_set_UA(tgt_dev, sense_buffer, sl, 0);
++ }
++ }
++
++ scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 0);
++
++ res = scst_set_mcmd_next_state(mcmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* Returns 0 if the command processing should be continued,
++ * >0, if it should be requeued, <0 otherwise */
++static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
++{
++ int res = 0, rc;
++
++ TRACE_ENTRY();
++
++ switch (mcmd->fn) {
++ case SCST_ABORT_TASK:
++ {
++ struct scst_session *sess = mcmd->sess;
++ struct scst_cmd *cmd;
++
++ spin_lock_irq(&sess->sess_list_lock);
++ cmd = __scst_find_cmd_by_tag(sess, mcmd->tag, true);
++ if (cmd == NULL) {
++ TRACE_MGMT_DBG("ABORT TASK: command "
++ "for tag %llu not found",
++ (long long unsigned int)mcmd->tag);
++ mcmd->status = SCST_MGMT_STATUS_TASK_NOT_EXIST;
++ spin_unlock_irq(&sess->sess_list_lock);
++ res = scst_set_mcmd_next_state(mcmd);
++ goto out;
++ }
++ __scst_cmd_get(cmd);
++ spin_unlock_irq(&sess->sess_list_lock);
++ TRACE_DBG("Cmd to abort %p for tag %llu found",
++ cmd, (long long unsigned int)mcmd->tag);
++ mcmd->cmd_to_abort = cmd;
++ mcmd->state = SCST_MCMD_STATE_EXEC;
++ break;
++ }
++
++ case SCST_TARGET_RESET:
++ case SCST_NEXUS_LOSS_SESS:
++ case SCST_ABORT_ALL_TASKS_SESS:
++ case SCST_NEXUS_LOSS:
++ case SCST_ABORT_ALL_TASKS:
++ case SCST_UNREG_SESS_TM:
++ mcmd->state = SCST_MCMD_STATE_EXEC;
++ break;
++
++ case SCST_ABORT_TASK_SET:
++ case SCST_CLEAR_ACA:
++ case SCST_CLEAR_TASK_SET:
++ case SCST_LUN_RESET:
++ case SCST_PR_ABORT_ALL:
++ rc = scst_mgmt_translate_lun(mcmd);
++ if (rc == 0)
++ mcmd->state = SCST_MCMD_STATE_EXEC;
++ else if (rc < 0) {
++ PRINT_ERROR("Corresponding device for LUN %lld not "
++ "found", (long long unsigned int)mcmd->lun);
++ mcmd->status = SCST_MGMT_STATUS_LUN_NOT_EXIST;
++ res = scst_set_mcmd_next_state(mcmd);
++ } else
++ res = rc;
++ break;
++
++ default:
++ BUG();
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* Returns 0 if the command processing should be continued, <0 otherwise */
++static int scst_target_reset(struct scst_mgmt_cmd *mcmd)
++{
++ int res, rc;
++ struct scst_device *dev;
++ struct scst_acg *acg = mcmd->sess->acg;
++ struct scst_acg_dev *acg_dev;
++ int cont, c;
++ LIST_HEAD(host_devs);
++
++ TRACE_ENTRY();
++
++ TRACE(TRACE_MGMT, "Target reset (mcmd %p, cmd count %d)",
++ mcmd, atomic_read(&mcmd->sess->sess_cmd_count));
++
++ mcmd->needs_unblocking = 1;
++
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
++ struct scst_device *d;
++ struct scst_tgt_dev *tgt_dev;
++ int found = 0;
++
++ dev = acg_dev->dev;
++
++ spin_lock_bh(&dev->dev_lock);
++ scst_block_dev(dev);
++ scst_process_reset(dev, mcmd->sess, NULL, mcmd, true);
++ spin_unlock_bh(&dev->dev_lock);
++
++ cont = 0;
++ c = 0;
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ cont = 1;
++ if (mcmd->sess == tgt_dev->sess) {
++ rc = scst_call_dev_task_mgmt_fn(mcmd,
++ tgt_dev, 0);
++ if (rc == SCST_DEV_TM_NOT_COMPLETED)
++ c = 1;
++ else if ((rc < 0) &&
++ (mcmd->status == SCST_MGMT_STATUS_SUCCESS))
++ mcmd->status = rc;
++ break;
++ }
++ }
++ if (cont && !c)
++ continue;
++
++ if (dev->scsi_dev == NULL)
++ continue;
++
++ list_for_each_entry(d, &host_devs, tm_dev_list_entry) {
++ if (dev->scsi_dev->host->host_no ==
++ d->scsi_dev->host->host_no) {
++ found = 1;
++ break;
++ }
++ }
++ if (!found)
++ list_add_tail(&dev->tm_dev_list_entry, &host_devs);
++
++ tm_dbg_task_mgmt(dev, "TARGET RESET", 0);
++ }
++
++ scst_unblock_aborted_cmds(1);
++
++ /*
++ * We suppose here that for all commands that already on devices
++ * on/after scsi_reset_provider() completion callbacks will be called.
++ */
++
++ list_for_each_entry(dev, &host_devs, tm_dev_list_entry) {
++ /* dev->scsi_dev must be non-NULL here */
++ TRACE(TRACE_MGMT, "Resetting host %d bus ",
++ dev->scsi_dev->host->host_no);
++ rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_TARGET);
++ TRACE(TRACE_MGMT, "Result of host %d target reset: %s",
++ dev->scsi_dev->host->host_no,
++ (rc == SUCCESS) ? "SUCCESS" : "FAILED");
++#if 0
++ if ((rc != SUCCESS) &&
++ (mcmd->status == SCST_MGMT_STATUS_SUCCESS)) {
++ /*
++ * SCSI_TRY_RESET_BUS is also done by
++ * scsi_reset_provider()
++ */
++ mcmd->status = SCST_MGMT_STATUS_FAILED;
++ }
++#else
++ /*
++ * scsi_reset_provider() returns very weird status, so let's
++ * always succeed
++ */
++#endif
++ }
++
++ list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
++ dev = acg_dev->dev;
++ if (dev->scsi_dev != NULL)
++ dev->scsi_dev->was_reset = 0;
++ }
++
++ mutex_unlock(&scst_mutex);
++
++ res = scst_set_mcmd_next_state(mcmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* Returns 0 if the command processing should be continued, <0 otherwise */
++static int scst_lun_reset(struct scst_mgmt_cmd *mcmd)
++{
++ int res, rc;
++ struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
++ struct scst_device *dev = tgt_dev->dev;
++
++ TRACE_ENTRY();
++
++ TRACE(TRACE_MGMT, "Resetting LUN %lld (mcmd %p)",
++ (long long unsigned int)tgt_dev->lun, mcmd);
++
++ mcmd->needs_unblocking = 1;
++
++ spin_lock_bh(&dev->dev_lock);
++ scst_block_dev(dev);
++ scst_process_reset(dev, mcmd->sess, NULL, mcmd, true);
++ spin_unlock_bh(&dev->dev_lock);
++
++ rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 1);
++ if (rc != SCST_DEV_TM_NOT_COMPLETED)
++ goto out_tm_dbg;
++
++ if (dev->scsi_dev != NULL) {
++ TRACE(TRACE_MGMT, "Resetting host %d bus ",
++ dev->scsi_dev->host->host_no);
++ rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_DEVICE);
++#if 0
++ if (rc != SUCCESS && mcmd->status == SCST_MGMT_STATUS_SUCCESS)
++ mcmd->status = SCST_MGMT_STATUS_FAILED;
++#else
++ /*
++ * scsi_reset_provider() returns very weird status, so let's
++ * always succeed
++ */
++#endif
++ dev->scsi_dev->was_reset = 0;
++ }
++
++ scst_unblock_aborted_cmds(0);
++
++out_tm_dbg:
++ tm_dbg_task_mgmt(mcmd->mcmd_tgt_dev->dev, "LUN RESET", 0);
++
++ res = scst_set_mcmd_next_state(mcmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* scst_mutex supposed to be held */
++static void scst_do_nexus_loss_sess(struct scst_mgmt_cmd *mcmd)
++{
++ int i;
++ struct scst_session *sess = mcmd->sess;
++ struct scst_tgt_dev *tgt_dev;
++
++ TRACE_ENTRY();
++
++ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
++ struct list_head *head = &sess->sess_tgt_dev_list[i];
++ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
++ scst_nexus_loss(tgt_dev,
++ (mcmd->fn != SCST_UNREG_SESS_TM));
++ }
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Returns 0 if the command processing should be continued, <0 otherwise */
++static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
++ int nexus_loss)
++{
++ int res;
++ int i;
++ struct scst_session *sess = mcmd->sess;
++ struct scst_tgt_dev *tgt_dev;
++
++ TRACE_ENTRY();
++
++ if (nexus_loss) {
++ TRACE_MGMT_DBG("Nexus loss for sess %p (mcmd %p)",
++ sess, mcmd);
++ } else {
++ TRACE_MGMT_DBG("Aborting all from sess %p (mcmd %p)",
++ sess, mcmd);
++ }
++
++ mutex_lock(&scst_mutex);
++
++ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
++ struct list_head *head = &sess->sess_tgt_dev_list[i];
++ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
++ int rc;
++
++ __scst_abort_task_set(mcmd, tgt_dev);
++
++ rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
++ if (rc < 0 && mcmd->status == SCST_MGMT_STATUS_SUCCESS)
++ mcmd->status = rc;
++
++ tm_dbg_task_mgmt(tgt_dev->dev, "NEXUS LOSS SESS or "
++ "ABORT ALL SESS or UNREG SESS",
++ (mcmd->fn == SCST_UNREG_SESS_TM));
++ }
++ }
++
++ scst_unblock_aborted_cmds(1);
++
++ mutex_unlock(&scst_mutex);
++
++ res = scst_set_mcmd_next_state(mcmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* scst_mutex supposed to be held */
++static void scst_do_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd)
++{
++ int i;
++ struct scst_tgt *tgt = mcmd->sess->tgt;
++ struct scst_session *sess;
++
++ TRACE_ENTRY();
++
++ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
++ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
++ struct list_head *head = &sess->sess_tgt_dev_list[i];
++ struct scst_tgt_dev *tgt_dev;
++ list_for_each_entry(tgt_dev, head,
++ sess_tgt_dev_list_entry) {
++ scst_nexus_loss(tgt_dev, true);
++ }
++ }
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static int scst_abort_all_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd,
++ int nexus_loss)
++{
++ int res;
++ int i;
++ struct scst_tgt *tgt = mcmd->sess->tgt;
++ struct scst_session *sess;
++
++ TRACE_ENTRY();
++
++ if (nexus_loss) {
++ TRACE_MGMT_DBG("I_T Nexus loss (tgt %p, mcmd %p)",
++ tgt, mcmd);
++ } else {
++ TRACE_MGMT_DBG("Aborting all from tgt %p (mcmd %p)",
++ tgt, mcmd);
++ }
++
++ mutex_lock(&scst_mutex);
++
++ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
++ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
++ struct list_head *head = &sess->sess_tgt_dev_list[i];
++ struct scst_tgt_dev *tgt_dev;
++ list_for_each_entry(tgt_dev, head,
++ sess_tgt_dev_list_entry) {
++ int rc;
++
++ __scst_abort_task_set(mcmd, tgt_dev);
++
++ if (mcmd->sess == tgt_dev->sess) {
++ rc = scst_call_dev_task_mgmt_fn(
++ mcmd, tgt_dev, 0);
++ if ((rc < 0) &&
++ (mcmd->status == SCST_MGMT_STATUS_SUCCESS))
++ mcmd->status = rc;
++ }
++
++ tm_dbg_task_mgmt(tgt_dev->dev, "NEXUS LOSS or "
++ "ABORT ALL", 0);
++ }
++ }
++ }
++
++ scst_unblock_aborted_cmds(1);
++
++ mutex_unlock(&scst_mutex);
++
++ res = scst_set_mcmd_next_state(mcmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_abort_task(struct scst_mgmt_cmd *mcmd)
++{
++ int res;
++ struct scst_cmd *cmd = mcmd->cmd_to_abort;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("Aborting task (cmd %p, sn %d, set %d, tag %llu, "
++ "queue_type %x)", cmd, cmd->sn, cmd->sn_set,
++ (long long unsigned int)mcmd->tag, cmd->queue_type);
++
++ if (mcmd->lun_set && (mcmd->lun != cmd->lun)) {
++ PRINT_ERROR("ABORT TASK: LUN mismatch: mcmd LUN %llx, "
++ "cmd LUN %llx, cmd tag %llu",
++ (long long unsigned int)mcmd->lun,
++ (long long unsigned int)cmd->lun,
++ (long long unsigned int)mcmd->tag);
++ mcmd->status = SCST_MGMT_STATUS_REJECTED;
++ } else if (mcmd->cmd_sn_set &&
++ (scst_sn_before(mcmd->cmd_sn, cmd->tgt_sn) ||
++ (mcmd->cmd_sn == cmd->tgt_sn))) {
++ PRINT_ERROR("ABORT TASK: SN mismatch: mcmd SN %x, "
++ "cmd SN %x, cmd tag %llu", mcmd->cmd_sn,
++ cmd->tgt_sn, (long long unsigned int)mcmd->tag);
++ mcmd->status = SCST_MGMT_STATUS_REJECTED;
++ } else {
++ spin_lock_irq(&cmd->sess->sess_list_lock);
++ scst_abort_cmd(cmd, mcmd, 0, 1);
++ spin_unlock_irq(&cmd->sess->sess_list_lock);
++
++ scst_unblock_aborted_cmds(0);
++ }
++
++ res = scst_set_mcmd_next_state(mcmd);
++
++ mcmd->cmd_to_abort = NULL; /* just in case */
++
++ __scst_cmd_put(cmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* Returns 0 if the command processing should be continued, <0 otherwise */
++static int scst_mgmt_cmd_exec(struct scst_mgmt_cmd *mcmd)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ mcmd->status = SCST_MGMT_STATUS_SUCCESS;
++
++ switch (mcmd->fn) {
++ case SCST_ABORT_TASK:
++ res = scst_abort_task(mcmd);
++ break;
++
++ case SCST_ABORT_TASK_SET:
++ case SCST_PR_ABORT_ALL:
++ res = scst_abort_task_set(mcmd);
++ break;
++
++ case SCST_CLEAR_TASK_SET:
++ if (mcmd->mcmd_tgt_dev->dev->tst ==
++ SCST_CONTR_MODE_SEP_TASK_SETS)
++ res = scst_abort_task_set(mcmd);
++ else
++ res = scst_clear_task_set(mcmd);
++ break;
++
++ case SCST_LUN_RESET:
++ res = scst_lun_reset(mcmd);
++ break;
++
++ case SCST_TARGET_RESET:
++ res = scst_target_reset(mcmd);
++ break;
++
++ case SCST_ABORT_ALL_TASKS_SESS:
++ res = scst_abort_all_nexus_loss_sess(mcmd, 0);
++ break;
++
++ case SCST_NEXUS_LOSS_SESS:
++ case SCST_UNREG_SESS_TM:
++ res = scst_abort_all_nexus_loss_sess(mcmd, 1);
++ break;
++
++ case SCST_ABORT_ALL_TASKS:
++ res = scst_abort_all_nexus_loss_tgt(mcmd, 0);
++ break;
++
++ case SCST_NEXUS_LOSS:
++ res = scst_abort_all_nexus_loss_tgt(mcmd, 1);
++ break;
++
++ case SCST_CLEAR_ACA:
++ if (scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 1) ==
++ SCST_DEV_TM_NOT_COMPLETED) {
++ mcmd->status = SCST_MGMT_STATUS_FN_NOT_SUPPORTED;
++ /* Nothing to do (yet) */
++ }
++ goto out_done;
++
++ default:
++ PRINT_ERROR("Unknown task management function %d", mcmd->fn);
++ mcmd->status = SCST_MGMT_STATUS_REJECTED;
++ goto out_done;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_done:
++ res = scst_set_mcmd_next_state(mcmd);
++ goto out;
++}
++
++static void scst_call_task_mgmt_affected_cmds_done(struct scst_mgmt_cmd *mcmd)
++{
++ struct scst_session *sess = mcmd->sess;
++
++ if ((sess->tgt->tgtt->task_mgmt_affected_cmds_done != NULL) &&
++ (mcmd->fn != SCST_UNREG_SESS_TM) &&
++ (mcmd->fn != SCST_PR_ABORT_ALL)) {
++ TRACE_DBG("Calling target %s task_mgmt_affected_cmds_done(%p)",
++ sess->tgt->tgtt->name, sess);
++ sess->tgt->tgtt->task_mgmt_affected_cmds_done(mcmd);
++ TRACE_MGMT_DBG("Target's %s task_mgmt_affected_cmds_done() "
++ "returned", sess->tgt->tgtt->name);
++ }
++ return;
++}
++
++static int scst_mgmt_affected_cmds_done(struct scst_mgmt_cmd *mcmd)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_mutex);
++
++ switch (mcmd->fn) {
++ case SCST_NEXUS_LOSS_SESS:
++ case SCST_UNREG_SESS_TM:
++ scst_do_nexus_loss_sess(mcmd);
++ break;
++
++ case SCST_NEXUS_LOSS:
++ scst_do_nexus_loss_tgt(mcmd);
++ break;
++ }
++
++ mutex_unlock(&scst_mutex);
++
++ scst_call_task_mgmt_affected_cmds_done(mcmd);
++
++ res = scst_set_mcmd_next_state(mcmd);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void scst_mgmt_cmd_send_done(struct scst_mgmt_cmd *mcmd)
++{
++ struct scst_device *dev;
++ struct scst_session *sess = mcmd->sess;
++
++ TRACE_ENTRY();
++
++ mcmd->state = SCST_MCMD_STATE_FINISHED;
++ if (scst_is_strict_mgmt_fn(mcmd->fn) && (mcmd->completed_cmd_count > 0))
++ mcmd->status = SCST_MGMT_STATUS_TASK_NOT_EXIST;
++
++ if (mcmd->fn < SCST_UNREG_SESS_TM)
++ TRACE(TRACE_MGMT, "TM fn %d finished, "
++ "status %d", mcmd->fn, mcmd->status);
++ else
++ TRACE_MGMT_DBG("TM fn %d finished, "
++ "status %d", mcmd->fn, mcmd->status);
++
++ if (mcmd->fn == SCST_PR_ABORT_ALL) {
++ mcmd->origin_pr_cmd->scst_cmd_done(mcmd->origin_pr_cmd,
++ SCST_CMD_STATE_DEFAULT,
++ SCST_CONTEXT_THREAD);
++ } else if ((sess->tgt->tgtt->task_mgmt_fn_done != NULL) &&
++ (mcmd->fn != SCST_UNREG_SESS_TM)) {
++ TRACE_DBG("Calling target %s task_mgmt_fn_done(%p)",
++ sess->tgt->tgtt->name, sess);
++ sess->tgt->tgtt->task_mgmt_fn_done(mcmd);
++ TRACE_MGMT_DBG("Target's %s task_mgmt_fn_done() "
++ "returned", sess->tgt->tgtt->name);
++ }
++
++ if (mcmd->needs_unblocking) {
++ switch (mcmd->fn) {
++ case SCST_LUN_RESET:
++ case SCST_CLEAR_TASK_SET:
++ dev = mcmd->mcmd_tgt_dev->dev;
++ spin_lock_bh(&dev->dev_lock);
++ scst_unblock_dev(dev);
++ spin_unlock_bh(&dev->dev_lock);
++ break;
++
++ case SCST_TARGET_RESET:
++ {
++ struct scst_acg *acg = mcmd->sess->acg;
++ struct scst_acg_dev *acg_dev;
++
++ mutex_lock(&scst_mutex);
++ list_for_each_entry(acg_dev, &acg->acg_dev_list,
++ acg_dev_list_entry) {
++ dev = acg_dev->dev;
++ spin_lock_bh(&dev->dev_lock);
++ scst_unblock_dev(dev);
++ spin_unlock_bh(&dev->dev_lock);
++ }
++ mutex_unlock(&scst_mutex);
++ break;
++ }
++
++ default:
++ BUG();
++ break;
++ }
++ }
++
++ mcmd->tgt_priv = NULL;
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Returns >0, if cmd should be requeued */
++static int scst_process_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ /*
++ * We are in the TM thread and mcmd->state guaranteed to not be
++ * changed behind us.
++ */
++
++ TRACE_DBG("mcmd %p, state %d", mcmd, mcmd->state);
++
++ while (1) {
++ switch (mcmd->state) {
++ case SCST_MCMD_STATE_INIT:
++ res = scst_mgmt_cmd_init(mcmd);
++ if (res != 0)
++ goto out;
++ break;
++
++ case SCST_MCMD_STATE_EXEC:
++ if (scst_mgmt_cmd_exec(mcmd))
++ goto out;
++ break;
++
++ case SCST_MCMD_STATE_AFFECTED_CMDS_DONE:
++ if (scst_mgmt_affected_cmds_done(mcmd))
++ goto out;
++ break;
++
++ case SCST_MCMD_STATE_DONE:
++ scst_mgmt_cmd_send_done(mcmd);
++ break;
++
++ case SCST_MCMD_STATE_FINISHED:
++ scst_free_mgmt_cmd(mcmd);
++ /* mcmd is dead */
++ goto out;
++
++ default:
++ PRINT_CRIT_ERROR("Wrong mcmd %p state %d (fn %d, "
++ "cmd_finish_wait_count %d, cmd_done_wait_count "
++ "%d)", mcmd, mcmd->state, mcmd->fn,
++ mcmd->cmd_finish_wait_count,
++ mcmd->cmd_done_wait_count);
++ BUG();
++ res = -1;
++ goto out;
++ }
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static inline int test_mgmt_cmd_list(void)
++{
++ int res = !list_empty(&scst_active_mgmt_cmd_list) ||
++ unlikely(kthread_should_stop());
++ return res;
++}
++
++int scst_tm_thread(void *arg)
++{
++ TRACE_ENTRY();
++
++ PRINT_INFO("Task management thread started, PID %d", current->pid);
++
++ current->flags |= PF_NOFREEZE;
++
++ set_user_nice(current, -10);
++
++ spin_lock_irq(&scst_mcmd_lock);
++ while (!kthread_should_stop()) {
++ wait_queue_t wait;
++ init_waitqueue_entry(&wait, current);
++
++ if (!test_mgmt_cmd_list()) {
++ add_wait_queue_exclusive(&scst_mgmt_cmd_list_waitQ,
++ &wait);
++ for (;;) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ if (test_mgmt_cmd_list())
++ break;
++ spin_unlock_irq(&scst_mcmd_lock);
++ schedule();
++ spin_lock_irq(&scst_mcmd_lock);
++ }
++ set_current_state(TASK_RUNNING);
++ remove_wait_queue(&scst_mgmt_cmd_list_waitQ, &wait);
++ }
++
++ while (!list_empty(&scst_active_mgmt_cmd_list)) {
++ int rc;
++ struct scst_mgmt_cmd *mcmd;
++ mcmd = list_entry(scst_active_mgmt_cmd_list.next,
++ typeof(*mcmd), mgmt_cmd_list_entry);
++ TRACE_MGMT_DBG("Deleting mgmt cmd %p from active cmd "
++ "list", mcmd);
++ list_del(&mcmd->mgmt_cmd_list_entry);
++ spin_unlock_irq(&scst_mcmd_lock);
++ rc = scst_process_mgmt_cmd(mcmd);
++ spin_lock_irq(&scst_mcmd_lock);
++ if (rc > 0) {
++ if (test_bit(SCST_FLAG_SUSPENDED, &scst_flags) &&
++ !test_bit(SCST_FLAG_SUSPENDING,
++ &scst_flags)) {
++ TRACE_MGMT_DBG("Adding mgmt cmd %p to "
++ "head of delayed mgmt cmd list",
++ mcmd);
++ list_add(&mcmd->mgmt_cmd_list_entry,
++ &scst_delayed_mgmt_cmd_list);
++ } else {
++ TRACE_MGMT_DBG("Adding mgmt cmd %p to "
++ "head of active mgmt cmd list",
++ mcmd);
++ list_add(&mcmd->mgmt_cmd_list_entry,
++ &scst_active_mgmt_cmd_list);
++ }
++ }
++ }
++ }
++ spin_unlock_irq(&scst_mcmd_lock);
++
++ /*
++ * If kthread_should_stop() is true, we are guaranteed to be
++ * on the module unload, so scst_active_mgmt_cmd_list must be empty.
++ */
++ BUG_ON(!list_empty(&scst_active_mgmt_cmd_list));
++
++ PRINT_INFO("Task management thread PID %d finished", current->pid);
++
++ TRACE_EXIT();
++ return 0;
++}
++
++static struct scst_mgmt_cmd *scst_pre_rx_mgmt_cmd(struct scst_session
++ *sess, int fn, int atomic, void *tgt_priv)
++{
++ struct scst_mgmt_cmd *mcmd = NULL;
++
++ TRACE_ENTRY();
++
++ if (unlikely(sess->tgt->tgtt->task_mgmt_fn_done == NULL)) {
++ PRINT_ERROR("New mgmt cmd, but task_mgmt_fn_done() is NULL "
++ "(target %s)", sess->tgt->tgtt->name);
++ goto out;
++ }
++
++ mcmd = scst_alloc_mgmt_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
++ if (mcmd == NULL) {
++ PRINT_CRIT_ERROR("Lost TM fn %d, initiator %s", fn,
++ sess->initiator_name);
++ goto out;
++ }
++
++ mcmd->sess = sess;
++ mcmd->fn = fn;
++ mcmd->state = SCST_MCMD_STATE_INIT;
++ mcmd->tgt_priv = tgt_priv;
++
++ if (fn == SCST_PR_ABORT_ALL) {
++ atomic_inc(&mcmd->origin_pr_cmd->pr_abort_counter->pr_abort_pending_cnt);
++ atomic_inc(&mcmd->origin_pr_cmd->pr_abort_counter->pr_aborting_cnt);
++ }
++
++out:
++ TRACE_EXIT();
++ return mcmd;
++}
++
++static int scst_post_rx_mgmt_cmd(struct scst_session *sess,
++ struct scst_mgmt_cmd *mcmd)
++{
++ unsigned long flags;
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ scst_sess_get(sess);
++
++ if (unlikely(sess->shut_phase != SCST_SESS_SPH_READY)) {
++ PRINT_CRIT_ERROR("New mgmt cmd while shutting down the "
++ "session %p shut_phase %ld", sess, sess->shut_phase);
++ BUG();
++ }
++
++ local_irq_save(flags);
++
++ spin_lock(&sess->sess_list_lock);
++ atomic_inc(&sess->sess_cmd_count);
++
++ if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
++ switch (sess->init_phase) {
++ case SCST_SESS_IPH_INITING:
++ TRACE_DBG("Adding mcmd %p to init deferred mcmd list",
++ mcmd);
++ list_add_tail(&mcmd->mgmt_cmd_list_entry,
++ &sess->init_deferred_mcmd_list);
++ goto out_unlock;
++ case SCST_SESS_IPH_SUCCESS:
++ break;
++ case SCST_SESS_IPH_FAILED:
++ res = -1;
++ goto out_unlock;
++ default:
++ BUG();
++ }
++ }
++
++ spin_unlock(&sess->sess_list_lock);
++
++ TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd list", mcmd);
++ spin_lock(&scst_mcmd_lock);
++ list_add_tail(&mcmd->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
++ spin_unlock(&scst_mcmd_lock);
++
++ local_irq_restore(flags);
++
++ wake_up(&scst_mgmt_cmd_list_waitQ);
++
++out:
++ TRACE_EXIT();
++ return res;
++
++out_unlock:
++ spin_unlock(&sess->sess_list_lock);
++ local_irq_restore(flags);
++ goto out;
++}
++
++/**
++ * scst_rx_mgmt_fn() - create new management command and send it for execution
++ *
++ * Description:
++ * Creates new management command and sends it for execution.
++ *
++ * Returns 0 for success, error code otherwise.
++ *
++ * Must not be called in parallel with scst_unregister_session() for the
++ * same sess.
++ */
++int scst_rx_mgmt_fn(struct scst_session *sess,
++ const struct scst_rx_mgmt_params *params)
++{
++ int res = -EFAULT;
++ struct scst_mgmt_cmd *mcmd = NULL;
++
++ TRACE_ENTRY();
++
++ switch (params->fn) {
++ case SCST_ABORT_TASK:
++ BUG_ON(!params->tag_set);
++ break;
++ case SCST_TARGET_RESET:
++ case SCST_ABORT_ALL_TASKS:
++ case SCST_NEXUS_LOSS:
++ break;
++ default:
++ BUG_ON(!params->lun_set);
++ }
++
++ mcmd = scst_pre_rx_mgmt_cmd(sess, params->fn, params->atomic,
++ params->tgt_priv);
++ if (mcmd == NULL)
++ goto out;
++
++ if (params->lun_set) {
++ mcmd->lun = scst_unpack_lun(params->lun, params->lun_len);
++ if (mcmd->lun == NO_SUCH_LUN)
++ goto out_free;
++ mcmd->lun_set = 1;
++ }
++
++ if (params->tag_set)
++ mcmd->tag = params->tag;
++
++ mcmd->cmd_sn_set = params->cmd_sn_set;
++ mcmd->cmd_sn = params->cmd_sn;
++
++ if (params->fn < SCST_UNREG_SESS_TM)
++ TRACE(TRACE_MGMT, "TM fn %d", params->fn);
++ else
++ TRACE_MGMT_DBG("TM fn %d", params->fn);
++
++ TRACE_MGMT_DBG("sess=%p, tag_set %d, tag %lld, lun_set %d, "
++ "lun=%lld, cmd_sn_set %d, cmd_sn %d, priv %p", sess,
++ params->tag_set,
++ (long long unsigned int)params->tag,
++ params->lun_set,
++ (long long unsigned int)mcmd->lun,
++ params->cmd_sn_set,
++ params->cmd_sn,
++ params->tgt_priv);
++
++ if (scst_post_rx_mgmt_cmd(sess, mcmd) != 0)
++ goto out_free;
++
++ res = 0;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free:
++ scst_free_mgmt_cmd(mcmd);
++ mcmd = NULL;
++ goto out;
++}
++EXPORT_SYMBOL(scst_rx_mgmt_fn);
++
++/*
++ * Written by Jack Handy - jakkhandy@hotmail.com
++ * Taken by Gennadiy Nerubayev <parakie@gmail.com> from
++ * http://www.codeproject.com/KB/string/wildcmp.aspx. No license attached
++ * to it, and it's posted on a free site; assumed to be free for use.
++ *
++ * Added the negative sign support - VLNB
++ *
++ * Also see comment for wildcmp().
++ *
++ * User space part of iSCSI-SCST also has a copy of this code, so fixing a bug
++ * here, don't forget to fix the copy too!
++ */
++static bool __wildcmp(const char *wild, const char *string, int recursion_level)
++{
++ const char *cp = NULL, *mp = NULL;
++
++ while ((*string) && (*wild != '*')) {
++ if ((*wild == '!') && (recursion_level == 0))
++ return !__wildcmp(++wild, string, ++recursion_level);
++
++ if ((*wild != *string) && (*wild != '?'))
++ return false;
++
++ wild++;
++ string++;
++ }
++
++ while (*string) {
++ if ((*wild == '!') && (recursion_level == 0))
++ return !__wildcmp(++wild, string, ++recursion_level);
++
++ if (*wild == '*') {
++ if (!*++wild)
++ return true;
++
++ mp = wild;
++ cp = string+1;
++ } else if ((*wild == *string) || (*wild == '?')) {
++ wild++;
++ string++;
++ } else {
++ wild = mp;
++ string = cp++;
++ }
++ }
++
++ while (*wild == '*')
++ wild++;
++
++ return !*wild;
++}
+
-+obj-$(CONFIG_SCST) += scst.o dev_handlers/ iscsi-scst/ qla2xxx-target/ \
-+ srpt/ scst_local/
-diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/scst_lib.c
---- orig/linux-2.6.36/drivers/scst/scst_lib.c
-+++ linux-2.6.36/drivers/scst/scst_lib.c
-@@ -0,0 +1,7362 @@
++/*
++ * Returns true if string "string" matches pattern "wild", false otherwise.
++ * Pattern is a regular DOS-type pattern, containing '*' and '?' symbols.
++ * '*' means match all any symbols, '?' means match only any single symbol.
++ *
++ * For instance:
++ * if (wildcmp("bl?h.*", "blah.jpg")) {
++ * // match
++ * } else {
++ * // no match
++ * }
++ *
++ * Also it supports boolean inversion sign '!', which does boolean inversion of
++ * the value of the rest of the string. Only one '!' allowed in the pattern,
++ * other '!' are treated as regular symbols. For instance:
++ * if (wildcmp("bl!?h.*", "blah.jpg")) {
++ * // no match
++ * } else {
++ * // match
++ * }
++ *
++ * Also see comment for __wildcmp().
++ */
++static bool wildcmp(const char *wild, const char *string)
++{
++ return __wildcmp(wild, string, 0);
++}
++
++/* scst_mutex supposed to be held */
++static struct scst_acg *scst_find_tgt_acg_by_name_wild(struct scst_tgt *tgt,
++ const char *initiator_name)
++{
++ struct scst_acg *acg, *res = NULL;
++ struct scst_acn *n;
++
++ TRACE_ENTRY();
++
++ if (initiator_name == NULL)
++ goto out;
++
++ list_for_each_entry(acg, &tgt->tgt_acg_list, acg_list_entry) {
++ list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
++ if (wildcmp(n->name, initiator_name)) {
++ TRACE_DBG("Access control group %s found",
++ acg->acg_name);
++ res = acg;
++ goto out;
++ }
++ }
++ }
++
++out:
++ TRACE_EXIT_HRES(res);
++ return res;
++}
++
++/* Must be called under scst_mutex */
++static struct scst_acg *__scst_find_acg(struct scst_tgt *tgt,
++ const char *initiator_name)
++{
++ struct scst_acg *acg = NULL;
++
++ TRACE_ENTRY();
++
++ acg = scst_find_tgt_acg_by_name_wild(tgt, initiator_name);
++ if (acg == NULL)
++ acg = tgt->default_acg;
++
++ TRACE_EXIT_HRES((unsigned long)acg);
++ return acg;
++}
++
++/* Must be called under scst_mutex */
++struct scst_acg *scst_find_acg(const struct scst_session *sess)
++{
++ return __scst_find_acg(sess->tgt, sess->initiator_name);
++}
++
++/**
++ * scst_initiator_has_luns() - check if this initiator will see any LUNs
++ *
++ * Checks if this initiator will see any LUNs upon connect to this target.
++ * Returns true if yes and false otherwise.
++ */
++bool scst_initiator_has_luns(struct scst_tgt *tgt, const char *initiator_name)
++{
++ bool res;
++ struct scst_acg *acg;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_mutex);
++
++ acg = __scst_find_acg(tgt, initiator_name);
++
++ res = !list_empty(&acg->acg_dev_list);
++
++ mutex_unlock(&scst_mutex);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL_GPL(scst_initiator_has_luns);
++
++static int scst_init_session(struct scst_session *sess)
++{
++ int res = 0;
++ struct scst_cmd *cmd;
++ struct scst_mgmt_cmd *mcmd, *tm;
++ int mwake = 0;
++
++ TRACE_ENTRY();
++
++ mutex_lock(&scst_mutex);
++
++ sess->acg = scst_find_acg(sess);
++
++ PRINT_INFO("Using security group \"%s\" for initiator \"%s\"",
++ sess->acg->acg_name, sess->initiator_name);
++
++ list_add_tail(&sess->acg_sess_list_entry, &sess->acg->acg_sess_list);
++
++ TRACE_DBG("Adding sess %p to tgt->sess_list", sess);
++ list_add_tail(&sess->sess_list_entry, &sess->tgt->sess_list);
++
++ if (sess->tgt->tgtt->get_initiator_port_transport_id != NULL) {
++ res = sess->tgt->tgtt->get_initiator_port_transport_id(
++ sess->tgt, sess, &sess->transport_id);
++ if (res != 0) {
++ PRINT_ERROR("Unable to make initiator %s port "
++ "transport id", sess->initiator_name);
++ goto failed;
++ }
++ TRACE_PR("sess %p (ini %s), transport id %s/%d", sess,
++ sess->initiator_name,
++ debug_transport_id_to_initiator_name(
++ sess->transport_id), sess->tgt->rel_tgt_id);
++ }
++
++ res = scst_sess_sysfs_create(sess);
++ if (res != 0)
++ goto failed;
++
++ /*
++ * scst_sess_alloc_tgt_devs() must be called after session added in the
++ * sess_list to not race with scst_check_reassign_sess()!
++ */
++ res = scst_sess_alloc_tgt_devs(sess);
++
++failed:
++ mutex_unlock(&scst_mutex);
++
++ if (sess->init_result_fn) {
++ TRACE_DBG("Calling init_result_fn(%p)", sess);
++ sess->init_result_fn(sess, sess->reg_sess_data, res);
++ TRACE_DBG("%s", "init_result_fn() returned");
++ }
++
++ spin_lock_irq(&sess->sess_list_lock);
++
++ if (res == 0)
++ sess->init_phase = SCST_SESS_IPH_SUCCESS;
++ else
++ sess->init_phase = SCST_SESS_IPH_FAILED;
++
++restart:
++ list_for_each_entry(cmd, &sess->init_deferred_cmd_list,
++ cmd_list_entry) {
++ TRACE_DBG("Deleting cmd %p from init deferred cmd list", cmd);
++ list_del(&cmd->cmd_list_entry);
++ atomic_dec(&sess->sess_cmd_count);
++ spin_unlock_irq(&sess->sess_list_lock);
++ scst_cmd_init_done(cmd, SCST_CONTEXT_THREAD);
++ spin_lock_irq(&sess->sess_list_lock);
++ goto restart;
++ }
++
++ spin_lock(&scst_mcmd_lock);
++ list_for_each_entry_safe(mcmd, tm, &sess->init_deferred_mcmd_list,
++ mgmt_cmd_list_entry) {
++ TRACE_DBG("Moving mgmt command %p from init deferred mcmd list",
++ mcmd);
++ list_move_tail(&mcmd->mgmt_cmd_list_entry,
++ &scst_active_mgmt_cmd_list);
++ mwake = 1;
++ }
++
++ spin_unlock(&scst_mcmd_lock);
++ /*
++ * In case of an error at this point the caller target driver supposed
++ * to already call this sess's unregistration.
++ */
++ sess->init_phase = SCST_SESS_IPH_READY;
++ spin_unlock_irq(&sess->sess_list_lock);
++
++ if (mwake)
++ wake_up(&scst_mgmt_cmd_list_waitQ);
++
++ scst_sess_put(sess);
++
++ TRACE_EXIT();
++ return res;
++}
++
++/**
++ * scst_register_session() - register session
++ * @tgt: target
++ * @atomic: true, if the function called in the atomic context. If false,
++ * this function will block until the session registration is
++ * completed.
++ * @initiator_name: remote initiator's name, any NULL-terminated string,
++ * e.g. iSCSI name, which used as the key to found appropriate
++ * access control group. Could be NULL, then the default
++ * target's LUNs are used.
++ * @tgt_priv: pointer to target driver's private data
++ * @result_fn_data: any target driver supplied data
++ * @result_fn: pointer to the function that will be asynchronously called
++ * when session initialization finishes.
++ * Can be NULL. Parameters:
++ * - sess - session
++ * - data - target driver supplied to scst_register_session()
++ * data
++ * - result - session initialization result, 0 on success or
++ * appropriate error code otherwise
++ *
++ * Description:
++ * Registers new session. Returns new session on success or NULL otherwise.
++ *
++ * Note: A session creation and initialization is a complex task,
++ * which requires sleeping state, so it can't be fully done
++ * in interrupt context. Therefore the "bottom half" of it, if
++ * scst_register_session() is called from atomic context, will be
++ * done in SCST thread context. In this case scst_register_session()
++ * will return not completely initialized session, but the target
++ * driver can supply commands to this session via scst_rx_cmd().
++ * Those commands processing will be delayed inside SCST until
++ * the session initialization is finished, then their processing
++ * will be restarted. The target driver will be notified about
++ * finish of the session initialization by function result_fn().
++ * On success the target driver could do nothing, but if the
++ * initialization fails, the target driver must ensure that
++ * no more new commands being sent or will be sent to SCST after
++ * result_fn() returns. All already sent to SCST commands for
++ * failed session will be returned in xmit_response() with BUSY status.
++ * In case of failure the driver shall call scst_unregister_session()
++ * inside result_fn(), it will NOT be called automatically.
++ */
++struct scst_session *scst_register_session(struct scst_tgt *tgt, int atomic,
++ const char *initiator_name, void *tgt_priv, void *result_fn_data,
++ void (*result_fn) (struct scst_session *sess, void *data, int result))
++{
++ struct scst_session *sess;
++ int res;
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ sess = scst_alloc_session(tgt, atomic ? GFP_ATOMIC : GFP_KERNEL,
++ initiator_name);
++ if (sess == NULL)
++ goto out;
++
++ scst_sess_set_tgt_priv(sess, tgt_priv);
++
++ scst_sess_get(sess); /* one for registered session */
++ scst_sess_get(sess); /* one held until sess is inited */
++
++ if (atomic) {
++ sess->reg_sess_data = result_fn_data;
++ sess->init_result_fn = result_fn;
++ spin_lock_irqsave(&scst_mgmt_lock, flags);
++ TRACE_DBG("Adding sess %p to scst_sess_init_list", sess);
++ list_add_tail(&sess->sess_init_list_entry,
++ &scst_sess_init_list);
++ spin_unlock_irqrestore(&scst_mgmt_lock, flags);
++ wake_up(&scst_mgmt_waitQ);
++ } else {
++ res = scst_init_session(sess);
++ if (res != 0)
++ goto out_free;
++ }
++
++out:
++ TRACE_EXIT();
++ return sess;
++
++out_free:
++ scst_free_session(sess);
++ sess = NULL;
++ goto out;
++}
++EXPORT_SYMBOL_GPL(scst_register_session);
++
++/**
++ * scst_register_session_non_gpl() - register session (non-GPL version)
++ * @tgt: target
++ * @initiator_name: remote initiator's name, any NULL-terminated string,
++ * e.g. iSCSI name, which used as the key to found appropriate
++ * access control group. Could be NULL, then the default
++ * target's LUNs are used.
++ * @tgt_priv: pointer to target driver's private data
++ *
++ * Description:
++ * Registers new session. Returns new session on success or NULL otherwise.
++ */
++struct scst_session *scst_register_session_non_gpl(struct scst_tgt *tgt,
++ const char *initiator_name, void *tgt_priv)
++{
++ return scst_register_session(tgt, 0, initiator_name, tgt_priv,
++ NULL, NULL);
++}
++EXPORT_SYMBOL(scst_register_session_non_gpl);
++
++/**
++ * scst_unregister_session() - unregister session
++ * @sess: session to be unregistered
++ * @wait: if true, instructs to wait until all commands, which
++ * currently is being executed and belonged to the session,
++ * finished. Otherwise, target driver should be prepared to
++ * receive xmit_response() for the session's command after
++ * scst_unregister_session() returns.
++ * @unreg_done_fn: pointer to the function that will be asynchronously called
++ * when the last session's command finishes and
++ * the session is about to be completely freed. Can be NULL.
++ * Parameter:
++ * - sess - session
++ *
++ * Unregisters session.
++ *
++ * Notes:
++ * - All outstanding commands will be finished regularly. After
++ * scst_unregister_session() returned, no new commands must be sent to
++ * SCST via scst_rx_cmd().
++ *
++ * - The caller must ensure that no scst_rx_cmd() or scst_rx_mgmt_fn_*() is
++ * called in parallel with scst_unregister_session().
++ *
++ * - Can be called before result_fn() of scst_register_session() called,
++ * i.e. during the session registration/initialization.
++ *
++ * - It is highly recommended to call scst_unregister_session() as soon as it
++ * gets clear that session will be unregistered and not to wait until all
++ * related commands finished. This function provides the wait functionality,
++ * but it also starts recovering stuck commands, if there are any.
++ * Otherwise, your target driver could wait for those commands forever.
++ */
++void scst_unregister_session(struct scst_session *sess, int wait,
++ void (*unreg_done_fn) (struct scst_session *sess))
++{
++ unsigned long flags;
++ DECLARE_COMPLETION_ONSTACK(c);
++ int rc, lun;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("Unregistering session %p (wait %d)", sess, wait);
++
++ sess->unreg_done_fn = unreg_done_fn;
++
++ /* Abort all outstanding commands and clear reservation, if necessary */
++ lun = 0;
++ rc = scst_rx_mgmt_fn_lun(sess, SCST_UNREG_SESS_TM,
++ (uint8_t *)&lun, sizeof(lun), SCST_ATOMIC, NULL);
++ if (rc != 0) {
++ PRINT_ERROR("SCST_UNREG_SESS_TM failed %d (sess %p)",
++ rc, sess);
++ }
++
++ sess->shut_phase = SCST_SESS_SPH_SHUTDOWN;
++
++ spin_lock_irqsave(&scst_mgmt_lock, flags);
++
++ if (wait)
++ sess->shutdown_compl = &c;
++
++ spin_unlock_irqrestore(&scst_mgmt_lock, flags);
++
++ scst_sess_put(sess);
++
++ if (wait) {
++ TRACE_DBG("Waiting for session %p to complete", sess);
++ wait_for_completion(&c);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL_GPL(scst_unregister_session);
++
++/**
++ * scst_unregister_session_non_gpl() - unregister session, non-GPL version
++ * @sess: session to be unregistered
++ *
++ * Unregisters session.
++ *
++ * See notes for scst_unregister_session() above.
++ */
++void scst_unregister_session_non_gpl(struct scst_session *sess)
++{
++ TRACE_ENTRY();
++
++ scst_unregister_session(sess, 1, NULL);
++
++ TRACE_EXIT();
++ return;
++}
++EXPORT_SYMBOL(scst_unregister_session_non_gpl);
++
++static inline int test_mgmt_list(void)
++{
++ int res = !list_empty(&scst_sess_init_list) ||
++ !list_empty(&scst_sess_shut_list) ||
++ unlikely(kthread_should_stop());
++ return res;
++}
++
++int scst_global_mgmt_thread(void *arg)
++{
++ struct scst_session *sess;
++
++ TRACE_ENTRY();
++
++ PRINT_INFO("Management thread started, PID %d", current->pid);
++
++ current->flags |= PF_NOFREEZE;
++
++ set_user_nice(current, -10);
++
++ spin_lock_irq(&scst_mgmt_lock);
++ while (!kthread_should_stop()) {
++ wait_queue_t wait;
++ init_waitqueue_entry(&wait, current);
++
++ if (!test_mgmt_list()) {
++ add_wait_queue_exclusive(&scst_mgmt_waitQ, &wait);
++ for (;;) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ if (test_mgmt_list())
++ break;
++ spin_unlock_irq(&scst_mgmt_lock);
++ schedule();
++ spin_lock_irq(&scst_mgmt_lock);
++ }
++ set_current_state(TASK_RUNNING);
++ remove_wait_queue(&scst_mgmt_waitQ, &wait);
++ }
++
++ while (!list_empty(&scst_sess_init_list)) {
++ sess = list_entry(scst_sess_init_list.next,
++ typeof(*sess), sess_init_list_entry);
++ TRACE_DBG("Removing sess %p from scst_sess_init_list",
++ sess);
++ list_del(&sess->sess_init_list_entry);
++ spin_unlock_irq(&scst_mgmt_lock);
++
++ if (sess->init_phase == SCST_SESS_IPH_INITING)
++ scst_init_session(sess);
++ else {
++ PRINT_CRIT_ERROR("session %p is in "
++ "scst_sess_init_list, but in unknown "
++ "init phase %x", sess,
++ sess->init_phase);
++ BUG();
++ }
++
++ spin_lock_irq(&scst_mgmt_lock);
++ }
++
++ while (!list_empty(&scst_sess_shut_list)) {
++ sess = list_entry(scst_sess_shut_list.next,
++ typeof(*sess), sess_shut_list_entry);
++ TRACE_DBG("Removing sess %p from scst_sess_shut_list",
++ sess);
++ list_del(&sess->sess_shut_list_entry);
++ spin_unlock_irq(&scst_mgmt_lock);
++
++ switch (sess->shut_phase) {
++ case SCST_SESS_SPH_SHUTDOWN:
++ BUG_ON(atomic_read(&sess->refcnt) != 0);
++ scst_free_session_callback(sess);
++ break;
++ default:
++ PRINT_CRIT_ERROR("session %p is in "
++ "scst_sess_shut_list, but in unknown "
++ "shut phase %lx", sess,
++ sess->shut_phase);
++ BUG();
++ break;
++ }
++
++ spin_lock_irq(&scst_mgmt_lock);
++ }
++ }
++ spin_unlock_irq(&scst_mgmt_lock);
++
++ /*
++ * If kthread_should_stop() is true, we are guaranteed to be
++ * on the module unload, so both lists must be empty.
++ */
++ BUG_ON(!list_empty(&scst_sess_init_list));
++ BUG_ON(!list_empty(&scst_sess_shut_list));
++
++ PRINT_INFO("Management thread PID %d finished", current->pid);
++
++ TRACE_EXIT();
++ return 0;
++}
++
++/* Called under sess->sess_list_lock */
++static struct scst_cmd *__scst_find_cmd_by_tag(struct scst_session *sess,
++ uint64_t tag, bool to_abort)
++{
++ struct scst_cmd *cmd, *res = NULL;
++
++ TRACE_ENTRY();
++
++ /* ToDo: hash list */
++
++ TRACE_DBG("%s (sess=%p, tag=%llu)", "Searching in sess cmd list",
++ sess, (long long unsigned int)tag);
++
++ list_for_each_entry(cmd, &sess->sess_cmd_list,
++ sess_cmd_list_entry) {
++ if (cmd->tag == tag) {
++ /*
++ * We must not count done commands, because
++ * they were submitted for transmission.
++ * Otherwise we can have a race, when for
++ * some reason cmd's release delayed
++ * after transmission and initiator sends
++ * cmd with the same tag => it can be possible
++ * that a wrong cmd will be returned.
++ */
++ if (cmd->done) {
++ if (to_abort) {
++ /*
++ * We should return the latest not
++ * aborted cmd with this tag.
++ */
++ if (res == NULL)
++ res = cmd;
++ else {
++ if (test_bit(SCST_CMD_ABORTED,
++ &res->cmd_flags)) {
++ res = cmd;
++ } else if (!test_bit(SCST_CMD_ABORTED,
++ &cmd->cmd_flags))
++ res = cmd;
++ }
++ }
++ continue;
++ } else {
++ res = cmd;
++ break;
++ }
++ }
++ }
++
++ TRACE_EXIT();
++ return res;
++}
++
++/**
++ * scst_find_cmd() - find command by custom comparison function
++ *
++ * Finds a command based on user supplied data and comparison
++ * callback function, that should return true, if the command is found.
++ * Returns the command on success or NULL otherwise.
++ */
++struct scst_cmd *scst_find_cmd(struct scst_session *sess, void *data,
++ int (*cmp_fn) (struct scst_cmd *cmd,
++ void *data))
++{
++ struct scst_cmd *cmd = NULL;
++ unsigned long flags = 0;
++
++ TRACE_ENTRY();
++
++ if (cmp_fn == NULL)
++ goto out;
++
++ spin_lock_irqsave(&sess->sess_list_lock, flags);
++
++ TRACE_DBG("Searching in sess cmd list (sess=%p)", sess);
++ list_for_each_entry(cmd, &sess->sess_cmd_list, sess_cmd_list_entry) {
++ /*
++ * We must not count done commands, because they were
++ * submitted for transmission. Otherwise we can have a race,
++ * when for some reason cmd's release delayed after
++ * transmission and initiator sends cmd with the same tag =>
++ * it can be possible that a wrong cmd will be returned.
++ */
++ if (cmd->done)
++ continue;
++ if (cmp_fn(cmd, data))
++ goto out_unlock;
++ }
++
++ cmd = NULL;
++
++out_unlock:
++ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
++
++out:
++ TRACE_EXIT();
++ return cmd;
++}
++EXPORT_SYMBOL(scst_find_cmd);
++
++/**
++ * scst_find_cmd_by_tag() - find command by tag
++ *
++ * Finds a command based on the supplied tag comparing it with one
++ * that previously set by scst_cmd_set_tag(). Returns the found command on
++ * success or NULL otherwise.
++ */
++struct scst_cmd *scst_find_cmd_by_tag(struct scst_session *sess,
++ uint64_t tag)
++{
++ unsigned long flags;
++ struct scst_cmd *cmd;
++ spin_lock_irqsave(&sess->sess_list_lock, flags);
++ cmd = __scst_find_cmd_by_tag(sess, tag, false);
++ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
++ return cmd;
++}
++EXPORT_SYMBOL(scst_find_cmd_by_tag);
+diff -uprN orig/linux-2.6.39/drivers/scst/scst_lib.c linux-2.6.39/drivers/scst/scst_lib.c
+--- orig/linux-2.6.39/drivers/scst/scst_lib.c
++++ linux-2.6.39/drivers/scst/scst_lib.c
+@@ -0,0 +1,7481 @@
+/*
+ * scst_lib.c
+ *
@@ -5197,11 +15281,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+#include "scst_pres.h"
+
+struct scsi_io_context {
-+ unsigned int full_cdb_used:1;
+ void *data;
+ void (*done)(void *data, char *sense, int result, int resid);
+ char sense[SCST_SENSE_BUFFERSIZE];
-+ unsigned char full_cdb[0];
+};
+static struct kmem_cache *scsi_io_context_cache;
+
@@ -5254,7 +15336,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ * type_disk devkey[0]
+ * type_tape devkey[1]
+ * type_printer devkey[2]
-+ * type_proseccor devkey[3]
++ * type_processor devkey[3]
+ * type_worm devkey[4]
+ * type_cdrom devkey[5]
+ * type_scanner devkey[6]
@@ -5272,7 +15354,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ uint8_t direction; /* init --> target: SCST_DATA_WRITE
+ * target --> init: SCST_DATA_READ
+ */
-+ uint16_t flags; /* opcode -- various flags */
++ uint32_t flags; /* opcode -- various flags */
+ uint8_t off; /* length offset in cdb */
+ int (*get_trans_len)(struct scst_cmd *cmd, uint8_t off);
+};
@@ -5398,7 +15480,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ SCST_DATA_READ, SCST_SMALL_TIMEOUT|SCST_IMPLICIT_HQ|SCST_SKIP_UA|
+ SCST_REG_RESERVE_ALLOWED|
+ SCST_WRITE_EXCL_ALLOWED|SCST_EXCL_ACCESS_ALLOWED,
-+ 4, get_trans_len_1},
++ 3, get_trans_len_2},
+ {0x13, "VOVVVV ", "VERIFY(6)",
+ SCST_DATA_NONE, SCST_TRANSFER_LEN_TYPE_FIXED|
+ SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED|
@@ -5409,13 +15491,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ SCST_WRITE_EXCL_ALLOWED,
+ 2, get_trans_len_3},
+ {0x15, "OMOOOOOOOOOOOOOO", "MODE SELECT(6)",
-+ SCST_DATA_WRITE, SCST_IMPLICIT_ORDERED, 4, get_trans_len_1},
++ SCST_DATA_WRITE, SCST_STRICTLY_SERIALIZED, 4, get_trans_len_1},
+ {0x16, "MMMMMMMMMMMMMMMM", "RESERVE",
-+ SCST_DATA_NONE, SCST_SMALL_TIMEOUT|SCST_LOCAL_CMD|
++ SCST_DATA_NONE, SCST_SMALL_TIMEOUT|SCST_LOCAL_CMD|SCST_SERIALIZED|
+ SCST_WRITE_EXCL_ALLOWED|SCST_EXCL_ACCESS_ALLOWED,
+ 0, get_trans_len_none},
+ {0x17, "MMMMMMMMMMMMMMMM", "RELEASE",
-+ SCST_DATA_NONE, SCST_SMALL_TIMEOUT|SCST_LOCAL_CMD|
++ SCST_DATA_NONE, SCST_SMALL_TIMEOUT|SCST_LOCAL_CMD|SCST_SERIALIZED|
+ SCST_REG_RESERVE_ALLOWED|
+ SCST_WRITE_EXCL_ALLOWED|SCST_EXCL_ACCESS_ALLOWED,
+ 0, get_trans_len_none},
@@ -5545,6 +15627,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ 0, get_trans_len_single},
+ {0x42, " O ", "READ SUB-CHANNEL",
+ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
++ {0x42, "O ", "UNMAP",
++ SCST_DATA_WRITE, SCST_WRITE_MEDIUM, 7, get_trans_len_2},
+ {0x43, " O ", "READ TOC/PMA/ATIP",
+ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
+ {0x44, " M ", "REPORT DENSITY SUPPORT",
@@ -5569,7 +15653,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ {0x4B, " O ", "PAUSE/RESUME",
+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
+ {0x4C, "OOOOOOOOOOOOOOOO", "LOG SELECT",
-+ SCST_DATA_WRITE, SCST_IMPLICIT_ORDERED, 7, get_trans_len_2},
++ SCST_DATA_WRITE, SCST_STRICTLY_SERIALIZED, 7, get_trans_len_2},
+ {0x4D, "OOOOOOOOOOOOOOOO", "LOG SENSE",
+ SCST_DATA_READ, SCST_SMALL_TIMEOUT|
+ SCST_REG_RESERVE_ALLOWED|
@@ -5595,13 +15679,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ {0x54, " O ", "SEND OPC INFORMATION",
+ SCST_DATA_WRITE, FLAG_NONE, 7, get_trans_len_2},
+ {0x55, "OOOOOOOOOOOOOOOO", "MODE SELECT(10)",
-+ SCST_DATA_WRITE, SCST_IMPLICIT_ORDERED, 7, get_trans_len_2},
++ SCST_DATA_WRITE, SCST_STRICTLY_SERIALIZED, 7, get_trans_len_2},
+ {0x56, "OOOOOOOOOOOOOOOO", "RESERVE(10)",
-+ SCST_DATA_NONE, SCST_SMALL_TIMEOUT|SCST_LOCAL_CMD,
++ SCST_DATA_NONE, SCST_SMALL_TIMEOUT|SCST_LOCAL_CMD|SCST_SERIALIZED|
++ SCST_WRITE_EXCL_ALLOWED|SCST_EXCL_ACCESS_ALLOWED,
+ 0, get_trans_len_none},
+ {0x57, "OOOOOOOOOOOOOOOO", "RELEASE(10)",
-+ SCST_DATA_NONE, SCST_SMALL_TIMEOUT|SCST_LOCAL_CMD|
-+ SCST_REG_RESERVE_ALLOWED,
++ SCST_DATA_NONE, SCST_SMALL_TIMEOUT|SCST_LOCAL_CMD|SCST_SERIALIZED|
++ SCST_REG_RESERVE_ALLOWED|
++ SCST_WRITE_EXCL_ALLOWED|SCST_EXCL_ACCESS_ALLOWED,
+ 0, get_trans_len_none},
+ {0x58, " O ", "REPAIR TRACK",
+ SCST_DATA_NONE, SCST_WRITE_MEDIUM, 0, get_trans_len_none},
@@ -5613,15 +15699,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
+ {0x5D, " O ", "SEND CUE SHEET",
+ SCST_DATA_WRITE, FLAG_NONE, 6, get_trans_len_3},
-+ {0x5E, "OOOOO OOOO ", "PERSISTENT RESERV IN",
++ {0x5E, "OOOOO OOOO ", "PERSISTENT RESERVE IN",
+ SCST_DATA_READ, SCST_SMALL_TIMEOUT|
-+ SCST_LOCAL_CMD|
++ SCST_LOCAL_CMD|SCST_SERIALIZED|
+ SCST_WRITE_EXCL_ALLOWED|
+ SCST_EXCL_ACCESS_ALLOWED,
+ 5, get_trans_len_4},
-+ {0x5F, "OOOOO OOOO ", "PERSISTENT RESERV OUT",
++ {0x5F, "OOOOO OOOO ", "PERSISTENT RESERVE OUT",
+ SCST_DATA_WRITE, SCST_SMALL_TIMEOUT|
-+ SCST_LOCAL_CMD|
++ SCST_LOCAL_CMD|SCST_SERIALIZED|
+ SCST_WRITE_EXCL_ALLOWED|
+ SCST_EXCL_ACCESS_ALLOWED,
+ 5, get_trans_len_4},
@@ -5824,7 +15910,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ const uint8_t *sense, int sense_len, int flags);
+static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev);
+static void scst_release_space(struct scst_cmd *cmd);
-+static void scst_unblock_cmds(struct scst_device *dev);
+static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);
+static int scst_alloc_add_tgt_dev(struct scst_session *sess,
+ struct scst_acg_dev *acg_dev, struct scst_tgt_dev **out_tgt_dev);
@@ -5842,7 +15927,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ * scst_alloc_sense() - allocate sense buffer for command
+ *
+ * Allocates, if necessary, sense buffer for command. Returns 0 on success
-+ * and error code othrwise. Parameter "atomic" should be non-0 if the
++ * and error code otherwise. Parameter "atomic" should be non-0 if the
+ * function called in atomic context.
+ */
+int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
@@ -5880,7 +15965,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ *
+ * Allocates, if necessary, sense buffer for command and copies in
+ * it data from the supplied sense buffer. Returns 0 on success
-+ * and error code othrwise.
++ * and error code otherwise.
+ */
+int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
+ const uint8_t *sense, unsigned int len)
@@ -5931,6 +16016,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+
+ TRACE_ENTRY();
+
++ if (status == SAM_STAT_RESERVATION_CONFLICT) {
++ TRACE(TRACE_SCSI|TRACE_MINOR, "Reservation conflict (dev %s, "
++ "initiator %s, tgt_id %d)",
++ cmd->dev ? cmd->dev->virt_name : NULL,
++ cmd->sess->initiator_name, cmd->tgt->rel_tgt_id);
++ }
++
+ if (cmd->status != 0) {
+ TRACE_MGMT_DBG("cmd %p already has status %x set", cmd,
+ cmd->status);
@@ -6349,7 +16441,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ cmd);
+ if ((cmd->sense_buflen < 18) || (cmd->sense_valid_len < 8)) {
+ PRINT_ERROR("Sense too small to convert (%d, "
-+ "type: descryptor, valid %d)",
++ "type: descriptor, valid %d)",
+ cmd->sense_buflen, cmd->sense_valid_len);
+ goto out;
+ }
@@ -6428,16 +16520,14 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ TRACE_MGMT_DBG("Setting for sess %p initial UA %x/%x/%x", sess, key,
+ asc, ascq);
+
-+ /* Protect sess_tgt_dev_list_hash */
++ /* To protect sess_tgt_dev_list */
+ mutex_lock(&scst_mutex);
+
-+ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
-+ struct list_head *sess_tgt_dev_list_head =
-+ &sess->sess_tgt_dev_list_hash[i];
++ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
++ struct list_head *head = &sess->sess_tgt_dev_list[i];
+ struct scst_tgt_dev *tgt_dev;
+
-+ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
-+ sess_tgt_dev_list_entry) {
++ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
+ spin_lock_bh(&tgt_dev->tgt_dev_lock);
+ if (!list_empty(&tgt_dev->UA_list)) {
+ struct scst_tgt_dev_UA *ua;
@@ -6470,7 +16560,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+}
+EXPORT_SYMBOL(scst_set_initial_UA);
+
-+static struct scst_aen *scst_alloc_aen(struct scst_session *sess,
++struct scst_aen *scst_alloc_aen(struct scst_session *sess,
+ uint64_t unpacked_lun)
+{
+ struct scst_aen *aen;
@@ -6494,9 +16584,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+out:
+ TRACE_EXIT_HRES((unsigned long)aen);
+ return aen;
-+};
++}
+
-+static void scst_free_aen(struct scst_aen *aen)
++void scst_free_aen(struct scst_aen *aen)
+{
+ TRACE_ENTRY();
+
@@ -6505,7 +16595,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+
+ TRACE_EXIT();
+ return;
-+};
++}
+
+/* Must be called under scst_mutex */
+void scst_gen_aen_or_ua(struct scst_tgt_dev *tgt_dev,
@@ -6546,7 +16636,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ }
+
+queue_ua:
-+ TRACE_MGMT_DBG("AEN not supported, queuing plain UA (tgt_dev %p)",
++ TRACE_MGMT_DBG("AEN not supported, queueing plain UA (tgt_dev %p)",
+ tgt_dev);
+ sl = scst_set_sense(sense_buffer, sizeof(sense_buffer),
+ tgt_dev->dev->d_sense, key, asc, ascq);
@@ -6617,32 +16707,31 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ int flags)
+{
+ uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
-+ struct list_head *shead;
++ struct list_head *head;
+ struct scst_tgt_dev *tgt_dev;
+ int i;
+
+ TRACE_ENTRY();
+
-+ TRACE_MGMT_DBG("Queuing REPORTED LUNS DATA CHANGED UA "
++ TRACE_MGMT_DBG("Queueing REPORTED LUNS DATA CHANGED UA "
+ "(sess %p)", sess);
+
+ local_bh_disable();
+
-+ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
-+ shead = &sess->sess_tgt_dev_list_hash[i];
++ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
++ head = &sess->sess_tgt_dev_list[i];
+
-+ list_for_each_entry(tgt_dev, shead,
++ list_for_each_entry(tgt_dev, head,
+ sess_tgt_dev_list_entry) {
+ /* Lockdep triggers here a false positive.. */
+ spin_lock(&tgt_dev->tgt_dev_lock);
+ }
+ }
+
-+ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
-+ shead = &sess->sess_tgt_dev_list_hash[i];
++ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
++ head = &sess->sess_tgt_dev_list[i];
+
-+ list_for_each_entry(tgt_dev, shead,
-+ sess_tgt_dev_list_entry) {
++ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
+ int sl;
+
+ if (!scst_is_report_luns_changed_type(
@@ -6658,11 +16747,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ }
+ }
+
-+ for (i = TGT_DEV_HASH_SIZE-1; i >= 0; i--) {
-+ shead = &sess->sess_tgt_dev_list_hash[i];
++ for (i = SESS_TGT_DEV_LIST_HASH_SIZE-1; i >= 0; i--) {
++ head = &sess->sess_tgt_dev_list[i];
+
-+ list_for_each_entry_reverse(tgt_dev,
-+ shead, sess_tgt_dev_list_entry) {
++ list_for_each_entry_reverse(tgt_dev, head,
++ sess_tgt_dev_list_entry) {
+ spin_unlock(&tgt_dev->tgt_dev_lock);
+ }
+ }
@@ -6689,13 +16778,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+
+ TRACE_DBG("REPORTED LUNS DATA CHANGED (sess %p)", sess);
+
-+ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
-+ struct list_head *shead;
++ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
++ struct list_head *head;
+ struct scst_tgt_dev *tgt_dev;
+
-+ shead = &sess->sess_tgt_dev_list_hash[i];
++ head = &sess->sess_tgt_dev_list[i];
+
-+ list_for_each_entry(tgt_dev, shead,
++ list_for_each_entry(tgt_dev, head,
+ sess_tgt_dev_list_entry) {
+ if (scst_is_report_luns_changed_type(
+ tgt_dev->dev->type)) {
@@ -6788,7 +16877,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ SCST_SET_UA_FLAG_AT_HEAD);
+ mutex_unlock(&scst_mutex);
+ } else {
-+ struct list_head *shead;
++ struct list_head *head;
+ struct scst_tgt_dev *tgt_dev;
+ uint64_t lun;
+
@@ -6797,8 +16886,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ mutex_lock(&scst_mutex);
+
+ /* tgt_dev might get dead, so we need to reseek it */
-+ shead = &aen->sess->sess_tgt_dev_list_hash[HASH_VAL(lun)];
-+ list_for_each_entry(tgt_dev, shead,
++ head = &aen->sess->sess_tgt_dev_list[SESS_TGT_DEV_LIST_HASH_FN(lun)];
++ list_for_each_entry(tgt_dev, head,
+ sess_tgt_dev_list_entry) {
+ if (tgt_dev->lun == lun) {
+ TRACE_MGMT_DBG("Requeuing failed AEN UA for "
@@ -6850,7 +16939,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ struct scst_acg *acg, *old_acg;
+ struct scst_acg_dev *acg_dev;
+ int i, rc;
-+ struct list_head *shead;
++ struct list_head *head;
+ struct scst_tgt_dev *tgt_dev;
+ bool luns_changed = false;
+ bool add_failed, something_freed, not_needed_freed = false;
@@ -6880,10 +16969,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
+ unsigned int inq_changed_ua_needed = 0;
+
-+ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
-+ shead = &sess->sess_tgt_dev_list_hash[i];
++ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
++ head = &sess->sess_tgt_dev_list[i];
+
-+ list_for_each_entry(tgt_dev, shead,
++ list_for_each_entry(tgt_dev, head,
+ sess_tgt_dev_list_entry) {
+ if ((tgt_dev->dev == acg_dev->dev) &&
+ (tgt_dev->lun == acg_dev->lun) &&
@@ -6920,11 +17009,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+
+ something_freed = false;
+ not_needed_freed = true;
-+ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
++ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
+ struct scst_tgt_dev *t;
-+ shead = &sess->sess_tgt_dev_list_hash[i];
++ head = &sess->sess_tgt_dev_list[i];
+
-+ list_for_each_entry_safe(tgt_dev, t, shead,
++ list_for_each_entry_safe(tgt_dev, t, head,
+ sess_tgt_dev_list_entry) {
+ if (tgt_dev->acg_dev->acg != acg) {
+ TRACE_MGMT_DBG("sess %p: Deleting not used "
@@ -6955,10 +17044,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ if (luns_changed) {
+ scst_report_luns_changed_sess(sess);
+
-+ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
-+ shead = &sess->sess_tgt_dev_list_hash[i];
++ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
++ head = &sess->sess_tgt_dev_list[i];
+
-+ list_for_each_entry(tgt_dev, shead,
++ list_for_each_entry(tgt_dev, head,
+ sess_tgt_dev_list_entry) {
+ if (tgt_dev->inq_changed_ua_needed) {
+ TRACE_MGMT_DBG("sess %p: Setting "
@@ -7047,6 +17136,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ case SCST_CMD_STATE_RDY_TO_XFER:
+ case SCST_CMD_STATE_DATA_WAIT:
+ case SCST_CMD_STATE_TGT_PRE_EXEC:
++ case SCST_CMD_STATE_START_EXEC:
+ case SCST_CMD_STATE_SEND_FOR_EXEC:
+ case SCST_CMD_STATE_LOCAL_EXEC:
+ case SCST_CMD_STATE_REAL_EXEC:
@@ -7058,7 +17148,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
+ cmd->state, cmd, cmd->cdb[0]);
+ BUG();
-+ /* Invalid state to supress compiler's warning */
++ /* Invalid state to suppress a compiler warning */
+ res = SCST_CMD_STATE_LAST_ACTIVE;
+ }
+
@@ -7106,6 +17196,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ break;
+ case SCST_CMD_STATE_TGT_PRE_EXEC:
+ case SCST_CMD_STATE_SEND_FOR_EXEC:
++ case SCST_CMD_STATE_START_EXEC:
+ case SCST_CMD_STATE_LOCAL_EXEC:
+ case SCST_CMD_STATE_REAL_EXEC:
+ case SCST_CMD_STATE_REAL_EXECUTING:
@@ -7113,6 +17204,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ case SCST_CMD_STATE_PRE_DEV_DONE:
+ case SCST_CMD_STATE_MODE_SELECT_CHECKS:
+ case SCST_CMD_STATE_PRE_XMIT_RESP:
++ case SCST_CMD_STATE_FINISHED_INTERNAL:
+ break;
+ default:
+ PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
@@ -7144,7 +17236,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ TRACE_ENTRY();
+
+ len = scst_get_sg_buf_first(cmd, &buf, *cmd->write_sg,
-+ *cmd->write_sg_cnt);
++ *cmd->write_sg_cnt);
+ while (len > 0) {
+ int cur_offs;
+
@@ -7171,30 +17263,36 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+static void scst_adjust_sg(struct scst_cmd *cmd, struct scatterlist *sg,
+ int *sg_cnt, int adjust_len)
+{
-+ int i, l;
++ int i, j, l;
+
+ TRACE_ENTRY();
+
+ l = 0;
-+ for (i = 0; i < *sg_cnt; i++) {
-+ l += sg[i].length;
++ for (i = 0, j = 0; i < *sg_cnt; i++, j++) {
++ TRACE_DBG("i %d, j %d, sg_cnt %d, sg %p, page_link %lx", i, j,
++ *sg_cnt, sg, sg[j].page_link);
++ if (unlikely(sg_is_chain(&sg[j]))) {
++ sg = sg_chain_ptr(&sg[j]);
++ j = 0;
++ }
++ l += sg[j].length;
+ if (l >= adjust_len) {
-+ int left = adjust_len - (l - sg[i].length);
++ int left = adjust_len - (l - sg[j].length);
+#ifdef CONFIG_SCST_DEBUG
+ TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
-+ "sg %p, sg_cnt %d, adjust_len %d, i %d, "
-+ "sg[i].length %d, left %d",
++ "sg %p, sg_cnt %d, adjust_len %d, i %d, j %d, "
++ "sg[j].length %d, left %d",
+ cmd, (long long unsigned int)cmd->tag,
-+ sg, *sg_cnt, adjust_len, i,
-+ sg[i].length, left);
++ sg, *sg_cnt, adjust_len, i, j,
++ sg[j].length, left);
+#endif
+ cmd->orig_sg = sg;
+ cmd->p_orig_sg_cnt = sg_cnt;
+ cmd->orig_sg_cnt = *sg_cnt;
-+ cmd->orig_sg_entry = i;
-+ cmd->orig_entry_len = sg[i].length;
-+ *sg_cnt = (left > 0) ? i+1 : i;
-+ sg[i].length = left;
++ cmd->orig_sg_entry = j;
++ cmd->orig_entry_len = sg[j].length;
++ *sg_cnt = (left > 0) ? j+1 : j;
++ sg[j].length = left;
+ cmd->sg_buff_modified = 1;
+ break;
+ }
@@ -7239,6 +17337,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ if (resp_data_len == cmd->bufflen)
+ goto out;
+
++ TRACE_DBG("cmd %p, resp_data_len %d", cmd, resp_data_len);
++
+ scst_adjust_sg(cmd, cmd->sg, &cmd->sg_cnt, resp_data_len);
+
+ cmd->resid_possible = 1;
@@ -7276,7 +17376,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ cmd->expected_transfer_len);
+
+ if (cmd->adjusted_resp_data_len != cmd->resp_data_len) {
-+ TRACE_MEM("Abjusting resp_data_len to %d (cmd %p, sg %p, "
++ TRACE_MEM("Adjusting resp_data_len to %d (cmd %p, sg %p, "
+ "sg_cnt %d)", cmd->adjusted_resp_data_len, cmd, cmd->sg,
+ cmd->sg_cnt);
+ scst_check_restore_sg_buff(cmd);
@@ -7299,7 +17399,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+{
+ TRACE_ENTRY();
+
-+ BUG_ON(!cmd->expected_values_set);
++ if (!cmd->expected_values_set) {
++ /*
++ * No expected values set, so no residuals processing.
++ * It can happen if a command preliminary completed before
++ * target driver had a chance to set expected values.
++ */
++ TRACE_MGMT_DBG("No expected values set, ignoring (cmd %p)", cmd);
++ goto out;
++ }
+
+ cmd->resid_possible = 1;
+
@@ -7339,13 +17447,25 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ */
+bool __scst_get_resid(struct scst_cmd *cmd, int *resid, int *bidi_out_resid)
+{
++ bool res;
++
+ TRACE_ENTRY();
+
+ *resid = 0;
+ if (bidi_out_resid != NULL)
+ *bidi_out_resid = 0;
+
-+ BUG_ON(!cmd->expected_values_set);
++ if (!cmd->expected_values_set) {
++ /*
++ * No expected values set, so no residuals processing.
++ * It can happen if a command preliminary completed before
++ * target driver had a chance to set expected values.
++ */
++ TRACE_MGMT_DBG("No expected values set, returning no residual "
++ "(cmd %p)", cmd);
++ res = false;
++ goto out;
++ }
+
+ if (cmd->expected_data_direction & SCST_DATA_READ) {
+ *resid = cmd->expected_transfer_len - cmd->resp_data_len;
@@ -7363,13 +17483,16 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ *resid = cmd->write_len - cmd->bufflen;
+ }
+
++ res = true;
++
+ TRACE_DBG("cmd %p, resid %d, bidi_out_resid %d (resp_data_len %d, "
+ "expected_data_direction %d, write_len %d, bufflen %d)", cmd,
+ *resid, bidi_out_resid ? *bidi_out_resid : 0, cmd->resp_data_len,
+ cmd->expected_data_direction, cmd->write_len, cmd->bufflen);
+
-+ TRACE_EXIT_RES(1);
-+ return true;
++out:
++ TRACE_EXIT_RES(res);
++ return res;
+}
+EXPORT_SYMBOL(__scst_get_resid);
+
@@ -7387,7 +17510,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ /*
+ * Memory barrier is needed here, because we need the exact order
+ * between the read and write between retry_cmds and finished_cmds to
-+ * not miss the case when a command finished while we queuing it for
++ * not miss the case when a command finished while we queueing it for
+ * retry after the finished_cmds check.
+ */
+ smp_mb();
@@ -7446,7 +17569,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+EXPORT_SYMBOL_GPL(scst_update_hw_pending_start);
+
+/*
-+ * Supposed to be called under sess_list_lock, but can release/reaquire it.
++ * Supposed to be called under sess_list_lock, but can release/reacquire it.
+ * Returns 0 to continue, >0 to restart, <0 to break.
+ */
+static int scst_check_hw_pending_cmd(struct scst_cmd *cmd,
@@ -7631,7 +17754,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (t == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of tgt failed");
++ PRINT_ERROR("%s", "Allocation of tgt failed");
+ res = -ENOMEM;
+ goto out;
+ }
@@ -7662,6 +17785,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ TRACE_ENTRY();
+
+ kfree(tgt->tgt_name);
++ kfree(tgt->tgt_comment);
+
+ kfree(tgt);
+
@@ -7669,6 +17793,21 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ return;
+}
+
++static void scst_init_order_data(struct scst_order_data *order_data)
++{
++ int i;
++ spin_lock_init(&order_data->sn_lock);
++ INIT_LIST_HEAD(&order_data->deferred_cmd_list);
++ INIT_LIST_HEAD(&order_data->skipped_sn_list);
++ order_data->curr_sn = (typeof(order_data->curr_sn))(-300);
++ order_data->expected_sn = order_data->curr_sn + 1;
++ order_data->num_free_sn_slots = ARRAY_SIZE(order_data->sn_slots)-1;
++ order_data->cur_sn_slot = &order_data->sn_slots[0];
++ for (i = 0; i < (int)ARRAY_SIZE(order_data->sn_slots); i++)
++ atomic_set(&order_data->sn_slots[i], 0);
++ return;
++}
++
+/* Called under scst_mutex and suspended activity */
+int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
+{
@@ -7679,15 +17818,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+
+ dev = kzalloc(sizeof(*dev), gfp_mask);
+ if (dev == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "%s",
-+ "Allocation of scst_device failed");
++ PRINT_ERROR("%s", "Allocation of scst_device failed");
+ res = -ENOMEM;
+ goto out;
+ }
+
+ dev->handler = &scst_null_devtype;
+ atomic_set(&dev->dev_cmd_count, 0);
-+ atomic_set(&dev->write_cmd_count, 0);
+ scst_init_mem_lim(&dev->dev_mem_lim);
+ spin_lock_init(&dev->dev_lock);
+ INIT_LIST_HEAD(&dev->blocked_cmd_list);
@@ -7697,7 +17834,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
+
+ mutex_init(&dev->dev_pr_mutex);
-+ atomic_set(&dev->pr_readers_count, 0);
+ dev->pr_generation = 0;
+ dev->pr_is_set = 0;
+ dev->pr_holder = NULL;
@@ -7705,6 +17841,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ dev->pr_type = TYPE_UNSPECIFIED;
+ INIT_LIST_HEAD(&dev->dev_registrants_list);
+
++ scst_init_order_data(&dev->dev_order_data);
++
+ scst_init_threads(&dev->dev_cmd_threads);
+
+ *out_dev = dev;
@@ -7760,8 +17898,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+
+ res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
+ if (res == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM,
-+ "%s", "Allocation of scst_acg_dev failed");
++ PRINT_ERROR("%s", "Allocation of scst_acg_dev failed");
+ goto out;
+ }
+
@@ -7923,13 +18060,14 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ INIT_LIST_HEAD(&acg->acg_dev_list);
+ INIT_LIST_HEAD(&acg->acg_sess_list);
+ INIT_LIST_HEAD(&acg->acn_list);
++ cpumask_copy(&acg->acg_cpu_mask, &default_cpu_mask);
+ acg->acg_name = kstrdup(acg_name, GFP_KERNEL);
+ if (acg->acg_name == NULL) {
+ PRINT_ERROR("%s", "Allocation of acg_name failed");
+ goto out_free;
+ }
+
-+ acg->addr_method = SCST_LUN_ADDR_METHOD_PERIPHERAL;
++ acg->addr_method = tgt->tgtt->preferred_addr_method;
+
+ if (tgt_acg) {
+ int rc;
@@ -8113,9 +18251,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ TRACE_MGMT_DBG("IO context for t %p not yet "
+ "initialized, waiting...", t);
+ msleep(100);
-+ barrier();
+ goto found;
+ }
++ smp_rmb();
+ TRACE_MGMT_DBG("Going to share IO context %p (res %p, ini %s, "
+ "dev %s, cmd_threads %p, grouping type %d)",
+ res->active_cmd_threads->io_context, res,
@@ -8363,16 +18501,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
+ struct scst_tgt_dev *tgt_dev;
+ struct scst_device *dev = acg_dev->dev;
-+ struct list_head *sess_tgt_dev_list_head;
-+ int i, sl;
++ struct list_head *head;
++ int sl;
+ uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
+
+ TRACE_ENTRY();
+
+ tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
+ if (tgt_dev == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_tgt_dev "
-+ "failed");
++ PRINT_ERROR("%s", "Allocation of scst_tgt_dev failed");
+ res = -ENOMEM;
+ goto out;
+ }
@@ -8411,15 +18548,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ INIT_LIST_HEAD(&tgt_dev->UA_list);
+ spin_lock_init(&tgt_dev->thr_data_lock);
+ INIT_LIST_HEAD(&tgt_dev->thr_data_list);
-+ spin_lock_init(&tgt_dev->sn_lock);
-+ INIT_LIST_HEAD(&tgt_dev->deferred_cmd_list);
-+ INIT_LIST_HEAD(&tgt_dev->skipped_sn_list);
-+ tgt_dev->curr_sn = (typeof(tgt_dev->curr_sn))(-300);
-+ tgt_dev->expected_sn = tgt_dev->curr_sn + 1;
-+ tgt_dev->num_free_sn_slots = ARRAY_SIZE(tgt_dev->sn_slots)-1;
-+ tgt_dev->cur_sn_slot = &tgt_dev->sn_slots[0];
-+ for (i = 0; i < (int)ARRAY_SIZE(tgt_dev->sn_slots); i++)
-+ atomic_set(&tgt_dev->sn_slots[i], 0);
++
++ scst_init_order_data(&tgt_dev->tgt_dev_order_data);
++ if (dev->tst == SCST_CONTR_MODE_SEP_TASK_SETS)
++ tgt_dev->curr_order_data = &tgt_dev->tgt_dev_order_data;
++ else
++ tgt_dev->curr_order_data = &dev->dev_order_data;
+
+ if (dev->handler->parse_atomic &&
+ dev->handler->alloc_data_buf_atomic &&
@@ -8480,10 +18614,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
+ spin_unlock_bh(&dev->dev_lock);
+
-+ sess_tgt_dev_list_head =
-+ &sess->sess_tgt_dev_list_hash[HASH_VAL(tgt_dev->lun)];
-+ list_add_tail(&tgt_dev->sess_tgt_dev_list_entry,
-+ sess_tgt_dev_list_head);
++ head = &sess->sess_tgt_dev_list[SESS_TGT_DEV_LIST_HASH_FN(tgt_dev->lun)];
++ list_add_tail(&tgt_dev->sess_tgt_dev_list_entry, head);
+
+ *out_tgt_dev = tgt_dev;
+
@@ -8522,11 +18654,16 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+
+ scst_clear_reservation(tgt_dev);
+
++#if 0 /* Clearing UAs and last sense isn't required by SAM and it looks to be
++ * better to not clear them to not loose important events, so let's
++ * disable it.
++ */
+ /* With activity suspended the lock isn't needed, but let's be safe */
+ spin_lock_bh(&tgt_dev->tgt_dev_lock);
+ scst_free_all_UA(tgt_dev);
+ memset(tgt_dev->tgt_dev_sense, 0, sizeof(tgt_dev->tgt_dev_sense));
+ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
++#endif
+
+ if (queue_UA) {
+ uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
@@ -8621,14 +18758,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ TRACE_ENTRY();
+
+ /* The session is going down, no users, so no locks */
-+ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
-+ struct list_head *sess_tgt_dev_list_head =
-+ &sess->sess_tgt_dev_list_hash[i];
-+ list_for_each_entry_safe(tgt_dev, t, sess_tgt_dev_list_head,
++ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
++ struct list_head *head = &sess->sess_tgt_dev_list[i];
++ list_for_each_entry_safe(tgt_dev, t, head,
+ sess_tgt_dev_list_entry) {
+ scst_free_tgt_dev(tgt_dev);
+ }
-+ INIT_LIST_HEAD(sess_tgt_dev_list_head);
++ INIT_LIST_HEAD(head);
+ }
+
+ TRACE_EXIT();
@@ -8640,7 +18776,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+{
+ int res = 0;
+ struct scst_acn *acn;
-+ int len;
+ char *nm;
+
+ TRACE_ENTRY();
@@ -8663,15 +18798,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+
+ acn->acg = acg;
+
-+ len = strlen(name);
-+ nm = kmalloc(len + 1, GFP_KERNEL);
++ nm = kstrdup(name, GFP_KERNEL);
+ if (nm == NULL) {
+ PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
+ res = -ENOMEM;
+ goto out_free;
+ }
-+
-+ strcpy(nm, name);
+ acn->name = nm;
+
+ res = scst_acn_sysfs_create(acn);
@@ -8738,14 +18870,16 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+}
+
+static struct scst_cmd *scst_create_prepare_internal_cmd(
-+ struct scst_cmd *orig_cmd, int bufsize)
++ struct scst_cmd *orig_cmd, const uint8_t *cdb,
++ unsigned int cdb_len, int bufsize)
+{
+ struct scst_cmd *res;
++ int rc;
+ gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
+
+ TRACE_ENTRY();
+
-+ res = scst_alloc_cmd(gfp_mask);
++ res = scst_alloc_cmd(cdb, cdb_len, gfp_mask);
+ if (res == NULL)
+ goto out;
+
@@ -8757,6 +18891,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ res->tgt = orig_cmd->tgt;
+ res->dev = orig_cmd->dev;
+ res->tgt_dev = orig_cmd->tgt_dev;
++ res->cur_order_data = orig_cmd->tgt_dev->curr_order_data;
+ res->lun = orig_cmd->lun;
+ res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
+ res->data_direction = SCST_DATA_UNKNOWN;
@@ -8765,7 +18900,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+
+ scst_sess_get(res->sess);
+ if (res->tgt_dev != NULL)
-+ __scst_get();
++ res->cpu_cmd_counter = scst_get();
++
++ rc = scst_pre_parse(res);
++ BUG_ON(rc != 0);
+
+ res->state = SCST_CMD_STATE_PARSE;
+
@@ -8792,13 +18930,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ }
+
+ rs_cmd = scst_create_prepare_internal_cmd(orig_cmd,
++ request_sense, sizeof(request_sense),
+ SCST_SENSE_BUFFERSIZE);
+ if (rs_cmd == NULL)
+ goto out_error;
+
-+ memcpy(rs_cmd->cdb, request_sense, sizeof(request_sense));
+ rs_cmd->cdb[1] |= scst_get_cmd_dev_d_sense(orig_cmd);
-+ rs_cmd->cdb_len = sizeof(request_sense);
+ rs_cmd->data_direction = SCST_DATA_READ;
+ rs_cmd->expected_data_direction = rs_cmd->data_direction;
+ rs_cmd->expected_transfer_len = SCST_SENSE_BUFFERSIZE;
@@ -8830,7 +18967,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+
+ BUG_ON(orig_cmd == NULL);
+
-+ len = scst_get_buf_first(req_cmd, &buf);
++ len = scst_get_buf_full(req_cmd, &buf);
+
+ if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
+ SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
@@ -8846,7 +18983,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ }
+
+ if (len > 0)
-+ scst_put_buf(req_cmd, buf);
++ scst_put_buf_full(req_cmd, buf);
+
+ TRACE_MGMT_DBG("Adding orig cmd %p to head of active "
+ "cmd list", orig_cmd);
@@ -8963,18 +19100,16 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+
+ sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
+ if (sess == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "%s",
-+ "Allocation of scst_session failed");
++ PRINT_ERROR("%s", "Allocation of scst_session failed");
+ goto out;
+ }
+
+ sess->init_phase = SCST_SESS_IPH_INITING;
+ sess->shut_phase = SCST_SESS_SPH_READY;
+ atomic_set(&sess->refcnt, 0);
-+ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
-+ struct list_head *sess_tgt_dev_list_head =
-+ &sess->sess_tgt_dev_list_hash[i];
-+ INIT_LIST_HEAD(sess_tgt_dev_list_head);
++ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
++ struct list_head *head = &sess->sess_tgt_dev_list[i];
++ INIT_LIST_HEAD(head);
+ }
+ spin_lock_init(&sess->sess_list_lock);
+ INIT_LIST_HEAD(&sess->sess_cmd_list);
@@ -9012,9 +19147,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+
+ scst_sess_free_tgt_devs(sess);
+
-+ /* tgt will stay alive at least until its sysfs alive */
-+ kobject_get(&sess->tgt->tgt_kobj);
-+
+ mutex_unlock(&scst_mutex);
+ scst_sess_sysfs_del(sess);
+ mutex_lock(&scst_mutex);
@@ -9029,11 +19161,14 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
+ list_del(&sess->acg_sess_list_entry);
+
-+ mutex_unlock(&scst_mutex);
-+
++ /* Called under lock to protect from too early tgt release */
+ wake_up_all(&sess->tgt->unreg_waitQ);
+
-+ kobject_put(&sess->tgt->tgt_kobj);
++ /*
++ * NOTE: do not dereference the sess->tgt pointer after scst_mutex
++ * has been unlocked, because it can be already dead!!
++ */
++ mutex_unlock(&scst_mutex);
+
+ kfree(sess->transport_id);
+ kfree(sess->initiator_name);
@@ -9120,7 +19255,53 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+}
+EXPORT_SYMBOL(scst_cmd_put);
+
-+struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask)
++/**
++ * scst_cmd_set_ext_cdb() - sets cmd's extended CDB and its length
++ */
++void scst_cmd_set_ext_cdb(struct scst_cmd *cmd,
++ uint8_t *ext_cdb, unsigned int ext_cdb_len,
++ gfp_t gfp_mask)
++{
++ unsigned int len = cmd->cdb_len + ext_cdb_len;
++
++ TRACE_ENTRY();
++
++ if (len <= sizeof(cmd->cdb_buf))
++ goto copy;
++
++ if (unlikely(len > SCST_MAX_LONG_CDB_SIZE)) {
++ PRINT_ERROR("Too big CDB (%d)", len);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out;
++ }
++
++ cmd->cdb = kmalloc(len, gfp_mask);
++ if (unlikely(cmd->cdb == NULL)) {
++ PRINT_ERROR("Unable to alloc extended CDB (size %d)", len);
++ goto out_err;
++ }
++
++ memcpy(cmd->cdb, cmd->cdb_buf, cmd->cdb_len);
++
++copy:
++ memcpy(&cmd->cdb[cmd->cdb_len], ext_cdb, ext_cdb_len);
++
++ cmd->cdb_len = cmd->cdb_len + ext_cdb_len;
++
++out:
++ TRACE_EXIT();
++ return;
++
++out_err:
++ cmd->cdb = cmd->cdb_buf;
++ scst_set_busy(cmd);
++ goto out;
++}
++EXPORT_SYMBOL(scst_cmd_set_ext_cdb);
++
++struct scst_cmd *scst_alloc_cmd(const uint8_t *cdb,
++ unsigned int cdb_len, gfp_t gfp_mask)
+{
+ struct scst_cmd *cmd;
+
@@ -9137,6 +19318,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ atomic_set(&cmd->cmd_ref, 1);
+ cmd->cmd_threads = &scst_main_cmd_threads;
+ INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
++ cmd->cdb = cmd->cdb_buf;
+ cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
+ cmd->timeout = SCST_DEFAULT_TIMEOUT;
+ cmd->retries = 0;
@@ -9149,9 +19331,36 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
+ cmd->dbl_ua_orig_resp_data_len = -1;
+
++ if (unlikely(cdb_len == 0)) {
++ PRINT_ERROR("%s", "Wrong CDB len 0, finishing cmd");
++ goto out_free;
++ } else if (cdb_len <= SCST_MAX_CDB_SIZE) {
++ /* Duplicate memcpy to save a branch on the most common path */
++ memcpy(cmd->cdb, cdb, cdb_len);
++ } else {
++ if (unlikely(cdb_len > SCST_MAX_LONG_CDB_SIZE)) {
++ PRINT_ERROR("Too big CDB (%d), finishing cmd", cdb_len);
++ goto out_free;
++ }
++ cmd->cdb = kmalloc(cdb_len, gfp_mask);
++ if (unlikely(cmd->cdb == NULL)) {
++ PRINT_ERROR("Unable to alloc extended CDB (size %d)",
++ cdb_len);
++ goto out_free;
++ }
++ memcpy(cmd->cdb, cdb, cdb_len);
++ }
++
++ cmd->cdb_len = cdb_len;
++
+out:
+ TRACE_EXIT();
+ return cmd;
++
++out_free:
++ kmem_cache_free(scst_cmd_cachep, cmd);
++ cmd = NULL;
++ goto out;
+}
+
+static void scst_destroy_put_cmd(struct scst_cmd *cmd)
@@ -9162,7 +19371,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ * At this point tgt_dev can be dead, but the pointer remains non-NULL
+ */
+ if (likely(cmd->tgt_dev != NULL))
-+ __scst_put();
++ scst_put(cmd->cpu_cmd_counter);
+
+ scst_destroy_cmd(cmd);
+ return;
@@ -9178,12 +19387,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ TRACE_DBG("Freeing cmd %p (tag %llu)",
+ cmd, (long long unsigned int)cmd->tag);
+
-+ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
-+ TRACE_MGMT_DBG("Freeing aborted cmd %p (scst_cmd_count %d)",
-+ cmd, atomic_read(&scst_cmd_count));
-+ }
++ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
++ TRACE_MGMT_DBG("Freeing aborted cmd %p", cmd);
+
-+ BUG_ON(cmd->unblock_dev);
++ EXTRACHECKS_BUG_ON(cmd->unblock_dev || cmd->dec_on_dev_needed ||
++ cmd->dec_pr_readers_count_needed);
+
+ /*
+ * Target driver can already free sg buffer before calling
@@ -9228,8 +19436,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ "%d, target %s, LUN %lld, sn %d, expected_sn %d)",
+ cmd, cmd->cdb[0], cmd->tgtt->name,
+ (long long unsigned int)cmd->lun,
-+ cmd->sn, cmd->tgt_dev->expected_sn);
-+ scst_unblock_deferred(cmd->tgt_dev, cmd);
++ cmd->sn, cmd->cur_order_data->expected_sn);
++ scst_unblock_deferred(cmd->cur_order_data, cmd);
+ }
+#endif
+
@@ -9243,6 +19451,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ }
+ }
+
++ if (cmd->cdb != cmd->cdb_buf)
++ kfree(cmd->cdb);
++
+ if (likely(destroy))
+ scst_destroy_put_cmd(cmd);
+
@@ -9345,7 +19556,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ scst_sess_put(mcmd->sess);
+
+ if (mcmd->mcmd_tgt_dev != NULL)
-+ __scst_put();
++ scst_put(mcmd->cpu_cmd_counter);
+
+ mempool_free(mcmd, scst_mgmt_mempool);
+
@@ -9353,6 +19564,43 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ return;
+}
+
++static bool scst_on_sg_tablesize_low(struct scst_cmd *cmd, bool out)
++{
++ bool res;
++ int sg_cnt = out ? cmd->out_sg_cnt : cmd->sg_cnt;
++ static int ll;
++ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++
++ TRACE_ENTRY();
++
++ if (sg_cnt > cmd->tgt->sg_tablesize) {
++ /* It's the target's side business */
++ goto failed;
++ }
++
++ if (tgt_dev->dev->handler->on_sg_tablesize_low == NULL)
++ goto failed;
++
++ res = tgt_dev->dev->handler->on_sg_tablesize_low(cmd);
++
++ TRACE_DBG("on_sg_tablesize_low(%p) returned %d", cmd, res);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++failed:
++ res = false;
++ if ((ll < 10) || TRACING_MINOR()) {
++ PRINT_INFO("Unable to complete command due to SG IO count "
++ "limitation (%srequested %d, available %d, tgt lim %d)",
++ out ? "OUT buffer, " : "", cmd->sg_cnt,
++ tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
++ ll++;
++ }
++ goto out;
++}
++
+int scst_alloc_space(struct scst_cmd *cmd)
+{
+ gfp_t gfp_mask;
@@ -9360,7 +19608,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ int atomic = scst_cmd_atomic(cmd);
+ int flags;
+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+ static int ll;
+
+ TRACE_ENTRY();
+
@@ -9375,16 +19622,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ if (cmd->sg == NULL)
+ goto out;
+
-+ if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt)) {
-+ if ((ll < 10) || TRACING_MINOR()) {
-+ PRINT_INFO("Unable to complete command due to "
-+ "SG IO count limitation (requested %d, "
-+ "available %d, tgt lim %d)", cmd->sg_cnt,
-+ tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
-+ ll++;
-+ }
-+ goto out_sg_free;
-+ }
++ if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt))
++ if (!scst_on_sg_tablesize_low(cmd, false))
++ goto out_sg_free;
+
+ if (cmd->data_direction != SCST_DATA_BIDI)
+ goto success;
@@ -9395,16 +19635,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ if (cmd->out_sg == NULL)
+ goto out_sg_free;
+
-+ if (unlikely(cmd->out_sg_cnt > tgt_dev->max_sg_cnt)) {
-+ if ((ll < 10) || TRACING_MINOR()) {
-+ PRINT_INFO("Unable to complete command due to "
-+ "SG IO count limitation (OUT buffer, requested "
-+ "%d, available %d, tgt lim %d)", cmd->out_sg_cnt,
-+ tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
-+ ll++;
-+ }
-+ goto out_out_sg_free;
-+ }
++ if (unlikely(cmd->out_sg_cnt > tgt_dev->max_sg_cnt))
++ if (!scst_on_sg_tablesize_low(cmd, true))
++ goto out_out_sg_free;
+
+success:
+ res = 0;
@@ -9478,10 +19711,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ if (sioc->done)
+ sioc->done(sioc->data, sioc->sense, req->errors, req->resid_len);
+
-+ if (!sioc->full_cdb_used)
-+ kmem_cache_free(scsi_io_context_cache, sioc);
-+ else
-+ kfree(sioc);
++ kmem_cache_free(scsi_io_context_cache, sioc);
+
+ __blk_put_request(req->q, req);
+ return;
@@ -9490,42 +19720,24 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+/**
+ * scst_scsi_exec_async - executes a SCSI command in pass-through mode
+ * @cmd: scst command
++ * @data: pointer passed to done() as "data"
+ * @done: callback function when done
+ */
-+int scst_scsi_exec_async(struct scst_cmd *cmd,
-+ void (*done)(void *, char *, int, int))
++int scst_scsi_exec_async(struct scst_cmd *cmd, void *data,
++ void (*done)(void *data, char *sense, int result, int resid))
+{
+ int res = 0;
+ struct request_queue *q = cmd->dev->scsi_dev->request_queue;
+ struct request *rq;
+ struct scsi_io_context *sioc;
+ int write = (cmd->data_direction & SCST_DATA_WRITE) ? WRITE : READ;
-+ gfp_t gfp = GFP_KERNEL;
++ gfp_t gfp = cmd->noio_mem_alloc ? GFP_NOIO : GFP_KERNEL;
+ int cmd_len = cmd->cdb_len;
+
-+ if (cmd->ext_cdb_len == 0) {
-+ TRACE_DBG("Simple CDB (cmd_len %d)", cmd_len);
-+ sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
-+ if (sioc == NULL) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ } else {
-+ cmd_len += cmd->ext_cdb_len;
-+
-+ TRACE_DBG("Extended CDB (cmd_len %d)", cmd_len);
-+
-+ sioc = kzalloc(sizeof(*sioc) + cmd_len, gfp);
-+ if (sioc == NULL) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ sioc->full_cdb_used = 1;
-+
-+ memcpy(sioc->full_cdb, cmd->cdb, cmd->cdb_len);
-+ memcpy(&sioc->full_cdb[cmd->cdb_len], cmd->ext_cdb,
-+ cmd->ext_cdb_len);
++ sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
++ if (sioc == NULL) {
++ res = -ENOMEM;
++ goto out;
+ }
+
+ rq = blk_get_request(q, write, gfp);
@@ -9578,15 +19790,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+done:
+ TRACE_DBG("sioc %p, cmd %p", sioc, cmd);
+
-+ sioc->data = cmd;
++ sioc->data = data;
+ sioc->done = done;
+
+ rq->cmd_len = cmd_len;
-+ if (cmd->ext_cdb_len == 0) {
++ if (rq->cmd_len <= BLK_MAX_CDB) {
+ memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
+ memcpy(rq->cmd, cmd->cdb, cmd->cdb_len);
+ } else
-+ rq->cmd = sioc->full_cdb;
++ rq->cmd = cmd->cdb;
+
+ rq->sense = sioc->sense;
+ rq->sense_len = sizeof(sioc->sense);
@@ -9610,12 +19822,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ blk_put_request(rq);
+
+out_free_sioc:
-+ if (!sioc->full_cdb_used)
-+ kmem_cache_free(scsi_io_context_cache, sioc);
-+ else
-+ kfree(sioc);
++ kmem_cache_free(scsi_io_context_cache, sioc);
+ goto out;
+}
++EXPORT_SYMBOL(scst_scsi_exec_async);
+
+/**
+ * scst_copy_sg() - copy data between the command's SGs
@@ -9669,7 +19879,16 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+}
+EXPORT_SYMBOL_GPL(scst_copy_sg);
+
-+int scst_get_full_buf(struct scst_cmd *cmd, uint8_t **buf)
++/**
++ * scst_get_buf_full - return linear buffer for command
++ * @cmd: scst command
++ * @buf: pointer on the resulting pointer
++ *
++ * If the command's buffer >single page, it vmalloc() the needed area
++ * and copies the buffer there. Returns length of the buffer or negative
++ * error code otherwise.
++ */
++int scst_get_buf_full(struct scst_cmd *cmd, uint8_t **buf)
+{
+ int res = 0;
+
@@ -9721,8 +19940,17 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ TRACE_EXIT_RES(res);
+ return res;
+}
++EXPORT_SYMBOL(scst_get_buf_full);
+
-+void scst_put_full_buf(struct scst_cmd *cmd, uint8_t *buf)
++/**
++ * scst_put_buf_full - unmaps linear buffer for command
++ * @cmd: scst command
++ * @buf: pointer on the buffer to unmap
++ *
++ * Reverse operation for scst_get_buf_full. If the buffer was vmalloced(),
++ * it vfree() the buffer.
++ */
++void scst_put_buf_full(struct scst_cmd *cmd, uint8_t *buf)
+{
+ TRACE_ENTRY();
+
@@ -9757,6 +19985,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ TRACE_EXIT();
+ return;
+}
++EXPORT_SYMBOL(scst_put_buf_full);
+
+static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, 0, 16, 12, 0, 0 };
+
@@ -10029,34 +20258,23 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+EXPORT_SYMBOL_GPL(scst_get_cdb_info);
+
+/* Packs SCST LUN back to SCSI form */
-+__be64 scst_pack_lun(const uint64_t lun, unsigned int addr_method)
++__be64 scst_pack_lun(const uint64_t lun, enum scst_lun_addr_method addr_method)
+{
-+ uint64_t res;
-+ uint16_t *p = (uint16_t *)&res;
++ uint64_t res = 0;
+
-+ res = lun;
-+
-+ if ((addr_method == SCST_LUN_ADDR_METHOD_FLAT) && (lun != 0)) {
-+ /*
-+ * Flat space: luns other than 0 should use flat space
-+ * addressing method.
-+ */
-+ *p = 0x7fff & *p;
-+ *p = 0x4000 | *p;
++ if (lun) {
++ res = (addr_method << 14) | (lun & 0x3fff);
++ res = res << 48;
+ }
-+ /* Default is to use peripheral device addressing mode */
-+
-+ *p = (__force u16)cpu_to_be16(*p);
+
-+ TRACE_EXIT_HRES((unsigned long)res);
-+ return (__force __be64)res;
++ TRACE_EXIT_HRES(res >> 48);
++ return cpu_to_be64(res);
+}
+
+/*
-+ * Routine to extract a lun number from an 8-byte LUN structure
-+ * in network byte order (BE).
-+ * (see SAM-2, Section 4.12.3 page 40)
-+ * Supports 2 types of lun unpacking: peripheral and logical unit.
++ * Function to extract a LUN number from an 8-byte LUN structure in network byte
++ * order (big endian). Supports three LUN addressing methods: peripheral, flat
++ * and logical unit. See also SAM-2, section 4.9.4 (page 40).
+ */
+uint64_t scst_unpack_lun(const uint8_t *lun, int len)
+{
@@ -10095,46 +20313,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+
+ address_method = (*lun) >> 6; /* high 2 bits of byte 0 */
+ switch (address_method) {
-+ case 0: /* peripheral device addressing method */
-+#if 0
-+ if (*lun) {
-+ PRINT_ERROR("Illegal BUS INDENTIFIER in LUN "
-+ "peripheral device addressing method 0x%02x, "
-+ "expected 0", *lun);
-+ break;
-+ }
-+ res = *(lun + 1);
-+ break;
-+#else
-+ /*
-+ * Looks like it's legal to use it as flat space addressing
-+ * method as well
-+ */
-+
-+ /* go through */
-+#endif
-+
-+ case 1: /* flat space addressing method */
++ case SCST_LUN_ADDR_METHOD_PERIPHERAL:
++ case SCST_LUN_ADDR_METHOD_FLAT:
++ case SCST_LUN_ADDR_METHOD_LUN:
+ res = *(lun + 1) | (((*lun) & 0x3f) << 8);
+ break;
+
-+ case 2: /* logical unit addressing method */
-+ if (*lun & 0x3f) {
-+ PRINT_ERROR("Illegal BUS NUMBER in LUN logical unit "
-+ "addressing method 0x%02x, expected 0",
-+ *lun & 0x3f);
-+ break;
-+ }
-+ if (*(lun + 1) & 0xe0) {
-+ PRINT_ERROR("Illegal TARGET in LUN logical unit "
-+ "addressing method 0x%02x, expected 0",
-+ (*(lun + 1) & 0xf8) >> 5);
-+ break;
-+ }
-+ res = *(lun + 1) & 0x1f;
-+ break;
-+
-+ case 3: /* extended logical unit addressing method */
++ case SCST_LUN_ADDR_METHOD_EXTENDED_LUN:
+ default:
+ PRINT_ERROR("Unimplemented LUN addressing method %u",
+ address_method);
@@ -10540,7 +20725,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ int buffer_size, sector_size, sh;
+ uint8_t *buffer;
+
-+ buffer_size = scst_get_buf_first(cmd, &buffer);
++ buffer_size = scst_get_buf_full(cmd, &buffer);
+ if (unlikely(buffer_size <= 0)) {
+ if (buffer_size < 0) {
+ PRINT_ERROR("%s: Unable to get the"
@@ -10552,7 +20737,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ sector_size =
+ ((buffer[4] << 24) | (buffer[5] << 16) |
+ (buffer[6] << 8) | (buffer[7] << 0));
-+ scst_put_buf(cmd, buffer);
++ scst_put_buf_full(cmd, buffer);
+ if (sector_size != 0)
+ sh = scst_calc_block_shift(sector_size);
+ else
@@ -10603,7 +20788,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ switch (opcode) {
+ case MODE_SENSE:
+ case MODE_SELECT:
-+ buffer_size = scst_get_buf_first(cmd, &buffer);
++ buffer_size = scst_get_buf_full(cmd, &buffer);
+ if (unlikely(buffer_size <= 0)) {
+ if (buffer_size < 0) {
+ PRINT_ERROR("%s: Unable to get the buffer (%d)",
@@ -10641,7 +20826,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ switch (opcode) {
+ case MODE_SENSE:
+ case MODE_SELECT:
-+ scst_put_buf(cmd, buffer);
++ scst_put_buf_full(cmd, buffer);
+ break;
+ }
+
@@ -10867,7 +21052,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ dev->dev_reserved = 0;
+ /*
+ * There is no need to send RELEASE, since the device is going
-+ * to be resetted. Actually, since we can be in RESET TM
++ * to be reset. Actually, since we can be in RESET TM
+ * function, it might be dangerous.
+ */
+ }
@@ -10878,14 +21063,16 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ dev_tgt_dev_list_entry) {
+ struct scst_session *sess = tgt_dev->sess;
+
++#if 0 /* Clearing UAs and last sense isn't required by SAM and it
++ * looks to be better to not clear them to not loose important
++ * events, so let's disable it.
++ */
+ spin_lock_bh(&tgt_dev->tgt_dev_lock);
-+
+ scst_free_all_UA(tgt_dev);
-+
+ memset(tgt_dev->tgt_dev_sense, 0,
+ sizeof(tgt_dev->tgt_dev_sense));
-+
+ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
++#endif
+
+ spin_lock_irq(&sess->sess_list_lock);
+
@@ -10929,6 +21116,16 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ return;
+}
+
++/* Caller must hold tgt_dev->tgt_dev_lock. */
++void scst_tgt_dev_del_free_UA(struct scst_tgt_dev *tgt_dev,
++ struct scst_tgt_dev_UA *ua)
++{
++ list_del(&ua->UA_list_entry);
++ if (list_empty(&tgt_dev->UA_list))
++ clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
++ mempool_free(ua, scst_ua_mempool);
++}
++
+/* No locks, no IRQ or IRQ-disabled context allowed */
+int scst_set_pending_UA(struct scst_cmd *cmd)
+{
@@ -10978,17 +21175,16 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+
+ /*
+ * cmd won't allow to suspend activities, so we can access
-+ * sess->sess_tgt_dev_list_hash without any additional
++ * sess->sess_tgt_dev_list without any additional
+ * protection.
+ */
+
+ local_bh_disable();
+
-+ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
-+ struct list_head *sess_tgt_dev_list_head =
-+ &sess->sess_tgt_dev_list_hash[i];
++ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
++ struct list_head *head = &sess->sess_tgt_dev_list[i];
+ struct scst_tgt_dev *tgt_dev;
-+ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
++ list_for_each_entry(tgt_dev, head,
+ sess_tgt_dev_list_entry) {
+ /* Lockdep triggers here a false positive.. */
+ spin_lock(&tgt_dev->tgt_dev_lock);
@@ -11009,12 +21205,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ list_del(&UA_entry->UA_list_entry);
+
+ if (UA_entry->global_UA) {
-+ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
-+ struct list_head *sess_tgt_dev_list_head =
-+ &sess->sess_tgt_dev_list_hash[i];
++ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
++ struct list_head *head = &sess->sess_tgt_dev_list[i];
+ struct scst_tgt_dev *tgt_dev;
+
-+ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
++ list_for_each_entry(tgt_dev, head,
+ sess_tgt_dev_list_entry) {
+ struct scst_tgt_dev_UA *ua;
+ list_for_each_entry(ua, &tgt_dev->UA_list,
@@ -11026,8 +21221,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ TRACE_MGMT_DBG("Freeing not "
+ "needed global UA %p",
+ ua);
-+ list_del(&ua->UA_list_entry);
-+ mempool_free(ua, scst_ua_mempool);
++ scst_tgt_dev_del_free_UA(tgt_dev,
++ ua);
+ break;
+ }
+ }
@@ -11044,11 +21239,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+
+out_unlock:
+ if (global_unlock) {
-+ for (i = TGT_DEV_HASH_SIZE-1; i >= 0; i--) {
-+ struct list_head *sess_tgt_dev_list_head =
-+ &sess->sess_tgt_dev_list_hash[i];
++ for (i = SESS_TGT_DEV_LIST_HASH_SIZE-1; i >= 0; i--) {
++ struct list_head *head = &sess->sess_tgt_dev_list[i];
+ struct scst_tgt_dev *tgt_dev;
-+ list_for_each_entry_reverse(tgt_dev, sess_tgt_dev_list_head,
++ list_for_each_entry_reverse(tgt_dev, head,
+ sess_tgt_dev_list_entry) {
+ spin_unlock(&tgt_dev->tgt_dev_lock);
+ }
@@ -11085,7 +21279,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+
+ UA_entry->global_UA = (flags & SCST_SET_UA_FLAG_GLOBAL) != 0;
+ if (UA_entry->global_UA)
-+ TRACE_MGMT_DBG("Queuing global UA %p", UA_entry);
++ TRACE_MGMT_DBG("Queueing global UA %p", UA_entry);
+
+ if (sense_len > (int)sizeof(UA_entry->UA_sense_buffer)) {
+ PRINT_WARNING("Sense truncated (needed %d), shall you increase "
@@ -11175,7 +21369,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+{
+ TRACE_ENTRY();
+
-+ TRACE_MGMT_DBG("Processing UA dev %p", dev);
++ TRACE_MGMT_DBG("Processing UA dev %s", dev->virt_name);
+
+ /* Check for reset UA */
+ if (scst_analyze_sense(sense, sense_len, SCST_SENSE_ASC_VALID,
@@ -11212,25 +21406,25 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+}
+
+/* No locks */
-+struct scst_cmd *__scst_check_deferred_commands(struct scst_tgt_dev *tgt_dev)
++struct scst_cmd *__scst_check_deferred_commands(struct scst_order_data *order_data)
+{
+ struct scst_cmd *res = NULL, *cmd, *t;
-+ typeof(tgt_dev->expected_sn) expected_sn = tgt_dev->expected_sn;
++ typeof(order_data->expected_sn) expected_sn = order_data->expected_sn;
+
-+ spin_lock_irq(&tgt_dev->sn_lock);
++ spin_lock_irq(&order_data->sn_lock);
+
-+ if (unlikely(tgt_dev->hq_cmd_count != 0))
++ if (unlikely(order_data->hq_cmd_count != 0))
+ goto out_unlock;
+
+restart:
-+ list_for_each_entry_safe(cmd, t, &tgt_dev->deferred_cmd_list,
++ list_for_each_entry_safe(cmd, t, &order_data->deferred_cmd_list,
+ sn_cmd_list_entry) {
+ EXTRACHECKS_BUG_ON(cmd->queue_type ==
+ SCST_CMD_QUEUE_HEAD_OF_QUEUE);
+ if (cmd->sn == expected_sn) {
+ TRACE_SN("Deferred command %p (sn %d, set %d) found",
+ cmd, cmd->sn, cmd->sn_set);
-+ tgt_dev->def_cmd_count--;
++ order_data->def_cmd_count--;
+ list_del(&cmd->sn_cmd_list_entry);
+ if (res == NULL)
+ res = cmd;
@@ -11248,7 +21442,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ if (res != NULL)
+ goto out_unlock;
+
-+ list_for_each_entry(cmd, &tgt_dev->skipped_sn_list,
++ list_for_each_entry(cmd, &order_data->skipped_sn_list,
+ sn_cmd_list_entry) {
+ EXTRACHECKS_BUG_ON(cmd->queue_type ==
+ SCST_CMD_QUEUE_HEAD_OF_QUEUE);
@@ -11263,21 +21457,21 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ cmd,
+ (long long unsigned int)cmd->tag,
+ cmd->sn);
-+ tgt_dev->def_cmd_count--;
++ order_data->def_cmd_count--;
+ list_del(&cmd->sn_cmd_list_entry);
-+ spin_unlock_irq(&tgt_dev->sn_lock);
++ spin_unlock_irq(&order_data->sn_lock);
+ if (test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
+ &cmd->cmd_flags))
+ scst_destroy_put_cmd(cmd);
-+ scst_inc_expected_sn(tgt_dev, slot);
-+ expected_sn = tgt_dev->expected_sn;
-+ spin_lock_irq(&tgt_dev->sn_lock);
++ scst_inc_expected_sn(order_data, slot);
++ expected_sn = order_data->expected_sn;
++ spin_lock_irq(&order_data->sn_lock);
+ goto restart;
+ }
+ }
+
+out_unlock:
-+ spin_unlock_irq(&tgt_dev->sn_lock);
++ spin_unlock_irq(&order_data->sn_lock);
+ return res;
+}
+
@@ -11413,26 +21607,55 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ return res;
+}
+
-+/* dev_lock supposed to be held and BH disabled */
-+void scst_block_dev(struct scst_device *dev)
++static void __scst_unblock_deferred(struct scst_order_data *order_data,
++ struct scst_cmd *out_of_sn_cmd)
+{
-+ dev->block_count++;
-+ TRACE_MGMT_DBG("Device BLOCK(new %d), dev %p", dev->block_count, dev);
++ EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);
++
++ if (out_of_sn_cmd->sn == order_data->expected_sn) {
++ scst_inc_expected_sn(order_data, out_of_sn_cmd->sn_slot);
++ scst_make_deferred_commands_active(order_data);
++ } else {
++ out_of_sn_cmd->out_of_sn = 1;
++ spin_lock_irq(&order_data->sn_lock);
++ order_data->def_cmd_count++;
++ list_add_tail(&out_of_sn_cmd->sn_cmd_list_entry,
++ &order_data->skipped_sn_list);
++ TRACE_SN("out_of_sn_cmd %p with sn %d added to skipped_sn_list"
++ " (expected_sn %d)", out_of_sn_cmd, out_of_sn_cmd->sn,
++ order_data->expected_sn);
++ spin_unlock_irq(&order_data->sn_lock);
++ }
++
++ return;
+}
+
-+/* No locks */
-+void scst_unblock_dev(struct scst_device *dev)
++void scst_unblock_deferred(struct scst_order_data *order_data,
++ struct scst_cmd *out_of_sn_cmd)
+{
-+ spin_lock_bh(&dev->dev_lock);
-+ TRACE_MGMT_DBG("Device UNBLOCK(new %d), dev %p",
-+ dev->block_count-1, dev);
-+ if (--dev->block_count == 0)
-+ scst_unblock_cmds(dev);
-+ spin_unlock_bh(&dev->dev_lock);
-+ BUG_ON(dev->block_count < 0);
++ TRACE_ENTRY();
++
++ if (!out_of_sn_cmd->sn_set) {
++ TRACE_SN("cmd %p without sn", out_of_sn_cmd);
++ goto out;
++ }
++
++ __scst_unblock_deferred(order_data, out_of_sn_cmd);
++
++out:
++ TRACE_EXIT();
++ return;
+}
+
-+/* No locks */
++/* dev_lock supposed to be held and BH disabled */
++void scst_block_dev(struct scst_device *dev)
++{
++ dev->block_count++;
++ TRACE_MGMT_DBG("Device BLOCK (new count %d), dev %s", dev->block_count,
++ dev->virt_name);
++}
++
++/* dev_lock supposed to be held and BH disabled */
+bool __scst_check_blocked_dev(struct scst_cmd *cmd)
+{
+ int res = false;
@@ -11450,144 +21673,125 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+ goto out;
+ }
+
-+repeat:
-+ if (dev->block_count > 0) {
-+ spin_lock_bh(&dev->dev_lock);
-+ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
-+ goto out_unlock;
-+ if (dev->block_count > 0) {
-+ TRACE_MGMT_DBG("Delaying cmd %p due to blocking "
-+ "(tag %llu, dev %p)", cmd,
-+ (long long unsigned int)cmd->tag, dev);
-+ list_add_tail(&cmd->blocked_cmd_list_entry,
-+ &dev->blocked_cmd_list);
-+ res = true;
-+ spin_unlock_bh(&dev->dev_lock);
-+ goto out;
-+ } else {
-+ TRACE_MGMT_DBG("%s", "Somebody unblocked the device, "
-+ "continuing");
-+ }
-+ spin_unlock_bh(&dev->dev_lock);
-+ }
++ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
++ goto out;
+
-+ if (dev->dev_double_ua_possible) {
-+ spin_lock_bh(&dev->dev_lock);
-+ if (dev->block_count == 0) {
-+ TRACE_MGMT_DBG("cmd %p (tag %llu), blocking further "
-+ "cmds due to possible double reset UA (dev %p)",
-+ cmd, (long long unsigned int)cmd->tag, dev);
-+ scst_block_dev(dev);
++ if (dev->block_count > 0) {
++ TRACE_MGMT_DBG("Delaying cmd %p due to blocking "
++ "(tag %llu, op %x, dev %s)", cmd,
++ (long long unsigned int)cmd->tag, cmd->cdb[0],
++ dev->virt_name);
++ goto out_block;
++ } else if (scst_is_strictly_serialized_cmd(cmd)) {
++ TRACE_MGMT_DBG("cmd %p (tag %llu, op %x): blocking further "
++ "cmds on dev %s due to strict serialization", cmd,
++ (long long unsigned int)cmd->tag, cmd->cdb[0],
++ dev->virt_name);
++ scst_block_dev(dev);
++ if (dev->on_dev_cmd_count > 1) {
++ TRACE_MGMT_DBG("Delaying strictly serialized cmd %p "
++ "(dev %s, on_dev_cmds to wait %d)", cmd,
++ dev->virt_name, dev->on_dev_cmd_count-1);
++ EXTRACHECKS_BUG_ON(dev->strictly_serialized_cmd_waiting);
++ dev->strictly_serialized_cmd_waiting = 1;
++ goto out_block;
++ } else
+ cmd->unblock_dev = 1;
-+ } else {
-+ spin_unlock_bh(&dev->dev_lock);
-+ TRACE_MGMT_DBG("Somebody blocked the device, "
-+ "repeating (count %d)", dev->block_count);
-+ goto repeat;
-+ }
-+ spin_unlock_bh(&dev->dev_lock);
-+ }
++ } else if ((dev->dev_double_ua_possible) || scst_is_serialized_cmd(cmd)) {
++ TRACE_MGMT_DBG("cmd %p (tag %llu, op %x): blocking further cmds "
++ "on dev %s due to %s", cmd, (long long unsigned int)cmd->tag,
++ cmd->cdb[0], dev->virt_name,
++ dev->dev_double_ua_possible ? "possible double reset UA" :
++ "serialized cmd");
++ scst_block_dev(dev);
++ cmd->unblock_dev = 1;
++ } else
++ TRACE_MGMT_DBG("No blocks for device %s", dev->virt_name);
+
+out:
+ TRACE_EXIT_RES(res);
+ return res;
+
-+out_unlock:
-+ spin_unlock_bh(&dev->dev_lock);
++out_block:
++ if (cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE)
++ list_add(&cmd->blocked_cmd_list_entry,
++ &dev->blocked_cmd_list);
++ else
++ list_add_tail(&cmd->blocked_cmd_list_entry,
++ &dev->blocked_cmd_list);
++ res = true;
+ goto out;
+}
+
-+/* Called under dev_lock */
-+static void scst_unblock_cmds(struct scst_device *dev)
++/* dev_lock supposed to be held and BH disabled */
++void scst_unblock_dev(struct scst_device *dev)
+{
-+ struct scst_cmd *cmd, *tcmd;
-+ unsigned long flags;
-+
+ TRACE_ENTRY();
+
-+ local_irq_save(flags);
-+ list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
-+ blocked_cmd_list_entry) {
-+ list_del(&cmd->blocked_cmd_list_entry);
-+ TRACE_MGMT_DBG("Adding blocked cmd %p to active cmd list", cmd);
-+ spin_lock(&cmd->cmd_threads->cmd_list_lock);
-+ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
-+ list_add(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ else
-+ list_add_tail(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
-+ spin_unlock(&cmd->cmd_threads->cmd_list_lock);
-+ }
-+ local_irq_restore(flags);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void __scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
-+ struct scst_cmd *out_of_sn_cmd)
-+{
-+ EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);
++ TRACE_MGMT_DBG("Device UNBLOCK(new %d), dev %s",
++ dev->block_count-1, dev->virt_name);
+
-+ if (out_of_sn_cmd->sn == tgt_dev->expected_sn) {
-+ scst_inc_expected_sn(tgt_dev, out_of_sn_cmd->sn_slot);
-+ scst_make_deferred_commands_active(tgt_dev);
-+ } else {
-+ out_of_sn_cmd->out_of_sn = 1;
-+ spin_lock_irq(&tgt_dev->sn_lock);
-+ tgt_dev->def_cmd_count++;
-+ list_add_tail(&out_of_sn_cmd->sn_cmd_list_entry,
-+ &tgt_dev->skipped_sn_list);
-+ TRACE_SN("out_of_sn_cmd %p with sn %d added to skipped_sn_list"
-+ " (expected_sn %d)", out_of_sn_cmd, out_of_sn_cmd->sn,
-+ tgt_dev->expected_sn);
-+ spin_unlock_irq(&tgt_dev->sn_lock);
-+ }
++#ifdef CONFIG_SMP
++ EXTRACHECKS_BUG_ON(!spin_is_locked(&dev->dev_lock));
++#endif
+
-+ return;
-+}
++ if (--dev->block_count == 0) {
++ struct scst_cmd *cmd, *tcmd;
++ unsigned long flags;
+
-+void scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
-+ struct scst_cmd *out_of_sn_cmd)
-+{
-+ TRACE_ENTRY();
++ local_irq_save(flags);
++ list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
++ blocked_cmd_list_entry) {
++ bool strictly_serialized;
++ list_del(&cmd->blocked_cmd_list_entry);
++ TRACE_MGMT_DBG("Adding blocked cmd %p to active cmd "
++ "list", cmd);
++ spin_lock(&cmd->cmd_threads->cmd_list_lock);
++ if (cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE)
++ list_add(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ else
++ list_add_tail(&cmd->cmd_list_entry,
++ &cmd->cmd_threads->active_cmd_list);
++ strictly_serialized = scst_is_strictly_serialized_cmd(cmd);
++ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
++ spin_unlock(&cmd->cmd_threads->cmd_list_lock);
++ if (dev->strictly_serialized_cmd_waiting && strictly_serialized)
++ break;
++ }
++ local_irq_restore(flags);
+
-+ if (!out_of_sn_cmd->sn_set) {
-+ TRACE_SN("cmd %p without sn", out_of_sn_cmd);
-+ goto out;
++ dev->strictly_serialized_cmd_waiting = 0;
+ }
+
-+ __scst_unblock_deferred(tgt_dev, out_of_sn_cmd);
++ BUG_ON(dev->block_count < 0);
+
-+out:
+ TRACE_EXIT();
+ return;
+}
+
+void scst_on_hq_cmd_response(struct scst_cmd *cmd)
+{
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
++ struct scst_order_data *order_data = cmd->cur_order_data;
+
+ TRACE_ENTRY();
+
+ if (!cmd->hq_cmd_inced)
+ goto out;
+
-+ spin_lock_irq(&tgt_dev->sn_lock);
-+ tgt_dev->hq_cmd_count--;
-+ spin_unlock_irq(&tgt_dev->sn_lock);
++ spin_lock_irq(&order_data->sn_lock);
++ order_data->hq_cmd_count--;
++ spin_unlock_irq(&order_data->sn_lock);
+
-+ EXTRACHECKS_BUG_ON(tgt_dev->hq_cmd_count < 0);
++ EXTRACHECKS_BUG_ON(order_data->hq_cmd_count < 0);
+
+ /*
+ * There is no problem in checking hq_cmd_count in the
+ * non-locked state. In the worst case we will only have
+ * unneeded run of the deferred commands.
+ */
-+ if (tgt_dev->hq_cmd_count == 0)
-+ scst_make_deferred_commands_active(tgt_dev);
++ if (order_data->hq_cmd_count == 0)
++ scst_make_deferred_commands_active(order_data);
+
+out:
+ TRACE_EXIT();
@@ -11629,9 +21833,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+{
+ TRACE_ENTRY();
+
-+ TRACE_MGMT_DBG("Aborted cmd %p done (cmd_ref %d, "
-+ "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
-+ atomic_read(&scst_cmd_count));
++ TRACE_MGMT_DBG("Aborted cmd %p done (cmd_ref %d)", cmd,
++ atomic_read(&cmd->cmd_ref));
+
+ scst_done_cmd_mgmt(cmd);
+
@@ -12517,17 +22720,16 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_lib.c linux-2.6.36/drivers/scst/s
+}
+
+#endif /* CONFIG_SCST_MEASURE_LATENCY */
-diff -uprN orig/linux-2.6.36/drivers/scst/scst_main.c linux-2.6.36/drivers/scst/scst_main.c
---- orig/linux-2.6.36/drivers/scst/scst_main.c
-+++ linux-2.6.36/drivers/scst/scst_main.c
-@@ -0,0 +1,2198 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/scst_pres.h linux-2.6.39/drivers/scst/scst_pres.h
+--- orig/linux-2.6.39/drivers/scst/scst_pres.h
++++ linux-2.6.39/drivers/scst/scst_pres.h
+@@ -0,0 +1,234 @@
+/*
-+ * scst_main.c
++ * scst_pres.c
+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
++ * Copyright (C) 2009 - 2010 Alexey Obitotskiy <alexeyo1@open-e.com>
++ * Copyright (C) 2009 - 2010 Open-E, Inc.
++ * Copyright (C) 2009 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
@@ -12540,2263 +22742,226 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_main.c linux-2.6.36/drivers/scst/
+ * GNU General Public License for more details.
+ */
+
-+#include <linux/module.h>
++#ifndef SCST_PRES_H_
++#define SCST_PRES_H_
+
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <linux/slab.h>
-+#include <linux/sched.h>
-+#include <linux/unistd.h>
-+#include <linux/string.h>
-+#include <linux/kthread.h>
+#include <linux/delay.h>
+
-+#include <scst/scst.h>
-+#include "scst_priv.h"
-+#include "scst_mem.h"
-+#include "scst_pres.h"
-+
-+#if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
-+#warning "HIGHMEM kernel configurations are fully supported, but not\
-+ recommended for performance reasons. Consider changing VMSPLIT\
-+ option or use a 64-bit configuration instead. See README file for\
-+ details."
-+#endif
-+
-+/**
-+ ** SCST global variables. They are all uninitialized to have their layout in
-+ ** memory be exactly as specified. Otherwise compiler puts zero-initialized
-+ ** variable separately from nonzero-initialized ones.
-+ **/
-+
-+/*
-+ * Main SCST mutex. All targets, devices and dev_types management is done
-+ * under this mutex.
-+ *
-+ * It must NOT be used in any works (schedule_work(), etc.), because
-+ * otherwise a deadlock (double lock, actually) is possible, e.g., with
-+ * scst_user detach_tgt(), which is called under scst_mutex and calls
-+ * flush_scheduled_work().
-+ */
-+struct mutex scst_mutex;
-+EXPORT_SYMBOL_GPL(scst_mutex);
-+
-+/*
-+ * Secondary level main mutex, inner for scst_mutex. Needed for
-+ * __scst_pr_register_all_tg_pt(), since we can't use scst_mutex there,
-+ * because of the circular locking dependency with dev_pr_mutex.
-+ */
-+struct mutex scst_mutex2;
-+
-+/* Both protected by scst_mutex or scst_mutex2 on read and both on write */
-+struct list_head scst_template_list;
-+struct list_head scst_dev_list;
-+
-+/* Protected by scst_mutex */
-+struct list_head scst_dev_type_list;
-+struct list_head scst_virtual_dev_type_list;
-+
-+spinlock_t scst_main_lock;
-+
-+static struct kmem_cache *scst_mgmt_cachep;
-+mempool_t *scst_mgmt_mempool;
-+static struct kmem_cache *scst_mgmt_stub_cachep;
-+mempool_t *scst_mgmt_stub_mempool;
-+static struct kmem_cache *scst_ua_cachep;
-+mempool_t *scst_ua_mempool;
-+static struct kmem_cache *scst_sense_cachep;
-+mempool_t *scst_sense_mempool;
-+static struct kmem_cache *scst_aen_cachep;
-+mempool_t *scst_aen_mempool;
-+struct kmem_cache *scst_tgtd_cachep;
-+struct kmem_cache *scst_sess_cachep;
-+struct kmem_cache *scst_acgd_cachep;
-+
-+unsigned int scst_setup_id;
-+
-+spinlock_t scst_init_lock;
-+wait_queue_head_t scst_init_cmd_list_waitQ;
-+struct list_head scst_init_cmd_list;
-+unsigned int scst_init_poll_cnt;
-+
-+struct kmem_cache *scst_cmd_cachep;
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+unsigned long scst_trace_flag;
-+#endif
-+
-+int scst_max_tasklet_cmd = SCST_DEF_MAX_TASKLET_CMD;
-+
-+unsigned long scst_flags;
-+atomic_t scst_cmd_count;
-+
-+struct scst_cmd_threads scst_main_cmd_threads;
-+
-+struct scst_tasklet scst_tasklets[NR_CPUS];
-+
-+spinlock_t scst_mcmd_lock;
-+struct list_head scst_active_mgmt_cmd_list;
-+struct list_head scst_delayed_mgmt_cmd_list;
-+wait_queue_head_t scst_mgmt_cmd_list_waitQ;
-+
-+wait_queue_head_t scst_mgmt_waitQ;
-+spinlock_t scst_mgmt_lock;
-+struct list_head scst_sess_init_list;
-+struct list_head scst_sess_shut_list;
-+
-+wait_queue_head_t scst_dev_cmd_waitQ;
-+
-+static struct mutex scst_suspend_mutex;
-+/* protected by scst_suspend_mutex */
-+static struct list_head scst_cmd_threads_list;
-+
-+int scst_threads;
-+static struct task_struct *scst_init_cmd_thread;
-+static struct task_struct *scst_mgmt_thread;
-+static struct task_struct *scst_mgmt_cmd_thread;
-+
-+static int suspend_count;
-+
-+static int scst_virt_dev_last_id; /* protected by scst_mutex */
-+
-+static unsigned int scst_max_cmd_mem;
-+unsigned int scst_max_dev_cmd_mem;
-+
-+module_param_named(scst_threads, scst_threads, int, 0);
-+MODULE_PARM_DESC(scst_threads, "SCSI target threads count");
-+
-+module_param_named(scst_max_cmd_mem, scst_max_cmd_mem, int, S_IRUGO);
-+MODULE_PARM_DESC(scst_max_cmd_mem, "Maximum memory allowed to be consumed by "
-+ "all SCSI commands of all devices at any given time in MB");
-+
-+module_param_named(scst_max_dev_cmd_mem, scst_max_dev_cmd_mem, int, S_IRUGO);
-+MODULE_PARM_DESC(scst_max_dev_cmd_mem, "Maximum memory allowed to be consumed "
-+ "by all SCSI commands of a device at any given time in MB");
-+
-+struct scst_dev_type scst_null_devtype = {
-+ .name = "none",
-+ .threads_num = -1,
-+};
-+
-+static void __scst_resume_activity(void);
-+
-+/**
-+ * __scst_register_target_template() - register target template.
-+ * @vtt: target template
-+ * @version: SCST_INTERFACE_VERSION version string to ensure that
-+ * SCST core and the target driver use the same version of
-+ * the SCST interface
-+ *
-+ * Description:
-+ * Registers a target template and returns 0 on success or appropriate
-+ * error code otherwise.
-+ *
-+ * Target drivers supposed to behave sanely and not call register()
-+ * and unregister() randomly sinultaneously.
-+ */
-+int __scst_register_target_template(struct scst_tgt_template *vtt,
-+ const char *version)
-+{
-+ int res = 0;
-+ struct scst_tgt_template *t;
-+
-+ TRACE_ENTRY();
-+
-+ INIT_LIST_HEAD(&vtt->tgt_list);
-+
-+ if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
-+ PRINT_ERROR("Incorrect version of target %s", vtt->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (!vtt->detect) {
-+ PRINT_ERROR("Target driver %s must have "
-+ "detect() method.", vtt->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (!vtt->release) {
-+ PRINT_ERROR("Target driver %s must have "
-+ "release() method.", vtt->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (!vtt->xmit_response) {
-+ PRINT_ERROR("Target driver %s must have "
-+ "xmit_response() method.", vtt->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (vtt->get_initiator_port_transport_id == NULL)
-+ PRINT_WARNING("Target driver %s doesn't support Persistent "
-+ "Reservations", vtt->name);
-+
-+ if (vtt->threads_num < 0) {
-+ PRINT_ERROR("Wrong threads_num value %d for "
-+ "target \"%s\"", vtt->threads_num,
-+ vtt->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if ((!vtt->enable_target || !vtt->is_target_enabled) &&
-+ !vtt->enabled_attr_not_needed)
-+ PRINT_WARNING("Target driver %s doesn't have enable_target() "
-+ "and/or is_target_enabled() method(s). This is unsafe "
-+ "and can lead that initiators connected on the "
-+ "initialization time can see an unexpected set of "
-+ "devices or no devices at all!", vtt->name);
-+
-+ if (((vtt->add_target != NULL) && (vtt->del_target == NULL)) ||
-+ ((vtt->add_target == NULL) && (vtt->del_target != NULL))) {
-+ PRINT_ERROR("Target driver %s must either define both "
-+ "add_target() and del_target(), or none.", vtt->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (vtt->rdy_to_xfer == NULL)
-+ vtt->rdy_to_xfer_atomic = 1;
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0)
-+ goto out;
-+ list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
-+ if (strcmp(t->name, vtt->name) == 0) {
-+ PRINT_ERROR("Target driver %s already registered",
-+ vtt->name);
-+ mutex_unlock(&scst_mutex);
-+ goto out_unlock;
-+ }
-+ }
-+ mutex_unlock(&scst_mutex);
-+
-+ res = scst_tgtt_sysfs_create(vtt);
-+ if (res)
-+ goto out;
-+
-+ mutex_lock(&scst_mutex);
-+ mutex_lock(&scst_mutex2);
-+ list_add_tail(&vtt->scst_template_list_entry, &scst_template_list);
-+ mutex_unlock(&scst_mutex2);
-+ mutex_unlock(&scst_mutex);
-+
-+ TRACE_DBG("%s", "Calling target driver's detect()");
-+ res = vtt->detect(vtt);
-+ TRACE_DBG("Target driver's detect() returned %d", res);
-+ if (res < 0) {
-+ PRINT_ERROR("%s", "The detect() routine failed");
-+ res = -EINVAL;
-+ goto out_del;
-+ }
-+
-+ PRINT_INFO("Target template %s registered successfully", vtt->name);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_del:
-+ scst_tgtt_sysfs_del(vtt);
-+
-+ mutex_lock(&scst_mutex);
-+
-+ mutex_lock(&scst_mutex2);
-+ list_del(&vtt->scst_template_list_entry);
-+ mutex_unlock(&scst_mutex2);
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+ goto out;
-+}
-+EXPORT_SYMBOL_GPL(__scst_register_target_template);
-+
-+static int scst_check_non_gpl_target_template(struct scst_tgt_template *vtt)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ if (vtt->task_mgmt_affected_cmds_done || vtt->threads_num ||
-+ vtt->on_hw_pending_cmd_timeout) {
-+ PRINT_ERROR("Not allowed functionality in non-GPL version for "
-+ "target template %s", vtt->name);
-+ res = -EPERM;
-+ goto out;
-+ }
-+
-+ res = 0;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/**
-+ * __scst_register_target_template_non_gpl() - register target template,
-+ * non-GPL version
-+ * @vtt: target template
-+ * @version: SCST_INTERFACE_VERSION version string to ensure that
-+ * SCST core and the target driver use the same version of
-+ * the SCST interface
-+ *
-+ * Description:
-+ * Registers a target template and returns 0 on success or appropriate
-+ * error code otherwise.
-+ *
-+ * Note: *vtt must be static!
-+ */
-+int __scst_register_target_template_non_gpl(struct scst_tgt_template *vtt,
-+ const char *version)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ res = scst_check_non_gpl_target_template(vtt);
-+ if (res != 0)
-+ goto out;
-+
-+ res = __scst_register_target_template(vtt, version);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL(__scst_register_target_template_non_gpl);
-+
-+/**
-+ * scst_unregister_target_template() - unregister target template
-+ *
-+ * Target drivers supposed to behave sanely and not call register()
-+ * and unregister() randomly sinultaneously. Also it is supposed that
-+ * no attepts to create new targets for this vtt will be done in a race
-+ * with this function.
-+ */
-+void scst_unregister_target_template(struct scst_tgt_template *vtt)
-+{
-+ struct scst_tgt *tgt;
-+ struct scst_tgt_template *t;
-+ int found = 0;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
-+ if (strcmp(t->name, vtt->name) == 0) {
-+ found = 1;
-+ break;
-+ }
-+ }
-+ if (!found) {
-+ PRINT_ERROR("Target driver %s isn't registered", vtt->name);
-+ goto out_err_up;
-+ }
-+
-+ mutex_lock(&scst_mutex2);
-+ list_del(&vtt->scst_template_list_entry);
-+ mutex_unlock(&scst_mutex2);
-+
-+ /* Wait for outstanding sysfs mgmt calls completed */
-+ while (vtt->tgtt_active_sysfs_works_count > 0) {
-+ mutex_unlock(&scst_mutex);
-+ msleep(100);
-+ mutex_lock(&scst_mutex);
-+ }
-+
-+restart:
-+ list_for_each_entry(tgt, &vtt->tgt_list, tgt_list_entry) {
-+ mutex_unlock(&scst_mutex);
-+ scst_unregister_target(tgt);
-+ mutex_lock(&scst_mutex);
-+ goto restart;
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ scst_tgtt_sysfs_del(vtt);
-+
-+ PRINT_INFO("Target template %s unregistered successfully", vtt->name);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+
-+out_err_up:
-+ mutex_unlock(&scst_mutex);
-+ goto out;
-+}
-+EXPORT_SYMBOL(scst_unregister_target_template);
-+
-+/**
-+ * scst_register_target() - register target
-+ *
-+ * Registers a target for template vtt and returns new target structure on
-+ * success or NULL otherwise.
-+ */
-+struct scst_tgt *scst_register_target(struct scst_tgt_template *vtt,
-+ const char *target_name)
-+{
-+ struct scst_tgt *tgt;
-+ int rc = 0;
-+
-+ TRACE_ENTRY();
-+
-+ rc = scst_alloc_tgt(vtt, &tgt);
-+ if (rc != 0)
-+ goto out;
-+
-+ if (target_name != NULL) {
-+
-+ tgt->tgt_name = kmalloc(strlen(target_name) + 1, GFP_KERNEL);
-+ if (tgt->tgt_name == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "Allocation of tgt name %s failed",
-+ target_name);
-+ rc = -ENOMEM;
-+ goto out_free_tgt;
-+ }
-+ strcpy(tgt->tgt_name, target_name);
-+ } else {
-+ static int tgt_num; /* protected by scst_mutex */
-+ int len = strlen(vtt->name) +
-+ strlen(SCST_DEFAULT_TGT_NAME_SUFFIX) + 11 + 1;
-+
-+ tgt->tgt_name = kmalloc(len, GFP_KERNEL);
-+ if (tgt->tgt_name == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "Allocation of tgt name failed "
-+ "(template name %s)", vtt->name);
-+ rc = -ENOMEM;
-+ goto out_free_tgt;
-+ }
-+ sprintf(tgt->tgt_name, "%s%s%d", vtt->name,
-+ SCST_DEFAULT_TGT_NAME_SUFFIX, tgt_num++);
-+ }
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ rc = -EINTR;
-+ goto out_free_tgt;
-+ }
-+
-+ rc = scst_tgt_sysfs_create(tgt);
-+ if (rc < 0)
-+ goto out_unlock;
-+
-+ tgt->default_acg = scst_alloc_add_acg(tgt, tgt->tgt_name, false);
-+ if (tgt->default_acg == NULL)
-+ goto out_sysfs_del;
-+
-+ mutex_lock(&scst_mutex2);
-+ list_add_tail(&tgt->tgt_list_entry, &vtt->tgt_list);
-+ mutex_unlock(&scst_mutex2);
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ PRINT_INFO("Target %s for template %s registered successfully",
-+ tgt->tgt_name, vtt->name);
-+
-+ TRACE_DBG("tgt %p", tgt);
-+
-+out:
-+ TRACE_EXIT();
-+ return tgt;
-+
-+out_sysfs_del:
-+ mutex_unlock(&scst_mutex);
-+ scst_tgt_sysfs_del(tgt);
-+ goto out_free_tgt;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+out_free_tgt:
-+ /* In case of error tgt_name will be freed in scst_free_tgt() */
-+ scst_free_tgt(tgt);
-+ tgt = NULL;
-+ goto out;
-+}
-+EXPORT_SYMBOL(scst_register_target);
-+
-+static inline int test_sess_list(struct scst_tgt *tgt)
-+{
-+ int res;
-+ mutex_lock(&scst_mutex);
-+ res = list_empty(&tgt->sess_list);
-+ mutex_unlock(&scst_mutex);
-+ return res;
-+}
-+
-+/**
-+ * scst_unregister_target() - unregister target.
-+ *
-+ * It is supposed that no attepts to create new sessions for this
-+ * target will be done in a race with this function.
-+ */
-+void scst_unregister_target(struct scst_tgt *tgt)
-+{
-+ struct scst_session *sess;
-+ struct scst_tgt_template *vtt = tgt->tgtt;
-+ struct scst_acg *acg, *acg_tmp;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("%s", "Calling target driver's release()");
-+ tgt->tgtt->release(tgt);
-+ TRACE_DBG("%s", "Target driver's release() returned");
-+
-+ mutex_lock(&scst_mutex);
-+again:
-+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
-+ if (sess->shut_phase == SCST_SESS_SPH_READY) {
-+ /*
-+ * Sometimes it's hard for target driver to track all
-+ * its sessions (see scst_local, eg), so let's help it.
-+ */
-+ mutex_unlock(&scst_mutex);
-+ scst_unregister_session(sess, 0, NULL);
-+ mutex_lock(&scst_mutex);
-+ goto again;
-+ }
-+ }
-+ mutex_unlock(&scst_mutex);
-+
-+ TRACE_DBG("%s", "Waiting for sessions shutdown");
-+ wait_event(tgt->unreg_waitQ, test_sess_list(tgt));
-+ TRACE_DBG("%s", "wait_event() returned");
-+
-+ scst_suspend_activity(false);
-+ mutex_lock(&scst_mutex);
-+
-+ mutex_lock(&scst_mutex2);
-+ list_del(&tgt->tgt_list_entry);
-+ mutex_unlock(&scst_mutex2);
-+
-+ del_timer_sync(&tgt->retry_timer);
-+
-+ scst_del_free_acg(tgt->default_acg);
-+
-+ list_for_each_entry_safe(acg, acg_tmp, &tgt->tgt_acg_list,
-+ acg_list_entry) {
-+ scst_del_free_acg(acg);
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+ scst_resume_activity();
-+
-+ scst_tgt_sysfs_del(tgt);
-+
-+ PRINT_INFO("Target %s for template %s unregistered successfully",
-+ tgt->tgt_name, vtt->name);
-+
-+ scst_free_tgt(tgt);
-+
-+ TRACE_DBG("Unregistering tgt %p finished", tgt);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL(scst_unregister_target);
-+
-+static int scst_susp_wait(bool interruptible)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (interruptible) {
-+ res = wait_event_interruptible_timeout(scst_dev_cmd_waitQ,
-+ (atomic_read(&scst_cmd_count) == 0),
-+ SCST_SUSPENDING_TIMEOUT);
-+ if (res <= 0) {
-+ __scst_resume_activity();
-+ if (res == 0)
-+ res = -EBUSY;
-+ } else
-+ res = 0;
-+ } else
-+ wait_event(scst_dev_cmd_waitQ,
-+ atomic_read(&scst_cmd_count) == 0);
-+
-+ TRACE_MGMT_DBG("wait_event() returned %d", res);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/**
-+ * scst_suspend_activity() - globally suspend any activity
-+ *
-+ * Description:
-+ * Globally suspends any activity and doesn't return, until there are any
-+ * active commands (state after SCST_CMD_STATE_INIT). If "interruptible"
-+ * is true, it returns after SCST_SUSPENDING_TIMEOUT or if it was interrupted
-+ * by a signal with the corresponding error status < 0. If "interruptible"
-+ * is false, it will wait virtually forever. On success returns 0.
-+ *
-+ * New arriving commands stay in the suspended state until
-+ * scst_resume_activity() is called.
-+ */
-+int scst_suspend_activity(bool interruptible)
-+{
-+ int res = 0;
-+ bool rep = false;
-+
-+ TRACE_ENTRY();
-+
-+ if (interruptible) {
-+ if (mutex_lock_interruptible(&scst_suspend_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+ } else
-+ mutex_lock(&scst_suspend_mutex);
-+
-+ TRACE_MGMT_DBG("suspend_count %d", suspend_count);
-+ suspend_count++;
-+ if (suspend_count > 1)
-+ goto out_up;
-+
-+ set_bit(SCST_FLAG_SUSPENDING, &scst_flags);
-+ set_bit(SCST_FLAG_SUSPENDED, &scst_flags);
-+ /*
-+ * Assignment of SCST_FLAG_SUSPENDING and SCST_FLAG_SUSPENDED must be
-+ * ordered with scst_cmd_count. Otherwise lockless logic in
-+ * scst_translate_lun() and scst_mgmt_translate_lun() won't work.
-+ */
-+ smp_mb__after_set_bit();
-+
-+ /*
-+ * See comment in scst_user.c::dev_user_task_mgmt_fn() for more
-+ * information about scst_user behavior.
-+ *
-+ * ToDo: make the global suspending unneeded (switch to per-device
-+ * reference counting? That would mean to switch off from lockless
-+ * implementation of scst_translate_lun().. )
-+ */
-+
-+ if (atomic_read(&scst_cmd_count) != 0) {
-+ PRINT_INFO("Waiting for %d active commands to complete... This "
-+ "might take few minutes for disks or few hours for "
-+ "tapes, if you use long executed commands, like "
-+ "REWIND or FORMAT. In case, if you have a hung user "
-+ "space device (i.e. made using scst_user module) not "
-+ "responding to any commands, if might take virtually "
-+ "forever until the corresponding user space "
-+ "program recovers and starts responding or gets "
-+ "killed.", atomic_read(&scst_cmd_count));
-+ rep = true;
-+ }
-+
-+ res = scst_susp_wait(interruptible);
-+ if (res != 0)
-+ goto out_clear;
-+
-+ clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
-+ /* See comment about smp_mb() above */
-+ smp_mb__after_clear_bit();
-+
-+ TRACE_MGMT_DBG("Waiting for %d active commands finally to complete",
-+ atomic_read(&scst_cmd_count));
-+
-+ res = scst_susp_wait(interruptible);
-+ if (res != 0)
-+ goto out_clear;
-+
-+ if (rep)
-+ PRINT_INFO("%s", "All active commands completed");
++#define PR_REGISTER 0x00
++#define PR_RESERVE 0x01
++#define PR_RELEASE 0x02
++#define PR_CLEAR 0x03
++#define PR_PREEMPT 0x04
++#define PR_PREEMPT_AND_ABORT 0x05
++#define PR_REGISTER_AND_IGNORE 0x06
++#define PR_REGISTER_AND_MOVE 0x07
+
-+out_up:
-+ mutex_unlock(&scst_suspend_mutex);
++#define PR_READ_KEYS 0x00
++#define PR_READ_RESERVATION 0x01
++#define PR_REPORT_CAPS 0x02
++#define PR_READ_FULL_STATUS 0x03
+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
++#define TYPE_UNSPECIFIED (-1)
++#define TYPE_WRITE_EXCLUSIVE 0x01
++#define TYPE_EXCLUSIVE_ACCESS 0x03
++#define TYPE_WRITE_EXCLUSIVE_REGONLY 0x05
++#define TYPE_EXCLUSIVE_ACCESS_REGONLY 0x06
++#define TYPE_WRITE_EXCLUSIVE_ALL_REG 0x07
++#define TYPE_EXCLUSIVE_ACCESS_ALL_REG 0x08
+
-+out_clear:
-+ clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
-+ /* See comment about smp_mb() above */
-+ smp_mb__after_clear_bit();
-+ goto out_up;
-+}
-+EXPORT_SYMBOL_GPL(scst_suspend_activity);
++#define SCOPE_LU 0x00
+
-+static void __scst_resume_activity(void)
++static inline void scst_inc_pr_readers_count(struct scst_cmd *cmd,
++ bool locked)
+{
-+ struct scst_cmd_threads *l;
-+
-+ TRACE_ENTRY();
-+
-+ suspend_count--;
-+ TRACE_MGMT_DBG("suspend_count %d left", suspend_count);
-+ if (suspend_count > 0)
-+ goto out;
-+
-+ clear_bit(SCST_FLAG_SUSPENDED, &scst_flags);
-+ /*
-+ * The barrier is needed to make sure all woken up threads see the
-+ * cleared flag. Not sure if it's really needed, but let's be safe.
-+ */
-+ smp_mb__after_clear_bit();
-+
-+ list_for_each_entry(l, &scst_cmd_threads_list, lists_list_entry) {
-+ wake_up_all(&l->cmd_list_waitQ);
-+ }
-+ wake_up_all(&scst_init_cmd_list_waitQ);
++ struct scst_device *dev = cmd->dev;
+
-+ spin_lock_irq(&scst_mcmd_lock);
-+ if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
-+ struct scst_mgmt_cmd *m;
-+ m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
-+ mgmt_cmd_list_entry);
-+ TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to head of active "
-+ "mgmt cmd list", m);
-+ list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
-+ }
-+ spin_unlock_irq(&scst_mcmd_lock);
-+ wake_up_all(&scst_mgmt_cmd_list_waitQ);
++ EXTRACHECKS_BUG_ON(cmd->dec_pr_readers_count_needed);
+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
++ if (!locked)
++ spin_lock_bh(&dev->dev_lock);
+
-+/**
-+ * scst_resume_activity() - globally resume all activities
-+ *
-+ * Resumes suspended by scst_suspend_activity() activities.
-+ */
-+void scst_resume_activity(void)
-+{
-+ TRACE_ENTRY();
++#ifdef CONFIG_SMP
++ EXTRACHECKS_BUG_ON(!spin_is_locked(&dev->dev_lock));
++#endif
+
-+ mutex_lock(&scst_suspend_mutex);
-+ __scst_resume_activity();
-+ mutex_unlock(&scst_suspend_mutex);
++ dev->pr_readers_count++;
++ cmd->dec_pr_readers_count_needed = 1;
++ TRACE_DBG("New inc pr_readers_count %d (cmd %p)", dev->pr_readers_count,
++ cmd);
+
-+ TRACE_EXIT();
++ if (!locked)
++ spin_unlock_bh(&dev->dev_lock);
+ return;
+}
-+EXPORT_SYMBOL_GPL(scst_resume_activity);
+
-+static int scst_register_device(struct scsi_device *scsidp)
++static inline void scst_dec_pr_readers_count(struct scst_cmd *cmd,
++ bool locked)
+{
-+ int res = 0;
-+ struct scst_device *dev, *d;
-+
-+ TRACE_ENTRY();
++ struct scst_device *dev = cmd->dev;
+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
++ if (unlikely(!cmd->dec_pr_readers_count_needed)) {
++ PRINT_ERROR("scst_check_local_events() should not be called "
++ "twice (cmd %p, op %x)! Use "
++ "scst_pre_check_local_events() instead.", cmd,
++ cmd->cdb[0]);
++ WARN_ON(1);
+ goto out;
+ }
+
-+ res = scst_alloc_device(GFP_KERNEL, &dev);
-+ if (res != 0)
-+ goto out_unlock;
-+
-+ dev->type = scsidp->type;
-+
-+ dev->virt_name = kmalloc(50, GFP_KERNEL);
-+ if (dev->virt_name == NULL) {
-+ PRINT_ERROR("%s", "Unable to alloc device name");
-+ res = -ENOMEM;
-+ goto out_free_dev;
-+ }
-+ snprintf(dev->virt_name, 50, "%d:%d:%d:%d", scsidp->host->host_no,
-+ scsidp->channel, scsidp->id, scsidp->lun);
-+
-+ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
-+ if (strcmp(d->virt_name, dev->virt_name) == 0) {
-+ PRINT_ERROR("Device %s already exists", dev->virt_name);
-+ res = -EEXIST;
-+ goto out_free_dev;
-+ }
-+ }
-+
-+ dev->scsi_dev = scsidp;
-+
-+ list_add_tail(&dev->dev_list_entry, &scst_dev_list);
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ res = scst_dev_sysfs_create(dev);
-+ if (res != 0)
-+ goto out_del;
-+
-+ PRINT_INFO("Attached to scsi%d, channel %d, id %d, lun %d, "
-+ "type %d", scsidp->host->host_no, scsidp->channel,
-+ scsidp->id, scsidp->lun, scsidp->type);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_del:
-+ list_del(&dev->dev_list_entry);
-+
-+out_free_dev:
-+ scst_free_device(dev);
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+ goto out;
-+}
-+
-+static void scst_unregister_device(struct scsi_device *scsidp)
-+{
-+ struct scst_device *d, *dev = NULL;
-+ struct scst_acg_dev *acg_dev, *aa;
-+
-+ TRACE_ENTRY();
-+
-+ scst_suspend_activity(false);
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
-+ if (d->scsi_dev == scsidp) {
-+ dev = d;
-+ TRACE_DBG("Device %p found", dev);
-+ break;
-+ }
-+ }
-+ if (dev == NULL) {
-+ PRINT_ERROR("SCST device for SCSI device %d:%d:%d:%d not found",
-+ scsidp->host->host_no, scsidp->channel, scsidp->id,
-+ scsidp->lun);
-+ goto out_unlock;
-+ }
-+
-+ list_del(&dev->dev_list_entry);
-+
-+ scst_assign_dev_handler(dev, &scst_null_devtype);
-+
-+ list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
-+ dev_acg_dev_list_entry) {
-+ scst_acg_del_lun(acg_dev->acg, acg_dev->lun, true);
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ scst_resume_activity();
++ if (!locked)
++ spin_lock_bh(&dev->dev_lock);
+
-+ scst_dev_sysfs_del(dev);
++#ifdef CONFIG_SMP
++ EXTRACHECKS_BUG_ON(!spin_is_locked(&dev->dev_lock));
++#endif
+
-+ PRINT_INFO("Detached from scsi%d, channel %d, id %d, lun %d, type %d",
-+ scsidp->host->host_no, scsidp->channel, scsidp->id,
-+ scsidp->lun, scsidp->type);
++ dev->pr_readers_count--;
++ cmd->dec_pr_readers_count_needed = 0;
++ TRACE_DBG("New dec pr_readers_count %d (cmd %p)", dev->pr_readers_count,
++ cmd);
+
-+ scst_free_device(dev);
++ if (!locked)
++ spin_unlock_bh(&dev->dev_lock);
+
+out:
-+ TRACE_EXIT();
++ EXTRACHECKS_BUG_ON(dev->pr_readers_count < 0);
+ return;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+ scst_resume_activity();
-+ goto out;
-+}
-+
-+static int scst_dev_handler_check(struct scst_dev_type *dev_handler)
-+{
-+ int res = 0;
-+
-+ if (dev_handler->parse == NULL) {
-+ PRINT_ERROR("scst dev handler %s must have "
-+ "parse() method.", dev_handler->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (((dev_handler->add_device != NULL) &&
-+ (dev_handler->del_device == NULL)) ||
-+ ((dev_handler->add_device == NULL) &&
-+ (dev_handler->del_device != NULL))) {
-+ PRINT_ERROR("Dev handler %s must either define both "
-+ "add_device() and del_device(), or none.",
-+ dev_handler->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (dev_handler->alloc_data_buf == NULL)
-+ dev_handler->alloc_data_buf_atomic = 1;
-+
-+ if (dev_handler->dev_done == NULL)
-+ dev_handler->dev_done_atomic = 1;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_check_device_name(const char *dev_name)
-+{
-+ int res = 0;
-+
-+ if (strchr(dev_name, '/') != NULL) {
-+ PRINT_ERROR("Dev name %s contains illegal character '/'",
-+ dev_name);
-+ res = -EINVAL;
-+ }
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
+}
+
-+/**
-+ * scst_register_virtual_device() - register a virtual device.
-+ * @dev_handler: the device's device handler
-+ * @dev_name: the new device name, NULL-terminated string. Must be uniq
-+ * among all virtual devices in the system.
-+ *
-+ * Registers a virtual device and returns assinged to the device ID on
-+ * success, or negative value otherwise
-+ */
-+int scst_register_virtual_device(struct scst_dev_type *dev_handler,
-+ const char *dev_name)
++static inline void scst_reset_requeued_cmd(struct scst_cmd *cmd)
+{
-+ int res, rc;
-+ struct scst_device *dev, *d;
-+ bool sysfs_del = false;
-+
-+ TRACE_ENTRY();
-+
-+ if (dev_handler == NULL) {
-+ PRINT_ERROR("%s: valid device handler must be supplied",
-+ __func__);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (dev_name == NULL) {
-+ PRINT_ERROR("%s: device name must be non-NULL", __func__);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ res = scst_check_device_name(dev_name);
-+ if (res != 0)
-+ goto out;
-+
-+ res = scst_dev_handler_check(dev_handler);
-+ if (res != 0)
-+ goto out;
-+
-+ res = scst_suspend_activity(true);
-+ if (res != 0)
-+ goto out;
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out_resume;
-+ }
-+
-+ res = scst_alloc_device(GFP_KERNEL, &dev);
-+ if (res != 0)
-+ goto out_unlock;
-+
-+ dev->type = dev_handler->type;
-+ dev->scsi_dev = NULL;
-+ dev->virt_name = kstrdup(dev_name, GFP_KERNEL);
-+ if (dev->virt_name == NULL) {
-+ PRINT_ERROR("Unable to allocate virt_name for dev %s",
-+ dev_name);
-+ res = -ENOMEM;
-+ goto out_free_dev;
-+ }
-+
-+ while (1) {
-+ dev->virt_id = scst_virt_dev_last_id++;
-+ if (dev->virt_id > 0)
-+ break;
-+ scst_virt_dev_last_id = 1;
-+ }
-+
-+ res = dev->virt_id;
-+
-+ rc = scst_pr_init_dev(dev);
-+ if (rc != 0) {
-+ res = rc;
-+ goto out_free_dev;
-+ }
-+
-+ /*
-+ * We can drop scst_mutex, because we have not yet added the dev in
-+ * scst_dev_list, so it "doesn't exist" yet.
-+ */
-+ mutex_unlock(&scst_mutex);
-+
-+ res = scst_dev_sysfs_create(dev);
-+ if (res != 0)
-+ goto out_lock_pr_clear_dev;
-+
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
-+ if (strcmp(d->virt_name, dev_name) == 0) {
-+ PRINT_ERROR("Device %s already exists", dev_name);
-+ res = -EEXIST;
-+ sysfs_del = true;
-+ goto out_pr_clear_dev;
-+ }
-+ }
-+
-+ rc = scst_assign_dev_handler(dev, dev_handler);
-+ if (rc != 0) {
-+ res = rc;
-+ sysfs_del = true;
-+ goto out_pr_clear_dev;
-+ }
-+
-+ list_add_tail(&dev->dev_list_entry, &scst_dev_list);
-+
-+ mutex_unlock(&scst_mutex);
-+ scst_resume_activity();
-+
-+ res = dev->virt_id;
-+
-+ PRINT_INFO("Attached to virtual device %s (id %d)",
-+ dev_name, res);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_lock_pr_clear_dev:
-+ mutex_lock(&scst_mutex);
-+
-+out_pr_clear_dev:
-+ scst_pr_clear_dev(dev);
-+
-+out_free_dev:
-+ mutex_unlock(&scst_mutex);
-+ if (sysfs_del)
-+ scst_dev_sysfs_del(dev);
-+ scst_free_device(dev);
-+ goto out_resume;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+out_resume:
-+ scst_resume_activity();
-+ goto out;
-+}
-+EXPORT_SYMBOL_GPL(scst_register_virtual_device);
-+
-+/**
-+ * scst_unregister_virtual_device() - unegister a virtual device.
-+ * @id: the device's ID, returned by the registration function
-+ */
-+void scst_unregister_virtual_device(int id)
-+{
-+ struct scst_device *d, *dev = NULL;
-+ struct scst_acg_dev *acg_dev, *aa;
-+
-+ TRACE_ENTRY();
-+
-+ scst_suspend_activity(false);
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
-+ if (d->virt_id == id) {
-+ dev = d;
-+ TRACE_DBG("Virtual device %p (id %d) found", dev, id);
-+ break;
-+ }
-+ }
-+ if (dev == NULL) {
-+ PRINT_ERROR("Virtual device (id %d) not found", id);
-+ goto out_unlock;
-+ }
-+
-+ list_del(&dev->dev_list_entry);
-+
-+ scst_pr_clear_dev(dev);
-+
-+ scst_assign_dev_handler(dev, &scst_null_devtype);
-+
-+ list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
-+ dev_acg_dev_list_entry) {
-+ scst_acg_del_lun(acg_dev->acg, acg_dev->lun, true);
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+ scst_resume_activity();
-+
-+ scst_dev_sysfs_del(dev);
-+
-+ PRINT_INFO("Detached from virtual device %s (id %d)",
-+ dev->virt_name, dev->virt_id);
-+
-+ scst_free_device(dev);
-+
-+out:
-+ TRACE_EXIT();
++ TRACE_DBG("Reset requeued cmd %p (op %x)", cmd, cmd->cdb[0]);
++ scst_inc_pr_readers_count(cmd, false);
++ cmd->check_local_events_once_done = 0;
+ return;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+ scst_resume_activity();
-+ goto out;
+}
-+EXPORT_SYMBOL_GPL(scst_unregister_virtual_device);
+
-+/**
-+ * __scst_register_dev_driver() - register pass-through dev handler driver
-+ * @dev_type: dev handler template
-+ * @version: SCST_INTERFACE_VERSION version string to ensure that
-+ * SCST core and the dev handler use the same version of
-+ * the SCST interface
-+ *
-+ * Description:
-+ * Registers a pass-through dev handler driver. Returns 0 on success
-+ * or appropriate error code otherwise.
-+ */
-+int __scst_register_dev_driver(struct scst_dev_type *dev_type,
-+ const char *version)
++static inline bool scst_pr_type_valid(uint8_t type)
+{
-+ int res, exist;
-+ struct scst_dev_type *dt;
-+
-+ TRACE_ENTRY();
-+
-+ if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
-+ PRINT_ERROR("Incorrect version of dev handler %s",
-+ dev_type->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ res = scst_dev_handler_check(dev_type);
-+ if (res != 0)
-+ goto out;
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ exist = 0;
-+ list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
-+ if (strcmp(dt->name, dev_type->name) == 0) {
-+ PRINT_ERROR("Device type handler \"%s\" already "
-+ "exist", dt->name);
-+ exist = 1;
-+ break;
-+ }
++ switch (type) {
++ case TYPE_WRITE_EXCLUSIVE:
++ case TYPE_EXCLUSIVE_ACCESS:
++ case TYPE_WRITE_EXCLUSIVE_REGONLY:
++ case TYPE_EXCLUSIVE_ACCESS_REGONLY:
++ case TYPE_WRITE_EXCLUSIVE_ALL_REG:
++ case TYPE_EXCLUSIVE_ACCESS_ALL_REG:
++ return true;
++ default:
++ return false;
+ }
-+ if (exist)
-+ goto out_unlock;
-+
-+ list_add_tail(&dev_type->dev_type_list_entry, &scst_dev_type_list);
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ res = scst_devt_sysfs_create(dev_type);
-+ if (res < 0)
-+ goto out;
-+
-+ PRINT_INFO("Device handler \"%s\" for type %d registered "
-+ "successfully", dev_type->name, dev_type->type);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+ goto out;
+}
-+EXPORT_SYMBOL_GPL(__scst_register_dev_driver);
+
-+/**
-+ * scst_unregister_dev_driver() - unregister pass-through dev handler driver
-+ */
-+void scst_unregister_dev_driver(struct scst_dev_type *dev_type)
++static inline bool scst_pr_read_lock(struct scst_cmd *cmd)
+{
-+ struct scst_device *dev;
-+ struct scst_dev_type *dt;
-+ int found = 0;
++ struct scst_device *dev = cmd->dev;
++ bool unlock = false;
+
+ TRACE_ENTRY();
+
-+ scst_suspend_activity(false);
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
-+ if (strcmp(dt->name, dev_type->name) == 0) {
-+ found = 1;
-+ break;
-+ }
-+ }
-+ if (!found) {
-+ PRINT_ERROR("Dev handler \"%s\" isn't registered",
-+ dev_type->name);
-+ goto out_up;
-+ }
-+
-+ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
-+ if (dev->handler == dev_type) {
-+ scst_assign_dev_handler(dev, &scst_null_devtype);
-+ TRACE_DBG("Dev handler removed from device %p", dev);
-+ }
++ smp_mb(); /* to sync with scst_pr_write_lock() */
++ if (unlikely(dev->pr_writer_active)) {
++ unlock = true;
++ scst_dec_pr_readers_count(cmd, false);
++ mutex_lock(&dev->dev_pr_mutex);
+ }
+
-+ list_del(&dev_type->dev_type_list_entry);
-+
-+ mutex_unlock(&scst_mutex);
-+ scst_resume_activity();
-+
-+ scst_devt_sysfs_del(dev_type);
-+
-+ PRINT_INFO("Device handler \"%s\" for type %d unloaded",
-+ dev_type->name, dev_type->type);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+
-+out_up:
-+ mutex_unlock(&scst_mutex);
-+ scst_resume_activity();
-+ goto out;
++ TRACE_EXIT_RES(unlock);
++ return unlock;
+}
-+EXPORT_SYMBOL_GPL(scst_unregister_dev_driver);
+
-+/**
-+ * __scst_register_virtual_dev_driver() - register virtual dev handler driver
-+ * @dev_type: dev handler template
-+ * @version: SCST_INTERFACE_VERSION version string to ensure that
-+ * SCST core and the dev handler use the same version of
-+ * the SCST interface
-+ *
-+ * Description:
-+ * Registers a virtual dev handler driver. Returns 0 on success or
-+ * appropriate error code otherwise.
-+ */
-+int __scst_register_virtual_dev_driver(struct scst_dev_type *dev_type,
-+ const char *version)
++static inline void scst_pr_read_unlock(struct scst_cmd *cmd, bool unlock)
+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
-+ PRINT_ERROR("Incorrect version of virtual dev handler %s",
-+ dev_type->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ res = scst_dev_handler_check(dev_type);
-+ if (res != 0)
-+ goto out;
-+
-+ mutex_lock(&scst_mutex);
-+ list_add_tail(&dev_type->dev_type_list_entry, &scst_virtual_dev_type_list);
-+ mutex_unlock(&scst_mutex);
-+
-+ res = scst_devt_sysfs_create(dev_type);
-+ if (res < 0)
-+ goto out;
-+
-+ if (dev_type->type != -1) {
-+ PRINT_INFO("Virtual device handler %s for type %d "
-+ "registered successfully", dev_type->name,
-+ dev_type->type);
-+ } else {
-+ PRINT_INFO("Virtual device handler \"%s\" registered "
-+ "successfully", dev_type->name);
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(__scst_register_virtual_dev_driver);
++ struct scst_device *dev = cmd->dev;
+
-+/**
-+ * scst_unregister_virtual_dev_driver() - unregister virtual dev driver
-+ */
-+void scst_unregister_virtual_dev_driver(struct scst_dev_type *dev_type)
-+{
+ TRACE_ENTRY();
+
-+ mutex_lock(&scst_mutex);
-+
-+ /* Disable sysfs mgmt calls (e.g. addition of new devices) */
-+ list_del(&dev_type->dev_type_list_entry);
-+
-+ /* Wait for outstanding sysfs mgmt calls completed */
-+ while (dev_type->devt_active_sysfs_works_count > 0) {
-+ mutex_unlock(&scst_mutex);
-+ msleep(100);
-+ mutex_lock(&scst_mutex);
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ scst_devt_sysfs_del(dev_type);
-+
-+ PRINT_INFO("Device handler \"%s\" unloaded", dev_type->name);
++ if (unlikely(unlock))
++ mutex_unlock(&dev->dev_pr_mutex);
++ else
++ scst_dec_pr_readers_count(cmd, false);
+
+ TRACE_EXIT();
+ return;
+}
-+EXPORT_SYMBOL_GPL(scst_unregister_virtual_dev_driver);
-+
-+/* scst_mutex supposed to be held */
-+int scst_add_threads(struct scst_cmd_threads *cmd_threads,
-+ struct scst_device *dev, struct scst_tgt_dev *tgt_dev, int num)
-+{
-+ int res = 0, i;
-+ struct scst_cmd_thread_t *thr;
-+ int n = 0, tgt_dev_num = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (num == 0) {
-+ res = 0;
-+ goto out;
-+ }
-+
-+ list_for_each_entry(thr, &cmd_threads->threads_list, thread_list_entry) {
-+ n++;
-+ }
-+
-+ TRACE_DBG("cmd_threads %p, dev %p, tgt_dev %p, num %d, n %d",
-+ cmd_threads, dev, tgt_dev, num, n);
-+
-+ if (tgt_dev != NULL) {
-+ struct scst_tgt_dev *t;
-+ list_for_each_entry(t, &tgt_dev->dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ if (t == tgt_dev)
-+ break;
-+ tgt_dev_num++;
-+ }
-+ }
-+
-+ for (i = 0; i < num; i++) {
-+ thr = kmalloc(sizeof(*thr), GFP_KERNEL);
-+ if (!thr) {
-+ res = -ENOMEM;
-+ PRINT_ERROR("Fail to allocate thr %d", res);
-+ goto out_wait;
-+ }
-+
-+ if (dev != NULL) {
-+ char nm[14]; /* to limit the name's len */
-+ strlcpy(nm, dev->virt_name, ARRAY_SIZE(nm));
-+ thr->cmd_thread = kthread_create(scst_cmd_thread,
-+ cmd_threads, "%s%d", nm, n++);
-+ } else if (tgt_dev != NULL) {
-+ char nm[11]; /* to limit the name's len */
-+ strlcpy(nm, tgt_dev->dev->virt_name, ARRAY_SIZE(nm));
-+ thr->cmd_thread = kthread_create(scst_cmd_thread,
-+ cmd_threads, "%s%d_%d", nm, tgt_dev_num, n++);
-+ } else
-+ thr->cmd_thread = kthread_create(scst_cmd_thread,
-+ cmd_threads, "scstd%d", n++);
-+
-+ if (IS_ERR(thr->cmd_thread)) {
-+ res = PTR_ERR(thr->cmd_thread);
-+ PRINT_ERROR("kthread_create() failed: %d", res);
-+ kfree(thr);
-+ goto out_wait;
-+ }
-+
-+ list_add(&thr->thread_list_entry, &cmd_threads->threads_list);
-+ cmd_threads->nr_threads++;
-+
-+ TRACE_DBG("Added thr %p to threads list (nr_threads %d, n %d)",
-+ thr, cmd_threads->nr_threads, n);
-+
-+ wake_up_process(thr->cmd_thread);
-+ }
-+
-+out_wait:
-+ if (i > 0 && cmd_threads != &scst_main_cmd_threads) {
-+ /*
-+ * Wait for io_context gets initialized to avoid possible races
-+ * for it from the sharing it tgt_devs.
-+ */
-+ while (!*(volatile bool*)&cmd_threads->io_context_ready) {
-+ TRACE_DBG("Waiting for io_context for cmd_threads %p "
-+ "initialized", cmd_threads);
-+ msleep(50);
-+ }
-+ }
-+
-+ if (res != 0)
-+ scst_del_threads(cmd_threads, i);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
+
-+/* scst_mutex supposed to be held */
-+void scst_del_threads(struct scst_cmd_threads *cmd_threads, int num)
++static inline void scst_pr_write_lock(struct scst_device *dev)
+{
-+ struct scst_cmd_thread_t *ct, *tmp;
-+
+ TRACE_ENTRY();
+
-+ if (num == 0)
-+ goto out;
-+
-+ list_for_each_entry_safe_reverse(ct, tmp, &cmd_threads->threads_list,
-+ thread_list_entry) {
-+ int rc;
-+ struct scst_device *dev;
-+
-+ rc = kthread_stop(ct->cmd_thread);
-+ if (rc != 0 && rc != -EINTR)
-+ TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
-+
-+ list_del(&ct->thread_list_entry);
-+
-+ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
-+ struct scst_tgt_dev *tgt_dev;
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ scst_del_thr_data(tgt_dev, ct->cmd_thread);
-+ }
-+ }
-+
-+ kfree(ct);
++ mutex_lock(&dev->dev_pr_mutex);
+
-+ cmd_threads->nr_threads--;
++ dev->pr_writer_active = 1;
++ /* to sync with scst_pr_read_lock() and unlock() */
++ smp_mb();
+
-+ --num;
-+ if (num == 0)
++ while (true) {
++ int readers;
++ spin_lock_bh(&dev->dev_lock);
++ readers = dev->pr_readers_count;
++ spin_unlock_bh(&dev->dev_lock);
++ if (readers == 0)
+ break;
++ TRACE_DBG("Waiting for %d readers (dev %p)", readers, dev);
++ msleep(1);
+ }
+
-+ EXTRACHECKS_BUG_ON((cmd_threads->nr_threads == 0) &&
-+ (cmd_threads->io_context != NULL));
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* The activity supposed to be suspended and scst_mutex held */
-+void scst_stop_dev_threads(struct scst_device *dev)
-+{
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ scst_tgt_dev_stop_threads(tgt_dev);
-+ }
-+
-+ if ((dev->threads_num > 0) &&
-+ (dev->threads_pool_type == SCST_THREADS_POOL_SHARED))
-+ scst_del_threads(&dev->dev_cmd_threads, -1);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* The activity supposed to be suspended and scst_mutex held */
-+int scst_create_dev_threads(struct scst_device *dev)
-+{
-+ int res = 0;
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ res = scst_tgt_dev_setup_threads(tgt_dev);
-+ if (res != 0)
-+ goto out_err;
-+ }
-+
-+ if ((dev->threads_num > 0) &&
-+ (dev->threads_pool_type == SCST_THREADS_POOL_SHARED)) {
-+ res = scst_add_threads(&dev->dev_cmd_threads, dev, NULL,
-+ dev->threads_num);
-+ if (res != 0)
-+ goto out_err;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_err:
-+ scst_stop_dev_threads(dev);
-+ goto out;
-+}
-+
-+/* The activity supposed to be suspended and scst_mutex held */
-+int scst_assign_dev_handler(struct scst_device *dev,
-+ struct scst_dev_type *handler)
-+{
-+ int res = 0;
-+ struct scst_tgt_dev *tgt_dev;
-+ LIST_HEAD(attached_tgt_devs);
-+
-+ TRACE_ENTRY();
-+
-+ BUG_ON(handler == NULL);
-+
-+ if (dev->handler == handler)
-+ goto out;
-+
-+ if (dev->handler == NULL)
-+ goto assign;
-+
-+ if (dev->handler->detach_tgt) {
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ TRACE_DBG("Calling dev handler's detach_tgt(%p)",
-+ tgt_dev);
-+ dev->handler->detach_tgt(tgt_dev);
-+ TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
-+ }
-+ }
-+
-+ /*
-+ * devt_dev sysfs must be created AFTER attach() and deleted BEFORE
-+ * detach() to avoid calls from sysfs for not yet ready or already dead
-+ * objects.
-+ */
-+ scst_devt_dev_sysfs_del(dev);
-+
-+ if (dev->handler->detach) {
-+ TRACE_DBG("%s", "Calling dev handler's detach()");
-+ dev->handler->detach(dev);
-+ TRACE_DBG("%s", "Old handler's detach() returned");
-+ }
-+
-+ scst_stop_dev_threads(dev);
-+
-+assign:
-+ dev->handler = handler;
-+
-+ if (handler == NULL)
-+ goto out;
-+
-+ dev->threads_num = handler->threads_num;
-+ dev->threads_pool_type = handler->threads_pool_type;
-+
-+ if (handler->attach) {
-+ TRACE_DBG("Calling new dev handler's attach(%p)", dev);
-+ res = handler->attach(dev);
-+ TRACE_DBG("New dev handler's attach() returned %d", res);
-+ if (res != 0) {
-+ PRINT_ERROR("New device handler's %s attach() "
-+ "failed: %d", handler->name, res);
-+ goto out;
-+ }
-+ }
-+
-+ res = scst_devt_dev_sysfs_create(dev);
-+ if (res != 0)
-+ goto out_detach;
-+
-+ if (handler->attach_tgt) {
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ TRACE_DBG("Calling dev handler's attach_tgt(%p)",
-+ tgt_dev);
-+ res = handler->attach_tgt(tgt_dev);
-+ TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
-+ if (res != 0) {
-+ PRINT_ERROR("Device handler's %s attach_tgt() "
-+ "failed: %d", handler->name, res);
-+ goto out_err_remove_sysfs;
-+ }
-+ list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
-+ &attached_tgt_devs);
-+ }
-+ }
-+
-+ res = scst_create_dev_threads(dev);
-+ if (res != 0)
-+ goto out_err_detach_tgt;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_err_detach_tgt:
-+ if (handler && handler->detach_tgt) {
-+ list_for_each_entry(tgt_dev, &attached_tgt_devs,
-+ extra_tgt_dev_list_entry) {
-+ TRACE_DBG("Calling handler's detach_tgt(%p)",
-+ tgt_dev);
-+ handler->detach_tgt(tgt_dev);
-+ TRACE_DBG("%s", "Handler's detach_tgt() returned");
-+ }
-+ }
-+
-+out_err_remove_sysfs:
-+ scst_devt_dev_sysfs_del(dev);
-+
-+out_detach:
-+ if (handler && handler->detach) {
-+ TRACE_DBG("%s", "Calling handler's detach()");
-+ handler->detach(dev);
-+ TRACE_DBG("%s", "Handler's detach() returned");
-+ }
-+
-+ dev->handler = &scst_null_devtype;
-+ dev->threads_num = scst_null_devtype.threads_num;
-+ dev->threads_pool_type = scst_null_devtype.threads_pool_type;
-+ goto out;
-+}
-+
-+/**
-+ * scst_init_threads() - initialize SCST processing threads pool
-+ *
-+ * Initializes scst_cmd_threads structure
-+ */
-+void scst_init_threads(struct scst_cmd_threads *cmd_threads)
-+{
-+ TRACE_ENTRY();
-+
-+ spin_lock_init(&cmd_threads->cmd_list_lock);
-+ INIT_LIST_HEAD(&cmd_threads->active_cmd_list);
-+ init_waitqueue_head(&cmd_threads->cmd_list_waitQ);
-+ INIT_LIST_HEAD(&cmd_threads->threads_list);
-+ mutex_init(&cmd_threads->io_context_mutex);
-+
-+ mutex_lock(&scst_suspend_mutex);
-+ list_add_tail(&cmd_threads->lists_list_entry,
-+ &scst_cmd_threads_list);
-+ mutex_unlock(&scst_suspend_mutex);
-+
+ TRACE_EXIT();
+ return;
+}
-+EXPORT_SYMBOL_GPL(scst_init_threads);
-+
-+/**
-+ * scst_deinit_threads() - deinitialize SCST processing threads pool
-+ *
-+ * Deinitializes scst_cmd_threads structure
-+ */
-+void scst_deinit_threads(struct scst_cmd_threads *cmd_threads)
-+{
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_suspend_mutex);
-+ list_del(&cmd_threads->lists_list_entry);
-+ mutex_unlock(&scst_suspend_mutex);
-+
-+ BUG_ON(cmd_threads->io_context);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_deinit_threads);
-+
-+static void scst_stop_global_threads(void)
-+{
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_mutex);
-+
-+ scst_del_threads(&scst_main_cmd_threads, -1);
-+
-+ if (scst_mgmt_cmd_thread)
-+ kthread_stop(scst_mgmt_cmd_thread);
-+ if (scst_mgmt_thread)
-+ kthread_stop(scst_mgmt_thread);
-+ if (scst_init_cmd_thread)
-+ kthread_stop(scst_init_cmd_thread);
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* It does NOT stop ran threads on error! */
-+static int scst_start_global_threads(int num)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_mutex);
-+
-+ res = scst_add_threads(&scst_main_cmd_threads, NULL, NULL, num);
-+ if (res < 0)
-+ goto out_unlock;
-+
-+ scst_init_cmd_thread = kthread_run(scst_init_thread,
-+ NULL, "scst_initd");
-+ if (IS_ERR(scst_init_cmd_thread)) {
-+ res = PTR_ERR(scst_init_cmd_thread);
-+ PRINT_ERROR("kthread_create() for init cmd failed: %d", res);
-+ scst_init_cmd_thread = NULL;
-+ goto out_unlock;
-+ }
-+
-+ scst_mgmt_cmd_thread = kthread_run(scst_tm_thread,
-+ NULL, "scsi_tm");
-+ if (IS_ERR(scst_mgmt_cmd_thread)) {
-+ res = PTR_ERR(scst_mgmt_cmd_thread);
-+ PRINT_ERROR("kthread_create() for TM failed: %d", res);
-+ scst_mgmt_cmd_thread = NULL;
-+ goto out_unlock;
-+ }
-+
-+ scst_mgmt_thread = kthread_run(scst_global_mgmt_thread,
-+ NULL, "scst_mgmtd");
-+ if (IS_ERR(scst_mgmt_thread)) {
-+ res = PTR_ERR(scst_mgmt_thread);
-+ PRINT_ERROR("kthread_create() for mgmt failed: %d", res);
-+ scst_mgmt_thread = NULL;
-+ goto out_unlock;
-+ }
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/**
-+ * scst_get() - increase global SCST ref counter
-+ *
-+ * Increases global SCST ref counter that prevents from entering into suspended
-+ * activities stage, so protects from any global management operations.
-+ */
-+void scst_get(void)
-+{
-+ __scst_get();
-+}
-+EXPORT_SYMBOL(scst_get);
-+
-+/**
-+ * scst_put() - decrease global SCST ref counter
-+ *
-+ * Decreses global SCST ref counter that prevents from entering into suspended
-+ * activities stage, so protects from any global management operations. On
-+ * zero, if suspending activities is waiting, they will be suspended.
-+ */
-+void scst_put(void)
-+{
-+ __scst_put();
-+}
-+EXPORT_SYMBOL(scst_put);
-+
-+/**
-+ * scst_get_setup_id() - return SCST setup ID
-+ *
-+ * Returns SCST setup ID. This ID can be used for multiple
-+ * setups with the same configuration.
-+ */
-+unsigned int scst_get_setup_id(void)
-+{
-+ return scst_setup_id;
-+}
-+EXPORT_SYMBOL_GPL(scst_get_setup_id);
-+
-+static int scst_add(struct device *cdev, struct class_interface *intf)
-+{
-+ struct scsi_device *scsidp;
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ scsidp = to_scsi_device(cdev->parent);
-+
-+ if ((scsidp->host->hostt->name == NULL) ||
-+ (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0))
-+ res = scst_register_device(scsidp);
-+
-+ TRACE_EXIT();
-+ return res;
-+}
+
-+static void scst_remove(struct device *cdev, struct class_interface *intf)
++static inline void scst_pr_write_unlock(struct scst_device *dev)
+{
-+ struct scsi_device *scsidp;
-+
+ TRACE_ENTRY();
+
-+ scsidp = to_scsi_device(cdev->parent);
-+
-+ if ((scsidp->host->hostt->name == NULL) ||
-+ (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0))
-+ scst_unregister_device(scsidp);
++ dev->pr_writer_active = 0;
++ mutex_unlock(&dev->dev_pr_mutex);
+
+ TRACE_EXIT();
+ return;
+}
+
-+static struct class_interface scst_interface = {
-+ .add_dev = scst_add,
-+ .remove_dev = scst_remove,
-+};
-+
-+static void __init scst_print_config(void)
-+{
-+ char buf[128];
-+ int i, j;
-+
-+ i = snprintf(buf, sizeof(buf), "Enabled features: ");
-+ j = i;
-+
-+#ifdef CONFIG_SCST_STRICT_SERIALIZING
-+ i += snprintf(&buf[i], sizeof(buf) - i, "STRICT_SERIALIZING");
-+#endif
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sEXTRACHECKS",
-+ (j == i) ? "" : ", ");
-+#endif
-+
-+#ifdef CONFIG_SCST_TRACING
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sTRACING",
-+ (j == i) ? "" : ", ");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG",
-+ (j == i) ? "" : ", ");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_TM
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_TM",
-+ (j == i) ? "" : ", ");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_RETRY
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_RETRY",
-+ (j == i) ? "" : ", ");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_OOM
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_OOM",
-+ (j == i) ? "" : ", ");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_SN
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_SN",
-+ (j == i) ? "" : ", ");
-+#endif
-+
-+#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sUSE_EXPECTED_VALUES",
-+ (j == i) ? "" : ", ");
-+#endif
-+
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ i += snprintf(&buf[i], sizeof(buf) - i,
-+ "%sTEST_IO_IN_SIRQ",
-+ (j == i) ? "" : ", ");
-+#endif
++int scst_pr_init_dev(struct scst_device *dev);
++void scst_pr_clear_dev(struct scst_device *dev);
+
-+#ifdef CONFIG_SCST_STRICT_SECURITY
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sSTRICT_SECURITY",
-+ (j == i) ? "" : ", ");
-+#endif
++int scst_pr_init_tgt_dev(struct scst_tgt_dev *tgt_dev);
++void scst_pr_clear_tgt_dev(struct scst_tgt_dev *tgt_dev);
+
-+ if (j != i)
-+ PRINT_INFO("%s", buf);
-+}
++bool scst_pr_crh_case(struct scst_cmd *cmd);
++bool scst_pr_is_cmd_allowed(struct scst_cmd *cmd);
+
-+static int __init init_scst(void)
-+{
-+ int res, i;
-+ int scst_num_cpus;
++void scst_pr_register(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
++void scst_pr_register_and_ignore(struct scst_cmd *cmd, uint8_t *buffer,
++ int buffer_size);
++void scst_pr_register_and_move(struct scst_cmd *cmd, uint8_t *buffer,
++ int buffer_size);
++void scst_pr_reserve(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
++void scst_pr_release(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
++void scst_pr_clear(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
++void scst_pr_preempt(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
++void scst_pr_preempt_and_abort(struct scst_cmd *cmd, uint8_t *buffer,
++ int buffer_size);
+
-+ TRACE_ENTRY();
++void scst_pr_read_keys(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
++void scst_pr_read_reservation(struct scst_cmd *cmd, uint8_t *buffer,
++ int buffer_size);
++void scst_pr_report_caps(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
++void scst_pr_read_full_status(struct scst_cmd *cmd, uint8_t *buffer,
++ int buffer_size);
+
-+ {
-+ struct scsi_sense_hdr *shdr;
-+ BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE < sizeof(*shdr));
-+ }
-+ {
-+ struct scst_tgt_dev *t;
-+ struct scst_cmd *c;
-+ BUILD_BUG_ON(sizeof(t->curr_sn) != sizeof(t->expected_sn));
-+ BUILD_BUG_ON(sizeof(c->sn) != sizeof(t->expected_sn));
-+ }
++void scst_pr_sync_device_file(struct scst_tgt_dev *tgt_dev, struct scst_cmd *cmd);
+
-+ mutex_init(&scst_mutex);
-+ mutex_init(&scst_mutex2);
-+ INIT_LIST_HEAD(&scst_template_list);
-+ INIT_LIST_HEAD(&scst_dev_list);
-+ INIT_LIST_HEAD(&scst_dev_type_list);
-+ INIT_LIST_HEAD(&scst_virtual_dev_type_list);
-+ spin_lock_init(&scst_main_lock);
-+ spin_lock_init(&scst_init_lock);
-+ init_waitqueue_head(&scst_init_cmd_list_waitQ);
-+ INIT_LIST_HEAD(&scst_init_cmd_list);
+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ scst_trace_flag = SCST_DEFAULT_LOG_FLAGS;
-+#endif
-+ atomic_set(&scst_cmd_count, 0);
-+ spin_lock_init(&scst_mcmd_lock);
-+ INIT_LIST_HEAD(&scst_active_mgmt_cmd_list);
-+ INIT_LIST_HEAD(&scst_delayed_mgmt_cmd_list);
-+ init_waitqueue_head(&scst_mgmt_cmd_list_waitQ);
-+ init_waitqueue_head(&scst_mgmt_waitQ);
-+ spin_lock_init(&scst_mgmt_lock);
-+ INIT_LIST_HEAD(&scst_sess_init_list);
-+ INIT_LIST_HEAD(&scst_sess_shut_list);
-+ init_waitqueue_head(&scst_dev_cmd_waitQ);
-+ mutex_init(&scst_suspend_mutex);
-+ INIT_LIST_HEAD(&scst_cmd_threads_list);
-+
-+ scst_init_threads(&scst_main_cmd_threads);
-+
-+ res = scst_lib_init();
-+ if (res != 0)
-+ goto out_deinit_threads;
-+
-+ scst_num_cpus = num_online_cpus();
-+
-+ /* ToDo: register_cpu_notifier() */
-+
-+ if (scst_threads == 0)
-+ scst_threads = scst_num_cpus;
-+
-+ if (scst_threads < 1) {
-+ PRINT_ERROR("%s", "scst_threads can not be less than 1");
-+ scst_threads = scst_num_cpus;
-+ }
-+
-+#define INIT_CACHEP(p, s, o) do { \
-+ p = KMEM_CACHE(s, SCST_SLAB_FLAGS); \
-+ TRACE_MEM("Slab create: %s at %p size %zd", #s, p, \
-+ sizeof(struct s)); \
-+ if (p == NULL) { \
-+ res = -ENOMEM; \
-+ goto o; \
-+ } \
-+ } while (0)
-+
-+ INIT_CACHEP(scst_mgmt_cachep, scst_mgmt_cmd, out_lib_exit);
-+ INIT_CACHEP(scst_mgmt_stub_cachep, scst_mgmt_cmd_stub,
-+ out_destroy_mgmt_cache);
-+ INIT_CACHEP(scst_ua_cachep, scst_tgt_dev_UA,
-+ out_destroy_mgmt_stub_cache);
-+ {
-+ struct scst_sense { uint8_t s[SCST_SENSE_BUFFERSIZE]; };
-+ INIT_CACHEP(scst_sense_cachep, scst_sense,
-+ out_destroy_ua_cache);
-+ }
-+ INIT_CACHEP(scst_aen_cachep, scst_aen, out_destroy_sense_cache);
-+ INIT_CACHEP(scst_cmd_cachep, scst_cmd, out_destroy_aen_cache);
-+ INIT_CACHEP(scst_sess_cachep, scst_session, out_destroy_cmd_cache);
-+ INIT_CACHEP(scst_tgtd_cachep, scst_tgt_dev, out_destroy_sess_cache);
-+ INIT_CACHEP(scst_acgd_cachep, scst_acg_dev, out_destroy_tgt_cache);
-+
-+ scst_mgmt_mempool = mempool_create(64, mempool_alloc_slab,
-+ mempool_free_slab, scst_mgmt_cachep);
-+ if (scst_mgmt_mempool == NULL) {
-+ res = -ENOMEM;
-+ goto out_destroy_acg_cache;
-+ }
-+
-+ /*
-+ * All mgmt stubs, UAs and sense buffers are bursty and loosing them
-+ * may have fatal consequences, so let's have big pools for them.
-+ */
-+
-+ scst_mgmt_stub_mempool = mempool_create(1024, mempool_alloc_slab,
-+ mempool_free_slab, scst_mgmt_stub_cachep);
-+ if (scst_mgmt_stub_mempool == NULL) {
-+ res = -ENOMEM;
-+ goto out_destroy_mgmt_mempool;
-+ }
-+
-+ scst_ua_mempool = mempool_create(512, mempool_alloc_slab,
-+ mempool_free_slab, scst_ua_cachep);
-+ if (scst_ua_mempool == NULL) {
-+ res = -ENOMEM;
-+ goto out_destroy_mgmt_stub_mempool;
-+ }
-+
-+ scst_sense_mempool = mempool_create(1024, mempool_alloc_slab,
-+ mempool_free_slab, scst_sense_cachep);
-+ if (scst_sense_mempool == NULL) {
-+ res = -ENOMEM;
-+ goto out_destroy_ua_mempool;
-+ }
-+
-+ scst_aen_mempool = mempool_create(100, mempool_alloc_slab,
-+ mempool_free_slab, scst_aen_cachep);
-+ if (scst_aen_mempool == NULL) {
-+ res = -ENOMEM;
-+ goto out_destroy_sense_mempool;
-+ }
-+
-+ res = scst_sysfs_init();
-+ if (res != 0)
-+ goto out_destroy_aen_mempool;
-+
-+ if (scst_max_cmd_mem == 0) {
-+ struct sysinfo si;
-+ si_meminfo(&si);
-+#if BITS_PER_LONG == 32
-+ scst_max_cmd_mem = min(
-+ (((uint64_t)(si.totalram - si.totalhigh) << PAGE_SHIFT)
-+ >> 20) >> 2, (uint64_t)1 << 30);
++void scst_pr_dump_prs(struct scst_device *dev, bool force);
+#else
-+ scst_max_cmd_mem = (((si.totalram - si.totalhigh) << PAGE_SHIFT)
-+ >> 20) >> 2;
-+#endif
-+ }
-+
-+ if (scst_max_dev_cmd_mem != 0) {
-+ if (scst_max_dev_cmd_mem > scst_max_cmd_mem) {
-+ PRINT_ERROR("scst_max_dev_cmd_mem (%d) > "
-+ "scst_max_cmd_mem (%d)",
-+ scst_max_dev_cmd_mem,
-+ scst_max_cmd_mem);
-+ scst_max_dev_cmd_mem = scst_max_cmd_mem;
-+ }
-+ } else
-+ scst_max_dev_cmd_mem = scst_max_cmd_mem * 2 / 5;
-+
-+ res = scst_sgv_pools_init(
-+ ((uint64_t)scst_max_cmd_mem << 10) >> (PAGE_SHIFT - 10), 0);
-+ if (res != 0)
-+ goto out_sysfs_cleanup;
-+
-+ res = scsi_register_interface(&scst_interface);
-+ if (res != 0)
-+ goto out_destroy_sgv_pool;
-+
-+ for (i = 0; i < (int)ARRAY_SIZE(scst_tasklets); i++) {
-+ spin_lock_init(&scst_tasklets[i].tasklet_lock);
-+ INIT_LIST_HEAD(&scst_tasklets[i].tasklet_cmd_list);
-+ tasklet_init(&scst_tasklets[i].tasklet,
-+ (void *)scst_cmd_tasklet,
-+ (unsigned long)&scst_tasklets[i]);
-+ }
-+
-+ TRACE_DBG("%d CPUs found, starting %d threads", scst_num_cpus,
-+ scst_threads);
-+
-+ res = scst_start_global_threads(scst_threads);
-+ if (res < 0)
-+ goto out_thread_free;
-+
-+ PRINT_INFO("SCST version %s loaded successfully (max mem for "
-+ "commands %dMB, per device %dMB)", SCST_VERSION_STRING,
-+ scst_max_cmd_mem, scst_max_dev_cmd_mem);
-+
-+ scst_print_config();
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_thread_free:
-+ scst_stop_global_threads();
-+
-+ scsi_unregister_interface(&scst_interface);
-+
-+out_destroy_sgv_pool:
-+ scst_sgv_pools_deinit();
-+
-+out_sysfs_cleanup:
-+ scst_sysfs_cleanup();
-+
-+out_destroy_aen_mempool:
-+ mempool_destroy(scst_aen_mempool);
-+
-+out_destroy_sense_mempool:
-+ mempool_destroy(scst_sense_mempool);
-+
-+out_destroy_ua_mempool:
-+ mempool_destroy(scst_ua_mempool);
-+
-+out_destroy_mgmt_stub_mempool:
-+ mempool_destroy(scst_mgmt_stub_mempool);
-+
-+out_destroy_mgmt_mempool:
-+ mempool_destroy(scst_mgmt_mempool);
-+
-+out_destroy_acg_cache:
-+ kmem_cache_destroy(scst_acgd_cachep);
-+
-+out_destroy_tgt_cache:
-+ kmem_cache_destroy(scst_tgtd_cachep);
-+
-+out_destroy_sess_cache:
-+ kmem_cache_destroy(scst_sess_cachep);
-+
-+out_destroy_cmd_cache:
-+ kmem_cache_destroy(scst_cmd_cachep);
-+
-+out_destroy_aen_cache:
-+ kmem_cache_destroy(scst_aen_cachep);
-+
-+out_destroy_sense_cache:
-+ kmem_cache_destroy(scst_sense_cachep);
-+
-+out_destroy_ua_cache:
-+ kmem_cache_destroy(scst_ua_cachep);
-+
-+out_destroy_mgmt_stub_cache:
-+ kmem_cache_destroy(scst_mgmt_stub_cachep);
-+
-+out_destroy_mgmt_cache:
-+ kmem_cache_destroy(scst_mgmt_cachep);
-+
-+out_lib_exit:
-+ scst_lib_exit();
-+
-+out_deinit_threads:
-+ scst_deinit_threads(&scst_main_cmd_threads);
-+ goto out;
-+}
-+
-+static void __exit exit_scst(void)
-+{
-+ TRACE_ENTRY();
-+
-+ /* ToDo: unregister_cpu_notifier() */
-+
-+ scst_stop_global_threads();
-+
-+ scst_deinit_threads(&scst_main_cmd_threads);
-+
-+ scsi_unregister_interface(&scst_interface);
-+
-+ scst_sgv_pools_deinit();
-+
-+ scst_sysfs_cleanup();
-+
-+#define DEINIT_CACHEP(p) do { \
-+ kmem_cache_destroy(p); \
-+ p = NULL; \
-+ } while (0)
-+
-+ mempool_destroy(scst_mgmt_mempool);
-+ mempool_destroy(scst_mgmt_stub_mempool);
-+ mempool_destroy(scst_ua_mempool);
-+ mempool_destroy(scst_sense_mempool);
-+ mempool_destroy(scst_aen_mempool);
-+
-+ DEINIT_CACHEP(scst_mgmt_cachep);
-+ DEINIT_CACHEP(scst_mgmt_stub_cachep);
-+ DEINIT_CACHEP(scst_ua_cachep);
-+ DEINIT_CACHEP(scst_sense_cachep);
-+ DEINIT_CACHEP(scst_aen_cachep);
-+ DEINIT_CACHEP(scst_cmd_cachep);
-+ DEINIT_CACHEP(scst_sess_cachep);
-+ DEINIT_CACHEP(scst_tgtd_cachep);
-+ DEINIT_CACHEP(scst_acgd_cachep);
-+
-+ scst_lib_exit();
-+
-+ PRINT_INFO("%s", "SCST unloaded");
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+module_init(init_scst);
-+module_exit(exit_scst);
-+
-+MODULE_AUTHOR("Vladislav Bolkhovitin");
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("SCSI target core");
-+MODULE_VERSION(SCST_VERSION_STRING);
-diff -uprN orig/linux-2.6.36/drivers/scst/scst_module.c linux-2.6.36/drivers/scst/scst_module.c
---- orig/linux-2.6.36/drivers/scst/scst_module.c
-+++ linux-2.6.36/drivers/scst/scst_module.c
-@@ -0,0 +1,70 @@
-+/*
-+ * scst_module.c
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * Support for loading target modules. The usage is similar to scsi_module.c
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/init.h>
-+
-+#include <scst.h>
-+
-+static int __init init_this_scst_driver(void)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ res = scst_register_target_template(&driver_target_template);
-+ TRACE_DBG("scst_register_target_template() returned %d", res);
-+ if (res < 0)
-+ goto out;
-+
-+#ifdef SCST_REGISTER_INITIATOR_DRIVER
-+ driver_template.module = THIS_MODULE;
-+ scsi_register_module(MODULE_SCSI_HA, &driver_template);
-+ TRACE_DBG("driver_template.present=%d",
-+ driver_template.present);
-+ if (driver_template.present == 0) {
-+ res = -ENODEV;
-+ MOD_DEC_USE_COUNT;
-+ goto out;
-+ }
-+#endif
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void __exit exit_this_scst_driver(void)
-+{
-+ TRACE_ENTRY();
-+
-+#ifdef SCST_REGISTER_INITIATOR_DRIVER
-+ scsi_unregister_module(MODULE_SCSI_HA, &driver_template);
++static inline void scst_pr_dump_prs(struct scst_device *dev, bool force) {}
+#endif
+
-+ scst_unregister_target_template(&driver_target_template);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+module_init(init_this_scst_driver);
-+module_exit(exit_this_scst_driver);
-diff -uprN orig/linux-2.6.36/drivers/scst/scst_pres.c linux-2.6.36/drivers/scst/scst_pres.c
---- orig/linux-2.6.36/drivers/scst/scst_pres.c
-+++ linux-2.6.36/drivers/scst/scst_pres.c
-@@ -0,0 +1,2648 @@
++#endif /* SCST_PRES_H_ */
+diff -uprN orig/linux-2.6.39/drivers/scst/scst_pres.c linux-2.6.39/drivers/scst/scst_pres.c
+--- orig/linux-2.6.39/drivers/scst/scst_pres.c
++++ linux-2.6.39/drivers/scst/scst_pres.c
+@@ -0,0 +1,2637 @@
+/*
+ * scst_pres.c
+ *
@@ -14822,7 +22987,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_pres.c linux-2.6.36/drivers/scst/
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
-+#include <linux/smp_lock.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/kthread.h>
@@ -14944,7 +23108,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_pres.c linux-2.6.36/drivers/scst/
+ } else
+ len = TID_COMMON_SIZE;
+
-+ return (memcmp(tid_a, tid_b, len) == 0);
++ return memcmp(tid_a, tid_b, len) == 0;
+
+out_error:
+ PRINT_ERROR("%s", "Invalid initiator port transport id");
@@ -15273,7 +23437,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_pres.c linux-2.6.36/drivers/scst/
+
+ scst_set_sense(ua, sizeof(ua), dev->d_sense, key, asc, ascq);
+
-+ TRACE_PR("Queuing UA [%x %x %x]: registrant %s/%d (%p), tgt_dev %p, "
++ TRACE_PR("Queueing UA [%x %x %x]: registrant %s/%d (%p), tgt_dev %p, "
+ "key %016llx", ua[2], ua[12], ua[13],
+ debug_transport_id_to_initiator_name(reg->transport_id),
+ reg->rel_tgt_id, reg, reg->tgt_dev, reg->key);
@@ -15348,17 +23512,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_pres.c linux-2.6.36/drivers/scst/
+ return;
+}
+
-+/* Abstract vfs_unlink & path_put for different kernel versions */
-+static inline void scst_pr_vfs_unlink_and_put(struct nameidata *nd)
-+{
-+ vfs_unlink(nd->path.dentry->d_parent->d_inode,
-+ nd->path.dentry);
-+ path_put(&nd->path);
-+}
-+
-+static inline void scst_pr_path_put(struct nameidata *nd)
++/* Abstract vfs_unlink() for different kernel versions (as possible) */
++static inline void scst_pr_vfs_unlink_and_put(struct path *path)
+{
-+ path_put(&nd->path);
++ vfs_unlink(path->dentry->d_parent->d_inode, path->dentry);
++ path_put(path);
+}
+
+/* Called under scst_mutex */
@@ -15659,23 +23817,25 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_pres.c linux-2.6.36/drivers/scst/
+{
+ int res = 0;
+ struct scst_device *dev = tgt_dev->dev;
-+ struct nameidata nd;
++ struct path path;
+ mm_segment_t old_fs = get_fs();
+
+ TRACE_ENTRY();
+
+ set_fs(KERNEL_DS);
+
-+ res = path_lookup(dev->pr_file_name, 0, &nd);
++ res = dev->pr_file_name ? kern_path(dev->pr_file_name, 0, &path) :
++ -ENOENT;
+ if (!res)
-+ scst_pr_vfs_unlink_and_put(&nd);
++ scst_pr_vfs_unlink_and_put(&path);
+ else
+ TRACE_DBG("Unable to lookup file '%s' - error %d",
+ dev->pr_file_name, res);
+
-+ res = path_lookup(dev->pr_file_name1, 0, &nd);
++ res = dev->pr_file_name1 ? kern_path(dev->pr_file_name1, 0, &path) :
++ -ENOENT;
+ if (!res)
-+ scst_pr_vfs_unlink_and_put(&nd);
++ scst_pr_vfs_unlink_and_put(&path);
+ else
+ TRACE_DBG("Unable to lookup file '%s' - error %d",
+ dev->pr_file_name1, res);
@@ -15846,12 +24006,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_pres.c linux-2.6.36/drivers/scst/
+write_error_close:
+ filp_close(file, NULL);
+ {
-+ struct nameidata nd;
++ struct path path;
+ int rc;
+
-+ rc = path_lookup(dev->pr_file_name, 0, &nd);
++ rc = kern_path(dev->pr_file_name, 0, &path);
+ if (!rc)
-+ scst_pr_vfs_unlink_and_put(&nd);
++ scst_pr_vfs_unlink_and_put(&path);
+ else
+ TRACE_PR("Unable to lookup '%s' - error %d",
+ dev->pr_file_name, rc);
@@ -15862,14 +24022,17 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_pres.c linux-2.6.36/drivers/scst/
+static int scst_pr_check_pr_path(void)
+{
+ int res;
-+ struct nameidata nd;
++ struct path path;
++
+ mm_segment_t old_fs = get_fs();
+
+ TRACE_ENTRY();
+
+ set_fs(KERNEL_DS);
+
-+ res = path_lookup(SCST_PR_DIR, 0, &nd);
++ res = kern_path(SCST_PR_DIR, 0, &path);
++ if (res == 0)
++ path_put(&path);
+ if (res != 0) {
+ PRINT_ERROR("Unable to find %s (err %d), you should create "
+ "this directory manually or reinstall SCST",
@@ -15877,8 +24040,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_pres.c linux-2.6.36/drivers/scst/
+ goto out_setfs;
+ }
+
-+ scst_pr_path_put(&nd);
-+
+out_setfs:
+ set_fs(old_fs);
+
@@ -16205,12 +24366,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_pres.c linux-2.6.36/drivers/scst/
+ if (tgtt->get_initiator_port_transport_id == NULL)
+ continue;
+
-+ if (tgtt->get_initiator_port_transport_id(NULL, NULL) != proto_id)
-+ continue;
-+
+ list_for_each_entry(tgt, &tgtt->tgt_list, tgt_list_entry) {
+ struct scst_dev_registrant *reg;
+
++ if (tgtt->get_initiator_port_transport_id(tgt, NULL, NULL) != proto_id)
++ continue;
++
+ reg = scst_pr_find_reg(dev, transport_id,
+ tgt->rel_tgt_id);
+ if (reg == NULL)
@@ -16292,12 +24453,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_pres.c linux-2.6.36/drivers/scst/
+ if (tgtt->get_initiator_port_transport_id == NULL)
+ continue;
+
-+ if (tgtt->get_initiator_port_transport_id(NULL, NULL) != proto_id)
-+ continue;
-+
+ TRACE_PR("tgtt %s, spec_i_pt %d", tgtt->name, spec_i_pt);
+
+ list_for_each_entry(tgt, &tgtt->tgt_list, tgt_list_entry) {
++ if (tgtt->get_initiator_port_transport_id(tgt, NULL, NULL) != proto_id)
++ continue;
+ if (tgt->rel_tgt_id == 0)
+ continue;
+ TRACE_PR("tgt %s, rel_tgt_id %d", tgt->tgt_name,
@@ -16813,7 +24973,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_pres.c linux-2.6.36/drivers/scst/
+/* Called with dev_pr_mutex locked, no IRQ */
+void scst_pr_clear(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size)
+{
-+ int scope, type;
+ __be64 key;
+ struct scst_device *dev = cmd->dev;
+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
@@ -16822,8 +24981,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_pres.c linux-2.6.36/drivers/scst/
+ TRACE_ENTRY();
+
+ key = get_unaligned((__be64 *)&buffer[0]);
-+ scope = (cmd->cdb[2] & 0x0f) >> 4;
-+ type = cmd->cdb[2] & 0x0f;
+
+ if (buffer_size != 24) {
+ TRACE_PR("Invalid buffer size %d", buffer_size);
@@ -17048,15 +25205,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_pres.c linux-2.6.36/drivers/scst/
+
+ TRACE_ENTRY();
+
-+ saved_cmd_done = NULL; /* to remove warning that it's used not inited */
++ if (!atomic_dec_and_test(&cmd->pr_abort_counter->pr_abort_pending_cnt))
++ goto out;
+
-+ if (cmd->pr_abort_counter != NULL) {
-+ if (!atomic_dec_and_test(&cmd->pr_abort_counter->pr_abort_pending_cnt))
-+ goto out;
-+ saved_cmd_done = cmd->pr_abort_counter->saved_cmd_done;
-+ kfree(cmd->pr_abort_counter);
-+ cmd->pr_abort_counter = NULL;
-+ }
++ saved_cmd_done = cmd->pr_abort_counter->saved_cmd_done;
++ kfree(cmd->pr_abort_counter);
++ cmd->pr_abort_counter = NULL;
+
+ saved_cmd_done(cmd, next_state, pref_context);
+
@@ -17174,7 +25328,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_pres.c linux-2.6.36/drivers/scst/
+
+ TRACE_ENTRY();
+
-+ unlock = scst_pr_read_lock(dev);
++ unlock = scst_pr_read_lock(cmd);
+
+ TRACE_DBG("Testing if command %s (0x%x) from %s allowed to execute",
+ cmd->op_name, cmd->cdb[0], cmd->sess->initiator_name);
@@ -17234,7 +25388,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_pres.c linux-2.6.36/drivers/scst/
+ cmd->op_name, cmd->cdb[0], cmd->sess->initiator_name);
+
+out_unlock:
-+ scst_pr_read_unlock(dev, unlock);
++ scst_pr_read_unlock(cmd, unlock);
+
+ TRACE_EXIT_RES(allowed);
+ return allowed;
@@ -17256,7 +25410,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_pres.c linux-2.6.36/drivers/scst/
+ }
+
+ TRACE_PR("Read Keys (dev %s): PRGen %d", dev->virt_name,
-+ dev->pr_generation);
++ dev->pr_generation);
+
+ put_unaligned(cpu_to_be32(dev->pr_generation), (__be32 *)&buffer[0]);
+
@@ -17445,190 +25599,16 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_pres.c linux-2.6.36/drivers/scst/
+ TRACE_EXIT();
+ return;
+}
-diff -uprN orig/linux-2.6.36/drivers/scst/scst_pres.h linux-2.6.36/drivers/scst/scst_pres.h
---- orig/linux-2.6.36/drivers/scst/scst_pres.h
-+++ linux-2.6.36/drivers/scst/scst_pres.h
-@@ -0,0 +1,170 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/scst_sysfs.c linux-2.6.39/drivers/scst/scst_sysfs.c
+--- orig/linux-2.6.39/drivers/scst/scst_sysfs.c
++++ linux-2.6.39/drivers/scst/scst_sysfs.c
+@@ -0,0 +1,6224 @@
+/*
-+ * scst_pres.c
++ * scst_sysfs.c
+ *
-+ * Copyright (C) 2009 - 2010 Alexey Obitotskiy <alexeyo1@open-e.com>
-+ * Copyright (C) 2009 - 2010 Open-E, Inc.
++ * Copyright (C) 2009 Daniel Henrique Debonzi <debonzi@linux.vnet.ibm.com>
+ * Copyright (C) 2009 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#ifndef SCST_PRES_H_
-+#define SCST_PRES_H_
-+
-+#include <linux/delay.h>
-+
-+#define PR_REGISTER 0x00
-+#define PR_RESERVE 0x01
-+#define PR_RELEASE 0x02
-+#define PR_CLEAR 0x03
-+#define PR_PREEMPT 0x04
-+#define PR_PREEMPT_AND_ABORT 0x05
-+#define PR_REGISTER_AND_IGNORE 0x06
-+#define PR_REGISTER_AND_MOVE 0x07
-+
-+#define PR_READ_KEYS 0x00
-+#define PR_READ_RESERVATION 0x01
-+#define PR_REPORT_CAPS 0x02
-+#define PR_READ_FULL_STATUS 0x03
-+
-+#define TYPE_UNSPECIFIED (-1)
-+#define TYPE_WRITE_EXCLUSIVE 0x01
-+#define TYPE_EXCLUSIVE_ACCESS 0x03
-+#define TYPE_WRITE_EXCLUSIVE_REGONLY 0x05
-+#define TYPE_EXCLUSIVE_ACCESS_REGONLY 0x06
-+#define TYPE_WRITE_EXCLUSIVE_ALL_REG 0x07
-+#define TYPE_EXCLUSIVE_ACCESS_ALL_REG 0x08
-+
-+#define SCOPE_LU 0x00
-+
-+static inline bool scst_pr_type_valid(uint8_t type)
-+{
-+ switch (type) {
-+ case TYPE_WRITE_EXCLUSIVE:
-+ case TYPE_EXCLUSIVE_ACCESS:
-+ case TYPE_WRITE_EXCLUSIVE_REGONLY:
-+ case TYPE_EXCLUSIVE_ACCESS_REGONLY:
-+ case TYPE_WRITE_EXCLUSIVE_ALL_REG:
-+ case TYPE_EXCLUSIVE_ACCESS_ALL_REG:
-+ return true;
-+ default:
-+ return false;
-+ }
-+}
-+
-+static inline bool scst_pr_read_lock(struct scst_device *dev)
-+{
-+ bool unlock = false;
-+
-+ TRACE_ENTRY();
-+
-+ atomic_inc(&dev->pr_readers_count);
-+ smp_mb__after_atomic_inc(); /* to sync with scst_pr_write_lock() */
-+
-+ if (unlikely(dev->pr_writer_active)) {
-+ unlock = true;
-+ atomic_dec(&dev->pr_readers_count);
-+ mutex_lock(&dev->dev_pr_mutex);
-+ }
-+
-+ TRACE_EXIT_RES(unlock);
-+ return unlock;
-+}
-+
-+static inline void scst_pr_read_unlock(struct scst_device *dev, bool unlock)
-+{
-+ TRACE_ENTRY();
-+
-+ if (unlikely(unlock))
-+ mutex_unlock(&dev->dev_pr_mutex);
-+ else {
-+ /*
-+ * To sync with scst_pr_write_lock(). We need it to ensure
-+ * order of our reads with the writer's writes.
-+ */
-+ smp_mb__before_atomic_dec();
-+ atomic_dec(&dev->pr_readers_count);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static inline void scst_pr_write_lock(struct scst_device *dev)
-+{
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&dev->dev_pr_mutex);
-+
-+ dev->pr_writer_active = 1;
-+
-+ /* to sync with scst_pr_read_lock() and unlock() */
-+ smp_mb();
-+
-+ while (atomic_read(&dev->pr_readers_count) != 0) {
-+ TRACE_DBG("Waiting for %d readers (dev %p)",
-+ atomic_read(&dev->pr_readers_count), dev);
-+ msleep(1);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static inline void scst_pr_write_unlock(struct scst_device *dev)
-+{
-+ TRACE_ENTRY();
-+
-+ dev->pr_writer_active = 0;
-+
-+ mutex_unlock(&dev->dev_pr_mutex);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+int scst_pr_init_dev(struct scst_device *dev);
-+void scst_pr_clear_dev(struct scst_device *dev);
-+
-+int scst_pr_init_tgt_dev(struct scst_tgt_dev *tgt_dev);
-+void scst_pr_clear_tgt_dev(struct scst_tgt_dev *tgt_dev);
-+
-+bool scst_pr_crh_case(struct scst_cmd *cmd);
-+bool scst_pr_is_cmd_allowed(struct scst_cmd *cmd);
-+
-+void scst_pr_register(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
-+void scst_pr_register_and_ignore(struct scst_cmd *cmd, uint8_t *buffer,
-+ int buffer_size);
-+void scst_pr_register_and_move(struct scst_cmd *cmd, uint8_t *buffer,
-+ int buffer_size);
-+void scst_pr_reserve(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
-+void scst_pr_release(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
-+void scst_pr_clear(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
-+void scst_pr_preempt(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
-+void scst_pr_preempt_and_abort(struct scst_cmd *cmd, uint8_t *buffer,
-+ int buffer_size);
-+
-+void scst_pr_read_keys(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
-+void scst_pr_read_reservation(struct scst_cmd *cmd, uint8_t *buffer,
-+ int buffer_size);
-+void scst_pr_report_caps(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
-+void scst_pr_read_full_status(struct scst_cmd *cmd, uint8_t *buffer,
-+ int buffer_size);
-+
-+void scst_pr_sync_device_file(struct scst_tgt_dev *tgt_dev, struct scst_cmd *cmd);
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+void scst_pr_dump_prs(struct scst_device *dev, bool force);
-+#else
-+static inline void scst_pr_dump_prs(struct scst_device *dev, bool force) {}
-+#endif
-+
-+#endif /* SCST_PRES_H_ */
-diff -uprN orig/linux-2.6.36/drivers/scst/scst_priv.h linux-2.6.36/drivers/scst/scst_priv.h
---- orig/linux-2.6.36/drivers/scst/scst_priv.h
-+++ linux-2.6.36/drivers/scst/scst_priv.h
-@@ -0,0 +1,603 @@
-+/*
-+ * scst_priv.h
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2009 - 2010 ID7 Ltd.
+ * Copyright (C) 2010 - 2011 SCST Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
@@ -17642,752 +25622,279 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_priv.h linux-2.6.36/drivers/scst/
+ * GNU General Public License for more details.
+ */
+
-+#ifndef __SCST_PRIV_H
-+#define __SCST_PRIV_H
-+
-+#include <linux/types.h>
-+
-+#include <scsi/scsi.h>
-+#include <scsi/scsi_cmnd.h>
-+#include <scsi/scsi_driver.h>
-+#include <scsi/scsi_device.h>
-+#include <scsi/scsi_host.h>
++#include <linux/kobject.h>
++#include <linux/string.h>
++#include <linux/sysfs.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/ctype.h>
++#include <linux/slab.h>
++#include <linux/kthread.h>
+
-+#define LOG_PREFIX "scst"
++#include <scst/scst.h>
++#include "scst_priv.h"
++#include "scst_pres.h"
+
-+#include <scst/scst_debug.h>
++static DECLARE_COMPLETION(scst_sysfs_root_release_completion);
+
-+#define TRACE_RTRY 0x80000000
-+#define TRACE_SCSI_SERIALIZING 0x40000000
-+/** top being the edge away from the interupt */
-+#define TRACE_SND_TOP 0x20000000
-+#define TRACE_RCV_TOP 0x01000000
-+/** bottom being the edge toward the interupt */
-+#define TRACE_SND_BOT 0x08000000
-+#define TRACE_RCV_BOT 0x04000000
++static struct kobject *scst_targets_kobj;
++static struct kobject *scst_devices_kobj;
++static struct kobject *scst_handlers_kobj;
++static struct kobject *scst_device_groups_kobj;
++
++static const char *const scst_dev_handler_types[] = {
++ "Direct-access device (e.g., magnetic disk)",
++ "Sequential-access device (e.g., magnetic tape)",
++ "Printer device",
++ "Processor device",
++ "Write-once device (e.g., some optical disks)",
++ "CD-ROM device",
++ "Scanner device (obsolete)",
++ "Optical memory device (e.g., some optical disks)",
++ "Medium changer device (e.g., jukeboxes)",
++ "Communications device (obsolete)",
++ "Defined by ASC IT8 (Graphic arts pre-press devices)",
++ "Defined by ASC IT8 (Graphic arts pre-press devices)",
++ "Storage array controller device (e.g., RAID)",
++ "Enclosure services device",
++ "Simplified direct-access device (e.g., magnetic disk)",
++ "Optical card reader/writer device"
++};
+
+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+#define trace_flag scst_trace_flag
-+extern unsigned long scst_trace_flag;
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG
-+
-+#define SCST_DEFAULT_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MINOR | TRACE_PID | \
-+ TRACE_LINE | TRACE_FUNCTION | TRACE_SPECIAL | TRACE_MGMT | \
-+ TRACE_MGMT_DEBUG | TRACE_RTRY)
-+
-+#define TRACE_RETRY(args...) TRACE_DBG_FLAG(TRACE_RTRY, args)
-+#define TRACE_SN(args...) TRACE_DBG_FLAG(TRACE_SCSI_SERIALIZING, args)
-+#define TRACE_SEND_TOP(args...) TRACE_DBG_FLAG(TRACE_SND_TOP, args)
-+#define TRACE_RECV_TOP(args...) TRACE_DBG_FLAG(TRACE_RCV_TOP, args)
-+#define TRACE_SEND_BOT(args...) TRACE_DBG_FLAG(TRACE_SND_BOT, args)
-+#define TRACE_RECV_BOT(args...) TRACE_DBG_FLAG(TRACE_RCV_BOT, args)
-+
-+#else /* CONFIG_SCST_DEBUG */
-+
-+# ifdef CONFIG_SCST_TRACING
-+#define SCST_DEFAULT_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MGMT | \
-+ TRACE_SPECIAL)
-+# else
-+#define SCST_DEFAULT_LOG_FLAGS 0
-+# endif
+
-+#define TRACE_RETRY(args...)
-+#define TRACE_SN(args...)
-+#define TRACE_SEND_TOP(args...)
-+#define TRACE_RECV_TOP(args...)
-+#define TRACE_SEND_BOT(args...)
-+#define TRACE_RECV_BOT(args...)
++static DEFINE_MUTEX(scst_log_mutex);
+
++static struct scst_trace_log scst_trace_tbl[] = {
++ { TRACE_OUT_OF_MEM, "out_of_mem" },
++ { TRACE_MINOR, "minor" },
++ { TRACE_SG_OP, "sg" },
++ { TRACE_MEMORY, "mem" },
++ { TRACE_BUFF, "buff" },
++#ifndef GENERATING_UPSTREAM_PATCH
++ { TRACE_ENTRYEXIT, "entryexit" },
+#endif
-+
-+/**
-+ ** Bits for scst_flags
-+ **/
-+
-+/*
-+ * Set if new commands initialization is being suspended for a while.
-+ * Used to let TM commands execute while preparing the suspend, since
-+ * RESET or ABORT could be necessary to free SCSI commands.
-+ */
-+#define SCST_FLAG_SUSPENDING 0
-+
-+/* Set if new commands initialization is suspended for a while */
-+#define SCST_FLAG_SUSPENDED 1
-+
-+/**
-+ ** Return codes for cmd state process functions. Codes are the same as
-+ ** for SCST_EXEC_* to avoid translation to them and, hence, have better code.
-+ **/
-+#define SCST_CMD_STATE_RES_CONT_NEXT SCST_EXEC_COMPLETED
-+#define SCST_CMD_STATE_RES_CONT_SAME SCST_EXEC_NOT_COMPLETED
-+#define SCST_CMD_STATE_RES_NEED_THREAD (SCST_EXEC_NOT_COMPLETED+1)
-+
-+/**
-+ ** Maximum count of uncompleted commands that an initiator could
-+ ** queue on any device. Then it will start getting TASK QUEUE FULL status.
-+ **/
-+#define SCST_MAX_TGT_DEV_COMMANDS 48
-+
-+/**
-+ ** Maximum count of uncompleted commands that could be queued on any device.
-+ ** Then initiators sending commands to this device will start getting
-+ ** TASK QUEUE FULL status.
-+ **/
-+#define SCST_MAX_DEV_COMMANDS 256
-+
-+#define SCST_TGT_RETRY_TIMEOUT (3/2*HZ)
-+
-+/* Definitions of symbolic constants for LUN addressing method */
-+#define SCST_LUN_ADDR_METHOD_PERIPHERAL 0
-+#define SCST_LUN_ADDR_METHOD_FLAT 1
-+
-+/* Activities suspending timeout */
-+#define SCST_SUSPENDING_TIMEOUT (90 * HZ)
-+
-+extern struct mutex scst_mutex2;
-+
-+extern int scst_threads;
-+
-+extern unsigned int scst_max_dev_cmd_mem;
-+
-+extern mempool_t *scst_mgmt_mempool;
-+extern mempool_t *scst_mgmt_stub_mempool;
-+extern mempool_t *scst_ua_mempool;
-+extern mempool_t *scst_sense_mempool;
-+extern mempool_t *scst_aen_mempool;
-+
-+extern struct kmem_cache *scst_cmd_cachep;
-+extern struct kmem_cache *scst_sess_cachep;
-+extern struct kmem_cache *scst_tgtd_cachep;
-+extern struct kmem_cache *scst_acgd_cachep;
-+
-+extern spinlock_t scst_main_lock;
-+
-+extern struct scst_sgv_pools scst_sgv;
-+
-+extern unsigned long scst_flags;
-+extern atomic_t scst_cmd_count;
-+extern struct list_head scst_template_list;
-+extern struct list_head scst_dev_list;
-+extern struct list_head scst_dev_type_list;
-+extern struct list_head scst_virtual_dev_type_list;
-+extern wait_queue_head_t scst_dev_cmd_waitQ;
-+
-+extern unsigned int scst_setup_id;
-+
-+#define SCST_DEF_MAX_TASKLET_CMD 20
-+extern int scst_max_tasklet_cmd;
-+
-+extern spinlock_t scst_init_lock;
-+extern struct list_head scst_init_cmd_list;
-+extern wait_queue_head_t scst_init_cmd_list_waitQ;
-+extern unsigned int scst_init_poll_cnt;
-+
-+extern struct scst_cmd_threads scst_main_cmd_threads;
-+
-+extern spinlock_t scst_mcmd_lock;
-+/* The following lists protected by scst_mcmd_lock */
-+extern struct list_head scst_active_mgmt_cmd_list;
-+extern struct list_head scst_delayed_mgmt_cmd_list;
-+extern wait_queue_head_t scst_mgmt_cmd_list_waitQ;
-+
-+struct scst_tasklet {
-+ spinlock_t tasklet_lock;
-+ struct list_head tasklet_cmd_list;
-+ struct tasklet_struct tasklet;
++ { TRACE_PID, "pid" },
++ { TRACE_LINE, "line" },
++ { TRACE_FUNCTION, "function" },
++ { TRACE_DEBUG, "debug" },
++ { TRACE_SPECIAL, "special" },
++ { TRACE_SCSI, "scsi" },
++ { TRACE_MGMT, "mgmt" },
++ { TRACE_MGMT_DEBUG, "mgmt_dbg" },
++ { TRACE_FLOW_CONTROL, "flow_control" },
++ { TRACE_PRES, "pr" },
++ { 0, NULL }
+};
-+extern struct scst_tasklet scst_tasklets[NR_CPUS];
-+
-+extern wait_queue_head_t scst_mgmt_waitQ;
-+extern spinlock_t scst_mgmt_lock;
-+extern struct list_head scst_sess_init_list;
-+extern struct list_head scst_sess_shut_list;
+
-+struct scst_cmd_thread_t {
-+ struct task_struct *cmd_thread;
-+ struct list_head thread_list_entry;
++static struct scst_trace_log scst_local_trace_tbl[] = {
++ { TRACE_RTRY, "retry" },
++ { TRACE_SCSI_SERIALIZING, "scsi_serializing" },
++ { TRACE_RCV_BOT, "recv_bot" },
++ { TRACE_SND_BOT, "send_bot" },
++ { TRACE_RCV_TOP, "recv_top" },
++ { TRACE_SND_TOP, "send_top" },
++ { 0, NULL }
+};
+
-+static inline bool scst_set_io_context(struct scst_cmd *cmd,
-+ struct io_context **old)
-+{
-+ bool res;
-+
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ return false;
-+#endif
-+
-+ if (cmd->cmd_threads == &scst_main_cmd_threads) {
-+ EXTRACHECKS_BUG_ON(in_interrupt());
-+ /*
-+ * No need for any ref counting action, because io_context
-+ * supposed to be cleared in the end of the caller function.
-+ */
-+ current->io_context = cmd->tgt_dev->async_io_context;
-+ res = true;
-+ TRACE_DBG("io_context %p (tgt_dev %p)", current->io_context,
-+ cmd->tgt_dev);
-+ EXTRACHECKS_BUG_ON(current->io_context == NULL);
-+ } else
-+ res = false;
-+
-+ return res;
-+}
-+
-+static inline void scst_reset_io_context(struct scst_tgt_dev *tgt_dev,
-+ struct io_context *old)
-+{
-+ current->io_context = old;
-+ TRACE_DBG("io_context %p reset", current->io_context);
-+ return;
-+}
-+
-+/*
-+ * Converts string presentation of threads pool type to enum.
-+ * Returns SCST_THREADS_POOL_TYPE_INVALID if the string is invalid.
-+ */
-+extern enum scst_dev_type_threads_pool_type scst_parse_threads_pool_type(
-+ const char *p, int len);
-+
-+extern int scst_add_threads(struct scst_cmd_threads *cmd_threads,
-+ struct scst_device *dev, struct scst_tgt_dev *tgt_dev, int num);
-+extern void scst_del_threads(struct scst_cmd_threads *cmd_threads, int num);
-+
-+extern int scst_create_dev_threads(struct scst_device *dev);
-+extern void scst_stop_dev_threads(struct scst_device *dev);
-+
-+extern int scst_tgt_dev_setup_threads(struct scst_tgt_dev *tgt_dev);
-+extern void scst_tgt_dev_stop_threads(struct scst_tgt_dev *tgt_dev);
-+
-+extern bool scst_del_thr_data(struct scst_tgt_dev *tgt_dev,
-+ struct task_struct *tsk);
-+
-+extern struct scst_dev_type scst_null_devtype;
-+
-+extern struct scst_cmd *__scst_check_deferred_commands(
-+ struct scst_tgt_dev *tgt_dev);
-+
-+/* Used to save the function call on the fast path */
-+static inline struct scst_cmd *scst_check_deferred_commands(
-+ struct scst_tgt_dev *tgt_dev)
++static void scst_read_trace_tbl(const struct scst_trace_log *tbl, char *buf,
++ unsigned long log_level, int *pos)
+{
-+ if (tgt_dev->def_cmd_count == 0)
-+ return NULL;
-+ else
-+ return __scst_check_deferred_commands(tgt_dev);
-+}
++ const struct scst_trace_log *t = tbl;
+
-+static inline void scst_make_deferred_commands_active(
-+ struct scst_tgt_dev *tgt_dev)
-+{
-+ struct scst_cmd *c;
++ if (t == NULL)
++ goto out;
+
-+ c = __scst_check_deferred_commands(tgt_dev);
-+ if (c != NULL) {
-+ TRACE_SN("Adding cmd %p to active cmd list", c);
-+ spin_lock_irq(&c->cmd_threads->cmd_list_lock);
-+ list_add_tail(&c->cmd_list_entry,
-+ &c->cmd_threads->active_cmd_list);
-+ wake_up(&c->cmd_threads->cmd_list_waitQ);
-+ spin_unlock_irq(&c->cmd_threads->cmd_list_lock);
++ while (t->token) {
++ if (log_level & t->val) {
++ *pos += sprintf(&buf[*pos], "%s%s",
++ (*pos == 0) ? "" : " | ",
++ t->token);
++ }
++ t++;
+ }
-+
++out:
+ return;
+}
+
-+void scst_inc_expected_sn(struct scst_tgt_dev *tgt_dev, atomic_t *slot);
-+int scst_check_hq_cmd(struct scst_cmd *cmd);
-+
-+void scst_unblock_deferred(struct scst_tgt_dev *tgt_dev,
-+ struct scst_cmd *cmd_sn);
-+
-+void scst_on_hq_cmd_response(struct scst_cmd *cmd);
-+void scst_xmit_process_aborted_cmd(struct scst_cmd *cmd);
-+
-+int scst_cmd_thread(void *arg);
-+void scst_cmd_tasklet(long p);
-+int scst_init_thread(void *arg);
-+int scst_tm_thread(void *arg);
-+int scst_global_mgmt_thread(void *arg);
-+
-+void scst_zero_write_rest(struct scst_cmd *cmd);
-+void scst_limit_sg_write_len(struct scst_cmd *cmd);
-+void scst_adjust_resp_data_len(struct scst_cmd *cmd);
-+
-+int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds);
-+
-+int scst_alloc_tgt(struct scst_tgt_template *tgtt, struct scst_tgt **tgt);
-+void scst_free_tgt(struct scst_tgt *tgt);
-+
-+int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev);
-+void scst_free_device(struct scst_device *dev);
-+
-+struct scst_acg *scst_alloc_add_acg(struct scst_tgt *tgt,
-+ const char *acg_name, bool tgt_acg);
-+void scst_del_free_acg(struct scst_acg *acg);
-+
-+struct scst_acg *scst_tgt_find_acg(struct scst_tgt *tgt, const char *name);
-+struct scst_acg *scst_find_acg(const struct scst_session *sess);
-+
-+void scst_check_reassign_sessions(void);
-+
-+int scst_sess_alloc_tgt_devs(struct scst_session *sess);
-+void scst_sess_free_tgt_devs(struct scst_session *sess);
-+void scst_nexus_loss(struct scst_tgt_dev *tgt_dev, bool queue_UA);
-+
-+int scst_acg_add_lun(struct scst_acg *acg, struct kobject *parent,
-+ struct scst_device *dev, uint64_t lun, int read_only,
-+ bool gen_scst_report_luns_changed, struct scst_acg_dev **out_acg_dev);
-+int scst_acg_del_lun(struct scst_acg *acg, uint64_t lun,
-+ bool gen_scst_report_luns_changed);
-+
-+int scst_acg_add_acn(struct scst_acg *acg, const char *name);
-+void scst_del_free_acn(struct scst_acn *acn, bool reassign);
-+struct scst_acn *scst_find_acn(struct scst_acg *acg, const char *name);
-+
-+/* The activity supposed to be suspended and scst_mutex held */
-+static inline bool scst_acg_sess_is_empty(struct scst_acg *acg)
-+{
-+ return list_empty(&acg->acg_sess_list);
-+}
-+
-+int scst_prepare_request_sense(struct scst_cmd *orig_cmd);
-+int scst_finish_internal_cmd(struct scst_cmd *cmd);
-+
-+void scst_store_sense(struct scst_cmd *cmd);
-+
-+int scst_assign_dev_handler(struct scst_device *dev,
-+ struct scst_dev_type *handler);
-+
-+struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
-+ const char *initiator_name);
-+void scst_free_session(struct scst_session *sess);
-+void scst_free_session_callback(struct scst_session *sess);
-+
-+struct scst_cmd *scst_alloc_cmd(gfp_t gfp_mask);
-+void scst_free_cmd(struct scst_cmd *cmd);
-+static inline void scst_destroy_cmd(struct scst_cmd *cmd)
++static ssize_t scst_trace_level_show(const struct scst_trace_log *local_tbl,
++ unsigned long log_level, char *buf, const char *help)
+{
-+ kmem_cache_free(scst_cmd_cachep, cmd);
-+ return;
-+}
-+
-+void scst_check_retries(struct scst_tgt *tgt);
-+
-+int scst_scsi_exec_async(struct scst_cmd *cmd,
-+ void (*done)(void *, char *, int, int));
-+
-+int scst_alloc_space(struct scst_cmd *cmd);
-+
-+int scst_lib_init(void);
-+void scst_lib_exit(void);
-+
-+int scst_get_full_buf(struct scst_cmd *cmd, uint8_t **buf);
-+void scst_put_full_buf(struct scst_cmd *cmd, uint8_t *buf);
-+
-+__be64 scst_pack_lun(const uint64_t lun, unsigned int addr_method);
-+uint64_t scst_unpack_lun(const uint8_t *lun, int len);
-+
-+struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask);
-+void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd);
-+void scst_done_cmd_mgmt(struct scst_cmd *cmd);
++ int pos = 0;
+
-+static inline void scst_devt_cleanup(struct scst_dev_type *devt) { }
++ scst_read_trace_tbl(scst_trace_tbl, buf, log_level, &pos);
++ scst_read_trace_tbl(local_tbl, buf, log_level, &pos);
+
-+int scst_sysfs_init(void);
-+void scst_sysfs_cleanup(void);
-+int scst_tgtt_sysfs_create(struct scst_tgt_template *tgtt);
-+void scst_tgtt_sysfs_del(struct scst_tgt_template *tgtt);
-+int scst_tgt_sysfs_create(struct scst_tgt *tgt);
-+void scst_tgt_sysfs_prepare_put(struct scst_tgt *tgt);
-+void scst_tgt_sysfs_del(struct scst_tgt *tgt);
-+int scst_sess_sysfs_create(struct scst_session *sess);
-+void scst_sess_sysfs_del(struct scst_session *sess);
-+int scst_recreate_sess_luns_link(struct scst_session *sess);
-+int scst_sgv_sysfs_create(struct sgv_pool *pool);
-+void scst_sgv_sysfs_del(struct sgv_pool *pool);
-+int scst_devt_sysfs_create(struct scst_dev_type *devt);
-+void scst_devt_sysfs_del(struct scst_dev_type *devt);
-+int scst_dev_sysfs_create(struct scst_device *dev);
-+void scst_dev_sysfs_del(struct scst_device *dev);
-+int scst_tgt_dev_sysfs_create(struct scst_tgt_dev *tgt_dev);
-+void scst_tgt_dev_sysfs_del(struct scst_tgt_dev *tgt_dev);
-+int scst_devt_dev_sysfs_create(struct scst_device *dev);
-+void scst_devt_dev_sysfs_del(struct scst_device *dev);
-+int scst_acg_sysfs_create(struct scst_tgt *tgt,
-+ struct scst_acg *acg);
-+void scst_acg_sysfs_del(struct scst_acg *acg);
-+int scst_acg_dev_sysfs_create(struct scst_acg_dev *acg_dev,
-+ struct kobject *parent);
-+void scst_acg_dev_sysfs_del(struct scst_acg_dev *acg_dev);
-+int scst_acn_sysfs_create(struct scst_acn *acn);
-+void scst_acn_sysfs_del(struct scst_acn *acn);
++ pos += sprintf(&buf[pos], "\n\n\nUsage:\n"
++ " echo \"all|none|default\" >trace_level\n"
++ " echo \"value DEC|0xHEX|0OCT\" >trace_level\n"
++ " echo \"add|del TOKEN\" >trace_level\n"
++ "\nwhere TOKEN is one of [debug, function, line, pid,\n"
++#ifndef GENERATING_UPSTREAM_PATCH
++ " entryexit, buff, mem, sg, out_of_mem,\n"
++#else
++ " buff, mem, sg, out_of_mem,\n"
++#endif
++ " special, scsi, mgmt, minor,\n"
++ " mgmt_dbg, scsi_serializing,\n"
++ " retry, recv_bot, send_bot, recv_top, pr,\n"
++ " send_top%s]\n", help != NULL ? help : "");
+
-+void __scst_dev_check_set_UA(struct scst_device *dev, struct scst_cmd *exclude,
-+ const uint8_t *sense, int sense_len);
-+static inline void scst_dev_check_set_UA(struct scst_device *dev,
-+ struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
-+{
-+ spin_lock_bh(&dev->dev_lock);
-+ __scst_dev_check_set_UA(dev, exclude, sense, sense_len);
-+ spin_unlock_bh(&dev->dev_lock);
-+ return;
++ return pos;
+}
-+void scst_dev_check_set_local_UA(struct scst_device *dev,
-+ struct scst_cmd *exclude, const uint8_t *sense, int sense_len);
-+
-+#define SCST_SET_UA_FLAG_AT_HEAD 1
-+#define SCST_SET_UA_FLAG_GLOBAL 2
-+
-+void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
-+ const uint8_t *sense, int sense_len, int flags);
-+int scst_set_pending_UA(struct scst_cmd *cmd);
-+
-+void scst_report_luns_changed(struct scst_acg *acg);
-+
-+void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
-+ bool other_ini, bool call_dev_task_mgmt_fn);
-+void scst_process_reset(struct scst_device *dev,
-+ struct scst_session *originator, struct scst_cmd *exclude_cmd,
-+ struct scst_mgmt_cmd *mcmd, bool setUA);
-+
-+bool scst_is_ua_global(const uint8_t *sense, int len);
-+void scst_requeue_ua(struct scst_cmd *cmd);
+
-+void scst_gen_aen_or_ua(struct scst_tgt_dev *tgt_dev,
-+ int key, int asc, int ascq);
-+
-+static inline bool scst_is_implicit_hq(struct scst_cmd *cmd)
++static int scst_write_trace(const char *buf, size_t length,
++ unsigned long *log_level, unsigned long default_level,
++ const char *name, const struct scst_trace_log *tbl)
+{
-+ return (cmd->op_flags & SCST_IMPLICIT_HQ) != 0;
-+}
-+
-+/*
-+ * Some notes on devices "blocking". Blocking means that no
-+ * commands will go from SCST to underlying SCSI device until it
-+ * is unblocked. But we don't care about all commands that
-+ * already on the device.
-+ */
-+
-+extern void scst_block_dev(struct scst_device *dev);
-+extern void scst_unblock_dev(struct scst_device *dev);
-+
-+extern bool __scst_check_blocked_dev(struct scst_cmd *cmd);
++ int res = length;
++ int action;
++ unsigned long level = 0, oldlevel;
++ char *buffer, *p, *e;
++ const struct scst_trace_log *t;
++ enum {
++ SCST_TRACE_ACTION_ALL = 1,
++ SCST_TRACE_ACTION_NONE = 2,
++ SCST_TRACE_ACTION_DEFAULT = 3,
++ SCST_TRACE_ACTION_ADD = 4,
++ SCST_TRACE_ACTION_DEL = 5,
++ SCST_TRACE_ACTION_VALUE = 6,
++ };
+
-+static inline bool scst_check_blocked_dev(struct scst_cmd *cmd)
-+{
-+ if (unlikely(cmd->dev->block_count > 0) ||
-+ unlikely(cmd->dev->dev_double_ua_possible))
-+ return __scst_check_blocked_dev(cmd);
-+ else
-+ return false;
-+}
++ TRACE_ENTRY();
+
-+/* No locks */
-+static inline void scst_check_unblock_dev(struct scst_cmd *cmd)
-+{
-+ if (unlikely(cmd->unblock_dev)) {
-+ TRACE_MGMT_DBG("cmd %p (tag %llu): unblocking dev %p", cmd,
-+ (long long unsigned int)cmd->tag, cmd->dev);
-+ cmd->unblock_dev = 0;
-+ scst_unblock_dev(cmd->dev);
++ if ((buf == NULL) || (length == 0)) {
++ res = -EINVAL;
++ goto out;
+ }
-+ return;
-+}
-+
-+static inline void __scst_get(void)
-+{
-+ atomic_inc(&scst_cmd_count);
-+ TRACE_DBG("Incrementing scst_cmd_count(new value %d)",
-+ atomic_read(&scst_cmd_count));
-+ /* See comment about smp_mb() in scst_suspend_activity() */
-+ smp_mb__after_atomic_inc();
-+}
+
-+static inline void __scst_put(void)
-+{
-+ int f;
-+ f = atomic_dec_and_test(&scst_cmd_count);
-+ /* See comment about smp_mb() in scst_suspend_activity() */
-+ if (f && unlikely(test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
-+ TRACE_MGMT_DBG("%s", "Waking up scst_dev_cmd_waitQ");
-+ wake_up_all(&scst_dev_cmd_waitQ);
++ buffer = kasprintf(GFP_KERNEL, "%.*s", (int)length, buf);
++ if (buffer == NULL) {
++ PRINT_ERROR("Unable to alloc intermediate buffer (size %zd)",
++ length+1);
++ res = -ENOMEM;
++ goto out;
+ }
-+ TRACE_DBG("Decrementing scst_cmd_count(new value %d)",
-+ atomic_read(&scst_cmd_count));
-+}
-+
-+void scst_sched_session_free(struct scst_session *sess);
+
-+static inline void scst_sess_get(struct scst_session *sess)
-+{
-+ atomic_inc(&sess->refcnt);
-+ TRACE_DBG("Incrementing sess %p refcnt (new value %d)",
-+ sess, atomic_read(&sess->refcnt));
-+}
++ TRACE_DBG("buffer %s", buffer);
+
-+static inline void scst_sess_put(struct scst_session *sess)
-+{
-+ TRACE_DBG("Decrementing sess %p refcnt (new value %d)",
-+ sess, atomic_read(&sess->refcnt)-1);
-+ if (atomic_dec_and_test(&sess->refcnt))
-+ scst_sched_session_free(sess);
-+}
++ p = buffer;
++ if (!strncasecmp("all", p, 3)) {
++ action = SCST_TRACE_ACTION_ALL;
++ } else if (!strncasecmp("none", p, 4) || !strncasecmp("null", p, 4)) {
++ action = SCST_TRACE_ACTION_NONE;
++ } else if (!strncasecmp("default", p, 7)) {
++ action = SCST_TRACE_ACTION_DEFAULT;
++ } else if (!strncasecmp("add", p, 3)) {
++ p += 3;
++ action = SCST_TRACE_ACTION_ADD;
++ } else if (!strncasecmp("del", p, 3)) {
++ p += 3;
++ action = SCST_TRACE_ACTION_DEL;
++ } else if (!strncasecmp("value", p, 5)) {
++ p += 5;
++ action = SCST_TRACE_ACTION_VALUE;
++ } else {
++ if (p[strlen(p) - 1] == '\n')
++ p[strlen(p) - 1] = '\0';
++ PRINT_ERROR("Unknown action \"%s\"", p);
++ res = -EINVAL;
++ goto out_free;
++ }
+
-+static inline void __scst_cmd_get(struct scst_cmd *cmd)
-+{
-+ atomic_inc(&cmd->cmd_ref);
-+ TRACE_DBG("Incrementing cmd %p ref (new value %d)",
-+ cmd, atomic_read(&cmd->cmd_ref));
-+}
++ switch (action) {
++ case SCST_TRACE_ACTION_ADD:
++ case SCST_TRACE_ACTION_DEL:
++ case SCST_TRACE_ACTION_VALUE:
++ if (!isspace(*p)) {
++ PRINT_ERROR("%s", "Syntax error");
++ res = -EINVAL;
++ goto out_free;
++ }
++ }
+
-+static inline void __scst_cmd_put(struct scst_cmd *cmd)
-+{
-+ TRACE_DBG("Decrementing cmd %p ref (new value %d)",
-+ cmd, atomic_read(&cmd->cmd_ref)-1);
-+ if (atomic_dec_and_test(&cmd->cmd_ref))
-+ scst_free_cmd(cmd);
-+}
++ switch (action) {
++ case SCST_TRACE_ACTION_ALL:
++ level = TRACE_ALL;
++ break;
++ case SCST_TRACE_ACTION_DEFAULT:
++ level = default_level;
++ break;
++ case SCST_TRACE_ACTION_NONE:
++ level = TRACE_NULL;
++ break;
++ case SCST_TRACE_ACTION_ADD:
++ case SCST_TRACE_ACTION_DEL:
++ while (isspace(*p) && *p != '\0')
++ p++;
++ e = p;
++ while (!isspace(*e) && *e != '\0')
++ e++;
++ *e = 0;
++ if (tbl) {
++ t = tbl;
++ while (t->token) {
++ if (!strcasecmp(p, t->token)) {
++ level = t->val;
++ break;
++ }
++ t++;
++ }
++ }
++ if (level == 0) {
++ t = scst_trace_tbl;
++ while (t->token) {
++ if (!strcasecmp(p, t->token)) {
++ level = t->val;
++ break;
++ }
++ t++;
++ }
++ }
++ if (level == 0) {
++ PRINT_ERROR("Unknown token \"%s\"", p);
++ res = -EINVAL;
++ goto out_free;
++ }
++ break;
++ case SCST_TRACE_ACTION_VALUE:
++ while (isspace(*p) && *p != '\0')
++ p++;
++ res = strict_strtoul(p, 0, &level);
++ if (res != 0) {
++ PRINT_ERROR("Invalid trace value \"%s\"", p);
++ res = -EINVAL;
++ goto out_free;
++ }
++ break;
++ }
+
-+extern void scst_throttle_cmd(struct scst_cmd *cmd);
-+extern void scst_unthrottle_cmd(struct scst_cmd *cmd);
++ oldlevel = *log_level;
+
-+#ifdef CONFIG_SCST_DEBUG_TM
-+extern void tm_dbg_check_released_cmds(void);
-+extern int tm_dbg_check_cmd(struct scst_cmd *cmd);
-+extern void tm_dbg_release_cmd(struct scst_cmd *cmd);
-+extern void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn,
-+ int force);
-+extern int tm_dbg_is_release(void);
-+#else
-+static inline void tm_dbg_check_released_cmds(void) {}
-+static inline int tm_dbg_check_cmd(struct scst_cmd *cmd)
-+{
-+ return 0;
-+}
-+static inline void tm_dbg_release_cmd(struct scst_cmd *cmd) {}
-+static inline void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn,
-+ int force) {}
-+static inline int tm_dbg_is_release(void)
-+{
-+ return 0;
-+}
-+#endif /* CONFIG_SCST_DEBUG_TM */
++ switch (action) {
++ case SCST_TRACE_ACTION_ADD:
++ *log_level |= level;
++ break;
++ case SCST_TRACE_ACTION_DEL:
++ *log_level &= ~level;
++ break;
++ default:
++ *log_level = level;
++ break;
++ }
+
-+#ifdef CONFIG_SCST_DEBUG_SN
-+void scst_check_debug_sn(struct scst_cmd *cmd);
-+#else
-+static inline void scst_check_debug_sn(struct scst_cmd *cmd) {}
-+#endif
++ PRINT_INFO("Changed trace level for \"%s\": old 0x%08lx, new 0x%08lx",
++ name, oldlevel, *log_level);
+
-+static inline int scst_sn_before(uint32_t seq1, uint32_t seq2)
-+{
-+ return (int32_t)(seq1-seq2) < 0;
++out_free:
++ kfree(buffer);
++out:
++ TRACE_EXIT_RES(res);
++ return res;
+}
+
-+int gen_relative_target_port_id(uint16_t *id);
-+bool scst_is_relative_target_port_id_unique(uint16_t id,
-+ const struct scst_tgt *t);
-+
-+#ifdef CONFIG_SCST_MEASURE_LATENCY
-+
-+void scst_set_start_time(struct scst_cmd *cmd);
-+void scst_set_cur_start(struct scst_cmd *cmd);
-+void scst_set_parse_time(struct scst_cmd *cmd);
-+void scst_set_alloc_buf_time(struct scst_cmd *cmd);
-+void scst_set_restart_waiting_time(struct scst_cmd *cmd);
-+void scst_set_rdy_to_xfer_time(struct scst_cmd *cmd);
-+void scst_set_pre_exec_time(struct scst_cmd *cmd);
-+void scst_set_exec_time(struct scst_cmd *cmd);
-+void scst_set_dev_done_time(struct scst_cmd *cmd);
-+void scst_set_xmit_time(struct scst_cmd *cmd);
-+void scst_set_tgt_on_free_time(struct scst_cmd *cmd);
-+void scst_set_dev_on_free_time(struct scst_cmd *cmd);
-+void scst_update_lat_stats(struct scst_cmd *cmd);
-+
-+#else
-+
-+static inline void scst_set_start_time(struct scst_cmd *cmd) {}
-+static inline void scst_set_cur_start(struct scst_cmd *cmd) {}
-+static inline void scst_set_parse_time(struct scst_cmd *cmd) {}
-+static inline void scst_set_alloc_buf_time(struct scst_cmd *cmd) {}
-+static inline void scst_set_restart_waiting_time(struct scst_cmd *cmd) {}
-+static inline void scst_set_rdy_to_xfer_time(struct scst_cmd *cmd) {}
-+static inline void scst_set_pre_exec_time(struct scst_cmd *cmd) {}
-+static inline void scst_set_exec_time(struct scst_cmd *cmd) {}
-+static inline void scst_set_dev_done_time(struct scst_cmd *cmd) {}
-+static inline void scst_set_xmit_time(struct scst_cmd *cmd) {}
-+static inline void scst_set_tgt_on_free_time(struct scst_cmd *cmd) {}
-+static inline void scst_set_dev_on_free_time(struct scst_cmd *cmd) {}
-+static inline void scst_update_lat_stats(struct scst_cmd *cmd) {}
-+
-+#endif /* CONFIG_SCST_MEASURE_LATENCY */
-+
-+#endif /* __SCST_PRIV_H */
-diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst/scst_sysfs.c
---- orig/linux-2.6.36/drivers/scst/scst_sysfs.c
-+++ linux-2.6.36/drivers/scst/scst_sysfs.c
-@@ -0,0 +1,5336 @@
-+/*
-+ * scst_sysfs.c
-+ *
-+ * Copyright (C) 2009 Daniel Henrique Debonzi <debonzi@linux.vnet.ibm.com>
-+ * Copyright (C) 2009 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2009 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/kobject.h>
-+#include <linux/string.h>
-+#include <linux/sysfs.h>
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/ctype.h>
-+#include <linux/slab.h>
-+#include <linux/kthread.h>
-+
-+#include <scst/scst.h>
-+#include "scst_priv.h"
-+#include "scst_mem.h"
-+#include "scst_pres.h"
-+
-+static DECLARE_COMPLETION(scst_sysfs_root_release_completion);
-+
-+static struct kobject scst_sysfs_root_kobj;
-+static struct kobject *scst_targets_kobj;
-+static struct kobject *scst_devices_kobj;
-+static struct kobject *scst_sgv_kobj;
-+static struct kobject *scst_handlers_kobj;
-+
-+static const char *scst_dev_handler_types[] = {
-+ "Direct-access device (e.g., magnetic disk)",
-+ "Sequential-access device (e.g., magnetic tape)",
-+ "Printer device",
-+ "Processor device",
-+ "Write-once device (e.g., some optical disks)",
-+ "CD-ROM device",
-+ "Scanner device (obsolete)",
-+ "Optical memory device (e.g., some optical disks)",
-+ "Medium changer device (e.g., jukeboxes)",
-+ "Communications device (obsolete)",
-+ "Defined by ASC IT8 (Graphic arts pre-press devices)",
-+ "Defined by ASC IT8 (Graphic arts pre-press devices)",
-+ "Storage array controller device (e.g., RAID)",
-+ "Enclosure services device",
-+ "Simplified direct-access device (e.g., magnetic disk)",
-+ "Optical card reader/writer device"
-+};
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+
-+static DEFINE_MUTEX(scst_log_mutex);
-+
-+static struct scst_trace_log scst_trace_tbl[] = {
-+ { TRACE_OUT_OF_MEM, "out_of_mem" },
-+ { TRACE_MINOR, "minor" },
-+ { TRACE_SG_OP, "sg" },
-+ { TRACE_MEMORY, "mem" },
-+ { TRACE_BUFF, "buff" },
-+#ifndef GENERATING_UPSTREAM_PATCH
-+ { TRACE_ENTRYEXIT, "entryexit" },
-+#endif
-+ { TRACE_PID, "pid" },
-+ { TRACE_LINE, "line" },
-+ { TRACE_FUNCTION, "function" },
-+ { TRACE_DEBUG, "debug" },
-+ { TRACE_SPECIAL, "special" },
-+ { TRACE_SCSI, "scsi" },
-+ { TRACE_MGMT, "mgmt" },
-+ { TRACE_MGMT_DEBUG, "mgmt_dbg" },
-+ { TRACE_FLOW_CONTROL, "flow_control" },
-+ { TRACE_PRES, "pr" },
-+ { 0, NULL }
-+};
-+
-+static struct scst_trace_log scst_local_trace_tbl[] = {
-+ { TRACE_RTRY, "retry" },
-+ { TRACE_SCSI_SERIALIZING, "scsi_serializing" },
-+ { TRACE_RCV_BOT, "recv_bot" },
-+ { TRACE_SND_BOT, "send_bot" },
-+ { TRACE_RCV_TOP, "recv_top" },
-+ { TRACE_SND_TOP, "send_top" },
-+ { 0, NULL }
-+};
-+
-+static ssize_t scst_trace_level_show(const struct scst_trace_log *local_tbl,
-+ unsigned long log_level, char *buf, const char *help);
-+static int scst_write_trace(const char *buf, size_t length,
-+ unsigned long *log_level, unsigned long default_level,
-+ const char *name, const struct scst_trace_log *tbl);
-+
+#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
+
-+static ssize_t scst_luns_mgmt_show(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ char *buf);
-+static ssize_t scst_luns_mgmt_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count);
-+static ssize_t scst_tgt_addr_method_show(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ char *buf);
-+static ssize_t scst_tgt_addr_method_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count);
-+static ssize_t scst_tgt_io_grouping_type_show(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ char *buf);
-+static ssize_t scst_tgt_io_grouping_type_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count);
-+static ssize_t scst_ini_group_mgmt_show(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ char *buf);
-+static ssize_t scst_ini_group_mgmt_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count);
-+static ssize_t scst_rel_tgt_id_show(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ char *buf);
-+static ssize_t scst_rel_tgt_id_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count);
-+static ssize_t scst_acg_luns_mgmt_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count);
-+static ssize_t scst_acg_ini_mgmt_show(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ char *buf);
-+static ssize_t scst_acg_ini_mgmt_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count);
-+static ssize_t scst_acg_addr_method_show(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ char *buf);
-+static ssize_t scst_acg_addr_method_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count);
-+static ssize_t scst_acg_io_grouping_type_show(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ char *buf);
-+static ssize_t scst_acg_io_grouping_type_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count);
-+static ssize_t scst_acn_file_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf);
-+
+/**
+ ** Sysfs work
+ **/
@@ -18476,66 +25983,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+}
+EXPORT_SYMBOL(scst_sysfs_work_put);
+
-+/**
-+ * scst_sysfs_queue_wait_work() - waits for the work to complete
-+ *
-+ * Returnes status of the completed work or -EAGAIN if the work not
-+ * completed before timeout. In the latter case a user should poll
-+ * last_sysfs_mgmt_res until it returns the result of the processing.
-+ */
-+int scst_sysfs_queue_wait_work(struct scst_sysfs_work_item *work)
-+{
-+ int res = 0, rc;
-+ unsigned long timeout = 15*HZ;
-+
-+ TRACE_ENTRY();
-+
-+ spin_lock(&sysfs_work_lock);
-+
-+ TRACE_DBG("Adding sysfs work %p to the list", work);
-+ list_add_tail(&work->sysfs_work_list_entry, &sysfs_work_list);
-+
-+ active_sysfs_works++;
-+
-+ spin_unlock(&sysfs_work_lock);
-+
-+ kref_get(&work->sysfs_work_kref);
-+
-+ wake_up(&sysfs_work_waitQ);
-+
-+ while (1) {
-+ rc = wait_for_completion_interruptible_timeout(
-+ &work->sysfs_work_done, timeout);
-+ if (rc == 0) {
-+ if (!mutex_is_locked(&scst_mutex)) {
-+ TRACE_DBG("scst_mutex not locked, continue "
-+ "waiting (work %p)", work);
-+ timeout = 5*HZ;
-+ continue;
-+ }
-+ TRACE_MGMT_DBG("Time out waiting for work %p",
-+ work);
-+ res = -EAGAIN;
-+ goto out_put;
-+ } else if (rc < 0) {
-+ res = rc;
-+ goto out_put;
-+ }
-+ break;
-+ }
-+
-+ res = work->work_res;
-+
-+out_put:
-+ kref_put(&work->sysfs_work_kref, scst_sysfs_work_release);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL(scst_sysfs_queue_wait_work);
-+
-+/* Called under sysfs_work_lock and drops/reaquire it inside */
++/* Called under sysfs_work_lock and drops/reacquire it inside */
+static void scst_process_sysfs_works(void)
++ __releases(&sysfs_work_lock)
++ __acquires(&sysfs_work_lock)
+{
+ struct scst_sysfs_work_item *work;
+
@@ -18576,9 +26027,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+
+static int sysfs_work_thread_fn(void *arg)
+{
++ bool one_time_only = (bool)arg;
++
+ TRACE_ENTRY();
+
-+ PRINT_INFO("User interface thread started, PID %d", current->pid);
++ if (!one_time_only)
++ PRINT_INFO("User interface thread started, PID %d", current->pid);
+
+ current->flags |= PF_NOFREEZE;
+
@@ -18589,6 +26043,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ wait_queue_t wait;
+ init_waitqueue_entry(&wait, current);
+
++ if (one_time_only && !test_sysfs_work_list())
++ break;
++
+ if (!test_sysfs_work_list()) {
+ add_wait_queue_exclusive(&sysfs_work_waitQ, &wait);
+ for (;;) {
@@ -18607,18 +26064,100 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ }
+ spin_unlock(&sysfs_work_lock);
+
-+ /*
-+ * If kthread_should_stop() is true, we are guaranteed to be
-+ * on the module unload, so both lists must be empty.
-+ */
-+ BUG_ON(!list_empty(&sysfs_work_list));
++ if (!one_time_only) {
++ /*
++ * If kthread_should_stop() is true, we are guaranteed to be
++ * on the module unload, so both lists must be empty.
++ */
++ BUG_ON(!list_empty(&sysfs_work_list));
+
-+ PRINT_INFO("User interface thread PID %d finished", current->pid);
++ PRINT_INFO("User interface thread PID %d finished", current->pid);
++ }
+
+ TRACE_EXIT();
+ return 0;
+}
+
++/**
++ * scst_sysfs_queue_wait_work() - waits for the work to complete
++ *
++ * Returns status of the completed work or -EAGAIN if the work not
++ * completed before timeout. In the latter case a user should poll
++ * last_sysfs_mgmt_res until it returns the result of the processing.
++ */
++int scst_sysfs_queue_wait_work(struct scst_sysfs_work_item *work)
++{
++ int res = 0, rc;
++ unsigned long timeout = 15*HZ;
++ struct task_struct *t;
++ static atomic_t uid_thread_name = ATOMIC_INIT(0);
++
++ TRACE_ENTRY();
++
++ spin_lock(&sysfs_work_lock);
++
++ TRACE_DBG("Adding sysfs work %p to the list", work);
++ list_add_tail(&work->sysfs_work_list_entry, &sysfs_work_list);
++
++ active_sysfs_works++;
++
++ kref_get(&work->sysfs_work_kref);
++
++ spin_unlock(&sysfs_work_lock);
++
++ wake_up(&sysfs_work_waitQ);
++
++ /*
++ * We can have a dead lock possibility like: the sysfs thread is waiting
++ * for the last put during some object unregistration and at the same
++ * time another queued work is having reference on that object taken and
++ * waiting for attention from the sysfs thread. Generally, all sysfs
++ * functions calling kobject_get() and then queuing sysfs thread job
++ * affected by this. This is especially dangerous in read only cases,
++ * like vdev_sysfs_filename_show().
++ *
++ * So, to eliminate that deadlock we will create an extra sysfs thread
++ * for each queued sysfs work. This thread will quit as soon as it will
++ * see that there is not more queued works to process.
++ */
++
++ t = kthread_run(sysfs_work_thread_fn, (void *)true, "scst_uid%d",
++ atomic_inc_return(&uid_thread_name));
++ if (IS_ERR(t))
++ PRINT_ERROR("kthread_run() for user interface thread %d "
++ "failed: %d", atomic_read(&uid_thread_name),
++ (int)PTR_ERR(t));
++
++ while (1) {
++ rc = wait_for_completion_interruptible_timeout(
++ &work->sysfs_work_done, timeout);
++ if (rc == 0) {
++ if (!mutex_is_locked(&scst_mutex)) {
++ TRACE_DBG("scst_mutex not locked, continue "
++ "waiting (work %p)", work);
++ timeout = 5*HZ;
++ continue;
++ }
++ TRACE_MGMT_DBG("Time out waiting for work %p", work);
++ res = -EAGAIN;
++ goto out_put;
++ } else if (rc < 0) {
++ res = rc;
++ goto out_put;
++ }
++ break;
++ }
++
++ res = work->work_res;
++
++out_put:
++ kref_put(&work->sysfs_work_kref, scst_sysfs_work_release);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL(scst_sysfs_queue_wait_work);
++
+/* No locks */
+static int scst_check_grab_tgtt_ptr(struct scst_tgt_template *tgtt)
+{
@@ -18776,7 +26315,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+}
+
+/**
-+ ** Regilar SCST sysfs ops
++ ** Regular SCST sysfs ops
+ **/
+static ssize_t scst_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
@@ -18799,7 +26338,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ return -EIO;
+}
+
-+static const struct sysfs_ops scst_sysfs_ops = {
++const struct sysfs_ops scst_sysfs_ops = {
+ .show = scst_show,
+ .store = scst_store,
+};
@@ -18821,7 +26360,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ TRACE_ENTRY();
+
+ tgtt = container_of(kobj, struct scst_tgt_template, tgtt_kobj);
-+ complete_all(&tgtt->tgtt_kobj_release_cmpl);
++ if (tgtt->tgtt_kobj_release_cmpl)
++ complete_all(tgtt->tgtt_kobj_release_cmpl);
+
+ TRACE_EXIT();
+ return;
@@ -18856,10 +26396,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+
+ tgtt = container_of(kobj, struct scst_tgt_template, tgtt_kobj);
+
-+ if (mutex_lock_interruptible(&scst_log_mutex) != 0) {
-+ res = -EINTR;
++ res = mutex_lock_interruptible(&scst_log_mutex);
++ if (res != 0)
+ goto out;
-+ }
+
+ res = scst_write_trace(buf, count, tgtt->trace_flags,
+ tgtt->default_trace_flags, tgtt->name, tgtt->trace_tbl);
@@ -18880,16 +26419,16 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+static ssize_t scst_tgtt_mgmt_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
-+char *help = "Usage: echo \"add_target target_name [parameters]\" "
-+ ">mgmt\n"
-+ " echo \"del_target target_name\" >mgmt\n"
-+ "%s%s"
-+ "%s"
-+ "\n"
-+ "where parameters are one or more "
-+ "param_name=value pairs separated by ';'\n\n"
-+ "%s%s%s%s%s%s%s%s\n";
-+ struct scst_tgt_template *tgtt;
++ static const char help[] =
++ "Usage: echo \"add_target target_name [parameters]\" >mgmt\n"
++ " echo \"del_target target_name\" >mgmt\n"
++ "%s%s"
++ "%s"
++ "\n"
++ "where parameters are one or more "
++ "param_name=value pairs separated by ';'\n\n"
++ "%s%s%s%s%s%s%s%s\n";
++ struct scst_tgt_template *tgtt;
+
+ tgtt = container_of(kobj, struct scst_tgt_template, tgtt_kobj);
+
@@ -18997,13 +26536,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+
+ tgtt = container_of(kobj, struct scst_tgt_template, tgtt_kobj);
+
-+ buffer = kzalloc(count+1, GFP_KERNEL);
++ buffer = kasprintf(GFP_KERNEL, "%.*s", (int)count, buf);
+ if (buffer == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
-+ memcpy(buffer, buf, count);
-+ buffer[count] = '\0';
+
+ res = scst_alloc_sysfs_work(scst_tgtt_mgmt_store_work_fn, false, &work);
+ if (res != 0)
@@ -19032,12 +26569,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+int scst_tgtt_sysfs_create(struct scst_tgt_template *tgtt)
+{
+ int res = 0;
-+ const struct attribute **pattr;
+
+ TRACE_ENTRY();
+
-+ init_completion(&tgtt->tgtt_kobj_release_cmpl);
-+
+ res = kobject_init_and_add(&tgtt->tgtt_kobj, &tgtt_ktype,
+ scst_targets_kobj, tgtt->name);
+ if (res != 0) {
@@ -19055,19 +26589,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ }
+ }
+
-+ pattr = tgtt->tgtt_attrs;
-+ if (pattr != NULL) {
-+ while (*pattr != NULL) {
-+ TRACE_DBG("Creating attr %s for target driver %s",
-+ (*pattr)->name, tgtt->name);
-+ res = sysfs_create_file(&tgtt->tgtt_kobj, *pattr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add attr %s for target "
-+ "driver %s", (*pattr)->name,
-+ tgtt->name);
-+ goto out_del;
-+ }
-+ pattr++;
++ if (tgtt->tgtt_attrs) {
++ res = sysfs_create_files(&tgtt->tgtt_kobj, tgtt->tgtt_attrs);
++ if (res != 0) {
++ PRINT_ERROR("Can't add attributes for target "
++ "driver %s", tgtt->name);
++ goto out_del;
+ }
+ }
+
@@ -19100,18 +26627,21 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+void scst_tgtt_sysfs_del(struct scst_tgt_template *tgtt)
+{
+ int rc;
++ DECLARE_COMPLETION_ONSTACK(c);
+
+ TRACE_ENTRY();
+
++ tgtt->tgtt_kobj_release_cmpl = &c;
++
+ kobject_del(&tgtt->tgtt_kobj);
+ kobject_put(&tgtt->tgtt_kobj);
+
-+ rc = wait_for_completion_timeout(&tgtt->tgtt_kobj_release_cmpl, HZ);
++ rc = wait_for_completion_timeout(tgtt->tgtt_kobj_release_cmpl, HZ);
+ if (rc == 0) {
+ PRINT_INFO("Waiting for releasing sysfs entry "
+ "for target template %s (%d refs)...", tgtt->name,
+ atomic_read(&tgtt->tgtt_kobj.kref.refcount));
-+ wait_for_completion(&tgtt->tgtt_kobj_release_cmpl);
++ wait_for_completion(tgtt->tgtt_kobj_release_cmpl);
+ PRINT_INFO("Done waiting for releasing sysfs "
+ "entry for target template %s", tgtt->name);
+ }
@@ -19131,7 +26661,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ TRACE_ENTRY();
+
+ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ complete_all(&tgt->tgt_kobj_release_cmpl);
++ if (tgt->tgt_kobj_release_cmpl)
++ complete_all(tgt->tgt_kobj_release_cmpl);
+
+ TRACE_EXIT();
+ return;
@@ -19142,61 +26673,896 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ .release = scst_tgt_release,
+};
+
-+static void scst_acg_release(struct kobject *kobj)
++static int __scst_process_luns_mgmt_store(char *buffer,
++ struct scst_tgt *tgt, struct scst_acg *acg, bool tgt_kobj)
+{
-+ struct scst_acg *acg;
++ int res, read_only = 0, action;
++ char *p, *e = NULL;
++ unsigned int virt_lun;
++ struct scst_acg_dev *acg_dev = NULL, *acg_dev_tmp;
++ struct scst_device *d, *dev = NULL;
++ enum {
++ SCST_LUN_ACTION_ADD = 1,
++ SCST_LUN_ACTION_DEL = 2,
++ SCST_LUN_ACTION_REPLACE = 3,
++ SCST_LUN_ACTION_CLEAR = 4,
++ };
+
+ TRACE_ENTRY();
+
-+ acg = container_of(kobj, struct scst_acg, acg_kobj);
-+ complete_all(&acg->acg_kobj_release_cmpl);
++ TRACE_DBG("buffer %s", buffer);
+
-+ TRACE_EXIT();
-+ return;
++ p = buffer;
++ if (p[strlen(p) - 1] == '\n')
++ p[strlen(p) - 1] = '\0';
++ if (strncasecmp("add", p, 3) == 0) {
++ p += 3;
++ action = SCST_LUN_ACTION_ADD;
++ } else if (strncasecmp("del", p, 3) == 0) {
++ p += 3;
++ action = SCST_LUN_ACTION_DEL;
++ } else if (!strncasecmp("replace", p, 7)) {
++ p += 7;
++ action = SCST_LUN_ACTION_REPLACE;
++ } else if (!strncasecmp("clear", p, 5)) {
++ p += 5;
++ action = SCST_LUN_ACTION_CLEAR;
++ } else {
++ PRINT_ERROR("Unknown action \"%s\"", p);
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = scst_suspend_activity(true);
++ if (res != 0)
++ goto out;
++
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res != 0)
++ goto out_resume;
++
++ /* Check if tgt and acg not already freed while we were coming here */
++ if (scst_check_tgt_acg_ptrs(tgt, acg) != 0)
++ goto out_unlock;
++
++ if ((action != SCST_LUN_ACTION_CLEAR) &&
++ (action != SCST_LUN_ACTION_DEL)) {
++ if (!isspace(*p)) {
++ PRINT_ERROR("%s", "Syntax error");
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ while (isspace(*p) && *p != '\0')
++ p++;
++ e = p; /* save p */
++ while (!isspace(*e) && *e != '\0')
++ e++;
++ *e = '\0';
++
++ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
++ if (!strcmp(d->virt_name, p)) {
++ dev = d;
++ TRACE_DBG("Device %p (%s) found", dev, p);
++ break;
++ }
++ }
++ if (dev == NULL) {
++ PRINT_ERROR("Device '%s' not found", p);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++ }
++
++ switch (action) {
++ case SCST_LUN_ACTION_ADD:
++ case SCST_LUN_ACTION_REPLACE:
++ {
++ bool dev_replaced = false;
++
++ e++;
++ while (isspace(*e) && *e != '\0')
++ e++;
++
++ virt_lun = simple_strtoul(e, &e, 0);
++ if (virt_lun > SCST_MAX_LUN) {
++ PRINT_ERROR("Too big LUN %d (max %d)", virt_lun,
++ SCST_MAX_LUN);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ while (isspace(*e) && *e != '\0')
++ e++;
++
++ while (1) {
++ char *pp;
++ unsigned long val;
++ char *param = scst_get_next_token_str(&e);
++ if (param == NULL)
++ break;
++
++ p = scst_get_next_lexem(&param);
++ if (*p == '\0') {
++ PRINT_ERROR("Syntax error at %s (device %s)",
++ param, dev->virt_name);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ pp = scst_get_next_lexem(&param);
++ if (*pp == '\0') {
++ PRINT_ERROR("Parameter %s value missed for device %s",
++ p, dev->virt_name);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ if (scst_get_next_lexem(&param)[0] != '\0') {
++ PRINT_ERROR("Too many parameter's %s values (device %s)",
++ p, dev->virt_name);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ res = strict_strtoul(pp, 0, &val);
++ if (res != 0) {
++ PRINT_ERROR("strict_strtoul() for %s failed: %d "
++ "(device %s)", pp, res, dev->virt_name);
++ goto out_unlock;
++ }
++
++ if (!strcasecmp("read_only", p)) {
++ read_only = val;
++ TRACE_DBG("READ ONLY %d", read_only);
++ } else {
++ PRINT_ERROR("Unknown parameter %s (device %s)",
++ p, dev->virt_name);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++ }
++
++ acg_dev = NULL;
++ list_for_each_entry(acg_dev_tmp, &acg->acg_dev_list,
++ acg_dev_list_entry) {
++ if (acg_dev_tmp->lun == virt_lun) {
++ acg_dev = acg_dev_tmp;
++ break;
++ }
++ }
++
++ if (acg_dev != NULL) {
++ if (action == SCST_LUN_ACTION_ADD) {
++ PRINT_ERROR("virt lun %d already exists in "
++ "group %s", virt_lun, acg->acg_name);
++ res = -EEXIST;
++ goto out_unlock;
++ } else {
++ /* Replace */
++ res = scst_acg_del_lun(acg, acg_dev->lun,
++ false);
++ if (res != 0)
++ goto out_unlock;
++
++ dev_replaced = true;
++ }
++ }
++
++ res = scst_acg_add_lun(acg,
++ tgt_kobj ? tgt->tgt_luns_kobj : acg->luns_kobj,
++ dev, virt_lun, read_only, !dev_replaced, NULL);
++ if (res != 0)
++ goto out_unlock;
++
++ if (dev_replaced) {
++ struct scst_tgt_dev *tgt_dev;
++
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ if ((tgt_dev->acg_dev->acg == acg) &&
++ (tgt_dev->lun == virt_lun)) {
++ TRACE_MGMT_DBG("INQUIRY DATA HAS CHANGED"
++ " on tgt_dev %p", tgt_dev);
++ scst_gen_aen_or_ua(tgt_dev,
++ SCST_LOAD_SENSE(scst_sense_inquery_data_changed));
++ }
++ }
++ }
++
++ break;
++ }
++ case SCST_LUN_ACTION_DEL:
++ while (isspace(*p) && *p != '\0')
++ p++;
++ virt_lun = simple_strtoul(p, &p, 0);
++
++ res = scst_acg_del_lun(acg, virt_lun, true);
++ if (res != 0)
++ goto out_unlock;
++ break;
++ case SCST_LUN_ACTION_CLEAR:
++ PRINT_INFO("Removed all devices from group %s",
++ acg->acg_name);
++ list_for_each_entry_safe(acg_dev, acg_dev_tmp,
++ &acg->acg_dev_list,
++ acg_dev_list_entry) {
++ res = scst_acg_del_lun(acg, acg_dev->lun,
++ list_is_last(&acg_dev->acg_dev_list_entry,
++ &acg->acg_dev_list));
++ if (res != 0)
++ goto out_unlock;
++ }
++ break;
++ }
++
++ res = 0;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++
++out_resume:
++ scst_resume_activity();
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
+}
+
-+static struct kobj_type acg_ktype = {
-+ .sysfs_ops = &scst_sysfs_ops,
-+ .release = scst_acg_release,
-+};
++static int scst_luns_mgmt_store_work_fn(struct scst_sysfs_work_item *work)
++{
++ return __scst_process_luns_mgmt_store(work->buf, work->tgt, work->acg,
++ work->is_tgt_kobj);
++}
++
++static ssize_t __scst_acg_mgmt_store(struct scst_acg *acg,
++ const char *buf, size_t count, bool is_tgt_kobj,
++ int (*sysfs_work_fn)(struct scst_sysfs_work_item *))
++{
++ int res;
++ char *buffer;
++ struct scst_sysfs_work_item *work;
++
++ TRACE_ENTRY();
++
++ buffer = kasprintf(GFP_KERNEL, "%.*s", (int)count, buf);
++ if (buffer == NULL) {
++ res = -ENOMEM;
++ goto out;
++ }
++
++ res = scst_alloc_sysfs_work(sysfs_work_fn, false, &work);
++ if (res != 0)
++ goto out_free;
++
++ work->buf = buffer;
++ work->tgt = acg->tgt;
++ work->acg = acg;
++ work->is_tgt_kobj = is_tgt_kobj;
++
++ res = scst_sysfs_queue_wait_work(work);
++ if (res == 0)
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free:
++ kfree(buffer);
++ goto out;
++}
++
++static ssize_t __scst_luns_mgmt_store(struct scst_acg *acg,
++ bool tgt_kobj, const char *buf, size_t count)
++{
++ return __scst_acg_mgmt_store(acg, buf, count, tgt_kobj,
++ scst_luns_mgmt_store_work_fn);
++}
++
++static ssize_t scst_luns_mgmt_show(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ char *buf)
++{
++ static const char help[] =
++ "Usage: echo \"add|del H:C:I:L lun [parameters]\" >mgmt\n"
++ " echo \"add VNAME lun [parameters]\" >mgmt\n"
++ " echo \"del lun\" >mgmt\n"
++ " echo \"replace H:C:I:L lun [parameters]\" >mgmt\n"
++ " echo \"replace VNAME lun [parameters]\" >mgmt\n"
++ " echo \"clear\" >mgmt\n"
++ "\n"
++ "where parameters are one or more "
++ "param_name=value pairs separated by ';'\n"
++ "\nThe following parameters available: read_only.\n";
++
++ return sprintf(buf, "%s", help);
++}
++
++static ssize_t scst_luns_mgmt_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count)
++{
++ int res;
++ struct scst_acg *acg;
++ struct scst_tgt *tgt;
++
++ tgt = container_of(kobj->parent, struct scst_tgt, tgt_kobj);
++ acg = tgt->default_acg;
++
++ res = __scst_luns_mgmt_store(acg, true, buf, count);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
+
+static struct kobj_attribute scst_luns_mgmt =
+ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_luns_mgmt_show,
+ scst_luns_mgmt_store);
+
-+static struct kobj_attribute scst_acg_luns_mgmt =
-+ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_luns_mgmt_show,
-+ scst_acg_luns_mgmt_store);
++static ssize_t __scst_acg_addr_method_show(struct scst_acg *acg, char *buf)
++{
++ int res;
+
-+static struct kobj_attribute scst_acg_ini_mgmt =
-+ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_acg_ini_mgmt_show,
-+ scst_acg_ini_mgmt_store);
++ switch (acg->addr_method) {
++ case SCST_LUN_ADDR_METHOD_FLAT:
++ res = sprintf(buf, "FLAT\n");
++ break;
++ case SCST_LUN_ADDR_METHOD_PERIPHERAL:
++ res = sprintf(buf, "PERIPHERAL\n");
++ break;
++ case SCST_LUN_ADDR_METHOD_LUN:
++ res = sprintf(buf, "LUN\n");
++ break;
++ default:
++ res = sprintf(buf, "UNKNOWN\n");
++ break;
++ }
+
-+static struct kobj_attribute scst_ini_group_mgmt =
-+ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_ini_group_mgmt_show,
-+ scst_ini_group_mgmt_store);
++ if (acg->addr_method != acg->tgt->tgtt->preferred_addr_method)
++ res += sprintf(&buf[res], "%s\n", SCST_SYSFS_KEY_MARK);
++
++ return res;
++}
++
++static ssize_t __scst_acg_addr_method_store(struct scst_acg *acg,
++ const char *buf, size_t count)
++{
++ int res = count;
++
++ if (strncasecmp(buf, "FLAT", min_t(int, 4, count)) == 0)
++ acg->addr_method = SCST_LUN_ADDR_METHOD_FLAT;
++ else if (strncasecmp(buf, "PERIPHERAL", min_t(int, 10, count)) == 0)
++ acg->addr_method = SCST_LUN_ADDR_METHOD_PERIPHERAL;
++ else if (strncasecmp(buf, "LUN", min_t(int, 3, count)) == 0)
++ acg->addr_method = SCST_LUN_ADDR_METHOD_LUN;
++ else {
++ PRINT_ERROR("Unknown address method %s", buf);
++ res = -EINVAL;
++ }
++
++ TRACE_DBG("acg %p, addr_method %d", acg, acg->addr_method);
++
++ return res;
++}
++
++static ssize_t scst_tgt_addr_method_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_acg *acg;
++ struct scst_tgt *tgt;
++
++ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ acg = tgt->default_acg;
++
++ return __scst_acg_addr_method_show(acg, buf);
++}
++
++static ssize_t scst_tgt_addr_method_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ struct scst_acg *acg;
++ struct scst_tgt *tgt;
++
++ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ acg = tgt->default_acg;
++
++ res = __scst_acg_addr_method_store(acg, buf, count);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
+
+static struct kobj_attribute scst_tgt_addr_method =
+ __ATTR(addr_method, S_IRUGO | S_IWUSR, scst_tgt_addr_method_show,
+ scst_tgt_addr_method_store);
+
++static ssize_t __scst_acg_io_grouping_type_show(struct scst_acg *acg, char *buf)
++{
++ int res;
++
++ switch (acg->acg_io_grouping_type) {
++ case SCST_IO_GROUPING_AUTO:
++ res = sprintf(buf, "%s\n", SCST_IO_GROUPING_AUTO_STR);
++ break;
++ case SCST_IO_GROUPING_THIS_GROUP_ONLY:
++ res = sprintf(buf, "%s\n%s\n",
++ SCST_IO_GROUPING_THIS_GROUP_ONLY_STR,
++ SCST_SYSFS_KEY_MARK);
++ break;
++ case SCST_IO_GROUPING_NEVER:
++ res = sprintf(buf, "%s\n%s\n", SCST_IO_GROUPING_NEVER_STR,
++ SCST_SYSFS_KEY_MARK);
++ break;
++ default:
++ res = sprintf(buf, "%d\n%s\n", acg->acg_io_grouping_type,
++ SCST_SYSFS_KEY_MARK);
++ break;
++ }
++
++ return res;
++}
++
++static int __scst_acg_process_io_grouping_type_store(struct scst_tgt *tgt,
++ struct scst_acg *acg, int io_grouping_type)
++{
++ int res = 0;
++ struct scst_acg_dev *acg_dev;
++
++ TRACE_DBG("tgt %p, acg %p, io_grouping_type %d", tgt, acg,
++ io_grouping_type);
++
++ res = scst_suspend_activity(true);
++ if (res != 0)
++ goto out;
++
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res != 0)
++ goto out_resume;
++
++ /* Check if tgt and acg not already freed while we were coming here */
++ if (scst_check_tgt_acg_ptrs(tgt, acg) != 0)
++ goto out_unlock;
++
++ acg->acg_io_grouping_type = io_grouping_type;
++
++ list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
++ int rc;
++
++ scst_stop_dev_threads(acg_dev->dev);
++
++ rc = scst_create_dev_threads(acg_dev->dev);
++ if (rc != 0)
++ res = rc;
++ }
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++
++out_resume:
++ scst_resume_activity();
++
++out:
++ return res;
++}
++
++static int __scst_acg_io_grouping_type_store_work_fn(struct scst_sysfs_work_item *work)
++{
++ return __scst_acg_process_io_grouping_type_store(work->tgt, work->acg,
++ work->io_grouping_type);
++}
++
++static ssize_t __scst_acg_io_grouping_type_store(struct scst_acg *acg,
++ const char *buf, size_t count)
++{
++ int res = 0;
++ int prev = acg->acg_io_grouping_type;
++ long io_grouping_type;
++ struct scst_sysfs_work_item *work;
++
++ if (strncasecmp(buf, SCST_IO_GROUPING_AUTO_STR,
++ min_t(int, strlen(SCST_IO_GROUPING_AUTO_STR), count)) == 0)
++ io_grouping_type = SCST_IO_GROUPING_AUTO;
++ else if (strncasecmp(buf, SCST_IO_GROUPING_THIS_GROUP_ONLY_STR,
++ min_t(int, strlen(SCST_IO_GROUPING_THIS_GROUP_ONLY_STR), count)) == 0)
++ io_grouping_type = SCST_IO_GROUPING_THIS_GROUP_ONLY;
++ else if (strncasecmp(buf, SCST_IO_GROUPING_NEVER_STR,
++ min_t(int, strlen(SCST_IO_GROUPING_NEVER_STR), count)) == 0)
++ io_grouping_type = SCST_IO_GROUPING_NEVER;
++ else {
++ res = strict_strtol(buf, 0, &io_grouping_type);
++ if ((res != 0) || (io_grouping_type <= 0)) {
++ PRINT_ERROR("Unknown or not allowed I/O grouping type "
++ "%s", buf);
++ res = -EINVAL;
++ goto out;
++ }
++ }
++
++ if (prev == io_grouping_type)
++ goto out;
++
++ res = scst_alloc_sysfs_work(__scst_acg_io_grouping_type_store_work_fn,
++ false, &work);
++ if (res != 0)
++ goto out;
++
++ work->tgt = acg->tgt;
++ work->acg = acg;
++ work->io_grouping_type = io_grouping_type;
++
++ res = scst_sysfs_queue_wait_work(work);
++
++out:
++ return res;
++}
++
++static ssize_t scst_tgt_io_grouping_type_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_acg *acg;
++ struct scst_tgt *tgt;
++
++ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ acg = tgt->default_acg;
++
++ return __scst_acg_io_grouping_type_show(acg, buf);
++}
++
++static ssize_t scst_tgt_io_grouping_type_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ struct scst_acg *acg;
++ struct scst_tgt *tgt;
++
++ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ acg = tgt->default_acg;
++
++ res = __scst_acg_io_grouping_type_store(acg, buf, count);
++ if (res != 0)
++ goto out;
++
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
+static struct kobj_attribute scst_tgt_io_grouping_type =
+ __ATTR(io_grouping_type, S_IRUGO | S_IWUSR,
+ scst_tgt_io_grouping_type_show,
+ scst_tgt_io_grouping_type_store);
+
-+static struct kobj_attribute scst_rel_tgt_id =
-+ __ATTR(rel_tgt_id, S_IRUGO | S_IWUSR, scst_rel_tgt_id_show,
-+ scst_rel_tgt_id_store);
++static ssize_t __scst_acg_cpu_mask_show(struct scst_acg *acg, char *buf)
++{
++ int res;
+
-+static struct kobj_attribute scst_acg_addr_method =
-+ __ATTR(addr_method, S_IRUGO | S_IWUSR, scst_acg_addr_method_show,
-+ scst_acg_addr_method_store);
++ res = cpumask_scnprintf(buf, SCST_SYSFS_BLOCK_SIZE,
++ &acg->acg_cpu_mask);
++ if (!cpus_equal(acg->acg_cpu_mask, default_cpu_mask))
++ res += sprintf(&buf[res], "\n%s\n", SCST_SYSFS_KEY_MARK);
+
-+static struct kobj_attribute scst_acg_io_grouping_type =
-+ __ATTR(io_grouping_type, S_IRUGO | S_IWUSR,
-+ scst_acg_io_grouping_type_show,
-+ scst_acg_io_grouping_type_store);
++ return res;
++}
++
++static int __scst_acg_process_cpu_mask_store(struct scst_tgt *tgt,
++ struct scst_acg *acg, cpumask_t *cpu_mask)
++{
++ int res = 0;
++ struct scst_session *sess;
++
++ TRACE_DBG("tgt %p, acg %p", tgt, acg);
++
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res != 0)
++ goto out;
++
++ /* Check if tgt and acg not already freed while we were coming here */
++ if (scst_check_tgt_acg_ptrs(tgt, acg) != 0)
++ goto out_unlock;
++
++ cpumask_copy(&acg->acg_cpu_mask, cpu_mask);
++
++ list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
++ int i;
++ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
++ struct scst_tgt_dev *tgt_dev;
++ struct list_head *head = &sess->sess_tgt_dev_list[i];
++ list_for_each_entry(tgt_dev, head,
++ sess_tgt_dev_list_entry) {
++ struct scst_cmd_thread_t *thr;
++ if (tgt_dev->active_cmd_threads != &tgt_dev->tgt_dev_cmd_threads)
++ continue;
++ list_for_each_entry(thr,
++ &tgt_dev->active_cmd_threads->threads_list,
++ thread_list_entry) {
++ int rc;
++ rc = set_cpus_allowed_ptr(thr->cmd_thread, cpu_mask);
++ if (rc != 0)
++ PRINT_ERROR("Setting CPU "
++ "affinity failed: %d", rc);
++ }
++ }
++ }
++ if (tgt->tgtt->report_aen != NULL) {
++ struct scst_aen *aen;
++ int rc;
++
++ aen = scst_alloc_aen(sess, 0);
++ if (aen == NULL) {
++ PRINT_ERROR("Unable to notify target driver %s "
++ "about cpu_mask change", tgt->tgt_name);
++ continue;
++ }
++
++ aen->event_fn = SCST_AEN_CPU_MASK_CHANGED;
++
++ TRACE_DBG("Calling target's %s report_aen(%p)",
++ tgt->tgtt->name, aen);
++ rc = tgt->tgtt->report_aen(aen);
++ TRACE_DBG("Target's %s report_aen(%p) returned %d",
++ tgt->tgtt->name, aen, rc);
++ if (rc != SCST_AEN_RES_SUCCESS)
++ scst_free_aen(aen);
++ }
++ }
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++
++out:
++ return res;
++}
++
++static int __scst_acg_cpu_mask_store_work_fn(struct scst_sysfs_work_item *work)
++{
++ return __scst_acg_process_cpu_mask_store(work->tgt, work->acg,
++ &work->cpu_mask);
++}
++
++static ssize_t __scst_acg_cpu_mask_store(struct scst_acg *acg,
++ const char *buf, size_t count)
++{
++ int res;
++ struct scst_sysfs_work_item *work;
++
++ /* cpumask might be too big for stack */
++
++ res = scst_alloc_sysfs_work(__scst_acg_cpu_mask_store_work_fn,
++ false, &work);
++ if (res != 0)
++ goto out;
++
++ /*
++ * We can't use cpumask_parse_user() here, because it expects
++ * buffer in the user space.
++ */
++ res = __bitmap_parse(buf, count, 0, cpumask_bits(&work->cpu_mask),
++ nr_cpumask_bits);
++ if (res != 0) {
++ PRINT_ERROR("__bitmap_parse() failed: %d", res);
++ goto out_release;
++ }
++
++ if (cpus_equal(acg->acg_cpu_mask, work->cpu_mask))
++ goto out;
++
++ work->tgt = acg->tgt;
++ work->acg = acg;
++
++ res = scst_sysfs_queue_wait_work(work);
++
++out:
++ return res;
++
++out_release:
++ scst_sysfs_work_release(&work->sysfs_work_kref);
++ goto out;
++}
++
++static ssize_t scst_tgt_cpu_mask_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_acg *acg;
++ struct scst_tgt *tgt;
++
++ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ acg = tgt->default_acg;
++
++ return __scst_acg_cpu_mask_show(acg, buf);
++}
++
++static ssize_t scst_tgt_cpu_mask_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ struct scst_acg *acg;
++ struct scst_tgt *tgt;
++
++ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ acg = tgt->default_acg;
++
++ res = __scst_acg_cpu_mask_store(acg, buf, count);
++ if (res != 0)
++ goto out;
++
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct kobj_attribute scst_tgt_cpu_mask =
++ __ATTR(cpu_mask, S_IRUGO | S_IWUSR,
++ scst_tgt_cpu_mask_show,
++ scst_tgt_cpu_mask_store);
++
++static ssize_t scst_ini_group_mgmt_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ static const char help[] =
++ "Usage: echo \"create GROUP_NAME\" >mgmt\n"
++ " echo \"del GROUP_NAME\" >mgmt\n";
++
++ return sprintf(buf, "%s", help);
++}
++
++static int scst_process_ini_group_mgmt_store(char *buffer,
++ struct scst_tgt *tgt)
++{
++ int res, action;
++ char *p, *e = NULL;
++ struct scst_acg *a, *acg = NULL;
++ enum {
++ SCST_INI_GROUP_ACTION_CREATE = 1,
++ SCST_INI_GROUP_ACTION_DEL = 2,
++ };
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("tgt %p, buffer %s", tgt, buffer);
++
++ p = buffer;
++ if (p[strlen(p) - 1] == '\n')
++ p[strlen(p) - 1] = '\0';
++ if (strncasecmp("create ", p, 7) == 0) {
++ p += 7;
++ action = SCST_INI_GROUP_ACTION_CREATE;
++ } else if (strncasecmp("del ", p, 4) == 0) {
++ p += 4;
++ action = SCST_INI_GROUP_ACTION_DEL;
++ } else {
++ PRINT_ERROR("Unknown action \"%s\"", p);
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = scst_suspend_activity(true);
++ if (res != 0)
++ goto out;
++
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res != 0)
++ goto out_resume;
++
++ /* Check if our pointer is still alive */
++ if (scst_check_tgt_acg_ptrs(tgt, NULL) != 0)
++ goto out_unlock;
++
++ while (isspace(*p) && *p != '\0')
++ p++;
++ e = p;
++ while (!isspace(*e) && *e != '\0')
++ e++;
++ *e = '\0';
++
++ if (p[0] == '\0') {
++ PRINT_ERROR("%s", "Group name required");
++ res = -EINVAL;
++ goto out_unlock;
++ }
++
++ list_for_each_entry(a, &tgt->tgt_acg_list, acg_list_entry) {
++ if (strcmp(a->acg_name, p) == 0) {
++ TRACE_DBG("group (acg) %p %s found",
++ a, a->acg_name);
++ acg = a;
++ break;
++ }
++ }
++
++ switch (action) {
++ case SCST_INI_GROUP_ACTION_CREATE:
++ TRACE_DBG("Creating group '%s'", p);
++ if (acg != NULL) {
++ PRINT_ERROR("acg name %s exist", p);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++ acg = scst_alloc_add_acg(tgt, p, true);
++ if (acg == NULL)
++ goto out_unlock;
++ break;
++ case SCST_INI_GROUP_ACTION_DEL:
++ TRACE_DBG("Deleting group '%s'", p);
++ if (acg == NULL) {
++ PRINT_ERROR("Group %s not found", p);
++ res = -EINVAL;
++ goto out_unlock;
++ }
++ if (!scst_acg_sess_is_empty(acg)) {
++ PRINT_ERROR("Group %s is not empty", acg->acg_name);
++ res = -EBUSY;
++ goto out_unlock;
++ }
++ scst_del_free_acg(acg);
++ break;
++ }
++
++ res = 0;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++
++out_resume:
++ scst_resume_activity();
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_ini_group_mgmt_store_work_fn(struct scst_sysfs_work_item *work)
++{
++ return scst_process_ini_group_mgmt_store(work->buf, work->tgt);
++}
++
++static ssize_t scst_ini_group_mgmt_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ char *buffer;
++ struct scst_tgt *tgt;
++ struct scst_sysfs_work_item *work;
++
++ TRACE_ENTRY();
++
++ tgt = container_of(kobj->parent, struct scst_tgt, tgt_kobj);
++
++ buffer = kasprintf(GFP_KERNEL, "%.*s", (int)count, buf);
++ if (buffer == NULL) {
++ res = -ENOMEM;
++ goto out;
++ }
++
++ res = scst_alloc_sysfs_work(scst_ini_group_mgmt_store_work_fn, false,
++ &work);
++ if (res != 0)
++ goto out_free;
++
++ work->buf = buffer;
++ work->tgt = tgt;
++
++ res = scst_sysfs_queue_wait_work(work);
++ if (res == 0)
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free:
++ kfree(buffer);
++ goto out;
++}
++
++static struct kobj_attribute scst_ini_group_mgmt =
++ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_ini_group_mgmt_show,
++ scst_ini_group_mgmt_store);
+
+static ssize_t scst_tgt_enable_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
@@ -19314,6 +27680,191 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ __ATTR(enabled, S_IRUGO | S_IWUSR,
+ scst_tgt_enable_show, scst_tgt_enable_store);
+
++static ssize_t scst_rel_tgt_id_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_tgt *tgt;
++ int res;
++
++ TRACE_ENTRY();
++
++ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++
++ res = sprintf(buf, "%d\n%s", tgt->rel_tgt_id,
++ (tgt->rel_tgt_id != 0) ? SCST_SYSFS_KEY_MARK "\n" : "");
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_process_rel_tgt_id_store(struct scst_sysfs_work_item *work)
++{
++ int res = 0;
++ struct scst_tgt *tgt = work->tgt_r;
++ unsigned long rel_tgt_id = work->rel_tgt_id;
++ bool enabled;
++
++ TRACE_ENTRY();
++
++ /* tgt protected by kobject_get() */
++
++ TRACE_DBG("Trying to set relative target port id %d",
++ (uint16_t)rel_tgt_id);
++
++ if (tgt->tgtt->is_target_enabled != NULL)
++ enabled = tgt->tgtt->is_target_enabled(tgt);
++ else
++ enabled = true;
++
++ if (enabled && rel_tgt_id != tgt->rel_tgt_id) {
++ if (!scst_is_relative_target_port_id_unique(rel_tgt_id, tgt)) {
++ PRINT_ERROR("Relative port id %d is not unique",
++ (uint16_t)rel_tgt_id);
++ res = -EBADSLT;
++ goto out_put;
++ }
++ }
++
++ if (rel_tgt_id < SCST_MIN_REL_TGT_ID ||
++ rel_tgt_id > SCST_MAX_REL_TGT_ID) {
++ if ((rel_tgt_id == 0) && !enabled)
++ goto set;
++
++ PRINT_ERROR("Invalid relative port id %d",
++ (uint16_t)rel_tgt_id);
++ res = -EINVAL;
++ goto out_put;
++ }
++
++set:
++ tgt->rel_tgt_id = (uint16_t)rel_tgt_id;
++
++out_put:
++ kobject_put(&tgt->tgt_kobj);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t scst_rel_tgt_id_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res = 0;
++ struct scst_tgt *tgt;
++ unsigned long rel_tgt_id;
++ struct scst_sysfs_work_item *work;
++
++ TRACE_ENTRY();
++
++ if (buf == NULL)
++ goto out;
++
++ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++
++ res = strict_strtoul(buf, 0, &rel_tgt_id);
++ if (res != 0) {
++ PRINT_ERROR("%s", "Wrong rel_tgt_id");
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = scst_alloc_sysfs_work(scst_process_rel_tgt_id_store, false,
++ &work);
++ if (res != 0)
++ goto out;
++
++ work->tgt_r = tgt;
++ work->rel_tgt_id = rel_tgt_id;
++
++ kobject_get(&tgt->tgt_kobj);
++
++ res = scst_sysfs_queue_wait_work(work);
++ if (res == 0)
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct kobj_attribute scst_rel_tgt_id =
++ __ATTR(rel_tgt_id, S_IRUGO | S_IWUSR, scst_rel_tgt_id_show,
++ scst_rel_tgt_id_store);
++
++static ssize_t scst_tgt_comment_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_tgt *tgt;
++ int res;
++
++ TRACE_ENTRY();
++
++ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++
++ if (tgt->tgt_comment != NULL)
++ res = sprintf(buf, "%s\n%s", tgt->tgt_comment,
++ SCST_SYSFS_KEY_MARK "\n");
++ else
++ res = 0;
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t scst_tgt_comment_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ int res;
++ struct scst_tgt *tgt;
++ char *p;
++ int len;
++
++ TRACE_ENTRY();
++
++ if ((buf == NULL) || (count == 0)) {
++ res = 0;
++ goto out;
++ }
++
++ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++
++ len = strnlen(buf, count);
++ if (buf[count-1] == '\n')
++ len--;
++
++ if (len == 0) {
++ kfree(tgt->tgt_comment);
++ tgt->tgt_comment = NULL;
++ goto out_done;
++ }
++
++ p = kmalloc(len+1, GFP_KERNEL);
++ if (p == NULL) {
++ PRINT_ERROR("Unable to alloc tgt_comment string (len %d)",
++ len+1);
++ res = -ENOMEM;
++ goto out;
++ }
++
++ memcpy(p, buf, len);
++ p[len] = '\0';
++
++ kfree(tgt->tgt_comment);
++
++ tgt->tgt_comment = p;
++
++out_done:
++ res = count;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct kobj_attribute scst_tgt_comment =
++ __ATTR(comment, S_IRUGO | S_IWUSR, scst_tgt_comment_show,
++ scst_tgt_comment_store);
++
+/*
+ * Supposed to be called under scst_mutex. In case of error will drop,
+ * then reacquire it.
@@ -19321,12 +27872,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+int scst_tgt_sysfs_create(struct scst_tgt *tgt)
+{
+ int res;
-+ const struct attribute **pattr;
+
+ TRACE_ENTRY();
+
-+ init_completion(&tgt->tgt_kobj_release_cmpl);
-+
+ res = kobject_init_and_add(&tgt->tgt_kobj, &tgt_ktype,
+ &tgt->tgtt->tgtt_kobj, tgt->tgt_name);
+ if (res != 0) {
@@ -19389,6 +27937,14 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ }
+
+ res = sysfs_create_file(&tgt->tgt_kobj,
++ &scst_tgt_comment.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add attribute %s for tgt %s",
++ scst_tgt_comment.attr.name, tgt->tgt_name);
++ goto out_err;
++ }
++
++ res = sysfs_create_file(&tgt->tgt_kobj,
+ &scst_tgt_addr_method.attr);
+ if (res != 0) {
+ PRINT_ERROR("Can't add attribute %s for tgt %s",
@@ -19404,18 +27960,19 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ goto out_err;
+ }
+
-+ pattr = tgt->tgtt->tgt_attrs;
-+ if (pattr != NULL) {
-+ while (*pattr != NULL) {
-+ TRACE_DBG("Creating attr %s for tgt %s", (*pattr)->name,
-+ tgt->tgt_name);
-+ res = sysfs_create_file(&tgt->tgt_kobj, *pattr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add tgt attr %s for tgt %s",
-+ (*pattr)->name, tgt->tgt_name);
-+ goto out_err;
-+ }
-+ pattr++;
++ res = sysfs_create_file(&tgt->tgt_kobj, &scst_tgt_cpu_mask.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add attribute %s for tgt %s",
++ scst_tgt_cpu_mask.attr.name, tgt->tgt_name);
++ goto out_err;
++ }
++
++ if (tgt->tgtt->tgt_attrs) {
++ res = sysfs_create_files(&tgt->tgt_kobj, tgt->tgtt->tgt_attrs);
++ if (res != 0) {
++ PRINT_ERROR("Can't add attributes for tgt %s",
++ tgt->tgt_name);
++ goto out_err;
+ }
+ }
+
@@ -19441,9 +27998,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+void scst_tgt_sysfs_del(struct scst_tgt *tgt)
+{
+ int rc;
++ DECLARE_COMPLETION_ONSTACK(c);
+
+ TRACE_ENTRY();
+
++ tgt->tgt_kobj_release_cmpl = &c;
++
+ kobject_del(tgt->tgt_sess_kobj);
+ kobject_del(tgt->tgt_luns_kobj);
+ kobject_del(tgt->tgt_ini_grp_kobj);
@@ -19454,12 +28014,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ kobject_put(tgt->tgt_ini_grp_kobj);
+ kobject_put(&tgt->tgt_kobj);
+
-+ rc = wait_for_completion_timeout(&tgt->tgt_kobj_release_cmpl, HZ);
++ rc = wait_for_completion_timeout(tgt->tgt_kobj_release_cmpl, HZ);
+ if (rc == 0) {
+ PRINT_INFO("Waiting for releasing sysfs entry "
+ "for target %s (%d refs)...", tgt->tgt_name,
+ atomic_read(&tgt->tgt_kobj.kref.refcount));
-+ wait_for_completion(&tgt->tgt_kobj_release_cmpl);
++ wait_for_completion(tgt->tgt_kobj_release_cmpl);
+ PRINT_INFO("Done waiting for releasing sysfs "
+ "entry for target %s", tgt->tgt_name);
+ }
@@ -19482,7 +28042,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ dev = container_of(kobj, struct scst_device, dev_kobj);
+
+ pos = sprintf(buf, "%d - %s\n", dev->type,
-+ (unsigned)dev->type > ARRAY_SIZE(scst_dev_handler_types) ?
++ (unsigned)dev->type >= ARRAY_SIZE(scst_dev_handler_types) ?
+ "unknown" : scst_dev_handler_types[dev->type]);
+
+ return pos;
@@ -19530,10 +28090,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ if (res != 0)
+ goto out;
+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res != 0)
+ goto out_resume;
-+ }
+
+ /* Check if our pointer is still alive */
+ if (scst_check_dev_ptr(dev) != 0)
@@ -19578,6 +28137,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+{
+ int res = 0;
+
++ TRACE_ENTRY();
++
+ *stop = false;
+
+ if (dev->threads_num < 0) {
@@ -19594,6 +28155,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ }
+
+out:
++ TRACE_EXIT_RES(res);
+ return res;
+}
+
@@ -19654,10 +28216,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ work->new_threads_pool_type = dev->threads_pool_type;
+
+ res = scst_sysfs_queue_wait_work(work);
++
++out:
+ if (res == 0)
+ res = count;
+
-+out:
+ TRACE_EXIT_RES(res);
+ return res;
+}
@@ -19743,10 +28306,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ work->new_threads_pool_type = newtpt;
+
+ res = scst_sysfs_queue_wait_work(work);
++
++out:
+ if (res == 0)
+ res = count;
+
-+out:
+ TRACE_EXIT_RES(res);
+ return res;
+}
@@ -19768,7 +28332,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ TRACE_ENTRY();
+
+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ complete_all(&dev->dev_kobj_release_cmpl);
++ if (dev->dev_kobj_release_cmpl)
++ complete_all(dev->dev_kobj_release_cmpl);
+
+ TRACE_EXIT();
+ return;
@@ -19777,7 +28342,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+int scst_devt_dev_sysfs_create(struct scst_device *dev)
+{
+ int res = 0;
-+ const struct attribute **pattr;
+
+ TRACE_ENTRY();
+
@@ -19819,16 +28383,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ }
+ }
+
-+ pattr = dev->handler->dev_attrs;
-+ if (pattr != NULL) {
-+ while (*pattr != NULL) {
-+ res = sysfs_create_file(&dev->dev_kobj, *pattr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add dev attr %s for dev %s",
-+ (*pattr)->name, dev->virt_name);
-+ goto out_err;
-+ }
-+ pattr++;
++ if (dev->handler->dev_attrs) {
++ res = sysfs_create_files(&dev->dev_kobj,
++ dev->handler->dev_attrs);
++ if (res != 0) {
++ PRINT_ERROR("Can't add dev attributes for dev %s",
++ dev->virt_name);
++ goto out_err;
+ }
+ }
+
@@ -19843,20 +28404,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+
+void scst_devt_dev_sysfs_del(struct scst_device *dev)
+{
-+ const struct attribute **pattr;
-+
+ TRACE_ENTRY();
+
+ if (dev->handler == &scst_null_devtype)
+ goto out;
+
-+ pattr = dev->handler->dev_attrs;
-+ if (pattr != NULL) {
-+ while (*pattr != NULL) {
-+ sysfs_remove_file(&dev->dev_kobj, *pattr);
-+ pattr++;
-+ }
-+ }
++ if (dev->handler->dev_attrs)
++ sysfs_remove_files(&dev->dev_kobj, dev->handler->dev_attrs);
+
+ sysfs_remove_link(&dev->dev_kobj, "handler");
+ sysfs_remove_link(&dev->handler->devt_kobj, dev->virt_name);
@@ -19889,8 +28443,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+
+ TRACE_ENTRY();
+
-+ init_completion(&dev->dev_kobj_release_cmpl);
-+
+ res = kobject_init_and_add(&dev->dev_kobj, &scst_dev_ktype,
+ scst_devices_kobj, dev->virt_name);
+ if (res != 0) {
@@ -19946,21 +28498,24 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+void scst_dev_sysfs_del(struct scst_device *dev)
+{
+ int rc;
++ DECLARE_COMPLETION_ONSTACK(c);
+
+ TRACE_ENTRY();
+
++ dev->dev_kobj_release_cmpl = &c;
++
+ kobject_del(dev->dev_exp_kobj);
+ kobject_del(&dev->dev_kobj);
+
+ kobject_put(dev->dev_exp_kobj);
+ kobject_put(&dev->dev_kobj);
+
-+ rc = wait_for_completion_timeout(&dev->dev_kobj_release_cmpl, HZ);
++ rc = wait_for_completion_timeout(dev->dev_kobj_release_cmpl, HZ);
+ if (rc == 0) {
+ PRINT_INFO("Waiting for releasing sysfs entry "
+ "for device %s (%d refs)...", dev->virt_name,
+ atomic_read(&dev->dev_kobj.kref.refcount));
-+ wait_for_completion(&dev->dev_kobj_release_cmpl);
++ wait_for_completion(dev->dev_kobj_release_cmpl);
+ PRINT_INFO("Done waiting for releasing sysfs "
+ "entry for device %s", dev->virt_name);
+ }
@@ -19970,7 +28525,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+}
+
+/**
-+ ** Tgt_dev's directory implementation
++ ** Tgt_dev implementation
+ **/
+
+#ifdef CONFIG_SCST_MEASURE_LATENCY
@@ -20120,7 +28675,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ TRACE_ENTRY();
+
+ tgt_dev = container_of(kobj, struct scst_tgt_dev, tgt_dev_kobj);
-+ complete_all(&tgt_dev->tgt_dev_kobj_release_cmpl);
++ if (tgt_dev->tgt_dev_kobj_release_cmpl)
++ complete_all(tgt_dev->tgt_dev_kobj_release_cmpl);
+
+ TRACE_EXIT();
+ return;
@@ -20138,8 +28694,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+
+ TRACE_ENTRY();
+
-+ init_completion(&tgt_dev->tgt_dev_kobj_release_cmpl);
-+
+ res = kobject_init_and_add(&tgt_dev->tgt_dev_kobj, &scst_tgt_dev_ktype,
+ &tgt_dev->sess->sess_kobj, "lun%lld",
+ (unsigned long long)tgt_dev->lun);
@@ -20164,20 +28718,23 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+void scst_tgt_dev_sysfs_del(struct scst_tgt_dev *tgt_dev)
+{
+ int rc;
++ DECLARE_COMPLETION_ONSTACK(c);
+
+ TRACE_ENTRY();
+
++ tgt_dev->tgt_dev_kobj_release_cmpl = &c;
++
+ kobject_del(&tgt_dev->tgt_dev_kobj);
+ kobject_put(&tgt_dev->tgt_dev_kobj);
+
+ rc = wait_for_completion_timeout(
-+ &tgt_dev->tgt_dev_kobj_release_cmpl, HZ);
++ tgt_dev->tgt_dev_kobj_release_cmpl, HZ);
+ if (rc == 0) {
+ PRINT_INFO("Waiting for releasing sysfs entry "
+ "for tgt_dev %lld (%d refs)...",
+ (unsigned long long)tgt_dev->lun,
+ atomic_read(&tgt_dev->tgt_dev_kobj.kref.refcount));
-+ wait_for_completion(&tgt_dev->tgt_dev_kobj_release_cmpl);
++ wait_for_completion(tgt_dev->tgt_dev_kobj_release_cmpl);
+ PRINT_INFO("Done waiting for releasing sysfs entry for "
+ "tgt_dev %lld", (unsigned long long)tgt_dev->lun);
+ }
@@ -20350,10 +28907,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+
+ TRACE_ENTRY();
+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res != 0)
+ goto out_put;
-+ }
+
+ PRINT_INFO("Zeroing latency statistics for initiator "
+ "%s", sess->initiator_name);
@@ -20373,12 +28929,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ memset(sess->sess_latency_stat, 0,
+ sizeof(sess->sess_latency_stat));
+
-+ for (t = TGT_DEV_HASH_SIZE-1; t >= 0; t--) {
-+ struct list_head *sess_tgt_dev_list_head =
-+ &sess->sess_tgt_dev_list_hash[t];
++ for (t = SESS_TGT_DEV_LIST_HASH_SIZE-1; t >= 0; t--) {
++ struct list_head *head = &sess->sess_tgt_dev_list[t];
+ struct scst_tgt_dev *tgt_dev;
-+ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
-+ sess_tgt_dev_list_entry) {
++ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
+ tgt_dev->scst_time = 0;
+ tgt_dev->tgt_time = 0;
+ tgt_dev->dev_time = 0;
@@ -20453,17 +29007,14 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+
+ TRACE_ENTRY();
+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res != 0)
+ goto out_put;
-+ }
+
-+ for (t = TGT_DEV_HASH_SIZE-1; t >= 0; t--) {
-+ struct list_head *sess_tgt_dev_list_head =
-+ &sess->sess_tgt_dev_list_hash[t];
++ for (t = SESS_TGT_DEV_LIST_HASH_SIZE-1; t >= 0; t--) {
++ struct list_head *head = &sess->sess_tgt_dev_list[t];
+ struct scst_tgt_dev *tgt_dev;
-+ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
-+ sess_tgt_dev_list_entry) {
++ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
+ active_cmds += atomic_read(&tgt_dev->tgt_dev_cmd_count);
+ }
+ }
@@ -20526,12 +29077,72 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+}
+
+static struct kobj_attribute session_initiator_name_attr =
-+ __ATTR(initiator_name, S_IRUGO, scst_sess_sysfs_initiator_name_show, NULL);
++ __ATTR(initiator_name, S_IRUGO, scst_sess_sysfs_initiator_name_show,
++ NULL);
++
++#define SCST_SESS_SYSFS_STAT_ATTR(name, exported_name, dir, kb) \
++static ssize_t scst_sess_sysfs_##exported_name##_show(struct kobject *kobj, \
++ struct kobj_attribute *attr, char *buf) \
++{ \
++ struct scst_session *sess; \
++ int res; \
++ uint64_t v; \
++ \
++ BUILD_BUG_ON(SCST_DATA_UNKNOWN != 0); \
++ BUILD_BUG_ON(SCST_DATA_WRITE != 1); \
++ BUILD_BUG_ON(SCST_DATA_READ != 2); \
++ BUILD_BUG_ON(SCST_DATA_BIDI != 3); \
++ BUILD_BUG_ON(SCST_DATA_NONE != 4); \
++ \
++ BUILD_BUG_ON(dir >= SCST_DATA_DIR_MAX); \
++ \
++ sess = container_of(kobj, struct scst_session, sess_kobj); \
++ v = sess->io_stats[dir].name; \
++ if (kb) \
++ v >>= 10; \
++ res = sprintf(buf, "%llu\n", (unsigned long long)v); \
++ return res; \
++} \
++ \
++static ssize_t scst_sess_sysfs_##exported_name##_store(struct kobject *kobj, \
++ struct kobj_attribute *attr, const char *buf, size_t count) \
++{ \
++ struct scst_session *sess; \
++ sess = container_of(kobj, struct scst_session, sess_kobj); \
++ spin_lock_irq(&sess->sess_list_lock); \
++ BUILD_BUG_ON(dir >= SCST_DATA_DIR_MAX); \
++ sess->io_stats[dir].cmd_count = 0; \
++ sess->io_stats[dir].io_byte_count = 0; \
++ spin_unlock_irq(&sess->sess_list_lock); \
++ return count; \
++} \
++ \
++static struct kobj_attribute session_##exported_name##_attr = \
++ __ATTR(exported_name, S_IRUGO | S_IWUSR, \
++ scst_sess_sysfs_##exported_name##_show, \
++ scst_sess_sysfs_##exported_name##_store);
++
++SCST_SESS_SYSFS_STAT_ATTR(cmd_count, unknown_cmd_count, SCST_DATA_UNKNOWN, 0);
++SCST_SESS_SYSFS_STAT_ATTR(cmd_count, write_cmd_count, SCST_DATA_WRITE, 0);
++SCST_SESS_SYSFS_STAT_ATTR(io_byte_count, write_io_count_kb, SCST_DATA_WRITE, 1);
++SCST_SESS_SYSFS_STAT_ATTR(cmd_count, read_cmd_count, SCST_DATA_READ, 0);
++SCST_SESS_SYSFS_STAT_ATTR(io_byte_count, read_io_count_kb, SCST_DATA_READ, 1);
++SCST_SESS_SYSFS_STAT_ATTR(cmd_count, bidi_cmd_count, SCST_DATA_BIDI, 0);
++SCST_SESS_SYSFS_STAT_ATTR(io_byte_count, bidi_io_count_kb, SCST_DATA_BIDI, 1);
++SCST_SESS_SYSFS_STAT_ATTR(cmd_count, none_cmd_count, SCST_DATA_NONE, 0);
+
+static struct attribute *scst_session_attrs[] = {
+ &session_commands_attr.attr,
+ &session_active_commands_attr.attr,
+ &session_initiator_name_attr.attr,
++ &session_unknown_cmd_count_attr.attr,
++ &session_write_cmd_count_attr.attr,
++ &session_write_io_count_kb_attr.attr,
++ &session_read_cmd_count_attr.attr,
++ &session_read_io_count_kb_attr.attr,
++ &session_bidi_cmd_count_attr.attr,
++ &session_bidi_io_count_kb_attr.attr,
++ &session_none_cmd_count_attr.attr,
+#ifdef CONFIG_SCST_MEASURE_LATENCY
+ &session_latency_attr.attr,
+#endif /* CONFIG_SCST_MEASURE_LATENCY */
@@ -20545,7 +29156,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ TRACE_ENTRY();
+
+ sess = container_of(kobj, struct scst_session, sess_kobj);
-+ complete_all(&sess->sess_kobj_release_cmpl);
++ if (sess->sess_kobj_release_cmpl)
++ complete_all(sess->sess_kobj_release_cmpl);
+
+ TRACE_EXIT();
+ return;
@@ -20591,7 +29203,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+{
+ int res = 0;
+ struct scst_session *s;
-+ const struct attribute **pattr;
+ char *name = (char *)sess->initiator_name;
+ int len = strlen(name) + 1, n = 1;
+
@@ -20606,7 +29217,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ if (s == sess)
+ continue;
+
-+ TRACE_DBG("Dublicated session from the same initiator "
++ TRACE_DBG("Duplicated session from the same initiator "
+ "%s found", name);
+
+ if (name == sess->initiator_name) {
@@ -20626,8 +29237,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ }
+ }
+
-+ init_completion(&sess->sess_kobj_release_cmpl);
-+
+ TRACE_DBG("Adding session %s to sysfs", name);
+
+ res = kobject_init_and_add(&sess->sess_kobj, &scst_session_ktype,
@@ -20639,17 +29248,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+
+ sess->sess_kobj_ready = 1;
+
-+ pattr = sess->tgt->tgtt->sess_attrs;
-+ if (pattr != NULL) {
-+ while (*pattr != NULL) {
-+ res = sysfs_create_file(&sess->sess_kobj, *pattr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add sess attr %s for sess "
-+ "for initiator %s", (*pattr)->name,
-+ name);
-+ goto out_free;
-+ }
-+ pattr++;
++ if (sess->tgt->tgtt->sess_attrs) {
++ res = sysfs_create_files(&sess->sess_kobj,
++ sess->tgt->tgtt->sess_attrs);
++ if (res != 0) {
++ PRINT_ERROR("Can't add attributes for session %s", name);
++ goto out_free;
+ }
+ }
+
@@ -20671,6 +29275,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+void scst_sess_sysfs_del(struct scst_session *sess)
+{
+ int rc;
++ DECLARE_COMPLETION_ONSTACK(c);
+
+ TRACE_ENTRY();
+
@@ -20680,15 +29285,17 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ TRACE_DBG("Deleting session %s from sysfs",
+ kobject_name(&sess->sess_kobj));
+
++ sess->sess_kobj_release_cmpl = &c;
++
+ kobject_del(&sess->sess_kobj);
+ kobject_put(&sess->sess_kobj);
+
-+ rc = wait_for_completion_timeout(&sess->sess_kobj_release_cmpl, HZ);
++ rc = wait_for_completion_timeout(sess->sess_kobj_release_cmpl, HZ);
+ if (rc == 0) {
+ PRINT_INFO("Waiting for releasing sysfs entry "
+ "for session from %s (%d refs)...", sess->initiator_name,
+ atomic_read(&sess->sess_kobj.kref.refcount));
-+ wait_for_completion(&sess->sess_kobj_release_cmpl);
++ wait_for_completion(sess->sess_kobj_release_cmpl);
+ PRINT_INFO("Done waiting for releasing sysfs "
+ "entry for session %s", sess->initiator_name);
+ }
@@ -20709,7 +29316,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ TRACE_ENTRY();
+
+ acg_dev = container_of(kobj, struct scst_acg_dev, acg_dev_kobj);
-+ complete_all(&acg_dev->acg_dev_kobj_release_cmpl);
++ if (acg_dev->acg_dev_kobj_release_cmpl)
++ complete_all(acg_dev->acg_dev_kobj_release_cmpl);
+
+ TRACE_EXIT();
+ return;
@@ -20753,9 +29361,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+void scst_acg_dev_sysfs_del(struct scst_acg_dev *acg_dev)
+{
+ int rc;
++ DECLARE_COMPLETION_ONSTACK(c);
+
+ TRACE_ENTRY();
+
++ acg_dev->acg_dev_kobj_release_cmpl = &c;
++
+ if (acg_dev->dev != NULL) {
+ sysfs_remove_link(acg_dev->dev->dev_exp_kobj,
+ acg_dev->acg_dev_link_name);
@@ -20765,12 +29376,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ kobject_del(&acg_dev->acg_dev_kobj);
+ kobject_put(&acg_dev->acg_dev_kobj);
+
-+ rc = wait_for_completion_timeout(&acg_dev->acg_dev_kobj_release_cmpl, HZ);
++ rc = wait_for_completion_timeout(acg_dev->acg_dev_kobj_release_cmpl, HZ);
+ if (rc == 0) {
+ PRINT_INFO("Waiting for releasing sysfs entry "
+ "for acg_dev %p (%d refs)...", acg_dev,
+ atomic_read(&acg_dev->acg_dev_kobj.kref.refcount));
-+ wait_for_completion(&acg_dev->acg_dev_kobj_release_cmpl);
++ wait_for_completion(acg_dev->acg_dev_kobj_release_cmpl);
+ PRINT_INFO("Done waiting for releasing sysfs "
+ "entry for acg_dev %p", acg_dev);
+ }
@@ -20786,10 +29397,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+
+ TRACE_ENTRY();
+
-+ init_completion(&acg_dev->acg_dev_kobj_release_cmpl);
-+
+ res = kobject_init_and_add(&acg_dev->acg_dev_kobj, &acg_dev_ktype,
-+ parent, "%u", acg_dev->lun);
++ parent, "%llu", acg_dev->lun);
+ if (res != 0) {
+ PRINT_ERROR("Can't add acg_dev %p to sysfs", acg_dev);
+ goto out;
@@ -20824,1093 +29433,37 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ goto out;
+}
+
-+static int __scst_process_luns_mgmt_store(char *buffer,
-+ struct scst_tgt *tgt, struct scst_acg *acg, bool tgt_kobj)
-+{
-+ int res, read_only = 0, action;
-+ char *p, *e = NULL;
-+ unsigned int virt_lun;
-+ struct scst_acg_dev *acg_dev = NULL, *acg_dev_tmp;
-+ struct scst_device *d, *dev = NULL;
-+
-+#define SCST_LUN_ACTION_ADD 1
-+#define SCST_LUN_ACTION_DEL 2
-+#define SCST_LUN_ACTION_REPLACE 3
-+#define SCST_LUN_ACTION_CLEAR 4
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("buffer %s", buffer);
-+
-+ p = buffer;
-+ if (p[strlen(p) - 1] == '\n')
-+ p[strlen(p) - 1] = '\0';
-+ if (strncasecmp("add", p, 3) == 0) {
-+ p += 3;
-+ action = SCST_LUN_ACTION_ADD;
-+ } else if (strncasecmp("del", p, 3) == 0) {
-+ p += 3;
-+ action = SCST_LUN_ACTION_DEL;
-+ } else if (!strncasecmp("replace", p, 7)) {
-+ p += 7;
-+ action = SCST_LUN_ACTION_REPLACE;
-+ } else if (!strncasecmp("clear", p, 5)) {
-+ p += 5;
-+ action = SCST_LUN_ACTION_CLEAR;
-+ } else {
-+ PRINT_ERROR("Unknown action \"%s\"", p);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ res = scst_suspend_activity(true);
-+ if (res != 0)
-+ goto out;
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out_resume;
-+ }
-+
-+ /* Check if tgt and acg not already freed while we were coming here */
-+ if (scst_check_tgt_acg_ptrs(tgt, acg) != 0)
-+ goto out_unlock;
-+
-+ if ((action != SCST_LUN_ACTION_CLEAR) &&
-+ (action != SCST_LUN_ACTION_DEL)) {
-+ if (!isspace(*p)) {
-+ PRINT_ERROR("%s", "Syntax error");
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+ e = p; /* save p */
-+ while (!isspace(*e) && *e != '\0')
-+ e++;
-+ *e = '\0';
-+
-+ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
-+ if (!strcmp(d->virt_name, p)) {
-+ dev = d;
-+ TRACE_DBG("Device %p (%s) found", dev, p);
-+ break;
-+ }
-+ }
-+ if (dev == NULL) {
-+ PRINT_ERROR("Device '%s' not found", p);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+ }
-+
-+ switch (action) {
-+ case SCST_LUN_ACTION_ADD:
-+ case SCST_LUN_ACTION_REPLACE:
-+ {
-+ bool dev_replaced = false;
-+
-+ e++;
-+ while (isspace(*e) && *e != '\0')
-+ e++;
-+ virt_lun = simple_strtoul(e, &e, 0);
-+
-+ while (isspace(*e) && *e != '\0')
-+ e++;
-+
-+ while (1) {
-+ char *pp;
-+ unsigned long val;
-+ char *param = scst_get_next_token_str(&e);
-+ if (param == NULL)
-+ break;
-+
-+ p = scst_get_next_lexem(&param);
-+ if (*p == '\0') {
-+ PRINT_ERROR("Syntax error at %s (device %s)",
-+ param, dev->virt_name);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ pp = scst_get_next_lexem(&param);
-+ if (*pp == '\0') {
-+ PRINT_ERROR("Parameter %s value missed for device %s",
-+ p, dev->virt_name);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ if (scst_get_next_lexem(&param)[0] != '\0') {
-+ PRINT_ERROR("Too many parameter's %s values (device %s)",
-+ p, dev->virt_name);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ res = strict_strtoul(pp, 0, &val);
-+ if (res != 0) {
-+ PRINT_ERROR("strict_strtoul() for %s failed: %d "
-+ "(device %s)", pp, res, dev->virt_name);
-+ goto out_unlock;
-+ }
-+
-+ if (!strcasecmp("read_only", p)) {
-+ read_only = val;
-+ TRACE_DBG("READ ONLY %d", read_only);
-+ } else {
-+ PRINT_ERROR("Unknown parameter %s (device %s)",
-+ p, dev->virt_name);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+ }
-+
-+ acg_dev = NULL;
-+ list_for_each_entry(acg_dev_tmp, &acg->acg_dev_list,
-+ acg_dev_list_entry) {
-+ if (acg_dev_tmp->lun == virt_lun) {
-+ acg_dev = acg_dev_tmp;
-+ break;
-+ }
-+ }
-+
-+ if (acg_dev != NULL) {
-+ if (action == SCST_LUN_ACTION_ADD) {
-+ PRINT_ERROR("virt lun %d already exists in "
-+ "group %s", virt_lun, acg->acg_name);
-+ res = -EEXIST;
-+ goto out_unlock;
-+ } else {
-+ /* Replace */
-+ res = scst_acg_del_lun(acg, acg_dev->lun,
-+ false);
-+ if (res != 0)
-+ goto out_unlock;
-+
-+ dev_replaced = true;
-+ }
-+ }
-+
-+ res = scst_acg_add_lun(acg,
-+ tgt_kobj ? tgt->tgt_luns_kobj : acg->luns_kobj,
-+ dev, virt_lun, read_only, !dev_replaced, NULL);
-+ if (res != 0)
-+ goto out_unlock;
-+
-+ if (dev_replaced) {
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ if ((tgt_dev->acg_dev->acg == acg) &&
-+ (tgt_dev->lun == virt_lun)) {
-+ TRACE_MGMT_DBG("INQUIRY DATA HAS CHANGED"
-+ " on tgt_dev %p", tgt_dev);
-+ scst_gen_aen_or_ua(tgt_dev,
-+ SCST_LOAD_SENSE(scst_sense_inquery_data_changed));
-+ }
-+ }
-+ }
-+
-+ break;
-+ }
-+ case SCST_LUN_ACTION_DEL:
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+ virt_lun = simple_strtoul(p, &p, 0);
-+
-+ res = scst_acg_del_lun(acg, virt_lun, true);
-+ if (res != 0)
-+ goto out_unlock;
-+ break;
-+ case SCST_LUN_ACTION_CLEAR:
-+ PRINT_INFO("Removed all devices from group %s",
-+ acg->acg_name);
-+ list_for_each_entry_safe(acg_dev, acg_dev_tmp,
-+ &acg->acg_dev_list,
-+ acg_dev_list_entry) {
-+ res = scst_acg_del_lun(acg, acg_dev->lun,
-+ list_is_last(&acg_dev->acg_dev_list_entry,
-+ &acg->acg_dev_list));
-+ if (res)
-+ goto out_unlock;
-+ }
-+ break;
-+ }
-+
-+ res = 0;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+out_resume:
-+ scst_resume_activity();
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+#undef SCST_LUN_ACTION_ADD
-+#undef SCST_LUN_ACTION_DEL
-+#undef SCST_LUN_ACTION_REPLACE
-+#undef SCST_LUN_ACTION_CLEAR
-+}
-+
-+static int scst_luns_mgmt_store_work_fn(struct scst_sysfs_work_item *work)
-+{
-+ return __scst_process_luns_mgmt_store(work->buf, work->tgt, work->acg,
-+ work->is_tgt_kobj);
-+}
-+
-+static ssize_t __scst_acg_mgmt_store(struct scst_acg *acg,
-+ const char *buf, size_t count, bool is_tgt_kobj,
-+ int (*sysfs_work_fn)(struct scst_sysfs_work_item *))
-+{
-+ int res;
-+ char *buffer;
-+ struct scst_sysfs_work_item *work;
-+
-+ TRACE_ENTRY();
-+
-+ buffer = kzalloc(count+1, GFP_KERNEL);
-+ if (buffer == NULL) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ memcpy(buffer, buf, count);
-+ buffer[count] = '\0';
-+
-+ res = scst_alloc_sysfs_work(sysfs_work_fn, false, &work);
-+ if (res != 0)
-+ goto out_free;
-+
-+ work->buf = buffer;
-+ work->tgt = acg->tgt;
-+ work->acg = acg;
-+ work->is_tgt_kobj = is_tgt_kobj;
-+
-+ res = scst_sysfs_queue_wait_work(work);
-+ if (res == 0)
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free:
-+ kfree(buffer);
-+ goto out;
-+}
-+
-+static ssize_t __scst_luns_mgmt_store(struct scst_acg *acg,
-+ bool tgt_kobj, const char *buf, size_t count)
-+{
-+ return __scst_acg_mgmt_store(acg, buf, count, tgt_kobj,
-+ scst_luns_mgmt_store_work_fn);
-+}
-+
-+static ssize_t scst_luns_mgmt_show(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ char *buf)
-+{
-+ static char *help = "Usage: echo \"add|del H:C:I:L lun [parameters]\" >mgmt\n"
-+ " echo \"add VNAME lun [parameters]\" >mgmt\n"
-+ " echo \"del lun\" >mgmt\n"
-+ " echo \"replace H:C:I:L lun [parameters]\" >mgmt\n"
-+ " echo \"replace VNAME lun [parameters]\" >mgmt\n"
-+ " echo \"clear\" >mgmt\n"
-+ "\n"
-+ "where parameters are one or more "
-+ "param_name=value pairs separated by ';'\n"
-+ "\nThe following parameters available: read_only.";
-+
-+ return sprintf(buf, "%s", help);
-+}
-+
-+static ssize_t scst_luns_mgmt_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_acg *acg;
-+ struct scst_tgt *tgt;
-+
-+ tgt = container_of(kobj->parent, struct scst_tgt, tgt_kobj);
-+ acg = tgt->default_acg;
-+
-+ res = __scst_luns_mgmt_store(acg, true, buf, count);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t __scst_acg_addr_method_show(struct scst_acg *acg, char *buf)
-+{
-+ int res;
-+
-+ switch (acg->addr_method) {
-+ case SCST_LUN_ADDR_METHOD_FLAT:
-+ res = sprintf(buf, "FLAT\n%s\n", SCST_SYSFS_KEY_MARK);
-+ break;
-+ case SCST_LUN_ADDR_METHOD_PERIPHERAL:
-+ res = sprintf(buf, "PERIPHERAL\n");
-+ break;
-+ default:
-+ res = sprintf(buf, "UNKNOWN\n");
-+ break;
-+ }
-+
-+ return res;
-+}
-+
-+static ssize_t __scst_acg_addr_method_store(struct scst_acg *acg,
-+ const char *buf, size_t count)
-+{
-+ int res = count;
-+
-+ if (strncasecmp(buf, "FLAT", min_t(int, 4, count)) == 0)
-+ acg->addr_method = SCST_LUN_ADDR_METHOD_FLAT;
-+ else if (strncasecmp(buf, "PERIPHERAL", min_t(int, 10, count)) == 0)
-+ acg->addr_method = SCST_LUN_ADDR_METHOD_PERIPHERAL;
-+ else {
-+ PRINT_ERROR("Unknown address method %s", buf);
-+ res = -EINVAL;
-+ }
-+
-+ TRACE_DBG("acg %p, addr_method %d", acg, acg->addr_method);
-+
-+ return res;
-+}
-+
-+static ssize_t scst_tgt_addr_method_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_acg *acg;
-+ struct scst_tgt *tgt;
-+
-+ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ acg = tgt->default_acg;
-+
-+ return __scst_acg_addr_method_show(acg, buf);
-+}
-+
-+static ssize_t scst_tgt_addr_method_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_acg *acg;
-+ struct scst_tgt *tgt;
-+
-+ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ acg = tgt->default_acg;
-+
-+ res = __scst_acg_addr_method_store(acg, buf, count);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t __scst_acg_io_grouping_type_show(struct scst_acg *acg, char *buf)
-+{
-+ int res;
-+
-+ switch (acg->acg_io_grouping_type) {
-+ case SCST_IO_GROUPING_AUTO:
-+ res = sprintf(buf, "%s\n", SCST_IO_GROUPING_AUTO_STR);
-+ break;
-+ case SCST_IO_GROUPING_THIS_GROUP_ONLY:
-+ res = sprintf(buf, "%s\n%s\n",
-+ SCST_IO_GROUPING_THIS_GROUP_ONLY_STR,
-+ SCST_SYSFS_KEY_MARK);
-+ break;
-+ case SCST_IO_GROUPING_NEVER:
-+ res = sprintf(buf, "%s\n%s\n", SCST_IO_GROUPING_NEVER_STR,
-+ SCST_SYSFS_KEY_MARK);
-+ break;
-+ default:
-+ res = sprintf(buf, "%d\n%s\n", acg->acg_io_grouping_type,
-+ SCST_SYSFS_KEY_MARK);
-+ break;
-+ }
-+
-+ return res;
-+}
-+
-+static int __scst_acg_process_io_grouping_type_store(struct scst_tgt *tgt,
-+ struct scst_acg *acg, int io_grouping_type)
-+{
-+ int res = 0;
-+ struct scst_acg_dev *acg_dev;
-+
-+ TRACE_DBG("tgt %p, acg %p, io_grouping_type %d", tgt, acg,
-+ io_grouping_type);
-+
-+ res = scst_suspend_activity(true);
-+ if (res != 0)
-+ goto out;
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out_resume;
-+ }
-+
-+ /* Check if tgt and acg not already freed while we were coming here */
-+ if (scst_check_tgt_acg_ptrs(tgt, acg) != 0)
-+ goto out_unlock;
-+
-+ acg->acg_io_grouping_type = io_grouping_type;
-+
-+ list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
-+ int rc;
-+
-+ scst_stop_dev_threads(acg_dev->dev);
-+
-+ rc = scst_create_dev_threads(acg_dev->dev);
-+ if (rc != 0)
-+ res = rc;
-+ }
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+out_resume:
-+ scst_resume_activity();
-+
-+out:
-+ return res;
-+}
-+
-+static int __scst_acg_io_grouping_type_store_work_fn(struct scst_sysfs_work_item *work)
-+{
-+ return __scst_acg_process_io_grouping_type_store(work->tgt, work->acg,
-+ work->io_grouping_type);
-+}
-+
-+static ssize_t __scst_acg_io_grouping_type_store(struct scst_acg *acg,
-+ const char *buf, size_t count)
-+{
-+ int res = 0;
-+ int prev = acg->acg_io_grouping_type;
-+ long io_grouping_type;
-+ struct scst_sysfs_work_item *work;
-+
-+ if (strncasecmp(buf, SCST_IO_GROUPING_AUTO_STR,
-+ min_t(int, strlen(SCST_IO_GROUPING_AUTO_STR), count)) == 0)
-+ io_grouping_type = SCST_IO_GROUPING_AUTO;
-+ else if (strncasecmp(buf, SCST_IO_GROUPING_THIS_GROUP_ONLY_STR,
-+ min_t(int, strlen(SCST_IO_GROUPING_THIS_GROUP_ONLY_STR), count)) == 0)
-+ io_grouping_type = SCST_IO_GROUPING_THIS_GROUP_ONLY;
-+ else if (strncasecmp(buf, SCST_IO_GROUPING_NEVER_STR,
-+ min_t(int, strlen(SCST_IO_GROUPING_NEVER_STR), count)) == 0)
-+ io_grouping_type = SCST_IO_GROUPING_NEVER;
-+ else {
-+ res = strict_strtol(buf, 0, &io_grouping_type);
-+ if ((res != 0) || (io_grouping_type <= 0)) {
-+ PRINT_ERROR("Unknown or not allowed I/O grouping type "
-+ "%s", buf);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ }
-+
-+ if (prev == io_grouping_type)
-+ goto out;
-+
-+ res = scst_alloc_sysfs_work(__scst_acg_io_grouping_type_store_work_fn,
-+ false, &work);
-+ if (res != 0)
-+ goto out;
-+
-+ work->tgt = acg->tgt;
-+ work->acg = acg;
-+ work->io_grouping_type = io_grouping_type;
-+
-+ res = scst_sysfs_queue_wait_work(work);
-+
-+out:
-+ return res;
-+}
-+
-+static ssize_t scst_tgt_io_grouping_type_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_acg *acg;
-+ struct scst_tgt *tgt;
-+
-+ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ acg = tgt->default_acg;
-+
-+ return __scst_acg_io_grouping_type_show(acg, buf);
-+}
++/**
++ ** ini_groups directory implementation.
++ **/
+
-+static ssize_t scst_tgt_io_grouping_type_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
++static void scst_acg_release(struct kobject *kobj)
+{
-+ int res;
+ struct scst_acg *acg;
-+ struct scst_tgt *tgt;
-+
-+ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ acg = tgt->default_acg;
-+
-+ res = __scst_acg_io_grouping_type_store(acg, buf, count);
-+ if (res != 0)
-+ goto out;
-+
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/*
-+ * Called with scst_mutex held.
-+ *
-+ * !! No sysfs works must use kobject_get() to protect acg, due to possible
-+ * !! deadlock with scst_mutex (it is waiting for the last put, but
-+ * !! the last ref counter holder is waiting for scst_mutex)
-+ */
-+void scst_acg_sysfs_del(struct scst_acg *acg)
-+{
-+ int rc;
+
+ TRACE_ENTRY();
+
-+ kobject_del(acg->luns_kobj);
-+ kobject_del(acg->initiators_kobj);
-+ kobject_del(&acg->acg_kobj);
-+
-+ kobject_put(acg->luns_kobj);
-+ kobject_put(acg->initiators_kobj);
-+ kobject_put(&acg->acg_kobj);
-+
-+ rc = wait_for_completion_timeout(&acg->acg_kobj_release_cmpl, HZ);
-+ if (rc == 0) {
-+ PRINT_INFO("Waiting for releasing sysfs entry "
-+ "for acg %s (%d refs)...", acg->acg_name,
-+ atomic_read(&acg->acg_kobj.kref.refcount));
-+ wait_for_completion(&acg->acg_kobj_release_cmpl);
-+ PRINT_INFO("Done waiting for releasing sysfs "
-+ "entry for acg %s", acg->acg_name);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+int scst_acg_sysfs_create(struct scst_tgt *tgt,
-+ struct scst_acg *acg)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ init_completion(&acg->acg_kobj_release_cmpl);
-+
-+ res = kobject_init_and_add(&acg->acg_kobj, &acg_ktype,
-+ tgt->tgt_ini_grp_kobj, acg->acg_name);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add acg '%s' to sysfs", acg->acg_name);
-+ goto out;
-+ }
-+
-+ acg->luns_kobj = kobject_create_and_add("luns", &acg->acg_kobj);
-+ if (acg->luns_kobj == NULL) {
-+ PRINT_ERROR("Can't create luns kobj for tgt %s",
-+ tgt->tgt_name);
-+ res = -ENOMEM;
-+ goto out_del;
-+ }
-+
-+ res = sysfs_create_file(acg->luns_kobj, &scst_acg_luns_mgmt.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add tgt attr %s for tgt %s",
-+ scst_acg_luns_mgmt.attr.name, tgt->tgt_name);
-+ goto out_del;
-+ }
-+
-+ acg->initiators_kobj = kobject_create_and_add("initiators",
-+ &acg->acg_kobj);
-+ if (acg->initiators_kobj == NULL) {
-+ PRINT_ERROR("Can't create initiators kobj for tgt %s",
-+ tgt->tgt_name);
-+ res = -ENOMEM;
-+ goto out_del;
-+ }
-+
-+ res = sysfs_create_file(acg->initiators_kobj,
-+ &scst_acg_ini_mgmt.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add tgt attr %s for tgt %s",
-+ scst_acg_ini_mgmt.attr.name, tgt->tgt_name);
-+ goto out_del;
-+ }
-+
-+ res = sysfs_create_file(&acg->acg_kobj, &scst_acg_addr_method.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add tgt attr %s for tgt %s",
-+ scst_acg_addr_method.attr.name, tgt->tgt_name);
-+ goto out_del;
-+ }
-+
-+ res = sysfs_create_file(&acg->acg_kobj, &scst_acg_io_grouping_type.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add tgt attr %s for tgt %s",
-+ scst_acg_io_grouping_type.attr.name, tgt->tgt_name);
-+ goto out_del;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_del:
-+ scst_acg_sysfs_del(acg);
-+ goto out;
-+}
-+
-+static ssize_t scst_acg_addr_method_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_acg *acg;
-+
-+ acg = container_of(kobj, struct scst_acg, acg_kobj);
-+
-+ return __scst_acg_addr_method_show(acg, buf);
-+}
-+
-+static ssize_t scst_acg_addr_method_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_acg *acg;
-+
-+ acg = container_of(kobj, struct scst_acg, acg_kobj);
-+
-+ res = __scst_acg_addr_method_store(acg, buf, count);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t scst_acg_io_grouping_type_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_acg *acg;
-+
-+ acg = container_of(kobj, struct scst_acg, acg_kobj);
-+
-+ return __scst_acg_io_grouping_type_show(acg, buf);
-+}
-+
-+static ssize_t scst_acg_io_grouping_type_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_acg *acg;
-+
+ acg = container_of(kobj, struct scst_acg, acg_kobj);
-+
-+ res = __scst_acg_io_grouping_type_store(acg, buf, count);
-+ if (res != 0)
-+ goto out;
-+
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t scst_ini_group_mgmt_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ static char *help = "Usage: echo \"create GROUP_NAME\" >mgmt\n"
-+ " echo \"del GROUP_NAME\" >mgmt\n";
-+
-+ return sprintf(buf, "%s", help);
-+}
-+
-+static int scst_process_ini_group_mgmt_store(char *buffer,
-+ struct scst_tgt *tgt)
-+{
-+ int res, action;
-+ int len;
-+ char *name;
-+ char *p, *e = NULL;
-+ struct scst_acg *a, *acg = NULL;
-+
-+#define SCST_INI_GROUP_ACTION_CREATE 1
-+#define SCST_INI_GROUP_ACTION_DEL 2
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("tgt %p, buffer %s", tgt, buffer);
-+
-+ p = buffer;
-+ if (p[strlen(p) - 1] == '\n')
-+ p[strlen(p) - 1] = '\0';
-+ if (strncasecmp("create ", p, 7) == 0) {
-+ p += 7;
-+ action = SCST_INI_GROUP_ACTION_CREATE;
-+ } else if (strncasecmp("del ", p, 4) == 0) {
-+ p += 4;
-+ action = SCST_INI_GROUP_ACTION_DEL;
-+ } else {
-+ PRINT_ERROR("Unknown action \"%s\"", p);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ res = scst_suspend_activity(true);
-+ if (res != 0)
-+ goto out;
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out_resume;
-+ }
-+
-+ /* Check if our pointer is still alive */
-+ if (scst_check_tgt_acg_ptrs(tgt, NULL) != 0)
-+ goto out_unlock;
-+
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+ e = p;
-+ while (!isspace(*e) && *e != '\0')
-+ e++;
-+ *e = '\0';
-+
-+ if (p[0] == '\0') {
-+ PRINT_ERROR("%s", "Group name required");
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ list_for_each_entry(a, &tgt->tgt_acg_list, acg_list_entry) {
-+ if (strcmp(a->acg_name, p) == 0) {
-+ TRACE_DBG("group (acg) %p %s found",
-+ a, a->acg_name);
-+ acg = a;
-+ break;
-+ }
-+ }
-+
-+ switch (action) {
-+ case SCST_INI_GROUP_ACTION_CREATE:
-+ TRACE_DBG("Creating group '%s'", p);
-+ if (acg != NULL) {
-+ PRINT_ERROR("acg name %s exist", p);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ len = strlen(p) + 1;
-+ name = kmalloc(len, GFP_KERNEL);
-+ if (name == NULL) {
-+ PRINT_ERROR("%s", "Allocation of name failed");
-+ res = -ENOMEM;
-+ goto out_unlock;
-+ }
-+ strlcpy(name, p, len);
-+
-+ acg = scst_alloc_add_acg(tgt, name, true);
-+ kfree(name);
-+ if (acg == NULL)
-+ goto out_unlock;
-+ break;
-+ case SCST_INI_GROUP_ACTION_DEL:
-+ TRACE_DBG("Deleting group '%s'", p);
-+ if (acg == NULL) {
-+ PRINT_ERROR("Group %s not found", p);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+ if (!scst_acg_sess_is_empty(acg)) {
-+ PRINT_ERROR("Group %s is not empty", acg->acg_name);
-+ res = -EBUSY;
-+ goto out_unlock;
-+ }
-+ scst_del_free_acg(acg);
-+ break;
-+ }
-+
-+ res = 0;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+out_resume:
-+ scst_resume_activity();
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+#undef SCST_LUN_ACTION_CREATE
-+#undef SCST_LUN_ACTION_DEL
-+}
-+
-+static int scst_ini_group_mgmt_store_work_fn(struct scst_sysfs_work_item *work)
-+{
-+ return scst_process_ini_group_mgmt_store(work->buf, work->tgt);
-+}
-+
-+static ssize_t scst_ini_group_mgmt_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ char *buffer;
-+ struct scst_tgt *tgt;
-+ struct scst_sysfs_work_item *work;
-+
-+ TRACE_ENTRY();
-+
-+ tgt = container_of(kobj->parent, struct scst_tgt, tgt_kobj);
-+
-+ buffer = kzalloc(count+1, GFP_KERNEL);
-+ if (buffer == NULL) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ memcpy(buffer, buf, count);
-+ buffer[count] = '\0';
-+
-+ res = scst_alloc_sysfs_work(scst_ini_group_mgmt_store_work_fn, false,
-+ &work);
-+ if (res != 0)
-+ goto out_free;
-+
-+ work->buf = buffer;
-+ work->tgt = tgt;
-+
-+ res = scst_sysfs_queue_wait_work(work);
-+ if (res == 0)
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free:
-+ kfree(buffer);
-+ goto out;
-+}
-+
-+static ssize_t scst_rel_tgt_id_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_tgt *tgt;
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+
-+ res = sprintf(buf, "%d\n%s", tgt->rel_tgt_id,
-+ (tgt->rel_tgt_id != 0) ? SCST_SYSFS_KEY_MARK "\n" : "");
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_process_rel_tgt_id_store(struct scst_sysfs_work_item *work)
-+{
-+ int res = 0;
-+ struct scst_tgt *tgt = work->tgt;
-+ unsigned long rel_tgt_id = work->l;
-+
-+ TRACE_ENTRY();
-+
-+ /* tgt protected by kobject_get() */
-+
-+ TRACE_DBG("Trying to set relative target port id %d",
-+ (uint16_t)rel_tgt_id);
-+
-+ if (tgt->tgtt->is_target_enabled(tgt) &&
-+ rel_tgt_id != tgt->rel_tgt_id) {
-+ if (!scst_is_relative_target_port_id_unique(rel_tgt_id, tgt)) {
-+ PRINT_ERROR("Relative port id %d is not unique",
-+ (uint16_t)rel_tgt_id);
-+ res = -EBADSLT;
-+ goto out_put;
-+ }
-+ }
-+
-+ if (rel_tgt_id < SCST_MIN_REL_TGT_ID ||
-+ rel_tgt_id > SCST_MAX_REL_TGT_ID) {
-+ if ((rel_tgt_id == 0) && !tgt->tgtt->is_target_enabled(tgt))
-+ goto set;
-+
-+ PRINT_ERROR("Invalid relative port id %d",
-+ (uint16_t)rel_tgt_id);
-+ res = -EINVAL;
-+ goto out_put;
-+ }
-+
-+set:
-+ tgt->rel_tgt_id = (uint16_t)rel_tgt_id;
-+
-+out_put:
-+ kobject_put(&tgt->tgt_kobj);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t scst_rel_tgt_id_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res = 0;
-+ struct scst_tgt *tgt;
-+ unsigned long rel_tgt_id;
-+ struct scst_sysfs_work_item *work;
-+
-+ TRACE_ENTRY();
-+
-+ if (buf == NULL)
-+ goto out;
-+
-+ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+
-+ res = strict_strtoul(buf, 0, &rel_tgt_id);
-+ if (res != 0) {
-+ PRINT_ERROR("%s", "Wrong rel_tgt_id");
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ res = scst_alloc_sysfs_work(scst_process_rel_tgt_id_store, false,
-+ &work);
-+ if (res != 0)
-+ goto out;
-+
-+ work->tgt = tgt;
-+ work->l = rel_tgt_id;
-+
-+ kobject_get(&tgt->tgt_kobj);
-+
-+ res = scst_sysfs_queue_wait_work(work);
-+ if (res == 0)
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+int scst_acn_sysfs_create(struct scst_acn *acn)
-+{
-+ int res = 0;
-+ int len;
-+ struct scst_acg *acg = acn->acg;
-+ struct kobj_attribute *attr = NULL;
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+ static struct lock_class_key __key;
-+#endif
-+
-+ TRACE_ENTRY();
-+
-+ acn->acn_attr = NULL;
-+
-+ attr = kzalloc(sizeof(struct kobj_attribute), GFP_KERNEL);
-+ if (attr == NULL) {
-+ PRINT_ERROR("Unable to allocate attributes for initiator '%s'",
-+ acn->name);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ len = strlen(acn->name) + 1;
-+ attr->attr.name = kzalloc(len, GFP_KERNEL);
-+ if (attr->attr.name == NULL) {
-+ PRINT_ERROR("Unable to allocate attributes for initiator '%s'",
-+ acn->name);
-+ res = -ENOMEM;
-+ goto out_free;
-+ }
-+ strlcpy((char *)attr->attr.name, acn->name, len);
-+
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+ attr->attr.key = &__key;
-+#endif
-+
-+ attr->attr.mode = S_IRUGO;
-+ attr->show = scst_acn_file_show;
-+ attr->store = NULL;
-+
-+ res = sysfs_create_file(acg->initiators_kobj, &attr->attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Unable to create acn '%s' for group '%s'",
-+ acn->name, acg->acg_name);
-+ kfree(attr->attr.name);
-+ goto out_free;
-+ }
-+
-+ acn->acn_attr = attr;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free:
-+ kfree(attr);
-+ goto out;
-+}
-+
-+void scst_acn_sysfs_del(struct scst_acn *acn)
-+{
-+ struct scst_acg *acg = acn->acg;
-+
-+ TRACE_ENTRY();
-+
-+ if (acn->acn_attr != NULL) {
-+ sysfs_remove_file(acg->initiators_kobj,
-+ &acn->acn_attr->attr);
-+ kfree(acn->acn_attr->attr.name);
-+ kfree(acn->acn_attr);
-+ }
++ if (acg->acg_kobj_release_cmpl)
++ complete_all(acg->acg_kobj_release_cmpl);
+
+ TRACE_EXIT();
+ return;
+}
+
-+static ssize_t scst_acn_file_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return scnprintf(buf, SCST_SYSFS_BLOCK_SIZE, "%s\n",
-+ attr->attr.name);
-+}
-+
-+static ssize_t scst_acg_luns_mgmt_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_acg *acg;
-+
-+ acg = container_of(kobj->parent, struct scst_acg, acg_kobj);
-+ res = __scst_luns_mgmt_store(acg, false, buf, count);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
++static struct kobj_type acg_ktype = {
++ .sysfs_ops = &scst_sysfs_ops,
++ .release = scst_acg_release,
++};
+
+static ssize_t scst_acg_ini_mgmt_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
-+ static char *help = "Usage: echo \"add INITIATOR_NAME\" "
-+ ">mgmt\n"
-+ " echo \"del INITIATOR_NAME\" "
-+ ">mgmt\n"
-+ " echo \"move INITIATOR_NAME DEST_GROUP_NAME\" "
-+ ">mgmt\n"
-+ " echo \"clear\" "
-+ ">mgmt\n";
++ static const char help[] =
++ "Usage: echo \"add INITIATOR_NAME\" >mgmt\n"
++ " echo \"del INITIATOR_NAME\" >mgmt\n"
++ " echo \"move INITIATOR_NAME DEST_GROUP_NAME\" >mgmt\n"
++ " echo \"clear\" >mgmt\n";
+
+ return sprintf(buf, "%s", help);
+}
@@ -21923,11 +29476,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ char *name = NULL, *group = NULL;
+ struct scst_acg *acg_dest = NULL;
+ struct scst_acn *acn = NULL, *acn_tmp;
-+
-+#define SCST_ACG_ACTION_INI_ADD 1
-+#define SCST_ACG_ACTION_INI_DEL 2
-+#define SCST_ACG_ACTION_INI_CLEAR 3
-+#define SCST_ACG_ACTION_INI_MOVE 4
++ enum {
++ SCST_ACG_ACTION_INI_ADD = 1,
++ SCST_ACG_ACTION_INI_DEL = 2,
++ SCST_ACG_ACTION_INI_CLEAR = 3,
++ SCST_ACG_ACTION_INI_MOVE = 4,
++ };
+
+ TRACE_ENTRY();
+
@@ -21966,10 +29520,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ if (res != 0)
+ goto out;
+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res != 0)
+ goto out_resume;
-+ }
+
+ /* Check if tgt and acg not already freed while we were coming here */
+ if (scst_check_tgt_acg_ptrs(tgt, acg) != 0)
@@ -22101,11 +29654,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+out:
+ TRACE_EXIT_RES(res);
+ return res;
-+
-+#undef SCST_ACG_ACTION_INI_ADD
-+#undef SCST_ACG_ACTION_INI_DEL
-+#undef SCST_ACG_ACTION_INI_CLEAR
-+#undef SCST_ACG_ACTION_INI_MOVE
+}
+
+static int scst_acg_ini_mgmt_store_work_fn(struct scst_sysfs_work_item *work)
@@ -22124,225 +29672,111 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ scst_acg_ini_mgmt_store_work_fn);
+}
+
-+/**
-+ ** SGV directory implementation
-+ **/
-+
-+static struct kobj_attribute sgv_stat_attr =
-+ __ATTR(stats, S_IRUGO | S_IWUSR, sgv_sysfs_stat_show,
-+ sgv_sysfs_stat_reset);
-+
-+static struct attribute *sgv_attrs[] = {
-+ &sgv_stat_attr.attr,
-+ NULL,
-+};
-+
-+static void sgv_kobj_release(struct kobject *kobj)
-+{
-+ struct sgv_pool *pool;
-+
-+ TRACE_ENTRY();
-+
-+ pool = container_of(kobj, struct sgv_pool, sgv_kobj);
-+ complete_all(&pool->sgv_kobj_release_cmpl);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static struct kobj_type sgv_pool_ktype = {
-+ .sysfs_ops = &scst_sysfs_ops,
-+ .release = sgv_kobj_release,
-+ .default_attrs = sgv_attrs,
-+};
++static struct kobj_attribute scst_acg_ini_mgmt =
++ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_acg_ini_mgmt_show,
++ scst_acg_ini_mgmt_store);
+
-+int scst_sgv_sysfs_create(struct sgv_pool *pool)
++static ssize_t scst_acg_luns_mgmt_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count)
+{
+ int res;
++ struct scst_acg *acg;
+
-+ TRACE_ENTRY();
-+
-+ init_completion(&pool->sgv_kobj_release_cmpl);
-+
-+ res = kobject_init_and_add(&pool->sgv_kobj, &sgv_pool_ktype,
-+ scst_sgv_kobj, pool->name);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add sgv pool %s to sysfs", pool->name);
-+ goto out;
-+ }
++ acg = container_of(kobj->parent, struct scst_acg, acg_kobj);
++ res = __scst_luns_mgmt_store(acg, false, buf, count);
+
-+out:
+ TRACE_EXIT_RES(res);
+ return res;
+}
+
-+void scst_sgv_sysfs_del(struct sgv_pool *pool)
-+{
-+ int rc;
-+
-+ TRACE_ENTRY();
-+
-+ kobject_del(&pool->sgv_kobj);
-+ kobject_put(&pool->sgv_kobj);
-+
-+ rc = wait_for_completion_timeout(&pool->sgv_kobj_release_cmpl, HZ);
-+ if (rc == 0) {
-+ PRINT_INFO("Waiting for releasing sysfs entry "
-+ "for SGV pool %s (%d refs)...", pool->name,
-+ atomic_read(&pool->sgv_kobj.kref.refcount));
-+ wait_for_completion(&pool->sgv_kobj_release_cmpl);
-+ PRINT_INFO("Done waiting for releasing sysfs "
-+ "entry for SGV pool %s", pool->name);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static struct kobj_attribute sgv_global_stat_attr =
-+ __ATTR(global_stats, S_IRUGO | S_IWUSR, sgv_sysfs_global_stat_show,
-+ sgv_sysfs_global_stat_reset);
-+
-+static struct attribute *sgv_default_attrs[] = {
-+ &sgv_global_stat_attr.attr,
-+ NULL,
-+};
-+
-+static void scst_sysfs_release(struct kobject *kobj)
-+{
-+ kfree(kobj);
-+}
-+
-+static struct kobj_type sgv_ktype = {
-+ .sysfs_ops = &scst_sysfs_ops,
-+ .release = scst_sysfs_release,
-+ .default_attrs = sgv_default_attrs,
-+};
-+
-+/**
-+ ** SCST sysfs root directory implementation
-+ **/
++static struct kobj_attribute scst_acg_luns_mgmt =
++ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_luns_mgmt_show,
++ scst_acg_luns_mgmt_store);
+
-+static ssize_t scst_threads_show(struct kobject *kobj,
++static ssize_t scst_acg_addr_method_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
-+ int count;
-+
-+ TRACE_ENTRY();
++ struct scst_acg *acg;
+
-+ count = sprintf(buf, "%d\n%s", scst_main_cmd_threads.nr_threads,
-+ (scst_main_cmd_threads.nr_threads != scst_threads) ?
-+ SCST_SYSFS_KEY_MARK "\n" : "");
++ acg = container_of(kobj, struct scst_acg, acg_kobj);
+
-+ TRACE_EXIT();
-+ return count;
++ return __scst_acg_addr_method_show(acg, buf);
+}
+
-+static int scst_process_threads_store(int newtn)
++static ssize_t scst_acg_addr_method_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int res;
-+ long oldtn, delta;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("newtn %d", newtn);
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ oldtn = scst_main_cmd_threads.nr_threads;
-+
-+ delta = newtn - oldtn;
-+ if (delta < 0)
-+ scst_del_threads(&scst_main_cmd_threads, -delta);
-+ else {
-+ res = scst_add_threads(&scst_main_cmd_threads, NULL, NULL, delta);
-+ if (res != 0)
-+ goto out_up;
-+ }
++ struct scst_acg *acg;
+
-+ PRINT_INFO("Changed cmd threads num: old %ld, new %d", oldtn, newtn);
++ acg = container_of(kobj, struct scst_acg, acg_kobj);
+
-+out_up:
-+ mutex_unlock(&scst_mutex);
++ res = __scst_acg_addr_method_store(acg, buf, count);
+
-+out:
+ TRACE_EXIT_RES(res);
+ return res;
+}
+
-+static int scst_threads_store_work_fn(struct scst_sysfs_work_item *work)
++static struct kobj_attribute scst_acg_addr_method =
++ __ATTR(addr_method, S_IRUGO | S_IWUSR, scst_acg_addr_method_show,
++ scst_acg_addr_method_store);
++
++static ssize_t scst_acg_io_grouping_type_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
+{
-+ return scst_process_threads_store(work->new_threads_num);
++ struct scst_acg *acg;
++
++ acg = container_of(kobj, struct scst_acg, acg_kobj);
++
++ return __scst_acg_io_grouping_type_show(acg, buf);
+}
+
-+static ssize_t scst_threads_store(struct kobject *kobj,
++static ssize_t scst_acg_io_grouping_type_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int res;
-+ long newtn;
-+ struct scst_sysfs_work_item *work;
-+
-+ TRACE_ENTRY();
++ struct scst_acg *acg;
+
-+ res = strict_strtol(buf, 0, &newtn);
-+ if (res != 0) {
-+ PRINT_ERROR("strict_strtol() for %s failed: %d ", buf, res);
-+ goto out;
-+ }
-+ if (newtn <= 0) {
-+ PRINT_ERROR("Illegal threads num value %ld", newtn);
-+ res = -EINVAL;
-+ goto out;
-+ }
++ acg = container_of(kobj, struct scst_acg, acg_kobj);
+
-+ res = scst_alloc_sysfs_work(scst_threads_store_work_fn, false, &work);
++ res = __scst_acg_io_grouping_type_store(acg, buf, count);
+ if (res != 0)
+ goto out;
+
-+ work->new_threads_num = newtn;
-+
-+ res = scst_sysfs_queue_wait_work(work);
-+ if (res == 0)
-+ res = count;
++ res = count;
+
+out:
+ TRACE_EXIT_RES(res);
+ return res;
+}
+
-+static ssize_t scst_setup_id_show(struct kobject *kobj,
++static struct kobj_attribute scst_acg_io_grouping_type =
++ __ATTR(io_grouping_type, S_IRUGO | S_IWUSR,
++ scst_acg_io_grouping_type_show,
++ scst_acg_io_grouping_type_store);
++
++static ssize_t scst_acg_cpu_mask_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
-+ int count;
-+
-+ TRACE_ENTRY();
++ struct scst_acg *acg;
+
-+ count = sprintf(buf, "0x%x\n%s\n", scst_setup_id,
-+ (scst_setup_id == 0) ? "" : SCST_SYSFS_KEY_MARK);
++ acg = container_of(kobj, struct scst_acg, acg_kobj);
+
-+ TRACE_EXIT();
-+ return count;
++ return __scst_acg_cpu_mask_show(acg, buf);
+}
+
-+static ssize_t scst_setup_id_store(struct kobject *kobj,
++static ssize_t scst_acg_cpu_mask_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int res;
-+ unsigned long val;
++ struct scst_acg *acg;
+
-+ TRACE_ENTRY();
++ acg = container_of(kobj, struct scst_acg, acg_kobj);
+
-+ res = strict_strtoul(buf, 0, &val);
-+ if (res != 0) {
-+ PRINT_ERROR("strict_strtoul() for %s failed: %d ", buf, res);
++ res = __scst_acg_cpu_mask_store(acg, buf, count);
++ if (res != 0)
+ goto out;
-+ }
-+
-+ scst_setup_id = val;
-+ PRINT_INFO("Changed scst_setup_id to %x", scst_setup_id);
+
+ res = count;
+
@@ -22351,405 +29785,209 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ return res;
+}
+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++static struct kobj_attribute scst_acg_cpu_mask =
++ __ATTR(cpu_mask, S_IRUGO | S_IWUSR,
++ scst_acg_cpu_mask_show,
++ scst_acg_cpu_mask_store);
+
-+static void scst_read_trace_tlb(const struct scst_trace_log *tbl, char *buf,
-+ unsigned long log_level, int *pos)
++/*
++ * Called with scst_mutex held.
++ *
++ * !! No sysfs works must use kobject_get() to protect acg, due to possible
++ * !! deadlock with scst_mutex (it is waiting for the last put, but
++ * !! the last ref counter holder is waiting for scst_mutex)
++ */
++void scst_acg_sysfs_del(struct scst_acg *acg)
+{
-+ const struct scst_trace_log *t = tbl;
-+
-+ if (t == NULL)
-+ goto out;
++ int rc;
++ DECLARE_COMPLETION_ONSTACK(c);
+
-+ while (t->token) {
-+ if (log_level & t->val) {
-+ *pos += sprintf(&buf[*pos], "%s%s",
-+ (*pos == 0) ? "" : " | ",
-+ t->token);
-+ }
-+ t++;
-+ }
-+out:
-+ return;
-+}
++ TRACE_ENTRY();
+
-+static ssize_t scst_trace_level_show(const struct scst_trace_log *local_tbl,
-+ unsigned long log_level, char *buf, const char *help)
-+{
-+ int pos = 0;
++ acg->acg_kobj_release_cmpl = &c;
+
-+ scst_read_trace_tlb(scst_trace_tbl, buf, log_level, &pos);
-+ scst_read_trace_tlb(local_tbl, buf, log_level, &pos);
++ kobject_del(acg->luns_kobj);
++ kobject_del(acg->initiators_kobj);
++ kobject_del(&acg->acg_kobj);
+
-+ pos += sprintf(&buf[pos], "\n\n\nUsage:\n"
-+ " echo \"all|none|default\" >trace_level\n"
-+ " echo \"value DEC|0xHEX|0OCT\" >trace_level\n"
-+ " echo \"add|del TOKEN\" >trace_level\n"
-+ "\nwhere TOKEN is one of [debug, function, line, pid,\n"
-+#ifndef GENERATING_UPSTREAM_PATCH
-+ " entryexit, buff, mem, sg, out_of_mem,\n"
-+#else
-+ " buff, mem, sg, out_of_mem,\n"
-+#endif
-+ " special, scsi, mgmt, minor,\n"
-+ " mgmt_dbg, scsi_serializing,\n"
-+ " retry, recv_bot, send_bot, recv_top, pr,\n"
-+ " send_top%s]", help != NULL ? help : "");
++ kobject_put(acg->luns_kobj);
++ kobject_put(acg->initiators_kobj);
++ kobject_put(&acg->acg_kobj);
+
-+ return pos;
-+}
++ rc = wait_for_completion_timeout(acg->acg_kobj_release_cmpl, HZ);
++ if (rc == 0) {
++ PRINT_INFO("Waiting for releasing sysfs entry "
++ "for acg %s (%d refs)...", acg->acg_name,
++ atomic_read(&acg->acg_kobj.kref.refcount));
++ wait_for_completion(acg->acg_kobj_release_cmpl);
++ PRINT_INFO("Done waiting for releasing sysfs "
++ "entry for acg %s", acg->acg_name);
++ }
+
-+static ssize_t scst_main_trace_level_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return scst_trace_level_show(scst_local_trace_tbl, trace_flag,
-+ buf, NULL);
++ TRACE_EXIT();
++ return;
+}
+
-+static int scst_write_trace(const char *buf, size_t length,
-+ unsigned long *log_level, unsigned long default_level,
-+ const char *name, const struct scst_trace_log *tbl)
++int scst_acg_sysfs_create(struct scst_tgt *tgt,
++ struct scst_acg *acg)
+{
-+ int res = length;
-+ int action;
-+ unsigned long level = 0, oldlevel;
-+ char *buffer, *p, *e;
-+ const struct scst_trace_log *t;
-+
-+#define SCST_TRACE_ACTION_ALL 1
-+#define SCST_TRACE_ACTION_NONE 2
-+#define SCST_TRACE_ACTION_DEFAULT 3
-+#define SCST_TRACE_ACTION_ADD 4
-+#define SCST_TRACE_ACTION_DEL 5
-+#define SCST_TRACE_ACTION_VALUE 6
++ int res = 0;
+
+ TRACE_ENTRY();
+
-+ if ((buf == NULL) || (length == 0)) {
-+ res = -EINVAL;
++ res = kobject_init_and_add(&acg->acg_kobj, &acg_ktype,
++ tgt->tgt_ini_grp_kobj, acg->acg_name);
++ if (res != 0) {
++ PRINT_ERROR("Can't add acg '%s' to sysfs", acg->acg_name);
+ goto out;
+ }
+
-+ buffer = kmalloc(length+1, GFP_KERNEL);
-+ if (buffer == NULL) {
-+ PRINT_ERROR("Unable to alloc intermediate buffer (size %zd)",
-+ length+1);
++ acg->luns_kobj = kobject_create_and_add("luns", &acg->acg_kobj);
++ if (acg->luns_kobj == NULL) {
++ PRINT_ERROR("Can't create luns kobj for tgt %s",
++ tgt->tgt_name);
+ res = -ENOMEM;
-+ goto out;
++ goto out_del;
+ }
-+ memcpy(buffer, buf, length);
-+ buffer[length] = '\0';
+
-+ TRACE_DBG("buffer %s", buffer);
-+
-+ p = buffer;
-+ if (!strncasecmp("all", p, 3)) {
-+ action = SCST_TRACE_ACTION_ALL;
-+ } else if (!strncasecmp("none", p, 4) || !strncasecmp("null", p, 4)) {
-+ action = SCST_TRACE_ACTION_NONE;
-+ } else if (!strncasecmp("default", p, 7)) {
-+ action = SCST_TRACE_ACTION_DEFAULT;
-+ } else if (!strncasecmp("add", p, 3)) {
-+ p += 3;
-+ action = SCST_TRACE_ACTION_ADD;
-+ } else if (!strncasecmp("del", p, 3)) {
-+ p += 3;
-+ action = SCST_TRACE_ACTION_DEL;
-+ } else if (!strncasecmp("value", p, 5)) {
-+ p += 5;
-+ action = SCST_TRACE_ACTION_VALUE;
-+ } else {
-+ if (p[strlen(p) - 1] == '\n')
-+ p[strlen(p) - 1] = '\0';
-+ PRINT_ERROR("Unknown action \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_free;
++ res = sysfs_create_file(acg->luns_kobj, &scst_acg_luns_mgmt.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add tgt attr %s for tgt %s",
++ scst_acg_luns_mgmt.attr.name, tgt->tgt_name);
++ goto out_del;
+ }
+
-+ switch (action) {
-+ case SCST_TRACE_ACTION_ADD:
-+ case SCST_TRACE_ACTION_DEL:
-+ case SCST_TRACE_ACTION_VALUE:
-+ if (!isspace(*p)) {
-+ PRINT_ERROR("%s", "Syntax error");
-+ res = -EINVAL;
-+ goto out_free;
-+ }
++ acg->initiators_kobj = kobject_create_and_add("initiators",
++ &acg->acg_kobj);
++ if (acg->initiators_kobj == NULL) {
++ PRINT_ERROR("Can't create initiators kobj for tgt %s",
++ tgt->tgt_name);
++ res = -ENOMEM;
++ goto out_del;
+ }
+
-+ switch (action) {
-+ case SCST_TRACE_ACTION_ALL:
-+ level = TRACE_ALL;
-+ break;
-+ case SCST_TRACE_ACTION_DEFAULT:
-+ level = default_level;
-+ break;
-+ case SCST_TRACE_ACTION_NONE:
-+ level = TRACE_NULL;
-+ break;
-+ case SCST_TRACE_ACTION_ADD:
-+ case SCST_TRACE_ACTION_DEL:
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+ e = p;
-+ while (!isspace(*e) && *e != '\0')
-+ e++;
-+ *e = 0;
-+ if (tbl) {
-+ t = tbl;
-+ while (t->token) {
-+ if (!strcasecmp(p, t->token)) {
-+ level = t->val;
-+ break;
-+ }
-+ t++;
-+ }
-+ }
-+ if (level == 0) {
-+ t = scst_trace_tbl;
-+ while (t->token) {
-+ if (!strcasecmp(p, t->token)) {
-+ level = t->val;
-+ break;
-+ }
-+ t++;
-+ }
-+ }
-+ if (level == 0) {
-+ PRINT_ERROR("Unknown token \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+ break;
-+ case SCST_TRACE_ACTION_VALUE:
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+ res = strict_strtoul(p, 0, &level);
-+ if (res != 0) {
-+ PRINT_ERROR("Invalid trace value \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+ break;
++ res = sysfs_create_file(acg->initiators_kobj,
++ &scst_acg_ini_mgmt.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add tgt attr %s for tgt %s",
++ scst_acg_ini_mgmt.attr.name, tgt->tgt_name);
++ goto out_del;
+ }
+
-+ oldlevel = *log_level;
++ res = sysfs_create_file(&acg->acg_kobj, &scst_acg_addr_method.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add tgt attr %s for tgt %s",
++ scst_acg_addr_method.attr.name, tgt->tgt_name);
++ goto out_del;
++ }
+
-+ switch (action) {
-+ case SCST_TRACE_ACTION_ADD:
-+ *log_level |= level;
-+ break;
-+ case SCST_TRACE_ACTION_DEL:
-+ *log_level &= ~level;
-+ break;
-+ default:
-+ *log_level = level;
-+ break;
++ res = sysfs_create_file(&acg->acg_kobj, &scst_acg_io_grouping_type.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add tgt attr %s for tgt %s",
++ scst_acg_io_grouping_type.attr.name, tgt->tgt_name);
++ goto out_del;
+ }
+
-+ PRINT_INFO("Changed trace level for \"%s\": old 0x%08lx, new 0x%08lx",
-+ name, oldlevel, *log_level);
++ res = sysfs_create_file(&acg->acg_kobj, &scst_acg_cpu_mask.attr);
++ if (res != 0) {
++ PRINT_ERROR("Can't add tgt attr %s for tgt %s",
++ scst_acg_cpu_mask.attr.name, tgt->tgt_name);
++ goto out_del;
++ }
+
-+out_free:
-+ kfree(buffer);
+out:
+ TRACE_EXIT_RES(res);
+ return res;
+
-+#undef SCST_TRACE_ACTION_ALL
-+#undef SCST_TRACE_ACTION_NONE
-+#undef SCST_TRACE_ACTION_DEFAULT
-+#undef SCST_TRACE_ACTION_ADD
-+#undef SCST_TRACE_ACTION_DEL
-+#undef SCST_TRACE_ACTION_VALUE
++out_del:
++ scst_acg_sysfs_del(acg);
++ goto out;
+}
+
-+static ssize_t scst_main_trace_level_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_log_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ res = scst_write_trace(buf, count, &trace_flag,
-+ SCST_DEFAULT_LOG_FLAGS, "scst", scst_local_trace_tbl);
-+
-+ mutex_unlock(&scst_log_mutex);
++/**
++ ** acn
++ **/
+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
++static ssize_t scst_acn_file_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ return scnprintf(buf, SCST_SYSFS_BLOCK_SIZE, "%s\n",
++ attr->attr.name);
+}
+
-+#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
-+
-+static ssize_t scst_version_show(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ char *buf)
++int scst_acn_sysfs_create(struct scst_acn *acn)
+{
-+ TRACE_ENTRY();
-+
-+ sprintf(buf, "%s\n", SCST_VERSION_STRING);
-+
-+#ifdef CONFIG_SCST_STRICT_SERIALIZING
-+ strcat(buf, "STRICT_SERIALIZING\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ strcat(buf, "EXTRACHECKS\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_TRACING
-+ strcat(buf, "TRACING\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG
-+ strcat(buf, "DEBUG\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_TM
-+ strcat(buf, "DEBUG_TM\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_RETRY
-+ strcat(buf, "DEBUG_RETRY\n");
++ int res = 0;
++ struct scst_acg *acg = acn->acg;
++ struct kobj_attribute *attr = NULL;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ static struct lock_class_key __key;
+#endif
+
-+#ifdef CONFIG_SCST_DEBUG_OOM
-+ strcat(buf, "DEBUG_OOM\n");
-+#endif
++ TRACE_ENTRY();
+
-+#ifdef CONFIG_SCST_DEBUG_SN
-+ strcat(buf, "DEBUG_SN\n");
-+#endif
++ acn->acn_attr = NULL;
+
-+#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
-+ strcat(buf, "USE_EXPECTED_VALUES\n");
-+#endif
++ attr = kzalloc(sizeof(struct kobj_attribute), GFP_KERNEL);
++ if (attr == NULL) {
++ PRINT_ERROR("Unable to allocate attributes for initiator '%s'",
++ acn->name);
++ res = -ENOMEM;
++ goto out;
++ }
+
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ strcat(buf, "TEST_IO_IN_SIRQ\n");
-+#endif
++ attr->attr.name = kstrdup(acn->name, GFP_KERNEL);
++ if (attr->attr.name == NULL) {
++ PRINT_ERROR("Unable to allocate attributes for initiator '%s'",
++ acn->name);
++ res = -ENOMEM;
++ goto out_free;
++ }
+
-+#ifdef CONFIG_SCST_STRICT_SECURITY
-+ strcat(buf, "STRICT_SECURITY\n");
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ attr->attr.key = &__key;
+#endif
+
-+ TRACE_EXIT();
-+ return strlen(buf);
-+}
-+
-+static ssize_t scst_last_sysfs_mgmt_res_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int res;
++ attr->attr.mode = S_IRUGO;
++ attr->show = scst_acn_file_show;
++ attr->store = NULL;
+
-+ TRACE_ENTRY();
++ res = sysfs_create_file(acg->initiators_kobj, &attr->attr);
++ if (res != 0) {
++ PRINT_ERROR("Unable to create acn '%s' for group '%s'",
++ acn->name, acg->acg_name);
++ kfree(attr->attr.name);
++ goto out_free;
++ }
+
-+ spin_lock(&sysfs_work_lock);
-+ TRACE_DBG("active_sysfs_works %d", active_sysfs_works);
-+ if (active_sysfs_works > 0)
-+ res = -EAGAIN;
-+ else
-+ res = sprintf(buf, "%d\n", last_sysfs_work_res);
-+ spin_unlock(&sysfs_work_lock);
++ acn->acn_attr = attr;
+
++out:
+ TRACE_EXIT_RES(res);
+ return res;
-+}
-+
-+static struct kobj_attribute scst_threads_attr =
-+ __ATTR(threads, S_IRUGO | S_IWUSR, scst_threads_show,
-+ scst_threads_store);
-+
-+static struct kobj_attribute scst_setup_id_attr =
-+ __ATTR(setup_id, S_IRUGO | S_IWUSR, scst_setup_id_show,
-+ scst_setup_id_store);
-+
-+static ssize_t scst_max_tasklet_cmd_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int count;
-+
-+ TRACE_ENTRY();
+
-+ count = sprintf(buf, "%d\n%s\n", scst_max_tasklet_cmd,
-+ (scst_max_tasklet_cmd == SCST_DEF_MAX_TASKLET_CMD)
-+ ? "" : SCST_SYSFS_KEY_MARK);
-+
-+ TRACE_EXIT();
-+ return count;
++out_free:
++ kfree(attr);
++ goto out;
+}
+
-+static ssize_t scst_max_tasklet_cmd_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
++void scst_acn_sysfs_del(struct scst_acn *acn)
+{
-+ int res;
-+ unsigned long val;
++ struct scst_acg *acg = acn->acg;
+
+ TRACE_ENTRY();
+
-+ res = strict_strtoul(buf, 0, &val);
-+ if (res != 0) {
-+ PRINT_ERROR("strict_strtoul() for %s failed: %d ", buf, res);
-+ goto out;
++ if (acn->acn_attr != NULL) {
++ sysfs_remove_file(acg->initiators_kobj,
++ &acn->acn_attr->attr);
++ kfree(acn->acn_attr->attr.name);
++ kfree(acn->acn_attr);
+ }
+
-+ scst_max_tasklet_cmd = val;
-+ PRINT_INFO("Changed scst_max_tasklet_cmd to %d", scst_max_tasklet_cmd);
-+
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_max_tasklet_cmd_attr =
-+ __ATTR(max_tasklet_cmd, S_IRUGO | S_IWUSR, scst_max_tasklet_cmd_show,
-+ scst_max_tasklet_cmd_store);
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+static struct kobj_attribute scst_trace_level_attr =
-+ __ATTR(trace_level, S_IRUGO | S_IWUSR, scst_main_trace_level_show,
-+ scst_main_trace_level_store);
-+#endif
-+
-+static struct kobj_attribute scst_version_attr =
-+ __ATTR(version, S_IRUGO, scst_version_show, NULL);
-+
-+static struct kobj_attribute scst_last_sysfs_mgmt_res_attr =
-+ __ATTR(last_sysfs_mgmt_res, S_IRUGO,
-+ scst_last_sysfs_mgmt_res_show, NULL);
-+
-+static struct attribute *scst_sysfs_root_default_attrs[] = {
-+ &scst_threads_attr.attr,
-+ &scst_setup_id_attr.attr,
-+ &scst_max_tasklet_cmd_attr.attr,
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ &scst_trace_level_attr.attr,
-+#endif
-+ &scst_version_attr.attr,
-+ &scst_last_sysfs_mgmt_res_attr.attr,
-+ NULL,
-+};
-+
-+static void scst_sysfs_root_release(struct kobject *kobj)
-+{
-+ complete_all(&scst_sysfs_root_release_completion);
++ TRACE_EXIT();
++ return;
+}
+
-+static struct kobj_type scst_sysfs_root_ktype = {
-+ .sysfs_ops = &scst_sysfs_ops,
-+ .release = scst_sysfs_root_release,
-+ .default_attrs = scst_sysfs_root_default_attrs,
-+};
-+
+/**
+ ** Dev handlers
+ **/
@@ -22761,7 +29999,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ TRACE_ENTRY();
+
+ devt = container_of(kobj, struct scst_dev_type, devt_kobj);
-+ complete_all(&devt->devt_kobj_release_compl);
++ if (devt->devt_kobj_release_compl)
++ complete_all(devt->devt_kobj_release_compl);
+
+ TRACE_EXIT();
+ return;
@@ -22791,10 +30030,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+
+ devt = container_of(kobj, struct scst_dev_type, devt_kobj);
+
-+ if (mutex_lock_interruptible(&scst_log_mutex) != 0) {
-+ res = -EINTR;
++ res = mutex_lock_interruptible(&scst_log_mutex);
++ if (res != 0)
+ goto out;
-+ }
+
+ res = scst_write_trace(buf, count, devt->trace_flags,
+ devt->default_trace_flags, devt->name, devt->trace_tbl);
@@ -22821,7 +30059,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ devt = container_of(kobj, struct scst_dev_type, devt_kobj);
+
+ pos = sprintf(buf, "%d - %s\n", devt->type,
-+ (unsigned)devt->type > ARRAY_SIZE(scst_dev_handler_types) ?
++ (unsigned)devt->type >= ARRAY_SIZE(scst_dev_handler_types) ?
+ "unknown" : scst_dev_handler_types[devt->type]);
+
+ return pos;
@@ -22844,15 +30082,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+static ssize_t scst_devt_mgmt_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
-+ char *help = "Usage: echo \"add_device device_name [parameters]\" "
-+ ">mgmt\n"
-+ " echo \"del_device device_name\" >mgmt\n"
-+ "%s%s"
-+ "%s"
-+ "\n"
-+ "where parameters are one or more "
-+ "param_name=value pairs separated by ';'\n\n"
-+ "%s%s%s%s%s%s%s%s\n";
++ static const char help[] =
++ "Usage: echo \"add_device device_name [parameters]\" >mgmt\n"
++ " echo \"del_device device_name\" >mgmt\n"
++ "%s%s"
++ "%s"
++ "\n"
++ "where parameters are one or more "
++ "param_name=value pairs separated by ';'\n\n"
++ "%s%s%s%s%s%s%s%s\n";
+ struct scst_dev_type *devt;
+
+ devt = container_of(kobj, struct scst_dev_type, devt_kobj);
@@ -22962,13 +30200,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+
+ devt = container_of(kobj, struct scst_dev_type, devt_kobj);
+
-+ buffer = kzalloc(count+1, GFP_KERNEL);
++ buffer = kasprintf(GFP_KERNEL, "%.*s", (int)count, buf);
+ if (buffer == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
-+ memcpy(buffer, buf, count);
-+ buffer[count] = '\0';
+
+ res = scst_alloc_sysfs_work(sysfs_work_fn, false, &work);
+ if (res != 0)
@@ -23004,8 +30240,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+static ssize_t scst_devt_pass_through_mgmt_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
-+ char *help = "Usage: echo \"add_device H:C:I:L\" >mgmt\n"
-+ " echo \"del_device H:C:I:L\" >mgmt\n";
++ static const char help[] =
++ "Usage: echo \"add_device H:C:I:L\" >mgmt\n"
++ " echo \"del_device H:C:I:L\" >mgmt\n";
+ return sprintf(buf, "%s", help);
+}
+
@@ -23057,10 +30294,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+
+ TRACE_DBG("Dev %ld:%ld:%ld:%ld", host, channel, id, lun);
+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res != 0)
+ goto out;
-+ }
+
+ /* Check if devt not be already freed while we were coming here */
+ if (scst_check_devt_ptr(devt, &scst_dev_type_list) != 0)
@@ -23149,12 +30385,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+{
+ int res;
+ struct kobject *parent;
-+ const struct attribute **pattr;
+
+ TRACE_ENTRY();
+
-+ init_completion(&devt->devt_kobj_release_compl);
-+
+ if (devt->parent != NULL)
+ parent = &devt->parent->devt_kobj;
+ else
@@ -23180,17 +30413,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+ goto out_err;
+ }
+
-+ pattr = devt->devt_attrs;
-+ if (pattr != NULL) {
-+ while (*pattr != NULL) {
-+ res = sysfs_create_file(&devt->devt_kobj, *pattr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add devt attr %s for dev "
-+ "handler %s", (*pattr)->name,
-+ devt->name);
-+ goto out_err;
-+ }
-+ pattr++;
++ if (devt->devt_attrs) {
++ res = sysfs_create_files(&devt->devt_kobj, devt->devt_attrs);
++ if (res != 0) {
++ PRINT_ERROR("Can't add attributes for dev handler %s",
++ devt->name);
++ goto out_err;
+ }
+ }
+
@@ -23218,18 +30446,21 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+void scst_devt_sysfs_del(struct scst_dev_type *devt)
+{
+ int rc;
++ DECLARE_COMPLETION_ONSTACK(c);
+
+ TRACE_ENTRY();
+
++ devt->devt_kobj_release_compl = &c;
++
+ kobject_del(&devt->devt_kobj);
+ kobject_put(&devt->devt_kobj);
+
-+ rc = wait_for_completion_timeout(&devt->devt_kobj_release_compl, HZ);
++ rc = wait_for_completion_timeout(devt->devt_kobj_release_compl, HZ);
+ if (rc == 0) {
+ PRINT_INFO("Waiting for releasing of sysfs entry "
+ "for dev handler template %s (%d refs)...", devt->name,
+ atomic_read(&devt->devt_kobj.kref.refcount));
-+ wait_for_completion(&devt->devt_kobj_release_compl);
++ wait_for_completion(devt->devt_kobj_release_compl);
+ PRINT_INFO("Done waiting for releasing sysfs entry "
+ "for dev handler template %s", devt->name);
+ }
@@ -23239,6994 +30470,1366 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_sysfs.c linux-2.6.36/drivers/scst
+}
+
+/**
-+ ** Sysfs user info
++ ** SCST sysfs device_groups/<dg>/devices/<dev> implementation.
+ **/
+
-+static DEFINE_MUTEX(scst_sysfs_user_info_mutex);
-+
-+/* All protected by scst_sysfs_user_info_mutex */
-+static LIST_HEAD(scst_sysfs_user_info_list);
-+static uint32_t scst_sysfs_info_cur_cookie;
-+
-+/* scst_sysfs_user_info_mutex supposed to be held */
-+static struct scst_sysfs_user_info *scst_sysfs_user_find_info(uint32_t cookie)
++int scst_dg_dev_sysfs_add(struct scst_dev_group *dg, struct scst_dg_dev *dgdev)
+{
-+ struct scst_sysfs_user_info *info, *res = NULL;
++ int res;
+
+ TRACE_ENTRY();
-+
-+ list_for_each_entry(info, &scst_sysfs_user_info_list,
-+ info_list_entry) {
-+ if (info->info_cookie == cookie) {
-+ res = info;
-+ break;
-+ }
-+ }
-+
-+ TRACE_EXIT_HRES(res);
++ res = sysfs_create_link(dg->dev_kobj, &dgdev->dev->dev_kobj,
++ dgdev->dev->virt_name);
++ TRACE_EXIT_RES(res);
+ return res;
+}
+
-+/**
-+ * scst_sysfs_user_get_info() - get user_info
-+ *
-+ * Finds the user_info based on cookie and mark it as received the reply by
-+ * setting for it flag info_being_executed.
-+ *
-+ * Returns found entry or NULL.
-+ */
-+struct scst_sysfs_user_info *scst_sysfs_user_get_info(uint32_t cookie)
++void scst_dg_dev_sysfs_del(struct scst_dev_group *dg, struct scst_dg_dev *dgdev)
+{
-+ struct scst_sysfs_user_info *res = NULL;
-+
+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_sysfs_user_info_mutex);
-+
-+ res = scst_sysfs_user_find_info(cookie);
-+ if (res != NULL) {
-+ if (!res->info_being_executed)
-+ res->info_being_executed = 1;
-+ }
-+
-+ mutex_unlock(&scst_sysfs_user_info_mutex);
-+
-+ TRACE_EXIT_HRES(res);
-+ return res;
++ sysfs_remove_link(dg->dev_kobj, dgdev->dev->virt_name);
++ TRACE_EXIT();
+}
-+EXPORT_SYMBOL_GPL(scst_sysfs_user_get_info);
+
+/**
-+ ** Helper functionality to help target drivers and dev handlers support
-+ ** sending events to user space and wait for their completion in a safe
-+ ** manner. See samples how to use it in iscsi-scst or scst_user.
++ ** SCST sysfs device_groups/<dg>/devices directory implementation.
+ **/
+
-+/**
-+ * scst_sysfs_user_add_info() - create and add user_info in the global list
-+ *
-+ * Creates an info structure and adds it in the info_list.
-+ * Returns 0 and out_info on success, error code otherwise.
-+ */
-+int scst_sysfs_user_add_info(struct scst_sysfs_user_info **out_info)
-+{
-+ int res = 0;
-+ struct scst_sysfs_user_info *info;
-+
-+ TRACE_ENTRY();
-+
-+ info = kzalloc(sizeof(*info), GFP_KERNEL);
-+ if (info == NULL) {
-+ PRINT_ERROR("Unable to allocate sysfs user info (size %zd)",
-+ sizeof(*info));
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ mutex_lock(&scst_sysfs_user_info_mutex);
-+
-+ while ((info->info_cookie == 0) ||
-+ (scst_sysfs_user_find_info(info->info_cookie) != NULL))
-+ info->info_cookie = scst_sysfs_info_cur_cookie++;
-+
-+ init_completion(&info->info_completion);
-+
-+ list_add_tail(&info->info_list_entry, &scst_sysfs_user_info_list);
-+ info->info_in_list = 1;
-+
-+ *out_info = info;
-+
-+ mutex_unlock(&scst_sysfs_user_info_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(scst_sysfs_user_add_info);
-+
-+/**
-+ * scst_sysfs_user_del_info - delete and frees user_info
-+ */
-+void scst_sysfs_user_del_info(struct scst_sysfs_user_info *info)
++static ssize_t scst_dg_devs_mgmt_show(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ char *buf)
+{
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_sysfs_user_info_mutex);
-+
-+ if (info->info_in_list)
-+ list_del(&info->info_list_entry);
-+
-+ mutex_unlock(&scst_sysfs_user_info_mutex);
-+
-+ kfree(info);
++ static const char help[] =
++ "Usage: echo \"add device\" >mgmt\n"
++ " echo \"del device\" >mgmt\n";
+
-+ TRACE_EXIT();
-+ return;
++ return scnprintf(buf, PAGE_SIZE, help);
+}
-+EXPORT_SYMBOL_GPL(scst_sysfs_user_del_info);
+
-+/*
-+ * Returns true if the reply received and being processed by another part of
-+ * the kernel, false otherwise. Also removes the user_info from the list to
-+ * fix for the user space that it missed the timeout.
-+ */
-+static bool scst_sysfs_user_info_executing(struct scst_sysfs_user_info *info)
++static int scst_dg_devs_mgmt_store_work_fn(struct scst_sysfs_work_item *w)
+{
-+ bool res;
++ struct scst_dev_group *dg;
++ char *cmd, *p, *pp, *dev_name;
++ int res;
+
+ TRACE_ENTRY();
+
-+ mutex_lock(&scst_sysfs_user_info_mutex);
-+
-+ res = info->info_being_executed;
-+
-+ if (info->info_in_list) {
-+ list_del(&info->info_list_entry);
-+ info->info_in_list = 0;
-+ }
-+
-+ mutex_unlock(&scst_sysfs_user_info_mutex);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/**
-+ * scst_wait_info_completion() - wait an user space event's completion
-+ *
-+ * Waits for the info request been completed by user space at most timeout
-+ * jiffies. If the reply received before timeout and being processed by
-+ * another part of the kernel, i.e. scst_sysfs_user_info_executing()
-+ * returned true, waits for it to complete indefinitely.
-+ *
-+ * Returns status of the request completion.
-+ */
-+int scst_wait_info_completion(struct scst_sysfs_user_info *info,
-+ unsigned long timeout)
-+{
-+ int res, rc;
-+
-+ TRACE_ENTRY();
++ cmd = w->buf;
++ dg = scst_lookup_dg_by_kobj(w->kobj);
++ WARN_ON(!dg);
+
-+ TRACE_DBG("Waiting for info %p completion", info);
++ p = strchr(cmd, '\n');
++ if (p)
++ *p = '\0';
+
-+ while (1) {
-+ rc = wait_for_completion_interruptible_timeout(
-+ &info->info_completion, timeout);
-+ if (rc > 0) {
-+ TRACE_DBG("Waiting for info %p finished with %d",
-+ info, rc);
-+ break;
-+ } else if (rc == 0) {
-+ if (!scst_sysfs_user_info_executing(info)) {
-+ PRINT_ERROR("Timeout waiting for user "
-+ "space event %p", info);
-+ res = -EBUSY;
-+ goto out;
-+ } else {
-+ /* Req is being executed in the kernel */
-+ TRACE_DBG("Keep waiting for info %p completion",
-+ info);
-+ wait_for_completion(&info->info_completion);
-+ break;
-+ }
-+ } else if (rc != -ERESTARTSYS) {
-+ res = rc;
-+ PRINT_ERROR("wait_for_completion() failed: %d",
-+ res);
-+ goto out;
-+ } else {
-+ TRACE_DBG("Waiting for info %p finished with %d, "
-+ "retrying", info, rc);
-+ }
++ res = -EINVAL;
++ pp = cmd;
++ p = scst_get_next_lexem(&pp);
++ if (strcasecmp(p, "add") == 0) {
++ dev_name = scst_get_next_lexem(&pp);
++ if (!*dev_name)
++ goto out;
++ res = scst_dg_dev_add(dg, dev_name);
++ } else if (strcasecmp(p, "del") == 0) {
++ dev_name = scst_get_next_lexem(&pp);
++ if (!*dev_name)
++ goto out;
++ res = scst_dg_dev_remove_by_name(dg, dev_name);
+ }
-+
-+ TRACE_DBG("info %p, status %d", info, info->info_status);
-+ res = info->info_status;
-+
+out:
++ kobject_put(w->kobj);
+ TRACE_EXIT_RES(res);
+ return res;
+}
-+EXPORT_SYMBOL_GPL(scst_wait_info_completion);
+
-+int __init scst_sysfs_init(void)
++static ssize_t scst_dg_devs_mgmt_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count)
+{
-+ int res = 0;
++ char *cmd;
++ struct scst_sysfs_work_item *work;
++ int res;
+
+ TRACE_ENTRY();
+
-+ sysfs_work_thread = kthread_run(sysfs_work_thread_fn,
-+ NULL, "scst_uid");
-+ if (IS_ERR(sysfs_work_thread)) {
-+ res = PTR_ERR(sysfs_work_thread);
-+ PRINT_ERROR("kthread_create() for user interface thread "
-+ "failed: %d", res);
-+ sysfs_work_thread = NULL;
++ res = -ENOMEM;
++ cmd = kasprintf(GFP_KERNEL, "%.*s", (int)count, buf);
++ if (!cmd)
+ goto out;
-+ }
-+
-+ res = kobject_init_and_add(&scst_sysfs_root_kobj,
-+ &scst_sysfs_root_ktype, kernel_kobj, "%s", "scst_tgt");
-+ if (res != 0)
-+ goto sysfs_root_add_error;
-+
-+ scst_targets_kobj = kobject_create_and_add("targets",
-+ &scst_sysfs_root_kobj);
-+ if (scst_targets_kobj == NULL)
-+ goto targets_kobj_error;
-+
-+ scst_devices_kobj = kobject_create_and_add("devices",
-+ &scst_sysfs_root_kobj);
-+ if (scst_devices_kobj == NULL)
-+ goto devices_kobj_error;
-+
-+ scst_sgv_kobj = kzalloc(sizeof(*scst_sgv_kobj), GFP_KERNEL);
-+ if (scst_sgv_kobj == NULL)
-+ goto sgv_kobj_error;
+
-+ res = kobject_init_and_add(scst_sgv_kobj, &sgv_ktype,
-+ &scst_sysfs_root_kobj, "%s", "sgv");
-+ if (res != 0)
-+ goto sgv_kobj_add_error;
++ res = scst_alloc_sysfs_work(scst_dg_devs_mgmt_store_work_fn, false,
++ &work);
++ if (res)
++ goto out;
+
-+ scst_handlers_kobj = kobject_create_and_add("handlers",
-+ &scst_sysfs_root_kobj);
-+ if (scst_handlers_kobj == NULL)
-+ goto handlers_kobj_error;
++ work->buf = cmd;
++ work->kobj = kobj;
++ kobject_get(kobj);
++ res = scst_sysfs_queue_wait_work(work);
+
+out:
++ if (res == 0)
++ res = count;
+ TRACE_EXIT_RES(res);
+ return res;
-+
-+handlers_kobj_error:
-+ kobject_del(scst_sgv_kobj);
-+
-+sgv_kobj_add_error:
-+ kobject_put(scst_sgv_kobj);
-+
-+sgv_kobj_error:
-+ kobject_del(scst_devices_kobj);
-+ kobject_put(scst_devices_kobj);
-+
-+devices_kobj_error:
-+ kobject_del(scst_targets_kobj);
-+ kobject_put(scst_targets_kobj);
-+
-+targets_kobj_error:
-+ kobject_del(&scst_sysfs_root_kobj);
-+
-+sysfs_root_add_error:
-+ kobject_put(&scst_sysfs_root_kobj);
-+
-+ kthread_stop(sysfs_work_thread);
-+
-+ if (res == 0)
-+ res = -EINVAL;
-+
-+ goto out;
-+}
-+
-+void scst_sysfs_cleanup(void)
-+{
-+ TRACE_ENTRY();
-+
-+ PRINT_INFO("%s", "Exiting SCST sysfs hierarchy...");
-+
-+ kobject_del(scst_sgv_kobj);
-+ kobject_put(scst_sgv_kobj);
-+
-+ kobject_del(scst_devices_kobj);
-+ kobject_put(scst_devices_kobj);
-+
-+ kobject_del(scst_targets_kobj);
-+ kobject_put(scst_targets_kobj);
-+
-+ kobject_del(scst_handlers_kobj);
-+ kobject_put(scst_handlers_kobj);
-+
-+ kobject_del(&scst_sysfs_root_kobj);
-+ kobject_put(&scst_sysfs_root_kobj);
-+
-+ wait_for_completion(&scst_sysfs_root_release_completion);
-+ /*
-+ * There is a race, when in the release() schedule happens just after
-+ * calling complete(), so if we exit and unload scst module immediately,
-+ * there will be oops there. So let's give it a chance to quit
-+ * gracefully. Unfortunately, current kobjects implementation
-+ * doesn't allow better ways to handle it.
-+ */
-+ msleep(3000);
-+
-+ if (sysfs_work_thread)
-+ kthread_stop(sysfs_work_thread);
-+
-+ PRINT_INFO("%s", "Exiting SCST sysfs hierarchy done");
-+
-+ TRACE_EXIT();
-+ return;
+}
-diff -uprN orig/linux-2.6.36/drivers/scst/scst_targ.c linux-2.6.36/drivers/scst/scst_targ.c
---- orig/linux-2.6.36/drivers/scst/scst_targ.c
-+++ linux-2.6.36/drivers/scst/scst_targ.c
-@@ -0,0 +1,6654 @@
-+/*
-+ * scst_targ.c
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <linux/slab.h>
-+#include <linux/sched.h>
-+#include <linux/smp_lock.h>
-+#include <linux/unistd.h>
-+#include <linux/string.h>
-+#include <linux/kthread.h>
-+#include <linux/delay.h>
-+#include <linux/ktime.h>
-+
-+#include <scst/scst.h>
-+#include "scst_priv.h"
-+#include "scst_pres.h"
+
-+#if 0 /* Temporary left for future performance investigations */
-+/* Deleting it don't forget to delete write_cmd_count */
-+#define CONFIG_SCST_ORDERED_READS
-+#endif
-+
-+#if 0 /* Let's disable it for now to see if users will complain about it */
-+/* Deleting it don't forget to delete write_cmd_count */
-+#define CONFIG_SCST_PER_DEVICE_CMD_COUNT_LIMIT
-+#endif
-+
-+static void scst_cmd_set_sn(struct scst_cmd *cmd);
-+static int __scst_init_cmd(struct scst_cmd *cmd);
-+static void scst_finish_cmd_mgmt(struct scst_cmd *cmd);
-+static struct scst_cmd *__scst_find_cmd_by_tag(struct scst_session *sess,
-+ uint64_t tag, bool to_abort);
-+static void scst_process_redirect_cmd(struct scst_cmd *cmd,
-+ enum scst_exec_context context, int check_retries);
++static struct kobj_attribute scst_dg_devs_mgmt =
++ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_dg_devs_mgmt_show,
++ scst_dg_devs_mgmt_store);
+
-+/**
-+ * scst_post_parse() - do post parse actions
-+ *
-+ * This function must be called by dev handler after its parse() callback
-+ * returned SCST_CMD_STATE_STOP before calling scst_process_active_cmd().
-+ */
-+void scst_post_parse(struct scst_cmd *cmd)
-+{
-+ scst_set_parse_time(cmd);
-+}
-+EXPORT_SYMBOL_GPL(scst_post_parse);
++static const struct attribute *scst_dg_devs_attrs[] = {
++ &scst_dg_devs_mgmt.attr,
++ NULL,
++};
+
+/**
-+ * scst_post_alloc_data_buf() - do post alloc_data_buf actions
-+ *
-+ * This function must be called by dev handler after its alloc_data_buf()
-+ * callback returned SCST_CMD_STATE_STOP before calling
-+ * scst_process_active_cmd().
-+ */
-+void scst_post_alloc_data_buf(struct scst_cmd *cmd)
-+{
-+ scst_set_alloc_buf_time(cmd);
-+}
-+EXPORT_SYMBOL_GPL(scst_post_alloc_data_buf);
-+
-+static inline void scst_schedule_tasklet(struct scst_cmd *cmd)
-+{
-+ struct scst_tasklet *t = &scst_tasklets[smp_processor_id()];
-+ unsigned long flags;
-+
-+ if (atomic_read(&scst_cmd_count) <= scst_max_tasklet_cmd) {
-+ spin_lock_irqsave(&t->tasklet_lock, flags);
-+ TRACE_DBG("Adding cmd %p to tasklet %d cmd list", cmd,
-+ smp_processor_id());
-+ list_add_tail(&cmd->cmd_list_entry, &t->tasklet_cmd_list);
-+ spin_unlock_irqrestore(&t->tasklet_lock, flags);
-+
-+ tasklet_schedule(&t->tasklet);
-+ } else {
-+ spin_lock_irqsave(&cmd->cmd_threads->cmd_list_lock, flags);
-+ TRACE_DBG("Too many tasklet commands (%d), adding cmd %p to "
-+ "active cmd list", atomic_read(&scst_cmd_count), cmd);
-+ list_add_tail(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
-+ spin_unlock_irqrestore(&cmd->cmd_threads->cmd_list_lock, flags);
-+ }
-+ return;
-+}
++ ** SCST sysfs device_groups/<dg>/target_groups/<tg>/<tgt> implementation.
++ **/
+
-+/**
-+ * scst_rx_cmd() - create new command
-+ * @sess: SCST session
-+ * @lun: LUN for the command
-+ * @lun_len: length of the LUN in bytes
-+ * @cdb: CDB of the command
-+ * @cdb_len: length of the CDB in bytes
-+ * @atomic: true, if current context is atomic
-+ *
-+ * Description:
-+ * Creates new SCST command. Returns new command on success or
-+ * NULL otherwise.
-+ *
-+ * Must not be called in parallel with scst_unregister_session() for the
-+ * same session.
-+ */
-+struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
-+ const uint8_t *lun, int lun_len, const uint8_t *cdb,
-+ unsigned int cdb_len, int atomic)
++static ssize_t scst_tg_tgt_rel_tgt_id_show(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ char *buf)
+{
-+ struct scst_cmd *cmd;
-+
-+ TRACE_ENTRY();
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ if (unlikely(sess->shut_phase != SCST_SESS_SPH_READY)) {
-+ PRINT_CRIT_ERROR("%s",
-+ "New cmd while shutting down the session");
-+ BUG();
-+ }
-+#endif
-+
-+ cmd = scst_alloc_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
-+ if (cmd == NULL)
-+ goto out;
-+
-+ cmd->sess = sess;
-+ cmd->tgt = sess->tgt;
-+ cmd->tgtt = sess->tgt->tgtt;
-+
-+ cmd->lun = scst_unpack_lun(lun, lun_len);
-+ if (unlikely(cmd->lun == NO_SUCH_LUN)) {
-+ PRINT_ERROR("Wrong LUN %d, finishing cmd", -1);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_lun_not_supported));
-+ }
-+
-+ /*
-+ * For cdb_len 0 defer the error reporting until scst_cmd_init_done(),
-+ * scst_set_cmd_error() supports nested calls.
-+ */
-+ if (unlikely(cdb_len > SCST_MAX_CDB_SIZE)) {
-+ PRINT_ERROR("Too big CDB len %d, finishing cmd", cdb_len);
-+ cdb_len = SCST_MAX_CDB_SIZE;
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_message));
-+ }
++ struct scst_tg_tgt *tg_tgt;
+
-+ memcpy(cmd->cdb, cdb, cdb_len);
-+ cmd->cdb_len = cdb_len;
-+
-+ TRACE_DBG("cmd %p, sess %p", cmd, sess);
-+ scst_sess_get(sess);
-+
-+out:
-+ TRACE_EXIT();
-+ return cmd;
++ tg_tgt = container_of(kobj, struct scst_tg_tgt, kobj);
++ return scnprintf(buf, PAGE_SIZE, "%u\n" SCST_SYSFS_KEY_MARK "\n",
++ tg_tgt->rel_tgt_id);
+}
-+EXPORT_SYMBOL(scst_rx_cmd);
+
-+/*
-+ * No locks, but might be on IRQ. Returns 0 on success, <0 if processing of
-+ * this command should be stopped.
-+ */
-+static int scst_init_cmd(struct scst_cmd *cmd, enum scst_exec_context *context)
++static ssize_t scst_tg_tgt_rel_tgt_id_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count)
+{
-+ int rc, res = 0;
++ struct scst_tg_tgt *tg_tgt;
++ unsigned long rel_tgt_id;
++ char ch[8];
++ int res;
+
+ TRACE_ENTRY();
-+
-+ /* See the comment in scst_do_job_init() */
-+ if (unlikely(!list_empty(&scst_init_cmd_list))) {
-+ TRACE_MGMT_DBG("%s", "init cmd list busy");
-+ goto out_redirect;
-+ }
-+ /*
-+ * Memory barrier isn't necessary here, because CPU appears to
-+ * be self-consistent and we don't care about the race, described
-+ * in comment in scst_do_job_init().
-+ */
-+
-+ rc = __scst_init_cmd(cmd);
-+ if (unlikely(rc > 0))
-+ goto out_redirect;
-+ else if (unlikely(rc != 0)) {
-+ res = 1;
++ tg_tgt = container_of(kobj, struct scst_tg_tgt, kobj);
++ snprintf(ch, sizeof(ch), "%.*s", min_t(int, count, sizeof(ch)-1), buf);
++ res = strict_strtoul(ch, 0, &rel_tgt_id);
++ if (res)
+ goto out;
-+ }
-+
-+ EXTRACHECKS_BUG_ON(*context == SCST_CONTEXT_SAME);
-+
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ scst_get_cdb_info(cmd);
-+ if (cmd->op_flags & SCST_TEST_IO_IN_SIRQ_ALLOWED)
++ res = -EINVAL;
++ if (rel_tgt_id == 0 || rel_tgt_id > 0xffff)
+ goto out;
-+#endif
-+
-+ /* Small context optimization */
-+ if ((*context == SCST_CONTEXT_TASKLET) ||
-+ (*context == SCST_CONTEXT_DIRECT_ATOMIC)) {
-+ /*
-+ * If any data_direction not set, it's SCST_DATA_UNKNOWN,
-+ * which is 0, so we can safely | them
-+ */
-+ BUILD_BUG_ON(SCST_DATA_UNKNOWN != 0);
-+ if ((cmd->data_direction | cmd->expected_data_direction) & SCST_DATA_WRITE) {
-+ if (!test_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
-+ &cmd->tgt_dev->tgt_dev_flags))
-+ *context = SCST_CONTEXT_THREAD;
-+ } else
-+ *context = SCST_CONTEXT_THREAD;
-+ }
-+
++ tg_tgt->rel_tgt_id = rel_tgt_id;
++ res = count;
+out:
+ TRACE_EXIT_RES(res);
+ return res;
-+
-+out_redirect:
-+ if (cmd->preprocessing_only) {
-+ /*
-+ * Poor man solution for single threaded targets, where
-+ * blocking receiver at least sometimes means blocking all.
-+ * For instance, iSCSI target won't be able to receive
-+ * Data-Out PDUs.
-+ */
-+ BUG_ON(*context != SCST_CONTEXT_DIRECT);
-+ scst_set_busy(cmd);
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ res = 1;
-+ /* Keep initiator away from too many BUSY commands */
-+ msleep(50);
-+ } else {
-+ unsigned long flags;
-+ spin_lock_irqsave(&scst_init_lock, flags);
-+ TRACE_MGMT_DBG("Adding cmd %p to init cmd list (scst_cmd_count "
-+ "%d)", cmd, atomic_read(&scst_cmd_count));
-+ list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
-+ if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
-+ scst_init_poll_cnt++;
-+ spin_unlock_irqrestore(&scst_init_lock, flags);
-+ wake_up(&scst_init_cmd_list_waitQ);
-+ res = -1;
-+ }
-+ goto out;
+}
+
-+/**
-+ * scst_cmd_init_done() - the command's initialization done
-+ * @cmd: SCST command
-+ * @pref_context: preferred command execution context
-+ *
-+ * Description:
-+ * Notifies SCST that the driver finished its part of the command
-+ * initialization, and the command is ready for execution.
-+ * The second argument sets preferred command execition context.
-+ * See SCST_CONTEXT_* constants for details.
-+ *
-+ * !!IMPORTANT!!
-+ *
-+ * If cmd->set_sn_on_restart_cmd not set, this function, as well as
-+ * scst_cmd_init_stage1_done() and scst_restart_cmd(), must not be
-+ * called simultaneously for the same session (more precisely,
-+ * for the same session/LUN, i.e. tgt_dev), i.e. they must be
-+ * somehow externally serialized. This is needed to have lock free fast
-+ * path in scst_cmd_set_sn(). For majority of targets those functions are
-+ * naturally serialized by the single source of commands. Only iSCSI
-+ * immediate commands with multiple connections per session seems to be an
-+ * exception. For it, some mutex/lock shall be used for the serialization.
-+ */
-+void scst_cmd_init_done(struct scst_cmd *cmd,
-+ enum scst_exec_context pref_context)
-+{
-+ unsigned long flags;
-+ struct scst_session *sess = cmd->sess;
-+ int rc;
-+
-+ TRACE_ENTRY();
-+
-+ scst_set_start_time(cmd);
-+
-+ TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
-+ TRACE(TRACE_SCSI, "tag=%llu, lun=%lld, CDB len=%d, queue_type=%x "
-+ "(cmd %p)", (long long unsigned int)cmd->tag,
-+ (long long unsigned int)cmd->lun, cmd->cdb_len,
-+ cmd->queue_type, cmd);
-+ PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_RCV_BOT, "Recieving CDB",
-+ cmd->cdb, cmd->cdb_len);
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ if (unlikely((in_irq() || irqs_disabled())) &&
-+ ((pref_context == SCST_CONTEXT_DIRECT) ||
-+ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
-+ PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
-+ "SCST_CONTEXT_THREAD instead", pref_context,
-+ cmd->tgtt->name);
-+ dump_stack();
-+ pref_context = SCST_CONTEXT_THREAD;
-+ }
-+#endif
-+
-+ atomic_inc(&sess->sess_cmd_count);
-+
-+ spin_lock_irqsave(&sess->sess_list_lock, flags);
-+
-+ if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
-+ /*
-+ * We must always keep commands in the sess list from the
-+ * very beginning, because otherwise they can be missed during
-+ * TM processing. This check is needed because there might be
-+ * old, i.e. deferred, commands and new, i.e. just coming, ones.
-+ */
-+ if (cmd->sess_cmd_list_entry.next == NULL)
-+ list_add_tail(&cmd->sess_cmd_list_entry,
-+ &sess->sess_cmd_list);
-+ switch (sess->init_phase) {
-+ case SCST_SESS_IPH_SUCCESS:
-+ break;
-+ case SCST_SESS_IPH_INITING:
-+ TRACE_DBG("Adding cmd %p to init deferred cmd list",
-+ cmd);
-+ list_add_tail(&cmd->cmd_list_entry,
-+ &sess->init_deferred_cmd_list);
-+ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
-+ goto out;
-+ case SCST_SESS_IPH_FAILED:
-+ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
-+ scst_set_busy(cmd);
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ goto active;
-+ default:
-+ BUG();
-+ }
-+ } else
-+ list_add_tail(&cmd->sess_cmd_list_entry,
-+ &sess->sess_cmd_list);
-+
-+ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
-+
-+ if (unlikely(cmd->cdb_len == 0)) {
-+ PRINT_ERROR("%s", "Wrong CDB len 0, finishing cmd");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ goto active;
-+ }
-+
-+ if (unlikely(cmd->queue_type >= SCST_CMD_QUEUE_ACA)) {
-+ PRINT_ERROR("Unsupported queue type %d", cmd->queue_type);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_message));
-+ goto active;
-+ }
-+
-+ /*
-+ * Cmd must be inited here to preserve the order. In case if cmd
-+ * already preliminary completed by target driver we need to init
-+ * cmd anyway to find out in which format we should return sense.
-+ */
-+ cmd->state = SCST_CMD_STATE_INIT;
-+ rc = scst_init_cmd(cmd, &pref_context);
-+ if (unlikely(rc < 0))
-+ goto out;
-+
-+active:
-+ /* Here cmd must not be in any cmd list, no locks */
-+ switch (pref_context) {
-+ case SCST_CONTEXT_TASKLET:
-+ scst_schedule_tasklet(cmd);
-+ break;
-+
-+ default:
-+ PRINT_ERROR("Context %x is undefined, using the thread one",
-+ pref_context);
-+ /* go through */
-+ case SCST_CONTEXT_THREAD:
-+ spin_lock_irqsave(&cmd->cmd_threads->cmd_list_lock, flags);
-+ TRACE_DBG("Adding cmd %p to active cmd list", cmd);
-+ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
-+ list_add(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ else
-+ list_add_tail(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
-+ spin_unlock_irqrestore(&cmd->cmd_threads->cmd_list_lock, flags);
-+ break;
-+
-+ case SCST_CONTEXT_DIRECT:
-+ scst_process_active_cmd(cmd, false);
-+ break;
-+
-+ case SCST_CONTEXT_DIRECT_ATOMIC:
-+ scst_process_active_cmd(cmd, true);
-+ break;
-+ }
++static struct kobj_attribute scst_tg_tgt_rel_tgt_id =
++ __ATTR(rel_tgt_id, S_IRUGO | S_IWUSR, scst_tg_tgt_rel_tgt_id_show,
++ scst_tg_tgt_rel_tgt_id_store);
+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL(scst_cmd_init_done);
++static const struct attribute *scst_tg_tgt_attrs[] = {
++ &scst_tg_tgt_rel_tgt_id.attr,
++ NULL,
++};
+
-+static int scst_pre_parse(struct scst_cmd *cmd)
++int scst_tg_tgt_sysfs_add(struct scst_target_group *tg,
++ struct scst_tg_tgt *tg_tgt)
+{
+ int res;
-+ struct scst_device *dev = cmd->dev;
-+ int rc;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+ * Expected transfer data supplied by the SCSI transport via the
-+ * target driver are untrusted, so we prefer to fetch them from CDB.
-+ * Additionally, not all transports support supplying the expected
-+ * transfer data.
-+ */
-+
-+ rc = scst_get_cdb_info(cmd);
-+ if (unlikely(rc != 0)) {
-+ if (rc > 0) {
-+ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
-+ goto out_err;
-+ }
-+
-+ EXTRACHECKS_BUG_ON(cmd->op_flags & SCST_INFO_VALID);
-+
-+ TRACE(TRACE_MINOR, "Unknown opcode 0x%02x for %s. "
-+ "Should you update scst_scsi_op_table?",
-+ cmd->cdb[0], dev->handler->name);
-+ PRINT_BUFF_FLAG(TRACE_MINOR, "Failed CDB", cmd->cdb,
-+ cmd->cdb_len);
-+ } else
-+ EXTRACHECKS_BUG_ON(!(cmd->op_flags & SCST_INFO_VALID));
-+
-+#ifdef CONFIG_SCST_STRICT_SERIALIZING
-+ cmd->inc_expected_sn_on_done = 1;
-+#else
-+ cmd->inc_expected_sn_on_done = dev->handler->exec_sync ||
-+ (!dev->has_own_order_mgmt &&
-+ (dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER ||
-+ cmd->queue_type == SCST_CMD_QUEUE_ORDERED));
-+#endif
-+
-+ TRACE_DBG("op_name <%s> (cmd %p), direction=%d "
-+ "(expected %d, set %s), bufflen=%d, out_bufflen=%d (expected "
-+ "len %d, out expected len %d), flags=%d", cmd->op_name, cmd,
-+ cmd->data_direction, cmd->expected_data_direction,
-+ scst_cmd_is_expected_set(cmd) ? "yes" : "no",
-+ cmd->bufflen, cmd->out_bufflen, cmd->expected_transfer_len,
-+ cmd->expected_out_transfer_len, cmd->op_flags);
-+
-+ res = 0;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_err:
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ res = -1;
-+ goto out;
-+}
-+
-+#ifndef CONFIG_SCST_USE_EXPECTED_VALUES
-+static bool scst_is_allowed_to_mismatch_cmd(struct scst_cmd *cmd)
-+{
-+ bool res = false;
-+
-+ /* VERIFY commands with BYTCHK unset shouldn't fail here */
-+ if ((cmd->op_flags & SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED) &&
-+ (cmd->cdb[1] & BYTCHK) == 0) {
-+ res = true;
-+ goto out;
-+ }
-+
-+ switch (cmd->cdb[0]) {
-+ case TEST_UNIT_READY:
-+ /* Crazy VMware people sometimes do TUR with READ direction */
-+ if ((cmd->expected_data_direction == SCST_DATA_READ) ||
-+ (cmd->expected_data_direction == SCST_DATA_NONE))
-+ res = true;
-+ break;
-+ }
-+
-+out:
-+ return res;
-+}
-+#endif
-+
-+static int scst_parse_cmd(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_RES_CONT_SAME;
-+ int state;
-+ struct scst_device *dev = cmd->dev;
-+ int orig_bufflen = cmd->bufflen;
+
+ TRACE_ENTRY();
-+
-+ if (likely(!scst_is_cmd_fully_local(cmd))) {
-+ if (unlikely(!dev->handler->parse_atomic &&
-+ scst_cmd_atomic(cmd))) {
-+ /*
-+ * It shouldn't be because of the SCST_TGT_DEV_AFTER_*
-+ * optimization.
-+ */
-+ TRACE_MGMT_DBG("Dev handler %s parse() needs thread "
-+ "context, rescheduling", dev->handler->name);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+ }
-+
-+ TRACE_DBG("Calling dev handler %s parse(%p)",
-+ dev->handler->name, cmd);
-+ TRACE_BUFF_FLAG(TRACE_SND_BOT, "Parsing: ",
-+ cmd->cdb, cmd->cdb_len);
-+ scst_set_cur_start(cmd);
-+ state = dev->handler->parse(cmd);
-+ /* Caution: cmd can be already dead here */
-+ TRACE_DBG("Dev handler %s parse() returned %d",
-+ dev->handler->name, state);
-+
-+ switch (state) {
-+ case SCST_CMD_STATE_NEED_THREAD_CTX:
-+ scst_set_parse_time(cmd);
-+ TRACE_DBG("Dev handler %s parse() requested thread "
-+ "context, rescheduling", dev->handler->name);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+
-+ case SCST_CMD_STATE_STOP:
-+ TRACE_DBG("Dev handler %s parse() requested stop "
-+ "processing", dev->handler->name);
-+ res = SCST_CMD_STATE_RES_CONT_NEXT;
-+ goto out;
-+ }
-+
-+ scst_set_parse_time(cmd);
-+
-+ if (state == SCST_CMD_STATE_DEFAULT)
-+ state = SCST_CMD_STATE_PREPARE_SPACE;
-+ } else
-+ state = SCST_CMD_STATE_PREPARE_SPACE;
-+
-+ if (unlikely(state == SCST_CMD_STATE_PRE_XMIT_RESP))
-+ goto set_res;
-+
-+ if (unlikely(!(cmd->op_flags & SCST_INFO_VALID))) {
-+#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
-+ if (scst_cmd_is_expected_set(cmd)) {
-+ TRACE(TRACE_MINOR, "Using initiator supplied values: "
-+ "direction %d, transfer_len %d/%d",
-+ cmd->expected_data_direction,
-+ cmd->expected_transfer_len,
-+ cmd->expected_out_transfer_len);
-+ cmd->data_direction = cmd->expected_data_direction;
-+ cmd->bufflen = cmd->expected_transfer_len;
-+ cmd->out_bufflen = cmd->expected_out_transfer_len;
-+ } else {
-+ PRINT_ERROR("Unknown opcode 0x%02x for %s and "
-+ "target %s not supplied expected values",
-+ cmd->cdb[0], dev->handler->name, cmd->tgtt->name);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
-+ goto out_done;
-+ }
-+#else
-+ /*
-+ * Let's ignore reporting T10/04-262r7 16-byte and 12-byte ATA
-+ * pass-thru commands to not pollute logs (udev(?) checks them
-+ * for some reason). If somebody has their description, please,
-+ * update scst_scsi_op_table.
-+ */
-+ if ((cmd->cdb[0] != 0x85) && (cmd->cdb[0] != 0xa1))
-+ PRINT_ERROR("Refusing unknown opcode %x", cmd->cdb[0]);
-+ else
-+ TRACE(TRACE_MINOR, "Refusing unknown opcode %x",
-+ cmd->cdb[0]);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
-+ goto out_done;
-+#endif
-+ }
-+
-+ if (unlikely(cmd->cdb_len == 0)) {
-+ PRINT_ERROR("Unable to get CDB length for "
-+ "opcode 0x%02x. Returning INVALID "
-+ "OPCODE", cmd->cdb[0]);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
-+ goto out_done;
-+ }
-+
-+ EXTRACHECKS_BUG_ON(cmd->cdb_len == 0);
-+
-+ TRACE(TRACE_SCSI, "op_name <%s> (cmd %p), direction=%d "
-+ "(expected %d, set %s), bufflen=%d, out_bufflen=%d, (expected "
-+ "len %d, out expected len %d), flags=%x", cmd->op_name, cmd,
-+ cmd->data_direction, cmd->expected_data_direction,
-+ scst_cmd_is_expected_set(cmd) ? "yes" : "no",
-+ cmd->bufflen, cmd->out_bufflen, cmd->expected_transfer_len,
-+ cmd->expected_out_transfer_len, cmd->op_flags);
-+
-+ if (unlikely((cmd->op_flags & SCST_UNKNOWN_LENGTH) != 0)) {
-+ if (scst_cmd_is_expected_set(cmd)) {
-+ /*
-+ * Command data length can't be easily
-+ * determined from the CDB. ToDo, all such
-+ * commands processing should be fixed. Until
-+ * it's done, get the length from the supplied
-+ * expected value, but limit it to some
-+ * reasonable value (15MB).
-+ */
-+ cmd->bufflen = min(cmd->expected_transfer_len,
-+ 15*1024*1024);
-+ if (cmd->data_direction == SCST_DATA_BIDI)
-+ cmd->out_bufflen = min(cmd->expected_out_transfer_len,
-+ 15*1024*1024);
-+ cmd->op_flags &= ~SCST_UNKNOWN_LENGTH;
-+ } else {
-+ PRINT_ERROR("Unknown data transfer length for opcode "
-+ "0x%x (handler %s, target %s)", cmd->cdb[0],
-+ dev->handler->name, cmd->tgtt->name);
-+ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_message));
-+ goto out_done;
-+ }
-+ }
-+
-+ if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
-+ PRINT_ERROR("NACA bit in control byte CDB is not supported "
-+ "(opcode 0x%02x)", cmd->cdb[0]);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_done;
-+ }
-+
-+ if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
-+ PRINT_ERROR("Linked commands are not supported "
-+ "(opcode 0x%02x)", cmd->cdb[0]);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_done;
-+ }
-+
-+ if (cmd->dh_data_buf_alloced &&
-+ unlikely((orig_bufflen > cmd->bufflen))) {
-+ PRINT_ERROR("Dev handler supplied data buffer (size %d), "
-+ "is less, than required (size %d)", cmd->bufflen,
-+ orig_bufflen);
-+ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
-+ goto out_hw_error;
-+ }
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ if ((cmd->bufflen != 0) &&
-+ ((cmd->data_direction == SCST_DATA_NONE) ||
-+ ((cmd->sg == NULL) && (state > SCST_CMD_STATE_PREPARE_SPACE)))) {
-+ PRINT_ERROR("Dev handler %s parse() returned "
-+ "invalid cmd data_direction %d, bufflen %d, state %d "
-+ "or sg %p (opcode 0x%x)", dev->handler->name,
-+ cmd->data_direction, cmd->bufflen, state, cmd->sg,
-+ cmd->cdb[0]);
-+ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
-+ goto out_hw_error;
-+ }
-+#endif
-+
-+ if (scst_cmd_is_expected_set(cmd)) {
-+#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
-+ if (unlikely((cmd->data_direction != cmd->expected_data_direction) ||
-+ (cmd->bufflen != cmd->expected_transfer_len) ||
-+ (cmd->out_bufflen != cmd->expected_out_transfer_len))) {
-+ TRACE(TRACE_MINOR, "Expected values don't match "
-+ "decoded ones: data_direction %d, "
-+ "expected_data_direction %d, "
-+ "bufflen %d, expected_transfer_len %d, "
-+ "out_bufflen %d, expected_out_transfer_len %d",
-+ cmd->data_direction,
-+ cmd->expected_data_direction,
-+ cmd->bufflen, cmd->expected_transfer_len,
-+ cmd->out_bufflen, cmd->expected_out_transfer_len);
-+ PRINT_BUFF_FLAG(TRACE_MINOR, "Suspicious CDB",
-+ cmd->cdb, cmd->cdb_len);
-+ cmd->data_direction = cmd->expected_data_direction;
-+ cmd->bufflen = cmd->expected_transfer_len;
-+ cmd->out_bufflen = cmd->expected_out_transfer_len;
-+ cmd->resid_possible = 1;
-+ }
-+#else
-+ if (unlikely(cmd->data_direction !=
-+ cmd->expected_data_direction)) {
-+ if (((cmd->expected_data_direction != SCST_DATA_NONE) ||
-+ (cmd->bufflen != 0)) &&
-+ !scst_is_allowed_to_mismatch_cmd(cmd)) {
-+ PRINT_ERROR("Expected data direction %d for "
-+ "opcode 0x%02x (handler %s, target %s) "
-+ "doesn't match decoded value %d",
-+ cmd->expected_data_direction,
-+ cmd->cdb[0], dev->handler->name,
-+ cmd->tgtt->name, cmd->data_direction);
-+ PRINT_BUFFER("Failed CDB", cmd->cdb,
-+ cmd->cdb_len);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_message));
-+ goto out_done;
-+ }
-+ }
-+ if (unlikely(cmd->bufflen != cmd->expected_transfer_len)) {
-+ TRACE(TRACE_MINOR, "Warning: expected "
-+ "transfer length %d for opcode 0x%02x "
-+ "(handler %s, target %s) doesn't match "
-+ "decoded value %d",
-+ cmd->expected_transfer_len, cmd->cdb[0],
-+ dev->handler->name, cmd->tgtt->name,
-+ cmd->bufflen);
-+ PRINT_BUFF_FLAG(TRACE_MINOR, "Suspicious CDB",
-+ cmd->cdb, cmd->cdb_len);
-+ if ((cmd->data_direction & SCST_DATA_READ) ||
-+ (cmd->data_direction & SCST_DATA_WRITE))
-+ cmd->resid_possible = 1;
-+ }
-+ if (unlikely(cmd->out_bufflen != cmd->expected_out_transfer_len)) {
-+ TRACE(TRACE_MINOR, "Warning: expected bidirectional OUT "
-+ "transfer length %d for opcode 0x%02x "
-+ "(handler %s, target %s) doesn't match "
-+ "decoded value %d",
-+ cmd->expected_out_transfer_len, cmd->cdb[0],
-+ dev->handler->name, cmd->tgtt->name,
-+ cmd->out_bufflen);
-+ PRINT_BUFF_FLAG(TRACE_MINOR, "Suspicious CDB",
-+ cmd->cdb, cmd->cdb_len);
-+ cmd->resid_possible = 1;
-+ }
-+#endif
-+ }
-+
-+ if (unlikely(cmd->data_direction == SCST_DATA_UNKNOWN)) {
-+ PRINT_ERROR("Unknown data direction. Opcode 0x%x, handler %s, "
-+ "target %s", cmd->cdb[0], dev->handler->name,
-+ cmd->tgtt->name);
-+ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
-+ goto out_hw_error;
-+ }
-+
-+set_res:
-+ if (cmd->data_len == -1)
-+ cmd->data_len = cmd->bufflen;
-+
-+ if (cmd->bufflen == 0) {
-+ /*
-+ * According to SPC bufflen 0 for data transfer commands isn't
-+ * an error, so we need to fix the transfer direction.
-+ */
-+ cmd->data_direction = SCST_DATA_NONE;
-+ }
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ switch (state) {
-+ case SCST_CMD_STATE_PREPARE_SPACE:
-+ case SCST_CMD_STATE_PARSE:
-+ case SCST_CMD_STATE_RDY_TO_XFER:
-+ case SCST_CMD_STATE_TGT_PRE_EXEC:
-+ case SCST_CMD_STATE_SEND_FOR_EXEC:
-+ case SCST_CMD_STATE_LOCAL_EXEC:
-+ case SCST_CMD_STATE_REAL_EXEC:
-+ case SCST_CMD_STATE_PRE_DEV_DONE:
-+ case SCST_CMD_STATE_DEV_DONE:
-+ case SCST_CMD_STATE_PRE_XMIT_RESP:
-+ case SCST_CMD_STATE_XMIT_RESP:
-+ case SCST_CMD_STATE_FINISHED:
-+ case SCST_CMD_STATE_FINISHED_INTERNAL:
-+#endif
-+ cmd->state = state;
-+ res = SCST_CMD_STATE_RES_CONT_SAME;
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ break;
-+
-+ default:
-+ if (state >= 0) {
-+ PRINT_ERROR("Dev handler %s parse() returned "
-+ "invalid cmd state %d (opcode %d)",
-+ dev->handler->name, state, cmd->cdb[0]);
-+ } else {
-+ PRINT_ERROR("Dev handler %s parse() returned "
-+ "error %d (opcode %d)", dev->handler->name,
-+ state, cmd->cdb[0]);
-+ }
-+ goto out_hw_error;
-+ }
-+#endif
-+
-+ if (cmd->resp_data_len == -1) {
-+ if (cmd->data_direction & SCST_DATA_READ)
-+ cmd->resp_data_len = cmd->bufflen;
-+ else
-+ cmd->resp_data_len = 0;
-+ }
-+
-+ /* We already completed (with an error) */
-+ if (unlikely(cmd->completed))
-+ goto out_done;
-+
-+#ifndef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ /*
-+ * We can't allow atomic command on the exec stages. It shouldn't
-+ * be because of the SCST_TGT_DEV_AFTER_* optimization, but during
-+ * parsing data_direction can change, so we need to recheck.
-+ */
-+ if (unlikely(scst_cmd_atomic(cmd) &&
-+ !(cmd->data_direction & SCST_DATA_WRITE))) {
-+ TRACE_DBG_FLAG(TRACE_DEBUG|TRACE_MINOR, "Atomic context and "
-+ "non-WRITE data direction, rescheduling (cmd %p)", cmd);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
++ BUG_ON(!tg);
++ BUG_ON(!tg_tgt);
++ BUG_ON(!tg_tgt->name);
++ if (tg_tgt->tgt)
++ res = sysfs_create_link(&tg->kobj, &tg_tgt->tgt->tgt_kobj,
++ tg_tgt->name);
++ else {
++ res = kobject_add(&tg_tgt->kobj, &tg->kobj, "%s", tg_tgt->name);
++ if (res)
++ goto err;
++ res = sysfs_create_files(&tg_tgt->kobj, scst_tg_tgt_attrs);
++ if (res)
++ goto err;
+ }
-+#endif
-+
+out:
-+ TRACE_EXIT_HRES(res);
++ TRACE_EXIT_RES(res);
+ return res;
-+
-+out_hw_error:
-+ /* dev_done() will be called as part of the regular cmd's finish */
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
-+
-+out_done:
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ res = SCST_CMD_STATE_RES_CONT_SAME;
++err:
++ scst_tg_tgt_sysfs_del(tg, tg_tgt);
+ goto out;
+}
+
-+static void scst_set_write_len(struct scst_cmd *cmd)
++void scst_tg_tgt_sysfs_del(struct scst_target_group *tg,
++ struct scst_tg_tgt *tg_tgt)
+{
+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(!(cmd->data_direction & SCST_DATA_WRITE));
-+
-+ if (cmd->data_direction & SCST_DATA_READ) {
-+ cmd->write_len = cmd->out_bufflen;
-+ cmd->write_sg = &cmd->out_sg;
-+ cmd->write_sg_cnt = &cmd->out_sg_cnt;
-+ } else {
-+ cmd->write_len = cmd->bufflen;
-+ /* write_sg and write_sg_cnt already initialized correctly */
-+ }
-+
-+ TRACE_MEM("cmd %p, write_len %d, write_sg %p, write_sg_cnt %d, "
-+ "resid_possible %d", cmd, cmd->write_len, *cmd->write_sg,
-+ *cmd->write_sg_cnt, cmd->resid_possible);
-+
-+ if (unlikely(cmd->resid_possible)) {
-+ if (cmd->data_direction & SCST_DATA_READ) {
-+ cmd->write_len = min(cmd->out_bufflen,
-+ cmd->expected_out_transfer_len);
-+ if (cmd->write_len == cmd->out_bufflen)
-+ goto out;
-+ } else {
-+ cmd->write_len = min(cmd->bufflen,
-+ cmd->expected_transfer_len);
-+ if (cmd->write_len == cmd->bufflen)
-+ goto out;
-+ }
-+ scst_limit_sg_write_len(cmd);
++ if (tg_tgt->tgt)
++ sysfs_remove_link(&tg->kobj, tg_tgt->name);
++ else {
++ sysfs_remove_files(&tg_tgt->kobj, scst_tg_tgt_attrs);
++ kobject_del(&tg_tgt->kobj);
+ }
-+
-+out:
+ TRACE_EXIT();
-+ return;
+}
+
-+static int scst_prepare_space(struct scst_cmd *cmd)
-+{
-+ int r = 0, res = SCST_CMD_STATE_RES_CONT_SAME;
-+ struct scst_device *dev = cmd->dev;
-+
-+ TRACE_ENTRY();
-+
-+ if (cmd->data_direction == SCST_DATA_NONE)
-+ goto done;
-+
-+ if (likely(!scst_is_cmd_fully_local(cmd)) &&
-+ (dev->handler->alloc_data_buf != NULL)) {
-+ int state;
-+
-+ if (unlikely(!dev->handler->alloc_data_buf_atomic &&
-+ scst_cmd_atomic(cmd))) {
-+ /*
-+ * It shouldn't be because of the SCST_TGT_DEV_AFTER_*
-+ * optimization.
-+ */
-+ TRACE_MGMT_DBG("Dev handler %s alloc_data_buf() needs "
-+ "thread context, rescheduling",
-+ dev->handler->name);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+ }
-+
-+ TRACE_DBG("Calling dev handler %s alloc_data_buf(%p)",
-+ dev->handler->name, cmd);
-+ scst_set_cur_start(cmd);
-+ state = dev->handler->alloc_data_buf(cmd);
-+ /* Caution: cmd can be already dead here */
-+ TRACE_DBG("Dev handler %s alloc_data_buf() returned %d",
-+ dev->handler->name, state);
-+
-+ switch (state) {
-+ case SCST_CMD_STATE_NEED_THREAD_CTX:
-+ scst_set_alloc_buf_time(cmd);
-+ TRACE_DBG("Dev handler %s alloc_data_buf() requested "
-+ "thread context, rescheduling",
-+ dev->handler->name);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+
-+ case SCST_CMD_STATE_STOP:
-+ TRACE_DBG("Dev handler %s alloc_data_buf() requested "
-+ "stop processing", dev->handler->name);
-+ res = SCST_CMD_STATE_RES_CONT_NEXT;
-+ goto out;
-+ }
-+
-+ scst_set_alloc_buf_time(cmd);
-+
-+ if (unlikely(state != SCST_CMD_STATE_DEFAULT)) {
-+ cmd->state = state;
-+ goto out;
-+ }
-+ }
-+
-+ if (cmd->tgt_need_alloc_data_buf) {
-+ int orig_bufflen = cmd->bufflen;
-+
-+ TRACE_MEM("Custom tgt data buf allocation requested (cmd %p)",
-+ cmd);
-+
-+ scst_set_cur_start(cmd);
-+ r = cmd->tgtt->alloc_data_buf(cmd);
-+ scst_set_alloc_buf_time(cmd);
-+
-+ if (r > 0)
-+ goto alloc;
-+ else if (r == 0) {
-+ if (unlikely(cmd->bufflen == 0)) {
-+ /* See comment in scst_alloc_space() */
-+ if (cmd->sg == NULL)
-+ goto alloc;
-+ }
-+
-+ cmd->tgt_data_buf_alloced = 1;
-+
-+ if (unlikely(orig_bufflen < cmd->bufflen)) {
-+ PRINT_ERROR("Target driver allocated data "
-+ "buffer (size %d), is less, than "
-+ "required (size %d)", orig_bufflen,
-+ cmd->bufflen);
-+ goto out_error;
-+ }
-+ TRACE_MEM("tgt_data_buf_alloced (cmd %p)", cmd);
-+ } else
-+ goto check;
-+ }
-+
-+alloc:
-+ if (!cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
-+ r = scst_alloc_space(cmd);
-+ } else if (cmd->dh_data_buf_alloced && !cmd->tgt_data_buf_alloced) {
-+ TRACE_MEM("dh_data_buf_alloced set (cmd %p)", cmd);
-+ r = 0;
-+ } else if (cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
-+ TRACE_MEM("tgt_data_buf_alloced set (cmd %p)", cmd);
-+ cmd->sg = cmd->tgt_sg;
-+ cmd->sg_cnt = cmd->tgt_sg_cnt;
-+ cmd->out_sg = cmd->tgt_out_sg;
-+ cmd->out_sg_cnt = cmd->tgt_out_sg_cnt;
-+ r = 0;
-+ } else {
-+ TRACE_MEM("Both *_data_buf_alloced set (cmd %p, sg %p, "
-+ "sg_cnt %d, tgt_sg %p, tgt_sg_cnt %d)", cmd, cmd->sg,
-+ cmd->sg_cnt, cmd->tgt_sg, cmd->tgt_sg_cnt);
-+ r = 0;
-+ }
-+
-+check:
-+ if (r != 0) {
-+ if (scst_cmd_atomic(cmd)) {
-+ TRACE_MEM("%s", "Atomic memory allocation failed, "
-+ "rescheduling to the thread");
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+ } else
-+ goto out_no_space;
-+ }
-+
-+done:
-+ if (cmd->preprocessing_only) {
-+ cmd->state = SCST_CMD_STATE_PREPROCESSING_DONE;
-+ if (cmd->data_direction & SCST_DATA_WRITE)
-+ scst_set_write_len(cmd);
-+ } else if (cmd->data_direction & SCST_DATA_WRITE) {
-+ cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
-+ scst_set_write_len(cmd);
-+ } else
-+ cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
-+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
++/**
++ ** SCST sysfs device_groups/<dg>/target_groups/<tg> directory implementation.
++ **/
+
-+out_no_space:
-+ TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
-+ "(size %d), sending BUSY or QUEUE FULL status", cmd->bufflen);
-+ scst_set_busy(cmd);
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ res = SCST_CMD_STATE_RES_CONT_SAME;
-+ goto out;
++static ssize_t scst_tg_group_id_show(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ char *buf)
++{
++ struct scst_target_group *tg;
+
-+out_error:
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ res = SCST_CMD_STATE_RES_CONT_SAME;
-+ goto out;
++ tg = container_of(kobj, struct scst_target_group, kobj);
++ return scnprintf(buf, PAGE_SIZE, "%u\n" SCST_SYSFS_KEY_MARK "\n",
++ tg->group_id);
+}
+
-+static int scst_preprocessing_done(struct scst_cmd *cmd)
++static ssize_t scst_tg_group_id_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count)
+{
++ struct scst_target_group *tg;
++ unsigned long group_id;
++ char ch[8];
+ int res;
+
+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(!cmd->preprocessing_only);
-+
-+ cmd->preprocessing_only = 0;
-+
-+ res = SCST_CMD_STATE_RES_CONT_NEXT;
-+ cmd->state = SCST_CMD_STATE_PREPROCESSING_DONE_CALLED;
-+
-+ TRACE_DBG("Calling preprocessing_done(cmd %p)", cmd);
-+ scst_set_cur_start(cmd);
-+ cmd->tgtt->preprocessing_done(cmd);
-+ TRACE_DBG("%s", "preprocessing_done() returned");
-+
-+ TRACE_EXIT_HRES(res);
++ tg = container_of(kobj, struct scst_target_group, kobj);
++ snprintf(ch, sizeof(ch), "%.*s", min_t(int, count, sizeof(ch)-1), buf);
++ res = strict_strtoul(ch, 0, &group_id);
++ if (res)
++ goto out;
++ res = -EINVAL;
++ if (group_id == 0 || group_id > 0xffff)
++ goto out;
++ tg->group_id = group_id;
++ res = count;
++out:
++ TRACE_EXIT_RES(res);
+ return res;
+}
+
-+/**
-+ * scst_restart_cmd() - restart execution of the command
-+ * @cmd: SCST commands
-+ * @status: completion status
-+ * @pref_context: preferred command execition context
-+ *
-+ * Description:
-+ * Notifies SCST that the driver finished its part of the command's
-+ * preprocessing and it is ready for further processing.
-+ *
-+ * The second argument sets completion status
-+ * (see SCST_PREPROCESS_STATUS_* constants for details)
-+ *
-+ * See also comment for scst_cmd_init_done() for the serialization
-+ * requirements.
-+ */
-+void scst_restart_cmd(struct scst_cmd *cmd, int status,
-+ enum scst_exec_context pref_context)
-+{
-+ TRACE_ENTRY();
-+
-+ scst_set_restart_waiting_time(cmd);
-+
-+ TRACE_DBG("Preferred context: %d", pref_context);
-+ TRACE_DBG("tag=%llu, status=%#x",
-+ (long long unsigned int)scst_cmd_get_tag(cmd),
-+ status);
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ if ((in_irq() || irqs_disabled()) &&
-+ ((pref_context == SCST_CONTEXT_DIRECT) ||
-+ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
-+ PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
-+ "SCST_CONTEXT_THREAD instead", pref_context,
-+ cmd->tgtt->name);
-+ dump_stack();
-+ pref_context = SCST_CONTEXT_THREAD;
-+ }
-+#endif
-+
-+ switch (status) {
-+ case SCST_PREPROCESS_STATUS_SUCCESS:
-+ if (cmd->data_direction & SCST_DATA_WRITE)
-+ cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
-+ else
-+ cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
-+ if (cmd->set_sn_on_restart_cmd)
-+ scst_cmd_set_sn(cmd);
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ if (cmd->op_flags & SCST_TEST_IO_IN_SIRQ_ALLOWED)
-+ break;
-+#endif
-+ /* Small context optimization */
-+ if ((pref_context == SCST_CONTEXT_TASKLET) ||
-+ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
-+ ((pref_context == SCST_CONTEXT_SAME) &&
-+ scst_cmd_atomic(cmd)))
-+ pref_context = SCST_CONTEXT_THREAD;
-+ break;
-+
-+ case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ pref_context = SCST_CONTEXT_THREAD;
-+ break;
-+
-+ case SCST_PREPROCESS_STATUS_ERROR_FATAL:
-+ set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
-+ /* go through */
-+ case SCST_PREPROCESS_STATUS_ERROR:
-+ if (cmd->sense != NULL)
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ pref_context = SCST_CONTEXT_THREAD;
-+ break;
-+
-+ default:
-+ PRINT_ERROR("%s() received unknown status %x", __func__,
-+ status);
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ pref_context = SCST_CONTEXT_THREAD;
-+ break;
-+ }
++static struct kobj_attribute scst_tg_group_id =
++ __ATTR(group_id, S_IRUGO | S_IWUSR, scst_tg_group_id_show,
++ scst_tg_group_id_store);
+
-+ scst_process_redirect_cmd(cmd, pref_context, 1);
++static ssize_t scst_tg_preferred_show(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ char *buf)
++{
++ struct scst_target_group *tg;
+
-+ TRACE_EXIT();
-+ return;
++ tg = container_of(kobj, struct scst_target_group, kobj);
++ return scnprintf(buf, PAGE_SIZE, "%u\n%s",
++ tg->preferred, SCST_SYSFS_KEY_MARK "\n");
+}
-+EXPORT_SYMBOL(scst_restart_cmd);
+
-+static int scst_rdy_to_xfer(struct scst_cmd *cmd)
++static ssize_t scst_tg_preferred_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count)
+{
-+ int res, rc;
-+ struct scst_tgt_template *tgtt = cmd->tgtt;
++ struct scst_target_group *tg;
++ unsigned long preferred;
++ char ch[8];
++ int res;
+
+ TRACE_ENTRY();
-+
-+ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
-+ TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
-+ goto out_dev_done;
-+ }
-+
-+ if ((tgtt->rdy_to_xfer == NULL) || unlikely(cmd->internal)) {
-+ cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
-+#ifndef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ /* We can't allow atomic command on the exec stages */
-+ if (scst_cmd_atomic(cmd)) {
-+ TRACE_DBG("NULL rdy_to_xfer() and atomic context, "
-+ "rescheduling (cmd %p)", cmd);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ } else
-+#endif
-+ res = SCST_CMD_STATE_RES_CONT_SAME;
++ tg = container_of(kobj, struct scst_target_group, kobj);
++ snprintf(ch, sizeof(ch), "%.*s", min_t(int, count, sizeof(ch)-1), buf);
++ res = strict_strtoul(ch, 0, &preferred);
++ if (res)
+ goto out;
-+ }
-+
-+ if (unlikely(!tgtt->rdy_to_xfer_atomic && scst_cmd_atomic(cmd))) {
-+ /*
-+ * It shouldn't be because of the SCST_TGT_DEV_AFTER_*
-+ * optimization.
-+ */
-+ TRACE_MGMT_DBG("Target driver %s rdy_to_xfer() needs thread "
-+ "context, rescheduling", tgtt->name);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
++ res = -EINVAL;
++ if (preferred != 0 && preferred != 1)
+ goto out;
-+ }
-+
-+ while (1) {
-+ int finished_cmds = atomic_read(&cmd->tgt->finished_cmds);
-+
-+ res = SCST_CMD_STATE_RES_CONT_NEXT;
-+ cmd->state = SCST_CMD_STATE_DATA_WAIT;
-+
-+ if (tgtt->on_hw_pending_cmd_timeout != NULL) {
-+ struct scst_session *sess = cmd->sess;
-+ cmd->hw_pending_start = jiffies;
-+ cmd->cmd_hw_pending = 1;
-+ if (!test_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags)) {
-+ TRACE_DBG("Sched HW pending work for sess %p "
-+ "(max time %d)", sess,
-+ tgtt->max_hw_pending_time);
-+ set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED,
-+ &sess->sess_aflags);
-+ schedule_delayed_work(&sess->hw_pending_work,
-+ tgtt->max_hw_pending_time * HZ);
-+ }
-+ }
-+
-+ scst_set_cur_start(cmd);
-+
-+ TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
-+#ifdef CONFIG_SCST_DEBUG_RETRY
-+ if (((scst_random() % 100) == 75))
-+ rc = SCST_TGT_RES_QUEUE_FULL;
-+ else
-+#endif
-+ rc = tgtt->rdy_to_xfer(cmd);
-+ TRACE_DBG("rdy_to_xfer() returned %d", rc);
-+
-+ if (likely(rc == SCST_TGT_RES_SUCCESS))
-+ goto out;
-+
-+ scst_set_rdy_to_xfer_time(cmd);
-+
-+ cmd->cmd_hw_pending = 0;
-+
-+ /* Restore the previous state */
-+ cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
-+
-+ switch (rc) {
-+ case SCST_TGT_RES_QUEUE_FULL:
-+ if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
-+ break;
-+ else
-+ continue;
-+
-+ case SCST_TGT_RES_NEED_THREAD_CTX:
-+ TRACE_DBG("Target driver %s "
-+ "rdy_to_xfer() requested thread "
-+ "context, rescheduling", tgtt->name);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ break;
-+
-+ default:
-+ goto out_error_rc;
-+ }
-+ break;
-+ }
-+
++ tg->preferred = preferred;
++ res = count;
+out:
-+ TRACE_EXIT_HRES(res);
++ TRACE_EXIT_RES(res);
+ return res;
-+
-+out_error_rc:
-+ if (rc == SCST_TGT_RES_FATAL_ERROR) {
-+ PRINT_ERROR("Target driver %s rdy_to_xfer() returned "
-+ "fatal error", tgtt->name);
-+ } else {
-+ PRINT_ERROR("Target driver %s rdy_to_xfer() returned invalid "
-+ "value %d", tgtt->name, rc);
-+ }
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
-+
-+out_dev_done:
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ res = SCST_CMD_STATE_RES_CONT_SAME;
-+ goto out;
+}
+
-+/* No locks, but might be in IRQ */
-+static void scst_process_redirect_cmd(struct scst_cmd *cmd,
-+ enum scst_exec_context context, int check_retries)
-+{
-+ struct scst_tgt *tgt = cmd->tgt;
-+ unsigned long flags;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Context: %x", context);
-+
-+ if (check_retries)
-+ scst_check_retries(tgt);
-+
-+ if (context == SCST_CONTEXT_SAME)
-+ context = scst_cmd_atomic(cmd) ? SCST_CONTEXT_DIRECT_ATOMIC :
-+ SCST_CONTEXT_DIRECT;
-+
-+ switch (context) {
-+ case SCST_CONTEXT_DIRECT_ATOMIC:
-+ scst_process_active_cmd(cmd, true);
-+ break;
-+
-+ case SCST_CONTEXT_DIRECT:
-+ scst_process_active_cmd(cmd, false);
-+ break;
-+
-+ case SCST_CONTEXT_TASKLET:
-+ scst_schedule_tasklet(cmd);
-+ break;
-+
-+ default:
-+ PRINT_ERROR("Context %x is unknown, using the thread one",
-+ context);
-+ /* go through */
-+ case SCST_CONTEXT_THREAD:
-+ spin_lock_irqsave(&cmd->cmd_threads->cmd_list_lock, flags);
-+ TRACE_DBG("Adding cmd %p to active cmd list", cmd);
-+ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
-+ list_add(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ else
-+ list_add_tail(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
-+ spin_unlock_irqrestore(&cmd->cmd_threads->cmd_list_lock, flags);
-+ break;
-+ }
++static struct kobj_attribute scst_tg_preferred =
++ __ATTR(preferred, S_IRUGO | S_IWUSR, scst_tg_preferred_show,
++ scst_tg_preferred_store);
+
-+ TRACE_EXIT();
-+ return;
-+}
++static struct { enum scst_tg_state s; const char *n; } scst_tg_state_names[] = {
++ { SCST_TG_STATE_OPTIMIZED, "active" },
++ { SCST_TG_STATE_NONOPTIMIZED, "nonoptimized" },
++ { SCST_TG_STATE_STANDBY, "standby" },
++ { SCST_TG_STATE_UNAVAILABLE, "unavailable" },
++ { SCST_TG_STATE_OFFLINE, "offline" },
++ { SCST_TG_STATE_TRANSITIONING, "transitioning" },
++};
+
-+/**
-+ * scst_rx_data() - the command's data received
-+ * @cmd: SCST commands
-+ * @status: data receiving completion status
-+ * @pref_context: preferred command execution context
-+ *
-+ * Description:
-+ * Notifies SCST that the driver received all the necessary data
-+ * and the command is ready for further processing.
-+ *
-+ * The second argument sets data receiving completion status
-+ * (see SCST_RX_STATUS_* constants for details)
-+ */
-+void scst_rx_data(struct scst_cmd *cmd, int status,
-+ enum scst_exec_context pref_context)
++static ssize_t scst_tg_state_show(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ char *buf)
+{
-+ TRACE_ENTRY();
-+
-+ scst_set_rdy_to_xfer_time(cmd);
-+
-+ TRACE_DBG("Preferred context: %d", pref_context);
-+ TRACE(TRACE_SCSI, "cmd %p, status %#x", cmd, status);
-+
-+ cmd->cmd_hw_pending = 0;
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ if ((in_irq() || irqs_disabled()) &&
-+ ((pref_context == SCST_CONTEXT_DIRECT) ||
-+ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
-+ PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
-+ "SCST_CONTEXT_THREAD instead", pref_context,
-+ cmd->tgtt->name);
-+ dump_stack();
-+ pref_context = SCST_CONTEXT_THREAD;
-+ }
-+#endif
-+
-+ switch (status) {
-+ case SCST_RX_STATUS_SUCCESS:
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ if (trace_flag & TRACE_RCV_BOT) {
-+ int i;
-+ struct scatterlist *sg;
-+ if (cmd->out_sg != NULL)
-+ sg = cmd->out_sg;
-+ else if (cmd->tgt_out_sg != NULL)
-+ sg = cmd->tgt_out_sg;
-+ else if (cmd->tgt_sg != NULL)
-+ sg = cmd->tgt_sg;
-+ else
-+ sg = cmd->sg;
-+ if (sg != NULL) {
-+ TRACE_RECV_BOT("RX data for cmd %p "
-+ "(sg_cnt %d, sg %p, sg[0].page %p)",
-+ cmd, cmd->tgt_sg_cnt, sg,
-+ (void *)sg_page(&sg[0]));
-+ for (i = 0; i < cmd->tgt_sg_cnt; ++i) {
-+ PRINT_BUFF_FLAG(TRACE_RCV_BOT, "RX sg",
-+ sg_virt(&sg[i]), sg[i].length);
-+ }
-+ }
-+ }
-+#endif
-+ cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
++ struct scst_target_group *tg;
++ int i;
+
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ if (cmd->op_flags & SCST_TEST_IO_IN_SIRQ_ALLOWED)
++ tg = container_of(kobj, struct scst_target_group, kobj);
++ for (i = ARRAY_SIZE(scst_tg_state_names) - 1; i >= 0; i--)
++ if (scst_tg_state_names[i].s == tg->state)
+ break;
-+#endif
-+
-+ /* Small context optimization */
-+ if ((pref_context == SCST_CONTEXT_TASKLET) ||
-+ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
-+ ((pref_context == SCST_CONTEXT_SAME) &&
-+ scst_cmd_atomic(cmd)))
-+ pref_context = SCST_CONTEXT_THREAD;
-+ break;
-+
-+ case SCST_RX_STATUS_ERROR_SENSE_SET:
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ pref_context = SCST_CONTEXT_THREAD;
-+ break;
-+
-+ case SCST_RX_STATUS_ERROR_FATAL:
-+ set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
-+ /* go through */
-+ case SCST_RX_STATUS_ERROR:
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ pref_context = SCST_CONTEXT_THREAD;
-+ break;
-+
-+ default:
-+ PRINT_ERROR("scst_rx_data() received unknown status %x",
-+ status);
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ pref_context = SCST_CONTEXT_THREAD;
-+ break;
-+ }
-+
-+ scst_process_redirect_cmd(cmd, pref_context, 1);
+
-+ TRACE_EXIT();
-+ return;
++ return scnprintf(buf, PAGE_SIZE, "%s\n" SCST_SYSFS_KEY_MARK "\n",
++ i >= 0 ? scst_tg_state_names[i].n : "???");
+}
-+EXPORT_SYMBOL(scst_rx_data);
+
-+static int scst_tgt_pre_exec(struct scst_cmd *cmd)
++static int scst_tg_state_store_work_fn(struct scst_sysfs_work_item *w)
+{
-+ int res = SCST_CMD_STATE_RES_CONT_SAME, rc;
++ struct scst_target_group *tg;
++ char *cmd, *p;
++ int i, res;
+
+ TRACE_ENTRY();
+
-+ if (unlikely(cmd->resid_possible)) {
-+ if (cmd->data_direction & SCST_DATA_WRITE) {
-+ bool do_zero = false;
-+ if (cmd->data_direction & SCST_DATA_READ) {
-+ if (cmd->write_len != cmd->out_bufflen)
-+ do_zero = true;
-+ } else {
-+ if (cmd->write_len != cmd->bufflen)
-+ do_zero = true;
-+ }
-+ if (do_zero) {
-+ scst_check_restore_sg_buff(cmd);
-+ scst_zero_write_rest(cmd);
-+ }
-+ }
-+ }
-+
-+ cmd->state = SCST_CMD_STATE_SEND_FOR_EXEC;
-+
-+ if ((cmd->tgtt->pre_exec == NULL) || unlikely(cmd->internal))
-+ goto out;
++ cmd = w->buf;
++ tg = container_of(w->kobj, struct scst_target_group, kobj);
+
-+ TRACE_DBG("Calling pre_exec(%p)", cmd);
-+ scst_set_cur_start(cmd);
-+ rc = cmd->tgtt->pre_exec(cmd);
-+ scst_set_pre_exec_time(cmd);
-+ TRACE_DBG("pre_exec() returned %d", rc);
++ p = strchr(cmd, '\n');
++ if (p)
++ *p = '\0';
+
-+ if (unlikely(rc != SCST_PREPROCESS_STATUS_SUCCESS)) {
-+ switch (rc) {
-+ case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
-+ scst_set_cmd_abnormal_done_state(cmd);
++ for (i = ARRAY_SIZE(scst_tg_state_names) - 1; i >= 0; i--)
++ if (strcmp(scst_tg_state_names[i].n, cmd) == 0)
+ break;
-+ case SCST_PREPROCESS_STATUS_ERROR_FATAL:
-+ set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
-+ /* go through */
-+ case SCST_PREPROCESS_STATUS_ERROR:
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ break;
-+ default:
-+ BUG();
-+ break;
-+ }
-+ }
+
++ res = -EINVAL;
++ if (i < 0)
++ goto out;
++ res = scst_tg_set_state(tg, scst_tg_state_names[i].s);
+out:
++ kobject_put(w->kobj);
+ TRACE_EXIT_RES(res);
+ return res;
+}
+
-+static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
-+ const uint8_t *rq_sense, int rq_sense_len, int resid)
-+{
-+ TRACE_ENTRY();
-+
-+ scst_set_exec_time(cmd);
-+
-+ cmd->status = result & 0xff;
-+ cmd->msg_status = msg_byte(result);
-+ cmd->host_status = host_byte(result);
-+ cmd->driver_status = driver_byte(result);
-+ if (unlikely(resid != 0)) {
-+ if ((cmd->data_direction & SCST_DATA_READ) &&
-+ (resid > 0) && (resid < cmd->resp_data_len))
-+ scst_set_resp_data_len(cmd, cmd->resp_data_len - resid);
-+ /*
-+ * We ignore write direction residue, because from the
-+ * initiator's POV we already transferred all the data.
-+ */
-+ }
-+
-+ if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION)) {
-+ /* We might have double reset UA here */
-+ cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
-+ cmd->dbl_ua_orig_data_direction = cmd->data_direction;
-+
-+ scst_alloc_set_sense(cmd, 1, rq_sense, rq_sense_len);
-+ }
-+
-+ TRACE(TRACE_SCSI, "cmd %p, result %x, cmd->status %x, resid %d, "
-+ "cmd->msg_status %x, cmd->host_status %x, "
-+ "cmd->driver_status %x", cmd, result, cmd->status, resid,
-+ cmd->msg_status, cmd->host_status, cmd->driver_status);
-+
-+ cmd->completed = 1;
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* For small context optimization */
-+static inline enum scst_exec_context scst_optimize_post_exec_context(
-+ struct scst_cmd *cmd, enum scst_exec_context context)
-+{
-+ if (((context == SCST_CONTEXT_SAME) && scst_cmd_atomic(cmd)) ||
-+ (context == SCST_CONTEXT_TASKLET) ||
-+ (context == SCST_CONTEXT_DIRECT_ATOMIC)) {
-+ if (!test_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
-+ &cmd->tgt_dev->tgt_dev_flags))
-+ context = SCST_CONTEXT_THREAD;
-+ }
-+ return context;
-+}
-+
-+static void scst_cmd_done(void *data, char *sense, int result, int resid)
++static ssize_t scst_tg_state_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count)
+{
-+ struct scst_cmd *cmd;
++ char *cmd;
++ struct scst_sysfs_work_item *work;
++ int res;
+
+ TRACE_ENTRY();
+
-+ cmd = (struct scst_cmd *)data;
-+ if (cmd == NULL)
++ res = -ENOMEM;
++ cmd = kasprintf(GFP_KERNEL, "%.*s", (int)count, buf);
++ if (!cmd)
+ goto out;
+
-+ scst_do_cmd_done(cmd, result, sense, SCSI_SENSE_BUFFERSIZE, resid);
-+
-+ cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
++ res = scst_alloc_sysfs_work(scst_tg_state_store_work_fn, false,
++ &work);
++ if (res)
++ goto out;
+
-+ scst_process_redirect_cmd(cmd,
-+ scst_optimize_post_exec_context(cmd, scst_estimate_context()), 0);
++ work->buf = cmd;
++ work->kobj = kobj;
++ kobject_get(kobj);
++ res = scst_sysfs_queue_wait_work(work);
+
+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state,
-+ enum scst_exec_context pref_context)
-+{
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(cmd->pr_abort_counter != NULL);
-+
-+ scst_set_exec_time(cmd);
-+
-+ TRACE(TRACE_SCSI, "cmd %p, status %x, msg_status %x, host_status %x, "
-+ "driver_status %x, resp_data_len %d", cmd, cmd->status,
-+ cmd->msg_status, cmd->host_status, cmd->driver_status,
-+ cmd->resp_data_len);
-+
-+ if (next_state == SCST_CMD_STATE_DEFAULT)
-+ next_state = SCST_CMD_STATE_PRE_DEV_DONE;
-+
-+#if defined(CONFIG_SCST_DEBUG)
-+ if (next_state == SCST_CMD_STATE_PRE_DEV_DONE) {
-+ if ((trace_flag & TRACE_RCV_TOP) && (cmd->sg != NULL)) {
-+ int i;
-+ struct scatterlist *sg = cmd->sg;
-+ TRACE_RECV_TOP("Exec'd %d S/G(s) at %p sg[0].page at "
-+ "%p", cmd->sg_cnt, sg, (void *)sg_page(&sg[0]));
-+ for (i = 0; i < cmd->sg_cnt; ++i) {
-+ TRACE_BUFF_FLAG(TRACE_RCV_TOP,
-+ "Exec'd sg", sg_virt(&sg[i]),
-+ sg[i].length);
-+ }
-+ }
-+ }
-+#endif
-+
-+ cmd->state = next_state;
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ if ((next_state != SCST_CMD_STATE_PRE_DEV_DONE) &&
-+ (next_state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
-+ (next_state != SCST_CMD_STATE_FINISHED) &&
-+ (next_state != SCST_CMD_STATE_FINISHED_INTERNAL)) {
-+ PRINT_ERROR("%s() received invalid cmd state %d (opcode %d)",
-+ __func__, next_state, cmd->cdb[0]);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ }
-+#endif
-+ pref_context = scst_optimize_post_exec_context(cmd, pref_context);
-+ scst_process_redirect_cmd(cmd, pref_context, 0);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int scst_report_luns_local(struct scst_cmd *cmd)
-+{
-+ int res = SCST_EXEC_COMPLETED, rc;
-+ int dev_cnt = 0;
-+ int buffer_size;
-+ int i;
-+ struct scst_tgt_dev *tgt_dev = NULL;
-+ uint8_t *buffer;
-+ int offs, overflow = 0;
-+
-+ TRACE_ENTRY();
-+
-+ rc = scst_check_local_events(cmd);
-+ if (unlikely(rc != 0))
-+ goto out_done;
-+
-+ cmd->status = 0;
-+ cmd->msg_status = 0;
-+ cmd->host_status = DID_OK;
-+ cmd->driver_status = 0;
-+
-+ if ((cmd->cdb[2] != 0) && (cmd->cdb[2] != 2)) {
-+ PRINT_ERROR("Unsupported SELECT REPORT value %x in REPORT "
-+ "LUNS command", cmd->cdb[2]);
-+ goto out_err;
-+ }
-+
-+ buffer_size = scst_get_buf_first(cmd, &buffer);
-+ if (unlikely(buffer_size == 0))
-+ goto out_compl;
-+ else if (unlikely(buffer_size < 0))
-+ goto out_hw_err;
-+
-+ if (buffer_size < 16)
-+ goto out_put_err;
-+
-+ memset(buffer, 0, buffer_size);
-+ offs = 8;
-+
-+ /*
-+ * cmd won't allow to suspend activities, so we can access
-+ * sess->sess_tgt_dev_list_hash without any additional protection.
-+ */
-+ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
-+ struct list_head *sess_tgt_dev_list_head =
-+ &cmd->sess->sess_tgt_dev_list_hash[i];
-+ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
-+ sess_tgt_dev_list_entry) {
-+ if (!overflow) {
-+ if (offs >= buffer_size) {
-+ scst_put_buf(cmd, buffer);
-+ buffer_size = scst_get_buf_next(cmd,
-+ &buffer);
-+ if (buffer_size > 0) {
-+ memset(buffer, 0, buffer_size);
-+ offs = 0;
-+ } else {
-+ overflow = 1;
-+ goto inc_dev_cnt;
-+ }
-+ }
-+ if ((buffer_size - offs) < 8) {
-+ PRINT_ERROR("Buffer allocated for "
-+ "REPORT LUNS command doesn't "
-+ "allow to fit 8 byte entry "
-+ "(buffer_size=%d)",
-+ buffer_size);
-+ goto out_put_hw_err;
-+ }
-+ if ((cmd->sess->acg->addr_method == SCST_LUN_ADDR_METHOD_FLAT) &&
-+ (tgt_dev->lun != 0)) {
-+ buffer[offs] = (tgt_dev->lun >> 8) & 0x3f;
-+ buffer[offs] = buffer[offs] | 0x40;
-+ buffer[offs+1] = tgt_dev->lun & 0xff;
-+ } else {
-+ buffer[offs] = (tgt_dev->lun >> 8) & 0xff;
-+ buffer[offs+1] = tgt_dev->lun & 0xff;
-+ }
-+ offs += 8;
-+ }
-+inc_dev_cnt:
-+ dev_cnt++;
-+ }
-+ }
-+ if (!overflow)
-+ scst_put_buf(cmd, buffer);
-+
-+ /* Set the response header */
-+ buffer_size = scst_get_buf_first(cmd, &buffer);
-+ if (unlikely(buffer_size == 0))
-+ goto out_compl;
-+ else if (unlikely(buffer_size < 0))
-+ goto out_hw_err;
-+
-+ dev_cnt *= 8;
-+ buffer[0] = (dev_cnt >> 24) & 0xff;
-+ buffer[1] = (dev_cnt >> 16) & 0xff;
-+ buffer[2] = (dev_cnt >> 8) & 0xff;
-+ buffer[3] = dev_cnt & 0xff;
-+
-+ scst_put_buf(cmd, buffer);
-+
-+ dev_cnt += 8;
-+ if (dev_cnt < cmd->resp_data_len)
-+ scst_set_resp_data_len(cmd, dev_cnt);
-+
-+out_compl:
-+ cmd->completed = 1;
-+
-+ /* Clear left sense_reported_luns_data_changed UA, if any. */
-+
-+ /*
-+ * cmd won't allow to suspend activities, so we can access
-+ * sess->sess_tgt_dev_list_hash without any additional protection.
-+ */
-+ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
-+ struct list_head *sess_tgt_dev_list_head =
-+ &cmd->sess->sess_tgt_dev_list_hash[i];
-+
-+ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
-+ sess_tgt_dev_list_entry) {
-+ struct scst_tgt_dev_UA *ua;
-+
-+ spin_lock_bh(&tgt_dev->tgt_dev_lock);
-+ list_for_each_entry(ua, &tgt_dev->UA_list,
-+ UA_list_entry) {
-+ if (scst_analyze_sense(ua->UA_sense_buffer,
-+ ua->UA_valid_sense_len,
-+ SCST_SENSE_ALL_VALID,
-+ SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed))) {
-+ TRACE_MGMT_DBG("Freeing not needed "
-+ "REPORTED LUNS DATA CHANGED UA "
-+ "%p", ua);
-+ list_del(&ua->UA_list_entry);
-+ mempool_free(ua, scst_ua_mempool);
-+ break;
-+ }
-+ }
-+ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
-+ }
-+ }
-+
-+out_done:
-+ /* Report the result */
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
-+
++ if (res == 0)
++ res = count;
+ TRACE_EXIT_RES(res);
+ return res;
-+
-+out_put_err:
-+ scst_put_buf(cmd, buffer);
-+
-+out_err:
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_compl;
-+
-+out_put_hw_err:
-+ scst_put_buf(cmd, buffer);
-+
-+out_hw_err:
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out_compl;
+}
+
-+static int scst_request_sense_local(struct scst_cmd *cmd)
-+{
-+ int res = SCST_EXEC_COMPLETED, rc;
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+ uint8_t *buffer;
-+ int buffer_size = 0, sl = 0;
-+
-+ TRACE_ENTRY();
-+
-+ rc = scst_check_local_events(cmd);
-+ if (unlikely(rc != 0))
-+ goto out_done;
-+
-+ cmd->status = 0;
-+ cmd->msg_status = 0;
-+ cmd->host_status = DID_OK;
-+ cmd->driver_status = 0;
-+
-+ spin_lock_bh(&tgt_dev->tgt_dev_lock);
-+
-+ if (tgt_dev->tgt_dev_valid_sense_len == 0)
-+ goto out_unlock_not_completed;
-+
-+ TRACE(TRACE_SCSI, "%s: Returning stored sense", cmd->op_name);
-+
-+ buffer_size = scst_get_buf_first(cmd, &buffer);
-+ if (unlikely(buffer_size == 0))
-+ goto out_unlock_compl;
-+ else if (unlikely(buffer_size < 0))
-+ goto out_unlock_hw_err;
-+
-+ memset(buffer, 0, buffer_size);
-+
-+ if (((tgt_dev->tgt_dev_sense[0] == 0x70) ||
-+ (tgt_dev->tgt_dev_sense[0] == 0x71)) && (cmd->cdb[1] & 1)) {
-+ PRINT_WARNING("%s: Fixed format of the saved sense, but "
-+ "descriptor format requested. Convertion will "
-+ "truncated data", cmd->op_name);
-+ PRINT_BUFFER("Original sense", tgt_dev->tgt_dev_sense,
-+ tgt_dev->tgt_dev_valid_sense_len);
-+
-+ buffer_size = min(SCST_STANDARD_SENSE_LEN, buffer_size);
-+ sl = scst_set_sense(buffer, buffer_size, true,
-+ tgt_dev->tgt_dev_sense[2], tgt_dev->tgt_dev_sense[12],
-+ tgt_dev->tgt_dev_sense[13]);
-+ } else if (((tgt_dev->tgt_dev_sense[0] == 0x72) ||
-+ (tgt_dev->tgt_dev_sense[0] == 0x73)) && !(cmd->cdb[1] & 1)) {
-+ PRINT_WARNING("%s: Descriptor format of the "
-+ "saved sense, but fixed format requested. Convertion "
-+ "will truncated data", cmd->op_name);
-+ PRINT_BUFFER("Original sense", tgt_dev->tgt_dev_sense,
-+ tgt_dev->tgt_dev_valid_sense_len);
-+
-+ buffer_size = min(SCST_STANDARD_SENSE_LEN, buffer_size);
-+ sl = scst_set_sense(buffer, buffer_size, false,
-+ tgt_dev->tgt_dev_sense[1], tgt_dev->tgt_dev_sense[2],
-+ tgt_dev->tgt_dev_sense[3]);
-+ } else {
-+ if (buffer_size >= tgt_dev->tgt_dev_valid_sense_len)
-+ sl = tgt_dev->tgt_dev_valid_sense_len;
-+ else {
-+ sl = buffer_size;
-+ TRACE(TRACE_MINOR, "%s: Being returned sense truncated "
-+ "to size %d (needed %d)", cmd->op_name,
-+ buffer_size, tgt_dev->tgt_dev_valid_sense_len);
-+ }
-+ memcpy(buffer, tgt_dev->tgt_dev_sense, sl);
-+ }
-+
-+ scst_put_buf(cmd, buffer);
-+
-+ tgt_dev->tgt_dev_valid_sense_len = 0;
-+
-+ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
-+
-+ scst_set_resp_data_len(cmd, sl);
-+
-+out_compl:
-+ cmd->completed = 1;
-+
-+out_done:
-+ /* Report the result */
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_unlock_hw_err:
-+ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out_compl;
++static struct kobj_attribute scst_tg_state =
++ __ATTR(state, S_IRUGO | S_IWUSR, scst_tg_state_show,
++ scst_tg_state_store);
+
-+out_unlock_not_completed:
-+ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
-+ res = SCST_EXEC_NOT_COMPLETED;
-+ goto out;
++static ssize_t scst_tg_mgmt_show(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ char *buf)
++{
++ static const char help[] =
++ "Usage: echo \"add target\" >mgmt\n"
++ " echo \"del target\" >mgmt\n";
+
-+out_unlock_compl:
-+ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
-+ goto out_compl;
++ return scnprintf(buf, PAGE_SIZE, help);
+}
+
-+static int scst_reserve_local(struct scst_cmd *cmd)
++static int scst_tg_mgmt_store_work_fn(struct scst_sysfs_work_item *w)
+{
-+ int res = SCST_EXEC_NOT_COMPLETED, rc;
-+ struct scst_device *dev;
-+ struct scst_tgt_dev *tgt_dev_tmp;
++ struct scst_target_group *tg;
++ char *cmd, *p, *pp, *target_name;
++ int res;
+
+ TRACE_ENTRY();
+
-+ if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
-+ PRINT_ERROR("RESERVE_10: 3rdPty RESERVE not implemented "
-+ "(lun=%lld)", (long long unsigned int)cmd->lun);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_done;
-+ }
-+
-+ dev = cmd->dev;
-+
-+ /*
-+ * There's no need to block this device, even for
-+ * SCST_CONTR_MODE_ONE_TASK_SET, or anyhow else protect reservations
-+ * changes, because:
-+ *
-+ * 1. The reservation changes are (rather) atomic, i.e., in contrast
-+ * to persistent reservations, don't have any invalid intermediate
-+ * states during being changed.
-+ *
-+ * 2. It's a duty of initiators to ensure order of regular commands
-+ * around the reservation command either by ORDERED attribute, or by
-+ * queue draining, or etc. For case of SCST_CONTR_MODE_ONE_TASK_SET
-+ * there are no target drivers which can ensure even for ORDERED
-+ * comamnds order of their delivery, so, because initiators know
-+ * it, also there's no point to do any extra protection actions.
-+ */
-+
-+ rc = scst_check_local_events(cmd);
-+ if (unlikely(rc != 0))
-+ goto out_done;
-+
-+ if (!list_empty(&dev->dev_registrants_list)) {
-+ if (scst_pr_crh_case(cmd))
-+ goto out_completed;
-+ else {
-+ scst_set_cmd_error_status(cmd,
-+ SAM_STAT_RESERVATION_CONFLICT);
-+ goto out_done;
-+ }
-+ }
-+
-+ spin_lock_bh(&dev->dev_lock);
++ cmd = w->buf;
++ tg = container_of(w->kobj, struct scst_target_group, kobj);
+
-+ if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
-+ spin_unlock_bh(&dev->dev_lock);
-+ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
-+ goto out_done;
-+ }
++ p = strchr(cmd, '\n');
++ if (p)
++ *p = '\0';
+
-+ list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ if (cmd->tgt_dev != tgt_dev_tmp)
-+ set_bit(SCST_TGT_DEV_RESERVED,
-+ &tgt_dev_tmp->tgt_dev_flags);
++ res = -EINVAL;
++ pp = cmd;
++ p = scst_get_next_lexem(&pp);
++ if (strcasecmp(p, "add") == 0) {
++ target_name = scst_get_next_lexem(&pp);
++ if (!*target_name)
++ goto out;
++ res = scst_tg_tgt_add(tg, target_name);
++ } else if (strcasecmp(p, "del") == 0) {
++ target_name = scst_get_next_lexem(&pp);
++ if (!*target_name)
++ goto out;
++ res = scst_tg_tgt_remove_by_name(tg, target_name);
+ }
-+ dev->dev_reserved = 1;
-+
-+ spin_unlock_bh(&dev->dev_lock);
-+
+out:
++ kobject_put(w->kobj);
+ TRACE_EXIT_RES(res);
+ return res;
-+
-+out_completed:
-+ cmd->completed = 1;
-+
-+out_done:
-+ /* Report the result */
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
-+ res = SCST_EXEC_COMPLETED;
-+ goto out;
+}
+
-+static int scst_release_local(struct scst_cmd *cmd)
++static ssize_t scst_tg_mgmt_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count)
+{
-+ int res = SCST_EXEC_NOT_COMPLETED, rc;
-+ struct scst_tgt_dev *tgt_dev_tmp;
-+ struct scst_device *dev;
++ char *cmd;
++ struct scst_sysfs_work_item *work;
++ int res;
+
+ TRACE_ENTRY();
+
-+ dev = cmd->dev;
-+
-+ /*
-+ * See comment in scst_reserve_local() why no dev blocking or any
-+ * other protection is needed here.
-+ */
-+
-+ rc = scst_check_local_events(cmd);
-+ if (unlikely(rc != 0))
-+ goto out_done;
-+
-+ if (!list_empty(&dev->dev_registrants_list)) {
-+ if (scst_pr_crh_case(cmd))
-+ goto out_completed;
-+ else {
-+ scst_set_cmd_error_status(cmd,
-+ SAM_STAT_RESERVATION_CONFLICT);
-+ goto out_done;
-+ }
-+ }
-+
-+ spin_lock_bh(&dev->dev_lock);
-+
-+ /*
-+ * The device could be RELEASED behind us, if RESERVING session
-+ * is closed (see scst_free_tgt_dev()), but this actually doesn't
-+ * matter, so use lock and no retest for DEV_RESERVED bits again
-+ */
-+ if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
-+ res = SCST_EXEC_COMPLETED;
-+ cmd->status = 0;
-+ cmd->msg_status = 0;
-+ cmd->host_status = DID_OK;
-+ cmd->driver_status = 0;
-+ cmd->completed = 1;
-+ } else {
-+ list_for_each_entry(tgt_dev_tmp,
-+ &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ clear_bit(SCST_TGT_DEV_RESERVED,
-+ &tgt_dev_tmp->tgt_dev_flags);
-+ }
-+ dev->dev_reserved = 0;
-+ }
++ res = -ENOMEM;
++ cmd = kasprintf(GFP_KERNEL, "%.*s", (int)count, buf);
++ if (!cmd)
++ goto out;
+
-+ spin_unlock_bh(&dev->dev_lock);
++ res = scst_alloc_sysfs_work(scst_tg_mgmt_store_work_fn, false,
++ &work);
++ if (res)
++ goto out;
+
-+ if (res == SCST_EXEC_COMPLETED)
-+ goto out_done;
++ work->buf = cmd;
++ work->kobj = kobj;
++ kobject_get(kobj);
++ res = scst_sysfs_queue_wait_work(work);
+
+out:
++ if (res == 0)
++ res = count;
+ TRACE_EXIT_RES(res);
+ return res;
-+
-+out_completed:
-+ cmd->completed = 1;
-+
-+out_done:
-+ res = SCST_EXEC_COMPLETED;
-+ /* Report the result */
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
-+ goto out;
-+}
-+
-+/**
-+ * scst_check_local_events() - check if there are any local SCSI events
-+ *
-+ * Description:
-+ * Checks if the command can be executed or there are local events,
-+ * like reservatons, pending UAs, etc. Returns < 0 if command must be
-+ * aborted, > 0 if there is an event and command should be immediately
-+ * completed, or 0 otherwise.
-+ *
-+ * !! Dev handlers implementing exec() callback must call this function there
-+ * !! just before the actual command's execution!
-+ *
-+ * On call no locks, no IRQ or IRQ-disabled context allowed.
-+ */
-+static int scst_persistent_reserve_in_local(struct scst_cmd *cmd)
-+{
-+ int rc;
-+ struct scst_device *dev;
-+ struct scst_tgt_dev *tgt_dev;
-+ struct scst_session *session;
-+ int action;
-+ uint8_t *buffer;
-+ int buffer_size;
-+
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(scst_cmd_atomic(cmd));
-+
-+ dev = cmd->dev;
-+ tgt_dev = cmd->tgt_dev;
-+ session = cmd->sess;
-+
-+ rc = scst_check_local_events(cmd);
-+ if (unlikely(rc != 0))
-+ goto out_done;
-+
-+ if (unlikely(dev->not_pr_supporting_tgt_devs_num != 0)) {
-+ PRINT_WARNING("Persistent Reservation command %x refused for "
-+ "device %s, because the device has not supporting PR "
-+ "transports connected", cmd->cdb[0], dev->virt_name);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
-+ goto out_done;
-+ }
-+
-+ if (dev->dev_reserved) {
-+ TRACE_PR("PR command rejected, because device %s holds regular "
-+ "reservation", dev->virt_name);
-+ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
-+ goto out_done;
-+ }
-+
-+ if (dev->scsi_dev != NULL) {
-+ PRINT_WARNING("PR commands for pass-through devices not "
-+ "supported (device %s)", dev->virt_name);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
-+ goto out_done;
-+ }
-+
-+ buffer_size = scst_get_full_buf(cmd, &buffer);
-+ if (unlikely(buffer_size <= 0)) {
-+ if (buffer_size < 0)
-+ scst_set_busy(cmd);
-+ goto out_done;
-+ }
-+
-+ scst_pr_write_lock(dev);
-+
-+ /* We can be aborted by another PR command while waiting for the lock */
-+ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
-+ TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
-+ goto out_unlock;
-+ }
-+
-+ action = cmd->cdb[1] & 0x1f;
-+
-+ TRACE(TRACE_SCSI, "PR action %x for '%s' (LUN %llx) from '%s'", action,
-+ dev->virt_name, tgt_dev->lun, session->initiator_name);
-+
-+ switch (action) {
-+ case PR_READ_KEYS:
-+ scst_pr_read_keys(cmd, buffer, buffer_size);
-+ break;
-+ case PR_READ_RESERVATION:
-+ scst_pr_read_reservation(cmd, buffer, buffer_size);
-+ break;
-+ case PR_REPORT_CAPS:
-+ scst_pr_report_caps(cmd, buffer, buffer_size);
-+ break;
-+ case PR_READ_FULL_STATUS:
-+ scst_pr_read_full_status(cmd, buffer, buffer_size);
-+ break;
-+ default:
-+ PRINT_ERROR("Unsupported action %x", action);
-+ scst_pr_write_unlock(dev);
-+ goto out_err;
-+ }
-+
-+out_complete:
-+ cmd->completed = 1;
-+
-+out_unlock:
-+ scst_pr_write_unlock(dev);
-+
-+ scst_put_full_buf(cmd, buffer);
-+
-+out_done:
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
-+
-+ TRACE_EXIT_RES(SCST_EXEC_COMPLETED);
-+ return SCST_EXEC_COMPLETED;
-+
-+out_err:
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_complete;
+}
+
-+/* No locks, no IRQ or IRQ-disabled context allowed */
-+static int scst_persistent_reserve_out_local(struct scst_cmd *cmd)
-+{
-+ int res = SCST_EXEC_COMPLETED;
-+ int rc;
-+ struct scst_device *dev;
-+ struct scst_tgt_dev *tgt_dev;
-+ struct scst_session *session;
-+ int action;
-+ uint8_t *buffer;
-+ int buffer_size;
-+ bool aborted = false;
-+
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(scst_cmd_atomic(cmd));
-+
-+ dev = cmd->dev;
-+ tgt_dev = cmd->tgt_dev;
-+ session = cmd->sess;
-+
-+ rc = scst_check_local_events(cmd);
-+ if (unlikely(rc != 0))
-+ goto out_done;
-+
-+ if (unlikely(dev->not_pr_supporting_tgt_devs_num != 0)) {
-+ PRINT_WARNING("Persistent Reservation command %x refused for "
-+ "device %s, because the device has not supporting PR "
-+ "transports connected", cmd->cdb[0], dev->virt_name);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
-+ goto out_done;
-+ }
-+
-+ action = cmd->cdb[1] & 0x1f;
-+
-+ TRACE(TRACE_SCSI, "PR action %x for '%s' (LUN %llx) from '%s'", action,
-+ dev->virt_name, tgt_dev->lun, session->initiator_name);
-+
-+ if (dev->dev_reserved) {
-+ TRACE_PR("PR command rejected, because device %s holds regular "
-+ "reservation", dev->virt_name);
-+ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
-+ goto out_done;
-+ }
-+
-+ /*
-+ * Check if tgt_dev already registered. Also by this check we make
-+ * sure that table "PERSISTENT RESERVE OUT service actions that are
-+ * allowed in the presence of various reservations" is honored.
-+ * REGISTER AND MOVE and RESERVE will be additionally checked for
-+ * conflicts later.
-+ */
-+ if ((action != PR_REGISTER) && (action != PR_REGISTER_AND_IGNORE) &&
-+ (tgt_dev->registrant == NULL)) {
-+ TRACE_PR("'%s' not registered", cmd->sess->initiator_name);
-+ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
-+ goto out_done;
-+ }
-+
-+ buffer_size = scst_get_full_buf(cmd, &buffer);
-+ if (unlikely(buffer_size <= 0)) {
-+ if (buffer_size < 0)
-+ scst_set_busy(cmd);
-+ goto out_done;
-+ }
-+
-+ /* Check scope */
-+ if ((action != PR_REGISTER) && (action != PR_REGISTER_AND_IGNORE) &&
-+ (action != PR_CLEAR) && ((cmd->cdb[2] & 0x0f) >> 4) != SCOPE_LU) {
-+ TRACE_PR("Scope must be SCOPE_LU for action %x", action);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_put_full_buf;
-+ }
-+
-+ /* Check SPEC_I_PT (PR_REGISTER_AND_MOVE has another format) */
-+ if ((action != PR_REGISTER) && (action != PR_REGISTER_AND_MOVE) &&
-+ ((buffer[20] >> 3) & 0x01)) {
-+ TRACE_PR("SPEC_I_PT must be zero for action %x", action);
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
-+ scst_sense_invalid_field_in_cdb));
-+ goto out_put_full_buf;
-+ }
-+
-+ /* Check ALL_TG_PT (PR_REGISTER_AND_MOVE has another format) */
-+ if ((action != PR_REGISTER) && (action != PR_REGISTER_AND_IGNORE) &&
-+ (action != PR_REGISTER_AND_MOVE) && ((buffer[20] >> 2) & 0x01)) {
-+ TRACE_PR("ALL_TG_PT must be zero for action %x", action);
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
-+ scst_sense_invalid_field_in_cdb));
-+ goto out_put_full_buf;
-+ }
-+
-+ scst_pr_write_lock(dev);
-+
-+ /* We can be aborted by another PR command while waiting for the lock */
-+ aborted = test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
-+ if (unlikely(aborted)) {
-+ TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
-+ goto out_unlock;
-+ }
++static struct kobj_attribute scst_tg_mgmt =
++ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_tg_mgmt_show,
++ scst_tg_mgmt_store);
+
-+ switch (action) {
-+ case PR_REGISTER:
-+ scst_pr_register(cmd, buffer, buffer_size);
-+ break;
-+ case PR_RESERVE:
-+ scst_pr_reserve(cmd, buffer, buffer_size);
-+ break;
-+ case PR_RELEASE:
-+ scst_pr_release(cmd, buffer, buffer_size);
-+ break;
-+ case PR_CLEAR:
-+ scst_pr_clear(cmd, buffer, buffer_size);
-+ break;
-+ case PR_PREEMPT:
-+ scst_pr_preempt(cmd, buffer, buffer_size);
-+ break;
-+ case PR_PREEMPT_AND_ABORT:
-+ scst_pr_preempt_and_abort(cmd, buffer, buffer_size);
-+ break;
-+ case PR_REGISTER_AND_IGNORE:
-+ scst_pr_register_and_ignore(cmd, buffer, buffer_size);
-+ break;
-+ case PR_REGISTER_AND_MOVE:
-+ scst_pr_register_and_move(cmd, buffer, buffer_size);
-+ break;
-+ default:
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_unlock;
-+ }
-+
-+ if (cmd->status == SAM_STAT_GOOD)
-+ scst_pr_sync_device_file(tgt_dev, cmd);
-+
-+ if ((dev->handler->pr_cmds_notifications) &&
-+ (cmd->status == SAM_STAT_GOOD)) /* sync file may change status */
-+ res = SCST_EXEC_NOT_COMPLETED;
-+
-+out_unlock:
-+ scst_pr_write_unlock(dev);
-+
-+out_put_full_buf:
-+ scst_put_full_buf(cmd, buffer);
-+
-+out_done:
-+ if (SCST_EXEC_COMPLETED == res) {
-+ if (!aborted)
-+ cmd->completed = 1;
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT,
-+ SCST_CONTEXT_SAME);
-+ }
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
++static const struct attribute *scst_tg_attrs[] = {
++ &scst_tg_mgmt.attr,
++ &scst_tg_group_id.attr,
++ &scst_tg_preferred.attr,
++ &scst_tg_state.attr,
++ NULL,
++};
+
-+/* No locks, no IRQ or IRQ-disabled context allowed */
-+int scst_check_local_events(struct scst_cmd *cmd)
++int scst_tg_sysfs_add(struct scst_dev_group *dg, struct scst_target_group *tg)
+{
-+ int res, rc;
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+ struct scst_device *dev = cmd->dev;
++ int res;
+
+ TRACE_ENTRY();
-+
-+ /*
-+ * There's no race here, because we need to trace commands sent
-+ * *after* dev_double_ua_possible flag was set.
-+ */
-+ if (unlikely(dev->dev_double_ua_possible))
-+ cmd->double_ua_possible = 1;
-+
-+ /* Reserve check before Unit Attention */
-+ if (unlikely(test_bit(SCST_TGT_DEV_RESERVED,
-+ &tgt_dev->tgt_dev_flags))) {
-+ if ((cmd->op_flags & SCST_REG_RESERVE_ALLOWED) == 0) {
-+ scst_set_cmd_error_status(cmd,
-+ SAM_STAT_RESERVATION_CONFLICT);
-+ goto out_complete;
-+ }
-+ }
-+
-+ if (dev->pr_is_set) {
-+ if (unlikely(!scst_pr_is_cmd_allowed(cmd))) {
-+ scst_set_cmd_error_status(cmd,
-+ SAM_STAT_RESERVATION_CONFLICT);
-+ goto out_complete;
-+ }
-+ }
-+
-+ /*
-+ * Let's check for ABORTED after scst_pr_is_cmd_allowed(), because
-+ * we might sleep for a while there.
-+ */
-+ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
-+ TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
-+ goto out_uncomplete;
-+ }
-+
-+ /* If we had internal bus reset, set the command error unit attention */
-+ if ((dev->scsi_dev != NULL) &&
-+ unlikely(dev->scsi_dev->was_reset)) {
-+ if (scst_is_ua_command(cmd)) {
-+ int done = 0;
-+ /*
-+ * Prevent more than 1 cmd to be triggered by
-+ * was_reset.
-+ */
-+ spin_lock_bh(&dev->dev_lock);
-+ if (dev->scsi_dev->was_reset) {
-+ TRACE(TRACE_MGMT, "was_reset is %d", 1);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_reset_UA));
-+ /*
-+ * It looks like it is safe to clear was_reset
-+ * here.
-+ */
-+ dev->scsi_dev->was_reset = 0;
-+ done = 1;
-+ }
-+ spin_unlock_bh(&dev->dev_lock);
-+
-+ if (done)
-+ goto out_complete;
-+ }
-+ }
-+
-+ if (unlikely(test_bit(SCST_TGT_DEV_UA_PENDING,
-+ &cmd->tgt_dev->tgt_dev_flags))) {
-+ if (scst_is_ua_command(cmd)) {
-+ rc = scst_set_pending_UA(cmd);
-+ if (rc == 0)
-+ goto out_complete;
-+ }
-+ }
-+
-+ res = 0;
-+
++ res = kobject_add(&tg->kobj, dg->tg_kobj, "%s", tg->name);
++ if (res)
++ goto err;
++ res = sysfs_create_files(&tg->kobj, scst_tg_attrs);
++ if (res)
++ goto err;
+out:
+ TRACE_EXIT_RES(res);
+ return res;
-+
-+out_complete:
-+ res = 1;
-+ BUG_ON(!cmd->completed);
-+ goto out;
-+
-+out_uncomplete:
-+ res = -1;
++err:
++ scst_tg_sysfs_del(tg);
+ goto out;
+}
-+EXPORT_SYMBOL_GPL(scst_check_local_events);
-+
-+/* No locks */
-+void scst_inc_expected_sn(struct scst_tgt_dev *tgt_dev, atomic_t *slot)
-+{
-+ if (slot == NULL)
-+ goto inc;
-+
-+ /* Optimized for lockless fast path */
-+
-+ TRACE_SN("Slot %zd, *cur_sn_slot %d", slot - tgt_dev->sn_slots,
-+ atomic_read(slot));
-+
-+ if (!atomic_dec_and_test(slot))
-+ goto out;
-+
-+ TRACE_SN("Slot is 0 (num_free_sn_slots=%d)",
-+ tgt_dev->num_free_sn_slots);
-+ if (tgt_dev->num_free_sn_slots < (int)ARRAY_SIZE(tgt_dev->sn_slots)-1) {
-+ spin_lock_irq(&tgt_dev->sn_lock);
-+ if (likely(tgt_dev->num_free_sn_slots < (int)ARRAY_SIZE(tgt_dev->sn_slots)-1)) {
-+ if (tgt_dev->num_free_sn_slots < 0)
-+ tgt_dev->cur_sn_slot = slot;
-+ /*
-+ * To be in-sync with SIMPLE case in scst_cmd_set_sn()
-+ */
-+ smp_mb();
-+ tgt_dev->num_free_sn_slots++;
-+ TRACE_SN("Incremented num_free_sn_slots (%d)",
-+ tgt_dev->num_free_sn_slots);
-+
-+ }
-+ spin_unlock_irq(&tgt_dev->sn_lock);
-+ }
-+
-+inc:
-+ /*
-+ * No protection of expected_sn is needed, because only one thread
-+ * at time can be here (serialized by sn). Also it is supposed that
-+ * there could not be half-incremented halves.
-+ */
-+ tgt_dev->expected_sn++;
-+ /*
-+ * Write must be before def_cmd_count read to be in sync. with
-+ * scst_post_exec_sn(). See comment in scst_send_for_exec().
-+ */
-+ smp_mb();
-+ TRACE_SN("Next expected_sn: %d", tgt_dev->expected_sn);
-+
-+out:
-+ return;
-+}
+
-+/* No locks */
-+static struct scst_cmd *scst_post_exec_sn(struct scst_cmd *cmd,
-+ bool make_active)
++void scst_tg_sysfs_del(struct scst_target_group *tg)
+{
-+ /* For HQ commands SN is not set */
-+ bool inc_expected_sn = !cmd->inc_expected_sn_on_done &&
-+ cmd->sn_set && !cmd->retry;
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+ struct scst_cmd *res;
-+
+ TRACE_ENTRY();
-+
-+ if (inc_expected_sn)
-+ scst_inc_expected_sn(tgt_dev, cmd->sn_slot);
-+
-+ if (make_active) {
-+ scst_make_deferred_commands_active(tgt_dev);
-+ res = NULL;
-+ } else
-+ res = scst_check_deferred_commands(tgt_dev);
-+
-+ TRACE_EXIT_HRES(res);
-+ return res;
++ sysfs_remove_files(&tg->kobj, scst_tg_attrs);
++ kobject_del(&tg->kobj);
++ TRACE_EXIT();
+}
+
-+/* cmd must be additionally referenced to not die inside */
-+static int scst_do_real_exec(struct scst_cmd *cmd)
-+{
-+ int res = SCST_EXEC_NOT_COMPLETED;
-+ int rc;
-+ struct scst_device *dev = cmd->dev;
-+ struct scst_dev_type *handler = dev->handler;
-+ struct io_context *old_ctx = NULL;
-+ bool ctx_changed = false;
-+
-+ TRACE_ENTRY();
-+
-+ ctx_changed = scst_set_io_context(cmd, &old_ctx);
-+
-+ cmd->state = SCST_CMD_STATE_REAL_EXECUTING;
-+
-+ if (handler->exec) {
-+ TRACE_DBG("Calling dev handler %s exec(%p)",
-+ handler->name, cmd);
-+ TRACE_BUFF_FLAG(TRACE_SND_TOP, "Execing: ", cmd->cdb,
-+ cmd->cdb_len);
-+ scst_set_cur_start(cmd);
-+ res = handler->exec(cmd);
-+ TRACE_DBG("Dev handler %s exec() returned %d",
-+ handler->name, res);
-+
-+ if (res == SCST_EXEC_COMPLETED)
-+ goto out_complete;
-+
-+ scst_set_exec_time(cmd);
-+
-+ BUG_ON(res != SCST_EXEC_NOT_COMPLETED);
-+ }
-+
-+ TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
-+
-+ if (unlikely(dev->scsi_dev == NULL)) {
-+ PRINT_ERROR("Command for virtual device must be "
-+ "processed by device handler (LUN %lld)!",
-+ (long long unsigned int)cmd->lun);
-+ goto out_error;
-+ }
-+
-+ res = scst_check_local_events(cmd);
-+ if (unlikely(res != 0))
-+ goto out_done;
-+
-+ scst_set_cur_start(cmd);
-+
-+ rc = scst_scsi_exec_async(cmd, scst_cmd_done);
-+ if (unlikely(rc != 0)) {
-+ PRINT_ERROR("scst pass-through exec failed: %x", rc);
-+ if ((int)rc == -EINVAL)
-+ PRINT_ERROR("Do you have too low max_sectors on your "
-+ "backend hardware? For success max_sectors must "
-+ "be >= bufflen in sectors (max_sectors %d, "
-+ "bufflen %db, CDB %x). See README for more "
-+ "details.", dev->scsi_dev->host->max_sectors,
-+ cmd->bufflen, cmd->cdb[0]);
-+ goto out_error;
-+ }
-+
-+out_complete:
-+ res = SCST_EXEC_COMPLETED;
-+
-+ if (ctx_changed)
-+ scst_reset_io_context(cmd->tgt_dev, old_ctx);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_error:
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out_done;
-+
-+out_done:
-+ res = SCST_EXEC_COMPLETED;
-+ /* Report the result */
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
-+ goto out_complete;
-+}
++/**
++ ** SCST sysfs device_groups/<dg>/target_groups directory implementation.
++ **/
+
-+static inline int scst_real_exec(struct scst_cmd *cmd)
++static ssize_t scst_dg_tgs_mgmt_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
-+ BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
-+
-+ __scst_cmd_get(cmd);
-+
-+ res = scst_do_real_exec(cmd);
-+ if (likely(res == SCST_EXEC_COMPLETED)) {
-+ scst_post_exec_sn(cmd, true);
-+ if (cmd->dev->scsi_dev != NULL)
-+ generic_unplug_device(
-+ cmd->dev->scsi_dev->request_queue);
-+ } else
-+ BUG();
-+
-+ __scst_cmd_put(cmd);
-+
-+ /* SCST_EXEC_* match SCST_CMD_STATE_RES_* */
++ static const char help[] =
++ "Usage: echo \"create group_name\" >mgmt\n"
++ " echo \"del group_name\" >mgmt\n";
+
-+ TRACE_EXIT_RES(res);
-+ return res;
++ return scnprintf(buf, PAGE_SIZE, help);
+}
+
-+static int scst_do_local_exec(struct scst_cmd *cmd)
++static int scst_dg_tgs_mgmt_store_work_fn(struct scst_sysfs_work_item *w)
+{
++ struct scst_dev_group *dg;
++ char *cmd, *p, *pp, *dev_name;
+ int res;
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
+
+ TRACE_ENTRY();
+
-+ /* Check READ_ONLY device status */
-+ if ((cmd->op_flags & SCST_WRITE_MEDIUM) &&
-+ (tgt_dev->acg_dev->rd_only || cmd->dev->swp ||
-+ cmd->dev->rd_only)) {
-+ PRINT_WARNING("Attempt of write access to read-only device: "
-+ "initiator %s, LUN %lld, op %x",
-+ cmd->sess->initiator_name, cmd->lun, cmd->cdb[0]);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_data_protect));
-+ goto out_done;
-+ }
++ cmd = w->buf;
++ dg = scst_lookup_dg_by_kobj(w->kobj);
++ WARN_ON(!dg);
+
-+ if (!scst_is_cmd_local(cmd)) {
-+ res = SCST_EXEC_NOT_COMPLETED;
-+ goto out;
-+ }
++ p = strchr(cmd, '\n');
++ if (p)
++ *p = '\0';
+
-+ switch (cmd->cdb[0]) {
-+ case RESERVE:
-+ case RESERVE_10:
-+ res = scst_reserve_local(cmd);
-+ break;
-+ case RELEASE:
-+ case RELEASE_10:
-+ res = scst_release_local(cmd);
-+ break;
-+ case PERSISTENT_RESERVE_IN:
-+ res = scst_persistent_reserve_in_local(cmd);
-+ break;
-+ case PERSISTENT_RESERVE_OUT:
-+ res = scst_persistent_reserve_out_local(cmd);
-+ break;
-+ case REPORT_LUNS:
-+ res = scst_report_luns_local(cmd);
-+ break;
-+ case REQUEST_SENSE:
-+ res = scst_request_sense_local(cmd);
-+ break;
-+ default:
-+ res = SCST_EXEC_NOT_COMPLETED;
-+ break;
++ res = -EINVAL;
++ pp = cmd;
++ p = scst_get_next_lexem(&pp);
++ if (strcasecmp(p, "create") == 0 || strcasecmp(p, "add") == 0) {
++ dev_name = scst_get_next_lexem(&pp);
++ if (!*dev_name)
++ goto out;
++ res = scst_tg_add(dg, dev_name);
++ } else if (strcasecmp(p, "del") == 0) {
++ dev_name = scst_get_next_lexem(&pp);
++ if (!*dev_name)
++ goto out;
++ res = scst_tg_remove_by_name(dg, dev_name);
+ }
-+
+out:
++ kobject_put(w->kobj);
+ TRACE_EXIT_RES(res);
+ return res;
-+
-+out_done:
-+ /* Report the result */
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
-+ res = SCST_EXEC_COMPLETED;
-+ goto out;
+}
+
-+static int scst_local_exec(struct scst_cmd *cmd)
++static ssize_t scst_dg_tgs_mgmt_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count)
+{
++ char *cmd;
++ struct scst_sysfs_work_item *work;
+ int res;
+
+ TRACE_ENTRY();
+
-+ BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
-+ BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
-+
-+ __scst_cmd_get(cmd);
-+
-+ res = scst_do_local_exec(cmd);
-+ if (likely(res == SCST_EXEC_NOT_COMPLETED))
-+ cmd->state = SCST_CMD_STATE_REAL_EXEC;
-+ else if (res == SCST_EXEC_COMPLETED)
-+ scst_post_exec_sn(cmd, true);
-+ else
-+ BUG();
-+
-+ __scst_cmd_put(cmd);
-+
-+ /* SCST_EXEC_* match SCST_CMD_STATE_RES_* */
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_exec(struct scst_cmd **active_cmd)
-+{
-+ struct scst_cmd *cmd = *active_cmd;
-+ struct scst_cmd *ref_cmd;
-+ struct scst_device *dev = cmd->dev;
-+ int res = SCST_CMD_STATE_RES_CONT_NEXT, count;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(scst_check_blocked_dev(cmd)))
++ res = -ENOMEM;
++ cmd = kasprintf(GFP_KERNEL, "%.*s", (int)count, buf);
++ if (!cmd)
+ goto out;
+
-+ /* To protect tgt_dev */
-+ ref_cmd = cmd;
-+ __scst_cmd_get(ref_cmd);
-+
-+ count = 0;
-+ while (1) {
-+ int rc;
-+
-+ cmd->sent_for_exec = 1;
-+ /*
-+ * To sync with scst_abort_cmd(). The above assignment must
-+ * be before SCST_CMD_ABORTED test, done later in
-+ * scst_check_local_events(). It's far from here, so the order
-+ * is virtually guaranteed, but let's have it just in case.
-+ */
-+ smp_mb();
-+
-+ cmd->scst_cmd_done = scst_cmd_done_local;
-+ cmd->state = SCST_CMD_STATE_LOCAL_EXEC;
-+
-+ rc = scst_do_local_exec(cmd);
-+ if (likely(rc == SCST_EXEC_NOT_COMPLETED))
-+ /* Nothing to do */;
-+ else {
-+ BUG_ON(rc != SCST_EXEC_COMPLETED);
-+ goto done;
-+ }
-+
-+ cmd->state = SCST_CMD_STATE_REAL_EXEC;
-+
-+ rc = scst_do_real_exec(cmd);
-+ BUG_ON(rc != SCST_EXEC_COMPLETED);
-+
-+done:
-+ count++;
-+
-+ cmd = scst_post_exec_sn(cmd, false);
-+ if (cmd == NULL)
-+ break;
-+
-+ if (unlikely(scst_check_blocked_dev(cmd)))
-+ break;
-+
-+ __scst_cmd_put(ref_cmd);
-+ ref_cmd = cmd;
-+ __scst_cmd_get(ref_cmd);
-+ }
-+
-+ *active_cmd = cmd;
-+
-+ if (count == 0)
-+ goto out_put;
-+
-+ if (dev->scsi_dev != NULL)
-+ generic_unplug_device(dev->scsi_dev->request_queue);
++ res = scst_alloc_sysfs_work(scst_dg_tgs_mgmt_store_work_fn, false,
++ &work);
++ if (res)
++ goto out;
+
-+out_put:
-+ __scst_cmd_put(ref_cmd);
-+ /* !! At this point sess, dev and tgt_dev can be already freed !! */
++ work->buf = cmd;
++ work->kobj = kobj;
++ kobject_get(kobj);
++ res = scst_sysfs_queue_wait_work(work);
+
+out:
++ if (res == 0)
++ res = count;
+ TRACE_EXIT_RES(res);
+ return res;
+}
+
-+static int scst_send_for_exec(struct scst_cmd **active_cmd)
-+{
-+ int res;
-+ struct scst_cmd *cmd = *active_cmd;
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+ typeof(tgt_dev->expected_sn) expected_sn;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(cmd->internal))
-+ goto exec;
-+
-+ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
-+ goto exec;
-+
-+ BUG_ON(!cmd->sn_set);
-+
-+ expected_sn = tgt_dev->expected_sn;
-+ /* Optimized for lockless fast path */
-+ if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
-+ spin_lock_irq(&tgt_dev->sn_lock);
++static struct kobj_attribute scst_dg_tgs_mgmt =
++ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_dg_tgs_mgmt_show,
++ scst_dg_tgs_mgmt_store);
+
-+ tgt_dev->def_cmd_count++;
-+ /*
-+ * Memory barrier is needed here to implement lockless fast
-+ * path. We need the exact order of read and write between
-+ * def_cmd_count and expected_sn. Otherwise, we can miss case,
-+ * when expected_sn was changed to be equal to cmd->sn while
-+ * we are queuing cmd the deferred list after the expected_sn
-+ * below. It will lead to a forever stuck command. But with
-+ * the barrier in such case __scst_check_deferred_commands()
-+ * will be called and it will take sn_lock, so we will be
-+ * synchronized.
-+ */
-+ smp_mb();
++static const struct attribute *scst_dg_tgs_attrs[] = {
++ &scst_dg_tgs_mgmt.attr,
++ NULL,
++};
+
-+ expected_sn = tgt_dev->expected_sn;
-+ if ((cmd->sn != expected_sn) || (tgt_dev->hq_cmd_count > 0)) {
-+ if (unlikely(test_bit(SCST_CMD_ABORTED,
-+ &cmd->cmd_flags))) {
-+ /* Necessary to allow aborting out of sn cmds */
-+ TRACE_MGMT_DBG("Aborting out of sn cmd %p "
-+ "(tag %llu, sn %u)", cmd,
-+ (long long unsigned)cmd->tag, cmd->sn);
-+ tgt_dev->def_cmd_count--;
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ res = SCST_CMD_STATE_RES_CONT_SAME;
-+ } else {
-+ TRACE_SN("Deferring cmd %p (sn=%d, set %d, "
-+ "expected_sn=%d)", cmd, cmd->sn,
-+ cmd->sn_set, expected_sn);
-+ list_add_tail(&cmd->sn_cmd_list_entry,
-+ &tgt_dev->deferred_cmd_list);
-+ res = SCST_CMD_STATE_RES_CONT_NEXT;
-+ }
-+ spin_unlock_irq(&tgt_dev->sn_lock);
-+ goto out;
-+ } else {
-+ TRACE_SN("Somebody incremented expected_sn %d, "
-+ "continuing", expected_sn);
-+ tgt_dev->def_cmd_count--;
-+ spin_unlock_irq(&tgt_dev->sn_lock);
-+ }
-+ }
++/**
++ ** SCST sysfs device_groups directory implementation.
++ **/
+
-+exec:
-+ res = scst_exec(active_cmd);
++int scst_dg_sysfs_add(struct kobject *parent, struct scst_dev_group *dg)
++{
++ int res;
+
++ dg->dev_kobj = NULL;
++ dg->tg_kobj = NULL;
++ res = kobject_add(&dg->kobj, parent, "%s", dg->name);
++ if (res)
++ goto err;
++ res = -EEXIST;
++ dg->dev_kobj = kobject_create_and_add("devices", &dg->kobj);
++ if (!dg->dev_kobj)
++ goto err;
++ res = sysfs_create_files(dg->dev_kobj, scst_dg_devs_attrs);
++ if (res)
++ goto err;
++ dg->tg_kobj = kobject_create_and_add("target_groups", &dg->kobj);
++ if (!dg->tg_kobj)
++ goto err;
++ res = sysfs_create_files(dg->tg_kobj, scst_dg_tgs_attrs);
++ if (res)
++ goto err;
+out:
-+ TRACE_EXIT_HRES(res);
+ return res;
++err:
++ scst_dg_sysfs_del(dg);
++ goto out;
+}
+
-+/* No locks supposed to be held */
-+static int scst_check_sense(struct scst_cmd *cmd)
++void scst_dg_sysfs_del(struct scst_dev_group *dg)
+{
-+ int res = 0;
-+ struct scst_device *dev = cmd->dev;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(cmd->ua_ignore))
-+ goto out;
-+
-+ /* If we had internal bus reset behind us, set the command error UA */
-+ if ((dev->scsi_dev != NULL) &&
-+ unlikely(cmd->host_status == DID_RESET) &&
-+ scst_is_ua_command(cmd)) {
-+ TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
-+ dev->scsi_dev->was_reset, cmd->host_status);
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_reset_UA));
-+ /* It looks like it is safe to clear was_reset here */
-+ dev->scsi_dev->was_reset = 0;
++ if (dg->tg_kobj) {
++ sysfs_remove_files(dg->tg_kobj, scst_dg_tgs_attrs);
++ kobject_del(dg->tg_kobj);
++ kobject_put(dg->tg_kobj);
++ dg->tg_kobj = NULL;
+ }
-+
-+ if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
-+ SCST_SENSE_VALID(cmd->sense)) {
-+ PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
-+ cmd->sense_valid_len);
-+
-+ /* Check Unit Attention Sense Key */
-+ if (scst_is_ua_sense(cmd->sense, cmd->sense_valid_len)) {
-+ if (scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
-+ SCST_SENSE_ASC_VALID,
-+ 0, SCST_SENSE_ASC_UA_RESET, 0)) {
-+ if (cmd->double_ua_possible) {
-+ TRACE_MGMT_DBG("Double UA "
-+ "detected for device %p", dev);
-+ TRACE_MGMT_DBG("Retrying cmd"
-+ " %p (tag %llu)", cmd,
-+ (long long unsigned)cmd->tag);
-+
-+ cmd->status = 0;
-+ cmd->msg_status = 0;
-+ cmd->host_status = DID_OK;
-+ cmd->driver_status = 0;
-+ cmd->completed = 0;
-+
-+ mempool_free(cmd->sense,
-+ scst_sense_mempool);
-+ cmd->sense = NULL;
-+
-+ scst_check_restore_sg_buff(cmd);
-+
-+ BUG_ON(cmd->dbl_ua_orig_resp_data_len < 0);
-+ cmd->data_direction =
-+ cmd->dbl_ua_orig_data_direction;
-+ cmd->resp_data_len =
-+ cmd->dbl_ua_orig_resp_data_len;
-+
-+ cmd->state = SCST_CMD_STATE_REAL_EXEC;
-+ cmd->retry = 1;
-+ res = 1;
-+ goto out;
-+ }
-+ }
-+ scst_dev_check_set_UA(dev, cmd, cmd->sense,
-+ cmd->sense_valid_len);
-+ }
-+ }
-+
-+ if (unlikely(cmd->double_ua_possible)) {
-+ if (scst_is_ua_command(cmd)) {
-+ TRACE_MGMT_DBG("Clearing dbl_ua_possible flag (dev %p, "
-+ "cmd %p)", dev, cmd);
-+ /*
-+ * Lock used to protect other flags in the bitfield
-+ * (just in case, actually). Those flags can't be
-+ * changed in parallel, because the device is
-+ * serialized.
-+ */
-+ spin_lock_bh(&dev->dev_lock);
-+ dev->dev_double_ua_possible = 0;
-+ spin_unlock_bh(&dev->dev_lock);
-+ }
++ if (dg->dev_kobj) {
++ sysfs_remove_files(dg->dev_kobj, scst_dg_devs_attrs);
++ kobject_del(dg->dev_kobj);
++ kobject_put(dg->dev_kobj);
++ dg->dev_kobj = NULL;
+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
++ kobject_del(&dg->kobj);
+}
+
-+static int scst_check_auto_sense(struct scst_cmd *cmd)
++static ssize_t scst_device_groups_mgmt_show(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ char *buf)
+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
-+ (!SCST_SENSE_VALID(cmd->sense) ||
-+ SCST_NO_SENSE(cmd->sense))) {
-+ TRACE(TRACE_SCSI|TRACE_MINOR_AND_MGMT_DBG, "CHECK_CONDITION, "
-+ "but no sense: cmd->status=%x, cmd->msg_status=%x, "
-+ "cmd->host_status=%x, cmd->driver_status=%x (cmd %p)",
-+ cmd->status, cmd->msg_status, cmd->host_status,
-+ cmd->driver_status, cmd);
-+ res = 1;
-+ } else if (unlikely(cmd->host_status)) {
-+ if ((cmd->host_status == DID_REQUEUE) ||
-+ (cmd->host_status == DID_IMM_RETRY) ||
-+ (cmd->host_status == DID_SOFT_ERROR) ||
-+ (cmd->host_status == DID_ABORT)) {
-+ scst_set_busy(cmd);
-+ } else {
-+ TRACE(TRACE_SCSI|TRACE_MINOR_AND_MGMT_DBG, "Host "
-+ "status %x received, returning HARDWARE ERROR "
-+ "instead (cmd %p)", cmd->host_status, cmd);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ }
-+ }
++ static const char help[] =
++ "Usage: echo \"create group_name\" >mgmt\n"
++ " echo \"del group_name\" >mgmt\n";
+
-+ TRACE_EXIT_RES(res);
-+ return res;
++ return scnprintf(buf, PAGE_SIZE, help);
+}
+
-+static int scst_pre_dev_done(struct scst_cmd *cmd)
++static ssize_t scst_device_groups_mgmt_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count)
+{
-+ int res = SCST_CMD_STATE_RES_CONT_SAME, rc;
++ int res;
++ char *p, *pp, *input, *group_name;
+
+ TRACE_ENTRY();
+
-+ if (unlikely(scst_check_auto_sense(cmd))) {
-+ PRINT_INFO("Command finished with CHECK CONDITION, but "
-+ "without sense data (opcode 0x%x), issuing "
-+ "REQUEST SENSE", cmd->cdb[0]);
-+ rc = scst_prepare_request_sense(cmd);
-+ if (rc == 0)
-+ res = SCST_CMD_STATE_RES_CONT_NEXT;
-+ else {
-+ PRINT_ERROR("%s", "Unable to issue REQUEST SENSE, "
-+ "returning HARDWARE ERROR");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ }
-+ goto out;
-+ } else if (unlikely(scst_check_sense(cmd))) {
-+ /*
-+ * We can't allow atomic command on the exec stages, so
-+ * restart to the thread
-+ */
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+ }
-+
-+ if (likely(scsi_status_is_good(cmd->status))) {
-+ unsigned char type = cmd->dev->type;
-+ if (unlikely((cmd->cdb[0] == MODE_SENSE ||
-+ cmd->cdb[0] == MODE_SENSE_10)) &&
-+ (cmd->tgt_dev->acg_dev->rd_only || cmd->dev->swp ||
-+ cmd->dev->rd_only) &&
-+ (type == TYPE_DISK ||
-+ type == TYPE_WORM ||
-+ type == TYPE_MOD ||
-+ type == TYPE_TAPE)) {
-+ int32_t length;
-+ uint8_t *address;
-+ bool err = false;
-+
-+ length = scst_get_buf_first(cmd, &address);
-+ if (length < 0) {
-+ PRINT_ERROR("%s", "Unable to get "
-+ "MODE_SENSE buffer");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(
-+ scst_sense_hardw_error));
-+ err = true;
-+ } else if (length > 2 && cmd->cdb[0] == MODE_SENSE)
-+ address[2] |= 0x80; /* Write Protect*/
-+ else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
-+ address[3] |= 0x80; /* Write Protect*/
-+ scst_put_buf(cmd, address);
-+
-+ if (err)
-+ goto out;
-+ }
-+
-+ /*
-+ * Check and clear NormACA option for the device, if necessary,
-+ * since we don't support ACA
-+ */
-+ if (unlikely((cmd->cdb[0] == INQUIRY)) &&
-+ /* Std INQUIRY data (no EVPD) */
-+ !(cmd->cdb[1] & SCST_INQ_EVPD) &&
-+ (cmd->resp_data_len > SCST_INQ_BYTE3)) {
-+ uint8_t *buffer;
-+ int buflen;
-+ bool err = false;
-+
-+ /* ToDo: all pages ?? */
-+ buflen = scst_get_buf_first(cmd, &buffer);
-+ if (buflen > SCST_INQ_BYTE3) {
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
-+ PRINT_INFO("NormACA set for device: "
-+ "lun=%lld, type 0x%02x. Clear it, "
-+ "since it's unsupported.",
-+ (long long unsigned int)cmd->lun,
-+ buffer[0]);
-+ }
-+#endif
-+ buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
-+ } else if (buflen != 0) {
-+ PRINT_ERROR("%s", "Unable to get INQUIRY "
-+ "buffer");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ err = true;
-+ }
-+ if (buflen > 0)
-+ scst_put_buf(cmd, buffer);
-+
-+ if (err)
-+ goto out;
-+ }
++ input = kasprintf(GFP_KERNEL, "%.*s", (int)count, buf);
++ pp = input;
++ p = strchr(input, '\n');
++ if (p)
++ *p = '\0';
+
-+ if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
-+ (cmd->cdb[0] == MODE_SELECT_10) ||
-+ (cmd->cdb[0] == LOG_SELECT))) {
-+ TRACE(TRACE_SCSI,
-+ "MODE/LOG SELECT succeeded (LUN %lld)",
-+ (long long unsigned int)cmd->lun);
-+ cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
++ res = -EINVAL;
++ p = scst_get_next_lexem(&pp);
++ if (strcasecmp(p, "create") == 0 || strcasecmp(p, "add") == 0) {
++ group_name = scst_get_next_lexem(&pp);
++ if (!*group_name)
+ goto out;
-+ }
-+ } else {
-+ TRACE(TRACE_SCSI, "cmd %p not succeeded with status %x",
-+ cmd, cmd->status);
-+
-+ if ((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10)) {
-+ if (!test_bit(SCST_TGT_DEV_RESERVED,
-+ &cmd->tgt_dev->tgt_dev_flags)) {
-+ struct scst_tgt_dev *tgt_dev_tmp;
-+ struct scst_device *dev = cmd->dev;
-+
-+ TRACE(TRACE_SCSI, "RESERVE failed lun=%lld, "
-+ "status=%x",
-+ (long long unsigned int)cmd->lun,
-+ cmd->status);
-+ PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
-+ cmd->sense_valid_len);
-+
-+ /* Clearing the reservation */
-+ spin_lock_bh(&dev->dev_lock);
-+ list_for_each_entry(tgt_dev_tmp,
-+ &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ clear_bit(SCST_TGT_DEV_RESERVED,
-+ &tgt_dev_tmp->tgt_dev_flags);
-+ }
-+ dev->dev_reserved = 0;
-+ spin_unlock_bh(&dev->dev_lock);
-+ }
-+ }
-+
-+ /* Check for MODE PARAMETERS CHANGED UA */
-+ if ((cmd->dev->scsi_dev != NULL) &&
-+ (cmd->status == SAM_STAT_CHECK_CONDITION) &&
-+ scst_is_ua_sense(cmd->sense, cmd->sense_valid_len) &&
-+ scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
-+ SCST_SENSE_ASCx_VALID,
-+ 0, 0x2a, 0x01)) {
-+ TRACE(TRACE_SCSI, "MODE PARAMETERS CHANGED UA (lun "
-+ "%lld)", (long long unsigned int)cmd->lun);
-+ cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
++ res = scst_dg_add(scst_device_groups_kobj, group_name);
++ } else if (strcasecmp(p, "del") == 0) {
++ group_name = scst_get_next_lexem(&pp);
++ if (!*group_name)
+ goto out;
-+ }
++ res = scst_dg_remove(group_name);
+ }
-+
-+ cmd->state = SCST_CMD_STATE_DEV_DONE;
-+
+out:
++ kfree(input);
++ if (res == 0)
++ res = count;
+ TRACE_EXIT_RES(res);
+ return res;
+}
+
-+static int scst_mode_select_checks(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_RES_CONT_SAME;
-+
-+ TRACE_ENTRY();
-+
-+ if (likely(scsi_status_is_good(cmd->status))) {
-+ int atomic = scst_cmd_atomic(cmd);
-+ if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
-+ (cmd->cdb[0] == MODE_SELECT_10) ||
-+ (cmd->cdb[0] == LOG_SELECT))) {
-+ struct scst_device *dev = cmd->dev;
-+ int sl;
-+ uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
-+
-+ if (atomic && (dev->scsi_dev != NULL)) {
-+ TRACE_DBG("%s", "MODE/LOG SELECT: thread "
-+ "context required");
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+ }
-+
-+ TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
-+ "setting the SELECT UA (lun=%lld)",
-+ (long long unsigned int)cmd->lun);
++static struct kobj_attribute scst_device_groups_mgmt =
++ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_device_groups_mgmt_show,
++ scst_device_groups_mgmt_store);
+
-+ spin_lock_bh(&dev->dev_lock);
-+ if (cmd->cdb[0] == LOG_SELECT) {
-+ sl = scst_set_sense(sense_buffer,
-+ sizeof(sense_buffer),
-+ dev->d_sense,
-+ UNIT_ATTENTION, 0x2a, 0x02);
-+ } else {
-+ sl = scst_set_sense(sense_buffer,
-+ sizeof(sense_buffer),
-+ dev->d_sense,
-+ UNIT_ATTENTION, 0x2a, 0x01);
-+ }
-+ scst_dev_check_set_local_UA(dev, cmd, sense_buffer, sl);
-+ spin_unlock_bh(&dev->dev_lock);
-+
-+ if (dev->scsi_dev != NULL)
-+ scst_obtain_device_parameters(dev);
-+ }
-+ } else if ((cmd->status == SAM_STAT_CHECK_CONDITION) &&
-+ scst_is_ua_sense(cmd->sense, cmd->sense_valid_len) &&
-+ /* mode parameters changed */
-+ (scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
-+ SCST_SENSE_ASCx_VALID,
-+ 0, 0x2a, 0x01) ||
-+ scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
-+ SCST_SENSE_ASC_VALID,
-+ 0, 0x29, 0) /* reset */ ||
-+ scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
-+ SCST_SENSE_ASC_VALID,
-+ 0, 0x28, 0) /* medium changed */ ||
-+ /* cleared by another ini (just in case) */
-+ scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
-+ SCST_SENSE_ASC_VALID,
-+ 0, 0x2F, 0))) {
-+ int atomic = scst_cmd_atomic(cmd);
-+ if (atomic) {
-+ TRACE_DBG("Possible parameters changed UA %x: "
-+ "thread context required", cmd->sense[12]);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+ }
-+
-+ TRACE(TRACE_SCSI, "Possible parameters changed UA %x "
-+ "(LUN %lld): getting new parameters", cmd->sense[12],
-+ (long long unsigned int)cmd->lun);
-+
-+ scst_obtain_device_parameters(cmd->dev);
-+ } else
-+ BUG();
-+
-+ cmd->state = SCST_CMD_STATE_DEV_DONE;
-+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
-+
-+static void scst_inc_check_expected_sn(struct scst_cmd *cmd)
-+{
-+ if (likely(cmd->sn_set))
-+ scst_inc_expected_sn(cmd->tgt_dev, cmd->sn_slot);
++static const struct attribute *scst_device_groups_attrs[] = {
++ &scst_device_groups_mgmt.attr,
++ NULL,
++};
+
-+ scst_make_deferred_commands_active(cmd->tgt_dev);
-+}
++/**
++ ** SCST sysfs root directory implementation
++ **/
+
-+static int scst_dev_done(struct scst_cmd *cmd)
++static ssize_t scst_threads_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
+{
-+ int res = SCST_CMD_STATE_RES_CONT_SAME;
-+ int state;
-+ struct scst_device *dev = cmd->dev;
++ int count;
+
+ TRACE_ENTRY();
+
-+ state = SCST_CMD_STATE_PRE_XMIT_RESP;
-+
-+ if (likely(!scst_is_cmd_fully_local(cmd)) &&
-+ likely(dev->handler->dev_done != NULL)) {
-+ int rc;
-+
-+ if (unlikely(!dev->handler->dev_done_atomic &&
-+ scst_cmd_atomic(cmd))) {
-+ /*
-+ * It shouldn't be because of the SCST_TGT_DEV_AFTER_*
-+ * optimization.
-+ */
-+ TRACE_MGMT_DBG("Dev handler %s dev_done() needs thread "
-+ "context, rescheduling", dev->handler->name);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+ }
-+
-+ TRACE_DBG("Calling dev handler %s dev_done(%p)",
-+ dev->handler->name, cmd);
-+ scst_set_cur_start(cmd);
-+ rc = dev->handler->dev_done(cmd);
-+ scst_set_dev_done_time(cmd);
-+ TRACE_DBG("Dev handler %s dev_done() returned %d",
-+ dev->handler->name, rc);
-+ if (rc != SCST_CMD_STATE_DEFAULT)
-+ state = rc;
-+ }
-+
-+ switch (state) {
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ case SCST_CMD_STATE_PRE_XMIT_RESP:
-+ case SCST_CMD_STATE_PARSE:
-+ case SCST_CMD_STATE_PREPARE_SPACE:
-+ case SCST_CMD_STATE_RDY_TO_XFER:
-+ case SCST_CMD_STATE_TGT_PRE_EXEC:
-+ case SCST_CMD_STATE_SEND_FOR_EXEC:
-+ case SCST_CMD_STATE_LOCAL_EXEC:
-+ case SCST_CMD_STATE_REAL_EXEC:
-+ case SCST_CMD_STATE_PRE_DEV_DONE:
-+ case SCST_CMD_STATE_MODE_SELECT_CHECKS:
-+ case SCST_CMD_STATE_DEV_DONE:
-+ case SCST_CMD_STATE_XMIT_RESP:
-+ case SCST_CMD_STATE_FINISHED:
-+ case SCST_CMD_STATE_FINISHED_INTERNAL:
-+#else
-+ default:
-+#endif
-+ cmd->state = state;
-+ break;
-+ case SCST_CMD_STATE_NEED_THREAD_CTX:
-+ TRACE_DBG("Dev handler %s dev_done() requested "
-+ "thread context, rescheduling",
-+ dev->handler->name);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ break;
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ default:
-+ if (state >= 0) {
-+ PRINT_ERROR("Dev handler %s dev_done() returned "
-+ "invalid cmd state %d",
-+ dev->handler->name, state);
-+ } else {
-+ PRINT_ERROR("Dev handler %s dev_done() returned "
-+ "error %d", dev->handler->name,
-+ state);
-+ }
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ break;
-+#endif
-+ }
-+
-+ scst_check_unblock_dev(cmd);
-+
-+ if (cmd->inc_expected_sn_on_done && cmd->sent_for_exec)
-+ scst_inc_check_expected_sn(cmd);
-+
-+ if (unlikely(cmd->internal))
-+ cmd->state = SCST_CMD_STATE_FINISHED_INTERNAL;
-+
-+#ifndef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ if (cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) {
-+ /* We can't allow atomic command on the exec stages */
-+ if (scst_cmd_atomic(cmd)) {
-+ switch (state) {
-+ case SCST_CMD_STATE_TGT_PRE_EXEC:
-+ case SCST_CMD_STATE_SEND_FOR_EXEC:
-+ case SCST_CMD_STATE_LOCAL_EXEC:
-+ case SCST_CMD_STATE_REAL_EXEC:
-+ TRACE_DBG("Atomic context and redirect, "
-+ "rescheduling (cmd %p)", cmd);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ break;
-+ }
-+ }
-+ }
-+#endif
++ count = sprintf(buf, "%d\n%s", scst_main_cmd_threads.nr_threads,
++ (scst_main_cmd_threads.nr_threads != scst_threads) ?
++ SCST_SYSFS_KEY_MARK "\n" : "");
+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
++ TRACE_EXIT();
++ return count;
+}
+
-+static int scst_pre_xmit_response(struct scst_cmd *cmd)
++static int scst_process_threads_store(int newtn)
+{
+ int res;
++ long oldtn, delta;
+
+ TRACE_ENTRY();
+
-+ EXTRACHECKS_BUG_ON(cmd->internal);
-+
-+#ifdef CONFIG_SCST_DEBUG_TM
-+ if (cmd->tm_dbg_delayed &&
-+ !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
-+ if (scst_cmd_atomic(cmd)) {
-+ TRACE_MGMT_DBG("%s",
-+ "DEBUG_TM delayed cmd needs a thread");
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ return res;
-+ }
-+ TRACE_MGMT_DBG("Delaying cmd %p (tag %llu) for 1 second",
-+ cmd, cmd->tag);
-+ schedule_timeout_uninterruptible(HZ);
-+ }
-+#endif
-+
-+ if (likely(cmd->tgt_dev != NULL)) {
-+ /*
-+ * Those counters protect from not getting too long processing
-+ * latency, so we should decrement them after cmd completed.
-+ */
-+ atomic_dec(&cmd->tgt_dev->tgt_dev_cmd_count);
-+#ifdef CONFIG_SCST_PER_DEVICE_CMD_COUNT_LIMIT
-+ atomic_dec(&cmd->dev->dev_cmd_count);
-+#endif
-+#ifdef CONFIG_SCST_ORDERED_READS
-+ /* If expected values not set, expected direction is UNKNOWN */
-+ if (cmd->expected_data_direction & SCST_DATA_WRITE)
-+ atomic_dec(&cmd->dev->write_cmd_count);
-+#endif
-+ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
-+ scst_on_hq_cmd_response(cmd);
-+
-+ if (unlikely(!cmd->sent_for_exec)) {
-+ TRACE_SN("cmd %p was not sent to mid-lev"
-+ " (sn %d, set %d)",
-+ cmd, cmd->sn, cmd->sn_set);
-+ scst_unblock_deferred(cmd->tgt_dev, cmd);
-+ cmd->sent_for_exec = 1;
-+ }
-+ }
-+
-+ cmd->done = 1;
-+ smp_mb(); /* to sync with scst_abort_cmd() */
-+
-+ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
-+ scst_xmit_process_aborted_cmd(cmd);
-+ else if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION))
-+ scst_store_sense(cmd);
++ TRACE_DBG("newtn %d", newtn);
+
-+ if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
-+ TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %llu), "
-+ "skipping", cmd, (long long unsigned int)cmd->tag);
-+ cmd->state = SCST_CMD_STATE_FINISHED;
-+ res = SCST_CMD_STATE_RES_CONT_SAME;
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res != 0)
+ goto out;
-+ }
-+
-+ if (unlikely(cmd->resid_possible))
-+ scst_adjust_resp_data_len(cmd);
-+ else
-+ cmd->adjusted_resp_data_len = cmd->resp_data_len;
+
-+ cmd->state = SCST_CMD_STATE_XMIT_RESP;
-+ res = SCST_CMD_STATE_RES_CONT_SAME;
-+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
-+
-+static int scst_xmit_response(struct scst_cmd *cmd)
-+{
-+ struct scst_tgt_template *tgtt = cmd->tgtt;
-+ int res, rc;
-+
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(cmd->internal);
++ oldtn = scst_main_cmd_threads.nr_threads;
+
-+ if (unlikely(!tgtt->xmit_response_atomic &&
-+ scst_cmd_atomic(cmd))) {
-+ /*
-+ * It shouldn't be because of the SCST_TGT_DEV_AFTER_*
-+ * optimization.
-+ */
-+ TRACE_MGMT_DBG("Target driver %s xmit_response() needs thread "
-+ "context, rescheduling", tgtt->name);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
++ delta = newtn - oldtn;
++ if (delta < 0)
++ scst_del_threads(&scst_main_cmd_threads, -delta);
++ else {
++ res = scst_add_threads(&scst_main_cmd_threads, NULL, NULL, delta);
++ if (res != 0)
++ goto out_up;
+ }
+
-+ while (1) {
-+ int finished_cmds = atomic_read(&cmd->tgt->finished_cmds);
-+
-+ res = SCST_CMD_STATE_RES_CONT_NEXT;
-+ cmd->state = SCST_CMD_STATE_XMIT_WAIT;
-+
-+ TRACE_DBG("Calling xmit_response(%p)", cmd);
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ if (trace_flag & TRACE_SND_BOT) {
-+ int i;
-+ struct scatterlist *sg;
-+ if (cmd->tgt_sg != NULL)
-+ sg = cmd->tgt_sg;
-+ else
-+ sg = cmd->sg;
-+ if (sg != NULL) {
-+ TRACE(TRACE_SND_BOT, "Xmitting data for cmd %p "
-+ "(sg_cnt %d, sg %p, sg[0].page %p)",
-+ cmd, cmd->tgt_sg_cnt, sg,
-+ (void *)sg_page(&sg[0]));
-+ for (i = 0; i < cmd->tgt_sg_cnt; ++i) {
-+ PRINT_BUFF_FLAG(TRACE_SND_BOT,
-+ "Xmitting sg", sg_virt(&sg[i]),
-+ sg[i].length);
-+ }
-+ }
-+ }
-+#endif
-+
-+ if (tgtt->on_hw_pending_cmd_timeout != NULL) {
-+ struct scst_session *sess = cmd->sess;
-+ cmd->hw_pending_start = jiffies;
-+ cmd->cmd_hw_pending = 1;
-+ if (!test_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags)) {
-+ TRACE_DBG("Sched HW pending work for sess %p "
-+ "(max time %d)", sess,
-+ tgtt->max_hw_pending_time);
-+ set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED,
-+ &sess->sess_aflags);
-+ schedule_delayed_work(&sess->hw_pending_work,
-+ tgtt->max_hw_pending_time * HZ);
-+ }
-+ }
-+
-+ scst_set_cur_start(cmd);
-+
-+#ifdef CONFIG_SCST_DEBUG_RETRY
-+ if (((scst_random() % 100) == 77))
-+ rc = SCST_TGT_RES_QUEUE_FULL;
-+ else
-+#endif
-+ rc = tgtt->xmit_response(cmd);
-+ TRACE_DBG("xmit_response() returned %d", rc);
-+
-+ if (likely(rc == SCST_TGT_RES_SUCCESS))
-+ goto out;
-+
-+ scst_set_xmit_time(cmd);
-+
-+ cmd->cmd_hw_pending = 0;
-+
-+ /* Restore the previous state */
-+ cmd->state = SCST_CMD_STATE_XMIT_RESP;
-+
-+ switch (rc) {
-+ case SCST_TGT_RES_QUEUE_FULL:
-+ if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
-+ break;
-+ else
-+ continue;
-+
-+ case SCST_TGT_RES_NEED_THREAD_CTX:
-+ TRACE_DBG("Target driver %s xmit_response() "
-+ "requested thread context, rescheduling",
-+ tgtt->name);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ break;
++ PRINT_INFO("Changed cmd threads num: old %ld, new %d", oldtn, newtn);
+
-+ default:
-+ goto out_error;
-+ }
-+ break;
-+ }
++out_up:
++ mutex_unlock(&scst_mutex);
+
+out:
-+ /* Caution: cmd can be already dead here */
-+ TRACE_EXIT_HRES(res);
++ TRACE_EXIT_RES(res);
+ return res;
-+
-+out_error:
-+ if (rc == SCST_TGT_RES_FATAL_ERROR) {
-+ PRINT_ERROR("Target driver %s xmit_response() returned "
-+ "fatal error", tgtt->name);
-+ } else {
-+ PRINT_ERROR("Target driver %s xmit_response() returned "
-+ "invalid value %d", tgtt->name, rc);
-+ }
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ cmd->state = SCST_CMD_STATE_FINISHED;
-+ res = SCST_CMD_STATE_RES_CONT_SAME;
-+ goto out;
+}
+
-+/**
-+ * scst_tgt_cmd_done() - the command's processing done
-+ * @cmd: SCST command
-+ * @pref_context: preferred command execution context
-+ *
-+ * Description:
-+ * Notifies SCST that the driver sent the response and the command
-+ * can be freed now. Don't forget to set the delivery status, if it
-+ * isn't success, using scst_set_delivery_status() before calling
-+ * this function. The third argument sets preferred command execition
-+ * context (see SCST_CONTEXT_* constants for details)
-+ */
-+void scst_tgt_cmd_done(struct scst_cmd *cmd,
-+ enum scst_exec_context pref_context)
++static int scst_threads_store_work_fn(struct scst_sysfs_work_item *work)
+{
-+ TRACE_ENTRY();
-+
-+ BUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);
-+
-+ scst_set_xmit_time(cmd);
-+
-+ cmd->cmd_hw_pending = 0;
-+
-+ if (unlikely(cmd->tgt_dev == NULL))
-+ pref_context = SCST_CONTEXT_THREAD;
-+
-+ cmd->state = SCST_CMD_STATE_FINISHED;
-+
-+ scst_process_redirect_cmd(cmd, pref_context, 1);
-+
-+ TRACE_EXIT();
-+ return;
++ return scst_process_threads_store(work->new_threads_num);
+}
-+EXPORT_SYMBOL(scst_tgt_cmd_done);
+
-+static int scst_finish_cmd(struct scst_cmd *cmd)
++static ssize_t scst_threads_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int res;
-+ struct scst_session *sess = cmd->sess;
-+
-+ TRACE_ENTRY();
-+
-+ scst_update_lat_stats(cmd);
-+
-+ if (unlikely(cmd->delivery_status != SCST_CMD_DELIVERY_SUCCESS)) {
-+ if ((cmd->tgt_dev != NULL) &&
-+ scst_is_ua_sense(cmd->sense, cmd->sense_valid_len)) {
-+ /* This UA delivery failed, so we need to requeue it */
-+ if (scst_cmd_atomic(cmd) &&
-+ scst_is_ua_global(cmd->sense, cmd->sense_valid_len)) {
-+ TRACE_MGMT_DBG("Requeuing of global UA for "
-+ "failed cmd %p needs a thread", cmd);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+ }
-+ scst_requeue_ua(cmd);
-+ }
-+ }
-+
-+ atomic_dec(&sess->sess_cmd_count);
-+
-+ spin_lock_irq(&sess->sess_list_lock);
-+ list_del(&cmd->sess_cmd_list_entry);
-+ spin_unlock_irq(&sess->sess_list_lock);
-+
-+ cmd->finished = 1;
-+ smp_mb(); /* to sync with scst_abort_cmd() */
-+
-+ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
-+ TRACE_MGMT_DBG("Aborted cmd %p finished (cmd_ref %d, "
-+ "scst_cmd_count %d)", cmd, atomic_read(&cmd->cmd_ref),
-+ atomic_read(&scst_cmd_count));
-+
-+ scst_finish_cmd_mgmt(cmd);
-+ }
-+
-+ __scst_cmd_put(cmd);
-+
-+ res = SCST_CMD_STATE_RES_CONT_NEXT;
-+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
-+
-+/*
-+ * No locks, but it must be externally serialized (see comment for
-+ * scst_cmd_init_done() in scst.h)
-+ */
-+static void scst_cmd_set_sn(struct scst_cmd *cmd)
-+{
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+ unsigned long flags;
++ long newtn;
++ struct scst_sysfs_work_item *work;
+
+ TRACE_ENTRY();
+
-+ if (scst_is_implicit_hq(cmd) &&
-+ likely(cmd->queue_type == SCST_CMD_QUEUE_SIMPLE)) {
-+ TRACE_SN("Implicit HQ cmd %p", cmd);
-+ cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
-+ }
-+
-+ EXTRACHECKS_BUG_ON(cmd->sn_set || cmd->hq_cmd_inced);
-+
-+ /* Optimized for lockless fast path */
-+
-+ scst_check_debug_sn(cmd);
-+
-+#ifdef CONFIG_SCST_STRICT_SERIALIZING
-+ cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
-+#endif
-+
-+ if (cmd->dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) {
-+ /*
-+ * Not the best way, but good enough until there is a
-+ * possibility to specify queue type during pass-through
-+ * commands submission.
-+ */
-+ cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
-+ }
-+
-+ switch (cmd->queue_type) {
-+ case SCST_CMD_QUEUE_SIMPLE:
-+ case SCST_CMD_QUEUE_UNTAGGED:
-+#ifdef CONFIG_SCST_ORDERED_READS
-+ if (scst_cmd_is_expected_set(cmd)) {
-+ if ((cmd->expected_data_direction == SCST_DATA_READ) &&
-+ (atomic_read(&cmd->dev->write_cmd_count) == 0))
-+ goto ordered;
-+ } else
-+ goto ordered;
-+#endif
-+ if (likely(tgt_dev->num_free_sn_slots >= 0)) {
-+ /*
-+ * atomic_inc_return() implies memory barrier to sync
-+ * with scst_inc_expected_sn()
-+ */
-+ if (atomic_inc_return(tgt_dev->cur_sn_slot) == 1) {
-+ tgt_dev->curr_sn++;
-+ TRACE_SN("Incremented curr_sn %d",
-+ tgt_dev->curr_sn);
-+ }
-+ cmd->sn_slot = tgt_dev->cur_sn_slot;
-+ cmd->sn = tgt_dev->curr_sn;
-+
-+ tgt_dev->prev_cmd_ordered = 0;
-+ } else {
-+ TRACE(TRACE_MINOR, "***WARNING*** Not enough SN slots "
-+ "%zd", ARRAY_SIZE(tgt_dev->sn_slots));
-+ goto ordered;
-+ }
-+ break;
-+
-+ case SCST_CMD_QUEUE_ORDERED:
-+ TRACE_SN("ORDERED cmd %p (op %x)", cmd, cmd->cdb[0]);
-+ordered:
-+ if (!tgt_dev->prev_cmd_ordered) {
-+ spin_lock_irqsave(&tgt_dev->sn_lock, flags);
-+ if (tgt_dev->num_free_sn_slots >= 0) {
-+ tgt_dev->num_free_sn_slots--;
-+ if (tgt_dev->num_free_sn_slots >= 0) {
-+ int i = 0;
-+ /* Commands can finish in any order, so
-+ * we don't know which slot is empty.
-+ */
-+ while (1) {
-+ tgt_dev->cur_sn_slot++;
-+ if (tgt_dev->cur_sn_slot ==
-+ tgt_dev->sn_slots + ARRAY_SIZE(tgt_dev->sn_slots))
-+ tgt_dev->cur_sn_slot = tgt_dev->sn_slots;
-+
-+ if (atomic_read(tgt_dev->cur_sn_slot) == 0)
-+ break;
-+
-+ i++;
-+ BUG_ON(i == ARRAY_SIZE(tgt_dev->sn_slots));
-+ }
-+ TRACE_SN("New cur SN slot %zd",
-+ tgt_dev->cur_sn_slot -
-+ tgt_dev->sn_slots);
-+ }
-+ }
-+ spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
-+ }
-+ tgt_dev->prev_cmd_ordered = 1;
-+ tgt_dev->curr_sn++;
-+ cmd->sn = tgt_dev->curr_sn;
-+ break;
-+
-+ case SCST_CMD_QUEUE_HEAD_OF_QUEUE:
-+ TRACE_SN("HQ cmd %p (op %x)", cmd, cmd->cdb[0]);
-+ spin_lock_irqsave(&tgt_dev->sn_lock, flags);
-+ tgt_dev->hq_cmd_count++;
-+ spin_unlock_irqrestore(&tgt_dev->sn_lock, flags);
-+ cmd->hq_cmd_inced = 1;
++ res = strict_strtol(buf, 0, &newtn);
++ if (res != 0) {
++ PRINT_ERROR("strict_strtol() for %s failed: %d ", buf, res);
+ goto out;
-+
-+ default:
-+ BUG();
+ }
-+
-+ TRACE_SN("cmd(%p)->sn: %d (tgt_dev %p, *cur_sn_slot %d, "
-+ "num_free_sn_slots %d, prev_cmd_ordered %ld, "
-+ "cur_sn_slot %zd)", cmd, cmd->sn, tgt_dev,
-+ atomic_read(tgt_dev->cur_sn_slot),
-+ tgt_dev->num_free_sn_slots, tgt_dev->prev_cmd_ordered,
-+ tgt_dev->cur_sn_slot-tgt_dev->sn_slots);
-+
-+ cmd->sn_set = 1;
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * Returns 0 on success, > 0 when we need to wait for unblock,
-+ * < 0 if there is no device (lun) or device type handler.
-+ *
-+ * No locks, but might be on IRQ, protection is done by the
-+ * suspended activity.
-+ */
-+static int scst_translate_lun(struct scst_cmd *cmd)
-+{
-+ struct scst_tgt_dev *tgt_dev = NULL;
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ /* See comment about smp_mb() in scst_suspend_activity() */
-+ __scst_get();
-+
-+ if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
-+ struct list_head *sess_tgt_dev_list_head =
-+ &cmd->sess->sess_tgt_dev_list_hash[HASH_VAL(cmd->lun)];
-+ TRACE_DBG("Finding tgt_dev for cmd %p (lun %lld)", cmd,
-+ (long long unsigned int)cmd->lun);
-+ res = -1;
-+ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
-+ sess_tgt_dev_list_entry) {
-+ if (tgt_dev->lun == cmd->lun) {
-+ TRACE_DBG("tgt_dev %p found", tgt_dev);
-+
-+ if (unlikely(tgt_dev->dev->handler ==
-+ &scst_null_devtype)) {
-+ PRINT_INFO("Dev handler for device "
-+ "%lld is NULL, the device will not "
-+ "be visible remotely",
-+ (long long unsigned int)cmd->lun);
-+ break;
-+ }
-+
-+ cmd->cmd_threads = tgt_dev->active_cmd_threads;
-+ cmd->tgt_dev = tgt_dev;
-+ cmd->dev = tgt_dev->dev;
-+
-+ res = 0;
-+ break;
-+ }
-+ }
-+ if (res != 0) {
-+ TRACE(TRACE_MINOR,
-+ "tgt_dev for LUN %lld not found, command to "
-+ "unexisting LU?",
-+ (long long unsigned int)cmd->lun);
-+ __scst_put();
-+ }
-+ } else {
-+ TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
-+ __scst_put();
-+ res = 1;
++ if (newtn <= 0) {
++ PRINT_ERROR("Illegal threads num value %ld", newtn);
++ res = -EINVAL;
++ goto out;
+ }
+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/*
-+ * No locks, but might be on IRQ.
-+ *
-+ * Returns 0 on success, > 0 when we need to wait for unblock,
-+ * < 0 if there is no device (lun) or device type handler.
-+ */
-+static int __scst_init_cmd(struct scst_cmd *cmd)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ res = scst_translate_lun(cmd);
-+ if (likely(res == 0)) {
-+ int cnt;
-+ bool failure = false;
-+
-+ cmd->state = SCST_CMD_STATE_PARSE;
-+
-+ cnt = atomic_inc_return(&cmd->tgt_dev->tgt_dev_cmd_count);
-+ if (unlikely(cnt > SCST_MAX_TGT_DEV_COMMANDS)) {
-+ TRACE(TRACE_FLOW_CONTROL,
-+ "Too many pending commands (%d) in "
-+ "session, returning BUSY to initiator \"%s\"",
-+ cnt, (cmd->sess->initiator_name[0] == '\0') ?
-+ "Anonymous" : cmd->sess->initiator_name);
-+ failure = true;
-+ }
-+
-+#ifdef CONFIG_SCST_PER_DEVICE_CMD_COUNT_LIMIT
-+ cnt = atomic_inc_return(&cmd->dev->dev_cmd_count);
-+ if (unlikely(cnt > SCST_MAX_DEV_COMMANDS)) {
-+ if (!failure) {
-+ TRACE(TRACE_FLOW_CONTROL,
-+ "Too many pending device "
-+ "commands (%d), returning BUSY to "
-+ "initiator \"%s\"", cnt,
-+ (cmd->sess->initiator_name[0] == '\0') ?
-+ "Anonymous" :
-+ cmd->sess->initiator_name);
-+ failure = true;
-+ }
-+ }
-+#endif
-+
-+#ifdef CONFIG_SCST_ORDERED_READS
-+ /* If expected values not set, expected direction is UNKNOWN */
-+ if (cmd->expected_data_direction & SCST_DATA_WRITE)
-+ atomic_inc(&cmd->dev->write_cmd_count);
-+#endif
-+
-+ if (unlikely(failure))
-+ goto out_busy;
++ res = scst_alloc_sysfs_work(scst_threads_store_work_fn, false, &work);
++ if (res != 0)
++ goto out;
+
-+ if (unlikely(scst_pre_parse(cmd) != 0))
-+ goto out;
++ work->new_threads_num = newtn;
+
-+ if (!cmd->set_sn_on_restart_cmd)
-+ scst_cmd_set_sn(cmd);
-+ } else if (res < 0) {
-+ TRACE_DBG("Finishing cmd %p", cmd);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_lun_not_supported));
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ } else
-+ goto out;
++ res = scst_sysfs_queue_wait_work(work);
++ if (res == 0)
++ res = count;
+
+out:
+ TRACE_EXIT_RES(res);
+ return res;
-+
-+out_busy:
-+ scst_set_busy(cmd);
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ goto out;
+}
+
-+/* Called under scst_init_lock and IRQs disabled */
-+static void scst_do_job_init(void)
-+ __releases(&scst_init_lock)
-+ __acquires(&scst_init_lock)
-+{
-+ struct scst_cmd *cmd;
-+ int susp;
-+
-+ TRACE_ENTRY();
-+
-+restart:
-+ /*
-+ * There is no need for read barrier here, because we don't care where
-+ * this check will be done.
-+ */
-+ susp = test_bit(SCST_FLAG_SUSPENDED, &scst_flags);
-+ if (scst_init_poll_cnt > 0)
-+ scst_init_poll_cnt--;
-+
-+ list_for_each_entry(cmd, &scst_init_cmd_list, cmd_list_entry) {
-+ int rc;
-+ if (susp && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
-+ continue;
-+ if (!test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
-+ spin_unlock_irq(&scst_init_lock);
-+ rc = __scst_init_cmd(cmd);
-+ spin_lock_irq(&scst_init_lock);
-+ if (rc > 0) {
-+ TRACE_MGMT_DBG("%s",
-+ "FLAG SUSPENDED set, restarting");
-+ goto restart;
-+ }
-+ } else {
-+ TRACE_MGMT_DBG("Aborting not inited cmd %p (tag %llu)",
-+ cmd, (long long unsigned int)cmd->tag);
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ }
-+
-+ /*
-+ * Deleting cmd from init cmd list after __scst_init_cmd()
-+ * is necessary to keep the check in scst_init_cmd() correct
-+ * to preserve the commands order.
-+ *
-+ * We don't care about the race, when init cmd list is empty
-+ * and one command detected that it just was not empty, so
-+ * it's inserting to it, but another command at the same time
-+ * seeing init cmd list empty and goes directly, because it
-+ * could affect only commands from the same initiator to the
-+ * same tgt_dev, but scst_cmd_init_done*() doesn't guarantee
-+ * the order in case of simultaneous such calls anyway.
-+ */
-+ TRACE_MGMT_DBG("Deleting cmd %p from init cmd list", cmd);
-+ smp_wmb(); /* enforce the required order */
-+ list_del(&cmd->cmd_list_entry);
-+ spin_unlock(&scst_init_lock);
-+
-+ spin_lock(&cmd->cmd_threads->cmd_list_lock);
-+ TRACE_MGMT_DBG("Adding cmd %p to active cmd list", cmd);
-+ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
-+ list_add(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ else
-+ list_add_tail(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
-+ spin_unlock(&cmd->cmd_threads->cmd_list_lock);
-+
-+ spin_lock(&scst_init_lock);
-+ goto restart;
-+ }
-+
-+ /* It isn't really needed, but let's keep it */
-+ if (susp != test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
-+ goto restart;
-+
-+ TRACE_EXIT();
-+ return;
-+}
++static struct kobj_attribute scst_threads_attr =
++ __ATTR(threads, S_IRUGO | S_IWUSR, scst_threads_show,
++ scst_threads_store);
+
-+static inline int test_init_cmd_list(void)
++static ssize_t scst_setup_id_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
+{
-+ int res = (!list_empty(&scst_init_cmd_list) &&
-+ !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
-+ unlikely(kthread_should_stop()) ||
-+ (scst_init_poll_cnt > 0);
-+ return res;
-+}
++ int count;
+
-+int scst_init_thread(void *arg)
-+{
+ TRACE_ENTRY();
+
-+ PRINT_INFO("Init thread started, PID %d", current->pid);
-+
-+ current->flags |= PF_NOFREEZE;
-+
-+ set_user_nice(current, -10);
-+
-+ spin_lock_irq(&scst_init_lock);
-+ while (!kthread_should_stop()) {
-+ wait_queue_t wait;
-+ init_waitqueue_entry(&wait, current);
-+
-+ if (!test_init_cmd_list()) {
-+ add_wait_queue_exclusive(&scst_init_cmd_list_waitQ,
-+ &wait);
-+ for (;;) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ if (test_init_cmd_list())
-+ break;
-+ spin_unlock_irq(&scst_init_lock);
-+ schedule();
-+ spin_lock_irq(&scst_init_lock);
-+ }
-+ set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&scst_init_cmd_list_waitQ, &wait);
-+ }
-+ scst_do_job_init();
-+ }
-+ spin_unlock_irq(&scst_init_lock);
-+
-+ /*
-+ * If kthread_should_stop() is true, we are guaranteed to be
-+ * on the module unload, so scst_init_cmd_list must be empty.
-+ */
-+ BUG_ON(!list_empty(&scst_init_cmd_list));
-+
-+ PRINT_INFO("Init thread PID %d finished", current->pid);
++ count = sprintf(buf, "0x%x\n%s\n", scst_setup_id,
++ (scst_setup_id == 0) ? "" : SCST_SYSFS_KEY_MARK);
+
+ TRACE_EXIT();
-+ return 0;
++ return count;
+}
+
-+/**
-+ * scst_process_active_cmd() - process active command
-+ *
-+ * Description:
-+ * Main SCST commands processing routing. Must be used only by dev handlers.
-+ *
-+ * Argument atomic is true, if function called in atomic context.
-+ *
-+ * Must be called with no locks held.
-+ */
-+void scst_process_active_cmd(struct scst_cmd *cmd, bool atomic)
++static ssize_t scst_setup_id_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int res;
++ unsigned long val;
+
+ TRACE_ENTRY();
+
-+ /*
-+ * Checkpatch will complain on the use of in_atomic() below. You
-+ * can safely ignore this warning since in_atomic() is used here only
-+ * for debugging purposes.
-+ */
-+ EXTRACHECKS_BUG_ON(in_irq() || irqs_disabled());
-+ EXTRACHECKS_WARN_ON((in_atomic() || in_interrupt() || irqs_disabled()) &&
-+ !atomic);
-+
-+ cmd->atomic = atomic;
-+
-+ TRACE_DBG("cmd %p, atomic %d", cmd, atomic);
-+
-+ do {
-+ switch (cmd->state) {
-+ case SCST_CMD_STATE_PARSE:
-+ res = scst_parse_cmd(cmd);
-+ break;
-+
-+ case SCST_CMD_STATE_PREPARE_SPACE:
-+ res = scst_prepare_space(cmd);
-+ break;
-+
-+ case SCST_CMD_STATE_PREPROCESSING_DONE:
-+ res = scst_preprocessing_done(cmd);
-+ break;
-+
-+ case SCST_CMD_STATE_RDY_TO_XFER:
-+ res = scst_rdy_to_xfer(cmd);
-+ break;
-+
-+ case SCST_CMD_STATE_TGT_PRE_EXEC:
-+ res = scst_tgt_pre_exec(cmd);
-+ break;
-+
-+ case SCST_CMD_STATE_SEND_FOR_EXEC:
-+ if (tm_dbg_check_cmd(cmd) != 0) {
-+ res = SCST_CMD_STATE_RES_CONT_NEXT;
-+ TRACE_MGMT_DBG("Skipping cmd %p (tag %llu), "
-+ "because of TM DBG delay", cmd,
-+ (long long unsigned int)cmd->tag);
-+ break;
-+ }
-+ res = scst_send_for_exec(&cmd);
-+ /*
-+ * !! At this point cmd, sess & tgt_dev can already be
-+ * freed !!
-+ */
-+ break;
-+
-+ case SCST_CMD_STATE_LOCAL_EXEC:
-+ res = scst_local_exec(cmd);
-+ /*
-+ * !! At this point cmd, sess & tgt_dev can already be
-+ * freed !!
-+ */
-+ break;
-+
-+ case SCST_CMD_STATE_REAL_EXEC:
-+ res = scst_real_exec(cmd);
-+ /*
-+ * !! At this point cmd, sess & tgt_dev can already be
-+ * freed !!
-+ */
-+ break;
-+
-+ case SCST_CMD_STATE_PRE_DEV_DONE:
-+ res = scst_pre_dev_done(cmd);
-+ EXTRACHECKS_BUG_ON((res == SCST_CMD_STATE_RES_NEED_THREAD) &&
-+ (cmd->state == SCST_CMD_STATE_PRE_DEV_DONE));
-+ break;
-+
-+ case SCST_CMD_STATE_MODE_SELECT_CHECKS:
-+ res = scst_mode_select_checks(cmd);
-+ break;
-+
-+ case SCST_CMD_STATE_DEV_DONE:
-+ res = scst_dev_done(cmd);
-+ break;
-+
-+ case SCST_CMD_STATE_PRE_XMIT_RESP:
-+ res = scst_pre_xmit_response(cmd);
-+ EXTRACHECKS_BUG_ON(res ==
-+ SCST_CMD_STATE_RES_NEED_THREAD);
-+ break;
-+
-+ case SCST_CMD_STATE_XMIT_RESP:
-+ res = scst_xmit_response(cmd);
-+ break;
-+
-+ case SCST_CMD_STATE_FINISHED:
-+ res = scst_finish_cmd(cmd);
-+ break;
-+
-+ case SCST_CMD_STATE_FINISHED_INTERNAL:
-+ res = scst_finish_internal_cmd(cmd);
-+ EXTRACHECKS_BUG_ON(res ==
-+ SCST_CMD_STATE_RES_NEED_THREAD);
-+ break;
-+
-+ default:
-+ PRINT_CRIT_ERROR("cmd (%p) in state %d, but shouldn't "
-+ "be", cmd, cmd->state);
-+ BUG();
-+ res = SCST_CMD_STATE_RES_CONT_NEXT;
-+ break;
-+ }
-+ } while (res == SCST_CMD_STATE_RES_CONT_SAME);
-+
-+ if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
-+ /* None */
-+ } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
-+ spin_lock_irq(&cmd->cmd_threads->cmd_list_lock);
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ switch (cmd->state) {
-+ case SCST_CMD_STATE_PARSE:
-+ case SCST_CMD_STATE_PREPARE_SPACE:
-+ case SCST_CMD_STATE_RDY_TO_XFER:
-+ case SCST_CMD_STATE_TGT_PRE_EXEC:
-+ case SCST_CMD_STATE_SEND_FOR_EXEC:
-+ case SCST_CMD_STATE_LOCAL_EXEC:
-+ case SCST_CMD_STATE_REAL_EXEC:
-+ case SCST_CMD_STATE_DEV_DONE:
-+ case SCST_CMD_STATE_XMIT_RESP:
-+#endif
-+ TRACE_DBG("Adding cmd %p to head of active cmd list",
-+ cmd);
-+ list_add(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ break;
-+ default:
-+ PRINT_CRIT_ERROR("cmd %p is in invalid state %d)", cmd,
-+ cmd->state);
-+ spin_unlock_irq(&cmd->cmd_threads->cmd_list_lock);
-+ BUG();
-+ spin_lock_irq(&cmd->cmd_threads->cmd_list_lock);
-+ break;
-+ }
-+#endif
-+ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
-+ spin_unlock_irq(&cmd->cmd_threads->cmd_list_lock);
-+ } else
-+ BUG();
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_process_active_cmd);
-+
-+/* Called under cmd_list_lock and IRQs disabled */
-+static void scst_do_job_active(struct list_head *cmd_list,
-+ spinlock_t *cmd_list_lock, bool atomic)
-+ __releases(cmd_list_lock)
-+ __acquires(cmd_list_lock)
-+{
-+ TRACE_ENTRY();
-+
-+ while (!list_empty(cmd_list)) {
-+ struct scst_cmd *cmd = list_entry(cmd_list->next, typeof(*cmd),
-+ cmd_list_entry);
-+ TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
-+ list_del(&cmd->cmd_list_entry);
-+ spin_unlock_irq(cmd_list_lock);
-+ scst_process_active_cmd(cmd, atomic);
-+ spin_lock_irq(cmd_list_lock);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static inline int test_cmd_threads(struct scst_cmd_threads *p_cmd_threads)
-+{
-+ int res = !list_empty(&p_cmd_threads->active_cmd_list) ||
-+ unlikely(kthread_should_stop()) ||
-+ tm_dbg_is_release();
-+ return res;
-+}
-+
-+int scst_cmd_thread(void *arg)
-+{
-+ struct scst_cmd_threads *p_cmd_threads = arg;
-+
-+ TRACE_ENTRY();
-+
-+ PRINT_INFO("Processing thread %s (PID %d) started", current->comm,
-+ current->pid);
-+
-+#if 0
-+ set_user_nice(current, 10);
-+#endif
-+ current->flags |= PF_NOFREEZE;
-+
-+ mutex_lock(&p_cmd_threads->io_context_mutex);
-+
-+ WARN_ON(current->io_context);
-+
-+ if (p_cmd_threads != &scst_main_cmd_threads) {
-+ /*
-+ * For linked IO contexts io_context might be not NULL while
-+ * io_context 0.
-+ */
-+ if (p_cmd_threads->io_context == NULL) {
-+ p_cmd_threads->io_context = get_io_context(GFP_KERNEL, -1);
-+ TRACE_MGMT_DBG("Alloced new IO context %p "
-+ "(p_cmd_threads %p)",
-+ p_cmd_threads->io_context,
-+ p_cmd_threads);
-+ /*
-+ * Put the extra reference created by get_io_context()
-+ * because we don't need it.
-+ */
-+ put_io_context(p_cmd_threads->io_context);
-+ } else {
-+ current->io_context = ioc_task_link(p_cmd_threads->io_context);
-+ TRACE_MGMT_DBG("Linked IO context %p "
-+ "(p_cmd_threads %p)", p_cmd_threads->io_context,
-+ p_cmd_threads);
-+ }
-+ p_cmd_threads->io_context_refcnt++;
-+ }
-+
-+ mutex_unlock(&p_cmd_threads->io_context_mutex);
-+
-+ p_cmd_threads->io_context_ready = true;
-+
-+ spin_lock_irq(&p_cmd_threads->cmd_list_lock);
-+ while (!kthread_should_stop()) {
-+ wait_queue_t wait;
-+ init_waitqueue_entry(&wait, current);
-+
-+ if (!test_cmd_threads(p_cmd_threads)) {
-+ add_wait_queue_exclusive_head(
-+ &p_cmd_threads->cmd_list_waitQ,
-+ &wait);
-+ for (;;) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ if (test_cmd_threads(p_cmd_threads))
-+ break;
-+ spin_unlock_irq(&p_cmd_threads->cmd_list_lock);
-+ schedule();
-+ spin_lock_irq(&p_cmd_threads->cmd_list_lock);
-+ }
-+ set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&p_cmd_threads->cmd_list_waitQ, &wait);
-+ }
-+
-+ if (tm_dbg_is_release()) {
-+ spin_unlock_irq(&p_cmd_threads->cmd_list_lock);
-+ tm_dbg_check_released_cmds();
-+ spin_lock_irq(&p_cmd_threads->cmd_list_lock);
-+ }
-+
-+ scst_do_job_active(&p_cmd_threads->active_cmd_list,
-+ &p_cmd_threads->cmd_list_lock, false);
-+ }
-+ spin_unlock_irq(&p_cmd_threads->cmd_list_lock);
-+
-+ if (p_cmd_threads != &scst_main_cmd_threads) {
-+ mutex_lock(&p_cmd_threads->io_context_mutex);
-+ if (--p_cmd_threads->io_context_refcnt == 0)
-+ p_cmd_threads->io_context = NULL;
-+ mutex_unlock(&p_cmd_threads->io_context_mutex);
-+ }
-+
-+ PRINT_INFO("Processing thread %s (PID %d) finished", current->comm,
-+ current->pid);
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+void scst_cmd_tasklet(long p)
-+{
-+ struct scst_tasklet *t = (struct scst_tasklet *)p;
-+
-+ TRACE_ENTRY();
-+
-+ spin_lock_irq(&t->tasklet_lock);
-+ scst_do_job_active(&t->tasklet_cmd_list, &t->tasklet_lock, true);
-+ spin_unlock_irq(&t->tasklet_lock);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * Returns 0 on success, < 0 if there is no device handler or
-+ * > 0 if SCST_FLAG_SUSPENDED set and SCST_FLAG_SUSPENDING - not.
-+ * No locks, protection is done by the suspended activity.
-+ */
-+static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
-+{
-+ struct scst_tgt_dev *tgt_dev = NULL;
-+ struct list_head *sess_tgt_dev_list_head;
-+ int res = -1;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Finding tgt_dev for mgmt cmd %p (lun %lld)", mcmd,
-+ (long long unsigned int)mcmd->lun);
-+
-+ /* See comment about smp_mb() in scst_suspend_activity() */
-+ __scst_get();
-+
-+ if (unlikely(test_bit(SCST_FLAG_SUSPENDED, &scst_flags) &&
-+ !test_bit(SCST_FLAG_SUSPENDING, &scst_flags))) {
-+ TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
-+ __scst_put();
-+ res = 1;
-+ goto out;
-+ }
-+
-+ sess_tgt_dev_list_head =
-+ &mcmd->sess->sess_tgt_dev_list_hash[HASH_VAL(mcmd->lun)];
-+ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
-+ sess_tgt_dev_list_entry) {
-+ if (tgt_dev->lun == mcmd->lun) {
-+ TRACE_DBG("tgt_dev %p found", tgt_dev);
-+ mcmd->mcmd_tgt_dev = tgt_dev;
-+ res = 0;
-+ break;
-+ }
-+ }
-+ if (mcmd->mcmd_tgt_dev == NULL)
-+ __scst_put();
-+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
-+
-+/* No locks */
-+void scst_done_cmd_mgmt(struct scst_cmd *cmd)
-+{
-+ struct scst_mgmt_cmd_stub *mstb, *t;
-+ bool wake = 0;
-+ unsigned long flags;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("cmd %p done (tag %llu)",
-+ cmd, (long long unsigned int)cmd->tag);
-+
-+ spin_lock_irqsave(&scst_mcmd_lock, flags);
-+
-+ list_for_each_entry_safe(mstb, t, &cmd->mgmt_cmd_list,
-+ cmd_mgmt_cmd_list_entry) {
-+ struct scst_mgmt_cmd *mcmd;
-+
-+ if (!mstb->done_counted)
-+ continue;
-+
-+ mcmd = mstb->mcmd;
-+ TRACE_MGMT_DBG("mcmd %p, mcmd->cmd_done_wait_count %d",
-+ mcmd, mcmd->cmd_done_wait_count);
-+
-+ mcmd->cmd_done_wait_count--;
-+
-+ BUG_ON(mcmd->cmd_done_wait_count < 0);
-+
-+ if (mcmd->cmd_done_wait_count > 0) {
-+ TRACE_MGMT_DBG("cmd_done_wait_count(%d) not 0, "
-+ "skipping", mcmd->cmd_done_wait_count);
-+ goto check_free;
-+ }
-+
-+ if (mcmd->state == SCST_MCMD_STATE_WAITING_AFFECTED_CMDS_DONE) {
-+ mcmd->state = SCST_MCMD_STATE_AFFECTED_CMDS_DONE;
-+ TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd "
-+ "list", mcmd);
-+ list_add_tail(&mcmd->mgmt_cmd_list_entry,
-+ &scst_active_mgmt_cmd_list);
-+ wake = 1;
-+ }
-+
-+check_free:
-+ if (!mstb->finish_counted) {
-+ TRACE_DBG("Releasing mstb %p", mstb);
-+ list_del(&mstb->cmd_mgmt_cmd_list_entry);
-+ mempool_free(mstb, scst_mgmt_stub_mempool);
-+ }
-+ }
-+
-+ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
-+
-+ if (wake)
-+ wake_up(&scst_mgmt_cmd_list_waitQ);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Called under scst_mcmd_lock and IRQs disabled */
-+static void __scst_dec_finish_wait_count(struct scst_mgmt_cmd *mcmd, bool *wake)
-+{
-+ TRACE_ENTRY();
-+
-+ mcmd->cmd_finish_wait_count--;
-+
-+ BUG_ON(mcmd->cmd_finish_wait_count < 0);
-+
-+ if (mcmd->cmd_finish_wait_count > 0) {
-+ TRACE_MGMT_DBG("cmd_finish_wait_count(%d) not 0, "
-+ "skipping", mcmd->cmd_finish_wait_count);
++ res = strict_strtoul(buf, 0, &val);
++ if (res != 0) {
++ PRINT_ERROR("strict_strtoul() for %s failed: %d ", buf, res);
+ goto out;
+ }
+
-+ if (mcmd->cmd_done_wait_count > 0) {
-+ TRACE_MGMT_DBG("cmd_done_wait_count(%d) not 0, "
-+ "skipping", mcmd->cmd_done_wait_count);
-+ goto out;
-+ }
++ scst_setup_id = val;
++ PRINT_INFO("Changed scst_setup_id to %x", scst_setup_id);
+
-+ if (mcmd->state == SCST_MCMD_STATE_WAITING_AFFECTED_CMDS_FINISHED) {
-+ mcmd->state = SCST_MCMD_STATE_DONE;
-+ TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd "
-+ "list", mcmd);
-+ list_add_tail(&mcmd->mgmt_cmd_list_entry,
-+ &scst_active_mgmt_cmd_list);
-+ *wake = true;
-+ }
++ res = count;
+
+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ * scst_prepare_async_mcmd() - prepare async management command
-+ *
-+ * Notifies SCST that management command is going to be async, i.e.
-+ * will be completed in another context.
-+ *
-+ * No SCST locks supposed to be held on entrance.
-+ */
-+void scst_prepare_async_mcmd(struct scst_mgmt_cmd *mcmd)
-+{
-+ unsigned long flags;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Preparing mcmd %p for async execution "
-+ "(cmd_finish_wait_count %d)", mcmd,
-+ mcmd->cmd_finish_wait_count);
-+
-+ spin_lock_irqsave(&scst_mcmd_lock, flags);
-+ mcmd->cmd_finish_wait_count++;
-+ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_prepare_async_mcmd);
-+
-+/**
-+ * scst_async_mcmd_completed() - async management command completed
-+ *
-+ * Notifies SCST that async management command, prepared by
-+ * scst_prepare_async_mcmd(), completed.
-+ *
-+ * No SCST locks supposed to be held on entrance.
-+ */
-+void scst_async_mcmd_completed(struct scst_mgmt_cmd *mcmd, int status)
-+{
-+ unsigned long flags;
-+ bool wake = false;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Async mcmd %p completed (status %d)", mcmd, status);
-+
-+ spin_lock_irqsave(&scst_mcmd_lock, flags);
-+
-+ if (status != SCST_MGMT_STATUS_SUCCESS)
-+ mcmd->status = status;
-+
-+ __scst_dec_finish_wait_count(mcmd, &wake);
-+
-+ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
-+
-+ if (wake)
-+ wake_up(&scst_mgmt_cmd_list_waitQ);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_async_mcmd_completed);
-+
-+/* No locks */
-+static void scst_finish_cmd_mgmt(struct scst_cmd *cmd)
-+{
-+ struct scst_mgmt_cmd_stub *mstb, *t;
-+ bool wake = false;
-+ unsigned long flags;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("cmd %p finished (tag %llu)",
-+ cmd, (long long unsigned int)cmd->tag);
-+
-+ spin_lock_irqsave(&scst_mcmd_lock, flags);
-+
-+ list_for_each_entry_safe(mstb, t, &cmd->mgmt_cmd_list,
-+ cmd_mgmt_cmd_list_entry) {
-+ struct scst_mgmt_cmd *mcmd = mstb->mcmd;
-+
-+ TRACE_MGMT_DBG("mcmd %p, mcmd->cmd_finish_wait_count %d", mcmd,
-+ mcmd->cmd_finish_wait_count);
-+
-+ BUG_ON(!mstb->finish_counted);
-+
-+ if (cmd->completed)
-+ mcmd->completed_cmd_count++;
-+
-+ __scst_dec_finish_wait_count(mcmd, &wake);
-+
-+ TRACE_DBG("Releasing mstb %p", mstb);
-+ list_del(&mstb->cmd_mgmt_cmd_list_entry);
-+ mempool_free(mstb, scst_mgmt_stub_mempool);
-+ }
-+
-+ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
-+
-+ if (wake)
-+ wake_up(&scst_mgmt_cmd_list_waitQ);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
-+ struct scst_tgt_dev *tgt_dev, int set_status)
-+{
-+ int res = SCST_DEV_TM_NOT_COMPLETED;
-+ struct scst_dev_type *h = tgt_dev->dev->handler;
-+
-+ if (h->task_mgmt_fn) {
-+ TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
-+ h->name, mcmd->fn);
-+ EXTRACHECKS_BUG_ON(in_irq() || irqs_disabled());
-+ res = h->task_mgmt_fn(mcmd, tgt_dev);
-+ TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
-+ h->name, res);
-+ if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED))
-+ mcmd->status = res;
-+ }
++ TRACE_EXIT_RES(res);
+ return res;
+}
+
-+static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
-+{
-+ switch (mgmt_fn) {
-+#ifdef CONFIG_SCST_ABORT_CONSIDER_FINISHED_TASKS_AS_NOT_EXISTING
-+ case SCST_ABORT_TASK:
-+#endif
-+#if 0
-+ case SCST_ABORT_TASK_SET:
-+ case SCST_CLEAR_TASK_SET:
-+#endif
-+ return 1;
-+ default:
-+ return 0;
-+ }
-+}
++static struct kobj_attribute scst_setup_id_attr =
++ __ATTR(setup_id, S_IRUGO | S_IWUSR, scst_setup_id_show,
++ scst_setup_id_store);
+
-+/* Might be called under sess_list_lock and IRQ off + BHs also off */
-+void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
-+ bool other_ini, bool call_dev_task_mgmt_fn)
++static ssize_t scst_max_tasklet_cmd_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
+{
-+ unsigned long flags;
-+ static DEFINE_SPINLOCK(other_ini_lock);
++ int count;
+
+ TRACE_ENTRY();
+
-+ TRACE(TRACE_SCSI|TRACE_MGMT_DEBUG, "Aborting cmd %p (tag %llu, op %x)",
-+ cmd, (long long unsigned int)cmd->tag, cmd->cdb[0]);
-+
-+ /* To protect from concurrent aborts */
-+ spin_lock_irqsave(&other_ini_lock, flags);
-+
-+ if (other_ini) {
-+ struct scst_device *dev = NULL;
-+
-+ /* Might be necessary if command aborted several times */
-+ if (!test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
-+ set_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
-+
-+ /* Necessary for scst_xmit_process_aborted_cmd */
-+ if (cmd->dev != NULL)
-+ dev = cmd->dev;
-+ else if ((mcmd != NULL) && (mcmd->mcmd_tgt_dev != NULL))
-+ dev = mcmd->mcmd_tgt_dev->dev;
-+
-+ if (dev != NULL) {
-+ if (dev->tas)
-+ set_bit(SCST_CMD_DEVICE_TAS, &cmd->cmd_flags);
-+ } else
-+ PRINT_WARNING("Abort cmd %p from other initiator, but "
-+ "neither cmd, nor mcmd %p have tgt_dev set, so "
-+ "TAS information can be lost", cmd, mcmd);
-+ } else {
-+ /* Might be necessary if command aborted several times */
-+ clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
-+ }
-+
-+ set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
-+
-+ spin_unlock_irqrestore(&other_ini_lock, flags);
-+
-+ /*
-+ * To sync with cmd->finished/done set in
-+ * scst_finish_cmd()/scst_pre_xmit_response() and with setting UA for
-+ * aborted cmd in scst_set_pending_UA().
-+ */
-+ smp_mb__after_set_bit();
-+
-+ if (cmd->tgt_dev == NULL) {
-+ spin_lock_irqsave(&scst_init_lock, flags);
-+ scst_init_poll_cnt++;
-+ spin_unlock_irqrestore(&scst_init_lock, flags);
-+ wake_up(&scst_init_cmd_list_waitQ);
-+ }
-+
-+ if (call_dev_task_mgmt_fn && (cmd->tgt_dev != NULL)) {
-+ EXTRACHECKS_BUG_ON(irqs_disabled());
-+ scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 1);
-+ }
-+
-+ spin_lock_irqsave(&scst_mcmd_lock, flags);
-+ if ((mcmd != NULL) && !cmd->finished) {
-+ struct scst_mgmt_cmd_stub *mstb;
-+
-+ mstb = mempool_alloc(scst_mgmt_stub_mempool, GFP_ATOMIC);
-+ if (mstb == NULL) {
-+ PRINT_CRIT_ERROR("Allocation of management command "
-+ "stub failed (mcmd %p, cmd %p)", mcmd, cmd);
-+ goto unlock;
-+ }
-+ memset(mstb, 0, sizeof(*mstb));
-+
-+ TRACE_DBG("mstb %p, mcmd %p", mstb, mcmd);
-+
-+ mstb->mcmd = mcmd;
-+
-+ /*
-+ * Delay the response until the command's finish in order to
-+ * guarantee that "no further responses from the task are sent
-+ * to the SCSI initiator port" after response from the TM
-+ * function is sent (SAM). Plus, we must wait here to be sure
-+ * that we won't receive double commands with the same tag.
-+ * Moreover, if we don't wait here, we might have a possibility
-+ * for data corruption, when aborted and reported as completed
-+ * command actually gets executed *after* new commands sent
-+ * after this TM command completed.
-+ */
-+
-+ if (cmd->sent_for_exec && !cmd->done) {
-+ TRACE_MGMT_DBG("cmd %p (tag %llu) is being executed",
-+ cmd, (long long unsigned int)cmd->tag);
-+ mstb->done_counted = 1;
-+ mcmd->cmd_done_wait_count++;
-+ }
-+
-+ /*
-+ * We don't have to wait the command's status delivery finish
-+ * to other initiators + it can affect MPIO failover.
-+ */
-+ if (!other_ini) {
-+ mstb->finish_counted = 1;
-+ mcmd->cmd_finish_wait_count++;
-+ }
-+
-+ if (mstb->done_counted || mstb->finish_counted) {
-+ TRACE_MGMT_DBG("cmd %p (tag %llu, sn %u) being "
-+ "executed/xmitted (state %d, op %x, proc time "
-+ "%ld sec., timeout %d sec.), deferring ABORT "
-+ "(cmd_done_wait_count %d, cmd_finish_wait_count "
-+ "%d)", cmd, (long long unsigned int)cmd->tag,
-+ cmd->sn, cmd->state, cmd->cdb[0],
-+ (long)(jiffies - cmd->start_time) / HZ,
-+ cmd->timeout / HZ, mcmd->cmd_done_wait_count,
-+ mcmd->cmd_finish_wait_count);
-+ /*
-+ * cmd can't die here or sess_list_lock already taken
-+ * and cmd is in the sess list
-+ */
-+ list_add_tail(&mstb->cmd_mgmt_cmd_list_entry,
-+ &cmd->mgmt_cmd_list);
-+ } else {
-+ /* We don't need to wait for this cmd */
-+ mempool_free(mstb, scst_mgmt_stub_mempool);
-+ }
-+ }
-+
-+unlock:
-+ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
-+
-+ tm_dbg_release_cmd(cmd);
++ count = sprintf(buf, "%d\n%s\n", scst_max_tasklet_cmd,
++ (scst_max_tasklet_cmd == SCST_DEF_MAX_TASKLET_CMD)
++ ? "" : SCST_SYSFS_KEY_MARK);
+
+ TRACE_EXIT();
-+ return;
++ return count;
+}
+
-+/* No locks. Returns 0, if mcmd should be processed further. */
-+static int scst_set_mcmd_next_state(struct scst_mgmt_cmd *mcmd)
++static ssize_t scst_max_tasklet_cmd_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int res;
-+
-+ spin_lock_irq(&scst_mcmd_lock);
-+
-+ switch (mcmd->state) {
-+ case SCST_MCMD_STATE_INIT:
-+ case SCST_MCMD_STATE_EXEC:
-+ if (mcmd->cmd_done_wait_count == 0) {
-+ mcmd->state = SCST_MCMD_STATE_AFFECTED_CMDS_DONE;
-+ res = 0;
-+ } else {
-+ TRACE_MGMT_DBG("cmd_done_wait_count(%d) not 0, "
-+ "preparing to wait", mcmd->cmd_done_wait_count);
-+ mcmd->state = SCST_MCMD_STATE_WAITING_AFFECTED_CMDS_DONE;
-+ res = -1;
-+ }
-+ break;
-+
-+ case SCST_MCMD_STATE_AFFECTED_CMDS_DONE:
-+ if (mcmd->cmd_finish_wait_count == 0) {
-+ mcmd->state = SCST_MCMD_STATE_DONE;
-+ res = 0;
-+ } else {
-+ TRACE_MGMT_DBG("cmd_finish_wait_count(%d) not 0, "
-+ "preparing to wait",
-+ mcmd->cmd_finish_wait_count);
-+ mcmd->state = SCST_MCMD_STATE_WAITING_AFFECTED_CMDS_FINISHED;
-+ res = -1;
-+ }
-+ break;
-+
-+ case SCST_MCMD_STATE_DONE:
-+ mcmd->state = SCST_MCMD_STATE_FINISHED;
-+ res = 0;
-+ break;
-+
-+ default:
-+ PRINT_CRIT_ERROR("Wrong mcmd %p state %d (fn %d, "
-+ "cmd_finish_wait_count %d, cmd_done_wait_count %d)",
-+ mcmd, mcmd->state, mcmd->fn,
-+ mcmd->cmd_finish_wait_count, mcmd->cmd_done_wait_count);
-+ spin_unlock_irq(&scst_mcmd_lock);
-+ BUG();
-+ goto out;
-+ }
-+
-+ spin_unlock_irq(&scst_mcmd_lock);
-+
-+out:
-+ return res;
-+}
-+
-+/* IRQs supposed to be disabled */
-+static bool __scst_check_unblock_aborted_cmd(struct scst_cmd *cmd,
-+ struct list_head *list_entry)
-+{
-+ bool res;
-+ if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
-+ list_del(list_entry);
-+ spin_lock(&cmd->cmd_threads->cmd_list_lock);
-+ list_add_tail(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
-+ spin_unlock(&cmd->cmd_threads->cmd_list_lock);
-+ res = 1;
-+ } else
-+ res = 0;
-+ return res;
-+}
-+
-+static void scst_unblock_aborted_cmds(int scst_mutex_held)
-+{
-+ struct scst_device *dev;
-+
-+ TRACE_ENTRY();
-+
-+ if (!scst_mutex_held)
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
-+ struct scst_cmd *cmd, *tcmd;
-+ struct scst_tgt_dev *tgt_dev;
-+ spin_lock_bh(&dev->dev_lock);
-+ local_irq_disable();
-+ list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
-+ blocked_cmd_list_entry) {
-+ if (__scst_check_unblock_aborted_cmd(cmd,
-+ &cmd->blocked_cmd_list_entry)) {
-+ TRACE_MGMT_DBG("Unblock aborted blocked cmd %p",
-+ cmd);
-+ }
-+ }
-+ local_irq_enable();
-+ spin_unlock_bh(&dev->dev_lock);
-+
-+ local_irq_disable();
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ spin_lock(&tgt_dev->sn_lock);
-+ list_for_each_entry_safe(cmd, tcmd,
-+ &tgt_dev->deferred_cmd_list,
-+ sn_cmd_list_entry) {
-+ if (__scst_check_unblock_aborted_cmd(cmd,
-+ &cmd->sn_cmd_list_entry)) {
-+ TRACE_MGMT_DBG("Unblocked aborted SN "
-+ "cmd %p (sn %u)",
-+ cmd, cmd->sn);
-+ tgt_dev->def_cmd_count--;
-+ }
-+ }
-+ spin_unlock(&tgt_dev->sn_lock);
-+ }
-+ local_irq_enable();
-+ }
-+
-+ if (!scst_mutex_held)
-+ mutex_unlock(&scst_mutex);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void __scst_abort_task_set(struct scst_mgmt_cmd *mcmd,
-+ struct scst_tgt_dev *tgt_dev)
-+{
-+ struct scst_cmd *cmd;
-+ struct scst_session *sess = tgt_dev->sess;
-+ bool other_ini;
++ unsigned long val;
+
+ TRACE_ENTRY();
+
-+ if ((mcmd->fn == SCST_PR_ABORT_ALL) &&
-+ (mcmd->origin_pr_cmd->sess != sess))
-+ other_ini = true;
-+ else
-+ other_ini = false;
-+
-+ spin_lock_irq(&sess->sess_list_lock);
-+
-+ TRACE_DBG("Searching in sess cmd list (sess=%p)", sess);
-+ list_for_each_entry(cmd, &sess->sess_cmd_list,
-+ sess_cmd_list_entry) {
-+ if ((mcmd->fn == SCST_PR_ABORT_ALL) &&
-+ (mcmd->origin_pr_cmd == cmd))
-+ continue;
-+ if ((cmd->tgt_dev == tgt_dev) ||
-+ ((cmd->tgt_dev == NULL) &&
-+ (cmd->lun == tgt_dev->lun))) {
-+ if (mcmd->cmd_sn_set) {
-+ BUG_ON(!cmd->tgt_sn_set);
-+ if (scst_sn_before(mcmd->cmd_sn, cmd->tgt_sn) ||
-+ (mcmd->cmd_sn == cmd->tgt_sn))
-+ continue;
-+ }
-+ scst_abort_cmd(cmd, mcmd, other_ini, 0);
-+ }
-+ }
-+ spin_unlock_irq(&sess->sess_list_lock);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Returns 0 if the command processing should be continued, <0 otherwise */
-+static int scst_abort_task_set(struct scst_mgmt_cmd *mcmd)
-+{
-+ int res;
-+ struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
-+
-+ TRACE(TRACE_MGMT, "Aborting task set (lun=%lld, mcmd=%p)",
-+ (long long unsigned int)tgt_dev->lun, mcmd);
-+
-+ __scst_abort_task_set(mcmd, tgt_dev);
-+
-+ if (mcmd->fn == SCST_PR_ABORT_ALL) {
-+ struct scst_pr_abort_all_pending_mgmt_cmds_counter *pr_cnt =
-+ mcmd->origin_pr_cmd->pr_abort_counter;
-+ if (atomic_dec_and_test(&pr_cnt->pr_aborting_cnt))
-+ complete_all(&pr_cnt->pr_aborting_cmpl);
++ res = strict_strtoul(buf, 0, &val);
++ if (res != 0) {
++ PRINT_ERROR("strict_strtoul() for %s failed: %d ", buf, res);
++ goto out;
+ }
+
-+ tm_dbg_task_mgmt(mcmd->mcmd_tgt_dev->dev, "ABORT TASK SET/PR ABORT", 0);
-+
-+ scst_unblock_aborted_cmds(0);
-+
-+ scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
++ scst_max_tasklet_cmd = val;
++ PRINT_INFO("Changed scst_max_tasklet_cmd to %d", scst_max_tasklet_cmd);
+
-+ res = scst_set_mcmd_next_state(mcmd);
++ res = count;
+
++out:
+ TRACE_EXIT_RES(res);
+ return res;
+}
+
-+static int scst_is_cmd_belongs_to_dev(struct scst_cmd *cmd,
-+ struct scst_device *dev)
-+{
-+ struct scst_tgt_dev *tgt_dev = NULL;
-+ struct list_head *sess_tgt_dev_list_head;
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Finding match for dev %p and cmd %p (lun %lld)", dev, cmd,
-+ (long long unsigned int)cmd->lun);
-+
-+ sess_tgt_dev_list_head =
-+ &cmd->sess->sess_tgt_dev_list_hash[HASH_VAL(cmd->lun)];
-+ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
-+ sess_tgt_dev_list_entry) {
-+ if (tgt_dev->lun == cmd->lun) {
-+ TRACE_DBG("dev %p found", tgt_dev->dev);
-+ res = (tgt_dev->dev == dev);
-+ goto out;
-+ }
-+ }
++static struct kobj_attribute scst_max_tasklet_cmd_attr =
++ __ATTR(max_tasklet_cmd, S_IRUGO | S_IWUSR, scst_max_tasklet_cmd_show,
++ scst_max_tasklet_cmd_store);
+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
+
-+/* Returns 0 if the command processing should be continued, <0 otherwise */
-+static int scst_clear_task_set(struct scst_mgmt_cmd *mcmd)
++static ssize_t scst_main_trace_level_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
+{
-+ int res;
-+ struct scst_device *dev = mcmd->mcmd_tgt_dev->dev;
-+ struct scst_tgt_dev *tgt_dev;
-+ LIST_HEAD(UA_tgt_devs);
-+
-+ TRACE_ENTRY();
-+
-+ TRACE(TRACE_MGMT, "Clearing task set (lun=%lld, mcmd=%p)",
-+ (long long unsigned int)mcmd->lun, mcmd);
-+
-+#if 0 /* we are SAM-3 */
-+ /*
-+ * When a logical unit is aborting one or more tasks from a SCSI
-+ * initiator port with the TASK ABORTED status it should complete all
-+ * of those tasks before entering additional tasks from that SCSI
-+ * initiator port into the task set - SAM2
-+ */
-+ mcmd->needs_unblocking = 1;
-+ spin_lock_bh(&dev->dev_lock);
-+ scst_block_dev(dev);
-+ spin_unlock_bh(&dev->dev_lock);
-+#endif
-+
-+ __scst_abort_task_set(mcmd, mcmd->mcmd_tgt_dev);
-+
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ struct scst_session *sess = tgt_dev->sess;
-+ struct scst_cmd *cmd;
-+ int aborted = 0;
-+
-+ if (tgt_dev == mcmd->mcmd_tgt_dev)
-+ continue;
-+
-+ spin_lock_irq(&sess->sess_list_lock);
-+
-+ TRACE_DBG("Searching in sess cmd list (sess=%p)", sess);
-+ list_for_each_entry(cmd, &sess->sess_cmd_list,
-+ sess_cmd_list_entry) {
-+ if ((cmd->dev == dev) ||
-+ ((cmd->dev == NULL) &&
-+ scst_is_cmd_belongs_to_dev(cmd, dev))) {
-+ scst_abort_cmd(cmd, mcmd, 1, 0);
-+ aborted = 1;
-+ }
-+ }
-+ spin_unlock_irq(&sess->sess_list_lock);
-+
-+ if (aborted)
-+ list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
-+ &UA_tgt_devs);
-+ }
-+
-+ tm_dbg_task_mgmt(mcmd->mcmd_tgt_dev->dev, "CLEAR TASK SET", 0);
-+
-+ scst_unblock_aborted_cmds(1);
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ if (!dev->tas) {
-+ uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
-+ int sl;
-+
-+ sl = scst_set_sense(sense_buffer, sizeof(sense_buffer),
-+ dev->d_sense,
-+ SCST_LOAD_SENSE(scst_sense_cleared_by_another_ini_UA));
-+
-+ list_for_each_entry(tgt_dev, &UA_tgt_devs,
-+ extra_tgt_dev_list_entry) {
-+ scst_check_set_UA(tgt_dev, sense_buffer, sl, 0);
-+ }
-+ }
-+
-+ scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 0);
-+
-+ res = scst_set_mcmd_next_state(mcmd);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
++ return scst_trace_level_show(scst_local_trace_tbl, trace_flag,
++ buf, NULL);
+}
+
-+/* Returns 0 if the command processing should be continued,
-+ * >0, if it should be requeued, <0 otherwise */
-+static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
++static ssize_t scst_main_trace_level_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf, size_t count)
+{
-+ int res = 0, rc;
++ int res;
+
+ TRACE_ENTRY();
+
-+ switch (mcmd->fn) {
-+ case SCST_ABORT_TASK:
-+ {
-+ struct scst_session *sess = mcmd->sess;
-+ struct scst_cmd *cmd;
-+
-+ spin_lock_irq(&sess->sess_list_lock);
-+ cmd = __scst_find_cmd_by_tag(sess, mcmd->tag, true);
-+ if (cmd == NULL) {
-+ TRACE_MGMT_DBG("ABORT TASK: command "
-+ "for tag %llu not found",
-+ (long long unsigned int)mcmd->tag);
-+ mcmd->status = SCST_MGMT_STATUS_TASK_NOT_EXIST;
-+ spin_unlock_irq(&sess->sess_list_lock);
-+ res = scst_set_mcmd_next_state(mcmd);
-+ goto out;
-+ }
-+ __scst_cmd_get(cmd);
-+ spin_unlock_irq(&sess->sess_list_lock);
-+ TRACE_DBG("Cmd to abort %p for tag %llu found",
-+ cmd, (long long unsigned int)mcmd->tag);
-+ mcmd->cmd_to_abort = cmd;
-+ mcmd->state = SCST_MCMD_STATE_EXEC;
-+ break;
-+ }
-+
-+ case SCST_TARGET_RESET:
-+ case SCST_NEXUS_LOSS_SESS:
-+ case SCST_ABORT_ALL_TASKS_SESS:
-+ case SCST_NEXUS_LOSS:
-+ case SCST_ABORT_ALL_TASKS:
-+ case SCST_UNREG_SESS_TM:
-+ mcmd->state = SCST_MCMD_STATE_EXEC;
-+ break;
++ res = mutex_lock_interruptible(&scst_log_mutex);
++ if (res != 0)
++ goto out;
+
-+ case SCST_ABORT_TASK_SET:
-+ case SCST_CLEAR_ACA:
-+ case SCST_CLEAR_TASK_SET:
-+ case SCST_LUN_RESET:
-+ case SCST_PR_ABORT_ALL:
-+ rc = scst_mgmt_translate_lun(mcmd);
-+ if (rc == 0)
-+ mcmd->state = SCST_MCMD_STATE_EXEC;
-+ else if (rc < 0) {
-+ PRINT_ERROR("Corresponding device for LUN %lld not "
-+ "found", (long long unsigned int)mcmd->lun);
-+ mcmd->status = SCST_MGMT_STATUS_LUN_NOT_EXIST;
-+ res = scst_set_mcmd_next_state(mcmd);
-+ } else
-+ res = rc;
-+ break;
++ res = scst_write_trace(buf, count, &trace_flag,
++ SCST_DEFAULT_LOG_FLAGS, "scst", scst_local_trace_tbl);
+
-+ default:
-+ BUG();
-+ }
++ mutex_unlock(&scst_log_mutex);
+
+out:
+ TRACE_EXIT_RES(res);
+ return res;
+}
+
-+/* Returns 0 if the command processing should be continued, <0 otherwise */
-+static int scst_target_reset(struct scst_mgmt_cmd *mcmd)
-+{
-+ int res, rc;
-+ struct scst_device *dev;
-+ struct scst_acg *acg = mcmd->sess->acg;
-+ struct scst_acg_dev *acg_dev;
-+ int cont, c;
-+ LIST_HEAD(host_devs);
-+
-+ TRACE_ENTRY();
-+
-+ TRACE(TRACE_MGMT, "Target reset (mcmd %p, cmd count %d)",
-+ mcmd, atomic_read(&mcmd->sess->sess_cmd_count));
-+
-+ mcmd->needs_unblocking = 1;
-+
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
-+ struct scst_device *d;
-+ struct scst_tgt_dev *tgt_dev;
-+ int found = 0;
-+
-+ dev = acg_dev->dev;
-+
-+ spin_lock_bh(&dev->dev_lock);
-+ scst_block_dev(dev);
-+ scst_process_reset(dev, mcmd->sess, NULL, mcmd, true);
-+ spin_unlock_bh(&dev->dev_lock);
-+
-+ cont = 0;
-+ c = 0;
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ cont = 1;
-+ if (mcmd->sess == tgt_dev->sess) {
-+ rc = scst_call_dev_task_mgmt_fn(mcmd,
-+ tgt_dev, 0);
-+ if (rc == SCST_DEV_TM_NOT_COMPLETED)
-+ c = 1;
-+ else if ((rc < 0) &&
-+ (mcmd->status == SCST_MGMT_STATUS_SUCCESS))
-+ mcmd->status = rc;
-+ break;
-+ }
-+ }
-+ if (cont && !c)
-+ continue;
-+
-+ if (dev->scsi_dev == NULL)
-+ continue;
-+
-+ list_for_each_entry(d, &host_devs, tm_dev_list_entry) {
-+ if (dev->scsi_dev->host->host_no ==
-+ d->scsi_dev->host->host_no) {
-+ found = 1;
-+ break;
-+ }
-+ }
-+ if (!found)
-+ list_add_tail(&dev->tm_dev_list_entry, &host_devs);
-+
-+ tm_dbg_task_mgmt(dev, "TARGET RESET", 0);
-+ }
-+
-+ scst_unblock_aborted_cmds(1);
-+
-+ /*
-+ * We suppose here that for all commands that already on devices
-+ * on/after scsi_reset_provider() completion callbacks will be called.
-+ */
-+
-+ list_for_each_entry(dev, &host_devs, tm_dev_list_entry) {
-+ /* dev->scsi_dev must be non-NULL here */
-+ TRACE(TRACE_MGMT, "Resetting host %d bus ",
-+ dev->scsi_dev->host->host_no);
-+ rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_TARGET);
-+ TRACE(TRACE_MGMT, "Result of host %d target reset: %s",
-+ dev->scsi_dev->host->host_no,
-+ (rc == SUCCESS) ? "SUCCESS" : "FAILED");
-+#if 0
-+ if ((rc != SUCCESS) &&
-+ (mcmd->status == SCST_MGMT_STATUS_SUCCESS)) {
-+ /*
-+ * SCSI_TRY_RESET_BUS is also done by
-+ * scsi_reset_provider()
-+ */
-+ mcmd->status = SCST_MGMT_STATUS_FAILED;
-+ }
-+#else
-+ /*
-+ * scsi_reset_provider() returns very weird status, so let's
-+ * always succeed
-+ */
-+#endif
-+ }
-+
-+ list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
-+ dev = acg_dev->dev;
-+ if (dev->scsi_dev != NULL)
-+ dev->scsi_dev->was_reset = 0;
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ res = scst_set_mcmd_next_state(mcmd);
++static struct kobj_attribute scst_main_trace_level_attr =
++ __ATTR(trace_level, S_IRUGO | S_IWUSR, scst_main_trace_level_show,
++ scst_main_trace_level_store);
+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
++#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
+
-+/* Returns 0 if the command processing should be continued, <0 otherwise */
-+static int scst_lun_reset(struct scst_mgmt_cmd *mcmd)
++static ssize_t scst_version_show(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ char *buf)
+{
-+ int res, rc;
-+ struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
-+ struct scst_device *dev = tgt_dev->dev;
-+
+ TRACE_ENTRY();
+
-+ TRACE(TRACE_MGMT, "Resetting LUN %lld (mcmd %p)",
-+ (long long unsigned int)tgt_dev->lun, mcmd);
-+
-+ mcmd->needs_unblocking = 1;
-+
-+ spin_lock_bh(&dev->dev_lock);
-+ scst_block_dev(dev);
-+ scst_process_reset(dev, mcmd->sess, NULL, mcmd, true);
-+ spin_unlock_bh(&dev->dev_lock);
-+
-+ rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 1);
-+ if (rc != SCST_DEV_TM_NOT_COMPLETED)
-+ goto out_tm_dbg;
++ sprintf(buf, "%s\n", SCST_VERSION_STRING);
+
-+ if (dev->scsi_dev != NULL) {
-+ TRACE(TRACE_MGMT, "Resetting host %d bus ",
-+ dev->scsi_dev->host->host_no);
-+ rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_DEVICE);
-+#if 0
-+ if (rc != SUCCESS && mcmd->status == SCST_MGMT_STATUS_SUCCESS)
-+ mcmd->status = SCST_MGMT_STATUS_FAILED;
-+#else
-+ /*
-+ * scsi_reset_provider() returns very weird status, so let's
-+ * always succeed
-+ */
++#ifdef CONFIG_SCST_STRICT_SERIALIZING
++ strcat(buf, "STRICT_SERIALIZING\n");
+#endif
-+ dev->scsi_dev->was_reset = 0;
-+ }
-+
-+ scst_unblock_aborted_cmds(0);
-+
-+out_tm_dbg:
-+ tm_dbg_task_mgmt(mcmd->mcmd_tgt_dev->dev, "LUN RESET", 0);
-+
-+ res = scst_set_mcmd_next_state(mcmd);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* scst_mutex supposed to be held */
-+static void scst_do_nexus_loss_sess(struct scst_mgmt_cmd *mcmd)
-+{
-+ int i;
-+ struct scst_session *sess = mcmd->sess;
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
-+ struct list_head *sess_tgt_dev_list_head =
-+ &sess->sess_tgt_dev_list_hash[i];
-+ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
-+ sess_tgt_dev_list_entry) {
-+ scst_nexus_loss(tgt_dev,
-+ (mcmd->fn != SCST_UNREG_SESS_TM));
-+ }
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Returns 0 if the command processing should be continued, <0 otherwise */
-+static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
-+ int nexus_loss)
-+{
-+ int res;
-+ int i;
-+ struct scst_session *sess = mcmd->sess;
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ if (nexus_loss) {
-+ TRACE_MGMT_DBG("Nexus loss for sess %p (mcmd %p)",
-+ sess, mcmd);
-+ } else {
-+ TRACE_MGMT_DBG("Aborting all from sess %p (mcmd %p)",
-+ sess, mcmd);
-+ }
-+
-+ mutex_lock(&scst_mutex);
+
-+ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
-+ struct list_head *sess_tgt_dev_list_head =
-+ &sess->sess_tgt_dev_list_hash[i];
-+ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
-+ sess_tgt_dev_list_entry) {
-+ int rc;
-+
-+ __scst_abort_task_set(mcmd, tgt_dev);
++#ifdef CONFIG_SCST_EXTRACHECKS
++ strcat(buf, "EXTRACHECKS\n");
++#endif
+
-+ rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
-+ if (rc < 0 && mcmd->status == SCST_MGMT_STATUS_SUCCESS)
-+ mcmd->status = rc;
++#ifdef CONFIG_SCST_TRACING
++ strcat(buf, "TRACING\n");
++#endif
+
-+ tm_dbg_task_mgmt(tgt_dev->dev, "NEXUS LOSS SESS or "
-+ "ABORT ALL SESS or UNREG SESS",
-+ (mcmd->fn == SCST_UNREG_SESS_TM));
-+ }
-+ }
++#ifdef CONFIG_SCST_DEBUG
++ strcat(buf, "DEBUG\n");
++#endif
+
-+ scst_unblock_aborted_cmds(1);
++#ifdef CONFIG_SCST_DEBUG_TM
++ strcat(buf, "DEBUG_TM\n");
++#endif
+
-+ mutex_unlock(&scst_mutex);
++#ifdef CONFIG_SCST_DEBUG_RETRY
++ strcat(buf, "DEBUG_RETRY\n");
++#endif
+
-+ res = scst_set_mcmd_next_state(mcmd);
++#ifdef CONFIG_SCST_DEBUG_OOM
++ strcat(buf, "DEBUG_OOM\n");
++#endif
+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
++#ifdef CONFIG_SCST_DEBUG_SN
++ strcat(buf, "DEBUG_SN\n");
++#endif
+
-+/* scst_mutex supposed to be held */
-+static void scst_do_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd)
-+{
-+ int i;
-+ struct scst_tgt *tgt = mcmd->sess->tgt;
-+ struct scst_session *sess;
++#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
++ strcat(buf, "USE_EXPECTED_VALUES\n");
++#endif
+
-+ TRACE_ENTRY();
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ strcat(buf, "TEST_IO_IN_SIRQ\n");
++#endif
+
-+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
-+ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
-+ struct list_head *sess_tgt_dev_list_head =
-+ &sess->sess_tgt_dev_list_hash[i];
-+ struct scst_tgt_dev *tgt_dev;
-+ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
-+ sess_tgt_dev_list_entry) {
-+ scst_nexus_loss(tgt_dev, true);
-+ }
-+ }
-+ }
++#ifdef CONFIG_SCST_STRICT_SECURITY
++ strcat(buf, "STRICT_SECURITY\n");
++#endif
+
+ TRACE_EXIT();
-+ return;
++ return strlen(buf);
+}
+
-+static int scst_abort_all_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd,
-+ int nexus_loss)
-+{
-+ int res;
-+ int i;
-+ struct scst_tgt *tgt = mcmd->sess->tgt;
-+ struct scst_session *sess;
-+
-+ TRACE_ENTRY();
-+
-+ if (nexus_loss) {
-+ TRACE_MGMT_DBG("I_T Nexus loss (tgt %p, mcmd %p)",
-+ tgt, mcmd);
-+ } else {
-+ TRACE_MGMT_DBG("Aborting all from tgt %p (mcmd %p)",
-+ tgt, mcmd);
-+ }
-+
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
-+ for (i = 0; i < TGT_DEV_HASH_SIZE; i++) {
-+ struct list_head *sess_tgt_dev_list_head =
-+ &sess->sess_tgt_dev_list_hash[i];
-+ struct scst_tgt_dev *tgt_dev;
-+ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
-+ sess_tgt_dev_list_entry) {
-+ int rc;
-+
-+ __scst_abort_task_set(mcmd, tgt_dev);
-+
-+ if (nexus_loss)
-+ scst_nexus_loss(tgt_dev, true);
-+
-+ if (mcmd->sess == tgt_dev->sess) {
-+ rc = scst_call_dev_task_mgmt_fn(
-+ mcmd, tgt_dev, 0);
-+ if ((rc < 0) &&
-+ (mcmd->status == SCST_MGMT_STATUS_SUCCESS))
-+ mcmd->status = rc;
-+ }
-+
-+ tm_dbg_task_mgmt(tgt_dev->dev, "NEXUS LOSS or "
-+ "ABORT ALL", 0);
-+ }
-+ }
-+ }
-+
-+ scst_unblock_aborted_cmds(1);
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ res = scst_set_mcmd_next_state(mcmd);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
++static struct kobj_attribute scst_version_attr =
++ __ATTR(version, S_IRUGO, scst_version_show, NULL);
+
-+static int scst_abort_task(struct scst_mgmt_cmd *mcmd)
++static ssize_t scst_last_sysfs_mgmt_res_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
+{
+ int res;
-+ struct scst_cmd *cmd = mcmd->cmd_to_abort;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Abortind task (cmd %p, sn %d, set %d, tag %llu, "
-+ "queue_type %x)", cmd, cmd->sn, cmd->sn_set,
-+ (long long unsigned int)mcmd->tag, cmd->queue_type);
-+
-+ if (mcmd->lun_set && (mcmd->lun != cmd->lun)) {
-+ PRINT_ERROR("ABORT TASK: LUN mismatch: mcmd LUN %llx, "
-+ "cmd LUN %llx, cmd tag %llu",
-+ (long long unsigned int)mcmd->lun,
-+ (long long unsigned int)cmd->lun,
-+ (long long unsigned int)mcmd->tag);
-+ mcmd->status = SCST_MGMT_STATUS_REJECTED;
-+ } else if (mcmd->cmd_sn_set &&
-+ (scst_sn_before(mcmd->cmd_sn, cmd->tgt_sn) ||
-+ (mcmd->cmd_sn == cmd->tgt_sn))) {
-+ PRINT_ERROR("ABORT TASK: SN mismatch: mcmd SN %x, "
-+ "cmd SN %x, cmd tag %llu", mcmd->cmd_sn,
-+ cmd->tgt_sn, (long long unsigned int)mcmd->tag);
-+ mcmd->status = SCST_MGMT_STATUS_REJECTED;
-+ } else {
-+ scst_abort_cmd(cmd, mcmd, 0, 1);
-+ scst_unblock_aborted_cmds(0);
-+ }
-+
-+ res = scst_set_mcmd_next_state(mcmd);
-+
-+ mcmd->cmd_to_abort = NULL; /* just in case */
-+
-+ __scst_cmd_put(cmd);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* Returns 0 if the command processing should be continued, <0 otherwise */
-+static int scst_mgmt_cmd_exec(struct scst_mgmt_cmd *mcmd)
-+{
-+ int res = 0;
+
+ TRACE_ENTRY();
+
-+ mcmd->status = SCST_MGMT_STATUS_SUCCESS;
-+
-+ switch (mcmd->fn) {
-+ case SCST_ABORT_TASK:
-+ res = scst_abort_task(mcmd);
-+ break;
-+
-+ case SCST_ABORT_TASK_SET:
-+ case SCST_PR_ABORT_ALL:
-+ res = scst_abort_task_set(mcmd);
-+ break;
-+
-+ case SCST_CLEAR_TASK_SET:
-+ if (mcmd->mcmd_tgt_dev->dev->tst ==
-+ SCST_CONTR_MODE_SEP_TASK_SETS)
-+ res = scst_abort_task_set(mcmd);
-+ else
-+ res = scst_clear_task_set(mcmd);
-+ break;
-+
-+ case SCST_LUN_RESET:
-+ res = scst_lun_reset(mcmd);
-+ break;
-+
-+ case SCST_TARGET_RESET:
-+ res = scst_target_reset(mcmd);
-+ break;
-+
-+ case SCST_ABORT_ALL_TASKS_SESS:
-+ res = scst_abort_all_nexus_loss_sess(mcmd, 0);
-+ break;
-+
-+ case SCST_NEXUS_LOSS_SESS:
-+ case SCST_UNREG_SESS_TM:
-+ res = scst_abort_all_nexus_loss_sess(mcmd, 1);
-+ break;
-+
-+ case SCST_ABORT_ALL_TASKS:
-+ res = scst_abort_all_nexus_loss_tgt(mcmd, 0);
-+ break;
-+
-+ case SCST_NEXUS_LOSS:
-+ res = scst_abort_all_nexus_loss_tgt(mcmd, 1);
-+ break;
-+
-+ case SCST_CLEAR_ACA:
-+ if (scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 1) ==
-+ SCST_DEV_TM_NOT_COMPLETED) {
-+ mcmd->status = SCST_MGMT_STATUS_FN_NOT_SUPPORTED;
-+ /* Nothing to do (yet) */
-+ }
-+ goto out_done;
-+
-+ default:
-+ PRINT_ERROR("Unknown task management function %d", mcmd->fn);
-+ mcmd->status = SCST_MGMT_STATUS_REJECTED;
-+ goto out_done;
-+ }
++ spin_lock(&sysfs_work_lock);
++ TRACE_DBG("active_sysfs_works %d", active_sysfs_works);
++ if (active_sysfs_works > 0)
++ res = -EAGAIN;
++ else
++ res = sprintf(buf, "%d\n", last_sysfs_work_res);
++ spin_unlock(&sysfs_work_lock);
+
-+out:
+ TRACE_EXIT_RES(res);
+ return res;
-+
-+out_done:
-+ res = scst_set_mcmd_next_state(mcmd);
-+ goto out;
+}
+
-+static void scst_call_task_mgmt_affected_cmds_done(struct scst_mgmt_cmd *mcmd)
-+{
-+ struct scst_session *sess = mcmd->sess;
++static struct kobj_attribute scst_last_sysfs_mgmt_res_attr =
++ __ATTR(last_sysfs_mgmt_res, S_IRUGO,
++ scst_last_sysfs_mgmt_res_show, NULL);
+
-+ if ((sess->tgt->tgtt->task_mgmt_affected_cmds_done != NULL) &&
-+ (mcmd->fn != SCST_UNREG_SESS_TM) &&
-+ (mcmd->fn != SCST_PR_ABORT_ALL)) {
-+ TRACE_DBG("Calling target %s task_mgmt_affected_cmds_done(%p)",
-+ sess->tgt->tgtt->name, sess);
-+ sess->tgt->tgtt->task_mgmt_affected_cmds_done(mcmd);
-+ TRACE_MGMT_DBG("Target's %s task_mgmt_affected_cmds_done() "
-+ "returned", sess->tgt->tgtt->name);
-+ }
-+ return;
-+}
++static struct attribute *scst_sysfs_root_default_attrs[] = {
++ &scst_threads_attr.attr,
++ &scst_setup_id_attr.attr,
++ &scst_max_tasklet_cmd_attr.attr,
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ &scst_main_trace_level_attr.attr,
++#endif
++ &scst_version_attr.attr,
++ &scst_last_sysfs_mgmt_res_attr.attr,
++ NULL,
++};
+
-+static int scst_mgmt_affected_cmds_done(struct scst_mgmt_cmd *mcmd)
++static void scst_sysfs_root_release(struct kobject *kobj)
+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_mutex);
-+
-+ switch (mcmd->fn) {
-+ case SCST_NEXUS_LOSS_SESS:
-+ case SCST_UNREG_SESS_TM:
-+ scst_do_nexus_loss_sess(mcmd);
-+ break;
-+
-+ case SCST_NEXUS_LOSS:
-+ scst_do_nexus_loss_tgt(mcmd);
-+ break;
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ scst_call_task_mgmt_affected_cmds_done(mcmd);
-+
-+ res = scst_set_mcmd_next_state(mcmd);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
++ complete_all(&scst_sysfs_root_release_completion);
+}
+
-+static void scst_mgmt_cmd_send_done(struct scst_mgmt_cmd *mcmd)
-+{
-+ struct scst_device *dev;
-+ struct scst_session *sess = mcmd->sess;
-+
-+ TRACE_ENTRY();
-+
-+ mcmd->state = SCST_MCMD_STATE_FINISHED;
-+ if (scst_is_strict_mgmt_fn(mcmd->fn) && (mcmd->completed_cmd_count > 0))
-+ mcmd->status = SCST_MGMT_STATUS_TASK_NOT_EXIST;
-+
-+ if (mcmd->fn < SCST_UNREG_SESS_TM)
-+ TRACE(TRACE_MGMT, "TM fn %d finished, "
-+ "status %x", mcmd->fn, mcmd->status);
-+ else
-+ TRACE_MGMT_DBG("TM fn %d finished, "
-+ "status %x", mcmd->fn, mcmd->status);
-+
-+ if (mcmd->fn == SCST_PR_ABORT_ALL) {
-+ mcmd->origin_pr_cmd->scst_cmd_done(mcmd->origin_pr_cmd,
-+ SCST_CMD_STATE_DEFAULT,
-+ SCST_CONTEXT_THREAD);
-+ } else if ((sess->tgt->tgtt->task_mgmt_fn_done != NULL) &&
-+ (mcmd->fn != SCST_UNREG_SESS_TM)) {
-+ TRACE_DBG("Calling target %s task_mgmt_fn_done(%p)",
-+ sess->tgt->tgtt->name, sess);
-+ sess->tgt->tgtt->task_mgmt_fn_done(mcmd);
-+ TRACE_MGMT_DBG("Target's %s task_mgmt_fn_done() "
-+ "returned", sess->tgt->tgtt->name);
-+ }
-+
-+ if (mcmd->needs_unblocking) {
-+ switch (mcmd->fn) {
-+ case SCST_LUN_RESET:
-+ case SCST_CLEAR_TASK_SET:
-+ scst_unblock_dev(mcmd->mcmd_tgt_dev->dev);
-+ break;
-+
-+ case SCST_TARGET_RESET:
-+ {
-+ struct scst_acg *acg = mcmd->sess->acg;
-+ struct scst_acg_dev *acg_dev;
-+
-+ mutex_lock(&scst_mutex);
-+ list_for_each_entry(acg_dev, &acg->acg_dev_list,
-+ acg_dev_list_entry) {
-+ dev = acg_dev->dev;
-+ scst_unblock_dev(dev);
-+ }
-+ mutex_unlock(&scst_mutex);
-+ break;
-+ }
++static struct kobj_type scst_sysfs_root_ktype = {
++ .sysfs_ops = &scst_sysfs_ops,
++ .release = scst_sysfs_root_release,
++ .default_attrs = scst_sysfs_root_default_attrs,
++};
+
-+ default:
-+ BUG();
-+ break;
-+ }
-+ }
++/**
++ ** Sysfs user info
++ **/
+
-+ mcmd->tgt_priv = NULL;
++static DEFINE_MUTEX(scst_sysfs_user_info_mutex);
+
-+ TRACE_EXIT();
-+ return;
-+}
++/* All protected by scst_sysfs_user_info_mutex */
++static LIST_HEAD(scst_sysfs_user_info_list);
++static uint32_t scst_sysfs_info_cur_cookie;
+
-+/* Returns >0, if cmd should be requeued */
-+static int scst_process_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
++/* scst_sysfs_user_info_mutex supposed to be held */
++static struct scst_sysfs_user_info *scst_sysfs_user_find_info(uint32_t cookie)
+{
-+ int res = 0;
++ struct scst_sysfs_user_info *info, *res = NULL;
+
+ TRACE_ENTRY();
+
-+ /*
-+ * We are in the TM thread and mcmd->state guaranteed to not be
-+ * changed behind us.
-+ */
-+
-+ TRACE_DBG("mcmd %p, state %d", mcmd, mcmd->state);
-+
-+ while (1) {
-+ switch (mcmd->state) {
-+ case SCST_MCMD_STATE_INIT:
-+ res = scst_mgmt_cmd_init(mcmd);
-+ if (res)
-+ goto out;
-+ break;
-+
-+ case SCST_MCMD_STATE_EXEC:
-+ if (scst_mgmt_cmd_exec(mcmd))
-+ goto out;
-+ break;
-+
-+ case SCST_MCMD_STATE_AFFECTED_CMDS_DONE:
-+ if (scst_mgmt_affected_cmds_done(mcmd))
-+ goto out;
-+ break;
-+
-+ case SCST_MCMD_STATE_DONE:
-+ scst_mgmt_cmd_send_done(mcmd);
++ list_for_each_entry(info, &scst_sysfs_user_info_list,
++ info_list_entry) {
++ if (info->info_cookie == cookie) {
++ res = info;
+ break;
-+
-+ case SCST_MCMD_STATE_FINISHED:
-+ scst_free_mgmt_cmd(mcmd);
-+ /* mcmd is dead */
-+ goto out;
-+
-+ default:
-+ PRINT_CRIT_ERROR("Wrong mcmd %p state %d (fn %d, "
-+ "cmd_finish_wait_count %d, cmd_done_wait_count "
-+ "%d)", mcmd, mcmd->state, mcmd->fn,
-+ mcmd->cmd_finish_wait_count,
-+ mcmd->cmd_done_wait_count);
-+ BUG();
-+ res = -1;
-+ goto out;
+ }
+ }
+
-+out:
-+ TRACE_EXIT_RES(res);
++ TRACE_EXIT_HRES(res);
+ return res;
+}
+
-+static inline int test_mgmt_cmd_list(void)
++/**
++ * scst_sysfs_user_get_info() - get user_info
++ *
++ * Finds the user_info based on cookie and mark it as received the reply by
++ * setting for it flag info_being_executed.
++ *
++ * Returns found entry or NULL.
++ */
++struct scst_sysfs_user_info *scst_sysfs_user_get_info(uint32_t cookie)
+{
-+ int res = !list_empty(&scst_active_mgmt_cmd_list) ||
-+ unlikely(kthread_should_stop());
-+ return res;
-+}
++ struct scst_sysfs_user_info *res = NULL;
+
-+int scst_tm_thread(void *arg)
-+{
+ TRACE_ENTRY();
+
-+ PRINT_INFO("Task management thread started, PID %d", current->pid);
-+
-+ current->flags |= PF_NOFREEZE;
-+
-+ set_user_nice(current, -10);
-+
-+ spin_lock_irq(&scst_mcmd_lock);
-+ while (!kthread_should_stop()) {
-+ wait_queue_t wait;
-+ init_waitqueue_entry(&wait, current);
-+
-+ if (!test_mgmt_cmd_list()) {
-+ add_wait_queue_exclusive(&scst_mgmt_cmd_list_waitQ,
-+ &wait);
-+ for (;;) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ if (test_mgmt_cmd_list())
-+ break;
-+ spin_unlock_irq(&scst_mcmd_lock);
-+ schedule();
-+ spin_lock_irq(&scst_mcmd_lock);
-+ }
-+ set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&scst_mgmt_cmd_list_waitQ, &wait);
-+ }
++ mutex_lock(&scst_sysfs_user_info_mutex);
+
-+ while (!list_empty(&scst_active_mgmt_cmd_list)) {
-+ int rc;
-+ struct scst_mgmt_cmd *mcmd;
-+ mcmd = list_entry(scst_active_mgmt_cmd_list.next,
-+ typeof(*mcmd), mgmt_cmd_list_entry);
-+ TRACE_MGMT_DBG("Deleting mgmt cmd %p from active cmd "
-+ "list", mcmd);
-+ list_del(&mcmd->mgmt_cmd_list_entry);
-+ spin_unlock_irq(&scst_mcmd_lock);
-+ rc = scst_process_mgmt_cmd(mcmd);
-+ spin_lock_irq(&scst_mcmd_lock);
-+ if (rc > 0) {
-+ if (test_bit(SCST_FLAG_SUSPENDED, &scst_flags) &&
-+ !test_bit(SCST_FLAG_SUSPENDING,
-+ &scst_flags)) {
-+ TRACE_MGMT_DBG("Adding mgmt cmd %p to "
-+ "head of delayed mgmt cmd list",
-+ mcmd);
-+ list_add(&mcmd->mgmt_cmd_list_entry,
-+ &scst_delayed_mgmt_cmd_list);
-+ } else {
-+ TRACE_MGMT_DBG("Adding mgmt cmd %p to "
-+ "head of active mgmt cmd list",
-+ mcmd);
-+ list_add(&mcmd->mgmt_cmd_list_entry,
-+ &scst_active_mgmt_cmd_list);
-+ }
-+ }
-+ }
++ res = scst_sysfs_user_find_info(cookie);
++ if (res != NULL) {
++ if (!res->info_being_executed)
++ res->info_being_executed = 1;
+ }
-+ spin_unlock_irq(&scst_mcmd_lock);
-+
-+ /*
-+ * If kthread_should_stop() is true, we are guaranteed to be
-+ * on the module unload, so scst_active_mgmt_cmd_list must be empty.
-+ */
-+ BUG_ON(!list_empty(&scst_active_mgmt_cmd_list));
+
-+ PRINT_INFO("Task management thread PID %d finished", current->pid);
++ mutex_unlock(&scst_sysfs_user_info_mutex);
+
-+ TRACE_EXIT();
-+ return 0;
++ TRACE_EXIT_HRES(res);
++ return res;
+}
++EXPORT_SYMBOL_GPL(scst_sysfs_user_get_info);
+
-+static struct scst_mgmt_cmd *scst_pre_rx_mgmt_cmd(struct scst_session
-+ *sess, int fn, int atomic, void *tgt_priv)
-+{
-+ struct scst_mgmt_cmd *mcmd = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(sess->tgt->tgtt->task_mgmt_fn_done == NULL)) {
-+ PRINT_ERROR("New mgmt cmd, but task_mgmt_fn_done() is NULL "
-+ "(target %s)", sess->tgt->tgtt->name);
-+ goto out;
-+ }
-+
-+ mcmd = scst_alloc_mgmt_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
-+ if (mcmd == NULL) {
-+ PRINT_CRIT_ERROR("Lost TM fn %d, initiator %s", fn,
-+ sess->initiator_name);
-+ goto out;
-+ }
-+
-+ mcmd->sess = sess;
-+ mcmd->fn = fn;
-+ mcmd->state = SCST_MCMD_STATE_INIT;
-+ mcmd->tgt_priv = tgt_priv;
-+
-+ if (fn == SCST_PR_ABORT_ALL) {
-+ atomic_inc(&mcmd->origin_pr_cmd->pr_abort_counter->pr_abort_pending_cnt);
-+ atomic_inc(&mcmd->origin_pr_cmd->pr_abort_counter->pr_aborting_cnt);
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return mcmd;
-+}
++/**
++ ** Helper functionality to help target drivers and dev handlers support
++ ** sending events to user space and wait for their completion in a safe
++ ** manner. See samples how to use it in iscsi-scst or scst_user.
++ **/
+
-+static int scst_post_rx_mgmt_cmd(struct scst_session *sess,
-+ struct scst_mgmt_cmd *mcmd)
++/**
++ * scst_sysfs_user_add_info() - create and add user_info in the global list
++ *
++ * Creates an info structure and adds it in the info_list.
++ * Returns 0 and out_info on success, error code otherwise.
++ */
++int scst_sysfs_user_add_info(struct scst_sysfs_user_info **out_info)
+{
-+ unsigned long flags;
+ int res = 0;
++ struct scst_sysfs_user_info *info;
+
+ TRACE_ENTRY();
+
-+ scst_sess_get(sess);
-+
-+ if (unlikely(sess->shut_phase != SCST_SESS_SPH_READY)) {
-+ PRINT_CRIT_ERROR("New mgmt cmd while shutting down the "
-+ "session %p shut_phase %ld", sess, sess->shut_phase);
-+ BUG();
++ info = kzalloc(sizeof(*info), GFP_KERNEL);
++ if (info == NULL) {
++ PRINT_ERROR("Unable to allocate sysfs user info (size %zd)",
++ sizeof(*info));
++ res = -ENOMEM;
++ goto out;
+ }
+
-+ local_irq_save(flags);
-+
-+ spin_lock(&sess->sess_list_lock);
-+ atomic_inc(&sess->sess_cmd_count);
++ mutex_lock(&scst_sysfs_user_info_mutex);
+
-+ if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
-+ switch (sess->init_phase) {
-+ case SCST_SESS_IPH_INITING:
-+ TRACE_DBG("Adding mcmd %p to init deferred mcmd list",
-+ mcmd);
-+ list_add_tail(&mcmd->mgmt_cmd_list_entry,
-+ &sess->init_deferred_mcmd_list);
-+ goto out_unlock;
-+ case SCST_SESS_IPH_SUCCESS:
-+ break;
-+ case SCST_SESS_IPH_FAILED:
-+ res = -1;
-+ goto out_unlock;
-+ default:
-+ BUG();
-+ }
-+ }
++ while ((info->info_cookie == 0) ||
++ (scst_sysfs_user_find_info(info->info_cookie) != NULL))
++ info->info_cookie = scst_sysfs_info_cur_cookie++;
+
-+ spin_unlock(&sess->sess_list_lock);
++ init_completion(&info->info_completion);
+
-+ TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd list", mcmd);
-+ spin_lock(&scst_mcmd_lock);
-+ list_add_tail(&mcmd->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
-+ spin_unlock(&scst_mcmd_lock);
++ list_add_tail(&info->info_list_entry, &scst_sysfs_user_info_list);
++ info->info_in_list = 1;
+
-+ local_irq_restore(flags);
++ *out_info = info;
+
-+ wake_up(&scst_mgmt_cmd_list_waitQ);
++ mutex_unlock(&scst_sysfs_user_info_mutex);
+
+out:
-+ TRACE_EXIT();
++ TRACE_EXIT_RES(res);
+ return res;
-+
-+out_unlock:
-+ spin_unlock(&sess->sess_list_lock);
-+ local_irq_restore(flags);
-+ goto out;
+}
++EXPORT_SYMBOL_GPL(scst_sysfs_user_add_info);
+
+/**
-+ * scst_rx_mgmt_fn() - create new management command and send it for execution
-+ *
-+ * Description:
-+ * Creates new management command and sends it for execution.
-+ *
-+ * Returns 0 for success, error code otherwise.
-+ *
-+ * Must not be called in parallel with scst_unregister_session() for the
-+ * same sess.
++ * scst_sysfs_user_del_info - delete and frees user_info
+ */
-+int scst_rx_mgmt_fn(struct scst_session *sess,
-+ const struct scst_rx_mgmt_params *params)
++void scst_sysfs_user_del_info(struct scst_sysfs_user_info *info)
+{
-+ int res = -EFAULT;
-+ struct scst_mgmt_cmd *mcmd = NULL;
-+
+ TRACE_ENTRY();
+
-+ switch (params->fn) {
-+ case SCST_ABORT_TASK:
-+ BUG_ON(!params->tag_set);
-+ break;
-+ case SCST_TARGET_RESET:
-+ case SCST_ABORT_ALL_TASKS:
-+ case SCST_NEXUS_LOSS:
-+ break;
-+ default:
-+ BUG_ON(!params->lun_set);
-+ }
-+
-+ mcmd = scst_pre_rx_mgmt_cmd(sess, params->fn, params->atomic,
-+ params->tgt_priv);
-+ if (mcmd == NULL)
-+ goto out;
-+
-+ if (params->lun_set) {
-+ mcmd->lun = scst_unpack_lun(params->lun, params->lun_len);
-+ if (mcmd->lun == NO_SUCH_LUN)
-+ goto out_free;
-+ mcmd->lun_set = 1;
-+ }
-+
-+ if (params->tag_set)
-+ mcmd->tag = params->tag;
-+
-+ mcmd->cmd_sn_set = params->cmd_sn_set;
-+ mcmd->cmd_sn = params->cmd_sn;
-+
-+ if (params->fn < SCST_UNREG_SESS_TM)
-+ TRACE(TRACE_MGMT, "TM fn %d", params->fn);
-+ else
-+ TRACE_MGMT_DBG("TM fn %d", params->fn);
-+
-+ TRACE_MGMT_DBG("sess=%p, tag_set %d, tag %lld, lun_set %d, "
-+ "lun=%lld, cmd_sn_set %d, cmd_sn %d, priv %p", sess,
-+ params->tag_set,
-+ (long long unsigned int)params->tag,
-+ params->lun_set,
-+ (long long unsigned int)mcmd->lun,
-+ params->cmd_sn_set,
-+ params->cmd_sn,
-+ params->tgt_priv);
++ mutex_lock(&scst_sysfs_user_info_mutex);
+
-+ if (scst_post_rx_mgmt_cmd(sess, mcmd) != 0)
-+ goto out_free;
++ if (info->info_in_list)
++ list_del(&info->info_list_entry);
+
-+ res = 0;
++ mutex_unlock(&scst_sysfs_user_info_mutex);
+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
++ kfree(info);
+
-+out_free:
-+ scst_free_mgmt_cmd(mcmd);
-+ mcmd = NULL;
-+ goto out;
++ TRACE_EXIT();
++ return;
+}
-+EXPORT_SYMBOL(scst_rx_mgmt_fn);
++EXPORT_SYMBOL_GPL(scst_sysfs_user_del_info);
+
+/*
-+ * Written by Jack Handy - jakkhandy@hotmail.com
-+ * Taken by Gennadiy Nerubayev <parakie@gmail.com> from
-+ * http://www.codeproject.com/KB/string/wildcmp.aspx. No license attached
-+ * to it, and it's posted on a free site; assumed to be free for use.
-+ *
-+ * Added the negative sign support - VLNB
-+ *
-+ * Also see comment for wildcmp().
-+ *
-+ * User space part of iSCSI-SCST also has a copy of this code, so fixing a bug
-+ * here, don't forget to fix the copy too!
++ * Returns true if the reply received and being processed by another part of
++ * the kernel, false otherwise. Also removes the user_info from the list to
++ * fix for the user space that it missed the timeout.
+ */
-+static bool __wildcmp(const char *wild, const char *string, int recursion_level)
++static bool scst_sysfs_user_info_executing(struct scst_sysfs_user_info *info)
+{
-+ const char *cp = NULL, *mp = NULL;
-+
-+ while ((*string) && (*wild != '*')) {
-+ if ((*wild == '!') && (recursion_level == 0))
-+ return !__wildcmp(++wild, string, ++recursion_level);
-+
-+ if ((*wild != *string) && (*wild != '?'))
-+ return false;
++ bool res;
+
-+ wild++;
-+ string++;
-+ }
++ TRACE_ENTRY();
+
-+ while (*string) {
-+ if ((*wild == '!') && (recursion_level == 0))
-+ return !__wildcmp(++wild, string, ++recursion_level);
++ mutex_lock(&scst_sysfs_user_info_mutex);
+
-+ if (*wild == '*') {
-+ if (!*++wild)
-+ return true;
++ res = info->info_being_executed;
+
-+ mp = wild;
-+ cp = string+1;
-+ } else if ((*wild == *string) || (*wild == '?')) {
-+ wild++;
-+ string++;
-+ } else {
-+ wild = mp;
-+ string = cp++;
-+ }
++ if (info->info_in_list) {
++ list_del(&info->info_list_entry);
++ info->info_in_list = 0;
+ }
+
-+ while (*wild == '*')
-+ wild++;
++ mutex_unlock(&scst_sysfs_user_info_mutex);
+
-+ return !*wild;
++ TRACE_EXIT_RES(res);
++ return res;
+}
+
-+/*
-+ * Returns true if string "string" matches pattern "wild", false otherwise.
-+ * Pattern is a regular DOS-type pattern, containing '*' and '?' symbols.
-+ * '*' means match all any symbols, '?' means match only any single symbol.
-+ *
-+ * For instance:
-+ * if (wildcmp("bl?h.*", "blah.jpg")) {
-+ * // match
-+ * } else {
-+ * // no match
-+ * }
++/**
++ * scst_wait_info_completion() - wait an user space event's completion
+ *
-+ * Also it supports boolean inversion sign '!', which does boolean inversion of
-+ * the value of the rest of the string. Only one '!' allowed in the pattern,
-+ * other '!' are treated as regular symbols. For instance:
-+ * if (wildcmp("bl!?h.*", "blah.jpg")) {
-+ * // no match
-+ * } else {
-+ * // match
-+ * }
++ * Waits for the info request been completed by user space at most timeout
++ * jiffies. If the reply received before timeout and being processed by
++ * another part of the kernel, i.e. scst_sysfs_user_info_executing()
++ * returned true, waits for it to complete indefinitely.
+ *
-+ * Also see comment for __wildcmp().
++ * Returns status of the request completion.
+ */
-+static bool wildcmp(const char *wild, const char *string)
-+{
-+ return __wildcmp(wild, string, 0);
-+}
-+
-+/* scst_mutex supposed to be held */
-+static struct scst_acg *scst_find_tgt_acg_by_name_wild(struct scst_tgt *tgt,
-+ const char *initiator_name)
++int scst_wait_info_completion(struct scst_sysfs_user_info *info,
++ unsigned long timeout)
+{
-+ struct scst_acg *acg, *res = NULL;
-+ struct scst_acn *n;
++ int res, rc;
+
+ TRACE_ENTRY();
+
-+ if (initiator_name == NULL)
-+ goto out;
++ TRACE_DBG("Waiting for info %p completion", info);
+
-+ list_for_each_entry(acg, &tgt->tgt_acg_list, acg_list_entry) {
-+ list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
-+ if (wildcmp(n->name, initiator_name)) {
-+ TRACE_DBG("Access control group %s found",
-+ acg->acg_name);
-+ res = acg;
++ while (1) {
++ rc = wait_for_completion_interruptible_timeout(
++ &info->info_completion, timeout);
++ if (rc > 0) {
++ TRACE_DBG("Waiting for info %p finished with %d",
++ info, rc);
++ break;
++ } else if (rc == 0) {
++ if (!scst_sysfs_user_info_executing(info)) {
++ PRINT_ERROR("Timeout waiting for user "
++ "space event %p", info);
++ res = -EBUSY;
+ goto out;
++ } else {
++ /* Req is being executed in the kernel */
++ TRACE_DBG("Keep waiting for info %p completion",
++ info);
++ wait_for_completion(&info->info_completion);
++ break;
+ }
++ } else if (rc != -ERESTARTSYS) {
++ res = rc;
++ PRINT_ERROR("wait_for_completion() failed: %d",
++ res);
++ goto out;
++ } else {
++ TRACE_DBG("Waiting for info %p finished with %d, "
++ "retrying", info, rc);
+ }
+ }
+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
-+
-+/* Must be called under scst_mutex */
-+static struct scst_acg *__scst_find_acg(struct scst_tgt *tgt,
-+ const char *initiator_name)
-+{
-+ struct scst_acg *acg = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ acg = scst_find_tgt_acg_by_name_wild(tgt, initiator_name);
-+ if (acg == NULL)
-+ acg = tgt->default_acg;
-+
-+ TRACE_EXIT_HRES((unsigned long)acg);
-+ return acg;
-+}
-+
-+/* Must be called under scst_mutex */
-+struct scst_acg *scst_find_acg(const struct scst_session *sess)
-+{
-+ return __scst_find_acg(sess->tgt, sess->initiator_name);
-+}
-+
-+/**
-+ * scst_initiator_has_luns() - check if this initiator will see any LUNs
-+ *
-+ * Checks if this initiator will see any LUNs upon connect to this target.
-+ * Returns true if yes and false otherwise.
-+ */
-+bool scst_initiator_has_luns(struct scst_tgt *tgt, const char *initiator_name)
-+{
-+ bool res;
-+ struct scst_acg *acg;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_mutex);
-+
-+ acg = __scst_find_acg(tgt, initiator_name);
-+
-+ res = !list_empty(&acg->acg_dev_list);
-+
-+ mutex_unlock(&scst_mutex);
++ TRACE_DBG("info %p, status %d", info, info->info_status);
++ res = info->info_status;
+
++out:
+ TRACE_EXIT_RES(res);
+ return res;
+}
-+EXPORT_SYMBOL_GPL(scst_initiator_has_luns);
++EXPORT_SYMBOL_GPL(scst_wait_info_completion);
+
-+static int scst_init_session(struct scst_session *sess)
++static struct kobject scst_sysfs_root_kobj;
++
++int __init scst_sysfs_init(void)
+{
+ int res = 0;
-+ struct scst_cmd *cmd;
-+ struct scst_mgmt_cmd *mcmd, *tm;
-+ int mwake = 0;
+
+ TRACE_ENTRY();
+
-+ mutex_lock(&scst_mutex);
-+
-+ sess->acg = scst_find_acg(sess);
-+
-+ PRINT_INFO("Using security group \"%s\" for initiator \"%s\"",
-+ sess->acg->acg_name, sess->initiator_name);
-+
-+ list_add_tail(&sess->acg_sess_list_entry, &sess->acg->acg_sess_list);
-+
-+ TRACE_DBG("Adding sess %p to tgt->sess_list", sess);
-+ list_add_tail(&sess->sess_list_entry, &sess->tgt->sess_list);
-+
-+ if (sess->tgt->tgtt->get_initiator_port_transport_id != NULL) {
-+ res = sess->tgt->tgtt->get_initiator_port_transport_id(sess,
-+ &sess->transport_id);
-+ if (res != 0) {
-+ PRINT_ERROR("Unable to make initiator %s port "
-+ "transport id", sess->initiator_name);
-+ goto failed;
-+ }
-+ TRACE_PR("sess %p (ini %s), transport id %s/%d", sess,
-+ sess->initiator_name,
-+ debug_transport_id_to_initiator_name(
-+ sess->transport_id), sess->tgt->rel_tgt_id);
++ sysfs_work_thread = kthread_run(sysfs_work_thread_fn,
++ NULL, "scst_uid");
++ if (IS_ERR(sysfs_work_thread)) {
++ res = PTR_ERR(sysfs_work_thread);
++ PRINT_ERROR("kthread_run() for user interface thread "
++ "failed: %d", res);
++ sysfs_work_thread = NULL;
++ goto out;
+ }
+
-+ res = scst_sess_sysfs_create(sess);
++ res = kobject_init_and_add(&scst_sysfs_root_kobj,
++ &scst_sysfs_root_ktype, kernel_kobj, "%s", "scst_tgt");
+ if (res != 0)
-+ goto failed;
-+
-+ /*
-+ * scst_sess_alloc_tgt_devs() must be called after session added in the
-+ * sess_list to not race with scst_check_reassign_sess()!
-+ */
-+ res = scst_sess_alloc_tgt_devs(sess);
-+
-+failed:
-+ mutex_unlock(&scst_mutex);
-+
-+ if (sess->init_result_fn) {
-+ TRACE_DBG("Calling init_result_fn(%p)", sess);
-+ sess->init_result_fn(sess, sess->reg_sess_data, res);
-+ TRACE_DBG("%s", "init_result_fn() returned");
-+ }
-+
-+ spin_lock_irq(&sess->sess_list_lock);
-+
-+ if (res == 0)
-+ sess->init_phase = SCST_SESS_IPH_SUCCESS;
-+ else
-+ sess->init_phase = SCST_SESS_IPH_FAILED;
-+
-+restart:
-+ list_for_each_entry(cmd, &sess->init_deferred_cmd_list,
-+ cmd_list_entry) {
-+ TRACE_DBG("Deleting cmd %p from init deferred cmd list", cmd);
-+ list_del(&cmd->cmd_list_entry);
-+ atomic_dec(&sess->sess_cmd_count);
-+ spin_unlock_irq(&sess->sess_list_lock);
-+ scst_cmd_init_done(cmd, SCST_CONTEXT_THREAD);
-+ spin_lock_irq(&sess->sess_list_lock);
-+ goto restart;
-+ }
-+
-+ spin_lock(&scst_mcmd_lock);
-+ list_for_each_entry_safe(mcmd, tm, &sess->init_deferred_mcmd_list,
-+ mgmt_cmd_list_entry) {
-+ TRACE_DBG("Moving mgmt command %p from init deferred mcmd list",
-+ mcmd);
-+ list_move_tail(&mcmd->mgmt_cmd_list_entry,
-+ &scst_active_mgmt_cmd_list);
-+ mwake = 1;
-+ }
-+
-+ spin_unlock(&scst_mcmd_lock);
-+ /*
-+ * In case of an error at this point the caller target driver supposed
-+ * to already call this sess's unregistration.
-+ */
-+ sess->init_phase = SCST_SESS_IPH_READY;
-+ spin_unlock_irq(&sess->sess_list_lock);
-+
-+ if (mwake)
-+ wake_up(&scst_mgmt_cmd_list_waitQ);
-+
-+ scst_sess_put(sess);
-+
-+ TRACE_EXIT();
-+ return res;
-+}
++ goto sysfs_root_add_error;
+
-+/**
-+ * scst_register_session() - register session
-+ * @tgt: target
-+ * @atomic: true, if the function called in the atomic context. If false,
-+ * this function will block until the session registration is
-+ * completed.
-+ * @initiator_name: remote initiator's name, any NULL-terminated string,
-+ * e.g. iSCSI name, which used as the key to found appropriate
-+ * access control group. Could be NULL, then the default
-+ * target's LUNs are used.
-+ * @tgt_priv: pointer to target driver's private data
-+ * @result_fn_data: any target driver supplied data
-+ * @result_fn: pointer to the function that will be asynchronously called
-+ * when session initialization finishes.
-+ * Can be NULL. Parameters:
-+ * - sess - session
-+ * - data - target driver supplied to scst_register_session()
-+ * data
-+ * - result - session initialization result, 0 on success or
-+ * appropriate error code otherwise
-+ *
-+ * Description:
-+ * Registers new session. Returns new session on success or NULL otherwise.
-+ *
-+ * Note: A session creation and initialization is a complex task,
-+ * which requires sleeping state, so it can't be fully done
-+ * in interrupt context. Therefore the "bottom half" of it, if
-+ * scst_register_session() is called from atomic context, will be
-+ * done in SCST thread context. In this case scst_register_session()
-+ * will return not completely initialized session, but the target
-+ * driver can supply commands to this session via scst_rx_cmd().
-+ * Those commands processing will be delayed inside SCST until
-+ * the session initialization is finished, then their processing
-+ * will be restarted. The target driver will be notified about
-+ * finish of the session initialization by function result_fn().
-+ * On success the target driver could do nothing, but if the
-+ * initialization fails, the target driver must ensure that
-+ * no more new commands being sent or will be sent to SCST after
-+ * result_fn() returns. All already sent to SCST commands for
-+ * failed session will be returned in xmit_response() with BUSY status.
-+ * In case of failure the driver shall call scst_unregister_session()
-+ * inside result_fn(), it will NOT be called automatically.
-+ */
-+struct scst_session *scst_register_session(struct scst_tgt *tgt, int atomic,
-+ const char *initiator_name, void *tgt_priv, void *result_fn_data,
-+ void (*result_fn) (struct scst_session *sess, void *data, int result))
-+{
-+ struct scst_session *sess;
-+ int res;
-+ unsigned long flags;
++ scst_targets_kobj = kobject_create_and_add("targets",
++ &scst_sysfs_root_kobj);
++ if (scst_targets_kobj == NULL)
++ goto targets_kobj_error;
+
-+ TRACE_ENTRY();
++ scst_devices_kobj = kobject_create_and_add("devices",
++ &scst_sysfs_root_kobj);
++ if (scst_devices_kobj == NULL)
++ goto devices_kobj_error;
+
-+ sess = scst_alloc_session(tgt, atomic ? GFP_ATOMIC : GFP_KERNEL,
-+ initiator_name);
-+ if (sess == NULL)
-+ goto out;
++ res = scst_add_sgv_kobj(&scst_sysfs_root_kobj, "sgv");
++ if (res != 0)
++ goto sgv_kobj_error;
+
-+ scst_sess_set_tgt_priv(sess, tgt_priv);
++ scst_handlers_kobj = kobject_create_and_add("handlers",
++ &scst_sysfs_root_kobj);
++ if (scst_handlers_kobj == NULL)
++ goto handlers_kobj_error;
+
-+ scst_sess_get(sess); /* one for registered session */
-+ scst_sess_get(sess); /* one held until sess is inited */
++ scst_device_groups_kobj = kobject_create_and_add("device_groups",
++ &scst_sysfs_root_kobj);
++ if (scst_device_groups_kobj == NULL)
++ goto device_groups_kobj_error;
+
-+ if (atomic) {
-+ sess->reg_sess_data = result_fn_data;
-+ sess->init_result_fn = result_fn;
-+ spin_lock_irqsave(&scst_mgmt_lock, flags);
-+ TRACE_DBG("Adding sess %p to scst_sess_init_list", sess);
-+ list_add_tail(&sess->sess_init_list_entry,
-+ &scst_sess_init_list);
-+ spin_unlock_irqrestore(&scst_mgmt_lock, flags);
-+ wake_up(&scst_mgmt_waitQ);
-+ } else {
-+ res = scst_init_session(sess);
-+ if (res != 0)
-+ goto out_free;
-+ }
++ if (sysfs_create_files(scst_device_groups_kobj,
++ scst_device_groups_attrs))
++ goto device_groups_attrs_error;
+
+out:
-+ TRACE_EXIT();
-+ return sess;
-+
-+out_free:
-+ scst_free_session(sess);
-+ sess = NULL;
-+ goto out;
-+}
-+EXPORT_SYMBOL_GPL(scst_register_session);
-+
-+/**
-+ * scst_register_session_non_gpl() - register session (non-GPL version)
-+ * @tgt: target
-+ * @initiator_name: remote initiator's name, any NULL-terminated string,
-+ * e.g. iSCSI name, which used as the key to found appropriate
-+ * access control group. Could be NULL, then the default
-+ * target's LUNs are used.
-+ * @tgt_priv: pointer to target driver's private data
-+ *
-+ * Description:
-+ * Registers new session. Returns new session on success or NULL otherwise.
-+ */
-+struct scst_session *scst_register_session_non_gpl(struct scst_tgt *tgt,
-+ const char *initiator_name, void *tgt_priv)
-+{
-+ return scst_register_session(tgt, 0, initiator_name, tgt_priv,
-+ NULL, NULL);
-+}
-+EXPORT_SYMBOL(scst_register_session_non_gpl);
-+
-+/**
-+ * scst_unregister_session() - unregister session
-+ * @sess: session to be unregistered
-+ * @wait: if true, instructs to wait until all commands, which
-+ * currently is being executed and belonged to the session,
-+ * finished. Otherwise, target driver should be prepared to
-+ * receive xmit_response() for the session's command after
-+ * scst_unregister_session() returns.
-+ * @unreg_done_fn: pointer to the function that will be asynchronously called
-+ * when the last session's command finishes and
-+ * the session is about to be completely freed. Can be NULL.
-+ * Parameter:
-+ * - sess - session
-+ *
-+ * Unregisters session.
-+ *
-+ * Notes:
-+ * - All outstanding commands will be finished regularly. After
-+ * scst_unregister_session() returned, no new commands must be sent to
-+ * SCST via scst_rx_cmd().
-+ *
-+ * - The caller must ensure that no scst_rx_cmd() or scst_rx_mgmt_fn_*() is
-+ * called in paralell with scst_unregister_session().
-+ *
-+ * - Can be called before result_fn() of scst_register_session() called,
-+ * i.e. during the session registration/initialization.
-+ *
-+ * - It is highly recommended to call scst_unregister_session() as soon as it
-+ * gets clear that session will be unregistered and not to wait until all
-+ * related commands finished. This function provides the wait functionality,
-+ * but it also starts recovering stuck commands, if there are any.
-+ * Otherwise, your target driver could wait for those commands forever.
-+ */
-+void scst_unregister_session(struct scst_session *sess, int wait,
-+ void (*unreg_done_fn) (struct scst_session *sess))
-+{
-+ unsigned long flags;
-+ DECLARE_COMPLETION_ONSTACK(c);
-+ int rc, lun;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Unregistering session %p (wait %d)", sess, wait);
-+
-+ sess->unreg_done_fn = unreg_done_fn;
-+
-+ /* Abort all outstanding commands and clear reservation, if necessary */
-+ lun = 0;
-+ rc = scst_rx_mgmt_fn_lun(sess, SCST_UNREG_SESS_TM,
-+ (uint8_t *)&lun, sizeof(lun), SCST_ATOMIC, NULL);
-+ if (rc != 0) {
-+ PRINT_ERROR("SCST_UNREG_SESS_TM failed %d (sess %p)",
-+ rc, sess);
-+ }
-+
-+ sess->shut_phase = SCST_SESS_SPH_SHUTDOWN;
++ TRACE_EXIT_RES(res);
++ return res;
+
-+ spin_lock_irqsave(&scst_mgmt_lock, flags);
++device_groups_attrs_error:
++ kobject_del(scst_device_groups_kobj);
++ kobject_put(scst_device_groups_kobj);
+
-+ if (wait)
-+ sess->shutdown_compl = &c;
++device_groups_kobj_error:
++ kobject_del(scst_handlers_kobj);
++ kobject_put(scst_handlers_kobj);
+
-+ spin_unlock_irqrestore(&scst_mgmt_lock, flags);
++handlers_kobj_error:
++ scst_del_put_sgv_kobj();
+
-+ scst_sess_put(sess);
++sgv_kobj_error:
++ kobject_del(scst_devices_kobj);
++ kobject_put(scst_devices_kobj);
+
-+ if (wait) {
-+ TRACE_DBG("Waiting for session %p to complete", sess);
-+ wait_for_completion(&c);
-+ }
++devices_kobj_error:
++ kobject_del(scst_targets_kobj);
++ kobject_put(scst_targets_kobj);
+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_unregister_session);
++targets_kobj_error:
++ kobject_del(&scst_sysfs_root_kobj);
+
-+/**
-+ * scst_unregister_session_non_gpl() - unregister session, non-GPL version
-+ * @sess: session to be unregistered
-+ *
-+ * Unregisters session.
-+ *
-+ * See notes for scst_unregister_session() above.
-+ */
-+void scst_unregister_session_non_gpl(struct scst_session *sess)
-+{
-+ TRACE_ENTRY();
++sysfs_root_add_error:
++ kobject_put(&scst_sysfs_root_kobj);
+
-+ scst_unregister_session(sess, 1, NULL);
++ kthread_stop(sysfs_work_thread);
+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL(scst_unregister_session_non_gpl);
++ if (res == 0)
++ res = -EINVAL;
+
-+static inline int test_mgmt_list(void)
-+{
-+ int res = !list_empty(&scst_sess_init_list) ||
-+ !list_empty(&scst_sess_shut_list) ||
-+ unlikely(kthread_should_stop());
-+ return res;
++ goto out;
+}
+
-+int scst_global_mgmt_thread(void *arg)
++void scst_sysfs_cleanup(void)
+{
-+ struct scst_session *sess;
-+
+ TRACE_ENTRY();
+
-+ PRINT_INFO("Management thread started, PID %d", current->pid);
-+
-+ current->flags |= PF_NOFREEZE;
-+
-+ set_user_nice(current, -10);
++ PRINT_INFO("%s", "Exiting SCST sysfs hierarchy...");
+
-+ spin_lock_irq(&scst_mgmt_lock);
-+ while (!kthread_should_stop()) {
-+ wait_queue_t wait;
-+ init_waitqueue_entry(&wait, current);
++ scst_del_put_sgv_kobj();
+
-+ if (!test_mgmt_list()) {
-+ add_wait_queue_exclusive(&scst_mgmt_waitQ, &wait);
-+ for (;;) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ if (test_mgmt_list())
-+ break;
-+ spin_unlock_irq(&scst_mgmt_lock);
-+ schedule();
-+ spin_lock_irq(&scst_mgmt_lock);
-+ }
-+ set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&scst_mgmt_waitQ, &wait);
-+ }
++ kobject_del(scst_devices_kobj);
++ kobject_put(scst_devices_kobj);
+
-+ while (!list_empty(&scst_sess_init_list)) {
-+ sess = list_entry(scst_sess_init_list.next,
-+ typeof(*sess), sess_init_list_entry);
-+ TRACE_DBG("Removing sess %p from scst_sess_init_list",
-+ sess);
-+ list_del(&sess->sess_init_list_entry);
-+ spin_unlock_irq(&scst_mgmt_lock);
++ kobject_del(scst_targets_kobj);
++ kobject_put(scst_targets_kobj);
+
-+ if (sess->init_phase == SCST_SESS_IPH_INITING)
-+ scst_init_session(sess);
-+ else {
-+ PRINT_CRIT_ERROR("session %p is in "
-+ "scst_sess_init_list, but in unknown "
-+ "init phase %x", sess,
-+ sess->init_phase);
-+ BUG();
-+ }
++ kobject_del(scst_handlers_kobj);
++ kobject_put(scst_handlers_kobj);
+
-+ spin_lock_irq(&scst_mgmt_lock);
-+ }
++ sysfs_remove_files(scst_device_groups_kobj, scst_device_groups_attrs);
+
-+ while (!list_empty(&scst_sess_shut_list)) {
-+ sess = list_entry(scst_sess_shut_list.next,
-+ typeof(*sess), sess_shut_list_entry);
-+ TRACE_DBG("Removing sess %p from scst_sess_shut_list",
-+ sess);
-+ list_del(&sess->sess_shut_list_entry);
-+ spin_unlock_irq(&scst_mgmt_lock);
++ kobject_del(scst_device_groups_kobj);
++ kobject_put(scst_device_groups_kobj);
+
-+ switch (sess->shut_phase) {
-+ case SCST_SESS_SPH_SHUTDOWN:
-+ BUG_ON(atomic_read(&sess->refcnt) != 0);
-+ scst_free_session_callback(sess);
-+ break;
-+ default:
-+ PRINT_CRIT_ERROR("session %p is in "
-+ "scst_sess_shut_list, but in unknown "
-+ "shut phase %lx", sess,
-+ sess->shut_phase);
-+ BUG();
-+ break;
-+ }
-+
-+ spin_lock_irq(&scst_mgmt_lock);
-+ }
-+ }
-+ spin_unlock_irq(&scst_mgmt_lock);
++ kobject_del(&scst_sysfs_root_kobj);
++ kobject_put(&scst_sysfs_root_kobj);
+
++ wait_for_completion(&scst_sysfs_root_release_completion);
+ /*
-+ * If kthread_should_stop() is true, we are guaranteed to be
-+ * on the module unload, so both lists must be empty.
++ * There is a race, when in the release() schedule happens just after
++ * calling complete(), so if we exit and unload scst module immediately,
++ * there will be oops there. So let's give it a chance to quit
++ * gracefully. Unfortunately, current kobjects implementation
++ * doesn't allow better ways to handle it.
+ */
-+ BUG_ON(!list_empty(&scst_sess_init_list));
-+ BUG_ON(!list_empty(&scst_sess_shut_list));
-+
-+ PRINT_INFO("Management thread PID %d finished", current->pid);
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+/* Called under sess->sess_list_lock */
-+static struct scst_cmd *__scst_find_cmd_by_tag(struct scst_session *sess,
-+ uint64_t tag, bool to_abort)
-+{
-+ struct scst_cmd *cmd, *res = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ /* ToDo: hash list */
-+
-+ TRACE_DBG("%s (sess=%p, tag=%llu)", "Searching in sess cmd list",
-+ sess, (long long unsigned int)tag);
-+
-+ list_for_each_entry(cmd, &sess->sess_cmd_list,
-+ sess_cmd_list_entry) {
-+ if (cmd->tag == tag) {
-+ /*
-+ * We must not count done commands, because
-+ * they were submitted for transmittion.
-+ * Otherwise we can have a race, when for
-+ * some reason cmd's release delayed
-+ * after transmittion and initiator sends
-+ * cmd with the same tag => it can be possible
-+ * that a wrong cmd will be returned.
-+ */
-+ if (cmd->done) {
-+ if (to_abort) {
-+ /*
-+ * We should return the latest not
-+ * aborted cmd with this tag.
-+ */
-+ if (res == NULL)
-+ res = cmd;
-+ else {
-+ if (test_bit(SCST_CMD_ABORTED,
-+ &res->cmd_flags)) {
-+ res = cmd;
-+ } else if (!test_bit(SCST_CMD_ABORTED,
-+ &cmd->cmd_flags))
-+ res = cmd;
-+ }
-+ }
-+ continue;
-+ } else {
-+ res = cmd;
-+ break;
-+ }
-+ }
-+ }
-+
-+ TRACE_EXIT();
-+ return res;
-+}
-+
-+/**
-+ * scst_find_cmd() - find command by custom comparison function
-+ *
-+ * Finds a command based on user supplied data and comparision
-+ * callback function, that should return true, if the command is found.
-+ * Returns the command on success or NULL otherwise.
-+ */
-+struct scst_cmd *scst_find_cmd(struct scst_session *sess, void *data,
-+ int (*cmp_fn) (struct scst_cmd *cmd,
-+ void *data))
-+{
-+ struct scst_cmd *cmd = NULL;
-+ unsigned long flags = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (cmp_fn == NULL)
-+ goto out;
-+
-+ spin_lock_irqsave(&sess->sess_list_lock, flags);
-+
-+ TRACE_DBG("Searching in sess cmd list (sess=%p)", sess);
-+ list_for_each_entry(cmd, &sess->sess_cmd_list, sess_cmd_list_entry) {
-+ /*
-+ * We must not count done commands, because they were
-+ * submitted for transmittion. Otherwise we can have a race,
-+ * when for some reason cmd's release delayed after
-+ * transmittion and initiator sends cmd with the same tag =>
-+ * it can be possible that a wrong cmd will be returned.
-+ */
-+ if (cmd->done)
-+ continue;
-+ if (cmp_fn(cmd, data))
-+ goto out_unlock;
-+ }
++ msleep(3000);
+
-+ cmd = NULL;
++ if (sysfs_work_thread)
++ kthread_stop(sysfs_work_thread);
+
-+out_unlock:
-+ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
++ PRINT_INFO("%s", "Exiting SCST sysfs hierarchy done");
+
-+out:
+ TRACE_EXIT();
-+ return cmd;
-+}
-+EXPORT_SYMBOL(scst_find_cmd);
-+
-+/**
-+ * scst_find_cmd_by_tag() - find command by tag
-+ *
-+ * Finds a command based on the supplied tag comparing it with one
-+ * that previously set by scst_cmd_set_tag(). Returns the found command on
-+ * success or NULL otherwise.
-+ */
-+struct scst_cmd *scst_find_cmd_by_tag(struct scst_session *sess,
-+ uint64_t tag)
-+{
-+ unsigned long flags;
-+ struct scst_cmd *cmd;
-+ spin_lock_irqsave(&sess->sess_list_lock, flags);
-+ cmd = __scst_find_cmd_by_tag(sess, tag, false);
-+ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
-+ return cmd;
++ return;
+}
-+EXPORT_SYMBOL(scst_find_cmd_by_tag);
-diff -uprN orig/linux-2.6.36/include/scst/scst_debug.h linux-2.6.36/include/scst/scst_debug.h
---- orig/linux-2.6.36/include/scst/scst_debug.h
-+++ linux-2.6.36/include/scst/scst_debug.h
+diff -uprN orig/linux-2.6.39/include/scst/scst_debug.h linux-2.6.39/include/scst/scst_debug.h
+--- orig/linux-2.6.39/include/scst/scst_debug.h
++++ linux-2.6.39/include/scst/scst_debug.h
@@ -0,0 +1,351 @@
+/*
+ * include/scst_debug.h
@@ -30236,7 +31839,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst_debug.h linux-2.6.36/include/scst
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
+ * Copyright (C) 2010 - 2011 SCST Ltd.
+ *
-+ * Contains macroses for execution tracing and error reporting
++ * Contains macros for execution tracing and error reporting
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
@@ -30318,7 +31921,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst_debug.h linux-2.6.36/include/scst
+#endif
+
+/*
-+ * We don't print prefix for debug traces to not put additional preasure
++ * We don't print prefix for debug traces to not put additional pressure
+ * on the logging system in case of a lot of logging.
+ */
+
@@ -30579,9 +32182,9 @@ diff -uprN orig/linux-2.6.36/include/scst/scst_debug.h linux-2.6.36/include/scst
+#endif
+
+#endif /* __SCST_DEBUG_H */
-diff -uprN orig/linux-2.6.36/drivers/scst/scst_debug.c linux-2.6.36/drivers/scst/scst_debug.c
---- orig/linux-2.6.36/drivers/scst/scst_debug.c
-+++ linux-2.6.36/drivers/scst/scst_debug.c
+diff -uprN orig/linux-2.6.39/drivers/scst/scst_debug.c linux-2.6.39/drivers/scst/scst_debug.c
+--- orig/linux-2.6.39/drivers/scst/scst_debug.c
++++ linux-2.6.39/drivers/scst/scst_debug.c
@@ -0,0 +1,224 @@
+/*
+ * scst_debug.c
@@ -30725,7 +32328,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_debug.c linux-2.6.36/drivers/scst
+ * if the name corrupted in the debug logs because of the race for this buffer.
+ *
+ * Note! You can't call this function 2 or more times in a single logging
-+ * (printk) statement, because then each new call of this functon will override
++ * (printk) statement, because then each new call of this function will override
+ * data written in this buffer by the previous call. You should instead split
+ * that logging statement on smaller statements each calling
+ * debug_transport_id_to_initiator_name() only once.
@@ -30807,2717 +32410,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_debug.c linux-2.6.36/drivers/scst
+}
+
+#endif /* CONFIG_SCST_DEBUG || CONFIG_SCST_TRACING */
-diff -uprN orig/linux-2.6.36/drivers/scst/scst_proc.c linux-2.6.36/drivers/scst/scst_proc.c
---- orig/linux-2.6.36/drivers/scst/scst_proc.c
-+++ linux-2.6.36/drivers/scst/scst_proc.c
-@@ -0,0 +1,2704 @@
-+/*
-+ * scst_proc.c
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/module.h>
-+
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <linux/slab.h>
-+#include <linux/sched.h>
-+#include <linux/unistd.h>
-+#include <linux/string.h>
-+#include <linux/proc_fs.h>
-+#include <linux/seq_file.h>
-+
-+#include <scst/scst.h>
-+#include "scst_priv.h"
-+#include "scst_mem.h"
-+#include "scst_pres.h"
-+
-+static int scst_proc_init_groups(void);
-+static void scst_proc_cleanup_groups(void);
-+static int scst_proc_assign_handler(char *buf);
-+static int scst_proc_group_add(const char *p, unsigned int addr_method);
-+static int scst_proc_del_free_acg(struct scst_acg *acg, int remove_proc);
-+
-+static struct scst_proc_data scst_version_proc_data;
-+static struct scst_proc_data scst_help_proc_data;
-+static struct scst_proc_data scst_sgv_proc_data;
-+static struct scst_proc_data scst_groups_names_proc_data;
-+static struct scst_proc_data scst_groups_devices_proc_data;
-+static struct scst_proc_data scst_groups_addr_method_proc_data;
-+static struct scst_proc_data scst_sessions_proc_data;
-+static struct scst_proc_data scst_dev_handler_type_proc_data;
-+static struct scst_proc_data scst_tgt_proc_data;
-+static struct scst_proc_data scst_threads_proc_data;
-+static struct scst_proc_data scst_scsi_tgt_proc_data;
-+static struct scst_proc_data scst_dev_handler_proc_data;
-+
-+/*
-+ * Must be less than 4K page size, since our output routines
-+ * use some slack for overruns
-+ */
-+#define SCST_PROC_BLOCK_SIZE (PAGE_SIZE - 512)
-+
-+#define SCST_PROC_LOG_ENTRY_NAME "trace_level"
-+#define SCST_PROC_DEV_HANDLER_TYPE_ENTRY_NAME "type"
-+#define SCST_PROC_VERSION_NAME "version"
-+#define SCST_PROC_SESSIONS_NAME "sessions"
-+#define SCST_PROC_HELP_NAME "help"
-+#define SCST_PROC_THREADS_NAME "threads"
-+#define SCST_PROC_GROUPS_ENTRY_NAME "groups"
-+#define SCST_PROC_GROUPS_DEVICES_ENTRY_NAME "devices"
-+#define SCST_PROC_GROUPS_USERS_ENTRY_NAME "names"
-+#define SCST_PROC_GROUPS_ADDR_METHOD_ENTRY_NAME "addr_method"
-+
-+#ifdef CONFIG_SCST_MEASURE_LATENCY
-+#define SCST_PROC_LAT_ENTRY_NAME "latency"
-+#endif
-+
-+#define SCST_PROC_ACTION_ALL 1
-+#define SCST_PROC_ACTION_NONE 2
-+#define SCST_PROC_ACTION_DEFAULT 3
-+#define SCST_PROC_ACTION_ADD 4
-+#define SCST_PROC_ACTION_CLEAR 5
-+#define SCST_PROC_ACTION_MOVE 6
-+#define SCST_PROC_ACTION_DEL 7
-+#define SCST_PROC_ACTION_REPLACE 8
-+#define SCST_PROC_ACTION_VALUE 9
-+#define SCST_PROC_ACTION_ASSIGN 10
-+#define SCST_PROC_ACTION_ADD_GROUP 11
-+#define SCST_PROC_ACTION_DEL_GROUP 12
-+#define SCST_PROC_ACTION_RENAME_GROUP 13
-+#define SCST_PROC_ACTION_DUMP_PRS 14
-+
-+static struct proc_dir_entry *scst_proc_scsi_tgt;
-+static struct proc_dir_entry *scst_proc_groups_root;
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+static struct scst_proc_data scst_log_proc_data;
-+
-+static struct scst_trace_log scst_proc_trace_tbl[] = {
-+ { TRACE_OUT_OF_MEM, "out_of_mem" },
-+ { TRACE_MINOR, "minor" },
-+ { TRACE_SG_OP, "sg" },
-+ { TRACE_MEMORY, "mem" },
-+ { TRACE_BUFF, "buff" },
-+#ifndef GENERATING_UPSTREAM_PATCH
-+ { TRACE_ENTRYEXIT, "entryexit" },
-+#endif
-+ { TRACE_PID, "pid" },
-+ { TRACE_LINE, "line" },
-+ { TRACE_FUNCTION, "function" },
-+ { TRACE_DEBUG, "debug" },
-+ { TRACE_SPECIAL, "special" },
-+ { TRACE_SCSI, "scsi" },
-+ { TRACE_MGMT, "mgmt" },
-+ { TRACE_MGMT_DEBUG, "mgmt_dbg" },
-+ { TRACE_FLOW_CONTROL, "flow_control" },
-+ { TRACE_PRES, "pr" },
-+ { 0, NULL }
-+};
-+
-+static struct scst_trace_log scst_proc_local_trace_tbl[] = {
-+ { TRACE_RTRY, "retry" },
-+ { TRACE_SCSI_SERIALIZING, "scsi_serializing" },
-+ { TRACE_RCV_BOT, "recv_bot" },
-+ { TRACE_SND_BOT, "send_bot" },
-+ { TRACE_RCV_TOP, "recv_top" },
-+ { TRACE_SND_TOP, "send_top" },
-+ { 0, NULL }
-+};
-+#endif
-+
-+static char *scst_proc_help_string =
-+" echo \"assign H:C:I:L HANDLER_NAME\" >/proc/scsi_tgt/scsi_tgt\n"
-+"\n"
-+" echo \"add_group GROUP_NAME [FLAT]\" >/proc/scsi_tgt/scsi_tgt\n"
-+" echo \"del_group GROUP_NAME\" >/proc/scsi_tgt/scsi_tgt\n"
-+" echo \"rename_group OLD_NAME NEW_NAME\" >/proc/scsi_tgt/scsi_tgt\n"
-+"\n"
-+" echo \"add|del H:C:I:L lun [READ_ONLY]\""
-+" >/proc/scsi_tgt/groups/GROUP_NAME/devices\n"
-+" echo \"replace H:C:I:L lun [READ_ONLY]\""
-+" >/proc/scsi_tgt/groups/GROUP_NAME/devices\n"
-+" echo \"add|del V_NAME lun [READ_ONLY]\""
-+" >/proc/scsi_tgt/groups/GROUP_NAME/devices\n"
-+" echo \"replace V_NAME lun [READ_ONLY]\""
-+" >/proc/scsi_tgt/groups/GROUP_NAME/devices\n"
-+" echo \"clear\" >/proc/scsi_tgt/groups/GROUP_NAME/devices\n"
-+"\n"
-+" echo \"add|del NAME\" >/proc/scsi_tgt/groups/GROUP_NAME/names\n"
-+" echo \"move NAME NEW_GROUP_NAME\" >/proc/scsi_tgt/groups/OLD_GROUP_NAME/names\n"
-+" echo \"clear\" >/proc/scsi_tgt/groups/GROUP_NAME/names\n"
-+"\n"
-+" echo \"DEC|0xHEX|0OCT\" >/proc/scsi_tgt/threads\n"
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+"\n"
-+" echo \"all|none|default\" >/proc/scsi_tgt/[DEV_HANDLER_NAME/]trace_level\n"
-+" echo \"value DEC|0xHEX|0OCT\""
-+" >/proc/scsi_tgt/[DEV_HANDLER_NAME/]trace_level\n"
-+" echo \"set|add|del TOKEN\""
-+" >/proc/scsi_tgt/[DEV_HANDLER_NAME/]trace_level\n"
-+" where TOKEN is one of [debug, function, line, pid, entryexit,\n"
-+" buff, mem, sg, out_of_mem, special, scsi,\n"
-+" mgmt, minor, mgmt_dbg]\n"
-+" Additionally for /proc/scsi_tgt/trace_level there are these TOKENs\n"
-+" [scsi_serializing, retry, recv_bot, send_bot, recv_top, send_top]\n"
-+" echo \"dump_prs dev_name\" >/proc/scsi_tgt/trace_level\n"
-+#endif
-+;
-+
-+static char *scst_proc_dev_handler_type[] = {
-+ "Direct-access device (e.g., magnetic disk)",
-+ "Sequential-access device (e.g., magnetic tape)",
-+ "Printer device",
-+ "Processor device",
-+ "Write-once device (e.g., some optical disks)",
-+ "CD-ROM device",
-+ "Scanner device (obsolete)",
-+ "Optical memory device (e.g., some optical disks)",
-+ "Medium changer device (e.g., jukeboxes)",
-+ "Communications device (obsolete)",
-+ "Defined by ASC IT8 (Graphic arts pre-press devices)",
-+ "Defined by ASC IT8 (Graphic arts pre-press devices)",
-+ "Storage array controller device (e.g., RAID)",
-+ "Enclosure services device",
-+ "Simplified direct-access device (e.g., magnetic disk)",
-+ "Optical card reader/writer device"
-+};
-+
-+static DEFINE_MUTEX(scst_proc_mutex);
-+
-+#include <linux/ctype.h>
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+
-+static DEFINE_MUTEX(scst_log_mutex);
-+
-+int scst_proc_log_entry_write(struct file *file, const char __user *buf,
-+ unsigned long length, unsigned long *log_level,
-+ unsigned long default_level, const struct scst_trace_log *tbl)
-+{
-+ int res = length;
-+ int action;
-+ unsigned long level = 0, oldlevel;
-+ char *buffer, *p, *e;
-+ const struct scst_trace_log *t;
-+ char *data = (char *)PDE(file->f_dentry->d_inode)->data;
-+
-+ TRACE_ENTRY();
-+
-+ if (length > SCST_PROC_BLOCK_SIZE) {
-+ res = -EOVERFLOW;
-+ goto out;
-+ }
-+ if (!buf) {
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ buffer = (char *)__get_free_page(GFP_KERNEL);
-+ if (!buffer) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ if (copy_from_user(buffer, buf, length)) {
-+ res = -EFAULT;
-+ goto out_free;
-+ }
-+ if (length < PAGE_SIZE) {
-+ buffer[length] = '\0';
-+ } else if (buffer[PAGE_SIZE-1]) {
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ /*
-+ * Usage:
-+ * echo "all|none|default" >/proc/scsi_tgt/trace_level
-+ * echo "value DEC|0xHEX|0OCT" >/proc/scsi_tgt/trace_level
-+ * echo "add|del TOKEN" >/proc/scsi_tgt/trace_level
-+ */
-+ p = buffer;
-+ if (!strncasecmp("all", p, 3)) {
-+ action = SCST_PROC_ACTION_ALL;
-+ } else if (!strncasecmp("none", p, 4) || !strncasecmp("null", p, 4)) {
-+ action = SCST_PROC_ACTION_NONE;
-+ } else if (!strncasecmp("default", p, 7)) {
-+ action = SCST_PROC_ACTION_DEFAULT;
-+ } else if (!strncasecmp("add ", p, 4)) {
-+ p += 4;
-+ action = SCST_PROC_ACTION_ADD;
-+ } else if (!strncasecmp("del ", p, 4)) {
-+ p += 4;
-+ action = SCST_PROC_ACTION_DEL;
-+ } else if (!strncasecmp("value ", p, 6)) {
-+ p += 6;
-+ action = SCST_PROC_ACTION_VALUE;
-+ } else if (!strncasecmp("dump_prs ", p, 9)) {
-+ p += 9;
-+ action = SCST_PROC_ACTION_DUMP_PRS;
-+ } else {
-+ if (p[strlen(p) - 1] == '\n')
-+ p[strlen(p) - 1] = '\0';
-+ PRINT_ERROR("Unknown action \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ switch (action) {
-+ case SCST_PROC_ACTION_ALL:
-+ level = TRACE_ALL;
-+ break;
-+ case SCST_PROC_ACTION_DEFAULT:
-+ level = default_level;
-+ break;
-+ case SCST_PROC_ACTION_NONE:
-+ level = TRACE_NULL;
-+ break;
-+ case SCST_PROC_ACTION_ADD:
-+ case SCST_PROC_ACTION_DEL:
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+ e = p;
-+ while (!isspace(*e) && *e != '\0')
-+ e++;
-+ *e = 0;
-+ if (tbl) {
-+ t = tbl;
-+ while (t->token) {
-+ if (!strcasecmp(p, t->token)) {
-+ level = t->val;
-+ break;
-+ }
-+ t++;
-+ }
-+ }
-+ if (level == 0) {
-+ t = scst_proc_trace_tbl;
-+ while (t->token) {
-+ if (!strcasecmp(p, t->token)) {
-+ level = t->val;
-+ break;
-+ }
-+ t++;
-+ }
-+ }
-+ if (level == 0) {
-+ PRINT_ERROR("Unknown token \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+ break;
-+ case SCST_PROC_ACTION_VALUE:
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+ level = simple_strtoul(p, NULL, 0);
-+ break;
-+ case SCST_PROC_ACTION_DUMP_PRS:
-+ {
-+ struct scst_device *dev;
-+
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+ e = p;
-+ while (!isspace(*e) && *e != '\0')
-+ e++;
-+ *e = '\0';
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out_free;
-+ }
-+
-+ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
-+ if (strcmp(dev->virt_name, p) == 0) {
-+ scst_pr_dump_prs(dev, true);
-+ goto out_up;
-+ }
-+ }
-+
-+ PRINT_ERROR("Device %s not found", p);
-+ res = -ENOENT;
-+out_up:
-+ mutex_unlock(&scst_mutex);
-+ goto out_free;
-+ }
-+ }
-+
-+ oldlevel = *log_level;
-+
-+ switch (action) {
-+ case SCST_PROC_ACTION_ADD:
-+ *log_level |= level;
-+ break;
-+ case SCST_PROC_ACTION_DEL:
-+ *log_level &= ~level;
-+ break;
-+ default:
-+ *log_level = level;
-+ break;
-+ }
-+
-+ PRINT_INFO("Changed trace level for \"%s\": "
-+ "old 0x%08lx, new 0x%08lx",
-+ (char *)data, oldlevel, *log_level);
-+
-+out_free:
-+ free_page((unsigned long)buffer);
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(scst_proc_log_entry_write);
-+
-+static ssize_t scst_proc_scsi_tgt_gen_write_log(struct file *file,
-+ const char __user *buf,
-+ size_t length, loff_t *off)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_log_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ res = scst_proc_log_entry_write(file, buf, length,
-+ &trace_flag, SCST_DEFAULT_LOG_FLAGS,
-+ scst_proc_local_trace_tbl);
-+
-+ mutex_unlock(&scst_log_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
-+
-+#ifdef CONFIG_SCST_MEASURE_LATENCY
-+
-+static char *scst_io_size_names[] = {
-+ "<=8K ",
-+ "<=32K ",
-+ "<=128K",
-+ "<=512K",
-+ ">512K "
-+};
-+
-+static int lat_info_show(struct seq_file *seq, void *v)
-+{
-+ int res = 0;
-+ struct scst_acg *acg;
-+ struct scst_session *sess;
-+ char buf[50];
-+
-+ TRACE_ENTRY();
-+
-+ BUILD_BUG_ON(SCST_LATENCY_STATS_NUM != ARRAY_SIZE(scst_io_size_names));
-+ BUILD_BUG_ON(SCST_LATENCY_STATS_NUM != ARRAY_SIZE(sess->sess_latency_stat));
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ list_for_each_entry(acg, &scst_acg_list, acg_list_entry) {
-+ bool header_printed = false;
-+
-+ list_for_each_entry(sess, &acg->acg_sess_list,
-+ acg_sess_list_entry) {
-+ unsigned int i;
-+ int t;
-+ uint64_t scst_time, tgt_time, dev_time;
-+ unsigned int processed_cmds;
-+
-+ if (!header_printed) {
-+ seq_printf(seq, "%-15s %-15s %-46s %-46s %-46s\n",
-+ "T-L names", "Total commands", "SCST latency",
-+ "Target latency", "Dev latency (min/avg/max/all ns)");
-+ header_printed = true;
-+ }
-+
-+ seq_printf(seq, "Target name: %s\nInitiator name: %s\n",
-+ sess->tgt->tgtt->name,
-+ sess->initiator_name);
-+
-+ spin_lock_bh(&sess->lat_lock);
-+
-+ for (i = 0; i < SCST_LATENCY_STATS_NUM ; i++) {
-+ uint64_t scst_time_wr, tgt_time_wr, dev_time_wr;
-+ unsigned int processed_cmds_wr;
-+ uint64_t scst_time_rd, tgt_time_rd, dev_time_rd;
-+ unsigned int processed_cmds_rd;
-+ struct scst_ext_latency_stat *latency_stat;
-+
-+ latency_stat = &sess->sess_latency_stat[i];
-+ scst_time_wr = latency_stat->scst_time_wr;
-+ scst_time_rd = latency_stat->scst_time_rd;
-+ tgt_time_wr = latency_stat->tgt_time_wr;
-+ tgt_time_rd = latency_stat->tgt_time_rd;
-+ dev_time_wr = latency_stat->dev_time_wr;
-+ dev_time_rd = latency_stat->dev_time_rd;
-+ processed_cmds_wr = latency_stat->processed_cmds_wr;
-+ processed_cmds_rd = latency_stat->processed_cmds_rd;
-+
-+ seq_printf(seq, "%-5s %-9s %-15lu ",
-+ "Write", scst_io_size_names[i],
-+ (unsigned long)processed_cmds_wr);
-+ if (processed_cmds_wr == 0)
-+ processed_cmds_wr = 1;
-+
-+ do_div(scst_time_wr, processed_cmds_wr);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_scst_time_wr,
-+ (unsigned long)scst_time_wr,
-+ (unsigned long)latency_stat->max_scst_time_wr,
-+ (unsigned long)latency_stat->scst_time_wr);
-+ seq_printf(seq, "%-47s", buf);
-+
-+ do_div(tgt_time_wr, processed_cmds_wr);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_tgt_time_wr,
-+ (unsigned long)tgt_time_wr,
-+ (unsigned long)latency_stat->max_tgt_time_wr,
-+ (unsigned long)latency_stat->tgt_time_wr);
-+ seq_printf(seq, "%-47s", buf);
-+
-+ do_div(dev_time_wr, processed_cmds_wr);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_dev_time_wr,
-+ (unsigned long)dev_time_wr,
-+ (unsigned long)latency_stat->max_dev_time_wr,
-+ (unsigned long)latency_stat->dev_time_wr);
-+ seq_printf(seq, "%-47s\n", buf);
-+
-+ seq_printf(seq, "%-5s %-9s %-15lu ",
-+ "Read", scst_io_size_names[i],
-+ (unsigned long)processed_cmds_rd);
-+ if (processed_cmds_rd == 0)
-+ processed_cmds_rd = 1;
-+
-+ do_div(scst_time_rd, processed_cmds_rd);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_scst_time_rd,
-+ (unsigned long)scst_time_rd,
-+ (unsigned long)latency_stat->max_scst_time_rd,
-+ (unsigned long)latency_stat->scst_time_rd);
-+ seq_printf(seq, "%-47s", buf);
-+
-+ do_div(tgt_time_rd, processed_cmds_rd);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_tgt_time_rd,
-+ (unsigned long)tgt_time_rd,
-+ (unsigned long)latency_stat->max_tgt_time_rd,
-+ (unsigned long)latency_stat->tgt_time_rd);
-+ seq_printf(seq, "%-47s", buf);
-+
-+ do_div(dev_time_rd, processed_cmds_rd);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_dev_time_rd,
-+ (unsigned long)dev_time_rd,
-+ (unsigned long)latency_stat->max_dev_time_rd,
-+ (unsigned long)latency_stat->dev_time_rd);
-+ seq_printf(seq, "%-47s\n", buf);
-+ }
-+
-+ for (t = TGT_DEV_HASH_SIZE-1; t >= 0; t--) {
-+ struct list_head *sess_tgt_dev_list_head =
-+ &sess->sess_tgt_dev_list_hash[t];
-+ struct scst_tgt_dev *tgt_dev;
-+ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
-+ sess_tgt_dev_list_entry) {
-+
-+ seq_printf(seq, "\nLUN: %llu\n", tgt_dev->lun);
-+
-+ for (i = 0; i < SCST_LATENCY_STATS_NUM ; i++) {
-+ uint64_t scst_time_wr, tgt_time_wr, dev_time_wr;
-+ unsigned int processed_cmds_wr;
-+ uint64_t scst_time_rd, tgt_time_rd, dev_time_rd;
-+ unsigned int processed_cmds_rd;
-+ struct scst_ext_latency_stat *latency_stat;
-+
-+ latency_stat = &tgt_dev->dev_latency_stat[i];
-+ scst_time_wr = latency_stat->scst_time_wr;
-+ scst_time_rd = latency_stat->scst_time_rd;
-+ tgt_time_wr = latency_stat->tgt_time_wr;
-+ tgt_time_rd = latency_stat->tgt_time_rd;
-+ dev_time_wr = latency_stat->dev_time_wr;
-+ dev_time_rd = latency_stat->dev_time_rd;
-+ processed_cmds_wr = latency_stat->processed_cmds_wr;
-+ processed_cmds_rd = latency_stat->processed_cmds_rd;
-+
-+ seq_printf(seq, "%-5s %-9s %-15lu ",
-+ "Write", scst_io_size_names[i],
-+ (unsigned long)processed_cmds_wr);
-+ if (processed_cmds_wr == 0)
-+ processed_cmds_wr = 1;
-+
-+ do_div(scst_time_wr, processed_cmds_wr);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_scst_time_wr,
-+ (unsigned long)scst_time_wr,
-+ (unsigned long)latency_stat->max_scst_time_wr,
-+ (unsigned long)latency_stat->scst_time_wr);
-+ seq_printf(seq, "%-47s", buf);
-+
-+ do_div(tgt_time_wr, processed_cmds_wr);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_tgt_time_wr,
-+ (unsigned long)tgt_time_wr,
-+ (unsigned long)latency_stat->max_tgt_time_wr,
-+ (unsigned long)latency_stat->tgt_time_wr);
-+ seq_printf(seq, "%-47s", buf);
-+
-+ do_div(dev_time_wr, processed_cmds_wr);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_dev_time_wr,
-+ (unsigned long)dev_time_wr,
-+ (unsigned long)latency_stat->max_dev_time_wr,
-+ (unsigned long)latency_stat->dev_time_wr);
-+ seq_printf(seq, "%-47s\n", buf);
-+
-+ seq_printf(seq, "%-5s %-9s %-15lu ",
-+ "Read", scst_io_size_names[i],
-+ (unsigned long)processed_cmds_rd);
-+ if (processed_cmds_rd == 0)
-+ processed_cmds_rd = 1;
-+
-+ do_div(scst_time_rd, processed_cmds_rd);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_scst_time_rd,
-+ (unsigned long)scst_time_rd,
-+ (unsigned long)latency_stat->max_scst_time_rd,
-+ (unsigned long)latency_stat->scst_time_rd);
-+ seq_printf(seq, "%-47s", buf);
-+
-+ do_div(tgt_time_rd, processed_cmds_rd);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_tgt_time_rd,
-+ (unsigned long)tgt_time_rd,
-+ (unsigned long)latency_stat->max_tgt_time_rd,
-+ (unsigned long)latency_stat->tgt_time_rd);
-+ seq_printf(seq, "%-47s", buf);
-+
-+ do_div(dev_time_rd, processed_cmds_rd);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_dev_time_rd,
-+ (unsigned long)dev_time_rd,
-+ (unsigned long)latency_stat->max_dev_time_rd,
-+ (unsigned long)latency_stat->dev_time_rd);
-+ seq_printf(seq, "%-47s\n", buf);
-+ }
-+ }
-+ }
-+
-+ scst_time = sess->scst_time;
-+ tgt_time = sess->tgt_time;
-+ dev_time = sess->dev_time;
-+ processed_cmds = sess->processed_cmds;
-+
-+ seq_printf(seq, "\n%-15s %-16d", "Overall ",
-+ processed_cmds);
-+
-+ if (processed_cmds == 0)
-+ processed_cmds = 1;
-+
-+ do_div(scst_time, processed_cmds);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)sess->min_scst_time,
-+ (unsigned long)scst_time,
-+ (unsigned long)sess->max_scst_time,
-+ (unsigned long)sess->scst_time);
-+ seq_printf(seq, "%-47s", buf);
-+
-+ do_div(tgt_time, processed_cmds);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)sess->min_tgt_time,
-+ (unsigned long)tgt_time,
-+ (unsigned long)sess->max_tgt_time,
-+ (unsigned long)sess->tgt_time);
-+ seq_printf(seq, "%-47s", buf);
-+
-+ do_div(dev_time, processed_cmds);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)sess->min_dev_time,
-+ (unsigned long)dev_time,
-+ (unsigned long)sess->max_dev_time,
-+ (unsigned long)sess->dev_time);
-+ seq_printf(seq, "%-47s\n\n", buf);
-+
-+ spin_unlock_bh(&sess->lat_lock);
-+ }
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t scst_proc_scsi_tgt_gen_write_lat(struct file *file,
-+ const char __user *buf,
-+ size_t length, loff_t *off)
-+{
-+ int res = length, t;
-+ struct scst_acg *acg;
-+ struct scst_session *sess;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ list_for_each_entry(acg, &scst_acg_list, acg_list_entry) {
-+ list_for_each_entry(sess, &acg->acg_sess_list,
-+ acg_sess_list_entry) {
-+ PRINT_INFO("Zeroing latency statistics for initiator "
-+ "%s", sess->initiator_name);
-+ spin_lock_bh(&sess->lat_lock);
-+
-+ sess->scst_time = 0;
-+ sess->tgt_time = 0;
-+ sess->dev_time = 0;
-+ sess->min_scst_time = 0;
-+ sess->min_tgt_time = 0;
-+ sess->min_dev_time = 0;
-+ sess->max_scst_time = 0;
-+ sess->max_tgt_time = 0;
-+ sess->max_dev_time = 0;
-+ sess->processed_cmds = 0;
-+ memset(sess->sess_latency_stat, 0,
-+ sizeof(sess->sess_latency_stat));
-+
-+ for (t = TGT_DEV_HASH_SIZE-1; t >= 0; t--) {
-+ struct list_head *sess_tgt_dev_list_head =
-+ &sess->sess_tgt_dev_list_hash[t];
-+ struct scst_tgt_dev *tgt_dev;
-+ list_for_each_entry(tgt_dev, sess_tgt_dev_list_head,
-+ sess_tgt_dev_list_entry) {
-+ tgt_dev->scst_time = 0;
-+ tgt_dev->tgt_time = 0;
-+ tgt_dev->dev_time = 0;
-+ tgt_dev->processed_cmds = 0;
-+ memset(tgt_dev->dev_latency_stat, 0,
-+ sizeof(tgt_dev->dev_latency_stat));
-+ }
-+ }
-+
-+ spin_unlock_bh(&sess->lat_lock);
-+ }
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct scst_proc_data scst_lat_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(scst_proc_scsi_tgt_gen_write_lat)
-+ .show = lat_info_show,
-+ .data = "scsi_tgt",
-+};
-+
-+#endif /* CONFIG_SCST_MEASURE_LATENCY */
-+
-+static int __init scst_proc_init_module_log(void)
-+{
-+ int res = 0;
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) || \
-+ defined(CONFIG_SCST_MEASURE_LATENCY)
-+ struct proc_dir_entry *generic;
-+#endif
-+
-+ TRACE_ENTRY();
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
-+ SCST_PROC_LOG_ENTRY_NAME,
-+ &scst_log_proc_data);
-+ if (!generic) {
-+ PRINT_ERROR("cannot init /proc/%s/%s",
-+ SCST_PROC_ENTRY_NAME, SCST_PROC_LOG_ENTRY_NAME);
-+ res = -ENOMEM;
-+ }
-+#endif
-+
-+#ifdef CONFIG_SCST_MEASURE_LATENCY
-+ if (res == 0) {
-+ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
-+ SCST_PROC_LAT_ENTRY_NAME,
-+ &scst_lat_proc_data);
-+ if (!generic) {
-+ PRINT_ERROR("cannot init /proc/%s/%s",
-+ SCST_PROC_ENTRY_NAME,
-+ SCST_PROC_LAT_ENTRY_NAME);
-+ res = -ENOMEM;
-+ }
-+ }
-+#endif
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void scst_proc_cleanup_module_log(void)
-+{
-+ TRACE_ENTRY();
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ remove_proc_entry(SCST_PROC_LOG_ENTRY_NAME, scst_proc_scsi_tgt);
-+#endif
-+
-+#ifdef CONFIG_SCST_MEASURE_LATENCY
-+ remove_proc_entry(SCST_PROC_LAT_ENTRY_NAME, scst_proc_scsi_tgt);
-+#endif
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int scst_proc_group_add_tree(struct scst_acg *acg, const char *name)
-+{
-+ int res = 0;
-+ struct proc_dir_entry *generic;
-+
-+ TRACE_ENTRY();
-+
-+ acg->acg_proc_root = proc_mkdir(name, scst_proc_groups_root);
-+ if (acg->acg_proc_root == NULL) {
-+ PRINT_ERROR("Not enough memory to register %s entry in "
-+ "/proc/%s/%s", name, SCST_PROC_ENTRY_NAME,
-+ SCST_PROC_GROUPS_ENTRY_NAME);
-+ goto out;
-+ }
-+
-+ scst_groups_addr_method_proc_data.data = acg;
-+ generic = scst_create_proc_entry(acg->acg_proc_root,
-+ SCST_PROC_GROUPS_ADDR_METHOD_ENTRY_NAME,
-+ &scst_groups_addr_method_proc_data);
-+ if (!generic) {
-+ PRINT_ERROR("Cannot init /proc/%s/%s/%s/%s",
-+ SCST_PROC_ENTRY_NAME,
-+ SCST_PROC_GROUPS_ENTRY_NAME,
-+ name, SCST_PROC_GROUPS_ADDR_METHOD_ENTRY_NAME);
-+ res = -ENOMEM;
-+ goto out_remove;
-+ }
-+
-+ scst_groups_devices_proc_data.data = acg;
-+ generic = scst_create_proc_entry(acg->acg_proc_root,
-+ SCST_PROC_GROUPS_DEVICES_ENTRY_NAME,
-+ &scst_groups_devices_proc_data);
-+ if (!generic) {
-+ PRINT_ERROR("Cannot init /proc/%s/%s/%s/%s",
-+ SCST_PROC_ENTRY_NAME,
-+ SCST_PROC_GROUPS_ENTRY_NAME,
-+ name, SCST_PROC_GROUPS_DEVICES_ENTRY_NAME);
-+ res = -ENOMEM;
-+ goto out_remove0;
-+ }
-+
-+ scst_groups_names_proc_data.data = acg;
-+ generic = scst_create_proc_entry(acg->acg_proc_root,
-+ SCST_PROC_GROUPS_USERS_ENTRY_NAME,
-+ &scst_groups_names_proc_data);
-+ if (!generic) {
-+ PRINT_ERROR("Cannot init /proc/%s/%s/%s/%s",
-+ SCST_PROC_ENTRY_NAME,
-+ SCST_PROC_GROUPS_ENTRY_NAME,
-+ name, SCST_PROC_GROUPS_USERS_ENTRY_NAME);
-+ res = -ENOMEM;
-+ goto out_remove1;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_remove1:
-+ remove_proc_entry(SCST_PROC_GROUPS_DEVICES_ENTRY_NAME,
-+ acg->acg_proc_root);
-+
-+out_remove0:
-+ remove_proc_entry(SCST_PROC_GROUPS_ADDR_METHOD_ENTRY_NAME,
-+ acg->acg_proc_root);
-+out_remove:
-+ remove_proc_entry(name, scst_proc_groups_root);
-+ goto out;
-+}
-+
-+static void scst_proc_del_acg_tree(struct proc_dir_entry *acg_proc_root,
-+ const char *name)
-+{
-+ TRACE_ENTRY();
-+
-+ remove_proc_entry(SCST_PROC_GROUPS_ADDR_METHOD_ENTRY_NAME, acg_proc_root);
-+ remove_proc_entry(SCST_PROC_GROUPS_USERS_ENTRY_NAME, acg_proc_root);
-+ remove_proc_entry(SCST_PROC_GROUPS_DEVICES_ENTRY_NAME, acg_proc_root);
-+ remove_proc_entry(name, scst_proc_groups_root);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* The activity supposed to be suspended and scst_mutex held */
-+static int scst_proc_group_add(const char *p, unsigned int addr_method)
-+{
-+ int res = 0, len = strlen(p) + 1;
-+ struct scst_acg *acg;
-+ char *name = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ name = kmalloc(len, GFP_KERNEL);
-+ if (name == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of name failed");
-+ goto out_nomem;
-+ }
-+ strlcpy(name, p, len);
-+
-+ acg = scst_alloc_add_acg(NULL, name, false);
-+ if (acg == NULL) {
-+ PRINT_ERROR("scst_alloc_add_acg() (name %s) failed", name);
-+ goto out_free;
-+ }
-+
-+ acg->addr_method = addr_method;
-+
-+ res = scst_proc_group_add_tree(acg, p);
-+ if (res != 0)
-+ goto out_free_acg;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free_acg:
-+ scst_proc_del_free_acg(acg, 0);
-+
-+out_free:
-+ kfree(name);
-+
-+out_nomem:
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+/* The activity supposed to be suspended and scst_mutex held */
-+static int scst_proc_del_free_acg(struct scst_acg *acg, int remove_proc)
-+{
-+ struct proc_dir_entry *acg_proc_root = acg->acg_proc_root;
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (acg != scst_default_acg) {
-+ if (!scst_acg_sess_is_empty(acg)) {
-+ PRINT_ERROR("%s", "Session is not empty");
-+ res = -EBUSY;
-+ goto out;
-+ }
-+ if (remove_proc)
-+ scst_proc_del_acg_tree(acg_proc_root, acg->acg_name);
-+ scst_del_free_acg(acg);
-+ }
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* The activity supposed to be suspended and scst_mutex held */
-+static int scst_proc_rename_acg(struct scst_acg *acg, const char *new_name)
-+{
-+ int res = 0, len = strlen(new_name) + 1;
-+ char *name;
-+ struct proc_dir_entry *old_acg_proc_root = acg->acg_proc_root;
-+
-+ TRACE_ENTRY();
-+
-+ name = kmalloc(len, GFP_KERNEL);
-+ if (name == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of new name failed");
-+ goto out_nomem;
-+ }
-+ strlcpy(name, new_name, len);
-+
-+ res = scst_proc_group_add_tree(acg, new_name);
-+ if (res != 0)
-+ goto out_free;
-+
-+ scst_proc_del_acg_tree(old_acg_proc_root, acg->acg_name);
-+
-+ kfree(acg->acg_name);
-+ acg->acg_name = name;
-+
-+ scst_check_reassign_sessions();
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free:
-+ kfree(name);
-+
-+out_nomem:
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __init scst_proc_init_groups(void)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ /* create the proc directory entry for the device */
-+ scst_proc_groups_root = proc_mkdir(SCST_PROC_GROUPS_ENTRY_NAME,
-+ scst_proc_scsi_tgt);
-+ if (scst_proc_groups_root == NULL) {
-+ PRINT_ERROR("Not enough memory to register %s entry in "
-+ "/proc/%s", SCST_PROC_GROUPS_ENTRY_NAME,
-+ SCST_PROC_ENTRY_NAME);
-+ goto out_nomem;
-+ }
-+
-+ res = scst_proc_group_add_tree(scst_default_acg,
-+ SCST_DEFAULT_ACG_NAME);
-+ if (res != 0)
-+ goto out_remove;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_remove:
-+ remove_proc_entry(SCST_PROC_GROUPS_ENTRY_NAME, scst_proc_scsi_tgt);
-+
-+out_nomem:
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static void scst_proc_cleanup_groups(void)
-+{
-+ struct scst_acg *acg_tmp, *acg;
-+
-+ TRACE_ENTRY();
-+
-+ /* remove all groups (dir & entries) */
-+ list_for_each_entry_safe(acg, acg_tmp, &scst_acg_list,
-+ acg_list_entry) {
-+ scst_proc_del_free_acg(acg, 1);
-+ }
-+
-+ scst_proc_del_acg_tree(scst_default_acg->acg_proc_root,
-+ SCST_DEFAULT_ACG_NAME);
-+ TRACE_DBG("remove_proc_entry(%s, %p)",
-+ SCST_PROC_GROUPS_ENTRY_NAME, scst_proc_scsi_tgt);
-+ remove_proc_entry(SCST_PROC_GROUPS_ENTRY_NAME, scst_proc_scsi_tgt);
-+
-+ TRACE_EXIT();
-+}
-+
-+static int __init scst_proc_init_sgv(void)
-+{
-+ int res = 0;
-+ struct proc_dir_entry *pr;
-+
-+ TRACE_ENTRY();
-+
-+ pr = scst_create_proc_entry(scst_proc_scsi_tgt, "sgv",
-+ &scst_sgv_proc_data);
-+ if (pr == NULL) {
-+ PRINT_ERROR("%s", "cannot create sgv /proc entry");
-+ res = -ENOMEM;
-+ }
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void __exit scst_proc_cleanup_sgv(void)
-+{
-+ TRACE_ENTRY();
-+ remove_proc_entry("sgv", scst_proc_scsi_tgt);
-+ TRACE_EXIT();
-+}
-+
-+int __init scst_proc_init_module(void)
-+{
-+ int res = 0;
-+ struct proc_dir_entry *generic;
-+
-+ TRACE_ENTRY();
-+
-+ scst_proc_scsi_tgt = proc_mkdir(SCST_PROC_ENTRY_NAME, NULL);
-+ if (!scst_proc_scsi_tgt) {
-+ PRINT_ERROR("cannot init /proc/%s", SCST_PROC_ENTRY_NAME);
-+ goto out_nomem;
-+ }
-+
-+ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
-+ SCST_PROC_ENTRY_NAME,
-+ &scst_tgt_proc_data);
-+ if (!generic) {
-+ PRINT_ERROR("cannot init /proc/%s/%s",
-+ SCST_PROC_ENTRY_NAME, SCST_PROC_ENTRY_NAME);
-+ goto out_remove;
-+ }
-+
-+ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
-+ SCST_PROC_VERSION_NAME,
-+ &scst_version_proc_data);
-+ if (!generic) {
-+ PRINT_ERROR("cannot init /proc/%s/%s",
-+ SCST_PROC_ENTRY_NAME, SCST_PROC_VERSION_NAME);
-+ goto out_remove1;
-+ }
-+
-+ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
-+ SCST_PROC_SESSIONS_NAME,
-+ &scst_sessions_proc_data);
-+ if (!generic) {
-+ PRINT_ERROR("cannot init /proc/%s/%s",
-+ SCST_PROC_ENTRY_NAME, SCST_PROC_SESSIONS_NAME);
-+ goto out_remove2;
-+ }
-+
-+ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
-+ SCST_PROC_HELP_NAME,
-+ &scst_help_proc_data);
-+ if (!generic) {
-+ PRINT_ERROR("cannot init /proc/%s/%s",
-+ SCST_PROC_ENTRY_NAME, SCST_PROC_HELP_NAME);
-+ goto out_remove3;
-+ }
-+
-+ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
-+ SCST_PROC_THREADS_NAME,
-+ &scst_threads_proc_data);
-+ if (!generic) {
-+ PRINT_ERROR("cannot init /proc/%s/%s",
-+ SCST_PROC_ENTRY_NAME, SCST_PROC_THREADS_NAME);
-+ goto out_remove4;
-+ }
-+
-+ if (scst_proc_init_module_log() < 0)
-+ goto out_remove5;
-+
-+ if (scst_proc_init_groups() < 0)
-+ goto out_remove6;
-+
-+ if (scst_proc_init_sgv() < 0)
-+ goto out_remove7;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_remove7:
-+ scst_proc_cleanup_groups();
-+
-+out_remove6:
-+ scst_proc_cleanup_module_log();
-+
-+out_remove5:
-+ remove_proc_entry(SCST_PROC_THREADS_NAME, scst_proc_scsi_tgt);
-+
-+out_remove4:
-+ remove_proc_entry(SCST_PROC_HELP_NAME, scst_proc_scsi_tgt);
-+
-+out_remove3:
-+ remove_proc_entry(SCST_PROC_SESSIONS_NAME, scst_proc_scsi_tgt);
-+
-+out_remove2:
-+ remove_proc_entry(SCST_PROC_VERSION_NAME, scst_proc_scsi_tgt);
-+
-+out_remove1:
-+ remove_proc_entry(SCST_PROC_ENTRY_NAME, scst_proc_scsi_tgt);
-+
-+out_remove:
-+ remove_proc_entry(SCST_PROC_ENTRY_NAME, NULL);
-+
-+out_nomem:
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+void __exit scst_proc_cleanup_module(void)
-+{
-+ TRACE_ENTRY();
-+
-+ /* We may not bother about locks here */
-+ scst_proc_cleanup_sgv();
-+ scst_proc_cleanup_groups();
-+ scst_proc_cleanup_module_log();
-+ remove_proc_entry(SCST_PROC_THREADS_NAME, scst_proc_scsi_tgt);
-+ remove_proc_entry(SCST_PROC_HELP_NAME, scst_proc_scsi_tgt);
-+ remove_proc_entry(SCST_PROC_SESSIONS_NAME, scst_proc_scsi_tgt);
-+ remove_proc_entry(SCST_PROC_VERSION_NAME, scst_proc_scsi_tgt);
-+ remove_proc_entry(SCST_PROC_ENTRY_NAME, scst_proc_scsi_tgt);
-+ remove_proc_entry(SCST_PROC_ENTRY_NAME, NULL);
-+
-+ TRACE_EXIT();
-+}
-+
-+static ssize_t scst_proc_threads_write(struct file *file,
-+ const char __user *buf,
-+ size_t length, loff_t *off)
-+{
-+ int res = length;
-+ int oldtn, newtn, delta;
-+ char *buffer;
-+
-+ TRACE_ENTRY();
-+
-+ if (length > SCST_PROC_BLOCK_SIZE) {
-+ res = -EOVERFLOW;
-+ goto out;
-+ }
-+ if (!buf) {
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ buffer = (char *)__get_free_page(GFP_KERNEL);
-+ if (!buffer) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ if (copy_from_user(buffer, buf, length)) {
-+ res = -EFAULT;
-+ goto out_free;
-+ }
-+ if (length < PAGE_SIZE) {
-+ buffer[length] = '\0';
-+ } else if (buffer[PAGE_SIZE-1]) {
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ if (mutex_lock_interruptible(&scst_proc_mutex) != 0) {
-+ res = -EINTR;
-+ goto out_free;
-+ }
-+
-+ mutex_lock(&scst_mutex);
-+
-+ oldtn = scst_main_cmd_threads.nr_threads;
-+ newtn = simple_strtoul(buffer, NULL, 0);
-+ if (newtn <= 0) {
-+ PRINT_ERROR("Illegal threads num value %d", newtn);
-+ res = -EINVAL;
-+ goto out_up_thr_free;
-+ }
-+ delta = newtn - oldtn;
-+ if (delta < 0)
-+ scst_del_threads(&scst_main_cmd_threads, -delta);
-+ else {
-+ int rc = scst_add_threads(&scst_main_cmd_threads, NULL, NULL,
-+ delta);
-+ if (rc != 0)
-+ res = rc;
-+ }
-+
-+ PRINT_INFO("Changed cmd threads num: old %d, new %d", oldtn, newtn);
-+
-+out_up_thr_free:
-+ mutex_unlock(&scst_mutex);
-+
-+ mutex_unlock(&scst_proc_mutex);
-+
-+out_free:
-+ free_page((unsigned long)buffer);
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+int scst_build_proc_target_dir_entries(struct scst_tgt_template *vtt)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ /* create the proc directory entry for the device */
-+ vtt->proc_tgt_root = proc_mkdir(vtt->name, scst_proc_scsi_tgt);
-+ if (vtt->proc_tgt_root == NULL) {
-+ PRINT_ERROR("Not enough memory to register SCSI target %s "
-+ "in /proc/%s", vtt->name, SCST_PROC_ENTRY_NAME);
-+ goto out_nomem;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_nomem:
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+void scst_cleanup_proc_target_dir_entries(struct scst_tgt_template *vtt)
-+{
-+ TRACE_ENTRY();
-+
-+ remove_proc_entry(vtt->name, scst_proc_scsi_tgt);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Called under scst_mutex */
-+int scst_build_proc_target_entries(struct scst_tgt *vtt)
-+{
-+ int res = 0;
-+ struct proc_dir_entry *p;
-+ char name[20];
-+
-+ TRACE_ENTRY();
-+
-+ if (vtt->tgtt->read_proc || vtt->tgtt->write_proc) {
-+ /* create the proc file entry for the device */
-+ scnprintf(name, sizeof(name), "%d", vtt->tgtt->proc_dev_num);
-+ scst_scsi_tgt_proc_data.data = (void *)vtt;
-+ p = scst_create_proc_entry(vtt->tgtt->proc_tgt_root,
-+ name,
-+ &scst_scsi_tgt_proc_data);
-+ if (p == NULL) {
-+ PRINT_ERROR("Not enough memory to register SCSI "
-+ "target entry %s in /proc/%s/%s", name,
-+ SCST_PROC_ENTRY_NAME, vtt->tgtt->name);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ vtt->proc_num = vtt->tgtt->proc_dev_num;
-+ vtt->tgtt->proc_dev_num++;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+void scst_cleanup_proc_target_entries(struct scst_tgt *vtt)
-+{
-+ char name[20];
-+
-+ TRACE_ENTRY();
-+
-+ if (vtt->tgtt->read_proc || vtt->tgtt->write_proc) {
-+ scnprintf(name, sizeof(name), "%d", vtt->proc_num);
-+ remove_proc_entry(name, vtt->tgtt->proc_tgt_root);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static ssize_t scst_proc_scsi_tgt_write(struct file *file,
-+ const char __user *buf,
-+ size_t length, loff_t *off)
-+{
-+ struct scst_tgt *vtt =
-+ (struct scst_tgt *)PDE(file->f_dentry->d_inode)->data;
-+ ssize_t res = 0;
-+ char *buffer;
-+ char *start;
-+ int eof = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (vtt->tgtt->write_proc == NULL) {
-+ res = -ENOSYS;
-+ goto out;
-+ }
-+
-+ if (length > SCST_PROC_BLOCK_SIZE) {
-+ res = -EOVERFLOW;
-+ goto out;
-+ }
-+ if (!buf) {
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ buffer = (char *)__get_free_page(GFP_KERNEL);
-+ if (!buffer) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ if (copy_from_user(buffer, buf, length)) {
-+ res = -EFAULT;
-+ goto out_free;
-+ }
-+ if (length < PAGE_SIZE) {
-+ buffer[length] = '\0';
-+ } else if (buffer[PAGE_SIZE-1]) {
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ TRACE_BUFFER("Buffer", buffer, length);
-+
-+ if (mutex_lock_interruptible(&scst_proc_mutex) != 0) {
-+ res = -EINTR;
-+ goto out_free;
-+ }
-+
-+ res = vtt->tgtt->write_proc(buffer, &start, 0, length, &eof, vtt);
-+
-+ mutex_unlock(&scst_proc_mutex);
-+
-+out_free:
-+ free_page((unsigned long)buffer);
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+int scst_build_proc_dev_handler_dir_entries(struct scst_dev_type *dev_type)
-+{
-+ int res = 0;
-+ struct proc_dir_entry *p;
-+ const char *name; /* workaround to keep /proc ABI intact */
-+
-+ TRACE_ENTRY();
-+
-+ BUG_ON(dev_type->proc_dev_type_root);
-+
-+ if (strcmp(dev_type->name, "vdisk_fileio") == 0)
-+ name = "vdisk";
-+ else
-+ name = dev_type->name;
-+
-+ /* create the proc directory entry for the dev type handler */
-+ dev_type->proc_dev_type_root = proc_mkdir(name,
-+ scst_proc_scsi_tgt);
-+ if (dev_type->proc_dev_type_root == NULL) {
-+ PRINT_ERROR("Not enough memory to register dev handler dir "
-+ "%s in /proc/%s", name, SCST_PROC_ENTRY_NAME);
-+ goto out_nomem;
-+ }
-+
-+ scst_dev_handler_type_proc_data.data = dev_type;
-+ if (dev_type->type >= 0) {
-+ p = scst_create_proc_entry(dev_type->proc_dev_type_root,
-+ SCST_PROC_DEV_HANDLER_TYPE_ENTRY_NAME,
-+ &scst_dev_handler_type_proc_data);
-+ if (p == NULL) {
-+ PRINT_ERROR("Not enough memory to register dev "
-+ "handler entry %s in /proc/%s/%s",
-+ SCST_PROC_DEV_HANDLER_TYPE_ENTRY_NAME,
-+ SCST_PROC_ENTRY_NAME, name);
-+ goto out_remove;
-+ }
-+ }
-+
-+ if (dev_type->read_proc || dev_type->write_proc) {
-+ /* create the proc file entry for the dev type handler */
-+ scst_dev_handler_proc_data.data = (void *)dev_type;
-+ p = scst_create_proc_entry(dev_type->proc_dev_type_root,
-+ name,
-+ &scst_dev_handler_proc_data);
-+ if (p == NULL) {
-+ PRINT_ERROR("Not enough memory to register dev "
-+ "handler entry %s in /proc/%s/%s", name,
-+ SCST_PROC_ENTRY_NAME, name);
-+ goto out_remove1;
-+ }
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_remove1:
-+ if (dev_type->type >= 0)
-+ remove_proc_entry(SCST_PROC_DEV_HANDLER_TYPE_ENTRY_NAME,
-+ dev_type->proc_dev_type_root);
-+
-+out_remove:
-+ remove_proc_entry(name, scst_proc_scsi_tgt);
-+
-+out_nomem:
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+void scst_cleanup_proc_dev_handler_dir_entries(struct scst_dev_type *dev_type)
-+{
-+ /* Workaround to keep /proc ABI intact */
-+ const char *name;
-+
-+ TRACE_ENTRY();
-+
-+ BUG_ON(dev_type->proc_dev_type_root == NULL);
-+
-+ if (strcmp(dev_type->name, "vdisk_fileio") == 0)
-+ name = "vdisk";
-+ else
-+ name = dev_type->name;
-+
-+ if (dev_type->type >= 0) {
-+ remove_proc_entry(SCST_PROC_DEV_HANDLER_TYPE_ENTRY_NAME,
-+ dev_type->proc_dev_type_root);
-+ }
-+ if (dev_type->read_proc || dev_type->write_proc)
-+ remove_proc_entry(name, dev_type->proc_dev_type_root);
-+ remove_proc_entry(name, scst_proc_scsi_tgt);
-+ dev_type->proc_dev_type_root = NULL;
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static ssize_t scst_proc_scsi_dev_handler_write(struct file *file,
-+ const char __user *buf,
-+ size_t length, loff_t *off)
-+{
-+ struct scst_dev_type *dev_type =
-+ (struct scst_dev_type *)PDE(file->f_dentry->d_inode)->data;
-+ ssize_t res = 0;
-+ char *buffer;
-+ char *start;
-+ int eof = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (dev_type->write_proc == NULL) {
-+ res = -ENOSYS;
-+ goto out;
-+ }
-+
-+ if (length > SCST_PROC_BLOCK_SIZE) {
-+ res = -EOVERFLOW;
-+ goto out;
-+ }
-+ if (!buf) {
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ buffer = (char *)__get_free_page(GFP_KERNEL);
-+ if (!buffer) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ if (copy_from_user(buffer, buf, length)) {
-+ res = -EFAULT;
-+ goto out_free;
-+ }
-+ if (length < PAGE_SIZE) {
-+ buffer[length] = '\0';
-+ } else if (buffer[PAGE_SIZE-1]) {
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ TRACE_BUFFER("Buffer", buffer, length);
-+
-+ if (mutex_lock_interruptible(&scst_proc_mutex) != 0) {
-+ res = -EINTR;
-+ goto out_free;
-+ }
-+
-+ res = dev_type->write_proc(buffer, &start, 0, length, &eof, dev_type);
-+
-+ mutex_unlock(&scst_proc_mutex);
-+
-+out_free:
-+ free_page((unsigned long)buffer);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t scst_proc_scsi_tgt_gen_write(struct file *file,
-+ const char __user *buf,
-+ size_t length, loff_t *off)
-+{
-+ int res, rc = 0, action;
-+ char *buffer, *p, *pp, *ppp;
-+ struct scst_acg *a, *acg = NULL;
-+ unsigned int addr_method = SCST_LUN_ADDR_METHOD_PERIPHERAL;
-+
-+ TRACE_ENTRY();
-+
-+ if (length > SCST_PROC_BLOCK_SIZE) {
-+ res = -EOVERFLOW;
-+ goto out;
-+ }
-+ if (!buf) {
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ buffer = (char *)__get_free_page(GFP_KERNEL);
-+ if (!buffer) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ if (copy_from_user(buffer, buf, length)) {
-+ res = -EFAULT;
-+ goto out_free;
-+ }
-+ if (length < PAGE_SIZE) {
-+ buffer[length] = '\0';
-+ } else if (buffer[PAGE_SIZE-1]) {
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ /*
-+ * Usage: echo "add_group GROUP_NAME [FLAT]" >/proc/scsi_tgt/scsi_tgt
-+ * or echo "del_group GROUP_NAME" >/proc/scsi_tgt/scsi_tgt
-+ * or echo "rename_group OLD_NAME NEW_NAME" >/proc/scsi_tgt/scsi_tgt"
-+ * or echo "assign H:C:I:L HANDLER_NAME" >/proc/scsi_tgt/scsi_tgt
-+ */
-+ p = buffer;
-+ if (p[strlen(p) - 1] == '\n')
-+ p[strlen(p) - 1] = '\0';
-+ if (!strncasecmp("assign ", p, 7)) {
-+ p += 7;
-+ action = SCST_PROC_ACTION_ASSIGN;
-+ } else if (!strncasecmp("add_group ", p, 10)) {
-+ p += 10;
-+ action = SCST_PROC_ACTION_ADD_GROUP;
-+ } else if (!strncasecmp("del_group ", p, 10)) {
-+ p += 10;
-+ action = SCST_PROC_ACTION_DEL_GROUP;
-+ } else if (!strncasecmp("rename_group ", p, 13)) {
-+ p += 13;
-+ action = SCST_PROC_ACTION_RENAME_GROUP;
-+ } else {
-+ PRINT_ERROR("Unknown action \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ res = scst_suspend_activity(true);
-+ if (res != 0)
-+ goto out_free;
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out_free_resume;
-+ }
-+
-+ res = length;
-+
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+
-+ switch (action) {
-+ case SCST_PROC_ACTION_ADD_GROUP:
-+ case SCST_PROC_ACTION_DEL_GROUP:
-+ case SCST_PROC_ACTION_RENAME_GROUP:
-+ pp = p;
-+ while (!isspace(*pp) && *pp != '\0')
-+ pp++;
-+ if (*pp != '\0') {
-+ *pp = '\0';
-+ pp++;
-+ while (isspace(*pp) && *pp != '\0')
-+ pp++;
-+ if (*pp != '\0') {
-+ switch (action) {
-+ case SCST_PROC_ACTION_ADD_GROUP:
-+ ppp = pp;
-+ while (!isspace(*ppp) && *ppp != '\0')
-+ ppp++;
-+ if (*ppp != '\0') {
-+ *ppp = '\0';
-+ ppp++;
-+ while (isspace(*ppp) && *ppp != '\0')
-+ ppp++;
-+ if (*ppp != '\0') {
-+ PRINT_ERROR("%s", "Too many "
-+ "arguments");
-+ res = -EINVAL;
-+ goto out_up_free;
-+ }
-+ }
-+ if (strcasecmp(pp, "FLAT") != 0) {
-+ PRINT_ERROR("Unexpected "
-+ "argument %s", pp);
-+ res = -EINVAL;
-+ goto out_up_free;
-+ } else
-+ addr_method = SCST_LUN_ADDR_METHOD_FLAT;
-+ break;
-+ case SCST_PROC_ACTION_DEL_GROUP:
-+ PRINT_ERROR("%s", "Too many "
-+ "arguments");
-+ res = -EINVAL;
-+ goto out_up_free;
-+ }
-+ }
-+ }
-+
-+ if (strcmp(p, SCST_DEFAULT_ACG_NAME) == 0) {
-+ PRINT_ERROR("Attempt to add/delete/rename predefined "
-+ "group \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_up_free;
-+ }
-+
-+ list_for_each_entry(a, &scst_acg_list, acg_list_entry) {
-+ if (strcmp(a->acg_name, p) == 0) {
-+ TRACE_DBG("group (acg) %p %s found",
-+ a, a->acg_name);
-+ acg = a;
-+ break;
-+ }
-+ }
-+
-+ switch (action) {
-+ case SCST_PROC_ACTION_ADD_GROUP:
-+ if (acg) {
-+ PRINT_ERROR("acg name %s exist", p);
-+ res = -EINVAL;
-+ goto out_up_free;
-+ }
-+ rc = scst_proc_group_add(p, addr_method);
-+ break;
-+ case SCST_PROC_ACTION_DEL_GROUP:
-+ if (acg == NULL) {
-+ PRINT_ERROR("acg name %s not found", p);
-+ res = -EINVAL;
-+ goto out_up_free;
-+ }
-+ rc = scst_proc_del_free_acg(acg, 1);
-+ break;
-+ case SCST_PROC_ACTION_RENAME_GROUP:
-+ if (acg == NULL) {
-+ PRINT_ERROR("acg name %s not found", p);
-+ res = -EINVAL;
-+ goto out_up_free;
-+ }
-+
-+ p = pp;
-+ while (!isspace(*pp) && *pp != '\0')
-+ pp++;
-+ if (*pp != '\0') {
-+ *pp = '\0';
-+ pp++;
-+ while (isspace(*pp) && *pp != '\0')
-+ pp++;
-+ if (*pp != '\0') {
-+ PRINT_ERROR("%s", "Too many arguments");
-+ res = -EINVAL;
-+ goto out_up_free;
-+ }
-+ }
-+ rc = scst_proc_rename_acg(acg, p);
-+ break;
-+ }
-+ break;
-+ case SCST_PROC_ACTION_ASSIGN:
-+ rc = scst_proc_assign_handler(p);
-+ break;
-+ }
-+
-+ if (rc != 0)
-+ res = rc;
-+
-+out_up_free:
-+ mutex_unlock(&scst_mutex);
-+
-+out_free_resume:
-+ scst_resume_activity();
-+
-+out_free:
-+ free_page((unsigned long)buffer);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* The activity supposed to be suspended and scst_mutex held */
-+static int scst_proc_assign_handler(char *buf)
-+{
-+ int res = 0;
-+ char *p = buf, *e, *ee;
-+ unsigned long host, channel = 0, id = 0, lun = 0;
-+ struct scst_device *d, *dev = NULL;
-+ struct scst_dev_type *dt, *handler = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+
-+ host = simple_strtoul(p, &p, 0);
-+ if ((host == ULONG_MAX) || (*p != ':'))
-+ goto out_synt_err;
-+ p++;
-+ channel = simple_strtoul(p, &p, 0);
-+ if ((channel == ULONG_MAX) || (*p != ':'))
-+ goto out_synt_err;
-+ p++;
-+ id = simple_strtoul(p, &p, 0);
-+ if ((channel == ULONG_MAX) || (*p != ':'))
-+ goto out_synt_err;
-+ p++;
-+ lun = simple_strtoul(p, &p, 0);
-+ if (lun == ULONG_MAX)
-+ goto out_synt_err;
-+
-+ e = p;
-+ e++;
-+ while (isspace(*e) && *e != '\0')
-+ e++;
-+ ee = e;
-+ while (!isspace(*ee) && *ee != '\0')
-+ ee++;
-+ *ee = '\0';
-+
-+ TRACE_DBG("Dev %ld:%ld:%ld:%ld, handler %s", host, channel, id, lun, e);
-+
-+ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
-+ if ((d->virt_id == 0) &&
-+ d->scsi_dev->host->host_no == host &&
-+ d->scsi_dev->channel == channel &&
-+ d->scsi_dev->id == id &&
-+ d->scsi_dev->lun == lun) {
-+ dev = d;
-+ TRACE_DBG("Dev %p (%ld:%ld:%ld:%ld) found",
-+ dev, host, channel, id, lun);
-+ break;
-+ }
-+ }
-+
-+ if (dev == NULL) {
-+ PRINT_ERROR("Device %ld:%ld:%ld:%ld not found",
-+ host, channel, id, lun);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
-+ if (!strcmp(dt->name, e)) {
-+ handler = dt;
-+ TRACE_DBG("Dev handler %p with name %s found",
-+ dt, dt->name);
-+ break;
-+ }
-+ }
-+
-+ if (handler == NULL) {
-+ PRINT_ERROR("Handler %s not found", e);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (dev->scsi_dev->type != handler->type) {
-+ PRINT_ERROR("Type %d of device %s differs from type "
-+ "%d of dev handler %s", dev->type,
-+ dev->handler->name, handler->type, handler->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ res = scst_assign_dev_handler(dev, handler);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_synt_err:
-+ PRINT_ERROR("Syntax error on %s", p);
-+ res = -EINVAL;
-+ goto out;
-+}
-+
-+static ssize_t scst_proc_groups_devices_write(struct file *file,
-+ const char __user *buf,
-+ size_t length, loff_t *off)
-+{
-+ int res, action, rc, read_only = 0;
-+ char *buffer, *p, *e = NULL;
-+ unsigned int virt_lun;
-+ struct scst_acg *acg =
-+ (struct scst_acg *)PDE(file->f_dentry->d_inode)->data;
-+ struct scst_acg_dev *acg_dev = NULL, *acg_dev_tmp;
-+ struct scst_device *d, *dev = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ if (length > SCST_PROC_BLOCK_SIZE) {
-+ res = -EOVERFLOW;
-+ goto out;
-+ }
-+ if (!buf) {
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ buffer = (char *)__get_free_page(GFP_KERNEL);
-+ if (!buffer) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ if (copy_from_user(buffer, buf, length)) {
-+ res = -EFAULT;
-+ goto out_free;
-+ }
-+ if (length < PAGE_SIZE) {
-+ buffer[length] = '\0';
-+ } else if (buffer[PAGE_SIZE-1]) {
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ /*
-+ * Usage: echo "add|del H:C:I:L lun [READ_ONLY]" \
-+ * >/proc/scsi_tgt/groups/GROUP_NAME/devices
-+ * or echo "replace H:C:I:L lun [READ_ONLY]" \
-+ * >/proc/scsi_tgt/groups/GROUP_NAME/devices
-+ * or echo "add|del V_NAME lun [READ_ONLY]" \
-+ * >/proc/scsi_tgt/groups/GROUP_NAME/devices
-+ * or echo "replace V_NAME lun [READ_ONLY]" \
-+ * >/proc/scsi_tgt/groups/GROUP_NAME/devices
-+ * or echo "clear" >/proc/scsi_tgt/groups/GROUP_NAME/devices
-+ */
-+ p = buffer;
-+ if (p[strlen(p) - 1] == '\n')
-+ p[strlen(p) - 1] = '\0';
-+ if (!strncasecmp("clear", p, 5)) {
-+ action = SCST_PROC_ACTION_CLEAR;
-+ } else if (!strncasecmp("add ", p, 4)) {
-+ p += 4;
-+ action = SCST_PROC_ACTION_ADD;
-+ } else if (!strncasecmp("del ", p, 4)) {
-+ p += 4;
-+ action = SCST_PROC_ACTION_DEL;
-+ } else if (!strncasecmp("replace ", p, 8)) {
-+ p += 8;
-+ action = SCST_PROC_ACTION_REPLACE;
-+ } else {
-+ PRINT_ERROR("Unknown action \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ res = scst_suspend_activity(true);
-+ if (res != 0)
-+ goto out_free;
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out_free_resume;
-+ }
-+
-+ res = length;
-+
-+ switch (action) {
-+ case SCST_PROC_ACTION_ADD:
-+ case SCST_PROC_ACTION_DEL:
-+ case SCST_PROC_ACTION_REPLACE:
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+ e = p; /* save p */
-+ while (!isspace(*e) && *e != '\0')
-+ e++;
-+ *e = 0;
-+
-+ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
-+ if (!strcmp(d->virt_name, p)) {
-+ dev = d;
-+ TRACE_DBG("Device %p (%s) found", dev, p);
-+ break;
-+ }
-+ }
-+ if (dev == NULL) {
-+ PRINT_ERROR("Device %s not found", p);
-+ res = -EINVAL;
-+ goto out_free_up;
-+ }
-+ break;
-+ }
-+
-+ /* ToDo: create separate functions */
-+
-+ switch (action) {
-+ case SCST_PROC_ACTION_ADD:
-+ case SCST_PROC_ACTION_REPLACE:
-+ {
-+ bool dev_replaced = false;
-+
-+ e++;
-+ while (isspace(*e) && *e != '\0')
-+ e++;
-+ virt_lun = simple_strtoul(e, &e, 0);
-+
-+ while (isspace(*e) && *e != '\0')
-+ e++;
-+
-+ if (*e != '\0') {
-+ if (!strncasecmp("READ_ONLY", e, 9))
-+ read_only = 1;
-+ else {
-+ PRINT_ERROR("Unknown option \"%s\"", e);
-+ res = -EINVAL;
-+ goto out_free_up;
-+ }
-+ }
-+
-+ list_for_each_entry(acg_dev_tmp, &acg->acg_dev_list,
-+ acg_dev_list_entry) {
-+ if (acg_dev_tmp->lun == virt_lun) {
-+ acg_dev = acg_dev_tmp;
-+ break;
-+ }
-+ }
-+ if (acg_dev != NULL) {
-+ if (action == SCST_PROC_ACTION_ADD) {
-+ PRINT_ERROR("virt lun %d already exists in "
-+ "group %s", virt_lun, acg->acg_name);
-+ res = -EEXIST;
-+ goto out_free_up;
-+ } else {
-+ /* Replace */
-+ rc = scst_acg_del_lun(acg, acg_dev->lun,
-+ false);
-+ if (rc) {
-+ res = rc;
-+ goto out_free_up;
-+ }
-+ dev_replaced = true;
-+ }
-+ }
-+
-+ rc = scst_acg_add_lun(acg, NULL, dev, virt_lun, read_only,
-+ false, NULL);
-+ if (rc) {
-+ res = rc;
-+ goto out_free_up;
-+ }
-+
-+ if (action == SCST_PROC_ACTION_ADD)
-+ scst_report_luns_changed(acg);
-+
-+ if (dev_replaced) {
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ if ((tgt_dev->acg_dev->acg == acg) &&
-+ (tgt_dev->lun == virt_lun)) {
-+ TRACE_MGMT_DBG("INQUIRY DATA HAS CHANGED"
-+ " on tgt_dev %p", tgt_dev);
-+ scst_gen_aen_or_ua(tgt_dev,
-+ SCST_LOAD_SENSE(scst_sense_inquery_data_changed));
-+ }
-+ }
-+ }
-+ break;
-+ }
-+ case SCST_PROC_ACTION_DEL:
-+ {
-+ /*
-+ * This code doesn't handle if there are >1 LUNs for the same
-+ * device in the group. Instead, it always deletes the first
-+ * entry. It wasn't fixed for compatibility reasons, because
-+ * procfs is now obsoleted.
-+ */
-+ struct scst_acg_dev *a;
-+ list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
-+ if (a->dev == dev) {
-+ rc = scst_acg_del_lun(acg, a->lun, true);
-+ if (rc) {
-+ res = rc;
-+ goto out_free_up;
-+ }
-+ break;
-+ }
-+ }
-+ PRINT_ERROR("Device is not found in group %s", acg->acg_name);
-+ break;
-+ }
-+ case SCST_PROC_ACTION_CLEAR:
-+ list_for_each_entry_safe(acg_dev, acg_dev_tmp,
-+ &acg->acg_dev_list,
-+ acg_dev_list_entry) {
-+ rc = scst_acg_del_lun(acg, acg_dev->lun,
-+ list_is_last(&acg_dev->acg_dev_list_entry,
-+ &acg->acg_dev_list));
-+ if (rc) {
-+ res = rc;
-+ goto out_free_up;
-+ }
-+ }
-+ break;
-+ }
-+
-+out_free_up:
-+ mutex_unlock(&scst_mutex);
-+
-+out_free_resume:
-+ scst_resume_activity();
-+
-+out_free:
-+ free_page((unsigned long)buffer);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t scst_proc_groups_names_write(struct file *file,
-+ const char __user *buf,
-+ size_t length, loff_t *off)
-+{
-+ int res = length, rc = 0, action;
-+ char *buffer, *p, *pp = NULL;
-+ struct scst_acg *acg =
-+ (struct scst_acg *)PDE(file->f_dentry->d_inode)->data;
-+ struct scst_acn *n, *nn;
-+
-+ TRACE_ENTRY();
-+
-+ if (length > SCST_PROC_BLOCK_SIZE) {
-+ res = -EOVERFLOW;
-+ goto out;
-+ }
-+ if (!buf) {
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ buffer = (char *)__get_free_page(GFP_KERNEL);
-+ if (!buffer) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ if (copy_from_user(buffer, buf, length)) {
-+ res = -EFAULT;
-+ goto out_free;
-+ }
-+ if (length < PAGE_SIZE) {
-+ buffer[length] = '\0';
-+ } else if (buffer[PAGE_SIZE-1]) {
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ /*
-+ * Usage: echo "add|del NAME" >/proc/scsi_tgt/groups/GROUP_NAME/names
-+ * or echo "move NAME NEW_GROUP_NAME" >/proc/scsi_tgt/groups/OLD_GROUP_NAME/names"
-+ * or echo "clear" >/proc/scsi_tgt/groups/GROUP_NAME/names
-+ */
-+ p = buffer;
-+ if (p[strlen(p) - 1] == '\n')
-+ p[strlen(p) - 1] = '\0';
-+ if (!strncasecmp("clear", p, 5)) {
-+ action = SCST_PROC_ACTION_CLEAR;
-+ } else if (!strncasecmp("add ", p, 4)) {
-+ p += 4;
-+ action = SCST_PROC_ACTION_ADD;
-+ } else if (!strncasecmp("del ", p, 4)) {
-+ p += 4;
-+ action = SCST_PROC_ACTION_DEL;
-+ } else if (!strncasecmp("move ", p, 5)) {
-+ p += 5;
-+ action = SCST_PROC_ACTION_MOVE;
-+ } else {
-+ PRINT_ERROR("Unknown action \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ switch (action) {
-+ case SCST_PROC_ACTION_ADD:
-+ case SCST_PROC_ACTION_DEL:
-+ case SCST_PROC_ACTION_MOVE:
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+ pp = p;
-+ while (!isspace(*pp) && *pp != '\0')
-+ pp++;
-+ if (*pp != '\0') {
-+ *pp = '\0';
-+ pp++;
-+ while (isspace(*pp) && *pp != '\0')
-+ pp++;
-+ if (*pp != '\0') {
-+ switch (action) {
-+ case SCST_PROC_ACTION_ADD:
-+ case SCST_PROC_ACTION_DEL:
-+ PRINT_ERROR("%s", "Too many "
-+ "arguments");
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+ }
-+ }
-+ break;
-+ }
-+
-+ rc = scst_suspend_activity(true);
-+ if (rc != 0)
-+ goto out_free;
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out_free_resume;
-+ }
-+
-+ switch (action) {
-+ case SCST_PROC_ACTION_ADD:
-+ rc = scst_acg_add_acn(acg, p);
-+ break;
-+ case SCST_PROC_ACTION_DEL:
-+ rc = scst_acg_remove_name(acg, p, true);
-+ break;
-+ case SCST_PROC_ACTION_MOVE:
-+ {
-+ struct scst_acg *a, *new_acg = NULL;
-+ char *name = p;
-+ p = pp;
-+ while (!isspace(*pp) && *pp != '\0')
-+ pp++;
-+ if (*pp != '\0') {
-+ *pp = '\0';
-+ pp++;
-+ while (isspace(*pp) && *pp != '\0')
-+ pp++;
-+ if (*pp != '\0') {
-+ PRINT_ERROR("%s", "Too many arguments");
-+ res = -EINVAL;
-+ goto out_free_unlock;
-+ }
-+ }
-+ list_for_each_entry(a, &scst_acg_list, acg_list_entry) {
-+ if (strcmp(a->acg_name, p) == 0) {
-+ TRACE_DBG("group (acg) %p %s found",
-+ a, a->acg_name);
-+ new_acg = a;
-+ break;
-+ }
-+ }
-+ if (new_acg == NULL) {
-+ PRINT_ERROR("Group %s not found", p);
-+ res = -EINVAL;
-+ goto out_free_unlock;
-+ }
-+ rc = scst_acg_remove_name(acg, name, false);
-+ if (rc != 0)
-+ goto out_free_unlock;
-+ rc = scst_acg_add_acn(new_acg, name);
-+ if (rc != 0)
-+ scst_acg_add_acn(acg, name);
-+ break;
-+ }
-+ case SCST_PROC_ACTION_CLEAR:
-+ list_for_each_entry_safe(n, nn, &acg->acn_list,
-+ acn_list_entry) {
-+ scst_del_free_acn(n, false);
-+ }
-+ scst_check_reassign_sessions();
-+ break;
-+ }
-+
-+out_free_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+out_free_resume:
-+ scst_resume_activity();
-+
-+out_free:
-+ free_page((unsigned long)buffer);
-+
-+out:
-+ if (rc < 0)
-+ res = rc;
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_version_info_show(struct seq_file *seq, void *v)
-+{
-+ TRACE_ENTRY();
-+
-+ seq_printf(seq, "%s\n", SCST_VERSION_STRING);
-+
-+#ifdef CONFIG_SCST_STRICT_SERIALIZING
-+ seq_printf(seq, "STRICT_SERIALIZING\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ seq_printf(seq, "EXTRACHECKS\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_TRACING
-+ seq_printf(seq, "TRACING\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG
-+ seq_printf(seq, "DEBUG\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_TM
-+ seq_printf(seq, "DEBUG_TM\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_RETRY
-+ seq_printf(seq, "DEBUG_RETRY\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_OOM
-+ seq_printf(seq, "DEBUG_OOM\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_SN
-+ seq_printf(seq, "DEBUG_SN\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
-+ seq_printf(seq, "USE_EXPECTED_VALUES\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ seq_printf(seq, "TEST_IO_IN_SIRQ\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_STRICT_SECURITY
-+ seq_printf(seq, "STRICT_SECURITY\n");
-+#endif
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+static struct scst_proc_data scst_version_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(NULL)
-+ .show = scst_version_info_show,
-+};
-+
-+static int scst_help_info_show(struct seq_file *seq, void *v)
-+{
-+ TRACE_ENTRY();
-+
-+ seq_printf(seq, "%s\n", scst_proc_help_string);
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+static struct scst_proc_data scst_help_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(NULL)
-+ .show = scst_help_info_show,
-+};
-+
-+static int scst_dev_handler_type_info_show(struct seq_file *seq, void *v)
-+{
-+ struct scst_dev_type *dev_type = (struct scst_dev_type *)seq->private;
-+
-+ TRACE_ENTRY();
-+
-+ seq_printf(seq, "%d - %s\n", dev_type->type,
-+ dev_type->type > (int)ARRAY_SIZE(scst_proc_dev_handler_type)
-+ ? "unknown" : scst_proc_dev_handler_type[dev_type->type]);
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+static struct scst_proc_data scst_dev_handler_type_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(NULL)
-+ .show = scst_dev_handler_type_info_show,
-+};
-+
-+static int scst_sessions_info_show(struct seq_file *seq, void *v)
-+{
-+ int res = 0;
-+ struct scst_acg *acg;
-+ struct scst_session *sess;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ seq_printf(seq, "%-20s %-45s %-35s %-15s\n",
-+ "Target name", "Initiator name",
-+ "Group name", "Active/All Commands Count");
-+
-+ list_for_each_entry(acg, &scst_acg_list, acg_list_entry) {
-+ list_for_each_entry(sess, &acg->acg_sess_list,
-+ acg_sess_list_entry) {
-+ int active_cmds = 0, t;
-+ for (t = TGT_DEV_HASH_SIZE-1; t >= 0; t--) {
-+ struct list_head *sess_tgt_dev_list_head =
-+ &sess->sess_tgt_dev_list_hash[t];
-+ struct scst_tgt_dev *tgt_dev;
-+ list_for_each_entry(tgt_dev,
-+ sess_tgt_dev_list_head,
-+ sess_tgt_dev_list_entry) {
-+ active_cmds += atomic_read(&tgt_dev->tgt_dev_cmd_count);
-+ }
-+ }
-+ seq_printf(seq, "%-20s %-45s %-35s %d/%d\n",
-+ sess->tgt->tgtt->name,
-+ sess->initiator_name,
-+ acg->acg_name, active_cmds,
-+ atomic_read(&sess->sess_cmd_count));
-+ }
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct scst_proc_data scst_sessions_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(NULL)
-+ .show = scst_sessions_info_show,
-+};
-+
-+static struct scst_proc_data scst_sgv_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(NULL)
-+ .show = sgv_procinfo_show,
-+};
-+
-+static int scst_groups_names_show(struct seq_file *seq, void *v)
-+{
-+ int res = 0;
-+ struct scst_acg *acg = (struct scst_acg *)seq->private;
-+ struct scst_acn *name;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ list_for_each_entry(name, &acg->acn_list, acn_list_entry) {
-+ seq_printf(seq, "%s\n", name->name);
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct scst_proc_data scst_groups_names_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(scst_proc_groups_names_write)
-+ .show = scst_groups_names_show,
-+};
-+
-+static int scst_groups_addr_method_show(struct seq_file *seq, void *v)
-+{
-+ int res = 0;
-+ struct scst_acg *acg = (struct scst_acg *)seq->private;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ switch (acg->addr_method) {
-+ case SCST_LUN_ADDR_METHOD_FLAT:
-+ seq_printf(seq, "%s\n", "FLAT");
-+ break;
-+ case SCST_LUN_ADDR_METHOD_PERIPHERAL:
-+ seq_printf(seq, "%s\n", "PERIPHERAL");
-+ break;
-+ default:
-+ seq_printf(seq, "%s\n", "UNKNOWN");
-+ break;
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+static struct scst_proc_data scst_groups_addr_method_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(NULL)
-+ .show = scst_groups_addr_method_show,
-+};
-+static int scst_groups_devices_show(struct seq_file *seq, void *v)
-+{
-+ int res = 0;
-+ struct scst_acg *acg = (struct scst_acg *)seq->private;
-+ struct scst_acg_dev *acg_dev;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ seq_printf(seq, "%-60s%-13s%s\n", "Device (host:ch:id:lun or name)",
-+ "LUN", "Options");
-+
-+ list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
-+ seq_printf(seq, "%-60s%-13lld%s\n",
-+ acg_dev->dev->virt_name,
-+ (long long unsigned int)acg_dev->lun,
-+ acg_dev->rd_only ? "RO" : "");
-+ }
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct scst_proc_data scst_groups_devices_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(scst_proc_groups_devices_write)
-+ .show = scst_groups_devices_show,
-+};
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+
-+static int scst_proc_read_tlb(const struct scst_trace_log *tbl,
-+ struct seq_file *seq,
-+ unsigned long log_level, int *first)
-+{
-+ const struct scst_trace_log *t = tbl;
-+ int res = 0;
-+
-+ while (t->token) {
-+ if (log_level & t->val) {
-+ seq_printf(seq, "%s%s", *first ? "" : " | ", t->token);
-+ *first = 0;
-+ }
-+ t++;
-+ }
-+ return res;
-+}
-+
-+int scst_proc_log_entry_read(struct seq_file *seq, unsigned long log_level,
-+ const struct scst_trace_log *tbl)
-+{
-+ int res = 0, first = 1;
-+
-+ TRACE_ENTRY();
-+
-+ scst_proc_read_tlb(scst_proc_trace_tbl, seq, log_level, &first);
-+
-+ if (tbl)
-+ scst_proc_read_tlb(tbl, seq, log_level, &first);
-+
-+ seq_printf(seq, "%s\n", first ? "none" : "");
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(scst_proc_log_entry_read);
-+
-+static int log_info_show(struct seq_file *seq, void *v)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_log_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ res = scst_proc_log_entry_read(seq, trace_flag,
-+ scst_proc_local_trace_tbl);
-+
-+ mutex_unlock(&scst_log_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct scst_proc_data scst_log_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(scst_proc_scsi_tgt_gen_write_log)
-+ .show = log_info_show,
-+ .data = "scsi_tgt",
-+};
-+
-+#endif
-+
-+static int scst_tgt_info_show(struct seq_file *seq, void *v)
-+{
-+ int res = 0;
-+ struct scst_device *dev;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ seq_printf(seq, "%-60s%s\n", "Device (host:ch:id:lun or name)",
-+ "Device handler");
-+ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
-+ seq_printf(seq, "%-60s%s\n",
-+ dev->virt_name, dev->handler->name);
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct scst_proc_data scst_tgt_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(scst_proc_scsi_tgt_gen_write)
-+ .show = scst_tgt_info_show,
-+};
-+
-+static int scst_threads_info_show(struct seq_file *seq, void *v)
-+{
-+ TRACE_ENTRY();
-+
-+ seq_printf(seq, "%d\n", scst_main_cmd_threads.nr_threads);
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+static struct scst_proc_data scst_threads_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(scst_proc_threads_write)
-+ .show = scst_threads_info_show,
-+};
-+
-+static int scst_scsi_tgtinfo_show(struct seq_file *seq, void *v)
-+{
-+ struct scst_tgt *vtt = seq->private;
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_proc_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ if (vtt->tgtt->read_proc)
-+ res = vtt->tgtt->read_proc(seq, vtt);
-+
-+ mutex_unlock(&scst_proc_mutex);
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct scst_proc_data scst_scsi_tgt_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(scst_proc_scsi_tgt_write)
-+ .show = scst_scsi_tgtinfo_show,
-+};
-+
-+static int scst_dev_handler_info_show(struct seq_file *seq, void *v)
-+{
-+ struct scst_dev_type *dev_type = seq->private;
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_proc_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ if (dev_type->read_proc)
-+ res = dev_type->read_proc(seq, dev_type);
-+
-+ mutex_unlock(&scst_proc_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct scst_proc_data scst_dev_handler_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(scst_proc_scsi_dev_handler_write)
-+ .show = scst_dev_handler_info_show,
-+};
-+
-+struct proc_dir_entry *scst_create_proc_entry(struct proc_dir_entry *root,
-+ const char *name, struct scst_proc_data *pdata)
-+{
-+ struct proc_dir_entry *p = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ if (root) {
-+ mode_t mode;
-+
-+ mode = S_IFREG | S_IRUGO | (pdata->seq_op.write ? S_IWUSR : 0);
-+ p = create_proc_entry(name, mode, root);
-+ if (p == NULL) {
-+ PRINT_ERROR("Fail to create entry %s in /proc", name);
-+ } else {
-+ p->proc_fops = &pdata->seq_op;
-+ p->data = pdata->data;
-+ }
-+ }
-+
-+ TRACE_EXIT();
-+ return p;
-+}
-+EXPORT_SYMBOL_GPL(scst_create_proc_entry);
-+
-+int scst_single_seq_open(struct inode *inode, struct file *file)
-+{
-+ struct scst_proc_data *pdata = container_of(PDE(inode)->proc_fops,
-+ struct scst_proc_data, seq_op);
-+ return single_open(file, pdata->show, PDE(inode)->data);
-+}
-+EXPORT_SYMBOL_GPL(scst_single_seq_open);
-+
-+struct proc_dir_entry *scst_proc_get_tgt_root(
-+ struct scst_tgt_template *vtt)
-+{
-+ return vtt->proc_tgt_root;
-+}
-+EXPORT_SYMBOL_GPL(scst_proc_get_tgt_root);
-+
-+struct proc_dir_entry *scst_proc_get_dev_type_root(
-+ struct scst_dev_type *dtt)
-+{
-+ return dtt->proc_dev_type_root;
-+}
-+EXPORT_SYMBOL_GPL(scst_proc_get_dev_type_root);
-diff -uprN orig/linux-2.6.36/include/scst/scst_sgv.h linux-2.6.36/include/scst/scst_sgv.h
---- orig/linux-2.6.36/include/scst/scst_sgv.h
-+++ linux-2.6.36/include/scst/scst_sgv.h
+diff -uprN orig/linux-2.6.39/include/scst/scst_sgv.h linux-2.6.39/include/scst/scst_sgv.h
+--- orig/linux-2.6.39/include/scst/scst_sgv.h
++++ linux-2.6.39/include/scst/scst_sgv.h
@@ -0,0 +1,98 @@
+/*
+ * include/scst_sgv.h
@@ -33617,10 +32512,10 @@ diff -uprN orig/linux-2.6.36/include/scst/scst_sgv.h linux-2.6.36/include/scst/s
+void scst_init_mem_lim(struct scst_mem_lim *mem_lim);
+
+#endif /* __SCST_SGV_H */
-diff -uprN orig/linux-2.6.36/drivers/scst/scst_mem.h linux-2.6.36/drivers/scst/scst_mem.h
---- orig/linux-2.6.36/drivers/scst/scst_mem.h
-+++ linux-2.6.36/drivers/scst/scst_mem.h
-@@ -0,0 +1,151 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/scst_mem.h linux-2.6.39/drivers/scst/scst_mem.h
+--- orig/linux-2.6.39/drivers/scst/scst_mem.h
++++ linux-2.6.39/drivers/scst/scst_mem.h
+@@ -0,0 +1,142 @@
+/*
+ * scst_mem.h
+ *
@@ -33749,7 +32644,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_mem.h linux-2.6.36/drivers/scst/s
+ struct kobject sgv_kobj;
+
+ /* sysfs release completion */
-+ struct completion sgv_kobj_release_cmpl;
++ struct completion *sgv_kobj_release_cmpl;
+};
+
+static inline struct scatterlist *sgv_pool_sg(struct sgv_pool_obj *obj)
@@ -33760,22 +32655,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_mem.h linux-2.6.36/drivers/scst/s
+int scst_sgv_pools_init(unsigned long mem_hwmark, unsigned long mem_lwmark);
+void scst_sgv_pools_deinit(void);
+
-+ssize_t sgv_sysfs_stat_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf);
-+ssize_t sgv_sysfs_stat_reset(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count);
-+ssize_t sgv_sysfs_global_stat_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf);
-+ssize_t sgv_sysfs_global_stat_reset(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count);
-+
+void scst_sgv_pool_use_norm(struct scst_tgt_dev *tgt_dev);
+void scst_sgv_pool_use_norm_clust(struct scst_tgt_dev *tgt_dev);
+void scst_sgv_pool_use_dma(struct scst_tgt_dev *tgt_dev);
-diff -uprN orig/linux-2.6.36/drivers/scst/scst_mem.c linux-2.6.36/drivers/scst/scst_mem.c
---- orig/linux-2.6.36/drivers/scst/scst_mem.c
-+++ linux-2.6.36/drivers/scst/scst_mem.c
-@@ -0,0 +1,1880 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/scst_mem.c linux-2.6.39/drivers/scst/scst_mem.c
+--- orig/linux-2.6.39/drivers/scst/scst_mem.c
++++ linux-2.6.39/drivers/scst/scst_mem.c
+@@ -0,0 +1,2001 @@
+/*
+ * scst_mem.c
+ *
@@ -33845,6 +32731,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_mem.c linux-2.6.36/drivers/scst/s
+ */
+static LIST_HEAD(sgv_pools_list);
+
++static struct kobject *scst_sgv_kobj;
++static int scst_sgv_sysfs_create(struct sgv_pool *pool);
++static void scst_sgv_sysfs_del(struct sgv_pool *pool);
++
+static inline bool sgv_pool_clustered(const struct sgv_pool *pool)
+{
+ return pool->clustering_type != sgv_no_clustering;
@@ -34105,6 +32995,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_mem.c linux-2.6.36/drivers/scst/s
+
+static int sgv_shrink(struct shrinker *shrinker, int nr, gfp_t gfpm)
+{
++
+ TRACE_ENTRY();
+
+ if (nr > 0) {
@@ -34158,7 +33049,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_mem.c linux-2.6.36/drivers/scst/s
+ /*
+ * Let's reschedule it for full period to not get here
+ * too often. In the worst case we have shrinker
-+ * to reclaim buffers quickier.
++ * to reclaim buffers more quickly.
+ */
+ TRACE_MEM("Rescheduling purge work for pool %p (delay "
+ "%d HZ/%d sec)", pool, pool->purge_interval,
@@ -34299,17 +33190,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_mem.c linux-2.6.36/drivers/scst/s
+ while (pages > 0) {
+ int order = 0;
+
-+/*
-+ * __free_pages() doesn't like freeing pages with not that order with
-+ * which they were allocated, so disable this small optimization.
-+ */
-+#if 0
-+ if (len > 0) {
-+ while (((1 << order) << PAGE_SHIFT) < len)
-+ order++;
-+ len = 0;
-+ }
-+#endif
+ TRACE_MEM("free_pages(): order %d, page %lx",
+ order, (unsigned long)p);
+
@@ -35194,7 +34074,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_mem.c linux-2.6.36/drivers/scst/s
+ for (i = 0; i < pool->max_caches; i++) {
+ sgv_pool_init_cache(pool, i);
+ if (pool->caches[i] == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "Allocation of sgv_pool "
++ PRINT_ERROR("Allocation of sgv_pool "
+ "cache %s(%d) failed", name, i);
+ goto out_free;
+ }
@@ -35254,7 +34134,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_mem.c linux-2.6.36/drivers/scst/s
+}
+
+/**
-+ * sgv_pool_flush - flushe the SGV pool
++ * sgv_pool_flush() - flushes the SGV pool.
+ *
+ * Flushes, i.e. frees, all the cached entries in the SGV pool.
+ */
@@ -35396,7 +34276,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_mem.c linux-2.6.36/drivers/scst/s
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (pool == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of sgv_pool failed");
++ PRINT_ERROR("Allocation of sgv_pool failed (size %zd)",
++ sizeof(*pool));
+ goto out_unlock;
+ }
+
@@ -35527,7 +34408,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_mem.c linux-2.6.36/drivers/scst/s
+ return;
+}
+
-+ssize_t sgv_sysfs_stat_show(struct kobject *kobj,
++static ssize_t sgv_sysfs_stat_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct sgv_pool *pool;
@@ -35583,7 +34464,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_mem.c linux-2.6.36/drivers/scst/s
+ return res;
+}
+
-+ssize_t sgv_sysfs_stat_reset(struct kobject *kobj,
++static ssize_t sgv_sysfs_stat_reset(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct sgv_pool *pool;
@@ -35606,13 +34487,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_mem.c linux-2.6.36/drivers/scst/s
+ atomic_set(&pool->other_merged, 0);
+ atomic_set(&pool->other_alloc, 0);
+
-+ PRINT_INFO("Statistics for SGV pool %s resetted", pool->name);
++ PRINT_INFO("Statistics for SGV pool %s reset", pool->name);
+
+ TRACE_EXIT_RES(count);
+ return count;
+}
+
-+ssize_t sgv_sysfs_global_stat_show(struct kobject *kobj,
++static ssize_t sgv_sysfs_global_stat_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct sgv_pool *pool;
@@ -35641,7 +34522,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_mem.c linux-2.6.36/drivers/scst/s
+ return res;
+}
+
-+ssize_t sgv_sysfs_global_stat_reset(struct kobject *kobj,
++static ssize_t sgv_sysfs_global_stat_reset(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ TRACE_ENTRY();
@@ -35650,22 +34531,161 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_mem.c linux-2.6.36/drivers/scst/s
+ atomic_set(&sgv_releases_on_hiwmk_failed, 0);
+ atomic_set(&sgv_other_total_alloc, 0);
+
-+ PRINT_INFO("%s", "Global SGV pool statistics resetted");
++ PRINT_INFO("%s", "Global SGV pool statistics reset");
+
+ TRACE_EXIT_RES(count);
+ return count;
+}
+
-diff -uprN orig/linux-2.6.36/Documentation/scst/sgv_cache.txt linux-2.6.36/Documentation/scst/sgv_cache.txt
---- orig/linux-2.6.36/Documentation/scst/sgv_cache.txt
-+++ linux-2.6.36/Documentation/scst/sgv_cache.txt
-@@ -0,0 +1,224 @@
-+ SCST SGV CACHE.
++static struct kobj_attribute sgv_stat_attr =
++ __ATTR(stats, S_IRUGO | S_IWUSR, sgv_sysfs_stat_show,
++ sgv_sysfs_stat_reset);
++
++static struct attribute *sgv_attrs[] = {
++ &sgv_stat_attr.attr,
++ NULL,
++};
++
++static void sgv_kobj_release(struct kobject *kobj)
++{
++ struct sgv_pool *pool;
++
++ TRACE_ENTRY();
+
-+ PROGRAMMING INTERFACE DESCRIPTION.
++ pool = container_of(kobj, struct sgv_pool, sgv_kobj);
++ if (pool->sgv_kobj_release_cmpl != NULL)
++ complete_all(pool->sgv_kobj_release_cmpl);
++
++ TRACE_EXIT();
++ return;
++}
++
++static struct kobj_type sgv_pool_ktype = {
++ .sysfs_ops = &scst_sysfs_ops,
++ .release = sgv_kobj_release,
++ .default_attrs = sgv_attrs,
++};
++
++static int scst_sgv_sysfs_create(struct sgv_pool *pool)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ res = kobject_init_and_add(&pool->sgv_kobj, &sgv_pool_ktype,
++ scst_sgv_kobj, pool->name);
++ if (res != 0) {
++ PRINT_ERROR("Can't add sgv pool %s to sysfs", pool->name);
++ goto out;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void scst_sgv_sysfs_del(struct sgv_pool *pool)
++{
++ int rc;
++ DECLARE_COMPLETION_ONSTACK(c);
++
++ TRACE_ENTRY();
++
++ pool->sgv_kobj_release_cmpl = &c;
++
++ kobject_del(&pool->sgv_kobj);
++ kobject_put(&pool->sgv_kobj);
++
++ rc = wait_for_completion_timeout(pool->sgv_kobj_release_cmpl, HZ);
++ if (rc == 0) {
++ PRINT_INFO("Waiting for releasing sysfs entry "
++ "for SGV pool %s (%d refs)...", pool->name,
++ atomic_read(&pool->sgv_kobj.kref.refcount));
++ wait_for_completion(pool->sgv_kobj_release_cmpl);
++ PRINT_INFO("Done waiting for releasing sysfs "
++ "entry for SGV pool %s", pool->name);
++ }
++
++ TRACE_EXIT();
++}
++
++static struct kobj_attribute sgv_global_stat_attr =
++ __ATTR(global_stats, S_IRUGO | S_IWUSR, sgv_sysfs_global_stat_show,
++ sgv_sysfs_global_stat_reset);
++
++static struct attribute *sgv_default_attrs[] = {
++ &sgv_global_stat_attr.attr,
++ NULL,
++};
++
++static void scst_sysfs_release(struct kobject *kobj)
++{
++ kfree(kobj);
++}
++
++static struct kobj_type sgv_ktype = {
++ .sysfs_ops = &scst_sysfs_ops,
++ .release = scst_sysfs_release,
++ .default_attrs = sgv_default_attrs,
++};
++
++/**
++ * scst_add_sgv_kobj() - Initialize and add the root SGV kernel object.
++ */
++int scst_add_sgv_kobj(struct kobject *parent, const char *name)
++{
++ int res;
++
++ WARN_ON(scst_sgv_kobj);
++ res = -ENOMEM;
++ scst_sgv_kobj = kzalloc(sizeof(*scst_sgv_kobj), GFP_KERNEL);
++ if (!scst_sgv_kobj)
++ goto out;
++ res = kobject_init_and_add(scst_sgv_kobj, &sgv_ktype, parent, name);
++ if (res != 0)
++ goto out_free;
++out:
++ return res;
++out_free:
++ kobject_put(scst_sgv_kobj);
++ scst_sgv_kobj = NULL;
++ goto out;
++}
++
++/**
++ * scst_del_put_sgv_kobj() - Remove the root SGV kernel object.
++ */
++void scst_del_put_sgv_kobj(void)
++{
++ WARN_ON(!scst_sgv_kobj);
++ kobject_del(scst_sgv_kobj);
++ kobject_put(scst_sgv_kobj);
++ scst_sgv_kobj = NULL;
++}
++
+diff -uprN orig/linux-2.6.39/Documentation/scst/sgv_cache.sgml linux-2.6.39/Documentation/scst/sgv_cache.sgml
+--- orig/linux-2.6.39/Documentation/scst/sgv_cache.sgml
++++ linux-2.6.39/Documentation/scst/sgv_cache.sgml
+@@ -0,0 +1,335 @@
++<!doctype linuxdoc system>
++
++<article>
++
++<title>
++SCST SGV cache description
++</title>
+
-+ For SCST version 1.0.2
++<author>
++ <name>Vladislav Bolkhovitin</name>
++</author>
+
++<date>Version 2.1.0</date>
++
++<toc>
++
++<sect>Introduction
++
++<p>
+SCST SGV cache is a memory management subsystem in SCST. One can call it
+a "memory pool", but Linux kernel already have a mempool interface,
+which serves different purposes. SGV cache provides to SCST core, target
@@ -35675,50 +34695,65 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/sgv_cache.txt linux-2.6.36/Docum
+used anymore, but keeps it for a while (possibly indefinitely) to let it
+be reused by the next consecutive command. This allows to:
+
-+ - Reduce commands processing latencies and, hence, improve performance;
++<itemize>
++
++<item> Reduce commands processing latencies and, hence, improve performance;
+
-+ - Make commands processing latencies predictable, which is essential
++<item> Make commands processing latencies predictable, which is essential
+ for RT applications.
+
++</itemize>
++
+The freed SG vectors are kept by the SGV cache either for some (possibly
+indefinite) time, or, optionally, until the system needs more memory and
+asks to free some using the set_shrinker() interface. Also the SGV cache
+allows to:
+
-+ - Cluster pages together. "Cluster" means merging adjacent pages in a
++<itemize>
++
++<item> Cluster pages together. "Cluster" means merging adjacent pages in a
+single SG entry. It allows to have less SG entries in the resulting SG
+vector, hence improve performance handling it as well as allow to
+work with bigger buffers on hardware with limited SG capabilities.
+
-+ - Set custom page allocator functions. For instance, scst_user device
++<item> Set custom page allocator functions. For instance, scst_user device
+handler uses this facility to eliminate unneeded mapping/unmapping of
+user space pages and avoid unneeded IOCTL calls for buffers allocations.
+In fileio_tgt application, which uses a regular malloc() function to
+allocate data buffers, this facility allows ~30% less CPU load and
+considerable performance increase.
+
-+ - Prevent each initiator or all initiators altogether to allocate too
++<item> Prevent each initiator or all initiators altogether to allocate too
+much memory and DoS the target. Consider 10 initiators, which can have
+access to 10 devices each. Any of them can queue up to 64 commands, each
+can transfer up to 1MB of data. So, all of them in a peak can allocate
+up to 10*10*64 = ~6.5GB of memory for data buffers. This amount must be
+limited somehow and the SGV cache performs this function.
+
++</itemize>
++
++<sect> Implementation
++
++<p>
+From implementation POV the SGV cache is a simple extension of the kmem
+cache. It can work in 2 modes:
+
-+1. With fixed size buffers.
++<enum>
+
-+2. With a set of power 2 size buffers. In this mode each SGV cache
++<item> With fixed size buffers.
++
++<item> With a set of power 2 size buffers. In this mode each SGV cache
+(struct sgv_pool) has SGV_POOL_ELEMENTS (11 currently) of kmem caches.
+Each of those kmem caches keeps SGV cache objects (struct sgv_pool_obj)
+corresponding to SG vectors with size of order X pages. For instance,
-+request to allocate 4 pages will be served from kmem cache[2], since the
++request to allocate 4 pages will be served from kmem cache&lsqb;2&rsqb, since the
+order of the of number of requested pages is 2. If later request to
+allocate 11KB comes, the same SG vector with 4 pages will be reused (see
+below). This mode is in average allows less memory overhead comparing
+with the fixed size buffers mode.
+
++</enum>
++
+Consider how the SGV cache works in the set of buffers mode. When a
+request to allocate new SG vector comes, sgv_pool_alloc() via
+sgv_get_obj() checks if there is already a cached vector with that
@@ -35744,150 +34779,233 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/sgv_cache.txt linux-2.6.36/Docum
+either by the purge work, which is scheduled once in 60 seconds, or in
+sgv_shrink() called by system, when it's asking for memory.
+
-+ Interface.
++<sect> Interface
+
-+struct sgv_pool *sgv_pool_create(const char *name,
++<sect1> sgv_pool *sgv_pool_create()
++
++<p>
++<verb>
++struct sgv_pool *sgv_pool_create(
++ const char *name,
+ enum sgv_clustering_types clustered, int single_alloc_pages,
+ bool shared, int purge_interval)
++</verb>
+
+This function creates and initializes an SGV cache. It has the following
+arguments:
+
-+ - name - the name of the SGV cache
++<itemize>
+
-+ - clustered - sets type of the pages clustering. The type can be:
++<item> <bf/name/ - the name of the SGV cache
+
-+ * sgv_no_clustering - no clustering performed.
++<item> <bf/clustered/ - sets type of the pages clustering. The type can be:
+
-+ * sgv_tail_clustering - a page will only be merged with the latest
-+ previously allocated page, so the order of pages in the SG will be
-+ preserved
++ <itemize>
+
-+ * sgv_full_clustering - free merging of pages at any place in
-+ the SG is allowed. This mode usually provides the best merging
-+ rate.
++ <item> <bf/sgv_no_clustering/ - no clustering performed.
+
-+ - single_alloc_pages - if 0, then the SGV cache will work in the set of
++ <item> <bf/sgv_tail_clustering/ - a page will only be merged with the latest
++ previously allocated page, so the order of pages in the SG will be
++ preserved
++
++ <item> <bf/sgv_full_clustering/ - free merging of pages at any place in
++ the SG is allowed. This mode usually provides the best merging
++ rate.
++
++ </itemize>
++
++<item> <bf/single_alloc_pages/ - if 0, then the SGV cache will work in the set of
+ power 2 size buffers mode. If >0, then the SGV cache will work in the
+ fixed size buffers mode. In this case single_alloc_pages sets the
+ size of each buffer in pages.
+
-+ - shared - sets if the SGV cache can be shared between devices or not.
++<item> <bf/shared/ - sets if the SGV cache can be shared between devices or not.
+ The cache sharing allowed only between devices created inside the same
+ address space. If an SGV cache is shared, each subsequent call of
+ sgv_pool_create() with the same cache name will not create a new cache,
+ but instead return a reference to it.
+
-+ - purge_interval - sets the cache purging interval. I.e. an SG buffer
++<item> <bf/purge_interval/ - sets the cache purging interval. I.e. an SG buffer
+ will be freed if it's unused for time t purge_interval <= t <
+ 2*purge_interval. If purge_interval is 0, then the default interval
+ will be used (60 seconds). If purge_interval <0, then the automatic
+ purging will be disabled. Shrinking by the system's demand will also
+ be disabled.
+
++</itemize>
++
+Returns the resulting SGV cache or NULL in case of any error.
+
-+void sgv_pool_del(struct sgv_pool *pool)
++<sect1> void sgv_pool_del()
++
++<p>
++<verb>
++void sgv_pool_del(
++ struct sgv_pool *pool)
++</verb>
+
+This function deletes the corresponding SGV cache. If the cache is
+shared, it will decrease its reference counter. If the reference counter
+reaches 0, the cache will be destroyed.
+
-+void sgv_pool_flush(struct sgv_pool *pool)
++<sect1> void sgv_pool_flush()
++
++<p>
++<verb>
++void sgv_pool_flush(
++ struct sgv_pool *pool)
++</verb>
+
+This function flushes, i.e. frees, all the cached entries in the SGV
+cache.
+
-+void sgv_pool_set_allocator(struct sgv_pool *pool,
++<sect1> void sgv_pool_set_allocator()
++
++<p>
++<verb>
++void sgv_pool_set_allocator(
++ struct sgv_pool *pool,
+ struct page *(*alloc_pages_fn)(struct scatterlist *sg, gfp_t gfp, void *priv),
+ void (*free_pages_fn)(struct scatterlist *sg, int sg_count, void *priv));
++</verb>
+
+This function allows to set for the SGV cache a custom pages allocator. For
+instance, scst_user uses such function to supply to the cache mapped from
+user space pages.
+
-+alloc_pages_fn() has the following parameters:
++<bf/alloc_pages_fn()/ has the following parameters:
++
++<itemize>
++
++<item> <bf/sg/ - SG entry, to which the allocated page should be added.
+
-+ - sg - SG entry, to which the allocated page should be added.
++<item> <bf/gfp/ - the allocation GFP flags
+
-+ - gfp - the allocation GFP flags
++<item> <bf/priv/ - pointer to a private data supplied to sgv_pool_alloc()
+
-+ - priv - pointer to a private data supplied to sgv_pool_alloc()
++</itemize>
+
+This function should return the allocated page or NULL, if no page was
+allocated.
+
-+free_pages_fn() has the following parameters:
+
-+ - sg - SG vector to free
++<bf/free_pages_fn()/ has the following parameters:
+
-+ - sg_count - number of SG entries in the sg
++<itemize>
+
-+ - priv - pointer to a private data supplied to the corresponding sgv_pool_alloc()
++<item> <bf/sg/ - SG vector to free
+
-+struct scatterlist *sgv_pool_alloc(struct sgv_pool *pool, unsigned int size,
-+ gfp_t gfp_mask, int flags, int *count,
-+ struct sgv_pool_obj **sgv, struct scst_mem_lim *mem_lim, void *priv)
++<item> <bf/sg_count/ - number of SG entries in the sg
++
++<item> <bf/priv/ - pointer to a private data supplied to the
++corresponding sgv_pool_alloc()
++
++</itemize>
++
++<sect1> struct scatterlist *sgv_pool_alloc()
++
++<p>
++<verb>
++struct scatterlist *sgv_pool_alloc(
++ struct sgv_pool *pool,
++ unsigned int size,
++ gfp_t gfp_mask,
++ int flags,
++ int *count,
++ struct sgv_pool_obj **sgv,
++ struct scst_mem_lim *mem_lim,
++ void *priv)
++</verb>
+
+This function allocates an SG vector from the SGV cache. It has the
+following parameters:
+
-+ - pool - the cache to alloc from
++<itemize>
+
-+ - size - size of the resulting SG vector in bytes
++<item> <bf/pool/ - the cache to alloc from
+
-+ - gfp_mask - the allocation mask
++<item> <bf/size/ - size of the resulting SG vector in bytes
+
-+ - flags - the allocation flags. The following flags are possible and
++<item> <bf/gfp_mask/ - the allocation mask
++
++<item> <bf/flags/ - the allocation flags. The following flags are possible and
+ can be set using OR operation:
+
-+ * SGV_POOL_ALLOC_NO_CACHED - the SG vector must not be cached.
++ <enum>
++
++ <item> <bf/SGV_POOL_ALLOC_NO_CACHED/ - the SG vector must not be cached.
+
-+ * SGV_POOL_NO_ALLOC_ON_CACHE_MISS - don't do an allocation on a
++ <item> <bf/SGV_POOL_NO_ALLOC_ON_CACHE_MISS/ - don't do an allocation on a
+ cache miss.
+
-+ * SGV_POOL_RETURN_OBJ_ON_ALLOC_FAIL - return an empty SGV object,
++ <item> <bf/SGV_POOL_RETURN_OBJ_ON_ALLOC_FAIL/ - return an empty SGV object,
+ i.e. without the SG vector, if the allocation can't be completed.
+ For instance, because SGV_POOL_NO_ALLOC_ON_CACHE_MISS flag set.
+
-+ - count - the resulting count of SG entries in the resulting SG vector.
++ </enum>
+
-+ - sgv - the resulting SGV object. It should be used to free the
++<item> <bf/count/ - the resulting count of SG entries in the resulting SG vector.
++
++<item> <bf/sgv/ - the resulting SGV object. It should be used to free the
+ resulting SG vector.
+
-+ - mem_lim - memory limits, see below.
++<item> <bf/mem_lim/ - memory limits, see below.
+
-+ - priv - pointer to private for this allocation data. This pointer will
++<item> <bf/priv/ - pointer to private for this allocation data. This pointer will
+ be supplied to alloc_pages_fn() and free_pages_fn() and can be
+ retrieved by sgv_get_priv().
+
++</itemize>
++
+This function returns pointer to the resulting SG vector or NULL in case
+of any error.
+
-+void sgv_pool_free(struct sgv_pool_obj *sgv, struct scst_mem_lim *mem_lim)
++<sect1> void sgv_pool_free()
++
++<p>
++<verb>
++void sgv_pool_free(
++ struct sgv_pool_obj *sgv,
++ struct scst_mem_lim *mem_lim)
++</verb>
+
+This function frees previously allocated SG vector, referenced by SGV
+cache object sgv.
+
-+void *sgv_get_priv(struct sgv_pool_obj *sgv)
++<sect1> void *sgv_get_priv(struct sgv_pool_obj *sgv)
++
++<p>
++<verb>
++void *sgv_get_priv(
++ struct sgv_pool_obj *sgv)
++</verb>
+
+This function allows to get the allocation private data for this SGV
+cache object sgv. The private data are set by sgv_pool_alloc().
+
-+void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
++<sect1> void scst_init_mem_lim()
++
++<p>
++<verb>
++void scst_init_mem_lim(
++ struct scst_mem_lim *mem_lim)
++</verb>
+
+This function initializes memory limits structure mem_lim according to
+the current system configuration. This structure should be latter used
+to track and limit allocated by one or more SGV caches memory.
+
-+ Runtime information and statistics.
+
++<sect> Runtime information and statistics.
++
++<p>
+Runtime information and statistics is available in /sys/kernel/scst_tgt/sgv.
+
-diff -uprN orig/linux-2.6.36/include/scst/scst_user.h linux-2.6.36/include/scst/scst_user.h
---- orig/linux-2.6.36/include/scst/scst_user.h
-+++ linux-2.6.36/include/scst/scst_user.h
-@@ -0,0 +1,322 @@
++</article>
+diff -uprN orig/linux-2.6.39/include/scst/scst_user.h linux-2.6.39/include/scst/scst_user.h
+--- orig/linux-2.6.39/include/scst/scst_user.h
++++ linux-2.6.39/include/scst/scst_user.h
+@@ -0,0 +1,320 @@
+/*
+ * include/scst_user.h
+ *
@@ -35917,9 +35035,9 @@ diff -uprN orig/linux-2.6.36/include/scst/scst_user.h linux-2.6.36/include/scst/
+
+#define DEV_USER_NAME "scst_user"
+#define DEV_USER_PATH "/dev/"
-+#define DEV_USER_VERSION_NAME "2.0.0.1"
++#define DEV_USER_VERSION_NAME SCST_VERSION_NAME
+#define DEV_USER_VERSION \
-+ DEV_USER_VERSION_NAME "$Revision: 3165 $" SCST_CONST_VERSION
++ DEV_USER_VERSION_NAME "$Revision: 3281 $" SCST_CONST_VERSION
+
+#define SCST_USER_PARSE_STANDARD 0
+#define SCST_USER_PARSE_CALL 1
@@ -36009,7 +35127,6 @@ diff -uprN orig/linux-2.6.36/include/scst/scst_user.h linux-2.6.36/include/scst/
+
+ uint8_t cdb[SCST_MAX_CDB_SIZE];
+ uint16_t cdb_len;
-+ uint16_t ext_cdb_len;
+
+ int32_t timeout;
+ int32_t bufflen;
@@ -36033,7 +35150,6 @@ diff -uprN orig/linux-2.6.36/include/scst/scst_user.h linux-2.6.36/include/scst/
+
+ uint8_t cdb[SCST_MAX_CDB_SIZE];
+ uint16_t cdb_len;
-+ uint16_t ext_cdb_len;
+
+ int32_t alloc_len;
+
@@ -36048,7 +35164,6 @@ diff -uprN orig/linux-2.6.36/include/scst/scst_user.h linux-2.6.36/include/scst/
+
+ uint8_t cdb[SCST_MAX_CDB_SIZE];
+ uint16_t cdb_len;
-+ uint16_t ext_cdb_len;
+
+ int32_t data_len;
+ int32_t bufflen;
@@ -36116,6 +35231,7 @@ diff -uprN orig/linux-2.6.36/include/scst/scst_user.h linux-2.6.36/include/scst/
+ uint32_t op_flags;
+ int32_t data_len;
+ int32_t bufflen;
++ int32_t out_bufflen;
+ };
+ struct {
+ uint8_t sense_len;
@@ -36210,10 +35326,10 @@ diff -uprN orig/linux-2.6.36/include/scst/scst_user.h linux-2.6.36/include/scst/
+ _IOWR('s', UCMD_STATE_TM_EXECING, struct scst_user_tm)
+
+#endif /* __SCST_USER_H */
-diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/drivers/scst/dev_handlers/scst_user.c
---- orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c
-+++ linux-2.6.36/drivers/scst/dev_handlers/scst_user.c
-@@ -0,0 +1,3739 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/dev_handlers/scst_user.c linux-2.6.39/drivers/scst/dev_handlers/scst_user.c
+--- orig/linux-2.6.39/drivers/scst/dev_handlers/scst_user.c
++++ linux-2.6.39/drivers/scst/dev_handlers/scst_user.c
+@@ -0,0 +1,3751 @@
+/*
+ * scst_user.c
+ *
@@ -36585,7 +35701,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+static struct page *dev_user_alloc_pages(struct scatterlist *sg,
+ gfp_t gfp_mask, void *priv)
+{
-+ struct scst_user_cmd *ucmd = (struct scst_user_cmd *)priv;
++ struct scst_user_cmd *ucmd = priv;
+ int offset = 0;
+
+ TRACE_ENTRY();
@@ -36688,7 +35804,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
+ void *priv)
+{
-+ struct scst_user_cmd *ucmd = (struct scst_user_cmd *)priv;
++ struct scst_user_cmd *ucmd = priv;
+
+ TRACE_MEM("Freeing data pages (sg=%p, sg_count=%d, priv %p)", sg,
+ sg_count, ucmd);
@@ -36741,7 +35857,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+
+ if (cmd->data_direction != SCST_DATA_BIDI) {
+ orig_bufflen = cmd->bufflen;
-+ pool = (struct sgv_pool *)cmd->tgt_dev->dh_priv;
++ pool = cmd->tgt_dev->dh_priv;
+ } else {
+ /* Make out_sg->offset 0 */
+ int len = cmd->bufflen + ucmd->first_page_offset;
@@ -36775,8 +35891,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ cmd->sg = sgv_pool_alloc(pool, bufflen, gfp_mask, flags, &cmd->sg_cnt,
+ &ucmd->sgv, &dev->udev_mem_lim, ucmd);
+ if (cmd->sg != NULL) {
-+ struct scst_user_cmd *buf_ucmd =
-+ (struct scst_user_cmd *)sgv_get_priv(ucmd->sgv);
++ struct scst_user_cmd *buf_ucmd = sgv_get_priv(ucmd->sgv);
+
+ TRACE_MEM("Buf ucmd %p (cmd->sg_cnt %d, last seg len %d, "
+ "last_len %d, bufflen %d)", buf_ucmd, cmd->sg_cnt,
@@ -36878,9 +35993,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ ucmd->user_cmd.cmd_h = ucmd->h;
+ ucmd->user_cmd.subcode = SCST_USER_ALLOC_MEM;
+ ucmd->user_cmd.alloc_cmd.sess_h = (unsigned long)cmd->tgt_dev;
-+ memcpy(ucmd->user_cmd.alloc_cmd.cdb, cmd->cdb, cmd->cdb_len);
++ memcpy(ucmd->user_cmd.alloc_cmd.cdb, cmd->cdb,
++ min_t(int, SCST_MAX_CDB_SIZE, cmd->cdb_len));
+ ucmd->user_cmd.alloc_cmd.cdb_len = cmd->cdb_len;
-+ ucmd->user_cmd.alloc_cmd.ext_cdb_len = cmd->ext_cdb_len;
+ ucmd->user_cmd.alloc_cmd.alloc_len = ucmd->buff_cached ?
+ (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
+ ucmd->user_cmd.alloc_cmd.queue_type = cmd->queue_type;
@@ -36923,7 +36038,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+
+static int dev_user_get_block(struct scst_cmd *cmd)
+{
-+ struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
++ struct scst_user_dev *dev = cmd->dev->dh_priv;
+ /*
+ * No need for locks here, since *_detach() can not be
+ * called, when there are existing commands.
@@ -36937,7 +36052,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ int rc, res = SCST_CMD_STATE_DEFAULT;
+ struct scst_user_cmd *ucmd;
+ int atomic = scst_cmd_atomic(cmd);
-+ struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
++ struct scst_user_dev *dev = cmd->dev->dh_priv;
+ gfp_t gfp_mask = atomic ? GFP_ATOMIC : GFP_KERNEL;
+
+ TRACE_ENTRY();
@@ -36956,7 +36071,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ ucmd->cmd = cmd;
+ cmd->dh_priv = ucmd;
+ } else {
-+ ucmd = (struct scst_user_cmd *)cmd->dh_priv;
++ ucmd = cmd->dh_priv;
+ TRACE_DBG("Used ucmd %p, state %x", ucmd, ucmd->state);
+ }
+
@@ -36999,9 +36114,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ ucmd->user_cmd.cmd_h = ucmd->h;
+ ucmd->user_cmd.subcode = SCST_USER_PARSE;
+ ucmd->user_cmd.parse_cmd.sess_h = (unsigned long)cmd->tgt_dev;
-+ memcpy(ucmd->user_cmd.parse_cmd.cdb, cmd->cdb, cmd->cdb_len);
++ memcpy(ucmd->user_cmd.parse_cmd.cdb, cmd->cdb,
++ min_t(int, SCST_MAX_CDB_SIZE, cmd->cdb_len));
+ ucmd->user_cmd.parse_cmd.cdb_len = cmd->cdb_len;
-+ ucmd->user_cmd.parse_cmd.ext_cdb_len = cmd->ext_cdb_len;
+ ucmd->user_cmd.parse_cmd.timeout = cmd->timeout / HZ;
+ ucmd->user_cmd.parse_cmd.bufflen = cmd->bufflen;
+ ucmd->user_cmd.parse_cmd.out_bufflen = cmd->out_bufflen;
@@ -37052,7 +36167,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+static int dev_user_alloc_data_buf(struct scst_cmd *cmd)
+{
+ int res = SCST_CMD_STATE_DEFAULT;
-+ struct scst_user_cmd *ucmd = (struct scst_user_cmd *)cmd->dh_priv;
++ struct scst_user_cmd *ucmd = cmd->dh_priv;
+
+ TRACE_ENTRY();
+
@@ -37088,7 +36203,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ */
+
+ for (i = 0; (bufflen > 0) && (i < buf_ucmd->num_data_pages); i++) {
-+ struct page *page;
++ struct page *page __attribute__((unused));
+ page = buf_ucmd->data_pages[i];
+#ifdef ARCH_HAS_FLUSH_ANON_PAGE
+ struct vm_area_struct *vma = find_vma(current->mm, start);
@@ -37107,7 +36222,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+
+static int dev_user_exec(struct scst_cmd *cmd)
+{
-+ struct scst_user_cmd *ucmd = (struct scst_user_cmd *)cmd->dh_priv;
++ struct scst_user_cmd *ucmd = cmd->dh_priv;
+ int res = SCST_EXEC_COMPLETED;
+
+ TRACE_ENTRY();
@@ -37119,17 +36234,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ if (cmd->data_direction & SCST_DATA_WRITE)
+ dev_user_flush_dcache(ucmd);
+
-+ BUILD_BUG_ON(sizeof(ucmd->user_cmd.exec_cmd.cdb) != sizeof(cmd->cdb));
-+
+ ucmd->user_cmd_payload_len =
+ offsetof(struct scst_user_get_cmd, exec_cmd) +
+ sizeof(ucmd->user_cmd.exec_cmd);
+ ucmd->user_cmd.cmd_h = ucmd->h;
+ ucmd->user_cmd.subcode = SCST_USER_EXEC;
+ ucmd->user_cmd.exec_cmd.sess_h = (unsigned long)cmd->tgt_dev;
-+ memcpy(ucmd->user_cmd.exec_cmd.cdb, cmd->cdb, cmd->cdb_len);
++ memcpy(ucmd->user_cmd.exec_cmd.cdb, cmd->cdb,
++ min_t(int, SCST_MAX_CDB_SIZE, cmd->cdb_len));
+ ucmd->user_cmd.exec_cmd.cdb_len = cmd->cdb_len;
-+ ucmd->user_cmd.exec_cmd.ext_cdb_len = cmd->ext_cdb_len;
+ ucmd->user_cmd.exec_cmd.bufflen = cmd->bufflen;
+ ucmd->user_cmd.exec_cmd.data_len = cmd->data_len;
+ ucmd->user_cmd.exec_cmd.pbuf = ucmd->ubuff;
@@ -37169,7 +36282,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+
+static void dev_user_on_free_cmd(struct scst_cmd *cmd)
+{
-+ struct scst_user_cmd *ucmd = (struct scst_user_cmd *)cmd->dh_priv;
++ struct scst_user_cmd *ucmd = cmd->dh_priv;
+
+ TRACE_ENTRY();
+
@@ -37221,7 +36334,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+
+static void dev_user_set_block(struct scst_cmd *cmd, int block)
+{
-+ struct scst_user_dev *dev = (struct scst_user_dev *)cmd->dev->dh_priv;
++ struct scst_user_dev *dev = cmd->dev->dh_priv;
+ /*
+ * No need for locks here, since *_detach() can not be
+ * called, when there are existing commands.
@@ -37465,10 +36578,14 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ (preply->bufflen == 0)))
+ goto out_inval;
+
-+ if (unlikely((preply->bufflen < 0) || (preply->data_len < 0)))
++ if (unlikely((preply->bufflen < 0) || (preply->out_bufflen < 0) ||
++ (preply->data_len < 0)))
++ goto out_inval;
++
++ if (unlikely(preply->cdb_len > cmd->cdb_len))
+ goto out_inval;
+
-+ if (unlikely(preply->cdb_len > SCST_MAX_CDB_SIZE))
++ if (!(preply->op_flags & SCST_INFO_VALID))
+ goto out_inval;
+
+ TRACE_DBG("ucmd %p, queue_type %x, data_direction, %x, bufflen %d, "
@@ -37480,11 +36597,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ cmd->queue_type = preply->queue_type;
+ cmd->data_direction = preply->data_direction;
+ cmd->bufflen = preply->bufflen;
++ cmd->out_bufflen = preply->out_bufflen;
+ cmd->data_len = preply->data_len;
+ if (preply->cdb_len > 0)
+ cmd->cdb_len = preply->cdb_len;
-+ if (preply->op_flags & SCST_INFO_VALID)
-+ cmd->op_flags = preply->op_flags;
++ cmd->op_flags = preply->op_flags;
+
+out_process:
+ scst_post_parse(cmd);
@@ -37822,7 +36939,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ TRACE_ENTRY();
+
+ mutex_lock(&dev_priv_mutex);
-+ dev = (struct scst_user_dev *)file->private_data;
++ dev = file->private_data;
+ res = dev_user_check_reg(dev);
+ if (unlikely(res != 0)) {
+ mutex_unlock(&dev_priv_mutex);
@@ -37865,7 +36982,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ TRACE_ENTRY();
+
+ mutex_lock(&dev_priv_mutex);
-+ dev = (struct scst_user_dev *)file->private_data;
++ dev = file->private_data;
+ res = dev_user_check_reg(dev);
+ if (unlikely(res != 0)) {
+ mutex_unlock(&dev_priv_mutex);
@@ -37916,12 +37033,18 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ if (cmd == NULL)
+ goto out_put;
+
-+ if (cmd->ext_cdb == NULL)
++ BUILD_BUG_ON(sizeof(cmd->cdb_buf) != SCST_MAX_CDB_SIZE);
++
++ if (cmd->cdb_len <= SCST_MAX_CDB_SIZE)
+ goto out_cmd_put;
+
-+ TRACE_BUFFER("EXT CDB", cmd->ext_cdb, cmd->ext_cdb_len);
++ EXTRACHECKS_BUG_ON(cmd->cdb_buf == cmd->cdb_buf);
++
++ TRACE_BUFFER("EXT CDB", &cmd->cdb[sizeof(cmd->cdb_buf)],
++ cmd->cdb_len - sizeof(cmd->cdb_buf));
+ rc = copy_to_user((void __user *)(unsigned long)get.ext_cdb_buffer,
-+ cmd->ext_cdb, cmd->ext_cdb_len);
++ &cmd->cdb[sizeof(cmd->cdb_buf)],
++ cmd->cdb_len - sizeof(cmd->cdb_buf));
+ if (unlikely(rc != 0)) {
+ PRINT_ERROR("Failed to copy to user %d bytes", rc);
+ res = -EFAULT;
@@ -38104,7 +37227,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ TRACE_ENTRY();
+
+ mutex_lock(&dev_priv_mutex);
-+ dev = (struct scst_user_dev *)file->private_data;
++ dev = file->private_data;
+ res = dev_user_check_reg(dev);
+ if (unlikely(res != 0)) {
+ mutex_unlock(&dev_priv_mutex);
@@ -38312,7 +37435,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ TRACE_ENTRY();
+
+ mutex_lock(&dev_priv_mutex);
-+ dev = (struct scst_user_dev *)file->private_data;
++ dev = file->private_data;
+ res = dev_user_check_reg(dev);
+ if (unlikely(res != 0)) {
+ mutex_unlock(&dev_priv_mutex);
@@ -38591,12 +37714,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ return;
+}
+
++/* Can be called under some spinlock and IRQs off */
+static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
+ struct scst_tgt_dev *tgt_dev)
+{
+ struct scst_user_cmd *ucmd;
-+ struct scst_user_dev *dev =
-+ (struct scst_user_dev *)tgt_dev->dev->dh_priv;
++ struct scst_user_dev *dev = tgt_dev->dev->dh_priv;
+ struct scst_user_cmd *ucmd_to_abort = NULL;
+
+ TRACE_ENTRY();
@@ -38642,7 +37765,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ dev_user_abort_ready_commands(dev);
+
+ /* We can't afford missing TM command due to memory shortage */
-+ ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL|__GFP_NOFAIL);
++ ucmd = dev_user_alloc_ucmd(dev, GFP_ATOMIC|__GFP_NOFAIL);
++ if (ucmd == NULL) {
++ PRINT_CRIT_ERROR("Unable to allocate TM %d message "
++ "(dev %s)", mcmd->fn, dev->name);
++ goto out;
++ }
+
+ ucmd->user_cmd_payload_len =
+ offsetof(struct scst_user_get_cmd, tm_cmd) +
@@ -38655,8 +37783,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ ucmd->user_cmd.tm_cmd.cmd_sn_set = mcmd->cmd_sn_set;
+
+ if (mcmd->cmd_to_abort != NULL) {
-+ ucmd_to_abort =
-+ (struct scst_user_cmd *)mcmd->cmd_to_abort->dh_priv;
++ ucmd_to_abort = mcmd->cmd_to_abort->dh_priv;
+ if (ucmd_to_abort != NULL)
+ ucmd->user_cmd.tm_cmd.cmd_h_to_abort = ucmd_to_abort->h;
+ }
@@ -38673,6 +37800,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+
+ dev_user_add_to_ready(ucmd);
+
++out:
+ TRACE_EXIT();
+ return SCST_DEV_TM_NOT_COMPLETED;
+}
@@ -38718,7 +37846,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+
+static void dev_user_detach(struct scst_device *sdev)
+{
-+ struct scst_user_dev *dev = (struct scst_user_dev *)sdev->dh_priv;
++ struct scst_user_dev *dev = sdev->dh_priv;
+
+ TRACE_ENTRY();
+
@@ -38767,8 +37895,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+
+static int dev_user_attach_tgt(struct scst_tgt_dev *tgt_dev)
+{
-+ struct scst_user_dev *dev =
-+ (struct scst_user_dev *)tgt_dev->dev->dh_priv;
++ struct scst_user_dev *dev = tgt_dev->dev->dh_priv;
+ int res = 0, rc;
+ struct scst_user_cmd *ucmd;
+ DECLARE_COMPLETION_ONSTACK(cmpl);
@@ -38856,19 +37983,21 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+
+static void dev_user_detach_tgt(struct scst_tgt_dev *tgt_dev)
+{
-+ struct scst_user_dev *dev =
-+ (struct scst_user_dev *)tgt_dev->dev->dh_priv;
++ struct scst_user_dev *dev = tgt_dev->dev->dh_priv;
+ struct scst_user_cmd *ucmd;
+
+ TRACE_ENTRY();
+
+ /*
-+ * We can't miss TM command due to memory shortage, because it might
++ * We can't miss detach command due to memory shortage, because it might
+ * lead to a memory leak in the user space handler.
+ */
+ ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL|__GFP_NOFAIL);
-+ if (ucmd == NULL)
++ if (ucmd == NULL) {
++ PRINT_CRIT_ERROR("Unable to allocate DETACH_SESS message "
++ "(dev %s)", dev->name);
+ goto out;
++ }
+
+ TRACE_MGMT_DBG("Preparing DETACH_SESS %p (h %d, sess_h %llx)", ucmd,
+ ucmd->h, ucmd->user_cmd.sess.sess_h);
@@ -39197,7 +38326,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ TRACE_ENTRY();
+
+ mutex_lock(&dev_priv_mutex);
-+ dev = (struct scst_user_dev *)file->private_data;
++ dev = file->private_data;
+ res = dev_user_check_reg(dev);
+ if (res != 0) {
+ mutex_unlock(&dev_priv_mutex);
@@ -39213,7 +38342,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ up_read(&dev->dev_rwsem);
+
+ mutex_lock(&dev_priv_mutex);
-+ dev = (struct scst_user_dev *)file->private_data;
++ dev = file->private_data;
+ if (dev == NULL) {
+ mutex_unlock(&dev_priv_mutex);
+ goto out_resume;
@@ -39252,7 +38381,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ TRACE_ENTRY();
+
+ mutex_lock(&dev_priv_mutex);
-+ dev = (struct scst_user_dev *)file->private_data;
++ dev = file->private_data;
+ res = dev_user_check_reg(dev);
+ if (res != 0) {
+ mutex_unlock(&dev_priv_mutex);
@@ -39286,7 +38415,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ TRACE_ENTRY();
+
+ mutex_lock(&dev_priv_mutex);
-+ dev = (struct scst_user_dev *)file->private_data;
++ dev = file->private_data;
+ res = dev_user_check_reg(dev);
+ if (res != 0) {
+ mutex_unlock(&dev_priv_mutex);
@@ -39319,7 +38448,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ TRACE_ENTRY();
+
+ mutex_lock(&dev_priv_mutex);
-+ dev = (struct scst_user_dev *)file->private_data;
++ dev = file->private_data;
+ res = dev_user_check_reg(dev);
+ if (unlikely(res != 0)) {
+ mutex_unlock(&dev_priv_mutex);
@@ -39371,8 +38500,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ sg = sgv_pool_alloc(pool, bufflen, GFP_KERNEL, SGV_POOL_ALLOC_GET_NEW,
+ &sg_cnt, &ucmd->sgv, &dev->udev_mem_lim, ucmd);
+ if (sg != NULL) {
-+ struct scst_user_cmd *buf_ucmd =
-+ (struct scst_user_cmd *)sgv_get_priv(ucmd->sgv);
++ struct scst_user_cmd *buf_ucmd = sgv_get_priv(ucmd->sgv);
+
+ TRACE_MEM("Buf ucmd %p (sg_cnt %d, last seg len %d, "
+ "bufflen %d)", buf_ucmd, sg_cnt,
@@ -39480,7 +38608,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ TRACE_ENTRY();
+
+ mutex_lock(&dev_priv_mutex);
-+ dev = (struct scst_user_dev *)file->private_data;
++ dev = file->private_data;
+ res = dev_user_check_reg(dev);
+ if (res != 0) {
+ mutex_unlock(&dev_priv_mutex);
@@ -39514,7 +38642,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ TRACE_ENTRY();
+
+ mutex_lock(&dev_priv_mutex);
-+ dev = (struct scst_user_dev *)file->private_data;
++ dev = file->private_data;
+ res = dev_user_check_reg(dev);
+ if (res != 0) {
+ mutex_unlock(&dev_priv_mutex);
@@ -39611,7 +38739,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+
+static int __dev_user_release(void *arg)
+{
-+ struct scst_user_dev *dev = (struct scst_user_dev *)arg;
++ struct scst_user_dev *dev = arg;
+ dev_user_exit_dev(dev);
+ kfree(dev);
+ return 0;
@@ -39624,7 +38752,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+
+ TRACE_ENTRY();
+
-+ dev = (struct scst_user_dev *)file->private_data;
++ dev = file->private_data;
+ if (dev == NULL)
+ goto out;
+ file->private_data = NULL;
@@ -39716,7 +38844,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+ TRACE_ENTRY();
+
+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ udev = (struct scst_user_dev *)dev->dh_priv;
++ udev = dev->dh_priv;
+
+ spin_lock_irqsave(&udev->udev_cmd_threads.cmd_list_lock, flags);
+ for (i = 0; i < (int)ARRAY_SIZE(udev->ucmd_hash); i++) {
@@ -39953,10 +39081,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_user.c linux-2.6.36/
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("User space device handler for SCST");
+MODULE_VERSION(SCST_VERSION_STRING);
-diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c
---- orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c
-+++ linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c
-@@ -0,0 +1,4228 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.39/drivers/scst/dev_handlers/scst_vdisk.c
+--- orig/linux-2.6.39/drivers/scst/dev_handlers/scst_vdisk.c
++++ linux-2.6.39/drivers/scst/dev_handlers/scst_vdisk.c
+@@ -0,0 +1,4525 @@
+/*
+ * scst_vdisk.c
+ *
@@ -39986,7 +39114,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
-+#include <linux/smp_lock.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/uio.h>
@@ -39997,11 +39124,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+#include <asm/atomic.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
-+#include <linux/version.h>
++#include <linux/delay.h>
+#include <asm/div64.h>
+#include <asm/unaligned.h>
+#include <linux/slab.h>
+#include <linux/bio.h>
++#include <linux/crc32c.h>
+
+#define LOG_PREFIX "dev_vdisk"
+
@@ -40012,12 +39140,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+#define TRACE_ORDER 0x80000000
+
+static struct scst_trace_log vdisk_local_trace_tbl[] = {
-+ { TRACE_ORDER, "order" },
-+ { 0, NULL }
++ { TRACE_ORDER, "order" },
++ { 0, NULL }
+};
+#define trace_log_tbl vdisk_local_trace_tbl
+
-+#define VDISK_TRACE_TLB_HELP ", order"
++#define VDISK_TRACE_TBL_HELP ", order"
+
+#endif
+
@@ -40027,11 +39155,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+#define SCST_FIO_VENDOR "SCST_FIO"
+#define SCST_BIO_VENDOR "SCST_BIO"
+/* 4 byte ASCII Product Revision Level - left aligned */
-+#define SCST_FIO_REV " 200"
++#define SCST_FIO_REV " 210"
+
+#define MAX_USN_LEN (20+1) /* For '\0' */
+
-+#define INQ_BUF_SZ 128
++#define INQ_BUF_SZ 256
+#define EVPD 0x01
+#define CMDDT 0x02
+
@@ -40058,10 +39186,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+#define DEF_NV_CACHE 0
+#define DEF_O_DIRECT 0
+#define DEF_REMOVABLE 0
++#define DEF_THIN_PROVISIONED 0
+
+#define VDISK_NULLIO_SIZE (3LL*1024*1024*1024*1024/2)
+
+#define DEF_TST SCST_CONTR_MODE_SEP_TASK_SETS
++
+/*
+ * Since we can't control backstorage device's reordering, we have to always
+ * report unrestricted reordering.
@@ -40073,73 +39203,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+
+#define DEF_DSENSE SCST_CONTR_MODE_FIXED_SENSE
+
-+static unsigned int random_values[256] = {
-+ 9862592UL, 3744545211UL, 2348289082UL, 4036111983UL,
-+ 435574201UL, 3110343764UL, 2383055570UL, 1826499182UL,
-+ 4076766377UL, 1549935812UL, 3696752161UL, 1200276050UL,
-+ 3878162706UL, 1783530428UL, 2291072214UL, 125807985UL,
-+ 3407668966UL, 547437109UL, 3961389597UL, 969093968UL,
-+ 56006179UL, 2591023451UL, 1849465UL, 1614540336UL,
-+ 3699757935UL, 479961779UL, 3768703953UL, 2529621525UL,
-+ 4157893312UL, 3673555386UL, 4091110867UL, 2193909423UL,
-+ 2800464448UL, 3052113233UL, 450394455UL, 3424338713UL,
-+ 2113709130UL, 4082064373UL, 3708640918UL, 3841182218UL,
-+ 3141803315UL, 1032476030UL, 1166423150UL, 1169646901UL,
-+ 2686611738UL, 575517645UL, 2829331065UL, 1351103339UL,
-+ 2856560215UL, 2402488288UL, 867847666UL, 8524618UL,
-+ 704790297UL, 2228765657UL, 231508411UL, 1425523814UL,
-+ 2146764591UL, 1287631730UL, 4142687914UL, 3879884598UL,
-+ 729945311UL, 310596427UL, 2263511876UL, 1983091134UL,
-+ 3500916580UL, 1642490324UL, 3858376049UL, 695342182UL,
-+ 780528366UL, 1372613640UL, 1100993200UL, 1314818946UL,
-+ 572029783UL, 3775573540UL, 776262915UL, 2684520905UL,
-+ 1007252738UL, 3505856396UL, 1974886670UL, 3115856627UL,
-+ 4194842288UL, 2135793908UL, 3566210707UL, 7929775UL,
-+ 1321130213UL, 2627281746UL, 3587067247UL, 2025159890UL,
-+ 2587032000UL, 3098513342UL, 3289360258UL, 130594898UL,
-+ 2258149812UL, 2275857755UL, 3966929942UL, 1521739999UL,
-+ 4191192765UL, 958953550UL, 4153558347UL, 1011030335UL,
-+ 524382185UL, 4099757640UL, 498828115UL, 2396978754UL,
-+ 328688935UL, 826399828UL, 3174103611UL, 3921966365UL,
-+ 2187456284UL, 2631406787UL, 3930669674UL, 4282803915UL,
-+ 1776755417UL, 374959755UL, 2483763076UL, 844956392UL,
-+ 2209187588UL, 3647277868UL, 291047860UL, 3485867047UL,
-+ 2223103546UL, 2526736133UL, 3153407604UL, 3828961796UL,
-+ 3355731910UL, 2322269798UL, 2752144379UL, 519897942UL,
-+ 3430536488UL, 1801511593UL, 1953975728UL, 3286944283UL,
-+ 1511612621UL, 1050133852UL, 409321604UL, 1037601109UL,
-+ 3352316843UL, 4198371381UL, 617863284UL, 994672213UL,
-+ 1540735436UL, 2337363549UL, 1242368492UL, 665473059UL,
-+ 2330728163UL, 3443103219UL, 2291025133UL, 3420108120UL,
-+ 2663305280UL, 1608969839UL, 2278959931UL, 1389747794UL,
-+ 2226946970UL, 2131266900UL, 3856979144UL, 1894169043UL,
-+ 2692697628UL, 3797290626UL, 3248126844UL, 3922786277UL,
-+ 343705271UL, 3739749888UL, 2191310783UL, 2962488787UL,
-+ 4119364141UL, 1403351302UL, 2984008923UL, 3822407178UL,
-+ 1932139782UL, 2323869332UL, 2793574182UL, 1852626483UL,
-+ 2722460269UL, 1136097522UL, 1005121083UL, 1805201184UL,
-+ 2212824936UL, 2979547931UL, 4133075915UL, 2585731003UL,
-+ 2431626071UL, 134370235UL, 3763236829UL, 1171434827UL,
-+ 2251806994UL, 1289341038UL, 3616320525UL, 392218563UL,
-+ 1544502546UL, 2993937212UL, 1957503701UL, 3579140080UL,
-+ 4270846116UL, 2030149142UL, 1792286022UL, 366604999UL,
-+ 2625579499UL, 790898158UL, 770833822UL, 815540197UL,
-+ 2747711781UL, 3570468835UL, 3976195842UL, 1257621341UL,
-+ 1198342980UL, 1860626190UL, 3247856686UL, 351473955UL,
-+ 993440563UL, 340807146UL, 1041994520UL, 3573925241UL,
-+ 480246395UL, 2104806831UL, 1020782793UL, 3362132583UL,
-+ 2272911358UL, 3440096248UL, 2356596804UL, 259492703UL,
-+ 3899500740UL, 252071876UL, 2177024041UL, 4284810959UL,
-+ 2775999888UL, 2653420445UL, 2876046047UL, 1025771859UL,
-+ 1994475651UL, 3564987377UL, 4112956647UL, 1821511719UL,
-+ 3113447247UL, 455315102UL, 1585273189UL, 2311494568UL,
-+ 774051541UL, 1898115372UL, 2637499516UL, 247231365UL,
-+ 1475014417UL, 803585727UL, 3911097303UL, 1714292230UL,
-+ 476579326UL, 2496900974UL, 3397613314UL, 341202244UL,
-+ 807790202UL, 4221326173UL, 499979741UL, 1301488547UL,
-+ 1056807896UL, 3525009458UL, 1174811641UL, 3049738746UL,
-+};
-+
+struct scst_vdisk_dev {
+ uint32_t block_size;
+ uint64_t nblocks;
@@ -40167,6 +39230,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ unsigned int blockio:1;
+ unsigned int cdrom_empty:1;
+ unsigned int removable:1;
++ unsigned int thin_provisioned:1;
+
+ int virt_id;
+ char name[16+1]; /* Name of the virtual device,
@@ -40216,11 +39280,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ struct scst_vdisk_thr *thr, loff_t loff);
+static void blockio_exec_rw(struct scst_cmd *cmd, struct scst_vdisk_thr *thr,
+ u64 lba_start, int write);
-+static int blockio_flush(struct block_device *bdev);
++static int vdisk_blockio_flush(struct block_device *bdev, gfp_t gfp_mask,
++ bool report_error);
+static void vdisk_exec_verify(struct scst_cmd *cmd,
+ struct scst_vdisk_thr *thr, loff_t loff);
+static void vdisk_exec_read_capacity(struct scst_cmd *cmd);
+static void vdisk_exec_read_capacity16(struct scst_cmd *cmd);
++static void vdisk_exec_report_tpgs(struct scst_cmd *cmd);
+static void vdisk_exec_inquiry(struct scst_cmd *cmd);
+static void vdisk_exec_request_sense(struct scst_cmd *cmd);
+static void vdisk_exec_mode_sense(struct scst_cmd *cmd);
@@ -40228,6 +39294,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+static void vdisk_exec_log(struct scst_cmd *cmd);
+static void vdisk_exec_read_toc(struct scst_cmd *cmd);
+static void vdisk_exec_prevent_allow_medium_removal(struct scst_cmd *cmd);
++static void vdisk_exec_unmap(struct scst_cmd *cmd, struct scst_vdisk_thr *thr);
+static int vdisk_fsync(struct scst_vdisk_thr *thr, loff_t loff,
+ loff_t len, struct scst_cmd *cmd, struct scst_device *dev);
+static ssize_t vdisk_add_fileio_device(const char *device_name, char *params);
@@ -40250,6 +39317,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ struct kobj_attribute *attr, char *buf);
+static ssize_t vdisk_sysfs_wt_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf);
++static ssize_t vdisk_sysfs_tp_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf);
+static ssize_t vdisk_sysfs_nv_cache_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf);
+static ssize_t vdisk_sysfs_o_direct_show(struct kobject *kobj,
@@ -40280,6 +39349,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ __ATTR(read_only, S_IRUGO, vdisk_sysfs_rd_only_show, NULL);
+static struct kobj_attribute vdisk_wt_attr =
+ __ATTR(write_through, S_IRUGO, vdisk_sysfs_wt_show, NULL);
++static struct kobj_attribute vdisk_tp_attr =
++ __ATTR(thin_provisioned, S_IRUGO, vdisk_sysfs_tp_show, NULL);
+static struct kobj_attribute vdisk_nv_cache_attr =
+ __ATTR(nv_cache, S_IRUGO, vdisk_sysfs_nv_cache_show, NULL);
+static struct kobj_attribute vdisk_o_direct_attr =
@@ -40305,6 +39376,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ &vdisk_blocksize_attr.attr,
+ &vdisk_rd_only_attr.attr,
+ &vdisk_wt_attr.attr,
++ &vdisk_tp_attr.attr,
+ &vdisk_nv_cache_attr.attr,
+ &vdisk_o_direct_attr.attr,
+ &vdisk_removable_attr.attr,
@@ -40325,6 +39397,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ &vdisk_resync_size_attr.attr,
+ &vdev_t10_dev_id_attr.attr,
+ &vdev_usn_attr.attr,
++ &vdisk_tp_attr.attr,
+ NULL,
+};
+
@@ -40380,12 +39453,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ .del_device = vdisk_del_device,
+ .dev_attrs = vdisk_fileio_attrs,
+ .add_device_parameters = "filename, blocksize, write_through, "
-+ "nv_cache, o_direct, read_only, removable",
++ "nv_cache, o_direct, read_only, removable, thin_provisioned",
+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
+ .trace_flags = &trace_flag,
+ .trace_tbl = vdisk_local_trace_tbl,
-+ .trace_tbl_help = VDISK_TRACE_TLB_HELP,
++ .trace_tbl_help = VDISK_TRACE_TBL_HELP,
+#endif
+};
+
@@ -40408,12 +39481,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ .del_device = vdisk_del_device,
+ .dev_attrs = vdisk_blockio_attrs,
+ .add_device_parameters = "filename, blocksize, nv_cache, read_only, "
-+ "removable",
++ "removable, thin_provisioned",
+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
+ .trace_flags = &trace_flag,
+ .trace_tbl = vdisk_local_trace_tbl,
-+ .trace_tbl_help = VDISK_TRACE_TLB_HELP,
++ .trace_tbl_help = VDISK_TRACE_TBL_HELP,
+#endif
+};
+
@@ -40438,7 +39511,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
+ .trace_flags = &trace_flag,
+ .trace_tbl = vdisk_local_trace_tbl,
-+ .trace_tbl_help = VDISK_TRACE_TLB_HELP,
++ .trace_tbl_help = VDISK_TRACE_TBL_HELP,
+#endif
+};
+
@@ -40464,7 +39537,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
+ .trace_flags = &trace_flag,
+ .trace_tbl = vdisk_local_trace_tbl,
-+ .trace_tbl_help = VDISK_TRACE_TLB_HELP,
++ .trace_tbl_help = VDISK_TRACE_TBL_HELP,
+#endif
+};
+
@@ -40526,7 +39599,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ goto out_close;
+ }
+
-+ if (blockio_flush(inode->i_bdev) != 0) {
++ if (vdisk_blockio_flush(inode->i_bdev, GFP_KERNEL, false) != 0) {
+ PRINT_WARNING("Device %s doesn't support barriers, switching "
+ "to NV_CACHE mode. Read README for more details.",
+ virt_dev->filename);
@@ -40541,6 +39614,58 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ return;
+}
+
++static void vdisk_check_tp_support(struct scst_vdisk_dev *virt_dev)
++{
++ struct inode *inode;
++ struct file *fd;
++ bool supported = false;
++
++ TRACE_ENTRY();
++
++ if (virt_dev->rd_only || !virt_dev->thin_provisioned)
++ goto out;
++
++ fd = filp_open(virt_dev->filename, O_LARGEFILE, 0600);
++ if (IS_ERR(fd)) {
++ PRINT_ERROR("filp_open(%s) returned error %ld",
++ virt_dev->filename, PTR_ERR(fd));
++ goto out;
++ }
++
++ inode = fd->f_dentry->d_inode;
++
++ if (virt_dev->blockio) {
++ if (!S_ISBLK(inode->i_mode)) {
++ PRINT_ERROR("%s is NOT a block device",
++ virt_dev->filename);
++ goto out_close;
++ }
++ supported = blk_queue_discard(bdev_get_queue(inode->i_bdev));
++
++ } else {
++ /*
++ * truncate_range() was chosen rather as a sample. In future,
++ * when unmap of range of blocks in file become standard, we
++ * will just switch to the new call.
++ */
++ supported = (inode->i_op->truncate_range != NULL);
++ }
++
++ if (!supported) {
++ PRINT_WARNING("Device %s doesn't support thin "
++ "provisioning, disabling it.",
++ virt_dev->filename);
++ virt_dev->thin_provisioned = 0;
++ }
++
++out_close:
++ filp_close(fd, NULL);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
+/* Returns 0 on success and file size in *file_size, error code otherwise */
+static int vdisk_get_file_size(const char *filename, bool blockio,
+ loff_t *file_size)
@@ -40638,6 +39763,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ TRACE_DBG("size of file: %lld", (long long unsigned int)err);
+
+ vdisk_blockio_check_flush_support(virt_dev);
++ vdisk_check_tp_support(virt_dev);
+ } else
+ virt_dev->file_size = 0;
+
@@ -40678,8 +39804,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+/* scst_mutex supposed to be held */
+static void vdisk_detach(struct scst_device *dev)
+{
-+ struct scst_vdisk_dev *virt_dev =
-+ (struct scst_vdisk_dev *)dev->dh_priv;
++ struct scst_vdisk_dev *virt_dev = dev->dh_priv;
+
+ TRACE_ENTRY();
+
@@ -40714,20 +39839,19 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+}
+
+static struct scst_vdisk_thr *vdisk_init_thr_data(
-+ struct scst_tgt_dev *tgt_dev)
++ struct scst_tgt_dev *tgt_dev, gfp_t gfp_mask)
+{
+ struct scst_vdisk_thr *res;
-+ struct scst_vdisk_dev *virt_dev =
-+ (struct scst_vdisk_dev *)tgt_dev->dev->dh_priv;
++ struct scst_vdisk_dev *virt_dev = tgt_dev->dev->dh_priv;
+
+ TRACE_ENTRY();
+
+ EXTRACHECKS_BUG_ON(virt_dev->nullio);
+
-+ res = kmem_cache_zalloc(vdisk_thr_cachep, GFP_KERNEL);
++ res = kmem_cache_zalloc(vdisk_thr_cachep, gfp_mask);
+ if (res == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "%s", "Unable to allocate struct "
-+ "scst_vdisk_thr");
++ PRINT_ERROR("Unable to allocate struct scst_vdisk_thr"
++ " (size %zd)", sizeof(*res));
+ goto out;
+ }
+
@@ -40789,8 +39913,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ loff_t loff;
+ struct scst_device *dev = cmd->dev;
+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+ struct scst_vdisk_dev *virt_dev =
-+ (struct scst_vdisk_dev *)dev->dh_priv;
++ struct scst_vdisk_dev *virt_dev = dev->dh_priv;
+ struct scst_thr_data_hdr *d;
+ struct scst_vdisk_thr *thr = NULL;
+ int fua = 0;
@@ -40820,7 +39943,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ if (!virt_dev->nullio) {
+ d = scst_find_thr_data(tgt_dev);
+ if (unlikely(d == NULL)) {
-+ thr = vdisk_init_thr_data(tgt_dev);
++ thr = vdisk_init_thr_data(tgt_dev,
++ cmd->noio_mem_alloc ? GFP_NOIO : GFP_KERNEL);
+ if (thr == NULL) {
+ scst_set_busy(cmd);
+ goto out_compl;
@@ -41018,12 +40142,22 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ vdisk_exec_read_capacity16(cmd);
+ break;
+ }
-+ /* else go through */
++ goto out_invalid_opcode;
++ case UNMAP:
++ vdisk_exec_unmap(cmd, thr);
++ break;
++ case MAINTENANCE_IN:
++ switch (cmd->cdb[1] & 0x1f) {
++ case MI_REPORT_TARGET_PGS:
++ vdisk_exec_report_tpgs(cmd);
++ break;
++ default:
++ goto out_invalid_opcode;
++ }
++ break;
+ case REPORT_LUNS:
+ default:
-+ TRACE_DBG("Invalid opcode %d", opcode);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
++ goto out_invalid_opcode;
+ }
+
+out_compl:
@@ -41040,12 +40174,17 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+
+ TRACE_EXIT_RES(res);
+ return res;
++
++out_invalid_opcode:
++ TRACE_DBG("Invalid opcode %d", opcode);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
++ goto out_compl;
+}
+
+static int vdisk_get_block_shift(struct scst_cmd *cmd)
+{
-+ struct scst_vdisk_dev *virt_dev =
-+ (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ struct scst_vdisk_dev *virt_dev = cmd->dev->dh_priv;
+ return virt_dev->block_shift;
+}
+
@@ -41065,8 +40204,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+{
+ int res = SCST_EXEC_COMPLETED;
+ int opcode = cmd->cdb[0];
-+ struct scst_vdisk_dev *virt_dev =
-+ (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ struct scst_vdisk_dev *virt_dev = cmd->dev->dh_priv;
+
+ TRACE_ENTRY();
+
@@ -41108,31 +40246,140 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+
+static uint64_t vdisk_gen_dev_id_num(const char *virt_dev_name)
+{
-+ unsigned int dev_id_num, i;
++ uint32_t dev_id_num;
+
-+ for (dev_id_num = 0, i = 0; i < strlen(virt_dev_name); i++) {
-+ unsigned int rv = random_values[(int)(virt_dev_name[i])];
-+ /* Do some rotating of the bits */
-+ dev_id_num ^= ((rv << i) | (rv >> (32 - i)));
-+ }
++ dev_id_num = crc32c(0, virt_dev_name, strlen(virt_dev_name)+1);
+
+ return ((uint64_t)scst_get_setup_id() << 32) | dev_id_num;
+}
+
++static void vdisk_exec_unmap(struct scst_cmd *cmd, struct scst_vdisk_thr *thr)
++{
++ struct scst_vdisk_dev *virt_dev = cmd->dev->dh_priv;
++ ssize_t length = 0;
++ struct file *fd = thr->fd;
++ struct inode *inode;
++ uint8_t *address;
++ int offset, descriptor_len, total_len;
++
++ TRACE_ENTRY();
++
++ if (unlikely(!virt_dev->thin_provisioned)) {
++ TRACE_DBG("%s", "Invalid opcode UNMAP");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
++ goto out;
++ }
++
++ if (unlikely(cmd->cdb[1] & 1)) {
++ /* ANCHOR not supported */
++ TRACE_DBG("%s", "Invalid ANCHOR field");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out;
++ }
++
++ length = scst_get_buf_full(cmd, &address);
++ if (unlikely(length <= 0)) {
++ if (length == 0)
++ goto out_put;
++ else if (length == -ENOMEM)
++ scst_set_busy(cmd);
++ else
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out;
++ }
++
++ inode = fd->f_dentry->d_inode;
++
++ total_len = cmd->cdb[7] << 8 | cmd->cdb[8]; /* length */
++ offset = 8;
++
++ descriptor_len = address[2] << 8 | address[3];
++
++ TRACE_DBG("total_len %d, descriptor_len %d", total_len, descriptor_len);
++
++ if (descriptor_len == 0)
++ goto out_put;
++
++ if (unlikely((descriptor_len > (total_len - 8)) ||
++ ((descriptor_len % 16) != 0))) {
++ PRINT_ERROR("Bad descriptor length: %d < %d - 8",
++ descriptor_len, total_len);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_parm_list));
++ goto out_put;
++ }
++
++ while ((offset - 8) < descriptor_len) {
++ int err;
++ uint64_t start;
++ uint32_t len;
++ start = be64_to_cpu(get_unaligned((__be64 *)&address[offset]));
++ offset += 8;
++ len = be32_to_cpu(get_unaligned((__be32 *)&address[offset]));
++ offset += 8;
++
++ if ((start > virt_dev->nblocks) ||
++ ((start + len) > virt_dev->nblocks)) {
++ PRINT_ERROR("Device %s: attempt to write beyond max "
++ "size", virt_dev->name);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_put;
++ }
++
++ TRACE_DBG("Unmapping lba %lld (blocks %d)",
++ (unsigned long long)start, len);
++
++ if (virt_dev->blockio) {
++ gfp_t gfp = cmd->noio_mem_alloc ? GFP_NOIO : GFP_KERNEL;
++ err = blkdev_issue_discard(inode->i_bdev, start, len,
++ gfp, 0);
++ if (unlikely(err != 0)) {
++ PRINT_ERROR("blkdev_issue_discard() for "
++ "LBA %lld len %d failed with err %d",
++ (unsigned long long)start, len, err);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_write_error));
++ goto out_put;
++ }
++ } else {
++ const int block_shift = virt_dev->block_shift;
++
++ /*
++ * We are guaranteed by thin_provisioned flag
++ * that truncate_range is not NULL.
++ */
++ if (((start + len) << block_shift) &
++ (PAGE_CACHE_SIZE - 1)) {
++ PRINT_ERROR("Invalid UNMAP range [%llu, %llu); "
++ "block size = %d", start, start + len,
++ virt_dev->block_size);
++ goto out_put;
++ }
++ inode->i_op->truncate_range(inode,
++ start << block_shift,
++ ((start + len) << block_shift) - 1);
++ }
++ }
++
++out_put:
++ scst_put_buf_full(cmd, address);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
+static void vdisk_exec_inquiry(struct scst_cmd *cmd)
+{
+ int32_t length, i, resp_len = 0;
+ uint8_t *address;
+ uint8_t *buf;
-+ struct scst_vdisk_dev *virt_dev =
-+ (struct scst_vdisk_dev *)cmd->dev->dh_priv;
-+
-+ /* ToDo: Performance Boost:
-+ * 1. remove kzalloc, buf
-+ * 2. do all checks before touching *address
-+ * 3. zero *address
-+ * 4. write directly to *address
-+ */
++ struct scst_vdisk_dev *virt_dev = cmd->dev->dh_priv;
++ uint16_t tg_id;
+
+ TRACE_ENTRY();
+
@@ -41142,11 +40389,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ goto out;
+ }
+
-+ length = scst_get_buf_first(cmd, &address);
++ length = scst_get_buf_full(cmd, &address);
+ TRACE_DBG("length %d", length);
+ if (unlikely(length <= 0)) {
+ if (length < 0) {
-+ PRINT_ERROR("scst_get_buf_first() failed: %d", length);
++ PRINT_ERROR("scst_get_buf_full() failed: %d", length);
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_hardw_error));
+ }
@@ -41161,8 +40408,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ }
+
+ buf[0] = cmd->dev->type; /* type dev */
-+ if (virt_dev->removable)
-+ buf[1] = 0x80; /* removable */
+ /* Vital Product */
+ if (cmd->cdb[1] & EVPD) {
+ if (0 == cmd->cdb[2]) {
@@ -41174,17 +40419,26 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ if (virt_dev->dev->type == TYPE_DISK) {
+ buf[3] += 1;
+ buf[7] = 0xB0; /* block limits */
++ if (virt_dev->thin_provisioned) {
++ buf[3] += 1;
++ buf[8] = 0xB2; /* thin provisioning */
++ }
+ }
+ resp_len = buf[3] + 4;
+ } else if (0x80 == cmd->cdb[2]) {
+ /* unit serial number */
-+ int usn_len;
-+ read_lock(&vdisk_serial_rwlock);
-+ usn_len = strlen(virt_dev->usn);
+ buf[1] = 0x80;
-+ buf[3] = usn_len;
-+ strncpy(&buf[4], virt_dev->usn, usn_len);
-+ read_unlock(&vdisk_serial_rwlock);
++ if (cmd->tgtt->get_serial) {
++ buf[3] = cmd->tgtt->get_serial(cmd->tgt_dev,
++ &buf[4], INQ_BUF_SZ - 4);
++ } else {
++ int usn_len;
++ read_lock(&vdisk_serial_rwlock);
++ usn_len = strlen(virt_dev->usn);
++ buf[3] = usn_len;
++ strncpy(&buf[4], virt_dev->usn, usn_len);
++ read_unlock(&vdisk_serial_rwlock);
++ }
+ resp_len = buf[3] + 4;
+ } else if (0x83 == cmd->cdb[2]) {
+ /* device identification */
@@ -41194,7 +40448,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ /* T10 vendor identifier field format (faked) */
+ buf[num + 0] = 0x2; /* ASCII */
+ buf[num + 1] = 0x1; /* Vendor ID */
-+ if (virt_dev->blockio)
++ if (cmd->tgtt->vendor)
++ memcpy(&buf[num + 4], cmd->tgtt->vendor, 8);
++ else if (virt_dev->blockio)
+ memcpy(&buf[num + 4], SCST_BIO_VENDOR, 8);
+ else
+ memcpy(&buf[num + 4], SCST_FIO_VENDOR, 8);
@@ -41224,6 +40480,22 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+
+ num += 4;
+
++ tg_id = scst_lookup_tg_id(cmd->dev, cmd->tgt);
++ if (tg_id) {
++ /*
++ * Target port group designator
++ */
++ buf[num + 0] = 0x01; /* binary */
++ /* Target port group id */
++ buf[num + 1] = 0x10 | 0x05;
++
++ put_unaligned(cpu_to_be16(tg_id),
++ (__be16 *)&buf[num + 4 + 2]);
++
++ buf[num + 3] = 4;
++ num += 4 + buf[num + 3];
++ }
++
+ /*
+ * IEEE id
+ */
@@ -41253,10 +40525,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ resp_len += 4;
+ } else if ((0xB0 == cmd->cdb[2]) &&
+ (virt_dev->dev->type == TYPE_DISK)) {
-+ /* block limits */
++ /* Block Limits */
+ int max_transfer;
+ buf[1] = 0xB0;
-+ buf[3] = 0x1C;
++ buf[3] = 0x3C;
+ /* Optimal transfer granuality is PAGE_SIZE */
+ put_unaligned(cpu_to_be16(max_t(int,
+ PAGE_SIZE/virt_dev->block_size, 1)),
@@ -41268,7 +40540,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ put_unaligned(cpu_to_be32(max_transfer),
+ (uint32_t *)&buf[8]);
+ /*
-+ * Let's have optimal transfer len 1MB. Better to not
++ * Let's have optimal transfer len 512KB. Better to not
+ * set it at all, because we don't have such limit,
+ * but some initiators may not understand that (?).
+ * From other side, too big transfers are not optimal,
@@ -41276,8 +40548,27 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ */
+ put_unaligned(cpu_to_be32(min_t(int,
+ max_transfer,
-+ 1*1024*1024 / virt_dev->block_size)),
++ 512*1024 / virt_dev->block_size)),
+ (uint32_t *)&buf[12]);
++ if (virt_dev->thin_provisioned) {
++ /* MAXIMUM UNMAP LBA COUNT is UNLIMITED */
++ put_unaligned(__constant_cpu_to_be32(0xFFFFFFFF),
++ (uint32_t *)&buf[20]);
++ /* MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT is UNLIMITED */
++ put_unaligned(__constant_cpu_to_be32(0xFFFFFFFF),
++ (uint32_t *)&buf[24]);
++ /* OPTIMAL UNMAP GRANULARITY is 1 */
++ put_unaligned(__constant_cpu_to_be32(1),
++ (uint32_t *)&buf[28]);
++ }
++ resp_len = buf[3] + 4;
++ } else if ((0xB2 == cmd->cdb[2]) &&
++ (virt_dev->dev->type == TYPE_DISK) &&
++ virt_dev->thin_provisioned) {
++ /* Thin Provisioning */
++ buf[1] = 0xB2;
++ buf[3] = 2;
++ buf[5] = 0x80;
+ resp_len = buf[3] + 4;
+ } else {
+ TRACE_DBG("INQUIRY: Unsupported EVPD page %x",
@@ -41296,17 +40587,25 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ goto out_put;
+ }
+
++ if (virt_dev->removable)
++ buf[1] = 0x80; /* removable */
+ buf[2] = 5; /* Device complies to SPC-3 */
-+ buf[3] = 0x12; /* HiSup + data in format specified in SPC */
++ buf[3] = 0x02; /* Data in format specified in SPC */
++ if (cmd->tgtt->fake_aca)
++ buf[3] |= 0x20;
+ buf[4] = 31;/* n - 4 = 35 - 4 = 31 for full 36 byte data */
-+ buf[6] = 1; /* MultiP 1 */
++ if (scst_impl_alua_configured(cmd->dev))
++ buf[5] = SCST_INQ_TPGS_MODE_IMPLICIT;
++ buf[6] = 0x10; /* MultiP 1 */
+ buf[7] = 2; /* CMDQUE 1, BQue 0 => commands queuing supported */
+
+ /*
+ * 8 byte ASCII Vendor Identification of the target
+ * - left aligned.
+ */
-+ if (virt_dev->blockio)
++ if (cmd->tgtt->vendor)
++ memcpy(&buf[8], cmd->tgtt->vendor, 8);
++ else if (virt_dev->blockio)
+ memcpy(&buf[8], SCST_BIO_VENDOR, 8);
+ else
+ memcpy(&buf[8], SCST_FIO_VENDOR, 8);
@@ -41316,14 +40615,21 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ * aligned.
+ */
+ memset(&buf[16], ' ', 16);
-+ len = min(strlen(virt_dev->name), (size_t)16);
-+ memcpy(&buf[16], virt_dev->name, len);
++ if (cmd->tgtt->get_product_id)
++ cmd->tgtt->get_product_id(cmd->tgt_dev, &buf[16], 16);
++ else {
++ len = min_t(size_t, strlen(virt_dev->name), 16);
++ memcpy(&buf[16], virt_dev->name, len);
++ }
+
+ /*
+ * 4 byte ASCII Product Revision Level of the target - left
+ * aligned.
+ */
-+ memcpy(&buf[32], SCST_FIO_REV, 4);
++ if (cmd->tgtt->revision)
++ memcpy(&buf[32], cmd->tgtt->revision, 4);
++ else
++ memcpy(&buf[32], SCST_FIO_REV, 4);
+
+ /** Version descriptors **/
+
@@ -41363,6 +40669,14 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ num += 2;
+ }
+
++ /* Vendor specific information. */
++ if (cmd->tgtt->get_vend_specific) {
++ /* Skip to byte 96. */
++ num = 96 - 58;
++ num += cmd->tgtt->get_vend_specific(cmd->tgt_dev,
++ &buf[96], INQ_BUF_SZ - 96);
++ }
++
+ buf[4] += num;
+ resp_len = buf[4] + 5;
+ }
@@ -41374,7 +40688,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ memcpy(address, buf, length);
+
+out_put:
-+ scst_put_buf(cmd, address);
++ scst_put_buf_full(cmd, address);
+ if (length < cmd->resp_data_len)
+ scst_set_resp_data_len(cmd, length);
+
@@ -41397,10 +40711,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ sl = scst_set_sense(b, sizeof(b), cmd->dev->d_sense,
+ SCST_LOAD_SENSE(scst_sense_no_sense));
+
-+ length = scst_get_buf_first(cmd, &address);
++ length = scst_get_buf_full(cmd, &address);
+ TRACE_DBG("length %d", length);
+ if (length < 0) {
-+ PRINT_ERROR("scst_get_buf_first() failed: %d)", length);
++ PRINT_ERROR("scst_get_buf_full() failed: %d)", length);
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_hardw_error));
+ goto out;
@@ -41410,7 +40724,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ memcpy(address, b, length);
+ scst_set_resp_data_len(cmd, length);
+
-+ scst_put_buf(cmd, address);
++ scst_put_buf_full(cmd, address);
+
+out:
+ TRACE_EXIT();
@@ -41580,7 +40894,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ goto out;
+ }
+
-+ virt_dev = (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ virt_dev = cmd->dev->dh_priv;
+ blocksize = virt_dev->block_size;
+ nblocks = virt_dev->nblocks;
+
@@ -41596,10 +40910,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ if (!virt_dev->blockio)
+ dev_spec |= DPOFUA;
+
-+ length = scst_get_buf_first(cmd, &address);
++ length = scst_get_buf_full(cmd, &address);
+ if (unlikely(length <= 0)) {
+ if (length < 0) {
-+ PRINT_ERROR("scst_get_buf_first() failed: %d", length);
++ PRINT_ERROR("scst_get_buf_full() failed: %d", length);
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_hardw_error));
+ }
@@ -41708,7 +41022,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ memcpy(address, buf, offset);
+
+out_put:
-+ scst_put_buf(cmd, address);
++ scst_put_buf_full(cmd, address);
+ if (offset < cmd->resp_data_len)
+ scst_set_resp_data_len(cmd, offset);
+
@@ -41741,15 +41055,22 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+}
+
+static void vdisk_ctrl_m_pg_select(unsigned char *p,
-+ struct scst_vdisk_dev *virt_dev)
++ struct scst_vdisk_dev *virt_dev, struct scst_cmd *cmd)
+{
+ struct scst_device *dev = virt_dev->dev;
+ int old_swp = dev->swp, old_tas = dev->tas, old_dsense = dev->d_sense;
+
-+#if 0
-+ /* Not implemented yet, see comment in vdisk_ctrl_m_pg() */
-+ dev->tst = p[2] >> 5;
++#if 0 /* Not implemented yet, see comment in vdisk_ctrl_m_pg() */
++ dev->tst = (p[2] >> 5) & 1;
+ dev->queue_alg = p[3] >> 4;
++#else
++ if ((dev->tst != ((p[2] >> 5) & 1)) || (dev->queue_alg != (p[3] >> 4))) {
++ TRACE(TRACE_MINOR|TRACE_SCSI, "%s", "MODE SELECT: Changing of "
++ "TST and QUEUE ALGORITHM not supported");
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ return;
++ }
+#endif
+ dev->swp = (p[4] & 0x8) >> 3;
+ dev->tas = (p[5] & 0x40) >> 6;
@@ -41771,13 +41092,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+
+ TRACE_ENTRY();
+
-+ virt_dev = (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ virt_dev = cmd->dev->dh_priv;
+ mselect_6 = (MODE_SELECT == cmd->cdb[0]);
+
-+ length = scst_get_buf_first(cmd, &address);
++ length = scst_get_buf_full(cmd, &address);
+ if (unlikely(length <= 0)) {
+ if (length < 0) {
-+ PRINT_ERROR("scst_get_buf_first() failed: %d", length);
++ PRINT_ERROR("scst_get_buf_full() failed: %d", length);
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_hardw_error));
+ }
@@ -41840,7 +41161,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ scst_sense_invalid_field_in_parm_list));
+ goto out_put;
+ }
-+ vdisk_ctrl_m_pg_select(&address[offset], virt_dev);
++ vdisk_ctrl_m_pg_select(&address[offset], virt_dev, cmd);
+ } else {
+ PRINT_ERROR("MODE SELECT: Invalid request %x",
+ address[offset] & 0x3f);
@@ -41852,7 +41173,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ }
+
+out_put:
-+ scst_put_buf(cmd, address);
++ scst_put_buf_full(cmd, address);
+
+out:
+ TRACE_EXIT();
@@ -41882,7 +41203,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+
+ TRACE_ENTRY();
+
-+ virt_dev = (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ virt_dev = cmd->dev->dh_priv;
+ blocksize = virt_dev->block_size;
+ nblocks = virt_dev->nblocks;
+
@@ -41898,7 +41219,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+
+ /* Last block on the virt_dev is (nblocks-1) */
+ memset(buffer, 0, sizeof(buffer));
-+ if (nblocks >> 32) {
++
++ /*
++ * If we are thinly provisioned, we must ensure that the initiator
++ * issues a READ_CAPACITY(16) so we can return the TPE bit. By
++ * returning 0xFFFFFFFF we do that.
++ */
++ if (nblocks >> 32 || virt_dev->thin_provisioned) {
+ buffer[0] = 0xFF;
+ buffer[1] = 0xFF;
+ buffer[2] = 0xFF;
@@ -41914,10 +41241,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ buffer[6] = (blocksize >> (BYTE * 1)) & 0xFF;
+ buffer[7] = (blocksize >> (BYTE * 0)) & 0xFF;
+
-+ length = scst_get_buf_first(cmd, &address);
++ length = scst_get_buf_full(cmd, &address);
+ if (unlikely(length <= 0)) {
+ if (length < 0) {
-+ PRINT_ERROR("scst_get_buf_first() failed: %d", length);
++ PRINT_ERROR("scst_get_buf_full() failed: %d", length);
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_hardw_error));
+ }
@@ -41928,7 +41255,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+
+ memcpy(address, buffer, length);
+
-+ scst_put_buf(cmd, address);
++ scst_put_buf_full(cmd, address);
+
+ if (length < cmd->resp_data_len)
+ scst_set_resp_data_len(cmd, length);
@@ -41949,7 +41276,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+
+ TRACE_ENTRY();
+
-+ virt_dev = (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ virt_dev = cmd->dev->dh_priv;
+ blocksize = virt_dev->block_size;
+ nblocks = virt_dev->nblocks - 1;
+
@@ -41995,10 +41322,22 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ break;
+ }
+
-+ length = scst_get_buf_first(cmd, &address);
++ if (virt_dev->thin_provisioned) {
++ buffer[14] |= 0x80; /* Add TPE */
++#if 0 /*
++ * Might be a big performance and functionality win, but might be
++ * dangerous as well, although generally nearly always it should be set,
++ * because nearly all devices should return zero for unmapped blocks.
++ * But let's be on the safe side and disable it for now.
++ */
++ buffer[14] |= 0x40; /* Add TPRZ */
++#endif
++ }
++
++ length = scst_get_buf_full(cmd, &address);
+ if (unlikely(length <= 0)) {
+ if (length < 0) {
-+ PRINT_ERROR("scst_get_buf_first() failed: %d", length);
++ PRINT_ERROR("scst_get_buf_full() failed: %d", length);
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_hardw_error));
+ }
@@ -42009,11 +41348,66 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+
+ memcpy(address, buffer, length);
+
-+ scst_put_buf(cmd, address);
++ scst_put_buf_full(cmd, address);
++
++ if (length < cmd->resp_data_len)
++ scst_set_resp_data_len(cmd, length);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* SPC-4 REPORT TARGET PORT GROUPS command */
++static void vdisk_exec_report_tpgs(struct scst_cmd *cmd)
++{
++ struct scst_device *dev;
++ uint8_t *address;
++ void *buf;
++ int32_t buf_len;
++ uint32_t allocation_length, data_length, length;
++ uint8_t data_format;
++ int res;
++
++ TRACE_ENTRY();
++
++ buf_len = scst_get_buf_full(cmd, &address);
++ if (buf_len < 0) {
++ PRINT_ERROR("scst_get_buf_full() failed: %d", buf_len);
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out;
++ }
+
++ if (cmd->cdb_len < 12)
++ PRINT_WARNING("received invalid REPORT TARGET PORT GROUPS "
++ "command - length %d is too small (should be at "
++ "least 12 bytes)", cmd->cdb_len);
++
++ dev = cmd->dev;
++ data_format = cmd->cdb_len > 1 ? cmd->cdb[1] >> 5 : 0;
++ allocation_length = cmd->cdb_len >= 10 ?
++ be32_to_cpu(get_unaligned((__be32 *)(cmd->cdb + 6))) : 1024;
++
++ res = scst_tg_get_group_info(&buf, &data_length, dev, data_format);
++ if (res == -ENOMEM) {
++ scst_set_busy(cmd);
++ goto out_put;
++ } else if (res < 0) {
++ scst_set_cmd_error(cmd,
++ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
++ goto out_put;
++ }
++
++ length = min_t(uint32_t, min(allocation_length, data_length), buf_len);
++ memcpy(address, buf, length);
++ kfree(buf);
+ if (length < cmd->resp_data_len)
+ scst_set_resp_data_len(cmd, length);
+
++out_put:
++ scst_put_buf_full(cmd, address);
++
+out:
+ TRACE_EXIT();
+ return;
@@ -42053,17 +41447,17 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ goto out;
+ }
+
-+ length = scst_get_buf_first(cmd, &address);
++ length = scst_get_buf_full(cmd, &address);
+ if (unlikely(length <= 0)) {
+ if (length < 0) {
-+ PRINT_ERROR("scst_get_buf_first() failed: %d", length);
++ PRINT_ERROR("scst_get_buf_full() failed: %d", length);
+ scst_set_cmd_error(cmd,
+ SCST_LOAD_SENSE(scst_sense_hardw_error));
+ }
+ goto out;
+ }
+
-+ virt_dev = (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ virt_dev = cmd->dev->dh_priv;
+ /* ToDo when you have > 8TB ROM device. */
+ nblocks = (uint32_t)virt_dev->nblocks;
+
@@ -42100,7 +41494,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ off = length;
+ memcpy(address, buffer, off);
+
-+ scst_put_buf(cmd, address);
++ scst_put_buf_full(cmd, address);
+
+ if (off < cmd->resp_data_len)
+ scst_set_resp_data_len(cmd, off);
@@ -42112,8 +41506,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+
+static void vdisk_exec_prevent_allow_medium_removal(struct scst_cmd *cmd)
+{
-+ struct scst_vdisk_dev *virt_dev =
-+ (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ struct scst_vdisk_dev *virt_dev = cmd->dev->dh_priv;
+
+ TRACE_DBG("PERSIST/PREVENT 0x%02x", cmd->cdb[4]);
+
@@ -42128,8 +41521,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ loff_t len, struct scst_cmd *cmd, struct scst_device *dev)
+{
+ int res = 0;
-+ struct scst_vdisk_dev *virt_dev =
-+ (struct scst_vdisk_dev *)dev->dh_priv;
++ struct scst_vdisk_dev *virt_dev = dev->dh_priv;
+ struct file *file;
+
+ TRACE_ENTRY();
@@ -42140,7 +41532,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ goto out;
+
+ if (virt_dev->blockio) {
-+ res = blockio_flush(thr->bdev);
++ res = vdisk_blockio_flush(thr->bdev,
++ (cmd->noio_mem_alloc ? GFP_NOIO : GFP_KERNEL), true);
+ goto out;
+ }
+
@@ -42193,8 +41586,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ loff_t err;
+ ssize_t length, full_len;
+ uint8_t __user *address;
-+ struct scst_vdisk_dev *virt_dev =
-+ (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ struct scst_vdisk_dev *virt_dev = cmd->dev->dh_priv;
+ struct file *fd = thr->fd;
+ struct iovec *iv;
+ int iv_count, i;
@@ -42308,8 +41700,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ loff_t err;
+ ssize_t length, full_len, saved_full_len;
+ uint8_t __user *address;
-+ struct scst_vdisk_dev *virt_dev =
-+ (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ struct scst_vdisk_dev *virt_dev = cmd->dev->dh_priv;
+ struct file *fd = thr->fd;
+ struct iovec *iv, *eiv;
+ int i, iv_count, eiv_count;
@@ -42506,16 +41897,17 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+static void blockio_exec_rw(struct scst_cmd *cmd, struct scst_vdisk_thr *thr,
+ u64 lba_start, int write)
+{
-+ struct scst_vdisk_dev *virt_dev =
-+ (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ struct scst_vdisk_dev *virt_dev = cmd->dev->dh_priv;
+ struct block_device *bdev = thr->bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
-+ int length, max_nr_vecs = 0;
-+ uint8_t *address;
++ int length, max_nr_vecs = 0, offset;
++ struct page *page;
+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+ int need_new_bio;
+ struct scst_blockio_work *blockio_work;
+ int bios = 0;
++ gfp_t gfp_mask = (cmd->noio_mem_alloc ? GFP_NOIO : GFP_KERNEL);
++ struct blk_plug plug;
+
+ TRACE_ENTRY();
+
@@ -42523,7 +41915,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ goto out;
+
+ /* Allocate and initialize blockio_work struct */
-+ blockio_work = kmem_cache_alloc(blockio_work_cachep, GFP_KERNEL);
++ blockio_work = kmem_cache_alloc(blockio_work_cachep, gfp_mask);
+ if (blockio_work == NULL)
+ goto out_no_mem;
+
@@ -42536,24 +41928,23 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+
+ need_new_bio = 1;
+
-+ length = scst_get_buf_first(cmd, &address);
++ length = scst_get_sg_page_first(cmd, &page, &offset);
+ while (length > 0) {
+ int len, bytes, off, thislen;
-+ uint8_t *addr;
++ struct page *pg;
+ u64 lba_start0;
+
-+ addr = address;
-+ off = offset_in_page(addr);
++ pg = page;
+ len = length;
++ off = offset;
+ thislen = 0;
+ lba_start0 = lba_start;
+
+ while (len > 0) {
+ int rc;
-+ struct page *page = virt_to_page(addr);
+
+ if (need_new_bio) {
-+ bio = bio_kmalloc(GFP_KERNEL, max_nr_vecs);
++ bio = bio_kmalloc(gfp_mask, max_nr_vecs);
+ if (!bio) {
+ PRINT_ERROR("Failed to create bio "
+ "for data segment %d (cmd %p)",
@@ -42586,7 +41977,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+
+ bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+
-+ rc = bio_add_page(bio, page, bytes, off);
++ rc = bio_add_page(bio, pg, bytes, off);
+ if (rc < bytes) {
+ BUG_ON(rc != 0);
+ need_new_bio = 1;
@@ -42595,7 +41986,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ continue;
+ }
+
-+ addr += PAGE_SIZE;
++ pg++;
+ thislen += bytes;
+ len -= bytes;
+ off = 0;
@@ -42603,13 +41994,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+
+ lba_start += length >> virt_dev->block_shift;
+
-+ scst_put_buf(cmd, address);
-+ length = scst_get_buf_next(cmd, &address);
++ scst_put_sg_page(cmd, page, offset);
++ length = scst_get_sg_page_next(cmd, &page, &offset);
+ }
+
+ /* +1 to prevent erroneous too early command completion */
+ atomic_set(&blockio_work->bios_inflight, bios+1);
+
++ blk_start_plug(&plug);
++
+ while (hbio) {
+ bio = hbio;
+ hbio = hbio->bi_next;
@@ -42617,8 +42010,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ submit_bio((write != 0), bio);
+ }
+
-+ if (q && q->unplug_fn)
-+ q->unplug_fn(q);
++ blk_finish_plug(&plug);
+
+ blockio_check_finish(blockio_work);
+
@@ -42639,14 +42031,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ goto out;
+}
+
-+static int blockio_flush(struct block_device *bdev)
++static int vdisk_blockio_flush(struct block_device *bdev, gfp_t gfp_mask,
++ bool report_error)
+{
+ int res = 0;
+
+ TRACE_ENTRY();
+
-+ res = blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT);
-+ if (res != 0)
++ res = blkdev_issue_flush(bdev, gfp_mask, NULL);
++ if ((res != 0) && report_error)
+ PRINT_ERROR("blkdev_issue_flush() failed: %d", res);
+
+ TRACE_EXIT_RES(res);
@@ -42661,8 +42054,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ ssize_t length, len_mem = 0;
+ uint8_t *address_sav, *address;
+ int compare;
-+ struct scst_vdisk_dev *virt_dev =
-+ (struct scst_vdisk_dev *)cmd->dev->dh_priv;
++ struct scst_vdisk_dev *virt_dev = cmd->dev->dh_priv;
+ struct file *fd = thr->fd;
+ uint8_t *mem_verify = NULL;
+
@@ -42777,8 +42169,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ if ((mcmd->fn == SCST_LUN_RESET) || (mcmd->fn == SCST_TARGET_RESET)) {
+ /* Restore default values */
+ struct scst_device *dev = tgt_dev->dev;
-+ struct scst_vdisk_dev *virt_dev =
-+ (struct scst_vdisk_dev *)dev->dh_priv;
++ struct scst_vdisk_dev *virt_dev = dev->dh_priv;
+
+ dev->tst = DEF_TST;
+ dev->d_sense = DEF_DSENSE;
@@ -42794,8 +42185,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ spin_unlock(&virt_dev->flags_lock);
+ } else if (mcmd->fn == SCST_PR_ABORT_ALL) {
+ struct scst_device *dev = tgt_dev->dev;
-+ struct scst_vdisk_dev *virt_dev =
-+ (struct scst_vdisk_dev *)dev->dh_priv;
++ struct scst_vdisk_dev *virt_dev = dev->dh_priv;
+ spin_lock(&virt_dev->flags_lock);
+ virt_dev->prevent_allow_medium_removal = 0;
+ spin_unlock(&virt_dev->flags_lock);
@@ -42841,6 +42231,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ i += snprintf(&buf[i], sizeof(buf) - i, "%sREMOVABLE",
+ (j == i) ? "(" : ", ");
+
++ if (virt_dev->thin_provisioned)
++ i += snprintf(&buf[i], sizeof(buf) - i, "%sTHIN PROVISIONED",
++ (j == i) ? "(" : ", ");
++
+ if (j == i)
+ PRINT_INFO("%s", buf);
+ else
@@ -42899,9 +42293,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ int res = 0;
+ struct scst_vdisk_dev *virt_dev;
+ uint64_t dev_id_num;
-+ int dev_id_len;
-+ char dev_id_str[17];
-+ int32_t i;
+
+ virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
+ if (virt_dev == NULL) {
@@ -42916,6 +42307,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+
+ virt_dev->rd_only = DEF_RD_ONLY;
+ virt_dev->removable = DEF_REMOVABLE;
++ virt_dev->thin_provisioned = DEF_THIN_PROVISIONED;
+
+ virt_dev->block_size = DEF_DISK_BLOCKSIZE;
+ virt_dev->block_shift = DEF_DISK_BLOCKSIZE_SHIFT;
@@ -42929,22 +42321,14 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ strcpy(virt_dev->name, name);
+
+ dev_id_num = vdisk_gen_dev_id_num(virt_dev->name);
-+ dev_id_len = scnprintf(dev_id_str, sizeof(dev_id_str), "%llx",
-+ dev_id_num);
+
-+ i = strlen(virt_dev->name) + 1; /* for ' ' */
-+ memset(virt_dev->t10_dev_id, ' ', i + dev_id_len);
-+ memcpy(virt_dev->t10_dev_id, virt_dev->name, i-1);
-+ memcpy(virt_dev->t10_dev_id + i, dev_id_str, dev_id_len);
++ snprintf(virt_dev->t10_dev_id, sizeof(virt_dev->t10_dev_id),
++ "%llx-%s", dev_id_num, virt_dev->name);
+ TRACE_DBG("t10_dev_id %s", virt_dev->t10_dev_id);
+
-+ virt_dev->t10_dev_id_set = 1; /* temporary */
-+
+ scnprintf(virt_dev->usn, sizeof(virt_dev->usn), "%llx", dev_id_num);
+ TRACE_DBG("usn %s", virt_dev->usn);
+
-+ virt_dev->usn_set = 1; /* temporary */
-+
+ *res_virt_dev = virt_dev;
+
+out:
@@ -43084,6 +42468,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ } else if (!strcasecmp("removable", p)) {
+ virt_dev->removable = val;
+ TRACE_DBG("REMOVABLE %d", virt_dev->removable);
++ } else if (!strcasecmp("thin_provisioned", p)) {
++ virt_dev->thin_provisioned = val;
++ TRACE_DBG("THIN PROVISIONED %d",
++ virt_dev->thin_provisioned);
+ } else if (!strcasecmp("blocksize", p)) {
+ virt_dev->block_size = val;
+ virt_dev->block_shift = scst_calc_block_shift(
@@ -43174,7 +42562,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+{
+ int res = 0;
+ const char *allowed_params[] = { "filename", "read_only", "removable",
-+ "blocksize", "nv_cache", NULL };
++ "blocksize", "nv_cache",
++ "thin_provisioned", NULL };
+ struct scst_vdisk_dev *virt_dev;
+
+ TRACE_ENTRY();
@@ -43392,7 +42781,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ if (res != 0)
+ goto out;
+
++#if 0 /*
++ * Our implementation is pretty minimalistic and doesn't support all
++ * mandatory commands, so it's better to not claim any standard
++ * confirmance.
++ */
+ virt_dev->command_set_version = 0x02A0; /* MMC-3 */
++#endif
+
+ virt_dev->rd_only = 1;
+ virt_dev->removable = 1;
@@ -43525,16 +42920,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ old_fn = virt_dev->filename;
+
+ if (!virt_dev->cdrom_empty) {
-+ int len = strlen(filename) + 1;
-+ char *fn = kmalloc(len, GFP_KERNEL);
++ char *fn = kstrdup(filename, GFP_KERNEL);
+ if (fn == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "%s",
-+ "Allocation of filename failed");
++ PRINT_ERROR("%s", "Allocation of filename failed");
+ res = -ENOMEM;
+ goto out_unlock;
+ }
+
-+ strlcpy(fn, filename, len);
+ virt_dev->filename = fn;
+
+ res = vdisk_get_file_size(virt_dev->filename,
@@ -43604,7 +42996,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ TRACE_ENTRY();
+
+ /* It's safe, since we taken dev_kobj and dh_priv NULLed in attach() */
-+ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++ virt_dev = dev->dh_priv;
+
+ res = vcdrom_change(virt_dev, work->buf);
+
@@ -43626,15 +43018,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+
+ dev = container_of(kobj, struct scst_device, dev_kobj);
+
-+ i_buf = kmalloc(count+1, GFP_KERNEL);
++ i_buf = kasprintf(GFP_KERNEL, "%.*s", (int)count, buf);
+ if (i_buf == NULL) {
+ PRINT_ERROR("Unable to alloc intermediate buffer with size %zd",
+ count+1);
+ res = -ENOMEM;
+ goto out;
+ }
-+ memcpy(i_buf, buf, count);
-+ i_buf[count] = '\0';
+
+ res = scst_alloc_sysfs_work(vcdrom_sysfs_process_filename_store,
+ false, &work);
@@ -43669,7 +43059,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ TRACE_ENTRY();
+
+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++ virt_dev = dev->dh_priv;
+
+ pos = sprintf(buf, "%lld\n", virt_dev->file_size / 1024 / 1024);
+
@@ -43687,7 +43077,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ TRACE_ENTRY();
+
+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++ virt_dev = dev->dh_priv;
+
+ pos = sprintf(buf, "%d\n%s", (int)virt_dev->block_size,
+ (virt_dev->block_size == DEF_DISK_BLOCKSIZE) ? "" :
@@ -43707,11 +43097,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ TRACE_ENTRY();
+
+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++ virt_dev = dev->dh_priv;
+
+ pos = sprintf(buf, "%d\n%s", virt_dev->rd_only ? 1 : 0,
+ (virt_dev->rd_only == DEF_RD_ONLY) ? "" :
-+ SCST_SYSFS_KEY_MARK "");
++ SCST_SYSFS_KEY_MARK "\n");
+
+ TRACE_EXIT_RES(pos);
+ return pos;
@@ -43727,11 +43117,31 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ TRACE_ENTRY();
+
+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++ virt_dev = dev->dh_priv;
+
+ pos = sprintf(buf, "%d\n%s", virt_dev->wt_flag ? 1 : 0,
+ (virt_dev->wt_flag == DEF_WRITE_THROUGH) ? "" :
-+ SCST_SYSFS_KEY_MARK "");
++ SCST_SYSFS_KEY_MARK "\n");
++
++ TRACE_EXIT_RES(pos);
++ return pos;
++}
++
++static ssize_t vdisk_sysfs_tp_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ int pos = 0;
++ struct scst_device *dev;
++ struct scst_vdisk_dev *virt_dev;
++
++ TRACE_ENTRY();
++
++ dev = container_of(kobj, struct scst_device, dev_kobj);
++ virt_dev = dev->dh_priv;
++
++ pos = sprintf(buf, "%d\n%s", virt_dev->thin_provisioned ? 1 : 0,
++ (virt_dev->thin_provisioned == DEF_THIN_PROVISIONED) ? "" :
++ SCST_SYSFS_KEY_MARK "\n");
+
+ TRACE_EXIT_RES(pos);
+ return pos;
@@ -43747,11 +43157,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ TRACE_ENTRY();
+
+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++ virt_dev = dev->dh_priv;
+
+ pos = sprintf(buf, "%d\n%s", virt_dev->nv_cache ? 1 : 0,
+ (virt_dev->nv_cache == DEF_NV_CACHE) ? "" :
-+ SCST_SYSFS_KEY_MARK "");
++ SCST_SYSFS_KEY_MARK "\n");
+
+ TRACE_EXIT_RES(pos);
+ return pos;
@@ -43767,11 +43177,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ TRACE_ENTRY();
+
+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++ virt_dev = dev->dh_priv;
+
+ pos = sprintf(buf, "%d\n%s", virt_dev->o_direct_flag ? 1 : 0,
+ (virt_dev->o_direct_flag == DEF_O_DIRECT) ? "" :
-+ SCST_SYSFS_KEY_MARK "");
++ SCST_SYSFS_KEY_MARK "\n");
+
+ TRACE_EXIT_RES(pos);
+ return pos;
@@ -43787,7 +43197,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ TRACE_ENTRY();
+
+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++ virt_dev = dev->dh_priv;
+
+ pos = sprintf(buf, "%d\n", virt_dev->removable ? 1 : 0);
+
@@ -43809,12 +43219,27 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+
+ dev = work->dev;
+
-+ if (mutex_lock_interruptible(&scst_vdisk_mutex) != 0) {
-+ res = -EINTR;
-+ goto out_put;
++ /*
++ * Since we have a get() on dev->dev_kobj, we can not simply mutex_lock
++ * scst_vdisk_mutex, because otherwise we can fall in a deadlock with
++ * vdisk_del_device(), which is waiting for the last ref to dev_kobj
++ * under scst_vdisk_mutex.
++ */
++ while (!mutex_trylock(&scst_vdisk_mutex)) {
++ if ((volatile bool)(dev->dev_unregistering)) {
++ TRACE_MGMT_DBG("Skipping being unregistered dev %s",
++ dev->virt_name);
++ res = -ENOENT;
++ goto out_put;
++ }
++ if (signal_pending(current)) {
++ res = -EINTR;
++ goto out_put;
++ }
++ msleep(100);
+ }
+
-+ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++ virt_dev = dev->dh_priv;
+
+ if (virt_dev == NULL)
+ goto out_unlock;
@@ -43882,7 +43307,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ TRACE_ENTRY();
+
+ /* It's safe, since we taken dev_kobj and dh_priv NULLed in attach() */
-+ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++ virt_dev = dev->dh_priv;
+
+ res = vdisk_resync_size(virt_dev);
+
@@ -43931,7 +43356,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ TRACE_ENTRY();
+
+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++ virt_dev = dev->dh_priv;
+
+ write_lock(&vdisk_serial_rwlock);
+
@@ -43980,7 +43405,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ TRACE_ENTRY();
+
+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++ virt_dev = dev->dh_priv;
+
+ read_lock(&vdisk_serial_rwlock);
+ pos = sprintf(buf, "%s\n%s", virt_dev->t10_dev_id,
@@ -44050,7 +43475,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+ TRACE_ENTRY();
+
+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = (struct scst_vdisk_dev *)dev->dh_priv;
++ virt_dev = dev->dh_priv;
+
+ read_lock(&vdisk_serial_rwlock);
+ pos = sprintf(buf, "%s\n%s", virt_dev->usn,
@@ -44185,10 +43610,3543 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_vdisk.c linux-2.6.36
+MODULE_DESCRIPTION("SCSI disk (type 0) and CDROM (type 5) dev handler for "
+ "SCST using files on file systems or block devices");
+MODULE_VERSION(SCST_VERSION_STRING);
-diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documentation/scst/README.scst
---- orig/linux-2.6.36/Documentation/scst/README.scst
-+++ linux-2.6.36/Documentation/scst/README.scst
-@@ -0,0 +1,1453 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/scst_tg.c linux-2.6.39/drivers/scst/scst_tg.c
+--- orig/linux-2.6.39/drivers/scst/scst_tg.c
++++ linux-2.6.39/drivers/scst/scst_tg.c
+@@ -0,0 +1,809 @@
++/*
++ * scst_tg.c
++ *
++ * SCSI target group related code.
++ *
++ * Copyright (C) 2011 Bart Van Assche <bvanassche@acm.org>.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <asm/unaligned.h>
++#include <scst/scst.h>
++#include "scst_priv.h"
++
++static struct list_head scst_dev_group_list;
++
++/* Look up a device by name. */
++static struct scst_device *__lookup_dev(const char *name)
++{
++ struct scst_device *dev;
++
++ list_for_each_entry(dev, &scst_dev_list, dev_list_entry)
++ if (strcmp(dev->virt_name, name) == 0)
++ return dev;
++
++ return NULL;
++}
++
++/* Look up a target by name. */
++static struct scst_tgt *__lookup_tgt(const char *name)
++{
++ struct scst_tgt_template *t;
++ struct scst_tgt *tgt;
++
++ list_for_each_entry(t, &scst_template_list, scst_template_list_entry)
++ list_for_each_entry(tgt, &t->tgt_list, tgt_list_entry)
++ if (strcmp(tgt->tgt_name, name) == 0)
++ return tgt;
++
++ return NULL;
++}
++
++/* Look up a target by name in the given device group. */
++static struct scst_tg_tgt *__lookup_dg_tgt(struct scst_dev_group *dg,
++ const char *tgt_name)
++{
++ struct scst_target_group *tg;
++ struct scst_tg_tgt *tg_tgt;
++
++ BUG_ON(!dg);
++ BUG_ON(!tgt_name);
++ list_for_each_entry(tg, &dg->tg_list, entry)
++ list_for_each_entry(tg_tgt, &tg->tgt_list, entry)
++ if (strcmp(tg_tgt->name, tgt_name) == 0)
++ return tg_tgt;
++
++ return NULL;
++}
++
++/* Look up a target group by name in the given device group. */
++static struct scst_target_group *
++__lookup_tg_by_name(struct scst_dev_group *dg, const char *name)
++{
++ struct scst_target_group *tg;
++
++ list_for_each_entry(tg, &dg->tg_list, entry)
++ if (strcmp(tg->name, name) == 0)
++ return tg;
++
++ return NULL;
++}
++
++/* Look up a device node by device pointer in the given device group. */
++static struct scst_dg_dev *__lookup_dg_dev_by_dev(struct scst_dev_group *dg,
++ struct scst_device *dev)
++{
++ struct scst_dg_dev *dgd;
++
++ list_for_each_entry(dgd, &dg->dev_list, entry)
++ if (dgd->dev == dev)
++ return dgd;
++
++ return NULL;
++}
++
++/* Look up a device node by name in the given device group. */
++static struct scst_dg_dev *__lookup_dg_dev_by_name(struct scst_dev_group *dg,
++ const char *name)
++{
++ struct scst_dg_dev *dgd;
++
++ list_for_each_entry(dgd, &dg->dev_list, entry)
++ if (strcmp(dgd->dev->virt_name, name) == 0)
++ return dgd;
++
++ return NULL;
++}
++
++/* Look up a device node by name in any device group. */
++static struct scst_dg_dev *__global_lookup_dg_dev_by_name(const char *name)
++{
++ struct scst_dev_group *dg;
++ struct scst_dg_dev *dgd;
++
++ list_for_each_entry(dg, &scst_dev_group_list, entry) {
++ dgd = __lookup_dg_dev_by_name(dg, name);
++ if (dgd)
++ return dgd;
++ }
++ return NULL;
++}
++
++/* Look up a device group by name. */
++static struct scst_dev_group *__lookup_dg_by_name(const char *name)
++{
++ struct scst_dev_group *dg;
++
++ list_for_each_entry(dg, &scst_dev_group_list, entry)
++ if (strcmp(dg->name, name) == 0)
++ return dg;
++
++ return NULL;
++}
++
++/* Look up a device group by device pointer. */
++static struct scst_dev_group *__lookup_dg_by_dev(struct scst_device *dev)
++{
++ struct scst_dev_group *dg;
++
++ list_for_each_entry(dg, &scst_dev_group_list, entry)
++ if (__lookup_dg_dev_by_dev(dg, dev))
++ return dg;
++
++ return NULL;
++}
++
++/*
++ * Target group contents management.
++ */
++
++static void scst_release_tg_tgt(struct kobject *kobj)
++{
++ struct scst_tg_tgt *tg_tgt;
++
++ tg_tgt = container_of(kobj, struct scst_tg_tgt, kobj);
++ kfree(tg_tgt->name);
++ kfree(tg_tgt);
++}
++
++static struct kobj_type scst_tg_tgt_ktype = {
++ .sysfs_ops = &scst_sysfs_ops,
++ .release = scst_release_tg_tgt,
++};
++
++/**
++ * scst_tg_tgt_add() - Add a target to a target group.
++ */
++int scst_tg_tgt_add(struct scst_target_group *tg, const char *name)
++{
++ struct scst_tg_tgt *tg_tgt;
++ struct scst_tgt *tgt;
++ int res;
++
++ TRACE_ENTRY();
++ BUG_ON(!tg);
++ BUG_ON(!name);
++ res = -ENOMEM;
++ tg_tgt = kzalloc(sizeof *tg_tgt, GFP_KERNEL);
++ if (!tg_tgt)
++ goto out;
++ tg_tgt->tg = tg;
++ kobject_init(&tg_tgt->kobj, &scst_tg_tgt_ktype);
++ tg_tgt->name = kstrdup(name, GFP_KERNEL);
++ if (!tg_tgt->name)
++ goto out_put;
++
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res)
++ goto out_put;
++ res = -EEXIST;
++ tgt = __lookup_tgt(name);
++ if (__lookup_dg_tgt(tg->dg, name))
++ goto out_unlock;
++ tg_tgt->tgt = tgt;
++ res = scst_tg_tgt_sysfs_add(tg, tg_tgt);
++ if (res)
++ goto out_unlock;
++ list_add_tail(&tg_tgt->entry, &tg->tgt_list);
++ res = 0;
++ mutex_unlock(&scst_mutex);
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++out_unlock:
++ mutex_unlock(&scst_mutex);
++out_put:
++ kobject_put(&tg_tgt->kobj);
++ goto out;
++}
++
++static void __scst_tg_tgt_remove(struct scst_target_group *tg,
++ struct scst_tg_tgt *tg_tgt)
++{
++ TRACE_ENTRY();
++ list_del(&tg_tgt->entry);
++ scst_tg_tgt_sysfs_del(tg, tg_tgt);
++ kobject_put(&tg_tgt->kobj);
++ TRACE_EXIT();
++}
++
++/**
++ * scst_tg_tgt_remove_by_name() - Remove a target from a target group.
++ */
++int scst_tg_tgt_remove_by_name(struct scst_target_group *tg, const char *name)
++{
++ struct scst_tg_tgt *tg_tgt;
++ int res;
++
++ TRACE_ENTRY();
++ BUG_ON(!tg);
++ BUG_ON(!name);
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res)
++ goto out;
++ res = -EINVAL;
++ tg_tgt = __lookup_dg_tgt(tg->dg, name);
++ if (!tg_tgt)
++ goto out_unlock;
++ __scst_tg_tgt_remove(tg, tg_tgt);
++ res = 0;
++out_unlock:
++ mutex_unlock(&scst_mutex);
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* Caller must hold scst_mutex. Called from the target removal code. */
++void scst_tg_tgt_remove_by_tgt(struct scst_tgt *tgt)
++{
++ struct scst_dev_group *dg;
++ struct scst_target_group *tg;
++ struct scst_tg_tgt *t, *t2;
++
++ BUG_ON(!tgt);
++ list_for_each_entry(dg, &scst_dev_group_list, entry)
++ list_for_each_entry(tg, &dg->tg_list, entry)
++ list_for_each_entry_safe(t, t2, &tg->tgt_list, entry)
++ if (t->tgt == tgt)
++ __scst_tg_tgt_remove(tg, t);
++}
++
++/*
++ * Target group management.
++ */
++
++static void scst_release_tg(struct kobject *kobj)
++{
++ struct scst_target_group *tg;
++
++ tg = container_of(kobj, struct scst_target_group, kobj);
++ kfree(tg->name);
++ kfree(tg);
++}
++
++static struct kobj_type scst_tg_ktype = {
++ .sysfs_ops = &scst_sysfs_ops,
++ .release = scst_release_tg,
++};
++
++/**
++ * scst_tg_add() - Add a target group.
++ */
++int scst_tg_add(struct scst_dev_group *dg, const char *name)
++{
++ struct scst_target_group *tg;
++ int res;
++
++ TRACE_ENTRY();
++ res = -ENOMEM;
++ tg = kzalloc(sizeof *tg, GFP_KERNEL);
++ if (!tg)
++ goto out;
++ kobject_init(&tg->kobj, &scst_tg_ktype);
++ tg->name = kstrdup(name, GFP_KERNEL);
++ if (!tg->name)
++ goto out_put;
++ tg->dg = dg;
++ tg->state = SCST_TG_STATE_OPTIMIZED;
++ INIT_LIST_HEAD(&tg->tgt_list);
++
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res)
++ goto out_put;
++ res = -EEXIST;
++ if (__lookup_tg_by_name(dg, name))
++ goto out_unlock;
++ res = scst_tg_sysfs_add(dg, tg);
++ if (res)
++ goto out_unlock;
++ list_add_tail(&tg->entry, &dg->tg_list);
++ mutex_unlock(&scst_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++out_put:
++ kobject_put(&tg->kobj);
++ goto out;
++}
++
++static void __scst_tg_remove(struct scst_dev_group *dg,
++ struct scst_target_group *tg)
++{
++ struct scst_tg_tgt *tg_tgt;
++
++ TRACE_ENTRY();
++ BUG_ON(!dg);
++ BUG_ON(!tg);
++ while (!list_empty(&tg->tgt_list)) {
++ tg_tgt = list_first_entry(&tg->tgt_list, struct scst_tg_tgt,
++ entry);
++ __scst_tg_tgt_remove(tg, tg_tgt);
++ }
++ list_del(&tg->entry);
++ scst_tg_sysfs_del(tg);
++ kobject_put(&tg->kobj);
++ TRACE_EXIT();
++}
++
++/**
++ * scst_tg_remove_by_name() - Remove a target group.
++ */
++int scst_tg_remove_by_name(struct scst_dev_group *dg, const char *name)
++{
++ struct scst_target_group *tg;
++ int res;
++
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res)
++ goto out;
++ res = -EINVAL;
++ tg = __lookup_tg_by_name(dg, name);
++ if (!tg)
++ goto out_unlock;
++ __scst_tg_remove(dg, tg);
++ res = 0;
++out_unlock:
++ mutex_unlock(&scst_mutex);
++out:
++ return res;
++}
++
++int scst_tg_set_state(struct scst_target_group *tg, enum scst_tg_state state)
++{
++ struct scst_dg_dev *dg_dev;
++ struct scst_device *dev;
++ struct scst_tgt_dev *tgt_dev;
++ int res;
++
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res)
++ goto out;
++
++ tg->state = state;
++
++ list_for_each_entry(dg_dev, &tg->dg->dev_list, entry) {
++ dev = dg_dev->dev;
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ TRACE_MGMT_DBG("ALUA state of tgt_dev %p has changed",
++ tgt_dev);
++ scst_gen_aen_or_ua(tgt_dev,
++ SCST_LOAD_SENSE(scst_sense_asym_access_state_changed));
++ }
++ }
++ mutex_unlock(&scst_mutex);
++out:
++ return res;
++}
++
++/*
++ * Device group contents manipulation.
++ */
++
++/**
++ * scst_dg_dev_add() - Add a device to a device group.
++ *
++ * It is verified whether 'name' refers to an existing device and whether that
++ * device has not yet been added to any other device group.
++ */
++int scst_dg_dev_add(struct scst_dev_group *dg, const char *name)
++{
++ struct scst_dg_dev *dgdev;
++ struct scst_device *dev;
++ int res;
++
++ res = -ENOMEM;
++ dgdev = kzalloc(sizeof *dgdev, GFP_KERNEL);
++ if (!dgdev)
++ goto out;
++
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res)
++ goto out_free;
++ res = -EEXIST;
++ if (__global_lookup_dg_dev_by_name(name))
++ goto out_unlock;
++ res = -EINVAL;
++ dev = __lookup_dev(name);
++ if (!dev)
++ goto out_unlock;
++ dgdev->dev = dev;
++ res = scst_dg_dev_sysfs_add(dg, dgdev);
++ if (res)
++ goto out_unlock;
++ list_add_tail(&dgdev->entry, &dg->dev_list);
++ mutex_unlock(&scst_mutex);
++
++out:
++ return res;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++out_free:
++ kfree(dgdev);
++ goto out;
++}
++
++static void __scst_dg_dev_remove(struct scst_dev_group *dg,
++ struct scst_dg_dev *dgdev)
++{
++ list_del(&dgdev->entry);
++ scst_dg_dev_sysfs_del(dg, dgdev);
++ kfree(dgdev);
++}
++
++/**
++ * scst_dg_dev_remove_by_name() - Remove a device from a device group.
++ */
++int scst_dg_dev_remove_by_name(struct scst_dev_group *dg, const char *name)
++{
++ struct scst_dg_dev *dgdev;
++ int res;
++
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res)
++ goto out;
++ res = -EINVAL;
++ dgdev = __lookup_dg_dev_by_name(dg, name);
++ if (!dgdev)
++ goto out_unlock;
++ __scst_dg_dev_remove(dg, dgdev);
++ res = 0;
++out_unlock:
++ mutex_unlock(&scst_mutex);
++out:
++ return res;
++}
++
++/* Caller must hold scst_mutex. Called from the device removal code. */
++int scst_dg_dev_remove_by_dev(struct scst_device *dev)
++{
++ struct scst_dev_group *dg;
++ struct scst_dg_dev *dgdev;
++ int res;
++
++ res = -EINVAL;
++ dg = __lookup_dg_by_dev(dev);
++ if (!dg)
++ goto out;
++ dgdev = __lookup_dg_dev_by_dev(dg, dev);
++ BUG_ON(!dgdev);
++ __scst_dg_dev_remove(dg, dgdev);
++ res = 0;
++out:
++ return res;
++}
++
++/*
++ * Device group management.
++ */
++
++static void scst_release_dg(struct kobject *kobj)
++{
++ struct scst_dev_group *dg;
++
++ dg = container_of(kobj, struct scst_dev_group, kobj);
++ kfree(dg->name);
++ kfree(dg);
++}
++
++static struct kobj_type scst_dg_ktype = {
++ .sysfs_ops = &scst_sysfs_ops,
++ .release = scst_release_dg,
++};
++
++/**
++ * scst_dg_add() - Add a new device group object and make it visible in sysfs.
++ */
++int scst_dg_add(struct kobject *parent, const char *name)
++{
++ struct scst_dev_group *dg;
++ int res;
++
++ TRACE_ENTRY();
++
++ res = -ENOMEM;
++ dg = kzalloc(sizeof(*dg), GFP_KERNEL);
++ if (!dg)
++ goto out;
++ kobject_init(&dg->kobj, &scst_dg_ktype);
++ dg->name = kstrdup(name, GFP_KERNEL);
++ if (!dg->name)
++ goto out_put;
++
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res)
++ goto out_put;
++ res = -EEXIST;
++ if (__lookup_dg_by_name(name))
++ goto out_unlock;
++ res = -ENOMEM;
++ INIT_LIST_HEAD(&dg->dev_list);
++ INIT_LIST_HEAD(&dg->tg_list);
++ res = scst_dg_sysfs_add(parent, dg);
++ if (res)
++ goto out_unlock;
++ list_add_tail(&dg->entry, &scst_dev_group_list);
++ mutex_unlock(&scst_mutex);
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++out_put:
++ kobject_put(&dg->kobj);
++ goto out;
++}
++
++static void __scst_dg_remove(struct scst_dev_group *dg)
++{
++ struct scst_dg_dev *dgdev;
++ struct scst_target_group *tg;
++
++ list_del(&dg->entry);
++ scst_dg_sysfs_del(dg);
++ while (!list_empty(&dg->dev_list)) {
++ dgdev = list_first_entry(&dg->dev_list, struct scst_dg_dev,
++ entry);
++ __scst_dg_dev_remove(dg, dgdev);
++ }
++ while (!list_empty(&dg->tg_list)) {
++ tg = list_first_entry(&dg->tg_list, struct scst_target_group,
++ entry);
++ __scst_tg_remove(dg, tg);
++ }
++ kobject_put(&dg->kobj);
++}
++
++int scst_dg_remove(const char *name)
++{
++ struct scst_dev_group *dg;
++ int res;
++
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res)
++ goto out;
++ res = -EINVAL;
++ dg = __lookup_dg_by_name(name);
++ if (!dg)
++ goto out_unlock;
++ __scst_dg_remove(dg);
++ res = 0;
++out_unlock:
++ mutex_unlock(&scst_mutex);
++out:
++ return res;
++}
++
++/*
++ * Given a pointer to a device_groups/<dg>/devices or
++ * device_groups/<dg>/target_groups kobject, return the pointer to the
++ * corresponding device group.
++ *
++ * Note: The caller must hold a reference on the kobject to avoid that the
++ * object disappears before the caller stops using the device group pointer.
++ */
++struct scst_dev_group *scst_lookup_dg_by_kobj(struct kobject *kobj)
++{
++ int res;
++ struct scst_dev_group *dg;
++
++ dg = NULL;
++ res = mutex_lock_interruptible(&scst_mutex);
++ if (res)
++ goto out;
++ list_for_each_entry(dg, &scst_dev_group_list, entry)
++ if (dg->dev_kobj == kobj || dg->tg_kobj == kobj)
++ goto out_unlock;
++ dg = NULL;
++out_unlock:
++ mutex_unlock(&scst_mutex);
++out:
++ return dg;
++}
++
++/*
++ * Target group module management.
++ */
++
++void scst_tg_init(void)
++{
++ INIT_LIST_HEAD(&scst_dev_group_list);
++}
++
++void scst_tg_cleanup(void)
++{
++ struct scst_dev_group *tg;
++
++ mutex_lock(&scst_mutex);
++ while (!list_empty(&scst_dev_group_list)) {
++ tg = list_first_entry(&scst_dev_group_list,
++ struct scst_dev_group, entry);
++ __scst_dg_remove(tg);
++ }
++ mutex_unlock(&scst_mutex);
++}
++
++/*
++ * Functions for target group related SCSI command support.
++ */
++
++/**
++ * scst_lookup_tg_id() - Look up a target port group identifier.
++ * @dev: SCST device.
++ * @tgt: SCST target.
++ *
++ * Returns a non-zero number if the lookup was successful and zero if not.
++ */
++uint16_t scst_lookup_tg_id(struct scst_device *dev, struct scst_tgt *tgt)
++{
++ struct scst_dev_group *dg;
++ struct scst_target_group *tg;
++ struct scst_tg_tgt *tg_tgt;
++ uint16_t tg_id = 0;
++
++ TRACE_ENTRY();
++ mutex_lock(&scst_mutex);
++ dg = __lookup_dg_by_dev(dev);
++ if (!dg)
++ goto out_unlock;
++ tg_tgt = __lookup_dg_tgt(dg, tgt->tgt_name);
++ if (!tg_tgt)
++ goto out_unlock;
++ tg = tg_tgt->tg;
++ BUG_ON(!tg);
++ tg_id = tg->group_id;
++out_unlock:
++ mutex_unlock(&scst_mutex);
++
++ TRACE_EXIT_RES(tg_id);
++ return tg_id;
++}
++EXPORT_SYMBOL_GPL(scst_lookup_tg_id);
++
++/**
++ * scst_impl_alua_configured() - Whether implicit ALUA has been configured.
++ * @dev: Pointer to the SCST device to verify.
++ */
++bool scst_impl_alua_configured(struct scst_device *dev)
++{
++ struct scst_dev_group *dg;
++ bool res;
++
++ mutex_lock(&scst_mutex);
++ dg = __lookup_dg_by_dev(dev);
++ res = dg != NULL;
++ mutex_unlock(&scst_mutex);
++
++ return res;
++}
++EXPORT_SYMBOL_GPL(scst_impl_alua_configured);
++
++/**
++ * scst_tg_get_group_info() - Build REPORT TARGET GROUPS response.
++ * @buf: Pointer to a pointer to which the result buffer pointer will be set.
++ * @length: Response length, including the "RETURN DATA LENGTH" field.
++ * @dev: Pointer to the SCST device for which to obtain group information.
++ * @data_format: Three-bit response data format specification.
++ */
++int scst_tg_get_group_info(void **buf, uint32_t *length,
++ struct scst_device *dev, uint8_t data_format)
++{
++ struct scst_dev_group *dg;
++ struct scst_target_group *tg;
++ struct scst_tg_tgt *tgtgt;
++ struct scst_tgt *tgt;
++ uint8_t *p;
++ uint32_t ret_data_len;
++ uint16_t rel_tgt_id;
++ int res;
++
++ TRACE_ENTRY();
++
++ BUG_ON(!buf);
++ BUG_ON(!length);
++
++ ret_data_len = 0;
++
++ res = -EINVAL;
++ switch (data_format) {
++ case 0:
++ break;
++ case 1:
++ /* Extended header */
++ ret_data_len += 4;
++ break;
++ default:
++ goto out;
++ }
++
++ *length = 4;
++
++ mutex_lock(&scst_mutex);
++
++ dg = __lookup_dg_by_dev(dev);
++ if (dg) {
++ list_for_each_entry(tg, &dg->tg_list, entry) {
++ /* Target port group descriptor header. */
++ ret_data_len += 8;
++ list_for_each_entry(tgtgt, &tg->tgt_list, entry) {
++ /* Target port descriptor. */
++ ret_data_len += 4;
++ }
++ }
++ }
++
++ *length += ret_data_len;
++
++ res = -ENOMEM;
++ *buf = kzalloc(*length, GFP_KERNEL);
++ if (!*buf)
++ goto out_unlock;
++
++ p = *buf;
++ /* Return data length. */
++ put_unaligned(cpu_to_be32(ret_data_len), (__be32 *)p);
++ p += 4;
++ if (data_format == 1) {
++ /* Extended header */
++ *p++ = 0x10; /* format = 1 */
++ *p++ = 0x00; /* implicit transition time = 0 */
++ p += 2; /* reserved */
++ }
++
++ if (!dg)
++ goto done;
++
++ list_for_each_entry(tg, &dg->tg_list, entry) {
++ /* Target port group descriptor header. */
++ *p++ = (tg->preferred ? SCST_TG_PREFERRED : 0) | tg->state;
++ *p++ = SCST_TG_SUP_OPTIMIZED
++ | SCST_TG_SUP_NONOPTIMIZED
++ | SCST_TG_SUP_STANDBY
++ | SCST_TG_SUP_UNAVAILABLE;
++ put_unaligned(cpu_to_be16(tg->group_id), (__be16 *)p);
++ p += 2;
++ p++; /* reserved */
++ *p++ = 2; /* status code: implicit transition */
++ p++; /* vendor specific */
++ list_for_each_entry(tgtgt, &tg->tgt_list, entry)
++ (*p)++; /* target port count */
++ p++;
++ list_for_each_entry(tgtgt, &tg->tgt_list, entry) {
++ tgt = tgtgt->tgt;
++ rel_tgt_id = tgt ? tgt->rel_tgt_id : tgtgt->rel_tgt_id;
++ /* Target port descriptor. */
++ p += 2; /* reserved */
++ /* Relative target port identifier. */
++ put_unaligned(cpu_to_be16(rel_tgt_id),
++ (__be16 *)p);
++ p += 2;
++ }
++ }
++
++done:
++ WARN_ON(p - (uint8_t *)*buf != *length);
++
++ res = 0;
++
++out_unlock:
++ mutex_unlock(&scst_mutex);
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL_GPL(scst_tg_get_group_info);
+diff -uprN orig/linux-2.6.39/drivers/scst/scst_proc.c linux-2.6.39/drivers/scst/scst_proc.c
+--- orig/linux-2.6.39/drivers/scst/scst_proc.c
++++ linux-2.6.39/drivers/scst/scst_proc.c
+@@ -0,0 +1,2716 @@
++/*
++ * scst_proc.c
++ *
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2005 Leonid Stoljar
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/module.h>
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/unistd.h>
++#include <linux/string.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++
++#include <scst/scst.h>
++#include "scst_priv.h"
++#include "scst_mem.h"
++#include "scst_pres.h"
++
++static int scst_proc_init_groups(void);
++static void scst_proc_cleanup_groups(void);
++static int scst_proc_assign_handler(char *buf);
++static int scst_proc_group_add(const char *p, unsigned int addr_method);
++static int scst_proc_del_free_acg(struct scst_acg *acg, int remove_proc);
++
++static struct scst_proc_data scst_version_proc_data;
++static struct scst_proc_data scst_help_proc_data;
++static struct scst_proc_data scst_sgv_proc_data;
++static struct scst_proc_data scst_groups_names_proc_data;
++static struct scst_proc_data scst_groups_devices_proc_data;
++static struct scst_proc_data scst_groups_addr_method_proc_data;
++static struct scst_proc_data scst_sessions_proc_data;
++static struct scst_proc_data scst_dev_handler_type_proc_data;
++static struct scst_proc_data scst_tgt_proc_data;
++static struct scst_proc_data scst_threads_proc_data;
++static struct scst_proc_data scst_scsi_tgt_proc_data;
++static struct scst_proc_data scst_dev_handler_proc_data;
++
++/*
++ * Must be less than 4K page size, since our output routines
++ * use some slack for overruns
++ */
++#define SCST_PROC_BLOCK_SIZE (PAGE_SIZE - 512)
++
++#define SCST_PROC_LOG_ENTRY_NAME "trace_level"
++#define SCST_PROC_DEV_HANDLER_TYPE_ENTRY_NAME "type"
++#define SCST_PROC_VERSION_NAME "version"
++#define SCST_PROC_SESSIONS_NAME "sessions"
++#define SCST_PROC_HELP_NAME "help"
++#define SCST_PROC_THREADS_NAME "threads"
++#define SCST_PROC_GROUPS_ENTRY_NAME "groups"
++#define SCST_PROC_GROUPS_DEVICES_ENTRY_NAME "devices"
++#define SCST_PROC_GROUPS_USERS_ENTRY_NAME "names"
++#define SCST_PROC_GROUPS_ADDR_METHOD_ENTRY_NAME "addr_method"
++
++#ifdef CONFIG_SCST_MEASURE_LATENCY
++#define SCST_PROC_LAT_ENTRY_NAME "latency"
++#endif
++
++#define SCST_PROC_ACTION_ALL 1
++#define SCST_PROC_ACTION_NONE 2
++#define SCST_PROC_ACTION_DEFAULT 3
++#define SCST_PROC_ACTION_ADD 4
++#define SCST_PROC_ACTION_CLEAR 5
++#define SCST_PROC_ACTION_MOVE 6
++#define SCST_PROC_ACTION_DEL 7
++#define SCST_PROC_ACTION_REPLACE 8
++#define SCST_PROC_ACTION_VALUE 9
++#define SCST_PROC_ACTION_ASSIGN 10
++#define SCST_PROC_ACTION_ADD_GROUP 11
++#define SCST_PROC_ACTION_DEL_GROUP 12
++#define SCST_PROC_ACTION_RENAME_GROUP 13
++#define SCST_PROC_ACTION_DUMP_PRS 14
++
++static struct proc_dir_entry *scst_proc_scsi_tgt;
++static struct proc_dir_entry *scst_proc_groups_root;
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++static struct scst_proc_data scst_log_proc_data;
++
++static struct scst_trace_log scst_proc_trace_tbl[] = {
++ { TRACE_OUT_OF_MEM, "out_of_mem" },
++ { TRACE_MINOR, "minor" },
++ { TRACE_SG_OP, "sg" },
++ { TRACE_MEMORY, "mem" },
++ { TRACE_BUFF, "buff" },
++#ifndef GENERATING_UPSTREAM_PATCH
++ { TRACE_ENTRYEXIT, "entryexit" },
++#endif
++ { TRACE_PID, "pid" },
++ { TRACE_LINE, "line" },
++ { TRACE_FUNCTION, "function" },
++ { TRACE_DEBUG, "debug" },
++ { TRACE_SPECIAL, "special" },
++ { TRACE_SCSI, "scsi" },
++ { TRACE_MGMT, "mgmt" },
++ { TRACE_MGMT_DEBUG, "mgmt_dbg" },
++ { TRACE_FLOW_CONTROL, "flow_control" },
++ { TRACE_PRES, "pr" },
++ { 0, NULL }
++};
++
++static struct scst_trace_log scst_proc_local_trace_tbl[] = {
++ { TRACE_RTRY, "retry" },
++ { TRACE_SCSI_SERIALIZING, "scsi_serializing" },
++ { TRACE_RCV_BOT, "recv_bot" },
++ { TRACE_SND_BOT, "send_bot" },
++ { TRACE_RCV_TOP, "recv_top" },
++ { TRACE_SND_TOP, "send_top" },
++ { 0, NULL }
++};
++#endif
++
++static char *scst_proc_help_string =
++" echo \"assign H:C:I:L HANDLER_NAME\" >/proc/scsi_tgt/scsi_tgt\n"
++"\n"
++" echo \"add_group GROUP_NAME [FLAT]\" >/proc/scsi_tgt/scsi_tgt\n"
++" echo \"add_group GROUP_NAME [LUN]\" >/proc/scsi_tgt/scsi_tgt\n"
++" echo \"del_group GROUP_NAME\" >/proc/scsi_tgt/scsi_tgt\n"
++" echo \"rename_group OLD_NAME NEW_NAME\" >/proc/scsi_tgt/scsi_tgt\n"
++"\n"
++" echo \"add|del H:C:I:L lun [READ_ONLY]\""
++" >/proc/scsi_tgt/groups/GROUP_NAME/devices\n"
++" echo \"replace H:C:I:L lun [READ_ONLY]\""
++" >/proc/scsi_tgt/groups/GROUP_NAME/devices\n"
++" echo \"add|del V_NAME lun [READ_ONLY]\""
++" >/proc/scsi_tgt/groups/GROUP_NAME/devices\n"
++" echo \"replace V_NAME lun [READ_ONLY]\""
++" >/proc/scsi_tgt/groups/GROUP_NAME/devices\n"
++" echo \"clear\" >/proc/scsi_tgt/groups/GROUP_NAME/devices\n"
++"\n"
++" echo \"add|del NAME\" >/proc/scsi_tgt/groups/GROUP_NAME/names\n"
++" echo \"move NAME NEW_GROUP_NAME\" >/proc/scsi_tgt/groups/OLD_GROUP_NAME/names\n"
++" echo \"clear\" >/proc/scsi_tgt/groups/GROUP_NAME/names\n"
++"\n"
++" echo \"DEC|0xHEX|0OCT\" >/proc/scsi_tgt/threads\n"
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++"\n"
++" echo \"all|none|default\" >/proc/scsi_tgt/[DEV_HANDLER_NAME/]trace_level\n"
++" echo \"value DEC|0xHEX|0OCT\""
++" >/proc/scsi_tgt/[DEV_HANDLER_NAME/]trace_level\n"
++" echo \"set|add|del TOKEN\""
++" >/proc/scsi_tgt/[DEV_HANDLER_NAME/]trace_level\n"
++" where TOKEN is one of [debug, function, line, pid, entryexit,\n"
++" buff, mem, sg, out_of_mem, special, scsi,\n"
++" mgmt, minor, mgmt_dbg]\n"
++" Additionally for /proc/scsi_tgt/trace_level there are these TOKENs\n"
++" [scsi_serializing, retry, recv_bot, send_bot, recv_top, send_top]\n"
++" echo \"dump_prs dev_name\" >/proc/scsi_tgt/trace_level\n"
++#endif
++;
++
++static char *scst_proc_dev_handler_type[] = {
++ "Direct-access device (e.g., magnetic disk)",
++ "Sequential-access device (e.g., magnetic tape)",
++ "Printer device",
++ "Processor device",
++ "Write-once device (e.g., some optical disks)",
++ "CD-ROM device",
++ "Scanner device (obsolete)",
++ "Optical memory device (e.g., some optical disks)",
++ "Medium changer device (e.g., jukeboxes)",
++ "Communications device (obsolete)",
++ "Defined by ASC IT8 (Graphic arts pre-press devices)",
++ "Defined by ASC IT8 (Graphic arts pre-press devices)",
++ "Storage array controller device (e.g., RAID)",
++ "Enclosure services device",
++ "Simplified direct-access device (e.g., magnetic disk)",
++ "Optical card reader/writer device"
++};
++
++static DEFINE_MUTEX(scst_proc_mutex);
++
++#include <linux/ctype.h>
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++
++static DEFINE_MUTEX(scst_log_mutex);
++
++int scst_proc_log_entry_write(struct file *file, const char __user *buf,
++ unsigned long length, unsigned long *log_level,
++ unsigned long default_level, const struct scst_trace_log *tbl)
++{
++ int res = length;
++ int action;
++ unsigned long level = 0, oldlevel;
++ char *buffer, *p, *e;
++ const struct scst_trace_log *t;
++ char *data = (char *)PDE(file->f_dentry->d_inode)->data;
++
++ TRACE_ENTRY();
++
++ if (length > SCST_PROC_BLOCK_SIZE) {
++ res = -EOVERFLOW;
++ goto out;
++ }
++ if (!buf) {
++ res = -EINVAL;
++ goto out;
++ }
++ buffer = (char *)__get_free_page(GFP_KERNEL);
++ if (!buffer) {
++ res = -ENOMEM;
++ goto out;
++ }
++ if (copy_from_user(buffer, buf, length)) {
++ res = -EFAULT;
++ goto out_free;
++ }
++ if (length < PAGE_SIZE) {
++ buffer[length] = '\0';
++ } else if (buffer[PAGE_SIZE-1]) {
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ /*
++ * Usage:
++ * echo "all|none|default" >/proc/scsi_tgt/trace_level
++ * echo "value DEC|0xHEX|0OCT" >/proc/scsi_tgt/trace_level
++ * echo "add|del TOKEN" >/proc/scsi_tgt/trace_level
++ */
++ p = buffer;
++ if (!strncasecmp("all", p, 3)) {
++ action = SCST_PROC_ACTION_ALL;
++ } else if (!strncasecmp("none", p, 4) || !strncasecmp("null", p, 4)) {
++ action = SCST_PROC_ACTION_NONE;
++ } else if (!strncasecmp("default", p, 7)) {
++ action = SCST_PROC_ACTION_DEFAULT;
++ } else if (!strncasecmp("add ", p, 4)) {
++ p += 4;
++ action = SCST_PROC_ACTION_ADD;
++ } else if (!strncasecmp("del ", p, 4)) {
++ p += 4;
++ action = SCST_PROC_ACTION_DEL;
++ } else if (!strncasecmp("value ", p, 6)) {
++ p += 6;
++ action = SCST_PROC_ACTION_VALUE;
++ } else if (!strncasecmp("dump_prs ", p, 9)) {
++ p += 9;
++ action = SCST_PROC_ACTION_DUMP_PRS;
++ } else {
++ if (p[strlen(p) - 1] == '\n')
++ p[strlen(p) - 1] = '\0';
++ PRINT_ERROR("Unknown action \"%s\"", p);
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ switch (action) {
++ case SCST_PROC_ACTION_ALL:
++ level = TRACE_ALL;
++ break;
++ case SCST_PROC_ACTION_DEFAULT:
++ level = default_level;
++ break;
++ case SCST_PROC_ACTION_NONE:
++ level = TRACE_NULL;
++ break;
++ case SCST_PROC_ACTION_ADD:
++ case SCST_PROC_ACTION_DEL:
++ while (isspace(*p) && *p != '\0')
++ p++;
++ e = p;
++ while (!isspace(*e) && *e != '\0')
++ e++;
++ *e = 0;
++ if (tbl) {
++ t = tbl;
++ while (t->token) {
++ if (!strcasecmp(p, t->token)) {
++ level = t->val;
++ break;
++ }
++ t++;
++ }
++ }
++ if (level == 0) {
++ t = scst_proc_trace_tbl;
++ while (t->token) {
++ if (!strcasecmp(p, t->token)) {
++ level = t->val;
++ break;
++ }
++ t++;
++ }
++ }
++ if (level == 0) {
++ PRINT_ERROR("Unknown token \"%s\"", p);
++ res = -EINVAL;
++ goto out_free;
++ }
++ break;
++ case SCST_PROC_ACTION_VALUE:
++ while (isspace(*p) && *p != '\0')
++ p++;
++ level = simple_strtoul(p, NULL, 0);
++ break;
++ case SCST_PROC_ACTION_DUMP_PRS:
++ {
++ struct scst_device *dev;
++
++ while (isspace(*p) && *p != '\0')
++ p++;
++ e = p;
++ while (!isspace(*e) && *e != '\0')
++ e++;
++ *e = '\0';
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out_free;
++ }
++
++ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
++ if (strcmp(dev->virt_name, p) == 0) {
++ scst_pr_dump_prs(dev, true);
++ goto out_up;
++ }
++ }
++
++ PRINT_ERROR("Device %s not found", p);
++ res = -ENOENT;
++out_up:
++ mutex_unlock(&scst_mutex);
++ goto out_free;
++ }
++ }
++
++ oldlevel = *log_level;
++
++ switch (action) {
++ case SCST_PROC_ACTION_ADD:
++ *log_level |= level;
++ break;
++ case SCST_PROC_ACTION_DEL:
++ *log_level &= ~level;
++ break;
++ default:
++ *log_level = level;
++ break;
++ }
++
++ PRINT_INFO("Changed trace level for \"%s\": "
++ "old 0x%08lx, new 0x%08lx",
++ (char *)data, oldlevel, *log_level);
++
++out_free:
++ free_page((unsigned long)buffer);
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL_GPL(scst_proc_log_entry_write);
++
++static ssize_t scst_proc_scsi_tgt_gen_write_log(struct file *file,
++ const char __user *buf,
++ size_t length, loff_t *off)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_log_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ res = scst_proc_log_entry_write(file, buf, length,
++ &trace_flag, SCST_DEFAULT_LOG_FLAGS,
++ scst_proc_local_trace_tbl);
++
++ mutex_unlock(&scst_log_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
++
++#ifdef CONFIG_SCST_MEASURE_LATENCY
++
++static char *scst_io_size_names[] = {
++ "<=8K ",
++ "<=32K ",
++ "<=128K",
++ "<=512K",
++ ">512K "
++};
++
++static int lat_info_show(struct seq_file *seq, void *v)
++{
++ int res = 0;
++ struct scst_acg *acg;
++ struct scst_session *sess;
++ char buf[50];
++
++ TRACE_ENTRY();
++
++ BUILD_BUG_ON(SCST_LATENCY_STATS_NUM != ARRAY_SIZE(scst_io_size_names));
++ BUILD_BUG_ON(SCST_LATENCY_STATS_NUM != ARRAY_SIZE(sess->sess_latency_stat));
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ list_for_each_entry(acg, &scst_acg_list, acg_list_entry) {
++ bool header_printed = false;
++
++ list_for_each_entry(sess, &acg->acg_sess_list,
++ acg_sess_list_entry) {
++ unsigned int i;
++ int t;
++ uint64_t scst_time, tgt_time, dev_time;
++ unsigned int processed_cmds;
++
++ if (!header_printed) {
++ seq_printf(seq, "%-15s %-15s %-46s %-46s %-46s\n",
++ "T-L names", "Total commands", "SCST latency",
++ "Target latency", "Dev latency (min/avg/max/all ns)");
++ header_printed = true;
++ }
++
++ seq_printf(seq, "Target name: %s\nInitiator name: %s\n",
++ sess->tgt->tgtt->name,
++ sess->initiator_name);
++
++ spin_lock_bh(&sess->lat_lock);
++
++ for (i = 0; i < SCST_LATENCY_STATS_NUM ; i++) {
++ uint64_t scst_time_wr, tgt_time_wr, dev_time_wr;
++ unsigned int processed_cmds_wr;
++ uint64_t scst_time_rd, tgt_time_rd, dev_time_rd;
++ unsigned int processed_cmds_rd;
++ struct scst_ext_latency_stat *latency_stat;
++
++ latency_stat = &sess->sess_latency_stat[i];
++ scst_time_wr = latency_stat->scst_time_wr;
++ scst_time_rd = latency_stat->scst_time_rd;
++ tgt_time_wr = latency_stat->tgt_time_wr;
++ tgt_time_rd = latency_stat->tgt_time_rd;
++ dev_time_wr = latency_stat->dev_time_wr;
++ dev_time_rd = latency_stat->dev_time_rd;
++ processed_cmds_wr = latency_stat->processed_cmds_wr;
++ processed_cmds_rd = latency_stat->processed_cmds_rd;
++
++ seq_printf(seq, "%-5s %-9s %-15lu ",
++ "Write", scst_io_size_names[i],
++ (unsigned long)processed_cmds_wr);
++ if (processed_cmds_wr == 0)
++ processed_cmds_wr = 1;
++
++ do_div(scst_time_wr, processed_cmds_wr);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_scst_time_wr,
++ (unsigned long)scst_time_wr,
++ (unsigned long)latency_stat->max_scst_time_wr,
++ (unsigned long)latency_stat->scst_time_wr);
++ seq_printf(seq, "%-47s", buf);
++
++ do_div(tgt_time_wr, processed_cmds_wr);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_tgt_time_wr,
++ (unsigned long)tgt_time_wr,
++ (unsigned long)latency_stat->max_tgt_time_wr,
++ (unsigned long)latency_stat->tgt_time_wr);
++ seq_printf(seq, "%-47s", buf);
++
++ do_div(dev_time_wr, processed_cmds_wr);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_dev_time_wr,
++ (unsigned long)dev_time_wr,
++ (unsigned long)latency_stat->max_dev_time_wr,
++ (unsigned long)latency_stat->dev_time_wr);
++ seq_printf(seq, "%-47s\n", buf);
++
++ seq_printf(seq, "%-5s %-9s %-15lu ",
++ "Read", scst_io_size_names[i],
++ (unsigned long)processed_cmds_rd);
++ if (processed_cmds_rd == 0)
++ processed_cmds_rd = 1;
++
++ do_div(scst_time_rd, processed_cmds_rd);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_scst_time_rd,
++ (unsigned long)scst_time_rd,
++ (unsigned long)latency_stat->max_scst_time_rd,
++ (unsigned long)latency_stat->scst_time_rd);
++ seq_printf(seq, "%-47s", buf);
++
++ do_div(tgt_time_rd, processed_cmds_rd);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_tgt_time_rd,
++ (unsigned long)tgt_time_rd,
++ (unsigned long)latency_stat->max_tgt_time_rd,
++ (unsigned long)latency_stat->tgt_time_rd);
++ seq_printf(seq, "%-47s", buf);
++
++ do_div(dev_time_rd, processed_cmds_rd);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_dev_time_rd,
++ (unsigned long)dev_time_rd,
++ (unsigned long)latency_stat->max_dev_time_rd,
++ (unsigned long)latency_stat->dev_time_rd);
++ seq_printf(seq, "%-47s\n", buf);
++ }
++
++ for (t = SESS_TGT_DEV_LIST_HASH_SIZE-1; t >= 0; t--) {
++ struct list_head *head =
++ &sess->sess_tgt_dev_list[t];
++ struct scst_tgt_dev *tgt_dev;
++ list_for_each_entry(tgt_dev, head,
++ sess_tgt_dev_list_entry) {
++
++ seq_printf(seq, "\nLUN: %llu\n", tgt_dev->lun);
++
++ for (i = 0; i < SCST_LATENCY_STATS_NUM ; i++) {
++ uint64_t scst_time_wr, tgt_time_wr, dev_time_wr;
++ unsigned int processed_cmds_wr;
++ uint64_t scst_time_rd, tgt_time_rd, dev_time_rd;
++ unsigned int processed_cmds_rd;
++ struct scst_ext_latency_stat *latency_stat;
++
++ latency_stat = &tgt_dev->dev_latency_stat[i];
++ scst_time_wr = latency_stat->scst_time_wr;
++ scst_time_rd = latency_stat->scst_time_rd;
++ tgt_time_wr = latency_stat->tgt_time_wr;
++ tgt_time_rd = latency_stat->tgt_time_rd;
++ dev_time_wr = latency_stat->dev_time_wr;
++ dev_time_rd = latency_stat->dev_time_rd;
++ processed_cmds_wr = latency_stat->processed_cmds_wr;
++ processed_cmds_rd = latency_stat->processed_cmds_rd;
++
++ seq_printf(seq, "%-5s %-9s %-15lu ",
++ "Write", scst_io_size_names[i],
++ (unsigned long)processed_cmds_wr);
++ if (processed_cmds_wr == 0)
++ processed_cmds_wr = 1;
++
++ do_div(scst_time_wr, processed_cmds_wr);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_scst_time_wr,
++ (unsigned long)scst_time_wr,
++ (unsigned long)latency_stat->max_scst_time_wr,
++ (unsigned long)latency_stat->scst_time_wr);
++ seq_printf(seq, "%-47s", buf);
++
++ do_div(tgt_time_wr, processed_cmds_wr);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_tgt_time_wr,
++ (unsigned long)tgt_time_wr,
++ (unsigned long)latency_stat->max_tgt_time_wr,
++ (unsigned long)latency_stat->tgt_time_wr);
++ seq_printf(seq, "%-47s", buf);
++
++ do_div(dev_time_wr, processed_cmds_wr);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_dev_time_wr,
++ (unsigned long)dev_time_wr,
++ (unsigned long)latency_stat->max_dev_time_wr,
++ (unsigned long)latency_stat->dev_time_wr);
++ seq_printf(seq, "%-47s\n", buf);
++
++ seq_printf(seq, "%-5s %-9s %-15lu ",
++ "Read", scst_io_size_names[i],
++ (unsigned long)processed_cmds_rd);
++ if (processed_cmds_rd == 0)
++ processed_cmds_rd = 1;
++
++ do_div(scst_time_rd, processed_cmds_rd);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_scst_time_rd,
++ (unsigned long)scst_time_rd,
++ (unsigned long)latency_stat->max_scst_time_rd,
++ (unsigned long)latency_stat->scst_time_rd);
++ seq_printf(seq, "%-47s", buf);
++
++ do_div(tgt_time_rd, processed_cmds_rd);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_tgt_time_rd,
++ (unsigned long)tgt_time_rd,
++ (unsigned long)latency_stat->max_tgt_time_rd,
++ (unsigned long)latency_stat->tgt_time_rd);
++ seq_printf(seq, "%-47s", buf);
++
++ do_div(dev_time_rd, processed_cmds_rd);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)latency_stat->min_dev_time_rd,
++ (unsigned long)dev_time_rd,
++ (unsigned long)latency_stat->max_dev_time_rd,
++ (unsigned long)latency_stat->dev_time_rd);
++ seq_printf(seq, "%-47s\n", buf);
++ }
++ }
++ }
++
++ scst_time = sess->scst_time;
++ tgt_time = sess->tgt_time;
++ dev_time = sess->dev_time;
++ processed_cmds = sess->processed_cmds;
++
++ seq_printf(seq, "\n%-15s %-16d", "Overall ",
++ processed_cmds);
++
++ if (processed_cmds == 0)
++ processed_cmds = 1;
++
++ do_div(scst_time, processed_cmds);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)sess->min_scst_time,
++ (unsigned long)scst_time,
++ (unsigned long)sess->max_scst_time,
++ (unsigned long)sess->scst_time);
++ seq_printf(seq, "%-47s", buf);
++
++ do_div(tgt_time, processed_cmds);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)sess->min_tgt_time,
++ (unsigned long)tgt_time,
++ (unsigned long)sess->max_tgt_time,
++ (unsigned long)sess->tgt_time);
++ seq_printf(seq, "%-47s", buf);
++
++ do_div(dev_time, processed_cmds);
++ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
++ (unsigned long)sess->min_dev_time,
++ (unsigned long)dev_time,
++ (unsigned long)sess->max_dev_time,
++ (unsigned long)sess->dev_time);
++ seq_printf(seq, "%-47s\n\n", buf);
++
++ spin_unlock_bh(&sess->lat_lock);
++ }
++ }
++
++ mutex_unlock(&scst_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t scst_proc_scsi_tgt_gen_write_lat(struct file *file,
++ const char __user *buf,
++ size_t length, loff_t *off)
++{
++ int res = length, t;
++ struct scst_acg *acg;
++ struct scst_session *sess;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ list_for_each_entry(acg, &scst_acg_list, acg_list_entry) {
++ list_for_each_entry(sess, &acg->acg_sess_list,
++ acg_sess_list_entry) {
++ PRINT_INFO("Zeroing latency statistics for initiator "
++ "%s", sess->initiator_name);
++ spin_lock_bh(&sess->lat_lock);
++
++ sess->scst_time = 0;
++ sess->tgt_time = 0;
++ sess->dev_time = 0;
++ sess->min_scst_time = 0;
++ sess->min_tgt_time = 0;
++ sess->min_dev_time = 0;
++ sess->max_scst_time = 0;
++ sess->max_tgt_time = 0;
++ sess->max_dev_time = 0;
++ sess->processed_cmds = 0;
++ memset(sess->sess_latency_stat, 0,
++ sizeof(sess->sess_latency_stat));
++
++ for (t = SESS_TGT_DEV_LIST_HASH_SIZE-1; t >= 0; t--) {
++ struct list_head *head =
++ &sess->sess_tgt_dev_list[t];
++ struct scst_tgt_dev *tgt_dev;
++ list_for_each_entry(tgt_dev, head,
++ sess_tgt_dev_list_entry) {
++ tgt_dev->scst_time = 0;
++ tgt_dev->tgt_time = 0;
++ tgt_dev->dev_time = 0;
++ tgt_dev->processed_cmds = 0;
++ memset(tgt_dev->dev_latency_stat, 0,
++ sizeof(tgt_dev->dev_latency_stat));
++ }
++ }
++
++ spin_unlock_bh(&sess->lat_lock);
++ }
++ }
++
++ mutex_unlock(&scst_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct scst_proc_data scst_lat_proc_data = {
++ SCST_DEF_RW_SEQ_OP(scst_proc_scsi_tgt_gen_write_lat)
++ .show = lat_info_show,
++ .data = "scsi_tgt",
++};
++
++#endif /* CONFIG_SCST_MEASURE_LATENCY */
++
++static int __init scst_proc_init_module_log(void)
++{
++ int res = 0;
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) || \
++ defined(CONFIG_SCST_MEASURE_LATENCY)
++ struct proc_dir_entry *generic;
++#endif
++
++ TRACE_ENTRY();
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
++ SCST_PROC_LOG_ENTRY_NAME,
++ &scst_log_proc_data);
++ if (!generic) {
++ PRINT_ERROR("cannot init /proc/%s/%s",
++ SCST_PROC_ENTRY_NAME, SCST_PROC_LOG_ENTRY_NAME);
++ res = -ENOMEM;
++ }
++#endif
++
++#ifdef CONFIG_SCST_MEASURE_LATENCY
++ if (res == 0) {
++ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
++ SCST_PROC_LAT_ENTRY_NAME,
++ &scst_lat_proc_data);
++ if (!generic) {
++ PRINT_ERROR("cannot init /proc/%s/%s",
++ SCST_PROC_ENTRY_NAME,
++ SCST_PROC_LAT_ENTRY_NAME);
++ res = -ENOMEM;
++ }
++ }
++#endif
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void scst_proc_cleanup_module_log(void)
++{
++ TRACE_ENTRY();
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++ remove_proc_entry(SCST_PROC_LOG_ENTRY_NAME, scst_proc_scsi_tgt);
++#endif
++
++#ifdef CONFIG_SCST_MEASURE_LATENCY
++ remove_proc_entry(SCST_PROC_LAT_ENTRY_NAME, scst_proc_scsi_tgt);
++#endif
++
++ TRACE_EXIT();
++ return;
++}
++
++static int scst_proc_group_add_tree(struct scst_acg *acg, const char *name)
++{
++ int res = 0;
++ struct proc_dir_entry *generic;
++
++ TRACE_ENTRY();
++
++ acg->acg_proc_root = proc_mkdir(name, scst_proc_groups_root);
++ if (acg->acg_proc_root == NULL) {
++ PRINT_ERROR("Not enough memory to register %s entry in "
++ "/proc/%s/%s", name, SCST_PROC_ENTRY_NAME,
++ SCST_PROC_GROUPS_ENTRY_NAME);
++ goto out;
++ }
++
++ scst_groups_addr_method_proc_data.data = acg;
++ generic = scst_create_proc_entry(acg->acg_proc_root,
++ SCST_PROC_GROUPS_ADDR_METHOD_ENTRY_NAME,
++ &scst_groups_addr_method_proc_data);
++ if (!generic) {
++ PRINT_ERROR("Cannot init /proc/%s/%s/%s/%s",
++ SCST_PROC_ENTRY_NAME,
++ SCST_PROC_GROUPS_ENTRY_NAME,
++ name, SCST_PROC_GROUPS_ADDR_METHOD_ENTRY_NAME);
++ res = -ENOMEM;
++ goto out_remove;
++ }
++
++ scst_groups_devices_proc_data.data = acg;
++ generic = scst_create_proc_entry(acg->acg_proc_root,
++ SCST_PROC_GROUPS_DEVICES_ENTRY_NAME,
++ &scst_groups_devices_proc_data);
++ if (!generic) {
++ PRINT_ERROR("Cannot init /proc/%s/%s/%s/%s",
++ SCST_PROC_ENTRY_NAME,
++ SCST_PROC_GROUPS_ENTRY_NAME,
++ name, SCST_PROC_GROUPS_DEVICES_ENTRY_NAME);
++ res = -ENOMEM;
++ goto out_remove0;
++ }
++
++ scst_groups_names_proc_data.data = acg;
++ generic = scst_create_proc_entry(acg->acg_proc_root,
++ SCST_PROC_GROUPS_USERS_ENTRY_NAME,
++ &scst_groups_names_proc_data);
++ if (!generic) {
++ PRINT_ERROR("Cannot init /proc/%s/%s/%s/%s",
++ SCST_PROC_ENTRY_NAME,
++ SCST_PROC_GROUPS_ENTRY_NAME,
++ name, SCST_PROC_GROUPS_USERS_ENTRY_NAME);
++ res = -ENOMEM;
++ goto out_remove1;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_remove1:
++ remove_proc_entry(SCST_PROC_GROUPS_DEVICES_ENTRY_NAME,
++ acg->acg_proc_root);
++
++out_remove0:
++ remove_proc_entry(SCST_PROC_GROUPS_ADDR_METHOD_ENTRY_NAME,
++ acg->acg_proc_root);
++out_remove:
++ remove_proc_entry(name, scst_proc_groups_root);
++ goto out;
++}
++
++static void scst_proc_del_acg_tree(struct proc_dir_entry *acg_proc_root,
++ const char *name)
++{
++ TRACE_ENTRY();
++
++ remove_proc_entry(SCST_PROC_GROUPS_ADDR_METHOD_ENTRY_NAME, acg_proc_root);
++ remove_proc_entry(SCST_PROC_GROUPS_USERS_ENTRY_NAME, acg_proc_root);
++ remove_proc_entry(SCST_PROC_GROUPS_DEVICES_ENTRY_NAME, acg_proc_root);
++ remove_proc_entry(name, scst_proc_groups_root);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* The activity supposed to be suspended and scst_mutex held */
++static int scst_proc_group_add(const char *p, unsigned int addr_method)
++{
++ int res = 0, len = strlen(p) + 1;
++ struct scst_acg *acg;
++ char *name = NULL;
++
++ TRACE_ENTRY();
++
++ name = kmalloc(len, GFP_KERNEL);
++ if (name == NULL) {
++ PRINT_ERROR("Allocation of new name (size %d) failed", len);
++ goto out_nomem;
++ }
++ strlcpy(name, p, len);
++
++ acg = scst_alloc_add_acg(NULL, name, false);
++ if (acg == NULL) {
++ PRINT_ERROR("scst_alloc_add_acg() (name %s) failed", name);
++ goto out_free;
++ }
++
++ acg->addr_method = addr_method;
++
++ res = scst_proc_group_add_tree(acg, p);
++ if (res != 0)
++ goto out_free_acg;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free_acg:
++ scst_proc_del_free_acg(acg, 0);
++
++out_free:
++ kfree(name);
++
++out_nomem:
++ res = -ENOMEM;
++ goto out;
++}
++
++/* The activity supposed to be suspended and scst_mutex held */
++static int scst_proc_del_free_acg(struct scst_acg *acg, int remove_proc)
++{
++ struct proc_dir_entry *acg_proc_root = acg->acg_proc_root;
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ if (acg != scst_default_acg) {
++ if (!scst_acg_sess_is_empty(acg)) {
++ PRINT_ERROR("%s", "Session is not empty");
++ res = -EBUSY;
++ goto out;
++ }
++ if (remove_proc)
++ scst_proc_del_acg_tree(acg_proc_root, acg->acg_name);
++ scst_del_free_acg(acg);
++ }
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* The activity supposed to be suspended and scst_mutex held */
++static int scst_proc_rename_acg(struct scst_acg *acg, const char *new_name)
++{
++ int res = 0, len = strlen(new_name) + 1;
++ char *name;
++ struct proc_dir_entry *old_acg_proc_root = acg->acg_proc_root;
++
++ TRACE_ENTRY();
++
++ name = kmalloc(len, GFP_KERNEL);
++ if (name == NULL) {
++ PRINT_ERROR("Allocation of new name (size %d) failed", len);
++ goto out_nomem;
++ }
++ strlcpy(name, new_name, len);
++
++ res = scst_proc_group_add_tree(acg, new_name);
++ if (res != 0)
++ goto out_free;
++
++ scst_proc_del_acg_tree(old_acg_proc_root, acg->acg_name);
++
++ kfree(acg->acg_name);
++ acg->acg_name = name;
++
++ scst_check_reassign_sessions();
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free:
++ kfree(name);
++
++out_nomem:
++ res = -ENOMEM;
++ goto out;
++}
++
++static int __init scst_proc_init_groups(void)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ /* create the proc directory entry for the device */
++ scst_proc_groups_root = proc_mkdir(SCST_PROC_GROUPS_ENTRY_NAME,
++ scst_proc_scsi_tgt);
++ if (scst_proc_groups_root == NULL) {
++ PRINT_ERROR("Not enough memory to register %s entry in "
++ "/proc/%s", SCST_PROC_GROUPS_ENTRY_NAME,
++ SCST_PROC_ENTRY_NAME);
++ goto out_nomem;
++ }
++
++ res = scst_proc_group_add_tree(scst_default_acg,
++ SCST_DEFAULT_ACG_NAME);
++ if (res != 0)
++ goto out_remove;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_remove:
++ remove_proc_entry(SCST_PROC_GROUPS_ENTRY_NAME, scst_proc_scsi_tgt);
++
++out_nomem:
++ res = -ENOMEM;
++ goto out;
++}
++
++static void scst_proc_cleanup_groups(void)
++{
++ struct scst_acg *acg_tmp, *acg;
++
++ TRACE_ENTRY();
++
++ /* remove all groups (dir & entries) */
++ list_for_each_entry_safe(acg, acg_tmp, &scst_acg_list,
++ acg_list_entry) {
++ scst_proc_del_free_acg(acg, 1);
++ }
++
++ scst_proc_del_acg_tree(scst_default_acg->acg_proc_root,
++ SCST_DEFAULT_ACG_NAME);
++ TRACE_DBG("remove_proc_entry(%s, %p)",
++ SCST_PROC_GROUPS_ENTRY_NAME, scst_proc_scsi_tgt);
++ remove_proc_entry(SCST_PROC_GROUPS_ENTRY_NAME, scst_proc_scsi_tgt);
++
++ TRACE_EXIT();
++}
++
++static int __init scst_proc_init_sgv(void)
++{
++ int res = 0;
++ struct proc_dir_entry *pr;
++
++ TRACE_ENTRY();
++
++ pr = scst_create_proc_entry(scst_proc_scsi_tgt, "sgv",
++ &scst_sgv_proc_data);
++ if (pr == NULL) {
++ PRINT_ERROR("%s", "cannot create sgv /proc entry");
++ res = -ENOMEM;
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void __exit scst_proc_cleanup_sgv(void)
++{
++ TRACE_ENTRY();
++ remove_proc_entry("sgv", scst_proc_scsi_tgt);
++ TRACE_EXIT();
++}
++
++int __init scst_proc_init_module(void)
++{
++ int res = 0;
++ struct proc_dir_entry *generic;
++
++ TRACE_ENTRY();
++
++ scst_proc_scsi_tgt = proc_mkdir(SCST_PROC_ENTRY_NAME, NULL);
++ if (!scst_proc_scsi_tgt) {
++ PRINT_ERROR("cannot init /proc/%s", SCST_PROC_ENTRY_NAME);
++ goto out_nomem;
++ }
++
++ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
++ SCST_PROC_ENTRY_NAME,
++ &scst_tgt_proc_data);
++ if (!generic) {
++ PRINT_ERROR("cannot init /proc/%s/%s",
++ SCST_PROC_ENTRY_NAME, SCST_PROC_ENTRY_NAME);
++ goto out_remove;
++ }
++
++ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
++ SCST_PROC_VERSION_NAME,
++ &scst_version_proc_data);
++ if (!generic) {
++ PRINT_ERROR("cannot init /proc/%s/%s",
++ SCST_PROC_ENTRY_NAME, SCST_PROC_VERSION_NAME);
++ goto out_remove1;
++ }
++
++ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
++ SCST_PROC_SESSIONS_NAME,
++ &scst_sessions_proc_data);
++ if (!generic) {
++ PRINT_ERROR("cannot init /proc/%s/%s",
++ SCST_PROC_ENTRY_NAME, SCST_PROC_SESSIONS_NAME);
++ goto out_remove2;
++ }
++
++ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
++ SCST_PROC_HELP_NAME,
++ &scst_help_proc_data);
++ if (!generic) {
++ PRINT_ERROR("cannot init /proc/%s/%s",
++ SCST_PROC_ENTRY_NAME, SCST_PROC_HELP_NAME);
++ goto out_remove3;
++ }
++
++ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
++ SCST_PROC_THREADS_NAME,
++ &scst_threads_proc_data);
++ if (!generic) {
++ PRINT_ERROR("cannot init /proc/%s/%s",
++ SCST_PROC_ENTRY_NAME, SCST_PROC_THREADS_NAME);
++ goto out_remove4;
++ }
++
++ if (scst_proc_init_module_log() < 0)
++ goto out_remove5;
++
++ if (scst_proc_init_groups() < 0)
++ goto out_remove6;
++
++ if (scst_proc_init_sgv() < 0)
++ goto out_remove7;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_remove7:
++ scst_proc_cleanup_groups();
++
++out_remove6:
++ scst_proc_cleanup_module_log();
++
++out_remove5:
++ remove_proc_entry(SCST_PROC_THREADS_NAME, scst_proc_scsi_tgt);
++
++out_remove4:
++ remove_proc_entry(SCST_PROC_HELP_NAME, scst_proc_scsi_tgt);
++
++out_remove3:
++ remove_proc_entry(SCST_PROC_SESSIONS_NAME, scst_proc_scsi_tgt);
++
++out_remove2:
++ remove_proc_entry(SCST_PROC_VERSION_NAME, scst_proc_scsi_tgt);
++
++out_remove1:
++ remove_proc_entry(SCST_PROC_ENTRY_NAME, scst_proc_scsi_tgt);
++
++out_remove:
++ remove_proc_entry(SCST_PROC_ENTRY_NAME, NULL);
++
++out_nomem:
++ res = -ENOMEM;
++ goto out;
++}
++
++void __exit scst_proc_cleanup_module(void)
++{
++ TRACE_ENTRY();
++
++ /* We may not bother about locks here */
++ scst_proc_cleanup_sgv();
++ scst_proc_cleanup_groups();
++ scst_proc_cleanup_module_log();
++ remove_proc_entry(SCST_PROC_THREADS_NAME, scst_proc_scsi_tgt);
++ remove_proc_entry(SCST_PROC_HELP_NAME, scst_proc_scsi_tgt);
++ remove_proc_entry(SCST_PROC_SESSIONS_NAME, scst_proc_scsi_tgt);
++ remove_proc_entry(SCST_PROC_VERSION_NAME, scst_proc_scsi_tgt);
++ remove_proc_entry(SCST_PROC_ENTRY_NAME, scst_proc_scsi_tgt);
++ remove_proc_entry(SCST_PROC_ENTRY_NAME, NULL);
++
++ TRACE_EXIT();
++}
++
++static ssize_t scst_proc_threads_write(struct file *file,
++ const char __user *buf,
++ size_t length, loff_t *off)
++{
++ int res = length;
++ int oldtn, newtn, delta;
++ char *buffer;
++
++ TRACE_ENTRY();
++
++ if (length > SCST_PROC_BLOCK_SIZE) {
++ res = -EOVERFLOW;
++ goto out;
++ }
++ if (!buf) {
++ res = -EINVAL;
++ goto out;
++ }
++ buffer = (char *)__get_free_page(GFP_KERNEL);
++ if (!buffer) {
++ res = -ENOMEM;
++ goto out;
++ }
++ if (copy_from_user(buffer, buf, length)) {
++ res = -EFAULT;
++ goto out_free;
++ }
++ if (length < PAGE_SIZE) {
++ buffer[length] = '\0';
++ } else if (buffer[PAGE_SIZE-1]) {
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ if (mutex_lock_interruptible(&scst_proc_mutex) != 0) {
++ res = -EINTR;
++ goto out_free;
++ }
++
++ mutex_lock(&scst_mutex);
++
++ oldtn = scst_main_cmd_threads.nr_threads;
++ newtn = simple_strtoul(buffer, NULL, 0);
++ if (newtn <= 0) {
++ PRINT_ERROR("Illegal threads num value %d", newtn);
++ res = -EINVAL;
++ goto out_up_thr_free;
++ }
++ delta = newtn - oldtn;
++ if (delta < 0)
++ scst_del_threads(&scst_main_cmd_threads, -delta);
++ else {
++ int rc = scst_add_threads(&scst_main_cmd_threads, NULL, NULL,
++ delta);
++ if (rc != 0)
++ res = rc;
++ }
++
++ PRINT_INFO("Changed cmd threads num: old %d, new %d", oldtn, newtn);
++
++out_up_thr_free:
++ mutex_unlock(&scst_mutex);
++
++ mutex_unlock(&scst_proc_mutex);
++
++out_free:
++ free_page((unsigned long)buffer);
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++int scst_build_proc_target_dir_entries(struct scst_tgt_template *vtt)
++{
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ /* create the proc directory entry for the device */
++ vtt->proc_tgt_root = proc_mkdir(vtt->name, scst_proc_scsi_tgt);
++ if (vtt->proc_tgt_root == NULL) {
++ PRINT_ERROR("Not enough memory to register SCSI target %s "
++ "in /proc/%s", vtt->name, SCST_PROC_ENTRY_NAME);
++ goto out_nomem;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_nomem:
++ res = -ENOMEM;
++ goto out;
++}
++
++void scst_cleanup_proc_target_dir_entries(struct scst_tgt_template *vtt)
++{
++ TRACE_ENTRY();
++
++ remove_proc_entry(vtt->name, scst_proc_scsi_tgt);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Called under scst_mutex */
++int scst_build_proc_target_entries(struct scst_tgt *vtt)
++{
++ int res = 0;
++ struct proc_dir_entry *p;
++ char name[20];
++
++ TRACE_ENTRY();
++
++ if (vtt->tgtt->read_proc || vtt->tgtt->write_proc) {
++ /* create the proc file entry for the device */
++ scnprintf(name, sizeof(name), "%d", vtt->tgtt->proc_dev_num);
++ scst_scsi_tgt_proc_data.data = (void *)vtt;
++ p = scst_create_proc_entry(vtt->tgtt->proc_tgt_root,
++ name,
++ &scst_scsi_tgt_proc_data);
++ if (p == NULL) {
++ PRINT_ERROR("Not enough memory to register SCSI "
++ "target entry %s in /proc/%s/%s", name,
++ SCST_PROC_ENTRY_NAME, vtt->tgtt->name);
++ res = -ENOMEM;
++ goto out;
++ }
++ vtt->proc_num = vtt->tgtt->proc_dev_num;
++ vtt->tgtt->proc_dev_num++;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++void scst_cleanup_proc_target_entries(struct scst_tgt *vtt)
++{
++ char name[20];
++
++ TRACE_ENTRY();
++
++ if (vtt->tgtt->read_proc || vtt->tgtt->write_proc) {
++ scnprintf(name, sizeof(name), "%d", vtt->proc_num);
++ remove_proc_entry(name, vtt->tgtt->proc_tgt_root);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static ssize_t scst_proc_scsi_tgt_write(struct file *file,
++ const char __user *buf,
++ size_t length, loff_t *off)
++{
++ struct scst_tgt *vtt =
++ (struct scst_tgt *)PDE(file->f_dentry->d_inode)->data;
++ ssize_t res = 0;
++ char *buffer;
++ char *start;
++ int eof = 0;
++
++ TRACE_ENTRY();
++
++ if (vtt->tgtt->write_proc == NULL) {
++ res = -ENOSYS;
++ goto out;
++ }
++
++ if (length > SCST_PROC_BLOCK_SIZE) {
++ res = -EOVERFLOW;
++ goto out;
++ }
++ if (!buf) {
++ res = -EINVAL;
++ goto out;
++ }
++ buffer = (char *)__get_free_page(GFP_KERNEL);
++ if (!buffer) {
++ res = -ENOMEM;
++ goto out;
++ }
++ if (copy_from_user(buffer, buf, length)) {
++ res = -EFAULT;
++ goto out_free;
++ }
++ if (length < PAGE_SIZE) {
++ buffer[length] = '\0';
++ } else if (buffer[PAGE_SIZE-1]) {
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ TRACE_BUFFER("Buffer", buffer, length);
++
++ if (mutex_lock_interruptible(&scst_proc_mutex) != 0) {
++ res = -EINTR;
++ goto out_free;
++ }
++
++ res = vtt->tgtt->write_proc(buffer, &start, 0, length, &eof, vtt);
++
++ mutex_unlock(&scst_proc_mutex);
++
++out_free:
++ free_page((unsigned long)buffer);
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++int scst_build_proc_dev_handler_dir_entries(struct scst_dev_type *dev_type)
++{
++ int res = 0;
++ struct proc_dir_entry *p;
++ const char *name; /* workaround to keep /proc ABI intact */
++
++ TRACE_ENTRY();
++
++ BUG_ON(dev_type->proc_dev_type_root);
++
++ if (strcmp(dev_type->name, "vdisk_fileio") == 0)
++ name = "vdisk";
++ else
++ name = dev_type->name;
++
++ /* create the proc directory entry for the dev type handler */
++ dev_type->proc_dev_type_root = proc_mkdir(name,
++ scst_proc_scsi_tgt);
++ if (dev_type->proc_dev_type_root == NULL) {
++ PRINT_ERROR("Not enough memory to register dev handler dir "
++ "%s in /proc/%s", name, SCST_PROC_ENTRY_NAME);
++ goto out_nomem;
++ }
++
++ scst_dev_handler_type_proc_data.data = dev_type;
++ if (dev_type->type >= 0) {
++ p = scst_create_proc_entry(dev_type->proc_dev_type_root,
++ SCST_PROC_DEV_HANDLER_TYPE_ENTRY_NAME,
++ &scst_dev_handler_type_proc_data);
++ if (p == NULL) {
++ PRINT_ERROR("Not enough memory to register dev "
++ "handler entry %s in /proc/%s/%s",
++ SCST_PROC_DEV_HANDLER_TYPE_ENTRY_NAME,
++ SCST_PROC_ENTRY_NAME, name);
++ goto out_remove;
++ }
++ }
++
++ if (dev_type->read_proc || dev_type->write_proc) {
++ /* create the proc file entry for the dev type handler */
++ scst_dev_handler_proc_data.data = (void *)dev_type;
++ p = scst_create_proc_entry(dev_type->proc_dev_type_root,
++ name,
++ &scst_dev_handler_proc_data);
++ if (p == NULL) {
++ PRINT_ERROR("Not enough memory to register dev "
++ "handler entry %s in /proc/%s/%s", name,
++ SCST_PROC_ENTRY_NAME, name);
++ goto out_remove1;
++ }
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_remove1:
++ if (dev_type->type >= 0)
++ remove_proc_entry(SCST_PROC_DEV_HANDLER_TYPE_ENTRY_NAME,
++ dev_type->proc_dev_type_root);
++
++out_remove:
++ remove_proc_entry(name, scst_proc_scsi_tgt);
++
++out_nomem:
++ res = -ENOMEM;
++ goto out;
++}
++
++void scst_cleanup_proc_dev_handler_dir_entries(struct scst_dev_type *dev_type)
++{
++ /* Workaround to keep /proc ABI intact */
++ const char *name;
++
++ TRACE_ENTRY();
++
++ BUG_ON(dev_type->proc_dev_type_root == NULL);
++
++ if (strcmp(dev_type->name, "vdisk_fileio") == 0)
++ name = "vdisk";
++ else
++ name = dev_type->name;
++
++ if (dev_type->type >= 0) {
++ remove_proc_entry(SCST_PROC_DEV_HANDLER_TYPE_ENTRY_NAME,
++ dev_type->proc_dev_type_root);
++ }
++ if (dev_type->read_proc || dev_type->write_proc)
++ remove_proc_entry(name, dev_type->proc_dev_type_root);
++ remove_proc_entry(name, scst_proc_scsi_tgt);
++ dev_type->proc_dev_type_root = NULL;
++
++ TRACE_EXIT();
++ return;
++}
++
++static ssize_t scst_proc_scsi_dev_handler_write(struct file *file,
++ const char __user *buf,
++ size_t length, loff_t *off)
++{
++ struct scst_dev_type *dev_type =
++ (struct scst_dev_type *)PDE(file->f_dentry->d_inode)->data;
++ ssize_t res = 0;
++ char *buffer;
++ char *start;
++ int eof = 0;
++
++ TRACE_ENTRY();
++
++ if (dev_type->write_proc == NULL) {
++ res = -ENOSYS;
++ goto out;
++ }
++
++ if (length > SCST_PROC_BLOCK_SIZE) {
++ res = -EOVERFLOW;
++ goto out;
++ }
++ if (!buf) {
++ res = -EINVAL;
++ goto out;
++ }
++
++ buffer = (char *)__get_free_page(GFP_KERNEL);
++ if (!buffer) {
++ res = -ENOMEM;
++ goto out;
++ }
++
++ if (copy_from_user(buffer, buf, length)) {
++ res = -EFAULT;
++ goto out_free;
++ }
++ if (length < PAGE_SIZE) {
++ buffer[length] = '\0';
++ } else if (buffer[PAGE_SIZE-1]) {
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ TRACE_BUFFER("Buffer", buffer, length);
++
++ if (mutex_lock_interruptible(&scst_proc_mutex) != 0) {
++ res = -EINTR;
++ goto out_free;
++ }
++
++ res = dev_type->write_proc(buffer, &start, 0, length, &eof, dev_type);
++
++ mutex_unlock(&scst_proc_mutex);
++
++out_free:
++ free_page((unsigned long)buffer);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t scst_proc_scsi_tgt_gen_write(struct file *file,
++ const char __user *buf,
++ size_t length, loff_t *off)
++{
++ int res, rc = 0, action;
++ char *buffer, *p, *pp, *ppp;
++ struct scst_acg *a, *acg = NULL;
++ unsigned int addr_method = SCST_LUN_ADDR_METHOD_PERIPHERAL;
++
++ TRACE_ENTRY();
++
++ if (length > SCST_PROC_BLOCK_SIZE) {
++ res = -EOVERFLOW;
++ goto out;
++ }
++ if (!buf) {
++ res = -EINVAL;
++ goto out;
++ }
++ buffer = (char *)__get_free_page(GFP_KERNEL);
++ if (!buffer) {
++ res = -ENOMEM;
++ goto out;
++ }
++ if (copy_from_user(buffer, buf, length)) {
++ res = -EFAULT;
++ goto out_free;
++ }
++ if (length < PAGE_SIZE) {
++ buffer[length] = '\0';
++ } else if (buffer[PAGE_SIZE-1]) {
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ /*
++ * Usage: echo "add_group GROUP_NAME [FLAT]" >/proc/scsi_tgt/scsi_tgt
++ * or echo "add_group GROUP_NAME [LUN]" >/proc/scsi_tgt/scsi_tgt
++ * or echo "del_group GROUP_NAME" >/proc/scsi_tgt/scsi_tgt
++ * or echo "rename_group OLD_NAME NEW_NAME" >/proc/scsi_tgt/scsi_tgt"
++ * or echo "assign H:C:I:L HANDLER_NAME" >/proc/scsi_tgt/scsi_tgt
++ */
++ p = buffer;
++ if (p[strlen(p) - 1] == '\n')
++ p[strlen(p) - 1] = '\0';
++ if (!strncasecmp("assign ", p, 7)) {
++ p += 7;
++ action = SCST_PROC_ACTION_ASSIGN;
++ } else if (!strncasecmp("add_group ", p, 10)) {
++ p += 10;
++ action = SCST_PROC_ACTION_ADD_GROUP;
++ } else if (!strncasecmp("del_group ", p, 10)) {
++ p += 10;
++ action = SCST_PROC_ACTION_DEL_GROUP;
++ } else if (!strncasecmp("rename_group ", p, 13)) {
++ p += 13;
++ action = SCST_PROC_ACTION_RENAME_GROUP;
++ } else {
++ PRINT_ERROR("Unknown action \"%s\"", p);
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ res = scst_suspend_activity(true);
++ if (res != 0)
++ goto out_free;
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out_free_resume;
++ }
++
++ res = length;
++
++ while (isspace(*p) && *p != '\0')
++ p++;
++
++ switch (action) {
++ case SCST_PROC_ACTION_ADD_GROUP:
++ case SCST_PROC_ACTION_DEL_GROUP:
++ case SCST_PROC_ACTION_RENAME_GROUP:
++ pp = p;
++ while (!isspace(*pp) && *pp != '\0')
++ pp++;
++ if (*pp != '\0') {
++ *pp = '\0';
++ pp++;
++ while (isspace(*pp) && *pp != '\0')
++ pp++;
++ if (*pp != '\0') {
++ switch (action) {
++ case SCST_PROC_ACTION_ADD_GROUP:
++ ppp = pp;
++ while (!isspace(*ppp) && *ppp != '\0')
++ ppp++;
++ if (*ppp != '\0') {
++ *ppp = '\0';
++ ppp++;
++ while (isspace(*ppp) && *ppp != '\0')
++ ppp++;
++ if (*ppp != '\0') {
++ PRINT_ERROR("%s", "Too many "
++ "arguments");
++ res = -EINVAL;
++ goto out_up_free;
++ }
++ }
++ if (strcasecmp(pp, "FLAT") == 0)
++ addr_method = SCST_LUN_ADDR_METHOD_FLAT;
++ else if (strcasecmp(pp, "LUN") == 0)
++ addr_method = SCST_LUN_ADDR_METHOD_LUN;
++ else {
++ PRINT_ERROR("Unexpected "
++ "argument %s", pp);
++ res = -EINVAL;
++ goto out_up_free;
++ }
++ break;
++ case SCST_PROC_ACTION_DEL_GROUP:
++ PRINT_ERROR("%s", "Too many "
++ "arguments");
++ res = -EINVAL;
++ goto out_up_free;
++ }
++ }
++ }
++
++ if (strcmp(p, SCST_DEFAULT_ACG_NAME) == 0) {
++ PRINT_ERROR("Attempt to add/delete/rename predefined "
++ "group \"%s\"", p);
++ res = -EINVAL;
++ goto out_up_free;
++ }
++
++ list_for_each_entry(a, &scst_acg_list, acg_list_entry) {
++ if (strcmp(a->acg_name, p) == 0) {
++ TRACE_DBG("group (acg) %p %s found",
++ a, a->acg_name);
++ acg = a;
++ break;
++ }
++ }
++
++ switch (action) {
++ case SCST_PROC_ACTION_ADD_GROUP:
++ if (acg) {
++ PRINT_ERROR("acg name %s exist", p);
++ res = -EINVAL;
++ goto out_up_free;
++ }
++ rc = scst_proc_group_add(p, addr_method);
++ break;
++ case SCST_PROC_ACTION_DEL_GROUP:
++ if (acg == NULL) {
++ PRINT_ERROR("acg name %s not found", p);
++ res = -EINVAL;
++ goto out_up_free;
++ }
++ rc = scst_proc_del_free_acg(acg, 1);
++ break;
++ case SCST_PROC_ACTION_RENAME_GROUP:
++ if (acg == NULL) {
++ PRINT_ERROR("acg name %s not found", p);
++ res = -EINVAL;
++ goto out_up_free;
++ }
++
++ p = pp;
++ while (!isspace(*pp) && *pp != '\0')
++ pp++;
++ if (*pp != '\0') {
++ *pp = '\0';
++ pp++;
++ while (isspace(*pp) && *pp != '\0')
++ pp++;
++ if (*pp != '\0') {
++ PRINT_ERROR("%s", "Too many arguments");
++ res = -EINVAL;
++ goto out_up_free;
++ }
++ }
++ rc = scst_proc_rename_acg(acg, p);
++ break;
++ }
++ break;
++ case SCST_PROC_ACTION_ASSIGN:
++ rc = scst_proc_assign_handler(p);
++ break;
++ }
++
++ if (rc != 0)
++ res = rc;
++
++out_up_free:
++ mutex_unlock(&scst_mutex);
++
++out_free_resume:
++ scst_resume_activity();
++
++out_free:
++ free_page((unsigned long)buffer);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* The activity supposed to be suspended and scst_mutex held */
++static int scst_proc_assign_handler(char *buf)
++{
++ int res = 0;
++ char *p = buf, *e, *ee;
++ unsigned long host, channel = 0, id = 0, lun = 0;
++ struct scst_device *d, *dev = NULL;
++ struct scst_dev_type *dt, *handler = NULL;
++
++ TRACE_ENTRY();
++
++ while (isspace(*p) && *p != '\0')
++ p++;
++
++ host = simple_strtoul(p, &p, 0);
++ if ((host == ULONG_MAX) || (*p != ':'))
++ goto out_synt_err;
++ p++;
++ channel = simple_strtoul(p, &p, 0);
++ if ((channel == ULONG_MAX) || (*p != ':'))
++ goto out_synt_err;
++ p++;
++ id = simple_strtoul(p, &p, 0);
++ if ((channel == ULONG_MAX) || (*p != ':'))
++ goto out_synt_err;
++ p++;
++ lun = simple_strtoul(p, &p, 0);
++ if (lun == ULONG_MAX)
++ goto out_synt_err;
++
++ e = p;
++ e++;
++ while (isspace(*e) && *e != '\0')
++ e++;
++ ee = e;
++ while (!isspace(*ee) && *ee != '\0')
++ ee++;
++ *ee = '\0';
++
++ TRACE_DBG("Dev %ld:%ld:%ld:%ld, handler %s", host, channel, id, lun, e);
++
++ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
++ if ((d->virt_id == 0) &&
++ d->scsi_dev->host->host_no == host &&
++ d->scsi_dev->channel == channel &&
++ d->scsi_dev->id == id &&
++ d->scsi_dev->lun == lun) {
++ dev = d;
++ TRACE_DBG("Dev %p (%ld:%ld:%ld:%ld) found",
++ dev, host, channel, id, lun);
++ break;
++ }
++ }
++
++ if (dev == NULL) {
++ PRINT_ERROR("Device %ld:%ld:%ld:%ld not found",
++ host, channel, id, lun);
++ res = -EINVAL;
++ goto out;
++ }
++
++ list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
++ if (!strcmp(dt->name, e)) {
++ handler = dt;
++ TRACE_DBG("Dev handler %p with name %s found",
++ dt, dt->name);
++ break;
++ }
++ }
++
++ if (handler == NULL) {
++ PRINT_ERROR("Handler %s not found", e);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (dev->scsi_dev->type != handler->type) {
++ PRINT_ERROR("Type %d of device %s differs from type "
++ "%d of dev handler %s", dev->type,
++ dev->handler->name, handler->type, handler->name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = scst_assign_dev_handler(dev, handler);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_synt_err:
++ PRINT_ERROR("Syntax error on %s", p);
++ res = -EINVAL;
++ goto out;
++}
++
++static ssize_t scst_proc_groups_devices_write(struct file *file,
++ const char __user *buf,
++ size_t length, loff_t *off)
++{
++ int res, action, rc, read_only = 0;
++ char *buffer, *p, *e = NULL;
++ unsigned int virt_lun;
++ struct scst_acg *acg =
++ (struct scst_acg *)PDE(file->f_dentry->d_inode)->data;
++ struct scst_acg_dev *acg_dev = NULL, *acg_dev_tmp;
++ struct scst_device *d, *dev = NULL;
++
++ TRACE_ENTRY();
++
++ if (length > SCST_PROC_BLOCK_SIZE) {
++ res = -EOVERFLOW;
++ goto out;
++ }
++ if (!buf) {
++ res = -EINVAL;
++ goto out;
++ }
++ buffer = (char *)__get_free_page(GFP_KERNEL);
++ if (!buffer) {
++ res = -ENOMEM;
++ goto out;
++ }
++ if (copy_from_user(buffer, buf, length)) {
++ res = -EFAULT;
++ goto out_free;
++ }
++ if (length < PAGE_SIZE) {
++ buffer[length] = '\0';
++ } else if (buffer[PAGE_SIZE-1]) {
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ /*
++ * Usage: echo "add|del H:C:I:L lun [READ_ONLY]" \
++ * >/proc/scsi_tgt/groups/GROUP_NAME/devices
++ * or echo "replace H:C:I:L lun [READ_ONLY]" \
++ * >/proc/scsi_tgt/groups/GROUP_NAME/devices
++ * or echo "add|del V_NAME lun [READ_ONLY]" \
++ * >/proc/scsi_tgt/groups/GROUP_NAME/devices
++ * or echo "replace V_NAME lun [READ_ONLY]" \
++ * >/proc/scsi_tgt/groups/GROUP_NAME/devices
++ * or echo "clear" >/proc/scsi_tgt/groups/GROUP_NAME/devices
++ */
++ p = buffer;
++ if (p[strlen(p) - 1] == '\n')
++ p[strlen(p) - 1] = '\0';
++ if (!strncasecmp("clear", p, 5)) {
++ action = SCST_PROC_ACTION_CLEAR;
++ } else if (!strncasecmp("add ", p, 4)) {
++ p += 4;
++ action = SCST_PROC_ACTION_ADD;
++ } else if (!strncasecmp("del ", p, 4)) {
++ p += 4;
++ action = SCST_PROC_ACTION_DEL;
++ } else if (!strncasecmp("replace ", p, 8)) {
++ p += 8;
++ action = SCST_PROC_ACTION_REPLACE;
++ } else {
++ PRINT_ERROR("Unknown action \"%s\"", p);
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ res = scst_suspend_activity(true);
++ if (res != 0)
++ goto out_free;
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out_free_resume;
++ }
++
++ res = length;
++
++ switch (action) {
++ case SCST_PROC_ACTION_ADD:
++ case SCST_PROC_ACTION_DEL:
++ case SCST_PROC_ACTION_REPLACE:
++ while (isspace(*p) && *p != '\0')
++ p++;
++ e = p; /* save p */
++ while (!isspace(*e) && *e != '\0')
++ e++;
++ *e = 0;
++
++ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
++ if (!strcmp(d->virt_name, p)) {
++ dev = d;
++ TRACE_DBG("Device %p (%s) found", dev, p);
++ break;
++ }
++ }
++ if (dev == NULL) {
++ PRINT_ERROR("Device %s not found", p);
++ res = -EINVAL;
++ goto out_free_up;
++ }
++ break;
++ }
++
++ /* ToDo: create separate functions */
++
++ switch (action) {
++ case SCST_PROC_ACTION_ADD:
++ case SCST_PROC_ACTION_REPLACE:
++ {
++ bool dev_replaced = false;
++
++ e++;
++ while (isspace(*e) && *e != '\0')
++ e++;
++
++ virt_lun = simple_strtoul(e, &e, 0);
++ if (virt_lun > SCST_MAX_LUN) {
++ PRINT_ERROR("Too big LUN %d (max %d)", virt_lun,
++ SCST_MAX_LUN);
++ res = -EINVAL;
++ goto out_free_up;
++ }
++
++ while (isspace(*e) && *e != '\0')
++ e++;
++
++ if (*e != '\0') {
++ if (!strncasecmp("READ_ONLY", e, 9))
++ read_only = 1;
++ else {
++ PRINT_ERROR("Unknown option \"%s\"", e);
++ res = -EINVAL;
++ goto out_free_up;
++ }
++ }
++
++ list_for_each_entry(acg_dev_tmp, &acg->acg_dev_list,
++ acg_dev_list_entry) {
++ if (acg_dev_tmp->lun == virt_lun) {
++ acg_dev = acg_dev_tmp;
++ break;
++ }
++ }
++ if (acg_dev != NULL) {
++ if (action == SCST_PROC_ACTION_ADD) {
++ PRINT_ERROR("virt lun %d already exists in "
++ "group %s", virt_lun, acg->acg_name);
++ res = -EEXIST;
++ goto out_free_up;
++ } else {
++ /* Replace */
++ rc = scst_acg_del_lun(acg, acg_dev->lun,
++ false);
++ if (rc) {
++ res = rc;
++ goto out_free_up;
++ }
++ dev_replaced = true;
++ }
++ }
++
++ rc = scst_acg_add_lun(acg, NULL, dev, virt_lun, read_only,
++ false, NULL);
++ if (rc) {
++ res = rc;
++ goto out_free_up;
++ }
++
++ if (action == SCST_PROC_ACTION_ADD)
++ scst_report_luns_changed(acg);
++
++ if (dev_replaced) {
++ struct scst_tgt_dev *tgt_dev;
++
++ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
++ dev_tgt_dev_list_entry) {
++ if ((tgt_dev->acg_dev->acg == acg) &&
++ (tgt_dev->lun == virt_lun)) {
++ TRACE_MGMT_DBG("INQUIRY DATA HAS CHANGED"
++ " on tgt_dev %p", tgt_dev);
++ scst_gen_aen_or_ua(tgt_dev,
++ SCST_LOAD_SENSE(scst_sense_inquery_data_changed));
++ }
++ }
++ }
++ break;
++ }
++ case SCST_PROC_ACTION_DEL:
++ {
++ /*
++ * This code doesn't handle if there are >1 LUNs for the same
++ * device in the group. Instead, it always deletes the first
++ * entry. It wasn't fixed for compatibility reasons, because
++ * procfs is now obsoleted.
++ */
++ struct scst_acg_dev *a;
++ list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
++ if (a->dev == dev) {
++ rc = scst_acg_del_lun(acg, a->lun, true);
++ if (rc)
++ res = rc;
++ goto out_free_up;
++ }
++ }
++ PRINT_ERROR("Device is not found in group %s", acg->acg_name);
++ break;
++ }
++ case SCST_PROC_ACTION_CLEAR:
++ list_for_each_entry_safe(acg_dev, acg_dev_tmp,
++ &acg->acg_dev_list,
++ acg_dev_list_entry) {
++ rc = scst_acg_del_lun(acg, acg_dev->lun,
++ list_is_last(&acg_dev->acg_dev_list_entry,
++ &acg->acg_dev_list));
++ if (rc) {
++ res = rc;
++ goto out_free_up;
++ }
++ }
++ break;
++ }
++
++out_free_up:
++ mutex_unlock(&scst_mutex);
++
++out_free_resume:
++ scst_resume_activity();
++
++out_free:
++ free_page((unsigned long)buffer);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t scst_proc_groups_names_write(struct file *file,
++ const char __user *buf,
++ size_t length, loff_t *off)
++{
++ int res = length, rc = 0, action;
++ char *buffer, *p, *pp = NULL;
++ struct scst_acg *acg =
++ (struct scst_acg *)PDE(file->f_dentry->d_inode)->data;
++ struct scst_acn *n, *nn;
++
++ TRACE_ENTRY();
++
++ if (length > SCST_PROC_BLOCK_SIZE) {
++ res = -EOVERFLOW;
++ goto out;
++ }
++ if (!buf) {
++ res = -EINVAL;
++ goto out;
++ }
++ buffer = (char *)__get_free_page(GFP_KERNEL);
++ if (!buffer) {
++ res = -ENOMEM;
++ goto out;
++ }
++ if (copy_from_user(buffer, buf, length)) {
++ res = -EFAULT;
++ goto out_free;
++ }
++ if (length < PAGE_SIZE) {
++ buffer[length] = '\0';
++ } else if (buffer[PAGE_SIZE-1]) {
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ /*
++ * Usage: echo "add|del NAME" >/proc/scsi_tgt/groups/GROUP_NAME/names
++ * or echo "move NAME NEW_GROUP_NAME" >/proc/scsi_tgt/groups/OLD_GROUP_NAME/names"
++ * or echo "clear" >/proc/scsi_tgt/groups/GROUP_NAME/names
++ */
++ p = buffer;
++ if (p[strlen(p) - 1] == '\n')
++ p[strlen(p) - 1] = '\0';
++ if (!strncasecmp("clear", p, 5)) {
++ action = SCST_PROC_ACTION_CLEAR;
++ } else if (!strncasecmp("add ", p, 4)) {
++ p += 4;
++ action = SCST_PROC_ACTION_ADD;
++ } else if (!strncasecmp("del ", p, 4)) {
++ p += 4;
++ action = SCST_PROC_ACTION_DEL;
++ } else if (!strncasecmp("move ", p, 5)) {
++ p += 5;
++ action = SCST_PROC_ACTION_MOVE;
++ } else {
++ PRINT_ERROR("Unknown action \"%s\"", p);
++ res = -EINVAL;
++ goto out_free;
++ }
++
++ switch (action) {
++ case SCST_PROC_ACTION_ADD:
++ case SCST_PROC_ACTION_DEL:
++ case SCST_PROC_ACTION_MOVE:
++ while (isspace(*p) && *p != '\0')
++ p++;
++ pp = p;
++ while (!isspace(*pp) && *pp != '\0')
++ pp++;
++ if (*pp != '\0') {
++ *pp = '\0';
++ pp++;
++ while (isspace(*pp) && *pp != '\0')
++ pp++;
++ if (*pp != '\0') {
++ switch (action) {
++ case SCST_PROC_ACTION_ADD:
++ case SCST_PROC_ACTION_DEL:
++ PRINT_ERROR("%s", "Too many "
++ "arguments");
++ res = -EINVAL;
++ goto out_free;
++ }
++ }
++ }
++ break;
++ }
++
++ rc = scst_suspend_activity(true);
++ if (rc != 0)
++ goto out_free;
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out_free_resume;
++ }
++
++ switch (action) {
++ case SCST_PROC_ACTION_ADD:
++ rc = scst_acg_add_acn(acg, p);
++ break;
++ case SCST_PROC_ACTION_DEL:
++ rc = scst_acg_remove_name(acg, p, true);
++ break;
++ case SCST_PROC_ACTION_MOVE:
++ {
++ struct scst_acg *a, *new_acg = NULL;
++ char *name = p;
++ p = pp;
++ while (!isspace(*pp) && *pp != '\0')
++ pp++;
++ if (*pp != '\0') {
++ *pp = '\0';
++ pp++;
++ while (isspace(*pp) && *pp != '\0')
++ pp++;
++ if (*pp != '\0') {
++ PRINT_ERROR("%s", "Too many arguments");
++ res = -EINVAL;
++ goto out_free_unlock;
++ }
++ }
++ list_for_each_entry(a, &scst_acg_list, acg_list_entry) {
++ if (strcmp(a->acg_name, p) == 0) {
++ TRACE_DBG("group (acg) %p %s found",
++ a, a->acg_name);
++ new_acg = a;
++ break;
++ }
++ }
++ if (new_acg == NULL) {
++ PRINT_ERROR("Group %s not found", p);
++ res = -EINVAL;
++ goto out_free_unlock;
++ }
++ rc = scst_acg_remove_name(acg, name, false);
++ if (rc != 0)
++ goto out_free_unlock;
++ rc = scst_acg_add_acn(new_acg, name);
++ if (rc != 0)
++ scst_acg_add_acn(acg, name);
++ break;
++ }
++ case SCST_PROC_ACTION_CLEAR:
++ list_for_each_entry_safe(n, nn, &acg->acn_list,
++ acn_list_entry) {
++ scst_del_free_acn(n, false);
++ }
++ scst_check_reassign_sessions();
++ break;
++ }
++
++out_free_unlock:
++ mutex_unlock(&scst_mutex);
++
++out_free_resume:
++ scst_resume_activity();
++
++out_free:
++ free_page((unsigned long)buffer);
++
++out:
++ if (rc < 0)
++ res = rc;
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int scst_version_info_show(struct seq_file *seq, void *v)
++{
++ TRACE_ENTRY();
++
++ seq_printf(seq, "%s\n", SCST_VERSION_STRING);
++
++#ifdef CONFIG_SCST_STRICT_SERIALIZING
++ seq_printf(seq, "STRICT_SERIALIZING\n");
++#endif
++
++#ifdef CONFIG_SCST_EXTRACHECKS
++ seq_printf(seq, "EXTRACHECKS\n");
++#endif
++
++#ifdef CONFIG_SCST_TRACING
++ seq_printf(seq, "TRACING\n");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG
++ seq_printf(seq, "DEBUG\n");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG_TM
++ seq_printf(seq, "DEBUG_TM\n");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG_RETRY
++ seq_printf(seq, "DEBUG_RETRY\n");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG_OOM
++ seq_printf(seq, "DEBUG_OOM\n");
++#endif
++
++#ifdef CONFIG_SCST_DEBUG_SN
++ seq_printf(seq, "DEBUG_SN\n");
++#endif
++
++#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
++ seq_printf(seq, "USE_EXPECTED_VALUES\n");
++#endif
++
++#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
++ seq_printf(seq, "TEST_IO_IN_SIRQ\n");
++#endif
++
++#ifdef CONFIG_SCST_STRICT_SECURITY
++ seq_printf(seq, "STRICT_SECURITY\n");
++#endif
++
++ TRACE_EXIT();
++ return 0;
++}
++
++static struct scst_proc_data scst_version_proc_data = {
++ SCST_DEF_RW_SEQ_OP(NULL)
++ .show = scst_version_info_show,
++};
++
++static int scst_help_info_show(struct seq_file *seq, void *v)
++{
++ TRACE_ENTRY();
++
++ seq_printf(seq, "%s\n", scst_proc_help_string);
++
++ TRACE_EXIT();
++ return 0;
++}
++
++static struct scst_proc_data scst_help_proc_data = {
++ SCST_DEF_RW_SEQ_OP(NULL)
++ .show = scst_help_info_show,
++};
++
++static int scst_dev_handler_type_info_show(struct seq_file *seq, void *v)
++{
++ struct scst_dev_type *dev_type = (struct scst_dev_type *)seq->private;
++
++ TRACE_ENTRY();
++
++ seq_printf(seq, "%d - %s\n", dev_type->type,
++ dev_type->type > (int)ARRAY_SIZE(scst_proc_dev_handler_type)
++ ? "unknown" : scst_proc_dev_handler_type[dev_type->type]);
++
++ TRACE_EXIT();
++ return 0;
++}
++
++static struct scst_proc_data scst_dev_handler_type_proc_data = {
++ SCST_DEF_RW_SEQ_OP(NULL)
++ .show = scst_dev_handler_type_info_show,
++};
++
++static int scst_sessions_info_show(struct seq_file *seq, void *v)
++{
++ int res = 0;
++ struct scst_acg *acg;
++ struct scst_session *sess;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ seq_printf(seq, "%-20s %-45s %-35s %-15s\n",
++ "Target name", "Initiator name",
++ "Group name", "Active/All Commands Count");
++
++ list_for_each_entry(acg, &scst_acg_list, acg_list_entry) {
++ list_for_each_entry(sess, &acg->acg_sess_list,
++ acg_sess_list_entry) {
++ int active_cmds = 0, t;
++ for (t = SESS_TGT_DEV_LIST_HASH_SIZE-1; t >= 0; t--) {
++ struct list_head *head =
++ &sess->sess_tgt_dev_list[t];
++ struct scst_tgt_dev *tgt_dev;
++ list_for_each_entry(tgt_dev, head,
++ sess_tgt_dev_list_entry) {
++ active_cmds += atomic_read(&tgt_dev->tgt_dev_cmd_count);
++ }
++ }
++ seq_printf(seq, "%-20s %-45s %-35s %d/%d\n",
++ sess->tgt->tgtt->name,
++ sess->initiator_name,
++ acg->acg_name, active_cmds,
++ atomic_read(&sess->sess_cmd_count));
++ }
++ }
++
++ mutex_unlock(&scst_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct scst_proc_data scst_sessions_proc_data = {
++ SCST_DEF_RW_SEQ_OP(NULL)
++ .show = scst_sessions_info_show,
++};
++
++static struct scst_proc_data scst_sgv_proc_data = {
++ SCST_DEF_RW_SEQ_OP(NULL)
++ .show = sgv_procinfo_show,
++};
++
++static int scst_groups_names_show(struct seq_file *seq, void *v)
++{
++ int res = 0;
++ struct scst_acg *acg = (struct scst_acg *)seq->private;
++ struct scst_acn *name;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ list_for_each_entry(name, &acg->acn_list, acn_list_entry) {
++ seq_printf(seq, "%s\n", name->name);
++ }
++
++ mutex_unlock(&scst_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct scst_proc_data scst_groups_names_proc_data = {
++ SCST_DEF_RW_SEQ_OP(scst_proc_groups_names_write)
++ .show = scst_groups_names_show,
++};
++
++static int scst_groups_addr_method_show(struct seq_file *seq, void *v)
++{
++ int res = 0;
++ struct scst_acg *acg = (struct scst_acg *)seq->private;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ switch (acg->addr_method) {
++ case SCST_LUN_ADDR_METHOD_FLAT:
++ seq_printf(seq, "%s\n", "FLAT");
++ break;
++ case SCST_LUN_ADDR_METHOD_PERIPHERAL:
++ seq_printf(seq, "%s\n", "PERIPHERAL");
++ break;
++ case SCST_LUN_ADDR_METHOD_LUN:
++ seq_printf(seq, "%s\n", "LUN");
++ break;
++ default:
++ seq_printf(seq, "%s\n", "UNKNOWN");
++ break;
++ }
++
++ mutex_unlock(&scst_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++static struct scst_proc_data scst_groups_addr_method_proc_data = {
++ SCST_DEF_RW_SEQ_OP(NULL)
++ .show = scst_groups_addr_method_show,
++};
++static int scst_groups_devices_show(struct seq_file *seq, void *v)
++{
++ int res = 0;
++ struct scst_acg *acg = (struct scst_acg *)seq->private;
++ struct scst_acg_dev *acg_dev;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ seq_printf(seq, "%-60s%-13s%s\n", "Device (host:ch:id:lun or name)",
++ "LUN", "Options");
++
++ list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
++ seq_printf(seq, "%-60s%-13lld%s\n",
++ acg_dev->dev->virt_name,
++ (long long unsigned int)acg_dev->lun,
++ acg_dev->rd_only ? "RO" : "");
++ }
++ mutex_unlock(&scst_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct scst_proc_data scst_groups_devices_proc_data = {
++ SCST_DEF_RW_SEQ_OP(scst_proc_groups_devices_write)
++ .show = scst_groups_devices_show,
++};
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++
++static int scst_proc_read_tbl(const struct scst_trace_log *tbl,
++ struct seq_file *seq,
++ unsigned long log_level, int *first)
++{
++ const struct scst_trace_log *t = tbl;
++ int res = 0;
++
++ while (t->token) {
++ if (log_level & t->val) {
++ seq_printf(seq, "%s%s", *first ? "" : " | ", t->token);
++ *first = 0;
++ }
++ t++;
++ }
++ return res;
++}
++
++int scst_proc_log_entry_read(struct seq_file *seq, unsigned long log_level,
++ const struct scst_trace_log *tbl)
++{
++ int res = 0, first = 1;
++
++ TRACE_ENTRY();
++
++ scst_proc_read_tbl(scst_proc_trace_tbl, seq, log_level, &first);
++
++ if (tbl)
++ scst_proc_read_tbl(tbl, seq, log_level, &first);
++
++ seq_printf(seq, "%s\n", first ? "none" : "");
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++EXPORT_SYMBOL_GPL(scst_proc_log_entry_read);
++
++static int log_info_show(struct seq_file *seq, void *v)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_log_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ res = scst_proc_log_entry_read(seq, trace_flag,
++ scst_proc_local_trace_tbl);
++
++ mutex_unlock(&scst_log_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct scst_proc_data scst_log_proc_data = {
++ SCST_DEF_RW_SEQ_OP(scst_proc_scsi_tgt_gen_write_log)
++ .show = log_info_show,
++ .data = "scsi_tgt",
++};
++
++#endif
++
++static int scst_tgt_info_show(struct seq_file *seq, void *v)
++{
++ int res = 0;
++ struct scst_device *dev;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ seq_printf(seq, "%-60s%s\n", "Device (host:ch:id:lun or name)",
++ "Device handler");
++ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
++ seq_printf(seq, "%-60s%s\n",
++ dev->virt_name, dev->handler->name);
++ }
++
++ mutex_unlock(&scst_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct scst_proc_data scst_tgt_proc_data = {
++ SCST_DEF_RW_SEQ_OP(scst_proc_scsi_tgt_gen_write)
++ .show = scst_tgt_info_show,
++};
++
++static int scst_threads_info_show(struct seq_file *seq, void *v)
++{
++ TRACE_ENTRY();
++
++ seq_printf(seq, "%d\n", scst_main_cmd_threads.nr_threads);
++
++ TRACE_EXIT();
++ return 0;
++}
++
++static struct scst_proc_data scst_threads_proc_data = {
++ SCST_DEF_RW_SEQ_OP(scst_proc_threads_write)
++ .show = scst_threads_info_show,
++};
++
++static int scst_scsi_tgtinfo_show(struct seq_file *seq, void *v)
++{
++ struct scst_tgt *vtt = seq->private;
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_proc_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ if (vtt->tgtt->read_proc)
++ res = vtt->tgtt->read_proc(seq, vtt);
++
++ mutex_unlock(&scst_proc_mutex);
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct scst_proc_data scst_scsi_tgt_proc_data = {
++ SCST_DEF_RW_SEQ_OP(scst_proc_scsi_tgt_write)
++ .show = scst_scsi_tgtinfo_show,
++};
++
++static int scst_dev_handler_info_show(struct seq_file *seq, void *v)
++{
++ struct scst_dev_type *dev_type = seq->private;
++ int res = 0;
++
++ TRACE_ENTRY();
++
++ if (mutex_lock_interruptible(&scst_proc_mutex) != 0) {
++ res = -EINTR;
++ goto out;
++ }
++
++ if (dev_type->read_proc)
++ res = dev_type->read_proc(seq, dev_type);
++
++ mutex_unlock(&scst_proc_mutex);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static struct scst_proc_data scst_dev_handler_proc_data = {
++ SCST_DEF_RW_SEQ_OP(scst_proc_scsi_dev_handler_write)
++ .show = scst_dev_handler_info_show,
++};
++
++struct proc_dir_entry *scst_create_proc_entry(struct proc_dir_entry *root,
++ const char *name, struct scst_proc_data *pdata)
++{
++ struct proc_dir_entry *p = NULL;
++
++ TRACE_ENTRY();
++
++ if (root) {
++ mode_t mode;
++
++ mode = S_IFREG | S_IRUGO | (pdata->seq_op.write ? S_IWUSR : 0);
++ p = create_proc_entry(name, mode, root);
++ if (p == NULL) {
++ PRINT_ERROR("Fail to create entry %s in /proc", name);
++ } else {
++ p->proc_fops = &pdata->seq_op;
++ p->data = pdata->data;
++ }
++ }
++
++ TRACE_EXIT();
++ return p;
++}
++EXPORT_SYMBOL_GPL(scst_create_proc_entry);
++
++int scst_single_seq_open(struct inode *inode, struct file *file)
++{
++ struct scst_proc_data *pdata = container_of(PDE(inode)->proc_fops,
++ struct scst_proc_data, seq_op);
++ return single_open(file, pdata->show, PDE(inode)->data);
++}
++EXPORT_SYMBOL_GPL(scst_single_seq_open);
++
++struct proc_dir_entry *scst_proc_get_tgt_root(
++ struct scst_tgt_template *vtt)
++{
++ return vtt->proc_tgt_root;
++}
++EXPORT_SYMBOL_GPL(scst_proc_get_tgt_root);
++
++struct proc_dir_entry *scst_proc_get_dev_type_root(
++ struct scst_dev_type *dtt)
++{
++ return dtt->proc_dev_type_root;
++}
++EXPORT_SYMBOL_GPL(scst_proc_get_dev_type_root);
+diff -uprN orig/linux-2.6.39/Documentation/scst/README.scst linux-2.6.39/Documentation/scst/README.scst
+--- orig/linux-2.6.39/Documentation/scst/README.scst
++++ linux-2.6.39/Documentation/scst/README.scst
+@@ -0,0 +1,1535 @@
+Generic SCSI target mid-level for Linux (SCST)
+==============================================
+
@@ -44226,6 +47184,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+Full list of SCST features and comparison with other Linux targets you
+can find on http://scst.sourceforge.net/comparison.html.
+
++
+Installation
+------------
+
@@ -44265,11 +47224,12 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+ freely use any sg, sd, st, etc. devices imported from target
+ on the same host, but you can't mount file systems or put
+ swap on them. This is a limitation of Linux memory/cache
-+ manager, because in this case an OOM deadlock like: system
-+ needs some memory -> it decides to clear some cache -> cache
-+ needs to write on target exported device -> initiator sends
-+ request to the target -> target needs memory -> system needs
-+ even more memory -> deadlock.
++ manager, because in this case a memory allocation deadlock is
++ possible like: system needs some memory -> it decides to
++ clear some cache -> the cache is needed to be written on a
++ target exported device -> initiator sends request to the
++ target located on the same system -> the target needs memory
++ -> the system needs even more memory -> deadlock.
+
+IMPORTANT: In the current version simultaneous access to local SCSI devices
+========= via standard high-level SCSI drivers (sd, st, sg, etc.) and
@@ -44281,12 +47241,14 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+ devices READ/WRITE commands using direct disk handler are
+ generally safe.
+
++
+Usage in failover mode
+----------------------
+
+It is recommended to use TEST UNIT READY ("tur") command to check if
+SCST target is alive in MPIO configurations.
+
++
+Device handlers
+---------------
+
@@ -44306,13 +47268,13 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+or ever disk partition, where there is no file systems overhead. Using
+block devices comparing to sending SCSI commands directly to SCSI
+mid-level via scsi_do_req()/scsi_execute_async() has advantage that data
-+are transferred via system cache, so it is possible to fully benefit from
-+caching and read ahead performed by Linux's VM subsystem. The only
++are transferred via system cache, so it is possible to fully benefit
++from caching and read ahead performed by Linux's VM subsystem. The only
+disadvantage here that in the FILEIO mode there is superfluous data
+copying between the cache and SCST's buffers. This issue is going to be
-+addressed in the next release. Virtual CDROM's are useful for remote
-+installation. See below for details how to setup and use VDISK device
-+handler.
++addressed in one of the future releases. Virtual CDROM's are useful for
++remote installation. See below for details how to setup and use VDISK
++device handler.
+
+"Performance" device handlers for disks, MO disks and tapes in their
+exec() method skip (pretend to execute) all READ and WRITE operations
@@ -44324,6 +47286,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+ was allocated, without even being zeroed. Thus, "perf" device
+ handlers impose some security risk, so use them with caution.
+
++
+Compilation options
+-------------------
+
@@ -44357,6 +47320,8 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+ kernel log and your initiator returns an error. Also report those
+ messages in the SCST mailing list scst-devel@lists.sourceforge.net.
+ Note, that not all SCSI transports support supplying expected values.
++ You should try to enable this option if you have a not working with
++ SCST pass-through device, for instance, an SATA CDROM.
+
+ - CONFIG_SCST_DEBUG_TM - if defined, turns on task management functions
+ debugging, when on LUN 6 some of the commands will be delayed for
@@ -44420,6 +47385,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+HIGHMEM kernel configurations are fully supported, but not recommended
+for performance reasons.
+
++
+Module parameters
+-----------------
+
@@ -44432,9 +47398,26 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+ consumed by the SCST commands for data buffers at any given time. By
+ default it is approximately TotalMem/4.
+
++
+SCST sysfs interface
+--------------------
+
++SCST sysfs interface is designed to be self descriptive and self
++containing. This means that a high level management tool for it can be
++written once and automatically support any future sysfs interface
++changes (attributes additions or removals, new target drivers and dev
++handlers, etc.) without any modifications. Scstadmin is an example of
++such a management tool.
++
++To implement that, a management tool should not be implemented around
++drivers and their attributes, but around common rules those drivers and
++attributes follow. You can find those rules in SysfsRules file. For
++instance, each SCST sysfs file (attribute) can contain in the last line
++mark "[key]". It is automatically added to allow scstadmin and other
++management tools to see which attributes it should save in the config
++file. If you are doing manual attributes manipulations, you can ignore
++this mark.
++
+Root of SCST sysfs interface is /sys/kernel/scst_tgt. It has the
+following entries:
+
@@ -44443,12 +47426,13 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+ - handlers - this is a root subdirectory for all SCST dev handlers
+
+ - max_tasklet_cmd - specifies how many commands at max can be queued in
-+ the SCST core simultaneously from all connected initiators to allow
-+ processing commands in soft-IRQ context in tasklets. If the count of
-+ the commands exceeds this value, then all of them will be processed
-+ only in threads. This is to to prevent possible starvation under
-+ heavy load and in some cases to improve performance by more evenly
-+ spreading load over available CPUs.
++ the SCST core simultaneously on a single CPU from all connected
++ initiators to allow processing commands on this CPU in soft-IRQ
++ context in tasklets. If the count of the commands exceeds this value,
++ then all of them will be processed only in SCST threads. This is to
++ prevent possible starvation under heavy load of processes on the
++ CPUs serving soft IRQs and in some cases to improve performance by
++ more evenly spreading load over available CPUs.
+
+ - sgv - this is a root subdirectory for all SCST SGV caches
+
@@ -44465,7 +47449,9 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+ BLOCKIO or NULLIO.
+
+ - trace_level - allows to enable and disable various tracing
-+ facilities. See content of this file for help how to use it.
++ facilities. See content of this file for help how to use it. See also
++ section "Dealing with massive logs" for more info how to make correct
++ logs when you enabled trace levels producing a lot of logs data.
+
+ - version - read-only attribute, which allows to see version of
+ SCST and enabled optional features.
@@ -44481,10 +47467,6 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+ completed, it will return the result of this operation (0 for success
+ or -errno for error).
+
-+Each SCST sysfs file (attribute) can contain in the last line mark
-+"[key]". It is automatically added mark used to allow scstadmin to see
-+which attributes it should save in the config file. You can ignore it.
-+
+"Devices" subdirectory contains subdirectories for each SCST devices.
+
+Content of each device's subdirectory is dev handler specific. See
@@ -44532,7 +47514,9 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+ handlers).
+
+ - trace_level - allows to enable and disable various tracing
-+ facilities. See content of this file for help how to use it.
++ facilities. See content of this file for help how to use it. See also
++ section "Dealing with massive logs" for more info how to make correct
++ logs when you enabled trace levels producing a lot of logs data.
+
+ - type - SCSI type of devices served by this dev handler.
+
@@ -44565,6 +47549,11 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+
+ - sessions - subdirectory containing connected to this target sessions.
+
++ - comment - this attribute can be used to store any human readable info
++ to help identify target. For instance, to help identify the target's
++ mapping to the corresponding hardware port. It isn't anyhow used by
++ SCST.
++
++ - enabled - using this attribute you can enable or disable this target.
+ It allows to finish configuring it before it starts accepting new
+ connections. 0 by default.
@@ -44576,6 +47565,10 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+ initiators security groups, so you can assign the addressing method
+ on per-initiator basis.
+
++ - cpu_mask - defines CPU affinity mask for threads serving this target.
++ For threads serving LUNs it is used only for devices with
++ threads_pool_type "per_initiator".
++
+ - io_grouping_type - defines how I/O from sessions to this target are
+ grouped together. This I/O grouping is very important for
+ performance. By setting this attribute in a right value, you can
@@ -44644,6 +47637,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+
+See below description of the VDISK's sysfs interface for samples.
+
++
+Access and devices visibility management (LUN masking)
+------------------------------------------------------
+
@@ -44728,7 +47722,8 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+ - "del GROUP_NAME" - deletes a new security group.
+
+Each security group's subdirectory contains 2 subdirectories: initiators
-+and luns.
++and luns as well as the following attributes: addr_method, cpu_mask and
++io_grouping_type. See above description of them.
+
+Each "initiators" subdirectory contains list of added to this groups
+initiator as well as as well as file "mgmt". This file has the following
@@ -44821,6 +47816,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+the security groups before new connections from the initiators are
+created, i.e. before the target enabled.
+
++
+VDISK device handler
+--------------------
+
@@ -44834,7 +47830,9 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+ of the corresponding devices.
+
+ - trace_level - allows to enable and disable various tracing
-+ facilities. See content of this file for help how to use it.
++ facilities. See content of this file for help how to use it. See also
++ section "Dealing with massive logs" for more info how to make correct
++ logs when you enabled trace levels producing a lot of logs data.
+
+ - mgmt - main management entry, which allows to add/delete VDISK
+ devices with the corresponding type.
@@ -44891,6 +47889,10 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+ will go from the initiator. This option overrides "write_through"
+ option. Disabled by default.
+
++ - thin_provisioned - enables thin provisioning facility, when remote
++ initiators can unmap blocks of storage, if they don't need them
++ anymore. Backend storage also must support this facility.
++
+ - removable - with this flag set the device is reported to remote
+ initiators as removable.
+
@@ -44902,8 +47904,8 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+below for more info.
+
+The following parameters possible for vdisk_blockio: filename,
-+blocksize, nv_cache, read_only, removable. See vdisk_fileio above for
-+description of those parameters.
++blocksize, nv_cache, read_only, removable, thin_provisioned. See
++vdisk_fileio above for description of those parameters.
+
+Handler vdisk_nullio provides NULLIO mode to create virtual devices. In
+this mode no real I/O is done, but success returned to initiators.
@@ -44938,6 +47940,9 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+
+ - nv_cache - contains NV_CACHE status of this virtual device.
+
++ - thin_provisioned - contains thin provisioning status of this virtual
++ device
++
+ - removable - contains removable status of this virtual device.
+
+ - size_mb - contains size of this virtual device in MB.
@@ -44977,6 +47982,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+|-- resync_size
+|-- size_mb
+|-- t10_dev_id
++|-- thin_provisioned
+|-- threads_num
+|-- threads_pool_type
+|-- type
@@ -44985,8 +47991,9 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+
+Each vdisk_blockio's device has the following attributes in
+/sys/kernel/scst_tgt/devices/device_name: blocksize, filename, nv_cache,
-+read_only, removable, resync_size, size_mb, t10_dev_id, threads_num,
-+threads_pool_type, type, usn. See above description of those parameters.
++read_only, removable, resync_size, size_mb, t10_dev_id,
++thin_provisioned, threads_num, threads_pool_type, type, usn. See above
++description of those parameters.
+
+Each vdisk_nullio's device has the following attributes in
+/sys/kernel/scst_tgt/devices/device_name: blocksize, read_only,
@@ -45062,6 +48069,30 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+ size device, the block size stops matter, any program will
+ work with files on such file system.
+
++
++Dealing with massive logs
++-------------------------
++
++If you want to enable using "trace_level" file logging levels, which
++produce a lot of events, like "debug", to not lose logged events you
++should also:
++
++ * Increase in .config of your kernel CONFIG_LOG_BUF_SHIFT variable
++ to much bigger value, then recompile it. For example, value 25 will
++ provide good protection from logging overflow even under high volume
++ of logging events. To use it you will need to modify the maximum
++ allowed value for CONFIG_LOG_BUF_SHIFT in the corresponding Kconfig
++ file to 25 as well.
++
++ * Change in your /etc/syslog.conf or other config file of your favorite
++ logging program to store kernel logs in async manner. For example,
++ you can add in rsyslog.conf line "kern.info -/var/log/kernel" and
++ add "kern.none" in line for /var/log/messages, so the resulting line
++ would looks like:
++
++ "*.info;kern.none;mail.none;authpriv.none;cron.none /var/log/messages"
++
++
+Persistent Reservations
+-----------------------
+
@@ -45089,6 +48120,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+persistent reservations from this device are released, upon reconnect
+the initiators will see it.
+
++
+Caching
+-------
+
@@ -45175,6 +48207,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+Note, on some real-life workloads write through caching might perform
+better, than write back one with the barrier protection turned on.
+
++
+BLOCKIO VDISK mode
+------------------
+
@@ -45221,6 +48254,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+ restore 1.x behavior, your should recreate your BLOCKIO
+ devices in NV_CACHE mode.
+
++
+Pass-through mode
+-----------------
+
@@ -45289,6 +48323,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+page each. See the following patch as an example:
+http://scst.sourceforge.net/sgv_big_order_alloc.diff
+
++
+Performance
+-----------
+
@@ -45429,7 +48464,10 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+ device.
+
+ Note2: you need to restart SCST after you changed read-ahead settings
-+ on the target.
++ on the target. It is a limitation of the Linux read ahead
++ implementation. It reads RA values for each file only when the file
++ is open and not updates them when the global RA parameters changed.
++ Hence, the need for vdisk to reopen all its files/devices.
+
+ - You may need to increase amount of requests that OS on initiator
+ sends to the target device. To do it on Linux initiators, run
@@ -45493,19 +48531,22 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+ See also important notes about setting block sizes >512 bytes
+ for VDISK FILEIO devices above.
+
-+9. In some cases, for instance working with SSD devices, which consume 100%
-+of a single CPU load for data transfers in their internal threads, to
-+maximize IOPS it can be needed to assign for those threads dedicated
-+CPUs using Linux CPU affinity facilities. No IRQ processing should be
-+done on those CPUs. Check that using /proc/interrupts. See taskset
-+command and Documentation/IRQ-affinity.txt in your kernel's source tree
-+for how to assign IRQ affinity to tasks and IRQs.
++9. In some cases, for instance working with SSD devices, which consume
++100% of a single CPU load for data transfers in their internal threads,
++to maximize IOPS it can be needed to assign for those threads dedicated
++CPUs. Consider using cpu_mask attribute for devices with
++threads_pool_type "per_initiator" or Linux CPU affinity facilities for
++other threads_pool_types. No IRQ processing should be done on those
++CPUs. Check that using /proc/interrupts. See taskset command and
++Documentation/IRQ-affinity.txt in your kernel's source tree for how to
++assign IRQ affinity to tasks and IRQs.
+
+The reason for that is that processing of coming commands in SIRQ
+context might be done on the same CPUs as SSD devices' threads doing data
+transfers. As the result, those threads won't receive all the processing
+power of those CPUs and perform worse.
+
++
+Work if target's backstorage or link is too slow
+------------------------------------------------
+
@@ -45598,6 +48639,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+scst_priv.h to 64. Usually initiators don't try to push more commands on
+the target.
+
++
+Credits
+-------
+
@@ -45619,11 +48661,8 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+ * Erik Habbinga <erikhabbinga@inphase-tech.com> for fixes and support
+ of the LSI target driver.
+
-+ * Ross S. W. Walker <rswwalker@hotmail.com> for the original block IO
-+ code and Vu Pham <huongvp@yahoo.com> who updated it for the VDISK dev
-+ handler.
-+
-+ * Michael G. Byrnes <michael.byrnes@hp.com> for fixes.
++ * Ross S. W. Walker <rswwalker@hotmail.com> for BLOCKIO inspiration
++ and Vu Pham <huongvp@yahoo.com> who implemented it for VDISK dev handler.
+
+ * Alessandro Premoli <a.premoli@andxor.it> for fixes
+
@@ -45641,11 +48680,12 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst linux-2.6.36/Documen
+ * Daniel Debonzi <debonzi@linux.vnet.ibm.com> for a big part of the
+ initial SCST sysfs tree implementation
+
++
+Vladislav Bolkhovitin <vst@vlnb.net>, http://scst.sourceforge.net
-diff -uprN orig/linux-2.6.36/Documentation/scst/SysfsRules linux-2.6.36/Documentation/scst/SysfsRules
---- orig/linux-2.6.36/Documentation/scst/SysfsRules
-+++ linux-2.6.36/Documentation/scst/SysfsRules
-@@ -0,0 +1,933 @@
+diff -uprN orig/linux-2.6.39/Documentation/scst/SysfsRules linux-2.6.39/Documentation/scst/SysfsRules
+--- orig/linux-2.6.39/Documentation/scst/SysfsRules
++++ linux-2.6.39/Documentation/scst/SysfsRules
+@@ -0,0 +1,942 @@
+ SCST SYSFS interface rules
+ ==========================
+
@@ -45679,6 +48719,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/SysfsRules linux-2.6.36/Document
+In other words, all "IncomingUser[0-9]*" names should be considered as
+different instances of the same "IncomingUser" attribute.
+
++
+I. Rules for target drivers
+===========================
+
@@ -45761,6 +48802,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/SysfsRules linux-2.6.36/Document
+ retry, recv_bot, send_bot, recv_top,
+ send_top, d_read, d_write, conn, conn_dbg, iov, pdu, net_page]
+
++
+3. "version" - this read-only for all attribute SHOULD return version of
+the target driver and some info about its enabled compile time facilities.
+
@@ -45919,6 +48961,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/SysfsRules linux-2.6.36/Document
+
+See SCST core's README for more info about those attributes.
+
++
+II. Rules for dev handlers
+==========================
+
@@ -45974,6 +49017,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/SysfsRules linux-2.6.36/Document
+
+out_of_mem | minor | pid | line | function | special | mgmt | mgmt_dbg
+
++
+Usage:
+ echo "all|none|default" >trace_level
+ echo "value DEC|0xHEX|0OCT" >trace_level
@@ -46145,6 +49189,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/SysfsRules linux-2.6.36/Document
+
+See SCST core's README for more info about those attributes.
+
++
+III. Rules for management utilities
+===================================
+
@@ -46339,6 +49384,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/SysfsRules linux-2.6.36/Document
+Thus, management utility should implement only 8 procedures: (1.1),
+(1.2), (2.1.3), (2.1.4), (2.2.3), (2.2.4), (2.3.3), (2.3.4).
+
++
+How to distinguish hardware and virtual targets
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
@@ -46351,6 +49397,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/SysfsRules linux-2.6.36/Document
+A target is virtual if there is "mgmt" file and "hw_target" attribute
+doesn't exist.
+
++
+Algorithm to convert current SCST configuration to config file
+--------------------------------------------------------------
+
@@ -46434,6 +49481,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/SysfsRules linux-2.6.36/Document
+
+3.4. Store value of "enabled" attribute, if it exists.
+
++
+Algorithm to initialize SCST from config file
+---------------------------------------------
+
@@ -46500,6 +49548,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/SysfsRules linux-2.6.36/Document
+
+3.4. If this target driver supports enabling, enable it.
+
++
+Algorithm to apply changes in config file to currently running SCST
+-------------------------------------------------------------------
+
@@ -46579,9 +49628,9 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/SysfsRules linux-2.6.36/Document
+
+3.5. If this target driver should be enabled, enable it.
+
-diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/Makefile linux-2.6.36/drivers/scst/dev_handlers/Makefile
---- orig/linux-2.6.36/drivers/scst/dev_handlers/Makefile
-+++ linux-2.6.36/drivers/scst/dev_handlers/Makefile
+diff -uprN orig/linux-2.6.39/drivers/scst/dev_handlers/Makefile linux-2.6.39/drivers/scst/dev_handlers/Makefile
+--- orig/linux-2.6.39/drivers/scst/dev_handlers/Makefile
++++ linux-2.6.39/drivers/scst/dev_handlers/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -Wno-unused-parameter
+
@@ -46597,10 +49646,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/Makefile linux-2.6.36/dri
+obj-$(CONFIG_SCST_PROCESSOR) += scst_processor.o
+obj-$(CONFIG_SCST_VDISK) += scst_vdisk.o
+obj-$(CONFIG_SCST_USER) += scst_user.o
-diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_cdrom.c linux-2.6.36/drivers/scst/dev_handlers/scst_cdrom.c
---- orig/linux-2.6.36/drivers/scst/dev_handlers/scst_cdrom.c
-+++ linux-2.6.36/drivers/scst/dev_handlers/scst_cdrom.c
-@@ -0,0 +1,302 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/dev_handlers/scst_cdrom.c linux-2.6.39/drivers/scst/dev_handlers/scst_cdrom.c
+--- orig/linux-2.6.39/drivers/scst/dev_handlers/scst_cdrom.c
++++ linux-2.6.39/drivers/scst/dev_handlers/scst_cdrom.c
+@@ -0,0 +1,263 @@
+/*
+ * scst_cdrom.c
+ *
@@ -46660,15 +49709,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_cdrom.c linux-2.6.36
+#endif
+};
+
-+/**************************************************************
-+ * Function: cdrom_attach
-+ *
-+ * Argument:
-+ *
-+ * Returns : 1 if attached, error code otherwise
-+ *
-+ * Description:
-+ *************************************************************/
+static int cdrom_attach(struct scst_device *dev)
+{
+ int res, rc;
@@ -46691,15 +49731,16 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_cdrom.c linux-2.6.36
+
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (params == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "%s",
-+ "Unable to allocate struct cdrom_params");
++ PRINT_ERROR("Unable to allocate struct cdrom_params (size %zd)",
++ sizeof(*params));
+ res = -ENOMEM;
+ goto out;
+ }
+
+ buffer = kmalloc(buffer_size, GFP_KERNEL);
+ if (!buffer) {
-+ TRACE(TRACE_OUT_OF_MEM, "%s", "Memory allocation failure");
++ PRINT_ERROR("Buffer memory allocation (size %d) failure",
++ buffer_size);
+ res = -ENOMEM;
+ goto out_free_params;
+ }
@@ -46778,15 +49819,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_cdrom.c linux-2.6.36
+ return res;
+}
+
-+/************************************************************
-+ * Function: cdrom_detach
-+ *
-+ * Argument:
-+ *
-+ * Returns : None
-+ *
-+ * Description: Called to detach this device type driver
-+ ************************************************************/
+static void cdrom_detach(struct scst_device *dev)
+{
+ struct cdrom_params *params =
@@ -46811,17 +49843,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_cdrom.c linux-2.6.36
+ return params->block_shift;
+}
+
-+/********************************************************************
-+ * Function: cdrom_parse
-+ *
-+ * Argument:
-+ *
-+ * Returns : The state of the command
-+ *
-+ * Description: This does the parsing of the command
-+ *
-+ * Note: Not all states are allowed on return
-+ ********************************************************************/
+static int cdrom_parse(struct scst_cmd *cmd)
+{
+ int res = SCST_CMD_STATE_DEFAULT;
@@ -46847,17 +49868,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_cdrom.c linux-2.6.36
+ return;
+}
+
-+/********************************************************************
-+ * Function: cdrom_done
-+ *
-+ * Argument:
-+ *
-+ * Returns :
-+ *
-+ * Description: This is the completion routine for the command,
-+ * it is used to extract any necessary information
-+ * about a command.
-+ ********************************************************************/
+static int cdrom_done(struct scst_cmd *cmd)
+{
+ int res = SCST_CMD_STATE_DEFAULT;
@@ -46903,10 +49913,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_cdrom.c linux-2.6.36
+MODULE_AUTHOR("Vladislav Bolkhovitin & Leonid Stoljar");
+MODULE_DESCRIPTION("SCSI CDROM (type 5) dev handler for SCST");
+MODULE_VERSION(SCST_VERSION_STRING);
-diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_changer.c linux-2.6.36/drivers/scst/dev_handlers/scst_changer.c
---- orig/linux-2.6.36/drivers/scst/dev_handlers/scst_changer.c
-+++ linux-2.6.36/drivers/scst/dev_handlers/scst_changer.c
-@@ -0,0 +1,223 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/dev_handlers/scst_changer.c linux-2.6.39/drivers/scst/dev_handlers/scst_changer.c
+--- orig/linux-2.6.39/drivers/scst/dev_handlers/scst_changer.c
++++ linux-2.6.39/drivers/scst/dev_handlers/scst_changer.c
+@@ -0,0 +1,183 @@
+/*
+ * scst_changer.c
+ *
@@ -46961,15 +49971,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_changer.c linux-2.6.
+#endif
+};
+
-+/**************************************************************
-+ * Function: changer_attach
-+ *
-+ * Argument:
-+ *
-+ * Returns : 1 if attached, error code otherwise
-+ *
-+ * Description:
-+ *************************************************************/
+static int changer_attach(struct scst_device *dev)
+{
+ int res, rc;
@@ -47020,15 +50021,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_changer.c linux-2.6.
+ return res;
+}
+
-+/************************************************************
-+ * Function: changer_detach
-+ *
-+ * Argument:
-+ *
-+ * Returns : None
-+ *
-+ * Description: Called to detach this device type driver
-+ ************************************************************/
+#if 0
+void changer_detach(struct scst_device *dev)
+{
@@ -47039,17 +50031,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_changer.c linux-2.6.
+}
+#endif
+
-+/********************************************************************
-+ * Function: changer_parse
-+ *
-+ * Argument:
-+ *
-+ * Returns : The state of the command
-+ *
-+ * Description: This does the parsing of the command
-+ *
-+ * Note: Not all states are allowed on return
-+ ********************************************************************/
+static int changer_parse(struct scst_cmd *cmd)
+{
+ int res = SCST_CMD_STATE_DEFAULT;
@@ -47061,17 +50042,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_changer.c linux-2.6.
+ return res;
+}
+
-+/********************************************************************
-+ * Function: changer_done
-+ *
-+ * Argument:
-+ *
-+ * Returns :
-+ *
-+ * Description: This is the completion routine for the command,
-+ * it is used to extract any necessary information
-+ * about a command.
-+ ********************************************************************/
+#if 0
+int changer_done(struct scst_cmd *cmd)
+{
@@ -47130,9 +50100,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_changer.c linux-2.6.
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SCSI medium changer (type 8) dev handler for SCST");
+MODULE_VERSION(SCST_VERSION_STRING);
-diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_dev_handler.h linux-2.6.36/drivers/scst/dev_handlers/scst_dev_handler.h
---- orig/linux-2.6.36/drivers/scst/dev_handlers/scst_dev_handler.h
-+++ linux-2.6.36/drivers/scst/dev_handlers/scst_dev_handler.h
+diff -uprN orig/linux-2.6.39/drivers/scst/dev_handlers/scst_dev_handler.h linux-2.6.39/drivers/scst/dev_handlers/scst_dev_handler.h
+--- orig/linux-2.6.39/drivers/scst/dev_handlers/scst_dev_handler.h
++++ linux-2.6.39/drivers/scst/dev_handlers/scst_dev_handler.h
@@ -0,0 +1,27 @@
+#ifndef __SCST_DEV_HANDLER_H
+#define __SCST_DEV_HANDLER_H
@@ -47161,10 +50131,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_dev_handler.h linux-
+#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
+
+#endif /* __SCST_DEV_HANDLER_H */
-diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_disk.c linux-2.6.36/drivers/scst/dev_handlers/scst_disk.c
---- orig/linux-2.6.36/drivers/scst/dev_handlers/scst_disk.c
-+++ linux-2.6.36/drivers/scst/dev_handlers/scst_disk.c
-@@ -0,0 +1,380 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/dev_handlers/scst_disk.c linux-2.6.39/drivers/scst/dev_handlers/scst_disk.c
+--- orig/linux-2.6.39/drivers/scst/dev_handlers/scst_disk.c
++++ linux-2.6.39/drivers/scst/dev_handlers/scst_disk.c
+@@ -0,0 +1,692 @@
+/*
+ * scst_disk.c
+ *
@@ -47191,8 +50161,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_disk.c linux-2.6.36/
+
+#include <linux/module.h>
+#include <linux/init.h>
++#include <linux/blkdev.h>
+#include <scsi/scsi_host.h>
+#include <linux/slab.h>
++#include <asm/unaligned.h>
+
+#define LOG_PREFIX "dev_disk"
+
@@ -47211,8 +50183,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_disk.c linux-2.6.36/
+static int disk_attach(struct scst_device *dev);
+static void disk_detach(struct scst_device *dev);
+static int disk_parse(struct scst_cmd *cmd);
++static int disk_perf_exec(struct scst_cmd *cmd);
+static int disk_done(struct scst_cmd *cmd);
+static int disk_exec(struct scst_cmd *cmd);
++static bool disk_on_sg_tablesize_low(struct scst_cmd *cmd);
+
+static struct scst_dev_type disk_devtype = {
+ .name = DISK_NAME,
@@ -47223,6 +50197,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_disk.c linux-2.6.36/
+ .attach = disk_attach,
+ .detach = disk_detach,
+ .parse = disk_parse,
++ .exec = disk_exec,
++ .on_sg_tablesize_low = disk_on_sg_tablesize_low,
+ .dev_done = disk_done,
+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
@@ -47238,8 +50214,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_disk.c linux-2.6.36/
+ .attach = disk_attach,
+ .detach = disk_detach,
+ .parse = disk_parse,
++ .exec = disk_perf_exec,
+ .dev_done = disk_done,
-+ .exec = disk_exec,
++ .on_sg_tablesize_low = disk_on_sg_tablesize_low,
+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
+ .trace_flags = &trace_flag,
@@ -47287,15 +50264,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_disk.c linux-2.6.36/
+module_init(init_scst_disk_driver);
+module_exit(exit_scst_disk_driver);
+
-+/**************************************************************
-+ * Function: disk_attach
-+ *
-+ * Argument:
-+ *
-+ * Returns : 1 if attached, error code otherwise
-+ *
-+ * Description:
-+ *************************************************************/
+static int disk_attach(struct scst_device *dev)
+{
+ int res, rc;
@@ -47318,15 +50286,16 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_disk.c linux-2.6.36/
+
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (params == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "%s",
-+ "Unable to allocate struct disk_params");
++ PRINT_ERROR("Unable to allocate struct disk_params (size %zd)",
++ sizeof(*params));
+ res = -ENOMEM;
+ goto out;
+ }
+
+ buffer = kmalloc(buffer_size, GFP_KERNEL);
+ if (!buffer) {
-+ TRACE(TRACE_OUT_OF_MEM, "%s", "Memory allocation failure");
++ PRINT_ERROR("Buffer memory allocation (size %d) failure",
++ buffer_size);
+ res = -ENOMEM;
+ goto out_free_params;
+ }
@@ -47400,15 +50369,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_disk.c linux-2.6.36/
+ return res;
+}
+
-+/************************************************************
-+ * Function: disk_detach
-+ *
-+ * Argument:
-+ *
-+ * Returns : None
-+ *
-+ * Description: Called to detach this device type driver
-+ ************************************************************/
+static void disk_detach(struct scst_device *dev)
+{
+ struct disk_params *params =
@@ -47433,17 +50393,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_disk.c linux-2.6.36/
+ return params->block_shift;
+}
+
-+/********************************************************************
-+ * Function: disk_parse
-+ *
-+ * Argument:
-+ *
-+ * Returns : The state of the command
-+ *
-+ * Description: This does the parsing of the command
-+ *
-+ * Note: Not all states are allowed on return
-+ ********************************************************************/
+static int disk_parse(struct scst_cmd *cmd)
+{
+ int res = SCST_CMD_STATE_DEFAULT;
@@ -47469,17 +50418,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_disk.c linux-2.6.36/
+ return;
+}
+
-+/********************************************************************
-+ * Function: disk_done
-+ *
-+ * Argument:
-+ *
-+ * Returns :
-+ *
-+ * Description: This is the completion routine for the command,
-+ * it is used to extract any necessary information
-+ * about a command.
-+ ********************************************************************/
+static int disk_done(struct scst_cmd *cmd)
+{
+ int res = SCST_CMD_STATE_DEFAULT;
@@ -47492,19 +50430,355 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_disk.c linux-2.6.36/
+ return res;
+}
+
-+/********************************************************************
-+ * Function: disk_exec
-+ *
-+ * Argument:
-+ *
-+ * Returns :
-+ *
-+ * Description: Make SCST do nothing for data READs and WRITES.
-+ * Intended for raw line performance testing
-+ ********************************************************************/
++static bool disk_on_sg_tablesize_low(struct scst_cmd *cmd)
++{
++ bool res;
++
++ TRACE_ENTRY();
++
++ switch (cmd->cdb[0]) {
++ case WRITE_6:
++ case READ_6:
++ case WRITE_10:
++ case READ_10:
++ case WRITE_VERIFY:
++ case WRITE_12:
++ case READ_12:
++ case WRITE_VERIFY_12:
++ case WRITE_16:
++ case READ_16:
++ case WRITE_VERIFY_16:
++ res = true;
++ /* See comment in disk_exec */
++ cmd->inc_expected_sn_on_done = 1;
++ break;
++ default:
++ res = false;
++ break;
++ }
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++struct disk_work {
++ struct scst_cmd *cmd;
++ struct completion disk_work_cmpl;
++ volatile int result;
++ unsigned int left;
++ uint64_t save_lba;
++ unsigned int save_len;
++ struct scatterlist *save_sg;
++ int save_sg_cnt;
++};
++
++static int disk_cdb_get_transfer_data(const uint8_t *cdb,
++ uint64_t *out_lba, unsigned int *out_length)
++{
++ int res;
++ uint64_t lba;
++ unsigned int len;
++
++ TRACE_ENTRY();
++
++ switch (cdb[0]) {
++ case WRITE_6:
++ case READ_6:
++ lba = be16_to_cpu(get_unaligned((__be16 *)&cdb[2]));
++ len = cdb[4];
++ break;
++ case WRITE_10:
++ case READ_10:
++ case WRITE_VERIFY:
++ lba = be32_to_cpu(get_unaligned((__be32 *)&cdb[2]));
++ len = be16_to_cpu(get_unaligned((__be16 *)&cdb[7]));
++ break;
++ case WRITE_12:
++ case READ_12:
++ case WRITE_VERIFY_12:
++ lba = be32_to_cpu(get_unaligned((__be32 *)&cdb[2]));
++ len = be32_to_cpu(get_unaligned((__be32 *)&cdb[6]));
++ break;
++ case WRITE_16:
++ case READ_16:
++ case WRITE_VERIFY_16:
++ lba = be64_to_cpu(get_unaligned((__be64 *)&cdb[2]));
++ len = be32_to_cpu(get_unaligned((__be32 *)&cdb[10]));
++ break;
++ default:
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = 0;
++ *out_lba = lba;
++ *out_length = len;
++
++ TRACE_DBG("LBA %lld, length %d", (unsigned long long)lba, len);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int disk_cdb_set_transfer_data(uint8_t *cdb,
++ uint64_t lba, unsigned int len)
++{
++ int res;
++
++ TRACE_ENTRY();
++
++ switch (cdb[0]) {
++ case WRITE_6:
++ case READ_6:
++ put_unaligned(cpu_to_be16(lba), (__be16 *)&cdb[2]);
++ cdb[4] = len;
++ break;
++ case WRITE_10:
++ case READ_10:
++ case WRITE_VERIFY:
++ put_unaligned(cpu_to_be32(lba), (__be32 *)&cdb[2]);
++ put_unaligned(cpu_to_be16(len), (__be16 *)&cdb[7]);
++ break;
++ case WRITE_12:
++ case READ_12:
++ case WRITE_VERIFY_12:
++ put_unaligned(cpu_to_be32(lba), (__be32 *)&cdb[2]);
++ put_unaligned(cpu_to_be32(len), (__be32 *)&cdb[6]);
++ break;
++ case WRITE_16:
++ case READ_16:
++ case WRITE_VERIFY_16:
++ put_unaligned(cpu_to_be64(lba), (__be64 *)&cdb[2]);
++ put_unaligned(cpu_to_be32(len), (__be32 *)&cdb[10]);
++ break;
++ default:
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = 0;
++
++ TRACE_DBG("LBA %lld, length %d", (unsigned long long)lba, len);
++ TRACE_BUFFER("New CDB", cdb, SCST_MAX_CDB_SIZE);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static void disk_restore_sg(struct disk_work *work)
++{
++ disk_cdb_set_transfer_data(work->cmd->cdb, work->save_lba, work->save_len);
++ work->cmd->sg = work->save_sg;
++ work->cmd->sg_cnt = work->save_sg_cnt;
++ return;
++}
++
++static void disk_cmd_done(void *data, char *sense, int result, int resid)
++{
++ struct disk_work *work = data;
++
++ TRACE_ENTRY();
++
++ TRACE_DBG("work %p, cmd %p, left %d, result %d, sense %p, resid %d",
++ work, work->cmd, work->left, result, sense, resid);
++
++ if (result == SAM_STAT_GOOD)
++ goto out_complete;
++
++ work->result = result;
++
++ disk_restore_sg(work);
++
++ scst_pass_through_cmd_done(work->cmd, sense, result, resid + work->left);
++
++out_complete:
++ complete_all(&work->disk_work_cmpl);
++
++ TRACE_EXIT();
++ return;
++}
++
++/* Executes command and split CDB, if necessary */
+static int disk_exec(struct scst_cmd *cmd)
+{
-+ int res = SCST_EXEC_NOT_COMPLETED, rc;
++ int res, rc;
++ struct disk_params *params = (struct disk_params *)cmd->dev->dh_priv;
++ struct disk_work work;
++ unsigned int offset, cur_len; /* in blocks */
++ struct scatterlist *sg, *start_sg;
++ int cur_sg_cnt;
++ int sg_tablesize = cmd->dev->scsi_dev->host->sg_tablesize;
++ int max_sectors;
++ int num, j;
++
++ TRACE_ENTRY();
++
++ /*
++ * For PC requests we are going to submit max_hw_sectors used instead
++ * of max_sectors.
++ */
++ max_sectors = queue_max_hw_sectors(cmd->dev->scsi_dev->request_queue);
++
++ if (unlikely(((max_sectors << params->block_shift) & ~PAGE_MASK) != 0)) {
++ int mlen = max_sectors << params->block_shift;
++ int pg = ((mlen >> PAGE_SHIFT) + ((mlen & ~PAGE_MASK) != 0)) - 1;
++ int adj_len = pg << PAGE_SHIFT;
++ max_sectors = adj_len >> params->block_shift;
++ if (max_sectors == 0) {
++ PRINT_ERROR("Too low max sectors %d", max_sectors);
++ goto out_error;
++ }
++ }
++
++ if (unlikely((cmd->bufflen >> params->block_shift) > max_sectors)) {
++ if ((cmd->out_bufflen >> params->block_shift) > max_sectors) {
++ PRINT_ERROR("Too limited max_sectors %d for "
++ "bidirectional cmd %x (out_bufflen %d)",
++ max_sectors, cmd->cdb[0], cmd->out_bufflen);
++ /* Let lower level handle it */
++ res = SCST_EXEC_NOT_COMPLETED;
++ goto out;
++ }
++ goto split;
++ }
++
++ if (likely(cmd->sg_cnt <= sg_tablesize)) {
++ res = SCST_EXEC_NOT_COMPLETED;
++ goto out;
++ }
++
++split:
++ BUG_ON(cmd->out_sg_cnt > sg_tablesize);
++ BUG_ON((cmd->out_bufflen >> params->block_shift) > max_sectors);
++
++ /*
++ * We don't support changing BIDI CDBs (see disk_on_sg_tablesize_low()),
++ * so use only sg_cnt
++ */
++
++ memset(&work, 0, sizeof(work));
++ work.cmd = cmd;
++ work.save_sg = cmd->sg;
++ work.save_sg_cnt = cmd->sg_cnt;
++ rc = disk_cdb_get_transfer_data(cmd->cdb, &work.save_lba,
++ &work.save_len);
++ if (rc != 0)
++ goto out_error;
++
++ rc = scst_check_local_events(cmd);
++ if (unlikely(rc != 0))
++ goto out_done;
++
++ cmd->status = 0;
++ cmd->msg_status = 0;
++ cmd->host_status = DID_OK;
++ cmd->driver_status = 0;
++
++ TRACE_DBG("cmd %p, save_sg %p, save_sg_cnt %d, save_lba %lld, "
++ "save_len %d (sg_tablesize %d, max_sectors %d, block_shift %d, "
++ "sizeof(*sg) 0x%zx)", cmd, work.save_sg, work.save_sg_cnt,
++ (unsigned long long)work.save_lba, work.save_len,
++ sg_tablesize, max_sectors, params->block_shift, sizeof(*sg));
++
++ /*
++ * If we submit all chunks async'ly, it will be very not trivial what
++ * to do if several of them finish with sense or residual. So, let's
++ * do it synchronously.
++ */
++
++ num = 1;
++ j = 0;
++ offset = 0;
++ cur_len = 0;
++ sg = work.save_sg;
++ start_sg = sg;
++ cur_sg_cnt = 0;
++ while (1) {
++ unsigned int l;
++
++ if (unlikely(sg_is_chain(&sg[j]))) {
++ bool reset_start_sg = (start_sg == &sg[j]);
++ sg = sg_chain_ptr(&sg[j]);
++ j = 0;
++ if (reset_start_sg)
++ start_sg = sg;
++ }
++
++ l = sg[j].length >> params->block_shift;
++ cur_len += l;
++ cur_sg_cnt++;
++
++ TRACE_DBG("l %d, j %d, num %d, offset %d, cur_len %d, "
++ "cur_sg_cnt %d, start_sg %p", l, j, num, offset,
++ cur_len, cur_sg_cnt, start_sg);
++
++ if (((num % sg_tablesize) == 0) ||
++ (num == work.save_sg_cnt) ||
++ (cur_len >= max_sectors)) {
++ TRACE_DBG("%s", "Execing...");
++
++ disk_cdb_set_transfer_data(cmd->cdb,
++ work.save_lba + offset, cur_len);
++ cmd->sg = start_sg;
++ cmd->sg_cnt = cur_sg_cnt;
++
++ work.left = work.save_len - (offset + cur_len);
++ init_completion(&work.disk_work_cmpl);
++
++ rc = scst_scsi_exec_async(cmd, &work, disk_cmd_done);
++ if (unlikely(rc != 0)) {
++ PRINT_ERROR("scst_scsi_exec_async() failed: %d",
++ rc);
++ goto out_err_restore;
++ }
++
++ wait_for_completion(&work.disk_work_cmpl);
++
++ if (work.result != SAM_STAT_GOOD) {
++ /* cmd can be already dead */
++ res = SCST_EXEC_COMPLETED;
++ goto out;
++ }
++
++ offset += cur_len;
++ cur_len = 0;
++ cur_sg_cnt = 0;
++ start_sg = &sg[j+1];
++
++ if (num == work.save_sg_cnt)
++ break;
++ }
++ num++;
++ j++;
++ }
++
++ cmd->completed = 1;
++
++out_restore:
++ disk_restore_sg(&work);
++
++out_done:
++ res = SCST_EXEC_COMPLETED;
++ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_err_restore:
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out_restore;
++
++out_error:
++ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
++ goto out_done;
++}
++
++static int disk_perf_exec(struct scst_cmd *cmd)
++{
++ int res, rc;
+ int opcode = cmd->cdb[0];
+
+ TRACE_ENTRY();
@@ -47527,14 +50801,21 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_disk.c linux-2.6.36/
+ case READ_10:
+ case READ_12:
+ case READ_16:
-+ cmd->completed = 1;
-+ goto out_done;
++ case WRITE_VERIFY:
++ case WRITE_VERIFY_12:
++ case WRITE_VERIFY_16:
++ goto out_complete;
+ }
+
++ res = SCST_EXEC_NOT_COMPLETED;
++
+out:
+ TRACE_EXIT_RES(res);
+ return res;
+
++out_complete:
++ cmd->completed = 1;
++
+out_done:
+ res = SCST_EXEC_COMPLETED;
+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
@@ -47545,10 +50826,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_disk.c linux-2.6.36/
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SCSI disk (type 0) dev handler for SCST");
+MODULE_VERSION(SCST_VERSION_STRING);
-diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_modisk.c linux-2.6.36/drivers/scst/dev_handlers/scst_modisk.c
---- orig/linux-2.6.36/drivers/scst/dev_handlers/scst_modisk.c
-+++ linux-2.6.36/drivers/scst/dev_handlers/scst_modisk.c
-@@ -0,0 +1,399 @@
++
+diff -uprN orig/linux-2.6.39/drivers/scst/dev_handlers/scst_modisk.c linux-2.6.39/drivers/scst/dev_handlers/scst_modisk.c
+--- orig/linux-2.6.39/drivers/scst/dev_handlers/scst_modisk.c
++++ linux-2.6.39/drivers/scst/dev_handlers/scst_modisk.c
+@@ -0,0 +1,350 @@
+/*
+ * scst_modisk.c
+ *
@@ -47596,7 +50878,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_modisk.c linux-2.6.3
+static void modisk_detach(struct scst_device *);
+static int modisk_parse(struct scst_cmd *);
+static int modisk_done(struct scst_cmd *);
-+static int modisk_exec(struct scst_cmd *);
++static int modisk_perf_exec(struct scst_cmd *);
+
+static struct scst_dev_type modisk_devtype = {
+ .name = MODISK_NAME,
@@ -47623,7 +50905,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_modisk.c linux-2.6.3
+ .detach = modisk_detach,
+ .parse = modisk_parse,
+ .dev_done = modisk_done,
-+ .exec = modisk_exec,
++ .exec = modisk_perf_exec,
+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
+ .trace_flags = &trace_flag,
@@ -47671,15 +50953,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_modisk.c linux-2.6.3
+module_init(init_scst_modisk_driver);
+module_exit(exit_scst_modisk_driver);
+
-+/**************************************************************
-+ * Function: modisk_attach
-+ *
-+ * Argument:
-+ *
-+ * Returns : 1 if attached, error code otherwise
-+ *
-+ * Description:
-+ *************************************************************/
+static int modisk_attach(struct scst_device *dev)
+{
+ int res, rc;
@@ -47702,8 +50975,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_modisk.c linux-2.6.3
+
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (params == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "%s",
-+ "Unable to allocate struct modisk_params");
++ PRINT_ERROR("Unable to allocate struct modisk_params (size %zd)",
++ sizeof(*params));
+ res = -ENOMEM;
+ goto out;
+ }
@@ -47721,7 +50994,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_modisk.c linux-2.6.3
+
+ buffer = kmalloc(buffer_size, GFP_KERNEL);
+ if (!buffer) {
-+ TRACE(TRACE_OUT_OF_MEM, "%s", "Memory allocation failure");
++ PRINT_ERROR("Buffer memory allocation (size %d) failure",
++ buffer_size);
+ res = -ENOMEM;
+ goto out_free_params;
+ }
@@ -47801,15 +51075,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_modisk.c linux-2.6.3
+ return res;
+}
+
-+/************************************************************
-+ * Function: modisk_detach
-+ *
-+ * Argument:
-+ *
-+ * Returns : None
-+ *
-+ * Description: Called to detach this device type driver
-+ ************************************************************/
+static void modisk_detach(struct scst_device *dev)
+{
+ struct modisk_params *params =
@@ -47835,17 +51100,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_modisk.c linux-2.6.3
+ return params->block_shift;
+}
+
-+/********************************************************************
-+ * Function: modisk_parse
-+ *
-+ * Argument:
-+ *
-+ * Returns : The state of the command
-+ *
-+ * Description: This does the parsing of the command
-+ *
-+ * Note: Not all states are allowed on return
-+ ********************************************************************/
+static int modisk_parse(struct scst_cmd *cmd)
+{
+ int res = SCST_CMD_STATE_DEFAULT;
@@ -47872,17 +51126,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_modisk.c linux-2.6.3
+ return;
+}
+
-+/********************************************************************
-+ * Function: modisk_done
-+ *
-+ * Argument:
-+ *
-+ * Returns :
-+ *
-+ * Description: This is the completion routine for the command,
-+ * it is used to extract any necessary information
-+ * about a command.
-+ ********************************************************************/
+static int modisk_done(struct scst_cmd *cmd)
+{
+ int res;
@@ -47895,17 +51138,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_modisk.c linux-2.6.3
+ return res;
+}
+
-+/********************************************************************
-+ * Function: modisk_exec
-+ *
-+ * Argument:
-+ *
-+ * Returns :
-+ *
-+ * Description: Make SCST do nothing for data READs and WRITES.
-+ * Intended for raw line performance testing
-+ ********************************************************************/
-+static int modisk_exec(struct scst_cmd *cmd)
++static int modisk_perf_exec(struct scst_cmd *cmd)
+{
+ int res = SCST_EXEC_NOT_COMPLETED, rc;
+ int opcode = cmd->cdb[0];
@@ -47948,10 +51181,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_modisk.c linux-2.6.3
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SCSI MO disk (type 7) dev handler for SCST");
+MODULE_VERSION(SCST_VERSION_STRING);
-diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_processor.c linux-2.6.36/drivers/scst/dev_handlers/scst_processor.c
---- orig/linux-2.6.36/drivers/scst/dev_handlers/scst_processor.c
-+++ linux-2.6.36/drivers/scst/dev_handlers/scst_processor.c
-@@ -0,0 +1,223 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/dev_handlers/scst_processor.c linux-2.6.39/drivers/scst/dev_handlers/scst_processor.c
+--- orig/linux-2.6.39/drivers/scst/dev_handlers/scst_processor.c
++++ linux-2.6.39/drivers/scst/dev_handlers/scst_processor.c
+@@ -0,0 +1,183 @@
+/*
+ * scst_processor.c
+ *
@@ -48006,15 +51239,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_processor.c linux-2.
+#endif
+};
+
-+/**************************************************************
-+ * Function: processor_attach
-+ *
-+ * Argument:
-+ *
-+ * Returns : 1 if attached, error code otherwise
-+ *
-+ * Description:
-+ *************************************************************/
+static int processor_attach(struct scst_device *dev)
+{
+ int res, rc;
@@ -48065,15 +51289,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_processor.c linux-2.
+ return res;
+}
+
-+/************************************************************
-+ * Function: processor_detach
-+ *
-+ * Argument:
-+ *
-+ * Returns : None
-+ *
-+ * Description: Called to detach this device type driver
-+ ************************************************************/
+#if 0
+void processor_detach(struct scst_device *dev)
+{
@@ -48084,17 +51299,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_processor.c linux-2.
+}
+#endif
+
-+/********************************************************************
-+ * Function: processor_parse
-+ *
-+ * Argument:
-+ *
-+ * Returns : The state of the command
-+ *
-+ * Description: This does the parsing of the command
-+ *
-+ * Note: Not all states are allowed on return
-+ ********************************************************************/
+static int processor_parse(struct scst_cmd *cmd)
+{
+ int res = SCST_CMD_STATE_DEFAULT;
@@ -48106,17 +51310,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_processor.c linux-2.
+ return res;
+}
+
-+/********************************************************************
-+ * Function: processor_done
-+ *
-+ * Argument:
-+ *
-+ * Returns :
-+ *
-+ * Description: This is the completion routine for the command,
-+ * it is used to extract any necessary information
-+ * about a command.
-+ ********************************************************************/
+#if 0
+int processor_done(struct scst_cmd *cmd)
+{
@@ -48175,10 +51368,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_processor.c linux-2.
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SCSI medium processor (type 3) dev handler for SCST");
+MODULE_VERSION(SCST_VERSION_STRING);
-diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_raid.c linux-2.6.36/drivers/scst/dev_handlers/scst_raid.c
---- orig/linux-2.6.36/drivers/scst/dev_handlers/scst_raid.c
-+++ linux-2.6.36/drivers/scst/dev_handlers/scst_raid.c
-@@ -0,0 +1,224 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/dev_handlers/scst_raid.c linux-2.6.39/drivers/scst/dev_handlers/scst_raid.c
+--- orig/linux-2.6.39/drivers/scst/dev_handlers/scst_raid.c
++++ linux-2.6.39/drivers/scst/dev_handlers/scst_raid.c
+@@ -0,0 +1,184 @@
+/*
+ * scst_raid.c
+ *
@@ -48233,15 +51426,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_raid.c linux-2.6.36/
+#endif
+};
+
-+/**************************************************************
-+ * Function: raid_attach
-+ *
-+ * Argument:
-+ *
-+ * Returns : 1 if attached, error code otherwise
-+ *
-+ * Description:
-+ *************************************************************/
+static int raid_attach(struct scst_device *dev)
+{
+ int res, rc;
@@ -48292,15 +51476,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_raid.c linux-2.6.36/
+ return res;
+}
+
-+/************************************************************
-+ * Function: raid_detach
-+ *
-+ * Argument:
-+ *
-+ * Returns : None
-+ *
-+ * Description: Called to detach this device type driver
-+ ************************************************************/
+#if 0
+void raid_detach(struct scst_device *dev)
+{
@@ -48311,17 +51486,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_raid.c linux-2.6.36/
+}
+#endif
+
-+/********************************************************************
-+ * Function: raid_parse
-+ *
-+ * Argument:
-+ *
-+ * Returns : The state of the command
-+ *
-+ * Description: This does the parsing of the command
-+ *
-+ * Note: Not all states are allowed on return
-+ ********************************************************************/
+static int raid_parse(struct scst_cmd *cmd)
+{
+ int res = SCST_CMD_STATE_DEFAULT;
@@ -48333,17 +51497,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_raid.c linux-2.6.36/
+ return res;
+}
+
-+/********************************************************************
-+ * Function: raid_done
-+ *
-+ * Argument:
-+ *
-+ * Returns :
-+ *
-+ * Description: This is the completion routine for the command,
-+ * it is used to extract any necessary information
-+ * about a command.
-+ ********************************************************************/
+#if 0
+int raid_done(struct scst_cmd *cmd)
+{
@@ -48403,10 +51556,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_raid.c linux-2.6.36/
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SCSI raid(controller) (type 0xC) dev handler for SCST");
+MODULE_VERSION(SCST_VERSION_STRING);
-diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_tape.c linux-2.6.36/drivers/scst/dev_handlers/scst_tape.c
---- orig/linux-2.6.36/drivers/scst/dev_handlers/scst_tape.c
-+++ linux-2.6.36/drivers/scst/dev_handlers/scst_tape.c
-@@ -0,0 +1,432 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/dev_handlers/scst_tape.c linux-2.6.39/drivers/scst/dev_handlers/scst_tape.c
+--- orig/linux-2.6.39/drivers/scst/dev_handlers/scst_tape.c
++++ linux-2.6.39/drivers/scst/dev_handlers/scst_tape.c
+@@ -0,0 +1,383 @@
+/*
+ * scst_tape.c
+ *
@@ -48459,7 +51612,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_tape.c linux-2.6.36/
+static void tape_detach(struct scst_device *);
+static int tape_parse(struct scst_cmd *);
+static int tape_done(struct scst_cmd *);
-+static int tape_exec(struct scst_cmd *);
++static int tape_perf_exec(struct scst_cmd *);
+
+static struct scst_dev_type tape_devtype = {
+ .name = TAPE_NAME,
@@ -48486,7 +51639,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_tape.c linux-2.6.36/
+ .detach = tape_detach,
+ .parse = tape_parse,
+ .dev_done = tape_done,
-+ .exec = tape_exec,
++ .exec = tape_perf_exec,
+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
+ .trace_flags = &trace_flag,
@@ -48534,15 +51687,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_tape.c linux-2.6.36/
+module_init(init_scst_tape_driver);
+module_exit(exit_scst_tape_driver);
+
-+/**************************************************************
-+ * Function: tape_attach
-+ *
-+ * Argument:
-+ *
-+ * Returns : 1 if attached, error code otherwise
-+ *
-+ * Description:
-+ *************************************************************/
+static int tape_attach(struct scst_device *dev)
+{
+ int res, rc;
@@ -48563,8 +51707,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_tape.c linux-2.6.36/
+
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (params == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "%s",
-+ "Unable to allocate struct tape_params");
++ PRINT_ERROR("Unable to allocate struct tape_params (size %zd)",
++ sizeof(*params));
+ res = -ENOMEM;
+ goto out;
+ }
@@ -48573,7 +51717,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_tape.c linux-2.6.36/
+
+ buffer = kmalloc(buffer_size, GFP_KERNEL);
+ if (!buffer) {
-+ TRACE(TRACE_OUT_OF_MEM, "%s", "Memory allocation failure");
++ PRINT_ERROR("Buffer memory allocation (size %d) failure",
++ buffer_size);
+ res = -ENOMEM;
+ goto out_free_req;
+ }
@@ -48646,15 +51791,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_tape.c linux-2.6.36/
+ return res;
+}
+
-+/************************************************************
-+ * Function: tape_detach
-+ *
-+ * Argument:
-+ *
-+ * Returns : None
-+ *
-+ * Description: Called to detach this device type driver
-+ ************************************************************/
+static void tape_detach(struct scst_device *dev)
+{
+ struct tape_params *params =
@@ -48679,17 +51815,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_tape.c linux-2.6.36/
+ return params->block_size;
+}
+
-+/********************************************************************
-+ * Function: tape_parse
-+ *
-+ * Argument:
-+ *
-+ * Returns : The state of the command
-+ *
-+ * Description: This does the parsing of the command
-+ *
-+ * Note: Not all states are allowed on return
-+ ********************************************************************/
+static int tape_parse(struct scst_cmd *cmd)
+{
+ int res = SCST_CMD_STATE_DEFAULT;
@@ -48712,17 +51837,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_tape.c linux-2.6.36/
+ return;
+}
+
-+/********************************************************************
-+ * Function: tape_done
-+ *
-+ * Argument:
-+ *
-+ * Returns :
-+ *
-+ * Description: This is the completion routine for the command,
-+ * it is used to extract any necessary information
-+ * about a command.
-+ ********************************************************************/
+static int tape_done(struct scst_cmd *cmd)
+{
+ int opcode = cmd->cdb[0];
@@ -48792,17 +51906,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_tape.c linux-2.6.36/
+ return res;
+}
+
-+/********************************************************************
-+ * Function: tape_exec
-+ *
-+ * Argument:
-+ *
-+ * Returns :
-+ *
-+ * Description: Make SCST do nothing for data READs and WRITES.
-+ * Intended for raw line performance testing
-+ ********************************************************************/
-+static int tape_exec(struct scst_cmd *cmd)
++static int tape_perf_exec(struct scst_cmd *cmd)
+{
+ int res = SCST_EXEC_NOT_COMPLETED, rc;
+ int opcode = cmd->cdb[0];
@@ -48839,9 +51943,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/dev_handlers/scst_tape.c linux-2.6.36/
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SCSI tape (type 1) dev handler for SCST");
+MODULE_VERSION(SCST_VERSION_STRING);
-diff -uprN orig/linux-2.6.36/drivers/scst/fcst/Makefile linux-2.6.36/drivers/scst/fcst/Makefile
---- orig/linux-2.6.36/drivers/scst/fcst/Makefile
-+++ linux-2.6.36/drivers/scst/fcst/Makefile
+diff -uprN orig/linux-2.6.39/drivers/scst/fcst/Makefile linux-2.6.39/drivers/scst/fcst/Makefile
+--- orig/linux-2.6.39/drivers/scst/fcst/Makefile
++++ linux-2.6.39/drivers/scst/fcst/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_FCST) += fcst.o
+
@@ -48850,19 +51954,19 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/Makefile linux-2.6.36/drivers/scs
+ ft_io.o \
+ ft_scst.o \
+ ft_sess.o
-diff -uprN orig/linux-2.6.36/drivers/scst/fcst/Kconfig linux-2.6.36/drivers/scst/fcst/Kconfig
---- orig/linux-2.6.36/drivers/scst/fcst/Kconfig
-+++ linux-2.6.36/drivers/scst/fcst/Kconfig
+diff -uprN orig/linux-2.6.39/drivers/scst/fcst/Kconfig linux-2.6.39/drivers/scst/fcst/Kconfig
+--- orig/linux-2.6.39/drivers/scst/fcst/Kconfig
++++ linux-2.6.39/drivers/scst/fcst/Kconfig
@@ -0,0 +1,5 @@
+config FCST
+ tristate "SCST target module for Fibre Channel using libfc"
+ depends on LIBFC && SCST
+ ---help---
+ Supports using libfc HBAs as target adapters with SCST
-diff -uprN orig/linux-2.6.36/drivers/scst/fcst/fcst.h linux-2.6.36/drivers/scst/fcst/fcst.h
---- orig/linux-2.6.36/drivers/scst/fcst/fcst.h
-+++ linux-2.6.36/drivers/scst/fcst/fcst.h
-@@ -0,0 +1,151 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/fcst/fcst.h linux-2.6.39/drivers/scst/fcst/fcst.h
+--- orig/linux-2.6.39/drivers/scst/fcst/fcst.h
++++ linux-2.6.39/drivers/scst/fcst/fcst.h
+@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2010 Cisco Systems, Inc.
+ *
@@ -48884,6 +51988,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/fcst.h linux-2.6.36/drivers/scst/
+#ifndef __SCSI_FCST_H__
+#define __SCSI_FCST_H__
+
++#include <linux/version.h>
+#include <scst/scst.h>
+
+#define FT_VERSION "0.3"
@@ -48978,7 +52083,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/fcst.h linux-2.6.36/drivers/scst/
+int ft_prli(struct fc_rport_priv *, u32 spp_len,
+ const struct fc_els_spp *, struct fc_els_spp *);
+void ft_prlo(struct fc_rport_priv *);
-+void ft_recv(struct fc_lport *, struct fc_seq *, struct fc_frame *);
++void ft_recv(struct fc_lport *, struct fc_frame *);
+
+/*
+ * SCST interface.
@@ -48993,7 +52098,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/fcst.h linux-2.6.36/drivers/scst/
+int ft_tgt_enable(struct scst_tgt *, bool);
+bool ft_tgt_enabled(struct scst_tgt *);
+int ft_report_aen(struct scst_aen *);
-+int ft_get_transport_id(struct scst_session *, uint8_t **);
++int ft_get_transport_id(struct scst_tgt *, struct scst_session *, uint8_t **);
+
+/*
+ * Session interface.
@@ -49006,7 +52111,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/fcst.h linux-2.6.36/drivers/scst/
+ * other internal functions.
+ */
+int ft_thread(void *);
-+void ft_recv_req(struct ft_sess *, struct fc_seq *, struct fc_frame *);
++void ft_recv_req(struct ft_sess *, struct fc_frame *);
+void ft_recv_write_data(struct scst_cmd *, struct fc_frame *);
+int ft_send_read_data(struct scst_cmd *);
+struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
@@ -49014,10 +52119,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/fcst.h linux-2.6.36/drivers/scst/
+void ft_cmd_dump(struct scst_cmd *, const char *);
+
+#endif /* __SCSI_FCST_H__ */
-diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_cmd.c linux-2.6.36/drivers/scst/fcst/ft_cmd.c
---- orig/linux-2.6.36/drivers/scst/fcst/ft_cmd.c
-+++ linux-2.6.36/drivers/scst/fcst/ft_cmd.c
-@@ -0,0 +1,686 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/fcst/ft_cmd.c linux-2.6.39/drivers/scst/fcst/ft_cmd.c
+--- orig/linux-2.6.39/drivers/scst/fcst/ft_cmd.c
++++ linux-2.6.39/drivers/scst/fcst/ft_cmd.c
+@@ -0,0 +1,685 @@
+/*
+ * Copyright (c) 2010 Cisco Systems, Inc.
+ *
@@ -49058,7 +52163,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_cmd.c linux-2.6.36/drivers/scs
+{
+ static atomic_t serial;
+ struct ft_cmd *fcmd;
-+ struct fc_exch *ep;
++ struct fc_frame_header *fh;
+ char prefix[30];
+ char buf[150];
+
@@ -49066,12 +52171,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_cmd.c linux-2.6.36/drivers/scs
+ return;
+
+ fcmd = scst_cmd_get_tgt_priv(cmd);
-+ ep = fc_seq_exch(fcmd->seq);
++ fh = fc_frame_header_get(fcmd->req_frame);
+ snprintf(prefix, sizeof(prefix), FT_MODULE ": cmd %2x",
+ atomic_inc_return(&serial) & 0xff);
+
+ printk(KERN_INFO "%s %s oid %x oxid %x resp_len %u\n",
-+ prefix, caller, ep->oid, ep->oxid,
++ prefix, caller, ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id),
+ scst_cmd_get_resp_data_len(cmd));
+ printk(KERN_INFO "%s scst_cmd %p wlen %u rlen %u\n",
+ prefix, cmd, fcmd->write_data_len, fcmd->read_data_len);
@@ -49146,8 +52251,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_cmd.c linux-2.6.36/drivers/scs
+ "host_status %x driver_status %x\n",
+ prefix, cmd->status, cmd->msg_status,
+ cmd->host_status, cmd->driver_status);
-+ printk(KERN_INFO "%s cdb_len %d ext_cdb_len %u\n",
-+ prefix, cmd->cdb_len, cmd->ext_cdb_len);
++ printk(KERN_INFO "%s cdb_len %d\n", prefix, cmd->cdb_len);
+ snprintf(buf, sizeof(buf), "%s cdb ", prefix);
+ print_hex_dump(KERN_INFO, buf, DUMP_PREFIX_NONE,
+ 16, 4, cmd->cdb, SCST_MAX_CDB_SIZE, 0);
@@ -49159,19 +52263,19 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_cmd.c linux-2.6.36/drivers/scs
+static void ft_cmd_tm_dump(struct scst_mgmt_cmd *mcmd, const char *caller)
+{
+ struct ft_cmd *fcmd;
-+ struct fc_exch *ep;
++ struct fc_frame_header *fh;
+ char prefix[30];
+ char buf[150];
+
+ if (!(ft_debug_logging & FT_DEBUG_IO))
+ return;
+ fcmd = scst_mgmt_cmd_get_tgt_priv(mcmd);
-+ ep = fc_seq_exch(fcmd->seq);
++ fh = fc_frame_header_get(fcmd->req_frame);
+
+ snprintf(prefix, sizeof(prefix), FT_MODULE ": mcmd");
+
+ printk(KERN_INFO "%s %s oid %x oxid %x lun %lld\n",
-+ prefix, caller, ep->oid, ep->oxid,
++ prefix, caller, ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id),
+ (unsigned long long)mcmd->lun);
+ printk(KERN_INFO "%s state %d fn %d fin_wait %d done_wait %d comp %d\n",
+ prefix, mcmd->state, mcmd->fn,
@@ -49190,7 +52294,23 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_cmd.c linux-2.6.36/drivers/scs
+}
+
+/*
-+ * Free command.
++ * Free command and associated frame.
++ */
++static void ft_cmd_done(struct ft_cmd *fcmd)
++{
++ struct fc_frame *fp = fcmd->req_frame;
++ struct fc_lport *lport;
++
++ lport = fr_dev(fp);
++ if (fr_seq(fp))
++ lport->tt.seq_release(fr_seq(fp));
++
++ fc_frame_free(fp);
++ kfree(fcmd);
++}
++
++/*
++ * Free command - callback from SCST.
+ */
+void ft_cmd_free(struct scst_cmd *cmd)
+{
@@ -49199,8 +52319,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_cmd.c linux-2.6.36/drivers/scs
+ fcmd = scst_cmd_get_tgt_priv(cmd);
+ if (fcmd) {
+ scst_cmd_set_tgt_priv(cmd, NULL);
-+ fc_frame_free(fcmd->req_frame);
-+ kfree(fcmd);
++ ft_cmd_done(fcmd);
+ }
+}
+
@@ -49404,27 +52523,28 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_cmd.c linux-2.6.36/drivers/scs
+ * status is SAM_STAT_GOOD (zero) if code is valid.
+ * This is used in error cases, such as allocation failures.
+ */
-+static void ft_send_resp_status(struct fc_seq *sp, u32 status,
++static void ft_send_resp_status(struct fc_frame *rx_fp, u32 status,
+ enum fcp_resp_rsp_codes code)
+{
+ struct fc_frame *fp;
++ struct fc_frame_header *fh;
+ size_t len;
+ struct fcp_resp_with_ext *fcp;
+ struct fcp_resp_rsp_info *info;
+ struct fc_lport *lport;
-+ struct fc_exch *ep;
-+
-+ ep = fc_seq_exch(sp);
++ struct fc_seq *sp;
+
++ sp = fr_seq(rx_fp);
++ fh = fc_frame_header_get(rx_fp);
+ FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n",
-+ ep->did, ep->oxid, status, code);
-+ lport = ep->lp;
++ ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
++ lport = fr_dev(rx_fp);
+ len = sizeof(*fcp);
+ if (status == SAM_STAT_GOOD)
+ len += sizeof(*info);
+ fp = fc_frame_alloc(lport, len);
+ if (!fp)
-+ goto out;
++ return;
+ fcp = fc_frame_payload_get(fp, len);
+ memset(fcp, 0, len);
+ fcp->resp.fr_status = status;
@@ -49435,13 +52555,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_cmd.c linux-2.6.36/drivers/scs
+ info->rsp_code = code;
+ }
+
-+ sp = lport->tt.seq_start_next(sp);
-+ fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
-+ FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
-+
-+ lport->tt.seq_send(lport, sp, fp);
-+out:
-+ lport->tt.exch_done(sp);
++ fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
++ if (sp)
++ lport->tt.seq_send(lport, sp, fp);
++ else
++ lport->tt.frame_send(lport, fp);
+}
+
+/*
@@ -49450,9 +52568,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_cmd.c linux-2.6.36/drivers/scs
+ */
+static void ft_send_resp_code(struct ft_cmd *fcmd, enum fcp_resp_rsp_codes code)
+{
-+ ft_send_resp_status(fcmd->seq, SAM_STAT_GOOD, code);
-+ fc_frame_free(fcmd->req_frame);
-+ kfree(fcmd);
++ ft_send_resp_status(fcmd->req_frame, SAM_STAT_GOOD, code);
++ ft_cmd_done(fcmd);
+}
+
+void ft_cmd_tm_done(struct scst_mgmt_cmd *mcmd)
@@ -49537,10 +52654,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_cmd.c linux-2.6.36/drivers/scs
+ * Handle an incoming FCP command frame.
+ * Note that this may be called directly from the softirq context.
+ */
-+static void ft_recv_cmd(struct ft_sess *sess, struct fc_seq *sp,
-+ struct fc_frame *fp)
++static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
+{
+ static atomic_t serial;
++ struct fc_seq *sp;
+ struct scst_cmd *cmd;
+ struct ft_cmd *fcmd;
+ struct fcp_cmnd *fcp;
@@ -49549,12 +52666,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_cmd.c linux-2.6.36/drivers/scs
+ u32 data_len;
+ int cdb_len;
+
-+ lport = fc_seq_exch(sp)->lp;
++ lport = sess->tport->lport;
+ fcmd = kzalloc(sizeof(*fcmd), GFP_ATOMIC);
+ if (!fcmd)
+ goto busy;
+ fcmd->serial = atomic_inc_return(&serial); /* debug only */
-+ fcmd->seq = sp;
+ fcmd->max_payload = sess->max_payload;
+ fcmd->max_lso_payload = sess->max_lso_payload;
+ fcmd->req_frame = fp;
@@ -49580,15 +52696,20 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_cmd.c linux-2.6.36/drivers/scs
+
+ cmd = scst_rx_cmd(sess->scst_sess, fcp->fc_lun, sizeof(fcp->fc_lun),
+ fcp->fc_cdb, cdb_len, SCST_ATOMIC);
-+ if (!cmd) {
-+ kfree(fcmd);
++ if (!cmd)
+ goto busy;
-+ }
+ fcmd->scst_cmd = cmd;
+ scst_cmd_set_tgt_priv(cmd, fcmd);
+
++ sp = lport->tt.seq_assign(lport, fp);
++ if (!sp)
++ goto busy;
++ fcmd->seq = sp;
++ lport->tt.seq_set_resp(sp, ft_recv_seq, cmd);
++
+ switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
+ case 0:
++ default:
+ data_dir = SCST_DATA_NONE;
+ break;
+ case FCP_CFL_RDDATA:
@@ -49619,7 +52740,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_cmd.c linux-2.6.36/drivers/scs
+ break;
+ }
+
-+ lport->tt.seq_set_resp(sp, ft_recv_seq, cmd);
+ scst_cmd_init_done(cmd, SCST_CONTEXT_THREAD);
+ return;
+
@@ -49629,45 +52749,30 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_cmd.c linux-2.6.36/drivers/scs
+
+busy:
+ FT_IO_DBG("cmd allocation failure - sending BUSY\n");
-+ ft_send_resp_status(sp, SAM_STAT_BUSY, 0);
-+ fc_frame_free(fp);
++ ft_send_resp_status(fp, SAM_STAT_BUSY, 0);
++ ft_cmd_done(fcmd);
+}
+
+/*
+ * Send FCP ELS-4 Reject.
+ */
-+static void ft_cmd_ls_rjt(struct fc_seq *sp, enum fc_els_rjt_reason reason,
++static void ft_cmd_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
+ enum fc_els_rjt_explan explan)
+{
-+ struct fc_frame *fp;
-+ struct fc_els_ls_rjt *rjt;
++ struct fc_seq_els_data rjt_data;
+ struct fc_lport *lport;
-+ struct fc_exch *ep;
-+
-+ ep = fc_seq_exch(sp);
-+ lport = ep->lp;
-+ fp = fc_frame_alloc(lport, sizeof(*rjt));
-+ if (!fp)
-+ return;
-+
-+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
-+ memset(rjt, 0, sizeof(*rjt));
-+ rjt->er_cmd = ELS_LS_RJT;
-+ rjt->er_reason = reason;
-+ rjt->er_explan = explan;
+
-+ sp = lport->tt.seq_start_next(sp);
-+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, FC_TYPE_FCP,
-+ FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_LAST_SEQ, 0);
-+ lport->tt.seq_send(lport, sp, fp);
++ lport = fr_dev(rx_fp);
++ rjt_data.reason = reason;
++ rjt_data.explan = explan;
++ lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+}
+
+/*
+ * Handle an incoming FCP ELS-4 command frame.
+ * Note that this may be called directly from the softirq context.
+ */
-+static void ft_recv_els4(struct ft_sess *sess, struct fc_seq *sp,
-+ struct fc_frame *fp)
++static void ft_recv_els4(struct ft_sess *sess, struct fc_frame *fp)
+{
+ u8 op = fc_frame_payload_op(fp);
+
@@ -49675,7 +52780,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_cmd.c linux-2.6.36/drivers/scs
+ case ELS_SRR: /* TBD */
+ default:
+ FT_IO_DBG("unsupported ELS-4 op %x\n", op);
-+ ft_cmd_ls_rjt(sp, ELS_RJT_INVAL, ELS_EXPL_NONE);
++ ft_cmd_ls_rjt(fp, ELS_RJT_INVAL, ELS_EXPL_NONE);
+ fc_frame_free(fp);
+ break;
+ }
@@ -49685,29 +52790,28 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_cmd.c linux-2.6.36/drivers/scs
+ * Handle an incoming FCP frame.
+ * Note that this may be called directly from the softirq context.
+ */
-+void ft_recv_req(struct ft_sess *sess, struct fc_seq *sp, struct fc_frame *fp)
++void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+
+ switch (fh->fh_r_ctl) {
+ case FC_RCTL_DD_UNSOL_CMD:
-+ ft_recv_cmd(sess, sp, fp);
++ ft_recv_cmd(sess, fp);
+ break;
+ case FC_RCTL_ELS4_REQ:
-+ ft_recv_els4(sess, sp, fp);
++ ft_recv_els4(sess, fp);
+ break;
+ default:
+ printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
+ __func__, fh->fh_r_ctl);
+ fc_frame_free(fp);
-+ sess->tport->lport->tt.exch_done(sp);
+ break;
+ }
+}
-diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_io.c linux-2.6.36/drivers/scst/fcst/ft_io.c
---- orig/linux-2.6.36/drivers/scst/fcst/ft_io.c
-+++ linux-2.6.36/drivers/scst/fcst/ft_io.c
-@@ -0,0 +1,272 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/fcst/ft_io.c linux-2.6.39/drivers/scst/fcst/ft_io.c
+--- orig/linux-2.6.39/drivers/scst/fcst/ft_io.c
++++ linux-2.6.39/drivers/scst/fcst/ft_io.c
+@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2010 Cisco Systems, Inc.
+ *
@@ -49928,7 +53032,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_io.c linux-2.6.36/drivers/scst
+ fr_max_payload(fp) = fcmd->max_payload;
+ to = fc_frame_payload_get(fp, 0);
+ fh_off = frame_off;
-+ frame_off += frame_len;
+ }
+ tlen = min(mem_len, frame_len);
+ BUG_ON(!tlen);
@@ -49943,20 +53046,25 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_io.c linux-2.6.36/drivers/scst
+ PAGE_SIZE - (mem_off & ~PAGE_MASK));
+ skb_fill_page_desc(fp_skb(fp),
+ skb_shinfo(fp_skb(fp))->nr_frags,
-+ page, mem_off, tlen);
++ page, offset_in_page(from + mem_off),
++ tlen);
+ fr_len(fp) += tlen;
+ fp_skb(fp)->data_len += tlen;
+ fp_skb(fp)->truesize +=
+ PAGE_SIZE << compound_order(page);
++ frame_len -= tlen;
++ if (skb_shinfo(fp_skb(fp))->nr_frags >= FC_FRAME_SG_LEN)
++ frame_len = 0;
+ } else {
+ memcpy(to, from + mem_off, tlen);
+ to += tlen;
++ frame_len -= tlen;
+ }
+
+ mem_len -= tlen;
+ mem_off += tlen;
-+ frame_len -= tlen;
+ remaining -= tlen;
++ frame_off += tlen;
+
+ if (frame_len)
+ continue;
@@ -49980,9 +53088,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_io.c linux-2.6.36/drivers/scst
+ }
+ return SCST_TGT_RES_SUCCESS;
+}
-diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_scst.c linux-2.6.36/drivers/scst/fcst/ft_scst.c
---- orig/linux-2.6.36/drivers/scst/fcst/ft_scst.c
-+++ linux-2.6.36/drivers/scst/fcst/ft_scst.c
+diff -uprN orig/linux-2.6.39/drivers/scst/fcst/ft_scst.c linux-2.6.39/drivers/scst/fcst/ft_scst.c
+--- orig/linux-2.6.39/drivers/scst/fcst/ft_scst.c
++++ linux-2.6.39/drivers/scst/fcst/ft_scst.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2010 Cisco Systems, Inc.
@@ -50080,10 +53188,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_scst.c linux-2.6.36/drivers/sc
+ synchronize_rcu();
+}
+module_exit(ft_module_exit);
-diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_sess.c linux-2.6.36/drivers/scst/fcst/ft_sess.c
---- orig/linux-2.6.36/drivers/scst/fcst/ft_sess.c
-+++ linux-2.6.36/drivers/scst/fcst/ft_sess.c
-@@ -0,0 +1,570 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/fcst/ft_sess.c linux-2.6.39/drivers/scst/fcst/ft_sess.c
+--- orig/linux-2.6.39/drivers/scst/fcst/ft_sess.c
++++ linux-2.6.39/drivers/scst/fcst/ft_sess.c
+@@ -0,0 +1,576 @@
+/*
+ * Copyright (c) 2010 Cisco Systems, Inc.
+ *
@@ -50135,15 +53243,20 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_sess.c linux-2.6.36/drivers/sc
+ FT_SESS_DBG("create %s\n", name);
+
+ tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
-+ if (tport)
++ if (tport) {
++ FT_SESS_DBG("tport alloc %s - already setup\n", name);
+ return tport;
++ }
+
+ tport = kzalloc(sizeof(*tport), GFP_KERNEL);
-+ if (!tport)
++ if (!tport) {
++ FT_SESS_DBG("tport alloc %s failed\n", name);
+ return NULL;
++ }
+
+ tport->tgt = scst_register_target(&ft_scst_template, name);
+ if (!tport->tgt) {
++ FT_SESS_DBG("register_target %s failed\n", name);
+ kfree(tport);
+ return NULL;
+ }
@@ -50155,6 +53268,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_sess.c linux-2.6.36/drivers/sc
+ INIT_HLIST_HEAD(&tport->hash[i]);
+
+ rcu_assign_pointer(lport->prov[FC_TYPE_FCP], tport);
++ FT_SESS_DBG("register_target %s succeeded\n", name);
+ return tport;
+}
+
@@ -50383,7 +53497,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_sess.c linux-2.6.36/drivers/sc
+/*
+ * Allocate and fill in the SPC Transport ID for persistent reservations.
+ */
-+int ft_get_transport_id(struct scst_session *scst_sess, uint8_t **result)
++int ft_get_transport_id(struct scst_tgt *tgt, struct scst_session *scst_sess,
++ uint8_t **result)
+{
+ struct ft_sess *sess;
+ struct {
@@ -50560,7 +53675,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_sess.c linux-2.6.36/drivers/sc
+ * Caller has verified that the frame is type FCP.
+ * Note that this may be called directly from the softirq context.
+ */
-+void ft_recv(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp)
++void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct ft_sess *sess;
+ struct fc_frame_header *fh;
@@ -50574,14 +53689,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_sess.c linux-2.6.36/drivers/sc
+ sess = ft_sess_get(lport, sid);
+ if (!sess) {
+ FT_SESS_DBG("sid %x sess lookup failed\n", sid);
-+ lport->tt.exch_done(sp);
+ /* TBD XXX - if FCP_CMND, send LOGO */
+ fc_frame_free(fp);
+ return;
+ }
+ FT_SESS_DBG("sid %x sess lookup returned %p preempt %x\n",
+ sid, sess, preempt_count());
-+ ft_recv_req(sess, sp, fp);
++ ft_recv_req(sess, fp);
+ ft_sess_put(sess);
+}
+
@@ -50654,15 +53768,30 @@ diff -uprN orig/linux-2.6.36/drivers/scst/fcst/ft_sess.c linux-2.6.36/drivers/sc
+ aen->event_fn, sess->port_id, scst_aen_get_lun(aen));
+ return SCST_AEN_RES_FAILED; /* XXX TBD */
+}
-diff -uprN orig/linux-2.6.36/Documentation/scst/README.fcst linux-2.6.36/Documentation/scst/README.fcst
---- orig/linux-2.6.36/Documentation/scst/README.fcst
-+++ linux-2.6.36/Documentation/scst/README.fcst
-@@ -0,0 +1,99 @@
-+fcst README v1.0 06/10/2010
+diff -uprN orig/linux-2.6.39/Documentation/scst/README.fcst linux-2.6.39/Documentation/scst/README.fcst
+--- orig/linux-2.6.39/Documentation/scst/README.fcst
++++ linux-2.6.39/Documentation/scst/README.fcst
+@@ -0,0 +1,114 @@
++About fcst
++==========
++
++The fcst kernel module implements an SCST target driver for the FCoE protocol.
++FCoE or Fibre Channel over Ethernet is a protocol that allows to communicate
++fibre channel frames over an Ethernet network. Since the FCoE protocol
++requires a lossless Ethernet network, special network adapters and switches
++are required. Ethernet network adapters that support FCoE are called
++Converged Network Adapters (CNA). The standard that makes lossless Ethernet
++communication possible is called DCB or Data Center Bridging.
++
++Since FCoE frames are a kind of Ethernet frames, communication between FCoE
++clients and servers is limited to a single Ethernet broadcast domain.
+
-+$Id$
+
-+FCST is a module that depends on libfc and SCST to provide FC target support.
++Building and Installing
++=======================
++
++FCST is a kernel module that depends on libfc and SCST to provide FC target
++support.
+
+To build for linux-2.6.34, do:
+
@@ -50757,13 +53886,14 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.fcst linux-2.6.36/Documen
+14. As a temporary workaround, you may need to reset the interface
+ on the initiator side so it sees the SCST device as a target and
+ discovers LUNs. You can avoid this by bringing up the initiator last.
-diff -uprN orig/linux-2.6.36/include/scst/iscsi_scst.h linux-2.6.36/include/scst/iscsi_scst.h
---- orig/linux-2.6.36/include/scst/iscsi_scst.h
-+++ linux-2.6.36/include/scst/iscsi_scst.h
-@@ -0,0 +1,220 @@
+diff -uprN orig/linux-2.6.39/include/scst/iscsi_scst.h linux-2.6.39/include/scst/iscsi_scst.h
+--- orig/linux-2.6.39/include/scst/iscsi_scst.h
++++ linux-2.6.39/include/scst/iscsi_scst.h
+@@ -0,0 +1,226 @@
+/*
-+ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
@@ -50832,6 +53962,7 @@ diff -uprN orig/linux-2.6.36/include/scst/iscsi_scst.h linux-2.6.36/include/scst
+ key_queued_cmnds,
+ key_rsp_timeout,
+ key_nop_in_interval,
++ key_nop_in_timeout,
+ key_max_sessions,
+ target_key_last,
+};
@@ -50944,7 +54075,7 @@ diff -uprN orig/linux-2.6.36/include/scst/iscsi_scst.h linux-2.6.36/include/scst
+#define MIN_NR_QUEUED_CMNDS 1
+#define MAX_NR_QUEUED_CMNDS 256
+
-+#define DEFAULT_RSP_TIMEOUT 30
++#define DEFAULT_RSP_TIMEOUT 90
+#define MIN_RSP_TIMEOUT 2
+#define MAX_RSP_TIMEOUT 65535
+
@@ -50952,6 +54083,10 @@ diff -uprN orig/linux-2.6.36/include/scst/iscsi_scst.h linux-2.6.36/include/scst
+#define MIN_NOP_IN_INTERVAL 0
+#define MAX_NOP_IN_INTERVAL 65535
+
++#define DEFAULT_NOP_IN_TIMEOUT 30
++#define MIN_NOP_IN_TIMEOUT 2
++#define MAX_NOP_IN_TIMEOUT 65535
++
+#define NETLINK_ISCSI_SCST 25
+
+#define REGISTER_USERD _IOWR('s', 0, struct iscsi_kern_register_info)
@@ -50981,13 +54116,14 @@ diff -uprN orig/linux-2.6.36/include/scst/iscsi_scst.h linux-2.6.36/include/scst
+}
+
+#endif
-diff -uprN orig/linux-2.6.36/include/scst/iscsi_scst_ver.h linux-2.6.36/include/scst/iscsi_scst_ver.h
---- orig/linux-2.6.36/include/scst/iscsi_scst_ver.h
-+++ linux-2.6.36/include/scst/iscsi_scst_ver.h
+diff -uprN orig/linux-2.6.39/include/scst/iscsi_scst_ver.h linux-2.6.39/include/scst/iscsi_scst_ver.h
+--- orig/linux-2.6.39/include/scst/iscsi_scst_ver.h
++++ linux-2.6.39/include/scst/iscsi_scst_ver.h
@@ -0,0 +1,20 @@
+/*
-+ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
@@ -51000,33 +54136,32 @@ diff -uprN orig/linux-2.6.36/include/scst/iscsi_scst_ver.h linux-2.6.36/include/
+ * GNU General Public License for more details.
+ */
+
-+/* #define CONFIG_SCST_PROC */
+
+#define ISCSI_VERSION_STRING_SUFFIX
+
-+#define ISCSI_VERSION_STRING "2.0.0" ISCSI_VERSION_STRING_SUFFIX
-diff -uprN orig/linux-2.6.36/include/scst/iscsi_scst_itf_ver.h linux-2.6.36/include/scst/iscsi_scst_itf_ver.h
---- orig/linux-2.6.36/include/scst/iscsi_scst_itf_ver.h
-+++ linux-2.6.36/include/scst/iscsi_scst_itf_ver.h
++#define ISCSI_VERSION_STRING "2.1.0" ISCSI_VERSION_STRING_SUFFIX
+diff -uprN orig/linux-2.6.39/include/scst/iscsi_scst_itf_ver.h linux-2.6.39/include/scst/iscsi_scst_itf_ver.h
+--- orig/linux-2.6.39/include/scst/iscsi_scst_itf_ver.h
++++ linux-2.6.39/include/scst/iscsi_scst_itf_ver.h
@@ -0,0 +1,3 @@
+/* Autogenerated, don't edit */
+
-+#define ISCSI_SCST_INTERFACE_VERSION ISCSI_VERSION_STRING "_" "31815603fdea2196eb9774eac0e41bf15c9a9130"
-diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/Makefile linux-2.6.36/drivers/scst/iscsi-scst/Makefile
---- orig/linux-2.6.36/drivers/scst/iscsi-scst/Makefile
-+++ linux-2.6.36/drivers/scst/iscsi-scst/Makefile
++#define ISCSI_SCST_INTERFACE_VERSION ISCSI_VERSION_STRING "_" "6e5293bf78ac2fa099a12c932a10afb091dc7731"
+diff -uprN orig/linux-2.6.39/drivers/scst/iscsi-scst/Makefile linux-2.6.39/drivers/scst/iscsi-scst/Makefile
+--- orig/linux-2.6.39/drivers/scst/iscsi-scst/Makefile
++++ linux-2.6.39/drivers/scst/iscsi-scst/Makefile
@@ -0,0 +1,4 @@
+iscsi-scst-y := iscsi.o nthread.o config.o digest.o \
+ conn.o session.o target.o event.o param.o
+
+obj-$(CONFIG_SCST_ISCSI) += iscsi-scst.o
-diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/Kconfig linux-2.6.36/drivers/scst/iscsi-scst/Kconfig
---- orig/linux-2.6.36/drivers/scst/iscsi-scst/Kconfig
-+++ linux-2.6.36/drivers/scst/iscsi-scst/Kconfig
+diff -uprN orig/linux-2.6.39/drivers/scst/iscsi-scst/Kconfig linux-2.6.39/drivers/scst/iscsi-scst/Kconfig
+--- orig/linux-2.6.39/drivers/scst/iscsi-scst/Kconfig
++++ linux-2.6.39/drivers/scst/iscsi-scst/Kconfig
@@ -0,0 +1,25 @@
+config SCST_ISCSI
+ tristate "ISCSI Target"
-+ depends on SCST && INET
++ depends on SCST && INET && LIBCRC32C
+ default SCST
+ help
+ ISCSI target driver for SCST framework. The iSCSI protocol has been
@@ -51049,14 +54184,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/Kconfig linux-2.6.36/driver
+ iSCSI initiator that is talking to SCST.
+
+ If unsure, say "N".
-diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/config.c linux-2.6.36/drivers/scst/iscsi-scst/config.c
---- orig/linux-2.6.36/drivers/scst/iscsi-scst/config.c
-+++ linux-2.6.36/drivers/scst/iscsi-scst/config.c
-@@ -0,0 +1,1032 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/iscsi-scst/config.c linux-2.6.39/drivers/scst/iscsi-scst/config.c
+--- orig/linux-2.6.39/drivers/scst/iscsi-scst/config.c
++++ linux-2.6.39/drivers/scst/iscsi-scst/config.c
+@@ -0,0 +1,1033 @@
+/*
+ * Copyright (C) 2004 - 2005 FUJITA Tomonori <tomof@acm.org>
-+ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
@@ -52085,14 +55221,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/config.c linux-2.6.36/drive
+}
+
+#endif /* CONFIG_SCST_DEBUG */
-diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/conn.c linux-2.6.36/drivers/scst/iscsi-scst/conn.c
---- orig/linux-2.6.36/drivers/scst/iscsi-scst/conn.c
-+++ linux-2.6.36/drivers/scst/iscsi-scst/conn.c
-@@ -0,0 +1,910 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/iscsi-scst/conn.c linux-2.6.39/drivers/scst/iscsi-scst/conn.c
+--- orig/linux-2.6.39/drivers/scst/iscsi-scst/conn.c
++++ linux-2.6.39/drivers/scst/iscsi-scst/conn.c
+@@ -0,0 +1,945 @@
+/*
+ * Copyright (C) 2002 - 2003 Ardis Technolgies <roman@ardistech.com>
-+ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
@@ -52158,7 +55295,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/conn.c linux-2.6.36/drivers
+ TRACE_ENTRY();
+
+ conn = container_of(kobj, struct iscsi_conn, conn_kobj);
-+ complete_all(&conn->conn_kobj_release_cmpl);
++ if (conn->conn_kobj_release_cmpl != NULL)
++ complete_all(conn->conn_kobj_release_cmpl);
+
+ TRACE_EXIT();
+ return;
@@ -52256,18 +55394,21 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/conn.c linux-2.6.36/drivers
+static void conn_sysfs_del(struct iscsi_conn *conn)
+{
+ int rc;
++ DECLARE_COMPLETION_ONSTACK(c);
+
+ TRACE_ENTRY();
+
++ conn->conn_kobj_release_cmpl = &c;
++
+ kobject_del(&conn->conn_kobj);
+ kobject_put(&conn->conn_kobj);
+
-+ rc = wait_for_completion_timeout(&conn->conn_kobj_release_cmpl, HZ);
++ rc = wait_for_completion_timeout(conn->conn_kobj_release_cmpl, HZ);
+ if (rc == 0) {
+ PRINT_INFO("Waiting for releasing sysfs entry "
+ "for conn %p (%d refs)...", conn,
+ atomic_read(&conn->conn_kobj.kref.refcount));
-+ wait_for_completion(&conn->conn_kobj_release_cmpl);
++ wait_for_completion(conn->conn_kobj_release_cmpl);
+ PRINT_INFO("Done waiting for releasing sysfs "
+ "entry for conn %p", conn);
+ }
@@ -52304,8 +55445,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/conn.c linux-2.6.36/drivers
+ }
+ }
+
-+ init_completion(&conn->conn_kobj_release_cmpl);
-+
+ res = kobject_init_and_add(&conn->conn_kobj, &iscsi_conn_ktype,
+ scst_sysfs_get_sess_kobj(session->scst_sess), addr);
+ if (res != 0) {
@@ -52368,9 +55507,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/conn.c linux-2.6.36/drivers
+
+void iscsi_make_conn_rd_active(struct iscsi_conn *conn)
+{
++ struct iscsi_thread_pool *p = conn->conn_thr_pool;
++
+ TRACE_ENTRY();
+
-+ spin_lock_bh(&iscsi_rd_lock);
++ spin_lock_bh(&p->rd_lock);
+
+ TRACE_DBG("conn %p, rd_state %x, rd_data_ready %d", conn,
+ conn->rd_state, conn->rd_data_ready);
@@ -52385,12 +55526,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/conn.c linux-2.6.36/drivers
+ conn->rd_data_ready = 1;
+
+ if (conn->rd_state == ISCSI_CONN_RD_STATE_IDLE) {
-+ list_add_tail(&conn->rd_list_entry, &iscsi_rd_list);
++ list_add_tail(&conn->rd_list_entry, &p->rd_list);
+ conn->rd_state = ISCSI_CONN_RD_STATE_IN_LIST;
-+ wake_up(&iscsi_rd_waitQ);
++ wake_up(&p->rd_waitQ);
+ }
+
-+ spin_unlock_bh(&iscsi_rd_lock);
++ spin_unlock_bh(&p->rd_lock);
+
+ TRACE_EXIT();
+ return;
@@ -52398,9 +55539,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/conn.c linux-2.6.36/drivers
+
+void iscsi_make_conn_wr_active(struct iscsi_conn *conn)
+{
++ struct iscsi_thread_pool *p = conn->conn_thr_pool;
++
+ TRACE_ENTRY();
+
-+ spin_lock_bh(&iscsi_wr_lock);
++ spin_lock_bh(&p->wr_lock);
+
+ TRACE_DBG("conn %p, wr_state %x, wr_space_ready %d", conn,
+ conn->wr_state, conn->wr_space_ready);
@@ -52413,12 +55556,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/conn.c linux-2.6.36/drivers
+ */
+
+ if (conn->wr_state == ISCSI_CONN_WR_STATE_IDLE) {
-+ list_add_tail(&conn->wr_list_entry, &iscsi_wr_list);
++ list_add_tail(&conn->wr_list_entry, &p->wr_list);
+ conn->wr_state = ISCSI_CONN_WR_STATE_IN_LIST;
-+ wake_up(&iscsi_wr_waitQ);
++ wake_up(&p->wr_waitQ);
+ }
+
-+ spin_unlock_bh(&iscsi_wr_lock);
++ spin_unlock_bh(&p->wr_lock);
+
+ TRACE_EXIT();
+ return;
@@ -52426,13 +55569,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/conn.c linux-2.6.36/drivers
+
+void __mark_conn_closed(struct iscsi_conn *conn, int flags)
+{
-+ spin_lock_bh(&iscsi_rd_lock);
++ spin_lock_bh(&conn->conn_thr_pool->rd_lock);
+ conn->closing = 1;
+ if (flags & ISCSI_CONN_ACTIVE_CLOSE)
+ conn->active_close = 1;
+ if (flags & ISCSI_CONN_DELETING)
+ conn->deleting = 1;
-+ spin_unlock_bh(&iscsi_rd_lock);
++ spin_unlock_bh(&conn->conn_thr_pool->rd_lock);
+
+ iscsi_make_conn_rd_active(conn);
+}
@@ -52490,17 +55633,19 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/conn.c linux-2.6.36/drivers
+
+void __iscsi_write_space_ready(struct iscsi_conn *conn)
+{
++ struct iscsi_thread_pool *p = conn->conn_thr_pool;
++
+ TRACE_ENTRY();
+
-+ spin_lock_bh(&iscsi_wr_lock);
++ spin_lock_bh(&p->wr_lock);
+ conn->wr_space_ready = 1;
+ if ((conn->wr_state == ISCSI_CONN_WR_STATE_SPACE_WAIT)) {
+ TRACE_DBG("wr space ready (conn %p)", conn);
-+ list_add_tail(&conn->wr_list_entry, &iscsi_wr_list);
++ list_add_tail(&conn->wr_list_entry, &p->wr_list);
+ conn->wr_state = ISCSI_CONN_WR_STATE_IN_LIST;
-+ wake_up(&iscsi_wr_waitQ);
++ wake_up(&p->wr_waitQ);
+ }
-+ spin_unlock_bh(&iscsi_wr_lock);
++ spin_unlock_bh(&p->wr_lock);
+
+ TRACE_EXIT();
+ return;
@@ -52539,21 +55684,21 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/conn.c linux-2.6.36/drivers
+ cmnd = list_entry(conn->write_timeout_list.next,
+ struct iscsi_cmnd, write_timeout_list_entry);
+
-+ timeout_time = j + conn->rsp_timeout + ISCSI_ADD_SCHED_TIME;
++ timeout_time = j + iscsi_get_timeout(cmnd) + ISCSI_ADD_SCHED_TIME;
+
-+ if (unlikely(time_after_eq(j, cmnd->write_start +
-+ conn->rsp_timeout))) {
++ if (unlikely(time_after_eq(j, iscsi_get_timeout_time(cmnd)))) {
+ if (!conn->closing) {
-+ PRINT_ERROR("Timeout sending data/waiting "
++ PRINT_ERROR("Timeout %ld sec sending data/waiting "
+ "for reply to/from initiator "
+ "%s (SID %llx), closing connection",
++ iscsi_get_timeout(cmnd)/HZ,
+ conn->session->initiator_name,
+ (long long unsigned int)
+ conn->session->sid);
+ /*
+ * We must call mark_conn_closed() outside of
+ * write_list_lock or we will have a circular
-+ * locking dependency with iscsi_rd_lock.
++ * locking dependency with rd_lock.
+ */
+ spin_unlock_bh(&conn->write_list_lock);
+ mark_conn_closed(conn);
@@ -52621,34 +55766,57 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/conn.c linux-2.6.36/drivers
+
+ TRACE_ENTRY();
+
-+ TRACE_DBG_FLAG(force ? TRACE_CONN_OC_DBG : TRACE_MGMT_DEBUG,
-+ "j %ld (TIMEOUT %d, force %d)", j,
++ TRACE_DBG_FLAG(TRACE_MGMT_DEBUG, "conn %p, read_cmnd %p, read_state "
++ "%d, j %ld (TIMEOUT %d, force %d)", conn, conn->read_cmnd,
++ conn->read_state, j,
+ ISCSI_TM_DATA_WAIT_TIMEOUT + ISCSI_ADD_SCHED_TIME, force);
+
+ iscsi_extracheck_is_rd_thread(conn);
+
+again:
-+ spin_lock_bh(&iscsi_rd_lock);
++ spin_lock_bh(&conn->conn_thr_pool->rd_lock);
+ spin_lock(&conn->write_list_lock);
+
+ aborted_cmds_pending = false;
+ list_for_each_entry(cmnd, &conn->write_timeout_list,
+ write_timeout_list_entry) {
++ /*
++ * This should not happen, because DATA OUT commands can't get
++ * into write_timeout_list.
++ */
++ BUG_ON(cmnd->cmd_req != NULL);
++
+ if (test_bit(ISCSI_CMD_ABORTED, &cmnd->prelim_compl_flags)) {
-+ TRACE_DBG_FLAG(force ? TRACE_CONN_OC_DBG : TRACE_MGMT_DEBUG,
-+ "Checking aborted cmnd %p (scst_state %d, "
-+ "on_write_timeout_list %d, write_start %ld, "
-+ "r2t_len_to_receive %d)", cmnd,
++ TRACE_MGMT_DBG("Checking aborted cmnd %p (scst_state "
++ "%d, on_write_timeout_list %d, write_start "
++ "%ld, r2t_len_to_receive %d)", cmnd,
+ cmnd->scst_state, cmnd->on_write_timeout_list,
+ cmnd->write_start, cmnd->r2t_len_to_receive);
++ if ((cmnd == conn->read_cmnd) ||
++ cmnd->data_out_in_data_receiving) {
++ BUG_ON((cmnd == conn->read_cmnd) && force);
++ /*
++ * We can't abort command waiting for data from
++ * the net, because otherwise we are risking to
++ * get out of sync with the sender, so we have
++ * to wait until the timeout timer gets into the
++ * action and close this connection.
++ */
++ TRACE_MGMT_DBG("Aborted cmnd %p is %s, "
++ "keep waiting", cmnd,
++ (cmnd == conn->read_cmnd) ? "RX cmnd" :
++ "waiting for DATA OUT data");
++ goto cont;
++ }
+ if ((cmnd->r2t_len_to_receive != 0) &&
+ (time_after_eq(j, cmnd->write_start + ISCSI_TM_DATA_WAIT_TIMEOUT) ||
+ force)) {
+ spin_unlock(&conn->write_list_lock);
-+ spin_unlock_bh(&iscsi_rd_lock);
++ spin_unlock_bh(&conn->conn_thr_pool->rd_lock);
+ iscsi_fail_data_waiting_cmnd(cmnd);
+ goto again;
+ }
++cont:
+ aborted_cmds_pending = true;
+ }
+ }
@@ -52667,7 +55835,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/conn.c linux-2.6.36/drivers
+ }
+
+ spin_unlock(&conn->write_list_lock);
-+ spin_unlock_bh(&iscsi_rd_lock);
++ spin_unlock_bh(&conn->conn_thr_pool->rd_lock);
+
+ TRACE_EXIT();
+ return;
@@ -52871,12 +56039,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/conn.c linux-2.6.36/drivers
+ INIT_LIST_HEAD(&conn->nop_req_list);
+ spin_lock_init(&conn->nop_req_list_lock);
+
++ conn->conn_thr_pool = session->sess_thr_pool;
++
+ conn->nop_in_ttt = 0;
+ INIT_DELAYED_WORK(&conn->nop_in_delayed_work,
+ (void (*)(struct work_struct *))conn_nop_in_delayed_work_fn);
+ conn->last_rcv_time = jiffies;
-+ conn->rsp_timeout = session->tgt_params.rsp_timeout * HZ;
++ conn->data_rsp_timeout = session->tgt_params.rsp_timeout * HZ;
+ conn->nop_in_interval = session->tgt_params.nop_in_interval * HZ;
++ conn->nop_in_timeout = session->tgt_params.nop_in_timeout * HZ;
+ if (conn->nop_in_interval > 0) {
+ TRACE_DBG("Schedule Nop-In work for conn %p", conn);
+ schedule_delayed_work(&conn->nop_in_delayed_work,
@@ -52999,17 +56170,18 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/conn.c linux-2.6.36/drivers
+}
+
+#endif /* CONFIG_SCST_EXTRACHECKS */
-diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/digest.c linux-2.6.36/drivers/scst/iscsi-scst/digest.c
---- orig/linux-2.6.36/drivers/scst/iscsi-scst/digest.c
-+++ linux-2.6.36/drivers/scst/iscsi-scst/digest.c
-@@ -0,0 +1,244 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/iscsi-scst/digest.c linux-2.6.39/drivers/scst/iscsi-scst/digest.c
+--- orig/linux-2.6.39/drivers/scst/iscsi-scst/digest.c
++++ linux-2.6.39/drivers/scst/iscsi-scst/digest.c
+@@ -0,0 +1,245 @@
+/*
+ * iSCSI digest handling.
+ *
+ * Copyright (C) 2004 - 2006 Xiranet Communications GmbH
+ * <arne.redlich@xiranet.com>
-+ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
@@ -53247,16 +56419,17 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/digest.c linux-2.6.36/drive
+ TRACE_DBG("TX data digest for cmd %p: %x (offset %d, opcode %x)", cmnd,
+ cmnd->ddigest, offset, cmnd_opcode(cmnd));
+}
-diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/digest.h linux-2.6.36/drivers/scst/iscsi-scst/digest.h
---- orig/linux-2.6.36/drivers/scst/iscsi-scst/digest.h
-+++ linux-2.6.36/drivers/scst/iscsi-scst/digest.h
-@@ -0,0 +1,31 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/iscsi-scst/digest.h linux-2.6.39/drivers/scst/iscsi-scst/digest.h
+--- orig/linux-2.6.39/drivers/scst/iscsi-scst/digest.h
++++ linux-2.6.39/drivers/scst/iscsi-scst/digest.h
+@@ -0,0 +1,32 @@
+/*
+ * iSCSI digest handling.
+ *
+ * Copyright (C) 2004 Xiranet Communications GmbH <arne.redlich@xiranet.com>
-+ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
@@ -53282,16 +56455,17 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/digest.h linux-2.6.36/drive
+extern void digest_tx_data(struct iscsi_cmnd *cmnd);
+
+#endif /* __ISCSI_DIGEST_H__ */
-diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/event.c linux-2.6.36/drivers/scst/iscsi-scst/event.c
---- orig/linux-2.6.36/drivers/scst/iscsi-scst/event.c
-+++ linux-2.6.36/drivers/scst/iscsi-scst/event.c
-@@ -0,0 +1,165 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/iscsi-scst/event.c linux-2.6.39/drivers/scst/iscsi-scst/event.c
+--- orig/linux-2.6.39/drivers/scst/iscsi-scst/event.c
++++ linux-2.6.39/drivers/scst/iscsi-scst/event.c
+@@ -0,0 +1,162 @@
+/*
+ * Event notification code.
+ *
+ * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
-+ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
@@ -53313,13 +56487,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/event.c linux-2.6.36/driver
+
+static int event_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
-+ u32 uid, pid, seq;
-+ char *data;
++ u32 pid;
+
-+ pid = NETLINK_CREDS(skb)->pid;
-+ uid = NETLINK_CREDS(skb)->uid;
-+ seq = nlh->nlmsg_seq;
-+ data = NLMSG_DATA(nlh);
++ pid = NETLINK_CREDS(skb)->pid;
+
+ iscsid_pid = pid;
+
@@ -53451,14 +56621,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/event.c linux-2.6.36/driver
+{
+ netlink_kernel_release(nl);
+}
-diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c
---- orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c
-+++ linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c
-@@ -0,0 +1,3956 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/iscsi-scst/iscsi.c linux-2.6.39/drivers/scst/iscsi-scst/iscsi.c
+--- orig/linux-2.6.39/drivers/scst/iscsi-scst/iscsi.c
++++ linux-2.6.39/drivers/scst/iscsi-scst/iscsi.c
+@@ -0,0 +1,4137 @@
+/*
+ * Copyright (C) 2002 - 2003 Ardis Technolgies <roman@ardistech.com>
-+ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
@@ -53496,7 +56667,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+#define ISCSI_INIT_WRITE_WAKE 0x1
+
+static int ctr_major;
-+static char ctr_name[] = "iscsi-scst-ctl";
++static const char ctr_name[] = "iscsi-scst-ctl";
+
+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
+unsigned long iscsi_trace_flag = ISCSI_DEFAULT_LOG_FLAGS;
@@ -53504,30 +56675,19 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+
+static struct kmem_cache *iscsi_cmnd_cache;
+
-+DEFINE_SPINLOCK(iscsi_rd_lock);
-+LIST_HEAD(iscsi_rd_list);
-+DECLARE_WAIT_QUEUE_HEAD(iscsi_rd_waitQ);
++static DEFINE_MUTEX(iscsi_threads_pool_mutex);
++static LIST_HEAD(iscsi_thread_pools_list);
+
-+DEFINE_SPINLOCK(iscsi_wr_lock);
-+LIST_HEAD(iscsi_wr_list);
-+DECLARE_WAIT_QUEUE_HEAD(iscsi_wr_waitQ);
++static struct iscsi_thread_pool *iscsi_main_thread_pool;
+
+static struct page *dummy_page;
+static struct scatterlist dummy_sg;
+
-+struct iscsi_thread_t {
-+ struct task_struct *thr;
-+ struct list_head threads_list_entry;
-+};
-+
-+static LIST_HEAD(iscsi_threads_list);
-+
+static void cmnd_remove_data_wait_hash(struct iscsi_cmnd *cmnd);
+static void iscsi_send_task_mgmt_resp(struct iscsi_cmnd *req, int status);
+static void iscsi_check_send_delayed_tm_resp(struct iscsi_session *sess);
+static void req_cmnd_release(struct iscsi_cmnd *req);
+static int cmnd_insert_data_wait_hash(struct iscsi_cmnd *cmnd);
-+static void __cmnd_abort(struct iscsi_cmnd *cmnd);
+static void iscsi_cmnd_init_write(struct iscsi_cmnd *rsp, int flags);
+static void iscsi_set_resid_no_scst_cmd(struct iscsi_cmnd *rsp);
+static void iscsi_set_resid(struct iscsi_cmnd *rsp);
@@ -53722,7 +56882,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+{
+ TRACE_ENTRY();
+
-+ TRACE_MGMT_DBG("Failing data waiting cmnd %p", cmnd);
++ TRACE_MGMT_DBG("Failing data waiting cmnd %p (data_out_in_data_receiving %d)",
++ cmnd, cmnd->data_out_in_data_receiving);
+
+ /*
+ * There is no race with conn_abort(), since all functions
@@ -53883,6 +57044,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+
+ EXTRACHECKS_BUG_ON(cmnd->on_rx_digest_list);
+ EXTRACHECKS_BUG_ON(cmnd->hashed);
++ EXTRACHECKS_BUG_ON(cmnd->cmd_req);
++ EXTRACHECKS_BUG_ON(cmnd->data_out_in_data_receiving);
+
+ req_del_from_write_timeout_list(cmnd);
+
@@ -54091,9 +57254,18 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+
+ if (unlikely(req->hashed)) {
+ /* It sometimes can happen during errors recovery */
++ TRACE_MGMT_DBG("Removing req %p from hash", req);
+ cmnd_remove_data_wait_hash(req);
+ }
+
++ if (unlikely(req->cmd_req)) {
++ /* It sometimes can happen during errors recovery */
++ TRACE_MGMT_DBG("Putting cmd_req %p (req %p)", req->cmd_req, req);
++ req->cmd_req->data_out_in_data_receiving = 0;
++ cmnd_put(req->cmd_req);
++ req->cmd_req = NULL;
++ }
++
+ if (unlikely(req->main_rsp != NULL)) {
+ TRACE_DBG("Sending main rsp %p", req->main_rsp);
+ if (cmnd_opcode(req) == ISCSI_OP_SCSI_CMD) {
@@ -54714,7 +57886,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ struct list_head *head;
+ struct iscsi_cmnd *cmnd;
+
-+ head = &conn->session->cmnd_data_wait_hash[cmnd_hashfn(itt)];
++ head = &conn->session->cmnd_data_wait_hash[cmnd_hashfn((__force u32)itt)];
+
+ list_for_each_entry(cmnd, head, hash_list_entry) {
+ if (cmnd->pdu.bhs.itt == itt)
@@ -54787,7 +57959,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+
+ spin_lock(&session->cmnd_data_wait_hash_lock);
+
-+ head = &session->cmnd_data_wait_hash[cmnd_hashfn(itt)];
++ head = &session->cmnd_data_wait_hash[cmnd_hashfn((__force u32)itt)];
+
+ tmp = __cmnd_find_data_wait_hash(cmnd->conn, itt);
+ if (likely(!tmp)) {
@@ -55171,8 +58343,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ cmnd->sg = sg = scst_alloc(size, GFP_KERNEL,
+ &cmnd->sg_cnt);
+ if (sg == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "Allocating buffer for"
-+ " %d Nop-Out payload failed", size);
++ TRACE(TRACE_OUT_OF_MEM, "Allocation of buffer "
++ "for %d Nop-Out payload failed", size);
+ err = -ISCSI_REASON_OUT_OF_RESOURCES;
+ goto out;
+ }
@@ -55461,7 +58633,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ struct iscsi_cdb_ahdr *eca =
+ (struct iscsi_cdb_ahdr *)ahdr;
+ scst_cmd_set_ext_cdb(scst_cmd, eca->cdb,
-+ be16_to_cpu(ahdr->ahslength) - 1);
++ be16_to_cpu(ahdr->ahslength) - 1,
++ GFP_KERNEL);
+ break;
+ }
+ s = 3 + be16_to_cpu(ahdr->ahslength);
@@ -55505,8 +58678,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ TRACE_ENTRY();
+
+ /*
-+ * There is no race with send_r2t() and conn_abort(), since
-+ * all functions called from single read thread
++ * There is no race with send_r2t(), conn_abort() and
++ * iscsi_check_tm_data_wait_timeouts(), since
++ * all the functions called from single read thread
+ */
+ iscsi_extracheck_is_rd_thread(cmnd->conn);
+
@@ -55526,6 +58700,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ goto out;
+ }
+
++ cmnd_get(orig_req);
++
+ if (unlikely(orig_req->r2t_len_to_receive < cmnd->pdu.datasize)) {
+ if (orig_req->prelim_compl_flags != 0) {
+ /* We can have fake r2t_len_to_receive */
@@ -55554,15 +58730,16 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ if (req_hdr->flags & ISCSI_FLG_FINAL)
+ orig_req->outstanding_r2t--;
+
-+ if (unlikely(orig_req->prelim_compl_flags != 0)) {
-+ res = iscsi_preliminary_complete(cmnd, orig_req, true);
-+ goto out;
-+ }
++ EXTRACHECKS_BUG_ON(orig_req->data_out_in_data_receiving);
++ orig_req->data_out_in_data_receiving = 1;
+
+ TRACE_WRITE("cmnd %p, orig_req %p, offset %u, datasize %u", cmnd,
+ orig_req, offset, cmnd->pdu.datasize);
+
-+ res = cmnd_prepare_recv_pdu(conn, orig_req, offset, cmnd->pdu.datasize);
++ if (unlikely(orig_req->prelim_compl_flags != 0))
++ res = iscsi_preliminary_complete(cmnd, orig_req, true);
++ else
++ res = cmnd_prepare_recv_pdu(conn, orig_req, offset, cmnd->pdu.datasize);
+
+out:
+ TRACE_EXIT_RES(res);
@@ -55586,6 +58763,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+
+ iscsi_extracheck_is_rd_thread(cmnd->conn);
+
++ req->data_out_in_data_receiving = 0;
++
+ if (!(cmnd->conn->ddigest_type & DIGEST_NONE) &&
+ !cmnd->ddigest_checked) {
+ cmd_add_on_rx_ddigest_list(req, cmnd);
@@ -55623,7 +58802,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ req->r2t_len_to_send);
+
+ if (!(req_hdr->flags & ISCSI_FLG_FINAL))
-+ goto out;
++ goto out_put;
+
+ if (req->r2t_len_to_receive == 0) {
+ if (!req->pending)
@@ -55631,6 +58810,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ } else if (req->r2t_len_to_send != 0)
+ send_r2t(req);
+
++out_put:
++ cmnd_put(req);
++ cmnd->cmd_req = NULL;
++
+out:
+ TRACE_EXIT();
+ return;
@@ -55647,15 +58830,16 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ "ref_cnt %d, on_write_timeout_list %d, write_start %ld, ITT %x, "
+ "sn %u, op %x, r2t_len_to_receive %d, r2t_len_to_send %d, "
+ "CDB op %x, size to write %u, outstanding_r2t %d, "
-+ "sess->exp_cmd_sn %u, conn %p, rd_task %p)",
-+ cmnd, cmnd->scst_cmd, cmnd->scst_state,
++ "sess->exp_cmd_sn %u, conn %p, rd_task %p, read_cmnd %p, "
++ "read_state %d)", cmnd, cmnd->scst_cmd, cmnd->scst_state,
+ atomic_read(&cmnd->ref_cnt), cmnd->on_write_timeout_list,
+ cmnd->write_start, cmnd->pdu.bhs.itt, cmnd->pdu.bhs.sn,
+ cmnd_opcode(cmnd), cmnd->r2t_len_to_receive,
+ cmnd->r2t_len_to_send, cmnd_scsicode(cmnd),
+ cmnd_write_size(cmnd), cmnd->outstanding_r2t,
+ cmnd->conn->session->exp_cmd_sn, cmnd->conn,
-+ cmnd->conn->rd_task);
++ cmnd->conn->rd_task, cmnd->conn->read_cmnd,
++ cmnd->conn->read_state);
+
+#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
+ TRACE_MGMT_DBG("net_ref_cnt %d", atomic_read(&cmnd->net_ref_cnt));
@@ -55665,7 +58849,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ * Lock to sync with iscsi_check_tm_data_wait_timeouts(), including
+ * CMD_ABORTED bit set.
+ */
-+ spin_lock_bh(&iscsi_rd_lock);
++ spin_lock_bh(&conn->conn_thr_pool->rd_lock);
+
+ /*
+ * We suppose that preliminary commands completion is tested by
@@ -55679,7 +58863,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ TRACE_MGMT_DBG("Setting conn_tm_active for conn %p", conn);
+ conn->conn_tm_active = 1;
+
-+ spin_unlock_bh(&iscsi_rd_lock);
++ spin_unlock_bh(&conn->conn_thr_pool->rd_lock);
+
+ /*
+ * We need the lock to sync with req_add_to_write_timeout_list() and
@@ -55701,7 +58885,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+}
+
+/* Must be called from the read or conn close thread */
-+static int cmnd_abort(struct iscsi_cmnd *req, int *status)
++static int cmnd_abort_pre_checks(struct iscsi_cmnd *req, int *status)
+{
+ struct iscsi_task_mgt_hdr *req_hdr =
+ (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
@@ -55719,7 +58903,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+
+ cmnd = cmnd_find_itt_get(req->conn, req_hdr->rtt);
+ if (cmnd) {
-+ struct iscsi_conn *conn = cmnd->conn;
+ struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
+
+ if (req_hdr->lun != hdr->lun) {
@@ -55761,10 +58944,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ goto out_put;
+ }
+
-+ spin_lock_bh(&conn->cmd_list_lock);
-+ __cmnd_abort(cmnd);
-+ spin_unlock_bh(&conn->cmd_list_lock);
-+
+ cmnd_put(cmnd);
+ res = 0;
+ } else {
@@ -55802,69 +58981,86 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ goto out;
+}
+
-+/* Must be called from the read or conn close thread */
-+static int target_abort(struct iscsi_cmnd *req, int all)
++struct iscsi_cmnd_abort_params {
++ struct work_struct iscsi_cmnd_abort_work;
++ struct scst_cmd *scst_cmd;
++};
++
++static mempool_t *iscsi_cmnd_abort_mempool;
++
++static void iscsi_cmnd_abort_fn(struct work_struct *work)
+{
-+ struct iscsi_target *target = req->conn->session->target;
-+ struct iscsi_task_mgt_hdr *req_hdr =
-+ (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
-+ struct iscsi_session *session;
++ struct iscsi_cmnd_abort_params *params = container_of(work,
++ struct iscsi_cmnd_abort_params, iscsi_cmnd_abort_work);
++ struct scst_cmd *scst_cmd = params->scst_cmd;
++ struct iscsi_session *session = scst_sess_get_tgt_priv(scst_cmd->sess);
+ struct iscsi_conn *conn;
-+ struct iscsi_cmnd *cmnd;
++ struct iscsi_cmnd *cmnd = scst_cmd_get_tgt_priv(scst_cmd);
++ bool done = false;
+
-+ mutex_lock(&target->target_mutex);
++ TRACE_ENTRY();
+
-+ list_for_each_entry(session, &target->session_list,
-+ session_list_entry) {
-+ list_for_each_entry(conn, &session->conn_list,
-+ conn_list_entry) {
-+ spin_lock_bh(&conn->cmd_list_lock);
-+ list_for_each_entry(cmnd, &conn->cmd_list,
-+ cmd_list_entry) {
-+ if (cmnd == req)
-+ continue;
-+ if (all)
-+ __cmnd_abort(cmnd);
-+ else if (req_hdr->lun == cmnd_hdr(cmnd)->lun)
-+ __cmnd_abort(cmnd);
++ TRACE_MGMT_DBG("Checking aborted scst_cmd %p (cmnd %p)", scst_cmd, cmnd);
++
++ mutex_lock(&session->target->target_mutex);
++
++ /*
++ * cmnd pointer is valid only under cmd_list_lock, but we can't know the
++ * corresponding conn without dereferencing cmnd at first, so let's
++ * check all conns and cmnds to find out if our cmnd is still valid
++ * under lock.
++ */
++ list_for_each_entry(conn, &session->conn_list, conn_list_entry) {
++ struct iscsi_cmnd *c;
++ spin_lock_bh(&conn->cmd_list_lock);
++ list_for_each_entry(c, &conn->cmd_list, cmd_list_entry) {
++ if (c == cmnd) {
++ __cmnd_abort(cmnd);
++ done = true;
++ break;
+ }
-+ spin_unlock_bh(&conn->cmd_list_lock);
+ }
++ spin_unlock_bh(&conn->cmd_list_lock);
++ if (done)
++ break;
+ }
+
-+ mutex_unlock(&target->target_mutex);
-+ return 0;
++ mutex_unlock(&session->target->target_mutex);
++
++ scst_cmd_put(scst_cmd);
++
++ mempool_free(params, iscsi_cmnd_abort_mempool);
++
++ TRACE_EXIT();
++ return;
+}
+
-+/* Must be called from the read or conn close thread */
-+static void task_set_abort(struct iscsi_cmnd *req)
++static void iscsi_on_abort_cmd(struct scst_cmd *scst_cmd)
+{
-+ struct iscsi_session *session = req->conn->session;
-+ struct iscsi_task_mgt_hdr *req_hdr =
-+ (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
-+ struct iscsi_target *target = session->target;
-+ struct iscsi_conn *conn;
-+ struct iscsi_cmnd *cmnd;
++ struct iscsi_cmnd_abort_params *params;
+
-+ mutex_lock(&target->target_mutex);
++ TRACE_ENTRY();
+
-+ list_for_each_entry(conn, &session->conn_list, conn_list_entry) {
-+ spin_lock_bh(&conn->cmd_list_lock);
-+ list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
-+ struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
-+ if (cmnd == req)
-+ continue;
-+ if (req_hdr->lun != hdr->lun)
-+ continue;
-+ if (before(req_hdr->cmd_sn, hdr->cmd_sn) ||
-+ req_hdr->cmd_sn == hdr->cmd_sn)
-+ continue;
-+ __cmnd_abort(cmnd);
-+ }
-+ spin_unlock_bh(&conn->cmd_list_lock);
++ params = mempool_alloc(iscsi_cmnd_abort_mempool, GFP_ATOMIC);
++ if (params == NULL) {
++ PRINT_CRIT_ERROR("Unable to create iscsi_cmnd_abort_params, "
++ "iSCSI cmnd for scst_cmd %p may not be aborted",
++ scst_cmd);
++ goto out;
+ }
+
-+ mutex_unlock(&target->target_mutex);
++ memset(params, 0, sizeof(*params));
++ INIT_WORK(&params->iscsi_cmnd_abort_work, iscsi_cmnd_abort_fn);
++ params->scst_cmd = scst_cmd;
++
++ scst_cmd_get(scst_cmd);
++
++ TRACE_MGMT_DBG("Scheduling abort check for scst_cmd %p", scst_cmd);
++
++ schedule_work(&params->iscsi_cmnd_abort_work);
++
++out:
++ TRACE_EXIT();
+ return;
+}
+
@@ -55967,7 +59163,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+
+ switch (function) {
+ case ISCSI_FUNCTION_ABORT_TASK:
-+ rc = cmnd_abort(req, &status);
++ rc = cmnd_abort_pre_checks(req, &status);
+ if (rc == 0) {
+ params.fn = SCST_ABORT_TASK;
+ params.tag = (__force u32)req_hdr->rtt;
@@ -55983,7 +59179,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ }
+ break;
+ case ISCSI_FUNCTION_ABORT_TASK_SET:
-+ task_set_abort(req);
+ params.fn = SCST_ABORT_TASK_SET;
+ params.lun = (uint8_t *)&req_hdr->lun;
+ params.lun_len = sizeof(req_hdr->lun);
@@ -55995,7 +59190,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ status = ISCSI_RESPONSE_FUNCTION_REJECTED;
+ break;
+ case ISCSI_FUNCTION_CLEAR_TASK_SET:
-+ task_set_abort(req);
+ params.fn = SCST_CLEAR_TASK_SET;
+ params.lun = (uint8_t *)&req_hdr->lun;
+ params.lun_len = sizeof(req_hdr->lun);
@@ -56019,7 +59213,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ break;
+ case ISCSI_FUNCTION_TARGET_COLD_RESET:
+ case ISCSI_FUNCTION_TARGET_WARM_RESET:
-+ target_abort(req, 1);
+ params.fn = SCST_TARGET_RESET;
+ params.cmd_sn = req_hdr->cmd_sn;
+ params.cmd_sn_set = 1;
@@ -56028,7 +59221,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ status = ISCSI_RESPONSE_FUNCTION_REJECTED;
+ break;
+ case ISCSI_FUNCTION_LOGICAL_UNIT_RESET:
-+ target_abort(req, 0);
+ params.fn = SCST_LUN_RESET;
+ params.lun = (uint8_t *)&req_hdr->lun;
+ params.lun_len = sizeof(req_hdr->lun);
@@ -56412,9 +59604,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+
+ if (unlikely(before(cmd_sn, session->exp_cmd_sn))) {
+ TRACE_MGMT_DBG("Ignoring out of expected range cmd_sn "
-+ "(sn %u, exp_sn %u, op %x, CDB op %x)", cmd_sn,
-+ session->exp_cmd_sn, cmnd_opcode(cmnd),
-+ cmnd_scsicode(cmnd));
++ "(sn %u, exp_sn %u, cmd %p, op %x, CDB op %x)",
++ cmd_sn, session->exp_cmd_sn, cmnd,
++ cmnd_opcode(cmnd), cmnd_scsicode(cmnd));
+ drop = 1;
+ }
+
@@ -56620,11 +59812,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+static void iscsi_try_local_processing(struct iscsi_cmnd *req)
+{
+ struct iscsi_conn *conn = req->conn;
++ struct iscsi_thread_pool *p = conn->conn_thr_pool;
+ bool local;
+
+ TRACE_ENTRY();
+
-+ spin_lock_bh(&iscsi_wr_lock);
++ spin_lock_bh(&p->wr_lock);
+ switch (conn->wr_state) {
+ case ISCSI_CONN_WR_STATE_IN_LIST:
+ list_del(&conn->wr_list_entry);
@@ -56641,7 +59834,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ local = false;
+ break;
+ }
-+ spin_unlock_bh(&iscsi_wr_lock);
++ spin_unlock_bh(&p->wr_lock);
+
+ if (local) {
+ int rc = 1;
@@ -56652,7 +59845,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ break;
+ } while (req->not_processed_rsp_cnt != 0);
+
-+ spin_lock_bh(&iscsi_wr_lock);
++ spin_lock_bh(&p->wr_lock);
+#ifdef CONFIG_SCST_EXTRACHECKS
+ conn->wr_task = NULL;
+#endif
@@ -56661,12 +59854,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ "(conn %p)", conn);
+ conn->wr_state = ISCSI_CONN_WR_STATE_SPACE_WAIT;
+ } else if (test_write_ready(conn)) {
-+ list_add_tail(&conn->wr_list_entry, &iscsi_wr_list);
++ list_add_tail(&conn->wr_list_entry, &p->wr_list);
+ conn->wr_state = ISCSI_CONN_WR_STATE_IN_LIST;
-+ wake_up(&iscsi_wr_waitQ);
++ wake_up(&p->wr_waitQ);
+ } else
+ conn->wr_state = ISCSI_CONN_WR_STATE_IDLE;
-+ spin_unlock_bh(&iscsi_wr_lock);
++ spin_unlock_bh(&p->wr_lock);
+ }
+
+ TRACE_EXIT();
@@ -56842,7 +60035,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ return res;
+}
+
-+/* Called under sn_lock, but might drop it inside, then reaquire */
++/* Called under sn_lock, but might drop it inside, then reacquire */
+static void iscsi_check_send_delayed_tm_resp(struct iscsi_session *sess)
+ __acquires(&sn_lock)
+ __releases(&sn_lock)
@@ -57069,6 +60262,27 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ goto out;
+}
+
++static int iscsi_cpu_mask_changed_aen(struct scst_aen *aen)
++{
++ int res = SCST_AEN_RES_SUCCESS;
++ struct scst_session *scst_sess = scst_aen_get_sess(aen);
++ struct iscsi_session *sess = scst_sess_get_tgt_priv(scst_sess);
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("CPU mask changed AEN to sess %p (initiator %s)", sess,
++ sess->initiator_name);
++
++ mutex_lock(&sess->target->target_mutex);
++ iscsi_sess_force_close(sess);
++ mutex_unlock(&sess->target->target_mutex);
++
++ scst_aen_done(aen);
++
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
+static int iscsi_report_aen(struct scst_aen *aen)
+{
+ int res;
@@ -57080,6 +60294,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ case SCST_AEN_SCSI:
+ res = iscsi_scsi_aen(aen);
+ break;
++ case SCST_AEN_CPU_MASK_CHANGED:
++ res = iscsi_cpu_mask_changed_aen(aen);
++ break;
+ default:
+ TRACE_MGMT_DBG("Unsupported AEN %d", event_fn);
+ res = SCST_AEN_RES_NOT_SUPPORTED;
@@ -57090,8 +60307,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ return res;
+}
+
-+static int iscsi_get_initiator_port_transport_id(struct scst_session *scst_sess,
-+ uint8_t **transport_id)
++static int iscsi_get_initiator_port_transport_id(struct scst_tgt *tgt,
++ struct scst_session *scst_sess, uint8_t **transport_id)
+{
+ struct iscsi_session *sess;
+ int res = 0;
@@ -57197,13 +60414,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+}
+
+static struct scst_trace_log iscsi_local_trace_tbl[] = {
-+ { TRACE_D_WRITE, "d_write" },
-+ { TRACE_CONN_OC, "conn" },
-+ { TRACE_CONN_OC_DBG, "conn_dbg" },
-+ { TRACE_D_IOV, "iov" },
-+ { TRACE_D_DUMP_PDU, "pdu" },
-+ { TRACE_NET_PG, "net_page" },
-+ { 0, NULL }
++ { TRACE_D_WRITE, "d_write" },
++ { TRACE_CONN_OC, "conn" },
++ { TRACE_CONN_OC_DBG, "conn_dbg" },
++ { TRACE_D_IOV, "iov" },
++ { TRACE_D_DUMP_PDU, "pdu" },
++ { TRACE_NET_PG, "net_page" },
++ { 0, NULL }
+};
+
+#define ISCSI_TRACE_TBL_HELP ", d_write, conn, conn_dbg, iov, pdu, net_page"
@@ -57245,57 +60462,184 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ .pre_exec = iscsi_pre_exec,
+ .task_mgmt_affected_cmds_done = iscsi_task_mgmt_affected_cmds_done,
+ .task_mgmt_fn_done = iscsi_task_mgmt_fn_done,
++ .on_abort_cmd = iscsi_on_abort_cmd,
+ .report_aen = iscsi_report_aen,
+ .get_initiator_port_transport_id = iscsi_get_initiator_port_transport_id,
+ .get_scsi_transport_version = iscsi_get_scsi_transport_version,
+};
+
-+static __init int iscsi_run_threads(int count, char *name, int (*fn)(void *))
++int iscsi_threads_pool_get(const cpumask_t *cpu_mask,
++ struct iscsi_thread_pool **out_pool)
+{
-+ int res = 0;
-+ int i;
-+ struct iscsi_thread_t *thr;
++ int res;
++ struct iscsi_thread_pool *p;
++ struct iscsi_thread *t, *tt;
++ int i, j, count;
+
-+ for (i = 0; i < count; i++) {
-+ thr = kmalloc(sizeof(*thr), GFP_KERNEL);
-+ if (!thr) {
-+ res = -ENOMEM;
-+ PRINT_ERROR("Failed to allocate thr %d", res);
-+ goto out;
++ TRACE_ENTRY();
++
++ mutex_lock(&iscsi_threads_pool_mutex);
++
++ list_for_each_entry(p, &iscsi_thread_pools_list,
++ thread_pools_list_entry) {
++ if ((cpu_mask == NULL) ||
++ __cpus_equal(cpu_mask, &p->cpu_mask, nr_cpumask_bits)) {
++ p->thread_pool_ref++;
++ TRACE_DBG("iSCSI thread pool %p found (new ref %d)",
++ p, p->thread_pool_ref);
++ res = 0;
++ goto out_unlock;
+ }
-+ thr->thr = kthread_run(fn, NULL, "%s%d", name, i);
-+ if (IS_ERR(thr->thr)) {
-+ res = PTR_ERR(thr->thr);
-+ PRINT_ERROR("kthread_create() failed: %d", res);
-+ kfree(thr);
-+ goto out;
++ }
++
++ TRACE_DBG("%s", "Creating new iSCSI thread pool");
++
++ p = kzalloc(sizeof(*p), GFP_KERNEL);
++ if (p == NULL) {
++ PRINT_ERROR("Unable to allocate iSCSI thread pool (size %zd)",
++ sizeof(*p));
++ res = -ENOMEM;
++ if (!list_empty(&iscsi_thread_pools_list)) {
++ PRINT_WARNING("%s", "Using global iSCSI thread pool "
++ "instead");
++ p = list_entry(iscsi_thread_pools_list.next,
++ struct iscsi_thread_pool,
++ thread_pools_list_entry);
++ } else
++ res = -ENOMEM;
++ goto out_unlock;
++ }
++
++ spin_lock_init(&p->rd_lock);
++ INIT_LIST_HEAD(&p->rd_list);
++ init_waitqueue_head(&p->rd_waitQ);
++ spin_lock_init(&p->wr_lock);
++ INIT_LIST_HEAD(&p->wr_list);
++ init_waitqueue_head(&p->wr_waitQ);
++ if (cpu_mask == NULL)
++ cpus_setall(p->cpu_mask);
++ else {
++ cpus_clear(p->cpu_mask);
++ for_each_cpu(i, cpu_mask)
++ cpu_set(i, p->cpu_mask);
++ }
++ p->thread_pool_ref = 1;
++ INIT_LIST_HEAD(&p->threads_list);
++
++ if (cpu_mask == NULL)
++ count = max((int)num_online_cpus(), 2);
++ else {
++ count = 0;
++ for_each_cpu(i, cpu_mask)
++ count++;
++ }
++
++ for (j = 0; j < 2; j++) {
++ int (*fn)(void *);
++ char name[25];
++ static int major;
++
++ if (j == 0)
++ fn = istrd;
++ else
++ fn = istwr;
++
++ for (i = 0; i < count; i++) {
++ if (j == 0) {
++ major++;
++ if (cpu_mask == NULL)
++ snprintf(name, sizeof(name), "iscsird%d", i);
++ else
++ snprintf(name, sizeof(name), "iscsird%d_%d",
++ major, i);
++ } else {
++ if (cpu_mask == NULL)
++ snprintf(name, sizeof(name), "iscsiwr%d", i);
++ else
++ snprintf(name, sizeof(name), "iscsiwr%d_%d",
++ major, i);
++ }
++
++ t = kmalloc(sizeof(*t), GFP_KERNEL);
++ if (t == NULL) {
++ res = -ENOMEM;
++ PRINT_ERROR("Failed to allocate thread %s "
++ "(size %zd)", name, sizeof(*t));
++ goto out_free;
++ }
++
++ t->thr = kthread_run(fn, p, name);
++ if (IS_ERR(t->thr)) {
++ res = PTR_ERR(t->thr);
++ PRINT_ERROR("kthread_run() for thread %s failed: %d",
++ name, res);
++ kfree(t);
++ goto out_free;
++ }
++ list_add_tail(&t->threads_list_entry, &p->threads_list);
+ }
-+ list_add_tail(&thr->threads_list_entry, &iscsi_threads_list);
+ }
+
-+out:
++ list_add_tail(&p->thread_pools_list_entry, &iscsi_thread_pools_list);
++ res = 0;
++
++ TRACE_DBG("Created iSCSI thread pool %p", p);
++
++out_unlock:
++ mutex_unlock(&iscsi_threads_pool_mutex);
++
++ if (out_pool != NULL)
++ *out_pool = p;
++
++ TRACE_EXIT_RES(res);
+ return res;
++
++out_free:
++ list_for_each_entry_safe(t, tt, &p->threads_list, threads_list_entry) {
++ kthread_stop(t->thr);
++ list_del(&t->threads_list_entry);
++ kfree(t);
++ }
++ goto out_unlock;
+}
+
-+static void iscsi_stop_threads(void)
++void iscsi_threads_pool_put(struct iscsi_thread_pool *p)
+{
-+ struct iscsi_thread_t *t, *tmp;
++ struct iscsi_thread *t, *tt;
+
-+ list_for_each_entry_safe(t, tmp, &iscsi_threads_list,
-+ threads_list_entry) {
-+ int rc = kthread_stop(t->thr);
-+ if (rc < 0)
-+ TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
++ TRACE_ENTRY();
++
++ mutex_lock(&iscsi_threads_pool_mutex);
++
++ p->thread_pool_ref--;
++ if (p->thread_pool_ref > 0) {
++ TRACE_DBG("iSCSI thread pool %p still has %d references)",
++ p, p->thread_pool_ref);
++ goto out_unlock;
++ }
++
++ TRACE_DBG("Freeing iSCSI thread pool %p", p);
++
++ list_for_each_entry_safe(t, tt, &p->threads_list, threads_list_entry) {
++ kthread_stop(t->thr);
+ list_del(&t->threads_list_entry);
+ kfree(t);
+ }
++
++ list_del(&p->thread_pools_list_entry);
++
++ kfree(p);
++
++out_unlock:
++ mutex_unlock(&iscsi_threads_pool_mutex);
++
++ TRACE_EXIT();
+ return;
+}
+
+static int __init iscsi_init(void)
+{
+ int err = 0;
-+ int num;
+
+ PRINT_INFO("iSCSI SCST Target - version %s", ISCSI_VERSION_STRING);
+
@@ -57308,12 +60652,19 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ sg_init_table(&dummy_sg, 1);
+ sg_set_page(&dummy_sg, dummy_page, PAGE_SIZE, 0);
+
++ iscsi_cmnd_abort_mempool = mempool_create_kmalloc_pool(2500,
++ sizeof(struct iscsi_cmnd_abort_params));
++ if (iscsi_cmnd_abort_mempool == NULL) {
++ err = -ENOMEM;
++ goto out_free_dummy;
++ }
++
+#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
+ err = net_set_get_put_page_callbacks(iscsi_get_page_callback,
+ iscsi_put_page_callback);
+ if (err != 0) {
+ PRINT_INFO("Unable to set page callbackes: %d", err);
-+ goto out_free_dummy;
++ goto out_destroy_mempool;
+ }
+#else
+#ifndef GENERATING_UPSTREAM_PATCH
@@ -57348,13 +60699,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+
+ iscsi_conn_ktype.sysfs_ops = scst_sysfs_get_sysfs_ops();
+
-+ num = max((int)num_online_cpus(), 2);
-+
-+ err = iscsi_run_threads(num, "iscsird", istrd);
-+ if (err != 0)
-+ goto out_thr;
-+
-+ err = iscsi_run_threads(num, "iscsiwr", istwr);
++ err = iscsi_threads_pool_get(NULL, &iscsi_main_thread_pool);
+ if (err != 0)
+ goto out_thr;
+
@@ -57362,7 +60707,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ return err;
+
+out_thr:
-+ iscsi_stop_threads();
+
+ scst_unregister_target_template(&iscsi_template);
+
@@ -57379,15 +60723,20 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
+ net_set_get_put_page_callbacks(NULL, NULL);
+
-+out_free_dummy:
++out_destroy_mempool:
++ mempool_destroy(iscsi_cmnd_abort_mempool);
+#endif
++
++out_free_dummy:
+ __free_pages(dummy_page, 0);
+ goto out;
+}
+
+static void __exit iscsi_exit(void)
+{
-+ iscsi_stop_threads();
++ iscsi_threads_pool_put(iscsi_main_thread_pool);
++
++ BUG_ON(!list_empty(&iscsi_thread_pools_list));
+
+ unregister_chrdev(ctr_major, ctr_name);
+
@@ -57401,6 +60750,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+ net_set_get_put_page_callbacks(NULL, NULL);
+#endif
+
++ mempool_destroy(iscsi_cmnd_abort_mempool);
++
+ __free_pages(dummy_page, 0);
+ return;
+}
@@ -57411,78 +60762,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.c linux-2.6.36/driver
+MODULE_VERSION(ISCSI_VERSION_STRING);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SCST iSCSI Target");
-diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi_dbg.h linux-2.6.36/drivers/scst/iscsi-scst/iscsi_dbg.h
---- orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi_dbg.h
-+++ linux-2.6.36/drivers/scst/iscsi-scst/iscsi_dbg.h
-@@ -0,0 +1,60 @@
-+/*
-+ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#ifndef ISCSI_DBG_H
-+#define ISCSI_DBG_H
-+
-+#define LOG_PREFIX "iscsi-scst"
-+
-+#include <scst/scst_debug.h>
-+
-+#define TRACE_D_WRITE 0x80000000
-+#define TRACE_CONN_OC 0x40000000
-+#define TRACE_D_IOV 0x20000000
-+#define TRACE_D_DUMP_PDU 0x10000000
-+#define TRACE_NET_PG 0x08000000
-+#define TRACE_CONN_OC_DBG 0x04000000
-+
-+#ifdef CONFIG_SCST_DEBUG
-+#define ISCSI_DEFAULT_LOG_FLAGS (TRACE_FUNCTION | TRACE_LINE | TRACE_PID | \
-+ TRACE_OUT_OF_MEM | TRACE_MGMT | TRACE_MGMT_DEBUG | \
-+ TRACE_MINOR | TRACE_SPECIAL | TRACE_CONN_OC)
-+#else
-+#define ISCSI_DEFAULT_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MGMT | \
-+ TRACE_SPECIAL)
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG
-+struct iscsi_pdu;
-+struct iscsi_cmnd;
-+extern void iscsi_dump_pdu(struct iscsi_pdu *pdu);
-+extern unsigned long iscsi_get_flow_ctrl_or_mgmt_dbg_log_flag(
-+ struct iscsi_cmnd *cmnd);
-+#else
-+#define iscsi_dump_pdu(x) do {} while (0)
-+#define iscsi_get_flow_ctrl_or_mgmt_dbg_log_flag(x) do {} while (0)
-+#endif
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+extern unsigned long iscsi_trace_flag;
-+#define trace_flag iscsi_trace_flag
-+#endif
-+
-+#define TRACE_CONN_CLOSE(args...) TRACE_DBG_FLAG(TRACE_DEBUG|TRACE_CONN_OC, args)
-+#define TRACE_CONN_CLOSE_DBG(args...) TRACE(TRACE_CONN_OC_DBG, args)
-+#define TRACE_NET_PAGE(args...) TRACE_DBG_FLAG(TRACE_NET_PG, args)
-+#define TRACE_WRITE(args...) TRACE_DBG_FLAG(TRACE_DEBUG|TRACE_D_WRITE, args)
-+
-+#endif
-diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.h linux-2.6.36/drivers/scst/iscsi-scst/iscsi.h
---- orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.h
-+++ linux-2.6.36/drivers/scst/iscsi-scst/iscsi.h
-@@ -0,0 +1,743 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/iscsi-scst/iscsi.h linux-2.6.39/drivers/scst/iscsi-scst/iscsi.h
+--- orig/linux-2.6.39/drivers/scst/iscsi-scst/iscsi.h
++++ linux-2.6.39/drivers/scst/iscsi-scst/iscsi.h
+@@ -0,0 +1,788 @@
+/*
+ * Copyright (C) 2002 - 2003 Ardis Technolgies <roman@ardistech.com>
-+ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
@@ -57538,11 +60826,30 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.h linux-2.6.36/driver
+ int queued_cmnds;
+ unsigned int rsp_timeout;
+ unsigned int nop_in_interval;
++ unsigned int nop_in_timeout;
++};
++
++struct iscsi_thread {
++ struct task_struct *thr;
++ struct list_head threads_list_entry;
+};
+
-+struct network_thread_info {
-+ struct task_struct *task;
-+ unsigned int ready;
++struct iscsi_thread_pool {
++ spinlock_t rd_lock;
++ struct list_head rd_list;
++ wait_queue_head_t rd_waitQ;
++
++ spinlock_t wr_lock;
++ struct list_head wr_list;
++ wait_queue_head_t wr_waitQ;
++
++ cpumask_t cpu_mask;
++
++ int thread_pool_ref;
++
++ struct list_head threads_list;
++
++ struct list_head thread_pools_list_entry;
+};
+
+struct iscsi_target;
@@ -57574,8 +60881,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.h linux-2.6.36/driver
+};
+
+#define ISCSI_HASH_ORDER 8
-+#define cmnd_hashfn(itt) (BUILD_BUG_ON(!__same_type(itt, __be32)), \
-+ hash_long((__force u32)(itt), ISCSI_HASH_ORDER))
++#define cmnd_hashfn(itt) hash_32(itt, ISCSI_HASH_ORDER)
+
+struct iscsi_session {
+ struct iscsi_target *target;
@@ -57618,6 +60924,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.h linux-2.6.36/driver
+ unsigned int sess_reinstating:1;
+ unsigned int sess_shutting_down:1;
+
++ struct iscsi_thread_pool *sess_thr_pool;
++
+ /* All don't need any protection */
+ char *initiator_name;
+ u64 sid;
@@ -57660,10 +60968,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.h linux-2.6.36/driver
+
+ /* Protected by write_list_lock */
+ struct timer_list rsp_timer;
-+ unsigned int rsp_timeout; /* in jiffies */
++ unsigned int data_rsp_timeout; /* in jiffies */
+
+ /*
-+ * All 2 protected by iscsi_wr_lock. Modified independently to the
++ * All 2 protected by wr_lock. Modified independently to the
+ * above field, hence the alignment.
+ */
+ unsigned short wr_state __attribute__((aligned(sizeof(long))));
@@ -57699,7 +61007,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.h linux-2.6.36/driver
+ int hdigest_type;
+ int ddigest_type;
+
-+ /* All 6 protected by iscsi_rd_lock */
++ struct iscsi_thread_pool *conn_thr_pool;
++
++ /* All 6 protected by rd_lock */
+ unsigned short rd_state;
+ unsigned short rd_data_ready:1;
+ /* Let's save some cache footprint by putting them here */
@@ -57744,13 +61054,14 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.h linux-2.6.36/driver
+
+ struct delayed_work nop_in_delayed_work;
+ unsigned int nop_in_interval; /* in jiffies */
++ unsigned int nop_in_timeout; /* in jiffies */
+ struct list_head nop_req_list;
+ spinlock_t nop_req_list_lock;
+ u32 nop_in_ttt;
+
+ /* Don't need any protection */
+ struct kobject conn_kobj;
-+ struct completion conn_kobj_release_cmpl;
++ struct completion *conn_kobj_release_cmpl;
+};
+
+struct iscsi_pdu {
@@ -57813,6 +61124,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.h linux-2.6.36/driver
+ unsigned int force_cleanup_done:1;
+ unsigned int dec_active_cmds:1;
+ unsigned int ddigest_checked:1;
++ /*
++ * Used to prevent release of original req while its related DATA OUT
++ * cmd is receiving data, i.e. stays between data_out_start() and
++ * data_out_end(). Ref counting can't be used for that, because
++ * req_cmnd_release() supposed to be called only once.
++ */
++ unsigned int data_out_in_data_receiving:1;
+#ifdef CONFIG_SCST_EXTRACHECKS
+ unsigned int on_rx_digest_list:1;
+ unsigned int release_called:1;
@@ -57946,14 +61264,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.h linux-2.6.36/driver
+extern int ctr_open_state;
+extern const struct file_operations ctr_fops;
+
-+extern spinlock_t iscsi_rd_lock;
-+extern struct list_head iscsi_rd_list;
-+extern wait_queue_head_t iscsi_rd_waitQ;
-+
-+extern spinlock_t iscsi_wr_lock;
-+extern struct list_head iscsi_wr_list;
-+extern wait_queue_head_t iscsi_wr_waitQ;
-+
+/* iscsi.c */
+extern struct iscsi_cmnd *cmnd_alloc(struct iscsi_conn *,
+ struct iscsi_cmnd *parent);
@@ -57973,6 +61283,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.h linux-2.6.36/driver
+ struct iscsi_cmnd *orig_req, bool get_data);
+extern int set_scst_preliminary_status_rsp(struct iscsi_cmnd *req,
+ bool get_data, int key, int asc, int ascq);
++extern int iscsi_threads_pool_get(const cpumask_t *cpu_mask,
++ struct iscsi_thread_pool **out_pool);
++extern void iscsi_threads_pool_put(struct iscsi_thread_pool *p);
+
+/* conn.c */
+extern struct kobj_type iscsi_conn_ktype;
@@ -58036,6 +61349,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.h linux-2.6.36/driver
+ struct iscsi_kern_session_info *);
+extern int __del_session(struct iscsi_target *, u64);
+extern int session_free(struct iscsi_session *session, bool del);
++extern void iscsi_sess_force_close(struct iscsi_session *sess);
+
+/* params.c */
+extern const char *iscsi_get_digest_name(int val, char *res);
@@ -58080,7 +61394,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.h linux-2.6.36/driver
+extern struct scst_tgt_template iscsi_template;
+
+/*
-+ * Skip this command if result is not 0. Must be called under
++ * Skip this command if result is true. Must be called under
+ * corresponding lock.
+ */
+static inline bool cmnd_get_check(struct iscsi_cmnd *cmnd)
@@ -58177,6 +61491,24 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.h linux-2.6.36/driver
+#endif
+}
+
++static inline unsigned long iscsi_get_timeout(struct iscsi_cmnd *req)
++{
++ unsigned long res;
++
++ res = (cmnd_opcode(req) == ISCSI_OP_NOP_OUT) ?
++ req->conn->nop_in_timeout : req->conn->data_rsp_timeout;
++
++ if (unlikely(test_bit(ISCSI_CMD_ABORTED, &req->prelim_compl_flags)))
++ res = min_t(unsigned long, res, ISCSI_TM_DATA_WAIT_TIMEOUT);
++
++ return res;
++}
++
++static inline unsigned long iscsi_get_timeout_time(struct iscsi_cmnd *req)
++{
++ return req->write_start + iscsi_get_timeout(req);
++}
++
+static inline int test_write_ready(struct iscsi_conn *conn)
+{
+ /*
@@ -58222,14 +61554,80 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi.h linux-2.6.36/driver
+#endif
+
+#endif /* __ISCSI_H__ */
-diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi_hdr.h linux-2.6.36/drivers/scst/iscsi-scst/iscsi_hdr.h
---- orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi_hdr.h
-+++ linux-2.6.36/drivers/scst/iscsi-scst/iscsi_hdr.h
-@@ -0,0 +1,525 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/iscsi-scst/iscsi_dbg.h linux-2.6.39/drivers/scst/iscsi-scst/iscsi_dbg.h
+--- orig/linux-2.6.39/drivers/scst/iscsi-scst/iscsi_dbg.h
++++ linux-2.6.39/drivers/scst/iscsi-scst/iscsi_dbg.h
+@@ -0,0 +1,61 @@
++/*
++ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation, version 2
++ * of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#ifndef ISCSI_DBG_H
++#define ISCSI_DBG_H
++
++#define LOG_PREFIX "iscsi-scst"
++
++#include <scst/scst_debug.h>
++
++#define TRACE_D_WRITE 0x80000000
++#define TRACE_CONN_OC 0x40000000
++#define TRACE_D_IOV 0x20000000
++#define TRACE_D_DUMP_PDU 0x10000000
++#define TRACE_NET_PG 0x08000000
++#define TRACE_CONN_OC_DBG 0x04000000
++
++#ifdef CONFIG_SCST_DEBUG
++#define ISCSI_DEFAULT_LOG_FLAGS (TRACE_FUNCTION | TRACE_LINE | TRACE_PID | \
++ TRACE_OUT_OF_MEM | TRACE_MGMT | TRACE_MGMT_DEBUG | \
++ TRACE_MINOR | TRACE_SPECIAL | TRACE_CONN_OC)
++#else
++#define ISCSI_DEFAULT_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MGMT | \
++ TRACE_SPECIAL)
++#endif
++
++#ifdef CONFIG_SCST_DEBUG
++struct iscsi_pdu;
++struct iscsi_cmnd;
++extern void iscsi_dump_pdu(struct iscsi_pdu *pdu);
++extern unsigned long iscsi_get_flow_ctrl_or_mgmt_dbg_log_flag(
++ struct iscsi_cmnd *cmnd);
++#else
++#define iscsi_dump_pdu(x) do {} while (0)
++#define iscsi_get_flow_ctrl_or_mgmt_dbg_log_flag(x) do {} while (0)
++#endif
++
++#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
++extern unsigned long iscsi_trace_flag;
++#define trace_flag iscsi_trace_flag
++#endif
++
++#define TRACE_CONN_CLOSE(args...) TRACE_DBG_FLAG(TRACE_DEBUG|TRACE_CONN_OC, args)
++#define TRACE_CONN_CLOSE_DBG(args...) TRACE(TRACE_CONN_OC_DBG, args)
++#define TRACE_NET_PAGE(args...) TRACE_DBG_FLAG(TRACE_NET_PG, args)
++#define TRACE_WRITE(args...) TRACE_DBG_FLAG(TRACE_DEBUG|TRACE_D_WRITE, args)
++
++#endif
+diff -uprN orig/linux-2.6.39/drivers/scst/iscsi-scst/iscsi_hdr.h linux-2.6.39/drivers/scst/iscsi-scst/iscsi_hdr.h
+--- orig/linux-2.6.39/drivers/scst/iscsi-scst/iscsi_hdr.h
++++ linux-2.6.39/drivers/scst/iscsi-scst/iscsi_hdr.h
+@@ -0,0 +1,526 @@
+/*
+ * Copyright (C) 2002 - 2003 Ardis Technolgies <roman@ardistech.com>
-+ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
@@ -58751,16 +62149,17 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/iscsi_hdr.h linux-2.6.36/dr
+#define cmnd_scsicode(cmnd) (cmnd_hdr((cmnd))->scb[0])
+
+#endif /* __ISCSI_HDR_H__ */
-diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/nthread.c linux-2.6.36/drivers/scst/iscsi-scst/nthread.c
---- orig/linux-2.6.36/drivers/scst/iscsi-scst/nthread.c
-+++ linux-2.6.36/drivers/scst/iscsi-scst/nthread.c
-@@ -0,0 +1,1838 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/iscsi-scst/nthread.c linux-2.6.39/drivers/scst/iscsi-scst/nthread.c
+--- orig/linux-2.6.39/drivers/scst/iscsi-scst/nthread.c
++++ linux-2.6.39/drivers/scst/iscsi-scst/nthread.c
+@@ -0,0 +1,1891 @@
+/*
+ * Network threads.
+ *
+ * Copyright (C) 2004 - 2005 FUJITA Tomonori <tomof@acm.org>
-+ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
@@ -58775,13 +62174,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/nthread.c linux-2.6.36/driv
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/kthread.h>
-+#include <asm/ioctls.h>
+#include <linux/delay.h>
+#include <net/tcp.h>
+
+#include "iscsi.h"
+#include "digest.h"
+
++/* Read data states */
+enum rx_state {
+ RX_INIT_BHS, /* Must be zero for better "switch" optimization. */
+ RX_BHS,
@@ -59305,9 +62704,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/nthread.c linux-2.6.36/driv
+ while (1) {
+ bool t;
+
-+ spin_lock_bh(&iscsi_wr_lock);
++ spin_lock_bh(&conn->conn_thr_pool->wr_lock);
+ t = (conn->wr_state == ISCSI_CONN_WR_STATE_IDLE);
-+ spin_unlock_bh(&iscsi_wr_lock);
++ spin_unlock_bh(&conn->conn_thr_pool->wr_lock);
+
+ if (t && (atomic_read(&conn->conn_ref_cnt) == 0))
+ break;
@@ -59610,7 +63009,14 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/nthread.c linux-2.6.36/driv
+ case RX_BHS:
+ res = do_recv(conn);
+ if (res == 0) {
++ /*
++ * This command not yet received on the aborted
++ * time, so shouldn't be affected by any abort.
++ */
++ EXTRACHECKS_BUG_ON(cmnd->prelim_compl_flags != 0);
++
+ iscsi_cmnd_get_length(&cmnd->pdu);
++
+ if (cmnd->pdu.ahssize == 0) {
+ if ((conn->hdigest_type & DIGEST_NONE) == 0)
+ conn->read_state = RX_INIT_HDIGEST;
@@ -59762,12 +63168,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/nthread.c linux-2.6.36/driv
+}
+
+/*
-+ * Called under iscsi_rd_lock and BHs disabled, but will drop it inside,
-+ * then reaquire.
++ * Called under rd_lock and BHs disabled, but will drop it inside,
++ * then reacquire.
+ */
-+static void scst_do_job_rd(void)
-+ __acquires(&iscsi_rd_lock)
-+ __releases(&iscsi_rd_lock)
++static void scst_do_job_rd(struct iscsi_thread_pool *p)
++ __acquires(&rd_lock)
++ __releases(&rd_lock)
+{
+ TRACE_ENTRY();
+
@@ -59775,9 +63181,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/nthread.c linux-2.6.36/driv
+ * We delete/add to tail connections to maintain fairness between them.
+ */
+
-+ while (!list_empty(&iscsi_rd_list)) {
++ while (!list_empty(&p->rd_list)) {
+ int closed = 0, rc;
-+ struct iscsi_conn *conn = list_entry(iscsi_rd_list.next,
++ struct iscsi_conn *conn = list_entry(p->rd_list.next,
+ typeof(*conn), rd_list_entry);
+
+ list_del(&conn->rd_list_entry);
@@ -59788,26 +63194,26 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/nthread.c linux-2.6.36/driv
+#ifdef CONFIG_SCST_EXTRACHECKS
+ conn->rd_task = current;
+#endif
-+ spin_unlock_bh(&iscsi_rd_lock);
++ spin_unlock_bh(&p->rd_lock);
+
+ rc = process_read_io(conn, &closed);
+
-+ spin_lock_bh(&iscsi_rd_lock);
++ spin_lock_bh(&p->rd_lock);
+
+ if (unlikely(closed))
+ continue;
+
+ if (unlikely(conn->conn_tm_active)) {
-+ spin_unlock_bh(&iscsi_rd_lock);
++ spin_unlock_bh(&p->rd_lock);
+ iscsi_check_tm_data_wait_timeouts(conn, false);
-+ spin_lock_bh(&iscsi_rd_lock);
++ spin_lock_bh(&p->rd_lock);
+ }
+
+#ifdef CONFIG_SCST_EXTRACHECKS
+ conn->rd_task = NULL;
+#endif
+ if ((rc == 0) || conn->rd_data_ready) {
-+ list_add_tail(&conn->rd_list_entry, &iscsi_rd_list);
++ list_add_tail(&conn->rd_list_entry, &p->rd_list);
+ conn->rd_state = ISCSI_CONN_RD_STATE_IN_LIST;
+ } else
+ conn->rd_state = ISCSI_CONN_RD_STATE_IDLE;
@@ -59817,50 +63223,56 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/nthread.c linux-2.6.36/driv
+ return;
+}
+
-+static inline int test_rd_list(void)
++static inline int test_rd_list(struct iscsi_thread_pool *p)
+{
-+ int res = !list_empty(&iscsi_rd_list) ||
++ int res = !list_empty(&p->rd_list) ||
+ unlikely(kthread_should_stop());
+ return res;
+}
+
+int istrd(void *arg)
+{
++ struct iscsi_thread_pool *p = arg;
++ int rc;
++
+ TRACE_ENTRY();
+
-+ PRINT_INFO("Read thread started, PID %d", current->pid);
++ PRINT_INFO("Read thread for pool %p started, PID %d", p, current->pid);
+
+ current->flags |= PF_NOFREEZE;
++ rc = set_cpus_allowed_ptr(current, &p->cpu_mask);
++ if (rc != 0)
++ PRINT_ERROR("Setting CPU affinity failed: %d", rc);
+
-+ spin_lock_bh(&iscsi_rd_lock);
++ spin_lock_bh(&p->rd_lock);
+ while (!kthread_should_stop()) {
+ wait_queue_t wait;
+ init_waitqueue_entry(&wait, current);
+
-+ if (!test_rd_list()) {
-+ add_wait_queue_exclusive_head(&iscsi_rd_waitQ, &wait);
++ if (!test_rd_list(p)) {
++ add_wait_queue_exclusive_head(&p->rd_waitQ, &wait);
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
-+ if (test_rd_list())
++ if (test_rd_list(p))
+ break;
-+ spin_unlock_bh(&iscsi_rd_lock);
++ spin_unlock_bh(&p->rd_lock);
+ schedule();
-+ spin_lock_bh(&iscsi_rd_lock);
++ spin_lock_bh(&p->rd_lock);
+ }
+ set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&iscsi_rd_waitQ, &wait);
++ remove_wait_queue(&p->rd_waitQ, &wait);
+ }
-+ scst_do_job_rd();
++ scst_do_job_rd(p);
+ }
-+ spin_unlock_bh(&iscsi_rd_lock);
++ spin_unlock_bh(&p->rd_lock);
+
+ /*
+ * If kthread_should_stop() is true, we are guaranteed to be
-+ * on the module unload, so iscsi_rd_list must be empty.
++ * on the module unload, so rd_list must be empty.
+ */
-+ BUG_ON(!list_empty(&iscsi_rd_list));
++ BUG_ON(!list_empty(&p->rd_list));
+
-+ PRINT_INFO("Read thread PID %d finished", current->pid);
++ PRINT_INFO("Read thread for PID %d for pool %p finished", current->pid, p);
+
+ TRACE_EXIT();
+ return 0;
@@ -59963,8 +63375,41 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/nthread.c linux-2.6.36/driv
+ req->on_write_timeout_list = 1;
+ req->write_start = jiffies;
+
-+ list_add_tail(&req->write_timeout_list_entry,
-+ &conn->write_timeout_list);
++ if (unlikely(cmnd_opcode(req) == ISCSI_OP_NOP_OUT)) {
++ unsigned long req_tt = iscsi_get_timeout_time(req);
++ struct iscsi_cmnd *r;
++ bool inserted = false;
++ list_for_each_entry(r, &conn->write_timeout_list,
++ write_timeout_list_entry) {
++ unsigned long tt = iscsi_get_timeout_time(r);
++ if (time_after(tt, req_tt)) {
++ TRACE_DBG("Add NOP IN req %p (tt %ld) before "
++ "req %p (tt %ld)", req, req_tt, r, tt);
++ list_add_tail(&req->write_timeout_list_entry,
++ &r->write_timeout_list_entry);
++ inserted = true;
++ break;
++ } else
++ TRACE_DBG("Skipping op %x req %p (tt %ld)",
++ cmnd_opcode(r), r, tt);
++ }
++ if (!inserted) {
++ TRACE_DBG("Add NOP IN req %p in the tail", req);
++ list_add_tail(&req->write_timeout_list_entry,
++ &conn->write_timeout_list);
++ }
++
++ /* We suppose that nop_in_timeout must be <= data_rsp_timeout */
++ req_tt += ISCSI_ADD_SCHED_TIME;
++ if (timer_pending(&conn->rsp_timer) &&
++ time_after(conn->rsp_timer.expires, req_tt)) {
++ TRACE_DBG("Timer adjusted for sooner expired NOP IN "
++ "req %p", req);
++ mod_timer(&conn->rsp_timer, req_tt);
++ }
++ } else
++ list_add_tail(&req->write_timeout_list_entry,
++ &conn->write_timeout_list);
+
+ if (!timer_pending(&conn->rsp_timer)) {
+ unsigned long timeout_time;
@@ -59973,11 +63418,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/nthread.c linux-2.6.36/driv
+ &req->prelim_compl_flags))) {
+ set_conn_tm_active = true;
+ timeout_time = req->write_start +
-+ ISCSI_TM_DATA_WAIT_TIMEOUT +
-+ ISCSI_ADD_SCHED_TIME;
++ ISCSI_TM_DATA_WAIT_TIMEOUT;
+ } else
-+ timeout_time = req->write_start +
-+ conn->rsp_timeout + ISCSI_ADD_SCHED_TIME;
++ timeout_time = iscsi_get_timeout_time(req);
++
++ timeout_time += ISCSI_ADD_SCHED_TIME;
+
+ TRACE_DBG("Starting timer on %ld (con %p, write_start %ld)",
+ timeout_time, conn, req->write_start);
@@ -60001,13 +63446,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/nthread.c linux-2.6.36/driv
+ /*
+ * conn_tm_active can be already cleared by
+ * iscsi_check_tm_data_wait_timeouts(). write_list_lock is an inner
-+ * lock for iscsi_rd_lock.
++ * lock for rd_lock.
+ */
+ if (unlikely(set_conn_tm_active)) {
-+ spin_lock_bh(&iscsi_rd_lock);
++ spin_lock_bh(&conn->conn_thr_pool->rd_lock);
+ TRACE_MGMT_DBG("Setting conn_tm_active for conn %p", conn);
+ conn->conn_tm_active = 1;
-+ spin_unlock_bh(&iscsi_rd_lock);
++ spin_unlock_bh(&conn->conn_thr_pool->rd_lock);
+ }
+
+out:
@@ -60487,12 +63932,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/nthread.c linux-2.6.36/driv
+}
+
+/*
-+ * Called under iscsi_wr_lock and BHs disabled, but will drop it inside,
-+ * then reaquire.
++ * Called under wr_lock and BHs disabled, but will drop it inside,
++ * then reacquire.
+ */
-+static void scst_do_job_wr(void)
-+ __acquires(&iscsi_wr_lock)
-+ __releases(&iscsi_wr_lock)
++static void scst_do_job_wr(struct iscsi_thread_pool *p)
++ __acquires(&wr_lock)
++ __releases(&wr_lock)
+{
+ TRACE_ENTRY();
+
@@ -60500,9 +63945,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/nthread.c linux-2.6.36/driv
+ * We delete/add to tail connections to maintain fairness between them.
+ */
+
-+ while (!list_empty(&iscsi_wr_list)) {
++ while (!list_empty(&p->wr_list)) {
+ int rc;
-+ struct iscsi_conn *conn = list_entry(iscsi_wr_list.next,
++ struct iscsi_conn *conn = list_entry(p->wr_list.next,
+ typeof(*conn), wr_list_entry);
+
+ TRACE_DBG("conn %p, wr_state %x, wr_space_ready %d, "
@@ -60518,13 +63963,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/nthread.c linux-2.6.36/driv
+#ifdef CONFIG_SCST_EXTRACHECKS
+ conn->wr_task = current;
+#endif
-+ spin_unlock_bh(&iscsi_wr_lock);
++ spin_unlock_bh(&p->wr_lock);
+
+ conn_get(conn);
+
+ rc = iscsi_send(conn);
+
-+ spin_lock_bh(&iscsi_wr_lock);
++ spin_lock_bh(&p->wr_lock);
+#ifdef CONFIG_SCST_EXTRACHECKS
+ conn->wr_task = NULL;
+#endif
@@ -60533,7 +63978,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/nthread.c linux-2.6.36/driv
+ "(conn %p)", conn);
+ conn->wr_state = ISCSI_CONN_WR_STATE_SPACE_WAIT;
+ } else if (test_write_ready(conn)) {
-+ list_add_tail(&conn->wr_list_entry, &iscsi_wr_list);
++ list_add_tail(&conn->wr_list_entry, &p->wr_list);
+ conn->wr_state = ISCSI_CONN_WR_STATE_IN_LIST;
+ } else
+ conn->wr_state = ISCSI_CONN_WR_STATE_IDLE;
@@ -60545,62 +63990,69 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/nthread.c linux-2.6.36/driv
+ return;
+}
+
-+static inline int test_wr_list(void)
++static inline int test_wr_list(struct iscsi_thread_pool *p)
+{
-+ int res = !list_empty(&iscsi_wr_list) ||
++ int res = !list_empty(&p->wr_list) ||
+ unlikely(kthread_should_stop());
+ return res;
+}
+
+int istwr(void *arg)
+{
++ struct iscsi_thread_pool *p = arg;
++ int rc;
++
+ TRACE_ENTRY();
+
-+ PRINT_INFO("Write thread started, PID %d", current->pid);
++ PRINT_INFO("Write thread for pool %p started, PID %d", p, current->pid);
+
+ current->flags |= PF_NOFREEZE;
++ rc = set_cpus_allowed_ptr(current, &p->cpu_mask);
++ if (rc != 0)
++ PRINT_ERROR("Setting CPU affinity failed: %d", rc);
+
-+ spin_lock_bh(&iscsi_wr_lock);
++ spin_lock_bh(&p->wr_lock);
+ while (!kthread_should_stop()) {
+ wait_queue_t wait;
+ init_waitqueue_entry(&wait, current);
+
-+ if (!test_wr_list()) {
-+ add_wait_queue_exclusive_head(&iscsi_wr_waitQ, &wait);
++ if (!test_wr_list(p)) {
++ add_wait_queue_exclusive_head(&p->wr_waitQ, &wait);
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
-+ if (test_wr_list())
++ if (test_wr_list(p))
+ break;
-+ spin_unlock_bh(&iscsi_wr_lock);
++ spin_unlock_bh(&p->wr_lock);
+ schedule();
-+ spin_lock_bh(&iscsi_wr_lock);
++ spin_lock_bh(&p->wr_lock);
+ }
+ set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&iscsi_wr_waitQ, &wait);
++ remove_wait_queue(&p->wr_waitQ, &wait);
+ }
-+ scst_do_job_wr();
++ scst_do_job_wr(p);
+ }
-+ spin_unlock_bh(&iscsi_wr_lock);
++ spin_unlock_bh(&p->wr_lock);
+
+ /*
+ * If kthread_should_stop() is true, we are guaranteed to be
-+ * on the module unload, so iscsi_wr_list must be empty.
++ * on the module unload, so wr_list must be empty.
+ */
-+ BUG_ON(!list_empty(&iscsi_wr_list));
++ BUG_ON(!list_empty(&p->wr_list));
+
-+ PRINT_INFO("Write thread PID %d finished", current->pid);
++ PRINT_INFO("Write thread PID %d for pool %p finished", current->pid, p);
+
+ TRACE_EXIT();
+ return 0;
+}
-diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/param.c linux-2.6.36/drivers/scst/iscsi-scst/param.c
---- orig/linux-2.6.36/drivers/scst/iscsi-scst/param.c
-+++ linux-2.6.36/drivers/scst/iscsi-scst/param.c
-@@ -0,0 +1,306 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/iscsi-scst/param.c linux-2.6.39/drivers/scst/iscsi-scst/param.c
+--- orig/linux-2.6.39/drivers/scst/iscsi-scst/param.c
++++ linux-2.6.39/drivers/scst/iscsi-scst/param.c
+@@ -0,0 +1,342 @@
+/*
+ * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
-+ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
@@ -60713,6 +64165,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/param.c linux-2.6.36/driver
+ int32_t *iparams = info->session_params;
+ const int max_len = ISCSI_CONN_IOV_MAX * PAGE_SIZE;
+
++ /*
++ * This is only kernel sanity check. Actual data validity checks
++ * performed in the user space.
++ */
++
+ CHECK_PARAM(info, iparams, initial_r2t, 0, 1);
+ CHECK_PARAM(info, iparams, immediate_data, 0, 1);
+ CHECK_PARAM(info, iparams, max_connections, 1, 1);
@@ -60794,6 +64251,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/param.c linux-2.6.36/driver
+ struct iscsi_kern_params_info *info)
+{
+ int32_t *iparams = info->target_params;
++ unsigned int rsp_timeout, nop_in_timeout;
++
++ /*
++ * This is only kernel sanity check. Actual data validity checks
++ * performed in the user space.
++ */
+
+ CHECK_PARAM(info, iparams, queued_cmnds, MIN_NR_QUEUED_CMNDS,
+ min_t(int, MAX_NR_QUEUED_CMNDS,
@@ -60802,6 +64265,26 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/param.c linux-2.6.36/driver
+ MAX_RSP_TIMEOUT);
+ CHECK_PARAM(info, iparams, nop_in_interval, MIN_NOP_IN_INTERVAL,
+ MAX_NOP_IN_INTERVAL);
++ CHECK_PARAM(info, iparams, nop_in_timeout, MIN_NOP_IN_TIMEOUT,
++ MAX_NOP_IN_TIMEOUT);
++
++ /*
++ * We adjust too long timeout in req_add_to_write_timeout_list()
++ * only for NOPs, so check and warn if this assumption isn't honored.
++ */
++ if (!info->partial || (info->partial & 1 << key_rsp_timeout))
++ rsp_timeout = iparams[key_rsp_timeout];
++ else
++ rsp_timeout = session->tgt_params.rsp_timeout;
++ if (!info->partial || (info->partial & 1 << key_nop_in_timeout))
++ nop_in_timeout = iparams[key_nop_in_timeout];
++ else
++ nop_in_timeout = session->tgt_params.nop_in_timeout;
++ if (nop_in_timeout > rsp_timeout)
++ PRINT_WARNING("%s", "RspTimeout should be >= NopInTimeout, "
++ "otherwise data transfer failure could take up to "
++ "NopInTimeout long to detect");
++
+ return;
+}
+
@@ -60820,28 +64303,32 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/param.c linux-2.6.36/driver
+ SET_PARAM(params, info, iparams, queued_cmnds);
+ SET_PARAM(params, info, iparams, rsp_timeout);
+ SET_PARAM(params, info, iparams, nop_in_interval);
++ SET_PARAM(params, info, iparams, nop_in_timeout);
+
+ PRINT_INFO("Target parameters set for session %llx: "
+ "QueuedCommands %d, Response timeout %d, Nop-In "
-+ "interval %d", session->sid, params->queued_cmnds,
-+ params->rsp_timeout, params->nop_in_interval);
++ "interval %d, Nop-In timeout %d", session->sid,
++ params->queued_cmnds, params->rsp_timeout,
++ params->nop_in_interval, params->nop_in_timeout);
+
+ list_for_each_entry(conn, &session->conn_list,
+ conn_list_entry) {
-+ conn->rsp_timeout = session->tgt_params.rsp_timeout * HZ;
++ conn->data_rsp_timeout = session->tgt_params.rsp_timeout * HZ;
+ conn->nop_in_interval = session->tgt_params.nop_in_interval * HZ;
-+ spin_lock_bh(&iscsi_rd_lock);
++ conn->nop_in_timeout = session->tgt_params.nop_in_timeout * HZ;
++ spin_lock_bh(&conn->conn_thr_pool->rd_lock);
+ if (!conn->closing && (conn->nop_in_interval > 0)) {
+ TRACE_DBG("Schedule Nop-In work for conn %p", conn);
+ schedule_delayed_work(&conn->nop_in_delayed_work,
+ conn->nop_in_interval + ISCSI_ADD_SCHED_TIME);
+ }
-+ spin_unlock_bh(&iscsi_rd_lock);
++ spin_unlock_bh(&conn->conn_thr_pool->rd_lock);
+ }
+ } else {
+ GET_PARAM(params, info, iparams, queued_cmnds);
+ GET_PARAM(params, info, iparams, rsp_timeout);
+ GET_PARAM(params, info, iparams, nop_in_interval);
++ GET_PARAM(params, info, iparams, nop_in_timeout);
+ }
+
+ return 0;
@@ -60903,14 +64390,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/param.c linux-2.6.36/driver
+out:
+ return err;
+}
-diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/session.c linux-2.6.36/drivers/scst/iscsi-scst/session.c
---- orig/linux-2.6.36/drivers/scst/iscsi-scst/session.c
-+++ linux-2.6.36/drivers/scst/iscsi-scst/session.c
-@@ -0,0 +1,499 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/iscsi-scst/session.c linux-2.6.39/drivers/scst/iscsi-scst/session.c
+--- orig/linux-2.6.39/drivers/scst/iscsi-scst/session.c
++++ linux-2.6.39/drivers/scst/iscsi-scst/session.c
+@@ -0,0 +1,525 @@
+/*
+ * Copyright (C) 2002 - 2003 Ardis Technolgies <roman@ardistech.com>
-+ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
@@ -60983,12 +64471,20 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/session.c linux-2.6.36/driv
+ goto err;
+ }
+
++ err = iscsi_threads_pool_get(&session->scst_sess->acg->acg_cpu_mask,
++ &session->sess_thr_pool);
++ if (err != 0)
++ goto err_unreg;
++
+ TRACE_MGMT_DBG("Session %p created: target %p, tid %u, sid %#Lx",
+ session, target, target->tid, info->sid);
+
+ *result = session;
+ return 0;
+
++err_unreg:
++ scst_unregister_session(session->scst_sess, 1, NULL);
++
+err:
+ if (session) {
+ kfree(session->initiator_name);
@@ -61206,6 +64702,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/session.c linux-2.6.36/driv
+ if (del)
+ list_del(&session->session_list_entry);
+
++ if (session->sess_thr_pool != NULL) {
++ iscsi_threads_pool_put(session->sess_thr_pool);
++ session->sess_thr_pool = NULL;
++ }
++
+ if (session->scst_sess != NULL) {
+ /*
+ * We must NOT call scst_unregister_session() in the waiting
@@ -61240,6 +64741,25 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/session.c linux-2.6.36/driv
+ return session_free(session, true);
+}
+
++/* Must be called under target_mutex */
++void iscsi_sess_force_close(struct iscsi_session *sess)
++{
++ struct iscsi_conn *conn;
++
++ TRACE_ENTRY();
++
++ PRINT_INFO("Deleting session %llx with initiator %s (%p)",
++ (long long unsigned int)sess->sid, sess->initiator_name, sess);
++
++ list_for_each_entry(conn, &sess->conn_list, conn_list_entry) {
++ TRACE_MGMT_DBG("Deleting connection with initiator %p", conn);
++ __mark_conn_closed(conn, ISCSI_CONN_ACTIVE_CLOSE|ISCSI_CONN_DELETING);
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
+#define ISCSI_SESS_BOOL_PARAM_ATTR(name, exported_name) \
+static ssize_t iscsi_sess_show_##name(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
@@ -61358,7 +64878,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/session.c linux-2.6.36/driv
+ int res;
+ struct scst_session *scst_sess;
+ struct iscsi_session *sess;
-+ struct iscsi_conn *conn;
+
+ TRACE_ENTRY();
+
@@ -61370,13 +64889,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/session.c linux-2.6.36/driv
+ goto out;
+ }
+
-+ PRINT_INFO("Deleting session %llx with initiator %s (%p)",
-+ (long long unsigned int)sess->sid, sess->initiator_name, sess);
-+
-+ list_for_each_entry(conn, &sess->conn_list, conn_list_entry) {
-+ TRACE_MGMT_DBG("Deleting connection with initiator %p", conn);
-+ __mark_conn_closed(conn, ISCSI_CONN_ACTIVE_CLOSE|ISCSI_CONN_DELETING);
-+ }
++ iscsi_sess_force_close(sess);
+
+ mutex_unlock(&sess->target->target_mutex);
+
@@ -61406,14 +64919,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/session.c linux-2.6.36/driv
+ NULL,
+};
+
-diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/target.c linux-2.6.36/drivers/scst/iscsi-scst/target.c
---- orig/linux-2.6.36/drivers/scst/iscsi-scst/target.c
-+++ linux-2.6.36/drivers/scst/iscsi-scst/target.c
-@@ -0,0 +1,533 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/iscsi-scst/target.c linux-2.6.39/drivers/scst/iscsi-scst/target.c
+--- orig/linux-2.6.39/drivers/scst/iscsi-scst/target.c
++++ linux-2.6.39/drivers/scst/iscsi-scst/target.c
+@@ -0,0 +1,532 @@
+/*
+ * Copyright (C) 2002 - 2003 Ardis Technolgies <roman@ardistech.com>
-+ * Copyright (C) 2007 - 2010 Vladislav Bolkhovitin
++ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
@@ -61530,7 +65044,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/target.c linux-2.6.36/drive
+ int err;
+ u32 tid = info->tid;
+ struct iscsi_target *target = NULL; /* to calm down sparse */
-+ struct iscsi_kern_params_info *params_info;
+ struct iscsi_kern_attr *attr_info;
+ union add_info_union {
+ struct iscsi_kern_params_info params_info;
@@ -61564,7 +65077,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/target.c linux-2.6.36/drive
+ err = -ENOMEM;
+ goto out;
+ }
-+ params_info = (struct iscsi_kern_params_info *)add_info;
+ attr_info = (struct iscsi_kern_attr *)add_info;
+
+ if (tid == 0) {
@@ -61943,10 +65455,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/iscsi-scst/target.c linux-2.6.36/drive
+ return res;
+}
+
-diff -uprN orig/linux-2.6.36/Documentation/scst/README.iscsi linux-2.6.36/Documentation/scst/README.iscsi
---- orig/linux-2.6.36/Documentation/scst/README.iscsi
-+++ linux-2.6.36/Documentation/scst/README.iscsi
-@@ -0,0 +1,741 @@
+diff -uprN orig/linux-2.6.39/Documentation/scst/README.iscsi linux-2.6.39/Documentation/scst/README.iscsi
+--- orig/linux-2.6.39/Documentation/scst/README.iscsi
++++ linux-2.6.39/Documentation/scst/README.iscsi
+@@ -0,0 +1,748 @@
+iSCSI SCST target driver
+========================
+
@@ -61991,6 +65503,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.iscsi linux-2.6.36/Docume
+CAUTION: Working of target and initiator on the same host isn't fully
+======= supported. See SCST README file for details.
+
++
+Sysfs interface
+---------------
+
@@ -62084,34 +65597,25 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.iscsi linux-2.6.36/Docume
+ - QueuedCommands - defines maximum number of commands queued to any
+ session of this target. Default is 32 commands.
+
-+ - RspTimeout - defines the maximum time in seconds a command can wait for
-+ response from initiator, otherwise the corresponding connection will
-+ be closed. For performance reasons it is implemented as a timer,
-+ which once in RspTimeout time checks the oldest command waiting for
-+ response and, if it's older than RspTimeout, then it closes the
-+ connection. Hence, a stalled connection will be closed in time
-+ between RspTimeout and 2*RspTimeout. Default is 30 seconds.
-+
+ - NopInInterval - defines interval between NOP-In requests, which the
+ target will send on idle connections to check if the initiator is
+ still alive. If there is no NOP-Out reply from the initiator in
+ RspTimeout time, the corresponding connection will be closed. Default
+ is 30 seconds. If it's set to 0, then NOP-In requests are disabled.
+
++ - NopInTimeout - defines the maximum time in seconds a NOP-In request
++ can wait for response from initiator, otherwise the corresponding
++ connection will be closed. Default is 30 seconds.
++
++ - RspTimeout - defines the maximum time in seconds a command can wait for
++ response from initiator, otherwise the corresponding connection will
++ be closed. Default is 90 seconds.
++
+ - enabled - using this attribute you can enable or disable iSCSI-SCST
+ accept new connections to this target. It allows to finish
+ configuring it before it starts accepting new connections. 0 by
+ default.
+
-+ - rel_tgt_id - allows to read or write SCSI Relative Target Port
-+ Identifier attribute. This identifier is used to identify SCSI Target
-+ Ports by some SCSI commands, mainly by Persistent Reservations
-+ commands. This identifier must be unique among all SCST targets, but
-+ for convenience SCST allows disabled targets to have not unique
-+ rel_tgt_id. In this case SCST will not allow to enable this target
-+ until rel_tgt_id becomes unique. This attribute initialized unique by
-+ SCST by default.
-+
+ - redirect - allows to temporarily or permanently redirect login to the
+ target to another portal. Discovery sessions will not be impacted,
+ but normal sessions will be redirected before security negotiation.
@@ -62157,6 +65661,8 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.iscsi linux-2.6.36/Docume
+
+ - state - contains processing state of this connection.
+
++See SCST README for info about other attributes.
++
+Below is a sample script, which configures 1 virtual disk "disk1" using
+/disk1 image and one target iqn.2006-10.net.vlnb:tgt with all default
+parameters:
@@ -62489,6 +65995,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.iscsi linux-2.6.36/Docume
+|-- trace_level
+`-- version
+
++
+Advanced initiators access control
+----------------------------------
+
@@ -62574,6 +66081,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.iscsi linux-2.6.36/Docume
+will block access of initiator iqn.2005-03.org.vlnb:cacdcd2520 to
+all target iqn.2006-10.net.vlnb:tgt portals.
+
++
+Troubleshooting
+---------------
+
@@ -62589,6 +66097,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.iscsi linux-2.6.36/Docume
+If after looking on the logs the reason of your problem is still unclear
+for you, report to SCST mailing list scst-devel@lists.sourceforge.net.
+
++
+Work if target's backstorage or link is too slow
+------------------------------------------------
+
@@ -62603,6 +66112,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.iscsi linux-2.6.36/Docume
+Also see SCST README file for more details about that issue and ways to
+prevent it.
+
++
+Performance advices
+-------------------
+
@@ -62649,9 +66159,17 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.iscsi linux-2.6.36/Docume
+aggregate load on all CPUs, so with 4 cores 25% corresponds to 100% load
+of any single CPU.
+
-+7. See SCST core's README for more advices. Especially pay attention to
++7. For high speed network adapters it can be better if you configure
++them to serve connections, e.g., from initiator on CPU0 and from
++initiator Y on CPU1. Then you can bind threads processing them also to
++CPU0 and CPU1 correspondingly using cpu_mask attribute of their targets
++or security groups. In NUMA-like configurations it can signficantly
++boost IOPS performance.
++
++8. See SCST core's README for more advices. Especially pay attention to
+have io_grouping_type option set correctly.
+
++
+Compilation options
+-------------------
+
@@ -62670,6 +66188,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.iscsi linux-2.6.36/Docume
+ - CONFIG_SCST_ISCSI_DEBUG_DIGEST_FAILURES - simulates digest failures in
+ random places.
+
++
+Credits
+-------
+
@@ -62684,21 +66203,22 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.iscsi linux-2.6.36/Docume
+
+ * Tomasz Chmielewski <mangoo@wpkg.org> for testing and suggestions
+
-+ * Bart Van Assche <bart.vanassche@gmail.com> for a lot of help
++ * Bart Van Assche <bvanassche@acm.org> for a lot of help
+
+Vladislav Bolkhovitin <vst@vlnb.net>, http://scst.sourceforge.net
+
-diff -uprN orig/linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt.h linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt.h
---- orig/linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt.h
-+++ linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt.h
-@@ -0,0 +1,131 @@
+diff -uprN orig/linux-2.6.39/drivers/scsi/qla2xxx/qla2x_tgt.h linux-2.6.39/drivers/scsi/qla2xxx/qla2x_tgt.h
+--- orig/linux-2.6.39/drivers/scsi/qla2xxx/qla2x_tgt.h
++++ linux-2.6.39/drivers/scsi/qla2xxx/qla2x_tgt.h
+@@ -0,0 +1,137 @@
+/*
+ * qla2x_tgt.h
+ *
-+ * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
+ * Copyright (C) 2004 - 2005 Leonid Stoljar
+ * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
+ *
+ * Additional file for the target driver support.
+ *
@@ -62719,6 +66239,8 @@ diff -uprN orig/linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt.h linux-2.6.36/drive
+#ifndef __QLA2X_TGT_H
+#define __QLA2X_TGT_H
+
++#include <linux/version.h>
++
+extern request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);
+
+#ifdef CONFIG_SCSI_QLA2XXX_TARGET
@@ -62760,7 +66282,7 @@ diff -uprN orig/linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt.h linux-2.6.36/drive
+ * ha = adapter block pointer.
+ *
+ * Caller MUST have hardware lock held. This function might release it,
-+ * then reaquire.
++ * then reacquire.
+ */
+static inline void
+__qla2x00_send_enable_lun(scsi_qla_host_t *ha, int enable)
@@ -62819,21 +66341,25 @@ diff -uprN orig/linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt.h linux-2.6.36/drive
+}
+
+extern void qla2xxx_add_targets(void);
++extern size_t
++qla2xxx_add_vtarget(u64 *port_name, u64 *node_name, u64 *parent_host);
++extern size_t qla2xxx_del_vtarget(u64 *port_name);
+
+#endif /* CONFIG_SCSI_QLA2XXX_TARGET */
+
+#endif /* __QLA2X_TGT_H */
-diff -uprN orig/linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt_def.h linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt_def.h
---- orig/linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt_def.h
-+++ linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt_def.h
-@@ -0,0 +1,729 @@
+diff -uprN orig/linux-2.6.39/drivers/scsi/qla2xxx/qla2x_tgt_def.h linux-2.6.39/drivers/scsi/qla2xxx/qla2x_tgt_def.h
+--- orig/linux-2.6.39/drivers/scsi/qla2xxx/qla2x_tgt_def.h
++++ linux-2.6.39/drivers/scsi/qla2xxx/qla2x_tgt_def.h
+@@ -0,0 +1,737 @@
+/*
+ * qla2x_tgt_def.h
+ *
-+ * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
+ * Copyright (C) 2004 - 2005 Leonid Stoljar
+ * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
+ *
+ * Additional file for the target driver support.
+ *
@@ -62873,13 +66399,13 @@ diff -uprN orig/linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt_def.h linux-2.6.36/d
+ * Must be changed on any change in any initiator visible interfaces or
+ * data in the target add-on
+ */
-+#define QLA2X_TARGET_MAGIC 267
++#define QLA2X_TARGET_MAGIC 270
+
+/*
+ * Must be changed on any change in any target visible interfaces or
+ * data in the initiator
+ */
-+#define QLA2X_INITIATOR_MAGIC 57319
++#define QLA2X_INITIATOR_MAGIC 57224
+
+#define QLA2X_INI_MODE_STR_EXCLUSIVE "exclusive"
+#define QLA2X_INI_MODE_STR_DISABLED "disabled"
@@ -63257,8 +66783,13 @@ diff -uprN orig/linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt_def.h linux-2.6.36/d
+ uint8_t rddata:1;
+ uint8_t add_cdb_len:6;
+ uint8_t cdb[16];
-+ /* Valid only if add_cdb_len=0, otherwise this is additional CDB data */
-+ uint32_t data_length;
++ /*
++ * add_cdb is optional and can absent from fcp_cmnd_t. Size 4 only to
++ * make sizeof(fcp_cmnd_t) be as expected by BUILD_BUG_ON() in
++ * q2t_init().
++ */
++ uint8_t add_cdb[4];
++ /* uint32_t data_length; */
+} __attribute__((packed)) fcp_cmnd_t;
+
+/*
@@ -63337,7 +66868,8 @@ diff -uprN orig/linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt_def.h linux-2.6.36/d
+ uint16_t status;
+ uint16_t timeout;
+ uint16_t dseg_count; /* Data segment count. */
-+ uint8_t reserved1[6];
++ uint8_t vp_index;
++ uint8_t reserved1[5];
+ uint32_t exchange_address;
+ uint16_t reserved2;
+ uint16_t flags;
@@ -63443,7 +66975,8 @@ diff -uprN orig/linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt_def.h linux-2.6.36/d
+ uint8_t entry_status; /* Entry Status. */
+ uint8_t reserved_1[6];
+ uint16_t nport_handle;
-+ uint8_t reserved_2[3];
++ uint8_t reserved_2[2];
++ uint8_t vp_index;
+ uint8_t reserved_3:4;
+ uint8_t sof_type:4;
+ uint32_t exchange_address;
@@ -63485,7 +67018,7 @@ diff -uprN orig/linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt_def.h linux-2.6.36/d
+ uint16_t nport_handle;
+ uint16_t control_flags;
+#define ABTS_CONTR_FLG_TERM_EXCHG BIT_0
-+ uint8_t reserved_2;
++ uint8_t vp_index;
+ uint8_t reserved_3:4;
+ uint8_t sof_type:4;
+ uint32_t exchange_address;
@@ -63556,18 +67089,18 @@ diff -uprN orig/linux-2.6.36/drivers/scsi/qla2xxx/qla2x_tgt_def.h linux-2.6.36/d
+int qla2x00_wait_for_hba_online(scsi_qla_host_t *ha);
+
+#endif /* __QLA2X_TGT_DEF_H */
-diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/Makefile linux-2.6.36/drivers/scst/qla2xxx-target/Makefile
---- orig/linux-2.6.36/drivers/scst/qla2xxx-target/Makefile
-+++ linux-2.6.36/drivers/scst/qla2xxx-target/Makefile
+diff -uprN orig/linux-2.6.39/drivers/scst/qla2xxx-target/Makefile linux-2.6.39/drivers/scst/qla2xxx-target/Makefile
+--- orig/linux-2.6.39/drivers/scst/qla2xxx-target/Makefile
++++ linux-2.6.39/drivers/scst/qla2xxx-target/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/scsi/qla2xxx
+
+qla2x00tgt-y := qla2x00t.o
+
+obj-$(CONFIG_SCST_QLA_TGT_ADDON) += qla2x00tgt.o
-diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/Kconfig linux-2.6.36/drivers/scst/qla2xxx-target/Kconfig
---- orig/linux-2.6.36/drivers/scst/qla2xxx-target/Kconfig
-+++ linux-2.6.36/drivers/scst/qla2xxx-target/Kconfig
+diff -uprN orig/linux-2.6.39/drivers/scst/qla2xxx-target/Kconfig linux-2.6.39/drivers/scst/qla2xxx-target/Kconfig
+--- orig/linux-2.6.39/drivers/scst/qla2xxx-target/Kconfig
++++ linux-2.6.39/drivers/scst/qla2xxx-target/Kconfig
@@ -0,0 +1,30 @@
+config SCST_QLA_TGT_ADDON
+ tristate "QLogic 2XXX Target Mode Add-On"
@@ -63599,17 +67132,18 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/Kconfig linux-2.6.36/dr
+ performance loss.
+
+ If unsure, say "N".
-diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c
---- orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c
-+++ linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c
-@@ -0,0 +1,5486 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.39/drivers/scst/qla2xxx-target/qla2x00t.c
+--- orig/linux-2.6.39/drivers/scst/qla2xxx-target/qla2x00t.c
++++ linux-2.6.39/drivers/scst/qla2xxx-target/qla2x00t.c
+@@ -0,0 +1,6448 @@
+/*
+ * qla2x00t.c
+ *
-+ * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
+ * Copyright (C) 2004 - 2005 Leonid Stoljar
+ * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
+ * Copyright (C) 2006 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
+ *
+ * QLogic 22xx/23xx/24xx/25xx FC target driver.
+ *
@@ -63635,6 +67169,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/list.h>
++#include <asm/unaligned.h>
+
+#include <scst/scst.h>
+
@@ -63676,8 +67211,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+static int q2t_rdy_to_xfer(struct scst_cmd *scst_cmd);
+static void q2t_on_free_cmd(struct scst_cmd *scst_cmd);
+static void q2t_task_mgmt_fn_done(struct scst_mgmt_cmd *mcmd);
-+static int q2t_get_initiator_port_transport_id(struct scst_session *scst_sess,
-+ uint8_t **transport_id);
++static int q2t_get_initiator_port_transport_id(struct scst_tgt *tgt,
++ struct scst_session *scst_sess, uint8_t **transport_id);
+
+/* Predefs for callbacks handed to qla2xxx(target) */
+static void q24_atio_pkt(scsi_qla_host_t *ha, atio7_entry_t *pkt);
@@ -63732,6 +67267,31 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+struct kobj_attribute q2t_abort_isp_attr =
+ __ATTR(abort_isp, S_IWUSR, NULL, q2t_abort_isp_store);
+
++static ssize_t q2t_hw_target_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf);
++
++static struct kobj_attribute q2t_hw_target_attr =
++ __ATTR(hw_target, S_IRUGO, q2t_hw_target_show, NULL);
++
++static ssize_t q2t_node_name_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf);
++
++static struct kobj_attribute q2t_vp_node_name_attr =
++ __ATTR(node_name, S_IRUGO, q2t_node_name_show, NULL);
++
++static ssize_t q2t_node_name_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buffer, size_t size);
++
++static struct kobj_attribute q2t_hw_node_name_attr =
++ __ATTR(node_name, S_IRUGO|S_IWUSR, q2t_node_name_show,
++ q2t_node_name_store);
++
++static ssize_t q2t_vp_parent_host_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf);
++
++static struct kobj_attribute q2t_vp_parent_host_attr =
++ __ATTR(parent_host, S_IRUGO, q2t_vp_parent_host_show, NULL);
++
+static const struct attribute *q2t_tgt_attrs[] = {
+ &q2t_expl_conf_attr.attr,
+ &q2t_abort_isp_attr.attr,
@@ -63740,6 +67300,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+
+static int q2t_enable_tgt(struct scst_tgt *tgt, bool enable);
+static bool q2t_is_tgt_enabled(struct scst_tgt *tgt);
++static ssize_t q2t_add_vtarget(const char *target_name, char *params);
++static ssize_t q2t_del_vtarget(const char *target_name);
+
+/*
+ * Global Variables
@@ -63774,6 +67336,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ .on_hw_pending_cmd_timeout = q2t_on_hw_pending_cmd_timeout,
+ .enable_target = q2t_enable_tgt,
+ .is_target_enabled = q2t_is_tgt_enabled,
++ .add_target = q2t_add_vtarget,
++ .del_target = q2t_del_vtarget,
++ .add_target_parameters = "node_name, parent_host",
+ .tgtt_attrs = q2t_attrs,
+ .tgt_attrs = q2t_tgt_attrs,
+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
@@ -63794,14 +67359,14 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return 0;
+}
+
-+/* ha->hardware_lock supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
+static inline void q2t_sess_get(struct q2t_sess *sess)
+{
+ sess->sess_ref++;
+ TRACE_DBG("sess %p, new sess_ref %d", sess, sess->sess_ref);
+}
+
-+/* ha->hardware_lock supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
+static inline void q2t_sess_put(struct q2t_sess *sess)
+{
+ TRACE_DBG("sess %p, new sess_ref %d", sess, sess->sess_ref-1);
@@ -63812,60 +67377,98 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ q2t_unreg_sess(sess);
+}
+
-+/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
++/* pha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
+static inline struct q2t_sess *q2t_find_sess_by_loop_id(struct q2t_tgt *tgt,
-+ uint16_t lid)
++ uint16_t loop_id)
++{
++ struct q2t_sess *sess;
++ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
++ if ((loop_id == sess->loop_id) && !sess->deleted)
++ return sess;
++ }
++ return NULL;
++}
++
++/* pha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
++static inline struct q2t_sess *q2t_find_sess_by_s_id_include_deleted(
++ struct q2t_tgt *tgt, const uint8_t *s_id)
+{
+ struct q2t_sess *sess;
-+ BUG_ON(tgt == NULL);
+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
-+ if (lid == (sess->loop_id))
++ if ((sess->s_id.b.al_pa == s_id[2]) &&
++ (sess->s_id.b.area == s_id[1]) &&
++ (sess->s_id.b.domain == s_id[0]))
+ return sess;
+ }
+ return NULL;
+}
+
-+/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
++/* pha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
+static inline struct q2t_sess *q2t_find_sess_by_s_id(struct q2t_tgt *tgt,
+ const uint8_t *s_id)
+{
+ struct q2t_sess *sess;
-+ BUG_ON(tgt == NULL);
+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
+ if ((sess->s_id.b.al_pa == s_id[2]) &&
+ (sess->s_id.b.area == s_id[1]) &&
-+ (sess->s_id.b.domain == s_id[0]))
++ (sess->s_id.b.domain == s_id[0]) &&
++ !sess->deleted)
+ return sess;
+ }
+ return NULL;
+}
+
-+/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
++/* pha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
+static inline struct q2t_sess *q2t_find_sess_by_s_id_le(struct q2t_tgt *tgt,
+ const uint8_t *s_id)
+{
+ struct q2t_sess *sess;
-+ BUG_ON(tgt == NULL);
+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
+ if ((sess->s_id.b.al_pa == s_id[0]) &&
+ (sess->s_id.b.area == s_id[1]) &&
-+ (sess->s_id.b.domain == s_id[2]))
++ (sess->s_id.b.domain == s_id[2]) &&
++ !sess->deleted)
++ return sess;
++ }
++ return NULL;
++}
++
++/* pha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
++static inline struct q2t_sess *q2t_find_sess_by_port_name(struct q2t_tgt *tgt,
++ const uint8_t *port_name)
++{
++ struct q2t_sess *sess;
++ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
++ if ((sess->port_name[0] == port_name[0]) &&
++ (sess->port_name[1] == port_name[1]) &&
++ (sess->port_name[2] == port_name[2]) &&
++ (sess->port_name[3] == port_name[3]) &&
++ (sess->port_name[4] == port_name[4]) &&
++ (sess->port_name[5] == port_name[5]) &&
++ (sess->port_name[6] == port_name[6]) &&
++ (sess->port_name[7] == port_name[7]))
+ return sess;
+ }
+ return NULL;
+}
+
-+/* ha->hardware_lock supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
+static inline void q2t_exec_queue(scsi_qla_host_t *ha)
+{
-+ qla2x00_isp_cmd(ha);
++ qla2x00_isp_cmd(to_qla_parent(ha));
++}
++
++/* pha->hardware_lock supposed to be held on entry */
++static inline request_t *q2t_req_pkt(scsi_qla_host_t *ha)
++{
++ return qla2x00_req_pkt(to_qla_parent(ha));
+}
+
-+/* Might release hw lock, then reaquire!! */
++/* Might release hw lock, then reacquire!! */
+static inline int q2t_issue_marker(scsi_qla_host_t *ha, int ha_locked)
+{
+ /* Send marker if required */
-+ if (unlikely(ha->marker_needed != 0)) {
++ if (unlikely(to_qla_parent(ha)->marker_needed != 0)) {
+ int rc = qla2x00_issue_marker(ha, ha_locked);
+ if (rc != QLA_SUCCESS) {
+ PRINT_ERROR("qla2x00t(%ld): issue_marker() "
@@ -63876,6 +67479,200 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return QLA_SUCCESS;
+}
+
++static inline
++scsi_qla_host_t *q2t_find_host_by_d_id(scsi_qla_host_t *ha, uint8_t *d_id)
++{
++ if ((ha->d_id.b.area != d_id[1]) || (ha->d_id.b.domain != d_id[0]))
++ return NULL;
++
++ if (ha->d_id.b.al_pa == d_id[2])
++ return ha;
++
++ if (IS_FWI2_CAPABLE(ha)) {
++ uint8_t vp_idx;
++ BUG_ON(ha->tgt_vp_map == NULL);
++ vp_idx = ha->tgt_vp_map[d_id[2]].idx;
++ if (likely(test_bit(vp_idx, ha->vp_idx_map)))
++ return ha->tgt_vp_map[vp_idx].vha;
++ }
++
++ return NULL;
++}
++
++static inline
++scsi_qla_host_t *q2t_find_host_by_vp_idx(scsi_qla_host_t *ha, uint16_t vp_idx)
++{
++ if (ha->vp_idx == vp_idx)
++ return ha;
++
++ if (IS_FWI2_CAPABLE(ha)) {
++ BUG_ON(ha->tgt_vp_map == NULL);
++ if (likely(test_bit(vp_idx, ha->vp_idx_map)))
++ return ha->tgt_vp_map[vp_idx].vha;
++ }
++
++ return NULL;
++}
++
++static void q24_atio_pkt_all_vps(scsi_qla_host_t *ha, atio7_entry_t *atio)
++{
++ TRACE_ENTRY();
++
++ BUG_ON(ha == NULL);
++
++ switch (atio->entry_type) {
++ case ATIO_TYPE7:
++ {
++ scsi_qla_host_t *host = q2t_find_host_by_d_id(ha, atio->fcp_hdr.d_id);
++ if (unlikely(NULL == host)) {
++ /*
++ * It might happen, because there is a small gap between
++ * requesting the DPC thread to update loop and actual
++ * update. It is harmless and on the next retry should
++ * work well.
++ */
++ PRINT_WARNING("qla2x00t(%ld): Received ATIO_TYPE7 "
++ "with unknown d_id %x:%x:%x", ha->instance,
++ atio->fcp_hdr.d_id[0], atio->fcp_hdr.d_id[1],
++ atio->fcp_hdr.d_id[2]);
++ break;
++ }
++ q24_atio_pkt(host, atio);
++ break;
++ }
++
++ case IMMED_NOTIFY_TYPE:
++ {
++ scsi_qla_host_t *host = ha;
++ if (IS_FWI2_CAPABLE(ha)) {
++ notify24xx_entry_t *entry = (notify24xx_entry_t *)atio;
++ if ((entry->vp_index != 0xFF) &&
++ (entry->nport_handle != 0xFFFF)) {
++ host = q2t_find_host_by_vp_idx(ha,
++ entry->vp_index);
++ if (unlikely(!host)) {
++ PRINT_ERROR("qla2x00t(%ld): Received "
++ "ATIO (IMMED_NOTIFY_TYPE) "
++ "with unknown vp_index %d",
++ ha->instance, entry->vp_index);
++ break;
++ }
++ }
++ }
++ q24_atio_pkt(host, atio);
++ break;
++ }
++
++ default:
++ PRINT_ERROR("qla2x00t(%ld): Received unknown ATIO atio "
++ "type %x", ha->instance, atio->entry_type);
++ break;
++ }
++
++ TRACE_EXIT();
++ return;
++}
++
++static void q2t_response_pkt_all_vps(scsi_qla_host_t *ha, response_t *pkt)
++{
++ TRACE_ENTRY();
++
++ BUG_ON(ha == NULL);
++
++ switch (pkt->entry_type) {
++ case CTIO_TYPE7:
++ {
++ ctio7_fw_entry_t *entry = (ctio7_fw_entry_t *)pkt;
++ scsi_qla_host_t *host = q2t_find_host_by_vp_idx(ha,
++ entry->vp_index);
++ if (unlikely(!host)) {
++ PRINT_ERROR("qla2x00t(%ld): Response pkt (CTIO_TYPE7) "
++ "received, with unknown vp_index %d",
++ ha->instance, entry->vp_index);
++ break;
++ }
++ q2t_response_pkt(host, pkt);
++ break;
++ }
++
++ case IMMED_NOTIFY_TYPE:
++ {
++ scsi_qla_host_t *host = ha;
++ if (IS_FWI2_CAPABLE(ha)) {
++ notify24xx_entry_t *entry = (notify24xx_entry_t *)pkt;
++ host = q2t_find_host_by_vp_idx(ha, entry->vp_index);
++ if (unlikely(!host)) {
++ PRINT_ERROR("qla2x00t(%ld): Response pkt "
++ "(IMMED_NOTIFY_TYPE) received, "
++ "with unknown vp_index %d",
++ ha->instance, entry->vp_index);
++ break;
++ }
++ }
++ q2t_response_pkt(host, pkt);
++ break;
++ }
++
++ case NOTIFY_ACK_TYPE:
++ {
++ scsi_qla_host_t *host = ha;
++ if (IS_FWI2_CAPABLE(ha)) {
++ nack24xx_entry_t *entry = (nack24xx_entry_t *)pkt;
++ if (0xFF != entry->vp_index) {
++ host = q2t_find_host_by_vp_idx(ha,
++ entry->vp_index);
++ if (unlikely(!host)) {
++ PRINT_ERROR("qla2x00t(%ld): Response "
++ "pkt (NOTIFY_ACK_TYPE) "
++ "received, with unknown "
++ "vp_index %d", ha->instance,
++ entry->vp_index);
++ break;
++ }
++ }
++ }
++ q2t_response_pkt(host, pkt);
++ break;
++ }
++
++ case ABTS_RECV_24XX:
++ {
++ abts24_recv_entry_t *entry = (abts24_recv_entry_t *)pkt;
++ scsi_qla_host_t *host = q2t_find_host_by_vp_idx(ha,
++ entry->vp_index);
++ if (unlikely(!host)) {
++ PRINT_ERROR("qla2x00t(%ld): Response pkt "
++ "(ABTS_RECV_24XX) received, with unknown "
++ "vp_index %d", ha->instance, entry->vp_index);
++ break;
++ }
++ q2t_response_pkt(host, pkt);
++ break;
++ }
++
++ case ABTS_RESP_24XX:
++ {
++ abts24_resp_entry_t *entry = (abts24_resp_entry_t *)pkt;
++ scsi_qla_host_t *host = q2t_find_host_by_vp_idx(ha,
++ entry->vp_index);
++ if (unlikely(!host)) {
++ PRINT_ERROR("qla2x00t(%ld): Response pkt "
++ "(ABTS_RECV_24XX) received, with unknown "
++ "vp_index %d", ha->instance, entry->vp_index);
++ break;
++ }
++ q2t_response_pkt(host, pkt);
++ break;
++ }
++
++ default:
++ q2t_response_pkt(ha, pkt);
++ break;
++ }
++
++ TRACE_EXIT();
++ return;
++}
+/*
+ * Registers with initiator driver (but target mode isn't enabled till
+ * it's turned on via sysfs)
@@ -63885,8 +67682,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ int res, rc;
+ struct qla_tgt_data t = {
+ .magic = QLA2X_TARGET_MAGIC,
-+ .tgt24_atio_pkt = q24_atio_pkt,
-+ .tgt_response_pkt = q2t_response_pkt,
++ .tgt24_atio_pkt = q24_atio_pkt_all_vps,
++ .tgt_response_pkt = q2t_response_pkt_all_vps,
+ .tgt2x_ctio_completion = q2x_ctio_completion,
+ .tgt_async_event = q2t_async_event,
+ .tgt_host_action = q2t_host_action,
@@ -63926,7 +67723,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+{
+ struct q2t_sess *sess;
+ struct q2t_tgt *tgt;
-+ scsi_qla_host_t *ha;
++ scsi_qla_host_t *ha, *pha;
+ unsigned long flags;
+
+ TRACE_ENTRY();
@@ -63947,23 +67744,24 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ list_empty(&tgt->sess_list), tgt->sess_count);
+
+ ha = tgt->ha;
++ pha = to_qla_parent(ha);
+
+ /*
+ * We need to protect against race, when tgt is freed before or
+ * inside wake_up()
+ */
-+ spin_lock_irqsave(&ha->hardware_lock, flags);
++ spin_lock_irqsave(&pha->hardware_lock, flags);
+ tgt->sess_count--;
+ if (tgt->sess_count == 0)
+ wake_up_all(&tgt->waitQ);
-+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++ spin_unlock_irqrestore(&pha->hardware_lock, flags);
+
+out:
+ TRACE_EXIT();
+ return;
+}
+
-+/* ha->hardware_lock supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
+static int q2t_unreg_sess(struct q2t_sess *sess)
+{
+ int res = 1;
@@ -63989,7 +67787,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return res;
+}
+
-+/* ha->hardware_lock supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
+static int q2t_reset(scsi_qla_host_t *ha, void *iocb, int mcmd)
+{
+ struct q2t_sess *sess;
@@ -64007,6 +67805,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+
+ if (loop_id == 0xFFFF) {
+ /* Global event */
++ atomic_inc(&ha->tgt->tgt_global_resets_count);
+ q2t_clear_tgt_db(ha->tgt, 1);
+ if (!list_empty(&ha->tgt->sess_list)) {
+ sess = list_entry(ha->tgt->sess_list.next,
@@ -64059,27 +67858,65 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return res;
+}
+
-+/* ha->hardware_lock supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
++static void q2t_schedule_sess_for_deletion(struct q2t_sess *sess)
++{
++ struct q2t_tgt *tgt = sess->tgt;
++ uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
++ bool schedule;
++
++ TRACE_ENTRY();
++
++ if (sess->deleted)
++ goto out;
++
++ /*
++ * If the list is empty, then, most likely, the work isn't
++ * scheduled.
++ */
++ schedule = list_empty(&tgt->del_sess_list);
++
++ TRACE_MGMT_DBG("Scheduling sess %p for deletion (schedule %d)", sess,
++ schedule);
++ list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
++ sess->deleted = 1;
++ sess->expires = jiffies + dev_loss_tmo * HZ;
++
++ PRINT_INFO("qla2x00t(%ld): session for port %02x:%02x:%02x:"
++ "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
++ "deletion in %d secs", tgt->ha->instance,
++ sess->port_name[0], sess->port_name[1],
++ sess->port_name[2], sess->port_name[3],
++ sess->port_name[4], sess->port_name[5],
++ sess->port_name[6], sess->port_name[7],
++ sess->loop_id, dev_loss_tmo);
++
++ if (schedule)
++ schedule_delayed_work(&tgt->sess_del_work,
++ jiffies - sess->expires);
++
++out:
++ TRACE_EXIT();
++ return;
++}
++
++/* pha->hardware_lock supposed to be held on entry */
+static void q2t_clear_tgt_db(struct q2t_tgt *tgt, bool local_only)
+{
+ struct q2t_sess *sess, *sess_tmp;
+
+ TRACE_ENTRY();
+
-+ TRACE(TRACE_MGMT, "qla2x00t: Clearing targets DB %p", tgt);
++ TRACE(TRACE_MGMT, "qla2x00t: Clearing targets DB for target %p", tgt);
+
+ list_for_each_entry_safe(sess, sess_tmp, &tgt->sess_list,
+ sess_list_entry) {
-+ if (local_only && !sess->local)
-+ continue;
-+ if (local_only && sess->local)
-+ TRACE_MGMT_DBG("Putting local session %p from port "
-+ "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
-+ sess, sess->port_name[0], sess->port_name[1],
-+ sess->port_name[2], sess->port_name[3],
-+ sess->port_name[4], sess->port_name[5],
-+ sess->port_name[6], sess->port_name[7]);
-+ q2t_sess_put(sess);
++ if (local_only) {
++ if (!sess->local)
++ continue;
++ q2t_schedule_sess_for_deletion(sess);
++ } else
++ q2t_sess_put(sess);
+ }
+
+ /* At this point tgt could be already dead */
@@ -64100,60 +67937,267 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ struct q2t_sess *sess = (struct q2t_sess *)data;
+ struct q2t_tgt *tgt = sess->tgt;
+ scsi_qla_host_t *ha = tgt->ha;
++ scsi_qla_host_t *pha = to_qla_parent(ha);
+ unsigned long flags;
+
+ PRINT_INFO("qla2x00t(%ld): Session initialization failed",
+ ha->instance);
+
-+ spin_lock_irqsave(&ha->hardware_lock, flags);
++ spin_lock_irqsave(&pha->hardware_lock, flags);
+ q2t_sess_put(sess);
-+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++ spin_unlock_irqrestore(&pha->hardware_lock, flags);
+ }
+
+ TRACE_EXIT();
+ return;
+}
+
-+static void q2t_del_sess_timer_fn(unsigned long arg)
++static int q24_get_loop_id(scsi_qla_host_t *ha, const uint8_t *s_id,
++ uint16_t *loop_id)
++{
++ dma_addr_t gid_list_dma;
++ struct gid_list_info *gid_list;
++ char *id_iter;
++ int res, rc, i, retries = 0;
++ uint16_t entries;
++
++ TRACE_ENTRY();
++
++ gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
++ &gid_list_dma, GFP_KERNEL);
++ if (gid_list == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): DMA Alloc failed of %zd",
++ ha->instance, GID_LIST_SIZE);
++ res = -ENOMEM;
++ goto out;
++ }
++
++ /* Get list of logged in devices */
++retry:
++ rc = qla2x00_get_id_list(ha, gid_list, gid_list_dma, &entries);
++ if (rc != QLA_SUCCESS) {
++ if (rc == QLA_FW_NOT_READY) {
++ retries++;
++ if (retries < 3) {
++ msleep(1000);
++ goto retry;
++ }
++ }
++ TRACE_MGMT_DBG("qla2x00t(%ld): get_id_list() failed: %x",
++ ha->instance, rc);
++ res = -rc;
++ goto out_free_id_list;
++ }
++
++ id_iter = (char *)gid_list;
++ res = -1;
++ for (i = 0; i < entries; i++) {
++ struct gid_list_info *gid = (struct gid_list_info *)id_iter;
++ if ((gid->al_pa == s_id[2]) &&
++ (gid->area == s_id[1]) &&
++ (gid->domain == s_id[0])) {
++ *loop_id = le16_to_cpu(gid->loop_id);
++ res = 0;
++ break;
++ }
++ id_iter += ha->gid_list_info_size;
++ }
++
++out_free_id_list:
++ dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, gid_list, gid_list_dma);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static bool q2t_check_fcport_exist(scsi_qla_host_t *ha, struct q2t_sess *sess)
++{
++ bool res, found = false;
++ int rc, i;
++ uint16_t loop_id = 0xFFFF; /* to eliminate compiler's warning */
++ uint16_t entries;
++ void *pmap;
++ int pmap_len;
++ fc_port_t *fcport;
++ int global_resets;
++
++ TRACE_ENTRY();
++
++retry:
++ global_resets = atomic_read(&ha->tgt->tgt_global_resets_count);
++
++ rc = qla2x00_get_node_name_list(ha, &pmap, &pmap_len);
++ if (rc != QLA_SUCCESS) {
++ res = false;
++ goto out;
++ }
++
++ if (IS_FWI2_CAPABLE(ha)) {
++ struct qla_port24_data *pmap24 = pmap;
++
++ entries = pmap_len/sizeof(*pmap24);
++
++ for (i = 0; i < entries; ++i) {
++ if ((sess->port_name[0] == pmap24[i].port_name[0]) &&
++ (sess->port_name[1] == pmap24[i].port_name[1]) &&
++ (sess->port_name[2] == pmap24[i].port_name[2]) &&
++ (sess->port_name[3] == pmap24[i].port_name[3]) &&
++ (sess->port_name[4] == pmap24[i].port_name[4]) &&
++ (sess->port_name[5] == pmap24[i].port_name[5]) &&
++ (sess->port_name[6] == pmap24[i].port_name[6]) &&
++ (sess->port_name[7] == pmap24[i].port_name[7])) {
++ loop_id = le16_to_cpu(pmap24[i].loop_id);
++ found = true;
++ break;
++ }
++ }
++ } else {
++ struct qla_port23_data *pmap2x = pmap;
++
++ entries = pmap_len/sizeof(*pmap2x);
++
++ for (i = 0; i < entries; ++i) {
++ if ((sess->port_name[0] == pmap2x[i].port_name[0]) &&
++ (sess->port_name[1] == pmap2x[i].port_name[1]) &&
++ (sess->port_name[2] == pmap2x[i].port_name[2]) &&
++ (sess->port_name[3] == pmap2x[i].port_name[3]) &&
++ (sess->port_name[4] == pmap2x[i].port_name[4]) &&
++ (sess->port_name[5] == pmap2x[i].port_name[5]) &&
++ (sess->port_name[6] == pmap2x[i].port_name[6]) &&
++ (sess->port_name[7] == pmap2x[i].port_name[7])) {
++ loop_id = le16_to_cpu(pmap2x[i].loop_id);
++ found = true;
++ break;
++ }
++ }
++ }
++
++ kfree(pmap);
++
++ if (!found) {
++ res = false;
++ goto out;
++ }
++
++ TRACE_MGMT_DBG("loop_id %d", loop_id);
++
++ fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
++ if (fcport == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): Allocation of tmp FC port failed",
++ ha->instance);
++ res = false;
++ goto out;
++ }
++
++ fcport->loop_id = loop_id;
++
++ rc = qla2x00_get_port_database(ha, fcport, 0);
++ if (rc != QLA_SUCCESS) {
++ PRINT_ERROR("qla2x00t(%ld): Failed to retrieve fcport "
++ "information -- get_port_database() returned %x "
++ "(loop_id=0x%04x)", ha->instance, rc, loop_id);
++ res = false;
++ goto out_free_fcport;
++ }
++
++ if (global_resets != atomic_read(&ha->tgt->tgt_global_resets_count)) {
++ TRACE_MGMT_DBG("qla2x00t(%ld): global reset during session "
++ "discovery (counter was %d, new %d), retrying",
++ ha->instance, global_resets,
++ atomic_read(&ha->tgt->tgt_global_resets_count));
++ goto retry;
++ }
++
++ TRACE_MGMT_DBG("Updating sess %p s_id %x:%x:%x, "
++ "loop_id %d) to d_id %x:%x:%x, loop_id %d", sess,
++ sess->s_id.b.domain, sess->s_id.b.area,
++ sess->s_id.b.al_pa, sess->loop_id, fcport->d_id.b.domain,
++ fcport->d_id.b.area, fcport->d_id.b.al_pa, fcport->loop_id);
++
++ sess->s_id = fcport->d_id;
++ sess->loop_id = fcport->loop_id;
++ sess->conf_compl_supported = fcport->conf_compl_supported;
++
++ res = true;
++
++out_free_fcport:
++ kfree(fcport);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++/* pha->hardware_lock supposed to be held on entry */
++static void q2t_undelete_sess(struct q2t_sess *sess)
++{
++ BUG_ON(!sess->deleted);
++
++ list_del(&sess->del_list_entry);
++ sess->deleted = 0;
++}
++
++static void q2t_del_sess_work_fn(struct delayed_work *work)
+{
-+ struct q2t_tgt *tgt = (struct q2t_tgt *)arg;
++ struct q2t_tgt *tgt = container_of(work, struct q2t_tgt,
++ sess_del_work);
+ scsi_qla_host_t *ha = tgt->ha;
++ scsi_qla_host_t *pha = to_qla_parent(ha);
+ struct q2t_sess *sess;
+ unsigned long flags;
+
+ TRACE_ENTRY();
+
-+ spin_lock_irqsave(&ha->hardware_lock, flags);
++ spin_lock_irqsave(&pha->hardware_lock, flags);
+ while (!list_empty(&tgt->del_sess_list)) {
+ sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
+ del_list_entry);
+ if (time_after_eq(jiffies, sess->expires)) {
-+ /*
-+ * sess will be deleted from del_sess_list in
-+ * q2t_unreg_sess()
-+ */
-+ TRACE_MGMT_DBG("Timeout: sess %p about to be deleted",
-+ sess);
-+ q2t_sess_put(sess);
++ bool cancel;
++
++ q2t_undelete_sess(sess);
++
++ spin_unlock_irqrestore(&pha->hardware_lock, flags);
++ cancel = q2t_check_fcport_exist(ha, sess);
++ spin_lock_irqsave(&pha->hardware_lock, flags);
++
++ if (cancel) {
++ if (sess->deleted) {
++ /*
++ * sess was again deleted while we were
++ * discovering it
++ */
++ continue;
++ }
++
++ PRINT_INFO("qla2x00t(%ld): cancel deletion of "
++ "session for port %02x:%02x:%02x:"
++ "%02x:%02x:%02x:%02x:%02x (loop ID %d), "
++ "because it isn't deleted by firmware",
++ ha->instance,
++ sess->port_name[0], sess->port_name[1],
++ sess->port_name[2], sess->port_name[3],
++ sess->port_name[4], sess->port_name[5],
++ sess->port_name[6], sess->port_name[7],
++ sess->loop_id);
++ } else {
++ TRACE_MGMT_DBG("Timeout: sess %p about to be "
++ "deleted", sess);
++ q2t_sess_put(sess);
++ }
+ } else {
-+ tgt->sess_del_timer.expires = sess->expires;
-+ add_timer(&tgt->sess_del_timer);
++ schedule_delayed_work(&tgt->sess_del_work,
++ jiffies - sess->expires);
+ break;
+ }
+ }
-+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++ spin_unlock_irqrestore(&pha->hardware_lock, flags);
+
+ TRACE_EXIT();
+ return;
+}
+
-+/* pha->hardware_lock supposed to be held on entry */
-+static void q2t_undelete_sess(struct q2t_sess *sess)
-+{
-+ list_del(&sess->del_list_entry);
-+ sess->deleted = 0;
-+}
-+
+/*
+ * Must be called under tgt_mutex.
+ *
@@ -64167,11 +68211,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ const int wwn_str_len = 3*WWN_SIZE+2;
+ struct q2t_tgt *tgt = ha->tgt;
+ struct q2t_sess *sess;
++ scsi_qla_host_t *pha = to_qla_parent(ha);
+
+ TRACE_ENTRY();
+
+ /* Check to avoid double sessions */
-+ spin_lock_irq(&ha->hardware_lock);
++ spin_lock_irq(&pha->hardware_lock);
+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
+ if ((sess->port_name[0] == fcport->port_name[0]) &&
+ (sess->port_name[1] == fcport->port_name[1]) &&
@@ -64184,9 +68229,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ TRACE_MGMT_DBG("Double sess %p found (s_id %x:%x:%x, "
+ "loop_id %d), updating to d_id %x:%x:%x, "
+ "loop_id %d", sess, sess->s_id.b.domain,
-+ sess->s_id.b.al_pa, sess->s_id.b.area,
++ sess->s_id.b.area, sess->s_id.b.al_pa,
+ sess->loop_id, fcport->d_id.b.domain,
-+ fcport->d_id.b.al_pa, fcport->d_id.b.area,
++ fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ fcport->loop_id);
+
+ if (sess->deleted)
@@ -64198,11 +68243,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ sess->conf_compl_supported = fcport->conf_compl_supported;
+ if (sess->local && !local)
+ sess->local = 0;
-+ spin_unlock_irq(&ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
+ goto out;
+ }
+ }
-+ spin_unlock_irq(&ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
+
+ /* We are under tgt_mutex, so a new sess can't be added behind us */
+
@@ -64256,16 +68301,17 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ goto out_free_sess_wwn;
+ }
+
-+ spin_lock_irq(&ha->hardware_lock);
+ TRACE_MGMT_DBG("Adding sess %p to tgt %p", sess, tgt);
++
++ spin_lock_irq(&pha->hardware_lock);
+ list_add_tail(&sess->sess_list_entry, &tgt->sess_list);
+ tgt->sess_count++;
-+ spin_unlock_irq(&ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
+
+ PRINT_INFO("qla2x00t(%ld): %ssession for wwn %s (loop_id %d, "
+ "s_id %x:%x:%x, confirmed completion %ssupported) added",
+ ha->instance, local ? "local " : "", wwn_str, fcport->loop_id,
-+ sess->s_id.b.domain, sess->s_id.b.al_pa, sess->s_id.b.area,
++ sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
+ sess->conf_compl_supported ? "" : "not ");
+
+ kfree(wwn_str);
@@ -64284,25 +68330,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ goto out;
+}
+
-+/* pha->hardware_lock supposed to be held on entry */
-+static void q2t_reappear_sess(struct q2t_sess *sess, const char *reason)
-+{
-+ q2t_undelete_sess(sess);
-+
-+ PRINT_INFO("qla2x00t(%ld): %ssession for port %02x:"
-+ "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
-+ "reappeared%s", sess->tgt->ha->instance,
-+ sess->local ? "local " : "", sess->port_name[0],
-+ sess->port_name[1], sess->port_name[2], sess->port_name[3],
-+ sess->port_name[4], sess->port_name[5], sess->port_name[6],
-+ sess->port_name[7], sess->loop_id, reason);
-+ TRACE_MGMT_DBG("Appeared sess %p", sess);
-+}
-+
+static void q2t_fc_port_added(scsi_qla_host_t *ha, fc_port_t *fcport)
+{
+ struct q2t_tgt *tgt;
+ struct q2t_sess *sess;
++ scsi_qla_host_t *pha = to_qla_parent(ha);
+
+ TRACE_ENTRY();
+
@@ -64316,18 +68348,33 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ if (tgt->tgt_stop)
+ goto out_unlock;
+
-+ spin_lock_irq(&ha->hardware_lock);
++ spin_lock_irq(&pha->hardware_lock);
+
-+ sess = q2t_find_sess_by_loop_id(tgt, fcport->loop_id);
++ sess = q2t_find_sess_by_port_name(tgt, fcport->port_name);
+ if (sess == NULL) {
-+ spin_unlock_irq(&ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
+ sess = q2t_create_sess(ha, fcport, false);
-+ spin_lock_irq(&ha->hardware_lock);
++ spin_lock_irq(&pha->hardware_lock);
+ if (sess != NULL)
+ q2t_sess_put(sess); /* put the extra creation ref */
+ } else {
-+ if (sess->deleted)
-+ q2t_reappear_sess(sess, "");
++ if (sess->deleted) {
++ q2t_undelete_sess(sess);
++
++ PRINT_INFO("qla2x00t(%ld): %ssession for port %02x:"
++ "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
++ "reappeared", sess->tgt->ha->instance,
++ sess->local ? "local " : "", sess->port_name[0],
++ sess->port_name[1], sess->port_name[2],
++ sess->port_name[3], sess->port_name[4],
++ sess->port_name[5], sess->port_name[6],
++ sess->port_name[7], sess->loop_id);
++
++ TRACE_MGMT_DBG("Reappeared sess %p", sess);
++ }
++ sess->s_id = fcport->d_id;
++ sess->loop_id = fcport->loop_id;
++ sess->conf_compl_supported = fcport->conf_compl_supported;
+ }
+
+ if (sess->local) {
@@ -64342,7 +68389,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ sess->local = 0;
+ }
+
-+ spin_unlock_irq(&ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
+
+out_unlock:
+ mutex_unlock(&ha->tgt_mutex);
@@ -64355,7 +68402,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+{
+ struct q2t_tgt *tgt;
+ struct q2t_sess *sess;
-+ uint32_t dev_loss_tmo;
++ scsi_qla_host_t *pha = to_qla_parent(ha);
+
+ TRACE_ENTRY();
+
@@ -64366,43 +68413,22 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ if ((tgt == NULL) || (fcport->port_type != FCT_INITIATOR))
+ goto out_unlock;
+
-+ dev_loss_tmo = ha->port_down_retry_count + 5;
-+
+ if (tgt->tgt_stop)
+ goto out_unlock;
+
-+ spin_lock_irq(&ha->hardware_lock);
++ spin_lock_irq(&pha->hardware_lock);
+
-+ sess = q2t_find_sess_by_loop_id(tgt, fcport->loop_id);
++ sess = q2t_find_sess_by_port_name(tgt, fcport->port_name);
+ if (sess == NULL)
+ goto out_unlock_ha;
+
-+ if (!sess->deleted) {
-+ int add_tmr;
-+
-+ add_tmr = list_empty(&tgt->del_sess_list);
-+
-+ TRACE_MGMT_DBG("Scheduling sess %p to deletion", sess);
-+ list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
-+ sess->deleted = 1;
-+
-+ PRINT_INFO("qla2x00t(%ld): %ssession for port %02x:%02x:%02x:"
-+ "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
-+ "deletion in %d secs", ha->instance,
-+ sess->local ? "local " : "",
-+ fcport->port_name[0], fcport->port_name[1],
-+ fcport->port_name[2], fcport->port_name[3],
-+ fcport->port_name[4], fcport->port_name[5],
-+ fcport->port_name[6], fcport->port_name[7],
-+ sess->loop_id, dev_loss_tmo);
++ TRACE_MGMT_DBG("sess %p", sess);
+
-+ sess->expires = jiffies + dev_loss_tmo * HZ;
-+ if (add_tmr)
-+ mod_timer(&tgt->sess_del_timer, sess->expires);
-+ }
++ sess->local = 1;
++ q2t_schedule_sess_for_deletion(sess);
+
+out_unlock_ha:
-+ spin_unlock_irq(&ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
+
+out_unlock:
+ mutex_unlock(&ha->tgt_mutex);
@@ -64415,16 +68441,17 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+{
+ unsigned long flags;
+ int res;
++ scsi_qla_host_t *pha = to_qla_parent(tgt->ha);
+
+ /*
+ * We need to protect against race, when tgt is freed before or
+ * inside wake_up()
+ */
-+ spin_lock_irqsave(&tgt->ha->hardware_lock, flags);
++ spin_lock_irqsave(&pha->hardware_lock, flags);
+ TRACE_DBG("tgt %p, empty(sess_list)=%d sess_count=%d",
+ tgt, list_empty(&tgt->sess_list), tgt->sess_count);
+ res = (tgt->sess_count == 0);
-+ spin_unlock_irqrestore(&tgt->ha->hardware_lock, flags);
++ spin_unlock_irqrestore(&pha->hardware_lock, flags);
+
+ return res;
+}
@@ -64434,6 +68461,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+{
+ struct q2t_tgt *tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
+ scsi_qla_host_t *ha = tgt->ha;
++ scsi_qla_host_t *pha = to_qla_parent(ha);
+
+ TRACE_ENTRY();
+
@@ -64445,13 +68473,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ */
+
+ mutex_lock(&ha->tgt_mutex);
-+ spin_lock_irq(&ha->hardware_lock);
++ spin_lock_irq(&pha->hardware_lock);
+ tgt->tgt_stop = 1;
+ q2t_clear_tgt_db(tgt, false);
-+ spin_unlock_irq(&ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
+ mutex_unlock(&ha->tgt_mutex);
+
-+ del_timer_sync(&tgt->sess_del_timer);
++ cancel_delayed_work_sync(&tgt->sess_del_work);
+
+ TRACE_MGMT_DBG("Waiting for sess works (tgt %p)", tgt);
+ spin_lock_irq(&tgt->sess_work_lock);
@@ -64469,7 +68497,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+
+ /* Big hammer */
-+ if (!ha->host_shutting_down && qla_tgt_mode_enabled(ha))
++ if (!pha->host_shutting_down && qla_tgt_mode_enabled(ha))
+ qla2x00_disable_tgt_mode(ha);
+
+ /* Wait for sessions to clear out (just in case) */
@@ -64479,14 +68507,14 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ tgt->irq_cmd_count, tgt);
+
+ mutex_lock(&ha->tgt_mutex);
-+ spin_lock_irq(&ha->hardware_lock);
++ spin_lock_irq(&pha->hardware_lock);
+ while (tgt->irq_cmd_count != 0) {
-+ spin_unlock_irq(&ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
+ udelay(2);
-+ spin_lock_irq(&ha->hardware_lock);
++ spin_lock_irq(&pha->hardware_lock);
+ }
+ ha->tgt = NULL;
-+ spin_unlock_irq(&ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
+ mutex_unlock(&ha->tgt_mutex);
+
+ TRACE_MGMT_DBG("Stop of tgt %p finished", tgt);
@@ -64516,8 +68544,51 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return 0;
+}
+
++/* pha->hardware_lock supposed to be held on entry */
++static int q2t_sched_sess_work(struct q2t_tgt *tgt, int type,
++ const void *param, unsigned int param_size)
++{
++ int res;
++ struct q2t_sess_work_param *prm;
++ unsigned long flags;
++
++ TRACE_ENTRY();
++
++ prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
++ if (prm == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): Unable to create session "
++ "work, command will be refused", tgt->ha->instance);
++ res = -ENOMEM;
++ goto out;
++ }
++
++ TRACE_MGMT_DBG("Scheduling work (type %d, prm %p) to find session for "
++ "param %p (size %d, tgt %p)", type, prm, param, param_size, tgt);
++
++ BUG_ON(param_size > (sizeof(*prm) -
++ offsetof(struct q2t_sess_work_param, cmd)));
++
++ prm->type = type;
++ memcpy(&prm->cmd, param, param_size);
++
++ spin_lock_irqsave(&tgt->sess_work_lock, flags);
++ if (!tgt->sess_works_pending)
++ tgt->tm_to_unknown = 0;
++ list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
++ tgt->sess_works_pending = 1;
++ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
++
++ schedule_work(&tgt->sess_work);
++
++ res = 0;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
+/*
-+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
++ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void q2x_modify_command_count(scsi_qla_host_t *ha, int cmd_count,
+ int imm_count)
@@ -64531,7 +68602,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+
+ /* Sending marker isn't necessary, since we called from ISR */
+
-+ pkt = (modify_lun_entry_t *)qla2x00_req_pkt(ha);
++ pkt = (modify_lun_entry_t *)q2t_req_pkt(ha);
+ if (pkt == NULL) {
+ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
+ "request packet", ha->instance, __func__);
@@ -64570,7 +68641,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+}
+
+/*
-+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
++ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void q2x_send_notify_ack(scsi_qla_host_t *ha, notify_entry_t *iocb,
+ uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
@@ -64586,7 +68657,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
+ goto out;
+
-+ ntfy = (nack_entry_t *)qla2x00_req_pkt(ha);
++ ntfy = (nack_entry_t *)q2t_req_pkt(ha);
+ if (ntfy == NULL) {
+ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
+ "request packet", ha->instance, __func__);
@@ -64621,8 +68692,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+
+ TRACE(TRACE_SCSI, "qla2x00t(%ld): Sending Notify Ack Seq %#x -> I %#x "
+ "St %#x RC %#x", ha->instance,
-+ le16_to_cpu(iocb->seq_id), GET_TARGET_ID(ha, iocb),
-+ le16_to_cpu(iocb->status), le16_to_cpu(ntfy->resp_code));
++ le16_to_cpu(iocb->seq_id), GET_TARGET_ID(ha, iocb),
++ le16_to_cpu(iocb->status), le16_to_cpu(ntfy->resp_code));
+ TRACE_BUFFER("Notify Ack packet data", ntfy, REQUEST_ENTRY_SIZE);
+
+ q2t_exec_queue(ha);
@@ -64633,7 +68704,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+}
+
+/*
-+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
++ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void q24_send_abts_resp(scsi_qla_host_t *ha,
+ const abts24_recv_entry_t *abts, uint32_t status, bool ids_reversed)
@@ -64651,7 +68722,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
+ goto out;
+
-+ resp = (abts24_resp_entry_t *)qla2x00_req_pkt(ha);
++ resp = (abts24_resp_entry_t *)q2t_req_pkt(ha);
+ if (resp == NULL) {
+ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
+ "request packet", ha->instance, __func__);
@@ -64661,6 +68732,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ resp->entry_type = ABTS_RESP_24XX;
+ resp->entry_count = 1;
+ resp->nport_handle = abts->nport_handle;
++ resp->vp_index = ha->vp_idx;
+ resp->sof_type = abts->sof_type;
+ resp->exchange_address = abts->exchange_address;
+ resp->fcp_hdr_le = abts->fcp_hdr_le;
@@ -64713,7 +68785,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+}
+
+/*
-+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
++ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void q24_retry_term_exchange(scsi_qla_host_t *ha,
+ abts24_resp_fw_entry_t *entry)
@@ -64728,7 +68800,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
+ goto out;
+
-+ ctio = (ctio7_status1_entry_t *)qla2x00_req_pkt(ha);
++ ctio = (ctio7_status1_entry_t *)q2t_req_pkt(ha);
+ if (ctio == NULL) {
+ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
+ "request packet", ha->instance, __func__);
@@ -64745,6 +68817,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ ctio->common.nport_handle = entry->nport_handle;
+ ctio->common.handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+ ctio->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
++ ctio->common.vp_index = ha->vp_idx;
+ ctio->common.initiator_id[0] = entry->fcp_hdr_le.d_id[0];
+ ctio->common.initiator_id[1] = entry->fcp_hdr_le.d_id[1];
+ ctio->common.initiator_id[2] = entry->fcp_hdr_le.d_id[2];
@@ -64764,14 +68837,55 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return;
+}
+
++/* pha->hardware_lock supposed to be held on entry */
++static int __q24_handle_abts(scsi_qla_host_t *ha, abts24_recv_entry_t *abts,
++ struct q2t_sess *sess)
++{
++ int res;
++ uint32_t tag = abts->exchange_addr_to_abort;
++ struct q2t_mgmt_cmd *mcmd;
++
++ TRACE_ENTRY();
++
++ TRACE_MGMT_DBG("qla2x00t(%ld): task abort (tag=%d)", ha->instance,
++ tag);
++
++ mcmd = mempool_alloc(q2t_mgmt_cmd_mempool, GFP_ATOMIC);
++ if (mcmd == NULL) {
++ PRINT_ERROR("qla2x00t(%ld): %s: Allocation of ABORT cmd failed",
++ ha->instance, __func__);
++ res = -ENOMEM;
++ goto out;
++ }
++ memset(mcmd, 0, sizeof(*mcmd));
++
++ mcmd->sess = sess;
++ memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
++
++ res = scst_rx_mgmt_fn_tag(sess->scst_sess, SCST_ABORT_TASK, tag,
++ SCST_ATOMIC, mcmd);
++ if (res != 0) {
++ PRINT_ERROR("qla2x00t(%ld): scst_rx_mgmt_fn_tag() failed: %d",
++ ha->instance, res);
++ goto out_free;
++ }
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_free:
++ mempool_free(mcmd, q2t_mgmt_cmd_mempool);
++ goto out;
++}
++
+/*
-+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
++ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void q24_handle_abts(scsi_qla_host_t *ha, abts24_recv_entry_t *abts)
+{
-+ uint32_t tag;
+ int rc;
-+ struct q2t_mgmt_cmd *mcmd;
++ uint32_t tag = abts->exchange_addr_to_abort;
+ struct q2t_sess *sess;
+
+ TRACE_ENTRY();
@@ -64782,8 +68896,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ goto out_err;
+ }
+
-+ tag = abts->exchange_addr_to_abort;
-+
+ if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
+ TRACE_MGMT_DBG("qla2x00t(%ld): ABTS: Unknown Exchange "
+ "Address received", ha->instance);
@@ -64797,45 +68909,35 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+
+ sess = q2t_find_sess_by_s_id_le(ha->tgt, abts->fcp_hdr_le.s_id);
+ if (sess == NULL) {
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): task abort for unexisting "
++ TRACE_MGMT_DBG("qla2x00t(%ld): task abort for unexisting "
+ "session", ha->instance);
-+ ha->tgt->tm_to_unknown = 1;
-+ goto out_err;
-+ }
-+
-+ mcmd = mempool_alloc(q2t_mgmt_cmd_mempool, GFP_ATOMIC);
-+ if (mcmd == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): %s: Allocation of ABORT cmd failed",
-+ ha->instance, __func__);
-+ goto out_err;
++ rc = q2t_sched_sess_work(ha->tgt, Q2T_SESS_WORK_ABORT, abts,
++ sizeof(*abts));
++ if (rc != 0) {
++ ha->tgt->tm_to_unknown = 1;
++ goto out_err;
++ }
++ goto out;
+ }
-+ memset(mcmd, 0, sizeof(*mcmd));
-+
-+ mcmd->sess = sess;
-+ memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
+
-+ rc = scst_rx_mgmt_fn_tag(sess->scst_sess, SCST_ABORT_TASK, tag,
-+ SCST_ATOMIC, mcmd);
++ rc = __q24_handle_abts(ha, abts, sess);
+ if (rc != 0) {
+ PRINT_ERROR("qla2x00t(%ld): scst_rx_mgmt_fn_tag() failed: %d",
+ ha->instance, rc);
-+ goto out_err_free;
++ goto out_err;
+ }
+
+out:
+ TRACE_EXIT();
+ return;
+
-+out_err_free:
-+ mempool_free(mcmd, q2t_mgmt_cmd_mempool);
-+
+out_err:
+ q24_send_abts_resp(ha, abts, SCST_MGMT_STATUS_REJECTED, false);
+ goto out;
+}
+
+/*
-+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
++ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void q24_send_task_mgmt_ctio(scsi_qla_host_t *ha,
+ struct q2t_mgmt_cmd *mcmd, uint32_t resp_code)
@@ -64852,7 +68954,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
+ goto out;
+
-+ ctio = (ctio7_status1_entry_t *)qla2x00_req_pkt(ha);
++ ctio = (ctio7_status1_entry_t *)q2t_req_pkt(ha);
+ if (ctio == NULL) {
+ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
+ "request packet", ha->instance, __func__);
@@ -64864,6 +68966,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ ctio->common.handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+ ctio->common.nport_handle = mcmd->sess->loop_id;
+ ctio->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
++ ctio->common.vp_index = ha->vp_idx;
+ ctio->common.initiator_id[0] = atio->fcp_hdr.s_id[2];
+ ctio->common.initiator_id[1] = atio->fcp_hdr.s_id[1];
+ ctio->common.initiator_id[2] = atio->fcp_hdr.s_id[0];
@@ -64885,7 +68988,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+}
+
+/*
-+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
++ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void q24_send_notify_ack(scsi_qla_host_t *ha,
+ notify24xx_entry_t *iocb, uint16_t srr_flags,
@@ -64904,7 +69007,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ if (ha->tgt != NULL)
+ ha->tgt->notify_ack_expected++;
+
-+ nack = (nack24xx_entry_t *)qla2x00_req_pkt(ha);
++ nack = (nack24xx_entry_t *)q2t_req_pkt(ha);
+ if (nack == NULL) {
+ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
+ "request packet", ha->instance, __func__);
@@ -64928,6 +69031,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ nack->srr_reject_code = srr_reject_code;
+ nack->srr_reject_code_expl = srr_explan;
+ nack->ox_id = iocb->ox_id;
++ nack->vp_index = iocb->vp_index;
+
+ TRACE(TRACE_SCSI, "qla2x00t(%ld): Sending 24xx Notify Ack %d",
+ ha->instance, nack->status);
@@ -64971,7 +69075,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+{
+ struct q2t_mgmt_cmd *mcmd;
+ unsigned long flags;
-+ scsi_qla_host_t *ha;
++ scsi_qla_host_t *ha, *pha;
+
+ TRACE_ENTRY();
+
@@ -64985,8 +69089,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ }
+
+ ha = mcmd->sess->tgt->ha;
++ pha = to_qla_parent(ha);
+
-+ spin_lock_irqsave(&ha->hardware_lock, flags);
++ spin_lock_irqsave(&pha->hardware_lock, flags);
+ if (IS_FWI2_CAPABLE(ha)) {
+ if (mcmd->flags == Q24_MGMT_SEND_NACK) {
+ q24_send_notify_ack(ha,
@@ -65007,7 +69112,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ q2x_send_notify_ack(ha, &mcmd->orig_iocb.notify_entry, 0,
+ resp_code, 1, 0, 0, 0);
+ }
-+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++ spin_unlock_irqrestore(&pha->hardware_lock, flags);
+
+ scst_mgmt_cmd_set_tgt_priv(scst_mcmd, NULL);
+ mempool_free(mcmd, q2t_mgmt_cmd_mempool);
@@ -65067,11 +69172,14 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+static int q2t_check_reserve_free_req(scsi_qla_host_t *ha, uint32_t req_cnt)
+{
+ int res = SCST_TGT_RES_SUCCESS;
-+ device_reg_t __iomem *reg = ha->iobase;
++ device_reg_t __iomem *reg;
+ uint32_t cnt;
+
+ TRACE_ENTRY();
+
++ ha = to_qla_parent(ha);
++ reg = ha->iobase;
++
+ if (ha->req_q_cnt < (req_cnt + 2)) {
+ if (IS_FWI2_CAPABLE(ha))
+ cnt = (uint16_t)RD_REG_DWORD(
@@ -65106,10 +69214,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+}
+
+/*
-+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
++ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static inline void *q2t_get_req_pkt(scsi_qla_host_t *ha)
+{
++ ha = to_qla_parent(ha);
++
+ /* Adjust ring index. */
+ ha->req_ring_index++;
+ if (ha->req_ring_index == ha->request_q_length) {
@@ -65121,7 +69231,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return (cont_entry_t *)ha->request_ring_ptr;
+}
+
-+/* ha->hardware_lock supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
+static inline uint32_t q2t_make_handle(scsi_qla_host_t *ha)
+{
+ uint32_t h;
@@ -65148,7 +69258,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return h;
+}
+
-+/* ha->hardware_lock supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
+static void q2x_build_ctio_pkt(struct q2t_prm *prm)
+{
+ uint32_t h;
@@ -65187,7 +69297,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ GET_TARGET_ID(ha, &pkt->common), pkt->common.rx_id);
+}
+
-+/* ha->hardware_lock supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
+static int q24_build_ctio_pkt(struct q2t_prm *prm)
+{
+ uint32_t h;
@@ -65198,12 +69308,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+
+ TRACE_ENTRY();
+
-+ pkt = (ctio7_status0_entry_t *)ha->request_ring_ptr;
++ pkt = (ctio7_status0_entry_t *)to_qla_parent(ha)->request_ring_ptr;
+ prm->pkt = pkt;
+ memset(pkt, 0, sizeof(*pkt));
+
+ pkt->common.entry_type = CTIO_TYPE7;
+ pkt->common.entry_count = (uint8_t)prm->req_cnt;
++ pkt->common.vp_index = ha->vp_idx;
+
+ h = q2t_make_handle(ha);
+ if (unlikely(h == Q2T_NULL_HANDLE)) {
@@ -65237,7 +69348,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+}
+
+/*
-+ * ha->hardware_lock supposed to be held on entry. We have already made sure
++ * pha->hardware_lock supposed to be held on entry. We have already made sure
+ * that there is sufficient amount of request entries to not drop it.
+ */
+static void q2t_load_cont_data_segments(struct q2t_prm *prm)
@@ -65308,7 +69419,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+}
+
+/*
-+ * ha->hardware_lock supposed to be held on entry. We have already made sure
++ * pha->hardware_lock supposed to be held on entry. We have already made sure
+ * that there is sufficient amount of request entries to not drop it.
+ */
+static void q2x_load_data_segments(struct q2t_prm *prm)
@@ -65372,7 +69483,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+}
+
+/*
-+ * ha->hardware_lock supposed to be held on entry. We have already made sure
++ * pha->hardware_lock supposed to be held on entry. We have already made sure
+ * that there is sufficient amount of request entries to not drop it.
+ */
+static void q24_load_data_segments(struct q2t_prm *prm)
@@ -65445,6 +69556,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ int res;
+ struct q2t_tgt *tgt = cmd->tgt;
+ scsi_qla_host_t *ha = tgt->ha;
++ scsi_qla_host_t *pha = to_qla_parent(ha);
+ uint16_t full_req_cnt;
+ struct scst_cmd *scst_cmd = cmd->scst_cmd;
+
@@ -65540,7 +69652,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ prm->req_cnt, full_req_cnt, prm->add_status_pkt);
+
+ /* Acquire ring specific lock */
-+ spin_lock_irqsave(&ha->hardware_lock, *flags);
++ spin_lock_irqsave(&pha->hardware_lock, *flags);
+
+ /* Does F/W have an IOCBs for this request */
+ res = q2t_check_reserve_free_req(ha, full_req_cnt);
@@ -65557,7 +69669,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ q2t_unmap_sg(ha, cmd);
+
+ /* Release ring specific lock */
-+ spin_unlock_irqrestore(&ha->hardware_lock, *flags);
++ spin_unlock_irqrestore(&pha->hardware_lock, *flags);
+ goto out;
+}
+
@@ -65615,7 +69727,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+{
+ int res;
+ unsigned long flags;
-+ scsi_qla_host_t *ha;
++ scsi_qla_host_t *ha, *pha;
+ struct q2t_prm prm;
+ ctio_common_entry_t *pkt;
+
@@ -65630,9 +69742,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ goto out;
+ }
+
-+ /* Here ha->hardware_lock already locked */
++ /* Here pha->hardware_lock already locked */
+
+ ha = prm.tgt->ha;
++ pha = to_qla_parent(ha);
+
+ q2x_build_ctio_pkt(&prm);
+ pkt = (ctio_common_entry_t *)prm.pkt;
@@ -65687,7 +69800,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ q2t_exec_queue(ha);
+
+ /* Release ring specific lock */
-+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++ spin_unlock_irqrestore(&pha->hardware_lock, flags);
+
+out:
+ TRACE_EXIT_RES(res);
@@ -65842,7 +69955,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+{
+ int res;
+ unsigned long flags;
-+ scsi_qla_host_t *ha;
++ scsi_qla_host_t *ha, *pha;
+ struct q2t_prm prm;
+ ctio7_status0_entry_t *pkt;
+
@@ -65857,9 +69970,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ goto out;
+ }
+
-+ /* Here ha->hardware_lock already locked */
++ /* Here pha->hardware_lock already locked */
+
+ ha = prm.tgt->ha;
++ pha = to_qla_parent(ha);
+
+ res = q24_build_ctio_pkt(&prm);
+ if (unlikely(res != SCST_TGT_RES_SUCCESS))
@@ -65921,7 +70035,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+
+out_unlock:
+ /* Release ring specific lock */
-+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++ spin_unlock_irqrestore(&pha->hardware_lock, flags);
+
+out:
+ TRACE_EXIT_RES(res);
@@ -65937,7 +70051,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+{
+ int res = SCST_TGT_RES_SUCCESS;
+ unsigned long flags;
-+ scsi_qla_host_t *ha;
++ scsi_qla_host_t *ha, *pha;
+ struct q2t_tgt *tgt = cmd->tgt;
+ struct q2t_prm prm;
+ void *p;
@@ -65950,6 +70064,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ prm.sg = NULL;
+ prm.req_cnt = 1;
+ ha = tgt->ha;
++ pha = to_qla_parent(ha);
+
+ /* Send marker if required */
+ if (q2t_issue_marker(ha, 0) != QLA_SUCCESS) {
@@ -65966,7 +70081,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ }
+
+ /* Acquire ring specific lock */
-+ spin_lock_irqsave(&ha->hardware_lock, flags);
++ spin_lock_irqsave(&pha->hardware_lock, flags);
+
+ /* Does F/W have an IOCBs for this request */
+ res = q2t_check_reserve_free_req(ha, prm.req_cnt);
@@ -66000,7 +70115,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+
+out_unlock:
+ /* Release ring specific lock */
-+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++ spin_unlock_irqrestore(&pha->hardware_lock, flags);
+
+out:
+ TRACE_EXIT_RES(res);
@@ -66033,13 +70148,14 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return res;
+}
+
-+/* If hardware_lock held on entry, might drop it, then reaquire */
++/* If hardware_lock held on entry, might drop it, then reacquire */
+static void q2x_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
+ atio_entry_t *atio, int ha_locked)
+{
+ ctio_ret_entry_t *ctio;
+ unsigned long flags = 0; /* to stop compiler's warning */
+ int do_tgt_cmd_done = 0;
++ scsi_qla_host_t *pha = to_qla_parent(ha);
+
+ TRACE_ENTRY();
+
@@ -66050,9 +70166,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ goto out;
+
+ if (!ha_locked)
-+ spin_lock_irqsave(&ha->hardware_lock, flags);
++ spin_lock_irqsave(&pha->hardware_lock, flags);
+
-+ ctio = (ctio_ret_entry_t *)qla2x00_req_pkt(ha);
++ ctio = (ctio_ret_entry_t *)q2t_req_pkt(ha);
+ if (ctio == NULL) {
+ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
+ "request packet", ha->instance, __func__);
@@ -66090,7 +70206,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+
+out_unlock:
+ if (!ha_locked)
-+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++ spin_unlock_irqrestore(&pha->hardware_lock, flags);
+
+ if (do_tgt_cmd_done) {
+ if (!ha_locked && !in_interrupt()) {
@@ -66106,13 +70222,14 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return;
+}
+
-+/* If hardware_lock held on entry, might drop it, then reaquire */
++/* If hardware_lock held on entry, might drop it, then reacquire */
+static void q24_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
+ atio7_entry_t *atio, int ha_locked)
+{
+ ctio7_status1_entry_t *ctio;
+ unsigned long flags = 0; /* to stop compiler's warning */
+ int do_tgt_cmd_done = 0;
++ scsi_qla_host_t *pha = to_qla_parent(ha);
+
+ TRACE_ENTRY();
+
@@ -66123,9 +70240,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ goto out;
+
+ if (!ha_locked)
-+ spin_lock_irqsave(&ha->hardware_lock, flags);
++ spin_lock_irqsave(&pha->hardware_lock, flags);
+
-+ ctio = (ctio7_status1_entry_t *)qla2x00_req_pkt(ha);
++ ctio = (ctio7_status1_entry_t *)q2t_req_pkt(ha);
+ if (ctio == NULL) {
+ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
+ "request packet", ha->instance, __func__);
@@ -66146,6 +70263,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ ctio->common.nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
+ ctio->common.handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+ ctio->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
++ ctio->common.vp_index = ha->vp_idx;
+ ctio->common.initiator_id[0] = atio->fcp_hdr.s_id[2];
+ ctio->common.initiator_id[1] = atio->fcp_hdr.s_id[1];
+ ctio->common.initiator_id[2] = atio->fcp_hdr.s_id[0];
@@ -66155,7 +70273,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ ctio->ox_id = swab16(atio->fcp_hdr.ox_id);
+
+ /* Most likely, it isn't needed */
-+ ctio->residual = atio->fcp_cmnd.data_length;
++ ctio->residual = get_unaligned((uint32_t *)
++ &atio->fcp_cmnd.add_cdb[atio->fcp_cmnd.add_cdb_len]);
+ if (ctio->residual != 0)
+ ctio->scsi_status |= SS_RESIDUAL_UNDER;
+
@@ -66165,7 +70284,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+
+out_unlock:
+ if (!ha_locked)
-+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++ spin_unlock_irqrestore(&pha->hardware_lock, flags);
+
+ if (do_tgt_cmd_done) {
+ if (!ha_locked && !in_interrupt()) {
@@ -66208,7 +70327,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return;
+}
+
-+/* ha->hardware_lock supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
+static int q2t_prepare_srr_ctio(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
+ void *ctio)
+{
@@ -66294,7 +70413,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+}
+
+/*
-+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
++ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static int q2t_term_ctio_exchange(scsi_qla_host_t *ha, void *ctio,
+ struct q2t_cmd *cmd, uint32_t status)
@@ -66332,7 +70451,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return term;
+}
+
-+/* ha->hardware_lock supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
+static inline struct q2t_cmd *q2t_get_cmd(scsi_qla_host_t *ha, uint32_t handle)
+{
+ handle--;
@@ -66344,7 +70463,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return NULL;
+}
+
-+/* ha->hardware_lock supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
+static struct q2t_cmd *q2t_ctio_to_cmd(scsi_qla_host_t *ha, uint32_t handle,
+ void *ctio)
+{
@@ -66415,7 +70534,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+}
+
+/*
-+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
++ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void q2t_do_ctio_completion(scsi_qla_host_t *ha, uint32_t handle,
+ uint32_t status, void *ctio)
@@ -66537,7 +70656,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return;
+}
+
-+/* ha->hardware_lock supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+static void q2x_ctio_completion(scsi_qla_host_t *ha, uint32_t handle)
+{
@@ -66558,7 +70677,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return;
+}
+
-+/* ha->hardware_lock is supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
+static int q2x_do_send_cmd_to_scst(struct q2t_cmd *cmd)
+{
+ int res = 0;
@@ -66637,7 +70756,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return res;
+}
+
-+/* ha->hardware_lock is supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
+static int q24_do_send_cmd_to_scst(struct q2t_cmd *cmd)
+{
+ int res = 0;
@@ -66650,7 +70769,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+
+ cmd->scst_cmd = scst_rx_cmd(sess->scst_sess,
+ (uint8_t *)&atio->fcp_cmnd.lun, sizeof(atio->fcp_cmnd.lun),
-+ atio->fcp_cmnd.cdb, Q2T_MAX_CDB_LEN, SCST_ATOMIC);
++ atio->fcp_cmnd.cdb, sizeof(atio->fcp_cmnd.cdb) +
++ atio->fcp_cmnd.add_cdb_len, SCST_ATOMIC);
+
+ if (cmd->scst_cmd == NULL) {
+ PRINT_ERROR("%s", "qla2x00t: scst_rx_cmd() failed");
@@ -66671,7 +70791,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ else
+ dir = SCST_DATA_NONE;
+ scst_cmd_set_expected(cmd->scst_cmd, dir,
-+ be32_to_cpu(atio->fcp_cmnd.data_length));
++ be32_to_cpu(get_unaligned((uint32_t *)
++ &atio->fcp_cmnd.add_cdb[atio->fcp_cmnd.add_cdb_len])));
+
+ switch (atio->fcp_cmnd.task_attr) {
+ case ATIO_SIMPLE_QUEUE:
@@ -66712,7 +70833,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return res;
+}
+
-+/* ha->hardware_lock supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
+static int q2t_do_send_cmd_to_scst(scsi_qla_host_t *ha,
+ struct q2t_cmd *cmd, struct q2t_sess *sess)
+{
@@ -66733,7 +70854,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return res;
+}
+
-+/* ha->hardware_lock supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
+static int q2t_send_cmd_to_scst(scsi_qla_host_t *ha, atio_t *atio)
+{
+ int res = 0;
@@ -66785,9 +70906,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ }
+ }
+
-+ if (unlikely(sess->deleted))
-+ q2t_reappear_sess(sess, " by new commands");
-+
+ res = q2t_do_send_cmd_to_scst(ha, cmd, sess);
+ if (unlikely(res != 0))
+ goto out_free_cmd;
@@ -66801,36 +70919,18 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ goto out;
+
+out_sched:
-+ {
-+ struct q2t_sess_work_param *prm;
-+ unsigned long flags;
-+
-+ prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
-+ if (prm == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): Unable to create session "
-+ "work, command will be refused", ha->instance);
-+ res = -1;
-+ goto out_free_cmd;
-+ }
-+
-+ TRACE_MGMT_DBG("Scheduling work to find session for cmd %p",
-+ cmd);
-+
-+ prm->cmd = cmd;
-+
-+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
-+ if (!tgt->sess_works_pending)
-+ tgt->tm_to_unknown = 0;
-+ list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
-+ tgt->sess_works_pending = 1;
-+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
-+
-+ schedule_work(&tgt->sess_work);
++ if (atio->entry_count > 1) {
++ TRACE_MGMT_DBG("Dropping multy entry cmd %p", cmd);
++ res = -EBUSY;
++ goto out_free_cmd;
+ }
++ res = q2t_sched_sess_work(tgt, Q2T_SESS_WORK_CMD, &cmd, sizeof(cmd));
++ if (res != 0)
++ goto out_free_cmd;
+ goto out;
+}
+
-+/* ha->hardware_lock supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
+static int q2t_issue_task_mgmt(struct q2t_sess *sess, uint8_t *lun,
+ int lun_size, int fn, void *iocb, int flags)
+{
@@ -66945,7 +71045,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ goto out;
+}
+
-+/* ha->hardware_lock supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
+static int q2t_handle_task_mgmt(scsi_qla_host_t *ha, void *iocb)
+{
+ int res = 0;
@@ -66965,11 +71065,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ lun_size = sizeof(a->fcp_cmnd.lun);
+ fn = a->fcp_cmnd.task_mgmt_flags;
+ sess = q2t_find_sess_by_s_id(tgt, a->fcp_hdr.s_id);
-+ if (sess != NULL) {
-+ sess->s_id.b.al_pa = a->fcp_hdr.s_id[2];
-+ sess->s_id.b.area = a->fcp_hdr.s_id[1];
-+ sess->s_id.b.domain = a->fcp_hdr.s_id[0];
-+ }
+ } else {
+ notify_entry_t *n = (notify_entry_t *)iocb;
+ /* make it be in network byte order */
@@ -66981,10 +71076,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ }
+
+ if (sess == NULL) {
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): task mgmt fn 0x%x for "
++ TRACE_MGMT_DBG("qla2x00t(%ld): task mgmt fn 0x%x for "
+ "non-existant session", ha->instance, fn);
-+ tgt->tm_to_unknown = 1;
-+ res = -ESRCH;
++ res = q2t_sched_sess_work(tgt, Q2T_SESS_WORK_TM, iocb,
++ IS_FWI2_CAPABLE(ha) ? sizeof(atio7_entry_t) :
++ sizeof(notify_entry_t));
++ if (res != 0)
++ tgt->tm_to_unknown = 1;
+ goto out;
+ }
+
@@ -66995,29 +71093,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return res;
+}
+
-+/* ha->hardware_lock supposed to be held on entry */
-+static int q2t_abort_task(scsi_qla_host_t *ha, notify_entry_t *iocb)
++/* pha->hardware_lock supposed to be held on entry */
++static int __q2t_abort_task(scsi_qla_host_t *ha, notify_entry_t *iocb,
++ struct q2t_sess *sess)
+{
-+ int res = 0, rc;
++ int res, rc;
+ struct q2t_mgmt_cmd *mcmd;
-+ struct q2t_sess *sess;
-+ int loop_id;
-+ uint32_t tag;
+
+ TRACE_ENTRY();
+
-+ loop_id = GET_TARGET_ID(ha, iocb);
-+ tag = le16_to_cpu(iocb->seq_id);
-+
-+ sess = q2t_find_sess_by_loop_id(ha->tgt, loop_id);
-+ if (sess == NULL) {
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): task abort for unexisting "
-+ "session", ha->instance);
-+ ha->tgt->tm_to_unknown = 1;
-+ res = -EFAULT;
-+ goto out;
-+ }
-+
+ mcmd = mempool_alloc(q2t_mgmt_cmd_mempool, GFP_ATOMIC);
+ if (mcmd == NULL) {
+ PRINT_ERROR("qla2x00t(%ld): %s: Allocation of ABORT cmd failed",
@@ -67031,8 +71115,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ memcpy(&mcmd->orig_iocb.notify_entry, iocb,
+ sizeof(mcmd->orig_iocb.notify_entry));
+
-+ rc = scst_rx_mgmt_fn_tag(sess->scst_sess, SCST_ABORT_TASK, tag,
-+ SCST_ATOMIC, mcmd);
++ rc = scst_rx_mgmt_fn_tag(sess->scst_sess, SCST_ABORT_TASK,
++ le16_to_cpu(iocb->seq_id), SCST_ATOMIC, mcmd);
+ if (rc != 0) {
+ PRINT_ERROR("qla2x00t(%ld): scst_rx_mgmt_fn_tag() failed: %d",
+ ha->instance, rc);
@@ -67040,6 +71124,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ goto out_free;
+ }
+
++ res = 0;
++
+out:
+ TRACE_EXIT_RES(res);
+ return res;
@@ -67049,22 +71135,53 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ goto out;
+}
+
++/* pha->hardware_lock supposed to be held on entry */
++static int q2t_abort_task(scsi_qla_host_t *ha, notify_entry_t *iocb)
++{
++ int res;
++ struct q2t_sess *sess;
++ int loop_id;
++
++ TRACE_ENTRY();
++
++ loop_id = GET_TARGET_ID(ha, iocb);
++
++ sess = q2t_find_sess_by_loop_id(ha->tgt, loop_id);
++ if (sess == NULL) {
++ TRACE_MGMT_DBG("qla2x00t(%ld): task abort for unexisting "
++ "session", ha->instance);
++ res = q2t_sched_sess_work(sess->tgt, Q2T_SESS_WORK_ABORT, iocb,
++ sizeof(*iocb));
++ if (res != 0)
++ sess->tgt->tm_to_unknown = 1;
++ goto out;
++ }
++
++ res = __q2t_abort_task(ha, iocb, sess);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
+/*
-+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
++ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static int q24_handle_els(scsi_qla_host_t *ha, notify24xx_entry_t *iocb)
+{
-+ int res = 0;
++ int res = 1; /* send notify ack */
+
+ TRACE_ENTRY();
+
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): ELS opcode %x", ha->instance,
++ TRACE_MGMT_DBG("qla2x00t(%ld): ELS opcode %x", ha->instance,
+ iocb->status_subcode);
+
+ switch (iocb->status_subcode) {
+ case ELS_PLOGI:
+ case ELS_FLOGI:
+ case ELS_PRLI:
++ break;
++
+ case ELS_LOGO:
+ case ELS_PRLO:
+ res = q2t_reset(ha, iocb, Q2T_NEXUS_LOSS_SESS);
@@ -67078,14 +71195,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ q24_send_notify_ack(ha, &tgt->link_reinit_iocb, 0, 0, 0);
+ tgt->link_reinit_iocb_pending = 0;
+ }
-+ res = 1; /* send notify ack */
+ break;
+ }
+
+ default:
+ PRINT_ERROR("qla2x00t(%ld): Unsupported ELS command %x "
+ "received", ha->instance, iocb->status_subcode);
++#if 0
+ res = q2t_reset(ha, iocb, Q2T_NEXUS_LOSS_SESS);
++#endif
+ break;
+ }
+
@@ -67242,6 +71360,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+{
+ notify24xx_entry_t *ntfy = &imm->imm.notify_entry24;
+ struct q2t_cmd *cmd = sctio->cmd;
++ scsi_qla_host_t *pha = to_qla_parent(ha);
+
+ TRACE_ENTRY();
+
@@ -67249,10 +71368,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+
+ switch (ntfy->srr_ui) {
+ case SRR_IU_STATUS:
-+ spin_lock_irq(&ha->hardware_lock);
++ spin_lock_irq(&pha->hardware_lock);
+ q24_send_notify_ack(ha, ntfy,
+ NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
-+ spin_unlock_irq(&ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
+ __q24_xmit_response(cmd, Q2T_XMIT_STATUS);
+ break;
+ case SRR_IU_DATA_IN:
@@ -67263,10 +71382,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ offset = le32_to_cpu(imm->imm.notify_entry24.srr_rel_offs);
+ if (q2t_srr_adjust_data(cmd, offset, &xmit_type) != 0)
+ goto out_reject;
-+ spin_lock_irq(&ha->hardware_lock);
++ spin_lock_irq(&pha->hardware_lock);
+ q24_send_notify_ack(ha, ntfy,
+ NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
-+ spin_unlock_irq(&ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
+ __q24_xmit_response(cmd, xmit_type);
+ } else {
+ PRINT_ERROR("qla2x00t(%ld): SRR for in data for cmd "
@@ -67285,10 +71404,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ offset = le32_to_cpu(imm->imm.notify_entry24.srr_rel_offs);
+ if (q2t_srr_adjust_data(cmd, offset, &xmit_type) != 0)
+ goto out_reject;
-+ spin_lock_irq(&ha->hardware_lock);
++ spin_lock_irq(&pha->hardware_lock);
+ q24_send_notify_ack(ha, ntfy,
+ NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
-+ spin_unlock_irq(&ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
+ if (xmit_type & Q2T_XMIT_DATA)
+ __q2t_rdy_to_xfer(cmd);
+ } else {
@@ -67310,7 +71429,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return;
+
+out_reject:
-+ spin_lock_irq(&ha->hardware_lock);
++ spin_lock_irq(&pha->hardware_lock);
+ q24_send_notify_ack(ha, ntfy, NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
@@ -67320,7 +71439,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ SCST_CONTEXT_THREAD);
+ } else
+ q24_send_term_exchange(ha, cmd, &cmd->atio.atio7, 1);
-+ spin_unlock_irq(&ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
+ goto out;
+}
+
@@ -67330,6 +71449,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+{
+ notify_entry_t *ntfy = &imm->imm.notify_entry;
+ struct q2t_cmd *cmd = sctio->cmd;
++ scsi_qla_host_t *pha = to_qla_parent(ha);
+
+ TRACE_ENTRY();
+
@@ -67337,10 +71457,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+
+ switch (ntfy->srr_ui) {
+ case SRR_IU_STATUS:
-+ spin_lock_irq(&ha->hardware_lock);
++ spin_lock_irq(&pha->hardware_lock);
+ q2x_send_notify_ack(ha, ntfy, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
-+ spin_unlock_irq(&ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
+ __q2x_xmit_response(cmd, Q2T_XMIT_STATUS);
+ break;
+ case SRR_IU_DATA_IN:
@@ -67351,10 +71471,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ offset = le32_to_cpu(imm->imm.notify_entry.srr_rel_offs);
+ if (q2t_srr_adjust_data(cmd, offset, &xmit_type) != 0)
+ goto out_reject;
-+ spin_lock_irq(&ha->hardware_lock);
++ spin_lock_irq(&pha->hardware_lock);
+ q2x_send_notify_ack(ha, ntfy, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
-+ spin_unlock_irq(&ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
+ __q2x_xmit_response(cmd, xmit_type);
+ } else {
+ PRINT_ERROR("qla2x00t(%ld): SRR for in data for cmd "
@@ -67373,10 +71493,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ offset = le32_to_cpu(imm->imm.notify_entry.srr_rel_offs);
+ if (q2t_srr_adjust_data(cmd, offset, &xmit_type) != 0)
+ goto out_reject;
-+ spin_lock_irq(&ha->hardware_lock);
++ spin_lock_irq(&pha->hardware_lock);
+ q2x_send_notify_ack(ha, ntfy, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
-+ spin_unlock_irq(&ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
+ if (xmit_type & Q2T_XMIT_DATA)
+ __q2t_rdy_to_xfer(cmd);
+ } else {
@@ -67398,7 +71518,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return;
+
+out_reject:
-+ spin_lock_irq(&ha->hardware_lock);
++ spin_lock_irq(&pha->hardware_lock);
+ q2x_send_notify_ack(ha, ntfy, 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
@@ -67408,15 +71528,17 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ SCST_CONTEXT_THREAD);
+ } else
+ q2x_send_term_exchange(ha, cmd, &cmd->atio.atio2x, 1);
-+ spin_unlock_irq(&ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
+ goto out;
+}
+
+static void q2t_reject_free_srr_imm(scsi_qla_host_t *ha, struct srr_imm *imm,
+ int ha_locked)
+{
++ scsi_qla_host_t *pha = to_qla_parent(ha);
++
+ if (!ha_locked)
-+ spin_lock_irq(&ha->hardware_lock);
++ spin_lock_irq(&pha->hardware_lock);
+
+ if (IS_FWI2_CAPABLE(ha)) {
+ q24_send_notify_ack(ha, &imm->imm.notify_entry24,
@@ -67431,7 +71553,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ }
+
+ if (!ha_locked)
-+ spin_unlock_irq(&ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
+
+ kfree(imm);
+ return;
@@ -67513,7 +71635,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return;
+}
+
-+/* ha->hardware_lock supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
+static void q2t_prepare_srr_imm(scsi_qla_host_t *ha, void *iocb)
+{
+ struct srr_imm *imm;
@@ -67612,7 +71734,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+}
+
+/*
-+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
++ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void q2t_handle_imm_notify(scsi_qla_host_t *ha, void *iocb)
+{
@@ -67643,8 +71765,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ /* set the Clear LIP reset event flag */
+ add_flags |= NOTIFY_ACK_CLEAR_LIP_RESET;
+ }
-+ if (q2t_reset(ha, iocb, Q2T_ABORT_ALL) == 0)
-+ send_notify_ack = 0;
++ /*
++ * No additional resets or aborts are needed, because firmware
++ * will as required by FCP either generate TARGET RESET or
++ * reject all affected commands with LIP_RESET status.
++ */
+ break;
+ }
+
@@ -67695,9 +71820,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ case IMM_NTFY_PORT_CONFIG:
+ TRACE(TRACE_MGMT, "qla2x00t(%ld): Port config changed (%x)",
+ ha->instance, status);
-+ if (q2t_reset(ha, iocb, Q2T_ABORT_ALL) == 0)
-+ send_notify_ack = 0;
-+ /* The sessions will be cleared in the callback, if needed */
+ break;
+
+ case IMM_NTFY_GLBL_LOGO:
@@ -67763,7 +71885,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+}
+
+/*
-+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
++ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void q2x_send_busy(scsi_qla_host_t *ha, atio_entry_t *atio)
+{
@@ -67773,7 +71895,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+
+ /* Sending marker isn't necessary, since we called from ISR */
+
-+ ctio = (ctio_ret_entry_t *)qla2x00_req_pkt(ha);
++ ctio = (ctio_ret_entry_t *)q2t_req_pkt(ha);
+ if (ctio == NULL) {
+ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
+ "request packet", ha->instance, __func__);
@@ -67810,25 +71932,33 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+}
+
+/*
-+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
++ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void q24_send_busy(scsi_qla_host_t *ha, atio7_entry_t *atio,
+ uint16_t status)
+{
+ ctio7_status1_entry_t *ctio;
+ struct q2t_sess *sess;
++ uint16_t loop_id;
+
+ TRACE_ENTRY();
+
-+ sess = q2t_find_sess_by_s_id(ha->tgt, atio->fcp_hdr.s_id);
-+ if (sess == NULL) {
-+ q24_send_term_exchange(ha, NULL, atio, 1);
-+ goto out;
-+ }
++ /*
++ * In some cases, for instance for ATIO_EXCHANGE_ADDRESS_UNKNOWN, the
++ * spec requires to issue queue full SCSI status. So, let's search among
++ * being deleted sessions as well and use CTIO7_NHANDLE_UNRECOGNIZED,
++ * if we can't find sess.
++ */
++ sess = q2t_find_sess_by_s_id_include_deleted(ha->tgt,
++ atio->fcp_hdr.s_id);
++ if (sess != NULL)
++ loop_id = sess->loop_id;
++ else
++ loop_id = CTIO7_NHANDLE_UNRECOGNIZED;
+
+ /* Sending marker isn't necessary, since we called from ISR */
+
-+ ctio = (ctio7_status1_entry_t *)qla2x00_req_pkt(ha);
++ ctio = (ctio7_status1_entry_t *)q2t_req_pkt(ha);
+ if (ctio == NULL) {
+ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
+ "request packet", ha->instance, __func__);
@@ -67838,8 +71968,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ ctio->common.entry_type = CTIO_TYPE7;
+ ctio->common.entry_count = 1;
+ ctio->common.handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
-+ ctio->common.nport_handle = sess->loop_id;
++ ctio->common.nport_handle = loop_id;
+ ctio->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
++ ctio->common.vp_index = ha->vp_idx;
+ ctio->common.initiator_id[0] = atio->fcp_hdr.s_id[2];
+ ctio->common.initiator_id[1] = atio->fcp_hdr.s_id[1];
+ ctio->common.initiator_id[2] = atio->fcp_hdr.s_id[0];
@@ -67853,7 +71984,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ */
+ ctio->ox_id = swab16(atio->fcp_hdr.ox_id);
+ ctio->scsi_status = cpu_to_le16(status);
-+ ctio->residual = atio->fcp_cmnd.data_length;
++ ctio->residual = get_unaligned((uint32_t *)
++ &atio->fcp_cmnd.add_cdb[atio->fcp_cmnd.add_cdb_len]);
+ if (ctio->residual != 0)
+ ctio->scsi_status |= SS_RESIDUAL_UNDER;
+
@@ -67866,7 +71998,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return;
+}
+
-+/* ha->hardware_lock supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+static void q24_atio_pkt(scsi_qla_host_t *ha, atio7_entry_t *atio)
+{
@@ -67892,19 +72024,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+
+ switch (atio->entry_type) {
+ case ATIO_TYPE7:
-+ if (unlikely(atio->entry_count > 1) ||
-+ unlikely(atio->fcp_cmnd.add_cdb_len != 0)) {
-+ PRINT_ERROR("qla2x00t(%ld): Multi entry ATIO7 IOCBs "
-+ "(%d), ie with CDBs>16 bytes (%d), are not "
-+ "supported", ha->instance, atio->entry_count,
-+ atio->fcp_cmnd.add_cdb_len);
-+ break;
-+ }
+ TRACE_DBG("ATIO_TYPE7 instance %ld, lun %Lx, read/write %d/%d, "
-+ "data_length %04x, s_id %x:%x:%x", ha->instance,
-+ atio->fcp_cmnd.lun, atio->fcp_cmnd.rddata,
-+ atio->fcp_cmnd.wrdata,
-+ be32_to_cpu(atio->fcp_cmnd.data_length),
++ "add_cdb_len %d, data_length %04x, s_id %x:%x:%x",
++ ha->instance, atio->fcp_cmnd.lun, atio->fcp_cmnd.rddata,
++ atio->fcp_cmnd.wrdata, atio->fcp_cmnd.add_cdb_len,
++ be32_to_cpu(get_unaligned((uint32_t *)
++ &atio->fcp_cmnd.add_cdb[atio->fcp_cmnd.add_cdb_len])),
+ atio->fcp_hdr.s_id[0], atio->fcp_hdr.s_id[1],
+ atio->fcp_hdr.s_id[2]);
+ TRACE_BUFFER("Incoming ATIO7 packet data", atio,
@@ -67966,7 +72091,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return;
+}
+
-+/* ha->hardware_lock supposed to be held on entry */
++/* pha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+static void q2t_response_pkt(scsi_qla_host_t *ha, response_t *pkt)
+{
@@ -68215,7 +72340,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+}
+
+/*
-+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
++ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void q2t_async_event(uint16_t code, scsi_qla_host_t *ha,
+ uint16_t *mailbox)
@@ -68247,40 +72372,38 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ break;
+
+ case MBA_LOOP_UP:
-+ {
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): Async LOOP_UP occured "
-+ "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", ha->instance,
-+ le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
-+ le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): Loop up occured",
++ ha->instance);
+ if (tgt->link_reinit_iocb_pending) {
+ q24_send_notify_ack(ha, &tgt->link_reinit_iocb, 0, 0, 0);
+ tgt->link_reinit_iocb_pending = 0;
+ }
+ break;
-+ }
+
+ case MBA_LIP_OCCURRED:
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): LIP occured", ha->instance);
++ break;
++
+ case MBA_LOOP_DOWN:
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): Loop down occured",
++ ha->instance);
++ break;
++
+ case MBA_LIP_RESET:
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): Async event %#x occured "
-+ "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", ha->instance,
-+ code, le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
-+ le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
++ TRACE(TRACE_MGMT, "qla2x00t(%ld): LIP reset occured",
++ ha->instance);
+ break;
+
+ case MBA_PORT_UPDATE:
+ case MBA_RSCN_UPDATE:
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): Port update async event %#x "
-+ "occured: updating the ports database (m[1]=%x, m[2]=%x, "
-+ "m[3]=%x, m[4]=%x)", ha->instance, code,
-+ le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
-+ le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
++ TRACE_MGMT_DBG("qla2x00t(%ld): Port update async event %#x "
++ "occured", ha->instance, code);
+ /* .mark_all_devices_lost() is handled by the initiator driver */
+ break;
+
+ default:
+ TRACE(TRACE_MGMT, "qla2x00t(%ld): Async event %#x occured: "
-+ "ignore (m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)",
++ "ignoring (m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)",
+ ha->instance, code,
+ le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
+ le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
@@ -68294,7 +72417,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return;
+}
+
-+static int q2t_get_target_name(scsi_qla_host_t *ha, char **wwn)
++static int q2t_get_target_name(uint8_t *wwn, char **ppwwn_name)
+{
+ const int wwn_len = 3*WWN_SIZE+2;
+ int res = 0;
@@ -68302,108 +72425,57 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+
+ name = kmalloc(wwn_len, GFP_KERNEL);
+ if (name == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "%s", "qla2x00t: Allocation of tgt "
-+ "name failed");
++ PRINT_ERROR("qla2x00t: Allocation of tgt wwn name (size %d) "
++ "failed", wwn_len);
+ res = -ENOMEM;
+ goto out;
+ }
+
+ sprintf(name, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
-+ ha->port_name[0], ha->port_name[1],
-+ ha->port_name[2], ha->port_name[3],
-+ ha->port_name[4], ha->port_name[5],
-+ ha->port_name[6], ha->port_name[7]);
-+
-+ *wwn = name;
-+
-+out:
-+ return res;
-+}
-+
-+static int q24_get_loop_id(scsi_qla_host_t *ha, atio7_entry_t *atio7,
-+ uint16_t *loop_id)
-+{
-+ dma_addr_t gid_list_dma;
-+ struct gid_list_info *gid_list;
-+ char *id_iter;
-+ int res, rc, i;
-+ uint16_t entries;
-+
-+ TRACE_ENTRY();
-+
-+ gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
-+ &gid_list_dma, GFP_KERNEL);
-+ if (gid_list == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): DMA Alloc failed of %zd",
-+ ha->instance, GID_LIST_SIZE);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ /* Get list of logged in devices */
-+ rc = qla2x00_get_id_list(ha, gid_list, gid_list_dma, &entries);
-+ if (rc != QLA_SUCCESS) {
-+ PRINT_ERROR("qla2x00t(%ld): get_id_list() failed: %x",
-+ ha->instance, rc);
-+ res = -1;
-+ goto out_free_id_list;
-+ }
-+
-+ id_iter = (char *)gid_list;
-+ res = -1;
-+ for (i = 0; i < entries; i++) {
-+ struct gid_list_info *gid = (struct gid_list_info *)id_iter;
-+ if ((gid->al_pa == atio7->fcp_hdr.s_id[2]) &&
-+ (gid->area == atio7->fcp_hdr.s_id[1]) &&
-+ (gid->domain == atio7->fcp_hdr.s_id[0])) {
-+ *loop_id = le16_to_cpu(gid->loop_id);
-+ res = 0;
-+ break;
-+ }
-+ id_iter += ha->gid_list_info_size;
-+ }
-+
-+ if (res != 0) {
-+ if ((atio7->fcp_hdr.s_id[0] == 0xFF) &&
-+ (atio7->fcp_hdr.s_id[1] == 0xFC)) {
-+ /*
-+ * This is Domain Controller. It should be OK to drop
-+ * SCSI commands from it.
-+ */
-+ TRACE_MGMT_DBG("Unable to find initiator with S_ID "
-+ "%x:%x:%x", atio7->fcp_hdr.s_id[0],
-+ atio7->fcp_hdr.s_id[1], atio7->fcp_hdr.s_id[2]);
-+ } else
-+ PRINT_ERROR("qla2x00t(%ld): Unable to find initiator with "
-+ "S_ID %x:%x:%x", ha->instance,
-+ atio7->fcp_hdr.s_id[0], atio7->fcp_hdr.s_id[1],
-+ atio7->fcp_hdr.s_id[2]);
-+ }
++ wwn[0], wwn[1], wwn[2], wwn[3],
++ wwn[4], wwn[5], wwn[6], wwn[7]);
+
-+out_free_id_list:
-+ dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, gid_list, gid_list_dma);
++ *ppwwn_name = name;
+
+out:
-+ TRACE_EXIT_RES(res);
+ return res;
+}
+
+/* Must be called under tgt_mutex */
-+static struct q2t_sess *q2t_make_local_sess(scsi_qla_host_t *ha, atio_t *atio)
++static struct q2t_sess *q2t_make_local_sess(scsi_qla_host_t *ha,
++ const uint8_t *s_id, uint16_t loop_id)
+{
+ struct q2t_sess *sess = NULL;
+ fc_port_t *fcport = NULL;
-+ uint16_t loop_id = 0xFFFF; /* to remove warning */
-+ int rc;
++ int rc, global_resets;
+
+ TRACE_ENTRY();
+
++retry:
++ global_resets = atomic_read(&ha->tgt->tgt_global_resets_count);
++
+ if (IS_FWI2_CAPABLE(ha)) {
-+ rc = q24_get_loop_id(ha, (atio7_entry_t *)atio, &loop_id);
-+ if (rc != 0)
++ BUG_ON(s_id == NULL);
++
++ rc = q24_get_loop_id(ha, s_id, &loop_id);
++ if (rc != 0) {
++ if ((s_id[0] == 0xFF) &&
++ (s_id[1] == 0xFC)) {
++ /*
++ * This is Domain Controller, so it should be
++ * OK to drop SCSI commands from it.
++ */
++ TRACE_MGMT_DBG("Unable to find initiator with "
++ "S_ID %x:%x:%x", s_id[0], s_id[1],
++ s_id[2]);
++ } else
++ PRINT_ERROR("qla2x00t(%ld): Unable to find "
++ "initiator with S_ID %x:%x:%x",
++ ha->instance, s_id[0], s_id[1],
++ s_id[2]);
+ goto out;
-+ } else
-+ loop_id = GET_TARGET_ID(ha, (atio_entry_t *)atio);
++ }
++ }
+
+ fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
+ if (fcport == NULL) {
@@ -68424,6 +72496,16 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ goto out_free_fcport;
+ }
+
++ if (global_resets != atomic_read(&ha->tgt->tgt_global_resets_count)) {
++ TRACE_MGMT_DBG("qla2x00t(%ld): global reset during session "
++ "discovery (counter was %d, new %d), retrying",
++ ha->instance, global_resets,
++ atomic_read(&ha->tgt->tgt_global_resets_count));
++ kfree(fcport);
++ fcport = NULL;
++ goto retry;
++ }
++
+ sess = q2t_create_sess(ha, fcport, true);
+
+out_free_fcport:
@@ -68434,32 +72516,70 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return sess;
+}
+
-+static int q2t_exec_sess_work(struct q2t_tgt *tgt,
++static void q2t_exec_sess_work(struct q2t_tgt *tgt,
+ struct q2t_sess_work_param *prm)
+{
+ scsi_qla_host_t *ha = tgt->ha;
-+ int res = 0;
++ scsi_qla_host_t *pha = to_qla_parent(ha);
++ int rc;
+ struct q2t_sess *sess = NULL;
-+ struct q2t_cmd *cmd = prm->cmd;
-+ atio_t *atio = (atio_t *)&cmd->atio;
++ uint8_t *s_id = NULL; /* to hide compiler warnings */
++ uint8_t local_s_id[3];
++ int loop_id = -1; /* to hide compiler warnings */
+
+ TRACE_ENTRY();
+
-+ TRACE_MGMT_DBG("cmd %p", cmd);
++ TRACE_MGMT_DBG("prm %p", prm);
+
+ mutex_lock(&ha->tgt_mutex);
-+ spin_lock_irq(&ha->hardware_lock);
++ spin_lock_irq(&pha->hardware_lock);
+
+ if (tgt->tgt_stop)
+ goto send;
+
++ switch (prm->type) {
++ case Q2T_SESS_WORK_CMD:
++ {
++ struct q2t_cmd *cmd = prm->cmd;
++ if (IS_FWI2_CAPABLE(ha)) {
++ atio7_entry_t *a = (atio7_entry_t *)&cmd->atio;
++ s_id = a->fcp_hdr.s_id;
++ } else
++ loop_id = GET_TARGET_ID(ha, (atio_entry_t *)&cmd->atio);
++ break;
++ }
++ case Q2T_SESS_WORK_ABORT:
++ if (IS_FWI2_CAPABLE(ha)) {
++ sess = q2t_find_sess_by_s_id_le(tgt,
++ prm->abts.fcp_hdr_le.s_id);
++ if (sess == NULL) {
++ s_id = local_s_id;
++ s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
++ s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
++ s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
++ }
++ goto after_find;
++ } else
++ loop_id = GET_TARGET_ID(ha, &prm->tm_iocb);
++ break;
++ case Q2T_SESS_WORK_TM:
++ if (IS_FWI2_CAPABLE(ha))
++ s_id = prm->tm_iocb2.fcp_hdr.s_id;
++ else
++ loop_id = GET_TARGET_ID(ha, &prm->tm_iocb);
++ break;
++ default:
++ BUG_ON(1);
++ break;
++ }
++
+ if (IS_FWI2_CAPABLE(ha)) {
-+ atio7_entry_t *a = (atio7_entry_t *)atio;
-+ sess = q2t_find_sess_by_s_id(tgt, a->fcp_hdr.s_id);
++ BUG_ON(s_id == NULL);
++ sess = q2t_find_sess_by_s_id(tgt, s_id);
+ } else
-+ sess = q2t_find_sess_by_loop_id(tgt,
-+ GET_TARGET_ID(ha, (atio_entry_t *)atio));
++ sess = q2t_find_sess_by_loop_id(tgt, loop_id);
+
++after_find:
+ if (sess != NULL) {
+ TRACE_MGMT_DBG("sess %p found", sess);
+ q2t_sess_get(sess);
@@ -68468,43 +72588,124 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ * We are under tgt_mutex, so a new sess can't be added
+ * behind us.
+ */
-+ spin_unlock_irq(&ha->hardware_lock);
-+ sess = q2t_make_local_sess(ha, atio);
-+ spin_lock_irq(&ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
++ sess = q2t_make_local_sess(ha, s_id, loop_id);
++ spin_lock_irq(&pha->hardware_lock);
+ /* sess has got an extra creation ref */
+ }
+
+send:
-+ if (!tgt->tm_to_unknown && !tgt->tgt_stop && (sess != NULL)) {
++ if ((sess == NULL) || tgt->tgt_stop)
++ goto out_term;
++
++ switch (prm->type) {
++ case Q2T_SESS_WORK_CMD:
++ {
++ struct q2t_cmd *cmd = prm->cmd;
++ if (tgt->tm_to_unknown) {
++ /*
++ * Cmd might be already aborted behind us, so be safe
++ * and abort it. It should be OK, initiator will retry
++ * it.
++ */
++ goto out_term;
++ }
+ TRACE_MGMT_DBG("Sending work cmd %p to SCST", cmd);
-+ res = q2t_do_send_cmd_to_scst(ha, cmd, sess);
-+ } else {
-+ /*
-+ * Cmd might be already aborted behind us, so be safe and
-+ * abort it. It should be OK, initiator will retry it. It has
-+ * not sent to SCST yet, so pass NULL as the second argument.
-+ */
-+ TRACE_MGMT_DBG("Terminating work cmd %p", cmd);
++ rc = q2t_do_send_cmd_to_scst(ha, cmd, sess);
++ break;
++ }
++ case Q2T_SESS_WORK_ABORT:
+ if (IS_FWI2_CAPABLE(ha))
-+ q24_send_term_exchange(ha, NULL , &cmd->atio.atio7, 1);
++ rc = __q24_handle_abts(ha, &prm->abts, sess);
+ else
-+ q2x_send_term_exchange(ha, NULL, &cmd->atio.atio2x, 1);
-+ q2t_free_cmd(cmd);
++ rc = __q2t_abort_task(ha, &prm->tm_iocb, sess);
++ break;
++ case Q2T_SESS_WORK_TM:
++ {
++ uint8_t *lun;
++ uint16_t lun_data;
++ int lun_size, fn;
++ void *iocb;
++
++ if (IS_FWI2_CAPABLE(ha)) {
++ atio7_entry_t *a = &prm->tm_iocb2;
++ iocb = a;
++ lun = (uint8_t *)&a->fcp_cmnd.lun;
++ lun_size = sizeof(a->fcp_cmnd.lun);
++ fn = a->fcp_cmnd.task_mgmt_flags;
++ } else {
++ notify_entry_t *n = &prm->tm_iocb;
++ iocb = n;
++ /* make it be in network byte order */
++ lun_data = swab16(le16_to_cpu(n->lun));
++ lun = (uint8_t *)&lun_data;
++ lun_size = sizeof(lun_data);
++ fn = n->task_flags >> IMM_NTFY_TASK_MGMT_SHIFT;
++ }
++ rc = q2t_issue_task_mgmt(sess, lun, lun_size, fn, iocb, 0);
++ break;
++ }
++ default:
++ BUG_ON(1);
++ break;
+ }
+
++ if (rc != 0)
++ goto out_term;
++
++out_put:
+ if (sess != NULL)
+ q2t_sess_put(sess);
+
-+ spin_unlock_irq(&ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
+ mutex_unlock(&ha->tgt_mutex);
+
-+ TRACE_EXIT_RES(res);
-+ return res;
++ TRACE_EXIT();
++ return;
++
++out_term:
++ switch (prm->type) {
++ case Q2T_SESS_WORK_CMD:
++ {
++ struct q2t_cmd *cmd = prm->cmd;
++ TRACE_MGMT_DBG("Terminating work cmd %p", cmd);
++ /*
++ * cmd has not sent to SCST yet, so pass NULL as the second
++ * argument
++ */
++ if (IS_FWI2_CAPABLE(ha))
++ q24_send_term_exchange(ha, NULL, &cmd->atio.atio7, 1);
++ else
++ q2x_send_term_exchange(ha, NULL, &cmd->atio.atio2x, 1);
++ q2t_free_cmd(cmd);
++ break;
++ }
++ case Q2T_SESS_WORK_ABORT:
++ if (IS_FWI2_CAPABLE(ha))
++ q24_send_abts_resp(ha, &prm->abts,
++ SCST_MGMT_STATUS_REJECTED, false);
++ else
++ q2x_send_notify_ack(ha, &prm->tm_iocb, 0,
++ 0, 0, 0, 0, 0);
++ break;
++ case Q2T_SESS_WORK_TM:
++ if (IS_FWI2_CAPABLE(ha))
++ q24_send_term_exchange(ha, NULL, &prm->tm_iocb2, 1);
++ else
++ q2x_send_notify_ack(ha, &prm->tm_iocb, 0,
++ 0, 0, 0, 0, 0);
++ break;
++ default:
++ BUG_ON(1);
++ break;
++ }
++ goto out_put;
+}
+
+static void q2t_sess_work_fn(struct work_struct *work)
+{
+ struct q2t_tgt *tgt = container_of(work, struct q2t_tgt, sess_work);
++ scsi_qla_host_t *pha = to_qla_parent(tgt->ha);
+
+ TRACE_ENTRY();
+
@@ -68512,7 +72713,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+
+ spin_lock_irq(&tgt->sess_work_lock);
+ while (!list_empty(&tgt->sess_works_list)) {
-+ int rc;
+ struct q2t_sess_work_param *prm = list_entry(
+ tgt->sess_works_list.next, typeof(*prm),
+ sess_works_list_entry);
@@ -68525,34 +72725,28 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+
+ spin_unlock_irq(&tgt->sess_work_lock);
+
-+ rc = q2t_exec_sess_work(tgt, prm);
++ q2t_exec_sess_work(tgt, prm);
+
+ spin_lock_irq(&tgt->sess_work_lock);
+
-+ if (rc != 0) {
-+ TRACE_MGMT_DBG("Unable to complete sess work (tgt %p), "
-+ "freeing cmd %p", tgt, prm->cmd);
-+ q2t_free_cmd(prm->cmd);
-+ }
-+
+ kfree(prm);
+ }
+ spin_unlock_irq(&tgt->sess_work_lock);
+
-+ spin_lock_irq(&tgt->ha->hardware_lock);
++ spin_lock_irq(&pha->hardware_lock);
+ spin_lock(&tgt->sess_work_lock);
+ if (list_empty(&tgt->sess_works_list)) {
+ tgt->sess_works_pending = 0;
+ tgt->tm_to_unknown = 0;
+ }
+ spin_unlock(&tgt->sess_work_lock);
-+ spin_unlock_irq(&tgt->ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
+
+ TRACE_EXIT();
+ return;
+}
+
-+/* ha->hardware_lock supposed to be held and IRQs off */
++/* pha->hardware_lock supposed to be held and IRQs off */
+static void q2t_cleanup_hw_pending_cmd(scsi_qla_host_t *ha, struct q2t_cmd *cmd)
+{
+ uint32_t h;
@@ -68572,6 +72766,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ struct q2t_cmd *cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
+ struct q2t_tgt *tgt = cmd->tgt;
+ scsi_qla_host_t *ha = tgt->ha;
++ scsi_qla_host_t *pha = to_qla_parent(ha);
+ unsigned long flags;
+
+ TRACE_ENTRY();
@@ -68579,7 +72774,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ TRACE_MGMT_DBG("Cmd %p HW pending for too long (state %x)", cmd,
+ cmd->state);
+
-+ spin_lock_irqsave(&ha->hardware_lock, flags);
++ spin_lock_irqsave(&pha->hardware_lock, flags);
+
+ if (cmd->sg_mapped)
+ q2t_unmap_sg(ha, cmd);
@@ -68609,7 +72804,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ scst_tgt_cmd_done(scst_cmd, SCST_CONTEXT_THREAD);
+
+out_unlock:
-+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++ spin_unlock_irqrestore(&pha->hardware_lock, flags);
+ TRACE_EXIT();
+ return;
+}
@@ -68617,7 +72812,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+/* Must be called under tgt_host_action_mutex */
+static int q2t_add_target(scsi_qla_host_t *ha)
+{
-+ int res, rc;
++ int res;
++ int rc;
+ char *wwn;
+ int sg_tablesize;
+ struct q2t_tgt *tgt;
@@ -68630,8 +72826,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+
+ tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
+ if (tgt == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "qla2x00t: %s", "Allocation of tgt "
-+ "failed");
++ PRINT_ERROR("qla2x00t: %s", "Allocation of tgt failed");
+ res = -ENOMEM;
+ goto out;
+ }
@@ -68640,9 +72835,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ init_waitqueue_head(&tgt->waitQ);
+ INIT_LIST_HEAD(&tgt->sess_list);
+ INIT_LIST_HEAD(&tgt->del_sess_list);
-+ init_timer(&tgt->sess_del_timer);
-+ tgt->sess_del_timer.data = (unsigned long)tgt;
-+ tgt->sess_del_timer.function = q2t_del_sess_timer_fn;
++ INIT_DELAYED_WORK(&tgt->sess_del_work,
++ (void (*)(struct work_struct *))q2t_del_sess_work_fn);
+ spin_lock_init(&tgt->sess_work_lock);
+ INIT_WORK(&tgt->sess_work, q2t_sess_work_fn);
+ INIT_LIST_HEAD(&tgt->sess_works_list);
@@ -68650,10 +72844,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ INIT_LIST_HEAD(&tgt->srr_ctio_list);
+ INIT_LIST_HEAD(&tgt->srr_imm_list);
+ INIT_WORK(&tgt->srr_work, q2t_handle_srr_work);
++ atomic_set(&tgt->tgt_global_resets_count, 0);
+
+ ha->q2t_tgt = tgt;
+
-+ if (q2t_get_target_name(ha, &wwn) != 0)
++ res = q2t_get_target_name(ha->port_name, &wwn);
++ if (res != 0)
+ goto out_free;
+
+ tgt->scst_tgt = scst_register_target(&tgt2x_template, wwn);
@@ -68703,6 +72899,35 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ PRINT_ERROR("qla2x00t(%ld): Unable to create \"host\" link for "
+ "target %s", ha->instance,
+ scst_get_tgt_name(tgt->scst_tgt));
++ if (!ha->parent) {
++ rc = sysfs_create_file(scst_sysfs_get_tgt_kobj(tgt->scst_tgt),
++ &q2t_hw_target_attr.attr);
++ if (rc != 0)
++ PRINT_ERROR("qla2x00t(%ld): Unable to create "
++ "\"hw_target\" file for target %s",
++ ha->instance, scst_get_tgt_name(tgt->scst_tgt));
++
++ rc = sysfs_create_file(scst_sysfs_get_tgt_kobj(tgt->scst_tgt),
++ &q2t_hw_node_name_attr.attr);
++ if (rc != 0)
++ PRINT_ERROR("qla2x00t(%ld): Unable to create "
++ "\"node_name\" file for HW target %s",
++ ha->instance, scst_get_tgt_name(tgt->scst_tgt));
++ } else {
++ rc = sysfs_create_file(scst_sysfs_get_tgt_kobj(tgt->scst_tgt),
++ &q2t_vp_node_name_attr.attr);
++ if (rc != 0)
++ PRINT_ERROR("qla2x00t(%ld): Unable to create "
++ "\"node_name\" file for NPIV target %s",
++ ha->instance, scst_get_tgt_name(tgt->scst_tgt));
++
++ rc = sysfs_create_file(scst_sysfs_get_tgt_kobj(tgt->scst_tgt),
++ &q2t_vp_parent_host_attr.attr);
++ if (rc != 0)
++ PRINT_ERROR("qla2x00t(%ld): Unable to create "
++ "\"parent_host\" file for NPIV target %s",
++ ha->instance, scst_get_tgt_name(tgt->scst_tgt));
++ }
+
+ scst_tgt_set_sg_tablesize(tgt->scst_tgt, sg_tablesize);
+ scst_tgt_set_tgt_priv(tgt->scst_tgt, tgt);
@@ -68728,7 +72953,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ }
+
+ TRACE_DBG("Unregistering target for host %ld(%p)", ha->host_no, ha);
-+ scst_unregister_target(ha->tgt->scst_tgt);
++ scst_unregister_target(ha->q2t_tgt->scst_tgt);
+ /*
+ * Free of tgt happens via callback q2t_target_release
+ * called from scst_unregister_target, so we shouldn't touch
@@ -68743,6 +72968,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ qla2x_tgt_host_action_t action)
+{
+ int res = 0;
++ scsi_qla_host_t *pha = to_qla_parent(ha);
+
+ TRACE_ENTRY();
+
@@ -68780,10 +73006,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ PRINT_INFO("qla2x00t(%ld): Enabling target mode",
+ ha->instance);
+
-+ spin_lock_irq(&ha->hardware_lock);
++ spin_lock_irq(&pha->hardware_lock);
+ ha->tgt = ha->q2t_tgt;
+ ha->tgt->tgt_stop = 0;
-+ spin_unlock_irq(&ha->hardware_lock);
++ spin_unlock_irq(&pha->hardware_lock);
+ list_for_each_entry_rcu(fcport, &ha->fcports, list) {
+ q2t_fc_port_added(ha, fcport);
+ }
@@ -68845,8 +73071,156 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return qla_tgt_mode_enabled(ha);
+}
+
-+static int q2t_get_initiator_port_transport_id(struct scst_session *scst_sess,
-+ uint8_t **transport_id)
++static int q2t_parse_wwn(const char *ns, u64 *nm)
++{
++ unsigned int i, j;
++ u8 wwn[8];
++
++ /* validate we have enough characters for WWPN */
++ if (strnlen(ns, 23) != 23)
++ return -EINVAL;
++
++ memset(wwn, 0, sizeof(wwn));
++
++ /* Validate and store the new name */
++ for (i = 0, j = 0; i < 16; i++) {
++ if ((*ns >= 'a') && (*ns <= 'f'))
++ j = ((j << 4) | ((*ns++ - 'a') + 10));
++ else if ((*ns >= 'A') && (*ns <= 'F'))
++ j = ((j << 4) | ((*ns++ - 'A') + 10));
++ else if ((*ns >= '0') && (*ns <= '9'))
++ j = ((j << 4) | (*ns++ - '0'));
++ else
++ return -EINVAL;
++ if (i % 2) {
++ wwn[i/2] = j & 0xff;
++ j = 0;
++ if ((i < 15) && (':' != *ns++))
++ return -EINVAL;
++ }
++ }
++
++ *nm = wwn_to_u64(wwn);
++
++ return 0;
++}
++
++static ssize_t q2t_add_vtarget(const char *target_name, char *params)
++{
++ int res;
++ char *param, *p, *pp;
++ u64 port_name, node_name, *pnode_name = NULL;
++ u64 parent_host, *pparent_host = NULL;
++
++ TRACE_ENTRY();
++
++ res = q2t_parse_wwn(target_name, &port_name);
++ if (res) {
++ PRINT_ERROR("qla2x00t: Syntax error at target name %s",
++ target_name);
++ goto out;
++ }
++
++ while (1) {
++ param = scst_get_next_token_str(&params);
++ if (param == NULL)
++ break;
++
++ p = scst_get_next_lexem(&param);
++ if (*p == '\0') {
++ PRINT_ERROR("qla2x00t: Syntax error at %s (target %s)",
++ param, target_name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ pp = scst_get_next_lexem(&param);
++ if (*pp == '\0') {
++ PRINT_ERROR("qla2x00t: Parameter %s value missed for "
++ "target %s", p, target_name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (scst_get_next_lexem(&param)[0] != '\0') {
++ PRINT_ERROR("qla2x00t: Too many parameter's %s values "
++ "(target %s)", p, target_name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (!strcasecmp("node_name", p)) {
++ res = q2t_parse_wwn(pp, &node_name);
++ if (res) {
++ PRINT_ERROR("qla2x00t: Illegal node_name %s "
++ "(target %s)", pp, target_name);
++ res = -EINVAL;
++ goto out;
++ }
++ pnode_name = &node_name;
++ continue;
++ }
++
++ if (!strcasecmp("parent_host", p)) {
++ res = q2t_parse_wwn(pp, &parent_host);
++ if (res != 0) {
++ PRINT_ERROR("qla2x00t: Illegal parent_host %s"
++ " (target %s)", pp, target_name);
++ goto out;
++ }
++ pparent_host = &parent_host;
++ continue;
++ }
++
++ PRINT_ERROR("qla2x00t: Unknown parameter %s (target %s)", p,
++ target_name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (!pnode_name) {
++ PRINT_ERROR("qla2x00t: Missing parameter node_name (target %s)",
++ target_name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ if (!pparent_host) {
++ PRINT_ERROR("qla2x00t: Missing parameter parent_host "
++ "(target %s)", target_name);
++ res = -EINVAL;
++ goto out;
++ }
++
++ res = qla2xxx_add_vtarget(&port_name, pnode_name, pparent_host);
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static ssize_t q2t_del_vtarget(const char *target_name)
++{
++ int res;
++ u64 port_name;
++
++ TRACE_ENTRY();
++
++ res = q2t_parse_wwn(target_name, &port_name);
++ if (res) {
++ PRINT_ERROR("qla2x00t: Syntax error at target name %s",
++ target_name);
++ goto out;
++ }
++
++ res = qla2xxx_del_vtarget(&port_name);
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++}
++
++static int q2t_get_initiator_port_transport_id(struct scst_tgt *tgt,
++ struct scst_session *scst_sess, uint8_t **transport_id)
+{
+ struct q2t_sess *sess;
+ int res = 0;
@@ -68909,14 +73283,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+{
+ struct scst_tgt *scst_tgt;
+ struct q2t_tgt *tgt;
-+ scsi_qla_host_t *ha;
++ scsi_qla_host_t *ha, *pha;
+ unsigned long flags;
+
+ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
+ tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
+ ha = tgt->ha;
++ pha = to_qla_parent(ha);
+
-+ spin_lock_irqsave(&ha->hardware_lock, flags);
++ spin_lock_irqsave(&pha->hardware_lock, flags);
+
+ switch (buffer[0]) {
+ case '0':
@@ -68935,7 +73310,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ break;
+ }
+
-+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++ spin_unlock_irqrestore(&pha->hardware_lock, flags);
+
+ return size;
+}
@@ -68984,6 +73359,126 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+ return strlen(buf);
+}
+
++static ssize_t q2t_hw_target_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ return sprintf(buf, "%d\n", 1);
++}
++
++static ssize_t q2t_node_name_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_tgt *scst_tgt;
++ struct q2t_tgt *tgt;
++ scsi_qla_host_t *ha;
++ ssize_t res;
++ char *wwn;
++ uint8_t *node_name;
++
++ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
++ ha = tgt->ha;
++
++ if (ha->parent == NULL) {
++ if (qla_tgt_mode_enabled(ha) || !ha->node_name_set)
++ node_name = ha->node_name;
++ else
++ node_name = ha->tgt_node_name;
++ } else
++ node_name = ha->node_name;
++
++ res = q2t_get_target_name(node_name, &wwn);
++ if (res != 0)
++ goto out;
++
++ res = sprintf(buf, "%s\n", wwn);
++ if ((ha->parent != NULL) || ha->node_name_set)
++ res += sprintf(&buf[res], "%s\n", SCST_SYSFS_KEY_MARK);
++
++ kfree(wwn);
++
++out:
++ return res;
++}
++
++static ssize_t q2t_node_name_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buffer, size_t size)
++{
++ struct scst_tgt *scst_tgt;
++ struct q2t_tgt *tgt;
++ scsi_qla_host_t *ha;
++ u64 node_name, old_node_name;
++ int res;
++
++ TRACE_ENTRY();
++
++ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
++ ha = tgt->ha;
++
++ BUG_ON(ha->parent != NULL);
++
++ if (size == 0)
++ goto out_default;
++
++ res = q2t_parse_wwn(buffer, &node_name);
++ if (res != 0) {
++ if ((buffer[0] == '\0') || (buffer[0] == '\n'))
++ goto out_default;
++ PRINT_ERROR("qla2x00t(%ld): Wrong node name", ha->instance);
++ goto out;
++ }
++
++ old_node_name = wwn_to_u64(ha->node_name);
++ if (old_node_name == node_name)
++ goto out_success;
++
++ u64_to_wwn(node_name, ha->tgt_node_name);
++ ha->node_name_set = 1;
++
++abort:
++ if (qla_tgt_mode_enabled(ha)) {
++ set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
++ qla2x00_wait_for_hba_online(ha);
++ }
++
++out_success:
++ res = size;
++
++out:
++ TRACE_EXIT_RES(res);
++ return res;
++
++out_default:
++ ha->node_name_set = 0;
++ goto abort;
++}
++
++static ssize_t q2t_vp_parent_host_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_tgt *scst_tgt;
++ struct q2t_tgt *tgt;
++ scsi_qla_host_t *ha;
++ ssize_t res;
++ char *wwn;
++
++ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
++ ha = to_qla_parent(tgt->ha);
++
++ res = q2t_get_target_name(ha->port_name, &wwn);
++ if (res != 0)
++ goto out;
++
++ res = sprintf(buf, "%s\n%s\n", wwn, SCST_SYSFS_KEY_MARK);
++
++ kfree(wwn);
++
++out:
++ return res;
++}
++
+static uint16_t q2t_get_scsi_transport_version(struct scst_tgt *scst_tgt)
+{
+ /* FCP-2 */
@@ -69089,17 +73584,18 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.c linux-2.6.36
+MODULE_DESCRIPTION("Target mode addon for qla2[2,3,4,5+]xx");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(Q2T_VERSION_STRING);
-diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.h linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.h
---- orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.h
-+++ linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.h
-@@ -0,0 +1,273 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/qla2xxx-target/qla2x00t.h linux-2.6.39/drivers/scst/qla2xxx-target/qla2x00t.h
+--- orig/linux-2.6.39/drivers/scst/qla2xxx-target/qla2x00t.h
++++ linux-2.6.39/drivers/scst/qla2xxx-target/qla2x00t.h
+@@ -0,0 +1,287 @@
+/*
+ * qla2x00t.h
+ *
-+ * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
+ * Copyright (C) 2004 - 2005 Leonid Stoljar
+ * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
+ * Copyright (C) 2006 - 2010 ID7 Ltd.
++ * Copyright (C) 2010 - 2011 SCST Ltd.
+ *
+ * QLogic 22xx/23xx/24xx/25xx FC target driver.
+ *
@@ -69125,8 +73621,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.h linux-2.6.36
+
+/* Version numbers, the same as for the kernel */
+#define Q2T_VERSION(a, b, c, d) (((a) << 030) + ((b) << 020) + (c) << 010 + (d))
-+#define Q2T_VERSION_CODE Q2T_VERSION(1, 0, 2, 0)
-+#define Q2T_VERSION_STRING "2.0.0"
++#define Q2T_VERSION_CODE Q2T_VERSION(2, 1, 0, 0)
++#define Q2T_VERSION_STRING "2.1.0"
+#define Q2T_PROC_VERSION_NAME "version"
+
+#define Q2T_MAX_CDB_LEN 16
@@ -69213,14 +73709,14 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.h linux-2.6.36
+
+ /*
+ * To sync between IRQ handlers and q2t_target_release(). Needed,
-+ * because req_pkt() can drop/reaquire HW lock inside. Protected by
++ * because req_pkt() can drop/reacquire HW lock inside. Protected by
+ * HW lock.
+ */
+ int irq_cmd_count;
+
+ int datasegs_per_cmd, datasegs_per_cont;
+
-+ /* Target's flags, serialized by ha->hardware_lock */
++ /* Target's flags, serialized by pha->hardware_lock */
+ unsigned int tgt_enable_64bit_addr:1; /* 64-bits PCI addressing enabled */
+ unsigned int link_reinit_iocb_pending:1;
+ unsigned int tm_to_unknown:1; /* TM to unknown session was sent */
@@ -69240,7 +73736,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.h linux-2.6.36
+
+ /* Protected by hardware_lock */
+ struct list_head del_sess_list;
-+ struct timer_list sess_del_timer;
++ struct delayed_work sess_del_work;
+
+ spinlock_t sess_work_lock;
+ struct list_head sess_works_list;
@@ -69259,6 +73755,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.h linux-2.6.36
+ struct list_head srr_imm_list;
+ struct work_struct srr_work;
+
++ atomic_t tgt_global_resets_count;
++
+ struct list_head tgt_list_entry;
+};
+
@@ -69316,7 +73814,18 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.h linux-2.6.36
+
+struct q2t_sess_work_param {
+ struct list_head sess_works_list_entry;
-+ struct q2t_cmd *cmd;
++
++#define Q2T_SESS_WORK_CMD 0
++#define Q2T_SESS_WORK_ABORT 1
++#define Q2T_SESS_WORK_TM 2
++ int type;
++
++ union {
++ struct q2t_cmd *cmd;
++ abts24_recv_entry_t abts;
++ notify_entry_t tm_iocb;
++ atio7_entry_t tm_iocb2;
++ };
+};
+
+struct q2t_mgmt_cmd {
@@ -69366,15 +73875,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/qla2xxx-target/qla2x00t.h linux-2.6.36
+#define Q2T_XMIT_ALL (Q2T_XMIT_STATUS|Q2T_XMIT_DATA)
+
+#endif /* __QLA2X00T_H */
-diff -uprN orig/linux-2.6.36/Documentation/scst/README.qla2x00t linux-2.6.36/Documentation/scst/README.qla2x00t
---- orig/linux-2.6.36/Documentation/scst/README.qla2x00t
-+++ linux-2.6.36/Documentation/scst/README.qla2x00t
-@@ -0,0 +1,526 @@
-+Target driver for Qlogic 22xx/23xx/24xx/25xx Fibre Channel cards
+diff -uprN orig/linux-2.6.39/Documentation/scst/README.qla2x00t linux-2.6.39/Documentation/scst/README.qla2x00t
+--- orig/linux-2.6.39/Documentation/scst/README.qla2x00t
++++ linux-2.6.39/Documentation/scst/README.qla2x00t
+@@ -0,0 +1,572 @@
++Target driver for QLogic 22xx/23xx/24xx/25xx Fibre Channel cards
+================================================================
+
-+Version 2.0.0, XX XXXXX 2010
-+----------------------------
++Version 2.1.0
++-------------
+
+This driver consists from two parts: the target mode driver itself and
+the changed initiator driver from Linux kernel, which is, particularly,
@@ -69389,14 +73898,6 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.qla2x00t linux-2.6.36/Doc
+supported, because it's too hard to backport used initiator driver to
+older kernels.
+
-+NPIV is partially supported by this driver. You can create virtual
-+targets using standard Linux interface by echoing wwpn:wwnn into
-+/sys/class/fc_host/hostX/vport_create and work with them, but SCST core
-+will not see those virtual targets and, hence, provide the
-+target-oriented access control for them. However, the initiator-oriented
-+access control will still work very well. Note, you need NPIV-supporting
-+firmware as well as NPIV-supporting switches to use NPIV.
-+
+The original initiator driver was taken from the kernel 2.6.26. Also the
+following 2.6.26.x commits have been applied to it (upstream ID):
+048feec5548c0582ee96148c61b87cccbcb5f9be,
@@ -69404,11 +73905,13 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.qla2x00t linux-2.6.36/Doc
+5f3a9a207f1fccde476dd31b4c63ead2967d934f,
+85821c906cf3563a00a3d98fa380a2581a7a5ff1,
+3c01b4f9fbb43fc911acd33ea7a14ea7a4f9866b,
-+8eca3f39c4b11320787f7b216f63214aee8415a9.
++8eca3f39c4b11320787f7b216f63214aee8415a9,
++0f19bc681ed0849a2b95778460a0a8132e3700e2.
+
+See also "ToDo" file for list of known issues and unimplemented
+features.
+
++
+Installation
+------------
+
@@ -69426,6 +73929,14 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.qla2x00t linux-2.6.36/Doc
+"/lib/modules/`you_kernel_version`/build" points to the source code for
+your currently running kernel.
+
++If your kernel version is <2.6.28, then you should consider applying
++kernel patch scst_fc_vport_create.patch from the "kernel" subdirectory.
++Without it, creating and removing NPIV targets using SCST sysfs
++interface will be disabled. NOTE: you will still be able to create and
++remove NPIV targets using the standard Linux interface (i.e. echoing
++wwpn:wwnn into /sys/class/fc_host/hostX/vport_create and
++/sys/class/fc_host/hostX/vport_delete).
++
+Then you should replace (or link) by the initiator driver from this
+package "qla2xxx" subdirectory in kernel_source/drivers/scsi/ of the
+currently running kernel and using your favorite kernel configuration
@@ -69452,6 +73963,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.qla2x00t linux-2.6.36/Doc
+/lib/modules/`you_kernel_version`/extra. To uninstall it, type 'make
+uninstall'.
+
++
+Usage
+-----
+
@@ -69477,6 +73989,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.qla2x00t linux-2.6.36/Doc
+http://scst.sourceforge.net/qla2x00t-howto.html and
+https://forums.openfiler.com/viewtopic.php?id=3422.
+
++
+IMPORTANT USAGE NOTES
+---------------------
+
@@ -69489,17 +74002,9 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.qla2x00t linux-2.6.36/Doc
+module, otherwise your initiators could not see the target, when it is
+enabled after qla2x00tgt module load.
+
-+3. If you delete and then add back with the same WWN an NPIV initiator
-+on the initiator side, make sure it has the same port_id as well. In
-+Fibre Channel initiators identified by port_id (s_id in FC terms), so if
-+the recreated NPIV initiator has another port_id, which was already used
-+by another (NPIV) initiator, those initiators could be confused by the
-+target and assigned to incorrect security groups, hence they could see
-+incorrect LUNs.
++3. You need to issue LIP after you enabled a target, if you enabled it
++after one or more its initiators already started.
+
-+If you can't ensure the same port_id's for recreated initiators, it is
-+safer to restart qla2x00tgt and qla2xxx modules on the target to make
-+sure the target doesn't have any initiator port_id cached.
+
+Initiator and target modes
+--------------------------
@@ -69508,7 +74013,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.qla2x00t linux-2.6.36/Doc
+parameter "qlini_mode", which determines when initiator mode will be
+enabled. Possible values:
+
-+ - "exclusive" (default ) - initiator mode will be enabled on load,
++ - "exclusive" (default) - initiator mode will be enabled on load,
+disabled on enabling target mode and then on disabling target mode
+enabled back.
+
@@ -69518,15 +74023,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.qla2x00t linux-2.6.36/Doc
+
+Usage of mode "disabled" is recommended, if you have incorrectly
+functioning your target's initiators, which if once seen a port in
-+initiator mode, later refuse to see it as a target. Although this mode
-+does make a noticeable difference, it isn't absolutely strong, since the
-+firmware once initialized requires a HBA to be in either initiator, or
-+target mode, so until you enable target mode on a port, your initiators
-+will report this port as working in initiator mode. If you need
-+absolutely strong assurance that initiator mode never enabled, you can
-+consider using patch
-+unsupported-patches/qla_delayed_hw_init_tgt_mode_from_the_beginning.diff.
-+See description of it inside the patch.
++initiator mode, later refuse to see it as a target.
+
+Use mode "enabled" if you need your QLA adapters to work in both
+initiator and target modes at the same time.
@@ -69539,6 +74036,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.qla2x00t linux-2.6.36/Doc
+particular port. Setting this attribute to 1 will reverse current status
+of the initiator mode from enabled to disabled and vice versa.
+
++
+Explicit conformation
+---------------------
+
@@ -69552,16 +74050,46 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.qla2x00t linux-2.6.36/Doc
+noticed, if it is enabled. Supported only for 23xx+. Disabled by
+default.
+
++
+Class 2
+-------
+
-+Class 2 is the close equivalent of the TCP in the IP world. If you
++Class 2 is the close equivalent of TCP in the network world. If you
+enable it, all the Fibre Channel packets will be acknowledged. By
-+default, class 3 is used, which is UDP-like. Enable it by echoing "1" in
-+/sys/kernel/scst_tgt/targets/qla2x00t/target/host/class2_enabled. This
-+option needs a special firmware with class 2 support. Disabled by
++default, class 3 is used, which is UDP-like. Enable class 2 by echoing
++"1" in /sys/kernel/scst_tgt/targets/qla2x00t/target/host/class2_enabled.
++This option needs a special firmware with class 2 support. Disabled by
+default.
+
++
++N_Port ID Virtualization
++------------------------
++
++N_Port ID Virtualization (NPIV) is a Fibre Channel facility allowing
++multiple N_Port IDs to share a single physical N_Port. NPIV is fully
++supported by this driver. You must have 24xx+ ISPs with NPIV-supporting
++firmware as well as NPIV-supporting switch(es) to use this facility.
++
++You can add NPIV targets by echoing:
++
++add_target target_name node_name=node_name_value; parent_host=parent_host_value
++
++in /sys/kernel/scst_tgt/targets/qla2x00t/mgmt.
++
++Removing NPIV targets is done by echoing:
++
++del_target target_name
++
++in /sys/kernel/scst_tgt/targets/qla2x00t/mgmt.
++
++Also, you can create and remove NPIV targets using the standard Linux
++interface (i.e. echoing wwpn:wwnn into /sys/class/fc_host/hostX/vport_create
++and /sys/class/fc_host/hostX/vport_delete).
++
++It is recommended to use the scstadmin utility and its config file to
++configure virtual NPIV targets instead of the above direct interface.
++
++
+Compilation options
+-------------------
+
@@ -69585,6 +74113,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.qla2x00t linux-2.6.36/Doc
+ debugging. In this mode some CTIOs will be "broken" to force the
+ initiator to issue a retransmit request.
+
++
+Sysfs interface
+---------------
+
@@ -69605,6 +74134,12 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.qla2x00t linux-2.6.36/Doc
+ - version - read-only attribute, which allows to see version of
+ this driver and enabled optional features.
+
++ - mgmt - main management entry, which allows to configure NPIV targets.
++ See content of this file for help how to use it.
++
++ - hw_target (hardware target only) - read-only attribute with value 1.
++ It allows to distinguish hardware and virtual targets.
++
+Each target subdirectory contains the following entries:
+
+ - host - link pointing on the corresponding scsi_host of the initiator
@@ -69635,6 +74170,12 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.qla2x00t linux-2.6.36/Doc
+ until rel_tgt_id becomes unique. This attribute initialized unique by
+ SCST by default.
+
++ - node_name (NPIV targets only) - read-only attribute, which allows to see
++ the target World Wide Node Name.
++
++ - parent_host (NPIV target only) - read-only attribute, which allows to see
++ the parent HBA World Wide Port Name (WWPN).
++
+Subdirectory "sessions" contains one subdirectory for each connected
+session with name equal to port name of the connected initiator.
+
@@ -69647,9 +74188,10 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.qla2x00t linux-2.6.36/Doc
+
+ - commands - contains overall number of SCSI commands in this session.
+
-+Below is a sample script, which configures 1 virtual disk "disk1" using
-+/disk1 image for usage with 25:00:00:f0:98:87:92:f3 target. All
-+initiators connected to this target will see this device.
++Below is a sample script, which configures 2 virtual disks: "disk1" using
++/disk1 image for usage with 25:00:00:f0:98:87:92:f3 hardware target, and
++"disk2" using /disk2 image for usage with 50:50:00:00:00:00:00:11 NPIV
++target. All initiators connected to these targets will see those devices.
+
+#!/bin/bash
+
@@ -69657,11 +74199,17 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.qla2x00t linux-2.6.36/Doc
+modprobe scst_vdisk
+
+echo "add_device disk1 filename=/disk1; nv_cache=1" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
++echo "add_device disk2 filename=/disk2; nv_cache=1" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
+
+modprobe qla2x00tgt
+
++echo "add_target 50:50:00:00:00:00:00:11 node_name=50:50:00:00:00:00:00:00;parent_host=25:00:00:f0:98:87:92:f3" >\
++/sys/kernel/scst_tgt/targets/qla2x00t/mgmt
++
+echo "add disk1 0" >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/luns/mgmt
++echo "add disk2 0" >/sys/kernel/scst_tgt/targets/qla2x00t/50:50:00:00:00:00:00:11/luns/mgmt
+echo 1 >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/enabled
++echo 1 >/sys/kernel/scst_tgt/targets/qla2x00t/50:50:00:00:00:00:00:11/enabled
+
+Below is another sample script, which configures 1 real local SCSI disk
+0:0:1:0 for usage with 25:00:00:f0:98:87:92:f3 target:
@@ -69850,6 +74398,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.qla2x00t linux-2.6.36/Doc
+| | | | `-- read_only
+| | | `-- mgmt
+| | |-- rel_tgt_id
++| | |-- hw_target
+| | `-- sessions
+| | `-- 25:00:00:f0:99:87:94:a3
+| | |-- active_commands
@@ -69857,11 +74406,13 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.qla2x00t linux-2.6.36/Doc
+| | |-- initiator_name
+| | `-- luns -> ../../ini_groups/25:00:00:f0:99:87:94:a3/luns
+| |-- trace_level
-+| `-- version
++| |-- version
++| `-- mgmt
+|-- threads
+|-- trace_level
+`-- version
+
++
+Performance advices
+-------------------
+
@@ -69879,6 +74430,7 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.qla2x00t linux-2.6.36/Doc
+2. See SCST core's README for more advices. Especially pay attention to
+have io_grouping_type option set correctly.
+
++
+Credits
+-------
+
@@ -69895,10 +74447,143 @@ diff -uprN orig/linux-2.6.36/Documentation/scst/README.qla2x00t linux-2.6.36/Doc
+
+ * Ming Zhang <mingz@ele.uri.edu> for fixes.
+
++ * Uri Yanai <Uri.Yanai@ngsoft.com> and Dorit Halsadi
++<Dorit.Halsadi@dothill.com> for adding full NPIV support.
++
+Vladislav Bolkhovitin <vst@vlnb.net>, http://scst.sourceforge.net
-diff -uprN orig/linux-2.6.36/drivers/scst/srpt/Kconfig linux-2.6.36/drivers/scst/srpt/Kconfig
---- orig/linux-2.6.36/drivers/scst/srpt/Kconfig
-+++ linux-2.6.36/drivers/scst/srpt/Kconfig
+This patch adds the kernel module ib_srpt, which is a SCSI RDMA Protocol (SRP)
+target implementation. This driver uses the InfiniBand stack and the SCST core.
+
+It is a high performance driver capable of handling 600K+ 4K random write
+IOPS by a single target as well as 2.5+ GB/s sequential throughput over
+a single QDR IB port.
+
+It was originally developed by Vu Pham (Mellanox) and has been optimized by
+Bart Van Assche.
+
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Cc: Vu Pham <vu@mellanox.com>
+Cc: Roland Dreier <rdreier@cisco.com>
+Cc: David Dillow <dillowda@ornl.gov>
+diff -uprN orig/linux-2.6.39/Documentation/scst/README.srpt linux-2.6.39/Documentation/scst/README.srpt
+--- orig/linux-2.6.39/Documentation/scst/README.srpt
++++ linux-2.6.39/Documentation/scst/README.srpt
+@@ -0,0 +1,112 @@
++SCSI RDMA Protocol (SRP) Target driver for Linux
++=================================================
++
++The SRP Target driver is designed to work directly on top of the
++OpenFabrics OFED-1.x software stack (http://www.openfabrics.org) or
++the Infiniband drivers in the Linux kernel tree
++(http://www.kernel.org). The SRP target driver also interfaces with
++the generic SCSI target mid-level driver called SCST
++(http://scst.sourceforge.net).
++
++How-to run
++-----------
++
++A. On srp target machine
++1. Please refer to SCST's README for loading scst driver and its
++dev_handlers drivers (scst_disk, scst_vdisk block or file IO mode, nullio, ...)
++
++Example 1: working with real back-end scsi disks
++a. modprobe scst
++b. modprobe scst_disk
++c. cat /proc/scsi_tgt/scsi_tgt
++
++ibstor00:~ # cat /proc/scsi_tgt/scsi_tgt
++Device (host:ch:id:lun or name) Device handler
++0:0:0:0 dev_disk
++4:0:0:0 dev_disk
++5:0:0:0 dev_disk
++6:0:0:0 dev_disk
++7:0:0:0 dev_disk
++
++Now you want to exclude the first scsi disk and expose the last 4 scsi disks as
++IB/SRP luns for I/O
++echo "add 4:0:0:0 0" >/proc/scsi_tgt/groups/Default/devices
++echo "add 5:0:0:0 1" >/proc/scsi_tgt/groups/Default/devices
++echo "add 6:0:0:0 2" >/proc/scsi_tgt/groups/Default/devices
++echo "add 7:0:0:0 3" >/proc/scsi_tgt/groups/Default/devices
++
++Example 2: working with VDISK FILEIO mode (using md0 device and file 10G-file)
++a. modprobe scst
++b. modprobe scst_vdisk
++c. echo "open vdisk0 /dev/md0" > /proc/scsi_tgt/vdisk/vdisk
++d. echo "open vdisk1 /10G-file" > /proc/scsi_tgt/vdisk/vdisk
++e. echo "add vdisk0 0" >/proc/scsi_tgt/groups/Default/devices
++f. echo "add vdisk1 1" >/proc/scsi_tgt/groups/Default/devices
++
++Example 3: working with VDISK BLOCKIO mode (using md0 device, sda, and cciss/c1d0)
++a. modprobe scst
++b. modprobe scst_vdisk
++c. echo "open vdisk0 /dev/md0 BLOCKIO" > /proc/scsi_tgt/vdisk/vdisk
++d. echo "open vdisk1 /dev/sda BLOCKIO" > /proc/scsi_tgt/vdisk/vdisk
++e. echo "open vdisk2 /dev/cciss/c1d0 BLOCKIO" > /proc/scsi_tgt/vdisk/vdisk
++f. echo "add vdisk0 0" >/proc/scsi_tgt/groups/Default/devices
++g. echo "add vdisk1 1" >/proc/scsi_tgt/groups/Default/devices
++h. echo "add vdisk2 2" >/proc/scsi_tgt/groups/Default/devices
++
++2. modprobe ib_srpt
++
++
++B. On initiator machines you can manually do the following steps:
++1. modprobe ib_srp
++2. ibsrpdm -c (to discover new SRP target)
++3. echo <new target info> > /sys/class/infiniband_srp/srp-mthca0-1/add_target
++4. fdisk -l (will show new discovered scsi disks)
++
++Example:
++Assume that you use port 1 of first HCA in the system ie. mthca0
++
++[root@lab104 ~]# ibsrpdm -c -d /dev/infiniband/umad0
++id_ext=0002c90200226cf4,ioc_guid=0002c90200226cf4,
++dgid=fe800000000000000002c90200226cf5,pkey=ffff,service_id=0002c90200226cf4
++[root@lab104 ~]# echo id_ext=0002c90200226cf4,ioc_guid=0002c90200226cf4,
++dgid=fe800000000000000002c90200226cf5,pkey=ffff,service_id=0002c90200226cf4 >
++/sys/class/infiniband_srp/srp-mthca0-1/add_target
++
++OR
++
+++ You can edit /etc/infiniband/openib.conf to load srp driver and srp HA daemon
++automatically ie. set SRP_LOAD=yes, and SRPHA_ENABLE=yes
+++ To set up and use high availability feature you need dm-multipath driver
++and multipath tool
+++ Please refer to OFED-1.x SRP's user manual for more in-details instructions
++on how-to enable/use HA feature
++
++To minimize QUEUE_FULL conditions, you can apply scst_increase_max_tgt_cmds
++patch from SRPT package from http://sourceforge.net/project/showfiles.php?group_id=110471
++
++
++Performance notes
++-----------------
++
++In some cases, for instance working with SSD devices, which consume 100%
++of a single CPU load for data transfers in their internal threads, to
++maximize IOPS it can be needed to assign for those threads dedicated
++CPUs using Linux CPU affinity facilities. No IRQ processing should be
++done on those CPUs. Check that using /proc/interrupts. See taskset
++command and Documentation/IRQ-affinity.txt in your kernel's source tree
++for how to assign CPU affinity to tasks and IRQs.
++
++The reason for that is that processing of coming commands in SIRQ context
++can be done on the same CPUs as SSD devices' threads doing data
++transfers. As the result, those threads won't receive all the CPU power
++and perform worse.
++
++Alternatively to CPU affinity assignment, you can try to enable SRP
++target's internal thread. It will allow the Linux CPU scheduler to better
++distribute load among available CPUs. To enable SRP target driver's
++internal thread you should load ib_srpt module with parameter
++"thread=1".
++
++
++Send questions about this driver to scst-devel@lists.sourceforge.net, CC:
++Vu Pham <vuhuong@mellanox.com> and Bart Van Assche <bvanassche@acm.org>.
+diff -uprN orig/linux-2.6.39/drivers/scst/srpt/Kconfig linux-2.6.39/drivers/scst/srpt/Kconfig
+--- orig/linux-2.6.39/drivers/scst/srpt/Kconfig
++++ linux-2.6.39/drivers/scst/srpt/Kconfig
@@ -0,0 +1,12 @@
+config SCST_SRPT
+ tristate "InfiniBand SCSI RDMA Protocol target support"
@@ -69912,14 +74597,14 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/Kconfig linux-2.6.36/drivers/scst
+ supported by InfiniBand and by iWarp network hardware. More
+ information about the SRP protocol can be found on the website
+ of the INCITS T10 technical committee (http://www.t10.org/).
-diff -uprN orig/linux-2.6.36/drivers/scst/srpt/Makefile linux-2.6.36/drivers/scst/srpt/Makefile
---- orig/linux-2.6.36/drivers/scst/srpt/Makefile
-+++ linux-2.6.36/drivers/scst/srpt/Makefile
+diff -uprN orig/linux-2.6.39/drivers/scst/srpt/Makefile linux-2.6.39/drivers/scst/srpt/Makefile
+--- orig/linux-2.6.39/drivers/scst/srpt/Makefile
++++ linux-2.6.39/drivers/scst/srpt/Makefile
@@ -0,0 +1,1 @@
+obj-$(CONFIG_SCST_SRPT) += ib_srpt.o
-diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_dm_mad.h linux-2.6.36/drivers/scst/srpt/ib_dm_mad.h
---- orig/linux-2.6.36/drivers/scst/srpt/ib_dm_mad.h
-+++ linux-2.6.36/drivers/scst/srpt/ib_dm_mad.h
+diff -uprN orig/linux-2.6.39/drivers/scst/srpt/ib_dm_mad.h linux-2.6.39/drivers/scst/srpt/ib_dm_mad.h
+--- orig/linux-2.6.39/drivers/scst/srpt/ib_dm_mad.h
++++ linux-2.6.39/drivers/scst/srpt/ib_dm_mad.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
@@ -70060,14 +74745,14 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_dm_mad.h linux-2.6.36/drivers/
+};
+
+#endif
-diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/scst/srpt/ib_srpt.c
---- orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c
-+++ linux-2.6.36/drivers/scst/srpt/ib_srpt.c
-@@ -0,0 +1,3698 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/srpt/ib_srpt.c linux-2.6.39/drivers/scst/srpt/ib_srpt.c
+--- orig/linux-2.6.39/drivers/scst/srpt/ib_srpt.c
++++ linux-2.6.39/drivers/scst/srpt/ib_srpt.c
+@@ -0,0 +1,3812 @@
+/*
+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
++ * Copyright (C) 2008 - 2010 Bart Van Assche <bvanassche@acm.org>.
+ * Copyright (C) 2008 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2008 - 2010 Bart Van Assche <bart.vanassche@gmail.com>
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
@@ -70114,8 +74799,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+
+/* Name of this kernel module. */
+#define DRV_NAME "ib_srpt"
-+#define DRV_VERSION "2.0.0"
-+#define DRV_RELDATE "October 25, 2010"
++#define DRV_VERSION "2.1.0-pre"
++#define DRV_RELDATE "(not yet released)"
+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
+/* Flags to be used in SCST debug tracing statements. */
+#define DEFAULT_SRPT_TRACE_FLAGS (TRACE_OUT_OF_MEM | TRACE_MINOR \
@@ -70194,6 +74879,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ "Use target port ID in the SCST session name such that"
+ " redundant paths between multiport systems can be masked.");
+
++static bool use_node_guid_in_target_name;
++module_param(use_node_guid_in_target_name, bool, 0444);
++MODULE_PARM_DESC(use_node_guid_in_target_name,
++ "Use target node GUIDs of HCAs as SCST target names.");
++
+static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
+{
+ return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
@@ -70209,7 +74899,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+static void srpt_unregister_mad_agent(struct srpt_device *sdev);
+static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx);
-+static void srpt_release_channel(struct scst_session *scst_sess);
++static void srpt_free_ch(struct scst_session *sess);
+
+static struct ib_client srpt_client = {
+ .name = DRV_NAME,
@@ -70217,22 +74907,62 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ .remove = srpt_remove_one
+};
+
++static enum rdma_ch_state srpt_set_ch_state_to_disc(struct srpt_rdma_ch *ch)
++{
++ unsigned long flags;
++ enum rdma_ch_state prev;
++
++ spin_lock_irqsave(&ch->spinlock, flags);
++ prev = atomic_read(&ch->state);
++ switch (prev) {
++ case CH_CONNECTING:
++ case CH_LIVE:
++ atomic_set(&ch->state, CH_DISCONNECTING);
++ break;
++ default:
++ break;
++ }
++ spin_unlock_irqrestore(&ch->spinlock, flags);
++
++ return prev;
++}
++
++static bool srpt_set_ch_state_to_draining(struct srpt_rdma_ch *ch)
++{
++ unsigned long flags;
++ bool changed_state = false;
++
++ spin_lock_irqsave(&ch->spinlock, flags);
++ switch (atomic_read(&ch->state)) {
++ case CH_CONNECTING:
++ case CH_LIVE:
++ case CH_DISCONNECTING:
++ atomic_set(&ch->state, CH_DRAINING);
++ changed_state = true;
++ break;
++ default:
++ break;
++ }
++ spin_unlock_irqrestore(&ch->spinlock, flags);
++
++ return changed_state;
++}
++
+/**
+ * srpt_test_and_set_channel_state() - Test and set the channel state.
+ *
+ * @ch: RDMA channel.
+ * @old: channel state to compare with.
-+ * @new: state to change the channel state to if the current state matches the
-+ * argument 'old'.
++ * @new: state to change the channel state to if the current state matches @old.
+ *
-+ * Returns the previous channel state.
++ * Returns true if and only if the channel state did match @old.
+ */
-+static enum rdma_ch_state
++static bool
+srpt_test_and_set_channel_state(struct srpt_rdma_ch *ch,
+ enum rdma_ch_state old,
+ enum rdma_ch_state new)
+{
-+ return atomic_cmpxchg(&ch->state, old, new);
++ return atomic_cmpxchg(&ch->state, old, new) == old;
+}
+
+/**
@@ -70314,11 +75044,14 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ ib_cm_notify(ch->cm_id, event->event);
+ break;
+ case IB_EVENT_QP_LAST_WQE_REACHED:
-+ if (srpt_test_and_set_channel_state(ch, RDMA_CHANNEL_LIVE,
-+ RDMA_CHANNEL_DISCONNECTING) == RDMA_CHANNEL_LIVE) {
-+ PRINT_INFO("disconnected session %s.", ch->sess_name);
-+ ib_send_cm_dreq(ch->cm_id, NULL, 0);
-+ }
++ TRACE_DBG("%s: received IB_EVENT_QP_LAST_WQE_REACHED",
++ ch->sess_name);
++ if (srpt_test_and_set_channel_state(ch, CH_DRAINING,
++ CH_RELEASING))
++ wake_up_process(ch->thread);
++ else
++ TRACE_DBG("%s: state %d - ignored LAST_WQE.",
++ ch->sess_name, atomic_read(&ch->state));
+ break;
+ default:
+ PRINT_ERROR("received unrecognized IB QP event %d",
@@ -70756,8 +75489,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+
+ TRACE_ENTRY();
+
-+ WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
-+ && ioctx_size != sizeof(struct srpt_send_ioctx));
++ WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx) &&
++ ioctx_size != sizeof(struct srpt_send_ioctx));
+ WARN_ON(dma_size != srp_max_req_size && dma_size != srp_max_rsp_size);
+
+ ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
@@ -70776,7 +75509,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ srpt_free_ioctx(sdev, ring[i], dma_size, dir);
+ kfree(ring);
+out:
-+ TRACE_EXIT_RES(ring);
++ TRACE_EXIT_HRES(ring);
+ return ring;
+}
+
@@ -70857,7 +75590,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ struct ib_recv_wr wr, *bad_wr;
+
+ BUG_ON(!sdev);
-+ wr.wr_id = encode_wr_id(IB_WC_RECV, ioctx->ioctx.index);
++ wr.wr_id = encode_wr_id(SRPT_RECV, ioctx->ioctx.index);
+
+ list.addr = ioctx->ioctx.dma;
+ list.length = srp_max_req_size;
@@ -70900,7 +75633,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ list.lkey = sdev->mr->lkey;
+
+ wr.next = NULL;
-+ wr.wr_id = encode_wr_id(IB_WC_SEND, ioctx->ioctx.index);
++ wr.wr_id = encode_wr_id(SRPT_SEND, ioctx->ioctx.index);
+ wr.sg_list = &list;
+ wr.num_sge = 1;
+ wr.opcode = IB_WR_SEND;
@@ -71065,6 +75798,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ int attr_mask;
+ int ret;
+
++ TRACE_ENTRY();
++
+ qp_attr.qp_state = IB_QPS_RTR;
+ ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
+ if (ret)
@@ -71075,6 +75810,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ ret = ib_modify_qp(qp, &qp_attr, attr_mask);
+
+out:
++ TRACE_EXIT_RES(ret);
+ return ret;
+}
+
@@ -71084,27 +75820,78 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ * @qp: queue pair to change the state of.
+ *
+ * Returns zero upon success and a negative value upon failure.
-+ *
-+ * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
-+ * If this structure ever becomes larger, it might be necessary to allocate
-+ * it dynamically instead of on the stack.
+ */
+static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
+{
-+ struct ib_qp_attr qp_attr;
++ struct ib_qp_attr *attr;
+ int attr_mask;
+ int ret;
++ uint64_t T_tr_ns;
++ uint32_t max_compl_time_ms;
+
-+ qp_attr.qp_state = IB_QPS_RTS;
-+ ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
++ TRACE_ENTRY();
++
++ attr = kzalloc(sizeof *attr, GFP_KERNEL);
++ if (!attr)
++ return -ENOMEM;
++
++ attr->qp_state = IB_QPS_RTS;
++ ret = ib_cm_init_qp_attr(ch->cm_id, attr, &attr_mask);
+ if (ret)
+ goto out;
+
-+ qp_attr.max_rd_atomic = 4;
++ attr->max_rd_atomic = 4;
+
-+ ret = ib_modify_qp(qp, &qp_attr, attr_mask);
++ /*
++ * From IBTA C9-140: Transport Timer timeout interval
++ * T_tr = 4.096 us * 2**(local ACK timeout) where the local ACK timeout
++ * is a five-bit value, with zero meaning that the timer is disabled.
++ */
++ WARN_ON(attr->timeout >= (1 << 5));
++ if (attr->timeout) {
++ T_tr_ns = 1ULL << (12 + attr->timeout);
++ max_compl_time_ms = attr->retry_cnt * 4 * T_tr_ns / 1000000;
++ TRACE_DBG("Session %s: QP local ack timeout = %d or T_tr ="
++ " %u ms; retry_cnt = %d; max compl. time = %d ms",
++ ch->sess_name,
++ attr->timeout, (unsigned)(T_tr_ns / (1000 * 1000)),
++ attr->retry_cnt, max_compl_time_ms);
++
++ if (max_compl_time_ms >= RDMA_COMPL_TIMEOUT_S * 1000) {
++ PRINT_ERROR("Maximum RDMA completion time (%d ms)"
++ " exceeds ib_srpt timeout (%d ms)",
++ max_compl_time_ms,
++ 1000 * RDMA_COMPL_TIMEOUT_S);
++ }
++ }
++
++ ret = ib_modify_qp(qp, attr, attr_mask);
+
+out:
++ kfree(attr);
++ TRACE_EXIT_RES(ret);
++ return ret;
++}
++
++/**
++ * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
++ */
++static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
++{
++ struct ib_qp_attr *attr;
++ int ret;
++
++ TRACE_ENTRY();
++
++ attr = kzalloc(sizeof *attr, GFP_KERNEL);
++ if (!attr)
++ return -ENOMEM;
++
++ attr->qp_state = IB_QPS_ERR;
++ ret = ib_modify_qp(ch->qp, attr, IB_QP_STATE);
++ kfree(attr);
++
++ TRACE_EXIT_RES(ret);
+ return ret;
+}
+
@@ -71344,6 +76131,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ */
+static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx,
++ enum srpt_opcode opcode,
+ enum scst_exec_context context)
+{
+ enum srpt_command_state state;
@@ -71353,7 +76141,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
+
+ scmnd = ioctx->scmnd;
-+ if (scmnd) {
++ if (opcode == SRPT_RDMA_READ_LAST && scmnd) {
+ state = srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
+ SRPT_STATE_DATA_IN);
+ if (state == SRPT_STATE_NEED_DATA)
@@ -71362,8 +76150,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ else
+ PRINT_ERROR("%s[%d]: wrong state = %d", __func__,
+ __LINE__, state);
-+ } else
-+ PRINT_ERROR("%s[%d]: scmnd == NULL", __func__, __LINE__);
++ } else if (opcode == SRPT_RDMA_ABORT) {
++ ioctx->rdma_aborted = true;
++ } else {
++ WARN_ON(opcode != SRPT_RDMA_READ_LAST);
++ PRINT_ERROR("%s[%d]: scmnd == NULL (opcode %d)", __func__,
++ __LINE__, opcode);
++ }
+}
+
+/**
@@ -71371,7 +76164,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ */
+static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx,
-+ u8 opcode,
++ enum srpt_opcode opcode,
+ enum scst_exec_context context)
+{
+ struct scst_cmd *scmnd;
@@ -71381,7 +76174,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ state = srpt_get_cmd_state(ioctx);
+ if (scmnd) {
+ switch (opcode) {
-+ case IB_WC_RDMA_READ:
++ case SRPT_RDMA_READ_LAST:
+ if (ioctx->n_rdma <= 0) {
+ PRINT_ERROR("Received invalid RDMA read error"
+ " completion with idx %d",
@@ -71395,7 +76188,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ PRINT_ERROR("%s[%d]: wrong state = %d",
+ __func__, __LINE__, state);
+ break;
-+ case IB_WC_RDMA_WRITE:
++ case SRPT_RDMA_WRITE_LAST:
+ scst_set_delivery_status(scmnd,
+ SCST_CMD_DELIVERY_ABORTED);
+ break;
@@ -71721,14 +76514,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+
+ ch_state = atomic_read(&ch->state);
+ srp_cmd = recv_ioctx->ioctx.buf;
-+ if (unlikely(ch_state == RDMA_CHANNEL_CONNECTING)) {
++ if (unlikely(ch_state == CH_CONNECTING)) {
+ list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
+ goto out;
+ }
+
-+ if (unlikely(ch_state == RDMA_CHANNEL_DISCONNECTING))
-+ goto post_recv;
-+
+ if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
+ if (!send_ioctx)
+ send_ioctx = srpt_get_send_ioctx(ch);
@@ -71739,8 +76529,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ }
+ }
+
-+ WARN_ON(ch_state != RDMA_CHANNEL_LIVE);
-+
+ switch (srp_cmd->opcode) {
+ case SRP_CMD:
+ srpt_handle_cmd(ch, recv_ioctx, send_ioctx, context);
@@ -71766,7 +76554,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ break;
+ }
+
-+post_recv:
+ srpt_post_recv(ch->sport->sdev, recv_ioctx);
+out:
+ return;
@@ -71818,36 +76605,35 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+{
+ struct srpt_send_ioctx *send_ioctx;
+ uint32_t index;
-+ u8 opcode;
++ enum srpt_opcode opcode;
+
+ index = idx_from_wr_id(wc->wr_id);
+ opcode = opcode_from_wr_id(wc->wr_id);
+ send_ioctx = ch->ioctx_ring[index];
+ if (wc->status == IB_WC_SUCCESS) {
-+ if (opcode == IB_WC_SEND)
++ if (opcode == SRPT_SEND)
+ srpt_handle_send_comp(ch, send_ioctx, context);
+ else {
-+ EXTRACHECKS_WARN_ON(wc->opcode != IB_WC_RDMA_READ);
-+ srpt_handle_rdma_comp(ch, send_ioctx, context);
++ EXTRACHECKS_WARN_ON(opcode != SRPT_RDMA_ABORT &&
++ wc->opcode != IB_WC_RDMA_READ);
++ srpt_handle_rdma_comp(ch, send_ioctx, opcode, context);
+ }
+ } else {
-+ if (opcode == IB_WC_SEND) {
++ if (opcode == SRPT_SEND) {
+ PRINT_INFO("sending response for idx %u failed with"
+ " status %d", index, wc->status);
+ srpt_handle_send_err_comp(ch, wc->wr_id, context);
-+ } else {
-+ PRINT_INFO("RDMA %s for idx %u failed with status %d",
-+ opcode == IB_WC_RDMA_READ ? "read"
-+ : opcode == IB_WC_RDMA_WRITE ? "write"
-+ : "???", index, wc->status);
++ } else if (opcode != SRPT_RDMA_MID) {
++ PRINT_INFO("RDMA t %d for idx %u failed with status %d",
++ opcode, index, wc->status);
+ srpt_handle_rdma_err_comp(ch, send_ioctx, opcode,
+ context);
+ }
+ }
+
-+ while (unlikely(opcode == IB_WC_SEND
++ while (unlikely(opcode == SRPT_SEND
+ && !list_empty(&ch->cmd_wait_list)
-+ && atomic_read(&ch->state) == RDMA_CHANNEL_LIVE
++ && atomic_read(&ch->state) == CH_LIVE
+ && (send_ioctx = srpt_get_send_ioctx(ch)) != NULL)) {
+ struct srpt_recv_ioctx *recv_ioctx;
+
@@ -71859,19 +76645,22 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ }
+}
+
-+static void srpt_process_completion(struct ib_cq *cq,
++static bool srpt_process_completion(struct ib_cq *cq,
+ struct srpt_rdma_ch *ch,
+ enum scst_exec_context context)
+{
+ struct ib_wc *const wc = ch->wc;
+ int i, n;
++ bool keep_going;
+
+ EXTRACHECKS_WARN_ON(cq != ch->cq);
+
-+ ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
++ keep_going = atomic_read(&ch->state) <= CH_LIVE;
++ if (keep_going)
++ ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+ while ((n = ib_poll_cq(cq, ARRAY_SIZE(ch->wc), wc)) > 0) {
+ for (i = 0; i < n; i++) {
-+ if (opcode_from_wr_id(wc[i].wr_id) & IB_WC_RECV)
++ if (opcode_from_wr_id(wc[i].wr_id) == SRPT_RECV)
+ srpt_process_rcv_completion(cq, ch, context,
+ &wc[i]);
+ else
@@ -71879,6 +76668,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ &wc[i]);
+ }
+ }
++
++ return keep_going;
+}
+
+/**
@@ -71900,7 +76691,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ atomic_inc(&ch->processing_compl);
+ switch (thread) {
+ case MODE_IB_COMPLETION_IN_THREAD:
-+ wake_up_interruptible(&ch->wait_queue);
++ wake_up_process(ch->thread);
+ break;
+ case MODE_IB_COMPLETION_IN_SIRQ:
+ srpt_process_completion(cq, ch, SCST_CONTEXT_THREAD);
@@ -71921,16 +76712,24 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+
+ ch = arg;
+ BUG_ON(!ch);
-+ PRINT_INFO("Session %s: kernel thread %s (PID %d) started",
-+ ch->sess_name, ch->thread->comm, current->pid);
+ while (!kthread_should_stop()) {
-+ wait_event_interruptible(ch->wait_queue,
-+ (srpt_process_completion(ch->cq, ch,
-+ SCST_CONTEXT_THREAD),
-+ kthread_should_stop()));
++ set_current_state(TASK_INTERRUPTIBLE);
++ if (!srpt_process_completion(ch->cq, ch, SCST_CONTEXT_THREAD))
++ break;
++ schedule();
++ }
++ set_current_state(TASK_RUNNING);
++
++ TRACE_DBG("ch %s: about to invoke scst_unregister_session()",
++ ch->sess_name);
++ WARN_ON(atomic_read(&ch->state) != CH_RELEASING);
++ scst_unregister_session(ch->scst_sess, false, srpt_free_ch);
++
++ while (!kthread_should_stop()) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule();
+ }
-+ PRINT_INFO("Session %s: kernel thread %s (PID %d) stopped",
-+ ch->sess_name, ch->thread->comm, current->pid);
++
+ return 0;
+}
+
@@ -71989,8 +76788,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ goto err_destroy_qp;
+
+ if (thread == MODE_IB_COMPLETION_IN_THREAD) {
-+ init_waitqueue_head(&ch->wait_queue);
-+
+ TRACE_DBG("creating IB completion thread for session %s",
+ ch->sess_name);
+
@@ -72018,140 +76815,123 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+
+static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
+{
-+ if (ch->thread)
-+ kthread_stop(ch->thread);
++ TRACE_ENTRY();
++
++ while (ib_poll_cq(ch->cq, ARRAY_SIZE(ch->wc), ch->wc) > 0)
++ ;
+
+ ib_destroy_qp(ch->qp);
+ ib_destroy_cq(ch->cq);
++
++ TRACE_EXIT();
+}
+
+/**
-+ * srpt_unregister_channel() - Start RDMA channel disconnection.
++ * __srpt_close_ch() - Close an RDMA channel by setting the QP error state.
++ *
++ * Reset the QP and make sure all resources associated with the channel will
++ * be deallocated at an appropriate time.
+ *
-+ * Note: The caller must hold ch->sdev->spinlock.
++ * Returns true if and only if the channel state has been modified from
++ * CH_CONNECTING or CH_LIVE into CH_DISCONNECTING.
++ *
++ * Note: The caller must hold ch->sport->sdev->spinlock.
+ */
-+static void srpt_unregister_channel(struct srpt_rdma_ch *ch)
-+ __acquires(&ch->sport->sdev->spinlock)
-+ __releases(&ch->sport->sdev->spinlock)
++static bool __srpt_close_ch(struct srpt_rdma_ch *ch)
+{
+ struct srpt_device *sdev;
-+ struct ib_qp_attr qp_attr;
-+ int ret;
++ enum rdma_ch_state prev_state;
++ bool was_live;
+
+ sdev = ch->sport->sdev;
-+ list_del(&ch->list);
-+ atomic_set(&ch->state, RDMA_CHANNEL_DISCONNECTING);
-+ spin_unlock_irq(&sdev->spinlock);
-+
-+ qp_attr.qp_state = IB_QPS_ERR;
-+ ret = ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
-+ if (ret < 0)
-+ PRINT_ERROR("Setting queue pair in error state failed: %d",
-+ ret);
-+
-+ while (atomic_read(&ch->processing_compl))
-+ ;
-+
-+ /*
-+ * At this point it is guaranteed that no new commands will be sent to
-+ * the SCST core for channel ch, which is a requirement for
-+ * scst_unregister_session().
-+ */
++ was_live = false;
++
++ prev_state = srpt_set_ch_state_to_disc(ch);
++
++ switch (prev_state) {
++ case CH_CONNECTING:
++ ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
++ NULL, 0);
++ /* fall through */
++ case CH_LIVE:
++ was_live = true;
++ if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
++ PRINT_ERROR("%s", "sending CM DREQ failed.");
++ break;
++ case CH_DISCONNECTING:
++ case CH_DRAINING:
++ case CH_RELEASING:
++ break;
++ }
+
-+ TRACE_DBG("unregistering session %p", ch->scst_sess);
-+ scst_unregister_session(ch->scst_sess, 0, srpt_release_channel);
-+ spin_lock_irq(&sdev->spinlock);
++ return was_live;
+}
+
+/**
-+ * srpt_release_channel_by_cmid() - Release a channel.
-+ * @cm_id: Pointer to the CM ID of the channel to be released.
-+ *
-+ * Note: Must be called from inside srpt_cm_handler to avoid a race between
-+ * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
-+ * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
-+ * waits until all SCST sessions for the associated IB device have been
-+ * unregistered and SCST session registration involves a call to
-+ * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
-+ * this function has finished).
++ * srpt_close_ch() - Close an RDMA channel.
+ */
-+static void srpt_release_channel_by_cmid(struct ib_cm_id *cm_id)
++static void srpt_close_ch(struct srpt_rdma_ch *ch)
+{
+ struct srpt_device *sdev;
-+ struct srpt_rdma_ch *ch;
-+
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_WARN_ON_ONCE(irqs_disabled());
+
-+ sdev = cm_id->context;
-+ BUG_ON(!sdev);
++ sdev = ch->sport->sdev;
+ spin_lock_irq(&sdev->spinlock);
-+ list_for_each_entry(ch, &sdev->rch_list, list) {
-+ if (ch->cm_id == cm_id) {
-+ srpt_unregister_channel(ch);
-+ break;
-+ }
-+ }
++ __srpt_close_ch(ch);
+ spin_unlock_irq(&sdev->spinlock);
-+
-+ TRACE_EXIT();
+}
+
+/**
-+ * srpt_find_channel() - Look up an RDMA channel.
-+ * @cm_id: Pointer to the CM ID of the channel to be looked up.
++ * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
++ * @cm_id: Pointer to the CM ID of the channel to be drained.
+ *
-+ * Return NULL if no matching RDMA channel has been found.
++ * Note: Must be called from inside srpt_cm_handler to avoid a race between
++ * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
++ * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
++ * waits until all target sessions for the associated IB device have been
++ * unregistered and target session registration involves a call to
++ * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
++ * this function has finished).
+ */
-+static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev,
-+ struct ib_cm_id *cm_id)
++static void srpt_drain_channel(struct ib_cm_id *cm_id)
+{
+ struct srpt_rdma_ch *ch;
-+ bool found;
-+
-+ EXTRACHECKS_WARN_ON_ONCE(irqs_disabled());
-+ BUG_ON(!sdev);
++ int ret;
+
-+ found = false;
-+ spin_lock_irq(&sdev->spinlock);
-+ list_for_each_entry(ch, &sdev->rch_list, list) {
-+ if (ch->cm_id == cm_id) {
-+ found = true;
-+ break;
-+ }
-+ }
-+ spin_unlock_irq(&sdev->spinlock);
++ WARN_ON_ONCE(irqs_disabled());
+
-+ return found ? ch : NULL;
++ ch = cm_id->context;
++ if (srpt_set_ch_state_to_draining(ch)) {
++ ret = srpt_ch_qp_err(ch);
++ if (ret < 0)
++ PRINT_ERROR("Setting queue pair in error state"
++ " failed: %d", ret);
++ } else
++ TRACE_DBG("Channel already in state %d",
++ atomic_read(&ch->state));
+}
+
+/**
-+ * srpt_release_channel() - Release all resources associated with an RDMA channel.
-+ *
-+ * Notes:
-+ * - The caller must have removed the channel from the channel list before
-+ * calling this function.
-+ * - Must be called as a callback function via scst_unregister_session(). Never
-+ * call this function directly because doing so would trigger several race
-+ * conditions.
-+ * - Do not access ch->sport or ch->sport->sdev in this function because the
-+ * memory that was allocated for the sport and/or sdev data structures may
-+ * already have been freed at the time this function is called.
++ * srpt_free_ch() - Release all resources associated with an RDMA channel.
+ */
-+static void srpt_release_channel(struct scst_session *scst_sess)
++static void srpt_free_ch(struct scst_session *sess)
+{
+ struct srpt_rdma_ch *ch;
++ struct srpt_device *sdev;
+
+ TRACE_ENTRY();
+
-+ ch = scst_sess_get_tgt_priv(scst_sess);
++ ch = scst_sess_get_tgt_priv(sess);
+ BUG_ON(!ch);
-+ WARN_ON(atomic_read(&ch->state) != RDMA_CHANNEL_DISCONNECTING);
++ BUG_ON(ch->scst_sess != sess);
++ sdev = ch->sport->sdev;
++ BUG_ON(!sdev);
+
-+ TRACE_DBG("destroying cm_id %p", ch->cm_id);
-+ BUG_ON(!ch->cm_id);
-+ ib_destroy_cm_id(ch->cm_id);
++ WARN_ON(atomic_read(&ch->state) != CH_RELEASING);
++
++ BUG_ON(!ch->thread);
++ BUG_ON(ch->thread == current);
++ kthread_stop(ch->thread);
++ ch->thread = NULL;
+
+ srpt_destroy_ch_ib(ch);
+
@@ -72159,6 +76939,16 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ ch->sport->sdev, ch->rq_size,
+ srp_max_rsp_size, DMA_TO_DEVICE);
+
++ spin_lock_irq(&sdev->spinlock);
++ list_del(&ch->list);
++ spin_unlock_irq(&sdev->spinlock);
++
++ TRACE_DBG("destroying cm_id %p", ch->cm_id);
++ BUG_ON(!ch->cm_id);
++ ib_destroy_cm_id(ch->cm_id);
++
++ wake_up(&sdev->ch_releaseQ);
++
+ kfree(ch);
+
+ TRACE_EXIT();
@@ -72285,7 +77075,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ && param->port == ch->sport->port
+ && param->listen_id == ch->sport->sdev->cm_id
+ && ch->cm_id) {
-+ enum rdma_ch_state prev_state;
++ if (!__srpt_close_ch(ch))
++ continue;
+
+ /* found an existing channel */
+ TRACE_DBG("Found existing channel name= %s"
@@ -72293,33 +77084,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ ch->sess_name, ch->cm_id,
+ atomic_read(&ch->state));
+
-+ prev_state = atomic_xchg(&ch->state,
-+ RDMA_CHANNEL_DISCONNECTING);
-+ if (prev_state == RDMA_CHANNEL_CONNECTING)
-+ srpt_unregister_channel(ch);
-+
-+ spin_unlock_irq(&sdev->spinlock);
-+
+ rsp->rsp_flags =
+ SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
-+
-+ if (prev_state == RDMA_CHANNEL_LIVE) {
-+ ib_send_cm_dreq(ch->cm_id, NULL, 0);
-+ PRINT_INFO("disconnected"
-+ " session %s because a new"
-+ " SRP_LOGIN_REQ has been received.",
-+ ch->sess_name);
-+ } else if (prev_state ==
-+ RDMA_CHANNEL_CONNECTING) {
-+ PRINT_ERROR("%s", "rejected"
-+ " SRP_LOGIN_REQ because another login"
-+ " request is being processed.");
-+ ib_send_cm_rej(ch->cm_id,
-+ IB_CM_REJ_NO_RESOURCES,
-+ NULL, 0, NULL, 0);
-+ }
-+
-+ spin_lock_irq(&sdev->spinlock);
+ }
+ }
+
@@ -72353,13 +77119,14 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ memcpy(ch->t_port_id, req->target_port_id, 16);
+ ch->sport = &sdev->port[param->port - 1];
+ ch->cm_id = cm_id;
++ cm_id->context = ch;
+ /*
+ * Avoid QUEUE_FULL conditions by limiting the number of buffers used
+ * for the SRP protocol to the SCST SCSI command queue size.
+ */
+ ch->rq_size = min(SRPT_RQ_SIZE, scst_get_max_lun_commands(NULL, 0));
+ atomic_set(&ch->processing_compl, 0);
-+ atomic_set(&ch->state, RDMA_CHANNEL_CONNECTING);
++ atomic_set(&ch->state, CH_CONNECTING);
+ INIT_LIST_HEAD(&ch->cmd_wait_list);
+
+ spin_lock_init(&ch->spinlock);
@@ -72470,8 +77237,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ goto out;
+
+release_channel:
-+ atomic_set(&ch->state, RDMA_CHANNEL_DISCONNECTING);
-+ scst_unregister_session(ch->scst_sess, 0, NULL);
++ atomic_set(&ch->state, CH_DISCONNECTING);
++ scst_unregister_session(ch->scst_sess, false, NULL);
+ ch->scst_sess = NULL;
+
+destroy_ib:
@@ -72505,7 +77272,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
+{
+ PRINT_INFO("Received InfiniBand REJ packet for cm_id %p.", cm_id);
-+ srpt_release_channel_by_cmid(cm_id);
++ srpt_drain_channel(cm_id);
+}
+
+/**
@@ -72519,13 +77286,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ struct srpt_rdma_ch *ch;
+ int ret;
+
-+ ch = srpt_find_channel(cm_id->context, cm_id);
-+ WARN_ON(!ch);
-+ if (!ch)
-+ goto out;
++ TRACE_ENTRY();
++
++ ch = cm_id->context;
++ BUG_ON(!ch);
+
-+ if (srpt_test_and_set_channel_state(ch, RDMA_CHANNEL_CONNECTING,
-+ RDMA_CHANNEL_LIVE) == RDMA_CHANNEL_CONNECTING) {
++ if (srpt_test_and_set_channel_state(ch, CH_CONNECTING, CH_LIVE)) {
+ struct srpt_recv_ioctx *ioctx, *ioctx_tmp;
+
+ ret = srpt_ch_qp_rts(ch, ch->qp);
@@ -72536,30 +77302,23 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ srpt_handle_new_iu(ch, ioctx, NULL,
+ SCST_CONTEXT_THREAD);
+ }
-+ if (ret && srpt_test_and_set_channel_state(ch,
-+ RDMA_CHANNEL_LIVE,
-+ RDMA_CHANNEL_DISCONNECTING) == RDMA_CHANNEL_LIVE) {
-+ TRACE_DBG("cm_id=%p sess_name=%s state=%d",
-+ cm_id, ch->sess_name,
-+ atomic_read(&ch->state));
-+ ib_send_cm_dreq(ch->cm_id, NULL, 0);
-+ }
++ if (ret)
++ srpt_close_ch(ch);
+ }
+
-+out:
-+ ;
++ TRACE_EXIT();
+}
+
+static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
+{
+ PRINT_INFO("Received InfiniBand TimeWait exit for cm_id %p.", cm_id);
-+ srpt_release_channel_by_cmid(cm_id);
++ srpt_drain_channel(cm_id);
+}
+
+static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
+{
+ PRINT_INFO("Received InfiniBand REP error for cm_id %p.", cm_id);
-+ srpt_release_channel_by_cmid(cm_id);
++ srpt_drain_channel(cm_id);
+}
+
+/**
@@ -72569,29 +77328,21 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+{
+ struct srpt_rdma_ch *ch;
+
-+ ch = srpt_find_channel(cm_id->context, cm_id);
-+ if (!ch) {
-+ TRACE_DBG("Received DREQ for channel %p which is already"
-+ " being unregistered.", cm_id);
-+ goto out;
-+ }
-+
-+ TRACE_DBG("cm_id= %p ch->state= %d", cm_id, atomic_read(&ch->state));
++ ch = cm_id->context;
+
-+ switch (atomic_read(&ch->state)) {
-+ case RDMA_CHANNEL_LIVE:
-+ case RDMA_CHANNEL_CONNECTING:
-+ ib_send_cm_drep(ch->cm_id, NULL, 0);
-+ PRINT_INFO("Received DREQ and sent DREP for session %s.",
-+ ch->sess_name);
++ switch (srpt_set_ch_state_to_disc(ch)) {
++ case CH_CONNECTING:
++ case CH_LIVE:
++ if (ib_send_cm_drep(ch->cm_id, NULL, 0) >= 0)
++ PRINT_INFO("Received DREQ and sent DREP for session %s",
++ ch->sess_name);
++ else
++ PRINT_ERROR("%s", "Sending DREP failed");
+ break;
-+ case RDMA_CHANNEL_DISCONNECTING:
+ default:
++ __WARN();
+ break;
+ }
-+
-+out:
-+ ;
+}
+
+/**
@@ -72600,7 +77351,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
+{
+ PRINT_INFO("Received InfiniBand DREP message for cm_id %p.", cm_id);
-+ srpt_release_channel_by_cmid(cm_id);
++ srpt_drain_channel(cm_id);
+}
+
+/**
@@ -72611,7 +77362,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ * Note: srpt_cm_handler() must only return a non-zero value when transferring
+ * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning
+ * a non-zero value in any other case will trigger a race with the
-+ * ib_destroy_cm_id() call in srpt_release_channel().
++ * ib_destroy_cm_id() call in srpt_free_ch().
+ */
+static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+{
@@ -72708,7 +77459,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ if (ioctx->rdma_ius && ioctx->n_rdma_ius)
+ nrdma = ioctx->n_rdma_ius;
+ else {
-+ nrdma = count / SRPT_DEF_SG_PER_WQE + ioctx->n_rbuf;
++ nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE
++ + ioctx->n_rbuf;
+
+ ioctx->rdma_ius = kzalloc(nrdma * sizeof *riu,
+ scst_cmd_atomic(scmnd)
@@ -72787,6 +77539,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ goto free_mem;
+ }
+
++ EXTRACHECKS_WARN_ON(riu - ioctx->rdma_ius != ioctx->n_rdma);
++ EXTRACHECKS_WARN_ON(ioctx->n_rdma > ioctx->n_rdma_ius);
++
+ db = ioctx->rbufs;
+ tsize = (dir == SCST_DATA_READ)
+ ? scst_cmd_get_adjusted_resp_data_len(scmnd)
@@ -72829,15 +77584,17 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ }
+
+ ++k;
-+ if (k == riu->sge_cnt && rsize > 0) {
++ if (k == riu->sge_cnt && rsize > 0 && tsize > 0) {
+ ++riu;
+ sge = riu->sge;
+ k = 0;
-+ } else if (rsize > 0)
++ } else if (rsize > 0 && tsize > 0)
+ ++sge;
+ }
+ }
+
++ EXTRACHECKS_WARN_ON(riu - ioctx->rdma_ius != ioctx->n_rdma);
++
+ return 0;
+
+free_mem:
@@ -72852,7 +77609,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx)
+{
-+ struct scst_cmd *scmnd;
+ struct scatterlist *sg;
+ scst_data_direction dir;
+
@@ -72867,10 +77623,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ ioctx->rdma_ius = NULL;
+
+ if (ioctx->mapped_sg_count) {
-+ scmnd = ioctx->scmnd;
-+ EXTRACHECKS_BUG_ON(!scmnd);
-+ EXTRACHECKS_WARN_ON(ioctx->scmnd != scmnd);
-+ EXTRACHECKS_WARN_ON(ioctx != scst_cmd_get_tgt_priv(scmnd));
++ EXTRACHECKS_BUG_ON(!ioctx->scmnd);
++ EXTRACHECKS_WARN_ON(ioctx
++ != scst_cmd_get_tgt_priv(ioctx->scmnd));
+ sg = ioctx->sg;
+ EXTRACHECKS_WARN_ON(!sg);
+ dir = ioctx->dir;
@@ -72896,6 +77651,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ int i;
+ int ret;
+ int sq_wr_avail;
++ const int n_rdma = ioctx->n_rdma;
+
+ if (dir == SCST_DATA_WRITE) {
+ ret = -ENOMEM;
@@ -72903,23 +77659,28 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ &ch->sq_wr_avail);
+ if (sq_wr_avail < 0) {
+ PRINT_WARNING("IB send queue full (needed %d)",
-+ ioctx->n_rdma);
++ n_rdma);
+ goto out;
+ }
+ }
+
++ ioctx->rdma_aborted = false;
+ ret = 0;
+ riu = ioctx->rdma_ius;
+ memset(&wr, 0, sizeof wr);
+
-+ for (i = 0; i < ioctx->n_rdma; ++i, ++riu) {
++ for (i = 0; i < n_rdma; ++i, ++riu) {
+ if (dir == SCST_DATA_READ) {
+ wr.opcode = IB_WR_RDMA_WRITE;
-+ wr.wr_id = encode_wr_id(IB_WC_RDMA_WRITE,
++ wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
++ SRPT_RDMA_WRITE_LAST :
++ SRPT_RDMA_MID,
+ ioctx->ioctx.index);
+ } else {
+ wr.opcode = IB_WR_RDMA_READ;
-+ wr.wr_id = encode_wr_id(IB_WC_RDMA_READ,
++ wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
++ SRPT_RDMA_READ_LAST :
++ SRPT_RDMA_MID,
+ ioctx->ioctx.index);
+ }
+ wr.next = NULL;
@@ -72929,12 +77690,34 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ wr.sg_list = riu->sge;
+
+ /* only get completion event for the last rdma wr */
-+ if (i == (ioctx->n_rdma - 1) && dir == SCST_DATA_WRITE)
++ if (i == (n_rdma - 1) && dir == SCST_DATA_WRITE)
+ wr.send_flags = IB_SEND_SIGNALED;
+
+ ret = ib_post_send(ch->qp, &wr, &bad_wr);
+ if (ret)
-+ goto out;
++ break;
++ }
++
++ if (ret)
++ PRINT_ERROR("%s[%d]: ib_post_send() returned %d for %d/%d",
++ __func__, __LINE__, ret, i, n_rdma);
++ if (ret && i > 0) {
++ wr.num_sge = 0;
++ wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index);
++ wr.send_flags = IB_SEND_SIGNALED;
++ while (atomic_read(&ch->state) == CH_LIVE &&
++ ib_post_send(ch->qp, &wr, &bad_wr) != 0) {
++ PRINT_INFO("Trying to abort failed RDMA transfer [%d]",
++ ioctx->ioctx.index);
++ msleep(1000);
++ }
++ while (atomic_read(&ch->state) != CH_DISCONNECTING &&
++ !ioctx->rdma_aborted) {
++ PRINT_INFO("Waiting until RDMA abort finished [%d]",
++ ioctx->ioctx.index);
++ msleep(1000);
++ }
++ PRINT_INFO("%s[%d]: done", __func__, __LINE__);
+ }
+
+out:
@@ -72990,7 +77773,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ * srpt_pending_cmd_timeout() - SCST command HCA processing timeout callback.
+ *
+ * Called by the SCST core if no IB completion notification has been received
-+ * within max_hw_pending_time seconds.
++ * within RDMA_COMPL_TIMEOUT_S seconds.
+ */
+static void srpt_pending_cmd_timeout(struct scst_cmd *scmnd)
+{
@@ -73050,13 +77833,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ BUG_ON(!ch);
+
+ ch_state = atomic_read(&ch->state);
-+ if (ch_state == RDMA_CHANNEL_DISCONNECTING) {
++ if (ch_state == CH_DISCONNECTING) {
+ TRACE_DBG("cmd with tag %lld: channel disconnecting",
+ scst_cmd_get_tag(scmnd));
+ srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
+ ret = SCST_TGT_RES_FATAL_ERROR;
+ goto out;
-+ } else if (ch_state == RDMA_CHANNEL_CONNECTING) {
++ } else if (ch_state == CH_CONNECTING) {
+ ret = SCST_TGT_RES_QUEUE_FULL;
+ goto out;
+ }
@@ -73113,14 +77896,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ if (dir == SCST_DATA_READ
+ && scst_cmd_get_adjusted_resp_data_len(scmnd)) {
+ ret = srpt_xfer_data(ch, ioctx, scmnd);
-+ if (ret == SCST_TGT_RES_QUEUE_FULL) {
++ if (unlikely(ret != SCST_TGT_RES_SUCCESS)) {
+ srpt_set_cmd_state(ioctx, state);
+ PRINT_WARNING("xfer_data failed for tag %llu"
-+ " - retrying", scst_cmd_get_tag(scmnd));
-+ goto out;
-+ } else if (ret != SCST_TGT_RES_SUCCESS) {
-+ PRINT_ERROR("xfer_data failed for tag %llu",
-+ scst_cmd_get_tag(scmnd));
++ " - %s", scst_cmd_get_tag(scmnd),
++ ret == SCST_TGT_RES_QUEUE_FULL ?
++ "retrying" : "failing");
+ goto out;
+ }
+ }
@@ -73206,8 +77987,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ *
+ * See also SPC-3, section 7.5.4.5, TransportID for initiator ports using SRP.
+ */
-+static int srpt_get_initiator_port_transport_id(struct scst_session *scst_sess,
-+ uint8_t **transport_id)
++static int srpt_get_initiator_port_transport_id(struct scst_tgt *tgt,
++ struct scst_session *scst_sess, uint8_t **transport_id)
+{
+ struct srpt_rdma_ch *ch;
+ struct spc_rdma_transport_id {
@@ -73282,6 +78063,50 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ return device_count;
+}
+
++static int srpt_ch_list_empty(struct srpt_device *sdev)
++{
++ int res;
++
++ spin_lock_irq(&sdev->spinlock);
++ res = list_empty(&sdev->rch_list);
++ spin_unlock_irq(&sdev->spinlock);
++
++ return res;
++}
++
++/**
++ * srpt_release_sdev() - Free channel resources associated with a target.
++ */
++static int srpt_release_sdev(struct srpt_device *sdev)
++{
++ struct srpt_rdma_ch *ch, *next_ch;
++
++ TRACE_ENTRY();
++
++ WARN_ON_ONCE(irqs_disabled());
++ BUG_ON(!sdev);
++
++ spin_lock_irq(&sdev->spinlock);
++ list_for_each_entry_safe(ch, next_ch, &sdev->rch_list, list)
++ __srpt_close_ch(ch);
++ spin_unlock_irq(&sdev->spinlock);
++
++ while (wait_event_timeout(sdev->ch_releaseQ,
++ srpt_ch_list_empty(sdev), 5 * HZ) <= 0) {
++ PRINT_INFO("%s: waiting for session unregistration ...",
++ sdev->device->name);
++ spin_lock_irq(&sdev->spinlock);
++ list_for_each_entry_safe(ch, next_ch, &sdev->rch_list, list)
++ PRINT_INFO("%s: %d commands in progress",
++ ch->sess_name,
++ atomic_read(&ch->scst_sess->sess_cmd_count));
++ spin_unlock_irq(&sdev->spinlock);
++ }
++
++ TRACE_EXIT();
++ return 0;
++}
++
+/**
+ * srpt_release() - Free the resources associated with an SCST target.
+ *
@@ -73290,7 +78115,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+static int srpt_release(struct scst_tgt *scst_tgt)
+{
+ struct srpt_device *sdev = scst_tgt_get_tgt_priv(scst_tgt);
-+ struct srpt_rdma_ch *ch;
+
+ TRACE_ENTRY();
+
@@ -73300,12 +78124,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ if (WARN_ON(!sdev))
+ return -ENODEV;
+
-+ spin_lock_irq(&sdev->spinlock);
-+ while (!list_empty(&sdev->rch_list)) {
-+ ch = list_first_entry(&sdev->rch_list, typeof(*ch), list);
-+ srpt_unregister_channel(ch);
-+ }
-+ spin_unlock_irq(&sdev->spinlock);
++ srpt_release_sdev(sdev);
+
+ scst_tgt_set_tgt_priv(scst_tgt, NULL);
+
@@ -73324,6 +78143,49 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ return 0x0940; /* SRP */
+}
+
++static ssize_t show_login_info(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ struct scst_tgt *scst_tgt;
++ struct srpt_device *sdev;
++ struct srpt_port *sport;
++ int i;
++ int len;
++
++ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
++ sdev = scst_tgt_get_tgt_priv(scst_tgt);
++ len = 0;
++ for (i = 0; i < sdev->device->phys_port_cnt; i++) {
++ sport = &sdev->port[i];
++
++ len += sprintf(buf + len,
++ "tid_ext=%016llx,ioc_guid=%016llx,pkey=ffff,"
++ "dgid=%04x%04x%04x%04x%04x%04x%04x%04x,"
++ "service_id=%016llx\n",
++ srpt_service_guid,
++ srpt_service_guid,
++ be16_to_cpu(((__be16 *) sport->gid.raw)[0]),
++ be16_to_cpu(((__be16 *) sport->gid.raw)[1]),
++ be16_to_cpu(((__be16 *) sport->gid.raw)[2]),
++ be16_to_cpu(((__be16 *) sport->gid.raw)[3]),
++ be16_to_cpu(((__be16 *) sport->gid.raw)[4]),
++ be16_to_cpu(((__be16 *) sport->gid.raw)[5]),
++ be16_to_cpu(((__be16 *) sport->gid.raw)[6]),
++ be16_to_cpu(((__be16 *) sport->gid.raw)[7]),
++ srpt_service_guid);
++ }
++
++ return len;
++}
++
++static struct kobj_attribute srpt_show_login_info_attr =
++ __ATTR(login_info, S_IRUGO, show_login_info, NULL);
++
++static const struct attribute *srpt_tgt_attrs[] = {
++ &srpt_show_login_info_attr.attr,
++ NULL
++};
++
+static ssize_t show_req_lim(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
@@ -73365,9 +78227,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+static struct scst_tgt_template srpt_template = {
+ .name = DRV_NAME,
+ .sg_tablesize = SRPT_DEF_SG_TABLESIZE,
-+ .max_hw_pending_time = 60/*seconds*/,
++ .max_hw_pending_time = RDMA_COMPL_TIMEOUT_S,
+ .enable_target = srpt_enable_target,
+ .is_target_enabled = srpt_is_target_enabled,
++ .tgt_attrs = srpt_tgt_attrs,
+ .sess_attrs = srpt_sess_attrs,
+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
+ .default_trace_flags = DEFAULT_SRPT_TRACE_FLAGS,
@@ -73385,68 +78248,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+};
+
+/**
-+ * srpt_dev_release() - Device release callback function.
-+ *
-+ * The callback function srpt_dev_release() is called whenever a
-+ * device is removed from the /sys/class/infiniband_srpt device class.
-+ * Although this function has been left empty, a release function has been
-+ * defined such that upon module removal no complaint is logged about a
-+ * missing release function.
-+ */
-+static void srpt_dev_release(struct device *dev)
-+{
-+}
-+
-+static ssize_t show_login_info(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct srpt_device *sdev;
-+ struct srpt_port *sport;
-+ int i;
-+ int len;
-+
-+ sdev = container_of(dev, struct srpt_device, dev);
-+ len = 0;
-+ for (i = 0; i < sdev->device->phys_port_cnt; i++) {
-+ sport = &sdev->port[i];
-+
-+ len += sprintf(buf + len,
-+ "tid_ext=%016llx,ioc_guid=%016llx,pkey=ffff,"
-+ "dgid=%04x%04x%04x%04x%04x%04x%04x%04x,"
-+ "service_id=%016llx\n",
-+ srpt_service_guid,
-+ srpt_service_guid,
-+ be16_to_cpu(((__be16 *) sport->gid.raw)[0]),
-+ be16_to_cpu(((__be16 *) sport->gid.raw)[1]),
-+ be16_to_cpu(((__be16 *) sport->gid.raw)[2]),
-+ be16_to_cpu(((__be16 *) sport->gid.raw)[3]),
-+ be16_to_cpu(((__be16 *) sport->gid.raw)[4]),
-+ be16_to_cpu(((__be16 *) sport->gid.raw)[5]),
-+ be16_to_cpu(((__be16 *) sport->gid.raw)[6]),
-+ be16_to_cpu(((__be16 *) sport->gid.raw)[7]),
-+ srpt_service_guid);
-+ }
-+
-+ return len;
-+}
-+
-+static struct class_attribute srpt_class_attrs[] = {
-+ __ATTR_NULL,
-+};
-+
-+static struct device_attribute srpt_dev_attrs[] = {
-+ __ATTR(login_info, S_IRUGO, show_login_info, NULL),
-+ __ATTR_NULL,
-+};
-+
-+static struct class srpt_class = {
-+ .name = "infiniband_srpt",
-+ .dev_release = srpt_dev_release,
-+ .class_attrs = srpt_class_attrs,
-+ .dev_attrs = srpt_dev_attrs,
-+};
-+
-+/**
+ * srpt_add_one() - Infiniband device addition callback function.
+ */
+static void srpt_add_one(struct ib_device *device)
@@ -73454,6 +78255,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ struct srpt_device *sdev;
+ struct srpt_port *sport;
+ struct ib_srq_init_attr srq_attr;
++ char tgt_name[24];
+ int i;
+
+ TRACE_ENTRY();
@@ -73466,9 +78268,18 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+
+ sdev->device = device;
+ INIT_LIST_HEAD(&sdev->rch_list);
++ init_waitqueue_head(&sdev->ch_releaseQ);
+ spin_lock_init(&sdev->spinlock);
+
-+ sdev->scst_tgt = scst_register_target(&srpt_template, NULL);
++ if (use_node_guid_in_target_name) {
++ snprintf(tgt_name, sizeof(tgt_name), "%04x:%04x:%04x:%04x",
++ be16_to_cpu(((__be16 *)&device->node_guid)[0]),
++ be16_to_cpu(((__be16 *)&device->node_guid)[1]),
++ be16_to_cpu(((__be16 *)&device->node_guid)[2]),
++ be16_to_cpu(((__be16 *)&device->node_guid)[3]));
++ sdev->scst_tgt = scst_register_target(&srpt_template, tgt_name);
++ } else
++ sdev->scst_tgt = scst_register_target(&srpt_template, NULL);
+ if (!sdev->scst_tgt) {
+ PRINT_ERROR("SCST registration failed for %s.",
+ sdev->device->name);
@@ -73477,19 +78288,12 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+
+ scst_tgt_set_tgt_priv(sdev->scst_tgt, sdev);
+
-+ sdev->dev.class = &srpt_class;
-+ sdev->dev.parent = device->dma_device;
-+ dev_set_name(&sdev->dev, "srpt-%s", device->name);
-+
-+ if (device_register(&sdev->dev))
-+ goto unregister_tgt;
-+
+ if (ib_query_device(device, &sdev->dev_attr))
-+ goto err_dev;
++ goto unregister_tgt;
+
+ sdev->pd = ib_alloc_pd(device);
+ if (IS_ERR(sdev->pd))
-+ goto err_dev;
++ goto unregister_tgt;
+
+ sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(sdev->mr))
@@ -73582,8 +78386,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ ib_dereg_mr(sdev->mr);
+err_pd:
+ ib_dealloc_pd(sdev->pd);
-+err_dev:
-+ device_unregister(&sdev->dev);
+unregister_tgt:
+ scst_unregister_target(sdev->scst_tgt);
+free_dev:
@@ -73623,8 +78425,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ ib_dereg_mr(sdev->mr);
+ ib_dealloc_pd(sdev->pd);
+
-+ device_unregister(&sdev->dev);
-+
+ /*
+ * Unregistering an SCST target must happen after destroying sdev->cm_id
+ * such that no new SRP_LOGIN_REQ information units can arrive while
@@ -73686,11 +78486,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ goto out;
+ }
+
-+ ret = class_register(&srpt_class);
-+ if (ret) {
-+ PRINT_ERROR("%s", "couldn't register class ib_srpt");
-+ goto out;
-+ }
++ if (!use_node_guid_in_target_name)
++ PRINT_WARNING("%s", "Usage of HCA numbers as SCST target names "
++ "is deprecated and will be removed in one of the next "
++ "versions. It is strongly recommended to set "
++ "use_node_guid_in_target_name parameter in 1 and "
++ "update your SCST config file accordingly to use HCAs "
++ "GUIDs.");
+
+ switch (thread) {
+ case MODE_ALL_IN_SIRQ:
@@ -73723,21 +78525,19 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ if (ret < 0) {
+ PRINT_ERROR("%s", "couldn't register with scst");
+ ret = -ENODEV;
-+ goto out_unregister_class;
++ goto out;
+ }
+
+ ret = ib_register_client(&srpt_client);
+ if (ret) {
+ PRINT_ERROR("%s", "couldn't register IB client");
-+ goto out_unregister_procfs;
++ goto out_unregister_target;
+ }
+
+ return 0;
+
-+out_unregister_procfs:
++out_unregister_target:
+ scst_unregister_target_template(&srpt_template);
-+out_unregister_class:
-+ class_unregister(&srpt_class);
+out:
+ return ret;
+}
@@ -73748,7 +78548,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+
+ ib_unregister_client(&srpt_client);
+ scst_unregister_target_template(&srpt_template);
-+ class_unregister(&srpt_class);
+
+ TRACE_EXIT();
+}
@@ -73762,13 +78561,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.c linux-2.6.36/drivers/sc
+ * indent-tabs-mode: t
+ * End:
+ */
-diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.h linux-2.6.36/drivers/scst/srpt/ib_srpt.h
---- orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.h
-+++ linux-2.6.36/drivers/scst/srpt/ib_srpt.h
-@@ -0,0 +1,353 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/srpt/ib_srpt.h linux-2.6.39/drivers/scst/srpt/ib_srpt.h
+--- orig/linux-2.6.39/drivers/scst/srpt/ib_srpt.h
++++ linux-2.6.39/drivers/scst/srpt/ib_srpt.h
+@@ -0,0 +1,372 @@
+/*
+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
-+ * Copyright (C) 2009 - 2010 Bart Van Assche <bart.vanassche@gmail.com>
++ * Copyright (C) 2009 - 2010 Bart Van Assche <bvanassche@acm.org>
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
@@ -73894,11 +78693,22 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.h linux-2.6.36/drivers/sc
+ DEFAULT_MAX_RSP_SIZE = 256, /* leaves 220 bytes for sense data */
+
+ DEFAULT_MAX_RDMA_SIZE = 65536,
++
++ RDMA_COMPL_TIMEOUT_S = 80,
++};
++
++enum srpt_opcode {
++ SRPT_RECV,
++ SRPT_SEND,
++ SRPT_RDMA_MID,
++ SRPT_RDMA_ABORT,
++ SRPT_RDMA_READ_LAST,
++ SRPT_RDMA_WRITE_LAST,
+};
+
-+static inline u64 encode_wr_id(u8 opcode, u32 idx)
++static inline u64 encode_wr_id(enum srpt_opcode opcode, u32 idx)
+{ return ((u64)opcode << 32) | idx; }
-+static inline u8 opcode_from_wr_id(u64 wr_id)
++static inline enum srpt_opcode opcode_from_wr_id(u64 wr_id)
+{ return wr_id >> 32; }
+static inline u32 idx_from_wr_id(u64 wr_id)
+{ return (u32)wr_id; }
@@ -73978,6 +78788,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.h linux-2.6.36/drivers/sc
+ struct scst_cmd *scmnd;
+ scst_data_direction dir;
+ atomic_t state;
++ bool rdma_aborted;
+};
+
+/**
@@ -73992,16 +78803,24 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.h linux-2.6.36/drivers/sc
+
+/**
+ * enum rdma_ch_state - SRP channel state.
++ * @CH_CONNECTING: QP is in RTR state; waiting for RTU.
++ * @CH_LIVE: QP is in RTS state.
++ * @CH_DISCONNECTING: DREQ has been received and waiting for DREP or DREQ has
++ * been sent and waiting for DREP or channel is being closed
++ * for another reason.
++ * @CH_DRAINING: QP is in ERR state; waiting for last WQE event.
++ * @CH_RELEASING: Last WQE event has been received; releasing resources.
+ */
+enum rdma_ch_state {
-+ RDMA_CHANNEL_CONNECTING,
-+ RDMA_CHANNEL_LIVE,
-+ RDMA_CHANNEL_DISCONNECTING
++ CH_CONNECTING,
++ CH_LIVE,
++ CH_DISCONNECTING,
++ CH_DRAINING,
++ CH_RELEASING
+};
+
+/**
+ * struct srpt_rdma_ch - RDMA channel.
-+ * @wait_queue: Allows the kernel thread to wait for more work.
+ * @thread: Kernel thread that processes the IB queues associated with
+ * the channel.
+ * @cm_id: IB CM ID associated with the channel.
@@ -74029,7 +78848,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.h linux-2.6.36/drivers/sc
+ * @sess_name: SCST session name.
+ */
+struct srpt_rdma_ch {
-+ wait_queue_head_t wait_queue;
+ struct task_struct *thread;
+ struct ib_cm_id *cm_id;
+ struct ib_qp *qp;
@@ -74103,10 +78921,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.h linux-2.6.36/drivers/sc
+ int srq_size;
+ struct srpt_recv_ioctx **ioctx_ring;
+ struct list_head rch_list;
++ wait_queue_head_t ch_releaseQ;
+ spinlock_t spinlock;
+ struct srpt_port port[2];
+ struct ib_event_handler event_handler;
-+ struct device dev;
+ struct scst_tgt *scst_tgt;
+ bool enabled;
+};
@@ -74119,122 +78937,297 @@ diff -uprN orig/linux-2.6.36/drivers/scst/srpt/ib_srpt.h linux-2.6.36/drivers/sc
+ * indent-tabs-mode: t
+ * End:
+ */
-diff -uprN orig/linux-2.6.36/Documentation/scst/README.srpt linux-2.6.36/Documentation/scst/README.srpt
---- orig/linux-2.6.36/Documentation/scst/README.srpt
-+++ linux-2.6.36/Documentation/scst/README.srpt
-@@ -0,0 +1,109 @@
-+SCSI RDMA Protocol (SRP) Target driver for Linux
-+=================================================
+diff -uprN orig/linux-2.6.39/Documentation/scst/README.scst_local linux-2.6.39/Documentation/scst/README.scst_local
+--- orig/linux-2.6.39/Documentation/scst/README.scst_local
++++ linux-2.6.39/Documentation/scst/README.scst_local
+@@ -0,0 +1,284 @@
++SCST Local ...
++Richard Sharpe, 30-Nov-2008
+
-+The SRP Target driver is designed to work directly on top of the
-+OpenFabrics OFED-1.x software stack (http://www.openfabrics.org) or
-+the Infiniband drivers in the Linux kernel tree
-+(http://www.kernel.org). The SRP target driver also interfaces with
-+the generic SCSI target mid-level driver called SCST
-+(http://scst.sourceforge.net).
++This is the SCST Local driver. Its function is to allow you to access devices
++that are exported via SCST directly on the same Linux system that they are
++exported from.
+
-+How-to run
-+-----------
++No assumptions are made in the code about the device types on the target, so
++any device handlers that you load in SCST should be visible, including tapes
++and so forth.
+
-+A. On srp target machine
-+1. Please refer to SCST's README for loading scst driver and its
-+dev_handlers drivers (scst_disk, scst_vdisk block or file IO mode, nullio, ...)
++You can freely use any sg, sd, st, etc. devices imported from target,
++except the following: you can't mount file systems or put swap on them
++for all dev handlers, except BLOCKIO and pass-through, because it can
++lead to recursive memory allocation deadlock. This is a limitation of
++Linux memory/cache manager. See SCST README file for details. For
++BLOCKIO and pass-through dev handlers there's no such limitation, so you
++can freely mount file systems over them.
+
-+Example 1: working with real back-end scsi disks
-+a. modprobe scst
-+b. modprobe scst_disk
-+c. cat /proc/scsi_tgt/scsi_tgt
++To build, simply issue 'make' in the scst_local directory.
+
-+ibstor00:~ # cat /proc/scsi_tgt/scsi_tgt
-+Device (host:ch:id:lun or name) Device handler
-+0:0:0:0 dev_disk
-+4:0:0:0 dev_disk
-+5:0:0:0 dev_disk
-+6:0:0:0 dev_disk
-+7:0:0:0 dev_disk
++Try 'modinfo scst_local' for a listing of module parameters so far.
+
-+Now you want to exclude the first scsi disk and expose the last 4 scsi disks as
-+IB/SRP luns for I/O
-+echo "add 4:0:0:0 0" >/proc/scsi_tgt/groups/Default/devices
-+echo "add 5:0:0:0 1" >/proc/scsi_tgt/groups/Default/devices
-+echo "add 6:0:0:0 2" >/proc/scsi_tgt/groups/Default/devices
-+echo "add 7:0:0:0 3" >/proc/scsi_tgt/groups/Default/devices
++Here is how I have used it so far:
+
-+Example 2: working with VDISK FILEIO mode (using md0 device and file 10G-file)
-+a. modprobe scst
-+b. modprobe scst_vdisk
-+c. echo "open vdisk0 /dev/md0" > /proc/scsi_tgt/vdisk/vdisk
-+d. echo "open vdisk1 /10G-file" > /proc/scsi_tgt/vdisk/vdisk
-+e. echo "add vdisk0 0" >/proc/scsi_tgt/groups/Default/devices
-+f. echo "add vdisk1 1" >/proc/scsi_tgt/groups/Default/devices
++1. Load up scst:
+
-+Example 3: working with VDISK BLOCKIO mode (using md0 device, sda, and cciss/c1d0)
-+a. modprobe scst
-+b. modprobe scst_vdisk
-+c. echo "open vdisk0 /dev/md0 BLOCKIO" > /proc/scsi_tgt/vdisk/vdisk
-+d. echo "open vdisk1 /dev/sda BLOCKIO" > /proc/scsi_tgt/vdisk/vdisk
-+e. echo "open vdisk2 /dev/cciss/c1d0 BLOCKIO" > /proc/scsi_tgt/vdisk/vdisk
-+f. echo "add vdisk0 0" >/proc/scsi_tgt/groups/Default/devices
-+g. echo "add vdisk1 1" >/proc/scsi_tgt/groups/Default/devices
-+h. echo "add vdisk2 2" >/proc/scsi_tgt/groups/Default/devices
++ modprobe scst
++ modprobe scst_vdisk
+
-+2. modprobe ib_srpt
++2. Create a virtual disk (or your own device handler):
+
-+B. On initiator machines you can manualy do the following steps:
-+1. modprobe ib_srp
-+2. ibsrpdm -c (to discover new SRP target)
-+3. echo <new target info> > /sys/class/infiniband_srp/srp-mthca0-1/add_target
-+4. fdisk -l (will show new discovered scsi disks)
++ dd if=/dev/zero of=/some/path/vdisk1.img bs=16384 count=1000000
++ echo "add_device vm_disk1 filename=/some/path/vdisk1.img" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
+
-+Example:
-+Assume that you use port 1 of first HCA in the system ie. mthca0
++3. Load the scst_local driver:
+
-+[root@lab104 ~]# ibsrpdm -c -d /dev/infiniband/umad0
-+id_ext=0002c90200226cf4,ioc_guid=0002c90200226cf4,
-+dgid=fe800000000000000002c90200226cf5,pkey=ffff,service_id=0002c90200226cf4
-+[root@lab104 ~]# echo id_ext=0002c90200226cf4,ioc_guid=0002c90200226cf4,
-+dgid=fe800000000000000002c90200226cf5,pkey=ffff,service_id=0002c90200226cf4 >
-+/sys/class/infiniband_srp/srp-mthca0-1/add_target
++ insmod scst_local
++ echo "add vm_disk1 0" >/sys/kernel/scst_tgt/targets/scst_local/scst_local_tgt/luns/mgmt
+
-+OR
++4. Check what you have
+
-++ You can edit /etc/infiniband/openib.conf to load srp driver and srp HA daemon
-+automatically ie. set SRP_LOAD=yes, and SRPHA_ENABLE=yes
-++ To set up and use high availability feature you need dm-multipath driver
-+and multipath tool
-++ Please refer to OFED-1.x SRP's user manual for more in-details instructions
-+on how-to enable/use HA feature
++ cat /proc/scsi/scsi
++ Attached devices:
++ Host: scsi0 Channel: 00 Id: 00 Lun: 00
++ Vendor: ATA Model: ST9320320AS Rev: 0303
++ Type: Direct-Access ANSI SCSI revision: 05
++ Host: scsi4 Channel: 00 Id: 00 Lun: 00
++ Vendor: TSSTcorp Model: CD/DVDW TS-L632D Rev: TO04
++ Type: CD-ROM ANSI SCSI revision: 05
++ Host: scsi7 Channel: 00 Id: 00 Lun: 00
++ Vendor: SCST_FIO Model: vm_disk1 Rev: 200
++ Type: Direct-Access ANSI SCSI revision: 04
+
-+To minimize QUEUE_FULL conditions, you can apply scst_increase_max_tgt_cmds
-+patch from SRPT package from http://sourceforge.net/project/showfiles.php?group_id=110471
++Or instead of manually "add_device" in (2) and step (3) write a
++scstadmin config:
+
-+Performance notes
-+-----------------
++HANDLER vdisk_fileio {
++ DEVICE vm_disk1 {
++ filename /some/path/vdisk1.img
++ }
++}
+
-+In some cases, for instance working with SSD devices, which consume 100%
-+of a single CPU load for data transfers in their internal threads, to
-+maximize IOPS it can be needed to assign for those threads dedicated
-+CPUs using Linux CPU affinity facilities. No IRQ processing should be
-+done on those CPUs. Check that using /proc/interrupts. See taskset
-+command and Documentation/IRQ-affinity.txt in your kernel's source tree
-+for how to assign CPU affinity to tasks and IRQs.
++TARGET_DRIVER scst_local {
++ TARGET scst_local_tgt {
++ LUN 0 vm_disk1
++ }
++}
+
-+The reason for that is that processing of coming commands in SIRQ context
-+can be done on the same CPUs as SSD devices' threads doing data
-+transfers. As the result, those threads won't receive all the CPU power
-+and perform worse.
++then:
+
-+Alternatively to CPU affinity assignment, you can try to enable SRP
-+target's internal thread. It will allows Linux CPU scheduler to better
-+distribute load among available CPUs. To enable SRP target driver's
-+internal thread you should load ib_srpt module with parameter
-+"thread=1".
++ insmod scst_local
++ scstadmin -config conf_file.cfg
+
-+Send questions about this driver to scst-devel@lists.sourceforge.net, CC:
-+Vu Pham <vuhuong@mellanox.com> and Bart Van Assche <bart.vanassche@gmail.com>.
-diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/Kconfig linux-2.6.36/drivers/scst/scst_local/Kconfig
---- orig/linux-2.6.36/drivers/scst/scst_local/Kconfig
-+++ linux-2.6.36/drivers/scst/scst_local/Kconfig
++More advanced examples:
++
++For (3) you can:
++
++ insmod scst_local add_default_tgt=0
++ echo "add_target scst_local_tgt session_name=scst_local_host" >/sys/kernel/scst_tgt/targets/scst_local//mgmt
++ echo "add vm_disk1 0" >/sys/kernel/scst_tgt/targets/scst_local/scst_local_tgt/luns/mgmt
++
++Scst_local module's parameter add_default_tgt disables creation of
++default target "scst_local_tgt" and session "scst_local_host", so you
++needed to create it manually.
++
++There can be any number of targets and sessions created. Each SCST
++session corresponds to SCSI host. You can change which LUNs assigned to
++each session by using SCST access control. This mode is intended for
++user space target drivers (see below).
++
++Alternatively, you can write an scstadmin's config file conf_file.cfg:
++
++HANDLER vdisk_fileio {
++ DEVICE vm_disk1 {
++ filename /some/path/vdisk1.img
++ }
++}
++
++TARGET_DRIVER scst_local {
++ TARGET scst_local_tgt {
++ session_name scst_local_host
++
++ LUN 0 vm_disk1
++ }
++}
++
++then:
++
++ insmod scst_local add_default_tgt=0
++ scstadmin -config conf_file.cfg
++
++NOTE! Although scstadmin allows to create scst_local's sessions using
++"session_name" expression, it doesn't save existing sessions during
++writing config file by "write_config" command. If you need this
++functionality, feel free to send a request for it in SCST development
++mailing list.
++
++5. Have fun.
++
++Some of this was coded while in Santa Clara, some in Bangalore, and some in
++Hyderabad. Noe doubt some will be coded on the way back to Santa Clara.
++
++The code still has bugs, so if you encounter any, email me the fixes at:
++
++ realrichardsharpe@gmail.com
++
++I am thinking of renaming this to something more interesting.
++
++
++Sysfs interface
++===============
++
++See SCST's README for a common SCST sysfs description.
++
++Root of this driver is /sys/kernel/scst_tgt/targets/scst_local. It has
++the following additional entry:
++
++ - stats - read-only attribute with some statistical information.
++
++Each target subdirectory contains the following additional entries:
++
++ - phys_transport_version - contains and allows to change physical
++ transport version descriptor. It determines by which physical
++ interface this target will look like. See SPC for more details. By
++ default, it is not defined (0).
++
++ - scsi_transport_version - contains and allows to change SCSI
++ transport version descriptor. It determines by which SCSI
++ transport this target will look like. See SPC for more details. By
++ default, it is SAS.
++
++Each session subdirectory contains the following additional entries:
++
++ - transport_id - contains this host's TransportID. This TransportID
++ used to identify initiator in Persisten Reservation commands. If you
++ change scsi_transport_version for a target, make sure you set for all
++ its sessions correct TransportID. See SPC for more details.
++
++ - host - links to the corresponding SCSI host. Using it you can find
++ local sg/bsg/sd/etc. devices of this session. For instance, this
++ links points out to host12, so you can find your sg devices by:
++
++$ lsscsi -g|grep "\[12:"
++[12:0:0:0] disk SCST_FIO rd1 200 /dev/sdc /dev/sg2
++[12:0:0:1] disk SCST_FIO nullio 200 /dev/sdd /dev/sg3
++
++They are /dev/sg2 and /dev/sg3.
++
++The following management commands available via /sys/kernel/scst_tgt/targets/scst_local/mgmt:
++
++ - add_target target_name [session_name=sess_name; [session_name=sess_name1;] [...]] -
++ creates a target with optionally one or more sessions.
++
++ - del_target target_name - deletes a target.
++
++ - add_session target_name session_name - adds to target target_name
++ session (SCSI host) with name session_name.
++
++ - del_session target_name session_name - deletes session session_name
++ from target target_name.
++
++
++Note on performance
++===================
++
++Although this driver implemented in the most performance effective way,
++including zero-copy passing data between SCSI/block subsystems and SCST,
++in many cases it is NOT suited to measure performance as a NULL link.
++For example, it is not suited for max IOPS measurements. This is because
++for such cases not performance of the link between the target and
++initiator is the bottleneck, but CPU or memory speed on the target or
++initiator. For scst_local you have both initiator and target on the same
++system, which means each your initiator and target are much less
++CPU/memory powerful.
++
++
++User space target drivers
++=========================
++
++Scst_local can be used to write full featured SCST target drivers in
++user space:
++
++1. For each SCSI target a user space target driver should create an
++ scst_local's target using "add_target" command.
++
++2. Then the user space target driver should, if needed, set its SCSI and
++ physical transport version descriptors using attributes
++ scsi_transport_version and phys_transport_version correspondingly in
++ /sys/kernel/scst_tgt/targets/scst_local/target_name directory.
++
++3. For incoming session (I_T nexus) from an initiator the user space
++ target driver should create scst_local's session using "add_session"
++ command.
++
++4. Then, if needed, the user space target driver should set TransportID
++ for this session (I_T nexus) using attribute
++ /sys/kernel/scst_tgt/targets/scst_local/target_name/sessions/session_name/transport_id
++
++5. Then the user space target driver should find out sg/bsg devices for
++ the LUNs the created session has using link
++ /sys/kernel/scst_tgt/targets/scst_local/target_name/sessions/session_name/host
++ as described above.
++
++6. Then the user space target driver can start serving the initiator using
++ found sg/bsg devices.
++
++For other connected initiators steps 3-6 should be repeated.
++
++
++Compilation options
++===================
++
++There are the following compilation options, that could be commented
++in/out in Makefile:
++
++ - CONFIG_SCST_LOCAL_FORCE_DIRECT_PROCESSING - by default, when this option
++ is not defined, scst_local reschedules all commands for processing in
++ one of the SCST threads. If this option is defined, scst_local tries
++ to not do it, if possible (sometimes queuecommand() called under
++ various locks held), but instead process them in the submitter's
++ context. This is to increase performance, but as on 2.6.37 and below
++ Linux block layer doesn't work with such kind of reentrance, hence
++ this option disabled by default. Note! At the moment in
++ scst_estimate_context*() returning DIRECT contexts disabled, so this
++ option doesn't have any real effect.
++
++
++Change log
++==========
++
++V0.1 24-Sep-2008 (Hyderabad) Initial coding, pretty chatty and messy,
++ but worked.
++
++V0.2 25-Sep-2008 (Hong Kong) Cleaned up the code a lot, reduced the log
++ chatter, fixed a bug where multiple LUNs did not
++ work. Also, added logging control. Tested with
++ five virtual disks. They all came up as /dev/sdb
++ through /dev/sdf and I could dd to them. Also
++ fixed a bug preventing multiple adapters.
++
++V0.3 26-Sep-2008 (Santa Clara) Added back a copyright plus cleaned up some
++ unused functions and structures.
++
++V0.4 5-Oct-2008 (Santa Clara) Changed name to scst_local as suggested, cleaned
++ up some unused variables (made them used) and
++ change allocation to a kmem_cache pool.
++
++V0.5 5-Oct-2008 (Santa Clara) Added mgmt commands to handle dev reset and
++ aborts. Not sure if aborts works. Also corrected
++ the version info and renamed readme to README.
++
++V0.6 7-Oct-2008 (Santa Clara) Removed some redundant code and made some
++ changes suggested by Vladislav.
++
++V0.7 11-Oct-2008 (Santa Clara) Moved into the scst tree. Cleaned up some
++ unused functions, used TRACE macros etc.
++
++V0.9 30-Nov-2008 (Mtn View) Cleaned up an additional problem with symbols not
++ being defined in older version of the kernel. Also
++ fixed some English and cleaned up this doc.
++
++V1.0 10-Sep-2010 (Moscow) Sysfs management added. Reviewed and cleaned up.
++
+diff -uprN orig/linux-2.6.39/drivers/scst/scst_local/Kconfig linux-2.6.39/drivers/scst/scst_local/Kconfig
+--- orig/linux-2.6.39/drivers/scst/scst_local/Kconfig
++++ linux-2.6.39/drivers/scst/scst_local/Kconfig
@@ -0,0 +1,22 @@
+config SCST_LOCAL
+ tristate "SCST Local driver"
@@ -74258,20 +79251,20 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/Kconfig linux-2.6.36/driver
+ unsafe.
+
+ If unsure, say "N".
-diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/Makefile linux-2.6.36/drivers/scst/scst_local/Makefile
---- orig/linux-2.6.36/drivers/scst/scst_local/Makefile
-+++ linux-2.6.36/drivers/scst/scst_local/Makefile
+diff -uprN orig/linux-2.6.39/drivers/scst/scst_local/Makefile linux-2.6.39/drivers/scst/scst_local/Makefile
+--- orig/linux-2.6.39/drivers/scst/scst_local/Makefile
++++ linux-2.6.39/drivers/scst/scst_local/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_SCST_LOCAL) += scst_local.o
+
-diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/drivers/scst/scst_local/scst_local.c
---- orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c
-+++ linux-2.6.36/drivers/scst/scst_local/scst_local.c
-@@ -0,0 +1,1563 @@
+diff -uprN orig/linux-2.6.39/drivers/scst/scst_local/scst_local.c linux-2.6.39/drivers/scst/scst_local/scst_local.c
+--- orig/linux-2.6.39/drivers/scst/scst_local/scst_local.c
++++ linux-2.6.39/drivers/scst/scst_local/scst_local.c
+@@ -0,0 +1,1589 @@
+/*
+ * Copyright (C) 2008 - 2010 Richard Sharpe
+ * Copyright (C) 1992 Eric Youngdale
-+ * Copyright (C) 2008 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
++ * Copyright (C) 2008 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
+ *
+ * Simulate a host adapter and an SCST target adapter back to back
+ *
@@ -74320,9 +79313,6 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+static unsigned long scst_local_trace_flag = SCST_LOCAL_DEFAULT_LOG_FLAGS;
+#endif
+
-+#define TRUE 1
-+#define FALSE 0
-+
+#define SCST_LOCAL_VERSION "1.0.0"
+static const char *scst_local_version_date = "20100910";
+
@@ -74377,10 +79367,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+ container_of(d, struct scst_local_sess, dev)
+
+static int __scst_local_add_adapter(struct scst_local_tgt *tgt,
-+ const char *initiator_name, struct scst_local_sess **out_sess,
-+ bool locked);
++ const char *initiator_name, bool locked);
+static int scst_local_add_adapter(struct scst_local_tgt *tgt,
-+ const char *initiator_name, struct scst_local_sess **out_sess);
++ const char *initiator_name);
+static void scst_local_remove_adapter(struct scst_local_sess *sess);
+static int scst_local_add_target(const char *target_name,
+ struct scst_local_tgt **out_tgt);
@@ -74447,7 +79436,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+}
+
+static int scst_local_get_initiator_port_transport_id(
-+ struct scst_session *scst_sess, uint8_t **transport_id)
++ struct scst_tgt *tgt, struct scst_session *scst_sess,
++ uint8_t **transport_id)
+{
+ int res = 0;
+ int tr_id_size = 0;
@@ -74547,13 +79537,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+{
+ struct scst_tgt *scst_tgt;
+ struct scst_local_tgt *tgt;
-+ ssize_t res;
++ ssize_t res = -ENOENT;
+
+ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
-+ return -ENOENT;
++ goto out;
+
+ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ tgt = (struct scst_local_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
++ tgt = scst_tgt_get_tgt_priv(scst_tgt);
++ if (!tgt)
++ goto out_up;
+
+ if (tgt->scsi_transport_version != 0)
+ res = sprintf(buf, "0x%x\n%s", tgt->scsi_transport_version,
@@ -74561,23 +79553,27 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+ else
+ res = sprintf(buf, "0x%x\n", 0x0BE0); /* SAS */
+
++out_up:
+ up_read(&scst_local_exit_rwsem);
++out:
+ return res;
+}
+
+static ssize_t scst_local_scsi_transport_version_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buffer, size_t size)
+{
-+ ssize_t res;
++ ssize_t res = -ENOENT;
+ struct scst_tgt *scst_tgt;
+ struct scst_local_tgt *tgt;
+ unsigned long val;
+
+ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
-+ return -ENOENT;
++ goto out;
+
+ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ tgt = (struct scst_local_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
++ tgt = scst_tgt_get_tgt_priv(scst_tgt);
++ if (!tgt)
++ goto out_up;
+
+ res = strict_strtoul(buffer, 0, &val);
+ if (res != 0) {
@@ -74591,6 +79587,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+
+out_up:
+ up_read(&scst_local_exit_rwsem);
++out:
+ return res;
+}
+
@@ -74604,35 +79601,41 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+{
+ struct scst_tgt *scst_tgt;
+ struct scst_local_tgt *tgt;
-+ ssize_t res;
++ ssize_t res = -ENOENT;
+
+ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
-+ return -ENOENT;
++ goto out;
+
+ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ tgt = (struct scst_local_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
++ tgt = scst_tgt_get_tgt_priv(scst_tgt);
++ if (!tgt)
++ goto out_up;
+
+ res = sprintf(buf, "0x%x\n%s", tgt->phys_transport_version,
+ (tgt->phys_transport_version != 0) ?
+ SCST_SYSFS_KEY_MARK "\n" : "");
+
++out_up:
+ up_read(&scst_local_exit_rwsem);
++out:
+ return res;
+}
+
+static ssize_t scst_local_phys_transport_version_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buffer, size_t size)
+{
-+ ssize_t res;
++ ssize_t res = -ENOENT;
+ struct scst_tgt *scst_tgt;
+ struct scst_local_tgt *tgt;
+ unsigned long val;
+
+ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
-+ return -ENOENT;
++ goto out;
+
+ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ tgt = (struct scst_local_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
++ tgt = scst_tgt_get_tgt_priv(scst_tgt);
++ if (!tgt)
++ goto out_up;
+
+ res = strict_strtoul(buffer, 0, &val);
+ if (res != 0) {
@@ -74646,6 +79649,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+
+out_up:
+ up_read(&scst_local_exit_rwsem);
++out:
+ return res;
+}
+
@@ -74795,7 +79799,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+ goto out_remove;
+ }
+
-+ res = scst_local_add_adapter(tgt, p, NULL);
++ res = scst_local_add_adapter(tgt, p);
+ if (res != 0)
+ goto out_remove;
+ }
@@ -74890,7 +79894,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+ }
+
+ if (strcasecmp("add_session", command) == 0) {
-+ res = __scst_local_add_adapter(tgt, session_name, NULL, true);
++ res = __scst_local_add_adapter(tgt, session_name, true);
+ } else if (strcasecmp("del_session", command) == 0) {
+ struct scst_local_sess *s, *sess = NULL;
+ list_for_each_entry(s, &tgt->sessions_list,
@@ -74932,7 +79936,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+ sess = to_scst_lcl_sess(scsi_get_device(SCpnt->device->host));
+
+ ret = scst_rx_mgmt_fn_tag(sess->scst_sess, SCST_ABORT_TASK, SCpnt->tag,
-+ FALSE, &dev_reset_completion);
++ false, &dev_reset_completion);
+
+ /* Now wait for the completion ... */
+ wait_for_completion_interruptible(&dev_reset_completion);
@@ -74949,7 +79953,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+static int scst_local_device_reset(struct scsi_cmnd *SCpnt)
+{
+ struct scst_local_sess *sess;
-+ uint16_t lun;
++ __be16 lun;
+ int ret;
+ DECLARE_COMPLETION_ONSTACK(dev_reset_completion);
+
@@ -74957,11 +79961,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+
+ sess = to_scst_lcl_sess(scsi_get_device(SCpnt->device->host));
+
-+ lun = SCpnt->device->lun;
-+ lun = cpu_to_be16(lun);
++ lun = cpu_to_be16(SCpnt->device->lun);
+
+ ret = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_LUN_RESET,
-+ (const uint8_t *)&lun, sizeof(lun), FALSE,
++ (const uint8_t *)&lun, sizeof(lun), false,
+ &dev_reset_completion);
+
+ /* Now wait for the completion ... */
@@ -74979,7 +79982,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+static int scst_local_target_reset(struct scsi_cmnd *SCpnt)
+{
+ struct scst_local_sess *sess;
-+ uint16_t lun;
++ __be16 lun;
+ int ret;
+ DECLARE_COMPLETION_ONSTACK(dev_reset_completion);
+
@@ -74987,11 +79990,10 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+
+ sess = to_scst_lcl_sess(scsi_get_device(SCpnt->device->host));
+
-+ lun = SCpnt->device->lun;
-+ lun = cpu_to_be16(lun);
++ lun = cpu_to_be16(SCpnt->device->lun);
+
+ ret = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_TARGET_RESET,
-+ (const uint8_t *)&lun, sizeof(lun), FALSE,
++ (const uint8_t *)&lun, sizeof(lun), false,
+ &dev_reset_completion);
+
+ /* Now wait for the completion ... */
@@ -75057,15 +80059,20 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+ * This does the heavy lifting ... we pass all the commands on to the
+ * target driver and have it do its magic ...
+ */
-+static int scst_local_queuecommand(struct scsi_cmnd *SCpnt,
-+ void (*done)(struct scsi_cmnd *))
++#ifdef CONFIG_SCST_LOCAL_FORCE_DIRECT_PROCESSING
++static int scst_local_queuecommand(struct Scsi_Host *host,
++ struct scsi_cmnd *SCpnt)
++#else
++static int scst_local_queuecommand_lck(struct scsi_cmnd *SCpnt,
++ void (*done)(struct scsi_cmnd *))
+ __acquires(&h->host_lock)
+ __releases(&h->host_lock)
++#endif
+{
+ struct scst_local_sess *sess;
+ struct scatterlist *sgl = NULL;
+ int sgl_count = 0;
-+ uint16_t lun;
++ __be16 lun;
+ struct scst_cmd *scst_cmd = NULL;
+ scst_data_direction dir;
+
@@ -75078,22 +80085,19 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+ scsi_set_resid(SCpnt, 0);
+
+ /*
-+ * We save a pointer to the done routine in SCpnt->scsi_done and
-+ * we save that as tgt specific stuff below.
-+ */
-+ SCpnt->scsi_done = done;
-+
-+ /*
+ * Tell the target that we have a command ... but first we need
+ * to get the LUN into a format that SCST understand
++ *
++ * NOTE! We need to call it with atomic parameter true to not
++ * get into mem alloc deadlock when mounting file systems over
++ * our devices.
+ */
-+ lun = SCpnt->device->lun;
-+ lun = cpu_to_be16(lun);
++ lun = cpu_to_be16(SCpnt->device->lun);
+ scst_cmd = scst_rx_cmd(sess->scst_sess, (const uint8_t *)&lun,
-+ sizeof(lun), SCpnt->cmnd, SCpnt->cmd_len, TRUE);
++ sizeof(lun), SCpnt->cmnd, SCpnt->cmd_len, true);
+ if (!scst_cmd) {
+ PRINT_ERROR("%s", "scst_rx_cmd() failed");
-+ return -ENOMEM;
++ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ scst_cmd_set_tag(scst_cmd, SCpnt->tag);
@@ -75121,11 +80125,13 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+ case DMA_TO_DEVICE:
+ dir = SCST_DATA_WRITE;
+ scst_cmd_set_expected(scst_cmd, dir, scsi_bufflen(SCpnt));
++ scst_cmd_set_noio_mem_alloc(scst_cmd);
+ scst_cmd_set_tgt_sg(scst_cmd, sgl, sgl_count);
+ break;
+ case DMA_FROM_DEVICE:
+ dir = SCST_DATA_READ;
+ scst_cmd_set_expected(scst_cmd, dir, scsi_bufflen(SCpnt));
++ scst_cmd_set_noio_mem_alloc(scst_cmd);
+ scst_cmd_set_tgt_sg(scst_cmd, sgl, sgl_count);
+ break;
+ case DMA_BIDIRECTIONAL:
@@ -75134,6 +80140,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+ scst_cmd_set_expected(scst_cmd, dir, scsi_bufflen(SCpnt));
+ scst_cmd_set_expected_out_transfer_len(scst_cmd,
+ scsi_in(SCpnt)->length);
++ scst_cmd_set_noio_mem_alloc(scst_cmd);
+ scst_cmd_set_tgt_sg(scst_cmd, scsi_in(SCpnt)->table.sgl,
+ scsi_in(SCpnt)->table.nents);
+ scst_cmd_set_tgt_out_sg(scst_cmd, sgl, sgl_count);
@@ -75148,16 +80155,20 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+ /* Save the correct thing below depending on version */
+ scst_cmd_set_tgt_priv(scst_cmd, SCpnt);
+
++/*
++ * Although starting from 2.6.37 queuecommand() called with no host_lock
++ * held, in fact without DEF_SCSI_QCMD() it doesn't work and leading
++ * to various problems like hangs under highload. Most likely, it is caused
++ * by some not reenrable block layer function(s). So, until that changed, we
++ * have to go ahead with extra context switch. In this regard doesn't matter
++ * much if we under host_lock or not (although we absolutely don't need this
++ * lock), so let's have simpler code with DEF_SCSI_QCMD().
++ */
+#ifdef CONFIG_SCST_LOCAL_FORCE_DIRECT_PROCESSING
-+ {
-+ struct Scsi_Host *h = SCpnt->device->host;
-+ spin_unlock_irq(h->host_lock);
-+ scst_cmd_init_done(scst_cmd, scst_estimate_context_direct());
-+ spin_lock_irq(h->host_lock);
-+ }
++ scst_cmd_init_done(scst_cmd, SCST_CONTEXT_DIRECT);
+#else
+ /*
-+ * Unfortunately, we called with IRQs disabled, so have no choice,
++ * We called with IRQs disabled, so have no choice,
+ * except to pass to the thread context.
+ */
+ scst_cmd_init_done(scst_cmd, SCST_CONTEXT_THREAD);
@@ -75167,6 +80178,15 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+ return 0;
+}
+
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37) && \
++ !defined(CONFIG_SCST_LOCAL_FORCE_DIRECT_PROCESSING)
++/*
++ * See comment in scst_local_queuecommand_lck() near
++ * CONFIG_SCST_LOCAL_FORCE_DIRECT_PROCESSING
++ */
++static DEF_SCSI_QCMD(scst_local_queuecommand)
++#endif
++
+static int scst_local_targ_pre_exec(struct scst_cmd *scst_cmd)
+{
+ int res = SCST_PREPROCESS_STATUS_SUCCESS;
@@ -75184,6 +80204,8 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+/* Must be called under sess->aen_lock. Drops then reacquires it inside. */
+static void scst_process_aens(struct scst_local_sess *sess,
+ bool cleanup_only)
++ __releases(&sess->aen_lock)
++ __acquires(&sess->aen_lock)
+{
+ struct scst_aen_work_item *work_item = NULL;
+
@@ -75426,12 +80448,11 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+ .eh_target_reset_handler = scst_local_target_reset,
+ .can_queue = 256,
+ .this_id = -1,
-+ /* SCST doesn't support sg chaining */
-+ .sg_tablesize = SG_MAX_SINGLE_ALLOC,
++ .sg_tablesize = 0xFFFF,
+ .cmd_per_lun = 32,
+ .max_sectors = 0xffff,
-+ /* SCST doesn't support sg chaining */
-+ .use_clustering = ENABLE_CLUSTERING,
++ /* Possible pass-through backend device may not support clustering */
++ .use_clustering = DISABLE_CLUSTERING,
+ .skip_settle_delay = 1,
+ .module = THIS_MODULE,
+};
@@ -75544,7 +80565,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+
+ cancel_work_sync(&sess->aen_work);
+
-+ scst_unregister_session(sess->scst_sess, TRUE, NULL);
++ scst_unregister_session(sess->scst_sess, true, NULL);
+
+ kfree(sess);
+
@@ -75554,8 +80575,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+}
+
+static int __scst_local_add_adapter(struct scst_local_tgt *tgt,
-+ const char *initiator_name, struct scst_local_sess **out_sess,
-+ bool locked)
++ const char *initiator_name, bool locked)
+{
+ int res;
+ struct scst_local_sess *sess;
@@ -75625,7 +80645,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+ device_unregister(&sess->dev);
+
+unregister_session:
-+ scst_unregister_session(sess->scst_sess, TRUE, NULL);
++ scst_unregister_session(sess->scst_sess, true, NULL);
+
+out_free:
+ kfree(sess);
@@ -75633,9 +80653,9 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+}
+
+static int scst_local_add_adapter(struct scst_local_tgt *tgt,
-+ const char *initiator_name, struct scst_local_sess **out_sess)
++ const char *initiator_name)
+{
-+ return __scst_local_add_adapter(tgt, initiator_name, out_sess, false);
++ return __scst_local_add_adapter(tgt, initiator_name, false);
+}
+
+/* Must be called under scst_local_mutex */
@@ -75773,7 +80793,7 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+ if (ret != 0)
+ goto tgt_templ_unreg;
+
-+ ret = scst_local_add_adapter(tgt, "scst_local_host", NULL);
++ ret = scst_local_add_adapter(tgt, "scst_local_host");
+ if (ret != 0)
+ goto tgt_unreg;
+
@@ -75830,267 +80850,3 @@ diff -uprN orig/linux-2.6.36/drivers/scst/scst_local/scst_local.c linux-2.6.36/d
+
+device_initcall(scst_local_init);
+module_exit(scst_local_exit);
-+
-diff -uprN orig/linux-2.6.36/Documentation/scst/README.scst_local linux-2.6.36/Documentation/scst/README.scst_local
---- orig/linux-2.6.36/Documentation/scst/README.scst_local
-+++ linux-2.6.36/Documentation/scst/README.scst_local
-@@ -0,0 +1,259 @@
-+SCST Local ...
-+Richard Sharpe, 30-Nov-2008
-+
-+This is the SCST Local driver. Its function is to allow you to access devices
-+that are exported via SCST directly on the same Linux system that they are
-+exported from.
-+
-+No assumptions are made in the code about the device types on the target, so
-+any device handlers that you load in SCST should be visible, including tapes
-+and so forth.
-+
-+You can freely use any sg, sd, st, etc. devices imported from target,
-+except the following: you can't mount file systems or put swap on them.
-+This is a limitation of Linux memory/cache manager. See SCST README file
-+for details.
-+
-+To build, simply issue 'make' in the scst_local directory.
-+
-+Try 'modinfo scst_local' for a listing of module parameters so far.
-+
-+Here is how I have used it so far:
-+
-+1. Load up scst:
-+
-+ modprobe scst
-+ modprobe scst_vdisk
-+
-+2. Create a virtual disk (or your own device handler):
-+
-+ dd if=/dev/zero of=/some/path/vdisk1.img bs=16384 count=1000000
-+ echo "add_device vm_disk1 filename=/some/path/vdisk1.img" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
-+
-+3. Load the scst_local driver:
-+
-+ insmod scst_local
-+ echo "add vm_disk1 0" >/sys/kernel/scst_tgt/targets/scst_local/scst_local_tgt/luns/mgmt
-+
-+4. Check what you have
-+
-+ cat /proc/scsi/scsi
-+ Attached devices:
-+ Host: scsi0 Channel: 00 Id: 00 Lun: 00
-+ Vendor: ATA Model: ST9320320AS Rev: 0303
-+ Type: Direct-Access ANSI SCSI revision: 05
-+ Host: scsi4 Channel: 00 Id: 00 Lun: 00
-+ Vendor: TSSTcorp Model: CD/DVDW TS-L632D Rev: TO04
-+ Type: CD-ROM ANSI SCSI revision: 05
-+ Host: scsi7 Channel: 00 Id: 00 Lun: 00
-+ Vendor: SCST_FIO Model: vm_disk1 Rev: 200
-+ Type: Direct-Access ANSI SCSI revision: 04
-+
-+Or instead of manually "add_device" in (2) and step (3) write a
-+scstadmin config:
-+
-+HANDLER vdisk_fileio {
-+ DEVICE vm_disk1 {
-+ filename /some/path/vdisk1.img
-+ }
-+}
-+
-+TARGET_DRIVER scst_local {
-+ TARGET scst_local_tgt {
-+ LUN 0 vm_disk1
-+ }
-+}
-+
-+then:
-+
-+ insmod scst_local
-+ scstadmin -config conf_file.cfg
-+
-+More advanced examples:
-+
-+For (3) you can:
-+
-+ insmod scst_local add_default_tgt=0
-+ echo "add_target scst_local_tgt session_name=scst_local_host" >/sys/kernel/scst_tgt/targets/scst_local//mgmt
-+ echo "add vm_disk1 0" >/sys/kernel/scst_tgt/targets/scst_local/scst_local_tgt/luns/mgmt
-+
-+Scst_local module's parameter add_default_tgt disables creation of
-+default target "scst_local_tgt" and session "scst_local_host", so you
-+needed to create it manually.
-+
-+There can be any number of targets and sessions created. Each SCST
-+session corresponds to SCSI host. You can change which LUNs assigned to
-+each session by using SCST access control. This mode is intended for
-+user space target drivers (see below).
-+
-+Alternatively, you can write an scstadmin's config file conf_file.cfg:
-+
-+HANDLER vdisk_fileio {
-+ DEVICE vm_disk1 {
-+ filename /some/path/vdisk1.img
-+ }
-+}
-+
-+TARGET_DRIVER scst_local {
-+ TARGET scst_local_tgt {
-+ session_name scst_local_host
-+
-+ LUN 0 vm_disk1
-+ }
-+}
-+
-+then:
-+
-+ insmod scst_local add_default_tgt=0
-+ scstadmin -config conf_file.cfg
-+
-+NOTE! Although scstadmin allows to create scst_local's sessions using
-+"session_name" expression, it doesn't save existing sessions during
-+writing config file by "write_config" command. If you need this
-+functionality, feel free to send a request for it in SCST development
-+mailing list.
-+
-+5. Have fun.
-+
-+Some of this was coded while in Santa Clara, some in Bangalore, and some in
-+Hyderabad. Noe doubt some will be coded on the way back to Santa Clara.
-+
-+The code still has bugs, so if you encounter any, email me the fixes at:
-+
-+ realrichardsharpe@gmail.com
-+
-+I am thinking of renaming this to something more interesting.
-+
-+Sysfs interface
-+===============
-+
-+See SCST's README for a common SCST sysfs description.
-+
-+Root of this driver is /sys/kernel/scst_tgt/targets/scst_local. It has
-+the following additional entry:
-+
-+ - stats - read-only attribute with some statistical information.
-+
-+Each target subdirectory contains the following additional entries:
-+
-+ - phys_transport_version - contains and allows to change physical
-+ transport version descriptor. It determines by which phisical
-+ interface this target will look like. See SPC for more details. By
-+ default, it is not defined (0).
-+
-+ - scsi_transport_version - contains and allows to change SCSI
-+ transport version descriptor. It determines by which SCSI
-+ transport this target will look like. See SPC for more details. By
-+ default, it is SAS.
-+
-+Each session subdirectory contains the following additional entries:
-+
-+ - transport_id - contains this host's TransportID. This TransportID
-+ used to identify initiator in Persisten Reservation commands. If you
-+ change scsi_transport_version for a target, make sure you set for all
-+ its sessions correct TransportID. See SPC for more details.
-+
-+ - host - links to the corresponding SCSI host. Using it you can find
-+ local sg/bsg/sd/etc. devices of this session. For instance, this
-+ links points out to host12, so you can find your sg devices by:
-+
-+$ lsscsi -g|grep "\[12:"
-+[12:0:0:0] disk SCST_FIO rd1 200 /dev/sdc /dev/sg2
-+[12:0:0:1] disk SCST_FIO nullio 200 /dev/sdd /dev/sg3
-+
-+They are /dev/sg2 and /dev/sg3.
-+
-+The following management commands available via /sys/kernel/scst_tgt/targets/scst_local/mgmt:
-+
-+ - add_target target_name [session_name=sess_name; [session_name=sess_name1;] [...]] -
-+ creates a target with optionally one or more sessions.
-+
-+ - del_target target_name - deletes a target.
-+
-+ - add_session target_name session_name - adds to target target_name
-+ session (SCSI host) with name session_name.
-+
-+ - del_session target_name session_name - deletes session session_name
-+ from target target_name.
-+
-+Note on performance
-+===================
-+
-+Although this driver implemented in the most performance effective way,
-+including zero-copy passing data between SCSI/block subsystems and SCST,
-+in many cases it is NOT suited to measure performance as a NULL link.
-+For example, it is not suited for max IOPS measurements. This is because
-+for such cases not performance of the link between the target and
-+initiator is the bottleneck, but CPU or memory speed on the target or
-+initiator. For scst_local you have both initiator and target on the same
-+system, which means each your initiator and target are much less
-+CPU/memory powerful.
-+
-+User space target drivers
-+=========================
-+
-+Scst_local can be used to write full featured SCST target drivers in
-+user space:
-+
-+1. For each SCSI target a user space target driver should create an
-+ scst_local's target using "add_target" command.
-+
-+2. Then the user space target driver should, if needed, set its SCSI and
-+ physical transport version descriptors using attributes
-+ scsi_transport_version and phys_transport_version correspondingly in
-+ /sys/kernel/scst_tgt/targets/scst_local/target_name directory.
-+
-+3. For incoming session (I_T nexus) from an initiator the user space
-+ target driver should create scst_local's session using "add_session"
-+ command.
-+
-+4. Then, if needed, the user space target driver should set TransportID
-+ for this session (I_T nexus) using attribute
-+ /sys/kernel/scst_tgt/targets/scst_local/target_name/sessions/session_name/transport_id
-+
-+5. Then the user space target driver should find out sg/bsg devices for
-+ the LUNs the created session has using link
-+ /sys/kernel/scst_tgt/targets/scst_local/target_name/sessions/session_name/host
-+ as described above.
-+
-+6. Then the user space target driver can start serving the initiator using
-+ found sg/bsg devices.
-+
-+For other connected initiators steps 3-6 should be repeated.
-+
-+Change log
-+==========
-+
-+V0.1 24-Sep-2008 (Hyderabad) Initial coding, pretty chatty and messy,
-+ but worked.
-+
-+V0.2 25-Sep-2008 (Hong Kong) Cleaned up the code a lot, reduced the log
-+ chatter, fixed a bug where multiple LUNs did not
-+ work. Also, added logging control. Tested with
-+ five virtual disks. They all came up as /dev/sdb
-+ through /dev/sdf and I could dd to them. Also
-+ fixed a bug preventing multiple adapters.
-+
-+V0.3 26-Sep-2008 (Santa Clara) Added back a copyright plus cleaned up some
-+ unused functions and structures.
-+
-+V0.4 5-Oct-2008 (Santa Clara) Changed name to scst_local as suggested, cleaned
-+ up some unused variables (made them used) and
-+ change allocation to a kmem_cache pool.
-+
-+V0.5 5-Oct-2008 (Santa Clara) Added mgmt commands to handle dev reset and
-+ aborts. Not sure if aborts works. Also corrected
-+ the version info and renamed readme to README.
-+
-+V0.6 7-Oct-2008 (Santa Clara) Removed some redundant code and made some
-+ changes suggested by Vladislav.
-+
-+V0.7 11-Oct-2008 (Santa Clara) Moved into the scst tree. Cleaned up some
-+ unused functions, used TRACE macros etc.
-+
-+V0.9 30-Nov-2008 (Mtn View) Cleaned up an additional problem with symbols not
-+ being defined in older version of the kernel. Also
-+ fixed some English and cleaned up this doc.
-+
-+V1.0 10-Sep-2010 (Moscow) Sysfs management added. Reviewed and cleaned up.
-+
diff --git a/main/linux-scst/setlocalversion.patch b/main/linux-scst/setlocalversion.patch
deleted file mode 100644
index d82eb170a..000000000
--- a/main/linux-scst/setlocalversion.patch
+++ /dev/null
@@ -1,11 +0,0 @@
---- ./scripts/setlocalversion.orig
-+++ ./scripts/setlocalversion
-@@ -43,7 +43,7 @@
- fi
-
- # Check for git and a git repo.
-- if head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
-+ if [ -d "$srctree"/.git ] && head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
-
- # If we are at a tagged commit (like "v2.6.30-rc6"), we ignore
- # it, because this version is defined in the top level Makefile.
diff --git a/main/linux-scst/unionfs-2.5.7_for_2.6.36.diff b/main/linux-scst/unionfs-2.5.7_for_2.6.36.diff
deleted file mode 100644
index fabe75809..000000000
--- a/main/linux-scst/unionfs-2.5.7_for_2.6.36.diff
+++ /dev/null
@@ -1,11253 +0,0 @@
-diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
-index 4303614..5ade4a8 100644
---- a/Documentation/filesystems/00-INDEX
-+++ b/Documentation/filesystems/00-INDEX
-@@ -112,6 +112,8 @@ udf.txt
- - info and mount options for the UDF filesystem.
- ufs.txt
- - info on the ufs filesystem.
-+unionfs/
-+ - info on the unionfs filesystem
- vfat.txt
- - info on using the VFAT filesystem used in Windows NT and Windows 95
- vfs.txt
-diff --git a/Documentation/filesystems/unionfs/00-INDEX b/Documentation/filesystems/unionfs/00-INDEX
-new file mode 100644
-index 0000000..96fdf67
---- /dev/null
-+++ b/Documentation/filesystems/unionfs/00-INDEX
-@@ -0,0 +1,10 @@
-+00-INDEX
-+ - this file.
-+concepts.txt
-+ - A brief introduction of concepts.
-+issues.txt
-+ - A summary of known issues with unionfs.
-+rename.txt
-+ - Information regarding rename operations.
-+usage.txt
-+ - Usage information and examples.
-diff --git a/Documentation/filesystems/unionfs/concepts.txt b/Documentation/filesystems/unionfs/concepts.txt
-new file mode 100644
-index 0000000..b853788
---- /dev/null
-+++ b/Documentation/filesystems/unionfs/concepts.txt
-@@ -0,0 +1,287 @@
-+Unionfs 2.x CONCEPTS:
-+=====================
-+
-+This file describes the concepts needed by a namespace unification file
-+system.
-+
-+
-+Branch Priority:
-+================
-+
-+Each branch is assigned a unique priority - starting from 0 (highest
-+priority). No two branches can have the same priority.
-+
-+
-+Branch Mode:
-+============
-+
-+Each branch is assigned a mode - read-write or read-only. This allows
-+directories on media mounted read-write to be used in a read-only manner.
-+
-+
-+Whiteouts:
-+==========
-+
-+A whiteout removes a file name from the namespace. Whiteouts are needed when
-+one attempts to remove a file on a read-only branch.
-+
-+Suppose we have a two-branch union, where branch 0 is read-write and branch
-+1 is read-only. And a file 'foo' on branch 1:
-+
-+./b0/
-+./b1/
-+./b1/foo
-+
-+The unified view would simply be:
-+
-+./union/
-+./union/foo
-+
-+Since 'foo' is stored on a read-only branch, it cannot be removed. A
-+whiteout is used to remove the name 'foo' from the unified namespace. Again,
-+since branch 1 is read-only, the whiteout cannot be created there. So, we
-+try on a higher priority (lower numerically) branch and create the whiteout
-+there.
-+
-+./b0/
-+./b0/.wh.foo
-+./b1/
-+./b1/foo
-+
-+Later, when Unionfs traverses branches (due to lookup or readdir), it
-+eliminate 'foo' from the namespace (as well as the whiteout itself.)
-+
-+
-+Opaque Directories:
-+===================
-+
-+Assume we have a unionfs mount comprising of two branches. Branch 0 is
-+empty; branch 1 has the directory /a and file /a/f. Let's say we mount a
-+union of branch 0 as read-write and branch 1 as read-only. Now, let's say
-+we try to perform the following operation in the union:
-+
-+ rm -fr a
-+
-+Because branch 1 is not writable, we cannot physically remove the file /a/f
-+or the directory /a. So instead, we will create a whiteout in branch 0
-+named /.wh.a, masking out the name "a" from branch 1. Next, let's say we
-+try to create a directory named "a" as follows:
-+
-+ mkdir a
-+
-+Because we have a whiteout for "a" already, Unionfs behaves as if "a"
-+doesn't exist, and thus will delete the whiteout and replace it with an
-+actual directory named "a".
-+
-+The problem now is that if you try to "ls" in the union, Unionfs will
-+perform is normal directory name unification, for *all* directories named
-+"a" in all branches. This will cause the file /a/f from branch 1 to
-+re-appear in the union's namespace, which violates Unix semantics.
-+
-+To avoid this problem, we have a different form of whiteouts for
-+directories, called "opaque directories" (same as BSD Union Mount does).
-+Whenever we replace a whiteout with a directory, that directory is marked as
-+opaque. In Unionfs 2.x, it means that we create a file named
-+/a/.wh.__dir_opaque in branch 0, after having created directory /a there.
-+When unionfs notices that a directory is opaque, it stops all namespace
-+operations (including merging readdir contents) at that opaque directory.
-+This prevents re-exposing names from masked out directories.
-+
-+
-+Duplicate Elimination:
-+======================
-+
-+It is possible for files on different branches to have the same name.
-+Unionfs then has to select which instance of the file to show to the user.
-+Given the fact that each branch has a priority associated with it, the
-+simplest solution is to take the instance from the highest priority
-+(numerically lowest value) and "hide" the others.
-+
-+
-+Unlinking:
-+=========
-+
-+Unlink operation on non-directory instances is optimized to remove the
-+maximum possible objects in case multiple underlying branches have the same
-+file name. The unlink operation will first try to delete file instances
-+from highest priority branch and then move further to delete from remaining
-+branches in order of their decreasing priority. Consider a case (F..D..F),
-+where F is a file and D is a directory of the same name; here, some
-+intermediate branch could have an empty directory instance with the same
-+name, so this operation also tries to delete this directory instance and
-+proceed further to delete from next possible lower priority branch. The
-+unionfs unlink operation will smoothly delete the files with same name from
-+all possible underlying branches. In case if some error occurs, it creates
-+whiteout in highest priority branch that will hide file instance in rest of
-+the branches. An error could occur either if an unlink operations in any of
-+the underlying branch failed or if a branch has no write permission.
-+
-+This unlinking policy is known as "delete all" and it has the benefit of
-+overall reducing the number of inodes used by duplicate files, and further
-+reducing the total number of inodes consumed by whiteouts. The cost is of
-+extra processing, but testing shows this extra processing is well worth the
-+savings.
-+
-+
-+Copyup:
-+=======
-+
-+When a change is made to the contents of a file's data or meta-data, they
-+have to be stored somewhere. The best way is to create a copy of the
-+original file on a branch that is writable, and then redirect the write
-+though to this copy. The copy must be made on a higher priority branch so
-+that lookup and readdir return this newer "version" of the file rather than
-+the original (see duplicate elimination).
-+
-+An entire unionfs mount can be read-only or read-write. If it's read-only,
-+then none of the branches will be written to, even if some of the branches
-+are physically writeable. If the unionfs mount is read-write, then the
-+leftmost (highest priority) branch must be writeable (for copyup to take
-+place); the remaining branches can be any mix of read-write and read-only.
-+
-+In a writeable mount, unionfs will create new files/dir in the leftmost
-+branch. If one tries to modify a file in a read-only branch/media, unionfs
-+will copyup the file to the leftmost branch and modify it there. If you try
-+to modify a file from a writeable branch which is not the leftmost branch,
-+then unionfs will modify it in that branch; this is useful if you, say,
-+unify differnet packages (e.g., apache, sendmail, ftpd, etc.) and you want
-+changes to specific package files to remain logically in the directory where
-+they came from.
-+
-+Cache Coherency:
-+================
-+
-+Unionfs users often want to be able to modify files and directories directly
-+on the lower branches, and have those changes be visible at the Unionfs
-+level. This means that data (e.g., pages) and meta-data (dentries, inodes,
-+open files, etc.) have to be synchronized between the upper and lower
-+layers. In other words, the newest changes from a layer below have to be
-+propagated to the Unionfs layer above. If the two layers are not in sync, a
-+cache incoherency ensues, which could lead to application failures and even
-+oopses. The Linux kernel, however, has a rather limited set of mechanisms
-+to ensure this inter-layer cache coherency---so Unionfs has to do most of
-+the hard work on its own.
-+
-+Maintaining Invariants:
-+
-+The way Unionfs ensures cache coherency is as follows. At each entry point
-+to a Unionfs file system method, we call a utility function to validate the
-+primary objects of this method. Generally, we call unionfs_file_revalidate
-+on open files, and __unionfs_d_revalidate_chain on dentries (which also
-+validates inodes). These utility functions check to see whether the upper
-+Unionfs object is in sync with any of the lower objects that it represents.
-+The checks we perform include whether the Unionfs superblock has a newer
-+generation number, or if any of the lower objects mtime's or ctime's are
-+newer. (Note: generation numbers change when branch-management commands are
-+issued, so in a way, maintaining cache coherency is also very important for
-+branch-management.) If indeed we determine that any Unionfs object is no
-+longer in sync with its lower counterparts, then we rebuild that object
-+similarly to how we do so for branch-management.
-+
-+While rebuilding Unionfs's objects, we also purge any page mappings and
-+truncate inode pages (see fs/unionfs/dentry.c:purge_inode_data). This is to
-+ensure that Unionfs will re-get the newer data from the lower branches. We
-+perform this purging only if the Unionfs operation in question is a reading
-+operation; if Unionfs is performing a data writing operation (e.g., ->write,
-+->commit_write, etc.) then we do NOT flush the lower mappings/pages: this is
-+because (1) a self-deadlock could occur and (2) the upper Unionfs pages are
-+considered more authoritative anyway, as they are newer and will overwrite
-+any lower pages.
-+
-+Unionfs maintains the following important invariant regarding mtime's,
-+ctime's, and atime's: the upper inode object's times are the max() of all of
-+the lower ones. For non-directory objects, there's only one object below,
-+so the mapping is simple; for directory objects, there could me multiple
-+lower objects and we have to sync up with the newest one of all the lower
-+ones. This invariant is important to maintain, especially for directories
-+(besides, we need this to be POSIX compliant). A union could comprise
-+multiple writable branches, each of which could change. If we don't reflect
-+the newest possible mtime/ctime, some applications could fail. For example,
-+NFSv2/v3 exports check for newer directory mtimes on the server to determine
-+if the client-side attribute cache should be purged.
-+
-+To maintain these important invariants, of course, Unionfs carefully
-+synchronizes upper and lower times in various places. For example, if we
-+copy-up a file to a top-level branch, the parent directory where the file
-+was copied up to will now have a new mtime: so after a successful copy-up,
-+we sync up with the new top-level branch's parent directory mtime.
-+
-+Implementation:
-+
-+This cache-coherency implementation is efficient because it defers any
-+synchronizing between the upper and lower layers until absolutely needed.
-+Consider the example a common situation where users perform a lot of lower
-+changes, such as untarring a whole package. While these take place,
-+typically the user doesn't access the files via Unionfs; only after the
-+lower changes are done, does the user try to access the lower files. With
-+our cache-coherency implementation, the entirety of the changes to the lower
-+branches will not result in a single CPU cycle spent at the Unionfs level
-+until the user invokes a system call that goes through Unionfs.
-+
-+We have considered two alternate cache-coherency designs. (1) Using the
-+dentry/inode notify functionality to register interest in finding out about
-+any lower changes. This is a somewhat limited and also a heavy-handed
-+approach which could result in many notifications to the Unionfs layer upon
-+each small change at the lower layer (imagine a file being modified multiple
-+times in rapid succession). (2) Rewriting the VFS to support explicit
-+callbacks from lower objects to upper objects. We began exploring such an
-+implementation, but found it to be very complicated--it would have resulted
-+in massive VFS/MM changes which are unlikely to be accepted by the LKML
-+community. We therefore believe that our current cache-coherency design and
-+implementation represent the best approach at this time.
-+
-+Limitations:
-+
-+Our implementation works in that as long as a user process will have caused
-+Unionfs to be called, directly or indirectly, even to just do
-+->d_revalidate; then we will have purged the current Unionfs data and the
-+process will see the new data. For example, a process that continually
-+re-reads the same file's data will see the NEW data as soon as the lower
-+file had changed, upon the next read(2) syscall (even if the file is still
-+open!) However, this doesn't work when the process re-reads the open file's
-+data via mmap(2) (unless the user unmaps/closes the file and remaps/reopens
-+it). Once we respond to ->readpage(s), then the kernel maps the page into
-+the process's address space and there doesn't appear to be a way to force
-+the kernel to invalidate those pages/mappings, and force the process to
-+re-issue ->readpage. If there's a way to invalidate active mappings and
-+force a ->readpage, let us know please (invalidate_inode_pages2 doesn't do
-+the trick).
-+
-+Our current Unionfs code has to perform many file-revalidation calls. It
-+would be really nice if the VFS would export an optional file system hook
-+->file_revalidate (similarly to dentry->d_revalidate) that will be called
-+before each VFS op that has a "struct file" in it.
-+
-+Certain file systems have micro-second granularity (or better) for inode
-+times, and asynchronous actions could cause those times to change with some
-+small delay. In such cases, Unionfs may see a changed inode time that only
-+differs by a tiny fraction of a second: such a change may be a false
-+positive indication that the lower object has changed, whereas if unionfs
-+waits a little longer, that false indication will not be seen. (These false
-+positives are harmless, because they would at most cause unionfs to
-+re-validate an object that may need no revalidation, and print a debugging
-+message that clutters the console/logs.) Therefore, to minimize the chances
-+of these situations, we delay the detection of changed times by a small
-+factor of a few seconds, called UNIONFS_MIN_CC_TIME (which defaults to 3
-+seconds, as does NFS). This means that we will detect the change, only a
-+couple of seconds later, if indeed the time change persists in the lower
-+file object. This delayed detection has an added performance benefit: we
-+reduce the number of times that unionfs has to revalidate objects, in case
-+there's a lot of concurrent activity on both the upper and lower objects,
-+for the same file(s). Lastly, this delayed time attribute detection is
-+similar to how NFS clients operate (e.g., acregmin).
-+
-+Finally, there is no way currently in Linux to prevent lower directories
-+from being moved around (i.e., topology changes); there's no way to prevent
-+modifications to directory sub-trees of whole file systems which are mounted
-+read-write. It is therefore possible for in-flight operations in unionfs to
-+take place, while a lower directory is being moved around. Therefore, if
-+you try to, say, create a new file in a directory through unionfs, while the
-+directory is being moved around directly, then the new file may get created
-+in the new location where that directory was moved to. This is a somewhat
-+similar behaviour in NFS: an NFS client could be creating a new file while
-+th NFS server is moving th directory around; the file will get successfully
-+created in the new location. (The one exception in unionfs is that if the
-+branch is marked read-only by unionfs, then a copyup will take place.)
-+
-+For more information, see <http://unionfs.filesystems.org/>.
-diff --git a/Documentation/filesystems/unionfs/issues.txt b/Documentation/filesystems/unionfs/issues.txt
-new file mode 100644
-index 0000000..f4b7e7e
---- /dev/null
-+++ b/Documentation/filesystems/unionfs/issues.txt
-@@ -0,0 +1,28 @@
-+KNOWN Unionfs 2.x ISSUES:
-+=========================
-+
-+1. Unionfs should not use lookup_one_len() on the underlying f/s as it
-+ confuses NFSv4. Currently, unionfs_lookup() passes lookup intents to the
-+ lower file-system, this eliminates part of the problem. The remaining
-+ calls to lookup_one_len may need to be changed to pass an intent. We are
-+ currently introducing VFS changes to fs/namei.c's do_path_lookup() to
-+ allow proper file lookup and opening in stackable file systems.
-+
-+2. Lockdep (a debugging feature) isn't aware of stacking, and so it
-+ incorrectly complains about locking problems. The problem boils down to
-+ this: Lockdep considers all objects of a certain type to be in the same
-+ class, for example, all inodes. Lockdep doesn't like to see a lock held
-+ on two inodes within the same task, and warns that it could lead to a
-+ deadlock. However, stackable file systems do precisely that: they lock
-+ an upper object, and then a lower object, in a strict order to avoid
-+ locking problems; in addition, Unionfs, as a fan-out file system, may
-+ have to lock several lower inodes. We are currently looking into Lockdep
-+ to see how to make it aware of stackable file systems. For now, we
-+ temporarily disable lockdep when calling vfs methods on lower objects,
-+ but only for those places where lockdep complained. While this solution
-+ may seem unclean, it is not without precedent: other places in the kernel
-+ also do similar temporary disabling, of course after carefully having
-+ checked that it is the right thing to do. Anyway, you get any warnings
-+ from Lockdep, please report them to the Unionfs maintainers.
-+
-+For more information, see <http://unionfs.filesystems.org/>.
-diff --git a/Documentation/filesystems/unionfs/rename.txt b/Documentation/filesystems/unionfs/rename.txt
-new file mode 100644
-index 0000000..e20bb82
---- /dev/null
-+++ b/Documentation/filesystems/unionfs/rename.txt
-@@ -0,0 +1,31 @@
-+Rename is a complex beast. The following table shows which rename(2) operations
-+should succeed and which should fail.
-+
-+o: success
-+E: error (either unionfs or vfs)
-+X: EXDEV
-+
-+none = file does not exist
-+file = file is a file
-+dir = file is a empty directory
-+child= file is a non-empty directory
-+wh = file is a directory containing only whiteouts; this makes it logically
-+ empty
-+
-+ none file dir child wh
-+file o o E E E
-+dir o E o E o
-+child X E X E X
-+wh o E o E o
-+
-+
-+Renaming directories:
-+=====================
-+
-+Whenever a empty (either physically or logically) directory is being renamed,
-+the following sequence of events should take place:
-+
-+1) Remove whiteouts from both source and destination directory
-+2) Rename source to destination
-+3) Make destination opaque to prevent anything under it from showing up
-+
-diff --git a/Documentation/filesystems/unionfs/usage.txt b/Documentation/filesystems/unionfs/usage.txt
-new file mode 100644
-index 0000000..1adde69
---- /dev/null
-+++ b/Documentation/filesystems/unionfs/usage.txt
-@@ -0,0 +1,134 @@
-+Unionfs is a stackable unification file system, which can appear to merge
-+the contents of several directories (branches), while keeping their physical
-+content separate. Unionfs is useful for unified source tree management,
-+merged contents of split CD-ROM, merged separate software package
-+directories, data grids, and more. Unionfs allows any mix of read-only and
-+read-write branches, as well as insertion and deletion of branches anywhere
-+in the fan-out. To maintain Unix semantics, Unionfs handles elimination of
-+duplicates, partial-error conditions, and more.
-+
-+GENERAL SYNTAX
-+==============
-+
-+# mount -t unionfs -o <OPTIONS>,<BRANCH-OPTIONS> none MOUNTPOINT
-+
-+OPTIONS can be any legal combination of:
-+
-+- ro # mount file system read-only
-+- rw # mount file system read-write
-+- remount # remount the file system (see Branch Management below)
-+- incgen # increment generation no. (see Cache Consistency below)
-+
-+BRANCH-OPTIONS can be either (1) a list of branches given to the "dirs="
-+option, or (2) a list of individual branch manipulation commands, combined
-+with the "remount" option, and is further described in the "Branch
-+Management" section below.
-+
-+The syntax for the "dirs=" mount option is:
-+
-+ dirs=branch[=ro|=rw][:...]
-+
-+The "dirs=" option takes a colon-delimited list of directories to compose
-+the union, with an optional branch mode for each of those directories.
-+Directories that come earlier (specified first, on the left) in the list
-+have a higher precedence than those which come later. Additionally,
-+read-only or read-write permissions of the branch can be specified by
-+appending =ro or =rw (default) to each directory. See the Copyup section in
-+concepts.txt, for a description of Unionfs's behavior when mixing read-only
-+and read-write branches and mounts.
-+
-+Syntax:
-+
-+ dirs=/branch1[=ro|=rw]:/branch2[=ro|=rw]:...:/branchN[=ro|=rw]
-+
-+Example:
-+
-+ dirs=/writable_branch=rw:/read-only_branch=ro
-+
-+
-+BRANCH MANAGEMENT
-+=================
-+
-+Once you mount your union for the first time, using the "dirs=" option, you
-+can then change the union's overall mode or reconfigure the branches, using
-+the remount option, as follows.
-+
-+To downgrade a union from read-write to read-only:
-+
-+# mount -t unionfs -o remount,ro none MOUNTPOINT
-+
-+To upgrade a union from read-only to read-write:
-+
-+# mount -t unionfs -o remount,rw none MOUNTPOINT
-+
-+To delete a branch /foo, regardless where it is in the current union:
-+
-+# mount -t unionfs -o remount,del=/foo none MOUNTPOINT
-+
-+To insert (add) a branch /foo before /bar:
-+
-+# mount -t unionfs -o remount,add=/bar:/foo none MOUNTPOINT
-+
-+To insert (add) a branch /foo (with the "rw" mode flag) before /bar:
-+
-+# mount -t unionfs -o remount,add=/bar:/foo=rw none MOUNTPOINT
-+
-+To insert (add) a branch /foo (in "rw" mode) at the very beginning (i.e., a
-+new highest-priority branch), you can use the above syntax, or use a short
-+hand version as follows:
-+
-+# mount -t unionfs -o remount,add=/foo none MOUNTPOINT
-+
-+To append a branch to the very end (new lowest-priority branch):
-+
-+# mount -t unionfs -o remount,add=:/foo none MOUNTPOINT
-+
-+To append a branch to the very end (new lowest-priority branch), in
-+read-only mode:
-+
-+# mount -t unionfs -o remount,add=:/foo=ro none MOUNTPOINT
-+
-+Finally, to change the mode of one existing branch, say /foo, from read-only
-+to read-write, and change /bar from read-write to read-only:
-+
-+# mount -t unionfs -o remount,mode=/foo=rw,mode=/bar=ro none MOUNTPOINT
-+
-+Note: in Unionfs 2.x, you cannot set the leftmost branch to readonly because
-+then Unionfs won't have any writable place for copyups to take place.
-+Moreover, the VFS can get confused when it tries to modify something in a
-+file system mounted read-write, but isn't permitted to write to it.
-+Instead, you should set the whole union as readonly, as described above.
-+If, however, you must set the leftmost branch as readonly, perhaps so you
-+can get a snapshot of it at a point in time, then you should insert a new
-+writable top-level branch, and mark the one you want as readonly. This can
-+be accomplished as follows, assuming that /foo is your current leftmost
-+branch:
-+
-+# mount -t tmpfs -o size=NNN /new
-+# mount -t unionfs -o remount,add=/new,mode=/foo=ro none MOUNTPOINT
-+<do what you want safely in /foo>
-+# mount -t unionfs -o remount,del=/new,mode=/foo=rw none MOUNTPOINT
-+<check if there's anything in /new you want to preserve>
-+# umount /new
-+
-+CACHE CONSISTENCY
-+=================
-+
-+If you modify any file on any of the lower branches directly, while there is
-+a Unionfs 2.x mounted above any of those branches, you should tell Unionfs
-+to purge its caches and re-get the objects. To do that, you have to
-+increment the generation number of the superblock using the following
-+command:
-+
-+# mount -t unionfs -o remount,incgen none MOUNTPOINT
-+
-+Note that the older way of incrementing the generation number using an
-+ioctl, is no longer supported in Unionfs 2.0 and newer. Ioctls in general
-+are not encouraged. Plus, an ioctl is per-file concept, whereas the
-+generation number is a per-file-system concept. Worse, such an ioctl
-+requires an open file, which then has to be invalidated by the very nature
-+of the generation number increase (read: the old generation increase ioctl
-+was pretty racy).
-+
-+
-+For more information, see <http://unionfs.filesystems.org/>.
-diff --git a/MAINTAINERS b/MAINTAINERS
-index f2a2b8e..11d7f45 100644
---- a/MAINTAINERS
-+++ b/MAINTAINERS
-@@ -5917,6 +5917,14 @@ F: Documentation/cdrom/
- F: drivers/cdrom/cdrom.c
- F: include/linux/cdrom.h
-
-+UNIONFS
-+P: Erez Zadok
-+M: ezk@cs.sunysb.edu
-+L: unionfs@filesystems.org
-+W: http://unionfs.filesystems.org/
-+T: git git.kernel.org/pub/scm/linux/kernel/git/ezk/unionfs.git
-+S: Maintained
-+
- UNSORTED BLOCK IMAGES (UBI)
- M: Artem Bityutskiy <dedekind1@gmail.com>
- W: http://www.linux-mtd.infradead.org/
-diff --git a/fs/Kconfig b/fs/Kconfig
-index 3d18530..65b6aa1 100644
---- a/fs/Kconfig
-+++ b/fs/Kconfig
-@@ -169,6 +169,7 @@ if MISC_FILESYSTEMS
- source "fs/adfs/Kconfig"
- source "fs/affs/Kconfig"
- source "fs/ecryptfs/Kconfig"
-+source "fs/unionfs/Kconfig"
- source "fs/hfs/Kconfig"
- source "fs/hfsplus/Kconfig"
- source "fs/befs/Kconfig"
-diff --git a/fs/Makefile b/fs/Makefile
-index e6ec1d3..787332e 100644
---- a/fs/Makefile
-+++ b/fs/Makefile
-@@ -84,6 +84,7 @@ obj-$(CONFIG_ISO9660_FS) += isofs/
- obj-$(CONFIG_HFSPLUS_FS) += hfsplus/ # Before hfs to find wrapped HFS+
- obj-$(CONFIG_HFS_FS) += hfs/
- obj-$(CONFIG_ECRYPT_FS) += ecryptfs/
-+obj-$(CONFIG_UNION_FS) += unionfs/
- obj-$(CONFIG_VXFS_FS) += freevxfs/
- obj-$(CONFIG_NFS_FS) += nfs/
- obj-$(CONFIG_EXPORTFS) += exportfs/
-diff --git a/fs/namei.c b/fs/namei.c
-index 24896e8..db22420 100644
---- a/fs/namei.c
-+++ b/fs/namei.c
-@@ -385,6 +385,7 @@ void release_open_intent(struct nameidata *nd)
- else
- fput(nd->intent.open.file);
- }
-+EXPORT_SYMBOL_GPL(release_open_intent);
-
- static inline struct dentry *
- do_revalidate(struct dentry *dentry, struct nameidata *nd)
-diff --git a/fs/splice.c b/fs/splice.c
-index 8f1dfae..7a57fab 100644
---- a/fs/splice.c
-+++ b/fs/splice.c
-@@ -1092,8 +1092,8 @@ EXPORT_SYMBOL(generic_splice_sendpage);
- /*
- * Attempt to initiate a splice from pipe to file.
- */
--static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
-- loff_t *ppos, size_t len, unsigned int flags)
-+long vfs_splice_from(struct pipe_inode_info *pipe, struct file *out,
-+ loff_t *ppos, size_t len, unsigned int flags)
- {
- ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
- loff_t *, size_t, unsigned int);
-@@ -1116,13 +1116,14 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
-
- return splice_write(pipe, out, ppos, len, flags);
- }
-+EXPORT_SYMBOL_GPL(vfs_splice_from);
-
- /*
- * Attempt to initiate a splice from a file to a pipe.
- */
--static long do_splice_to(struct file *in, loff_t *ppos,
-- struct pipe_inode_info *pipe, size_t len,
-- unsigned int flags)
-+long vfs_splice_to(struct file *in, loff_t *ppos,
-+ struct pipe_inode_info *pipe, size_t len,
-+ unsigned int flags)
- {
- ssize_t (*splice_read)(struct file *, loff_t *,
- struct pipe_inode_info *, size_t, unsigned int);
-@@ -1142,6 +1143,7 @@ static long do_splice_to(struct file *in, loff_t *ppos,
-
- return splice_read(in, ppos, pipe, len, flags);
- }
-+EXPORT_SYMBOL_GPL(vfs_splice_to);
-
- /**
- * splice_direct_to_actor - splices data directly between two non-pipes
-@@ -1211,7 +1213,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
- size_t read_len;
- loff_t pos = sd->pos, prev_pos = pos;
-
-- ret = do_splice_to(in, &pos, pipe, len, flags);
-+ ret = vfs_splice_to(in, &pos, pipe, len, flags);
- if (unlikely(ret <= 0))
- goto out_release;
-
-@@ -1270,8 +1272,8 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
- {
- struct file *file = sd->u.file;
-
-- return do_splice_from(pipe, file, &file->f_pos, sd->total_len,
-- sd->flags);
-+ return vfs_splice_from(pipe, file, &file->f_pos, sd->total_len,
-+ sd->flags);
- }
-
- /**
-@@ -1368,7 +1370,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
- } else
- off = &out->f_pos;
-
-- ret = do_splice_from(ipipe, out, off, len, flags);
-+ ret = vfs_splice_from(ipipe, out, off, len, flags);
-
- if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
- ret = -EFAULT;
-@@ -1388,7 +1390,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
- } else
- off = &in->f_pos;
-
-- ret = do_splice_to(in, off, opipe, len, flags);
-+ ret = vfs_splice_to(in, off, opipe, len, flags);
-
- if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
- ret = -EFAULT;
-diff --git a/fs/stack.c b/fs/stack.c
-index 4a6f7f4..7eeef12 100644
---- a/fs/stack.c
-+++ b/fs/stack.c
-@@ -1,8 +1,20 @@
-+/*
-+ * Copyright (c) 2006-2009 Erez Zadok
-+ * Copyright (c) 2006-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2006-2009 Stony Brook University
-+ * Copyright (c) 2006-2009 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
- #include <linux/module.h>
- #include <linux/fs.h>
- #include <linux/fs_stack.h>
-
--/* does _NOT_ require i_mutex to be held.
-+/*
-+ * does _NOT_ require i_mutex to be held.
- *
- * This function cannot be inlined since i_size_{read,write} is rather
- * heavy-weight on 32-bit systems
-diff --git a/fs/unionfs/Kconfig b/fs/unionfs/Kconfig
-new file mode 100644
-index 0000000..f3c1ac4
---- /dev/null
-+++ b/fs/unionfs/Kconfig
-@@ -0,0 +1,24 @@
-+config UNION_FS
-+ tristate "Union file system (EXPERIMENTAL)"
-+ depends on EXPERIMENTAL
-+ help
-+ Unionfs is a stackable unification file system, which appears to
-+ merge the contents of several directories (branches), while keeping
-+ their physical content separate.
-+
-+ See <http://unionfs.filesystems.org> for details
-+
-+config UNION_FS_XATTR
-+ bool "Unionfs extended attributes"
-+ depends on UNION_FS
-+ help
-+ Extended attributes are name:value pairs associated with inodes by
-+ the kernel or by users (see the attr(5) manual page).
-+
-+ If unsure, say N.
-+
-+config UNION_FS_DEBUG
-+ bool "Debug Unionfs"
-+ depends on UNION_FS
-+ help
-+ If you say Y here, you can turn on debugging output from Unionfs.
-diff --git a/fs/unionfs/Makefile b/fs/unionfs/Makefile
-new file mode 100644
-index 0000000..86c32ba
---- /dev/null
-+++ b/fs/unionfs/Makefile
-@@ -0,0 +1,17 @@
-+UNIONFS_VERSION="2.5.7 (for 2.6.36)"
-+
-+EXTRA_CFLAGS += -DUNIONFS_VERSION=\"$(UNIONFS_VERSION)\"
-+
-+obj-$(CONFIG_UNION_FS) += unionfs.o
-+
-+unionfs-y := subr.o dentry.o file.o inode.o main.o super.o \
-+ rdstate.o copyup.o dirhelper.o rename.o unlink.o \
-+ lookup.o commonfops.o dirfops.o sioq.o mmap.o whiteout.o
-+
-+unionfs-$(CONFIG_UNION_FS_XATTR) += xattr.o
-+
-+unionfs-$(CONFIG_UNION_FS_DEBUG) += debug.o
-+
-+ifeq ($(CONFIG_UNION_FS_DEBUG),y)
-+EXTRA_CFLAGS += -DDEBUG
-+endif
-diff --git a/fs/unionfs/commonfops.c b/fs/unionfs/commonfops.c
-new file mode 100644
-index 0000000..51ea65e
---- /dev/null
-+++ b/fs/unionfs/commonfops.c
-@@ -0,0 +1,896 @@
-+/*
-+ * Copyright (c) 2003-2010 Erez Zadok
-+ * Copyright (c) 2003-2006 Charles P. Wright
-+ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2005-2006 Junjiro Okajima
-+ * Copyright (c) 2005 Arun M. Krishnakumar
-+ * Copyright (c) 2004-2006 David P. Quigley
-+ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
-+ * Copyright (c) 2003 Puja Gupta
-+ * Copyright (c) 2003 Harikesavan Krishnan
-+ * Copyright (c) 2003-2010 Stony Brook University
-+ * Copyright (c) 2003-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include "union.h"
-+
-+/*
-+ * 1) Copyup the file
-+ * 2) Rename the file to '.unionfs<original inode#><counter>' - obviously
-+ * stolen from NFS's silly rename
-+ */
-+static int copyup_deleted_file(struct file *file, struct dentry *dentry,
-+ struct dentry *parent, int bstart, int bindex)
-+{
-+ static unsigned int counter;
-+ const int i_inosize = sizeof(dentry->d_inode->i_ino) * 2;
-+ const int countersize = sizeof(counter) * 2;
-+ const int nlen = sizeof(".unionfs") + i_inosize + countersize - 1;
-+ char name[nlen + 1];
-+ int err;
-+ struct dentry *tmp_dentry = NULL;
-+ struct dentry *lower_dentry;
-+ struct dentry *lower_dir_dentry = NULL;
-+
-+ lower_dentry = unionfs_lower_dentry_idx(dentry, bstart);
-+
-+ sprintf(name, ".unionfs%*.*lx",
-+ i_inosize, i_inosize, lower_dentry->d_inode->i_ino);
-+
-+ /*
-+ * Loop, looking for an unused temp name to copyup to.
-+ *
-+ * It's somewhat silly that we look for a free temp tmp name in the
-+ * source branch (bstart) instead of the dest branch (bindex), where
-+ * the final name will be created. We _will_ catch it if somehow
-+ * the name exists in the dest branch, but it'd be nice to catch it
-+ * sooner than later.
-+ */
-+retry:
-+ tmp_dentry = NULL;
-+ do {
-+ char *suffix = name + nlen - countersize;
-+
-+ dput(tmp_dentry);
-+ counter++;
-+ sprintf(suffix, "%*.*x", countersize, countersize, counter);
-+
-+ pr_debug("unionfs: trying to rename %s to %s\n",
-+ dentry->d_name.name, name);
-+
-+ tmp_dentry = lookup_lck_len(name, lower_dentry->d_parent,
-+ nlen);
-+ if (IS_ERR(tmp_dentry)) {
-+ err = PTR_ERR(tmp_dentry);
-+ goto out;
-+ }
-+ } while (tmp_dentry->d_inode != NULL); /* need negative dentry */
-+ dput(tmp_dentry);
-+
-+ err = copyup_named_file(parent->d_inode, file, name, bstart, bindex,
-+ i_size_read(file->f_path.dentry->d_inode));
-+ if (err) {
-+ if (unlikely(err == -EEXIST))
-+ goto retry;
-+ goto out;
-+ }
-+
-+ /* bring it to the same state as an unlinked file */
-+ lower_dentry = unionfs_lower_dentry_idx(dentry, dbstart(dentry));
-+ if (!unionfs_lower_inode_idx(dentry->d_inode, bindex)) {
-+ atomic_inc(&lower_dentry->d_inode->i_count);
-+ unionfs_set_lower_inode_idx(dentry->d_inode, bindex,
-+ lower_dentry->d_inode);
-+ }
-+ lower_dir_dentry = lock_parent(lower_dentry);
-+ err = vfs_unlink(lower_dir_dentry->d_inode, lower_dentry);
-+ unlock_dir(lower_dir_dentry);
-+
-+out:
-+ if (!err)
-+ unionfs_check_dentry(dentry);
-+ return err;
-+}
-+
-+/*
-+ * put all references held by upper struct file and free lower file pointer
-+ * array
-+ */
-+static void cleanup_file(struct file *file)
-+{
-+ int bindex, bstart, bend;
-+ struct file **lower_files;
-+ struct file *lower_file;
-+ struct super_block *sb = file->f_path.dentry->d_sb;
-+
-+ lower_files = UNIONFS_F(file)->lower_files;
-+ bstart = fbstart(file);
-+ bend = fbend(file);
-+
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ int i; /* holds (possibly) updated branch index */
-+ int old_bid;
-+
-+ lower_file = unionfs_lower_file_idx(file, bindex);
-+ if (!lower_file)
-+ continue;
-+
-+ /*
-+ * Find new index of matching branch with an open
-+ * file, since branches could have been added or
-+ * deleted causing the one with open files to shift.
-+ */
-+ old_bid = UNIONFS_F(file)->saved_branch_ids[bindex];
-+ i = branch_id_to_idx(sb, old_bid);
-+ if (unlikely(i < 0)) {
-+ printk(KERN_ERR "unionfs: no superblock for "
-+ "file %p\n", file);
-+ continue;
-+ }
-+
-+ /* decrement count of open files */
-+ branchput(sb, i);
-+ /*
-+ * fput will perform an mntput for us on the correct branch.
-+ * Although we're using the file's old branch configuration,
-+ * bindex, which is the old index, correctly points to the
-+ * right branch in the file's branch list. In other words,
-+ * we're going to mntput the correct branch even if branches
-+ * have been added/removed.
-+ */
-+ fput(lower_file);
-+ UNIONFS_F(file)->lower_files[bindex] = NULL;
-+ UNIONFS_F(file)->saved_branch_ids[bindex] = -1;
-+ }
-+
-+ UNIONFS_F(file)->lower_files = NULL;
-+ kfree(lower_files);
-+ kfree(UNIONFS_F(file)->saved_branch_ids);
-+ /* set to NULL because caller needs to know if to kfree on error */
-+ UNIONFS_F(file)->saved_branch_ids = NULL;
-+}
-+
-+/* open all lower files for a given file */
-+static int open_all_files(struct file *file)
-+{
-+ int bindex, bstart, bend, err = 0;
-+ struct file *lower_file;
-+ struct dentry *lower_dentry;
-+ struct dentry *dentry = file->f_path.dentry;
-+ struct super_block *sb = dentry->d_sb;
-+
-+ bstart = dbstart(dentry);
-+ bend = dbend(dentry);
-+
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
-+ if (!lower_dentry)
-+ continue;
-+
-+ dget(lower_dentry);
-+ unionfs_mntget(dentry, bindex);
-+ branchget(sb, bindex);
-+
-+ lower_file =
-+ dentry_open(lower_dentry,
-+ unionfs_lower_mnt_idx(dentry, bindex),
-+ file->f_flags, current_cred());
-+ if (IS_ERR(lower_file)) {
-+ branchput(sb, bindex);
-+ err = PTR_ERR(lower_file);
-+ goto out;
-+ } else {
-+ unionfs_set_lower_file_idx(file, bindex, lower_file);
-+ }
-+ }
-+out:
-+ return err;
-+}
-+
-+/* open the highest priority file for a given upper file */
-+static int open_highest_file(struct file *file, bool willwrite)
-+{
-+ int bindex, bstart, bend, err = 0;
-+ struct file *lower_file;
-+ struct dentry *lower_dentry;
-+ struct dentry *dentry = file->f_path.dentry;
-+ struct dentry *parent = dget_parent(dentry);
-+ struct inode *parent_inode = parent->d_inode;
-+ struct super_block *sb = dentry->d_sb;
-+
-+ bstart = dbstart(dentry);
-+ bend = dbend(dentry);
-+
-+ lower_dentry = unionfs_lower_dentry(dentry);
-+ if (willwrite && IS_WRITE_FLAG(file->f_flags) && is_robranch(dentry)) {
-+ for (bindex = bstart - 1; bindex >= 0; bindex--) {
-+ err = copyup_file(parent_inode, file, bstart, bindex,
-+ i_size_read(dentry->d_inode));
-+ if (!err)
-+ break;
-+ }
-+ atomic_set(&UNIONFS_F(file)->generation,
-+ atomic_read(&UNIONFS_I(dentry->d_inode)->
-+ generation));
-+ goto out;
-+ }
-+
-+ dget(lower_dentry);
-+ unionfs_mntget(dentry, bstart);
-+ lower_file = dentry_open(lower_dentry,
-+ unionfs_lower_mnt_idx(dentry, bstart),
-+ file->f_flags, current_cred());
-+ if (IS_ERR(lower_file)) {
-+ err = PTR_ERR(lower_file);
-+ goto out;
-+ }
-+ branchget(sb, bstart);
-+ unionfs_set_lower_file(file, lower_file);
-+ /* Fix up the position. */
-+ lower_file->f_pos = file->f_pos;
-+
-+ memcpy(&lower_file->f_ra, &file->f_ra, sizeof(struct file_ra_state));
-+out:
-+ dput(parent);
-+ return err;
-+}
-+
-+/* perform a delayed copyup of a read-write file on a read-only branch */
-+static int do_delayed_copyup(struct file *file, struct dentry *parent)
-+{
-+ int bindex, bstart, bend, err = 0;
-+ struct dentry *dentry = file->f_path.dentry;
-+ struct inode *parent_inode = parent->d_inode;
-+
-+ bstart = fbstart(file);
-+ bend = fbend(file);
-+
-+ BUG_ON(!S_ISREG(dentry->d_inode->i_mode));
-+
-+ unionfs_check_file(file);
-+ for (bindex = bstart - 1; bindex >= 0; bindex--) {
-+ if (!d_deleted(dentry))
-+ err = copyup_file(parent_inode, file, bstart,
-+ bindex,
-+ i_size_read(dentry->d_inode));
-+ else
-+ err = copyup_deleted_file(file, dentry, parent,
-+ bstart, bindex);
-+ /* if succeeded, set lower open-file flags and break */
-+ if (!err) {
-+ struct file *lower_file;
-+ lower_file = unionfs_lower_file_idx(file, bindex);
-+ lower_file->f_flags = file->f_flags;
-+ break;
-+ }
-+ }
-+ if (err || (bstart <= fbstart(file)))
-+ goto out;
-+ bend = fbend(file);
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ if (unionfs_lower_file_idx(file, bindex)) {
-+ branchput(dentry->d_sb, bindex);
-+ fput(unionfs_lower_file_idx(file, bindex));
-+ unionfs_set_lower_file_idx(file, bindex, NULL);
-+ }
-+ }
-+ path_put_lowers(dentry, bstart, bend, false);
-+ iput_lowers(dentry->d_inode, bstart, bend, false);
-+ /* for reg file, we only open it "once" */
-+ fbend(file) = fbstart(file);
-+ dbend(dentry) = dbstart(dentry);
-+ ibend(dentry->d_inode) = ibstart(dentry->d_inode);
-+
-+out:
-+ unionfs_check_file(file);
-+ return err;
-+}
-+
-+/*
-+ * Helper function for unionfs_file_revalidate/locked.
-+ * Expects dentry/parent to be locked already, and revalidated.
-+ */
-+static int __unionfs_file_revalidate(struct file *file, struct dentry *dentry,
-+ struct dentry *parent,
-+ struct super_block *sb, int sbgen,
-+ int dgen, bool willwrite)
-+{
-+ int fgen;
-+ int bstart, bend, orig_brid;
-+ int size;
-+ int err = 0;
-+
-+ fgen = atomic_read(&UNIONFS_F(file)->generation);
-+
-+ /*
-+ * There are two cases we are interested in. The first is if the
-+ * generation is lower than the super-block. The second is if
-+ * someone has copied up this file from underneath us, we also need
-+ * to refresh things.
-+ */
-+ if (d_deleted(dentry) ||
-+ (sbgen <= fgen &&
-+ dbstart(dentry) == fbstart(file) &&
-+ unionfs_lower_file(file)))
-+ goto out_may_copyup;
-+
-+ /* save orig branch ID */
-+ orig_brid = UNIONFS_F(file)->saved_branch_ids[fbstart(file)];
-+
-+ /* First we throw out the existing files. */
-+ cleanup_file(file);
-+
-+ /* Now we reopen the file(s) as in unionfs_open. */
-+ bstart = fbstart(file) = dbstart(dentry);
-+ bend = fbend(file) = dbend(dentry);
-+
-+ size = sizeof(struct file *) * sbmax(sb);
-+ UNIONFS_F(file)->lower_files = kzalloc(size, GFP_KERNEL);
-+ if (unlikely(!UNIONFS_F(file)->lower_files)) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+ size = sizeof(int) * sbmax(sb);
-+ UNIONFS_F(file)->saved_branch_ids = kzalloc(size, GFP_KERNEL);
-+ if (unlikely(!UNIONFS_F(file)->saved_branch_ids)) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ if (S_ISDIR(dentry->d_inode->i_mode)) {
-+ /* We need to open all the files. */
-+ err = open_all_files(file);
-+ if (err)
-+ goto out;
-+ } else {
-+ int new_brid;
-+ /* We only open the highest priority branch. */
-+ err = open_highest_file(file, willwrite);
-+ if (err)
-+ goto out;
-+ new_brid = UNIONFS_F(file)->saved_branch_ids[fbstart(file)];
-+ if (unlikely(new_brid != orig_brid && sbgen > fgen)) {
-+ /*
-+ * If we re-opened the file on a different branch
-+ * than the original one, and this was due to a new
-+ * branch inserted, then update the mnt counts of
-+ * the old and new branches accordingly.
-+ */
-+ unionfs_mntget(dentry, bstart);
-+ unionfs_mntput(sb->s_root,
-+ branch_id_to_idx(sb, orig_brid));
-+ }
-+ /* regular files have only one open lower file */
-+ fbend(file) = fbstart(file);
-+ }
-+ atomic_set(&UNIONFS_F(file)->generation,
-+ atomic_read(&UNIONFS_I(dentry->d_inode)->generation));
-+
-+out_may_copyup:
-+ /* Copyup on the first write to a file on a readonly branch. */
-+ if (willwrite && IS_WRITE_FLAG(file->f_flags) &&
-+ !IS_WRITE_FLAG(unionfs_lower_file(file)->f_flags) &&
-+ is_robranch(dentry)) {
-+ pr_debug("unionfs: do delay copyup of \"%s\"\n",
-+ dentry->d_name.name);
-+ err = do_delayed_copyup(file, parent);
-+ /* regular files have only one open lower file */
-+ if (!err && !S_ISDIR(dentry->d_inode->i_mode))
-+ fbend(file) = fbstart(file);
-+ }
-+
-+out:
-+ if (err) {
-+ kfree(UNIONFS_F(file)->lower_files);
-+ kfree(UNIONFS_F(file)->saved_branch_ids);
-+ }
-+ return err;
-+}
-+
-+/*
-+ * Revalidate the struct file
-+ * @file: file to revalidate
-+ * @parent: parent dentry (locked by caller)
-+ * @willwrite: true if caller may cause changes to the file; false otherwise.
-+ * Caller must lock/unlock dentry's branch configuration.
-+ */
-+int unionfs_file_revalidate(struct file *file, struct dentry *parent,
-+ bool willwrite)
-+{
-+ struct super_block *sb;
-+ struct dentry *dentry;
-+ int sbgen, dgen;
-+ int err = 0;
-+
-+ dentry = file->f_path.dentry;
-+ sb = dentry->d_sb;
-+ verify_locked(dentry);
-+ verify_locked(parent);
-+
-+ /*
-+ * First revalidate the dentry inside struct file,
-+ * but not unhashed dentries.
-+ */
-+ if (!d_deleted(dentry) &&
-+ !__unionfs_d_revalidate(dentry, parent, willwrite)) {
-+ err = -ESTALE;
-+ goto out;
-+ }
-+
-+ sbgen = atomic_read(&UNIONFS_SB(sb)->generation);
-+ dgen = atomic_read(&UNIONFS_D(dentry)->generation);
-+
-+ if (unlikely(sbgen > dgen)) { /* XXX: should never happen */
-+ pr_debug("unionfs: failed to revalidate dentry (%s)\n",
-+ dentry->d_name.name);
-+ err = -ESTALE;
-+ goto out;
-+ }
-+
-+ err = __unionfs_file_revalidate(file, dentry, parent, sb,
-+ sbgen, dgen, willwrite);
-+out:
-+ return err;
-+}
-+
-+/* unionfs_open helper function: open a directory */
-+static int __open_dir(struct inode *inode, struct file *file)
-+{
-+ struct dentry *lower_dentry;
-+ struct file *lower_file;
-+ int bindex, bstart, bend;
-+ struct vfsmount *mnt;
-+
-+ bstart = fbstart(file) = dbstart(file->f_path.dentry);
-+ bend = fbend(file) = dbend(file->f_path.dentry);
-+
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ lower_dentry =
-+ unionfs_lower_dentry_idx(file->f_path.dentry, bindex);
-+ if (!lower_dentry)
-+ continue;
-+
-+ dget(lower_dentry);
-+ unionfs_mntget(file->f_path.dentry, bindex);
-+ mnt = unionfs_lower_mnt_idx(file->f_path.dentry, bindex);
-+ lower_file = dentry_open(lower_dentry, mnt, file->f_flags,
-+ current_cred());
-+ if (IS_ERR(lower_file))
-+ return PTR_ERR(lower_file);
-+
-+ unionfs_set_lower_file_idx(file, bindex, lower_file);
-+
-+ /*
-+ * The branchget goes after the open, because otherwise
-+ * we would miss the reference on release.
-+ */
-+ branchget(inode->i_sb, bindex);
-+ }
-+
-+ return 0;
-+}
-+
-+/* unionfs_open helper function: open a file */
-+static int __open_file(struct inode *inode, struct file *file,
-+ struct dentry *parent)
-+{
-+ struct dentry *lower_dentry;
-+ struct file *lower_file;
-+ int lower_flags;
-+ int bindex, bstart, bend;
-+
-+ lower_dentry = unionfs_lower_dentry(file->f_path.dentry);
-+ lower_flags = file->f_flags;
-+
-+ bstart = fbstart(file) = dbstart(file->f_path.dentry);
-+ bend = fbend(file) = dbend(file->f_path.dentry);
-+
-+ /*
-+ * check for the permission for lower file. If the error is
-+ * COPYUP_ERR, copyup the file.
-+ */
-+ if (lower_dentry->d_inode && is_robranch(file->f_path.dentry)) {
-+ /*
-+ * if the open will change the file, copy it up otherwise
-+ * defer it.
-+ */
-+ if (lower_flags & O_TRUNC) {
-+ int size = 0;
-+ int err = -EROFS;
-+
-+ /* copyup the file */
-+ for (bindex = bstart - 1; bindex >= 0; bindex--) {
-+ err = copyup_file(parent->d_inode, file,
-+ bstart, bindex, size);
-+ if (!err)
-+ break;
-+ }
-+ return err;
-+ } else {
-+ /*
-+ * turn off writeable flags, to force delayed copyup
-+ * by caller.
-+ */
-+ lower_flags &= ~(OPEN_WRITE_FLAGS);
-+ }
-+ }
-+
-+ dget(lower_dentry);
-+
-+ /*
-+ * dentry_open will decrement mnt refcnt if err.
-+ * otherwise fput() will do an mntput() for us upon file close.
-+ */
-+ unionfs_mntget(file->f_path.dentry, bstart);
-+ lower_file =
-+ dentry_open(lower_dentry,
-+ unionfs_lower_mnt_idx(file->f_path.dentry, bstart),
-+ lower_flags, current_cred());
-+ if (IS_ERR(lower_file))
-+ return PTR_ERR(lower_file);
-+
-+ unionfs_set_lower_file(file, lower_file);
-+ branchget(inode->i_sb, bstart);
-+
-+ return 0;
-+}
-+
-+int unionfs_open(struct inode *inode, struct file *file)
-+{
-+ int err = 0;
-+ struct file *lower_file = NULL;
-+ struct dentry *dentry = file->f_path.dentry;
-+ struct dentry *parent;
-+ int bindex = 0, bstart = 0, bend = 0;
-+ int size;
-+ int valid = 0;
-+
-+ unionfs_read_lock(inode->i_sb, UNIONFS_SMUTEX_PARENT);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ /* don't open unhashed/deleted files */
-+ if (d_deleted(dentry)) {
-+ err = -ENOENT;
-+ goto out_nofree;
-+ }
-+
-+ /* XXX: should I change 'false' below to the 'willwrite' flag? */
-+ valid = __unionfs_d_revalidate(dentry, parent, false);
-+ if (unlikely(!valid)) {
-+ err = -ESTALE;
-+ goto out_nofree;
-+ }
-+
-+ file->private_data =
-+ kzalloc(sizeof(struct unionfs_file_info), GFP_KERNEL);
-+ if (unlikely(!UNIONFS_F(file))) {
-+ err = -ENOMEM;
-+ goto out_nofree;
-+ }
-+ fbstart(file) = -1;
-+ fbend(file) = -1;
-+ atomic_set(&UNIONFS_F(file)->generation,
-+ atomic_read(&UNIONFS_I(inode)->generation));
-+
-+ size = sizeof(struct file *) * sbmax(inode->i_sb);
-+ UNIONFS_F(file)->lower_files = kzalloc(size, GFP_KERNEL);
-+ if (unlikely(!UNIONFS_F(file)->lower_files)) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+ size = sizeof(int) * sbmax(inode->i_sb);
-+ UNIONFS_F(file)->saved_branch_ids = kzalloc(size, GFP_KERNEL);
-+ if (unlikely(!UNIONFS_F(file)->saved_branch_ids)) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ bstart = fbstart(file) = dbstart(dentry);
-+ bend = fbend(file) = dbend(dentry);
-+
-+ /*
-+ * open all directories and make the unionfs file struct point to
-+ * these lower file structs
-+ */
-+ if (S_ISDIR(inode->i_mode))
-+ err = __open_dir(inode, file); /* open a dir */
-+ else
-+ err = __open_file(inode, file, parent); /* open a file */
-+
-+ /* freeing the allocated resources, and fput the opened files */
-+ if (err) {
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ lower_file = unionfs_lower_file_idx(file, bindex);
-+ if (!lower_file)
-+ continue;
-+
-+ branchput(dentry->d_sb, bindex);
-+ /* fput calls dput for lower_dentry */
-+ fput(lower_file);
-+ }
-+ }
-+
-+out:
-+ if (err) {
-+ kfree(UNIONFS_F(file)->lower_files);
-+ kfree(UNIONFS_F(file)->saved_branch_ids);
-+ kfree(UNIONFS_F(file));
-+ }
-+out_nofree:
-+ if (!err) {
-+ unionfs_postcopyup_setmnt(dentry);
-+ unionfs_copy_attr_times(inode);
-+ unionfs_check_file(file);
-+ unionfs_check_inode(inode);
-+ }
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(inode->i_sb);
-+ return err;
-+}
-+
-+/*
-+ * release all lower object references & free the file info structure
-+ *
-+ * No need to grab sb info's rwsem.
-+ */
-+int unionfs_file_release(struct inode *inode, struct file *file)
-+{
-+ struct file *lower_file = NULL;
-+ struct unionfs_file_info *fileinfo;
-+ struct unionfs_inode_info *inodeinfo;
-+ struct super_block *sb = inode->i_sb;
-+ struct dentry *dentry = file->f_path.dentry;
-+ struct dentry *parent;
-+ int bindex, bstart, bend;
-+ int fgen, err = 0;
-+
-+ /*
-+ * Since mm/memory.c:might_fault() (under PROVE_LOCKING) was
-+ * modified in 2.6.29-rc1 to call might_lock_read on mmap_sem, this
-+ * has been causing false positives in file system stacking layers.
-+ * In particular, our ->mmap is called after sys_mmap2 already holds
-+ * mmap_sem, then we lock our own mutexes; but earlier, it's
-+ * possible for lockdep to have locked our mutexes first, and then
-+ * we call a lower ->readdir which could call might_fault. The
-+ * different ordering of the locks is what lockdep complains about
-+ * -- unnecessarily. Therefore, we have no choice but to tell
-+ * lockdep to temporarily turn off lockdep here. Note: the comments
-+ * inside might_sleep also suggest that it would have been
-+ * nicer to only annotate paths that needs that might_lock_read.
-+ */
-+ lockdep_off();
-+ unionfs_read_lock(sb, UNIONFS_SMUTEX_PARENT);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ /*
-+ * We try to revalidate, but the VFS ignores return return values
-+ * from file->release, so we must always try to succeed here,
-+ * including to do the kfree and dput below. So if revalidation
-+ * failed, all we can do is print some message and keep going.
-+ */
-+ err = unionfs_file_revalidate(file, parent,
-+ UNIONFS_F(file)->wrote_to_file);
-+ if (!err)
-+ unionfs_check_file(file);
-+ fileinfo = UNIONFS_F(file);
-+ BUG_ON(file->f_path.dentry->d_inode != inode);
-+ inodeinfo = UNIONFS_I(inode);
-+
-+ /* fput all the lower files */
-+ fgen = atomic_read(&fileinfo->generation);
-+ bstart = fbstart(file);
-+ bend = fbend(file);
-+
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ lower_file = unionfs_lower_file_idx(file, bindex);
-+
-+ if (lower_file) {
-+ unionfs_set_lower_file_idx(file, bindex, NULL);
-+ fput(lower_file);
-+ branchput(sb, bindex);
-+ }
-+
-+ /* if there are no more refs to the dentry, dput it */
-+ if (d_deleted(dentry)) {
-+ dput(unionfs_lower_dentry_idx(dentry, bindex));
-+ unionfs_set_lower_dentry_idx(dentry, bindex, NULL);
-+ }
-+ }
-+
-+ kfree(fileinfo->lower_files);
-+ kfree(fileinfo->saved_branch_ids);
-+
-+ if (fileinfo->rdstate) {
-+ fileinfo->rdstate->access = jiffies;
-+ spin_lock(&inodeinfo->rdlock);
-+ inodeinfo->rdcount++;
-+ list_add_tail(&fileinfo->rdstate->cache,
-+ &inodeinfo->readdircache);
-+ mark_inode_dirty(inode);
-+ spin_unlock(&inodeinfo->rdlock);
-+ fileinfo->rdstate = NULL;
-+ }
-+ kfree(fileinfo);
-+
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(sb);
-+ lockdep_on();
-+ return err;
-+}
-+
-+/* pass the ioctl to the lower fs */
-+static long do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-+{
-+ struct file *lower_file;
-+ int err;
-+
-+ lower_file = unionfs_lower_file(file);
-+
-+ err = -ENOTTY;
-+ if (!lower_file || !lower_file->f_op)
-+ goto out;
-+ if (lower_file->f_op->unlocked_ioctl) {
-+ err = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
-+#ifdef CONFIG_COMPAT
-+ } else if (lower_file->f_op->ioctl) {
-+ err = lower_file->f_op->compat_ioctl(
-+ lower_file->f_path.dentry->d_inode,
-+ lower_file, cmd, arg);
-+#endif
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+/*
-+ * return to user-space the branch indices containing the file in question
-+ *
-+ * We use fd_set and therefore we are limited to the number of the branches
-+ * to FD_SETSIZE, which is currently 1024 - plenty for most people
-+ */
-+static int unionfs_ioctl_queryfile(struct file *file, struct dentry *parent,
-+ unsigned int cmd, unsigned long arg)
-+{
-+ int err = 0;
-+ fd_set branchlist;
-+ int bstart = 0, bend = 0, bindex = 0;
-+ int orig_bstart, orig_bend;
-+ struct dentry *dentry, *lower_dentry;
-+ struct vfsmount *mnt;
-+
-+ dentry = file->f_path.dentry;
-+ orig_bstart = dbstart(dentry);
-+ orig_bend = dbend(dentry);
-+ err = unionfs_partial_lookup(dentry, parent);
-+ if (err)
-+ goto out;
-+ bstart = dbstart(dentry);
-+ bend = dbend(dentry);
-+
-+ FD_ZERO(&branchlist);
-+
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
-+ if (!lower_dentry)
-+ continue;
-+ if (likely(lower_dentry->d_inode))
-+ FD_SET(bindex, &branchlist);
-+ /* purge any lower objects after partial_lookup */
-+ if (bindex < orig_bstart || bindex > orig_bend) {
-+ dput(lower_dentry);
-+ unionfs_set_lower_dentry_idx(dentry, bindex, NULL);
-+ iput(unionfs_lower_inode_idx(dentry->d_inode, bindex));
-+ unionfs_set_lower_inode_idx(dentry->d_inode, bindex,
-+ NULL);
-+ mnt = unionfs_lower_mnt_idx(dentry, bindex);
-+ if (!mnt)
-+ continue;
-+ unionfs_mntput(dentry, bindex);
-+ unionfs_set_lower_mnt_idx(dentry, bindex, NULL);
-+ }
-+ }
-+ /* restore original dentry's offsets */
-+ dbstart(dentry) = orig_bstart;
-+ dbend(dentry) = orig_bend;
-+ ibstart(dentry->d_inode) = orig_bstart;
-+ ibend(dentry->d_inode) = orig_bend;
-+
-+ err = copy_to_user((void __user *)arg, &branchlist, sizeof(fd_set));
-+ if (unlikely(err))
-+ err = -EFAULT;
-+
-+out:
-+ return err < 0 ? err : bend;
-+}
-+
-+long unionfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-+{
-+ long err;
-+ struct dentry *dentry = file->f_path.dentry;
-+ struct dentry *parent;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ err = unionfs_file_revalidate(file, parent, true);
-+ if (unlikely(err))
-+ goto out;
-+
-+ /* check if asked for local commands */
-+ switch (cmd) {
-+ case UNIONFS_IOCTL_INCGEN:
-+ /* Increment the superblock generation count */
-+ pr_info("unionfs: incgen ioctl deprecated; "
-+ "use \"-o remount,incgen\"\n");
-+ err = -ENOSYS;
-+ break;
-+
-+ case UNIONFS_IOCTL_QUERYFILE:
-+ /* Return list of branches containing the given file */
-+ err = unionfs_ioctl_queryfile(file, parent, cmd, arg);
-+ break;
-+
-+ default:
-+ /* pass the ioctl down */
-+ err = do_ioctl(file, cmd, arg);
-+ break;
-+ }
-+
-+out:
-+ unionfs_check_file(file);
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+ return err;
-+}
-+
-+int unionfs_flush(struct file *file, fl_owner_t id)
-+{
-+ int err = 0;
-+ struct file *lower_file = NULL;
-+ struct dentry *dentry = file->f_path.dentry;
-+ struct dentry *parent;
-+ int bindex, bstart, bend;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ err = unionfs_file_revalidate(file, parent,
-+ UNIONFS_F(file)->wrote_to_file);
-+ if (unlikely(err))
-+ goto out;
-+ unionfs_check_file(file);
-+
-+ bstart = fbstart(file);
-+ bend = fbend(file);
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ lower_file = unionfs_lower_file_idx(file, bindex);
-+
-+ if (lower_file && lower_file->f_op &&
-+ lower_file->f_op->flush) {
-+ err = lower_file->f_op->flush(lower_file, id);
-+ if (err)
-+ goto out;
-+ }
-+
-+ }
-+
-+out:
-+ if (!err)
-+ unionfs_check_file(file);
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+ return err;
-+}
-diff --git a/fs/unionfs/copyup.c b/fs/unionfs/copyup.c
-new file mode 100644
-index 0000000..bba3a75
---- /dev/null
-+++ b/fs/unionfs/copyup.c
-@@ -0,0 +1,896 @@
-+/*
-+ * Copyright (c) 2003-2010 Erez Zadok
-+ * Copyright (c) 2003-2006 Charles P. Wright
-+ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2005-2006 Junjiro Okajima
-+ * Copyright (c) 2005 Arun M. Krishnakumar
-+ * Copyright (c) 2004-2006 David P. Quigley
-+ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
-+ * Copyright (c) 2003 Puja Gupta
-+ * Copyright (c) 2003 Harikesavan Krishnan
-+ * Copyright (c) 2003-2010 Stony Brook University
-+ * Copyright (c) 2003-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include "union.h"
-+
-+/*
-+ * For detailed explanation of copyup see:
-+ * Documentation/filesystems/unionfs/concepts.txt
-+ */
-+
-+#ifdef CONFIG_UNION_FS_XATTR
-+/* copyup all extended attrs for a given dentry */
-+static int copyup_xattrs(struct dentry *old_lower_dentry,
-+ struct dentry *new_lower_dentry)
-+{
-+ int err = 0;
-+ ssize_t list_size = -1;
-+ char *name_list = NULL;
-+ char *attr_value = NULL;
-+ char *name_list_buf = NULL;
-+
-+ /* query the actual size of the xattr list */
-+ list_size = vfs_listxattr(old_lower_dentry, NULL, 0);
-+ if (list_size <= 0) {
-+ err = list_size;
-+ goto out;
-+ }
-+
-+ /* allocate space for the actual list */
-+ name_list = unionfs_xattr_alloc(list_size + 1, XATTR_LIST_MAX);
-+ if (unlikely(!name_list || IS_ERR(name_list))) {
-+ err = PTR_ERR(name_list);
-+ goto out;
-+ }
-+
-+ name_list_buf = name_list; /* save for kfree at end */
-+
-+ /* now get the actual xattr list of the source file */
-+ list_size = vfs_listxattr(old_lower_dentry, name_list, list_size);
-+ if (list_size <= 0) {
-+ err = list_size;
-+ goto out;
-+ }
-+
-+ /* allocate space to hold each xattr's value */
-+ attr_value = unionfs_xattr_alloc(XATTR_SIZE_MAX, XATTR_SIZE_MAX);
-+ if (unlikely(!attr_value || IS_ERR(attr_value))) {
-+ err = PTR_ERR(name_list);
-+ goto out;
-+ }
-+
-+ /* in a loop, get and set each xattr from src to dst file */
-+ while (*name_list) {
-+ ssize_t size;
-+
-+ /* Lock here since vfs_getxattr doesn't lock for us */
-+ mutex_lock(&old_lower_dentry->d_inode->i_mutex);
-+ size = vfs_getxattr(old_lower_dentry, name_list,
-+ attr_value, XATTR_SIZE_MAX);
-+ mutex_unlock(&old_lower_dentry->d_inode->i_mutex);
-+ if (size < 0) {
-+ err = size;
-+ goto out;
-+ }
-+ if (size > XATTR_SIZE_MAX) {
-+ err = -E2BIG;
-+ goto out;
-+ }
-+ /* Don't lock here since vfs_setxattr does it for us. */
-+ err = vfs_setxattr(new_lower_dentry, name_list, attr_value,
-+ size, 0);
-+ /*
-+ * Selinux depends on "security.*" xattrs, so to maintain
-+ * the security of copied-up files, if Selinux is active,
-+ * then we must copy these xattrs as well. So we need to
-+ * temporarily get FOWNER privileges.
-+ * XXX: move entire copyup code to SIOQ.
-+ */
-+ if (err == -EPERM && !capable(CAP_FOWNER)) {
-+ const struct cred *old_creds;
-+ struct cred *new_creds;
-+
-+ new_creds = prepare_creds();
-+ if (unlikely(!new_creds)) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+ cap_raise(new_creds->cap_effective, CAP_FOWNER);
-+ old_creds = override_creds(new_creds);
-+ err = vfs_setxattr(new_lower_dentry, name_list,
-+ attr_value, size, 0);
-+ revert_creds(old_creds);
-+ }
-+ if (err < 0)
-+ goto out;
-+ name_list += strlen(name_list) + 1;
-+ }
-+out:
-+ unionfs_xattr_kfree(name_list_buf);
-+ unionfs_xattr_kfree(attr_value);
-+ /* Ignore if xattr isn't supported */
-+ if (err == -ENOTSUPP || err == -EOPNOTSUPP)
-+ err = 0;
-+ return err;
-+}
-+#endif /* CONFIG_UNION_FS_XATTR */
-+
-+/*
-+ * Determine the mode based on the copyup flags, and the existing dentry.
-+ *
-+ * Handle file systems which may not support certain options. For example
-+ * jffs2 doesn't allow one to chmod a symlink. So we ignore such harmless
-+ * errors, rather than propagating them up, which results in copyup errors
-+ * and errors returned back to users.
-+ */
-+static int copyup_permissions(struct super_block *sb,
-+ struct dentry *old_lower_dentry,
-+ struct dentry *new_lower_dentry)
-+{
-+ struct inode *i = old_lower_dentry->d_inode;
-+ struct iattr newattrs;
-+ int err;
-+
-+ newattrs.ia_atime = i->i_atime;
-+ newattrs.ia_mtime = i->i_mtime;
-+ newattrs.ia_ctime = i->i_ctime;
-+ newattrs.ia_gid = i->i_gid;
-+ newattrs.ia_uid = i->i_uid;
-+ newattrs.ia_valid = ATTR_CTIME | ATTR_ATIME | ATTR_MTIME |
-+ ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_FORCE |
-+ ATTR_GID | ATTR_UID;
-+ mutex_lock(&new_lower_dentry->d_inode->i_mutex);
-+ err = notify_change(new_lower_dentry, &newattrs);
-+ if (err)
-+ goto out;
-+
-+ /* now try to change the mode and ignore EOPNOTSUPP on symlinks */
-+ newattrs.ia_mode = i->i_mode;
-+ newattrs.ia_valid = ATTR_MODE | ATTR_FORCE;
-+ err = notify_change(new_lower_dentry, &newattrs);
-+ if (err == -EOPNOTSUPP &&
-+ S_ISLNK(new_lower_dentry->d_inode->i_mode)) {
-+ printk(KERN_WARNING
-+ "unionfs: changing \"%s\" symlink mode unsupported\n",
-+ new_lower_dentry->d_name.name);
-+ err = 0;
-+ }
-+
-+out:
-+ mutex_unlock(&new_lower_dentry->d_inode->i_mutex);
-+ return err;
-+}
-+
-+/*
-+ * create the new device/file/directory - use copyup_permission to copyup
-+ * times, and mode
-+ *
-+ * if the object being copied up is a regular file, the file is only created,
-+ * the contents have to be copied up separately
-+ */
-+static int __copyup_ndentry(struct dentry *old_lower_dentry,
-+ struct dentry *new_lower_dentry,
-+ struct dentry *new_lower_parent_dentry,
-+ char *symbuf)
-+{
-+ int err = 0;
-+ umode_t old_mode = old_lower_dentry->d_inode->i_mode;
-+ struct sioq_args args;
-+
-+ if (S_ISDIR(old_mode)) {
-+ args.mkdir.parent = new_lower_parent_dentry->d_inode;
-+ args.mkdir.dentry = new_lower_dentry;
-+ args.mkdir.mode = old_mode;
-+
-+ run_sioq(__unionfs_mkdir, &args);
-+ err = args.err;
-+ } else if (S_ISLNK(old_mode)) {
-+ args.symlink.parent = new_lower_parent_dentry->d_inode;
-+ args.symlink.dentry = new_lower_dentry;
-+ args.symlink.symbuf = symbuf;
-+
-+ run_sioq(__unionfs_symlink, &args);
-+ err = args.err;
-+ } else if (S_ISBLK(old_mode) || S_ISCHR(old_mode) ||
-+ S_ISFIFO(old_mode) || S_ISSOCK(old_mode)) {
-+ args.mknod.parent = new_lower_parent_dentry->d_inode;
-+ args.mknod.dentry = new_lower_dentry;
-+ args.mknod.mode = old_mode;
-+ args.mknod.dev = old_lower_dentry->d_inode->i_rdev;
-+
-+ run_sioq(__unionfs_mknod, &args);
-+ err = args.err;
-+ } else if (S_ISREG(old_mode)) {
-+ struct nameidata nd;
-+ err = init_lower_nd(&nd, LOOKUP_CREATE);
-+ if (unlikely(err < 0))
-+ goto out;
-+ args.create.nd = &nd;
-+ args.create.parent = new_lower_parent_dentry->d_inode;
-+ args.create.dentry = new_lower_dentry;
-+ args.create.mode = old_mode;
-+
-+ run_sioq(__unionfs_create, &args);
-+ err = args.err;
-+ release_lower_nd(&nd, err);
-+ } else {
-+ printk(KERN_CRIT "unionfs: unknown inode type %d\n",
-+ old_mode);
-+ BUG();
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+static int __copyup_reg_data(struct dentry *dentry,
-+ struct dentry *new_lower_dentry, int new_bindex,
-+ struct dentry *old_lower_dentry, int old_bindex,
-+ struct file **copyup_file, loff_t len)
-+{
-+ struct super_block *sb = dentry->d_sb;
-+ struct file *input_file;
-+ struct file *output_file;
-+ struct vfsmount *output_mnt;
-+ mm_segment_t old_fs;
-+ char *buf = NULL;
-+ ssize_t read_bytes, write_bytes;
-+ loff_t size;
-+ int err = 0;
-+
-+ /* open old file */
-+ unionfs_mntget(dentry, old_bindex);
-+ branchget(sb, old_bindex);
-+ /* dentry_open calls dput and mntput if it returns an error */
-+ input_file = dentry_open(old_lower_dentry,
-+ unionfs_lower_mnt_idx(dentry, old_bindex),
-+ O_RDONLY | O_LARGEFILE, current_cred());
-+ if (IS_ERR(input_file)) {
-+ dput(old_lower_dentry);
-+ err = PTR_ERR(input_file);
-+ goto out;
-+ }
-+ if (unlikely(!input_file->f_op || !input_file->f_op->read)) {
-+ err = -EINVAL;
-+ goto out_close_in;
-+ }
-+
-+ /* open new file */
-+ dget(new_lower_dentry);
-+ output_mnt = unionfs_mntget(sb->s_root, new_bindex);
-+ branchget(sb, new_bindex);
-+ output_file = dentry_open(new_lower_dentry, output_mnt,
-+ O_RDWR | O_LARGEFILE, current_cred());
-+ if (IS_ERR(output_file)) {
-+ err = PTR_ERR(output_file);
-+ goto out_close_in2;
-+ }
-+ if (unlikely(!output_file->f_op || !output_file->f_op->write)) {
-+ err = -EINVAL;
-+ goto out_close_out;
-+ }
-+
-+ /* allocating a buffer */
-+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-+ if (unlikely(!buf)) {
-+ err = -ENOMEM;
-+ goto out_close_out;
-+ }
-+
-+ input_file->f_pos = 0;
-+ output_file->f_pos = 0;
-+
-+ old_fs = get_fs();
-+ set_fs(KERNEL_DS);
-+
-+ size = len;
-+ err = 0;
-+ do {
-+ if (len >= PAGE_SIZE)
-+ size = PAGE_SIZE;
-+ else if ((len < PAGE_SIZE) && (len > 0))
-+ size = len;
-+
-+ len -= PAGE_SIZE;
-+
-+ read_bytes =
-+ input_file->f_op->read(input_file,
-+ (char __user *)buf, size,
-+ &input_file->f_pos);
-+ if (read_bytes <= 0) {
-+ err = read_bytes;
-+ break;
-+ }
-+
-+ /* see Documentation/filesystems/unionfs/issues.txt */
-+ lockdep_off();
-+ write_bytes =
-+ output_file->f_op->write(output_file,
-+ (char __user *)buf,
-+ read_bytes,
-+ &output_file->f_pos);
-+ lockdep_on();
-+ if ((write_bytes < 0) || (write_bytes < read_bytes)) {
-+ err = write_bytes;
-+ break;
-+ }
-+ } while ((read_bytes > 0) && (len > 0));
-+
-+ set_fs(old_fs);
-+
-+ kfree(buf);
-+
-+ if (!err)
-+ err = output_file->f_op->fsync(output_file, 0);
-+
-+ if (err)
-+ goto out_close_out;
-+
-+ if (copyup_file) {
-+ *copyup_file = output_file;
-+ goto out_close_in;
-+ }
-+
-+out_close_out:
-+ fput(output_file);
-+
-+out_close_in2:
-+ branchput(sb, new_bindex);
-+
-+out_close_in:
-+ fput(input_file);
-+
-+out:
-+ branchput(sb, old_bindex);
-+
-+ return err;
-+}
-+
-+/*
-+ * dput the lower references for old and new dentry & clear a lower dentry
-+ * pointer
-+ */
-+static void __clear(struct dentry *dentry, struct dentry *old_lower_dentry,
-+ int old_bstart, int old_bend,
-+ struct dentry *new_lower_dentry, int new_bindex)
-+{
-+ /* get rid of the lower dentry and all its traces */
-+ unionfs_set_lower_dentry_idx(dentry, new_bindex, NULL);
-+ dbstart(dentry) = old_bstart;
-+ dbend(dentry) = old_bend;
-+
-+ dput(new_lower_dentry);
-+ dput(old_lower_dentry);
-+}
-+
-+/*
-+ * Copy up a dentry to a file of specified name.
-+ *
-+ * @dir: used to pull the ->i_sb to access other branches
-+ * @dentry: the non-negative dentry whose lower_inode we should copy
-+ * @bstart: the branch of the lower_inode to copy from
-+ * @new_bindex: the branch to create the new file in
-+ * @name: the name of the file to create
-+ * @namelen: length of @name
-+ * @copyup_file: the "struct file" to return (optional)
-+ * @len: how many bytes to copy-up?
-+ */
-+int copyup_dentry(struct inode *dir, struct dentry *dentry, int bstart,
-+ int new_bindex, const char *name, int namelen,
-+ struct file **copyup_file, loff_t len)
-+{
-+ struct dentry *new_lower_dentry;
-+ struct dentry *old_lower_dentry = NULL;
-+ struct super_block *sb;
-+ int err = 0;
-+ int old_bindex;
-+ int old_bstart;
-+ int old_bend;
-+ struct dentry *new_lower_parent_dentry = NULL;
-+ mm_segment_t oldfs;
-+ char *symbuf = NULL;
-+
-+ verify_locked(dentry);
-+
-+ old_bindex = bstart;
-+ old_bstart = dbstart(dentry);
-+ old_bend = dbend(dentry);
-+
-+ BUG_ON(new_bindex < 0);
-+ BUG_ON(new_bindex >= old_bindex);
-+
-+ sb = dir->i_sb;
-+
-+ err = is_robranch_super(sb, new_bindex);
-+ if (err)
-+ goto out;
-+
-+ /* Create the directory structure above this dentry. */
-+ new_lower_dentry = create_parents(dir, dentry, name, new_bindex);
-+ if (IS_ERR(new_lower_dentry)) {
-+ err = PTR_ERR(new_lower_dentry);
-+ goto out;
-+ }
-+
-+ old_lower_dentry = unionfs_lower_dentry_idx(dentry, old_bindex);
-+ /* we conditionally dput this old_lower_dentry at end of function */
-+ dget(old_lower_dentry);
-+
-+ /* For symlinks, we must read the link before we lock the directory. */
-+ if (S_ISLNK(old_lower_dentry->d_inode->i_mode)) {
-+
-+ symbuf = kmalloc(PATH_MAX, GFP_KERNEL);
-+ if (unlikely(!symbuf)) {
-+ __clear(dentry, old_lower_dentry,
-+ old_bstart, old_bend,
-+ new_lower_dentry, new_bindex);
-+ err = -ENOMEM;
-+ goto out_free;
-+ }
-+
-+ oldfs = get_fs();
-+ set_fs(KERNEL_DS);
-+ err = old_lower_dentry->d_inode->i_op->readlink(
-+ old_lower_dentry,
-+ (char __user *)symbuf,
-+ PATH_MAX);
-+ set_fs(oldfs);
-+ if (err < 0) {
-+ __clear(dentry, old_lower_dentry,
-+ old_bstart, old_bend,
-+ new_lower_dentry, new_bindex);
-+ goto out_free;
-+ }
-+ symbuf[err] = '\0';
-+ }
-+
-+ /* Now we lock the parent, and create the object in the new branch. */
-+ new_lower_parent_dentry = lock_parent(new_lower_dentry);
-+
-+ /* create the new inode */
-+ err = __copyup_ndentry(old_lower_dentry, new_lower_dentry,
-+ new_lower_parent_dentry, symbuf);
-+
-+ if (err) {
-+ __clear(dentry, old_lower_dentry,
-+ old_bstart, old_bend,
-+ new_lower_dentry, new_bindex);
-+ goto out_unlock;
-+ }
-+
-+ /* We actually copyup the file here. */
-+ if (S_ISREG(old_lower_dentry->d_inode->i_mode))
-+ err = __copyup_reg_data(dentry, new_lower_dentry, new_bindex,
-+ old_lower_dentry, old_bindex,
-+ copyup_file, len);
-+ if (err)
-+ goto out_unlink;
-+
-+ /* Set permissions. */
-+ err = copyup_permissions(sb, old_lower_dentry, new_lower_dentry);
-+ if (err)
-+ goto out_unlink;
-+
-+#ifdef CONFIG_UNION_FS_XATTR
-+ /* Selinux uses extended attributes for permissions. */
-+ err = copyup_xattrs(old_lower_dentry, new_lower_dentry);
-+ if (err)
-+ goto out_unlink;
-+#endif /* CONFIG_UNION_FS_XATTR */
-+
-+ /* do not allow files getting deleted to be re-interposed */
-+ if (!d_deleted(dentry))
-+ unionfs_reinterpose(dentry);
-+
-+ goto out_unlock;
-+
-+out_unlink:
-+ /*
-+ * copyup failed, because we possibly ran out of space or
-+ * quota, or something else happened so let's unlink; we don't
-+ * really care about the return value of vfs_unlink
-+ */
-+ vfs_unlink(new_lower_parent_dentry->d_inode, new_lower_dentry);
-+
-+ if (copyup_file) {
-+ /* need to close the file */
-+
-+ fput(*copyup_file);
-+ branchput(sb, new_bindex);
-+ }
-+
-+ /*
-+ * TODO: should we reset the error to something like -EIO?
-+ *
-+ * If we don't reset, the user may get some nonsensical errors, but
-+ * on the other hand, if we reset to EIO, we guarantee that the user
-+ * will get a "confusing" error message.
-+ */
-+
-+out_unlock:
-+ unlock_dir(new_lower_parent_dentry);
-+
-+out_free:
-+ /*
-+ * If old_lower_dentry was not a file, then we need to dput it. If
-+ * it was a file, then it was already dput indirectly by other
-+ * functions we call above which operate on regular files.
-+ */
-+ if (old_lower_dentry && old_lower_dentry->d_inode &&
-+ !S_ISREG(old_lower_dentry->d_inode->i_mode))
-+ dput(old_lower_dentry);
-+ kfree(symbuf);
-+
-+ if (err) {
-+ /*
-+ * if directory creation succeeded, but inode copyup failed,
-+ * then purge new dentries.
-+ */
-+ if (dbstart(dentry) < old_bstart &&
-+ ibstart(dentry->d_inode) > dbstart(dentry))
-+ __clear(dentry, NULL, old_bstart, old_bend,
-+ unionfs_lower_dentry(dentry), dbstart(dentry));
-+ goto out;
-+ }
-+ if (!S_ISDIR(dentry->d_inode->i_mode)) {
-+ unionfs_postcopyup_release(dentry);
-+ if (!unionfs_lower_inode(dentry->d_inode)) {
-+ /*
-+ * If we got here, then we copied up to an
-+ * unlinked-open file, whose name is .unionfsXXXXX.
-+ */
-+ struct inode *inode = new_lower_dentry->d_inode;
-+ atomic_inc(&inode->i_count);
-+ unionfs_set_lower_inode_idx(dentry->d_inode,
-+ ibstart(dentry->d_inode),
-+ inode);
-+ }
-+ }
-+ unionfs_postcopyup_setmnt(dentry);
-+ /* sync inode times from copied-up inode to our inode */
-+ unionfs_copy_attr_times(dentry->d_inode);
-+ unionfs_check_inode(dir);
-+ unionfs_check_dentry(dentry);
-+out:
-+ return err;
-+}
-+
-+/*
-+ * This function creates a copy of a file represented by 'file' which
-+ * currently resides in branch 'bstart' to branch 'new_bindex.' The copy
-+ * will be named "name".
-+ */
-+int copyup_named_file(struct inode *dir, struct file *file, char *name,
-+ int bstart, int new_bindex, loff_t len)
-+{
-+ int err = 0;
-+ struct file *output_file = NULL;
-+
-+ err = copyup_dentry(dir, file->f_path.dentry, bstart, new_bindex,
-+ name, strlen(name), &output_file, len);
-+ if (!err) {
-+ fbstart(file) = new_bindex;
-+ unionfs_set_lower_file_idx(file, new_bindex, output_file);
-+ }
-+
-+ return err;
-+}
-+
-+/*
-+ * This function creates a copy of a file represented by 'file' which
-+ * currently resides in branch 'bstart' to branch 'new_bindex'.
-+ */
-+int copyup_file(struct inode *dir, struct file *file, int bstart,
-+ int new_bindex, loff_t len)
-+{
-+ int err = 0;
-+ struct file *output_file = NULL;
-+ struct dentry *dentry = file->f_path.dentry;
-+
-+ err = copyup_dentry(dir, dentry, bstart, new_bindex,
-+ dentry->d_name.name, dentry->d_name.len,
-+ &output_file, len);
-+ if (!err) {
-+ fbstart(file) = new_bindex;
-+ unionfs_set_lower_file_idx(file, new_bindex, output_file);
-+ }
-+
-+ return err;
-+}
-+
-+/* purge a dentry's lower-branch states (dput/mntput, etc.) */
-+static void __cleanup_dentry(struct dentry *dentry, int bindex,
-+ int old_bstart, int old_bend)
-+{
-+ int loop_start;
-+ int loop_end;
-+ int new_bstart = -1;
-+ int new_bend = -1;
-+ int i;
-+
-+ loop_start = min(old_bstart, bindex);
-+ loop_end = max(old_bend, bindex);
-+
-+ /*
-+ * This loop sets the bstart and bend for the new dentry by
-+ * traversing from left to right. It also dputs all negative
-+ * dentries except bindex
-+ */
-+ for (i = loop_start; i <= loop_end; i++) {
-+ if (!unionfs_lower_dentry_idx(dentry, i))
-+ continue;
-+
-+ if (i == bindex) {
-+ new_bend = i;
-+ if (new_bstart < 0)
-+ new_bstart = i;
-+ continue;
-+ }
-+
-+ if (!unionfs_lower_dentry_idx(dentry, i)->d_inode) {
-+ dput(unionfs_lower_dentry_idx(dentry, i));
-+ unionfs_set_lower_dentry_idx(dentry, i, NULL);
-+
-+ unionfs_mntput(dentry, i);
-+ unionfs_set_lower_mnt_idx(dentry, i, NULL);
-+ } else {
-+ if (new_bstart < 0)
-+ new_bstart = i;
-+ new_bend = i;
-+ }
-+ }
-+
-+ if (new_bstart < 0)
-+ new_bstart = bindex;
-+ if (new_bend < 0)
-+ new_bend = bindex;
-+ dbstart(dentry) = new_bstart;
-+ dbend(dentry) = new_bend;
-+
-+}
-+
-+/* set lower inode ptr and update bstart & bend if necessary */
-+static void __set_inode(struct dentry *upper, struct dentry *lower,
-+ int bindex)
-+{
-+ unionfs_set_lower_inode_idx(upper->d_inode, bindex,
-+ igrab(lower->d_inode));
-+ if (likely(ibstart(upper->d_inode) > bindex))
-+ ibstart(upper->d_inode) = bindex;
-+ if (likely(ibend(upper->d_inode) < bindex))
-+ ibend(upper->d_inode) = bindex;
-+
-+}
-+
-+/* set lower dentry ptr and update bstart & bend if necessary */
-+static void __set_dentry(struct dentry *upper, struct dentry *lower,
-+ int bindex)
-+{
-+ unionfs_set_lower_dentry_idx(upper, bindex, lower);
-+ if (likely(dbstart(upper) > bindex))
-+ dbstart(upper) = bindex;
-+ if (likely(dbend(upper) < bindex))
-+ dbend(upper) = bindex;
-+}
-+
-+/*
-+ * This function replicates the directory structure up-to given dentry
-+ * in the bindex branch.
-+ */
-+struct dentry *create_parents(struct inode *dir, struct dentry *dentry,
-+ const char *name, int bindex)
-+{
-+ int err;
-+ struct dentry *child_dentry;
-+ struct dentry *parent_dentry;
-+ struct dentry *lower_parent_dentry = NULL;
-+ struct dentry *lower_dentry = NULL;
-+ const char *childname;
-+ unsigned int childnamelen;
-+ int nr_dentry;
-+ int count = 0;
-+ int old_bstart;
-+ int old_bend;
-+ struct dentry **path = NULL;
-+ struct super_block *sb;
-+
-+ verify_locked(dentry);
-+
-+ err = is_robranch_super(dir->i_sb, bindex);
-+ if (err) {
-+ lower_dentry = ERR_PTR(err);
-+ goto out;
-+ }
-+
-+ old_bstart = dbstart(dentry);
-+ old_bend = dbend(dentry);
-+
-+ lower_dentry = ERR_PTR(-ENOMEM);
-+
-+ /* There is no sense allocating any less than the minimum. */
-+ nr_dentry = 1;
-+ path = kmalloc(nr_dentry * sizeof(struct dentry *), GFP_KERNEL);
-+ if (unlikely(!path))
-+ goto out;
-+
-+ /* assume the negative dentry of unionfs as the parent dentry */
-+ parent_dentry = dentry;
-+
-+ /*
-+ * This loop finds the first parent that exists in the given branch.
-+ * We start building the directory structure from there. At the end
-+ * of the loop, the following should hold:
-+ * - child_dentry is the first nonexistent child
-+ * - parent_dentry is the first existent parent
-+ * - path[0] is the = deepest child
-+ * - path[count] is the first child to create
-+ */
-+ do {
-+ child_dentry = parent_dentry;
-+
-+ /* find the parent directory dentry in unionfs */
-+ parent_dentry = dget_parent(child_dentry);
-+
-+ /* find out the lower_parent_dentry in the given branch */
-+ lower_parent_dentry =
-+ unionfs_lower_dentry_idx(parent_dentry, bindex);
-+
-+ /* grow path table */
-+ if (count == nr_dentry) {
-+ void *p;
-+
-+ nr_dentry *= 2;
-+ p = krealloc(path, nr_dentry * sizeof(struct dentry *),
-+ GFP_KERNEL);
-+ if (unlikely(!p)) {
-+ lower_dentry = ERR_PTR(-ENOMEM);
-+ goto out;
-+ }
-+ path = p;
-+ }
-+
-+ /* store the child dentry */
-+ path[count++] = child_dentry;
-+ } while (!lower_parent_dentry);
-+ count--;
-+
-+ sb = dentry->d_sb;
-+
-+ /*
-+ * This code goes between the begin/end labels and basically
-+ * emulates a while(child_dentry != dentry), only cleaner and
-+ * shorter than what would be a much longer while loop.
-+ */
-+begin:
-+ /* get lower parent dir in the current branch */
-+ lower_parent_dentry = unionfs_lower_dentry_idx(parent_dentry, bindex);
-+ dput(parent_dentry);
-+
-+ /* init the values to lookup */
-+ childname = child_dentry->d_name.name;
-+ childnamelen = child_dentry->d_name.len;
-+
-+ if (child_dentry != dentry) {
-+ /* lookup child in the underlying file system */
-+ lower_dentry = lookup_lck_len(childname, lower_parent_dentry,
-+ childnamelen);
-+ if (IS_ERR(lower_dentry))
-+ goto out;
-+ } else {
-+ /*
-+ * Is the name a whiteout of the child name ? lookup the
-+ * whiteout child in the underlying file system
-+ */
-+ lower_dentry = lookup_lck_len(name, lower_parent_dentry,
-+ strlen(name));
-+ if (IS_ERR(lower_dentry))
-+ goto out;
-+
-+ /* Replace the current dentry (if any) with the new one */
-+ dput(unionfs_lower_dentry_idx(dentry, bindex));
-+ unionfs_set_lower_dentry_idx(dentry, bindex,
-+ lower_dentry);
-+
-+ __cleanup_dentry(dentry, bindex, old_bstart, old_bend);
-+ goto out;
-+ }
-+
-+ if (lower_dentry->d_inode) {
-+ /*
-+ * since this already exists we dput to avoid
-+ * multiple references on the same dentry
-+ */
-+ dput(lower_dentry);
-+ } else {
-+ struct sioq_args args;
-+
-+ /* it's a negative dentry, create a new dir */
-+ lower_parent_dentry = lock_parent(lower_dentry);
-+
-+ args.mkdir.parent = lower_parent_dentry->d_inode;
-+ args.mkdir.dentry = lower_dentry;
-+ args.mkdir.mode = child_dentry->d_inode->i_mode;
-+
-+ run_sioq(__unionfs_mkdir, &args);
-+ err = args.err;
-+
-+ if (!err)
-+ err = copyup_permissions(dir->i_sb, child_dentry,
-+ lower_dentry);
-+ unlock_dir(lower_parent_dentry);
-+ if (err) {
-+ dput(lower_dentry);
-+ lower_dentry = ERR_PTR(err);
-+ goto out;
-+ }
-+
-+ }
-+
-+ __set_inode(child_dentry, lower_dentry, bindex);
-+ __set_dentry(child_dentry, lower_dentry, bindex);
-+ /*
-+ * update times of this dentry, but also the parent, because if
-+ * we changed, the parent may have changed too.
-+ */
-+ fsstack_copy_attr_times(parent_dentry->d_inode,
-+ lower_parent_dentry->d_inode);
-+ unionfs_copy_attr_times(child_dentry->d_inode);
-+
-+ parent_dentry = child_dentry;
-+ child_dentry = path[--count];
-+ goto begin;
-+out:
-+ /* cleanup any leftover locks from the do/while loop above */
-+ if (IS_ERR(lower_dentry))
-+ while (count)
-+ dput(path[count--]);
-+ kfree(path);
-+ return lower_dentry;
-+}
-+
-+/*
-+ * Post-copyup helper to ensure we have valid mnts: set lower mnt of
-+ * dentry+parents to the first parent node that has an mnt.
-+ */
-+void unionfs_postcopyup_setmnt(struct dentry *dentry)
-+{
-+ struct dentry *parent, *hasone;
-+ int bindex = dbstart(dentry);
-+
-+ if (unionfs_lower_mnt_idx(dentry, bindex))
-+ return;
-+ hasone = dentry->d_parent;
-+ /* this loop should stop at root dentry */
-+ while (!unionfs_lower_mnt_idx(hasone, bindex))
-+ hasone = hasone->d_parent;
-+ parent = dentry;
-+ while (!unionfs_lower_mnt_idx(parent, bindex)) {
-+ unionfs_set_lower_mnt_idx(parent, bindex,
-+ unionfs_mntget(hasone, bindex));
-+ parent = parent->d_parent;
-+ }
-+}
-+
-+/*
-+ * Post-copyup helper to release all non-directory source objects of a
-+ * copied-up file. Regular files should have only one lower object.
-+ */
-+void unionfs_postcopyup_release(struct dentry *dentry)
-+{
-+ int bstart, bend;
-+
-+ BUG_ON(S_ISDIR(dentry->d_inode->i_mode));
-+ bstart = dbstart(dentry);
-+ bend = dbend(dentry);
-+
-+ path_put_lowers(dentry, bstart + 1, bend, false);
-+ iput_lowers(dentry->d_inode, bstart + 1, bend, false);
-+
-+ dbend(dentry) = bstart;
-+ ibend(dentry->d_inode) = ibstart(dentry->d_inode) = bstart;
-+}
-diff --git a/fs/unionfs/debug.c b/fs/unionfs/debug.c
-new file mode 100644
-index 0000000..100d2c6
---- /dev/null
-+++ b/fs/unionfs/debug.c
-@@ -0,0 +1,532 @@
-+/*
-+ * Copyright (c) 2003-2010 Erez Zadok
-+ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2003-2010 Stony Brook University
-+ * Copyright (c) 2003-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include "union.h"
-+
-+/*
-+ * Helper debugging functions for maintainers (and for users to report back
-+ * useful information back to maintainers)
-+ */
-+
-+/* it's always useful to know what part of the code called us */
-+#define PRINT_CALLER(fname, fxn, line) \
-+ do { \
-+ if (!printed_caller) { \
-+ pr_debug("PC:%s:%s:%d\n", (fname), (fxn), (line)); \
-+ printed_caller = 1; \
-+ } \
-+ } while (0)
-+
-+/*
-+ * __unionfs_check_{inode,dentry,file} perform exhaustive sanity checking on
-+ * the fan-out of various Unionfs objects. We check that no lower objects
-+ * exist outside the start/end branch range; that all objects within are
-+ * non-NULL (with some allowed exceptions); that for every lower file
-+ * there's a lower dentry+inode; that the start/end ranges match for all
-+ * corresponding lower objects; that open files/symlinks have only one lower
-+ * objects, but directories can have several; and more.
-+ */
-+void __unionfs_check_inode(const struct inode *inode,
-+ const char *fname, const char *fxn, int line)
-+{
-+ int bindex;
-+ int istart, iend;
-+ struct inode *lower_inode;
-+ struct super_block *sb;
-+ int printed_caller = 0;
-+ void *poison_ptr;
-+
-+ /* for inodes now */
-+ BUG_ON(!inode);
-+ sb = inode->i_sb;
-+ istart = ibstart(inode);
-+ iend = ibend(inode);
-+ /* don't check inode if no lower branches */
-+ if (istart < 0 && iend < 0)
-+ return;
-+ if (unlikely(istart > iend)) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" Ci0: inode=%p istart/end=%d:%d\n",
-+ inode, istart, iend);
-+ }
-+ if (unlikely((istart == -1 && iend != -1) ||
-+ (istart != -1 && iend == -1))) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" Ci1: inode=%p istart/end=%d:%d\n",
-+ inode, istart, iend);
-+ }
-+ if (!S_ISDIR(inode->i_mode)) {
-+ if (unlikely(iend != istart)) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" Ci2: inode=%p istart=%d iend=%d\n",
-+ inode, istart, iend);
-+ }
-+ }
-+
-+ for (bindex = sbstart(sb); bindex < sbmax(sb); bindex++) {
-+ if (unlikely(!UNIONFS_I(inode))) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" Ci3: no inode_info %p\n", inode);
-+ return;
-+ }
-+ if (unlikely(!UNIONFS_I(inode)->lower_inodes)) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" Ci4: no lower_inodes %p\n", inode);
-+ return;
-+ }
-+ lower_inode = unionfs_lower_inode_idx(inode, bindex);
-+ if (lower_inode) {
-+ memset(&poison_ptr, POISON_INUSE, sizeof(void *));
-+ if (unlikely(bindex < istart || bindex > iend)) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" Ci5: inode/linode=%p:%p bindex=%d "
-+ "istart/end=%d:%d\n", inode,
-+ lower_inode, bindex, istart, iend);
-+ } else if (unlikely(lower_inode == poison_ptr)) {
-+ /* freed inode! */
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" Ci6: inode/linode=%p:%p bindex=%d "
-+ "istart/end=%d:%d\n", inode,
-+ lower_inode, bindex, istart, iend);
-+ }
-+ continue;
-+ }
-+ /* if we get here, then lower_inode == NULL */
-+ if (bindex < istart || bindex > iend)
-+ continue;
-+ /*
-+ * directories can have NULL lower inodes in b/t start/end,
-+ * but NOT if at the start/end range.
-+ */
-+ if (unlikely(S_ISDIR(inode->i_mode) &&
-+ bindex > istart && bindex < iend))
-+ continue;
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" Ci7: inode/linode=%p:%p "
-+ "bindex=%d istart/end=%d:%d\n",
-+ inode, lower_inode, bindex, istart, iend);
-+ }
-+}
-+
-+void __unionfs_check_dentry(const struct dentry *dentry,
-+ const char *fname, const char *fxn, int line)
-+{
-+ int bindex;
-+ int dstart, dend, istart, iend;
-+ struct dentry *lower_dentry;
-+ struct inode *inode, *lower_inode;
-+ struct super_block *sb;
-+ struct vfsmount *lower_mnt;
-+ int printed_caller = 0;
-+ void *poison_ptr;
-+
-+ BUG_ON(!dentry);
-+ sb = dentry->d_sb;
-+ inode = dentry->d_inode;
-+ dstart = dbstart(dentry);
-+ dend = dbend(dentry);
-+ /* don't check dentry/mnt if no lower branches */
-+ if (dstart < 0 && dend < 0)
-+ goto check_inode;
-+ BUG_ON(dstart > dend);
-+
-+ if (unlikely((dstart == -1 && dend != -1) ||
-+ (dstart != -1 && dend == -1))) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" CD0: dentry=%p dstart/end=%d:%d\n",
-+ dentry, dstart, dend);
-+ }
-+ /*
-+ * check for NULL dentries inside the start/end range, or
-+ * non-NULL dentries outside the start/end range.
-+ */
-+ for (bindex = sbstart(sb); bindex < sbmax(sb); bindex++) {
-+ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
-+ if (lower_dentry) {
-+ if (unlikely(bindex < dstart || bindex > dend)) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" CD1: dentry/lower=%p:%p(%p) "
-+ "bindex=%d dstart/end=%d:%d\n",
-+ dentry, lower_dentry,
-+ (lower_dentry ? lower_dentry->d_inode :
-+ (void *) -1L),
-+ bindex, dstart, dend);
-+ }
-+ } else { /* lower_dentry == NULL */
-+ if (bindex < dstart || bindex > dend)
-+ continue;
-+ /*
-+ * Directories can have NULL lower inodes in b/t
-+ * start/end, but NOT if at the start/end range.
-+ * Ignore this rule, however, if this is a NULL
-+ * dentry or a deleted dentry.
-+ */
-+ if (unlikely(!d_deleted((struct dentry *) dentry) &&
-+ inode &&
-+ !(inode && S_ISDIR(inode->i_mode) &&
-+ bindex > dstart && bindex < dend))) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" CD2: dentry/lower=%p:%p(%p) "
-+ "bindex=%d dstart/end=%d:%d\n",
-+ dentry, lower_dentry,
-+ (lower_dentry ?
-+ lower_dentry->d_inode :
-+ (void *) -1L),
-+ bindex, dstart, dend);
-+ }
-+ }
-+ }
-+
-+ /* check for vfsmounts same as for dentries */
-+ for (bindex = sbstart(sb); bindex < sbmax(sb); bindex++) {
-+ lower_mnt = unionfs_lower_mnt_idx(dentry, bindex);
-+ if (lower_mnt) {
-+ if (unlikely(bindex < dstart || bindex > dend)) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" CM0: dentry/lmnt=%p:%p bindex=%d "
-+ "dstart/end=%d:%d\n", dentry,
-+ lower_mnt, bindex, dstart, dend);
-+ }
-+ } else { /* lower_mnt == NULL */
-+ if (bindex < dstart || bindex > dend)
-+ continue;
-+ /*
-+ * Directories can have NULL lower inodes in b/t
-+ * start/end, but NOT if at the start/end range.
-+ * Ignore this rule, however, if this is a NULL
-+ * dentry.
-+ */
-+ if (unlikely(inode &&
-+ !(inode && S_ISDIR(inode->i_mode) &&
-+ bindex > dstart && bindex < dend))) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" CM1: dentry/lmnt=%p:%p "
-+ "bindex=%d dstart/end=%d:%d\n",
-+ dentry, lower_mnt, bindex,
-+ dstart, dend);
-+ }
-+ }
-+ }
-+
-+check_inode:
-+ /* for inodes now */
-+ if (!inode)
-+ return;
-+ istart = ibstart(inode);
-+ iend = ibend(inode);
-+ /* don't check inode if no lower branches */
-+ if (istart < 0 && iend < 0)
-+ return;
-+ BUG_ON(istart > iend);
-+ if (unlikely((istart == -1 && iend != -1) ||
-+ (istart != -1 && iend == -1))) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" CI0: dentry/inode=%p:%p istart/end=%d:%d\n",
-+ dentry, inode, istart, iend);
-+ }
-+ if (unlikely(istart != dstart)) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" CI1: dentry/inode=%p:%p istart=%d dstart=%d\n",
-+ dentry, inode, istart, dstart);
-+ }
-+ if (unlikely(iend != dend)) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" CI2: dentry/inode=%p:%p iend=%d dend=%d\n",
-+ dentry, inode, iend, dend);
-+ }
-+
-+ if (!S_ISDIR(inode->i_mode)) {
-+ if (unlikely(dend != dstart)) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" CI3: dentry/inode=%p:%p dstart=%d dend=%d\n",
-+ dentry, inode, dstart, dend);
-+ }
-+ if (unlikely(iend != istart)) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" CI4: dentry/inode=%p:%p istart=%d iend=%d\n",
-+ dentry, inode, istart, iend);
-+ }
-+ }
-+
-+ for (bindex = sbstart(sb); bindex < sbmax(sb); bindex++) {
-+ lower_inode = unionfs_lower_inode_idx(inode, bindex);
-+ if (lower_inode) {
-+ memset(&poison_ptr, POISON_INUSE, sizeof(void *));
-+ if (unlikely(bindex < istart || bindex > iend)) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" CI5: dentry/linode=%p:%p bindex=%d "
-+ "istart/end=%d:%d\n", dentry,
-+ lower_inode, bindex, istart, iend);
-+ } else if (unlikely(lower_inode == poison_ptr)) {
-+ /* freed inode! */
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" CI6: dentry/linode=%p:%p bindex=%d "
-+ "istart/end=%d:%d\n", dentry,
-+ lower_inode, bindex, istart, iend);
-+ }
-+ continue;
-+ }
-+ /* if we get here, then lower_inode == NULL */
-+ if (bindex < istart || bindex > iend)
-+ continue;
-+ /*
-+ * directories can have NULL lower inodes in b/t start/end,
-+ * but NOT if at the start/end range.
-+ */
-+ if (unlikely(S_ISDIR(inode->i_mode) &&
-+ bindex > istart && bindex < iend))
-+ continue;
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" CI7: dentry/linode=%p:%p "
-+ "bindex=%d istart/end=%d:%d\n",
-+ dentry, lower_inode, bindex, istart, iend);
-+ }
-+
-+ /*
-+ * If it's a directory, then intermediate objects b/t start/end can
-+ * be NULL. But, check that all three are NULL: lower dentry, mnt,
-+ * and inode.
-+ */
-+ if (dstart >= 0 && dend >= 0 && S_ISDIR(inode->i_mode))
-+ for (bindex = dstart+1; bindex < dend; bindex++) {
-+ lower_inode = unionfs_lower_inode_idx(inode, bindex);
-+ lower_dentry = unionfs_lower_dentry_idx(dentry,
-+ bindex);
-+ lower_mnt = unionfs_lower_mnt_idx(dentry, bindex);
-+ if (unlikely(!((lower_inode && lower_dentry &&
-+ lower_mnt) ||
-+ (!lower_inode &&
-+ !lower_dentry && !lower_mnt)))) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" Cx: lmnt/ldentry/linode=%p:%p:%p "
-+ "bindex=%d dstart/end=%d:%d\n",
-+ lower_mnt, lower_dentry, lower_inode,
-+ bindex, dstart, dend);
-+ }
-+ }
-+ /* check if lower inode is newer than upper one (it shouldn't) */
-+ if (unlikely(is_newer_lower(dentry) && !is_negative_lower(dentry))) {
-+ PRINT_CALLER(fname, fxn, line);
-+ for (bindex = ibstart(inode); bindex <= ibend(inode);
-+ bindex++) {
-+ lower_inode = unionfs_lower_inode_idx(inode, bindex);
-+ if (unlikely(!lower_inode))
-+ continue;
-+ pr_debug(" CI8: bindex=%d mtime/lmtime=%lu.%lu/%lu.%lu "
-+ "ctime/lctime=%lu.%lu/%lu.%lu\n",
-+ bindex,
-+ inode->i_mtime.tv_sec,
-+ inode->i_mtime.tv_nsec,
-+ lower_inode->i_mtime.tv_sec,
-+ lower_inode->i_mtime.tv_nsec,
-+ inode->i_ctime.tv_sec,
-+ inode->i_ctime.tv_nsec,
-+ lower_inode->i_ctime.tv_sec,
-+ lower_inode->i_ctime.tv_nsec);
-+ }
-+ }
-+}
-+
-+void __unionfs_check_file(const struct file *file,
-+ const char *fname, const char *fxn, int line)
-+{
-+ int bindex;
-+ int dstart, dend, fstart, fend;
-+ struct dentry *dentry;
-+ struct file *lower_file;
-+ struct inode *inode;
-+ struct super_block *sb;
-+ int printed_caller = 0;
-+
-+ BUG_ON(!file);
-+ dentry = file->f_path.dentry;
-+ sb = dentry->d_sb;
-+ dstart = dbstart(dentry);
-+ dend = dbend(dentry);
-+ BUG_ON(dstart > dend);
-+ fstart = fbstart(file);
-+ fend = fbend(file);
-+ BUG_ON(fstart > fend);
-+
-+ if (unlikely((fstart == -1 && fend != -1) ||
-+ (fstart != -1 && fend == -1))) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" CF0: file/dentry=%p:%p fstart/end=%d:%d\n",
-+ file, dentry, fstart, fend);
-+ }
-+ if (unlikely(fstart != dstart)) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" CF1: file/dentry=%p:%p fstart=%d dstart=%d\n",
-+ file, dentry, fstart, dstart);
-+ }
-+ if (unlikely(fend != dend)) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" CF2: file/dentry=%p:%p fend=%d dend=%d\n",
-+ file, dentry, fend, dend);
-+ }
-+ inode = dentry->d_inode;
-+ if (!S_ISDIR(inode->i_mode)) {
-+ if (unlikely(fend != fstart)) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" CF3: file/inode=%p:%p fstart=%d fend=%d\n",
-+ file, inode, fstart, fend);
-+ }
-+ if (unlikely(dend != dstart)) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" CF4: file/dentry=%p:%p dstart=%d dend=%d\n",
-+ file, dentry, dstart, dend);
-+ }
-+ }
-+
-+ /*
-+ * check for NULL dentries inside the start/end range, or
-+ * non-NULL dentries outside the start/end range.
-+ */
-+ for (bindex = sbstart(sb); bindex < sbmax(sb); bindex++) {
-+ lower_file = unionfs_lower_file_idx(file, bindex);
-+ if (lower_file) {
-+ if (unlikely(bindex < fstart || bindex > fend)) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" CF5: file/lower=%p:%p bindex=%d "
-+ "fstart/end=%d:%d\n", file,
-+ lower_file, bindex, fstart, fend);
-+ }
-+ } else { /* lower_file == NULL */
-+ if (bindex >= fstart && bindex <= fend) {
-+ /*
-+ * directories can have NULL lower inodes in
-+ * b/t start/end, but NOT if at the
-+ * start/end range.
-+ */
-+ if (unlikely(!(S_ISDIR(inode->i_mode) &&
-+ bindex > fstart &&
-+ bindex < fend))) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" CF6: file/lower=%p:%p "
-+ "bindex=%d fstart/end=%d:%d\n",
-+ file, lower_file, bindex,
-+ fstart, fend);
-+ }
-+ }
-+ }
-+ }
-+
-+ __unionfs_check_dentry(dentry, fname, fxn, line);
-+}
-+
-+void __unionfs_check_nd(const struct nameidata *nd,
-+ const char *fname, const char *fxn, int line)
-+{
-+ struct file *file;
-+ int printed_caller = 0;
-+
-+ if (unlikely(!nd))
-+ return;
-+ if (nd->flags & LOOKUP_OPEN) {
-+ file = nd->intent.open.file;
-+ if (unlikely(file->f_path.dentry &&
-+ strcmp(file->f_path.dentry->d_sb->s_type->name,
-+ UNIONFS_NAME))) {
-+ PRINT_CALLER(fname, fxn, line);
-+ pr_debug(" CND1: lower_file of type %s\n",
-+ file->f_path.dentry->d_sb->s_type->name);
-+ }
-+ }
-+}
-+
-+/* useful to track vfsmount leaks that could cause EBUSY on unmount */
-+void __show_branch_counts(const struct super_block *sb,
-+ const char *file, const char *fxn, int line)
-+{
-+ int i;
-+ struct vfsmount *mnt;
-+
-+ pr_debug("BC:");
-+ for (i = 0; i < sbmax(sb); i++) {
-+ if (likely(sb->s_root))
-+ mnt = UNIONFS_D(sb->s_root)->lower_paths[i].mnt;
-+ else
-+ mnt = NULL;
-+ printk(KERN_CONT "%d:",
-+ (mnt ? atomic_read(&mnt->mnt_count) : -99));
-+ }
-+ printk(KERN_CONT "%s:%s:%d\n", file, fxn, line);
-+}
-+
-+void __show_inode_times(const struct inode *inode,
-+ const char *file, const char *fxn, int line)
-+{
-+ struct inode *lower_inode;
-+ int bindex;
-+
-+ for (bindex = ibstart(inode); bindex <= ibend(inode); bindex++) {
-+ lower_inode = unionfs_lower_inode_idx(inode, bindex);
-+ if (unlikely(!lower_inode))
-+ continue;
-+ pr_debug("IT(%lu:%d): %s:%s:%d "
-+ "um=%lu/%lu lm=%lu/%lu uc=%lu/%lu lc=%lu/%lu\n",
-+ inode->i_ino, bindex,
-+ file, fxn, line,
-+ inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
-+ lower_inode->i_mtime.tv_sec,
-+ lower_inode->i_mtime.tv_nsec,
-+ inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
-+ lower_inode->i_ctime.tv_sec,
-+ lower_inode->i_ctime.tv_nsec);
-+ }
-+}
-+
-+void __show_dinode_times(const struct dentry *dentry,
-+ const char *file, const char *fxn, int line)
-+{
-+ struct inode *inode = dentry->d_inode;
-+ struct inode *lower_inode;
-+ int bindex;
-+
-+ for (bindex = ibstart(inode); bindex <= ibend(inode); bindex++) {
-+ lower_inode = unionfs_lower_inode_idx(inode, bindex);
-+ if (!lower_inode)
-+ continue;
-+ pr_debug("DT(%s:%lu:%d): %s:%s:%d "
-+ "um=%lu/%lu lm=%lu/%lu uc=%lu/%lu lc=%lu/%lu\n",
-+ dentry->d_name.name, inode->i_ino, bindex,
-+ file, fxn, line,
-+ inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
-+ lower_inode->i_mtime.tv_sec,
-+ lower_inode->i_mtime.tv_nsec,
-+ inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
-+ lower_inode->i_ctime.tv_sec,
-+ lower_inode->i_ctime.tv_nsec);
-+ }
-+}
-+
-+void __show_inode_counts(const struct inode *inode,
-+ const char *file, const char *fxn, int line)
-+{
-+ struct inode *lower_inode;
-+ int bindex;
-+
-+ if (unlikely(!inode)) {
-+ pr_debug("SiC: Null inode\n");
-+ return;
-+ }
-+ for (bindex = sbstart(inode->i_sb); bindex <= sbend(inode->i_sb);
-+ bindex++) {
-+ lower_inode = unionfs_lower_inode_idx(inode, bindex);
-+ if (unlikely(!lower_inode))
-+ continue;
-+ pr_debug("SIC(%lu:%d:%d): lc=%d %s:%s:%d\n",
-+ inode->i_ino, bindex,
-+ atomic_read(&(inode)->i_count),
-+ atomic_read(&(lower_inode)->i_count),
-+ file, fxn, line);
-+ }
-+}
-diff --git a/fs/unionfs/dentry.c b/fs/unionfs/dentry.c
-new file mode 100644
-index 0000000..a0c3bba
---- /dev/null
-+++ b/fs/unionfs/dentry.c
-@@ -0,0 +1,397 @@
-+/*
-+ * Copyright (c) 2003-2010 Erez Zadok
-+ * Copyright (c) 2003-2006 Charles P. Wright
-+ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2005-2006 Junjiro Okajima
-+ * Copyright (c) 2005 Arun M. Krishnakumar
-+ * Copyright (c) 2004-2006 David P. Quigley
-+ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
-+ * Copyright (c) 2003 Puja Gupta
-+ * Copyright (c) 2003 Harikesavan Krishnan
-+ * Copyright (c) 2003-2010 Stony Brook University
-+ * Copyright (c) 2003-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include "union.h"
-+
-+bool is_negative_lower(const struct dentry *dentry)
-+{
-+ int bindex;
-+ struct dentry *lower_dentry;
-+
-+ BUG_ON(!dentry);
-+ /* cache coherency: check if file was deleted on lower branch */
-+ if (dbstart(dentry) < 0)
-+ return true;
-+ for (bindex = dbstart(dentry); bindex <= dbend(dentry); bindex++) {
-+ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
-+ /* unhashed (i.e., unlinked) lower dentries don't count */
-+ if (lower_dentry && lower_dentry->d_inode &&
-+ !d_deleted(lower_dentry) &&
-+ !(lower_dentry->d_flags & DCACHE_NFSFS_RENAMED))
-+ return false;
-+ }
-+ return true;
-+}
-+
-+static inline void __dput_lowers(struct dentry *dentry, int start, int end)
-+{
-+ struct dentry *lower_dentry;
-+ int bindex;
-+
-+ if (start < 0)
-+ return;
-+ for (bindex = start; bindex <= end; bindex++) {
-+ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
-+ if (!lower_dentry)
-+ continue;
-+ unionfs_set_lower_dentry_idx(dentry, bindex, NULL);
-+ dput(lower_dentry);
-+ }
-+}
-+
-+/*
-+ * Purge and invalidate as many data pages of a unionfs inode. This is
-+ * called when the lower inode has changed, and we want to force processes
-+ * to re-get the new data.
-+ */
-+static inline void purge_inode_data(struct inode *inode)
-+{
-+ /* remove all non-private mappings */
-+ unmap_mapping_range(inode->i_mapping, 0, 0, 0);
-+ /* invalidate as many pages as possible */
-+ invalidate_mapping_pages(inode->i_mapping, 0, -1);
-+ /*
-+ * Don't try to truncate_inode_pages here, because this could lead
-+ * to a deadlock between some of address_space ops and dentry
-+ * revalidation: the address space op is invoked with a lock on our
-+ * own page, and truncate_inode_pages will block on locked pages.
-+ */
-+}
-+
-+/*
-+ * Revalidate a single file/symlink/special dentry. Assume that info nodes
-+ * of the @dentry and its @parent are locked. Assume parent is valid,
-+ * otherwise return false (and let's hope the VFS will try to re-lookup this
-+ * dentry). Returns true if valid, false otherwise.
-+ */
-+bool __unionfs_d_revalidate(struct dentry *dentry, struct dentry *parent,
-+ bool willwrite)
-+{
-+ bool valid = true; /* default is valid */
-+ struct dentry *lower_dentry;
-+ struct dentry *result;
-+ int bindex, bstart, bend;
-+ int sbgen, dgen, pdgen;
-+ int positive = 0;
-+ int interpose_flag;
-+
-+ verify_locked(dentry);
-+ verify_locked(parent);
-+
-+ /* if the dentry is unhashed, do NOT revalidate */
-+ if (d_deleted(dentry))
-+ goto out;
-+
-+ dgen = atomic_read(&UNIONFS_D(dentry)->generation);
-+
-+ if (is_newer_lower(dentry)) {
-+ /* root dentry is always valid */
-+ if (IS_ROOT(dentry)) {
-+ unionfs_copy_attr_times(dentry->d_inode);
-+ } else {
-+ /*
-+ * reset generation number to zero, guaranteed to be
-+ * "old"
-+ */
-+ dgen = 0;
-+ atomic_set(&UNIONFS_D(dentry)->generation, dgen);
-+ }
-+ if (!willwrite)
-+ purge_inode_data(dentry->d_inode);
-+ }
-+
-+ sbgen = atomic_read(&UNIONFS_SB(dentry->d_sb)->generation);
-+
-+ BUG_ON(dbstart(dentry) == -1);
-+ if (dentry->d_inode)
-+ positive = 1;
-+
-+ /* if our dentry is valid, then validate all lower ones */
-+ if (sbgen == dgen)
-+ goto validate_lowers;
-+
-+ /* The root entry should always be valid */
-+ BUG_ON(IS_ROOT(dentry));
-+
-+ /* We can't work correctly if our parent isn't valid. */
-+ pdgen = atomic_read(&UNIONFS_D(parent)->generation);
-+
-+ /* Free the pointers for our inodes and this dentry. */
-+ path_put_lowers_all(dentry, false);
-+
-+ interpose_flag = INTERPOSE_REVAL_NEG;
-+ if (positive) {
-+ interpose_flag = INTERPOSE_REVAL;
-+ iput_lowers_all(dentry->d_inode, true);
-+ }
-+
-+ if (realloc_dentry_private_data(dentry) != 0) {
-+ valid = false;
-+ goto out;
-+ }
-+
-+ result = unionfs_lookup_full(dentry, parent, interpose_flag);
-+ if (result) {
-+ if (IS_ERR(result)) {
-+ valid = false;
-+ goto out;
-+ }
-+ /*
-+ * current unionfs_lookup_backend() doesn't return
-+ * a valid dentry
-+ */
-+ dput(dentry);
-+ dentry = result;
-+ }
-+
-+ if (unlikely(positive && is_negative_lower(dentry))) {
-+ /* call make_bad_inode here ? */
-+ d_drop(dentry);
-+ valid = false;
-+ goto out;
-+ }
-+
-+ /*
-+ * if we got here then we have revalidated our dentry and all lower
-+ * ones, so we can return safely.
-+ */
-+ if (!valid) /* lower dentry revalidation failed */
-+ goto out;
-+
-+ /*
-+ * If the parent's gen no. matches the superblock's gen no., then
-+ * we can update our denty's gen no. If they didn't match, then it
-+ * was OK to revalidate this dentry with a stale parent, but we'll
-+ * purposely not update our dentry's gen no. (so it can be redone);
-+ * and, we'll mark our parent dentry as invalid so it'll force it
-+ * (and our dentry) to be revalidated.
-+ */
-+ if (pdgen == sbgen)
-+ atomic_set(&UNIONFS_D(dentry)->generation, sbgen);
-+ goto out;
-+
-+validate_lowers:
-+
-+ /* The revalidation must occur across all branches */
-+ bstart = dbstart(dentry);
-+ bend = dbend(dentry);
-+ BUG_ON(bstart == -1);
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
-+ if (!lower_dentry || !lower_dentry->d_op
-+ || !lower_dentry->d_op->d_revalidate)
-+ continue;
-+ /*
-+ * Don't pass nameidata to lower file system, because we
-+ * don't want an arbitrary lower file being opened or
-+ * returned to us: it may be useless to us because of the
-+ * fanout nature of unionfs (cf. file/directory open-file
-+ * invariants). We will open lower files as and when needed
-+ * later on.
-+ */
-+ if (!lower_dentry->d_op->d_revalidate(lower_dentry, NULL))
-+ valid = false;
-+ }
-+
-+ if (!dentry->d_inode ||
-+ ibstart(dentry->d_inode) < 0 ||
-+ ibend(dentry->d_inode) < 0) {
-+ valid = false;
-+ goto out;
-+ }
-+
-+ if (valid) {
-+ /*
-+ * If we get here, and we copy the meta-data from the lower
-+ * inode to our inode, then it is vital that we have already
-+ * purged all unionfs-level file data. We do that in the
-+ * caller (__unionfs_d_revalidate) by calling
-+ * purge_inode_data.
-+ */
-+ unionfs_copy_attr_all(dentry->d_inode,
-+ unionfs_lower_inode(dentry->d_inode));
-+ fsstack_copy_inode_size(dentry->d_inode,
-+ unionfs_lower_inode(dentry->d_inode));
-+ }
-+
-+out:
-+ return valid;
-+}
-+
-+/*
-+ * Determine if the lower inode objects have changed from below the unionfs
-+ * inode. Return true if changed, false otherwise.
-+ *
-+ * We check if the mtime or ctime have changed. However, the inode times
-+ * can be changed by anyone without much protection, including
-+ * asynchronously. This can sometimes cause unionfs to find that the lower
-+ * file system doesn't change its inode times quick enough, resulting in a
-+ * false positive indication (which is harmless, it just makes unionfs do
-+ * extra work in re-validating the objects). To minimize the chances of
-+ * these situations, we still consider such small time changes valid, but we
-+ * don't print debugging messages unless the time changes are greater than
-+ * UNIONFS_MIN_CC_TIME (which defaults to 3 seconds, as with NFS's acregmin)
-+ * because significant changes are more likely due to users manually
-+ * touching lower files.
-+ */
-+bool is_newer_lower(const struct dentry *dentry)
-+{
-+ int bindex;
-+ struct inode *inode;
-+ struct inode *lower_inode;
-+
-+ /* ignore if we're called on semi-initialized dentries/inodes */
-+ if (!dentry || !UNIONFS_D(dentry))
-+ return false;
-+ inode = dentry->d_inode;
-+ if (!inode || !UNIONFS_I(inode)->lower_inodes ||
-+ ibstart(inode) < 0 || ibend(inode) < 0)
-+ return false;
-+
-+ for (bindex = ibstart(inode); bindex <= ibend(inode); bindex++) {
-+ lower_inode = unionfs_lower_inode_idx(inode, bindex);
-+ if (!lower_inode)
-+ continue;
-+
-+ /* check if mtime/ctime have changed */
-+ if (unlikely(timespec_compare(&inode->i_mtime,
-+ &lower_inode->i_mtime) < 0)) {
-+ if ((lower_inode->i_mtime.tv_sec -
-+ inode->i_mtime.tv_sec) > UNIONFS_MIN_CC_TIME) {
-+ pr_info("unionfs: new lower inode mtime "
-+ "(bindex=%d, name=%s)\n", bindex,
-+ dentry->d_name.name);
-+ show_dinode_times(dentry);
-+ }
-+ return true;
-+ }
-+ if (unlikely(timespec_compare(&inode->i_ctime,
-+ &lower_inode->i_ctime) < 0)) {
-+ if ((lower_inode->i_ctime.tv_sec -
-+ inode->i_ctime.tv_sec) > UNIONFS_MIN_CC_TIME) {
-+ pr_info("unionfs: new lower inode ctime "
-+ "(bindex=%d, name=%s)\n", bindex,
-+ dentry->d_name.name);
-+ show_dinode_times(dentry);
-+ }
-+ return true;
-+ }
-+ }
-+
-+ /*
-+ * Last check: if this is a positive dentry, but somehow all lower
-+ * dentries are negative or unhashed, then this dentry needs to be
-+ * revalidated, because someone probably deleted the objects from
-+ * the lower branches directly.
-+ */
-+ if (is_negative_lower(dentry))
-+ return true;
-+
-+ return false; /* default: lower is not newer */
-+}
-+
-+static int unionfs_d_revalidate(struct dentry *dentry,
-+ struct nameidata *nd_unused)
-+{
-+ bool valid = true;
-+ int err = 1; /* 1 means valid for the VFS */
-+ struct dentry *parent;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ valid = __unionfs_d_revalidate(dentry, parent, false);
-+ if (valid) {
-+ unionfs_postcopyup_setmnt(dentry);
-+ unionfs_check_dentry(dentry);
-+ } else {
-+ d_drop(dentry);
-+ err = valid;
-+ }
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+
-+ return err;
-+}
-+
-+static void unionfs_d_release(struct dentry *dentry)
-+{
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
-+ if (unlikely(!UNIONFS_D(dentry)))
-+ goto out; /* skip if no lower branches */
-+ /* must lock our branch configuration here */
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ unionfs_check_dentry(dentry);
-+ /* this could be a negative dentry, so check first */
-+ if (dbstart(dentry) < 0) {
-+ unionfs_unlock_dentry(dentry);
-+ goto out; /* due to a (normal) failed lookup */
-+ }
-+
-+ /* Release all the lower dentries */
-+ path_put_lowers_all(dentry, true);
-+
-+ unionfs_unlock_dentry(dentry);
-+
-+out:
-+ free_dentry_private_data(dentry);
-+ unionfs_read_unlock(dentry->d_sb);
-+ return;
-+}
-+
-+/*
-+ * Called when we're removing the last reference to our dentry. So we
-+ * should drop all lower references too.
-+ */
-+static void unionfs_d_iput(struct dentry *dentry, struct inode *inode)
-+{
-+ int rc;
-+
-+ BUG_ON(!dentry);
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ if (!UNIONFS_D(dentry) || dbstart(dentry) < 0)
-+ goto drop_lower_inodes;
-+ path_put_lowers_all(dentry, false);
-+
-+drop_lower_inodes:
-+ rc = atomic_read(&inode->i_count);
-+ if (rc == 1 && inode->i_nlink == 1 && ibstart(inode) >= 0) {
-+ /* see Documentation/filesystems/unionfs/issues.txt */
-+ lockdep_off();
-+ iput(unionfs_lower_inode(inode));
-+ lockdep_on();
-+ unionfs_set_lower_inode(inode, NULL);
-+ /* XXX: may need to set start/end to -1? */
-+ }
-+
-+ iput(inode);
-+
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_read_unlock(dentry->d_sb);
-+}
-+
-+struct dentry_operations unionfs_dops = {
-+ .d_revalidate = unionfs_d_revalidate,
-+ .d_release = unionfs_d_release,
-+ .d_iput = unionfs_d_iput,
-+};
-diff --git a/fs/unionfs/dirfops.c b/fs/unionfs/dirfops.c
-new file mode 100644
-index 0000000..7da0ff0
---- /dev/null
-+++ b/fs/unionfs/dirfops.c
-@@ -0,0 +1,302 @@
-+/*
-+ * Copyright (c) 2003-2010 Erez Zadok
-+ * Copyright (c) 2003-2006 Charles P. Wright
-+ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2005-2006 Junjiro Okajima
-+ * Copyright (c) 2005 Arun M. Krishnakumar
-+ * Copyright (c) 2004-2006 David P. Quigley
-+ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
-+ * Copyright (c) 2003 Puja Gupta
-+ * Copyright (c) 2003 Harikesavan Krishnan
-+ * Copyright (c) 2003-2010 Stony Brook University
-+ * Copyright (c) 2003-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include "union.h"
-+
-+/* Make sure our rdstate is playing by the rules. */
-+static void verify_rdstate_offset(struct unionfs_dir_state *rdstate)
-+{
-+ BUG_ON(rdstate->offset >= DIREOF);
-+ BUG_ON(rdstate->cookie >= MAXRDCOOKIE);
-+}
-+
-+struct unionfs_getdents_callback {
-+ struct unionfs_dir_state *rdstate;
-+ void *dirent;
-+ int entries_written;
-+ int filldir_called;
-+ int filldir_error;
-+ filldir_t filldir;
-+ struct super_block *sb;
-+};
-+
-+/* based on generic filldir in fs/readir.c */
-+static int unionfs_filldir(void *dirent, const char *oname, int namelen,
-+ loff_t offset, u64 ino, unsigned int d_type)
-+{
-+ struct unionfs_getdents_callback *buf = dirent;
-+ struct filldir_node *found = NULL;
-+ int err = 0;
-+ int is_whiteout;
-+ char *name = (char *) oname;
-+
-+ buf->filldir_called++;
-+
-+ is_whiteout = is_whiteout_name(&name, &namelen);
-+
-+ found = find_filldir_node(buf->rdstate, name, namelen, is_whiteout);
-+
-+ if (found) {
-+ /*
-+ * If we had non-whiteout entry in dir cache, then mark it
-+ * as a whiteout and but leave it in the dir cache.
-+ */
-+ if (is_whiteout && !found->whiteout)
-+ found->whiteout = is_whiteout;
-+ goto out;
-+ }
-+
-+ /* if 'name' isn't a whiteout, filldir it. */
-+ if (!is_whiteout) {
-+ off_t pos = rdstate2offset(buf->rdstate);
-+ u64 unionfs_ino = ino;
-+
-+ err = buf->filldir(buf->dirent, name, namelen, pos,
-+ unionfs_ino, d_type);
-+ buf->rdstate->offset++;
-+ verify_rdstate_offset(buf->rdstate);
-+ }
-+ /*
-+ * If we did fill it, stuff it in our hash, otherwise return an
-+ * error.
-+ */
-+ if (err) {
-+ buf->filldir_error = err;
-+ goto out;
-+ }
-+ buf->entries_written++;
-+ err = add_filldir_node(buf->rdstate, name, namelen,
-+ buf->rdstate->bindex, is_whiteout);
-+ if (err)
-+ buf->filldir_error = err;
-+
-+out:
-+ return err;
-+}
-+
-+static int unionfs_readdir(struct file *file, void *dirent, filldir_t filldir)
-+{
-+ int err = 0;
-+ struct file *lower_file = NULL;
-+ struct dentry *dentry = file->f_path.dentry;
-+ struct dentry *parent;
-+ struct inode *inode = NULL;
-+ struct unionfs_getdents_callback buf;
-+ struct unionfs_dir_state *uds;
-+ int bend;
-+ loff_t offset;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ err = unionfs_file_revalidate(file, parent, false);
-+ if (unlikely(err))
-+ goto out;
-+
-+ inode = dentry->d_inode;
-+
-+ uds = UNIONFS_F(file)->rdstate;
-+ if (!uds) {
-+ if (file->f_pos == DIREOF) {
-+ goto out;
-+ } else if (file->f_pos > 0) {
-+ uds = find_rdstate(inode, file->f_pos);
-+ if (unlikely(!uds)) {
-+ err = -ESTALE;
-+ goto out;
-+ }
-+ UNIONFS_F(file)->rdstate = uds;
-+ } else {
-+ init_rdstate(file);
-+ uds = UNIONFS_F(file)->rdstate;
-+ }
-+ }
-+ bend = fbend(file);
-+
-+ while (uds->bindex <= bend) {
-+ lower_file = unionfs_lower_file_idx(file, uds->bindex);
-+ if (!lower_file) {
-+ uds->bindex++;
-+ uds->dirpos = 0;
-+ continue;
-+ }
-+
-+ /* prepare callback buffer */
-+ buf.filldir_called = 0;
-+ buf.filldir_error = 0;
-+ buf.entries_written = 0;
-+ buf.dirent = dirent;
-+ buf.filldir = filldir;
-+ buf.rdstate = uds;
-+ buf.sb = inode->i_sb;
-+
-+ /* Read starting from where we last left off. */
-+ offset = vfs_llseek(lower_file, uds->dirpos, SEEK_SET);
-+ if (offset < 0) {
-+ err = offset;
-+ goto out;
-+ }
-+ err = vfs_readdir(lower_file, unionfs_filldir, &buf);
-+
-+ /* Save the position for when we continue. */
-+ offset = vfs_llseek(lower_file, 0, SEEK_CUR);
-+ if (offset < 0) {
-+ err = offset;
-+ goto out;
-+ }
-+ uds->dirpos = offset;
-+
-+ /* Copy the atime. */
-+ fsstack_copy_attr_atime(inode,
-+ lower_file->f_path.dentry->d_inode);
-+
-+ if (err < 0)
-+ goto out;
-+
-+ if (buf.filldir_error)
-+ break;
-+
-+ if (!buf.entries_written) {
-+ uds->bindex++;
-+ uds->dirpos = 0;
-+ }
-+ }
-+
-+ if (!buf.filldir_error && uds->bindex >= bend) {
-+ /* Save the number of hash entries for next time. */
-+ UNIONFS_I(inode)->hashsize = uds->hashentries;
-+ free_rdstate(uds);
-+ UNIONFS_F(file)->rdstate = NULL;
-+ file->f_pos = DIREOF;
-+ } else {
-+ file->f_pos = rdstate2offset(uds);
-+ }
-+
-+out:
-+ if (!err)
-+ unionfs_check_file(file);
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+ return err;
-+}
-+
-+/*
-+ * This is not meant to be a generic repositioning function. If you do
-+ * things that aren't supported, then we return EINVAL.
-+ *
-+ * What is allowed:
-+ * (1) seeking to the same position that you are currently at
-+ * This really has no effect, but returns where you are.
-+ * (2) seeking to the beginning of the file
-+ * This throws out all state, and lets you begin again.
-+ */
-+static loff_t unionfs_dir_llseek(struct file *file, loff_t offset, int origin)
-+{
-+ struct unionfs_dir_state *rdstate;
-+ struct dentry *dentry = file->f_path.dentry;
-+ struct dentry *parent;
-+ loff_t err;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ err = unionfs_file_revalidate(file, parent, false);
-+ if (unlikely(err))
-+ goto out;
-+
-+ rdstate = UNIONFS_F(file)->rdstate;
-+
-+ /*
-+ * we let users seek to their current position, but not anywhere
-+ * else.
-+ */
-+ if (!offset) {
-+ switch (origin) {
-+ case SEEK_SET:
-+ if (rdstate) {
-+ free_rdstate(rdstate);
-+ UNIONFS_F(file)->rdstate = NULL;
-+ }
-+ init_rdstate(file);
-+ err = 0;
-+ break;
-+ case SEEK_CUR:
-+ err = file->f_pos;
-+ break;
-+ case SEEK_END:
-+ /* Unsupported, because we would break everything. */
-+ err = -EINVAL;
-+ break;
-+ }
-+ } else {
-+ switch (origin) {
-+ case SEEK_SET:
-+ if (rdstate) {
-+ if (offset == rdstate2offset(rdstate))
-+ err = offset;
-+ else if (file->f_pos == DIREOF)
-+ err = DIREOF;
-+ else
-+ err = -EINVAL;
-+ } else {
-+ struct inode *inode;
-+ inode = dentry->d_inode;
-+ rdstate = find_rdstate(inode, offset);
-+ if (rdstate) {
-+ UNIONFS_F(file)->rdstate = rdstate;
-+ err = rdstate->offset;
-+ } else {
-+ err = -EINVAL;
-+ }
-+ }
-+ break;
-+ case SEEK_CUR:
-+ case SEEK_END:
-+ /* Unsupported, because we would break everything. */
-+ err = -EINVAL;
-+ break;
-+ }
-+ }
-+
-+out:
-+ if (!err)
-+ unionfs_check_file(file);
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+ return err;
-+}
-+
-+/*
-+ * Trimmed directory options, we shouldn't pass everything down since
-+ * we don't want to operate on partial directories.
-+ */
-+struct file_operations unionfs_dir_fops = {
-+ .llseek = unionfs_dir_llseek,
-+ .read = generic_read_dir,
-+ .readdir = unionfs_readdir,
-+ .unlocked_ioctl = unionfs_ioctl,
-+ .open = unionfs_open,
-+ .release = unionfs_file_release,
-+ .flush = unionfs_flush,
-+ .fsync = unionfs_fsync,
-+ .fasync = unionfs_fasync,
-+};
-diff --git a/fs/unionfs/dirhelper.c b/fs/unionfs/dirhelper.c
-new file mode 100644
-index 0000000..033343b
---- /dev/null
-+++ b/fs/unionfs/dirhelper.c
-@@ -0,0 +1,158 @@
-+/*
-+ * Copyright (c) 2003-2010 Erez Zadok
-+ * Copyright (c) 2003-2006 Charles P. Wright
-+ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2005-2006 Junjiro Okajima
-+ * Copyright (c) 2005 Arun M. Krishnakumar
-+ * Copyright (c) 2004-2006 David P. Quigley
-+ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
-+ * Copyright (c) 2003 Puja Gupta
-+ * Copyright (c) 2003 Harikesavan Krishnan
-+ * Copyright (c) 2003-2010 Stony Brook University
-+ * Copyright (c) 2003-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include "union.h"
-+
-+#define RD_NONE 0
-+#define RD_CHECK_EMPTY 1
-+/* The callback structure for check_empty. */
-+struct unionfs_rdutil_callback {
-+ int err;
-+ int filldir_called;
-+ struct unionfs_dir_state *rdstate;
-+ int mode;
-+};
-+
-+/* This filldir function makes sure only whiteouts exist within a directory. */
-+static int readdir_util_callback(void *dirent, const char *oname, int namelen,
-+ loff_t offset, u64 ino, unsigned int d_type)
-+{
-+ int err = 0;
-+ struct unionfs_rdutil_callback *buf = dirent;
-+ int is_whiteout;
-+ struct filldir_node *found;
-+ char *name = (char *) oname;
-+
-+ buf->filldir_called = 1;
-+
-+ if (name[0] == '.' && (namelen == 1 ||
-+ (name[1] == '.' && namelen == 2)))
-+ goto out;
-+
-+ is_whiteout = is_whiteout_name(&name, &namelen);
-+
-+ found = find_filldir_node(buf->rdstate, name, namelen, is_whiteout);
-+ /* If it was found in the table there was a previous whiteout. */
-+ if (found)
-+ goto out;
-+
-+ /*
-+ * if it wasn't found and isn't a whiteout, the directory isn't
-+ * empty.
-+ */
-+ err = -ENOTEMPTY;
-+ if ((buf->mode == RD_CHECK_EMPTY) && !is_whiteout)
-+ goto out;
-+
-+ err = add_filldir_node(buf->rdstate, name, namelen,
-+ buf->rdstate->bindex, is_whiteout);
-+
-+out:
-+ buf->err = err;
-+ return err;
-+}
-+
-+/* Is a directory logically empty? */
-+int check_empty(struct dentry *dentry, struct dentry *parent,
-+ struct unionfs_dir_state **namelist)
-+{
-+ int err = 0;
-+ struct dentry *lower_dentry = NULL;
-+ struct vfsmount *mnt;
-+ struct super_block *sb;
-+ struct file *lower_file;
-+ struct unionfs_rdutil_callback *buf = NULL;
-+ int bindex, bstart, bend, bopaque;
-+
-+ sb = dentry->d_sb;
-+
-+
-+ BUG_ON(!S_ISDIR(dentry->d_inode->i_mode));
-+
-+ err = unionfs_partial_lookup(dentry, parent);
-+ if (err)
-+ goto out;
-+
-+ bstart = dbstart(dentry);
-+ bend = dbend(dentry);
-+ bopaque = dbopaque(dentry);
-+ if (0 <= bopaque && bopaque < bend)
-+ bend = bopaque;
-+
-+ buf = kmalloc(sizeof(struct unionfs_rdutil_callback), GFP_KERNEL);
-+ if (unlikely(!buf)) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+ buf->err = 0;
-+ buf->mode = RD_CHECK_EMPTY;
-+ buf->rdstate = alloc_rdstate(dentry->d_inode, bstart);
-+ if (unlikely(!buf->rdstate)) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ /* Process the lower directories with rdutil_callback as a filldir. */
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
-+ if (!lower_dentry)
-+ continue;
-+ if (!lower_dentry->d_inode)
-+ continue;
-+ if (!S_ISDIR(lower_dentry->d_inode->i_mode))
-+ continue;
-+
-+ dget(lower_dentry);
-+ mnt = unionfs_mntget(dentry, bindex);
-+ branchget(sb, bindex);
-+ lower_file = dentry_open(lower_dentry, mnt, O_RDONLY, current_cred());
-+ if (IS_ERR(lower_file)) {
-+ err = PTR_ERR(lower_file);
-+ branchput(sb, bindex);
-+ goto out;
-+ }
-+
-+ do {
-+ buf->filldir_called = 0;
-+ buf->rdstate->bindex = bindex;
-+ err = vfs_readdir(lower_file,
-+ readdir_util_callback, buf);
-+ if (buf->err)
-+ err = buf->err;
-+ } while ((err >= 0) && buf->filldir_called);
-+
-+ /* fput calls dput for lower_dentry */
-+ fput(lower_file);
-+ branchput(sb, bindex);
-+
-+ if (err < 0)
-+ goto out;
-+ }
-+
-+out:
-+ if (buf) {
-+ if (namelist && !err)
-+ *namelist = buf->rdstate;
-+ else if (buf->rdstate)
-+ free_rdstate(buf->rdstate);
-+ kfree(buf);
-+ }
-+
-+
-+ return err;
-+}
-diff --git a/fs/unionfs/fanout.h b/fs/unionfs/fanout.h
-new file mode 100644
-index 0000000..5b77eac
---- /dev/null
-+++ b/fs/unionfs/fanout.h
-@@ -0,0 +1,407 @@
-+/*
-+ * Copyright (c) 2003-2010 Erez Zadok
-+ * Copyright (c) 2003-2006 Charles P. Wright
-+ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2005 Arun M. Krishnakumar
-+ * Copyright (c) 2004-2006 David P. Quigley
-+ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
-+ * Copyright (c) 2003 Puja Gupta
-+ * Copyright (c) 2003 Harikesavan Krishnan
-+ * Copyright (c) 2003-2010 Stony Brook University
-+ * Copyright (c) 2003-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#ifndef _FANOUT_H_
-+#define _FANOUT_H_
-+
-+/*
-+ * Inode to private data
-+ *
-+ * Since we use containers and the struct inode is _inside_ the
-+ * unionfs_inode_info structure, UNIONFS_I will always (given a non-NULL
-+ * inode pointer), return a valid non-NULL pointer.
-+ */
-+static inline struct unionfs_inode_info *UNIONFS_I(const struct inode *inode)
-+{
-+ return container_of(inode, struct unionfs_inode_info, vfs_inode);
-+}
-+
-+#define ibstart(ino) (UNIONFS_I(ino)->bstart)
-+#define ibend(ino) (UNIONFS_I(ino)->bend)
-+
-+/* Dentry to private data */
-+#define UNIONFS_D(dent) ((struct unionfs_dentry_info *)(dent)->d_fsdata)
-+#define dbstart(dent) (UNIONFS_D(dent)->bstart)
-+#define dbend(dent) (UNIONFS_D(dent)->bend)
-+#define dbopaque(dent) (UNIONFS_D(dent)->bopaque)
-+
-+/* Superblock to private data */
-+#define UNIONFS_SB(super) ((struct unionfs_sb_info *)(super)->s_fs_info)
-+#define sbstart(sb) 0
-+#define sbend(sb) (UNIONFS_SB(sb)->bend)
-+#define sbmax(sb) (UNIONFS_SB(sb)->bend + 1)
-+#define sbhbid(sb) (UNIONFS_SB(sb)->high_branch_id)
-+
-+/* File to private Data */
-+#define UNIONFS_F(file) ((struct unionfs_file_info *)((file)->private_data))
-+#define fbstart(file) (UNIONFS_F(file)->bstart)
-+#define fbend(file) (UNIONFS_F(file)->bend)
-+
-+/* macros to manipulate branch IDs in stored in our superblock */
-+static inline int branch_id(struct super_block *sb, int index)
-+{
-+ BUG_ON(!sb || index < 0);
-+ return UNIONFS_SB(sb)->data[index].branch_id;
-+}
-+
-+static inline void set_branch_id(struct super_block *sb, int index, int val)
-+{
-+ BUG_ON(!sb || index < 0);
-+ UNIONFS_SB(sb)->data[index].branch_id = val;
-+}
-+
-+static inline void new_branch_id(struct super_block *sb, int index)
-+{
-+ BUG_ON(!sb || index < 0);
-+ set_branch_id(sb, index, ++UNIONFS_SB(sb)->high_branch_id);
-+}
-+
-+/*
-+ * Find new index of matching branch with an existing superblock of a known
-+ * (possibly old) id. This is needed because branches could have been
-+ * added/deleted causing the branches of any open files to shift.
-+ *
-+ * @sb: the new superblock which may have new/different branch IDs
-+ * @id: the old/existing id we're looking for
-+ * Returns index of newly found branch (0 or greater), -1 otherwise.
-+ */
-+static inline int branch_id_to_idx(struct super_block *sb, int id)
-+{
-+ int i;
-+ for (i = 0; i < sbmax(sb); i++) {
-+ if (branch_id(sb, i) == id)
-+ return i;
-+ }
-+ /* in the non-ODF code, this should really never happen */
-+ printk(KERN_WARNING "unionfs: cannot find branch with id %d\n", id);
-+ return -1;
-+}
-+
-+/* File to lower file. */
-+static inline struct file *unionfs_lower_file(const struct file *f)
-+{
-+ BUG_ON(!f);
-+ return UNIONFS_F(f)->lower_files[fbstart(f)];
-+}
-+
-+static inline struct file *unionfs_lower_file_idx(const struct file *f,
-+ int index)
-+{
-+ BUG_ON(!f || index < 0);
-+ return UNIONFS_F(f)->lower_files[index];
-+}
-+
-+static inline void unionfs_set_lower_file_idx(struct file *f, int index,
-+ struct file *val)
-+{
-+ BUG_ON(!f || index < 0);
-+ UNIONFS_F(f)->lower_files[index] = val;
-+ /* save branch ID (may be redundant?) */
-+ UNIONFS_F(f)->saved_branch_ids[index] =
-+ branch_id((f)->f_path.dentry->d_sb, index);
-+}
-+
-+static inline void unionfs_set_lower_file(struct file *f, struct file *val)
-+{
-+ BUG_ON(!f);
-+ unionfs_set_lower_file_idx((f), fbstart(f), (val));
-+}
-+
-+/* Inode to lower inode. */
-+static inline struct inode *unionfs_lower_inode(const struct inode *i)
-+{
-+ BUG_ON(!i);
-+ return UNIONFS_I(i)->lower_inodes[ibstart(i)];
-+}
-+
-+static inline struct inode *unionfs_lower_inode_idx(const struct inode *i,
-+ int index)
-+{
-+ BUG_ON(!i || index < 0);
-+ return UNIONFS_I(i)->lower_inodes[index];
-+}
-+
-+static inline void unionfs_set_lower_inode_idx(struct inode *i, int index,
-+ struct inode *val)
-+{
-+ BUG_ON(!i || index < 0);
-+ UNIONFS_I(i)->lower_inodes[index] = val;
-+}
-+
-+static inline void unionfs_set_lower_inode(struct inode *i, struct inode *val)
-+{
-+ BUG_ON(!i);
-+ UNIONFS_I(i)->lower_inodes[ibstart(i)] = val;
-+}
-+
-+/* Superblock to lower superblock. */
-+static inline struct super_block *unionfs_lower_super(
-+ const struct super_block *sb)
-+{
-+ BUG_ON(!sb);
-+ return UNIONFS_SB(sb)->data[sbstart(sb)].sb;
-+}
-+
-+static inline struct super_block *unionfs_lower_super_idx(
-+ const struct super_block *sb,
-+ int index)
-+{
-+ BUG_ON(!sb || index < 0);
-+ return UNIONFS_SB(sb)->data[index].sb;
-+}
-+
-+static inline void unionfs_set_lower_super_idx(struct super_block *sb,
-+ int index,
-+ struct super_block *val)
-+{
-+ BUG_ON(!sb || index < 0);
-+ UNIONFS_SB(sb)->data[index].sb = val;
-+}
-+
-+static inline void unionfs_set_lower_super(struct super_block *sb,
-+ struct super_block *val)
-+{
-+ BUG_ON(!sb);
-+ UNIONFS_SB(sb)->data[sbstart(sb)].sb = val;
-+}
-+
-+/* Branch count macros. */
-+static inline int branch_count(const struct super_block *sb, int index)
-+{
-+ BUG_ON(!sb || index < 0);
-+ return atomic_read(&UNIONFS_SB(sb)->data[index].open_files);
-+}
-+
-+static inline void set_branch_count(struct super_block *sb, int index, int val)
-+{
-+ BUG_ON(!sb || index < 0);
-+ atomic_set(&UNIONFS_SB(sb)->data[index].open_files, val);
-+}
-+
-+static inline void branchget(struct super_block *sb, int index)
-+{
-+ BUG_ON(!sb || index < 0);
-+ atomic_inc(&UNIONFS_SB(sb)->data[index].open_files);
-+}
-+
-+static inline void branchput(struct super_block *sb, int index)
-+{
-+ BUG_ON(!sb || index < 0);
-+ atomic_dec(&UNIONFS_SB(sb)->data[index].open_files);
-+}
-+
-+/* Dentry macros */
-+static inline void unionfs_set_lower_dentry_idx(struct dentry *dent, int index,
-+ struct dentry *val)
-+{
-+ BUG_ON(!dent || index < 0);
-+ UNIONFS_D(dent)->lower_paths[index].dentry = val;
-+}
-+
-+static inline struct dentry *unionfs_lower_dentry_idx(
-+ const struct dentry *dent,
-+ int index)
-+{
-+ BUG_ON(!dent || index < 0);
-+ return UNIONFS_D(dent)->lower_paths[index].dentry;
-+}
-+
-+static inline struct dentry *unionfs_lower_dentry(const struct dentry *dent)
-+{
-+ BUG_ON(!dent);
-+ return unionfs_lower_dentry_idx(dent, dbstart(dent));
-+}
-+
-+static inline void unionfs_set_lower_mnt_idx(struct dentry *dent, int index,
-+ struct vfsmount *mnt)
-+{
-+ BUG_ON(!dent || index < 0);
-+ UNIONFS_D(dent)->lower_paths[index].mnt = mnt;
-+}
-+
-+static inline struct vfsmount *unionfs_lower_mnt_idx(
-+ const struct dentry *dent,
-+ int index)
-+{
-+ BUG_ON(!dent || index < 0);
-+ return UNIONFS_D(dent)->lower_paths[index].mnt;
-+}
-+
-+static inline struct vfsmount *unionfs_lower_mnt(const struct dentry *dent)
-+{
-+ BUG_ON(!dent);
-+ return unionfs_lower_mnt_idx(dent, dbstart(dent));
-+}
-+
-+/* Macros for locking a dentry. */
-+enum unionfs_dentry_lock_class {
-+ UNIONFS_DMUTEX_NORMAL,
-+ UNIONFS_DMUTEX_ROOT,
-+ UNIONFS_DMUTEX_PARENT,
-+ UNIONFS_DMUTEX_CHILD,
-+ UNIONFS_DMUTEX_WHITEOUT,
-+ UNIONFS_DMUTEX_REVAL_PARENT, /* for file/dentry revalidate */
-+ UNIONFS_DMUTEX_REVAL_CHILD, /* for file/dentry revalidate */
-+};
-+
-+static inline void unionfs_lock_dentry(struct dentry *d,
-+ unsigned int subclass)
-+{
-+ BUG_ON(!d);
-+ mutex_lock_nested(&UNIONFS_D(d)->lock, subclass);
-+}
-+
-+static inline void unionfs_unlock_dentry(struct dentry *d)
-+{
-+ BUG_ON(!d);
-+ mutex_unlock(&UNIONFS_D(d)->lock);
-+}
-+
-+static inline struct dentry *unionfs_lock_parent(struct dentry *d,
-+ unsigned int subclass)
-+{
-+ struct dentry *p;
-+
-+ BUG_ON(!d);
-+ p = dget_parent(d);
-+ if (p != d)
-+ mutex_lock_nested(&UNIONFS_D(p)->lock, subclass);
-+ return p;
-+}
-+
-+static inline void unionfs_unlock_parent(struct dentry *d, struct dentry *p)
-+{
-+ BUG_ON(!d);
-+ BUG_ON(!p);
-+ if (p != d) {
-+ BUG_ON(!mutex_is_locked(&UNIONFS_D(p)->lock));
-+ mutex_unlock(&UNIONFS_D(p)->lock);
-+ }
-+ dput(p);
-+}
-+
-+static inline void verify_locked(struct dentry *d)
-+{
-+ BUG_ON(!d);
-+ BUG_ON(!mutex_is_locked(&UNIONFS_D(d)->lock));
-+}
-+
-+/* macros to put lower objects */
-+
-+/*
-+ * iput lower inodes of an unionfs dentry, from bstart to bend. If
-+ * @free_lower is true, then also kfree the memory used to hold the lower
-+ * object pointers.
-+ */
-+static inline void iput_lowers(struct inode *inode,
-+ int bstart, int bend, bool free_lower)
-+{
-+ struct inode *lower_inode;
-+ int bindex;
-+
-+ BUG_ON(!inode);
-+ BUG_ON(!UNIONFS_I(inode));
-+ BUG_ON(bstart < 0);
-+
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ lower_inode = unionfs_lower_inode_idx(inode, bindex);
-+ if (lower_inode) {
-+ unionfs_set_lower_inode_idx(inode, bindex, NULL);
-+ /* see Documentation/filesystems/unionfs/issues.txt */
-+ lockdep_off();
-+ iput(lower_inode);
-+ lockdep_on();
-+ }
-+ }
-+
-+ if (free_lower) {
-+ kfree(UNIONFS_I(inode)->lower_inodes);
-+ UNIONFS_I(inode)->lower_inodes = NULL;
-+ }
-+}
-+
-+/* iput all lower inodes, and reset start/end branch indices to -1 */
-+static inline void iput_lowers_all(struct inode *inode, bool free_lower)
-+{
-+ int bstart, bend;
-+
-+ BUG_ON(!inode);
-+ BUG_ON(!UNIONFS_I(inode));
-+ bstart = ibstart(inode);
-+ bend = ibend(inode);
-+ BUG_ON(bstart < 0);
-+
-+ iput_lowers(inode, bstart, bend, free_lower);
-+ ibstart(inode) = ibend(inode) = -1;
-+}
-+
-+/*
-+ * dput/mntput all lower dentries and vfsmounts of an unionfs dentry, from
-+ * bstart to bend. If @free_lower is true, then also kfree the memory used
-+ * to hold the lower object pointers.
-+ *
-+ * XXX: implement using path_put VFS macros
-+ */
-+static inline void path_put_lowers(struct dentry *dentry,
-+ int bstart, int bend, bool free_lower)
-+{
-+ struct dentry *lower_dentry;
-+ struct vfsmount *lower_mnt;
-+ int bindex;
-+
-+ BUG_ON(!dentry);
-+ BUG_ON(!UNIONFS_D(dentry));
-+ BUG_ON(bstart < 0);
-+
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
-+ if (lower_dentry) {
-+ unionfs_set_lower_dentry_idx(dentry, bindex, NULL);
-+ dput(lower_dentry);
-+ }
-+ lower_mnt = unionfs_lower_mnt_idx(dentry, bindex);
-+ if (lower_mnt) {
-+ unionfs_set_lower_mnt_idx(dentry, bindex, NULL);
-+ mntput(lower_mnt);
-+ }
-+ }
-+
-+ if (free_lower) {
-+ kfree(UNIONFS_D(dentry)->lower_paths);
-+ UNIONFS_D(dentry)->lower_paths = NULL;
-+ }
-+}
-+
-+/*
-+ * dput/mntput all lower dentries and vfsmounts, and reset start/end branch
-+ * indices to -1.
-+ */
-+static inline void path_put_lowers_all(struct dentry *dentry, bool free_lower)
-+{
-+ int bstart, bend;
-+
-+ BUG_ON(!dentry);
-+ BUG_ON(!UNIONFS_D(dentry));
-+ bstart = dbstart(dentry);
-+ bend = dbend(dentry);
-+ BUG_ON(bstart < 0);
-+
-+ path_put_lowers(dentry, bstart, bend, free_lower);
-+ dbstart(dentry) = dbend(dentry) = -1;
-+}
-+
-+#endif /* not _FANOUT_H */
-diff --git a/fs/unionfs/file.c b/fs/unionfs/file.c
-new file mode 100644
-index 0000000..1c694c3
---- /dev/null
-+++ b/fs/unionfs/file.c
-@@ -0,0 +1,382 @@
-+/*
-+ * Copyright (c) 2003-2010 Erez Zadok
-+ * Copyright (c) 2003-2006 Charles P. Wright
-+ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2005-2006 Junjiro Okajima
-+ * Copyright (c) 2005 Arun M. Krishnakumar
-+ * Copyright (c) 2004-2006 David P. Quigley
-+ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
-+ * Copyright (c) 2003 Puja Gupta
-+ * Copyright (c) 2003 Harikesavan Krishnan
-+ * Copyright (c) 2003-2010 Stony Brook University
-+ * Copyright (c) 2003-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include "union.h"
-+
-+static ssize_t unionfs_read(struct file *file, char __user *buf,
-+ size_t count, loff_t *ppos)
-+{
-+ int err;
-+ struct file *lower_file;
-+ struct dentry *dentry = file->f_path.dentry;
-+ struct dentry *parent;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ err = unionfs_file_revalidate(file, parent, false);
-+ if (unlikely(err))
-+ goto out;
-+
-+ lower_file = unionfs_lower_file(file);
-+ err = vfs_read(lower_file, buf, count, ppos);
-+ /* update our inode atime upon a successful lower read */
-+ if (err >= 0) {
-+ fsstack_copy_attr_atime(dentry->d_inode,
-+ lower_file->f_path.dentry->d_inode);
-+ unionfs_check_file(file);
-+ }
-+
-+out:
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+ return err;
-+}
-+
-+static ssize_t unionfs_write(struct file *file, const char __user *buf,
-+ size_t count, loff_t *ppos)
-+{
-+ int err = 0;
-+ struct file *lower_file;
-+ struct dentry *dentry = file->f_path.dentry;
-+ struct dentry *parent;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ err = unionfs_file_revalidate(file, parent, true);
-+ if (unlikely(err))
-+ goto out;
-+
-+ lower_file = unionfs_lower_file(file);
-+ err = vfs_write(lower_file, buf, count, ppos);
-+ /* update our inode times+sizes upon a successful lower write */
-+ if (err >= 0) {
-+ fsstack_copy_inode_size(dentry->d_inode,
-+ lower_file->f_path.dentry->d_inode);
-+ fsstack_copy_attr_times(dentry->d_inode,
-+ lower_file->f_path.dentry->d_inode);
-+ UNIONFS_F(file)->wrote_to_file = true; /* for delayed copyup */
-+ unionfs_check_file(file);
-+ }
-+
-+out:
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+ return err;
-+}
-+
-+static int unionfs_file_readdir(struct file *file, void *dirent,
-+ filldir_t filldir)
-+{
-+ return -ENOTDIR;
-+}
-+
-+static int unionfs_mmap(struct file *file, struct vm_area_struct *vma)
-+{
-+ int err = 0;
-+ bool willwrite;
-+ struct file *lower_file;
-+ struct dentry *dentry = file->f_path.dentry;
-+ struct dentry *parent;
-+ const struct vm_operations_struct *saved_vm_ops = NULL;
-+
-+ /*
-+ * Since mm/memory.c:might_fault() (under PROVE_LOCKING) was
-+ * modified in 2.6.29-rc1 to call might_lock_read on mmap_sem, this
-+ * has been causing false positives in file system stacking layers.
-+ * In particular, our ->mmap is called after sys_mmap2 already holds
-+ * mmap_sem, then we lock our own mutexes; but earlier, it's
-+ * possible for lockdep to have locked our mutexes first, and then
-+ * we call a lower ->readdir which could call might_fault. The
-+ * different ordering of the locks is what lockdep complains about
-+ * -- unnecessarily. Therefore, we have no choice but to tell
-+ * lockdep to temporarily turn off lockdep here. Note: the comments
-+ * inside might_sleep also suggest that it would have been
-+ * nicer to only annotate paths that needs that might_lock_read.
-+ */
-+ lockdep_off();
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ /* This might be deferred to mmap's writepage */
-+ willwrite = ((vma->vm_flags | VM_SHARED | VM_WRITE) == vma->vm_flags);
-+ err = unionfs_file_revalidate(file, parent, willwrite);
-+ if (unlikely(err))
-+ goto out;
-+ unionfs_check_file(file);
-+
-+ /*
-+ * File systems which do not implement ->writepage may use
-+ * generic_file_readonly_mmap as their ->mmap op. If you call
-+ * generic_file_readonly_mmap with VM_WRITE, you'd get an -EINVAL.
-+ * But we cannot call the lower ->mmap op, so we can't tell that
-+ * writeable mappings won't work. Therefore, our only choice is to
-+ * check if the lower file system supports the ->writepage, and if
-+ * not, return EINVAL (the same error that
-+ * generic_file_readonly_mmap returns in that case).
-+ */
-+ lower_file = unionfs_lower_file(file);
-+ if (willwrite && !lower_file->f_mapping->a_ops->writepage) {
-+ err = -EINVAL;
-+ printk(KERN_ERR "unionfs: branch %d file system does not "
-+ "support writeable mmap\n", fbstart(file));
-+ goto out;
-+ }
-+
-+ /*
-+ * find and save lower vm_ops.
-+ *
-+ * XXX: the VFS should have a cleaner way of finding the lower vm_ops
-+ */
-+ if (!UNIONFS_F(file)->lower_vm_ops) {
-+ err = lower_file->f_op->mmap(lower_file, vma);
-+ if (err) {
-+ printk(KERN_ERR "unionfs: lower mmap failed %d\n", err);
-+ goto out;
-+ }
-+ saved_vm_ops = vma->vm_ops;
-+ err = do_munmap(current->mm, vma->vm_start,
-+ vma->vm_end - vma->vm_start);
-+ if (err) {
-+ printk(KERN_ERR "unionfs: do_munmap failed %d\n", err);
-+ goto out;
-+ }
-+ }
-+
-+ file->f_mapping->a_ops = &unionfs_dummy_aops;
-+ err = generic_file_mmap(file, vma);
-+ file->f_mapping->a_ops = &unionfs_aops;
-+ if (err) {
-+ printk(KERN_ERR "unionfs: generic_file_mmap failed %d\n", err);
-+ goto out;
-+ }
-+ vma->vm_ops = &unionfs_vm_ops;
-+ if (!UNIONFS_F(file)->lower_vm_ops)
-+ UNIONFS_F(file)->lower_vm_ops = saved_vm_ops;
-+
-+out:
-+ if (!err) {
-+ /* copyup could cause parent dir times to change */
-+ unionfs_copy_attr_times(parent->d_inode);
-+ unionfs_check_file(file);
-+ }
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+ lockdep_on();
-+ return err;
-+}
-+
-+int unionfs_fsync(struct file *file, int datasync)
-+{
-+ int bindex, bstart, bend;
-+ struct file *lower_file;
-+ struct dentry *dentry = file->f_path.dentry;
-+ struct dentry *lower_dentry;
-+ struct dentry *parent;
-+ struct inode *lower_inode, *inode;
-+ int err = -EINVAL;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ err = unionfs_file_revalidate(file, parent, true);
-+ if (unlikely(err))
-+ goto out;
-+ unionfs_check_file(file);
-+
-+ bstart = fbstart(file);
-+ bend = fbend(file);
-+ if (bstart < 0 || bend < 0)
-+ goto out;
-+
-+ inode = dentry->d_inode;
-+ if (unlikely(!inode)) {
-+ printk(KERN_ERR
-+ "unionfs: null lower inode in unionfs_fsync\n");
-+ goto out;
-+ }
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ lower_inode = unionfs_lower_inode_idx(inode, bindex);
-+ if (!lower_inode || !lower_inode->i_fop->fsync)
-+ continue;
-+ lower_file = unionfs_lower_file_idx(file, bindex);
-+ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
-+ mutex_lock(&lower_inode->i_mutex);
-+ err = lower_inode->i_fop->fsync(lower_file, datasync);
-+ if (!err && bindex == bstart)
-+ fsstack_copy_attr_times(inode, lower_inode);
-+ mutex_unlock(&lower_inode->i_mutex);
-+ if (err)
-+ goto out;
-+ }
-+
-+out:
-+ if (!err)
-+ unionfs_check_file(file);
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+ return err;
-+}
-+
-+int unionfs_fasync(int fd, struct file *file, int flag)
-+{
-+ int bindex, bstart, bend;
-+ struct file *lower_file;
-+ struct dentry *dentry = file->f_path.dentry;
-+ struct dentry *parent;
-+ struct inode *lower_inode, *inode;
-+ int err = 0;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ err = unionfs_file_revalidate(file, parent, true);
-+ if (unlikely(err))
-+ goto out;
-+ unionfs_check_file(file);
-+
-+ bstart = fbstart(file);
-+ bend = fbend(file);
-+ if (bstart < 0 || bend < 0)
-+ goto out;
-+
-+ inode = dentry->d_inode;
-+ if (unlikely(!inode)) {
-+ printk(KERN_ERR
-+ "unionfs: null lower inode in unionfs_fasync\n");
-+ goto out;
-+ }
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ lower_inode = unionfs_lower_inode_idx(inode, bindex);
-+ if (!lower_inode || !lower_inode->i_fop->fasync)
-+ continue;
-+ lower_file = unionfs_lower_file_idx(file, bindex);
-+ mutex_lock(&lower_inode->i_mutex);
-+ err = lower_inode->i_fop->fasync(fd, lower_file, flag);
-+ if (!err && bindex == bstart)
-+ fsstack_copy_attr_times(inode, lower_inode);
-+ mutex_unlock(&lower_inode->i_mutex);
-+ if (err)
-+ goto out;
-+ }
-+
-+out:
-+ if (!err)
-+ unionfs_check_file(file);
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+ return err;
-+}
-+
-+static ssize_t unionfs_splice_read(struct file *file, loff_t *ppos,
-+ struct pipe_inode_info *pipe, size_t len,
-+ unsigned int flags)
-+{
-+ ssize_t err;
-+ struct file *lower_file;
-+ struct dentry *dentry = file->f_path.dentry;
-+ struct dentry *parent;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ err = unionfs_file_revalidate(file, parent, false);
-+ if (unlikely(err))
-+ goto out;
-+
-+ lower_file = unionfs_lower_file(file);
-+ err = vfs_splice_to(lower_file, ppos, pipe, len, flags);
-+ /* update our inode atime upon a successful lower splice-read */
-+ if (err >= 0) {
-+ fsstack_copy_attr_atime(dentry->d_inode,
-+ lower_file->f_path.dentry->d_inode);
-+ unionfs_check_file(file);
-+ }
-+
-+out:
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+ return err;
-+}
-+
-+static ssize_t unionfs_splice_write(struct pipe_inode_info *pipe,
-+ struct file *file, loff_t *ppos,
-+ size_t len, unsigned int flags)
-+{
-+ ssize_t err = 0;
-+ struct file *lower_file;
-+ struct dentry *dentry = file->f_path.dentry;
-+ struct dentry *parent;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ err = unionfs_file_revalidate(file, parent, true);
-+ if (unlikely(err))
-+ goto out;
-+
-+ lower_file = unionfs_lower_file(file);
-+ err = vfs_splice_from(pipe, lower_file, ppos, len, flags);
-+ /* update our inode times+sizes upon a successful lower write */
-+ if (err >= 0) {
-+ fsstack_copy_inode_size(dentry->d_inode,
-+ lower_file->f_path.dentry->d_inode);
-+ fsstack_copy_attr_times(dentry->d_inode,
-+ lower_file->f_path.dentry->d_inode);
-+ unionfs_check_file(file);
-+ }
-+
-+out:
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+ return err;
-+}
-+
-+struct file_operations unionfs_main_fops = {
-+ .llseek = generic_file_llseek,
-+ .read = unionfs_read,
-+ .write = unionfs_write,
-+ .readdir = unionfs_file_readdir,
-+ .unlocked_ioctl = unionfs_ioctl,
-+#ifdef CONFIG_COMPAT
-+ .compat_ioctl = unionfs_ioctl,
-+#endif
-+ .mmap = unionfs_mmap,
-+ .open = unionfs_open,
-+ .flush = unionfs_flush,
-+ .release = unionfs_file_release,
-+ .fsync = unionfs_fsync,
-+ .fasync = unionfs_fasync,
-+ .splice_read = unionfs_splice_read,
-+ .splice_write = unionfs_splice_write,
-+};
-diff --git a/fs/unionfs/inode.c b/fs/unionfs/inode.c
-new file mode 100644
-index 0000000..4c36f16
---- /dev/null
-+++ b/fs/unionfs/inode.c
-@@ -0,0 +1,1061 @@
-+/*
-+ * Copyright (c) 2003-2010 Erez Zadok
-+ * Copyright (c) 2003-2006 Charles P. Wright
-+ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2005-2006 Junjiro Okajima
-+ * Copyright (c) 2005 Arun M. Krishnakumar
-+ * Copyright (c) 2004-2006 David P. Quigley
-+ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
-+ * Copyright (c) 2003 Puja Gupta
-+ * Copyright (c) 2003 Harikesavan Krishnan
-+ * Copyright (c) 2003-2010 Stony Brook University
-+ * Copyright (c) 2003-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include "union.h"
-+
-+/*
-+ * Find a writeable branch to create new object in. Checks all writeble
-+ * branches of the parent inode, from istart to iend order; if none are
-+ * suitable, also tries branch 0 (which may require a copyup).
-+ *
-+ * Return a lower_dentry we can use to create object in, or ERR_PTR.
-+ */
-+static struct dentry *find_writeable_branch(struct inode *parent,
-+ struct dentry *dentry)
-+{
-+ int err = -EINVAL;
-+ int bindex, istart, iend;
-+ struct dentry *lower_dentry = NULL;
-+
-+ istart = ibstart(parent);
-+ iend = ibend(parent);
-+ if (istart < 0)
-+ goto out;
-+
-+begin:
-+ for (bindex = istart; bindex <= iend; bindex++) {
-+ /* skip non-writeable branches */
-+ err = is_robranch_super(dentry->d_sb, bindex);
-+ if (err) {
-+ err = -EROFS;
-+ continue;
-+ }
-+ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
-+ if (!lower_dentry)
-+ continue;
-+ /*
-+ * check for whiteouts in writeable branch, and remove them
-+ * if necessary.
-+ */
-+ err = check_unlink_whiteout(dentry, lower_dentry, bindex);
-+ if (err > 0) /* ignore if whiteout found and removed */
-+ err = 0;
-+ if (err)
-+ continue;
-+ /* if get here, we can write to the branch */
-+ break;
-+ }
-+ /*
-+ * If istart wasn't already branch 0, and we got any error, then try
-+ * branch 0 (which may require copyup)
-+ */
-+ if (err && istart > 0) {
-+ istart = iend = 0;
-+ goto begin;
-+ }
-+
-+ /*
-+ * If we tried even branch 0, and still got an error, abort. But if
-+ * the error was an EROFS, then we should try to copyup.
-+ */
-+ if (err && err != -EROFS)
-+ goto out;
-+
-+ /*
-+ * If we get here, then check if copyup needed. If lower_dentry is
-+ * NULL, create the entire dentry directory structure in branch 0.
-+ */
-+ if (!lower_dentry) {
-+ bindex = 0;
-+ lower_dentry = create_parents(parent, dentry,
-+ dentry->d_name.name, bindex);
-+ if (IS_ERR(lower_dentry)) {
-+ err = PTR_ERR(lower_dentry);
-+ goto out;
-+ }
-+ }
-+ err = 0; /* all's well */
-+out:
-+ if (err)
-+ return ERR_PTR(err);
-+ return lower_dentry;
-+}
-+
-+static int unionfs_create(struct inode *dir, struct dentry *dentry,
-+ int mode, struct nameidata *nd_unused)
-+{
-+ int err = 0;
-+ struct dentry *lower_dentry = NULL;
-+ struct dentry *lower_parent_dentry = NULL;
-+ struct dentry *parent;
-+ int valid = 0;
-+ struct nameidata lower_nd;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ valid = __unionfs_d_revalidate(dentry, parent, false);
-+ if (unlikely(!valid)) {
-+ err = -ESTALE; /* same as what real_lookup does */
-+ goto out;
-+ }
-+
-+ lower_dentry = find_writeable_branch(dir, dentry);
-+ if (IS_ERR(lower_dentry)) {
-+ err = PTR_ERR(lower_dentry);
-+ goto out;
-+ }
-+
-+ lower_parent_dentry = lock_parent(lower_dentry);
-+ if (IS_ERR(lower_parent_dentry)) {
-+ err = PTR_ERR(lower_parent_dentry);
-+ goto out_unlock;
-+ }
-+
-+ err = init_lower_nd(&lower_nd, LOOKUP_CREATE);
-+ if (unlikely(err < 0))
-+ goto out_unlock;
-+ err = vfs_create(lower_parent_dentry->d_inode, lower_dentry, mode,
-+ &lower_nd);
-+ release_lower_nd(&lower_nd, err);
-+
-+ if (!err) {
-+ err = PTR_ERR(unionfs_interpose(dentry, dir->i_sb, 0));
-+ if (!err) {
-+ unionfs_copy_attr_times(dir);
-+ fsstack_copy_inode_size(dir,
-+ lower_parent_dentry->d_inode);
-+ /* update no. of links on parent directory */
-+ dir->i_nlink = unionfs_get_nlinks(dir);
-+ }
-+ }
-+
-+out_unlock:
-+ unlock_dir(lower_parent_dentry);
-+out:
-+ if (!err) {
-+ unionfs_postcopyup_setmnt(dentry);
-+ unionfs_check_inode(dir);
-+ unionfs_check_dentry(dentry);
-+ }
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+ return err;
-+}
-+
-+/*
-+ * unionfs_lookup is the only special function which takes a dentry, yet we
-+ * do NOT want to call __unionfs_d_revalidate_chain because by definition,
-+ * we don't have a valid dentry here yet.
-+ */
-+static struct dentry *unionfs_lookup(struct inode *dir,
-+ struct dentry *dentry,
-+ struct nameidata *nd_unused)
-+{
-+ struct dentry *ret, *parent;
-+ int err = 0;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+
-+ /*
-+ * As long as we lock/dget the parent, then can skip validating the
-+ * parent now; we may have to rebuild this dentry on the next
-+ * ->d_revalidate, however.
-+ */
-+
-+ /* allocate dentry private data. We free it in ->d_release */
-+ err = new_dentry_private_data(dentry, UNIONFS_DMUTEX_CHILD);
-+ if (unlikely(err)) {
-+ ret = ERR_PTR(err);
-+ goto out;
-+ }
-+
-+ ret = unionfs_lookup_full(dentry, parent, INTERPOSE_LOOKUP);
-+
-+ if (!IS_ERR(ret)) {
-+ if (ret)
-+ dentry = ret;
-+ /* lookup_full can return multiple positive dentries */
-+ if (dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode)) {
-+ BUG_ON(dbstart(dentry) < 0);
-+ unionfs_postcopyup_release(dentry);
-+ }
-+ unionfs_copy_attr_times(dentry->d_inode);
-+ }
-+
-+ unionfs_check_inode(dir);
-+ if (!IS_ERR(ret))
-+ unionfs_check_dentry(dentry);
-+ unionfs_check_dentry(parent);
-+ unionfs_unlock_dentry(dentry); /* locked in new_dentry_private data */
-+
-+out:
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+
-+ return ret;
-+}
-+
-+static int unionfs_link(struct dentry *old_dentry, struct inode *dir,
-+ struct dentry *new_dentry)
-+{
-+ int err = 0;
-+ struct dentry *lower_old_dentry = NULL;
-+ struct dentry *lower_new_dentry = NULL;
-+ struct dentry *lower_dir_dentry = NULL;
-+ struct dentry *old_parent, *new_parent;
-+ char *name = NULL;
-+ bool valid;
-+
-+ unionfs_read_lock(old_dentry->d_sb, UNIONFS_SMUTEX_CHILD);
-+ old_parent = dget_parent(old_dentry);
-+ new_parent = dget_parent(new_dentry);
-+ unionfs_double_lock_parents(old_parent, new_parent);
-+ unionfs_double_lock_dentry(old_dentry, new_dentry);
-+
-+ valid = __unionfs_d_revalidate(old_dentry, old_parent, false);
-+ if (unlikely(!valid)) {
-+ err = -ESTALE;
-+ goto out;
-+ }
-+ if (new_dentry->d_inode) {
-+ valid = __unionfs_d_revalidate(new_dentry, new_parent, false);
-+ if (unlikely(!valid)) {
-+ err = -ESTALE;
-+ goto out;
-+ }
-+ }
-+
-+ lower_new_dentry = unionfs_lower_dentry(new_dentry);
-+
-+ /* check for a whiteout in new dentry branch, and delete it */
-+ err = check_unlink_whiteout(new_dentry, lower_new_dentry,
-+ dbstart(new_dentry));
-+ if (err > 0) { /* whiteout found and removed successfully */
-+ lower_dir_dentry = dget_parent(lower_new_dentry);
-+ fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
-+ dput(lower_dir_dentry);
-+ dir->i_nlink = unionfs_get_nlinks(dir);
-+ err = 0;
-+ }
-+ if (err)
-+ goto out;
-+
-+ /* check if parent hierachy is needed, then link in same branch */
-+ if (dbstart(old_dentry) != dbstart(new_dentry)) {
-+ lower_new_dentry = create_parents(dir, new_dentry,
-+ new_dentry->d_name.name,
-+ dbstart(old_dentry));
-+ err = PTR_ERR(lower_new_dentry);
-+ if (IS_COPYUP_ERR(err))
-+ goto docopyup;
-+ if (!lower_new_dentry || IS_ERR(lower_new_dentry))
-+ goto out;
-+ }
-+ lower_new_dentry = unionfs_lower_dentry(new_dentry);
-+ lower_old_dentry = unionfs_lower_dentry(old_dentry);
-+
-+ BUG_ON(dbstart(old_dentry) != dbstart(new_dentry));
-+ lower_dir_dentry = lock_parent(lower_new_dentry);
-+ err = is_robranch(old_dentry);
-+ if (!err) {
-+ /* see Documentation/filesystems/unionfs/issues.txt */
-+ lockdep_off();
-+ err = vfs_link(lower_old_dentry, lower_dir_dentry->d_inode,
-+ lower_new_dentry);
-+ lockdep_on();
-+ }
-+ unlock_dir(lower_dir_dentry);
-+
-+docopyup:
-+ if (IS_COPYUP_ERR(err)) {
-+ int old_bstart = dbstart(old_dentry);
-+ int bindex;
-+
-+ for (bindex = old_bstart - 1; bindex >= 0; bindex--) {
-+ err = copyup_dentry(old_parent->d_inode,
-+ old_dentry, old_bstart,
-+ bindex, old_dentry->d_name.name,
-+ old_dentry->d_name.len, NULL,
-+ i_size_read(old_dentry->d_inode));
-+ if (err)
-+ continue;
-+ lower_new_dentry =
-+ create_parents(dir, new_dentry,
-+ new_dentry->d_name.name,
-+ bindex);
-+ lower_old_dentry = unionfs_lower_dentry(old_dentry);
-+ lower_dir_dentry = lock_parent(lower_new_dentry);
-+ /* see Documentation/filesystems/unionfs/issues.txt */
-+ lockdep_off();
-+ /* do vfs_link */
-+ err = vfs_link(lower_old_dentry,
-+ lower_dir_dentry->d_inode,
-+ lower_new_dentry);
-+ lockdep_on();
-+ unlock_dir(lower_dir_dentry);
-+ goto check_link;
-+ }
-+ goto out;
-+ }
-+
-+check_link:
-+ if (err || !lower_new_dentry->d_inode)
-+ goto out;
-+
-+ /* Its a hard link, so use the same inode */
-+ new_dentry->d_inode = igrab(old_dentry->d_inode);
-+ d_add(new_dentry, new_dentry->d_inode);
-+ unionfs_copy_attr_all(dir, lower_new_dentry->d_parent->d_inode);
-+ fsstack_copy_inode_size(dir, lower_new_dentry->d_parent->d_inode);
-+
-+ /* propagate number of hard-links */
-+ old_dentry->d_inode->i_nlink = unionfs_get_nlinks(old_dentry->d_inode);
-+ /* new dentry's ctime may have changed due to hard-link counts */
-+ unionfs_copy_attr_times(new_dentry->d_inode);
-+
-+out:
-+ if (!new_dentry->d_inode)
-+ d_drop(new_dentry);
-+
-+ kfree(name);
-+ if (!err)
-+ unionfs_postcopyup_setmnt(new_dentry);
-+
-+ unionfs_check_inode(dir);
-+ unionfs_check_dentry(new_dentry);
-+ unionfs_check_dentry(old_dentry);
-+
-+ unionfs_double_unlock_dentry(old_dentry, new_dentry);
-+ unionfs_double_unlock_parents(old_parent, new_parent);
-+ dput(new_parent);
-+ dput(old_parent);
-+ unionfs_read_unlock(old_dentry->d_sb);
-+
-+ return err;
-+}
-+
-+static int unionfs_symlink(struct inode *dir, struct dentry *dentry,
-+ const char *symname)
-+{
-+ int err = 0;
-+ struct dentry *lower_dentry = NULL;
-+ struct dentry *wh_dentry = NULL;
-+ struct dentry *lower_parent_dentry = NULL;
-+ struct dentry *parent;
-+ char *name = NULL;
-+ int valid = 0;
-+ umode_t mode;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ valid = __unionfs_d_revalidate(dentry, parent, false);
-+ if (unlikely(!valid)) {
-+ err = -ESTALE;
-+ goto out;
-+ }
-+
-+ /*
-+ * It's only a bug if this dentry was not negative and couldn't be
-+ * revalidated (shouldn't happen).
-+ */
-+ BUG_ON(!valid && dentry->d_inode);
-+
-+ lower_dentry = find_writeable_branch(dir, dentry);
-+ if (IS_ERR(lower_dentry)) {
-+ err = PTR_ERR(lower_dentry);
-+ goto out;
-+ }
-+
-+ lower_parent_dentry = lock_parent(lower_dentry);
-+ if (IS_ERR(lower_parent_dentry)) {
-+ err = PTR_ERR(lower_parent_dentry);
-+ goto out_unlock;
-+ }
-+
-+ mode = S_IALLUGO;
-+ err = vfs_symlink(lower_parent_dentry->d_inode, lower_dentry, symname);
-+ if (!err) {
-+ err = PTR_ERR(unionfs_interpose(dentry, dir->i_sb, 0));
-+ if (!err) {
-+ unionfs_copy_attr_times(dir);
-+ fsstack_copy_inode_size(dir,
-+ lower_parent_dentry->d_inode);
-+ /* update no. of links on parent directory */
-+ dir->i_nlink = unionfs_get_nlinks(dir);
-+ }
-+ }
-+
-+out_unlock:
-+ unlock_dir(lower_parent_dentry);
-+out:
-+ dput(wh_dentry);
-+ kfree(name);
-+
-+ if (!err) {
-+ unionfs_postcopyup_setmnt(dentry);
-+ unionfs_check_inode(dir);
-+ unionfs_check_dentry(dentry);
-+ }
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+ return err;
-+}
-+
-+static int unionfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
-+{
-+ int err = 0;
-+ struct dentry *lower_dentry = NULL;
-+ struct dentry *lower_parent_dentry = NULL;
-+ struct dentry *parent;
-+ int bindex = 0, bstart;
-+ char *name = NULL;
-+ int valid;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ valid = __unionfs_d_revalidate(dentry, parent, false);
-+ if (unlikely(!valid)) {
-+ err = -ESTALE; /* same as what real_lookup does */
-+ goto out;
-+ }
-+
-+ bstart = dbstart(dentry);
-+
-+ lower_dentry = unionfs_lower_dentry(dentry);
-+
-+ /* check for a whiteout in new dentry branch, and delete it */
-+ err = check_unlink_whiteout(dentry, lower_dentry, bstart);
-+ if (err > 0) /* whiteout found and removed successfully */
-+ err = 0;
-+ if (err) {
-+ /* exit if the error returned was NOT -EROFS */
-+ if (!IS_COPYUP_ERR(err))
-+ goto out;
-+ bstart--;
-+ }
-+
-+ /* check if copyup's needed, and mkdir */
-+ for (bindex = bstart; bindex >= 0; bindex--) {
-+ int i;
-+ int bend = dbend(dentry);
-+
-+ if (is_robranch_super(dentry->d_sb, bindex))
-+ continue;
-+
-+ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
-+ if (!lower_dentry) {
-+ lower_dentry = create_parents(dir, dentry,
-+ dentry->d_name.name,
-+ bindex);
-+ if (!lower_dentry || IS_ERR(lower_dentry)) {
-+ printk(KERN_ERR "unionfs: lower dentry "
-+ " NULL for bindex = %d\n", bindex);
-+ continue;
-+ }
-+ }
-+
-+ lower_parent_dentry = lock_parent(lower_dentry);
-+
-+ if (IS_ERR(lower_parent_dentry)) {
-+ err = PTR_ERR(lower_parent_dentry);
-+ goto out;
-+ }
-+
-+ err = vfs_mkdir(lower_parent_dentry->d_inode, lower_dentry,
-+ mode);
-+
-+ unlock_dir(lower_parent_dentry);
-+
-+ /* did the mkdir succeed? */
-+ if (err)
-+ break;
-+
-+ for (i = bindex + 1; i <= bend; i++) {
-+ /* XXX: use path_put_lowers? */
-+ if (unionfs_lower_dentry_idx(dentry, i)) {
-+ dput(unionfs_lower_dentry_idx(dentry, i));
-+ unionfs_set_lower_dentry_idx(dentry, i, NULL);
-+ }
-+ }
-+ dbend(dentry) = bindex;
-+
-+ /*
-+ * Only INTERPOSE_LOOKUP can return a value other than 0 on
-+ * err.
-+ */
-+ err = PTR_ERR(unionfs_interpose(dentry, dir->i_sb, 0));
-+ if (!err) {
-+ unionfs_copy_attr_times(dir);
-+ fsstack_copy_inode_size(dir,
-+ lower_parent_dentry->d_inode);
-+
-+ /* update number of links on parent directory */
-+ dir->i_nlink = unionfs_get_nlinks(dir);
-+ }
-+
-+ err = make_dir_opaque(dentry, dbstart(dentry));
-+ if (err) {
-+ printk(KERN_ERR "unionfs: mkdir: error creating "
-+ ".wh.__dir_opaque: %d\n", err);
-+ goto out;
-+ }
-+
-+ /* we are done! */
-+ break;
-+ }
-+
-+out:
-+ if (!dentry->d_inode)
-+ d_drop(dentry);
-+
-+ kfree(name);
-+
-+ if (!err) {
-+ unionfs_copy_attr_times(dentry->d_inode);
-+ unionfs_postcopyup_setmnt(dentry);
-+ }
-+ unionfs_check_inode(dir);
-+ unionfs_check_dentry(dentry);
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+
-+ return err;
-+}
-+
-+static int unionfs_mknod(struct inode *dir, struct dentry *dentry, int mode,
-+ dev_t dev)
-+{
-+ int err = 0;
-+ struct dentry *lower_dentry = NULL;
-+ struct dentry *wh_dentry = NULL;
-+ struct dentry *lower_parent_dentry = NULL;
-+ struct dentry *parent;
-+ char *name = NULL;
-+ int valid = 0;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ valid = __unionfs_d_revalidate(dentry, parent, false);
-+ if (unlikely(!valid)) {
-+ err = -ESTALE;
-+ goto out;
-+ }
-+
-+ /*
-+ * It's only a bug if this dentry was not negative and couldn't be
-+ * revalidated (shouldn't happen).
-+ */
-+ BUG_ON(!valid && dentry->d_inode);
-+
-+ lower_dentry = find_writeable_branch(dir, dentry);
-+ if (IS_ERR(lower_dentry)) {
-+ err = PTR_ERR(lower_dentry);
-+ goto out;
-+ }
-+
-+ lower_parent_dentry = lock_parent(lower_dentry);
-+ if (IS_ERR(lower_parent_dentry)) {
-+ err = PTR_ERR(lower_parent_dentry);
-+ goto out_unlock;
-+ }
-+
-+ err = vfs_mknod(lower_parent_dentry->d_inode, lower_dentry, mode, dev);
-+ if (!err) {
-+ err = PTR_ERR(unionfs_interpose(dentry, dir->i_sb, 0));
-+ if (!err) {
-+ unionfs_copy_attr_times(dir);
-+ fsstack_copy_inode_size(dir,
-+ lower_parent_dentry->d_inode);
-+ /* update no. of links on parent directory */
-+ dir->i_nlink = unionfs_get_nlinks(dir);
-+ }
-+ }
-+
-+out_unlock:
-+ unlock_dir(lower_parent_dentry);
-+out:
-+ dput(wh_dentry);
-+ kfree(name);
-+
-+ if (!err) {
-+ unionfs_postcopyup_setmnt(dentry);
-+ unionfs_check_inode(dir);
-+ unionfs_check_dentry(dentry);
-+ }
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+ return err;
-+}
-+
-+/* requires sb, dentry, and parent to already be locked */
-+static int __unionfs_readlink(struct dentry *dentry, char __user *buf,
-+ int bufsiz)
-+{
-+ int err;
-+ struct dentry *lower_dentry;
-+
-+ lower_dentry = unionfs_lower_dentry(dentry);
-+
-+ if (!lower_dentry->d_inode->i_op ||
-+ !lower_dentry->d_inode->i_op->readlink) {
-+ err = -EINVAL;
-+ goto out;
-+ }
-+
-+ err = lower_dentry->d_inode->i_op->readlink(lower_dentry,
-+ buf, bufsiz);
-+ if (err >= 0)
-+ fsstack_copy_attr_atime(dentry->d_inode,
-+ lower_dentry->d_inode);
-+
-+out:
-+ return err;
-+}
-+
-+static int unionfs_readlink(struct dentry *dentry, char __user *buf,
-+ int bufsiz)
-+{
-+ int err;
-+ struct dentry *parent;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ if (unlikely(!__unionfs_d_revalidate(dentry, parent, false))) {
-+ err = -ESTALE;
-+ goto out;
-+ }
-+
-+ err = __unionfs_readlink(dentry, buf, bufsiz);
-+
-+out:
-+ unionfs_check_dentry(dentry);
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+
-+ return err;
-+}
-+
-+static void *unionfs_follow_link(struct dentry *dentry, struct nameidata *nd)
-+{
-+ char *buf;
-+ int len = PAGE_SIZE, err;
-+ mm_segment_t old_fs;
-+ struct dentry *parent;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ /* This is freed by the put_link method assuming a successful call. */
-+ buf = kmalloc(len, GFP_KERNEL);
-+ if (unlikely(!buf)) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ /* read the symlink, and then we will follow it */
-+ old_fs = get_fs();
-+ set_fs(KERNEL_DS);
-+ err = __unionfs_readlink(dentry, buf, len);
-+ set_fs(old_fs);
-+ if (err < 0) {
-+ kfree(buf);
-+ buf = NULL;
-+ goto out;
-+ }
-+ buf[err] = 0;
-+ nd_set_link(nd, buf);
-+ err = 0;
-+
-+out:
-+ if (err >= 0) {
-+ unionfs_check_nd(nd);
-+ unionfs_check_dentry(dentry);
-+ }
-+
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+
-+ return ERR_PTR(err);
-+}
-+
-+/* this @nd *IS* still used */
-+static void unionfs_put_link(struct dentry *dentry, struct nameidata *nd,
-+ void *cookie)
-+{
-+ struct dentry *parent;
-+ char *buf;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ if (unlikely(!__unionfs_d_revalidate(dentry, parent, false)))
-+ printk(KERN_ERR
-+ "unionfs: put_link failed to revalidate dentry\n");
-+
-+ unionfs_check_dentry(dentry);
-+#if 0
-+ /* XXX: can't run this check b/c this fxn can receive a poisoned 'nd' PTR */
-+ unionfs_check_nd(nd);
-+#endif
-+ buf = nd_get_link(nd);
-+ if (!IS_ERR(buf))
-+ kfree(buf);
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+}
-+
-+/*
-+ * This is a variant of fs/namei.c:permission() or inode_permission() which
-+ * skips over EROFS tests (because we perform copyup on EROFS).
-+ */
-+static int __inode_permission(struct inode *inode, int mask)
-+{
-+ int retval;
-+
-+ /* nobody gets write access to an immutable file */
-+ if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode))
-+ return -EACCES;
-+
-+ /* Ordinary permission routines do not understand MAY_APPEND. */
-+ if (inode->i_op && inode->i_op->permission) {
-+ retval = inode->i_op->permission(inode, mask);
-+ if (!retval) {
-+ /*
-+ * Exec permission on a regular file is denied if none
-+ * of the execute bits are set.
-+ *
-+ * This check should be done by the ->permission()
-+ * method.
-+ */
-+ if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode) &&
-+ !(inode->i_mode & S_IXUGO))
-+ return -EACCES;
-+ }
-+ } else {
-+ retval = generic_permission(inode, mask, NULL);
-+ }
-+ if (retval)
-+ return retval;
-+
-+ return security_inode_permission(inode,
-+ mask & (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND));
-+}
-+
-+/*
-+ * Don't grab the superblock read-lock in unionfs_permission, which prevents
-+ * a deadlock with the branch-management "add branch" code (which grabbed
-+ * the write lock). It is safe to not grab the read lock here, because even
-+ * with branch management taking place, there is no chance that
-+ * unionfs_permission, or anything it calls, will use stale branch
-+ * information.
-+ */
-+static int unionfs_permission(struct inode *inode, int mask)
-+{
-+ struct inode *lower_inode = NULL;
-+ int err = 0;
-+ int bindex, bstart, bend;
-+ const int is_file = !S_ISDIR(inode->i_mode);
-+ const int write_mask = (mask & MAY_WRITE) && !(mask & MAY_READ);
-+ struct inode *inode_grabbed = igrab(inode);
-+ struct dentry *dentry = d_find_alias(inode);
-+
-+ if (dentry)
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ if (!UNIONFS_I(inode)->lower_inodes) {
-+ if (is_file) /* dirs can be unlinked but chdir'ed to */
-+ err = -ESTALE; /* force revalidate */
-+ goto out;
-+ }
-+ bstart = ibstart(inode);
-+ bend = ibend(inode);
-+ if (unlikely(bstart < 0 || bend < 0)) {
-+ /*
-+ * With branch-management, we can get a stale inode here.
-+ * If so, we return ESTALE back to link_path_walk, which
-+ * would discard the dcache entry and re-lookup the
-+ * dentry+inode. This should be equivalent to issuing
-+ * __unionfs_d_revalidate_chain on nd.dentry here.
-+ */
-+ if (is_file) /* dirs can be unlinked but chdir'ed to */
-+ err = -ESTALE; /* force revalidate */
-+ goto out;
-+ }
-+
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ lower_inode = unionfs_lower_inode_idx(inode, bindex);
-+ if (!lower_inode)
-+ continue;
-+
-+ /*
-+ * check the condition for D-F-D underlying files/directories,
-+ * we don't have to check for files, if we are checking for
-+ * directories.
-+ */
-+ if (!is_file && !S_ISDIR(lower_inode->i_mode))
-+ continue;
-+
-+ /*
-+ * We check basic permissions, but we ignore any conditions
-+ * such as readonly file systems or branches marked as
-+ * readonly, because those conditions should lead to a
-+ * copyup taking place later on. However, if user never had
-+ * access to the file, then no copyup could ever take place.
-+ */
-+ err = __inode_permission(lower_inode, mask);
-+ if (err && err != -EACCES && err != EPERM && bindex > 0) {
-+ umode_t mode = lower_inode->i_mode;
-+ if ((is_robranch_super(inode->i_sb, bindex) ||
-+ __is_rdonly(lower_inode)) &&
-+ (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
-+ err = 0;
-+ if (IS_COPYUP_ERR(err))
-+ err = 0;
-+ }
-+
-+ /*
-+ * NFS HACK: NFSv2/3 return EACCES on readonly-exported,
-+ * locally readonly-mounted file systems, instead of EROFS
-+ * like other file systems do. So we have no choice here
-+ * but to intercept this and ignore it for NFS branches
-+ * marked readonly. Specifically, we avoid using NFS's own
-+ * "broken" ->permission method, and rely on
-+ * generic_permission() to do basic checking for us.
-+ */
-+ if (err && err == -EACCES &&
-+ is_robranch_super(inode->i_sb, bindex) &&
-+ lower_inode->i_sb->s_magic == NFS_SUPER_MAGIC)
-+ err = generic_permission(lower_inode, mask, NULL);
-+
-+ /*
-+ * The permissions are an intersection of the overall directory
-+ * permissions, so we fail if one fails.
-+ */
-+ if (err)
-+ goto out;
-+
-+ /* only the leftmost file matters. */
-+ if (is_file || write_mask) {
-+ if (is_file && write_mask) {
-+ err = get_write_access(lower_inode);
-+ if (!err)
-+ put_write_access(lower_inode);
-+ }
-+ break;
-+ }
-+ }
-+ /* sync times which may have changed (asynchronously) below */
-+ unionfs_copy_attr_times(inode);
-+
-+out:
-+ unionfs_check_inode(inode);
-+ if (dentry) {
-+ unionfs_unlock_dentry(dentry);
-+ dput(dentry);
-+ }
-+ iput(inode_grabbed);
-+ return err;
-+}
-+
-+static int unionfs_setattr(struct dentry *dentry, struct iattr *ia)
-+{
-+ int err = 0;
-+ struct dentry *lower_dentry;
-+ struct dentry *parent;
-+ struct inode *inode;
-+ struct inode *lower_inode;
-+ int bstart, bend, bindex;
-+ loff_t size;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ if (unlikely(!__unionfs_d_revalidate(dentry, parent, false))) {
-+ err = -ESTALE;
-+ goto out;
-+ }
-+
-+ bstart = dbstart(dentry);
-+ bend = dbend(dentry);
-+ inode = dentry->d_inode;
-+
-+ /*
-+ * mode change is for clearing setuid/setgid. Allow lower filesystem
-+ * to reinterpret it in its own way.
-+ */
-+ if (ia->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
-+ ia->ia_valid &= ~ATTR_MODE;
-+
-+ lower_dentry = unionfs_lower_dentry(dentry);
-+ if (!lower_dentry) { /* should never happen after above revalidate */
-+ err = -EINVAL;
-+ goto out;
-+ }
-+ lower_inode = unionfs_lower_inode(inode);
-+
-+ /* check if user has permission to change lower inode */
-+ err = inode_change_ok(lower_inode, ia);
-+ if (err)
-+ goto out;
-+
-+ /* copyup if the file is on a read only branch */
-+ if (is_robranch_super(dentry->d_sb, bstart)
-+ || __is_rdonly(lower_inode)) {
-+ /* check if we have a branch to copy up to */
-+ if (bstart <= 0) {
-+ err = -EACCES;
-+ goto out;
-+ }
-+
-+ if (ia->ia_valid & ATTR_SIZE)
-+ size = ia->ia_size;
-+ else
-+ size = i_size_read(inode);
-+ /* copyup to next available branch */
-+ for (bindex = bstart - 1; bindex >= 0; bindex--) {
-+ err = copyup_dentry(parent->d_inode,
-+ dentry, bstart, bindex,
-+ dentry->d_name.name,
-+ dentry->d_name.len,
-+ NULL, size);
-+ if (!err)
-+ break;
-+ }
-+ if (err)
-+ goto out;
-+ /* get updated lower_dentry/inode after copyup */
-+ lower_dentry = unionfs_lower_dentry(dentry);
-+ lower_inode = unionfs_lower_inode(inode);
-+ }
-+
-+ /*
-+ * If shrinking, first truncate upper level to cancel writing dirty
-+ * pages beyond the new eof; and also if its' maxbytes is more
-+ * limiting (fail with -EFBIG before making any change to the lower
-+ * level). There is no need to vmtruncate the upper level
-+ * afterwards in the other cases: we fsstack_copy_inode_size from
-+ * the lower level.
-+ */
-+ if (ia->ia_valid & ATTR_SIZE) {
-+ size = i_size_read(inode);
-+ if (ia->ia_size < size || (ia->ia_size > size &&
-+ inode->i_sb->s_maxbytes < lower_inode->i_sb->s_maxbytes)) {
-+ err = vmtruncate(inode, ia->ia_size);
-+ if (err)
-+ goto out;
-+ }
-+ }
-+
-+ /* notify the (possibly copied-up) lower inode */
-+ /*
-+ * Note: we use lower_dentry->d_inode, because lower_inode may be
-+ * unlinked (no inode->i_sb and i_ino==0. This happens if someone
-+ * tries to open(), unlink(), then ftruncate() a file.
-+ */
-+ mutex_lock(&lower_dentry->d_inode->i_mutex);
-+ err = notify_change(lower_dentry, ia);
-+ mutex_unlock(&lower_dentry->d_inode->i_mutex);
-+ if (err)
-+ goto out;
-+
-+ /* get attributes from the first lower inode */
-+ if (ibstart(inode) >= 0)
-+ unionfs_copy_attr_all(inode, lower_inode);
-+ /*
-+ * unionfs_copy_attr_all will copy the lower times to our inode if
-+ * the lower ones are newer (useful for cache coherency). However,
-+ * ->setattr is the only place in which we may have to copy the
-+ * lower inode times absolutely, to support utimes(2).
-+ */
-+ if (ia->ia_valid & ATTR_MTIME_SET)
-+ inode->i_mtime = lower_inode->i_mtime;
-+ if (ia->ia_valid & ATTR_CTIME)
-+ inode->i_ctime = lower_inode->i_ctime;
-+ if (ia->ia_valid & ATTR_ATIME_SET)
-+ inode->i_atime = lower_inode->i_atime;
-+ fsstack_copy_inode_size(inode, lower_inode);
-+
-+out:
-+ if (!err)
-+ unionfs_check_dentry(dentry);
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+
-+ return err;
-+}
-+
-+struct inode_operations unionfs_symlink_iops = {
-+ .readlink = unionfs_readlink,
-+ .permission = unionfs_permission,
-+ .follow_link = unionfs_follow_link,
-+ .setattr = unionfs_setattr,
-+ .put_link = unionfs_put_link,
-+};
-+
-+struct inode_operations unionfs_dir_iops = {
-+ .create = unionfs_create,
-+ .lookup = unionfs_lookup,
-+ .link = unionfs_link,
-+ .unlink = unionfs_unlink,
-+ .symlink = unionfs_symlink,
-+ .mkdir = unionfs_mkdir,
-+ .rmdir = unionfs_rmdir,
-+ .mknod = unionfs_mknod,
-+ .rename = unionfs_rename,
-+ .permission = unionfs_permission,
-+ .setattr = unionfs_setattr,
-+#ifdef CONFIG_UNION_FS_XATTR
-+ .setxattr = unionfs_setxattr,
-+ .getxattr = unionfs_getxattr,
-+ .removexattr = unionfs_removexattr,
-+ .listxattr = unionfs_listxattr,
-+#endif /* CONFIG_UNION_FS_XATTR */
-+};
-+
-+struct inode_operations unionfs_main_iops = {
-+ .permission = unionfs_permission,
-+ .setattr = unionfs_setattr,
-+#ifdef CONFIG_UNION_FS_XATTR
-+ .setxattr = unionfs_setxattr,
-+ .getxattr = unionfs_getxattr,
-+ .removexattr = unionfs_removexattr,
-+ .listxattr = unionfs_listxattr,
-+#endif /* CONFIG_UNION_FS_XATTR */
-+};
-diff --git a/fs/unionfs/lookup.c b/fs/unionfs/lookup.c
-new file mode 100644
-index 0000000..b63c17e
---- /dev/null
-+++ b/fs/unionfs/lookup.c
-@@ -0,0 +1,569 @@
-+/*
-+ * Copyright (c) 2003-2010 Erez Zadok
-+ * Copyright (c) 2003-2006 Charles P. Wright
-+ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2005-2006 Junjiro Okajima
-+ * Copyright (c) 2005 Arun M. Krishnakumar
-+ * Copyright (c) 2004-2006 David P. Quigley
-+ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
-+ * Copyright (c) 2003 Puja Gupta
-+ * Copyright (c) 2003 Harikesavan Krishnan
-+ * Copyright (c) 2003-2010 Stony Brook University
-+ * Copyright (c) 2003-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include "union.h"
-+
-+/*
-+ * Lookup one path component @name relative to a <base,mnt> path pair.
-+ * Behaves nearly the same as lookup_one_len (i.e., return negative dentry
-+ * on ENOENT), but uses the @mnt passed, so it can cross bind mounts and
-+ * other lower mounts properly. If @new_mnt is non-null, will fill in the
-+ * new mnt there. Caller is responsible to dput/mntput/path_put returned
-+ * @dentry and @new_mnt.
-+ */
-+struct dentry *__lookup_one(struct dentry *base, struct vfsmount *mnt,
-+ const char *name, struct vfsmount **new_mnt)
-+{
-+ struct dentry *dentry = NULL;
-+ struct nameidata lower_nd;
-+ int err;
-+
-+ /* we use flags=0 to get basic lookup */
-+ err = vfs_path_lookup(base, mnt, name, 0, &lower_nd);
-+
-+ switch (err) {
-+ case 0: /* no error */
-+ dentry = lower_nd.path.dentry;
-+ if (new_mnt)
-+ *new_mnt = lower_nd.path.mnt; /* rc already inc'ed */
-+ break;
-+ case -ENOENT:
-+ /*
-+ * We don't consider ENOENT an error, and we want to return
-+ * a negative dentry (ala lookup_one_len). As we know
-+ * there was no inode for this name before (-ENOENT), then
-+ * it's safe to call lookup_one_len (which doesn't take a
-+ * vfsmount).
-+ */
-+ dentry = lookup_lck_len(name, base, strlen(name));
-+ if (new_mnt)
-+ *new_mnt = mntget(lower_nd.path.mnt);
-+ break;
-+ default: /* all other real errors */
-+ dentry = ERR_PTR(err);
-+ break;
-+ }
-+
-+ return dentry;
-+}
-+
-+/*
-+ * This is a utility function that fills in a unionfs dentry.
-+ * Caller must lock this dentry with unionfs_lock_dentry.
-+ *
-+ * Returns: 0 (ok), or -ERRNO if an error occurred.
-+ * XXX: get rid of _partial_lookup and make callers call _lookup_full directly
-+ */
-+int unionfs_partial_lookup(struct dentry *dentry, struct dentry *parent)
-+{
-+ struct dentry *tmp;
-+ int err = -ENOSYS;
-+
-+ tmp = unionfs_lookup_full(dentry, parent, INTERPOSE_PARTIAL);
-+
-+ if (!tmp) {
-+ err = 0;
-+ goto out;
-+ }
-+ if (IS_ERR(tmp)) {
-+ err = PTR_ERR(tmp);
-+ goto out;
-+ }
-+ /* XXX: need to change the interface */
-+ BUG_ON(tmp != dentry);
-+out:
-+ return err;
-+}
-+
-+/* The dentry cache is just so we have properly sized dentries. */
-+static struct kmem_cache *unionfs_dentry_cachep;
-+int unionfs_init_dentry_cache(void)
-+{
-+ unionfs_dentry_cachep =
-+ kmem_cache_create("unionfs_dentry",
-+ sizeof(struct unionfs_dentry_info),
-+ 0, SLAB_RECLAIM_ACCOUNT, NULL);
-+
-+ return (unionfs_dentry_cachep ? 0 : -ENOMEM);
-+}
-+
-+void unionfs_destroy_dentry_cache(void)
-+{
-+ if (unionfs_dentry_cachep)
-+ kmem_cache_destroy(unionfs_dentry_cachep);
-+}
-+
-+void free_dentry_private_data(struct dentry *dentry)
-+{
-+ if (!dentry || !dentry->d_fsdata)
-+ return;
-+ kfree(UNIONFS_D(dentry)->lower_paths);
-+ UNIONFS_D(dentry)->lower_paths = NULL;
-+ kmem_cache_free(unionfs_dentry_cachep, dentry->d_fsdata);
-+ dentry->d_fsdata = NULL;
-+}
-+
-+static inline int __realloc_dentry_private_data(struct dentry *dentry)
-+{
-+ struct unionfs_dentry_info *info = UNIONFS_D(dentry);
-+ void *p;
-+ int size;
-+
-+ BUG_ON(!info);
-+
-+ size = sizeof(struct path) * sbmax(dentry->d_sb);
-+ p = krealloc(info->lower_paths, size, GFP_ATOMIC);
-+ if (unlikely(!p))
-+ return -ENOMEM;
-+
-+ info->lower_paths = p;
-+
-+ info->bstart = -1;
-+ info->bend = -1;
-+ info->bopaque = -1;
-+ info->bcount = sbmax(dentry->d_sb);
-+ atomic_set(&info->generation,
-+ atomic_read(&UNIONFS_SB(dentry->d_sb)->generation));
-+
-+ memset(info->lower_paths, 0, size);
-+
-+ return 0;
-+}
-+
-+/* UNIONFS_D(dentry)->lock must be locked */
-+int realloc_dentry_private_data(struct dentry *dentry)
-+{
-+ if (!__realloc_dentry_private_data(dentry))
-+ return 0;
-+
-+ kfree(UNIONFS_D(dentry)->lower_paths);
-+ free_dentry_private_data(dentry);
-+ return -ENOMEM;
-+}
-+
-+/* allocate new dentry private data */
-+int new_dentry_private_data(struct dentry *dentry, int subclass)
-+{
-+ struct unionfs_dentry_info *info = UNIONFS_D(dentry);
-+
-+ BUG_ON(info);
-+
-+ info = kmem_cache_alloc(unionfs_dentry_cachep, GFP_ATOMIC);
-+ if (unlikely(!info))
-+ return -ENOMEM;
-+
-+ mutex_init(&info->lock);
-+ mutex_lock_nested(&info->lock, subclass);
-+
-+ info->lower_paths = NULL;
-+
-+ dentry->d_fsdata = info;
-+
-+ if (!__realloc_dentry_private_data(dentry))
-+ return 0;
-+
-+ mutex_unlock(&info->lock);
-+ free_dentry_private_data(dentry);
-+ return -ENOMEM;
-+}
-+
-+/*
-+ * scan through the lower dentry objects, and set bstart to reflect the
-+ * starting branch
-+ */
-+void update_bstart(struct dentry *dentry)
-+{
-+ int bindex;
-+ int bstart = dbstart(dentry);
-+ int bend = dbend(dentry);
-+ struct dentry *lower_dentry;
-+
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
-+ if (!lower_dentry)
-+ continue;
-+ if (lower_dentry->d_inode) {
-+ dbstart(dentry) = bindex;
-+ break;
-+ }
-+ dput(lower_dentry);
-+ unionfs_set_lower_dentry_idx(dentry, bindex, NULL);
-+ }
-+}
-+
-+
-+/*
-+ * Initialize a nameidata structure (the intent part) we can pass to a lower
-+ * file system. Returns 0 on success or -error (only -ENOMEM possible).
-+ * Inside that nd structure, this function may also return an allocated
-+ * struct file (for open intents). The caller, when done with this nd, must
-+ * kfree the intent file (using release_lower_nd).
-+ *
-+ * XXX: this code, and the callers of this code, should be redone using
-+ * vfs_path_lookup() when (1) the nameidata structure is refactored into a
-+ * separate intent-structure, and (2) open_namei() is broken into a VFS-only
-+ * function and a method that other file systems can call.
-+ */
-+int init_lower_nd(struct nameidata *nd, unsigned int flags)
-+{
-+ int err = 0;
-+#ifdef ALLOC_LOWER_ND_FILE
-+ /*
-+ * XXX: one day we may need to have the lower return an open file
-+ * for us. It is not needed in 2.6.23-rc1 for nfs2/nfs3, but may
-+ * very well be needed for nfs4.
-+ */
-+ struct file *file;
-+#endif /* ALLOC_LOWER_ND_FILE */
-+
-+ memset(nd, 0, sizeof(struct nameidata));
-+ if (!flags)
-+ return err;
-+
-+ switch (flags) {
-+ case LOOKUP_CREATE:
-+ nd->intent.open.flags |= O_CREAT;
-+ /* fall through: shared code for create/open cases */
-+ case LOOKUP_OPEN:
-+ nd->flags = flags;
-+ nd->intent.open.flags |= (FMODE_READ | FMODE_WRITE);
-+#ifdef ALLOC_LOWER_ND_FILE
-+ file = kzalloc(sizeof(struct file), GFP_KERNEL);
-+ if (unlikely(!file)) {
-+ err = -ENOMEM;
-+ break; /* exit switch statement and thus return */
-+ }
-+ nd->intent.open.file = file;
-+#endif /* ALLOC_LOWER_ND_FILE */
-+ break;
-+ default:
-+ /*
-+ * We should never get here, for now.
-+ * We can add new cases here later on.
-+ */
-+ pr_debug("unionfs: unknown nameidata flag 0x%x\n", flags);
-+ BUG();
-+ break;
-+ }
-+
-+ return err;
-+}
-+
-+void release_lower_nd(struct nameidata *nd, int err)
-+{
-+ if (!nd->intent.open.file)
-+ return;
-+ else if (!err)
-+ release_open_intent(nd);
-+#ifdef ALLOC_LOWER_ND_FILE
-+ kfree(nd->intent.open.file);
-+#endif /* ALLOC_LOWER_ND_FILE */
-+}
-+
-+/*
-+ * Main (and complex) driver function for Unionfs's lookup
-+ *
-+ * Returns: NULL (ok), ERR_PTR if an error occurred, or a non-null non-error
-+ * PTR if d_splice returned a different dentry.
-+ *
-+ * If lookupmode is INTERPOSE_PARTIAL/REVAL/REVAL_NEG, the passed dentry's
-+ * inode info must be locked. If lookupmode is INTERPOSE_LOOKUP (i.e., a
-+ * newly looked-up dentry), then unionfs_lookup_backend will return a locked
-+ * dentry's info, which the caller must unlock.
-+ */
-+struct dentry *unionfs_lookup_full(struct dentry *dentry,
-+ struct dentry *parent, int lookupmode)
-+{
-+ int err = 0;
-+ struct dentry *lower_dentry = NULL;
-+ struct vfsmount *lower_mnt;
-+ struct vfsmount *lower_dir_mnt;
-+ struct dentry *wh_lower_dentry = NULL;
-+ struct dentry *lower_dir_dentry = NULL;
-+ struct dentry *d_interposed = NULL;
-+ int bindex, bstart, bend, bopaque;
-+ int opaque, num_positive = 0;
-+ const char *name;
-+ int namelen;
-+ int pos_start, pos_end;
-+
-+ /*
-+ * We should already have a lock on this dentry in the case of a
-+ * partial lookup, or a revalidation. Otherwise it is returned from
-+ * new_dentry_private_data already locked.
-+ */
-+ verify_locked(dentry);
-+ verify_locked(parent);
-+
-+ /* must initialize dentry operations */
-+ dentry->d_op = &unionfs_dops;
-+
-+ /* We never partial lookup the root directory. */
-+ if (IS_ROOT(dentry))
-+ goto out;
-+
-+ name = dentry->d_name.name;
-+ namelen = dentry->d_name.len;
-+
-+ /* No dentries should get created for possible whiteout names. */
-+ if (!is_validname(name)) {
-+ err = -EPERM;
-+ goto out_free;
-+ }
-+
-+ /* Now start the actual lookup procedure. */
-+ bstart = dbstart(parent);
-+ bend = dbend(parent);
-+ bopaque = dbopaque(parent);
-+ BUG_ON(bstart < 0);
-+
-+ /* adjust bend to bopaque if needed */
-+ if ((bopaque >= 0) && (bopaque < bend))
-+ bend = bopaque;
-+
-+ /* lookup all possible dentries */
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+
-+ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
-+ lower_mnt = unionfs_lower_mnt_idx(dentry, bindex);
-+
-+ /* skip if we already have a positive lower dentry */
-+ if (lower_dentry) {
-+ if (dbstart(dentry) < 0)
-+ dbstart(dentry) = bindex;
-+ if (bindex > dbend(dentry))
-+ dbend(dentry) = bindex;
-+ if (lower_dentry->d_inode)
-+ num_positive++;
-+ continue;
-+ }
-+
-+ lower_dir_dentry =
-+ unionfs_lower_dentry_idx(parent, bindex);
-+ /* if the lower dentry's parent does not exist, skip this */
-+ if (!lower_dir_dentry || !lower_dir_dentry->d_inode)
-+ continue;
-+
-+ /* also skip it if the parent isn't a directory. */
-+ if (!S_ISDIR(lower_dir_dentry->d_inode->i_mode))
-+ continue; /* XXX: should be BUG_ON */
-+
-+ /* check for whiteouts: stop lookup if found */
-+ wh_lower_dentry = lookup_whiteout(name, lower_dir_dentry);
-+ if (IS_ERR(wh_lower_dentry)) {
-+ err = PTR_ERR(wh_lower_dentry);
-+ goto out_free;
-+ }
-+ if (wh_lower_dentry->d_inode) {
-+ dbend(dentry) = dbopaque(dentry) = bindex;
-+ if (dbstart(dentry) < 0)
-+ dbstart(dentry) = bindex;
-+ dput(wh_lower_dentry);
-+ break;
-+ }
-+ dput(wh_lower_dentry);
-+
-+ /* Now do regular lookup; lookup @name */
-+ lower_dir_mnt = unionfs_lower_mnt_idx(parent, bindex);
-+ lower_mnt = NULL; /* XXX: needed? */
-+
-+ lower_dentry = __lookup_one(lower_dir_dentry, lower_dir_mnt,
-+ name, &lower_mnt);
-+
-+ if (IS_ERR(lower_dentry)) {
-+ err = PTR_ERR(lower_dentry);
-+ goto out_free;
-+ }
-+ unionfs_set_lower_dentry_idx(dentry, bindex, lower_dentry);
-+ if (!lower_mnt)
-+ lower_mnt = unionfs_mntget(dentry->d_sb->s_root,
-+ bindex);
-+ unionfs_set_lower_mnt_idx(dentry, bindex, lower_mnt);
-+
-+ /* adjust dbstart/end */
-+ if (dbstart(dentry) < 0)
-+ dbstart(dentry) = bindex;
-+ if (bindex > dbend(dentry))
-+ dbend(dentry) = bindex;
-+ /*
-+ * We always store the lower dentries above, and update
-+ * dbstart/dbend, even if the whole unionfs dentry is
-+ * negative (i.e., no lower inodes).
-+ */
-+ if (!lower_dentry->d_inode)
-+ continue;
-+ num_positive++;
-+
-+ /*
-+ * check if we just found an opaque directory, if so, stop
-+ * lookups here.
-+ */
-+ if (!S_ISDIR(lower_dentry->d_inode->i_mode))
-+ continue;
-+ opaque = is_opaque_dir(dentry, bindex);
-+ if (opaque < 0) {
-+ err = opaque;
-+ goto out_free;
-+ } else if (opaque) {
-+ dbend(dentry) = dbopaque(dentry) = bindex;
-+ break;
-+ }
-+ dbend(dentry) = bindex;
-+
-+ /* update parent directory's atime with the bindex */
-+ fsstack_copy_attr_atime(parent->d_inode,
-+ lower_dir_dentry->d_inode);
-+ }
-+
-+ /* sanity checks, then decide if to process a negative dentry */
-+ BUG_ON(dbstart(dentry) < 0 && dbend(dentry) >= 0);
-+ BUG_ON(dbstart(dentry) >= 0 && dbend(dentry) < 0);
-+
-+ if (num_positive > 0)
-+ goto out_positive;
-+
-+ /*** handle NEGATIVE dentries ***/
-+
-+ /*
-+ * If negative, keep only first lower negative dentry, to save on
-+ * memory.
-+ */
-+ if (dbstart(dentry) < dbend(dentry)) {
-+ path_put_lowers(dentry, dbstart(dentry) + 1,
-+ dbend(dentry), false);
-+ dbend(dentry) = dbstart(dentry);
-+ }
-+ if (lookupmode == INTERPOSE_PARTIAL)
-+ goto out;
-+ if (lookupmode == INTERPOSE_LOOKUP) {
-+ /*
-+ * If all we found was a whiteout in the first available
-+ * branch, then create a negative dentry for a possibly new
-+ * file to be created.
-+ */
-+ if (dbopaque(dentry) < 0)
-+ goto out;
-+ /* XXX: need to get mnt here */
-+ bindex = dbstart(dentry);
-+ if (unionfs_lower_dentry_idx(dentry, bindex))
-+ goto out;
-+ lower_dir_dentry =
-+ unionfs_lower_dentry_idx(parent, bindex);
-+ if (!lower_dir_dentry || !lower_dir_dentry->d_inode)
-+ goto out;
-+ if (!S_ISDIR(lower_dir_dentry->d_inode->i_mode))
-+ goto out; /* XXX: should be BUG_ON */
-+ /* XXX: do we need to cross bind mounts here? */
-+ lower_dentry = lookup_lck_len(name, lower_dir_dentry, namelen);
-+ if (IS_ERR(lower_dentry)) {
-+ err = PTR_ERR(lower_dentry);
-+ goto out;
-+ }
-+ /* XXX: need to mntget/mntput as needed too! */
-+ unionfs_set_lower_dentry_idx(dentry, bindex, lower_dentry);
-+ /* XXX: wrong mnt for crossing bind mounts! */
-+ lower_mnt = unionfs_mntget(dentry->d_sb->s_root, bindex);
-+ unionfs_set_lower_mnt_idx(dentry, bindex, lower_mnt);
-+
-+ goto out;
-+ }
-+
-+ /* if we're revalidating a positive dentry, don't make it negative */
-+ if (lookupmode != INTERPOSE_REVAL)
-+ d_add(dentry, NULL);
-+
-+ goto out;
-+
-+out_positive:
-+ /*** handle POSITIVE dentries ***/
-+
-+ /*
-+ * This unionfs dentry is positive (at least one lower inode
-+ * exists), so scan entire dentry from beginning to end, and remove
-+ * any negative lower dentries, if any. Then, update dbstart/dbend
-+ * to reflect the start/end of positive dentries.
-+ */
-+ pos_start = pos_end = -1;
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ lower_dentry = unionfs_lower_dentry_idx(dentry,
-+ bindex);
-+ if (lower_dentry && lower_dentry->d_inode) {
-+ if (pos_start < 0)
-+ pos_start = bindex;
-+ if (bindex > pos_end)
-+ pos_end = bindex;
-+ continue;
-+ }
-+ path_put_lowers(dentry, bindex, bindex, false);
-+ }
-+ if (pos_start >= 0)
-+ dbstart(dentry) = pos_start;
-+ if (pos_end >= 0)
-+ dbend(dentry) = pos_end;
-+
-+ /* Partial lookups need to re-interpose, or throw away older negs. */
-+ if (lookupmode == INTERPOSE_PARTIAL) {
-+ if (dentry->d_inode) {
-+ unionfs_reinterpose(dentry);
-+ goto out;
-+ }
-+
-+ /*
-+ * This dentry was positive, so it is as if we had a
-+ * negative revalidation.
-+ */
-+ lookupmode = INTERPOSE_REVAL_NEG;
-+ update_bstart(dentry);
-+ }
-+
-+ /*
-+ * Interpose can return a dentry if d_splice returned a different
-+ * dentry.
-+ */
-+ d_interposed = unionfs_interpose(dentry, dentry->d_sb, lookupmode);
-+ if (IS_ERR(d_interposed))
-+ err = PTR_ERR(d_interposed);
-+ else if (d_interposed)
-+ dentry = d_interposed;
-+
-+ if (!err)
-+ goto out;
-+ d_drop(dentry);
-+
-+out_free:
-+ /* should dput/mntput all the underlying dentries on error condition */
-+ if (dbstart(dentry) >= 0)
-+ path_put_lowers_all(dentry, false);
-+ /* free lower_paths unconditionally */
-+ kfree(UNIONFS_D(dentry)->lower_paths);
-+ UNIONFS_D(dentry)->lower_paths = NULL;
-+
-+out:
-+ if (dentry && UNIONFS_D(dentry)) {
-+ BUG_ON(dbstart(dentry) < 0 && dbend(dentry) >= 0);
-+ BUG_ON(dbstart(dentry) >= 0 && dbend(dentry) < 0);
-+ }
-+ if (d_interposed && UNIONFS_D(d_interposed)) {
-+ BUG_ON(dbstart(d_interposed) < 0 && dbend(d_interposed) >= 0);
-+ BUG_ON(dbstart(d_interposed) >= 0 && dbend(d_interposed) < 0);
-+ }
-+
-+ if (!err && d_interposed)
-+ return d_interposed;
-+ return ERR_PTR(err);
-+}
-diff --git a/fs/unionfs/main.c b/fs/unionfs/main.c
-new file mode 100644
-index 0000000..258386e
---- /dev/null
-+++ b/fs/unionfs/main.c
-@@ -0,0 +1,758 @@
-+/*
-+ * Copyright (c) 2003-2010 Erez Zadok
-+ * Copyright (c) 2003-2006 Charles P. Wright
-+ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2005-2006 Junjiro Okajima
-+ * Copyright (c) 2005 Arun M. Krishnakumar
-+ * Copyright (c) 2004-2006 David P. Quigley
-+ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
-+ * Copyright (c) 2003 Puja Gupta
-+ * Copyright (c) 2003 Harikesavan Krishnan
-+ * Copyright (c) 2003-2010 Stony Brook University
-+ * Copyright (c) 2003-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include "union.h"
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+
-+static void unionfs_fill_inode(struct dentry *dentry,
-+ struct inode *inode)
-+{
-+ struct inode *lower_inode;
-+ struct dentry *lower_dentry;
-+ int bindex, bstart, bend;
-+
-+ bstart = dbstart(dentry);
-+ bend = dbend(dentry);
-+
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
-+ if (!lower_dentry) {
-+ unionfs_set_lower_inode_idx(inode, bindex, NULL);
-+ continue;
-+ }
-+
-+ /* Initialize the lower inode to the new lower inode. */
-+ if (!lower_dentry->d_inode)
-+ continue;
-+
-+ unionfs_set_lower_inode_idx(inode, bindex,
-+ igrab(lower_dentry->d_inode));
-+ }
-+
-+ ibstart(inode) = dbstart(dentry);
-+ ibend(inode) = dbend(dentry);
-+
-+ /* Use attributes from the first branch. */
-+ lower_inode = unionfs_lower_inode(inode);
-+
-+ /* Use different set of inode ops for symlinks & directories */
-+ if (S_ISLNK(lower_inode->i_mode))
-+ inode->i_op = &unionfs_symlink_iops;
-+ else if (S_ISDIR(lower_inode->i_mode))
-+ inode->i_op = &unionfs_dir_iops;
-+
-+ /* Use different set of file ops for directories */
-+ if (S_ISDIR(lower_inode->i_mode))
-+ inode->i_fop = &unionfs_dir_fops;
-+
-+ /* properly initialize special inodes */
-+ if (S_ISBLK(lower_inode->i_mode) || S_ISCHR(lower_inode->i_mode) ||
-+ S_ISFIFO(lower_inode->i_mode) || S_ISSOCK(lower_inode->i_mode))
-+ init_special_inode(inode, lower_inode->i_mode,
-+ lower_inode->i_rdev);
-+
-+ /* all well, copy inode attributes */
-+ unionfs_copy_attr_all(inode, lower_inode);
-+ fsstack_copy_inode_size(inode, lower_inode);
-+}
-+
-+/*
-+ * Connect a unionfs inode dentry/inode with several lower ones. This is
-+ * the classic stackable file system "vnode interposition" action.
-+ *
-+ * @sb: unionfs's super_block
-+ */
-+struct dentry *unionfs_interpose(struct dentry *dentry, struct super_block *sb,
-+ int flag)
-+{
-+ int err = 0;
-+ struct inode *inode;
-+ int need_fill_inode = 1;
-+ struct dentry *spliced = NULL;
-+
-+ verify_locked(dentry);
-+
-+ /*
-+ * We allocate our new inode below by calling unionfs_iget,
-+ * which will initialize some of the new inode's fields
-+ */
-+
-+ /*
-+ * On revalidate we've already got our own inode and just need
-+ * to fix it up.
-+ */
-+ if (flag == INTERPOSE_REVAL) {
-+ inode = dentry->d_inode;
-+ UNIONFS_I(inode)->bstart = -1;
-+ UNIONFS_I(inode)->bend = -1;
-+ atomic_set(&UNIONFS_I(inode)->generation,
-+ atomic_read(&UNIONFS_SB(sb)->generation));
-+
-+ UNIONFS_I(inode)->lower_inodes =
-+ kcalloc(sbmax(sb), sizeof(struct inode *), GFP_KERNEL);
-+ if (unlikely(!UNIONFS_I(inode)->lower_inodes)) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+ } else {
-+ /* get unique inode number for unionfs */
-+ inode = unionfs_iget(sb, iunique(sb, UNIONFS_ROOT_INO));
-+ if (IS_ERR(inode)) {
-+ err = PTR_ERR(inode);
-+ goto out;
-+ }
-+ if (atomic_read(&inode->i_count) > 1)
-+ goto skip;
-+ }
-+
-+ need_fill_inode = 0;
-+ unionfs_fill_inode(dentry, inode);
-+
-+skip:
-+ /* only (our) lookup wants to do a d_add */
-+ switch (flag) {
-+ case INTERPOSE_DEFAULT:
-+ /* for operations which create new inodes */
-+ d_add(dentry, inode);
-+ break;
-+ case INTERPOSE_REVAL_NEG:
-+ d_instantiate(dentry, inode);
-+ break;
-+ case INTERPOSE_LOOKUP:
-+ spliced = d_splice_alias(inode, dentry);
-+ if (spliced && spliced != dentry) {
-+ /*
-+ * d_splice can return a dentry if it was
-+ * disconnected and had to be moved. We must ensure
-+ * that the private data of the new dentry is
-+ * correct and that the inode info was filled
-+ * properly. Finally we must return this new
-+ * dentry.
-+ */
-+ spliced->d_op = &unionfs_dops;
-+ spliced->d_fsdata = dentry->d_fsdata;
-+ dentry->d_fsdata = NULL;
-+ dentry = spliced;
-+ if (need_fill_inode) {
-+ need_fill_inode = 0;
-+ unionfs_fill_inode(dentry, inode);
-+ }
-+ goto out_spliced;
-+ } else if (!spliced) {
-+ if (need_fill_inode) {
-+ need_fill_inode = 0;
-+ unionfs_fill_inode(dentry, inode);
-+ goto out_spliced;
-+ }
-+ }
-+ break;
-+ case INTERPOSE_REVAL:
-+ /* Do nothing. */
-+ break;
-+ default:
-+ printk(KERN_CRIT "unionfs: invalid interpose flag passed!\n");
-+ BUG();
-+ }
-+ goto out;
-+
-+out_spliced:
-+ if (!err)
-+ return spliced;
-+out:
-+ return ERR_PTR(err);
-+}
-+
-+/* like interpose above, but for an already existing dentry */
-+void unionfs_reinterpose(struct dentry *dentry)
-+{
-+ struct dentry *lower_dentry;
-+ struct inode *inode;
-+ int bindex, bstart, bend;
-+
-+ verify_locked(dentry);
-+
-+ /* This is pre-allocated inode */
-+ inode = dentry->d_inode;
-+
-+ bstart = dbstart(dentry);
-+ bend = dbend(dentry);
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
-+ if (!lower_dentry)
-+ continue;
-+
-+ if (!lower_dentry->d_inode)
-+ continue;
-+ if (unionfs_lower_inode_idx(inode, bindex))
-+ continue;
-+ unionfs_set_lower_inode_idx(inode, bindex,
-+ igrab(lower_dentry->d_inode));
-+ }
-+ ibstart(inode) = dbstart(dentry);
-+ ibend(inode) = dbend(dentry);
-+}
-+
-+/*
-+ * make sure the branch we just looked up (nd) makes sense:
-+ *
-+ * 1) we're not trying to stack unionfs on top of unionfs
-+ * 2) it exists
-+ * 3) is a directory
-+ */
-+int check_branch(struct nameidata *nd)
-+{
-+ /* XXX: remove in ODF code -- stacking unions allowed there */
-+ if (!strcmp(nd->path.dentry->d_sb->s_type->name, UNIONFS_NAME))
-+ return -EINVAL;
-+ if (!nd->path.dentry->d_inode)
-+ return -ENOENT;
-+ if (!S_ISDIR(nd->path.dentry->d_inode->i_mode))
-+ return -ENOTDIR;
-+ return 0;
-+}
-+
-+/* checks if two lower_dentries have overlapping branches */
-+static int is_branch_overlap(struct dentry *dent1, struct dentry *dent2)
-+{
-+ struct dentry *dent = NULL;
-+
-+ dent = dent1;
-+ while ((dent != dent2) && (dent->d_parent != dent))
-+ dent = dent->d_parent;
-+
-+ if (dent == dent2)
-+ return 1;
-+
-+ dent = dent2;
-+ while ((dent != dent1) && (dent->d_parent != dent))
-+ dent = dent->d_parent;
-+
-+ return (dent == dent1);
-+}
-+
-+/*
-+ * Parse "ro" or "rw" options, but default to "rw" if no mode options was
-+ * specified. Fill the mode bits in @perms. If encounter an unknown
-+ * string, return -EINVAL. Otherwise return 0.
-+ */
-+int parse_branch_mode(const char *name, int *perms)
-+{
-+ if (!name || !strcmp(name, "rw")) {
-+ *perms = MAY_READ | MAY_WRITE;
-+ return 0;
-+ }
-+ if (!strcmp(name, "ro")) {
-+ *perms = MAY_READ;
-+ return 0;
-+ }
-+ return -EINVAL;
-+}
-+
-+/*
-+ * parse the dirs= mount argument
-+ *
-+ * We don't need to lock the superblock private data's rwsem, as we get
-+ * called only by unionfs_read_super - it is still a long time before anyone
-+ * can even get a reference to us.
-+ */
-+static int parse_dirs_option(struct super_block *sb, struct unionfs_dentry_info
-+ *lower_root_info, char *options)
-+{
-+ struct nameidata nd;
-+ char *name;
-+ int err = 0;
-+ int branches = 1;
-+ int bindex = 0;
-+ int i = 0;
-+ int j = 0;
-+ struct dentry *dent1;
-+ struct dentry *dent2;
-+
-+ if (options[0] == '\0') {
-+ printk(KERN_ERR "unionfs: no branches specified\n");
-+ err = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Each colon means we have a separator, this is really just a rough
-+ * guess, since strsep will handle empty fields for us.
-+ */
-+ for (i = 0; options[i]; i++)
-+ if (options[i] == ':')
-+ branches++;
-+
-+ /* allocate space for underlying pointers to lower dentry */
-+ UNIONFS_SB(sb)->data =
-+ kcalloc(branches, sizeof(struct unionfs_data), GFP_KERNEL);
-+ if (unlikely(!UNIONFS_SB(sb)->data)) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ lower_root_info->lower_paths =
-+ kcalloc(branches, sizeof(struct path), GFP_KERNEL);
-+ if (unlikely(!lower_root_info->lower_paths)) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ /* now parsing a string such as "b1:b2=rw:b3=ro:b4" */
-+ branches = 0;
-+ while ((name = strsep(&options, ":")) != NULL) {
-+ int perms;
-+ char *mode = strchr(name, '=');
-+
-+ if (!name)
-+ continue;
-+ if (!*name) { /* bad use of ':' (extra colons) */
-+ err = -EINVAL;
-+ goto out;
-+ }
-+
-+ branches++;
-+
-+ /* strip off '=' if any */
-+ if (mode)
-+ *mode++ = '\0';
-+
-+ err = parse_branch_mode(mode, &perms);
-+ if (err) {
-+ printk(KERN_ERR "unionfs: invalid mode \"%s\" for "
-+ "branch %d\n", mode, bindex);
-+ goto out;
-+ }
-+ /* ensure that leftmost branch is writeable */
-+ if (!bindex && !(perms & MAY_WRITE)) {
-+ printk(KERN_ERR "unionfs: leftmost branch cannot be "
-+ "read-only (use \"-o ro\" to create a "
-+ "read-only union)\n");
-+ err = -EINVAL;
-+ goto out;
-+ }
-+
-+ err = path_lookup(name, LOOKUP_FOLLOW, &nd);
-+ if (err) {
-+ printk(KERN_ERR "unionfs: error accessing "
-+ "lower directory '%s' (error %d)\n",
-+ name, err);
-+ goto out;
-+ }
-+
-+ err = check_branch(&nd);
-+ if (err) {
-+ printk(KERN_ERR "unionfs: lower directory "
-+ "'%s' is not a valid branch\n", name);
-+ path_put(&nd.path);
-+ goto out;
-+ }
-+
-+ lower_root_info->lower_paths[bindex].dentry = nd.path.dentry;
-+ lower_root_info->lower_paths[bindex].mnt = nd.path.mnt;
-+
-+ set_branchperms(sb, bindex, perms);
-+ set_branch_count(sb, bindex, 0);
-+ new_branch_id(sb, bindex);
-+
-+ if (lower_root_info->bstart < 0)
-+ lower_root_info->bstart = bindex;
-+ lower_root_info->bend = bindex;
-+ bindex++;
-+ }
-+
-+ if (branches == 0) {
-+ printk(KERN_ERR "unionfs: no branches specified\n");
-+ err = -EINVAL;
-+ goto out;
-+ }
-+
-+ BUG_ON(branches != (lower_root_info->bend + 1));
-+
-+ /*
-+ * Ensure that no overlaps exist in the branches.
-+ *
-+ * This test is required because the Linux kernel has no support
-+ * currently for ensuring coherency between stackable layers and
-+ * branches. If we were to allow overlapping branches, it would be
-+ * possible, for example, to delete a file via one branch, which
-+ * would not be reflected in another branch. Such incoherency could
-+ * lead to inconsistencies and even kernel oopses. Rather than
-+ * implement hacks to work around some of these cache-coherency
-+ * problems, we prevent branch overlapping, for now. A complete
-+ * solution will involve proper kernel/VFS support for cache
-+ * coherency, at which time we could safely remove this
-+ * branch-overlapping test.
-+ */
-+ for (i = 0; i < branches; i++) {
-+ dent1 = lower_root_info->lower_paths[i].dentry;
-+ for (j = i + 1; j < branches; j++) {
-+ dent2 = lower_root_info->lower_paths[j].dentry;
-+ if (is_branch_overlap(dent1, dent2)) {
-+ printk(KERN_ERR "unionfs: branches %d and "
-+ "%d overlap\n", i, j);
-+ err = -EINVAL;
-+ goto out;
-+ }
-+ }
-+ }
-+
-+out:
-+ if (err) {
-+ for (i = 0; i < branches; i++)
-+ path_put(&lower_root_info->lower_paths[i]);
-+
-+ kfree(lower_root_info->lower_paths);
-+ kfree(UNIONFS_SB(sb)->data);
-+
-+ /*
-+ * MUST clear the pointers to prevent potential double free if
-+ * the caller dies later on
-+ */
-+ lower_root_info->lower_paths = NULL;
-+ UNIONFS_SB(sb)->data = NULL;
-+ }
-+ return err;
-+}
-+
-+/*
-+ * Parse mount options. See the manual page for usage instructions.
-+ *
-+ * Returns the dentry object of the lower-level (lower) directory;
-+ * We want to mount our stackable file system on top of that lower directory.
-+ */
-+static struct unionfs_dentry_info *unionfs_parse_options(
-+ struct super_block *sb,
-+ char *options)
-+{
-+ struct unionfs_dentry_info *lower_root_info;
-+ char *optname;
-+ int err = 0;
-+ int bindex;
-+ int dirsfound = 0;
-+
-+ /* allocate private data area */
-+ err = -ENOMEM;
-+ lower_root_info =
-+ kzalloc(sizeof(struct unionfs_dentry_info), GFP_KERNEL);
-+ if (unlikely(!lower_root_info))
-+ goto out_error;
-+ lower_root_info->bstart = -1;
-+ lower_root_info->bend = -1;
-+ lower_root_info->bopaque = -1;
-+
-+ while ((optname = strsep(&options, ",")) != NULL) {
-+ char *optarg;
-+
-+ if (!optname || !*optname)
-+ continue;
-+
-+ optarg = strchr(optname, '=');
-+ if (optarg)
-+ *optarg++ = '\0';
-+
-+ /*
-+ * All of our options take an argument now. Insert ones that
-+ * don't, above this check.
-+ */
-+ if (!optarg) {
-+ printk(KERN_ERR "unionfs: %s requires an argument\n",
-+ optname);
-+ err = -EINVAL;
-+ goto out_error;
-+ }
-+
-+ if (!strcmp("dirs", optname)) {
-+ if (++dirsfound > 1) {
-+ printk(KERN_ERR
-+ "unionfs: multiple dirs specified\n");
-+ err = -EINVAL;
-+ goto out_error;
-+ }
-+ err = parse_dirs_option(sb, lower_root_info, optarg);
-+ if (err)
-+ goto out_error;
-+ continue;
-+ }
-+
-+ err = -EINVAL;
-+ printk(KERN_ERR
-+ "unionfs: unrecognized option '%s'\n", optname);
-+ goto out_error;
-+ }
-+ if (dirsfound != 1) {
-+ printk(KERN_ERR "unionfs: dirs option required\n");
-+ err = -EINVAL;
-+ goto out_error;
-+ }
-+ goto out;
-+
-+out_error:
-+ if (lower_root_info && lower_root_info->lower_paths) {
-+ for (bindex = lower_root_info->bstart;
-+ bindex >= 0 && bindex <= lower_root_info->bend;
-+ bindex++)
-+ path_put(&lower_root_info->lower_paths[bindex]);
-+ }
-+
-+ kfree(lower_root_info->lower_paths);
-+ kfree(lower_root_info);
-+
-+ kfree(UNIONFS_SB(sb)->data);
-+ UNIONFS_SB(sb)->data = NULL;
-+
-+ lower_root_info = ERR_PTR(err);
-+out:
-+ return lower_root_info;
-+}
-+
-+/*
-+ * our custom d_alloc_root work-alike
-+ *
-+ * we can't use d_alloc_root if we want to use our own interpose function
-+ * unchanged, so we simply call our own "fake" d_alloc_root
-+ */
-+static struct dentry *unionfs_d_alloc_root(struct super_block *sb)
-+{
-+ struct dentry *ret = NULL;
-+
-+ if (sb) {
-+ static const struct qstr name = {
-+ .name = "/",
-+ .len = 1
-+ };
-+
-+ ret = d_alloc(NULL, &name);
-+ if (likely(ret)) {
-+ ret->d_op = &unionfs_dops;
-+ ret->d_sb = sb;
-+ ret->d_parent = ret;
-+ }
-+ }
-+ return ret;
-+}
-+
-+/*
-+ * There is no need to lock the unionfs_super_info's rwsem as there is no
-+ * way anyone can have a reference to the superblock at this point in time.
-+ */
-+static int unionfs_read_super(struct super_block *sb, void *raw_data,
-+ int silent)
-+{
-+ int err = 0;
-+ struct unionfs_dentry_info *lower_root_info = NULL;
-+ int bindex, bstart, bend;
-+
-+ if (!raw_data) {
-+ printk(KERN_ERR
-+ "unionfs: read_super: missing data argument\n");
-+ err = -EINVAL;
-+ goto out;
-+ }
-+
-+ /* Allocate superblock private data */
-+ sb->s_fs_info = kzalloc(sizeof(struct unionfs_sb_info), GFP_KERNEL);
-+ if (unlikely(!UNIONFS_SB(sb))) {
-+ printk(KERN_CRIT "unionfs: read_super: out of memory\n");
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ UNIONFS_SB(sb)->bend = -1;
-+ atomic_set(&UNIONFS_SB(sb)->generation, 1);
-+ init_rwsem(&UNIONFS_SB(sb)->rwsem);
-+ UNIONFS_SB(sb)->high_branch_id = -1; /* -1 == invalid branch ID */
-+
-+ lower_root_info = unionfs_parse_options(sb, raw_data);
-+ if (IS_ERR(lower_root_info)) {
-+ printk(KERN_ERR
-+ "unionfs: read_super: error while parsing options "
-+ "(err = %ld)\n", PTR_ERR(lower_root_info));
-+ err = PTR_ERR(lower_root_info);
-+ lower_root_info = NULL;
-+ goto out_free;
-+ }
-+ if (lower_root_info->bstart == -1) {
-+ err = -ENOENT;
-+ goto out_free;
-+ }
-+
-+ /* set the lower superblock field of upper superblock */
-+ bstart = lower_root_info->bstart;
-+ BUG_ON(bstart != 0);
-+ sbend(sb) = bend = lower_root_info->bend;
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ struct dentry *d = lower_root_info->lower_paths[bindex].dentry;
-+ atomic_inc(&d->d_sb->s_active);
-+ unionfs_set_lower_super_idx(sb, bindex, d->d_sb);
-+ }
-+
-+ /* max Bytes is the maximum bytes from highest priority branch */
-+ sb->s_maxbytes = unionfs_lower_super_idx(sb, 0)->s_maxbytes;
-+
-+ /*
-+ * Our c/m/atime granularity is 1 ns because we may stack on file
-+ * systems whose granularity is as good. This is important for our
-+ * time-based cache coherency.
-+ */
-+ sb->s_time_gran = 1;
-+
-+ sb->s_op = &unionfs_sops;
-+
-+ /* See comment next to the definition of unionfs_d_alloc_root */
-+ sb->s_root = unionfs_d_alloc_root(sb);
-+ if (unlikely(!sb->s_root)) {
-+ err = -ENOMEM;
-+ goto out_dput;
-+ }
-+
-+ /* link the upper and lower dentries */
-+ sb->s_root->d_fsdata = NULL;
-+ err = new_dentry_private_data(sb->s_root, UNIONFS_DMUTEX_ROOT);
-+ if (unlikely(err))
-+ goto out_freedpd;
-+
-+ /* Set the lower dentries for s_root */
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ struct dentry *d;
-+ struct vfsmount *m;
-+
-+ d = lower_root_info->lower_paths[bindex].dentry;
-+ m = lower_root_info->lower_paths[bindex].mnt;
-+
-+ unionfs_set_lower_dentry_idx(sb->s_root, bindex, d);
-+ unionfs_set_lower_mnt_idx(sb->s_root, bindex, m);
-+ }
-+ dbstart(sb->s_root) = bstart;
-+ dbend(sb->s_root) = bend;
-+
-+ /* Set the generation number to one, since this is for the mount. */
-+ atomic_set(&UNIONFS_D(sb->s_root)->generation, 1);
-+
-+ /*
-+ * Call interpose to create the upper level inode. Only
-+ * INTERPOSE_LOOKUP can return a value other than 0 on err.
-+ */
-+ err = PTR_ERR(unionfs_interpose(sb->s_root, sb, 0));
-+ unionfs_unlock_dentry(sb->s_root);
-+ if (!err)
-+ goto out;
-+ /* else fall through */
-+
-+out_freedpd:
-+ if (UNIONFS_D(sb->s_root)) {
-+ kfree(UNIONFS_D(sb->s_root)->lower_paths);
-+ free_dentry_private_data(sb->s_root);
-+ }
-+ dput(sb->s_root);
-+
-+out_dput:
-+ if (lower_root_info && !IS_ERR(lower_root_info)) {
-+ for (bindex = lower_root_info->bstart;
-+ bindex <= lower_root_info->bend; bindex++) {
-+ struct dentry *d;
-+ d = lower_root_info->lower_paths[bindex].dentry;
-+ /* drop refs we took earlier */
-+ atomic_dec(&d->d_sb->s_active);
-+ path_put(&lower_root_info->lower_paths[bindex]);
-+ }
-+ kfree(lower_root_info->lower_paths);
-+ kfree(lower_root_info);
-+ lower_root_info = NULL;
-+ }
-+
-+out_free:
-+ kfree(UNIONFS_SB(sb)->data);
-+ kfree(UNIONFS_SB(sb));
-+ sb->s_fs_info = NULL;
-+
-+out:
-+ if (lower_root_info && !IS_ERR(lower_root_info)) {
-+ kfree(lower_root_info->lower_paths);
-+ kfree(lower_root_info);
-+ }
-+ return err;
-+}
-+
-+static int unionfs_get_sb(struct file_system_type *fs_type,
-+ int flags, const char *dev_name,
-+ void *raw_data, struct vfsmount *mnt)
-+{
-+ int err;
-+ err = get_sb_nodev(fs_type, flags, raw_data, unionfs_read_super, mnt);
-+ if (!err)
-+ UNIONFS_SB(mnt->mnt_sb)->dev_name =
-+ kstrdup(dev_name, GFP_KERNEL);
-+ return err;
-+}
-+
-+static struct file_system_type unionfs_fs_type = {
-+ .owner = THIS_MODULE,
-+ .name = UNIONFS_NAME,
-+ .get_sb = unionfs_get_sb,
-+ .kill_sb = generic_shutdown_super,
-+ .fs_flags = FS_REVAL_DOT,
-+};
-+
-+static int __init init_unionfs_fs(void)
-+{
-+ int err;
-+
-+ pr_info("Registering unionfs " UNIONFS_VERSION "\n");
-+
-+ err = unionfs_init_filldir_cache();
-+ if (unlikely(err))
-+ goto out;
-+ err = unionfs_init_inode_cache();
-+ if (unlikely(err))
-+ goto out;
-+ err = unionfs_init_dentry_cache();
-+ if (unlikely(err))
-+ goto out;
-+ err = init_sioq();
-+ if (unlikely(err))
-+ goto out;
-+ err = register_filesystem(&unionfs_fs_type);
-+out:
-+ if (unlikely(err)) {
-+ stop_sioq();
-+ unionfs_destroy_filldir_cache();
-+ unionfs_destroy_inode_cache();
-+ unionfs_destroy_dentry_cache();
-+ }
-+ return err;
-+}
-+
-+static void __exit exit_unionfs_fs(void)
-+{
-+ stop_sioq();
-+ unionfs_destroy_filldir_cache();
-+ unionfs_destroy_inode_cache();
-+ unionfs_destroy_dentry_cache();
-+ unregister_filesystem(&unionfs_fs_type);
-+ pr_info("Completed unionfs module unload\n");
-+}
-+
-+MODULE_AUTHOR("Erez Zadok, Filesystems and Storage Lab, Stony Brook University"
-+ " (http://www.fsl.cs.sunysb.edu)");
-+MODULE_DESCRIPTION("Unionfs " UNIONFS_VERSION
-+ " (http://unionfs.filesystems.org)");
-+MODULE_LICENSE("GPL");
-+
-+module_init(init_unionfs_fs);
-+module_exit(exit_unionfs_fs);
-diff --git a/fs/unionfs/mmap.c b/fs/unionfs/mmap.c
-new file mode 100644
-index 0000000..1f70535
---- /dev/null
-+++ b/fs/unionfs/mmap.c
-@@ -0,0 +1,89 @@
-+/*
-+ * Copyright (c) 2003-2010 Erez Zadok
-+ * Copyright (c) 2003-2006 Charles P. Wright
-+ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2005-2006 Junjiro Okajima
-+ * Copyright (c) 2006 Shaya Potter
-+ * Copyright (c) 2005 Arun M. Krishnakumar
-+ * Copyright (c) 2004-2006 David P. Quigley
-+ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
-+ * Copyright (c) 2003 Puja Gupta
-+ * Copyright (c) 2003 Harikesavan Krishnan
-+ * Copyright (c) 2003-2010 Stony Brook University
-+ * Copyright (c) 2003-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include "union.h"
-+
-+
-+/*
-+ * XXX: we need a dummy readpage handler because generic_file_mmap (which we
-+ * use in unionfs_mmap) checks for the existence of
-+ * mapping->a_ops->readpage, else it returns -ENOEXEC. The VFS will need to
-+ * be fixed to allow a file system to define vm_ops->fault without any
-+ * address_space_ops whatsoever.
-+ *
-+ * Otherwise, we don't want to use our readpage method at all.
-+ */
-+static int unionfs_readpage(struct file *file, struct page *page)
-+{
-+ BUG();
-+ return -EINVAL;
-+}
-+
-+static int unionfs_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-+{
-+ int err;
-+ struct file *file, *lower_file;
-+ const struct vm_operations_struct *lower_vm_ops;
-+ struct vm_area_struct lower_vma;
-+
-+ BUG_ON(!vma);
-+ memcpy(&lower_vma, vma, sizeof(struct vm_area_struct));
-+ file = lower_vma.vm_file;
-+ lower_vm_ops = UNIONFS_F(file)->lower_vm_ops;
-+ BUG_ON(!lower_vm_ops);
-+
-+ lower_file = unionfs_lower_file(file);
-+ BUG_ON(!lower_file);
-+ /*
-+ * XXX: vm_ops->fault may be called in parallel. Because we have to
-+ * resort to temporarily changing the vma->vm_file to point to the
-+ * lower file, a concurrent invocation of unionfs_fault could see a
-+ * different value. In this workaround, we keep a different copy of
-+ * the vma structure in our stack, so we never expose a different
-+ * value of the vma->vm_file called to us, even temporarily. A
-+ * better fix would be to change the calling semantics of ->fault to
-+ * take an explicit file pointer.
-+ */
-+ lower_vma.vm_file = lower_file;
-+ err = lower_vm_ops->fault(&lower_vma, vmf);
-+ return err;
-+}
-+
-+/*
-+ * XXX: the default address_space_ops for unionfs is empty. We cannot set
-+ * our inode->i_mapping->a_ops to NULL because too many code paths expect
-+ * the a_ops vector to be non-NULL.
-+ */
-+struct address_space_operations unionfs_aops = {
-+ /* empty on purpose */
-+};
-+
-+/*
-+ * XXX: we need a second, dummy address_space_ops vector, to be used
-+ * temporarily during unionfs_mmap, because the latter calls
-+ * generic_file_mmap, which checks if ->readpage exists, else returns
-+ * -ENOEXEC.
-+ */
-+struct address_space_operations unionfs_dummy_aops = {
-+ .readpage = unionfs_readpage,
-+};
-+
-+struct vm_operations_struct unionfs_vm_ops = {
-+ .fault = unionfs_fault,
-+};
-diff --git a/fs/unionfs/rdstate.c b/fs/unionfs/rdstate.c
-new file mode 100644
-index 0000000..f745fbc
---- /dev/null
-+++ b/fs/unionfs/rdstate.c
-@@ -0,0 +1,285 @@
-+/*
-+ * Copyright (c) 2003-2010 Erez Zadok
-+ * Copyright (c) 2003-2006 Charles P. Wright
-+ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2005-2006 Junjiro Okajima
-+ * Copyright (c) 2005 Arun M. Krishnakumar
-+ * Copyright (c) 2004-2006 David P. Quigley
-+ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
-+ * Copyright (c) 2003 Puja Gupta
-+ * Copyright (c) 2003 Harikesavan Krishnan
-+ * Copyright (c) 2003-2010 Stony Brook University
-+ * Copyright (c) 2003-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include "union.h"
-+
-+/* This file contains the routines for maintaining readdir state. */
-+
-+/*
-+ * There are two structures here, rdstate which is a hash table
-+ * of the second structure which is a filldir_node.
-+ */
-+
-+/*
-+ * This is a struct kmem_cache for filldir nodes, because we allocate a lot
-+ * of them and they shouldn't waste memory. If the node has a small name
-+ * (as defined by the dentry structure), then we use an inline name to
-+ * preserve kmalloc space.
-+ */
-+static struct kmem_cache *unionfs_filldir_cachep;
-+
-+int unionfs_init_filldir_cache(void)
-+{
-+ unionfs_filldir_cachep =
-+ kmem_cache_create("unionfs_filldir",
-+ sizeof(struct filldir_node), 0,
-+ SLAB_RECLAIM_ACCOUNT, NULL);
-+
-+ return (unionfs_filldir_cachep ? 0 : -ENOMEM);
-+}
-+
-+void unionfs_destroy_filldir_cache(void)
-+{
-+ if (unionfs_filldir_cachep)
-+ kmem_cache_destroy(unionfs_filldir_cachep);
-+}
-+
-+/*
-+ * This is a tuning parameter that tells us roughly how big to make the
-+ * hash table in directory entries per page. This isn't perfect, but
-+ * at least we get a hash table size that shouldn't be too overloaded.
-+ * The following averages are based on my home directory.
-+ * 14.44693 Overall
-+ * 12.29 Single Page Directories
-+ * 117.93 Multi-page directories
-+ */
-+#define DENTPAGE 4096
-+#define DENTPERONEPAGE 12
-+#define DENTPERPAGE 118
-+#define MINHASHSIZE 1
-+static int guesstimate_hash_size(struct inode *inode)
-+{
-+ struct inode *lower_inode;
-+ int bindex;
-+ int hashsize = MINHASHSIZE;
-+
-+ if (UNIONFS_I(inode)->hashsize > 0)
-+ return UNIONFS_I(inode)->hashsize;
-+
-+ for (bindex = ibstart(inode); bindex <= ibend(inode); bindex++) {
-+ lower_inode = unionfs_lower_inode_idx(inode, bindex);
-+ if (!lower_inode)
-+ continue;
-+
-+ if (i_size_read(lower_inode) == DENTPAGE)
-+ hashsize += DENTPERONEPAGE;
-+ else
-+ hashsize += (i_size_read(lower_inode) / DENTPAGE) *
-+ DENTPERPAGE;
-+ }
-+
-+ return hashsize;
-+}
-+
-+int init_rdstate(struct file *file)
-+{
-+ BUG_ON(sizeof(loff_t) !=
-+ (sizeof(unsigned int) + sizeof(unsigned int)));
-+ BUG_ON(UNIONFS_F(file)->rdstate != NULL);
-+
-+ UNIONFS_F(file)->rdstate = alloc_rdstate(file->f_path.dentry->d_inode,
-+ fbstart(file));
-+
-+ return (UNIONFS_F(file)->rdstate ? 0 : -ENOMEM);
-+}
-+
-+struct unionfs_dir_state *find_rdstate(struct inode *inode, loff_t fpos)
-+{
-+ struct unionfs_dir_state *rdstate = NULL;
-+ struct list_head *pos;
-+
-+ spin_lock(&UNIONFS_I(inode)->rdlock);
-+ list_for_each(pos, &UNIONFS_I(inode)->readdircache) {
-+ struct unionfs_dir_state *r =
-+ list_entry(pos, struct unionfs_dir_state, cache);
-+ if (fpos == rdstate2offset(r)) {
-+ UNIONFS_I(inode)->rdcount--;
-+ list_del(&r->cache);
-+ rdstate = r;
-+ break;
-+ }
-+ }
-+ spin_unlock(&UNIONFS_I(inode)->rdlock);
-+ return rdstate;
-+}
-+
-+struct unionfs_dir_state *alloc_rdstate(struct inode *inode, int bindex)
-+{
-+ int i = 0;
-+ int hashsize;
-+ unsigned long mallocsize = sizeof(struct unionfs_dir_state);
-+ struct unionfs_dir_state *rdstate;
-+
-+ hashsize = guesstimate_hash_size(inode);
-+ mallocsize += hashsize * sizeof(struct list_head);
-+ mallocsize = __roundup_pow_of_two(mallocsize);
-+
-+ /* This should give us about 500 entries anyway. */
-+ if (mallocsize > PAGE_SIZE)
-+ mallocsize = PAGE_SIZE;
-+
-+ hashsize = (mallocsize - sizeof(struct unionfs_dir_state)) /
-+ sizeof(struct list_head);
-+
-+ rdstate = kmalloc(mallocsize, GFP_KERNEL);
-+ if (unlikely(!rdstate))
-+ return NULL;
-+
-+ spin_lock(&UNIONFS_I(inode)->rdlock);
-+ if (UNIONFS_I(inode)->cookie >= (MAXRDCOOKIE - 1))
-+ UNIONFS_I(inode)->cookie = 1;
-+ else
-+ UNIONFS_I(inode)->cookie++;
-+
-+ rdstate->cookie = UNIONFS_I(inode)->cookie;
-+ spin_unlock(&UNIONFS_I(inode)->rdlock);
-+ rdstate->offset = 1;
-+ rdstate->access = jiffies;
-+ rdstate->bindex = bindex;
-+ rdstate->dirpos = 0;
-+ rdstate->hashentries = 0;
-+ rdstate->size = hashsize;
-+ for (i = 0; i < rdstate->size; i++)
-+ INIT_LIST_HEAD(&rdstate->list[i]);
-+
-+ return rdstate;
-+}
-+
-+static void free_filldir_node(struct filldir_node *node)
-+{
-+ if (node->namelen >= DNAME_INLINE_LEN_MIN)
-+ kfree(node->name);
-+ kmem_cache_free(unionfs_filldir_cachep, node);
-+}
-+
-+void free_rdstate(struct unionfs_dir_state *state)
-+{
-+ struct filldir_node *tmp;
-+ int i;
-+
-+ for (i = 0; i < state->size; i++) {
-+ struct list_head *head = &(state->list[i]);
-+ struct list_head *pos, *n;
-+
-+ /* traverse the list and deallocate space */
-+ list_for_each_safe(pos, n, head) {
-+ tmp = list_entry(pos, struct filldir_node, file_list);
-+ list_del(&tmp->file_list);
-+ free_filldir_node(tmp);
-+ }
-+ }
-+
-+ kfree(state);
-+}
-+
-+struct filldir_node *find_filldir_node(struct unionfs_dir_state *rdstate,
-+ const char *name, int namelen,
-+ int is_whiteout)
-+{
-+ int index;
-+ unsigned int hash;
-+ struct list_head *head;
-+ struct list_head *pos;
-+ struct filldir_node *cursor = NULL;
-+ int found = 0;
-+
-+ BUG_ON(namelen <= 0);
-+
-+ hash = full_name_hash(name, namelen);
-+ index = hash % rdstate->size;
-+
-+ head = &(rdstate->list[index]);
-+ list_for_each(pos, head) {
-+ cursor = list_entry(pos, struct filldir_node, file_list);
-+
-+ if (cursor->namelen == namelen && cursor->hash == hash &&
-+ !strncmp(cursor->name, name, namelen)) {
-+ /*
-+ * a duplicate exists, and hence no need to create
-+ * entry to the list
-+ */
-+ found = 1;
-+
-+ /*
-+ * if a duplicate is found in this branch, and is
-+ * not due to the caller looking for an entry to
-+ * whiteout, then the file system may be corrupted.
-+ */
-+ if (unlikely(!is_whiteout &&
-+ cursor->bindex == rdstate->bindex))
-+ printk(KERN_ERR "unionfs: filldir: possible "
-+ "I/O error: a file is duplicated "
-+ "in the same branch %d: %s\n",
-+ rdstate->bindex, cursor->name);
-+ break;
-+ }
-+ }
-+
-+ if (!found)
-+ cursor = NULL;
-+
-+ return cursor;
-+}
-+
-+int add_filldir_node(struct unionfs_dir_state *rdstate, const char *name,
-+ int namelen, int bindex, int whiteout)
-+{
-+ struct filldir_node *new;
-+ unsigned int hash;
-+ int index;
-+ int err = 0;
-+ struct list_head *head;
-+
-+ BUG_ON(namelen <= 0);
-+
-+ hash = full_name_hash(name, namelen);
-+ index = hash % rdstate->size;
-+ head = &(rdstate->list[index]);
-+
-+ new = kmem_cache_alloc(unionfs_filldir_cachep, GFP_KERNEL);
-+ if (unlikely(!new)) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ INIT_LIST_HEAD(&new->file_list);
-+ new->namelen = namelen;
-+ new->hash = hash;
-+ new->bindex = bindex;
-+ new->whiteout = whiteout;
-+
-+ if (namelen < DNAME_INLINE_LEN_MIN) {
-+ new->name = new->iname;
-+ } else {
-+ new->name = kmalloc(namelen + 1, GFP_KERNEL);
-+ if (unlikely(!new->name)) {
-+ kmem_cache_free(unionfs_filldir_cachep, new);
-+ new = NULL;
-+ goto out;
-+ }
-+ }
-+
-+ memcpy(new->name, name, namelen);
-+ new->name[namelen] = '\0';
-+
-+ rdstate->hashentries++;
-+
-+ list_add(&(new->file_list), head);
-+out:
-+ return err;
-+}
-diff --git a/fs/unionfs/rename.c b/fs/unionfs/rename.c
-new file mode 100644
-index 0000000..936700e
---- /dev/null
-+++ b/fs/unionfs/rename.c
-@@ -0,0 +1,517 @@
-+/*
-+ * Copyright (c) 2003-2010 Erez Zadok
-+ * Copyright (c) 2003-2006 Charles P. Wright
-+ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2005-2006 Junjiro Okajima
-+ * Copyright (c) 2005 Arun M. Krishnakumar
-+ * Copyright (c) 2004-2006 David P. Quigley
-+ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
-+ * Copyright (c) 2003 Puja Gupta
-+ * Copyright (c) 2003 Harikesavan Krishnan
-+ * Copyright (c) 2003-2010 Stony Brook University
-+ * Copyright (c) 2003-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include "union.h"
-+
-+/*
-+ * This is a helper function for rename, used when rename ends up with hosed
-+ * over dentries and we need to revert.
-+ */
-+static int unionfs_refresh_lower_dentry(struct dentry *dentry,
-+ struct dentry *parent, int bindex)
-+{
-+ struct dentry *lower_dentry;
-+ struct dentry *lower_parent;
-+ int err = 0;
-+
-+ verify_locked(dentry);
-+
-+ lower_parent = unionfs_lower_dentry_idx(parent, bindex);
-+
-+ BUG_ON(!S_ISDIR(lower_parent->d_inode->i_mode));
-+
-+ lower_dentry = lookup_one_len(dentry->d_name.name, lower_parent,
-+ dentry->d_name.len);
-+ if (IS_ERR(lower_dentry)) {
-+ err = PTR_ERR(lower_dentry);
-+ goto out;
-+ }
-+
-+ dput(unionfs_lower_dentry_idx(dentry, bindex));
-+ iput(unionfs_lower_inode_idx(dentry->d_inode, bindex));
-+ unionfs_set_lower_inode_idx(dentry->d_inode, bindex, NULL);
-+
-+ if (!lower_dentry->d_inode) {
-+ dput(lower_dentry);
-+ unionfs_set_lower_dentry_idx(dentry, bindex, NULL);
-+ } else {
-+ unionfs_set_lower_dentry_idx(dentry, bindex, lower_dentry);
-+ unionfs_set_lower_inode_idx(dentry->d_inode, bindex,
-+ igrab(lower_dentry->d_inode));
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+static int __unionfs_rename(struct inode *old_dir, struct dentry *old_dentry,
-+ struct dentry *old_parent,
-+ struct inode *new_dir, struct dentry *new_dentry,
-+ struct dentry *new_parent,
-+ int bindex)
-+{
-+ int err = 0;
-+ struct dentry *lower_old_dentry;
-+ struct dentry *lower_new_dentry;
-+ struct dentry *lower_old_dir_dentry;
-+ struct dentry *lower_new_dir_dentry;
-+ struct dentry *trap;
-+
-+ lower_new_dentry = unionfs_lower_dentry_idx(new_dentry, bindex);
-+ lower_old_dentry = unionfs_lower_dentry_idx(old_dentry, bindex);
-+
-+ if (!lower_new_dentry) {
-+ lower_new_dentry =
-+ create_parents(new_parent->d_inode,
-+ new_dentry, new_dentry->d_name.name,
-+ bindex);
-+ if (IS_ERR(lower_new_dentry)) {
-+ err = PTR_ERR(lower_new_dentry);
-+ if (IS_COPYUP_ERR(err))
-+ goto out;
-+ printk(KERN_ERR "unionfs: error creating directory "
-+ "tree for rename, bindex=%d err=%d\n",
-+ bindex, err);
-+ goto out;
-+ }
-+ }
-+
-+ /* check for and remove whiteout, if any */
-+ err = check_unlink_whiteout(new_dentry, lower_new_dentry, bindex);
-+ if (err > 0) /* ignore if whiteout found and successfully removed */
-+ err = 0;
-+ if (err)
-+ goto out;
-+
-+ /* check of old_dentry branch is writable */
-+ err = is_robranch_super(old_dentry->d_sb, bindex);
-+ if (err)
-+ goto out;
-+
-+ dget(lower_old_dentry);
-+ dget(lower_new_dentry);
-+ lower_old_dir_dentry = dget_parent(lower_old_dentry);
-+ lower_new_dir_dentry = dget_parent(lower_new_dentry);
-+
-+ trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
-+ /* source should not be ancenstor of target */
-+ if (trap == lower_old_dentry) {
-+ err = -EINVAL;
-+ goto out_err_unlock;
-+ }
-+ /* target should not be ancenstor of source */
-+ if (trap == lower_new_dentry) {
-+ err = -ENOTEMPTY;
-+ goto out_err_unlock;
-+ }
-+ err = vfs_rename(lower_old_dir_dentry->d_inode, lower_old_dentry,
-+ lower_new_dir_dentry->d_inode, lower_new_dentry);
-+out_err_unlock:
-+ if (!err) {
-+ /* update parent dir times */
-+ fsstack_copy_attr_times(old_dir, lower_old_dir_dentry->d_inode);
-+ fsstack_copy_attr_times(new_dir, lower_new_dir_dentry->d_inode);
-+ }
-+ unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
-+
-+ dput(lower_old_dir_dentry);
-+ dput(lower_new_dir_dentry);
-+ dput(lower_old_dentry);
-+ dput(lower_new_dentry);
-+
-+out:
-+ if (!err) {
-+ /* Fixup the new_dentry. */
-+ if (bindex < dbstart(new_dentry))
-+ dbstart(new_dentry) = bindex;
-+ else if (bindex > dbend(new_dentry))
-+ dbend(new_dentry) = bindex;
-+ }
-+
-+ return err;
-+}
-+
-+/*
-+ * Main rename code. This is sufficiently complex, that it's documented in
-+ * Documentation/filesystems/unionfs/rename.txt. This routine calls
-+ * __unionfs_rename() above to perform some of the work.
-+ */
-+static int do_unionfs_rename(struct inode *old_dir,
-+ struct dentry *old_dentry,
-+ struct dentry *old_parent,
-+ struct inode *new_dir,
-+ struct dentry *new_dentry,
-+ struct dentry *new_parent)
-+{
-+ int err = 0;
-+ int bindex;
-+ int old_bstart, old_bend;
-+ int new_bstart, new_bend;
-+ int do_copyup = -1;
-+ int local_err = 0;
-+ int eio = 0;
-+ int revert = 0;
-+
-+ old_bstart = dbstart(old_dentry);
-+ old_bend = dbend(old_dentry);
-+
-+ new_bstart = dbstart(new_dentry);
-+ new_bend = dbend(new_dentry);
-+
-+ /* Rename source to destination. */
-+ err = __unionfs_rename(old_dir, old_dentry, old_parent,
-+ new_dir, new_dentry, new_parent,
-+ old_bstart);
-+ if (err) {
-+ if (!IS_COPYUP_ERR(err))
-+ goto out;
-+ do_copyup = old_bstart - 1;
-+ } else {
-+ revert = 1;
-+ }
-+
-+ /*
-+ * Unlink all instances of destination that exist to the left of
-+ * bstart of source. On error, revert back, goto out.
-+ */
-+ for (bindex = old_bstart - 1; bindex >= new_bstart; bindex--) {
-+ struct dentry *unlink_dentry;
-+ struct dentry *unlink_dir_dentry;
-+
-+ BUG_ON(bindex < 0);
-+ unlink_dentry = unionfs_lower_dentry_idx(new_dentry, bindex);
-+ if (!unlink_dentry)
-+ continue;
-+
-+ unlink_dir_dentry = lock_parent(unlink_dentry);
-+ err = is_robranch_super(old_dir->i_sb, bindex);
-+ if (!err)
-+ err = vfs_unlink(unlink_dir_dentry->d_inode,
-+ unlink_dentry);
-+
-+ fsstack_copy_attr_times(new_parent->d_inode,
-+ unlink_dir_dentry->d_inode);
-+ /* propagate number of hard-links */
-+ new_parent->d_inode->i_nlink =
-+ unionfs_get_nlinks(new_parent->d_inode);
-+
-+ unlock_dir(unlink_dir_dentry);
-+ if (!err) {
-+ if (bindex != new_bstart) {
-+ dput(unlink_dentry);
-+ unionfs_set_lower_dentry_idx(new_dentry,
-+ bindex, NULL);
-+ }
-+ } else if (IS_COPYUP_ERR(err)) {
-+ do_copyup = bindex - 1;
-+ } else if (revert) {
-+ goto revert;
-+ }
-+ }
-+
-+ if (do_copyup != -1) {
-+ for (bindex = do_copyup; bindex >= 0; bindex--) {
-+ /*
-+ * copyup the file into some left directory, so that
-+ * you can rename it
-+ */
-+ err = copyup_dentry(old_parent->d_inode,
-+ old_dentry, old_bstart, bindex,
-+ old_dentry->d_name.name,
-+ old_dentry->d_name.len, NULL,
-+ i_size_read(old_dentry->d_inode));
-+ /* if copyup failed, try next branch to the left */
-+ if (err)
-+ continue;
-+ /*
-+ * create whiteout before calling __unionfs_rename
-+ * because the latter will change the old_dentry's
-+ * lower name and parent dir, resulting in the
-+ * whiteout getting created in the wrong dir.
-+ */
-+ err = create_whiteout(old_dentry, bindex);
-+ if (err) {
-+ printk(KERN_ERR "unionfs: can't create a "
-+ "whiteout for %s in rename (err=%d)\n",
-+ old_dentry->d_name.name, err);
-+ continue;
-+ }
-+ err = __unionfs_rename(old_dir, old_dentry, old_parent,
-+ new_dir, new_dentry, new_parent,
-+ bindex);
-+ break;
-+ }
-+ }
-+
-+ /* make it opaque */
-+ if (S_ISDIR(old_dentry->d_inode->i_mode)) {
-+ err = make_dir_opaque(old_dentry, dbstart(old_dentry));
-+ if (err)
-+ goto revert;
-+ }
-+
-+ /*
-+ * Create whiteout for source, only if:
-+ * (1) There is more than one underlying instance of source.
-+ * (We did a copy_up is taken care of above).
-+ */
-+ if ((old_bstart != old_bend) && (do_copyup == -1)) {
-+ err = create_whiteout(old_dentry, old_bstart);
-+ if (err) {
-+ /* can't fix anything now, so we exit with -EIO */
-+ printk(KERN_ERR "unionfs: can't create a whiteout for "
-+ "%s in rename!\n", old_dentry->d_name.name);
-+ err = -EIO;
-+ }
-+ }
-+
-+out:
-+ return err;
-+
-+revert:
-+ /* Do revert here. */
-+ local_err = unionfs_refresh_lower_dentry(new_dentry, new_parent,
-+ old_bstart);
-+ if (local_err) {
-+ printk(KERN_ERR "unionfs: revert failed in rename: "
-+ "the new refresh failed\n");
-+ eio = -EIO;
-+ }
-+
-+ local_err = unionfs_refresh_lower_dentry(old_dentry, old_parent,
-+ old_bstart);
-+ if (local_err) {
-+ printk(KERN_ERR "unionfs: revert failed in rename: "
-+ "the old refresh failed\n");
-+ eio = -EIO;
-+ goto revert_out;
-+ }
-+
-+ if (!unionfs_lower_dentry_idx(new_dentry, bindex) ||
-+ !unionfs_lower_dentry_idx(new_dentry, bindex)->d_inode) {
-+ printk(KERN_ERR "unionfs: revert failed in rename: "
-+ "the object disappeared from under us!\n");
-+ eio = -EIO;
-+ goto revert_out;
-+ }
-+
-+ if (unionfs_lower_dentry_idx(old_dentry, bindex) &&
-+ unionfs_lower_dentry_idx(old_dentry, bindex)->d_inode) {
-+ printk(KERN_ERR "unionfs: revert failed in rename: "
-+ "the object was created underneath us!\n");
-+ eio = -EIO;
-+ goto revert_out;
-+ }
-+
-+ local_err = __unionfs_rename(new_dir, new_dentry, new_parent,
-+ old_dir, old_dentry, old_parent,
-+ old_bstart);
-+
-+ /* If we can't fix it, then we cop-out with -EIO. */
-+ if (local_err) {
-+ printk(KERN_ERR "unionfs: revert failed in rename!\n");
-+ eio = -EIO;
-+ }
-+
-+ local_err = unionfs_refresh_lower_dentry(new_dentry, new_parent,
-+ bindex);
-+ if (local_err)
-+ eio = -EIO;
-+ local_err = unionfs_refresh_lower_dentry(old_dentry, old_parent,
-+ bindex);
-+ if (local_err)
-+ eio = -EIO;
-+
-+revert_out:
-+ if (eio)
-+ err = eio;
-+ return err;
-+}
-+
-+/*
-+ * We can't copyup a directory, because it may involve huge numbers of
-+ * children, etc. Doing that in the kernel would be bad, so instead we
-+ * return EXDEV to the user-space utility that caused this, and let the
-+ * user-space recurse and ask us to copy up each file separately.
-+ */
-+static int may_rename_dir(struct dentry *dentry, struct dentry *parent)
-+{
-+ int err, bstart;
-+
-+ err = check_empty(dentry, parent, NULL);
-+ if (err == -ENOTEMPTY) {
-+ if (is_robranch(dentry))
-+ return -EXDEV;
-+ } else if (err) {
-+ return err;
-+ }
-+
-+ bstart = dbstart(dentry);
-+ if (dbend(dentry) == bstart || dbopaque(dentry) == bstart)
-+ return 0;
-+
-+ dbstart(dentry) = bstart + 1;
-+ err = check_empty(dentry, parent, NULL);
-+ dbstart(dentry) = bstart;
-+ if (err == -ENOTEMPTY)
-+ err = -EXDEV;
-+ return err;
-+}
-+
-+/*
-+ * The locking rules in unionfs_rename are complex. We could use a simpler
-+ * superblock-level name-space lock for renames and copy-ups.
-+ */
-+int unionfs_rename(struct inode *old_dir, struct dentry *old_dentry,
-+ struct inode *new_dir, struct dentry *new_dentry)
-+{
-+ int err = 0;
-+ struct dentry *wh_dentry;
-+ struct dentry *old_parent, *new_parent;
-+ int valid = true;
-+
-+ unionfs_read_lock(old_dentry->d_sb, UNIONFS_SMUTEX_CHILD);
-+ old_parent = dget_parent(old_dentry);
-+ new_parent = dget_parent(new_dentry);
-+ /* un/lock parent dentries only if they differ from old/new_dentry */
-+ if (old_parent != old_dentry &&
-+ old_parent != new_dentry)
-+ unionfs_lock_dentry(old_parent, UNIONFS_DMUTEX_REVAL_PARENT);
-+ if (new_parent != old_dentry &&
-+ new_parent != new_dentry &&
-+ new_parent != old_parent)
-+ unionfs_lock_dentry(new_parent, UNIONFS_DMUTEX_REVAL_CHILD);
-+ unionfs_double_lock_dentry(old_dentry, new_dentry);
-+
-+ valid = __unionfs_d_revalidate(old_dentry, old_parent, false);
-+ if (!valid) {
-+ err = -ESTALE;
-+ goto out;
-+ }
-+ if (!d_deleted(new_dentry) && new_dentry->d_inode) {
-+ valid = __unionfs_d_revalidate(new_dentry, new_parent, false);
-+ if (!valid) {
-+ err = -ESTALE;
-+ goto out;
-+ }
-+ }
-+
-+ if (!S_ISDIR(old_dentry->d_inode->i_mode))
-+ err = unionfs_partial_lookup(old_dentry, old_parent);
-+ else
-+ err = may_rename_dir(old_dentry, old_parent);
-+
-+ if (err)
-+ goto out;
-+
-+ err = unionfs_partial_lookup(new_dentry, new_parent);
-+ if (err)
-+ goto out;
-+
-+ /*
-+ * if new_dentry is already lower because of whiteout,
-+ * simply override it even if the whited-out dir is not empty.
-+ */
-+ wh_dentry = find_first_whiteout(new_dentry);
-+ if (!IS_ERR(wh_dentry)) {
-+ dput(wh_dentry);
-+ } else if (new_dentry->d_inode) {
-+ if (S_ISDIR(old_dentry->d_inode->i_mode) !=
-+ S_ISDIR(new_dentry->d_inode->i_mode)) {
-+ err = S_ISDIR(old_dentry->d_inode->i_mode) ?
-+ -ENOTDIR : -EISDIR;
-+ goto out;
-+ }
-+
-+ if (S_ISDIR(new_dentry->d_inode->i_mode)) {
-+ struct unionfs_dir_state *namelist = NULL;
-+ /* check if this unionfs directory is empty or not */
-+ err = check_empty(new_dentry, new_parent, &namelist);
-+ if (err)
-+ goto out;
-+
-+ if (!is_robranch(new_dentry))
-+ err = delete_whiteouts(new_dentry,
-+ dbstart(new_dentry),
-+ namelist);
-+
-+ free_rdstate(namelist);
-+
-+ if (err)
-+ goto out;
-+ }
-+ }
-+
-+ err = do_unionfs_rename(old_dir, old_dentry, old_parent,
-+ new_dir, new_dentry, new_parent);
-+ if (err)
-+ goto out;
-+
-+ /*
-+ * force re-lookup since the dir on ro branch is not renamed, and
-+ * lower dentries still indicate the un-renamed ones.
-+ */
-+ if (S_ISDIR(old_dentry->d_inode->i_mode))
-+ atomic_dec(&UNIONFS_D(old_dentry)->generation);
-+ else
-+ unionfs_postcopyup_release(old_dentry);
-+ if (new_dentry->d_inode && !S_ISDIR(new_dentry->d_inode->i_mode)) {
-+ unionfs_postcopyup_release(new_dentry);
-+ unionfs_postcopyup_setmnt(new_dentry);
-+ if (!unionfs_lower_inode(new_dentry->d_inode)) {
-+ /*
-+ * If we get here, it means that no copyup was
-+ * needed, and that a file by the old name already
-+ * existing on the destination branch; that file got
-+ * renamed earlier in this function, so all we need
-+ * to do here is set the lower inode.
-+ */
-+ struct inode *inode;
-+ inode = unionfs_lower_inode(old_dentry->d_inode);
-+ igrab(inode);
-+ unionfs_set_lower_inode_idx(new_dentry->d_inode,
-+ dbstart(new_dentry),
-+ inode);
-+ }
-+ }
-+ /* if all of this renaming succeeded, update our times */
-+ unionfs_copy_attr_times(old_dentry->d_inode);
-+ unionfs_copy_attr_times(new_dentry->d_inode);
-+ unionfs_check_inode(old_dir);
-+ unionfs_check_inode(new_dir);
-+ unionfs_check_dentry(old_dentry);
-+ unionfs_check_dentry(new_dentry);
-+
-+out:
-+ if (err) /* clear the new_dentry stuff created */
-+ d_drop(new_dentry);
-+
-+ unionfs_double_unlock_dentry(old_dentry, new_dentry);
-+ if (new_parent != old_dentry &&
-+ new_parent != new_dentry &&
-+ new_parent != old_parent)
-+ unionfs_unlock_dentry(new_parent);
-+ if (old_parent != old_dentry &&
-+ old_parent != new_dentry)
-+ unionfs_unlock_dentry(old_parent);
-+ dput(new_parent);
-+ dput(old_parent);
-+ unionfs_read_unlock(old_dentry->d_sb);
-+
-+ return err;
-+}
-diff --git a/fs/unionfs/sioq.c b/fs/unionfs/sioq.c
-new file mode 100644
-index 0000000..760c580
---- /dev/null
-+++ b/fs/unionfs/sioq.c
-@@ -0,0 +1,101 @@
-+/*
-+ * Copyright (c) 2006-2010 Erez Zadok
-+ * Copyright (c) 2006 Charles P. Wright
-+ * Copyright (c) 2006-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2006 Junjiro Okajima
-+ * Copyright (c) 2006 David P. Quigley
-+ * Copyright (c) 2006-2010 Stony Brook University
-+ * Copyright (c) 2006-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include "union.h"
-+
-+/*
-+ * Super-user IO work Queue - sometimes we need to perform actions which
-+ * would fail due to the unix permissions on the parent directory (e.g.,
-+ * rmdir a directory which appears empty, but in reality contains
-+ * whiteouts).
-+ */
-+
-+static struct workqueue_struct *superio_workqueue;
-+
-+int __init init_sioq(void)
-+{
-+ int err;
-+
-+ superio_workqueue = create_workqueue("unionfs_siod");
-+ if (!IS_ERR(superio_workqueue))
-+ return 0;
-+
-+ err = PTR_ERR(superio_workqueue);
-+ printk(KERN_ERR "unionfs: create_workqueue failed %d\n", err);
-+ superio_workqueue = NULL;
-+ return err;
-+}
-+
-+void stop_sioq(void)
-+{
-+ if (superio_workqueue)
-+ destroy_workqueue(superio_workqueue);
-+}
-+
-+void run_sioq(work_func_t func, struct sioq_args *args)
-+{
-+ INIT_WORK(&args->work, func);
-+
-+ init_completion(&args->comp);
-+ while (!queue_work(superio_workqueue, &args->work)) {
-+ /* TODO: do accounting if needed */
-+ schedule();
-+ }
-+ wait_for_completion(&args->comp);
-+}
-+
-+void __unionfs_create(struct work_struct *work)
-+{
-+ struct sioq_args *args = container_of(work, struct sioq_args, work);
-+ struct create_args *c = &args->create;
-+
-+ args->err = vfs_create(c->parent, c->dentry, c->mode, c->nd);
-+ complete(&args->comp);
-+}
-+
-+void __unionfs_mkdir(struct work_struct *work)
-+{
-+ struct sioq_args *args = container_of(work, struct sioq_args, work);
-+ struct mkdir_args *m = &args->mkdir;
-+
-+ args->err = vfs_mkdir(m->parent, m->dentry, m->mode);
-+ complete(&args->comp);
-+}
-+
-+void __unionfs_mknod(struct work_struct *work)
-+{
-+ struct sioq_args *args = container_of(work, struct sioq_args, work);
-+ struct mknod_args *m = &args->mknod;
-+
-+ args->err = vfs_mknod(m->parent, m->dentry, m->mode, m->dev);
-+ complete(&args->comp);
-+}
-+
-+void __unionfs_symlink(struct work_struct *work)
-+{
-+ struct sioq_args *args = container_of(work, struct sioq_args, work);
-+ struct symlink_args *s = &args->symlink;
-+
-+ args->err = vfs_symlink(s->parent, s->dentry, s->symbuf);
-+ complete(&args->comp);
-+}
-+
-+void __unionfs_unlink(struct work_struct *work)
-+{
-+ struct sioq_args *args = container_of(work, struct sioq_args, work);
-+ struct unlink_args *u = &args->unlink;
-+
-+ args->err = vfs_unlink(u->parent, u->dentry);
-+ complete(&args->comp);
-+}
-diff --git a/fs/unionfs/sioq.h b/fs/unionfs/sioq.h
-new file mode 100644
-index 0000000..b26d248
---- /dev/null
-+++ b/fs/unionfs/sioq.h
-@@ -0,0 +1,91 @@
-+/*
-+ * Copyright (c) 2006-2010 Erez Zadok
-+ * Copyright (c) 2006 Charles P. Wright
-+ * Copyright (c) 2006-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2006 Junjiro Okajima
-+ * Copyright (c) 2006 David P. Quigley
-+ * Copyright (c) 2006-2010 Stony Brook University
-+ * Copyright (c) 2006-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#ifndef _SIOQ_H
-+#define _SIOQ_H
-+
-+struct deletewh_args {
-+ struct unionfs_dir_state *namelist;
-+ struct dentry *dentry;
-+ int bindex;
-+};
-+
-+struct is_opaque_args {
-+ struct dentry *dentry;
-+};
-+
-+struct create_args {
-+ struct inode *parent;
-+ struct dentry *dentry;
-+ umode_t mode;
-+ struct nameidata *nd;
-+};
-+
-+struct mkdir_args {
-+ struct inode *parent;
-+ struct dentry *dentry;
-+ umode_t mode;
-+};
-+
-+struct mknod_args {
-+ struct inode *parent;
-+ struct dentry *dentry;
-+ umode_t mode;
-+ dev_t dev;
-+};
-+
-+struct symlink_args {
-+ struct inode *parent;
-+ struct dentry *dentry;
-+ char *symbuf;
-+};
-+
-+struct unlink_args {
-+ struct inode *parent;
-+ struct dentry *dentry;
-+};
-+
-+
-+struct sioq_args {
-+ struct completion comp;
-+ struct work_struct work;
-+ int err;
-+ void *ret;
-+
-+ union {
-+ struct deletewh_args deletewh;
-+ struct is_opaque_args is_opaque;
-+ struct create_args create;
-+ struct mkdir_args mkdir;
-+ struct mknod_args mknod;
-+ struct symlink_args symlink;
-+ struct unlink_args unlink;
-+ };
-+};
-+
-+/* Extern definitions for SIOQ functions */
-+extern int __init init_sioq(void);
-+extern void stop_sioq(void);
-+extern void run_sioq(work_func_t func, struct sioq_args *args);
-+
-+/* Extern definitions for our privilege escalation helpers */
-+extern void __unionfs_create(struct work_struct *work);
-+extern void __unionfs_mkdir(struct work_struct *work);
-+extern void __unionfs_mknod(struct work_struct *work);
-+extern void __unionfs_symlink(struct work_struct *work);
-+extern void __unionfs_unlink(struct work_struct *work);
-+extern void __delete_whiteouts(struct work_struct *work);
-+extern void __is_opaque_dir(struct work_struct *work);
-+
-+#endif /* not _SIOQ_H */
-diff --git a/fs/unionfs/subr.c b/fs/unionfs/subr.c
-new file mode 100644
-index 0000000..570a344
---- /dev/null
-+++ b/fs/unionfs/subr.c
-@@ -0,0 +1,95 @@
-+/*
-+ * Copyright (c) 2003-2010 Erez Zadok
-+ * Copyright (c) 2003-2006 Charles P. Wright
-+ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2005-2006 Junjiro Okajima
-+ * Copyright (c) 2005 Arun M. Krishnakumar
-+ * Copyright (c) 2004-2006 David P. Quigley
-+ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
-+ * Copyright (c) 2003 Puja Gupta
-+ * Copyright (c) 2003 Harikesavan Krishnan
-+ * Copyright (c) 2003-2010 Stony Brook University
-+ * Copyright (c) 2003-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include "union.h"
-+
-+/*
-+ * returns the right n_link value based on the inode type
-+ */
-+int unionfs_get_nlinks(const struct inode *inode)
-+{
-+ /* don't bother to do all the work since we're unlinked */
-+ if (inode->i_nlink == 0)
-+ return 0;
-+
-+ if (!S_ISDIR(inode->i_mode))
-+ return unionfs_lower_inode(inode)->i_nlink;
-+
-+ /*
-+ * For directories, we return 1. The only place that could cares
-+ * about links is readdir, and there's d_type there so even that
-+ * doesn't matter.
-+ */
-+ return 1;
-+}
-+
-+/* copy a/m/ctime from the lower branch with the newest times */
-+void unionfs_copy_attr_times(struct inode *upper)
-+{
-+ int bindex;
-+ struct inode *lower;
-+
-+ if (!upper)
-+ return;
-+ if (ibstart(upper) < 0) {
-+#ifdef CONFIG_UNION_FS_DEBUG
-+ WARN_ON(ibstart(upper) < 0);
-+#endif /* CONFIG_UNION_FS_DEBUG */
-+ return;
-+ }
-+ for (bindex = ibstart(upper); bindex <= ibend(upper); bindex++) {
-+ lower = unionfs_lower_inode_idx(upper, bindex);
-+ if (!lower)
-+ continue; /* not all lower dir objects may exist */
-+ if (unlikely(timespec_compare(&upper->i_mtime,
-+ &lower->i_mtime) < 0))
-+ upper->i_mtime = lower->i_mtime;
-+ if (unlikely(timespec_compare(&upper->i_ctime,
-+ &lower->i_ctime) < 0))
-+ upper->i_ctime = lower->i_ctime;
-+ if (unlikely(timespec_compare(&upper->i_atime,
-+ &lower->i_atime) < 0))
-+ upper->i_atime = lower->i_atime;
-+ }
-+}
-+
-+/*
-+ * A unionfs/fanout version of fsstack_copy_attr_all. Uses a
-+ * unionfs_get_nlinks to properly calcluate the number of links to a file.
-+ * Also, copies the max() of all a/m/ctimes for all lower inodes (which is
-+ * important if the lower inode is a directory type)
-+ */
-+void unionfs_copy_attr_all(struct inode *dest,
-+ const struct inode *src)
-+{
-+ dest->i_mode = src->i_mode;
-+ dest->i_uid = src->i_uid;
-+ dest->i_gid = src->i_gid;
-+ dest->i_rdev = src->i_rdev;
-+
-+ unionfs_copy_attr_times(dest);
-+
-+ dest->i_blkbits = src->i_blkbits;
-+ dest->i_flags = src->i_flags;
-+
-+ /*
-+ * Update the nlinks AFTER updating the above fields, because the
-+ * get_links callback may depend on them.
-+ */
-+ dest->i_nlink = unionfs_get_nlinks(dest);
-+}
-diff --git a/fs/unionfs/super.c b/fs/unionfs/super.c
-new file mode 100644
-index 0000000..45bb9bf
---- /dev/null
-+++ b/fs/unionfs/super.c
-@@ -0,0 +1,1029 @@
-+/*
-+ * Copyright (c) 2003-2010 Erez Zadok
-+ * Copyright (c) 2003-2006 Charles P. Wright
-+ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2005-2006 Junjiro Okajima
-+ * Copyright (c) 2005 Arun M. Krishnakumar
-+ * Copyright (c) 2004-2006 David P. Quigley
-+ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
-+ * Copyright (c) 2003 Puja Gupta
-+ * Copyright (c) 2003 Harikesavan Krishnan
-+ * Copyright (c) 2003-2010 Stony Brook University
-+ * Copyright (c) 2003-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include "union.h"
-+
-+/*
-+ * The inode cache is used with alloc_inode for both our inode info and the
-+ * vfs inode.
-+ */
-+static struct kmem_cache *unionfs_inode_cachep;
-+
-+struct inode *unionfs_iget(struct super_block *sb, unsigned long ino)
-+{
-+ int size;
-+ struct unionfs_inode_info *info;
-+ struct inode *inode;
-+
-+ inode = iget_locked(sb, ino);
-+ if (!inode)
-+ return ERR_PTR(-ENOMEM);
-+ if (!(inode->i_state & I_NEW))
-+ return inode;
-+
-+ info = UNIONFS_I(inode);
-+ memset(info, 0, offsetof(struct unionfs_inode_info, vfs_inode));
-+ info->bstart = -1;
-+ info->bend = -1;
-+ atomic_set(&info->generation,
-+ atomic_read(&UNIONFS_SB(inode->i_sb)->generation));
-+ spin_lock_init(&info->rdlock);
-+ info->rdcount = 1;
-+ info->hashsize = -1;
-+ INIT_LIST_HEAD(&info->readdircache);
-+
-+ size = sbmax(inode->i_sb) * sizeof(struct inode *);
-+ info->lower_inodes = kzalloc(size, GFP_KERNEL);
-+ if (unlikely(!info->lower_inodes)) {
-+ printk(KERN_CRIT "unionfs: no kernel memory when allocating "
-+ "lower-pointer array!\n");
-+ iget_failed(inode);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ inode->i_version++;
-+ inode->i_op = &unionfs_main_iops;
-+ inode->i_fop = &unionfs_main_fops;
-+
-+ inode->i_mapping->a_ops = &unionfs_aops;
-+
-+ /*
-+ * reset times so unionfs_copy_attr_all can keep out time invariants
-+ * right (upper inode time being the max of all lower ones).
-+ */
-+ inode->i_atime.tv_sec = inode->i_atime.tv_nsec = 0;
-+ inode->i_mtime.tv_sec = inode->i_mtime.tv_nsec = 0;
-+ inode->i_ctime.tv_sec = inode->i_ctime.tv_nsec = 0;
-+ unlock_new_inode(inode);
-+ return inode;
-+}
-+
-+/*
-+ * final actions when unmounting a file system
-+ *
-+ * No need to lock rwsem.
-+ */
-+static void unionfs_put_super(struct super_block *sb)
-+{
-+ int bindex, bstart, bend;
-+ struct unionfs_sb_info *spd;
-+ int leaks = 0;
-+
-+ spd = UNIONFS_SB(sb);
-+ if (!spd)
-+ return;
-+
-+ bstart = sbstart(sb);
-+ bend = sbend(sb);
-+
-+ /* Make sure we have no leaks of branchget/branchput. */
-+ for (bindex = bstart; bindex <= bend; bindex++)
-+ if (unlikely(branch_count(sb, bindex) != 0)) {
-+ printk(KERN_CRIT
-+ "unionfs: branch %d has %d references left!\n",
-+ bindex, branch_count(sb, bindex));
-+ leaks = 1;
-+ }
-+ WARN_ON(leaks != 0);
-+
-+ /* decrement lower super references */
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ struct super_block *s;
-+ s = unionfs_lower_super_idx(sb, bindex);
-+ unionfs_set_lower_super_idx(sb, bindex, NULL);
-+ atomic_dec(&s->s_active);
-+ }
-+
-+ kfree(spd->dev_name);
-+ kfree(spd->data);
-+ kfree(spd);
-+ sb->s_fs_info = NULL;
-+}
-+
-+/*
-+ * Since people use this to answer the "How big of a file can I write?"
-+ * question, we report the size of the highest priority branch as the size of
-+ * the union.
-+ */
-+static int unionfs_statfs(struct dentry *dentry, struct kstatfs *buf)
-+{
-+ int err = 0;
-+ struct super_block *sb;
-+ struct dentry *lower_dentry;
-+ struct dentry *parent;
-+ struct path lower_path;
-+ bool valid;
-+
-+ sb = dentry->d_sb;
-+
-+ unionfs_read_lock(sb, UNIONFS_SMUTEX_CHILD);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ valid = __unionfs_d_revalidate(dentry, parent, false);
-+ if (unlikely(!valid)) {
-+ err = -ESTALE;
-+ goto out;
-+ }
-+ unionfs_check_dentry(dentry);
-+
-+ lower_dentry = unionfs_lower_dentry(sb->s_root);
-+ lower_path.dentry = lower_dentry;
-+ lower_path.mnt = unionfs_mntget(sb->s_root, 0);
-+ err = vfs_statfs(&lower_path, buf);
-+ mntput(lower_path.mnt);
-+
-+ /* set return buf to our f/s to avoid confusing user-level utils */
-+ buf->f_type = UNIONFS_SUPER_MAGIC;
-+ /*
-+ * Our maximum file name can is shorter by a few bytes because every
-+ * file name could potentially be whited-out.
-+ *
-+ * XXX: this restriction goes away with ODF.
-+ */
-+ unionfs_set_max_namelen(&buf->f_namelen);
-+
-+ /*
-+ * reset two fields to avoid confusing user-land.
-+ * XXX: is this still necessary?
-+ */
-+ memset(&buf->f_fsid, 0, sizeof(__kernel_fsid_t));
-+ memset(&buf->f_spare, 0, sizeof(buf->f_spare));
-+
-+out:
-+ unionfs_check_dentry(dentry);
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(sb);
-+ return err;
-+}
-+
-+/* handle mode changing during remount */
-+static noinline_for_stack int do_remount_mode_option(
-+ char *optarg,
-+ int cur_branches,
-+ struct unionfs_data *new_data,
-+ struct path *new_lower_paths)
-+{
-+ int err = -EINVAL;
-+ int perms, idx;
-+ char *modename = strchr(optarg, '=');
-+ struct nameidata nd;
-+
-+ /* by now, optarg contains the branch name */
-+ if (!*optarg) {
-+ printk(KERN_ERR
-+ "unionfs: no branch specified for mode change\n");
-+ goto out;
-+ }
-+ if (!modename) {
-+ printk(KERN_ERR "unionfs: branch \"%s\" requires a mode\n",
-+ optarg);
-+ goto out;
-+ }
-+ *modename++ = '\0';
-+ err = parse_branch_mode(modename, &perms);
-+ if (err) {
-+ printk(KERN_ERR "unionfs: invalid mode \"%s\" for \"%s\"\n",
-+ modename, optarg);
-+ goto out;
-+ }
-+
-+ /*
-+ * Find matching branch index. For now, this assumes that nothing
-+ * has been mounted on top of this Unionfs stack. Once we have /odf
-+ * and cache-coherency resolved, we'll address the branch-path
-+ * uniqueness.
-+ */
-+ err = path_lookup(optarg, LOOKUP_FOLLOW, &nd);
-+ if (err) {
-+ printk(KERN_ERR "unionfs: error accessing "
-+ "lower directory \"%s\" (error %d)\n",
-+ optarg, err);
-+ goto out;
-+ }
-+ for (idx = 0; idx < cur_branches; idx++)
-+ if (nd.path.mnt == new_lower_paths[idx].mnt &&
-+ nd.path.dentry == new_lower_paths[idx].dentry)
-+ break;
-+ path_put(&nd.path); /* no longer needed */
-+ if (idx == cur_branches) {
-+ err = -ENOENT; /* err may have been reset above */
-+ printk(KERN_ERR "unionfs: branch \"%s\" "
-+ "not found\n", optarg);
-+ goto out;
-+ }
-+ /* check/change mode for existing branch */
-+ /* we don't warn if perms==branchperms */
-+ new_data[idx].branchperms = perms;
-+ err = 0;
-+out:
-+ return err;
-+}
-+
-+/* handle branch deletion during remount */
-+static noinline_for_stack int do_remount_del_option(
-+ char *optarg, int cur_branches,
-+ struct unionfs_data *new_data,
-+ struct path *new_lower_paths)
-+{
-+ int err = -EINVAL;
-+ int idx;
-+ struct nameidata nd;
-+
-+ /* optarg contains the branch name to delete */
-+
-+ /*
-+ * Find matching branch index. For now, this assumes that nothing
-+ * has been mounted on top of this Unionfs stack. Once we have /odf
-+ * and cache-coherency resolved, we'll address the branch-path
-+ * uniqueness.
-+ */
-+ err = path_lookup(optarg, LOOKUP_FOLLOW, &nd);
-+ if (err) {
-+ printk(KERN_ERR "unionfs: error accessing "
-+ "lower directory \"%s\" (error %d)\n",
-+ optarg, err);
-+ goto out;
-+ }
-+ for (idx = 0; idx < cur_branches; idx++)
-+ if (nd.path.mnt == new_lower_paths[idx].mnt &&
-+ nd.path.dentry == new_lower_paths[idx].dentry)
-+ break;
-+ path_put(&nd.path); /* no longer needed */
-+ if (idx == cur_branches) {
-+ printk(KERN_ERR "unionfs: branch \"%s\" "
-+ "not found\n", optarg);
-+ err = -ENOENT;
-+ goto out;
-+ }
-+ /* check if there are any open files on the branch to be deleted */
-+ if (atomic_read(&new_data[idx].open_files) > 0) {
-+ err = -EBUSY;
-+ goto out;
-+ }
-+
-+ /*
-+ * Now we have to delete the branch. First, release any handles it
-+ * has. Then, move the remaining array indexes past "idx" in
-+ * new_data and new_lower_paths one to the left. Finally, adjust
-+ * cur_branches.
-+ */
-+ path_put(&new_lower_paths[idx]);
-+
-+ if (idx < cur_branches - 1) {
-+ /* if idx==cur_branches-1, we delete last branch: easy */
-+ memmove(&new_data[idx], &new_data[idx+1],
-+ (cur_branches - 1 - idx) *
-+ sizeof(struct unionfs_data));
-+ memmove(&new_lower_paths[idx], &new_lower_paths[idx+1],
-+ (cur_branches - 1 - idx) * sizeof(struct path));
-+ }
-+
-+ err = 0;
-+out:
-+ return err;
-+}
-+
-+/* handle branch insertion during remount */
-+static noinline_for_stack int do_remount_add_option(
-+ char *optarg, int cur_branches,
-+ struct unionfs_data *new_data,
-+ struct path *new_lower_paths,
-+ int *high_branch_id)
-+{
-+ int err = -EINVAL;
-+ int perms;
-+ int idx = 0; /* default: insert at beginning */
-+ char *new_branch , *modename = NULL;
-+ struct nameidata nd;
-+
-+ /*
-+ * optarg can be of several forms:
-+ *
-+ * /bar:/foo insert /foo before /bar
-+ * /bar:/foo=ro insert /foo in ro mode before /bar
-+ * /foo insert /foo in the beginning (prepend)
-+ * :/foo insert /foo at the end (append)
-+ */
-+ if (*optarg == ':') { /* append? */
-+ new_branch = optarg + 1; /* skip ':' */
-+ idx = cur_branches;
-+ goto found_insertion_point;
-+ }
-+ new_branch = strchr(optarg, ':');
-+ if (!new_branch) { /* prepend? */
-+ new_branch = optarg;
-+ goto found_insertion_point;
-+ }
-+ *new_branch++ = '\0'; /* holds path+mode of new branch */
-+
-+ /*
-+ * Find matching branch index. For now, this assumes that nothing
-+ * has been mounted on top of this Unionfs stack. Once we have /odf
-+ * and cache-coherency resolved, we'll address the branch-path
-+ * uniqueness.
-+ */
-+ err = path_lookup(optarg, LOOKUP_FOLLOW, &nd);
-+ if (err) {
-+ printk(KERN_ERR "unionfs: error accessing "
-+ "lower directory \"%s\" (error %d)\n",
-+ optarg, err);
-+ goto out;
-+ }
-+ for (idx = 0; idx < cur_branches; idx++)
-+ if (nd.path.mnt == new_lower_paths[idx].mnt &&
-+ nd.path.dentry == new_lower_paths[idx].dentry)
-+ break;
-+ path_put(&nd.path); /* no longer needed */
-+ if (idx == cur_branches) {
-+ printk(KERN_ERR "unionfs: branch \"%s\" "
-+ "not found\n", optarg);
-+ err = -ENOENT;
-+ goto out;
-+ }
-+
-+ /*
-+ * At this point idx will hold the index where the new branch should
-+ * be inserted before.
-+ */
-+found_insertion_point:
-+ /* find the mode for the new branch */
-+ if (new_branch)
-+ modename = strchr(new_branch, '=');
-+ if (modename)
-+ *modename++ = '\0';
-+ if (!new_branch || !*new_branch) {
-+ printk(KERN_ERR "unionfs: null new branch\n");
-+ err = -EINVAL;
-+ goto out;
-+ }
-+ err = parse_branch_mode(modename, &perms);
-+ if (err) {
-+ printk(KERN_ERR "unionfs: invalid mode \"%s\" for "
-+ "branch \"%s\"\n", modename, new_branch);
-+ goto out;
-+ }
-+ err = path_lookup(new_branch, LOOKUP_FOLLOW, &nd);
-+ if (err) {
-+ printk(KERN_ERR "unionfs: error accessing "
-+ "lower directory \"%s\" (error %d)\n",
-+ new_branch, err);
-+ goto out;
-+ }
-+ /*
-+ * It's probably safe to check_mode the new branch to insert. Note:
-+ * we don't allow inserting branches which are unionfs's by
-+ * themselves (check_branch returns EINVAL in that case). This is
-+ * because this code base doesn't support stacking unionfs: the ODF
-+ * code base supports that correctly.
-+ */
-+ err = check_branch(&nd);
-+ if (err) {
-+ printk(KERN_ERR "unionfs: lower directory "
-+ "\"%s\" is not a valid branch\n", optarg);
-+ path_put(&nd.path);
-+ goto out;
-+ }
-+
-+ /*
-+ * Now we have to insert the new branch. But first, move the bits
-+ * to make space for the new branch, if needed. Finally, adjust
-+ * cur_branches.
-+ * We don't release nd here; it's kept until umount/remount.
-+ */
-+ if (idx < cur_branches) {
-+ /* if idx==cur_branches, we append: easy */
-+ memmove(&new_data[idx+1], &new_data[idx],
-+ (cur_branches - idx) * sizeof(struct unionfs_data));
-+ memmove(&new_lower_paths[idx+1], &new_lower_paths[idx],
-+ (cur_branches - idx) * sizeof(struct path));
-+ }
-+ new_lower_paths[idx].dentry = nd.path.dentry;
-+ new_lower_paths[idx].mnt = nd.path.mnt;
-+
-+ new_data[idx].sb = nd.path.dentry->d_sb;
-+ atomic_set(&new_data[idx].open_files, 0);
-+ new_data[idx].branchperms = perms;
-+ new_data[idx].branch_id = ++*high_branch_id; /* assign new branch ID */
-+
-+ err = 0;
-+out:
-+ return err;
-+}
-+
-+
-+/*
-+ * Support branch management options on remount.
-+ *
-+ * See Documentation/filesystems/unionfs/ for details.
-+ *
-+ * @flags: numeric mount options
-+ * @options: mount options string
-+ *
-+ * This function can rearrange a mounted union dynamically, adding and
-+ * removing branches, including changing branch modes. Clearly this has to
-+ * be done safely and atomically. Luckily, the VFS already calls this
-+ * function with lock_super(sb) and lock_kernel() held, preventing
-+ * concurrent mixing of new mounts, remounts, and unmounts. Moreover,
-+ * do_remount_sb(), our caller function, already called shrink_dcache_sb(sb)
-+ * to purge dentries/inodes from our superblock, and also called
-+ * fsync_super(sb) to purge any dirty pages. So we're good.
-+ *
-+ * XXX: however, our remount code may also need to invalidate mapped pages
-+ * so as to force them to be re-gotten from the (newly reconfigured) lower
-+ * branches. This has to wait for proper mmap and cache coherency support
-+ * in the VFS.
-+ *
-+ */
-+static int unionfs_remount_fs(struct super_block *sb, int *flags,
-+ char *options)
-+{
-+ int err = 0;
-+ int i;
-+ char *optionstmp, *tmp_to_free; /* kstrdup'ed of "options" */
-+ char *optname;
-+ int cur_branches = 0; /* no. of current branches */
-+ int new_branches = 0; /* no. of branches actually left in the end */
-+ int add_branches; /* est. no. of branches to add */
-+ int del_branches; /* est. no. of branches to del */
-+ int max_branches; /* max possible no. of branches */
-+ struct unionfs_data *new_data = NULL, *tmp_data = NULL;
-+ struct path *new_lower_paths = NULL, *tmp_lower_paths = NULL;
-+ struct inode **new_lower_inodes = NULL;
-+ int new_high_branch_id; /* new high branch ID */
-+ int size; /* memory allocation size, temp var */
-+ int old_ibstart, old_ibend;
-+
-+ unionfs_write_lock(sb);
-+
-+ /*
-+ * The VFS will take care of "ro" and "rw" flags, and we can safely
-+ * ignore MS_SILENT, but anything else left over is an error. So we
-+ * need to check if any other flags may have been passed (none are
-+ * allowed/supported as of now).
-+ */
-+ if ((*flags & ~(MS_RDONLY | MS_SILENT)) != 0) {
-+ printk(KERN_ERR
-+ "unionfs: remount flags 0x%x unsupported\n", *flags);
-+ err = -EINVAL;
-+ goto out_error;
-+ }
-+
-+ /*
-+ * If 'options' is NULL, it's probably because the user just changed
-+ * the union to a "ro" or "rw" and the VFS took care of it. So
-+ * nothing to do and we're done.
-+ */
-+ if (!options || options[0] == '\0')
-+ goto out_error;
-+
-+ /*
-+ * Find out how many branches we will have in the end, counting
-+ * "add" and "del" commands. Copy the "options" string because
-+ * strsep modifies the string and we need it later.
-+ */
-+ tmp_to_free = kstrdup(options, GFP_KERNEL);
-+ optionstmp = tmp_to_free;
-+ if (unlikely(!optionstmp)) {
-+ err = -ENOMEM;
-+ goto out_free;
-+ }
-+ cur_branches = sbmax(sb); /* current no. branches */
-+ new_branches = sbmax(sb);
-+ del_branches = 0;
-+ add_branches = 0;
-+ new_high_branch_id = sbhbid(sb); /* save current high_branch_id */
-+ while ((optname = strsep(&optionstmp, ",")) != NULL) {
-+ char *optarg;
-+
-+ if (!optname || !*optname)
-+ continue;
-+
-+ optarg = strchr(optname, '=');
-+ if (optarg)
-+ *optarg++ = '\0';
-+
-+ if (!strcmp("add", optname))
-+ add_branches++;
-+ else if (!strcmp("del", optname))
-+ del_branches++;
-+ }
-+ kfree(tmp_to_free);
-+ /* after all changes, will we have at least one branch left? */
-+ if ((new_branches + add_branches - del_branches) < 1) {
-+ printk(KERN_ERR
-+ "unionfs: no branches left after remount\n");
-+ err = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ /*
-+ * Since we haven't actually parsed all the add/del options, nor
-+ * have we checked them for errors, we don't know for sure how many
-+ * branches we will have after all changes have taken place. In
-+ * fact, the total number of branches left could be less than what
-+ * we have now. So we need to allocate space for a temporary
-+ * placeholder that is at least as large as the maximum number of
-+ * branches we *could* have, which is the current number plus all
-+ * the additions. Once we're done with these temp placeholders, we
-+ * may have to re-allocate the final size, copy over from the temp,
-+ * and then free the temps (done near the end of this function).
-+ */
-+ max_branches = cur_branches + add_branches;
-+ /* allocate space for new pointers to lower dentry */
-+ tmp_data = kcalloc(max_branches,
-+ sizeof(struct unionfs_data), GFP_KERNEL);
-+ if (unlikely(!tmp_data)) {
-+ err = -ENOMEM;
-+ goto out_free;
-+ }
-+ /* allocate space for new pointers to lower paths */
-+ tmp_lower_paths = kcalloc(max_branches,
-+ sizeof(struct path), GFP_KERNEL);
-+ if (unlikely(!tmp_lower_paths)) {
-+ err = -ENOMEM;
-+ goto out_free;
-+ }
-+ /* copy current info into new placeholders, incrementing refcnts */
-+ memcpy(tmp_data, UNIONFS_SB(sb)->data,
-+ cur_branches * sizeof(struct unionfs_data));
-+ memcpy(tmp_lower_paths, UNIONFS_D(sb->s_root)->lower_paths,
-+ cur_branches * sizeof(struct path));
-+ for (i = 0; i < cur_branches; i++)
-+ path_get(&tmp_lower_paths[i]); /* drop refs at end of fxn */
-+
-+ /*******************************************************************
-+ * For each branch command, do path_lookup on the requested branch,
-+ * and apply the change to a temp branch list. To handle errors, we
-+ * already dup'ed the old arrays (above), and increased the refcnts
-+ * on various f/s objects. So now we can do all the path_lookups
-+ * and branch-management commands on the new arrays. If it fail mid
-+ * way, we free the tmp arrays and *put all objects. If we succeed,
-+ * then we free old arrays and *put its objects, and then replace
-+ * the arrays with the new tmp list (we may have to re-allocate the
-+ * memory because the temp lists could have been larger than what we
-+ * actually needed).
-+ *******************************************************************/
-+
-+ while ((optname = strsep(&options, ",")) != NULL) {
-+ char *optarg;
-+
-+ if (!optname || !*optname)
-+ continue;
-+ /*
-+ * At this stage optname holds a comma-delimited option, but
-+ * without the commas. Next, we need to break the string on
-+ * the '=' symbol to separate CMD=ARG, where ARG itself can
-+ * be KEY=VAL. For example, in mode=/foo=rw, CMD is "mode",
-+ * KEY is "/foo", and VAL is "rw".
-+ */
-+ optarg = strchr(optname, '=');
-+ if (optarg)
-+ *optarg++ = '\0';
-+ /* incgen remount option (instead of old ioctl) */
-+ if (!strcmp("incgen", optname)) {
-+ err = 0;
-+ goto out_no_change;
-+ }
-+
-+ /*
-+ * All of our options take an argument now. (Insert ones
-+ * that don't above this check.) So at this stage optname
-+ * contains the CMD part and optarg contains the ARG part.
-+ */
-+ if (!optarg || !*optarg) {
-+ printk(KERN_ERR "unionfs: all remount options require "
-+ "an argument (%s)\n", optname);
-+ err = -EINVAL;
-+ goto out_release;
-+ }
-+
-+ if (!strcmp("add", optname)) {
-+ err = do_remount_add_option(optarg, new_branches,
-+ tmp_data,
-+ tmp_lower_paths,
-+ &new_high_branch_id);
-+ if (err)
-+ goto out_release;
-+ new_branches++;
-+ if (new_branches > UNIONFS_MAX_BRANCHES) {
-+ printk(KERN_ERR "unionfs: command exceeds "
-+ "%d branches\n", UNIONFS_MAX_BRANCHES);
-+ err = -E2BIG;
-+ goto out_release;
-+ }
-+ continue;
-+ }
-+ if (!strcmp("del", optname)) {
-+ err = do_remount_del_option(optarg, new_branches,
-+ tmp_data,
-+ tmp_lower_paths);
-+ if (err)
-+ goto out_release;
-+ new_branches--;
-+ continue;
-+ }
-+ if (!strcmp("mode", optname)) {
-+ err = do_remount_mode_option(optarg, new_branches,
-+ tmp_data,
-+ tmp_lower_paths);
-+ if (err)
-+ goto out_release;
-+ continue;
-+ }
-+
-+ /*
-+ * When you use "mount -o remount,ro", mount(8) will
-+ * reportedly pass the original dirs= string from
-+ * /proc/mounts. So for now, we have to ignore dirs= and
-+ * not consider it an error, unless we want to allow users
-+ * to pass dirs= in remount. Note that to allow the VFS to
-+ * actually process the ro/rw remount options, we have to
-+ * return 0 from this function.
-+ */
-+ if (!strcmp("dirs", optname)) {
-+ printk(KERN_WARNING
-+ "unionfs: remount ignoring option \"%s\"\n",
-+ optname);
-+ continue;
-+ }
-+
-+ err = -EINVAL;
-+ printk(KERN_ERR
-+ "unionfs: unrecognized option \"%s\"\n", optname);
-+ goto out_release;
-+ }
-+
-+out_no_change:
-+
-+ /******************************************************************
-+ * WE'RE ALMOST DONE: check if leftmost branch might be read-only,
-+ * see if we need to allocate a small-sized new vector, copy the
-+ * vectors to their correct place, release the refcnt of the older
-+ * ones, and return. Also handle invalidating any pages that will
-+ * have to be re-read.
-+ *******************************************************************/
-+
-+ if (!(tmp_data[0].branchperms & MAY_WRITE)) {
-+ printk(KERN_ERR "unionfs: leftmost branch cannot be read-only "
-+ "(use \"remount,ro\" to create a read-only union)\n");
-+ err = -EINVAL;
-+ goto out_release;
-+ }
-+
-+ /* (re)allocate space for new pointers to lower dentry */
-+ size = new_branches * sizeof(struct unionfs_data);
-+ new_data = krealloc(tmp_data, size, GFP_KERNEL);
-+ if (unlikely(!new_data)) {
-+ err = -ENOMEM;
-+ goto out_release;
-+ }
-+
-+ /* allocate space for new pointers to lower paths */
-+ size = new_branches * sizeof(struct path);
-+ new_lower_paths = krealloc(tmp_lower_paths, size, GFP_KERNEL);
-+ if (unlikely(!new_lower_paths)) {
-+ err = -ENOMEM;
-+ goto out_release;
-+ }
-+
-+ /* allocate space for new pointers to lower inodes */
-+ new_lower_inodes = kcalloc(new_branches,
-+ sizeof(struct inode *), GFP_KERNEL);
-+ if (unlikely(!new_lower_inodes)) {
-+ err = -ENOMEM;
-+ goto out_release;
-+ }
-+
-+ /*
-+ * OK, just before we actually put the new set of branches in place,
-+ * we need to ensure that our own f/s has no dirty objects left.
-+ * Luckily, do_remount_sb() already calls shrink_dcache_sb(sb) and
-+ * fsync_super(sb), taking care of dentries, inodes, and dirty
-+ * pages. So all that's left is for us to invalidate any leftover
-+ * (non-dirty) pages to ensure that they will be re-read from the
-+ * new lower branches (and to support mmap).
-+ */
-+
-+ /*
-+ * Once we finish the remounting successfully, our superblock
-+ * generation number will have increased. This will be detected by
-+ * our dentry-revalidation code upon subsequent f/s operations
-+ * through unionfs. The revalidation code will rebuild the union of
-+ * lower inodes for a given unionfs inode and invalidate any pages
-+ * of such "stale" inodes (by calling our purge_inode_data
-+ * function). This revalidation will happen lazily and
-+ * incrementally, as users perform operations on cached inodes. We
-+ * would like to encourage this revalidation to happen sooner if
-+ * possible, so we like to try to invalidate as many other pages in
-+ * our superblock as we can. We used to call drop_pagecache_sb() or
-+ * a variant thereof, but either method was racy (drop_caches alone
-+ * is known to be racy). So now we let the revalidation happen on a
-+ * per file basis in ->d_revalidate.
-+ */
-+
-+ /* grab new lower super references; release old ones */
-+ for (i = 0; i < new_branches; i++)
-+ atomic_inc(&new_data[i].sb->s_active);
-+ for (i = 0; i < sbmax(sb); i++)
-+ atomic_dec(&UNIONFS_SB(sb)->data[i].sb->s_active);
-+
-+ /* copy new vectors into their correct place */
-+ tmp_data = UNIONFS_SB(sb)->data;
-+ UNIONFS_SB(sb)->data = new_data;
-+ new_data = NULL; /* so don't free good pointers below */
-+ tmp_lower_paths = UNIONFS_D(sb->s_root)->lower_paths;
-+ UNIONFS_D(sb->s_root)->lower_paths = new_lower_paths;
-+ new_lower_paths = NULL; /* so don't free good pointers below */
-+
-+ /* update our unionfs_sb_info and root dentry index of last branch */
-+ i = sbmax(sb); /* save no. of branches to release at end */
-+ sbend(sb) = new_branches - 1;
-+ dbend(sb->s_root) = new_branches - 1;
-+ old_ibstart = ibstart(sb->s_root->d_inode);
-+ old_ibend = ibend(sb->s_root->d_inode);
-+ ibend(sb->s_root->d_inode) = new_branches - 1;
-+ UNIONFS_D(sb->s_root)->bcount = new_branches;
-+ new_branches = i; /* no. of branches to release below */
-+
-+ /*
-+ * Update lower inodes: 3 steps
-+ * 1. grab ref on all new lower inodes
-+ */
-+ for (i = dbstart(sb->s_root); i <= dbend(sb->s_root); i++) {
-+ struct dentry *lower_dentry =
-+ unionfs_lower_dentry_idx(sb->s_root, i);
-+ igrab(lower_dentry->d_inode);
-+ new_lower_inodes[i] = lower_dentry->d_inode;
-+ }
-+ /* 2. release reference on all older lower inodes */
-+ iput_lowers(sb->s_root->d_inode, old_ibstart, old_ibend, true);
-+ /* 3. update root dentry's inode to new lower_inodes array */
-+ UNIONFS_I(sb->s_root->d_inode)->lower_inodes = new_lower_inodes;
-+ new_lower_inodes = NULL;
-+
-+ /* maxbytes may have changed */
-+ sb->s_maxbytes = unionfs_lower_super_idx(sb, 0)->s_maxbytes;
-+ /* update high branch ID */
-+ sbhbid(sb) = new_high_branch_id;
-+
-+ /* update our sb->generation for revalidating objects */
-+ i = atomic_inc_return(&UNIONFS_SB(sb)->generation);
-+ atomic_set(&UNIONFS_D(sb->s_root)->generation, i);
-+ atomic_set(&UNIONFS_I(sb->s_root->d_inode)->generation, i);
-+ if (!(*flags & MS_SILENT))
-+ pr_info("unionfs: %s: new generation number %d\n",
-+ UNIONFS_SB(sb)->dev_name, i);
-+ /* finally, update the root dentry's times */
-+ unionfs_copy_attr_times(sb->s_root->d_inode);
-+ err = 0; /* reset to success */
-+
-+ /*
-+ * The code above falls through to the next label, and releases the
-+ * refcnts of the older ones (stored in tmp_*): if we fell through
-+ * here, it means success. However, if we jump directly to this
-+ * label from any error above, then an error occurred after we
-+ * grabbed various refcnts, and so we have to release the
-+ * temporarily constructed structures.
-+ */
-+out_release:
-+ /* no need to cleanup/release anything in tmp_data */
-+ if (tmp_lower_paths)
-+ for (i = 0; i < new_branches; i++)
-+ path_put(&tmp_lower_paths[i]);
-+out_free:
-+ kfree(tmp_lower_paths);
-+ kfree(tmp_data);
-+ kfree(new_lower_paths);
-+ kfree(new_data);
-+ kfree(new_lower_inodes);
-+out_error:
-+ unionfs_check_dentry(sb->s_root);
-+ unionfs_write_unlock(sb);
-+ return err;
-+}
-+
-+/*
-+ * Called by iput() when the inode reference count reached zero
-+ * and the inode is not hashed anywhere. Used to clear anything
-+ * that needs to be, before the inode is completely destroyed and put
-+ * on the inode free list.
-+ *
-+ * No need to lock sb info's rwsem.
-+ */
-+static void unionfs_evict_inode(struct inode *inode)
-+{
-+ int bindex, bstart, bend;
-+ struct inode *lower_inode;
-+ struct list_head *pos, *n;
-+ struct unionfs_dir_state *rdstate;
-+
-+ truncate_inode_pages(&inode->i_data, 0);
-+ end_writeback(inode);
-+
-+ list_for_each_safe(pos, n, &UNIONFS_I(inode)->readdircache) {
-+ rdstate = list_entry(pos, struct unionfs_dir_state, cache);
-+ list_del(&rdstate->cache);
-+ free_rdstate(rdstate);
-+ }
-+
-+ /*
-+ * Decrement a reference to a lower_inode, which was incremented
-+ * by our read_inode when it was created initially.
-+ */
-+ bstart = ibstart(inode);
-+ bend = ibend(inode);
-+ if (bstart >= 0) {
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ lower_inode = unionfs_lower_inode_idx(inode, bindex);
-+ if (!lower_inode)
-+ continue;
-+ unionfs_set_lower_inode_idx(inode, bindex, NULL);
-+ /* see Documentation/filesystems/unionfs/issues.txt */
-+ lockdep_off();
-+ iput(lower_inode);
-+ lockdep_on();
-+ }
-+ }
-+
-+ kfree(UNIONFS_I(inode)->lower_inodes);
-+ UNIONFS_I(inode)->lower_inodes = NULL;
-+}
-+
-+static struct inode *unionfs_alloc_inode(struct super_block *sb)
-+{
-+ struct unionfs_inode_info *i;
-+
-+ i = kmem_cache_alloc(unionfs_inode_cachep, GFP_KERNEL);
-+ if (unlikely(!i))
-+ return NULL;
-+
-+ /* memset everything up to the inode to 0 */
-+ memset(i, 0, offsetof(struct unionfs_inode_info, vfs_inode));
-+
-+ i->vfs_inode.i_version = 1;
-+ return &i->vfs_inode;
-+}
-+
-+static void unionfs_destroy_inode(struct inode *inode)
-+{
-+ kmem_cache_free(unionfs_inode_cachep, UNIONFS_I(inode));
-+}
-+
-+/* unionfs inode cache constructor */
-+static void init_once(void *obj)
-+{
-+ struct unionfs_inode_info *i = obj;
-+
-+ inode_init_once(&i->vfs_inode);
-+}
-+
-+int unionfs_init_inode_cache(void)
-+{
-+ int err = 0;
-+
-+ unionfs_inode_cachep =
-+ kmem_cache_create("unionfs_inode_cache",
-+ sizeof(struct unionfs_inode_info), 0,
-+ SLAB_RECLAIM_ACCOUNT, init_once);
-+ if (unlikely(!unionfs_inode_cachep))
-+ err = -ENOMEM;
-+ return err;
-+}
-+
-+/* unionfs inode cache destructor */
-+void unionfs_destroy_inode_cache(void)
-+{
-+ if (unionfs_inode_cachep)
-+ kmem_cache_destroy(unionfs_inode_cachep);
-+}
-+
-+/*
-+ * Called when we have a dirty inode, right here we only throw out
-+ * parts of our readdir list that are too old.
-+ *
-+ * No need to grab sb info's rwsem.
-+ */
-+static int unionfs_write_inode(struct inode *inode,
-+ struct writeback_control *wbc)
-+{
-+ struct list_head *pos, *n;
-+ struct unionfs_dir_state *rdstate;
-+
-+ spin_lock(&UNIONFS_I(inode)->rdlock);
-+ list_for_each_safe(pos, n, &UNIONFS_I(inode)->readdircache) {
-+ rdstate = list_entry(pos, struct unionfs_dir_state, cache);
-+ /* We keep this list in LRU order. */
-+ if ((rdstate->access + RDCACHE_JIFFIES) > jiffies)
-+ break;
-+ UNIONFS_I(inode)->rdcount--;
-+ list_del(&rdstate->cache);
-+ free_rdstate(rdstate);
-+ }
-+ spin_unlock(&UNIONFS_I(inode)->rdlock);
-+
-+ return 0;
-+}
-+
-+/*
-+ * Used only in nfs, to kill any pending RPC tasks, so that subsequent
-+ * code can actually succeed and won't leave tasks that need handling.
-+ */
-+static void unionfs_umount_begin(struct super_block *sb)
-+{
-+ struct super_block *lower_sb;
-+ int bindex, bstart, bend;
-+
-+ unionfs_read_lock(sb, UNIONFS_SMUTEX_CHILD);
-+
-+ bstart = sbstart(sb);
-+ bend = sbend(sb);
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ lower_sb = unionfs_lower_super_idx(sb, bindex);
-+
-+ if (lower_sb && lower_sb->s_op &&
-+ lower_sb->s_op->umount_begin)
-+ lower_sb->s_op->umount_begin(lower_sb);
-+ }
-+
-+ unionfs_read_unlock(sb);
-+}
-+
-+static int unionfs_show_options(struct seq_file *m, struct vfsmount *mnt)
-+{
-+ struct super_block *sb = mnt->mnt_sb;
-+ int ret = 0;
-+ char *tmp_page;
-+ char *path;
-+ int bindex, bstart, bend;
-+ int perms;
-+
-+ unionfs_read_lock(sb, UNIONFS_SMUTEX_CHILD);
-+
-+ unionfs_lock_dentry(sb->s_root, UNIONFS_DMUTEX_CHILD);
-+
-+ tmp_page = (char *) __get_free_page(GFP_KERNEL);
-+ if (unlikely(!tmp_page)) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+ bstart = sbstart(sb);
-+ bend = sbend(sb);
-+
-+ seq_printf(m, ",dirs=");
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ struct path p;
-+ p.dentry = unionfs_lower_dentry_idx(sb->s_root, bindex);
-+ p.mnt = unionfs_lower_mnt_idx(sb->s_root, bindex);
-+ path = d_path(&p, tmp_page, PAGE_SIZE);
-+ if (IS_ERR(path)) {
-+ ret = PTR_ERR(path);
-+ goto out;
-+ }
-+
-+ perms = branchperms(sb, bindex);
-+
-+ seq_printf(m, "%s=%s", path,
-+ perms & MAY_WRITE ? "rw" : "ro");
-+ if (bindex != bend)
-+ seq_printf(m, ":");
-+ }
-+
-+out:
-+ free_page((unsigned long) tmp_page);
-+
-+ unionfs_unlock_dentry(sb->s_root);
-+
-+ unionfs_read_unlock(sb);
-+
-+ return ret;
-+}
-+
-+struct super_operations unionfs_sops = {
-+ .put_super = unionfs_put_super,
-+ .statfs = unionfs_statfs,
-+ .remount_fs = unionfs_remount_fs,
-+ .evict_inode = unionfs_evict_inode,
-+ .umount_begin = unionfs_umount_begin,
-+ .show_options = unionfs_show_options,
-+ .write_inode = unionfs_write_inode,
-+ .alloc_inode = unionfs_alloc_inode,
-+ .destroy_inode = unionfs_destroy_inode,
-+};
-diff --git a/fs/unionfs/union.h b/fs/unionfs/union.h
-new file mode 100644
-index 0000000..d49c834
---- /dev/null
-+++ b/fs/unionfs/union.h
-@@ -0,0 +1,669 @@
-+/*
-+ * Copyright (c) 2003-2010 Erez Zadok
-+ * Copyright (c) 2003-2006 Charles P. Wright
-+ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2005 Arun M. Krishnakumar
-+ * Copyright (c) 2004-2006 David P. Quigley
-+ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
-+ * Copyright (c) 2003 Puja Gupta
-+ * Copyright (c) 2003 Harikesavan Krishnan
-+ * Copyright (c) 2003-2010 Stony Brook University
-+ * Copyright (c) 2003-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#ifndef _UNION_H_
-+#define _UNION_H_
-+
-+#include <linux/dcache.h>
-+#include <linux/file.h>
-+#include <linux/list.h>
-+#include <linux/fs.h>
-+#include <linux/mm.h>
-+#include <linux/module.h>
-+#include <linux/mount.h>
-+#include <linux/namei.h>
-+#include <linux/page-flags.h>
-+#include <linux/pagemap.h>
-+#include <linux/poll.h>
-+#include <linux/security.h>
-+#include <linux/seq_file.h>
-+#include <linux/slab.h>
-+#include <linux/spinlock.h>
-+#include <linux/smp_lock.h>
-+#include <linux/statfs.h>
-+#include <linux/string.h>
-+#include <linux/vmalloc.h>
-+#include <linux/writeback.h>
-+#include <linux/buffer_head.h>
-+#include <linux/xattr.h>
-+#include <linux/fs_stack.h>
-+#include <linux/magic.h>
-+#include <linux/log2.h>
-+#include <linux/poison.h>
-+#include <linux/mman.h>
-+#include <linux/backing-dev.h>
-+#include <linux/splice.h>
-+
-+#include <asm/system.h>
-+
-+#include <linux/union_fs.h>
-+
-+/* the file system name */
-+#define UNIONFS_NAME "unionfs"
-+
-+/* unionfs root inode number */
-+#define UNIONFS_ROOT_INO 1
-+
-+/* number of times we try to get a unique temporary file name */
-+#define GET_TMPNAM_MAX_RETRY 5
-+
-+/* maximum number of branches we support, to avoid memory blowup */
-+#define UNIONFS_MAX_BRANCHES 128
-+
-+/* minimum time (seconds) required for time-based cache-coherency */
-+#define UNIONFS_MIN_CC_TIME 3
-+
-+/* Operations vectors defined in specific files. */
-+extern struct file_operations unionfs_main_fops;
-+extern struct file_operations unionfs_dir_fops;
-+extern struct inode_operations unionfs_main_iops;
-+extern struct inode_operations unionfs_dir_iops;
-+extern struct inode_operations unionfs_symlink_iops;
-+extern struct super_operations unionfs_sops;
-+extern struct dentry_operations unionfs_dops;
-+extern struct address_space_operations unionfs_aops, unionfs_dummy_aops;
-+extern struct vm_operations_struct unionfs_vm_ops;
-+
-+/* How long should an entry be allowed to persist */
-+#define RDCACHE_JIFFIES (5*HZ)
-+
-+/* compatibility with Real-Time patches */
-+#ifdef CONFIG_PREEMPT_RT
-+# define unionfs_rw_semaphore compat_rw_semaphore
-+#else /* not CONFIG_PREEMPT_RT */
-+# define unionfs_rw_semaphore rw_semaphore
-+#endif /* not CONFIG_PREEMPT_RT */
-+
-+/* file private data. */
-+struct unionfs_file_info {
-+ int bstart;
-+ int bend;
-+ atomic_t generation;
-+
-+ struct unionfs_dir_state *rdstate;
-+ struct file **lower_files;
-+ int *saved_branch_ids; /* IDs of branches when file was opened */
-+ const struct vm_operations_struct *lower_vm_ops;
-+ bool wrote_to_file; /* for delayed copyup */
-+};
-+
-+/* unionfs inode data in memory */
-+struct unionfs_inode_info {
-+ int bstart;
-+ int bend;
-+ atomic_t generation;
-+ /* Stuff for readdir over NFS. */
-+ spinlock_t rdlock;
-+ struct list_head readdircache;
-+ int rdcount;
-+ int hashsize;
-+ int cookie;
-+
-+ /* The lower inodes */
-+ struct inode **lower_inodes;
-+
-+ struct inode vfs_inode;
-+};
-+
-+/* unionfs dentry data in memory */
-+struct unionfs_dentry_info {
-+ /*
-+ * The semaphore is used to lock the dentry as soon as we get into a
-+ * unionfs function from the VFS. Our lock ordering is that children
-+ * go before their parents.
-+ */
-+ struct mutex lock;
-+ int bstart;
-+ int bend;
-+ int bopaque;
-+ int bcount;
-+ atomic_t generation;
-+ struct path *lower_paths;
-+};
-+
-+/* These are the pointers to our various objects. */
-+struct unionfs_data {
-+ struct super_block *sb; /* lower super_block */
-+ atomic_t open_files; /* number of open files on branch */
-+ int branchperms;
-+ int branch_id; /* unique branch ID at re/mount time */
-+};
-+
-+/* unionfs super-block data in memory */
-+struct unionfs_sb_info {
-+ int bend;
-+
-+ atomic_t generation;
-+
-+ /*
-+ * This rwsem is used to make sure that a branch management
-+ * operation...
-+ * 1) will not begin before all currently in-flight operations
-+ * complete.
-+ * 2) any new operations do not execute until the currently
-+ * running branch management operation completes.
-+ *
-+ * The write_lock_owner records the PID of the task which grabbed
-+ * the rw_sem for writing. If the same task also tries to grab the
-+ * read lock, we allow it. This prevents a self-deadlock when
-+ * branch-management is used on a pivot_root'ed union, because we
-+ * have to ->lookup paths which belong to the same union.
-+ */
-+ struct unionfs_rw_semaphore rwsem;
-+ pid_t write_lock_owner; /* PID of rw_sem owner (write lock) */
-+ int high_branch_id; /* last unique branch ID given */
-+ char *dev_name; /* to identify different unions in pr_debug */
-+ struct unionfs_data *data;
-+};
-+
-+/*
-+ * structure for making the linked list of entries by readdir on left branch
-+ * to compare with entries on right branch
-+ */
-+struct filldir_node {
-+ struct list_head file_list; /* list for directory entries */
-+ char *name; /* name entry */
-+ int hash; /* name hash */
-+ int namelen; /* name len since name is not 0 terminated */
-+
-+ /*
-+ * we can check for duplicate whiteouts and files in the same branch
-+ * in order to return -EIO.
-+ */
-+ int bindex;
-+
-+ /* is this a whiteout entry? */
-+ int whiteout;
-+
-+ /* Inline name, so we don't need to separately kmalloc small ones */
-+ char iname[DNAME_INLINE_LEN_MIN];
-+};
-+
-+/* Directory hash table. */
-+struct unionfs_dir_state {
-+ unsigned int cookie; /* the cookie, based off of rdversion */
-+ unsigned int offset; /* The entry we have returned. */
-+ int bindex;
-+ loff_t dirpos; /* offset within the lower level directory */
-+ int size; /* How big is the hash table? */
-+ int hashentries; /* How many entries have been inserted? */
-+ unsigned long access;
-+
-+ /* This cache list is used when the inode keeps us around. */
-+ struct list_head cache;
-+ struct list_head list[0];
-+};
-+
-+/* externs needed for fanout.h or sioq.h */
-+extern int unionfs_get_nlinks(const struct inode *inode);
-+extern void unionfs_copy_attr_times(struct inode *upper);
-+extern void unionfs_copy_attr_all(struct inode *dest, const struct inode *src);
-+
-+/* include miscellaneous macros */
-+#include "fanout.h"
-+#include "sioq.h"
-+
-+/* externs for cache creation/deletion routines */
-+extern void unionfs_destroy_filldir_cache(void);
-+extern int unionfs_init_filldir_cache(void);
-+extern int unionfs_init_inode_cache(void);
-+extern void unionfs_destroy_inode_cache(void);
-+extern int unionfs_init_dentry_cache(void);
-+extern void unionfs_destroy_dentry_cache(void);
-+
-+/* Initialize and free readdir-specific state. */
-+extern int init_rdstate(struct file *file);
-+extern struct unionfs_dir_state *alloc_rdstate(struct inode *inode,
-+ int bindex);
-+extern struct unionfs_dir_state *find_rdstate(struct inode *inode,
-+ loff_t fpos);
-+extern void free_rdstate(struct unionfs_dir_state *state);
-+extern int add_filldir_node(struct unionfs_dir_state *rdstate,
-+ const char *name, int namelen, int bindex,
-+ int whiteout);
-+extern struct filldir_node *find_filldir_node(struct unionfs_dir_state *rdstate,
-+ const char *name, int namelen,
-+ int is_whiteout);
-+
-+extern struct dentry **alloc_new_dentries(int objs);
-+extern struct unionfs_data *alloc_new_data(int objs);
-+
-+/* We can only use 32-bits of offset for rdstate --- blech! */
-+#define DIREOF (0xfffff)
-+#define RDOFFBITS 20 /* This is the number of bits in DIREOF. */
-+#define MAXRDCOOKIE (0xfff)
-+/* Turn an rdstate into an offset. */
-+static inline off_t rdstate2offset(struct unionfs_dir_state *buf)
-+{
-+ off_t tmp;
-+
-+ tmp = ((buf->cookie & MAXRDCOOKIE) << RDOFFBITS)
-+ | (buf->offset & DIREOF);
-+ return tmp;
-+}
-+
-+/* Macros for locking a super_block. */
-+enum unionfs_super_lock_class {
-+ UNIONFS_SMUTEX_NORMAL,
-+ UNIONFS_SMUTEX_PARENT, /* when locking on behalf of file */
-+ UNIONFS_SMUTEX_CHILD, /* when locking on behalf of dentry */
-+};
-+static inline void unionfs_read_lock(struct super_block *sb, int subclass)
-+{
-+ if (UNIONFS_SB(sb)->write_lock_owner &&
-+ UNIONFS_SB(sb)->write_lock_owner == current->pid)
-+ return;
-+ down_read_nested(&UNIONFS_SB(sb)->rwsem, subclass);
-+}
-+static inline void unionfs_read_unlock(struct super_block *sb)
-+{
-+ if (UNIONFS_SB(sb)->write_lock_owner &&
-+ UNIONFS_SB(sb)->write_lock_owner == current->pid)
-+ return;
-+ up_read(&UNIONFS_SB(sb)->rwsem);
-+}
-+static inline void unionfs_write_lock(struct super_block *sb)
-+{
-+ down_write(&UNIONFS_SB(sb)->rwsem);
-+ UNIONFS_SB(sb)->write_lock_owner = current->pid;
-+}
-+static inline void unionfs_write_unlock(struct super_block *sb)
-+{
-+ up_write(&UNIONFS_SB(sb)->rwsem);
-+ UNIONFS_SB(sb)->write_lock_owner = 0;
-+}
-+
-+static inline void unionfs_double_lock_dentry(struct dentry *d1,
-+ struct dentry *d2)
-+{
-+ BUG_ON(d1 == d2);
-+ if (d1 < d2) {
-+ unionfs_lock_dentry(d1, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(d2, UNIONFS_DMUTEX_CHILD);
-+ } else {
-+ unionfs_lock_dentry(d2, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(d1, UNIONFS_DMUTEX_CHILD);
-+ }
-+}
-+
-+static inline void unionfs_double_unlock_dentry(struct dentry *d1,
-+ struct dentry *d2)
-+{
-+ BUG_ON(d1 == d2);
-+ if (d1 < d2) { /* unlock in reverse order than double_lock_dentry */
-+ unionfs_unlock_dentry(d1);
-+ unionfs_unlock_dentry(d2);
-+ } else {
-+ unionfs_unlock_dentry(d2);
-+ unionfs_unlock_dentry(d1);
-+ }
-+}
-+
-+static inline void unionfs_double_lock_parents(struct dentry *p1,
-+ struct dentry *p2)
-+{
-+ if (p1 == p2) {
-+ unionfs_lock_dentry(p1, UNIONFS_DMUTEX_REVAL_PARENT);
-+ return;
-+ }
-+ if (p1 < p2) {
-+ unionfs_lock_dentry(p1, UNIONFS_DMUTEX_REVAL_PARENT);
-+ unionfs_lock_dentry(p2, UNIONFS_DMUTEX_REVAL_CHILD);
-+ } else {
-+ unionfs_lock_dentry(p2, UNIONFS_DMUTEX_REVAL_PARENT);
-+ unionfs_lock_dentry(p1, UNIONFS_DMUTEX_REVAL_CHILD);
-+ }
-+}
-+
-+static inline void unionfs_double_unlock_parents(struct dentry *p1,
-+ struct dentry *p2)
-+{
-+ if (p1 == p2) {
-+ unionfs_unlock_dentry(p1);
-+ return;
-+ }
-+ if (p1 < p2) { /* unlock in reverse order of double_lock_parents */
-+ unionfs_unlock_dentry(p1);
-+ unionfs_unlock_dentry(p2);
-+ } else {
-+ unionfs_unlock_dentry(p2);
-+ unionfs_unlock_dentry(p1);
-+ }
-+}
-+
-+extern int new_dentry_private_data(struct dentry *dentry, int subclass);
-+extern int realloc_dentry_private_data(struct dentry *dentry);
-+extern void free_dentry_private_data(struct dentry *dentry);
-+extern void update_bstart(struct dentry *dentry);
-+extern int init_lower_nd(struct nameidata *nd, unsigned int flags);
-+extern void release_lower_nd(struct nameidata *nd, int err);
-+
-+/*
-+ * EXTERNALS:
-+ */
-+
-+/* replicates the directory structure up to given dentry in given branch */
-+extern struct dentry *create_parents(struct inode *dir, struct dentry *dentry,
-+ const char *name, int bindex);
-+
-+/* partial lookup */
-+extern int unionfs_partial_lookup(struct dentry *dentry,
-+ struct dentry *parent);
-+extern struct dentry *unionfs_lookup_full(struct dentry *dentry,
-+ struct dentry *parent,
-+ int lookupmode);
-+
-+/* copies a file from dbstart to newbindex branch */
-+extern int copyup_file(struct inode *dir, struct file *file, int bstart,
-+ int newbindex, loff_t size);
-+extern int copyup_named_file(struct inode *dir, struct file *file,
-+ char *name, int bstart, int new_bindex,
-+ loff_t len);
-+/* copies a dentry from dbstart to newbindex branch */
-+extern int copyup_dentry(struct inode *dir, struct dentry *dentry,
-+ int bstart, int new_bindex, const char *name,
-+ int namelen, struct file **copyup_file, loff_t len);
-+/* helper functions for post-copyup actions */
-+extern void unionfs_postcopyup_setmnt(struct dentry *dentry);
-+extern void unionfs_postcopyup_release(struct dentry *dentry);
-+
-+/* Is this directory empty: 0 if it is empty, -ENOTEMPTY if not. */
-+extern int check_empty(struct dentry *dentry, struct dentry *parent,
-+ struct unionfs_dir_state **namelist);
-+/* whiteout and opaque directory helpers */
-+extern char *alloc_whname(const char *name, int len);
-+extern bool is_whiteout_name(char **namep, int *namelenp);
-+extern bool is_validname(const char *name);
-+extern struct dentry *lookup_whiteout(const char *name,
-+ struct dentry *lower_parent);
-+extern struct dentry *find_first_whiteout(struct dentry *dentry);
-+extern int unlink_whiteout(struct dentry *wh_dentry);
-+extern int check_unlink_whiteout(struct dentry *dentry,
-+ struct dentry *lower_dentry, int bindex);
-+extern int create_whiteout(struct dentry *dentry, int start);
-+extern int delete_whiteouts(struct dentry *dentry, int bindex,
-+ struct unionfs_dir_state *namelist);
-+extern int is_opaque_dir(struct dentry *dentry, int bindex);
-+extern int make_dir_opaque(struct dentry *dir, int bindex);
-+extern void unionfs_set_max_namelen(long *namelen);
-+
-+extern void unionfs_reinterpose(struct dentry *this_dentry);
-+extern struct super_block *unionfs_duplicate_super(struct super_block *sb);
-+
-+/* Locking functions. */
-+extern int unionfs_setlk(struct file *file, int cmd, struct file_lock *fl);
-+extern int unionfs_getlk(struct file *file, struct file_lock *fl);
-+
-+/* Common file operations. */
-+extern int unionfs_file_revalidate(struct file *file, struct dentry *parent,
-+ bool willwrite);
-+extern int unionfs_open(struct inode *inode, struct file *file);
-+extern int unionfs_file_release(struct inode *inode, struct file *file);
-+extern int unionfs_flush(struct file *file, fl_owner_t id);
-+extern long unionfs_ioctl(struct file *file, unsigned int cmd,
-+ unsigned long arg);
-+extern int unionfs_fsync(struct file *file, int datasync);
-+extern int unionfs_fasync(int fd, struct file *file, int flag);
-+
-+/* Inode operations */
-+extern struct inode *unionfs_iget(struct super_block *sb, unsigned long ino);
-+extern int unionfs_rename(struct inode *old_dir, struct dentry *old_dentry,
-+ struct inode *new_dir, struct dentry *new_dentry);
-+extern int unionfs_unlink(struct inode *dir, struct dentry *dentry);
-+extern int unionfs_rmdir(struct inode *dir, struct dentry *dentry);
-+
-+extern bool __unionfs_d_revalidate(struct dentry *dentry,
-+ struct dentry *parent, bool willwrite);
-+extern bool is_negative_lower(const struct dentry *dentry);
-+extern bool is_newer_lower(const struct dentry *dentry);
-+extern void purge_sb_data(struct super_block *sb);
-+
-+/* The values for unionfs_interpose's flag. */
-+#define INTERPOSE_DEFAULT 0
-+#define INTERPOSE_LOOKUP 1
-+#define INTERPOSE_REVAL 2
-+#define INTERPOSE_REVAL_NEG 3
-+#define INTERPOSE_PARTIAL 4
-+
-+extern struct dentry *unionfs_interpose(struct dentry *this_dentry,
-+ struct super_block *sb, int flag);
-+
-+#ifdef CONFIG_UNION_FS_XATTR
-+/* Extended attribute functions. */
-+extern void *unionfs_xattr_alloc(size_t size, size_t limit);
-+static inline void unionfs_xattr_kfree(const void *p)
-+{
-+ kfree(p);
-+}
-+extern ssize_t unionfs_getxattr(struct dentry *dentry, const char *name,
-+ void *value, size_t size);
-+extern int unionfs_removexattr(struct dentry *dentry, const char *name);
-+extern ssize_t unionfs_listxattr(struct dentry *dentry, char *list,
-+ size_t size);
-+extern int unionfs_setxattr(struct dentry *dentry, const char *name,
-+ const void *value, size_t size, int flags);
-+#endif /* CONFIG_UNION_FS_XATTR */
-+
-+/* The root directory is unhashed, but isn't deleted. */
-+static inline int d_deleted(struct dentry *d)
-+{
-+ return d_unhashed(d) && (d != d->d_sb->s_root);
-+}
-+
-+/* unionfs_permission, check if we should bypass error to facilitate copyup */
-+#define IS_COPYUP_ERR(err) ((err) == -EROFS)
-+
-+/* unionfs_open, check if we need to copyup the file */
-+#define OPEN_WRITE_FLAGS (O_WRONLY | O_RDWR | O_APPEND)
-+#define IS_WRITE_FLAG(flag) ((flag) & OPEN_WRITE_FLAGS)
-+
-+static inline int branchperms(const struct super_block *sb, int index)
-+{
-+ BUG_ON(index < 0);
-+ return UNIONFS_SB(sb)->data[index].branchperms;
-+}
-+
-+static inline int set_branchperms(struct super_block *sb, int index, int perms)
-+{
-+ BUG_ON(index < 0);
-+ UNIONFS_SB(sb)->data[index].branchperms = perms;
-+ return perms;
-+}
-+
-+/* check if readonly lower inode, but possibly unlinked (no inode->i_sb) */
-+static inline int __is_rdonly(const struct inode *inode)
-+{
-+ /* if unlinked, can't be readonly (?) */
-+ if (!inode->i_sb)
-+ return 0;
-+ return IS_RDONLY(inode);
-+
-+}
-+/* Is this file on a read-only branch? */
-+static inline int is_robranch_super(const struct super_block *sb, int index)
-+{
-+ int ret;
-+
-+ ret = (!(branchperms(sb, index) & MAY_WRITE)) ? -EROFS : 0;
-+ return ret;
-+}
-+
-+/* Is this file on a read-only branch? */
-+static inline int is_robranch_idx(const struct dentry *dentry, int index)
-+{
-+ struct super_block *lower_sb;
-+
-+ BUG_ON(index < 0);
-+
-+ if (!(branchperms(dentry->d_sb, index) & MAY_WRITE))
-+ return -EROFS;
-+
-+ lower_sb = unionfs_lower_super_idx(dentry->d_sb, index);
-+ BUG_ON(lower_sb == NULL);
-+ /*
-+ * test sb flags directly, not IS_RDONLY(lower_inode) because the
-+ * lower_dentry could be a negative.
-+ */
-+ if (lower_sb->s_flags & MS_RDONLY)
-+ return -EROFS;
-+
-+ return 0;
-+}
-+
-+static inline int is_robranch(const struct dentry *dentry)
-+{
-+ int index;
-+
-+ index = UNIONFS_D(dentry)->bstart;
-+ BUG_ON(index < 0);
-+
-+ return is_robranch_idx(dentry, index);
-+}
-+
-+/*
-+ * EXTERNALS:
-+ */
-+extern int check_branch(struct nameidata *nd);
-+extern int parse_branch_mode(const char *name, int *perms);
-+
-+/* locking helpers */
-+static inline struct dentry *lock_parent(struct dentry *dentry)
-+{
-+ struct dentry *dir = dget_parent(dentry);
-+ mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
-+ return dir;
-+}
-+static inline struct dentry *lock_parent_wh(struct dentry *dentry)
-+{
-+ struct dentry *dir = dget_parent(dentry);
-+
-+ mutex_lock_nested(&dir->d_inode->i_mutex, UNIONFS_DMUTEX_WHITEOUT);
-+ return dir;
-+}
-+
-+static inline void unlock_dir(struct dentry *dir)
-+{
-+ mutex_unlock(&dir->d_inode->i_mutex);
-+ dput(dir);
-+}
-+
-+/* lock base inode mutex before calling lookup_one_len */
-+static inline struct dentry *lookup_lck_len(const char *name,
-+ struct dentry *base, int len)
-+{
-+ struct dentry *d;
-+ mutex_lock(&base->d_inode->i_mutex);
-+ d = lookup_one_len(name, base, len);
-+ mutex_unlock(&base->d_inode->i_mutex);
-+ return d;
-+}
-+
-+static inline struct vfsmount *unionfs_mntget(struct dentry *dentry,
-+ int bindex)
-+{
-+ struct vfsmount *mnt;
-+
-+ BUG_ON(!dentry || bindex < 0);
-+
-+ mnt = mntget(unionfs_lower_mnt_idx(dentry, bindex));
-+#ifdef CONFIG_UNION_FS_DEBUG
-+ if (!mnt)
-+ pr_debug("unionfs: mntget: mnt=%p bindex=%d\n",
-+ mnt, bindex);
-+#endif /* CONFIG_UNION_FS_DEBUG */
-+
-+ return mnt;
-+}
-+
-+static inline void unionfs_mntput(struct dentry *dentry, int bindex)
-+{
-+ struct vfsmount *mnt;
-+
-+ if (!dentry && bindex < 0)
-+ return;
-+ BUG_ON(!dentry || bindex < 0);
-+
-+ mnt = unionfs_lower_mnt_idx(dentry, bindex);
-+#ifdef CONFIG_UNION_FS_DEBUG
-+ /*
-+ * Directories can have NULL lower objects in between start/end, but
-+ * NOT if at the start/end range. We cannot verify that this dentry
-+ * is a type=DIR, because it may already be a negative dentry. But
-+ * if dbstart is greater than dbend, we know that this couldn't have
-+ * been a regular file: it had to have been a directory.
-+ */
-+ if (!mnt && !(bindex > dbstart(dentry) && bindex < dbend(dentry)))
-+ pr_debug("unionfs: mntput: mnt=%p bindex=%d\n", mnt, bindex);
-+#endif /* CONFIG_UNION_FS_DEBUG */
-+ mntput(mnt);
-+}
-+
-+#ifdef CONFIG_UNION_FS_DEBUG
-+
-+/* useful for tracking code reachability */
-+#define UDBG pr_debug("DBG:%s:%s:%d\n", __FILE__, __func__, __LINE__)
-+
-+#define unionfs_check_inode(i) __unionfs_check_inode((i), \
-+ __FILE__, __func__, __LINE__)
-+#define unionfs_check_dentry(d) __unionfs_check_dentry((d), \
-+ __FILE__, __func__, __LINE__)
-+#define unionfs_check_file(f) __unionfs_check_file((f), \
-+ __FILE__, __func__, __LINE__)
-+#define unionfs_check_nd(n) __unionfs_check_nd((n), \
-+ __FILE__, __func__, __LINE__)
-+#define show_branch_counts(sb) __show_branch_counts((sb), \
-+ __FILE__, __func__, __LINE__)
-+#define show_inode_times(i) __show_inode_times((i), \
-+ __FILE__, __func__, __LINE__)
-+#define show_dinode_times(d) __show_dinode_times((d), \
-+ __FILE__, __func__, __LINE__)
-+#define show_inode_counts(i) __show_inode_counts((i), \
-+ __FILE__, __func__, __LINE__)
-+
-+extern void __unionfs_check_inode(const struct inode *inode, const char *fname,
-+ const char *fxn, int line);
-+extern void __unionfs_check_dentry(const struct dentry *dentry,
-+ const char *fname, const char *fxn,
-+ int line);
-+extern void __unionfs_check_file(const struct file *file,
-+ const char *fname, const char *fxn, int line);
-+extern void __unionfs_check_nd(const struct nameidata *nd,
-+ const char *fname, const char *fxn, int line);
-+extern void __show_branch_counts(const struct super_block *sb,
-+ const char *file, const char *fxn, int line);
-+extern void __show_inode_times(const struct inode *inode,
-+ const char *file, const char *fxn, int line);
-+extern void __show_dinode_times(const struct dentry *dentry,
-+ const char *file, const char *fxn, int line);
-+extern void __show_inode_counts(const struct inode *inode,
-+ const char *file, const char *fxn, int line);
-+
-+#else /* not CONFIG_UNION_FS_DEBUG */
-+
-+/* we leave useful hooks for these check functions throughout the code */
-+#define unionfs_check_inode(i) do { } while (0)
-+#define unionfs_check_dentry(d) do { } while (0)
-+#define unionfs_check_file(f) do { } while (0)
-+#define unionfs_check_nd(n) do { } while (0)
-+#define show_branch_counts(sb) do { } while (0)
-+#define show_inode_times(i) do { } while (0)
-+#define show_dinode_times(d) do { } while (0)
-+#define show_inode_counts(i) do { } while (0)
-+
-+#endif /* not CONFIG_UNION_FS_DEBUG */
-+
-+#endif /* not _UNION_H_ */
-diff --git a/fs/unionfs/unlink.c b/fs/unionfs/unlink.c
-new file mode 100644
-index 0000000..542c513
---- /dev/null
-+++ b/fs/unionfs/unlink.c
-@@ -0,0 +1,278 @@
-+/*
-+ * Copyright (c) 2003-2010 Erez Zadok
-+ * Copyright (c) 2003-2006 Charles P. Wright
-+ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2005-2006 Junjiro Okajima
-+ * Copyright (c) 2005 Arun M. Krishnakumar
-+ * Copyright (c) 2004-2006 David P. Quigley
-+ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
-+ * Copyright (c) 2003 Puja Gupta
-+ * Copyright (c) 2003 Harikesavan Krishnan
-+ * Copyright (c) 2003-2010 Stony Brook University
-+ * Copyright (c) 2003-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include "union.h"
-+
-+/*
-+ * Helper function for Unionfs's unlink operation.
-+ *
-+ * The main goal of this function is to optimize the unlinking of non-dir
-+ * objects in unionfs by deleting all possible lower inode objects from the
-+ * underlying branches having same dentry name as the non-dir dentry on
-+ * which this unlink operation is called. This way we delete as many lower
-+ * inodes as possible, and save space. Whiteouts need to be created in
-+ * branch0 only if unlinking fails on any of the lower branch other than
-+ * branch0, or if a lower branch is marked read-only.
-+ *
-+ * Also, while unlinking a file, if we encounter any dir type entry in any
-+ * intermediate branch, then we remove the directory by calling vfs_rmdir.
-+ * The following special cases are also handled:
-+
-+ * (1) If an error occurs in branch0 during vfs_unlink, then we return
-+ * appropriate error.
-+ *
-+ * (2) If we get an error during unlink in any of other lower branch other
-+ * than branch0, then we create a whiteout in branch0.
-+ *
-+ * (3) If a whiteout already exists in any intermediate branch, we delete
-+ * all possible inodes only up to that branch (this is an "opaqueness"
-+ * as as per Documentation/filesystems/unionfs/concepts.txt).
-+ *
-+ */
-+static int unionfs_unlink_whiteout(struct inode *dir, struct dentry *dentry,
-+ struct dentry *parent)
-+{
-+ struct dentry *lower_dentry;
-+ struct dentry *lower_dir_dentry;
-+ int bindex;
-+ int err = 0;
-+
-+ err = unionfs_partial_lookup(dentry, parent);
-+ if (err)
-+ goto out;
-+
-+ /* trying to unlink all possible valid instances */
-+ for (bindex = dbstart(dentry); bindex <= dbend(dentry); bindex++) {
-+ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
-+ if (!lower_dentry || !lower_dentry->d_inode)
-+ continue;
-+
-+ lower_dir_dentry = lock_parent(lower_dentry);
-+
-+ /* avoid destroying the lower inode if the object is in use */
-+ dget(lower_dentry);
-+ err = is_robranch_super(dentry->d_sb, bindex);
-+ if (!err) {
-+ /* see Documentation/filesystems/unionfs/issues.txt */
-+ lockdep_off();
-+ if (!S_ISDIR(lower_dentry->d_inode->i_mode))
-+ err = vfs_unlink(lower_dir_dentry->d_inode,
-+ lower_dentry);
-+ else
-+ err = vfs_rmdir(lower_dir_dentry->d_inode,
-+ lower_dentry);
-+ lockdep_on();
-+ }
-+
-+ /* if lower object deletion succeeds, update inode's times */
-+ if (!err)
-+ unionfs_copy_attr_times(dentry->d_inode);
-+ dput(lower_dentry);
-+ fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
-+ unlock_dir(lower_dir_dentry);
-+
-+ if (err)
-+ break;
-+ }
-+
-+ /*
-+ * Create the whiteout in branch 0 (highest priority) only if (a)
-+ * there was an error in any intermediate branch other than branch 0
-+ * due to failure of vfs_unlink/vfs_rmdir or (b) a branch marked or
-+ * mounted read-only.
-+ */
-+ if (err) {
-+ if ((bindex == 0) ||
-+ ((bindex == dbstart(dentry)) &&
-+ (!IS_COPYUP_ERR(err))))
-+ goto out;
-+ else {
-+ if (!IS_COPYUP_ERR(err))
-+ pr_debug("unionfs: lower object deletion "
-+ "failed in branch:%d\n", bindex);
-+ err = create_whiteout(dentry, sbstart(dentry->d_sb));
-+ }
-+ }
-+
-+out:
-+ if (!err)
-+ inode_dec_link_count(dentry->d_inode);
-+
-+ /* We don't want to leave negative leftover dentries for revalidate. */
-+ if (!err && (dbopaque(dentry) != -1))
-+ update_bstart(dentry);
-+
-+ return err;
-+}
-+
-+int unionfs_unlink(struct inode *dir, struct dentry *dentry)
-+{
-+ int err = 0;
-+ struct inode *inode = dentry->d_inode;
-+ struct dentry *parent;
-+ int valid;
-+
-+ BUG_ON(S_ISDIR(inode->i_mode));
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ valid = __unionfs_d_revalidate(dentry, parent, false);
-+ if (unlikely(!valid)) {
-+ err = -ESTALE;
-+ goto out;
-+ }
-+ unionfs_check_dentry(dentry);
-+
-+ err = unionfs_unlink_whiteout(dir, dentry, parent);
-+ /* call d_drop so the system "forgets" about us */
-+ if (!err) {
-+ unionfs_postcopyup_release(dentry);
-+ unionfs_postcopyup_setmnt(parent);
-+ if (inode->i_nlink == 0) /* drop lower inodes */
-+ iput_lowers_all(inode, false);
-+ d_drop(dentry);
-+ /*
-+ * if unlink/whiteout succeeded, parent dir mtime has
-+ * changed
-+ */
-+ unionfs_copy_attr_times(dir);
-+ }
-+
-+out:
-+ if (!err) {
-+ unionfs_check_dentry(dentry);
-+ unionfs_check_inode(dir);
-+ }
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+ return err;
-+}
-+
-+static int unionfs_rmdir_first(struct inode *dir, struct dentry *dentry,
-+ struct unionfs_dir_state *namelist)
-+{
-+ int err;
-+ struct dentry *lower_dentry;
-+ struct dentry *lower_dir_dentry = NULL;
-+
-+ /* Here we need to remove whiteout entries. */
-+ err = delete_whiteouts(dentry, dbstart(dentry), namelist);
-+ if (err)
-+ goto out;
-+
-+ lower_dentry = unionfs_lower_dentry(dentry);
-+
-+ lower_dir_dentry = lock_parent(lower_dentry);
-+
-+ /* avoid destroying the lower inode if the file is in use */
-+ dget(lower_dentry);
-+ err = is_robranch(dentry);
-+ if (!err)
-+ err = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry);
-+ dput(lower_dentry);
-+
-+ fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
-+ /* propagate number of hard-links */
-+ dentry->d_inode->i_nlink = unionfs_get_nlinks(dentry->d_inode);
-+
-+out:
-+ if (lower_dir_dentry)
-+ unlock_dir(lower_dir_dentry);
-+ return err;
-+}
-+
-+int unionfs_rmdir(struct inode *dir, struct dentry *dentry)
-+{
-+ int err = 0;
-+ struct unionfs_dir_state *namelist = NULL;
-+ struct dentry *parent;
-+ int dstart, dend;
-+ bool valid;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ valid = __unionfs_d_revalidate(dentry, parent, false);
-+ if (unlikely(!valid)) {
-+ err = -ESTALE;
-+ goto out;
-+ }
-+ unionfs_check_dentry(dentry);
-+
-+ /* check if this unionfs directory is empty or not */
-+ err = check_empty(dentry, parent, &namelist);
-+ if (err)
-+ goto out;
-+
-+ err = unionfs_rmdir_first(dir, dentry, namelist);
-+ dstart = dbstart(dentry);
-+ dend = dbend(dentry);
-+ /*
-+ * We create a whiteout for the directory if there was an error to
-+ * rmdir the first directory entry in the union. Otherwise, we
-+ * create a whiteout only if there is no chance that a lower
-+ * priority branch might also have the same named directory. IOW,
-+ * if there is not another same-named directory at a lower priority
-+ * branch, then we don't need to create a whiteout for it.
-+ */
-+ if (!err) {
-+ if (dstart < dend)
-+ err = create_whiteout(dentry, dstart);
-+ } else {
-+ int new_err;
-+
-+ if (dstart == 0)
-+ goto out;
-+
-+ /* exit if the error returned was NOT -EROFS */
-+ if (!IS_COPYUP_ERR(err))
-+ goto out;
-+
-+ new_err = create_whiteout(dentry, dstart - 1);
-+ if (new_err != -EEXIST)
-+ err = new_err;
-+ }
-+
-+out:
-+ /*
-+ * Drop references to lower dentry/inode so storage space for them
-+ * can be reclaimed. Then, call d_drop so the system "forgets"
-+ * about us.
-+ */
-+ if (!err) {
-+ iput_lowers_all(dentry->d_inode, false);
-+ dput(unionfs_lower_dentry_idx(dentry, dstart));
-+ unionfs_set_lower_dentry_idx(dentry, dstart, NULL);
-+ d_drop(dentry);
-+ /* update our lower vfsmnts, in case a copyup took place */
-+ unionfs_postcopyup_setmnt(dentry);
-+ unionfs_check_dentry(dentry);
-+ unionfs_check_inode(dir);
-+ }
-+
-+ if (namelist)
-+ free_rdstate(namelist);
-+
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+ return err;
-+}
-diff --git a/fs/unionfs/whiteout.c b/fs/unionfs/whiteout.c
-new file mode 100644
-index 0000000..405073a
---- /dev/null
-+++ b/fs/unionfs/whiteout.c
-@@ -0,0 +1,584 @@
-+/*
-+ * Copyright (c) 2003-2010 Erez Zadok
-+ * Copyright (c) 2003-2006 Charles P. Wright
-+ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2005-2006 Junjiro Okajima
-+ * Copyright (c) 2005 Arun M. Krishnakumar
-+ * Copyright (c) 2004-2006 David P. Quigley
-+ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
-+ * Copyright (c) 2003 Puja Gupta
-+ * Copyright (c) 2003 Harikesavan Krishnan
-+ * Copyright (c) 2003-2010 Stony Brook University
-+ * Copyright (c) 2003-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include "union.h"
-+
-+/*
-+ * whiteout and opaque directory helpers
-+ */
-+
-+/* What do we use for whiteouts. */
-+#define UNIONFS_WHPFX ".wh."
-+#define UNIONFS_WHLEN 4
-+/*
-+ * If a directory contains this file, then it is opaque. We start with the
-+ * .wh. flag so that it is blocked by lookup.
-+ */
-+#define UNIONFS_DIR_OPAQUE_NAME "__dir_opaque"
-+#define UNIONFS_DIR_OPAQUE UNIONFS_WHPFX UNIONFS_DIR_OPAQUE_NAME
-+
-+/* construct whiteout filename */
-+char *alloc_whname(const char *name, int len)
-+{
-+ char *buf;
-+
-+ buf = kmalloc(len + UNIONFS_WHLEN + 1, GFP_KERNEL);
-+ if (unlikely(!buf))
-+ return ERR_PTR(-ENOMEM);
-+
-+ strcpy(buf, UNIONFS_WHPFX);
-+ strlcat(buf, name, len + UNIONFS_WHLEN + 1);
-+
-+ return buf;
-+}
-+
-+/*
-+ * XXX: this can be inline or CPP macro, but is here to keep all whiteout
-+ * code in one place.
-+ */
-+void unionfs_set_max_namelen(long *namelen)
-+{
-+ *namelen -= UNIONFS_WHLEN;
-+}
-+
-+/* check if @namep is a whiteout, update @namep and @namelenp accordingly */
-+bool is_whiteout_name(char **namep, int *namelenp)
-+{
-+ if (*namelenp > UNIONFS_WHLEN &&
-+ !strncmp(*namep, UNIONFS_WHPFX, UNIONFS_WHLEN)) {
-+ *namep += UNIONFS_WHLEN;
-+ *namelenp -= UNIONFS_WHLEN;
-+ return true;
-+ }
-+ return false;
-+}
-+
-+/* is the filename valid == !(whiteout for a file or opaque dir marker) */
-+bool is_validname(const char *name)
-+{
-+ if (!strncmp(name, UNIONFS_WHPFX, UNIONFS_WHLEN))
-+ return false;
-+ if (!strncmp(name, UNIONFS_DIR_OPAQUE_NAME,
-+ sizeof(UNIONFS_DIR_OPAQUE_NAME) - 1))
-+ return false;
-+ return true;
-+}
-+
-+/*
-+ * Look for a whiteout @name in @lower_parent directory. If error, return
-+ * ERR_PTR. Caller must dput() the returned dentry if not an error.
-+ *
-+ * XXX: some callers can reuse the whname allocated buffer to avoid repeated
-+ * free then re-malloc calls. Need to provide a different API for those
-+ * callers.
-+ */
-+struct dentry *lookup_whiteout(const char *name, struct dentry *lower_parent)
-+{
-+ char *whname = NULL;
-+ int err = 0, namelen;
-+ struct dentry *wh_dentry = NULL;
-+
-+ namelen = strlen(name);
-+ whname = alloc_whname(name, namelen);
-+ if (unlikely(IS_ERR(whname))) {
-+ err = PTR_ERR(whname);
-+ goto out;
-+ }
-+
-+ /* check if whiteout exists in this branch: lookup .wh.foo */
-+ wh_dentry = lookup_lck_len(whname, lower_parent, strlen(whname));
-+ if (IS_ERR(wh_dentry)) {
-+ err = PTR_ERR(wh_dentry);
-+ goto out;
-+ }
-+
-+ /* check if negative dentry (ENOENT) */
-+ if (!wh_dentry->d_inode)
-+ goto out;
-+
-+ /* whiteout found: check if valid type */
-+ if (!S_ISREG(wh_dentry->d_inode->i_mode)) {
-+ printk(KERN_ERR "unionfs: invalid whiteout %s entry type %d\n",
-+ whname, wh_dentry->d_inode->i_mode);
-+ dput(wh_dentry);
-+ err = -EIO;
-+ goto out;
-+ }
-+
-+out:
-+ kfree(whname);
-+ if (err)
-+ wh_dentry = ERR_PTR(err);
-+ return wh_dentry;
-+}
-+
-+/* find and return first whiteout in parent directory, else ENOENT */
-+struct dentry *find_first_whiteout(struct dentry *dentry)
-+{
-+ int bindex, bstart, bend;
-+ struct dentry *parent, *lower_parent, *wh_dentry;
-+
-+ parent = dget_parent(dentry);
-+
-+ bstart = dbstart(parent);
-+ bend = dbend(parent);
-+ wh_dentry = ERR_PTR(-ENOENT);
-+
-+ for (bindex = bstart; bindex <= bend; bindex++) {
-+ lower_parent = unionfs_lower_dentry_idx(parent, bindex);
-+ if (!lower_parent)
-+ continue;
-+ wh_dentry = lookup_whiteout(dentry->d_name.name, lower_parent);
-+ if (IS_ERR(wh_dentry))
-+ continue;
-+ if (wh_dentry->d_inode)
-+ break;
-+ dput(wh_dentry);
-+ wh_dentry = ERR_PTR(-ENOENT);
-+ }
-+
-+ dput(parent);
-+
-+ return wh_dentry;
-+}
-+
-+/*
-+ * Unlink a whiteout dentry. Returns 0 or -errno. Caller must hold and
-+ * release dentry reference.
-+ */
-+int unlink_whiteout(struct dentry *wh_dentry)
-+{
-+ int err;
-+ struct dentry *lower_dir_dentry;
-+
-+ /* dget and lock parent dentry */
-+ lower_dir_dentry = lock_parent_wh(wh_dentry);
-+
-+ /* see Documentation/filesystems/unionfs/issues.txt */
-+ lockdep_off();
-+ err = vfs_unlink(lower_dir_dentry->d_inode, wh_dentry);
-+ lockdep_on();
-+ unlock_dir(lower_dir_dentry);
-+
-+ /*
-+ * Whiteouts are special files and should be deleted no matter what
-+ * (as if they never existed), in order to allow this create
-+ * operation to succeed. This is especially important in sticky
-+ * directories: a whiteout may have been created by one user, but
-+ * the newly created file may be created by another user.
-+ * Therefore, in order to maintain Unix semantics, if the vfs_unlink
-+ * above failed, then we have to try to directly unlink the
-+ * whiteout. Note: in the ODF version of unionfs, whiteout are
-+ * handled much more cleanly.
-+ */
-+ if (err == -EPERM) {
-+ struct inode *inode = lower_dir_dentry->d_inode;
-+ err = inode->i_op->unlink(inode, wh_dentry);
-+ }
-+ if (err)
-+ printk(KERN_ERR "unionfs: could not unlink whiteout %s, "
-+ "err = %d\n", wh_dentry->d_name.name, err);
-+
-+ return err;
-+
-+}
-+
-+/*
-+ * Helper function when creating new objects (create, symlink, mknod, etc.).
-+ * Checks to see if there's a whiteout in @lower_dentry's parent directory,
-+ * whose name is taken from @dentry. Then tries to remove that whiteout, if
-+ * found. If <dentry,bindex> is a branch marked readonly, return -EROFS.
-+ * If it finds both a regular file and a whiteout, return -EIO (this should
-+ * never happen).
-+ *
-+ * Return 0 if no whiteout was found. Return 1 if one was found and
-+ * successfully removed. Therefore a value >= 0 tells the caller that
-+ * @lower_dentry belongs to a good branch to create the new object in).
-+ * Return -ERRNO if an error occurred during whiteout lookup or in trying to
-+ * unlink the whiteout.
-+ */
-+int check_unlink_whiteout(struct dentry *dentry, struct dentry *lower_dentry,
-+ int bindex)
-+{
-+ int err;
-+ struct dentry *wh_dentry = NULL;
-+ struct dentry *lower_dir_dentry = NULL;
-+
-+ /* look for whiteout dentry first */
-+ lower_dir_dentry = dget_parent(lower_dentry);
-+ wh_dentry = lookup_whiteout(dentry->d_name.name, lower_dir_dentry);
-+ dput(lower_dir_dentry);
-+ if (IS_ERR(wh_dentry)) {
-+ err = PTR_ERR(wh_dentry);
-+ goto out;
-+ }
-+
-+ if (!wh_dentry->d_inode) { /* no whiteout exists*/
-+ err = 0;
-+ goto out_dput;
-+ }
-+
-+ /* check if regular file and whiteout were both found */
-+ if (unlikely(lower_dentry->d_inode)) {
-+ err = -EIO;
-+ printk(KERN_ERR "unionfs: found both whiteout and regular "
-+ "file in directory %s (branch %d)\n",
-+ lower_dir_dentry->d_name.name, bindex);
-+ goto out_dput;
-+ }
-+
-+ /* check if branch is writeable */
-+ err = is_robranch_super(dentry->d_sb, bindex);
-+ if (err)
-+ goto out_dput;
-+
-+ /* .wh.foo has been found, so let's unlink it */
-+ err = unlink_whiteout(wh_dentry);
-+ if (!err)
-+ err = 1; /* a whiteout was found and successfully removed */
-+out_dput:
-+ dput(wh_dentry);
-+out:
-+ return err;
-+}
-+
-+/*
-+ * Pass an unionfs dentry and an index. It will try to create a whiteout
-+ * for the filename in dentry, and will try in branch 'index'. On error,
-+ * it will proceed to a branch to the left.
-+ */
-+int create_whiteout(struct dentry *dentry, int start)
-+{
-+ int bstart, bend, bindex;
-+ struct dentry *lower_dir_dentry;
-+ struct dentry *lower_dentry;
-+ struct dentry *lower_wh_dentry;
-+ struct nameidata nd;
-+ char *name = NULL;
-+ int err = -EINVAL;
-+
-+ verify_locked(dentry);
-+
-+ bstart = dbstart(dentry);
-+ bend = dbend(dentry);
-+
-+ /* create dentry's whiteout equivalent */
-+ name = alloc_whname(dentry->d_name.name, dentry->d_name.len);
-+ if (unlikely(IS_ERR(name))) {
-+ err = PTR_ERR(name);
-+ goto out;
-+ }
-+
-+ for (bindex = start; bindex >= 0; bindex--) {
-+ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
-+
-+ if (!lower_dentry) {
-+ /*
-+ * if lower dentry is not present, create the
-+ * entire lower dentry directory structure and go
-+ * ahead. Since we want to just create whiteout, we
-+ * only want the parent dentry, and hence get rid of
-+ * this dentry.
-+ */
-+ lower_dentry = create_parents(dentry->d_inode,
-+ dentry,
-+ dentry->d_name.name,
-+ bindex);
-+ if (!lower_dentry || IS_ERR(lower_dentry)) {
-+ int ret = PTR_ERR(lower_dentry);
-+ if (!IS_COPYUP_ERR(ret))
-+ printk(KERN_ERR
-+ "unionfs: create_parents for "
-+ "whiteout failed: bindex=%d "
-+ "err=%d\n", bindex, ret);
-+ continue;
-+ }
-+ }
-+
-+ lower_wh_dentry =
-+ lookup_lck_len(name, lower_dentry->d_parent,
-+ dentry->d_name.len + UNIONFS_WHLEN);
-+ if (IS_ERR(lower_wh_dentry))
-+ continue;
-+
-+ /*
-+ * The whiteout already exists. This used to be impossible,
-+ * but now is possible because of opaqueness.
-+ */
-+ if (lower_wh_dentry->d_inode) {
-+ dput(lower_wh_dentry);
-+ err = 0;
-+ goto out;
-+ }
-+
-+ err = init_lower_nd(&nd, LOOKUP_CREATE);
-+ if (unlikely(err < 0))
-+ goto out;
-+ lower_dir_dentry = lock_parent_wh(lower_wh_dentry);
-+ err = is_robranch_super(dentry->d_sb, bindex);
-+ if (!err)
-+ err = vfs_create(lower_dir_dentry->d_inode,
-+ lower_wh_dentry,
-+ current_umask() & S_IRUGO,
-+ &nd);
-+ unlock_dir(lower_dir_dentry);
-+ dput(lower_wh_dentry);
-+ release_lower_nd(&nd, err);
-+
-+ if (!err || !IS_COPYUP_ERR(err))
-+ break;
-+ }
-+
-+ /* set dbopaque so that lookup will not proceed after this branch */
-+ if (!err)
-+ dbopaque(dentry) = bindex;
-+
-+out:
-+ kfree(name);
-+ return err;
-+}
-+
-+/*
-+ * Delete all of the whiteouts in a given directory for rmdir.
-+ *
-+ * lower directory inode should be locked
-+ */
-+static int do_delete_whiteouts(struct dentry *dentry, int bindex,
-+ struct unionfs_dir_state *namelist)
-+{
-+ int err = 0;
-+ struct dentry *lower_dir_dentry = NULL;
-+ struct dentry *lower_dentry;
-+ char *name = NULL, *p;
-+ struct inode *lower_dir;
-+ int i;
-+ struct list_head *pos;
-+ struct filldir_node *cursor;
-+
-+ /* Find out lower parent dentry */
-+ lower_dir_dentry = unionfs_lower_dentry_idx(dentry, bindex);
-+ BUG_ON(!S_ISDIR(lower_dir_dentry->d_inode->i_mode));
-+ lower_dir = lower_dir_dentry->d_inode;
-+ BUG_ON(!S_ISDIR(lower_dir->i_mode));
-+
-+ err = -ENOMEM;
-+ name = __getname();
-+ if (unlikely(!name))
-+ goto out;
-+ strcpy(name, UNIONFS_WHPFX);
-+ p = name + UNIONFS_WHLEN;
-+
-+ err = 0;
-+ for (i = 0; !err && i < namelist->size; i++) {
-+ list_for_each(pos, &namelist->list[i]) {
-+ cursor =
-+ list_entry(pos, struct filldir_node,
-+ file_list);
-+ /* Only operate on whiteouts in this branch. */
-+ if (cursor->bindex != bindex)
-+ continue;
-+ if (!cursor->whiteout)
-+ continue;
-+
-+ strlcpy(p, cursor->name, PATH_MAX - UNIONFS_WHLEN);
-+ lower_dentry =
-+ lookup_lck_len(name, lower_dir_dentry,
-+ cursor->namelen +
-+ UNIONFS_WHLEN);
-+ if (IS_ERR(lower_dentry)) {
-+ err = PTR_ERR(lower_dentry);
-+ break;
-+ }
-+ if (lower_dentry->d_inode)
-+ err = vfs_unlink(lower_dir, lower_dentry);
-+ dput(lower_dentry);
-+ if (err)
-+ break;
-+ }
-+ }
-+
-+ __putname(name);
-+
-+ /* After all of the removals, we should copy the attributes once. */
-+ fsstack_copy_attr_times(dentry->d_inode, lower_dir_dentry->d_inode);
-+
-+out:
-+ return err;
-+}
-+
-+
-+void __delete_whiteouts(struct work_struct *work)
-+{
-+ struct sioq_args *args = container_of(work, struct sioq_args, work);
-+ struct deletewh_args *d = &args->deletewh;
-+
-+ args->err = do_delete_whiteouts(d->dentry, d->bindex, d->namelist);
-+ complete(&args->comp);
-+}
-+
-+/* delete whiteouts in a dir (for rmdir operation) using sioq if necessary */
-+int delete_whiteouts(struct dentry *dentry, int bindex,
-+ struct unionfs_dir_state *namelist)
-+{
-+ int err;
-+ struct super_block *sb;
-+ struct dentry *lower_dir_dentry;
-+ struct inode *lower_dir;
-+ struct sioq_args args;
-+
-+ sb = dentry->d_sb;
-+
-+ BUG_ON(!S_ISDIR(dentry->d_inode->i_mode));
-+ BUG_ON(bindex < dbstart(dentry));
-+ BUG_ON(bindex > dbend(dentry));
-+ err = is_robranch_super(sb, bindex);
-+ if (err)
-+ goto out;
-+
-+ lower_dir_dentry = unionfs_lower_dentry_idx(dentry, bindex);
-+ BUG_ON(!S_ISDIR(lower_dir_dentry->d_inode->i_mode));
-+ lower_dir = lower_dir_dentry->d_inode;
-+ BUG_ON(!S_ISDIR(lower_dir->i_mode));
-+
-+ if (!inode_permission(lower_dir, MAY_WRITE | MAY_EXEC)) {
-+ err = do_delete_whiteouts(dentry, bindex, namelist);
-+ } else {
-+ args.deletewh.namelist = namelist;
-+ args.deletewh.dentry = dentry;
-+ args.deletewh.bindex = bindex;
-+ run_sioq(__delete_whiteouts, &args);
-+ err = args.err;
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+/****************************************************************************
-+ * Opaque directory helpers *
-+ ****************************************************************************/
-+
-+/*
-+ * is_opaque_dir: returns 0 if it is NOT an opaque dir, 1 if it is, and
-+ * -errno if an error occurred trying to figure this out.
-+ */
-+int is_opaque_dir(struct dentry *dentry, int bindex)
-+{
-+ int err = 0;
-+ struct dentry *lower_dentry;
-+ struct dentry *wh_lower_dentry;
-+ struct inode *lower_inode;
-+ struct sioq_args args;
-+
-+ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
-+ lower_inode = lower_dentry->d_inode;
-+
-+ BUG_ON(!S_ISDIR(lower_inode->i_mode));
-+
-+ mutex_lock(&lower_inode->i_mutex);
-+
-+ if (!inode_permission(lower_inode, MAY_EXEC)) {
-+ wh_lower_dentry =
-+ lookup_one_len(UNIONFS_DIR_OPAQUE, lower_dentry,
-+ sizeof(UNIONFS_DIR_OPAQUE) - 1);
-+ } else {
-+ args.is_opaque.dentry = lower_dentry;
-+ run_sioq(__is_opaque_dir, &args);
-+ wh_lower_dentry = args.ret;
-+ }
-+
-+ mutex_unlock(&lower_inode->i_mutex);
-+
-+ if (IS_ERR(wh_lower_dentry)) {
-+ err = PTR_ERR(wh_lower_dentry);
-+ goto out;
-+ }
-+
-+ /* This is an opaque dir iff wh_lower_dentry is positive */
-+ err = !!wh_lower_dentry->d_inode;
-+
-+ dput(wh_lower_dentry);
-+out:
-+ return err;
-+}
-+
-+void __is_opaque_dir(struct work_struct *work)
-+{
-+ struct sioq_args *args = container_of(work, struct sioq_args, work);
-+
-+ args->ret = lookup_one_len(UNIONFS_DIR_OPAQUE, args->is_opaque.dentry,
-+ sizeof(UNIONFS_DIR_OPAQUE) - 1);
-+ complete(&args->comp);
-+}
-+
-+int make_dir_opaque(struct dentry *dentry, int bindex)
-+{
-+ int err = 0;
-+ struct dentry *lower_dentry, *diropq;
-+ struct inode *lower_dir;
-+ struct nameidata nd;
-+ const struct cred *old_creds;
-+ struct cred *new_creds;
-+
-+ /*
-+ * Opaque directory whiteout markers are special files (like regular
-+ * whiteouts), and should appear to the users as if they don't
-+ * exist. They should be created/deleted regardless of directory
-+ * search/create permissions, but only for the duration of this
-+ * creation of the .wh.__dir_opaque: file. Note, this does not
-+ * circumvent normal ->permission).
-+ */
-+ new_creds = prepare_creds();
-+ if (unlikely(!new_creds)) {
-+ err = -ENOMEM;
-+ goto out_err;
-+ }
-+ cap_raise(new_creds->cap_effective, CAP_DAC_READ_SEARCH);
-+ cap_raise(new_creds->cap_effective, CAP_DAC_OVERRIDE);
-+ old_creds = override_creds(new_creds);
-+
-+ lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
-+ lower_dir = lower_dentry->d_inode;
-+ BUG_ON(!S_ISDIR(dentry->d_inode->i_mode) ||
-+ !S_ISDIR(lower_dir->i_mode));
-+
-+ mutex_lock(&lower_dir->i_mutex);
-+ diropq = lookup_one_len(UNIONFS_DIR_OPAQUE, lower_dentry,
-+ sizeof(UNIONFS_DIR_OPAQUE) - 1);
-+ if (IS_ERR(diropq)) {
-+ err = PTR_ERR(diropq);
-+ goto out;
-+ }
-+
-+ err = init_lower_nd(&nd, LOOKUP_CREATE);
-+ if (unlikely(err < 0))
-+ goto out;
-+ if (!diropq->d_inode)
-+ err = vfs_create(lower_dir, diropq, S_IRUGO, &nd);
-+ if (!err)
-+ dbopaque(dentry) = bindex;
-+ release_lower_nd(&nd, err);
-+
-+ dput(diropq);
-+
-+out:
-+ mutex_unlock(&lower_dir->i_mutex);
-+ revert_creds(old_creds);
-+out_err:
-+ return err;
-+}
-diff --git a/fs/unionfs/xattr.c b/fs/unionfs/xattr.c
-new file mode 100644
-index 0000000..9002e06
---- /dev/null
-+++ b/fs/unionfs/xattr.c
-@@ -0,0 +1,173 @@
-+/*
-+ * Copyright (c) 2003-2010 Erez Zadok
-+ * Copyright (c) 2003-2006 Charles P. Wright
-+ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2005-2006 Junjiro Okajima
-+ * Copyright (c) 2005 Arun M. Krishnakumar
-+ * Copyright (c) 2004-2006 David P. Quigley
-+ * Copyright (c) 2003-2004 Mohammad Nayyer Zubair
-+ * Copyright (c) 2003 Puja Gupta
-+ * Copyright (c) 2003 Harikesavan Krishnan
-+ * Copyright (c) 2003-2010 Stony Brook University
-+ * Copyright (c) 2003-2010 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include "union.h"
-+
-+/* This is lifted from fs/xattr.c */
-+void *unionfs_xattr_alloc(size_t size, size_t limit)
-+{
-+ void *ptr;
-+
-+ if (size > limit)
-+ return ERR_PTR(-E2BIG);
-+
-+ if (!size) /* size request, no buffer is needed */
-+ return NULL;
-+
-+ ptr = kmalloc(size, GFP_KERNEL);
-+ if (unlikely(!ptr))
-+ return ERR_PTR(-ENOMEM);
-+ return ptr;
-+}
-+
-+/*
-+ * BKL held by caller.
-+ * dentry->d_inode->i_mutex locked
-+ */
-+ssize_t unionfs_getxattr(struct dentry *dentry, const char *name, void *value,
-+ size_t size)
-+{
-+ struct dentry *lower_dentry = NULL;
-+ struct dentry *parent;
-+ int err = -EOPNOTSUPP;
-+ bool valid;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ valid = __unionfs_d_revalidate(dentry, parent, false);
-+ if (unlikely(!valid)) {
-+ err = -ESTALE;
-+ goto out;
-+ }
-+
-+ lower_dentry = unionfs_lower_dentry(dentry);
-+
-+ err = vfs_getxattr(lower_dentry, (char *) name, value, size);
-+
-+out:
-+ unionfs_check_dentry(dentry);
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+ return err;
-+}
-+
-+/*
-+ * BKL held by caller.
-+ * dentry->d_inode->i_mutex locked
-+ */
-+int unionfs_setxattr(struct dentry *dentry, const char *name,
-+ const void *value, size_t size, int flags)
-+{
-+ struct dentry *lower_dentry = NULL;
-+ struct dentry *parent;
-+ int err = -EOPNOTSUPP;
-+ bool valid;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ valid = __unionfs_d_revalidate(dentry, parent, false);
-+ if (unlikely(!valid)) {
-+ err = -ESTALE;
-+ goto out;
-+ }
-+
-+ lower_dentry = unionfs_lower_dentry(dentry);
-+
-+ err = vfs_setxattr(lower_dentry, (char *) name, (void *) value,
-+ size, flags);
-+
-+out:
-+ unionfs_check_dentry(dentry);
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+ return err;
-+}
-+
-+/*
-+ * BKL held by caller.
-+ * dentry->d_inode->i_mutex locked
-+ */
-+int unionfs_removexattr(struct dentry *dentry, const char *name)
-+{
-+ struct dentry *lower_dentry = NULL;
-+ struct dentry *parent;
-+ int err = -EOPNOTSUPP;
-+ bool valid;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ valid = __unionfs_d_revalidate(dentry, parent, false);
-+ if (unlikely(!valid)) {
-+ err = -ESTALE;
-+ goto out;
-+ }
-+
-+ lower_dentry = unionfs_lower_dentry(dentry);
-+
-+ err = vfs_removexattr(lower_dentry, (char *) name);
-+
-+out:
-+ unionfs_check_dentry(dentry);
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+ return err;
-+}
-+
-+/*
-+ * BKL held by caller.
-+ * dentry->d_inode->i_mutex locked
-+ */
-+ssize_t unionfs_listxattr(struct dentry *dentry, char *list, size_t size)
-+{
-+ struct dentry *lower_dentry = NULL;
-+ struct dentry *parent;
-+ int err = -EOPNOTSUPP;
-+ char *encoded_list = NULL;
-+ bool valid;
-+
-+ unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_CHILD);
-+ parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
-+ unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);
-+
-+ valid = __unionfs_d_revalidate(dentry, parent, false);
-+ if (unlikely(!valid)) {
-+ err = -ESTALE;
-+ goto out;
-+ }
-+
-+ lower_dentry = unionfs_lower_dentry(dentry);
-+
-+ encoded_list = list;
-+ err = vfs_listxattr(lower_dentry, encoded_list, size);
-+
-+out:
-+ unionfs_check_dentry(dentry);
-+ unionfs_unlock_dentry(dentry);
-+ unionfs_unlock_parent(dentry, parent);
-+ unionfs_read_unlock(dentry->d_sb);
-+ return err;
-+}
-diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h
-index da317c7..64f1ced 100644
---- a/include/linux/fs_stack.h
-+++ b/include/linux/fs_stack.h
-@@ -1,7 +1,19 @@
-+/*
-+ * Copyright (c) 2006-2009 Erez Zadok
-+ * Copyright (c) 2006-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2006-2009 Stony Brook University
-+ * Copyright (c) 2006-2009 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
- #ifndef _LINUX_FS_STACK_H
- #define _LINUX_FS_STACK_H
-
--/* This file defines generic functions used primarily by stackable
-+/*
-+ * This file defines generic functions used primarily by stackable
- * filesystems; none of these functions require i_mutex to be held.
- */
-
-diff --git a/include/linux/magic.h b/include/linux/magic.h
-index eb9800f..9770154 100644
---- a/include/linux/magic.h
-+++ b/include/linux/magic.h
-@@ -47,6 +47,8 @@
- #define REISER2FS_SUPER_MAGIC_STRING "ReIsEr2Fs"
- #define REISER2FS_JR_SUPER_MAGIC_STRING "ReIsEr3Fs"
-
-+#define UNIONFS_SUPER_MAGIC 0xf15f083d
-+
- #define SMB_SUPER_MAGIC 0x517B
- #define USBDEVICE_SUPER_MAGIC 0x9fa2
- #define CGROUP_SUPER_MAGIC 0x27e0eb
-diff --git a/include/linux/namei.h b/include/linux/namei.h
-index 05b441d..dca6f9a 100644
---- a/include/linux/namei.h
-+++ b/include/linux/namei.h
-@@ -72,6 +72,7 @@ extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
-
- extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
- int (*open)(struct inode *, struct file *));
-+extern void release_open_intent(struct nameidata *);
-
- extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
-
-diff --git a/include/linux/splice.h b/include/linux/splice.h
-index 997c3b4..54f5501 100644
---- a/include/linux/splice.h
-+++ b/include/linux/splice.h
-@@ -81,6 +81,11 @@ extern ssize_t splice_to_pipe(struct pipe_inode_info *,
- struct splice_pipe_desc *);
- extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
- splice_direct_actor *);
-+extern long vfs_splice_from(struct pipe_inode_info *pipe, struct file *out,
-+ loff_t *ppos, size_t len, unsigned int flags);
-+extern long vfs_splice_to(struct file *in, loff_t *ppos,
-+ struct pipe_inode_info *pipe, size_t len,
-+ unsigned int flags);
-
- /*
- * for dynamic pipe sizing
-diff --git a/include/linux/union_fs.h b/include/linux/union_fs.h
-new file mode 100644
-index 0000000..c84d97e
---- /dev/null
-+++ b/include/linux/union_fs.h
-@@ -0,0 +1,22 @@
-+/*
-+ * Copyright (c) 2003-2009 Erez Zadok
-+ * Copyright (c) 2005-2007 Josef 'Jeff' Sipek
-+ * Copyright (c) 2003-2009 Stony Brook University
-+ * Copyright (c) 2003-2009 The Research Foundation of SUNY
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#ifndef _LINUX_UNION_FS_H
-+#define _LINUX_UNION_FS_H
-+
-+/*
-+ * DEFINITIONS FOR USER AND KERNEL CODE:
-+ */
-+# define UNIONFS_IOCTL_INCGEN _IOR(0x15, 11, int)
-+# define UNIONFS_IOCTL_QUERYFILE _IOR(0x15, 15, int)
-+
-+#endif /* _LINUX_UNIONFS_H */
-+
-diff --git a/security/security.c b/security/security.c
-index c53949f..eb71394 100644
---- a/security/security.c
-+++ b/security/security.c
-@@ -528,6 +528,7 @@ int security_inode_permission(struct inode *inode, int mask)
- return 0;
- return security_ops->inode_permission(inode, mask);
- }
-+EXPORT_SYMBOL(security_inode_permission);
-
- int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
- {