Diffstat (limited to 'main/linux-scst/scst-2.2.0-3.2.2.patch')
-rw-r--r--  main/linux-scst/scst-2.2.0-3.2.2.patch  80901
1 file changed, 0 insertions, 80901 deletions
diff --git a/main/linux-scst/scst-2.2.0-3.2.2.patch b/main/linux-scst/scst-2.2.0-3.2.2.patch
deleted file mode 100644
index 68e90a791e..0000000000
--- a/main/linux-scst/scst-2.2.0-3.2.2.patch
+++ /dev/null
@@ -1,80901 +0,0 @@
-=== modified file 'block/blk-map.c'
---- old/block/blk-map.c 2012-01-10 22:58:17 +0000
-+++ new/block/blk-map.c 2012-01-10 23:01:21 +0000
-@@ -5,6 +5,8 @@
- #include <linux/module.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
-+#include <linux/scatterlist.h>
-+#include <linux/slab.h>
- #include <scsi/sg.h> /* for struct sg_iovec */
-
- #include "blk.h"
-@@ -275,6 +277,339 @@ int blk_rq_unmap_user(struct bio *bio)
- }
- EXPORT_SYMBOL(blk_rq_unmap_user);
-
-+struct blk_kern_sg_work {
-+ atomic_t bios_inflight;
-+ struct sg_table sg_table;
-+ struct scatterlist *src_sgl;
-+};
-+
-+static void blk_free_kern_sg_work(struct blk_kern_sg_work *bw)
-+{
-+ struct sg_table *sgt = &bw->sg_table;
-+ struct scatterlist *sg;
-+ int i;
-+
-+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
-+ struct page *pg = sg_page(sg);
-+ if (pg == NULL)
-+ break;
-+ __free_page(pg);
-+ }
-+
-+ sg_free_table(sgt);
-+ kfree(bw);
-+ return;
-+}
-+
-+static void blk_bio_map_kern_endio(struct bio *bio, int err)
-+{
-+ struct blk_kern_sg_work *bw = bio->bi_private;
-+
-+ if (bw != NULL) {
-+ /* Decrement the number of bios in flight and, if zero, free bw */
-+ BUG_ON(atomic_read(&bw->bios_inflight) <= 0);
-+ if (atomic_dec_and_test(&bw->bios_inflight)) {
-+ if ((bio_data_dir(bio) == READ) && (err == 0)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags); /* to protect KMs */
-+ sg_copy(bw->src_sgl, bw->sg_table.sgl, 0, 0,
-+ KM_BIO_DST_IRQ, KM_BIO_SRC_IRQ);
-+ local_irq_restore(flags);
-+ }
-+ blk_free_kern_sg_work(bw);
-+ }
-+ }
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static int blk_rq_copy_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work **pbw,
-+ gfp_t gfp, gfp_t page_gfp)
-+{
-+ int res = 0, i;
-+ struct scatterlist *sg;
-+ struct scatterlist *new_sgl;
-+ int new_sgl_nents;
-+ size_t len = 0, to_copy;
-+ struct blk_kern_sg_work *bw;
-+
-+ bw = kzalloc(sizeof(*bw), gfp);
-+ if (bw == NULL)
-+ goto out;
-+
-+ bw->src_sgl = sgl;
-+
-+ for_each_sg(sgl, sg, nents, i)
-+ len += sg->length;
-+ to_copy = len;
-+
-+ new_sgl_nents = PFN_UP(len);
-+
-+ res = sg_alloc_table(&bw->sg_table, new_sgl_nents, gfp);
-+ if (res != 0)
-+ goto err_free;
-+
-+ new_sgl = bw->sg_table.sgl;
-+
-+ for_each_sg(new_sgl, sg, new_sgl_nents, i) {
-+ struct page *pg;
-+
-+ pg = alloc_page(page_gfp);
-+ if (pg == NULL)
-+ goto err_free;
-+
-+ sg_assign_page(sg, pg);
-+ sg->length = min_t(size_t, PAGE_SIZE, len);
-+
-+ len -= PAGE_SIZE;
-+ }
-+
-+ if (rq_data_dir(rq) == WRITE) {
-+ /*
-+ * We need to limit the amount of copied data to to_copy, because
-+ * the last element of sgl might not be marked as last in the
-+ * SG chain.
-+ */
-+ sg_copy(new_sgl, sgl, 0, to_copy,
-+ KM_USER0, KM_USER1);
-+ }
-+
-+ *pbw = bw;
-+ /*
-+ * The REQ_COPY_USER name is misleading. It should be something like
-+ * REQ_HAS_TAIL_SPACE_FOR_PADDING.
-+ */
-+ rq->cmd_flags |= REQ_COPY_USER;
-+
-+out:
-+ return res;
-+
-+err_free:
-+ blk_free_kern_sg_work(bw);
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, struct blk_kern_sg_work *bw, gfp_t gfp)
-+{
-+ int res;
-+ struct request_queue *q = rq->q;
-+ int rw = rq_data_dir(rq);
-+ int max_nr_vecs, i;
-+ size_t tot_len;
-+ bool need_new_bio;
-+ struct scatterlist *sg, *prev_sg = NULL;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int bios;
-+
-+ if (unlikely((sgl == NULL) || (sgl->length == 0) || (nents <= 0))) {
-+ WARN_ON(1);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * Let's keep each bio allocation inside a single page to decrease
-+ * the probability of failure.
-+ */
-+ max_nr_vecs = min_t(size_t,
-+ ((PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec)),
-+ BIO_MAX_PAGES);
-+
-+ need_new_bio = true;
-+ tot_len = 0;
-+ bios = 0;
-+ for_each_sg(sgl, sg, nents, i) {
-+ struct page *page = sg_page(sg);
-+ void *page_addr = page_address(page);
-+ size_t len = sg->length, l;
-+ size_t offset = sg->offset;
-+
-+ tot_len += len;
-+ prev_sg = sg;
-+
-+ /*
-+ * Each segment must be aligned on a DMA boundary and
-+ * must not be on the stack. The last one may have an
-+ * unaligned length as long as the total length is
-+ * aligned to the DMA padding alignment.
-+ */
-+ if (i == nents - 1)
-+ l = 0;
-+ else
-+ l = len;
-+ if (((sg->offset | l) & queue_dma_alignment(q)) ||
-+ (page_addr && object_is_on_stack(page_addr + sg->offset))) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ while (len > 0) {
-+ size_t bytes;
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp, max_nr_vecs);
-+ if (bio == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_bios;
-+ }
-+
-+ if (rw == WRITE)
-+ bio->bi_rw |= REQ_WRITE;
-+
-+ bios++;
-+ bio->bi_private = bw;
-+ bio->bi_end_io = blk_bio_map_kern_endio;
-+
-+ if (hbio == NULL)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(size_t, len, PAGE_SIZE - offset);
-+
-+ rc = bio_add_pc_page(q, bio, page, bytes, offset);
-+ if (rc < bytes) {
-+ if (unlikely(need_new_bio || (rc < 0))) {
-+ if (rc < 0)
-+ res = rc;
-+ else
-+ res = -EIO;
-+ goto out_free_bios;
-+ } else {
-+ need_new_bio = true;
-+ len -= rc;
-+ offset += rc;
-+ continue;
-+ }
-+ }
-+
-+ need_new_bio = false;
-+ offset = 0;
-+ len -= bytes;
-+ page = nth_page(page, 1);
-+ }
-+ }
-+
-+ if (hbio == NULL) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ /* Total length must be aligned on DMA padding alignment */
-+ if ((tot_len & q->dma_pad_mask) &&
-+ !(rq->cmd_flags & REQ_COPY_USER)) {
-+ res = -EINVAL;
-+ goto out_free_bios;
-+ }
-+
-+ if (bw != NULL)
-+ atomic_set(&bw->bios_inflight, bios);
-+
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+
-+ blk_queue_bounce(q, &bio);
-+
-+ res = blk_rq_append_bio(q, rq, bio);
-+ if (unlikely(res != 0)) {
-+ bio->bi_next = hbio;
-+ hbio = bio;
-+ /* We can have one or more bios bounced */
-+ goto out_unmap_bios;
-+ }
-+ }
-+
-+ res = 0;
-+
-+ rq->buffer = NULL;
-+out:
-+ return res;
-+
-+out_unmap_bios:
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_bios:
-+ while (hbio != NULL) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * blk_rq_map_kern_sg - map kernel data to a request, for REQ_TYPE_BLOCK_PC
-+ * @rq: request to fill
-+ * @sgl: area to map
-+ * @nents: number of elements in @sgl
-+ * @gfp: memory allocation flags
-+ *
-+ * Description:
-+ * Data will be mapped directly if possible. Otherwise a bounce
-+ * buffer will be used.
-+ */
-+int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp)
-+{
-+ int res;
-+
-+ res = __blk_rq_map_kern_sg(rq, sgl, nents, NULL, gfp);
-+ if (unlikely(res != 0)) {
-+ struct blk_kern_sg_work *bw = NULL;
-+
-+ res = blk_rq_copy_kern_sg(rq, sgl, nents, &bw,
-+ gfp, rq->q->bounce_gfp | gfp);
-+ if (unlikely(res != 0))
-+ goto out;
-+
-+ res = __blk_rq_map_kern_sg(rq, bw->sg_table.sgl,
-+ bw->sg_table.nents, bw, gfp);
-+ if (res != 0) {
-+ blk_free_kern_sg_work(bw);
-+ goto out;
-+ }
-+ }
-+
-+ rq->buffer = NULL;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(blk_rq_map_kern_sg);
-+
-+/**
-+ * blk_rq_unmap_kern_sg - unmap a request with kernel sg
-+ * @rq: request to unmap
-+ * @err: non-zero error code
-+ *
-+ * Description:
-+ * Unmap a rq previously mapped by blk_rq_map_kern_sg(). Must be called
-+ * only in case of an error!
-+ */
-+void blk_rq_unmap_kern_sg(struct request *rq, int err)
-+{
-+ struct bio *bio = rq->bio;
-+
-+ while (bio) {
-+ struct bio *b = bio;
-+ bio = bio->bi_next;
-+ b->bi_end_io(b, err);
-+ }
-+ rq->bio = NULL;
-+
-+ return;
-+}
-+EXPORT_SYMBOL(blk_rq_unmap_kern_sg);
-+
- /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
-
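For context, here is a minimal, hypothetical usage sketch of the two helpers this hunk exports (not part of the patch): a kernel caller maps a scatterlist onto a SCSI pass-through request and executes it. The names send_sg_command, q, sgl and cdb are illustrative only, and error handling is reduced to the essentials.

#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Hypothetical sketch: map a kernel SG list onto a BLOCK_PC request. */
static int send_sg_command(struct request_queue *q, struct scatterlist *sgl,
                           int nents, unsigned char *cdb,
                           unsigned short cdb_len)
{
        struct request *rq;
        int err;

        rq = blk_get_request(q, WRITE, GFP_KERNEL);
        if (rq == NULL)
                return -ENOMEM;

        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->cmd_len = cdb_len;
        memcpy(rq->cmd, cdb, cdb_len);

        err = blk_rq_map_kern_sg(rq, sgl, nents, GFP_KERNEL);
        if (err != 0)
                goto out_put;

        /*
         * If the request had to be abandoned here, after a successful
         * mapping but before execution, blk_rq_unmap_kern_sg(rq, err)
         * would undo the mapping, per its comment above.
         */
        err = blk_execute_rq(q, NULL, rq, 0);

out_put:
        blk_put_request(rq);
        return err;
}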
-=== modified file 'include/linux/blkdev.h'
---- old/include/linux/blkdev.h 2012-01-10 22:58:17 +0000
-+++ new/include/linux/blkdev.h 2012-01-10 23:01:21 +0000
-@@ -716,6 +718,9 @@ extern int blk_rq_map_kern(struct reques
- extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
-+extern int blk_rq_map_kern_sg(struct request *rq, struct scatterlist *sgl,
-+ int nents, gfp_t gfp);
-+extern void blk_rq_unmap_kern_sg(struct request *rq, int err);
- extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
- extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
-
-=== modified file 'include/linux/scatterlist.h'
---- old/include/linux/scatterlist.h 2012-01-10 22:58:17 +0000
-+++ new/include/linux/scatterlist.h 2012-01-10 23:01:21 +0000
-@@ -3,6 +3,7 @@
-
- #include <asm/types.h>
- #include <asm/scatterlist.h>
-+#include <asm/kmap_types.h>
- #include <linux/mm.h>
- #include <linux/string.h>
- #include <asm/io.h>
-@@ -218,6 +219,10 @@ size_t sg_copy_from_buffer(struct scatte
- size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen);
-
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type);
-+
- /*
- * Maximum number of entries that will be allocated in one piece, if
- * a list larger than this is required then chaining will be utilized.
-
-=== modified file 'lib/scatterlist.c'
---- old/lib/scatterlist.c 2012-01-10 22:58:17 +0000
-+++ new/lib/scatterlist.c 2012-01-10 23:01:21 +0000
-@@ -517,3 +517,132 @@ size_t sg_copy_to_buffer(struct scatterl
- return sg_copy_buffer(sgl, nents, buf, buflen, 1);
- }
- EXPORT_SYMBOL(sg_copy_to_buffer);
-+
-+/*
-+ * Can switch to the next dst_sg element. Hence, to copy to strictly
-+ * one dst_sg element, that element must either be the last in the
-+ * chain, or copy_len must equal dst_sg->length.
-+ */
-+static int sg_copy_elem(struct scatterlist **pdst_sg, size_t *pdst_len,
-+ size_t *pdst_offs, struct scatterlist *src_sg,
-+ size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ struct scatterlist *dst_sg;
-+ size_t src_len, dst_len, src_offs, dst_offs;
-+ struct page *src_page, *dst_page;
-+
-+ dst_sg = *pdst_sg;
-+ dst_len = *pdst_len;
-+ dst_offs = *pdst_offs;
-+ dst_page = sg_page(dst_sg);
-+
-+ src_page = sg_page(src_sg);
-+ src_len = src_sg->length;
-+ src_offs = src_sg->offset;
-+
-+ do {
-+ void *saddr, *daddr;
-+ size_t n;
-+
-+ saddr = kmap_atomic(src_page +
-+ (src_offs >> PAGE_SHIFT), s_km_type) +
-+ (src_offs & ~PAGE_MASK);
-+ daddr = kmap_atomic(dst_page +
-+ (dst_offs >> PAGE_SHIFT), d_km_type) +
-+ (dst_offs & ~PAGE_MASK);
-+
-+ if (((src_offs & ~PAGE_MASK) == 0) &&
-+ ((dst_offs & ~PAGE_MASK) == 0) &&
-+ (src_len >= PAGE_SIZE) && (dst_len >= PAGE_SIZE) &&
-+ (copy_len >= PAGE_SIZE)) {
-+ copy_page(daddr, saddr);
-+ n = PAGE_SIZE;
-+ } else {
-+ n = min_t(size_t, PAGE_SIZE - (dst_offs & ~PAGE_MASK),
-+ PAGE_SIZE - (src_offs & ~PAGE_MASK));
-+ n = min(n, src_len);
-+ n = min(n, dst_len);
-+ n = min_t(size_t, n, copy_len);
-+ memcpy(daddr, saddr, n);
-+ }
-+ dst_offs += n;
-+ src_offs += n;
-+
-+ kunmap_atomic(saddr, s_km_type);
-+ kunmap_atomic(daddr, d_km_type);
-+
-+ res += n;
-+ copy_len -= n;
-+ if (copy_len == 0)
-+ goto out;
-+
-+ src_len -= n;
-+ dst_len -= n;
-+ if (dst_len == 0) {
-+ dst_sg = sg_next(dst_sg);
-+ if (dst_sg == NULL)
-+ goto out;
-+ dst_page = sg_page(dst_sg);
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+ }
-+ } while (src_len > 0);
-+
-+out:
-+ *pdst_sg = dst_sg;
-+ *pdst_len = dst_len;
-+ *pdst_offs = dst_offs;
-+ return res;
-+}
-+
-+/**
-+ * sg_copy - copy one SG vector to another
-+ * @dst_sg: destination SG
-+ * @src_sg: source SG
-+ * @nents_to_copy: maximum number of entries to copy
-+ * @copy_len: maximum amount of data to copy. If 0, then copy all.
-+ * @d_km_type: kmap_atomic type for the destination SG
-+ * @s_km_type: kmap_atomic type for the source SG
-+ *
-+ * Description:
-+ * Data from the source SG vector will be copied to the destination SG
-+ * vector. End of the vectors will be determined by sg_next() returning
-+ * NULL. Returns the number of bytes copied.
-+ */
-+int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
-+ int nents_to_copy, size_t copy_len,
-+ enum km_type d_km_type, enum km_type s_km_type)
-+{
-+ int res = 0;
-+ size_t dst_len, dst_offs;
-+
-+ if (copy_len == 0)
-+ copy_len = 0x7FFFFFFF; /* copy all */
-+
-+ if (nents_to_copy == 0)
-+ nents_to_copy = 0x7FFFFFFF; /* copy all */
-+
-+ dst_len = dst_sg->length;
-+ dst_offs = dst_sg->offset;
-+
-+ do {
-+ int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
-+ src_sg, copy_len, d_km_type, s_km_type);
-+ copy_len -= copied;
-+ res += copied;
-+ if ((copy_len == 0) || (dst_sg == NULL))
-+ goto out;
-+
-+ nents_to_copy--;
-+ if (nents_to_copy == 0)
-+ goto out;
-+
-+ src_sg = sg_next(src_sg);
-+ } while (src_sg != NULL);
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL(sg_copy);
-
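As a hedged illustration of sg_copy() (not part of the patch), the sketch below duplicates one two-page scatterlist into another. Passing 0 for both nents_to_copy and copy_len means "copy everything", and the KM_USER* kmap types suit process context; copy_two_pages and its minimal error handling are invented for this example.

#include <linux/gfp.h>
#include <linux/scatterlist.h>

static int copy_two_pages(void)
{
        struct scatterlist src[2], dst[2];
        int i, copied;

        sg_init_table(src, 2);
        sg_init_table(dst, 2);

        for (i = 0; i < 2; i++) {
                struct page *sp = alloc_page(GFP_KERNEL);
                struct page *dp = alloc_page(GFP_KERNEL);

                if (sp == NULL || dp == NULL)
                        return -ENOMEM; /* sketch: pages leak on failure */

                sg_set_page(&src[i], sp, PAGE_SIZE, 0);
                sg_set_page(&dst[i], dp, PAGE_SIZE, 0);
        }

        /* 0 for nents_to_copy and copy_len means "copy all". */
        copied = sg_copy(dst, src, 0, 0, KM_USER0, KM_USER1);

        return (copied == 2 * PAGE_SIZE) ? 0 : -EIO;
}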
-=== modified file 'include/linux/mm_types.h'
---- old/include/linux/mm_types.h 2012-01-10 22:58:17 +0000
-+++ new/include/linux/mm_types.h 2012-01-10 23:02:48 +0000
-@@ -149,6 +149,17 @@ struct page {
- */
- void *shadow;
- #endif
-+
-+#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
-+ /*
-+ * Used to implement support for notification on zero-copy TCP transfer
-+ * completion. It might look as not good to have this field here and
-+ * it's better to have it in struct sk_buff, but it would make the code
-+ * much more complicated and fragile, since all skb then would have to
-+ * contain only pages with the same value in this field.
-+ */
-+ void *net_priv;
-+#endif
- }
- /*
- * If another subsystem starts using the double word pairing for atomic
-
-=== modified file 'include/linux/net.h'
---- old/include/linux/net.h 2012-01-10 22:58:17 +0000
-+++ new/include/linux/net.h 2012-01-10 23:02:48 +0000
-@@ -61,6 +61,7 @@ typedef enum {
- #include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */
- #include <linux/kmemcheck.h>
- #include <linux/rcupdate.h>
-+#include <linux/mm.h>
-
- struct poll_table_struct;
- struct pipe_inode_info;
-@@ -289,5 +290,44 @@ extern int kernel_sock_shutdown(struct s
- MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
- "-type-" __stringify(type))
-
-+#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
-+/* Support for notification on zero-copy TCP transfer completion */
-+typedef void (*net_get_page_callback_t)(struct page *page);
-+typedef void (*net_put_page_callback_t)(struct page *page);
-+
-+extern net_get_page_callback_t net_get_page_callback;
-+extern net_put_page_callback_t net_put_page_callback;
-+
-+extern int net_set_get_put_page_callbacks(
-+ net_get_page_callback_t get_callback,
-+ net_put_page_callback_t put_callback);
-+
-+/*
-+ * See comment for net_set_get_put_page_callbacks() why those functions
-+ * don't need any protection.
-+ */
-+static inline void net_get_page(struct page *page)
-+{
-+ if (page->net_priv != 0)
-+ net_get_page_callback(page);
-+ get_page(page);
-+}
-+static inline void net_put_page(struct page *page)
-+{
-+ if (page->net_priv != 0)
-+ net_put_page_callback(page);
-+ put_page(page);
-+}
-+#else
-+static inline void net_get_page(struct page *page)
-+{
-+ get_page(page);
-+}
-+static inline void net_put_page(struct page *page)
-+{
-+ put_page(page);
-+}
-+#endif /* CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION */
-+
- #endif /* __KERNEL__ */
- #endif /* _LINUX_NET_H */
-
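To make the net_priv contract concrete, here is a hypothetical pair of callbacks (not from the patch) of the kind a zero-copy user such as iSCSI-SCST might supply: they count outstanding network references on pages whose net_priv the owner has set, and signal a completion when the last reference drops. All my_* names are invented; the registration sketch after the net/ipv4/tcp_zero_copy.c hunk below uses these functions.

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/mm_types.h>

/* Hypothetical per-page tracking object stored in page->net_priv. */
struct my_page_priv {
        atomic_t net_refs;
        struct completion *xmit_done;
};

void my_get_page_callback(struct page *page)
{
        struct my_page_priv *priv = page->net_priv;

        atomic_inc(&priv->net_refs);
}

void my_put_page_callback(struct page *page)
{
        struct my_page_priv *priv = page->net_priv;

        /* Last network reference gone: the owner may reuse the page. */
        if (atomic_dec_and_test(&priv->net_refs))
                complete(priv->xmit_done);
}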
-=== modified file 'include/linux/skbuff.h'
---- old/include/linux/skbuff.h 2012-01-10 22:58:17 +0000
-+++ new/include/linux/skbuff.h 2012-01-10 23:15:31 +0000
-@@ -1712,7 +1712,7 @@ static inline struct page *skb_frag_page
- */
- static inline void __skb_frag_ref(skb_frag_t *frag)
- {
-- get_page(skb_frag_page(frag));
-+ net_get_page(skb_frag_page(frag));
- }
-
- /**
-@@ -1735,7 +1735,7 @@ static inline void skb_frag_ref(struct s
- */
- static inline void __skb_frag_unref(skb_frag_t *frag)
- {
-- put_page(skb_frag_page(frag));
-+ net_put_page(skb_frag_page(frag));
- }
-
- /**
-
-=== modified file 'net/Kconfig'
---- old/net/Kconfig 2012-01-10 22:58:17 +0000
-+++ new/net/Kconfig 2012-01-10 23:02:48 +0000
-@@ -72,6 +72,18 @@ config INET
-
- Short answer: say Y.
-
-+config TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION
-+ bool "TCP/IP zero-copy transfer completion notification"
-+ depends on INET
-+ default SCST_ISCSI
-+ ---help---
-+ Adds support for sending a notification upon completion of a
-+ zero-copy TCP/IP transfer. This can speed up certain TCP/IP
-+ software. Currently this is only used by the iSCSI target driver
-+ iSCSI-SCST.
-+
-+ If unsure, say N.
-+
- if INET
- source "net/ipv4/Kconfig"
- source "net/ipv6/Kconfig"
-
-=== modified file 'net/core/skbuff.c'
---- old/net/core/skbuff.c 2012-01-10 22:58:17 +0000
-+++ new/net/core/skbuff.c 2012-01-10 23:02:48 +0000
-@@ -77,13 +77,13 @@ static struct kmem_cache *skbuff_fclone_
- static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
- {
-- put_page(buf->page);
-+ net_put_page(buf->page);
- }
-
- static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
- {
-- get_page(buf->page);
-+ net_get_page(buf->page);
- }
-
- static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
-@@ -654,7 +654,7 @@ int skb_copy_ubufs(struct sk_buff *skb,
- if (!page) {
- while (head) {
- struct page *next = (struct page *)head->private;
-- put_page(head);
-+ net_put_page(head);
- head = next;
- }
- return -ENOMEM;
-@@ -1493,7 +1493,7 @@ EXPORT_SYMBOL(skb_copy_bits);
- */
- static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
- {
-- put_page(spd->pages[i]);
-+ net_put_page(spd->pages[i]);
- }
-
- static inline struct page *linear_to_page(struct page *page, unsigned int *len,
-@@ -1517,7 +1517,7 @@ new_page:
- off = sk->sk_sndmsg_off;
- mlen = PAGE_SIZE - off;
- if (mlen < 64 && mlen < *len) {
-- put_page(p);
-+ net_put_page(p);
- goto new_page;
- }
-
-@@ -1527,7 +1527,7 @@ new_page:
- memcpy(page_address(p) + off, page_address(page) + *offset, *len);
- sk->sk_sndmsg_off += *len;
- *offset = off;
-- get_page(p);
-+ net_get_page(p);
-
- return p;
- }
-@@ -1549,7 +1549,7 @@ static inline int spd_fill_page(struct s
- if (!page)
- return 1;
- } else
-- get_page(page);
-+ net_get_page(page);
-
- spd->pages[spd->nr_pages] = page;
- spd->partial[spd->nr_pages].len = *len;
-
-=== modified file 'net/ipv4/Makefile'
---- old/net/ipv4/Makefile 2012-01-10 22:58:17 +0000
-+++ new/net/ipv4/Makefile 2012-01-10 23:02:48 +0000
-@@ -48,6 +48,7 @@ obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
- obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
- obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
- obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
-+obj-$(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION) += tcp_zero_copy.o
-
- obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
- xfrm4_output.o
-
-=== modified file 'net/ipv4/ip_output.c'
---- old/net/ipv4/ip_output.c 2012-01-10 22:58:17 +0000
-+++ new/net/ipv4/ip_output.c 2012-01-10 23:02:48 +0000
-@@ -1232,7 +1232,7 @@ ssize_t ip_append_page(struct sock *sk,
- if (skb_can_coalesce(skb, i, page, offset)) {
- skb_frag_size_add(&skb_shinfo(skb)->frags[i-1], len);
- } else if (i < MAX_SKB_FRAGS) {
-- get_page(page);
-+ net_get_page(page);
- skb_fill_page_desc(skb, i, page, offset, len);
- } else {
- err = -EMSGSIZE;
-
-=== modified file 'net/ipv4/tcp.c'
---- old/net/ipv4/tcp.c 2012-01-10 22:58:17 +0000
-+++ new/net/ipv4/tcp.c 2012-01-10 23:02:48 +0000
-@@ -815,7 +815,7 @@ new_segment:
- if (can_coalesce) {
- skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
- } else {
-- get_page(page);
-+ net_get_page(page);
- skb_fill_page_desc(skb, i, page, offset, copy);
- }
-
-@@ -1022,7 +1022,7 @@ new_segment:
- goto new_segment;
- } else if (page) {
- if (off == PAGE_SIZE) {
-- put_page(page);
-+ net_put_page(page);
- TCP_PAGE(sk) = page = NULL;
- off = 0;
- }
-@@ -1062,9 +1062,9 @@ new_segment:
- } else {
- skb_fill_page_desc(skb, i, page, off, copy);
- if (TCP_PAGE(sk)) {
-- get_page(page);
-+ net_get_page(page);
- } else if (off + copy < PAGE_SIZE) {
-- get_page(page);
-+ net_get_page(page);
- TCP_PAGE(sk) = page;
- }
- }
-
-=== added file 'net/ipv4/tcp_zero_copy.c'
---- old/net/ipv4/tcp_zero_copy.c 1970-01-01 00:00:00 +0000
-+++ new/net/ipv4/tcp_zero_copy.c 2012-01-10 23:43:22 +0000
-@@ -0,0 +1,50 @@
-+/*
-+ * Support routines for TCP zero copy transmit
-+ *
-+ * Created by Vladislav Bolkhovitin
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * version 2 as published by the Free Software Foundation.
-+ */
-+
-+#include <linux/export.h>
-+#include <linux/skbuff.h>
-+
-+net_get_page_callback_t net_get_page_callback __read_mostly;
-+EXPORT_SYMBOL_GPL(net_get_page_callback);
-+
-+net_put_page_callback_t net_put_page_callback __read_mostly;
-+EXPORT_SYMBOL_GPL(net_put_page_callback);
-+
-+/*
-+ * The caller of this function must ensure that, at the moment it is
-+ * called, there are no pages in the system with the net_priv field set
-+ * to a non-zero value. Hence, this function, as well as net_get_page()
-+ * and net_put_page(), needs no protection.
-+ */
-+int net_set_get_put_page_callbacks(
-+ net_get_page_callback_t get_callback,
-+ net_put_page_callback_t put_callback)
-+{
-+ int res = 0;
-+
-+ if ((net_get_page_callback != NULL) && (get_callback != NULL) &&
-+ (net_get_page_callback != get_callback)) {
-+ res = -EBUSY;
-+ goto out;
-+ }
-+
-+ if ((net_put_page_callback != NULL) && (put_callback != NULL) &&
-+ (net_put_page_callback != put_callback)) {
-+ res = -EBUSY;
-+ goto out;
-+ }
-+
-+ net_get_page_callback = get_callback;
-+ net_put_page_callback = put_callback;
-+
-+out:
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(net_set_get_put_page_callbacks);
-
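A minimal registration sketch (hypothetical, not from the patch): a module installs the callbacks sketched after the include/linux/net.h hunk above at load time and clears them at unload. Per the comment above net_set_get_put_page_callbacks(), clearing is legal only once no page in the system still carries a non-zero net_priv.

#include <linux/module.h>
#include <linux/net.h>

/* From the earlier hypothetical sketch. */
void my_get_page_callback(struct page *page);
void my_put_page_callback(struct page *page);

static int __init my_target_init(void)
{
        /* Fails with -EBUSY if another module already owns the hooks. */
        return net_set_get_put_page_callbacks(my_get_page_callback,
                                              my_put_page_callback);
}

static void __exit my_target_exit(void)
{
        net_set_get_put_page_callbacks(NULL, NULL);
}

module_init(my_target_init);
module_exit(my_target_exit);
MODULE_LICENSE("GPL");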
-diff --git a/drivers/Kconfig b/drivers/Kconfig
-index a2b902f..92e3d67 100644
---- orig/linux-3.2/drivers/Kconfig
-+++ linux-3.2/drivers/Kconfig
-@@ -22,6 +22,8 @@ source "drivers/ide/Kconfig"
-
- source "drivers/scsi/Kconfig"
-
-+source "drivers/scst/Kconfig"
-+
- source "drivers/ata/Kconfig"
-
- source "drivers/md/Kconfig"
-diff --git a/drivers/Makefile b/drivers/Makefile
-index b423bb1..f780114 100644
---- orig/linux-3.2/drivers/Makefile
-+++ linux-3.2/drivers/Makefile
-@@ -115,5 +115,6 @@ obj-$(CONFIG_VLYNQ) += vlynq/
- obj-$(CONFIG_STAGING) += staging/
- obj-y += platform/
- obj-y += ieee802154/
-+obj-$(CONFIG_SCST) += scst/
- #common clk code
- obj-y += clk/
-diff -uprN orig/linux-3.2/drivers/scst/Kconfig linux-3.2/drivers/scst/Kconfig
---- orig/linux-3.2/drivers/scst/Kconfig
-+++ linux-3.2/drivers/scst/Kconfig
-@@ -0,0 +1,255 @@
-+menu "SCSI target (SCST) support"
-+
-+config SCST
-+ tristate "SCSI target (SCST) support"
-+ depends on SCSI
-+ help
-+ SCSI target (SCST) is designed to provide a unified, consistent
-+ interface between SCSI target drivers and the Linux kernel and to
-+ simplify target driver development as much as possible. Visit
-+ http://scst.sourceforge.net for more info about it.
-+
-+config SCST_DISK
-+ tristate "SCSI target disk support"
-+ default SCST
-+ depends on SCSI && SCST
-+ help
-+ SCST pass-through device handler for disk devices.
-+
-+config SCST_TAPE
-+ tristate "SCSI target tape support"
-+ default SCST
-+ depends on SCSI && SCST
-+ help
-+ SCST pass-through device handler for tape devices.
-+
-+config SCST_CDROM
-+ tristate "SCSI target CDROM support"
-+ default SCST
-+ depends on SCSI && SCST
-+ help
-+ SCST pass-through device handler for CDROM devices.
-+
-+config SCST_MODISK
-+ tristate "SCSI target MO disk support"
-+ default SCST
-+ depends on SCSI && SCST
-+ help
-+ SCST pass-through device handler for MO disk devices.
-+
-+config SCST_CHANGER
-+ tristate "SCSI target changer support"
-+ default SCST
-+ depends on SCSI && SCST
-+ help
-+ SCST pass-through device handler for changer devices.
-+
-+config SCST_PROCESSOR
-+ tristate "SCSI target processor support"
-+ default SCST
-+ depends on SCSI && SCST
-+ help
-+ SCST pass-through device handler for processor devices.
-+
-+config SCST_RAID
-+ tristate "SCSI target storage array controller (RAID) support"
-+ default SCST
-+ depends on SCSI && SCST
-+ help
-+ SCST pass-through device handler for storage array controller (RAID) devices.
-+
-+config SCST_VDISK
-+ tristate "SCSI target virtual disk and/or CDROM support"
-+ default SCST
-+ depends on SCSI && SCST
-+ help
-+ SCST device handler for virtual disk and/or CDROM devices.
-+
-+config SCST_USER
-+ tristate "User-space SCSI target driver support"
-+ default SCST
-+ depends on SCSI && SCST && !HIGHMEM4G && !HIGHMEM64G
-+ help
-+ The SCST device handler scst_user allows implementing full-featured
-+ SCSI target devices in user space.
-+
-+ If unsure, say "N".
-+
-+config SCST_STRICT_SERIALIZING
-+ bool "Strict serialization"
-+ depends on SCST
-+ help
-+ Enable strict SCSI command serialization. When enabled, SCST sends
-+ all SCSI commands to the underlying SCSI device synchronously, one
-+ after another. This makes task management more reliable, at the cost of
-+ a performance penalty. This is most useful for stateful SCSI devices
-+ like tapes, where the result of the execution of a command
-+ depends on the device settings configured by previous commands. Disk
-+ and RAID devices are stateless in most cases. The current SCSI core
-+ in Linux doesn't allow aborting all commands reliably if they have
-+ been sent asynchronously to a stateful device.
-+ Enable this option if you use stateful device(s) and need as much
-+ error recovery reliability as possible.
-+
-+ If unsure, say "N".
-+
-+config SCST_STRICT_SECURITY
-+ bool "Strict security"
-+ depends on SCST
-+ help
-+ Makes SCST clear (zero-fill) allocated data buffers. Note: this has a
-+ significant performance penalty.
-+
-+ If unsure, say "N".
-+
-+config SCST_TEST_IO_IN_SIRQ
-+ bool "Allow test I/O from soft-IRQ context"
-+ depends on SCST
-+ help
-+ Allows SCST to submit selected SCSI commands (TUR and
-+ READ/WRITE) from soft-IRQ context (tasklets). Enabling it will
-+ decrease the number of context switches and slightly improve
-+ performance. The goal of this option is to make it possible to
-+ measure the overhead of the context switches. See more info in
-+ README.scst.
-+
-+ WARNING! Improperly used, this option can lead you to a kernel crash!
-+
-+ If unsure, say "N".
-+
-+config SCST_ABORT_CONSIDER_FINISHED_TASKS_AS_NOT_EXISTING
-+ bool "Send back UNKNOWN TASK when an already finished task is aborted"
-+ depends on SCST
-+ help
-+ Controls which response is sent by SCST to the initiator in case
-+ the initiator attempts to abort (ABORT TASK) an already finished
-+ request. If this option is enabled, the response UNKNOWN TASK is
-+ sent back to the initiator. However, some initiators, particularly
-+ the VMware iSCSI initiator, interpret the UNKNOWN TASK response as
-+ a sign that the target has misbehaved and try to RESET it. Sometimes
-+ the initiator then misbehaves itself.
-+
-+ If unsure, say "N".
-+
-+config SCST_USE_EXPECTED_VALUES
-+ bool "Prefer initiator-supplied SCSI command attributes"
-+ depends on SCST
-+ help
-+ When SCST receives a SCSI command from an initiator, such a SCSI
-+ command has both data transfer length and direction attributes.
-+ There are two possible sources for these attributes: either the
-+ values computed by SCST from its internal command translation table
-+ or the values supplied by the initiator. The former are used by
-+ default because of security reasons. Invalid initiator-supplied
-+ attributes can crash the target, especially in pass-through mode.
-+ Only consider enabling this option when SCST logs the following
-+ message: "Unknown opcode XX for YY. Should you update
-+ scst_scsi_op_table?" and when the initiator complains. Please
-+ report any unrecognized commands to scst-devel@lists.sourceforge.net.
-+
-+ If unsure, say "N".
-+
-+config SCST_EXTRACHECKS
-+ bool "Extra consistency checks"
-+ depends on SCST
-+ help
-+ Enable additional consistency checks in the SCSI middle level target
-+ code. This may be helpful for SCST developers. Enable it if you have
-+ any problems.
-+
-+ If unsure, say "N".
-+
-+config SCST_TRACING
-+ bool "Tracing support"
-+ depends on SCST
-+ default y
-+ help
-+ Enable SCSI middle level tracing support. Tracing can be controlled
-+ dynamically via sysfs interface. The traced information
-+ is sent to the kernel log and may be very helpful when analyzing
-+ the cause of a communication problem between initiator and target.
-+
-+ If unsure, say "Y".
-+
-+config SCST_DEBUG
-+ bool "Debugging support"
-+ depends on SCST
-+ select DEBUG_BUGVERBOSE
-+ help
-+ Enables support for debugging SCST. This may be helpful for SCST
-+ developers.
-+
-+ If unsure, say "N".
-+
-+config SCST_DEBUG_OOM
-+ bool "Out-of-memory debugging support"
-+ depends on SCST
-+ help
-+ Let SCST's internal memory allocation function
-+ (scst_alloc_sg_entries()) fail about once in every 10000 calls, at
-+ least if the flag __GFP_NOFAIL has not been set. This allows SCST
-+ developers to test the behavior of SCST in out-of-memory conditions.
-+ This may be helpful for SCST developers.
-+
-+ If unsure, say "N".
-+
-+config SCST_DEBUG_RETRY
-+ bool "SCSI command retry debugging support"
-+ depends on SCST
-+ help
-+ Let SCST's internal SCSI command transfer function
-+ (scst_rdy_to_xfer()) fail about once in every 100 calls. This allows
-+ SCST developers to test the behavior of SCST when SCSI queues fill
-+ up. This may be helpful for SCST developers.
-+
-+ If unsure, say "N".
-+
-+config SCST_DEBUG_SN
-+ bool "SCSI sequence number debugging support"
-+ depends on SCST
-+ help
-+ Allows testing SCSI command ordering via sequence numbers by
-+ randomly changing the type of SCSI commands into
-+ SCST_CMD_QUEUE_ORDERED, SCST_CMD_QUEUE_HEAD_OF_QUEUE or
-+ SCST_CMD_QUEUE_SIMPLE for about one in 300 SCSI commands.
-+ This may be helpful for SCST developers.
-+
-+ If unsure, say "N".
-+
-+config SCST_DEBUG_TM
-+ bool "Task management debugging support"
-+ depends on SCST_DEBUG
-+ help
-+ Enables support for debugging of SCST's task management functions.
-+ When enabled, some of the commands on LUN 0 in the default access
-+ control group will be delayed for about 60 seconds. This will
-+ cause the remote initiator to send SCSI task management functions,
-+ e.g. ABORT TASK and TARGET RESET.
-+
-+ If unsure, say "N".
-+
-+config SCST_TM_DBG_GO_OFFLINE
-+ bool "Let devices become completely unresponsive"
-+ depends on SCST_DEBUG_TM
-+ help
-+ Enable this option if you want the device to eventually become
-+ completely unresponsive. When disabled, the device will receive
-+ ABORT and RESET commands.
-+
-+config SCST_MEASURE_LATENCY
-+ bool "Commands processing latency measurement facility"
-+ depends on SCST
-+ help
-+ This option enables the command processing latency measurement
-+ facility in SCST. It provides average command processing latency
-+ statistics via the sysfs interface. You can clear already measured
-+ results by writing 0 to the corresponding sysfs file.
-+ Note: you need a non-preemptible kernel to get correct results.
-+
-+ If unsure, say "N".
-+
-+source "drivers/scst/iscsi-scst/Kconfig"
-+source "drivers/scst/scst_local/Kconfig"
-+source "drivers/scst/srpt/Kconfig"
-+
-+endmenu
-diff -uprN orig/linux-3.2/drivers/scst/Makefile linux-3.2/drivers/scst/Makefile
---- orig/linux-3.2/drivers/scst/Makefile
-+++ linux-3.2/drivers/scst/Makefile
-@@ -0,0 +1,13 @@
-+ccflags-y += -Wno-unused-parameter
-+
-+scst-y += scst_main.o
-+scst-y += scst_pres.o
-+scst-y += scst_targ.o
-+scst-y += scst_lib.o
-+scst-y += scst_sysfs.o
-+scst-y += scst_mem.o
-+scst-y += scst_tg.o
-+scst-y += scst_debug.o
-+
-+obj-$(CONFIG_SCST) += scst.o dev_handlers/ iscsi-scst/ qla2xxx-target/ \
-+ srpt/ scst_local/
-diff -uprN orig/linux-3.2/include/scst/scst.h linux-3.2/include/scst/scst.h
---- orig/linux-3.2/include/scst/scst.h
-+++ linux-3.2/include/scst/scst.h
-@@ -0,0 +1,3867 @@
-+/*
-+ * include/scst.h
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ * Copyright (C) 2010 - 2011 Bart Van Assche <bvanassche@acm.org>.
-+ *
-+ * Main SCSI target mid-level include file.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#ifndef __SCST_H
-+#define __SCST_H
-+
-+#include <linux/types.h>
-+#include <linux/blkdev.h>
-+#include <linux/interrupt.h>
-+#include <linux/wait.h>
-+#include <linux/cpumask.h>
-+
-+
-+#include <scsi/scsi_cmnd.h>
-+#include <scsi/scsi_device.h>
-+#include <scsi/scsi_eh.h>
-+#include <scsi/scsi.h>
-+
-+#include <scst/scst_const.h>
-+
-+#include <scst/scst_sgv.h>
-+
-+#define SCST_INTERFACE_VERSION \
-+ SCST_VERSION_STRING "$Revision: 3992 $" SCST_CONST_VERSION
-+
-+#define SCST_LOCAL_NAME "scst_local"
-+
-+/*************************************************************
-+ ** States of the command processing state machine. First the
-+ ** "active" states, then the "passive" ones. This ordering makes
-+ ** the generated code of the corresponding "switch" statements
-+ ** more efficient.
-+ *************************************************************/
-+
-+/* Dev handler's parse() is going to be called */
-+#define SCST_CMD_STATE_PARSE 0
-+
-+/* Allocation of the cmd's data buffer */
-+#define SCST_CMD_STATE_PREPARE_SPACE 1
-+
-+/* Calling preprocessing_done() */
-+#define SCST_CMD_STATE_PREPROCESSING_DONE 2
-+
-+/* Target driver's rdy_to_xfer() is going to be called */
-+#define SCST_CMD_STATE_RDY_TO_XFER 3
-+
-+/* Target driver's pre_exec() is going to be called */
-+#define SCST_CMD_STATE_TGT_PRE_EXEC 4
-+
-+/* Cmd is going to be sent for execution */
-+#define SCST_CMD_STATE_SEND_FOR_EXEC 5
-+
-+/* Internal post-exec checks */
-+#define SCST_CMD_STATE_PRE_DEV_DONE 6
-+
-+/* Internal MODE SELECT pages related checks */
-+#define SCST_CMD_STATE_MODE_SELECT_CHECKS 7
-+
-+/* Dev handler's dev_done() is going to be called */
-+#define SCST_CMD_STATE_DEV_DONE 8
-+
-+/* Checks before target driver's xmit_response() is called */
-+#define SCST_CMD_STATE_PRE_XMIT_RESP 9
-+
-+/* Target driver's xmit_response() is going to be called */
-+#define SCST_CMD_STATE_XMIT_RESP 10
-+
-+/* Cmd finished */
-+#define SCST_CMD_STATE_FINISHED 11
-+
-+/* Internal cmd finished */
-+#define SCST_CMD_STATE_FINISHED_INTERNAL 12
-+
-+#define SCST_CMD_STATE_LAST_ACTIVE (SCST_CMD_STATE_FINISHED_INTERNAL+100)
-+
-+/* A cmd is created, but scst_cmd_init_done() not called */
-+#define SCST_CMD_STATE_INIT_WAIT (SCST_CMD_STATE_LAST_ACTIVE+1)
-+
-+/* LUN translation (cmd->tgt_dev assignment) */
-+#define SCST_CMD_STATE_INIT (SCST_CMD_STATE_LAST_ACTIVE+2)
-+
-+/* Waiting for scst_restart_cmd() */
-+#define SCST_CMD_STATE_PREPROCESSING_DONE_CALLED (SCST_CMD_STATE_LAST_ACTIVE+3)
-+
-+/* Waiting for data from the initiator (until scst_rx_data() called) */
-+#define SCST_CMD_STATE_DATA_WAIT (SCST_CMD_STATE_LAST_ACTIVE+4)
-+
-+/*
-+ * Cmd is ready for exec (after check if its device is blocked or should
-+ * be blocked)
-+ */
-+#define SCST_CMD_STATE_START_EXEC (SCST_CMD_STATE_LAST_ACTIVE+5)
-+
-+/* Cmd is being checked if it should be executed locally */
-+#define SCST_CMD_STATE_LOCAL_EXEC (SCST_CMD_STATE_LAST_ACTIVE+6)
-+
-+/* Cmd is ready for execution */
-+#define SCST_CMD_STATE_REAL_EXEC (SCST_CMD_STATE_LAST_ACTIVE+7)
-+
-+/* Waiting for CDB's execution finish */
-+#define SCST_CMD_STATE_REAL_EXECUTING (SCST_CMD_STATE_LAST_ACTIVE+8)
-+
-+/* Waiting for response's transmission finish */
-+#define SCST_CMD_STATE_XMIT_WAIT (SCST_CMD_STATE_LAST_ACTIVE+9)
-+
-+/*************************************************************
-+ * Can be returned instead of cmd's state by dev handlers'
-+ * functions, if the command's state should be set by default
-+ *************************************************************/
-+#define SCST_CMD_STATE_DEFAULT 500
-+
-+/*************************************************************
-+ * Can be returned instead of cmd's state by dev handlers'
-+ * functions, if it is impossible to complete the requested
-+ * task in atomic context. The cmd will be restarted in thread
-+ * context.
-+ *************************************************************/
-+#define SCST_CMD_STATE_NEED_THREAD_CTX 1000
-+
-+/*************************************************************
-+ * Can be returned instead of cmd's state by dev handlers'
-+ * parse function, if the cmd processing should be stopped
-+ * for now. The cmd will be restarted by the dev handler itself.
-+ *************************************************************/
-+#define SCST_CMD_STATE_STOP 1001
-+
-+/*************************************************************
-+ ** States of mgmt command processing state machine
-+ *************************************************************/
-+
-+/* LUN translation (mcmd->tgt_dev assignment) */
-+#define SCST_MCMD_STATE_INIT 0
-+
-+/* Mgmt cmd is being processed */
-+#define SCST_MCMD_STATE_EXEC 1
-+
-+/* Waiting for affected commands done */
-+#define SCST_MCMD_STATE_WAITING_AFFECTED_CMDS_DONE 2
-+
-+/* Post actions when affected commands done */
-+#define SCST_MCMD_STATE_AFFECTED_CMDS_DONE 3
-+
-+/* Waiting for affected local commands finished */
-+#define SCST_MCMD_STATE_WAITING_AFFECTED_CMDS_FINISHED 4
-+
-+/* Target driver's task_mgmt_fn_done() is going to be called */
-+#define SCST_MCMD_STATE_DONE 5
-+
-+/* The mcmd finished */
-+#define SCST_MCMD_STATE_FINISHED 6
-+
-+/*************************************************************
-+ ** Constants for "atomic" parameter of SCST's functions
-+ *************************************************************/
-+#define SCST_NON_ATOMIC 0
-+#define SCST_ATOMIC 1
-+
-+/*************************************************************
-+ ** Values for pref_context parameter of scst_cmd_init_done(),
-+ ** scst_rx_data(), scst_restart_cmd(), scst_tgt_cmd_done()
-+ ** and scst_cmd_done()
-+ *************************************************************/
-+
-+enum scst_exec_context {
-+ /*
-+ * Direct cmd's processing (i.e. regular function calls in the current
-+ * context) sleeping is not allowed
-+ */
-+ SCST_CONTEXT_DIRECT_ATOMIC,
-+
-+ /*
-+ * Direct cmd's processing (i.e. regular function calls in the current
-+ * context), sleeping is allowed, no restrictions
-+ */
-+ SCST_CONTEXT_DIRECT,
-+
-+ /* Tasklet or thread context required for cmd's processing */
-+ SCST_CONTEXT_TASKLET,
-+
-+ /* Thread context required for cmd's processing */
-+ SCST_CONTEXT_THREAD,
-+
-+ /*
-+ * Context is the same as it was in the previous call of the
-+ * corresponding callback. For example, if the dev handler's exec()
-+ * does synchronous data reading, this value should be used for
-+ * scst_cmd_done(). The same is true if scst_tgt_cmd_done() is called
-+ * directly from the target driver's xmit_response(). Not allowed in
-+ * scst_cmd_init_done() and
-+ * scst_cmd_init_stage1_done().
-+ */
-+ SCST_CONTEXT_SAME
-+};
-+
-+/*************************************************************
-+ ** Values for status parameter of scst_rx_data()
-+ *************************************************************/
-+
-+/* Success */
-+#define SCST_RX_STATUS_SUCCESS 0
-+
-+/*
-+ * Data receiving finished with error, so set the sense and
-+ * finish the command, including xmit_response() call
-+ */
-+#define SCST_RX_STATUS_ERROR 1
-+
-+/*
-+ * Data receiving finished with error and the sense is set,
-+ * so finish the command, including xmit_response() call
-+ */
-+#define SCST_RX_STATUS_ERROR_SENSE_SET 2
-+
-+/*
-+ * Data receiving finished with fatal error, so finish the command,
-+ * but don't call xmit_response()
-+ */
-+#define SCST_RX_STATUS_ERROR_FATAL 3
-+
-+/*************************************************************
-+ ** Values for status parameter of scst_restart_cmd()
-+ *************************************************************/
-+
-+/* Success */
-+#define SCST_PREPROCESS_STATUS_SUCCESS 0
-+
-+/*
-+ * Command's processing finished with error, so set the sense and
-+ * finish the command, including xmit_response() call
-+ */
-+#define SCST_PREPROCESS_STATUS_ERROR 1
-+
-+/*
-+ * Command's processing finished with error and the sense is set,
-+ * so finish the command, including xmit_response() call
-+ */
-+#define SCST_PREPROCESS_STATUS_ERROR_SENSE_SET 2
-+
-+/*
-+ * Command's processing finished with fatal error, so finish the command,
-+ * but don't call xmit_response()
-+ */
-+#define SCST_PREPROCESS_STATUS_ERROR_FATAL 3
-+
-+/*************************************************************
-+ ** Values for AEN functions
-+ *************************************************************/
-+
-+/*
-+ * SCSI Asynchronous Event. Parameter contains SCSI sense
-+ * (Unit Attention). AENs are generated only for the following 2 UAs:
-+ * CAPACITY DATA HAS CHANGED and REPORTED LUNS DATA HAS CHANGED.
-+ * Other UAs are reported regularly as CHECK CONDITION status,
-+ * because reporting them using AENs doesn't look safe: it opens
-+ * delivery race windows even in case of untagged commands.
-+ */
-+#define SCST_AEN_SCSI 0
-+
-+/*
-+ * Notifies that CPU affinity mask on the corresponding session changed
-+ */
-+#define SCST_AEN_CPU_MASK_CHANGED 1
-+
-+/*************************************************************
-+ ** Allowed return/status codes for report_aen() callback and
-+ ** scst_set_aen_delivery_status() function
-+ *************************************************************/
-+
-+/* Success */
-+#define SCST_AEN_RES_SUCCESS 0
-+
-+/* Not supported */
-+#define SCST_AEN_RES_NOT_SUPPORTED -1
-+
-+/* Failure */
-+#define SCST_AEN_RES_FAILED -2
-+
-+/*************************************************************
-+ ** Allowed return codes for xmit_response(), rdy_to_xfer()
-+ *************************************************************/
-+
-+/* Success */
-+#define SCST_TGT_RES_SUCCESS 0
-+
-+/* Internal device queue is full, retry again later */
-+#define SCST_TGT_RES_QUEUE_FULL -1
-+
-+/*
-+ * It is impossible to complete the requested task in atomic context.
-+ * The cmd will be restarted in thread context.
-+ */
-+#define SCST_TGT_RES_NEED_THREAD_CTX -2
-+
-+/*
-+ * Fatal error, if returned by xmit_response() the cmd will
-+ * be destroyed, if by any other function, xmit_response()
-+ * will be called with HARDWARE ERROR sense data
-+ */
-+#define SCST_TGT_RES_FATAL_ERROR -3
-+
-+/*************************************************************
-+ ** Return codes for dev handler's exec()
-+ *************************************************************/
-+
-+/* The cmd is done, go to other ones */
-+#define SCST_EXEC_COMPLETED 0
-+
-+/* The cmd should be sent to SCSI mid-level */
-+#define SCST_EXEC_NOT_COMPLETED 1
-+
-+/*************************************************************
-+ ** Additional return code for dev handler's task_mgmt_fn()
-+ *************************************************************/
-+
-+/* Regular standard actions for the command should be done */
-+#define SCST_DEV_TM_NOT_COMPLETED 1
-+
-+/*************************************************************
-+ ** Session initialization phases
-+ *************************************************************/
-+
-+/* Set if session is being initialized */
-+#define SCST_SESS_IPH_INITING 0
-+
-+/* Set if the session is successfully initialized */
-+#define SCST_SESS_IPH_SUCCESS 1
-+
-+/* Set if the session initialization failed */
-+#define SCST_SESS_IPH_FAILED 2
-+
-+/* Set if session is initialized and ready */
-+#define SCST_SESS_IPH_READY 3
-+
-+/*************************************************************
-+ ** Session shutdown phases
-+ *************************************************************/
-+
-+/* Set if session is initialized and ready */
-+#define SCST_SESS_SPH_READY 0
-+
-+/* Set if session is shutting down */
-+#define SCST_SESS_SPH_SHUTDOWN 1
-+
-+/* Set if session is shutting down */
-+#define SCST_SESS_SPH_UNREG_DONE_CALLING 2
-+
-+/*************************************************************
-+ ** Session's async (atomic) flags
-+ *************************************************************/
-+
-+/* Set if the sess's hw pending work is scheduled */
-+#define SCST_SESS_HW_PENDING_WORK_SCHEDULED 0
-+
-+/*************************************************************
-+ ** Cmd's async (atomic) flags
-+ *************************************************************/
-+
-+/* Set if the cmd is aborted and ABORTED sense will be sent as the result */
-+#define SCST_CMD_ABORTED 0
-+
-+/* Set if the cmd is aborted by other initiator */
-+#define SCST_CMD_ABORTED_OTHER 1
-+
-+/* Set if no response should be sent to the target about this cmd */
-+#define SCST_CMD_NO_RESP 2
-+
-+/* Set if the cmd is dead and can be destroyed at any time */
-+#define SCST_CMD_CAN_BE_DESTROYED 3
-+
-+/*
-+ * Set if the cmd's device has TAS flag set. Used only when aborted by
-+ * other initiator.
-+ */
-+#define SCST_CMD_DEVICE_TAS 4
-+
-+/*************************************************************
-+ ** Tgt_dev's async. flags (tgt_dev_flags)
-+ *************************************************************/
-+
-+/* Set if tgt_dev has Unit Attention sense */
-+#define SCST_TGT_DEV_UA_PENDING 0
-+
-+/* Set if tgt_dev is RESERVED by another session */
-+#define SCST_TGT_DEV_RESERVED 1
-+
-+/* Set if the corresponding context should be atomic */
-+#define SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC 5
-+#define SCST_TGT_DEV_AFTER_EXEC_ATOMIC 6
-+
-+#define SCST_TGT_DEV_CLUST_POOL 11
-+
-+/*************************************************************
-+ ** I/O grouping types. When changing them, don't forget to change
-+ ** the corresponding *_STR values in scst_const.h!
-+ *************************************************************/
-+
-+/*
-+ * All initiators with the same name connected to this group will
-+ * share an IO context, one per name. All initiators with different
-+ * names will have their own IO contexts.
-+ */
-+#define SCST_IO_GROUPING_AUTO 0
-+
-+/* All initiators connected to this group will have shared IO context */
-+#define SCST_IO_GROUPING_THIS_GROUP_ONLY -1
-+
-+/* Each initiator connected to this group will have own IO context */
-+#define SCST_IO_GROUPING_NEVER -2
-+
-+/*************************************************************
-+ ** Kernel cache creation helper
-+ *************************************************************/
-+
-+/*************************************************************
-+ ** Valid_mask constants for scst_analyze_sense()
-+ *************************************************************/
-+
-+#define SCST_SENSE_KEY_VALID 1
-+#define SCST_SENSE_ASC_VALID 2
-+#define SCST_SENSE_ASCQ_VALID 4
-+
-+#define SCST_SENSE_ASCx_VALID (SCST_SENSE_ASC_VALID | \
-+ SCST_SENSE_ASCQ_VALID)
-+
-+#define SCST_SENSE_ALL_VALID (SCST_SENSE_KEY_VALID | \
-+ SCST_SENSE_ASC_VALID | \
-+ SCST_SENSE_ASCQ_VALID)
-+
-+/*************************************************************
-+ * TYPES
-+ *************************************************************/
-+
-+struct scst_tgt;
-+struct scst_session;
-+struct scst_cmd;
-+struct scst_mgmt_cmd;
-+struct scst_device;
-+struct scst_tgt_dev;
-+struct scst_dev_type;
-+struct scst_acg;
-+struct scst_acg_dev;
-+struct scst_acn;
-+struct scst_aen;
-+
-+/*
-+ * SCST uses 64-bit numbers to represent LUNs internally. The value
-+ * NO_SUCH_LUN is guaranteed to be different from every valid LUN.
-+ */
-+#define NO_SUCH_LUN ((uint64_t)-1)
-+
-+typedef enum dma_data_direction scst_data_direction;
-+
-+/*
-+ * SCST target template: defines target driver's parameters and callback
-+ * functions.
-+ *
-+ * MUST HAVEs define functions that are expected to be defined in order to
-+ * work. OPTIONAL says that there is a choice.
-+ */
-+struct scst_tgt_template {
-+ /* public: */
-+
-+ /*
-+ * The SG table size allows checking whether scatter/gather can be
-+ * used or not.
-+ */
-+ int sg_tablesize;
-+
-+ /*
-+ * True, if this target adapter uses unchecked DMA onto an ISA bus.
-+ */
-+ unsigned unchecked_isa_dma:1;
-+
-+ /*
-+ * True, if this target adapter can benefit from using SG-vector
-+ * clustering (i.e. smaller number of segments).
-+ */
-+ unsigned use_clustering:1;
-+
-+ /*
-+ * True, if this target adapter doesn't support SG-vector clustering
-+ */
-+ unsigned no_clustering:1;
-+
-+ /*
-+ * True, if corresponding function supports execution in
-+ * the atomic (non-sleeping) context
-+ */
-+ unsigned xmit_response_atomic:1;
-+ unsigned rdy_to_xfer_atomic:1;
-+
-+ /* True, if this target doesn't need "enabled" attribute */
-+ unsigned enabled_attr_not_needed:1;
-+
-+ /*
-+ * True if SCST should report that it supports ACA although it does
-+ * not yet support ACA. Necessary for the IBM virtual SCSI target
-+ * driver.
-+ */
-+ unsigned fake_aca:1;
-+
-+ /*
-+ * Preferred SCSI LUN addressing method.
-+ */
-+ enum scst_lun_addr_method preferred_addr_method;
-+
-+ /*
-+ * The maximum time in seconds a cmd can stay inside the target
-+ * hardware, i.e. after rdy_to_xfer() and xmit_response(), before
-+ * on_hw_pending_cmd_timeout() will be called, if defined.
-+ *
-+ * In the current implementation a cmd will be aborted in time t
-+ * max_hw_pending_time <= t < 2*max_hw_pending_time.
-+ */
-+ int max_hw_pending_time;
-+
-+ /*
-+ * This function is equivalent to the SCSI
-+ * queuecommand. The target should transmit the response
-+ * buffer and the status in the scst_cmd struct.
-+ * The expectation is that executing this command is NON-BLOCKING.
-+ * If it is blocking, consider setting threads_num to a non-zero number.
-+ *
-+ * After the response is actually transmitted, the target
-+ * should call the scst_tgt_cmd_done() function of the
-+ * mid-level, which will allow it to free up the command.
-+ * Returns one of the SCST_TGT_RES_* constants.
-+ *
-+ * Pay attention to "atomic" attribute of the cmd, which can be get
-+ * by scst_cmd_atomic(): it is true if the function called in the
-+ * atomic (non-sleeping) context.
-+ *
-+ * MUST HAVE
-+ */
-+ int (*xmit_response) (struct scst_cmd *cmd);
-+
-+ /*
-+ * This function informs the driver that the data
-+ * buffer corresponding to the said command has now been
-+ * allocated and it is OK to receive data for this command.
-+ * This function is necessary because a SCSI target does not
-+ * have any control over the commands it receives. Most lower
-+ * level protocols have a corresponding function which informs
-+ * the initiator that buffers have been allocated e.g., XFER_
-+ * RDY in Fibre Channel. After the data is actually received
-+ * the low-level driver needs to call scst_rx_data() in order to
-+ * continue processing this command.
-+ * Returns one of the SCST_TGT_RES_* constants.
-+ *
-+ * This command is expected to be NON-BLOCKING.
-+ * If it is blocking, consider setting threads_num to a non-zero number.
-+ *
-+ * Pay attention to "atomic" attribute of the cmd, which can be get
-+ * by scst_cmd_atomic(): it is true if the function called in the
-+ * atomic (non-sleeping) context.
-+ *
-+ * OPTIONAL
-+ */
-+ int (*rdy_to_xfer) (struct scst_cmd *cmd);
-+
-+ /*
-+ * Called if the cmd stays inside the target hardware, i.e. after
-+ * rdy_to_xfer() and xmit_response(), for more than max_hw_pending_time.
-+ * The target driver is supposed to clean up this command and
-+ * resume the cmd's processing.
-+ *
-+ * OPTIONAL
-+ */
-+ void (*on_hw_pending_cmd_timeout) (struct scst_cmd *cmd);
-+
-+ /*
-+ * Called to notify the driver that the command is about to be freed.
-+ * Necessary, because for aborted commands xmit_response() might not
-+ * be called. May be called in IRQ context.
-+ *
-+ * OPTIONAL
-+ */
-+ void (*on_free_cmd) (struct scst_cmd *cmd);
-+
-+ /*
-+ * This function allows target driver to handle data buffer
-+ * allocations on its own.
-+ *
-+ * The target driver doesn't always have to allocate the buffer in
-+ * this function, but if it decides to do so, it must check that
-+ * scst_cmd_get_data_buff_alloced() returns 0; otherwise, to avoid
-+ * double buffer allocation and memory leaks, alloc_data_buf() shall
-+ * fail.
-+ *
-+ * Shall return 0 in case of success or < 0 (preferably -ENOMEM)
-+ * in case of error, or > 0 if the regular SCST allocation should be
-+ * done. In case of returning successfully,
-+ * scst_cmd->tgt_data_buf_alloced will be set by SCST.
-+ *
-+ * It is possible that both the target driver and the dev handler
-+ * request their own memory allocation. In this case, data will be
-+ * memcpy()'d between buffers where necessary.
-+ *
-+ * If allocation in atomic context - cf. scst_cmd_atomic() - is not
-+ * desired or fails and consequently < 0 is returned, this function
-+ * will be re-called in thread context.
-+ *
-+ * Please note that the driver will have to handle all relevant
-+ * details itself, such as scatterlist setup, highmem, freeing the
-+ * allocated memory, etc.
-+ *
-+ * OPTIONAL.
-+ */
-+ int (*alloc_data_buf) (struct scst_cmd *cmd);
-+
-+ /*
-+ * This function informs the driver that the data
-+ * buffer corresponding to the said command has now been
-+ * allocated and other preprocessing tasks have been done.
-+ * A target driver might need to perform some actions at this stage.
-+ * After the target driver has done the needed actions, it shall call
-+ * scst_restart_cmd() in order to continue processing this command.
-+ * In case of preliminary command completion, this function will
-+ * also be called before xmit_response().
-+ *
-+ * Called only if the cmd is queued using scst_cmd_init_stage1_done()
-+ * instead of scst_cmd_init_done().
-+ *
-+ * Returns void, the result is expected to be returned using
-+ * scst_restart_cmd().
-+ *
-+ * This command is expected to be NON-BLOCKING.
-+ * If it is blocking, consider setting threads_num to a non-zero number.
-+ *
-+ * Pay attention to "atomic" attribute of the cmd, which can be get
-+ * by scst_cmd_atomic(): it is true if the function called in the
-+ * atomic (non-sleeping) context.
-+ *
-+ * OPTIONAL.
-+ */
-+ void (*preprocessing_done) (struct scst_cmd *cmd);
-+
-+ /*
-+ * This function informs the driver that the said command is about
-+ * to be executed.
-+ *
-+ * Returns one of the SCST_PREPROCESS_* constants.
-+ *
-+ * This command is expected to be NON-BLOCKING.
-+ * If it is blocking, consider setting threads_num to a non-zero number.
-+ *
-+ * OPTIONAL
-+ */
-+ int (*pre_exec) (struct scst_cmd *cmd);
-+
-+ /*
-+ * This function informs the driver that all commands affected by the
-+ * corresponding task management function have been completed.
-+ * No return value is expected.
-+ *
-+ * This function is expected to be NON-BLOCKING.
-+ *
-+ * Called without any locks held from a thread context.
-+ *
-+ * OPTIONAL
-+ */
-+ void (*task_mgmt_affected_cmds_done) (struct scst_mgmt_cmd *mgmt_cmd);
-+
-+ /*
-+ * This function informs the driver that the corresponding task
-+ * management function has been completed, i.e. all the corresponding
-+ * commands have completed and been freed. No return value is expected.
-+ *
-+ * This function is expected to be NON-BLOCKING.
-+ *
-+ * Called without any locks held from a thread context.
-+ *
-+ * MUST HAVE if the target supports task management.
-+ */
-+ void (*task_mgmt_fn_done) (struct scst_mgmt_cmd *mgmt_cmd);
-+
-+ /*
-+ * Called to notify the target driver that the command is being aborted.
-+ * If the target driver wants to redirect processing to some outside
-+ * processing, it should take a reference to the cmd using scst_cmd_get().
-+ *
-+ * OPTIONAL
-+ */
-+ void (*on_abort_cmd) (struct scst_cmd *cmd);
-+
-+ /*
-+ * This function should detect the target adapters that
-+ * are present in the system. The function should return a value
-+ * >= 0 to signify the number of detected target adapters.
-+ * A negative value should be returned whenever there is
-+ * an error.
-+ *
-+ * MUST HAVE
-+ */
-+ int (*detect) (struct scst_tgt_template *tgt_template);
-+
-+ /*
-+ * This function should free up the resources allocated to the device.
-+ * The function should return 0 to indicate successful release
-+ * or a negative value if there are some issues with the release.
-+ * In the current version the return value is ignored.
-+ *
-+ * MUST HAVE
-+ */
-+ int (*release) (struct scst_tgt *tgt);
-+
-+ /*
-+ * This function is used for Asynchronous Event Notifications.
-+ *
-+ * Returns one of the SCST_AEN_RES_* constants.
-+ * After AEN is sent, target driver must call scst_aen_done() and,
-+ * optionally, scst_set_aen_delivery_status().
-+ *
-+ * This function is expected to be NON-BLOCKING, but can sleep.
-+ *
-+ * This function must be prepared to handle AENs arriving for the
-+ * corresponding session between the call to scst_unregister_session()
-+ * and the unreg_done_fn() callback, or before scst_unregister_session()
-+ * returns if it is called in blocking mode. AENs for such sessions
-+ * should be ignored.
-+ *
-+ * MUST HAVE, if low-level protocol supports AENs.
-+ */
-+ int (*report_aen) (struct scst_aen *aen);
-+
-+ /*
-+ * This function returns in *transport_id the TransportID of the
-+ * initiator port corresponding to sess, in the form used by PR
-+ * commands; see "Transport Identifiers" in SPC. Space for the
-+ * initiator port TransportID must be allocated via kmalloc(). The
-+ * caller is supposed to kfree() it when it is no longer needed.
-+ *
-+ * If sess is NULL, this function must return TransportID PROTOCOL
-+ * IDENTIFIER for the requested target.
-+ *
-+ * Returns 0 on success or negative error code otherwise.
-+ *
-+ * SHOULD HAVE, because it's required for Persistent Reservations.
-+ */
-+ int (*get_initiator_port_transport_id) (struct scst_tgt *tgt,
-+ struct scst_session *sess, uint8_t **transport_id);
-+
-+ /*
-+ * This function enables or disables a particular target.
-+ * A disabled target doesn't receive or process any SCSI commands.
-+ *
-+ * SHOULD HAVE to avoid a race when initiators connect while the
-+ * target has not yet completed its initial configuration: initiators
-+ * that connect too early would not see the devices they intended
-+ * to see.
-+ *
-+ * If you are sure your target driver doesn't need target enabling,
-+ * you should set enabled_attr_not_needed to 1.
-+ */
-+ int (*enable_target) (struct scst_tgt *tgt, bool enable);
-+
-+ /*
-+ * This function reports whether a particular target is enabled.
-+ *
-+ * SHOULD HAVE, see above why.
-+ */
-+ bool (*is_target_enabled) (struct scst_tgt *tgt);
-+
-+ /*
-+ * This function adds a virtual target.
-+ *
-+ * If both the add_target and del_target callbacks are defined, this
-+ * target driver is supposed to support virtual targets. In this case
-+ * an "mgmt" entry will be created in the sysfs root for this driver.
-+ * The "mgmt" entry will support 2 commands: "add_target" and
-+ * "del_target", for which the corresponding callbacks will be called.
-+ * The target driver can also define its own commands for the "mgmt"
-+ * entry; see mgmt_cmd and mgmt_cmd_help below.
-+ *
-+ * This approach provides uniform target management, which simplifies
-+ * external management tools like scstadmin. See README for more details.
-+ *
-+ * Either both add_target and del_target must be defined, or none.
-+ *
-+ * MUST HAVE if virtual targets are supported.
-+ */
-+ ssize_t (*add_target) (const char *target_name, char *params);
-+
-+ /*
-+ * This function deletes a virtual target. See comment for add_target
-+ * above.
-+ *
-+ * MUST HAVE if virtual targets are supported.
-+ */
-+ ssize_t (*del_target) (const char *target_name);
-+
-+ /*
-+ * This function is called if a command other than "add_target" or
-+ * "del_target" is sent to the mgmt entry (see the comment for
-+ * add_target above). The command is passed to this function as-is,
-+ * in string form.
-+ *
-+ * OPTIONAL.
-+ */
-+ ssize_t (*mgmt_cmd) (char *cmd);
-+
-+ /*
-+ * Should return physical transport version. Used in the corresponding
-+ * INQUIRY version descriptor. See SPC for the list of available codes.
-+ *
-+ * OPTIONAL
-+ */
-+ uint16_t (*get_phys_transport_version) (struct scst_tgt *tgt);
-+
-+ /*
-+ * Should return SCSI transport version. Used in the corresponding
-+ * INQUIRY version descriptor. See SPC for the list of available codes.
-+ *
-+ * OPTIONAL
-+ */
-+ uint16_t (*get_scsi_transport_version) (struct scst_tgt *tgt);
-+
-+ /*
-+ * Name of the template. Must be unique to identify
-+ * the template. MUST HAVE
-+ */
-+ const char name[SCST_MAX_NAME];
-+
-+ /*
-+ * Number of additional threads to the pool of dedicated threads.
-+ * Used if xmit_response() or rdy_to_xfer() is blocking.
-+ * It is the target driver's duty to ensure that no more than that
-+ * number of threads are blocked in those functions at any time.
-+ */
-+ int threads_num;
-+
-+ /* Optional default log flags */
-+ const unsigned long default_trace_flags;
-+
-+ /* Optional pointer to trace flags */
-+ unsigned long *trace_flags;
-+
-+ /* Optional local trace table */
-+ struct scst_trace_log *trace_tbl;
-+
-+ /* Optional local trace table help string */
-+ const char *trace_tbl_help;
-+
-+ /* sysfs attributes, if any */
-+ const struct attribute **tgtt_attrs;
-+
-+ /* sysfs target attributes, if any */
-+ const struct attribute **tgt_attrs;
-+
-+ /* sysfs session attributes, if any */
-+ const struct attribute **sess_attrs;
-+
-+ /* Optional help string for mgmt_cmd commands */
-+ const char *mgmt_cmd_help;
-+
-+ /* List of parameters for add_target command, if any */
-+ const char *add_target_parameters;
-+
-+ /*
-+ * List of optional sysfs attributes, i.e. ones which can be added by
-+ * the add_attribute command and deleted by the del_attribute command,
-+ * if any. Helps scstadmin work correctly.
-+ */
-+ const char *tgtt_optional_attributes;
-+
-+ /*
-+ * List of optional sysfs attributes, i.e. ones which can be added by
-+ * the add_target_attribute command and deleted by the
-+ * del_target_attribute command, if any. Helps scstadmin work correctly.
-+ */
-+ const char *tgt_optional_attributes;
-+
-+ /** Private, must be inited to 0 by memset() **/
-+
-+ /* List of targets per template, protected by scst_mutex */
-+ struct list_head tgt_list;
-+
-+ /* List entry of global templates list */
-+ struct list_head scst_template_list_entry;
-+
-+ struct kobject tgtt_kobj; /* kobject for this struct */
-+
-+ /* Number of currently active sysfs mgmt works (scst_sysfs_work_item) */
-+ int tgtt_active_sysfs_works_count;
-+
-+ /* sysfs release completion */
-+ struct completion *tgtt_kobj_release_cmpl;
-+
-+ /*
-+ * Optional vendor to be reported via the SCSI inquiry data. If NULL,
-+ * an SCST device handler specific default value will be used, e.g.
-+ * "SCST_FIO" for scst_vdisk file I/O.
-+ */
-+ const char *vendor;
-+
-+ /*
-+ * Optional method that sets the product ID in [buf, buf+size) based
-+ * on the device type (byte 0 of the SCSI inquiry data, which contains
-+ * the peripheral qualifier in the highest three bits and the
-+ * peripheral device type in the lower five bits).
-+ */
-+ void (*get_product_id)(const struct scst_tgt_dev *tgt_dev,
-+ char *buf, int size);
-+
-+ /*
-+ * Optional revision to be reported in the SCSI inquiry response. If
-+ * NULL, an SCST device handler specific default value will be used,
-+ * e.g. " 220" for scst_vdisk file I/O.
-+ */
-+ const char *revision;
-+
-+ /*
-+ * Optional method that writes the serial number of a target device in
-+ * [buf, buf+size) and returns the number of bytes written.
-+ *
-+ * Note: SCST can be configured such that a device can be accessed
-+ * from several different transports at the same time. It is important
-+ * that all clients see the same USN for proper operation. Overriding
-+ * the serial number can lead to subtle misbehavior. In particular, the
-+ * "usn" sysfs attribute of the corresponding devices will still show
-+ * the devices' generated or assigned serial numbers.
-+ */
-+ int (*get_serial)(const struct scst_tgt_dev *tgt_dev, char *buf,
-+ int size);
-+
-+ /*
-+ * Optional method that writes the SCSI inquiry vendor-specific data in
-+ * [buf, buf+size) and returns the number of bytes written.
-+ */
-+ int (*get_vend_specific)(const struct scst_tgt_dev *tgt_dev, char *buf,
-+ int size);
-+};
-+
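-+/*
-+ * Illustrative sketch only (not part of the original interface): a minimal
-+ * target template for a hypothetical driver. All "my_*" names are invented
-+ * for this example; scst_tgt_cmd_done() and the SCST_TGT_RES_* and
-+ * SCST_CONTEXT_* constants are assumed to be the ones provided by this
-+ * header and scst_const.h.
-+ */
-+static int my_detect(struct scst_tgt_template *tgtt)
-+{
-+ return 0; /* nothing to scan for a purely virtual target */
-+}
-+
-+static int my_release(struct scst_tgt *tgt)
-+{
-+ return 0;
-+}
-+
-+static int my_xmit_response(struct scst_cmd *cmd)
-+{
-+ /* Deliver data and/or status to the initiator here, then: */
-+ scst_tgt_cmd_done(cmd, SCST_CONTEXT_SAME);
-+ return SCST_TGT_RES_SUCCESS;
-+}
-+
-+static struct scst_tgt_template my_tgt_template = {
-+ .name = "my_tgt",
-+ .detect = my_detect,
-+ .release = my_release,
-+ .xmit_response = my_xmit_response,
-+};
-+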
-+/*
-+ * Threads pool types. When changing them, don't forget to change
-+ * the corresponding *_STR values in scst_const.h!
-+ */
-+enum scst_dev_type_threads_pool_type {
-+ /* Each initiator will have a dedicated threads pool. */
-+ SCST_THREADS_POOL_PER_INITIATOR = 0,
-+
-+ /* All connected initiators will use a shared threads pool */
-+ SCST_THREADS_POOL_SHARED,
-+
-+ /* Invalid value for scst_parse_threads_pool_type() */
-+ SCST_THREADS_POOL_TYPE_INVALID,
-+};
-+
-+/*
-+ * SCST dev handler template: defines dev handler's parameters and callback
-+ * functions.
-+ *
-+ * Callbacks marked MUST HAVE are expected to be defined for the handler
-+ * to work. OPTIONAL means there is a choice.
-+ */
-+struct scst_dev_type {
-+ /* SCSI type of the supported device. MUST HAVE */
-+ int type;
-+
-+ /*
-+ * True if the corresponding function supports execution in
-+ * atomic (non-sleeping) context
-+ */
-+ unsigned parse_atomic:1;
-+ unsigned alloc_data_buf_atomic:1;
-+ unsigned dev_done_atomic:1;
-+
-+ /*
-+ * Should be true if exec() is synchronous. This is a hint to the SCST
-+ * core to optimize command order management.
-+ */
-+ unsigned exec_sync:1;
-+
-+ /*
-+ * Should be set if the device wants to receive notification of
-+ * Persistent Reservation commands (PR OUT only).
-+ * Note: the notification will not be sent if the command failed.
-+ */
-+ unsigned pr_cmds_notifications:1;
-+
-+ /*
-+ * Called to parse the CDB from the cmd and initialize
-+ * cmd->bufflen and cmd->data_direction (both REQUIRED).
-+ *
-+ * Returns the command's next state or SCST_CMD_STATE_DEFAULT,
-+ * if the next default state should be used, or
-+ * SCST_CMD_STATE_NEED_THREAD_CTX if the function was called in atomic
-+ * context but requires sleeping, or SCST_CMD_STATE_STOP if the
-+ * command should not be further processed for now. In the
-+ * SCST_CMD_STATE_NEED_THREAD_CTX case the function will be called
-+ * again in thread context, where sleeping is allowed.
-+ *
-+ * Pay attention to the "atomic" attribute of the cmd, which can be
-+ * obtained via scst_cmd_atomic(): it is true if the function is called
-+ * in atomic (non-sleeping) context.
-+ *
-+ * MUST HAVE
-+ */
-+ int (*parse) (struct scst_cmd *cmd);
-+
-+ /*
-+ * This function allows dev handler to handle data buffer
-+ * allocations on its own.
-+ *
-+ * Returns the command's next state or SCST_CMD_STATE_DEFAULT,
-+ * if the next default state should be used, or
-+ * SCST_CMD_STATE_NEED_THREAD_CTX if the function was called in atomic
-+ * context but requires sleeping, or SCST_CMD_STATE_STOP if the
-+ * command should not be further processed for now. In the
-+ * SCST_CMD_STATE_NEED_THREAD_CTX case the function will be called
-+ * again in thread context, where sleeping is allowed.
-+ *
-+ * Pay attention to the "atomic" attribute of the cmd, which can be
-+ * obtained via scst_cmd_atomic(): it is true if the function is called
-+ * in atomic (non-sleeping) context.
-+ *
-+ * OPTIONAL
-+ */
-+ int (*alloc_data_buf) (struct scst_cmd *cmd);
-+
-+ /*
-+ * Called to execute CDB. Useful, for instance, to implement
-+ * data caching. The result of CDB execution is reported via
-+ * cmd->scst_cmd_done() callback.
-+ * Returns:
-+ * - SCST_EXEC_COMPLETED - the cmd is done, move on to other ones
-+ * - SCST_EXEC_NOT_COMPLETED - the cmd should be sent to SCSI
-+ * mid-level.
-+ *
-+ * If this function provides sync execution, you should set the
-+ * exec_sync flag and consider setting up dedicated threads by
-+ * setting threads_num > 0.
-+ *
-+ * !! If this function is implemented, scst_check_local_events() !!
-+ * !! shall be called inside it just before the actual command's !!
-+ * !! execution. !!
-+ *
-+ * OPTIONAL, if not set, the commands will be sent directly to SCSI
-+ * device.
-+ */
-+ int (*exec) (struct scst_cmd *cmd);
-+
-+ /*
-+ * Called to notify dev handler about the result of cmd execution
-+ * and perform some post processing. Cmd's fields is_send_status and
-+ * resp_data_len should be set by this function, but SCST offers good
-+ * defaults.
-+ * Returns the command's next state or SCST_CMD_STATE_DEFAULT,
-+ * if the next default state should be used, or
-+ * SCST_CMD_STATE_NEED_THREAD_CTX if the function was called in atomic
-+ * context but requires sleeping. In the latter case, the function
-+ * will be called again in thread context, where sleeping is allowed.
-+ *
-+ * Pay attention to the "atomic" attribute of the cmd, which can be
-+ * obtained via scst_cmd_atomic(): it is true if the function is called
-+ * in atomic (non-sleeping) context.
-+ *
-+ * OPTIONAL
-+ */
-+ int (*dev_done) (struct scst_cmd *cmd);
-+
-+ /*
-+ * Called to notify the dev handler that the command is about to be freed.
-+ *
-+ * Could be called in IRQ context.
-+ *
-+ * OPTIONAL
-+ */
-+ void (*on_free_cmd) (struct scst_cmd *cmd);
-+
-+ /*
-+ * Called to execute a task management command.
-+ * Returns:
-+ * - SCST_MGMT_STATUS_SUCCESS - the command completed successfully,
-+ * no further actions required
-+ * - The SCST_MGMT_STATUS_* error code if the command failed and
-+ * no further actions required
-+ * - SCST_DEV_TM_NOT_COMPLETED - regular standard actions for the
-+ * command should be done
-+ *
-+ * Can be called under many internal SCST locks, including with
-+ * IRQs disabled, so the dev handler should be careful with locking
-+ * and, if necessary, pass processing somewhere outside (e.g. to a
-+ * work item).
-+ *
-+ * At the moment it is called with IRQs disabled only for
-+ * SCST_ABORT_TASK; however, a dev handler relying on that should add
-+ * a BUG_ON trap to catch if this changes in the future.
-+ *
-+ * OPTIONAL
-+ */
-+ int (*task_mgmt_fn) (struct scst_mgmt_cmd *mgmt_cmd,
-+ struct scst_tgt_dev *tgt_dev);
-+
-+ /*
-+ * Called to notify the dev handler that its sg_tablesize is too low to
-+ * satisfy this command's data transfer requirements. Should return
-+ * true if the exec() callback will split this command's CDB into
-+ * smaller transfers, false otherwise.
-+ *
-+ * Could be called in SIRQ context.
-+ *
-+ * MUST HAVE, if dev handler supports CDB splitting.
-+ */
-+ bool (*on_sg_tablesize_low) (struct scst_cmd *cmd);
-+
-+ /*
-+ * Called when a new device is attaching to the dev handler.
-+ * Returns 0 on success, error code otherwise.
-+ *
-+ * OPTIONAL
-+ */
-+ int (*attach) (struct scst_device *dev);
-+
-+ /*
-+ * Called when a device is detaching from the dev handler.
-+ *
-+ * OPTIONAL
-+ */
-+ void (*detach) (struct scst_device *dev);
-+
-+ /*
-+ * Called when a new tgt_dev (session) is attaching to the dev handler.
-+ * Returns 0 on success, error code otherwise.
-+ *
-+ * OPTIONAL
-+ */
-+ int (*attach_tgt) (struct scst_tgt_dev *tgt_dev);
-+
-+ /*
-+ * Called when tgt_dev (session) is detaching from the dev handler.
-+ *
-+ * OPTIONAL
-+ */
-+ void (*detach_tgt) (struct scst_tgt_dev *tgt_dev);
-+
-+ /*
-+ * This function adds a virtual device.
-+ *
-+ * If both the add_device and del_device callbacks are defined, this
-+ * dev handler is supposed to support adding/deleting virtual devices.
-+ * In this case an "mgmt" entry will be created in the sysfs root for
-+ * this handler. The "mgmt" entry will support 2 commands: "add_device"
-+ * and "del_device", for which the corresponding callbacks will be called.
-+ * The dev handler can also define its own commands for the "mgmt"
-+ * entry; see mgmt_cmd and mgmt_cmd_help below.
-+ *
-+ * This approach provides uniform device management, which simplifies
-+ * external management tools like scstadmin. See README for more details.
-+ *
-+ * Either both add_device and del_device must be defined, or none.
-+ *
-+ * MUST HAVE if virtual devices are supported.
-+ */
-+ ssize_t (*add_device) (const char *device_name, char *params);
-+
-+ /*
-+ * This function deletes a virtual device. See comment for add_device
-+ * above.
-+ *
-+ * MUST HAVE if virtual devices are supported.
-+ */
-+ ssize_t (*del_device) (const char *device_name);
-+
-+ /*
-+ * This function is called if a command other than "add_device" or
-+ * "del_device" is sent to the mgmt entry (see the comment for
-+ * add_device above). The command is passed to this function as-is,
-+ * in string form.
-+ *
-+ * OPTIONAL.
-+ */
-+ ssize_t (*mgmt_cmd) (char *cmd);
-+
-+ /*
-+ * Name of the dev handler. Must be unique. MUST HAVE.
-+ *
-+ * It's SCST_MAX_NAME + a few more bytes to match scst_user expectations.
-+ */
-+ char name[SCST_MAX_NAME + 10];
-+
-+ /*
-+ * Number of threads in this handler's devices' threads pools.
-+ * If 0, no threads will be created; if < 0, creation of the threads
-+ * pools is prohibited. Also pay attention to threads_pool_type below.
-+ */
-+ int threads_num;
-+
-+ /* Threads pool type. Valid only if threads_num > 0. */
-+ enum scst_dev_type_threads_pool_type threads_pool_type;
-+
-+ /* Optional default log flags */
-+ const unsigned long default_trace_flags;
-+
-+ /* Optional pointer to trace flags */
-+ unsigned long *trace_flags;
-+
-+ /* Optional local trace table */
-+ struct scst_trace_log *trace_tbl;
-+
-+ /* Optional local trace table help string */
-+ const char *trace_tbl_help;
-+
-+ /* Optional help string for mgmt_cmd commands */
-+ const char *mgmt_cmd_help;
-+
-+ /* List of parameters for add_device command, if any */
-+ const char *add_device_parameters;
-+
-+ /*
-+ * List of optional sysfs attributes, i.e. ones which can be added by
-+ * the add_attribute command and deleted by the del_attribute command,
-+ * if any. Helps scstadmin work correctly.
-+ */
-+ const char *devt_optional_attributes;
-+
-+ /*
-+ * List of optional sysfs attributes, i.e. ones which can be added by
-+ * the add_device_attribute command and deleted by the
-+ * del_device_attribute command, if any. Helps scstadmin work correctly.
-+ */
-+ const char *dev_optional_attributes;
-+
-+ /* sysfs attributes, if any */
-+ const struct attribute **devt_attrs;
-+
-+ /* sysfs device attributes, if any */
-+ const struct attribute **dev_attrs;
-+
-+ /* Pointer to dev handler's private data */
-+ void *devt_priv;
-+
-+ /* Pointer to parent dev type in the sysfs hierarchy */
-+ struct scst_dev_type *parent;
-+
-+ struct module *module;
-+
-+ /** Private, must be inited to 0 by memset() **/
-+
-+ /* list entry in scst_(virtual_)dev_type_list */
-+ struct list_head dev_type_list_entry;
-+
-+ struct kobject devt_kobj; /* main handlers/driver */
-+
-+ /* Number of currently active sysfs mgmt works (scst_sysfs_work_item) */
-+ int devt_active_sysfs_works_count;
-+
-+ /* To wait until devt_kobj released */
-+ struct completion *devt_kobj_release_compl;
-+};
-+
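-+/*
-+ * Illustrative sketch only: a minimal dev handler template. The "my_*"
-+ * names are invented for this example. parse() must initialize or verify
-+ * cmd->bufflen and cmd->data_direction as described above; accepting the
-+ * defaults is assumed to be enough here.
-+ */
-+static int my_parse(struct scst_cmd *cmd)
-+{
-+ /*
-+ * A real handler would set or verify cmd->bufflen and
-+ * cmd->data_direction here, e.g. with the help of
-+ * scst_get_cdb_info().
-+ */
-+ return SCST_CMD_STATE_DEFAULT;
-+}
-+
-+static struct scst_dev_type my_dev_type = {
-+ .type = 0, /* TYPE_DISK */
-+ .name = "my_devtype",
-+ .parse = my_parse,
-+ .threads_num = 1,
-+ .threads_pool_type = SCST_THREADS_POOL_SHARED,
-+};
-+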
-+/*
-+ * An SCST target, analog of SCSI target port.
-+ */
-+struct scst_tgt {
-+ /* List of remote sessions per target, protected by scst_mutex */
-+ struct list_head sess_list;
-+
-+ /* List entry of targets per template (tgts_list) */
-+ struct list_head tgt_list_entry;
-+
-+ struct scst_tgt_template *tgtt; /* corresponding target template */
-+
-+ struct scst_acg *default_acg; /* default acg for this target */
-+
-+ struct list_head tgt_acg_list; /* target ACG groups */
-+
-+ /*
-+ * Maximum SG table size. Needed here, since different cards on the
-+ * same target template can have different SG table limitations.
-+ */
-+ int sg_tablesize;
-+
-+ /* Used for storage of target driver private stuff */
-+ void *tgt_priv;
-+
-+ /*
-+ * The following fields are used to store and retry cmds if the
-+ * target's internal queue is full and the target is unable to accept
-+ * the cmd, returning QUEUE FULL.
-+ * They are protected by tgt_lock, where necessary.
-+ */
-+ bool retry_timer_active;
-+ struct timer_list retry_timer;
-+ atomic_t finished_cmds;
-+ int retry_cmds;
-+ spinlock_t tgt_lock;
-+ struct list_head retry_cmd_list;
-+
-+ /* Used to wait until session finished to unregister */
-+ wait_queue_head_t unreg_waitQ;
-+
-+ /* Name of the target */
-+ char *tgt_name;
-+
-+ /* User comment, to make it easier to distinguish targets */
-+ char *tgt_comment;
-+
-+ uint16_t rel_tgt_id;
-+
-+ /* sysfs release completion */
-+ struct completion *tgt_kobj_release_cmpl;
-+
-+ struct kobject tgt_kobj; /* main targets/target kobject */
-+ struct kobject *tgt_sess_kobj; /* target/sessions/ */
-+ struct kobject *tgt_luns_kobj; /* target/luns/ */
-+ struct kobject *tgt_ini_grp_kobj; /* target/ini_groups/ */
-+};
-+
-+#ifdef CONFIG_SCST_MEASURE_LATENCY
-+
-+/* Defines extended latency statistics */
-+struct scst_ext_latency_stat {
-+ uint64_t scst_time_rd, tgt_time_rd, dev_time_rd;
-+ unsigned int processed_cmds_rd;
-+ uint64_t min_scst_time_rd, min_tgt_time_rd, min_dev_time_rd;
-+ uint64_t max_scst_time_rd, max_tgt_time_rd, max_dev_time_rd;
-+
-+ uint64_t scst_time_wr, tgt_time_wr, dev_time_wr;
-+ unsigned int processed_cmds_wr;
-+ uint64_t min_scst_time_wr, min_tgt_time_wr, min_dev_time_wr;
-+ uint64_t max_scst_time_wr, max_tgt_time_wr, max_dev_time_wr;
-+};
-+
-+#define SCST_IO_SIZE_THRESHOLD_SMALL (8*1024)
-+#define SCST_IO_SIZE_THRESHOLD_MEDIUM (32*1024)
-+#define SCST_IO_SIZE_THRESHOLD_LARGE (128*1024)
-+#define SCST_IO_SIZE_THRESHOLD_VERY_LARGE (512*1024)
-+
-+#define SCST_LATENCY_STAT_INDEX_SMALL 0
-+#define SCST_LATENCY_STAT_INDEX_MEDIUM 1
-+#define SCST_LATENCY_STAT_INDEX_LARGE 2
-+#define SCST_LATENCY_STAT_INDEX_VERY_LARGE 3
-+#define SCST_LATENCY_STAT_INDEX_OTHER 4
-+#define SCST_LATENCY_STATS_NUM (SCST_LATENCY_STAT_INDEX_OTHER + 1)
-+
-+#endif /* CONFIG_SCST_MEASURE_LATENCY */
-+
-+struct scst_io_stat_entry {
-+ uint64_t cmd_count;
-+ uint64_t io_byte_count;
-+};
-+
-+/*
-+ * SCST session, analog of SCSI I_T nexus
-+ */
-+struct scst_session {
-+ /*
-+ * Initialization phase, one of SCST_SESS_IPH_* constants, protected by
-+ * sess_list_lock
-+ */
-+ int init_phase;
-+
-+ struct scst_tgt *tgt; /* corresponding target */
-+
-+ /* Used for storage of target driver private stuff */
-+ void *tgt_priv;
-+
-+ /* session's async flags */
-+ unsigned long sess_aflags;
-+
-+ /*
-+ * Hash list of tgt_devs for this session, with its size and hash
-+ * function. It doesn't use hlist_entry, because we need the ability
-+ * to traverse the list in reverse order. Protected by scst_mutex and
-+ * suspended activity.
-+ */
-+#define SESS_TGT_DEV_LIST_HASH_SIZE (1 << 5)
-+#define SESS_TGT_DEV_LIST_HASH_FN(val) ((val) & (SESS_TGT_DEV_LIST_HASH_SIZE - 1))
-+ struct list_head sess_tgt_dev_list[SESS_TGT_DEV_LIST_HASH_SIZE];
-+
-+ /*
-+ * List of cmds in this session. Protected by sess_list_lock.
-+ *
-+ * We must always keep commands in the sess list from the
-+ * very beginning, because otherwise they can be missed during
-+ * TM processing.
-+ */
-+ struct list_head sess_cmd_list;
-+
-+ spinlock_t sess_list_lock; /* protects sess_cmd_list, etc */
-+
-+ atomic_t refcnt; /* get/put counter */
-+
-+ /*
-+ * Alive commands for this session. ToDo: make it part of the common
-+ * IO flow control.
-+ */
-+ atomic_t sess_cmd_count;
-+
-+ /* Some statistics. Protected by sess_list_lock. */
-+ struct scst_io_stat_entry io_stats[SCST_DATA_DIR_MAX];
-+
-+ /* Access control for this session and list entry there */
-+ struct scst_acg *acg;
-+
-+ /* Initiator port transport id */
-+ uint8_t *transport_id;
-+
-+ /* List entry for the sessions list inside ACG */
-+ struct list_head acg_sess_list_entry;
-+
-+ struct delayed_work hw_pending_work;
-+
-+ /* Name of attached initiator */
-+ const char *initiator_name;
-+
-+ /* List entry of sessions per target */
-+ struct list_head sess_list_entry;
-+
-+ /* List entry for the list that keeps sessions waiting for init */
-+ struct list_head sess_init_list_entry;
-+
-+ /*
-+ * List entry for the list that keeps session, waiting for the shutdown
-+ */
-+ struct list_head sess_shut_list_entry;
-+
-+ /*
-+ * Lists of commands deferred during session initialization.
-+ * Protected by sess_list_lock.
-+ */
-+ struct list_head init_deferred_cmd_list;
-+ struct list_head init_deferred_mcmd_list;
-+
-+ /*
-+ * Shutdown phase, one of the SCST_SESS_SPH_* constants; unprotected.
-+ * Asynchronous relative to init_phase; it must be a separate variable,
-+ * because the session could be unregistered before the asynchronous
-+ * registration is finished.
-+ */
-+ unsigned long shut_phase;
-+
-+ /* Used if scst_unregister_session() called in wait mode */
-+ struct completion *shutdown_compl;
-+
-+ /* sysfs release completion */
-+ struct completion *sess_kobj_release_cmpl;
-+
-+ unsigned int sess_kobj_ready:1;
-+
-+ struct kobject sess_kobj; /* kobject for this struct */
-+
-+ /*
-+ * Functions and data for user callbacks from scst_register_session()
-+ * and scst_unregister_session()
-+ */
-+ void *reg_sess_data;
-+ void (*init_result_fn) (struct scst_session *sess, void *data,
-+ int result);
-+ void (*unreg_done_fn) (struct scst_session *sess);
-+
-+#ifdef CONFIG_SCST_MEASURE_LATENCY
-+ /*
-+ * Must be the last field, to allow working with drivers that don't
-+ * know about this config-time option.
-+ */
-+ spinlock_t lat_lock;
-+ uint64_t scst_time, tgt_time, dev_time;
-+ unsigned int processed_cmds;
-+ uint64_t min_scst_time, min_tgt_time, min_dev_time;
-+ uint64_t max_scst_time, max_tgt_time, max_dev_time;
-+ struct scst_ext_latency_stat sess_latency_stat[SCST_LATENCY_STATS_NUM];
-+#endif
-+};
-+
-+/*
-+ * SCST_PR_ABORT_ALL TM function helper structure
-+ */
-+struct scst_pr_abort_all_pending_mgmt_cmds_counter {
-+ /*
-+ * How many SCST_PR_ABORT_ALL TM commands are pending for this cmd.
-+ */
-+ atomic_t pr_abort_pending_cnt;
-+
-+ /* Saved completion routine */
-+ void (*saved_cmd_done) (struct scst_cmd *cmd, int next_state,
-+ enum scst_exec_context pref_context);
-+
-+ /*
-+ * How many SCST_PR_ABORT_ALL TM commands pending for this cmd have
-+ * not yet aborted all affected commands, plus a completion to signal
-+ * when that is done.
-+ */
-+ atomic_t pr_aborting_cnt;
-+ struct completion pr_aborting_cmpl;
-+};
-+
-+/*
-+ * Structure to control command queuing and the threads pool processing the queue
-+ */
-+struct scst_cmd_threads {
-+ spinlock_t cmd_list_lock;
-+ struct list_head active_cmd_list; /* commands queue */
-+ wait_queue_head_t cmd_list_waitQ;
-+
-+ struct io_context *io_context; /* IO context of the threads pool */
-+ int io_context_refcnt;
-+
-+ bool io_context_ready;
-+
-+ /* io_context_mutex protects io_context and io_context_refcnt. */
-+ struct mutex io_context_mutex;
-+
-+ int nr_threads; /* number of processing threads */
-+ struct list_head threads_list; /* processing threads */
-+
-+ struct list_head lists_list_entry;
-+};
-+
-+/*
-+ * Used to execute cmd's in order of arrival, honoring SCSI task attributes
-+ */
-+struct scst_order_data {
-+ /*
-+ * Protected by sn_lock, except expected_sn, which is protected by
-+ * itself. Curr_sn must have the same size as expected_sn so that they
-+ * overflow simultaneously.
-+ */
-+ int def_cmd_count;
-+ spinlock_t sn_lock;
-+ unsigned int expected_sn;
-+ unsigned int curr_sn;
-+ int hq_cmd_count;
-+ struct list_head deferred_cmd_list;
-+ struct list_head skipped_sn_list;
-+
-+ /*
-+ * Set if the previous cmd was ORDERED. Its size and, hence, alignment
-+ * must allow unprotected modification independently of the neighbouring
-+ * fields.
-+ */
-+ unsigned long prev_cmd_ordered;
-+
-+ int num_free_sn_slots; /* if it's <0, then all slots are busy */
-+ atomic_t *cur_sn_slot;
-+ atomic_t sn_slots[15];
-+};
-+
-+/*
-+ * SCST command, analog of I_T_L_Q nexus or task
-+ */
-+struct scst_cmd {
-+ /* List entry for below *_cmd_threads */
-+ struct list_head cmd_list_entry;
-+
-+ /* Pointer to lists of commands with the lock */
-+ struct scst_cmd_threads *cmd_threads;
-+
-+ atomic_t cmd_ref;
-+
-+ struct scst_session *sess; /* corresponding session */
-+
-+ atomic_t *cpu_cmd_counter;
-+
-+ /* Cmd state, one of SCST_CMD_STATE_* constants */
-+ int state;
-+
-+ /*************************************************************
-+ ** Cmd's flags
-+ *************************************************************/
-+
-+ /*
-+ * Set if expected_sn should be incremented, i.e. cmd was sent
-+ * for execution
-+ */
-+ unsigned int sent_for_exec:1;
-+
-+ /* Set if the cmd's action is completed */
-+ unsigned int completed:1;
-+
-+ /* Set if we should ignore Unit Attention in scst_check_sense() */
-+ unsigned int ua_ignore:1;
-+
-+ /* Set if cmd is being processed in atomic context */
-+ unsigned int atomic:1;
-+
-+ /* Set if this command was sent in double UA possible state */
-+ unsigned int double_ua_possible:1;
-+
-+ /* Set if this command contains status */
-+ unsigned int is_send_status:1;
-+
-+ /* Set if cmd is being retried */
-+ unsigned int retry:1;
-+
-+ /* Set if cmd is internally generated */
-+ unsigned int internal:1;
-+
-+ /* Set if the device was blocked by scst_check_blocked_dev() */
-+ unsigned int unblock_dev:1;
-+
-+ /* Set if this cmd incremented dev->pr_readers_count */
-+ unsigned int dec_pr_readers_count_needed:1;
-+
-+ /* Set if scst_dec_on_dev_cmd() call is needed on the cmd's finish */
-+ unsigned int dec_on_dev_needed:1;
-+
-+ /* Set if cmd is queued as hw pending */
-+ unsigned int cmd_hw_pending:1;
-+
-+ /*
-+ * Set if this cmd is required not to make any IO or FS calls during
-+ * memory buffer allocation, at least for READ and WRITE commands.
-+ * Needed for cases like file systems mounted over scst_local's
-+ * devices.
-+ */
-+ unsigned noio_mem_alloc:1;
-+
-+ /*
-+ * Set if the target driver wants to alloc data buffers on its own.
-+ * In this case alloc_data_buf() must be provided in the target driver
-+ * template.
-+ */
-+ unsigned int tgt_need_alloc_data_buf:1;
-+
-+ /*
-+ * Set by SCST if the custom data buffer allocation by the target driver
-+ * succeeded.
-+ */
-+ unsigned int tgt_data_buf_alloced:1;
-+
-+ /* Set if custom data buffer allocated by dev handler */
-+ unsigned int dh_data_buf_alloced:1;
-+
-+ /* Set if the target driver called scst_set_expected() */
-+ unsigned int expected_values_set:1;
-+
-+ /*
-+ * Set if the SG buffer was modified by scst_adjust_sg()
-+ */
-+ unsigned int sg_buff_modified:1;
-+
-+ /*
-+ * Set if the cmd buffer was vmalloc'ed and copied from more
-+ * than one sg chunk
-+ */
-+ unsigned int sg_buff_vmallocated:1;
-+
-+ /*
-+ * Set if scst_cmd_init_stage1_done() was called and the target
-+ * wants preprocessing_done() to be called
-+ */
-+ unsigned int preprocessing_only:1;
-+
-+ /* Set if cmd's SN was set */
-+ unsigned int sn_set:1;
-+
-+ /* Set if hq_cmd_count was incremented */
-+ unsigned int hq_cmd_inced:1;
-+
-+ /*
-+ * Set if scst_cmd_init_stage1_done() was called and the target wants
-+ * the SN for the cmd not to be assigned until scst_restart_cmd()
-+ */
-+ unsigned int set_sn_on_restart_cmd:1;
-+
-+ /* Set if the cmd must not use the sgv cache for its data buffer */
-+ unsigned int no_sgv:1;
-+
-+ /*
-+ * Set if the target driver may need to call dma_sync_sg() or a similar
-+ * function before transferring the cmd's data to the target device
-+ * via DMA.
-+ */
-+ unsigned int may_need_dma_sync:1;
-+
-+ /* Set if the cmd was done or aborted out of its SN */
-+ unsigned int out_of_sn:1;
-+
-+ /* Set if increment expected_sn in cmd->scst_cmd_done() */
-+ unsigned int inc_expected_sn_on_done:1;
-+
-+ /* Set if tgt_sn field is valid */
-+ unsigned int tgt_sn_set:1;
-+
-+ /* Set if any direction residual is possible */
-+ unsigned int resid_possible:1;
-+
-+ /* Set if cmd is done */
-+ unsigned int done:1;
-+
-+ /*
-+ * Set if cmd is finished. Used under sess_list_lock to sync
-+ * between scst_finish_cmd() and scst_abort_cmd()
-+ */
-+ unsigned int finished:1;
-+
-+ /*
-+ * Set if scst_check_local_events() can be called more than once. Set by
-+ * scst_pre_check_local_events().
-+ */
-+ unsigned int check_local_events_once_done:1;
-+
-+#ifdef CONFIG_SCST_DEBUG_TM
-+ /* Set if the cmd was delayed by task management debugging code */
-+ unsigned int tm_dbg_delayed:1;
-+
-+ /* Set if the cmd must be ignored by task management debugging code */
-+ unsigned int tm_dbg_immut:1;
-+#endif
-+
-+ /**************************************************************/
-+
-+ /* cmd's async flags */
-+ unsigned long cmd_flags;
-+
-+ /* Keeps status of cmd's status/data delivery to remote initiator */
-+ int delivery_status;
-+
-+ struct scst_tgt_template *tgtt; /* to save extra dereferences */
-+ struct scst_tgt *tgt; /* to save extra dereferences */
-+ struct scst_device *dev; /* to save extra dereferences */
-+
-+ /* corresponding I_T_L device for this cmd */
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ struct scst_order_data *cur_order_data; /* to save extra dereferences */
-+
-+ uint64_t lun; /* LUN for this cmd */
-+
-+ unsigned long start_time;
-+
-+ /* List entry for tgt_dev's SN related lists */
-+ struct list_head sn_cmd_list_entry;
-+
-+ /* Cmd's serial number, used to execute cmd's in order of arrival */
-+ unsigned int sn;
-+
-+ /* The corresponding sn_slot in tgt_dev->sn_slots */
-+ atomic_t *sn_slot;
-+
-+ /* List entry for sess's sess_cmd_list */
-+ struct list_head sess_cmd_list_entry;
-+
-+ /*
-+ * Used to find the cmd by scst_find_cmd_by_tag(). Set by the
-+ * target driver at the cmd's initialization time
-+ */
-+ uint64_t tag;
-+
-+ uint32_t tgt_sn; /* SN set by target driver (for TM purposes) */
-+
-+ uint8_t *cdb; /* Pointer to the CDB. Points to cdb_buf for small CDBs. */
-+ unsigned short cdb_len;
-+ uint8_t cdb_buf[SCST_MAX_CDB_SIZE];
-+
-+ enum scst_cdb_flags op_flags;
-+ const char *op_name;
-+
-+ enum scst_cmd_queue_type queue_type;
-+
-+ int timeout; /* CDB execution timeout in seconds */
-+ int retries; /* Number of retries that will be done by the SCSI mid-level */
-+
-+ /* SCSI data direction, one of SCST_DATA_* constants */
-+ scst_data_direction data_direction;
-+
-+ /* Remote initiator supplied values, if any */
-+ scst_data_direction expected_data_direction;
-+ int expected_transfer_len;
-+ int expected_out_transfer_len; /* for bidi writes */
-+
-+ /*
-+ * Cmd data length. Could be different from bufflen for commands like
-+ * VERIFY, which transfer a different amount of data (if any) than
-+ * they process.
-+ */
-+ int data_len;
-+
-+ /* Completion routine */
-+ void (*scst_cmd_done) (struct scst_cmd *cmd, int next_state,
-+ enum scst_exec_context pref_context);
-+
-+ struct sgv_pool_obj *sgv; /* sgv object */
-+ int bufflen; /* cmd buffer length */
-+ struct scatterlist *sg; /* cmd data buffer SG vector */
-+ int sg_cnt; /* SG segments count */
-+
-+ /*
-+ * Response data length in data buffer. Must not be set
-+ * directly, use scst_set_resp_data_len() for that.
-+ */
-+ int resp_data_len;
-+
-+ /*
-+ * Response data length adjusted for residuals, i.e.
-+ * min(expected_len, resp_len), if the expected length is set.
-+ */
-+ int adjusted_resp_data_len;
-+
-+ /*
-+ * Data length to write, i.e. transfer from the initiator. Might be
-+ * different from (out_)bufflen if the initiator supplied a too big or
-+ * too small expected(_out_)transfer_len.
-+ */
-+ int write_len;
-+
-+ /*
-+ * write_sg and write_sg_cnt point either to sg/sg_cnt or to
-+ * out_sg/out_sg_cnt.
-+ */
-+ struct scatterlist **write_sg;
-+ int *write_sg_cnt;
-+
-+ /* scst_get_sg_buf_[first,next]() support */
-+ struct scatterlist *get_sg_buf_cur_sg_entry;
-+ int get_sg_buf_entry_num;
-+
-+ /* Bidirectional transfers support */
-+ int out_bufflen; /* WRITE buffer length */
-+ struct sgv_pool_obj *out_sgv; /* WRITE sgv object */
-+ struct scatterlist *out_sg; /* WRITE data buffer SG vector */
-+ int out_sg_cnt; /* WRITE SG segments count */
-+
-+ /*
-+ * Used if both the target driver and the dev handler request their
-+ * own memory allocation. In other cases, both are equal to sg and
-+ * sg_cnt, respectively.
-+ *
-+ * If target driver requests own memory allocations, it MUST use
-+ * functions scst_cmd_get_tgt_sg*() to get sg and sg_cnt! Otherwise,
-+ * it may use functions scst_cmd_get_sg*().
-+ */
-+ struct scatterlist *tgt_sg;
-+ int tgt_sg_cnt;
-+ struct scatterlist *tgt_out_sg; /* bidirectional */
-+ int tgt_out_sg_cnt; /* bidirectional */
-+
-+ /*
-+ * The status fields in case of errors must be set using
-+ * scst_set_cmd_error_status()!
-+ */
-+ uint8_t status; /* status byte from target device */
-+ uint8_t msg_status; /* return status from host adapter itself */
-+ uint8_t host_status; /* set by low-level driver to indicate status */
-+ uint8_t driver_status; /* set by mid-level */
-+
-+ uint8_t *sense; /* pointer to sense buffer */
-+ unsigned short sense_valid_len; /* length of valid sense data */
-+ unsigned short sense_buflen; /* length of the sense buffer, if any */
-+
-+ /* Start time when cmd was sent to rdy_to_xfer() or xmit_response() */
-+ unsigned long hw_pending_start;
-+
-+ /* Used for storage of target driver private stuff */
-+ void *tgt_priv;
-+
-+ /* Used for storage of dev handler private stuff */
-+ void *dh_priv;
-+
-+ /* Used to restore sg if it was modified by scst_adjust_sg() */
-+ struct scatterlist *orig_sg;
-+ int *p_orig_sg_cnt;
-+ int orig_sg_cnt, orig_sg_entry, orig_entry_len;
-+
-+ /* Used to retry commands in case of double UA */
-+ int dbl_ua_orig_resp_data_len, dbl_ua_orig_data_direction;
-+
-+ /*
-+ * List of the corresponding mgmt cmds, if any. Protected by
-+ * sess_list_lock.
-+ */
-+ struct list_head mgmt_cmd_list;
-+
-+ /* List entry for dev's blocked_cmd_list */
-+ struct list_head blocked_cmd_list_entry;
-+
-+ /* Counter of the corresponding SCST_PR_ABORT_ALL TM commands */
-+ struct scst_pr_abort_all_pending_mgmt_cmds_counter *pr_abort_counter;
-+
-+ struct scst_cmd *orig_cmd; /* Used to issue REQUEST SENSE */
-+
-+#ifdef CONFIG_SCST_MEASURE_LATENCY
-+ /*
-+ * Must be the last field, to allow working with drivers that don't
-+ * know about this config-time option.
-+ */
-+ uint64_t start, curr_start, parse_time, alloc_buf_time;
-+ uint64_t restart_waiting_time, rdy_to_xfer_time;
-+ uint64_t pre_exec_time, exec_time, dev_done_time;
-+ uint64_t xmit_time, tgt_on_free_time, dev_on_free_time;
-+#endif
-+};
-+
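-+/*
-+ * Illustrative sketch only: how a target driver might inspect the
-+ * completion fields above, e.g. from its xmit_response() callback. Direct
-+ * field access is shown here to illustrate the field relationships; real
-+ * drivers should prefer the scst_cmd_get_*() accessor helpers mentioned
-+ * above where they exist.
-+ */
-+static void my_show_cmd_result(struct scst_cmd *cmd)
-+{
-+ if (cmd->status != 0 && cmd->sense_valid_len > 0) {
-+  /* e.g. CHECK CONDITION: sense holds sense_valid_len bytes */
-+  print_hex_dump_bytes("sense: ", DUMP_PREFIX_NONE,
-+         cmd->sense, cmd->sense_valid_len);
-+ } else {
-+  /* adjusted_resp_data_len already accounts for residuals */
-+  pr_info("cmd tag %llu: %d bytes of response data\n",
-+   (unsigned long long)cmd->tag,
-+   cmd->adjusted_resp_data_len);
-+ }
-+}
-+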
-+/*
-+ * Parameters for SCST management commands
-+ */
-+struct scst_rx_mgmt_params {
-+ int fn;
-+ uint64_t tag;
-+ const uint8_t *lun;
-+ int lun_len;
-+ uint32_t cmd_sn;
-+ int atomic;
-+ void *tgt_priv;
-+ unsigned char tag_set;
-+ unsigned char lun_set;
-+ unsigned char cmd_sn_set;
-+};
-+
-+/*
-+ * A stub structure to link a management command and the affected regular commands
-+ */
-+struct scst_mgmt_cmd_stub {
-+ struct scst_mgmt_cmd *mcmd;
-+
-+ /* List entry in cmd->mgmt_cmd_list */
-+ struct list_head cmd_mgmt_cmd_list_entry;
-+
-+ /* Set if the cmd was counted in mcmd->cmd_done_wait_count */
-+ unsigned int done_counted:1;
-+
-+ /* Set if the cmd was counted in mcmd->cmd_finish_wait_count */
-+ unsigned int finish_counted:1;
-+};
-+
-+/*
-+ * SCST task management structure
-+ */
-+struct scst_mgmt_cmd {
-+ /* List entry for *_mgmt_cmd_list */
-+ struct list_head mgmt_cmd_list_entry;
-+
-+ struct scst_session *sess;
-+
-+ atomic_t *cpu_cmd_counter;
-+
-+ /* Mgmt cmd state, one of SCST_MCMD_STATE_* constants */
-+ int state;
-+
-+ int fn; /* task management function */
-+
-+ /* Set if device(s) should be unblocked after mcmd's finish */
-+ unsigned int needs_unblocking:1;
-+ unsigned int lun_set:1; /* set, if lun field is valid */
-+ unsigned int cmd_sn_set:1; /* set, if cmd_sn field is valid */
-+
-+ /*
-+ * Number of commands to finish before sending response,
-+ * protected by scst_mcmd_lock
-+ */
-+ int cmd_finish_wait_count;
-+
-+ /*
-+ * Number of commands to complete (done) before resetting reservation,
-+ * protected by scst_mcmd_lock
-+ */
-+ int cmd_done_wait_count;
-+
-+ /* Number of completed commands, protected by scst_mcmd_lock */
-+ int completed_cmd_count;
-+
-+ uint64_t lun; /* LUN for this mgmt cmd */
-+ /* or (and for iSCSI) */
-+ uint64_t tag; /* tag of the corresponding cmd */
-+
-+ uint32_t cmd_sn; /* affected command's highest SN */
-+
-+ /* corresponding cmd (to be aborted, found by tag) */
-+ struct scst_cmd *cmd_to_abort;
-+
-+ /* corresponding device for this mgmt cmd (found by lun) */
-+ struct scst_tgt_dev *mcmd_tgt_dev;
-+
-+ /* completion status, one of the SCST_MGMT_STATUS_* constants */
-+ int status;
-+
-+ /* Used for storage of target driver private stuff or origin PR cmd */
-+ union {
-+ void *tgt_priv;
-+ struct scst_cmd *origin_pr_cmd;
-+ };
-+};
-+
-+/*
-+ * Persistent reservations registrant
-+ */
-+struct scst_dev_registrant {
-+ uint8_t *transport_id;
-+ uint16_t rel_tgt_id;
-+ __be64 key;
-+
-+ /* tgt_dev (I_T nexus) for this registrant, if any */
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ /* List entry for dev_registrants_list */
-+ struct list_head dev_registrants_list_entry;
-+
-+ /* 2 auxiliary fields used to rollback changes for errors, etc. */
-+ struct list_head aux_list_entry;
-+ __be64 rollback_key;
-+};
-+
-+/*
-+ * SCST device
-+ */
-+struct scst_device {
-+ unsigned short type; /* SCSI type of the device */
-+
-+ /*************************************************************
-+ ** Dev's flags. Updates serialized by dev_lock or suspended
-+ ** activity
-+ *************************************************************/
-+
-+ /* Set if dev is RESERVED */
-+ unsigned short dev_reserved:1;
-+
-+ /* Set if double reset UA is possible */
-+ unsigned short dev_double_ua_possible:1;
-+
-+ /* If set, dev is read only */
-+ unsigned short rd_only:1;
-+
-+ /* Set, if a strictly serialized cmd is waiting blocked */
-+ unsigned short strictly_serialized_cmd_waiting:1;
-+
-+ /*
-+ * Set if this device is being unregistered. Useful to let sysfs
-+ * attributes know when they should exit immediately to prevent
-+ * possible deadlocks with their device unregistration waiting for
-+ * their kobj's last put.
-+ */
-+ unsigned short dev_unregistering:1;
-+
-+ /**************************************************************/
-+
-+ /*************************************************************
-+ ** Dev's control mode page related values. Updates serialized
-+ ** by scst_block_dev(). Modified independently of the above and
-+ ** below fields, hence the alignment.
-+ *************************************************************/
-+
-+ unsigned int queue_alg:4 __attribute__((aligned(sizeof(long))));
-+ unsigned int tst:3;
-+ unsigned int tas:1;
-+ unsigned int swp:1;
-+ unsigned int d_sense:1;
-+
-+ /*
-+ * Set if the device implements its own ordered command management. If
-+ * not set and queue_alg is SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER,
-+ * expected_sn will be incremented only after commands have finished.
-+ */
-+ unsigned int has_own_order_mgmt:1;
-+
-+ /**************************************************************/
-+
-+ /* How many cmds are alive on this dev */
-+ atomic_t dev_cmd_count;
-+
-+ spinlock_t dev_lock; /* device lock */
-+
-+ /*
-+ * How many times the device was blocked from executing new cmds.
-+ * Protected by dev_lock.
-+ */
-+ int block_count;
-+
-+ /*
-+ * How many there are "on_dev" commands, i.e. ones who passed
-+ * scst_check_blocked_dev(). Protected by dev_lock.
-+ */
-+ int on_dev_cmd_count;
-+
-+ /*
-+ * How many threads are checking commands for PR allowance.
-+ * Protected by dev_lock.
-+ */
-+ int pr_readers_count;
-+
-+ /*
-+ * Set if dev is persistently reserved. Protected by dev_pr_mutex.
-+ * Modified independently of the above field, hence the alignment.
-+ */
-+ unsigned int pr_is_set:1 __attribute__((aligned(sizeof(long))));
-+
-+ /*
-+ * Set if there is a thread changing or going to change PR state(s).
-+ * Protected by dev_pr_mutex.
-+ */
-+ unsigned int pr_writer_active:1;
-+
-+ struct scst_dev_type *handler; /* corresponding dev handler */
-+
-+ /* Used for storage of dev handler private stuff */
-+ void *dh_priv;
-+
-+ /* Corresponding real SCSI device, could be NULL for virtual devices */
-+ struct scsi_device *scsi_dev;
-+
-+ /* List of commands with lock, if dedicated threads are used */
-+ struct scst_cmd_threads dev_cmd_threads;
-+
-+ /* Memory limits for this device */
-+ struct scst_mem_lim dev_mem_lim;
-+
-+ /*************************************************************
-+ ** Persistent reservation fields. Protected by dev_pr_mutex.
-+ *************************************************************/
-+
-+ /*
-+ * True if persist through power loss is activated. Modified
-+ * independently of the above field, hence the alignment.
-+ */
-+ unsigned short pr_aptpl:1 __attribute__((aligned(sizeof(long))));
-+
-+ /* Persistent reservation type */
-+ uint8_t pr_type;
-+
-+ /* Persistent reservation scope */
-+ uint8_t pr_scope;
-+
-+ /* Mutex to protect PR operations */
-+ struct mutex dev_pr_mutex;
-+
-+ /* Persistent reservation generation value */
-+ uint32_t pr_generation;
-+
-+ /* Reference to registrant - persistent reservation holder */
-+ struct scst_dev_registrant *pr_holder;
-+
-+ /* List of dev's registrants */
-+ struct list_head dev_registrants_list;
-+
-+ /*
-+ * Count of connected tgt_devs from transports that don't support
-+ * PRs, i.e. that don't have get_initiator_port_transport_id(). Protected
-+ * by scst_mutex.
-+ */
-+ int not_pr_supporting_tgt_devs_num;
-+
-+ struct scst_order_data dev_order_data;
-+
-+ /* Persist through power loss files */
-+ char *pr_file_name;
-+ char *pr_file_name1;
-+
-+ /**************************************************************/
-+
-+ /* List of blocked commands, protected by dev_lock. */
-+ struct list_head blocked_cmd_list;
-+
-+ /* A list entry used during TM, protected by scst_mutex */
-+ struct list_head tm_dev_list_entry;
-+
-+ int virt_id; /* virtual device internal ID */
-+
-+ /* Pointer to virtual device name, for convenience only */
-+ char *virt_name;
-+
-+ struct list_head dev_list_entry; /* list entry in global devices list */
-+
-+ /*
-+ * List of tgt_dev's, one per session, protected by scst_mutex or
-+ * dev_lock for reads and both for writes
-+ */
-+ struct list_head dev_tgt_dev_list;
-+
-+ /* List of acg_dev's, one per acg, protected by scst_mutex */
-+ struct list_head dev_acg_dev_list;
-+
-+ /* Number of threads in the device's threads pools */
-+ int threads_num;
-+
-+ /* Threads pool type of the device. Valid only if threads_num > 0. */
-+ enum scst_dev_type_threads_pool_type threads_pool_type;
-+
-+ /* sysfs release completion */
-+ struct completion *dev_kobj_release_cmpl;
-+
-+ struct kobject dev_kobj; /* kobject for this struct */
-+ struct kobject *dev_exp_kobj; /* exported groups */
-+
-+ /* Export number in the dev's sysfs list. Protected by scst_mutex */
-+ int dev_exported_lun_num;
-+};
-+
-+/*
-+ * Used to store thread-local tgt_dev-specific data
-+ */
-+struct scst_thr_data_hdr {
-+ /* List entry in tgt_dev->thr_data_list */
-+ struct list_head thr_data_list_entry;
-+ struct task_struct *owner_thr; /* the owner thread */
-+ atomic_t ref;
-+ /* Function that will be called on the tgt_dev destruction */
-+ void (*free_fn) (struct scst_thr_data_hdr *data);
-+};
-+
-+/*
-+ * Used to cleanly dispose of the async io_context
-+ */
-+struct scst_async_io_context_keeper {
-+ struct kref aic_keeper_kref;
-+ bool aic_ready;
-+ struct io_context *aic;
-+ struct task_struct *aic_keeper_thr;
-+ wait_queue_head_t aic_keeper_waitQ;
-+};
-+
-+/*
-+ * Used to store per-session specific device information, analog of
-+ * SCSI I_T_L nexus.
-+ */
-+struct scst_tgt_dev {
-+ /* List entry in sess->sess_tgt_dev_list */
-+ struct list_head sess_tgt_dev_list_entry;
-+
-+ struct scst_device *dev; /* to save extra dereferences */
-+ uint64_t lun; /* to save extra dereferences */
-+
-+ gfp_t gfp_mask;
-+ struct sgv_pool *pool;
-+ int max_sg_cnt;
-+
-+ /*
-+ * Tgt_dev's async flags. Modified independently of the neighbouring
-+ * fields.
-+ */
-+ unsigned long tgt_dev_flags;
-+
-+ /* Used for storage of dev handler private stuff */
-+ void *dh_priv;
-+
-+ /* How many cmds are alive on this dev in this session */
-+ atomic_t tgt_dev_cmd_count;
-+
-+ struct scst_order_data *curr_order_data;
-+ struct scst_order_data tgt_dev_order_data;
-+
-+ /* List of scst_thr_data_hdr and lock */
-+ spinlock_t thr_data_lock;
-+ struct list_head thr_data_list;
-+
-+ /* Pointer to lists of commands with the lock */
-+ struct scst_cmd_threads *active_cmd_threads;
-+
-+ /* Union to save some CPU cache footprint */
-+ union {
-+ struct {
-+ /* Copy to save fast path dereference */
-+ struct io_context *async_io_context;
-+
-+ struct scst_async_io_context_keeper *aic_keeper;
-+ };
-+
-+ /* Lists of commands with lock, if dedicated threads are used */
-+ struct scst_cmd_threads tgt_dev_cmd_threads;
-+ };
-+
-+ spinlock_t tgt_dev_lock; /* per-session device lock */
-+
-+ /* List of UA's for this device, protected by tgt_dev_lock */
-+ struct list_head UA_list;
-+
-+ struct scst_session *sess; /* corresponding session */
-+ struct scst_acg_dev *acg_dev; /* corresponding acg_dev */
-+
-+ /* Reference to the registrant, for faster lookup */
-+ struct scst_dev_registrant *registrant;
-+
-+ /* List entry in dev->dev_tgt_dev_list */
-+ struct list_head dev_tgt_dev_list_entry;
-+
-+ /* Internal tmp list entry */
-+ struct list_head extra_tgt_dev_list_entry;
-+
-+ /* Set if INQUIRY DATA HAS CHANGED UA is needed */
-+ unsigned int inq_changed_ua_needed:1;
-+
-+ /*
-+ * Stored Unit Attention sense and its length for possible
-+ * subsequent REQUEST SENSE. Both protected by tgt_dev_lock.
-+ */
-+ unsigned short tgt_dev_valid_sense_len;
-+ uint8_t tgt_dev_sense[SCST_SENSE_BUFFERSIZE];
-+
-+ /* sysfs release completion */
-+ struct completion *tgt_dev_kobj_release_cmpl;
-+
-+ struct kobject tgt_dev_kobj; /* kobject for this struct */
-+
-+#ifdef CONFIG_SCST_MEASURE_LATENCY
-+ /*
-+ * Must be the last field, to allow working with drivers that don't
-+ * know about this config-time option.
-+ *
-+ * Protected by sess->lat_lock.
-+ */
-+ uint64_t scst_time, tgt_time, dev_time;
-+ unsigned int processed_cmds;
-+ struct scst_ext_latency_stat dev_latency_stat[SCST_LATENCY_STATS_NUM];
-+#endif
-+};
-+
-+/*
-+ * Used to store ACG-specific device information, like LUN
-+ */
-+struct scst_acg_dev {
-+ struct scst_device *dev; /* corresponding device */
-+
-+ uint64_t lun; /* device's LUN in this acg */
-+
-+ /* If set, the corresponding LU is read only */
-+ unsigned int rd_only:1;
-+
-+ struct scst_acg *acg; /* parent acg */
-+
-+ /* List entry in dev->dev_acg_dev_list */
-+ struct list_head dev_acg_dev_list_entry;
-+
-+ /* List entry in acg->acg_dev_list */
-+ struct list_head acg_dev_list_entry;
-+
-+ /* kobject for this structure */
-+ struct kobject acg_dev_kobj;
-+
-+ /* sysfs release completion */
-+ struct completion *acg_dev_kobj_release_cmpl;
-+
-+ /* Name of the link to the corresponding LUN */
-+ char acg_dev_link_name[20];
-+};
-+
-+/*
-+ * ACG - access control group. Used to store group-related
-+ * control information.
-+ */
-+struct scst_acg {
-+ /* Owner target */
-+ struct scst_tgt *tgt;
-+
-+ /* List of acg_dev's in this acg, protected by scst_mutex */
-+ struct list_head acg_dev_list;
-+
-+ /* List of attached sessions, protected by scst_mutex */
-+ struct list_head acg_sess_list;
-+
-+ /* List of attached acn's, protected by scst_mutex */
-+ struct list_head acn_list;
-+
-+ /* List entry in acg_lists */
-+ struct list_head acg_list_entry;
-+
-+ /* Name of this acg */
-+ const char *acg_name;
-+
-+ /* Type of I/O initiators grouping */
-+ int acg_io_grouping_type;
-+
-+ /* CPU affinity for threads in this ACG */
-+ cpumask_t acg_cpu_mask;
-+
-+ unsigned int tgt_acg:1;
-+
-+ /* sysfs release completion */
-+ struct completion *acg_kobj_release_cmpl;
-+
-+ /* kobject for this structure */
-+ struct kobject acg_kobj;
-+
-+ struct kobject *luns_kobj;
-+ struct kobject *initiators_kobj;
-+
-+ enum scst_lun_addr_method addr_method;
-+};
-+
-+/*
-+ * ACN - access control name. Used to store names by which
-+ * incoming sessions will be assigned to the appropriate ACG.
-+ */
-+struct scst_acn {
-+ struct scst_acg *acg; /* owner ACG */
-+
-+ const char *name; /* initiator's name */
-+
-+ /* List entry in acg->acn_list */
-+ struct list_head acn_list_entry;
-+
-+ /* sysfs file attributes */
-+ struct kobj_attribute *acn_attr;
-+};
-+
-+/**
-+ * struct scst_dev_group - A group of SCST devices (struct scst_device).
-+ *
-+ * Each device is a member of zero or one device groups. Zero or more
-+ * target groups are associated with each device group.
-+ */
-+struct scst_dev_group {
-+ char *name;
-+ struct list_head entry;
-+ struct list_head dev_list;
-+ struct list_head tg_list;
-+ struct kobject kobj;
-+ struct kobject *dev_kobj;
-+ struct kobject *tg_kobj;
-+};
-+
-+/**
-+ * struct scst_dg_dev - A node in scst_dev_group.dev_list.
-+ */
-+struct scst_dg_dev {
-+ struct list_head entry;
-+ struct scst_device *dev;
-+};
-+
-+/**
-+ * struct scst_target_group - A group of SCSI targets (struct scst_tgt).
-+ *
-+ * Such a group is either a primary target port group or a secondary
-+ * port group. See also SPC-4 for more information.
-+ */
-+struct scst_target_group {
-+ struct scst_dev_group *dg;
-+ char *name;
-+ uint16_t group_id;
-+ enum scst_tg_state state;
-+ bool preferred;
-+ struct list_head entry;
-+ struct list_head tgt_list;
-+ struct kobject kobj;
-+};
-+
-+/**
-+ * struct scst_tg_tgt - A node in scst_target_group.tgt_list.
-+ *
-+ * Such a node can either represent a local storage target (struct scst_tgt)
-+ * or a storage target on another system running SCST. In the former case tgt
-+ * != NULL and rel_tgt_id is ignored. In the latter case tgt == NULL and
-+ * rel_tgt_id is relevant.
-+ */
-+struct scst_tg_tgt {
-+ struct list_head entry;
-+ struct scst_target_group *tg;
-+ struct kobject kobj;
-+ struct scst_tgt *tgt;
-+ char *name;
-+ uint16_t rel_tgt_id;
-+};
-+
-+/*
-+ * Used to store per-session UNIT ATTENTIONs
-+ */
-+struct scst_tgt_dev_UA {
-+ /* List entry in tgt_dev->UA_list */
-+ struct list_head UA_list_entry;
-+
-+ /* Set if UA is global for session */
-+ unsigned short global_UA:1;
-+
-+ /* Unit Attention valid sense len */
-+ unsigned short UA_valid_sense_len;
-+ /* Unit Attention sense buf */
-+ uint8_t UA_sense_buffer[SCST_SENSE_BUFFERSIZE];
-+};
-+
-+/* Used to deliver AENs */
-+struct scst_aen {
-+ int event_fn; /* AEN fn */
-+
-+ struct scst_session *sess; /* corresponding session */
-+ __be64 lun; /* corresponding LUN in SCSI form */
-+
-+ union {
-+ /* SCSI AEN data */
-+ struct {
-+ int aen_sense_len;
-+ uint8_t aen_sense[SCST_STANDARD_SENSE_LEN];
-+ };
-+ };
-+
-+ /* Keeps status of AEN's delivery to remote initiator */
-+ int delivery_status;
-+};
-+
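-+/*
-+ * Illustrative sketch only: a report_aen() implementation for a transport
-+ * that cannot deliver AENs. SCST_AEN_RES_NOT_SUPPORTED is assumed to be
-+ * one of the SCST_AEN_RES_* constants mentioned above; returning it is
-+ * assumed to let the SCST core fall back to other notification means.
-+ */
-+static int my_report_aen(struct scst_aen *aen)
-+{
-+ return SCST_AEN_RES_NOT_SUPPORTED;
-+}
-+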
-+#ifndef smp_mb__after_set_bit
-+/* There is no smp_mb__after_set_bit() in the kernel */
-+#define smp_mb__after_set_bit() smp_mb()
-+#endif
-+
-+/*
-+ * Registers target template.
-+ * Returns 0 on success or appropriate error code otherwise.
-+ */
-+int __scst_register_target_template(struct scst_tgt_template *vtt,
-+ const char *version);
-+static inline int scst_register_target_template(struct scst_tgt_template *vtt)
-+{
-+ return __scst_register_target_template(vtt, SCST_INTERFACE_VERSION);
-+}
-+
-+/*
-+ * Registers target template, non-GPL version.
-+ * Returns 0 on success or appropriate error code otherwise.
-+ *
-+ * Note: *vtt must be static!
-+ */
-+int __scst_register_target_template_non_gpl(struct scst_tgt_template *vtt,
-+ const char *version);
-+static inline int scst_register_target_template_non_gpl(
-+ struct scst_tgt_template *vtt)
-+{
-+ return __scst_register_target_template_non_gpl(vtt,
-+ SCST_INTERFACE_VERSION);
-+}
-+
-+void scst_unregister_target_template(struct scst_tgt_template *vtt);
-+
-+struct scst_tgt *scst_register_target(struct scst_tgt_template *vtt,
-+ const char *target_name);
-+void scst_unregister_target(struct scst_tgt *tgt);
-+
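-+/*
-+ * Illustrative sketch only: typical module init/exit for a target driver,
-+ * reusing the my_tgt_template sketch from above. The target name and the
-+ * assumption that scst_register_target() returns NULL on failure are part
-+ * of this example, not of the original interface documentation.
-+ */
-+static struct scst_tgt *my_tgt;
-+
-+static int __init my_init(void)
-+{
-+ int res = scst_register_target_template(&my_tgt_template);
-+
-+ if (res != 0)
-+  return res;
-+
-+ my_tgt = scst_register_target(&my_tgt_template, "my_target0");
-+ if (my_tgt == NULL) {
-+  scst_unregister_target_template(&my_tgt_template);
-+  return -ENOMEM;
-+ }
-+ return 0;
-+}
-+
-+static void __exit my_exit(void)
-+{
-+ scst_unregister_target(my_tgt);
-+ scst_unregister_target_template(&my_tgt_template);
-+}
-+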
-+struct scst_session *scst_register_session(struct scst_tgt *tgt, int atomic,
-+ const char *initiator_name, void *tgt_priv, void *result_fn_data,
-+ void (*result_fn) (struct scst_session *sess, void *data, int result));
-+struct scst_session *scst_register_session_non_gpl(struct scst_tgt *tgt,
-+ const char *initiator_name, void *tgt_priv);
-+void scst_unregister_session(struct scst_session *sess, int wait,
-+ void (*unreg_done_fn) (struct scst_session *sess));
-+void scst_unregister_session_non_gpl(struct scst_session *sess);
-+
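-+/*
-+ * Illustrative sketch only: registering a session when an initiator
-+ * connects. atomic == 0 means the call may sleep; with a NULL result_fn
-+ * the registration is assumed to complete before the call returns.
-+ * "my_conn" is an invented per-connection object.
-+ */
-+static struct scst_session *my_conn_accepted(struct scst_tgt *tgt,
-+ const char *initiator_name, void *my_conn)
-+{
-+ return scst_register_session(tgt, 0, initiator_name, my_conn,
-+         NULL, NULL);
-+}
-+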
-+int __scst_register_dev_driver(struct scst_dev_type *dev_type,
-+ const char *version);
-+static inline int scst_register_dev_driver(struct scst_dev_type *dev_type)
-+{
-+ return __scst_register_dev_driver(dev_type, SCST_INTERFACE_VERSION);
-+}
-+void scst_unregister_dev_driver(struct scst_dev_type *dev_type);
-+
-+int __scst_register_virtual_dev_driver(struct scst_dev_type *dev_type,
-+ const char *version);
-+/*
-+ * Registers a dev handler driver for virtual devices (e.g. VDISK).
-+ * Returns 0 on success or appropriate error code otherwise.
-+ */
-+static inline int scst_register_virtual_dev_driver(
-+ struct scst_dev_type *dev_type)
-+{
-+ return __scst_register_virtual_dev_driver(dev_type,
-+ SCST_INTERFACE_VERSION);
-+}
-+
-+void scst_unregister_virtual_dev_driver(struct scst_dev_type *dev_type);
-+
-+bool scst_initiator_has_luns(struct scst_tgt *tgt, const char *initiator_name);
-+
-+struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
-+ const uint8_t *lun, int lun_len, const uint8_t *cdb,
-+ unsigned int cdb_len, int atomic);
-+void scst_cmd_init_done(struct scst_cmd *cmd,
-+ enum scst_exec_context pref_context);
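-+
-+/*
-+ * Illustrative sketch (hypothetical "my_req" transport structure): the
-+ * typical command reception path in a target driver.
-+ */
-+#if 0
-+static void my_handle_scsi_req(struct my_req *req)
-+{
-+ struct scst_cmd *cmd;
-+
-+ cmd = scst_rx_cmd(req->sess, req->lun, req->lun_len,
-+  req->cdb, req->cdb_len, /*atomic=*/0);
-+ if (cmd == NULL)
-+  return; /* out of memory; drop or retry the request */
-+ scst_cmd_set_tag(cmd, req->tag);
-+ scst_cmd_init_done(cmd, SCST_CONTEXT_THREAD);
-+}
-+#endif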
-+
-+/*
-+ * Notifies SCST that the driver finished the first stage of the command
-+ * initialization and that the command is ready for execution, but that,
-+ * after SCST has done the command's preprocessing, the preprocessing_done()
-+ * function should be called. The second argument sets the preferred
-+ * command execution context. See SCST_CONTEXT_* constants for details.
-+ *
-+ * See comment for scst_cmd_init_done() for the serialization requirements.
-+ */
-+static inline void scst_cmd_init_stage1_done(struct scst_cmd *cmd,
-+ enum scst_exec_context pref_context, int set_sn)
-+{
-+ cmd->preprocessing_only = 1;
-+ cmd->set_sn_on_restart_cmd = !set_sn;
-+ scst_cmd_init_done(cmd, pref_context);
-+}
-+
-+void scst_restart_cmd(struct scst_cmd *cmd, int status,
-+ enum scst_exec_context pref_context);
-+
-+void scst_rx_data(struct scst_cmd *cmd, int status,
-+ enum scst_exec_context pref_context);
-+
-+void scst_tgt_cmd_done(struct scst_cmd *cmd,
-+ enum scst_exec_context pref_context);
-+
-+int scst_rx_mgmt_fn(struct scst_session *sess,
-+ const struct scst_rx_mgmt_params *params);
-+
-+/*
-+ * Creates a new management command using the given tag and sends it for
-+ * execution. Can be used for SCST_ABORT_TASK only.
-+ * Must not be called in parallel with scst_unregister_session() for the
-+ * same sess. Returns 0 for success, error code otherwise.
-+ *
-+ * Obsolete in favor of scst_rx_mgmt_fn()
-+ */
-+static inline int scst_rx_mgmt_fn_tag(struct scst_session *sess, int fn,
-+ uint64_t tag, int atomic, void *tgt_priv)
-+{
-+ struct scst_rx_mgmt_params params;
-+
-+ BUG_ON(fn != SCST_ABORT_TASK);
-+
-+ memset(&params, 0, sizeof(params));
-+ params.fn = fn;
-+ params.tag = tag;
-+ params.tag_set = 1;
-+ params.atomic = atomic;
-+ params.tgt_priv = tgt_priv;
-+ return scst_rx_mgmt_fn(sess, &params);
-+}
-+
-+/*
-+ * Creates a new management command using the given LUN and sends it for
-+ * execution. Currently can be used for any fn, except SCST_ABORT_TASK.
-+ * Must not be called in parallel with scst_unregister_session() for the
-+ * same sess. Returns 0 for success, error code otherwise.
-+ *
-+ * Obsolete in favor of scst_rx_mgmt_fn()
-+ */
-+static inline int scst_rx_mgmt_fn_lun(struct scst_session *sess, int fn,
-+ const uint8_t *lun, int lun_len, int atomic, void *tgt_priv)
-+{
-+ struct scst_rx_mgmt_params params;
-+
-+ BUG_ON(fn == SCST_ABORT_TASK);
-+
-+ memset(&params, 0, sizeof(params));
-+ params.fn = fn;
-+ params.lun = lun;
-+ params.lun_len = lun_len;
-+ params.lun_set = 1;
-+ params.atomic = atomic;
-+ params.tgt_priv = tgt_priv;
-+ return scst_rx_mgmt_fn(sess, &params);
-+}
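-+
-+/*
-+ * Illustrative sketch: issuing a LUN RESET through the generic
-+ * scst_rx_mgmt_fn() interface, which is preferred over the obsolete
-+ * wrappers above.
-+ */
-+#if 0
-+static int my_lun_reset(struct scst_session *sess, const uint8_t *lun,
-+ int lun_len)
-+{
-+ struct scst_rx_mgmt_params params;
-+
-+ memset(&params, 0, sizeof(params));
-+ params.fn = SCST_LUN_RESET;
-+ params.lun = lun;
-+ params.lun_len = lun_len;
-+ params.lun_set = 1;
-+ params.atomic = 0; /* called from a sleepable context */
-+ return scst_rx_mgmt_fn(sess, &params);
-+}
-+#endif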
-+
-+int scst_get_cdb_info(struct scst_cmd *cmd);
-+
-+int scst_set_cmd_error_status(struct scst_cmd *cmd, int status);
-+int scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq);
-+void scst_set_busy(struct scst_cmd *cmd);
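-+
-+/*
-+ * Illustrative sketch: the usual way to fail a command with CHECK CONDITION
-+ * is to combine scst_set_cmd_error() with one of the scst_sense_* triples
-+ * from scst_const.h via SCST_LOAD_SENSE(). "my_cdb_is_valid" is a
-+ * hypothetical validation helper.
-+ */
-+#if 0
-+ if (!my_cdb_is_valid(cmd))
-+  scst_set_cmd_error(cmd,
-+   SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+#endif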
-+
-+void scst_check_convert_sense(struct scst_cmd *cmd);
-+
-+void scst_set_initial_UA(struct scst_session *sess, int key, int asc, int ascq);
-+
-+void scst_capacity_data_changed(struct scst_device *dev);
-+
-+struct scst_cmd *scst_find_cmd_by_tag(struct scst_session *sess, uint64_t tag);
-+struct scst_cmd *scst_find_cmd(struct scst_session *sess, void *data,
-+ int (*cmp_fn) (struct scst_cmd *cmd,
-+ void *data));
-+
-+enum dma_data_direction scst_to_dma_dir(int scst_dir);
-+enum dma_data_direction scst_to_tgt_dma_dir(int scst_dir);
-+
-+/*
-+ * Returns true if cmd's CDB is fully locally handled by SCST and false
-+ * otherwise. Dev handlers' parse() and dev_done() are not called for such
-+ * commands.
-+ */
-+static inline bool scst_is_cmd_fully_local(struct scst_cmd *cmd)
-+{
-+ return (cmd->op_flags & SCST_FULLY_LOCAL_CMD) != 0;
-+}
-+
-+/*
-+ * Returns true if cmd's CDB is locally handled by SCST and
-+ * false otherwise.
-+ */
-+static inline bool scst_is_cmd_local(struct scst_cmd *cmd)
-+{
-+ return (cmd->op_flags & SCST_LOCAL_CMD) != 0;
-+}
-+
-+/* Returns true if cmd can deliver UA */
-+static inline bool scst_is_ua_command(struct scst_cmd *cmd)
-+{
-+ return (cmd->op_flags & SCST_SKIP_UA) == 0;
-+}
-+
-+int scst_register_virtual_device(struct scst_dev_type *dev_handler,
-+ const char *dev_name);
-+void scst_unregister_virtual_device(int id);
-+
-+/*
-+ * Get/Set functions for tgt's sg_tablesize
-+ */
-+static inline int scst_tgt_get_sg_tablesize(struct scst_tgt *tgt)
-+{
-+ return tgt->sg_tablesize;
-+}
-+
-+static inline void scst_tgt_set_sg_tablesize(struct scst_tgt *tgt, int val)
-+{
-+ tgt->sg_tablesize = val;
-+}
-+
-+/*
-+ * Get/Set functions for tgt's target private data
-+ */
-+static inline void *scst_tgt_get_tgt_priv(struct scst_tgt *tgt)
-+{
-+ return tgt->tgt_priv;
-+}
-+
-+static inline void scst_tgt_set_tgt_priv(struct scst_tgt *tgt, void *val)
-+{
-+ tgt->tgt_priv = val;
-+}
-+
-+void scst_update_hw_pending_start(struct scst_cmd *cmd);
-+
-+/*
-+ * Get/Set functions for session's target private data
-+ */
-+static inline void *scst_sess_get_tgt_priv(struct scst_session *sess)
-+{
-+ return sess->tgt_priv;
-+}
-+
-+static inline void scst_sess_set_tgt_priv(struct scst_session *sess,
-+ void *val)
-+{
-+ sess->tgt_priv = val;
-+}
-+
-+uint16_t scst_lookup_tg_id(struct scst_device *dev, struct scst_tgt *tgt);
-+bool scst_impl_alua_configured(struct scst_device *dev);
-+int scst_tg_get_group_info(void **buf, uint32_t *response_length,
-+ struct scst_device *dev, uint8_t data_format);
-+
-+/**
-+ * Returns TRUE if cmd is being executed in atomic context.
-+ *
-+ * This function must be used outside of spinlocks and preempt/BH/IRQ
-+ * disabled sections, because of the EXTRACHECKS check in it.
-+ */
-+static inline bool scst_cmd_atomic(struct scst_cmd *cmd)
-+{
-+ int res = cmd->atomic;
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ /*
-+ * Checkpatch will complain on the use of in_atomic() below. You
-+ * can safely ignore this warning since in_atomic() is used here
-+ * only for debugging purposes.
-+ */
-+ if (unlikely((in_atomic() || in_interrupt() || irqs_disabled()) &&
-+ !res)) {
-+ printk(KERN_ERR "ERROR: atomic context and non-atomic cmd!\n");
-+ dump_stack();
-+ cmd->atomic = 1;
-+ res = 1;
-+ }
-+#endif
-+ return res;
-+}
-+
-+/*
-+ * Returns TRUE if cmd has been preliminarily completed, i.e. completed or
-+ * aborted.
-+ */
-+static inline bool scst_cmd_prelim_completed(struct scst_cmd *cmd)
-+{
-+ return cmd->completed || test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
-+}
-+
-+static inline enum scst_exec_context __scst_estimate_context(bool atomic)
-+{
-+ if (in_irq())
-+ return SCST_CONTEXT_TASKLET;
-+/*
-+ * We come here from many unreliable places, like the block layer, and don't
-+ * have any reliable way to detect if we were called in atomic context or not
-+ * (in_atomic() isn't reliable), so let's be safe and disable this section
-+ * for now to unconditionally return thread context.
-+ */
-+#if 0
-+ else if (irqs_disabled())
-+ return SCST_CONTEXT_THREAD;
-+ else if (in_atomic())
-+ return SCST_CONTEXT_DIRECT_ATOMIC;
-+ else
-+ return atomic ? SCST_CONTEXT_DIRECT :
-+ SCST_CONTEXT_DIRECT_ATOMIC;
-+#else
-+ return SCST_CONTEXT_THREAD;
-+#endif
-+}
-+
-+static inline enum scst_exec_context scst_estimate_context(void)
-+{
-+ return __scst_estimate_context(false);
-+}
-+
-+static inline enum scst_exec_context scst_estimate_context_atomic(void)
-+{
-+ return __scst_estimate_context(true);
-+}
-+
-+/* Returns cmd's CDB */
-+static inline const uint8_t *scst_cmd_get_cdb(struct scst_cmd *cmd)
-+{
-+ return cmd->cdb;
-+}
-+
-+/* Returns cmd's CDB length */
-+static inline unsigned int scst_cmd_get_cdb_len(struct scst_cmd *cmd)
-+{
-+ return cmd->cdb_len;
-+}
-+
-+void scst_cmd_set_ext_cdb(struct scst_cmd *cmd,
-+ uint8_t *ext_cdb, unsigned int ext_cdb_len, gfp_t gfp_mask);
-+
-+/* Returns cmd's session */
-+static inline struct scst_session *scst_cmd_get_session(struct scst_cmd *cmd)
-+{
-+ return cmd->sess;
-+}
-+
-+/* Returns cmd's response data length */
-+static inline int scst_cmd_get_resp_data_len(struct scst_cmd *cmd)
-+{
-+ return cmd->resp_data_len;
-+}
-+
-+/* Returns cmd's adjusted response data length */
-+static inline int scst_cmd_get_adjusted_resp_data_len(struct scst_cmd *cmd)
-+{
-+ return cmd->adjusted_resp_data_len;
-+}
-+
-+/* Returns if status should be sent for cmd */
-+static inline int scst_cmd_get_is_send_status(struct scst_cmd *cmd)
-+{
-+ return cmd->is_send_status;
-+}
-+
-+/*
-+ * Returns pointer to cmd's SG data buffer.
-+ *
-+ * Usage of this function is not recommended; use the scst_get_buf_*()
-+ * family of functions instead.
-+ */
-+static inline struct scatterlist *scst_cmd_get_sg(struct scst_cmd *cmd)
-+{
-+ return cmd->sg;
-+}
-+
-+/*
-+ * Returns cmd's sg_cnt.
-+ *
-+ * Usage of this function is not recommended; use the scst_get_buf_*()
-+ * family of functions instead.
-+ */
-+static inline int scst_cmd_get_sg_cnt(struct scst_cmd *cmd)
-+{
-+ return cmd->sg_cnt;
-+}
-+
-+/*
-+ * Returns cmd's data buffer length.
-+ *
-+ * If you need to iterate over the data in the buffer, usage of
-+ * this function is not recommended; use the scst_get_buf_*()
-+ * family of functions instead.
-+ */
-+static inline unsigned int scst_cmd_get_bufflen(struct scst_cmd *cmd)
-+{
-+ return cmd->bufflen;
-+}
-+
-+/*
-+ * Returns pointer to cmd's bidirectional in (WRITE) SG data buffer.
-+ *
-+ * Usage of this function is not recommended; use the scst_get_out_buf_*()
-+ * family of functions instead.
-+ */
-+static inline struct scatterlist *scst_cmd_get_out_sg(struct scst_cmd *cmd)
-+{
-+ return cmd->out_sg;
-+}
-+
-+/*
-+ * Returns cmd's bidirectional in (WRITE) sg_cnt.
-+ *
-+ * Usage of this function is not recommended; use the scst_get_out_buf_*()
-+ * family of functions instead.
-+ */
-+static inline int scst_cmd_get_out_sg_cnt(struct scst_cmd *cmd)
-+{
-+ return cmd->out_sg_cnt;
-+}
-+
-+void scst_restore_sg_buff(struct scst_cmd *cmd);
-+
-+/* Restores a modified sg buffer to its original state, if necessary */
-+static inline void scst_check_restore_sg_buff(struct scst_cmd *cmd)
-+{
-+ if (unlikely(cmd->sg_buff_modified))
-+ scst_restore_sg_buff(cmd);
-+}
-+
-+/*
-+ * Returns cmd's bidirectional in (WRITE) data buffer length.
-+ *
-+ * If you need to iterate over the data in the buffer, usage of
-+ * this function is not recommended; use the scst_get_out_buf_*()
-+ * family of functions instead.
-+ */
-+static inline unsigned int scst_cmd_get_out_bufflen(struct scst_cmd *cmd)
-+{
-+ return cmd->out_bufflen;
-+}
-+
-+/* Returns pointer to cmd's target's SG data buffer */
-+static inline struct scatterlist *scst_cmd_get_tgt_sg(struct scst_cmd *cmd)
-+{
-+ return cmd->tgt_sg;
-+}
-+
-+/* Returns cmd's target's sg_cnt */
-+static inline int scst_cmd_get_tgt_sg_cnt(struct scst_cmd *cmd)
-+{
-+ return cmd->tgt_sg_cnt;
-+}
-+
-+/* Sets cmd's target's SG data buffer */
-+static inline void scst_cmd_set_tgt_sg(struct scst_cmd *cmd,
-+ struct scatterlist *sg, int sg_cnt)
-+{
-+ cmd->tgt_sg = sg;
-+ cmd->tgt_sg_cnt = sg_cnt;
-+ cmd->tgt_data_buf_alloced = 1;
-+}
-+
-+/* Returns pointer to cmd's target's OUT SG data buffer */
-+static inline struct scatterlist *scst_cmd_get_out_tgt_sg(struct scst_cmd *cmd)
-+{
-+ return cmd->tgt_out_sg;
-+}
-+
-+/* Returns cmd's target's OUT sg_cnt */
-+static inline int scst_cmd_get_tgt_out_sg_cnt(struct scst_cmd *cmd)
-+{
-+ return cmd->tgt_out_sg_cnt;
-+}
-+
-+/* Sets cmd's target's OUT SG data buffer */
-+static inline void scst_cmd_set_tgt_out_sg(struct scst_cmd *cmd,
-+ struct scatterlist *sg, int sg_cnt)
-+{
-+ WARN_ON(!cmd->tgt_data_buf_alloced);
-+
-+ cmd->tgt_out_sg = sg;
-+ cmd->tgt_out_sg_cnt = sg_cnt;
-+}
-+
-+/* Returns cmd's data direction */
-+static inline scst_data_direction scst_cmd_get_data_direction(
-+ struct scst_cmd *cmd)
-+{
-+ return cmd->data_direction;
-+}
-+
-+/* Returns cmd's write len as well as write SG and sg_cnt */
-+static inline int scst_cmd_get_write_fields(struct scst_cmd *cmd,
-+ struct scatterlist **sg, int *sg_cnt)
-+{
-+ *sg = *cmd->write_sg;
-+ *sg_cnt = *cmd->write_sg_cnt;
-+ return cmd->write_len;
-+}
-+
-+void scst_cmd_set_write_not_received_data_len(struct scst_cmd *cmd,
-+ int not_received);
-+
-+bool __scst_get_resid(struct scst_cmd *cmd, int *resid, int *bidi_out_resid);
-+
-+/*
-+ * Returns true if cmd has residual(s) and returns them in the corresponding
-+ * parameter(s).
-+ */
-+static inline bool scst_get_resid(struct scst_cmd *cmd,
-+ int *resid, int *bidi_out_resid)
-+{
-+ if (likely(!cmd->resid_possible))
-+ return false;
-+ return __scst_get_resid(cmd, resid, bidi_out_resid);
-+}
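-+
-+/*
-+ * Illustrative sketch (hypothetical "rsp" response structure): reporting a
-+ * residual back to the transport while building the response.
-+ */
-+#if 0
-+ int resid = 0, bidi_out_resid = 0;
-+
-+ if (scst_get_resid(cmd, &resid, &bidi_out_resid))
-+  rsp->residual_count = cpu_to_be32(abs(resid));
-+#endif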
-+
-+/* Returns cmd's status byte from host device */
-+static inline uint8_t scst_cmd_get_status(struct scst_cmd *cmd)
-+{
-+ return cmd->status;
-+}
-+
-+/* Returns cmd's status from host adapter itself */
-+static inline uint8_t scst_cmd_get_msg_status(struct scst_cmd *cmd)
-+{
-+ return cmd->msg_status;
-+}
-+
-+/* Returns cmd's status set by low-level driver to indicate its status */
-+static inline uint8_t scst_cmd_get_host_status(struct scst_cmd *cmd)
-+{
-+ return cmd->host_status;
-+}
-+
-+/* Returns cmd's status set by SCSI mid-level */
-+static inline uint8_t scst_cmd_get_driver_status(struct scst_cmd *cmd)
-+{
-+ return cmd->driver_status;
-+}
-+
-+/* Returns pointer to cmd's sense buffer */
-+static inline uint8_t *scst_cmd_get_sense_buffer(struct scst_cmd *cmd)
-+{
-+ return cmd->sense;
-+}
-+
-+/* Returns cmd's valid sense length */
-+static inline int scst_cmd_get_sense_buffer_len(struct scst_cmd *cmd)
-+{
-+ return cmd->sense_valid_len;
-+}
-+
-+/*
-+ * Get/Set functions for cmd's queue_type
-+ */
-+static inline enum scst_cmd_queue_type scst_cmd_get_queue_type(
-+ struct scst_cmd *cmd)
-+{
-+ return cmd->queue_type;
-+}
-+
-+static inline void scst_cmd_set_queue_type(struct scst_cmd *cmd,
-+ enum scst_cmd_queue_type queue_type)
-+{
-+ cmd->queue_type = queue_type;
-+}
-+
-+/*
-+ * Get/Set functions for cmd's target SN
-+ */
-+static inline uint64_t scst_cmd_get_tag(struct scst_cmd *cmd)
-+{
-+ return cmd->tag;
-+}
-+
-+static inline void scst_cmd_set_tag(struct scst_cmd *cmd, uint64_t tag)
-+{
-+ cmd->tag = tag;
-+}
-+
-+/*
-+ * Get/Set functions for cmd's target private data.
-+ * Variant with *_lock must be used if target driver uses
-+ * scst_find_cmd() to avoid race with it, except inside scst_find_cmd()'s
-+ * callback, where lock is already taken.
-+ */
-+static inline void *scst_cmd_get_tgt_priv(struct scst_cmd *cmd)
-+{
-+ return cmd->tgt_priv;
-+}
-+
-+static inline void scst_cmd_set_tgt_priv(struct scst_cmd *cmd, void *val)
-+{
-+ cmd->tgt_priv = val;
-+}
-+
-+/*
-+ * Get/Set functions for tgt_need_alloc_data_buf flag
-+ */
-+static inline int scst_cmd_get_tgt_need_alloc_data_buf(struct scst_cmd *cmd)
-+{
-+ return cmd->tgt_need_alloc_data_buf;
-+}
-+
-+static inline void scst_cmd_set_tgt_need_alloc_data_buf(struct scst_cmd *cmd)
-+{
-+ cmd->tgt_need_alloc_data_buf = 1;
-+}
-+
-+/*
-+ * Get/Set functions for tgt_data_buf_alloced flag
-+ */
-+static inline int scst_cmd_get_tgt_data_buff_alloced(struct scst_cmd *cmd)
-+{
-+ return cmd->tgt_data_buf_alloced;
-+}
-+
-+static inline void scst_cmd_set_tgt_data_buff_alloced(struct scst_cmd *cmd)
-+{
-+ cmd->tgt_data_buf_alloced = 1;
-+}
-+
-+/*
-+ * Get/Set functions for dh_data_buf_alloced flag
-+ */
-+static inline int scst_cmd_get_dh_data_buff_alloced(struct scst_cmd *cmd)
-+{
-+ return cmd->dh_data_buf_alloced;
-+}
-+
-+static inline void scst_cmd_set_dh_data_buff_alloced(struct scst_cmd *cmd)
-+{
-+ cmd->dh_data_buf_alloced = 1;
-+}
-+
-+/*
-+ * Get/Set functions for no_sgv flag
-+ */
-+static inline int scst_cmd_get_no_sgv(struct scst_cmd *cmd)
-+{
-+ return cmd->no_sgv;
-+}
-+
-+static inline void scst_cmd_set_no_sgv(struct scst_cmd *cmd)
-+{
-+ cmd->no_sgv = 1;
-+}
-+
-+/*
-+ * Get/Set functions for tgt_sn
-+ */
-+static inline int scst_cmd_get_tgt_sn(struct scst_cmd *cmd)
-+{
-+ BUG_ON(!cmd->tgt_sn_set);
-+ return cmd->tgt_sn;
-+}
-+
-+static inline void scst_cmd_set_tgt_sn(struct scst_cmd *cmd, uint32_t tgt_sn)
-+{
-+ cmd->tgt_sn_set = 1;
-+ cmd->tgt_sn = tgt_sn;
-+}
-+
-+/*
-+ * Get/Set functions for noio_mem_alloc
-+ */
-+static inline bool scst_cmd_get_noio_mem_alloc(struct scst_cmd *cmd)
-+{
-+ return cmd->noio_mem_alloc;
-+}
-+
-+static inline void scst_cmd_set_noio_mem_alloc(struct scst_cmd *cmd)
-+{
-+ cmd->noio_mem_alloc = 1;
-+}
-+
-+/*
-+ * Returns 1 if the cmd was aborted, so its status is invalid and no
-+ * reply shall be sent to the remote initiator. A target driver should
-+ * only clear the internal resources associated with the cmd.
-+ */
-+static inline int scst_cmd_aborted(struct scst_cmd *cmd)
-+{
-+ return test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags) &&
-+ !test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
-+}
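-+
-+/*
-+ * Illustrative sketch (hypothetical "my_send_response" transport helper):
-+ * a target driver's xmit_response() should check for an aborted command
-+ * before transmitting anything.
-+ */
-+#if 0
-+static int my_xmit_response(struct scst_cmd *cmd)
-+{
-+ if (unlikely(scst_cmd_aborted(cmd))) {
-+  scst_set_delivery_status(cmd, SCST_CMD_DELIVERY_ABORTED);
-+  scst_tgt_cmd_done(cmd, SCST_CONTEXT_SAME);
-+  return SCST_TGT_RES_SUCCESS;
-+ }
-+ my_send_response(cmd);
-+ return SCST_TGT_RES_SUCCESS;
-+}
-+#endif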
-+
-+/* Returns sense data format for cmd's dev */
-+static inline bool scst_get_cmd_dev_d_sense(struct scst_cmd *cmd)
-+{
-+ return (cmd->dev != NULL) ? cmd->dev->d_sense : 0;
-+}
-+
-+/*
-+ * Get/Set functions for expected data direction, transfer length
-+ * and its validity flag
-+ */
-+static inline int scst_cmd_is_expected_set(struct scst_cmd *cmd)
-+{
-+ return cmd->expected_values_set;
-+}
-+
-+static inline scst_data_direction scst_cmd_get_expected_data_direction(
-+ struct scst_cmd *cmd)
-+{
-+ return cmd->expected_data_direction;
-+}
-+
-+static inline int scst_cmd_get_expected_transfer_len(
-+ struct scst_cmd *cmd)
-+{
-+ return cmd->expected_transfer_len;
-+}
-+
-+static inline int scst_cmd_get_expected_out_transfer_len(
-+ struct scst_cmd *cmd)
-+{
-+ return cmd->expected_out_transfer_len;
-+}
-+
-+static inline void scst_cmd_set_expected(struct scst_cmd *cmd,
-+ scst_data_direction expected_data_direction,
-+ int expected_transfer_len)
-+{
-+ cmd->expected_data_direction = expected_data_direction;
-+ cmd->expected_transfer_len = expected_transfer_len;
-+ cmd->expected_values_set = 1;
-+}
-+
-+static inline void scst_cmd_set_expected_out_transfer_len(struct scst_cmd *cmd,
-+ int expected_out_transfer_len)
-+{
-+ WARN_ON(!cmd->expected_values_set);
-+ cmd->expected_out_transfer_len = expected_out_transfer_len;
-+}
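-+
-+/*
-+ * Illustrative sketch (hypothetical "req" fields): a transport that carries
-+ * the expected data direction and transfer length in its request header
-+ * should pass them to SCST before calling scst_cmd_init_done().
-+ */
-+#if 0
-+ scst_cmd_set_expected(cmd,
-+  req->is_write ? SCST_DATA_WRITE : SCST_DATA_READ,
-+  req->transfer_len);
-+#endif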
-+
-+/*
-+ * Get/clear functions for cmd's may_need_dma_sync
-+ */
-+static inline int scst_get_may_need_dma_sync(struct scst_cmd *cmd)
-+{
-+ return cmd->may_need_dma_sync;
-+}
-+
-+static inline void scst_clear_may_need_dma_sync(struct scst_cmd *cmd)
-+{
-+ cmd->may_need_dma_sync = 0;
-+}
-+
-+/*
-+ * Get/set functions for cmd's delivery_status. It is one of
-+ * SCST_CMD_DELIVERY_* constants. It specifies the status of the
-+ * command's delivery to the initiator.
-+ */
-+static inline int scst_get_delivery_status(struct scst_cmd *cmd)
-+{
-+ return cmd->delivery_status;
-+}
-+
-+static inline void scst_set_delivery_status(struct scst_cmd *cmd,
-+ int delivery_status)
-+{
-+ cmd->delivery_status = delivery_status;
-+}
-+
-+static inline unsigned int scst_get_active_cmd_count(struct scst_cmd *cmd)
-+{
-+ if (likely(cmd->tgt_dev != NULL))
-+ return atomic_read(&cmd->tgt_dev->tgt_dev_cmd_count);
-+ else
-+ return (unsigned int)-1;
-+}
-+
-+/*
-+ * Get/Set function for mgmt cmd's target private data
-+ */
-+static inline void *scst_mgmt_cmd_get_tgt_priv(struct scst_mgmt_cmd *mcmd)
-+{
-+ return mcmd->tgt_priv;
-+}
-+
-+static inline void scst_mgmt_cmd_set_tgt_priv(struct scst_mgmt_cmd *mcmd,
-+ void *val)
-+{
-+ mcmd->tgt_priv = val;
-+}
-+
-+/* Returns mgmt cmd's completion status (SCST_MGMT_STATUS_* constants) */
-+static inline int scst_mgmt_cmd_get_status(struct scst_mgmt_cmd *mcmd)
-+{
-+ return mcmd->status;
-+}
-+
-+/* Returns mgmt cmd's TM fn */
-+static inline int scst_mgmt_cmd_get_fn(struct scst_mgmt_cmd *mcmd)
-+{
-+ return mcmd->fn;
-+}
-+
-+/*
-+ * Called by dev handler's task_mgmt_fn() to notify SCST core that mcmd
-+ * is going to complete asynchronously.
-+ */
-+void scst_prepare_async_mcmd(struct scst_mgmt_cmd *mcmd);
-+
-+/*
-+ * Called by dev handler to notify SCST core that async. mcmd is completed
-+ * with status "status".
-+ */
-+void scst_async_mcmd_completed(struct scst_mgmt_cmd *mcmd, int status);
-+
-+/* Returns AEN's fn */
-+static inline int scst_aen_get_event_fn(struct scst_aen *aen)
-+{
-+ return aen->event_fn;
-+}
-+
-+/* Returns AEN's session */
-+static inline struct scst_session *scst_aen_get_sess(struct scst_aen *aen)
-+{
-+ return aen->sess;
-+}
-+
-+/* Returns AEN's LUN */
-+static inline __be64 scst_aen_get_lun(struct scst_aen *aen)
-+{
-+ return aen->lun;
-+}
-+
-+/* Returns SCSI AEN's sense */
-+static inline const uint8_t *scst_aen_get_sense(struct scst_aen *aen)
-+{
-+ return aen->aen_sense;
-+}
-+
-+/* Returns SCSI AEN's sense length */
-+static inline int scst_aen_get_sense_len(struct scst_aen *aen)
-+{
-+ return aen->aen_sense_len;
-+}
-+
-+/*
-+ * Get/set functions for AEN's delivery_status. It is one of
-+ * SCST_AEN_RES_* constants. It specifies the status of the
-+ * AEN's delivery to the initiator.
-+ */
-+static inline int scst_get_aen_delivery_status(struct scst_aen *aen)
-+{
-+ return aen->delivery_status;
-+}
-+
-+static inline void scst_set_aen_delivery_status(struct scst_aen *aen,
-+ int status)
-+{
-+ aen->delivery_status = status;
-+}
-+
-+void scst_aen_done(struct scst_aen *aen);
-+
-+static inline void sg_clear(struct scatterlist *sg)
-+{
-+ memset(sg, 0, sizeof(*sg));
-+#ifdef CONFIG_DEBUG_SG
-+ sg->sg_magic = SG_MAGIC;
-+#endif
-+}
-+
-+enum scst_sg_copy_dir {
-+ SCST_SG_COPY_FROM_TARGET,
-+ SCST_SG_COPY_TO_TARGET
-+};
-+
-+void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir);
-+
-+/*
-+ * Functions for access to the command's data (SG) buffer. Should be used
-+ * instead of direct access. Returns the buffer length for success, 0 for EOD,
-+ * negative error code otherwise.
-+ *
-+ * Never EVER use this function to process only "the first page" of the buffer.
-+ * The first SG entry can be as small as a few bytes. Use scst_get_buf_full()
-+ * instead for such cases.
-+ *
-+ * "Buf" argument returns the mapped buffer
-+ *
-+ * The "put" function unmaps the buffer.
-+ */
-+static inline int __scst_get_buf(struct scst_cmd *cmd, int sg_cnt,
-+ uint8_t **buf)
-+{
-+ int res = 0;
-+ struct scatterlist *sg = cmd->get_sg_buf_cur_sg_entry;
-+
-+ if (cmd->get_sg_buf_entry_num >= sg_cnt) {
-+ *buf = NULL;
-+ goto out;
-+ }
-+
-+ if (unlikely(sg_is_chain(sg)))
-+ sg = sg_chain_ptr(sg);
-+
-+ *buf = page_address(sg_page(sg));
-+ *buf += sg->offset;
-+
-+ res = sg->length;
-+
-+ cmd->get_sg_buf_entry_num++;
-+ cmd->get_sg_buf_cur_sg_entry = ++sg;
-+
-+out:
-+ return res;
-+}
-+
-+static inline int scst_get_buf_first(struct scst_cmd *cmd, uint8_t **buf)
-+{
-+ if (unlikely(cmd->sg == NULL)) {
-+ *buf = NULL;
-+ return 0;
-+ }
-+ cmd->get_sg_buf_entry_num = 0;
-+ cmd->get_sg_buf_cur_sg_entry = cmd->sg;
-+ cmd->may_need_dma_sync = 1;
-+ return __scst_get_buf(cmd, cmd->sg_cnt, buf);
-+}
-+
-+static inline int scst_get_buf_next(struct scst_cmd *cmd, uint8_t **buf)
-+{
-+ return __scst_get_buf(cmd, cmd->sg_cnt, buf);
-+}
-+
-+static inline void scst_put_buf(struct scst_cmd *cmd, void *buf)
-+{
-+ /* Nothing to do */
-+}
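-+
-+/*
-+ * Illustrative sketch (hypothetical "my_consume" helper): walking a
-+ * command's data buffer with the scst_get_buf_*()/scst_put_buf() family.
-+ */
-+#if 0
-+static void my_process_data(struct scst_cmd *cmd)
-+{
-+ uint8_t *buf;
-+ int len;
-+
-+ len = scst_get_buf_first(cmd, &buf);
-+ while (len > 0) {
-+  my_consume(buf, len);
-+  scst_put_buf(cmd, buf);
-+  len = scst_get_buf_next(cmd, &buf);
-+ }
-+ if (len < 0)
-+  printk(KERN_ERR "scst_get_buf_*() failed: %d\n", len);
-+}
-+#endif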
-+
-+static inline int scst_get_out_buf_first(struct scst_cmd *cmd, uint8_t **buf)
-+{
-+ if (unlikely(cmd->out_sg == NULL)) {
-+ *buf = NULL;
-+ return 0;
-+ }
-+ cmd->get_sg_buf_entry_num = 0;
-+ cmd->get_sg_buf_cur_sg_entry = cmd->out_sg;
-+ cmd->may_need_dma_sync = 1;
-+ return __scst_get_buf(cmd, cmd->out_sg_cnt, buf);
-+}
-+
-+static inline int scst_get_out_buf_next(struct scst_cmd *cmd, uint8_t **buf)
-+{
-+ return __scst_get_buf(cmd, cmd->out_sg_cnt, buf);
-+}
-+
-+static inline void scst_put_out_buf(struct scst_cmd *cmd, void *buf)
-+{
-+ /* Nothing to do */
-+}
-+
-+static inline int scst_get_sg_buf_first(struct scst_cmd *cmd, uint8_t **buf,
-+ struct scatterlist *sg, int sg_cnt)
-+{
-+ if (unlikely(sg == NULL)) {
-+ *buf = NULL;
-+ return 0;
-+ }
-+ cmd->get_sg_buf_entry_num = 0;
-+ cmd->get_sg_buf_cur_sg_entry = sg;
-+ cmd->may_need_dma_sync = 1;
-+ return __scst_get_buf(cmd, sg_cnt, buf);
-+}
-+
-+static inline int scst_get_sg_buf_next(struct scst_cmd *cmd, uint8_t **buf,
-+ struct scatterlist *sg, int sg_cnt)
-+{
-+ return __scst_get_buf(cmd, sg_cnt, buf);
-+}
-+
-+static inline void scst_put_sg_buf(struct scst_cmd *cmd, void *buf,
-+ struct scatterlist *sg, int sg_cnt)
-+{
-+ /* Nothing to do */
-+}
-+
-+/*
-+ * Functions for access to the command's data (SG) page. Should be used
-+ * instead of direct access. Returns the buffer length for success, 0 for EOD,
-+ * negative error code otherwise.
-+ *
-+ * "Page" argument returns the starting page, "offset" - offset in it.
-+ *
-+ * The "put" function "puts" the buffer. It should be always be used, because
-+ * in future may need to do some additional operations.
-+ */
-+static inline int __scst_get_sg_page(struct scst_cmd *cmd, int sg_cnt,
-+ struct page **page, int *offset)
-+{
-+ int res = 0;
-+ struct scatterlist *sg = cmd->get_sg_buf_cur_sg_entry;
-+
-+ if (cmd->get_sg_buf_entry_num >= sg_cnt) {
-+ *page = NULL;
-+ *offset = 0;
-+ goto out;
-+ }
-+
-+ if (unlikely(sg_is_chain(sg)))
-+ sg = sg_chain_ptr(sg);
-+
-+ *page = sg_page(sg);
-+ *offset = sg->offset;
-+ res = sg->length;
-+
-+ cmd->get_sg_buf_entry_num++;
-+ cmd->get_sg_buf_cur_sg_entry = ++sg;
-+
-+out:
-+ return res;
-+}
-+
-+static inline int scst_get_sg_page_first(struct scst_cmd *cmd,
-+ struct page **page, int *offset)
-+{
-+ if (unlikely(cmd->sg == NULL)) {
-+ *page = NULL;
-+ *offset = 0;
-+ return 0;
-+ }
-+ cmd->get_sg_buf_entry_num = 0;
-+ cmd->get_sg_buf_cur_sg_entry = cmd->sg;
-+ return __scst_get_sg_page(cmd, cmd->sg_cnt, page, offset);
-+}
-+
-+static inline int scst_get_sg_page_next(struct scst_cmd *cmd,
-+ struct page **page, int *offset)
-+{
-+ return __scst_get_sg_page(cmd, cmd->sg_cnt, page, offset);
-+}
-+
-+static inline void scst_put_sg_page(struct scst_cmd *cmd,
-+ struct page *page, int offset)
-+{
-+ /* Nothing to do */
-+}
-+
-+static inline int scst_get_out_sg_page_first(struct scst_cmd *cmd,
-+ struct page **page, int *offset)
-+{
-+ if (unlikely(cmd->out_sg == NULL)) {
-+ *page = NULL;
-+ *offset = 0;
-+ return 0;
-+ }
-+ cmd->get_sg_buf_entry_num = 0;
-+ cmd->get_sg_buf_cur_sg_entry = cmd->out_sg;
-+ return __scst_get_sg_page(cmd, cmd->out_sg_cnt, page, offset);
-+}
-+
-+static inline int scst_get_out_sg_page_next(struct scst_cmd *cmd,
-+ struct page **page, int *offset)
-+{
-+ return __scst_get_sg_page(cmd, cmd->out_sg_cnt, page, offset);
-+}
-+
-+static inline void scst_put_out_sg_page(struct scst_cmd *cmd,
-+ struct page *page, int offset)
-+{
-+ /* Nothing to do */
-+}
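-+
-+/*
-+ * Illustrative sketch (hypothetical "my_map_page" helper): the page-based
-+ * variant, useful when pages are handed to an API that takes struct page,
-+ * e.g. for zero-copy DMA setup.
-+ */
-+#if 0
-+ struct page *page;
-+ int offset, len;
-+
-+ len = scst_get_sg_page_first(cmd, &page, &offset);
-+ while (len > 0) {
-+  my_map_page(page, offset, len);
-+  scst_put_sg_page(cmd, page, offset);
-+  len = scst_get_sg_page_next(cmd, &page, &offset);
-+ }
-+#endif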
-+
-+/*
-+ * Returns an approximate, rounded-up count of the buffers that
-+ * scst_get_buf_[first|next]() will return.
-+ */
-+static inline int scst_get_buf_count(struct scst_cmd *cmd)
-+{
-+ return (cmd->sg_cnt == 0) ? 1 : cmd->sg_cnt;
-+}
-+
-+/*
-+ * Returns an approximate, rounded-up count of the buffers that
-+ * scst_get_out_buf_[first|next]() will return.
-+ */
-+static inline int scst_get_out_buf_count(struct scst_cmd *cmd)
-+{
-+ return (cmd->out_sg_cnt == 0) ? 1 : cmd->out_sg_cnt;
-+}
-+
-+int scst_get_buf_full(struct scst_cmd *cmd, uint8_t **buf);
-+void scst_put_buf_full(struct scst_cmd *cmd, uint8_t *buf);
-+
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+extern struct lockdep_map scst_suspend_dep_map;
-+#define scst_assert_activity_suspended() \
-+ WARN_ON(debug_locks && !lock_is_held(&scst_suspend_dep_map));
-+#else
-+#define scst_assert_activity_suspended() do { } while (0)
-+#endif
-+int scst_suspend_activity(bool interruptible);
-+void scst_resume_activity(void);
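-+
-+/*
-+ * Illustrative sketch (hypothetical "my_reconfigure" step):
-+ * scst_suspend_activity() and scst_resume_activity() must always be paired.
-+ */
-+#if 0
-+ int res;
-+
-+ res = scst_suspend_activity(true); /* interruptible wait */
-+ if (res == 0) {
-+  my_reconfigure();
-+  scst_resume_activity();
-+ }
-+#endif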
-+
-+void scst_process_active_cmd(struct scst_cmd *cmd, bool atomic);
-+
-+void scst_post_parse(struct scst_cmd *cmd);
-+void scst_post_alloc_data_buf(struct scst_cmd *cmd);
-+
-+int scst_check_local_events(struct scst_cmd *cmd);
-+
-+static inline int scst_pre_check_local_events(struct scst_cmd *cmd)
-+{
-+ int res = scst_check_local_events(cmd);
-+ cmd->check_local_events_once_done = 1;
-+ return res;
-+}
-+
-+int scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd);
-+
-+struct scst_trace_log {
-+ unsigned int val;
-+ const char *token;
-+};
-+
-+extern struct mutex scst_mutex;
-+
-+const struct sysfs_ops *scst_sysfs_get_sysfs_ops(void);
-+
-+/*
-+ * Returns target driver's root sysfs kobject.
-+ * The driver can create own files/directories/links here.
-+ */
-+static inline struct kobject *scst_sysfs_get_tgtt_kobj(
-+ struct scst_tgt_template *tgtt)
-+{
-+ return &tgtt->tgtt_kobj;
-+}
-+
-+/*
-+ * Returns target's root sysfs kobject.
-+ * The driver can create own files/directories/links here.
-+ */
-+static inline struct kobject *scst_sysfs_get_tgt_kobj(
-+ struct scst_tgt *tgt)
-+{
-+ return &tgt->tgt_kobj;
-+}
-+
-+/*
-+ * Returns device handler's root sysfs kobject.
-+ * The driver can create own files/directories/links here.
-+ */
-+static inline struct kobject *scst_sysfs_get_devt_kobj(
-+ struct scst_dev_type *devt)
-+{
-+ return &devt->devt_kobj;
-+}
-+
-+/*
-+ * Returns device's root sysfs kobject.
-+ * The driver can create own files/directories/links here.
-+ */
-+static inline struct kobject *scst_sysfs_get_dev_kobj(
-+ struct scst_device *dev)
-+{
-+ return &dev->dev_kobj;
-+}
-+
-+/*
-+ * Returns session's root sysfs kobject.
-+ * The driver can create own files/directories/links here.
-+ */
-+static inline struct kobject *scst_sysfs_get_sess_kobj(
-+ struct scst_session *sess)
-+{
-+ return &sess->sess_kobj;
-+}
-+
-+/* Returns target name */
-+static inline const char *scst_get_tgt_name(const struct scst_tgt *tgt)
-+{
-+ return tgt->tgt_name;
-+}
-+
-+int scst_alloc_sense(struct scst_cmd *cmd, int atomic);
-+int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
-+ const uint8_t *sense, unsigned int len);
-+
-+int scst_set_sense(uint8_t *buffer, int len, bool d_sense,
-+ int key, int asc, int ascq);
-+
-+bool scst_is_ua_sense(const uint8_t *sense, int len);
-+
-+bool scst_analyze_sense(const uint8_t *sense, int len,
-+ unsigned int valid_mask, int key, int asc, int ascq);
-+
-+unsigned long scst_random(void);
-+
-+void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len);
-+
-+void scst_cmd_get(struct scst_cmd *cmd);
-+void scst_cmd_put(struct scst_cmd *cmd);
-+
-+struct scatterlist *scst_alloc(int size, gfp_t gfp_mask, int *count);
-+void scst_free(struct scatterlist *sg, int count);
-+
-+void scst_add_thr_data(struct scst_tgt_dev *tgt_dev,
-+ struct scst_thr_data_hdr *data,
-+ void (*free_fn) (struct scst_thr_data_hdr *data));
-+void scst_del_all_thr_data(struct scst_tgt_dev *tgt_dev);
-+void scst_dev_del_all_thr_data(struct scst_device *dev);
-+struct scst_thr_data_hdr *__scst_find_thr_data(struct scst_tgt_dev *tgt_dev,
-+ struct task_struct *tsk);
-+
-+/* Finds the data local to the current thread. Returns NULL if not found. */
-+static inline struct scst_thr_data_hdr *scst_find_thr_data(
-+ struct scst_tgt_dev *tgt_dev)
-+{
-+ return __scst_find_thr_data(tgt_dev, current);
-+}
-+
-+/* Increase ref counter for the thread data */
-+static inline void scst_thr_data_get(struct scst_thr_data_hdr *data)
-+{
-+ atomic_inc(&data->ref);
-+}
-+
-+/* Decrease ref counter for the thread data */
-+static inline void scst_thr_data_put(struct scst_thr_data_hdr *data)
-+{
-+ if (atomic_dec_and_test(&data->ref))
-+ data->free_fn(data);
-+}
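-+
-+/*
-+ * Illustrative sketch (hypothetical "my_thr_data" structure): per-thread
-+ * data is typically a driver structure embedding scst_thr_data_hdr, looked
-+ * up with scst_find_thr_data() and recovered via container_of().
-+ */
-+#if 0
-+struct my_thr_data {
-+ struct scst_thr_data_hdr hdr;
-+ int my_counter;
-+};
-+
-+static struct my_thr_data *my_get_thr_data(struct scst_tgt_dev *tgt_dev)
-+{
-+ struct scst_thr_data_hdr *d = scst_find_thr_data(tgt_dev);
-+
-+ return d ? container_of(d, struct my_thr_data, hdr) : NULL;
-+}
-+#endif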
-+
-+int scst_calc_block_shift(int sector_size);
-+int scst_sbc_generic_parse(struct scst_cmd *cmd,
-+ int (*get_block_shift)(struct scst_cmd *cmd));
-+int scst_cdrom_generic_parse(struct scst_cmd *cmd,
-+ int (*get_block_shift)(struct scst_cmd *cmd));
-+int scst_modisk_generic_parse(struct scst_cmd *cmd,
-+ int (*get_block_shift)(struct scst_cmd *cmd));
-+int scst_tape_generic_parse(struct scst_cmd *cmd,
-+ int (*get_block_size)(struct scst_cmd *cmd));
-+int scst_changer_generic_parse(struct scst_cmd *cmd,
-+ int (*nothing)(struct scst_cmd *cmd));
-+int scst_processor_generic_parse(struct scst_cmd *cmd,
-+ int (*nothing)(struct scst_cmd *cmd));
-+int scst_raid_generic_parse(struct scst_cmd *cmd,
-+ int (*nothing)(struct scst_cmd *cmd));
-+
-+int scst_block_generic_dev_done(struct scst_cmd *cmd,
-+ void (*set_block_shift)(struct scst_cmd *cmd, int block_shift));
-+int scst_tape_generic_dev_done(struct scst_cmd *cmd,
-+ void (*set_block_size)(struct scst_cmd *cmd, int block_size));
-+
-+int scst_obtain_device_parameters(struct scst_device *dev);
-+
-+void scst_reassign_persistent_sess_states(struct scst_session *new_sess,
-+ struct scst_session *old_sess);
-+
-+int scst_get_max_lun_commands(struct scst_session *sess, uint64_t lun);
-+
-+/*
-+ * Has to be open coded here, because Linux doesn't have an equivalent that
-+ * allows exclusive wake-ups of threads in LIFO order. We need it to let (yet)
-+ * unneeded threads sleep and not pollute the CPU cache with their stacks.
-+ */
-+static inline void add_wait_queue_exclusive_head(wait_queue_head_t *q,
-+ wait_queue_t *wait)
-+{
-+ unsigned long flags;
-+
-+ wait->flags |= WQ_FLAG_EXCLUSIVE;
-+ spin_lock_irqsave(&q->lock, flags);
-+ __add_wait_queue(q, wait);
-+ spin_unlock_irqrestore(&q->lock, flags);
-+}
-+
-+/*
-+ * Structure to match events to user space and replies on them
-+ */
-+struct scst_sysfs_user_info {
-+ /* Unique cookie to identify request */
-+ uint32_t info_cookie;
-+
-+ /* Entry in the global list */
-+ struct list_head info_list_entry;
-+
-+ /* Set if reply from the user space is being executed */
-+ unsigned int info_being_executed:1;
-+
-+ /* Set if this info is in the info_list */
-+ unsigned int info_in_list:1;
-+
-+ /* Completion to wait on for the request completion */
-+ struct completion info_completion;
-+
-+ /* Request completion status and optional data */
-+ int info_status;
-+ void *data;
-+};
-+
-+int scst_sysfs_user_add_info(struct scst_sysfs_user_info **out_info);
-+void scst_sysfs_user_del_info(struct scst_sysfs_user_info *info);
-+struct scst_sysfs_user_info *scst_sysfs_user_get_info(uint32_t cookie);
-+int scst_wait_info_completion(struct scst_sysfs_user_info *info,
-+ unsigned long timeout);
-+
-+unsigned int scst_get_setup_id(void);
-+
-+/*
-+ * Needed to avoid a potential circular locking dependency between scst_mutex
-+ * and the internal sysfs locking (s_active). Such a dependency is possible
-+ * since most sysfs entries are created and deleted under scst_mutex AND
-+ * scst_mutex is taken inside sysfs functions. So, we push all the processing
-+ * that takes scst_mutex out of the sysfs functions. To avoid deadlock, we
-+ * return EAGAIN from them if the processing takes too long. User space should
-+ * then poll last_sysfs_mgmt_res until it returns the result of the processing
-+ * (something other than EAGAIN).
-+ */
-+struct scst_sysfs_work_item {
-+ /*
-+ * If true, then last_sysfs_mgmt_res will not be updated. This is
-+ * needed to allow read-only sysfs monitoring during management actions.
-+ * All management actions are supposed to be externally serialized,
-+ * so last_sysfs_mgmt_res is then automatically serialized too.
-+ * Otherwise a monitoring action could overwrite the value of a
-+ * simultaneous management action's last_sysfs_mgmt_res.
-+ */
-+ bool read_only_action;
-+
-+ struct list_head sysfs_work_list_entry;
-+ struct kref sysfs_work_kref;
-+ int (*sysfs_work_fn)(struct scst_sysfs_work_item *work);
-+ struct completion sysfs_work_done;
-+ char *buf;
-+
-+ union {
-+ struct scst_dev_type *devt;
-+ struct scst_tgt_template *tgtt;
-+ struct {
-+ struct scst_tgt *tgt;
-+ struct scst_acg *acg;
-+ union {
-+ bool is_tgt_kobj;
-+ int io_grouping_type;
-+ bool enable;
-+ cpumask_t cpu_mask;
-+ };
-+ };
-+ struct {
-+ struct scst_device *dev;
-+ int new_threads_num;
-+ enum scst_dev_type_threads_pool_type new_threads_pool_type;
-+ };
-+ struct scst_session *sess;
-+ struct {
-+ struct scst_tgt *tgt_r;
-+ unsigned long rel_tgt_id;
-+ };
-+ struct {
-+ struct kobject *kobj;
-+ };
-+ };
-+ int work_res;
-+ char *res_buf;
-+};
-+
-+int scst_alloc_sysfs_work(int (*sysfs_work_fn)(struct scst_sysfs_work_item *),
-+ bool read_only_action, struct scst_sysfs_work_item **res_work);
-+int scst_sysfs_queue_wait_work(struct scst_sysfs_work_item *work);
-+void scst_sysfs_work_get(struct scst_sysfs_work_item *work);
-+void scst_sysfs_work_put(struct scst_sysfs_work_item *work);
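-+
-+/*
-+ * Illustrative sketch (hypothetical "my_do_work" callback): how a sysfs
-+ * store() handler typically defers processing that needs scst_mutex
-+ * through the work item interface above.
-+ */
-+#if 0
-+ struct scst_sysfs_work_item *work;
-+ int res;
-+
-+ res = scst_alloc_sysfs_work(my_do_work, false, &work);
-+ if (res != 0)
-+  return res;
-+ work->tgt = tgt;
-+ res = scst_sysfs_queue_wait_work(work); /* may return -EAGAIN */
-+#endif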
-+
-+char *scst_get_next_lexem(char **token_str);
-+void scst_restore_token_str(char *prev_lexem, char *token_str);
-+char *scst_get_next_token_str(char **input_str);
-+
-+void scst_init_threads(struct scst_cmd_threads *cmd_threads);
-+void scst_deinit_threads(struct scst_cmd_threads *cmd_threads);
-+
-+void scst_pass_through_cmd_done(void *data, char *sense, int result, int resid);
-+int scst_scsi_exec_async(struct scst_cmd *cmd, void *data,
-+ void (*done)(void *data, char *sense, int result, int resid));
-+
-+#endif /* __SCST_H */
-diff -uprN orig/linux-3.2/include/scst/scst_const.h linux-3.2/include/scst/scst_const.h
---- orig/linux-3.2/include/scst/scst_const.h
-+++ linux-3.2/include/scst/scst_const.h
-@@ -0,0 +1,487 @@
-+/*
-+ * include/scst_const.h
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * Contains common SCST constants.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#ifndef __SCST_CONST_H
-+#define __SCST_CONST_H
-+
-+#ifndef GENERATING_UPSTREAM_PATCH
-+/*
-+ * Include <linux/version.h> only when not converting this header file into
-+ * a patch for upstream review, because only then is the symbol
-+ * LINUX_VERSION_CODE needed.
-+ */
-+#include <linux/version.h>
-+#endif
-+#include <scsi/scsi.h>
-+
-+/*
-+ * Version numbers, the same as for the kernel.
-+ *
-+ * When changing it, don't forget to change SCST_FIO_REV in scst_vdisk.c
-+ * and FIO_REV in usr/fileio/common.h as well.
-+ */
-+#define SCST_VERSION(a, b, c, d) (((a) << 24) + ((b) << 16) + ((c) << 8) + d)
-+#define SCST_VERSION_CODE SCST_VERSION(2, 2, 0, 0)
-+#define SCST_VERSION_STRING_SUFFIX
-+#define SCST_VERSION_NAME "2.2.0"
-+#define SCST_VERSION_STRING SCST_VERSION_NAME SCST_VERSION_STRING_SUFFIX
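-+
-+/*
-+ * For example, SCST_VERSION(2, 2, 0, 0) packs to 0x02020000, so version
-+ * codes can be compared numerically, just like KERNEL_VERSION() values.
-+ */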
-+
-+#define SCST_CONST_VERSION "$Revision: 3987 $"
-+
-+/*** Shared constants between user and kernel spaces ***/
-+
-+/* Max size of CDB */
-+#define SCST_MAX_CDB_SIZE 16
-+
-+/* Max size of long CDB */
-+#define SCST_MAX_LONG_CDB_SIZE 65536
-+
-+/* Max size of various names */
-+#define SCST_MAX_NAME 50
-+
-+/* Max size of external names, like initiator name */
-+#define SCST_MAX_EXTERNAL_NAME 256
-+
-+/* Max LUN. 2 bits are used for addressing method. */
-+#define SCST_MAX_LUN ((1 << (16-2)) - 1)
-+
-+/*
-+ * Size of sense sufficient to carry standard sense data.
-+ * Warning! It's allocated on stack!
-+ */
-+#define SCST_STANDARD_SENSE_LEN 18
-+
-+/* Max size of sense */
-+#define SCST_SENSE_BUFFERSIZE 96
-+
-+/*************************************************************
-+ ** Allowed delivery statuses for cmd's delivery_status
-+ *************************************************************/
-+
-+#define SCST_CMD_DELIVERY_SUCCESS 0
-+#define SCST_CMD_DELIVERY_FAILED -1
-+#define SCST_CMD_DELIVERY_ABORTED -2
-+
-+/*************************************************************
-+ ** Values for task management functions
-+ *************************************************************/
-+#define SCST_ABORT_TASK 0
-+#define SCST_ABORT_TASK_SET 1
-+#define SCST_CLEAR_ACA 2
-+#define SCST_CLEAR_TASK_SET 3
-+#define SCST_LUN_RESET 4
-+#define SCST_TARGET_RESET 5
-+
-+/** SCST extensions **/
-+
-+/*
-+ * Notifies about I_T nexus loss event in the corresponding session.
-+ * Aborts all tasks there, resets the reservation, if any, and sets
-+ * up the I_T Nexus loss UA.
-+ */
-+#define SCST_NEXUS_LOSS_SESS 6
-+
-+/* Aborts all tasks in the corresponding session */
-+#define SCST_ABORT_ALL_TASKS_SESS 7
-+
-+/*
-+ * Notifies about I_T nexus loss event. Aborts all tasks in all sessions
-+ * of the tgt, resets the reservations, if any, and sets up the I_T Nexus
-+ * loss UA.
-+ */
-+#define SCST_NEXUS_LOSS 8
-+
-+/* Aborts all tasks in all sessions of the tgt */
-+#define SCST_ABORT_ALL_TASKS 9
-+
-+/*
-+ * Internal TM command issued by SCST in scst_unregister_session(). It is the
-+ * same as SCST_NEXUS_LOSS_SESS, except:
-+ * - it doesn't call task_mgmt_affected_cmds_done()
-+ * - it doesn't call task_mgmt_fn_done()
-+ * - it doesn't queue NEXUS LOSS UA.
-+ *
-+ * Target drivers must NEVER use it!!
-+ */
-+#define SCST_UNREG_SESS_TM 10
-+
-+/*
-+ * Internal TM command issued by SCST in scst_pr_abort_reg(). It aborts all
-+ * tasks from mcmd->origin_pr_cmd->tgt_dev, except mcmd->origin_pr_cmd.
-+ * Additionally:
-+ * - it signals pr_aborting_cmpl completion when all affected
-+ *   commands are marked as aborted.
-+ * - it doesn't call task_mgmt_affected_cmds_done()
-+ * - it doesn't call task_mgmt_fn_done()
-+ * - it calls mcmd->origin_pr_cmd->scst_cmd_done() when all affected
-+ *   commands are aborted.
-+ *
-+ * Target drivers must NEVER use it!!
-+ */
-+#define SCST_PR_ABORT_ALL 11
-+
-+/*************************************************************
-+ ** Values for mgmt cmd's status field. Codes taken from iSCSI
-+ *************************************************************/
-+#define SCST_MGMT_STATUS_SUCCESS 0
-+#define SCST_MGMT_STATUS_TASK_NOT_EXIST -1
-+#define SCST_MGMT_STATUS_LUN_NOT_EXIST -2
-+#define SCST_MGMT_STATUS_FN_NOT_SUPPORTED -5
-+#define SCST_MGMT_STATUS_REJECTED -255
-+#define SCST_MGMT_STATUS_FAILED -129
-+
-+/*************************************************************
-+ ** SCSI LUN addressing methods. See also SAM-2 and the
-+ ** section about eight byte LUNs.
-+ *************************************************************/
-+enum scst_lun_addr_method {
-+ SCST_LUN_ADDR_METHOD_PERIPHERAL = 0,
-+ SCST_LUN_ADDR_METHOD_FLAT = 1,
-+ SCST_LUN_ADDR_METHOD_LUN = 2,
-+ SCST_LUN_ADDR_METHOD_EXTENDED_LUN = 3,
-+};
-+
-+/*************************************************************
-+ ** SCSI task attribute queue types
-+ *************************************************************/
-+enum scst_cmd_queue_type {
-+ SCST_CMD_QUEUE_UNTAGGED = 0,
-+ SCST_CMD_QUEUE_SIMPLE,
-+ SCST_CMD_QUEUE_ORDERED,
-+ SCST_CMD_QUEUE_HEAD_OF_QUEUE,
-+ SCST_CMD_QUEUE_ACA
-+};
-+
-+/*************************************************************
-+ ** CDB flags
-+ *************************************************************/
-+enum scst_cdb_flags {
-+ SCST_TRANSFER_LEN_TYPE_FIXED = 0x0001,
-+ SCST_SMALL_TIMEOUT = 0x0002,
-+ SCST_LONG_TIMEOUT = 0x0004,
-+ SCST_UNKNOWN_LENGTH = 0x0008,
-+ SCST_INFO_VALID = 0x0010, /* must be single bit */
-+ SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED = 0x0020,
-+ SCST_IMPLICIT_HQ = 0x0040,
-+ SCST_SKIP_UA = 0x0080,
-+ SCST_WRITE_MEDIUM = 0x0100,
-+ SCST_LOCAL_CMD = 0x0200,
-+ SCST_FULLY_LOCAL_CMD = 0x0400,
-+ SCST_REG_RESERVE_ALLOWED = 0x0800,
-+ SCST_WRITE_EXCL_ALLOWED = 0x1000,
-+ SCST_EXCL_ACCESS_ALLOWED = 0x2000,
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ SCST_TEST_IO_IN_SIRQ_ALLOWED = 0x4000,
-+#endif
-+ SCST_SERIALIZED = 0x8000,
-+ SCST_STRICTLY_SERIALIZED = 0x10000|SCST_SERIALIZED,
-+};
-+
-+/*************************************************************
-+ ** Data direction aliases. When changing them, don't forget to change
-+ ** scst_to_tgt_dma_dir() and SCST_DATA_DIR_MAX as well!!
-+ *************************************************************/
-+#define SCST_DATA_UNKNOWN 0
-+#define SCST_DATA_WRITE 1
-+#define SCST_DATA_READ 2
-+#define SCST_DATA_BIDI (SCST_DATA_WRITE | SCST_DATA_READ)
-+#define SCST_DATA_NONE 4
-+
-+#define SCST_DATA_DIR_MAX (SCST_DATA_NONE+1)
-+
-+/*************************************************************
-+ ** Default suffix for targets with NULL names
-+ *************************************************************/
-+#define SCST_DEFAULT_TGT_NAME_SUFFIX "_target_"
-+
-+/*************************************************************
-+ ** Sense manipulation and examination
-+ *************************************************************/
-+#define SCST_LOAD_SENSE(key_asc_ascq) key_asc_ascq
-+
-+#define SCST_SENSE_VALID(sense) ((sense != NULL) && \
-+ ((((const uint8_t *)(sense))[0] & 0x70) == 0x70))
-+
-+#define SCST_NO_SENSE(sense) ((sense != NULL) && \
-+ (((const uint8_t *)(sense))[2] == 0))
-+
-+/*************************************************************
-+ ** Sense data for the appropriate errors. Can be used with
-+ ** scst_set_cmd_error()
-+ *************************************************************/
-+#define scst_sense_no_sense NO_SENSE, 0x00, 0
-+#define scst_sense_hardw_error HARDWARE_ERROR, 0x44, 0
-+#define scst_sense_aborted_command ABORTED_COMMAND, 0x00, 0
-+#define scst_sense_invalid_opcode ILLEGAL_REQUEST, 0x20, 0
-+#define scst_sense_invalid_field_in_cdb ILLEGAL_REQUEST, 0x24, 0
-+#define scst_sense_invalid_field_in_parm_list ILLEGAL_REQUEST, 0x26, 0
-+#define scst_sense_parameter_value_invalid ILLEGAL_REQUEST, 0x26, 2
-+#define scst_sense_invalid_release ILLEGAL_REQUEST, 0x26, 4
-+#define scst_sense_parameter_list_length_invalid \
-+ ILLEGAL_REQUEST, 0x1A, 0
-+#define scst_sense_reset_UA UNIT_ATTENTION, 0x29, 0
-+#define scst_sense_nexus_loss_UA UNIT_ATTENTION, 0x29, 0x7
-+#define scst_sense_saving_params_unsup ILLEGAL_REQUEST, 0x39, 0
-+#define scst_sense_lun_not_supported ILLEGAL_REQUEST, 0x25, 0
-+#define scst_sense_data_protect DATA_PROTECT, 0x00, 0
-+#define scst_sense_miscompare_error MISCOMPARE, 0x1D, 0
-+#define scst_sense_block_out_range_error ILLEGAL_REQUEST, 0x21, 0
-+#define scst_sense_medium_changed_UA UNIT_ATTENTION, 0x28, 0
-+#define scst_sense_read_error MEDIUM_ERROR, 0x11, 0
-+#define scst_sense_write_error MEDIUM_ERROR, 0x03, 0
-+#define scst_sense_not_ready NOT_READY, 0x04, 0x10
-+#define scst_sense_invalid_message ILLEGAL_REQUEST, 0x49, 0
-+#define scst_sense_cleared_by_another_ini_UA UNIT_ATTENTION, 0x2F, 0
-+#define scst_sense_capacity_data_changed UNIT_ATTENTION, 0x2A, 0x9
-+#define scst_sense_reservation_preempted UNIT_ATTENTION, 0x2A, 0x03
-+#define scst_sense_reservation_released UNIT_ATTENTION, 0x2A, 0x04
-+#define scst_sense_registrations_preempted UNIT_ATTENTION, 0x2A, 0x05
-+#define scst_sense_asym_access_state_changed UNIT_ATTENTION, 0x2A, 0x06
-+#define scst_sense_reported_luns_data_changed UNIT_ATTENTION, 0x3F, 0xE
-+#define scst_sense_inquery_data_changed UNIT_ATTENTION, 0x3F, 0x3
-+
-+/*************************************************************
-+ * SCSI opcodes not listed anywhere else
-+ *************************************************************/
-+#define INIT_ELEMENT_STATUS 0x07
-+#define INIT_ELEMENT_STATUS_RANGE 0x37
-+#define PREVENT_ALLOW_MEDIUM 0x1E
-+#define REQUEST_VOLUME_ADDRESS 0xB5
-+#define WRITE_VERIFY_16 0x8E
-+#define VERIFY_6 0x13
-+#ifndef VERIFY_12
-+#define VERIFY_12 0xAF
-+#endif
-+#ifndef GENERATING_UPSTREAM_PATCH
-+/*
-+ * The constants below have been defined in the kernel header <scsi/scsi.h>
-+ * and hence are not needed when this header file is included in kernel code.
-+ * The definitions below are only used when this header file is included during
-+ * compilation of SCST's user space components.
-+ */
-+#ifndef READ_16
-+#define READ_16 0x88
-+#endif
-+#ifndef WRITE_16
-+#define WRITE_16 0x8a
-+#endif
-+#ifndef VERIFY_16
-+#define VERIFY_16 0x8f
-+#endif
-+#ifndef SERVICE_ACTION_IN
-+#define SERVICE_ACTION_IN 0x9e
-+#endif
-+#ifndef SAI_READ_CAPACITY_16
-+/* values for service action in */
-+#define SAI_READ_CAPACITY_16 0x10
-+#endif
-+#endif
-+#ifndef GENERATING_UPSTREAM_PATCH
-+#ifndef REPORT_LUNS
-+#define REPORT_LUNS 0xa0
-+#endif
-+#endif
-+
-+
-+/*************************************************************
-+ ** SCSI Architecture Model (SAM) Status codes. Taken from SAM-3 draft
-+ ** T10/1561-D Revision 4 Draft dated 7th November 2002.
-+ *************************************************************/
-+#define SAM_STAT_GOOD 0x00
-+#define SAM_STAT_CHECK_CONDITION 0x02
-+#define SAM_STAT_CONDITION_MET 0x04
-+#define SAM_STAT_BUSY 0x08
-+#define SAM_STAT_INTERMEDIATE 0x10
-+#define SAM_STAT_INTERMEDIATE_CONDITION_MET 0x14
-+#define SAM_STAT_RESERVATION_CONFLICT 0x18
-+#define SAM_STAT_COMMAND_TERMINATED 0x22 /* obsolete in SAM-3 */
-+#define SAM_STAT_TASK_SET_FULL 0x28
-+#define SAM_STAT_ACA_ACTIVE 0x30
-+#define SAM_STAT_TASK_ABORTED 0x40
-+
-+/*************************************************************
-+ ** Control byte field in CDB
-+ *************************************************************/
-+#define CONTROL_BYTE_LINK_BIT 0x01
-+#define CONTROL_BYTE_NACA_BIT 0x04
-+
-+/*************************************************************
-+ ** Byte 1 in INQUIRY CDB
-+ *************************************************************/
-+#define SCST_INQ_EVPD 0x01
-+
-+/*************************************************************
-+ ** Byte 3 in Standard INQUIRY data
-+ *************************************************************/
-+#define SCST_INQ_BYTE3 3
-+
-+#define SCST_INQ_NORMACA_BIT 0x20
-+
-+/*************************************************************
-+ ** TPGS field in byte 5 of the INQUIRY response (SPC-4).
-+ *************************************************************/
-+enum {
-+ SCST_INQ_TPGS_MODE_IMPLICIT = 0x10,
-+ SCST_INQ_TPGS_MODE_EXPLICIT = 0x20,
-+};
-+
-+/*************************************************************
-+ ** Byte 2 in RESERVE_10 CDB
-+ *************************************************************/
-+#define SCST_RES_3RDPTY 0x10
-+#define SCST_RES_LONGID 0x02
-+
-+/*************************************************************
-+ ** Values for the control mode page TST field
-+ *************************************************************/
-+#define SCST_CONTR_MODE_ONE_TASK_SET 0
-+#define SCST_CONTR_MODE_SEP_TASK_SETS 1
-+
-+/*******************************************************************
-+ ** Values for the control mode page QUEUE ALGORITHM MODIFIER field
-+ *******************************************************************/
-+#define SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER 0
-+#define SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER 1
-+
-+/*************************************************************
-+ ** Values for the control mode page D_SENSE field
-+ *************************************************************/
-+#define SCST_CONTR_MODE_FIXED_SENSE 0
-+#define SCST_CONTR_MODE_DESCR_SENSE 1
-+
-+/*************************************************************
-+ ** TransportID protocol identifiers
-+ *************************************************************/
-+
-+#define SCSI_TRANSPORTID_PROTOCOLID_FCP2 0
-+#define SCSI_TRANSPORTID_PROTOCOLID_SPI5 1
-+#define SCSI_TRANSPORTID_PROTOCOLID_SRP 4
-+#define SCSI_TRANSPORTID_PROTOCOLID_ISCSI 5
-+#define SCSI_TRANSPORTID_PROTOCOLID_SAS 6
-+
-+/**
-+ * enum scst_tg_state - SCSI target port group asymmetric access state.
-+ *
-+ * See also the documentation of the REPORT TARGET PORT GROUPS command in SPC-4.
-+ */
-+enum scst_tg_state {
-+ SCST_TG_STATE_OPTIMIZED = 0x0,
-+ SCST_TG_STATE_NONOPTIMIZED = 0x1,
-+ SCST_TG_STATE_STANDBY = 0x2,
-+ SCST_TG_STATE_UNAVAILABLE = 0x3,
-+ SCST_TG_STATE_LBA_DEPENDENT = 0x4,
-+ SCST_TG_STATE_OFFLINE = 0xe,
-+ SCST_TG_STATE_TRANSITIONING = 0xf,
-+};
-+
-+/**
-+ * Target port group preferred bit.
-+ *
-+ * See also the documentation of the REPORT TARGET PORT GROUPS command in SPC-4.
-+ */
-+enum {
-+ SCST_TG_PREFERRED = 0x80,
-+};
-+
-+/**
-+ * enum scst_tg_sup - Supported SCSI target port group states.
-+ *
-+ * See also the documentation of the REPORT TARGET PORT GROUPS command in SPC-4.
-+ */
-+enum scst_tg_sup {
-+ SCST_TG_SUP_OPTIMIZED = 0x01,
-+ SCST_TG_SUP_NONOPTIMIZED = 0x02,
-+ SCST_TG_SUP_STANDBY = 0x04,
-+ SCST_TG_SUP_UNAVAILABLE = 0x08,
-+ SCST_TG_SUP_LBA_DEPENDENT = 0x10,
-+ SCST_TG_SUP_OFFLINE = 0x40,
-+ SCST_TG_SUP_TRANSITION = 0x80,
-+};
-+
-+/*************************************************************
-+ ** Misc SCSI constants
-+ *************************************************************/
-+#define SCST_SENSE_ASC_UA_RESET 0x29
-+#define BYTCHK 0x02
-+#define POSITION_LEN_SHORT 20
-+#define POSITION_LEN_LONG 32
-+
-+/*************************************************************
-+ ** Various timeouts
-+ *************************************************************/
-+#define SCST_DEFAULT_TIMEOUT (60 * HZ)
-+
-+#define SCST_GENERIC_CHANGER_TIMEOUT (3 * HZ)
-+#define SCST_GENERIC_CHANGER_LONG_TIMEOUT (14000 * HZ)
-+
-+#define SCST_GENERIC_PROCESSOR_TIMEOUT (3 * HZ)
-+#define SCST_GENERIC_PROCESSOR_LONG_TIMEOUT (14000 * HZ)
-+
-+#define SCST_GENERIC_TAPE_SMALL_TIMEOUT (3 * HZ)
-+#define SCST_GENERIC_TAPE_REG_TIMEOUT (900 * HZ)
-+#define SCST_GENERIC_TAPE_LONG_TIMEOUT (14000 * HZ)
-+
-+#define SCST_GENERIC_MODISK_SMALL_TIMEOUT (3 * HZ)
-+#define SCST_GENERIC_MODISK_REG_TIMEOUT (900 * HZ)
-+#define SCST_GENERIC_MODISK_LONG_TIMEOUT (14000 * HZ)
-+
-+#define SCST_GENERIC_DISK_SMALL_TIMEOUT (3 * HZ)
-+#define SCST_GENERIC_DISK_REG_TIMEOUT (60 * HZ)
-+#define SCST_GENERIC_DISK_LONG_TIMEOUT (3600 * HZ)
-+
-+#define SCST_GENERIC_RAID_TIMEOUT (3 * HZ)
-+#define SCST_GENERIC_RAID_LONG_TIMEOUT (14000 * HZ)
-+
-+#define SCST_GENERIC_CDROM_SMALL_TIMEOUT (3 * HZ)
-+#define SCST_GENERIC_CDROM_REG_TIMEOUT (900 * HZ)
-+#define SCST_GENERIC_CDROM_LONG_TIMEOUT (14000 * HZ)
-+
-+#define SCST_MAX_OTHER_TIMEOUT (14000 * HZ)
-+
-+/*************************************************************
-+ ** I/O grouping attribute string values. Must match constants
-+ ** w/o '_STR' suffix!
-+ *************************************************************/
-+#define SCST_IO_GROUPING_AUTO_STR "auto"
-+#define SCST_IO_GROUPING_THIS_GROUP_ONLY_STR "this_group_only"
-+#define SCST_IO_GROUPING_NEVER_STR "never"
-+
-+/*************************************************************
-+ ** Threads pool type attribute string values.
-+ ** Must match scst_dev_type_threads_pool_type!
-+ *************************************************************/
-+#define SCST_THREADS_POOL_PER_INITIATOR_STR "per_initiator"
-+#define SCST_THREADS_POOL_SHARED_STR "shared"
-+
-+/*************************************************************
-+ ** Misc constants
-+ *************************************************************/
-+#define SCST_SYSFS_BLOCK_SIZE PAGE_SIZE
-+
-+#define SCST_PR_DIR "/var/lib/scst/pr"
-+
-+#define TID_COMMON_SIZE 24
-+
-+#define SCST_SYSFS_KEY_MARK "[key]"
-+
-+#define SCST_MIN_REL_TGT_ID 1
-+#define SCST_MAX_REL_TGT_ID 65535
-+
-+#endif /* __SCST_CONST_H */
-diff -uprN orig/linux-3.2/drivers/scst/scst_main.c linux-3.2/drivers/scst/scst_main.c
---- orig/linux-3.2/drivers/scst/scst_main.c
-+++ linux-3.2/drivers/scst/scst_main.c
-@@ -0,0 +1,2229 @@
-+/*
-+ * scst_main.c
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/module.h>
-+
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <linux/slab.h>
-+#include <linux/sched.h>
-+#include <linux/unistd.h>
-+#include <linux/string.h>
-+#include <linux/kthread.h>
-+#include <linux/delay.h>
-+#include <linux/lockdep.h>
-+
-+#include <scst/scst.h>
-+#include "scst_priv.h"
-+#include "scst_mem.h"
-+#include "scst_pres.h"
-+
-+#if defined(CONFIG_HIGHMEM4G) || defined(CONFIG_HIGHMEM64G)
-+#warning HIGHMEM kernel configurations are fully supported, but not \
-+recommended for performance reasons. Consider changing VMSPLIT \
-+option or use a 64-bit configuration instead. See README file for \
-+details.
-+#endif
-+
-+/**
-+ ** SCST global variables. They are all uninitialized to have their layout in
-+ ** memory be exactly as specified. Otherwise the compiler puts zero-initialized
-+ ** variables separately from nonzero-initialized ones.
-+ **/
-+
-+/*
-+ * Main SCST mutex. All targets, devices and dev_types management is done
-+ * under this mutex.
-+ *
-+ * It must NOT be used in any works (schedule_work(), etc.), because
-+ * otherwise a deadlock (double lock, actually) is possible, e.g., with
-+ * scst_user detach_tgt(), which is called under scst_mutex and calls
-+ * flush_scheduled_work().
-+ */
-+struct mutex scst_mutex;
-+EXPORT_SYMBOL_GPL(scst_mutex);
-+
-+/*
-+ * Secondary level main mutex, inner for scst_mutex. Needed for
-+ * __scst_pr_register_all_tg_pt(), since we can't use scst_mutex there,
-+ * because of the circular locking dependency with dev_pr_mutex.
-+ */
-+struct mutex scst_mutex2;
-+
-+/* Both protected by scst_mutex or scst_mutex2 on read and both on write */
-+struct list_head scst_template_list;
-+struct list_head scst_dev_list;
-+
-+/* Protected by scst_mutex */
-+struct list_head scst_dev_type_list;
-+struct list_head scst_virtual_dev_type_list;
-+
-+spinlock_t scst_main_lock;
-+
-+static struct kmem_cache *scst_mgmt_cachep;
-+mempool_t *scst_mgmt_mempool;
-+static struct kmem_cache *scst_mgmt_stub_cachep;
-+mempool_t *scst_mgmt_stub_mempool;
-+static struct kmem_cache *scst_ua_cachep;
-+mempool_t *scst_ua_mempool;
-+static struct kmem_cache *scst_sense_cachep;
-+mempool_t *scst_sense_mempool;
-+static struct kmem_cache *scst_aen_cachep;
-+mempool_t *scst_aen_mempool;
-+struct kmem_cache *scst_tgtd_cachep;
-+struct kmem_cache *scst_sess_cachep;
-+struct kmem_cache *scst_acgd_cachep;
-+
-+unsigned int scst_setup_id;
-+
-+spinlock_t scst_init_lock;
-+wait_queue_head_t scst_init_cmd_list_waitQ;
-+struct list_head scst_init_cmd_list;
-+unsigned int scst_init_poll_cnt;
-+
-+struct kmem_cache *scst_cmd_cachep;
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+unsigned long scst_trace_flag;
-+#endif
-+
-+int scst_max_tasklet_cmd = SCST_DEF_MAX_TASKLET_CMD;
-+
-+unsigned long scst_flags;
-+
-+struct scst_cmd_threads scst_main_cmd_threads;
-+
-+struct scst_percpu_info scst_percpu_infos[NR_CPUS];
-+
-+spinlock_t scst_mcmd_lock;
-+struct list_head scst_active_mgmt_cmd_list;
-+struct list_head scst_delayed_mgmt_cmd_list;
-+wait_queue_head_t scst_mgmt_cmd_list_waitQ;
-+
-+wait_queue_head_t scst_mgmt_waitQ;
-+spinlock_t scst_mgmt_lock;
-+struct list_head scst_sess_init_list;
-+struct list_head scst_sess_shut_list;
-+
-+wait_queue_head_t scst_dev_cmd_waitQ;
-+
-+#ifdef CONFIG_LOCKDEP
-+static struct lock_class_key scst_suspend_key;
-+struct lockdep_map scst_suspend_dep_map =
-+ STATIC_LOCKDEP_MAP_INIT("scst_suspend_activity", &scst_suspend_key);
-+#endif
-+static struct mutex scst_suspend_mutex;
-+/* protected by scst_suspend_mutex */
-+static struct list_head scst_cmd_threads_list;
-+
-+int scst_threads;
-+static struct task_struct *scst_init_cmd_thread;
-+static struct task_struct *scst_mgmt_thread;
-+static struct task_struct *scst_mgmt_cmd_thread;
-+
-+static int suspend_count;
-+
-+static int scst_virt_dev_last_id; /* protected by scst_mutex */
-+
-+cpumask_t default_cpu_mask;
-+
-+static unsigned int scst_max_cmd_mem;
-+unsigned int scst_max_dev_cmd_mem;
-+
-+module_param_named(scst_threads, scst_threads, int, 0);
-+MODULE_PARM_DESC(scst_threads, "SCSI target threads count");
-+
-+module_param_named(scst_max_cmd_mem, scst_max_cmd_mem, int, S_IRUGO);
-+MODULE_PARM_DESC(scst_max_cmd_mem, "Maximum memory allowed to be consumed by "
-+ "all SCSI commands of all devices at any given time in MB");
-+
-+module_param_named(scst_max_dev_cmd_mem, scst_max_dev_cmd_mem, int, S_IRUGO);
-+MODULE_PARM_DESC(scst_max_dev_cmd_mem, "Maximum memory allowed to be consumed "
-+ "by all SCSI commands of a device at any given time in MB");
-+
-+struct scst_dev_type scst_null_devtype = {
-+ .name = "none",
-+ .threads_num = -1,
-+};
-+
-+static void __scst_resume_activity(void);
-+
-+/**
-+ * __scst_register_target_template() - register target template.
-+ * @vtt: target template
-+ * @version: SCST_INTERFACE_VERSION version string to ensure that
-+ * SCST core and the target driver use the same version of
-+ * the SCST interface
-+ *
-+ * Description:
-+ * Registers a target template and returns 0 on success or appropriate
-+ * error code otherwise.
-+ *
-+ * Target drivers are supposed to behave sanely and not call register()
-+ * and unregister() simultaneously.
-+ */
-+int __scst_register_target_template(struct scst_tgt_template *vtt,
-+ const char *version)
-+{
-+ int res = 0;
-+ struct scst_tgt_template *t;
-+
-+ TRACE_ENTRY();
-+
-+ INIT_LIST_HEAD(&vtt->tgt_list);
-+
-+ if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
-+ PRINT_ERROR("Incorrect version of target %s", vtt->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (!vtt->detect) {
-+ PRINT_ERROR("Target driver %s must have "
-+ "detect() method.", vtt->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (!vtt->release) {
-+ PRINT_ERROR("Target driver %s must have "
-+ "release() method.", vtt->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (!vtt->xmit_response) {
-+ PRINT_ERROR("Target driver %s must have "
-+ "xmit_response() method.", vtt->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (vtt->get_initiator_port_transport_id == NULL)
-+ PRINT_WARNING("Target driver %s doesn't support Persistent "
-+ "Reservations", vtt->name);
-+
-+ if (vtt->threads_num < 0) {
-+ PRINT_ERROR("Wrong threads_num value %d for "
-+ "target \"%s\"", vtt->threads_num,
-+ vtt->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if ((!vtt->enable_target || !vtt->is_target_enabled) &&
-+ !vtt->enabled_attr_not_needed)
-+ PRINT_WARNING("Target driver %s doesn't have enable_target() "
-+ "and/or is_target_enabled() method(s). This is unsafe "
-+			"and can lead to initiators connected at "
-+			"initialization time seeing an unexpected set of "
-+ "devices or no devices at all!", vtt->name);
-+
-+ if (((vtt->add_target != NULL) && (vtt->del_target == NULL)) ||
-+ ((vtt->add_target == NULL) && (vtt->del_target != NULL))) {
-+ PRINT_ERROR("Target driver %s must either define both "
-+ "add_target() and del_target(), or none.", vtt->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (vtt->rdy_to_xfer == NULL)
-+ vtt->rdy_to_xfer_atomic = 1;
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res != 0)
-+ goto out;
-+ list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
-+ if (strcmp(t->name, vtt->name) == 0) {
-+ PRINT_ERROR("Target driver %s already registered",
-+ vtt->name);
-+			res = -EEXIST;
-+ goto out_unlock;
-+ }
-+ }
-+ mutex_unlock(&scst_mutex);
-+
-+ res = scst_tgtt_sysfs_create(vtt);
-+ if (res != 0)
-+ goto out;
-+
-+ mutex_lock(&scst_mutex);
-+ mutex_lock(&scst_mutex2);
-+ list_add_tail(&vtt->scst_template_list_entry, &scst_template_list);
-+ mutex_unlock(&scst_mutex2);
-+ mutex_unlock(&scst_mutex);
-+
-+ TRACE_DBG("%s", "Calling target driver's detect()");
-+ res = vtt->detect(vtt);
-+ TRACE_DBG("Target driver's detect() returned %d", res);
-+ if (res < 0) {
-+ PRINT_ERROR("%s", "The detect() routine failed");
-+ res = -EINVAL;
-+ goto out_del;
-+ }
-+
-+ PRINT_INFO("Target template %s registered successfully", vtt->name);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_del:
-+ scst_tgtt_sysfs_del(vtt);
-+
-+ mutex_lock(&scst_mutex);
-+
-+ mutex_lock(&scst_mutex2);
-+ list_del(&vtt->scst_template_list_entry);
-+ mutex_unlock(&scst_mutex2);
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+ goto out;
-+}
-+EXPORT_SYMBOL_GPL(__scst_register_target_template);
-+
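-+/*
-+ * Illustrative sketch (all "ex_" names and callback bodies are
-+ * hypothetical, and the callback signatures are assumptions): the
-+ * minimal set of methods a target driver must provide to pass the
-+ * checks above. scst_register_target_template() is the scst.h wrapper
-+ * that passes SCST_INTERFACE_VERSION for the caller (see scst_module.c
-+ * below for its use).
-+ */
-+#if 0
-+static int ex_detect(struct scst_tgt_template *tgtt)
-+{
-+	return 0;	/* no targets detected, no error */
-+}
-+
-+static int ex_release(struct scst_tgt *tgt)
-+{
-+	return 0;
-+}
-+
-+static int ex_xmit_response(struct scst_cmd *cmd)
-+{
-+	/* send the response back to the initiator here */
-+	return SCST_TGT_RES_SUCCESS;
-+}
-+
-+static struct scst_tgt_template ex_tgt_template = {
-+	.name		= "ex_tgt",
-+	.detect		= ex_detect,		/* mandatory */
-+	.release	= ex_release,		/* mandatory */
-+	.xmit_response	= ex_xmit_response,	/* mandatory */
-+};
-+
-+static int __init ex_driver_init(void)
-+{
-+	return scst_register_target_template(&ex_tgt_template);
-+}
-+#endif
-+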
-+static int scst_check_non_gpl_target_template(struct scst_tgt_template *vtt)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ if (vtt->task_mgmt_affected_cmds_done || vtt->threads_num ||
-+ vtt->on_hw_pending_cmd_timeout) {
-+ PRINT_ERROR("Not allowed functionality in non-GPL version for "
-+ "target template %s", vtt->name);
-+ res = -EPERM;
-+ goto out;
-+ }
-+
-+ res = 0;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/**
-+ * __scst_register_target_template_non_gpl() - register target template,
-+ * non-GPL version
-+ * @vtt: target template
-+ * @version: SCST_INTERFACE_VERSION version string to ensure that
-+ * SCST core and the target driver use the same version of
-+ * the SCST interface
-+ *
-+ * Description:
-+ * Registers a target template and returns 0 on success or appropriate
-+ * error code otherwise.
-+ *
-+ * Note: *vtt must be static!
-+ */
-+int __scst_register_target_template_non_gpl(struct scst_tgt_template *vtt,
-+ const char *version)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ res = scst_check_non_gpl_target_template(vtt);
-+ if (res != 0)
-+ goto out;
-+
-+ res = __scst_register_target_template(vtt, version);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL(__scst_register_target_template_non_gpl);
-+
-+/**
-+ * scst_unregister_target_template() - unregister target template
-+ *
-+ * Target drivers are supposed to behave sanely and not call register()
-+ * and unregister() simultaneously. It is also assumed that no attempts
-+ * to create new targets for this vtt will be made in a race with this
-+ * function.
-+ */
-+void scst_unregister_target_template(struct scst_tgt_template *vtt)
-+{
-+ struct scst_tgt *tgt;
-+ struct scst_tgt_template *t;
-+ int found = 0;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(t, &scst_template_list, scst_template_list_entry) {
-+ if (strcmp(t->name, vtt->name) == 0) {
-+ found = 1;
-+ break;
-+ }
-+ }
-+ if (!found) {
-+ PRINT_ERROR("Target driver %s isn't registered", vtt->name);
-+ goto out_err_up;
-+ }
-+
-+ mutex_lock(&scst_mutex2);
-+ list_del(&vtt->scst_template_list_entry);
-+ mutex_unlock(&scst_mutex2);
-+
-+	/* Wait for outstanding sysfs mgmt calls to complete */
-+ while (vtt->tgtt_active_sysfs_works_count > 0) {
-+ mutex_unlock(&scst_mutex);
-+ msleep(100);
-+ mutex_lock(&scst_mutex);
-+ }
-+
-+restart:
-+ list_for_each_entry(tgt, &vtt->tgt_list, tgt_list_entry) {
-+ mutex_unlock(&scst_mutex);
-+ scst_unregister_target(tgt);
-+ mutex_lock(&scst_mutex);
-+ goto restart;
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ scst_tgtt_sysfs_del(vtt);
-+
-+ PRINT_INFO("Target template %s unregistered successfully", vtt->name);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+
-+out_err_up:
-+ mutex_unlock(&scst_mutex);
-+ goto out;
-+}
-+EXPORT_SYMBOL(scst_unregister_target_template);
-+
-+/**
-+ * scst_register_target() - register target
-+ *
-+ * Registers a target for template vtt and returns the new target structure
-+ * on success or NULL otherwise.
-+ */
-+struct scst_tgt *scst_register_target(struct scst_tgt_template *vtt,
-+ const char *target_name)
-+{
-+ struct scst_tgt *tgt, *t;
-+ int rc = 0;
-+
-+ TRACE_ENTRY();
-+
-+ rc = scst_alloc_tgt(vtt, &tgt);
-+ if (rc != 0)
-+ goto out;
-+
-+ if (target_name != NULL) {
-+
-+ tgt->tgt_name = kstrdup(target_name, GFP_KERNEL);
-+ if (tgt->tgt_name == NULL) {
-+ PRINT_ERROR("Allocation of tgt name %s failed",
-+ target_name);
-+ rc = -ENOMEM;
-+ goto out_free_tgt;
-+ }
-+ } else {
-+ static int tgt_num; /* protected by scst_mutex */
-+
-+ PRINT_WARNING("Usage of autogenerated SCST target names "
-+ "is deprecated and will be removed in one of the next "
-+ "versions. It is strongly recommended to update target "
-+			"driver %s to use hardware-related persistent target "
-+ "names instead", vtt->name);
-+
-+ tgt->tgt_name = kasprintf(GFP_KERNEL, "%s%s%d", vtt->name,
-+ SCST_DEFAULT_TGT_NAME_SUFFIX, tgt_num);
-+ if (tgt->tgt_name == NULL) {
-+ PRINT_ERROR("Allocation of tgt name failed "
-+ "(template name %s)", vtt->name);
-+ rc = -ENOMEM;
-+ goto out_free_tgt;
-+ }
-+ tgt_num++;
-+ }
-+
-+ rc = mutex_lock_interruptible(&scst_mutex);
-+ if (rc != 0)
-+ goto out_free_tgt;
-+
-+ list_for_each_entry(t, &vtt->tgt_list, tgt_list_entry) {
-+ if (strcmp(t->tgt_name, tgt->tgt_name) == 0) {
-+ PRINT_ERROR("target %s already exists", tgt->tgt_name);
-+ rc = -EEXIST;
-+ goto out_unlock;
-+ }
-+ }
-+
-+ rc = scst_tgt_sysfs_create(tgt);
-+ if (rc < 0)
-+ goto out_unlock;
-+
-+ tgt->default_acg = scst_alloc_add_acg(tgt, tgt->tgt_name, false);
-+ if (tgt->default_acg == NULL)
-+ goto out_sysfs_del;
-+
-+ mutex_lock(&scst_mutex2);
-+ list_add_tail(&tgt->tgt_list_entry, &vtt->tgt_list);
-+ mutex_unlock(&scst_mutex2);
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ PRINT_INFO("Target %s for template %s registered successfully",
-+ tgt->tgt_name, vtt->name);
-+
-+ TRACE_DBG("tgt %p", tgt);
-+
-+out:
-+ TRACE_EXIT();
-+ return tgt;
-+
-+out_sysfs_del:
-+ mutex_unlock(&scst_mutex);
-+ scst_tgt_sysfs_del(tgt);
-+ goto out_free_tgt;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+out_free_tgt:
-+ /* In case of error tgt_name will be freed in scst_free_tgt() */
-+ scst_free_tgt(tgt);
-+ tgt = NULL;
-+ goto out;
-+}
-+EXPORT_SYMBOL(scst_register_target);
-+
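-+/*
-+ * Illustrative sketch (hypothetical names, reusing the hypothetical
-+ * ex_tgt_template above): targets are typically created from detect()
-+ * or on hardware hotplug, checking for the NULL return documented
-+ * above.
-+ */
-+#if 0
-+static struct scst_tgt *ex_tgt;
-+
-+static int ex_add_hw_target(void)
-+{
-+	ex_tgt = scst_register_target(&ex_tgt_template, "ex_tgt_0");
-+	if (ex_tgt == NULL)
-+		return -ENOMEM;	/* registration failed */
-+	return 0;
-+}
-+
-+static void ex_del_hw_target(void)
-+{
-+	scst_unregister_target(ex_tgt);
-+}
-+#endif
-+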
-+static inline int test_sess_list(struct scst_tgt *tgt)
-+{
-+ int res;
-+ mutex_lock(&scst_mutex);
-+ res = list_empty(&tgt->sess_list);
-+ mutex_unlock(&scst_mutex);
-+ return res;
-+}
-+
-+/**
-+ * scst_unregister_target() - unregister target.
-+ *
-+ * It is assumed that no attempts to create new sessions for this
-+ * target will be made in a race with this function.
-+ */
-+void scst_unregister_target(struct scst_tgt *tgt)
-+{
-+ struct scst_session *sess;
-+ struct scst_tgt_template *vtt = tgt->tgtt;
-+ struct scst_acg *acg, *acg_tmp;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("%s", "Calling target driver's release()");
-+ tgt->tgtt->release(tgt);
-+ TRACE_DBG("%s", "Target driver's release() returned");
-+
-+ mutex_lock(&scst_mutex);
-+again:
-+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
-+ if (sess->shut_phase == SCST_SESS_SPH_READY) {
-+ /*
-+			 * Sometimes it's hard for a target driver to track all
-+			 * its sessions (see scst_local, e.g.), so let's help it.
-+ */
-+ mutex_unlock(&scst_mutex);
-+ scst_unregister_session(sess, 0, NULL);
-+ mutex_lock(&scst_mutex);
-+ goto again;
-+ }
-+ }
-+ mutex_unlock(&scst_mutex);
-+
-+ TRACE_DBG("%s", "Waiting for sessions shutdown");
-+ wait_event(tgt->unreg_waitQ, test_sess_list(tgt));
-+ TRACE_DBG("%s", "wait_event() returned");
-+
-+ scst_suspend_activity(false);
-+ mutex_lock(&scst_mutex);
-+
-+ mutex_lock(&scst_mutex2);
-+ list_del(&tgt->tgt_list_entry);
-+ mutex_unlock(&scst_mutex2);
-+
-+ del_timer_sync(&tgt->retry_timer);
-+
-+ scst_tg_tgt_remove_by_tgt(tgt);
-+
-+ scst_del_free_acg(tgt->default_acg);
-+
-+ list_for_each_entry_safe(acg, acg_tmp, &tgt->tgt_acg_list,
-+ acg_list_entry) {
-+ scst_del_free_acg(acg);
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+ scst_resume_activity();
-+
-+ scst_tgt_sysfs_del(tgt);
-+
-+ PRINT_INFO("Target %s for template %s unregistered successfully",
-+ tgt->tgt_name, vtt->name);
-+
-+ scst_free_tgt(tgt);
-+
-+ TRACE_DBG("Unregistering tgt %p finished", tgt);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL(scst_unregister_target);
-+
-+int scst_get_cmd_counter(void)
-+{
-+ int i, res = 0;
-+ for (i = 0; i < (int)ARRAY_SIZE(scst_percpu_infos); i++)
-+ res += atomic_read(&scst_percpu_infos[i].cpu_cmd_count);
-+ return res;
-+}
-+
-+static int scst_susp_wait(bool interruptible)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (interruptible) {
-+ res = wait_event_interruptible_timeout(scst_dev_cmd_waitQ,
-+ (scst_get_cmd_counter() == 0),
-+ SCST_SUSPENDING_TIMEOUT);
-+ if (res <= 0) {
-+ __scst_resume_activity();
-+ if (res == 0)
-+ res = -EBUSY;
-+ } else
-+ res = 0;
-+ } else
-+ wait_event(scst_dev_cmd_waitQ, scst_get_cmd_counter() == 0);
-+
-+ TRACE_MGMT_DBG("wait_event() returned %d", res);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/**
-+ * scst_suspend_activity() - globally suspend any activity
-+ *
-+ * Description:
-+ * Globally suspends all activity and doesn't return while there are any
-+ * active commands (i.e. in a state after SCST_CMD_STATE_INIT). If
-+ * "interruptible" is true, it returns with the corresponding error status
-+ * < 0 after SCST_SUSPENDING_TIMEOUT, or if it was interrupted by a signal.
-+ * If "interruptible" is false, it will wait virtually forever. On success
-+ * returns 0.
-+ *
-+ * Newly arriving commands stay in the suspended state until
-+ * scst_resume_activity() is called.
-+ */
-+int scst_suspend_activity(bool interruptible)
-+{
-+ int res = 0;
-+ bool rep = false;
-+
-+ TRACE_ENTRY();
-+
-+ rwlock_acquire_read(&scst_suspend_dep_map, 0, 0, _RET_IP_);
-+
-+ if (interruptible) {
-+ if (mutex_lock_interruptible(&scst_suspend_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+ } else
-+ mutex_lock(&scst_suspend_mutex);
-+
-+ TRACE_MGMT_DBG("suspend_count %d", suspend_count);
-+ suspend_count++;
-+ if (suspend_count > 1)
-+ goto out_up;
-+
-+ set_bit(SCST_FLAG_SUSPENDING, &scst_flags);
-+ set_bit(SCST_FLAG_SUSPENDED, &scst_flags);
-+ /*
-+ * Assignment of SCST_FLAG_SUSPENDING and SCST_FLAG_SUSPENDED must be
-+ * ordered with cpu_cmd_count in scst_get(). Otherwise lockless logic in
-+ * scst_translate_lun() and scst_mgmt_translate_lun() won't work.
-+ */
-+ smp_mb__after_set_bit();
-+
-+ /*
-+ * See comment in scst_user.c::dev_user_task_mgmt_fn() for more
-+ * information about scst_user behavior.
-+ *
-+ * ToDo: make the global suspending unneeded (switch to per-device
-+	 * reference counting? That would mean switching away from the
-+	 * lockless implementation of scst_translate_lun()...)
-+ */
-+
-+ if (scst_get_cmd_counter() != 0) {
-+ PRINT_INFO("Waiting for %d active commands to complete... This "
-+			"might take a few minutes for disks or a few hours "
-+			"for tapes if you use long-running commands, like "
-+			"REWIND or FORMAT. If you have a hung user space "
-+			"device (i.e. one made using the scst_user module) "
-+			"not responding to any commands, it might take "
-+			"virtually forever until the corresponding user "
-+			"space program recovers and starts responding or gets "
-+ "killed.", scst_get_cmd_counter());
-+ rep = true;
-+
-+ lock_contended(&scst_suspend_dep_map, _RET_IP_);
-+ }
-+
-+ res = scst_susp_wait(interruptible);
-+ if (res != 0)
-+ goto out_clear;
-+
-+ clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
-+ /* See comment about smp_mb() above */
-+ smp_mb__after_clear_bit();
-+
-+ TRACE_MGMT_DBG("Waiting for %d active commands finally to complete",
-+ scst_get_cmd_counter());
-+
-+ res = scst_susp_wait(interruptible);
-+ if (res != 0)
-+ goto out_clear;
-+
-+ if (rep)
-+ PRINT_INFO("%s", "All active commands completed");
-+
-+out_up:
-+ mutex_unlock(&scst_suspend_mutex);
-+
-+out:
-+ if (res == 0)
-+ lock_acquired(&scst_suspend_dep_map, _RET_IP_);
-+ else
-+ rwlock_release(&scst_suspend_dep_map, 1, _RET_IP_);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_clear:
-+ clear_bit(SCST_FLAG_SUSPENDING, &scst_flags);
-+ /* See comment about smp_mb() above */
-+ smp_mb__after_clear_bit();
-+ goto out_up;
-+}
-+EXPORT_SYMBOL_GPL(scst_suspend_activity);
-+
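-+/*
-+ * Illustrative sketch of the pairing this file itself follows (see,
-+ * e.g., scst_register_virtual_device() below): resume only after a
-+ * successful suspend, since a failed interruptible suspend has already
-+ * been rolled back internally. The "ex_" name is hypothetical.
-+ */
-+#if 0
-+static int ex_reconfigure(void)
-+{
-+	int res = scst_suspend_activity(true);
-+
-+	if (res != 0)
-+		return res;	/* interrupted or timed out; do NOT resume */
-+
-+	mutex_lock(&scst_mutex);
-+	/* ... modify global state ... */
-+	mutex_unlock(&scst_mutex);
-+
-+	scst_resume_activity();
-+	return 0;
-+}
-+#endif
-+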
-+static void __scst_resume_activity(void)
-+{
-+ struct scst_cmd_threads *l;
-+
-+ TRACE_ENTRY();
-+
-+ suspend_count--;
-+ TRACE_MGMT_DBG("suspend_count %d left", suspend_count);
-+ if (suspend_count > 0)
-+ goto out;
-+
-+ clear_bit(SCST_FLAG_SUSPENDED, &scst_flags);
-+ /*
-+ * The barrier is needed to make sure all woken up threads see the
-+ * cleared flag. Not sure if it's really needed, but let's be safe.
-+ */
-+ smp_mb__after_clear_bit();
-+
-+ list_for_each_entry(l, &scst_cmd_threads_list, lists_list_entry) {
-+ wake_up_all(&l->cmd_list_waitQ);
-+ }
-+ wake_up_all(&scst_init_cmd_list_waitQ);
-+
-+ spin_lock_irq(&scst_mcmd_lock);
-+ if (!list_empty(&scst_delayed_mgmt_cmd_list)) {
-+ struct scst_mgmt_cmd *m;
-+ m = list_entry(scst_delayed_mgmt_cmd_list.next, typeof(*m),
-+ mgmt_cmd_list_entry);
-+ TRACE_MGMT_DBG("Moving delayed mgmt cmd %p to head of active "
-+ "mgmt cmd list", m);
-+ list_move(&m->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
-+ }
-+ spin_unlock_irq(&scst_mcmd_lock);
-+ wake_up_all(&scst_mgmt_cmd_list_waitQ);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ * scst_resume_activity() - globally resume all activities
-+ *
-+ * Resumes activities suspended by scst_suspend_activity().
-+ */
-+void scst_resume_activity(void)
-+{
-+ TRACE_ENTRY();
-+
-+ rwlock_release(&scst_suspend_dep_map, 1, _RET_IP_);
-+
-+ mutex_lock(&scst_suspend_mutex);
-+ __scst_resume_activity();
-+ mutex_unlock(&scst_suspend_mutex);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_resume_activity);
-+
-+static int scst_register_device(struct scsi_device *scsidp)
-+{
-+ int res;
-+ struct scst_device *dev, *d;
-+
-+ TRACE_ENTRY();
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res != 0)
-+ goto out;
-+
-+ res = scst_alloc_device(GFP_KERNEL, &dev);
-+ if (res != 0)
-+ goto out_unlock;
-+
-+ dev->type = scsidp->type;
-+
-+ dev->virt_name = kasprintf(GFP_KERNEL, "%d:%d:%d:%d",
-+ scsidp->host->host_no,
-+ scsidp->channel, scsidp->id, scsidp->lun);
-+ if (dev->virt_name == NULL) {
-+ PRINT_ERROR("%s", "Unable to alloc device name");
-+ res = -ENOMEM;
-+ goto out_free_dev;
-+ }
-+
-+ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
-+ if (strcmp(d->virt_name, dev->virt_name) == 0) {
-+ PRINT_ERROR("Device %s already exists", dev->virt_name);
-+ res = -EEXIST;
-+ goto out_free_dev;
-+ }
-+ }
-+
-+ dev->scsi_dev = scsidp;
-+
-+ list_add_tail(&dev->dev_list_entry, &scst_dev_list);
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ res = scst_dev_sysfs_create(dev);
-+ if (res != 0)
-+ goto out_del;
-+
-+ PRINT_INFO("Attached to scsi%d, channel %d, id %d, lun %d, "
-+ "type %d", scsidp->host->host_no, scsidp->channel,
-+ scsidp->id, scsidp->lun, scsidp->type);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_del:
-+ list_del(&dev->dev_list_entry);
-+
-+out_free_dev:
-+ scst_free_device(dev);
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+ goto out;
-+}
-+
-+static void scst_unregister_device(struct scsi_device *scsidp)
-+{
-+ struct scst_device *d, *dev = NULL;
-+ struct scst_acg_dev *acg_dev, *aa;
-+
-+ TRACE_ENTRY();
-+
-+ scst_suspend_activity(false);
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
-+ if (d->scsi_dev == scsidp) {
-+ dev = d;
-+ TRACE_DBG("Device %p found", dev);
-+ break;
-+ }
-+ }
-+ if (dev == NULL) {
-+ PRINT_ERROR("SCST device for SCSI device %d:%d:%d:%d not found",
-+ scsidp->host->host_no, scsidp->channel, scsidp->id,
-+ scsidp->lun);
-+ goto out_unlock;
-+ }
-+
-+ dev->dev_unregistering = 1;
-+
-+ list_del(&dev->dev_list_entry);
-+
-+ scst_dg_dev_remove_by_dev(dev);
-+
-+ scst_assign_dev_handler(dev, &scst_null_devtype);
-+
-+ list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
-+ dev_acg_dev_list_entry) {
-+ scst_acg_del_lun(acg_dev->acg, acg_dev->lun, true);
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ scst_resume_activity();
-+
-+ scst_dev_sysfs_del(dev);
-+
-+ PRINT_INFO("Detached from scsi%d, channel %d, id %d, lun %d, type %d",
-+ scsidp->host->host_no, scsidp->channel, scsidp->id,
-+ scsidp->lun, scsidp->type);
-+
-+ scst_free_device(dev);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+ scst_resume_activity();
-+ goto out;
-+}
-+
-+static int scst_dev_handler_check(struct scst_dev_type *dev_handler)
-+{
-+ int res = 0;
-+
-+ if (dev_handler->parse == NULL) {
-+ PRINT_ERROR("scst dev handler %s must have "
-+ "parse() method.", dev_handler->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (((dev_handler->add_device != NULL) &&
-+ (dev_handler->del_device == NULL)) ||
-+ ((dev_handler->add_device == NULL) &&
-+ (dev_handler->del_device != NULL))) {
-+ PRINT_ERROR("Dev handler %s must either define both "
-+ "add_device() and del_device(), or none.",
-+ dev_handler->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (dev_handler->alloc_data_buf == NULL)
-+ dev_handler->alloc_data_buf_atomic = 1;
-+
-+ if (dev_handler->dev_done == NULL)
-+ dev_handler->dev_done_atomic = 1;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_check_device_name(const char *dev_name)
-+{
-+ int res = 0;
-+
-+ if (strchr(dev_name, '/') != NULL) {
-+ PRINT_ERROR("Dev name %s contains illegal character '/'",
-+ dev_name);
-+ res = -EINVAL;
-+ }
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/**
-+ * scst_register_virtual_device() - register a virtual device.
-+ * @dev_handler: the device's device handler
-+ * @dev_name:	the new device name, a NULL-terminated string. Must be unique
-+ * among all virtual devices in the system.
-+ *
-+ * Registers a virtual device and returns the ID assigned to the device on
-+ * success, or a negative value otherwise.
-+ */
-+int scst_register_virtual_device(struct scst_dev_type *dev_handler,
-+ const char *dev_name)
-+{
-+ int res;
-+ struct scst_device *dev, *d;
-+ bool sysfs_del = false;
-+
-+ TRACE_ENTRY();
-+
-+ if (dev_handler == NULL) {
-+ PRINT_ERROR("%s: valid device handler must be supplied",
-+ __func__);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (dev_name == NULL) {
-+ PRINT_ERROR("%s: device name must be non-NULL", __func__);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ res = scst_check_device_name(dev_name);
-+ if (res != 0)
-+ goto out;
-+
-+ res = scst_dev_handler_check(dev_handler);
-+ if (res != 0)
-+ goto out;
-+
-+ res = scst_suspend_activity(true);
-+ if (res != 0)
-+ goto out;
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res != 0)
-+ goto out_resume;
-+
-+ res = scst_alloc_device(GFP_KERNEL, &dev);
-+ if (res != 0)
-+ goto out_unlock;
-+
-+ dev->type = dev_handler->type;
-+ dev->scsi_dev = NULL;
-+ dev->virt_name = kstrdup(dev_name, GFP_KERNEL);
-+ if (dev->virt_name == NULL) {
-+ PRINT_ERROR("Unable to allocate virt_name for dev %s",
-+ dev_name);
-+ res = -ENOMEM;
-+ goto out_free_dev;
-+ }
-+
-+ while (1) {
-+ dev->virt_id = scst_virt_dev_last_id++;
-+ if (dev->virt_id > 0)
-+ break;
-+ scst_virt_dev_last_id = 1;
-+ }
-+
-+ res = scst_pr_init_dev(dev);
-+ if (res != 0)
-+ goto out_free_dev;
-+
-+ /*
-+	 * We can drop scst_mutex, because we have not yet added the dev to
-+ * scst_dev_list, so it "doesn't exist" yet.
-+ */
-+ mutex_unlock(&scst_mutex);
-+
-+ res = scst_dev_sysfs_create(dev);
-+ if (res != 0)
-+ goto out_lock_pr_clear_dev;
-+
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
-+ if (strcmp(d->virt_name, dev_name) == 0) {
-+ PRINT_ERROR("Device %s already exists", dev_name);
-+ res = -EEXIST;
-+ sysfs_del = true;
-+ goto out_pr_clear_dev;
-+ }
-+ }
-+
-+ res = scst_assign_dev_handler(dev, dev_handler);
-+ if (res != 0) {
-+ sysfs_del = true;
-+ goto out_pr_clear_dev;
-+ }
-+
-+ list_add_tail(&dev->dev_list_entry, &scst_dev_list);
-+
-+ mutex_unlock(&scst_mutex);
-+ scst_resume_activity();
-+
-+ res = dev->virt_id;
-+
-+ PRINT_INFO("Attached to virtual device %s (id %d)", dev_name, res);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_lock_pr_clear_dev:
-+ mutex_lock(&scst_mutex);
-+
-+out_pr_clear_dev:
-+ scst_pr_clear_dev(dev);
-+
-+out_free_dev:
-+ mutex_unlock(&scst_mutex);
-+ if (sysfs_del)
-+ scst_dev_sysfs_del(dev);
-+ scst_free_device(dev);
-+ goto out_resume;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+out_resume:
-+ scst_resume_activity();
-+ goto out;
-+}
-+EXPORT_SYMBOL_GPL(scst_register_virtual_device);
-+
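-+/*
-+ * Illustrative sketch (hypothetical handler and device name, reusing
-+ * the hypothetical ex_devtype sketched further below): a virtual dev
-+ * handler keeps the returned positive ID and later passes it to
-+ * scst_unregister_virtual_device().
-+ */
-+#if 0
-+static int ex_virt_id;	/* saved for detach */
-+
-+static int ex_attach_vdisk(void)
-+{
-+	ex_virt_id = scst_register_virtual_device(&ex_devtype, "ex_disk0");
-+	return ex_virt_id < 0 ? ex_virt_id : 0;
-+}
-+
-+static void ex_detach_vdisk(void)
-+{
-+	scst_unregister_virtual_device(ex_virt_id);
-+}
-+#endif
-+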
-+/**
-+ * scst_unregister_virtual_device() - unregister a virtual device.
-+ * @id: the device's ID, returned by the registration function
-+ */
-+void scst_unregister_virtual_device(int id)
-+{
-+ struct scst_device *d, *dev = NULL;
-+ struct scst_acg_dev *acg_dev, *aa;
-+
-+ TRACE_ENTRY();
-+
-+ scst_suspend_activity(false);
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
-+ if (d->virt_id == id) {
-+ dev = d;
-+ TRACE_DBG("Virtual device %p (id %d) found", dev, id);
-+ break;
-+ }
-+ }
-+ if (dev == NULL) {
-+ PRINT_ERROR("Virtual device (id %d) not found", id);
-+ goto out_unlock;
-+ }
-+
-+ dev->dev_unregistering = 1;
-+
-+ list_del(&dev->dev_list_entry);
-+
-+ scst_pr_clear_dev(dev);
-+
-+ scst_dg_dev_remove_by_dev(dev);
-+
-+ scst_assign_dev_handler(dev, &scst_null_devtype);
-+
-+ list_for_each_entry_safe(acg_dev, aa, &dev->dev_acg_dev_list,
-+ dev_acg_dev_list_entry) {
-+ scst_acg_del_lun(acg_dev->acg, acg_dev->lun, true);
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+ scst_resume_activity();
-+
-+ scst_dev_sysfs_del(dev);
-+
-+ PRINT_INFO("Detached from virtual device %s (id %d)",
-+ dev->virt_name, dev->virt_id);
-+
-+ scst_free_device(dev);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+ scst_resume_activity();
-+ goto out;
-+}
-+EXPORT_SYMBOL_GPL(scst_unregister_virtual_device);
-+
-+/**
-+ * __scst_register_dev_driver() - register pass-through dev handler driver
-+ * @dev_type: dev handler template
-+ * @version: SCST_INTERFACE_VERSION version string to ensure that
-+ * SCST core and the dev handler use the same version of
-+ * the SCST interface
-+ *
-+ * Description:
-+ * Registers a pass-through dev handler driver. Returns 0 on success
-+ * or appropriate error code otherwise.
-+ */
-+int __scst_register_dev_driver(struct scst_dev_type *dev_type,
-+ const char *version)
-+{
-+ int res, exist;
-+ struct scst_dev_type *dt;
-+
-+ TRACE_ENTRY();
-+
-+ res = -EINVAL;
-+ if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
-+ PRINT_ERROR("Incorrect version of dev handler %s",
-+ dev_type->name);
-+ goto out;
-+ }
-+
-+ res = scst_dev_handler_check(dev_type);
-+ if (res != 0)
-+ goto out;
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res != 0)
-+ goto out;
-+
-+ exist = 0;
-+ list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
-+ if (strcmp(dt->name, dev_type->name) == 0) {
-+ PRINT_ERROR("Device type handler \"%s\" already "
-+ "exists", dt->name);
-+ exist = 1;
-+ break;
-+ }
-+ }
-+ if (exist)
-+ goto out_unlock;
-+
-+ list_add_tail(&dev_type->dev_type_list_entry, &scst_dev_type_list);
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ res = scst_devt_sysfs_create(dev_type);
-+ if (res < 0)
-+ goto out;
-+
-+ PRINT_INFO("Device handler \"%s\" for type %d registered "
-+ "successfully", dev_type->name, dev_type->type);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+ goto out;
-+}
-+EXPORT_SYMBOL_GPL(__scst_register_dev_driver);
-+
-+/**
-+ * scst_unregister_dev_driver() - unregister pass-through dev handler driver
-+ */
-+void scst_unregister_dev_driver(struct scst_dev_type *dev_type)
-+{
-+ struct scst_device *dev;
-+ struct scst_dev_type *dt;
-+ int found = 0;
-+
-+ TRACE_ENTRY();
-+
-+ scst_suspend_activity(false);
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
-+ if (strcmp(dt->name, dev_type->name) == 0) {
-+ found = 1;
-+ break;
-+ }
-+ }
-+ if (!found) {
-+ PRINT_ERROR("Dev handler \"%s\" isn't registered",
-+ dev_type->name);
-+ goto out_up;
-+ }
-+
-+ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
-+ if (dev->handler == dev_type) {
-+ scst_assign_dev_handler(dev, &scst_null_devtype);
-+ TRACE_DBG("Dev handler removed from device %p", dev);
-+ }
-+ }
-+
-+ list_del(&dev_type->dev_type_list_entry);
-+
-+ mutex_unlock(&scst_mutex);
-+ scst_resume_activity();
-+
-+ scst_devt_sysfs_del(dev_type);
-+
-+ PRINT_INFO("Device handler \"%s\" for type %d unloaded",
-+ dev_type->name, dev_type->type);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+
-+out_up:
-+ mutex_unlock(&scst_mutex);
-+ scst_resume_activity();
-+ goto out;
-+}
-+EXPORT_SYMBOL_GPL(scst_unregister_dev_driver);
-+
-+/**
-+ * __scst_register_virtual_dev_driver() - register virtual dev handler driver
-+ * @dev_type: dev handler template
-+ * @version: SCST_INTERFACE_VERSION version string to ensure that
-+ * SCST core and the dev handler use the same version of
-+ * the SCST interface
-+ *
-+ * Description:
-+ * Registers a virtual dev handler driver. Returns 0 on success or
-+ * appropriate error code otherwise.
-+ */
-+int __scst_register_virtual_dev_driver(struct scst_dev_type *dev_type,
-+ const char *version)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ if (strcmp(version, SCST_INTERFACE_VERSION) != 0) {
-+ PRINT_ERROR("Incorrect version of virtual dev handler %s",
-+ dev_type->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ res = scst_dev_handler_check(dev_type);
-+ if (res != 0)
-+ goto out;
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res != 0)
-+ goto out;
-+	list_add_tail(&dev_type->dev_type_list_entry,
-+		&scst_virtual_dev_type_list);
-+ mutex_unlock(&scst_mutex);
-+
-+ res = scst_devt_sysfs_create(dev_type);
-+ if (res < 0)
-+ goto out;
-+
-+ if (dev_type->type != -1) {
-+ PRINT_INFO("Virtual device handler %s for type %d "
-+ "registered successfully", dev_type->name,
-+ dev_type->type);
-+ } else {
-+ PRINT_INFO("Virtual device handler \"%s\" registered "
-+ "successfully", dev_type->name);
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(__scst_register_virtual_dev_driver);
-+
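-+/*
-+ * Illustrative sketch: per scst_dev_handler_check() above, parse() is
-+ * the only mandatory dev handler method. The "ex_" names are
-+ * hypothetical, and SCST_CMD_STATE_DEFAULT is assumed to be the
-+ * "continue with default processing" return value of parse().
-+ */
-+#if 0
-+static int ex_parse(struct scst_cmd *cmd)
-+{
-+	return SCST_CMD_STATE_DEFAULT;
-+}
-+
-+static struct scst_dev_type ex_devtype = {
-+	.name	= "ex_vdisk",
-+	.type	= TYPE_DISK,
-+	.parse	= ex_parse,	/* mandatory */
-+};
-+
-+static int __init ex_handler_init(void)
-+{
-+	return __scst_register_virtual_dev_driver(&ex_devtype,
-+			SCST_INTERFACE_VERSION);
-+}
-+#endif
-+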
-+/**
-+ * scst_unregister_virtual_dev_driver() - unregister virtual dev driver
-+ */
-+void scst_unregister_virtual_dev_driver(struct scst_dev_type *dev_type)
-+{
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_mutex);
-+
-+ /* Disable sysfs mgmt calls (e.g. addition of new devices) */
-+ list_del(&dev_type->dev_type_list_entry);
-+
-+	/* Wait for outstanding sysfs mgmt calls to complete */
-+ while (dev_type->devt_active_sysfs_works_count > 0) {
-+ mutex_unlock(&scst_mutex);
-+ msleep(100);
-+ mutex_lock(&scst_mutex);
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ scst_devt_sysfs_del(dev_type);
-+
-+ PRINT_INFO("Device handler \"%s\" unloaded", dev_type->name);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_unregister_virtual_dev_driver);
-+
-+/* scst_mutex is supposed to be held */
-+int scst_add_threads(struct scst_cmd_threads *cmd_threads,
-+ struct scst_device *dev, struct scst_tgt_dev *tgt_dev, int num)
-+{
-+ int res = 0, i;
-+ struct scst_cmd_thread_t *thr;
-+ int n = 0, tgt_dev_num = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (num == 0) {
-+ res = 0;
-+ goto out;
-+ }
-+
-+ list_for_each_entry(thr, &cmd_threads->threads_list, thread_list_entry) {
-+ n++;
-+ }
-+
-+ TRACE_DBG("cmd_threads %p, dev %s, tgt_dev %p, num %d, n %d",
-+ cmd_threads, dev ? dev->virt_name : NULL, tgt_dev, num, n);
-+
-+ if (tgt_dev != NULL) {
-+ struct scst_tgt_dev *t;
-+ list_for_each_entry(t, &tgt_dev->dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ if (t == tgt_dev)
-+ break;
-+ tgt_dev_num++;
-+ }
-+ }
-+
-+ for (i = 0; i < num; i++) {
-+ thr = kmalloc(sizeof(*thr), GFP_KERNEL);
-+ if (!thr) {
-+ res = -ENOMEM;
-+			PRINT_ERROR("Failed to allocate thread: %d", res);
-+ goto out_wait;
-+ }
-+
-+ if (dev != NULL) {
-+ thr->cmd_thread = kthread_create(scst_cmd_thread,
-+ cmd_threads, "%.13s%d", dev->virt_name, n++);
-+ } else if (tgt_dev != NULL) {
-+ thr->cmd_thread = kthread_create(scst_cmd_thread,
-+ cmd_threads, "%.10s%d_%d",
-+ tgt_dev->dev->virt_name, tgt_dev_num, n++);
-+ } else
-+ thr->cmd_thread = kthread_create(scst_cmd_thread,
-+ cmd_threads, "scstd%d", n++);
-+
-+ if (IS_ERR(thr->cmd_thread)) {
-+ res = PTR_ERR(thr->cmd_thread);
-+ PRINT_ERROR("kthread_create() failed: %d", res);
-+ kfree(thr);
-+ goto out_wait;
-+ }
-+
-+ if (tgt_dev != NULL) {
-+ int rc;
-+ /*
-+ * sess->acg can be NULL here, if called from
-+ * scst_check_reassign_sess()!
-+ */
-+ rc = set_cpus_allowed_ptr(thr->cmd_thread,
-+ &tgt_dev->acg_dev->acg->acg_cpu_mask);
-+ if (rc != 0)
-+ PRINT_ERROR("Setting CPU affinity failed: "
-+ "%d", rc);
-+ }
-+
-+ list_add(&thr->thread_list_entry, &cmd_threads->threads_list);
-+ cmd_threads->nr_threads++;
-+
-+ TRACE_DBG("Added thr %p to threads list (nr_threads %d, n %d)",
-+ thr, cmd_threads->nr_threads, n);
-+
-+ wake_up_process(thr->cmd_thread);
-+ }
-+
-+out_wait:
-+ if (i > 0 && cmd_threads != &scst_main_cmd_threads) {
-+ /*
-+		 * Wait until the io_context is initialized to avoid possible
-+		 * races with the tgt_devs sharing it.
-+ */
-+		while (!*(volatile bool *)&cmd_threads->io_context_ready) {
-+			TRACE_DBG("Waiting for io_context of cmd_threads %p "
-+				"to be initialized", cmd_threads);
-+ msleep(50);
-+ }
-+ smp_rmb();
-+ }
-+
-+ if (res != 0)
-+ scst_del_threads(cmd_threads, i);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* scst_mutex is supposed to be held */
-+void scst_del_threads(struct scst_cmd_threads *cmd_threads, int num)
-+{
-+ struct scst_cmd_thread_t *ct, *tmp;
-+
-+ TRACE_ENTRY();
-+
-+ if (num == 0)
-+ goto out;
-+
-+ list_for_each_entry_safe_reverse(ct, tmp, &cmd_threads->threads_list,
-+ thread_list_entry) {
-+ int rc;
-+ struct scst_device *dev;
-+
-+ rc = kthread_stop(ct->cmd_thread);
-+ if (rc != 0 && rc != -EINTR)
-+ TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
-+
-+ list_del(&ct->thread_list_entry);
-+
-+ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
-+ struct scst_tgt_dev *tgt_dev;
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ scst_del_thr_data(tgt_dev, ct->cmd_thread);
-+ }
-+ }
-+
-+ kfree(ct);
-+
-+ cmd_threads->nr_threads--;
-+
-+ --num;
-+ if (num == 0)
-+ break;
-+ }
-+
-+ EXTRACHECKS_BUG_ON((cmd_threads->nr_threads == 0) &&
-+ (cmd_threads->io_context != NULL));
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* The activity is supposed to be suspended and scst_mutex held */
-+void scst_stop_dev_threads(struct scst_device *dev)
-+{
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ scst_tgt_dev_stop_threads(tgt_dev);
-+ }
-+
-+ if ((dev->threads_num > 0) &&
-+ (dev->threads_pool_type == SCST_THREADS_POOL_SHARED))
-+ scst_del_threads(&dev->dev_cmd_threads, -1);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* The activity is supposed to be suspended and scst_mutex held */
-+int scst_create_dev_threads(struct scst_device *dev)
-+{
-+ int res = 0;
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ res = scst_tgt_dev_setup_threads(tgt_dev);
-+ if (res != 0)
-+ goto out_err;
-+ }
-+
-+ if ((dev->threads_num > 0) &&
-+ (dev->threads_pool_type == SCST_THREADS_POOL_SHARED)) {
-+ res = scst_add_threads(&dev->dev_cmd_threads, dev, NULL,
-+ dev->threads_num);
-+ if (res != 0)
-+ goto out_err;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_err:
-+ scst_stop_dev_threads(dev);
-+ goto out;
-+}
-+
-+/* The activity is supposed to be suspended and scst_mutex held */
-+int scst_assign_dev_handler(struct scst_device *dev,
-+ struct scst_dev_type *handler)
-+{
-+ int res = 0;
-+ struct scst_tgt_dev *tgt_dev;
-+ LIST_HEAD(attached_tgt_devs);
-+
-+ TRACE_ENTRY();
-+
-+ BUG_ON(handler == NULL);
-+
-+ if (dev->handler == handler)
-+ goto out;
-+
-+ if (dev->handler == NULL)
-+ goto assign;
-+
-+ if (dev->handler->detach_tgt) {
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ TRACE_DBG("Calling dev handler's detach_tgt(%p)",
-+ tgt_dev);
-+ dev->handler->detach_tgt(tgt_dev);
-+ TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
-+ }
-+ }
-+
-+ /*
-+ * devt_dev sysfs must be created AFTER attach() and deleted BEFORE
-+	 * detach() to avoid sysfs calls on not yet ready or already dead
-+ * objects.
-+ */
-+ scst_devt_dev_sysfs_del(dev);
-+
-+ if (dev->handler->detach) {
-+ TRACE_DBG("%s", "Calling dev handler's detach()");
-+ dev->handler->detach(dev);
-+ TRACE_DBG("%s", "Old handler's detach() returned");
-+ }
-+
-+ scst_stop_dev_threads(dev);
-+
-+assign:
-+ dev->handler = handler;
-+
-+ if (handler == NULL)
-+ goto out;
-+
-+ dev->threads_num = handler->threads_num;
-+ dev->threads_pool_type = handler->threads_pool_type;
-+
-+ if (handler->attach) {
-+ TRACE_DBG("Calling new dev handler's attach(%p)", dev);
-+ res = handler->attach(dev);
-+ TRACE_DBG("New dev handler's attach() returned %d", res);
-+ if (res != 0) {
-+ PRINT_ERROR("New device handler's %s attach() "
-+ "failed: %d", handler->name, res);
-+ goto out;
-+ }
-+ }
-+
-+ res = scst_devt_dev_sysfs_create(dev);
-+ if (res != 0)
-+ goto out_detach;
-+
-+ if (handler->attach_tgt) {
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ TRACE_DBG("Calling dev handler's attach_tgt(%p)",
-+ tgt_dev);
-+ res = handler->attach_tgt(tgt_dev);
-+ TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
-+ if (res != 0) {
-+ PRINT_ERROR("Device handler's %s attach_tgt() "
-+ "failed: %d", handler->name, res);
-+ goto out_err_remove_sysfs;
-+ }
-+ list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
-+ &attached_tgt_devs);
-+ }
-+ }
-+
-+ res = scst_create_dev_threads(dev);
-+ if (res != 0)
-+ goto out_err_detach_tgt;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_err_detach_tgt:
-+ if (handler && handler->detach_tgt) {
-+ list_for_each_entry(tgt_dev, &attached_tgt_devs,
-+ extra_tgt_dev_list_entry) {
-+ TRACE_DBG("Calling handler's detach_tgt(%p)",
-+ tgt_dev);
-+ handler->detach_tgt(tgt_dev);
-+ TRACE_DBG("%s", "Handler's detach_tgt() returned");
-+ }
-+ }
-+
-+out_err_remove_sysfs:
-+ scst_devt_dev_sysfs_del(dev);
-+
-+out_detach:
-+ if (handler && handler->detach) {
-+ TRACE_DBG("%s", "Calling handler's detach()");
-+ handler->detach(dev);
-+ TRACE_DBG("%s", "Handler's detach() returned");
-+ }
-+
-+ dev->handler = &scst_null_devtype;
-+ dev->threads_num = scst_null_devtype.threads_num;
-+ dev->threads_pool_type = scst_null_devtype.threads_pool_type;
-+ goto out;
-+}
-+
-+/**
-+ * scst_init_threads() - initialize SCST processing threads pool
-+ *
-+ * Initializes scst_cmd_threads structure
-+ */
-+void scst_init_threads(struct scst_cmd_threads *cmd_threads)
-+{
-+ TRACE_ENTRY();
-+
-+ spin_lock_init(&cmd_threads->cmd_list_lock);
-+ INIT_LIST_HEAD(&cmd_threads->active_cmd_list);
-+ init_waitqueue_head(&cmd_threads->cmd_list_waitQ);
-+ INIT_LIST_HEAD(&cmd_threads->threads_list);
-+ mutex_init(&cmd_threads->io_context_mutex);
-+
-+ mutex_lock(&scst_suspend_mutex);
-+ list_add_tail(&cmd_threads->lists_list_entry,
-+ &scst_cmd_threads_list);
-+ mutex_unlock(&scst_suspend_mutex);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_init_threads);
-+
-+/**
-+ * scst_deinit_threads() - deinitialize SCST processing threads pool
-+ *
-+ * Deinitializes scst_cmd_threads structure
-+ */
-+void scst_deinit_threads(struct scst_cmd_threads *cmd_threads)
-+{
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_suspend_mutex);
-+ list_del(&cmd_threads->lists_list_entry);
-+ mutex_unlock(&scst_suspend_mutex);
-+
-+ BUG_ON(cmd_threads->io_context);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_deinit_threads);
-+
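-+/*
-+ * Illustrative sketch of the pool lifecycle as used by this file:
-+ * scst_init_threads() pairs with scst_deinit_threads(), while
-+ * scst_add_threads()/scst_del_threads() (both called with scst_mutex
-+ * held) populate and drain the pool in between. "ex_cmd_threads" and
-+ * the function names are hypothetical.
-+ */
-+#if 0
-+static struct scst_cmd_threads ex_cmd_threads;
-+
-+static int ex_pool_start(void)
-+{
-+	int res;
-+
-+	scst_init_threads(&ex_cmd_threads);
-+	mutex_lock(&scst_mutex);
-+	res = scst_add_threads(&ex_cmd_threads, NULL, NULL, 4);
-+	mutex_unlock(&scst_mutex);
-+	return res;
-+}
-+
-+static void ex_pool_stop(void)
-+{
-+	mutex_lock(&scst_mutex);
-+	scst_del_threads(&ex_cmd_threads, -1);	/* -1 means all threads */
-+	mutex_unlock(&scst_mutex);
-+	scst_deinit_threads(&ex_cmd_threads);
-+}
-+#endif
-+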
-+static void scst_stop_global_threads(void)
-+{
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_mutex);
-+
-+ scst_del_threads(&scst_main_cmd_threads, -1);
-+
-+ if (scst_mgmt_cmd_thread)
-+ kthread_stop(scst_mgmt_cmd_thread);
-+ if (scst_mgmt_thread)
-+ kthread_stop(scst_mgmt_thread);
-+ if (scst_init_cmd_thread)
-+ kthread_stop(scst_init_cmd_thread);
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* It does NOT stop already started threads on error! */
-+static int scst_start_global_threads(int num)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_mutex);
-+
-+ res = scst_add_threads(&scst_main_cmd_threads, NULL, NULL, num);
-+ if (res < 0)
-+ goto out_unlock;
-+
-+ scst_init_cmd_thread = kthread_run(scst_init_thread,
-+ NULL, "scst_initd");
-+ if (IS_ERR(scst_init_cmd_thread)) {
-+ res = PTR_ERR(scst_init_cmd_thread);
-+ PRINT_ERROR("kthread_create() for init cmd failed: %d", res);
-+ scst_init_cmd_thread = NULL;
-+ goto out_unlock;
-+ }
-+
-+ scst_mgmt_cmd_thread = kthread_run(scst_tm_thread,
-+ NULL, "scsi_tm");
-+ if (IS_ERR(scst_mgmt_cmd_thread)) {
-+ res = PTR_ERR(scst_mgmt_cmd_thread);
-+ PRINT_ERROR("kthread_create() for TM failed: %d", res);
-+ scst_mgmt_cmd_thread = NULL;
-+ goto out_unlock;
-+ }
-+
-+ scst_mgmt_thread = kthread_run(scst_global_mgmt_thread,
-+ NULL, "scst_mgmtd");
-+ if (IS_ERR(scst_mgmt_thread)) {
-+ res = PTR_ERR(scst_mgmt_thread);
-+ PRINT_ERROR("kthread_create() for mgmt failed: %d", res);
-+ scst_mgmt_thread = NULL;
-+ goto out_unlock;
-+ }
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/**
-+ * scst_get_setup_id() - return SCST setup ID
-+ *
-+ * Returns SCST setup ID. This ID can be used for multiple
-+ * setups with the same configuration.
-+ */
-+unsigned int scst_get_setup_id(void)
-+{
-+ return scst_setup_id;
-+}
-+EXPORT_SYMBOL_GPL(scst_get_setup_id);
-+
-+static int scst_add(struct device *cdev, struct class_interface *intf)
-+{
-+ struct scsi_device *scsidp;
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ scsidp = to_scsi_device(cdev->parent);
-+
-+ if ((scsidp->host->hostt->name == NULL) ||
-+ (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0))
-+ res = scst_register_device(scsidp);
-+
-+ TRACE_EXIT();
-+ return res;
-+}
-+
-+static void scst_remove(struct device *cdev, struct class_interface *intf)
-+{
-+ struct scsi_device *scsidp;
-+
-+ TRACE_ENTRY();
-+
-+ scsidp = to_scsi_device(cdev->parent);
-+
-+ if ((scsidp->host->hostt->name == NULL) ||
-+ (strcmp(scsidp->host->hostt->name, SCST_LOCAL_NAME) != 0))
-+ scst_unregister_device(scsidp);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static struct class_interface scst_interface = {
-+ .add_dev = scst_add,
-+ .remove_dev = scst_remove,
-+};
-+
-+static void __init scst_print_config(void)
-+{
-+ char buf[128];
-+ int i, j;
-+
-+ i = snprintf(buf, sizeof(buf), "Enabled features: ");
-+ j = i;
-+
-+#ifdef CONFIG_SCST_STRICT_SERIALIZING
-+ i += snprintf(&buf[i], sizeof(buf) - i, "STRICT_SERIALIZING");
-+#endif
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sEXTRACHECKS",
-+ (j == i) ? "" : ", ");
-+#endif
-+
-+#ifdef CONFIG_SCST_TRACING
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sTRACING",
-+ (j == i) ? "" : ", ");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG",
-+ (j == i) ? "" : ", ");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_TM
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_TM",
-+ (j == i) ? "" : ", ");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_RETRY
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_RETRY",
-+ (j == i) ? "" : ", ");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_OOM
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_OOM",
-+ (j == i) ? "" : ", ");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_SN
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sDEBUG_SN",
-+ (j == i) ? "" : ", ");
-+#endif
-+
-+#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sUSE_EXPECTED_VALUES",
-+ (j == i) ? "" : ", ");
-+#endif
-+
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ i += snprintf(&buf[i], sizeof(buf) - i,
-+ "%sTEST_IO_IN_SIRQ",
-+ (j == i) ? "" : ", ");
-+#endif
-+
-+#ifdef CONFIG_SCST_STRICT_SECURITY
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sSTRICT_SECURITY",
-+ (j == i) ? "" : ", ");
-+#endif
-+
-+ if (j != i)
-+ PRINT_INFO("%s", buf);
-+}
-+
-+static int __init init_scst(void)
-+{
-+ int res, i;
-+ int scst_num_cpus;
-+
-+ TRACE_ENTRY();
-+
-+ {
-+ struct scsi_sense_hdr *shdr;
-+ struct scst_order_data *o;
-+ struct scst_cmd *c;
-+ BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE < sizeof(*shdr));
-+ BUILD_BUG_ON(sizeof(o->curr_sn) != sizeof(o->expected_sn));
-+ BUILD_BUG_ON(sizeof(c->sn) != sizeof(o->expected_sn));
-+ }
-+
-+ mutex_init(&scst_mutex);
-+ mutex_init(&scst_mutex2);
-+ INIT_LIST_HEAD(&scst_template_list);
-+ INIT_LIST_HEAD(&scst_dev_list);
-+ INIT_LIST_HEAD(&scst_dev_type_list);
-+ INIT_LIST_HEAD(&scst_virtual_dev_type_list);
-+ spin_lock_init(&scst_main_lock);
-+ spin_lock_init(&scst_init_lock);
-+ init_waitqueue_head(&scst_init_cmd_list_waitQ);
-+ INIT_LIST_HEAD(&scst_init_cmd_list);
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ scst_trace_flag = SCST_DEFAULT_LOG_FLAGS;
-+#endif
-+ spin_lock_init(&scst_mcmd_lock);
-+ INIT_LIST_HEAD(&scst_active_mgmt_cmd_list);
-+ INIT_LIST_HEAD(&scst_delayed_mgmt_cmd_list);
-+ init_waitqueue_head(&scst_mgmt_cmd_list_waitQ);
-+ init_waitqueue_head(&scst_mgmt_waitQ);
-+ spin_lock_init(&scst_mgmt_lock);
-+ INIT_LIST_HEAD(&scst_sess_init_list);
-+ INIT_LIST_HEAD(&scst_sess_shut_list);
-+ init_waitqueue_head(&scst_dev_cmd_waitQ);
-+ mutex_init(&scst_suspend_mutex);
-+ INIT_LIST_HEAD(&scst_cmd_threads_list);
-+ cpus_setall(default_cpu_mask);
-+
-+ scst_init_threads(&scst_main_cmd_threads);
-+
-+ res = scst_lib_init();
-+ if (res != 0)
-+ goto out_deinit_threads;
-+
-+ scst_num_cpus = num_online_cpus();
-+
-+ /* ToDo: register_cpu_notifier() */
-+
-+ if (scst_threads == 0)
-+ scst_threads = scst_num_cpus;
-+
-+ if (scst_threads < 1) {
-+		PRINT_ERROR("%s", "scst_threads cannot be less than 1");
-+ scst_threads = scst_num_cpus;
-+ }
-+
-+#define INIT_CACHEP(p, s, o) do { \
-+ p = KMEM_CACHE(s, SCST_SLAB_FLAGS); \
-+ TRACE_MEM("Slab create: %s at %p size %zd", #s, p, \
-+ sizeof(struct s)); \
-+ if (p == NULL) { \
-+ res = -ENOMEM; \
-+ goto o; \
-+ } \
-+ } while (0)
-+
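-+	/*
-+	 * E.g. INIT_CACHEP(scst_cmd_cachep, scst_cmd, out_destroy_aen_cache)
-+	 * below creates a slab cache sized for struct scst_cmd and jumps to
-+	 * the out_destroy_aen_cache cleanup label if the allocation fails.
-+	 */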
-+ INIT_CACHEP(scst_mgmt_cachep, scst_mgmt_cmd, out_lib_exit);
-+ INIT_CACHEP(scst_mgmt_stub_cachep, scst_mgmt_cmd_stub,
-+ out_destroy_mgmt_cache);
-+ INIT_CACHEP(scst_ua_cachep, scst_tgt_dev_UA,
-+ out_destroy_mgmt_stub_cache);
-+ {
-+ struct scst_sense { uint8_t s[SCST_SENSE_BUFFERSIZE]; };
-+ INIT_CACHEP(scst_sense_cachep, scst_sense,
-+ out_destroy_ua_cache);
-+ }
-+ INIT_CACHEP(scst_aen_cachep, scst_aen, out_destroy_sense_cache);
-+ INIT_CACHEP(scst_cmd_cachep, scst_cmd, out_destroy_aen_cache);
-+ INIT_CACHEP(scst_sess_cachep, scst_session, out_destroy_cmd_cache);
-+ INIT_CACHEP(scst_tgtd_cachep, scst_tgt_dev, out_destroy_sess_cache);
-+ INIT_CACHEP(scst_acgd_cachep, scst_acg_dev, out_destroy_tgt_cache);
-+
-+ scst_mgmt_mempool = mempool_create(64, mempool_alloc_slab,
-+ mempool_free_slab, scst_mgmt_cachep);
-+ if (scst_mgmt_mempool == NULL) {
-+ res = -ENOMEM;
-+ goto out_destroy_acg_cache;
-+ }
-+
-+ /*
-+	 * All mgmt stubs, UAs and sense buffers are bursty, and losing them
-+ * may have fatal consequences, so let's have big pools for them.
-+ */
-+
-+ scst_mgmt_stub_mempool = mempool_create(1024, mempool_alloc_slab,
-+ mempool_free_slab, scst_mgmt_stub_cachep);
-+ if (scst_mgmt_stub_mempool == NULL) {
-+ res = -ENOMEM;
-+ goto out_destroy_mgmt_mempool;
-+ }
-+
-+ scst_ua_mempool = mempool_create(512, mempool_alloc_slab,
-+ mempool_free_slab, scst_ua_cachep);
-+ if (scst_ua_mempool == NULL) {
-+ res = -ENOMEM;
-+ goto out_destroy_mgmt_stub_mempool;
-+ }
-+
-+ scst_sense_mempool = mempool_create(1024, mempool_alloc_slab,
-+ mempool_free_slab, scst_sense_cachep);
-+ if (scst_sense_mempool == NULL) {
-+ res = -ENOMEM;
-+ goto out_destroy_ua_mempool;
-+ }
-+
-+ scst_aen_mempool = mempool_create(100, mempool_alloc_slab,
-+ mempool_free_slab, scst_aen_cachep);
-+ if (scst_aen_mempool == NULL) {
-+ res = -ENOMEM;
-+ goto out_destroy_sense_mempool;
-+ }
-+
-+ res = scst_sysfs_init();
-+ if (res != 0)
-+ goto out_destroy_aen_mempool;
-+
-+ scst_tg_init();
-+
-+ if (scst_max_cmd_mem == 0) {
-+ struct sysinfo si;
-+ si_meminfo(&si);
-+#if BITS_PER_LONG == 32
-+ scst_max_cmd_mem = min(
-+ (((uint64_t)(si.totalram - si.totalhigh) << PAGE_SHIFT)
-+ >> 20) >> 2, (uint64_t)1 << 30);
-+#else
-+ scst_max_cmd_mem = (((si.totalram - si.totalhigh) << PAGE_SHIFT)
-+ >> 20) >> 2;
-+#endif
-+ }
-+
-+ if (scst_max_dev_cmd_mem != 0) {
-+ if (scst_max_dev_cmd_mem > scst_max_cmd_mem) {
-+ PRINT_ERROR("scst_max_dev_cmd_mem (%d) > "
-+ "scst_max_cmd_mem (%d)",
-+ scst_max_dev_cmd_mem,
-+ scst_max_cmd_mem);
-+ scst_max_dev_cmd_mem = scst_max_cmd_mem;
-+ }
-+ } else
-+ scst_max_dev_cmd_mem = scst_max_cmd_mem * 2 / 5;
-+
-+ res = scst_sgv_pools_init(
-+ ((uint64_t)scst_max_cmd_mem << 10) >> (PAGE_SHIFT - 10), 0);
-+ if (res != 0)
-+ goto out_sysfs_cleanup;
-+
-+ res = scsi_register_interface(&scst_interface);
-+ if (res != 0)
-+ goto out_destroy_sgv_pool;
-+
-+ for (i = 0; i < (int)ARRAY_SIZE(scst_percpu_infos); i++) {
-+ atomic_set(&scst_percpu_infos[i].cpu_cmd_count, 0);
-+ spin_lock_init(&scst_percpu_infos[i].tasklet_lock);
-+ INIT_LIST_HEAD(&scst_percpu_infos[i].tasklet_cmd_list);
-+ tasklet_init(&scst_percpu_infos[i].tasklet,
-+ (void *)scst_cmd_tasklet,
-+ (unsigned long)&scst_percpu_infos[i]);
-+ }
-+
-+ TRACE_DBG("%d CPUs found, starting %d threads", scst_num_cpus,
-+ scst_threads);
-+
-+ res = scst_start_global_threads(scst_threads);
-+ if (res < 0)
-+ goto out_thread_free;
-+
-+ PRINT_INFO("SCST version %s loaded successfully (max mem for "
-+ "commands %dMB, per device %dMB)", SCST_VERSION_STRING,
-+ scst_max_cmd_mem, scst_max_dev_cmd_mem);
-+
-+ scst_print_config();
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_thread_free:
-+ scst_stop_global_threads();
-+
-+ scsi_unregister_interface(&scst_interface);
-+
-+out_destroy_sgv_pool:
-+ scst_sgv_pools_deinit();
-+ scst_tg_cleanup();
-+
-+out_sysfs_cleanup:
-+ scst_sysfs_cleanup();
-+
-+out_destroy_aen_mempool:
-+ mempool_destroy(scst_aen_mempool);
-+
-+out_destroy_sense_mempool:
-+ mempool_destroy(scst_sense_mempool);
-+
-+out_destroy_ua_mempool:
-+ mempool_destroy(scst_ua_mempool);
-+
-+out_destroy_mgmt_stub_mempool:
-+ mempool_destroy(scst_mgmt_stub_mempool);
-+
-+out_destroy_mgmt_mempool:
-+ mempool_destroy(scst_mgmt_mempool);
-+
-+out_destroy_acg_cache:
-+ kmem_cache_destroy(scst_acgd_cachep);
-+
-+out_destroy_tgt_cache:
-+ kmem_cache_destroy(scst_tgtd_cachep);
-+
-+out_destroy_sess_cache:
-+ kmem_cache_destroy(scst_sess_cachep);
-+
-+out_destroy_cmd_cache:
-+ kmem_cache_destroy(scst_cmd_cachep);
-+
-+out_destroy_aen_cache:
-+ kmem_cache_destroy(scst_aen_cachep);
-+
-+out_destroy_sense_cache:
-+ kmem_cache_destroy(scst_sense_cachep);
-+
-+out_destroy_ua_cache:
-+ kmem_cache_destroy(scst_ua_cachep);
-+
-+out_destroy_mgmt_stub_cache:
-+ kmem_cache_destroy(scst_mgmt_stub_cachep);
-+
-+out_destroy_mgmt_cache:
-+ kmem_cache_destroy(scst_mgmt_cachep);
-+
-+out_lib_exit:
-+ scst_lib_exit();
-+
-+out_deinit_threads:
-+ scst_deinit_threads(&scst_main_cmd_threads);
-+ goto out;
-+}
-+
-+static void __exit exit_scst(void)
-+{
-+ TRACE_ENTRY();
-+
-+ /* ToDo: unregister_cpu_notifier() */
-+
-+ scst_stop_global_threads();
-+
-+ scst_deinit_threads(&scst_main_cmd_threads);
-+
-+ scsi_unregister_interface(&scst_interface);
-+
-+ scst_sgv_pools_deinit();
-+
-+ scst_tg_cleanup();
-+
-+ scst_sysfs_cleanup();
-+
-+#define DEINIT_CACHEP(p) do { \
-+ kmem_cache_destroy(p); \
-+ p = NULL; \
-+ } while (0)
-+
-+ mempool_destroy(scst_mgmt_mempool);
-+ mempool_destroy(scst_mgmt_stub_mempool);
-+ mempool_destroy(scst_ua_mempool);
-+ mempool_destroy(scst_sense_mempool);
-+ mempool_destroy(scst_aen_mempool);
-+
-+ DEINIT_CACHEP(scst_mgmt_cachep);
-+ DEINIT_CACHEP(scst_mgmt_stub_cachep);
-+ DEINIT_CACHEP(scst_ua_cachep);
-+ DEINIT_CACHEP(scst_sense_cachep);
-+ DEINIT_CACHEP(scst_aen_cachep);
-+ DEINIT_CACHEP(scst_cmd_cachep);
-+ DEINIT_CACHEP(scst_sess_cachep);
-+ DEINIT_CACHEP(scst_tgtd_cachep);
-+ DEINIT_CACHEP(scst_acgd_cachep);
-+
-+ scst_lib_exit();
-+
-+ PRINT_INFO("%s", "SCST unloaded");
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+module_init(init_scst);
-+module_exit(exit_scst);
-+
-+MODULE_AUTHOR("Vladislav Bolkhovitin");
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("SCSI target core");
-+MODULE_VERSION(SCST_VERSION_STRING);
-diff -uprN orig/linux-3.2/drivers/scst/scst_module.c linux-3.2/drivers/scst/scst_module.c
---- orig/linux-3.2/drivers/scst/scst_module.c
-+++ linux-3.2/drivers/scst/scst_module.c
-@@ -0,0 +1,70 @@
-+/*
-+ * scst_module.c
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * Support for loading target modules. The usage is similar to scsi_module.c
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/init.h>
-+
-+#include <scst/scst.h>
-+
-+static int __init init_this_scst_driver(void)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ res = scst_register_target_template(&driver_target_template);
-+ TRACE_DBG("scst_register_target_template() returned %d", res);
-+ if (res < 0)
-+ goto out;
-+
-+#ifdef SCST_REGISTER_INITIATOR_DRIVER
-+ driver_template.module = THIS_MODULE;
-+ scsi_register_module(MODULE_SCSI_HA, &driver_template);
-+ TRACE_DBG("driver_template.present=%d",
-+ driver_template.present);
-+ if (driver_template.present == 0) {
-+ res = -ENODEV;
-+ MOD_DEC_USE_COUNT;
-+ goto out;
-+ }
-+#endif
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void __exit exit_this_scst_driver(void)
-+{
-+ TRACE_ENTRY();
-+
-+#ifdef SCST_REGISTER_INITIATOR_DRIVER
-+ scsi_unregister_module(MODULE_SCSI_HA, &driver_template);
-+#endif
-+
-+ scst_unregister_target_template(&driver_target_template);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+module_init(init_this_scst_driver);
-+module_exit(exit_this_scst_driver);
-diff -uprN orig/linux-3.2/drivers/scst/scst_priv.h linux-3.2/drivers/scst/scst_priv.h
---- orig/linux-3.2/drivers/scst/scst_priv.h
-+++ linux-3.2/drivers/scst/scst_priv.h
-@@ -0,0 +1,646 @@
-+/*
-+ * scst_priv.h
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#ifndef __SCST_PRIV_H
-+#define __SCST_PRIV_H
-+
-+#include <linux/types.h>
-+#include <linux/slab.h>
-+#include <linux/export.h>
-+#include <scsi/scsi.h>
-+#include <scsi/scsi_cmnd.h>
-+#include <scsi/scsi_driver.h>
-+#include <scsi/scsi_device.h>
-+#include <scsi/scsi_host.h>
-+
-+#define LOG_PREFIX "scst"
-+
-+#include <scst/scst_debug.h>
-+
-+#define TRACE_RTRY 0x80000000
-+#define TRACE_SCSI_SERIALIZING 0x40000000
-+/** top being the edge away from the interrupt */
-+#define TRACE_SND_TOP 0x20000000
-+#define TRACE_RCV_TOP 0x01000000
-+/** bottom being the edge toward the interrupt */
-+#define TRACE_SND_BOT 0x08000000
-+#define TRACE_RCV_BOT 0x04000000
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+#define trace_flag scst_trace_flag
-+extern unsigned long scst_trace_flag;
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG
-+
-+#define SCST_DEFAULT_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MINOR | TRACE_PID | \
-+ TRACE_LINE | TRACE_FUNCTION | TRACE_SPECIAL | TRACE_MGMT | \
-+ TRACE_MGMT_DEBUG | TRACE_RTRY)
-+
-+#define TRACE_RETRY(args...) TRACE_DBG_FLAG(TRACE_RTRY, args)
-+#define TRACE_SN(args...) TRACE_DBG_FLAG(TRACE_SCSI_SERIALIZING, args)
-+#define TRACE_SEND_TOP(args...) TRACE_DBG_FLAG(TRACE_SND_TOP, args)
-+#define TRACE_RECV_TOP(args...) TRACE_DBG_FLAG(TRACE_RCV_TOP, args)
-+#define TRACE_SEND_BOT(args...) TRACE_DBG_FLAG(TRACE_SND_BOT, args)
-+#define TRACE_RECV_BOT(args...) TRACE_DBG_FLAG(TRACE_RCV_BOT, args)
-+
-+#else /* CONFIG_SCST_DEBUG */
-+
-+# ifdef CONFIG_SCST_TRACING
-+#define SCST_DEFAULT_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MGMT | \
-+ TRACE_SPECIAL)
-+# else
-+#define SCST_DEFAULT_LOG_FLAGS 0
-+# endif
-+
-+#define TRACE_RETRY(args...)
-+#define TRACE_SN(args...)
-+#define TRACE_SEND_TOP(args...)
-+#define TRACE_RECV_TOP(args...)
-+#define TRACE_SEND_BOT(args...)
-+#define TRACE_RECV_BOT(args...)
-+
-+#endif
-+
-+/**
-+ ** Bits for scst_flags
-+ **/
-+
-+/*
-+ * Set if initialization of new commands is being suspended for a while.
-+ * Used to let TM commands execute while preparing the suspend, since
-+ * RESET or ABORT could be necessary to free SCSI commands.
-+ */
-+#define SCST_FLAG_SUSPENDING 0
-+
-+/* Set if initialization of new commands is suspended for a while */
-+#define SCST_FLAG_SUSPENDED 1
-+
-+/**
-+ ** Return codes for cmd state processing functions. The codes are the same
-+ ** as the SCST_EXEC_* ones to avoid translating between them and, hence, to
-+ ** generate better code.
-+ **/
-+#define SCST_CMD_STATE_RES_CONT_NEXT SCST_EXEC_COMPLETED
-+#define SCST_CMD_STATE_RES_CONT_SAME SCST_EXEC_NOT_COMPLETED
-+#define SCST_CMD_STATE_RES_NEED_THREAD (SCST_EXEC_NOT_COMPLETED+1)
-+
-+/**
-+ ** Maximum count of uncompleted commands that an initiator can queue on
-+ ** any device. Beyond that it will start getting TASK QUEUE FULL status.
-+ **/
-+#define SCST_MAX_TGT_DEV_COMMANDS 48
-+
-+/**
-+ ** Maximum count of uncompleted commands that can be queued on any device.
-+ ** Beyond that, initiators sending commands to this device will start
-+ ** getting TASK QUEUE FULL status.
-+ **/
-+#define SCST_MAX_DEV_COMMANDS 256
-+
-+#define SCST_TGT_RETRY_TIMEOUT (3*HZ/2) /* 3/2*HZ would truncate to plain HZ */
-+
-+/* Activities suspending timeout */
-+#define SCST_SUSPENDING_TIMEOUT (90 * HZ)
-+
-+extern struct mutex scst_mutex2;
-+
-+extern int scst_threads;
-+
-+extern unsigned int scst_max_dev_cmd_mem;
-+
-+extern mempool_t *scst_mgmt_mempool;
-+extern mempool_t *scst_mgmt_stub_mempool;
-+extern mempool_t *scst_ua_mempool;
-+extern mempool_t *scst_sense_mempool;
-+extern mempool_t *scst_aen_mempool;
-+
-+extern struct kmem_cache *scst_cmd_cachep;
-+extern struct kmem_cache *scst_sess_cachep;
-+extern struct kmem_cache *scst_tgtd_cachep;
-+extern struct kmem_cache *scst_acgd_cachep;
-+
-+extern spinlock_t scst_main_lock;
-+
-+extern unsigned long scst_flags;
-+extern struct list_head scst_template_list;
-+extern struct list_head scst_dev_list;
-+extern struct list_head scst_dev_type_list;
-+extern struct list_head scst_virtual_dev_type_list;
-+extern wait_queue_head_t scst_dev_cmd_waitQ;
-+
-+extern unsigned int scst_setup_id;
-+
-+#define SCST_DEF_MAX_TASKLET_CMD 10
-+extern int scst_max_tasklet_cmd;
-+
-+extern spinlock_t scst_init_lock;
-+extern struct list_head scst_init_cmd_list;
-+extern wait_queue_head_t scst_init_cmd_list_waitQ;
-+extern unsigned int scst_init_poll_cnt;
-+
-+extern struct scst_cmd_threads scst_main_cmd_threads;
-+
-+extern spinlock_t scst_mcmd_lock;
-+/* The following lists protected by scst_mcmd_lock */
-+extern struct list_head scst_active_mgmt_cmd_list;
-+extern struct list_head scst_delayed_mgmt_cmd_list;
-+extern wait_queue_head_t scst_mgmt_cmd_list_waitQ;
-+
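-+/* Per-CPU state; cacheline-aligned to avoid false sharing between CPUs */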
-+struct scst_percpu_info {
-+ atomic_t cpu_cmd_count;
-+ spinlock_t tasklet_lock;
-+ struct list_head tasklet_cmd_list;
-+ struct tasklet_struct tasklet;
-+} ____cacheline_aligned_in_smp;
-+extern struct scst_percpu_info scst_percpu_infos[NR_CPUS];
-+
-+extern wait_queue_head_t scst_mgmt_waitQ;
-+extern spinlock_t scst_mgmt_lock;
-+extern struct list_head scst_sess_init_list;
-+extern struct list_head scst_sess_shut_list;
-+
-+extern cpumask_t default_cpu_mask;
-+
-+struct scst_cmd_thread_t {
-+ struct task_struct *cmd_thread;
-+ struct list_head thread_list_entry;
-+};
-+
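-+/*
-+ * Temporarily installs the tgt_dev's async io_context as the current
-+ * thread's, presumably so that block I/O issued from the main cmd
-+ * threads on behalf of one tgt_dev shares a single io_context; the
-+ * caller restores the old value via scst_reset_io_context().
-+ */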
-+static inline bool scst_set_io_context(struct scst_cmd *cmd,
-+ struct io_context **old)
-+{
-+ bool res;
-+
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ return false;
-+#endif
-+
-+ if (cmd->cmd_threads == &scst_main_cmd_threads) {
-+ EXTRACHECKS_BUG_ON(in_interrupt());
-+ /*
-+ * No need for any ref counting action, because the io_context
-+ * is supposed to be cleared at the end of the caller function.
-+ */
-+ current->io_context = cmd->tgt_dev->async_io_context;
-+ res = true;
-+ TRACE_DBG("io_context %p (tgt_dev %p)", current->io_context,
-+ cmd->tgt_dev);
-+ EXTRACHECKS_BUG_ON(current->io_context == NULL);
-+ } else
-+ res = false;
-+
-+ return res;
-+}
-+
-+static inline void scst_reset_io_context(struct scst_tgt_dev *tgt_dev,
-+ struct io_context *old)
-+{
-+ current->io_context = old;
-+ TRACE_DBG("io_context %p reset", current->io_context);
-+ return;
-+}
-+
-+/*
-+ * Converts a string representation of a threads pool type to the enum.
-+ * Returns SCST_THREADS_POOL_TYPE_INVALID if the string is invalid.
-+ */
-+extern enum scst_dev_type_threads_pool_type scst_parse_threads_pool_type(
-+ const char *p, int len);
-+
-+extern int scst_add_threads(struct scst_cmd_threads *cmd_threads,
-+ struct scst_device *dev, struct scst_tgt_dev *tgt_dev, int num);
-+extern void scst_del_threads(struct scst_cmd_threads *cmd_threads, int num);
-+
-+extern int scst_create_dev_threads(struct scst_device *dev);
-+extern void scst_stop_dev_threads(struct scst_device *dev);
-+
-+extern int scst_tgt_dev_setup_threads(struct scst_tgt_dev *tgt_dev);
-+extern void scst_tgt_dev_stop_threads(struct scst_tgt_dev *tgt_dev);
-+
-+extern bool scst_del_thr_data(struct scst_tgt_dev *tgt_dev,
-+ struct task_struct *tsk);
-+
-+extern struct scst_dev_type scst_null_devtype;
-+
-+extern struct scst_cmd *__scst_check_deferred_commands(
-+ struct scst_order_data *order_data);
-+
-+/* Used to save a function call on the fast path */
-+static inline struct scst_cmd *scst_check_deferred_commands(
-+ struct scst_order_data *order_data)
-+{
-+ if (order_data->def_cmd_count == 0)
-+ return NULL;
-+ else
-+ return __scst_check_deferred_commands(order_data);
-+}
-+
-+static inline void scst_make_deferred_commands_active(
-+ struct scst_order_data *order_data)
-+{
-+ struct scst_cmd *c;
-+
-+ c = __scst_check_deferred_commands(order_data);
-+ if (c != NULL) {
-+ TRACE_SN("Adding cmd %p to active cmd list", c);
-+ spin_lock_irq(&c->cmd_threads->cmd_list_lock);
-+ list_add_tail(&c->cmd_list_entry,
-+ &c->cmd_threads->active_cmd_list);
-+ wake_up(&c->cmd_threads->cmd_list_waitQ);
-+ spin_unlock_irq(&c->cmd_threads->cmd_list_lock);
-+ }
-+
-+ return;
-+}
-+
-+void scst_inc_expected_sn(struct scst_order_data *order_data, atomic_t *slot);
-+int scst_check_hq_cmd(struct scst_cmd *cmd);
-+
-+void scst_unblock_deferred(struct scst_order_data *order_data,
-+ struct scst_cmd *cmd_sn);
-+
-+void scst_on_hq_cmd_response(struct scst_cmd *cmd);
-+void scst_xmit_process_aborted_cmd(struct scst_cmd *cmd);
-+
-+int scst_pre_parse(struct scst_cmd *cmd);
-+
-+int scst_cmd_thread(void *arg);
-+void scst_cmd_tasklet(long p);
-+int scst_init_thread(void *arg);
-+int scst_tm_thread(void *arg);
-+int scst_global_mgmt_thread(void *arg);
-+
-+void scst_zero_write_rest(struct scst_cmd *cmd);
-+void scst_limit_sg_write_len(struct scst_cmd *cmd);
-+void scst_adjust_resp_data_len(struct scst_cmd *cmd);
-+
-+int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds);
-+
-+int scst_alloc_tgt(struct scst_tgt_template *tgtt, struct scst_tgt **tgt);
-+void scst_free_tgt(struct scst_tgt *tgt);
-+
-+int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev);
-+void scst_free_device(struct scst_device *dev);
-+
-+struct scst_acg *scst_alloc_add_acg(struct scst_tgt *tgt,
-+ const char *acg_name, bool tgt_acg);
-+void scst_del_free_acg(struct scst_acg *acg);
-+
-+struct scst_acg *scst_tgt_find_acg(struct scst_tgt *tgt, const char *name);
-+struct scst_acg *scst_find_acg(const struct scst_session *sess);
-+
-+void scst_check_reassign_sessions(void);
-+
-+int scst_sess_alloc_tgt_devs(struct scst_session *sess);
-+void scst_sess_free_tgt_devs(struct scst_session *sess);
-+void scst_nexus_loss(struct scst_tgt_dev *tgt_dev, bool queue_UA);
-+
-+int scst_acg_add_lun(struct scst_acg *acg, struct kobject *parent,
-+ struct scst_device *dev, uint64_t lun, int read_only,
-+ bool gen_scst_report_luns_changed, struct scst_acg_dev **out_acg_dev);
-+int scst_acg_del_lun(struct scst_acg *acg, uint64_t lun,
-+ bool gen_scst_report_luns_changed);
-+
-+int scst_acg_add_acn(struct scst_acg *acg, const char *name);
-+void scst_del_free_acn(struct scst_acn *acn, bool reassign);
-+struct scst_acn *scst_find_acn(struct scst_acg *acg, const char *name);
-+
-+/* The activity is supposed to be suspended and scst_mutex held */
-+static inline bool scst_acg_sess_is_empty(struct scst_acg *acg)
-+{
-+ return list_empty(&acg->acg_sess_list);
-+}
-+
-+int scst_prepare_request_sense(struct scst_cmd *orig_cmd);
-+int scst_finish_internal_cmd(struct scst_cmd *cmd);
-+
-+void scst_store_sense(struct scst_cmd *cmd);
-+
-+int scst_assign_dev_handler(struct scst_device *dev,
-+ struct scst_dev_type *handler);
-+
-+struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
-+ const char *initiator_name);
-+void scst_free_session(struct scst_session *sess);
-+void scst_free_session_callback(struct scst_session *sess);
-+
-+struct scst_cmd *scst_alloc_cmd(const uint8_t *cdb,
-+ unsigned int cdb_len, gfp_t gfp_mask);
-+void scst_free_cmd(struct scst_cmd *cmd);
-+static inline void scst_destroy_cmd(struct scst_cmd *cmd)
-+{
-+ kmem_cache_free(scst_cmd_cachep, cmd);
-+ return;
-+}
-+
-+void scst_check_retries(struct scst_tgt *tgt);
-+
-+int scst_alloc_space(struct scst_cmd *cmd);
-+
-+int scst_lib_init(void);
-+void scst_lib_exit(void);
-+
-+__be64 scst_pack_lun(const uint64_t lun, enum scst_lun_addr_method addr_method);
-+uint64_t scst_unpack_lun(const uint8_t *lun, int len);
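-+/*
-+ * The two helpers above convert between SCST's flat 64-bit LUN values
-+ * and the 8-byte LUN field defined by SAM; scst_unpack_lun() returns
-+ * NO_SUCH_LUN for a malformed field (see scst_rx_cmd()).
-+ */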
-+
-+struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask);
-+void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd);
-+void scst_done_cmd_mgmt(struct scst_cmd *cmd);
-+
-+static inline void scst_devt_cleanup(struct scst_dev_type *devt) { }
-+
-+void scst_tg_init(void);
-+void scst_tg_cleanup(void);
-+int scst_dg_add(struct kobject *parent, const char *name);
-+int scst_dg_remove(const char *name);
-+struct scst_dev_group *scst_lookup_dg_by_kobj(struct kobject *kobj);
-+int scst_dg_dev_add(struct scst_dev_group *dg, const char *name);
-+int scst_dg_dev_remove_by_name(struct scst_dev_group *dg, const char *name);
-+int scst_dg_dev_remove_by_dev(struct scst_device *dev);
-+int scst_tg_add(struct scst_dev_group *dg, const char *name);
-+int scst_tg_remove_by_name(struct scst_dev_group *dg, const char *name);
-+int scst_tg_set_state(struct scst_target_group *tg, enum scst_tg_state state);
-+int scst_tg_tgt_add(struct scst_target_group *tg, const char *name);
-+int scst_tg_tgt_remove_by_name(struct scst_target_group *tg, const char *name);
-+void scst_tg_tgt_remove_by_tgt(struct scst_tgt *tgt);
-+int scst_dg_sysfs_add(struct kobject *parent, struct scst_dev_group *dg);
-+void scst_dg_sysfs_del(struct scst_dev_group *dg);
-+int scst_dg_dev_sysfs_add(struct scst_dev_group *dg, struct scst_dg_dev *dgdev);
-+void scst_dg_dev_sysfs_del(struct scst_dev_group *dg,
-+ struct scst_dg_dev *dgdev);
-+int scst_tg_sysfs_add(struct scst_dev_group *dg,
-+ struct scst_target_group *tg);
-+void scst_tg_sysfs_del(struct scst_target_group *tg);
-+int scst_tg_tgt_sysfs_add(struct scst_target_group *tg,
-+ struct scst_tg_tgt *tg_tgt);
-+void scst_tg_tgt_sysfs_del(struct scst_target_group *tg,
-+ struct scst_tg_tgt *tg_tgt);
-+
-+extern const struct sysfs_ops scst_sysfs_ops;
-+int scst_sysfs_init(void);
-+void scst_sysfs_cleanup(void);
-+int scst_tgtt_sysfs_create(struct scst_tgt_template *tgtt);
-+void scst_tgtt_sysfs_del(struct scst_tgt_template *tgtt);
-+int scst_tgt_sysfs_create(struct scst_tgt *tgt);
-+void scst_tgt_sysfs_prepare_put(struct scst_tgt *tgt);
-+void scst_tgt_sysfs_del(struct scst_tgt *tgt);
-+int scst_sess_sysfs_create(struct scst_session *sess);
-+void scst_sess_sysfs_del(struct scst_session *sess);
-+int scst_recreate_sess_luns_link(struct scst_session *sess);
-+int scst_add_sgv_kobj(struct kobject *parent, const char *name);
-+void scst_del_put_sgv_kobj(void);
-+int scst_devt_sysfs_create(struct scst_dev_type *devt);
-+void scst_devt_sysfs_del(struct scst_dev_type *devt);
-+int scst_dev_sysfs_create(struct scst_device *dev);
-+void scst_dev_sysfs_del(struct scst_device *dev);
-+int scst_tgt_dev_sysfs_create(struct scst_tgt_dev *tgt_dev);
-+void scst_tgt_dev_sysfs_del(struct scst_tgt_dev *tgt_dev);
-+int scst_devt_dev_sysfs_create(struct scst_device *dev);
-+void scst_devt_dev_sysfs_del(struct scst_device *dev);
-+int scst_acg_sysfs_create(struct scst_tgt *tgt,
-+ struct scst_acg *acg);
-+void scst_acg_sysfs_del(struct scst_acg *acg);
-+int scst_acg_dev_sysfs_create(struct scst_acg_dev *acg_dev,
-+ struct kobject *parent);
-+void scst_acg_dev_sysfs_del(struct scst_acg_dev *acg_dev);
-+int scst_acn_sysfs_create(struct scst_acn *acn);
-+void scst_acn_sysfs_del(struct scst_acn *acn);
-+
-+void __scst_dev_check_set_UA(struct scst_device *dev, struct scst_cmd *exclude,
-+ const uint8_t *sense, int sense_len);
-+void scst_tgt_dev_del_free_UA(struct scst_tgt_dev *tgt_dev,
-+ struct scst_tgt_dev_UA *ua);
-+static inline void scst_dev_check_set_UA(struct scst_device *dev,
-+ struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
-+{
-+ spin_lock_bh(&dev->dev_lock);
-+ __scst_dev_check_set_UA(dev, exclude, sense, sense_len);
-+ spin_unlock_bh(&dev->dev_lock);
-+ return;
-+}
-+void scst_dev_check_set_local_UA(struct scst_device *dev,
-+ struct scst_cmd *exclude, const uint8_t *sense, int sense_len);
-+
-+#define SCST_SET_UA_FLAG_AT_HEAD 1
-+#define SCST_SET_UA_FLAG_GLOBAL 2
-+
-+void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
-+ const uint8_t *sense, int sense_len, int flags);
-+int scst_set_pending_UA(struct scst_cmd *cmd);
-+
-+void scst_report_luns_changed(struct scst_acg *acg);
-+
-+void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
-+ bool other_ini, bool call_dev_task_mgmt_fn);
-+void scst_process_reset(struct scst_device *dev,
-+ struct scst_session *originator, struct scst_cmd *exclude_cmd,
-+ struct scst_mgmt_cmd *mcmd, bool setUA);
-+
-+bool scst_is_ua_global(const uint8_t *sense, int len);
-+void scst_requeue_ua(struct scst_cmd *cmd);
-+
-+struct scst_aen *scst_alloc_aen(struct scst_session *sess,
-+ uint64_t unpacked_lun);
-+void scst_free_aen(struct scst_aen *aen);
-+
-+void scst_gen_aen_or_ua(struct scst_tgt_dev *tgt_dev,
-+ int key, int asc, int ascq);
-+
-+static inline bool scst_is_implicit_hq_cmd(struct scst_cmd *cmd)
-+{
-+ return (cmd->op_flags & SCST_IMPLICIT_HQ) != 0;
-+}
-+
-+static inline bool scst_is_serialized_cmd(struct scst_cmd *cmd)
-+{
-+ return (cmd->op_flags & SCST_SERIALIZED) != 0;
-+}
-+
-+static inline bool scst_is_strictly_serialized_cmd(struct scst_cmd *cmd)
-+{
-+ return (cmd->op_flags & SCST_STRICTLY_SERIALIZED) == SCST_STRICTLY_SERIALIZED;
-+}
-+
-+/*
-+ * Some notes on device "blocking". Blocking means that no commands
-+ * will go from SCST to the underlying SCSI device until it is
-+ * unblocked. But, except for strictly serialized commands, we don't
-+ * care about commands that are already on the device.
-+ */
-+
-+extern void scst_block_dev(struct scst_device *dev);
-+extern void scst_unblock_dev(struct scst_device *dev);
-+
-+bool __scst_check_blocked_dev(struct scst_cmd *cmd);
-+
-+/*
-+ * Increases the global SCST ref counters, which prevent entering the
-+ * suspended activities stage and thus protect against any global
-+ * management operations.
-+ */
-+static inline atomic_t *scst_get(void)
-+{
-+ atomic_t *a;
-+ /*
-+ * We don't mind if, because of preemption, we increment the counter
-+ * of another CPU: in the majority of cases we will use the correct
-+ * one anyway. So, let's have preempt_disable/enable only in the
-+ * debug build to avoid the warning.
-+ */
-+#ifdef CONFIG_DEBUG_PREEMPT
-+ preempt_disable();
-+#endif
-+ a = &scst_percpu_infos[smp_processor_id()].cpu_cmd_count;
-+ atomic_inc(a);
-+#ifdef CONFIG_DEBUG_PREEMPT
-+ preempt_enable();
-+#endif
-+ TRACE_DBG("Incrementing cpu_cmd_count %p (new value %d)",
-+ a, atomic_read(a));
-+ /* See comment about smp_mb() in scst_suspend_activity() */
-+ smp_mb__after_atomic_inc();
-+
-+ return a;
-+}
-+
-+/*
-+ * Decreases the global SCST ref counters, which prevent entering the
-+ * suspended activities stage and thus protect against any global
-+ * management operations. When all of them reach zero, a pending
-+ * suspend of activities, if any, will proceed.
-+ */
-+static inline void scst_put(atomic_t *a)
-+{
-+ int f;
-+ f = atomic_dec_and_test(a);
-+ /* See comment about smp_mb() in scst_suspend_activity() */
-+ if (unlikely(test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) && f) {
-+ TRACE_MGMT_DBG("%s", "Waking up scst_dev_cmd_waitQ");
-+ wake_up_all(&scst_dev_cmd_waitQ);
-+ }
-+ TRACE_DBG("Decrementing cpu_cmd_count %p (new value %d)",
-+ a, atomic_read(a));
-+}
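-+
-+/*
-+ * Illustrative pairing only (not taken from a real caller): the counter
-+ * returned by scst_get() must be passed to the matching scst_put(),
-+ * since preemption may migrate the caller to another CPU in between:
-+ *
-+ *    atomic_t *a = scst_get();
-+ *    ... work that must not race with suspending activities ...
-+ *    scst_put(a);
-+ */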
-+
-+int scst_get_cmd_counter(void);
-+
-+void scst_sched_session_free(struct scst_session *sess);
-+
-+static inline void scst_sess_get(struct scst_session *sess)
-+{
-+ atomic_inc(&sess->refcnt);
-+ TRACE_DBG("Incrementing sess %p refcnt (new value %d)",
-+ sess, atomic_read(&sess->refcnt));
-+}
-+
-+static inline void scst_sess_put(struct scst_session *sess)
-+{
-+ TRACE_DBG("Decrementing sess %p refcnt (new value %d)",
-+ sess, atomic_read(&sess->refcnt)-1);
-+ if (atomic_dec_and_test(&sess->refcnt))
-+ scst_sched_session_free(sess);
-+}
-+
-+static inline void __scst_cmd_get(struct scst_cmd *cmd)
-+{
-+ atomic_inc(&cmd->cmd_ref);
-+ TRACE_DBG("Incrementing cmd %p ref (new value %d)",
-+ cmd, atomic_read(&cmd->cmd_ref));
-+}
-+
-+static inline void __scst_cmd_put(struct scst_cmd *cmd)
-+{
-+ TRACE_DBG("Decrementing cmd %p ref (new value %d)",
-+ cmd, atomic_read(&cmd->cmd_ref)-1);
-+ if (atomic_dec_and_test(&cmd->cmd_ref))
-+ scst_free_cmd(cmd);
-+}
-+
-+extern void scst_throttle_cmd(struct scst_cmd *cmd);
-+extern void scst_unthrottle_cmd(struct scst_cmd *cmd);
-+
-+#ifdef CONFIG_SCST_DEBUG_TM
-+extern void tm_dbg_check_released_cmds(void);
-+extern int tm_dbg_check_cmd(struct scst_cmd *cmd);
-+extern void tm_dbg_release_cmd(struct scst_cmd *cmd);
-+extern void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn,
-+ int force);
-+extern int tm_dbg_is_release(void);
-+#else
-+static inline void tm_dbg_check_released_cmds(void) {}
-+static inline int tm_dbg_check_cmd(struct scst_cmd *cmd)
-+{
-+ return 0;
-+}
-+static inline void tm_dbg_release_cmd(struct scst_cmd *cmd) {}
-+static inline void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn,
-+ int force) {}
-+static inline int tm_dbg_is_release(void)
-+{
-+ return 0;
-+}
-+#endif /* CONFIG_SCST_DEBUG_TM */
-+
-+#ifdef CONFIG_SCST_DEBUG_SN
-+void scst_check_debug_sn(struct scst_cmd *cmd);
-+#else
-+static inline void scst_check_debug_sn(struct scst_cmd *cmd) {}
-+#endif
-+
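-+/*
-+ * Returns true if seq1 precedes seq2 in modulo-2^32 order, using the
-+ * signed-difference idiom (cf. time_before()), so SN wraparound is
-+ * handled: e.g. scst_sn_before(0xfffffffe, 1) is true, because
-+ * (int32_t)(0xfffffffe - 1) is negative.
-+ */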
-+static inline int scst_sn_before(uint32_t seq1, uint32_t seq2)
-+{
-+ return (int32_t)(seq1-seq2) < 0;
-+}
-+
-+int gen_relative_target_port_id(uint16_t *id);
-+bool scst_is_relative_target_port_id_unique(uint16_t id,
-+ const struct scst_tgt *t);
-+
-+#ifdef CONFIG_SCST_MEASURE_LATENCY
-+
-+void scst_set_start_time(struct scst_cmd *cmd);
-+void scst_set_cur_start(struct scst_cmd *cmd);
-+void scst_set_parse_time(struct scst_cmd *cmd);
-+void scst_set_alloc_buf_time(struct scst_cmd *cmd);
-+void scst_set_restart_waiting_time(struct scst_cmd *cmd);
-+void scst_set_rdy_to_xfer_time(struct scst_cmd *cmd);
-+void scst_set_pre_exec_time(struct scst_cmd *cmd);
-+void scst_set_exec_time(struct scst_cmd *cmd);
-+void scst_set_dev_done_time(struct scst_cmd *cmd);
-+void scst_set_xmit_time(struct scst_cmd *cmd);
-+void scst_set_tgt_on_free_time(struct scst_cmd *cmd);
-+void scst_set_dev_on_free_time(struct scst_cmd *cmd);
-+void scst_update_lat_stats(struct scst_cmd *cmd);
-+
-+#else
-+
-+static inline void scst_set_start_time(struct scst_cmd *cmd) {}
-+static inline void scst_set_cur_start(struct scst_cmd *cmd) {}
-+static inline void scst_set_parse_time(struct scst_cmd *cmd) {}
-+static inline void scst_set_alloc_buf_time(struct scst_cmd *cmd) {}
-+static inline void scst_set_restart_waiting_time(struct scst_cmd *cmd) {}
-+static inline void scst_set_rdy_to_xfer_time(struct scst_cmd *cmd) {}
-+static inline void scst_set_pre_exec_time(struct scst_cmd *cmd) {}
-+static inline void scst_set_exec_time(struct scst_cmd *cmd) {}
-+static inline void scst_set_dev_done_time(struct scst_cmd *cmd) {}
-+static inline void scst_set_xmit_time(struct scst_cmd *cmd) {}
-+static inline void scst_set_tgt_on_free_time(struct scst_cmd *cmd) {}
-+static inline void scst_set_dev_on_free_time(struct scst_cmd *cmd) {}
-+static inline void scst_update_lat_stats(struct scst_cmd *cmd) {}
-+
-+#endif /* CONFIG_SCST_MEASURE_LATENCY */
-+
-+#endif /* __SCST_PRIV_H */
-diff -uprN orig/linux-3.2/drivers/scst/scst_targ.c linux-3.2/drivers/scst/scst_targ.c
---- orig/linux-3.2/drivers/scst/scst_targ.c
-+++ linux-3.2/drivers/scst/scst_targ.c
-@@ -0,0 +1,6705 @@
-+/*
-+ * scst_targ.c
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <linux/slab.h>
-+#include <linux/sched.h>
-+#include <linux/unistd.h>
-+#include <linux/string.h>
-+#include <linux/kthread.h>
-+#include <linux/delay.h>
-+#include <linux/ktime.h>
-+
-+#include <scst/scst.h>
-+#include "scst_priv.h"
-+#include "scst_pres.h"
-+
-+#if 0 /* Let's disable it for now to see if users will complain about it */
-+/* When deleting this, don't forget to delete dev_cmd_count */
-+#define CONFIG_SCST_PER_DEVICE_CMD_COUNT_LIMIT
-+#endif
-+
-+static void scst_cmd_set_sn(struct scst_cmd *cmd);
-+static int __scst_init_cmd(struct scst_cmd *cmd);
-+static void scst_finish_cmd_mgmt(struct scst_cmd *cmd);
-+static struct scst_cmd *__scst_find_cmd_by_tag(struct scst_session *sess,
-+ uint64_t tag, bool to_abort);
-+static void scst_process_redirect_cmd(struct scst_cmd *cmd,
-+ enum scst_exec_context context, int check_retries);
-+
-+/**
-+ * scst_post_parse() - do post parse actions
-+ *
-+ * This function must be called by dev handler after its parse() callback
-+ * returned SCST_CMD_STATE_STOP before calling scst_process_active_cmd().
-+ */
-+void scst_post_parse(struct scst_cmd *cmd)
-+{
-+ scst_set_parse_time(cmd);
-+}
-+EXPORT_SYMBOL_GPL(scst_post_parse);
-+
-+/**
-+ * scst_post_alloc_data_buf() - do post alloc_data_buf actions
-+ *
-+ * This function must be called by dev handler after its alloc_data_buf()
-+ * callback returned SCST_CMD_STATE_STOP before calling
-+ * scst_process_active_cmd().
-+ */
-+void scst_post_alloc_data_buf(struct scst_cmd *cmd)
-+{
-+ scst_set_alloc_buf_time(cmd);
-+}
-+EXPORT_SYMBOL_GPL(scst_post_alloc_data_buf);
-+
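-+/*
-+ * Queues the cmd for SIRQ (tasklet) processing on the local CPU, unless
-+ * this CPU already has too many commands in flight; then it falls back
-+ * to the thread pool so that softirq context isn't monopolized.
-+ */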
-+static inline void scst_schedule_tasklet(struct scst_cmd *cmd)
-+{
-+ struct scst_percpu_info *i = &scst_percpu_infos[smp_processor_id()];
-+ unsigned long flags;
-+
-+ if (atomic_read(&i->cpu_cmd_count) <= scst_max_tasklet_cmd) {
-+ spin_lock_irqsave(&i->tasklet_lock, flags);
-+ TRACE_DBG("Adding cmd %p to tasklet %d cmd list", cmd,
-+ smp_processor_id());
-+ list_add_tail(&cmd->cmd_list_entry, &i->tasklet_cmd_list);
-+ spin_unlock_irqrestore(&i->tasklet_lock, flags);
-+
-+ tasklet_schedule(&i->tasklet);
-+ } else {
-+ spin_lock_irqsave(&cmd->cmd_threads->cmd_list_lock, flags);
-+ TRACE_DBG("Too many tasklet commands (%d), adding cmd %p to "
-+ "active cmd list", atomic_read(&i->cpu_cmd_count), cmd);
-+ list_add_tail(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
-+ spin_unlock_irqrestore(&cmd->cmd_threads->cmd_list_lock, flags);
-+ }
-+ return;
-+}
-+
-+/* No locks */
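-+/*
-+ * Returns true if the cmd must wait because the device is blocked (all
-+ * the increments below are then undone), false if it may proceed.
-+ */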
-+static bool scst_check_blocked_dev(struct scst_cmd *cmd)
-+{
-+ bool res;
-+ struct scst_device *dev = cmd->dev;
-+
-+ spin_lock_bh(&dev->dev_lock);
-+
-+ dev->on_dev_cmd_count++;
-+ cmd->dec_on_dev_needed = 1;
-+ TRACE_DBG("New inc on_dev_count %d (cmd %p)", dev->on_dev_cmd_count,
-+ cmd);
-+
-+ scst_inc_pr_readers_count(cmd, true);
-+
-+ if (unlikely(dev->block_count > 0) ||
-+ unlikely(dev->dev_double_ua_possible) ||
-+ unlikely(scst_is_serialized_cmd(cmd)))
-+ res = __scst_check_blocked_dev(cmd);
-+ else
-+ res = false;
-+
-+ if (unlikely(res)) {
-+ /* Undo increments */
-+ dev->on_dev_cmd_count--;
-+ cmd->dec_on_dev_needed = 0;
-+ TRACE_DBG("New dec on_dev_count %d (cmd %p)",
-+ dev->on_dev_cmd_count, cmd);
-+
-+ scst_dec_pr_readers_count(cmd, true);
-+ }
-+
-+ spin_unlock_bh(&dev->dev_lock);
-+
-+ return res;
-+}
-+
-+/* No locks */
-+static void scst_check_unblock_dev(struct scst_cmd *cmd)
-+{
-+ struct scst_device *dev = cmd->dev;
-+
-+ spin_lock_bh(&dev->dev_lock);
-+
-+ if (likely(cmd->dec_on_dev_needed)) {
-+ dev->on_dev_cmd_count--;
-+ cmd->dec_on_dev_needed = 0;
-+ TRACE_DBG("New dec on_dev_count %d (cmd %p)",
-+ dev->on_dev_cmd_count, cmd);
-+ }
-+
-+ if (unlikely(cmd->dec_pr_readers_count_needed))
-+ scst_dec_pr_readers_count(cmd, true);
-+
-+ if (unlikely(cmd->unblock_dev)) {
-+ TRACE_MGMT_DBG("cmd %p (tag %llu): unblocking dev %s", cmd,
-+ (long long unsigned int)cmd->tag, dev->virt_name);
-+ cmd->unblock_dev = 0;
-+ scst_unblock_dev(dev);
-+ } else if (unlikely(dev->strictly_serialized_cmd_waiting)) {
-+ if (dev->on_dev_cmd_count == 0) {
-+ TRACE_MGMT_DBG("Strictly serialized cmd waiting: "
-+ "unblocking dev %s", dev->virt_name);
-+ scst_unblock_dev(dev);
-+ }
-+ }
-+
-+ spin_unlock_bh(&dev->dev_lock);
-+ return;
-+}
-+
-+/**
-+ * scst_rx_cmd() - create new command
-+ * @sess: SCST session
-+ * @lun: LUN for the command
-+ * @lun_len: length of the LUN in bytes
-+ * @cdb: CDB of the command
-+ * @cdb_len: length of the CDB in bytes
-+ * @atomic: true, if current context is atomic
-+ *
-+ * Description:
-+ * Creates new SCST command. Returns new command on success or
-+ * NULL otherwise.
-+ *
-+ * Must not be called in parallel with scst_unregister_session() for the
-+ * same session.
-+ */
-+struct scst_cmd *scst_rx_cmd(struct scst_session *sess,
-+ const uint8_t *lun, int lun_len, const uint8_t *cdb,
-+ unsigned int cdb_len, int atomic)
-+{
-+ struct scst_cmd *cmd;
-+
-+ TRACE_ENTRY();
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ if (unlikely(sess->shut_phase != SCST_SESS_SPH_READY)) {
-+ PRINT_CRIT_ERROR("%s",
-+ "New cmd while shutting down the session");
-+ BUG();
-+ }
-+#endif
-+
-+ cmd = scst_alloc_cmd(cdb, cdb_len, atomic ? GFP_ATOMIC : GFP_KERNEL);
-+ if (unlikely(cmd == NULL))
-+ goto out;
-+
-+ cmd->sess = sess;
-+ cmd->tgt = sess->tgt;
-+ cmd->tgtt = sess->tgt->tgtt;
-+
-+ cmd->lun = scst_unpack_lun(lun, lun_len);
-+ if (unlikely(cmd->lun == NO_SUCH_LUN))
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_lun_not_supported));
-+
-+ TRACE_DBG("cmd %p, sess %p", cmd, sess);
-+ scst_sess_get(sess);
-+
-+out:
-+ TRACE_EXIT();
-+ return cmd;
-+}
-+EXPORT_SYMBOL(scst_rx_cmd);
-+
-+/*
-+ * No locks, but might be on IRQ. Returns 0 on success, >0 if the cmd was
-+ * completed abnormally but should still be dispatched, <0 if processing of
-+ * this command should be stopped.
-+ */
-+static int scst_init_cmd(struct scst_cmd *cmd, enum scst_exec_context *context)
-+{
-+ int rc, res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ /* See the comment in scst_do_job_init() */
-+ if (unlikely(!list_empty(&scst_init_cmd_list))) {
-+ TRACE_MGMT_DBG("%s", "init cmd list busy");
-+ goto out_redirect;
-+ }
-+ /*
-+ * A memory barrier isn't necessary here, because the CPU appears
-+ * to be self-consistent and we don't care about the race described
-+ * in the comment in scst_do_job_init().
-+ */
-+
-+ rc = __scst_init_cmd(cmd);
-+ if (unlikely(rc > 0))
-+ goto out_redirect;
-+ else if (unlikely(rc != 0)) {
-+ res = 1;
-+ goto out;
-+ }
-+
-+ EXTRACHECKS_BUG_ON(*context == SCST_CONTEXT_SAME);
-+
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ if (cmd->op_flags & SCST_TEST_IO_IN_SIRQ_ALLOWED)
-+ goto out;
-+#endif
-+
-+ /* Small context optimization */
-+ if ((*context == SCST_CONTEXT_TASKLET) ||
-+ (*context == SCST_CONTEXT_DIRECT_ATOMIC)) {
-+ /*
-+ * If any data_direction not set, it's SCST_DATA_UNKNOWN,
-+ * which is 0, so we can safely | them
-+ */
-+ BUILD_BUG_ON(SCST_DATA_UNKNOWN != 0);
-+ if ((cmd->data_direction | cmd->expected_data_direction) & SCST_DATA_WRITE) {
-+ if (!test_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
-+ &cmd->tgt_dev->tgt_dev_flags))
-+ *context = SCST_CONTEXT_THREAD;
-+ } else
-+ *context = SCST_CONTEXT_THREAD;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_redirect:
-+ if (cmd->preprocessing_only) {
-+ /*
-+ * Poor man's solution for single-threaded targets, where
-+ * blocking the receiver at least sometimes means blocking
-+ * everything. For instance, an iSCSI target won't be able
-+ * to receive Data-Out PDUs.
-+ */
-+ BUG_ON(*context != SCST_CONTEXT_DIRECT);
-+ scst_set_busy(cmd);
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ res = 1;
-+ /* Throttle, to keep the initiator from getting too many BUSY statuses */
-+ msleep(50);
-+ } else {
-+ unsigned long flags;
-+ spin_lock_irqsave(&scst_init_lock, flags);
-+ TRACE_MGMT_DBG("Adding cmd %p to init cmd list", cmd);
-+ list_add_tail(&cmd->cmd_list_entry, &scst_init_cmd_list);
-+ if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
-+ scst_init_poll_cnt++;
-+ spin_unlock_irqrestore(&scst_init_lock, flags);
-+ wake_up(&scst_init_cmd_list_waitQ);
-+ res = -1;
-+ }
-+ goto out;
-+}
-+
-+/**
-+ * scst_cmd_init_done() - the command's initialization done
-+ * @cmd: SCST command
-+ * @pref_context: preferred command execution context
-+ *
-+ * Description:
-+ * Notifies SCST that the driver finished its part of the command
-+ * initialization, and the command is ready for execution.
-+ * The second argument sets preferred command execution context.
-+ * See SCST_CONTEXT_* constants for details.
-+ *
-+ * !!IMPORTANT!!
-+ *
-+ * If cmd->set_sn_on_restart_cmd not set, this function, as well as
-+ * scst_cmd_init_stage1_done() and scst_restart_cmd(), must not be
-+ * called simultaneously for the same session (more precisely,
-+ * for the same session/LUN, i.e. tgt_dev), i.e. they must be
-+ * somehow externally serialized. This is needed to have a lock-free fast
-+ * path in scst_cmd_set_sn(). For the majority of targets those functions
-+ * are naturally serialized by the single source of commands. Only iSCSI
-+ * immediate commands with multiple connections per session seem to be an
-+ * exception. For them, some mutex/lock shall be used for the serialization.
-+ */
-+void scst_cmd_init_done(struct scst_cmd *cmd,
-+ enum scst_exec_context pref_context)
-+{
-+ unsigned long flags;
-+ struct scst_session *sess = cmd->sess;
-+ int rc;
-+
-+ TRACE_ENTRY();
-+
-+ scst_set_start_time(cmd);
-+
-+ TRACE_DBG("Preferred context: %d (cmd %p)", pref_context, cmd);
-+ TRACE(TRACE_SCSI, "tag=%llu, lun=%lld, CDB len=%d, queue_type=%x "
-+ "(cmd %p, sess %p)", (long long unsigned int)cmd->tag,
-+ (long long unsigned int)cmd->lun, cmd->cdb_len,
-+ cmd->queue_type, cmd, sess);
-+ PRINT_BUFF_FLAG(TRACE_SCSI|TRACE_RCV_BOT, "Receiving CDB",
-+ cmd->cdb, cmd->cdb_len);
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ if (unlikely((in_irq() || irqs_disabled())) &&
-+ ((pref_context == SCST_CONTEXT_DIRECT) ||
-+ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
-+ PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
-+ "SCST_CONTEXT_THREAD instead", pref_context,
-+ cmd->tgtt->name);
-+ dump_stack();
-+ pref_context = SCST_CONTEXT_THREAD;
-+ }
-+#endif
-+
-+ atomic_inc(&sess->sess_cmd_count);
-+
-+ spin_lock_irqsave(&sess->sess_list_lock, flags);
-+
-+ if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
-+ /*
-+ * We must always keep commands in the sess list from the
-+ * very beginning, because otherwise they can be missed during
-+ * TM processing. This check is needed because there might be
-+ * old, i.e. deferred, commands and new, i.e. just coming, ones.
-+ */
-+ if (cmd->sess_cmd_list_entry.next == NULL)
-+ list_add_tail(&cmd->sess_cmd_list_entry,
-+ &sess->sess_cmd_list);
-+ switch (sess->init_phase) {
-+ case SCST_SESS_IPH_SUCCESS:
-+ break;
-+ case SCST_SESS_IPH_INITING:
-+ TRACE_DBG("Adding cmd %p to init deferred cmd list",
-+ cmd);
-+ list_add_tail(&cmd->cmd_list_entry,
-+ &sess->init_deferred_cmd_list);
-+ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
-+ goto out;
-+ case SCST_SESS_IPH_FAILED:
-+ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
-+ scst_set_busy(cmd);
-+ goto set_state;
-+ default:
-+ BUG();
-+ }
-+ } else
-+ list_add_tail(&cmd->sess_cmd_list_entry,
-+ &sess->sess_cmd_list);
-+
-+ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
-+
-+ if (unlikely(cmd->queue_type >= SCST_CMD_QUEUE_ACA)) {
-+ PRINT_ERROR("Unsupported queue type %d", cmd->queue_type);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_message));
-+ }
-+
-+set_state:
-+ if (unlikely(cmd->status != SAM_STAT_GOOD)) {
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ goto active;
-+ }
-+
-+ /*
-+ * Cmd must be inited here to preserve the order. In case the cmd
-+ * was already preliminarily completed by the target driver, we need
-+ * to init it anyway to find out in which format we should return
-+ * sense.
-+ */
-+ cmd->state = SCST_CMD_STATE_INIT;
-+ rc = scst_init_cmd(cmd, &pref_context);
-+ if (unlikely(rc < 0))
-+ goto out;
-+
-+active:
-+ /* Here cmd must not be in any cmd list, no locks */
-+ switch (pref_context) {
-+ case SCST_CONTEXT_TASKLET:
-+ scst_schedule_tasklet(cmd);
-+ break;
-+
-+ default:
-+ PRINT_ERROR("Context %x is undefined, using the thread one",
-+ pref_context);
-+ /* fall through */
-+ case SCST_CONTEXT_THREAD:
-+ spin_lock_irqsave(&cmd->cmd_threads->cmd_list_lock, flags);
-+ TRACE_DBG("Adding cmd %p to active cmd list", cmd);
-+ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
-+ list_add(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ else
-+ list_add_tail(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
-+ spin_unlock_irqrestore(&cmd->cmd_threads->cmd_list_lock, flags);
-+ break;
-+
-+ case SCST_CONTEXT_DIRECT:
-+ scst_process_active_cmd(cmd, false);
-+ break;
-+
-+ case SCST_CONTEXT_DIRECT_ATOMIC:
-+ scst_process_active_cmd(cmd, true);
-+ break;
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL(scst_cmd_init_done);
-+
-+int scst_pre_parse(struct scst_cmd *cmd)
-+{
-+ int res;
-+ struct scst_device *dev = cmd->dev;
-+ int rc;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+ * Expected transfer data supplied by the SCSI transport via the
-+ * target driver are untrusted, so we prefer to fetch them from the CDB.
-+ * Additionally, not all transports support supplying the expected
-+ * transfer data.
-+ */
-+
-+ rc = scst_get_cdb_info(cmd);
-+ if (unlikely(rc != 0)) {
-+ if (rc > 0) {
-+ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
-+ goto out_err;
-+ }
-+
-+ EXTRACHECKS_BUG_ON(cmd->op_flags & SCST_INFO_VALID);
-+
-+ TRACE(TRACE_MINOR, "Unknown opcode 0x%02x for %s. "
-+ "Perhaps you should update scst_scsi_op_table?",
-+ cmd->cdb[0], dev->handler->name);
-+ PRINT_BUFF_FLAG(TRACE_MINOR, "Failed CDB", cmd->cdb,
-+ cmd->cdb_len);
-+ } else
-+ EXTRACHECKS_BUG_ON(!(cmd->op_flags & SCST_INFO_VALID));
-+
-+#ifdef CONFIG_SCST_STRICT_SERIALIZING
-+ cmd->inc_expected_sn_on_done = 1;
-+#else
-+ cmd->inc_expected_sn_on_done = dev->handler->exec_sync ||
-+ (!dev->has_own_order_mgmt &&
-+ (dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER ||
-+ cmd->queue_type == SCST_CMD_QUEUE_ORDERED));
-+#endif
-+
-+ TRACE_DBG("op_name <%s> (cmd %p), direction=%d "
-+ "(expected %d, set %s), bufflen=%d, out_bufflen=%d (expected "
-+ "len %d, out expected len %d), flags=0x%x", cmd->op_name, cmd,
-+ cmd->data_direction, cmd->expected_data_direction,
-+ scst_cmd_is_expected_set(cmd) ? "yes" : "no",
-+ cmd->bufflen, cmd->out_bufflen, cmd->expected_transfer_len,
-+ cmd->expected_out_transfer_len, cmd->op_flags);
-+
-+ res = 0;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_err:
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ res = -1;
-+ goto out;
-+}
-+
-+#ifndef CONFIG_SCST_USE_EXPECTED_VALUES
-+static bool scst_is_allowed_to_mismatch_cmd(struct scst_cmd *cmd)
-+{
-+ bool res = false;
-+
-+ /* VERIFY commands with BYTCHK unset shouldn't fail here */
-+ if ((cmd->op_flags & SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED) &&
-+ (cmd->cdb[1] & BYTCHK) == 0) {
-+ res = true;
-+ goto out;
-+ }
-+
-+ switch (cmd->cdb[0]) {
-+ case TEST_UNIT_READY:
-+ /* Crazy VMware people sometimes do TUR with READ direction */
-+ if ((cmd->expected_data_direction == SCST_DATA_READ) ||
-+ (cmd->expected_data_direction == SCST_DATA_NONE))
-+ res = true;
-+ break;
-+ }
-+
-+out:
-+ return res;
-+}
-+#endif
-+
-+static int scst_parse_cmd(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_RES_CONT_SAME;
-+ int state;
-+ struct scst_device *dev = cmd->dev;
-+ int orig_bufflen = cmd->bufflen;
-+
-+ TRACE_ENTRY();
-+
-+ if (likely(!scst_is_cmd_fully_local(cmd))) {
-+ if (unlikely(!dev->handler->parse_atomic &&
-+ scst_cmd_atomic(cmd))) {
-+ /*
-+ * This shouldn't happen, because of the SCST_TGT_DEV_AFTER_*
-+ * optimization.
-+ */
-+ TRACE_MGMT_DBG("Dev handler %s parse() needs thread "
-+ "context, rescheduling", dev->handler->name);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+ }
-+
-+ TRACE_DBG("Calling dev handler %s parse(%p)",
-+ dev->handler->name, cmd);
-+ TRACE_BUFF_FLAG(TRACE_SND_BOT, "Parsing: ",
-+ cmd->cdb, cmd->cdb_len);
-+ scst_set_cur_start(cmd);
-+ state = dev->handler->parse(cmd);
-+ /* Caution: cmd can be already dead here */
-+ TRACE_DBG("Dev handler %s parse() returned %d",
-+ dev->handler->name, state);
-+
-+ switch (state) {
-+ case SCST_CMD_STATE_NEED_THREAD_CTX:
-+ scst_set_parse_time(cmd);
-+ TRACE_DBG("Dev handler %s parse() requested thread "
-+ "context, rescheduling", dev->handler->name);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+
-+ case SCST_CMD_STATE_STOP:
-+ TRACE_DBG("Dev handler %s parse() requested stop "
-+ "processing", dev->handler->name);
-+ res = SCST_CMD_STATE_RES_CONT_NEXT;
-+ goto out;
-+ }
-+
-+ scst_set_parse_time(cmd);
-+
-+ if (state == SCST_CMD_STATE_DEFAULT)
-+ state = SCST_CMD_STATE_PREPARE_SPACE;
-+ } else
-+ state = SCST_CMD_STATE_PREPARE_SPACE;
-+
-+ if (unlikely(state == SCST_CMD_STATE_PRE_XMIT_RESP))
-+ goto set_res;
-+
-+ if (unlikely(!(cmd->op_flags & SCST_INFO_VALID))) {
-+#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
-+ if (scst_cmd_is_expected_set(cmd)) {
-+ TRACE(TRACE_MINOR, "Using initiator supplied values: "
-+ "direction %d, transfer_len %d/%d",
-+ cmd->expected_data_direction,
-+ cmd->expected_transfer_len,
-+ cmd->expected_out_transfer_len);
-+ cmd->data_direction = cmd->expected_data_direction;
-+ cmd->bufflen = cmd->expected_transfer_len;
-+ cmd->out_bufflen = cmd->expected_out_transfer_len;
-+ } else {
-+ PRINT_ERROR("Unknown opcode 0x%02x for %s and "
-+ "target %s not supplied expected values",
-+ cmd->cdb[0], dev->handler->name, cmd->tgtt->name);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
-+ goto out_done;
-+ }
-+#else
-+ /*
-+ * Let's not report T10/04-262r7 16-byte and 12-byte ATA pass-thru
-+ * commands, to not pollute the logs (udev(?) checks them for some
-+ * reason). If somebody has their description, please update
-+ * scst_scsi_op_table.
-+ */
-+ if ((cmd->cdb[0] != 0x85) && (cmd->cdb[0] != 0xa1))
-+ PRINT_ERROR("Refusing unknown opcode %x", cmd->cdb[0]);
-+ else
-+ TRACE(TRACE_MINOR, "Refusing unknown opcode %x",
-+ cmd->cdb[0]);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
-+ goto out_done;
-+#endif
-+ }
-+
-+ if (unlikely(cmd->cdb_len == 0)) {
-+ PRINT_ERROR("Unable to get CDB length for "
-+ "opcode 0x%02x. Returning INVALID "
-+ "OPCODE", cmd->cdb[0]);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
-+ goto out_done;
-+ }
-+
-+ EXTRACHECKS_BUG_ON(cmd->cdb_len == 0);
-+
-+ TRACE(TRACE_SCSI, "op_name <%s> (cmd %p), direction=%d "
-+ "(expected %d, set %s), bufflen=%d, out_bufflen=%d, (expected "
-+ "len %d, out expected len %d), flags=%x", cmd->op_name, cmd,
-+ cmd->data_direction, cmd->expected_data_direction,
-+ scst_cmd_is_expected_set(cmd) ? "yes" : "no",
-+ cmd->bufflen, cmd->out_bufflen, cmd->expected_transfer_len,
-+ cmd->expected_out_transfer_len, cmd->op_flags);
-+
-+ if (unlikely((cmd->op_flags & SCST_UNKNOWN_LENGTH) != 0)) {
-+ if (scst_cmd_is_expected_set(cmd)) {
-+ /*
-+ * The command's data length can't be easily
-+ * determined from the CDB. ToDo: processing of
-+ * all such commands should be fixed. Until
-+ * that's done, get the length from the supplied
-+ * expected value, but limit it to some
-+ * reasonable value (15MB).
-+ */
-+ cmd->bufflen = min(cmd->expected_transfer_len,
-+ 15*1024*1024);
-+ if (cmd->data_direction == SCST_DATA_BIDI)
-+ cmd->out_bufflen = min(cmd->expected_out_transfer_len,
-+ 15*1024*1024);
-+ cmd->op_flags &= ~SCST_UNKNOWN_LENGTH;
-+ } else {
-+ PRINT_ERROR("Unknown data transfer length for opcode "
-+ "0x%x (handler %s, target %s)", cmd->cdb[0],
-+ dev->handler->name, cmd->tgtt->name);
-+ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_message));
-+ goto out_done;
-+ }
-+ }
-+
-+ if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_NACA_BIT)) {
-+ PRINT_ERROR("NACA bit in control byte CDB is not supported "
-+ "(opcode 0x%02x)", cmd->cdb[0]);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_done;
-+ }
-+
-+ if (unlikely(cmd->cdb[cmd->cdb_len - 1] & CONTROL_BYTE_LINK_BIT)) {
-+ PRINT_ERROR("Linked commands are not supported "
-+ "(opcode 0x%02x)", cmd->cdb[0]);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_done;
-+ }
-+
-+ if (cmd->dh_data_buf_alloced &&
-+ unlikely((orig_bufflen > cmd->bufflen))) {
-+ PRINT_ERROR("Dev handler supplied data buffer (size %d) "
-+ "is less than required (size %d)", cmd->bufflen,
-+ orig_bufflen);
-+ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
-+ goto out_hw_error;
-+ }
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ if ((cmd->bufflen != 0) &&
-+ ((cmd->data_direction == SCST_DATA_NONE) ||
-+ ((cmd->sg == NULL) && (state > SCST_CMD_STATE_PREPARE_SPACE)))) {
-+ PRINT_ERROR("Dev handler %s parse() returned "
-+ "invalid cmd data_direction %d, bufflen %d, state %d "
-+ "or sg %p (opcode 0x%x)", dev->handler->name,
-+ cmd->data_direction, cmd->bufflen, state, cmd->sg,
-+ cmd->cdb[0]);
-+ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
-+ goto out_hw_error;
-+ }
-+#endif
-+
-+ if (scst_cmd_is_expected_set(cmd)) {
-+#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
-+ if (unlikely((cmd->data_direction != cmd->expected_data_direction) ||
-+ (cmd->bufflen != cmd->expected_transfer_len) ||
-+ (cmd->out_bufflen != cmd->expected_out_transfer_len))) {
-+ TRACE(TRACE_MINOR, "Expected values don't match "
-+ "decoded ones: data_direction %d, "
-+ "expected_data_direction %d, "
-+ "bufflen %d, expected_transfer_len %d, "
-+ "out_bufflen %d, expected_out_transfer_len %d",
-+ cmd->data_direction,
-+ cmd->expected_data_direction,
-+ cmd->bufflen, cmd->expected_transfer_len,
-+ cmd->out_bufflen, cmd->expected_out_transfer_len);
-+ PRINT_BUFF_FLAG(TRACE_MINOR, "Suspicious CDB",
-+ cmd->cdb, cmd->cdb_len);
-+ cmd->data_direction = cmd->expected_data_direction;
-+ cmd->bufflen = cmd->expected_transfer_len;
-+ cmd->out_bufflen = cmd->expected_out_transfer_len;
-+ cmd->resid_possible = 1;
-+ }
-+#else
-+ if (unlikely(cmd->data_direction !=
-+ cmd->expected_data_direction)) {
-+ if (((cmd->expected_data_direction != SCST_DATA_NONE) ||
-+ (cmd->bufflen != 0)) &&
-+ !scst_is_allowed_to_mismatch_cmd(cmd)) {
-+ PRINT_ERROR("Expected data direction %d for "
-+ "opcode 0x%02x (handler %s, target %s) "
-+ "doesn't match decoded value %d",
-+ cmd->expected_data_direction,
-+ cmd->cdb[0], dev->handler->name,
-+ cmd->tgtt->name, cmd->data_direction);
-+ PRINT_BUFFER("Failed CDB", cmd->cdb,
-+ cmd->cdb_len);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_message));
-+ goto out_done;
-+ }
-+ }
-+ if (unlikely(cmd->bufflen != cmd->expected_transfer_len)) {
-+ TRACE(TRACE_MINOR, "Warning: expected "
-+ "transfer length %d for opcode 0x%02x "
-+ "(handler %s, target %s) doesn't match "
-+ "decoded value %d",
-+ cmd->expected_transfer_len, cmd->cdb[0],
-+ dev->handler->name, cmd->tgtt->name,
-+ cmd->bufflen);
-+ PRINT_BUFF_FLAG(TRACE_MINOR, "Suspicious CDB",
-+ cmd->cdb, cmd->cdb_len);
-+ if ((cmd->data_direction & SCST_DATA_READ) ||
-+ (cmd->data_direction & SCST_DATA_WRITE))
-+ cmd->resid_possible = 1;
-+ }
-+ if (unlikely(cmd->out_bufflen != cmd->expected_out_transfer_len)) {
-+ TRACE(TRACE_MINOR, "Warning: expected bidirectional OUT "
-+ "transfer length %d for opcode 0x%02x "
-+ "(handler %s, target %s) doesn't match "
-+ "decoded value %d",
-+ cmd->expected_out_transfer_len, cmd->cdb[0],
-+ dev->handler->name, cmd->tgtt->name,
-+ cmd->out_bufflen);
-+ PRINT_BUFF_FLAG(TRACE_MINOR, "Suspicious CDB",
-+ cmd->cdb, cmd->cdb_len);
-+ cmd->resid_possible = 1;
-+ }
-+#endif
-+ }
-+
-+ if (unlikely(cmd->data_direction == SCST_DATA_UNKNOWN)) {
-+ PRINT_ERROR("Unknown data direction. Opcode 0x%x, handler %s, "
-+ "target %s", cmd->cdb[0], dev->handler->name,
-+ cmd->tgtt->name);
-+ PRINT_BUFFER("Failed CDB", cmd->cdb, cmd->cdb_len);
-+ goto out_hw_error;
-+ }
-+
-+set_res:
-+ if (cmd->data_len == -1)
-+ cmd->data_len = cmd->bufflen;
-+
-+ if (cmd->bufflen == 0) {
-+ /*
-+ * According to SPC, a bufflen of 0 for data transfer commands isn't
-+ * an error, so we need to fix the transfer direction.
-+ */
-+ cmd->data_direction = SCST_DATA_NONE;
-+ }
-+
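-+ /*
-+ * With CONFIG_SCST_EXTRACHECKS the assignment of cmd->state below is
-+ * wrapped in a switch whitelisting the valid next states; in
-+ * non-debug builds the #ifdef parts compile away and the assignment
-+ * is unconditional.
-+ */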
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ switch (state) {
-+ case SCST_CMD_STATE_PREPARE_SPACE:
-+ case SCST_CMD_STATE_PARSE:
-+ case SCST_CMD_STATE_RDY_TO_XFER:
-+ case SCST_CMD_STATE_TGT_PRE_EXEC:
-+ case SCST_CMD_STATE_SEND_FOR_EXEC:
-+ case SCST_CMD_STATE_START_EXEC:
-+ case SCST_CMD_STATE_LOCAL_EXEC:
-+ case SCST_CMD_STATE_REAL_EXEC:
-+ case SCST_CMD_STATE_PRE_DEV_DONE:
-+ case SCST_CMD_STATE_DEV_DONE:
-+ case SCST_CMD_STATE_PRE_XMIT_RESP:
-+ case SCST_CMD_STATE_XMIT_RESP:
-+ case SCST_CMD_STATE_FINISHED:
-+ case SCST_CMD_STATE_FINISHED_INTERNAL:
-+#endif
-+ cmd->state = state;
-+ res = SCST_CMD_STATE_RES_CONT_SAME;
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ break;
-+
-+ default:
-+ if (state >= 0) {
-+ PRINT_ERROR("Dev handler %s parse() returned "
-+ "invalid cmd state %d (opcode %d)",
-+ dev->handler->name, state, cmd->cdb[0]);
-+ } else {
-+ PRINT_ERROR("Dev handler %s parse() returned "
-+ "error %d (opcode %d)", dev->handler->name,
-+ state, cmd->cdb[0]);
-+ }
-+ goto out_hw_error;
-+ }
-+#endif
-+
-+ if (cmd->resp_data_len == -1) {
-+ if (cmd->data_direction & SCST_DATA_READ)
-+ cmd->resp_data_len = cmd->bufflen;
-+ else
-+ cmd->resp_data_len = 0;
-+ }
-+
-+ /* We already completed (with an error) */
-+ if (unlikely(cmd->completed))
-+ goto out_done;
-+
-+#ifndef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ /*
-+ * We can't allow an atomic command on the exec stages. This
-+ * shouldn't happen, because of the SCST_TGT_DEV_AFTER_*
-+ * optimization, but during parsing data_direction can change,
-+ * so we need to recheck.
-+ */
-+ if (unlikely(scst_cmd_atomic(cmd) &&
-+ !(cmd->data_direction & SCST_DATA_WRITE))) {
-+ TRACE_DBG_FLAG(TRACE_DEBUG|TRACE_MINOR, "Atomic context and "
-+ "non-WRITE data direction, rescheduling (cmd %p)", cmd);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+ }
-+#endif
-+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+
-+out_hw_error:
-+ /* dev_done() will be called as part of the regular cmd's finish */
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
-+
-+out_done:
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ res = SCST_CMD_STATE_RES_CONT_SAME;
-+ goto out;
-+}
-+
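-+/*
-+ * Sets up write_len and the write SG list for the data-out phase: for
-+ * bidirectional commands the out_* fields are used, and when residuals
-+ * are possible the length is capped at the initiator's expected
-+ * transfer length.
-+ */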
-+static void scst_set_write_len(struct scst_cmd *cmd)
-+{
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(!(cmd->data_direction & SCST_DATA_WRITE));
-+
-+ if (cmd->data_direction & SCST_DATA_READ) {
-+ cmd->write_len = cmd->out_bufflen;
-+ cmd->write_sg = &cmd->out_sg;
-+ cmd->write_sg_cnt = &cmd->out_sg_cnt;
-+ } else {
-+ cmd->write_len = cmd->bufflen;
-+ /* write_sg and write_sg_cnt already initialized correctly */
-+ }
-+
-+ TRACE_MEM("cmd %p, write_len %d, write_sg %p, write_sg_cnt %d, "
-+ "resid_possible %d", cmd, cmd->write_len, *cmd->write_sg,
-+ *cmd->write_sg_cnt, cmd->resid_possible);
-+
-+ if (unlikely(cmd->resid_possible)) {
-+ if (cmd->data_direction & SCST_DATA_READ) {
-+ cmd->write_len = min(cmd->out_bufflen,
-+ cmd->expected_out_transfer_len);
-+ if (cmd->write_len == cmd->out_bufflen)
-+ goto out;
-+ } else {
-+ cmd->write_len = min(cmd->bufflen,
-+ cmd->expected_transfer_len);
-+ if (cmd->write_len == cmd->bufflen)
-+ goto out;
-+ }
-+ scst_limit_sg_write_len(cmd);
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int scst_prepare_space(struct scst_cmd *cmd)
-+{
-+ int r = 0, res = SCST_CMD_STATE_RES_CONT_SAME;
-+ struct scst_device *dev = cmd->dev;
-+
-+ TRACE_ENTRY();
-+
-+ if (cmd->data_direction == SCST_DATA_NONE)
-+ goto done;
-+
-+ if (likely(!scst_is_cmd_fully_local(cmd)) &&
-+ (dev->handler->alloc_data_buf != NULL)) {
-+ int state;
-+
-+ if (unlikely(!dev->handler->alloc_data_buf_atomic &&
-+ scst_cmd_atomic(cmd))) {
-+ /*
-+ * This shouldn't happen, because of the SCST_TGT_DEV_AFTER_*
-+ * optimization.
-+ */
-+ TRACE_MGMT_DBG("Dev handler %s alloc_data_buf() needs "
-+ "thread context, rescheduling",
-+ dev->handler->name);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+ }
-+
-+ TRACE_DBG("Calling dev handler %s alloc_data_buf(%p)",
-+ dev->handler->name, cmd);
-+ scst_set_cur_start(cmd);
-+ state = dev->handler->alloc_data_buf(cmd);
-+ /* Caution: cmd can be already dead here */
-+ TRACE_DBG("Dev handler %s alloc_data_buf() returned %d",
-+ dev->handler->name, state);
-+
-+ switch (state) {
-+ case SCST_CMD_STATE_NEED_THREAD_CTX:
-+ scst_set_alloc_buf_time(cmd);
-+ TRACE_DBG("Dev handler %s alloc_data_buf() requested "
-+ "thread context, rescheduling",
-+ dev->handler->name);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+
-+ case SCST_CMD_STATE_STOP:
-+ TRACE_DBG("Dev handler %s alloc_data_buf() requested "
-+ "stop processing", dev->handler->name);
-+ res = SCST_CMD_STATE_RES_CONT_NEXT;
-+ goto out;
-+ }
-+
-+ scst_set_alloc_buf_time(cmd);
-+
-+ if (unlikely(state != SCST_CMD_STATE_DEFAULT)) {
-+ cmd->state = state;
-+ goto out;
-+ }
-+ }
-+
-+ if (cmd->tgt_need_alloc_data_buf) {
-+ int orig_bufflen = cmd->bufflen;
-+
-+ TRACE_MEM("Custom tgt data buf allocation requested (cmd %p)",
-+ cmd);
-+
-+ scst_set_cur_start(cmd);
-+ r = cmd->tgtt->alloc_data_buf(cmd);
-+ scst_set_alloc_buf_time(cmd);
-+
-+ if (r > 0)
-+ goto alloc;
-+ else if (r == 0) {
-+ if (unlikely(cmd->bufflen == 0)) {
-+ /* See comment in scst_alloc_space() */
-+ if (cmd->sg == NULL)
-+ goto alloc;
-+ }
-+
-+ cmd->tgt_data_buf_alloced = 1;
-+
-+ if (unlikely(orig_bufflen < cmd->bufflen)) {
-+ PRINT_ERROR("Target driver allocated data "
-+ "buffer (size %d) is less than "
-+ "required (size %d)", orig_bufflen,
-+ cmd->bufflen);
-+ goto out_error;
-+ }
-+ TRACE_MEM("tgt_data_buf_alloced (cmd %p)", cmd);
-+ } else
-+ goto check;
-+ }
-+
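-+ /*
-+ * Four cases below: nobody allocated a buffer yet (SCST allocates
-+ * one), only the dev handler did, only the target driver did (its
-+ * SG lists are used directly), or both did (both are kept; data is
-+ * presumably copied between them later as needed).
-+ */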
-+alloc:
-+ if (!cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
-+ r = scst_alloc_space(cmd);
-+ } else if (cmd->dh_data_buf_alloced && !cmd->tgt_data_buf_alloced) {
-+ TRACE_MEM("dh_data_buf_alloced set (cmd %p)", cmd);
-+ r = 0;
-+ } else if (cmd->tgt_data_buf_alloced && !cmd->dh_data_buf_alloced) {
-+ TRACE_MEM("tgt_data_buf_alloced set (cmd %p)", cmd);
-+ cmd->sg = cmd->tgt_sg;
-+ cmd->sg_cnt = cmd->tgt_sg_cnt;
-+ cmd->out_sg = cmd->tgt_out_sg;
-+ cmd->out_sg_cnt = cmd->tgt_out_sg_cnt;
-+ r = 0;
-+ } else {
-+ TRACE_MEM("Both *_data_buf_alloced set (cmd %p, sg %p, "
-+ "sg_cnt %d, tgt_sg %p, tgt_sg_cnt %d)", cmd, cmd->sg,
-+ cmd->sg_cnt, cmd->tgt_sg, cmd->tgt_sg_cnt);
-+ r = 0;
-+ }
-+
-+check:
-+ if (r != 0) {
-+ if (scst_cmd_atomic(cmd)) {
-+ TRACE_MEM("%s", "Atomic memory allocation failed, "
-+ "rescheduling to the thread");
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+ } else
-+ goto out_no_space;
-+ }
-+
-+done:
-+ if (cmd->preprocessing_only) {
-+ cmd->state = SCST_CMD_STATE_PREPROCESSING_DONE;
-+ if (cmd->data_direction & SCST_DATA_WRITE)
-+ scst_set_write_len(cmd);
-+ } else if (cmd->data_direction & SCST_DATA_WRITE) {
-+ cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
-+ scst_set_write_len(cmd);
-+ } else
-+ cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
-+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+
-+out_no_space:
-+ TRACE(TRACE_OUT_OF_MEM, "Unable to allocate or build requested buffer "
-+ "(size %d), sending BUSY or QUEUE FULL status", cmd->bufflen);
-+ scst_set_busy(cmd);
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ res = SCST_CMD_STATE_RES_CONT_SAME;
-+ goto out;
-+
-+out_error:
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ res = SCST_CMD_STATE_RES_CONT_SAME;
-+ goto out;
-+}
-+
-+static int scst_preprocessing_done(struct scst_cmd *cmd)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(!cmd->preprocessing_only);
-+
-+ cmd->preprocessing_only = 0;
-+
-+ res = SCST_CMD_STATE_RES_CONT_NEXT;
-+ cmd->state = SCST_CMD_STATE_PREPROCESSING_DONE_CALLED;
-+
-+ TRACE_DBG("Calling preprocessing_done(cmd %p)", cmd);
-+ scst_set_cur_start(cmd);
-+ cmd->tgtt->preprocessing_done(cmd);
-+ TRACE_DBG("%s", "preprocessing_done() returned");
-+
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
-+
-+/**
-+ * scst_restart_cmd() - restart execution of the command
-+ * @cmd:	SCST command
-+ * @status: completion status
-+ * @pref_context: preferred command execution context
-+ *
-+ * Description:
-+ * Notifies SCST that the driver finished its part of the command's
-+ * preprocessing and it is ready for further processing.
-+ *
-+ * The second argument sets completion status
-+ * (see SCST_PREPROCESS_STATUS_* constants for details)
-+ *
-+ * See also comment for scst_cmd_init_done() for the serialization
-+ * requirements.
-+ */
-+void scst_restart_cmd(struct scst_cmd *cmd, int status,
-+ enum scst_exec_context pref_context)
-+{
-+ TRACE_ENTRY();
-+
-+ scst_set_restart_waiting_time(cmd);
-+
-+ TRACE_DBG("Preferred context: %d", pref_context);
-+ TRACE_DBG("tag=%llu, status=%#x",
-+ (long long unsigned int)scst_cmd_get_tag(cmd),
-+ status);
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ if ((in_irq() || irqs_disabled()) &&
-+ ((pref_context == SCST_CONTEXT_DIRECT) ||
-+ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
-+ PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
-+ "SCST_CONTEXT_THREAD instead", pref_context,
-+ cmd->tgtt->name);
-+ dump_stack();
-+ pref_context = SCST_CONTEXT_THREAD;
-+ }
-+#endif
-+
-+ switch (status) {
-+ case SCST_PREPROCESS_STATUS_SUCCESS:
-+ if (cmd->data_direction & SCST_DATA_WRITE)
-+ cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
-+ else
-+ cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
-+ if (cmd->set_sn_on_restart_cmd)
-+ scst_cmd_set_sn(cmd);
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ if (cmd->op_flags & SCST_TEST_IO_IN_SIRQ_ALLOWED)
-+ break;
-+#endif
-+ /* Small context optimization */
-+ if ((pref_context == SCST_CONTEXT_TASKLET) ||
-+ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
-+ ((pref_context == SCST_CONTEXT_SAME) &&
-+ scst_cmd_atomic(cmd)))
-+ pref_context = SCST_CONTEXT_THREAD;
-+ break;
-+
-+ case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ pref_context = SCST_CONTEXT_THREAD;
-+ break;
-+
-+ case SCST_PREPROCESS_STATUS_ERROR_FATAL:
-+ set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
-+ /* fall through */
-+ case SCST_PREPROCESS_STATUS_ERROR:
-+ if (cmd->sense != NULL)
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ pref_context = SCST_CONTEXT_THREAD;
-+ break;
-+
-+ default:
-+ PRINT_ERROR("%s() received unknown status %x", __func__,
-+ status);
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ pref_context = SCST_CONTEXT_THREAD;
-+ break;
-+ }
-+
-+ scst_process_redirect_cmd(cmd, pref_context, 1);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL(scst_restart_cmd);
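-+
-+/*
-+ * Editor's illustrative sketch (not part of SCST): a target driver that
-+ * deferred its preprocessing to a workqueue could complete it like this.
-+ * struct my_tgt_cmd, its fields and my_tgt_preprocessing_work() are
-+ * hypothetical:
-+ *
-+ *	static void my_tgt_preprocessing_work(struct work_struct *work)
-+ *	{
-+ *		struct my_tgt_cmd *tcmd =
-+ *			container_of(work, struct my_tgt_cmd, work);
-+ *
-+ *		... driver-specific preprocessing of tcmd ...
-+ *
-+ *		scst_restart_cmd(tcmd->scst_cmd,
-+ *			SCST_PREPROCESS_STATUS_SUCCESS,
-+ *			SCST_CONTEXT_THREAD);
-+ *	}
-+ */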
-+
-+static int scst_rdy_to_xfer(struct scst_cmd *cmd)
-+{
-+ int res, rc;
-+ struct scst_tgt_template *tgtt = cmd->tgtt;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
-+ TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
-+ goto out_dev_done;
-+ }
-+
-+ if ((tgtt->rdy_to_xfer == NULL) || unlikely(cmd->internal)) {
-+ cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
-+#ifndef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ /* We can't allow atomic commands on the exec stages */
-+ if (scst_cmd_atomic(cmd)) {
-+ TRACE_DBG("NULL rdy_to_xfer() and atomic context, "
-+ "rescheduling (cmd %p)", cmd);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ } else
-+#endif
-+ res = SCST_CMD_STATE_RES_CONT_SAME;
-+ goto out;
-+ }
-+
-+ if (unlikely(!tgtt->rdy_to_xfer_atomic && scst_cmd_atomic(cmd))) {
-+ /*
-+ * This shouldn't happen, due to the SCST_TGT_DEV_AFTER_*
-+ * optimization.
-+ */
-+ TRACE_MGMT_DBG("Target driver %s rdy_to_xfer() needs thread "
-+ "context, rescheduling", tgtt->name);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+ }
-+
-+ while (1) {
-+ int finished_cmds = atomic_read(&cmd->tgt->finished_cmds);
-+
-+ res = SCST_CMD_STATE_RES_CONT_NEXT;
-+ cmd->state = SCST_CMD_STATE_DATA_WAIT;
-+
-+ if (tgtt->on_hw_pending_cmd_timeout != NULL) {
-+ struct scst_session *sess = cmd->sess;
-+ cmd->hw_pending_start = jiffies;
-+ cmd->cmd_hw_pending = 1;
-+ if (!test_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags)) {
-+ TRACE_DBG("Sched HW pending work for sess %p "
-+ "(max time %d)", sess,
-+ tgtt->max_hw_pending_time);
-+ set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED,
-+ &sess->sess_aflags);
-+ schedule_delayed_work(&sess->hw_pending_work,
-+ tgtt->max_hw_pending_time * HZ);
-+ }
-+ }
-+
-+ scst_set_cur_start(cmd);
-+
-+ TRACE_DBG("Calling rdy_to_xfer(%p)", cmd);
-+#ifdef CONFIG_SCST_DEBUG_RETRY
-+ if (((scst_random() % 100) == 75))
-+ rc = SCST_TGT_RES_QUEUE_FULL;
-+ else
-+#endif
-+ rc = tgtt->rdy_to_xfer(cmd);
-+ TRACE_DBG("rdy_to_xfer() returned %d", rc);
-+
-+ if (likely(rc == SCST_TGT_RES_SUCCESS))
-+ goto out;
-+
-+ scst_set_rdy_to_xfer_time(cmd);
-+
-+ cmd->cmd_hw_pending = 0;
-+
-+ /* Restore the previous state */
-+ cmd->state = SCST_CMD_STATE_RDY_TO_XFER;
-+
-+ switch (rc) {
-+ case SCST_TGT_RES_QUEUE_FULL:
-+ if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
-+ break;
-+ else
-+ continue;
-+
-+ case SCST_TGT_RES_NEED_THREAD_CTX:
-+ TRACE_DBG("Target driver %s "
-+ "rdy_to_xfer() requested thread "
-+ "context, rescheduling", tgtt->name);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+
-+ default:
-+ goto out_error_rc;
-+ }
-+ break;
-+ }
-+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+
-+out_error_rc:
-+ if (rc == SCST_TGT_RES_FATAL_ERROR) {
-+ PRINT_ERROR("Target driver %s rdy_to_xfer() returned "
-+ "fatal error", tgtt->name);
-+ } else {
-+ PRINT_ERROR("Target driver %s rdy_to_xfer() returned invalid "
-+ "value %d", tgtt->name, rc);
-+ }
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
-+
-+out_dev_done:
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ res = SCST_CMD_STATE_RES_CONT_SAME;
-+ goto out;
-+}
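-+
-+/*
-+ * Editor's illustrative sketch (not part of SCST): a target driver's
-+ * rdy_to_xfer() typically starts the write data transfer and reports the
-+ * outcome with a SCST_TGT_RES_* code. my_hw_queue_full() and
-+ * my_start_data_transfer() are hypothetical:
-+ *
-+ *	static int my_tgt_rdy_to_xfer(struct scst_cmd *cmd)
-+ *	{
-+ *		if (my_hw_queue_full())
-+ *			return SCST_TGT_RES_QUEUE_FULL;
-+ *		my_start_data_transfer(cmd);
-+ *		return SCST_TGT_RES_SUCCESS;
-+ *	}
-+ */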
-+
-+/* No locks, but might be in IRQ */
-+static void scst_process_redirect_cmd(struct scst_cmd *cmd,
-+ enum scst_exec_context context, int check_retries)
-+{
-+ struct scst_tgt *tgt = cmd->tgt;
-+ unsigned long flags;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Context: %x", context);
-+
-+ if (check_retries)
-+ scst_check_retries(tgt);
-+
-+ if (context == SCST_CONTEXT_SAME)
-+ context = scst_cmd_atomic(cmd) ? SCST_CONTEXT_DIRECT_ATOMIC :
-+ SCST_CONTEXT_DIRECT;
-+
-+ switch (context) {
-+ case SCST_CONTEXT_DIRECT_ATOMIC:
-+ scst_process_active_cmd(cmd, true);
-+ break;
-+
-+ case SCST_CONTEXT_DIRECT:
-+ scst_process_active_cmd(cmd, false);
-+ break;
-+
-+ case SCST_CONTEXT_TASKLET:
-+ scst_schedule_tasklet(cmd);
-+ break;
-+
-+ default:
-+ PRINT_ERROR("Context %x is unknown, using the thread one",
-+ context);
-+ /* fall through */
-+ case SCST_CONTEXT_THREAD:
-+ spin_lock_irqsave(&cmd->cmd_threads->cmd_list_lock, flags);
-+ TRACE_DBG("Adding cmd %p to active cmd list", cmd);
-+ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
-+ list_add(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ else
-+ list_add_tail(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
-+ spin_unlock_irqrestore(&cmd->cmd_threads->cmd_list_lock, flags);
-+ break;
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ * scst_rx_data() - the command's data received
-+ * @cmd:	SCST command
-+ * @status: data receiving completion status
-+ * @pref_context: preferred command execution context
-+ *
-+ * Description:
-+ * Notifies SCST that the driver received all the necessary data
-+ * and the command is ready for further processing.
-+ *
-+ * The second argument sets data receiving completion status
-+ * (see SCST_RX_STATUS_* constants for details)
-+ */
-+void scst_rx_data(struct scst_cmd *cmd, int status,
-+ enum scst_exec_context pref_context)
-+{
-+ TRACE_ENTRY();
-+
-+ scst_set_rdy_to_xfer_time(cmd);
-+
-+ TRACE_DBG("Preferred context: %d", pref_context);
-+ TRACE(TRACE_SCSI, "cmd %p, status %#x", cmd, status);
-+
-+ cmd->cmd_hw_pending = 0;
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ if ((in_irq() || irqs_disabled()) &&
-+ ((pref_context == SCST_CONTEXT_DIRECT) ||
-+ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC))) {
-+ PRINT_ERROR("Wrong context %d in IRQ from target %s, use "
-+ "SCST_CONTEXT_THREAD instead", pref_context,
-+ cmd->tgtt->name);
-+ dump_stack();
-+ pref_context = SCST_CONTEXT_THREAD;
-+ }
-+#endif
-+
-+ switch (status) {
-+ case SCST_RX_STATUS_SUCCESS:
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ if (trace_flag & TRACE_RCV_BOT) {
-+ int i, j;
-+ struct scatterlist *sg;
-+ if (cmd->out_sg != NULL)
-+ sg = cmd->out_sg;
-+ else if (cmd->tgt_out_sg != NULL)
-+ sg = cmd->tgt_out_sg;
-+ else if (cmd->tgt_sg != NULL)
-+ sg = cmd->tgt_sg;
-+ else
-+ sg = cmd->sg;
-+ if (sg != NULL) {
-+ TRACE_RECV_BOT("RX data for cmd %p "
-+ "(sg_cnt %d, sg %p, sg[0].page %p)",
-+ cmd, cmd->tgt_sg_cnt, sg,
-+ (void *)sg_page(&sg[0]));
-+ for (i = 0, j = 0; i < cmd->tgt_sg_cnt; ++i, ++j) {
-+ if (unlikely(sg_is_chain(&sg[j]))) {
-+ sg = sg_chain_ptr(&sg[j]);
-+ j = 0;
-+ }
-+ PRINT_BUFF_FLAG(TRACE_RCV_BOT, "RX sg",
-+ sg_virt(&sg[j]), sg[j].length);
-+ }
-+ }
-+ }
-+#endif
-+ cmd->state = SCST_CMD_STATE_TGT_PRE_EXEC;
-+
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ if (cmd->op_flags & SCST_TEST_IO_IN_SIRQ_ALLOWED)
-+ break;
-+#endif
-+
-+ /* Small context optimization */
-+ if ((pref_context == SCST_CONTEXT_TASKLET) ||
-+ (pref_context == SCST_CONTEXT_DIRECT_ATOMIC) ||
-+ ((pref_context == SCST_CONTEXT_SAME) &&
-+ scst_cmd_atomic(cmd)))
-+ pref_context = SCST_CONTEXT_THREAD;
-+ break;
-+
-+ case SCST_RX_STATUS_ERROR_SENSE_SET:
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ pref_context = SCST_CONTEXT_THREAD;
-+ break;
-+
-+ case SCST_RX_STATUS_ERROR_FATAL:
-+ set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
-+ /* fall through */
-+ case SCST_RX_STATUS_ERROR:
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ pref_context = SCST_CONTEXT_THREAD;
-+ break;
-+
-+ default:
-+ PRINT_ERROR("scst_rx_data() received unknown status %x",
-+ status);
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ pref_context = SCST_CONTEXT_THREAD;
-+ break;
-+ }
-+
-+ scst_process_redirect_cmd(cmd, pref_context, 1);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL(scst_rx_data);
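-+
-+/*
-+ * Editor's illustrative sketch (not part of SCST): when the transfer
-+ * started from rdy_to_xfer() above completes, e.g. in an interrupt
-+ * handler, the driver reports it via scst_rx_data(). The hypothetical
-+ * callback below uses SCST_CONTEXT_THREAD, the safe choice in IRQ
-+ * context, as the CONFIG_SCST_EXTRACHECKS check above enforces:
-+ *
-+ *	static void my_tgt_data_xfer_done(struct scst_cmd *cmd, int error)
-+ *	{
-+ *		scst_rx_data(cmd, error ? SCST_RX_STATUS_ERROR :
-+ *			SCST_RX_STATUS_SUCCESS, SCST_CONTEXT_THREAD);
-+ *	}
-+ */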
-+
-+static int scst_tgt_pre_exec(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_RES_CONT_SAME, rc;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(cmd->resid_possible)) {
-+ if (cmd->data_direction & SCST_DATA_WRITE) {
-+ bool do_zero = false;
-+ if (cmd->data_direction & SCST_DATA_READ) {
-+ if (cmd->write_len != cmd->out_bufflen)
-+ do_zero = true;
-+ } else {
-+ if (cmd->write_len != cmd->bufflen)
-+ do_zero = true;
-+ }
-+ if (do_zero) {
-+ scst_check_restore_sg_buff(cmd);
-+ scst_zero_write_rest(cmd);
-+ }
-+ }
-+ }
-+
-+ cmd->state = SCST_CMD_STATE_SEND_FOR_EXEC;
-+
-+ if ((cmd->tgtt->pre_exec == NULL) || unlikely(cmd->internal))
-+ goto out;
-+
-+ TRACE_DBG("Calling pre_exec(%p)", cmd);
-+ scst_set_cur_start(cmd);
-+ rc = cmd->tgtt->pre_exec(cmd);
-+ scst_set_pre_exec_time(cmd);
-+ TRACE_DBG("pre_exec() returned %d", rc);
-+
-+ if (unlikely(rc != SCST_PREPROCESS_STATUS_SUCCESS)) {
-+ switch (rc) {
-+ case SCST_PREPROCESS_STATUS_ERROR_SENSE_SET:
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ break;
-+ case SCST_PREPROCESS_STATUS_ERROR_FATAL:
-+ set_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags);
-+ /* fall through */
-+ case SCST_PREPROCESS_STATUS_ERROR:
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ break;
-+ default:
-+ BUG();
-+ break;
-+ }
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void scst_do_cmd_done(struct scst_cmd *cmd, int result,
-+ const uint8_t *rq_sense, int rq_sense_len, int resid)
-+{
-+ TRACE_ENTRY();
-+
-+ scst_set_exec_time(cmd);
-+
-+ cmd->status = result & 0xff;
-+ cmd->msg_status = msg_byte(result);
-+ cmd->host_status = host_byte(result);
-+ cmd->driver_status = driver_byte(result);
-+ if (unlikely(resid != 0)) {
-+ if ((cmd->data_direction & SCST_DATA_READ) &&
-+ (resid > 0) && (resid < cmd->resp_data_len))
-+ scst_set_resp_data_len(cmd, cmd->resp_data_len - resid);
-+ /*
-+ * We ignore write direction residue, because from the
-+ * initiator's POV we already transferred all the data.
-+ */
-+ }
-+
-+ if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION)) {
-+ /* We might have double reset UA here */
-+ cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
-+ cmd->dbl_ua_orig_data_direction = cmd->data_direction;
-+
-+ scst_alloc_set_sense(cmd, 1, rq_sense, rq_sense_len);
-+ }
-+
-+ TRACE(TRACE_SCSI, "cmd %p, result %x, cmd->status %x, resid %d, "
-+ "cmd->msg_status %x, cmd->host_status %x, "
-+ "cmd->driver_status %x", cmd, result, cmd->status, resid,
-+ cmd->msg_status, cmd->host_status, cmd->driver_status);
-+
-+ cmd->completed = 1;
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* For small context optimization */
-+static inline enum scst_exec_context scst_optimize_post_exec_context(
-+ struct scst_cmd *cmd, enum scst_exec_context context)
-+{
-+ if (((context == SCST_CONTEXT_SAME) && scst_cmd_atomic(cmd)) ||
-+ (context == SCST_CONTEXT_TASKLET) ||
-+ (context == SCST_CONTEXT_DIRECT_ATOMIC)) {
-+ if (!test_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
-+ &cmd->tgt_dev->tgt_dev_flags))
-+ context = SCST_CONTEXT_THREAD;
-+ }
-+ return context;
-+}
-+
-+/**
-+ * scst_pass_through_cmd_done - done callback for pass-through commands
-+ * @data: private opaque data
-+ * @sense: pointer to the sense data, if any
-+ * @result: command's execution result
-+ * @resid: residual, if any
-+ */
-+void scst_pass_through_cmd_done(void *data, char *sense, int result, int resid)
-+{
-+ struct scst_cmd *cmd;
-+
-+ TRACE_ENTRY();
-+
-+ cmd = (struct scst_cmd *)data;
-+ if (cmd == NULL)
-+ goto out;
-+
-+ scst_do_cmd_done(cmd, result, sense, SCSI_SENSE_BUFFERSIZE, resid);
-+
-+ cmd->state = SCST_CMD_STATE_PRE_DEV_DONE;
-+
-+ scst_process_redirect_cmd(cmd,
-+ scst_optimize_post_exec_context(cmd, scst_estimate_context()), 0);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_pass_through_cmd_done);
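-+
-+/*
-+ * Editor's note: within this file scst_pass_through_cmd_done() is passed
-+ * as the completion callback of scst_scsi_exec_async() in
-+ * scst_do_real_exec(), with the cmd itself as the opaque data pointer:
-+ *
-+ *	rc = scst_scsi_exec_async(cmd, cmd, scst_pass_through_cmd_done);
-+ */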
-+
-+static void scst_cmd_done_local(struct scst_cmd *cmd, int next_state,
-+ enum scst_exec_context pref_context)
-+{
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(cmd->pr_abort_counter != NULL);
-+
-+ scst_set_exec_time(cmd);
-+
-+ TRACE(TRACE_SCSI, "cmd %p, status %x, msg_status %x, host_status %x, "
-+ "driver_status %x, resp_data_len %d", cmd, cmd->status,
-+ cmd->msg_status, cmd->host_status, cmd->driver_status,
-+ cmd->resp_data_len);
-+
-+ if (next_state == SCST_CMD_STATE_DEFAULT)
-+ next_state = SCST_CMD_STATE_PRE_DEV_DONE;
-+
-+#if defined(CONFIG_SCST_DEBUG)
-+ if (next_state == SCST_CMD_STATE_PRE_DEV_DONE) {
-+ if ((trace_flag & TRACE_RCV_TOP) && (cmd->sg != NULL)) {
-+ int i, j;
-+ struct scatterlist *sg = cmd->sg;
-+ TRACE_RECV_TOP("Exec'd %d S/G(s) at %p sg[0].page at "
-+ "%p", cmd->sg_cnt, sg, (void *)sg_page(&sg[0]));
-+ for (i = 0, j = 0; i < cmd->sg_cnt; ++i, ++j) {
-+ if (unlikely(sg_is_chain(&sg[j]))) {
-+ sg = sg_chain_ptr(&sg[j]);
-+ j = 0;
-+ }
-+ TRACE_BUFF_FLAG(TRACE_RCV_TOP,
-+ "Exec'd sg", sg_virt(&sg[j]),
-+ sg[j].length);
-+ }
-+ }
-+ }
-+#endif
-+
-+ cmd->state = next_state;
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ if ((next_state != SCST_CMD_STATE_PRE_DEV_DONE) &&
-+ (next_state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
-+ (next_state != SCST_CMD_STATE_FINISHED) &&
-+ (next_state != SCST_CMD_STATE_FINISHED_INTERNAL)) {
-+ PRINT_ERROR("%s() received invalid cmd state %d (opcode %d)",
-+ __func__, next_state, cmd->cdb[0]);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ }
-+#endif
-+ pref_context = scst_optimize_post_exec_context(cmd, pref_context);
-+ scst_process_redirect_cmd(cmd, pref_context, 0);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int scst_report_luns_local(struct scst_cmd *cmd)
-+{
-+ int res = SCST_EXEC_COMPLETED, rc;
-+ int dev_cnt = 0;
-+ int buffer_size;
-+ int i;
-+ struct scst_tgt_dev *tgt_dev = NULL;
-+ uint8_t *buffer;
-+ int offs, overflow = 0;
-+
-+ TRACE_ENTRY();
-+
-+ rc = scst_check_local_events(cmd);
-+ if (unlikely(rc != 0))
-+ goto out_done;
-+
-+ cmd->status = 0;
-+ cmd->msg_status = 0;
-+ cmd->host_status = DID_OK;
-+ cmd->driver_status = 0;
-+
-+ if ((cmd->cdb[2] != 0) && (cmd->cdb[2] != 2)) {
-+ PRINT_ERROR("Unsupported SELECT REPORT value %x in REPORT "
-+ "LUNS command", cmd->cdb[2]);
-+ goto out_err;
-+ }
-+
-+ buffer_size = scst_get_buf_full(cmd, &buffer);
-+ if (unlikely(buffer_size == 0))
-+ goto out_compl;
-+ else if (unlikely(buffer_size < 0))
-+ goto out_hw_err;
-+
-+ if (buffer_size < 16)
-+ goto out_put_err;
-+
-+ memset(buffer, 0, buffer_size);
-+ offs = 8;
-+
-+ /*
-+ * This cmd doesn't allow suspending activities, so we can access
-+ * sess->sess_tgt_dev_list without any additional protection.
-+ */
-+ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
-+ struct list_head *head = &cmd->sess->sess_tgt_dev_list[i];
-+ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
-+ if (!overflow) {
-+ if ((buffer_size - offs) < 8) {
-+ overflow = 1;
-+ goto inc_dev_cnt;
-+ }
-+ *(__force __be64 *)&buffer[offs]
-+ = scst_pack_lun(tgt_dev->lun,
-+ cmd->sess->acg->addr_method);
-+ offs += 8;
-+ }
-+inc_dev_cnt:
-+ dev_cnt++;
-+ }
-+ }
-+
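-+ /*
-+ * REPORT LUNS parameter data layout (SPC; editor's note): bytes 0-3
-+ * hold the LUN LIST LENGTH in bytes (number of LUNs * 8), bytes 4-7
-+ * are reserved, and the 8-byte LUN entries start at byte 8, hence
-+ * offs = 8 above and the "+ 8" below when computing the full length.
-+ */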
-+ /* Set the response header */
-+ dev_cnt *= 8;
-+ buffer[0] = (dev_cnt >> 24) & 0xff;
-+ buffer[1] = (dev_cnt >> 16) & 0xff;
-+ buffer[2] = (dev_cnt >> 8) & 0xff;
-+ buffer[3] = dev_cnt & 0xff;
-+
-+ scst_put_buf_full(cmd, buffer);
-+
-+ dev_cnt += 8;
-+ if (dev_cnt < cmd->resp_data_len)
-+ scst_set_resp_data_len(cmd, dev_cnt);
-+
-+out_compl:
-+ cmd->completed = 1;
-+
-+ /* Clear any leftover sense_reported_luns_data_changed UA. */
-+
-+ /*
-+ * This cmd doesn't allow suspending activities, so we can access
-+ * sess->sess_tgt_dev_list without any additional protection.
-+ */
-+ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
-+ struct list_head *head = &cmd->sess->sess_tgt_dev_list[i];
-+
-+ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
-+ struct scst_tgt_dev_UA *ua;
-+
-+ spin_lock_bh(&tgt_dev->tgt_dev_lock);
-+ list_for_each_entry(ua, &tgt_dev->UA_list,
-+ UA_list_entry) {
-+ if (scst_analyze_sense(ua->UA_sense_buffer,
-+ ua->UA_valid_sense_len,
-+ SCST_SENSE_ALL_VALID,
-+ SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed))) {
-+ TRACE_MGMT_DBG("Freeing not needed "
-+ "REPORTED LUNS DATA CHANGED UA "
-+ "%p", ua);
-+ scst_tgt_dev_del_free_UA(tgt_dev, ua);
-+ break;
-+ }
-+ }
-+ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
-+ }
-+ }
-+
-+out_done:
-+ /* Report the result */
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_put_err:
-+ scst_put_buf_full(cmd, buffer);
-+
-+out_err:
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_compl;
-+
-+out_hw_err:
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out_compl;
-+}
-+
-+static int scst_request_sense_local(struct scst_cmd *cmd)
-+{
-+ int res = SCST_EXEC_COMPLETED, rc;
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+ uint8_t *buffer;
-+ int buffer_size = 0, sl = 0;
-+
-+ TRACE_ENTRY();
-+
-+ rc = scst_check_local_events(cmd);
-+ if (unlikely(rc != 0))
-+ goto out_done;
-+
-+ cmd->status = 0;
-+ cmd->msg_status = 0;
-+ cmd->host_status = DID_OK;
-+ cmd->driver_status = 0;
-+
-+ spin_lock_bh(&tgt_dev->tgt_dev_lock);
-+
-+ if (tgt_dev->tgt_dev_valid_sense_len == 0)
-+ goto out_unlock_not_completed;
-+
-+ TRACE(TRACE_SCSI, "%s: Returning stored sense", cmd->op_name);
-+
-+ buffer_size = scst_get_buf_full(cmd, &buffer);
-+ if (unlikely(buffer_size == 0))
-+ goto out_unlock_compl;
-+ else if (unlikely(buffer_size < 0))
-+ goto out_unlock_hw_err;
-+
-+ memset(buffer, 0, buffer_size);
-+
-+ if (((tgt_dev->tgt_dev_sense[0] == 0x70) ||
-+ (tgt_dev->tgt_dev_sense[0] == 0x71)) && (cmd->cdb[1] & 1)) {
-+ PRINT_WARNING("%s: Fixed format of the saved sense, but "
-+ "descriptor format requested. Conversion will "
-+ "truncated data", cmd->op_name);
-+ PRINT_BUFFER("Original sense", tgt_dev->tgt_dev_sense,
-+ tgt_dev->tgt_dev_valid_sense_len);
-+
-+ buffer_size = min(SCST_STANDARD_SENSE_LEN, buffer_size);
-+ sl = scst_set_sense(buffer, buffer_size, true,
-+ tgt_dev->tgt_dev_sense[2], tgt_dev->tgt_dev_sense[12],
-+ tgt_dev->tgt_dev_sense[13]);
-+ } else if (((tgt_dev->tgt_dev_sense[0] == 0x72) ||
-+ (tgt_dev->tgt_dev_sense[0] == 0x73)) && !(cmd->cdb[1] & 1)) {
-+ PRINT_WARNING("%s: Descriptor format of the "
-+ "saved sense, but fixed format requested. Conversion "
-+ "will truncated data", cmd->op_name);
-+ PRINT_BUFFER("Original sense", tgt_dev->tgt_dev_sense,
-+ tgt_dev->tgt_dev_valid_sense_len);
-+
-+ buffer_size = min(SCST_STANDARD_SENSE_LEN, buffer_size);
-+ sl = scst_set_sense(buffer, buffer_size, false,
-+ tgt_dev->tgt_dev_sense[1], tgt_dev->tgt_dev_sense[2],
-+ tgt_dev->tgt_dev_sense[3]);
-+ } else {
-+ if (buffer_size >= tgt_dev->tgt_dev_valid_sense_len)
-+ sl = tgt_dev->tgt_dev_valid_sense_len;
-+ else {
-+ sl = buffer_size;
-+ TRACE(TRACE_MINOR, "%s: Being returned sense truncated "
-+ "to size %d (needed %d)", cmd->op_name,
-+ buffer_size, tgt_dev->tgt_dev_valid_sense_len);
-+ }
-+ memcpy(buffer, tgt_dev->tgt_dev_sense, sl);
-+ }
-+
-+ scst_put_buf_full(cmd, buffer);
-+
-+ tgt_dev->tgt_dev_valid_sense_len = 0;
-+
-+ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
-+
-+ scst_set_resp_data_len(cmd, sl);
-+
-+out_compl:
-+ cmd->completed = 1;
-+
-+out_done:
-+ /* Report the result */
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_unlock_hw_err:
-+ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out_compl;
-+
-+out_unlock_not_completed:
-+ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
-+ res = SCST_EXEC_NOT_COMPLETED;
-+ goto out;
-+
-+out_unlock_compl:
-+ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
-+ goto out_compl;
-+}
-+
-+static int scst_reserve_local(struct scst_cmd *cmd)
-+{
-+ int res = SCST_EXEC_NOT_COMPLETED, rc;
-+ struct scst_device *dev;
-+ struct scst_tgt_dev *tgt_dev_tmp;
-+
-+ TRACE_ENTRY();
-+
-+ if ((cmd->cdb[0] == RESERVE_10) && (cmd->cdb[2] & SCST_RES_3RDPTY)) {
-+ PRINT_ERROR("RESERVE_10: 3rdPty RESERVE not implemented "
-+ "(lun=%lld)", (long long unsigned int)cmd->lun);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_done;
-+ }
-+
-+ dev = cmd->dev;
-+
-+ /*
-+ * There's no need to block this device, even for
-+ * SCST_CONTR_MODE_ONE_TASK_SET, or otherwise protect reservation
-+ * changes, because:
-+ *
-+ * 1. The reservation changes are (rather) atomic, i.e., in contrast
-+ * to persistent reservations, they don't have any invalid intermediate
-+ * states while being changed.
-+ *
-+ * 2. It's the initiators' duty to ensure the order of regular commands
-+ * around the reservation command, either by the ORDERED attribute, by
-+ * queue draining, etc. In the SCST_CONTR_MODE_ONE_TASK_SET case no
-+ * target driver can guarantee delivery order even for ORDERED
-+ * commands, and since initiators know this, there's no point in any
-+ * extra protection actions either.
-+ */
-+
-+ rc = scst_pre_check_local_events(cmd);
-+ if (unlikely(rc != 0))
-+ goto out_done;
-+
-+ if (!list_empty(&dev->dev_registrants_list)) {
-+ if (scst_pr_crh_case(cmd))
-+ goto out_completed;
-+ else {
-+ scst_set_cmd_error_status(cmd,
-+ SAM_STAT_RESERVATION_CONFLICT);
-+ goto out_done;
-+ }
-+ }
-+
-+ spin_lock_bh(&dev->dev_lock);
-+
-+ if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
-+ spin_unlock_bh(&dev->dev_lock);
-+ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
-+ goto out_done;
-+ }
-+
-+ list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ if (cmd->tgt_dev != tgt_dev_tmp)
-+ set_bit(SCST_TGT_DEV_RESERVED,
-+ &tgt_dev_tmp->tgt_dev_flags);
-+ }
-+ dev->dev_reserved = 1;
-+
-+ spin_unlock_bh(&dev->dev_lock);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_completed:
-+ cmd->completed = 1;
-+
-+out_done:
-+ /* Report the result */
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
-+ res = SCST_EXEC_COMPLETED;
-+ goto out;
-+}
-+
-+static int scst_release_local(struct scst_cmd *cmd)
-+{
-+ int res = SCST_EXEC_NOT_COMPLETED, rc;
-+ struct scst_tgt_dev *tgt_dev_tmp;
-+ struct scst_device *dev;
-+
-+ TRACE_ENTRY();
-+
-+ dev = cmd->dev;
-+
-+ /*
-+ * See comment in scst_reserve_local() why no dev blocking or any
-+ * other protection is needed here.
-+ */
-+
-+ rc = scst_pre_check_local_events(cmd);
-+ if (unlikely(rc != 0))
-+ goto out_done;
-+
-+ if (!list_empty(&dev->dev_registrants_list)) {
-+ if (scst_pr_crh_case(cmd))
-+ goto out_completed;
-+ else {
-+ scst_set_cmd_error_status(cmd,
-+ SAM_STAT_RESERVATION_CONFLICT);
-+ goto out_done;
-+ }
-+ }
-+
-+ spin_lock_bh(&dev->dev_lock);
-+
-+ /*
-+ * The device could have been RELEASED behind us, if the RESERVING
-+ * session was closed (see scst_free_tgt_dev()), but this actually
-+ * doesn't matter, so take the lock and don't retest the DEV_RESERVED
-+ * bits again.
-+ */
-+ if (test_bit(SCST_TGT_DEV_RESERVED, &cmd->tgt_dev->tgt_dev_flags)) {
-+ res = SCST_EXEC_COMPLETED;
-+ cmd->status = 0;
-+ cmd->msg_status = 0;
-+ cmd->host_status = DID_OK;
-+ cmd->driver_status = 0;
-+ cmd->completed = 1;
-+ } else {
-+ list_for_each_entry(tgt_dev_tmp,
-+ &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ clear_bit(SCST_TGT_DEV_RESERVED,
-+ &tgt_dev_tmp->tgt_dev_flags);
-+ }
-+ dev->dev_reserved = 0;
-+ }
-+
-+ spin_unlock_bh(&dev->dev_lock);
-+
-+ if (res == SCST_EXEC_COMPLETED)
-+ goto out_done;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_completed:
-+ cmd->completed = 1;
-+
-+out_done:
-+ res = SCST_EXEC_COMPLETED;
-+ /* Report the result */
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
-+ goto out;
-+}
-+
-+/* No locks, no IRQ or IRQ-disabled context allowed */
-+static int scst_persistent_reserve_in_local(struct scst_cmd *cmd)
-+{
-+ int rc;
-+ struct scst_device *dev;
-+ struct scst_tgt_dev *tgt_dev;
-+ struct scst_session *session;
-+ int action;
-+ uint8_t *buffer;
-+ int buffer_size;
-+
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(scst_cmd_atomic(cmd));
-+
-+ dev = cmd->dev;
-+ tgt_dev = cmd->tgt_dev;
-+ session = cmd->sess;
-+
-+ rc = scst_check_local_events(cmd);
-+ if (unlikely(rc != 0))
-+ goto out_done;
-+
-+ if (unlikely(dev->not_pr_supporting_tgt_devs_num != 0)) {
-+ PRINT_WARNING("Persistent Reservation command %x refused for "
-+ "device %s, because the device has not supporting PR "
-+ "transports connected", cmd->cdb[0], dev->virt_name);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
-+ goto out_done;
-+ }
-+
-+ if (dev->dev_reserved) {
-+ TRACE_PR("PR command rejected, because device %s holds regular "
-+ "reservation", dev->virt_name);
-+ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
-+ goto out_done;
-+ }
-+
-+ if (dev->scsi_dev != NULL) {
-+ PRINT_WARNING("PR commands for pass-through devices not "
-+ "supported (device %s)", dev->virt_name);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
-+ goto out_done;
-+ }
-+
-+ buffer_size = scst_get_buf_full(cmd, &buffer);
-+ if (unlikely(buffer_size <= 0)) {
-+ if (buffer_size < 0)
-+ scst_set_busy(cmd);
-+ goto out_done;
-+ }
-+
-+ scst_pr_write_lock(dev);
-+
-+ /* We can be aborted by another PR command while waiting for the lock */
-+ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
-+ TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
-+ goto out_unlock;
-+ }
-+
-+ action = cmd->cdb[1] & 0x1f;
-+
-+ TRACE(TRACE_SCSI, "PR action %x for '%s' (LUN %llx) from '%s'", action,
-+ dev->virt_name, tgt_dev->lun, session->initiator_name);
-+
-+ switch (action) {
-+ case PR_READ_KEYS:
-+ scst_pr_read_keys(cmd, buffer, buffer_size);
-+ break;
-+ case PR_READ_RESERVATION:
-+ scst_pr_read_reservation(cmd, buffer, buffer_size);
-+ break;
-+ case PR_REPORT_CAPS:
-+ scst_pr_report_caps(cmd, buffer, buffer_size);
-+ break;
-+ case PR_READ_FULL_STATUS:
-+ scst_pr_read_full_status(cmd, buffer, buffer_size);
-+ break;
-+ default:
-+ PRINT_ERROR("Unsupported action %x", action);
-+ scst_pr_write_unlock(dev);
-+ goto out_err;
-+ }
-+
-+out_complete:
-+ cmd->completed = 1;
-+
-+out_unlock:
-+ scst_pr_write_unlock(dev);
-+
-+ scst_put_buf_full(cmd, buffer);
-+
-+out_done:
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
-+
-+ TRACE_EXIT_RES(SCST_EXEC_COMPLETED);
-+ return SCST_EXEC_COMPLETED;
-+
-+out_err:
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_complete;
-+}
-+
-+/* No locks, no IRQ or IRQ-disabled context allowed */
-+static int scst_persistent_reserve_out_local(struct scst_cmd *cmd)
-+{
-+ int res = SCST_EXEC_COMPLETED;
-+ int rc;
-+ struct scst_device *dev;
-+ struct scst_tgt_dev *tgt_dev;
-+ struct scst_session *session;
-+ int action;
-+ uint8_t *buffer;
-+ int buffer_size;
-+ bool aborted = false;
-+
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(scst_cmd_atomic(cmd));
-+
-+ dev = cmd->dev;
-+ tgt_dev = cmd->tgt_dev;
-+ session = cmd->sess;
-+
-+ rc = scst_check_local_events(cmd);
-+ if (unlikely(rc != 0))
-+ goto out_done;
-+
-+ if (unlikely(dev->not_pr_supporting_tgt_devs_num != 0)) {
-+ PRINT_WARNING("Persistent Reservation command %x refused for "
-+ "device %s, because the device has not supporting PR "
-+ "transports connected", cmd->cdb[0], dev->virt_name);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
-+ goto out_done;
-+ }
-+
-+ action = cmd->cdb[1] & 0x1f;
-+
-+ TRACE(TRACE_SCSI, "PR action %x for '%s' (LUN %llx) from '%s'", action,
-+ dev->virt_name, tgt_dev->lun, session->initiator_name);
-+
-+ if (dev->dev_reserved) {
-+ TRACE_PR("PR command rejected, because device %s holds regular "
-+ "reservation", dev->virt_name);
-+ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
-+ goto out_done;
-+ }
-+
-+ /*
-+ * Check if tgt_dev is already registered. This check also makes
-+ * sure that the table "PERSISTENT RESERVE OUT service actions that are
-+ * allowed in the presence of various reservations" is honored.
-+ * REGISTER AND MOVE and RESERVE will be additionally checked for
-+ * conflicts later.
-+ */
-+ if ((action != PR_REGISTER) && (action != PR_REGISTER_AND_IGNORE) &&
-+ (tgt_dev->registrant == NULL)) {
-+ TRACE_PR("'%s' not registered", cmd->sess->initiator_name);
-+ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
-+ goto out_done;
-+ }
-+
-+ buffer_size = scst_get_buf_full(cmd, &buffer);
-+ if (unlikely(buffer_size <= 0)) {
-+ if (buffer_size < 0)
-+ scst_set_busy(cmd);
-+ goto out_done;
-+ }
-+
-+ /* Check scope */
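-+ /*
-+ * Editor's note: in the PR OUT CDB, byte 2 carries the SCOPE field in
-+ * bits 7-4 and the TYPE field in bits 3-0 (SPC), hence the 0xf0 mask
-+ * below.
-+ */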
-+ if ((action != PR_REGISTER) && (action != PR_REGISTER_AND_IGNORE) &&
-+ (action != PR_CLEAR) && ((cmd->cdb[2] & 0x0f) >> 4) != SCOPE_LU) {
-+ TRACE_PR("Scope must be SCOPE_LU for action %x", action);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_put_buf_full;
-+ }
-+
-+ /* Check SPEC_I_PT (PR_REGISTER_AND_MOVE has another format) */
-+ if ((action != PR_REGISTER) && (action != PR_REGISTER_AND_MOVE) &&
-+ ((buffer[20] >> 3) & 0x01)) {
-+ TRACE_PR("SPEC_I_PT must be zero for action %x", action);
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
-+ scst_sense_invalid_field_in_cdb));
-+ goto out_put_buf_full;
-+ }
-+
-+ /* Check ALL_TG_PT (PR_REGISTER_AND_MOVE has another format) */
-+ if ((action != PR_REGISTER) && (action != PR_REGISTER_AND_IGNORE) &&
-+ (action != PR_REGISTER_AND_MOVE) && ((buffer[20] >> 2) & 0x01)) {
-+ TRACE_PR("ALL_TG_PT must be zero for action %x", action);
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
-+ scst_sense_invalid_field_in_cdb));
-+ goto out_put_buf_full;
-+ }
-+
-+ scst_pr_write_lock(dev);
-+
-+ /* We can be aborted by another PR command while waiting for the lock */
-+ aborted = test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
-+ if (unlikely(aborted)) {
-+ TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
-+ goto out_unlock;
-+ }
-+
-+ switch (action) {
-+ case PR_REGISTER:
-+ scst_pr_register(cmd, buffer, buffer_size);
-+ break;
-+ case PR_RESERVE:
-+ scst_pr_reserve(cmd, buffer, buffer_size);
-+ break;
-+ case PR_RELEASE:
-+ scst_pr_release(cmd, buffer, buffer_size);
-+ break;
-+ case PR_CLEAR:
-+ scst_pr_clear(cmd, buffer, buffer_size);
-+ break;
-+ case PR_PREEMPT:
-+ scst_pr_preempt(cmd, buffer, buffer_size);
-+ break;
-+ case PR_PREEMPT_AND_ABORT:
-+ scst_pr_preempt_and_abort(cmd, buffer, buffer_size);
-+ break;
-+ case PR_REGISTER_AND_IGNORE:
-+ scst_pr_register_and_ignore(cmd, buffer, buffer_size);
-+ break;
-+ case PR_REGISTER_AND_MOVE:
-+ scst_pr_register_and_move(cmd, buffer, buffer_size);
-+ break;
-+ default:
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_unlock;
-+ }
-+
-+ if (cmd->status == SAM_STAT_GOOD)
-+ scst_pr_sync_device_file(tgt_dev, cmd);
-+
-+ if ((dev->handler->pr_cmds_notifications) &&
-+ (cmd->status == SAM_STAT_GOOD)) /* sync file may change status */
-+ res = SCST_EXEC_NOT_COMPLETED;
-+
-+out_unlock:
-+ scst_pr_write_unlock(dev);
-+
-+out_put_buf_full:
-+ scst_put_buf_full(cmd, buffer);
-+
-+out_done:
-+ if (SCST_EXEC_COMPLETED == res) {
-+ if (!aborted)
-+ cmd->completed = 1;
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT,
-+ SCST_CONTEXT_SAME);
-+ }
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/**
-+ * scst_check_local_events() - check if there are any local SCSI events
-+ *
-+ * Description:
-+ * Checks if the command can be executed or there are local events,
-+ * like reservations, pending UAs, etc. Returns < 0 if command must be
-+ * aborted, > 0 if there is an event and command should be immediately
-+ * completed, or 0 otherwise.
-+ *
-+ * !! 1. Dev handlers implementing the exec() callback must call this function there
-+ * !! just before the actual command's execution!
-+ * !!
-+ * !! 2. If this function can be called more than once on the processing path,
-+ * !! scst_pre_check_local_events() should be used for the first call!
-+ *
-+ * On call no locks, no IRQ or IRQ-disabled context allowed.
-+ */
-+int scst_check_local_events(struct scst_cmd *cmd)
-+{
-+ int res, rc;
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+ struct scst_device *dev = cmd->dev;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+ * There's no race here, because we need to trace commands sent
-+ * *after* dev_double_ua_possible flag was set.
-+ */
-+ if (unlikely(dev->dev_double_ua_possible))
-+ cmd->double_ua_possible = 1;
-+
-+ /* Reserve check before Unit Attention */
-+ if (unlikely(test_bit(SCST_TGT_DEV_RESERVED,
-+ &tgt_dev->tgt_dev_flags))) {
-+ if ((cmd->op_flags & SCST_REG_RESERVE_ALLOWED) == 0) {
-+ scst_set_cmd_error_status(cmd,
-+ SAM_STAT_RESERVATION_CONFLICT);
-+ goto out_dec_pr_readers_count;
-+ }
-+ }
-+
-+ if (likely(!cmd->check_local_events_once_done)) {
-+ if (dev->pr_is_set) {
-+ if (unlikely(!scst_pr_is_cmd_allowed(cmd))) {
-+ scst_set_cmd_error_status(cmd,
-+ SAM_STAT_RESERVATION_CONFLICT);
-+ goto out_complete;
-+ }
-+ } else
-+ scst_dec_pr_readers_count(cmd, false);
-+ }
-+
-+ /*
-+ * Let's check for ABORTED after scst_pr_is_cmd_allowed(), because
-+ * we might sleep for a while there.
-+ */
-+ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
-+ TRACE_MGMT_DBG("ABORTED set, aborting cmd %p", cmd);
-+ goto out_uncomplete;
-+ }
-+
-+ /* If we had internal bus reset, set the command error unit attention */
-+ if ((dev->scsi_dev != NULL) &&
-+ unlikely(dev->scsi_dev->was_reset)) {
-+ if (scst_is_ua_command(cmd)) {
-+ int done = 0;
-+ /*
-+ * Prevent more than one cmd from being triggered by was_reset
-+ */
-+ spin_lock_bh(&dev->dev_lock);
-+ if (dev->scsi_dev->was_reset) {
-+ TRACE(TRACE_MGMT, "was_reset is %d", 1);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_reset_UA));
-+ /*
-+ * It looks like it is safe to clear was_reset
-+ * here
-+ */
-+ dev->scsi_dev->was_reset = 0;
-+ done = 1;
-+ }
-+ spin_unlock_bh(&dev->dev_lock);
-+
-+ if (done)
-+ goto out_complete;
-+ }
-+ }
-+
-+ if (unlikely(test_bit(SCST_TGT_DEV_UA_PENDING,
-+ &cmd->tgt_dev->tgt_dev_flags))) {
-+ if (scst_is_ua_command(cmd)) {
-+ rc = scst_set_pending_UA(cmd);
-+ if (rc == 0)
-+ goto out_complete;
-+ }
-+ }
-+
-+ res = 0;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_dec_pr_readers_count:
-+ if (cmd->dec_pr_readers_count_needed)
-+ scst_dec_pr_readers_count(cmd, false);
-+
-+out_complete:
-+ res = 1;
-+ BUG_ON(!cmd->completed);
-+ goto out;
-+
-+out_uncomplete:
-+ res = -1;
-+ goto out;
-+}
-+EXPORT_SYMBOL_GPL(scst_check_local_events);
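-+
-+/*
-+ * Editor's illustrative sketch (not part of SCST): per the comment above,
-+ * a dev handler's exec() callback calls scst_check_local_events() right
-+ * before actually executing the command; the rc handling mirrors
-+ * scst_report_luns_local() above. my_devtype_exec() is hypothetical:
-+ *
-+ *	static int my_devtype_exec(struct scst_cmd *cmd)
-+ *	{
-+ *		int rc = scst_check_local_events(cmd);
-+ *
-+ *		if (unlikely(rc != 0))
-+ *			goto out_done;
-+ *
-+ *		... actually execute the command ...
-+ *
-+ *		cmd->completed = 1;
-+ *
-+ *	out_done:
-+ *		cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT,
-+ *			SCST_CONTEXT_SAME);
-+ *		return SCST_EXEC_COMPLETED;
-+ *	}
-+ */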
-+
-+/* No locks */
-+void scst_inc_expected_sn(struct scst_order_data *order_data, atomic_t *slot)
-+{
-+ if (slot == NULL)
-+ goto inc;
-+
-+ /* Optimized for lockless fast path */
-+
-+ TRACE_SN("Slot %zd, *cur_sn_slot %d", slot - order_data->sn_slots,
-+ atomic_read(slot));
-+
-+ if (!atomic_dec_and_test(slot))
-+ goto out;
-+
-+ TRACE_SN("Slot is 0 (num_free_sn_slots=%d)",
-+ order_data->num_free_sn_slots);
-+ if (order_data->num_free_sn_slots < (int)ARRAY_SIZE(order_data->sn_slots)-1) {
-+ spin_lock_irq(&order_data->sn_lock);
-+ if (likely(order_data->num_free_sn_slots < (int)ARRAY_SIZE(order_data->sn_slots)-1)) {
-+ if (order_data->num_free_sn_slots < 0)
-+ order_data->cur_sn_slot = slot;
-+ /* To be in-sync with SIMPLE case in scst_cmd_set_sn() */
-+ smp_mb();
-+ order_data->num_free_sn_slots++;
-+ TRACE_SN("Incremented num_free_sn_slots (%d)",
-+ order_data->num_free_sn_slots);
-+
-+ }
-+ spin_unlock_irq(&order_data->sn_lock);
-+ }
-+
-+inc:
-+ /*
-+ * No protection of expected_sn is needed, because only one thread
-+ * at a time can be here (serialized by sn). It is also assumed that
-+ * the increment can't be observed half-completed.
-+ */
-+ order_data->expected_sn++;
-+ /*
-+ * Write must be before def_cmd_count read to be in sync. with
-+ * scst_post_exec_sn(). See comment in scst_send_for_exec().
-+ */
-+ smp_mb();
-+ TRACE_SN("Next expected_sn: %d", order_data->expected_sn);
-+
-+out:
-+ return;
-+}
-+
-+/* No locks */
-+static struct scst_cmd *scst_post_exec_sn(struct scst_cmd *cmd,
-+ bool make_active)
-+{
-+ /* For HQ commands SN is not set */
-+ bool inc_expected_sn = !cmd->inc_expected_sn_on_done &&
-+ cmd->sn_set && !cmd->retry;
-+ struct scst_order_data *order_data = cmd->cur_order_data;
-+ struct scst_cmd *res;
-+
-+ TRACE_ENTRY();
-+
-+ if (inc_expected_sn)
-+ scst_inc_expected_sn(order_data, cmd->sn_slot);
-+
-+ if (make_active) {
-+ scst_make_deferred_commands_active(order_data);
-+ res = NULL;
-+ } else
-+ res = scst_check_deferred_commands(order_data);
-+
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
-+
-+/* cmd must be additionally referenced to not die inside */
-+static int scst_do_real_exec(struct scst_cmd *cmd)
-+{
-+ int res = SCST_EXEC_NOT_COMPLETED;
-+ int rc;
-+ struct scst_device *dev = cmd->dev;
-+ struct scst_dev_type *handler = dev->handler;
-+ struct io_context *old_ctx = NULL;
-+ bool ctx_changed = false;
-+ struct scsi_device *scsi_dev;
-+
-+ TRACE_ENTRY();
-+
-+ ctx_changed = scst_set_io_context(cmd, &old_ctx);
-+
-+ cmd->state = SCST_CMD_STATE_REAL_EXECUTING;
-+
-+ if (handler->exec) {
-+ TRACE_DBG("Calling dev handler %s exec(%p)",
-+ handler->name, cmd);
-+ TRACE_BUFF_FLAG(TRACE_SND_TOP, "Execing: ", cmd->cdb,
-+ cmd->cdb_len);
-+ scst_set_cur_start(cmd);
-+ res = handler->exec(cmd);
-+ TRACE_DBG("Dev handler %s exec() returned %d",
-+ handler->name, res);
-+
-+ if (res == SCST_EXEC_COMPLETED)
-+ goto out_complete;
-+
-+ scst_set_exec_time(cmd);
-+
-+ BUG_ON(res != SCST_EXEC_NOT_COMPLETED);
-+ }
-+
-+ TRACE_DBG("Sending cmd %p to SCSI mid-level", cmd);
-+
-+ scsi_dev = dev->scsi_dev;
-+
-+ if (unlikely(scsi_dev == NULL)) {
-+ PRINT_ERROR("Command for virtual device must be "
-+ "processed by device handler (LUN %lld)!",
-+ (long long unsigned int)cmd->lun);
-+ goto out_error;
-+ }
-+
-+ res = scst_check_local_events(cmd);
-+ if (unlikely(res != 0))
-+ goto out_done;
-+
-+ scst_set_cur_start(cmd);
-+
-+ rc = scst_scsi_exec_async(cmd, cmd, scst_pass_through_cmd_done);
-+ if (unlikely(rc != 0)) {
-+ PRINT_ERROR("scst pass-through exec failed: %x", rc);
-+ if (((int)rc == -EINVAL) &&
-+ (cmd->bufflen > queue_max_hw_sectors(scsi_dev->request_queue)))
-+ PRINT_ERROR("Too low max_hw_sectors %d sectors on %s "
-+ "to serve command %x with bufflen %db."
-+ "See README for more details.",
-+ queue_max_hw_sectors(scsi_dev->request_queue),
-+ dev->virt_name, cmd->cdb[0], cmd->bufflen);
-+ goto out_error;
-+ }
-+
-+out_complete:
-+ res = SCST_EXEC_COMPLETED;
-+
-+ if (ctx_changed)
-+ scst_reset_io_context(cmd->tgt_dev, old_ctx);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_error:
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out_done;
-+
-+out_done:
-+ res = SCST_EXEC_COMPLETED;
-+ /* Report the result */
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
-+ goto out_complete;
-+}
-+
-+static inline int scst_real_exec(struct scst_cmd *cmd)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
-+ BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
-+
-+ __scst_cmd_get(cmd);
-+
-+ res = scst_do_real_exec(cmd);
-+ if (likely(res == SCST_EXEC_COMPLETED)) {
-+ scst_post_exec_sn(cmd, true);
-+ } else
-+ BUG();
-+
-+ __scst_cmd_put(cmd);
-+
-+ /* SCST_EXEC_* match SCST_CMD_STATE_RES_* */
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_do_local_exec(struct scst_cmd *cmd)
-+{
-+ int res;
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ /* Check READ_ONLY device status */
-+ if ((cmd->op_flags & SCST_WRITE_MEDIUM) &&
-+ (tgt_dev->acg_dev->rd_only || cmd->dev->swp ||
-+ cmd->dev->rd_only)) {
-+ PRINT_WARNING("Attempt of write access to read-only device: "
-+ "initiator %s, LUN %lld, op %x",
-+ cmd->sess->initiator_name, cmd->lun, cmd->cdb[0]);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_data_protect));
-+ goto out_done;
-+ }
-+
-+ if (!scst_is_cmd_local(cmd)) {
-+ res = SCST_EXEC_NOT_COMPLETED;
-+ goto out;
-+ }
-+
-+ switch (cmd->cdb[0]) {
-+ case RESERVE:
-+ case RESERVE_10:
-+ res = scst_reserve_local(cmd);
-+ break;
-+ case RELEASE:
-+ case RELEASE_10:
-+ res = scst_release_local(cmd);
-+ break;
-+ case PERSISTENT_RESERVE_IN:
-+ res = scst_persistent_reserve_in_local(cmd);
-+ break;
-+ case PERSISTENT_RESERVE_OUT:
-+ res = scst_persistent_reserve_out_local(cmd);
-+ break;
-+ case REPORT_LUNS:
-+ res = scst_report_luns_local(cmd);
-+ break;
-+ case REQUEST_SENSE:
-+ res = scst_request_sense_local(cmd);
-+ break;
-+ default:
-+ res = SCST_EXEC_NOT_COMPLETED;
-+ break;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_done:
-+ /* Report the result */
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
-+ res = SCST_EXEC_COMPLETED;
-+ goto out;
-+}
-+
-+static int scst_local_exec(struct scst_cmd *cmd)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_SAME != SCST_EXEC_NOT_COMPLETED);
-+ BUILD_BUG_ON(SCST_CMD_STATE_RES_CONT_NEXT != SCST_EXEC_COMPLETED);
-+
-+ __scst_cmd_get(cmd);
-+
-+ res = scst_do_local_exec(cmd);
-+ if (likely(res == SCST_EXEC_NOT_COMPLETED))
-+ cmd->state = SCST_CMD_STATE_REAL_EXEC;
-+ else if (res == SCST_EXEC_COMPLETED)
-+ scst_post_exec_sn(cmd, true);
-+ else
-+ BUG();
-+
-+ __scst_cmd_put(cmd);
-+
-+ /* SCST_EXEC_* match SCST_CMD_STATE_RES_* */
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_exec(struct scst_cmd **active_cmd)
-+{
-+ struct scst_cmd *cmd = *active_cmd;
-+ struct scst_cmd *ref_cmd;
-+ int res = SCST_CMD_STATE_RES_CONT_NEXT, count = 0;
-+
-+ TRACE_ENTRY();
-+
-+ cmd->state = SCST_CMD_STATE_START_EXEC;
-+
-+ if (unlikely(scst_check_blocked_dev(cmd)))
-+ goto out;
-+
-+ /* To protect tgt_dev */
-+ ref_cmd = cmd;
-+ __scst_cmd_get(ref_cmd);
-+
-+ while (1) {
-+ int rc;
-+
-+ cmd->sent_for_exec = 1;
-+ /*
-+ * To sync with scst_abort_cmd(). The above assignment must
-+ * be before SCST_CMD_ABORTED test, done later in
-+ * scst_check_local_events(). It's far from here, so the order
-+ * is virtually guaranteed, but let's have it just in case.
-+ */
-+ smp_mb();
-+
-+ cmd->scst_cmd_done = scst_cmd_done_local;
-+ cmd->state = SCST_CMD_STATE_LOCAL_EXEC;
-+
-+ rc = scst_do_local_exec(cmd);
-+ if (likely(rc == SCST_EXEC_NOT_COMPLETED))
-+ /* Nothing to do */;
-+ else {
-+ BUG_ON(rc != SCST_EXEC_COMPLETED);
-+ goto done;
-+ }
-+
-+ cmd->state = SCST_CMD_STATE_REAL_EXEC;
-+
-+ rc = scst_do_real_exec(cmd);
-+ BUG_ON(rc != SCST_EXEC_COMPLETED);
-+
-+done:
-+ count++;
-+
-+ cmd = scst_post_exec_sn(cmd, false);
-+ if (cmd == NULL)
-+ break;
-+
-+ cmd->state = SCST_CMD_STATE_START_EXEC;
-+
-+ if (unlikely(scst_check_blocked_dev(cmd)))
-+ break;
-+
-+ __scst_cmd_put(ref_cmd);
-+ ref_cmd = cmd;
-+ __scst_cmd_get(ref_cmd);
-+
-+ }
-+
-+ *active_cmd = cmd;
-+
-+out_put:
-+ __scst_cmd_put(ref_cmd);
-+ /* !! At this point sess, dev and tgt_dev can be already freed !! */
-+
-+out:
-+ EXTRACHECKS_BUG_ON(res == SCST_CMD_STATE_RES_NEED_THREAD);
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_send_for_exec(struct scst_cmd **active_cmd)
-+{
-+ int res;
-+ struct scst_cmd *cmd = *active_cmd;
-+ struct scst_order_data *order_data = cmd->cur_order_data;
-+ typeof(order_data->expected_sn) expected_sn;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(cmd->internal))
-+ goto exec;
-+
-+ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
-+ goto exec;
-+
-+ BUG_ON(!cmd->sn_set);
-+
-+ expected_sn = order_data->expected_sn;
-+ /* Optimized for lockless fast path */
-+ if ((cmd->sn != expected_sn) || (order_data->hq_cmd_count > 0)) {
-+ spin_lock_irq(&order_data->sn_lock);
-+
-+ order_data->def_cmd_count++;
-+ /*
-+ * A memory barrier is needed here to implement the lockless fast
-+ * path. We need the exact order of the read and write between
-+ * def_cmd_count and expected_sn. Otherwise, we could miss the case
-+ * where expected_sn was changed to be equal to cmd->sn while we
-+ * are queueing cmd on the deferred list after rereading expected_sn
-+ * below, which would leave the command stuck forever. With the
-+ * barrier, in such a case __scst_check_deferred_commands() will be
-+ * called and it will take sn_lock, so we will be synchronized.
-+ */
-+ smp_mb();
-+
-+ expected_sn = order_data->expected_sn;
-+ if ((cmd->sn != expected_sn) || (order_data->hq_cmd_count > 0)) {
-+ if (unlikely(test_bit(SCST_CMD_ABORTED,
-+ &cmd->cmd_flags))) {
-+ /* Necessary to allow aborting out of sn cmds */
-+ TRACE_MGMT_DBG("Aborting out of sn cmd %p "
-+ "(tag %llu, sn %u)", cmd,
-+ (long long unsigned)cmd->tag, cmd->sn);
-+ order_data->def_cmd_count--;
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ res = SCST_CMD_STATE_RES_CONT_SAME;
-+ } else {
-+ TRACE_SN("Deferring cmd %p (sn=%d, set %d, "
-+ "expected_sn=%d)", cmd, cmd->sn,
-+ cmd->sn_set, expected_sn);
-+ list_add_tail(&cmd->sn_cmd_list_entry,
-+ &order_data->deferred_cmd_list);
-+ res = SCST_CMD_STATE_RES_CONT_NEXT;
-+ }
-+ spin_unlock_irq(&order_data->sn_lock);
-+ goto out;
-+ } else {
-+ TRACE_SN("Somebody incremented expected_sn %d, "
-+ "continuing", expected_sn);
-+ order_data->def_cmd_count--;
-+ spin_unlock_irq(&order_data->sn_lock);
-+ }
-+ }
-+
-+exec:
-+ res = scst_exec(active_cmd);
-+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
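-+
-+/*
-+ * Editor's note: as a worked example of the deferral above, with
-+ * expected_sn == 5 a command with sn == 7 is parked on deferred_cmd_list
-+ * and becomes runnable only after the two older commands have passed
-+ * through scst_inc_expected_sn().
-+ */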
-+
-+/* No locks supposed to be held */
-+static int scst_check_sense(struct scst_cmd *cmd)
-+{
-+ int res = 0;
-+ struct scst_device *dev = cmd->dev;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(cmd->ua_ignore))
-+ goto out;
-+
-+ /* If we had internal bus reset behind us, set the command error UA */
-+ if ((dev->scsi_dev != NULL) &&
-+ unlikely(cmd->host_status == DID_RESET) &&
-+ scst_is_ua_command(cmd)) {
-+ TRACE(TRACE_MGMT, "DID_RESET: was_reset=%d host_status=%x",
-+ dev->scsi_dev->was_reset, cmd->host_status);
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_reset_UA));
-+ /* It looks like it is safe to clear was_reset here */
-+ dev->scsi_dev->was_reset = 0;
-+ }
-+
-+ if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
-+ SCST_SENSE_VALID(cmd->sense)) {
-+ PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
-+ cmd->sense_valid_len);
-+
-+ /* Check Unit Attention Sense Key */
-+ if (scst_is_ua_sense(cmd->sense, cmd->sense_valid_len)) {
-+ if (scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
-+ SCST_SENSE_ASC_VALID,
-+ 0, SCST_SENSE_ASC_UA_RESET, 0)) {
-+ if (cmd->double_ua_possible) {
-+ TRACE_MGMT_DBG("Double UA "
-+ "detected for device %p", dev);
-+ TRACE_MGMT_DBG("Retrying cmd"
-+ " %p (tag %llu)", cmd,
-+ (long long unsigned)cmd->tag);
-+
-+ cmd->status = 0;
-+ cmd->msg_status = 0;
-+ cmd->host_status = DID_OK;
-+ cmd->driver_status = 0;
-+ cmd->completed = 0;
-+
-+ mempool_free(cmd->sense,
-+ scst_sense_mempool);
-+ cmd->sense = NULL;
-+
-+ scst_check_restore_sg_buff(cmd);
-+
-+ BUG_ON(cmd->dbl_ua_orig_resp_data_len < 0);
-+ cmd->data_direction =
-+ cmd->dbl_ua_orig_data_direction;
-+ cmd->resp_data_len =
-+ cmd->dbl_ua_orig_resp_data_len;
-+
-+ cmd->state = SCST_CMD_STATE_REAL_EXEC;
-+ cmd->retry = 1;
-+ scst_reset_requeued_cmd(cmd);
-+ res = 1;
-+ goto out;
-+ }
-+ }
-+ scst_dev_check_set_UA(dev, cmd, cmd->sense,
-+ cmd->sense_valid_len);
-+ }
-+ }
-+
-+ if (unlikely(cmd->double_ua_possible)) {
-+ if (scst_is_ua_command(cmd)) {
-+ TRACE_MGMT_DBG("Clearing dbl_ua_possible flag (dev %p, "
-+ "cmd %p)", dev, cmd);
-+ /*
-+ * Lock used to protect other flags in the bitfield
-+ * (just in case, actually). Those flags can't be
-+ * changed in parallel, because the device is
-+ * serialized.
-+ */
-+ spin_lock_bh(&dev->dev_lock);
-+ dev->dev_double_ua_possible = 0;
-+ spin_unlock_bh(&dev->dev_lock);
-+ }
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
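-+
-+/*
-+ * Editor's note: the retry path above handles the "double UA" case: while
-+ * dev_double_ua_possible is set, a reset UA may be reported for a command
-+ * that was also affected by the reset itself, so the command's status and
-+ * buffers are restored and it is re-executed with cmd->retry set.
-+ */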
-+
-+static int scst_check_auto_sense(struct scst_cmd *cmd)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION) &&
-+ (!SCST_SENSE_VALID(cmd->sense) ||
-+ SCST_NO_SENSE(cmd->sense))) {
-+ TRACE(TRACE_SCSI|TRACE_MINOR_AND_MGMT_DBG, "CHECK_CONDITION, "
-+ "but no sense: cmd->status=%x, cmd->msg_status=%x, "
-+ "cmd->host_status=%x, cmd->driver_status=%x (cmd %p)",
-+ cmd->status, cmd->msg_status, cmd->host_status,
-+ cmd->driver_status, cmd);
-+ res = 1;
-+ } else if (unlikely(cmd->host_status)) {
-+ if ((cmd->host_status == DID_REQUEUE) ||
-+ (cmd->host_status == DID_IMM_RETRY) ||
-+ (cmd->host_status == DID_SOFT_ERROR) ||
-+ (cmd->host_status == DID_ABORT)) {
-+ scst_set_busy(cmd);
-+ } else {
-+ TRACE(TRACE_SCSI|TRACE_MINOR_AND_MGMT_DBG, "Host "
-+ "status %x received, returning HARDWARE ERROR "
-+ "instead (cmd %p)", cmd->host_status, cmd);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ }
-+ }
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_pre_dev_done(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_RES_CONT_SAME, rc;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(scst_check_auto_sense(cmd))) {
-+ PRINT_INFO("Command finished with CHECK CONDITION, but "
-+ "without sense data (opcode 0x%x), issuing "
-+ "REQUEST SENSE", cmd->cdb[0]);
-+ rc = scst_prepare_request_sense(cmd);
-+ if (rc == 0)
-+ res = SCST_CMD_STATE_RES_CONT_NEXT;
-+ else {
-+ PRINT_ERROR("%s", "Unable to issue REQUEST SENSE, "
-+ "returning HARDWARE ERROR");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ }
-+ goto out;
-+ } else if (unlikely(scst_check_sense(cmd))) {
-+ /*
-+ * We can't allow atomic commands on the exec stages, so
-+ * restart in a thread context
-+ */
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+ }
-+
-+ if (likely(scsi_status_is_good(cmd->status))) {
-+ unsigned char type = cmd->dev->type;
-+ if (unlikely((cmd->cdb[0] == MODE_SENSE ||
-+ cmd->cdb[0] == MODE_SENSE_10)) &&
-+ (cmd->tgt_dev->acg_dev->rd_only || cmd->dev->swp ||
-+ cmd->dev->rd_only) &&
-+ (type == TYPE_DISK ||
-+ type == TYPE_WORM ||
-+ type == TYPE_MOD ||
-+ type == TYPE_TAPE)) {
-+ int32_t length;
-+ uint8_t *address;
-+ bool err = false;
-+
-+ length = scst_get_buf_full(cmd, &address);
-+ if (length < 0) {
-+ PRINT_ERROR("%s", "Unable to get "
-+ "MODE_SENSE buffer");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(
-+ scst_sense_hardw_error));
-+ err = true;
-+ } else if (length > 2 && cmd->cdb[0] == MODE_SENSE)
-+ address[2] |= 0x80; /* Write Protect */
-+ else if (length > 3 && cmd->cdb[0] == MODE_SENSE_10)
-+ address[3] |= 0x80; /* Write Protect */
-+ scst_put_buf_full(cmd, address);
-+
-+ if (err)
-+ goto out;
-+ }
-+
-+ /*
-+ * Check and clear NormACA option for the device, if necessary,
-+ * since we don't support ACA
-+ */
-+ if (unlikely((cmd->cdb[0] == INQUIRY)) &&
-+ /* Std INQUIRY data (no EVPD) */
-+ !(cmd->cdb[1] & SCST_INQ_EVPD) &&
-+ (cmd->resp_data_len > SCST_INQ_BYTE3)) {
-+ uint8_t *buffer;
-+ int buflen;
-+ bool err = false;
-+
-+ buflen = scst_get_buf_full(cmd, &buffer);
-+ if (buflen > SCST_INQ_BYTE3 && !cmd->tgtt->fake_aca) {
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ if (buffer[SCST_INQ_BYTE3] & SCST_INQ_NORMACA_BIT) {
-+ PRINT_INFO("NormACA set for device: "
-+ "lun=%lld, type 0x%02x. Clear it, "
-+ "since it's unsupported.",
-+ (long long unsigned int)cmd->lun,
-+ buffer[0]);
-+ }
-+#endif
-+ buffer[SCST_INQ_BYTE3] &= ~SCST_INQ_NORMACA_BIT;
-+ } else if (buflen <= SCST_INQ_BYTE3 && buflen != 0) {
-+ PRINT_ERROR("%s", "Unable to get INQUIRY "
-+ "buffer");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ err = true;
-+ }
-+ if (buflen > 0)
-+ scst_put_buf_full(cmd, buffer);
-+
-+ if (err)
-+ goto out;
-+ }
-+
-+ if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
-+ (cmd->cdb[0] == MODE_SELECT_10) ||
-+ (cmd->cdb[0] == LOG_SELECT))) {
-+ TRACE(TRACE_SCSI,
-+ "MODE/LOG SELECT succeeded (LUN %lld)",
-+ (long long unsigned int)cmd->lun);
-+ cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
-+ goto out;
-+ }
-+ } else {
-+ TRACE(TRACE_SCSI, "cmd %p not succeeded with status %x",
-+ cmd, cmd->status);
-+
-+ if ((cmd->cdb[0] == RESERVE) || (cmd->cdb[0] == RESERVE_10)) {
-+ if (!test_bit(SCST_TGT_DEV_RESERVED,
-+ &cmd->tgt_dev->tgt_dev_flags)) {
-+ struct scst_tgt_dev *tgt_dev_tmp;
-+ struct scst_device *dev = cmd->dev;
-+
-+ TRACE(TRACE_SCSI, "RESERVE failed lun=%lld, "
-+ "status=%x",
-+ (long long unsigned int)cmd->lun,
-+ cmd->status);
-+ PRINT_BUFF_FLAG(TRACE_SCSI, "Sense", cmd->sense,
-+ cmd->sense_valid_len);
-+
-+ /* Clearing the reservation */
-+ spin_lock_bh(&dev->dev_lock);
-+ list_for_each_entry(tgt_dev_tmp,
-+ &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ clear_bit(SCST_TGT_DEV_RESERVED,
-+ &tgt_dev_tmp->tgt_dev_flags);
-+ }
-+ dev->dev_reserved = 0;
-+ spin_unlock_bh(&dev->dev_lock);
-+ }
-+ }
-+
-+ /* Check for MODE PARAMETERS CHANGED UA */
-+ if ((cmd->dev->scsi_dev != NULL) &&
-+ (cmd->status == SAM_STAT_CHECK_CONDITION) &&
-+ scst_is_ua_sense(cmd->sense, cmd->sense_valid_len) &&
-+ scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
-+ SCST_SENSE_ASCx_VALID,
-+ 0, 0x2a, 0x01)) {
-+ TRACE(TRACE_SCSI, "MODE PARAMETERS CHANGED UA (lun "
-+ "%lld)", (long long unsigned int)cmd->lun);
-+ cmd->state = SCST_CMD_STATE_MODE_SELECT_CHECKS;
-+ goto out;
-+ }
-+ }
-+
-+ cmd->state = SCST_CMD_STATE_DEV_DONE;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_mode_select_checks(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_RES_CONT_SAME;
-+
-+ TRACE_ENTRY();
-+
-+ if (likely(scsi_status_is_good(cmd->status))) {
-+ int atomic = scst_cmd_atomic(cmd);
-+ if (unlikely((cmd->cdb[0] == MODE_SELECT) ||
-+ (cmd->cdb[0] == MODE_SELECT_10) ||
-+ (cmd->cdb[0] == LOG_SELECT))) {
-+ struct scst_device *dev = cmd->dev;
-+ int sl;
-+ uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
-+
-+ if (atomic && (dev->scsi_dev != NULL)) {
-+ TRACE_DBG("%s", "MODE/LOG SELECT: thread "
-+ "context required");
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+ }
-+
-+ TRACE(TRACE_SCSI, "MODE/LOG SELECT succeeded, "
-+ "setting the SELECT UA (lun=%lld)",
-+ (long long unsigned int)cmd->lun);
-+
-+ spin_lock_bh(&dev->dev_lock);
-+ if (cmd->cdb[0] == LOG_SELECT) {
-+ sl = scst_set_sense(sense_buffer,
-+ sizeof(sense_buffer),
-+ dev->d_sense,
-+ UNIT_ATTENTION, 0x2a, 0x02);
-+ } else {
-+ sl = scst_set_sense(sense_buffer,
-+ sizeof(sense_buffer),
-+ dev->d_sense,
-+ UNIT_ATTENTION, 0x2a, 0x01);
-+ }
-+ scst_dev_check_set_local_UA(dev, cmd, sense_buffer, sl);
-+ spin_unlock_bh(&dev->dev_lock);
-+
-+ if (dev->scsi_dev != NULL)
-+ scst_obtain_device_parameters(dev);
-+ }
-+ } else if ((cmd->status == SAM_STAT_CHECK_CONDITION) &&
-+ scst_is_ua_sense(cmd->sense, cmd->sense_valid_len) &&
-+ /* mode parameters changed */
-+ (scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
-+ SCST_SENSE_ASCx_VALID,
-+ 0, 0x2a, 0x01) ||
-+ scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
-+ SCST_SENSE_ASC_VALID,
-+ 0, 0x29, 0) /* reset */ ||
-+ scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
-+ SCST_SENSE_ASC_VALID,
-+ 0, 0x28, 0) /* medium changed */ ||
-+ /* cleared by another ini (just in case) */
-+ scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
-+ SCST_SENSE_ASC_VALID,
-+ 0, 0x2F, 0))) {
-+ int atomic = scst_cmd_atomic(cmd);
-+ if (atomic) {
-+ TRACE_DBG("Possible parameters changed UA %x: "
-+ "thread context required", cmd->sense[12]);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+ }
-+
-+ TRACE(TRACE_SCSI, "Possible parameters changed UA %x "
-+ "(LUN %lld): getting new parameters", cmd->sense[12],
-+ (long long unsigned int)cmd->lun);
-+
-+ scst_obtain_device_parameters(cmd->dev);
-+ } else
-+ BUG();
-+
-+ cmd->state = SCST_CMD_STATE_DEV_DONE;
-+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
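-+
-+/*
-+ * Illustrative sketch, not part of this patch: the ASC/ASCQ pairs checked
-+ * above follow SPC (0x2a/0x01 MODE PARAMETERS CHANGED, 0x2a/0x02 LOG
-+ * PARAMETERS CHANGED, 0x29 power on/reset, 0x28 medium changed, 0x2F
-+ * commands cleared by another initiator). A hypothetical caller could
-+ * test for one of them the same way:
-+ *
-+ *	if (scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
-+ *			SCST_SENSE_ASCx_VALID, 0, 0x2a, 0x01)) {
-+ *		... mode parameters changed on the underlying device ...
-+ *	}
-+ */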
-+
-+static void scst_inc_check_expected_sn(struct scst_cmd *cmd)
-+{
-+ if (likely(cmd->sn_set))
-+ scst_inc_expected_sn(cmd->cur_order_data, cmd->sn_slot);
-+
-+ scst_make_deferred_commands_active(cmd->cur_order_data);
-+}
-+
-+static int scst_dev_done(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_RES_CONT_SAME;
-+ int state;
-+ struct scst_device *dev = cmd->dev;
-+
-+ TRACE_ENTRY();
-+
-+ state = SCST_CMD_STATE_PRE_XMIT_RESP;
-+
-+ if (likely(!scst_is_cmd_fully_local(cmd)) &&
-+ likely(dev->handler->dev_done != NULL)) {
-+ int rc;
-+
-+ if (unlikely(!dev->handler->dev_done_atomic &&
-+ scst_cmd_atomic(cmd))) {
-+			/*
-+			 * This shouldn't happen, thanks to the
-+			 * SCST_TGT_DEV_AFTER_* optimization.
-+			 */
-+ TRACE_MGMT_DBG("Dev handler %s dev_done() needs thread "
-+ "context, rescheduling", dev->handler->name);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+ }
-+
-+ TRACE_DBG("Calling dev handler %s dev_done(%p)",
-+ dev->handler->name, cmd);
-+ scst_set_cur_start(cmd);
-+ rc = dev->handler->dev_done(cmd);
-+ scst_set_dev_done_time(cmd);
-+ TRACE_DBG("Dev handler %s dev_done() returned %d",
-+ dev->handler->name, rc);
-+ if (rc != SCST_CMD_STATE_DEFAULT)
-+ state = rc;
-+ }
-+
-+ switch (state) {
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ case SCST_CMD_STATE_PRE_XMIT_RESP:
-+ case SCST_CMD_STATE_PARSE:
-+ case SCST_CMD_STATE_PREPARE_SPACE:
-+ case SCST_CMD_STATE_RDY_TO_XFER:
-+ case SCST_CMD_STATE_TGT_PRE_EXEC:
-+ case SCST_CMD_STATE_SEND_FOR_EXEC:
-+ case SCST_CMD_STATE_START_EXEC:
-+ case SCST_CMD_STATE_LOCAL_EXEC:
-+ case SCST_CMD_STATE_REAL_EXEC:
-+ case SCST_CMD_STATE_PRE_DEV_DONE:
-+ case SCST_CMD_STATE_MODE_SELECT_CHECKS:
-+ case SCST_CMD_STATE_DEV_DONE:
-+ case SCST_CMD_STATE_XMIT_RESP:
-+ case SCST_CMD_STATE_FINISHED:
-+ case SCST_CMD_STATE_FINISHED_INTERNAL:
-+#else
-+ default:
-+#endif
-+ cmd->state = state;
-+ break;
-+ case SCST_CMD_STATE_NEED_THREAD_CTX:
-+ TRACE_DBG("Dev handler %s dev_done() requested "
-+ "thread context, rescheduling",
-+ dev->handler->name);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ default:
-+ if (state >= 0) {
-+ PRINT_ERROR("Dev handler %s dev_done() returned "
-+ "invalid cmd state %d",
-+ dev->handler->name, state);
-+ } else {
-+ PRINT_ERROR("Dev handler %s dev_done() returned "
-+ "error %d", dev->handler->name,
-+ state);
-+ }
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ break;
-+#endif
-+ }
-+
-+ scst_check_unblock_dev(cmd);
-+
-+ if (cmd->inc_expected_sn_on_done && cmd->sent_for_exec)
-+ scst_inc_check_expected_sn(cmd);
-+
-+ if (unlikely(cmd->internal))
-+ cmd->state = SCST_CMD_STATE_FINISHED_INTERNAL;
-+
-+#ifndef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ if (cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) {
-+ /* We can't allow atomic command on the exec stages */
-+ if (scst_cmd_atomic(cmd)) {
-+ switch (state) {
-+ case SCST_CMD_STATE_TGT_PRE_EXEC:
-+ case SCST_CMD_STATE_SEND_FOR_EXEC:
-+ case SCST_CMD_STATE_START_EXEC:
-+ case SCST_CMD_STATE_LOCAL_EXEC:
-+ case SCST_CMD_STATE_REAL_EXEC:
-+ TRACE_DBG("Atomic context and redirect, "
-+ "rescheduling (cmd %p)", cmd);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ break;
-+ }
-+ }
-+ }
-+#endif
-+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
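-+
-+/*
-+ * Illustrative sketch, not part of this patch (names hypothetical): a dev
-+ * handler's dev_done() can post-process the result and either return
-+ * SCST_CMD_STATE_DEFAULT to continue with the default flow or return the
-+ * next command state explicitly:
-+ *
-+ *	static int my_dev_done(struct scst_cmd *cmd)
-+ *	{
-+ *		... inspect/fix up status and sense here ...
-+ *		return SCST_CMD_STATE_DEFAULT;
-+ *	}
-+ */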
-+
-+static int scst_pre_xmit_response(struct scst_cmd *cmd)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(cmd->internal);
-+
-+#ifdef CONFIG_SCST_DEBUG_TM
-+ if (cmd->tm_dbg_delayed &&
-+ !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
-+ if (scst_cmd_atomic(cmd)) {
-+ TRACE_MGMT_DBG("%s",
-+ "DEBUG_TM delayed cmd needs a thread");
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ return res;
-+ }
-+ TRACE_MGMT_DBG("Delaying cmd %p (tag %llu) for 1 second",
-+ cmd, cmd->tag);
-+ schedule_timeout_uninterruptible(HZ);
-+ }
-+#endif
-+
-+ if (likely(cmd->tgt_dev != NULL)) {
-+		/*
-+		 * These counters protect against overly long processing
-+		 * latency, so we should decrement them after the cmd has
-+		 * completed.
-+		 */
-+ atomic_dec(&cmd->tgt_dev->tgt_dev_cmd_count);
-+#ifdef CONFIG_SCST_PER_DEVICE_CMD_COUNT_LIMIT
-+ atomic_dec(&cmd->dev->dev_cmd_count);
-+#endif
-+ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
-+ scst_on_hq_cmd_response(cmd);
-+
-+ if (unlikely(!cmd->sent_for_exec)) {
-+ TRACE_SN("cmd %p was not sent to mid-lev"
-+ " (sn %d, set %d)",
-+ cmd, cmd->sn, cmd->sn_set);
-+ scst_unblock_deferred(cmd->cur_order_data, cmd);
-+ cmd->sent_for_exec = 1;
-+ }
-+ }
-+
-+ cmd->done = 1;
-+ smp_mb(); /* to sync with scst_abort_cmd() */
-+
-+ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
-+ scst_xmit_process_aborted_cmd(cmd);
-+ else if (unlikely(cmd->status == SAM_STAT_CHECK_CONDITION))
-+ scst_store_sense(cmd);
-+
-+ if (unlikely(test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags))) {
-+ TRACE_MGMT_DBG("Flag NO_RESP set for cmd %p (tag %llu), "
-+ "skipping", cmd, (long long unsigned int)cmd->tag);
-+ cmd->state = SCST_CMD_STATE_FINISHED;
-+ res = SCST_CMD_STATE_RES_CONT_SAME;
-+ goto out;
-+ }
-+
-+ if (unlikely(cmd->resid_possible))
-+ scst_adjust_resp_data_len(cmd);
-+ else
-+ cmd->adjusted_resp_data_len = cmd->resp_data_len;
-+
-+ cmd->state = SCST_CMD_STATE_XMIT_RESP;
-+ res = SCST_CMD_STATE_RES_CONT_SAME;
-+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
-+
-+static int scst_xmit_response(struct scst_cmd *cmd)
-+{
-+ struct scst_tgt_template *tgtt = cmd->tgtt;
-+ int res, rc;
-+
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(cmd->internal);
-+
-+ if (unlikely(!tgtt->xmit_response_atomic &&
-+ scst_cmd_atomic(cmd))) {
-+		/*
-+		 * This shouldn't happen, thanks to the
-+		 * SCST_TGT_DEV_AFTER_* optimization.
-+		 */
-+ TRACE_MGMT_DBG("Target driver %s xmit_response() needs thread "
-+ "context, rescheduling", tgtt->name);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+ }
-+
-+ while (1) {
-+ int finished_cmds = atomic_read(&cmd->tgt->finished_cmds);
-+
-+ res = SCST_CMD_STATE_RES_CONT_NEXT;
-+ cmd->state = SCST_CMD_STATE_XMIT_WAIT;
-+
-+ TRACE_DBG("Calling xmit_response(%p)", cmd);
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ if (trace_flag & TRACE_SND_BOT) {
-+ int i, j;
-+ struct scatterlist *sg;
-+ if (cmd->tgt_sg != NULL)
-+ sg = cmd->tgt_sg;
-+ else
-+ sg = cmd->sg;
-+ if (sg != NULL) {
-+ TRACE(TRACE_SND_BOT, "Xmitting data for cmd %p "
-+ "(sg_cnt %d, sg %p, sg[0].page %p, buf %p, "
-+ "resp len %d)", cmd, cmd->tgt_sg_cnt,
-+ sg, (void *)sg_page(&sg[0]), sg_virt(sg),
-+ cmd->resp_data_len);
-+ for (i = 0, j = 0; i < cmd->tgt_sg_cnt; ++i, ++j) {
-+ if (unlikely(sg_is_chain(&sg[j]))) {
-+ sg = sg_chain_ptr(&sg[j]);
-+ j = 0;
-+ }
-+ TRACE(TRACE_SND_BOT, "sg %d", j);
-+ PRINT_BUFF_FLAG(TRACE_SND_BOT,
-+ "Xmitting sg", sg_virt(&sg[j]),
-+ sg[j].length);
-+ }
-+ }
-+ }
-+#endif
-+
-+ if (tgtt->on_hw_pending_cmd_timeout != NULL) {
-+ struct scst_session *sess = cmd->sess;
-+ cmd->hw_pending_start = jiffies;
-+ cmd->cmd_hw_pending = 1;
-+ if (!test_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags)) {
-+ TRACE_DBG("Sched HW pending work for sess %p "
-+ "(max time %d)", sess,
-+ tgtt->max_hw_pending_time);
-+ set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED,
-+ &sess->sess_aflags);
-+ schedule_delayed_work(&sess->hw_pending_work,
-+ tgtt->max_hw_pending_time * HZ);
-+ }
-+ }
-+
-+ scst_set_cur_start(cmd);
-+
-+#ifdef CONFIG_SCST_DEBUG_RETRY
-+ if (((scst_random() % 100) == 77))
-+ rc = SCST_TGT_RES_QUEUE_FULL;
-+ else
-+#endif
-+ rc = tgtt->xmit_response(cmd);
-+ TRACE_DBG("xmit_response() returned %d", rc);
-+
-+ if (likely(rc == SCST_TGT_RES_SUCCESS))
-+ goto out;
-+
-+ scst_set_xmit_time(cmd);
-+
-+ cmd->cmd_hw_pending = 0;
-+
-+ /* Restore the previous state */
-+ cmd->state = SCST_CMD_STATE_XMIT_RESP;
-+
-+ switch (rc) {
-+ case SCST_TGT_RES_QUEUE_FULL:
-+ if (scst_queue_retry_cmd(cmd, finished_cmds) == 0)
-+ break;
-+ else
-+ continue;
-+
-+ case SCST_TGT_RES_NEED_THREAD_CTX:
-+ TRACE_DBG("Target driver %s xmit_response() "
-+ "requested thread context, rescheduling",
-+ tgtt->name);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+
-+ default:
-+ goto out_error;
-+ }
-+ break;
-+ }
-+
-+out:
-+ /* Caution: cmd can be already dead here */
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+
-+out_error:
-+ if (rc == SCST_TGT_RES_FATAL_ERROR) {
-+ PRINT_ERROR("Target driver %s xmit_response() returned "
-+ "fatal error", tgtt->name);
-+ } else {
-+ PRINT_ERROR("Target driver %s xmit_response() returned "
-+ "invalid value %d", tgtt->name, rc);
-+ }
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ cmd->state = SCST_CMD_STATE_FINISHED;
-+ res = SCST_CMD_STATE_RES_CONT_SAME;
-+ goto out;
-+}
-+
-+/**
-+ * scst_tgt_cmd_done() - the command's processing done
-+ * @cmd: SCST command
-+ * @pref_context: preferred command execution context
-+ *
-+ * Description:
-+ *    Notifies SCST that the driver has sent the response and the command
-+ *    can now be freed. Don't forget to set the delivery status, if it
-+ *    isn't success, using scst_set_delivery_status() before calling
-+ *    this function. The pref_context argument sets the preferred command
-+ *    execution context (see SCST_CONTEXT_* constants for details).
-+ */
-+void scst_tgt_cmd_done(struct scst_cmd *cmd,
-+ enum scst_exec_context pref_context)
-+{
-+ TRACE_ENTRY();
-+
-+ BUG_ON(cmd->state != SCST_CMD_STATE_XMIT_WAIT);
-+
-+ scst_set_xmit_time(cmd);
-+
-+ cmd->cmd_hw_pending = 0;
-+
-+ if (unlikely(cmd->tgt_dev == NULL))
-+ pref_context = SCST_CONTEXT_THREAD;
-+
-+ cmd->state = SCST_CMD_STATE_FINISHED;
-+
-+ scst_process_redirect_cmd(cmd, pref_context, 1);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL(scst_tgt_cmd_done);
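-+
-+/*
-+ * Illustrative sketch, not part of this patch (names hypothetical): a
-+ * target driver's xmit_response() typically queues the response to its
-+ * hardware and calls scst_tgt_cmd_done() later, once the response has
-+ * actually been sent (the SCST_CONTEXT_* constant should match the
-+ * driver's calling context):
-+ *
-+ *	static int my_xmit_response(struct scst_cmd *cmd)
-+ *	{
-+ *		if (my_hw_queue_full(cmd))
-+ *			return SCST_TGT_RES_QUEUE_FULL;
-+ *		... start sending data and/or status ...
-+ *		return SCST_TGT_RES_SUCCESS;
-+ *	}
-+ *
-+ *	static void my_send_complete(struct scst_cmd *cmd)
-+ *	{
-+ *		scst_tgt_cmd_done(cmd, SCST_CONTEXT_SAME);
-+ *	}
-+ */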
-+
-+static int scst_finish_cmd(struct scst_cmd *cmd)
-+{
-+ int res;
-+ struct scst_session *sess = cmd->sess;
-+ struct scst_io_stat_entry *stat;
-+
-+ TRACE_ENTRY();
-+
-+ scst_update_lat_stats(cmd);
-+
-+ if (unlikely(cmd->delivery_status != SCST_CMD_DELIVERY_SUCCESS)) {
-+ if ((cmd->tgt_dev != NULL) &&
-+ scst_is_ua_sense(cmd->sense, cmd->sense_valid_len)) {
-+ /* This UA delivery failed, so we need to requeue it */
-+ if (scst_cmd_atomic(cmd) &&
-+ scst_is_ua_global(cmd->sense, cmd->sense_valid_len)) {
-+ TRACE_MGMT_DBG("Requeuing of global UA for "
-+ "failed cmd %p needs a thread", cmd);
-+ res = SCST_CMD_STATE_RES_NEED_THREAD;
-+ goto out;
-+ }
-+ scst_requeue_ua(cmd);
-+ }
-+ }
-+
-+ atomic_dec(&sess->sess_cmd_count);
-+
-+ spin_lock_irq(&sess->sess_list_lock);
-+
-+ stat = &sess->io_stats[cmd->data_direction];
-+ stat->cmd_count++;
-+ stat->io_byte_count += cmd->bufflen + cmd->out_bufflen;
-+
-+ list_del(&cmd->sess_cmd_list_entry);
-+
-+ /*
-+	 * Done under sess_list_lock to sync with scst_abort_cmd() without
-+	 * using an extra barrier.
-+ */
-+ cmd->finished = 1;
-+
-+ spin_unlock_irq(&sess->sess_list_lock);
-+
-+ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))) {
-+ TRACE_MGMT_DBG("Aborted cmd %p finished (cmd_ref %d)",
-+ cmd, atomic_read(&cmd->cmd_ref));
-+
-+ scst_finish_cmd_mgmt(cmd);
-+ }
-+
-+ __scst_cmd_put(cmd);
-+
-+ res = SCST_CMD_STATE_RES_CONT_NEXT;
-+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
-+
-+/*
-+ * No locks, but it must be externally serialized (see comment for
-+ * scst_cmd_init_done() in scst.h)
-+ */
-+static void scst_cmd_set_sn(struct scst_cmd *cmd)
-+{
-+ struct scst_order_data *order_data = cmd->cur_order_data;
-+ unsigned long flags;
-+
-+ TRACE_ENTRY();
-+
-+ if (scst_is_implicit_hq_cmd(cmd) &&
-+ likely(cmd->queue_type == SCST_CMD_QUEUE_SIMPLE)) {
-+ TRACE_SN("Implicit HQ cmd %p", cmd);
-+ cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
-+ }
-+
-+ EXTRACHECKS_BUG_ON(cmd->sn_set || cmd->hq_cmd_inced);
-+
-+ /* Optimized for lockless fast path */
-+
-+ scst_check_debug_sn(cmd);
-+
-+#ifdef CONFIG_SCST_STRICT_SERIALIZING
-+ cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
-+#endif
-+
-+ if (cmd->dev->queue_alg == SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) {
-+ /*
-+ * Not the best way, but good enough until there is a
-+		 * possibility to specify the queue type during pass-through
-+		 * command submission.
-+ */
-+ cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
-+ }
-+
-+ switch (cmd->queue_type) {
-+ case SCST_CMD_QUEUE_SIMPLE:
-+ case SCST_CMD_QUEUE_UNTAGGED:
-+ if (likely(order_data->num_free_sn_slots >= 0)) {
-+ /*
-+ * atomic_inc_return() implies memory barrier to sync
-+ * with scst_inc_expected_sn()
-+ */
-+ if (atomic_inc_return(order_data->cur_sn_slot) == 1) {
-+ order_data->curr_sn++;
-+ TRACE_SN("Incremented curr_sn %d",
-+ order_data->curr_sn);
-+ }
-+ cmd->sn_slot = order_data->cur_sn_slot;
-+ cmd->sn = order_data->curr_sn;
-+
-+ order_data->prev_cmd_ordered = 0;
-+ } else {
-+ TRACE(TRACE_MINOR, "***WARNING*** Not enough SN slots "
-+ "%zd", ARRAY_SIZE(order_data->sn_slots));
-+ goto ordered;
-+ }
-+ break;
-+
-+ case SCST_CMD_QUEUE_ORDERED:
-+ TRACE_SN("ORDERED cmd %p (op %x)", cmd, cmd->cdb[0]);
-+ordered:
-+ if (!order_data->prev_cmd_ordered) {
-+ spin_lock_irqsave(&order_data->sn_lock, flags);
-+ if (order_data->num_free_sn_slots >= 0) {
-+ order_data->num_free_sn_slots--;
-+ if (order_data->num_free_sn_slots >= 0) {
-+ int i = 0;
-+ /* Commands can finish in any order, so
-+ * we don't know which slot is empty.
-+ */
-+ while (1) {
-+ order_data->cur_sn_slot++;
-+ if (order_data->cur_sn_slot ==
-+ order_data->sn_slots + ARRAY_SIZE(order_data->sn_slots))
-+ order_data->cur_sn_slot = order_data->sn_slots;
-+
-+ if (atomic_read(order_data->cur_sn_slot) == 0)
-+ break;
-+
-+ i++;
-+ BUG_ON(i == ARRAY_SIZE(order_data->sn_slots));
-+ }
-+ TRACE_SN("New cur SN slot %zd",
-+ order_data->cur_sn_slot -
-+ order_data->sn_slots);
-+ }
-+ }
-+ spin_unlock_irqrestore(&order_data->sn_lock, flags);
-+ }
-+ order_data->prev_cmd_ordered = 1;
-+ order_data->curr_sn++;
-+ cmd->sn = order_data->curr_sn;
-+ break;
-+
-+ case SCST_CMD_QUEUE_HEAD_OF_QUEUE:
-+ TRACE_SN("HQ cmd %p (op %x)", cmd, cmd->cdb[0]);
-+ spin_lock_irqsave(&order_data->sn_lock, flags);
-+ order_data->hq_cmd_count++;
-+ spin_unlock_irqrestore(&order_data->sn_lock, flags);
-+ cmd->hq_cmd_inced = 1;
-+ goto out;
-+
-+ default:
-+ BUG();
-+ }
-+
-+ TRACE_SN("cmd(%p)->sn: %d (order_data %p, *cur_sn_slot %d, "
-+ "num_free_sn_slots %d, prev_cmd_ordered %ld, "
-+ "cur_sn_slot %zd)", cmd, cmd->sn, order_data,
-+ atomic_read(order_data->cur_sn_slot),
-+ order_data->num_free_sn_slots, order_data->prev_cmd_ordered,
-+ order_data->cur_sn_slot - order_data->sn_slots);
-+
-+ cmd->sn_set = 1;
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
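-+
-+/*
-+ * Note, illustrative only: the slot search above is a circular scan over
-+ * the fixed sn_slots[] array; each advance step is equivalent to
-+ *
-+ *	idx = (order_data->cur_sn_slot - order_data->sn_slots + 1) %
-+ *		ARRAY_SIZE(order_data->sn_slots);
-+ *	order_data->cur_sn_slot = &order_data->sn_slots[idx];
-+ *
-+ * repeated until a slot with a zero inflight count is found. The BUG_ON()
-+ * guarantees termination, because num_free_sn_slots said a free slot exists.
-+ */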
-+
-+/*
-+ * Returns 0 on success, > 0 when we need to wait for unblock,
-+ * < 0 if there is no device (lun) or device type handler.
-+ *
-+ * No locks, but might be on IRQ, protection is done by the
-+ * suspended activity.
-+ */
-+static int scst_translate_lun(struct scst_cmd *cmd)
-+{
-+ struct scst_tgt_dev *tgt_dev = NULL;
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ cmd->cpu_cmd_counter = scst_get();
-+
-+ if (likely(!test_bit(SCST_FLAG_SUSPENDED, &scst_flags))) {
-+ struct list_head *head =
-+ &cmd->sess->sess_tgt_dev_list[SESS_TGT_DEV_LIST_HASH_FN(cmd->lun)];
-+ TRACE_DBG("Finding tgt_dev for cmd %p (lun %lld)", cmd,
-+ (long long unsigned int)cmd->lun);
-+ res = -1;
-+ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
-+ if (tgt_dev->lun == cmd->lun) {
-+ TRACE_DBG("tgt_dev %p found", tgt_dev);
-+
-+ if (unlikely(tgt_dev->dev->handler ==
-+ &scst_null_devtype)) {
-+ PRINT_INFO("Dev handler for device "
-+ "%lld is NULL, the device will not "
-+ "be visible remotely",
-+ (long long unsigned int)cmd->lun);
-+ break;
-+ }
-+
-+ cmd->cmd_threads = tgt_dev->active_cmd_threads;
-+ cmd->tgt_dev = tgt_dev;
-+ cmd->cur_order_data = tgt_dev->curr_order_data;
-+ cmd->dev = tgt_dev->dev;
-+
-+ res = 0;
-+ break;
-+ }
-+ }
-+ if (res != 0) {
-+ TRACE(TRACE_MINOR,
-+			    "tgt_dev for LUN %lld not found, command to "
-+			    "nonexistent LU (initiator %s, target %s)?",
-+ (long long unsigned int)cmd->lun,
-+ cmd->sess->initiator_name, cmd->tgt->tgt_name);
-+ scst_put(cmd->cpu_cmd_counter);
-+ }
-+ } else {
-+ TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
-+ scst_put(cmd->cpu_cmd_counter);
-+ res = 1;
-+ }
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/*
-+ * No locks, but might be on IRQ.
-+ *
-+ * Returns 0 on success, > 0 when we need to wait for unblock,
-+ * < 0 if there is no device (lun) or device type handler.
-+ */
-+static int __scst_init_cmd(struct scst_cmd *cmd)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ res = scst_translate_lun(cmd);
-+ if (likely(res == 0)) {
-+ int cnt;
-+ bool failure = false;
-+
-+ cmd->state = SCST_CMD_STATE_PARSE;
-+
-+ cnt = atomic_inc_return(&cmd->tgt_dev->tgt_dev_cmd_count);
-+ if (unlikely(cnt > SCST_MAX_TGT_DEV_COMMANDS)) {
-+ TRACE(TRACE_FLOW_CONTROL,
-+ "Too many pending commands (%d) in "
-+ "session, returning BUSY to initiator \"%s\"",
-+ cnt, (cmd->sess->initiator_name[0] == '\0') ?
-+ "Anonymous" : cmd->sess->initiator_name);
-+ failure = true;
-+ }
-+
-+#ifdef CONFIG_SCST_PER_DEVICE_CMD_COUNT_LIMIT
-+ cnt = atomic_inc_return(&cmd->dev->dev_cmd_count);
-+ if (unlikely(cnt > SCST_MAX_DEV_COMMANDS)) {
-+ if (!failure) {
-+ TRACE(TRACE_FLOW_CONTROL,
-+ "Too many pending device "
-+ "commands (%d), returning BUSY to "
-+ "initiator \"%s\"", cnt,
-+ (cmd->sess->initiator_name[0] == '\0') ?
-+ "Anonymous" :
-+ cmd->sess->initiator_name);
-+ failure = true;
-+ }
-+ }
-+#endif
-+
-+ if (unlikely(failure))
-+ goto out_busy;
-+
-+		/*
-+		 * SCST_IMPLICIT_HQ is not implemented for unknown commands
-+		 * when set_sn_on_restart_cmd is not set, because a custom
-+		 * parse can reorder commands due to multithreaded processing.
-+		 * To implement it, we would need to treat all unknown commands
-+		 * as ORDERED initially, then reprocess queue_type after parse
-+		 * to change it if needed. ToDo.
-+		 */
-+ scst_pre_parse(cmd);
-+
-+ if (!cmd->set_sn_on_restart_cmd)
-+ scst_cmd_set_sn(cmd);
-+ } else if (res < 0) {
-+ TRACE_DBG("Finishing cmd %p", cmd);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_lun_not_supported));
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ } else
-+ goto out;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_busy:
-+ scst_set_busy(cmd);
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ goto out;
-+}
-+
-+/* Called under scst_init_lock and IRQs disabled */
-+static void scst_do_job_init(void)
-+ __releases(&scst_init_lock)
-+ __acquires(&scst_init_lock)
-+{
-+ struct scst_cmd *cmd;
-+ int susp;
-+
-+ TRACE_ENTRY();
-+
-+restart:
-+ /*
-+	 * There is no need for a read barrier here, because we don't care
-+	 * exactly when this check is done.
-+ */
-+ susp = test_bit(SCST_FLAG_SUSPENDED, &scst_flags);
-+ if (scst_init_poll_cnt > 0)
-+ scst_init_poll_cnt--;
-+
-+ list_for_each_entry(cmd, &scst_init_cmd_list, cmd_list_entry) {
-+ int rc;
-+ if (susp && !test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
-+ continue;
-+ if (!test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
-+ spin_unlock_irq(&scst_init_lock);
-+ rc = __scst_init_cmd(cmd);
-+ spin_lock_irq(&scst_init_lock);
-+ if (rc > 0) {
-+ TRACE_MGMT_DBG("%s",
-+ "FLAG SUSPENDED set, restarting");
-+ goto restart;
-+ }
-+ } else {
-+ TRACE_MGMT_DBG("Aborting not inited cmd %p (tag %llu)",
-+ cmd, (long long unsigned int)cmd->tag);
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ }
-+
-+		/*
-+		 * Deleting cmd from the init cmd list only after
-+		 * __scst_init_cmd() is necessary to keep the check in
-+		 * scst_init_cmd() correct and thus preserve the order of
-+		 * the commands.
-+		 *
-+		 * We don't care about the race where the init cmd list is
-+		 * empty, one command saw it as non-empty and is inserting
-+		 * itself into it, while another command at the same time sees
-+		 * it as empty and proceeds directly: this could affect only
-+		 * commands from the same initiator to the same tgt_dev, and
-+		 * scst_cmd_init_done*() doesn't guarantee the order of such
-+		 * simultaneous calls anyway.
-+		 */
-+ TRACE_MGMT_DBG("Deleting cmd %p from init cmd list", cmd);
-+ smp_wmb(); /* enforce the required order */
-+ list_del(&cmd->cmd_list_entry);
-+ spin_unlock(&scst_init_lock);
-+
-+ spin_lock(&cmd->cmd_threads->cmd_list_lock);
-+ TRACE_MGMT_DBG("Adding cmd %p to active cmd list", cmd);
-+ if (unlikely(cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))
-+ list_add(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ else
-+ list_add_tail(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
-+ spin_unlock(&cmd->cmd_threads->cmd_list_lock);
-+
-+ spin_lock(&scst_init_lock);
-+ goto restart;
-+ }
-+
-+ /* It isn't really needed, but let's keep it */
-+ if (susp != test_bit(SCST_FLAG_SUSPENDED, &scst_flags))
-+ goto restart;
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static inline int test_init_cmd_list(void)
-+{
-+ int res = (!list_empty(&scst_init_cmd_list) &&
-+ !test_bit(SCST_FLAG_SUSPENDED, &scst_flags)) ||
-+ unlikely(kthread_should_stop()) ||
-+ (scst_init_poll_cnt > 0);
-+ return res;
-+}
-+
-+int scst_init_thread(void *arg)
-+{
-+ TRACE_ENTRY();
-+
-+ PRINT_INFO("Init thread started, PID %d", current->pid);
-+
-+ current->flags |= PF_NOFREEZE;
-+
-+ set_user_nice(current, -10);
-+
-+ spin_lock_irq(&scst_init_lock);
-+ while (!kthread_should_stop()) {
-+ wait_queue_t wait;
-+ init_waitqueue_entry(&wait, current);
-+
-+ if (!test_init_cmd_list()) {
-+ add_wait_queue_exclusive(&scst_init_cmd_list_waitQ,
-+ &wait);
-+ for (;;) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ if (test_init_cmd_list())
-+ break;
-+ spin_unlock_irq(&scst_init_lock);
-+ schedule();
-+ spin_lock_irq(&scst_init_lock);
-+ }
-+ set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&scst_init_cmd_list_waitQ, &wait);
-+ }
-+ scst_do_job_init();
-+ }
-+ spin_unlock_irq(&scst_init_lock);
-+
-+ /*
-+	 * If kthread_should_stop() is true, we are guaranteed to be in
-+	 * the middle of module unload, so scst_init_cmd_list must be empty.
-+ */
-+ BUG_ON(!list_empty(&scst_init_cmd_list));
-+
-+ PRINT_INFO("Init thread PID %d finished", current->pid);
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
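-+
-+/*
-+ * Note, illustrative only: the open-coded wait loop above is the classic
-+ * prepare-to-wait pattern. It can't simply be replaced by
-+ *
-+ *	wait_event_interruptible_exclusive(scst_init_cmd_list_waitQ,
-+ *					test_init_cmd_list());
-+ *
-+ * because scst_init_lock must be dropped around schedule() and re-taken
-+ * before the condition is re-checked.
-+ */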
-+
-+/**
-+ * scst_process_active_cmd() - process active command
-+ *
-+ * Description:
-+ *    Main SCST command processing routine. Must be used only by dev handlers.
-+ *
-+ *    The atomic argument is true if the function is called in atomic context.
-+ *
-+ * Must be called with no locks held.
-+ */
-+void scst_process_active_cmd(struct scst_cmd *cmd, bool atomic)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+	 * Checkpatch will complain about the use of in_atomic() below. You
-+ * can safely ignore this warning since in_atomic() is used here only
-+ * for debugging purposes.
-+ */
-+ EXTRACHECKS_BUG_ON(in_irq() || irqs_disabled());
-+ EXTRACHECKS_WARN_ON((in_atomic() || in_interrupt()) && !atomic);
-+
-+ cmd->atomic = atomic;
-+
-+ TRACE_DBG("cmd %p, atomic %d", cmd, atomic);
-+
-+ do {
-+ switch (cmd->state) {
-+ case SCST_CMD_STATE_PARSE:
-+ res = scst_parse_cmd(cmd);
-+ break;
-+
-+ case SCST_CMD_STATE_PREPARE_SPACE:
-+ res = scst_prepare_space(cmd);
-+ break;
-+
-+ case SCST_CMD_STATE_PREPROCESSING_DONE:
-+ res = scst_preprocessing_done(cmd);
-+ break;
-+
-+ case SCST_CMD_STATE_RDY_TO_XFER:
-+ res = scst_rdy_to_xfer(cmd);
-+ break;
-+
-+ case SCST_CMD_STATE_TGT_PRE_EXEC:
-+ res = scst_tgt_pre_exec(cmd);
-+ break;
-+
-+ case SCST_CMD_STATE_SEND_FOR_EXEC:
-+ if (tm_dbg_check_cmd(cmd) != 0) {
-+ res = SCST_CMD_STATE_RES_CONT_NEXT;
-+ TRACE_MGMT_DBG("Skipping cmd %p (tag %llu), "
-+ "because of TM DBG delay", cmd,
-+ (long long unsigned int)cmd->tag);
-+ break;
-+ }
-+ res = scst_send_for_exec(&cmd);
-+ EXTRACHECKS_BUG_ON(res == SCST_CMD_STATE_RES_NEED_THREAD);
-+ /*
-+ * !! At this point cmd, sess & tgt_dev can already be
-+ * freed !!
-+ */
-+ break;
-+
-+ case SCST_CMD_STATE_START_EXEC:
-+ res = scst_exec(&cmd);
-+ EXTRACHECKS_BUG_ON(res == SCST_CMD_STATE_RES_NEED_THREAD);
-+ /*
-+ * !! At this point cmd, sess & tgt_dev can already be
-+ * freed !!
-+ */
-+ break;
-+
-+ case SCST_CMD_STATE_LOCAL_EXEC:
-+ res = scst_local_exec(cmd);
-+ EXTRACHECKS_BUG_ON(res == SCST_CMD_STATE_RES_NEED_THREAD);
-+ /*
-+ * !! At this point cmd, sess & tgt_dev can already be
-+ * freed !!
-+ */
-+ break;
-+
-+ case SCST_CMD_STATE_REAL_EXEC:
-+ res = scst_real_exec(cmd);
-+ EXTRACHECKS_BUG_ON(res == SCST_CMD_STATE_RES_NEED_THREAD);
-+ /*
-+ * !! At this point cmd, sess & tgt_dev can already be
-+ * freed !!
-+ */
-+ break;
-+
-+ case SCST_CMD_STATE_PRE_DEV_DONE:
-+ res = scst_pre_dev_done(cmd);
-+ EXTRACHECKS_BUG_ON((res == SCST_CMD_STATE_RES_NEED_THREAD) &&
-+ (cmd->state == SCST_CMD_STATE_PRE_DEV_DONE));
-+ break;
-+
-+ case SCST_CMD_STATE_MODE_SELECT_CHECKS:
-+ res = scst_mode_select_checks(cmd);
-+ break;
-+
-+ case SCST_CMD_STATE_DEV_DONE:
-+ res = scst_dev_done(cmd);
-+ break;
-+
-+ case SCST_CMD_STATE_PRE_XMIT_RESP:
-+ res = scst_pre_xmit_response(cmd);
-+ EXTRACHECKS_BUG_ON(res ==
-+ SCST_CMD_STATE_RES_NEED_THREAD);
-+ break;
-+
-+ case SCST_CMD_STATE_XMIT_RESP:
-+ res = scst_xmit_response(cmd);
-+ break;
-+
-+ case SCST_CMD_STATE_FINISHED:
-+ res = scst_finish_cmd(cmd);
-+ break;
-+
-+ case SCST_CMD_STATE_FINISHED_INTERNAL:
-+ res = scst_finish_internal_cmd(cmd);
-+ EXTRACHECKS_BUG_ON(res ==
-+ SCST_CMD_STATE_RES_NEED_THREAD);
-+ break;
-+
-+ default:
-+ PRINT_CRIT_ERROR("cmd (%p) in state %d, but shouldn't "
-+ "be", cmd, cmd->state);
-+ BUG();
-+ res = SCST_CMD_STATE_RES_CONT_NEXT;
-+ break;
-+ }
-+ } while (res == SCST_CMD_STATE_RES_CONT_SAME);
-+
-+ if (res == SCST_CMD_STATE_RES_CONT_NEXT) {
-+ /* None */
-+ } else if (res == SCST_CMD_STATE_RES_NEED_THREAD) {
-+ spin_lock_irq(&cmd->cmd_threads->cmd_list_lock);
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ switch (cmd->state) {
-+ case SCST_CMD_STATE_PARSE:
-+ case SCST_CMD_STATE_PREPARE_SPACE:
-+ case SCST_CMD_STATE_RDY_TO_XFER:
-+ case SCST_CMD_STATE_TGT_PRE_EXEC:
-+ case SCST_CMD_STATE_SEND_FOR_EXEC:
-+ case SCST_CMD_STATE_START_EXEC:
-+ case SCST_CMD_STATE_LOCAL_EXEC:
-+ case SCST_CMD_STATE_REAL_EXEC:
-+ case SCST_CMD_STATE_DEV_DONE:
-+ case SCST_CMD_STATE_XMIT_RESP:
-+#endif
-+ TRACE_DBG("Adding cmd %p to head of active cmd list",
-+ cmd);
-+ list_add(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ break;
-+ default:
-+ PRINT_CRIT_ERROR("cmd %p is in invalid state %d)", cmd,
-+ cmd->state);
-+ spin_unlock_irq(&cmd->cmd_threads->cmd_list_lock);
-+ BUG();
-+ spin_lock_irq(&cmd->cmd_threads->cmd_list_lock);
-+ break;
-+ }
-+#endif
-+ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
-+ spin_unlock_irq(&cmd->cmd_threads->cmd_list_lock);
-+ } else
-+ BUG();
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_process_active_cmd);
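-+
-+/*
-+ * Illustrative sketch, not part of this patch (names hypothetical): a dev
-+ * handler restarting a command from its own work item would do roughly
-+ *
-+ *	static void my_work_fn(struct work_struct *work)
-+ *	{
-+ *		struct scst_cmd *cmd = my_cmd_from_work(work);
-+ *		... finish the deferred step ...
-+ *		scst_process_active_cmd(cmd, false);
-+ *	}
-+ *
-+ * passing atomic = false, because work items run in process context.
-+ */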
-+
-+/* Called under cmd_list_lock and IRQs disabled */
-+static void scst_do_job_active(struct list_head *cmd_list,
-+ spinlock_t *cmd_list_lock, bool atomic)
-+ __releases(cmd_list_lock)
-+ __acquires(cmd_list_lock)
-+{
-+ TRACE_ENTRY();
-+
-+ while (!list_empty(cmd_list)) {
-+ struct scst_cmd *cmd = list_entry(cmd_list->next, typeof(*cmd),
-+ cmd_list_entry);
-+ TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
-+ list_del(&cmd->cmd_list_entry);
-+ spin_unlock_irq(cmd_list_lock);
-+ scst_process_active_cmd(cmd, atomic);
-+ spin_lock_irq(cmd_list_lock);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static inline int test_cmd_threads(struct scst_cmd_threads *p_cmd_threads)
-+{
-+ int res = !list_empty(&p_cmd_threads->active_cmd_list) ||
-+ unlikely(kthread_should_stop()) ||
-+ tm_dbg_is_release();
-+ return res;
-+}
-+
-+int scst_cmd_thread(void *arg)
-+{
-+ struct scst_cmd_threads *p_cmd_threads = arg;
-+
-+ TRACE_ENTRY();
-+
-+ PRINT_INFO("Processing thread %s (PID %d) started", current->comm,
-+ current->pid);
-+
-+#if 0
-+ set_user_nice(current, 10);
-+#endif
-+ current->flags |= PF_NOFREEZE;
-+
-+ mutex_lock(&p_cmd_threads->io_context_mutex);
-+
-+ WARN_ON(current->io_context);
-+
-+ if (p_cmd_threads != &scst_main_cmd_threads) {
-+ /*
-+		 * For linked IO contexts, io_context might be non-NULL while
-+		 * io_context_refcnt is 0.
-+ */
-+ if (p_cmd_threads->io_context == NULL) {
-+ p_cmd_threads->io_context = get_io_context(GFP_KERNEL, -1);
-+ TRACE_MGMT_DBG("Alloced new IO context %p "
-+ "(p_cmd_threads %p)",
-+ p_cmd_threads->io_context,
-+ p_cmd_threads);
-+ /*
-+ * Put the extra reference created by get_io_context()
-+ * because we don't need it.
-+ */
-+ put_io_context(p_cmd_threads->io_context);
-+ } else {
-+ current->io_context = ioc_task_link(p_cmd_threads->io_context);
-+ TRACE_MGMT_DBG("Linked IO context %p "
-+ "(p_cmd_threads %p)", p_cmd_threads->io_context,
-+ p_cmd_threads);
-+ }
-+ p_cmd_threads->io_context_refcnt++;
-+ }
-+
-+ mutex_unlock(&p_cmd_threads->io_context_mutex);
-+
-+ smp_wmb();
-+ p_cmd_threads->io_context_ready = true;
-+
-+ spin_lock_irq(&p_cmd_threads->cmd_list_lock);
-+ while (!kthread_should_stop()) {
-+ wait_queue_t wait;
-+ init_waitqueue_entry(&wait, current);
-+
-+ if (!test_cmd_threads(p_cmd_threads)) {
-+ add_wait_queue_exclusive_head(
-+ &p_cmd_threads->cmd_list_waitQ,
-+ &wait);
-+ for (;;) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ if (test_cmd_threads(p_cmd_threads))
-+ break;
-+ spin_unlock_irq(&p_cmd_threads->cmd_list_lock);
-+ schedule();
-+ spin_lock_irq(&p_cmd_threads->cmd_list_lock);
-+ }
-+ set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&p_cmd_threads->cmd_list_waitQ, &wait);
-+ }
-+
-+ if (tm_dbg_is_release()) {
-+ spin_unlock_irq(&p_cmd_threads->cmd_list_lock);
-+ tm_dbg_check_released_cmds();
-+ spin_lock_irq(&p_cmd_threads->cmd_list_lock);
-+ }
-+
-+ scst_do_job_active(&p_cmd_threads->active_cmd_list,
-+ &p_cmd_threads->cmd_list_lock, false);
-+ }
-+ spin_unlock_irq(&p_cmd_threads->cmd_list_lock);
-+
-+ if (p_cmd_threads != &scst_main_cmd_threads) {
-+ mutex_lock(&p_cmd_threads->io_context_mutex);
-+ if (--p_cmd_threads->io_context_refcnt == 0)
-+ p_cmd_threads->io_context = NULL;
-+ mutex_unlock(&p_cmd_threads->io_context_mutex);
-+ }
-+
-+ PRINT_INFO("Processing thread %s (PID %d) finished", current->comm,
-+ current->pid);
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+void scst_cmd_tasklet(long p)
-+{
-+ struct scst_percpu_info *i = (struct scst_percpu_info *)p;
-+
-+ TRACE_ENTRY();
-+
-+ spin_lock_irq(&i->tasklet_lock);
-+ scst_do_job_active(&i->tasklet_cmd_list, &i->tasklet_lock, true);
-+ spin_unlock_irq(&i->tasklet_lock);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * Returns 0 on success, < 0 if there is no device handler or
-+ * > 0 if SCST_FLAG_SUSPENDED is set and SCST_FLAG_SUSPENDING is not.
-+ * No locks, protection is done by the suspended activity.
-+ */
-+static int scst_mgmt_translate_lun(struct scst_mgmt_cmd *mcmd)
-+{
-+ struct scst_tgt_dev *tgt_dev = NULL;
-+ struct list_head *head;
-+ int res = -1;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Finding tgt_dev for mgmt cmd %p (lun %lld)", mcmd,
-+ (long long unsigned int)mcmd->lun);
-+
-+ mcmd->cpu_cmd_counter = scst_get();
-+
-+ if (unlikely(test_bit(SCST_FLAG_SUSPENDED, &scst_flags) &&
-+ !test_bit(SCST_FLAG_SUSPENDING, &scst_flags))) {
-+ TRACE_MGMT_DBG("%s", "FLAG SUSPENDED set, skipping");
-+ scst_put(mcmd->cpu_cmd_counter);
-+ res = 1;
-+ goto out;
-+ }
-+
-+ head = &mcmd->sess->sess_tgt_dev_list[SESS_TGT_DEV_LIST_HASH_FN(mcmd->lun)];
-+ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
-+ if (tgt_dev->lun == mcmd->lun) {
-+ TRACE_DBG("tgt_dev %p found", tgt_dev);
-+ mcmd->mcmd_tgt_dev = tgt_dev;
-+ res = 0;
-+ break;
-+ }
-+ }
-+ if (mcmd->mcmd_tgt_dev == NULL)
-+ scst_put(mcmd->cpu_cmd_counter);
-+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
-+
-+/* No locks */
-+void scst_done_cmd_mgmt(struct scst_cmd *cmd)
-+{
-+ struct scst_mgmt_cmd_stub *mstb, *t;
-+	bool wake = false;
-+ unsigned long flags;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("cmd %p done (tag %llu)",
-+ cmd, (long long unsigned int)cmd->tag);
-+
-+ spin_lock_irqsave(&scst_mcmd_lock, flags);
-+
-+ list_for_each_entry_safe(mstb, t, &cmd->mgmt_cmd_list,
-+ cmd_mgmt_cmd_list_entry) {
-+ struct scst_mgmt_cmd *mcmd;
-+
-+ if (!mstb->done_counted)
-+ continue;
-+
-+ mcmd = mstb->mcmd;
-+ TRACE_MGMT_DBG("mcmd %p, mcmd->cmd_done_wait_count %d",
-+ mcmd, mcmd->cmd_done_wait_count);
-+
-+ mcmd->cmd_done_wait_count--;
-+
-+ BUG_ON(mcmd->cmd_done_wait_count < 0);
-+
-+ if (mcmd->cmd_done_wait_count > 0) {
-+ TRACE_MGMT_DBG("cmd_done_wait_count(%d) not 0, "
-+ "skipping", mcmd->cmd_done_wait_count);
-+ goto check_free;
-+ }
-+
-+ if (mcmd->state == SCST_MCMD_STATE_WAITING_AFFECTED_CMDS_DONE) {
-+ mcmd->state = SCST_MCMD_STATE_AFFECTED_CMDS_DONE;
-+ TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd "
-+ "list", mcmd);
-+ list_add_tail(&mcmd->mgmt_cmd_list_entry,
-+ &scst_active_mgmt_cmd_list);
-+			wake = true;
-+ }
-+
-+check_free:
-+ if (!mstb->finish_counted) {
-+ TRACE_DBG("Releasing mstb %p", mstb);
-+ list_del(&mstb->cmd_mgmt_cmd_list_entry);
-+ mempool_free(mstb, scst_mgmt_stub_mempool);
-+ }
-+ }
-+
-+ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
-+
-+ if (wake)
-+ wake_up(&scst_mgmt_cmd_list_waitQ);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Called under scst_mcmd_lock and IRQs disabled */
-+static void __scst_dec_finish_wait_count(struct scst_mgmt_cmd *mcmd, bool *wake)
-+{
-+ TRACE_ENTRY();
-+
-+ mcmd->cmd_finish_wait_count--;
-+
-+ BUG_ON(mcmd->cmd_finish_wait_count < 0);
-+
-+ if (mcmd->cmd_finish_wait_count > 0) {
-+ TRACE_MGMT_DBG("cmd_finish_wait_count(%d) not 0, "
-+ "skipping", mcmd->cmd_finish_wait_count);
-+ goto out;
-+ }
-+
-+ if (mcmd->cmd_done_wait_count > 0) {
-+ TRACE_MGMT_DBG("cmd_done_wait_count(%d) not 0, "
-+ "skipping", mcmd->cmd_done_wait_count);
-+ goto out;
-+ }
-+
-+ if (mcmd->state == SCST_MCMD_STATE_WAITING_AFFECTED_CMDS_FINISHED) {
-+ mcmd->state = SCST_MCMD_STATE_DONE;
-+ TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd "
-+ "list", mcmd);
-+ list_add_tail(&mcmd->mgmt_cmd_list_entry,
-+ &scst_active_mgmt_cmd_list);
-+ *wake = true;
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ * scst_prepare_async_mcmd() - prepare async management command
-+ *
-+ * Notifies SCST that the management command is going to be async, i.e.
-+ * will be completed in another context.
-+ *
-+ * No SCST locks supposed to be held on entrance.
-+ */
-+void scst_prepare_async_mcmd(struct scst_mgmt_cmd *mcmd)
-+{
-+ unsigned long flags;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Preparing mcmd %p for async execution "
-+ "(cmd_finish_wait_count %d)", mcmd,
-+ mcmd->cmd_finish_wait_count);
-+
-+ spin_lock_irqsave(&scst_mcmd_lock, flags);
-+ mcmd->cmd_finish_wait_count++;
-+ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_prepare_async_mcmd);
-+
-+/**
-+ * scst_async_mcmd_completed() - async management command completed
-+ *
-+ * Notifies SCST that the async management command, prepared by
-+ * scst_prepare_async_mcmd(), has completed.
-+ *
-+ * No SCST locks supposed to be held on entrance.
-+ */
-+void scst_async_mcmd_completed(struct scst_mgmt_cmd *mcmd, int status)
-+{
-+ unsigned long flags;
-+ bool wake = false;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Async mcmd %p completed (status %d)", mcmd, status);
-+
-+ spin_lock_irqsave(&scst_mcmd_lock, flags);
-+
-+ if (status != SCST_MGMT_STATUS_SUCCESS)
-+ mcmd->status = status;
-+
-+ __scst_dec_finish_wait_count(mcmd, &wake);
-+
-+ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
-+
-+ if (wake)
-+ wake_up(&scst_mgmt_cmd_list_waitQ);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_async_mcmd_completed);
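-+
-+/*
-+ * Illustrative sketch, not part of this patch (names hypothetical): the two
-+ * calls above pair up around an asynchronous TM operation:
-+ *
-+ *	... in a task management hook that can't complete synchronously ...
-+ *	scst_prepare_async_mcmd(mcmd);
-+ *	my_start_async_reset(tgt_dev, mcmd);
-+ *
-+ *	... and later, from the completion callback ...
-+ *	scst_async_mcmd_completed(mcmd, SCST_MGMT_STATUS_SUCCESS);
-+ */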
-+
-+/* No locks */
-+static void scst_finish_cmd_mgmt(struct scst_cmd *cmd)
-+{
-+ struct scst_mgmt_cmd_stub *mstb, *t;
-+ bool wake = false;
-+ unsigned long flags;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("cmd %p finished (tag %llu)",
-+ cmd, (long long unsigned int)cmd->tag);
-+
-+ spin_lock_irqsave(&scst_mcmd_lock, flags);
-+
-+ list_for_each_entry_safe(mstb, t, &cmd->mgmt_cmd_list,
-+ cmd_mgmt_cmd_list_entry) {
-+ struct scst_mgmt_cmd *mcmd = mstb->mcmd;
-+
-+ TRACE_MGMT_DBG("mcmd %p, mcmd->cmd_finish_wait_count %d", mcmd,
-+ mcmd->cmd_finish_wait_count);
-+
-+ BUG_ON(!mstb->finish_counted);
-+
-+ if (cmd->completed)
-+ mcmd->completed_cmd_count++;
-+
-+ __scst_dec_finish_wait_count(mcmd, &wake);
-+
-+ TRACE_DBG("Releasing mstb %p", mstb);
-+ list_del(&mstb->cmd_mgmt_cmd_list_entry);
-+ mempool_free(mstb, scst_mgmt_stub_mempool);
-+ }
-+
-+ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
-+
-+ if (wake)
-+ wake_up(&scst_mgmt_cmd_list_waitQ);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int scst_call_dev_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
-+ struct scst_tgt_dev *tgt_dev, int set_status)
-+{
-+ int res = SCST_DEV_TM_NOT_COMPLETED;
-+ struct scst_dev_type *h = tgt_dev->dev->handler;
-+
-+ if (h->task_mgmt_fn) {
-+ TRACE_MGMT_DBG("Calling dev handler %s task_mgmt_fn(fn=%d)",
-+ h->name, mcmd->fn);
-+ res = h->task_mgmt_fn(mcmd, tgt_dev);
-+ TRACE_MGMT_DBG("Dev handler %s task_mgmt_fn() returned %d",
-+ h->name, res);
-+ if (set_status && (res != SCST_DEV_TM_NOT_COMPLETED))
-+ mcmd->status = res;
-+ }
-+ return res;
-+}
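-+
-+/*
-+ * Illustrative sketch, not part of this patch (names hypothetical): a dev
-+ * handler that handles a TM function itself returns the resulting
-+ * SCST_MGMT_STATUS_* value; returning SCST_DEV_TM_NOT_COMPLETED lets the
-+ * SCST core do the default processing instead:
-+ *
-+ *	static int my_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
-+ *			struct scst_tgt_dev *tgt_dev)
-+ *	{
-+ *		if (mcmd->fn != SCST_LUN_RESET)
-+ *			return SCST_DEV_TM_NOT_COMPLETED;
-+ *		... reset handler-private state for tgt_dev ...
-+ *		return SCST_MGMT_STATUS_SUCCESS;
-+ *	}
-+ */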
-+
-+static inline int scst_is_strict_mgmt_fn(int mgmt_fn)
-+{
-+ switch (mgmt_fn) {
-+#ifdef CONFIG_SCST_ABORT_CONSIDER_FINISHED_TASKS_AS_NOT_EXISTING
-+ case SCST_ABORT_TASK:
-+#endif
-+#if 0
-+ case SCST_ABORT_TASK_SET:
-+ case SCST_CLEAR_TASK_SET:
-+#endif
-+ return 1;
-+ default:
-+ return 0;
-+ }
-+}
-+
-+/*
-+ * Must be called under sess_list_lock to sync with finished flag assignment in
-+ * scst_finish_cmd()
-+ */
-+void scst_abort_cmd(struct scst_cmd *cmd, struct scst_mgmt_cmd *mcmd,
-+ bool other_ini, bool call_dev_task_mgmt_fn)
-+{
-+ unsigned long flags;
-+ static DEFINE_SPINLOCK(other_ini_lock);
-+
-+ TRACE_ENTRY();
-+
-+ TRACE(TRACE_SCSI|TRACE_MGMT_DEBUG, "Aborting cmd %p (tag %llu, op %x)",
-+ cmd, (long long unsigned int)cmd->tag, cmd->cdb[0]);
-+
-+ /* To protect from concurrent aborts */
-+ spin_lock_irqsave(&other_ini_lock, flags);
-+
-+ if (other_ini) {
-+ struct scst_device *dev = NULL;
-+
-+ /* Might be necessary if command aborted several times */
-+ if (!test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags))
-+ set_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
-+
-+ /* Necessary for scst_xmit_process_aborted_cmd */
-+ if (cmd->dev != NULL)
-+ dev = cmd->dev;
-+ else if ((mcmd != NULL) && (mcmd->mcmd_tgt_dev != NULL))
-+ dev = mcmd->mcmd_tgt_dev->dev;
-+
-+ if (dev != NULL) {
-+ if (dev->tas)
-+ set_bit(SCST_CMD_DEVICE_TAS, &cmd->cmd_flags);
-+ } else
-+ PRINT_WARNING("Abort cmd %p from other initiator, but "
-+ "neither cmd, nor mcmd %p have tgt_dev set, so "
-+ "TAS information can be lost", cmd, mcmd);
-+ } else {
-+ /* Might be necessary if command aborted several times */
-+ clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
-+ }
-+
-+ set_bit(SCST_CMD_ABORTED, &cmd->cmd_flags);
-+
-+ spin_unlock_irqrestore(&other_ini_lock, flags);
-+
-+	 * To sync with the setting of cmd->done in scst_pre_xmit_response()
-+	 * (with scst_finish_cmd() we sync by using sess_list_lock) and with
-+	 * the setting of a UA for an aborted cmd in scst_set_pending_UA().
-+ * setting UA for aborted cmd in scst_set_pending_UA().
-+ */
-+ smp_mb__after_set_bit();
-+
-+ if (cmd->tgt_dev == NULL) {
-+ spin_lock_irqsave(&scst_init_lock, flags);
-+ scst_init_poll_cnt++;
-+ spin_unlock_irqrestore(&scst_init_lock, flags);
-+ wake_up(&scst_init_cmd_list_waitQ);
-+ }
-+
-+ if (!cmd->finished && call_dev_task_mgmt_fn && (cmd->tgt_dev != NULL))
-+ scst_call_dev_task_mgmt_fn(mcmd, cmd->tgt_dev, 1);
-+
-+ spin_lock_irqsave(&scst_mcmd_lock, flags);
-+ if ((mcmd != NULL) && !cmd->finished) {
-+ struct scst_mgmt_cmd_stub *mstb;
-+
-+ mstb = mempool_alloc(scst_mgmt_stub_mempool, GFP_ATOMIC);
-+ if (mstb == NULL) {
-+ PRINT_CRIT_ERROR("Allocation of management command "
-+ "stub failed (mcmd %p, cmd %p)", mcmd, cmd);
-+ goto unlock;
-+ }
-+ memset(mstb, 0, sizeof(*mstb));
-+
-+ TRACE_DBG("mstb %p, mcmd %p", mstb, mcmd);
-+
-+ mstb->mcmd = mcmd;
-+
-+		/*
-+		 * Delay the response until the command finishes in order to
-+		 * guarantee that "no further responses from the task are sent
-+		 * to the SCSI initiator port" after the response from the TM
-+		 * function is sent (SAM). Also, we must wait here to be sure
-+		 * that we won't receive duplicate commands with the same tag.
-+		 * Moreover, without waiting here data corruption would be
-+		 * possible, if a command that was aborted and reported as
-+		 * completed actually got executed *after* new commands sent
-+		 * after this TM command completed.
-+		 */
-+
-+ if (cmd->sent_for_exec && !cmd->done) {
-+ TRACE_MGMT_DBG("cmd %p (tag %llu) is being executed",
-+ cmd, (long long unsigned int)cmd->tag);
-+ mstb->done_counted = 1;
-+ mcmd->cmd_done_wait_count++;
-+ }
-+
-+		/*
-+		 * We don't have to wait for the command's status delivery to
-+		 * other initiators to finish; moreover, waiting for it could
-+		 * affect MPIO failover.
-+		 */
-+ if (!other_ini) {
-+ mstb->finish_counted = 1;
-+ mcmd->cmd_finish_wait_count++;
-+ }
-+
-+ if (mstb->done_counted || mstb->finish_counted) {
-+ TRACE(TRACE_SCSI|TRACE_MGMT_DEBUG, "cmd %p (tag %llu, "
-+ "sn %u) being executed/xmitted (state %d, "
-+ "op %x, proc time %ld sec., timeout %d sec.), "
-+ "deferring ABORT (cmd_done_wait_count %d, "
-+ "cmd_finish_wait_count %d)", cmd,
-+ (long long unsigned int)cmd->tag,
-+ cmd->sn, cmd->state, cmd->cdb[0],
-+ (long)(jiffies - cmd->start_time) / HZ,
-+ cmd->timeout / HZ, mcmd->cmd_done_wait_count,
-+ mcmd->cmd_finish_wait_count);
-+			/*
-+			 * cmd can't die here, because sess_list_lock is
-+			 * already taken and cmd is in the sess list
-+			 */
-+ list_add_tail(&mstb->cmd_mgmt_cmd_list_entry,
-+ &cmd->mgmt_cmd_list);
-+ } else {
-+ /* We don't need to wait for this cmd */
-+ mempool_free(mstb, scst_mgmt_stub_mempool);
-+ }
-+
-+ if (cmd->tgtt->on_abort_cmd)
-+ cmd->tgtt->on_abort_cmd(cmd);
-+ }
-+
-+unlock:
-+ spin_unlock_irqrestore(&scst_mcmd_lock, flags);
-+
-+ tm_dbg_release_cmd(cmd);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* No locks. Returns 0, if mcmd should be processed further. */
-+static int scst_set_mcmd_next_state(struct scst_mgmt_cmd *mcmd)
-+{
-+ int res;
-+
-+ spin_lock_irq(&scst_mcmd_lock);
-+
-+ switch (mcmd->state) {
-+ case SCST_MCMD_STATE_INIT:
-+ case SCST_MCMD_STATE_EXEC:
-+ if (mcmd->cmd_done_wait_count == 0) {
-+ mcmd->state = SCST_MCMD_STATE_AFFECTED_CMDS_DONE;
-+ res = 0;
-+ } else {
-+ TRACE(TRACE_SCSI|TRACE_MGMT_DEBUG,
-+ "cmd_done_wait_count(%d) not 0, "
-+ "preparing to wait", mcmd->cmd_done_wait_count);
-+ mcmd->state = SCST_MCMD_STATE_WAITING_AFFECTED_CMDS_DONE;
-+ res = -1;
-+ }
-+ break;
-+
-+ case SCST_MCMD_STATE_AFFECTED_CMDS_DONE:
-+ if (mcmd->cmd_finish_wait_count == 0) {
-+ mcmd->state = SCST_MCMD_STATE_DONE;
-+ res = 0;
-+ } else {
-+ TRACE(TRACE_SCSI|TRACE_MGMT_DEBUG,
-+ "cmd_finish_wait_count(%d) not 0, "
-+ "preparing to wait",
-+ mcmd->cmd_finish_wait_count);
-+ mcmd->state = SCST_MCMD_STATE_WAITING_AFFECTED_CMDS_FINISHED;
-+ res = -1;
-+ }
-+ break;
-+
-+ case SCST_MCMD_STATE_DONE:
-+ mcmd->state = SCST_MCMD_STATE_FINISHED;
-+ res = 0;
-+ break;
-+
-+ default:
-+ PRINT_CRIT_ERROR("Wrong mcmd %p state %d (fn %d, "
-+ "cmd_finish_wait_count %d, cmd_done_wait_count %d)",
-+ mcmd, mcmd->state, mcmd->fn,
-+ mcmd->cmd_finish_wait_count, mcmd->cmd_done_wait_count);
-+ spin_unlock_irq(&scst_mcmd_lock);
-+ res = -1;
-+ BUG();
-+ goto out;
-+ }
-+
-+ spin_unlock_irq(&scst_mcmd_lock);
-+
-+out:
-+ return res;
-+}
-+
-+/* IRQs supposed to be disabled */
-+static bool __scst_check_unblock_aborted_cmd(struct scst_cmd *cmd,
-+ struct list_head *list_entry)
-+{
-+ bool res;
-+ if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
-+ list_del(list_entry);
-+ spin_lock(&cmd->cmd_threads->cmd_list_lock);
-+ list_add_tail(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
-+ spin_unlock(&cmd->cmd_threads->cmd_list_lock);
-+		res = true;
-+	} else
-+		res = false;
-+ return res;
-+}
-+
-+static void scst_unblock_aborted_cmds(int scst_mutex_held)
-+{
-+ struct scst_device *dev;
-+
-+ TRACE_ENTRY();
-+
-+ if (!scst_mutex_held)
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
-+ struct scst_cmd *cmd, *tcmd;
-+ struct scst_tgt_dev *tgt_dev;
-+ spin_lock_bh(&dev->dev_lock);
-+ local_irq_disable();
-+ list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
-+ blocked_cmd_list_entry) {
-+ if (__scst_check_unblock_aborted_cmd(cmd,
-+ &cmd->blocked_cmd_list_entry)) {
-+ TRACE_MGMT_DBG("Unblock aborted blocked cmd %p",
-+ cmd);
-+ }
-+ }
-+ local_irq_enable();
-+ spin_unlock_bh(&dev->dev_lock);
-+
-+ local_irq_disable();
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ struct scst_order_data *order_data = tgt_dev->curr_order_data;
-+ spin_lock(&order_data->sn_lock);
-+ list_for_each_entry_safe(cmd, tcmd,
-+ &order_data->deferred_cmd_list,
-+ sn_cmd_list_entry) {
-+ if (__scst_check_unblock_aborted_cmd(cmd,
-+ &cmd->sn_cmd_list_entry)) {
-+ TRACE_MGMT_DBG("Unblocked aborted SN "
-+ "cmd %p (sn %u)",
-+ cmd, cmd->sn);
-+ order_data->def_cmd_count--;
-+ }
-+ }
-+ spin_unlock(&order_data->sn_lock);
-+ }
-+ local_irq_enable();
-+ }
-+
-+ if (!scst_mutex_held)
-+ mutex_unlock(&scst_mutex);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void __scst_abort_task_set(struct scst_mgmt_cmd *mcmd,
-+ struct scst_tgt_dev *tgt_dev)
-+{
-+ struct scst_cmd *cmd;
-+ struct scst_session *sess = tgt_dev->sess;
-+ bool other_ini;
-+
-+ TRACE_ENTRY();
-+
-+ if ((mcmd->fn == SCST_PR_ABORT_ALL) &&
-+ (mcmd->origin_pr_cmd->sess != sess))
-+ other_ini = true;
-+ else
-+ other_ini = false;
-+
-+ spin_lock_irq(&sess->sess_list_lock);
-+
-+ TRACE_DBG("Searching in sess cmd list (sess=%p)", sess);
-+ list_for_each_entry(cmd, &sess->sess_cmd_list,
-+ sess_cmd_list_entry) {
-+ if ((mcmd->fn == SCST_PR_ABORT_ALL) &&
-+ (mcmd->origin_pr_cmd == cmd))
-+ continue;
-+ if ((cmd->tgt_dev == tgt_dev) ||
-+ ((cmd->tgt_dev == NULL) &&
-+ (cmd->lun == tgt_dev->lun))) {
-+ if (mcmd->cmd_sn_set) {
-+ BUG_ON(!cmd->tgt_sn_set);
-+ if (scst_sn_before(mcmd->cmd_sn, cmd->tgt_sn) ||
-+ (mcmd->cmd_sn == cmd->tgt_sn))
-+ continue;
-+ }
-+ scst_abort_cmd(cmd, mcmd, other_ini, 0);
-+ }
-+ }
-+ spin_unlock_irq(&sess->sess_list_lock);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Returns 0 if the command processing should be continued, <0 otherwise */
-+static int scst_abort_task_set(struct scst_mgmt_cmd *mcmd)
-+{
-+ int res;
-+ struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
-+
-+ TRACE(TRACE_MGMT, "Aborting task set (lun=%lld, mcmd=%p)",
-+ (long long unsigned int)tgt_dev->lun, mcmd);
-+
-+ __scst_abort_task_set(mcmd, tgt_dev);
-+
-+ if (mcmd->fn == SCST_PR_ABORT_ALL) {
-+ struct scst_pr_abort_all_pending_mgmt_cmds_counter *pr_cnt =
-+ mcmd->origin_pr_cmd->pr_abort_counter;
-+ if (atomic_dec_and_test(&pr_cnt->pr_aborting_cnt))
-+ complete_all(&pr_cnt->pr_aborting_cmpl);
-+ }
-+
-+ tm_dbg_task_mgmt(mcmd->mcmd_tgt_dev->dev, "ABORT TASK SET/PR ABORT", 0);
-+
-+ scst_unblock_aborted_cmds(0);
-+
-+ scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
-+
-+ res = scst_set_mcmd_next_state(mcmd);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_is_cmd_belongs_to_dev(struct scst_cmd *cmd,
-+ struct scst_device *dev)
-+{
-+ struct scst_tgt_dev *tgt_dev = NULL;
-+ struct list_head *head;
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Finding match for dev %s and cmd %p (lun %lld)", dev->virt_name,
-+ cmd, (long long unsigned int)cmd->lun);
-+
-+ head = &cmd->sess->sess_tgt_dev_list[SESS_TGT_DEV_LIST_HASH_FN(cmd->lun)];
-+ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
-+ if (tgt_dev->lun == cmd->lun) {
-+ TRACE_DBG("dev %s found", tgt_dev->dev->virt_name);
-+ res = (tgt_dev->dev == dev);
-+ goto out;
-+ }
-+ }
-+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
-+
-+/* Returns 0 if the command processing should be continued, <0 otherwise */
-+static int scst_clear_task_set(struct scst_mgmt_cmd *mcmd)
-+{
-+ int res;
-+ struct scst_device *dev = mcmd->mcmd_tgt_dev->dev;
-+ struct scst_tgt_dev *tgt_dev;
-+ LIST_HEAD(UA_tgt_devs);
-+
-+ TRACE_ENTRY();
-+
-+ TRACE(TRACE_MGMT, "Clearing task set (lun=%lld, mcmd=%p)",
-+ (long long unsigned int)mcmd->lun, mcmd);
-+
-+#if 0 /* we are SAM-3 */
-+ /*
-+ * When a logical unit is aborting one or more tasks from a SCSI
-+ * initiator port with the TASK ABORTED status it should complete all
-+ * of those tasks before entering additional tasks from that SCSI
-+ * initiator port into the task set - SAM2
-+ */
-+ mcmd->needs_unblocking = 1;
-+ spin_lock_bh(&dev->dev_lock);
-+ scst_block_dev(dev);
-+ spin_unlock_bh(&dev->dev_lock);
-+#endif
-+
-+ __scst_abort_task_set(mcmd, mcmd->mcmd_tgt_dev);
-+
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ struct scst_session *sess = tgt_dev->sess;
-+ struct scst_cmd *cmd;
-+ int aborted = 0;
-+
-+ if (tgt_dev == mcmd->mcmd_tgt_dev)
-+ continue;
-+
-+ spin_lock_irq(&sess->sess_list_lock);
-+
-+ TRACE_DBG("Searching in sess cmd list (sess=%p)", sess);
-+ list_for_each_entry(cmd, &sess->sess_cmd_list,
-+ sess_cmd_list_entry) {
-+ if ((cmd->dev == dev) ||
-+ ((cmd->dev == NULL) &&
-+ scst_is_cmd_belongs_to_dev(cmd, dev))) {
-+ scst_abort_cmd(cmd, mcmd, 1, 0);
-+ aborted = 1;
-+ }
-+ }
-+ spin_unlock_irq(&sess->sess_list_lock);
-+
-+ if (aborted)
-+ list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
-+ &UA_tgt_devs);
-+ }
-+
-+ tm_dbg_task_mgmt(mcmd->mcmd_tgt_dev->dev, "CLEAR TASK SET", 0);
-+
-+ scst_unblock_aborted_cmds(1);
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ if (!dev->tas) {
-+ uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
-+ int sl;
-+
-+ sl = scst_set_sense(sense_buffer, sizeof(sense_buffer),
-+ dev->d_sense,
-+ SCST_LOAD_SENSE(scst_sense_cleared_by_another_ini_UA));
-+
-+ list_for_each_entry(tgt_dev, &UA_tgt_devs,
-+ extra_tgt_dev_list_entry) {
-+ scst_check_set_UA(tgt_dev, sense_buffer, sl, 0);
-+ }
-+ }
-+
-+ scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 0);
-+
-+ res = scst_set_mcmd_next_state(mcmd);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* Returns 0 if the command processing should be continued,
-+ * > 0 if it should be requeued, < 0 otherwise */
-+static int scst_mgmt_cmd_init(struct scst_mgmt_cmd *mcmd)
-+{
-+ int res = 0, rc;
-+
-+ TRACE_ENTRY();
-+
-+ switch (mcmd->fn) {
-+ case SCST_ABORT_TASK:
-+ {
-+ struct scst_session *sess = mcmd->sess;
-+ struct scst_cmd *cmd;
-+
-+ spin_lock_irq(&sess->sess_list_lock);
-+ cmd = __scst_find_cmd_by_tag(sess, mcmd->tag, true);
-+ if (cmd == NULL) {
-+ TRACE_MGMT_DBG("ABORT TASK: command "
-+ "for tag %llu not found",
-+ (long long unsigned int)mcmd->tag);
-+ mcmd->status = SCST_MGMT_STATUS_TASK_NOT_EXIST;
-+ spin_unlock_irq(&sess->sess_list_lock);
-+ res = scst_set_mcmd_next_state(mcmd);
-+ goto out;
-+ }
-+ __scst_cmd_get(cmd);
-+ spin_unlock_irq(&sess->sess_list_lock);
-+ TRACE_DBG("Cmd to abort %p for tag %llu found",
-+ cmd, (long long unsigned int)mcmd->tag);
-+ mcmd->cmd_to_abort = cmd;
-+ mcmd->state = SCST_MCMD_STATE_EXEC;
-+ break;
-+ }
-+
-+ case SCST_TARGET_RESET:
-+ case SCST_NEXUS_LOSS_SESS:
-+ case SCST_ABORT_ALL_TASKS_SESS:
-+ case SCST_NEXUS_LOSS:
-+ case SCST_ABORT_ALL_TASKS:
-+ case SCST_UNREG_SESS_TM:
-+ mcmd->state = SCST_MCMD_STATE_EXEC;
-+ break;
-+
-+ case SCST_ABORT_TASK_SET:
-+ case SCST_CLEAR_ACA:
-+ case SCST_CLEAR_TASK_SET:
-+ case SCST_LUN_RESET:
-+ case SCST_PR_ABORT_ALL:
-+ rc = scst_mgmt_translate_lun(mcmd);
-+ if (rc == 0)
-+ mcmd->state = SCST_MCMD_STATE_EXEC;
-+ else if (rc < 0) {
-+ PRINT_ERROR("Corresponding device for LUN %lld not "
-+ "found", (long long unsigned int)mcmd->lun);
-+ mcmd->status = SCST_MGMT_STATUS_LUN_NOT_EXIST;
-+ res = scst_set_mcmd_next_state(mcmd);
-+ } else
-+ res = rc;
-+ break;
-+
-+ default:
-+ BUG();
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* Returns 0 if the command processing should be continued, <0 otherwise */
-+static int scst_target_reset(struct scst_mgmt_cmd *mcmd)
-+{
-+ int res, rc;
-+ struct scst_device *dev;
-+ struct scst_acg *acg = mcmd->sess->acg;
-+ struct scst_acg_dev *acg_dev;
-+ int cont, c;
-+ LIST_HEAD(host_devs);
-+
-+ TRACE_ENTRY();
-+
-+ TRACE(TRACE_MGMT, "Target reset (mcmd %p, cmd count %d)",
-+ mcmd, atomic_read(&mcmd->sess->sess_cmd_count));
-+
-+ mcmd->needs_unblocking = 1;
-+
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
-+ struct scst_device *d;
-+ struct scst_tgt_dev *tgt_dev;
-+ int found = 0;
-+
-+ dev = acg_dev->dev;
-+
-+ spin_lock_bh(&dev->dev_lock);
-+ scst_block_dev(dev);
-+ scst_process_reset(dev, mcmd->sess, NULL, mcmd, true);
-+ spin_unlock_bh(&dev->dev_lock);
-+
-+ cont = 0;
-+ c = 0;
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ cont = 1;
-+ if (mcmd->sess == tgt_dev->sess) {
-+ rc = scst_call_dev_task_mgmt_fn(mcmd,
-+ tgt_dev, 0);
-+ if (rc == SCST_DEV_TM_NOT_COMPLETED)
-+ c = 1;
-+ else if ((rc < 0) &&
-+ (mcmd->status == SCST_MGMT_STATUS_SUCCESS))
-+ mcmd->status = rc;
-+ break;
-+ }
-+ }
-+ if (cont && !c)
-+ continue;
-+
-+ if (dev->scsi_dev == NULL)
-+ continue;
-+
-+ list_for_each_entry(d, &host_devs, tm_dev_list_entry) {
-+ if (dev->scsi_dev->host->host_no ==
-+ d->scsi_dev->host->host_no) {
-+ found = 1;
-+ break;
-+ }
-+ }
-+ if (!found)
-+ list_add_tail(&dev->tm_dev_list_entry, &host_devs);
-+
-+ tm_dbg_task_mgmt(dev, "TARGET RESET", 0);
-+ }
-+
-+ scst_unblock_aborted_cmds(1);
-+
-+	/*
-+	 * We assume here that completion callbacks will be called for all
-+	 * commands that are already on the devices on/after
-+	 * scsi_reset_provider() is called.
-+	 */
-+
-+ list_for_each_entry(dev, &host_devs, tm_dev_list_entry) {
-+ /* dev->scsi_dev must be non-NULL here */
-+		TRACE(TRACE_MGMT, "Resetting target on host %d",
-+		    dev->scsi_dev->host->host_no);
-+ rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_TARGET);
-+ TRACE(TRACE_MGMT, "Result of host %d target reset: %s",
-+ dev->scsi_dev->host->host_no,
-+ (rc == SUCCESS) ? "SUCCESS" : "FAILED");
-+#if 0
-+ if ((rc != SUCCESS) &&
-+ (mcmd->status == SCST_MGMT_STATUS_SUCCESS)) {
-+ /*
-+ * SCSI_TRY_RESET_BUS is also done by
-+ * scsi_reset_provider()
-+ */
-+ mcmd->status = SCST_MGMT_STATUS_FAILED;
-+ }
-+#else
-+ /*
-+ * scsi_reset_provider() returns very weird status, so let's
-+ * always succeed
-+ */
-+#endif
-+ }
-+
-+ list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
-+ dev = acg_dev->dev;
-+ if (dev->scsi_dev != NULL)
-+ dev->scsi_dev->was_reset = 0;
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ res = scst_set_mcmd_next_state(mcmd);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* Returns 0 if the command processing should be continued, <0 otherwise */
-+static int scst_lun_reset(struct scst_mgmt_cmd *mcmd)
-+{
-+ int res, rc;
-+ struct scst_tgt_dev *tgt_dev = mcmd->mcmd_tgt_dev;
-+ struct scst_device *dev = tgt_dev->dev;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE(TRACE_MGMT, "Resetting LUN %lld (mcmd %p)",
-+ (long long unsigned int)tgt_dev->lun, mcmd);
-+
-+ mcmd->needs_unblocking = 1;
-+
-+ spin_lock_bh(&dev->dev_lock);
-+ scst_block_dev(dev);
-+ scst_process_reset(dev, mcmd->sess, NULL, mcmd, true);
-+ spin_unlock_bh(&dev->dev_lock);
-+
-+ rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 1);
-+ if (rc != SCST_DEV_TM_NOT_COMPLETED)
-+ goto out_tm_dbg;
-+
-+ if (dev->scsi_dev != NULL) {
-+		TRACE(TRACE_MGMT, "Resetting device on host %d",
-+		    dev->scsi_dev->host->host_no);
-+ rc = scsi_reset_provider(dev->scsi_dev, SCSI_TRY_RESET_DEVICE);
-+#if 0
-+ if (rc != SUCCESS && mcmd->status == SCST_MGMT_STATUS_SUCCESS)
-+ mcmd->status = SCST_MGMT_STATUS_FAILED;
-+#else
-+ /*
-+ * scsi_reset_provider() returns very weird status, so let's
-+ * always succeed
-+ */
-+#endif
-+ dev->scsi_dev->was_reset = 0;
-+ }
-+
-+ scst_unblock_aborted_cmds(0);
-+
-+out_tm_dbg:
-+ tm_dbg_task_mgmt(mcmd->mcmd_tgt_dev->dev, "LUN RESET", 0);
-+
-+ res = scst_set_mcmd_next_state(mcmd);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* scst_mutex supposed to be held */
-+static void scst_do_nexus_loss_sess(struct scst_mgmt_cmd *mcmd)
-+{
-+ int i;
-+ struct scst_session *sess = mcmd->sess;
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
-+ struct list_head *head = &sess->sess_tgt_dev_list[i];
-+ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
-+ scst_nexus_loss(tgt_dev,
-+ (mcmd->fn != SCST_UNREG_SESS_TM));
-+ }
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Returns 0 if the command processing should be continued, <0 otherwise */
-+static int scst_abort_all_nexus_loss_sess(struct scst_mgmt_cmd *mcmd,
-+ int nexus_loss)
-+{
-+ int res;
-+ int i;
-+ struct scst_session *sess = mcmd->sess;
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ if (nexus_loss) {
-+ TRACE_MGMT_DBG("Nexus loss for sess %p (mcmd %p)",
-+ sess, mcmd);
-+ } else {
-+ TRACE_MGMT_DBG("Aborting all from sess %p (mcmd %p)",
-+ sess, mcmd);
-+ }
-+
-+ mutex_lock(&scst_mutex);
-+
-+ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
-+ struct list_head *head = &sess->sess_tgt_dev_list[i];
-+ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
-+ int rc;
-+
-+ __scst_abort_task_set(mcmd, tgt_dev);
-+
-+ rc = scst_call_dev_task_mgmt_fn(mcmd, tgt_dev, 0);
-+ if (rc < 0 && mcmd->status == SCST_MGMT_STATUS_SUCCESS)
-+ mcmd->status = rc;
-+
-+ tm_dbg_task_mgmt(tgt_dev->dev, "NEXUS LOSS SESS or "
-+ "ABORT ALL SESS or UNREG SESS",
-+ (mcmd->fn == SCST_UNREG_SESS_TM));
-+ }
-+ }
-+
-+ scst_unblock_aborted_cmds(1);
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ res = scst_set_mcmd_next_state(mcmd);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* scst_mutex supposed to be held */
-+static void scst_do_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd)
-+{
-+ int i;
-+ struct scst_tgt *tgt = mcmd->sess->tgt;
-+ struct scst_session *sess;
-+
-+ TRACE_ENTRY();
-+
-+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
-+ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
-+ struct list_head *head = &sess->sess_tgt_dev_list[i];
-+ struct scst_tgt_dev *tgt_dev;
-+ list_for_each_entry(tgt_dev, head,
-+ sess_tgt_dev_list_entry) {
-+ scst_nexus_loss(tgt_dev, true);
-+ }
-+ }
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int scst_abort_all_nexus_loss_tgt(struct scst_mgmt_cmd *mcmd,
-+ int nexus_loss)
-+{
-+ int res;
-+ int i;
-+ struct scst_tgt *tgt = mcmd->sess->tgt;
-+ struct scst_session *sess;
-+
-+ TRACE_ENTRY();
-+
-+ if (nexus_loss) {
-+ TRACE_MGMT_DBG("I_T Nexus loss (tgt %p, mcmd %p)",
-+ tgt, mcmd);
-+ } else {
-+ TRACE_MGMT_DBG("Aborting all from tgt %p (mcmd %p)",
-+ tgt, mcmd);
-+ }
-+
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
-+ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
-+ struct list_head *head = &sess->sess_tgt_dev_list[i];
-+ struct scst_tgt_dev *tgt_dev;
-+ list_for_each_entry(tgt_dev, head,
-+ sess_tgt_dev_list_entry) {
-+ int rc;
-+
-+ __scst_abort_task_set(mcmd, tgt_dev);
-+
-+ if (mcmd->sess == tgt_dev->sess) {
-+ rc = scst_call_dev_task_mgmt_fn(
-+ mcmd, tgt_dev, 0);
-+ if ((rc < 0) &&
-+ (mcmd->status == SCST_MGMT_STATUS_SUCCESS))
-+ mcmd->status = rc;
-+ }
-+
-+ tm_dbg_task_mgmt(tgt_dev->dev, "NEXUS LOSS or "
-+ "ABORT ALL", 0);
-+ }
-+ }
-+ }
-+
-+ scst_unblock_aborted_cmds(1);
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ res = scst_set_mcmd_next_state(mcmd);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_abort_task(struct scst_mgmt_cmd *mcmd)
-+{
-+ int res;
-+ struct scst_cmd *cmd = mcmd->cmd_to_abort;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Aborting task (cmd %p, sn %d, set %d, tag %llu, "
-+ "queue_type %x)", cmd, cmd->sn, cmd->sn_set,
-+ (long long unsigned int)mcmd->tag, cmd->queue_type);
-+
-+ if (mcmd->lun_set && (mcmd->lun != cmd->lun)) {
-+ PRINT_ERROR("ABORT TASK: LUN mismatch: mcmd LUN %llx, "
-+ "cmd LUN %llx, cmd tag %llu",
-+ (long long unsigned int)mcmd->lun,
-+ (long long unsigned int)cmd->lun,
-+ (long long unsigned int)mcmd->tag);
-+ mcmd->status = SCST_MGMT_STATUS_REJECTED;
-+ } else if (mcmd->cmd_sn_set &&
-+ (scst_sn_before(mcmd->cmd_sn, cmd->tgt_sn) ||
-+ (mcmd->cmd_sn == cmd->tgt_sn))) {
-+ PRINT_ERROR("ABORT TASK: SN mismatch: mcmd SN %x, "
-+ "cmd SN %x, cmd tag %llu", mcmd->cmd_sn,
-+ cmd->tgt_sn, (long long unsigned int)mcmd->tag);
-+ mcmd->status = SCST_MGMT_STATUS_REJECTED;
-+ } else {
-+ spin_lock_irq(&cmd->sess->sess_list_lock);
-+ scst_abort_cmd(cmd, mcmd, 0, 1);
-+ spin_unlock_irq(&cmd->sess->sess_list_lock);
-+
-+ scst_unblock_aborted_cmds(0);
-+ }
-+
-+ res = scst_set_mcmd_next_state(mcmd);
-+
-+ mcmd->cmd_to_abort = NULL; /* just in case */
-+
-+ __scst_cmd_put(cmd);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* Returns 0 if the command processing should be continued, <0 otherwise */
-+static int scst_mgmt_cmd_exec(struct scst_mgmt_cmd *mcmd)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ mcmd->status = SCST_MGMT_STATUS_SUCCESS;
-+
-+ switch (mcmd->fn) {
-+ case SCST_ABORT_TASK:
-+ res = scst_abort_task(mcmd);
-+ break;
-+
-+ case SCST_ABORT_TASK_SET:
-+ case SCST_PR_ABORT_ALL:
-+ res = scst_abort_task_set(mcmd);
-+ break;
-+
-+ case SCST_CLEAR_TASK_SET:
-+ if (mcmd->mcmd_tgt_dev->dev->tst ==
-+ SCST_CONTR_MODE_SEP_TASK_SETS)
-+ res = scst_abort_task_set(mcmd);
-+ else
-+ res = scst_clear_task_set(mcmd);
-+ break;
-+
-+ case SCST_LUN_RESET:
-+ res = scst_lun_reset(mcmd);
-+ break;
-+
-+ case SCST_TARGET_RESET:
-+ res = scst_target_reset(mcmd);
-+ break;
-+
-+ case SCST_ABORT_ALL_TASKS_SESS:
-+ res = scst_abort_all_nexus_loss_sess(mcmd, 0);
-+ break;
-+
-+ case SCST_NEXUS_LOSS_SESS:
-+ case SCST_UNREG_SESS_TM:
-+ res = scst_abort_all_nexus_loss_sess(mcmd, 1);
-+ break;
-+
-+ case SCST_ABORT_ALL_TASKS:
-+ res = scst_abort_all_nexus_loss_tgt(mcmd, 0);
-+ break;
-+
-+ case SCST_NEXUS_LOSS:
-+ res = scst_abort_all_nexus_loss_tgt(mcmd, 1);
-+ break;
-+
-+ case SCST_CLEAR_ACA:
-+ if (scst_call_dev_task_mgmt_fn(mcmd, mcmd->mcmd_tgt_dev, 1) ==
-+ SCST_DEV_TM_NOT_COMPLETED) {
-+ mcmd->status = SCST_MGMT_STATUS_FN_NOT_SUPPORTED;
-+ /* Nothing to do (yet) */
-+ }
-+ goto out_done;
-+
-+ default:
-+ PRINT_ERROR("Unknown task management function %d", mcmd->fn);
-+ mcmd->status = SCST_MGMT_STATUS_REJECTED;
-+ goto out_done;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_done:
-+ res = scst_set_mcmd_next_state(mcmd);
-+ goto out;
-+}
-+
-+static void scst_call_task_mgmt_affected_cmds_done(struct scst_mgmt_cmd *mcmd)
-+{
-+ struct scst_session *sess = mcmd->sess;
-+
-+ if ((sess->tgt->tgtt->task_mgmt_affected_cmds_done != NULL) &&
-+ (mcmd->fn != SCST_UNREG_SESS_TM) &&
-+ (mcmd->fn != SCST_PR_ABORT_ALL)) {
-+ TRACE_DBG("Calling target %s task_mgmt_affected_cmds_done(%p)",
-+ sess->tgt->tgtt->name, sess);
-+ sess->tgt->tgtt->task_mgmt_affected_cmds_done(mcmd);
-+ TRACE_MGMT_DBG("Target's %s task_mgmt_affected_cmds_done() "
-+ "returned", sess->tgt->tgtt->name);
-+ }
-+ return;
-+}
-+
-+static int scst_mgmt_affected_cmds_done(struct scst_mgmt_cmd *mcmd)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_mutex);
-+
-+ switch (mcmd->fn) {
-+ case SCST_NEXUS_LOSS_SESS:
-+ case SCST_UNREG_SESS_TM:
-+ scst_do_nexus_loss_sess(mcmd);
-+ break;
-+
-+ case SCST_NEXUS_LOSS:
-+ scst_do_nexus_loss_tgt(mcmd);
-+ break;
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ scst_call_task_mgmt_affected_cmds_done(mcmd);
-+
-+ res = scst_set_mcmd_next_state(mcmd);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void scst_mgmt_cmd_send_done(struct scst_mgmt_cmd *mcmd)
-+{
-+ struct scst_device *dev;
-+ struct scst_session *sess = mcmd->sess;
-+
-+ TRACE_ENTRY();
-+
-+ mcmd->state = SCST_MCMD_STATE_FINISHED;
-+ if (scst_is_strict_mgmt_fn(mcmd->fn) && (mcmd->completed_cmd_count > 0))
-+ mcmd->status = SCST_MGMT_STATUS_TASK_NOT_EXIST;
-+
-+ if (mcmd->fn < SCST_UNREG_SESS_TM)
-+ TRACE(TRACE_MGMT, "TM fn %d (%p) finished, "
-+ "status %d", mcmd->fn, mcmd, mcmd->status);
-+ else
-+ TRACE_MGMT_DBG("TM fn %d (%p) finished, "
-+ "status %d", mcmd->fn, mcmd, mcmd->status);
-+
-+ if (mcmd->fn == SCST_PR_ABORT_ALL) {
-+ mcmd->origin_pr_cmd->scst_cmd_done(mcmd->origin_pr_cmd,
-+ SCST_CMD_STATE_DEFAULT,
-+ SCST_CONTEXT_THREAD);
-+ } else if ((sess->tgt->tgtt->task_mgmt_fn_done != NULL) &&
-+ (mcmd->fn != SCST_UNREG_SESS_TM)) {
-+ TRACE_DBG("Calling target %s task_mgmt_fn_done(%p)",
-+ sess->tgt->tgtt->name, sess);
-+ sess->tgt->tgtt->task_mgmt_fn_done(mcmd);
-+ TRACE_MGMT_DBG("Target's %s task_mgmt_fn_done() "
-+ "returned", sess->tgt->tgtt->name);
-+ }
-+
-+ if (mcmd->needs_unblocking) {
-+ switch (mcmd->fn) {
-+ case SCST_LUN_RESET:
-+ case SCST_CLEAR_TASK_SET:
-+ dev = mcmd->mcmd_tgt_dev->dev;
-+ spin_lock_bh(&dev->dev_lock);
-+ scst_unblock_dev(dev);
-+ spin_unlock_bh(&dev->dev_lock);
-+ break;
-+
-+ case SCST_TARGET_RESET:
-+ {
-+ struct scst_acg *acg = mcmd->sess->acg;
-+ struct scst_acg_dev *acg_dev;
-+
-+ mutex_lock(&scst_mutex);
-+ list_for_each_entry(acg_dev, &acg->acg_dev_list,
-+ acg_dev_list_entry) {
-+ dev = acg_dev->dev;
-+ spin_lock_bh(&dev->dev_lock);
-+ scst_unblock_dev(dev);
-+ spin_unlock_bh(&dev->dev_lock);
-+ }
-+ mutex_unlock(&scst_mutex);
-+ break;
-+ }
-+
-+ default:
-+ BUG();
-+ break;
-+ }
-+ }
-+
-+ mcmd->tgt_priv = NULL;
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Returns >0, if cmd should be requeued */
-+static int scst_process_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+	 * We are in the TM thread, so mcmd->state is guaranteed not to
-+	 * change behind us.
-+ */
-+
-+ TRACE_DBG("mcmd %p, state %d", mcmd, mcmd->state);
-+
-+ while (1) {
-+ switch (mcmd->state) {
-+ case SCST_MCMD_STATE_INIT:
-+ res = scst_mgmt_cmd_init(mcmd);
-+ if (res != 0)
-+ goto out;
-+ break;
-+
-+ case SCST_MCMD_STATE_EXEC:
-+ if (scst_mgmt_cmd_exec(mcmd))
-+ goto out;
-+ break;
-+
-+ case SCST_MCMD_STATE_AFFECTED_CMDS_DONE:
-+ if (scst_mgmt_affected_cmds_done(mcmd))
-+ goto out;
-+ break;
-+
-+ case SCST_MCMD_STATE_DONE:
-+ scst_mgmt_cmd_send_done(mcmd);
-+ break;
-+
-+ case SCST_MCMD_STATE_FINISHED:
-+ scst_free_mgmt_cmd(mcmd);
-+ /* mcmd is dead */
-+ goto out;
-+
-+ default:
-+ PRINT_CRIT_ERROR("Wrong mcmd %p state %d (fn %d, "
-+ "cmd_finish_wait_count %d, cmd_done_wait_count "
-+ "%d)", mcmd, mcmd->state, mcmd->fn,
-+ mcmd->cmd_finish_wait_count,
-+ mcmd->cmd_done_wait_count);
-+ BUG();
-+ res = -1;
-+ goto out;
-+ }
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static inline int test_mgmt_cmd_list(void)
-+{
-+ int res = !list_empty(&scst_active_mgmt_cmd_list) ||
-+ unlikely(kthread_should_stop());
-+ return res;
-+}
-+
-+int scst_tm_thread(void *arg)
-+{
-+ TRACE_ENTRY();
-+
-+ PRINT_INFO("Task management thread started, PID %d", current->pid);
-+
-+ current->flags |= PF_NOFREEZE;
-+
-+ set_user_nice(current, -10);
-+
-+ spin_lock_irq(&scst_mcmd_lock);
-+ while (!kthread_should_stop()) {
-+ wait_queue_t wait;
-+ init_waitqueue_entry(&wait, current);
-+
-+ if (!test_mgmt_cmd_list()) {
-+ add_wait_queue_exclusive(&scst_mgmt_cmd_list_waitQ,
-+ &wait);
-+ for (;;) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ if (test_mgmt_cmd_list())
-+ break;
-+ spin_unlock_irq(&scst_mcmd_lock);
-+ schedule();
-+ spin_lock_irq(&scst_mcmd_lock);
-+ }
-+ set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&scst_mgmt_cmd_list_waitQ, &wait);
-+ }
-+
-+ while (!list_empty(&scst_active_mgmt_cmd_list)) {
-+ int rc;
-+ struct scst_mgmt_cmd *mcmd;
-+ mcmd = list_entry(scst_active_mgmt_cmd_list.next,
-+ typeof(*mcmd), mgmt_cmd_list_entry);
-+ TRACE_MGMT_DBG("Deleting mgmt cmd %p from active cmd "
-+ "list", mcmd);
-+ list_del(&mcmd->mgmt_cmd_list_entry);
-+ spin_unlock_irq(&scst_mcmd_lock);
-+ rc = scst_process_mgmt_cmd(mcmd);
-+ spin_lock_irq(&scst_mcmd_lock);
-+ if (rc > 0) {
-+ if (test_bit(SCST_FLAG_SUSPENDED, &scst_flags) &&
-+ !test_bit(SCST_FLAG_SUSPENDING,
-+ &scst_flags)) {
-+ TRACE_MGMT_DBG("Adding mgmt cmd %p to "
-+ "head of delayed mgmt cmd list",
-+ mcmd);
-+ list_add(&mcmd->mgmt_cmd_list_entry,
-+ &scst_delayed_mgmt_cmd_list);
-+ } else {
-+ TRACE_MGMT_DBG("Adding mgmt cmd %p to "
-+ "head of active mgmt cmd list",
-+ mcmd);
-+ list_add(&mcmd->mgmt_cmd_list_entry,
-+ &scst_active_mgmt_cmd_list);
-+ }
-+ }
-+ }
-+ }
-+ spin_unlock_irq(&scst_mcmd_lock);
-+
-+	 * If kthread_should_stop() is true, we are guaranteed to be in
-+	 * the middle of module unload, so scst_active_mgmt_cmd_list must
-+	 * be empty.
-+ * on the module unload, so scst_active_mgmt_cmd_list must be empty.
-+ */
-+ BUG_ON(!list_empty(&scst_active_mgmt_cmd_list));
-+
-+ PRINT_INFO("Task management thread PID %d finished", current->pid);
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+static struct scst_mgmt_cmd *scst_pre_rx_mgmt_cmd(struct scst_session
-+ *sess, int fn, int atomic, void *tgt_priv)
-+{
-+ struct scst_mgmt_cmd *mcmd = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(sess->tgt->tgtt->task_mgmt_fn_done == NULL)) {
-+ PRINT_ERROR("New mgmt cmd, but task_mgmt_fn_done() is NULL "
-+ "(target %s)", sess->tgt->tgtt->name);
-+ goto out;
-+ }
-+
-+ mcmd = scst_alloc_mgmt_cmd(atomic ? GFP_ATOMIC : GFP_KERNEL);
-+ if (mcmd == NULL) {
-+ PRINT_CRIT_ERROR("Lost TM fn %d, initiator %s", fn,
-+ sess->initiator_name);
-+ goto out;
-+ }
-+
-+ mcmd->sess = sess;
-+ mcmd->fn = fn;
-+ mcmd->state = SCST_MCMD_STATE_INIT;
-+ mcmd->tgt_priv = tgt_priv;
-+
-+ if (fn == SCST_PR_ABORT_ALL) {
-+ atomic_inc(&mcmd->origin_pr_cmd->pr_abort_counter->pr_abort_pending_cnt);
-+ atomic_inc(&mcmd->origin_pr_cmd->pr_abort_counter->pr_aborting_cnt);
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return mcmd;
-+}
-+
-+static int scst_post_rx_mgmt_cmd(struct scst_session *sess,
-+ struct scst_mgmt_cmd *mcmd)
-+{
-+ unsigned long flags;
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ scst_sess_get(sess);
-+
-+ if (unlikely(sess->shut_phase != SCST_SESS_SPH_READY)) {
-+ PRINT_CRIT_ERROR("New mgmt cmd while shutting down the "
-+ "session %p shut_phase %ld", sess, sess->shut_phase);
-+ BUG();
-+ }
-+
-+ local_irq_save(flags);
-+
-+ spin_lock(&sess->sess_list_lock);
-+ atomic_inc(&sess->sess_cmd_count);
-+
-+ if (unlikely(sess->init_phase != SCST_SESS_IPH_READY)) {
-+ switch (sess->init_phase) {
-+ case SCST_SESS_IPH_INITING:
-+ TRACE_DBG("Adding mcmd %p to init deferred mcmd list",
-+ mcmd);
-+ list_add_tail(&mcmd->mgmt_cmd_list_entry,
-+ &sess->init_deferred_mcmd_list);
-+ goto out_unlock;
-+ case SCST_SESS_IPH_SUCCESS:
-+ break;
-+ case SCST_SESS_IPH_FAILED:
-+ res = -1;
-+ goto out_unlock;
-+ default:
-+ BUG();
-+ }
-+ }
-+
-+ spin_unlock(&sess->sess_list_lock);
-+
-+ TRACE_MGMT_DBG("Adding mgmt cmd %p to active mgmt cmd list", mcmd);
-+ spin_lock(&scst_mcmd_lock);
-+ list_add_tail(&mcmd->mgmt_cmd_list_entry, &scst_active_mgmt_cmd_list);
-+ spin_unlock(&scst_mcmd_lock);
-+
-+ local_irq_restore(flags);
-+
-+ wake_up(&scst_mgmt_cmd_list_waitQ);
-+
-+out:
-+ TRACE_EXIT();
-+ return res;
-+
-+out_unlock:
-+ spin_unlock(&sess->sess_list_lock);
-+ local_irq_restore(flags);
-+ goto out;
-+}
-+
-+/**
-+ * scst_rx_mgmt_fn() - create a new management command and send it for execution
-+ *
-+ * Description:
-+ *    Creates a new management command and sends it for execution.
-+ *
-+ *    Returns 0 on success, error code otherwise.
-+ *
-+ * Must not be called in parallel with scst_unregister_session() for the
-+ * same sess.
-+ */
-+int scst_rx_mgmt_fn(struct scst_session *sess,
-+ const struct scst_rx_mgmt_params *params)
-+{
-+ int res = -EFAULT;
-+ struct scst_mgmt_cmd *mcmd = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ switch (params->fn) {
-+ case SCST_ABORT_TASK:
-+ BUG_ON(!params->tag_set);
-+ break;
-+ case SCST_TARGET_RESET:
-+ case SCST_ABORT_ALL_TASKS:
-+ case SCST_NEXUS_LOSS:
-+ break;
-+ default:
-+ BUG_ON(!params->lun_set);
-+ }
-+
-+ mcmd = scst_pre_rx_mgmt_cmd(sess, params->fn, params->atomic,
-+ params->tgt_priv);
-+ if (mcmd == NULL)
-+ goto out;
-+
-+ if (params->lun_set) {
-+ mcmd->lun = scst_unpack_lun(params->lun, params->lun_len);
-+ if (mcmd->lun == NO_SUCH_LUN)
-+ goto out_free;
-+ mcmd->lun_set = 1;
-+ }
-+
-+ if (params->tag_set)
-+ mcmd->tag = params->tag;
-+
-+ mcmd->cmd_sn_set = params->cmd_sn_set;
-+ mcmd->cmd_sn = params->cmd_sn;
-+
-+ if (params->fn < SCST_UNREG_SESS_TM)
-+ TRACE(TRACE_MGMT, "TM fn %d (%p)", params->fn, mcmd);
-+ else
-+ TRACE_MGMT_DBG("TM fn %d (%p)", params->fn, mcmd);
-+
-+ TRACE_MGMT_DBG("sess=%p, tag_set %d, tag %lld, lun_set %d, "
-+ "lun=%lld, cmd_sn_set %d, cmd_sn %d, priv %p", sess,
-+ params->tag_set,
-+ (long long unsigned int)params->tag,
-+ params->lun_set,
-+ (long long unsigned int)mcmd->lun,
-+ params->cmd_sn_set,
-+ params->cmd_sn,
-+ params->tgt_priv);
-+
-+ if (scst_post_rx_mgmt_cmd(sess, mcmd) != 0)
-+ goto out_free;
-+
-+ res = 0;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free:
-+ scst_free_mgmt_cmd(mcmd);
-+ mcmd = NULL;
-+ goto out;
-+}
-+EXPORT_SYMBOL(scst_rx_mgmt_fn);
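-+
-+/*
-+ * A minimal usage sketch, not part of SCST itself: how a target driver
-+ * could fill struct scst_rx_mgmt_params to pass an ABORT TASK TMF into
-+ * SCST. The function and variable names here are hypothetical; the tag
-+ * is assumed to come from the transport's TMF request.
-+ */
-+static int example_rx_abort_task(struct scst_session *sess, uint64_t tag,
-+	void *tgt_priv)
-+{
-+	struct scst_rx_mgmt_params params;
-+
-+	memset(&params, 0, sizeof(params));
-+	params.fn = SCST_ABORT_TASK;
-+	params.tag_set = 1;
-+	params.tag = tag;
-+	params.atomic = 0;	/* process context; pass SCST_ATOMIC from IRQ */
-+	params.tgt_priv = tgt_priv;
-+
-+	return scst_rx_mgmt_fn(sess, &params);
-+}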
-+
-+/*
-+ * Written by Jack Handy - jakkhandy@hotmail.com
-+ * Taken by Gennadiy Nerubayev <parakie@gmail.com> from
-+ * http://www.codeproject.com/KB/string/wildcmp.aspx. No license attached
-+ * to it, and it's posted on a free site; assumed to be free for use.
-+ *
-+ * Added the negative sign support - VLNB
-+ *
-+ * Also see comment for wildcmp().
-+ *
-+ * The user space part of iSCSI-SCST also has a copy of this code, so when
-+ * fixing a bug here, don't forget to fix the copy too!
-+ */
-+static bool __wildcmp(const char *wild, const char *string, int recursion_level)
-+{
-+ const char *cp = NULL, *mp = NULL;
-+
-+ while ((*string) && (*wild != '*')) {
-+ if ((*wild == '!') && (recursion_level == 0))
-+ return !__wildcmp(++wild, string, ++recursion_level);
-+
-+ if ((*wild != *string) && (*wild != '?'))
-+ return false;
-+
-+ wild++;
-+ string++;
-+ }
-+
-+ while (*string) {
-+ if ((*wild == '!') && (recursion_level == 0))
-+ return !__wildcmp(++wild, string, ++recursion_level);
-+
-+ if (*wild == '*') {
-+ if (!*++wild)
-+ return true;
-+
-+ mp = wild;
-+ cp = string+1;
-+ } else if ((*wild == *string) || (*wild == '?')) {
-+ wild++;
-+ string++;
-+ } else {
-+ wild = mp;
-+ string = cp++;
-+ }
-+ }
-+
-+ while (*wild == '*')
-+ wild++;
-+
-+ return !*wild;
-+}
-+
-+/*
-+ * Returns true if string "string" matches pattern "wild", false otherwise.
-+ * Pattern is a regular DOS-type pattern, containing '*' and '?' symbols.
-+ * '*' matches any sequence of symbols, '?' matches exactly one symbol.
-+ *
-+ * For instance:
-+ * if (wildcmp("bl?h.*", "blah.jpg")) {
-+ * // match
-+ * } else {
-+ * // no match
-+ * }
-+ *
-+ * It also supports the boolean inversion sign '!', which inverts the match
-+ * result of the rest of the pattern. Only one '!' is allowed in the pattern;
-+ * any other '!' characters are treated as regular symbols. For instance:
-+ * if (wildcmp("bl!?h.*", "blah.jpg")) {
-+ * // no match
-+ * } else {
-+ * // match
-+ * }
-+ *
-+ * Also see comment for __wildcmp().
-+ */
-+static bool wildcmp(const char *wild, const char *string)
-+{
-+ return __wildcmp(wild, string, 0);
-+}
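-+
-+/*
-+ * Illustration only, not part of the original code: a few pattern/string
-+ * pairs evaluated per the rules described above, including the '!'
-+ * inversion. The initiator names are made up.
-+ */
-+static void wildcmp_examples(void)
-+{
-+	bool r;
-+
-+	r = wildcmp("bl?h.*", "blah.jpg");	/* true: '?' eats 'a', '*' eats "jpg" */
-+	r = wildcmp("iqn.*", "iqn.2004-01.com.example:tgt");	/* true */
-+	r = wildcmp("!iqn.*", "iqn.2004-01.com.example:tgt");	/* false: inverted */
-+	r = wildcmp("!iqn.*", "eui.0123456789abcdef");	/* true: rest doesn't match */
-+	(void)r;
-+}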
-+
-+/* scst_mutex supposed to be held */
-+static struct scst_acg *scst_find_tgt_acg_by_name_wild(struct scst_tgt *tgt,
-+ const char *initiator_name)
-+{
-+ struct scst_acg *acg, *res = NULL;
-+ struct scst_acn *n;
-+
-+ TRACE_ENTRY();
-+
-+ if (initiator_name == NULL)
-+ goto out;
-+
-+ list_for_each_entry(acg, &tgt->tgt_acg_list, acg_list_entry) {
-+ list_for_each_entry(n, &acg->acn_list, acn_list_entry) {
-+ if (wildcmp(n->name, initiator_name)) {
-+ TRACE_DBG("Access control group %s found",
-+ acg->acg_name);
-+ res = acg;
-+ goto out;
-+ }
-+ }
-+ }
-+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
-+
-+/* Must be called under scst_mutex */
-+static struct scst_acg *__scst_find_acg(struct scst_tgt *tgt,
-+ const char *initiator_name)
-+{
-+ struct scst_acg *acg = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ acg = scst_find_tgt_acg_by_name_wild(tgt, initiator_name);
-+ if (acg == NULL)
-+ acg = tgt->default_acg;
-+
-+ TRACE_EXIT_HRES((unsigned long)acg);
-+ return acg;
-+}
-+
-+/* Must be called under scst_mutex */
-+struct scst_acg *scst_find_acg(const struct scst_session *sess)
-+{
-+ return __scst_find_acg(sess->tgt, sess->initiator_name);
-+}
-+
-+/**
-+ * scst_initiator_has_luns() - check if this initiator will see any LUNs
-+ *
-+ * Checks if this initiator will see any LUNs upon connecting to this target.
-+ * Returns true if yes and false otherwise.
-+ */
-+bool scst_initiator_has_luns(struct scst_tgt *tgt, const char *initiator_name)
-+{
-+ bool res;
-+ struct scst_acg *acg;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_mutex);
-+
-+ acg = __scst_find_acg(tgt, initiator_name);
-+
-+ res = !list_empty(&acg->acg_dev_list);
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(scst_initiator_has_luns);
-+
-+static int scst_init_session(struct scst_session *sess)
-+{
-+ int res = 0;
-+ struct scst_cmd *cmd;
-+ struct scst_mgmt_cmd *mcmd, *tm;
-+ int mwake = 0;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_mutex);
-+
-+ sess->acg = scst_find_acg(sess);
-+
-+ PRINT_INFO("Using security group \"%s\" for initiator \"%s\" "
-+ "(target %s)", sess->acg->acg_name, sess->initiator_name,
-+ sess->tgt->tgt_name);
-+
-+ list_add_tail(&sess->acg_sess_list_entry, &sess->acg->acg_sess_list);
-+
-+ TRACE_DBG("Adding sess %p to tgt->sess_list", sess);
-+ list_add_tail(&sess->sess_list_entry, &sess->tgt->sess_list);
-+
-+ if (sess->tgt->tgtt->get_initiator_port_transport_id != NULL) {
-+ res = sess->tgt->tgtt->get_initiator_port_transport_id(
-+ sess->tgt, sess, &sess->transport_id);
-+ if (res != 0) {
-+ PRINT_ERROR("Unable to make initiator %s port "
-+ "transport id", sess->initiator_name);
-+ goto failed;
-+ }
-+ TRACE_PR("sess %p (ini %s), transport id %s/%d", sess,
-+ sess->initiator_name,
-+ debug_transport_id_to_initiator_name(
-+ sess->transport_id), sess->tgt->rel_tgt_id);
-+ }
-+
-+ res = scst_sess_sysfs_create(sess);
-+ if (res != 0)
-+ goto failed;
-+
-+ /*
-+	 * scst_sess_alloc_tgt_devs() must be called after the session has been
-+	 * added to the sess_list to not race with scst_check_reassign_sess()!
-+ */
-+ res = scst_sess_alloc_tgt_devs(sess);
-+
-+failed:
-+ mutex_unlock(&scst_mutex);
-+
-+ if (sess->init_result_fn) {
-+ TRACE_DBG("Calling init_result_fn(%p)", sess);
-+ sess->init_result_fn(sess, sess->reg_sess_data, res);
-+ TRACE_DBG("%s", "init_result_fn() returned");
-+ }
-+
-+ spin_lock_irq(&sess->sess_list_lock);
-+
-+ if (res == 0)
-+ sess->init_phase = SCST_SESS_IPH_SUCCESS;
-+ else
-+ sess->init_phase = SCST_SESS_IPH_FAILED;
-+
-+restart:
-+ list_for_each_entry(cmd, &sess->init_deferred_cmd_list,
-+ cmd_list_entry) {
-+ TRACE_DBG("Deleting cmd %p from init deferred cmd list", cmd);
-+ list_del(&cmd->cmd_list_entry);
-+ atomic_dec(&sess->sess_cmd_count);
-+ spin_unlock_irq(&sess->sess_list_lock);
-+ scst_cmd_init_done(cmd, SCST_CONTEXT_THREAD);
-+ spin_lock_irq(&sess->sess_list_lock);
-+ goto restart;
-+ }
-+
-+ spin_lock(&scst_mcmd_lock);
-+ list_for_each_entry_safe(mcmd, tm, &sess->init_deferred_mcmd_list,
-+ mgmt_cmd_list_entry) {
-+ TRACE_DBG("Moving mgmt command %p from init deferred mcmd list",
-+ mcmd);
-+ list_move_tail(&mcmd->mgmt_cmd_list_entry,
-+ &scst_active_mgmt_cmd_list);
-+ mwake = 1;
-+ }
-+
-+ spin_unlock(&scst_mcmd_lock);
-+ /*
-+	 * In case of an error at this point, the calling target driver is
-+	 * supposed to have already initiated this session's unregistration.
-+ */
-+ sess->init_phase = SCST_SESS_IPH_READY;
-+ spin_unlock_irq(&sess->sess_list_lock);
-+
-+ if (mwake)
-+ wake_up(&scst_mgmt_cmd_list_waitQ);
-+
-+ scst_sess_put(sess);
-+
-+ TRACE_EXIT();
-+ return res;
-+}
-+
-+/**
-+ * scst_register_session() - register session
-+ * @tgt: target
-+ * @atomic:	true if the function is called in atomic context. If false,
-+ *		this function will block until the session registration is
-+ *		completed.
-+ * @initiator_name: remote initiator's name, any NULL-terminated string,
-+ *		e.g. iSCSI name, which is used as the key to find the
-+ *		appropriate access control group. May be NULL, in which case
-+ *		the target's default LUNs are used.
-+ * @tgt_priv: pointer to target driver's private data
-+ * @result_fn_data: any target driver supplied data
-+ * @result_fn: pointer to the function that will be asynchronously called
-+ * when session initialization finishes.
-+ * Can be NULL. Parameters:
-+ * - sess - session
-+ *		- data - data supplied by the target driver to
-+ *		  scst_register_session()
-+ * - result - session initialization result, 0 on success or
-+ * appropriate error code otherwise
-+ *
-+ * Description:
-+ *    Registers a new session. Returns the new session on success or NULL
-+ *    otherwise.
-+ *
-+ * Note: Session creation and initialization is a complex task that
-+ *	requires a sleeping context, so it can't be fully done
-+ *	in interrupt context. Therefore, if scst_register_session()
-+ *	is called from atomic context, the "bottom half" of it will be
-+ *	done in SCST thread context. In this case scst_register_session()
-+ *	will return a not yet completely initialized session, but the
-+ *	target driver can supply commands to this session via scst_rx_cmd().
-+ *	Processing of those commands will be delayed inside SCST until
-+ *	the session initialization is finished, then restarted. The
-+ *	target driver will be notified about the completion of the
-+ *	session initialization via result_fn(). On success the target
-+ *	driver need not do anything, but if the initialization fails,
-+ *	the target driver must ensure that no new commands are being
-+ *	sent or will be sent to SCST after result_fn() returns. All
-+ *	commands already sent to SCST for the failed session will be
-+ *	returned in xmit_response() with BUSY status. In case of failure
-+ *	the driver shall call scst_unregister_session() inside
-+ *	result_fn(); it will NOT be called automatically.
-+ */
-+struct scst_session *scst_register_session(struct scst_tgt *tgt, int atomic,
-+ const char *initiator_name, void *tgt_priv, void *result_fn_data,
-+ void (*result_fn) (struct scst_session *sess, void *data, int result))
-+{
-+ struct scst_session *sess;
-+ int res;
-+ unsigned long flags;
-+
-+ TRACE_ENTRY();
-+
-+ sess = scst_alloc_session(tgt, atomic ? GFP_ATOMIC : GFP_KERNEL,
-+ initiator_name);
-+ if (sess == NULL)
-+ goto out;
-+
-+ scst_sess_set_tgt_priv(sess, tgt_priv);
-+
-+ scst_sess_get(sess); /* one for registered session */
-+ scst_sess_get(sess); /* one held until sess is inited */
-+
-+ if (atomic) {
-+ sess->reg_sess_data = result_fn_data;
-+ sess->init_result_fn = result_fn;
-+ spin_lock_irqsave(&scst_mgmt_lock, flags);
-+ TRACE_DBG("Adding sess %p to scst_sess_init_list", sess);
-+ list_add_tail(&sess->sess_init_list_entry,
-+ &scst_sess_init_list);
-+ spin_unlock_irqrestore(&scst_mgmt_lock, flags);
-+ wake_up(&scst_mgmt_waitQ);
-+ } else {
-+ res = scst_init_session(sess);
-+ if (res != 0)
-+ goto out_free;
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return sess;
-+
-+out_free:
-+ scst_free_session(sess);
-+ sess = NULL;
-+ goto out;
-+}
-+EXPORT_SYMBOL_GPL(scst_register_session);
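-+
-+/*
-+ * A hypothetical sketch, not part of SCST: registering a session from
-+ * atomic context with a result callback, following the description
-+ * above. struct my_conn and its fields are invented for illustration.
-+ */
-+struct my_conn {
-+	struct scst_session *sess;
-+	const char *initiator_name;
-+};
-+
-+static void my_sess_init_done(struct scst_session *sess, void *data, int result)
-+{
-+	struct my_conn *conn = data;
-+
-+	if (result != 0) {
-+		/* Per the note above, on failure the driver must unregister */
-+		conn->sess = NULL;
-+		scst_unregister_session(sess, 0, NULL);
-+	}
-+}
-+
-+static int my_register(struct scst_tgt *tgt, struct my_conn *conn)
-+{
-+	conn->sess = scst_register_session(tgt, 1 /* atomic */,
-+				conn->initiator_name, conn /* tgt_priv */,
-+				conn /* result_fn_data */, my_sess_init_done);
-+	return (conn->sess != NULL) ? 0 : -ENOMEM;
-+}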
-+
-+/**
-+ * scst_register_session_non_gpl() - register session (non-GPL version)
-+ * @tgt: target
-+ * @initiator_name: remote initiator's name, any NULL-terminated string,
-+ *		e.g. iSCSI name, which is used as the key to find the
-+ *		appropriate access control group. May be NULL, in which case
-+ *		the target's default LUNs are used.
-+ * @tgt_priv: pointer to target driver's private data
-+ *
-+ * Description:
-+ *    Registers a new session. Returns the new session on success or NULL otherwise.
-+ */
-+struct scst_session *scst_register_session_non_gpl(struct scst_tgt *tgt,
-+ const char *initiator_name, void *tgt_priv)
-+{
-+ return scst_register_session(tgt, 0, initiator_name, tgt_priv,
-+ NULL, NULL);
-+}
-+EXPORT_SYMBOL(scst_register_session_non_gpl);
-+
-+/**
-+ * scst_unregister_session() - unregister session
-+ * @sess: session to be unregistered
-+ * @wait:	if true, instructs to wait until all commands that are
-+ *		currently being executed and belong to the session have
-+ *		finished. Otherwise, the target driver should be prepared to
-+ *		receive xmit_response() for the session's commands after
-+ *		scst_unregister_session() returns.
-+ * @unreg_done_fn: pointer to the function that will be asynchronously called
-+ * when the last session's command finishes and
-+ * the session is about to be completely freed. Can be NULL.
-+ * Parameter:
-+ * - sess - session
-+ *
-+ * Unregisters session.
-+ *
-+ * Notes:
-+ * - All outstanding commands will be finished regularly. After
-+ *   scst_unregister_session() has returned, no new commands must be sent
-+ *   to SCST via scst_rx_cmd().
-+ *
-+ * - The caller must ensure that no scst_rx_cmd() or scst_rx_mgmt_fn_*() is
-+ * called in parallel with scst_unregister_session().
-+ *
-+ * - Can be called before result_fn() of scst_register_session() has been
-+ *   called, i.e. during the session registration/initialization.
-+ *
-+ * - It is highly recommended to call scst_unregister_session() as soon as it
-+ *   becomes clear that the session will be unregistered, rather than waiting
-+ *   until all related commands have finished. This function provides the
-+ *   wait functionality, but it also starts recovering stuck commands, if
-+ *   there are any; otherwise, your target driver could wait for those
-+ *   commands forever.
-+ */
-+void scst_unregister_session(struct scst_session *sess, int wait,
-+ void (*unreg_done_fn) (struct scst_session *sess))
-+{
-+ unsigned long flags;
-+ DECLARE_COMPLETION_ONSTACK(c);
-+ int rc, lun;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Unregistering session %p (wait %d)", sess, wait);
-+
-+ sess->unreg_done_fn = unreg_done_fn;
-+
-+ /* Abort all outstanding commands and clear reservation, if necessary */
-+ lun = 0;
-+ rc = scst_rx_mgmt_fn_lun(sess, SCST_UNREG_SESS_TM,
-+ (uint8_t *)&lun, sizeof(lun), SCST_ATOMIC, NULL);
-+ if (rc != 0) {
-+ PRINT_ERROR("SCST_UNREG_SESS_TM failed %d (sess %p)",
-+ rc, sess);
-+ }
-+
-+ sess->shut_phase = SCST_SESS_SPH_SHUTDOWN;
-+
-+ spin_lock_irqsave(&scst_mgmt_lock, flags);
-+
-+ if (wait)
-+ sess->shutdown_compl = &c;
-+
-+ spin_unlock_irqrestore(&scst_mgmt_lock, flags);
-+
-+ scst_sess_put(sess);
-+
-+ if (wait) {
-+ TRACE_DBG("Waiting for session %p to complete", sess);
-+ wait_for_completion(&c);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_unregister_session);
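-+
-+/*
-+ * A hypothetical counterpart to the registration sketch above: tearing
-+ * the session down from process context, waiting until all session
-+ * commands have finished, as recommended in the notes.
-+ */
-+static void my_unregister(struct my_conn *conn)
-+{
-+	if (conn->sess != NULL) {
-+		scst_unregister_session(conn->sess, 1 /* wait */, NULL);
-+		conn->sess = NULL;
-+	}
-+}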
-+
-+/**
-+ * scst_unregister_session_non_gpl() - unregister session, non-GPL version
-+ * @sess: session to be unregistered
-+ *
-+ * Unregisters session.
-+ *
-+ * See notes for scst_unregister_session() above.
-+ */
-+void scst_unregister_session_non_gpl(struct scst_session *sess)
-+{
-+ TRACE_ENTRY();
-+
-+ scst_unregister_session(sess, 1, NULL);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL(scst_unregister_session_non_gpl);
-+
-+static inline int test_mgmt_list(void)
-+{
-+ int res = !list_empty(&scst_sess_init_list) ||
-+ !list_empty(&scst_sess_shut_list) ||
-+ unlikely(kthread_should_stop());
-+ return res;
-+}
-+
-+int scst_global_mgmt_thread(void *arg)
-+{
-+ struct scst_session *sess;
-+
-+ TRACE_ENTRY();
-+
-+ PRINT_INFO("Management thread started, PID %d", current->pid);
-+
-+ current->flags |= PF_NOFREEZE;
-+
-+ set_user_nice(current, -10);
-+
-+ spin_lock_irq(&scst_mgmt_lock);
-+ while (!kthread_should_stop()) {
-+ wait_queue_t wait;
-+ init_waitqueue_entry(&wait, current);
-+
-+ if (!test_mgmt_list()) {
-+ add_wait_queue_exclusive(&scst_mgmt_waitQ, &wait);
-+ for (;;) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ if (test_mgmt_list())
-+ break;
-+ spin_unlock_irq(&scst_mgmt_lock);
-+ schedule();
-+ spin_lock_irq(&scst_mgmt_lock);
-+ }
-+ set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&scst_mgmt_waitQ, &wait);
-+ }
-+
-+ while (!list_empty(&scst_sess_init_list)) {
-+ sess = list_entry(scst_sess_init_list.next,
-+ typeof(*sess), sess_init_list_entry);
-+ TRACE_DBG("Removing sess %p from scst_sess_init_list",
-+ sess);
-+ list_del(&sess->sess_init_list_entry);
-+ spin_unlock_irq(&scst_mgmt_lock);
-+
-+ if (sess->init_phase == SCST_SESS_IPH_INITING)
-+ scst_init_session(sess);
-+ else {
-+ PRINT_CRIT_ERROR("session %p is in "
-+ "scst_sess_init_list, but in unknown "
-+ "init phase %x", sess,
-+ sess->init_phase);
-+ BUG();
-+ }
-+
-+ spin_lock_irq(&scst_mgmt_lock);
-+ }
-+
-+ while (!list_empty(&scst_sess_shut_list)) {
-+ sess = list_entry(scst_sess_shut_list.next,
-+ typeof(*sess), sess_shut_list_entry);
-+ TRACE_DBG("Removing sess %p from scst_sess_shut_list",
-+ sess);
-+ list_del(&sess->sess_shut_list_entry);
-+ spin_unlock_irq(&scst_mgmt_lock);
-+
-+ switch (sess->shut_phase) {
-+ case SCST_SESS_SPH_SHUTDOWN:
-+ BUG_ON(atomic_read(&sess->refcnt) != 0);
-+ scst_free_session_callback(sess);
-+ break;
-+ default:
-+ PRINT_CRIT_ERROR("session %p is in "
-+ "scst_sess_shut_list, but in unknown "
-+ "shut phase %lx", sess,
-+ sess->shut_phase);
-+ BUG();
-+ break;
-+ }
-+
-+ spin_lock_irq(&scst_mgmt_lock);
-+ }
-+ }
-+ spin_unlock_irq(&scst_mgmt_lock);
-+
-+ /*
-+	 * If kthread_should_stop() is true, we are guaranteed to be in
-+	 * the middle of module unload, so both lists must be empty.
-+ */
-+ BUG_ON(!list_empty(&scst_sess_init_list));
-+ BUG_ON(!list_empty(&scst_sess_shut_list));
-+
-+ PRINT_INFO("Management thread PID %d finished", current->pid);
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+/* Called under sess->sess_list_lock */
-+static struct scst_cmd *__scst_find_cmd_by_tag(struct scst_session *sess,
-+ uint64_t tag, bool to_abort)
-+{
-+ struct scst_cmd *cmd, *res = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ /* ToDo: hash list */
-+
-+ TRACE_DBG("%s (sess=%p, tag=%llu)", "Searching in sess cmd list",
-+ sess, (long long unsigned int)tag);
-+
-+ list_for_each_entry(cmd, &sess->sess_cmd_list,
-+ sess_cmd_list_entry) {
-+ if (cmd->tag == tag) {
-+ /*
-+			 * We must not count done commands, because
-+			 * they have been submitted for transmission.
-+			 * Otherwise we can have a race when, for
-+			 * some reason, a cmd's release is delayed
-+			 * after transmission and the initiator sends
-+			 * a cmd with the same tag => a wrong cmd
-+			 * could be returned.
-+ */
-+ if (cmd->done) {
-+ if (to_abort) {
-+ /*
-+ * We should return the latest not
-+ * aborted cmd with this tag.
-+ */
-+ if (res == NULL)
-+ res = cmd;
-+ else {
-+ if (test_bit(SCST_CMD_ABORTED,
-+ &res->cmd_flags)) {
-+ res = cmd;
-+ } else if (!test_bit(SCST_CMD_ABORTED,
-+ &cmd->cmd_flags))
-+ res = cmd;
-+ }
-+ }
-+ continue;
-+ } else {
-+ res = cmd;
-+ break;
-+ }
-+ }
-+ }
-+
-+ TRACE_EXIT();
-+ return res;
-+}
-+
-+/**
-+ * scst_find_cmd() - find command by custom comparison function
-+ *
-+ * Finds a command based on user-supplied data and a comparison
-+ * callback function, which should return true if the command is found.
-+ * Returns the command on success or NULL otherwise.
-+ */
-+struct scst_cmd *scst_find_cmd(struct scst_session *sess, void *data,
-+ int (*cmp_fn) (struct scst_cmd *cmd,
-+ void *data))
-+{
-+ struct scst_cmd *cmd = NULL;
-+ unsigned long flags = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (cmp_fn == NULL)
-+ goto out;
-+
-+ spin_lock_irqsave(&sess->sess_list_lock, flags);
-+
-+ TRACE_DBG("Searching in sess cmd list (sess=%p)", sess);
-+ list_for_each_entry(cmd, &sess->sess_cmd_list, sess_cmd_list_entry) {
-+ /*
-+	 * We must not count done commands, because they have been
-+	 * submitted for transmission. Otherwise we can have a race
-+	 * when, for some reason, a cmd's release is delayed after
-+	 * transmission and the initiator sends a cmd with the same
-+	 * tag => a wrong cmd could be returned.
-+ */
-+ if (cmd->done)
-+ continue;
-+ if (cmp_fn(cmd, data))
-+ goto out_unlock;
-+ }
-+
-+ cmd = NULL;
-+
-+out_unlock:
-+ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
-+
-+out:
-+ TRACE_EXIT();
-+ return cmd;
-+}
-+EXPORT_SYMBOL(scst_find_cmd);
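-+
-+/*
-+ * A hypothetical comparison callback for scst_find_cmd(), assuming the
-+ * driver earlier attached its own request structure to the command via
-+ * scst_cmd_set_tgt_priv():
-+ */
-+static int my_cmp_by_tgt_priv(struct scst_cmd *cmd, void *data)
-+{
-+	return scst_cmd_get_tgt_priv(cmd) == data;
-+}
-+
-+/* Usage sketch: cmd = scst_find_cmd(sess, my_req, my_cmp_by_tgt_priv); */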
-+
-+/**
-+ * scst_find_cmd_by_tag() - find command by tag
-+ *
-+ * Finds a command based on the supplied tag, comparing it with the one
-+ * previously set by scst_cmd_set_tag(). Returns the found command on
-+ * success or NULL otherwise.
-+ */
-+struct scst_cmd *scst_find_cmd_by_tag(struct scst_session *sess,
-+ uint64_t tag)
-+{
-+ unsigned long flags;
-+ struct scst_cmd *cmd;
-+ spin_lock_irqsave(&sess->sess_list_lock, flags);
-+ cmd = __scst_find_cmd_by_tag(sess, tag, false);
-+ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
-+ return cmd;
-+}
-+EXPORT_SYMBOL(scst_find_cmd_by_tag);
-diff -uprN orig/linux-3.2/drivers/scst/scst_lib.c linux-3.2/drivers/scst/scst_lib.c
---- orig/linux-3.2/drivers/scst/scst_lib.c
-+++ linux-3.2/drivers/scst/scst_lib.c
-@@ -0,0 +1,7480 @@
-+/*
-+ * scst_lib.c
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <linux/slab.h>
-+#include <linux/sched.h>
-+#include <linux/kthread.h>
-+#include <linux/cdrom.h>
-+#include <linux/unistd.h>
-+#include <linux/string.h>
-+#include <linux/ctype.h>
-+#include <linux/delay.h>
-+#include <linux/vmalloc.h>
-+#include <asm/kmap_types.h>
-+#include <asm/unaligned.h>
-+
-+#include <scst/scst.h>
-+#include "scst_priv.h"
-+#include "scst_mem.h"
-+#include "scst_pres.h"
-+
-+struct scsi_io_context {
-+ void *data;
-+ void (*done)(void *data, char *sense, int result, int resid);
-+ char sense[SCST_SENSE_BUFFERSIZE];
-+};
-+static struct kmem_cache *scsi_io_context_cache;
-+
-+/* get_trans_len_x extracts x bytes from the cdb as the length, starting at off */
-+static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off);
-+static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off);
-+static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off);
-+static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off);
-+static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off);
-+
-+static int get_bidi_trans_len_2(struct scst_cmd *cmd, uint8_t off);
-+
-+/* for special commands */
-+static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off);
-+static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off);
-+static int get_trans_len_serv_act_in(struct scst_cmd *cmd, uint8_t off);
-+static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off);
-+static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off);
-+static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off);
-+static int get_trans_cdb_len_10(struct scst_cmd *cmd, uint8_t off);
-+static int get_trans_len_prevent_allow_medium_removal(struct scst_cmd *cmd,
-+ uint8_t off);
-+static int get_trans_len_3_read_elem_stat(struct scst_cmd *cmd, uint8_t off);
-+static int get_trans_len_start_stop(struct scst_cmd *cmd, uint8_t off);
-+
-+/*
-++=====================================-============-======-
-+| Command name | Operation | Type |
-+| | code | |
-+|-------------------------------------+------------+------+
-+
-++=========================================================+
-+|Key: M = command implementation is mandatory. |
-+| O = command implementation is optional. |
-+| V = Vendor-specific |
-+| R = Reserved |
-+| ' '= DON'T use for this device |
-++=========================================================+
-+*/
-+
-+#define SCST_CDB_MANDATORY 'M' /* mandatory */
-+#define SCST_CDB_OPTIONAL 'O' /* optional */
-+#define SCST_CDB_VENDOR 'V' /* vendor */
-+#define SCST_CDB_RESERVED 'R' /* reserved */
-+#define SCST_CDB_NOTSUPP ' ' /* don't use */
-+
-+struct scst_sdbops {
-+ uint8_t ops; /* SCSI-2 op codes */
-+ uint8_t devkey[16]; /* Key for every device type M,O,V,R
-+ * type_disk devkey[0]
-+ * type_tape devkey[1]
-+ * type_printer devkey[2]
-+ * type_processor devkey[3]
-+ * type_worm devkey[4]
-+ * type_cdrom devkey[5]
-+ * type_scanner devkey[6]
-+ * type_mod devkey[7]
-+ * type_changer devkey[8]
-+ * type_commdev devkey[9]
-+ * type_reserv devkey[A]
-+ * type_reserv devkey[B]
-+ * type_raid devkey[C]
-+ * type_enclosure devkey[D]
-+ * type_reserv devkey[E]
-+ * type_reserv devkey[F]
-+ */
-+ const char *op_name; /* SCSI-2 op codes full name */
-+ uint8_t direction; /* init --> target: SCST_DATA_WRITE
-+ * target --> init: SCST_DATA_READ
-+ */
-+ uint32_t flags; /* opcode -- various flags */
-+ uint8_t off; /* length offset in cdb */
-+ int (*get_trans_len)(struct scst_cmd *cmd, uint8_t off);
-+};
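-+
-+/*
-+ * For example, in the table below TEST UNIT READY has devkey
-+ * "MMMMMMMMMMMMMMMM", i.e. the opcode is mandatory for all 16 device
-+ * types, while REWIND has an 'M' only at devkey[1] (type_tape), so
-+ * that opcode applies to tape devices only.
-+ */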
-+
-+static int scst_scsi_op_list[256];
-+
-+#define FLAG_NONE 0
-+
-+static const struct scst_sdbops scst_scsi_op_table[] = {
-+ /*
-+ * +-------------------> TYPE_IS_DISK (0)
-+ * |
-+ * |+------------------> TYPE_IS_TAPE (1)
-+ * ||
-+ * || +----------------> TYPE_IS_PROCESSOR (3)
-+ * || |
-+ * || | +--------------> TYPE_IS_CDROM (5)
-+ * || | |
-+ * || | | +------------> TYPE_IS_MOD (7)
-+ * || | | |
-+ * || | | |+-----------> TYPE_IS_CHANGER (8)
-+ * || | | ||
-+ * || | | || +-------> TYPE_IS_RAID (C)
-+ * || | | || |
-+ * || | | || |
-+ * 0123456789ABCDEF ---> TYPE_IS_???? */
-+
-+ /* 6-bytes length CDB */
-+ {0x00, "MMMMMMMMMMMMMMMM", "TEST UNIT READY",
-+	 /* let's be HQ so we don't look dead under high load */
-+ SCST_DATA_NONE, SCST_SMALL_TIMEOUT|SCST_IMPLICIT_HQ|
-+ SCST_REG_RESERVE_ALLOWED|
-+ SCST_WRITE_EXCL_ALLOWED|
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ SCST_TEST_IO_IN_SIRQ_ALLOWED|
-+#endif
-+ SCST_EXCL_ACCESS_ALLOWED,
-+ 0, get_trans_len_none},
-+ {0x01, " M ", "REWIND",
-+ SCST_DATA_NONE, SCST_LONG_TIMEOUT|SCST_WRITE_EXCL_ALLOWED,
-+ 0, get_trans_len_none},
-+ {0x01, "O V OO OO ", "REZERO UNIT",
-+ SCST_DATA_NONE, SCST_WRITE_EXCL_ALLOWED,
-+ 0, get_trans_len_none},
-+ {0x02, "VVVVVV V ", "REQUEST BLOCK ADDR",
-+ SCST_DATA_NONE, SCST_SMALL_TIMEOUT, 0, get_trans_len_none},
-+ {0x03, "MMMMMMMMMMMMMMMM", "REQUEST SENSE",
-+ SCST_DATA_READ, SCST_SMALL_TIMEOUT|SCST_SKIP_UA|SCST_LOCAL_CMD|
-+ SCST_REG_RESERVE_ALLOWED|
-+ SCST_WRITE_EXCL_ALLOWED|
-+ SCST_EXCL_ACCESS_ALLOWED,
-+ 4, get_trans_len_1},
-+ {0x04, "M O O ", "FORMAT UNIT",
-+ SCST_DATA_WRITE, SCST_LONG_TIMEOUT|SCST_UNKNOWN_LENGTH|SCST_WRITE_MEDIUM,
-+ 0, get_trans_len_none},
-+ {0x04, " O ", "FORMAT",
-+ SCST_DATA_NONE, SCST_WRITE_MEDIUM, 0, get_trans_len_none},
-+ {0x05, "VMVVVV V ", "READ BLOCK LIMITS",
-+ SCST_DATA_READ, SCST_SMALL_TIMEOUT|
-+ SCST_REG_RESERVE_ALLOWED|
-+ SCST_WRITE_EXCL_ALLOWED|
-+ SCST_EXCL_ACCESS_ALLOWED,
-+ 0, get_trans_len_block_limit},
-+ {0x07, " O ", "INITIALIZE ELEMENT STATUS",
-+ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_len_none},
-+ {0x07, "OVV O OV ", "REASSIGN BLOCKS",
-+ SCST_DATA_NONE, SCST_WRITE_MEDIUM, 0, get_trans_len_none},
-+ {0x08, "O ", "READ(6)",
-+ SCST_DATA_READ, SCST_TRANSFER_LEN_TYPE_FIXED|
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ SCST_TEST_IO_IN_SIRQ_ALLOWED|
-+#endif
-+ SCST_WRITE_EXCL_ALLOWED,
-+ 4, get_trans_len_1_256},
-+ {0x08, " MV OO OV ", "READ(6)",
-+ SCST_DATA_READ, SCST_TRANSFER_LEN_TYPE_FIXED|
-+ SCST_WRITE_EXCL_ALLOWED,
-+ 2, get_trans_len_3},
-+ {0x08, " M ", "GET MESSAGE(6)",
-+ SCST_DATA_READ, FLAG_NONE, 2, get_trans_len_3},
-+ {0x08, " O ", "RECEIVE",
-+ SCST_DATA_READ, FLAG_NONE, 2, get_trans_len_3},
-+ {0x0A, "O ", "WRITE(6)",
-+ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ SCST_TEST_IO_IN_SIRQ_ALLOWED|
-+#endif
-+ SCST_WRITE_MEDIUM,
-+ 4, get_trans_len_1_256},
-+ {0x0A, " M O OV ", "WRITE(6)",
-+ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|SCST_WRITE_MEDIUM,
-+ 2, get_trans_len_3},
-+ {0x0A, " M ", "PRINT",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0x0A, " M ", "SEND MESSAGE(6)",
-+ SCST_DATA_WRITE, FLAG_NONE, 2, get_trans_len_3},
-+ {0x0A, " M ", "SEND(6)",
-+ SCST_DATA_WRITE, FLAG_NONE, 2, get_trans_len_3},
-+ {0x0B, "O OO OV ", "SEEK(6)",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0x0B, " ", "TRACK SELECT",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0x0B, " O ", "SLEW AND PRINT",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0x0C, "VVVVVV V ", "SEEK BLOCK",
-+ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_len_none},
-+ {0x0D, "VVVVVV V ", "PARTITION",
-+ SCST_DATA_NONE, SCST_LONG_TIMEOUT|SCST_WRITE_MEDIUM,
-+ 0, get_trans_len_none},
-+ {0x0F, "VOVVVV V ", "READ REVERSE",
-+ SCST_DATA_READ, SCST_TRANSFER_LEN_TYPE_FIXED|
-+ SCST_WRITE_EXCL_ALLOWED,
-+ 2, get_trans_len_3},
-+ {0x10, "VM V V ", "WRITE FILEMARKS",
-+ SCST_DATA_NONE, SCST_WRITE_MEDIUM, 0, get_trans_len_none},
-+ {0x10, " O O ", "SYNCHRONIZE BUFFER",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0x11, "VMVVVV ", "SPACE",
-+ SCST_DATA_NONE, SCST_LONG_TIMEOUT|
-+ SCST_WRITE_EXCL_ALLOWED,
-+ 0, get_trans_len_none},
-+ {0x12, "MMMMMMMMMMMMMMMM", "INQUIRY",
-+ SCST_DATA_READ, SCST_SMALL_TIMEOUT|SCST_IMPLICIT_HQ|SCST_SKIP_UA|
-+ SCST_REG_RESERVE_ALLOWED|
-+ SCST_WRITE_EXCL_ALLOWED|SCST_EXCL_ACCESS_ALLOWED,
-+ 3, get_trans_len_2},
-+ {0x13, "VOVVVV ", "VERIFY(6)",
-+ SCST_DATA_NONE, SCST_TRANSFER_LEN_TYPE_FIXED|
-+ SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED|
-+ SCST_WRITE_EXCL_ALLOWED,
-+ 2, get_trans_len_3},
-+ {0x14, "VOOVVV ", "RECOVER BUFFERED DATA",
-+ SCST_DATA_READ, SCST_TRANSFER_LEN_TYPE_FIXED|
-+ SCST_WRITE_EXCL_ALLOWED,
-+ 2, get_trans_len_3},
-+ {0x15, "OMOOOOOOOOOOOOOO", "MODE SELECT(6)",
-+ SCST_DATA_WRITE, SCST_STRICTLY_SERIALIZED, 4, get_trans_len_1},
-+ {0x16, "MMMMMMMMMMMMMMMM", "RESERVE",
-+ SCST_DATA_NONE, SCST_SMALL_TIMEOUT|SCST_LOCAL_CMD|SCST_SERIALIZED|
-+ SCST_WRITE_EXCL_ALLOWED|SCST_EXCL_ACCESS_ALLOWED,
-+ 0, get_trans_len_none},
-+ {0x17, "MMMMMMMMMMMMMMMM", "RELEASE",
-+ SCST_DATA_NONE, SCST_SMALL_TIMEOUT|SCST_LOCAL_CMD|SCST_SERIALIZED|
-+ SCST_REG_RESERVE_ALLOWED|
-+ SCST_WRITE_EXCL_ALLOWED|SCST_EXCL_ACCESS_ALLOWED,
-+ 0, get_trans_len_none},
-+ {0x18, "OOOOOOOO ", "COPY",
-+ SCST_DATA_WRITE, SCST_LONG_TIMEOUT, 2, get_trans_len_3},
-+ {0x19, "VMVVVV ", "ERASE",
-+ SCST_DATA_NONE, SCST_LONG_TIMEOUT|SCST_WRITE_MEDIUM,
-+ 0, get_trans_len_none},
-+ {0x1A, "OMOOOOOOOOOOOOOO", "MODE SENSE(6)",
-+ SCST_DATA_READ, SCST_SMALL_TIMEOUT, 4, get_trans_len_1},
-+ {0x1B, " O ", "SCAN",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0x1B, " O ", "LOAD UNLOAD",
-+ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_len_none},
-+ {0x1B, " O ", "STOP PRINT",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0x1B, "O OO O O ", "START STOP UNIT",
-+ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_len_start_stop},
-+ {0x1C, "OOOOOOOOOOOOOOOO", "RECEIVE DIAGNOSTIC RESULTS",
-+ SCST_DATA_READ, FLAG_NONE, 3, get_trans_len_2},
-+ {0x1D, "MMMMMMMMMMMMMMMM", "SEND DIAGNOSTIC",
-+ SCST_DATA_WRITE, FLAG_NONE, 4, get_trans_len_1},
-+ {0x1E, "OOOOOOOOOOOOOOOO", "PREVENT ALLOW MEDIUM REMOVAL",
-+ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0,
-+ get_trans_len_prevent_allow_medium_removal},
-+ {0x1F, " O ", "PORT STATUS",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+
-+ /* 10-bytes length CDB */
-+ {0x23, "V VV V ", "READ FORMAT CAPACITY",
-+ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
-+ {0x24, "V VVM ", "SET WINDOW",
-+ SCST_DATA_WRITE, FLAG_NONE, 6, get_trans_len_3},
-+ {0x25, "M MM M ", "READ CAPACITY",
-+ SCST_DATA_READ, SCST_IMPLICIT_HQ|
-+ SCST_REG_RESERVE_ALLOWED|
-+ SCST_WRITE_EXCL_ALLOWED|
-+ SCST_EXCL_ACCESS_ALLOWED,
-+ 0, get_trans_len_read_capacity},
-+ {0x25, " O ", "GET WINDOW",
-+ SCST_DATA_READ, FLAG_NONE, 6, get_trans_len_3},
-+ {0x28, "M MMMM ", "READ(10)",
-+ SCST_DATA_READ, SCST_TRANSFER_LEN_TYPE_FIXED|
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ SCST_TEST_IO_IN_SIRQ_ALLOWED|
-+#endif
-+ SCST_WRITE_EXCL_ALLOWED,
-+ 7, get_trans_len_2},
-+ {0x28, " O ", "GET MESSAGE(10)",
-+ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
-+ {0x29, "V VV O ", "READ GENERATION",
-+ SCST_DATA_READ, FLAG_NONE, 8, get_trans_len_1},
-+ {0x2A, "O MO M ", "WRITE(10)",
-+ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ SCST_TEST_IO_IN_SIRQ_ALLOWED|
-+#endif
-+ SCST_WRITE_MEDIUM,
-+ 7, get_trans_len_2},
-+ {0x2A, " O ", "SEND MESSAGE(10)",
-+ SCST_DATA_WRITE, FLAG_NONE, 7, get_trans_len_2},
-+ {0x2A, " O ", "SEND(10)",
-+ SCST_DATA_WRITE, FLAG_NONE, 7, get_trans_len_2},
-+ {0x2B, " O ", "LOCATE",
-+ SCST_DATA_NONE, SCST_LONG_TIMEOUT|
-+ SCST_WRITE_EXCL_ALLOWED,
-+ 0, get_trans_len_none},
-+ {0x2B, " O ", "POSITION TO ELEMENT",
-+ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_len_none},
-+ {0x2B, "O OO O ", "SEEK(10)",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0x2C, "V O O ", "ERASE(10)",
-+ SCST_DATA_NONE, SCST_LONG_TIMEOUT|SCST_WRITE_MEDIUM,
-+ 0, get_trans_len_none},
-+ {0x2D, "V O O ", "READ UPDATED BLOCK",
-+ SCST_DATA_READ, SCST_TRANSFER_LEN_TYPE_FIXED, 0, get_trans_len_single},
-+ {0x2E, "O OO O ", "WRITE AND VERIFY(10)",
-+ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|SCST_WRITE_MEDIUM,
-+ 7, get_trans_len_2},
-+ {0x2F, "O OO O ", "VERIFY(10)",
-+ SCST_DATA_NONE, SCST_TRANSFER_LEN_TYPE_FIXED|
-+ SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED|
-+ SCST_WRITE_EXCL_ALLOWED,
-+ 7, get_trans_len_2},
-+ {0x33, "O OO O ", "SET LIMITS(10)",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0x34, " O ", "READ POSITION",
-+ SCST_DATA_READ, SCST_SMALL_TIMEOUT|
-+ SCST_WRITE_EXCL_ALLOWED,
-+ 7, get_trans_len_read_pos},
-+ {0x34, " O ", "GET DATA BUFFER STATUS",
-+ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
-+ {0x34, "O OO O ", "PRE-FETCH",
-+ SCST_DATA_NONE, SCST_WRITE_EXCL_ALLOWED,
-+ 0, get_trans_len_none},
-+ {0x35, "O OO O ", "SYNCHRONIZE CACHE",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0x36, "O OO O ", "LOCK UNLOCK CACHE",
-+ SCST_DATA_NONE, SCST_WRITE_EXCL_ALLOWED,
-+ 0, get_trans_len_none},
-+ {0x37, "O O ", "READ DEFECT DATA(10)",
-+ SCST_DATA_READ, SCST_WRITE_EXCL_ALLOWED,
-+ 8, get_trans_len_1},
-+	{0x37, "        O       ", "INIT ELEMENT STATUS WITH RANGE",
-+ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_len_none},
-+ {0x38, " O O ", "MEDIUM SCAN",
-+ SCST_DATA_READ, FLAG_NONE, 8, get_trans_len_1},
-+ {0x39, "OOOOOOOO ", "COMPARE",
-+ SCST_DATA_WRITE, FLAG_NONE, 3, get_trans_len_3},
-+ {0x3A, "OOOOOOOO ", "COPY AND VERIFY",
-+ SCST_DATA_WRITE, FLAG_NONE, 3, get_trans_len_3},
-+ {0x3B, "OOOOOOOOOOOOOOOO", "WRITE BUFFER",
-+ SCST_DATA_WRITE, SCST_SMALL_TIMEOUT, 6, get_trans_len_3},
-+ {0x3C, "OOOOOOOOOOOOOOOO", "READ BUFFER",
-+ SCST_DATA_READ, SCST_SMALL_TIMEOUT, 6, get_trans_len_3},
-+ {0x3D, " O O ", "UPDATE BLOCK",
-+ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED,
-+ 0, get_trans_len_single},
-+ {0x3E, "O OO O ", "READ LONG",
-+ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
-+ {0x3F, "O O O ", "WRITE LONG",
-+ SCST_DATA_WRITE, SCST_WRITE_MEDIUM, 7, get_trans_len_2},
-+ {0x40, "OOOOOOOOOO ", "CHANGE DEFINITION",
-+ SCST_DATA_WRITE, SCST_SMALL_TIMEOUT, 8, get_trans_len_1},
-+ {0x41, "O O ", "WRITE SAME",
-+ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|SCST_WRITE_MEDIUM,
-+ 0, get_trans_len_single},
-+ {0x42, " O ", "READ SUB-CHANNEL",
-+ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
-+ {0x42, "O ", "UNMAP",
-+ SCST_DATA_WRITE, SCST_WRITE_MEDIUM, 7, get_trans_len_2},
-+ {0x43, " O ", "READ TOC/PMA/ATIP",
-+ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
-+ {0x44, " M ", "REPORT DENSITY SUPPORT",
-+ SCST_DATA_READ, SCST_REG_RESERVE_ALLOWED|
-+ SCST_WRITE_EXCL_ALLOWED|
-+ SCST_EXCL_ACCESS_ALLOWED,
-+ 7, get_trans_len_2},
-+ {0x44, " O ", "READ HEADER",
-+ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
-+ {0x45, " O ", "PLAY AUDIO(10)",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0x46, " O ", "GET CONFIGURATION",
-+ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
-+ {0x47, " O ", "PLAY AUDIO MSF",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0x48, " O ", "PLAY AUDIO TRACK INDEX",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0x49, " O ", "PLAY TRACK RELATIVE(10)",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0x4A, " O ", "GET EVENT STATUS NOTIFICATION",
-+ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
-+ {0x4B, " O ", "PAUSE/RESUME",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0x4C, "OOOOOOOOOOOOOOOO", "LOG SELECT",
-+ SCST_DATA_WRITE, SCST_STRICTLY_SERIALIZED, 7, get_trans_len_2},
-+ {0x4D, "OOOOOOOOOOOOOOOO", "LOG SENSE",
-+ SCST_DATA_READ, SCST_SMALL_TIMEOUT|
-+ SCST_REG_RESERVE_ALLOWED|
-+ SCST_WRITE_EXCL_ALLOWED|
-+ SCST_EXCL_ACCESS_ALLOWED,
-+ 7, get_trans_len_2},
-+ {0x4E, " O ", "STOP PLAY/SCAN",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0x50, " ", "XDWRITE",
-+ SCST_DATA_NONE, SCST_WRITE_MEDIUM, 0, get_trans_len_none},
-+ {0x51, " O ", "READ DISC INFORMATION",
-+ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
-+ {0x51, " ", "XPWRITE",
-+ SCST_DATA_NONE, SCST_WRITE_MEDIUM, 0, get_trans_len_none},
-+ {0x52, " O ", "READ TRACK INFORMATION",
-+ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
-+ {0x53, "O ", "XDWRITEREAD(10)",
-+ SCST_DATA_READ|SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|
-+ SCST_WRITE_MEDIUM,
-+ 7, get_bidi_trans_len_2},
-+ {0x53, " O ", "RESERVE TRACK",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0x54, " O ", "SEND OPC INFORMATION",
-+ SCST_DATA_WRITE, FLAG_NONE, 7, get_trans_len_2},
-+ {0x55, "OOOOOOOOOOOOOOOO", "MODE SELECT(10)",
-+ SCST_DATA_WRITE, SCST_STRICTLY_SERIALIZED, 7, get_trans_len_2},
-+ {0x56, "OOOOOOOOOOOOOOOO", "RESERVE(10)",
-+ SCST_DATA_NONE, SCST_SMALL_TIMEOUT|SCST_LOCAL_CMD|SCST_SERIALIZED|
-+ SCST_WRITE_EXCL_ALLOWED|SCST_EXCL_ACCESS_ALLOWED,
-+ 0, get_trans_len_none},
-+ {0x57, "OOOOOOOOOOOOOOOO", "RELEASE(10)",
-+ SCST_DATA_NONE, SCST_SMALL_TIMEOUT|SCST_LOCAL_CMD|SCST_SERIALIZED|
-+ SCST_REG_RESERVE_ALLOWED|
-+ SCST_WRITE_EXCL_ALLOWED|SCST_EXCL_ACCESS_ALLOWED,
-+ 0, get_trans_len_none},
-+ {0x58, " O ", "REPAIR TRACK",
-+ SCST_DATA_NONE, SCST_WRITE_MEDIUM, 0, get_trans_len_none},
-+ {0x5A, "OOOOOOOOOOOOOOOO", "MODE SENSE(10)",
-+ SCST_DATA_READ, SCST_SMALL_TIMEOUT, 7, get_trans_len_2},
-+ {0x5B, " O ", "CLOSE TRACK/SESSION",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0x5C, " O ", "READ BUFFER CAPACITY",
-+ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_2},
-+ {0x5D, " O ", "SEND CUE SHEET",
-+ SCST_DATA_WRITE, FLAG_NONE, 6, get_trans_len_3},
-+ {0x5E, "OOOOO OOOO ", "PERSISTENT RESERVE IN",
-+ SCST_DATA_READ, SCST_SMALL_TIMEOUT|
-+ SCST_LOCAL_CMD|SCST_SERIALIZED|
-+ SCST_WRITE_EXCL_ALLOWED|
-+ SCST_EXCL_ACCESS_ALLOWED,
-+ 5, get_trans_len_4},
-+ {0x5F, "OOOOO OOOO ", "PERSISTENT RESERVE OUT",
-+ SCST_DATA_WRITE, SCST_SMALL_TIMEOUT|
-+ SCST_LOCAL_CMD|SCST_SERIALIZED|
-+ SCST_WRITE_EXCL_ALLOWED|
-+ SCST_EXCL_ACCESS_ALLOWED,
-+ 5, get_trans_len_4},
-+
-+ /* 16-bytes length CDB */
-+ {0x80, "O OO O ", "XDWRITE EXTENDED",
-+ SCST_DATA_NONE, SCST_WRITE_MEDIUM, 0, get_trans_len_none},
-+ {0x80, " M ", "WRITE FILEMARKS",
-+ SCST_DATA_NONE, SCST_WRITE_MEDIUM, 0, get_trans_len_none},
-+ {0x81, "O OO O ", "REBUILD",
-+ SCST_DATA_WRITE, SCST_WRITE_MEDIUM, 10, get_trans_len_4},
-+ {0x82, "O OO O ", "REGENERATE",
-+ SCST_DATA_WRITE, SCST_WRITE_MEDIUM, 10, get_trans_len_4},
-+ {0x83, "OOOOOOOOOOOOOOOO", "EXTENDED COPY",
-+ SCST_DATA_WRITE, SCST_WRITE_MEDIUM, 10, get_trans_len_4},
-+ {0x84, "OOOOOOOOOOOOOOOO", "RECEIVE COPY RESULT",
-+ SCST_DATA_WRITE, FLAG_NONE, 10, get_trans_len_4},
-+ {0x86, "OOOOOOOOOO ", "ACCESS CONTROL IN",
-+ SCST_DATA_NONE, SCST_REG_RESERVE_ALLOWED|
-+ SCST_WRITE_EXCL_ALLOWED|
-+ SCST_EXCL_ACCESS_ALLOWED,
-+ 0, get_trans_len_none},
-+ {0x87, "OOOOOOOOOO ", "ACCESS CONTROL OUT",
-+ SCST_DATA_NONE, SCST_REG_RESERVE_ALLOWED|
-+ SCST_WRITE_EXCL_ALLOWED|
-+ SCST_EXCL_ACCESS_ALLOWED,
-+ 0, get_trans_len_none},
-+ {0x88, "M MMMM ", "READ(16)",
-+ SCST_DATA_READ, SCST_TRANSFER_LEN_TYPE_FIXED|
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ SCST_TEST_IO_IN_SIRQ_ALLOWED|
-+#endif
-+ SCST_WRITE_EXCL_ALLOWED,
-+ 10, get_trans_len_4},
-+ {0x8A, "O OO O ", "WRITE(16)",
-+ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ SCST_TEST_IO_IN_SIRQ_ALLOWED|
-+#endif
-+ SCST_WRITE_MEDIUM,
-+ 10, get_trans_len_4},
-+ {0x8C, "OOOOOOOOOO ", "READ ATTRIBUTE",
-+ SCST_DATA_READ, FLAG_NONE, 10, get_trans_len_4},
-+ {0x8D, "OOOOOOOOOO ", "WRITE ATTRIBUTE",
-+ SCST_DATA_WRITE, SCST_WRITE_MEDIUM, 10, get_trans_len_4},
-+ {0x8E, "O OO O ", "WRITE AND VERIFY(16)",
-+ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|SCST_WRITE_MEDIUM,
-+ 10, get_trans_len_4},
-+ {0x8F, "O OO O ", "VERIFY(16)",
-+ SCST_DATA_NONE, SCST_TRANSFER_LEN_TYPE_FIXED|
-+ SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED|
-+ SCST_WRITE_EXCL_ALLOWED,
-+ 10, get_trans_len_4},
-+ {0x90, "O OO O ", "PRE-FETCH(16)",
-+ SCST_DATA_NONE, SCST_WRITE_EXCL_ALLOWED,
-+ 0, get_trans_len_none},
-+ {0x91, "O OO O ", "SYNCHRONIZE CACHE(16)",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0x91, " M ", "SPACE(16)",
-+ SCST_DATA_NONE, SCST_LONG_TIMEOUT|
-+ SCST_WRITE_EXCL_ALLOWED,
-+ 0, get_trans_len_none},
-+ {0x92, "O OO O ", "LOCK UNLOCK CACHE(16)",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0x92, " O ", "LOCATE(16)",
-+ SCST_DATA_NONE, SCST_LONG_TIMEOUT|
-+ SCST_WRITE_EXCL_ALLOWED,
-+ 0, get_trans_len_none},
-+ {0x93, "O O ", "WRITE SAME(16)",
-+ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|SCST_WRITE_MEDIUM,
-+ 10, get_trans_len_4},
-+ {0x93, " M ", "ERASE(16)",
-+ SCST_DATA_NONE, SCST_LONG_TIMEOUT|SCST_WRITE_MEDIUM,
-+ 0, get_trans_len_none},
-+ {0x9E, "O ", "SERVICE ACTION IN",
-+ SCST_DATA_READ, FLAG_NONE, 0, get_trans_len_serv_act_in},
-+
-+	/* 12-byte CDBs */
-+ {0xA0, "VVVVVVVVVV M ", "REPORT LUNS",
-+ SCST_DATA_READ, SCST_SMALL_TIMEOUT|SCST_IMPLICIT_HQ|SCST_SKIP_UA|
-+ SCST_FULLY_LOCAL_CMD|SCST_LOCAL_CMD|
-+ SCST_REG_RESERVE_ALLOWED|
-+ SCST_WRITE_EXCL_ALLOWED|SCST_EXCL_ACCESS_ALLOWED,
-+ 6, get_trans_len_4},
-+ {0xA1, " O ", "BLANK",
-+ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_len_none},
-+ {0xA3, " O ", "SEND KEY",
-+ SCST_DATA_WRITE, FLAG_NONE, 8, get_trans_len_2},
-+	{0xA3, "OOOOO OOOO      ", "REPORT DEVICE IDENTIFIER",
-+ SCST_DATA_READ, SCST_REG_RESERVE_ALLOWED|
-+ SCST_WRITE_EXCL_ALLOWED|SCST_EXCL_ACCESS_ALLOWED,
-+ 6, get_trans_len_4},
-+ {0xA3, " M ", "MAINTENANCE(IN)",
-+ SCST_DATA_READ, FLAG_NONE, 6, get_trans_len_4},
-+ {0xA4, " O ", "REPORT KEY",
-+ SCST_DATA_READ, FLAG_NONE, 8, get_trans_len_2},
-+ {0xA4, " O ", "MAINTENANCE(OUT)",
-+ SCST_DATA_WRITE, FLAG_NONE, 6, get_trans_len_4},
-+ {0xA5, " M ", "MOVE MEDIUM",
-+ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_len_none},
-+ {0xA5, " O ", "PLAY AUDIO(12)",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0xA6, " O O ", "EXCHANGE/LOAD/UNLOAD MEDIUM",
-+ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_len_none},
-+ {0xA7, " O ", "SET READ AHEAD",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0xA8, " O ", "GET MESSAGE(12)",
-+ SCST_DATA_READ, FLAG_NONE, 6, get_trans_len_4},
-+ {0xA8, "O OO O ", "READ(12)",
-+ SCST_DATA_READ, SCST_TRANSFER_LEN_TYPE_FIXED|
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ SCST_TEST_IO_IN_SIRQ_ALLOWED|
-+#endif
-+ SCST_WRITE_EXCL_ALLOWED,
-+ 6, get_trans_len_4},
-+ {0xA9, " O ", "PLAY TRACK RELATIVE(12)",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0xAA, "O OO O ", "WRITE(12)",
-+ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ SCST_TEST_IO_IN_SIRQ_ALLOWED|
-+#endif
-+ SCST_WRITE_MEDIUM,
-+ 6, get_trans_len_4},
-+ {0xAA, " O ", "SEND MESSAGE(12)",
-+ SCST_DATA_WRITE, FLAG_NONE, 6, get_trans_len_4},
-+ {0xAC, " O ", "ERASE(12)",
-+ SCST_DATA_NONE, SCST_WRITE_MEDIUM, 0, get_trans_len_none},
-+ {0xAC, " M ", "GET PERFORMANCE",
-+ SCST_DATA_READ, SCST_UNKNOWN_LENGTH, 0, get_trans_len_none},
-+ {0xAD, " O ", "READ DVD STRUCTURE",
-+ SCST_DATA_READ, FLAG_NONE, 8, get_trans_len_2},
-+ {0xAE, "O OO O ", "WRITE AND VERIFY(12)",
-+ SCST_DATA_WRITE, SCST_TRANSFER_LEN_TYPE_FIXED|SCST_WRITE_MEDIUM,
-+ 6, get_trans_len_4},
-+ {0xAF, "O OO O ", "VERIFY(12)",
-+ SCST_DATA_NONE, SCST_TRANSFER_LEN_TYPE_FIXED|
-+ SCST_VERIFY_BYTCHK_MISMATCH_ALLOWED|
-+ SCST_WRITE_EXCL_ALLOWED,
-+ 6, get_trans_len_4},
-+#if 0 /* No need to support at all */
-+ {0xB0, " OO O ", "SEARCH DATA HIGH(12)",
-+ SCST_DATA_WRITE, FLAG_NONE, 9, get_trans_len_1},
-+ {0xB1, " OO O ", "SEARCH DATA EQUAL(12)",
-+ SCST_DATA_WRITE, FLAG_NONE, 9, get_trans_len_1},
-+ {0xB2, " OO O ", "SEARCH DATA LOW(12)",
-+ SCST_DATA_WRITE, FLAG_NONE, 9, get_trans_len_1},
-+#endif
-+ {0xB3, " OO O ", "SET LIMITS(12)",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0xB5, " O ", "REQUEST VOLUME ELEMENT ADDRESS",
-+ SCST_DATA_READ, FLAG_NONE, 9, get_trans_len_1},
-+ {0xB6, " O ", "SEND VOLUME TAG",
-+ SCST_DATA_WRITE, FLAG_NONE, 9, get_trans_len_1},
-+ {0xB6, " M ", "SET STREAMING",
-+ SCST_DATA_WRITE, FLAG_NONE, 9, get_trans_len_2},
-+ {0xB7, " O ", "READ DEFECT DATA(12)",
-+ SCST_DATA_READ, SCST_WRITE_EXCL_ALLOWED,
-+ 9, get_trans_len_1},
-+ {0xB8, " O ", "READ ELEMENT STATUS",
-+ SCST_DATA_READ, FLAG_NONE, 7, get_trans_len_3_read_elem_stat},
-+ {0xB9, " O ", "READ CD MSF",
-+ SCST_DATA_READ, SCST_UNKNOWN_LENGTH, 0, get_trans_len_none},
-+ {0xBA, " O ", "SCAN",
-+ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_len_none},
-+ {0xBA, " O ", "REDUNDANCY GROUP(IN)",
-+ SCST_DATA_READ, FLAG_NONE, 6, get_trans_len_4},
-+ {0xBB, " O ", "SET SPEED",
-+ SCST_DATA_NONE, FLAG_NONE, 0, get_trans_len_none},
-+ {0xBB, " O ", "REDUNDANCY GROUP(OUT)",
-+ SCST_DATA_WRITE, FLAG_NONE, 6, get_trans_len_4},
-+ {0xBC, " O ", "SPARE(IN)",
-+ SCST_DATA_READ, FLAG_NONE, 6, get_trans_len_4},
-+ {0xBD, " O ", "MECHANISM STATUS",
-+ SCST_DATA_READ, FLAG_NONE, 8, get_trans_len_2},
-+ {0xBD, " O ", "SPARE(OUT)",
-+ SCST_DATA_WRITE, FLAG_NONE, 6, get_trans_len_4},
-+ {0xBE, " O ", "READ CD",
-+ SCST_DATA_READ, SCST_TRANSFER_LEN_TYPE_FIXED, 6, get_trans_len_3},
-+ {0xBE, " O ", "VOLUME SET(IN)",
-+ SCST_DATA_READ, FLAG_NONE, 6, get_trans_len_4},
-+	{0xBF, "     O          ", "SEND DVD STRUCTURE",
-+ SCST_DATA_WRITE, FLAG_NONE, 8, get_trans_len_2},
-+ {0xBF, " O ", "VOLUME SET(OUT)",
-+ SCST_DATA_WRITE, FLAG_NONE, 6, get_trans_len_4},
-+	{0xE7, "        V       ", "INIT ELEMENT STATUS WITH RANGE",
-+ SCST_DATA_NONE, SCST_LONG_TIMEOUT, 0, get_trans_cdb_len_10}
-+};
-+
-+#define SCST_CDB_TBL_SIZE ((int)ARRAY_SIZE(scst_scsi_op_table))
-+
-+static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev);
-+static void scst_check_internal_sense(struct scst_device *dev, int result,
-+ uint8_t *sense, int sense_len);
-+static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
-+ int flags);
-+static void __scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
-+ const uint8_t *sense, int sense_len, int flags);
-+static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
-+ const uint8_t *sense, int sense_len, int flags);
-+static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev);
-+static void scst_release_space(struct scst_cmd *cmd);
-+static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev);
-+static int scst_alloc_add_tgt_dev(struct scst_session *sess,
-+ struct scst_acg_dev *acg_dev, struct scst_tgt_dev **out_tgt_dev);
-+static void scst_tgt_retry_timer_fn(unsigned long arg);
-+
-+#ifdef CONFIG_SCST_DEBUG_TM
-+static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev);
-+static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev);
-+#else
-+static inline void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev) {}
-+static inline void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev) {}
-+#endif /* CONFIG_SCST_DEBUG_TM */
-+
-+/**
-+ * scst_alloc_sense() - allocate sense buffer for command
-+ *
-+ * Allocates, if necessary, sense buffer for command. Returns 0 on success
-+ * and error code otherwise. Parameter "atomic" should be non-0 if the
-+ * function is called in atomic context.
-+ */
-+int scst_alloc_sense(struct scst_cmd *cmd, int atomic)
-+{
-+ int res = 0;
-+ gfp_t gfp_mask = atomic ? GFP_ATOMIC : (GFP_KERNEL|__GFP_NOFAIL);
-+
-+ TRACE_ENTRY();
-+
-+ if (cmd->sense != NULL)
-+ goto memzero;
-+
-+ cmd->sense = mempool_alloc(scst_sense_mempool, gfp_mask);
-+ if (cmd->sense == NULL) {
-+ PRINT_CRIT_ERROR("Sense memory allocation failed (op %x). "
-+ "The sense data will be lost!!", cmd->cdb[0]);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ cmd->sense_buflen = SCST_SENSE_BUFFERSIZE;
-+
-+memzero:
-+ cmd->sense_valid_len = 0;
-+ memset(cmd->sense, 0, cmd->sense_buflen);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL(scst_alloc_sense);
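-+
-+/*
-+ * Usage sketch (illustrative only, not a real caller): filling the
-+ * sense buffer by hand from a possibly atomic context could look like:
-+ *
-+ *	if (scst_alloc_sense(cmd, 1) == 0)
-+ *		cmd->sense_valid_len = scst_set_sense(cmd->sense,
-+ *			cmd->sense_buflen, scst_get_cmd_dev_d_sense(cmd),
-+ *			ILLEGAL_REQUEST, 0x24, 0);
-+ */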
-+
-+/**
-+ * scst_alloc_set_sense() - allocate and fill sense buffer for command
-+ *
-+ * Allocates, if necessary, sense buffer for command and copies into
-+ * it the data from the supplied sense buffer. Returns 0 on success
-+ * and error code otherwise.
-+ */
-+int scst_alloc_set_sense(struct scst_cmd *cmd, int atomic,
-+ const uint8_t *sense, unsigned int len)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+ * We don't check here if the existing sense is valid or not, because
-+ * we suppose the caller did it based on cmd->status.
-+ */
-+
-+ res = scst_alloc_sense(cmd, atomic);
-+ if (res != 0) {
-+ PRINT_BUFFER("Lost sense", sense, len);
-+ goto out;
-+ }
-+
-+ cmd->sense_valid_len = len;
-+ if (cmd->sense_buflen < len) {
-+ PRINT_WARNING("Sense truncated (needed %d), shall you increase "
-+ "SCST_SENSE_BUFFERSIZE? Op: %x", len, cmd->cdb[0]);
-+ cmd->sense_valid_len = cmd->sense_buflen;
-+ }
-+
-+ memcpy(cmd->sense, sense, cmd->sense_valid_len);
-+ TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_valid_len);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL(scst_alloc_set_sense);
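-+
-+/*
-+ * Usage sketch (illustrative only; "local_sense" and "local_len" are
-+ * hypothetical variables holding sense bytes obtained elsewhere, e.g.
-+ * from a pass-through device):
-+ *
-+ *	if (cmd->status == SAM_STAT_CHECK_CONDITION)
-+ *		scst_alloc_set_sense(cmd, 1, local_sense, local_len);
-+ */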
-+
-+/**
-+ * scst_set_cmd_error_status() - set error SCSI status
-+ * @cmd: SCST command
-+ * @status: SCSI status to set
-+ *
-+ * Description:
-+ *	Sets error SCSI status in the command and prepares it to be returned.
-+ * Returns 0 on success, error code otherwise.
-+ */
-+int scst_set_cmd_error_status(struct scst_cmd *cmd, int status)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (status == SAM_STAT_RESERVATION_CONFLICT) {
-+ TRACE(TRACE_SCSI|TRACE_MINOR, "Reservation conflict (dev %s, "
-+ "initiator %s, tgt_id %d)",
-+ cmd->dev ? cmd->dev->virt_name : NULL,
-+ cmd->sess->initiator_name, cmd->tgt->rel_tgt_id);
-+ }
-+
-+ if (cmd->status != 0) {
-+ TRACE_MGMT_DBG("cmd %p already has status %x set", cmd,
-+ cmd->status);
-+ res = -EEXIST;
-+ goto out;
-+ }
-+
-+ cmd->status = status;
-+ cmd->host_status = DID_OK;
-+
-+ cmd->dbl_ua_orig_resp_data_len = cmd->resp_data_len;
-+ cmd->dbl_ua_orig_data_direction = cmd->data_direction;
-+
-+ cmd->data_direction = SCST_DATA_NONE;
-+ cmd->resp_data_len = 0;
-+ cmd->resid_possible = 1;
-+ cmd->is_send_status = 1;
-+
-+ cmd->completed = 1;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL(scst_set_cmd_error_status);
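-+
-+/*
-+ * Usage sketch (illustrative only): reporting a reservation conflict,
-+ * which needs an error status, but no sense data:
-+ *
-+ *	scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
-+ */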
-+
-+static int scst_set_lun_not_supported_request_sense(struct scst_cmd *cmd,
-+ int key, int asc, int ascq)
-+{
-+ int res;
-+ int sense_len, len;
-+ struct scatterlist *sg;
-+
-+ TRACE_ENTRY();
-+
-+ if (cmd->status != 0) {
-+ TRACE_MGMT_DBG("cmd %p already has status %x set", cmd,
-+ cmd->status);
-+ res = -EEXIST;
-+ goto out;
-+ }
-+
-+ if ((cmd->sg != NULL) && SCST_SENSE_VALID(sg_virt(cmd->sg))) {
-+ TRACE_MGMT_DBG("cmd %p already has sense set", cmd);
-+ res = -EEXIST;
-+ goto out;
-+ }
-+
-+ if (cmd->sg == NULL) {
-+ /*
-+		 * If the target driver prepared the data buffer using the
-+		 * alloc_data_buf() callback, it is responsible for copying
-+		 * the sense to its buffer in xmit_response().
-+ */
-+ if (cmd->tgt_data_buf_alloced && (cmd->tgt_sg != NULL)) {
-+ cmd->sg = cmd->tgt_sg;
-+ cmd->sg_cnt = cmd->tgt_sg_cnt;
-+ TRACE_MEM("Tgt sg used for sense for cmd %p", cmd);
-+ goto go;
-+ }
-+
-+ if (cmd->bufflen == 0)
-+ cmd->bufflen = cmd->cdb[4];
-+
-+ cmd->sg = scst_alloc(cmd->bufflen, GFP_ATOMIC, &cmd->sg_cnt);
-+ if (cmd->sg == NULL) {
-+			PRINT_ERROR("Unable to alloc sg for REQUEST SENSE "
-+				"(sense %x/%x/%x)", key, asc, ascq);
-+ res = 1;
-+ goto out;
-+ }
-+
-+ TRACE_MEM("sg %p alloced for sense for cmd %p (cnt %d, "
-+ "len %d)", cmd->sg, cmd, cmd->sg_cnt, cmd->bufflen);
-+ }
-+
-+go:
-+ sg = cmd->sg;
-+ len = sg->length;
-+
-+ TRACE_MEM("sg %p (len %d) for sense for cmd %p", sg, len, cmd);
-+
-+ sense_len = scst_set_sense(sg_virt(sg), len, cmd->cdb[1] & 1,
-+ key, asc, ascq);
-+
-+ TRACE_BUFFER("Sense set", sg_virt(sg), sense_len);
-+
-+ cmd->data_direction = SCST_DATA_READ;
-+ scst_set_resp_data_len(cmd, sense_len);
-+
-+ res = 0;
-+ cmd->completed = 1;
-+ cmd->resid_possible = 1;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_set_lun_not_supported_inquiry(struct scst_cmd *cmd)
-+{
-+ int res;
-+ uint8_t *buf;
-+ struct scatterlist *sg;
-+ int len;
-+
-+ TRACE_ENTRY();
-+
-+ if (cmd->status != 0) {
-+ TRACE_MGMT_DBG("cmd %p already has status %x set", cmd,
-+ cmd->status);
-+ res = -EEXIST;
-+ goto out;
-+ }
-+
-+ if (cmd->sg == NULL) {
-+ /*
-+		 * If the target driver prepared the data buffer using the
-+		 * alloc_data_buf() callback, it is responsible for copying
-+		 * the response data to its buffer in xmit_response().
-+ */
-+ if (cmd->tgt_data_buf_alloced && (cmd->tgt_sg != NULL)) {
-+ cmd->sg = cmd->tgt_sg;
-+ cmd->sg_cnt = cmd->tgt_sg_cnt;
-+			TRACE_MEM("Tgt sg used for INQUIRY for not supported "
-+				"LUN for cmd %p", cmd);
-+ goto go;
-+ }
-+
-+ if (cmd->bufflen == 0)
-+ cmd->bufflen = min_t(int, 36, (cmd->cdb[3] << 8) | cmd->cdb[4]);
-+
-+ cmd->sg = scst_alloc(cmd->bufflen, GFP_ATOMIC, &cmd->sg_cnt);
-+ if (cmd->sg == NULL) {
-+ PRINT_ERROR("%s", "Unable to alloc sg for INQUIRY "
-+ "for not supported LUN");
-+ res = 1;
-+ goto out;
-+ }
-+
-+ TRACE_MEM("sg %p alloced for INQUIRY for not supported LUN for "
-+ "cmd %p (cnt %d, len %d)", cmd->sg, cmd, cmd->sg_cnt,
-+ cmd->bufflen);
-+ }
-+
-+go:
-+ sg = cmd->sg;
-+ len = sg->length;
-+
-+ TRACE_MEM("sg %p (len %d) for INQUIRY for cmd %p", sg, len, cmd);
-+
-+ buf = sg_virt(sg);
-+ len = min_t(int, 36, len);
-+
-+ memset(buf, 0, len);
-+ buf[0] = 0x7F; /* Peripheral qualifier 011b, Peripheral device type 1Fh */
-+ buf[4] = len - 4;
-+
-+ TRACE_BUFFER("INQUIRY for not supported LUN set", buf, len);
-+
-+ cmd->data_direction = SCST_DATA_READ;
-+ scst_set_resp_data_len(cmd, len);
-+
-+ res = 0;
-+ cmd->completed = 1;
-+ cmd->resid_possible = 1;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/**
-+ * scst_set_cmd_error() - set error in the command and fill the sense buffer.
-+ *
-+ * Sets error in the command and fills the sense buffer. Returns 0 on success,
-+ * error code otherwise.
-+ */
-+int scst_set_cmd_error(struct scst_cmd *cmd, int key, int asc, int ascq)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+	 * LOGICAL UNIT NOT SUPPORTED needs special handling for
-+	 * REQUEST SENSE and INQUIRY.
-+ */
-+ if ((key == ILLEGAL_REQUEST) && (asc == 0x25) && (ascq == 0)) {
-+ if (cmd->cdb[0] == REQUEST_SENSE)
-+ res = scst_set_lun_not_supported_request_sense(cmd,
-+ key, asc, ascq);
-+ else if (cmd->cdb[0] == INQUIRY)
-+ res = scst_set_lun_not_supported_inquiry(cmd);
-+ else
-+ goto do_sense;
-+
-+ if (res > 0)
-+ goto do_sense;
-+ else
-+ goto out;
-+ }
-+
-+do_sense:
-+ res = scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
-+ if (res != 0)
-+ goto out;
-+
-+ res = scst_alloc_sense(cmd, 1);
-+ if (res != 0) {
-+ PRINT_ERROR("Lost sense data (key %x, asc %x, ascq %x)",
-+ key, asc, ascq);
-+ goto out;
-+ }
-+
-+ cmd->sense_valid_len = scst_set_sense(cmd->sense, cmd->sense_buflen,
-+ scst_get_cmd_dev_d_sense(cmd), key, asc, ascq);
-+ TRACE_BUFFER("Sense set", cmd->sense, cmd->sense_valid_len);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL(scst_set_cmd_error);
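-+
-+/*
-+ * Usage sketch (illustrative only): failing a command with LOGICAL
-+ * BLOCK ADDRESS OUT OF RANGE (ILLEGAL REQUEST, ASC 0x21):
-+ *
-+ *	scst_set_cmd_error(cmd, ILLEGAL_REQUEST, 0x21, 0);
-+ */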
-+
-+/**
-+ * scst_set_sense() - set sense from KEY/ASC/ASCQ numbers
-+ *
-+ * Sets the corresponding fields in the sense buffer taking sense type
-+ * into account. Returns resulting sense length.
-+ */
-+int scst_set_sense(uint8_t *buffer, int len, bool d_sense,
-+ int key, int asc, int ascq)
-+{
-+ int res;
-+
-+ BUG_ON(len == 0);
-+
-+ memset(buffer, 0, len);
-+
-+ if (d_sense) {
-+ /* Descriptor format */
-+ if (len < 8) {
-+ PRINT_ERROR("Length %d of sense buffer too small to "
-+ "fit sense %x:%x:%x", len, key, asc, ascq);
-+ }
-+
-+ buffer[0] = 0x72; /* Response Code */
-+ if (len > 1)
-+ buffer[1] = key; /* Sense Key */
-+ if (len > 2)
-+ buffer[2] = asc; /* ASC */
-+ if (len > 3)
-+ buffer[3] = ascq; /* ASCQ */
-+ res = 8;
-+ } else {
-+ /* Fixed format */
-+ if (len < 18) {
-+ PRINT_ERROR("Length %d of sense buffer too small to "
-+ "fit sense %x:%x:%x", len, key, asc, ascq);
-+ }
-+
-+ buffer[0] = 0x70; /* Response Code */
-+ if (len > 2)
-+ buffer[2] = key; /* Sense Key */
-+ if (len > 7)
-+ buffer[7] = 0x0a; /* Additional Sense Length */
-+ if (len > 12)
-+ buffer[12] = asc; /* ASC */
-+ if (len > 13)
-+ buffer[13] = ascq; /* ASCQ */
-+ res = 18;
-+ }
-+
-+ TRACE_BUFFER("Sense set", buffer, res);
-+ return res;
-+}
-+EXPORT_SYMBOL(scst_set_sense);
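-+
-+/*
-+ * Usage sketch (illustrative only): building a POWER ON UA in a local
-+ * buffer. With d_sense false this produces 18 bytes of fixed format
-+ * sense (response code 0x70), with d_sense true an 8-byte descriptor
-+ * format header (response code 0x72):
-+ *
-+ *	uint8_t sb[SCST_STANDARD_SENSE_LEN];
-+ *	int sl = scst_set_sense(sb, sizeof(sb), false,
-+ *			UNIT_ATTENTION, 0x29, 0);
-+ */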
-+
-+/**
-+ * scst_analyze_sense() - analyze sense
-+ *
-+ * Returns true if the sense matches (key, asc, ascq) and false otherwise.
-+ * valid_mask is a combination of SCST_SENSE_*_VALID constants selecting
-+ * which of the (key, asc, ascq) values to check.
-+ */
-+bool scst_analyze_sense(const uint8_t *sense, int len, unsigned int valid_mask,
-+ int key, int asc, int ascq)
-+{
-+ bool res = false;
-+
-+ /* Response Code */
-+ if ((sense[0] == 0x70) || (sense[0] == 0x71)) {
-+ /* Fixed format */
-+
-+ /* Sense Key */
-+ if (valid_mask & SCST_SENSE_KEY_VALID) {
-+ if (len < 3)
-+ goto out;
-+ if (sense[2] != key)
-+ goto out;
-+ }
-+
-+ /* ASC */
-+ if (valid_mask & SCST_SENSE_ASC_VALID) {
-+ if (len < 13)
-+ goto out;
-+ if (sense[12] != asc)
-+ goto out;
-+ }
-+
-+ /* ASCQ */
-+ if (valid_mask & SCST_SENSE_ASCQ_VALID) {
-+ if (len < 14)
-+ goto out;
-+ if (sense[13] != ascq)
-+ goto out;
-+ }
-+ } else if ((sense[0] == 0x72) || (sense[0] == 0x73)) {
-+ /* Descriptor format */
-+
-+ /* Sense Key */
-+ if (valid_mask & SCST_SENSE_KEY_VALID) {
-+ if (len < 2)
-+ goto out;
-+ if (sense[1] != key)
-+ goto out;
-+ }
-+
-+ /* ASC */
-+ if (valid_mask & SCST_SENSE_ASC_VALID) {
-+ if (len < 3)
-+ goto out;
-+ if (sense[2] != asc)
-+ goto out;
-+ }
-+
-+ /* ASCQ */
-+ if (valid_mask & SCST_SENSE_ASCQ_VALID) {
-+ if (len < 4)
-+ goto out;
-+ if (sense[3] != ascq)
-+ goto out;
-+ }
-+ } else
-+ goto out;
-+
-+ res = true;
-+
-+out:
-+ TRACE_EXIT_RES((int)res);
-+ return res;
-+}
-+EXPORT_SYMBOL(scst_analyze_sense);
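-+
-+/*
-+ * Usage sketch (illustrative only): checking whether a sense buffer
-+ * carries MEDIUM NOT PRESENT (ASC 0x3a), regardless of sense format:
-+ *
-+ *	if (scst_analyze_sense(sense, len, SCST_SENSE_ASC_VALID,
-+ *			0, 0x3a, 0))
-+ *		... eject handling ...
-+ */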
-+
-+/**
-+ * scst_is_ua_sense() - determine if the sense is UA sense
-+ *
-+ * Returns true if the sense is valid and carries a Unit
-+ * Attention, or false otherwise.
-+ */
-+bool scst_is_ua_sense(const uint8_t *sense, int len)
-+{
-+ if (SCST_SENSE_VALID(sense))
-+ return scst_analyze_sense(sense, len,
-+ SCST_SENSE_KEY_VALID, UNIT_ATTENTION, 0, 0);
-+ else
-+ return false;
-+}
-+EXPORT_SYMBOL(scst_is_ua_sense);
-+
-+bool scst_is_ua_global(const uint8_t *sense, int len)
-+{
-+ bool res;
-+
-+	/* When changing this, don't forget to change scst_requeue_ua() as well!! */
-+
-+	res = scst_analyze_sense(sense, len, SCST_SENSE_ALL_VALID,
-+		SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));
-+
-+ return res;
-+}
-+
-+/**
-+ * scst_check_convert_sense() - check sense type and convert it if needed
-+ *
-+ * Checks if sense in the sense buffer, if any, is in the correct format.
-+ * If not, converts it to the correct format.
-+ */
-+void scst_check_convert_sense(struct scst_cmd *cmd)
-+{
-+ bool d_sense;
-+
-+ TRACE_ENTRY();
-+
-+ if ((cmd->sense == NULL) || (cmd->status != SAM_STAT_CHECK_CONDITION))
-+ goto out;
-+
-+ d_sense = scst_get_cmd_dev_d_sense(cmd);
-+ if (d_sense && ((cmd->sense[0] == 0x70) || (cmd->sense[0] == 0x71))) {
-+ TRACE_MGMT_DBG("Converting fixed sense to descriptor (cmd %p)",
-+ cmd);
-+		if (cmd->sense_valid_len < 18) {
-+			PRINT_ERROR("Sense too small to convert (%d, "
-+				"type: fixed)", cmd->sense_valid_len);
-+ goto out;
-+ }
-+ cmd->sense_valid_len = scst_set_sense(cmd->sense, cmd->sense_buflen,
-+ d_sense, cmd->sense[2], cmd->sense[12], cmd->sense[13]);
-+ } else if (!d_sense && ((cmd->sense[0] == 0x72) ||
-+ (cmd->sense[0] == 0x73))) {
-+ TRACE_MGMT_DBG("Converting descriptor sense to fixed (cmd %p)",
-+ cmd);
-+ if ((cmd->sense_buflen < 18) || (cmd->sense_valid_len < 8)) {
-+ PRINT_ERROR("Sense too small to convert (%d, "
-+ "type: descriptor, valid %d)",
-+ cmd->sense_buflen, cmd->sense_valid_len);
-+ goto out;
-+ }
-+ cmd->sense_valid_len = scst_set_sense(cmd->sense,
-+ cmd->sense_buflen, d_sense,
-+ cmd->sense[1], cmd->sense[2], cmd->sense[3]);
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL(scst_check_convert_sense);
-+
-+static int scst_set_cmd_error_sense(struct scst_cmd *cmd, uint8_t *sense,
-+ unsigned int len)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ res = scst_set_cmd_error_status(cmd, SAM_STAT_CHECK_CONDITION);
-+ if (res != 0)
-+ goto out;
-+
-+ res = scst_alloc_set_sense(cmd, 1, sense, len);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/**
-+ * scst_set_busy() - set BUSY or TASK SET FULL status
-+ *
-+ * Sets BUSY or TASK SET FULL status depending on whether this session has
-+ * other outstanding commands.
-+ */
-+void scst_set_busy(struct scst_cmd *cmd)
-+{
-+ int c = atomic_read(&cmd->sess->sess_cmd_count);
-+
-+ TRACE_ENTRY();
-+
-+ if ((c <= 1) || (cmd->sess->init_phase != SCST_SESS_IPH_READY)) {
-+ scst_set_cmd_error_status(cmd, SAM_STAT_BUSY);
-+ TRACE(TRACE_FLOW_CONTROL, "Sending BUSY status to initiator %s "
-+ "(cmds count %d, queue_type %x, sess->init_phase %d)",
-+ cmd->sess->initiator_name, c,
-+ cmd->queue_type, cmd->sess->init_phase);
-+ } else {
-+ scst_set_cmd_error_status(cmd, SAM_STAT_TASK_SET_FULL);
-+ TRACE(TRACE_FLOW_CONTROL, "Sending QUEUE_FULL status to "
-+ "initiator %s (cmds count %d, queue_type %x, "
-+ "sess->init_phase %d)", cmd->sess->initiator_name, c,
-+ cmd->queue_type, cmd->sess->init_phase);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL(scst_set_busy);
-+
-+/**
-+ * scst_set_initial_UA() - set initial Unit Attention
-+ *
-+ * Sets initial Unit Attention on all devices of the session,
-+ * replacing default scst_sense_reset_UA
-+ */
-+void scst_set_initial_UA(struct scst_session *sess, int key, int asc, int ascq)
-+{
-+ int i;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Setting for sess %p initial UA %x/%x/%x", sess, key,
-+ asc, ascq);
-+
-+ /* To protect sess_tgt_dev_list */
-+ mutex_lock(&scst_mutex);
-+
-+ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
-+ struct list_head *head = &sess->sess_tgt_dev_list[i];
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
-+ spin_lock_bh(&tgt_dev->tgt_dev_lock);
-+ if (!list_empty(&tgt_dev->UA_list)) {
-+ struct scst_tgt_dev_UA *ua;
-+
-+ ua = list_entry(tgt_dev->UA_list.next,
-+ typeof(*ua), UA_list_entry);
-+ if (scst_analyze_sense(ua->UA_sense_buffer,
-+ ua->UA_valid_sense_len,
-+ SCST_SENSE_ALL_VALID,
-+ SCST_LOAD_SENSE(scst_sense_reset_UA))) {
-+ ua->UA_valid_sense_len = scst_set_sense(
-+ ua->UA_sense_buffer,
-+ sizeof(ua->UA_sense_buffer),
-+ tgt_dev->dev->d_sense,
-+ key, asc, ascq);
-+ } else
-+ PRINT_ERROR("%s",
-+ "The first UA isn't RESET UA");
-+ } else
-+ PRINT_ERROR("%s", "There's no RESET UA to "
-+ "replace");
-+ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
-+ }
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL(scst_set_initial_UA);
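-+
-+/*
-+ * Usage sketch (illustrative only): a target driver that wants freshly
-+ * created sessions to report POWER ON OCCURRED instead of the default
-+ * reset UA:
-+ *
-+ *	scst_set_initial_UA(sess, UNIT_ATTENTION, 0x29, 0x1);
-+ */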
-+
-+struct scst_aen *scst_alloc_aen(struct scst_session *sess,
-+ uint64_t unpacked_lun)
-+{
-+ struct scst_aen *aen;
-+
-+ TRACE_ENTRY();
-+
-+ aen = mempool_alloc(scst_aen_mempool, GFP_KERNEL);
-+ if (aen == NULL) {
-+ PRINT_ERROR("AEN memory allocation failed. Corresponding "
-+ "event notification will not be performed (initiator "
-+ "%s)", sess->initiator_name);
-+ goto out;
-+ }
-+ memset(aen, 0, sizeof(*aen));
-+
-+ aen->sess = sess;
-+ scst_sess_get(sess);
-+
-+ aen->lun = scst_pack_lun(unpacked_lun, sess->acg->addr_method);
-+
-+out:
-+ TRACE_EXIT_HRES((unsigned long)aen);
-+ return aen;
-+}
-+
-+void scst_free_aen(struct scst_aen *aen)
-+{
-+ TRACE_ENTRY();
-+
-+ scst_sess_put(aen->sess);
-+ mempool_free(aen, scst_aen_mempool);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Must be called under scst_mutex */
-+void scst_gen_aen_or_ua(struct scst_tgt_dev *tgt_dev,
-+ int key, int asc, int ascq)
-+{
-+ struct scst_tgt_template *tgtt = tgt_dev->sess->tgt->tgtt;
-+ uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
-+ int sl;
-+
-+ TRACE_ENTRY();
-+
-+ if ((tgt_dev->sess->init_phase != SCST_SESS_IPH_READY) ||
-+ (tgt_dev->sess->shut_phase != SCST_SESS_SPH_READY))
-+ goto out;
-+
-+ if (tgtt->report_aen != NULL) {
-+ struct scst_aen *aen;
-+ int rc;
-+
-+ aen = scst_alloc_aen(tgt_dev->sess, tgt_dev->lun);
-+ if (aen == NULL)
-+ goto queue_ua;
-+
-+ aen->event_fn = SCST_AEN_SCSI;
-+ aen->aen_sense_len = scst_set_sense(aen->aen_sense,
-+ sizeof(aen->aen_sense), tgt_dev->dev->d_sense,
-+ key, asc, ascq);
-+
-+ TRACE_DBG("Calling target's %s report_aen(%p)",
-+ tgtt->name, aen);
-+ rc = tgtt->report_aen(aen);
-+ TRACE_DBG("Target's %s report_aen(%p) returned %d",
-+ tgtt->name, aen, rc);
-+ if (rc == SCST_AEN_RES_SUCCESS)
-+ goto out;
-+
-+ scst_free_aen(aen);
-+ }
-+
-+queue_ua:
-+ TRACE_MGMT_DBG("AEN not supported, queueing plain UA (tgt_dev %p)",
-+ tgt_dev);
-+ sl = scst_set_sense(sense_buffer, sizeof(sense_buffer),
-+ tgt_dev->dev->d_sense, key, asc, ascq);
-+ scst_check_set_UA(tgt_dev, sense_buffer, sl, 0);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ * scst_capacity_data_changed() - notify SCST about device capacity change
-+ *
-+ * Notifies SCST core that dev has changed its capacity. Called under no locks.
-+ */
-+void scst_capacity_data_changed(struct scst_device *dev)
-+{
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ if (dev->type != TYPE_DISK) {
-+ TRACE_MGMT_DBG("Device type %d isn't for CAPACITY DATA "
-+ "CHANGED UA", dev->type);
-+ goto out;
-+ }
-+
-+ TRACE_MGMT_DBG("CAPACITY DATA CHANGED (dev %p)", dev);
-+
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ scst_gen_aen_or_ua(tgt_dev,
-+ SCST_LOAD_SENSE(scst_sense_capacity_data_changed));
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_capacity_data_changed);
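-+
-+/*
-+ * Usage sketch (illustrative only): a virtual disk handler would call
-+ * this once after an online resize, with no locks held:
-+ *
-+ *	scst_capacity_data_changed(dev);
-+ */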
-+
-+static inline bool scst_is_report_luns_changed_type(int type)
-+{
-+ switch (type) {
-+ case TYPE_DISK:
-+ case TYPE_TAPE:
-+ case TYPE_PRINTER:
-+ case TYPE_PROCESSOR:
-+ case TYPE_WORM:
-+ case TYPE_ROM:
-+ case TYPE_SCANNER:
-+ case TYPE_MOD:
-+ case TYPE_MEDIUM_CHANGER:
-+ case TYPE_RAID:
-+ case TYPE_ENCLOSURE:
-+ return true;
-+ default:
-+ return false;
-+ }
-+}
-+
-+/* scst_mutex supposed to be held */
-+static void scst_queue_report_luns_changed_UA(struct scst_session *sess,
-+ int flags)
-+{
-+ uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
-+ struct list_head *head;
-+ struct scst_tgt_dev *tgt_dev;
-+ int i;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Queueing REPORTED LUNS DATA CHANGED UA "
-+ "(sess %p)", sess);
-+
-+ local_bh_disable();
-+
-+ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
-+ head = &sess->sess_tgt_dev_list[i];
-+
-+ list_for_each_entry(tgt_dev, head,
-+ sess_tgt_dev_list_entry) {
-+			/* Lockdep triggers a false positive here... */
-+ spin_lock(&tgt_dev->tgt_dev_lock);
-+ }
-+ }
-+
-+ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
-+ head = &sess->sess_tgt_dev_list[i];
-+
-+ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
-+ int sl;
-+
-+ if (!scst_is_report_luns_changed_type(
-+ tgt_dev->dev->type))
-+ continue;
-+
-+ sl = scst_set_sense(sense_buffer, sizeof(sense_buffer),
-+ tgt_dev->dev->d_sense,
-+ SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));
-+
-+ __scst_check_set_UA(tgt_dev, sense_buffer,
-+ sl, flags | SCST_SET_UA_FLAG_GLOBAL);
-+ }
-+ }
-+
-+ for (i = SESS_TGT_DEV_LIST_HASH_SIZE-1; i >= 0; i--) {
-+ head = &sess->sess_tgt_dev_list[i];
-+
-+ list_for_each_entry_reverse(tgt_dev, head,
-+ sess_tgt_dev_list_entry) {
-+ spin_unlock(&tgt_dev->tgt_dev_lock);
-+ }
-+ }
-+
-+ local_bh_enable();
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* The activity is supposed to be suspended and scst_mutex held */
-+static void scst_report_luns_changed_sess(struct scst_session *sess)
-+{
-+ int i;
-+ struct scst_tgt_template *tgtt = sess->tgt->tgtt;
-+ int d_sense = 0;
-+ uint64_t lun = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if ((sess->init_phase != SCST_SESS_IPH_READY) ||
-+ (sess->shut_phase != SCST_SESS_SPH_READY))
-+ goto out;
-+
-+ TRACE_DBG("REPORTED LUNS DATA CHANGED (sess %p)", sess);
-+
-+ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
-+ struct list_head *head;
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ head = &sess->sess_tgt_dev_list[i];
-+
-+ list_for_each_entry(tgt_dev, head,
-+ sess_tgt_dev_list_entry) {
-+ if (scst_is_report_luns_changed_type(
-+ tgt_dev->dev->type)) {
-+ lun = tgt_dev->lun;
-+ d_sense = tgt_dev->dev->d_sense;
-+ goto found;
-+ }
-+ }
-+ }
-+
-+found:
-+ if (tgtt->report_aen != NULL) {
-+ struct scst_aen *aen;
-+ int rc;
-+
-+ aen = scst_alloc_aen(sess, lun);
-+ if (aen == NULL)
-+ goto queue_ua;
-+
-+ aen->event_fn = SCST_AEN_SCSI;
-+ aen->aen_sense_len = scst_set_sense(aen->aen_sense,
-+ sizeof(aen->aen_sense), d_sense,
-+ SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed));
-+
-+ TRACE_DBG("Calling target's %s report_aen(%p)",
-+ tgtt->name, aen);
-+ rc = tgtt->report_aen(aen);
-+ TRACE_DBG("Target's %s report_aen(%p) returned %d",
-+ tgtt->name, aen, rc);
-+ if (rc == SCST_AEN_RES_SUCCESS)
-+ goto out;
-+
-+ scst_free_aen(aen);
-+ }
-+
-+queue_ua:
-+ scst_queue_report_luns_changed_UA(sess, 0);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* The activity is supposed to be suspended and scst_mutex held */
-+void scst_report_luns_changed(struct scst_acg *acg)
-+{
-+ struct scst_session *sess;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("REPORTED LUNS DATA CHANGED (acg %s)", acg->acg_name);
-+
-+ list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
-+ scst_report_luns_changed_sess(sess);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ * scst_aen_done() - AEN processing done
-+ *
-+ * Notifies SCST that the driver has sent the AEN and it can be freed
-+ * now. If the delivery wasn't successful, don't forget to set the
-+ * delivery status using scst_set_aen_delivery_status() before calling
-+ * this function.
-+ */
-+void scst_aen_done(struct scst_aen *aen)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("AEN %p (fn %d) done (initiator %s)", aen,
-+ aen->event_fn, aen->sess->initiator_name);
-+
-+ if (aen->delivery_status == SCST_AEN_RES_SUCCESS)
-+ goto out_free;
-+
-+ if (aen->event_fn != SCST_AEN_SCSI)
-+ goto out_free;
-+
-+ TRACE_MGMT_DBG("Delivery of SCSI AEN failed (initiator %s)",
-+ aen->sess->initiator_name);
-+
-+ if (scst_analyze_sense(aen->aen_sense, aen->aen_sense_len,
-+ SCST_SENSE_ALL_VALID, SCST_LOAD_SENSE(
-+ scst_sense_reported_luns_data_changed))) {
-+ mutex_lock(&scst_mutex);
-+ scst_queue_report_luns_changed_UA(aen->sess,
-+ SCST_SET_UA_FLAG_AT_HEAD);
-+ mutex_unlock(&scst_mutex);
-+ } else {
-+ struct list_head *head;
-+ struct scst_tgt_dev *tgt_dev;
-+ uint64_t lun;
-+
-+ lun = scst_unpack_lun((uint8_t *)&aen->lun, sizeof(aen->lun));
-+
-+ mutex_lock(&scst_mutex);
-+
-+		/* tgt_dev might have gone away, so we need to look it up again */
-+ head = &aen->sess->sess_tgt_dev_list[SESS_TGT_DEV_LIST_HASH_FN(lun)];
-+ list_for_each_entry(tgt_dev, head,
-+ sess_tgt_dev_list_entry) {
-+ if (tgt_dev->lun == lun) {
-+ TRACE_MGMT_DBG("Requeuing failed AEN UA for "
-+ "tgt_dev %p", tgt_dev);
-+ scst_check_set_UA(tgt_dev, aen->aen_sense,
-+ aen->aen_sense_len,
-+ SCST_SET_UA_FLAG_AT_HEAD);
-+ break;
-+ }
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+ }
-+
-+out_free:
-+ scst_free_aen(aen);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL(scst_aen_done);
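-+
-+/*
-+ * Usage sketch (illustrative only; mydrv_send_aen() is a hypothetical
-+ * transport routine): a target driver's AEN delivery path could look
-+ * like:
-+ *
-+ *	if (mydrv_send_aen(aen) != 0)
-+ *		scst_set_aen_delivery_status(aen, SCST_AEN_RES_FAILED);
-+ *	scst_aen_done(aen);
-+ */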
-+
-+void scst_requeue_ua(struct scst_cmd *cmd)
-+{
-+ TRACE_ENTRY();
-+
-+ if (scst_analyze_sense(cmd->sense, cmd->sense_valid_len,
-+ SCST_SENSE_ALL_VALID,
-+ SCST_LOAD_SENSE(scst_sense_reported_luns_data_changed))) {
-+ TRACE_MGMT_DBG("Requeuing REPORTED LUNS DATA CHANGED UA "
-+ "for delivery failed cmd %p", cmd);
-+ mutex_lock(&scst_mutex);
-+ scst_queue_report_luns_changed_UA(cmd->sess,
-+ SCST_SET_UA_FLAG_AT_HEAD);
-+ mutex_unlock(&scst_mutex);
-+ } else {
-+ TRACE_MGMT_DBG("Requeuing UA for delivery failed cmd %p", cmd);
-+ scst_check_set_UA(cmd->tgt_dev, cmd->sense,
-+ cmd->sense_valid_len, SCST_SET_UA_FLAG_AT_HEAD);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* The activity is supposed to be suspended and scst_mutex held */
-+static void scst_check_reassign_sess(struct scst_session *sess)
-+{
-+ struct scst_acg *acg, *old_acg;
-+ struct scst_acg_dev *acg_dev;
-+ int i, rc;
-+ struct list_head *head;
-+ struct scst_tgt_dev *tgt_dev;
-+ bool luns_changed = false;
-+ bool add_failed, something_freed, not_needed_freed = false;
-+
-+ TRACE_ENTRY();
-+
-+ if (sess->shut_phase != SCST_SESS_SPH_READY)
-+ goto out;
-+
-+ TRACE_MGMT_DBG("Checking reassignment for sess %p (initiator %s)",
-+ sess, sess->initiator_name);
-+
-+ acg = scst_find_acg(sess);
-+ if (acg == sess->acg) {
-+ TRACE_MGMT_DBG("No reassignment for sess %p", sess);
-+ goto out;
-+ }
-+
-+ TRACE_MGMT_DBG("sess %p will be reassigned from acg %s to acg %s",
-+ sess, sess->acg->acg_name, acg->acg_name);
-+
-+ old_acg = sess->acg;
-+ sess->acg = NULL; /* to catch implicit dependencies earlier */
-+
-+retry_add:
-+ add_failed = false;
-+ list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
-+ unsigned int inq_changed_ua_needed = 0;
-+
-+ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
-+ head = &sess->sess_tgt_dev_list[i];
-+
-+ list_for_each_entry(tgt_dev, head,
-+ sess_tgt_dev_list_entry) {
-+ if ((tgt_dev->dev == acg_dev->dev) &&
-+ (tgt_dev->lun == acg_dev->lun) &&
-+ (tgt_dev->acg_dev->rd_only == acg_dev->rd_only)) {
-+ TRACE_MGMT_DBG("sess %p: tgt_dev %p for "
-+ "LUN %lld stays the same",
-+ sess, tgt_dev,
-+ (unsigned long long)tgt_dev->lun);
-+ tgt_dev->acg_dev = acg_dev;
-+ goto next;
-+ } else if (tgt_dev->lun == acg_dev->lun)
-+ inq_changed_ua_needed = 1;
-+ }
-+ }
-+
-+ luns_changed = true;
-+
-+ TRACE_MGMT_DBG("sess %p: Allocing new tgt_dev for LUN %lld",
-+ sess, (unsigned long long)acg_dev->lun);
-+
-+ rc = scst_alloc_add_tgt_dev(sess, acg_dev, &tgt_dev);
-+ if (rc == -EPERM)
-+ continue;
-+ else if (rc != 0) {
-+ add_failed = true;
-+ break;
-+ }
-+
-+ tgt_dev->inq_changed_ua_needed = inq_changed_ua_needed ||
-+ not_needed_freed;
-+next:
-+ continue;
-+ }
-+
-+ something_freed = false;
-+ not_needed_freed = true;
-+ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
-+ struct scst_tgt_dev *t;
-+ head = &sess->sess_tgt_dev_list[i];
-+
-+ list_for_each_entry_safe(tgt_dev, t, head,
-+ sess_tgt_dev_list_entry) {
-+ if (tgt_dev->acg_dev->acg != acg) {
-+ TRACE_MGMT_DBG("sess %p: Deleting not used "
-+ "tgt_dev %p for LUN %lld",
-+ sess, tgt_dev,
-+ (unsigned long long)tgt_dev->lun);
-+ luns_changed = true;
-+ something_freed = true;
-+ scst_free_tgt_dev(tgt_dev);
-+ }
-+ }
-+ }
-+
-+ if (add_failed && something_freed) {
-+ TRACE_MGMT_DBG("sess %p: Retrying adding new tgt_devs", sess);
-+ goto retry_add;
-+ }
-+
-+ sess->acg = acg;
-+
-+ TRACE_DBG("Moving sess %p from acg %s to acg %s", sess,
-+ old_acg->acg_name, acg->acg_name);
-+ list_move_tail(&sess->acg_sess_list_entry, &acg->acg_sess_list);
-+
-+ scst_recreate_sess_luns_link(sess);
-+ /* Ignore possible error, since we can't do anything on it */
-+
-+ if (luns_changed) {
-+ scst_report_luns_changed_sess(sess);
-+
-+ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
-+ head = &sess->sess_tgt_dev_list[i];
-+
-+ list_for_each_entry(tgt_dev, head,
-+ sess_tgt_dev_list_entry) {
-+ if (tgt_dev->inq_changed_ua_needed) {
-+ TRACE_MGMT_DBG("sess %p: Setting "
-+ "INQUIRY DATA HAS CHANGED UA "
-+ "(tgt_dev %p)", sess, tgt_dev);
-+
-+ tgt_dev->inq_changed_ua_needed = 0;
-+
-+ scst_gen_aen_or_ua(tgt_dev,
-+ SCST_LOAD_SENSE(scst_sense_inquery_data_changed));
-+ }
-+ }
-+ }
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* The activity is supposed to be suspended and scst_mutex held */
-+void scst_check_reassign_sessions(void)
-+{
-+ struct scst_tgt_template *tgtt;
-+
-+ TRACE_ENTRY();
-+
-+ list_for_each_entry(tgtt, &scst_template_list, scst_template_list_entry) {
-+ struct scst_tgt *tgt;
-+ list_for_each_entry(tgt, &tgtt->tgt_list, tgt_list_entry) {
-+ struct scst_session *sess;
-+ list_for_each_entry(sess, &tgt->sess_list,
-+ sess_list_entry) {
-+ scst_check_reassign_sess(sess);
-+ }
-+ }
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int scst_get_cmd_abnormal_done_state(const struct scst_cmd *cmd)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ switch (cmd->state) {
-+ case SCST_CMD_STATE_INIT_WAIT:
-+ case SCST_CMD_STATE_INIT:
-+ case SCST_CMD_STATE_PARSE:
-+ if (cmd->preprocessing_only) {
-+ res = SCST_CMD_STATE_PREPROCESSING_DONE;
-+ break;
-+		} /* else fall through */
-+ case SCST_CMD_STATE_DEV_DONE:
-+ if (cmd->internal)
-+ res = SCST_CMD_STATE_FINISHED_INTERNAL;
-+ else
-+ res = SCST_CMD_STATE_PRE_XMIT_RESP;
-+ break;
-+
-+ case SCST_CMD_STATE_PRE_DEV_DONE:
-+ case SCST_CMD_STATE_MODE_SELECT_CHECKS:
-+ res = SCST_CMD_STATE_DEV_DONE;
-+ break;
-+
-+ case SCST_CMD_STATE_PRE_XMIT_RESP:
-+ res = SCST_CMD_STATE_XMIT_RESP;
-+ break;
-+
-+ case SCST_CMD_STATE_PREPROCESSING_DONE:
-+ case SCST_CMD_STATE_PREPROCESSING_DONE_CALLED:
-+ if (cmd->tgt_dev == NULL)
-+ res = SCST_CMD_STATE_PRE_XMIT_RESP;
-+ else
-+ res = SCST_CMD_STATE_PRE_DEV_DONE;
-+ break;
-+
-+ case SCST_CMD_STATE_PREPARE_SPACE:
-+ if (cmd->preprocessing_only) {
-+ res = SCST_CMD_STATE_PREPROCESSING_DONE;
-+ break;
-+		} /* else fall through */
-+ case SCST_CMD_STATE_RDY_TO_XFER:
-+ case SCST_CMD_STATE_DATA_WAIT:
-+ case SCST_CMD_STATE_TGT_PRE_EXEC:
-+ case SCST_CMD_STATE_START_EXEC:
-+ case SCST_CMD_STATE_SEND_FOR_EXEC:
-+ case SCST_CMD_STATE_LOCAL_EXEC:
-+ case SCST_CMD_STATE_REAL_EXEC:
-+ case SCST_CMD_STATE_REAL_EXECUTING:
-+ res = SCST_CMD_STATE_PRE_DEV_DONE;
-+ break;
-+
-+ default:
-+ PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
-+ cmd->state, cmd, cmd->cdb[0]);
-+ BUG();
-+ /* Invalid state to suppress a compiler warning */
-+ res = SCST_CMD_STATE_LAST_ACTIVE;
-+ }
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/**
-+ * scst_set_cmd_abnormal_done_state() - set command's next abnormal done state
-+ *
-+ * Sets the state of the SCSI target state machine to abnormally complete the
-+ * command ASAP.
-+ *
-+ * Returns the new state.
-+ */
-+int scst_set_cmd_abnormal_done_state(struct scst_cmd *cmd)
-+{
-+ TRACE_ENTRY();
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ switch (cmd->state) {
-+ case SCST_CMD_STATE_XMIT_RESP:
-+ case SCST_CMD_STATE_FINISHED:
-+ case SCST_CMD_STATE_FINISHED_INTERNAL:
-+ case SCST_CMD_STATE_XMIT_WAIT:
-+ PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
-+ cmd->state, cmd, cmd->cdb[0]);
-+ BUG();
-+ }
-+#endif
-+
-+ cmd->state = scst_get_cmd_abnormal_done_state(cmd);
-+
-+ switch (cmd->state) {
-+ case SCST_CMD_STATE_INIT_WAIT:
-+ case SCST_CMD_STATE_INIT:
-+ case SCST_CMD_STATE_PARSE:
-+ case SCST_CMD_STATE_PREPROCESSING_DONE:
-+ case SCST_CMD_STATE_PREPROCESSING_DONE_CALLED:
-+ case SCST_CMD_STATE_PREPARE_SPACE:
-+ case SCST_CMD_STATE_RDY_TO_XFER:
-+ case SCST_CMD_STATE_DATA_WAIT:
-+ cmd->write_len = 0;
-+ cmd->resid_possible = 1;
-+ break;
-+ case SCST_CMD_STATE_TGT_PRE_EXEC:
-+ case SCST_CMD_STATE_SEND_FOR_EXEC:
-+ case SCST_CMD_STATE_START_EXEC:
-+ case SCST_CMD_STATE_LOCAL_EXEC:
-+ case SCST_CMD_STATE_REAL_EXEC:
-+ case SCST_CMD_STATE_REAL_EXECUTING:
-+ case SCST_CMD_STATE_DEV_DONE:
-+ case SCST_CMD_STATE_PRE_DEV_DONE:
-+ case SCST_CMD_STATE_MODE_SELECT_CHECKS:
-+ case SCST_CMD_STATE_PRE_XMIT_RESP:
-+ case SCST_CMD_STATE_FINISHED_INTERNAL:
-+ break;
-+ default:
-+ PRINT_CRIT_ERROR("Wrong cmd state %d (cmd %p, op %x)",
-+ cmd->state, cmd, cmd->cdb[0]);
-+ BUG();
-+ break;
-+ }
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ if (((cmd->state != SCST_CMD_STATE_PRE_XMIT_RESP) &&
-+ (cmd->state != SCST_CMD_STATE_PREPROCESSING_DONE)) &&
-+ (cmd->tgt_dev == NULL) && !cmd->internal) {
-+ PRINT_CRIT_ERROR("Wrong not inited cmd state %d (cmd %p, "
-+ "op %x)", cmd->state, cmd, cmd->cdb[0]);
-+ BUG();
-+ }
-+#endif
-+
-+ TRACE_EXIT_RES(cmd->state);
-+ return cmd->state;
-+}
-+EXPORT_SYMBOL_GPL(scst_set_cmd_abnormal_done_state);
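-+
-+/*
-+ * Usage sketch (illustrative only): a parse() callback that rejects a
-+ * malformed CDB typically pairs an error with this call:
-+ *
-+ *	scst_set_cmd_error(cmd, ILLEGAL_REQUEST, 0x24, 0);
-+ *	scst_set_cmd_abnormal_done_state(cmd);
-+ */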
-+
-+void scst_zero_write_rest(struct scst_cmd *cmd)
-+{
-+ int len, offs = 0;
-+ uint8_t *buf;
-+
-+ TRACE_ENTRY();
-+
-+ len = scst_get_sg_buf_first(cmd, &buf, *cmd->write_sg,
-+ *cmd->write_sg_cnt);
-+ while (len > 0) {
-+ int cur_offs;
-+
-+ if (offs + len <= cmd->write_len)
-+ goto next;
-+ else if (offs >= cmd->write_len)
-+ cur_offs = 0;
-+ else
-+ cur_offs = cmd->write_len - offs;
-+
-+ memset(&buf[cur_offs], 0, len - cur_offs);
-+
-+next:
-+ offs += len;
-+ scst_put_sg_buf(cmd, buf, *cmd->write_sg, *cmd->write_sg_cnt);
-+ len = scst_get_sg_buf_next(cmd, &buf, *cmd->write_sg,
-+ *cmd->write_sg_cnt);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void scst_adjust_sg(struct scst_cmd *cmd, struct scatterlist *sg,
-+ int *sg_cnt, int adjust_len)
-+{
-+ int i, j, l;
-+
-+ TRACE_ENTRY();
-+
-+ l = 0;
-+ for (i = 0, j = 0; i < *sg_cnt; i++, j++) {
-+ TRACE_DBG("i %d, j %d, sg_cnt %d, sg %p, page_link %lx", i, j,
-+ *sg_cnt, sg, sg[j].page_link);
-+ if (unlikely(sg_is_chain(&sg[j]))) {
-+ sg = sg_chain_ptr(&sg[j]);
-+ j = 0;
-+ }
-+ l += sg[j].length;
-+ if (l >= adjust_len) {
-+ int left = adjust_len - (l - sg[j].length);
-+#ifdef CONFIG_SCST_DEBUG
-+ TRACE(TRACE_SG_OP|TRACE_MEMORY, "cmd %p (tag %llu), "
-+ "sg %p, sg_cnt %d, adjust_len %d, i %d, j %d, "
-+ "sg[j].length %d, left %d",
-+ cmd, (long long unsigned int)cmd->tag,
-+ sg, *sg_cnt, adjust_len, i, j,
-+ sg[j].length, left);
-+#endif
-+ cmd->orig_sg = sg;
-+ cmd->p_orig_sg_cnt = sg_cnt;
-+ cmd->orig_sg_cnt = *sg_cnt;
-+ cmd->orig_sg_entry = j;
-+ cmd->orig_entry_len = sg[j].length;
-+ *sg_cnt = (left > 0) ? j+1 : j;
-+ sg[j].length = left;
-+ cmd->sg_buff_modified = 1;
-+ break;
-+ }
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ * scst_restore_sg_buff() - restores modified sg buffer
-+ *
-+ * Restores the modified sg buffer to its original state.
-+ */
-+void scst_restore_sg_buff(struct scst_cmd *cmd)
-+{
-+ TRACE_MEM("cmd %p, sg %p, orig_sg_entry %d, "
-+ "orig_entry_len %d, orig_sg_cnt %d", cmd, cmd->orig_sg,
-+ cmd->orig_sg_entry, cmd->orig_entry_len,
-+ cmd->orig_sg_cnt);
-+ cmd->orig_sg[cmd->orig_sg_entry].length = cmd->orig_entry_len;
-+ *cmd->p_orig_sg_cnt = cmd->orig_sg_cnt;
-+ cmd->sg_buff_modified = 0;
-+}
-+EXPORT_SYMBOL(scst_restore_sg_buff);
-+
-+/**
-+ * scst_set_resp_data_len() - set response data length
-+ *
-+ * Sets response data length for cmd and truncates its SG vector accordingly.
-+ *
-+ * The cmd->resp_data_len must not be set directly; it must be set only
-+ * using this function. Value of resp_data_len must be <= cmd->bufflen.
-+ */
-+void scst_set_resp_data_len(struct scst_cmd *cmd, int resp_data_len)
-+{
-+ TRACE_ENTRY();
-+
-+ scst_check_restore_sg_buff(cmd);
-+ cmd->resp_data_len = resp_data_len;
-+
-+ if (resp_data_len == cmd->bufflen)
-+ goto out;
-+
-+ TRACE_DBG("cmd %p, resp_data_len %d", cmd, resp_data_len);
-+
-+ scst_adjust_sg(cmd, cmd->sg, &cmd->sg_cnt, resp_data_len);
-+
-+ cmd->resid_possible = 1;
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_set_resp_data_len);
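-+
-+/*
-+ * Usage sketch (illustrative only): truncating the response of a
-+ * command that filled only 8 bytes of a larger buffer, so that only
-+ * those bytes are transferred back to the initiator:
-+ *
-+ *	scst_set_resp_data_len(cmd, 8);
-+ */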
-+
-+void scst_limit_sg_write_len(struct scst_cmd *cmd)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_MEM("Limiting sg write len to %d (cmd %p, sg %p, sg_cnt %d)",
-+ cmd->write_len, cmd, *cmd->write_sg, *cmd->write_sg_cnt);
-+
-+ scst_check_restore_sg_buff(cmd);
-+ scst_adjust_sg(cmd, *cmd->write_sg, cmd->write_sg_cnt, cmd->write_len);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+void scst_adjust_resp_data_len(struct scst_cmd *cmd)
-+{
-+ TRACE_ENTRY();
-+
-+ if (!cmd->expected_values_set) {
-+ cmd->adjusted_resp_data_len = cmd->resp_data_len;
-+ goto out;
-+ }
-+
-+ cmd->adjusted_resp_data_len = min(cmd->resp_data_len,
-+ cmd->expected_transfer_len);
-+
-+ if (cmd->adjusted_resp_data_len != cmd->resp_data_len) {
-+ TRACE_MEM("Adjusting resp_data_len to %d (cmd %p, sg %p, "
-+ "sg_cnt %d)", cmd->adjusted_resp_data_len, cmd, cmd->sg,
-+ cmd->sg_cnt);
-+ scst_check_restore_sg_buff(cmd);
-+ scst_adjust_sg(cmd, cmd->sg, &cmd->sg_cnt,
-+ cmd->adjusted_resp_data_len);
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ * scst_cmd_set_write_not_received_data_len() - sets cmd's not received len
-+ *
-+ * Sets cmd's not received data length. Also automatically sets resid_possible.
-+ */
-+void scst_cmd_set_write_not_received_data_len(struct scst_cmd *cmd,
-+ int not_received)
-+{
-+ TRACE_ENTRY();
-+
-+ if (!cmd->expected_values_set) {
-+ /*
-+ * No expected values set, so no residuals processing.
-+		 * This can happen if a command was preliminarily completed
-+		 * before the target driver had a chance to set the expected
-+		 * values.
-+ */
-+ TRACE_MGMT_DBG("No expected values set, ignoring (cmd %p)", cmd);
-+ goto out;
-+ }
-+
-+ cmd->resid_possible = 1;
-+
-+ if ((cmd->expected_data_direction & SCST_DATA_READ) &&
-+ (cmd->expected_data_direction & SCST_DATA_WRITE)) {
-+ cmd->write_len = cmd->expected_out_transfer_len - not_received;
-+ if (cmd->write_len == cmd->out_bufflen)
-+ goto out;
-+ } else if (cmd->expected_data_direction & SCST_DATA_WRITE) {
-+ cmd->write_len = cmd->expected_transfer_len - not_received;
-+ if (cmd->write_len == cmd->bufflen)
-+ goto out;
-+ }
-+
-+ /*
-+	 * Write len can now be bigger than cmd->(out_)bufflen, but that's OK,
-+	 * because it will be used only to calculate write residuals.
-+ */
-+
-+ TRACE_DBG("cmd %p, not_received %d, write_len %d", cmd, not_received,
-+ cmd->write_len);
-+
-+ if (cmd->data_direction & SCST_DATA_WRITE)
-+ scst_limit_sg_write_len(cmd);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL(scst_cmd_set_write_not_received_data_len);
-+
-+/**
-+ * __scst_get_resid() - returns residuals for cmd
-+ *
-+ * Returns residuals for command. Must not be called directly, use
-+ * scst_get_resid() instead.
-+ */
-+bool __scst_get_resid(struct scst_cmd *cmd, int *resid, int *bidi_out_resid)
-+{
-+ bool res;
-+
-+ TRACE_ENTRY();
-+
-+ *resid = 0;
-+ if (bidi_out_resid != NULL)
-+ *bidi_out_resid = 0;
-+
-+ if (!cmd->expected_values_set) {
-+ /*
-+ * No expected values set, so no residuals processing.
-+		 * This can happen if a command was preliminarily completed
-+		 * before the target driver had a chance to set the expected
-+		 * values.
-+ */
-+ TRACE_MGMT_DBG("No expected values set, returning no residual "
-+ "(cmd %p)", cmd);
-+ res = false;
-+ goto out;
-+ }
-+
-+ if (cmd->expected_data_direction & SCST_DATA_READ) {
-+ *resid = cmd->expected_transfer_len - cmd->resp_data_len;
-+ if ((cmd->expected_data_direction & SCST_DATA_WRITE) && bidi_out_resid) {
-+ if (cmd->write_len < cmd->expected_out_transfer_len)
-+ *bidi_out_resid = cmd->expected_out_transfer_len -
-+ cmd->write_len;
-+ else
-+ *bidi_out_resid = cmd->write_len - cmd->out_bufflen;
-+ }
-+ } else if (cmd->expected_data_direction & SCST_DATA_WRITE) {
-+ if (cmd->write_len < cmd->expected_transfer_len)
-+ *resid = cmd->expected_transfer_len - cmd->write_len;
-+ else
-+ *resid = cmd->write_len - cmd->bufflen;
-+ }
-+
-+ res = true;
-+
-+ TRACE_DBG("cmd %p, resid %d, bidi_out_resid %d (resp_data_len %d, "
-+ "expected_data_direction %d, write_len %d, bufflen %d)", cmd,
-+ *resid, bidi_out_resid ? *bidi_out_resid : 0, cmd->resp_data_len,
-+ cmd->expected_data_direction, cmd->write_len, cmd->bufflen);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL(__scst_get_resid);
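-+
-+/*
-+ * Usage sketch (illustrative only; mydrv_report_underflow() is a
-+ * hypothetical helper): computing an underflow residual via the
-+ * scst_get_resid() wrapper mentioned above:
-+ *
-+ *	int resid;
-+ *
-+ *	if (scst_get_resid(cmd, &resid, NULL) && (resid > 0))
-+ *		mydrv_report_underflow(cmd, resid);
-+ */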
-+
-+/* No locks */
-+int scst_queue_retry_cmd(struct scst_cmd *cmd, int finished_cmds)
-+{
-+ struct scst_tgt *tgt = cmd->tgt;
-+ int res = 0;
-+ unsigned long flags;
-+
-+ TRACE_ENTRY();
-+
-+ spin_lock_irqsave(&tgt->tgt_lock, flags);
-+ tgt->retry_cmds++;
-+ /*
-+	 * A memory barrier is needed here to order the write to retry_cmds
-+	 * against the read of finished_cmds, so we don't miss the case when
-+	 * a command finishes while we are queueing it for retry after the
-+	 * finished_cmds check.
-+ */
-+ smp_mb();
-+ TRACE_RETRY("TGT QUEUE FULL: incrementing retry_cmds %d",
-+ tgt->retry_cmds);
-+ if (finished_cmds != atomic_read(&tgt->finished_cmds)) {
-+ /* At least one cmd finished, so try again */
-+ tgt->retry_cmds--;
-+ TRACE_RETRY("Some command(s) finished, direct retry "
-+ "(finished_cmds=%d, tgt->finished_cmds=%d, "
-+ "retry_cmds=%d)", finished_cmds,
-+ atomic_read(&tgt->finished_cmds), tgt->retry_cmds);
-+ res = -1;
-+ goto out_unlock_tgt;
-+ }
-+
-+ TRACE_RETRY("Adding cmd %p to retry cmd list", cmd);
-+ list_add_tail(&cmd->cmd_list_entry, &tgt->retry_cmd_list);
-+
-+ if (!tgt->retry_timer_active) {
-+ tgt->retry_timer.expires = jiffies + SCST_TGT_RETRY_TIMEOUT;
-+ add_timer(&tgt->retry_timer);
-+ tgt->retry_timer_active = 1;
-+ }
-+
-+out_unlock_tgt:
-+ spin_unlock_irqrestore(&tgt->tgt_lock, flags);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/**
-+ * scst_update_hw_pending_start() - update commands pending start
-+ *
-+ * Updates the command's hw_pending_start as if it's just started hw pending.
-+ * Target drivers should call it if they received reply from this pending
-+ * command, but SCST core won't see it.
-+ */
-+void scst_update_hw_pending_start(struct scst_cmd *cmd)
-+{
-+ unsigned long flags;
-+
-+ TRACE_ENTRY();
-+
-+ /* To sync with scst_check_hw_pending_cmd() */
-+ spin_lock_irqsave(&cmd->sess->sess_list_lock, flags);
-+ cmd->hw_pending_start = jiffies;
-+ TRACE_MGMT_DBG("Updated hw_pending_start to %ld (cmd %p)",
-+ cmd->hw_pending_start, cmd);
-+ spin_unlock_irqrestore(&cmd->sess->sess_list_lock, flags);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_update_hw_pending_start);
-+
-+/*
-+ * Supposed to be called under sess_list_lock, but can release/reacquire it.
-+ * Returns 0 to continue, >0 to restart, <0 to break.
-+ */
-+static int scst_check_hw_pending_cmd(struct scst_cmd *cmd,
-+ unsigned long cur_time, unsigned long max_time,
-+ struct scst_session *sess, unsigned long *flags,
-+ struct scst_tgt_template *tgtt)
-+{
-+ int res = -1; /* break */
-+
-+ TRACE_DBG("cmd %p, hw_pending %d, proc time %ld, "
-+ "pending time %ld", cmd, cmd->cmd_hw_pending,
-+ (long)(cur_time - cmd->start_time) / HZ,
-+ (long)(cur_time - cmd->hw_pending_start) / HZ);
-+
-+ if (time_before(cur_time, cmd->start_time + max_time)) {
-+ /* Cmds are ordered, so no need to check more */
-+ goto out;
-+ }
-+
-+ if (!cmd->cmd_hw_pending) {
-+ res = 0; /* continue */
-+ goto out;
-+ }
-+
-+ if (time_before(cur_time, cmd->hw_pending_start + max_time)) {
-+ res = 0; /* continue */
-+ goto out;
-+ }
-+
-+ TRACE_MGMT_DBG("Cmd %p HW pending for too long %ld (state %x)",
-+ cmd, (cur_time - cmd->hw_pending_start) / HZ,
-+ cmd->state);
-+
-+ cmd->cmd_hw_pending = 0;
-+
-+ spin_unlock_irqrestore(&sess->sess_list_lock, *flags);
-+ tgtt->on_hw_pending_cmd_timeout(cmd);
-+ spin_lock_irqsave(&sess->sess_list_lock, *flags);
-+
-+ res = 1; /* restart */
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void scst_hw_pending_work_fn(struct delayed_work *work)
-+{
-+ struct scst_session *sess = container_of(work, struct scst_session,
-+ hw_pending_work);
-+ struct scst_tgt_template *tgtt = sess->tgt->tgtt;
-+ struct scst_cmd *cmd;
-+ unsigned long cur_time = jiffies;
-+ unsigned long flags;
-+ unsigned long max_time = tgtt->max_hw_pending_time * HZ;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("HW pending work (sess %p, max time %ld)", sess, max_time/HZ);
-+
-+ clear_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);
-+
-+ spin_lock_irqsave(&sess->sess_list_lock, flags);
-+
-+restart:
-+ list_for_each_entry(cmd, &sess->sess_cmd_list, sess_cmd_list_entry) {
-+ int rc;
-+
-+ rc = scst_check_hw_pending_cmd(cmd, cur_time, max_time, sess,
-+ &flags, tgtt);
-+ if (rc < 0)
-+ break;
-+ else if (rc == 0)
-+ continue;
-+ else
-+ goto restart;
-+ }
-+
-+ if (!list_empty(&sess->sess_cmd_list)) {
-+ /*
-+		 * For stuck cmds, if there is no activity, we might need one
-+		 * more run to release them, so reschedule once again.
-+ */
-+ TRACE_DBG("Sched HW pending work for sess %p (max time %d)",
-+ sess, tgtt->max_hw_pending_time);
-+ set_bit(SCST_SESS_HW_PENDING_WORK_SCHEDULED, &sess->sess_aflags);
-+ schedule_delayed_work(&sess->hw_pending_work,
-+ tgtt->max_hw_pending_time * HZ);
-+ }
-+
-+ spin_unlock_irqrestore(&sess->sess_list_lock, flags);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static bool __scst_is_relative_target_port_id_unique(uint16_t id,
-+ const struct scst_tgt *t)
-+{
-+ bool res = true;
-+ struct scst_tgt_template *tgtt;
-+
-+ TRACE_ENTRY();
-+
-+ list_for_each_entry(tgtt, &scst_template_list,
-+ scst_template_list_entry) {
-+ struct scst_tgt *tgt;
-+ list_for_each_entry(tgt, &tgtt->tgt_list, tgt_list_entry) {
-+ if (tgt == t)
-+ continue;
-+ if ((tgt->tgtt->is_target_enabled != NULL) &&
-+ !tgt->tgtt->is_target_enabled(tgt))
-+ continue;
-+ if (id == tgt->rel_tgt_id) {
-+ res = false;
-+ break;
-+ }
-+ }
-+ }
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* scst_mutex supposed to be locked */
-+bool scst_is_relative_target_port_id_unique(uint16_t id,
-+ const struct scst_tgt *t)
-+{
-+ bool res;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_mutex);
-+ res = __scst_is_relative_target_port_id_unique(id, t);
-+ mutex_unlock(&scst_mutex);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+int gen_relative_target_port_id(uint16_t *id)
-+{
-+ int res = -EOVERFLOW;
-+ static unsigned long rti = SCST_MIN_REL_TGT_ID, rti_prev;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ rti_prev = rti;
-+ do {
-+ if (__scst_is_relative_target_port_id_unique(rti, NULL)) {
-+ *id = (uint16_t)rti++;
-+ res = 0;
-+ goto out_unlock;
-+ }
-+ rti++;
-+ if (rti > SCST_MAX_REL_TGT_ID)
-+ rti = SCST_MIN_REL_TGT_ID;
-+ } while (rti != rti_prev);
-+
-+ PRINT_ERROR("%s", "Unable to create unique relative target port id");
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* No locks */
-+int scst_alloc_tgt(struct scst_tgt_template *tgtt, struct scst_tgt **tgt)
-+{
-+ struct scst_tgt *t;
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ t = kzalloc(sizeof(*t), GFP_KERNEL);
-+ if (t == NULL) {
-+ PRINT_ERROR("%s", "Allocation of tgt failed");
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ INIT_LIST_HEAD(&t->sess_list);
-+ init_waitqueue_head(&t->unreg_waitQ);
-+ t->tgtt = tgtt;
-+ t->sg_tablesize = tgtt->sg_tablesize;
-+ spin_lock_init(&t->tgt_lock);
-+ INIT_LIST_HEAD(&t->retry_cmd_list);
-+ atomic_set(&t->finished_cmds, 0);
-+ init_timer(&t->retry_timer);
-+ t->retry_timer.data = (unsigned long)t;
-+ t->retry_timer.function = scst_tgt_retry_timer_fn;
-+
-+ INIT_LIST_HEAD(&t->tgt_acg_list);
-+
-+ *tgt = t;
-+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
-+
-+/* No locks */
-+void scst_free_tgt(struct scst_tgt *tgt)
-+{
-+ TRACE_ENTRY();
-+
-+ kfree(tgt->tgt_name);
-+ kfree(tgt->tgt_comment);
-+
-+ kfree(tgt);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void scst_init_order_data(struct scst_order_data *order_data)
-+{
-+ int i;
-+ spin_lock_init(&order_data->sn_lock);
-+ INIT_LIST_HEAD(&order_data->deferred_cmd_list);
-+ INIT_LIST_HEAD(&order_data->skipped_sn_list);
-+ order_data->curr_sn = (typeof(order_data->curr_sn))(-300);
-+ order_data->expected_sn = order_data->curr_sn + 1;
-+ order_data->num_free_sn_slots = ARRAY_SIZE(order_data->sn_slots)-1;
-+ order_data->cur_sn_slot = &order_data->sn_slots[0];
-+ for (i = 0; i < (int)ARRAY_SIZE(order_data->sn_slots); i++)
-+ atomic_set(&order_data->sn_slots[i], 0);
-+ return;
-+}
-+
-+/* Called under scst_mutex and suspended activity */
-+int scst_alloc_device(gfp_t gfp_mask, struct scst_device **out_dev)
-+{
-+ struct scst_device *dev;
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ dev = kzalloc(sizeof(*dev), gfp_mask);
-+ if (dev == NULL) {
-+ PRINT_ERROR("%s", "Allocation of scst_device failed");
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ dev->handler = &scst_null_devtype;
-+ atomic_set(&dev->dev_cmd_count, 0);
-+ scst_init_mem_lim(&dev->dev_mem_lim);
-+ spin_lock_init(&dev->dev_lock);
-+ INIT_LIST_HEAD(&dev->blocked_cmd_list);
-+ INIT_LIST_HEAD(&dev->dev_tgt_dev_list);
-+ INIT_LIST_HEAD(&dev->dev_acg_dev_list);
-+ dev->dev_double_ua_possible = 1;
-+ dev->queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;
-+
-+ mutex_init(&dev->dev_pr_mutex);
-+ dev->pr_generation = 0;
-+ dev->pr_is_set = 0;
-+ dev->pr_holder = NULL;
-+ dev->pr_scope = SCOPE_LU;
-+ dev->pr_type = TYPE_UNSPECIFIED;
-+ INIT_LIST_HEAD(&dev->dev_registrants_list);
-+
-+ scst_init_order_data(&dev->dev_order_data);
-+
-+ scst_init_threads(&dev->dev_cmd_threads);
-+
-+ *out_dev = dev;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+void scst_free_device(struct scst_device *dev)
-+{
-+ TRACE_ENTRY();
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ if (!list_empty(&dev->dev_tgt_dev_list) ||
-+ !list_empty(&dev->dev_acg_dev_list)) {
-+ PRINT_CRIT_ERROR("%s: dev_tgt_dev_list or dev_acg_dev_list "
-+ "is not empty!", __func__);
-+ BUG();
-+ }
-+#endif
-+
-+ scst_deinit_threads(&dev->dev_cmd_threads);
-+
-+ kfree(dev->virt_name);
-+ kfree(dev);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ * scst_init_mem_lim - initialize memory limits structure
-+ *
-+ * Initializes memory limits structure mem_lim according to
-+ * the current system configuration. This structure should later be used
-+ * to track and limit the memory allocated by one or more SGV pools.
-+ */
-+void scst_init_mem_lim(struct scst_mem_lim *mem_lim)
-+{
-+ atomic_set(&mem_lim->alloced_pages, 0);
-+ mem_lim->max_allowed_pages =
-+ ((uint64_t)scst_max_dev_cmd_mem << 10) >> (PAGE_SHIFT - 10);
-+}
-+EXPORT_SYMBOL_GPL(scst_init_mem_lim);
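-+
-+/*
-+ * Usage sketch (illustrative only): a target driver keeping its own
-+ * allocation accounting would embed and initialize a scst_mem_lim:
-+ *
-+ *	struct scst_mem_lim mem_lim;
-+ *
-+ *	scst_init_mem_lim(&mem_lim);
-+ */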
-+
-+static struct scst_acg_dev *scst_alloc_acg_dev(struct scst_acg *acg,
-+ struct scst_device *dev, uint64_t lun)
-+{
-+ struct scst_acg_dev *res;
-+
-+ TRACE_ENTRY();
-+
-+ res = kmem_cache_zalloc(scst_acgd_cachep, GFP_KERNEL);
-+ if (res == NULL) {
-+ PRINT_ERROR("%s", "Allocation of scst_acg_dev failed");
-+ goto out;
-+ }
-+
-+ res->dev = dev;
-+ res->acg = acg;
-+ res->lun = lun;
-+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
-+
-+/*
-+ * The activity supposed to be suspended and scst_mutex held or the
-+ * corresponding target supposed to be stopped.
-+ */
-+static void scst_del_free_acg_dev(struct scst_acg_dev *acg_dev, bool del_sysfs)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Removing acg_dev %p from acg_dev_list and dev_acg_dev_list",
-+ acg_dev);
-+ list_del(&acg_dev->acg_dev_list_entry);
-+ list_del(&acg_dev->dev_acg_dev_list_entry);
-+
-+ if (del_sysfs)
-+ scst_acg_dev_sysfs_del(acg_dev);
-+
-+ kmem_cache_free(scst_acgd_cachep, acg_dev);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* The activity supposed to be suspended and scst_mutex held */
-+int scst_acg_add_lun(struct scst_acg *acg, struct kobject *parent,
-+ struct scst_device *dev, uint64_t lun, int read_only,
-+ bool gen_scst_report_luns_changed, struct scst_acg_dev **out_acg_dev)
-+{
-+ int res = 0;
-+ struct scst_acg_dev *acg_dev;
-+ struct scst_tgt_dev *tgt_dev;
-+ struct scst_session *sess;
-+ LIST_HEAD(tmp_tgt_dev_list);
-+
-+ TRACE_ENTRY();
-+
-+ INIT_LIST_HEAD(&tmp_tgt_dev_list);
-+
-+ acg_dev = scst_alloc_acg_dev(acg, dev, lun);
-+ if (acg_dev == NULL) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ acg_dev->rd_only = read_only;
-+
-+ TRACE_DBG("Adding acg_dev %p to acg_dev_list and dev_acg_dev_list",
-+ acg_dev);
-+ list_add_tail(&acg_dev->acg_dev_list_entry, &acg->acg_dev_list);
-+ list_add_tail(&acg_dev->dev_acg_dev_list_entry, &dev->dev_acg_dev_list);
-+
-+ list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
-+ res = scst_alloc_add_tgt_dev(sess, acg_dev, &tgt_dev);
-+ if (res == -EPERM)
-+ continue;
-+ else if (res != 0)
-+ goto out_free;
-+
-+ list_add_tail(&tgt_dev->extra_tgt_dev_list_entry,
-+ &tmp_tgt_dev_list);
-+ }
-+
-+ res = scst_acg_dev_sysfs_create(acg_dev, parent);
-+ if (res != 0)
-+ goto out_free;
-+
-+ if (gen_scst_report_luns_changed)
-+ scst_report_luns_changed(acg);
-+
-+ PRINT_INFO("Added device %s to group %s (LUN %lld, "
-+ "rd_only %d)", dev->virt_name, acg->acg_name,
-+ (long long unsigned int)lun, read_only);
-+
-+ if (out_acg_dev != NULL)
-+ *out_acg_dev = acg_dev;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free:
-+ list_for_each_entry(tgt_dev, &tmp_tgt_dev_list,
-+ extra_tgt_dev_list_entry) {
-+ scst_free_tgt_dev(tgt_dev);
-+ }
-+ scst_del_free_acg_dev(acg_dev, false);
-+ goto out;
-+}
-+
-+/* The activity supposed to be suspended and scst_mutex held */
-+int scst_acg_del_lun(struct scst_acg *acg, uint64_t lun,
-+ bool gen_scst_report_luns_changed)
-+{
-+ int res = 0;
-+ struct scst_acg_dev *acg_dev = NULL, *a;
-+ struct scst_tgt_dev *tgt_dev, *tt;
-+
-+ TRACE_ENTRY();
-+
-+ list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
-+ if (a->lun == lun) {
-+ acg_dev = a;
-+ break;
-+ }
-+ }
-+ if (acg_dev == NULL) {
-+ PRINT_ERROR("Device is not found in group %s", acg->acg_name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ list_for_each_entry_safe(tgt_dev, tt, &acg_dev->dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ if (tgt_dev->acg_dev == acg_dev)
-+ scst_free_tgt_dev(tgt_dev);
-+ }
-+
-+ scst_del_free_acg_dev(acg_dev, true);
-+
-+ if (gen_scst_report_luns_changed)
-+ scst_report_luns_changed(acg);
-+
-+ PRINT_INFO("Removed LUN %lld from group %s", (unsigned long long)lun,
-+ acg->acg_name);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* The activity supposed to be suspended and scst_mutex held */
-+struct scst_acg *scst_alloc_add_acg(struct scst_tgt *tgt,
-+ const char *acg_name, bool tgt_acg)
-+{
-+ struct scst_acg *acg;
-+
-+ TRACE_ENTRY();
-+
-+ acg = kzalloc(sizeof(*acg), GFP_KERNEL);
-+ if (acg == NULL) {
-+ PRINT_ERROR("%s", "Allocation of acg failed");
-+ goto out;
-+ }
-+
-+ acg->tgt = tgt;
-+ INIT_LIST_HEAD(&acg->acg_dev_list);
-+ INIT_LIST_HEAD(&acg->acg_sess_list);
-+ INIT_LIST_HEAD(&acg->acn_list);
-+ cpumask_copy(&acg->acg_cpu_mask, &default_cpu_mask);
-+ acg->acg_name = kstrdup(acg_name, GFP_KERNEL);
-+ if (acg->acg_name == NULL) {
-+ PRINT_ERROR("%s", "Allocation of acg_name failed");
-+ goto out_free;
-+ }
-+
-+ acg->addr_method = tgt->tgtt->preferred_addr_method;
-+
-+ if (tgt_acg) {
-+ int rc;
-+
-+ TRACE_DBG("Adding acg '%s' to device '%s' acg_list", acg_name,
-+ tgt->tgt_name);
-+ list_add_tail(&acg->acg_list_entry, &tgt->tgt_acg_list);
-+ acg->tgt_acg = 1;
-+
-+ rc = scst_acg_sysfs_create(tgt, acg);
-+ if (rc != 0)
-+ goto out_del;
-+ }
-+
-+out:
-+ TRACE_EXIT_HRES(acg);
-+ return acg;
-+
-+out_del:
-+ list_del(&acg->acg_list_entry);
-+
-+out_free:
-+ kfree(acg);
-+ acg = NULL;
-+ goto out;
-+}
-+
-+/* The activity supposed to be suspended and scst_mutex held */
-+void scst_del_free_acg(struct scst_acg *acg)
-+{
-+ struct scst_acn *acn, *acnt;
-+ struct scst_acg_dev *acg_dev, *acg_dev_tmp;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Clearing acg %s from list", acg->acg_name);
-+
-+ BUG_ON(!list_empty(&acg->acg_sess_list));
-+
-+ /* Freeing acg_devs */
-+ list_for_each_entry_safe(acg_dev, acg_dev_tmp, &acg->acg_dev_list,
-+ acg_dev_list_entry) {
-+ struct scst_tgt_dev *tgt_dev, *tt;
-+ list_for_each_entry_safe(tgt_dev, tt,
-+ &acg_dev->dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ if (tgt_dev->acg_dev == acg_dev)
-+ scst_free_tgt_dev(tgt_dev);
-+ }
-+ scst_del_free_acg_dev(acg_dev, true);
-+ }
-+
-+ /* Freeing names */
-+ list_for_each_entry_safe(acn, acnt, &acg->acn_list, acn_list_entry) {
-+ scst_del_free_acn(acn,
-+ list_is_last(&acn->acn_list_entry, &acg->acn_list));
-+ }
-+ INIT_LIST_HEAD(&acg->acn_list);
-+
-+ if (acg->tgt_acg) {
-+ TRACE_DBG("Removing acg %s from list", acg->acg_name);
-+ list_del(&acg->acg_list_entry);
-+
-+ scst_acg_sysfs_del(acg);
-+ } else
-+ acg->tgt->default_acg = NULL;
-+
-+ BUG_ON(!list_empty(&acg->acg_sess_list));
-+ BUG_ON(!list_empty(&acg->acg_dev_list));
-+ BUG_ON(!list_empty(&acg->acn_list));
-+
-+ kfree(acg->acg_name);
-+ kfree(acg);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* The activity supposed to be suspended and scst_mutex held */
-+struct scst_acg *scst_tgt_find_acg(struct scst_tgt *tgt, const char *name)
-+{
-+ struct scst_acg *acg, *acg_ret = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ list_for_each_entry(acg, &tgt->tgt_acg_list, acg_list_entry) {
-+ if (strcmp(acg->acg_name, name) == 0) {
-+ acg_ret = acg;
-+ break;
-+ }
-+ }
-+
-+ TRACE_EXIT();
-+ return acg_ret;
-+}
-+
-+/* scst_mutex supposed to be held */
-+static struct scst_tgt_dev *scst_find_shared_io_tgt_dev(
-+ struct scst_tgt_dev *tgt_dev)
-+{
-+ struct scst_tgt_dev *res = NULL;
-+ struct scst_acg *acg = tgt_dev->acg_dev->acg;
-+ struct scst_tgt_dev *t;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("tgt_dev %s (acg %p, io_grouping_type %d)",
-+ tgt_dev->sess->initiator_name, acg, acg->acg_io_grouping_type);
-+
-+ switch (acg->acg_io_grouping_type) {
-+ case SCST_IO_GROUPING_AUTO:
-+ if (tgt_dev->sess->initiator_name == NULL)
-+ goto out;
-+
-+ list_for_each_entry(t, &tgt_dev->dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ if ((t == tgt_dev) ||
-+ (t->sess->initiator_name == NULL) ||
-+ (t->active_cmd_threads == NULL))
-+ continue;
-+
-+ TRACE_DBG("t %s", t->sess->initiator_name);
-+
-+ /* We check other ACG's as well */
-+
-+ if (strcmp(t->sess->initiator_name,
-+ tgt_dev->sess->initiator_name) == 0)
-+ goto found;
-+ }
-+ break;
-+
-+ case SCST_IO_GROUPING_THIS_GROUP_ONLY:
-+ list_for_each_entry(t, &tgt_dev->dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ if ((t == tgt_dev) || (t->active_cmd_threads == NULL))
-+ continue;
-+
-+ TRACE_DBG("t %s (acg %p)", t->sess->initiator_name,
-+ t->acg_dev->acg);
-+
-+ if (t->acg_dev->acg == acg)
-+ goto found;
-+ }
-+ break;
-+
-+ case SCST_IO_GROUPING_NEVER:
-+ goto out;
-+
-+ default:
-+ list_for_each_entry(t, &tgt_dev->dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ if ((t == tgt_dev) || (t->active_cmd_threads == NULL))
-+ continue;
-+
-+ TRACE_DBG("t %s (acg %p, io_grouping_type %d)",
-+ t->sess->initiator_name, t->acg_dev->acg,
-+ t->acg_dev->acg->acg_io_grouping_type);
-+
-+ if (t->acg_dev->acg->acg_io_grouping_type ==
-+ acg->acg_io_grouping_type)
-+ goto found;
-+ }
-+ break;
-+ }
-+
-+out:
-+ TRACE_EXIT_HRES((unsigned long)res);
-+ return res;
-+
-+found:
-+ if (t->active_cmd_threads == &scst_main_cmd_threads) {
-+ res = t;
-+ TRACE_MGMT_DBG("Going to share async IO context %p (res %p, "
-+ "ini %s, dev %s, grouping type %d)",
-+ t->aic_keeper->aic, res, t->sess->initiator_name,
-+ t->dev->virt_name,
-+ t->acg_dev->acg->acg_io_grouping_type);
-+ } else {
-+ res = t;
-+ if (!*(volatile bool*)&res->active_cmd_threads->io_context_ready) {
-+ TRACE_MGMT_DBG("IO context for t %p not yet "
-+ "initialized, waiting...", t);
-+ msleep(100);
-+ goto found;
-+ }
-+ smp_rmb();
-+ TRACE_MGMT_DBG("Going to share IO context %p (res %p, ini %s, "
-+ "dev %s, cmd_threads %p, grouping type %d)",
-+ res->active_cmd_threads->io_context, res,
-+ t->sess->initiator_name, t->dev->virt_name,
-+ t->active_cmd_threads,
-+ t->acg_dev->acg->acg_io_grouping_type);
-+ }
-+ goto out;
-+}
-+
-+enum scst_dev_type_threads_pool_type scst_parse_threads_pool_type(const char *p,
-+ int len)
-+{
-+ enum scst_dev_type_threads_pool_type res;
-+
-+ if (strncasecmp(p, SCST_THREADS_POOL_PER_INITIATOR_STR,
-+ min_t(int, strlen(SCST_THREADS_POOL_PER_INITIATOR_STR),
-+ len)) == 0)
-+ res = SCST_THREADS_POOL_PER_INITIATOR;
-+ else if (strncasecmp(p, SCST_THREADS_POOL_SHARED_STR,
-+ min_t(int, strlen(SCST_THREADS_POOL_SHARED_STR),
-+ len)) == 0)
-+ res = SCST_THREADS_POOL_SHARED;
-+ else {
-+ PRINT_ERROR("Unknown threads pool type %s", p);
-+ res = SCST_THREADS_POOL_TYPE_INVALID;
-+ }
-+
-+ return res;
-+}
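-+
-+/*
-+ * Usage sketch for scst_parse_threads_pool_type() (illustrative only;
-+ * "buf" and "count" stand for a sysfs write buffer and its length):
-+ *
-+ *	enum scst_dev_type_threads_pool_type t;
-+ *
-+ *	t = scst_parse_threads_pool_type(buf, count);
-+ *	if (t == SCST_THREADS_POOL_TYPE_INVALID)
-+ *		return -EINVAL;
-+ *	dev->threads_pool_type = t;
-+ *
-+ * Note that the strncasecmp() above compares only the shorter of the two
-+ * lengths, so an unambiguous prefix of a type name is accepted as well.
-+ */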
-+
-+static int scst_ioc_keeper_thread(void *arg)
-+{
-+ struct scst_async_io_context_keeper *aic_keeper =
-+ (struct scst_async_io_context_keeper *)arg;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("AIC %p keeper thread %s (PID %d) started", aic_keeper,
-+ current->comm, current->pid);
-+
-+ current->flags |= PF_NOFREEZE;
-+
-+ BUG_ON(aic_keeper->aic != NULL);
-+
-+ aic_keeper->aic = get_io_context(GFP_KERNEL, -1);
-+ TRACE_MGMT_DBG("Alloced new async IO context %p (aic %p)",
-+ aic_keeper->aic, aic_keeper);
-+
-+ /* We have our own ref counting */
-+ put_io_context(aic_keeper->aic);
-+
-+ /* We are ready */
-+ aic_keeper->aic_ready = true;
-+ wake_up_all(&aic_keeper->aic_keeper_waitQ);
-+
-+ wait_event_interruptible(aic_keeper->aic_keeper_waitQ,
-+ kthread_should_stop());
-+
-+ TRACE_MGMT_DBG("AIC %p keeper thread %s (PID %d) finished", aic_keeper,
-+ current->comm, current->pid);
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+/* scst_mutex supposed to be held */
-+int scst_tgt_dev_setup_threads(struct scst_tgt_dev *tgt_dev)
-+{
-+ int res = 0;
-+ struct scst_device *dev = tgt_dev->dev;
-+ struct scst_async_io_context_keeper *aic_keeper;
-+
-+ TRACE_ENTRY();
-+
-+ if (dev->threads_num < 0)
-+ goto out;
-+
-+ if (dev->threads_num == 0) {
-+ struct scst_tgt_dev *shared_io_tgt_dev;
-+ tgt_dev->active_cmd_threads = &scst_main_cmd_threads;
-+
-+ shared_io_tgt_dev = scst_find_shared_io_tgt_dev(tgt_dev);
-+ if (shared_io_tgt_dev != NULL) {
-+ aic_keeper = shared_io_tgt_dev->aic_keeper;
-+ kref_get(&aic_keeper->aic_keeper_kref);
-+
-+ TRACE_MGMT_DBG("Linking async io context %p "
-+ "for shared tgt_dev %p (dev %s)",
-+ aic_keeper->aic, tgt_dev,
-+ tgt_dev->dev->virt_name);
-+ } else {
-+ /* Create new context */
-+ aic_keeper = kzalloc(sizeof(*aic_keeper), GFP_KERNEL);
-+ if (aic_keeper == NULL) {
-+ PRINT_ERROR("Unable to alloc aic_keeper "
-+ "(size %zd)", sizeof(*aic_keeper));
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ kref_init(&aic_keeper->aic_keeper_kref);
-+ init_waitqueue_head(&aic_keeper->aic_keeper_waitQ);
-+
-+ aic_keeper->aic_keeper_thr =
-+ kthread_run(scst_ioc_keeper_thread,
-+ aic_keeper, "aic_keeper");
-+ if (IS_ERR(aic_keeper->aic_keeper_thr)) {
-+ PRINT_ERROR("Error running ioc_keeper "
-+ "thread (tgt_dev %p)", tgt_dev);
-+ res = PTR_ERR(aic_keeper->aic_keeper_thr);
-+ goto out_free_keeper;
-+ }
-+
-+ wait_event(aic_keeper->aic_keeper_waitQ,
-+ aic_keeper->aic_ready);
-+
-+ TRACE_MGMT_DBG("Created async io context %p "
-+ "for not shared tgt_dev %p (dev %s)",
-+ aic_keeper->aic, tgt_dev,
-+ tgt_dev->dev->virt_name);
-+ }
-+
-+ tgt_dev->async_io_context = aic_keeper->aic;
-+ tgt_dev->aic_keeper = aic_keeper;
-+
-+ res = scst_add_threads(tgt_dev->active_cmd_threads, NULL, NULL,
-+ tgt_dev->sess->tgt->tgtt->threads_num);
-+ goto out;
-+ }
-+
-+ switch (dev->threads_pool_type) {
-+ case SCST_THREADS_POOL_PER_INITIATOR:
-+ {
-+ struct scst_tgt_dev *shared_io_tgt_dev;
-+
-+ scst_init_threads(&tgt_dev->tgt_dev_cmd_threads);
-+
-+ tgt_dev->active_cmd_threads = &tgt_dev->tgt_dev_cmd_threads;
-+
-+ shared_io_tgt_dev = scst_find_shared_io_tgt_dev(tgt_dev);
-+ if (shared_io_tgt_dev != NULL) {
-+ TRACE_MGMT_DBG("Linking io context %p for "
-+ "shared tgt_dev %p (cmd_threads %p)",
-+ shared_io_tgt_dev->active_cmd_threads->io_context,
-+ tgt_dev, tgt_dev->active_cmd_threads);
-+ /* It's ref counted via threads */
-+ tgt_dev->active_cmd_threads->io_context =
-+ shared_io_tgt_dev->active_cmd_threads->io_context;
-+ }
-+
-+ res = scst_add_threads(tgt_dev->active_cmd_threads, NULL,
-+ tgt_dev,
-+ dev->threads_num + tgt_dev->sess->tgt->tgtt->threads_num);
-+ if (res != 0) {
-+ /* Let's clear here, because no threads could be run */
-+ tgt_dev->active_cmd_threads->io_context = NULL;
-+ }
-+ break;
-+ }
-+ case SCST_THREADS_POOL_SHARED:
-+ {
-+ tgt_dev->active_cmd_threads = &dev->dev_cmd_threads;
-+
-+ res = scst_add_threads(tgt_dev->active_cmd_threads, dev, NULL,
-+ tgt_dev->sess->tgt->tgtt->threads_num);
-+ break;
-+ }
-+ default:
-+ PRINT_CRIT_ERROR("Unknown threads pool type %d (dev %s)",
-+ dev->threads_pool_type, dev->virt_name);
-+ BUG();
-+ break;
-+ }
-+
-+out:
-+ if (res == 0)
-+ tm_dbg_init_tgt_dev(tgt_dev);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free_keeper:
-+ kfree(aic_keeper);
-+ goto out;
-+}
-+
-+static void scst_aic_keeper_release(struct kref *kref)
-+{
-+ struct scst_async_io_context_keeper *aic_keeper;
-+
-+ TRACE_ENTRY();
-+
-+ aic_keeper = container_of(kref, struct scst_async_io_context_keeper,
-+ aic_keeper_kref);
-+
-+ kthread_stop(aic_keeper->aic_keeper_thr);
-+
-+ kfree(aic_keeper);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* scst_mutex supposed to be held */
-+void scst_tgt_dev_stop_threads(struct scst_tgt_dev *tgt_dev)
-+{
-+ TRACE_ENTRY();
-+
-+ if (tgt_dev->dev->threads_num < 0)
-+ goto out_deinit;
-+
-+ if (tgt_dev->active_cmd_threads == &scst_main_cmd_threads) {
-+ /* Global async threads */
-+ kref_put(&tgt_dev->aic_keeper->aic_keeper_kref,
-+ scst_aic_keeper_release);
-+ tgt_dev->async_io_context = NULL;
-+ tgt_dev->aic_keeper = NULL;
-+ } else if (tgt_dev->active_cmd_threads == &tgt_dev->dev->dev_cmd_threads) {
-+ /* Per device shared threads */
-+ scst_del_threads(tgt_dev->active_cmd_threads,
-+ tgt_dev->sess->tgt->tgtt->threads_num);
-+ } else if (tgt_dev->active_cmd_threads == &tgt_dev->tgt_dev_cmd_threads) {
-+ /* Per tgt_dev threads */
-+ scst_del_threads(tgt_dev->active_cmd_threads, -1);
-+ scst_deinit_threads(&tgt_dev->tgt_dev_cmd_threads);
-+ } /* else no threads (e.g., not yet initialized) */
-+
-+out_deinit:
-+ tm_dbg_deinit_tgt_dev(tgt_dev);
-+ tgt_dev->active_cmd_threads = NULL;
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * scst_mutex supposed to be held, there must not be parallel activity in this
-+ * session.
-+ */
-+static int scst_alloc_add_tgt_dev(struct scst_session *sess,
-+ struct scst_acg_dev *acg_dev, struct scst_tgt_dev **out_tgt_dev)
-+{
-+ int res = 0;
-+ int ini_sg, ini_unchecked_isa_dma, ini_use_clustering;
-+ struct scst_tgt_dev *tgt_dev;
-+ struct scst_device *dev = acg_dev->dev;
-+ struct list_head *head;
-+ int sl;
-+ uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
-+
-+ TRACE_ENTRY();
-+
-+ tgt_dev = kmem_cache_zalloc(scst_tgtd_cachep, GFP_KERNEL);
-+ if (tgt_dev == NULL) {
-+ PRINT_ERROR("%s", "Allocation of scst_tgt_dev failed");
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ tgt_dev->dev = dev;
-+ tgt_dev->lun = acg_dev->lun;
-+ tgt_dev->acg_dev = acg_dev;
-+ tgt_dev->sess = sess;
-+ atomic_set(&tgt_dev->tgt_dev_cmd_count, 0);
-+
-+ scst_sgv_pool_use_norm(tgt_dev);
-+
-+ if (dev->scsi_dev != NULL) {
-+ ini_sg = dev->scsi_dev->host->sg_tablesize;
-+ ini_unchecked_isa_dma = dev->scsi_dev->host->unchecked_isa_dma;
-+ ini_use_clustering = (dev->scsi_dev->host->use_clustering ==
-+ ENABLE_CLUSTERING);
-+ } else {
-+ ini_sg = (1 << 15) /* infinite */;
-+ ini_unchecked_isa_dma = 0;
-+ ini_use_clustering = 0;
-+ }
-+ tgt_dev->max_sg_cnt = min(ini_sg, sess->tgt->sg_tablesize);
-+
-+ if ((sess->tgt->tgtt->use_clustering || ini_use_clustering) &&
-+ !sess->tgt->tgtt->no_clustering)
-+ scst_sgv_pool_use_norm_clust(tgt_dev);
-+
-+ if (sess->tgt->tgtt->unchecked_isa_dma || ini_unchecked_isa_dma)
-+ scst_sgv_pool_use_dma(tgt_dev);
-+
-+ TRACE_MGMT_DBG("Device %s on SCST lun=%lld",
-+ dev->virt_name, (long long unsigned int)tgt_dev->lun);
-+
-+ spin_lock_init(&tgt_dev->tgt_dev_lock);
-+ INIT_LIST_HEAD(&tgt_dev->UA_list);
-+ spin_lock_init(&tgt_dev->thr_data_lock);
-+ INIT_LIST_HEAD(&tgt_dev->thr_data_list);
-+
-+ scst_init_order_data(&tgt_dev->tgt_dev_order_data);
-+ if (dev->tst == SCST_CONTR_MODE_SEP_TASK_SETS)
-+ tgt_dev->curr_order_data = &tgt_dev->tgt_dev_order_data;
-+ else
-+ tgt_dev->curr_order_data = &dev->dev_order_data;
-+
-+ if (dev->handler->parse_atomic &&
-+ dev->handler->alloc_data_buf_atomic &&
-+ (sess->tgt->tgtt->preprocessing_done == NULL)) {
-+ if (sess->tgt->tgtt->rdy_to_xfer_atomic)
-+ __set_bit(SCST_TGT_DEV_AFTER_INIT_WR_ATOMIC,
-+ &tgt_dev->tgt_dev_flags);
-+ }
-+ if (dev->handler->dev_done_atomic &&
-+ sess->tgt->tgtt->xmit_response_atomic) {
-+ __set_bit(SCST_TGT_DEV_AFTER_EXEC_ATOMIC,
-+ &tgt_dev->tgt_dev_flags);
-+ }
-+
-+ sl = scst_set_sense(sense_buffer, sizeof(sense_buffer),
-+ dev->d_sense, SCST_LOAD_SENSE(scst_sense_reset_UA));
-+ scst_alloc_set_UA(tgt_dev, sense_buffer, sl, 0);
-+
-+ if (sess->tgt->tgtt->get_initiator_port_transport_id == NULL) {
-+ if (!list_empty(&dev->dev_registrants_list)) {
-+ PRINT_WARNING("Initiators from target %s can't connect "
-+ "to device %s, because the device has PR "
-+ "registrants and the target doesn't support "
-+ "Persistent Reservations", sess->tgt->tgtt->name,
-+ dev->virt_name);
-+ res = -EPERM;
-+ goto out_free;
-+ }
-+ dev->not_pr_supporting_tgt_devs_num++;
-+ }
-+
-+ res = scst_pr_init_tgt_dev(tgt_dev);
-+ if (res != 0)
-+ goto out_dec_free;
-+
-+ res = scst_tgt_dev_setup_threads(tgt_dev);
-+ if (res != 0)
-+ goto out_pr_clear;
-+
-+ if (dev->handler && dev->handler->attach_tgt) {
-+ TRACE_DBG("Calling dev handler's attach_tgt(%p)", tgt_dev);
-+ res = dev->handler->attach_tgt(tgt_dev);
-+ TRACE_DBG("%s", "Dev handler's attach_tgt() returned");
-+ if (res != 0) {
-+ PRINT_ERROR("Device handler's %s attach_tgt() "
-+ "failed: %d", dev->handler->name, res);
-+ goto out_stop_threads;
-+ }
-+ }
-+
-+ res = scst_tgt_dev_sysfs_create(tgt_dev);
-+ if (res != 0)
-+ goto out_detach;
-+
-+ spin_lock_bh(&dev->dev_lock);
-+ list_add_tail(&tgt_dev->dev_tgt_dev_list_entry, &dev->dev_tgt_dev_list);
-+ if (dev->dev_reserved)
-+ __set_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags);
-+ spin_unlock_bh(&dev->dev_lock);
-+
-+ head = &sess->sess_tgt_dev_list[SESS_TGT_DEV_LIST_HASH_FN(tgt_dev->lun)];
-+ list_add_tail(&tgt_dev->sess_tgt_dev_list_entry, head);
-+
-+ *out_tgt_dev = tgt_dev;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_detach:
-+ if (dev->handler && dev->handler->detach_tgt) {
-+ TRACE_DBG("Calling dev handler's detach_tgt(%p)",
-+ tgt_dev);
-+ dev->handler->detach_tgt(tgt_dev);
-+ TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
-+ }
-+
-+out_stop_threads:
-+ scst_tgt_dev_stop_threads(tgt_dev);
-+
-+out_pr_clear:
-+ scst_pr_clear_tgt_dev(tgt_dev);
-+
-+out_dec_free:
-+ if (tgt_dev->sess->tgt->tgtt->get_initiator_port_transport_id == NULL)
-+ dev->not_pr_supporting_tgt_devs_num--;
-+
-+out_free:
-+ scst_free_all_UA(tgt_dev);
-+ kmem_cache_free(scst_tgtd_cachep, tgt_dev);
-+ goto out;
-+}
-+
-+/* No locks supposed to be held, scst_mutex - held */
-+void scst_nexus_loss(struct scst_tgt_dev *tgt_dev, bool queue_UA)
-+{
-+ TRACE_ENTRY();
-+
-+ scst_clear_reservation(tgt_dev);
-+
-+#if 0 /* Clearing UAs and the last sense isn't required by SAM, and it looks
-+ * better not to clear them, to avoid losing important events, so let's
-+ * disable it.
-+ */
-+ /* With activity suspended the lock isn't needed, but let's be safe */
-+ spin_lock_bh(&tgt_dev->tgt_dev_lock);
-+ scst_free_all_UA(tgt_dev);
-+ memset(tgt_dev->tgt_dev_sense, 0, sizeof(tgt_dev->tgt_dev_sense));
-+ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
-+#endif
-+
-+ if (queue_UA) {
-+ uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
-+ int sl = scst_set_sense(sense_buffer, sizeof(sense_buffer),
-+ tgt_dev->dev->d_sense,
-+ SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
-+ scst_check_set_UA(tgt_dev, sense_buffer, sl, 0);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * scst_mutex supposed to be held, there must not be parallel activity in this
-+ * session.
-+ */
-+static void scst_free_tgt_dev(struct scst_tgt_dev *tgt_dev)
-+{
-+ struct scst_device *dev = tgt_dev->dev;
-+
-+ TRACE_ENTRY();
-+
-+ spin_lock_bh(&dev->dev_lock);
-+ list_del(&tgt_dev->dev_tgt_dev_list_entry);
-+ spin_unlock_bh(&dev->dev_lock);
-+
-+ list_del(&tgt_dev->sess_tgt_dev_list_entry);
-+
-+ scst_tgt_dev_sysfs_del(tgt_dev);
-+
-+ if (tgt_dev->sess->tgt->tgtt->get_initiator_port_transport_id == NULL)
-+ dev->not_pr_supporting_tgt_devs_num--;
-+
-+ scst_clear_reservation(tgt_dev);
-+ scst_pr_clear_tgt_dev(tgt_dev);
-+ scst_free_all_UA(tgt_dev);
-+
-+ if (dev->handler && dev->handler->detach_tgt) {
-+ TRACE_DBG("Calling dev handler's detach_tgt(%p)",
-+ tgt_dev);
-+ dev->handler->detach_tgt(tgt_dev);
-+ TRACE_DBG("%s", "Dev handler's detach_tgt() returned");
-+ }
-+
-+ scst_tgt_dev_stop_threads(tgt_dev);
-+
-+ BUG_ON(!list_empty(&tgt_dev->thr_data_list));
-+
-+ kmem_cache_free(scst_tgtd_cachep, tgt_dev);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* scst_mutex supposed to be held */
-+int scst_sess_alloc_tgt_devs(struct scst_session *sess)
-+{
-+ int res = 0;
-+ struct scst_acg_dev *acg_dev;
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ list_for_each_entry(acg_dev, &sess->acg->acg_dev_list,
-+ acg_dev_list_entry) {
-+ res = scst_alloc_add_tgt_dev(sess, acg_dev, &tgt_dev);
-+ if (res == -EPERM)
-+ continue;
-+ else if (res != 0)
-+ goto out_free;
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return res;
-+
-+out_free:
-+ scst_sess_free_tgt_devs(sess);
-+ goto out;
-+}
-+
-+/*
-+ * scst_mutex supposed to be held, there must not be parallel activity in this
-+ * session.
-+ */
-+void scst_sess_free_tgt_devs(struct scst_session *sess)
-+{
-+ int i;
-+ struct scst_tgt_dev *tgt_dev, *t;
-+
-+ TRACE_ENTRY();
-+
-+ /* The session is going down, no users, so no locks */
-+ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
-+ struct list_head *head = &sess->sess_tgt_dev_list[i];
-+ list_for_each_entry_safe(tgt_dev, t, head,
-+ sess_tgt_dev_list_entry) {
-+ scst_free_tgt_dev(tgt_dev);
-+ }
-+ INIT_LIST_HEAD(head);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* The activity supposed to be suspended and scst_mutex held */
-+int scst_acg_add_acn(struct scst_acg *acg, const char *name)
-+{
-+ int res = 0;
-+ struct scst_acn *acn;
-+ char *nm;
-+
-+ TRACE_ENTRY();
-+
-+ list_for_each_entry(acn, &acg->acn_list, acn_list_entry) {
-+ if (strcmp(acn->name, name) == 0) {
-+ PRINT_ERROR("Name %s already exists in group %s",
-+ name, acg->acg_name);
-+ res = -EEXIST;
-+ goto out;
-+ }
-+ }
-+
-+ acn = kzalloc(sizeof(*acn), GFP_KERNEL);
-+ if (acn == NULL) {
-+ PRINT_ERROR("%s", "Unable to allocate scst_acn");
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ acn->acg = acg;
-+
-+ nm = kstrdup(name, GFP_KERNEL);
-+ if (nm == NULL) {
-+ PRINT_ERROR("%s", "Unable to allocate scst_acn->name");
-+ res = -ENOMEM;
-+ goto out_free;
-+ }
-+ acn->name = nm;
-+
-+ res = scst_acn_sysfs_create(acn);
-+ if (res != 0)
-+ goto out_free_nm;
-+
-+ list_add_tail(&acn->acn_list_entry, &acg->acn_list);
-+
-+out:
-+ if (res == 0) {
-+ PRINT_INFO("Added name %s to group %s", name, acg->acg_name);
-+ scst_check_reassign_sessions();
-+ }
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free_nm:
-+ kfree(nm);
-+
-+out_free:
-+ kfree(acn);
-+ goto out;
-+}
-+
-+/* The activity supposed to be suspended and scst_mutex held */
-+void scst_del_free_acn(struct scst_acn *acn, bool reassign)
-+{
-+ TRACE_ENTRY();
-+
-+ list_del(&acn->acn_list_entry);
-+
-+ scst_acn_sysfs_del(acn);
-+
-+ kfree(acn->name);
-+ kfree(acn);
-+
-+ if (reassign)
-+ scst_check_reassign_sessions();
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* The activity supposed to be suspended and scst_mutex held */
-+struct scst_acn *scst_find_acn(struct scst_acg *acg, const char *name)
-+{
-+ struct scst_acn *acn;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Trying to find name '%s'", name);
-+
-+ list_for_each_entry(acn, &acg->acn_list, acn_list_entry) {
-+ if (strcmp(acn->name, name) == 0) {
-+ TRACE_DBG("%s", "Found");
-+ goto out;
-+ }
-+ }
-+ acn = NULL;
-+out:
-+ TRACE_EXIT();
-+ return acn;
-+}
-+
-+static struct scst_cmd *scst_create_prepare_internal_cmd(
-+ struct scst_cmd *orig_cmd, const uint8_t *cdb,
-+ unsigned int cdb_len, int bufsize)
-+{
-+ struct scst_cmd *res;
-+ int rc;
-+ gfp_t gfp_mask = scst_cmd_atomic(orig_cmd) ? GFP_ATOMIC : GFP_KERNEL;
-+
-+ TRACE_ENTRY();
-+
-+ res = scst_alloc_cmd(cdb, cdb_len, gfp_mask);
-+ if (res == NULL)
-+ goto out;
-+
-+ res->cmd_threads = orig_cmd->cmd_threads;
-+ res->sess = orig_cmd->sess;
-+ res->atomic = scst_cmd_atomic(orig_cmd);
-+ res->internal = 1;
-+ res->tgtt = orig_cmd->tgtt;
-+ res->tgt = orig_cmd->tgt;
-+ res->dev = orig_cmd->dev;
-+ res->tgt_dev = orig_cmd->tgt_dev;
-+ res->cur_order_data = orig_cmd->tgt_dev->curr_order_data;
-+ res->lun = orig_cmd->lun;
-+ res->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
-+ res->data_direction = SCST_DATA_UNKNOWN;
-+ res->orig_cmd = orig_cmd;
-+ res->bufflen = bufsize;
-+
-+ scst_sess_get(res->sess);
-+ if (res->tgt_dev != NULL)
-+ res->cpu_cmd_counter = scst_get();
-+
-+ rc = scst_pre_parse(res);
-+ BUG_ON(rc != 0);
-+
-+ res->state = SCST_CMD_STATE_PARSE;
-+
-+out:
-+ TRACE_EXIT_HRES((unsigned long)res);
-+ return res;
-+}
-+
-+int scst_prepare_request_sense(struct scst_cmd *orig_cmd)
-+{
-+ int res = 0;
-+ static const uint8_t request_sense[6] = {
-+ REQUEST_SENSE, 0, 0, 0, SCST_SENSE_BUFFERSIZE, 0
-+ };
-+ struct scst_cmd *rs_cmd;
-+
-+ TRACE_ENTRY();
-+
-+ if (orig_cmd->sense != NULL) {
-+ TRACE_MEM("Releasing sense %p (orig_cmd %p)",
-+ orig_cmd->sense, orig_cmd);
-+ mempool_free(orig_cmd->sense, scst_sense_mempool);
-+ orig_cmd->sense = NULL;
-+ }
-+
-+ rs_cmd = scst_create_prepare_internal_cmd(orig_cmd,
-+ request_sense, sizeof(request_sense),
-+ SCST_SENSE_BUFFERSIZE);
-+ if (rs_cmd == NULL)
-+ goto out_error;
-+
-+ rs_cmd->cdb[1] |= scst_get_cmd_dev_d_sense(orig_cmd);
-+ rs_cmd->data_direction = SCST_DATA_READ;
-+ rs_cmd->expected_data_direction = rs_cmd->data_direction;
-+ rs_cmd->expected_transfer_len = SCST_SENSE_BUFFERSIZE;
-+ rs_cmd->expected_values_set = 1;
-+
-+ TRACE_MGMT_DBG("Adding REQUEST SENSE cmd %p to head of active "
-+ "cmd list", rs_cmd);
-+ spin_lock_irq(&rs_cmd->cmd_threads->cmd_list_lock);
-+ list_add(&rs_cmd->cmd_list_entry, &rs_cmd->cmd_threads->active_cmd_list);
-+ wake_up(&rs_cmd->cmd_threads->cmd_list_waitQ);
-+ spin_unlock_irq(&rs_cmd->cmd_threads->cmd_list_lock);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_error:
-+ res = -1;
-+ goto out;
-+}
-+
-+static void scst_complete_request_sense(struct scst_cmd *req_cmd)
-+{
-+ struct scst_cmd *orig_cmd = req_cmd->orig_cmd;
-+ uint8_t *buf;
-+ int len;
-+
-+ TRACE_ENTRY();
-+
-+ BUG_ON(orig_cmd == NULL);
-+
-+ len = scst_get_buf_full(req_cmd, &buf);
-+
-+ if (scsi_status_is_good(req_cmd->status) && (len > 0) &&
-+ SCST_SENSE_VALID(buf) && (!SCST_NO_SENSE(buf))) {
-+ PRINT_BUFF_FLAG(TRACE_SCSI, "REQUEST SENSE returned",
-+ buf, len);
-+ scst_alloc_set_sense(orig_cmd, scst_cmd_atomic(req_cmd), buf,
-+ len);
-+ } else {
-+ PRINT_ERROR("%s", "Unable to get the sense via "
-+ "REQUEST SENSE, returning HARDWARE ERROR");
-+ scst_set_cmd_error(orig_cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ }
-+
-+ if (len > 0)
-+ scst_put_buf_full(req_cmd, buf);
-+
-+ TRACE_MGMT_DBG("Adding orig cmd %p to head of active "
-+ "cmd list", orig_cmd);
-+ spin_lock_irq(&orig_cmd->cmd_threads->cmd_list_lock);
-+ list_add(&orig_cmd->cmd_list_entry, &orig_cmd->cmd_threads->active_cmd_list);
-+ wake_up(&orig_cmd->cmd_threads->cmd_list_waitQ);
-+ spin_unlock_irq(&orig_cmd->cmd_threads->cmd_list_lock);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+int scst_finish_internal_cmd(struct scst_cmd *cmd)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ BUG_ON(!cmd->internal);
-+
-+ if (cmd->cdb[0] == REQUEST_SENSE)
-+ scst_complete_request_sense(cmd);
-+
-+ __scst_cmd_put(cmd);
-+
-+ res = SCST_CMD_STATE_RES_CONT_NEXT;
-+
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
-+
-+static void scst_send_release(struct scst_device *dev)
-+{
-+ struct scsi_device *scsi_dev;
-+ unsigned char cdb[6];
-+ uint8_t sense[SCSI_SENSE_BUFFERSIZE];
-+ int rc, i;
-+
-+ TRACE_ENTRY();
-+
-+ if (dev->scsi_dev == NULL)
-+ goto out;
-+
-+ scsi_dev = dev->scsi_dev;
-+
-+ for (i = 0; i < 5; i++) {
-+ memset(cdb, 0, sizeof(cdb));
-+ cdb[0] = RELEASE;
-+ cdb[1] = (scsi_dev->scsi_level <= SCSI_2) ?
-+ ((scsi_dev->lun << 5) & 0xe0) : 0;
-+
-+ memset(sense, 0, sizeof(sense));
-+
-+ TRACE(TRACE_DEBUG | TRACE_SCSI, "%s", "Sending RELEASE req to "
-+ "SCSI mid-level");
-+ rc = scsi_execute(scsi_dev, cdb, SCST_DATA_NONE, NULL, 0,
-+ sense, 15, 0, 0, NULL);
-+ TRACE_DBG("MODE_SENSE done: %x", rc);
-+
-+ if (scsi_status_is_good(rc)) {
-+ break;
-+ } else {
-+ PRINT_ERROR("RELEASE failed: %d", rc);
-+ PRINT_BUFFER("RELEASE sense", sense, sizeof(sense));
-+ scst_check_internal_sense(dev, rc, sense,
-+ sizeof(sense));
-+ }
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* scst_mutex supposed to be held */
-+static void scst_clear_reservation(struct scst_tgt_dev *tgt_dev)
-+{
-+ struct scst_device *dev = tgt_dev->dev;
-+ int release = 0;
-+
-+ TRACE_ENTRY();
-+
-+ spin_lock_bh(&dev->dev_lock);
-+ if (dev->dev_reserved &&
-+ !test_bit(SCST_TGT_DEV_RESERVED, &tgt_dev->tgt_dev_flags)) {
-+ /* This is one who holds the reservation */
-+ struct scst_tgt_dev *tgt_dev_tmp;
-+ list_for_each_entry(tgt_dev_tmp, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ clear_bit(SCST_TGT_DEV_RESERVED,
-+ &tgt_dev_tmp->tgt_dev_flags);
-+ }
-+ dev->dev_reserved = 0;
-+ release = 1;
-+ }
-+ spin_unlock_bh(&dev->dev_lock);
-+
-+ if (release)
-+ scst_send_release(dev);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+struct scst_session *scst_alloc_session(struct scst_tgt *tgt, gfp_t gfp_mask,
-+ const char *initiator_name)
-+{
-+ struct scst_session *sess;
-+ int i;
-+
-+ TRACE_ENTRY();
-+
-+ sess = kmem_cache_zalloc(scst_sess_cachep, gfp_mask);
-+ if (sess == NULL) {
-+ PRINT_ERROR("%s", "Allocation of scst_session failed");
-+ goto out;
-+ }
-+
-+ sess->init_phase = SCST_SESS_IPH_INITING;
-+ sess->shut_phase = SCST_SESS_SPH_READY;
-+ atomic_set(&sess->refcnt, 0);
-+ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
-+ struct list_head *head = &sess->sess_tgt_dev_list[i];
-+ INIT_LIST_HEAD(head);
-+ }
-+ spin_lock_init(&sess->sess_list_lock);
-+ INIT_LIST_HEAD(&sess->sess_cmd_list);
-+ sess->tgt = tgt;
-+ INIT_LIST_HEAD(&sess->init_deferred_cmd_list);
-+ INIT_LIST_HEAD(&sess->init_deferred_mcmd_list);
-+ INIT_DELAYED_WORK(&sess->hw_pending_work,
-+ (void (*)(struct work_struct *))scst_hw_pending_work_fn);
-+
-+#ifdef CONFIG_SCST_MEASURE_LATENCY
-+ spin_lock_init(&sess->lat_lock);
-+#endif
-+
-+ sess->initiator_name = kstrdup(initiator_name, gfp_mask);
-+ if (sess->initiator_name == NULL) {
-+ PRINT_ERROR("%s", "Unable to dup sess->initiator_name");
-+ goto out_free;
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return sess;
-+
-+out_free:
-+ kmem_cache_free(scst_sess_cachep, sess);
-+ sess = NULL;
-+ goto out;
-+}
-+
-+void scst_free_session(struct scst_session *sess)
-+{
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_mutex);
-+
-+ scst_sess_free_tgt_devs(sess);
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ scst_sess_sysfs_del(sess);
-+ if (sess->unreg_done_fn) {
-+ TRACE_DBG("Calling unreg_done_fn(%p)", sess);
-+ sess->unreg_done_fn(sess);
-+ TRACE_DBG("%s", "unreg_done_fn() returned");
-+ }
-+
-+ mutex_lock(&scst_mutex);
-+
-+ /*
-+ * The list deletions must come after the sysfs del. Otherwise it would
-+ * break the logic in scst_sess_sysfs_create() that avoids duplicate
-+ * sysfs names.
-+ */
-+
-+ TRACE_DBG("Removing sess %p from the list", sess);
-+ list_del(&sess->sess_list_entry);
-+ TRACE_DBG("Removing session %p from acg %s", sess, sess->acg->acg_name);
-+ list_del(&sess->acg_sess_list_entry);
-+
-+ /* Called under lock to protect from too early tgt release */
-+ wake_up_all(&sess->tgt->unreg_waitQ);
-+
-+ /*
-+ * NOTE: do not dereference the sess->tgt pointer after scst_mutex
-+ * has been unlocked, because it can be already dead!!
-+ */
-+ mutex_unlock(&scst_mutex);
-+
-+ kfree(sess->transport_id);
-+ kfree(sess->initiator_name);
-+
-+ kmem_cache_free(scst_sess_cachep, sess);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+void scst_free_session_callback(struct scst_session *sess)
-+{
-+ struct completion *c;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Freeing session %p", sess);
-+
-+ cancel_delayed_work_sync(&sess->hw_pending_work);
-+
-+ c = sess->shutdown_compl;
-+
-+ mutex_lock(&scst_mutex);
-+ /*
-+ * Necessary to sync with other threads trying to queue an AEN, which
-+ * the target driver would not be able to serve and would crash, because
-+ * after unreg_done_fn() is called its internal session data is destroyed.
-+ */
-+ sess->shut_phase = SCST_SESS_SPH_UNREG_DONE_CALLING;
-+ mutex_unlock(&scst_mutex);
-+
-+ scst_free_session(sess);
-+
-+ if (c)
-+ complete_all(c);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+void scst_sched_session_free(struct scst_session *sess)
-+{
-+ unsigned long flags;
-+
-+ TRACE_ENTRY();
-+
-+ if (sess->shut_phase != SCST_SESS_SPH_SHUTDOWN) {
-+ PRINT_CRIT_ERROR("session %p is going to shutdown with unknown "
-+ "shut phase %lx", sess, sess->shut_phase);
-+ BUG();
-+ }
-+
-+ spin_lock_irqsave(&scst_mgmt_lock, flags);
-+ TRACE_DBG("Adding sess %p to scst_sess_shut_list", sess);
-+ list_add_tail(&sess->sess_shut_list_entry, &scst_sess_shut_list);
-+ spin_unlock_irqrestore(&scst_mgmt_lock, flags);
-+
-+ wake_up(&scst_mgmt_waitQ);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ * scst_cmd_get() - increase command's reference counter
-+ */
-+void scst_cmd_get(struct scst_cmd *cmd)
-+{
-+ __scst_cmd_get(cmd);
-+}
-+EXPORT_SYMBOL(scst_cmd_get);
-+
-+/**
-+ * scst_cmd_put() - decrease command's reference counter
-+ */
-+void scst_cmd_put(struct scst_cmd *cmd)
-+{
-+ __scst_cmd_put(cmd);
-+}
-+EXPORT_SYMBOL(scst_cmd_put);
-+
-+/**
-+ * scst_cmd_set_ext_cdb() - sets cmd's extended CDB and its length
-+ */
-+void scst_cmd_set_ext_cdb(struct scst_cmd *cmd,
-+ uint8_t *ext_cdb, unsigned int ext_cdb_len,
-+ gfp_t gfp_mask)
-+{
-+ unsigned int len = cmd->cdb_len + ext_cdb_len;
-+
-+ TRACE_ENTRY();
-+
-+ if (len <= sizeof(cmd->cdb_buf))
-+ goto copy;
-+
-+ if (unlikely(len > SCST_MAX_LONG_CDB_SIZE)) {
-+ PRINT_ERROR("Too big CDB (%d)", len);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out;
-+ }
-+
-+ cmd->cdb = kmalloc(len, gfp_mask);
-+ if (unlikely(cmd->cdb == NULL)) {
-+ PRINT_ERROR("Unable to alloc extended CDB (size %d)", len);
-+ goto out_err;
-+ }
-+
-+ memcpy(cmd->cdb, cmd->cdb_buf, cmd->cdb_len);
-+
-+copy:
-+ memcpy(&cmd->cdb[cmd->cdb_len], ext_cdb, ext_cdb_len);
-+
-+ cmd->cdb_len = cmd->cdb_len + ext_cdb_len;
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+
-+out_err:
-+ cmd->cdb = cmd->cdb_buf;
-+ scst_set_busy(cmd);
-+ goto out;
-+}
-+EXPORT_SYMBOL(scst_cmd_set_ext_cdb);
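-+
-+/*
-+ * Usage sketch for scst_cmd_set_ext_cdb() (illustrative only): a target
-+ * driver that received the first CDB bytes in the request PDU and the
-+ * rest in an additional header segment could append the remainder like
-+ * this ("ahs_buf" and "ahs_len" are hypothetical names):
-+ *
-+ *	scst_cmd_set_ext_cdb(cmd, ahs_buf, ahs_len, GFP_ATOMIC);
-+ *
-+ * On failure the function sets the error status on the cmd itself
-+ * (BUSY or HARDWARE ERROR), so the caller just continues with normal
-+ * command processing.
-+ */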
-+
-+struct scst_cmd *scst_alloc_cmd(const uint8_t *cdb,
-+ unsigned int cdb_len, gfp_t gfp_mask)
-+{
-+ struct scst_cmd *cmd;
-+
-+ TRACE_ENTRY();
-+
-+ cmd = kmem_cache_zalloc(scst_cmd_cachep, gfp_mask);
-+ if (cmd == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of scst_cmd failed");
-+ goto out;
-+ }
-+
-+ cmd->state = SCST_CMD_STATE_INIT_WAIT;
-+ cmd->start_time = jiffies;
-+ atomic_set(&cmd->cmd_ref, 1);
-+ cmd->cmd_threads = &scst_main_cmd_threads;
-+ INIT_LIST_HEAD(&cmd->mgmt_cmd_list);
-+ cmd->cdb = cmd->cdb_buf;
-+ cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
-+ cmd->timeout = SCST_DEFAULT_TIMEOUT;
-+ cmd->retries = 0;
-+ cmd->data_len = -1;
-+ cmd->is_send_status = 1;
-+ cmd->resp_data_len = -1;
-+ cmd->write_sg = &cmd->sg;
-+ cmd->write_sg_cnt = &cmd->sg_cnt;
-+
-+ cmd->dbl_ua_orig_data_direction = SCST_DATA_UNKNOWN;
-+ cmd->dbl_ua_orig_resp_data_len = -1;
-+
-+ if (unlikely(cdb_len == 0)) {
-+ PRINT_ERROR("%s", "Wrong CDB len 0, finishing cmd");
-+ goto out_free;
-+ } else if (cdb_len <= SCST_MAX_CDB_SIZE) {
-+ /* Duplicate memcpy to save a branch on the most common path */
-+ memcpy(cmd->cdb, cdb, cdb_len);
-+ } else {
-+ if (unlikely(cdb_len > SCST_MAX_LONG_CDB_SIZE)) {
-+ PRINT_ERROR("Too big CDB (%d), finishing cmd", cdb_len);
-+ goto out_free;
-+ }
-+ cmd->cdb = kmalloc(cdb_len, gfp_mask);
-+ if (unlikely(cmd->cdb == NULL)) {
-+ PRINT_ERROR("Unable to alloc extended CDB (size %d)",
-+ cdb_len);
-+ goto out_free;
-+ }
-+ memcpy(cmd->cdb, cdb, cdb_len);
-+ }
-+
-+ cmd->cdb_len = cdb_len;
-+
-+out:
-+ TRACE_EXIT();
-+ return cmd;
-+
-+out_free:
-+ kmem_cache_free(scst_cmd_cachep, cmd);
-+ cmd = NULL;
-+ goto out;
-+}
-+
-+static void scst_destroy_put_cmd(struct scst_cmd *cmd)
-+{
-+ scst_sess_put(cmd->sess);
-+
-+ /*
-+ * At this point tgt_dev can be dead, but the pointer remains non-NULL
-+ */
-+ if (likely(cmd->tgt_dev != NULL))
-+ scst_put(cmd->cpu_cmd_counter);
-+
-+ scst_destroy_cmd(cmd);
-+ return;
-+}
-+
-+/* No locks supposed to be held */
-+void scst_free_cmd(struct scst_cmd *cmd)
-+{
-+ int destroy = 1;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Freeing cmd %p (tag %llu)",
-+ cmd, (long long unsigned int)cmd->tag);
-+
-+ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
-+ TRACE_MGMT_DBG("Freeing aborted cmd %p", cmd);
-+
-+ EXTRACHECKS_BUG_ON(cmd->unblock_dev || cmd->dec_on_dev_needed ||
-+ cmd->dec_pr_readers_count_needed);
-+
-+ /*
-+ * Target driver can already free sg buffer before calling
-+ * scst_tgt_cmd_done(). E.g., scst_local has to do that.
-+ */
-+ if (!cmd->tgt_data_buf_alloced)
-+ scst_check_restore_sg_buff(cmd);
-+
-+ if ((cmd->tgtt->on_free_cmd != NULL) && likely(!cmd->internal)) {
-+ TRACE_DBG("Calling target's on_free_cmd(%p)", cmd);
-+ scst_set_cur_start(cmd);
-+ cmd->tgtt->on_free_cmd(cmd);
-+ scst_set_tgt_on_free_time(cmd);
-+ TRACE_DBG("%s", "Target's on_free_cmd() returned");
-+ }
-+
-+ if (likely(cmd->dev != NULL)) {
-+ struct scst_dev_type *handler = cmd->dev->handler;
-+ if (handler->on_free_cmd != NULL) {
-+ TRACE_DBG("Calling dev handler %s on_free_cmd(%p)",
-+ handler->name, cmd);
-+ scst_set_cur_start(cmd);
-+ handler->on_free_cmd(cmd);
-+ scst_set_dev_on_free_time(cmd);
-+ TRACE_DBG("Dev handler %s on_free_cmd() returned",
-+ handler->name);
-+ }
-+ }
-+
-+ scst_release_space(cmd);
-+
-+ if (unlikely(cmd->sense != NULL)) {
-+ TRACE_MEM("Releasing sense %p (cmd %p)", cmd->sense, cmd);
-+ mempool_free(cmd->sense, scst_sense_mempool);
-+ cmd->sense = NULL;
-+ }
-+
-+ if (likely(cmd->tgt_dev != NULL)) {
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ if (unlikely(!cmd->sent_for_exec) && !cmd->internal) {
-+ PRINT_ERROR("Finishing not executed cmd %p (opcode "
-+ "%d, target %s, LUN %lld, sn %d, expected_sn %d)",
-+ cmd, cmd->cdb[0], cmd->tgtt->name,
-+ (long long unsigned int)cmd->lun,
-+ cmd->sn, cmd->cur_order_data->expected_sn);
-+ scst_unblock_deferred(cmd->cur_order_data, cmd);
-+ }
-+#endif
-+
-+ if (unlikely(cmd->out_of_sn)) {
-+ TRACE_SN("Out of SN cmd %p (tag %llu, sn %d), "
-+ "destroy=%d", cmd,
-+ (long long unsigned int)cmd->tag,
-+ cmd->sn, destroy);
-+ destroy = test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
-+ &cmd->cmd_flags);
-+ }
-+ }
-+
-+ if (cmd->cdb != cmd->cdb_buf)
-+ kfree(cmd->cdb);
-+
-+ if (likely(destroy))
-+ scst_destroy_put_cmd(cmd);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* No locks supposed to be held. */
-+void scst_check_retries(struct scst_tgt *tgt)
-+{
-+ int need_wake_up = 0;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+ * We don't worry about overflow of finished_cmds, because we check
-+ * only for its change.
-+ */
-+ atomic_inc(&tgt->finished_cmds);
-+ /* See comment in scst_queue_retry_cmd() */
-+ smp_mb__after_atomic_inc();
-+ if (unlikely(tgt->retry_cmds > 0)) {
-+ struct scst_cmd *c, *tc;
-+ unsigned long flags;
-+
-+ TRACE_RETRY("Checking retry cmd list (retry_cmds %d)",
-+ tgt->retry_cmds);
-+
-+ spin_lock_irqsave(&tgt->tgt_lock, flags);
-+ list_for_each_entry_safe(c, tc, &tgt->retry_cmd_list,
-+ cmd_list_entry) {
-+ tgt->retry_cmds--;
-+
-+ TRACE_RETRY("Moving retry cmd %p to head of active "
-+ "cmd list (retry_cmds left %d)",
-+ c, tgt->retry_cmds);
-+ spin_lock(&c->cmd_threads->cmd_list_lock);
-+ list_move(&c->cmd_list_entry,
-+ &c->cmd_threads->active_cmd_list);
-+ wake_up(&c->cmd_threads->cmd_list_waitQ);
-+ spin_unlock(&c->cmd_threads->cmd_list_lock);
-+
-+ need_wake_up++;
-+ if (need_wake_up >= 2) /* "slow start" */
-+ break;
-+ }
-+ spin_unlock_irqrestore(&tgt->tgt_lock, flags);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void scst_tgt_retry_timer_fn(unsigned long arg)
-+{
-+ struct scst_tgt *tgt = (struct scst_tgt *)arg;
-+ unsigned long flags;
-+
-+ TRACE_RETRY("Retry timer expired (retry_cmds %d)", tgt->retry_cmds);
-+
-+ spin_lock_irqsave(&tgt->tgt_lock, flags);
-+ tgt->retry_timer_active = 0;
-+ spin_unlock_irqrestore(&tgt->tgt_lock, flags);
-+
-+ scst_check_retries(tgt);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+struct scst_mgmt_cmd *scst_alloc_mgmt_cmd(gfp_t gfp_mask)
-+{
-+ struct scst_mgmt_cmd *mcmd;
-+
-+ TRACE_ENTRY();
-+
-+ mcmd = mempool_alloc(scst_mgmt_mempool, gfp_mask);
-+ if (mcmd == NULL) {
-+ PRINT_CRIT_ERROR("%s", "Allocation of management command "
-+ "failed, some commands and their data could leak");
-+ goto out;
-+ }
-+ memset(mcmd, 0, sizeof(*mcmd));
-+
-+out:
-+ TRACE_EXIT();
-+ return mcmd;
-+}
-+
-+void scst_free_mgmt_cmd(struct scst_mgmt_cmd *mcmd)
-+{
-+ unsigned long flags;
-+
-+ TRACE_ENTRY();
-+
-+ spin_lock_irqsave(&mcmd->sess->sess_list_lock, flags);
-+ atomic_dec(&mcmd->sess->sess_cmd_count);
-+ spin_unlock_irqrestore(&mcmd->sess->sess_list_lock, flags);
-+
-+ scst_sess_put(mcmd->sess);
-+
-+ if (mcmd->mcmd_tgt_dev != NULL)
-+ scst_put(mcmd->cpu_cmd_counter);
-+
-+ mempool_free(mcmd, scst_mgmt_mempool);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static bool scst_on_sg_tablesize_low(struct scst_cmd *cmd, bool out)
-+{
-+ bool res;
-+ int sg_cnt = out ? cmd->out_sg_cnt : cmd->sg_cnt;
-+ static int ll;
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ if (sg_cnt > cmd->tgt->sg_tablesize) {
-+ /* It's the target's side business */
-+ goto failed;
-+ }
-+
-+ if (tgt_dev->dev->handler->on_sg_tablesize_low == NULL)
-+ goto failed;
-+
-+ res = tgt_dev->dev->handler->on_sg_tablesize_low(cmd);
-+
-+ TRACE_DBG("on_sg_tablesize_low(%p) returned %d", cmd, res);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+failed:
-+ res = false;
-+ if ((ll < 10) || TRACING_MINOR()) {
-+ PRINT_INFO("Unable to complete command due to SG IO count "
-+ "limitation (%srequested %d, available %d, tgt lim %d)",
-+ out ? "OUT buffer, " : "", cmd->sg_cnt,
-+ tgt_dev->max_sg_cnt, cmd->tgt->sg_tablesize);
-+ ll++;
-+ }
-+ goto out;
-+}
-+
-+int scst_alloc_space(struct scst_cmd *cmd)
-+{
-+ gfp_t gfp_mask;
-+ int res = -ENOMEM;
-+ int atomic = scst_cmd_atomic(cmd);
-+ int flags;
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ gfp_mask = tgt_dev->gfp_mask | (atomic ? GFP_ATOMIC : GFP_KERNEL);
-+
-+ flags = atomic ? SGV_POOL_NO_ALLOC_ON_CACHE_MISS : 0;
-+ if (cmd->no_sgv)
-+ flags |= SGV_POOL_ALLOC_NO_CACHED;
-+
-+ cmd->sg = sgv_pool_alloc(tgt_dev->pool, cmd->bufflen, gfp_mask, flags,
-+ &cmd->sg_cnt, &cmd->sgv, &cmd->dev->dev_mem_lim, NULL);
-+ if (cmd->sg == NULL)
-+ goto out;
-+
-+ if (unlikely(cmd->sg_cnt > tgt_dev->max_sg_cnt))
-+ if (!scst_on_sg_tablesize_low(cmd, false))
-+ goto out_sg_free;
-+
-+ if (cmd->data_direction != SCST_DATA_BIDI)
-+ goto success;
-+
-+ cmd->out_sg = sgv_pool_alloc(tgt_dev->pool, cmd->out_bufflen, gfp_mask,
-+ flags, &cmd->out_sg_cnt, &cmd->out_sgv,
-+ &cmd->dev->dev_mem_lim, NULL);
-+ if (cmd->out_sg == NULL)
-+ goto out_sg_free;
-+
-+ if (unlikely(cmd->out_sg_cnt > tgt_dev->max_sg_cnt))
-+ if (!scst_on_sg_tablesize_low(cmd, true))
-+ goto out_out_sg_free;
-+
-+success:
-+ res = 0;
-+
-+out:
-+ TRACE_EXIT();
-+ return res;
-+
-+out_out_sg_free:
-+ sgv_pool_free(cmd->out_sgv, &cmd->dev->dev_mem_lim);
-+ cmd->out_sgv = NULL;
-+ cmd->out_sg = NULL;
-+ cmd->out_sg_cnt = 0;
-+
-+out_sg_free:
-+ sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
-+ cmd->sgv = NULL;
-+ cmd->sg = NULL;
-+ cmd->sg_cnt = 0;
-+ goto out;
-+}
-+
-+static void scst_release_space(struct scst_cmd *cmd)
-+{
-+ TRACE_ENTRY();
-+
-+ if (cmd->sgv == NULL) {
-+ if ((cmd->sg != NULL) &&
-+ !(cmd->tgt_data_buf_alloced || cmd->dh_data_buf_alloced)) {
-+ TRACE_MEM("Freeing sg %p for cmd %p (cnt %d)", cmd->sg,
-+ cmd, cmd->sg_cnt);
-+ scst_free(cmd->sg, cmd->sg_cnt);
-+ goto out_zero;
-+ } else
-+ goto out;
-+ }
-+
-+ if (cmd->tgt_data_buf_alloced || cmd->dh_data_buf_alloced) {
-+ TRACE_MEM("%s", "*data_buf_alloced set, returning");
-+ goto out;
-+ }
-+
-+ if (cmd->out_sgv != NULL) {
-+ sgv_pool_free(cmd->out_sgv, &cmd->dev->dev_mem_lim);
-+ cmd->out_sgv = NULL;
-+ cmd->out_sg_cnt = 0;
-+ cmd->out_sg = NULL;
-+ cmd->out_bufflen = 0;
-+ }
-+
-+ sgv_pool_free(cmd->sgv, &cmd->dev->dev_mem_lim);
-+
-+out_zero:
-+ cmd->sgv = NULL;
-+ cmd->sg_cnt = 0;
-+ cmd->sg = NULL;
-+ cmd->bufflen = 0;
-+ cmd->data_len = 0;
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void scsi_end_async(struct request *req, int error)
-+{
-+ struct scsi_io_context *sioc = req->end_io_data;
-+
-+ TRACE_DBG("sioc %p, cmd %p", sioc, sioc->data);
-+
-+ if (sioc->done)
-+ sioc->done(sioc->data, sioc->sense, req->errors, req->resid_len);
-+
-+ kmem_cache_free(scsi_io_context_cache, sioc);
-+
-+ __blk_put_request(req->q, req);
-+ return;
-+}
-+
-+/**
-+ * scst_scsi_exec_async - executes a SCSI command in pass-through mode
-+ * @cmd: scst command
-+ * @data: pointer passed to done() as "data"
-+ * @done: callback function when done
-+ */
-+int scst_scsi_exec_async(struct scst_cmd *cmd, void *data,
-+ void (*done)(void *data, char *sense, int result, int resid))
-+{
-+ int res = 0;
-+ struct request_queue *q = cmd->dev->scsi_dev->request_queue;
-+ struct request *rq;
-+ struct scsi_io_context *sioc;
-+ int write = (cmd->data_direction & SCST_DATA_WRITE) ? WRITE : READ;
-+ gfp_t gfp = cmd->noio_mem_alloc ? GFP_NOIO : GFP_KERNEL;
-+ int cmd_len = cmd->cdb_len;
-+
-+ sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
-+ if (sioc == NULL) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ rq = blk_get_request(q, write, gfp);
-+ if (rq == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_sioc;
-+ }
-+
-+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
-+ rq->cmd_flags |= REQ_QUIET;
-+
-+ if (cmd->sg == NULL)
-+ goto done;
-+
-+ if (cmd->data_direction == SCST_DATA_BIDI) {
-+ struct request *next_rq;
-+
-+ if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
-+ res = -EOPNOTSUPP;
-+ goto out_free_rq;
-+ }
-+
-+ res = blk_rq_map_kern_sg(rq, cmd->out_sg, cmd->out_sg_cnt, gfp);
-+ if (res != 0) {
-+ TRACE_DBG("blk_rq_map_kern_sg() failed: %d", res);
-+ goto out_free_rq;
-+ }
-+
-+ next_rq = blk_get_request(q, READ, gfp);
-+ if (next_rq == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_unmap;
-+ }
-+ rq->next_rq = next_rq;
-+ next_rq->cmd_type = rq->cmd_type;
-+
-+ res = blk_rq_map_kern_sg(next_rq, cmd->sg, cmd->sg_cnt, gfp);
-+ if (res != 0) {
-+ TRACE_DBG("blk_rq_map_kern_sg() failed: %d", res);
-+ goto out_free_unmap;
-+ }
-+ } else {
-+ res = blk_rq_map_kern_sg(rq, cmd->sg, cmd->sg_cnt, gfp);
-+ if (res != 0) {
-+ TRACE_DBG("blk_rq_map_kern_sg() failed: %d", res);
-+ goto out_free_rq;
-+ }
-+ }
-+
-+done:
-+ TRACE_DBG("sioc %p, cmd %p", sioc, cmd);
-+
-+ sioc->data = data;
-+ sioc->done = done;
-+
-+ rq->cmd_len = cmd_len;
-+ if (rq->cmd_len <= BLK_MAX_CDB) {
-+ memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
-+ memcpy(rq->cmd, cmd->cdb, cmd->cdb_len);
-+ } else
-+ rq->cmd = cmd->cdb;
-+
-+ rq->sense = sioc->sense;
-+ rq->sense_len = sizeof(sioc->sense);
-+ rq->timeout = cmd->timeout;
-+ rq->retries = cmd->retries;
-+ rq->end_io_data = sioc;
-+
-+ blk_execute_rq_nowait(rq->q, NULL, rq,
-+ (cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE), scsi_end_async);
-+out:
-+ return res;
-+
-+out_free_unmap:
-+ if (rq->next_rq != NULL) {
-+ blk_put_request(rq->next_rq);
-+ rq->next_rq = NULL;
-+ }
-+ blk_rq_unmap_kern_sg(rq, res);
-+
-+out_free_rq:
-+ blk_put_request(rq);
-+
-+out_free_sioc:
-+ kmem_cache_free(scsi_io_context_cache, sioc);
-+ goto out;
-+}
-+EXPORT_SYMBOL(scst_scsi_exec_async);
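-+
-+/*
-+ * Usage sketch for scst_scsi_exec_async() (illustrative only;
-+ * "my_exec_done" is a hypothetical completion callback):
-+ *
-+ *	static void my_exec_done(void *data, char *sense, int result,
-+ *		int resid)
-+ *	{
-+ *		struct scst_cmd *cmd = data;
-+ *		... translate result/sense into SCST status ...
-+ *	}
-+ *
-+ *	rc = scst_scsi_exec_async(cmd, cmd, my_exec_done);
-+ *	if (rc != 0)
-+ *		... fail the cmd; my_exec_done() will not be called ...
-+ */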
-+
-+/**
-+ * scst_copy_sg() - copy data between the command's SGs
-+ *
-+ * Copies data between cmd->tgt_sg and cmd->sg in the direction defined by
-+ * the copy_dir parameter.
-+ */
-+void scst_copy_sg(struct scst_cmd *cmd, enum scst_sg_copy_dir copy_dir)
-+{
-+ struct scatterlist *src_sg, *dst_sg;
-+ unsigned int to_copy;
-+ int atomic = scst_cmd_atomic(cmd);
-+
-+ TRACE_ENTRY();
-+
-+ if (copy_dir == SCST_SG_COPY_FROM_TARGET) {
-+ if (cmd->data_direction != SCST_DATA_BIDI) {
-+ src_sg = cmd->tgt_sg;
-+ dst_sg = cmd->sg;
-+ to_copy = cmd->bufflen;
-+ } else {
-+ TRACE_MEM("BIDI cmd %p", cmd);
-+ src_sg = cmd->tgt_out_sg;
-+ dst_sg = cmd->out_sg;
-+ to_copy = cmd->out_bufflen;
-+ }
-+ } else {
-+ src_sg = cmd->sg;
-+ dst_sg = cmd->tgt_sg;
-+ to_copy = cmd->resp_data_len;
-+ }
-+
-+ TRACE_MEM("cmd %p, copy_dir %d, src_sg %p, dst_sg %p, to_copy %lld",
-+ cmd, copy_dir, src_sg, dst_sg, (long long)to_copy);
-+
-+ if (unlikely(src_sg == NULL) || unlikely(dst_sg == NULL)) {
-+ /*
-+ * It can happen, e.g., with scst_user for a cmd with delayed
-+ * alloc that failed with a Check Condition.
-+ */
-+ goto out;
-+ }
-+
-+ sg_copy(dst_sg, src_sg, 0, to_copy,
-+ atomic ? KM_SOFTIRQ0 : KM_USER0,
-+ atomic ? KM_SOFTIRQ1 : KM_USER1);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_copy_sg);
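-+
-+/*
-+ * Usage sketch for scst_copy_sg() (illustrative only): a target driver
-+ * that allocated its own tgt_sg for a READ would copy the device data
-+ * into it before transmission, e.g.:
-+ *
-+ *	if (cmd->tgt_data_buf_alloced)
-+ *		scst_copy_sg(cmd, SCST_SG_COPY_TO_TARGET);
-+ *
-+ * (SCST_SG_COPY_TO_TARGET is assumed here as the complement of the
-+ * SCST_SG_COPY_FROM_TARGET direction handled above.)
-+ */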
-+
-+/**
-+ * scst_get_buf_full - return linear buffer for command
-+ * @cmd: scst command
-+ * @buf: pointer on the resulting pointer
-+ *
-+ * If the command's buffer is larger than a single page, it vmalloc()s the
-+ * needed area and copies the buffer there. Returns the length of the
-+ * buffer, or a negative error code on failure.
-+ */
-+int scst_get_buf_full(struct scst_cmd *cmd, uint8_t **buf)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(cmd->sg_buff_vmallocated);
-+
-+ if (scst_get_buf_count(cmd) > 1) {
-+ int len;
-+ uint8_t *tmp_buf;
-+ int full_size;
-+
-+ full_size = 0;
-+ len = scst_get_buf_first(cmd, &tmp_buf);
-+ while (len > 0) {
-+ full_size += len;
-+ scst_put_buf(cmd, tmp_buf);
-+ len = scst_get_buf_next(cmd, &tmp_buf);
-+ }
-+
-+ *buf = vmalloc(full_size);
-+ if (*buf == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "vmalloc() failed for opcode "
-+ "%x", cmd->cdb[0]);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ cmd->sg_buff_vmallocated = 1;
-+
-+ if (scst_cmd_get_data_direction(cmd) == SCST_DATA_WRITE) {
-+ uint8_t *buf_ptr;
-+
-+ buf_ptr = *buf;
-+
-+ len = scst_get_buf_first(cmd, &tmp_buf);
-+ while (len > 0) {
-+ memcpy(buf_ptr, tmp_buf, len);
-+ buf_ptr += len;
-+
-+ scst_put_buf(cmd, tmp_buf);
-+ len = scst_get_buf_next(cmd, &tmp_buf);
-+ }
-+ }
-+ res = full_size;
-+ } else
-+ res = scst_get_buf_first(cmd, buf);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL(scst_get_buf_full);
-+
-+/**
-+ * scst_put_buf_full - unmaps linear buffer for command
-+ * @cmd: scst command
-+ * @buf: pointer on the buffer to unmap
-+ *
-+ * Reverse operation for scst_get_buf_full(). If the buffer was vmalloc()ed,
-+ * it vfree()s the buffer.
-+ */
-+void scst_put_buf_full(struct scst_cmd *cmd, uint8_t *buf)
-+{
-+ TRACE_ENTRY();
-+
-+ if (buf == NULL)
-+ goto out;
-+
-+ if (cmd->sg_buff_vmallocated) {
-+ if (scst_cmd_get_data_direction(cmd) == SCST_DATA_READ) {
-+ int len;
-+ uint8_t *tmp_buf, *buf_p;
-+
-+ buf_p = buf;
-+
-+ len = scst_get_buf_first(cmd, &tmp_buf);
-+ while (len > 0) {
-+ memcpy(tmp_buf, buf_p, len);
-+ buf_p += len;
-+
-+ scst_put_buf(cmd, tmp_buf);
-+ len = scst_get_buf_next(cmd, &tmp_buf);
-+ }
-+
-+ }
-+
-+ cmd->sg_buff_vmallocated = 0;
-+
-+ vfree(buf);
-+ } else
-+ scst_put_buf(cmd, buf);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL(scst_put_buf_full);
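-+
-+/*
-+ * Combined usage sketch for the scst_get_buf_full()/scst_put_buf_full()
-+ * pair above (illustrative only):
-+ *
-+ *	uint8_t *buf;
-+ *	int len = scst_get_buf_full(cmd, &buf);
-+ *
-+ *	if (len <= 0)
-+ *		goto out;
-+ *	... parse or fill buf[0..len-1] as one linear buffer ...
-+ *	scst_put_buf_full(cmd, buf);
-+ *
-+ * For multi-element SG lists this may vmalloc() and copy the whole
-+ * buffer, so it is a convenience for slow paths, not for per-I/O
-+ * fast paths.
-+ */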
-+
-+static const int SCST_CDB_LENGTH[8] = { 6, 10, 10, 0, 16, 12, 0, 0 };
-+
-+#define SCST_CDB_GROUP(opcode) ((opcode >> 5) & 0x7)
-+#define SCST_GET_CDB_LEN(opcode) SCST_CDB_LENGTH[SCST_CDB_GROUP(opcode)]
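-+
-+/*
-+ * Worked example: for READ(10), opcode 0x28, the group is
-+ * (0x28 >> 5) & 0x7 = 1, so SCST_GET_CDB_LEN(0x28) = SCST_CDB_LENGTH[1]
-+ * = 10 bytes. Groups 3, 6 and 7 map to 0 (reserved/vendor-specific).
-+ */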
-+
-+/* get_trans_len_x extracts x bytes from the cdb as a length, starting at off */
-+
-+static int get_trans_cdb_len_10(struct scst_cmd *cmd, uint8_t off)
-+{
-+ cmd->cdb_len = 10;
-+ cmd->bufflen = 0;
-+ return 0;
-+}
-+
-+static int get_trans_len_block_limit(struct scst_cmd *cmd, uint8_t off)
-+{
-+ cmd->bufflen = 6;
-+ return 0;
-+}
-+
-+static int get_trans_len_read_capacity(struct scst_cmd *cmd, uint8_t off)
-+{
-+ cmd->bufflen = 8;
-+ return 0;
-+}
-+
-+static int get_trans_len_serv_act_in(struct scst_cmd *cmd, uint8_t off)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
-+ cmd->op_name = "READ CAPACITY(16)";
-+ cmd->bufflen = be32_to_cpu(get_unaligned((__be32 *)&cmd->cdb[10]));
-+ cmd->op_flags |= SCST_IMPLICIT_HQ | SCST_REG_RESERVE_ALLOWED |
-+ SCST_WRITE_EXCL_ALLOWED | SCST_EXCL_ACCESS_ALLOWED;
-+ } else
-+ cmd->op_flags |= SCST_UNKNOWN_LENGTH;
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int get_trans_len_single(struct scst_cmd *cmd, uint8_t off)
-+{
-+ cmd->bufflen = 1;
-+ return 0;
-+}
-+
-+static int get_trans_len_read_pos(struct scst_cmd *cmd, uint8_t off)
-+{
-+ uint8_t *p = (uint8_t *)cmd->cdb + off;
-+ int res = 0;
-+
-+ cmd->bufflen = 0;
-+ cmd->bufflen |= ((u32)p[0]) << 8;
-+ cmd->bufflen |= ((u32)p[1]);
-+
-+ switch (cmd->cdb[1] & 0x1f) {
-+ case 0:
-+ case 1:
-+ case 6:
-+ if (cmd->bufflen != 0) {
-+ PRINT_ERROR("READ POSITION: Invalid non-zero (%d) "
-+ "allocation length for service action %x",
-+ cmd->bufflen, cmd->cdb[1] & 0x1f);
-+ goto out_inval;
-+ }
-+ break;
-+ }
-+
-+ switch (cmd->cdb[1] & 0x1f) {
-+ case 0:
-+ case 1:
-+ cmd->bufflen = 20;
-+ break;
-+ case 6:
-+ cmd->bufflen = 32;
-+ break;
-+ case 8:
-+ cmd->bufflen = max(28, cmd->bufflen);
-+ break;
-+ default:
-+ PRINT_ERROR("READ POSITION: Invalid service action %x",
-+ cmd->cdb[1] & 0x1f);
-+ goto out_inval;
-+ }
-+
-+out:
-+ return res;
-+
-+out_inval:
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ res = 1;
-+ goto out;
-+}
-+
-+static int get_trans_len_prevent_allow_medium_removal(struct scst_cmd *cmd,
-+ uint8_t off)
-+{
-+ if ((cmd->cdb[4] & 3) == 0)
-+ cmd->op_flags |= SCST_REG_RESERVE_ALLOWED |
-+ SCST_WRITE_EXCL_ALLOWED | SCST_EXCL_ACCESS_ALLOWED;
-+ return 0;
-+}
-+
-+static int get_trans_len_start_stop(struct scst_cmd *cmd, uint8_t off)
-+{
-+ if ((cmd->cdb[4] & 0xF1) == 0x1)
-+ cmd->op_flags |= SCST_REG_RESERVE_ALLOWED |
-+ SCST_WRITE_EXCL_ALLOWED | SCST_EXCL_ACCESS_ALLOWED;
-+ return 0;
-+}
-+
-+static int get_trans_len_3_read_elem_stat(struct scst_cmd *cmd, uint8_t off)
-+{
-+ const uint8_t *p = cmd->cdb + off;
-+
-+ cmd->bufflen = 0;
-+ cmd->bufflen |= ((u32)p[0]) << 16;
-+ cmd->bufflen |= ((u32)p[1]) << 8;
-+ cmd->bufflen |= ((u32)p[2]);
-+
-+ if ((cmd->cdb[6] & 0x2) == 0x2)
-+ cmd->op_flags |= SCST_REG_RESERVE_ALLOWED |
-+ SCST_WRITE_EXCL_ALLOWED | SCST_EXCL_ACCESS_ALLOWED;
-+ return 0;
-+}
-+
-+static int get_trans_len_1(struct scst_cmd *cmd, uint8_t off)
-+{
-+ cmd->bufflen = (u32)cmd->cdb[off];
-+ return 0;
-+}
-+
-+static int get_trans_len_1_256(struct scst_cmd *cmd, uint8_t off)
-+{
-+ cmd->bufflen = (u32)cmd->cdb[off];
-+ if (cmd->bufflen == 0)
-+ cmd->bufflen = 256;
-+ return 0;
-+}
-+
-+static int get_trans_len_2(struct scst_cmd *cmd, uint8_t off)
-+{
-+ const uint8_t *p = cmd->cdb + off;
-+
-+ cmd->bufflen = 0;
-+ cmd->bufflen |= ((u32)p[0]) << 8;
-+ cmd->bufflen |= ((u32)p[1]);
-+
-+ return 0;
-+}
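-+
-+/*
-+ * Example for the big-endian extraction above: with cdb bytes
-+ * p[0] = 0x12 and p[1] = 0x34 the resulting bufflen is
-+ * (0x12 << 8) | 0x34 = 0x1234 = 4660. The _3 and _4 variants below
-+ * extend the same scheme to 24 and 32 bits.
-+ */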
-+
-+static int get_trans_len_3(struct scst_cmd *cmd, uint8_t off)
-+{
-+ const uint8_t *p = cmd->cdb + off;
-+
-+ cmd->bufflen = 0;
-+ cmd->bufflen |= ((u32)p[0]) << 16;
-+ cmd->bufflen |= ((u32)p[1]) << 8;
-+ cmd->bufflen |= ((u32)p[2]);
-+
-+ return 0;
-+}
-+
-+static int get_trans_len_4(struct scst_cmd *cmd, uint8_t off)
-+{
-+ const uint8_t *p = cmd->cdb + off;
-+
-+ cmd->bufflen = 0;
-+ cmd->bufflen |= ((u32)p[0]) << 24;
-+ cmd->bufflen |= ((u32)p[1]) << 16;
-+ cmd->bufflen |= ((u32)p[2]) << 8;
-+ cmd->bufflen |= ((u32)p[3]);
-+
-+ return 0;
-+}
-+
-+static int get_trans_len_none(struct scst_cmd *cmd, uint8_t off)
-+{
-+ cmd->bufflen = 0;
-+ return 0;
-+}
-+
-+static int get_bidi_trans_len_2(struct scst_cmd *cmd, uint8_t off)
-+{
-+ const uint8_t *p = cmd->cdb + off;
-+
-+ cmd->bufflen = 0;
-+ cmd->bufflen |= ((u32)p[0]) << 8;
-+ cmd->bufflen |= ((u32)p[1]);
-+
-+ cmd->out_bufflen = cmd->bufflen;
-+
-+ return 0;
-+}
-+
-+/**
-+ * scst_get_cdb_info() - fill various info about the command's CDB
-+ *
-+ * Description:
-+ * Fills various info about the command's CDB in the corresponding fields
-+ * in the command.
-+ *
-+ * Returns: 0 on success, <0 if command is unknown, >0 if command
-+ * is invalid.
-+ */
-+int scst_get_cdb_info(struct scst_cmd *cmd)
-+{
-+ int dev_type = cmd->dev->type;
-+ int i, res = 0;
-+ uint8_t op;
-+ const struct scst_sdbops *ptr = NULL;
-+
-+ TRACE_ENTRY();
-+
-+	op = cmd->cdb[0];	/* fetch the opcode */
-+
-+ TRACE_DBG("opcode=%02x, cdblen=%d bytes, tblsize=%d, "
-+ "dev_type=%d", op, SCST_GET_CDB_LEN(op), SCST_CDB_TBL_SIZE,
-+ dev_type);
-+
-+ i = scst_scsi_op_list[op];
-+ while (i < SCST_CDB_TBL_SIZE && scst_scsi_op_table[i].ops == op) {
-+ if (scst_scsi_op_table[i].devkey[dev_type] != SCST_CDB_NOTSUPP) {
-+ ptr = &scst_scsi_op_table[i];
-+ TRACE_DBG("op = 0x%02x+'%c%c%c%c%c%c%c%c%c%c'+<%s>",
-+ ptr->ops, ptr->devkey[0], /* disk */
-+ ptr->devkey[1], /* tape */
-+ ptr->devkey[2], /* printer */
-+ ptr->devkey[3], /* cpu */
-+ ptr->devkey[4], /* cdr */
-+ ptr->devkey[5], /* cdrom */
-+ ptr->devkey[6], /* scanner */
-+ ptr->devkey[7], /* worm */
-+ ptr->devkey[8], /* changer */
-+ ptr->devkey[9], /* commdev */
-+ ptr->op_name);
-+ TRACE_DBG("direction=%d flags=%d off=%d",
-+ ptr->direction,
-+ ptr->flags,
-+ ptr->off);
-+ break;
-+ }
-+ i++;
-+ }
-+
-+ if (unlikely(ptr == NULL)) {
-+		/* opcode not found, or not currently used */
-+ TRACE(TRACE_MINOR, "Unknown opcode 0x%x for type %d", op,
-+ dev_type);
-+ res = -1;
-+ goto out;
-+ }
-+
-+ cmd->cdb_len = SCST_GET_CDB_LEN(op);
-+ cmd->op_name = ptr->op_name;
-+ cmd->data_direction = ptr->direction;
-+ cmd->op_flags = ptr->flags | SCST_INFO_VALID;
-+ res = (*ptr->get_trans_len)(cmd, ptr->off);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(scst_get_cdb_info);
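-+
-+/*
-+ * Usage sketch (editor's illustration, not part of the original code): a
-+ * typical caller fills cmd->cdb and cmd->dev, then lets scst_get_cdb_info()
-+ * classify the command. The function and its error handling below are
-+ * hypothetical and simplified.
-+ */
-+#if 0
-+static int example_classify_cmd(struct scst_cmd *cmd)
-+{
-+	int rc = scst_get_cdb_info(cmd);
-+
-+	if (rc < 0) {
-+		/* Unknown opcode: let the device handler decide */
-+		cmd->op_flags &= ~SCST_INFO_VALID;
-+	} else if (rc > 0) {
-+		/* Invalid CDB: sense was already set by get_trans_len_*() */
-+		return rc;
-+	}
-+	return 0;
-+}
-+#endif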
-+
-+/* Packs SCST LUN back to SCSI form */
-+__be64 scst_pack_lun(const uint64_t lun, enum scst_lun_addr_method addr_method)
-+{
-+ uint64_t res = 0;
-+
-+ if (lun) {
-+ res = (addr_method << 14) | (lun & 0x3fff);
-+ res = res << 48;
-+ }
-+
-+ TRACE_EXIT_HRES(res >> 48);
-+ return cpu_to_be64(res);
-+}
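-+
-+/*
-+ * Worked example (editor's note): for the flat addressing method and
-+ * lun = 5, scst_pack_lun() computes ((1 << 14) | 5) << 48, i.e. the
-+ * big-endian 8-byte LUN 40 05 00 00 00 00 00 00, which
-+ * scst_unpack_lun() below maps back to 5.
-+ */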
-+
-+/*
-+ * Function to extract a LUN number from an 8-byte LUN structure in network byte
-+ * order (big endian). Supports three LUN addressing methods: peripheral, flat
-+ * and logical unit. See also SAM-2, section 4.9.4 (page 40).
-+ */
-+uint64_t scst_unpack_lun(const uint8_t *lun, int len)
-+{
-+ uint64_t res = NO_SUCH_LUN;
-+ int address_method;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_BUFF_FLAG(TRACE_DEBUG, "Raw LUN", lun, len);
-+
-+ if (unlikely(len < 2)) {
-+ PRINT_ERROR("Illegal lun length %d, expected 2 bytes or "
-+ "more", len);
-+ goto out;
-+ }
-+
-+ if (len > 2) {
-+ switch (len) {
-+ case 8:
-+ if ((*((__be64 *)lun) &
-+ __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
-+ goto out_err;
-+ break;
-+ case 4:
-+ if (*((__be16 *)&lun[2]) != 0)
-+ goto out_err;
-+ break;
-+ case 6:
-+ if (*((__be32 *)&lun[2]) != 0)
-+ goto out_err;
-+ break;
-+ default:
-+ goto out_err;
-+ }
-+ }
-+
-+ address_method = (*lun) >> 6; /* high 2 bits of byte 0 */
-+ switch (address_method) {
-+ case SCST_LUN_ADDR_METHOD_PERIPHERAL:
-+ case SCST_LUN_ADDR_METHOD_FLAT:
-+ case SCST_LUN_ADDR_METHOD_LUN:
-+ res = *(lun + 1) | (((*lun) & 0x3f) << 8);
-+ break;
-+
-+ case SCST_LUN_ADDR_METHOD_EXTENDED_LUN:
-+ default:
-+ PRINT_ERROR("Unimplemented LUN addressing method %u",
-+ address_method);
-+ break;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES((int)res);
-+ return res;
-+
-+out_err:
-+ PRINT_ERROR("%s", "Multi-level LUN unimplemented");
-+ goto out;
-+}
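-+
-+/*
-+ * Example (editor's note): an 8-byte single-level LUN such as
-+ * 00 02 00 00 00 00 00 00 (peripheral method) unpacks to LUN 2, while
-+ * anything with non-zero bytes 2..7 is rejected above as an
-+ * unimplemented multi-level LUN.
-+ */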
-+
-+/**
-+ ** Generic parse() support routines.
-+ ** Done via pointer on functions to avoid unneeded dereferences on
-+ ** the fast path.
-+ **/
-+
-+/**
-+ * scst_calc_block_shift() - calculate block shift
-+ *
-+ * Calculates and returns block shift for the given sector size
-+ */
-+int scst_calc_block_shift(int sector_size)
-+{
-+ int block_shift = 0;
-+ int t;
-+
-+ if (sector_size == 0)
-+ sector_size = 512;
-+
-+ t = sector_size;
-+ while (1) {
-+ if ((t & 1) != 0)
-+ break;
-+ t >>= 1;
-+ block_shift++;
-+ }
-+ if (block_shift < 9) {
-+ PRINT_ERROR("Wrong sector size %d", sector_size);
-+ block_shift = -1;
-+ }
-+
-+ TRACE_EXIT_RES(block_shift);
-+ return block_shift;
-+}
-+EXPORT_SYMBOL_GPL(scst_calc_block_shift);
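-+
-+/*
-+ * Worked example (editor's note): 512 -> 9, 4096 -> 12. A sector size
-+ * that is not a power of two, e.g. 520, stops at shift 3 (< 9) and is
-+ * reported as an error (-1).
-+ */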
-+
-+/**
-+ * scst_sbc_generic_parse() - generic SBC parsing
-+ *
-+ * Generic parse() for SBC (disk) devices
-+ */
-+int scst_sbc_generic_parse(struct scst_cmd *cmd,
-+ int (*get_block_shift)(struct scst_cmd *cmd))
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+ * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
-+ * therefore change them only if necessary
-+ */
-+
-+ TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
-+ cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
-+
-+ switch (cmd->cdb[0]) {
-+ case VERIFY_6:
-+ case VERIFY:
-+ case VERIFY_12:
-+ case VERIFY_16:
-+ if ((cmd->cdb[1] & BYTCHK) == 0) {
-+ cmd->data_len = cmd->bufflen << get_block_shift(cmd);
-+ cmd->bufflen = 0;
-+ goto set_timeout;
-+ } else
-+ cmd->data_len = 0;
-+ break;
-+ default:
-+ /* It's all good */
-+ break;
-+ }
-+
-+ if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
-+ int block_shift = get_block_shift(cmd);
-+ /*
-+		 * No need for locks here, since *_detach() cannot be
-+		 * called while there are outstanding commands.
-+ */
-+ cmd->bufflen = cmd->bufflen << block_shift;
-+ cmd->out_bufflen = cmd->out_bufflen << block_shift;
-+ }
-+
-+set_timeout:
-+ if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
-+ cmd->timeout = SCST_GENERIC_DISK_REG_TIMEOUT;
-+ else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
-+ cmd->timeout = SCST_GENERIC_DISK_SMALL_TIMEOUT;
-+ else if (cmd->op_flags & SCST_LONG_TIMEOUT)
-+ cmd->timeout = SCST_GENERIC_DISK_LONG_TIMEOUT;
-+
-+ TRACE_DBG("res %d, bufflen %d, data_len %d, direct %d",
-+ res, cmd->bufflen, cmd->data_len, cmd->data_direction);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(scst_sbc_generic_parse);
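-+
-+/*
-+ * Usage sketch (editor's illustration, not from the original patch): a
-+ * block device handler typically wraps scst_sbc_generic_parse() with a
-+ * callback returning its cached block shift. The example_dev_params
-+ * struct and dh_priv layout below are hypothetical.
-+ */
-+#if 0
-+static int example_get_block_shift(struct scst_cmd *cmd)
-+{
-+	/* e.g. 9 for 512-byte sectors, 12 for 4 KiB sectors */
-+	return ((struct example_dev_params *)cmd->dev->dh_priv)->block_shift;
-+}
-+
-+static int example_parse(struct scst_cmd *cmd)
-+{
-+	return scst_sbc_generic_parse(cmd, example_get_block_shift);
-+}
-+#endif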
-+
-+/**
-+ * scst_cdrom_generic_parse() - generic MMC parse
-+ *
-+ * Generic parse() for MMC (cdrom) devices
-+ */
-+int scst_cdrom_generic_parse(struct scst_cmd *cmd,
-+ int (*get_block_shift)(struct scst_cmd *cmd))
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+ * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
-+ * therefore change them only if necessary
-+ */
-+
-+ TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
-+ cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
-+
-+ cmd->cdb[1] &= 0x1f;
-+
-+ switch (cmd->cdb[0]) {
-+ case VERIFY_6:
-+ case VERIFY:
-+ case VERIFY_12:
-+ case VERIFY_16:
-+ if ((cmd->cdb[1] & BYTCHK) == 0) {
-+ cmd->data_len = cmd->bufflen << get_block_shift(cmd);
-+ cmd->bufflen = 0;
-+ goto set_timeout;
-+ }
-+ break;
-+ default:
-+ /* It's all good */
-+ break;
-+ }
-+
-+ if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
-+ int block_shift = get_block_shift(cmd);
-+ cmd->bufflen = cmd->bufflen << block_shift;
-+ cmd->out_bufflen = cmd->out_bufflen << block_shift;
-+ }
-+
-+set_timeout:
-+ if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
-+ cmd->timeout = SCST_GENERIC_CDROM_REG_TIMEOUT;
-+ else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
-+ cmd->timeout = SCST_GENERIC_CDROM_SMALL_TIMEOUT;
-+ else if (cmd->op_flags & SCST_LONG_TIMEOUT)
-+ cmd->timeout = SCST_GENERIC_CDROM_LONG_TIMEOUT;
-+
-+ TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
-+ cmd->data_direction);
-+
-+ TRACE_EXIT();
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(scst_cdrom_generic_parse);
-+
-+/**
-+ * scst_modisk_generic_parse() - generic MO parse
-+ *
-+ * Generic parse() for MO disk devices
-+ */
-+int scst_modisk_generic_parse(struct scst_cmd *cmd,
-+ int (*get_block_shift)(struct scst_cmd *cmd))
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+ * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
-+ * therefore change them only if necessary
-+ */
-+
-+ TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
-+ cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
-+
-+ cmd->cdb[1] &= 0x1f;
-+
-+ switch (cmd->cdb[0]) {
-+ case VERIFY_6:
-+ case VERIFY:
-+ case VERIFY_12:
-+ case VERIFY_16:
-+ if ((cmd->cdb[1] & BYTCHK) == 0) {
-+ cmd->data_len = cmd->bufflen << get_block_shift(cmd);
-+ cmd->bufflen = 0;
-+ goto set_timeout;
-+ }
-+ break;
-+ default:
-+ /* It's all good */
-+ break;
-+ }
-+
-+ if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED) {
-+ int block_shift = get_block_shift(cmd);
-+ cmd->bufflen = cmd->bufflen << block_shift;
-+ cmd->out_bufflen = cmd->out_bufflen << block_shift;
-+ }
-+
-+set_timeout:
-+ if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
-+ cmd->timeout = SCST_GENERIC_MODISK_REG_TIMEOUT;
-+ else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
-+ cmd->timeout = SCST_GENERIC_MODISK_SMALL_TIMEOUT;
-+ else if (cmd->op_flags & SCST_LONG_TIMEOUT)
-+ cmd->timeout = SCST_GENERIC_MODISK_LONG_TIMEOUT;
-+
-+ TRACE_DBG("res=%d, bufflen=%d, direct=%d", res, cmd->bufflen,
-+ cmd->data_direction);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(scst_modisk_generic_parse);
-+
-+/**
-+ * scst_tape_generic_parse() - generic tape parse
-+ *
-+ * Generic parse() for tape devices
-+ */
-+int scst_tape_generic_parse(struct scst_cmd *cmd,
-+ int (*get_block_size)(struct scst_cmd *cmd))
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+ * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
-+ * therefore change them only if necessary
-+ */
-+
-+ TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
-+ cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
-+
-+ if (cmd->cdb[0] == READ_POSITION) {
-+ int tclp = cmd->cdb[1] & 4;
-+ int long_bit = cmd->cdb[1] & 2;
-+ int bt = cmd->cdb[1] & 1;
-+
-+ if ((tclp == long_bit) && (!bt || !long_bit)) {
-+ cmd->bufflen =
-+ tclp ? POSITION_LEN_LONG : POSITION_LEN_SHORT;
-+ cmd->data_direction = SCST_DATA_READ;
-+ } else {
-+ cmd->bufflen = 0;
-+ cmd->data_direction = SCST_DATA_NONE;
-+ }
-+ }
-+
-+ if (cmd->op_flags & SCST_TRANSFER_LEN_TYPE_FIXED & cmd->cdb[1]) {
-+ int block_size = get_block_size(cmd);
-+ cmd->bufflen = cmd->bufflen * block_size;
-+ cmd->out_bufflen = cmd->out_bufflen * block_size;
-+ }
-+
-+ if ((cmd->op_flags & (SCST_SMALL_TIMEOUT | SCST_LONG_TIMEOUT)) == 0)
-+ cmd->timeout = SCST_GENERIC_TAPE_REG_TIMEOUT;
-+ else if (cmd->op_flags & SCST_SMALL_TIMEOUT)
-+ cmd->timeout = SCST_GENERIC_TAPE_SMALL_TIMEOUT;
-+ else if (cmd->op_flags & SCST_LONG_TIMEOUT)
-+ cmd->timeout = SCST_GENERIC_TAPE_LONG_TIMEOUT;
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(scst_tape_generic_parse);
-+
-+static int scst_null_parse(struct scst_cmd *cmd)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+ * SCST sets good defaults for cmd->data_direction and cmd->bufflen,
-+ * therefore change them only if necessary
-+ */
-+
-+ TRACE_DBG("op_name <%s> direct %d flags %d transfer_len %d",
-+ cmd->op_name, cmd->data_direction, cmd->op_flags, cmd->bufflen);
-+#if 0
-+ switch (cmd->cdb[0]) {
-+ default:
-+ /* It's all good */
-+ break;
-+ }
-+#endif
-+ TRACE_DBG("res %d bufflen %d direct %d",
-+ res, cmd->bufflen, cmd->data_direction);
-+
-+ TRACE_EXIT();
-+ return res;
-+}
-+
-+/**
-+ * scst_changer_generic_parse() - generic changer parse
-+ *
-+ * Generic parse() for changer devices
-+ */
-+int scst_changer_generic_parse(struct scst_cmd *cmd,
-+ int (*nothing)(struct scst_cmd *cmd))
-+{
-+ int res = scst_null_parse(cmd);
-+
-+ if (cmd->op_flags & SCST_LONG_TIMEOUT)
-+ cmd->timeout = SCST_GENERIC_CHANGER_LONG_TIMEOUT;
-+ else
-+ cmd->timeout = SCST_GENERIC_CHANGER_TIMEOUT;
-+
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(scst_changer_generic_parse);
-+
-+/**
-+ * scst_processor_generic_parse() - generic SCSI processor parse
-+ *
-+ * Generic parse() for SCSI processor devices
-+ */
-+int scst_processor_generic_parse(struct scst_cmd *cmd,
-+ int (*nothing)(struct scst_cmd *cmd))
-+{
-+ int res = scst_null_parse(cmd);
-+
-+ if (cmd->op_flags & SCST_LONG_TIMEOUT)
-+ cmd->timeout = SCST_GENERIC_PROCESSOR_LONG_TIMEOUT;
-+ else
-+ cmd->timeout = SCST_GENERIC_PROCESSOR_TIMEOUT;
-+
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(scst_processor_generic_parse);
-+
-+/**
-+ * scst_raid_generic_parse() - generic RAID parse
-+ *
-+ * Generic parse() for RAID devices
-+ */
-+int scst_raid_generic_parse(struct scst_cmd *cmd,
-+ int (*nothing)(struct scst_cmd *cmd))
-+{
-+ int res = scst_null_parse(cmd);
-+
-+ if (cmd->op_flags & SCST_LONG_TIMEOUT)
-+ cmd->timeout = SCST_GENERIC_RAID_LONG_TIMEOUT;
-+ else
-+ cmd->timeout = SCST_GENERIC_RAID_TIMEOUT;
-+
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(scst_raid_generic_parse);
-+
-+/**
-+ ** Generic dev_done() support routines.
-+ ** Done via pointer on functions to avoid unneeded dereferences on
-+ ** the fast path.
-+ **/
-+
-+/**
-+ * scst_block_generic_dev_done() - generic SBC dev_done
-+ *
-+ * Generic dev_done() for block (SBC) devices
-+ */
-+int scst_block_generic_dev_done(struct scst_cmd *cmd,
-+ void (*set_block_shift)(struct scst_cmd *cmd, int block_shift))
-+{
-+ int opcode = cmd->cdb[0];
-+ int status = cmd->status;
-+ int res = SCST_CMD_STATE_DEFAULT;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+ * SCST sets good defaults for cmd->is_send_status and
-+ * cmd->resp_data_len based on cmd->status and cmd->data_direction,
-+ * therefore change them only if necessary
-+ */
-+
-+ if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET)) {
-+ switch (opcode) {
-+ case READ_CAPACITY:
-+ {
-+ /* Always keep track of disk capacity */
-+ int buffer_size, sector_size, sh;
-+ uint8_t *buffer;
-+
-+ buffer_size = scst_get_buf_full(cmd, &buffer);
-+ if (unlikely(buffer_size <= 0)) {
-+ if (buffer_size < 0) {
-+ PRINT_ERROR("%s: Unable to get the"
-+ " buffer (%d)", __func__, buffer_size);
-+ }
-+ goto out;
-+ }
-+
-+ sector_size =
-+ ((buffer[4] << 24) | (buffer[5] << 16) |
-+ (buffer[6] << 8) | (buffer[7] << 0));
-+ scst_put_buf_full(cmd, buffer);
-+ if (sector_size != 0)
-+ sh = scst_calc_block_shift(sector_size);
-+ else
-+ sh = 0;
-+ set_block_shift(cmd, sh);
-+ TRACE_DBG("block_shift %d", sh);
-+ break;
-+ }
-+ default:
-+ /* It's all good */
-+ break;
-+ }
-+ }
-+
-+ TRACE_DBG("cmd->is_send_status=%x, cmd->resp_data_len=%d, "
-+ "res=%d", cmd->is_send_status, cmd->resp_data_len, res);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(scst_block_generic_dev_done);
-+
-+/**
-+ * scst_tape_generic_dev_done() - generic tape dev done
-+ *
-+ * Generic dev_done() for tape devices
-+ */
-+int scst_tape_generic_dev_done(struct scst_cmd *cmd,
-+ void (*set_block_size)(struct scst_cmd *cmd, int block_shift))
-+{
-+ int opcode = cmd->cdb[0];
-+ int res = SCST_CMD_STATE_DEFAULT;
-+ int buffer_size, bs;
-+ uint8_t *buffer = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+ * SCST sets good defaults for cmd->is_send_status and
-+ * cmd->resp_data_len based on cmd->status and cmd->data_direction,
-+ * therefore change them only if necessary
-+ */
-+
-+ if (cmd->status != SAM_STAT_GOOD)
-+ goto out;
-+
-+ switch (opcode) {
-+ case MODE_SENSE:
-+ case MODE_SELECT:
-+ buffer_size = scst_get_buf_full(cmd, &buffer);
-+ if (unlikely(buffer_size <= 0)) {
-+ if (buffer_size < 0) {
-+ PRINT_ERROR("%s: Unable to get the buffer (%d)",
-+ __func__, buffer_size);
-+ }
-+ goto out;
-+ }
-+ break;
-+ }
-+
-+ switch (opcode) {
-+ case MODE_SENSE:
-+ TRACE_DBG("%s", "MODE_SENSE");
-+ if ((cmd->cdb[2] & 0xC0) == 0) {
-+ if (buffer[3] == 8) {
-+ bs = (buffer[9] << 16) |
-+ (buffer[10] << 8) | buffer[11];
-+ set_block_size(cmd, bs);
-+ }
-+ }
-+ break;
-+ case MODE_SELECT:
-+ TRACE_DBG("%s", "MODE_SELECT");
-+ if (buffer[3] == 8) {
-+ bs = (buffer[9] << 16) | (buffer[10] << 8) |
-+ (buffer[11]);
-+ set_block_size(cmd, bs);
-+ }
-+ break;
-+ default:
-+ /* It's all good */
-+ break;
-+ }
-+
-+ switch (opcode) {
-+ case MODE_SENSE:
-+ case MODE_SELECT:
-+ scst_put_buf_full(cmd, buffer);
-+ break;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(scst_tape_generic_dev_done);
-+
-+static void scst_check_internal_sense(struct scst_device *dev, int result,
-+ uint8_t *sense, int sense_len)
-+{
-+ TRACE_ENTRY();
-+
-+ if (host_byte(result) == DID_RESET) {
-+ int sl;
-+ TRACE(TRACE_MGMT, "DID_RESET received for device %s, "
-+ "triggering reset UA", dev->virt_name);
-+ sl = scst_set_sense(sense, sense_len, dev->d_sense,
-+ SCST_LOAD_SENSE(scst_sense_reset_UA));
-+ scst_dev_check_set_UA(dev, NULL, sense, sl);
-+ } else if ((status_byte(result) == CHECK_CONDITION) &&
-+ scst_is_ua_sense(sense, sense_len))
-+ scst_dev_check_set_UA(dev, NULL, sense, sense_len);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ * scst_to_dma_dir() - translate SCST's data direction to DMA direction
-+ *
-+ * Translates SCST's data direction to DMA one from backend storage
-+ * perspective.
-+ */
-+enum dma_data_direction scst_to_dma_dir(int scst_dir)
-+{
-+ static const enum dma_data_direction tr_tbl[] = { DMA_NONE,
-+ DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_BIDIRECTIONAL, DMA_NONE };
-+
-+ return tr_tbl[scst_dir];
-+}
-+EXPORT_SYMBOL(scst_to_dma_dir);
-+
-+/*
-+ * scst_to_tgt_dma_dir() - translate SCST data direction to DMA direction
-+ *
-+ * Translates SCST data direction to DMA data direction from the perspective
-+ * of a target.
-+ */
-+enum dma_data_direction scst_to_tgt_dma_dir(int scst_dir)
-+{
-+ static const enum dma_data_direction tr_tbl[] = { DMA_NONE,
-+ DMA_FROM_DEVICE, DMA_TO_DEVICE, DMA_BIDIRECTIONAL, DMA_NONE };
-+
-+ return tr_tbl[scst_dir];
-+}
-+EXPORT_SYMBOL(scst_to_tgt_dma_dir);
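-+
-+/*
-+ * Editor's note: the two tables above are mirrored on purpose. For a
-+ * WRITE command (SCST_DATA_WRITE) the backend storage receives data,
-+ * so scst_to_dma_dir() yields DMA_TO_DEVICE, while the target adapter
-+ * fetches that data from the initiator, so scst_to_tgt_dma_dir()
-+ * yields DMA_FROM_DEVICE.
-+ */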
-+
-+/**
-+ * scst_obtain_device_parameters() - obtain device control parameters
-+ *
-+ * Issues a MODE SENSE for the control mode page data and sets the
-+ * corresponding dev's parameters from it. Returns 0 on success, non-zero
-+ * otherwise.
-+ */
-+int scst_obtain_device_parameters(struct scst_device *dev)
-+{
-+ int rc, i;
-+ uint8_t cmd[16];
-+ uint8_t buffer[4+0x0A];
-+ uint8_t sense_buffer[SCSI_SENSE_BUFFERSIZE];
-+
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(dev->scsi_dev == NULL);
-+
-+ for (i = 0; i < 5; i++) {
-+ /* Get control mode page */
-+ memset(cmd, 0, sizeof(cmd));
-+#if 0
-+ cmd[0] = MODE_SENSE_10;
-+ cmd[1] = 0;
-+ cmd[2] = 0x0A;
-+ cmd[8] = sizeof(buffer); /* it's < 256 */
-+#else
-+ cmd[0] = MODE_SENSE;
-+ cmd[1] = 8; /* DBD */
-+ cmd[2] = 0x0A;
-+ cmd[4] = sizeof(buffer);
-+#endif
-+
-+ memset(buffer, 0, sizeof(buffer));
-+ memset(sense_buffer, 0, sizeof(sense_buffer));
-+
-+ TRACE(TRACE_SCSI, "%s", "Doing internal MODE_SENSE");
-+ rc = scsi_execute(dev->scsi_dev, cmd, SCST_DATA_READ, buffer,
-+ sizeof(buffer), sense_buffer, 15, 0, 0
-+ , NULL
-+ );
-+
-+ TRACE_DBG("MODE_SENSE done: %x", rc);
-+
-+ if (scsi_status_is_good(rc)) {
-+ int q;
-+
-+ PRINT_BUFF_FLAG(TRACE_SCSI, "Returned control mode "
-+ "page data", buffer, sizeof(buffer));
-+
-+ dev->tst = buffer[4+2] >> 5;
-+ q = buffer[4+3] >> 4;
-+ if (q > SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER) {
-+ PRINT_ERROR("Too big QUEUE ALG %x, dev %s",
-+ dev->queue_alg, dev->virt_name);
-+ }
-+ dev->queue_alg = q;
-+ dev->swp = (buffer[4+4] & 0x8) >> 3;
-+ dev->tas = (buffer[4+5] & 0x40) >> 6;
-+ dev->d_sense = (buffer[4+2] & 0x4) >> 2;
-+
-+ /*
-+			 * Unfortunately, the SCSI midlayer doesn't provide a
-+			 * way to specify a command's task attribute, so we
-+			 * can only rely on the device's restricted
-+			 * reordering. The Linux I/O subsystem doesn't reorder
-+			 * pass-through (PC) requests.
-+ */
-+ dev->has_own_order_mgmt = !dev->queue_alg;
-+
-+ PRINT_INFO("Device %s: TST %x, QUEUE ALG %x, SWP %x, "
-+ "TAS %x, D_SENSE %d, has_own_order_mgmt %d",
-+ dev->virt_name, dev->tst, dev->queue_alg,
-+ dev->swp, dev->tas, dev->d_sense,
-+ dev->has_own_order_mgmt);
-+
-+ goto out;
-+ } else {
-+ scst_check_internal_sense(dev, rc, sense_buffer,
-+ sizeof(sense_buffer));
-+#if 0
-+ if ((status_byte(rc) == CHECK_CONDITION) &&
-+ SCST_SENSE_VALID(sense_buffer)) {
-+#else
-+ /*
-+ * 3ware controller is buggy and returns CONDITION_GOOD
-+ * instead of CHECK_CONDITION
-+ */
-+ if (SCST_SENSE_VALID(sense_buffer)) {
-+#endif
-+ PRINT_BUFF_FLAG(TRACE_SCSI, "Returned sense "
-+ "data", sense_buffer,
-+ sizeof(sense_buffer));
-+ if (scst_analyze_sense(sense_buffer,
-+ sizeof(sense_buffer),
-+ SCST_SENSE_KEY_VALID,
-+ ILLEGAL_REQUEST, 0, 0)) {
-+ PRINT_INFO("Device %s doesn't support "
-+ "MODE SENSE", dev->virt_name);
-+ break;
-+ } else if (scst_analyze_sense(sense_buffer,
-+ sizeof(sense_buffer),
-+ SCST_SENSE_KEY_VALID,
-+ NOT_READY, 0, 0)) {
-+ PRINT_ERROR("Device %s not ready",
-+ dev->virt_name);
-+ break;
-+ }
-+ } else {
-+ PRINT_INFO("Internal MODE SENSE to "
-+ "device %s failed: %x",
-+ dev->virt_name, rc);
-+ PRINT_BUFF_FLAG(TRACE_SCSI, "MODE SENSE sense",
-+ sense_buffer, sizeof(sense_buffer));
-+ switch (host_byte(rc)) {
-+ case DID_RESET:
-+ case DID_ABORT:
-+ case DID_SOFT_ERROR:
-+ break;
-+ default:
-+ goto brk;
-+ }
-+ switch (driver_byte(rc)) {
-+ case DRIVER_BUSY:
-+ case DRIVER_SOFT:
-+ break;
-+ default:
-+ goto brk;
-+ }
-+ }
-+ }
-+ }
-+brk:
-+ PRINT_WARNING("Unable to get device's %s control mode page, using "
-+ "existing values/defaults: TST %x, QUEUE ALG %x, SWP %x, "
-+ "TAS %x, D_SENSE %d, has_own_order_mgmt %d", dev->virt_name,
-+ dev->tst, dev->queue_alg, dev->swp, dev->tas, dev->d_sense,
-+ dev->has_own_order_mgmt);
-+
-+out:
-+ TRACE_EXIT();
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(scst_obtain_device_parameters);
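-+
-+/*
-+ * Editor's note on the buffer decoding above (per SPC: a 4-byte mode
-+ * parameter header followed by the control mode page 0Ah):
-+ *
-+ *	buffer[4+2]	bits 7-5 TST, bit 2 D_SENSE
-+ *	buffer[4+3]	bits 7-4 QUEUE ALGORITHM MODIFIER
-+ *	buffer[4+4]	bit 3 SWP
-+ *	buffer[4+5]	bit 6 TAS
-+ */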
-+
-+/* Called under dev_lock and BH off */
-+void scst_process_reset(struct scst_device *dev,
-+ struct scst_session *originator, struct scst_cmd *exclude_cmd,
-+ struct scst_mgmt_cmd *mcmd, bool setUA)
-+{
-+ struct scst_tgt_dev *tgt_dev;
-+ struct scst_cmd *cmd, *tcmd;
-+
-+ TRACE_ENTRY();
-+
-+ /* Clear RESERVE'ation, if necessary */
-+ if (dev->dev_reserved) {
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ TRACE_MGMT_DBG("Clearing RESERVE'ation for "
-+ "tgt_dev LUN %lld",
-+ (long long unsigned int)tgt_dev->lun);
-+ clear_bit(SCST_TGT_DEV_RESERVED,
-+ &tgt_dev->tgt_dev_flags);
-+ }
-+ dev->dev_reserved = 0;
-+ /*
-+		 * There is no need to send RELEASE, since the device is going
-+		 * to be reset. Actually, since we may be running inside a
-+		 * RESET TM function, sending it might even be dangerous.
-+ */
-+ }
-+
-+ dev->dev_double_ua_possible = 1;
-+
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ struct scst_session *sess = tgt_dev->sess;
-+
-+#if 0 /* Clearing UAs and the last sense isn't required by SAM, and it
-+	* looks better not to clear them so that important events aren't
-+	* lost, so let's disable it.
-+	*/
-+ spin_lock_bh(&tgt_dev->tgt_dev_lock);
-+ scst_free_all_UA(tgt_dev);
-+ memset(tgt_dev->tgt_dev_sense, 0,
-+ sizeof(tgt_dev->tgt_dev_sense));
-+ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
-+#endif
-+
-+ spin_lock_irq(&sess->sess_list_lock);
-+
-+ TRACE_DBG("Searching in sess cmd list (sess=%p)", sess);
-+ list_for_each_entry(cmd, &sess->sess_cmd_list,
-+ sess_cmd_list_entry) {
-+ if (cmd == exclude_cmd)
-+ continue;
-+ if ((cmd->tgt_dev == tgt_dev) ||
-+ ((cmd->tgt_dev == NULL) &&
-+ (cmd->lun == tgt_dev->lun))) {
-+ scst_abort_cmd(cmd, mcmd,
-+ (tgt_dev->sess != originator), 0);
-+ }
-+ }
-+ spin_unlock_irq(&sess->sess_list_lock);
-+ }
-+
-+ list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
-+ blocked_cmd_list_entry) {
-+ if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
-+ list_del(&cmd->blocked_cmd_list_entry);
-+ TRACE_MGMT_DBG("Adding aborted blocked cmd %p "
-+ "to active cmd list", cmd);
-+ spin_lock_irq(&cmd->cmd_threads->cmd_list_lock);
-+ list_add_tail(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
-+ spin_unlock_irq(&cmd->cmd_threads->cmd_list_lock);
-+ }
-+ }
-+
-+ if (setUA) {
-+ uint8_t sense_buffer[SCST_STANDARD_SENSE_LEN];
-+ int sl = scst_set_sense(sense_buffer, sizeof(sense_buffer),
-+ dev->d_sense, SCST_LOAD_SENSE(scst_sense_reset_UA));
-+ scst_dev_check_set_local_UA(dev, exclude_cmd, sense_buffer, sl);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Caller must hold tgt_dev->tgt_dev_lock. */
-+void scst_tgt_dev_del_free_UA(struct scst_tgt_dev *tgt_dev,
-+ struct scst_tgt_dev_UA *ua)
-+{
-+ list_del(&ua->UA_list_entry);
-+ if (list_empty(&tgt_dev->UA_list))
-+ clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
-+ mempool_free(ua, scst_ua_mempool);
-+}
-+
-+/* No locks, no IRQ or IRQ-disabled context allowed */
-+int scst_set_pending_UA(struct scst_cmd *cmd)
-+{
-+ int res = 0, i;
-+ struct scst_tgt_dev_UA *UA_entry;
-+ bool first = true, global_unlock = false;
-+ struct scst_session *sess = cmd->sess;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+	 * Read barrier and recheck to sync with setting SCST_CMD_ABORTED in
-+	 * scst_abort_cmd(), so that we don't set a UA for a cmd being
-+	 * aborted and hence possibly miss its delivery by a legitimate
-+	 * command while the UA is being requeued.
-+ */
-+ smp_rmb();
-+ if (test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)) {
-+ TRACE_MGMT_DBG("Not set pending UA for aborted cmd %p", cmd);
-+ res = -1;
-+ goto out;
-+ }
-+
-+ TRACE_MGMT_DBG("Setting pending UA cmd %p", cmd);
-+
-+ spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
-+
-+again:
-+ /* UA list could be cleared behind us, so retest */
-+ if (list_empty(&cmd->tgt_dev->UA_list)) {
-+ TRACE_DBG("%s",
-+ "SCST_TGT_DEV_UA_PENDING set, but UA_list empty");
-+ res = -1;
-+ goto out_unlock;
-+ }
-+
-+ UA_entry = list_entry(cmd->tgt_dev->UA_list.next, typeof(*UA_entry),
-+ UA_list_entry);
-+
-+ TRACE_DBG("next %p UA_entry %p",
-+ cmd->tgt_dev->UA_list.next, UA_entry);
-+
-+ if (UA_entry->global_UA && first) {
-+ TRACE_MGMT_DBG("Global UA %p detected", UA_entry);
-+
-+ spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
-+
-+ /*
-+		 * The cmd prevents activities from being suspended, so we
-+		 * can access sess->sess_tgt_dev_list without any additional
-+		 * protection.
-+ */
-+
-+ local_bh_disable();
-+
-+ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
-+ struct list_head *head = &sess->sess_tgt_dev_list[i];
-+ struct scst_tgt_dev *tgt_dev;
-+ list_for_each_entry(tgt_dev, head,
-+ sess_tgt_dev_list_entry) {
-+				/* Lockdep triggers a false positive here */
-+ spin_lock(&tgt_dev->tgt_dev_lock);
-+ }
-+ }
-+
-+ first = false;
-+ global_unlock = true;
-+ goto again;
-+ }
-+
-+ if (scst_set_cmd_error_sense(cmd, UA_entry->UA_sense_buffer,
-+ UA_entry->UA_valid_sense_len) != 0)
-+ goto out_unlock;
-+
-+ cmd->ua_ignore = 1;
-+
-+ list_del(&UA_entry->UA_list_entry);
-+
-+ if (UA_entry->global_UA) {
-+ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
-+ struct list_head *head = &sess->sess_tgt_dev_list[i];
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ list_for_each_entry(tgt_dev, head,
-+ sess_tgt_dev_list_entry) {
-+ struct scst_tgt_dev_UA *ua;
-+ list_for_each_entry(ua, &tgt_dev->UA_list,
-+ UA_list_entry) {
-+ if (ua->global_UA &&
-+ memcmp(ua->UA_sense_buffer,
-+ UA_entry->UA_sense_buffer,
-+ sizeof(ua->UA_sense_buffer)) == 0) {
-+ TRACE_MGMT_DBG("Freeing not "
-+ "needed global UA %p",
-+ ua);
-+ scst_tgt_dev_del_free_UA(tgt_dev,
-+ ua);
-+ break;
-+ }
-+ }
-+ }
-+ }
-+ }
-+
-+ mempool_free(UA_entry, scst_ua_mempool);
-+
-+ if (list_empty(&cmd->tgt_dev->UA_list)) {
-+ clear_bit(SCST_TGT_DEV_UA_PENDING,
-+ &cmd->tgt_dev->tgt_dev_flags);
-+ }
-+
-+out_unlock:
-+ if (global_unlock) {
-+ for (i = SESS_TGT_DEV_LIST_HASH_SIZE-1; i >= 0; i--) {
-+ struct list_head *head = &sess->sess_tgt_dev_list[i];
-+ struct scst_tgt_dev *tgt_dev;
-+ list_for_each_entry_reverse(tgt_dev, head,
-+ sess_tgt_dev_list_entry) {
-+ spin_unlock(&tgt_dev->tgt_dev_lock);
-+ }
-+ }
-+
-+ local_bh_enable();
-+ spin_lock_bh(&cmd->tgt_dev->tgt_dev_lock);
-+ }
-+
-+ spin_unlock_bh(&cmd->tgt_dev->tgt_dev_lock);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* Called under tgt_dev_lock and BH off */
-+static void scst_alloc_set_UA(struct scst_tgt_dev *tgt_dev,
-+ const uint8_t *sense, int sense_len, int flags)
-+{
-+ struct scst_tgt_dev_UA *UA_entry = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ UA_entry = mempool_alloc(scst_ua_mempool, GFP_ATOMIC);
-+ if (UA_entry == NULL) {
-+ PRINT_CRIT_ERROR("%s", "UNIT ATTENTION memory "
-+ "allocation failed. The UNIT ATTENTION "
-+ "on some sessions will be missed");
-+ PRINT_BUFFER("Lost UA", sense, sense_len);
-+ goto out;
-+ }
-+ memset(UA_entry, 0, sizeof(*UA_entry));
-+
-+ UA_entry->global_UA = (flags & SCST_SET_UA_FLAG_GLOBAL) != 0;
-+ if (UA_entry->global_UA)
-+ TRACE_MGMT_DBG("Queueing global UA %p", UA_entry);
-+
-+ if (sense_len > (int)sizeof(UA_entry->UA_sense_buffer)) {
-+ PRINT_WARNING("Sense truncated (needed %d), shall you increase "
-+ "SCST_SENSE_BUFFERSIZE?", sense_len);
-+ sense_len = sizeof(UA_entry->UA_sense_buffer);
-+ }
-+ memcpy(UA_entry->UA_sense_buffer, sense, sense_len);
-+ UA_entry->UA_valid_sense_len = sense_len;
-+
-+ set_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
-+
-+ TRACE_MGMT_DBG("Adding new UA to tgt_dev %p", tgt_dev);
-+
-+ if (flags & SCST_SET_UA_FLAG_AT_HEAD)
-+ list_add(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
-+ else
-+ list_add_tail(&UA_entry->UA_list_entry, &tgt_dev->UA_list);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* tgt_dev_lock supposed to be held and BH off */
-+static void __scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
-+ const uint8_t *sense, int sense_len, int flags)
-+{
-+ int skip_UA = 0;
-+ struct scst_tgt_dev_UA *UA_entry_tmp;
-+ int len = min_t(int, sizeof(UA_entry_tmp->UA_sense_buffer), sense_len);
-+
-+ TRACE_ENTRY();
-+
-+ list_for_each_entry(UA_entry_tmp, &tgt_dev->UA_list,
-+ UA_list_entry) {
-+ if (memcmp(sense, UA_entry_tmp->UA_sense_buffer, len) == 0) {
-+ TRACE_MGMT_DBG("%s", "UA already exists");
-+ skip_UA = 1;
-+ break;
-+ }
-+ }
-+
-+ if (skip_UA == 0)
-+ scst_alloc_set_UA(tgt_dev, sense, len, flags);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+void scst_check_set_UA(struct scst_tgt_dev *tgt_dev,
-+ const uint8_t *sense, int sense_len, int flags)
-+{
-+ TRACE_ENTRY();
-+
-+ spin_lock_bh(&tgt_dev->tgt_dev_lock);
-+ __scst_check_set_UA(tgt_dev, sense, sense_len, flags);
-+ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Called under dev_lock and BH off */
-+void scst_dev_check_set_local_UA(struct scst_device *dev,
-+ struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
-+{
-+ struct scst_tgt_dev *tgt_dev, *exclude_tgt_dev = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ if (exclude != NULL)
-+ exclude_tgt_dev = exclude->tgt_dev;
-+
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ if (tgt_dev != exclude_tgt_dev)
-+ scst_check_set_UA(tgt_dev, sense, sense_len, 0);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Called under dev_lock and BH off */
-+void __scst_dev_check_set_UA(struct scst_device *dev,
-+ struct scst_cmd *exclude, const uint8_t *sense, int sense_len)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Processing UA dev %s", dev->virt_name);
-+
-+ /* Check for reset UA */
-+ if (scst_analyze_sense(sense, sense_len, SCST_SENSE_ASC_VALID,
-+ 0, SCST_SENSE_ASC_UA_RESET, 0))
-+ scst_process_reset(dev,
-+ (exclude != NULL) ? exclude->sess : NULL,
-+ exclude, NULL, false);
-+
-+ scst_dev_check_set_local_UA(dev, exclude, sense, sense_len);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Called under tgt_dev_lock or when tgt_dev is unused */
-+static void scst_free_all_UA(struct scst_tgt_dev *tgt_dev)
-+{
-+ struct scst_tgt_dev_UA *UA_entry, *t;
-+
-+ TRACE_ENTRY();
-+
-+ list_for_each_entry_safe(UA_entry, t,
-+ &tgt_dev->UA_list, UA_list_entry) {
-+ TRACE_MGMT_DBG("Clearing UA for tgt_dev LUN %lld",
-+ (long long unsigned int)tgt_dev->lun);
-+ list_del(&UA_entry->UA_list_entry);
-+ mempool_free(UA_entry, scst_ua_mempool);
-+ }
-+ INIT_LIST_HEAD(&tgt_dev->UA_list);
-+ clear_bit(SCST_TGT_DEV_UA_PENDING, &tgt_dev->tgt_dev_flags);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* No locks */
-+struct scst_cmd *__scst_check_deferred_commands(struct scst_order_data *order_data)
-+{
-+ struct scst_cmd *res = NULL, *cmd, *t;
-+ typeof(order_data->expected_sn) expected_sn = order_data->expected_sn;
-+
-+ spin_lock_irq(&order_data->sn_lock);
-+
-+ if (unlikely(order_data->hq_cmd_count != 0))
-+ goto out_unlock;
-+
-+restart:
-+ list_for_each_entry_safe(cmd, t, &order_data->deferred_cmd_list,
-+ sn_cmd_list_entry) {
-+ EXTRACHECKS_BUG_ON(cmd->queue_type ==
-+ SCST_CMD_QUEUE_HEAD_OF_QUEUE);
-+ if (cmd->sn == expected_sn) {
-+ TRACE_SN("Deferred command %p (sn %d, set %d) found",
-+ cmd, cmd->sn, cmd->sn_set);
-+ order_data->def_cmd_count--;
-+ list_del(&cmd->sn_cmd_list_entry);
-+ if (res == NULL)
-+ res = cmd;
-+ else {
-+ spin_lock(&cmd->cmd_threads->cmd_list_lock);
-+ TRACE_SN("Adding cmd %p to active cmd list",
-+ cmd);
-+ list_add_tail(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
-+ spin_unlock(&cmd->cmd_threads->cmd_list_lock);
-+ }
-+ }
-+ }
-+ if (res != NULL)
-+ goto out_unlock;
-+
-+ list_for_each_entry(cmd, &order_data->skipped_sn_list,
-+ sn_cmd_list_entry) {
-+ EXTRACHECKS_BUG_ON(cmd->queue_type ==
-+ SCST_CMD_QUEUE_HEAD_OF_QUEUE);
-+ if (cmd->sn == expected_sn) {
-+ atomic_t *slot = cmd->sn_slot;
-+ /*
-+ * !! At this point any pointer in cmd, except !!
-+ * !! sn_slot and sn_cmd_list_entry, could be !!
-+ * !! already destroyed !!
-+ */
-+ TRACE_SN("cmd %p (tag %llu) with skipped sn %d found",
-+ cmd,
-+ (long long unsigned int)cmd->tag,
-+ cmd->sn);
-+ order_data->def_cmd_count--;
-+ list_del(&cmd->sn_cmd_list_entry);
-+ spin_unlock_irq(&order_data->sn_lock);
-+ if (test_and_set_bit(SCST_CMD_CAN_BE_DESTROYED,
-+ &cmd->cmd_flags))
-+ scst_destroy_put_cmd(cmd);
-+ scst_inc_expected_sn(order_data, slot);
-+ expected_sn = order_data->expected_sn;
-+ spin_lock_irq(&order_data->sn_lock);
-+ goto restart;
-+ }
-+ }
-+
-+out_unlock:
-+ spin_unlock_irq(&order_data->sn_lock);
-+ return res;
-+}
-+
-+/*****************************************************************
-+ ** The following thr_data functions are necessary, because the
-+ ** kernel doesn't provide a better way to have threads local
-+ ** storage
-+ *****************************************************************/
-+
-+/**
-+ * scst_add_thr_data() - add the current thread's local data
-+ *
-+ * Adds data local to the current thread to tgt_dev
-+ * (it will be local to the tgt_dev and the current thread).
-+ */
-+void scst_add_thr_data(struct scst_tgt_dev *tgt_dev,
-+ struct scst_thr_data_hdr *data,
-+ void (*free_fn) (struct scst_thr_data_hdr *data))
-+{
-+ data->owner_thr = current;
-+ atomic_set(&data->ref, 1);
-+ EXTRACHECKS_BUG_ON(free_fn == NULL);
-+ data->free_fn = free_fn;
-+ spin_lock(&tgt_dev->thr_data_lock);
-+ list_add_tail(&data->thr_data_list_entry, &tgt_dev->thr_data_list);
-+ spin_unlock(&tgt_dev->thr_data_lock);
-+}
-+EXPORT_SYMBOL_GPL(scst_add_thr_data);
-+
-+/**
-+ * scst_del_all_thr_data() - delete all thread-local data
-+ *
-+ * Deletes all thread-local data from tgt_dev
-+ */
-+void scst_del_all_thr_data(struct scst_tgt_dev *tgt_dev)
-+{
-+ spin_lock(&tgt_dev->thr_data_lock);
-+ while (!list_empty(&tgt_dev->thr_data_list)) {
-+ struct scst_thr_data_hdr *d = list_entry(
-+ tgt_dev->thr_data_list.next, typeof(*d),
-+ thr_data_list_entry);
-+ list_del(&d->thr_data_list_entry);
-+ spin_unlock(&tgt_dev->thr_data_lock);
-+ scst_thr_data_put(d);
-+ spin_lock(&tgt_dev->thr_data_lock);
-+ }
-+ spin_unlock(&tgt_dev->thr_data_lock);
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_del_all_thr_data);
-+
-+/**
-+ * scst_dev_del_all_thr_data() - delete all thread-local data from a device
-+ *
-+ * Deletes all thread-local data from all tgt_devs of the device
-+ */
-+void scst_dev_del_all_thr_data(struct scst_device *dev)
-+{
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ scst_del_all_thr_data(tgt_dev);
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_dev_del_all_thr_data);
-+
-+/* thr_data_lock supposed to be held */
-+static struct scst_thr_data_hdr *__scst_find_thr_data_locked(
-+ struct scst_tgt_dev *tgt_dev, struct task_struct *tsk)
-+{
-+ struct scst_thr_data_hdr *res = NULL, *d;
-+
-+ list_for_each_entry(d, &tgt_dev->thr_data_list, thr_data_list_entry) {
-+ if (d->owner_thr == tsk) {
-+ res = d;
-+ scst_thr_data_get(res);
-+ break;
-+ }
-+ }
-+ return res;
-+}
-+
-+/**
-+ * __scst_find_thr_data() - find thread-local data
-+ *
-+ * Finds the thread-local data. Returns NULL if not found.
-+ */
-+struct scst_thr_data_hdr *__scst_find_thr_data(struct scst_tgt_dev *tgt_dev,
-+ struct task_struct *tsk)
-+{
-+ struct scst_thr_data_hdr *res;
-+
-+ spin_lock(&tgt_dev->thr_data_lock);
-+ res = __scst_find_thr_data_locked(tgt_dev, tsk);
-+ spin_unlock(&tgt_dev->thr_data_lock);
-+
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(__scst_find_thr_data);
-+
-+bool scst_del_thr_data(struct scst_tgt_dev *tgt_dev, struct task_struct *tsk)
-+{
-+ bool res;
-+ struct scst_thr_data_hdr *td;
-+
-+ spin_lock(&tgt_dev->thr_data_lock);
-+
-+ td = __scst_find_thr_data_locked(tgt_dev, tsk);
-+ if (td != NULL) {
-+ list_del(&td->thr_data_list_entry);
-+ res = true;
-+ } else
-+ res = false;
-+
-+ spin_unlock(&tgt_dev->thr_data_lock);
-+
-+ if (td != NULL) {
-+		/* put twice: once for the list, once for the find() above */
-+ scst_thr_data_put(td);
-+ scst_thr_data_put(td);
-+ }
-+
-+ return res;
-+}
-+
-+static void __scst_unblock_deferred(struct scst_order_data *order_data,
-+ struct scst_cmd *out_of_sn_cmd)
-+{
-+ EXTRACHECKS_BUG_ON(!out_of_sn_cmd->sn_set);
-+
-+ if (out_of_sn_cmd->sn == order_data->expected_sn) {
-+ scst_inc_expected_sn(order_data, out_of_sn_cmd->sn_slot);
-+ scst_make_deferred_commands_active(order_data);
-+ } else {
-+ out_of_sn_cmd->out_of_sn = 1;
-+ spin_lock_irq(&order_data->sn_lock);
-+ order_data->def_cmd_count++;
-+ list_add_tail(&out_of_sn_cmd->sn_cmd_list_entry,
-+ &order_data->skipped_sn_list);
-+ TRACE_SN("out_of_sn_cmd %p with sn %d added to skipped_sn_list"
-+ " (expected_sn %d)", out_of_sn_cmd, out_of_sn_cmd->sn,
-+ order_data->expected_sn);
-+ spin_unlock_irq(&order_data->sn_lock);
-+ }
-+
-+ return;
-+}
-+
-+void scst_unblock_deferred(struct scst_order_data *order_data,
-+ struct scst_cmd *out_of_sn_cmd)
-+{
-+ TRACE_ENTRY();
-+
-+ if (!out_of_sn_cmd->sn_set) {
-+ TRACE_SN("cmd %p without sn", out_of_sn_cmd);
-+ goto out;
-+ }
-+
-+ __scst_unblock_deferred(order_data, out_of_sn_cmd);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* dev_lock supposed to be held and BH disabled */
-+void scst_block_dev(struct scst_device *dev)
-+{
-+ dev->block_count++;
-+ TRACE_MGMT_DBG("Device BLOCK (new count %d), dev %s", dev->block_count,
-+ dev->virt_name);
-+}
-+
-+/* dev_lock supposed to be held and BH disabled */
-+bool __scst_check_blocked_dev(struct scst_cmd *cmd)
-+{
-+ int res = false;
-+ struct scst_device *dev = cmd->dev;
-+
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(cmd->unblock_dev);
-+
-+ if (unlikely(cmd->internal) && (cmd->cdb[0] == REQUEST_SENSE)) {
-+ /*
-+		 * The original command may already have blocked the device,
-+		 * so the REQUEST SENSE command must always pass.
-+ */
-+ goto out;
-+ }
-+
-+ if (unlikely(test_bit(SCST_CMD_ABORTED, &cmd->cmd_flags)))
-+ goto out;
-+
-+ if (dev->block_count > 0) {
-+ TRACE_MGMT_DBG("Delaying cmd %p due to blocking "
-+ "(tag %llu, op %x, dev %s)", cmd,
-+ (long long unsigned int)cmd->tag, cmd->cdb[0],
-+ dev->virt_name);
-+ goto out_block;
-+ } else if (scst_is_strictly_serialized_cmd(cmd)) {
-+ TRACE_MGMT_DBG("cmd %p (tag %llu, op %x): blocking further "
-+ "cmds on dev %s due to strict serialization", cmd,
-+ (long long unsigned int)cmd->tag, cmd->cdb[0],
-+ dev->virt_name);
-+ scst_block_dev(dev);
-+ if (dev->on_dev_cmd_count > 1) {
-+ TRACE_MGMT_DBG("Delaying strictly serialized cmd %p "
-+ "(dev %s, on_dev_cmds to wait %d)", cmd,
-+ dev->virt_name, dev->on_dev_cmd_count-1);
-+ EXTRACHECKS_BUG_ON(dev->strictly_serialized_cmd_waiting);
-+ dev->strictly_serialized_cmd_waiting = 1;
-+ goto out_block;
-+ } else
-+ cmd->unblock_dev = 1;
-+ } else if ((dev->dev_double_ua_possible) || scst_is_serialized_cmd(cmd)) {
-+ TRACE_MGMT_DBG("cmd %p (tag %llu, op %x): blocking further cmds "
-+ "on dev %s due to %s", cmd, (long long unsigned int)cmd->tag,
-+ cmd->cdb[0], dev->virt_name,
-+ dev->dev_double_ua_possible ? "possible double reset UA" :
-+ "serialized cmd");
-+ scst_block_dev(dev);
-+ cmd->unblock_dev = 1;
-+ } else
-+ TRACE_MGMT_DBG("No blocks for device %s", dev->virt_name);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_block:
-+ if (cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE)
-+ list_add(&cmd->blocked_cmd_list_entry,
-+ &dev->blocked_cmd_list);
-+ else
-+ list_add_tail(&cmd->blocked_cmd_list_entry,
-+ &dev->blocked_cmd_list);
-+ res = true;
-+ goto out;
-+}
-+
-+/* dev_lock supposed to be held and BH disabled */
-+void scst_unblock_dev(struct scst_device *dev)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Device UNBLOCK(new %d), dev %s",
-+ dev->block_count-1, dev->virt_name);
-+
-+#ifdef CONFIG_SMP
-+ EXTRACHECKS_BUG_ON(!spin_is_locked(&dev->dev_lock));
-+#endif
-+
-+ if (--dev->block_count == 0) {
-+ struct scst_cmd *cmd, *tcmd;
-+ unsigned long flags;
-+
-+ local_irq_save(flags);
-+ list_for_each_entry_safe(cmd, tcmd, &dev->blocked_cmd_list,
-+ blocked_cmd_list_entry) {
-+ bool strictly_serialized;
-+ list_del(&cmd->blocked_cmd_list_entry);
-+ TRACE_MGMT_DBG("Adding blocked cmd %p to active cmd "
-+ "list", cmd);
-+ spin_lock(&cmd->cmd_threads->cmd_list_lock);
-+ if (cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE)
-+ list_add(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ else
-+ list_add_tail(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ strictly_serialized = scst_is_strictly_serialized_cmd(cmd);
-+ wake_up(&cmd->cmd_threads->cmd_list_waitQ);
-+ spin_unlock(&cmd->cmd_threads->cmd_list_lock);
-+ if (dev->strictly_serialized_cmd_waiting && strictly_serialized)
-+ break;
-+ }
-+ local_irq_restore(flags);
-+
-+ dev->strictly_serialized_cmd_waiting = 0;
-+ }
-+
-+ BUG_ON(dev->block_count < 0);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+void scst_on_hq_cmd_response(struct scst_cmd *cmd)
-+{
-+ struct scst_order_data *order_data = cmd->cur_order_data;
-+
-+ TRACE_ENTRY();
-+
-+ if (!cmd->hq_cmd_inced)
-+ goto out;
-+
-+ spin_lock_irq(&order_data->sn_lock);
-+ order_data->hq_cmd_count--;
-+ spin_unlock_irq(&order_data->sn_lock);
-+
-+ EXTRACHECKS_BUG_ON(order_data->hq_cmd_count < 0);
-+
-+ /*
-+	 * There is no problem in checking hq_cmd_count in the
-+	 * non-locked state. In the worst case we will only have an
-+	 * unneeded run of the deferred commands.
-+ */
-+ if (order_data->hq_cmd_count == 0)
-+ scst_make_deferred_commands_active(order_data);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+void scst_store_sense(struct scst_cmd *cmd)
-+{
-+ TRACE_ENTRY();
-+
-+ if (SCST_SENSE_VALID(cmd->sense) &&
-+ !test_bit(SCST_CMD_NO_RESP, &cmd->cmd_flags) &&
-+ (cmd->tgt_dev != NULL)) {
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+
-+ TRACE_DBG("Storing sense (cmd %p)", cmd);
-+
-+ spin_lock_bh(&tgt_dev->tgt_dev_lock);
-+
-+ if (cmd->sense_valid_len <= sizeof(tgt_dev->tgt_dev_sense))
-+ tgt_dev->tgt_dev_valid_sense_len = cmd->sense_valid_len;
-+ else {
-+ tgt_dev->tgt_dev_valid_sense_len = sizeof(tgt_dev->tgt_dev_sense);
-+ PRINT_ERROR("Stored sense truncated to size %d "
-+ "(needed %d)", tgt_dev->tgt_dev_valid_sense_len,
-+ cmd->sense_valid_len);
-+ }
-+ memcpy(tgt_dev->tgt_dev_sense, cmd->sense,
-+ tgt_dev->tgt_dev_valid_sense_len);
-+
-+ spin_unlock_bh(&tgt_dev->tgt_dev_lock);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+void scst_xmit_process_aborted_cmd(struct scst_cmd *cmd)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Aborted cmd %p done (cmd_ref %d)", cmd,
-+ atomic_read(&cmd->cmd_ref));
-+
-+ scst_done_cmd_mgmt(cmd);
-+
-+ if (test_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags)) {
-+ if (cmd->completed) {
-+ /* It's completed and it's OK to return its result */
-+ goto out;
-+ }
-+
-+		/* For not yet initialized commands, cmd->dev can be NULL here */
-+ if (test_bit(SCST_CMD_DEVICE_TAS, &cmd->cmd_flags)) {
-+ TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
-+ "(tag %llu), returning TASK ABORTED ", cmd,
-+ (long long unsigned int)cmd->tag);
-+ scst_set_cmd_error_status(cmd, SAM_STAT_TASK_ABORTED);
-+ } else {
-+ TRACE_MGMT_DBG("Flag ABORTED OTHER set for cmd %p "
-+ "(tag %llu), aborting without delivery or "
-+ "notification",
-+ cmd, (long long unsigned int)cmd->tag);
-+ /*
-+ * There is no need to check/requeue possible UA,
-+ * because, if it exists, it will be delivered
-+ * by the "completed" branch above.
-+ */
-+ clear_bit(SCST_CMD_ABORTED_OTHER, &cmd->cmd_flags);
-+ }
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ * scst_get_max_lun_commands() - return the maximum supported command count
-+ *
-+ * Returns the maximum number of commands which can be queued to this LUN
-+ * in this session.
-+ *
-+ * If lun is NO_SUCH_LUN, returns the minimum over all LUNs in this session
-+ * of that maximum.
-+ *
-+ * If sess is NULL, returns the minimum over all SCST devices.
-+ */
-+int scst_get_max_lun_commands(struct scst_session *sess, uint64_t lun)
-+{
-+ return SCST_MAX_TGT_DEV_COMMANDS;
-+}
-+EXPORT_SYMBOL(scst_get_max_lun_commands);
-+
-+/**
-+ * scst_reassign_persistent_sess_states() - reassigns persistent states
-+ *
-+ * Reassigns persistent states from old_sess to new_sess.
-+ */
-+void scst_reassign_persistent_sess_states(struct scst_session *new_sess,
-+ struct scst_session *old_sess)
-+{
-+ struct scst_device *dev;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_PR("Reassigning persistent states from old_sess %p to "
-+ "new_sess %p", old_sess, new_sess);
-+
-+ if ((new_sess == NULL) || (old_sess == NULL)) {
-+ TRACE_DBG("%s", "new_sess or old_sess is NULL");
-+ goto out;
-+ }
-+
-+ if (new_sess == old_sess) {
-+ TRACE_DBG("%s", "new_sess or old_sess are the same");
-+ goto out;
-+ }
-+
-+ if ((new_sess->transport_id == NULL) ||
-+ (old_sess->transport_id == NULL)) {
-+ TRACE_DBG("%s", "new_sess or old_sess doesn't support PRs");
-+ goto out;
-+ }
-+
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
-+ struct scst_tgt_dev *tgt_dev;
-+ struct scst_tgt_dev *new_tgt_dev = NULL, *old_tgt_dev = NULL;
-+
-+ TRACE_DBG("Processing dev %s", dev->virt_name);
-+
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ if (tgt_dev->sess == new_sess) {
-+ new_tgt_dev = tgt_dev;
-+ if (old_tgt_dev != NULL)
-+ break;
-+ }
-+ if (tgt_dev->sess == old_sess) {
-+ old_tgt_dev = tgt_dev;
-+ if (new_tgt_dev != NULL)
-+ break;
-+ }
-+ }
-+
-+ if ((new_tgt_dev == NULL) || (old_tgt_dev == NULL)) {
-+ TRACE_DBG("new_tgt_dev %p or old_sess %p is NULL, "
-+ "skipping (dev %s)", new_tgt_dev, old_tgt_dev,
-+ dev->virt_name);
-+ continue;
-+ }
-+
-+ scst_pr_write_lock(dev);
-+
-+ if (old_tgt_dev->registrant != NULL) {
-+ TRACE_PR("Reassigning reg %p from tgt_dev %p to %p",
-+ old_tgt_dev->registrant, old_tgt_dev,
-+ new_tgt_dev);
-+
-+ if (new_tgt_dev->registrant != NULL)
-+ new_tgt_dev->registrant->tgt_dev = NULL;
-+
-+ new_tgt_dev->registrant = old_tgt_dev->registrant;
-+ new_tgt_dev->registrant->tgt_dev = new_tgt_dev;
-+
-+ old_tgt_dev->registrant = NULL;
-+ }
-+
-+ scst_pr_write_unlock(dev);
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL(scst_reassign_persistent_sess_states);
-+
-+/**
-+ * scst_get_next_lexem() - parse and return next lexem in the string
-+ *
-+ * Returns a pointer to the next lexem from token_str, skipping
-+ * spaces and '=' characters and using them as delimiters. The content
-+ * of token_str is modified by setting '\0' at the delimiter's position.
-+ */
-+char *scst_get_next_lexem(char **token_str)
-+{
-+	char *p;
-+ char *q;
-+ static const char blank = '\0';
-+
-+ if ((token_str == NULL) || (*token_str == NULL))
-+ return (char *)&blank;
-+
-+ for (p = *token_str; (*p != '\0') && (isspace(*p) || (*p == '=')); p++)
-+ ;
-+
-+ for (q = p; (*q != '\0') && !isspace(*q) && (*q != '='); q++)
-+ ;
-+
-+ if (*q != '\0')
-+ *q++ = '\0';
-+
-+ *token_str = q;
-+ return p;
-+}
-+EXPORT_SYMBOL_GPL(scst_get_next_lexem);
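-+
-+/*
-+ * Worked example (editor's note, the buffer contents are only an
-+ * example): parsing "filename=/dev/sdb size=100" with repeated
-+ * scst_get_next_lexem() calls yields "filename", "/dev/sdb", "size",
-+ * "100" in turn, each call NUL-terminating the lexem in place;
-+ * scst_restore_token_str() below undoes that termination.
-+ */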
-+
-+/**
-+ * scst_restore_token_str() - restore string modified by scst_get_next_lexem()
-+ *
-+ * Restores token_str, modified by scst_get_next_lexem(), to its value
-+ * from before scst_get_next_lexem() was called. prev_lexem is
-+ * a pointer to the lexem returned by scst_get_next_lexem().
-+ */
-+void scst_restore_token_str(char *prev_lexem, char *token_str)
-+{
-+ if (&prev_lexem[strlen(prev_lexem)] != token_str)
-+ prev_lexem[strlen(prev_lexem)] = ' ';
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_restore_token_str);
-+
-+/**
-+ * scst_get_next_token_str() - parse and return next token
-+ *
-+ * This function returns a pointer to the next token string from input_str,
-+ * using '\n', ';' and '\0' as delimiters. The content of input_str is
-+ * modified by setting '\0' at the delimiter's position.
-+ */
-+char *scst_get_next_token_str(char **input_str)
-+{
-+ char *p = *input_str;
-+ int i = 0;
-+
-+ while ((p[i] != '\n') && (p[i] != ';') && (p[i] != '\0'))
-+ i++;
-+
-+ if (i == 0)
-+ return NULL;
-+
-+ if (p[i] == '\0')
-+ *input_str = &p[i];
-+ else
-+ *input_str = &p[i+1];
-+
-+ p[i] = '\0';
-+
-+ return p;
-+}
-+EXPORT_SYMBOL_GPL(scst_get_next_token_str);
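-+
-+/*
-+ * Usage sketch (editor's illustration, not from the original patch): the
-+ * two helpers are meant to be nested - scst_get_next_token_str() splits a
-+ * multi-statement buffer into statements, scst_get_next_lexem() then
-+ * splits each statement into words. The function below is hypothetical.
-+ */
-+#if 0
-+static void example_parse(char *input)
-+{
-+	char *token, *lexem;
-+
-+	while ((token = scst_get_next_token_str(&input)) != NULL) {
-+		while (*(lexem = scst_get_next_lexem(&token)) != '\0')
-+			PRINT_INFO("lexem: %s", lexem);
-+	}
-+}
-+#endif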
-+
-+static void __init scst_scsi_op_list_init(void)
-+{
-+ int i;
-+ uint8_t op = 0xff;
-+
-+ TRACE_ENTRY();
-+
-+ for (i = 0; i < 256; i++)
-+ scst_scsi_op_list[i] = SCST_CDB_TBL_SIZE;
-+
-+ for (i = 0; i < SCST_CDB_TBL_SIZE; i++) {
-+ if (scst_scsi_op_table[i].ops != op) {
-+ op = scst_scsi_op_table[i].ops;
-+ scst_scsi_op_list[op] = i;
-+ }
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+int __init scst_lib_init(void)
-+{
-+ int res = 0;
-+
-+ scst_scsi_op_list_init();
-+
-+ scsi_io_context_cache = kmem_cache_create("scst_scsi_io_context",
-+ sizeof(struct scsi_io_context),
-+ 0, 0, NULL);
-+ if (!scsi_io_context_cache) {
-+ PRINT_ERROR("%s", "Can't init scsi io context cache");
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+void scst_lib_exit(void)
-+{
-+ BUILD_BUG_ON(SCST_MAX_CDB_SIZE != BLK_MAX_CDB);
-+ BUILD_BUG_ON(SCST_SENSE_BUFFERSIZE < SCSI_SENSE_BUFFERSIZE);
-+
-+ kmem_cache_destroy(scsi_io_context_cache);
-+}
-+
-+#ifdef CONFIG_SCST_DEBUG
-+
-+/**
-+ * scst_random() - return a pseudo-random number for debugging purposes.
-+ *
-+ * Returns a pseudo-random number for debugging purposes. Available only in
-+ * the DEBUG build.
-+ *
-+ * Originally taken from the XFS code
-+ */
-+unsigned long scst_random(void)
-+{
-+ static int Inited;
-+ static unsigned long RandomValue;
-+ static DEFINE_SPINLOCK(lock);
-+ /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
-+ register long rv;
-+ register long lo;
-+ register long hi;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&lock, flags);
-+ if (!Inited) {
-+ RandomValue = jiffies;
-+ Inited = 1;
-+ }
-+ rv = RandomValue;
-+ hi = rv / 127773;
-+ lo = rv % 127773;
-+ rv = 16807 * lo - 2836 * hi;
-+ if (rv <= 0)
-+ rv += 2147483647;
-+ RandomValue = rv;
-+ spin_unlock_irqrestore(&lock, flags);
-+ return rv;
-+}
-+EXPORT_SYMBOL_GPL(scst_random);
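-+
-+/*
-+ * Editor's note: the recurrence above is the classic Park-Miller
-+ * "minimal standard" generator, rv = 16807 * rv mod (2^31 - 1),
-+ * implemented with the Schrage factorization
-+ * 2147483647 = 16807 * 127773 + 2836 to avoid 64-bit overflow.
-+ */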
-+#endif /* CONFIG_SCST_DEBUG */
-+
-+#ifdef CONFIG_SCST_DEBUG_TM
-+
-+#define TM_DBG_STATE_ABORT 0
-+#define TM_DBG_STATE_RESET 1
-+#define TM_DBG_STATE_OFFLINE 2
-+
-+#define INIT_TM_DBG_STATE TM_DBG_STATE_ABORT
-+
-+static void tm_dbg_timer_fn(unsigned long arg);
-+
-+static DEFINE_SPINLOCK(scst_tm_dbg_lock);
-+/* All serialized by scst_tm_dbg_lock */
-+static struct {
-+ unsigned int tm_dbg_release:1;
-+ unsigned int tm_dbg_blocked:1;
-+} tm_dbg_flags;
-+static LIST_HEAD(tm_dbg_delayed_cmd_list);
-+static int tm_dbg_delayed_cmds_count;
-+static int tm_dbg_passed_cmds_count;
-+static int tm_dbg_state;
-+static int tm_dbg_on_state_passes;
-+static DEFINE_TIMER(tm_dbg_timer, tm_dbg_timer_fn, 0, 0);
-+static struct scst_tgt_dev *tm_dbg_tgt_dev;
-+
-+static const int tm_dbg_on_state_num_passes[] = { 5, 1, 0x7ffffff };
-+
-+static void tm_dbg_init_tgt_dev(struct scst_tgt_dev *tgt_dev)
-+{
-+ if (tgt_dev->lun == 6) {
-+ unsigned long flags;
-+
-+ if (tm_dbg_tgt_dev != NULL)
-+ tm_dbg_deinit_tgt_dev(tm_dbg_tgt_dev);
-+
-+ spin_lock_irqsave(&scst_tm_dbg_lock, flags);
-+ tm_dbg_state = INIT_TM_DBG_STATE;
-+ tm_dbg_on_state_passes =
-+ tm_dbg_on_state_num_passes[tm_dbg_state];
-+ tm_dbg_tgt_dev = tgt_dev;
-+ PRINT_INFO("LUN %lld connected from initiator %s is under "
-+ "TM debugging (tgt_dev %p)",
-+ (unsigned long long)tgt_dev->lun,
-+ tgt_dev->sess->initiator_name, tgt_dev);
-+ spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
-+ }
-+ return;
-+}
-+
-+static void tm_dbg_deinit_tgt_dev(struct scst_tgt_dev *tgt_dev)
-+{
-+ if (tm_dbg_tgt_dev == tgt_dev) {
-+ unsigned long flags;
-+ TRACE_MGMT_DBG("Deinit TM debugging tgt_dev %p", tgt_dev);
-+ del_timer_sync(&tm_dbg_timer);
-+ spin_lock_irqsave(&scst_tm_dbg_lock, flags);
-+ tm_dbg_tgt_dev = NULL;
-+ spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
-+ }
-+ return;
-+}
-+
-+static void tm_dbg_timer_fn(unsigned long arg)
-+{
-+ TRACE_MGMT_DBG("%s", "delayed cmd timer expired");
-+ tm_dbg_flags.tm_dbg_release = 1;
-+ /* Used to make sure that all woken up threads see the new value */
-+ smp_wmb();
-+ wake_up_all(&tm_dbg_tgt_dev->active_cmd_threads->cmd_list_waitQ);
-+ return;
-+}
-+
-+/* Called under scst_tm_dbg_lock and IRQs off */
-+static void tm_dbg_delay_cmd(struct scst_cmd *cmd)
-+{
-+ switch (tm_dbg_state) {
-+ case TM_DBG_STATE_ABORT:
-+ if (tm_dbg_delayed_cmds_count == 0) {
-+ unsigned long d = 58*HZ + (scst_random() % (4*HZ));
-+ TRACE_MGMT_DBG("STATE ABORT: delaying cmd %p (tag %llu)"
-+ " for %ld.%ld seconds (%ld HZ), "
-+ "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
-+ d/HZ, (d%HZ)*100/HZ, d, tm_dbg_on_state_passes);
-+ mod_timer(&tm_dbg_timer, jiffies + d);
-+#if 0
-+ tm_dbg_flags.tm_dbg_blocked = 1;
-+#endif
-+ } else {
-+ TRACE_MGMT_DBG("Delaying another timed cmd %p "
-+ "(tag %llu), delayed_cmds_count=%d, "
-+ "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
-+ tm_dbg_delayed_cmds_count,
-+ tm_dbg_on_state_passes);
-+ if (tm_dbg_delayed_cmds_count == 2)
-+ tm_dbg_flags.tm_dbg_blocked = 0;
-+ }
-+ break;
-+
-+ case TM_DBG_STATE_RESET:
-+ case TM_DBG_STATE_OFFLINE:
-+ TRACE_MGMT_DBG("STATE RESET/OFFLINE: delaying cmd %p "
-+ "(tag %llu), delayed_cmds_count=%d, "
-+ "tm_dbg_on_state_passes=%d", cmd, cmd->tag,
-+ tm_dbg_delayed_cmds_count, tm_dbg_on_state_passes);
-+ tm_dbg_flags.tm_dbg_blocked = 1;
-+ break;
-+
-+ default:
-+ BUG();
-+ }
-+ /* IRQs already off */
-+ spin_lock(&cmd->cmd_threads->cmd_list_lock);
-+ list_add_tail(&cmd->cmd_list_entry, &tm_dbg_delayed_cmd_list);
-+ spin_unlock(&cmd->cmd_threads->cmd_list_lock);
-+ cmd->tm_dbg_delayed = 1;
-+ tm_dbg_delayed_cmds_count++;
-+ return;
-+}
-+
-+/* No locks */
-+void tm_dbg_check_released_cmds(void)
-+{
-+ if (tm_dbg_flags.tm_dbg_release) {
-+ struct scst_cmd *cmd, *tc;
-+ spin_lock_irq(&scst_tm_dbg_lock);
-+ list_for_each_entry_safe_reverse(cmd, tc,
-+ &tm_dbg_delayed_cmd_list, cmd_list_entry) {
-+ TRACE_MGMT_DBG("Releasing timed cmd %p (tag %llu), "
-+ "delayed_cmds_count=%d", cmd, cmd->tag,
-+ tm_dbg_delayed_cmds_count);
-+ spin_lock(&cmd->cmd_threads->cmd_list_lock);
-+ list_move(&cmd->cmd_list_entry,
-+ &cmd->cmd_threads->active_cmd_list);
-+ spin_unlock(&cmd->cmd_threads->cmd_list_lock);
-+ }
-+ tm_dbg_flags.tm_dbg_release = 0;
-+ spin_unlock_irq(&scst_tm_dbg_lock);
-+ }
-+}
-+
-+/* Called under scst_tm_dbg_lock */
-+static void tm_dbg_change_state(void)
-+{
-+ tm_dbg_flags.tm_dbg_blocked = 0;
-+ if (--tm_dbg_on_state_passes == 0) {
-+ switch (tm_dbg_state) {
-+ case TM_DBG_STATE_ABORT:
-+ TRACE_MGMT_DBG("%s", "Changing "
-+ "tm_dbg_state to RESET");
-+ tm_dbg_state = TM_DBG_STATE_RESET;
-+ tm_dbg_flags.tm_dbg_blocked = 0;
-+ break;
-+ case TM_DBG_STATE_RESET:
-+ case TM_DBG_STATE_OFFLINE:
-+#ifdef CONFIG_SCST_TM_DBG_GO_OFFLINE
-+ TRACE_MGMT_DBG("%s", "Changing "
-+ "tm_dbg_state to OFFLINE");
-+ tm_dbg_state = TM_DBG_STATE_OFFLINE;
-+#else
-+ TRACE_MGMT_DBG("%s", "Changing "
-+ "tm_dbg_state to ABORT");
-+ tm_dbg_state = TM_DBG_STATE_ABORT;
-+#endif
-+ break;
-+ default:
-+ BUG();
-+ }
-+ tm_dbg_on_state_passes =
-+ tm_dbg_on_state_num_passes[tm_dbg_state];
-+ }
-+
-+ TRACE_MGMT_DBG("%s", "Deleting timer");
-+ del_timer_sync(&tm_dbg_timer);
-+ return;
-+}
-+
-+/* No locks */
-+int tm_dbg_check_cmd(struct scst_cmd *cmd)
-+{
-+ int res = 0;
-+ unsigned long flags;
-+
-+ if (cmd->tm_dbg_immut)
-+ goto out;
-+
-+ if (cmd->tm_dbg_delayed) {
-+ spin_lock_irqsave(&scst_tm_dbg_lock, flags);
-+ TRACE_MGMT_DBG("Processing delayed cmd %p (tag %llu), "
-+ "delayed_cmds_count=%d", cmd, cmd->tag,
-+ tm_dbg_delayed_cmds_count);
-+
-+ cmd->tm_dbg_immut = 1;
-+ tm_dbg_delayed_cmds_count--;
-+ if ((tm_dbg_delayed_cmds_count == 0) &&
-+ (tm_dbg_state == TM_DBG_STATE_ABORT))
-+ tm_dbg_change_state();
-+ spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
-+ } else if (cmd->tgt_dev && (tm_dbg_tgt_dev == cmd->tgt_dev)) {
-+		/* Delay every 50th command */
-+ spin_lock_irqsave(&scst_tm_dbg_lock, flags);
-+ if (tm_dbg_flags.tm_dbg_blocked ||
-+ (++tm_dbg_passed_cmds_count % 50) == 0) {
-+ tm_dbg_delay_cmd(cmd);
-+ res = 1;
-+ } else
-+ cmd->tm_dbg_immut = 1;
-+ spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
-+ }
-+
-+out:
-+ return res;
-+}
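The gate above delays every 50th command sent to the LUN under TM
debugging (and every command while tm_dbg_blocked is set). Below is a
minimal standalone sketch of the same modulo gating; all names are
invented for illustration, and the real code protects the counter and
flag with scst_tm_dbg_lock:

#include <stdbool.h>
#include <stdio.h>

#define INJECT_EVERY_NTH 50	/* hypothetical stand-in for the "% 50" */

static unsigned long passed_cmds_count;
static bool blocked;

/* Returns true when the caller should divert the command for testing */
static bool should_delay_cmd(void)
{
	return blocked || (++passed_cmds_count % INJECT_EVERY_NTH) == 0;
}

int main(void)
{
	for (int i = 1; i <= 120; i++)
		if (should_delay_cmd())
			printf("delaying command #%d\n", i);
	return 0;
}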
-+
-+/* No locks */
-+void tm_dbg_release_cmd(struct scst_cmd *cmd)
-+{
-+ struct scst_cmd *c;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&scst_tm_dbg_lock, flags);
-+ list_for_each_entry(c, &tm_dbg_delayed_cmd_list,
-+ cmd_list_entry) {
-+ if (c == cmd) {
-+ TRACE_MGMT_DBG("Abort request for "
-+ "delayed cmd %p (tag=%llu), moving it to "
-+ "active cmd list (delayed_cmds_count=%d)",
-+ c, c->tag, tm_dbg_delayed_cmds_count);
-+
-+ if (!test_bit(SCST_CMD_ABORTED_OTHER,
-+ &cmd->cmd_flags)) {
-+				/* Test how completed commands are handled */
-+ if (((scst_random() % 10) == 5)) {
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(
-+ scst_sense_hardw_error));
-+ /* It's completed now */
-+ }
-+ }
-+
-+ spin_lock(&cmd->cmd_threads->cmd_list_lock);
-+ list_move(&c->cmd_list_entry,
-+ &c->cmd_threads->active_cmd_list);
-+ wake_up(&c->cmd_threads->cmd_list_waitQ);
-+ spin_unlock(&cmd->cmd_threads->cmd_list_lock);
-+ break;
-+ }
-+ }
-+ spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
-+ return;
-+}
-+
-+/* Might be called under scst_mutex */
-+void tm_dbg_task_mgmt(struct scst_device *dev, const char *fn, int force)
-+{
-+ unsigned long flags;
-+
-+ if (dev != NULL) {
-+ if (tm_dbg_tgt_dev == NULL)
-+ goto out;
-+
-+ if (tm_dbg_tgt_dev->dev != dev)
-+ goto out;
-+ }
-+
-+ spin_lock_irqsave(&scst_tm_dbg_lock, flags);
-+ if ((tm_dbg_state != TM_DBG_STATE_OFFLINE) || force) {
-+ TRACE_MGMT_DBG("%s: freeing %d delayed cmds", fn,
-+ tm_dbg_delayed_cmds_count);
-+ tm_dbg_change_state();
-+ tm_dbg_flags.tm_dbg_release = 1;
-+ /*
-+ * Used to make sure that all woken up threads see the new
-+ * value.
-+ */
-+ smp_wmb();
-+ if (tm_dbg_tgt_dev != NULL)
-+ wake_up_all(&tm_dbg_tgt_dev->active_cmd_threads->cmd_list_waitQ);
-+ } else {
-+ TRACE_MGMT_DBG("%s: while OFFLINE state, doing nothing", fn);
-+ }
-+ spin_unlock_irqrestore(&scst_tm_dbg_lock, flags);
-+
-+out:
-+ return;
-+}
-+
-+int tm_dbg_is_release(void)
-+{
-+ return tm_dbg_flags.tm_dbg_release;
-+}
-+#endif /* CONFIG_SCST_DEBUG_TM */
-+
-+#ifdef CONFIG_SCST_DEBUG_SN
-+void scst_check_debug_sn(struct scst_cmd *cmd)
-+{
-+ static DEFINE_SPINLOCK(lock);
-+ static int type;
-+ static int cnt;
-+ unsigned long flags;
-+ int old = cmd->queue_type;
-+
-+ spin_lock_irqsave(&lock, flags);
-+
-+ if (cnt == 0) {
-+ if ((scst_random() % 1000) == 500) {
-+ if ((scst_random() % 3) == 1)
-+ type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
-+ else
-+ type = SCST_CMD_QUEUE_ORDERED;
-+ do {
-+ cnt = scst_random() % 10;
-+ } while (cnt == 0);
-+ } else
-+ goto out_unlock;
-+ }
-+
-+ cmd->queue_type = type;
-+ cnt--;
-+
-+ if (((scst_random() % 1000) == 750))
-+ cmd->queue_type = SCST_CMD_QUEUE_ORDERED;
-+ else if (((scst_random() % 1000) == 751))
-+ cmd->queue_type = SCST_CMD_QUEUE_HEAD_OF_QUEUE;
-+ else if (((scst_random() % 1000) == 752))
-+ cmd->queue_type = SCST_CMD_QUEUE_SIMPLE;
-+
-+ TRACE_SN("DbgSN changed cmd %p: %d/%d (cnt %d)", cmd, old,
-+ cmd->queue_type, cnt);
-+
-+out_unlock:
-+ spin_unlock_irqrestore(&lock, flags);
-+ return;
-+}
-+#endif /* CONFIG_SCST_DEBUG_SN */
-+
-+#ifdef CONFIG_SCST_MEASURE_LATENCY
-+
-+static uint64_t scst_get_nsec(void)
-+{
-+ struct timespec ts;
-+ ktime_get_ts(&ts);
-+ return (uint64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
-+}
-+
-+void scst_set_start_time(struct scst_cmd *cmd)
-+{
-+ cmd->start = scst_get_nsec();
-+ TRACE_DBG("cmd %p: start %lld", cmd, cmd->start);
-+}
-+
-+void scst_set_cur_start(struct scst_cmd *cmd)
-+{
-+ cmd->curr_start = scst_get_nsec();
-+ TRACE_DBG("cmd %p: cur_start %lld", cmd, cmd->curr_start);
-+}
-+
-+void scst_set_parse_time(struct scst_cmd *cmd)
-+{
-+ cmd->parse_time += scst_get_nsec() - cmd->curr_start;
-+ TRACE_DBG("cmd %p: parse_time %lld", cmd, cmd->parse_time);
-+}
-+
-+void scst_set_alloc_buf_time(struct scst_cmd *cmd)
-+{
-+ cmd->alloc_buf_time += scst_get_nsec() - cmd->curr_start;
-+ TRACE_DBG("cmd %p: alloc_buf_time %lld", cmd, cmd->alloc_buf_time);
-+}
-+
-+void scst_set_restart_waiting_time(struct scst_cmd *cmd)
-+{
-+ cmd->restart_waiting_time += scst_get_nsec() - cmd->curr_start;
-+ TRACE_DBG("cmd %p: restart_waiting_time %lld", cmd,
-+ cmd->restart_waiting_time);
-+}
-+
-+void scst_set_rdy_to_xfer_time(struct scst_cmd *cmd)
-+{
-+ cmd->rdy_to_xfer_time += scst_get_nsec() - cmd->curr_start;
-+ TRACE_DBG("cmd %p: rdy_to_xfer_time %lld", cmd, cmd->rdy_to_xfer_time);
-+}
-+
-+void scst_set_pre_exec_time(struct scst_cmd *cmd)
-+{
-+ cmd->pre_exec_time += scst_get_nsec() - cmd->curr_start;
-+ TRACE_DBG("cmd %p: pre_exec_time %lld", cmd, cmd->pre_exec_time);
-+}
-+
-+void scst_set_exec_time(struct scst_cmd *cmd)
-+{
-+ cmd->exec_time += scst_get_nsec() - cmd->curr_start;
-+ TRACE_DBG("cmd %p: exec_time %lld", cmd, cmd->exec_time);
-+}
-+
-+void scst_set_dev_done_time(struct scst_cmd *cmd)
-+{
-+ cmd->dev_done_time += scst_get_nsec() - cmd->curr_start;
-+ TRACE_DBG("cmd %p: dev_done_time %lld", cmd, cmd->dev_done_time);
-+}
-+
-+void scst_set_xmit_time(struct scst_cmd *cmd)
-+{
-+ cmd->xmit_time += scst_get_nsec() - cmd->curr_start;
-+ TRACE_DBG("cmd %p: xmit_time %lld", cmd, cmd->xmit_time);
-+}
-+
-+void scst_set_tgt_on_free_time(struct scst_cmd *cmd)
-+{
-+ cmd->tgt_on_free_time += scst_get_nsec() - cmd->curr_start;
-+ TRACE_DBG("cmd %p: tgt_on_free_time %lld", cmd, cmd->tgt_on_free_time);
-+}
-+
-+void scst_set_dev_on_free_time(struct scst_cmd *cmd)
-+{
-+ cmd->dev_on_free_time += scst_get_nsec() - cmd->curr_start;
-+ TRACE_DBG("cmd %p: dev_on_free_time %lld", cmd, cmd->dev_on_free_time);
-+}
-+
-+void scst_update_lat_stats(struct scst_cmd *cmd)
-+{
-+ uint64_t finish, scst_time, tgt_time, dev_time;
-+ struct scst_session *sess = cmd->sess;
-+ int data_len;
-+ int i;
-+ struct scst_ext_latency_stat *latency_stat, *dev_latency_stat;
-+
-+ finish = scst_get_nsec();
-+
-+ /* Determine the IO size for extended latency statistics */
-+ data_len = cmd->bufflen;
-+ i = SCST_LATENCY_STAT_INDEX_OTHER;
-+ if (data_len <= SCST_IO_SIZE_THRESHOLD_SMALL)
-+ i = SCST_LATENCY_STAT_INDEX_SMALL;
-+ else if (data_len <= SCST_IO_SIZE_THRESHOLD_MEDIUM)
-+ i = SCST_LATENCY_STAT_INDEX_MEDIUM;
-+ else if (data_len <= SCST_IO_SIZE_THRESHOLD_LARGE)
-+ i = SCST_LATENCY_STAT_INDEX_LARGE;
-+ else if (data_len <= SCST_IO_SIZE_THRESHOLD_VERY_LARGE)
-+ i = SCST_LATENCY_STAT_INDEX_VERY_LARGE;
-+ latency_stat = &sess->sess_latency_stat[i];
-+ if (cmd->tgt_dev != NULL)
-+ dev_latency_stat = &cmd->tgt_dev->dev_latency_stat[i];
-+ else
-+ dev_latency_stat = NULL;
-+
-+ /* Calculate the latencies */
-+ scst_time = finish - cmd->start - (cmd->parse_time +
-+ cmd->alloc_buf_time + cmd->restart_waiting_time +
-+ cmd->rdy_to_xfer_time + cmd->pre_exec_time +
-+ cmd->exec_time + cmd->dev_done_time + cmd->xmit_time +
-+ cmd->tgt_on_free_time + cmd->dev_on_free_time);
-+ tgt_time = cmd->alloc_buf_time + cmd->restart_waiting_time +
-+ cmd->rdy_to_xfer_time + cmd->pre_exec_time +
-+ cmd->xmit_time + cmd->tgt_on_free_time;
-+ dev_time = cmd->parse_time + cmd->exec_time + cmd->dev_done_time +
-+ cmd->dev_on_free_time;
-+
-+ spin_lock_bh(&sess->lat_lock);
-+
-+ /* Save the basic latency information */
-+ sess->scst_time += scst_time;
-+ sess->tgt_time += tgt_time;
-+ sess->dev_time += dev_time;
-+ sess->processed_cmds++;
-+
-+ if ((sess->min_scst_time == 0) ||
-+ (sess->min_scst_time > scst_time))
-+ sess->min_scst_time = scst_time;
-+ if ((sess->min_tgt_time == 0) ||
-+ (sess->min_tgt_time > tgt_time))
-+ sess->min_tgt_time = tgt_time;
-+ if ((sess->min_dev_time == 0) ||
-+ (sess->min_dev_time > dev_time))
-+ sess->min_dev_time = dev_time;
-+
-+ if (sess->max_scst_time < scst_time)
-+ sess->max_scst_time = scst_time;
-+ if (sess->max_tgt_time < tgt_time)
-+ sess->max_tgt_time = tgt_time;
-+ if (sess->max_dev_time < dev_time)
-+ sess->max_dev_time = dev_time;
-+
-+ /* Save the extended latency information */
-+ if (cmd->data_direction & SCST_DATA_READ) {
-+ latency_stat->scst_time_rd += scst_time;
-+ latency_stat->tgt_time_rd += tgt_time;
-+ latency_stat->dev_time_rd += dev_time;
-+ latency_stat->processed_cmds_rd++;
-+
-+ if ((latency_stat->min_scst_time_rd == 0) ||
-+ (latency_stat->min_scst_time_rd > scst_time))
-+ latency_stat->min_scst_time_rd = scst_time;
-+ if ((latency_stat->min_tgt_time_rd == 0) ||
-+ (latency_stat->min_tgt_time_rd > tgt_time))
-+ latency_stat->min_tgt_time_rd = tgt_time;
-+ if ((latency_stat->min_dev_time_rd == 0) ||
-+ (latency_stat->min_dev_time_rd > dev_time))
-+ latency_stat->min_dev_time_rd = dev_time;
-+
-+ if (latency_stat->max_scst_time_rd < scst_time)
-+ latency_stat->max_scst_time_rd = scst_time;
-+ if (latency_stat->max_tgt_time_rd < tgt_time)
-+ latency_stat->max_tgt_time_rd = tgt_time;
-+ if (latency_stat->max_dev_time_rd < dev_time)
-+ latency_stat->max_dev_time_rd = dev_time;
-+
-+ if (dev_latency_stat != NULL) {
-+ dev_latency_stat->scst_time_rd += scst_time;
-+ dev_latency_stat->tgt_time_rd += tgt_time;
-+ dev_latency_stat->dev_time_rd += dev_time;
-+ dev_latency_stat->processed_cmds_rd++;
-+
-+ if ((dev_latency_stat->min_scst_time_rd == 0) ||
-+ (dev_latency_stat->min_scst_time_rd > scst_time))
-+ dev_latency_stat->min_scst_time_rd = scst_time;
-+ if ((dev_latency_stat->min_tgt_time_rd == 0) ||
-+ (dev_latency_stat->min_tgt_time_rd > tgt_time))
-+ dev_latency_stat->min_tgt_time_rd = tgt_time;
-+ if ((dev_latency_stat->min_dev_time_rd == 0) ||
-+ (dev_latency_stat->min_dev_time_rd > dev_time))
-+ dev_latency_stat->min_dev_time_rd = dev_time;
-+
-+ if (dev_latency_stat->max_scst_time_rd < scst_time)
-+ dev_latency_stat->max_scst_time_rd = scst_time;
-+ if (dev_latency_stat->max_tgt_time_rd < tgt_time)
-+ dev_latency_stat->max_tgt_time_rd = tgt_time;
-+ if (dev_latency_stat->max_dev_time_rd < dev_time)
-+ dev_latency_stat->max_dev_time_rd = dev_time;
-+ }
-+ } else if (cmd->data_direction & SCST_DATA_WRITE) {
-+ latency_stat->scst_time_wr += scst_time;
-+ latency_stat->tgt_time_wr += tgt_time;
-+ latency_stat->dev_time_wr += dev_time;
-+ latency_stat->processed_cmds_wr++;
-+
-+ if ((latency_stat->min_scst_time_wr == 0) ||
-+ (latency_stat->min_scst_time_wr > scst_time))
-+ latency_stat->min_scst_time_wr = scst_time;
-+ if ((latency_stat->min_tgt_time_wr == 0) ||
-+ (latency_stat->min_tgt_time_wr > tgt_time))
-+ latency_stat->min_tgt_time_wr = tgt_time;
-+ if ((latency_stat->min_dev_time_wr == 0) ||
-+ (latency_stat->min_dev_time_wr > dev_time))
-+ latency_stat->min_dev_time_wr = dev_time;
-+
-+ if (latency_stat->max_scst_time_wr < scst_time)
-+ latency_stat->max_scst_time_wr = scst_time;
-+ if (latency_stat->max_tgt_time_wr < tgt_time)
-+ latency_stat->max_tgt_time_wr = tgt_time;
-+ if (latency_stat->max_dev_time_wr < dev_time)
-+ latency_stat->max_dev_time_wr = dev_time;
-+
-+ if (dev_latency_stat != NULL) {
-+ dev_latency_stat->scst_time_wr += scst_time;
-+ dev_latency_stat->tgt_time_wr += tgt_time;
-+ dev_latency_stat->dev_time_wr += dev_time;
-+ dev_latency_stat->processed_cmds_wr++;
-+
-+ if ((dev_latency_stat->min_scst_time_wr == 0) ||
-+ (dev_latency_stat->min_scst_time_wr > scst_time))
-+ dev_latency_stat->min_scst_time_wr = scst_time;
-+ if ((dev_latency_stat->min_tgt_time_wr == 0) ||
-+ (dev_latency_stat->min_tgt_time_wr > tgt_time))
-+ dev_latency_stat->min_tgt_time_wr = tgt_time;
-+ if ((dev_latency_stat->min_dev_time_wr == 0) ||
-+ (dev_latency_stat->min_dev_time_wr > dev_time))
-+ dev_latency_stat->min_dev_time_wr = dev_time;
-+
-+ if (dev_latency_stat->max_scst_time_wr < scst_time)
-+ dev_latency_stat->max_scst_time_wr = scst_time;
-+ if (dev_latency_stat->max_tgt_time_wr < tgt_time)
-+ dev_latency_stat->max_tgt_time_wr = tgt_time;
-+ if (dev_latency_stat->max_dev_time_wr < dev_time)
-+ dev_latency_stat->max_dev_time_wr = dev_time;
-+ }
-+ }
-+
-+ spin_unlock_bh(&sess->lat_lock);
-+
-+ TRACE_DBG("cmd %p: finish %lld, scst_time %lld, "
-+ "tgt_time %lld, dev_time %lld", cmd, finish, scst_time,
-+ tgt_time, dev_time);
-+ return;
-+}
-+
-+#endif /* CONFIG_SCST_MEASURE_LATENCY */
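scst_update_lat_stats() splits a command's wall-clock time into three
buckets: phases attributed to the target driver (tgt_time), phases
attributed to the device handler (dev_time), and the untracked remainder
charged to the SCST core (scst_time = total minus all tracked component
times). A self-contained sketch of that bookkeeping follows; the
nanosecond values are invented for the example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 0, finish = 1000000;	/* 1 ms total, hypothetical */
	/* Per-phase times (ns), as accumulated by the scst_set_*_time() hooks */
	uint64_t parse = 50000, alloc_buf = 20000, restart_waiting = 10000;
	uint64_t rdy_to_xfer = 100000, pre_exec = 30000, exec = 600000;
	uint64_t dev_done = 40000, xmit = 80000;
	uint64_t tgt_on_free = 5000, dev_on_free = 5000;

	uint64_t tracked = parse + alloc_buf + restart_waiting + rdy_to_xfer +
		pre_exec + exec + dev_done + xmit + tgt_on_free + dev_on_free;
	/* SCST core overhead is whatever no phase accounted for */
	uint64_t scst_time = (finish - start) - tracked;
	/* Phases spent in the target driver */
	uint64_t tgt_time = alloc_buf + restart_waiting + rdy_to_xfer +
		pre_exec + xmit + tgt_on_free;
	/* Phases spent in the device handler */
	uint64_t dev_time = parse + exec + dev_done + dev_on_free;

	printf("scst %llu ns, tgt %llu ns, dev %llu ns\n",
	       (unsigned long long)scst_time, (unsigned long long)tgt_time,
	       (unsigned long long)dev_time);
	return 0;
}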
-diff -uprN orig/linux-3.2/drivers/scst/scst_pres.h linux-3.2/drivers/scst/scst_pres.h
---- orig/linux-3.2/drivers/scst/scst_pres.h
-+++ linux-3.2/drivers/scst/scst_pres.h
-@@ -0,0 +1,234 @@
-+/*
-+ * scst_pres.c
-+ *
-+ * Copyright (C) 2009 - 2010 Alexey Obitotskiy <alexeyo1@open-e.com>
-+ * Copyright (C) 2009 - 2010 Open-E, Inc.
-+ * Copyright (C) 2009 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#ifndef SCST_PRES_H_
-+#define SCST_PRES_H_
-+
-+#include <linux/delay.h>
-+
-+#define PR_REGISTER 0x00
-+#define PR_RESERVE 0x01
-+#define PR_RELEASE 0x02
-+#define PR_CLEAR 0x03
-+#define PR_PREEMPT 0x04
-+#define PR_PREEMPT_AND_ABORT 0x05
-+#define PR_REGISTER_AND_IGNORE 0x06
-+#define PR_REGISTER_AND_MOVE 0x07
-+
-+#define PR_READ_KEYS 0x00
-+#define PR_READ_RESERVATION 0x01
-+#define PR_REPORT_CAPS 0x02
-+#define PR_READ_FULL_STATUS 0x03
-+
-+#define TYPE_UNSPECIFIED (-1)
-+#define TYPE_WRITE_EXCLUSIVE 0x01
-+#define TYPE_EXCLUSIVE_ACCESS 0x03
-+#define TYPE_WRITE_EXCLUSIVE_REGONLY 0x05
-+#define TYPE_EXCLUSIVE_ACCESS_REGONLY 0x06
-+#define TYPE_WRITE_EXCLUSIVE_ALL_REG 0x07
-+#define TYPE_EXCLUSIVE_ACCESS_ALL_REG 0x08
-+
-+#define SCOPE_LU 0x00
-+
-+static inline void scst_inc_pr_readers_count(struct scst_cmd *cmd,
-+ bool locked)
-+{
-+ struct scst_device *dev = cmd->dev;
-+
-+ EXTRACHECKS_BUG_ON(cmd->dec_pr_readers_count_needed);
-+
-+ if (!locked)
-+ spin_lock_bh(&dev->dev_lock);
-+
-+#ifdef CONFIG_SMP
-+ EXTRACHECKS_BUG_ON(!spin_is_locked(&dev->dev_lock));
-+#endif
-+
-+ dev->pr_readers_count++;
-+ cmd->dec_pr_readers_count_needed = 1;
-+ TRACE_DBG("New inc pr_readers_count %d (cmd %p)", dev->pr_readers_count,
-+ cmd);
-+
-+ if (!locked)
-+ spin_unlock_bh(&dev->dev_lock);
-+ return;
-+}
-+
-+static inline void scst_dec_pr_readers_count(struct scst_cmd *cmd,
-+ bool locked)
-+{
-+ struct scst_device *dev = cmd->dev;
-+
-+ if (unlikely(!cmd->dec_pr_readers_count_needed)) {
-+ PRINT_ERROR("scst_check_local_events() should not be called "
-+ "twice (cmd %p, op %x)! Use "
-+ "scst_pre_check_local_events() instead.", cmd,
-+ cmd->cdb[0]);
-+ WARN_ON(1);
-+ goto out;
-+ }
-+
-+ if (!locked)
-+ spin_lock_bh(&dev->dev_lock);
-+
-+#ifdef CONFIG_SMP
-+ EXTRACHECKS_BUG_ON(!spin_is_locked(&dev->dev_lock));
-+#endif
-+
-+ dev->pr_readers_count--;
-+ cmd->dec_pr_readers_count_needed = 0;
-+ TRACE_DBG("New dec pr_readers_count %d (cmd %p)", dev->pr_readers_count,
-+ cmd);
-+
-+ if (!locked)
-+ spin_unlock_bh(&dev->dev_lock);
-+
-+out:
-+ EXTRACHECKS_BUG_ON(dev->pr_readers_count < 0);
-+ return;
-+}
-+
-+static inline void scst_reset_requeued_cmd(struct scst_cmd *cmd)
-+{
-+ TRACE_DBG("Reset requeued cmd %p (op %x)", cmd, cmd->cdb[0]);
-+ scst_inc_pr_readers_count(cmd, false);
-+ cmd->check_local_events_once_done = 0;
-+ return;
-+}
-+
-+static inline bool scst_pr_type_valid(uint8_t type)
-+{
-+ switch (type) {
-+ case TYPE_WRITE_EXCLUSIVE:
-+ case TYPE_EXCLUSIVE_ACCESS:
-+ case TYPE_WRITE_EXCLUSIVE_REGONLY:
-+ case TYPE_EXCLUSIVE_ACCESS_REGONLY:
-+ case TYPE_WRITE_EXCLUSIVE_ALL_REG:
-+ case TYPE_EXCLUSIVE_ACCESS_ALL_REG:
-+ return true;
-+ default:
-+ return false;
-+ }
-+}
-+
-+static inline bool scst_pr_read_lock(struct scst_cmd *cmd)
-+{
-+ struct scst_device *dev = cmd->dev;
-+ bool unlock = false;
-+
-+ TRACE_ENTRY();
-+
-+ smp_mb(); /* to sync with scst_pr_write_lock() */
-+ if (unlikely(dev->pr_writer_active)) {
-+ unlock = true;
-+ scst_dec_pr_readers_count(cmd, false);
-+ mutex_lock(&dev->dev_pr_mutex);
-+ }
-+
-+ TRACE_EXIT_RES(unlock);
-+ return unlock;
-+}
-+
-+static inline void scst_pr_read_unlock(struct scst_cmd *cmd, bool unlock)
-+{
-+ struct scst_device *dev = cmd->dev;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(unlock))
-+ mutex_unlock(&dev->dev_pr_mutex);
-+ else
-+ scst_dec_pr_readers_count(cmd, false);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static inline void scst_pr_write_lock(struct scst_device *dev)
-+{
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&dev->dev_pr_mutex);
-+
-+ dev->pr_writer_active = 1;
-+ /* to sync with scst_pr_read_lock() and unlock() */
-+ smp_mb();
-+
-+ while (true) {
-+ int readers;
-+ spin_lock_bh(&dev->dev_lock);
-+ readers = dev->pr_readers_count;
-+ spin_unlock_bh(&dev->dev_lock);
-+ if (readers == 0)
-+ break;
-+ TRACE_DBG("Waiting for %d readers (dev %p)", readers, dev);
-+ msleep(1);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static inline void scst_pr_write_unlock(struct scst_device *dev)
-+{
-+ TRACE_ENTRY();
-+
-+ dev->pr_writer_active = 0;
-+ mutex_unlock(&dev->dev_pr_mutex);
-+
-+ TRACE_EXIT();
-+ return;
-+}
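These helpers hand-roll a write-preferring reader/writer scheme: readers
bump pr_readers_count, a writer takes dev_pr_mutex, raises
pr_writer_active and polls until the reader count drains, and a reader
that observes the writer flag backs out its count and falls back to the
mutex. A compressed userspace analogue with C11 atomics and pthreads
follows (all names invented; the kernel version additionally relies on
the smp_mb() pairing shown above). Compile with -pthread:

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_mutex_t pr_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_int readers_count;
static atomic_bool writer_active;

/* Returns true if the reader fell back to the mutex (pass to read_unlock) */
static bool read_lock(void)
{
	atomic_fetch_add(&readers_count, 1);
	if (atomic_load(&writer_active)) {
		atomic_fetch_sub(&readers_count, 1);
		pthread_mutex_lock(&pr_mutex);	/* slow path: writer present */
		return true;
	}
	return false;
}

static void read_unlock(bool fell_back)
{
	if (fell_back)
		pthread_mutex_unlock(&pr_mutex);
	else
		atomic_fetch_sub(&readers_count, 1);
}

static void write_lock(void)
{
	pthread_mutex_lock(&pr_mutex);
	atomic_store(&writer_active, true);
	while (atomic_load(&readers_count) != 0)
		sched_yield();	/* the kernel code msleep()s instead */
}

static void write_unlock(void)
{
	atomic_store(&writer_active, false);
	pthread_mutex_unlock(&pr_mutex);
}

int main(void)
{
	bool fell_back = read_lock();
	read_unlock(fell_back);
	write_lock();
	write_unlock();
	return 0;
}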
-+
-+int scst_pr_init_dev(struct scst_device *dev);
-+void scst_pr_clear_dev(struct scst_device *dev);
-+
-+int scst_pr_init_tgt_dev(struct scst_tgt_dev *tgt_dev);
-+void scst_pr_clear_tgt_dev(struct scst_tgt_dev *tgt_dev);
-+
-+bool scst_pr_crh_case(struct scst_cmd *cmd);
-+bool scst_pr_is_cmd_allowed(struct scst_cmd *cmd);
-+
-+void scst_pr_register(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
-+void scst_pr_register_and_ignore(struct scst_cmd *cmd, uint8_t *buffer,
-+ int buffer_size);
-+void scst_pr_register_and_move(struct scst_cmd *cmd, uint8_t *buffer,
-+ int buffer_size);
-+void scst_pr_reserve(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
-+void scst_pr_release(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
-+void scst_pr_clear(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
-+void scst_pr_preempt(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
-+void scst_pr_preempt_and_abort(struct scst_cmd *cmd, uint8_t *buffer,
-+ int buffer_size);
-+
-+void scst_pr_read_keys(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
-+void scst_pr_read_reservation(struct scst_cmd *cmd, uint8_t *buffer,
-+ int buffer_size);
-+void scst_pr_report_caps(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size);
-+void scst_pr_read_full_status(struct scst_cmd *cmd, uint8_t *buffer,
-+ int buffer_size);
-+
-+void scst_pr_sync_device_file(struct scst_tgt_dev *tgt_dev, struct scst_cmd *cmd);
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+void scst_pr_dump_prs(struct scst_device *dev, bool force);
-+#else
-+static inline void scst_pr_dump_prs(struct scst_device *dev, bool force) {}
-+#endif
-+
-+#endif /* SCST_PRES_H_ */
-diff -uprN orig/linux-3.2/drivers/scst/scst_pres.c linux-3.2/drivers/scst/scst_pres.c
---- orig/linux-3.2/drivers/scst/scst_pres.c
-+++ linux-3.2/drivers/scst/scst_pres.c
-@@ -0,0 +1,2636 @@
-+/*
-+ * scst_pres.c
-+ *
-+ * Copyright (C) 2009 - 2010 Alexey Obitotskiy <alexeyo1@open-e.com>
-+ * Copyright (C) 2009 - 2010 Open-E, Inc.
-+ * Copyright (C) 2009 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <linux/slab.h>
-+#include <linux/sched.h>
-+#include <linux/unistd.h>
-+#include <linux/string.h>
-+#include <linux/kthread.h>
-+#include <linux/delay.h>
-+#include <linux/time.h>
-+#include <linux/ctype.h>
-+#include <asm/byteorder.h>
-+#include <linux/syscalls.h>
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/fcntl.h>
-+#include <linux/uaccess.h>
-+#include <linux/namei.h>
-+#include <linux/vmalloc.h>
-+#include <asm/unaligned.h>
-+
-+#include <scst/scst.h>
-+#include <scst/scst_const.h>
-+#include "scst_priv.h"
-+#include "scst_pres.h"
-+
-+#define SCST_PR_ROOT_ENTRY "pr"
-+#define SCST_PR_FILE_SIGN 0xBBEEEEAAEEBBDD77LLU
-+#define SCST_PR_FILE_VERSION 1LLU
-+
-+#define FILE_BUFFER_SIZE 512
-+
-+#ifndef isblank
-+#define isblank(c) ((c) == ' ' || (c) == '\t')
-+#endif
-+
-+static inline int tid_size(const uint8_t *tid)
-+{
-+ BUG_ON(tid == NULL);
-+
-+ if ((tid[0] & 0x0f) == SCSI_TRANSPORTID_PROTOCOLID_ISCSI)
-+ return be16_to_cpu(get_unaligned((__be16 *)&tid[2])) + 4;
-+ else
-+ return TID_COMMON_SIZE;
-+}
-+
-+/* Secure the TID by forcing a 0 into the last byte of NUL-terminated TIDs */
-+static inline void tid_secure(uint8_t *tid)
-+{
-+ if ((tid[0] & 0x0f) == SCSI_TRANSPORTID_PROTOCOLID_ISCSI) {
-+ int size = tid_size(tid);
-+ tid[size - 1] = '\0';
-+ }
-+
-+ return;
-+}
-+
-+/* Returns true if the TIDs are equal, false otherwise */
-+static bool tid_equal(const uint8_t *tid_a, const uint8_t *tid_b)
-+{
-+ int len;
-+
-+ if (tid_a == NULL || tid_b == NULL)
-+ return false;
-+
-+ if ((tid_a[0] & 0x0f) != (tid_b[0] & 0x0f)) {
-+ TRACE_DBG("%s", "Different protocol IDs");
-+ return false;
-+ }
-+
-+ if ((tid_a[0] & 0x0f) == SCSI_TRANSPORTID_PROTOCOLID_ISCSI) {
-+ const uint8_t tid_a_fmt = tid_a[0] & 0xc0;
-+ const uint8_t tid_b_fmt = tid_b[0] & 0xc0;
-+ int tid_a_len, tid_a_max = tid_size(tid_a) - 4;
-+ int tid_b_len, tid_b_max = tid_size(tid_b) - 4;
-+ int i;
-+
-+ tid_a += 4;
-+ tid_b += 4;
-+
-+ if (tid_a_fmt == 0x00)
-+ tid_a_len = strnlen(tid_a, tid_a_max);
-+ else if (tid_a_fmt == 0x40) {
-+ if (tid_a_fmt != tid_b_fmt) {
-+ uint8_t *p = strnchr(tid_a, tid_a_max, ',');
-+ if (p == NULL)
-+ goto out_error;
-+ tid_a_len = p - tid_a;
-+
-+ BUG_ON(tid_a_len > tid_a_max);
-+ BUG_ON(tid_a_len < 0);
-+ } else
-+ tid_a_len = strnlen(tid_a, tid_a_max);
-+ } else
-+ goto out_error;
-+
-+ if (tid_b_fmt == 0x00)
-+ tid_b_len = strnlen(tid_b, tid_b_max);
-+ else if (tid_b_fmt == 0x40) {
-+ if (tid_a_fmt != tid_b_fmt) {
-+ uint8_t *p = strnchr(tid_b, tid_b_max, ',');
-+ if (p == NULL)
-+ goto out_error;
-+ tid_b_len = p - tid_b;
-+
-+ BUG_ON(tid_b_len > tid_b_max);
-+ BUG_ON(tid_b_len < 0);
-+ } else
-+ tid_b_len = strnlen(tid_b, tid_b_max);
-+ } else
-+ goto out_error;
-+
-+ if (tid_a_len != tid_b_len)
-+ return false;
-+
-+ len = tid_a_len;
-+
-+		/* iSCSI names are case-insensitive */
-+ for (i = 0; i < len; i++)
-+ if (tolower(tid_a[i]) != tolower(tid_b[i]))
-+ return false;
-+ return true;
-+ } else
-+ len = TID_COMMON_SIZE;
-+
-+ return memcmp(tid_a, tid_b, len) == 0;
-+
-+out_error:
-+ PRINT_ERROR("%s", "Invalid initiator port transport id");
-+ return false;
-+}
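tid_equal() has to reconcile two iSCSI TransportID formats: format 0x00
carries just the iSCSI name, while format 0x40 carries "name,ISID"; when
the formats differ, only the part before the ',' is compared, and iSCSI
names compare case-insensitively. A trimmed-down userspace illustration
of the name comparison follows (hypothetical helpers, simplified to
always strip the ISID suffix, unlike the function above):

#include <ctype.h>
#include <stdbool.h>
#include <string.h>

/* Length of the iSCSI name, stopping at the ",ISID" suffix if present */
static size_t iscsi_name_len(const char *s, size_t max)
{
	const char *comma = memchr(s, ',', max);
	return comma ? (size_t)(comma - s) : strnlen(s, max);
}

static bool iscsi_names_equal(const char *a, size_t a_max,
			      const char *b, size_t b_max)
{
	size_t la = iscsi_name_len(a, a_max);
	size_t lb = iscsi_name_len(b, b_max);

	if (la != lb)
		return false;
	/* iSCSI names are case-insensitive */
	for (size_t i = 0; i < la; i++)
		if (tolower((unsigned char)a[i]) != tolower((unsigned char)b[i]))
			return false;
	return true;
}

int main(void)
{
	const char *a = "iqn.2005-03.org.example:storage";
	const char *b = "IQN.2005-03.ORG.EXAMPLE:storage,0x23d000002";
	return iscsi_names_equal(a, strlen(a), b, strlen(b)) ? 0 : 1;
}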
-+
-+/* Must be called under dev_pr_mutex */
-+static inline void scst_pr_set_holder(struct scst_device *dev,
-+ struct scst_dev_registrant *holder, uint8_t scope, uint8_t type)
-+{
-+ dev->pr_is_set = 1;
-+ dev->pr_scope = scope;
-+ dev->pr_type = type;
-+ if (dev->pr_type != TYPE_EXCLUSIVE_ACCESS_ALL_REG &&
-+ dev->pr_type != TYPE_WRITE_EXCLUSIVE_ALL_REG)
-+ dev->pr_holder = holder;
-+}
-+
-+/* Must be called under dev_pr_mutex */
-+static bool scst_pr_is_holder(struct scst_device *dev,
-+ struct scst_dev_registrant *reg)
-+{
-+ bool res = false;
-+
-+ TRACE_ENTRY();
-+
-+ if (!dev->pr_is_set)
-+ goto out;
-+
-+ if (dev->pr_type == TYPE_EXCLUSIVE_ACCESS_ALL_REG ||
-+ dev->pr_type == TYPE_WRITE_EXCLUSIVE_ALL_REG) {
-+ res = (reg != NULL);
-+ } else
-+ res = (dev->pr_holder == reg);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+
-+/* Must be called under dev_pr_mutex */
-+void scst_pr_dump_prs(struct scst_device *dev, bool force)
-+{
-+ if (!force) {
-+#if defined(CONFIG_SCST_DEBUG)
-+ if ((trace_flag & TRACE_PRES) == 0)
-+#endif
-+ goto out;
-+ }
-+
-+ PRINT_INFO("Persistent reservations for device %s:", dev->virt_name);
-+
-+ if (list_empty(&dev->dev_registrants_list))
-+ PRINT_INFO("%s", " No registrants");
-+ else {
-+ struct scst_dev_registrant *reg;
-+ int i = 0;
-+ list_for_each_entry(reg, &dev->dev_registrants_list,
-+ dev_registrants_list_entry) {
-+ PRINT_INFO(" [%d] registrant %s/%d, key %016llx "
-+ "(reg %p, tgt_dev %p)", i++,
-+ debug_transport_id_to_initiator_name(
-+ reg->transport_id),
-+ reg->rel_tgt_id, reg->key, reg, reg->tgt_dev);
-+ }
-+ }
-+
-+ if (dev->pr_is_set) {
-+ struct scst_dev_registrant *holder = dev->pr_holder;
-+ if (holder != NULL)
-+ PRINT_INFO("Reservation holder is %s/%d (key %016llx, "
-+ "scope %x, type %x, reg %p, tgt_dev %p)",
-+ debug_transport_id_to_initiator_name(
-+ holder->transport_id),
-+ holder->rel_tgt_id, holder->key, dev->pr_scope,
-+ dev->pr_type, holder, holder->tgt_dev);
-+ else
-+ PRINT_INFO("All registrants are reservation holders "
-+ "(scope %x, type %x)", dev->pr_scope,
-+ dev->pr_type);
-+ } else
-+ PRINT_INFO("%s", "Not reserved");
-+
-+out:
-+ return;
-+}
-+
-+#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
-+
-+/* dev_pr_mutex must be locked */
-+static void scst_pr_find_registrants_list_all(struct scst_device *dev,
-+ struct scst_dev_registrant *exclude_reg, struct list_head *list)
-+{
-+ struct scst_dev_registrant *reg;
-+
-+ TRACE_ENTRY();
-+
-+	TRACE_PR("Finding all registered records for device '%s', "
-+		"excluding reg with key %016llx",
-+		dev->virt_name, exclude_reg->key);
-+
-+ list_for_each_entry(reg, &dev->dev_registrants_list,
-+ dev_registrants_list_entry) {
-+ if (reg == exclude_reg)
-+ continue;
-+ TRACE_PR("Adding registrant %s/%d (%p) to find list (key %016llx)",
-+ debug_transport_id_to_initiator_name(reg->transport_id),
-+ reg->rel_tgt_id, reg, reg->key);
-+ list_add_tail(&reg->aux_list_entry, list);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* dev_pr_mutex must be locked */
-+static void scst_pr_find_registrants_list_key(struct scst_device *dev,
-+ __be64 key, struct list_head *list)
-+{
-+ struct scst_dev_registrant *reg;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_PR("Finding registrants for device '%s' with key %016llx",
-+ dev->virt_name, key);
-+
-+ list_for_each_entry(reg, &dev->dev_registrants_list,
-+ dev_registrants_list_entry) {
-+ if (reg->key == key) {
-+ TRACE_PR("Adding registrant %s/%d (%p) to the find "
-+ "list (key %016llx)",
-+ debug_transport_id_to_initiator_name(
-+ reg->transport_id),
-+ reg->rel_tgt_id, reg->tgt_dev, key);
-+ list_add_tail(&reg->aux_list_entry, list);
-+ }
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* dev_pr_mutex must be locked */
-+static struct scst_dev_registrant *scst_pr_find_reg(
-+ struct scst_device *dev, const uint8_t *transport_id,
-+ const uint16_t rel_tgt_id)
-+{
-+ struct scst_dev_registrant *reg, *res = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ list_for_each_entry(reg, &dev->dev_registrants_list,
-+ dev_registrants_list_entry) {
-+ if ((reg->rel_tgt_id == rel_tgt_id) &&
-+ tid_equal(reg->transport_id, transport_id)) {
-+ res = reg;
-+ break;
-+ }
-+ }
-+
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
-+
-+/* Must be called under dev_pr_mutex */
-+static void scst_pr_clear_reservation(struct scst_device *dev)
-+{
-+ TRACE_ENTRY();
-+
-+ WARN_ON(!dev->pr_is_set);
-+
-+ dev->pr_is_set = 0;
-+ dev->pr_scope = SCOPE_LU;
-+ dev->pr_type = TYPE_UNSPECIFIED;
-+
-+ dev->pr_holder = NULL;
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Must be called under dev_pr_mutex */
-+static void scst_pr_clear_holder(struct scst_device *dev)
-+{
-+ TRACE_ENTRY();
-+
-+ WARN_ON(!dev->pr_is_set);
-+
-+ if (dev->pr_type == TYPE_WRITE_EXCLUSIVE_ALL_REG ||
-+ dev->pr_type == TYPE_EXCLUSIVE_ACCESS_ALL_REG) {
-+ if (list_empty(&dev->dev_registrants_list))
-+ scst_pr_clear_reservation(dev);
-+ } else
-+ scst_pr_clear_reservation(dev);
-+
-+ dev->pr_holder = NULL;
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Must be called under dev_pr_mutex */
-+static struct scst_dev_registrant *scst_pr_add_registrant(
-+ struct scst_device *dev, const uint8_t *transport_id,
-+ const uint16_t rel_tgt_id, __be64 key,
-+ bool dev_lock_locked)
-+{
-+ struct scst_dev_registrant *reg;
-+ struct scst_tgt_dev *t;
-+ gfp_t gfp_flags = dev_lock_locked ? GFP_ATOMIC : GFP_KERNEL;
-+
-+ TRACE_ENTRY();
-+
-+ BUG_ON(dev == NULL);
-+ BUG_ON(transport_id == NULL);
-+
-+ TRACE_PR("Registering %s/%d (dev %s)",
-+ debug_transport_id_to_initiator_name(transport_id),
-+ rel_tgt_id, dev->virt_name);
-+
-+ reg = scst_pr_find_reg(dev, transport_id, rel_tgt_id);
-+ if (reg != NULL) {
-+		/*
-+		 * This can happen if a target driver creates more than one
-+		 * session from the same initiator to the same target.
-+		 */
-+ PRINT_ERROR("Registrant %p/%d (dev %s) already exists!", reg,
-+ rel_tgt_id, dev->virt_name);
-+ PRINT_BUFFER("TransportID", transport_id, 24);
-+ WARN_ON(1);
-+ reg = NULL;
-+ goto out;
-+ }
-+
-+ reg = kzalloc(sizeof(*reg), gfp_flags);
-+ if (reg == NULL) {
-+ PRINT_ERROR("%s", "Unable to allocate registration record");
-+ goto out;
-+ }
-+
-+ reg->transport_id = kmalloc(tid_size(transport_id), gfp_flags);
-+ if (reg->transport_id == NULL) {
-+ PRINT_ERROR("%s", "Unable to allocate initiator port "
-+ "transport id");
-+ goto out_free;
-+ }
-+ memcpy(reg->transport_id, transport_id, tid_size(transport_id));
-+
-+ reg->rel_tgt_id = rel_tgt_id;
-+ reg->key = key;
-+
-+ /*
-+ * We can't use scst_mutex here, because of the circular
-+ * locking dependency with dev_pr_mutex.
-+ */
-+ if (!dev_lock_locked)
-+ spin_lock_bh(&dev->dev_lock);
-+ list_for_each_entry(t, &dev->dev_tgt_dev_list, dev_tgt_dev_list_entry) {
-+ if (tid_equal(t->sess->transport_id, transport_id) &&
-+ (t->sess->tgt->rel_tgt_id == rel_tgt_id) &&
-+ (t->registrant == NULL)) {
-+ /*
-+ * We must assign here, because t can die
-+ * immediately after we release dev_lock.
-+ */
-+ TRACE_PR("Found tgt_dev %p", t);
-+ reg->tgt_dev = t;
-+ t->registrant = reg;
-+ break;
-+ }
-+ }
-+ if (!dev_lock_locked)
-+ spin_unlock_bh(&dev->dev_lock);
-+
-+ list_add_tail(&reg->dev_registrants_list_entry,
-+ &dev->dev_registrants_list);
-+
-+ TRACE_PR("Reg %p registered (dev %s, tgt_dev %p)", reg,
-+ dev->virt_name, reg->tgt_dev);
-+
-+out:
-+ TRACE_EXIT_HRES((unsigned long)reg);
-+ return reg;
-+
-+out_free:
-+ kfree(reg);
-+ reg = NULL;
-+ goto out;
-+}
-+
-+/* Must be called under dev_pr_mutex */
-+static void scst_pr_remove_registrant(struct scst_device *dev,
-+ struct scst_dev_registrant *reg)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_PR("Removing registrant %s/%d (reg %p, tgt_dev %p, key %016llx, "
-+ "dev %s)", debug_transport_id_to_initiator_name(reg->transport_id),
-+ reg->rel_tgt_id, reg, reg->tgt_dev, reg->key, dev->virt_name);
-+
-+ list_del(&reg->dev_registrants_list_entry);
-+
-+ if (scst_pr_is_holder(dev, reg))
-+ scst_pr_clear_holder(dev);
-+
-+ if (reg->tgt_dev)
-+ reg->tgt_dev->registrant = NULL;
-+
-+ kfree(reg->transport_id);
-+ kfree(reg);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Must be called under dev_pr_mutex */
-+static void scst_pr_send_ua_reg(struct scst_device *dev,
-+ struct scst_dev_registrant *reg,
-+ int key, int asc, int ascq)
-+{
-+ static uint8_t ua[SCST_STANDARD_SENSE_LEN];
-+
-+ TRACE_ENTRY();
-+
-+ scst_set_sense(ua, sizeof(ua), dev->d_sense, key, asc, ascq);
-+
-+ TRACE_PR("Queueing UA [%x %x %x]: registrant %s/%d (%p), tgt_dev %p, "
-+ "key %016llx", ua[2], ua[12], ua[13],
-+ debug_transport_id_to_initiator_name(reg->transport_id),
-+ reg->rel_tgt_id, reg, reg->tgt_dev, reg->key);
-+
-+ if (reg->tgt_dev)
-+ scst_check_set_UA(reg->tgt_dev, ua, sizeof(ua), 0);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Must be called under dev_pr_mutex */
-+static void scst_pr_send_ua_all(struct scst_device *dev,
-+ struct scst_dev_registrant *exclude_reg,
-+ int key, int asc, int ascq)
-+{
-+ struct scst_dev_registrant *reg;
-+
-+ TRACE_ENTRY();
-+
-+ list_for_each_entry(reg, &dev->dev_registrants_list,
-+ dev_registrants_list_entry) {
-+ if (reg != exclude_reg)
-+ scst_pr_send_ua_reg(dev, reg, key, asc, ascq);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Must be called under dev_pr_mutex */
-+static void scst_pr_abort_reg(struct scst_device *dev,
-+ struct scst_cmd *pr_cmd, struct scst_dev_registrant *reg)
-+{
-+ struct scst_session *sess;
-+ __be64 packed_lun;
-+ int rc;
-+
-+ TRACE_ENTRY();
-+
-+ if (reg->tgt_dev == NULL) {
-+ TRACE_PR("Registrant %s/%d (%p, key 0x%016llx) has no session",
-+ debug_transport_id_to_initiator_name(reg->transport_id),
-+ reg->rel_tgt_id, reg, reg->key);
-+ goto out;
-+ }
-+
-+ sess = reg->tgt_dev->sess;
-+
-+ TRACE_PR("Aborting %d commands for %s/%d (reg %p, key 0x%016llx, "
-+ "tgt_dev %p, sess %p)",
-+ atomic_read(&reg->tgt_dev->tgt_dev_cmd_count),
-+ debug_transport_id_to_initiator_name(reg->transport_id),
-+ reg->rel_tgt_id, reg, reg->key, reg->tgt_dev, sess);
-+
-+ packed_lun = scst_pack_lun(reg->tgt_dev->lun, sess->acg->addr_method);
-+
-+ rc = scst_rx_mgmt_fn_lun(sess, SCST_PR_ABORT_ALL,
-+ (uint8_t *)&packed_lun, sizeof(packed_lun), SCST_NON_ATOMIC,
-+ pr_cmd);
-+ if (rc != 0) {
-+ /*
-+		 * There's nothing more we can do here... Hopefully, it will
-+		 * never happen.
-+ */
-+ PRINT_ERROR("SCST_PR_ABORT_ALL failed %d (sess %p)",
-+ rc, sess);
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Abstracts vfs_unlink() across kernel versions, as far as possible */
-+static inline void scst_pr_vfs_unlink_and_put(struct path *path)
-+{
-+ vfs_unlink(path->dentry->d_parent->d_inode, path->dentry);
-+ path_put(path);
-+}
-+
-+/* Called under scst_mutex */
-+static int scst_pr_do_load_device_file(struct scst_device *dev,
-+ const char *file_name)
-+{
-+ int res = 0, rc;
-+ struct file *file = NULL;
-+ struct inode *inode;
-+ char *buf = NULL;
-+ loff_t file_size, pos, data_size;
-+ uint64_t sign, version;
-+ mm_segment_t old_fs;
-+ uint8_t pr_is_set, aptpl;
-+ __be64 key;
-+ uint16_t rel_tgt_id;
-+
-+ TRACE_ENTRY();
-+
-+ old_fs = get_fs();
-+ set_fs(KERNEL_DS);
-+
-+ TRACE_PR("Loading persistent file '%s'", file_name);
-+
-+ file = filp_open(file_name, O_RDONLY, 0);
-+ if (IS_ERR(file)) {
-+ res = PTR_ERR(file);
-+ TRACE_PR("Unable to open file '%s' - error %d", file_name, res);
-+ goto out;
-+ }
-+
-+ inode = file->f_dentry->d_inode;
-+
-+ if (S_ISREG(inode->i_mode))
-+ /* Nothing to do */;
-+ else if (S_ISBLK(inode->i_mode))
-+ inode = inode->i_bdev->bd_inode;
-+ else {
-+ PRINT_ERROR("Invalid file mode 0x%x", inode->i_mode);
-+ goto out_close;
-+ }
-+
-+ file_size = inode->i_size;
-+
-+	/* Limit the file size to some reasonable bound */
-+ if ((file_size == 0) || (file_size >= 15*1024*1024)) {
-+ PRINT_ERROR("Invalid PR file size %d", (int)file_size);
-+ res = -EINVAL;
-+ goto out_close;
-+ }
-+
-+ buf = vmalloc(file_size);
-+ if (buf == NULL) {
-+ res = -ENOMEM;
-+ PRINT_ERROR("%s", "Unable to allocate buffer");
-+ goto out_close;
-+ }
-+
-+ pos = 0;
-+ rc = vfs_read(file, (void __force __user *)buf, file_size, &pos);
-+ if (rc != file_size) {
-+ PRINT_ERROR("Unable to read file '%s' - error %d", file_name,
-+ rc);
-+ res = rc;
-+ goto out_close;
-+ }
-+
-+ data_size = 0;
-+ data_size += sizeof(sign);
-+ data_size += sizeof(version);
-+ data_size += sizeof(aptpl);
-+ data_size += sizeof(pr_is_set);
-+ data_size += sizeof(dev->pr_type);
-+ data_size += sizeof(dev->pr_scope);
-+
-+ if (file_size < data_size) {
-+ res = -EINVAL;
-+ PRINT_ERROR("Invalid file '%s' - size too small", file_name);
-+ goto out_close;
-+ }
-+
-+ pos = 0;
-+
-+ sign = get_unaligned((uint64_t *)&buf[pos]);
-+ if (sign != SCST_PR_FILE_SIGN) {
-+ res = -EINVAL;
-+ PRINT_ERROR("Invalid persistent file signature %016llx "
-+ "(expected %016llx)", sign, SCST_PR_FILE_SIGN);
-+ goto out_close;
-+ }
-+ pos += sizeof(sign);
-+
-+ version = get_unaligned((uint64_t *)&buf[pos]);
-+ if (version != SCST_PR_FILE_VERSION) {
-+ res = -EINVAL;
-+ PRINT_ERROR("Invalid persistent file version %016llx "
-+ "(expected %016llx)", version, SCST_PR_FILE_VERSION);
-+ goto out_close;
-+ }
-+ pos += sizeof(version);
-+
-+ while (data_size < file_size) {
-+ uint8_t *tid;
-+
-+ data_size++;
-+ tid = &buf[data_size];
-+ data_size += tid_size(tid);
-+ data_size += sizeof(key);
-+ data_size += sizeof(rel_tgt_id);
-+
-+ if (data_size > file_size) {
-+ res = -EINVAL;
-+			PRINT_ERROR("Invalid file '%s' - size mismatch: have "
-+				"%lld, need %lld", file_name, file_size,
-+				data_size);
-+ goto out_close;
-+ }
-+ }
-+
-+ aptpl = buf[pos];
-+ dev->pr_aptpl = aptpl ? 1 : 0;
-+ pos += sizeof(aptpl);
-+
-+ pr_is_set = buf[pos];
-+ dev->pr_is_set = pr_is_set ? 1 : 0;
-+ pos += sizeof(pr_is_set);
-+
-+ dev->pr_type = buf[pos];
-+ pos += sizeof(dev->pr_type);
-+
-+ dev->pr_scope = buf[pos];
-+ pos += sizeof(dev->pr_scope);
-+
-+ while (pos < file_size) {
-+ uint8_t is_holder;
-+ uint8_t *tid;
-+ struct scst_dev_registrant *reg = NULL;
-+
-+ is_holder = buf[pos++];
-+
-+ tid = &buf[pos];
-+ pos += tid_size(tid);
-+
-+ key = get_unaligned((__be64 *)&buf[pos]);
-+ pos += sizeof(key);
-+
-+ rel_tgt_id = get_unaligned((uint16_t *)&buf[pos]);
-+ pos += sizeof(rel_tgt_id);
-+
-+ reg = scst_pr_add_registrant(dev, tid, rel_tgt_id, key, false);
-+ if (reg == NULL) {
-+ res = -ENOMEM;
-+ goto out_close;
-+ }
-+
-+ if (is_holder)
-+ dev->pr_holder = reg;
-+ }
-+
-+out_close:
-+ filp_close(file, NULL);
-+
-+out:
-+ if (buf != NULL)
-+ vfree(buf);
-+
-+ set_fs(old_fs);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
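The on-disk layout parsed above is, in order: an 8-byte signature, an
8-byte version, one APTPL byte, one pr_is_set byte, the reservation type
and scope bytes, then a sequence of variable-length registrant records
(is_holder byte, TransportID, 8-byte key, 2-byte relative target id).
The following is an illustrative view of the fixed header only; the real
code deliberately parses field-by-field with get_unaligned() rather than
mapping a struct:

#include <stdint.h>
#include <stdio.h>

#pragma pack(push, 1)
struct pr_file_header {
	uint64_t sign;		/* SCST_PR_FILE_SIGN, written last as a commit mark */
	uint64_t version;	/* SCST_PR_FILE_VERSION */
	uint8_t  aptpl;
	uint8_t  pr_is_set;
	uint8_t  pr_type;
	uint8_t  pr_scope;
	/* followed, until EOF, by variable-length records:
	 * u8 is_holder, u8 transport_id[tid_size(tid)], be64 key,
	 * u16 rel_tgt_id */
};
#pragma pack(pop)

_Static_assert(sizeof(struct pr_file_header) == 20, "fixed header is 20 bytes");

int main(void)
{
	printf("fixed header: %zu bytes\n", sizeof(struct pr_file_header));
	return 0;
}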
-+
-+static int scst_pr_load_device_file(struct scst_device *dev)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ if (dev->pr_file_name == NULL || dev->pr_file_name1 == NULL) {
-+ PRINT_ERROR("Invalid file paths for '%s'", dev->virt_name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ res = scst_pr_do_load_device_file(dev, dev->pr_file_name);
-+ if (res == 0)
-+ goto out;
-+ else if (res == -ENOMEM)
-+ goto out;
-+
-+ res = scst_pr_do_load_device_file(dev, dev->pr_file_name1);
-+
-+ scst_pr_dump_prs(dev, false);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_pr_copy_file(const char *src, const char *dest)
-+{
-+ int res = 0;
-+ struct inode *inode;
-+ loff_t file_size, pos;
-+ uint8_t *buf = NULL;
-+ struct file *file_src = NULL, *file_dest = NULL;
-+ mm_segment_t old_fs = get_fs();
-+
-+ TRACE_ENTRY();
-+
-+ if (src == NULL || dest == NULL) {
-+ res = -EINVAL;
-+ PRINT_ERROR("%s", "Invalid persistent files path - backup "
-+ "skipped");
-+ goto out;
-+ }
-+
-+ TRACE_PR("Copying '%s' into '%s'", src, dest);
-+
-+ set_fs(KERNEL_DS);
-+
-+ file_src = filp_open(src, O_RDONLY, 0);
-+ if (IS_ERR(file_src)) {
-+ res = PTR_ERR(file_src);
-+ TRACE_PR("Unable to open file '%s' - error %d", src,
-+ res);
-+ goto out_free;
-+ }
-+
-+ file_dest = filp_open(dest, O_WRONLY | O_CREAT | O_TRUNC, 0644);
-+ if (IS_ERR(file_dest)) {
-+ res = PTR_ERR(file_dest);
-+ TRACE_PR("Unable to open backup file '%s' - error %d", dest,
-+ res);
-+ goto out_close;
-+ }
-+
-+ inode = file_src->f_dentry->d_inode;
-+
-+ if (S_ISREG(inode->i_mode))
-+ /* Nothing to do */;
-+ else if (S_ISBLK(inode->i_mode))
-+ inode = inode->i_bdev->bd_inode;
-+ else {
-+ PRINT_ERROR("Invalid file mode 0x%x", inode->i_mode);
-+ res = -EINVAL;
-+ set_fs(old_fs);
-+ goto out_skip;
-+ }
-+
-+ file_size = inode->i_size;
-+
-+ buf = vmalloc(file_size);
-+ if (buf == NULL) {
-+ res = -ENOMEM;
-+ PRINT_ERROR("%s", "Unable to allocate temporary buffer");
-+ goto out_skip;
-+ }
-+
-+ pos = 0;
-+ res = vfs_read(file_src, (void __force __user *)buf, file_size, &pos);
-+ if (res != file_size) {
-+ PRINT_ERROR("Unable to read file '%s' - error %d", src, res);
-+ goto out_skip;
-+ }
-+
-+ pos = 0;
-+ res = vfs_write(file_dest, (void __force __user *)buf, file_size, &pos);
-+ if (res != file_size) {
-+ PRINT_ERROR("Unable to write to '%s' - error %d", dest, res);
-+ goto out_skip;
-+ }
-+
-+ res = vfs_fsync(file_dest, 0);
-+ if (res != 0) {
-+ PRINT_ERROR("fsync() of the backup PR file failed: %d", res);
-+ goto out_skip;
-+ }
-+
-+out_skip:
-+ filp_close(file_dest, NULL);
-+
-+out_close:
-+ filp_close(file_src, NULL);
-+
-+out_free:
-+ if (buf != NULL)
-+ vfree(buf);
-+
-+ set_fs(old_fs);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void scst_pr_remove_device_files(struct scst_tgt_dev *tgt_dev)
-+{
-+ int res = 0;
-+ struct scst_device *dev = tgt_dev->dev;
-+ struct path path;
-+ mm_segment_t old_fs = get_fs();
-+
-+ TRACE_ENTRY();
-+
-+ set_fs(KERNEL_DS);
-+
-+ res = dev->pr_file_name ? kern_path(dev->pr_file_name, 0, &path) :
-+ -ENOENT;
-+ if (!res)
-+ scst_pr_vfs_unlink_and_put(&path);
-+ else
-+ TRACE_DBG("Unable to lookup file '%s' - error %d",
-+ dev->pr_file_name, res);
-+
-+ res = dev->pr_file_name1 ? kern_path(dev->pr_file_name1, 0, &path) :
-+ -ENOENT;
-+ if (!res)
-+ scst_pr_vfs_unlink_and_put(&path);
-+ else
-+ TRACE_DBG("Unable to lookup file '%s' - error %d",
-+ dev->pr_file_name1, res);
-+
-+ set_fs(old_fs);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Must be called under dev_pr_mutex */
-+void scst_pr_sync_device_file(struct scst_tgt_dev *tgt_dev, struct scst_cmd *cmd)
-+{
-+ int res = 0;
-+ struct scst_device *dev = tgt_dev->dev;
-+ struct file *file;
-+ mm_segment_t old_fs = get_fs();
-+ loff_t pos = 0;
-+ uint64_t sign;
-+ uint64_t version;
-+ uint8_t pr_is_set, aptpl;
-+
-+ TRACE_ENTRY();
-+
-+ if ((dev->pr_aptpl == 0) || list_empty(&dev->dev_registrants_list)) {
-+ scst_pr_remove_device_files(tgt_dev);
-+ goto out;
-+ }
-+
-+ scst_pr_copy_file(dev->pr_file_name, dev->pr_file_name1);
-+
-+ set_fs(KERNEL_DS);
-+
-+ file = filp_open(dev->pr_file_name, O_WRONLY | O_CREAT | O_TRUNC, 0644);
-+ if (IS_ERR(file)) {
-+ res = PTR_ERR(file);
-+ PRINT_ERROR("Unable to (re)create PR file '%s' - error %d",
-+ dev->pr_file_name, res);
-+ goto out_set_fs;
-+ }
-+
-+ TRACE_PR("Updating pr file '%s'", dev->pr_file_name);
-+
-+ /*
-+ * signature
-+ */
-+ sign = 0;
-+ pos = 0;
-+ res = vfs_write(file, (void __force __user *)&sign, sizeof(sign), &pos);
-+ if (res != sizeof(sign))
-+ goto write_error;
-+
-+ /*
-+ * version
-+ */
-+ version = SCST_PR_FILE_VERSION;
-+ res = vfs_write(file, (void __force __user *)&version, sizeof(version), &pos);
-+ if (res != sizeof(version))
-+ goto write_error;
-+
-+ /*
-+ * APTPL
-+ */
-+ aptpl = dev->pr_aptpl;
-+ res = vfs_write(file, (void __force __user *)&aptpl, sizeof(aptpl), &pos);
-+ if (res != sizeof(aptpl))
-+ goto write_error;
-+
-+ /*
-+ * reservation
-+ */
-+ pr_is_set = dev->pr_is_set;
-+ res = vfs_write(file, (void __force __user *)&pr_is_set, sizeof(pr_is_set), &pos);
-+ if (res != sizeof(pr_is_set))
-+ goto write_error;
-+
-+ res = vfs_write(file, (void __force __user *)&dev->pr_type, sizeof(dev->pr_type), &pos);
-+ if (res != sizeof(dev->pr_type))
-+ goto write_error;
-+
-+ res = vfs_write(file, (void __force __user *)&dev->pr_scope, sizeof(dev->pr_scope), &pos);
-+ if (res != sizeof(dev->pr_scope))
-+ goto write_error;
-+
-+ /*
-+ * registration records
-+ */
-+ if (!list_empty(&dev->dev_registrants_list)) {
-+ struct scst_dev_registrant *reg;
-+
-+ list_for_each_entry(reg, &dev->dev_registrants_list,
-+ dev_registrants_list_entry) {
-+ uint8_t is_holder = 0;
-+ int size;
-+
-+ is_holder = (dev->pr_holder == reg);
-+
-+ res = vfs_write(file, (void __force __user *)&is_holder, sizeof(is_holder),
-+ &pos);
-+ if (res != sizeof(is_holder))
-+ goto write_error;
-+
-+ size = tid_size(reg->transport_id);
-+ res = vfs_write(file, (void __force __user *)reg->transport_id, size, &pos);
-+ if (res != size)
-+ goto write_error;
-+
-+ res = vfs_write(file, (void __force __user *)&reg->key,
-+ sizeof(reg->key), &pos);
-+ if (res != sizeof(reg->key))
-+ goto write_error;
-+
-+ res = vfs_write(file, (void __force __user *)&reg->rel_tgt_id,
-+ sizeof(reg->rel_tgt_id), &pos);
-+ if (res != sizeof(reg->rel_tgt_id))
-+ goto write_error;
-+ }
-+ }
-+
-+ res = vfs_fsync(file, 0);
-+ if (res != 0) {
-+ PRINT_ERROR("fsync() of the PR file failed: %d", res);
-+ goto write_error_close;
-+ }
-+
-+ sign = SCST_PR_FILE_SIGN;
-+ pos = 0;
-+ res = vfs_write(file, (void __force __user *)&sign, sizeof(sign), &pos);
-+ if (res != sizeof(sign))
-+ goto write_error;
-+
-+ res = vfs_fsync(file, 0);
-+ if (res != 0) {
-+ PRINT_ERROR("fsync() of the PR file failed: %d", res);
-+ goto write_error_close;
-+ }
-+
-+ res = 0;
-+
-+ filp_close(file, NULL);
-+
-+out_set_fs:
-+ set_fs(old_fs);
-+
-+out:
-+ if (res != 0) {
-+ PRINT_CRIT_ERROR("Unable to save persistent information "
-+ "(target %s, initiator %s, device %s)",
-+ tgt_dev->sess->tgt->tgt_name,
-+ tgt_dev->sess->initiator_name, dev->virt_name);
-+#if 0 /*
-+ * Looks like it's safer to return SUCCESS and expect operator's
-+ * intervention to be able to save the PR's state next time, than
-+ * to return HARDWARE ERROR and screw up all the interaction with
-+ * the affected initiator.
-+ */
-+ if (cmd != NULL)
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
-+#endif
-+ }
-+
-+ TRACE_EXIT_RES(res);
-+ return;
-+
-+write_error:
-+ PRINT_ERROR("Error writing to '%s' - error %d", dev->pr_file_name, res);
-+
-+write_error_close:
-+ filp_close(file, NULL);
-+ {
-+ struct path path;
-+ int rc;
-+
-+ rc = kern_path(dev->pr_file_name, 0, &path);
-+ if (!rc)
-+ scst_pr_vfs_unlink_and_put(&path);
-+ else
-+ TRACE_PR("Unable to lookup '%s' - error %d",
-+ dev->pr_file_name, rc);
-+ }
-+ goto out_set_fs;
-+}
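The writer above gets crash safety from ordering, not from a journal: it
first copies the current file to the ".1" backup, rewrites the primary
with a zeroed signature, fsync()s, and only then writes the real
signature and fsync()s again, so any file carrying a valid signature is
guaranteed complete; on a write error the half-written file is unlinked
and the loader falls back to the backup. A userspace sketch of the same
write-then-seal pattern (file name and helper invented):

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

#define FILE_SIGN 0xBBEEEEAAEEBBDD77ULL

static int write_sealed(const char *path, const void *body, size_t len)
{
	uint64_t sign = 0;
	int fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (fd < 0)
		return -1;
	/* 1: placeholder signature, then the payload, then make it durable */
	if (write(fd, &sign, sizeof(sign)) != (ssize_t)sizeof(sign) ||
	    write(fd, body, len) != (ssize_t)len ||
	    fsync(fd) != 0)
		goto fail;
	/* 2: seal - the real signature goes in only after the data is safe */
	sign = FILE_SIGN;
	if (pwrite(fd, &sign, sizeof(sign), 0) != (ssize_t)sizeof(sign) ||
	    fsync(fd) != 0)
		goto fail;
	return close(fd);
fail:
	close(fd);
	unlink(path);	/* the loader will fall back to the backup copy */
	return -1;
}

int main(void)
{
	const char data[] = "pr-state";
	return write_sealed("/tmp/pr_example", data, sizeof(data));
}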
-+
-+static int scst_pr_check_pr_path(void)
-+{
-+ int res;
-+ struct path path;
-+
-+ mm_segment_t old_fs = get_fs();
-+
-+ TRACE_ENTRY();
-+
-+ set_fs(KERNEL_DS);
-+
-+ res = kern_path(SCST_PR_DIR, 0, &path);
-+ if (res == 0)
-+ path_put(&path);
-+ if (res != 0) {
-+ PRINT_ERROR("Unable to find %s (err %d), you should create "
-+ "this directory manually or reinstall SCST",
-+ SCST_PR_DIR, res);
-+ goto out_setfs;
-+ }
-+
-+out_setfs:
-+ set_fs(old_fs);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* Called under scst_mutex */
-+int scst_pr_init_dev(struct scst_device *dev)
-+{
-+ int res = 0;
-+ uint8_t q;
-+ int name_len;
-+
-+ TRACE_ENTRY();
-+
-+ name_len = snprintf(&q, sizeof(q), "%s/%s", SCST_PR_DIR, dev->virt_name) + 1;
-+ dev->pr_file_name = kmalloc(name_len, GFP_KERNEL);
-+ if (dev->pr_file_name == NULL) {
-+ PRINT_ERROR("Allocation of device '%s' file path failed",
-+ dev->virt_name);
-+ res = -ENOMEM;
-+ goto out;
-+ } else
-+ snprintf(dev->pr_file_name, name_len, "%s/%s", SCST_PR_DIR,
-+ dev->virt_name);
-+
-+ name_len = snprintf(&q, sizeof(q), "%s/%s.1", SCST_PR_DIR, dev->virt_name) + 1;
-+ dev->pr_file_name1 = kmalloc(name_len, GFP_KERNEL);
-+ if (dev->pr_file_name1 == NULL) {
-+ PRINT_ERROR("Allocation of device '%s' backup file path failed",
-+ dev->virt_name);
-+ res = -ENOMEM;
-+ goto out_free_name;
-+ } else
-+ snprintf(dev->pr_file_name1, name_len, "%s/%s.1", SCST_PR_DIR,
-+ dev->virt_name);
-+
-+ res = scst_pr_check_pr_path();
-+ if (res == 0) {
-+ res = scst_pr_load_device_file(dev);
-+ if (res == -ENOENT)
-+ res = 0;
-+ }
-+
-+ if (res != 0)
-+ goto out_free_name1;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free_name1:
-+ kfree(dev->pr_file_name1);
-+ dev->pr_file_name1 = NULL;
-+
-+out_free_name:
-+ kfree(dev->pr_file_name);
-+ dev->pr_file_name = NULL;
-+ goto out;
-+}
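scst_pr_init_dev() sizes its path buffers with a C99 idiom: snprintf()
into a 1-byte dummy returns the length the full string would have
needed, so the return value plus one is exactly the allocation size. A
standalone illustration (directory and device name are made up):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *dir = "/var/lib/scst/pr", *name = "disk01";
	char q;
	/* snprintf returns the would-be length even when it truncates */
	int name_len = snprintf(&q, sizeof(q), "%s/%s", dir, name) + 1;
	char *path = malloc(name_len);

	if (path == NULL)
		return 1;
	snprintf(path, name_len, "%s/%s", dir, name);
	printf("%s (%d bytes)\n", path, name_len);
	free(path);
	return 0;
}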
-+
-+/* Called under scst_mutex */
-+void scst_pr_clear_dev(struct scst_device *dev)
-+{
-+ struct scst_dev_registrant *reg, *tmp_reg;
-+
-+ TRACE_ENTRY();
-+
-+ list_for_each_entry_safe(reg, tmp_reg, &dev->dev_registrants_list,
-+ dev_registrants_list_entry) {
-+ scst_pr_remove_registrant(dev, reg);
-+ }
-+
-+ kfree(dev->pr_file_name);
-+ kfree(dev->pr_file_name1);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Called under scst_mutex */
-+int scst_pr_init_tgt_dev(struct scst_tgt_dev *tgt_dev)
-+{
-+ int res = 0;
-+ struct scst_dev_registrant *reg;
-+ struct scst_device *dev = tgt_dev->dev;
-+ const uint8_t *transport_id = tgt_dev->sess->transport_id;
-+ const uint16_t rel_tgt_id = tgt_dev->sess->tgt->rel_tgt_id;
-+
-+ TRACE_ENTRY();
-+
-+ if (tgt_dev->sess->transport_id == NULL)
-+ goto out;
-+
-+ scst_pr_write_lock(dev);
-+
-+ reg = scst_pr_find_reg(dev, transport_id, rel_tgt_id);
-+ if ((reg != NULL) && (reg->tgt_dev == NULL)) {
-+ TRACE_PR("Assigning reg %s/%d (%p) to tgt_dev %p (dev %s)",
-+ debug_transport_id_to_initiator_name(transport_id),
-+ rel_tgt_id, reg, tgt_dev, dev->virt_name);
-+ tgt_dev->registrant = reg;
-+ reg->tgt_dev = tgt_dev;
-+ }
-+
-+ scst_pr_write_unlock(dev);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* Called under scst_mutex */
-+void scst_pr_clear_tgt_dev(struct scst_tgt_dev *tgt_dev)
-+{
-+ TRACE_ENTRY();
-+
-+ if (tgt_dev->registrant != NULL) {
-+ struct scst_dev_registrant *reg = tgt_dev->registrant;
-+ struct scst_device *dev = tgt_dev->dev;
-+ struct scst_tgt_dev *t;
-+
-+ scst_pr_write_lock(dev);
-+
-+ tgt_dev->registrant = NULL;
-+ reg->tgt_dev = NULL;
-+
-+		/* Just in case; this should never happen. */
-+ list_for_each_entry(t, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ if (t == tgt_dev)
-+ continue;
-+ if ((t->sess->tgt->rel_tgt_id == reg->rel_tgt_id) &&
-+ tid_equal(t->sess->transport_id, reg->transport_id)) {
-+ TRACE_PR("Reassigning reg %s/%d (%p) to tgt_dev "
-+ "%p (being cleared tgt_dev %p)",
-+ debug_transport_id_to_initiator_name(
-+ reg->transport_id),
-+ reg->rel_tgt_id, reg, t, tgt_dev);
-+ t->registrant = reg;
-+ reg->tgt_dev = t;
-+ break;
-+ }
-+ }
-+
-+ scst_pr_write_unlock(dev);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Called with dev_pr_mutex locked. Might also be called under scst_mutex2. */
-+static int scst_pr_register_with_spec_i_pt(struct scst_cmd *cmd,
-+ const uint16_t rel_tgt_id, uint8_t *buffer, int buffer_size,
-+ struct list_head *rollback_list)
-+{
-+ int res = 0;
-+ int offset, ext_size;
-+ __be64 action_key;
-+ struct scst_device *dev = cmd->dev;
-+ struct scst_dev_registrant *reg;
-+ uint8_t *transport_id;
-+
-+ action_key = get_unaligned((__be64 *)&buffer[8]);
-+
-+ ext_size = be32_to_cpu(get_unaligned((__be32 *)&buffer[24]));
-+ if ((ext_size + 28) > buffer_size) {
-+ TRACE_PR("Invalid buffer size %d (max %d)", buffer_size,
-+ ext_size + 28);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_parameter_list_length_invalid));
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ offset = 0;
-+ while (offset < ext_size) {
-+ transport_id = &buffer[28 + offset];
-+
-+ if ((offset + tid_size(transport_id)) > ext_size) {
-+ TRACE_PR("Invalid transport_id size %d (max %d)",
-+ tid_size(transport_id), ext_size - offset);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_parm_list));
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ tid_secure(transport_id);
-+ offset += tid_size(transport_id);
-+ }
-+
-+ offset = 0;
-+ while (offset < ext_size) {
-+ struct scst_tgt_dev *t;
-+
-+ transport_id = &buffer[28 + offset];
-+
-+ TRACE_PR("rel_tgt_id %d, transport_id %s", rel_tgt_id,
-+ debug_transport_id_to_initiator_name(transport_id));
-+
-+ if ((transport_id[0] & 0x0f) == SCSI_TRANSPORTID_PROTOCOLID_ISCSI &&
-+ (transport_id[0] & 0xc0) == 0) {
-+ TRACE_PR("Wildcard iSCSI TransportID %s",
-+ &transport_id[4]);
-+ /*
-+ * We can't use scst_mutex here, because of the
-+ * circular locking dependency with dev_pr_mutex.
-+ */
-+ spin_lock_bh(&dev->dev_lock);
-+ list_for_each_entry(t, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ /*
-+ * We must go over all matching tgt_devs and
-+ * register them on the requested rel_tgt_id
-+ */
-+ if (!tid_equal(t->sess->transport_id,
-+ transport_id))
-+ continue;
-+
-+ reg = scst_pr_find_reg(dev,
-+ t->sess->transport_id, rel_tgt_id);
-+ if (reg == NULL) {
-+ reg = scst_pr_add_registrant(dev,
-+ t->sess->transport_id,
-+ rel_tgt_id, action_key, true);
-+ if (reg == NULL) {
-+ spin_unlock_bh(&dev->dev_lock);
-+ scst_set_busy(cmd);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ } else if (reg->key != action_key) {
-+ TRACE_PR("Changing key of reg %p "
-+ "(tgt_dev %p)", reg, t);
-+ reg->rollback_key = reg->key;
-+ reg->key = action_key;
-+ } else
-+ continue;
-+
-+ list_add_tail(&reg->aux_list_entry,
-+ rollback_list);
-+ }
-+ spin_unlock_bh(&dev->dev_lock);
-+ } else {
-+ reg = scst_pr_find_reg(dev, transport_id, rel_tgt_id);
-+ if (reg != NULL) {
-+ if (reg->key == action_key)
-+ goto next;
-+ TRACE_PR("Changing key of reg %p (tgt_dev %p)",
-+ reg, reg->tgt_dev);
-+ reg->rollback_key = reg->key;
-+ reg->key = action_key;
-+ } else {
-+ reg = scst_pr_add_registrant(dev, transport_id,
-+ rel_tgt_id, action_key, false);
-+ if (reg == NULL) {
-+ scst_set_busy(cmd);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ }
-+
-+ list_add_tail(&reg->aux_list_entry,
-+ rollback_list);
-+ }
-+next:
-+ offset += tid_size(transport_id);
-+ }
-+out:
-+ return res;
-+}
-+
-+/* Called with dev_pr_mutex locked, no IRQ */
-+static void scst_pr_unregister(struct scst_device *dev,
-+ struct scst_dev_registrant *reg)
-+{
-+ bool is_holder;
-+ uint8_t pr_type;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_PR("Unregistering key %0llx", reg->key);
-+
-+ is_holder = scst_pr_is_holder(dev, reg);
-+ pr_type = dev->pr_type;
-+
-+ scst_pr_remove_registrant(dev, reg);
-+
-+ if (is_holder && !dev->pr_is_set) {
-+ /* A registration just released */
-+ switch (pr_type) {
-+ case TYPE_WRITE_EXCLUSIVE_REGONLY:
-+ case TYPE_EXCLUSIVE_ACCESS_REGONLY:
-+ scst_pr_send_ua_all(dev, NULL,
-+ SCST_LOAD_SENSE(scst_sense_reservation_released));
-+ break;
-+ }
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Called with dev_pr_mutex locked, no IRQ */
-+static void scst_pr_unregister_all_tg_pt(struct scst_device *dev,
-+ const uint8_t *transport_id)
-+{
-+ struct scst_tgt_template *tgtt;
-+ uint8_t proto_id = transport_id[0] & 0x0f;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+ * We can't use scst_mutex here, because of the circular locking
-+ * dependency with dev_pr_mutex.
-+ */
-+ mutex_lock(&scst_mutex2);
-+
-+ list_for_each_entry(tgtt, &scst_template_list, scst_template_list_entry) {
-+ struct scst_tgt *tgt;
-+
-+ if (tgtt->get_initiator_port_transport_id == NULL)
-+ continue;
-+
-+ list_for_each_entry(tgt, &tgtt->tgt_list, tgt_list_entry) {
-+ struct scst_dev_registrant *reg;
-+
-+ if (tgtt->get_initiator_port_transport_id(tgt, NULL, NULL) != proto_id)
-+ continue;
-+
-+ reg = scst_pr_find_reg(dev, transport_id,
-+ tgt->rel_tgt_id);
-+ if (reg == NULL)
-+ continue;
-+
-+ scst_pr_unregister(dev, reg);
-+ }
-+ }
-+
-+ mutex_unlock(&scst_mutex2);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Called with dev_pr_mutex locked. Might also be called under scst_mutex2. */
-+static int scst_pr_register_on_tgt_id(struct scst_cmd *cmd,
-+ const uint16_t rel_tgt_id, uint8_t *buffer, int buffer_size,
-+ bool spec_i_pt, struct list_head *rollback_list)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_PR("rel_tgt_id %d, spec_i_pt %d", rel_tgt_id, spec_i_pt);
-+
-+ if (spec_i_pt) {
-+ res = scst_pr_register_with_spec_i_pt(cmd, rel_tgt_id, buffer,
-+ buffer_size, rollback_list);
-+ if (res != 0)
-+ goto out;
-+ }
-+
-+ /* tgt_dev can be among TIDs for scst_pr_register_with_spec_i_pt() */
-+
-+ if (scst_pr_find_reg(cmd->dev, cmd->sess->transport_id, rel_tgt_id) == NULL) {
-+ __be64 action_key;
-+ struct scst_dev_registrant *reg;
-+
-+ action_key = get_unaligned((__be64 *)&buffer[8]);
-+
-+ reg = scst_pr_add_registrant(cmd->dev, cmd->sess->transport_id,
-+ rel_tgt_id, action_key, false);
-+ if (reg == NULL) {
-+ res = -ENOMEM;
-+ scst_set_busy(cmd);
-+ goto out;
-+ }
-+
-+ list_add_tail(&reg->aux_list_entry, rollback_list);
-+ }
-+
-+ res = 0;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* Called with dev_pr_mutex locked, no IRQ */
-+static int scst_pr_register_all_tg_pt(struct scst_cmd *cmd, uint8_t *buffer,
-+ int buffer_size, bool spec_i_pt, struct list_head *rollback_list)
-+{
-+ int res = 0;
-+ struct scst_tgt_template *tgtt;
-+ uint8_t proto_id = cmd->sess->transport_id[0] & 0x0f;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+ * We can't use scst_mutex here, because of the circular locking
-+ * dependency with dev_pr_mutex.
-+ */
-+ mutex_lock(&scst_mutex2);
-+
-+ list_for_each_entry(tgtt, &scst_template_list, scst_template_list_entry) {
-+ struct scst_tgt *tgt;
-+
-+ if (tgtt->get_initiator_port_transport_id == NULL)
-+ continue;
-+
-+ TRACE_PR("tgtt %s, spec_i_pt %d", tgtt->name, spec_i_pt);
-+
-+ list_for_each_entry(tgt, &tgtt->tgt_list, tgt_list_entry) {
-+ if (tgtt->get_initiator_port_transport_id(tgt, NULL, NULL) != proto_id)
-+ continue;
-+ if (tgt->rel_tgt_id == 0)
-+ continue;
-+ TRACE_PR("tgt %s, rel_tgt_id %d", tgt->tgt_name,
-+ tgt->rel_tgt_id);
-+ res = scst_pr_register_on_tgt_id(cmd, tgt->rel_tgt_id,
-+ buffer, buffer_size, spec_i_pt, rollback_list);
-+ if (res != 0)
-+ goto out_unlock;
-+ }
-+ }
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex2);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* Called with dev_pr_mutex locked, no IRQ */
-+static int __scst_pr_register(struct scst_cmd *cmd, uint8_t *buffer,
-+ int buffer_size, bool spec_i_pt, bool all_tg_pt)
-+{
-+ int res;
-+ struct scst_dev_registrant *reg, *treg;
-+ LIST_HEAD(rollback_list);
-+
-+ TRACE_ENTRY();
-+
-+ if (all_tg_pt) {
-+ res = scst_pr_register_all_tg_pt(cmd, buffer, buffer_size,
-+ spec_i_pt, &rollback_list);
-+ if (res != 0)
-+ goto out_rollback;
-+ } else {
-+ res = scst_pr_register_on_tgt_id(cmd,
-+ cmd->sess->tgt->rel_tgt_id, buffer, buffer_size,
-+ spec_i_pt, &rollback_list);
-+ if (res != 0)
-+ goto out_rollback;
-+ }
-+
-+ list_for_each_entry(reg, &rollback_list, aux_list_entry) {
-+ reg->rollback_key = 0;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_rollback:
-+ list_for_each_entry_safe(reg, treg, &rollback_list, aux_list_entry) {
-+ list_del(&reg->aux_list_entry);
-+ if (reg->rollback_key == 0)
-+ scst_pr_remove_registrant(cmd->dev, reg);
-+ else {
-+ reg->key = reg->rollback_key;
-+ reg->rollback_key = 0;
-+ }
-+ }
-+ goto out;
-+}
-+
-+/* Called with dev_pr_mutex locked, no IRQ */
-+void scst_pr_register(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size)
-+{
-+ int aptpl, spec_i_pt, all_tg_pt;
-+ __be64 key, action_key;
-+ struct scst_device *dev = cmd->dev;
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+ struct scst_session *sess = cmd->sess;
-+ struct scst_dev_registrant *reg;
-+
-+ TRACE_ENTRY();
-+
-+ aptpl = buffer[20] & 0x01;
-+ spec_i_pt = (buffer[20] >> 3) & 0x01;
-+ all_tg_pt = (buffer[20] >> 2) & 0x01;
-+ key = get_unaligned((__be64 *)&buffer[0]);
-+ action_key = get_unaligned((__be64 *)&buffer[8]);
-+
-+ if (spec_i_pt == 0 && buffer_size != 24) {
-+ TRACE_PR("Invalid buffer size %d", buffer_size);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_parameter_list_length_invalid));
-+ goto out;
-+ }
-+
-+ reg = tgt_dev->registrant;
-+
-+ TRACE_PR("Register: initiator %s/%d (%p), key %0llx, action_key %0llx "
-+ "(tgt_dev %p)",
-+ debug_transport_id_to_initiator_name(sess->transport_id),
-+ sess->tgt->rel_tgt_id, reg, key, action_key, tgt_dev);
-+
-+ if (reg == NULL) {
-+ TRACE_PR("tgt_dev %p is not registered yet - registering",
-+ tgt_dev);
-+ if (key) {
-+ TRACE_PR("%s", "Key must be zero on new registration");
-+ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
-+ goto out;
-+ }
-+ if (action_key) {
-+ int rc = __scst_pr_register(cmd, buffer, buffer_size,
-+ spec_i_pt, all_tg_pt);
-+ if (rc != 0)
-+ goto out;
-+ } else
-+ TRACE_PR("%s", "Doing nothing - action_key is zero");
-+ } else {
-+ if (reg->key != key) {
-+ TRACE_PR("tgt_dev %p already registered - reservation "
-+ "key %0llx mismatch", tgt_dev, reg->key);
-+ scst_set_cmd_error_status(cmd,
-+ SAM_STAT_RESERVATION_CONFLICT);
-+ goto out;
-+ }
-+ if (spec_i_pt) {
-+ TRACE_PR("%s", "spec_i_pt must be zero in this case");
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
-+ scst_sense_invalid_field_in_cdb));
-+ goto out;
-+ }
-+ if (action_key == 0) {
-+ if (all_tg_pt)
-+ scst_pr_unregister_all_tg_pt(dev,
-+ sess->transport_id);
-+ else
-+ scst_pr_unregister(dev, reg);
-+ } else
-+ reg->key = action_key;
-+ }
-+
-+ dev->pr_generation++;
-+
-+ dev->pr_aptpl = aptpl;
-+
-+ scst_pr_dump_prs(dev, false);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
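-+
-+/*
-+ * For reference, the REGISTER parameter list parsed above (the SPC-3
-+ * basic PERSISTENT RESERVE OUT layout; offsets restate the buffer[]
-+ * accesses made by scst_pr_register()):
-+ *
-+ *	bytes  0-7	RESERVATION KEY (key)
-+ *	bytes  8-15	SERVICE ACTION RESERVATION KEY (action_key)
-+ *	bytes 16-19	obsolete
-+ *	byte  20	bit 3 SPEC_I_PT, bit 2 ALL_TG_PT, bit 0 APTPL
-+ *	bytes 21-23	reserved/obsolete
-+ *
-+ * Without SPEC_I_PT the list is exactly 24 bytes long, hence the
-+ * buffer_size != 24 check above.
-+ */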
-+
-+/* Called with dev_pr_mutex locked, no IRQ */
-+void scst_pr_register_and_ignore(struct scst_cmd *cmd, uint8_t *buffer,
-+ int buffer_size)
-+{
-+ int aptpl, all_tg_pt;
-+ __be64 action_key;
-+ struct scst_dev_registrant *reg = NULL;
-+ struct scst_device *dev = cmd->dev;
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+ struct scst_session *sess = cmd->sess;
-+
-+ TRACE_ENTRY();
-+
-+ aptpl = buffer[20] & 0x01;
-+ all_tg_pt = (buffer[20] >> 2) & 0x01;
-+ action_key = get_unaligned((__be64 *)&buffer[8]);
-+
-+ if (buffer_size != 24) {
-+ TRACE_PR("Invalid buffer size %d", buffer_size);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_parameter_list_length_invalid));
-+ goto out;
-+ }
-+
-+ reg = tgt_dev->registrant;
-+
-+ TRACE_PR("Register and ignore: initiator %s/%d (%p), action_key "
-+ "%016llx (tgt_dev %p)",
-+ debug_transport_id_to_initiator_name(sess->transport_id),
-+ sess->tgt->rel_tgt_id, reg, action_key, tgt_dev);
-+
-+ if (reg == NULL) {
-+ TRACE_PR("Tgt_dev %p is not registered yet - trying to "
-+ "register", tgt_dev);
-+ if (action_key) {
-+ int rc = __scst_pr_register(cmd, buffer, buffer_size,
-+ false, all_tg_pt);
-+ if (rc != 0)
-+ goto out;
-+ } else
-+ TRACE_PR("%s", "Doing nothing, action_key is zero");
-+ } else {
-+ if (action_key == 0) {
-+ if (all_tg_pt)
-+ scst_pr_unregister_all_tg_pt(dev,
-+ sess->transport_id);
-+ else
-+ scst_pr_unregister(dev, reg);
-+ } else
-+ reg->key = action_key;
-+ }
-+
-+ dev->pr_generation++;
-+
-+ dev->pr_aptpl = aptpl;
-+
-+ scst_pr_dump_prs(dev, false);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Called with dev_pr_mutex locked, no IRQ */
-+void scst_pr_register_and_move(struct scst_cmd *cmd, uint8_t *buffer,
-+ int buffer_size)
-+{
-+ int aptpl;
-+ int unreg;
-+ int tid_buffer_size;
-+ __be64 key, action_key;
-+ struct scst_device *dev = cmd->dev;
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+ struct scst_session *sess = cmd->sess;
-+ struct scst_dev_registrant *reg, *reg_move;
-+ const uint8_t *transport_id = NULL;
-+ uint8_t *transport_id_move = NULL;
-+ uint16_t rel_tgt_id_move;
-+
-+ TRACE_ENTRY();
-+
-+ aptpl = buffer[17] & 0x01;
-+ key = get_unaligned((__be64 *)&buffer[0]);
-+ action_key = get_unaligned((__be64 *)&buffer[8]);
-+ unreg = (buffer[17] >> 1) & 0x01;
-+ tid_buffer_size = be32_to_cpu(get_unaligned((__be32 *)&buffer[20]));
-+
-+ if ((tid_buffer_size + 24) > buffer_size) {
-+ TRACE_PR("Invalid buffer size %d (%d)",
-+ buffer_size, tid_buffer_size + 24);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_parm_list));
-+ goto out;
-+ }
-+
-+ if (tid_buffer_size < 24) {
-+ TRACE_PR("%s", "Transport id buffer too small");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_parm_list));
-+ goto out;
-+ }
-+
-+ reg = tgt_dev->registrant;
-+ /* We already checked reg is not NULL */
-+ if (reg->key != key) {
-+ TRACE_PR("Registrant's %s/%d (%p) key %016llx mismatch with "
-+ "%016llx (tgt_dev %p)",
-+ debug_transport_id_to_initiator_name(reg->transport_id),
-+ reg->rel_tgt_id, reg, reg->key, key, tgt_dev);
-+ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
-+ goto out;
-+ }
-+
-+ if (!dev->pr_is_set) {
-+ TRACE_PR("%s", "There must be a PR");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out;
-+ }
-+
-+ /*
-+	 * This check is also required by the table "PERSISTENT RESERVE OUT
-+	 * service actions that are allowed in the presence of various
-+	 * reservations".
-+ */
-+ if (!scst_pr_is_holder(dev, reg)) {
-+ TRACE_PR("Registrant %s/%d (%p) is not a holder (tgt_dev %p)",
-+ debug_transport_id_to_initiator_name(
-+ reg->transport_id), reg->rel_tgt_id,
-+ reg, tgt_dev);
-+ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
-+ goto out;
-+ }
-+
-+ if (action_key == 0) {
-+ TRACE_PR("%s", "Action key must be non-zero");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out;
-+ }
-+
-+ transport_id = sess->transport_id;
-+ transport_id_move = (uint8_t *)&buffer[24];
-+ rel_tgt_id_move = be16_to_cpu(get_unaligned((__be16 *)&buffer[18]));
-+
-+ if ((tid_size(transport_id_move) + 24) > buffer_size) {
-+ TRACE_PR("Invalid buffer size %d (%d)",
-+ buffer_size, tid_size(transport_id_move) + 24);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_parm_list));
-+ goto out;
-+ }
-+
-+ tid_secure(transport_id_move);
-+
-+ if (dev->pr_type == TYPE_WRITE_EXCLUSIVE_ALL_REG ||
-+ dev->pr_type == TYPE_EXCLUSIVE_ACCESS_ALL_REG) {
-+ TRACE_PR("Unable to finish operation due to wrong reservation "
-+ "type %02x", dev->pr_type);
-+ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
-+ goto out;
-+ }
-+
-+ if (tid_equal(transport_id, transport_id_move)) {
-+ TRACE_PR("%s", "Equal transport id's");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_parm_list));
-+ goto out;
-+ }
-+
-+ reg_move = scst_pr_find_reg(dev, transport_id_move, rel_tgt_id_move);
-+ if (reg_move == NULL) {
-+ reg_move = scst_pr_add_registrant(dev, transport_id_move,
-+ rel_tgt_id_move, action_key, false);
-+ if (reg_move == NULL) {
-+ scst_set_busy(cmd);
-+ goto out;
-+ }
-+ } else if (reg_move->key != action_key) {
-+ TRACE_PR("Changing key for reg %p", reg);
-+ reg_move->key = action_key;
-+ }
-+
-+ TRACE_PR("Register and move: from initiator %s/%d (%p, tgt_dev %p) to "
-+ "initiator %s/%d (%p, tgt_dev %p), key %016llx (unreg %d)",
-+ debug_transport_id_to_initiator_name(reg->transport_id),
-+ reg->rel_tgt_id, reg, reg->tgt_dev,
-+ debug_transport_id_to_initiator_name(transport_id_move),
-+ rel_tgt_id_move, reg_move, reg_move->tgt_dev, action_key,
-+ unreg);
-+
-+ /* Move the holder */
-+ scst_pr_set_holder(dev, reg_move, dev->pr_scope, dev->pr_type);
-+
-+ if (unreg)
-+ scst_pr_remove_registrant(dev, reg);
-+
-+ dev->pr_generation++;
-+
-+ dev->pr_aptpl = aptpl;
-+
-+ scst_pr_dump_prs(dev, false);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
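-+
-+/*
-+ * For reference, the REGISTER AND MOVE parameter list parsed above
-+ * (offsets restate the buffer[] accesses made by this function):
-+ *
-+ *	bytes  0-7	RESERVATION KEY (key)
-+ *	bytes  8-15	SERVICE ACTION RESERVATION KEY (action_key)
-+ *	byte  17	bit 1 UNREG, bit 0 APTPL
-+ *	bytes 18-19	RELATIVE TARGET PORT IDENTIFIER (rel_tgt_id_move)
-+ *	bytes 20-23	TRANSPORTID PARAMETER DATA LENGTH (tid_buffer_size)
-+ *	bytes 24-	TransportID of the destination I_T nexus
-+ */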
-+
-+/* Called with dev_pr_mutex locked, no IRQ */
-+void scst_pr_reserve(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size)
-+{
-+ uint8_t scope, type;
-+ __be64 key;
-+ struct scst_device *dev = cmd->dev;
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+ struct scst_dev_registrant *reg;
-+
-+ TRACE_ENTRY();
-+
-+ key = get_unaligned((__be64 *)&buffer[0]);
-+	scope = (cmd->cdb[2] & 0xf0) >> 4;
-+ type = cmd->cdb[2] & 0x0f;
-+
-+ if (buffer_size != 24) {
-+ TRACE_PR("Invalid buffer size %d", buffer_size);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_parameter_list_length_invalid));
-+ goto out;
-+ }
-+
-+ if (!scst_pr_type_valid(type)) {
-+ TRACE_PR("Invalid reservation type %d", type);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out;
-+ }
-+
-+	if (scope != SCOPE_LU) {
-+ TRACE_PR("Invalid reservation scope %d", scope);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out;
-+ }
-+
-+ reg = tgt_dev->registrant;
-+
-+ TRACE_PR("Reserve: initiator %s/%d (%p), key %016llx, scope %d, "
-+ "type %d (tgt_dev %p)",
-+ debug_transport_id_to_initiator_name(cmd->sess->transport_id),
-+ cmd->sess->tgt->rel_tgt_id, reg, key, scope, type, tgt_dev);
-+
-+ /* We already checked reg is not NULL */
-+ if (reg->key != key) {
-+ TRACE_PR("Registrant's %p key %016llx mismatch with %016llx",
-+ reg, reg->key, key);
-+ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
-+ goto out;
-+ }
-+
-+ if (!dev->pr_is_set)
-+ scst_pr_set_holder(dev, reg, scope, type);
-+ else {
-+ if (!scst_pr_is_holder(dev, reg)) {
-+ /*
-+			 * This check is also required by the table "PERSISTENT
-+ * RESERVE OUT service actions that are allowed in the
-+ * presence of various reservations".
-+ */
-+ TRACE_PR("Only holder can override - reg %p is not a "
-+ "holder", reg);
-+ scst_set_cmd_error_status(cmd,
-+ SAM_STAT_RESERVATION_CONFLICT);
-+ goto out;
-+ } else {
-+ if (dev->pr_scope != scope || dev->pr_type != type) {
-+ TRACE_PR("Error overriding scope or type for "
-+ "reg %p", reg);
-+ scst_set_cmd_error_status(cmd,
-+ SAM_STAT_RESERVATION_CONFLICT);
-+ goto out;
-+ } else
-+ TRACE_PR("Do nothing: reservation of reg %p "
-+ "is the same", reg);
-+ }
-+ }
-+
-+ scst_pr_dump_prs(dev, false);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
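-+
-+/*
-+ * For reference: in the PERSISTENT RESERVE OUT CDB parsed above, byte 2
-+ * carries the scope in its upper nibble and the reservation type in its
-+ * lower nibble; SCOPE_LU (0) is the only scope accepted here.
-+ */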
-+
-+/* Called with dev_pr_mutex locked, no IRQ */
-+void scst_pr_release(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size)
-+{
-+ int scope, type;
-+ __be64 key;
-+ struct scst_device *dev = cmd->dev;
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+ struct scst_dev_registrant *reg;
-+ uint8_t cur_pr_type;
-+
-+ TRACE_ENTRY();
-+
-+ key = get_unaligned((__be64 *)&buffer[0]);
-+	scope = (cmd->cdb[2] & 0xf0) >> 4;
-+ type = cmd->cdb[2] & 0x0f;
-+
-+ if (buffer_size != 24) {
-+ TRACE_PR("Invalid buffer size %d", buffer_size);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_parameter_list_length_invalid));
-+ goto out;
-+ }
-+
-+ if (!dev->pr_is_set) {
-+ TRACE_PR("%s", "There is no PR - do nothing");
-+ goto out;
-+ }
-+
-+ reg = tgt_dev->registrant;
-+
-+ TRACE_PR("Release: initiator %s/%d (%p), key %016llx, scope %d, type "
-+ "%d (tgt_dev %p)", debug_transport_id_to_initiator_name(
-+ cmd->sess->transport_id),
-+ cmd->sess->tgt->rel_tgt_id, reg, key, scope, type, tgt_dev);
-+
-+ /* We already checked reg is not NULL */
-+ if (reg->key != key) {
-+ TRACE_PR("Registrant's %p key %016llx mismatch with %016llx",
-+ reg, reg->key, key);
-+ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
-+ goto out;
-+ }
-+
-+ if (!scst_pr_is_holder(dev, reg)) {
-+ TRACE_PR("Registrant %p is not a holder - do nothing", reg);
-+ goto out;
-+ }
-+
-+ if (dev->pr_scope != scope || dev->pr_type != type) {
-+ TRACE_PR("%s", "Released scope or type do not match with "
-+ "holder");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_release));
-+ goto out;
-+ }
-+
-+ cur_pr_type = dev->pr_type; /* it will be cleared */
-+
-+ scst_pr_clear_reservation(dev);
-+
-+ switch (cur_pr_type) {
-+ case TYPE_WRITE_EXCLUSIVE_REGONLY:
-+ case TYPE_EXCLUSIVE_ACCESS_REGONLY:
-+ case TYPE_WRITE_EXCLUSIVE_ALL_REG:
-+ case TYPE_EXCLUSIVE_ACCESS_ALL_REG:
-+ scst_pr_send_ua_all(dev, reg,
-+ SCST_LOAD_SENSE(scst_sense_reservation_released));
-+ }
-+
-+ scst_pr_dump_prs(dev, false);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Called with dev_pr_mutex locked, no IRQ */
-+void scst_pr_clear(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size)
-+{
-+ __be64 key;
-+ struct scst_device *dev = cmd->dev;
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+ struct scst_dev_registrant *reg, *r, *t;
-+
-+ TRACE_ENTRY();
-+
-+ key = get_unaligned((__be64 *)&buffer[0]);
-+
-+ if (buffer_size != 24) {
-+ TRACE_PR("Invalid buffer size %d", buffer_size);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_parameter_list_length_invalid));
-+ goto out;
-+ }
-+
-+ reg = tgt_dev->registrant;
-+
-+ TRACE_PR("Clear: initiator %s/%d (%p), key %016llx (tgt_dev %p)",
-+ debug_transport_id_to_initiator_name(cmd->sess->transport_id),
-+ cmd->sess->tgt->rel_tgt_id, reg, key, tgt_dev);
-+
-+ /* We already checked reg is not NULL */
-+ if (reg->key != key) {
-+ TRACE_PR("Registrant's %p key %016llx mismatch with %016llx",
-+ reg, reg->key, key);
-+ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
-+ goto out;
-+ }
-+
-+ scst_pr_send_ua_all(dev, reg,
-+ SCST_LOAD_SENSE(scst_sense_reservation_preempted));
-+
-+ list_for_each_entry_safe(r, t, &dev->dev_registrants_list,
-+ dev_registrants_list_entry) {
-+ scst_pr_remove_registrant(dev, r);
-+ }
-+
-+ dev->pr_generation++;
-+
-+ scst_pr_dump_prs(dev, false);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void scst_pr_do_preempt(struct scst_cmd *cmd, uint8_t *buffer,
-+ int buffer_size, bool abort)
-+{
-+ __be64 key, action_key;
-+ int scope, type;
-+ struct scst_device *dev = cmd->dev;
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+ struct scst_dev_registrant *reg, *r, *rt;
-+ int existing_pr_type = dev->pr_type;
-+ int existing_pr_scope = dev->pr_scope;
-+ LIST_HEAD(preempt_list);
-+
-+ TRACE_ENTRY();
-+
-+ key = get_unaligned((__be64 *)&buffer[0]);
-+ action_key = get_unaligned((__be64 *)&buffer[8]);
-+	scope = (cmd->cdb[2] & 0xf0) >> 4;
-+ type = cmd->cdb[2] & 0x0f;
-+
-+ if (buffer_size != 24) {
-+ TRACE_PR("Invalid buffer size %d", buffer_size);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_parameter_list_length_invalid));
-+ goto out;
-+ }
-+
-+ if (!scst_pr_type_valid(type)) {
-+ TRACE_PR("Invalid reservation type %d", type);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out;
-+ }
-+
-+ reg = tgt_dev->registrant;
-+
-+ TRACE_PR("Preempt%s: initiator %s/%d (%p), key %016llx, action_key "
-+ "%016llx, scope %x type %x (tgt_dev %p)",
-+ abort ? " and abort" : "",
-+ debug_transport_id_to_initiator_name(cmd->sess->transport_id),
-+ cmd->sess->tgt->rel_tgt_id, reg, key, action_key, scope, type,
-+ tgt_dev);
-+
-+ /* We already checked reg is not NULL */
-+ if (reg->key != key) {
-+ TRACE_PR("Registrant's %p key %016llx mismatch with %016llx",
-+ reg, reg->key, key);
-+ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
-+ goto out;
-+ }
-+
-+ if (!dev->pr_is_set) {
-+ scst_pr_find_registrants_list_key(dev, action_key,
-+ &preempt_list);
-+ if (list_empty(&preempt_list))
-+ goto out_error;
-+ list_for_each_entry_safe(r, rt, &preempt_list, aux_list_entry) {
-+ if (abort)
-+ scst_pr_abort_reg(dev, cmd, r);
-+ if (r != reg) {
-+ scst_pr_send_ua_reg(dev, r, SCST_LOAD_SENSE(
-+ scst_sense_registrations_preempted));
-+ scst_pr_remove_registrant(dev, r);
-+ }
-+ }
-+ goto done;
-+ }
-+
-+ if (dev->pr_type == TYPE_WRITE_EXCLUSIVE_ALL_REG ||
-+ dev->pr_type == TYPE_EXCLUSIVE_ACCESS_ALL_REG) {
-+ if (action_key == 0) {
-+ scst_pr_find_registrants_list_all(dev, reg,
-+ &preempt_list);
-+ list_for_each_entry_safe(r, rt, &preempt_list,
-+ aux_list_entry) {
-+ BUG_ON(r == reg);
-+ if (abort)
-+ scst_pr_abort_reg(dev, cmd, r);
-+ scst_pr_send_ua_reg(dev, r,
-+ SCST_LOAD_SENSE(
-+ scst_sense_registrations_preempted));
-+ scst_pr_remove_registrant(dev, r);
-+ }
-+ scst_pr_set_holder(dev, reg, scope, type);
-+ } else {
-+ scst_pr_find_registrants_list_key(dev, action_key,
-+ &preempt_list);
-+ if (list_empty(&preempt_list))
-+ goto out_error;
-+ list_for_each_entry_safe(r, rt, &preempt_list,
-+ aux_list_entry) {
-+ if (abort)
-+ scst_pr_abort_reg(dev, cmd, r);
-+ if (r != reg) {
-+ scst_pr_send_ua_reg(dev, r,
-+ SCST_LOAD_SENSE(
-+ scst_sense_registrations_preempted));
-+ scst_pr_remove_registrant(dev, r);
-+ }
-+ }
-+ }
-+ goto done;
-+ }
-+
-+ if (dev->pr_holder->key != action_key) {
-+ if (action_key == 0) {
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
-+ scst_sense_invalid_field_in_parm_list));
-+ goto out;
-+ } else {
-+ scst_pr_find_registrants_list_key(dev, action_key,
-+ &preempt_list);
-+ if (list_empty(&preempt_list))
-+ goto out_error;
-+ list_for_each_entry_safe(r, rt, &preempt_list,
-+ aux_list_entry) {
-+ if (abort)
-+ scst_pr_abort_reg(dev, cmd, r);
-+ if (r != reg)
-+ scst_pr_send_ua_reg(dev, r,
-+ SCST_LOAD_SENSE(
-+ scst_sense_registrations_preempted));
-+ scst_pr_remove_registrant(dev, r);
-+ }
-+ goto done;
-+ }
-+ }
-+
-+ scst_pr_find_registrants_list_key(dev, action_key,
-+ &preempt_list);
-+
-+ list_for_each_entry_safe(r, rt, &preempt_list, aux_list_entry) {
-+ if (abort)
-+ scst_pr_abort_reg(dev, cmd, r);
-+ if (r != reg) {
-+ scst_pr_send_ua_reg(dev, r, SCST_LOAD_SENSE(
-+ scst_sense_registrations_preempted));
-+ scst_pr_remove_registrant(dev, r);
-+ }
-+ }
-+
-+ scst_pr_set_holder(dev, reg, scope, type);
-+
-+ if (existing_pr_type != type || existing_pr_scope != scope) {
-+ list_for_each_entry(r, &dev->dev_registrants_list,
-+ dev_registrants_list_entry) {
-+ if (r != reg)
-+ scst_pr_send_ua_reg(dev, r, SCST_LOAD_SENSE(
-+ scst_sense_reservation_released));
-+ }
-+ }
-+
-+done:
-+ dev->pr_generation++;
-+
-+ scst_pr_dump_prs(dev, false);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+
-+out_error:
-+ TRACE_PR("Invalid key %016llx", action_key);
-+ scst_set_cmd_error_status(cmd, SAM_STAT_RESERVATION_CONFLICT);
-+ goto out;
-+}
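-+
-+/*
-+ * Summary of the preemption cases handled above:
-+ *
-+ * 1. No reservation held: the registrants whose key matches action_key
-+ *    are preempted (their commands are also aborted in the preempt and
-+ *    abort case); no reservation is created.
-+ *
-+ * 2. All-registrants reservation: action_key 0 preempts every other
-+ *    registrant and makes the caller the holder; a non-zero action_key
-+ *    preempts by key.
-+ *
-+ * 3. Key-based reservation: action_key 0 is an error; an action_key
-+ *    naming non-holders only removes those registrants; an action_key
-+ *    naming the holder also moves the reservation to the caller,
-+ *    notifying the remaining registrants if scope or type changed.
-+ */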
-+
-+/* Called with dev_pr_mutex locked, no IRQ */
-+void scst_pr_preempt(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size)
-+{
-+ TRACE_ENTRY();
-+
-+ scst_pr_do_preempt(cmd, buffer, buffer_size, false);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void scst_cmd_done_pr_preempt(struct scst_cmd *cmd, int next_state,
-+ enum scst_exec_context pref_context)
-+{
-+ void (*saved_cmd_done) (struct scst_cmd *cmd, int next_state,
-+ enum scst_exec_context pref_context);
-+
-+ TRACE_ENTRY();
-+
-+ if (!atomic_dec_and_test(&cmd->pr_abort_counter->pr_abort_pending_cnt))
-+ goto out;
-+
-+ saved_cmd_done = cmd->pr_abort_counter->saved_cmd_done;
-+ kfree(cmd->pr_abort_counter);
-+ cmd->pr_abort_counter = NULL;
-+
-+ saved_cmd_done(cmd, next_state, pref_context);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * Called with dev_pr_mutex locked, no IRQ. Expects session_list_lock
-+ * not locked
-+ */
-+void scst_pr_preempt_and_abort(struct scst_cmd *cmd, uint8_t *buffer,
-+ int buffer_size)
-+{
-+ TRACE_ENTRY();
-+
-+ cmd->pr_abort_counter = kzalloc(sizeof(*cmd->pr_abort_counter),
-+ GFP_KERNEL);
-+ if (cmd->pr_abort_counter == NULL) {
-+ PRINT_ERROR("Unable to allocate PR abort counter (size %zd)",
-+ sizeof(*cmd->pr_abort_counter));
-+ scst_set_busy(cmd);
-+ goto out;
-+ }
-+
-+	/*
-+	 * The initial count of 1 protects cmd from being completed by the
-+	 * TM thread too early.
-+	 */
-+ atomic_set(&cmd->pr_abort_counter->pr_abort_pending_cnt, 1);
-+ atomic_set(&cmd->pr_abort_counter->pr_aborting_cnt, 1);
-+ init_completion(&cmd->pr_abort_counter->pr_aborting_cmpl);
-+
-+ cmd->pr_abort_counter->saved_cmd_done = cmd->scst_cmd_done;
-+ cmd->scst_cmd_done = scst_cmd_done_pr_preempt;
-+
-+ scst_pr_do_preempt(cmd, buffer, buffer_size, true);
-+
-+ if (!atomic_dec_and_test(&cmd->pr_abort_counter->pr_aborting_cnt))
-+ wait_for_completion(&cmd->pr_abort_counter->pr_aborting_cmpl);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
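-+
-+/*
-+ * Lifecycle of the pr_abort_counter set up above: pr_abort_pending_cnt
-+ * starts at 1 so that the command cannot be completed before
-+ * scst_pr_do_preempt() returns; scst_pr_abort_reg() (not shown here) is
-+ * expected to increment the counters for every command it aborts.
-+ * scst_cmd_done_pr_preempt() then runs the saved completion callback
-+ * only when the last pending abort drops the counter to zero, and
-+ * pr_aborting_cnt/pr_aborting_cmpl make this function wait until all
-+ * aborts have been issued.
-+ */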
-+
-+/* Checks if this is a Compatible Reservation Handling (CRH) case */
-+bool scst_pr_crh_case(struct scst_cmd *cmd)
-+{
-+ bool allowed;
-+ struct scst_device *dev = cmd->dev;
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+ struct scst_dev_registrant *reg;
-+ uint8_t type;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Test if there is a CRH case for command %s (0x%x) from "
-+ "%s", cmd->op_name, cmd->cdb[0], cmd->sess->initiator_name);
-+
-+ if (!dev->pr_is_set) {
-+ TRACE_PR("%s", "PR not set");
-+ allowed = false;
-+ goto out;
-+ }
-+
-+ reg = tgt_dev->registrant;
-+ type = dev->pr_type;
-+
-+ switch (type) {
-+ case TYPE_WRITE_EXCLUSIVE:
-+ case TYPE_EXCLUSIVE_ACCESS:
-+ WARN_ON(dev->pr_holder == NULL);
-+		allowed = (reg == dev->pr_holder);
-+ break;
-+
-+ case TYPE_WRITE_EXCLUSIVE_REGONLY:
-+ case TYPE_EXCLUSIVE_ACCESS_REGONLY:
-+ case TYPE_WRITE_EXCLUSIVE_ALL_REG:
-+ case TYPE_EXCLUSIVE_ACCESS_ALL_REG:
-+ allowed = (reg != NULL);
-+ break;
-+
-+ default:
-+ PRINT_ERROR("Invalid PR type %x", type);
-+ allowed = false;
-+ break;
-+ }
-+
-+ if (!allowed)
-+ TRACE_PR("Command %s (0x%x) from %s rejected due to not CRH "
-+ "reservation", cmd->op_name, cmd->cdb[0],
-+ cmd->sess->initiator_name);
-+ else
-+ TRACE_DBG("Command %s (0x%x) from %s is allowed to execute "
-+ "due to CRH", cmd->op_name, cmd->cdb[0],
-+ cmd->sess->initiator_name);
-+
-+out:
-+ TRACE_EXIT_RES(allowed);
-+ return allowed;
-+}
-+
-+/* Check if command allowed in presence of reservation */
-+bool scst_pr_is_cmd_allowed(struct scst_cmd *cmd)
-+{
-+ bool allowed;
-+ struct scst_device *dev = cmd->dev;
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+ struct scst_dev_registrant *reg;
-+ uint8_t type;
-+ bool unlock;
-+
-+ TRACE_ENTRY();
-+
-+ unlock = scst_pr_read_lock(cmd);
-+
-+ TRACE_DBG("Testing if command %s (0x%x) from %s allowed to execute",
-+ cmd->op_name, cmd->cdb[0], cmd->sess->initiator_name);
-+
-+ /* Recheck, because it can change while we were waiting for the lock */
-+ if (unlikely(!dev->pr_is_set)) {
-+ allowed = true;
-+ goto out_unlock;
-+ }
-+
-+ reg = tgt_dev->registrant;
-+ type = dev->pr_type;
-+
-+ switch (type) {
-+ case TYPE_WRITE_EXCLUSIVE:
-+ if (reg && reg == dev->pr_holder)
-+ allowed = true;
-+ else
-+ allowed = (cmd->op_flags & SCST_WRITE_EXCL_ALLOWED) != 0;
-+ break;
-+
-+ case TYPE_EXCLUSIVE_ACCESS:
-+ if (reg && reg == dev->pr_holder)
-+ allowed = true;
-+ else
-+ allowed = (cmd->op_flags & SCST_EXCL_ACCESS_ALLOWED) != 0;
-+ break;
-+
-+ case TYPE_WRITE_EXCLUSIVE_REGONLY:
-+ case TYPE_WRITE_EXCLUSIVE_ALL_REG:
-+ if (reg)
-+ allowed = true;
-+ else
-+ allowed = (cmd->op_flags & SCST_WRITE_EXCL_ALLOWED) != 0;
-+ break;
-+
-+ case TYPE_EXCLUSIVE_ACCESS_REGONLY:
-+ case TYPE_EXCLUSIVE_ACCESS_ALL_REG:
-+ if (reg)
-+ allowed = true;
-+ else
-+ allowed = (cmd->op_flags & SCST_EXCL_ACCESS_ALLOWED) != 0;
-+ break;
-+
-+ default:
-+ PRINT_ERROR("Invalid PR type %x", type);
-+ allowed = false;
-+ break;
-+ }
-+
-+ if (!allowed)
-+ TRACE_PR("Command %s (0x%x) from %s rejected due "
-+ "to PR", cmd->op_name, cmd->cdb[0],
-+ cmd->sess->initiator_name);
-+ else
-+ TRACE_DBG("Command %s (0x%x) from %s is allowed to execute",
-+ cmd->op_name, cmd->cdb[0], cmd->sess->initiator_name);
-+
-+out_unlock:
-+ scst_pr_read_unlock(cmd, unlock);
-+
-+ TRACE_EXIT_RES(allowed);
-+ return allowed;
-+}
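-+
-+/*
-+ * The mapping implemented above, in table form:
-+ *
-+ *	reservation type	always allowed for	others need op_flags
-+ *	WR EXCL			the holder		SCST_WRITE_EXCL_ALLOWED
-+ *	EXCL ACCESS		the holder		SCST_EXCL_ACCESS_ALLOWED
-+ *	WR EXCL REGONLY/ALL_REG	any registrant		SCST_WRITE_EXCL_ALLOWED
-+ *	EXCL ACC REGONLY/ALL_REG any registrant		SCST_EXCL_ACCESS_ALLOWED
-+ */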
-+
-+/* Called with dev_pr_mutex locked, no IRQ */
-+void scst_pr_read_keys(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size)
-+{
-+	int offset = 0, size, size_max;
-+ struct scst_device *dev = cmd->dev;
-+ struct scst_dev_registrant *reg;
-+
-+ TRACE_ENTRY();
-+
-+ if (buffer_size < 8) {
-+ TRACE_PR("buffer_size too small: %d. expected >= 8 "
-+ "(buffer %p)", buffer_size, buffer);
-+ goto skip;
-+ }
-+
-+ TRACE_PR("Read Keys (dev %s): PRGen %d", dev->virt_name,
-+ dev->pr_generation);
-+
-+ put_unaligned(cpu_to_be32(dev->pr_generation), (__be32 *)&buffer[0]);
-+
-+ offset = 8;
-+ size = 0;
-+ size_max = buffer_size - 8;
-+
-+ list_for_each_entry(reg, &dev->dev_registrants_list,
-+ dev_registrants_list_entry) {
-+ if (size_max - size >= 8) {
-+ TRACE_PR("Read Keys (dev %s): key 0x%llx",
-+ dev->virt_name, reg->key);
-+
-+ WARN_ON(reg->key == 0);
-+
-+			put_unaligned(reg->key, (__be64 *)&buffer[offset]);
-+
-+ offset += 8;
-+ }
-+ size += 8;
-+ }
-+
-+ put_unaligned(cpu_to_be32(size), (__be32 *)&buffer[4]);
-+
-+skip:
-+ scst_set_resp_data_len(cmd, offset);
-+
-+ TRACE_EXIT();
-+ return;
-+}
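-+
-+/*
-+ * For reference, the READ KEYS payload built above:
-+ *
-+ *	bytes 0-3	PRGENERATION (dev->pr_generation)
-+ *	bytes 4-7	ADDITIONAL LENGTH: size of the whole key list in
-+ *			bytes, even when it did not fit in the buffer
-+ *	bytes 8-	one 8-byte reservation key per registrant
-+ */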
-+
-+/* Called with dev_pr_mutex locked, no IRQ */
-+void scst_pr_read_reservation(struct scst_cmd *cmd, uint8_t *buffer,
-+ int buffer_size)
-+{
-+ struct scst_device *dev = cmd->dev;
-+ uint8_t b[24];
-+ int size = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (buffer_size < 8) {
-+ TRACE_PR("buffer_size too small: %d. expected >= 8 "
-+ "(buffer %p)", buffer_size, buffer);
-+ goto skip;
-+ }
-+
-+ memset(b, 0, sizeof(b));
-+
-+ put_unaligned(cpu_to_be32(dev->pr_generation), (__be32 *)&b[0]);
-+
-+ if (!dev->pr_is_set) {
-+ TRACE_PR("Read Reservation: no reservations for dev %s",
-+ dev->virt_name);
-+		/* b[4..7]: ADDITIONAL LENGTH 0, already zeroed by memset() */
-+
-+ size = 8;
-+ } else {
-+ __be64 key = dev->pr_holder ? dev->pr_holder->key : 0;
-+
-+ TRACE_PR("Read Reservation: dev %s, holder %p, key 0x%llx, "
-+ "scope %d, type %d", dev->virt_name, dev->pr_holder,
-+ key, dev->pr_scope, dev->pr_type);
-+
-+		/* b[4..6]: already zeroed by memset() */
-+		b[7] = 0x10; /* ADDITIONAL LENGTH: 16 bytes of descriptor */
-+
-+ put_unaligned(key, (__be64 *)&b[8]);
-+ b[21] = dev->pr_scope << 4 | dev->pr_type;
-+
-+ size = 24;
-+ }
-+
-+ memset(buffer, 0, buffer_size);
-+ memcpy(buffer, b, min(size, buffer_size));
-+
-+skip:
-+ scst_set_resp_data_len(cmd, size);
-+
-+ TRACE_EXIT();
-+ return;
-+}
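-+
-+/*
-+ * For reference, the READ RESERVATION payload built above:
-+ *
-+ *	bytes 0-3	PRGENERATION
-+ *	bytes 4-7	ADDITIONAL LENGTH: 0 without a reservation, 0x10
-+ *			when the descriptor below follows
-+ *	bytes 8-15	holder's reservation key
-+ *	byte  21	scope (upper nibble) | type (lower nibble)
-+ */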
-+
-+/* Called with dev_pr_mutex locked, no IRQ */
-+void scst_pr_report_caps(struct scst_cmd *cmd, uint8_t *buffer, int buffer_size)
-+{
-+ int offset = 0;
-+ unsigned int crh = 1;
-+ unsigned int atp_c = 1;
-+ unsigned int sip_c = 1;
-+ unsigned int ptpl_c = 1;
-+ struct scst_device *dev = cmd->dev;
-+
-+ TRACE_ENTRY();
-+
-+ if (buffer_size < 8) {
-+ TRACE_PR("buffer_size too small: %d. expected >= 8 "
-+ "(buffer %p)", buffer_size, buffer);
-+ goto skip;
-+ }
-+
-+ TRACE_PR("Reporting capabilities (dev %s): crh %x, sip_c %x, "
-+ "atp_c %x, ptpl_c %x, pr_aptpl %x", dev->virt_name,
-+ crh, sip_c, atp_c, ptpl_c, dev->pr_aptpl);
-+
-+ buffer[0] = 0;
-+ buffer[1] = 8;
-+
-+ buffer[2] = crh << 4 | sip_c << 3 | atp_c << 2 | ptpl_c;
-+ buffer[3] = (1 << 7) | (dev->pr_aptpl > 0 ? 1 : 0);
-+
-+	/* All reservation types supported */
-+ buffer[4] = 0xEA;
-+ buffer[5] = 0x1;
-+
-+ offset += 8;
-+
-+skip:
-+ scst_set_resp_data_len(cmd, offset);
-+
-+ TRACE_EXIT();
-+ return;
-+}
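-+
-+/*
-+ * For reference, the REPORT CAPABILITIES payload built above:
-+ *
-+ *	bytes 0-1	LENGTH (8)
-+ *	byte  2		bit 4 CRH, bit 3 SIP_C, bit 2 ATP_C, bit 0 PTPL_C
-+ *	byte  3		bit 7 TMV, bit 0 PTPL_A (dev->pr_aptpl)
-+ *	bytes 4-5	PERSISTENT RESERVATION TYPE MASK: 0xEA 0x01 marks
-+ *			all six reservation types as supported
-+ */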
-+
-+/* Called with dev_pr_mutex locked, no IRQ */
-+void scst_pr_read_full_status(struct scst_cmd *cmd, uint8_t *buffer,
-+ int buffer_size)
-+{
-+ int offset = 0, size, size_max;
-+ struct scst_device *dev = cmd->dev;
-+ struct scst_dev_registrant *reg;
-+
-+ TRACE_ENTRY();
-+
-+ if (buffer_size < 8)
-+ goto skip;
-+
-+ put_unaligned(cpu_to_be32(dev->pr_generation), (__be32 *)&buffer[0]);
-+ offset += 8;
-+
-+ size = 0;
-+ size_max = buffer_size - 8;
-+
-+ list_for_each_entry(reg, &dev->dev_registrants_list,
-+ dev_registrants_list_entry) {
-+ int ts;
-+ int rec_len;
-+
-+ ts = tid_size(reg->transport_id);
-+ rec_len = 24 + ts;
-+
-+		if (size_max - size >= rec_len) {
-+ memset(&buffer[offset], 0, rec_len);
-+
-+ put_unaligned(reg->key, (__be64 *)(&buffer[offset]));
-+
-+ if (dev->pr_is_set && scst_pr_is_holder(dev, reg)) {
-+ buffer[offset + 12] = 1;
-+ buffer[offset + 13] = (dev->pr_scope << 4) | dev->pr_type;
-+ }
-+
-+ put_unaligned(cpu_to_be16(reg->rel_tgt_id),
-+ (__be16 *)&buffer[offset + 18]);
-+ put_unaligned(cpu_to_be32(ts),
-+ (__be32 *)&buffer[offset + 20]);
-+
-+ memcpy(&buffer[offset + 24], reg->transport_id, ts);
-+
-+ offset += rec_len;
-+ }
-+ size += rec_len;
-+ }
-+
-+ put_unaligned(cpu_to_be32(size), (__be32 *)&buffer[4]);
-+
-+skip:
-+ scst_set_resp_data_len(cmd, offset);
-+
-+ TRACE_EXIT();
-+ return;
-+}
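-+
-+/*
-+ * For reference, each READ FULL STATUS descriptor built above (24 bytes
-+ * plus the TransportID):
-+ *
-+ *	bytes  0-7	reservation key
-+ *	byte  12	bit 0 R_HOLDER, set for the reservation holder
-+ *	byte  13	scope (upper nibble) | type (lower nibble),
-+ *			holder only
-+ *	bytes 18-19	relative target port identifier
-+ *	bytes 20-23	ADDITIONAL DESCRIPTOR LENGTH (TransportID size)
-+ *	bytes 24-	TransportID of the registered I_T nexus
-+ */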
-diff -uprN orig/linux-3.2/drivers/scst/scst_sysfs.c linux-3.2/drivers/scst/scst_sysfs.c
---- orig/linux-3.2/drivers/scst/scst_sysfs.c
-+++ linux-3.2/drivers/scst/scst_sysfs.c
-@@ -0,0 +1,6224 @@
-+/*
-+ * scst_sysfs.c
-+ *
-+ * Copyright (C) 2009 Daniel Henrique Debonzi <debonzi@linux.vnet.ibm.com>
-+ * Copyright (C) 2009 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2009 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/kobject.h>
-+#include <linux/string.h>
-+#include <linux/sysfs.h>
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/ctype.h>
-+#include <linux/slab.h>
-+#include <linux/kthread.h>
-+
-+#include <scst/scst.h>
-+#include "scst_priv.h"
-+#include "scst_pres.h"
-+
-+static DECLARE_COMPLETION(scst_sysfs_root_release_completion);
-+
-+static struct kobject *scst_targets_kobj;
-+static struct kobject *scst_devices_kobj;
-+static struct kobject *scst_handlers_kobj;
-+static struct kobject *scst_device_groups_kobj;
-+
-+static const char *const scst_dev_handler_types[] = {
-+ "Direct-access device (e.g., magnetic disk)",
-+ "Sequential-access device (e.g., magnetic tape)",
-+ "Printer device",
-+ "Processor device",
-+ "Write-once device (e.g., some optical disks)",
-+ "CD-ROM device",
-+ "Scanner device (obsolete)",
-+ "Optical memory device (e.g., some optical disks)",
-+ "Medium changer device (e.g., jukeboxes)",
-+ "Communications device (obsolete)",
-+ "Defined by ASC IT8 (Graphic arts pre-press devices)",
-+ "Defined by ASC IT8 (Graphic arts pre-press devices)",
-+ "Storage array controller device (e.g., RAID)",
-+ "Enclosure services device",
-+ "Simplified direct-access device (e.g., magnetic disk)",
-+ "Optical card reader/writer device"
-+};
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+
-+static DEFINE_MUTEX(scst_log_mutex);
-+
-+static struct scst_trace_log scst_trace_tbl[] = {
-+ { TRACE_OUT_OF_MEM, "out_of_mem" },
-+ { TRACE_MINOR, "minor" },
-+ { TRACE_SG_OP, "sg" },
-+ { TRACE_MEMORY, "mem" },
-+ { TRACE_BUFF, "buff" },
-+#ifndef GENERATING_UPSTREAM_PATCH
-+ { TRACE_ENTRYEXIT, "entryexit" },
-+#endif
-+ { TRACE_PID, "pid" },
-+ { TRACE_LINE, "line" },
-+ { TRACE_FUNCTION, "function" },
-+ { TRACE_DEBUG, "debug" },
-+ { TRACE_SPECIAL, "special" },
-+ { TRACE_SCSI, "scsi" },
-+ { TRACE_MGMT, "mgmt" },
-+ { TRACE_MGMT_DEBUG, "mgmt_dbg" },
-+ { TRACE_FLOW_CONTROL, "flow_control" },
-+ { TRACE_PRES, "pr" },
-+ { 0, NULL }
-+};
-+
-+static struct scst_trace_log scst_local_trace_tbl[] = {
-+ { TRACE_RTRY, "retry" },
-+ { TRACE_SCSI_SERIALIZING, "scsi_serializing" },
-+ { TRACE_RCV_BOT, "recv_bot" },
-+ { TRACE_SND_BOT, "send_bot" },
-+ { TRACE_RCV_TOP, "recv_top" },
-+ { TRACE_SND_TOP, "send_top" },
-+ { 0, NULL }
-+};
-+
-+static void scst_read_trace_tbl(const struct scst_trace_log *tbl, char *buf,
-+ unsigned long log_level, int *pos)
-+{
-+ const struct scst_trace_log *t = tbl;
-+
-+ if (t == NULL)
-+ goto out;
-+
-+ while (t->token) {
-+ if (log_level & t->val) {
-+ *pos += sprintf(&buf[*pos], "%s%s",
-+ (*pos == 0) ? "" : " | ",
-+ t->token);
-+ }
-+ t++;
-+ }
-+out:
-+ return;
-+}
-+
-+static ssize_t scst_trace_level_show(const struct scst_trace_log *local_tbl,
-+ unsigned long log_level, char *buf, const char *help)
-+{
-+ int pos = 0;
-+
-+ scst_read_trace_tbl(scst_trace_tbl, buf, log_level, &pos);
-+ scst_read_trace_tbl(local_tbl, buf, log_level, &pos);
-+
-+ pos += sprintf(&buf[pos], "\n\n\nUsage:\n"
-+ " echo \"all|none|default\" >trace_level\n"
-+ " echo \"value DEC|0xHEX|0OCT\" >trace_level\n"
-+ " echo \"add|del TOKEN\" >trace_level\n"
-+ "\nwhere TOKEN is one of [debug, function, line, pid,\n"
-+#ifndef GENERATING_UPSTREAM_PATCH
-+ " entryexit, buff, mem, sg, out_of_mem,\n"
-+#else
-+ " buff, mem, sg, out_of_mem,\n"
-+#endif
-+ " special, scsi, mgmt, minor,\n"
-+ " mgmt_dbg, scsi_serializing,\n"
-+ " retry, recv_bot, send_bot, recv_top, pr,\n"
-+ " send_top%s]\n", help != NULL ? help : "");
-+
-+ return pos;
-+}
-+
-+static int scst_write_trace(const char *buf, size_t length,
-+ unsigned long *log_level, unsigned long default_level,
-+ const char *name, const struct scst_trace_log *tbl)
-+{
-+ int res = length;
-+ int action;
-+ unsigned long level = 0, oldlevel;
-+ char *buffer, *p, *e;
-+ const struct scst_trace_log *t;
-+ enum {
-+ SCST_TRACE_ACTION_ALL = 1,
-+ SCST_TRACE_ACTION_NONE = 2,
-+ SCST_TRACE_ACTION_DEFAULT = 3,
-+ SCST_TRACE_ACTION_ADD = 4,
-+ SCST_TRACE_ACTION_DEL = 5,
-+ SCST_TRACE_ACTION_VALUE = 6,
-+ };
-+
-+ TRACE_ENTRY();
-+
-+ if ((buf == NULL) || (length == 0)) {
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ buffer = kasprintf(GFP_KERNEL, "%.*s", (int)length, buf);
-+ if (buffer == NULL) {
-+ PRINT_ERROR("Unable to alloc intermediate buffer (size %zd)",
-+ length+1);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ TRACE_DBG("buffer %s", buffer);
-+
-+ p = buffer;
-+ if (!strncasecmp("all", p, 3)) {
-+ action = SCST_TRACE_ACTION_ALL;
-+ } else if (!strncasecmp("none", p, 4) || !strncasecmp("null", p, 4)) {
-+ action = SCST_TRACE_ACTION_NONE;
-+ } else if (!strncasecmp("default", p, 7)) {
-+ action = SCST_TRACE_ACTION_DEFAULT;
-+ } else if (!strncasecmp("add", p, 3)) {
-+ p += 3;
-+ action = SCST_TRACE_ACTION_ADD;
-+ } else if (!strncasecmp("del", p, 3)) {
-+ p += 3;
-+ action = SCST_TRACE_ACTION_DEL;
-+ } else if (!strncasecmp("value", p, 5)) {
-+ p += 5;
-+ action = SCST_TRACE_ACTION_VALUE;
-+ } else {
-+ if (p[strlen(p) - 1] == '\n')
-+ p[strlen(p) - 1] = '\0';
-+ PRINT_ERROR("Unknown action \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ switch (action) {
-+ case SCST_TRACE_ACTION_ADD:
-+ case SCST_TRACE_ACTION_DEL:
-+ case SCST_TRACE_ACTION_VALUE:
-+ if (!isspace(*p)) {
-+ PRINT_ERROR("%s", "Syntax error");
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+ }
-+
-+ switch (action) {
-+ case SCST_TRACE_ACTION_ALL:
-+ level = TRACE_ALL;
-+ break;
-+ case SCST_TRACE_ACTION_DEFAULT:
-+ level = default_level;
-+ break;
-+ case SCST_TRACE_ACTION_NONE:
-+ level = TRACE_NULL;
-+ break;
-+ case SCST_TRACE_ACTION_ADD:
-+ case SCST_TRACE_ACTION_DEL:
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+ e = p;
-+ while (!isspace(*e) && *e != '\0')
-+ e++;
-+ *e = 0;
-+ if (tbl) {
-+ t = tbl;
-+ while (t->token) {
-+ if (!strcasecmp(p, t->token)) {
-+ level = t->val;
-+ break;
-+ }
-+ t++;
-+ }
-+ }
-+ if (level == 0) {
-+ t = scst_trace_tbl;
-+ while (t->token) {
-+ if (!strcasecmp(p, t->token)) {
-+ level = t->val;
-+ break;
-+ }
-+ t++;
-+ }
-+ }
-+ if (level == 0) {
-+ PRINT_ERROR("Unknown token \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+ break;
-+ case SCST_TRACE_ACTION_VALUE:
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+ res = strict_strtoul(p, 0, &level);
-+ if (res != 0) {
-+ PRINT_ERROR("Invalid trace value \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+ break;
-+ }
-+
-+ oldlevel = *log_level;
-+
-+ switch (action) {
-+ case SCST_TRACE_ACTION_ADD:
-+ *log_level |= level;
-+ break;
-+ case SCST_TRACE_ACTION_DEL:
-+ *log_level &= ~level;
-+ break;
-+ default:
-+ *log_level = level;
-+ break;
-+ }
-+
-+ PRINT_INFO("Changed trace level for \"%s\": old 0x%08lx, new 0x%08lx",
-+ name, oldlevel, *log_level);
-+
-+out_free:
-+ kfree(buffer);
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
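-+
-+/*
-+ * Example shell usage accepted by the parser above (the attribute path
-+ * depends on the kobject the trace_level file is attached to;
-+ * /sys/kernel/scst_tgt is assumed here as the usual SCST sysfs root):
-+ *
-+ *	echo "add scsi" >/sys/kernel/scst_tgt/trace_level
-+ *	echo "del mgmt_dbg" >/sys/kernel/scst_tgt/trace_level
-+ *	echo "value 0x58" >/sys/kernel/scst_tgt/trace_level
-+ *	echo "default" >/sys/kernel/scst_tgt/trace_level
-+ */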
-+
-+#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
-+
-+/**
-+ ** Sysfs work
-+ **/
-+
-+static DEFINE_SPINLOCK(sysfs_work_lock);
-+static LIST_HEAD(sysfs_work_list);
-+static DECLARE_WAIT_QUEUE_HEAD(sysfs_work_waitQ);
-+static int active_sysfs_works;
-+static int last_sysfs_work_res;
-+static struct task_struct *sysfs_work_thread;
-+
-+/**
-+ * scst_alloc_sysfs_work() - allocates a sysfs work
-+ */
-+int scst_alloc_sysfs_work(int (*sysfs_work_fn)(struct scst_sysfs_work_item *),
-+ bool read_only_action, struct scst_sysfs_work_item **res_work)
-+{
-+ int res = 0;
-+ struct scst_sysfs_work_item *work;
-+
-+ TRACE_ENTRY();
-+
-+ if (sysfs_work_fn == NULL) {
-+ PRINT_ERROR("%s", "sysfs_work_fn is NULL");
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ *res_work = NULL;
-+
-+ work = kzalloc(sizeof(*work), GFP_KERNEL);
-+ if (work == NULL) {
-+ PRINT_ERROR("Unable to alloc sysfs work (size %zd)",
-+ sizeof(*work));
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ work->read_only_action = read_only_action;
-+ kref_init(&work->sysfs_work_kref);
-+ init_completion(&work->sysfs_work_done);
-+ work->sysfs_work_fn = sysfs_work_fn;
-+
-+ *res_work = work;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL(scst_alloc_sysfs_work);
-+
-+static void scst_sysfs_work_release(struct kref *kref)
-+{
-+ struct scst_sysfs_work_item *work;
-+
-+ TRACE_ENTRY();
-+
-+ work = container_of(kref, struct scst_sysfs_work_item,
-+ sysfs_work_kref);
-+
-+ TRACE_DBG("Freeing sysfs work %p (buf %p)", work, work->buf);
-+
-+ kfree(work->buf);
-+ kfree(work->res_buf);
-+ kfree(work);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ * scst_sysfs_work_get() - increases ref counter of the sysfs work
-+ */
-+void scst_sysfs_work_get(struct scst_sysfs_work_item *work)
-+{
-+ kref_get(&work->sysfs_work_kref);
-+}
-+EXPORT_SYMBOL(scst_sysfs_work_get);
-+
-+/**
-+ * scst_sysfs_work_put() - decreases ref counter of the sysfs work
-+ */
-+void scst_sysfs_work_put(struct scst_sysfs_work_item *work)
-+{
-+ kref_put(&work->sysfs_work_kref, scst_sysfs_work_release);
-+}
-+EXPORT_SYMBOL(scst_sysfs_work_put);
-+
-+/* Called under sysfs_work_lock; drops and reacquires it inside */
-+static void scst_process_sysfs_works(void)
-+ __releases(&sysfs_work_lock)
-+ __acquires(&sysfs_work_lock)
-+{
-+ struct scst_sysfs_work_item *work;
-+
-+ TRACE_ENTRY();
-+
-+ while (!list_empty(&sysfs_work_list)) {
-+ work = list_entry(sysfs_work_list.next,
-+ struct scst_sysfs_work_item, sysfs_work_list_entry);
-+ list_del(&work->sysfs_work_list_entry);
-+ spin_unlock(&sysfs_work_lock);
-+
-+ TRACE_DBG("Sysfs work %p", work);
-+
-+ work->work_res = work->sysfs_work_fn(work);
-+
-+ spin_lock(&sysfs_work_lock);
-+ if (!work->read_only_action)
-+ last_sysfs_work_res = work->work_res;
-+ active_sysfs_works--;
-+ spin_unlock(&sysfs_work_lock);
-+
-+ complete_all(&work->sysfs_work_done);
-+ kref_put(&work->sysfs_work_kref, scst_sysfs_work_release);
-+
-+ spin_lock(&sysfs_work_lock);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static inline int test_sysfs_work_list(void)
-+{
-+ int res = !list_empty(&sysfs_work_list) ||
-+ unlikely(kthread_should_stop());
-+ return res;
-+}
-+
-+static int sysfs_work_thread_fn(void *arg)
-+{
-+ bool one_time_only = (bool)arg;
-+
-+ TRACE_ENTRY();
-+
-+ if (!one_time_only)
-+ PRINT_INFO("User interface thread started, PID %d", current->pid);
-+
-+ current->flags |= PF_NOFREEZE;
-+
-+ set_user_nice(current, -10);
-+
-+ spin_lock(&sysfs_work_lock);
-+ while (!kthread_should_stop()) {
-+ wait_queue_t wait;
-+ init_waitqueue_entry(&wait, current);
-+
-+ if (one_time_only && !test_sysfs_work_list())
-+ break;
-+
-+ if (!test_sysfs_work_list()) {
-+ add_wait_queue_exclusive(&sysfs_work_waitQ, &wait);
-+ for (;;) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ if (test_sysfs_work_list())
-+ break;
-+ spin_unlock(&sysfs_work_lock);
-+ schedule();
-+ spin_lock(&sysfs_work_lock);
-+ }
-+ set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&sysfs_work_waitQ, &wait);
-+ }
-+
-+ scst_process_sysfs_works();
-+ }
-+ spin_unlock(&sysfs_work_lock);
-+
-+ if (!one_time_only) {
-+ /*
-+ * If kthread_should_stop() is true, we are guaranteed to be
-+		 * in the module unload path, so the work list must be empty.
-+ */
-+ BUG_ON(!list_empty(&sysfs_work_list));
-+
-+ PRINT_INFO("User interface thread PID %d finished", current->pid);
-+ }
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+/**
-+ * scst_sysfs_queue_wait_work() - waits for the work to complete
-+ *
-+ * Returns the status of the completed work or -EAGAIN if the work was not
-+ * completed before timeout. In the latter case a user should poll
-+ * last_sysfs_mgmt_res until it returns the result of the processing.
-+ */
-+int scst_sysfs_queue_wait_work(struct scst_sysfs_work_item *work)
-+{
-+ int res = 0, rc;
-+ unsigned long timeout = 15*HZ;
-+ struct task_struct *t;
-+ static atomic_t uid_thread_name = ATOMIC_INIT(0);
-+
-+ TRACE_ENTRY();
-+
-+ spin_lock(&sysfs_work_lock);
-+
-+ TRACE_DBG("Adding sysfs work %p to the list", work);
-+ list_add_tail(&work->sysfs_work_list_entry, &sysfs_work_list);
-+
-+ active_sysfs_works++;
-+
-+ kref_get(&work->sysfs_work_kref);
-+
-+ spin_unlock(&sysfs_work_lock);
-+
-+ wake_up(&sysfs_work_waitQ);
-+
-+ /*
-+	 * A deadlock is possible: the sysfs thread can be waiting for the
-+	 * last put during some object unregistration while, at the same
-+	 * time, another queued work holds a reference on that object and
-+	 * waits for attention from the sysfs thread. Generally, all sysfs
-+	 * functions that call kobject_get() and then queue a sysfs thread
-+	 * job are affected by this. It is especially dangerous in read-only
-+	 * cases, like vdev_sysfs_filename_show().
-+	 *
-+	 * So, to eliminate that deadlock, we create an extra sysfs thread
-+	 * for each queued sysfs work. This thread quits as soon as it sees
-+	 * that there are no more queued works to process.
-+ */
-+
-+ t = kthread_run(sysfs_work_thread_fn, (void *)true, "scst_uid%d",
-+ atomic_inc_return(&uid_thread_name));
-+ if (IS_ERR(t))
-+ PRINT_ERROR("kthread_run() for user interface thread %d "
-+ "failed: %d", atomic_read(&uid_thread_name),
-+ (int)PTR_ERR(t));
-+
-+ while (1) {
-+ rc = wait_for_completion_interruptible_timeout(
-+ &work->sysfs_work_done, timeout);
-+ if (rc == 0) {
-+ if (!mutex_is_locked(&scst_mutex)) {
-+ TRACE_DBG("scst_mutex not locked, continue "
-+ "waiting (work %p)", work);
-+ timeout = 5*HZ;
-+ continue;
-+ }
-+ TRACE_MGMT_DBG("Time out waiting for work %p", work);
-+ res = -EAGAIN;
-+ goto out_put;
-+ } else if (rc < 0) {
-+ res = rc;
-+ goto out_put;
-+ }
-+ break;
-+ }
-+
-+ res = work->work_res;
-+
-+out_put:
-+ kref_put(&work->sysfs_work_kref, scst_sysfs_work_release);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL(scst_sysfs_queue_wait_work);
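-+
-+/*
-+ * Typical caller pattern, as used by the store() handlers below (the
-+ * work function name my_store_work_fn is illustrative):
-+ *
-+ *	res = scst_alloc_sysfs_work(my_store_work_fn, false, &work);
-+ *	if (res != 0)
-+ *		goto out;
-+ *	work->buf = buffer;	(ownership passes to the work item,
-+ *				 which kfree()s it on release)
-+ *	res = scst_sysfs_queue_wait_work(work);
-+ */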
-+
-+/* No locks */
-+static int scst_check_grab_tgtt_ptr(struct scst_tgt_template *tgtt)
-+{
-+ int res = 0;
-+ struct scst_tgt_template *tt;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(tt, &scst_template_list, scst_template_list_entry) {
-+ if (tt == tgtt) {
-+ tgtt->tgtt_active_sysfs_works_count++;
-+ goto out_unlock;
-+ }
-+ }
-+
-+ TRACE_DBG("Tgtt %p not found", tgtt);
-+ res = -ENOENT;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* No locks */
-+static void scst_ungrab_tgtt_ptr(struct scst_tgt_template *tgtt)
-+{
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_mutex);
-+ tgtt->tgtt_active_sysfs_works_count--;
-+ mutex_unlock(&scst_mutex);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* scst_mutex supposed to be locked */
-+static int scst_check_tgt_acg_ptrs(struct scst_tgt *tgt, struct scst_acg *acg)
-+{
-+ int res = 0;
-+ struct scst_tgt_template *tgtt;
-+
-+ list_for_each_entry(tgtt, &scst_template_list, scst_template_list_entry) {
-+ struct scst_tgt *t;
-+ list_for_each_entry(t, &tgtt->tgt_list, tgt_list_entry) {
-+ if (t == tgt) {
-+ struct scst_acg *a;
-+ if (acg == NULL)
-+ goto out;
-+ if (acg == tgt->default_acg)
-+ goto out;
-+ list_for_each_entry(a, &tgt->tgt_acg_list,
-+ acg_list_entry) {
-+ if (a == acg)
-+ goto out;
-+ }
-+ }
-+ }
-+ }
-+
-+ TRACE_DBG("Tgt %p/ACG %p not found", tgt, acg);
-+ res = -ENOENT;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* scst_mutex supposed to be locked */
-+static int scst_check_devt_ptr(struct scst_dev_type *devt,
-+ struct list_head *list)
-+{
-+ int res = 0;
-+ struct scst_dev_type *dt;
-+
-+ TRACE_ENTRY();
-+
-+ list_for_each_entry(dt, list, dev_type_list_entry) {
-+ if (dt == devt)
-+ goto out;
-+ }
-+
-+ TRACE_DBG("Devt %p not found", devt);
-+ res = -ENOENT;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* scst_mutex supposed to be locked */
-+static int scst_check_dev_ptr(struct scst_device *dev)
-+{
-+ int res = 0;
-+ struct scst_device *d;
-+
-+ TRACE_ENTRY();
-+
-+ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
-+ if (d == dev)
-+ goto out;
-+ }
-+
-+ TRACE_DBG("Dev %p not found", dev);
-+ res = -ENOENT;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* No locks */
-+static int scst_check_grab_devt_ptr(struct scst_dev_type *devt,
-+ struct list_head *list)
-+{
-+ int res = 0;
-+ struct scst_dev_type *dt;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_mutex);
-+
-+ list_for_each_entry(dt, list, dev_type_list_entry) {
-+ if (dt == devt) {
-+ devt->devt_active_sysfs_works_count++;
-+ goto out_unlock;
-+ }
-+ }
-+
-+ TRACE_DBG("Devt %p not found", devt);
-+ res = -ENOENT;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* No locks */
-+static void scst_ungrab_devt_ptr(struct scst_dev_type *devt)
-+{
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_mutex);
-+ devt->devt_active_sysfs_works_count--;
-+ mutex_unlock(&scst_mutex);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ ** Regular SCST sysfs ops
-+ **/
-+static ssize_t scst_show(struct kobject *kobj, struct attribute *attr,
-+ char *buf)
-+{
-+ struct kobj_attribute *kobj_attr;
-+ kobj_attr = container_of(attr, struct kobj_attribute, attr);
-+
-+ return kobj_attr->show(kobj, kobj_attr, buf);
-+}
-+
-+static ssize_t scst_store(struct kobject *kobj, struct attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ struct kobj_attribute *kobj_attr;
-+ kobj_attr = container_of(attr, struct kobj_attribute, attr);
-+
-+ if (kobj_attr->store)
-+ return kobj_attr->store(kobj, kobj_attr, buf, count);
-+ else
-+ return -EIO;
-+}
-+
-+const struct sysfs_ops scst_sysfs_ops = {
-+ .show = scst_show,
-+ .store = scst_store,
-+};
-+
-+const struct sysfs_ops *scst_sysfs_get_sysfs_ops(void)
-+{
-+ return &scst_sysfs_ops;
-+}
-+EXPORT_SYMBOL_GPL(scst_sysfs_get_sysfs_ops);
-+
-+/**
-+ ** Target Template
-+ **/
-+
-+static void scst_tgtt_release(struct kobject *kobj)
-+{
-+ struct scst_tgt_template *tgtt;
-+
-+ TRACE_ENTRY();
-+
-+ tgtt = container_of(kobj, struct scst_tgt_template, tgtt_kobj);
-+ if (tgtt->tgtt_kobj_release_cmpl)
-+ complete_all(tgtt->tgtt_kobj_release_cmpl);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static struct kobj_type tgtt_ktype = {
-+ .sysfs_ops = &scst_sysfs_ops,
-+ .release = scst_tgtt_release,
-+};
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+
-+static ssize_t scst_tgtt_trace_level_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_tgt_template *tgtt;
-+
-+ tgtt = container_of(kobj, struct scst_tgt_template, tgtt_kobj);
-+
-+ return scst_trace_level_show(tgtt->trace_tbl,
-+ tgtt->trace_flags ? *tgtt->trace_flags : 0, buf,
-+ tgtt->trace_tbl_help);
-+}
-+
-+static ssize_t scst_tgtt_trace_level_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_tgt_template *tgtt;
-+
-+ TRACE_ENTRY();
-+
-+ tgtt = container_of(kobj, struct scst_tgt_template, tgtt_kobj);
-+
-+ res = mutex_lock_interruptible(&scst_log_mutex);
-+ if (res != 0)
-+ goto out;
-+
-+ res = scst_write_trace(buf, count, tgtt->trace_flags,
-+ tgtt->default_trace_flags, tgtt->name, tgtt->trace_tbl);
-+
-+ mutex_unlock(&scst_log_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute tgtt_trace_attr =
-+ __ATTR(trace_level, S_IRUGO | S_IWUSR,
-+ scst_tgtt_trace_level_show, scst_tgtt_trace_level_store);
-+
-+#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
-+
-+static ssize_t scst_tgtt_mgmt_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ static const char help[] =
-+ "Usage: echo \"add_target target_name [parameters]\" >mgmt\n"
-+ " echo \"del_target target_name\" >mgmt\n"
-+ "%s%s"
-+ "%s"
-+ "\n"
-+ "where parameters are one or more "
-+ "param_name=value pairs separated by ';'\n\n"
-+ "%s%s%s%s%s%s%s%s\n";
-+ struct scst_tgt_template *tgtt;
-+
-+ tgtt = container_of(kobj, struct scst_tgt_template, tgtt_kobj);
-+
-+ return scnprintf(buf, SCST_SYSFS_BLOCK_SIZE, help,
-+ (tgtt->tgtt_optional_attributes != NULL) ?
-+ " echo \"add_attribute <attribute> <value>\" >mgmt\n"
-+ " echo \"del_attribute <attribute> <value>\" >mgmt\n" : "",
-+ (tgtt->tgt_optional_attributes != NULL) ?
-+ " echo \"add_target_attribute target_name <attribute> <value>\" >mgmt\n"
-+ " echo \"del_target_attribute target_name <attribute> <value>\" >mgmt\n" : "",
-+ (tgtt->mgmt_cmd_help) ? tgtt->mgmt_cmd_help : "",
-+ (tgtt->add_target_parameters != NULL) ?
-+ "The following parameters available: " : "",
-+ (tgtt->add_target_parameters != NULL) ?
-+ tgtt->add_target_parameters : "",
-+ (tgtt->tgtt_optional_attributes != NULL) ?
-+ "The following target driver attributes available: " : "",
-+ (tgtt->tgtt_optional_attributes != NULL) ?
-+ tgtt->tgtt_optional_attributes : "",
-+ (tgtt->tgtt_optional_attributes != NULL) ? "\n" : "",
-+ (tgtt->tgt_optional_attributes != NULL) ?
-+ "The following target attributes available: " : "",
-+ (tgtt->tgt_optional_attributes != NULL) ?
-+ tgtt->tgt_optional_attributes : "",
-+ (tgtt->tgt_optional_attributes != NULL) ? "\n" : "");
-+}
-+
-+static int scst_process_tgtt_mgmt_store(char *buffer,
-+ struct scst_tgt_template *tgtt)
-+{
-+ int res = 0;
-+ char *p, *pp, *target_name;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("buffer %s", buffer);
-+
-+ /* Check if our pointer is still alive and, if yes, grab it */
-+ if (scst_check_grab_tgtt_ptr(tgtt) != 0)
-+ goto out;
-+
-+ pp = buffer;
-+ if (pp[strlen(pp) - 1] == '\n')
-+ pp[strlen(pp) - 1] = '\0';
-+
-+ p = scst_get_next_lexem(&pp);
-+
-+ if (strcasecmp("add_target", p) == 0) {
-+ target_name = scst_get_next_lexem(&pp);
-+ if (*target_name == '\0') {
-+ PRINT_ERROR("%s", "Target name required");
-+ res = -EINVAL;
-+ goto out_ungrab;
-+ }
-+ res = tgtt->add_target(target_name, pp);
-+ } else if (strcasecmp("del_target", p) == 0) {
-+ target_name = scst_get_next_lexem(&pp);
-+ if (*target_name == '\0') {
-+ PRINT_ERROR("%s", "Target name required");
-+ res = -EINVAL;
-+ goto out_ungrab;
-+ }
-+
-+ p = scst_get_next_lexem(&pp);
-+ if (*p != '\0')
-+ goto out_syntax_err;
-+
-+ res = tgtt->del_target(target_name);
-+ } else if (tgtt->mgmt_cmd != NULL) {
-+ scst_restore_token_str(p, pp);
-+ res = tgtt->mgmt_cmd(buffer);
-+ } else {
-+ PRINT_ERROR("Unknown action \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_ungrab;
-+ }
-+
-+out_ungrab:
-+ scst_ungrab_tgtt_ptr(tgtt);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_syntax_err:
-+ PRINT_ERROR("Syntax error on \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_ungrab;
-+}
-+
-+static int scst_tgtt_mgmt_store_work_fn(struct scst_sysfs_work_item *work)
-+{
-+ return scst_process_tgtt_mgmt_store(work->buf, work->tgtt);
-+}
-+
-+static ssize_t scst_tgtt_mgmt_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ char *buffer;
-+ struct scst_sysfs_work_item *work;
-+ struct scst_tgt_template *tgtt;
-+
-+ TRACE_ENTRY();
-+
-+ tgtt = container_of(kobj, struct scst_tgt_template, tgtt_kobj);
-+
-+ buffer = kasprintf(GFP_KERNEL, "%.*s", (int)count, buf);
-+ if (buffer == NULL) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ res = scst_alloc_sysfs_work(scst_tgtt_mgmt_store_work_fn, false, &work);
-+ if (res != 0)
-+ goto out_free;
-+
-+ work->buf = buffer;
-+ work->tgtt = tgtt;
-+
-+ res = scst_sysfs_queue_wait_work(work);
-+ if (res == 0)
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free:
-+ kfree(buffer);
-+ goto out;
-+}
-+
-+static struct kobj_attribute scst_tgtt_mgmt =
-+ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_tgtt_mgmt_show,
-+ scst_tgtt_mgmt_store);
-+
-+int scst_tgtt_sysfs_create(struct scst_tgt_template *tgtt)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ res = kobject_init_and_add(&tgtt->tgtt_kobj, &tgtt_ktype,
-+ scst_targets_kobj, tgtt->name);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add tgtt %s to sysfs", tgtt->name);
-+ goto out;
-+ }
-+
-+ if (tgtt->add_target != NULL) {
-+ res = sysfs_create_file(&tgtt->tgtt_kobj,
-+ &scst_tgtt_mgmt.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add mgmt attr for target driver %s",
-+ tgtt->name);
-+ goto out_del;
-+ }
-+ }
-+
-+ if (tgtt->tgtt_attrs) {
-+ res = sysfs_create_files(&tgtt->tgtt_kobj, tgtt->tgtt_attrs);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add attributes for target "
-+ "driver %s", tgtt->name);
-+ goto out_del;
-+ }
-+ }
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ if (tgtt->trace_flags != NULL) {
-+ res = sysfs_create_file(&tgtt->tgtt_kobj,
-+ &tgtt_trace_attr.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add trace_flag for target "
-+ "driver %s", tgtt->name);
-+ goto out_del;
-+ }
-+ }
-+#endif
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_del:
-+ scst_tgtt_sysfs_del(tgtt);
-+ goto out;
-+}
-+
-+/*
-+ * Must not be called under scst_mutex, due to possible deadlock with
-+ * sysfs ref counting in sysfs works (it is waiting for the last put, but
-+ * the last ref counter holder is waiting for scst_mutex)
-+ */
-+void scst_tgtt_sysfs_del(struct scst_tgt_template *tgtt)
-+{
-+ int rc;
-+ DECLARE_COMPLETION_ONSTACK(c);
-+
-+ TRACE_ENTRY();
-+
-+ tgtt->tgtt_kobj_release_cmpl = &c;
-+
-+ kobject_del(&tgtt->tgtt_kobj);
-+ kobject_put(&tgtt->tgtt_kobj);
-+
-+ rc = wait_for_completion_timeout(tgtt->tgtt_kobj_release_cmpl, HZ);
-+ if (rc == 0) {
-+ PRINT_INFO("Waiting for releasing sysfs entry "
-+ "for target template %s (%d refs)...", tgtt->name,
-+ atomic_read(&tgtt->tgtt_kobj.kref.refcount));
-+ wait_for_completion(tgtt->tgtt_kobj_release_cmpl);
-+		PRINT_INFO("Done waiting for release of sysfs "
-+ "entry for target template %s", tgtt->name);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ ** Target directory implementation
-+ **/
-+
-+static void scst_tgt_release(struct kobject *kobj)
-+{
-+ struct scst_tgt *tgt;
-+
-+ TRACE_ENTRY();
-+
-+ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ if (tgt->tgt_kobj_release_cmpl)
-+ complete_all(tgt->tgt_kobj_release_cmpl);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static struct kobj_type tgt_ktype = {
-+ .sysfs_ops = &scst_sysfs_ops,
-+ .release = scst_tgt_release,
-+};
-+
-+static int __scst_process_luns_mgmt_store(char *buffer,
-+ struct scst_tgt *tgt, struct scst_acg *acg, bool tgt_kobj)
-+{
-+ int res, read_only = 0, action;
-+ char *p, *e = NULL;
-+ unsigned int virt_lun;
-+ struct scst_acg_dev *acg_dev = NULL, *acg_dev_tmp;
-+ struct scst_device *d, *dev = NULL;
-+ enum {
-+ SCST_LUN_ACTION_ADD = 1,
-+ SCST_LUN_ACTION_DEL = 2,
-+ SCST_LUN_ACTION_REPLACE = 3,
-+ SCST_LUN_ACTION_CLEAR = 4,
-+ };
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("buffer %s", buffer);
-+
-+ p = buffer;
-+ if (p[strlen(p) - 1] == '\n')
-+ p[strlen(p) - 1] = '\0';
-+ if (strncasecmp("add", p, 3) == 0) {
-+ p += 3;
-+ action = SCST_LUN_ACTION_ADD;
-+ } else if (strncasecmp("del", p, 3) == 0) {
-+ p += 3;
-+ action = SCST_LUN_ACTION_DEL;
-+ } else if (!strncasecmp("replace", p, 7)) {
-+ p += 7;
-+ action = SCST_LUN_ACTION_REPLACE;
-+ } else if (!strncasecmp("clear", p, 5)) {
-+ p += 5;
-+ action = SCST_LUN_ACTION_CLEAR;
-+ } else {
-+ PRINT_ERROR("Unknown action \"%s\"", p);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ res = scst_suspend_activity(true);
-+ if (res != 0)
-+ goto out;
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res != 0)
-+ goto out_resume;
-+
-+	/* Check that tgt and acg were not freed while we were getting here */
-+ if (scst_check_tgt_acg_ptrs(tgt, acg) != 0)
-+ goto out_unlock;
-+
-+ if ((action != SCST_LUN_ACTION_CLEAR) &&
-+ (action != SCST_LUN_ACTION_DEL)) {
-+ if (!isspace(*p)) {
-+ PRINT_ERROR("%s", "Syntax error");
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+ e = p; /* save p */
-+ while (!isspace(*e) && *e != '\0')
-+ e++;
-+ *e = '\0';
-+
-+ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
-+ if (!strcmp(d->virt_name, p)) {
-+ dev = d;
-+ TRACE_DBG("Device %p (%s) found", dev, p);
-+ break;
-+ }
-+ }
-+ if (dev == NULL) {
-+ PRINT_ERROR("Device '%s' not found", p);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+ }
-+
-+ switch (action) {
-+ case SCST_LUN_ACTION_ADD:
-+ case SCST_LUN_ACTION_REPLACE:
-+ {
-+ bool dev_replaced = false;
-+
-+ e++;
-+ while (isspace(*e) && *e != '\0')
-+ e++;
-+
-+ virt_lun = simple_strtoul(e, &e, 0);
-+ if (virt_lun > SCST_MAX_LUN) {
-+			PRINT_ERROR("LUN %d too big (max %d)", virt_lun,
-+ SCST_MAX_LUN);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ while (isspace(*e) && *e != '\0')
-+ e++;
-+
-+ while (1) {
-+ char *pp;
-+ unsigned long val;
-+ char *param = scst_get_next_token_str(&e);
-+ if (param == NULL)
-+ break;
-+
-+ p = scst_get_next_lexem(&param);
-+ if (*p == '\0') {
-+ PRINT_ERROR("Syntax error at %s (device %s)",
-+ param, dev->virt_name);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ pp = scst_get_next_lexem(&param);
-+ if (*pp == '\0') {
-+				PRINT_ERROR("Missing value for parameter %s (device %s)",
-+ p, dev->virt_name);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ if (scst_get_next_lexem(&param)[0] != '\0') {
-+				PRINT_ERROR("Too many values for parameter %s (device %s)",
-+ p, dev->virt_name);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ res = strict_strtoul(pp, 0, &val);
-+ if (res != 0) {
-+ PRINT_ERROR("strict_strtoul() for %s failed: %d "
-+ "(device %s)", pp, res, dev->virt_name);
-+ goto out_unlock;
-+ }
-+
-+ if (!strcasecmp("read_only", p)) {
-+ read_only = val;
-+ TRACE_DBG("READ ONLY %d", read_only);
-+ } else {
-+ PRINT_ERROR("Unknown parameter %s (device %s)",
-+ p, dev->virt_name);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+ }
-+
-+ acg_dev = NULL;
-+ list_for_each_entry(acg_dev_tmp, &acg->acg_dev_list,
-+ acg_dev_list_entry) {
-+ if (acg_dev_tmp->lun == virt_lun) {
-+ acg_dev = acg_dev_tmp;
-+ break;
-+ }
-+ }
-+
-+ if (acg_dev != NULL) {
-+ if (action == SCST_LUN_ACTION_ADD) {
-+ PRINT_ERROR("virt lun %d already exists in "
-+ "group %s", virt_lun, acg->acg_name);
-+ res = -EEXIST;
-+ goto out_unlock;
-+ } else {
-+ /* Replace */
-+ res = scst_acg_del_lun(acg, acg_dev->lun,
-+ false);
-+ if (res != 0)
-+ goto out_unlock;
-+
-+ dev_replaced = true;
-+ }
-+ }
-+
-+ res = scst_acg_add_lun(acg,
-+ tgt_kobj ? tgt->tgt_luns_kobj : acg->luns_kobj,
-+ dev, virt_lun, read_only, !dev_replaced, NULL);
-+ if (res != 0)
-+ goto out_unlock;
-+
-+ if (dev_replaced) {
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ if ((tgt_dev->acg_dev->acg == acg) &&
-+ (tgt_dev->lun == virt_lun)) {
-+ TRACE_MGMT_DBG("INQUIRY DATA HAS CHANGED"
-+ " on tgt_dev %p", tgt_dev);
-+ scst_gen_aen_or_ua(tgt_dev,
-+ SCST_LOAD_SENSE(scst_sense_inquery_data_changed));
-+ }
-+ }
-+ }
-+
-+ break;
-+ }
-+ case SCST_LUN_ACTION_DEL:
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+ virt_lun = simple_strtoul(p, &p, 0);
-+
-+ res = scst_acg_del_lun(acg, virt_lun, true);
-+ if (res != 0)
-+ goto out_unlock;
-+ break;
-+ case SCST_LUN_ACTION_CLEAR:
-+		PRINT_INFO("Removing all devices from group %s",
-+ acg->acg_name);
-+ list_for_each_entry_safe(acg_dev, acg_dev_tmp,
-+ &acg->acg_dev_list,
-+ acg_dev_list_entry) {
-+ res = scst_acg_del_lun(acg, acg_dev->lun,
-+ list_is_last(&acg_dev->acg_dev_list_entry,
-+ &acg->acg_dev_list));
-+ if (res != 0)
-+ goto out_unlock;
-+ }
-+ break;
-+ }
-+
-+ res = 0;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+out_resume:
-+ scst_resume_activity();
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_luns_mgmt_store_work_fn(struct scst_sysfs_work_item *work)
-+{
-+ return __scst_process_luns_mgmt_store(work->buf, work->tgt, work->acg,
-+ work->is_tgt_kobj);
-+}
-+
-+static ssize_t __scst_acg_mgmt_store(struct scst_acg *acg,
-+ const char *buf, size_t count, bool is_tgt_kobj,
-+ int (*sysfs_work_fn)(struct scst_sysfs_work_item *))
-+{
-+ int res;
-+ char *buffer;
-+ struct scst_sysfs_work_item *work;
-+
-+ TRACE_ENTRY();
-+
-+ buffer = kasprintf(GFP_KERNEL, "%.*s", (int)count, buf);
-+ if (buffer == NULL) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ res = scst_alloc_sysfs_work(sysfs_work_fn, false, &work);
-+ if (res != 0)
-+ goto out_free;
-+
-+ work->buf = buffer;
-+ work->tgt = acg->tgt;
-+ work->acg = acg;
-+ work->is_tgt_kobj = is_tgt_kobj;
-+
-+ res = scst_sysfs_queue_wait_work(work);
-+ if (res == 0)
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free:
-+ kfree(buffer);
-+ goto out;
-+}
-+
-+static ssize_t __scst_luns_mgmt_store(struct scst_acg *acg,
-+ bool tgt_kobj, const char *buf, size_t count)
-+{
-+ return __scst_acg_mgmt_store(acg, buf, count, tgt_kobj,
-+ scst_luns_mgmt_store_work_fn);
-+}
-+
-+static ssize_t scst_luns_mgmt_show(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ char *buf)
-+{
-+ static const char help[] =
-+ "Usage: echo \"add|del H:C:I:L lun [parameters]\" >mgmt\n"
-+ " echo \"add VNAME lun [parameters]\" >mgmt\n"
-+ " echo \"del lun\" >mgmt\n"
-+ " echo \"replace H:C:I:L lun [parameters]\" >mgmt\n"
-+ " echo \"replace VNAME lun [parameters]\" >mgmt\n"
-+ " echo \"clear\" >mgmt\n"
-+ "\n"
-+ "where parameters are one or more "
-+ "param_name=value pairs separated by ';'\n"
-+		"\nThe following parameters are available: read_only.\n";
-+
-+ return sprintf(buf, "%s", help);
-+}
-+
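-+/*
-+ * Example usage of the LUNs mgmt file, following the help text printed
-+ * above (a sketch: the target and device names are hypothetical):
-+ *
-+ *   # Map virtual device "disk1" as LUN 0, read-only:
-+ *   echo "add disk1 0 read_only=1" \
-+ *       > /sys/kernel/scst_tgt/targets/iscsi/tgt1/luns/mgmt
-+ *   # Unmap it again:
-+ *   echo "del 0" > /sys/kernel/scst_tgt/targets/iscsi/tgt1/luns/mgmt
-+ */
-+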
-+static ssize_t scst_luns_mgmt_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_acg *acg;
-+ struct scst_tgt *tgt;
-+
-+ tgt = container_of(kobj->parent, struct scst_tgt, tgt_kobj);
-+ acg = tgt->default_acg;
-+
-+ res = __scst_luns_mgmt_store(acg, true, buf, count);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_luns_mgmt =
-+ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_luns_mgmt_show,
-+ scst_luns_mgmt_store);
-+
-+static ssize_t __scst_acg_addr_method_show(struct scst_acg *acg, char *buf)
-+{
-+ int res;
-+
-+ switch (acg->addr_method) {
-+ case SCST_LUN_ADDR_METHOD_FLAT:
-+ res = sprintf(buf, "FLAT\n");
-+ break;
-+ case SCST_LUN_ADDR_METHOD_PERIPHERAL:
-+ res = sprintf(buf, "PERIPHERAL\n");
-+ break;
-+ case SCST_LUN_ADDR_METHOD_LUN:
-+ res = sprintf(buf, "LUN\n");
-+ break;
-+ default:
-+ res = sprintf(buf, "UNKNOWN\n");
-+ break;
-+ }
-+
-+ if (acg->addr_method != acg->tgt->tgtt->preferred_addr_method)
-+ res += sprintf(&buf[res], "%s\n", SCST_SYSFS_KEY_MARK);
-+
-+ return res;
-+}
-+
-+static ssize_t __scst_acg_addr_method_store(struct scst_acg *acg,
-+ const char *buf, size_t count)
-+{
-+ int res = count;
-+
-+ if (strncasecmp(buf, "FLAT", min_t(int, 4, count)) == 0)
-+ acg->addr_method = SCST_LUN_ADDR_METHOD_FLAT;
-+ else if (strncasecmp(buf, "PERIPHERAL", min_t(int, 10, count)) == 0)
-+ acg->addr_method = SCST_LUN_ADDR_METHOD_PERIPHERAL;
-+ else if (strncasecmp(buf, "LUN", min_t(int, 3, count)) == 0)
-+ acg->addr_method = SCST_LUN_ADDR_METHOD_LUN;
-+ else {
-+ PRINT_ERROR("Unknown address method %s", buf);
-+ res = -EINVAL;
-+ }
-+
-+ TRACE_DBG("acg %p, addr_method %d", acg, acg->addr_method);
-+
-+ return res;
-+}
-+
-+static ssize_t scst_tgt_addr_method_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_acg *acg;
-+ struct scst_tgt *tgt;
-+
-+ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ acg = tgt->default_acg;
-+
-+ return __scst_acg_addr_method_show(acg, buf);
-+}
-+
-+static ssize_t scst_tgt_addr_method_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_acg *acg;
-+ struct scst_tgt *tgt;
-+
-+ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ acg = tgt->default_acg;
-+
-+ res = __scst_acg_addr_method_store(acg, buf, count);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_tgt_addr_method =
-+ __ATTR(addr_method, S_IRUGO | S_IWUSR, scst_tgt_addr_method_show,
-+ scst_tgt_addr_method_store);
-+
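-+/*
-+ * Example: switch the addressing method of the target's default ACG (a
-+ * sketch, target name hypothetical). The store above matches "FLAT",
-+ * "PERIPHERAL" and "LUN" case-insensitively:
-+ *
-+ *   echo "PERIPHERAL" > /sys/kernel/scst_tgt/targets/iscsi/tgt1/addr_method
-+ */
-+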
-+static ssize_t __scst_acg_io_grouping_type_show(struct scst_acg *acg, char *buf)
-+{
-+ int res;
-+
-+ switch (acg->acg_io_grouping_type) {
-+ case SCST_IO_GROUPING_AUTO:
-+ res = sprintf(buf, "%s\n", SCST_IO_GROUPING_AUTO_STR);
-+ break;
-+ case SCST_IO_GROUPING_THIS_GROUP_ONLY:
-+ res = sprintf(buf, "%s\n%s\n",
-+ SCST_IO_GROUPING_THIS_GROUP_ONLY_STR,
-+ SCST_SYSFS_KEY_MARK);
-+ break;
-+ case SCST_IO_GROUPING_NEVER:
-+ res = sprintf(buf, "%s\n%s\n", SCST_IO_GROUPING_NEVER_STR,
-+ SCST_SYSFS_KEY_MARK);
-+ break;
-+ default:
-+ res = sprintf(buf, "%d\n%s\n", acg->acg_io_grouping_type,
-+ SCST_SYSFS_KEY_MARK);
-+ break;
-+ }
-+
-+ return res;
-+}
-+
-+static int __scst_acg_process_io_grouping_type_store(struct scst_tgt *tgt,
-+ struct scst_acg *acg, int io_grouping_type)
-+{
-+ int res = 0;
-+ struct scst_acg_dev *acg_dev;
-+
-+ TRACE_DBG("tgt %p, acg %p, io_grouping_type %d", tgt, acg,
-+ io_grouping_type);
-+
-+ res = scst_suspend_activity(true);
-+ if (res != 0)
-+ goto out;
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res != 0)
-+ goto out_resume;
-+
-+	/* Check that tgt and acg were not freed while we were getting here */
-+ if (scst_check_tgt_acg_ptrs(tgt, acg) != 0)
-+ goto out_unlock;
-+
-+ acg->acg_io_grouping_type = io_grouping_type;
-+
-+ list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
-+ int rc;
-+
-+ scst_stop_dev_threads(acg_dev->dev);
-+
-+ rc = scst_create_dev_threads(acg_dev->dev);
-+ if (rc != 0)
-+ res = rc;
-+ }
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+out_resume:
-+ scst_resume_activity();
-+
-+out:
-+ return res;
-+}
-+
-+static int __scst_acg_io_grouping_type_store_work_fn(struct scst_sysfs_work_item *work)
-+{
-+ return __scst_acg_process_io_grouping_type_store(work->tgt, work->acg,
-+ work->io_grouping_type);
-+}
-+
-+static ssize_t __scst_acg_io_grouping_type_store(struct scst_acg *acg,
-+ const char *buf, size_t count)
-+{
-+ int res = 0;
-+ int prev = acg->acg_io_grouping_type;
-+ long io_grouping_type;
-+ struct scst_sysfs_work_item *work;
-+
-+ if (strncasecmp(buf, SCST_IO_GROUPING_AUTO_STR,
-+ min_t(int, strlen(SCST_IO_GROUPING_AUTO_STR), count)) == 0)
-+ io_grouping_type = SCST_IO_GROUPING_AUTO;
-+ else if (strncasecmp(buf, SCST_IO_GROUPING_THIS_GROUP_ONLY_STR,
-+ min_t(int, strlen(SCST_IO_GROUPING_THIS_GROUP_ONLY_STR), count)) == 0)
-+ io_grouping_type = SCST_IO_GROUPING_THIS_GROUP_ONLY;
-+ else if (strncasecmp(buf, SCST_IO_GROUPING_NEVER_STR,
-+ min_t(int, strlen(SCST_IO_GROUPING_NEVER_STR), count)) == 0)
-+ io_grouping_type = SCST_IO_GROUPING_NEVER;
-+ else {
-+ res = strict_strtol(buf, 0, &io_grouping_type);
-+ if ((res != 0) || (io_grouping_type <= 0)) {
-+			PRINT_ERROR("Unknown or disallowed I/O grouping type "
-+ "%s", buf);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ }
-+
-+ if (prev == io_grouping_type)
-+ goto out;
-+
-+ res = scst_alloc_sysfs_work(__scst_acg_io_grouping_type_store_work_fn,
-+ false, &work);
-+ if (res != 0)
-+ goto out;
-+
-+ work->tgt = acg->tgt;
-+ work->acg = acg;
-+ work->io_grouping_type = io_grouping_type;
-+
-+ res = scst_sysfs_queue_wait_work(work);
-+
-+out:
-+ return res;
-+}
-+
-+static ssize_t scst_tgt_io_grouping_type_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_acg *acg;
-+ struct scst_tgt *tgt;
-+
-+ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ acg = tgt->default_acg;
-+
-+ return __scst_acg_io_grouping_type_show(acg, buf);
-+}
-+
-+static ssize_t scst_tgt_io_grouping_type_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_acg *acg;
-+ struct scst_tgt *tgt;
-+
-+ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ acg = tgt->default_acg;
-+
-+ res = __scst_acg_io_grouping_type_store(acg, buf, count);
-+ if (res != 0)
-+ goto out;
-+
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_tgt_io_grouping_type =
-+ __ATTR(io_grouping_type, S_IRUGO | S_IWUSR,
-+ scst_tgt_io_grouping_type_show,
-+ scst_tgt_io_grouping_type_store);
-+
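-+/*
-+ * Example (target name hypothetical): besides the three symbolic values
-+ * (the SCST_IO_GROUPING_*_STR constants), the store above accepts any
-+ * positive integer as an explicit I/O group number:
-+ *
-+ *   echo "8" > /sys/kernel/scst_tgt/targets/iscsi/tgt1/io_grouping_type
-+ */
-+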
-+static ssize_t __scst_acg_cpu_mask_show(struct scst_acg *acg, char *buf)
-+{
-+ int res;
-+
-+ res = cpumask_scnprintf(buf, SCST_SYSFS_BLOCK_SIZE,
-+ &acg->acg_cpu_mask);
-+ if (!cpus_equal(acg->acg_cpu_mask, default_cpu_mask))
-+ res += sprintf(&buf[res], "\n%s\n", SCST_SYSFS_KEY_MARK);
-+
-+ return res;
-+}
-+
-+static int __scst_acg_process_cpu_mask_store(struct scst_tgt *tgt,
-+ struct scst_acg *acg, cpumask_t *cpu_mask)
-+{
-+ int res = 0;
-+ struct scst_session *sess;
-+
-+ TRACE_DBG("tgt %p, acg %p", tgt, acg);
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res != 0)
-+ goto out;
-+
-+	/* Check that tgt and acg were not freed while we were getting here */
-+ if (scst_check_tgt_acg_ptrs(tgt, acg) != 0)
-+ goto out_unlock;
-+
-+ cpumask_copy(&acg->acg_cpu_mask, cpu_mask);
-+
-+ list_for_each_entry(sess, &acg->acg_sess_list, acg_sess_list_entry) {
-+ int i;
-+ for (i = 0; i < SESS_TGT_DEV_LIST_HASH_SIZE; i++) {
-+ struct scst_tgt_dev *tgt_dev;
-+ struct list_head *head = &sess->sess_tgt_dev_list[i];
-+ list_for_each_entry(tgt_dev, head,
-+ sess_tgt_dev_list_entry) {
-+ struct scst_cmd_thread_t *thr;
-+ if (tgt_dev->active_cmd_threads != &tgt_dev->tgt_dev_cmd_threads)
-+ continue;
-+ list_for_each_entry(thr,
-+ &tgt_dev->active_cmd_threads->threads_list,
-+ thread_list_entry) {
-+ int rc;
-+ rc = set_cpus_allowed_ptr(thr->cmd_thread, cpu_mask);
-+ if (rc != 0)
-+ PRINT_ERROR("Setting CPU "
-+ "affinity failed: %d", rc);
-+ }
-+ }
-+ }
-+ if (tgt->tgtt->report_aen != NULL) {
-+ struct scst_aen *aen;
-+ int rc;
-+
-+ aen = scst_alloc_aen(sess, 0);
-+ if (aen == NULL) {
-+ PRINT_ERROR("Unable to notify target driver %s "
-+ "about cpu_mask change", tgt->tgt_name);
-+ continue;
-+ }
-+
-+ aen->event_fn = SCST_AEN_CPU_MASK_CHANGED;
-+
-+ TRACE_DBG("Calling target's %s report_aen(%p)",
-+ tgt->tgtt->name, aen);
-+ rc = tgt->tgtt->report_aen(aen);
-+ TRACE_DBG("Target's %s report_aen(%p) returned %d",
-+ tgt->tgtt->name, aen, rc);
-+ if (rc != SCST_AEN_RES_SUCCESS)
-+ scst_free_aen(aen);
-+ }
-+ }
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ return res;
-+}
-+
-+static int __scst_acg_cpu_mask_store_work_fn(struct scst_sysfs_work_item *work)
-+{
-+ return __scst_acg_process_cpu_mask_store(work->tgt, work->acg,
-+ &work->cpu_mask);
-+}
-+
-+static ssize_t __scst_acg_cpu_mask_store(struct scst_acg *acg,
-+ const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_sysfs_work_item *work;
-+
-+ /* cpumask might be too big for stack */
-+
-+ res = scst_alloc_sysfs_work(__scst_acg_cpu_mask_store_work_fn,
-+ false, &work);
-+ if (res != 0)
-+ goto out;
-+
-+ /*
-+ * We can't use cpumask_parse_user() here, because it expects
-+ * buffer in the user space.
-+ */
-+ res = __bitmap_parse(buf, count, 0, cpumask_bits(&work->cpu_mask),
-+ nr_cpumask_bits);
-+ if (res != 0) {
-+ PRINT_ERROR("__bitmap_parse() failed: %d", res);
-+ goto out_release;
-+ }
-+
-+ if (cpus_equal(acg->acg_cpu_mask, work->cpu_mask))
-+ goto out;
-+
-+ work->tgt = acg->tgt;
-+ work->acg = acg;
-+
-+ res = scst_sysfs_queue_wait_work(work);
-+
-+out:
-+ return res;
-+
-+out_release:
-+ scst_sysfs_work_release(&work->sysfs_work_kref);
-+ goto out;
-+}
-+
-+static ssize_t scst_tgt_cpu_mask_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_acg *acg;
-+ struct scst_tgt *tgt;
-+
-+ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ acg = tgt->default_acg;
-+
-+ return __scst_acg_cpu_mask_show(acg, buf);
-+}
-+
-+static ssize_t scst_tgt_cpu_mask_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_acg *acg;
-+ struct scst_tgt *tgt;
-+
-+ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ acg = tgt->default_acg;
-+
-+ res = __scst_acg_cpu_mask_store(acg, buf, count);
-+ if (res != 0)
-+ goto out;
-+
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_tgt_cpu_mask =
-+ __ATTR(cpu_mask, S_IRUGO | S_IWUSR,
-+ scst_tgt_cpu_mask_show,
-+ scst_tgt_cpu_mask_store);
-+
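-+/*
-+ * Example (target name hypothetical): since the store above uses
-+ * __bitmap_parse(), the value is a comma-separated hex bitmap, not a CPU
-+ * list. E.g. restrict the default ACG's command threads to CPUs 0 and 1:
-+ *
-+ *   echo "3" > /sys/kernel/scst_tgt/targets/iscsi/tgt1/cpu_mask
-+ */
-+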
-+static ssize_t scst_ini_group_mgmt_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ static const char help[] =
-+ "Usage: echo \"create GROUP_NAME\" >mgmt\n"
-+ " echo \"del GROUP_NAME\" >mgmt\n";
-+
-+ return sprintf(buf, "%s", help);
-+}
-+
-+static int scst_process_ini_group_mgmt_store(char *buffer,
-+ struct scst_tgt *tgt)
-+{
-+ int res, action;
-+ char *p, *e = NULL;
-+ struct scst_acg *a, *acg = NULL;
-+ enum {
-+ SCST_INI_GROUP_ACTION_CREATE = 1,
-+ SCST_INI_GROUP_ACTION_DEL = 2,
-+ };
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("tgt %p, buffer %s", tgt, buffer);
-+
-+ p = buffer;
-+ if (p[strlen(p) - 1] == '\n')
-+ p[strlen(p) - 1] = '\0';
-+ if (strncasecmp("create ", p, 7) == 0) {
-+ p += 7;
-+ action = SCST_INI_GROUP_ACTION_CREATE;
-+ } else if (strncasecmp("del ", p, 4) == 0) {
-+ p += 4;
-+ action = SCST_INI_GROUP_ACTION_DEL;
-+ } else {
-+ PRINT_ERROR("Unknown action \"%s\"", p);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ res = scst_suspend_activity(true);
-+ if (res != 0)
-+ goto out;
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res != 0)
-+ goto out_resume;
-+
-+ /* Check if our pointer is still alive */
-+ if (scst_check_tgt_acg_ptrs(tgt, NULL) != 0)
-+ goto out_unlock;
-+
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+ e = p;
-+ while (!isspace(*e) && *e != '\0')
-+ e++;
-+ *e = '\0';
-+
-+ if (p[0] == '\0') {
-+ PRINT_ERROR("%s", "Group name required");
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ list_for_each_entry(a, &tgt->tgt_acg_list, acg_list_entry) {
-+ if (strcmp(a->acg_name, p) == 0) {
-+ TRACE_DBG("group (acg) %p %s found",
-+ a, a->acg_name);
-+ acg = a;
-+ break;
-+ }
-+ }
-+
-+ switch (action) {
-+ case SCST_INI_GROUP_ACTION_CREATE:
-+ TRACE_DBG("Creating group '%s'", p);
-+ if (acg != NULL) {
-+			PRINT_ERROR("acg name %s already exists", p);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+ acg = scst_alloc_add_acg(tgt, p, true);
-+ if (acg == NULL)
-+ goto out_unlock;
-+ break;
-+ case SCST_INI_GROUP_ACTION_DEL:
-+ TRACE_DBG("Deleting group '%s'", p);
-+ if (acg == NULL) {
-+ PRINT_ERROR("Group %s not found", p);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+ if (!scst_acg_sess_is_empty(acg)) {
-+ PRINT_ERROR("Group %s is not empty", acg->acg_name);
-+ res = -EBUSY;
-+ goto out_unlock;
-+ }
-+ scst_del_free_acg(acg);
-+ break;
-+ }
-+
-+ res = 0;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+out_resume:
-+ scst_resume_activity();
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_ini_group_mgmt_store_work_fn(struct scst_sysfs_work_item *work)
-+{
-+ return scst_process_ini_group_mgmt_store(work->buf, work->tgt);
-+}
-+
-+static ssize_t scst_ini_group_mgmt_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ char *buffer;
-+ struct scst_tgt *tgt;
-+ struct scst_sysfs_work_item *work;
-+
-+ TRACE_ENTRY();
-+
-+ tgt = container_of(kobj->parent, struct scst_tgt, tgt_kobj);
-+
-+ buffer = kasprintf(GFP_KERNEL, "%.*s", (int)count, buf);
-+ if (buffer == NULL) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ res = scst_alloc_sysfs_work(scst_ini_group_mgmt_store_work_fn, false,
-+ &work);
-+ if (res != 0)
-+ goto out_free;
-+
-+ work->buf = buffer;
-+ work->tgt = tgt;
-+
-+ res = scst_sysfs_queue_wait_work(work);
-+ if (res == 0)
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free:
-+ kfree(buffer);
-+ goto out;
-+}
-+
-+static struct kobj_attribute scst_ini_group_mgmt =
-+ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_ini_group_mgmt_show,
-+ scst_ini_group_mgmt_store);
-+
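-+/*
-+ * Example usage of the ini_groups mgmt file, following the help text
-+ * above (target and group names hypothetical):
-+ *
-+ *   echo "create VM_HOSTS" \
-+ *       > /sys/kernel/scst_tgt/targets/iscsi/tgt1/ini_groups/mgmt
-+ *   echo "del VM_HOSTS" \
-+ *       > /sys/kernel/scst_tgt/targets/iscsi/tgt1/ini_groups/mgmt
-+ */
-+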
-+static ssize_t scst_tgt_enable_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_tgt *tgt;
-+ int res;
-+ bool enabled;
-+
-+ TRACE_ENTRY();
-+
-+ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+
-+ enabled = tgt->tgtt->is_target_enabled(tgt);
-+
-+ res = sprintf(buf, "%d\n", enabled ? 1 : 0);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_process_tgt_enable_store(struct scst_tgt *tgt, bool enable)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ /* Tgt protected by kobject reference */
-+
-+ TRACE_DBG("tgt %s, enable %d", tgt->tgt_name, enable);
-+
-+ if (enable) {
-+ if (tgt->rel_tgt_id == 0) {
-+ res = gen_relative_target_port_id(&tgt->rel_tgt_id);
-+ if (res != 0)
-+ goto out_put;
-+ PRINT_INFO("Using autogenerated rel ID %d for target "
-+ "%s", tgt->rel_tgt_id, tgt->tgt_name);
-+ } else {
-+ if (!scst_is_relative_target_port_id_unique(
-+ tgt->rel_tgt_id, tgt)) {
-+ PRINT_ERROR("Relative port id %d is not unique",
-+ tgt->rel_tgt_id);
-+ res = -EBADSLT;
-+ goto out_put;
-+ }
-+ }
-+ }
-+
-+ res = tgt->tgtt->enable_target(tgt, enable);
-+
-+out_put:
-+ kobject_put(&tgt->tgt_kobj);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_tgt_enable_store_work_fn(struct scst_sysfs_work_item *work)
-+{
-+ return scst_process_tgt_enable_store(work->tgt, work->enable);
-+}
-+
-+static ssize_t scst_tgt_enable_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_tgt *tgt;
-+ bool enable;
-+ struct scst_sysfs_work_item *work;
-+
-+ TRACE_ENTRY();
-+
-+ if (buf == NULL) {
-+ PRINT_ERROR("%s: NULL buffer?", __func__);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+
-+ switch (buf[0]) {
-+ case '0':
-+ enable = false;
-+ break;
-+ case '1':
-+ enable = true;
-+ break;
-+ default:
-+ PRINT_ERROR("%s: Requested action not understood: %s",
-+ __func__, buf);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ res = scst_alloc_sysfs_work(scst_tgt_enable_store_work_fn, false,
-+ &work);
-+ if (res != 0)
-+ goto out;
-+
-+ work->tgt = tgt;
-+ work->enable = enable;
-+
-+ kobject_get(&tgt->tgt_kobj);
-+
-+ res = scst_sysfs_queue_wait_work(work);
-+ if (res == 0)
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute tgt_enable_attr =
-+ __ATTR(enabled, S_IRUGO | S_IWUSR,
-+ scst_tgt_enable_show, scst_tgt_enable_store);
-+
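-+/*
-+ * Example (target name hypothetical): enabling a target whose
-+ * rel_tgt_id is still 0 also auto-generates a relative target port id,
-+ * as implemented in scst_process_tgt_enable_store() above:
-+ *
-+ *   echo 1 > /sys/kernel/scst_tgt/targets/iscsi/tgt1/enabled
-+ *   echo 0 > /sys/kernel/scst_tgt/targets/iscsi/tgt1/enabled
-+ */
-+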
-+static ssize_t scst_rel_tgt_id_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_tgt *tgt;
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+
-+ res = sprintf(buf, "%d\n%s", tgt->rel_tgt_id,
-+ (tgt->rel_tgt_id != 0) ? SCST_SYSFS_KEY_MARK "\n" : "");
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_process_rel_tgt_id_store(struct scst_sysfs_work_item *work)
-+{
-+ int res = 0;
-+ struct scst_tgt *tgt = work->tgt_r;
-+ unsigned long rel_tgt_id = work->rel_tgt_id;
-+ bool enabled;
-+
-+ TRACE_ENTRY();
-+
-+ /* tgt protected by kobject_get() */
-+
-+ TRACE_DBG("Trying to set relative target port id %d",
-+ (uint16_t)rel_tgt_id);
-+
-+ if (tgt->tgtt->is_target_enabled != NULL)
-+ enabled = tgt->tgtt->is_target_enabled(tgt);
-+ else
-+ enabled = true;
-+
-+ if (enabled && rel_tgt_id != tgt->rel_tgt_id) {
-+ if (!scst_is_relative_target_port_id_unique(rel_tgt_id, tgt)) {
-+ PRINT_ERROR("Relative port id %d is not unique",
-+ (uint16_t)rel_tgt_id);
-+ res = -EBADSLT;
-+ goto out_put;
-+ }
-+ }
-+
-+ if (rel_tgt_id < SCST_MIN_REL_TGT_ID ||
-+ rel_tgt_id > SCST_MAX_REL_TGT_ID) {
-+ if ((rel_tgt_id == 0) && !enabled)
-+ goto set;
-+
-+ PRINT_ERROR("Invalid relative port id %d",
-+ (uint16_t)rel_tgt_id);
-+ res = -EINVAL;
-+ goto out_put;
-+ }
-+
-+set:
-+ tgt->rel_tgt_id = (uint16_t)rel_tgt_id;
-+
-+out_put:
-+ kobject_put(&tgt->tgt_kobj);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t scst_rel_tgt_id_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res = 0;
-+ struct scst_tgt *tgt;
-+ unsigned long rel_tgt_id;
-+ struct scst_sysfs_work_item *work;
-+
-+ TRACE_ENTRY();
-+
-+ if (buf == NULL)
-+ goto out;
-+
-+ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+
-+ res = strict_strtoul(buf, 0, &rel_tgt_id);
-+ if (res != 0) {
-+		PRINT_ERROR("%s", "Invalid rel_tgt_id");
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ res = scst_alloc_sysfs_work(scst_process_rel_tgt_id_store, false,
-+ &work);
-+ if (res != 0)
-+ goto out;
-+
-+ work->tgt_r = tgt;
-+ work->rel_tgt_id = rel_tgt_id;
-+
-+ kobject_get(&tgt->tgt_kobj);
-+
-+ res = scst_sysfs_queue_wait_work(work);
-+ if (res == 0)
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_rel_tgt_id =
-+ __ATTR(rel_tgt_id, S_IRUGO | S_IWUSR, scst_rel_tgt_id_show,
-+ scst_rel_tgt_id_store);
-+
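-+/*
-+ * Example (target name hypothetical): the new id must lie within
-+ * [SCST_MIN_REL_TGT_ID, SCST_MAX_REL_TGT_ID] (else -EINVAL) and, for an
-+ * enabled target, be unique among all targets (else -EBADSLT):
-+ *
-+ *   echo 16 > /sys/kernel/scst_tgt/targets/iscsi/tgt1/rel_tgt_id
-+ */
-+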
-+static ssize_t scst_tgt_comment_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_tgt *tgt;
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+
-+ if (tgt->tgt_comment != NULL)
-+ res = sprintf(buf, "%s\n%s", tgt->tgt_comment,
-+ SCST_SYSFS_KEY_MARK "\n");
-+ else
-+ res = 0;
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t scst_tgt_comment_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_tgt *tgt;
-+ char *p;
-+ int len;
-+
-+ TRACE_ENTRY();
-+
-+ if ((buf == NULL) || (count == 0)) {
-+ res = 0;
-+ goto out;
-+ }
-+
-+ tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+
-+ len = strnlen(buf, count);
-+ if (buf[count-1] == '\n')
-+ len--;
-+
-+ if (len == 0) {
-+ kfree(tgt->tgt_comment);
-+ tgt->tgt_comment = NULL;
-+ goto out_done;
-+ }
-+
-+ p = kmalloc(len+1, GFP_KERNEL);
-+ if (p == NULL) {
-+ PRINT_ERROR("Unable to alloc tgt_comment string (len %d)",
-+ len+1);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ memcpy(p, buf, len);
-+ p[len] = '\0';
-+
-+ kfree(tgt->tgt_comment);
-+
-+ tgt->tgt_comment = p;
-+
-+out_done:
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_tgt_comment =
-+ __ATTR(comment, S_IRUGO | S_IWUSR, scst_tgt_comment_show,
-+ scst_tgt_comment_store);
-+
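-+/*
-+ * Example (target name hypothetical): the comment attribute stores an
-+ * arbitrary free-form string; writing an empty line clears it:
-+ *
-+ *   echo "rack 3, port 12" > /sys/kernel/scst_tgt/targets/iscsi/tgt1/comment
-+ */
-+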
-+/*
-+ * Supposed to be called under scst_mutex. In case of error it will drop
-+ * and then reacquire the mutex.
-+ */
-+int scst_tgt_sysfs_create(struct scst_tgt *tgt)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ res = kobject_init_and_add(&tgt->tgt_kobj, &tgt_ktype,
-+ &tgt->tgtt->tgtt_kobj, tgt->tgt_name);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add tgt %s to sysfs", tgt->tgt_name);
-+ goto out;
-+ }
-+
-+ if ((tgt->tgtt->enable_target != NULL) &&
-+ (tgt->tgtt->is_target_enabled != NULL)) {
-+ res = sysfs_create_file(&tgt->tgt_kobj,
-+ &tgt_enable_attr.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add attr %s to sysfs",
-+ tgt_enable_attr.attr.name);
-+ goto out_err;
-+ }
-+ }
-+
-+ tgt->tgt_sess_kobj = kobject_create_and_add("sessions", &tgt->tgt_kobj);
-+ if (tgt->tgt_sess_kobj == NULL) {
-+ PRINT_ERROR("Can't create sess kobj for tgt %s", tgt->tgt_name);
-+ goto out_nomem;
-+ }
-+
-+ tgt->tgt_luns_kobj = kobject_create_and_add("luns", &tgt->tgt_kobj);
-+ if (tgt->tgt_luns_kobj == NULL) {
-+ PRINT_ERROR("Can't create luns kobj for tgt %s", tgt->tgt_name);
-+ goto out_nomem;
-+ }
-+
-+ res = sysfs_create_file(tgt->tgt_luns_kobj, &scst_luns_mgmt.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add attribute %s for tgt %s",
-+ scst_luns_mgmt.attr.name, tgt->tgt_name);
-+ goto out_err;
-+ }
-+
-+ tgt->tgt_ini_grp_kobj = kobject_create_and_add("ini_groups",
-+ &tgt->tgt_kobj);
-+ if (tgt->tgt_ini_grp_kobj == NULL) {
-+ PRINT_ERROR("Can't create ini_grp kobj for tgt %s",
-+ tgt->tgt_name);
-+ goto out_nomem;
-+ }
-+
-+ res = sysfs_create_file(tgt->tgt_ini_grp_kobj,
-+ &scst_ini_group_mgmt.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add attribute %s for tgt %s",
-+ scst_ini_group_mgmt.attr.name, tgt->tgt_name);
-+ goto out_err;
-+ }
-+
-+ res = sysfs_create_file(&tgt->tgt_kobj,
-+ &scst_rel_tgt_id.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add attribute %s for tgt %s",
-+ scst_rel_tgt_id.attr.name, tgt->tgt_name);
-+ goto out_err;
-+ }
-+
-+ res = sysfs_create_file(&tgt->tgt_kobj,
-+ &scst_tgt_comment.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add attribute %s for tgt %s",
-+ scst_tgt_comment.attr.name, tgt->tgt_name);
-+ goto out_err;
-+ }
-+
-+ res = sysfs_create_file(&tgt->tgt_kobj,
-+ &scst_tgt_addr_method.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add attribute %s for tgt %s",
-+ scst_tgt_addr_method.attr.name, tgt->tgt_name);
-+ goto out_err;
-+ }
-+
-+ res = sysfs_create_file(&tgt->tgt_kobj,
-+ &scst_tgt_io_grouping_type.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add attribute %s for tgt %s",
-+ scst_tgt_io_grouping_type.attr.name, tgt->tgt_name);
-+ goto out_err;
-+ }
-+
-+ res = sysfs_create_file(&tgt->tgt_kobj, &scst_tgt_cpu_mask.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add attribute %s for tgt %s",
-+ scst_tgt_cpu_mask.attr.name, tgt->tgt_name);
-+ goto out_err;
-+ }
-+
-+ if (tgt->tgtt->tgt_attrs) {
-+ res = sysfs_create_files(&tgt->tgt_kobj, tgt->tgtt->tgt_attrs);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add attributes for tgt %s",
-+ tgt->tgt_name);
-+ goto out_err;
-+ }
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_nomem:
-+ res = -ENOMEM;
-+
-+out_err:
-+ mutex_unlock(&scst_mutex);
-+ scst_tgt_sysfs_del(tgt);
-+ mutex_lock(&scst_mutex);
-+ goto out;
-+}
-+
-+/*
-+ * Must not be called under scst_mutex, due to a possible deadlock with
-+ * sysfs ref counting in sysfs work items (this function waits for the
-+ * last put, while the last reference holder may be waiting for scst_mutex)
-+ */
-+void scst_tgt_sysfs_del(struct scst_tgt *tgt)
-+{
-+ int rc;
-+ DECLARE_COMPLETION_ONSTACK(c);
-+
-+ TRACE_ENTRY();
-+
-+ tgt->tgt_kobj_release_cmpl = &c;
-+
-+ kobject_del(tgt->tgt_sess_kobj);
-+ kobject_del(tgt->tgt_luns_kobj);
-+ kobject_del(tgt->tgt_ini_grp_kobj);
-+ kobject_del(&tgt->tgt_kobj);
-+
-+ kobject_put(tgt->tgt_sess_kobj);
-+ kobject_put(tgt->tgt_luns_kobj);
-+ kobject_put(tgt->tgt_ini_grp_kobj);
-+ kobject_put(&tgt->tgt_kobj);
-+
-+ rc = wait_for_completion_timeout(tgt->tgt_kobj_release_cmpl, HZ);
-+ if (rc == 0) {
-+		PRINT_INFO("Waiting for release of sysfs entry "
-+ "for target %s (%d refs)...", tgt->tgt_name,
-+ atomic_read(&tgt->tgt_kobj.kref.refcount));
-+ wait_for_completion(tgt->tgt_kobj_release_cmpl);
-+		PRINT_INFO("Done waiting for release of sysfs "
-+ "entry for target %s", tgt->tgt_name);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ ** Devices directory implementation
-+ **/
-+
-+static ssize_t scst_dev_sysfs_type_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos = 0;
-+
-+ struct scst_device *dev;
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+
-+ pos = sprintf(buf, "%d - %s\n", dev->type,
-+ (unsigned)dev->type >= ARRAY_SIZE(scst_dev_handler_types) ?
-+ "unknown" : scst_dev_handler_types[dev->type]);
-+
-+ return pos;
-+}
-+
-+static struct kobj_attribute dev_type_attr =
-+ __ATTR(type, S_IRUGO, scst_dev_sysfs_type_show, NULL);
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+
-+static ssize_t scst_dev_sysfs_dump_prs(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ struct scst_device *dev;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+
-+ scst_pr_dump_prs(dev, true);
-+
-+ TRACE_EXIT_RES(count);
-+ return count;
-+}
-+
-+static struct kobj_attribute dev_dump_prs_attr =
-+ __ATTR(dump_prs, S_IWUSR, NULL, scst_dev_sysfs_dump_prs);
-+
-+#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
-+
-+static int scst_process_dev_sysfs_threads_data_store(
-+ struct scst_device *dev, int threads_num,
-+ enum scst_dev_type_threads_pool_type threads_pool_type)
-+{
-+ int res = 0;
-+ int oldtn = dev->threads_num;
-+ enum scst_dev_type_threads_pool_type oldtt = dev->threads_pool_type;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("dev %p, threads_num %d, threads_pool_type %d", dev,
-+ threads_num, threads_pool_type);
-+
-+ res = scst_suspend_activity(true);
-+ if (res != 0)
-+ goto out;
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res != 0)
-+ goto out_resume;
-+
-+ /* Check if our pointer is still alive */
-+ if (scst_check_dev_ptr(dev) != 0)
-+ goto out_unlock;
-+
-+ scst_stop_dev_threads(dev);
-+
-+ dev->threads_num = threads_num;
-+ dev->threads_pool_type = threads_pool_type;
-+
-+ res = scst_create_dev_threads(dev);
-+ if (res != 0)
-+ goto out_unlock;
-+
-+ if (oldtn != dev->threads_num)
-+ PRINT_INFO("Changed cmd threads num to %d", dev->threads_num);
-+ else if (oldtt != dev->threads_pool_type)
-+ PRINT_INFO("Changed cmd threads pool type to %d",
-+ dev->threads_pool_type);
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+out_resume:
-+ scst_resume_activity();
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_dev_sysfs_threads_data_store_work_fn(
-+ struct scst_sysfs_work_item *work)
-+{
-+ return scst_process_dev_sysfs_threads_data_store(work->dev,
-+ work->new_threads_num, work->new_threads_pool_type);
-+}
-+
-+static ssize_t scst_dev_sysfs_check_threads_data(
-+ struct scst_device *dev, int threads_num,
-+ enum scst_dev_type_threads_pool_type threads_pool_type, bool *stop)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ *stop = false;
-+
-+ if (dev->threads_num < 0) {
-+ PRINT_ERROR("Threads pool disabled for device %s",
-+ dev->virt_name);
-+ res = -EPERM;
-+ goto out;
-+ }
-+
-+ if ((threads_num == dev->threads_num) &&
-+ (threads_pool_type == dev->threads_pool_type)) {
-+ *stop = true;
-+ goto out;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t scst_dev_sysfs_threads_num_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos = 0;
-+ struct scst_device *dev;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+
-+ pos = sprintf(buf, "%d\n%s", dev->threads_num,
-+ (dev->threads_num != dev->handler->threads_num) ?
-+ SCST_SYSFS_KEY_MARK "\n" : "");
-+
-+ TRACE_EXIT_RES(pos);
-+ return pos;
-+}
-+
-+static ssize_t scst_dev_sysfs_threads_num_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_device *dev;
-+ long newtn;
-+ bool stop;
-+ struct scst_sysfs_work_item *work;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+
-+ res = strict_strtol(buf, 0, &newtn);
-+ if (res != 0) {
-+		PRINT_ERROR("strict_strtol() for %s failed: %d", buf, res);
-+ goto out;
-+ }
-+ if (newtn < 0) {
-+ PRINT_ERROR("Illegal threads num value %ld", newtn);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ res = scst_dev_sysfs_check_threads_data(dev, newtn,
-+ dev->threads_pool_type, &stop);
-+ if ((res != 0) || stop)
-+ goto out;
-+
-+ res = scst_alloc_sysfs_work(scst_dev_sysfs_threads_data_store_work_fn,
-+ false, &work);
-+ if (res != 0)
-+ goto out;
-+
-+ work->dev = dev;
-+ work->new_threads_num = newtn;
-+ work->new_threads_pool_type = dev->threads_pool_type;
-+
-+ res = scst_sysfs_queue_wait_work(work);
-+
-+out:
-+ if (res == 0)
-+ res = count;
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute dev_threads_num_attr =
-+ __ATTR(threads_num, S_IRUGO | S_IWUSR,
-+ scst_dev_sysfs_threads_num_show,
-+ scst_dev_sysfs_threads_num_store);
-+
-+static ssize_t scst_dev_sysfs_threads_pool_type_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos = 0;
-+ struct scst_device *dev;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+
-+ if (dev->threads_num == 0) {
-+ pos = sprintf(buf, "Async\n");
-+ goto out;
-+ } else if (dev->threads_num < 0) {
-+ pos = sprintf(buf, "Not valid\n");
-+ goto out;
-+ }
-+
-+ switch (dev->threads_pool_type) {
-+ case SCST_THREADS_POOL_PER_INITIATOR:
-+ pos = sprintf(buf, "%s\n%s", SCST_THREADS_POOL_PER_INITIATOR_STR,
-+ (dev->threads_pool_type != dev->handler->threads_pool_type) ?
-+ SCST_SYSFS_KEY_MARK "\n" : "");
-+ break;
-+ case SCST_THREADS_POOL_SHARED:
-+ pos = sprintf(buf, "%s\n%s", SCST_THREADS_POOL_SHARED_STR,
-+ (dev->threads_pool_type != dev->handler->threads_pool_type) ?
-+ SCST_SYSFS_KEY_MARK "\n" : "");
-+ break;
-+ default:
-+ pos = sprintf(buf, "Unknown\n");
-+ break;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(pos);
-+ return pos;
-+}
-+
-+static ssize_t scst_dev_sysfs_threads_pool_type_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_device *dev;
-+ enum scst_dev_type_threads_pool_type newtpt;
-+ struct scst_sysfs_work_item *work;
-+ bool stop;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+
-+ newtpt = scst_parse_threads_pool_type(buf, count);
-+ if (newtpt == SCST_THREADS_POOL_TYPE_INVALID) {
-+ PRINT_ERROR("Illegal threads pool type %s", buf);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ TRACE_DBG("buf %s, count %zd, newtpt %d", buf, count, newtpt);
-+
-+ res = scst_dev_sysfs_check_threads_data(dev, dev->threads_num,
-+ newtpt, &stop);
-+ if ((res != 0) || stop)
-+ goto out;
-+
-+ res = scst_alloc_sysfs_work(scst_dev_sysfs_threads_data_store_work_fn,
-+ false, &work);
-+ if (res != 0)
-+ goto out;
-+
-+ work->dev = dev;
-+ work->new_threads_num = dev->threads_num;
-+ work->new_threads_pool_type = newtpt;
-+
-+ res = scst_sysfs_queue_wait_work(work);
-+
-+out:
-+ if (res == 0)
-+ res = count;
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute dev_threads_pool_type_attr =
-+ __ATTR(threads_pool_type, S_IRUGO | S_IWUSR,
-+ scst_dev_sysfs_threads_pool_type_show,
-+ scst_dev_sysfs_threads_pool_type_store);
-+
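-+/*
-+ * Example (device name hypothetical; the string accepted for
-+ * threads_pool_type is whatever scst_parse_threads_pool_type()
-+ * recognizes, assumed here to include "shared"). Both stores funnel into
-+ * scst_process_dev_sysfs_threads_data_store(), which stops and restarts
-+ * the device's command threads with the new settings:
-+ *
-+ *   echo 4 > /sys/kernel/scst_tgt/devices/disk1/threads_num
-+ *   echo "shared" > /sys/kernel/scst_tgt/devices/disk1/threads_pool_type
-+ */
-+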
-+static struct attribute *scst_dev_attrs[] = {
-+ &dev_type_attr.attr,
-+ NULL,
-+};
-+
-+static void scst_sysfs_dev_release(struct kobject *kobj)
-+{
-+ struct scst_device *dev;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ if (dev->dev_kobj_release_cmpl)
-+ complete_all(dev->dev_kobj_release_cmpl);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+int scst_devt_dev_sysfs_create(struct scst_device *dev)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (dev->handler == &scst_null_devtype)
-+ goto out;
-+
-+ res = sysfs_create_link(&dev->dev_kobj,
-+ &dev->handler->devt_kobj, "handler");
-+ if (res != 0) {
-+ PRINT_ERROR("Can't create handler link for dev %s",
-+ dev->virt_name);
-+ goto out;
-+ }
-+
-+ res = sysfs_create_link(&dev->handler->devt_kobj,
-+ &dev->dev_kobj, dev->virt_name);
-+ if (res != 0) {
-+		PRINT_ERROR("Can't create link to dev %s from its handler",
-+ dev->virt_name);
-+ goto out_err;
-+ }
-+
-+ if (dev->handler->threads_num >= 0) {
-+ res = sysfs_create_file(&dev->dev_kobj,
-+ &dev_threads_num_attr.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add dev attr %s for dev %s",
-+ dev_threads_num_attr.attr.name,
-+ dev->virt_name);
-+ goto out_err;
-+ }
-+ res = sysfs_create_file(&dev->dev_kobj,
-+ &dev_threads_pool_type_attr.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add dev attr %s for dev %s",
-+ dev_threads_pool_type_attr.attr.name,
-+ dev->virt_name);
-+ goto out_err;
-+ }
-+ }
-+
-+ if (dev->handler->dev_attrs) {
-+ res = sysfs_create_files(&dev->dev_kobj,
-+ dev->handler->dev_attrs);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add dev attributes for dev %s",
-+ dev->virt_name);
-+ goto out_err;
-+ }
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_err:
-+ scst_devt_dev_sysfs_del(dev);
-+ goto out;
-+}
-+
-+void scst_devt_dev_sysfs_del(struct scst_device *dev)
-+{
-+ TRACE_ENTRY();
-+
-+ if (dev->handler == &scst_null_devtype)
-+ goto out;
-+
-+ if (dev->handler->dev_attrs)
-+ sysfs_remove_files(&dev->dev_kobj, dev->handler->dev_attrs);
-+
-+ sysfs_remove_link(&dev->dev_kobj, "handler");
-+ sysfs_remove_link(&dev->handler->devt_kobj, dev->virt_name);
-+
-+ if (dev->handler->threads_num >= 0) {
-+ sysfs_remove_file(&dev->dev_kobj,
-+ &dev_threads_num_attr.attr);
-+ sysfs_remove_file(&dev->dev_kobj,
-+ &dev_threads_pool_type_attr.attr);
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static struct kobj_type scst_dev_ktype = {
-+ .sysfs_ops = &scst_sysfs_ops,
-+ .release = scst_sysfs_dev_release,
-+ .default_attrs = scst_dev_attrs,
-+};
-+
-+/*
-+ * Must not be called under scst_mutex, because it can call
-+ * scst_dev_sysfs_del()
-+ */
-+int scst_dev_sysfs_create(struct scst_device *dev)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ res = kobject_init_and_add(&dev->dev_kobj, &scst_dev_ktype,
-+ scst_devices_kobj, dev->virt_name);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add device %s to sysfs", dev->virt_name);
-+ goto out;
-+ }
-+
-+ dev->dev_exp_kobj = kobject_create_and_add("exported",
-+ &dev->dev_kobj);
-+ if (dev->dev_exp_kobj == NULL) {
-+		PRINT_ERROR("Can't create 'exported' dir for device %s",
-+ dev->virt_name);
-+ res = -ENOMEM;
-+ goto out_del;
-+ }
-+
-+ if (dev->scsi_dev != NULL) {
-+ res = sysfs_create_link(&dev->dev_kobj,
-+ &dev->scsi_dev->sdev_dev.kobj, "scsi_device");
-+ if (res != 0) {
-+ PRINT_ERROR("Can't create scsi_device link for dev %s",
-+ dev->virt_name);
-+ goto out_del;
-+ }
-+ }
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ if (dev->scsi_dev == NULL) {
-+ res = sysfs_create_file(&dev->dev_kobj,
-+ &dev_dump_prs_attr.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't create attr %s for dev %s",
-+ dev_dump_prs_attr.attr.name, dev->virt_name);
-+ goto out_del;
-+ }
-+ }
-+#endif
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_del:
-+ scst_dev_sysfs_del(dev);
-+ goto out;
-+}
-+
-+/*
-+ * Must not be called under scst_mutex, due to a possible deadlock with
-+ * sysfs ref counting in sysfs work items (this function waits for the
-+ * last put, while the last reference holder may be waiting for scst_mutex)
-+ */
-+void scst_dev_sysfs_del(struct scst_device *dev)
-+{
-+ int rc;
-+ DECLARE_COMPLETION_ONSTACK(c);
-+
-+ TRACE_ENTRY();
-+
-+ dev->dev_kobj_release_cmpl = &c;
-+
-+ kobject_del(dev->dev_exp_kobj);
-+ kobject_del(&dev->dev_kobj);
-+
-+ kobject_put(dev->dev_exp_kobj);
-+ kobject_put(&dev->dev_kobj);
-+
-+ rc = wait_for_completion_timeout(dev->dev_kobj_release_cmpl, HZ);
-+ if (rc == 0) {
-+		PRINT_INFO("Waiting for release of sysfs entry "
-+ "for device %s (%d refs)...", dev->virt_name,
-+ atomic_read(&dev->dev_kobj.kref.refcount));
-+ wait_for_completion(dev->dev_kobj_release_cmpl);
-+		PRINT_INFO("Done waiting for release of sysfs "
-+ "entry for device %s", dev->virt_name);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ ** Tgt_dev implementation
-+ **/
-+
-+#ifdef CONFIG_SCST_MEASURE_LATENCY
-+
-+static char *scst_io_size_names[] = {
-+ "<=8K ",
-+ "<=32K ",
-+ "<=128K",
-+ "<=512K",
-+ ">512K "
-+};
-+
-+static ssize_t scst_tgt_dev_latency_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buffer)
-+{
-+ int res = 0, i;
-+ char buf[50];
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ tgt_dev = container_of(kobj, struct scst_tgt_dev, tgt_dev_kobj);
-+
-+ for (i = 0; i < SCST_LATENCY_STATS_NUM; i++) {
-+ uint64_t scst_time_wr, tgt_time_wr, dev_time_wr;
-+ unsigned int processed_cmds_wr;
-+ uint64_t scst_time_rd, tgt_time_rd, dev_time_rd;
-+ unsigned int processed_cmds_rd;
-+ struct scst_ext_latency_stat *latency_stat;
-+
-+ latency_stat = &tgt_dev->dev_latency_stat[i];
-+ scst_time_wr = latency_stat->scst_time_wr;
-+ scst_time_rd = latency_stat->scst_time_rd;
-+ tgt_time_wr = latency_stat->tgt_time_wr;
-+ tgt_time_rd = latency_stat->tgt_time_rd;
-+ dev_time_wr = latency_stat->dev_time_wr;
-+ dev_time_rd = latency_stat->dev_time_rd;
-+ processed_cmds_wr = latency_stat->processed_cmds_wr;
-+ processed_cmds_rd = latency_stat->processed_cmds_rd;
-+
-+ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
-+ "%-5s %-9s %-15lu ", "Write", scst_io_size_names[i],
-+ (unsigned long)processed_cmds_wr);
-+ if (processed_cmds_wr == 0)
-+ processed_cmds_wr = 1;
-+
-+ do_div(scst_time_wr, processed_cmds_wr);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_scst_time_wr,
-+ (unsigned long)scst_time_wr,
-+ (unsigned long)latency_stat->max_scst_time_wr,
-+ (unsigned long)latency_stat->scst_time_wr);
-+ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
-+ "%-47s", buf);
-+
-+ do_div(tgt_time_wr, processed_cmds_wr);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_tgt_time_wr,
-+ (unsigned long)tgt_time_wr,
-+ (unsigned long)latency_stat->max_tgt_time_wr,
-+ (unsigned long)latency_stat->tgt_time_wr);
-+ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
-+ "%-47s", buf);
-+
-+ do_div(dev_time_wr, processed_cmds_wr);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_dev_time_wr,
-+ (unsigned long)dev_time_wr,
-+ (unsigned long)latency_stat->max_dev_time_wr,
-+ (unsigned long)latency_stat->dev_time_wr);
-+ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
-+ "%-47s\n", buf);
-+
-+ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
-+ "%-5s %-9s %-15lu ", "Read", scst_io_size_names[i],
-+ (unsigned long)processed_cmds_rd);
-+ if (processed_cmds_rd == 0)
-+ processed_cmds_rd = 1;
-+
-+ do_div(scst_time_rd, processed_cmds_rd);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_scst_time_rd,
-+ (unsigned long)scst_time_rd,
-+ (unsigned long)latency_stat->max_scst_time_rd,
-+ (unsigned long)latency_stat->scst_time_rd);
-+ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
-+ "%-47s", buf);
-+
-+ do_div(tgt_time_rd, processed_cmds_rd);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_tgt_time_rd,
-+ (unsigned long)tgt_time_rd,
-+ (unsigned long)latency_stat->max_tgt_time_rd,
-+ (unsigned long)latency_stat->tgt_time_rd);
-+ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
-+ "%-47s", buf);
-+
-+ do_div(dev_time_rd, processed_cmds_rd);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_dev_time_rd,
-+ (unsigned long)dev_time_rd,
-+ (unsigned long)latency_stat->max_dev_time_rd,
-+ (unsigned long)latency_stat->dev_time_rd);
-+ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
-+ "%-47s\n", buf);
-+ }
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute tgt_dev_latency_attr =
-+ __ATTR(latency, S_IRUGO,
-+ scst_tgt_dev_latency_show, NULL);
-+
-+#endif /* CONFIG_SCST_MEASURE_LATENCY */
-+
-+static ssize_t scst_tgt_dev_active_commands_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos = 0;
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ tgt_dev = container_of(kobj, struct scst_tgt_dev, tgt_dev_kobj);
-+
-+ pos = sprintf(buf, "%d\n", atomic_read(&tgt_dev->tgt_dev_cmd_count));
-+
-+ return pos;
-+}
-+
-+static struct kobj_attribute tgt_dev_active_commands_attr =
-+ __ATTR(active_commands, S_IRUGO,
-+ scst_tgt_dev_active_commands_show, NULL);
-+
-+static struct attribute *scst_tgt_dev_attrs[] = {
-+ &tgt_dev_active_commands_attr.attr,
-+#ifdef CONFIG_SCST_MEASURE_LATENCY
-+ &tgt_dev_latency_attr.attr,
-+#endif
-+ NULL,
-+};
-+
-+static void scst_sysfs_tgt_dev_release(struct kobject *kobj)
-+{
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ tgt_dev = container_of(kobj, struct scst_tgt_dev, tgt_dev_kobj);
-+ if (tgt_dev->tgt_dev_kobj_release_cmpl)
-+ complete_all(tgt_dev->tgt_dev_kobj_release_cmpl);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static struct kobj_type scst_tgt_dev_ktype = {
-+ .sysfs_ops = &scst_sysfs_ops,
-+ .release = scst_sysfs_tgt_dev_release,
-+ .default_attrs = scst_tgt_dev_attrs,
-+};
-+
-+int scst_tgt_dev_sysfs_create(struct scst_tgt_dev *tgt_dev)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ res = kobject_init_and_add(&tgt_dev->tgt_dev_kobj, &scst_tgt_dev_ktype,
-+ &tgt_dev->sess->sess_kobj, "lun%lld",
-+ (unsigned long long)tgt_dev->lun);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add tgt_dev %lld to sysfs",
-+ (unsigned long long)tgt_dev->lun);
-+ goto out;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
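-+/*
-+ * The kobject added above makes each LUN of a session visible as, e.g.
-+ * (driver, target and session names hypothetical):
-+ *
-+ *   /sys/kernel/scst_tgt/targets/iscsi/tgt1/sessions/<initiator>/lun0/
-+ *
-+ * so the per-LUN state can be read directly:
-+ *
-+ *   cat .../sessions/<initiator>/lun0/active_commands
-+ */
-+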
-+/*
-+ * Called with scst_mutex held.
-+ *
-+ * !! No sysfs work item may use kobject_get() to protect tgt_dev, due to
-+ * !! a possible deadlock with scst_mutex (this function waits for the last
-+ * !! put, while the last reference holder may be waiting for scst_mutex)
-+ */
-+void scst_tgt_dev_sysfs_del(struct scst_tgt_dev *tgt_dev)
-+{
-+ int rc;
-+ DECLARE_COMPLETION_ONSTACK(c);
-+
-+ TRACE_ENTRY();
-+
-+ tgt_dev->tgt_dev_kobj_release_cmpl = &c;
-+
-+ kobject_del(&tgt_dev->tgt_dev_kobj);
-+ kobject_put(&tgt_dev->tgt_dev_kobj);
-+
-+ rc = wait_for_completion_timeout(
-+ tgt_dev->tgt_dev_kobj_release_cmpl, HZ);
-+ if (rc == 0) {
-+		PRINT_INFO("Waiting for release of sysfs entry "
-+ "for tgt_dev %lld (%d refs)...",
-+ (unsigned long long)tgt_dev->lun,
-+ atomic_read(&tgt_dev->tgt_dev_kobj.kref.refcount));
-+ wait_for_completion(tgt_dev->tgt_dev_kobj_release_cmpl);
-+		PRINT_INFO("Done waiting for release of sysfs entry for "
-+ "tgt_dev %lld", (unsigned long long)tgt_dev->lun);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ ** Sessions subdirectory implementation
-+ **/
-+
-+#ifdef CONFIG_SCST_MEASURE_LATENCY
-+
-+static ssize_t scst_sess_latency_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buffer)
-+{
-+ ssize_t res = 0;
-+ struct scst_session *sess;
-+ int i;
-+ char buf[50];
-+ uint64_t scst_time, tgt_time, dev_time;
-+ unsigned int processed_cmds;
-+
-+ TRACE_ENTRY();
-+
-+ sess = container_of(kobj, struct scst_session, sess_kobj);
-+
-+ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
-+ "%-15s %-15s %-46s %-46s %-46s\n",
-+ "T-L names", "Total commands", "SCST latency",
-+ "Target latency", "Dev latency (min/avg/max/all ns)");
-+
-+ spin_lock_bh(&sess->lat_lock);
-+
-+ for (i = 0; i < SCST_LATENCY_STATS_NUM ; i++) {
-+ uint64_t scst_time_wr, tgt_time_wr, dev_time_wr;
-+ unsigned int processed_cmds_wr;
-+ uint64_t scst_time_rd, tgt_time_rd, dev_time_rd;
-+ unsigned int processed_cmds_rd;
-+ struct scst_ext_latency_stat *latency_stat;
-+
-+ latency_stat = &sess->sess_latency_stat[i];
-+ scst_time_wr = latency_stat->scst_time_wr;
-+ scst_time_rd = latency_stat->scst_time_rd;
-+ tgt_time_wr = latency_stat->tgt_time_wr;
-+ tgt_time_rd = latency_stat->tgt_time_rd;
-+ dev_time_wr = latency_stat->dev_time_wr;
-+ dev_time_rd = latency_stat->dev_time_rd;
-+ processed_cmds_wr = latency_stat->processed_cmds_wr;
-+ processed_cmds_rd = latency_stat->processed_cmds_rd;
-+
-+ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
-+ "%-5s %-9s %-15lu ",
-+ "Write", scst_io_size_names[i],
-+ (unsigned long)processed_cmds_wr);
-+ if (processed_cmds_wr == 0)
-+ processed_cmds_wr = 1;
-+
-+ do_div(scst_time_wr, processed_cmds_wr);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_scst_time_wr,
-+ (unsigned long)scst_time_wr,
-+ (unsigned long)latency_stat->max_scst_time_wr,
-+ (unsigned long)latency_stat->scst_time_wr);
-+ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
-+ "%-47s", buf);
-+
-+ do_div(tgt_time_wr, processed_cmds_wr);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_tgt_time_wr,
-+ (unsigned long)tgt_time_wr,
-+ (unsigned long)latency_stat->max_tgt_time_wr,
-+ (unsigned long)latency_stat->tgt_time_wr);
-+ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
-+ "%-47s", buf);
-+
-+ do_div(dev_time_wr, processed_cmds_wr);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_dev_time_wr,
-+ (unsigned long)dev_time_wr,
-+ (unsigned long)latency_stat->max_dev_time_wr,
-+ (unsigned long)latency_stat->dev_time_wr);
-+ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
-+ "%-47s\n", buf);
-+
-+ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
-+ "%-5s %-9s %-15lu ",
-+ "Read", scst_io_size_names[i],
-+ (unsigned long)processed_cmds_rd);
-+ if (processed_cmds_rd == 0)
-+ processed_cmds_rd = 1;
-+
-+ do_div(scst_time_rd, processed_cmds_rd);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_scst_time_rd,
-+ (unsigned long)scst_time_rd,
-+ (unsigned long)latency_stat->max_scst_time_rd,
-+ (unsigned long)latency_stat->scst_time_rd);
-+ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
-+ "%-47s", buf);
-+
-+ do_div(tgt_time_rd, processed_cmds_rd);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_tgt_time_rd,
-+ (unsigned long)tgt_time_rd,
-+ (unsigned long)latency_stat->max_tgt_time_rd,
-+ (unsigned long)latency_stat->tgt_time_rd);
-+ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
-+ "%-47s", buf);
-+
-+ do_div(dev_time_rd, processed_cmds_rd);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_dev_time_rd,
-+ (unsigned long)dev_time_rd,
-+ (unsigned long)latency_stat->max_dev_time_rd,
-+ (unsigned long)latency_stat->dev_time_rd);
-+ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
-+ "%-47s\n", buf);
-+ }
-+
-+ scst_time = sess->scst_time;
-+ tgt_time = sess->tgt_time;
-+ dev_time = sess->dev_time;
-+ processed_cmds = sess->processed_cmds;
-+
-+ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
-+ "\n%-15s %-16d", "Overall ", processed_cmds);
-+
-+ if (processed_cmds == 0)
-+ processed_cmds = 1;
-+
-+ do_div(scst_time, processed_cmds);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)sess->min_scst_time,
-+ (unsigned long)scst_time,
-+ (unsigned long)sess->max_scst_time,
-+ (unsigned long)sess->scst_time);
-+ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
-+ "%-47s", buf);
-+
-+ do_div(tgt_time, processed_cmds);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)sess->min_tgt_time,
-+ (unsigned long)tgt_time,
-+ (unsigned long)sess->max_tgt_time,
-+ (unsigned long)sess->tgt_time);
-+ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
-+ "%-47s", buf);
-+
-+ do_div(dev_time, processed_cmds);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)sess->min_dev_time,
-+ (unsigned long)dev_time,
-+ (unsigned long)sess->max_dev_time,
-+ (unsigned long)sess->dev_time);
-+ res += scnprintf(&buffer[res], SCST_SYSFS_BLOCK_SIZE - res,
-+ "%-47s\n\n", buf);
-+
-+ spin_unlock_bh(&sess->lat_lock);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_sess_zero_latency(struct scst_sysfs_work_item *work)
-+{
-+ int res = 0, t;
-+ struct scst_session *sess = work->sess;
-+
-+ TRACE_ENTRY();
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res != 0)
-+ goto out_put;
-+
-+ PRINT_INFO("Zeroing latency statistics for initiator "
-+ "%s", sess->initiator_name);
-+
-+ spin_lock_bh(&sess->lat_lock);
-+
-+ sess->scst_time = 0;
-+ sess->tgt_time = 0;
-+ sess->dev_time = 0;
-+ sess->min_scst_time = 0;
-+ sess->min_tgt_time = 0;
-+ sess->min_dev_time = 0;
-+ sess->max_scst_time = 0;
-+ sess->max_tgt_time = 0;
-+ sess->max_dev_time = 0;
-+ sess->processed_cmds = 0;
-+ memset(sess->sess_latency_stat, 0,
-+ sizeof(sess->sess_latency_stat));
-+
-+ for (t = SESS_TGT_DEV_LIST_HASH_SIZE-1; t >= 0; t--) {
-+ struct list_head *head = &sess->sess_tgt_dev_list[t];
-+ struct scst_tgt_dev *tgt_dev;
-+ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
-+ tgt_dev->scst_time = 0;
-+ tgt_dev->tgt_time = 0;
-+ tgt_dev->dev_time = 0;
-+ tgt_dev->processed_cmds = 0;
-+ memset(tgt_dev->dev_latency_stat, 0,
-+ sizeof(tgt_dev->dev_latency_stat));
-+ }
-+ }
-+
-+ spin_unlock_bh(&sess->lat_lock);
-+
-+ mutex_unlock(&scst_mutex);
-+
-+out_put:
-+ kobject_put(&sess->sess_kobj);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t scst_sess_latency_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_session *sess;
-+ struct scst_sysfs_work_item *work;
-+
-+ TRACE_ENTRY();
-+
-+ sess = container_of(kobj, struct scst_session, sess_kobj);
-+
-+ res = scst_alloc_sysfs_work(scst_sess_zero_latency, false, &work);
-+ if (res != 0)
-+ goto out;
-+
-+ work->sess = sess;
-+
-+ kobject_get(&sess->sess_kobj);
-+
-+ res = scst_sysfs_queue_wait_work(work);
-+ if (res == 0)
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute session_latency_attr =
-+ __ATTR(latency, S_IRUGO | S_IWUSR, scst_sess_latency_show,
-+ scst_sess_latency_store);
-+
-+#endif /* CONFIG_SCST_MEASURE_LATENCY */
-+
-+static ssize_t scst_sess_sysfs_commands_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_session *sess;
-+
-+ sess = container_of(kobj, struct scst_session, sess_kobj);
-+
-+ return sprintf(buf, "%i\n", atomic_read(&sess->sess_cmd_count));
-+}
-+
-+static struct kobj_attribute session_commands_attr =
-+ __ATTR(commands, S_IRUGO, scst_sess_sysfs_commands_show, NULL);
-+
-+static int scst_sysfs_sess_get_active_commands(struct scst_session *sess)
-+{
-+ int res;
-+ int active_cmds = 0, t;
-+
-+ TRACE_ENTRY();
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res != 0)
-+ goto out_put;
-+
-+ for (t = SESS_TGT_DEV_LIST_HASH_SIZE-1; t >= 0; t--) {
-+ struct list_head *head = &sess->sess_tgt_dev_list[t];
-+ struct scst_tgt_dev *tgt_dev;
-+ list_for_each_entry(tgt_dev, head, sess_tgt_dev_list_entry) {
-+ active_cmds += atomic_read(&tgt_dev->tgt_dev_cmd_count);
-+ }
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ res = active_cmds;
-+
-+out_put:
-+ kobject_put(&sess->sess_kobj);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_sysfs_sess_get_active_commands_work_fn(struct scst_sysfs_work_item *work)
-+{
-+ return scst_sysfs_sess_get_active_commands(work->sess);
-+}
-+
-+static ssize_t scst_sess_sysfs_active_commands_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int res;
-+ struct scst_session *sess;
-+ struct scst_sysfs_work_item *work;
-+
-+ sess = container_of(kobj, struct scst_session, sess_kobj);
-+
-+ res = scst_alloc_sysfs_work(scst_sysfs_sess_get_active_commands_work_fn,
-+ true, &work);
-+ if (res != 0)
-+ goto out;
-+
-+ work->sess = sess;
-+
-+ kobject_get(&sess->sess_kobj);
-+
-+ res = scst_sysfs_queue_wait_work(work);
-+ if (res != -EAGAIN)
-+ res = sprintf(buf, "%i\n", res);
-+
-+out:
-+ return res;
-+}
-+
-+static struct kobj_attribute session_active_commands_attr =
-+ __ATTR(active_commands, S_IRUGO, scst_sess_sysfs_active_commands_show,
-+ NULL);
-+
-+static ssize_t scst_sess_sysfs_initiator_name_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_session *sess;
-+
-+ sess = container_of(kobj, struct scst_session, sess_kobj);
-+
-+ return scnprintf(buf, SCST_SYSFS_BLOCK_SIZE, "%s\n",
-+ sess->initiator_name);
-+}
-+
-+static struct kobj_attribute session_initiator_name_attr =
-+ __ATTR(initiator_name, S_IRUGO, scst_sess_sysfs_initiator_name_show,
-+ NULL);
-+
-+#define SCST_SESS_SYSFS_STAT_ATTR(name, exported_name, dir, kb) \
-+static ssize_t scst_sess_sysfs_##exported_name##_show(struct kobject *kobj, \
-+ struct kobj_attribute *attr, char *buf) \
-+{ \
-+ struct scst_session *sess; \
-+ int res; \
-+ uint64_t v; \
-+ \
-+ BUILD_BUG_ON(SCST_DATA_UNKNOWN != 0); \
-+ BUILD_BUG_ON(SCST_DATA_WRITE != 1); \
-+ BUILD_BUG_ON(SCST_DATA_READ != 2); \
-+ BUILD_BUG_ON(SCST_DATA_BIDI != 3); \
-+ BUILD_BUG_ON(SCST_DATA_NONE != 4); \
-+ \
-+ BUILD_BUG_ON(dir >= SCST_DATA_DIR_MAX); \
-+ \
-+ sess = container_of(kobj, struct scst_session, sess_kobj); \
-+ v = sess->io_stats[dir].name; \
-+ if (kb) \
-+ v >>= 10; \
-+ res = sprintf(buf, "%llu\n", (unsigned long long)v); \
-+ return res; \
-+} \
-+ \
-+static ssize_t scst_sess_sysfs_##exported_name##_store(struct kobject *kobj, \
-+ struct kobj_attribute *attr, const char *buf, size_t count) \
-+{ \
-+ struct scst_session *sess; \
-+ sess = container_of(kobj, struct scst_session, sess_kobj); \
-+ spin_lock_irq(&sess->sess_list_lock); \
-+ BUILD_BUG_ON(dir >= SCST_DATA_DIR_MAX); \
-+ sess->io_stats[dir].cmd_count = 0; \
-+ sess->io_stats[dir].io_byte_count = 0; \
-+ spin_unlock_irq(&sess->sess_list_lock); \
-+ return count; \
-+} \
-+ \
-+static struct kobj_attribute session_##exported_name##_attr = \
-+ __ATTR(exported_name, S_IRUGO | S_IWUSR, \
-+ scst_sess_sysfs_##exported_name##_show, \
-+ scst_sess_sysfs_##exported_name##_store);
-+
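-+/*
-+ * Per-direction I/O statistics attributes. Reading returns the selected
-+ * counter (shifted to KB when "kb" is set); writing any value resets both
-+ * the command and byte counters for that direction.
-+ */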
-+SCST_SESS_SYSFS_STAT_ATTR(cmd_count, unknown_cmd_count, SCST_DATA_UNKNOWN, 0);
-+SCST_SESS_SYSFS_STAT_ATTR(cmd_count, write_cmd_count, SCST_DATA_WRITE, 0);
-+SCST_SESS_SYSFS_STAT_ATTR(io_byte_count, write_io_count_kb, SCST_DATA_WRITE, 1);
-+SCST_SESS_SYSFS_STAT_ATTR(cmd_count, read_cmd_count, SCST_DATA_READ, 0);
-+SCST_SESS_SYSFS_STAT_ATTR(io_byte_count, read_io_count_kb, SCST_DATA_READ, 1);
-+SCST_SESS_SYSFS_STAT_ATTR(cmd_count, bidi_cmd_count, SCST_DATA_BIDI, 0);
-+SCST_SESS_SYSFS_STAT_ATTR(io_byte_count, bidi_io_count_kb, SCST_DATA_BIDI, 1);
-+SCST_SESS_SYSFS_STAT_ATTR(cmd_count, none_cmd_count, SCST_DATA_NONE, 0);
-+
-+static struct attribute *scst_session_attrs[] = {
-+ &session_commands_attr.attr,
-+ &session_active_commands_attr.attr,
-+ &session_initiator_name_attr.attr,
-+ &session_unknown_cmd_count_attr.attr,
-+ &session_write_cmd_count_attr.attr,
-+ &session_write_io_count_kb_attr.attr,
-+ &session_read_cmd_count_attr.attr,
-+ &session_read_io_count_kb_attr.attr,
-+ &session_bidi_cmd_count_attr.attr,
-+ &session_bidi_io_count_kb_attr.attr,
-+ &session_none_cmd_count_attr.attr,
-+#ifdef CONFIG_SCST_MEASURE_LATENCY
-+ &session_latency_attr.attr,
-+#endif /* CONFIG_SCST_MEASURE_LATENCY */
-+ NULL,
-+};
-+
-+static void scst_sysfs_session_release(struct kobject *kobj)
-+{
-+ struct scst_session *sess;
-+
-+ TRACE_ENTRY();
-+
-+ sess = container_of(kobj, struct scst_session, sess_kobj);
-+ if (sess->sess_kobj_release_cmpl)
-+ complete_all(sess->sess_kobj_release_cmpl);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static struct kobj_type scst_session_ktype = {
-+ .sysfs_ops = &scst_sysfs_ops,
-+ .release = scst_sysfs_session_release,
-+ .default_attrs = scst_session_attrs,
-+};
-+
-+static int scst_create_sess_luns_link(struct scst_session *sess)
-+{
-+ int res;
-+
-+	/*
-+	 * No locks are needed, because sess is supposed to be on
-+	 * acg->acg_sess_list and tgt->sess_list, which keeps both from
-+	 * disappearing.
-+	 */
-+
-+ if (sess->acg == sess->tgt->default_acg)
-+ res = sysfs_create_link(&sess->sess_kobj,
-+ sess->tgt->tgt_luns_kobj, "luns");
-+ else
-+ res = sysfs_create_link(&sess->sess_kobj,
-+ sess->acg->luns_kobj, "luns");
-+
-+ if (res != 0)
-+ PRINT_ERROR("Can't create luns link for initiator %s",
-+ sess->initiator_name);
-+
-+ return res;
-+}
-+
-+int scst_recreate_sess_luns_link(struct scst_session *sess)
-+{
-+ sysfs_remove_link(&sess->sess_kobj, "luns");
-+ return scst_create_sess_luns_link(sess);
-+}
-+
-+/* Supposed to be called under scst_mutex */
-+int scst_sess_sysfs_create(struct scst_session *sess)
-+{
-+ int res = 0;
-+ struct scst_session *s;
-+ char *name = (char *)sess->initiator_name;
-+ int len = strlen(name) + 1, n = 1;
-+
-+ TRACE_ENTRY();
-+
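-+	/*
-+	 * sysfs requires unique kobject names, so if another session from the
-+	 * same initiator is already registered, derive a unique "<name>_<n>"
-+	 * name and rescan the list from the beginning.
-+	 */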
-+restart:
-+ list_for_each_entry(s, &sess->tgt->sess_list, sess_list_entry) {
-+ if (!s->sess_kobj_ready)
-+ continue;
-+
-+ if (strcmp(name, kobject_name(&s->sess_kobj)) == 0) {
-+ if (s == sess)
-+ continue;
-+
-+			TRACE_DBG("Duplicate session from the same initiator "
-+				"%s found", name);
-+
-+			if (name == sess->initiator_name) {
-+				len = strlen(sess->initiator_name);
-+				len += 20;
-+				name = kmalloc(len, GFP_KERNEL);
-+				if (name == NULL) {
-+					PRINT_ERROR("Unable to allocate a "
-+						"replacement name (size %d)",
-+						len);
-+					res = -ENOMEM;
-+					goto out_free;
-+				}
-+			}
-+
-+ snprintf(name, len, "%s_%d", sess->initiator_name, n);
-+ n++;
-+ goto restart;
-+ }
-+ }
-+
-+ TRACE_DBG("Adding session %s to sysfs", name);
-+
-+ res = kobject_init_and_add(&sess->sess_kobj, &scst_session_ktype,
-+ sess->tgt->tgt_sess_kobj, name);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add session %s to sysfs", name);
-+ goto out_free;
-+ }
-+
-+ sess->sess_kobj_ready = 1;
-+
-+ if (sess->tgt->tgtt->sess_attrs) {
-+ res = sysfs_create_files(&sess->sess_kobj,
-+ sess->tgt->tgtt->sess_attrs);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add attributes for session %s", name);
-+ goto out_free;
-+ }
-+ }
-+
-+ res = scst_create_sess_luns_link(sess);
-+
-+out_free:
-+ if (name != sess->initiator_name)
-+ kfree(name);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/*
-+ * Must not be called under scst_mutex, because of a possible deadlock with
-+ * sysfs reference counting in sysfs works (this function waits for the last
-+ * put, while the last reference holder may be waiting for scst_mutex).
-+ */
-+void scst_sess_sysfs_del(struct scst_session *sess)
-+{
-+ int rc;
-+ DECLARE_COMPLETION_ONSTACK(c);
-+
-+ TRACE_ENTRY();
-+
-+ if (!sess->sess_kobj_ready)
-+ goto out;
-+
-+ TRACE_DBG("Deleting session %s from sysfs",
-+ kobject_name(&sess->sess_kobj));
-+
-+ sess->sess_kobj_release_cmpl = &c;
-+
-+ kobject_del(&sess->sess_kobj);
-+ kobject_put(&sess->sess_kobj);
-+
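-+	/*
-+	 * The put above may not drop the last reference (e.g. an in-flight
-+	 * sysfs read can still hold one), so wait for the release callback,
-+	 * logging a progress message if it takes longer than a second.
-+	 */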
-+ rc = wait_for_completion_timeout(sess->sess_kobj_release_cmpl, HZ);
-+ if (rc == 0) {
-+		PRINT_INFO("Waiting for release of the sysfs entry "
-+			"for session from %s (%d refs)...", sess->initiator_name,
-+			atomic_read(&sess->sess_kobj.kref.refcount));
-+		wait_for_completion(sess->sess_kobj_release_cmpl);
-+		PRINT_INFO("Done waiting for release of the sysfs "
-+			"entry for session %s", sess->initiator_name);
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ ** Target luns directory implementation
-+ **/
-+
-+static void scst_acg_dev_release(struct kobject *kobj)
-+{
-+ struct scst_acg_dev *acg_dev;
-+
-+ TRACE_ENTRY();
-+
-+ acg_dev = container_of(kobj, struct scst_acg_dev, acg_dev_kobj);
-+ if (acg_dev->acg_dev_kobj_release_cmpl)
-+ complete_all(acg_dev->acg_dev_kobj_release_cmpl);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static ssize_t scst_lun_rd_only_show(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ char *buf)
-+{
-+ struct scst_acg_dev *acg_dev;
-+
-+ acg_dev = container_of(kobj, struct scst_acg_dev, acg_dev_kobj);
-+
-+ if (acg_dev->rd_only || acg_dev->dev->rd_only)
-+ return sprintf(buf, "%d\n%s\n", 1, SCST_SYSFS_KEY_MARK);
-+ else
-+ return sprintf(buf, "%d\n", 0);
-+}
-+
-+static struct kobj_attribute lun_options_attr =
-+ __ATTR(read_only, S_IRUGO, scst_lun_rd_only_show, NULL);
-+
-+static struct attribute *lun_attrs[] = {
-+ &lun_options_attr.attr,
-+ NULL,
-+};
-+
-+static struct kobj_type acg_dev_ktype = {
-+ .sysfs_ops = &scst_sysfs_ops,
-+ .release = scst_acg_dev_release,
-+ .default_attrs = lun_attrs,
-+};
-+
-+/*
-+ * Called with scst_mutex held.
-+ *
-+ * !! No sysfs work may use kobject_get() to protect acg_dev, because of a
-+ * !! possible deadlock with scst_mutex (this function waits for the last
-+ * !! put, while the last reference holder may be waiting for scst_mutex).
-+ */
-+void scst_acg_dev_sysfs_del(struct scst_acg_dev *acg_dev)
-+{
-+ int rc;
-+ DECLARE_COMPLETION_ONSTACK(c);
-+
-+ TRACE_ENTRY();
-+
-+ acg_dev->acg_dev_kobj_release_cmpl = &c;
-+
-+ if (acg_dev->dev != NULL) {
-+ sysfs_remove_link(acg_dev->dev->dev_exp_kobj,
-+ acg_dev->acg_dev_link_name);
-+ kobject_put(&acg_dev->dev->dev_kobj);
-+ }
-+
-+ kobject_del(&acg_dev->acg_dev_kobj);
-+ kobject_put(&acg_dev->acg_dev_kobj);
-+
-+ rc = wait_for_completion_timeout(acg_dev->acg_dev_kobj_release_cmpl, HZ);
-+ if (rc == 0) {
-+		PRINT_INFO("Waiting for release of the sysfs entry "
-+			"for acg_dev %p (%d refs)...", acg_dev,
-+			atomic_read(&acg_dev->acg_dev_kobj.kref.refcount));
-+		wait_for_completion(acg_dev->acg_dev_kobj_release_cmpl);
-+		PRINT_INFO("Done waiting for release of the sysfs "
-+			"entry for acg_dev %p", acg_dev);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+int scst_acg_dev_sysfs_create(struct scst_acg_dev *acg_dev,
-+ struct kobject *parent)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ res = kobject_init_and_add(&acg_dev->acg_dev_kobj, &acg_dev_ktype,
-+ parent, "%llu", acg_dev->lun);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add acg_dev %p to sysfs", acg_dev);
-+ goto out;
-+ }
-+
-+ kobject_get(&acg_dev->dev->dev_kobj);
-+
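-+	/* Give each exported LUN a unique "exportN" link name under the device. */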
-+ snprintf(acg_dev->acg_dev_link_name, sizeof(acg_dev->acg_dev_link_name),
-+ "export%u", acg_dev->dev->dev_exported_lun_num++);
-+
-+ res = sysfs_create_link(acg_dev->dev->dev_exp_kobj,
-+ &acg_dev->acg_dev_kobj, acg_dev->acg_dev_link_name);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't create acg %s LUN link",
-+ acg_dev->acg->acg_name);
-+ goto out_del;
-+ }
-+
-+ res = sysfs_create_link(&acg_dev->acg_dev_kobj,
-+ &acg_dev->dev->dev_kobj, "device");
-+ if (res != 0) {
-+ PRINT_ERROR("Can't create acg %s device link",
-+ acg_dev->acg->acg_name);
-+ goto out_del;
-+ }
-+
-+out:
-+ return res;
-+
-+out_del:
-+ scst_acg_dev_sysfs_del(acg_dev);
-+ goto out;
-+}
-+
-+/**
-+ ** ini_groups directory implementation.
-+ **/
-+
-+static void scst_acg_release(struct kobject *kobj)
-+{
-+ struct scst_acg *acg;
-+
-+ TRACE_ENTRY();
-+
-+ acg = container_of(kobj, struct scst_acg, acg_kobj);
-+ if (acg->acg_kobj_release_cmpl)
-+ complete_all(acg->acg_kobj_release_cmpl);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static struct kobj_type acg_ktype = {
-+ .sysfs_ops = &scst_sysfs_ops,
-+ .release = scst_acg_release,
-+};
-+
-+static ssize_t scst_acg_ini_mgmt_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ static const char help[] =
-+ "Usage: echo \"add INITIATOR_NAME\" >mgmt\n"
-+ " echo \"del INITIATOR_NAME\" >mgmt\n"
-+ " echo \"move INITIATOR_NAME DEST_GROUP_NAME\" >mgmt\n"
-+ " echo \"clear\" >mgmt\n";
-+
-+ return sprintf(buf, "%s", help);
-+}
-+
-+static int scst_process_acg_ini_mgmt_store(char *buffer,
-+ struct scst_tgt *tgt, struct scst_acg *acg)
-+{
-+ int res, action;
-+ char *p, *e = NULL;
-+ char *name = NULL, *group = NULL;
-+ struct scst_acg *acg_dest = NULL;
-+ struct scst_acn *acn = NULL, *acn_tmp;
-+ enum {
-+ SCST_ACG_ACTION_INI_ADD = 1,
-+ SCST_ACG_ACTION_INI_DEL = 2,
-+ SCST_ACG_ACTION_INI_CLEAR = 3,
-+ SCST_ACG_ACTION_INI_MOVE = 4,
-+ };
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("tgt %p, acg %p, buffer %s", tgt, acg, buffer);
-+
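-+	/* Strip the trailing newline that "echo" appends to the command. */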
-+ p = buffer;
-+ if (p[strlen(p) - 1] == '\n')
-+ p[strlen(p) - 1] = '\0';
-+
-+ if (strncasecmp("add", p, 3) == 0) {
-+ p += 3;
-+ action = SCST_ACG_ACTION_INI_ADD;
-+ } else if (strncasecmp("del", p, 3) == 0) {
-+ p += 3;
-+ action = SCST_ACG_ACTION_INI_DEL;
-+ } else if (strncasecmp("clear", p, 5) == 0) {
-+ p += 5;
-+ action = SCST_ACG_ACTION_INI_CLEAR;
-+ } else if (strncasecmp("move", p, 4) == 0) {
-+ p += 4;
-+ action = SCST_ACG_ACTION_INI_MOVE;
-+ } else {
-+ PRINT_ERROR("Unknown action \"%s\"", p);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (action != SCST_ACG_ACTION_INI_CLEAR)
-+ if (!isspace(*p)) {
-+ PRINT_ERROR("%s", "Syntax error");
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ res = scst_suspend_activity(true);
-+ if (res != 0)
-+ goto out;
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res != 0)
-+ goto out_resume;
-+
-+	/* Check that tgt and acg were not freed while we were getting here */
-+ if (scst_check_tgt_acg_ptrs(tgt, acg) != 0)
-+ goto out_unlock;
-+
-+ if (action != SCST_ACG_ACTION_INI_CLEAR)
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+
-+ switch (action) {
-+ case SCST_ACG_ACTION_INI_ADD:
-+ e = p;
-+ while (!isspace(*e) && *e != '\0')
-+ e++;
-+ *e = '\0';
-+ name = p;
-+
-+ if (name[0] == '\0') {
-+ PRINT_ERROR("%s", "Invalid initiator name");
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ res = scst_acg_add_acn(acg, name);
-+ if (res != 0)
-+ goto out_unlock;
-+ break;
-+ case SCST_ACG_ACTION_INI_DEL:
-+ e = p;
-+ while (!isspace(*e) && *e != '\0')
-+ e++;
-+ *e = '\0';
-+ name = p;
-+
-+ if (name[0] == '\0') {
-+ PRINT_ERROR("%s", "Invalid initiator name");
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ acn = scst_find_acn(acg, name);
-+ if (acn == NULL) {
-+ PRINT_ERROR("Unable to find "
-+ "initiator '%s' in group '%s'",
-+ name, acg->acg_name);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+ scst_del_free_acn(acn, true);
-+ break;
-+ case SCST_ACG_ACTION_INI_CLEAR:
-+ list_for_each_entry_safe(acn, acn_tmp, &acg->acn_list,
-+ acn_list_entry) {
-+ scst_del_free_acn(acn, false);
-+ }
-+ scst_check_reassign_sessions();
-+ break;
-+ case SCST_ACG_ACTION_INI_MOVE:
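-+		/*
-+		 * Parse "INITIATOR_NAME DEST_GROUP_NAME", then move the
-+		 * initiator by deleting it from this group and re-adding it
-+		 * to the destination, all under scst_mutex.
-+		 */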
-+ e = p;
-+ while (!isspace(*e) && *e != '\0')
-+ e++;
-+ if (*e == '\0') {
-+ PRINT_ERROR("%s", "Too few parameters");
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+ *e = '\0';
-+ name = p;
-+
-+ if (name[0] == '\0') {
-+ PRINT_ERROR("%s", "Invalid initiator name");
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ e++;
-+ p = e;
-+ while (!isspace(*e) && *e != '\0')
-+ e++;
-+ *e = '\0';
-+ group = p;
-+
-+ if (group[0] == '\0') {
-+ PRINT_ERROR("%s", "Invalid group name");
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ TRACE_DBG("Move initiator '%s' to group '%s'",
-+ name, group);
-+
-+ acn = scst_find_acn(acg, name);
-+ if (acn == NULL) {
-+ PRINT_ERROR("Unable to find "
-+ "initiator '%s' in group '%s'",
-+ name, acg->acg_name);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+ acg_dest = scst_tgt_find_acg(tgt, group);
-+ if (acg_dest == NULL) {
-+ PRINT_ERROR("Unable to find group '%s' in target '%s'",
-+ group, tgt->tgt_name);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+ if (scst_find_acn(acg_dest, name) != NULL) {
-+ PRINT_ERROR("Initiator '%s' already exists in group '%s'",
-+ name, acg_dest->acg_name);
-+ res = -EEXIST;
-+ goto out_unlock;
-+ }
-+ scst_del_free_acn(acn, false);
-+
-+ res = scst_acg_add_acn(acg_dest, name);
-+ if (res != 0)
-+ goto out_unlock;
-+ break;
-+ }
-+
-+ res = 0;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+out_resume:
-+ scst_resume_activity();
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_acg_ini_mgmt_store_work_fn(struct scst_sysfs_work_item *work)
-+{
-+ return scst_process_acg_ini_mgmt_store(work->buf, work->tgt, work->acg);
-+}
-+
-+static ssize_t scst_acg_ini_mgmt_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ struct scst_acg *acg;
-+
-+ acg = container_of(kobj->parent, struct scst_acg, acg_kobj);
-+
-+ return __scst_acg_mgmt_store(acg, buf, count, false,
-+ scst_acg_ini_mgmt_store_work_fn);
-+}
-+
-+static struct kobj_attribute scst_acg_ini_mgmt =
-+ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_acg_ini_mgmt_show,
-+ scst_acg_ini_mgmt_store);
-+
-+static ssize_t scst_acg_luns_mgmt_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_acg *acg;
-+
-+ acg = container_of(kobj->parent, struct scst_acg, acg_kobj);
-+ res = __scst_luns_mgmt_store(acg, false, buf, count);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_acg_luns_mgmt =
-+ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_luns_mgmt_show,
-+ scst_acg_luns_mgmt_store);
-+
-+static ssize_t scst_acg_addr_method_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_acg *acg;
-+
-+ acg = container_of(kobj, struct scst_acg, acg_kobj);
-+
-+ return __scst_acg_addr_method_show(acg, buf);
-+}
-+
-+static ssize_t scst_acg_addr_method_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_acg *acg;
-+
-+ acg = container_of(kobj, struct scst_acg, acg_kobj);
-+
-+ res = __scst_acg_addr_method_store(acg, buf, count);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_acg_addr_method =
-+ __ATTR(addr_method, S_IRUGO | S_IWUSR, scst_acg_addr_method_show,
-+ scst_acg_addr_method_store);
-+
-+static ssize_t scst_acg_io_grouping_type_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_acg *acg;
-+
-+ acg = container_of(kobj, struct scst_acg, acg_kobj);
-+
-+ return __scst_acg_io_grouping_type_show(acg, buf);
-+}
-+
-+static ssize_t scst_acg_io_grouping_type_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_acg *acg;
-+
-+ acg = container_of(kobj, struct scst_acg, acg_kobj);
-+
-+ res = __scst_acg_io_grouping_type_store(acg, buf, count);
-+ if (res != 0)
-+ goto out;
-+
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_acg_io_grouping_type =
-+ __ATTR(io_grouping_type, S_IRUGO | S_IWUSR,
-+ scst_acg_io_grouping_type_show,
-+ scst_acg_io_grouping_type_store);
-+
-+static ssize_t scst_acg_cpu_mask_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_acg *acg;
-+
-+ acg = container_of(kobj, struct scst_acg, acg_kobj);
-+
-+ return __scst_acg_cpu_mask_show(acg, buf);
-+}
-+
-+static ssize_t scst_acg_cpu_mask_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_acg *acg;
-+
-+ acg = container_of(kobj, struct scst_acg, acg_kobj);
-+
-+ res = __scst_acg_cpu_mask_store(acg, buf, count);
-+ if (res != 0)
-+ goto out;
-+
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_acg_cpu_mask =
-+ __ATTR(cpu_mask, S_IRUGO | S_IWUSR,
-+ scst_acg_cpu_mask_show,
-+ scst_acg_cpu_mask_store);
-+
-+/*
-+ * Called with scst_mutex held.
-+ *
-+ * !! No sysfs work may use kobject_get() to protect acg, because of a
-+ * !! possible deadlock with scst_mutex (this function waits for the last
-+ * !! put, while the last reference holder may be waiting for scst_mutex).
-+ */
-+void scst_acg_sysfs_del(struct scst_acg *acg)
-+{
-+ int rc;
-+ DECLARE_COMPLETION_ONSTACK(c);
-+
-+ TRACE_ENTRY();
-+
-+ acg->acg_kobj_release_cmpl = &c;
-+
-+ kobject_del(acg->luns_kobj);
-+ kobject_del(acg->initiators_kobj);
-+ kobject_del(&acg->acg_kobj);
-+
-+ kobject_put(acg->luns_kobj);
-+ kobject_put(acg->initiators_kobj);
-+ kobject_put(&acg->acg_kobj);
-+
-+ rc = wait_for_completion_timeout(acg->acg_kobj_release_cmpl, HZ);
-+ if (rc == 0) {
-+		PRINT_INFO("Waiting for release of the sysfs entry "
-+			"for acg %s (%d refs)...", acg->acg_name,
-+			atomic_read(&acg->acg_kobj.kref.refcount));
-+		wait_for_completion(acg->acg_kobj_release_cmpl);
-+		PRINT_INFO("Done waiting for release of the sysfs "
-+			"entry for acg %s", acg->acg_name);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+int scst_acg_sysfs_create(struct scst_tgt *tgt,
-+ struct scst_acg *acg)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ res = kobject_init_and_add(&acg->acg_kobj, &acg_ktype,
-+ tgt->tgt_ini_grp_kobj, acg->acg_name);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add acg '%s' to sysfs", acg->acg_name);
-+ goto out;
-+ }
-+
-+ acg->luns_kobj = kobject_create_and_add("luns", &acg->acg_kobj);
-+ if (acg->luns_kobj == NULL) {
-+ PRINT_ERROR("Can't create luns kobj for tgt %s",
-+ tgt->tgt_name);
-+ res = -ENOMEM;
-+ goto out_del;
-+ }
-+
-+ res = sysfs_create_file(acg->luns_kobj, &scst_acg_luns_mgmt.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add tgt attr %s for tgt %s",
-+ scst_acg_luns_mgmt.attr.name, tgt->tgt_name);
-+ goto out_del;
-+ }
-+
-+ acg->initiators_kobj = kobject_create_and_add("initiators",
-+ &acg->acg_kobj);
-+ if (acg->initiators_kobj == NULL) {
-+ PRINT_ERROR("Can't create initiators kobj for tgt %s",
-+ tgt->tgt_name);
-+ res = -ENOMEM;
-+ goto out_del;
-+ }
-+
-+ res = sysfs_create_file(acg->initiators_kobj,
-+ &scst_acg_ini_mgmt.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add tgt attr %s for tgt %s",
-+ scst_acg_ini_mgmt.attr.name, tgt->tgt_name);
-+ goto out_del;
-+ }
-+
-+ res = sysfs_create_file(&acg->acg_kobj, &scst_acg_addr_method.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add tgt attr %s for tgt %s",
-+ scst_acg_addr_method.attr.name, tgt->tgt_name);
-+ goto out_del;
-+ }
-+
-+ res = sysfs_create_file(&acg->acg_kobj, &scst_acg_io_grouping_type.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add tgt attr %s for tgt %s",
-+ scst_acg_io_grouping_type.attr.name, tgt->tgt_name);
-+ goto out_del;
-+ }
-+
-+ res = sysfs_create_file(&acg->acg_kobj, &scst_acg_cpu_mask.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add tgt attr %s for tgt %s",
-+ scst_acg_cpu_mask.attr.name, tgt->tgt_name);
-+ goto out_del;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_del:
-+ scst_acg_sysfs_del(acg);
-+ goto out;
-+}
-+
-+/**
-+ ** acn
-+ **/
-+
-+static ssize_t scst_acn_file_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return scnprintf(buf, SCST_SYSFS_BLOCK_SIZE, "%s\n",
-+ attr->attr.name);
-+}
-+
-+int scst_acn_sysfs_create(struct scst_acn *acn)
-+{
-+ int res = 0;
-+ struct scst_acg *acg = acn->acg;
-+ struct kobj_attribute *attr = NULL;
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+ static struct lock_class_key __key;
-+#endif
-+
-+ TRACE_ENTRY();
-+
-+ acn->acn_attr = NULL;
-+
-+ attr = kzalloc(sizeof(struct kobj_attribute), GFP_KERNEL);
-+ if (attr == NULL) {
-+ PRINT_ERROR("Unable to allocate attributes for initiator '%s'",
-+ acn->name);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ attr->attr.name = kstrdup(acn->name, GFP_KERNEL);
-+ if (attr->attr.name == NULL) {
-+ PRINT_ERROR("Unable to allocate attributes for initiator '%s'",
-+ acn->name);
-+ res = -ENOMEM;
-+ goto out_free;
-+ }
-+
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+ attr->attr.key = &__key;
-+#endif
-+
-+ attr->attr.mode = S_IRUGO;
-+ attr->show = scst_acn_file_show;
-+ attr->store = NULL;
-+
-+ res = sysfs_create_file(acg->initiators_kobj, &attr->attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Unable to create acn '%s' for group '%s'",
-+ acn->name, acg->acg_name);
-+ kfree(attr->attr.name);
-+ goto out_free;
-+ }
-+
-+ acn->acn_attr = attr;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free:
-+ kfree(attr);
-+ goto out;
-+}
-+
-+void scst_acn_sysfs_del(struct scst_acn *acn)
-+{
-+ struct scst_acg *acg = acn->acg;
-+
-+ TRACE_ENTRY();
-+
-+ if (acn->acn_attr != NULL) {
-+ sysfs_remove_file(acg->initiators_kobj,
-+ &acn->acn_attr->attr);
-+ kfree(acn->acn_attr->attr.name);
-+ kfree(acn->acn_attr);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ ** Dev handlers
-+ **/
-+
-+static void scst_devt_release(struct kobject *kobj)
-+{
-+ struct scst_dev_type *devt;
-+
-+ TRACE_ENTRY();
-+
-+ devt = container_of(kobj, struct scst_dev_type, devt_kobj);
-+ if (devt->devt_kobj_release_compl)
-+ complete_all(devt->devt_kobj_release_compl);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+
-+static ssize_t scst_devt_trace_level_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_dev_type *devt;
-+
-+ devt = container_of(kobj, struct scst_dev_type, devt_kobj);
-+
-+ return scst_trace_level_show(devt->trace_tbl,
-+ devt->trace_flags ? *devt->trace_flags : 0, buf,
-+ devt->trace_tbl_help);
-+}
-+
-+static ssize_t scst_devt_trace_level_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_dev_type *devt;
-+
-+ TRACE_ENTRY();
-+
-+ devt = container_of(kobj, struct scst_dev_type, devt_kobj);
-+
-+ res = mutex_lock_interruptible(&scst_log_mutex);
-+ if (res != 0)
-+ goto out;
-+
-+ res = scst_write_trace(buf, count, devt->trace_flags,
-+ devt->default_trace_flags, devt->name, devt->trace_tbl);
-+
-+ mutex_unlock(&scst_log_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute devt_trace_attr =
-+ __ATTR(trace_level, S_IRUGO | S_IWUSR,
-+ scst_devt_trace_level_show, scst_devt_trace_level_store);
-+
-+#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
-+
-+static ssize_t scst_devt_type_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos;
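-+	/*
-+	 * On success the work function returns the active command count,
-+	 * which is formatted into the sysfs buffer here.
-+	 */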
-+ struct scst_dev_type *devt;
-+
-+ devt = container_of(kobj, struct scst_dev_type, devt_kobj);
-+
-+ pos = sprintf(buf, "%d - %s\n", devt->type,
-+ (unsigned)devt->type >= ARRAY_SIZE(scst_dev_handler_types) ?
-+ "unknown" : scst_dev_handler_types[devt->type]);
-+
-+ return pos;
-+}
-+
-+static struct kobj_attribute scst_devt_type_attr =
-+ __ATTR(type, S_IRUGO, scst_devt_type_show, NULL);
-+
-+static struct attribute *scst_devt_default_attrs[] = {
-+ &scst_devt_type_attr.attr,
-+ NULL,
-+};
-+
-+static struct kobj_type scst_devt_ktype = {
-+ .sysfs_ops = &scst_sysfs_ops,
-+ .release = scst_devt_release,
-+ .default_attrs = scst_devt_default_attrs,
-+};
-+
-+static ssize_t scst_devt_mgmt_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ static const char help[] =
-+ "Usage: echo \"add_device device_name [parameters]\" >mgmt\n"
-+ " echo \"del_device device_name\" >mgmt\n"
-+ "%s%s"
-+ "%s"
-+ "\n"
-+ "where parameters are one or more "
-+ "param_name=value pairs separated by ';'\n\n"
-+ "%s%s%s%s%s%s%s%s\n";
-+ struct scst_dev_type *devt;
-+
-+ devt = container_of(kobj, struct scst_dev_type, devt_kobj);
-+
-+ return scnprintf(buf, SCST_SYSFS_BLOCK_SIZE, help,
-+ (devt->devt_optional_attributes != NULL) ?
-+ " echo \"add_attribute <attribute> <value>\" >mgmt\n"
-+ " echo \"del_attribute <attribute> <value>\" >mgmt\n" : "",
-+ (devt->dev_optional_attributes != NULL) ?
-+	" echo \"add_device_attribute device_name <attribute> <value>\" >mgmt\n"
-+ " echo \"del_device_attribute device_name <attribute> <value>\" >mgmt\n" : "",
-+ (devt->mgmt_cmd_help) ? devt->mgmt_cmd_help : "",
-+ (devt->add_device_parameters != NULL) ?
-+ "The following parameters available: " : "",
-+ (devt->add_device_parameters != NULL) ?
-+ devt->add_device_parameters : "",
-+ (devt->devt_optional_attributes != NULL) ?
-+ "The following dev handler attributes available: " : "",
-+ (devt->devt_optional_attributes != NULL) ?
-+ devt->devt_optional_attributes : "",
-+ (devt->devt_optional_attributes != NULL) ? "\n" : "",
-+ (devt->dev_optional_attributes != NULL) ?
-+ "The following device attributes available: " : "",
-+ (devt->dev_optional_attributes != NULL) ?
-+ devt->dev_optional_attributes : "",
-+ (devt->dev_optional_attributes != NULL) ? "\n" : "");
-+}
-+
-+static int scst_process_devt_mgmt_store(char *buffer,
-+ struct scst_dev_type *devt)
-+{
-+ int res = 0;
-+ char *p, *pp, *dev_name;
-+
-+ TRACE_ENTRY();
-+
-+ /* Check if our pointer is still alive and, if yes, grab it */
-+ if (scst_check_grab_devt_ptr(devt, &scst_virtual_dev_type_list) != 0)
-+ goto out;
-+
-+ TRACE_DBG("devt %p, buffer %s", devt, buffer);
-+
-+ pp = buffer;
-+ if (pp[strlen(pp) - 1] == '\n')
-+ pp[strlen(pp) - 1] = '\0';
-+
-+ p = scst_get_next_lexem(&pp);
-+
-+ if (strcasecmp("add_device", p) == 0) {
-+ dev_name = scst_get_next_lexem(&pp);
-+ if (*dev_name == '\0') {
-+ PRINT_ERROR("%s", "Device name required");
-+ res = -EINVAL;
-+ goto out_ungrab;
-+ }
-+ res = devt->add_device(dev_name, pp);
-+ } else if (strcasecmp("del_device", p) == 0) {
-+ dev_name = scst_get_next_lexem(&pp);
-+ if (*dev_name == '\0') {
-+ PRINT_ERROR("%s", "Device name required");
-+ res = -EINVAL;
-+ goto out_ungrab;
-+ }
-+
-+ p = scst_get_next_lexem(&pp);
-+ if (*p != '\0')
-+ goto out_syntax_err;
-+
-+ res = devt->del_device(dev_name);
-+ } else if (devt->mgmt_cmd != NULL) {
-+ scst_restore_token_str(p, pp);
-+ res = devt->mgmt_cmd(buffer);
-+ } else {
-+ PRINT_ERROR("Unknown action \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_ungrab;
-+ }
-+
-+out_ungrab:
-+ scst_ungrab_devt_ptr(devt);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_syntax_err:
-+ PRINT_ERROR("Syntax error on \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_ungrab;
-+}
-+
-+static int scst_devt_mgmt_store_work_fn(struct scst_sysfs_work_item *work)
-+{
-+ return scst_process_devt_mgmt_store(work->buf, work->devt);
-+}
-+
-+static ssize_t __scst_devt_mgmt_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count,
-+ int (*sysfs_work_fn)(struct scst_sysfs_work_item *work))
-+{
-+ int res;
-+ char *buffer;
-+ struct scst_dev_type *devt;
-+ struct scst_sysfs_work_item *work;
-+
-+ TRACE_ENTRY();
-+
-+ devt = container_of(kobj, struct scst_dev_type, devt_kobj);
-+
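-+	/* Make a private, NUL-terminated, writable copy of the sysfs input. */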
-+ buffer = kasprintf(GFP_KERNEL, "%.*s", (int)count, buf);
-+ if (buffer == NULL) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ res = scst_alloc_sysfs_work(sysfs_work_fn, false, &work);
-+ if (res != 0)
-+ goto out_free;
-+
-+ work->buf = buffer;
-+ work->devt = devt;
-+
-+ res = scst_sysfs_queue_wait_work(work);
-+ if (res == 0)
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free:
-+ kfree(buffer);
-+ goto out;
-+}
-+
-+static ssize_t scst_devt_mgmt_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ return __scst_devt_mgmt_store(kobj, attr, buf, count,
-+ scst_devt_mgmt_store_work_fn);
-+}
-+
-+static struct kobj_attribute scst_devt_mgmt =
-+ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_devt_mgmt_show,
-+ scst_devt_mgmt_store);
-+
-+static ssize_t scst_devt_pass_through_mgmt_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ static const char help[] =
-+ "Usage: echo \"add_device H:C:I:L\" >mgmt\n"
-+ " echo \"del_device H:C:I:L\" >mgmt\n";
-+ return sprintf(buf, "%s", help);
-+}
-+
-+static int scst_process_devt_pass_through_mgmt_store(char *buffer,
-+ struct scst_dev_type *devt)
-+{
-+ int res = 0;
-+ char *p, *pp, *action;
-+ unsigned long host, channel, id, lun;
-+ struct scst_device *d, *dev = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("devt %p, buffer %s", devt, buffer);
-+
-+ pp = buffer;
-+ if (pp[strlen(pp) - 1] == '\n')
-+ pp[strlen(pp) - 1] = '\0';
-+
-+ action = scst_get_next_lexem(&pp);
-+ p = scst_get_next_lexem(&pp);
-+ if (*p == '\0') {
-+ PRINT_ERROR("%s", "Device required");
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (*scst_get_next_lexem(&pp) != '\0') {
-+ PRINT_ERROR("%s", "Too many parameters");
-+ res = -EINVAL;
-+ goto out_syntax_err;
-+ }
-+
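-+	/* Parse the "H:C:I:L" (host:channel:id:lun) SCSI address. */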
-+ host = simple_strtoul(p, &p, 0);
-+ if ((host == ULONG_MAX) || (*p != ':'))
-+ goto out_syntax_err;
-+ p++;
-+ channel = simple_strtoul(p, &p, 0);
-+ if ((channel == ULONG_MAX) || (*p != ':'))
-+ goto out_syntax_err;
-+ p++;
-+ id = simple_strtoul(p, &p, 0);
-+	if ((id == ULONG_MAX) || (*p != ':'))
-+ goto out_syntax_err;
-+ p++;
-+ lun = simple_strtoul(p, &p, 0);
-+ if (lun == ULONG_MAX)
-+ goto out_syntax_err;
-+
-+ TRACE_DBG("Dev %ld:%ld:%ld:%ld", host, channel, id, lun);
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res != 0)
-+ goto out;
-+
-+	/* Check that devt was not freed while we were getting here */
-+ if (scst_check_devt_ptr(devt, &scst_dev_type_list) != 0)
-+ goto out_unlock;
-+
-+ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
-+ if ((d->virt_id == 0) &&
-+ d->scsi_dev->host->host_no == host &&
-+ d->scsi_dev->channel == channel &&
-+ d->scsi_dev->id == id &&
-+ d->scsi_dev->lun == lun) {
-+ dev = d;
-+ TRACE_DBG("Dev %p (%ld:%ld:%ld:%ld) found",
-+ dev, host, channel, id, lun);
-+ break;
-+ }
-+ }
-+ if (dev == NULL) {
-+ PRINT_ERROR("Device %ld:%ld:%ld:%ld not found",
-+ host, channel, id, lun);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ if (dev->scsi_dev->type != devt->type) {
-+ PRINT_ERROR("Type %d of device %s differs from type "
-+ "%d of dev handler %s", dev->type,
-+ dev->virt_name, devt->type, devt->name);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ if (strcasecmp("add_device", action) == 0) {
-+ res = scst_assign_dev_handler(dev, devt);
-+ if (res == 0)
-+ PRINT_INFO("Device %s assigned to dev handler %s",
-+ dev->virt_name, devt->name);
-+ } else if (strcasecmp("del_device", action) == 0) {
-+ if (dev->handler != devt) {
-+ PRINT_ERROR("Device %s is not assigned to handler %s",
-+ dev->virt_name, devt->name);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+ res = scst_assign_dev_handler(dev, &scst_null_devtype);
-+ if (res == 0)
-+ PRINT_INFO("Device %s unassigned from dev handler %s",
-+ dev->virt_name, devt->name);
-+ } else {
-+ PRINT_ERROR("Unknown action \"%s\"", action);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_syntax_err:
-+ PRINT_ERROR("Syntax error on \"%s\"", p);
-+ res = -EINVAL;
-+ goto out;
-+}
-+
-+static int scst_devt_pass_through_mgmt_store_work_fn(
-+ struct scst_sysfs_work_item *work)
-+{
-+ return scst_process_devt_pass_through_mgmt_store(work->buf, work->devt);
-+}
-+
-+static ssize_t scst_devt_pass_through_mgmt_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ return __scst_devt_mgmt_store(kobj, attr, buf, count,
-+ scst_devt_pass_through_mgmt_store_work_fn);
-+}
-+
-+static struct kobj_attribute scst_devt_pass_through_mgmt =
-+ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_devt_pass_through_mgmt_show,
-+ scst_devt_pass_through_mgmt_store);
-+
-+int scst_devt_sysfs_create(struct scst_dev_type *devt)
-+{
-+ int res;
-+ struct kobject *parent;
-+
-+ TRACE_ENTRY();
-+
-+ if (devt->parent != NULL)
-+ parent = &devt->parent->devt_kobj;
-+ else
-+ parent = scst_handlers_kobj;
-+
-+ res = kobject_init_and_add(&devt->devt_kobj, &scst_devt_ktype,
-+ parent, devt->name);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add devt %s to sysfs", devt->name);
-+ goto out;
-+ }
-+
-+ if (devt->add_device != NULL) {
-+ res = sysfs_create_file(&devt->devt_kobj,
-+ &scst_devt_mgmt.attr);
-+ } else {
-+ res = sysfs_create_file(&devt->devt_kobj,
-+ &scst_devt_pass_through_mgmt.attr);
-+ }
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add mgmt attr for dev handler %s",
-+ devt->name);
-+ goto out_err;
-+ }
-+
-+ if (devt->devt_attrs) {
-+ res = sysfs_create_files(&devt->devt_kobj, devt->devt_attrs);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add attributes for dev handler %s",
-+ devt->name);
-+ goto out_err;
-+ }
-+ }
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ if (devt->trace_flags != NULL) {
-+ res = sysfs_create_file(&devt->devt_kobj,
-+ &devt_trace_attr.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add devt trace_flag for dev "
-+ "handler %s", devt->name);
-+ goto out_err;
-+ }
-+ }
-+#endif
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_err:
-+ scst_devt_sysfs_del(devt);
-+ goto out;
-+}
-+
-+void scst_devt_sysfs_del(struct scst_dev_type *devt)
-+{
-+ int rc;
-+ DECLARE_COMPLETION_ONSTACK(c);
-+
-+ TRACE_ENTRY();
-+
-+ devt->devt_kobj_release_compl = &c;
-+
-+ kobject_del(&devt->devt_kobj);
-+ kobject_put(&devt->devt_kobj);
-+
-+ rc = wait_for_completion_timeout(devt->devt_kobj_release_compl, HZ);
-+ if (rc == 0) {
-+		PRINT_INFO("Waiting for release of the sysfs entry "
-+			"for dev handler template %s (%d refs)...", devt->name,
-+			atomic_read(&devt->devt_kobj.kref.refcount));
-+		wait_for_completion(devt->devt_kobj_release_compl);
-+		PRINT_INFO("Done waiting for release of the sysfs entry "
-+			"for dev handler template %s", devt->name);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ ** SCST sysfs device_groups/<dg>/devices/<dev> implementation.
-+ **/
-+
-+int scst_dg_dev_sysfs_add(struct scst_dev_group *dg, struct scst_dg_dev *dgdev)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+ res = sysfs_create_link(dg->dev_kobj, &dgdev->dev->dev_kobj,
-+ dgdev->dev->virt_name);
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+void scst_dg_dev_sysfs_del(struct scst_dev_group *dg, struct scst_dg_dev *dgdev)
-+{
-+ TRACE_ENTRY();
-+ sysfs_remove_link(dg->dev_kobj, dgdev->dev->virt_name);
-+ TRACE_EXIT();
-+}
-+
-+/**
-+ ** SCST sysfs device_groups/<dg>/devices directory implementation.
-+ **/
-+
-+static ssize_t scst_dg_devs_mgmt_show(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ char *buf)
-+{
-+ static const char help[] =
-+ "Usage: echo \"add device\" >mgmt\n"
-+ " echo \"del device\" >mgmt\n";
-+
-+	return scnprintf(buf, PAGE_SIZE, "%s", help);
-+}
-+
-+static int scst_dg_devs_mgmt_store_work_fn(struct scst_sysfs_work_item *w)
-+{
-+ struct scst_dev_group *dg;
-+ char *cmd, *p, *pp, *dev_name;
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ cmd = w->buf;
-+ dg = scst_lookup_dg_by_kobj(w->kobj);
-+ WARN_ON(!dg);
-+
-+ p = strchr(cmd, '\n');
-+ if (p)
-+ *p = '\0';
-+
-+ res = -EINVAL;
-+ pp = cmd;
-+ p = scst_get_next_lexem(&pp);
-+ if (strcasecmp(p, "add") == 0) {
-+ dev_name = scst_get_next_lexem(&pp);
-+ if (!*dev_name)
-+ goto out;
-+ res = scst_dg_dev_add(dg, dev_name);
-+ } else if (strcasecmp(p, "del") == 0) {
-+ dev_name = scst_get_next_lexem(&pp);
-+ if (!*dev_name)
-+ goto out;
-+ res = scst_dg_dev_remove_by_name(dg, dev_name);
-+ }
-+out:
-+ kobject_put(w->kobj);
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t scst_dg_devs_mgmt_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ char *cmd;
-+ struct scst_sysfs_work_item *work;
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ res = -ENOMEM;
-+ cmd = kasprintf(GFP_KERNEL, "%.*s", (int)count, buf);
-+ if (!cmd)
-+ goto out;
-+
-+ res = scst_alloc_sysfs_work(scst_dg_devs_mgmt_store_work_fn, false,
-+ &work);
-+ if (res)
-+ goto out;
-+
-+ work->buf = cmd;
-+ work->kobj = kobj;
-+ kobject_get(kobj);
-+ res = scst_sysfs_queue_wait_work(work);
-+
-+out:
-+ if (res == 0)
-+ res = count;
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_dg_devs_mgmt =
-+ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_dg_devs_mgmt_show,
-+ scst_dg_devs_mgmt_store);
-+
-+static const struct attribute *scst_dg_devs_attrs[] = {
-+ &scst_dg_devs_mgmt.attr,
-+ NULL,
-+};
-+
-+/**
-+ ** SCST sysfs device_groups/<dg>/target_groups/<tg>/<tgt> implementation.
-+ **/
-+
-+static ssize_t scst_tg_tgt_rel_tgt_id_show(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ char *buf)
-+{
-+ struct scst_tg_tgt *tg_tgt;
-+
-+ tg_tgt = container_of(kobj, struct scst_tg_tgt, kobj);
-+ return scnprintf(buf, PAGE_SIZE, "%u\n" SCST_SYSFS_KEY_MARK "\n",
-+ tg_tgt->rel_tgt_id);
-+}
-+
-+static ssize_t scst_tg_tgt_rel_tgt_id_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ struct scst_tg_tgt *tg_tgt;
-+ unsigned long rel_tgt_id;
-+ char ch[8];
-+ int res;
-+
-+ TRACE_ENTRY();
-+ tg_tgt = container_of(kobj, struct scst_tg_tgt, kobj);
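-+	/* Bounded copy: strict_strtoul() needs a short, NUL-terminated string. */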
-+ snprintf(ch, sizeof(ch), "%.*s", min_t(int, count, sizeof(ch)-1), buf);
-+ res = strict_strtoul(ch, 0, &rel_tgt_id);
-+ if (res)
-+ goto out;
-+ res = -EINVAL;
-+ if (rel_tgt_id == 0 || rel_tgt_id > 0xffff)
-+ goto out;
-+ tg_tgt->rel_tgt_id = rel_tgt_id;
-+ res = count;
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_tg_tgt_rel_tgt_id =
-+ __ATTR(rel_tgt_id, S_IRUGO | S_IWUSR, scst_tg_tgt_rel_tgt_id_show,
-+ scst_tg_tgt_rel_tgt_id_store);
-+
-+static const struct attribute *scst_tg_tgt_attrs[] = {
-+ &scst_tg_tgt_rel_tgt_id.attr,
-+ NULL,
-+};
-+
-+int scst_tg_tgt_sysfs_add(struct scst_target_group *tg,
-+ struct scst_tg_tgt *tg_tgt)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+ BUG_ON(!tg);
-+ BUG_ON(!tg_tgt);
-+ BUG_ON(!tg_tgt->name);
-+ if (tg_tgt->tgt)
-+ res = sysfs_create_link(&tg->kobj, &tg_tgt->tgt->tgt_kobj,
-+ tg_tgt->name);
-+ else {
-+ res = kobject_add(&tg_tgt->kobj, &tg->kobj, "%s", tg_tgt->name);
-+ if (res)
-+ goto err;
-+ res = sysfs_create_files(&tg_tgt->kobj, scst_tg_tgt_attrs);
-+ if (res)
-+ goto err;
-+ }
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+err:
-+ scst_tg_tgt_sysfs_del(tg, tg_tgt);
-+ goto out;
-+}
-+
-+void scst_tg_tgt_sysfs_del(struct scst_target_group *tg,
-+ struct scst_tg_tgt *tg_tgt)
-+{
-+ TRACE_ENTRY();
-+ if (tg_tgt->tgt)
-+ sysfs_remove_link(&tg->kobj, tg_tgt->name);
-+ else {
-+ sysfs_remove_files(&tg_tgt->kobj, scst_tg_tgt_attrs);
-+ kobject_del(&tg_tgt->kobj);
-+ }
-+ TRACE_EXIT();
-+}
-+
-+/**
-+ ** SCST sysfs device_groups/<dg>/target_groups/<tg> directory implementation.
-+ **/
-+
-+static ssize_t scst_tg_group_id_show(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ char *buf)
-+{
-+ struct scst_target_group *tg;
-+
-+ tg = container_of(kobj, struct scst_target_group, kobj);
-+ return scnprintf(buf, PAGE_SIZE, "%u\n" SCST_SYSFS_KEY_MARK "\n",
-+ tg->group_id);
-+}
-+
-+static ssize_t scst_tg_group_id_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ struct scst_target_group *tg;
-+ unsigned long group_id;
-+ char ch[8];
-+ int res;
-+
-+ TRACE_ENTRY();
-+ tg = container_of(kobj, struct scst_target_group, kobj);
-+ snprintf(ch, sizeof(ch), "%.*s", min_t(int, count, sizeof(ch)-1), buf);
-+ res = strict_strtoul(ch, 0, &group_id);
-+ if (res)
-+ goto out;
-+ res = -EINVAL;
-+ if (group_id == 0 || group_id > 0xffff)
-+ goto out;
-+ tg->group_id = group_id;
-+ res = count;
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_tg_group_id =
-+ __ATTR(group_id, S_IRUGO | S_IWUSR, scst_tg_group_id_show,
-+ scst_tg_group_id_store);
-+
-+static ssize_t scst_tg_preferred_show(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ char *buf)
-+{
-+ struct scst_target_group *tg;
-+
-+ tg = container_of(kobj, struct scst_target_group, kobj);
-+ return scnprintf(buf, PAGE_SIZE, "%u\n%s",
-+ tg->preferred, SCST_SYSFS_KEY_MARK "\n");
-+}
-+
-+static ssize_t scst_tg_preferred_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ struct scst_target_group *tg;
-+ unsigned long preferred;
-+ char ch[8];
-+ int res;
-+
-+ TRACE_ENTRY();
-+ tg = container_of(kobj, struct scst_target_group, kobj);
-+ snprintf(ch, sizeof(ch), "%.*s", min_t(int, count, sizeof(ch)-1), buf);
-+ res = strict_strtoul(ch, 0, &preferred);
-+ if (res)
-+ goto out;
-+ res = -EINVAL;
-+ if (preferred != 0 && preferred != 1)
-+ goto out;
-+ tg->preferred = preferred;
-+ res = count;
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_tg_preferred =
-+ __ATTR(preferred, S_IRUGO | S_IWUSR, scst_tg_preferred_show,
-+ scst_tg_preferred_store);
-+
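-+/*
-+ * Mapping between SCST target group states and their sysfs names. These
-+ * correspond to the ALUA (asymmetric logical unit access) target port group
-+ * states from SPC; "active" is the active/optimized state.
-+ */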
-+static struct { enum scst_tg_state s; const char *n; } scst_tg_state_names[] = {
-+ { SCST_TG_STATE_OPTIMIZED, "active" },
-+ { SCST_TG_STATE_NONOPTIMIZED, "nonoptimized" },
-+ { SCST_TG_STATE_STANDBY, "standby" },
-+ { SCST_TG_STATE_UNAVAILABLE, "unavailable" },
-+ { SCST_TG_STATE_OFFLINE, "offline" },
-+ { SCST_TG_STATE_TRANSITIONING, "transitioning" },
-+};
-+
-+static ssize_t scst_tg_state_show(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ char *buf)
-+{
-+ struct scst_target_group *tg;
-+ int i;
-+
-+ tg = container_of(kobj, struct scst_target_group, kobj);
-+ for (i = ARRAY_SIZE(scst_tg_state_names) - 1; i >= 0; i--)
-+ if (scst_tg_state_names[i].s == tg->state)
-+ break;
-+
-+ return scnprintf(buf, PAGE_SIZE, "%s\n" SCST_SYSFS_KEY_MARK "\n",
-+ i >= 0 ? scst_tg_state_names[i].n : "???");
-+}
-+
-+static int scst_tg_state_store_work_fn(struct scst_sysfs_work_item *w)
-+{
-+ struct scst_target_group *tg;
-+ char *cmd, *p;
-+ int i, res;
-+
-+ TRACE_ENTRY();
-+
-+ cmd = w->buf;
-+ tg = container_of(w->kobj, struct scst_target_group, kobj);
-+
-+ p = strchr(cmd, '\n');
-+ if (p)
-+ *p = '\0';
-+
-+ for (i = ARRAY_SIZE(scst_tg_state_names) - 1; i >= 0; i--)
-+ if (strcmp(scst_tg_state_names[i].n, cmd) == 0)
-+ break;
-+
-+ res = -EINVAL;
-+ if (i < 0)
-+ goto out;
-+ res = scst_tg_set_state(tg, scst_tg_state_names[i].s);
-+out:
-+ kobject_put(w->kobj);
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t scst_tg_state_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ char *cmd;
-+ struct scst_sysfs_work_item *work;
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ res = -ENOMEM;
-+ cmd = kasprintf(GFP_KERNEL, "%.*s", (int)count, buf);
-+ if (!cmd)
-+ goto out;
-+
-+ res = scst_alloc_sysfs_work(scst_tg_state_store_work_fn, false,
-+ &work);
-+ if (res)
-+ goto out;
-+
-+ work->buf = cmd;
-+ work->kobj = kobj;
-+ kobject_get(kobj);
-+ res = scst_sysfs_queue_wait_work(work);
-+
-+out:
-+ if (res == 0)
-+ res = count;
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_tg_state =
-+ __ATTR(state, S_IRUGO | S_IWUSR, scst_tg_state_show,
-+ scst_tg_state_store);
-+
-+static ssize_t scst_tg_mgmt_show(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ char *buf)
-+{
-+ static const char help[] =
-+ "Usage: echo \"add target\" >mgmt\n"
-+ " echo \"del target\" >mgmt\n";
-+
-+	return scnprintf(buf, PAGE_SIZE, "%s", help);
-+}
-+
-+static int scst_tg_mgmt_store_work_fn(struct scst_sysfs_work_item *w)
-+{
-+ struct scst_target_group *tg;
-+ char *cmd, *p, *pp, *target_name;
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ cmd = w->buf;
-+ tg = container_of(w->kobj, struct scst_target_group, kobj);
-+
-+ p = strchr(cmd, '\n');
-+ if (p)
-+ *p = '\0';
-+
-+ res = -EINVAL;
-+ pp = cmd;
-+ p = scst_get_next_lexem(&pp);
-+ if (strcasecmp(p, "add") == 0) {
-+ target_name = scst_get_next_lexem(&pp);
-+ if (!*target_name)
-+ goto out;
-+ res = scst_tg_tgt_add(tg, target_name);
-+ } else if (strcasecmp(p, "del") == 0) {
-+ target_name = scst_get_next_lexem(&pp);
-+ if (!*target_name)
-+ goto out;
-+ res = scst_tg_tgt_remove_by_name(tg, target_name);
-+ }
-+out:
-+ kobject_put(w->kobj);
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t scst_tg_mgmt_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ char *cmd;
-+ struct scst_sysfs_work_item *work;
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ res = -ENOMEM;
-+ cmd = kasprintf(GFP_KERNEL, "%.*s", (int)count, buf);
-+ if (!cmd)
-+ goto out;
-+
-+ res = scst_alloc_sysfs_work(scst_tg_mgmt_store_work_fn, false,
-+ &work);
-+ if (res)
-+ goto out;
-+
-+ work->buf = cmd;
-+ work->kobj = kobj;
-+ kobject_get(kobj);
-+ res = scst_sysfs_queue_wait_work(work);
-+
-+out:
-+ if (res == 0)
-+ res = count;
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_tg_mgmt =
-+ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_tg_mgmt_show,
-+ scst_tg_mgmt_store);
-+
-+static const struct attribute *scst_tg_attrs[] = {
-+ &scst_tg_mgmt.attr,
-+ &scst_tg_group_id.attr,
-+ &scst_tg_preferred.attr,
-+ &scst_tg_state.attr,
-+ NULL,
-+};
-+
-+int scst_tg_sysfs_add(struct scst_dev_group *dg, struct scst_target_group *tg)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+ res = kobject_add(&tg->kobj, dg->tg_kobj, "%s", tg->name);
-+ if (res)
-+ goto err;
-+ res = sysfs_create_files(&tg->kobj, scst_tg_attrs);
-+ if (res)
-+ goto err;
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+err:
-+ scst_tg_sysfs_del(tg);
-+ goto out;
-+}
-+
-+void scst_tg_sysfs_del(struct scst_target_group *tg)
-+{
-+ TRACE_ENTRY();
-+ sysfs_remove_files(&tg->kobj, scst_tg_attrs);
-+ kobject_del(&tg->kobj);
-+ TRACE_EXIT();
-+}
-+
-+/**
-+ ** SCST sysfs device_groups/<dg>/target_groups directory implementation.
-+ **/
-+
-+static ssize_t scst_dg_tgs_mgmt_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ static const char help[] =
-+ "Usage: echo \"create group_name\" >mgmt\n"
-+ " echo \"del group_name\" >mgmt\n";
-+
-+	return scnprintf(buf, PAGE_SIZE, "%s", help);
-+}
-+
-+static int scst_dg_tgs_mgmt_store_work_fn(struct scst_sysfs_work_item *w)
-+{
-+ struct scst_dev_group *dg;
-+ char *cmd, *p, *pp, *dev_name;
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ cmd = w->buf;
-+ dg = scst_lookup_dg_by_kobj(w->kobj);
-+ WARN_ON(!dg);
-+
-+ p = strchr(cmd, '\n');
-+ if (p)
-+ *p = '\0';
-+
-+ res = -EINVAL;
-+ pp = cmd;
-+ p = scst_get_next_lexem(&pp);
-+ if (strcasecmp(p, "create") == 0 || strcasecmp(p, "add") == 0) {
-+ dev_name = scst_get_next_lexem(&pp);
-+ if (!*dev_name)
-+ goto out;
-+ res = scst_tg_add(dg, dev_name);
-+ } else if (strcasecmp(p, "del") == 0) {
-+ dev_name = scst_get_next_lexem(&pp);
-+ if (!*dev_name)
-+ goto out;
-+ res = scst_tg_remove_by_name(dg, dev_name);
-+ }
-+out:
-+ kobject_put(w->kobj);
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t scst_dg_tgs_mgmt_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ char *cmd;
-+ struct scst_sysfs_work_item *work;
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ res = -ENOMEM;
-+ cmd = kasprintf(GFP_KERNEL, "%.*s", (int)count, buf);
-+ if (!cmd)
-+ goto out;
-+
-+ res = scst_alloc_sysfs_work(scst_dg_tgs_mgmt_store_work_fn, false,
-+ &work);
-+ if (res)
-+ goto out;
-+
-+ work->buf = cmd;
-+ work->kobj = kobj;
-+ kobject_get(kobj);
-+ res = scst_sysfs_queue_wait_work(work);
-+
-+out:
-+ if (res == 0)
-+ res = count;
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_dg_tgs_mgmt =
-+ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_dg_tgs_mgmt_show,
-+ scst_dg_tgs_mgmt_store);
-+
-+static const struct attribute *scst_dg_tgs_attrs[] = {
-+ &scst_dg_tgs_mgmt.attr,
-+ NULL,
-+};
-+
-+/**
-+ ** SCST sysfs device_groups directory implementation.
-+ **/
-+
-+int scst_dg_sysfs_add(struct kobject *parent, struct scst_dev_group *dg)
-+{
-+ int res;
-+
-+ dg->dev_kobj = NULL;
-+ dg->tg_kobj = NULL;
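-+	/* Create <group>/ with its "devices" and "target_groups" subdirectories. */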
-+ res = kobject_add(&dg->kobj, parent, "%s", dg->name);
-+ if (res)
-+ goto err;
-+ res = -EEXIST;
-+ dg->dev_kobj = kobject_create_and_add("devices", &dg->kobj);
-+ if (!dg->dev_kobj)
-+ goto err;
-+ res = sysfs_create_files(dg->dev_kobj, scst_dg_devs_attrs);
-+ if (res)
-+ goto err;
-+ dg->tg_kobj = kobject_create_and_add("target_groups", &dg->kobj);
-+ if (!dg->tg_kobj)
-+ goto err;
-+ res = sysfs_create_files(dg->tg_kobj, scst_dg_tgs_attrs);
-+ if (res)
-+ goto err;
-+out:
-+ return res;
-+err:
-+ scst_dg_sysfs_del(dg);
-+ goto out;
-+}
-+
-+void scst_dg_sysfs_del(struct scst_dev_group *dg)
-+{
-+ if (dg->tg_kobj) {
-+ sysfs_remove_files(dg->tg_kobj, scst_dg_tgs_attrs);
-+ kobject_del(dg->tg_kobj);
-+ kobject_put(dg->tg_kobj);
-+ dg->tg_kobj = NULL;
-+ }
-+ if (dg->dev_kobj) {
-+ sysfs_remove_files(dg->dev_kobj, scst_dg_devs_attrs);
-+ kobject_del(dg->dev_kobj);
-+ kobject_put(dg->dev_kobj);
-+ dg->dev_kobj = NULL;
-+ }
-+ kobject_del(&dg->kobj);
-+}
-+
-+static ssize_t scst_device_groups_mgmt_show(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ char *buf)
-+{
-+ static const char help[] =
-+ "Usage: echo \"create group_name\" >mgmt\n"
-+ " echo \"del group_name\" >mgmt\n";
-+
-+	return scnprintf(buf, PAGE_SIZE, "%s", help);
-+}
-+
-+static ssize_t scst_device_groups_mgmt_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ int res;
-+ char *p, *pp, *input, *group_name;
-+
-+ TRACE_ENTRY();
-+
-+	input = kasprintf(GFP_KERNEL, "%.*s", (int)count, buf);
-+	if (input == NULL) {
-+		res = -ENOMEM;
-+		goto out;
-+	}
-+	pp = input;
-+ p = strchr(input, '\n');
-+ if (p)
-+ *p = '\0';
-+
-+ res = -EINVAL;
-+ p = scst_get_next_lexem(&pp);
-+ if (strcasecmp(p, "create") == 0 || strcasecmp(p, "add") == 0) {
-+ group_name = scst_get_next_lexem(&pp);
-+ if (!*group_name)
-+ goto out;
-+ res = scst_dg_add(scst_device_groups_kobj, group_name);
-+ } else if (strcasecmp(p, "del") == 0) {
-+ group_name = scst_get_next_lexem(&pp);
-+ if (!*group_name)
-+ goto out;
-+ res = scst_dg_remove(group_name);
-+ }
-+out:
-+ kfree(input);
-+ if (res == 0)
-+ res = count;
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_device_groups_mgmt =
-+ __ATTR(mgmt, S_IRUGO | S_IWUSR, scst_device_groups_mgmt_show,
-+ scst_device_groups_mgmt_store);
-+
-+static const struct attribute *scst_device_groups_attrs[] = {
-+ &scst_device_groups_mgmt.attr,
-+ NULL,
-+};
-+
-+/**
-+ ** SCST sysfs root directory implementation
-+ **/
-+
-+static ssize_t scst_threads_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int count;
-+
-+ TRACE_ENTRY();
-+
-+ count = sprintf(buf, "%d\n%s", scst_main_cmd_threads.nr_threads,
-+ (scst_main_cmd_threads.nr_threads != scst_threads) ?
-+ SCST_SYSFS_KEY_MARK "\n" : "");
-+
-+ TRACE_EXIT();
-+ return count;
-+}
-+
-+static int scst_process_threads_store(int newtn)
-+{
-+ int res;
-+ long oldtn, delta;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("newtn %d", newtn);
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res != 0)
-+ goto out;
-+
-+ oldtn = scst_main_cmd_threads.nr_threads;
-+
-+ delta = newtn - oldtn;
-+ if (delta < 0)
-+ scst_del_threads(&scst_main_cmd_threads, -delta);
-+ else {
-+ res = scst_add_threads(&scst_main_cmd_threads, NULL, NULL, delta);
-+ if (res != 0)
-+ goto out_up;
-+ }
-+
-+ PRINT_INFO("Changed cmd threads num: old %ld, new %d", oldtn, newtn);
-+
-+out_up:
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_threads_store_work_fn(struct scst_sysfs_work_item *work)
-+{
-+ return scst_process_threads_store(work->new_threads_num);
-+}
-+
-+static ssize_t scst_threads_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ long newtn;
-+ struct scst_sysfs_work_item *work;
-+
-+ TRACE_ENTRY();
-+
-+ res = strict_strtol(buf, 0, &newtn);
-+ if (res != 0) {
-+ PRINT_ERROR("strict_strtol() for %s failed: %d ", buf, res);
-+ goto out;
-+ }
-+ if (newtn <= 0) {
-+ PRINT_ERROR("Illegal threads num value %ld", newtn);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ res = scst_alloc_sysfs_work(scst_threads_store_work_fn, false, &work);
-+ if (res != 0)
-+ goto out;
-+
-+ work->new_threads_num = newtn;
-+
-+ res = scst_sysfs_queue_wait_work(work);
-+ if (res == 0)
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_threads_attr =
-+ __ATTR(threads, S_IRUGO | S_IWUSR, scst_threads_show,
-+ scst_threads_store);
-+
-+static ssize_t scst_setup_id_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int count;
-+
-+ TRACE_ENTRY();
-+
-+ count = sprintf(buf, "0x%x\n%s\n", scst_setup_id,
-+ (scst_setup_id == 0) ? "" : SCST_SYSFS_KEY_MARK);
-+
-+ TRACE_EXIT();
-+ return count;
-+}
-+
-+static ssize_t scst_setup_id_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ unsigned long val;
-+
-+ TRACE_ENTRY();
-+
-+ res = strict_strtoul(buf, 0, &val);
-+ if (res != 0) {
-+ PRINT_ERROR("strict_strtoul() for %s failed: %d ", buf, res);
-+ goto out;
-+ }
-+
-+ scst_setup_id = val;
-+ PRINT_INFO("Changed scst_setup_id to %x", scst_setup_id);
-+
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_setup_id_attr =
-+ __ATTR(setup_id, S_IRUGO | S_IWUSR, scst_setup_id_show,
-+ scst_setup_id_store);
-+
-+static ssize_t scst_max_tasklet_cmd_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int count;
-+
-+ TRACE_ENTRY();
-+
-+ count = sprintf(buf, "%d\n%s\n", scst_max_tasklet_cmd,
-+ (scst_max_tasklet_cmd == SCST_DEF_MAX_TASKLET_CMD)
-+ ? "" : SCST_SYSFS_KEY_MARK);
-+
-+ TRACE_EXIT();
-+ return count;
-+}
-+
-+static ssize_t scst_max_tasklet_cmd_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ unsigned long val;
-+
-+ TRACE_ENTRY();
-+
-+ res = strict_strtoul(buf, 0, &val);
-+ if (res != 0) {
-+ PRINT_ERROR("strict_strtoul() for %s failed: %d ", buf, res);
-+ goto out;
-+ }
-+
-+ scst_max_tasklet_cmd = val;
-+ PRINT_INFO("Changed scst_max_tasklet_cmd to %d", scst_max_tasklet_cmd);
-+
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_max_tasklet_cmd_attr =
-+ __ATTR(max_tasklet_cmd, S_IRUGO | S_IWUSR, scst_max_tasklet_cmd_show,
-+ scst_max_tasklet_cmd_store);
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+
-+static ssize_t scst_main_trace_level_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return scst_trace_level_show(scst_local_trace_tbl, trace_flag,
-+ buf, NULL);
-+}
-+
-+static ssize_t scst_main_trace_level_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ res = mutex_lock_interruptible(&scst_log_mutex);
-+ if (res != 0)
-+ goto out;
-+
-+ res = scst_write_trace(buf, count, &trace_flag,
-+ SCST_DEFAULT_LOG_FLAGS, "scst", scst_local_trace_tbl);
-+
-+ mutex_unlock(&scst_log_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_main_trace_level_attr =
-+ __ATTR(trace_level, S_IRUGO | S_IWUSR, scst_main_trace_level_show,
-+ scst_main_trace_level_store);
-+
-+#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
-+
-+static ssize_t scst_version_show(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ char *buf)
-+{
-+ TRACE_ENTRY();
-+
-+ sprintf(buf, "%s\n", SCST_VERSION_STRING);
-+
-+#ifdef CONFIG_SCST_STRICT_SERIALIZING
-+ strcat(buf, "STRICT_SERIALIZING\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ strcat(buf, "EXTRACHECKS\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_TRACING
-+ strcat(buf, "TRACING\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG
-+ strcat(buf, "DEBUG\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_TM
-+ strcat(buf, "DEBUG_TM\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_RETRY
-+ strcat(buf, "DEBUG_RETRY\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_OOM
-+ strcat(buf, "DEBUG_OOM\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_SN
-+ strcat(buf, "DEBUG_SN\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
-+ strcat(buf, "USE_EXPECTED_VALUES\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ strcat(buf, "TEST_IO_IN_SIRQ\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_STRICT_SECURITY
-+ strcat(buf, "STRICT_SECURITY\n");
-+#endif
-+
-+ TRACE_EXIT();
-+ return strlen(buf);
-+}
-+
-+static struct kobj_attribute scst_version_attr =
-+ __ATTR(version, S_IRUGO, scst_version_show, NULL);
-+
-+static ssize_t scst_last_sysfs_mgmt_res_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ spin_lock(&sysfs_work_lock);
-+ TRACE_DBG("active_sysfs_works %d", active_sysfs_works);
-+ if (active_sysfs_works > 0)
-+ res = -EAGAIN;
-+ else
-+ res = sprintf(buf, "%d\n", last_sysfs_work_res);
-+ spin_unlock(&sysfs_work_lock);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_last_sysfs_mgmt_res_attr =
-+ __ATTR(last_sysfs_mgmt_res, S_IRUGO,
-+ scst_last_sysfs_mgmt_res_show, NULL);
-+
-+static struct attribute *scst_sysfs_root_default_attrs[] = {
-+ &scst_threads_attr.attr,
-+ &scst_setup_id_attr.attr,
-+ &scst_max_tasklet_cmd_attr.attr,
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ &scst_main_trace_level_attr.attr,
-+#endif
-+ &scst_version_attr.attr,
-+ &scst_last_sysfs_mgmt_res_attr.attr,
-+ NULL,
-+};
-+
-+static void scst_sysfs_root_release(struct kobject *kobj)
-+{
-+ complete_all(&scst_sysfs_root_release_completion);
-+}
-+
-+static struct kobj_type scst_sysfs_root_ktype = {
-+ .sysfs_ops = &scst_sysfs_ops,
-+ .release = scst_sysfs_root_release,
-+ .default_attrs = scst_sysfs_root_default_attrs,
-+};
-+
-+/**
-+ ** Sysfs user info
-+ **/
-+
-+static DEFINE_MUTEX(scst_sysfs_user_info_mutex);
-+
-+/* All protected by scst_sysfs_user_info_mutex */
-+static LIST_HEAD(scst_sysfs_user_info_list);
-+static uint32_t scst_sysfs_info_cur_cookie;
-+
-+/* scst_sysfs_user_info_mutex supposed to be held */
-+static struct scst_sysfs_user_info *scst_sysfs_user_find_info(uint32_t cookie)
-+{
-+ struct scst_sysfs_user_info *info, *res = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ list_for_each_entry(info, &scst_sysfs_user_info_list,
-+ info_list_entry) {
-+ if (info->info_cookie == cookie) {
-+ res = info;
-+ break;
-+ }
-+ }
-+
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
-+
-+/**
-+ * scst_sysfs_user_get_info() - get user_info
-+ *
-+ * Finds the user_info based on the given cookie and marks it as having
-+ * received the reply by setting its info_being_executed flag.
-+ *
-+ * Returns found entry or NULL.
-+ */
-+struct scst_sysfs_user_info *scst_sysfs_user_get_info(uint32_t cookie)
-+{
-+ struct scst_sysfs_user_info *res = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_sysfs_user_info_mutex);
-+
-+ res = scst_sysfs_user_find_info(cookie);
-+ if (res != NULL) {
-+ if (!res->info_being_executed)
-+ res->info_being_executed = 1;
-+ }
-+
-+ mutex_unlock(&scst_sysfs_user_info_mutex);
-+
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(scst_sysfs_user_get_info);
-+
-+/**
-+ ** Helper functionality that lets target drivers and dev handlers send
-+ ** events to user space and wait for their completion in a safe manner.
-+ ** See iscsi-scst or scst_user for examples of how to use it; a usage
-+ ** sketch also follows scst_wait_info_completion() below.
-+ **/
-+
-+/**
-+ * scst_sysfs_user_add_info() - create and add user_info to the global list
-+ *
-+ * Creates an info structure and adds it to the info_list.
-+ * Returns 0 and sets out_info on success, an error code otherwise.
-+ */
-+int scst_sysfs_user_add_info(struct scst_sysfs_user_info **out_info)
-+{
-+ int res = 0;
-+ struct scst_sysfs_user_info *info;
-+
-+ TRACE_ENTRY();
-+
-+ info = kzalloc(sizeof(*info), GFP_KERNEL);
-+ if (info == NULL) {
-+ PRINT_ERROR("Unable to allocate sysfs user info (size %zd)",
-+ sizeof(*info));
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ mutex_lock(&scst_sysfs_user_info_mutex);
-+
-+ while ((info->info_cookie == 0) ||
-+ (scst_sysfs_user_find_info(info->info_cookie) != NULL))
-+ info->info_cookie = scst_sysfs_info_cur_cookie++;
-+
-+ init_completion(&info->info_completion);
-+
-+ list_add_tail(&info->info_list_entry, &scst_sysfs_user_info_list);
-+ info->info_in_list = 1;
-+
-+ *out_info = info;
-+
-+ mutex_unlock(&scst_sysfs_user_info_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(scst_sysfs_user_add_info);
-+
-+/**
-+ * scst_sysfs_user_del_info - deletes and frees user_info
-+ */
-+void scst_sysfs_user_del_info(struct scst_sysfs_user_info *info)
-+{
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_sysfs_user_info_mutex);
-+
-+ if (info->info_in_list)
-+ list_del(&info->info_list_entry);
-+
-+ mutex_unlock(&scst_sysfs_user_info_mutex);
-+
-+ kfree(info);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_sysfs_user_del_info);
-+
-+/*
-+ * Returns true if the reply has been received and is being processed by
-+ * another part of the kernel, false otherwise. Also removes the user_info
-+ * from the list, so that a late reply from user space will find nothing
-+ * and thus learn that it missed the timeout.
-+ */
-+static bool scst_sysfs_user_info_executing(struct scst_sysfs_user_info *info)
-+{
-+ bool res;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_sysfs_user_info_mutex);
-+
-+ res = info->info_being_executed;
-+
-+ if (info->info_in_list) {
-+ list_del(&info->info_list_entry);
-+ info->info_in_list = 0;
-+ }
-+
-+ mutex_unlock(&scst_sysfs_user_info_mutex);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/**
-+ * scst_wait_info_completion() - wait for a user space event's completion
-+ *
-+ * Waits at most timeout jiffies for the info request to be completed by
-+ * user space. If the reply was received before the timeout and is being
-+ * processed by another part of the kernel, i.e.
-+ * scst_sysfs_user_info_executing() returned true, waits for it to complete
-+ * indefinitely.
-+ *
-+ * Returns the status of the request completion.
-+ */
-+int scst_wait_info_completion(struct scst_sysfs_user_info *info,
-+ unsigned long timeout)
-+{
-+ int res, rc;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Waiting for info %p completion", info);
-+
-+ while (1) {
-+ rc = wait_for_completion_interruptible_timeout(
-+ &info->info_completion, timeout);
-+ if (rc > 0) {
-+ TRACE_DBG("Waiting for info %p finished with %d",
-+ info, rc);
-+ break;
-+ } else if (rc == 0) {
-+ if (!scst_sysfs_user_info_executing(info)) {
-+ PRINT_ERROR("Timeout waiting for user "
-+ "space event %p", info);
-+ res = -EBUSY;
-+ goto out;
-+ } else {
-+ /* Req is being executed in the kernel */
-+ TRACE_DBG("Keep waiting for info %p completion",
-+ info);
-+ wait_for_completion(&info->info_completion);
-+ break;
-+ }
-+ } else if (rc != -ERESTARTSYS) {
-+ res = rc;
-+ PRINT_ERROR("wait_for_completion() failed: %d",
-+ res);
-+ goto out;
-+ } else {
-+ TRACE_DBG("Waiting for info %p finished with %d, "
-+ "retrying", info, rc);
-+ }
-+ }
-+
-+ TRACE_DBG("info %p, status %d", info, info->info_status);
-+ res = info->info_status;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(scst_wait_info_completion);
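-+
-+/*
-+ * A sketch of the typical calling sequence for the helpers above, as seen
-+ * from a driver that wants to ask user space something and wait for the
-+ * answer. send_event_to_user() is a hypothetical, driver-specific delivery
-+ * mechanism, not an SCST function:
-+ */
-+#if 0
-+static int example_sync_user_event(unsigned long timeout)
-+{
-+	struct scst_sysfs_user_info *info;
-+	int res;
-+
-+	res = scst_sysfs_user_add_info(&info);
-+	if (res != 0)
-+		goto out;
-+
-+	/* Hand info->info_cookie to user space by whatever channel fits */
-+	send_event_to_user(info->info_cookie);
-+
-+	/*
-+	 * The user space reply path is expected to call
-+	 * scst_sysfs_user_get_info(cookie), fill info->info_status and
-+	 * complete(&info->info_completion).
-+	 */
-+	res = scst_wait_info_completion(info, timeout);
-+
-+	scst_sysfs_user_del_info(info);
-+
-+out:
-+	return res;
-+}
-+#endif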
-+
-+static struct kobject scst_sysfs_root_kobj;
-+
-+int __init scst_sysfs_init(void)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ sysfs_work_thread = kthread_run(sysfs_work_thread_fn,
-+ NULL, "scst_uid");
-+ if (IS_ERR(sysfs_work_thread)) {
-+ res = PTR_ERR(sysfs_work_thread);
-+ PRINT_ERROR("kthread_run() for user interface thread "
-+ "failed: %d", res);
-+ sysfs_work_thread = NULL;
-+ goto out;
-+ }
-+
-+ res = kobject_init_and_add(&scst_sysfs_root_kobj,
-+ &scst_sysfs_root_ktype, kernel_kobj, "%s", "scst_tgt");
-+ if (res != 0)
-+ goto sysfs_root_add_error;
-+
-+ scst_targets_kobj = kobject_create_and_add("targets",
-+ &scst_sysfs_root_kobj);
-+ if (scst_targets_kobj == NULL)
-+ goto targets_kobj_error;
-+
-+ scst_devices_kobj = kobject_create_and_add("devices",
-+ &scst_sysfs_root_kobj);
-+ if (scst_devices_kobj == NULL)
-+ goto devices_kobj_error;
-+
-+ res = scst_add_sgv_kobj(&scst_sysfs_root_kobj, "sgv");
-+ if (res != 0)
-+ goto sgv_kobj_error;
-+
-+ scst_handlers_kobj = kobject_create_and_add("handlers",
-+ &scst_sysfs_root_kobj);
-+ if (scst_handlers_kobj == NULL)
-+ goto handlers_kobj_error;
-+
-+ scst_device_groups_kobj = kobject_create_and_add("device_groups",
-+ &scst_sysfs_root_kobj);
-+ if (scst_device_groups_kobj == NULL)
-+ goto device_groups_kobj_error;
-+
-+ if (sysfs_create_files(scst_device_groups_kobj,
-+ scst_device_groups_attrs))
-+ goto device_groups_attrs_error;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+device_groups_attrs_error:
-+ kobject_del(scst_device_groups_kobj);
-+ kobject_put(scst_device_groups_kobj);
-+
-+device_groups_kobj_error:
-+ kobject_del(scst_handlers_kobj);
-+ kobject_put(scst_handlers_kobj);
-+
-+handlers_kobj_error:
-+ scst_del_put_sgv_kobj();
-+
-+sgv_kobj_error:
-+ kobject_del(scst_devices_kobj);
-+ kobject_put(scst_devices_kobj);
-+
-+devices_kobj_error:
-+ kobject_del(scst_targets_kobj);
-+ kobject_put(scst_targets_kobj);
-+
-+targets_kobj_error:
-+ kobject_del(&scst_sysfs_root_kobj);
-+
-+sysfs_root_add_error:
-+ kobject_put(&scst_sysfs_root_kobj);
-+
-+ kthread_stop(sysfs_work_thread);
-+
-+ if (res == 0)
-+ res = -EINVAL;
-+
-+ goto out;
-+}
-+
-+void scst_sysfs_cleanup(void)
-+{
-+ TRACE_ENTRY();
-+
-+ PRINT_INFO("%s", "Exiting SCST sysfs hierarchy...");
-+
-+ scst_del_put_sgv_kobj();
-+
-+ kobject_del(scst_devices_kobj);
-+ kobject_put(scst_devices_kobj);
-+
-+ kobject_del(scst_targets_kobj);
-+ kobject_put(scst_targets_kobj);
-+
-+ kobject_del(scst_handlers_kobj);
-+ kobject_put(scst_handlers_kobj);
-+
-+ sysfs_remove_files(scst_device_groups_kobj, scst_device_groups_attrs);
-+
-+ kobject_del(scst_device_groups_kobj);
-+ kobject_put(scst_device_groups_kobj);
-+
-+ kobject_del(&scst_sysfs_root_kobj);
-+ kobject_put(&scst_sysfs_root_kobj);
-+
-+ wait_for_completion(&scst_sysfs_root_release_completion);
-+ /*
-+	 * There is a race: if, inside release(), a reschedule happens just
-+	 * after complete() is called, then exiting and unloading the scst
-+	 * module immediately would oops there. So give it a chance to quit
-+	 * gracefully. Unfortunately, the current kobject implementation
-+	 * doesn't allow a better way to handle this.
-+ */
-+ msleep(3000);
-+
-+ if (sysfs_work_thread)
-+ kthread_stop(sysfs_work_thread);
-+
-+ PRINT_INFO("%s", "Exiting SCST sysfs hierarchy done");
-+
-+ TRACE_EXIT();
-+ return;
-+}
-diff -uprN orig/linux-3.2/include/scst/scst_debug.h linux-3.2/include/scst/scst_debug.h
---- orig/linux-3.2/include/scst/scst_debug.h
-+++ linux-3.2/include/scst/scst_debug.h
-@@ -0,0 +1,351 @@
-+/*
-+ * include/scst_debug.h
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * Contains macros for execution tracing and error reporting
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#ifndef __SCST_DEBUG_H
-+#define __SCST_DEBUG_H
-+
-+#include <generated/autoconf.h> /* for CONFIG_* */
-+
-+#include <linux/bug.h> /* for WARN_ON_ONCE */
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+#define EXTRACHECKS_BUG_ON(a) BUG_ON(a)
-+#define EXTRACHECKS_WARN_ON(a) WARN_ON(a)
-+#define EXTRACHECKS_WARN_ON_ONCE(a) WARN_ON_ONCE(a)
-+#else
-+#define EXTRACHECKS_BUG_ON(a) do { } while (0)
-+#define EXTRACHECKS_WARN_ON(a) do { } while (0)
-+#define EXTRACHECKS_WARN_ON_ONCE(a) do { } while (0)
-+#endif
-+
-+#define TRACE_NULL 0x00000000
-+#define TRACE_DEBUG 0x00000001
-+#define TRACE_FUNCTION 0x00000002
-+#define TRACE_LINE 0x00000004
-+#define TRACE_PID 0x00000008
-+#ifndef GENERATING_UPSTREAM_PATCH
-+#define TRACE_ENTRYEXIT 0x00000010
-+#endif
-+#define TRACE_BUFF 0x00000020
-+#define TRACE_MEMORY 0x00000040
-+#define TRACE_SG_OP 0x00000080
-+#define TRACE_OUT_OF_MEM 0x00000100
-+#define TRACE_MINOR 0x00000200 /* less important events */
-+#define TRACE_MGMT 0x00000400
-+#define TRACE_MGMT_DEBUG 0x00000800
-+#define TRACE_SCSI 0x00001000
-+#define TRACE_SPECIAL 0x00002000 /* filtering debug, etc */
-+#define TRACE_FLOW_CONTROL 0x00004000 /* flow control in action */
-+#define TRACE_PRES 0x00008000
-+#define TRACE_ALL 0xffffffff
-+/* Flags 0xXXXX0000 are local for users */
-+
-+#define TRACE_MINOR_AND_MGMT_DBG (TRACE_MINOR|TRACE_MGMT_DEBUG)
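-+
-+/*
-+ * A module using this header enables trace categories by OR-ing the bits
-+ * above into its trace_flag, which the TRACE*() macros below test. A
-+ * minimal sketch (the module name and the initial mask are made up):
-+ */
-+#if 0
-+#define LOG_PREFIX "example_drv"	/* define before including this file */
-+#include <scst/scst_debug.h>
-+
-+static unsigned long trace_flag = TRACE_OUT_OF_MEM | TRACE_MGMT | TRACE_PID;
-+
-+static void example(void)
-+{
-+	/* Printed only when TRACE_MGMT is set in trace_flag */
-+	TRACE(TRACE_MGMT, "Device %s reset", "dev0");
-+}
-+#endif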
-+
-+#ifndef KERN_CONT
-+#define KERN_CONT ""
-+#endif
-+
-+/*
-+ * Note: in the next two printk() statements the KERN_CONT macro is only
-+ * present to suppress a checkpatch warning (KERN_CONT is defined as "").
-+ */
-+#define PRINT(log_flag, format, args...) \
-+ printk(log_flag format "\n", ## args)
-+#define PRINTN(log_flag, format, args...) \
-+ printk(log_flag format, ## args)
-+
-+#ifdef LOG_PREFIX
-+#define __LOG_PREFIX LOG_PREFIX
-+#else
-+#define __LOG_PREFIX NULL
-+#endif
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+
-+#ifndef CONFIG_SCST_DEBUG
-+#define ___unlikely(a) (a)
-+#else
-+#define ___unlikely(a) unlikely(a)
-+#endif
-+
-+/*
-+ * We don't print a prefix for debug traces, to avoid putting additional
-+ * pressure on the logging system when a lot of logging is going on.
-+ */
-+
-+int debug_print_prefix(unsigned long trace_flag,
-+ const char *prefix, const char *func, int line);
-+void debug_print_buffer(const void *data, int len);
-+const char *debug_transport_id_to_initiator_name(const uint8_t *transport_id);
-+
-+#define TRACING_MINOR() (trace_flag & TRACE_MINOR)
-+
-+#define TRACE(trace, format, args...) \
-+do { \
-+ if (___unlikely(trace_flag & (trace))) { \
-+ debug_print_prefix(trace_flag, __LOG_PREFIX, \
-+ __func__, __LINE__); \
-+ PRINT(KERN_CONT, format, args); \
-+ } \
-+} while (0)
-+
-+#ifdef CONFIG_SCST_DEBUG
-+
-+#define PRINT_BUFFER(message, buff, len) \
-+do { \
-+ PRINT(KERN_INFO, "%s:%s:", __func__, message); \
-+ debug_print_buffer(buff, len); \
-+} while (0)
-+
-+#else
-+
-+#define PRINT_BUFFER(message, buff, len) \
-+do { \
-+ PRINT(KERN_INFO, "%s:", message); \
-+ debug_print_buffer(buff, len); \
-+} while (0)
-+
-+#endif
-+
-+#define PRINT_BUFF_FLAG(flag, message, buff, len) \
-+do { \
-+ if (___unlikely(trace_flag & (flag))) { \
-+ debug_print_prefix(trace_flag, NULL, __func__, __LINE__);\
-+ PRINT(KERN_CONT, "%s:", message); \
-+ debug_print_buffer(buff, len); \
-+ } \
-+} while (0)
-+
-+#else /* CONFIG_SCST_DEBUG || CONFIG_SCST_TRACING */
-+
-+#define TRACING_MINOR() (false)
-+
-+#define TRACE(trace, args...) do {} while (0)
-+#define PRINT_BUFFER(message, buff, len) do {} while (0)
-+#define PRINT_BUFF_FLAG(flag, message, buff, len) do {} while (0)
-+
-+#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
-+
-+#ifdef CONFIG_SCST_DEBUG
-+
-+#define TRACE_DBG_FLAG(trace, format, args...) \
-+do { \
-+ if (trace_flag & (trace)) { \
-+ debug_print_prefix(trace_flag, NULL, __func__, __LINE__);\
-+ PRINT(KERN_CONT, format, args); \
-+ } \
-+} while (0)
-+
-+#define TRACE_MEM(args...) TRACE_DBG_FLAG(TRACE_MEMORY, args)
-+#define TRACE_SG(args...) TRACE_DBG_FLAG(TRACE_SG_OP, args)
-+#define TRACE_DBG(args...) TRACE_DBG_FLAG(TRACE_DEBUG, args)
-+#define TRACE_DBG_SPECIAL(args...) TRACE_DBG_FLAG(TRACE_DEBUG|TRACE_SPECIAL, args)
-+#define TRACE_MGMT_DBG(args...) TRACE_DBG_FLAG(TRACE_MGMT_DEBUG, args)
-+#define TRACE_MGMT_DBG_SPECIAL(args...) \
-+ TRACE_DBG_FLAG(TRACE_MGMT_DEBUG|TRACE_SPECIAL, args)
-+#define TRACE_PR(args...) TRACE_DBG_FLAG(TRACE_PRES, args)
-+
-+#define TRACE_BUFFER(message, buff, len) \
-+do { \
-+ if (trace_flag & TRACE_BUFF) { \
-+ debug_print_prefix(trace_flag, NULL, __func__, __LINE__);\
-+ PRINT(KERN_CONT, "%s:", message); \
-+ debug_print_buffer(buff, len); \
-+ } \
-+} while (0)
-+
-+#define TRACE_BUFF_FLAG(flag, message, buff, len) \
-+do { \
-+ if (trace_flag & (flag)) { \
-+ debug_print_prefix(trace_flag, NULL, __func__, __LINE__);\
-+ PRINT(KERN_CONT, "%s:", message); \
-+ debug_print_buffer(buff, len); \
-+ } \
-+} while (0)
-+
-+#define PRINT_LOG_FLAG(log_flag, format, args...) \
-+do { \
-+ debug_print_prefix(trace_flag, __LOG_PREFIX, __func__, __LINE__);\
-+ PRINT(KERN_CONT, format, args); \
-+} while (0)
-+
-+#define PRINT_WARNING(format, args...) \
-+do { \
-+ debug_print_prefix(trace_flag, __LOG_PREFIX, __func__, __LINE__);\
-+ PRINT(KERN_CONT, "***WARNING***: " format, args); \
-+} while (0)
-+
-+#define PRINT_ERROR(format, args...) \
-+do { \
-+ debug_print_prefix(trace_flag, __LOG_PREFIX, __func__, __LINE__);\
-+ PRINT(KERN_CONT, "***ERROR***: " format, args); \
-+} while (0)
-+
-+#define PRINT_CRIT_ERROR(format, args...) \
-+do { \
-+ debug_print_prefix(trace_flag, __LOG_PREFIX, __func__, __LINE__);\
-+ PRINT(KERN_CONT, "***CRITICAL ERROR***: " format, args); \
-+} while (0)
-+
-+#define PRINT_INFO(format, args...) \
-+do { \
-+ debug_print_prefix(trace_flag, __LOG_PREFIX, __func__, __LINE__);\
-+ PRINT(KERN_CONT, format, args); \
-+} while (0)
-+
-+#ifndef GENERATING_UPSTREAM_PATCH
-+#define TRACE_ENTRY() \
-+do { \
-+ if (trace_flag & TRACE_ENTRYEXIT) { \
-+ if (trace_flag & TRACE_PID) { \
-+ PRINT(KERN_INFO, "[%d]: ENTRY %s", current->pid, \
-+ __func__); \
-+ } \
-+ else { \
-+ PRINT(KERN_INFO, "ENTRY %s", __func__); \
-+ } \
-+ } \
-+} while (0)
-+
-+#define TRACE_EXIT() \
-+do { \
-+ if (trace_flag & TRACE_ENTRYEXIT) { \
-+ if (trace_flag & TRACE_PID) { \
-+ PRINT(KERN_INFO, "[%d]: EXIT %s", current->pid, \
-+ __func__); \
-+ } \
-+ else { \
-+ PRINT(KERN_INFO, "EXIT %s", __func__); \
-+ } \
-+ } \
-+} while (0)
-+
-+#define TRACE_EXIT_RES(res) \
-+do { \
-+ if (trace_flag & TRACE_ENTRYEXIT) { \
-+ if (trace_flag & TRACE_PID) { \
-+ PRINT(KERN_INFO, "[%d]: EXIT %s: %ld", current->pid, \
-+ __func__, (long)(res)); \
-+ } \
-+ else { \
-+ PRINT(KERN_INFO, "EXIT %s: %ld", \
-+ __func__, (long)(res)); \
-+ } \
-+ } \
-+} while (0)
-+
-+#define TRACE_EXIT_HRES(res) \
-+do { \
-+ if (trace_flag & TRACE_ENTRYEXIT) { \
-+ if (trace_flag & TRACE_PID) { \
-+ PRINT(KERN_INFO, "[%d]: EXIT %s: 0x%lx", current->pid, \
-+ __func__, (long)(res)); \
-+ } \
-+ else { \
-+			PRINT(KERN_INFO, "EXIT %s: 0x%lx", \
-+ __func__, (long)(res)); \
-+ } \
-+ } \
-+} while (0)
-+#endif
-+
-+#else /* CONFIG_SCST_DEBUG */
-+
-+#define TRACE_MEM(format, args...) do {} while (0)
-+#define TRACE_SG(format, args...) do {} while (0)
-+#define TRACE_DBG(format, args...) do {} while (0)
-+#define TRACE_DBG_FLAG(format, args...) do {} while (0)
-+#define TRACE_DBG_SPECIAL(format, args...) do {} while (0)
-+#define TRACE_MGMT_DBG(format, args...) do {} while (0)
-+#define TRACE_MGMT_DBG_SPECIAL(format, args...) do {} while (0)
-+#define TRACE_PR(format, args...) do {} while (0)
-+#define TRACE_BUFFER(message, buff, len) do {} while (0)
-+#define TRACE_BUFF_FLAG(flag, message, buff, len) do {} while (0)
-+
-+#ifndef GENERATING_UPSTREAM_PATCH
-+#define TRACE_ENTRY() do {} while (0)
-+#define TRACE_EXIT() do {} while (0)
-+#define TRACE_EXIT_RES(res) do {} while (0)
-+#define TRACE_EXIT_HRES(res) do {} while (0)
-+#endif
-+
-+#ifdef LOG_PREFIX
-+
-+#define PRINT_INFO(format, args...) \
-+do { \
-+ PRINT(KERN_INFO, "%s: " format, LOG_PREFIX, args); \
-+} while (0)
-+
-+#define PRINT_WARNING(format, args...) \
-+do { \
-+ PRINT(KERN_INFO, "%s: ***WARNING***: " \
-+ format, LOG_PREFIX, args); \
-+} while (0)
-+
-+#define PRINT_ERROR(format, args...) \
-+do { \
-+ PRINT(KERN_INFO, "%s: ***ERROR***: " \
-+ format, LOG_PREFIX, args); \
-+} while (0)
-+
-+#define PRINT_CRIT_ERROR(format, args...) \
-+do { \
-+ PRINT(KERN_INFO, "%s: ***CRITICAL ERROR***: " \
-+ format, LOG_PREFIX, args); \
-+} while (0)
-+
-+#else
-+
-+#define PRINT_INFO(format, args...) \
-+do { \
-+ PRINT(KERN_INFO, format, args); \
-+} while (0)
-+
-+#define PRINT_WARNING(format, args...) \
-+do { \
-+ PRINT(KERN_INFO, "***WARNING***: " \
-+ format, args); \
-+} while (0)
-+
-+#define PRINT_ERROR(format, args...) \
-+do { \
-+ PRINT(KERN_ERR, "***ERROR***: " \
-+ format, args); \
-+} while (0)
-+
-+#define PRINT_CRIT_ERROR(format, args...) \
-+do { \
-+ PRINT(KERN_CRIT, "***CRITICAL ERROR***: " \
-+ format, args); \
-+} while (0)
-+
-+#endif /* LOG_PREFIX */
-+
-+#endif /* CONFIG_SCST_DEBUG */
-+
-+#if defined(CONFIG_SCST_DEBUG) && defined(CONFIG_DEBUG_SLAB)
-+#define SCST_SLAB_FLAGS (SLAB_RED_ZONE | SLAB_POISON)
-+#else
-+#define SCST_SLAB_FLAGS 0L
-+#endif
-+
-+#endif /* __SCST_DEBUG_H */
-diff -uprN orig/linux-3.2/drivers/scst/scst_debug.c linux-3.2/drivers/scst/scst_debug.c
---- orig/linux-3.2/drivers/scst/scst_debug.c
-+++ linux-3.2/drivers/scst/scst_debug.c
-@@ -0,0 +1,228 @@
-+/*
-+ * scst_debug.c
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * Contains helper functions for execution tracing and error reporting.
-+ * Intended to be included in main .c file.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/version.h>
-+
-+#include <linux/export.h>
-+
-+#include <scst/scst.h>
-+#include <scst/scst_debug.h>
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+
-+#define TRACE_BUF_SIZE 512
-+
-+static char trace_buf[TRACE_BUF_SIZE];
-+static DEFINE_SPINLOCK(trace_buf_lock);
-+
-+static inline int get_current_tid(void)
-+{
-+ /* Code should be the same as in sys_gettid() */
-+ if (in_interrupt()) {
-+ /*
-+		 * Unfortunately, task_pid_vnr() isn't IRQ-safe, so calling it
-+		 * here could oops. ToDo.
-+ */
-+ return 0;
-+ }
-+ return task_pid_vnr(current);
-+}
-+
-+/**
-+ * debug_print_prefix() - print the debug prefix for a log line
-+ *
-+ * Prints, if requested by trace_flag, the debug prefix for each log line.
-+ */
-+int debug_print_prefix(unsigned long trace_flag,
-+ const char *prefix, const char *func, int line)
-+{
-+ int i = 0;
-+ unsigned long flags;
-+ int pid = get_current_tid();
-+
-+ spin_lock_irqsave(&trace_buf_lock, flags);
-+
-+ trace_buf[0] = '\0';
-+
-+ if (trace_flag & TRACE_PID)
-+ i += snprintf(&trace_buf[i], TRACE_BUF_SIZE, "[%d]: ", pid);
-+ if (prefix != NULL)
-+ i += snprintf(&trace_buf[i], TRACE_BUF_SIZE - i, "%s: ",
-+ prefix);
-+ if (trace_flag & TRACE_FUNCTION)
-+ i += snprintf(&trace_buf[i], TRACE_BUF_SIZE - i, "%s:", func);
-+ if (trace_flag & TRACE_LINE)
-+ i += snprintf(&trace_buf[i], TRACE_BUF_SIZE - i, "%i:", line);
-+
-+ PRINTN(KERN_INFO, "%s", trace_buf);
-+
-+ spin_unlock_irqrestore(&trace_buf_lock, flags);
-+
-+ return i;
-+}
-+EXPORT_SYMBOL(debug_print_prefix);
-+
-+/**
-+ * debug_print_buffer() - print a buffer
-+ *
-+ * Prints in the log data from the buffer
-+ */
-+void debug_print_buffer(const void *data, int len)
-+{
-+ int z, z1, i;
-+ const unsigned char *buf = (const unsigned char *) data;
-+ unsigned long flags;
-+
-+ if (buf == NULL)
-+ return;
-+
-+ spin_lock_irqsave(&trace_buf_lock, flags);
-+
-+ PRINT(KERN_INFO, " (h)___0__1__2__3__4__5__6__7__8__9__A__B__C__D__E__F");
-+ for (z = 0, z1 = 0, i = 0; z < len; z++) {
-+ if (z % 16 == 0) {
-+ if (z != 0) {
-+ i += snprintf(&trace_buf[i], TRACE_BUF_SIZE - i,
-+ " ");
-+ for (; (z1 < z) && (i < TRACE_BUF_SIZE - 1);
-+ z1++) {
-+ if ((buf[z1] >= 0x20) &&
-+ (buf[z1] < 0x80))
-+ trace_buf[i++] = buf[z1];
-+ else
-+ trace_buf[i++] = '.';
-+ }
-+ trace_buf[i] = '\0';
-+ PRINT(KERN_INFO, "%s", trace_buf);
-+ i = 0;
-+ }
-+ i += snprintf(&trace_buf[i], TRACE_BUF_SIZE - i,
-+ "%4x: ", z);
-+ }
-+ i += snprintf(&trace_buf[i], TRACE_BUF_SIZE - i, "%02x ",
-+ buf[z]);
-+ }
-+
-+ i += snprintf(&trace_buf[i], TRACE_BUF_SIZE - i, " ");
-+ for (; (z1 < z) && (i < TRACE_BUF_SIZE - 1); z1++) {
-+		if ((buf[z1] >= 0x20) && (buf[z1] < 0x80))
-+ trace_buf[i++] = buf[z1];
-+ else
-+ trace_buf[i++] = '.';
-+ }
-+ trace_buf[i] = '\0';
-+
-+ PRINT(KERN_INFO, "%s", trace_buf);
-+
-+ spin_unlock_irqrestore(&trace_buf_lock, flags);
-+ return;
-+}
-+EXPORT_SYMBOL(debug_print_buffer);
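-+
-+/*
-+ * For illustration, a 20-byte buffer holding "Hello, SCST worl" "d!\0\0"
-+ * would be logged roughly as follows (hex offset, the bytes, then their
-+ * printable ASCII with '.' for non-printable; exact spacing follows the
-+ * format strings above):
-+ *
-+ *  (h)___0__1__2__3__4__5__6__7__8__9__A__B__C__D__E__F
-+ *    0: 48 65 6c 6c 6f 2c 20 53 43 53 54 20 77 6f 72 6c  Hello, SCST worl
-+ *   10: 64 21 00 00  d!..
-+ */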
-+
-+/*
-+ * This function converts a transport_id into string form in an internal
-+ * per-CPU static buffer. That buffer isn't protected in any way, because
-+ * it's acceptable for the name to be corrupted in the debug logs by a race
-+ * for this buffer.
-+ *
-+ * Note! You can't call this function two or more times in a single logging
-+ * (printk) statement, because each new call overwrites the data written
-+ * into the buffer by the previous call. Instead, split such a logging
-+ * statement into smaller statements, each calling
-+ * debug_transport_id_to_initiator_name() only once.
-+ */
-+const char *debug_transport_id_to_initiator_name(const uint8_t *transport_id)
-+{
-+ /*
-+	 * No external protection, because it's acceptable if the name
-+	 * gets corrupted in the debug logs due to the race for this
-+	 * buffer.
-+ */
-+#define SIZEOF_NAME_BUF 256
-+ static char name_bufs[NR_CPUS][SIZEOF_NAME_BUF];
-+ char *name_buf;
-+ unsigned long flags;
-+
-+ BUG_ON(transport_id == NULL); /* better to catch it not under lock */
-+
-+ spin_lock_irqsave(&trace_buf_lock, flags);
-+
-+ name_buf = name_bufs[smp_processor_id()];
-+
-+ /*
-+	 * To prevent users racing with us from accidentally missing
-+	 * their NULL terminator.
-+ */
-+ memset(name_buf, 0, SIZEOF_NAME_BUF);
-+ smp_mb();
-+
-+ switch (transport_id[0] & 0x0f) {
-+ case SCSI_TRANSPORTID_PROTOCOLID_ISCSI:
-+ scnprintf(name_buf, SIZEOF_NAME_BUF, "%s",
-+ &transport_id[4]);
-+ break;
-+ case SCSI_TRANSPORTID_PROTOCOLID_FCP2:
-+ scnprintf(name_buf, SIZEOF_NAME_BUF,
-+ "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
-+ transport_id[8], transport_id[9],
-+ transport_id[10], transport_id[11],
-+ transport_id[12], transport_id[13],
-+ transport_id[14], transport_id[15]);
-+ break;
-+ case SCSI_TRANSPORTID_PROTOCOLID_SPI5:
-+ scnprintf(name_buf, SIZEOF_NAME_BUF,
-+ "%x:%x", be16_to_cpu((__force __be16)transport_id[2]),
-+ be16_to_cpu((__force __be16)transport_id[6]));
-+ break;
-+ case SCSI_TRANSPORTID_PROTOCOLID_SRP:
-+ scnprintf(name_buf, SIZEOF_NAME_BUF,
-+ "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"
-+ ":%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
-+ transport_id[8], transport_id[9],
-+ transport_id[10], transport_id[11],
-+ transport_id[12], transport_id[13],
-+ transport_id[14], transport_id[15],
-+ transport_id[16], transport_id[17],
-+ transport_id[18], transport_id[19],
-+ transport_id[20], transport_id[21],
-+ transport_id[22], transport_id[23]);
-+ break;
-+ case SCSI_TRANSPORTID_PROTOCOLID_SAS:
-+ scnprintf(name_buf, SIZEOF_NAME_BUF,
-+ "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
-+ transport_id[4], transport_id[5],
-+ transport_id[6], transport_id[7],
-+ transport_id[8], transport_id[9],
-+ transport_id[10], transport_id[11]);
-+ break;
-+ default:
-+ scnprintf(name_buf, SIZEOF_NAME_BUF,
-+ "(Not known protocol ID %x)", transport_id[0] & 0x0f);
-+ break;
-+ }
-+
-+ spin_unlock_irqrestore(&trace_buf_lock, flags);
-+
-+ return name_buf;
-+#undef SIZEOF_NAME_BUF
-+}
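-+
-+/*
-+ * A sketch of the one-call-per-statement rule from the comment above;
-+ * tid1 and tid2 are hypothetical transport IDs:
-+ */
-+#if 0
-+static void example_log_tids(const uint8_t *tid1, const uint8_t *tid2)
-+{
-+	/*
-+	 * Wrong: the second call overwrites the result of the first, so
-+	 * both %s arguments end up pointing at the same string.
-+	 */
-+	PRINT_INFO("from %s to %s",
-+		debug_transport_id_to_initiator_name(tid1),
-+		debug_transport_id_to_initiator_name(tid2));
-+
-+	/* Right: one conversion per logging statement */
-+	PRINT_INFO("from %s", debug_transport_id_to_initiator_name(tid1));
-+	PRINT_INFO("to %s", debug_transport_id_to_initiator_name(tid2));
-+}
-+#endif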
-+
-+#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
-diff -uprN orig/linux-3.2/include/scst/scst_sgv.h linux-3.2/include/scst/scst_sgv.h
---- orig/linux-3.2/include/scst/scst_sgv.h
-+++ linux-3.2/include/scst/scst_sgv.h
-@@ -0,0 +1,98 @@
-+/*
-+ * include/scst_sgv.h
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * Include file for SCST SGV cache.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+#ifndef __SCST_SGV_H
-+#define __SCST_SGV_H
-+
-+/** SGV pool routines and flag bits **/
-+
-+/* Set if the allocated object must not come from the cache */
-+#define SGV_POOL_ALLOC_NO_CACHED 1
-+
-+/* Set if there should not be any memory allocations on a cache miss */
-+#define SGV_POOL_NO_ALLOC_ON_CACHE_MISS 2
-+
-+/* Set if an object should be returned even if it doesn't have its SG vector built */
-+#define SGV_POOL_RETURN_OBJ_ON_ALLOC_FAIL 4
-+
-+/*
-+ * Set if the allocated object must be a new one, i.e. allocated via the
-+ * cache, but not a recycled, previously cached object
-+ */
-+#define SGV_POOL_ALLOC_GET_NEW 8
-+
-+struct sgv_pool_obj;
-+struct sgv_pool;
-+
-+/*
-+ * Structure to keep a memory limit for an SCST object
-+ */
-+struct scst_mem_lim {
-+	/* How much memory is allocated under this object */
-+ atomic_t alloced_pages;
-+
-+ /*
-+	 * How much memory is allowed to be allocated under this object. Kept
-+	 * here mostly to avoid a possible cache miss when accessing
-+	 * scst_max_dev_cmd_mem.
-+ */
-+ int max_allowed_pages;
-+};
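-+
-+/*
-+ * A sketch of how such a limit is meant to be consumed: charge pages
-+ * atomically and back out when the limit would be exceeded. This only
-+ * illustrates the fields above; it is not the actual SCST accounting code:
-+ */
-+#if 0
-+static bool example_charge_pages(struct scst_mem_lim *mem_lim, int pages)
-+{
-+	if (atomic_add_return(pages, &mem_lim->alloced_pages) >
-+	    mem_lim->max_allowed_pages) {
-+		atomic_sub(pages, &mem_lim->alloced_pages);
-+		return false;	/* over the limit, allocation should fail */
-+	}
-+	return true;
-+}
-+#endif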
-+
-+/* Types of clustering */
-+enum sgv_clustering_types {
-+ /* No clustering performed */
-+ sgv_no_clustering = 0,
-+
-+ /*
-+ * A page will only be merged with the latest previously allocated
-+ * page, so the order of pages in the SG will be preserved.
-+ */
-+ sgv_tail_clustering,
-+
-+ /*
-+ * Free merging of pages at any place in the SG is allowed. This mode
-+ * usually provides the best merging rate.
-+ */
-+ sgv_full_clustering,
-+};
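-+
-+/*
-+ * For example, with tail clustering two physically adjacent PAGE_SIZE
-+ * allocations (say pfn 100, then pfn 101) end up as one SG entry of
-+ * length 2 * PAGE_SIZE instead of two entries; full clustering may in
-+ * addition merge a new page with any earlier entry, not only the latest
-+ * one.
-+ */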
-+
-+struct sgv_pool *sgv_pool_create(const char *name,
-+ enum sgv_clustering_types clustered, int single_alloc_pages,
-+ bool shared, int purge_interval);
-+void sgv_pool_del(struct sgv_pool *pool);
-+
-+void sgv_pool_get(struct sgv_pool *pool);
-+void sgv_pool_put(struct sgv_pool *pool);
-+
-+void sgv_pool_flush(struct sgv_pool *pool);
-+
-+void sgv_pool_set_allocator(struct sgv_pool *pool,
-+ struct page *(*alloc_pages_fn)(struct scatterlist *, gfp_t, void *),
-+ void (*free_pages_fn)(struct scatterlist *, int, void *));
-+
-+struct scatterlist *sgv_pool_alloc(struct sgv_pool *pool, unsigned int size,
-+ gfp_t gfp_mask, int flags, int *count,
-+ struct sgv_pool_obj **sgv, struct scst_mem_lim *mem_lim, void *priv);
-+void sgv_pool_free(struct sgv_pool_obj *sgv, struct scst_mem_lim *mem_lim);
-+
-+void *sgv_get_priv(struct sgv_pool_obj *sgv);
-+
-+void scst_init_mem_lim(struct scst_mem_lim *mem_lim);
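-+
-+/*
-+ * A usage sketch of the API above. The pool name, the 64 KB allocation
-+ * size and passing 0 for single_alloc_pages and purge_interval (0 is
-+ * assumed to select the defaults) are illustrative choices only:
-+ */
-+#if 0
-+static int example_sgv_usage(void)
-+{
-+	struct sgv_pool *pool;
-+	struct sgv_pool_obj *sgv;
-+	struct scst_mem_lim mem_lim;
-+	struct scatterlist *sg;
-+	int count, res = 0;
-+
-+	pool = sgv_pool_create("example", sgv_tail_clustering, 0, false, 0);
-+	if (pool == NULL)
-+		return -ENOMEM;
-+
-+	scst_init_mem_lim(&mem_lim);
-+
-+	sg = sgv_pool_alloc(pool, 64 * 1024, GFP_KERNEL, 0, &count,
-+			    &sgv, &mem_lim, NULL);
-+	if (sg != NULL) {
-+		/* ... use sg[0 .. count-1] for I/O ... */
-+		sgv_pool_free(sgv, &mem_lim);
-+	} else
-+		res = -ENOMEM;
-+
-+	sgv_pool_del(pool);
-+	return res;
-+}
-+#endif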
-+
-+#endif /* __SCST_SGV_H */
-diff -uprN orig/linux-3.2/drivers/scst/scst_mem.h linux-3.2/drivers/scst/scst_mem.h
---- orig/linux-3.2/drivers/scst/scst_mem.h
-+++ linux-3.2/drivers/scst/scst_mem.h
-@@ -0,0 +1,142 @@
-+/*
-+ * scst_mem.h
-+ *
-+ * Copyright (C) 2006 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/scatterlist.h>
-+#include <linux/workqueue.h>
-+
-+#define SGV_POOL_ELEMENTS 11
-+
-+/*
-+ * sg_num is indexed by the page number, pg_count is indexed by the sg number.
-+ * Combined into one entry type to simplify the code (e.g. all the sizeof(*)
-+ * parts) and to save some CPU cache in the non-clustered case.
-+ */
-+struct trans_tbl_ent {
-+ unsigned short sg_num;
-+ unsigned short pg_count;
-+};
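-+
-+/*
-+ * For example, if pages 0 and 1 were merged into SG entry 0 and page 2
-+ * became SG entry 1 (see sgv_alloc_sg_entries()), then sg_num is 1, 1, 2
-+ * for pages 0..2 (the 1-based SG entry holding each page), while pg_count
-+ * is 0, 2 for SG entries 0..1 (the first page index of each entry).
-+ */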
-+
-+/*
-+ * SGV pool object
-+ */
-+struct sgv_pool_obj {
-+ int cache_num;
-+ int pages;
-+
-+ /* jiffies, protected by sgv_pool_lock */
-+ unsigned long time_stamp;
-+
-+ struct list_head recycling_list_entry;
-+ struct list_head sorted_recycling_list_entry;
-+
-+ struct sgv_pool *owner_pool;
-+ int orig_sg;
-+ int orig_length;
-+ int sg_count;
-+ void *allocator_priv;
-+ struct trans_tbl_ent *trans_tbl;
-+ struct scatterlist *sg_entries;
-+ struct scatterlist sg_entries_data[0];
-+};
-+
-+/*
-+ * SGV pool statistics accounting structure
-+ */
-+struct sgv_pool_cache_acc {
-+ atomic_t total_alloc, hit_alloc;
-+ atomic_t merged;
-+};
-+
-+/*
-+ * SGV pool allocation functions
-+ */
-+struct sgv_pool_alloc_fns {
-+ struct page *(*alloc_pages_fn)(struct scatterlist *sg, gfp_t gfp_mask,
-+ void *priv);
-+ void (*free_pages_fn)(struct scatterlist *sg, int sg_count,
-+ void *priv);
-+};
-+
-+/*
-+ * SGV pool
-+ */
-+struct sgv_pool {
-+ enum sgv_clustering_types clustering_type;
-+ int single_alloc_pages;
-+ int max_cached_pages;
-+
-+ struct sgv_pool_alloc_fns alloc_fns;
-+
-+ /* <=4K, <=8, <=16, <=32, <=64, <=128, <=256, <=512, <=1024, <=2048 */
-+ struct kmem_cache *caches[SGV_POOL_ELEMENTS];
-+
-+ spinlock_t sgv_pool_lock; /* outer lock for sgv_pools_lock! */
-+
-+ int purge_interval;
-+
-+ /* Protected by sgv_pool_lock, if necessary */
-+ unsigned int purge_work_scheduled:1;
-+
-+ /* Protected by sgv_pool_lock */
-+ struct list_head sorted_recycling_list;
-+
-+ int inactive_cached_pages; /* protected by sgv_pool_lock */
-+
-+ /* Protected by sgv_pool_lock */
-+ struct list_head recycling_lists[SGV_POOL_ELEMENTS];
-+
-+ int cached_pages, cached_entries; /* protected by sgv_pool_lock */
-+
-+ struct sgv_pool_cache_acc cache_acc[SGV_POOL_ELEMENTS];
-+
-+ struct delayed_work sgv_purge_work;
-+
-+ struct list_head sgv_active_pools_list_entry;
-+
-+ atomic_t big_alloc, big_pages, big_merged;
-+ atomic_t other_alloc, other_pages, other_merged;
-+
-+ atomic_t sgv_pool_ref;
-+
-+ int max_caches;
-+
-+	/* SCST_MAX_NAME + a few more bytes to match scst_user expectations */
-+ char cache_names[SGV_POOL_ELEMENTS][SCST_MAX_NAME + 10];
-+ char name[SCST_MAX_NAME + 10];
-+
-+ struct mm_struct *owner_mm;
-+
-+ struct list_head sgv_pools_list_entry;
-+
-+ struct kobject sgv_kobj;
-+
-+ /* sysfs release completion */
-+ struct completion *sgv_kobj_release_cmpl;
-+};
-+
-+static inline struct scatterlist *sgv_pool_sg(struct sgv_pool_obj *obj)
-+{
-+ return obj->sg_entries;
-+}
-+
-+int scst_sgv_pools_init(unsigned long mem_hwmark, unsigned long mem_lwmark);
-+void scst_sgv_pools_deinit(void);
-+
-+void scst_sgv_pool_use_norm(struct scst_tgt_dev *tgt_dev);
-+void scst_sgv_pool_use_norm_clust(struct scst_tgt_dev *tgt_dev);
-+void scst_sgv_pool_use_dma(struct scst_tgt_dev *tgt_dev);
-diff -uprN orig/linux-3.2/drivers/scst/scst_mem.c linux-3.2/drivers/scst/scst_mem.c
---- orig/linux-3.2/drivers/scst/scst_mem.c
-+++ linux-3.2/drivers/scst/scst_mem.c
-@@ -0,0 +1,2002 @@
-+/*
-+ * scst_mem.c
-+ *
-+ * Copyright (C) 2006 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <linux/slab.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/unistd.h>
-+#include <linux/string.h>
-+
-+#include <scst/scst.h>
-+#include "scst_priv.h"
-+#include "scst_mem.h"
-+
-+#define SGV_DEFAULT_PURGE_INTERVAL (60 * HZ)
-+#define SGV_MIN_SHRINK_INTERVAL (1 * HZ)
-+
-+/* Max pages freed from a pool per shrinking iteration */
-+#define MAX_PAGES_PER_POOL 50
-+
-+static struct sgv_pool *sgv_norm_clust_pool, *sgv_norm_pool, *sgv_dma_pool;
-+
-+static atomic_t sgv_pages_total = ATOMIC_INIT(0);
-+
-+/* Both read-only */
-+static int sgv_hi_wmk;
-+static int sgv_lo_wmk;
-+
-+static int sgv_max_local_pages, sgv_max_trans_pages;
-+
-+static DEFINE_SPINLOCK(sgv_pools_lock); /* inner lock for sgv_pool_lock! */
-+static DEFINE_MUTEX(sgv_pools_mutex);
-+
-+/* Both protected by sgv_pools_lock */
-+static struct sgv_pool *sgv_cur_purge_pool;
-+static LIST_HEAD(sgv_active_pools_list);
-+
-+static atomic_t sgv_releases_on_hiwmk = ATOMIC_INIT(0);
-+static atomic_t sgv_releases_on_hiwmk_failed = ATOMIC_INIT(0);
-+
-+static atomic_t sgv_other_total_alloc = ATOMIC_INIT(0);
-+
-+static struct shrinker sgv_shrinker;
-+
-+/*
-+ * Protected by sgv_pools_mutex AND sgv_pools_lock for writes,
-+ * either one for reads.
-+ */
-+static LIST_HEAD(sgv_pools_list);
-+
-+static struct kobject *scst_sgv_kobj;
-+static int scst_sgv_sysfs_create(struct sgv_pool *pool);
-+static void scst_sgv_sysfs_del(struct sgv_pool *pool);
-+
-+static inline bool sgv_pool_clustered(const struct sgv_pool *pool)
-+{
-+ return pool->clustering_type != sgv_no_clustering;
-+}
-+
-+void scst_sgv_pool_use_norm(struct scst_tgt_dev *tgt_dev)
-+{
-+ tgt_dev->gfp_mask = __GFP_NOWARN;
-+ tgt_dev->pool = sgv_norm_pool;
-+ clear_bit(SCST_TGT_DEV_CLUST_POOL, &tgt_dev->tgt_dev_flags);
-+}
-+
-+void scst_sgv_pool_use_norm_clust(struct scst_tgt_dev *tgt_dev)
-+{
-+ TRACE_MEM("%s", "Use clustering");
-+ tgt_dev->gfp_mask = __GFP_NOWARN;
-+ tgt_dev->pool = sgv_norm_clust_pool;
-+ set_bit(SCST_TGT_DEV_CLUST_POOL, &tgt_dev->tgt_dev_flags);
-+}
-+
-+void scst_sgv_pool_use_dma(struct scst_tgt_dev *tgt_dev)
-+{
-+ TRACE_MEM("%s", "Use ISA DMA memory");
-+ tgt_dev->gfp_mask = __GFP_NOWARN | GFP_DMA;
-+ tgt_dev->pool = sgv_dma_pool;
-+ clear_bit(SCST_TGT_DEV_CLUST_POOL, &tgt_dev->tgt_dev_flags);
-+}
-+
-+/* Must be no locks */
-+static void sgv_dtor_and_free(struct sgv_pool_obj *obj)
-+{
-+ struct sgv_pool *pool = obj->owner_pool;
-+
-+ TRACE_MEM("Destroying sgv obj %p", obj);
-+
-+ if (obj->sg_count != 0) {
-+ pool->alloc_fns.free_pages_fn(obj->sg_entries,
-+ obj->sg_count, obj->allocator_priv);
-+ }
-+ if (obj->sg_entries != obj->sg_entries_data) {
-+ if (obj->trans_tbl !=
-+ (struct trans_tbl_ent *)obj->sg_entries_data) {
-+ /* kfree() handles NULL parameter */
-+ kfree(obj->trans_tbl);
-+ obj->trans_tbl = NULL;
-+ }
-+ kfree(obj->sg_entries);
-+ }
-+
-+ kmem_cache_free(pool->caches[obj->cache_num], obj);
-+ return;
-+}
-+
-+/* Might be called under sgv_pool_lock */
-+static inline void sgv_del_from_active(struct sgv_pool *pool)
-+{
-+ struct list_head *next;
-+
-+ TRACE_MEM("Deleting sgv pool %p from the active list", pool);
-+
-+ spin_lock_bh(&sgv_pools_lock);
-+
-+ next = pool->sgv_active_pools_list_entry.next;
-+ list_del(&pool->sgv_active_pools_list_entry);
-+
-+ if (sgv_cur_purge_pool == pool) {
-+ TRACE_MEM("Sgv pool %p is sgv cur purge pool", pool);
-+
-+ if (next == &sgv_active_pools_list)
-+ next = next->next;
-+
-+ if (next == &sgv_active_pools_list) {
-+ sgv_cur_purge_pool = NULL;
-+ TRACE_MEM("%s", "Sgv active list now empty");
-+ } else {
-+ sgv_cur_purge_pool = list_entry(next, typeof(*pool),
-+ sgv_active_pools_list_entry);
-+ TRACE_MEM("New sgv cur purge pool %p",
-+ sgv_cur_purge_pool);
-+ }
-+ }
-+
-+ spin_unlock_bh(&sgv_pools_lock);
-+ return;
-+}
-+
-+/* Must be called under sgv_pool_lock held */
-+static void sgv_dec_cached_entries(struct sgv_pool *pool, int pages)
-+{
-+ pool->cached_entries--;
-+ pool->cached_pages -= pages;
-+
-+ if (pool->cached_entries == 0)
-+ sgv_del_from_active(pool);
-+
-+ return;
-+}
-+
-+/* Must be called under sgv_pool_lock held */
-+static void __sgv_purge_from_cache(struct sgv_pool_obj *obj)
-+{
-+ int pages = obj->pages;
-+ struct sgv_pool *pool = obj->owner_pool;
-+
-+ TRACE_MEM("Purging sgv obj %p from pool %p (new cached_entries %d)",
-+ obj, pool, pool->cached_entries-1);
-+
-+ list_del(&obj->sorted_recycling_list_entry);
-+ list_del(&obj->recycling_list_entry);
-+
-+ pool->inactive_cached_pages -= pages;
-+ sgv_dec_cached_entries(pool, pages);
-+
-+ atomic_sub(pages, &sgv_pages_total);
-+
-+ return;
-+}
-+
-+/* Must be called under sgv_pool_lock held */
-+static bool sgv_purge_from_cache(struct sgv_pool_obj *obj, int min_interval,
-+ unsigned long cur_time)
-+{
-+ EXTRACHECKS_BUG_ON(min_interval < 0);
-+
-+ TRACE_MEM("Checking if sgv obj %p should be purged (cur time %ld, "
-+ "obj time %ld, time to purge %ld)", obj, cur_time,
-+ obj->time_stamp, obj->time_stamp + min_interval);
-+
-+ if (time_after_eq(cur_time, (obj->time_stamp + min_interval))) {
-+ __sgv_purge_from_cache(obj);
-+ return true;
-+ }
-+ return false;
-+}
-+
-+/* No locks */
-+static int sgv_shrink_pool(struct sgv_pool *pool, int nr, int min_interval,
-+ unsigned long cur_time)
-+{
-+ int freed = 0;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MEM("Trying to shrink pool %p (nr %d, min_interval %d)",
-+ pool, nr, min_interval);
-+
-+ if (pool->purge_interval < 0) {
-+ TRACE_MEM("Not shrinkable pool %p, skipping", pool);
-+ goto out;
-+ }
-+
-+ spin_lock_bh(&pool->sgv_pool_lock);
-+
-+ while (!list_empty(&pool->sorted_recycling_list) &&
-+ (atomic_read(&sgv_pages_total) > sgv_lo_wmk)) {
-+ struct sgv_pool_obj *obj = list_entry(
-+ pool->sorted_recycling_list.next,
-+ struct sgv_pool_obj, sorted_recycling_list_entry);
-+
-+ if (sgv_purge_from_cache(obj, min_interval, cur_time)) {
-+ int pages = obj->pages;
-+
-+ freed += pages;
-+ nr -= pages;
-+
-+ TRACE_MEM("%d pages purged from pool %p (nr left %d, "
-+ "total freed %d)", pages, pool, nr, freed);
-+
-+ spin_unlock_bh(&pool->sgv_pool_lock);
-+ sgv_dtor_and_free(obj);
-+ spin_lock_bh(&pool->sgv_pool_lock);
-+ } else
-+ break;
-+
-+ if ((nr <= 0) || (freed >= MAX_PAGES_PER_POOL)) {
-+ if (freed >= MAX_PAGES_PER_POOL)
-+ TRACE_MEM("%d pages purged from pool %p, "
-+ "leaving", freed, pool);
-+ break;
-+ }
-+ }
-+
-+ spin_unlock_bh(&pool->sgv_pool_lock);
-+
-+out:
-+ TRACE_EXIT_RES(nr);
-+ return nr;
-+}
-+
-+/* No locks */
-+static int __sgv_shrink(int nr, int min_interval)
-+{
-+ struct sgv_pool *pool;
-+ unsigned long cur_time = jiffies;
-+ int prev_nr = nr;
-+ bool circle = false;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MEM("Trying to shrink %d pages from all sgv pools "
-+ "(min_interval %d)", nr, min_interval);
-+
-+ while (nr > 0) {
-+ struct list_head *next;
-+
-+ spin_lock_bh(&sgv_pools_lock);
-+
-+ pool = sgv_cur_purge_pool;
-+ if (pool == NULL) {
-+ if (list_empty(&sgv_active_pools_list)) {
-+ TRACE_MEM("%s", "Active pools list is empty");
-+ goto out_unlock;
-+ }
-+
-+ pool = list_entry(sgv_active_pools_list.next,
-+ typeof(*pool),
-+ sgv_active_pools_list_entry);
-+ }
-+ sgv_pool_get(pool);
-+
-+ next = pool->sgv_active_pools_list_entry.next;
-+ if (next == &sgv_active_pools_list) {
-+ if (circle && (prev_nr == nr)) {
-+ TRACE_MEM("Full circle done, but no progress, "
-+ "leaving (nr %d)", nr);
-+ goto out_unlock_put;
-+ }
-+ circle = true;
-+ prev_nr = nr;
-+
-+ next = next->next;
-+ }
-+
-+ sgv_cur_purge_pool = list_entry(next, typeof(*pool),
-+ sgv_active_pools_list_entry);
-+ TRACE_MEM("New cur purge pool %p", sgv_cur_purge_pool);
-+
-+ spin_unlock_bh(&sgv_pools_lock);
-+
-+ nr = sgv_shrink_pool(pool, nr, min_interval, cur_time);
-+
-+ sgv_pool_put(pool);
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(nr);
-+ return nr;
-+
-+out_unlock:
-+ spin_unlock_bh(&sgv_pools_lock);
-+ goto out;
-+
-+out_unlock_put:
-+ spin_unlock_bh(&sgv_pools_lock);
-+ sgv_pool_put(pool);
-+ goto out;
-+}
-+
-+static int sgv_shrink(struct shrinker *shrinker, struct shrink_control *sc)
-+{
-+ int nr = sc->nr_to_scan;
-+
-+ TRACE_ENTRY();
-+
-+ if (nr > 0) {
-+ nr = __sgv_shrink(nr, SGV_MIN_SHRINK_INTERVAL);
-+ TRACE_MEM("Left %d", nr);
-+ } else {
-+ struct sgv_pool *pool;
-+ int inactive_pages = 0;
-+
-+ spin_lock_bh(&sgv_pools_lock);
-+ list_for_each_entry(pool, &sgv_active_pools_list,
-+ sgv_active_pools_list_entry) {
-+ if (pool->purge_interval > 0)
-+ inactive_pages += pool->inactive_cached_pages;
-+ }
-+ spin_unlock_bh(&sgv_pools_lock);
-+
-+ nr = max((int)0, inactive_pages - sgv_lo_wmk);
-+ TRACE_MEM("Can free %d (total %d)", nr,
-+ atomic_read(&sgv_pages_total));
-+ }
-+
-+ TRACE_EXIT_RES(nr);
-+ return nr;
-+}
-+
-+static void sgv_purge_work_fn(struct delayed_work *work)
-+{
-+ unsigned long cur_time = jiffies;
-+ struct sgv_pool *pool = container_of(work, struct sgv_pool,
-+ sgv_purge_work);
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MEM("Purge work for pool %p", pool);
-+
-+ spin_lock_bh(&pool->sgv_pool_lock);
-+
-+ pool->purge_work_scheduled = false;
-+
-+ while (!list_empty(&pool->sorted_recycling_list)) {
-+ struct sgv_pool_obj *obj = list_entry(
-+ pool->sorted_recycling_list.next,
-+ struct sgv_pool_obj, sorted_recycling_list_entry);
-+
-+ if (sgv_purge_from_cache(obj, pool->purge_interval, cur_time)) {
-+ spin_unlock_bh(&pool->sgv_pool_lock);
-+ sgv_dtor_and_free(obj);
-+ spin_lock_bh(&pool->sgv_pool_lock);
-+ } else {
-+ /*
-+			 * Reschedule it for a full period, so we don't get
-+			 * here too often. In the worst case the shrinker
-+			 * will reclaim buffers more quickly.
-+ */
-+ TRACE_MEM("Rescheduling purge work for pool %p (delay "
-+ "%d HZ/%d sec)", pool, pool->purge_interval,
-+ pool->purge_interval/HZ);
-+ schedule_delayed_work(&pool->sgv_purge_work,
-+ pool->purge_interval);
-+ pool->purge_work_scheduled = true;
-+ break;
-+ }
-+ }
-+
-+ spin_unlock_bh(&pool->sgv_pool_lock);
-+
-+ TRACE_MEM("Leaving purge work for pool %p", pool);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int sgv_check_full_clustering(struct scatterlist *sg, int cur, int hint)
-+{
-+ int res = -1;
-+ int i = hint;
-+ unsigned long pfn_cur = page_to_pfn(sg_page(&sg[cur]));
-+ int len_cur = sg[cur].length;
-+ unsigned long pfn_cur_next = pfn_cur + (len_cur >> PAGE_SHIFT);
-+ int full_page_cur = (len_cur & (PAGE_SIZE - 1)) == 0;
-+ unsigned long pfn, pfn_next;
-+ bool full_page;
-+
-+#if 0
-+ TRACE_MEM("pfn_cur %ld, pfn_cur_next %ld, len_cur %d, full_page_cur %d",
-+ pfn_cur, pfn_cur_next, len_cur, full_page_cur);
-+#endif
-+
-+ /* check the hint first */
-+ if (i >= 0) {
-+ pfn = page_to_pfn(sg_page(&sg[i]));
-+ pfn_next = pfn + (sg[i].length >> PAGE_SHIFT);
-+ full_page = (sg[i].length & (PAGE_SIZE - 1)) == 0;
-+
-+ if ((pfn == pfn_cur_next) && full_page_cur)
-+ goto out_head;
-+
-+ if ((pfn_next == pfn_cur) && full_page)
-+ goto out_tail;
-+ }
-+
-+ /* ToDo: implement more intelligent search */
-+ for (i = cur - 1; i >= 0; i--) {
-+ pfn = page_to_pfn(sg_page(&sg[i]));
-+ pfn_next = pfn + (sg[i].length >> PAGE_SHIFT);
-+ full_page = (sg[i].length & (PAGE_SIZE - 1)) == 0;
-+
-+ if ((pfn == pfn_cur_next) && full_page_cur)
-+ goto out_head;
-+
-+ if ((pfn_next == pfn_cur) && full_page)
-+ goto out_tail;
-+ }
-+
-+out:
-+ return res;
-+
-+out_tail:
-+ TRACE_MEM("SG segment %d will be tail merged with segment %d", cur, i);
-+ sg[i].length += len_cur;
-+ sg_clear(&sg[cur]);
-+ res = i;
-+ goto out;
-+
-+out_head:
-+ TRACE_MEM("SG segment %d will be head merged with segment %d", cur, i);
-+ sg_assign_page(&sg[i], sg_page(&sg[cur]));
-+ sg[i].length += len_cur;
-+ sg_clear(&sg[cur]);
-+ res = i;
-+ goto out;
-+}
-+
-+static int sgv_check_tail_clustering(struct scatterlist *sg, int cur, int hint)
-+{
-+ int res = -1;
-+ unsigned long pfn_cur = page_to_pfn(sg_page(&sg[cur]));
-+ int len_cur = sg[cur].length;
-+ int prev;
-+ unsigned long pfn_prev;
-+ bool full_page;
-+
-+#ifdef SCST_HIGHMEM
-+ if (page >= highmem_start_page) {
-+ TRACE_MEM("%s", "HIGHMEM page allocated, no clustering")
-+ goto out;
-+ }
-+#endif
-+
-+#if 0
-+ TRACE_MEM("pfn_cur %ld, pfn_cur_next %ld, len_cur %d, full_page_cur %d",
-+ pfn_cur, pfn_cur_next, len_cur, full_page_cur);
-+#endif
-+
-+ if (cur == 0)
-+ goto out;
-+
-+ prev = cur - 1;
-+ pfn_prev = page_to_pfn(sg_page(&sg[prev])) +
-+ (sg[prev].length >> PAGE_SHIFT);
-+ full_page = (sg[prev].length & (PAGE_SIZE - 1)) == 0;
-+
-+ if ((pfn_prev == pfn_cur) && full_page) {
-+ TRACE_MEM("SG segment %d will be tail merged with segment %d",
-+ cur, prev);
-+ sg[prev].length += len_cur;
-+ sg_clear(&sg[cur]);
-+ res = prev;
-+ }
-+
-+out:
-+ return res;
-+}
-+
-+static void sgv_free_sys_sg_entries(struct scatterlist *sg, int sg_count,
-+ void *priv)
-+{
-+ int i;
-+
-+ TRACE_MEM("sg=%p, sg_count=%d", sg, sg_count);
-+
-+ for (i = 0; i < sg_count; i++) {
-+ struct page *p = sg_page(&sg[i]);
-+ int len = sg[i].length;
-+ int pages =
-+ (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);
-+
-+ TRACE_MEM("page %lx, len %d, pages %d",
-+ (unsigned long)p, len, pages);
-+
-+ while (pages > 0) {
-+ int order = 0;
-+
-+ TRACE_MEM("free_pages(): order %d, page %lx",
-+ order, (unsigned long)p);
-+
-+ __free_pages(p, order);
-+
-+ pages -= 1 << order;
-+ p += 1 << order;
-+ }
-+ }
-+}
-+
-+static struct page *sgv_alloc_sys_pages(struct scatterlist *sg,
-+ gfp_t gfp_mask, void *priv)
-+{
-+ struct page *page = alloc_pages(gfp_mask, 0);
-+
-+ sg_set_page(sg, page, PAGE_SIZE, 0);
-+ TRACE_MEM("page=%p, sg=%p, priv=%p", page, sg, priv);
-+ if (page == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "%s", "Allocation of "
-+ "sg page failed");
-+ }
-+ return page;
-+}
-+
-+static int sgv_alloc_sg_entries(struct scatterlist *sg, int pages,
-+ gfp_t gfp_mask, enum sgv_clustering_types clustering_type,
-+ struct trans_tbl_ent *trans_tbl,
-+ const struct sgv_pool_alloc_fns *alloc_fns, void *priv)
-+{
-+ int sg_count = 0;
-+ int pg, i, j;
-+ int merged = -1;
-+
-+ TRACE_MEM("pages=%d, clustering_type=%d", pages, clustering_type);
-+
-+#if 0
-+ gfp_mask |= __GFP_COLD;
-+#endif
-+#ifdef CONFIG_SCST_STRICT_SECURITY
-+ gfp_mask |= __GFP_ZERO;
-+#endif
-+
-+ for (pg = 0; pg < pages; pg++) {
-+ void *rc;
-+#ifdef CONFIG_SCST_DEBUG_OOM
-+ if (((gfp_mask & __GFP_NOFAIL) != __GFP_NOFAIL) &&
-+ ((scst_random() % 10000) == 55))
-+ rc = NULL;
-+ else
-+#endif
-+ rc = alloc_fns->alloc_pages_fn(&sg[sg_count], gfp_mask,
-+ priv);
-+ if (rc == NULL)
-+ goto out_no_mem;
-+
-+ /*
-+ * This code allows compiler to see full body of the clustering
-+ * functions and gives it a chance to generate better code.
-+ * At least, the resulting code is smaller, comparing to
-+ * calling them using a function pointer.
-+ */
-+ if (clustering_type == sgv_full_clustering)
-+ merged = sgv_check_full_clustering(sg, sg_count, merged);
-+ else if (clustering_type == sgv_tail_clustering)
-+ merged = sgv_check_tail_clustering(sg, sg_count, merged);
-+ else
-+ merged = -1;
-+
-+ if (merged == -1)
-+ sg_count++;
-+
-+ TRACE_MEM("pg=%d, merged=%d, sg_count=%d", pg, merged,
-+ sg_count);
-+ }
-+
-+ if ((clustering_type != sgv_no_clustering) && (trans_tbl != NULL)) {
-+ pg = 0;
-+ for (i = 0; i < pages; i++) {
-+ int n = (sg[i].length >> PAGE_SHIFT) +
-+ ((sg[i].length & ~PAGE_MASK) != 0);
-+ trans_tbl[i].pg_count = pg;
-+ for (j = 0; j < n; j++)
-+ trans_tbl[pg++].sg_num = i+1;
-+ TRACE_MEM("i=%d, n=%d, pg_count=%d", i, n,
-+ trans_tbl[i].pg_count);
-+ }
-+ }
-+
-+out:
-+ TRACE_MEM("sg_count=%d", sg_count);
-+ return sg_count;
-+
-+out_no_mem:
-+ alloc_fns->free_pages_fn(sg, sg_count, priv);
-+ sg_count = 0;
-+ goto out;
-+}
-+
-+static int sgv_alloc_arrays(struct sgv_pool_obj *obj,
-+ int pages_to_alloc, gfp_t gfp_mask)
-+{
-+ int sz, tsz = 0;
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ sz = pages_to_alloc * sizeof(obj->sg_entries[0]);
-+
-+ obj->sg_entries = kmalloc(sz, gfp_mask);
-+ if (unlikely(obj->sg_entries == NULL)) {
-+ TRACE(TRACE_OUT_OF_MEM, "Allocation of sgv_pool_obj "
-+ "SG vector failed (size %d)", sz);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ sg_init_table(obj->sg_entries, pages_to_alloc);
-+
-+ if (sgv_pool_clustered(obj->owner_pool)) {
-+ if (pages_to_alloc <= sgv_max_trans_pages) {
-+ obj->trans_tbl =
-+ (struct trans_tbl_ent *)obj->sg_entries_data;
-+ /*
-+ * No need to clear trans_tbl, if needed, it will be
-+ * fully rewritten in sgv_alloc_sg_entries()
-+ */
-+ } else {
-+ tsz = pages_to_alloc * sizeof(obj->trans_tbl[0]);
-+ obj->trans_tbl = kzalloc(tsz, gfp_mask);
-+ if (unlikely(obj->trans_tbl == NULL)) {
-+ TRACE(TRACE_OUT_OF_MEM, "Allocation of "
-+ "trans_tbl failed (size %d)", tsz);
-+ res = -ENOMEM;
-+ goto out_free;
-+ }
-+ }
-+ }
-+
-+ TRACE_MEM("pages_to_alloc %d, sz %d, tsz %d, obj %p, sg_entries %p, "
-+ "trans_tbl %p", pages_to_alloc, sz, tsz, obj, obj->sg_entries,
-+ obj->trans_tbl);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free:
-+ kfree(obj->sg_entries);
-+ obj->sg_entries = NULL;
-+ goto out;
-+}
-+
-+static struct sgv_pool_obj *sgv_get_obj(struct sgv_pool *pool, int cache_num,
-+ int pages, gfp_t gfp_mask, bool get_new)
-+{
-+ struct sgv_pool_obj *obj;
-+
-+ spin_lock_bh(&pool->sgv_pool_lock);
-+
-+ if (unlikely(get_new)) {
-+ /* Used only for buffers preallocation */
-+ goto get_new;
-+ }
-+
-+ if (likely(!list_empty(&pool->recycling_lists[cache_num]))) {
-+ obj = list_entry(pool->recycling_lists[cache_num].next,
-+ struct sgv_pool_obj, recycling_list_entry);
-+
-+ list_del(&obj->sorted_recycling_list_entry);
-+ list_del(&obj->recycling_list_entry);
-+
-+ pool->inactive_cached_pages -= pages;
-+
-+ spin_unlock_bh(&pool->sgv_pool_lock);
-+ goto out;
-+ }
-+
-+get_new:
-+ if (pool->cached_entries == 0) {
-+ TRACE_MEM("Adding pool %p to the active list", pool);
-+ spin_lock_bh(&sgv_pools_lock);
-+ list_add_tail(&pool->sgv_active_pools_list_entry,
-+ &sgv_active_pools_list);
-+ spin_unlock_bh(&sgv_pools_lock);
-+ }
-+
-+ pool->cached_entries++;
-+ pool->cached_pages += pages;
-+
-+ spin_unlock_bh(&pool->sgv_pool_lock);
-+
-+ TRACE_MEM("New cached entries %d (pool %p)", pool->cached_entries,
-+ pool);
-+
-+ obj = kmem_cache_alloc(pool->caches[cache_num],
-+ gfp_mask & ~(__GFP_HIGHMEM|GFP_DMA));
-+ if (likely(obj)) {
-+ memset(obj, 0, sizeof(*obj));
-+ obj->cache_num = cache_num;
-+ obj->pages = pages;
-+ obj->owner_pool = pool;
-+ } else {
-+ spin_lock_bh(&pool->sgv_pool_lock);
-+ sgv_dec_cached_entries(pool, pages);
-+ spin_unlock_bh(&pool->sgv_pool_lock);
-+ }
-+
-+out:
-+ return obj;
-+}
-+
-+static void sgv_put_obj(struct sgv_pool_obj *obj)
-+{
-+ struct sgv_pool *pool = obj->owner_pool;
-+ struct list_head *entry;
-+ struct list_head *list = &pool->recycling_lists[obj->cache_num];
-+ int pages = obj->pages;
-+
-+ spin_lock_bh(&pool->sgv_pool_lock);
-+
-+ TRACE_MEM("sgv %p, cache num %d, pages %d, sg_count %d", obj,
-+ obj->cache_num, pages, obj->sg_count);
-+
-+ if (sgv_pool_clustered(pool)) {
-+ /* Prefer objects with fewer SG entries */
-+ __list_for_each(entry, list) {
-+ struct sgv_pool_obj *tmp = list_entry(entry,
-+ struct sgv_pool_obj, recycling_list_entry);
-+
-+ TRACE_MEM("tmp %p, cache num %d, pages %d, sg_count %d",
-+ tmp, tmp->cache_num, tmp->pages, tmp->sg_count);
-+
-+ if (obj->sg_count <= tmp->sg_count)
-+ break;
-+ }
-+ entry = entry->prev;
-+ } else
-+ entry = list;
-+
-+ TRACE_MEM("Adding in %p (list %p)", entry, list);
-+ list_add(&obj->recycling_list_entry, entry);
-+
-+ list_add_tail(&obj->sorted_recycling_list_entry,
-+ &pool->sorted_recycling_list);
-+
-+ obj->time_stamp = jiffies;
-+
-+ pool->inactive_cached_pages += pages;
-+
-+ if (!pool->purge_work_scheduled) {
-+ TRACE_MEM("Scheduling purge work for pool %p", pool);
-+ pool->purge_work_scheduled = true;
-+ schedule_delayed_work(&pool->sgv_purge_work,
-+ pool->purge_interval);
-+ }
-+
-+ spin_unlock_bh(&pool->sgv_pool_lock);
-+ return;
-+}
-+
-+/* No locks */
-+static int sgv_hiwmk_check(int pages_to_alloc)
-+{
-+ int res = 0;
-+ int pages = pages_to_alloc;
-+
-+ pages += atomic_read(&sgv_pages_total);
-+
-+ if (unlikely(pages > sgv_hi_wmk)) {
-+ pages -= sgv_hi_wmk;
-+ atomic_inc(&sgv_releases_on_hiwmk);
-+
-+ pages = __sgv_shrink(pages, 0);
-+ if (pages > 0) {
-+ TRACE(TRACE_OUT_OF_MEM, "Requested amount of "
-+ "memory (%d pages) for being executed "
-+ "commands together with the already "
-+ "allocated memory exceeds the allowed "
-+ "maximum %d. Should you increase "
-+ "scst_max_cmd_mem?", pages_to_alloc,
-+ sgv_hi_wmk);
-+ atomic_inc(&sgv_releases_on_hiwmk_failed);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ }
-+
-+ atomic_add(pages_to_alloc, &sgv_pages_total);
-+
-+out:
-+ TRACE_MEM("pages_to_alloc %d, new total %d", pages_to_alloc,
-+ atomic_read(&sgv_pages_total));
-+
-+ return res;
-+}
-+
-+/* No locks */
-+static void sgv_hiwmk_uncheck(int pages)
-+{
-+ atomic_sub(pages, &sgv_pages_total);
-+ TRACE_MEM("pages %d, new total %d", pages,
-+ atomic_read(&sgv_pages_total));
-+ return;
-+}
-+
-+/* No locks */
-+static bool sgv_check_allowed_mem(struct scst_mem_lim *mem_lim, int pages)
-+{
-+ int alloced;
-+ bool res = true;
-+
-+ alloced = atomic_add_return(pages, &mem_lim->alloced_pages);
-+ if (unlikely(alloced > mem_lim->max_allowed_pages)) {
-+ TRACE(TRACE_OUT_OF_MEM, "Requested amount of memory "
-+ "(%d pages) for being executed commands on a device "
-+ "together with the already allocated memory exceeds "
-+ "the allowed maximum %d. Should you increase "
-+ "scst_max_dev_cmd_mem?", pages,
-+ mem_lim->max_allowed_pages);
-+ atomic_sub(pages, &mem_lim->alloced_pages);
-+ res = false;
-+ }
-+
-+ TRACE_MEM("mem_lim %p, pages %d, res %d, new alloced %d", mem_lim,
-+ pages, res, atomic_read(&mem_lim->alloced_pages));
-+
-+ return res;
-+}
-+
-+/* No locks */
-+static void sgv_uncheck_allowed_mem(struct scst_mem_lim *mem_lim, int pages)
-+{
-+ atomic_sub(pages, &mem_lim->alloced_pages);
-+
-+ TRACE_MEM("mem_lim %p, pages %d, new alloced %d", mem_lim,
-+ pages, atomic_read(&mem_lim->alloced_pages));
-+ return;
-+}
-+
-+/**
-+ * sgv_pool_alloc - allocate an SG vector from the SGV pool
-+ * @pool: the cache to alloc from
-+ * @size: size of the resulting SG vector in bytes
-+ * @gfp_mask: the allocation mask
-+ * @flags: the allocation flags
-+ * @count: the resulting number of SG entries in the SG vector
-+ * @sgv: the resulting SGV object
-+ * @mem_lim: memory limits
-+ * @priv: pointer to private data for this allocation
-+ *
-+ * Description:
-+ * Allocates an SG vector from the SGV pool and returns a pointer to it,
-+ * or NULL in case of error. See the SGV pool documentation for more details.
-+ */
-+struct scatterlist *sgv_pool_alloc(struct sgv_pool *pool, unsigned int size,
-+ gfp_t gfp_mask, int flags, int *count,
-+ struct sgv_pool_obj **sgv, struct scst_mem_lim *mem_lim, void *priv)
-+{
-+ struct sgv_pool_obj *obj;
-+ int cache_num, pages, cnt;
-+ struct scatterlist *res = NULL;
-+ int pages_to_alloc;
-+ int no_cached = flags & SGV_POOL_ALLOC_NO_CACHED;
-+ bool allowed_mem_checked = false, hiwmk_checked = false;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(size == 0))
-+ goto out;
-+
-+ EXTRACHECKS_BUG_ON((gfp_mask & __GFP_NOFAIL) == __GFP_NOFAIL);
-+
-+ pages = ((size + PAGE_SIZE - 1) >> PAGE_SHIFT);
-+ if (pool->single_alloc_pages == 0) {
-+ int pages_order = get_order(size);
-+ cache_num = pages_order;
-+ pages_to_alloc = (1 << pages_order);
-+ } else {
-+ cache_num = 0;
-+ pages_to_alloc = max(pool->single_alloc_pages, pages);
-+ }
-+
-+ TRACE_MEM("size=%d, pages=%d, pages_to_alloc=%d, cache num=%d, "
-+ "flags=%x, no_cached=%d, *sgv=%p", size, pages,
-+ pages_to_alloc, cache_num, flags, no_cached, *sgv);
-+
-+ if (*sgv != NULL) {
-+ obj = *sgv;
-+
-+ TRACE_MEM("Supplied obj %p, cache num %d", obj, obj->cache_num);
-+
-+ EXTRACHECKS_BUG_ON(obj->sg_count != 0);
-+
-+ if (unlikely(!sgv_check_allowed_mem(mem_lim, pages_to_alloc)))
-+ goto out_fail_free_sg_entries;
-+ allowed_mem_checked = true;
-+
-+ if (unlikely(sgv_hiwmk_check(pages_to_alloc) != 0))
-+ goto out_fail_free_sg_entries;
-+ hiwmk_checked = true;
-+ } else if ((pages_to_alloc <= pool->max_cached_pages) && !no_cached) {
-+ if (unlikely(!sgv_check_allowed_mem(mem_lim, pages_to_alloc)))
-+ goto out_fail;
-+ allowed_mem_checked = true;
-+
-+ obj = sgv_get_obj(pool, cache_num, pages_to_alloc, gfp_mask,
-+ flags & SGV_POOL_ALLOC_GET_NEW);
-+ if (unlikely(obj == NULL)) {
-+ TRACE(TRACE_OUT_OF_MEM, "Allocation of "
-+ "sgv_pool_obj failed (size %d)", size);
-+ goto out_fail;
-+ }
-+
-+ if (obj->sg_count != 0) {
-+ TRACE_MEM("Cached obj %p", obj);
-+ atomic_inc(&pool->cache_acc[cache_num].hit_alloc);
-+ goto success;
-+ }
-+
-+ if (flags & SGV_POOL_NO_ALLOC_ON_CACHE_MISS) {
-+ if (!(flags & SGV_POOL_RETURN_OBJ_ON_ALLOC_FAIL))
-+ goto out_fail_free;
-+ }
-+
-+ TRACE_MEM("Brand new obj %p", obj);
-+
-+ if (pages_to_alloc <= sgv_max_local_pages) {
-+ obj->sg_entries = obj->sg_entries_data;
-+ sg_init_table(obj->sg_entries, pages_to_alloc);
-+ TRACE_MEM("sg_entries %p", obj->sg_entries);
-+ if (sgv_pool_clustered(pool)) {
-+ obj->trans_tbl = (struct trans_tbl_ent *)
-+ (obj->sg_entries + pages_to_alloc);
-+ TRACE_MEM("trans_tbl %p", obj->trans_tbl);
-+ /*
-+ * No need to clear trans_tbl, if needed, it
-+ * will be fully rewritten in
-+ * sgv_alloc_sg_entries().
-+ */
-+ }
-+ } else {
-+ if (unlikely(sgv_alloc_arrays(obj, pages_to_alloc,
-+ gfp_mask) != 0))
-+ goto out_fail_free;
-+ }
-+
-+ if ((flags & SGV_POOL_NO_ALLOC_ON_CACHE_MISS) &&
-+ (flags & SGV_POOL_RETURN_OBJ_ON_ALLOC_FAIL))
-+ goto out_return;
-+
-+ obj->allocator_priv = priv;
-+
-+ if (unlikely(sgv_hiwmk_check(pages_to_alloc) != 0))
-+ goto out_fail_free_sg_entries;
-+ hiwmk_checked = true;
-+ } else {
-+ int sz;
-+
-+ pages_to_alloc = pages;
-+
-+ if (unlikely(!sgv_check_allowed_mem(mem_lim, pages_to_alloc)))
-+ goto out_fail;
-+ allowed_mem_checked = true;
-+
-+ if (flags & SGV_POOL_NO_ALLOC_ON_CACHE_MISS)
-+ goto out_return2;
-+
-+ sz = sizeof(*obj) + pages * sizeof(obj->sg_entries[0]);
-+
-+ obj = kmalloc(sz, gfp_mask);
-+ if (unlikely(obj == NULL)) {
-+ TRACE(TRACE_OUT_OF_MEM, "Allocation of "
-+ "sgv_pool_obj failed (size %d)", size);
-+ goto out_fail;
-+ }
-+ memset(obj, 0, sizeof(*obj));
-+
-+ obj->owner_pool = pool;
-+ cache_num = -1;
-+ obj->cache_num = cache_num;
-+ obj->pages = pages_to_alloc;
-+ obj->allocator_priv = priv;
-+
-+ obj->sg_entries = obj->sg_entries_data;
-+ sg_init_table(obj->sg_entries, pages);
-+
-+ if (unlikely(sgv_hiwmk_check(pages_to_alloc) != 0))
-+ goto out_fail_free_sg_entries;
-+ hiwmk_checked = true;
-+
-+ TRACE_MEM("Big or no_cached obj %p (size %d)", obj, sz);
-+ }
-+
-+ obj->sg_count = sgv_alloc_sg_entries(obj->sg_entries,
-+ pages_to_alloc, gfp_mask, pool->clustering_type,
-+ obj->trans_tbl, &pool->alloc_fns, priv);
-+ if (unlikely(obj->sg_count <= 0)) {
-+ obj->sg_count = 0;
-+ if ((flags & SGV_POOL_RETURN_OBJ_ON_ALLOC_FAIL) &&
-+ (cache_num >= 0))
-+ goto out_return1;
-+ else
-+ goto out_fail_free_sg_entries;
-+ }
-+
-+ if (cache_num >= 0) {
-+ atomic_add(pages_to_alloc - obj->sg_count,
-+ &pool->cache_acc[cache_num].merged);
-+ } else {
-+ if (no_cached) {
-+ atomic_add(pages_to_alloc,
-+ &pool->other_pages);
-+ atomic_add(pages_to_alloc - obj->sg_count,
-+ &pool->other_merged);
-+ } else {
-+ atomic_add(pages_to_alloc,
-+ &pool->big_pages);
-+ atomic_add(pages_to_alloc - obj->sg_count,
-+ &pool->big_merged);
-+ }
-+ }
-+
-+success:
-+ if (cache_num >= 0) {
-+ int sg;
-+ atomic_inc(&pool->cache_acc[cache_num].total_alloc);
-+ if (sgv_pool_clustered(pool))
-+ cnt = obj->trans_tbl[pages-1].sg_num;
-+ else
-+ cnt = pages;
-+ sg = cnt-1;
-+ obj->orig_sg = sg;
-+ obj->orig_length = obj->sg_entries[sg].length;
-+ if (sgv_pool_clustered(pool)) {
-+ obj->sg_entries[sg].length =
-+ (pages - obj->trans_tbl[sg].pg_count) << PAGE_SHIFT;
-+ }
-+ } else {
-+ cnt = obj->sg_count;
-+ if (no_cached)
-+ atomic_inc(&pool->other_alloc);
-+ else
-+ atomic_inc(&pool->big_alloc);
-+ }
-+
-+ *count = cnt;
-+ res = obj->sg_entries;
-+ *sgv = obj;
-+
-+ if (size & ~PAGE_MASK)
-+ obj->sg_entries[cnt-1].length -=
-+ PAGE_SIZE - (size & ~PAGE_MASK);
-+
-+ TRACE_MEM("obj=%p, sg_entries %p (size=%d, pages=%d, sg_count=%d, "
-+ "count=%d, last_len=%d)", obj, obj->sg_entries, size, pages,
-+ obj->sg_count, *count, obj->sg_entries[obj->orig_sg].length);
-+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+
-+out_return:
-+ obj->allocator_priv = priv;
-+ obj->owner_pool = pool;
-+
-+out_return1:
-+ *sgv = obj;
-+ TRACE_MEM("Returning failed obj %p (count %d)", obj, *count);
-+
-+out_return2:
-+ *count = pages_to_alloc;
-+ res = NULL;
-+ goto out_uncheck;
-+
-+out_fail_free_sg_entries:
-+ if (obj->sg_entries != obj->sg_entries_data) {
-+ if (obj->trans_tbl !=
-+ (struct trans_tbl_ent *)obj->sg_entries_data) {
-+ /* kfree() handles NULL parameter */
-+ kfree(obj->trans_tbl);
-+ obj->trans_tbl = NULL;
-+ }
-+ kfree(obj->sg_entries);
-+ obj->sg_entries = NULL;
-+ }
-+
-+out_fail_free:
-+ if (cache_num >= 0) {
-+ spin_lock_bh(&pool->sgv_pool_lock);
-+ sgv_dec_cached_entries(pool, pages_to_alloc);
-+ spin_unlock_bh(&pool->sgv_pool_lock);
-+
-+ kmem_cache_free(pool->caches[obj->cache_num], obj);
-+ } else
-+ kfree(obj);
-+
-+out_fail:
-+ res = NULL;
-+ *count = 0;
-+ *sgv = NULL;
-+ TRACE_MEM("%s", "Allocation failed");
-+
-+out_uncheck:
-+ if (hiwmk_checked)
-+ sgv_hiwmk_uncheck(pages_to_alloc);
-+ if (allowed_mem_checked)
-+ sgv_uncheck_allowed_mem(mem_lim, pages_to_alloc);
-+ goto out;
-+}
-+EXPORT_SYMBOL_GPL(sgv_pool_alloc);
-+
-+/**
-+ * sgv_get_priv - return the private allocation data
-+ *
-+ * Returns the allocation private data for this SGV cache object.
-+ * The private data is supposed to be set via sgv_pool_alloc().
-+ */
-+void *sgv_get_priv(struct sgv_pool_obj *obj)
-+{
-+ return obj->allocator_priv;
-+}
-+EXPORT_SYMBOL_GPL(sgv_get_priv);
-+
-+/**
-+ * sgv_pool_free - free previously allocated SG vector
-+ * @sgv: the SGV object to free
-+ * @mem_lim: memory limits
-+ *
-+ * Description:
-+ * Frees a previously allocated SG vector and updates the memory limits
-+ */
-+void sgv_pool_free(struct sgv_pool_obj *obj, struct scst_mem_lim *mem_lim)
-+{
-+ int pages = (obj->sg_count != 0) ? obj->pages : 0;
-+
-+ TRACE_MEM("Freeing obj %p, cache num %d, pages %d, sg_entries %p, "
-+ "sg_count %d, allocator_priv %p", obj, obj->cache_num, pages,
-+ obj->sg_entries, obj->sg_count, obj->allocator_priv);
-+
-+/*
-+ * Enable this if you are investigating data corruption and want to make
-+ * sure that the target or dev handler didn't leave the pages mapped
-+ * somewhere, thereby provoking the corruption.
-+ *
-+ * Make sure the check value for _count is set correctly. In most cases, 1 is
-+ * correct, but, e.g., iSCSI-SCST can call it with value 2, because
-+ * it frees the corresponding cmd before the last put_page() call from
-+ * net_put_page() for the last page in the SG. Also, user space dev handlers
-+ * usually have their memory mapped in their address space.
-+ */
-+#if 0
-+ {
-+ struct scatterlist *sg = obj->sg_entries;
-+ int i;
-+ for (i = 0; i < obj->sg_count; i++) {
-+ struct page *p = sg_page(&sg[i]);
-+ int len = sg[i].length;
-+ int pages = (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);
-+ while (pages > 0) {
-+ if (atomic_read(&p->_count) != 1) {
-+ PRINT_WARNING("Freeing page %p with "
-+ "additional owners (_count %d). "
-+ "Data corruption possible!",
-+ p, atomic_read(&p->_count));
-+ WARN_ON(1);
-+ }
-+ pages--;
-+ p++;
-+ }
-+ }
-+ }
-+#endif
-+
-+ if (obj->cache_num >= 0) {
-+ obj->sg_entries[obj->orig_sg].length = obj->orig_length;
-+ sgv_put_obj(obj);
-+ } else {
-+ obj->owner_pool->alloc_fns.free_pages_fn(obj->sg_entries,
-+ obj->sg_count, obj->allocator_priv);
-+ kfree(obj);
-+ sgv_hiwmk_uncheck(pages);
-+ }
-+
-+ sgv_uncheck_allowed_mem(mem_lim, pages);
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(sgv_pool_free);
-+
-+/**
-+ * scst_alloc() - allocates an SG vector
-+ *
-+ * Allocates an SG vector with data size "size" and returns a pointer
-+ * to it. The number of entries in the vector is returned in *count.
-+ * Returns NULL on failure.
-+ */
-+struct scatterlist *scst_alloc(int size, gfp_t gfp_mask, int *count)
-+{
-+ struct scatterlist *res;
-+ int pages = (size >> PAGE_SHIFT) + ((size & ~PAGE_MASK) != 0);
-+ struct sgv_pool_alloc_fns sys_alloc_fns = {
-+ sgv_alloc_sys_pages, sgv_free_sys_sg_entries };
-+ int no_fail = ((gfp_mask & __GFP_NOFAIL) == __GFP_NOFAIL);
-+ int cnt;
-+
-+ TRACE_ENTRY();
-+
-+ atomic_inc(&sgv_other_total_alloc);
-+
-+ if (unlikely(sgv_hiwmk_check(pages) != 0)) {
-+ if (!no_fail) {
-+ res = NULL;
-+ goto out;
-+ } else {
-+ /*
-+ * Account the pages anyway, since the allocation can't
-+ * fail. If they weren't accounted, the sgv_pages_total
-+ * counter would go below 0 on free.
-+ */
-+ sgv_hiwmk_uncheck(-pages);
-+ }
-+ }
-+
-+ res = kmalloc(pages*sizeof(*res), gfp_mask);
-+ if (res == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "Unable to allocate sg for %d pages",
-+ pages);
-+ goto out_uncheck;
-+ }
-+
-+ sg_init_table(res, pages);
-+
-+ /*
-+ * If clustering were allowed here, scst_free() would have trouble
-+ * figuring out how many pages are in the SG vector. So never use
-+ * clustering here.
-+ */
-+ cnt = sgv_alloc_sg_entries(res, pages, gfp_mask, sgv_no_clustering,
-+ NULL, &sys_alloc_fns, NULL);
-+ if (cnt <= 0)
-+ goto out_free;
-+
-+ if (size & ~PAGE_MASK)
-+ res[cnt-1].length -= PAGE_SIZE - (size & ~PAGE_MASK);
-+
-+ *count = cnt;
-+
-+out:
-+ TRACE_MEM("Alloced sg %p (count %d, no_fail %d)", res, *count, no_fail);
-+
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+
-+out_free:
-+ kfree(res);
-+ res = NULL;
-+
-+out_uncheck:
-+ if (!no_fail)
-+ sgv_hiwmk_uncheck(pages);
-+ goto out;
-+}
-+EXPORT_SYMBOL_GPL(scst_alloc);
-+
-+/**
-+ * scst_free() - frees SG vector
-+ *
-+ * Frees SG vector returned by scst_alloc().
-+ */
-+void scst_free(struct scatterlist *sg, int count)
-+{
-+ TRACE_MEM("Freeing sg=%p", sg);
-+
-+ sgv_hiwmk_uncheck(count);
-+
-+ sgv_free_sys_sg_entries(sg, count, NULL);
-+ kfree(sg);
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(scst_free);
-+
-+/* Must be called under sgv_pools_mutex */
-+static void sgv_pool_init_cache(struct sgv_pool *pool, int cache_num)
-+{
-+ int size;
-+ int pages;
-+ struct sgv_pool_obj *obj;
-+
-+ atomic_set(&pool->cache_acc[cache_num].total_alloc, 0);
-+ atomic_set(&pool->cache_acc[cache_num].hit_alloc, 0);
-+ atomic_set(&pool->cache_acc[cache_num].merged, 0);
-+
-+ if (pool->single_alloc_pages == 0)
-+ pages = 1 << cache_num;
-+ else
-+ pages = pool->single_alloc_pages;
-+
-+ if (pages <= sgv_max_local_pages) {
-+ size = sizeof(*obj) + pages *
-+ (sizeof(obj->sg_entries[0]) +
-+ ((pool->clustering_type != sgv_no_clustering) ?
-+ sizeof(obj->trans_tbl[0]) : 0));
-+ } else if (pages <= sgv_max_trans_pages) {
-+ /*
-+ * sg_entries is allocated outside object,
-+ * but trans_tbl is still embedded.
-+ */
-+ size = sizeof(*obj) + pages *
-+ (((pool->clustering_type != sgv_no_clustering) ?
-+ sizeof(obj->trans_tbl[0]) : 0));
-+ } else {
-+ size = sizeof(*obj);
-+ /* both sgv and trans_tbl are kmalloc'ed() */
-+ }
-+
-+ TRACE_MEM("pages=%d, size=%d", pages, size);
-+
-+ scnprintf(pool->cache_names[cache_num],
-+ sizeof(pool->cache_names[cache_num]),
-+ "%s-%uK", pool->name, (pages << PAGE_SHIFT) >> 10);
-+ pool->caches[cache_num] = kmem_cache_create(
-+ pool->cache_names[cache_num], size, 0, SCST_SLAB_FLAGS, NULL
-+ );
-+ return;
-+}
-+
-+/* Must be called under sgv_pools_mutex */
-+static int sgv_pool_init(struct sgv_pool *pool, const char *name,
-+ enum sgv_clustering_types clustering_type, int single_alloc_pages,
-+ int purge_interval)
-+{
-+ int res = -ENOMEM;
-+ int i;
-+
-+ TRACE_ENTRY();
-+
-+ if (single_alloc_pages < 0) {
-+ PRINT_ERROR("Wrong single_alloc_pages value %d",
-+ single_alloc_pages);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ memset(pool, 0, sizeof(*pool));
-+
-+ atomic_set(&pool->big_alloc, 0);
-+ atomic_set(&pool->big_pages, 0);
-+ atomic_set(&pool->big_merged, 0);
-+ atomic_set(&pool->other_alloc, 0);
-+ atomic_set(&pool->other_pages, 0);
-+ atomic_set(&pool->other_merged, 0);
-+
-+ pool->clustering_type = clustering_type;
-+ pool->single_alloc_pages = single_alloc_pages;
-+ if (purge_interval != 0) {
-+ pool->purge_interval = purge_interval;
-+ if (purge_interval < 0) {
-+ /* Let's pretend that it's always scheduled */
-+ pool->purge_work_scheduled = 1;
-+ }
-+ } else
-+ pool->purge_interval = SGV_DEFAULT_PURGE_INTERVAL;
-+ if (single_alloc_pages == 0) {
-+ pool->max_caches = SGV_POOL_ELEMENTS;
-+ pool->max_cached_pages = 1 << (SGV_POOL_ELEMENTS - 1);
-+ } else {
-+ pool->max_caches = 1;
-+ pool->max_cached_pages = single_alloc_pages;
-+ }
-+ pool->alloc_fns.alloc_pages_fn = sgv_alloc_sys_pages;
-+ pool->alloc_fns.free_pages_fn = sgv_free_sys_sg_entries;
-+
-+ TRACE_MEM("name %s, sizeof(*obj)=%zd, clustering_type=%d, "
-+ "single_alloc_pages=%d, max_caches=%d, max_cached_pages=%d",
-+ name, sizeof(struct sgv_pool_obj), clustering_type,
-+ single_alloc_pages, pool->max_caches, pool->max_cached_pages);
-+
-+ strlcpy(pool->name, name, sizeof(pool->name));
-+
-+ pool->owner_mm = current->mm;
-+
-+ for (i = 0; i < pool->max_caches; i++) {
-+ sgv_pool_init_cache(pool, i);
-+ if (pool->caches[i] == NULL) {
-+ PRINT_ERROR("Allocation of sgv_pool "
-+ "cache %s(%d) failed", name, i);
-+ goto out_free;
-+ }
-+ }
-+
-+ atomic_set(&pool->sgv_pool_ref, 1);
-+ spin_lock_init(&pool->sgv_pool_lock);
-+ INIT_LIST_HEAD(&pool->sorted_recycling_list);
-+ for (i = 0; i < pool->max_caches; i++)
-+ INIT_LIST_HEAD(&pool->recycling_lists[i]);
-+
-+ INIT_DELAYED_WORK(&pool->sgv_purge_work,
-+ (void (*)(struct work_struct *))sgv_purge_work_fn);
-+
-+ spin_lock_bh(&sgv_pools_lock);
-+ list_add_tail(&pool->sgv_pools_list_entry, &sgv_pools_list);
-+ spin_unlock_bh(&sgv_pools_lock);
-+
-+ res = scst_sgv_sysfs_create(pool);
-+ if (res != 0)
-+ goto out_del;
-+
-+ res = 0;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_del:
-+ spin_lock_bh(&sgv_pools_lock);
-+ list_del(&pool->sgv_pools_list_entry);
-+ spin_unlock_bh(&sgv_pools_lock);
-+
-+out_free:
-+ for (i = 0; i < pool->max_caches; i++) {
-+ if (pool->caches[i]) {
-+ kmem_cache_destroy(pool->caches[i]);
-+ pool->caches[i] = NULL;
-+ } else
-+ break;
-+ }
-+ goto out;
-+}
-+
-+static void sgv_evaluate_local_max_pages(void)
-+{
-+ int space4sgv_ttbl = PAGE_SIZE - sizeof(struct sgv_pool_obj);
-+
-+ sgv_max_local_pages = space4sgv_ttbl /
-+ (sizeof(struct trans_tbl_ent) + sizeof(struct scatterlist));
-+
-+ sgv_max_trans_pages = space4sgv_ttbl / sizeof(struct trans_tbl_ent);
-+
-+ TRACE_MEM("sgv_max_local_pages %d, sgv_max_trans_pages %d",
-+ sgv_max_local_pages, sgv_max_trans_pages);
-+ return;
-+}
-+
-+/**
-+ * sgv_pool_flush() - flushes the SGV pool.
-+ *
-+ * Flushes, i.e. frees, all the cached entries in the SGV pool.
-+ */
-+void sgv_pool_flush(struct sgv_pool *pool)
-+{
-+ int i;
-+
-+ TRACE_ENTRY();
-+
-+ for (i = 0; i < pool->max_caches; i++) {
-+ struct sgv_pool_obj *obj;
-+
-+ spin_lock_bh(&pool->sgv_pool_lock);
-+
-+ while (!list_empty(&pool->recycling_lists[i])) {
-+ obj = list_entry(pool->recycling_lists[i].next,
-+ struct sgv_pool_obj, recycling_list_entry);
-+
-+ __sgv_purge_from_cache(obj);
-+
-+ spin_unlock_bh(&pool->sgv_pool_lock);
-+
-+ EXTRACHECKS_BUG_ON(obj->owner_pool != pool);
-+ sgv_dtor_and_free(obj);
-+
-+ spin_lock_bh(&pool->sgv_pool_lock);
-+ }
-+ spin_unlock_bh(&pool->sgv_pool_lock);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(sgv_pool_flush);
-+
-+static void sgv_pool_destroy(struct sgv_pool *pool)
-+{
-+ int i;
-+
-+ TRACE_ENTRY();
-+
-+ cancel_delayed_work_sync(&pool->sgv_purge_work);
-+
-+ sgv_pool_flush(pool);
-+
-+ mutex_lock(&sgv_pools_mutex);
-+ spin_lock_bh(&sgv_pools_lock);
-+ list_del(&pool->sgv_pools_list_entry);
-+ spin_unlock_bh(&sgv_pools_lock);
-+ mutex_unlock(&sgv_pools_mutex);
-+
-+ scst_sgv_sysfs_del(pool);
-+
-+ for (i = 0; i < pool->max_caches; i++) {
-+ if (pool->caches[i])
-+ kmem_cache_destroy(pool->caches[i]);
-+ pool->caches[i] = NULL;
-+ }
-+
-+ kfree(pool);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/**
-+ * sgv_pool_set_allocator - set custom pages allocator
-+ * @pool: the cache
-+ * @alloc_pages_fn: pages allocation function
-+ * @free_pages_fn: pages freeing function
-+ *
-+ * Description:
-+ * Allows to set custom pages allocator for the SGV pool.
-+ * See the SGV pool documentation for more details.
-+ */
-+void sgv_pool_set_allocator(struct sgv_pool *pool,
-+ struct page *(*alloc_pages_fn)(struct scatterlist *, gfp_t, void *),
-+ void (*free_pages_fn)(struct scatterlist *, int, void *))
-+{
-+ pool->alloc_fns.alloc_pages_fn = alloc_pages_fn;
-+ pool->alloc_fns.free_pages_fn = free_pages_fn;
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(sgv_pool_set_allocator);
-+
-+/**
-+ * sgv_pool_create - creates and initializes an SGV pool
-+ * @name: the name of the SGV pool
-+ * @clustering_type: sets the type of pages clustering.
-+ * @single_alloc_pages: if 0, then the SGV pool will work in the set of
-+ * power 2 size buffers mode. If >0, then the SGV pool will
-+ * work in the fixed size buffers mode. In this case
-+ * single_alloc_pages sets the size of each buffer in pages.
-+ * @shared: sets whether the SGV pool can be shared between devices.
-+ * Cache sharing is allowed only between devices created inside
-+ * the same address space. If an SGV pool is shared, each
-+ * subsequent call of sgv_pool_create() with the same cache name
-+ * will not create a new cache, but instead return a reference
-+ * to it.
-+ * @purge_interval: sets the cache purging interval, i.e. an SG buffer
-+ * will be freed if it's unused for a time t, where
-+ * purge_interval <= t < 2*purge_interval. If purge_interval
-+ * is 0, then the default interval (60 seconds) will be used.
-+ * If purge_interval < 0, then automatic purging will be
-+ * disabled.
-+ *
-+ * Description:
-+ * Returns the resulting SGV pool or NULL in case of any error.
-+ */
-+struct sgv_pool *sgv_pool_create(const char *name,
-+ enum sgv_clustering_types clustering_type,
-+ int single_alloc_pages, bool shared, int purge_interval)
-+{
-+ struct sgv_pool *pool;
-+ int rc;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&sgv_pools_mutex);
-+
-+ list_for_each_entry(pool, &sgv_pools_list, sgv_pools_list_entry) {
-+ if (strcmp(pool->name, name) == 0) {
-+ if (shared) {
-+ if (pool->owner_mm != current->mm) {
-+ PRINT_ERROR("Attempt of a shared use "
-+ "of SGV pool %s with "
-+ "different MM", name);
-+ goto out_unlock;
-+ }
-+ sgv_pool_get(pool);
-+ goto out_unlock;
-+ } else {
-+ PRINT_ERROR("SGV pool %s already exists", name);
-+ pool = NULL;
-+ goto out_unlock;
-+ }
-+ }
-+ }
-+
-+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
-+ if (pool == NULL) {
-+ PRINT_ERROR("Allocation of sgv_pool failed (size %zd)",
-+ sizeof(*pool));
-+ goto out_unlock;
-+ }
-+
-+ rc = sgv_pool_init(pool, name, clustering_type, single_alloc_pages,
-+ purge_interval);
-+ if (rc != 0)
-+ goto out_free;
-+
-+out_unlock:
-+ mutex_unlock(&sgv_pools_mutex);
-+
-+ TRACE_EXIT_RES(pool != NULL);
-+ return pool;
-+
-+out_free:
-+ kfree(pool);
-+ goto out_unlock;
-+}
-+EXPORT_SYMBOL_GPL(sgv_pool_create);
-+
-+/**
-+ * sgv_pool_get - increase ref counter for the corresponding SGV pool
-+ *
-+ * Increases ref counter for the corresponding SGV pool
-+ */
-+void sgv_pool_get(struct sgv_pool *pool)
-+{
-+ atomic_inc(&pool->sgv_pool_ref);
-+ TRACE_MEM("Incrementing sgv pool %p ref (new value %d)",
-+ pool, atomic_read(&pool->sgv_pool_ref));
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(sgv_pool_get);
-+
-+/**
-+ * sgv_pool_put - decrease ref counter for the corresponding SGV pool
-+ *
-+ * Decreases ref counter for the corresponding SGV pool. If the ref
-+ * counter reaches 0, the cache will be destroyed.
-+ */
-+void sgv_pool_put(struct sgv_pool *pool)
-+{
-+ TRACE_MEM("Decrementing sgv pool %p ref (new value %d)",
-+ pool, atomic_read(&pool->sgv_pool_ref)-1);
-+ if (atomic_dec_and_test(&pool->sgv_pool_ref))
-+ sgv_pool_destroy(pool);
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(sgv_pool_put);
-+
-+/**
-+ * sgv_pool_del - deletes the corresponding SGV pool
-+ * @pool: the cache to delete.
-+ *
-+ * Description:
-+ * If the cache is shared, it will decrease its reference counter.
-+ * If the reference counter reaches 0, the cache will be destroyed.
-+ */
-+void sgv_pool_del(struct sgv_pool *pool)
-+{
-+ TRACE_ENTRY();
-+
-+ sgv_pool_put(pool);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(sgv_pool_del);
-+
-+/* Both parameters in pages */
-+int scst_sgv_pools_init(unsigned long mem_hwmark, unsigned long mem_lwmark)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ sgv_hi_wmk = mem_hwmark;
-+ sgv_lo_wmk = mem_lwmark;
-+
-+ sgv_evaluate_local_max_pages();
-+
-+ sgv_norm_pool = sgv_pool_create("sgv", sgv_no_clustering, 0, false, 0);
-+ if (sgv_norm_pool == NULL)
-+ goto out_err;
-+
-+ sgv_norm_clust_pool = sgv_pool_create("sgv-clust",
-+ sgv_full_clustering, 0, false, 0);
-+ if (sgv_norm_clust_pool == NULL)
-+ goto out_free_norm;
-+
-+ sgv_dma_pool = sgv_pool_create("sgv-dma", sgv_no_clustering, 0,
-+ false, 0);
-+ if (sgv_dma_pool == NULL)
-+ goto out_free_clust;
-+
-+ sgv_shrinker.shrink = sgv_shrink;
-+ sgv_shrinker.seeks = DEFAULT_SEEKS;
-+ register_shrinker(&sgv_shrinker);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free_clust:
-+ sgv_pool_destroy(sgv_norm_clust_pool);
-+
-+out_free_norm:
-+ sgv_pool_destroy(sgv_norm_pool);
-+
-+out_err:
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+void scst_sgv_pools_deinit(void)
-+{
-+ TRACE_ENTRY();
-+
-+ unregister_shrinker(&sgv_shrinker);
-+
-+ sgv_pool_destroy(sgv_dma_pool);
-+ sgv_pool_destroy(sgv_norm_pool);
-+ sgv_pool_destroy(sgv_norm_clust_pool);
-+
-+ flush_scheduled_work();
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static ssize_t sgv_sysfs_stat_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct sgv_pool *pool;
-+ int i, total = 0, hit = 0, merged = 0, allocated = 0;
-+ int oa, om, res;
-+
-+ pool = container_of(kobj, struct sgv_pool, sgv_kobj);
-+
-+ for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
-+ int t;
-+
-+ hit += atomic_read(&pool->cache_acc[i].hit_alloc);
-+ total += atomic_read(&pool->cache_acc[i].total_alloc);
-+
-+ t = atomic_read(&pool->cache_acc[i].total_alloc) -
-+ atomic_read(&pool->cache_acc[i].hit_alloc);
-+ allocated += t * (1 << i);
-+ merged += atomic_read(&pool->cache_acc[i].merged);
-+ }
-+
-+ res = sprintf(buf, "%-30s %-11s %-11s %-11s %-11s", "Name", "Hit", "Total",
-+ "% merged", "Cached (P/I/O)");
-+
-+ res += sprintf(&buf[res], "\n%-30s %-11d %-11d %-11d %d/%d/%d\n",
-+ pool->name, hit, total,
-+ (allocated != 0) ? merged*100/allocated : 0,
-+ pool->cached_pages, pool->inactive_cached_pages,
-+ pool->cached_entries);
-+
-+ for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
-+ int t = atomic_read(&pool->cache_acc[i].total_alloc) -
-+ atomic_read(&pool->cache_acc[i].hit_alloc);
-+ allocated = t * (1 << i);
-+ merged = atomic_read(&pool->cache_acc[i].merged);
-+
-+ res += sprintf(&buf[res], " %-28s %-11d %-11d %d\n",
-+ pool->cache_names[i],
-+ atomic_read(&pool->cache_acc[i].hit_alloc),
-+ atomic_read(&pool->cache_acc[i].total_alloc),
-+ (allocated != 0) ? merged*100/allocated : 0);
-+ }
-+
-+ allocated = atomic_read(&pool->big_pages);
-+ merged = atomic_read(&pool->big_merged);
-+ oa = atomic_read(&pool->other_pages);
-+ om = atomic_read(&pool->other_merged);
-+
-+ res += sprintf(&buf[res], " %-40s %d/%-9d %d/%d\n", "big/other",
-+ atomic_read(&pool->big_alloc), atomic_read(&pool->other_alloc),
-+ (allocated != 0) ? merged*100/allocated : 0,
-+ (oa != 0) ? om*100/oa : 0);
-+
-+ return res;
-+}
-+
-+static ssize_t sgv_sysfs_stat_reset(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ struct sgv_pool *pool;
-+ int i;
-+
-+ TRACE_ENTRY();
-+
-+ pool = container_of(kobj, struct sgv_pool, sgv_kobj);
-+
-+ for (i = 0; i < SGV_POOL_ELEMENTS; i++) {
-+ atomic_set(&pool->cache_acc[i].hit_alloc, 0);
-+ atomic_set(&pool->cache_acc[i].total_alloc, 0);
-+ atomic_set(&pool->cache_acc[i].merged, 0);
-+ }
-+
-+ atomic_set(&pool->big_pages, 0);
-+ atomic_set(&pool->big_merged, 0);
-+ atomic_set(&pool->big_alloc, 0);
-+ atomic_set(&pool->other_pages, 0);
-+ atomic_set(&pool->other_merged, 0);
-+ atomic_set(&pool->other_alloc, 0);
-+
-+ PRINT_INFO("Statistics for SGV pool %s reset", pool->name);
-+
-+ TRACE_EXIT_RES(count);
-+ return count;
-+}
-+
-+static ssize_t sgv_sysfs_global_stat_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct sgv_pool *pool;
-+ int inactive_pages = 0, res;
-+
-+ TRACE_ENTRY();
-+
-+ spin_lock_bh(&sgv_pools_lock);
-+ list_for_each_entry(pool, &sgv_active_pools_list,
-+ sgv_active_pools_list_entry) {
-+ inactive_pages += pool->inactive_cached_pages;
-+ }
-+ spin_unlock_bh(&sgv_pools_lock);
-+
-+ res = sprintf(buf, "%-42s %d/%d\n%-42s %d/%d\n%-42s %d/%d\n"
-+ "%-42s %-11d\n",
-+ "Inactive/active pages", inactive_pages,
-+ atomic_read(&sgv_pages_total) - inactive_pages,
-+ "Hi/lo watermarks [pages]", sgv_hi_wmk, sgv_lo_wmk,
-+ "Hi watermark releases/failures",
-+ atomic_read(&sgv_releases_on_hiwmk),
-+ atomic_read(&sgv_releases_on_hiwmk_failed),
-+ "Other allocs", atomic_read(&sgv_other_total_alloc));
-+
-+ TRACE_EXIT();
-+ return res;
-+}
-+
-+static ssize_t sgv_sysfs_global_stat_reset(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ TRACE_ENTRY();
-+
-+ atomic_set(&sgv_releases_on_hiwmk, 0);
-+ atomic_set(&sgv_releases_on_hiwmk_failed, 0);
-+ atomic_set(&sgv_other_total_alloc, 0);
-+
-+ PRINT_INFO("%s", "Global SGV pool statistics reset");
-+
-+ TRACE_EXIT_RES(count);
-+ return count;
-+}
-+
-+static struct kobj_attribute sgv_stat_attr =
-+ __ATTR(stats, S_IRUGO | S_IWUSR, sgv_sysfs_stat_show,
-+ sgv_sysfs_stat_reset);
-+
-+static struct attribute *sgv_attrs[] = {
-+ &sgv_stat_attr.attr,
-+ NULL,
-+};
-+
-+static void sgv_kobj_release(struct kobject *kobj)
-+{
-+ struct sgv_pool *pool;
-+
-+ TRACE_ENTRY();
-+
-+ pool = container_of(kobj, struct sgv_pool, sgv_kobj);
-+ if (pool->sgv_kobj_release_cmpl != NULL)
-+ complete_all(pool->sgv_kobj_release_cmpl);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static struct kobj_type sgv_pool_ktype = {
-+ .sysfs_ops = &scst_sysfs_ops,
-+ .release = sgv_kobj_release,
-+ .default_attrs = sgv_attrs,
-+};
-+
-+static int scst_sgv_sysfs_create(struct sgv_pool *pool)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ res = kobject_init_and_add(&pool->sgv_kobj, &sgv_pool_ktype,
-+ scst_sgv_kobj, pool->name);
-+ if (res != 0) {
-+ PRINT_ERROR("Can't add sgv pool %s to sysfs", pool->name);
-+ goto out;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void scst_sgv_sysfs_del(struct sgv_pool *pool)
-+{
-+ int rc;
-+ DECLARE_COMPLETION_ONSTACK(c);
-+
-+ TRACE_ENTRY();
-+
-+ pool->sgv_kobj_release_cmpl = &c;
-+
-+ kobject_del(&pool->sgv_kobj);
-+ kobject_put(&pool->sgv_kobj);
-+
-+ rc = wait_for_completion_timeout(pool->sgv_kobj_release_cmpl, HZ);
-+ if (rc == 0) {
-+ PRINT_INFO("Waiting for releasing sysfs entry "
-+ "for SGV pool %s (%d refs)...", pool->name,
-+ atomic_read(&pool->sgv_kobj.kref.refcount));
-+ wait_for_completion(pool->sgv_kobj_release_cmpl);
-+ PRINT_INFO("Done waiting for releasing sysfs "
-+ "entry for SGV pool %s", pool->name);
-+ }
-+
-+ TRACE_EXIT();
-+}
-+
-+static struct kobj_attribute sgv_global_stat_attr =
-+ __ATTR(global_stats, S_IRUGO | S_IWUSR, sgv_sysfs_global_stat_show,
-+ sgv_sysfs_global_stat_reset);
-+
-+static struct attribute *sgv_default_attrs[] = {
-+ &sgv_global_stat_attr.attr,
-+ NULL,
-+};
-+
-+static void scst_sysfs_release(struct kobject *kobj)
-+{
-+ kfree(kobj);
-+}
-+
-+static struct kobj_type sgv_ktype = {
-+ .sysfs_ops = &scst_sysfs_ops,
-+ .release = scst_sysfs_release,
-+ .default_attrs = sgv_default_attrs,
-+};
-+
-+/**
-+ * scst_add_sgv_kobj() - Initialize and add the root SGV kernel object.
-+ */
-+int scst_add_sgv_kobj(struct kobject *parent, const char *name)
-+{
-+ int res;
-+
-+ WARN_ON(scst_sgv_kobj);
-+ res = -ENOMEM;
-+ scst_sgv_kobj = kzalloc(sizeof(*scst_sgv_kobj), GFP_KERNEL);
-+ if (!scst_sgv_kobj)
-+ goto out;
-+ res = kobject_init_and_add(scst_sgv_kobj, &sgv_ktype, parent, name);
-+ if (res != 0)
-+ goto out_free;
-+out:
-+ return res;
-+out_free:
-+ kobject_put(scst_sgv_kobj);
-+ scst_sgv_kobj = NULL;
-+ goto out;
-+}
-+
-+/**
-+ * scst_del_put_sgv_kobj() - Remove the root SGV kernel object.
-+ */
-+void scst_del_put_sgv_kobj(void)
-+{
-+ WARN_ON(!scst_sgv_kobj);
-+ kobject_del(scst_sgv_kobj);
-+ kobject_put(scst_sgv_kobj);
-+ scst_sgv_kobj = NULL;
-+}
-+
-diff -uprN orig/linux-3.2/Documentation/scst/sgv_cache.sgml linux-3.2/Documentation/scst/sgv_cache.sgml
---- orig/linux-3.2/Documentation/scst/sgv_cache.sgml
-+++ linux-3.2/Documentation/scst/sgv_cache.sgml
-@@ -0,0 +1,335 @@
-+<!doctype linuxdoc system>
-+
-+<article>
-+
-+<title>
-+SCST SGV cache description
-+</title>
-+
-+<author>
-+ <name>Vladislav Bolkhovitin</name>
-+</author>
-+
-+<date>Version 2.1.0</date>
-+
-+<toc>
-+
-+<sect>Introduction
-+
-+<p>
-+The SCST SGV cache is a memory management subsystem in SCST. One could
-+call it a "memory pool", but the Linux kernel already has a mempool
-+interface, which serves different purposes. The SGV cache provides the
-+SCST core, target drivers and backend dev handlers with facilities to
-+allocate, build and cache SG vectors for data buffers. Its main advantage
-+is the caching facility: instead of freeing each no longer used vector
-+back to the system, it keeps the vector for a while (possibly
-+indefinitely) so that it can be reused by the next command. This makes it
-+possible to:
-+
-+<itemize>
-+
-+<item> Reduce command processing latencies and, hence, improve performance;
-+
-+<item> Make command processing latencies predictable, which is essential
-+ for RT applications.
-+
-+</itemize>
-+
-+Freed SG vectors are kept by the SGV cache either for some (possibly
-+indefinite) time or, optionally, until the system needs more memory and
-+asks for some of them to be freed via the set_shrinker() interface. The
-+SGV cache also makes it possible to:
-+
-+<itemize>
-+
-+<item> Cluster pages together. "Clustering" means merging adjacent pages
-+into a single SG entry. This yields fewer SG entries in the resulting SG
-+vector, which improves the performance of handling it and allows working
-+with bigger buffers on hardware with limited SG capabilities.
-+
-+<item> Set custom page allocator functions. For instance, the scst_user
-+device handler uses this facility to eliminate unneeded mapping/unmapping
-+of user space pages and avoid unneeded IOCTL calls for buffer allocations.
-+In the fileio_tgt application, which uses a regular malloc() function to
-+allocate data buffers, this facility gives ~30% less CPU load and a
-+considerable performance increase.
-+
-+<item> Prevent each initiator, or all initiators together, from
-+allocating too much memory and DoS-ing the target. Consider 10
-+initiators, each with access to 10 devices. Each of them can queue up to
-+64 commands, and each command can transfer up to 1MB of data. So at peak
-+all of them can allocate up to 10*10*64 commands * 1MB = ~6.5GB of memory
-+for data buffers. This amount must be limited somehow, and the SGV cache
-+performs this function.
-+
-+</itemize>
-+
-+<sect> Implementation
-+
-+<p>
-+From the implementation POV, the SGV cache is a simple extension of the
-+kmem cache. It can work in 2 modes:
-+
-+<enum>
-+
-+<item> With fixed size buffers.
-+
-+<item> With a set of power 2 size buffers. In this mode each SGV cache
-+(struct sgv_pool) has SGV_POOL_ELEMENTS (currently 11) kmem caches.
-+Each of those kmem caches keeps SGV cache objects (struct sgv_pool_obj)
-+corresponding to SG vectors with a size of order X pages. For instance, a
-+request to allocate 4 pages will be served from kmem cache&lsqb;2&rsqb;, since the
-+order of the number of requested pages is 2. If a request to allocate
-+11KB comes later, the same SG vector with 4 pages will be reused (see
-+below). On average this mode has less memory overhead than the fixed
-+size buffers mode.
-+
-+</enum>
-+
-+Consider how the SGV cache works in the set of buffers mode. When a
-+request to allocate a new SG vector comes in, sgv_pool_alloc(), via
-+sgv_get_obj(), checks whether there is already a cached vector of that
-+order. If yes, that vector will be reused and its length, if
-+necessary, will be modified to match the requested size. In the above
-+example of a request for an 11KB buffer, the 4-page vector will be reused
-+and modified using trans_tbl to contain 3 pages, and the last entry will
-+be modified to contain the requested length - 2*PAGE_SIZE. If there is no
-+cached object, a new sgv_pool_obj will be allocated from the
-+corresponding kmem cache, chosen by the order of the number of requested
-+pages. Then that vector will be filled with pages and returned.
-+
-+In the fixed size buffers mode the SGV cache works similarly, except
-+that it always allocates a buffer of the predefined fixed size. I.e.,
-+even for a 4K request the whole buffer of the predefined size, say 1MB,
-+will be used.
-+
-+In both modes, if the size of a request exceeds the maximum buffer size
-+allowed for caching, the requested buffer will be allocated, but not
-+cached.
-+
-+Freed cached sgv_pool_obj objects are actually returned to the system
-+either by the purge work, which is scheduled once every 60 seconds, or by
-+sgv_shrink(), called by the system when it asks for memory.
-+
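-+To illustrate, here is a minimal sketch of how the set of buffers mode
-+maps a request size to one of its kmem caches (it mirrors the logic in
-+sgv_pool_alloc(); get_order() is the standard kernel helper, and the
-+name sgv_cache_num_for_size() is invented for this example):
-+
-+<verb>
-+static int sgv_cache_num_for_size(unsigned int size)
-+{
-+ /* 4K -> cache[0], 8K -> cache[1], 9K..16K -> cache[2], ... */
-+ return get_order(size);
-+}
-+</verb>
-+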
-+<sect> Interface
-+
-+<sect1> sgv_pool *sgv_pool_create()
-+
-+<p>
-+<verb>
-+struct sgv_pool *sgv_pool_create(
-+ const char *name,
-+ enum sgv_clustering_types clustered, int single_alloc_pages,
-+ bool shared, int purge_interval)
-+</verb>
-+
-+This function creates and initializes an SGV cache. It has the following
-+arguments:
-+
-+<itemize>
-+
-+<item> <bf/name/ - the name of the SGV cache
-+
-+<item> <bf/clustered/ - sets type of the pages clustering. The type can be:
-+
-+ <itemize>
-+
-+ <item> <bf/sgv_no_clustering/ - no clustering performed.
-+
-+ <item> <bf/sgv_tail_clustering/ - a page will only be merged with the latest
-+ previously allocated page, so the order of pages in the SG will be
-+ preserved
-+
-+ <item> <bf/sgv_full_clustering/ - free merging of pages at any place in
-+ the SG is allowed. This mode usually provides the best merging
-+ rate.
-+
-+ </itemize>
-+
-+<item> <bf/single_alloc_pages/ - if 0, then the SGV cache will work in the set of
-+ power 2 size buffers mode. If >0, then the SGV cache will work in the
-+ fixed size buffers mode. In this case single_alloc_pages sets the
-+ size of each buffer in pages.
-+
-+<item> <bf/shared/ - sets if the SGV cache can be shared between devices or not.
-+ The cache sharing allowed only between devices created inside the same
-+ address space. If an SGV cache is shared, each subsequent call of
-+ sgv_pool_create() with the same cache name will not create a new cache,
-+ but instead return a reference to it.
-+
-+<item> <bf/purge_interval/ - sets the cache purging interval, i.e. an SG buffer
-+ will be freed if it's unused for a time t, where purge_interval <= t <
-+ 2*purge_interval. If purge_interval is 0, then the default interval
-+ (60 seconds) will be used. If purge_interval < 0, then automatic
-+ purging will be disabled. Shrinking on the system's demand will also
-+ be disabled.
-+
-+</itemize>
-+
-+Returns the resulting SGV cache or NULL in case of any error.
-+
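-+For example, a dev handler could create its own cache with full
-+clustering and the default purge interval like this (a sketch; the pool
-+name is arbitrary and error handling is shortened):
-+
-+<verb>
-+struct sgv_pool *pool;
-+
-+pool = sgv_pool_create("my-handler", sgv_full_clustering, 0, false, 0);
-+if (pool == NULL)
-+ return -ENOMEM;
-+</verb>
-+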
-+<sect1> void sgv_pool_del()
-+
-+<p>
-+<verb>
-+void sgv_pool_del(
-+ struct sgv_pool *pool)
-+</verb>
-+
-+This function deletes the corresponding SGV cache. If the cache is
-+shared, it will decrease its reference counter. If the reference counter
-+reaches 0, the cache will be destroyed.
-+
-+<sect1> void sgv_pool_flush()
-+
-+<p>
-+<verb>
-+void sgv_pool_flush(
-+ struct sgv_pool *pool)
-+</verb>
-+
-+This function flushes, i.e. frees, all the cached entries in the SGV
-+cache.
-+
-+<sect1> void sgv_pool_set_allocator()
-+
-+<p>
-+<verb>
-+void sgv_pool_set_allocator(
-+ struct sgv_pool *pool,
-+ struct page *(*alloc_pages_fn)(struct scatterlist *sg, gfp_t gfp, void *priv),
-+ void (*free_pages_fn)(struct scatterlist *sg, int sg_count, void *priv));
-+</verb>
-+
-+This function sets a custom page allocator for the SGV cache. For
-+instance, scst_user uses this facility to supply the cache with pages
-+mapped from user space. A minimal example follows the parameter
-+descriptions below.
-+
-+<bf/alloc_pages_fn()/ has the following parameters:
-+
-+<itemize>
-+
-+<item> <bf/sg/ - SG entry, to which the allocated page should be added.
-+
-+<item> <bf/gfp/ - the allocation GFP flags
-+
-+<item> <bf/priv/ - pointer to a private data supplied to sgv_pool_alloc()
-+
-+</itemize>
-+
-+This function should return the allocated page, or NULL if no page was
-+allocated.
-+
-+
-+<bf/free_pages_fn()/ has the following parameters:
-+
-+<itemize>
-+
-+<item> <bf/sg/ - SG vector to free
-+
-+<item> <bf/sg_count/ - number of SG entries in the sg
-+
-+<item> <bf/priv/ - pointer to a private data supplied to the
-+corresponding sgv_pool_alloc()
-+
-+</itemize>
-+
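-+For illustration, here is a minimal allocator pair equivalent to the
-+built-in system page allocator. It assumes the cache was created with
-+sgv_no_clustering, so each SG entry holds exactly one page; a real
-+allocator, like the scst_user one, would take its pages from elsewhere:
-+
-+<verb>
-+static struct page *my_alloc_pages_fn(struct scatterlist *sg,
-+ gfp_t gfp, void *priv)
-+{
-+ struct page *page = alloc_pages(gfp, 0);
-+
-+ if (page != NULL)
-+ sg_set_page(sg, page, PAGE_SIZE, 0);
-+ return page;
-+}
-+
-+static void my_free_pages_fn(struct scatterlist *sg, int sg_count,
-+ void *priv)
-+{
-+ int i;
-+
-+ /* One page per SG entry, see the sgv_no_clustering assumption */
-+ for (i = 0; i < sg_count; i++)
-+ __free_page(sg_page(&sg[i]));
-+}
-+
-+sgv_pool_set_allocator(pool, my_alloc_pages_fn, my_free_pages_fn);
-+</verb>
-+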
-+<sect1> struct scatterlist *sgv_pool_alloc()
-+
-+<p>
-+<verb>
-+struct scatterlist *sgv_pool_alloc(
-+ struct sgv_pool *pool,
-+ unsigned int size,
-+ gfp_t gfp_mask,
-+ int flags,
-+ int *count,
-+ struct sgv_pool_obj **sgv,
-+ struct scst_mem_lim *mem_lim,
-+ void *priv)
-+</verb>
-+
-+This function allocates an SG vector from the SGV cache. It has the
-+following parameters:
-+
-+<itemize>
-+
-+<item> <bf/pool/ - the cache to alloc from
-+
-+<item> <bf/size/ - size of the resulting SG vector in bytes
-+
-+<item> <bf/gfp_mask/ - the allocation mask
-+
-+<item> <bf/flags/ - the allocation flags. The following flags are possible and
-+ can be set using OR operation:
-+
-+ <enum>
-+
-+ <item> <bf/SGV_POOL_ALLOC_NO_CACHED/ - the SG vector must not be cached.
-+
-+ <item> <bf/SGV_POOL_NO_ALLOC_ON_CACHE_MISS/ - don't do an allocation on a
-+ cache miss.
-+
-+ <item> <bf/SGV_POOL_RETURN_OBJ_ON_ALLOC_FAIL/ - return an empty SGV object,
-+ i.e. without the SG vector, if the allocation can't be completed.
-+ For instance, because SGV_POOL_NO_ALLOC_ON_CACHE_MISS flag set.
-+
-+ </enum>
-+
-+<item> <bf/count/ - the resulting count of SG entries in the resulting SG vector.
-+
-+<item> <bf/sgv/ - the resulting SGV object. It should be used to free the
-+ resulting SG vector.
-+
-+<item> <bf/mem_lim/ - memory limits, see below.
-+
-+<item> <bf/priv/ - pointer to private data for this allocation. This pointer will
-+ be supplied to alloc_pages_fn() and free_pages_fn() and can be
-+ retrieved by sgv_get_priv().
-+
-+</itemize>
-+
-+This function returns a pointer to the resulting SG vector, or NULL in
-+case of error.
-+
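-+Putting the pieces together, a typical allocation/free cycle looks like
-+this (a sketch; the 11KB size matches the example above and error
-+handling is shortened):
-+
-+<verb>
-+struct scst_mem_lim mem_lim;
-+struct sgv_pool_obj *sgv = NULL;
-+struct scatterlist *sg;
-+int count;
-+
-+scst_init_mem_lim(&mem_lim);
-+
-+sg = sgv_pool_alloc(pool, 11*1024, GFP_KERNEL, 0, &count,
-+ &sgv, &mem_lim, NULL);
-+if (sg == NULL)
-+ return -ENOMEM;
-+
-+/* ... use the count SG entries in sg ... */
-+
-+sgv_pool_free(sgv, &mem_lim);
-+</verb>
-+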
-+<sect1> void sgv_pool_free()
-+
-+<p>
-+<verb>
-+void sgv_pool_free(
-+ struct sgv_pool_obj *sgv,
-+ struct scst_mem_lim *mem_lim)
-+</verb>
-+
-+This function frees a previously allocated SG vector, referenced by the
-+SGV cache object sgv.
-+
-+<sect1> void *sgv_get_priv(struct sgv_pool_obj *sgv)
-+
-+<p>
-+<verb>
-+void *sgv_get_priv(
-+ struct sgv_pool_obj *sgv)
-+</verb>
-+
-+This function returns the allocation private data for the SGV cache
-+object sgv. The private data is set via sgv_pool_alloc().
-+
-+<sect1> void scst_init_mem_lim()
-+
-+<p>
-+<verb>
-+void scst_init_mem_lim(
-+ struct scst_mem_lim *mem_lim)
-+</verb>
-+
-+This function initializes the memory limits structure mem_lim according
-+to the current system configuration. This structure should later be used
-+to track and limit the memory allocated by one or more SGV caches.
-+
-+
-+<sect> Runtime information and statistics.
-+
-+<p>
-+Runtime information and statistics are available in /sys/kernel/scst_tgt/sgv.
-+
-+</article>
-diff -uprN orig/linux-3.2/include/scst/scst_user.h linux-3.2/include/scst/scst_user.h
---- orig/linux-3.2/include/scst/scst_user.h
-+++ linux-3.2/include/scst/scst_user.h
-@@ -0,0 +1,320 @@
-+/*
-+ * include/scst/scst_user.h
-+ *
-+ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * Contains constants and data structures for scst_user module.
-+ * See http://scst.sourceforge.net/doc/scst_user_spec.txt or
-+ * scst_user_spec.txt for description.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#ifndef __SCST_USER_H
-+#define __SCST_USER_H
-+
-+#include <scst/scst_const.h>
-+
-+#define DEV_USER_NAME "scst_user"
-+#define DEV_USER_PATH "/dev/"
-+#define DEV_USER_VERSION_NAME SCST_VERSION_NAME
-+#define DEV_USER_VERSION \
-+ DEV_USER_VERSION_NAME "$Revision: 3281 $" SCST_CONST_VERSION
-+
-+#define SCST_USER_PARSE_STANDARD 0
-+#define SCST_USER_PARSE_CALL 1
-+#define SCST_USER_PARSE_EXCEPTION 2
-+#define SCST_USER_MAX_PARSE_OPT SCST_USER_PARSE_EXCEPTION
-+
-+#define SCST_USER_ON_FREE_CMD_CALL 0
-+#define SCST_USER_ON_FREE_CMD_IGNORE 1
-+#define SCST_USER_MAX_ON_FREE_CMD_OPT SCST_USER_ON_FREE_CMD_IGNORE
-+
-+#define SCST_USER_MEM_NO_REUSE 0
-+#define SCST_USER_MEM_REUSE_READ 1
-+#define SCST_USER_MEM_REUSE_WRITE 2
-+#define SCST_USER_MEM_REUSE_ALL 3
-+#define SCST_USER_MAX_MEM_REUSE_OPT SCST_USER_MEM_REUSE_ALL
-+
-+#define SCST_USER_PARTIAL_TRANSFERS_NOT_SUPPORTED 0
-+#define SCST_USER_PARTIAL_TRANSFERS_SUPPORTED_ORDERED 1
-+#define SCST_USER_PARTIAL_TRANSFERS_SUPPORTED 2
-+#define SCST_USER_MAX_PARTIAL_TRANSFERS_OPT \
-+ SCST_USER_PARTIAL_TRANSFERS_SUPPORTED
-+
-+#ifndef aligned_u64
-+#define aligned_u64 uint64_t __attribute__((aligned(8)))
-+#endif
-+
-+/*************************************************************
-+ ** Private ucmd states
-+ *************************************************************/
-+#define UCMD_STATE_NEW 0
-+#define UCMD_STATE_PARSING 1
-+#define UCMD_STATE_BUF_ALLOCING 2
-+#define UCMD_STATE_EXECING 3
-+#define UCMD_STATE_ON_FREEING 4
-+#define UCMD_STATE_ON_FREE_SKIPPED 5
-+#define UCMD_STATE_ON_CACHE_FREEING 6
-+#define UCMD_STATE_TM_EXECING 7
-+
-+#define UCMD_STATE_ATTACH_SESS 0x20
-+#define UCMD_STATE_DETACH_SESS 0x21
-+
-+struct scst_user_opt {
-+ uint8_t parse_type;
-+ uint8_t on_free_cmd_type;
-+ uint8_t memory_reuse_type;
-+ uint8_t partial_transfers_type;
-+ int32_t partial_len;
-+
-+ /* SCSI control mode page parameters, see SPC */
-+ uint8_t tst;
-+ uint8_t queue_alg;
-+ uint8_t tas;
-+ uint8_t swp;
-+ uint8_t d_sense;
-+
-+ uint8_t has_own_order_mgmt;
-+};
-+
-+struct scst_user_dev_desc {
-+ aligned_u64 version_str;
-+ aligned_u64 license_str;
-+ uint8_t type;
-+ uint8_t sgv_shared;
-+ uint8_t sgv_disable_clustered_pool;
-+ int32_t sgv_single_alloc_pages;
-+ int32_t sgv_purge_interval;
-+ struct scst_user_opt opt;
-+ uint32_t block_size;
-+ uint8_t enable_pr_cmds_notifications;
-+ char name[SCST_MAX_NAME];
-+ char sgv_name[SCST_MAX_NAME];
-+};
-+
-+struct scst_user_sess {
-+ aligned_u64 sess_h;
-+ aligned_u64 lun;
-+ uint16_t threads_num;
-+ uint8_t rd_only;
-+ uint16_t scsi_transport_version;
-+ uint16_t phys_transport_version;
-+ char initiator_name[SCST_MAX_EXTERNAL_NAME];
-+ char target_name[SCST_MAX_EXTERNAL_NAME];
-+};
-+
-+struct scst_user_scsi_cmd_parse {
-+ aligned_u64 sess_h;
-+
-+ uint8_t cdb[SCST_MAX_CDB_SIZE];
-+ uint16_t cdb_len;
-+
-+ int32_t timeout;
-+ int32_t bufflen;
-+ int32_t out_bufflen;
-+
-+ uint32_t op_flags;
-+
-+ uint8_t queue_type;
-+ uint8_t data_direction;
-+
-+ uint8_t expected_values_set;
-+ uint8_t expected_data_direction;
-+ int32_t expected_transfer_len;
-+ int32_t expected_out_transfer_len;
-+
-+ uint32_t sn;
-+};
-+
-+struct scst_user_scsi_cmd_alloc_mem {
-+ aligned_u64 sess_h;
-+
-+ uint8_t cdb[SCST_MAX_CDB_SIZE];
-+ uint16_t cdb_len;
-+
-+ int32_t alloc_len;
-+
-+ uint8_t queue_type;
-+ uint8_t data_direction;
-+
-+ uint32_t sn;
-+};
-+
-+struct scst_user_scsi_cmd_exec {
-+ aligned_u64 sess_h;
-+
-+ uint8_t cdb[SCST_MAX_CDB_SIZE];
-+ uint16_t cdb_len;
-+
-+ int32_t data_len;
-+ int32_t bufflen;
-+ int32_t alloc_len;
-+ aligned_u64 pbuf;
-+ uint8_t queue_type;
-+ uint8_t data_direction;
-+ uint8_t partial;
-+ int32_t timeout;
-+
-+ aligned_u64 p_out_buf;
-+ int32_t out_bufflen;
-+
-+ uint32_t sn;
-+
-+ uint32_t parent_cmd_h;
-+ int32_t parent_cmd_data_len;
-+ uint32_t partial_offset;
-+};
-+
-+struct scst_user_scsi_on_free_cmd {
-+ aligned_u64 pbuf;
-+ int32_t resp_data_len;
-+ uint8_t buffer_cached;
-+ uint8_t aborted;
-+ uint8_t status;
-+ uint8_t delivery_status;
-+};
-+
-+struct scst_user_on_cached_mem_free {
-+ aligned_u64 pbuf;
-+};
-+
-+struct scst_user_tm {
-+ aligned_u64 sess_h;
-+ uint32_t fn;
-+ uint32_t cmd_h_to_abort;
-+ uint32_t cmd_sn;
-+ uint8_t cmd_sn_set;
-+};
-+
-+struct scst_user_get_cmd {
-+ uint32_t cmd_h;
-+ uint32_t subcode;
-+ union {
-+ aligned_u64 preply;
-+ struct scst_user_sess sess;
-+ struct scst_user_scsi_cmd_parse parse_cmd;
-+ struct scst_user_scsi_cmd_alloc_mem alloc_cmd;
-+ struct scst_user_scsi_cmd_exec exec_cmd;
-+ struct scst_user_scsi_on_free_cmd on_free_cmd;
-+ struct scst_user_on_cached_mem_free on_cached_mem_free;
-+ struct scst_user_tm tm_cmd;
-+ };
-+};
-+
-+/* Be careful adding new members here, this structure is allocated on stack! */
-+struct scst_user_scsi_cmd_reply_parse {
-+ uint8_t status;
-+ union {
-+ struct {
-+ uint8_t queue_type;
-+ uint8_t data_direction;
-+ uint16_t cdb_len;
-+ uint32_t op_flags;
-+ int32_t data_len;
-+ int32_t bufflen;
-+ int32_t out_bufflen;
-+ };
-+ struct {
-+ uint8_t sense_len;
-+ aligned_u64 psense_buffer;
-+ };
-+ };
-+};
-+
-+/* Be careful adding new members here, this structure is allocated on stack! */
-+struct scst_user_scsi_cmd_reply_alloc_mem {
-+ aligned_u64 pbuf;
-+};
-+
-+/* Be careful adding new members here, this structure is allocated on stack! */
-+struct scst_user_scsi_cmd_reply_exec {
-+ int32_t resp_data_len;
-+ aligned_u64 pbuf;
-+
-+#define SCST_EXEC_REPLY_BACKGROUND 0
-+#define SCST_EXEC_REPLY_COMPLETED 1
-+ uint8_t reply_type;
-+
-+ uint8_t status;
-+ uint8_t sense_len;
-+ aligned_u64 psense_buffer;
-+};
-+
-+/* Be careful adding new members here, this structure is allocated on stack! */
-+struct scst_user_reply_cmd {
-+ uint32_t cmd_h;
-+ uint32_t subcode;
-+ union {
-+ int32_t result;
-+ struct scst_user_scsi_cmd_reply_parse parse_reply;
-+ struct scst_user_scsi_cmd_reply_alloc_mem alloc_reply;
-+ struct scst_user_scsi_cmd_reply_exec exec_reply;
-+ };
-+};
-+
-+/* Be careful adding new members here, this structure is allocated on stack! */
-+struct scst_user_get_ext_cdb {
-+ uint32_t cmd_h;
-+ aligned_u64 ext_cdb_buffer;
-+};
-+
-+/* Be careful adding new members here, this structure is allocated on stack! */
-+struct scst_user_prealloc_buffer_in {
-+ aligned_u64 pbuf;
-+ uint32_t bufflen;
-+ uint8_t for_clust_pool;
-+};
-+
-+/* Be careful adding new members here, this structure is allocated on stack! */
-+struct scst_user_prealloc_buffer_out {
-+ uint32_t cmd_h;
-+};
-+
-+/* Be careful adding new members here, this structure is allocated on stack! */
-+union scst_user_prealloc_buffer {
-+ struct scst_user_prealloc_buffer_in in;
-+ struct scst_user_prealloc_buffer_out out;
-+};
-+
-+#define SCST_USER_REGISTER_DEVICE _IOW('u', 1, struct scst_user_dev_desc)
-+#define SCST_USER_UNREGISTER_DEVICE _IO('u', 2)
-+#define SCST_USER_SET_OPTIONS _IOW('u', 3, struct scst_user_opt)
-+#define SCST_USER_GET_OPTIONS _IOR('u', 4, struct scst_user_opt)
-+#define SCST_USER_REPLY_AND_GET_CMD _IOWR('u', 5, struct scst_user_get_cmd)
-+#define SCST_USER_REPLY_CMD _IOW('u', 6, struct scst_user_reply_cmd)
-+#define SCST_USER_FLUSH_CACHE _IO('u', 7)
-+#define SCST_USER_DEVICE_CAPACITY_CHANGED _IO('u', 8)
-+#define SCST_USER_GET_EXTENDED_CDB _IOWR('u', 9, struct scst_user_get_ext_cdb)
-+#define SCST_USER_PREALLOC_BUFFER _IOWR('u', 10, union scst_user_prealloc_buffer)
-+
-+/* Values for scst_user_get_cmd.subcode */
-+#define SCST_USER_ATTACH_SESS \
-+ _IOR('s', UCMD_STATE_ATTACH_SESS, struct scst_user_sess)
-+#define SCST_USER_DETACH_SESS \
-+ _IOR('s', UCMD_STATE_DETACH_SESS, struct scst_user_sess)
-+#define SCST_USER_PARSE \
-+ _IOWR('s', UCMD_STATE_PARSING, struct scst_user_scsi_cmd_parse)
-+#define SCST_USER_ALLOC_MEM \
-+ _IOWR('s', UCMD_STATE_BUF_ALLOCING, struct scst_user_scsi_cmd_alloc_mem)
-+#define SCST_USER_EXEC \
-+ _IOWR('s', UCMD_STATE_EXECING, struct scst_user_scsi_cmd_exec)
-+#define SCST_USER_ON_FREE_CMD \
-+ _IOR('s', UCMD_STATE_ON_FREEING, struct scst_user_scsi_on_free_cmd)
-+#define SCST_USER_ON_CACHED_MEM_FREE \
-+ _IOR('s', UCMD_STATE_ON_CACHE_FREEING, \
-+ struct scst_user_on_cached_mem_free)
-+#define SCST_USER_TASK_MGMT \
-+ _IOWR('s', UCMD_STATE_TM_EXECING, struct scst_user_tm)
-+
-+#endif /* __SCST_USER_H */
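For orientation, here is a minimal user-space sketch of the command loop these ioctls and subcodes define. It is an illustration under assumptions, not the reference handler: it presumes the device was already opened and registered via SCST_USER_REGISTER_DEVICE with SCST_USER_PARSE_STANDARD and default memory options (so no PARSE or ALLOC_MEM subcodes arrive), and the names below are only examples.

/*
 * Minimal user-space loop (sketch). Each REPLY_AND_GET_CMD both
 * delivers the previous reply (via get.preply) and fetches the next
 * command, so the handler needs one syscall per command.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <scst/scst_user.h>

static int handler_loop(int fd)
{
	struct scst_user_get_cmd get;
	struct scst_user_reply_cmd reply;

	memset(&get, 0, sizeof(get));
	get.preply = 0;			/* nothing to reply to, yet */

	for (;;) {
		if (ioctl(fd, SCST_USER_REPLY_AND_GET_CMD, &get) != 0) {
			perror("SCST_USER_REPLY_AND_GET_CMD");
			return -1;
		}

		memset(&reply, 0, sizeof(reply));
		reply.cmd_h = get.cmd_h;
		reply.subcode = get.subcode;

		if (get.subcode == SCST_USER_EXEC) {
			/* process get.exec_cmd here, then complete inline */
			reply.exec_reply.reply_type = SCST_EXEC_REPLY_COMPLETED;
			reply.exec_reply.status = 0;	/* GOOD */
		} else {
			reply.result = 0;	/* sess attach/detach, on-free */
		}

		/* deliver the reply and fetch the next command in one call */
		get.preply = (unsigned long)&reply;
	}
}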
-diff -uprN orig/linux-3.2/drivers/scst/dev_handlers/scst_user.c linux-3.2/drivers/scst/dev_handlers/scst_user.c
---- orig/linux-3.2/drivers/scst/dev_handlers/scst_user.c
-+++ linux-3.2/drivers/scst/dev_handlers/scst_user.c
-@@ -0,0 +1,3751 @@
-+/*
-+ * scst_user.c
-+ *
-+ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * SCSI virtual user space device handler
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/kthread.h>
-+#include <linux/delay.h>
-+#include <linux/poll.h>
-+#include <linux/stddef.h>
-+#include <linux/slab.h>
-+
-+#define LOG_PREFIX DEV_USER_NAME
-+
-+#include <scst/scst.h>
-+#include <scst/scst_user.h>
-+#include "scst_dev_handler.h"
-+
-+#define DEV_USER_CMD_HASH_ORDER 6
-+#define DEV_USER_ATTACH_TIMEOUT (5*HZ)
-+
-+struct scst_user_dev {
-+ struct rw_semaphore dev_rwsem;
-+
-+ /*
-+	 * Must be kept here, because it's needed at cleanup time,
-+	 * when the corresponding scst_dev is already dead.
-+ */
-+ struct scst_cmd_threads udev_cmd_threads;
-+
-+ /* Protected by udev_cmd_threads.cmd_list_lock */
-+ struct list_head ready_cmd_list;
-+
-+ /* Protected by dev_rwsem or don't need any protection */
-+ unsigned int blocking:1;
-+ unsigned int cleanup_done:1;
-+ unsigned int tst:3;
-+ unsigned int queue_alg:4;
-+ unsigned int tas:1;
-+ unsigned int swp:1;
-+ unsigned int d_sense:1;
-+ unsigned int has_own_order_mgmt:1;
-+
-+ int (*generic_parse)(struct scst_cmd *cmd,
-+ int (*get_block)(struct scst_cmd *cmd));
-+
-+ int block;
-+ int def_block;
-+
-+ struct scst_mem_lim udev_mem_lim;
-+ struct sgv_pool *pool;
-+ struct sgv_pool *pool_clust;
-+
-+ uint8_t parse_type;
-+ uint8_t on_free_cmd_type;
-+ uint8_t memory_reuse_type;
-+ uint8_t partial_transfers_type;
-+ uint32_t partial_len;
-+
-+ struct scst_dev_type devtype;
-+
-+ /* Both protected by udev_cmd_threads.cmd_list_lock */
-+ unsigned int handle_counter;
-+ struct list_head ucmd_hash[1 << DEV_USER_CMD_HASH_ORDER];
-+
-+ struct scst_device *sdev;
-+
-+ int virt_id;
-+ struct list_head dev_list_entry;
-+ char name[SCST_MAX_NAME];
-+
-+ struct list_head cleanup_list_entry;
-+ struct completion cleanup_cmpl;
-+};
-+
-+/* Most fields are unprotected, since only one thread at a time can access them */
-+struct scst_user_cmd {
-+ struct scst_cmd *cmd;
-+ struct scst_user_dev *dev;
-+
-+ atomic_t ucmd_ref;
-+
-+ unsigned int buff_cached:1;
-+ unsigned int buf_dirty:1;
-+ unsigned int background_exec:1;
-+ unsigned int aborted:1;
-+
-+ struct scst_user_cmd *buf_ucmd;
-+
-+ int cur_data_page;
-+ int num_data_pages;
-+ int first_page_offset;
-+ unsigned long ubuff;
-+ struct page **data_pages;
-+ struct sgv_pool_obj *sgv;
-+
-+ /*
-+ * Special flags, which can be accessed asynchronously (hence "long").
-+ * Protected by udev_cmd_threads.cmd_list_lock.
-+ */
-+ unsigned long sent_to_user:1;
-+ unsigned long jammed:1;
-+ unsigned long this_state_unjammed:1;
-+ unsigned long seen_by_user:1; /* here only as a small optimization */
-+
-+ unsigned int state;
-+
-+ struct list_head ready_cmd_list_entry;
-+
-+ unsigned int h;
-+ struct list_head hash_list_entry;
-+
-+ int user_cmd_payload_len;
-+ struct scst_user_get_cmd user_cmd;
-+
-+ /* cmpl used only by ATTACH_SESS, mcmd used only by TM */
-+ union {
-+ struct completion *cmpl;
-+ struct scst_mgmt_cmd *mcmd;
-+ };
-+ int result;
-+};
-+
-+static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
-+ gfp_t gfp_mask);
-+static void dev_user_free_ucmd(struct scst_user_cmd *ucmd);
-+
-+static int dev_user_parse(struct scst_cmd *cmd);
-+static int dev_user_alloc_data_buf(struct scst_cmd *cmd);
-+static int dev_user_exec(struct scst_cmd *cmd);
-+static void dev_user_on_free_cmd(struct scst_cmd *cmd);
-+static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
-+ struct scst_tgt_dev *tgt_dev);
-+
-+static int dev_user_disk_done(struct scst_cmd *cmd);
-+static int dev_user_tape_done(struct scst_cmd *cmd);
-+
-+static struct page *dev_user_alloc_pages(struct scatterlist *sg,
-+ gfp_t gfp_mask, void *priv);
-+static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
-+ void *priv);
-+
-+static void dev_user_add_to_ready(struct scst_user_cmd *ucmd);
-+
-+static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
-+ unsigned long *flags);
-+
-+static int dev_user_process_reply_on_free(struct scst_user_cmd *ucmd);
-+static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
-+ int status);
-+static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status);
-+static int dev_user_register_dev(struct file *file,
-+ const struct scst_user_dev_desc *dev_desc);
-+static int dev_user_unregister_dev(struct file *file);
-+static int dev_user_flush_cache(struct file *file);
-+static int dev_user_capacity_changed(struct file *file);
-+static int dev_user_prealloc_buffer(struct file *file, void __user *arg);
-+static int __dev_user_set_opt(struct scst_user_dev *dev,
-+ const struct scst_user_opt *opt);
-+static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt);
-+static int dev_user_get_opt(struct file *file, void __user *arg);
-+
-+static unsigned int dev_user_poll(struct file *filp, poll_table *wait);
-+static long dev_user_ioctl(struct file *file, unsigned int cmd,
-+ unsigned long arg);
-+static int dev_user_release(struct inode *inode, struct file *file);
-+static int dev_user_exit_dev(struct scst_user_dev *dev);
-+
-+static ssize_t dev_user_sysfs_commands_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf);
-+
-+static struct kobj_attribute dev_user_commands_attr =
-+ __ATTR(commands, S_IRUGO, dev_user_sysfs_commands_show, NULL);
-+
-+static const struct attribute *dev_user_dev_attrs[] = {
-+ &dev_user_commands_attr.attr,
-+ NULL,
-+};
-+
-+static int dev_usr_parse(struct scst_cmd *cmd);
-+
-+/** Data **/
-+
-+static struct kmem_cache *user_cmd_cachep;
-+static struct kmem_cache *user_get_cmd_cachep;
-+
-+static DEFINE_MUTEX(dev_priv_mutex);
-+
-+static const struct file_operations dev_user_fops = {
-+ .poll = dev_user_poll,
-+ .unlocked_ioctl = dev_user_ioctl,
-+#ifdef CONFIG_COMPAT
-+ .compat_ioctl = dev_user_ioctl,
-+#endif
-+ .release = dev_user_release,
-+};
-+
-+static struct scst_dev_type dev_user_devtype = {
-+ .name = DEV_USER_NAME,
-+ .type = -1,
-+ .parse = dev_usr_parse,
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
-+ .trace_flags = &trace_flag,
-+#endif
-+};
-+
-+static int dev_user_major;
-+
-+static struct class *dev_user_sysfs_class;
-+
-+static DEFINE_SPINLOCK(dev_list_lock);
-+static LIST_HEAD(dev_list);
-+
-+static DEFINE_SPINLOCK(cleanup_lock);
-+static LIST_HEAD(cleanup_list);
-+static DECLARE_WAIT_QUEUE_HEAD(cleanup_list_waitQ);
-+static struct task_struct *cleanup_thread;
-+
-+/*
-+ * Returns true if this command must be skipped, because it is already
-+ * being destroyed. Must be called under udev_cmd_threads.cmd_list_lock
-+ * with IRQs off.
-+ */
-+static inline bool ucmd_get_check(struct scst_user_cmd *ucmd)
-+{
-+ int r = atomic_inc_return(&ucmd->ucmd_ref);
-+ int res;
-+ if (unlikely(r == 1)) {
-+ TRACE_DBG("ucmd %p is being destroyed", ucmd);
-+ atomic_dec(&ucmd->ucmd_ref);
-+ res = true;
-+ /*
-+ * Necessary code is serialized by cmd_list_lock in
-+ * cmd_remove_hash()
-+ */
-+ } else {
-+ TRACE_DBG("ucmd %p, new ref_cnt %d", ucmd,
-+ atomic_read(&ucmd->ucmd_ref));
-+ res = false;
-+ }
-+ return res;
-+}
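ucmd_get_check() is the classic "take a reference unless the count has already dropped to zero" guard, essentially what later kernels standardized as kref_get_unless_zero(). A minimal stand-alone sketch of the same idea with C11 atomics (the names here are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

/* Returns true when the object is already being destroyed and must be
 * skipped, mirroring the polarity of ucmd_get_check() above. */
static bool ref_get_check_skip(atomic_int *ref)
{
	/* atomic_fetch_add() returns the previous value */
	if (atomic_fetch_add(ref, 1) == 0) {
		atomic_fetch_sub(ref, 1);	/* undo: count had hit zero */
		return true;
	}
	return false;
}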
-+
-+static inline void ucmd_get(struct scst_user_cmd *ucmd)
-+{
-+ TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
-+ atomic_inc(&ucmd->ucmd_ref);
-+ /*
-+ * For the same reason as in kref_get(). Let's be safe and
-+ * always do it.
-+ */
-+ smp_mb__after_atomic_inc();
-+}
-+
-+/* Must not be called under cmd_list_lock!! */
-+static inline void ucmd_put(struct scst_user_cmd *ucmd)
-+{
-+ TRACE_DBG("ucmd %p, ucmd_ref %d", ucmd, atomic_read(&ucmd->ucmd_ref));
-+
-+ EXTRACHECKS_BUG_ON(atomic_read(&ucmd->ucmd_ref) == 0);
-+
-+ if (atomic_dec_and_test(&ucmd->ucmd_ref))
-+ dev_user_free_ucmd(ucmd);
-+}
-+
-+static inline int calc_num_pg(unsigned long buf, int len)
-+{
-+ len += buf & ~PAGE_MASK;
-+ return (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);
-+}
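calc_num_pg() counts the pages touched by a len-byte buffer starting at buf, including a possibly unaligned head. A quick stand-alone check; the EX_* macros are example-local stand-ins for PAGE_SHIFT/PAGE_MASK, assuming 4 KiB pages:

#include <assert.h>

#define EX_PAGE_SHIFT	12
#define EX_PAGE_MASK	(~((1UL << EX_PAGE_SHIFT) - 1))

static int ex_calc_num_pg(unsigned long buf, int len)
{
	len += buf & ~EX_PAGE_MASK;	/* add the in-page start offset */
	return (len >> EX_PAGE_SHIFT) + ((len & ~EX_PAGE_MASK) != 0);
}

int main(void)
{
	assert(ex_calc_num_pg(0x1000, 0x2000) == 2);	/* page aligned */
	assert(ex_calc_num_pg(0x1008, 0x2000) == 3);	/* unaligned head */
	return 0;
}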
-+
-+static void __dev_user_not_reg(void)
-+{
-+ TRACE_MGMT_DBG("%s", "Device not registered");
-+ return;
-+}
-+
-+static inline int dev_user_check_reg(struct scst_user_dev *dev)
-+{
-+ if (dev == NULL) {
-+ __dev_user_not_reg();
-+ return -ENODEV;
-+ }
-+ return 0;
-+}
-+
-+static inline int scst_user_cmd_hashfn(int h)
-+{
-+ return h & ((1 << DEV_USER_CMD_HASH_ORDER) - 1);
-+}
-+
-+static inline struct scst_user_cmd *__ucmd_find_hash(struct scst_user_dev *dev,
-+ unsigned int h)
-+{
-+ struct list_head *head;
-+ struct scst_user_cmd *ucmd;
-+
-+ head = &dev->ucmd_hash[scst_user_cmd_hashfn(h)];
-+ list_for_each_entry(ucmd, head, hash_list_entry) {
-+ if (ucmd->h == h) {
-+ TRACE_DBG("Found ucmd %p", ucmd);
-+ return ucmd;
-+ }
-+ }
-+ return NULL;
-+}
-+
-+static void cmd_insert_hash(struct scst_user_cmd *ucmd)
-+{
-+ struct list_head *head;
-+ struct scst_user_dev *dev = ucmd->dev;
-+ struct scst_user_cmd *u;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&dev->udev_cmd_threads.cmd_list_lock, flags);
-+ do {
-+ ucmd->h = dev->handle_counter++;
-+ u = __ucmd_find_hash(dev, ucmd->h);
-+ } while (u != NULL);
-+ head = &dev->ucmd_hash[scst_user_cmd_hashfn(ucmd->h)];
-+ list_add_tail(&ucmd->hash_list_entry, head);
-+ spin_unlock_irqrestore(&dev->udev_cmd_threads.cmd_list_lock, flags);
-+
-+ TRACE_DBG("Inserted ucmd %p, h=%d (dev %s)", ucmd, ucmd->h, dev->name);
-+ return;
-+}
-+
-+static inline void cmd_remove_hash(struct scst_user_cmd *ucmd)
-+{
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ucmd->dev->udev_cmd_threads.cmd_list_lock, flags);
-+ list_del(&ucmd->hash_list_entry);
-+ spin_unlock_irqrestore(&ucmd->dev->udev_cmd_threads.cmd_list_lock, flags);
-+
-+ TRACE_DBG("Removed ucmd %p, h=%d", ucmd, ucmd->h);
-+ return;
-+}
-+
-+static void dev_user_free_ucmd(struct scst_user_cmd *ucmd)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_MEM("Freeing ucmd %p", ucmd);
-+
-+ cmd_remove_hash(ucmd);
-+ EXTRACHECKS_BUG_ON(ucmd->cmd != NULL);
-+
-+ kmem_cache_free(user_cmd_cachep, ucmd);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static struct page *dev_user_alloc_pages(struct scatterlist *sg,
-+ gfp_t gfp_mask, void *priv)
-+{
-+ struct scst_user_cmd *ucmd = priv;
-+ int offset = 0;
-+
-+ TRACE_ENTRY();
-+
-+	/* *sg is supposed to be zeroed */
-+
-+ TRACE_MEM("ucmd %p, ubuff %lx, ucmd->cur_data_page %d", ucmd,
-+ ucmd->ubuff, ucmd->cur_data_page);
-+
-+ if (ucmd->cur_data_page == 0) {
-+ TRACE_MEM("ucmd->first_page_offset %d",
-+ ucmd->first_page_offset);
-+ offset = ucmd->first_page_offset;
-+ ucmd_get(ucmd);
-+ }
-+
-+ if (ucmd->cur_data_page >= ucmd->num_data_pages)
-+ goto out;
-+
-+ sg_set_page(sg, ucmd->data_pages[ucmd->cur_data_page],
-+ PAGE_SIZE - offset, offset);
-+ ucmd->cur_data_page++;
-+
-+ TRACE_MEM("page=%p, length=%d, offset=%d", sg_page(sg), sg->length,
-+ sg->offset);
-+ TRACE_BUFFER("Page data", sg_virt(sg), sg->length);
-+
-+out:
-+ TRACE_EXIT();
-+ return sg_page(sg);
-+}
-+
-+static void dev_user_on_cached_mem_free(struct scst_user_cmd *ucmd)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_MEM("Preparing ON_CACHED_MEM_FREE (ucmd %p, h %d, ubuff %lx)",
-+ ucmd, ucmd->h, ucmd->ubuff);
-+
-+ ucmd->user_cmd_payload_len =
-+ offsetof(struct scst_user_get_cmd, on_cached_mem_free) +
-+ sizeof(ucmd->user_cmd.on_cached_mem_free);
-+ ucmd->user_cmd.cmd_h = ucmd->h;
-+ ucmd->user_cmd.subcode = SCST_USER_ON_CACHED_MEM_FREE;
-+ ucmd->user_cmd.on_cached_mem_free.pbuf = ucmd->ubuff;
-+
-+ ucmd->state = UCMD_STATE_ON_CACHE_FREEING;
-+
-+ dev_user_add_to_ready(ucmd);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void dev_user_unmap_buf(struct scst_user_cmd *ucmd)
-+{
-+ int i;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MEM("Unmapping data pages (ucmd %p, ubuff %lx, num %d)", ucmd,
-+ ucmd->ubuff, ucmd->num_data_pages);
-+
-+ for (i = 0; i < ucmd->num_data_pages; i++) {
-+ struct page *page = ucmd->data_pages[i];
-+
-+ if (ucmd->buf_dirty)
-+ SetPageDirty(page);
-+
-+ page_cache_release(page);
-+ }
-+
-+ kfree(ucmd->data_pages);
-+ ucmd->data_pages = NULL;
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void __dev_user_free_sg_entries(struct scst_user_cmd *ucmd)
-+{
-+ TRACE_ENTRY();
-+
-+ BUG_ON(ucmd->data_pages == NULL);
-+
-+ TRACE_MEM("Freeing data pages (ucmd=%p, ubuff=%lx, buff_cached=%d)",
-+ ucmd, ucmd->ubuff, ucmd->buff_cached);
-+
-+ dev_user_unmap_buf(ucmd);
-+
-+ if (ucmd->buff_cached)
-+ dev_user_on_cached_mem_free(ucmd);
-+ else
-+ ucmd_put(ucmd);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void dev_user_free_sg_entries(struct scatterlist *sg, int sg_count,
-+ void *priv)
-+{
-+ struct scst_user_cmd *ucmd = priv;
-+
-+ TRACE_MEM("Freeing data pages (sg=%p, sg_count=%d, priv %p)", sg,
-+ sg_count, ucmd);
-+
-+ __dev_user_free_sg_entries(ucmd);
-+
-+ return;
-+}
-+
-+static inline int is_buff_cached(struct scst_user_cmd *ucmd)
-+{
-+ int mem_reuse_type = ucmd->dev->memory_reuse_type;
-+
-+ if ((mem_reuse_type == SCST_USER_MEM_REUSE_ALL) ||
-+ ((ucmd->cmd->data_direction == SCST_DATA_READ) &&
-+ (mem_reuse_type == SCST_USER_MEM_REUSE_READ)) ||
-+ ((ucmd->cmd->data_direction == SCST_DATA_WRITE) &&
-+ (mem_reuse_type == SCST_USER_MEM_REUSE_WRITE)))
-+ return 1;
-+ else
-+ return 0;
-+}
-+
-+static inline int is_need_offs_page(unsigned long buf, int len)
-+{
-+ return ((buf & ~PAGE_MASK) != 0) &&
-+ ((buf & PAGE_MASK) != ((buf+len-1) & PAGE_MASK));
-+}
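Worked examples for is_need_offs_page(): it is true only when the buffer both starts at a nonzero in-page offset and spills past its first page. Assuming 4 KiB pages:

/* With 4 KiB pages (PAGE_MASK == ~0xfffUL):
 *   buf = 0x1008, len = 16      -> false (unaligned, but one page)
 *   buf = 0x1008, len = 0x1000  -> true  (unaligned AND crosses a page)
 *   buf = 0x1000, len = 0x2000  -> false (crosses pages, offset is 0)
 */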
-+
-+/*
-+ * Returns 0 for success, <0 for fatal failure, >0 - need pages.
-+ * Unmaps the buffer, if needed in case of error
-+ */
-+static int dev_user_alloc_sg(struct scst_user_cmd *ucmd, int cached_buff)
-+{
-+ int res = 0;
-+ struct scst_cmd *cmd = ucmd->cmd;
-+ struct scst_user_dev *dev = ucmd->dev;
-+ struct sgv_pool *pool;
-+ gfp_t gfp_mask;
-+ int flags = 0;
-+ int bufflen, orig_bufflen;
-+ int last_len = 0;
-+ int out_sg_pages = 0;
-+
-+ TRACE_ENTRY();
-+
-+ gfp_mask = __GFP_NOWARN;
-+ gfp_mask |= (scst_cmd_atomic(cmd) ? GFP_ATOMIC : GFP_KERNEL);
-+
-+ if (cmd->data_direction != SCST_DATA_BIDI) {
-+ orig_bufflen = cmd->bufflen;
-+ pool = cmd->tgt_dev->dh_priv;
-+ } else {
-+ /* Make out_sg->offset 0 */
-+ int len = cmd->bufflen + ucmd->first_page_offset;
-+ out_sg_pages = (len >> PAGE_SHIFT) + ((len & ~PAGE_MASK) != 0);
-+ orig_bufflen = (out_sg_pages << PAGE_SHIFT) + cmd->out_bufflen;
-+ pool = dev->pool;
-+ }
-+ bufflen = orig_bufflen;
-+
-+ EXTRACHECKS_BUG_ON(bufflen == 0);
-+
-+ if (cached_buff) {
-+ flags |= SGV_POOL_RETURN_OBJ_ON_ALLOC_FAIL;
-+ if (ucmd->ubuff == 0)
-+ flags |= SGV_POOL_NO_ALLOC_ON_CACHE_MISS;
-+ } else {
-+ TRACE_MEM("%s", "Not cached buff");
-+ flags |= SGV_POOL_ALLOC_NO_CACHED;
-+ if (ucmd->ubuff == 0) {
-+ res = 1;
-+ goto out;
-+ }
-+ bufflen += ucmd->first_page_offset;
-+ if (is_need_offs_page(ucmd->ubuff, orig_bufflen))
-+ last_len = bufflen & ~PAGE_MASK;
-+ else
-+ last_len = orig_bufflen & ~PAGE_MASK;
-+ }
-+ ucmd->buff_cached = cached_buff;
-+
-+ cmd->sg = sgv_pool_alloc(pool, bufflen, gfp_mask, flags, &cmd->sg_cnt,
-+ &ucmd->sgv, &dev->udev_mem_lim, ucmd);
-+ if (cmd->sg != NULL) {
-+ struct scst_user_cmd *buf_ucmd = sgv_get_priv(ucmd->sgv);
-+
-+ TRACE_MEM("Buf ucmd %p (cmd->sg_cnt %d, last seg len %d, "
-+ "last_len %d, bufflen %d)", buf_ucmd, cmd->sg_cnt,
-+ cmd->sg[cmd->sg_cnt-1].length, last_len, bufflen);
-+
-+ ucmd->ubuff = buf_ucmd->ubuff;
-+ ucmd->buf_ucmd = buf_ucmd;
-+
-+ EXTRACHECKS_BUG_ON((ucmd->data_pages != NULL) &&
-+ (ucmd != buf_ucmd));
-+
-+ if (last_len != 0) {
-+ cmd->sg[cmd->sg_cnt-1].length &= PAGE_MASK;
-+ cmd->sg[cmd->sg_cnt-1].length += last_len;
-+ }
-+
-+ TRACE_MEM("Buf alloced (ucmd %p, cached_buff %d, ubuff %lx, "
-+ "last seg len %d)", ucmd, cached_buff, ucmd->ubuff,
-+ cmd->sg[cmd->sg_cnt-1].length);
-+
-+ if (cmd->data_direction == SCST_DATA_BIDI) {
-+ cmd->out_sg = &cmd->sg[out_sg_pages];
-+ cmd->out_sg_cnt = cmd->sg_cnt - out_sg_pages;
-+ cmd->sg_cnt = out_sg_pages;
-+ TRACE_MEM("cmd %p, out_sg %p, out_sg_cnt %d, sg_cnt %d",
-+ cmd, cmd->out_sg, cmd->out_sg_cnt, cmd->sg_cnt);
-+ }
-+
-+ if (unlikely(cmd->sg_cnt > cmd->tgt_dev->max_sg_cnt)) {
-+ static int ll;
-+ if ((ll < 10) || TRACING_MINOR()) {
-+ PRINT_INFO("Unable to complete command due to "
-+ "SG IO count limitation (requested %d, "
-+ "available %d, tgt lim %d)",
-+ cmd->sg_cnt, cmd->tgt_dev->max_sg_cnt,
-+ cmd->tgt->sg_tablesize);
-+ ll++;
-+ }
-+ cmd->sg = NULL;
-+ /* sgv will be freed in dev_user_free_sgv() */
-+ res = -1;
-+ }
-+ } else {
-+		TRACE_MEM("Buf not alloced (ucmd %p, h %d, buff_cached %d, "
-+			"sg_cnt %d, ubuff %lx, sgv %p)", ucmd, ucmd->h,
-+ ucmd->buff_cached, cmd->sg_cnt, ucmd->ubuff, ucmd->sgv);
-+ if (unlikely(cmd->sg_cnt == 0)) {
-+ TRACE_MEM("Refused allocation (ucmd %p)", ucmd);
-+ BUG_ON(ucmd->sgv != NULL);
-+ res = -1;
-+ } else {
-+ switch (ucmd->state) {
-+ case UCMD_STATE_BUF_ALLOCING:
-+ res = 1;
-+ break;
-+ case UCMD_STATE_EXECING:
-+ res = -1;
-+ break;
-+ default:
-+ BUG();
-+ break;
-+ }
-+ }
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int dev_user_alloc_space(struct scst_user_cmd *ucmd)
-+{
-+ int rc, res = SCST_CMD_STATE_DEFAULT;
-+ struct scst_cmd *cmd = ucmd->cmd;
-+
-+ TRACE_ENTRY();
-+
-+ ucmd->state = UCMD_STATE_BUF_ALLOCING;
-+ scst_cmd_set_dh_data_buff_alloced(cmd);
-+
-+ rc = dev_user_alloc_sg(ucmd, is_buff_cached(ucmd));
-+ if (rc == 0)
-+ goto out;
-+ else if (rc < 0) {
-+ scst_set_busy(cmd);
-+ res = scst_set_cmd_abnormal_done_state(cmd);
-+ goto out;
-+ }
-+
-+ if (!(cmd->data_direction & SCST_DATA_WRITE) &&
-+ !scst_is_cmd_local(cmd)) {
-+ TRACE_DBG("Delayed alloc, ucmd %p", ucmd);
-+ goto out;
-+ }
-+
-+ ucmd->user_cmd_payload_len =
-+ offsetof(struct scst_user_get_cmd, alloc_cmd) +
-+ sizeof(ucmd->user_cmd.alloc_cmd);
-+ ucmd->user_cmd.cmd_h = ucmd->h;
-+ ucmd->user_cmd.subcode = SCST_USER_ALLOC_MEM;
-+ ucmd->user_cmd.alloc_cmd.sess_h = (unsigned long)cmd->tgt_dev;
-+ memcpy(ucmd->user_cmd.alloc_cmd.cdb, cmd->cdb,
-+ min_t(int, SCST_MAX_CDB_SIZE, cmd->cdb_len));
-+ ucmd->user_cmd.alloc_cmd.cdb_len = cmd->cdb_len;
-+ ucmd->user_cmd.alloc_cmd.alloc_len = ucmd->buff_cached ?
-+ (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
-+ ucmd->user_cmd.alloc_cmd.queue_type = cmd->queue_type;
-+ ucmd->user_cmd.alloc_cmd.data_direction = cmd->data_direction;
-+ ucmd->user_cmd.alloc_cmd.sn = cmd->tgt_sn;
-+
-+ dev_user_add_to_ready(ucmd);
-+
-+ res = SCST_CMD_STATE_STOP;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct scst_user_cmd *dev_user_alloc_ucmd(struct scst_user_dev *dev,
-+ gfp_t gfp_mask)
-+{
-+ struct scst_user_cmd *ucmd = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ ucmd = kmem_cache_zalloc(user_cmd_cachep, gfp_mask);
-+ if (unlikely(ucmd == NULL)) {
-+ TRACE(TRACE_OUT_OF_MEM, "Unable to allocate "
-+ "user cmd (gfp_mask %x)", gfp_mask);
-+ goto out;
-+ }
-+ ucmd->dev = dev;
-+ atomic_set(&ucmd->ucmd_ref, 1);
-+
-+ cmd_insert_hash(ucmd);
-+
-+ TRACE_MEM("ucmd %p allocated", ucmd);
-+
-+out:
-+ TRACE_EXIT_HRES((unsigned long)ucmd);
-+ return ucmd;
-+}
-+
-+static int dev_user_get_block(struct scst_cmd *cmd)
-+{
-+ struct scst_user_dev *dev = cmd->dev->dh_priv;
-+ /*
-+ * No need for locks here, since *_detach() can not be
-+ * called, when there are existing commands.
-+ */
-+ TRACE_EXIT_RES(dev->block);
-+ return dev->block;
-+}
-+
-+static int dev_user_parse(struct scst_cmd *cmd)
-+{
-+ int rc, res = SCST_CMD_STATE_DEFAULT;
-+ struct scst_user_cmd *ucmd;
-+ int atomic = scst_cmd_atomic(cmd);
-+ struct scst_user_dev *dev = cmd->dev->dh_priv;
-+ gfp_t gfp_mask = atomic ? GFP_ATOMIC : GFP_KERNEL;
-+
-+ TRACE_ENTRY();
-+
-+ if (cmd->dh_priv == NULL) {
-+ ucmd = dev_user_alloc_ucmd(dev, gfp_mask);
-+ if (unlikely(ucmd == NULL)) {
-+ if (atomic) {
-+ res = SCST_CMD_STATE_NEED_THREAD_CTX;
-+ goto out;
-+ } else {
-+ scst_set_busy(cmd);
-+ goto out_error;
-+ }
-+ }
-+ ucmd->cmd = cmd;
-+ cmd->dh_priv = ucmd;
-+ } else {
-+ ucmd = cmd->dh_priv;
-+ TRACE_DBG("Used ucmd %p, state %x", ucmd, ucmd->state);
-+ }
-+
-+ TRACE_DBG("ucmd %p, cmd %p, state %x", ucmd, cmd, ucmd->state);
-+
-+ if (ucmd->state == UCMD_STATE_PARSING) {
-+		/* We've already done the parse */
-+ goto done;
-+ }
-+
-+ EXTRACHECKS_BUG_ON(ucmd->state != UCMD_STATE_NEW);
-+
-+ switch (dev->parse_type) {
-+ case SCST_USER_PARSE_STANDARD:
-+ TRACE_DBG("PARSE STANDARD: ucmd %p", ucmd);
-+ rc = dev->generic_parse(cmd, dev_user_get_block);
-+ if (rc != 0)
-+ goto out_invalid;
-+ break;
-+
-+ case SCST_USER_PARSE_EXCEPTION:
-+ TRACE_DBG("PARSE EXCEPTION: ucmd %p", ucmd);
-+ rc = dev->generic_parse(cmd, dev_user_get_block);
-+ if ((rc == 0) && (cmd->op_flags & SCST_INFO_VALID))
-+ break;
-+ else if (rc == SCST_CMD_STATE_NEED_THREAD_CTX) {
-+ TRACE_MEM("Restarting PARSE to thread context "
-+ "(ucmd %p)", ucmd);
-+ res = SCST_CMD_STATE_NEED_THREAD_CTX;
-+ goto out;
-+ }
-+		/* else fall through */
-+
-+ case SCST_USER_PARSE_CALL:
-+ TRACE_DBG("Preparing PARSE for user space (ucmd=%p, h=%d, "
-+ "bufflen %d)", ucmd, ucmd->h, cmd->bufflen);
-+ ucmd->user_cmd_payload_len =
-+ offsetof(struct scst_user_get_cmd, parse_cmd) +
-+ sizeof(ucmd->user_cmd.parse_cmd);
-+ ucmd->user_cmd.cmd_h = ucmd->h;
-+ ucmd->user_cmd.subcode = SCST_USER_PARSE;
-+ ucmd->user_cmd.parse_cmd.sess_h = (unsigned long)cmd->tgt_dev;
-+ memcpy(ucmd->user_cmd.parse_cmd.cdb, cmd->cdb,
-+ min_t(int, SCST_MAX_CDB_SIZE, cmd->cdb_len));
-+ ucmd->user_cmd.parse_cmd.cdb_len = cmd->cdb_len;
-+ ucmd->user_cmd.parse_cmd.timeout = cmd->timeout / HZ;
-+ ucmd->user_cmd.parse_cmd.bufflen = cmd->bufflen;
-+ ucmd->user_cmd.parse_cmd.out_bufflen = cmd->out_bufflen;
-+ ucmd->user_cmd.parse_cmd.queue_type = cmd->queue_type;
-+ ucmd->user_cmd.parse_cmd.data_direction = cmd->data_direction;
-+ ucmd->user_cmd.parse_cmd.expected_values_set =
-+ cmd->expected_values_set;
-+ ucmd->user_cmd.parse_cmd.expected_data_direction =
-+ cmd->expected_data_direction;
-+ ucmd->user_cmd.parse_cmd.expected_transfer_len =
-+ cmd->expected_transfer_len;
-+ ucmd->user_cmd.parse_cmd.expected_out_transfer_len =
-+ cmd->expected_out_transfer_len;
-+ ucmd->user_cmd.parse_cmd.sn = cmd->tgt_sn;
-+ ucmd->user_cmd.parse_cmd.op_flags = cmd->op_flags;
-+ ucmd->state = UCMD_STATE_PARSING;
-+ dev_user_add_to_ready(ucmd);
-+ res = SCST_CMD_STATE_STOP;
-+ goto out;
-+
-+ default:
-+ BUG();
-+ goto out;
-+ }
-+
-+done:
-+ if (cmd->bufflen == 0) {
-+ /*
-+		 * According to SPC, a bufflen of 0 for data transfer commands
-+		 * isn't an error, so we need to fix up the transfer direction.
-+ */
-+ cmd->data_direction = SCST_DATA_NONE;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_invalid:
-+ PRINT_ERROR("PARSE failed (ucmd %p, rc %d)", ucmd, rc);
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_invalid_opcode));
-+
-+out_error:
-+ res = scst_set_cmd_abnormal_done_state(cmd);
-+ goto out;
-+}
-+
-+static int dev_user_alloc_data_buf(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_DEFAULT;
-+ struct scst_user_cmd *ucmd = cmd->dh_priv;
-+
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON((ucmd->state != UCMD_STATE_NEW) &&
-+ (ucmd->state != UCMD_STATE_PARSING) &&
-+ (ucmd->state != UCMD_STATE_BUF_ALLOCING));
-+
-+ res = dev_user_alloc_space(ucmd);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void dev_user_flush_dcache(struct scst_user_cmd *ucmd)
-+{
-+ struct scst_user_cmd *buf_ucmd = ucmd->buf_ucmd;
-+ unsigned long start = buf_ucmd->ubuff;
-+ int i, bufflen = ucmd->cmd->bufflen;
-+
-+ TRACE_ENTRY();
-+
-+ if (start == 0)
-+ goto out;
-+
-+ /*
-+	 * Possibly, flushing all the pages from ucmd->cmd->sg could be
-+	 * faster, since they should be cache hot, while ucmd->buf_ucmd and
-+	 * buf_ucmd->data_pages are cache cold. On the other hand,
-+	 * sizeof(buf_ucmd->data_pages[0]) is considerably smaller than
-+	 * sizeof(ucmd->cmd->sg[0]), so for big buffers walking the
-+	 * data_pages array can lead to fewer cache misses. Real numbers
-+	 * are needed. ToDo.
-+ */
-+
-+ for (i = 0; (bufflen > 0) && (i < buf_ucmd->num_data_pages); i++) {
-+ struct page *page __attribute__((unused));
-+ page = buf_ucmd->data_pages[i];
-+#ifdef ARCH_HAS_FLUSH_ANON_PAGE
-+ struct vm_area_struct *vma = find_vma(current->mm, start);
-+ if (vma != NULL)
-+ flush_anon_page(vma, page, start);
-+#endif
-+ flush_dcache_page(page);
-+ start += PAGE_SIZE;
-+ bufflen -= PAGE_SIZE;
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int dev_user_exec(struct scst_cmd *cmd)
-+{
-+ struct scst_user_cmd *ucmd = cmd->dh_priv;
-+ int res = SCST_EXEC_COMPLETED;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Preparing EXEC for user space (ucmd=%p, h=%d, "
-+ "bufflen %d, data_len %d, ubuff %lx)", ucmd, ucmd->h,
-+ cmd->bufflen, cmd->data_len, ucmd->ubuff);
-+
-+ if (cmd->data_direction & SCST_DATA_WRITE)
-+ dev_user_flush_dcache(ucmd);
-+
-+ ucmd->user_cmd_payload_len =
-+ offsetof(struct scst_user_get_cmd, exec_cmd) +
-+ sizeof(ucmd->user_cmd.exec_cmd);
-+ ucmd->user_cmd.cmd_h = ucmd->h;
-+ ucmd->user_cmd.subcode = SCST_USER_EXEC;
-+ ucmd->user_cmd.exec_cmd.sess_h = (unsigned long)cmd->tgt_dev;
-+ memcpy(ucmd->user_cmd.exec_cmd.cdb, cmd->cdb,
-+ min_t(int, SCST_MAX_CDB_SIZE, cmd->cdb_len));
-+ ucmd->user_cmd.exec_cmd.cdb_len = cmd->cdb_len;
-+ ucmd->user_cmd.exec_cmd.bufflen = cmd->bufflen;
-+ ucmd->user_cmd.exec_cmd.data_len = cmd->data_len;
-+ ucmd->user_cmd.exec_cmd.pbuf = ucmd->ubuff;
-+ if ((ucmd->ubuff == 0) && (cmd->data_direction != SCST_DATA_NONE)) {
-+ ucmd->user_cmd.exec_cmd.alloc_len = ucmd->buff_cached ?
-+ (cmd->sg_cnt << PAGE_SHIFT) : cmd->bufflen;
-+ }
-+ ucmd->user_cmd.exec_cmd.queue_type = cmd->queue_type;
-+ ucmd->user_cmd.exec_cmd.data_direction = cmd->data_direction;
-+ ucmd->user_cmd.exec_cmd.partial = 0;
-+ ucmd->user_cmd.exec_cmd.timeout = cmd->timeout / HZ;
-+ ucmd->user_cmd.exec_cmd.p_out_buf = ucmd->ubuff +
-+ (cmd->sg_cnt << PAGE_SHIFT);
-+ ucmd->user_cmd.exec_cmd.out_bufflen = cmd->out_bufflen;
-+ ucmd->user_cmd.exec_cmd.sn = cmd->tgt_sn;
-+
-+ ucmd->state = UCMD_STATE_EXECING;
-+
-+ dev_user_add_to_ready(ucmd);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void dev_user_free_sgv(struct scst_user_cmd *ucmd)
-+{
-+ if (ucmd->sgv != NULL) {
-+ sgv_pool_free(ucmd->sgv, &ucmd->dev->udev_mem_lim);
-+ ucmd->sgv = NULL;
-+ } else if (ucmd->data_pages != NULL) {
-+ /* We mapped pages, but for some reason didn't allocate them */
-+ ucmd_get(ucmd);
-+ __dev_user_free_sg_entries(ucmd);
-+ }
-+ return;
-+}
-+
-+static void dev_user_on_free_cmd(struct scst_cmd *cmd)
-+{
-+ struct scst_user_cmd *ucmd = cmd->dh_priv;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(ucmd == NULL))
-+ goto out;
-+
-+ TRACE_MEM("ucmd %p, cmd %p, buff_cached %d, ubuff %lx", ucmd, ucmd->cmd,
-+ ucmd->buff_cached, ucmd->ubuff);
-+
-+ ucmd->cmd = NULL;
-+ if ((cmd->data_direction & SCST_DATA_WRITE) && ucmd->buf_ucmd != NULL)
-+ ucmd->buf_ucmd->buf_dirty = 1;
-+
-+ if (ucmd->dev->on_free_cmd_type == SCST_USER_ON_FREE_CMD_IGNORE) {
-+ ucmd->state = UCMD_STATE_ON_FREE_SKIPPED;
-+ /* The state assignment must be before freeing sgv! */
-+ goto out_reply;
-+ }
-+
-+ if (unlikely(!ucmd->seen_by_user)) {
-+ TRACE_MGMT_DBG("Not seen by user ucmd %p", ucmd);
-+ goto out_reply;
-+ }
-+
-+ ucmd->user_cmd_payload_len =
-+ offsetof(struct scst_user_get_cmd, on_free_cmd) +
-+ sizeof(ucmd->user_cmd.on_free_cmd);
-+ ucmd->user_cmd.cmd_h = ucmd->h;
-+ ucmd->user_cmd.subcode = SCST_USER_ON_FREE_CMD;
-+ ucmd->user_cmd.on_free_cmd.pbuf = ucmd->ubuff;
-+ ucmd->user_cmd.on_free_cmd.resp_data_len = cmd->resp_data_len;
-+ ucmd->user_cmd.on_free_cmd.buffer_cached = ucmd->buff_cached;
-+ ucmd->user_cmd.on_free_cmd.aborted = ucmd->aborted;
-+ ucmd->user_cmd.on_free_cmd.status = cmd->status;
-+ ucmd->user_cmd.on_free_cmd.delivery_status = cmd->delivery_status;
-+
-+ ucmd->state = UCMD_STATE_ON_FREEING;
-+
-+ dev_user_add_to_ready(ucmd);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+
-+out_reply:
-+ dev_user_process_reply_on_free(ucmd);
-+ goto out;
-+}
-+
-+static void dev_user_set_block(struct scst_cmd *cmd, int block)
-+{
-+ struct scst_user_dev *dev = cmd->dev->dh_priv;
-+ /*
-+	 * No need for locks here, since *_detach() cannot be
-+	 * called while there are outstanding commands.
-+ */
-+ TRACE_DBG("dev %p, new block %d", dev, block);
-+ if (block != 0)
-+ dev->block = block;
-+ else
-+ dev->block = dev->def_block;
-+ return;
-+}
-+
-+static int dev_user_disk_done(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_DEFAULT;
-+
-+ TRACE_ENTRY();
-+
-+ res = scst_block_generic_dev_done(cmd, dev_user_set_block);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int dev_user_tape_done(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_DEFAULT;
-+
-+ TRACE_ENTRY();
-+
-+ res = scst_tape_generic_dev_done(cmd, dev_user_set_block);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void dev_user_add_to_ready(struct scst_user_cmd *ucmd)
-+{
-+ struct scst_user_dev *dev = ucmd->dev;
-+ unsigned long flags;
-+ int do_wake = in_interrupt();
-+
-+ TRACE_ENTRY();
-+
-+ if (ucmd->cmd)
-+ do_wake |= ucmd->cmd->preprocessing_only;
-+
-+ spin_lock_irqsave(&dev->udev_cmd_threads.cmd_list_lock, flags);
-+
-+ ucmd->this_state_unjammed = 0;
-+
-+ if ((ucmd->state == UCMD_STATE_PARSING) ||
-+ (ucmd->state == UCMD_STATE_BUF_ALLOCING)) {
-+ /*
-+		 * If we don't put such commands at the queue head, then under
-+		 * high load we might delay threads waiting for memory
-+		 * allocations for too long and start losing NOPs, which would
-+		 * lead remote initiators to consider us unresponsive and
-+		 * stuck => broken connections, etc. If none of our commands
-+		 * completes within the NOP timeout to let the head commands
-+		 * go, then we are really overloaded and/or stuck.
-+ */
-+ TRACE_DBG("Adding ucmd %p (state %d) to head of ready "
-+ "cmd list", ucmd, ucmd->state);
-+ list_add(&ucmd->ready_cmd_list_entry,
-+ &dev->ready_cmd_list);
-+ } else if (unlikely(ucmd->state == UCMD_STATE_TM_EXECING) ||
-+ unlikely(ucmd->state == UCMD_STATE_ATTACH_SESS) ||
-+ unlikely(ucmd->state == UCMD_STATE_DETACH_SESS)) {
-+ TRACE_MGMT_DBG("Adding mgmt ucmd %p (state %d) to head of "
-+ "ready cmd list", ucmd, ucmd->state);
-+ list_add(&ucmd->ready_cmd_list_entry,
-+ &dev->ready_cmd_list);
-+ do_wake = 1;
-+ } else {
-+ if ((ucmd->cmd != NULL) &&
-+ unlikely((ucmd->cmd->queue_type == SCST_CMD_QUEUE_HEAD_OF_QUEUE))) {
-+ TRACE_DBG("Adding HQ ucmd %p to head of ready cmd list",
-+ ucmd);
-+ list_add(&ucmd->ready_cmd_list_entry,
-+ &dev->ready_cmd_list);
-+ } else {
-+ TRACE_DBG("Adding ucmd %p to ready cmd list", ucmd);
-+ list_add_tail(&ucmd->ready_cmd_list_entry,
-+ &dev->ready_cmd_list);
-+ }
-+ do_wake |= ((ucmd->state == UCMD_STATE_ON_CACHE_FREEING) ||
-+ (ucmd->state == UCMD_STATE_ON_FREEING));
-+ }
-+
-+ if (do_wake) {
-+ TRACE_DBG("Waking up dev %p", dev);
-+ wake_up(&dev->udev_cmd_threads.cmd_list_waitQ);
-+ }
-+
-+ spin_unlock_irqrestore(&dev->udev_cmd_threads.cmd_list_lock, flags);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int dev_user_map_buf(struct scst_user_cmd *ucmd, unsigned long ubuff,
-+ int num_pg)
-+{
-+ int res = 0, rc;
-+ int i;
-+ struct task_struct *tsk = current;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(ubuff == 0))
-+ goto out_nomem;
-+
-+ BUG_ON(ucmd->data_pages != NULL);
-+
-+ ucmd->num_data_pages = num_pg;
-+
-+ ucmd->data_pages =
-+ kmalloc(sizeof(*ucmd->data_pages) * ucmd->num_data_pages,
-+ GFP_KERNEL);
-+ if (ucmd->data_pages == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "Unable to allocate data_pages array "
-+ "(num_data_pages=%d)", ucmd->num_data_pages);
-+ res = -ENOMEM;
-+ goto out_nomem;
-+ }
-+
-+ TRACE_MEM("Mapping buffer (ucmd %p, ubuff %lx, ucmd->num_data_pages %d,"
-+ " first_page_offset %d, len %d)", ucmd, ubuff,
-+ ucmd->num_data_pages, (int)(ubuff & ~PAGE_MASK),
-+ (ucmd->cmd != NULL) ? ucmd->cmd->bufflen : -1);
-+
-+ down_read(&tsk->mm->mmap_sem);
-+ rc = get_user_pages(tsk, tsk->mm, ubuff, ucmd->num_data_pages,
-+ 1/*writable*/, 0/*don't force*/, ucmd->data_pages, NULL);
-+ up_read(&tsk->mm->mmap_sem);
-+
-+ /* get_user_pages() flushes dcache */
-+
-+ if (rc < ucmd->num_data_pages)
-+ goto out_unmap;
-+
-+ ucmd->ubuff = ubuff;
-+ ucmd->first_page_offset = (ubuff & ~PAGE_MASK);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_nomem:
-+ if (ucmd->cmd != NULL)
-+ scst_set_busy(ucmd->cmd);
-+	/* fall through */
-+
-+out_err:
-+ if (ucmd->cmd != NULL)
-+ scst_set_cmd_abnormal_done_state(ucmd->cmd);
-+ goto out;
-+
-+out_unmap:
-+ PRINT_ERROR("Failed to get %d user pages (rc %d)",
-+ ucmd->num_data_pages, rc);
-+ if (rc > 0) {
-+ for (i = 0; i < rc; i++)
-+ page_cache_release(ucmd->data_pages[i]);
-+ }
-+ kfree(ucmd->data_pages);
-+ ucmd->data_pages = NULL;
-+ res = -EFAULT;
-+ if (ucmd->cmd != NULL)
-+ scst_set_cmd_error(ucmd->cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out_err;
-+}
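dev_user_map_buf() pins the handler's buffer with get_user_pages() under mmap_sem, as 3.2-era kernels required. On newer kernels the same step would typically use the lockless helper instead; a hedged sketch of that variant (get_user_pages_fast() with the gup_flags signature of roughly 5.x kernels; 5.6+ would prefer pin_user_pages_fast()/unpin_user_page()):

#include <linux/mm.h>

/* Sketch only: pin num_pg writable user pages starting at ubuff,
 * releasing any partial pin on failure, like the error path above. */
static int ex_pin_user_buf(unsigned long ubuff, int num_pg,
			   struct page **pages)
{
	int i, rc;

	rc = get_user_pages_fast(ubuff, num_pg, FOLL_WRITE, pages);
	if (rc < num_pg) {
		for (i = 0; i < rc; i++)
			put_page(pages[i]);	/* undo the partial pin */
		return -EFAULT;
	}
	return 0;
}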
-+
-+static int dev_user_process_reply_alloc(struct scst_user_cmd *ucmd,
-+ struct scst_user_reply_cmd *reply)
-+{
-+ int res = 0;
-+ struct scst_cmd *cmd = ucmd->cmd;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("ucmd %p, pbuf %llx", ucmd, reply->alloc_reply.pbuf);
-+
-+ if (likely(reply->alloc_reply.pbuf != 0)) {
-+ int pages;
-+ if (ucmd->buff_cached) {
-+ if (unlikely((reply->alloc_reply.pbuf & ~PAGE_MASK) != 0)) {
-+ PRINT_ERROR("Supplied pbuf %llx isn't "
-+ "page aligned",
-+ reply->alloc_reply.pbuf);
-+ goto out_hwerr;
-+ }
-+ pages = cmd->sg_cnt;
-+ } else
-+ pages = calc_num_pg(reply->alloc_reply.pbuf,
-+ cmd->bufflen);
-+ res = dev_user_map_buf(ucmd, reply->alloc_reply.pbuf, pages);
-+ } else {
-+ scst_set_busy(ucmd->cmd);
-+ scst_set_cmd_abnormal_done_state(ucmd->cmd);
-+ }
-+
-+out_process:
-+ scst_post_alloc_data_buf(cmd);
-+ scst_process_active_cmd(cmd, false);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_hwerr:
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ scst_set_cmd_abnormal_done_state(ucmd->cmd);
-+ res = -EINVAL;
-+ goto out_process;
-+}
-+
-+static int dev_user_process_reply_parse(struct scst_user_cmd *ucmd,
-+ struct scst_user_reply_cmd *reply)
-+{
-+ int res = 0, rc;
-+ struct scst_user_scsi_cmd_reply_parse *preply =
-+ &reply->parse_reply;
-+ struct scst_cmd *cmd = ucmd->cmd;
-+
-+ TRACE_ENTRY();
-+
-+ if (preply->status != 0)
-+ goto out_status;
-+
-+ if (unlikely(preply->queue_type > SCST_CMD_QUEUE_ACA))
-+ goto out_inval;
-+
-+ if (unlikely((preply->data_direction != SCST_DATA_WRITE) &&
-+ (preply->data_direction != SCST_DATA_READ) &&
-+ (preply->data_direction != SCST_DATA_BIDI) &&
-+ (preply->data_direction != SCST_DATA_NONE)))
-+ goto out_inval;
-+
-+ if (unlikely((preply->data_direction != SCST_DATA_NONE) &&
-+ (preply->bufflen == 0)))
-+ goto out_inval;
-+
-+ if (unlikely((preply->bufflen < 0) || (preply->out_bufflen < 0) ||
-+ (preply->data_len < 0)))
-+ goto out_inval;
-+
-+ if (unlikely(preply->cdb_len > cmd->cdb_len))
-+ goto out_inval;
-+
-+ if (!(preply->op_flags & SCST_INFO_VALID))
-+ goto out_inval;
-+
-+	TRACE_DBG("ucmd %p, queue_type %x, data_direction %x, bufflen %d, "
-+ "data_len %d, pbuf %llx, cdb_len %d, op_flags %x", ucmd,
-+ preply->queue_type, preply->data_direction, preply->bufflen,
-+ preply->data_len, reply->alloc_reply.pbuf, preply->cdb_len,
-+ preply->op_flags);
-+
-+ cmd->queue_type = preply->queue_type;
-+ cmd->data_direction = preply->data_direction;
-+ cmd->bufflen = preply->bufflen;
-+ cmd->out_bufflen = preply->out_bufflen;
-+ cmd->data_len = preply->data_len;
-+ if (preply->cdb_len > 0)
-+ cmd->cdb_len = preply->cdb_len;
-+ cmd->op_flags = preply->op_flags;
-+
-+out_process:
-+ scst_post_parse(cmd);
-+ scst_process_active_cmd(cmd, false);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_inval:
-+ PRINT_ERROR("Invalid parse_reply parameters (LUN %lld, op %x, cmd %p)",
-+ (long long unsigned int)cmd->lun, cmd->cdb[0], cmd);
-+ PRINT_BUFFER("Invalid parse_reply", reply, sizeof(*reply));
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ res = -EINVAL;
-+ goto out_abnormal;
-+
-+out_hwerr_res_set:
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
-+
-+out_abnormal:
-+ scst_set_cmd_abnormal_done_state(cmd);
-+ goto out_process;
-+
-+out_status:
-+ TRACE_DBG("ucmd %p returned with error from user status %x",
-+ ucmd, preply->status);
-+
-+ if (preply->sense_len != 0) {
-+ int sense_len;
-+
-+ res = scst_alloc_sense(cmd, 0);
-+ if (res != 0)
-+ goto out_hwerr_res_set;
-+
-+ sense_len = min_t(int, cmd->sense_buflen, preply->sense_len);
-+
-+ rc = copy_from_user(cmd->sense,
-+ (void __user *)(unsigned long)preply->psense_buffer,
-+ sense_len);
-+ if (rc != 0) {
-+			PRINT_ERROR("Failed to copy %d bytes of sense", rc);
-+ res = -EFAULT;
-+ goto out_hwerr_res_set;
-+ }
-+ cmd->sense_valid_len = sense_len;
-+ }
-+ scst_set_cmd_error_status(cmd, preply->status);
-+ goto out_abnormal;
-+}
-+
-+static int dev_user_process_reply_on_free(struct scst_user_cmd *ucmd)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("ON FREE ucmd %p", ucmd);
-+
-+ dev_user_free_sgv(ucmd);
-+ ucmd_put(ucmd);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int dev_user_process_reply_on_cache_free(struct scst_user_cmd *ucmd)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("ON CACHE FREE ucmd %p", ucmd);
-+
-+ ucmd_put(ucmd);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int dev_user_process_reply_exec(struct scst_user_cmd *ucmd,
-+ struct scst_user_reply_cmd *reply)
-+{
-+ int res = 0;
-+ struct scst_user_scsi_cmd_reply_exec *ereply =
-+ &reply->exec_reply;
-+ struct scst_cmd *cmd = ucmd->cmd;
-+
-+ TRACE_ENTRY();
-+
-+ if (ereply->reply_type == SCST_EXEC_REPLY_COMPLETED) {
-+ if (ucmd->background_exec) {
-+ TRACE_DBG("Background ucmd %p finished", ucmd);
-+ ucmd_put(ucmd);
-+ goto out;
-+ }
-+ if (unlikely(ereply->resp_data_len > cmd->bufflen))
-+ goto out_inval;
-+ if (unlikely((cmd->data_direction != SCST_DATA_READ) &&
-+ (ereply->resp_data_len != 0)))
-+ goto out_inval;
-+ } else if (ereply->reply_type == SCST_EXEC_REPLY_BACKGROUND) {
-+ if (unlikely(ucmd->background_exec))
-+ goto out_inval;
-+ if (unlikely((cmd->data_direction & SCST_DATA_READ) ||
-+ (cmd->resp_data_len != 0)))
-+ goto out_inval;
-+ /*
-+		 * The background_exec assignment must come after the ucmd
-+		 * get. Otherwise, due to reordering, dev_user_process_reply()
-+		 * could destroy the ucmd before the reference is taken here.
-+ */
-+ ucmd_get(ucmd);
-+ ucmd->background_exec = 1;
-+ TRACE_DBG("Background ucmd %p", ucmd);
-+ goto out_compl;
-+ } else
-+ goto out_inval;
-+
-+ TRACE_DBG("ucmd %p, status %d, resp_data_len %d", ucmd,
-+ ereply->status, ereply->resp_data_len);
-+
-+ cmd->atomic = 0;
-+
-+ if (ereply->resp_data_len != 0) {
-+ if (ucmd->ubuff == 0) {
-+ int pages, rc;
-+ if (unlikely(ereply->pbuf == 0))
-+ goto out_busy;
-+ if (ucmd->buff_cached) {
-+ if (unlikely((ereply->pbuf & ~PAGE_MASK) != 0)) {
-+ PRINT_ERROR("Supplied pbuf %llx isn't "
-+ "page aligned", ereply->pbuf);
-+ goto out_hwerr;
-+ }
-+ pages = cmd->sg_cnt;
-+ } else
-+ pages = calc_num_pg(ereply->pbuf, cmd->bufflen);
-+ rc = dev_user_map_buf(ucmd, ereply->pbuf, pages);
-+ if ((rc != 0) || (ucmd->ubuff == 0))
-+ goto out_compl;
-+
-+ rc = dev_user_alloc_sg(ucmd, ucmd->buff_cached);
-+ if (unlikely(rc != 0))
-+ goto out_busy;
-+ } else
-+ dev_user_flush_dcache(ucmd);
-+ cmd->may_need_dma_sync = 1;
-+ scst_set_resp_data_len(cmd, ereply->resp_data_len);
-+ } else if (cmd->resp_data_len != ereply->resp_data_len) {
-+ if (ucmd->ubuff == 0) {
-+ /*
-+ * We have an empty SG, so can't call
-+ * scst_set_resp_data_len()
-+ */
-+ cmd->resp_data_len = ereply->resp_data_len;
-+ cmd->resid_possible = 1;
-+ } else
-+ scst_set_resp_data_len(cmd, ereply->resp_data_len);
-+ }
-+
-+ cmd->status = ereply->status;
-+ if (ereply->sense_len != 0) {
-+ int sense_len, rc;
-+
-+ res = scst_alloc_sense(cmd, 0);
-+ if (res != 0)
-+ goto out_compl;
-+
-+ sense_len = min_t(int, cmd->sense_buflen, ereply->sense_len);
-+
-+ rc = copy_from_user(cmd->sense,
-+ (void __user *)(unsigned long)ereply->psense_buffer,
-+ sense_len);
-+ if (rc != 0) {
-+			PRINT_ERROR("Failed to copy %d bytes of sense", rc);
-+ res = -EFAULT;
-+ goto out_hwerr_res_set;
-+ }
-+ cmd->sense_valid_len = sense_len;
-+ }
-+
-+out_compl:
-+ cmd->completed = 1;
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_DIRECT);
-+ /* !! At this point cmd can be already freed !! */
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_inval:
-+ PRINT_ERROR("Invalid exec_reply parameters (LUN %lld, op %x, cmd %p)",
-+ (long long unsigned int)cmd->lun, cmd->cdb[0], cmd);
-+ PRINT_BUFFER("Invalid exec_reply", reply, sizeof(*reply));
-+
-+out_hwerr:
-+ res = -EINVAL;
-+
-+out_hwerr_res_set:
-+ if (ucmd->background_exec) {
-+ ucmd_put(ucmd);
-+ goto out;
-+ } else {
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out_compl;
-+ }
-+
-+out_busy:
-+ scst_set_busy(cmd);
-+ goto out_compl;
-+}
-+
-+static int dev_user_process_reply(struct scst_user_dev *dev,
-+ struct scst_user_reply_cmd *reply)
-+{
-+ int res = 0;
-+ struct scst_user_cmd *ucmd;
-+ int state;
-+
-+ TRACE_ENTRY();
-+
-+ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+
-+ ucmd = __ucmd_find_hash(dev, reply->cmd_h);
-+ if (unlikely(ucmd == NULL)) {
-+ TRACE_MGMT_DBG("cmd_h %d not found", reply->cmd_h);
-+ res = -ESRCH;
-+ goto out_unlock;
-+ }
-+
-+ if (unlikely(ucmd_get_check(ucmd))) {
-+ TRACE_MGMT_DBG("Found being destroyed cmd_h %d", reply->cmd_h);
-+ res = -ESRCH;
-+ goto out_unlock;
-+ }
-+
-+ /* To sync. with dev_user_process_reply_exec(). See comment there. */
-+ smp_mb();
-+ if (ucmd->background_exec) {
-+ state = UCMD_STATE_EXECING;
-+ goto unlock_process;
-+ }
-+
-+ if (unlikely(ucmd->this_state_unjammed)) {
-+ TRACE_MGMT_DBG("Reply on unjammed ucmd %p, ignoring",
-+ ucmd);
-+ goto out_unlock_put;
-+ }
-+
-+ if (unlikely(!ucmd->sent_to_user)) {
-+ TRACE_MGMT_DBG("Ucmd %p isn't in the sent to user "
-+ "state %x", ucmd, ucmd->state);
-+ res = -EINVAL;
-+ goto out_unlock_put;
-+ }
-+
-+ if (unlikely(reply->subcode != ucmd->user_cmd.subcode))
-+ goto out_wrong_state;
-+
-+ if (unlikely(_IOC_NR(reply->subcode) != ucmd->state))
-+ goto out_wrong_state;
-+
-+ state = ucmd->state;
-+ ucmd->sent_to_user = 0;
-+
-+unlock_process:
-+ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+
-+ switch (state) {
-+ case UCMD_STATE_PARSING:
-+ res = dev_user_process_reply_parse(ucmd, reply);
-+ break;
-+
-+ case UCMD_STATE_BUF_ALLOCING:
-+ res = dev_user_process_reply_alloc(ucmd, reply);
-+ break;
-+
-+ case UCMD_STATE_EXECING:
-+ res = dev_user_process_reply_exec(ucmd, reply);
-+ break;
-+
-+ case UCMD_STATE_ON_FREEING:
-+ res = dev_user_process_reply_on_free(ucmd);
-+ break;
-+
-+ case UCMD_STATE_ON_CACHE_FREEING:
-+ res = dev_user_process_reply_on_cache_free(ucmd);
-+ break;
-+
-+ case UCMD_STATE_TM_EXECING:
-+ res = dev_user_process_reply_tm_exec(ucmd, reply->result);
-+ break;
-+
-+ case UCMD_STATE_ATTACH_SESS:
-+ case UCMD_STATE_DETACH_SESS:
-+ res = dev_user_process_reply_sess(ucmd, reply->result);
-+ break;
-+
-+ default:
-+ BUG();
-+ break;
-+ }
-+
-+out_put:
-+ ucmd_put(ucmd);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_wrong_state:
-+ PRINT_ERROR("Command's %p subcode %x doesn't match internal "
-+ "command's state %x or reply->subcode (%x) != ucmd->subcode "
-+ "(%x)", ucmd, _IOC_NR(reply->subcode), ucmd->state,
-+ reply->subcode, ucmd->user_cmd.subcode);
-+ res = -EINVAL;
-+ dev_user_unjam_cmd(ucmd, 0, NULL);
-+
-+out_unlock_put:
-+ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+ goto out_put;
-+
-+out_unlock:
-+ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+ goto out;
-+}
-+
-+static int dev_user_reply_cmd(struct file *file, void __user *arg)
-+{
-+ int res = 0, rc;
-+ struct scst_user_dev *dev;
-+ struct scst_user_reply_cmd reply;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&dev_priv_mutex);
-+ dev = file->private_data;
-+ res = dev_user_check_reg(dev);
-+ if (unlikely(res != 0)) {
-+ mutex_unlock(&dev_priv_mutex);
-+ goto out;
-+ }
-+ down_read(&dev->dev_rwsem);
-+ mutex_unlock(&dev_priv_mutex);
-+
-+ rc = copy_from_user(&reply, arg, sizeof(reply));
-+ if (unlikely(rc != 0)) {
-+		PRINT_ERROR("Failed to copy %d bytes from user", rc);
-+ res = -EFAULT;
-+ goto out_up;
-+ }
-+
-+ TRACE_DBG("Reply for dev %s", dev->name);
-+
-+ TRACE_BUFFER("Reply", &reply, sizeof(reply));
-+
-+	res = dev_user_process_reply(dev, &reply);
-+
-+out_up:
-+ up_read(&dev->dev_rwsem);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int dev_user_get_ext_cdb(struct file *file, void __user *arg)
-+{
-+ int res = 0, rc;
-+ struct scst_user_dev *dev;
-+ struct scst_user_cmd *ucmd;
-+ struct scst_cmd *cmd = NULL;
-+ struct scst_user_get_ext_cdb get;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&dev_priv_mutex);
-+ dev = file->private_data;
-+ res = dev_user_check_reg(dev);
-+ if (unlikely(res != 0)) {
-+ mutex_unlock(&dev_priv_mutex);
-+ goto out;
-+ }
-+ down_read(&dev->dev_rwsem);
-+ mutex_unlock(&dev_priv_mutex);
-+
-+ rc = copy_from_user(&get, arg, sizeof(get));
-+ if (unlikely(rc != 0)) {
-+		PRINT_ERROR("Failed to copy %d bytes from user", rc);
-+ res = -EFAULT;
-+ goto out_up;
-+ }
-+
-+ TRACE_MGMT_DBG("Get ext cdb for dev %s", dev->name);
-+
-+ TRACE_BUFFER("Get ext cdb", &get, sizeof(get));
-+
-+ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+
-+ ucmd = __ucmd_find_hash(dev, get.cmd_h);
-+ if (unlikely(ucmd == NULL)) {
-+ TRACE_MGMT_DBG("cmd_h %d not found", get.cmd_h);
-+ res = -ESRCH;
-+ goto out_unlock;
-+ }
-+
-+ if (unlikely(ucmd_get_check(ucmd))) {
-+ TRACE_MGMT_DBG("Found being destroyed cmd_h %d", get.cmd_h);
-+ res = -ESRCH;
-+ goto out_unlock;
-+ }
-+
-+ if ((ucmd->cmd != NULL) && (ucmd->state <= UCMD_STATE_EXECING) &&
-+ (ucmd->sent_to_user || ucmd->background_exec)) {
-+ cmd = ucmd->cmd;
-+ scst_cmd_get(cmd);
-+ } else {
-+ TRACE_MGMT_DBG("Invalid ucmd state %d for cmd_h %d",
-+ ucmd->state, get.cmd_h);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+
-+ if (cmd == NULL)
-+ goto out_put;
-+
-+ BUILD_BUG_ON(sizeof(cmd->cdb_buf) != SCST_MAX_CDB_SIZE);
-+
-+ if (cmd->cdb_len <= SCST_MAX_CDB_SIZE)
-+ goto out_cmd_put;
-+
-+	/* An extended CDB cannot live in the inline cdb_buf */
-+	EXTRACHECKS_BUG_ON(cmd->cdb == cmd->cdb_buf);
-+
-+ TRACE_BUFFER("EXT CDB", &cmd->cdb[sizeof(cmd->cdb_buf)],
-+ cmd->cdb_len - sizeof(cmd->cdb_buf));
-+ rc = copy_to_user((void __user *)(unsigned long)get.ext_cdb_buffer,
-+ &cmd->cdb[sizeof(cmd->cdb_buf)],
-+ cmd->cdb_len - sizeof(cmd->cdb_buf));
-+ if (unlikely(rc != 0)) {
-+ PRINT_ERROR("Failed to copy to user %d bytes", rc);
-+ res = -EFAULT;
-+ goto out_cmd_put;
-+ }
-+
-+out_cmd_put:
-+ scst_cmd_put(cmd);
-+
-+out_put:
-+ ucmd_put(ucmd);
-+
-+out_up:
-+ up_read(&dev->dev_rwsem);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_unlock:
-+ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+ goto out_up;
-+}
-+
-+static int dev_user_process_scst_commands(struct scst_user_dev *dev)
-+ __releases(&dev->udev_cmd_threads.cmd_list_lock)
-+ __acquires(&dev->udev_cmd_threads.cmd_list_lock)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ while (!list_empty(&dev->udev_cmd_threads.active_cmd_list)) {
-+ struct scst_cmd *cmd = list_entry(
-+ dev->udev_cmd_threads.active_cmd_list.next, typeof(*cmd),
-+ cmd_list_entry);
-+ TRACE_DBG("Deleting cmd %p from active cmd list", cmd);
-+ list_del(&cmd->cmd_list_entry);
-+ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+ scst_process_active_cmd(cmd, false);
-+ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+ res++;
-+ }
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* Called under udev_cmd_threads.cmd_list_lock and IRQ off */
-+static struct scst_user_cmd *__dev_user_get_next_cmd(struct list_head *cmd_list)
-+ __releases(&dev->udev_cmd_threads.cmd_list_lock)
-+ __acquires(&dev->udev_cmd_threads.cmd_list_lock)
-+{
-+ struct scst_user_cmd *u;
-+
-+again:
-+ u = NULL;
-+ if (!list_empty(cmd_list)) {
-+ u = list_entry(cmd_list->next, typeof(*u),
-+ ready_cmd_list_entry);
-+
-+ TRACE_DBG("Found ready ucmd %p", u);
-+ list_del(&u->ready_cmd_list_entry);
-+
-+ EXTRACHECKS_BUG_ON(u->this_state_unjammed);
-+
-+ if (u->cmd != NULL) {
-+ if (u->state == UCMD_STATE_EXECING) {
-+ struct scst_user_dev *dev = u->dev;
-+ int rc;
-+
-+ EXTRACHECKS_BUG_ON(u->jammed);
-+
-+ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+
-+ rc = scst_check_local_events(u->cmd);
-+ if (unlikely(rc != 0)) {
-+ u->cmd->scst_cmd_done(u->cmd,
-+ SCST_CMD_STATE_DEFAULT,
-+ SCST_CONTEXT_DIRECT);
-+ /*
-+ * !! At this point cmd & u can be !!
-+ * !! already freed !!
-+ */
-+ spin_lock_irq(
-+ &dev->udev_cmd_threads.cmd_list_lock);
-+ goto again;
-+ }
-+
-+ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+ } else if (unlikely(test_bit(SCST_CMD_ABORTED,
-+ &u->cmd->cmd_flags))) {
-+ switch (u->state) {
-+ case UCMD_STATE_PARSING:
-+ case UCMD_STATE_BUF_ALLOCING:
-+ TRACE_MGMT_DBG("Aborting ucmd %p", u);
-+ dev_user_unjam_cmd(u, 0, NULL);
-+ goto again;
-+ case UCMD_STATE_EXECING:
-+ EXTRACHECKS_BUG_ON(1);
-+ }
-+ }
-+ }
-+ u->sent_to_user = 1;
-+ u->seen_by_user = 1;
-+ }
-+ return u;
-+}
-+
-+static inline int test_cmd_threads(struct scst_user_dev *dev)
-+{
-+ int res = !list_empty(&dev->udev_cmd_threads.active_cmd_list) ||
-+ !list_empty(&dev->ready_cmd_list) ||
-+ !dev->blocking || dev->cleanup_done ||
-+ signal_pending(current);
-+ return res;
-+}
-+
-+/* Called under udev_cmd_threads.cmd_list_lock and IRQ off */
-+static int dev_user_get_next_cmd(struct scst_user_dev *dev,
-+ struct scst_user_cmd **ucmd)
-+{
-+ int res = 0;
-+ wait_queue_t wait;
-+
-+ TRACE_ENTRY();
-+
-+ init_waitqueue_entry(&wait, current);
-+
-+ while (1) {
-+ if (!test_cmd_threads(dev)) {
-+ add_wait_queue_exclusive_head(
-+ &dev->udev_cmd_threads.cmd_list_waitQ,
-+ &wait);
-+ for (;;) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ if (test_cmd_threads(dev))
-+ break;
-+ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+ schedule();
-+ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+ }
-+ set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&dev->udev_cmd_threads.cmd_list_waitQ,
-+ &wait);
-+ }
-+
-+ dev_user_process_scst_commands(dev);
-+
-+ *ucmd = __dev_user_get_next_cmd(&dev->ready_cmd_list);
-+ if (*ucmd != NULL)
-+ break;
-+
-+ if (!dev->blocking || dev->cleanup_done) {
-+ res = -EAGAIN;
-+ TRACE_DBG("No ready commands, returning %d", res);
-+ break;
-+ }
-+
-+ if (signal_pending(current)) {
-+ res = -EINTR;
-+ TRACE_DBG("Signal pending, returning %d", res);
-+ break;
-+ }
-+ }
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
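dev_user_get_next_cmd() open-codes its exclusive wait so it can drop cmd_list_lock around schedule(). When no lock has to be held across the condition check, the stock helpers express the same pattern; a sketch of the equivalent idiom (the cond callback and its argument are assumptions for illustration):

#include <linux/wait.h>
#include <linux/sched.h>

static int ex_wait_for_work(wait_queue_head_t *wq,
			    int (*cond)(void *), void *arg)
{
	DEFINE_WAIT(wait);
	int res = 0;

	while (!cond(arg)) {
		/* exclusive: only one waiter is woken per wake_up() */
		prepare_to_wait_exclusive(wq, &wait, TASK_INTERRUPTIBLE);
		if (cond(arg))
			break;
		if (signal_pending(current)) {
			res = -EINTR;
			break;
		}
		schedule();
	}
	finish_wait(wq, &wait);
	return res;
}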
-+
-+static int dev_user_reply_get_cmd(struct file *file, void __user *arg)
-+{
-+ int res = 0, rc;
-+ struct scst_user_dev *dev;
-+ struct scst_user_get_cmd *cmd;
-+ struct scst_user_reply_cmd *reply;
-+ struct scst_user_cmd *ucmd;
-+ uint64_t ureply;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&dev_priv_mutex);
-+ dev = file->private_data;
-+ res = dev_user_check_reg(dev);
-+ if (unlikely(res != 0)) {
-+ mutex_unlock(&dev_priv_mutex);
-+ goto out;
-+ }
-+ down_read(&dev->dev_rwsem);
-+ mutex_unlock(&dev_priv_mutex);
-+
-+ /* get_user() can't be used with 64-bit values on x86_32 */
-+ rc = copy_from_user(&ureply, (uint64_t __user *)
-+ &((struct scst_user_get_cmd __user *)arg)->preply,
-+ sizeof(ureply));
-+ if (unlikely(rc != 0)) {
-+		PRINT_ERROR("Failed to copy %d bytes from user", rc);
-+ res = -EFAULT;
-+ goto out_up;
-+ }
-+
-+ TRACE_DBG("ureply %lld (dev %s)", (long long unsigned int)ureply,
-+ dev->name);
-+
-+ cmd = kmem_cache_alloc(user_get_cmd_cachep, GFP_KERNEL);
-+ if (unlikely(cmd == NULL)) {
-+ res = -ENOMEM;
-+ goto out_up;
-+ }
-+
-+ if (ureply != 0) {
-+ unsigned long u = (unsigned long)ureply;
-+ reply = (struct scst_user_reply_cmd *)cmd;
-+ rc = copy_from_user(reply, (void __user *)u, sizeof(*reply));
-+ if (unlikely(rc != 0)) {
-+			PRINT_ERROR("Failed to copy %d bytes from user", rc);
-+ res = -EFAULT;
-+ goto out_free;
-+ }
-+
-+ TRACE_BUFFER("Reply", reply, sizeof(*reply));
-+
-+ res = dev_user_process_reply(dev, reply);
-+ if (unlikely(res < 0))
-+ goto out_free;
-+ }
-+
-+ kmem_cache_free(user_get_cmd_cachep, cmd);
-+
-+ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+again:
-+ res = dev_user_get_next_cmd(dev, &ucmd);
-+ if (res == 0) {
-+ int len;
-+ /*
-+		 * A misbehaving user space handler can make ucmd die
-+		 * immediately after we release the lock, which could result
-+		 * in copying dead data to user space and so leaking
-+		 * sensitive information.
-+ */
-+ if (unlikely(ucmd_get_check(ucmd))) {
-+ /* Oops, this ucmd is already being destroyed. Retry. */
-+ goto again;
-+ }
-+ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+
-+ EXTRACHECKS_BUG_ON(ucmd->user_cmd_payload_len == 0);
-+
-+ len = ucmd->user_cmd_payload_len;
-+ TRACE_DBG("ucmd %p (user_cmd %p), payload_len %d (len %d)",
-+ ucmd, &ucmd->user_cmd, ucmd->user_cmd_payload_len, len);
-+ TRACE_BUFFER("UCMD", &ucmd->user_cmd, len);
-+ rc = copy_to_user(arg, &ucmd->user_cmd, len);
-+ if (unlikely(rc != 0)) {
-+ PRINT_ERROR("Copy to user failed (%d), requeuing ucmd "
-+ "%p back to head of ready cmd list", rc, ucmd);
-+ res = -EFAULT;
-+ /* Requeue ucmd back */
-+ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+ list_add(&ucmd->ready_cmd_list_entry,
-+ &dev->ready_cmd_list);
-+ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+ }
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ else
-+ ucmd->user_cmd_payload_len = 0;
-+#endif
-+ ucmd_put(ucmd);
-+ } else
-+ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+
-+out_up:
-+ up_read(&dev->dev_rwsem);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free:
-+ kmem_cache_free(user_get_cmd_cachep, cmd);
-+ goto out_up;
-+}
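The comment in dev_user_reply_get_cmd() about get_user() refers to a long-standing x86_32 limitation: before later kernels grew an 8-byte __get_user case, a 64-bit field such as preply had to be fetched with copy_from_user(). A minimal illustration of that workaround (helper name is hypothetical):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

/* Fetch a 64-bit value from user space without relying on an 8-byte
 * get_user(), which 3.2-era x86_32 did not provide. */
static int ex_fetch_u64(const u64 __user *uptr, u64 *val)
{
	return copy_from_user(val, uptr, sizeof(*val)) ? -EFAULT : 0;
}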
-+
-+static long dev_user_ioctl(struct file *file, unsigned int cmd,
-+ unsigned long arg)
-+{
-+ long res, rc;
-+
-+ TRACE_ENTRY();
-+
-+ switch (cmd) {
-+ case SCST_USER_REPLY_AND_GET_CMD:
-+ TRACE_DBG("%s", "REPLY_AND_GET_CMD");
-+ res = dev_user_reply_get_cmd(file, (void __user *)arg);
-+ break;
-+
-+ case SCST_USER_REPLY_CMD:
-+ TRACE_DBG("%s", "REPLY_CMD");
-+ res = dev_user_reply_cmd(file, (void __user *)arg);
-+ break;
-+
-+ case SCST_USER_GET_EXTENDED_CDB:
-+ TRACE_DBG("%s", "GET_EXTENDED_CDB");
-+ res = dev_user_get_ext_cdb(file, (void __user *)arg);
-+ break;
-+
-+ case SCST_USER_REGISTER_DEVICE:
-+ {
-+ struct scst_user_dev_desc *dev_desc;
-+ TRACE_DBG("%s", "REGISTER_DEVICE");
-+ dev_desc = kmalloc(sizeof(*dev_desc), GFP_KERNEL);
-+ if (dev_desc == NULL) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ rc = copy_from_user(dev_desc, (void __user *)arg,
-+ sizeof(*dev_desc));
-+ if (rc != 0) {
-+			PRINT_ERROR("Failed to copy %ld bytes from user", rc);
-+ res = -EFAULT;
-+ kfree(dev_desc);
-+ goto out;
-+ }
-+ TRACE_BUFFER("dev_desc", dev_desc, sizeof(*dev_desc));
-+ dev_desc->name[sizeof(dev_desc->name)-1] = '\0';
-+ dev_desc->sgv_name[sizeof(dev_desc->sgv_name)-1] = '\0';
-+ res = dev_user_register_dev(file, dev_desc);
-+ kfree(dev_desc);
-+ break;
-+ }
-+
-+ case SCST_USER_UNREGISTER_DEVICE:
-+ TRACE_DBG("%s", "UNREGISTER_DEVICE");
-+ res = dev_user_unregister_dev(file);
-+ break;
-+
-+ case SCST_USER_FLUSH_CACHE:
-+ TRACE_DBG("%s", "FLUSH_CACHE");
-+ res = dev_user_flush_cache(file);
-+ break;
-+
-+ case SCST_USER_SET_OPTIONS:
-+ {
-+ struct scst_user_opt opt;
-+ TRACE_DBG("%s", "SET_OPTIONS");
-+ rc = copy_from_user(&opt, (void __user *)arg, sizeof(opt));
-+ if (rc != 0) {
-+			PRINT_ERROR("Failed to copy %ld bytes from user", rc);
-+ res = -EFAULT;
-+ goto out;
-+ }
-+ TRACE_BUFFER("opt", &opt, sizeof(opt));
-+ res = dev_user_set_opt(file, &opt);
-+ break;
-+ }
-+
-+ case SCST_USER_GET_OPTIONS:
-+ TRACE_DBG("%s", "GET_OPTIONS");
-+ res = dev_user_get_opt(file, (void __user *)arg);
-+ break;
-+
-+ case SCST_USER_DEVICE_CAPACITY_CHANGED:
-+ TRACE_DBG("%s", "CAPACITY_CHANGED");
-+ res = dev_user_capacity_changed(file);
-+ break;
-+
-+ case SCST_USER_PREALLOC_BUFFER:
-+ TRACE_DBG("%s", "PREALLOC_BUFFER");
-+ res = dev_user_prealloc_buffer(file, (void __user *)arg);
-+ break;
-+
-+ default:
-+ PRINT_ERROR("Invalid ioctl cmd %x", cmd);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
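A minimal user-space dispatch loop for this ioctl set is sketched below. It is
only a sketch: it assumes the scst_user.h layout used above, in particular
that struct scst_user_get_cmd carries a preply field pointing at the previous
reply, and it omits all per-subcode handling and error recovery.

#include <string.h>
#include <sys/ioctl.h>
#include <scst_user.h>	/* assumed header location */

/* Reply to the previous command and fetch the next one, forever */
static int handler_loop(int fd)
{
	struct scst_user_get_cmd get;
	struct scst_user_reply_cmd reply;

	memset(&get, 0, sizeof(get));
	get.preply = 0;	/* nothing to reply to on the first pass */

	for (;;) {
		if (ioctl(fd, SCST_USER_REPLY_AND_GET_CMD, &get) != 0)
			return -1;

		memset(&reply, 0, sizeof(reply));
		reply.cmd_h = get.cmd_h;	/* echo the command handle */
		reply.subcode = get.subcode;
		/* ... fill the reply union according to get.subcode ... */

		get.preply = (unsigned long)&reply;
	}
}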
-+static unsigned int dev_user_poll(struct file *file, poll_table *wait)
-+{
-+ int res = 0;
-+ struct scst_user_dev *dev;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&dev_priv_mutex);
-+ dev = file->private_data;
-+ res = dev_user_check_reg(dev);
-+ if (unlikely(res != 0)) {
-+ mutex_unlock(&dev_priv_mutex);
-+ goto out;
-+ }
-+ down_read(&dev->dev_rwsem);
-+ mutex_unlock(&dev_priv_mutex);
-+
-+ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+
-+ if (!list_empty(&dev->ready_cmd_list) ||
-+ !list_empty(&dev->udev_cmd_threads.active_cmd_list)) {
-+ res |= POLLIN | POLLRDNORM;
-+ goto out_unlock;
-+ }
-+
-+ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+
-+ TRACE_DBG("Before poll_wait() (dev %s)", dev->name);
-+ poll_wait(file, &dev->udev_cmd_threads.cmd_list_waitQ, wait);
-+ TRACE_DBG("After poll_wait() (dev %s)", dev->name);
-+
-+ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+
-+ if (!list_empty(&dev->ready_cmd_list) ||
-+ !list_empty(&dev->udev_cmd_threads.active_cmd_list)) {
-+ res |= POLLIN | POLLRDNORM;
-+ goto out_unlock;
-+ }
-+
-+out_unlock:
-+ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+
-+ up_read(&dev->dev_rwsem);
-+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
-+
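For devices registered with O_NONBLOCK, this poll handler pairs with an
ordinary poll(2) wait on the user-space side; POLLIN/POLLRDNORM signal that a
command is ready to be fetched. A minimal sketch:

#include <poll.h>

/* Block until the SCST core has a command ready for this device */
static int wait_for_cmd(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDNORM };

	if (poll(&pfd, 1, -1) < 0)	/* -1: wait without timeout */
		return -1;
	return (pfd.revents & (POLLIN | POLLRDNORM)) ? 0 : -1;
}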
-+/*
-+ * Called under udev_cmd_threads.cmd_list_lock; may drop it internally and
-+ * then reacquire it before returning.
-+ */
-+static void dev_user_unjam_cmd(struct scst_user_cmd *ucmd, int busy,
-+ unsigned long *flags)
-+ __releases(&dev->udev_cmd_threads.cmd_list_lock)
-+ __acquires(&dev->udev_cmd_threads.cmd_list_lock)
-+{
-+ int state = ucmd->state;
-+ struct scst_user_dev *dev = ucmd->dev;
-+
-+ TRACE_ENTRY();
-+
-+ if (ucmd->this_state_unjammed)
-+ goto out;
-+
-+ TRACE_MGMT_DBG("Unjamming ucmd %p (busy %d, state %x)", ucmd, busy,
-+ state);
-+
-+ ucmd->jammed = 1;
-+ ucmd->this_state_unjammed = 1;
-+ ucmd->sent_to_user = 0;
-+
-+ switch (state) {
-+ case UCMD_STATE_PARSING:
-+ case UCMD_STATE_BUF_ALLOCING:
-+ if (test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags))
-+ ucmd->aborted = 1;
-+ else {
-+ if (busy)
-+ scst_set_busy(ucmd->cmd);
-+ else
-+ scst_set_cmd_error(ucmd->cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ }
-+ scst_set_cmd_abnormal_done_state(ucmd->cmd);
-+
-+ if (state == UCMD_STATE_PARSING)
-+ scst_post_parse(ucmd->cmd);
-+ else
-+ scst_post_alloc_data_buf(ucmd->cmd);
-+
-+ TRACE_MGMT_DBG("Adding ucmd %p to active list", ucmd);
-+ list_add(&ucmd->cmd->cmd_list_entry,
-+ &ucmd->cmd->cmd_threads->active_cmd_list);
-+ wake_up(&ucmd->cmd->cmd_threads->cmd_list_waitQ);
-+ break;
-+
-+ case UCMD_STATE_EXECING:
-+ if (flags != NULL)
-+ spin_unlock_irqrestore(&dev->udev_cmd_threads.cmd_list_lock,
-+ *flags);
-+ else
-+ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+
-+ TRACE_MGMT_DBG("EXEC: unjamming ucmd %p", ucmd);
-+
-+ if (test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags))
-+ ucmd->aborted = 1;
-+ else {
-+ if (busy)
-+ scst_set_busy(ucmd->cmd);
-+ else
-+ scst_set_cmd_error(ucmd->cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ }
-+
-+ ucmd->cmd->scst_cmd_done(ucmd->cmd, SCST_CMD_STATE_DEFAULT,
-+ SCST_CONTEXT_THREAD);
-+		/* !! At this point cmd and ucmd may already be freed !! */
-+
-+ if (flags != NULL)
-+ spin_lock_irqsave(&dev->udev_cmd_threads.cmd_list_lock,
-+ *flags);
-+ else
-+ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+ break;
-+
-+ case UCMD_STATE_ON_FREEING:
-+ case UCMD_STATE_ON_CACHE_FREEING:
-+ case UCMD_STATE_TM_EXECING:
-+ case UCMD_STATE_ATTACH_SESS:
-+ case UCMD_STATE_DETACH_SESS:
-+ if (flags != NULL)
-+ spin_unlock_irqrestore(&dev->udev_cmd_threads.cmd_list_lock,
-+ *flags);
-+ else
-+ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+
-+ switch (state) {
-+ case UCMD_STATE_ON_FREEING:
-+ dev_user_process_reply_on_free(ucmd);
-+ break;
-+
-+ case UCMD_STATE_ON_CACHE_FREEING:
-+ dev_user_process_reply_on_cache_free(ucmd);
-+ break;
-+
-+ case UCMD_STATE_TM_EXECING:
-+ dev_user_process_reply_tm_exec(ucmd,
-+ SCST_MGMT_STATUS_FAILED);
-+ break;
-+
-+ case UCMD_STATE_ATTACH_SESS:
-+ case UCMD_STATE_DETACH_SESS:
-+ dev_user_process_reply_sess(ucmd, -EFAULT);
-+ break;
-+ }
-+
-+ if (flags != NULL)
-+ spin_lock_irqsave(&dev->udev_cmd_threads.cmd_list_lock,
-+ *flags);
-+ else
-+ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+ break;
-+
-+ default:
-+ PRINT_CRIT_ERROR("Wrong ucmd state %x", state);
-+ BUG();
-+ break;
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int dev_user_unjam_dev(struct scst_user_dev *dev)
-+ __releases(&dev->udev_cmd_threads.cmd_list_lock)
-+ __acquires(&dev->udev_cmd_threads.cmd_list_lock)
-+{
-+ int i, res = 0;
-+ struct scst_user_cmd *ucmd;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Unjamming dev %p", dev);
-+
-+ sgv_pool_flush(dev->pool);
-+ sgv_pool_flush(dev->pool_clust);
-+
-+ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+
-+repeat:
-+ for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
-+ struct list_head *head = &dev->ucmd_hash[i];
-+
-+ list_for_each_entry(ucmd, head, hash_list_entry) {
-+ res++;
-+
-+ if (!ucmd->sent_to_user)
-+ continue;
-+
-+ if (ucmd_get_check(ucmd))
-+ continue;
-+
-+ TRACE_MGMT_DBG("ucmd %p, state %x, scst_cmd %p", ucmd,
-+ ucmd->state, ucmd->cmd);
-+
-+ dev_user_unjam_cmd(ucmd, 0, NULL);
-+
-+ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+ ucmd_put(ucmd);
-+ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+
-+ goto repeat;
-+ }
-+ }
-+
-+ if (dev_user_process_scst_commands(dev) != 0)
-+ goto repeat;
-+
-+ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int dev_user_process_reply_tm_exec(struct scst_user_cmd *ucmd,
-+ int status)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("TM reply (ucmd %p, fn %d, status %d)", ucmd,
-+ ucmd->user_cmd.tm_cmd.fn, status);
-+
-+ if (status == SCST_MGMT_STATUS_TASK_NOT_EXIST) {
-+		/*
-+		 * It is possible that user space saw the TM cmd before the
-+		 * cmd to abort, or will never see it at all, because the cmd
-+		 * was aborted on its way there. So it is safe to return
-+		 * success instead: if the TM cmd got this far, the cmd to
-+		 * abort apparently did exist.
-+		 */
-+ status = SCST_MGMT_STATUS_SUCCESS;
-+ }
-+
-+ scst_async_mcmd_completed(ucmd->mcmd, status);
-+
-+ ucmd_put(ucmd);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void dev_user_abort_ready_commands(struct scst_user_dev *dev)
-+{
-+ struct scst_user_cmd *ucmd;
-+ unsigned long flags;
-+
-+ TRACE_ENTRY();
-+
-+ spin_lock_irqsave(&dev->udev_cmd_threads.cmd_list_lock, flags);
-+again:
-+ list_for_each_entry(ucmd, &dev->ready_cmd_list, ready_cmd_list_entry) {
-+ if ((ucmd->cmd != NULL) && !ucmd->seen_by_user &&
-+ test_bit(SCST_CMD_ABORTED, &ucmd->cmd->cmd_flags)) {
-+ switch (ucmd->state) {
-+ case UCMD_STATE_PARSING:
-+ case UCMD_STATE_BUF_ALLOCING:
-+ case UCMD_STATE_EXECING:
-+ TRACE_MGMT_DBG("Aborting ready ucmd %p", ucmd);
-+ list_del(&ucmd->ready_cmd_list_entry);
-+ dev_user_unjam_cmd(ucmd, 0, &flags);
-+ goto again;
-+ }
-+ }
-+ }
-+
-+ spin_unlock_irqrestore(&dev->udev_cmd_threads.cmd_list_lock, flags);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Can be called under a spinlock with IRQs disabled */
-+static int dev_user_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
-+ struct scst_tgt_dev *tgt_dev)
-+{
-+ struct scst_user_cmd *ucmd;
-+ struct scst_user_dev *dev = tgt_dev->dev->dh_priv;
-+ struct scst_user_cmd *ucmd_to_abort = NULL;
-+
-+ TRACE_ENTRY();
-+
-+	/*
-+	 * In the approach used here we don't do anything with hung devices,
-+	 * i.e. ones that stopped responding and/or have stuck commands. We
-+	 * forcibly abort such commands only if they have not yet been sent
-+	 * to user space or if the device is being unloaded, e.g. because its
-+	 * handler program got killed. This is because it's pretty hard to
-+	 * distinguish between a stuck device and a temporarily overloaded
-+	 * one. There are several reasons for that:
-+	 *
-+	 * 1. Some commands need a lot of time to complete (several hours),
-+	 * so to an impatient user such commands will always look stuck.
-+	 *
-+	 * 2. If we forcibly abort even a single command before it has
-+	 * actually completed in user space, we have to take the whole
-+	 * device offline until we are sure that no previously aborted
-+	 * commands can still get executed. Otherwise we risk data
-+	 * corruption: a command aborted and reported as completed could
-+	 * actually get executed *after* new commands sent after the forced
-+	 * abort. Many journaling file systems and databases enforce the
-+	 * required command order by draining the queue, and not taking the
-+	 * whole device offline after a forced abort would break that. This
-+	 * makes the decision whether a command is stuck or not very costly.
-+	 *
-+	 * So, we leave the policy of deciding whether a device is stuck to
-+	 * user space and simply let all commands live until they complete
-+	 * or their devices get closed/killed. This approach works well, but
-+	 * can affect management commands that need activity suspending via
-+	 * scst_suspend_activity(), such as device or target
-+	 * registration/removal. During normal operation such commands
-+	 * should be rare. Plus, when possible, scst_suspend_activity()
-+	 * returns EBUSY after a timeout, so its caller doesn't get stuck
-+	 * forever either.
-+	 *
-+	 * Anyway, ToDo: this should be reimplemented in the SCST core, so
-+	 * that stuck commands affect only the related devices.
-+	 */
-+
-+ dev_user_abort_ready_commands(dev);
-+
-+	/* We can't afford to miss a TM command due to memory shortage */
-+ ucmd = dev_user_alloc_ucmd(dev, GFP_ATOMIC|__GFP_NOFAIL);
-+ if (ucmd == NULL) {
-+ PRINT_CRIT_ERROR("Unable to allocate TM %d message "
-+ "(dev %s)", mcmd->fn, dev->name);
-+ goto out;
-+ }
-+
-+ ucmd->user_cmd_payload_len =
-+ offsetof(struct scst_user_get_cmd, tm_cmd) +
-+ sizeof(ucmd->user_cmd.tm_cmd);
-+ ucmd->user_cmd.cmd_h = ucmd->h;
-+ ucmd->user_cmd.subcode = SCST_USER_TASK_MGMT;
-+ ucmd->user_cmd.tm_cmd.sess_h = (unsigned long)tgt_dev;
-+ ucmd->user_cmd.tm_cmd.fn = mcmd->fn;
-+ ucmd->user_cmd.tm_cmd.cmd_sn = mcmd->cmd_sn;
-+ ucmd->user_cmd.tm_cmd.cmd_sn_set = mcmd->cmd_sn_set;
-+
-+ if (mcmd->cmd_to_abort != NULL) {
-+ ucmd_to_abort = mcmd->cmd_to_abort->dh_priv;
-+ if (ucmd_to_abort != NULL)
-+ ucmd->user_cmd.tm_cmd.cmd_h_to_abort = ucmd_to_abort->h;
-+ }
-+
-+ TRACE_MGMT_DBG("Preparing TM ucmd %p (h %d, fn %d, cmd_to_abort %p, "
-+ "ucmd_to_abort %p, cmd_h_to_abort %d, mcmd %p)", ucmd, ucmd->h,
-+ mcmd->fn, mcmd->cmd_to_abort, ucmd_to_abort,
-+ ucmd->user_cmd.tm_cmd.cmd_h_to_abort, mcmd);
-+
-+ ucmd->mcmd = mcmd;
-+ ucmd->state = UCMD_STATE_TM_EXECING;
-+
-+ scst_prepare_async_mcmd(mcmd);
-+
-+ dev_user_add_to_ready(ucmd);
-+
-+out:
-+ TRACE_EXIT();
-+ return SCST_DEV_TM_NOT_COMPLETED;
-+}
-+
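On the user-space side this arrives as subcode SCST_USER_TASK_MGMT and must be
answered with one of the SCST_MGMT_STATUS_* values. The sketch below assumes
that the reply union exposes the status through a result field;
mark_cmd_aborted() is a hypothetical bookkeeping helper.

#include <stdint.h>

/* Hypothetical helper: remember that this command handle was aborted */
static void mark_cmd_aborted(uint32_t cmd_h) { (void)cmd_h; }

static void handle_tm(const struct scst_user_get_cmd *get,
	struct scst_user_reply_cmd *reply)
{
	/* cmd_h_to_abort, if non-zero, names a command delivered earlier */
	if (get->tm_cmd.cmd_h_to_abort != 0)
		mark_cmd_aborted(get->tm_cmd.cmd_h_to_abort);

	reply->cmd_h = get->cmd_h;
	reply->subcode = SCST_USER_TASK_MGMT;
	reply->result = SCST_MGMT_STATUS_SUCCESS;	/* assumed field name */
}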
-+static int dev_user_attach(struct scst_device *sdev)
-+{
-+ int res = 0;
-+ struct scst_user_dev *dev = NULL, *d;
-+
-+ TRACE_ENTRY();
-+
-+ spin_lock(&dev_list_lock);
-+ list_for_each_entry(d, &dev_list, dev_list_entry) {
-+ if (strcmp(d->name, sdev->virt_name) == 0) {
-+ dev = d;
-+ break;
-+ }
-+ }
-+ spin_unlock(&dev_list_lock);
-+ if (dev == NULL) {
-+ PRINT_ERROR("Device %s not found", sdev->virt_name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ sdev->dh_priv = dev;
-+ sdev->tst = dev->tst;
-+ sdev->queue_alg = dev->queue_alg;
-+ sdev->swp = dev->swp;
-+ sdev->tas = dev->tas;
-+ sdev->d_sense = dev->d_sense;
-+ sdev->has_own_order_mgmt = dev->has_own_order_mgmt;
-+
-+ dev->sdev = sdev;
-+
-+ PRINT_INFO("Attached user space virtual device \"%s\"",
-+ dev->name);
-+
-+out:
-+ TRACE_EXIT();
-+ return res;
-+}
-+
-+static void dev_user_detach(struct scst_device *sdev)
-+{
-+ struct scst_user_dev *dev = sdev->dh_priv;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("virt_id %d", sdev->virt_id);
-+
-+ PRINT_INFO("Detached user space virtual device \"%s\"",
-+ dev->name);
-+
-+ /* dev will be freed by the caller */
-+ sdev->dh_priv = NULL;
-+ dev->sdev = NULL;
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int dev_user_process_reply_sess(struct scst_user_cmd *ucmd, int status)
-+{
-+ int res = 0;
-+ unsigned long flags;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("ucmd %p, cmpl %p, status %d", ucmd, ucmd->cmpl, status);
-+
-+ spin_lock_irqsave(&ucmd->dev->udev_cmd_threads.cmd_list_lock, flags);
-+
-+ if (ucmd->state == UCMD_STATE_ATTACH_SESS) {
-+ TRACE_MGMT_DBG("%s", "ATTACH_SESS finished");
-+ ucmd->result = status;
-+ } else if (ucmd->state == UCMD_STATE_DETACH_SESS) {
-+ TRACE_MGMT_DBG("%s", "DETACH_SESS finished");
-+ } else
-+ BUG();
-+
-+ if (ucmd->cmpl != NULL)
-+ complete_all(ucmd->cmpl);
-+
-+ spin_unlock_irqrestore(&ucmd->dev->udev_cmd_threads.cmd_list_lock, flags);
-+
-+ ucmd_put(ucmd);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int dev_user_attach_tgt(struct scst_tgt_dev *tgt_dev)
-+{
-+ struct scst_user_dev *dev = tgt_dev->dev->dh_priv;
-+ int res = 0, rc;
-+ struct scst_user_cmd *ucmd;
-+ DECLARE_COMPLETION_ONSTACK(cmpl);
-+ struct scst_tgt_template *tgtt = tgt_dev->sess->tgt->tgtt;
-+ struct scst_tgt *tgt = tgt_dev->sess->tgt;
-+
-+ TRACE_ENTRY();
-+
-+ tgt_dev->active_cmd_threads = &dev->udev_cmd_threads;
-+
-+ /*
-+ * We can't replace tgt_dev->pool, because it can be used to allocate
-+ * memory for SCST local commands, like REPORT LUNS, where there is no
-+ * corresponding ucmd. Otherwise we will crash in dev_user_alloc_sg().
-+ */
-+ if (test_bit(SCST_TGT_DEV_CLUST_POOL, &tgt_dev->tgt_dev_flags))
-+ tgt_dev->dh_priv = dev->pool_clust;
-+ else
-+ tgt_dev->dh_priv = dev->pool;
-+
-+ ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL);
-+ if (ucmd == NULL)
-+ goto out_nomem;
-+
-+ ucmd->cmpl = &cmpl;
-+
-+ ucmd->user_cmd_payload_len = offsetof(struct scst_user_get_cmd, sess) +
-+ sizeof(ucmd->user_cmd.sess);
-+ ucmd->user_cmd.cmd_h = ucmd->h;
-+ ucmd->user_cmd.subcode = SCST_USER_ATTACH_SESS;
-+ ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
-+ ucmd->user_cmd.sess.lun = (uint64_t)tgt_dev->lun;
-+ ucmd->user_cmd.sess.threads_num = tgt_dev->sess->tgt->tgtt->threads_num;
-+ ucmd->user_cmd.sess.rd_only = tgt_dev->acg_dev->rd_only;
-+ if (tgtt->get_phys_transport_version != NULL)
-+ ucmd->user_cmd.sess.phys_transport_version =
-+ tgtt->get_phys_transport_version(tgt);
-+ if (tgtt->get_scsi_transport_version != NULL)
-+ ucmd->user_cmd.sess.scsi_transport_version =
-+ tgtt->get_scsi_transport_version(tgt);
-+ strlcpy(ucmd->user_cmd.sess.initiator_name,
-+ tgt_dev->sess->initiator_name,
-+ sizeof(ucmd->user_cmd.sess.initiator_name)-1);
-+ strlcpy(ucmd->user_cmd.sess.target_name,
-+ tgt_dev->sess->tgt->tgt_name,
-+ sizeof(ucmd->user_cmd.sess.target_name)-1);
-+
-+ TRACE_MGMT_DBG("Preparing ATTACH_SESS %p (h %d, sess_h %llx, LUN %llx, "
-+ "threads_num %d, rd_only %d, initiator %s, target %s)",
-+ ucmd, ucmd->h, ucmd->user_cmd.sess.sess_h,
-+ ucmd->user_cmd.sess.lun, ucmd->user_cmd.sess.threads_num,
-+ ucmd->user_cmd.sess.rd_only, ucmd->user_cmd.sess.initiator_name,
-+ ucmd->user_cmd.sess.target_name);
-+
-+ ucmd->state = UCMD_STATE_ATTACH_SESS;
-+
-+ ucmd_get(ucmd);
-+
-+ dev_user_add_to_ready(ucmd);
-+
-+ rc = wait_for_completion_timeout(ucmd->cmpl, DEV_USER_ATTACH_TIMEOUT);
-+ if (rc > 0)
-+ res = ucmd->result;
-+ else {
-+ PRINT_ERROR("%s", "ATTACH_SESS command timeout");
-+ res = -EFAULT;
-+ }
-+
-+ BUG_ON(irqs_disabled());
-+
-+ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+ ucmd->cmpl = NULL;
-+ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+
-+ ucmd_put(ucmd);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_nomem:
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
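The matching user-space handler sees this as subcode SCST_USER_ATTACH_SESS and
must reply within DEV_USER_ATTACH_TIMEOUT, since the kernel side above blocks
in wait_for_completion_timeout(). A sketch, again assuming a result field in
the reply union (0 accepts the session):

#include <stdio.h>

static void handle_attach_sess(const struct scst_user_get_cmd *get,
	struct scst_user_reply_cmd *reply)
{
	printf("attach: LUN %llx, initiator %s, target %s, rd_only %d\n",
	       (unsigned long long)get->sess.lun,
	       get->sess.initiator_name, get->sess.target_name,
	       (int)get->sess.rd_only);

	reply->cmd_h = get->cmd_h;
	reply->subcode = SCST_USER_ATTACH_SESS;
	reply->result = 0;	/* assumed: 0 == accept the session */
}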
-+static void dev_user_detach_tgt(struct scst_tgt_dev *tgt_dev)
-+{
-+ struct scst_user_dev *dev = tgt_dev->dev->dh_priv;
-+ struct scst_user_cmd *ucmd;
-+
-+ TRACE_ENTRY();
-+
-+	/*
-+	 * We can't afford to miss the detach command due to memory shortage,
-+	 * because that might lead to a memory leak in the user-space handler.
-+	 */
-+ ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL|__GFP_NOFAIL);
-+ if (ucmd == NULL) {
-+ PRINT_CRIT_ERROR("Unable to allocate DETACH_SESS message "
-+ "(dev %s)", dev->name);
-+ goto out;
-+ }
-+
-+ TRACE_MGMT_DBG("Preparing DETACH_SESS %p (h %d, sess_h %llx)", ucmd,
-+ ucmd->h, ucmd->user_cmd.sess.sess_h);
-+
-+ ucmd->user_cmd_payload_len = offsetof(struct scst_user_get_cmd, sess) +
-+ sizeof(ucmd->user_cmd.sess);
-+ ucmd->user_cmd.cmd_h = ucmd->h;
-+ ucmd->user_cmd.subcode = SCST_USER_DETACH_SESS;
-+ ucmd->user_cmd.sess.sess_h = (unsigned long)tgt_dev;
-+
-+ ucmd->state = UCMD_STATE_DETACH_SESS;
-+
-+ dev_user_add_to_ready(ucmd);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* No locks are needed, but the activity must be suspended */
-+static void dev_user_setup_functions(struct scst_user_dev *dev)
-+{
-+ TRACE_ENTRY();
-+
-+ dev->devtype.parse = dev_user_parse;
-+ dev->devtype.alloc_data_buf = dev_user_alloc_data_buf;
-+ dev->devtype.dev_done = NULL;
-+
-+ if (dev->parse_type != SCST_USER_PARSE_CALL) {
-+ switch (dev->devtype.type) {
-+ case TYPE_DISK:
-+ dev->generic_parse = scst_sbc_generic_parse;
-+ dev->devtype.dev_done = dev_user_disk_done;
-+ break;
-+
-+ case TYPE_TAPE:
-+ dev->generic_parse = scst_tape_generic_parse;
-+ dev->devtype.dev_done = dev_user_tape_done;
-+ break;
-+
-+ case TYPE_MOD:
-+ dev->generic_parse = scst_modisk_generic_parse;
-+ dev->devtype.dev_done = dev_user_disk_done;
-+ break;
-+
-+ case TYPE_ROM:
-+ dev->generic_parse = scst_cdrom_generic_parse;
-+ dev->devtype.dev_done = dev_user_disk_done;
-+ break;
-+
-+ case TYPE_MEDIUM_CHANGER:
-+ dev->generic_parse = scst_changer_generic_parse;
-+ break;
-+
-+ case TYPE_PROCESSOR:
-+ dev->generic_parse = scst_processor_generic_parse;
-+ break;
-+
-+ case TYPE_RAID:
-+ dev->generic_parse = scst_raid_generic_parse;
-+ break;
-+
-+ default:
-+ PRINT_INFO("Unknown SCSI type %x, using PARSE_CALL "
-+ "for it", dev->devtype.type);
-+ dev->parse_type = SCST_USER_PARSE_CALL;
-+ break;
-+ }
-+ } else {
-+ dev->generic_parse = NULL;
-+ dev->devtype.dev_done = NULL;
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int dev_user_check_version(const struct scst_user_dev_desc *dev_desc)
-+{
-+ char str[sizeof(DEV_USER_VERSION) > 20 ? sizeof(DEV_USER_VERSION) : 20];
-+ int res = 0, rc;
-+
-+ rc = copy_from_user(str,
-+ (void __user *)(unsigned long)dev_desc->license_str,
-+ sizeof(str));
-+ if (rc != 0) {
-+ PRINT_ERROR("%s", "Unable to get license string");
-+ res = -EFAULT;
-+ goto out;
-+ }
-+ str[sizeof(str)-1] = '\0';
-+
-+ if ((strcmp(str, "GPL") != 0) &&
-+ (strcmp(str, "GPL v2") != 0) &&
-+ (strcmp(str, "Dual BSD/GPL") != 0) &&
-+ (strcmp(str, "Dual MIT/GPL") != 0) &&
-+ (strcmp(str, "Dual MPL/GPL") != 0)) {
-+ /* ->name already 0-terminated in dev_user_ioctl() */
-+ PRINT_ERROR("Unsupported license of user device %s (%s). "
-+ "Ask license@scst-tgt.com for more info.",
-+ dev_desc->name, str);
-+ res = -EPERM;
-+ goto out;
-+ }
-+
-+ rc = copy_from_user(str,
-+ (void __user *)(unsigned long)dev_desc->version_str,
-+ sizeof(str));
-+ if (rc != 0) {
-+ PRINT_ERROR("%s", "Unable to get version string");
-+ res = -EFAULT;
-+ goto out;
-+ }
-+ str[sizeof(str)-1] = '\0';
-+
-+ if (strcmp(str, DEV_USER_VERSION) != 0) {
-+ /* ->name already 0-terminated in dev_user_ioctl() */
-+ PRINT_ERROR("Incorrect version of user device %s (%s). "
-+ "Expected: %s", dev_desc->name, str,
-+ DEV_USER_VERSION);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+out:
-+ return res;
-+}
-+
-+static int dev_user_register_dev(struct file *file,
-+ const struct scst_user_dev_desc *dev_desc)
-+{
-+ int res, i;
-+ struct scst_user_dev *dev, *d;
-+ int block;
-+
-+ TRACE_ENTRY();
-+
-+ res = dev_user_check_version(dev_desc);
-+ if (res != 0)
-+ goto out;
-+
-+ switch (dev_desc->type) {
-+ case TYPE_DISK:
-+ case TYPE_ROM:
-+ case TYPE_MOD:
-+ if (dev_desc->block_size == 0) {
-+ PRINT_ERROR("Wrong block size %d",
-+ dev_desc->block_size);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ block = scst_calc_block_shift(dev_desc->block_size);
-+ if (block == -1) {
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ break;
-+ default:
-+ block = dev_desc->block_size;
-+ break;
-+ }
-+
-+ if (!try_module_get(THIS_MODULE)) {
-+		PRINT_ERROR("%s", "Failed to get module");
-+ res = -ETXTBSY;
-+ goto out;
-+ }
-+
-+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-+ if (dev == NULL) {
-+ res = -ENOMEM;
-+ goto out_put;
-+ }
-+
-+ init_rwsem(&dev->dev_rwsem);
-+ INIT_LIST_HEAD(&dev->ready_cmd_list);
-+ if (file->f_flags & O_NONBLOCK) {
-+ TRACE_DBG("%s", "Non-blocking operations");
-+ dev->blocking = 0;
-+ } else
-+ dev->blocking = 1;
-+ for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++)
-+ INIT_LIST_HEAD(&dev->ucmd_hash[i]);
-+
-+ scst_init_threads(&dev->udev_cmd_threads);
-+
-+ strlcpy(dev->name, dev_desc->name, sizeof(dev->name)-1);
-+
-+ scst_init_mem_lim(&dev->udev_mem_lim);
-+
-+ scnprintf(dev->devtype.name, sizeof(dev->devtype.name), "%s",
-+ (dev_desc->sgv_name[0] == '\0') ? dev->name :
-+ dev_desc->sgv_name);
-+ dev->pool = sgv_pool_create(dev->devtype.name, sgv_no_clustering,
-+ dev_desc->sgv_single_alloc_pages,
-+ dev_desc->sgv_shared,
-+ dev_desc->sgv_purge_interval);
-+ if (dev->pool == NULL) {
-+ res = -ENOMEM;
-+ goto out_deinit_threads;
-+ }
-+ sgv_pool_set_allocator(dev->pool, dev_user_alloc_pages,
-+ dev_user_free_sg_entries);
-+
-+ if (!dev_desc->sgv_disable_clustered_pool) {
-+ scnprintf(dev->devtype.name, sizeof(dev->devtype.name),
-+ "%s-clust",
-+ (dev_desc->sgv_name[0] == '\0') ? dev->name :
-+ dev_desc->sgv_name);
-+ dev->pool_clust = sgv_pool_create(dev->devtype.name,
-+ sgv_tail_clustering,
-+ dev_desc->sgv_single_alloc_pages,
-+ dev_desc->sgv_shared,
-+ dev_desc->sgv_purge_interval);
-+ if (dev->pool_clust == NULL) {
-+ res = -ENOMEM;
-+ goto out_free0;
-+ }
-+ sgv_pool_set_allocator(dev->pool_clust, dev_user_alloc_pages,
-+ dev_user_free_sg_entries);
-+ } else {
-+ dev->pool_clust = dev->pool;
-+ sgv_pool_get(dev->pool_clust);
-+ }
-+
-+ scnprintf(dev->devtype.name, sizeof(dev->devtype.name), "%s",
-+ dev->name);
-+ dev->devtype.type = dev_desc->type;
-+ dev->devtype.threads_num = -1;
-+ dev->devtype.parse_atomic = 1;
-+ dev->devtype.alloc_data_buf_atomic = 1;
-+ dev->devtype.dev_done_atomic = 1;
-+ dev->devtype.dev_attrs = dev_user_dev_attrs;
-+ dev->devtype.attach = dev_user_attach;
-+ dev->devtype.detach = dev_user_detach;
-+ dev->devtype.attach_tgt = dev_user_attach_tgt;
-+ dev->devtype.detach_tgt = dev_user_detach_tgt;
-+ dev->devtype.exec = dev_user_exec;
-+ dev->devtype.on_free_cmd = dev_user_on_free_cmd;
-+ dev->devtype.task_mgmt_fn = dev_user_task_mgmt_fn;
-+ if (dev_desc->enable_pr_cmds_notifications)
-+ dev->devtype.pr_cmds_notifications = 1;
-+
-+ init_completion(&dev->cleanup_cmpl);
-+ dev->block = block;
-+ dev->def_block = block;
-+
-+ res = __dev_user_set_opt(dev, &dev_desc->opt);
-+ if (res != 0)
-+ goto out_free;
-+
-+ TRACE_MEM("dev %p, name %s", dev, dev->name);
-+
-+ spin_lock(&dev_list_lock);
-+
-+ list_for_each_entry(d, &dev_list, dev_list_entry) {
-+ if (strcmp(d->name, dev->name) == 0) {
-+			PRINT_ERROR("Device %s already exists",
-+				dev->name);
-+ res = -EEXIST;
-+ spin_unlock(&dev_list_lock);
-+ goto out_free;
-+ }
-+ }
-+
-+ list_add_tail(&dev->dev_list_entry, &dev_list);
-+
-+ spin_unlock(&dev_list_lock);
-+
-+ res = scst_register_virtual_dev_driver(&dev->devtype);
-+ if (res < 0)
-+ goto out_del_free;
-+
-+ dev->virt_id = scst_register_virtual_device(&dev->devtype, dev->name);
-+ if (dev->virt_id < 0) {
-+ res = dev->virt_id;
-+ goto out_unreg_handler;
-+ }
-+
-+ mutex_lock(&dev_priv_mutex);
-+ if (file->private_data != NULL) {
-+ mutex_unlock(&dev_priv_mutex);
-+ PRINT_ERROR("%s", "Device already registered");
-+ res = -EINVAL;
-+ goto out_unreg_drv;
-+ }
-+ file->private_data = dev;
-+ mutex_unlock(&dev_priv_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_unreg_drv:
-+ scst_unregister_virtual_device(dev->virt_id);
-+
-+out_unreg_handler:
-+ scst_unregister_virtual_dev_driver(&dev->devtype);
-+
-+out_del_free:
-+ spin_lock(&dev_list_lock);
-+ list_del(&dev->dev_list_entry);
-+ spin_unlock(&dev_list_lock);
-+
-+out_free:
-+ sgv_pool_del(dev->pool_clust);
-+
-+out_free0:
-+ sgv_pool_del(dev->pool);
-+
-+out_deinit_threads:
-+ scst_deinit_threads(&dev->udev_cmd_threads);
-+
-+ kfree(dev);
-+
-+out_put:
-+ module_put(THIS_MODULE);
-+ goto out;
-+}
-+
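Putting the above together, registering a virtual disk from user space can
look like the following sketch. The /dev/scst_user node name is an assumption
(derived from DEV_USER_NAME); license_str and version_str are user-space
pointers passed as 64-bit integers, exactly as dev_user_check_version()
expects.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scst_user.h>	/* assumed header location */

static int register_vdisk(const char *name, int block_size)
{
	struct scst_user_dev_desc desc;
	int fd = open("/dev/scst_user", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&desc, 0, sizeof(desc));
	desc.license_str = (unsigned long)"GPL";
	desc.version_str = (unsigned long)DEV_USER_VERSION;
	strncpy(desc.name, name, sizeof(desc.name) - 1);
	desc.type = TYPE_DISK;	/* disks require a non-zero block size */
	desc.block_size = block_size;

	if (ioctl(fd, SCST_USER_REGISTER_DEVICE, &desc) != 0) {
		close(fd);
		return -1;
	}
	return fd;	/* this fd is now bound to the device */
}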
-+static int dev_user_unregister_dev(struct file *file)
-+{
-+ int res;
-+ struct scst_user_dev *dev;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&dev_priv_mutex);
-+ dev = file->private_data;
-+ res = dev_user_check_reg(dev);
-+ if (res != 0) {
-+ mutex_unlock(&dev_priv_mutex);
-+ goto out;
-+ }
-+ down_read(&dev->dev_rwsem);
-+ mutex_unlock(&dev_priv_mutex);
-+
-+ res = scst_suspend_activity(true);
-+ if (res != 0)
-+ goto out_up;
-+
-+ up_read(&dev->dev_rwsem);
-+
-+ mutex_lock(&dev_priv_mutex);
-+ dev = file->private_data;
-+ if (dev == NULL) {
-+ mutex_unlock(&dev_priv_mutex);
-+ goto out_resume;
-+ }
-+
-+ dev->blocking = 0;
-+ wake_up_all(&dev->udev_cmd_threads.cmd_list_waitQ);
-+
-+ down_write(&dev->dev_rwsem);
-+ file->private_data = NULL;
-+ mutex_unlock(&dev_priv_mutex);
-+
-+ dev_user_exit_dev(dev);
-+
-+ up_write(&dev->dev_rwsem); /* to make lockdep happy */
-+
-+ kfree(dev);
-+
-+out_resume:
-+ scst_resume_activity();
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_up:
-+ up_read(&dev->dev_rwsem);
-+ goto out;
-+}
-+
-+static int dev_user_flush_cache(struct file *file)
-+{
-+ int res;
-+ struct scst_user_dev *dev;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&dev_priv_mutex);
-+ dev = file->private_data;
-+ res = dev_user_check_reg(dev);
-+ if (res != 0) {
-+ mutex_unlock(&dev_priv_mutex);
-+ goto out;
-+ }
-+ down_read(&dev->dev_rwsem);
-+ mutex_unlock(&dev_priv_mutex);
-+
-+ res = scst_suspend_activity(true);
-+ if (res != 0)
-+ goto out_up;
-+
-+ sgv_pool_flush(dev->pool);
-+ sgv_pool_flush(dev->pool_clust);
-+
-+ scst_resume_activity();
-+
-+out_up:
-+ up_read(&dev->dev_rwsem);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int dev_user_capacity_changed(struct file *file)
-+{
-+ int res;
-+ struct scst_user_dev *dev;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&dev_priv_mutex);
-+ dev = file->private_data;
-+ res = dev_user_check_reg(dev);
-+ if (res != 0) {
-+ mutex_unlock(&dev_priv_mutex);
-+ goto out;
-+ }
-+ down_read(&dev->dev_rwsem);
-+ mutex_unlock(&dev_priv_mutex);
-+
-+ scst_capacity_data_changed(dev->sdev);
-+
-+ up_read(&dev->dev_rwsem);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
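Both of these ioctls act on the device already bound to the file descriptor
and take no argument payload, so invoking them from user space is a one-liner
each. A sketch:

#include <sys/ioctl.h>

/* Flush the SGV caches, then announce the size change to initiators */
static int flush_and_resize(int fd)
{
	if (ioctl(fd, SCST_USER_FLUSH_CACHE) != 0)
		return -1;
	return ioctl(fd, SCST_USER_DEVICE_CAPACITY_CHANGED);
}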
-+static int dev_user_prealloc_buffer(struct file *file, void __user *arg)
-+{
-+ int res = 0, rc;
-+ struct scst_user_dev *dev;
-+ union scst_user_prealloc_buffer pre;
-+ aligned_u64 pbuf;
-+ uint32_t bufflen;
-+ struct scst_user_cmd *ucmd;
-+ int pages, sg_cnt;
-+ struct sgv_pool *pool;
-+ struct scatterlist *sg;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&dev_priv_mutex);
-+ dev = file->private_data;
-+ res = dev_user_check_reg(dev);
-+ if (unlikely(res != 0)) {
-+ mutex_unlock(&dev_priv_mutex);
-+ goto out;
-+ }
-+ down_read(&dev->dev_rwsem);
-+ mutex_unlock(&dev_priv_mutex);
-+
-+ rc = copy_from_user(&pre.in, arg, sizeof(pre.in));
-+ if (unlikely(rc != 0)) {
-+		PRINT_ERROR("Failed to copy %d bytes from user", rc);
-+ res = -EFAULT;
-+ goto out_up;
-+ }
-+
-+ TRACE_MEM("Prealloc buffer with size %dKB for dev %s",
-+ pre.in.bufflen / 1024, dev->name);
-+ TRACE_BUFFER("Input param", &pre.in, sizeof(pre.in));
-+
-+ pbuf = pre.in.pbuf;
-+ bufflen = pre.in.bufflen;
-+
-+ ucmd = dev_user_alloc_ucmd(dev, GFP_KERNEL);
-+ if (ucmd == NULL) {
-+ res = -ENOMEM;
-+ goto out_up;
-+ }
-+
-+ ucmd->buff_cached = 1;
-+
-+ TRACE_MEM("ucmd %p, pbuf %llx", ucmd, pbuf);
-+
-+ if (unlikely((pbuf & ~PAGE_MASK) != 0)) {
-+ PRINT_ERROR("Supplied pbuf %llx isn't page aligned", pbuf);
-+ res = -EINVAL;
-+ goto out_put;
-+ }
-+
-+ pages = calc_num_pg(pbuf, bufflen);
-+ res = dev_user_map_buf(ucmd, pbuf, pages);
-+ if (res != 0)
-+ goto out_put;
-+
-+ if (pre.in.for_clust_pool)
-+ pool = dev->pool_clust;
-+ else
-+ pool = dev->pool;
-+
-+ sg = sgv_pool_alloc(pool, bufflen, GFP_KERNEL, SGV_POOL_ALLOC_GET_NEW,
-+ &sg_cnt, &ucmd->sgv, &dev->udev_mem_lim, ucmd);
-+ if (sg != NULL) {
-+ struct scst_user_cmd *buf_ucmd = sgv_get_priv(ucmd->sgv);
-+
-+ TRACE_MEM("Buf ucmd %p (sg_cnt %d, last seg len %d, "
-+ "bufflen %d)", buf_ucmd, sg_cnt,
-+ sg[sg_cnt-1].length, bufflen);
-+
-+ EXTRACHECKS_BUG_ON(ucmd != buf_ucmd);
-+
-+ ucmd->buf_ucmd = buf_ucmd;
-+ } else {
-+ res = -ENOMEM;
-+ goto out_put;
-+ }
-+
-+ dev_user_free_sgv(ucmd);
-+
-+ pre.out.cmd_h = ucmd->h;
-+ rc = copy_to_user(arg, &pre.out, sizeof(pre.out));
-+ if (unlikely(rc != 0)) {
-+		PRINT_ERROR("Failed to copy %d bytes to user", rc);
-+ res = -EFAULT;
-+ goto out_put;
-+ }
-+
-+out_put:
-+ ucmd_put(ucmd);
-+
-+out_up:
-+ up_read(&dev->dev_rwsem);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
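From user space the same call looks as sketched below: the buffer must be page
aligned, since the kernel rejects unaligned pbuf values, and the returned
cmd_h identifies the preallocated buffer from then on.

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>

static int prealloc_buf(int fd, size_t len, int clustered)
{
	union scst_user_prealloc_buffer pre;
	void *buf;

	if (posix_memalign(&buf, sysconf(_SC_PAGESIZE), len) != 0)
		return -1;

	memset(&pre, 0, sizeof(pre));
	pre.in.pbuf = (unsigned long)buf;	/* must be page aligned */
	pre.in.bufflen = len;
	pre.in.for_clust_pool = clustered;

	if (ioctl(fd, SCST_USER_PREALLOC_BUFFER, &pre) != 0) {
		free(buf);
		return -1;
	}
	return (int)pre.out.cmd_h;	/* handle of the preallocated buffer */
}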
-+static int __dev_user_set_opt(struct scst_user_dev *dev,
-+ const struct scst_user_opt *opt)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("dev %s, parse_type %x, on_free_cmd_type %x, "
-+ "memory_reuse_type %x, partial_transfers_type %x, "
-+ "partial_len %d", dev->name, opt->parse_type,
-+ opt->on_free_cmd_type, opt->memory_reuse_type,
-+ opt->partial_transfers_type, opt->partial_len);
-+
-+ if (opt->parse_type > SCST_USER_MAX_PARSE_OPT ||
-+ opt->on_free_cmd_type > SCST_USER_MAX_ON_FREE_CMD_OPT ||
-+ opt->memory_reuse_type > SCST_USER_MAX_MEM_REUSE_OPT ||
-+ opt->partial_transfers_type > SCST_USER_MAX_PARTIAL_TRANSFERS_OPT) {
-+ PRINT_ERROR("%s", "Invalid option");
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (((opt->tst != SCST_CONTR_MODE_ONE_TASK_SET) &&
-+ (opt->tst != SCST_CONTR_MODE_SEP_TASK_SETS)) ||
-+ ((opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_RESTRICTED_REORDER) &&
-+ (opt->queue_alg != SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER)) ||
-+ (opt->swp > 1) || (opt->tas > 1) || (opt->has_own_order_mgmt > 1) ||
-+ (opt->d_sense > 1)) {
-+ PRINT_ERROR("Invalid SCSI option (tst %x, queue_alg %x, swp %x,"
-+ " tas %x, d_sense %d, has_own_order_mgmt %x)", opt->tst,
-+ opt->queue_alg, opt->swp, opt->tas, opt->d_sense,
-+ opt->has_own_order_mgmt);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ dev->parse_type = opt->parse_type;
-+ dev->on_free_cmd_type = opt->on_free_cmd_type;
-+ dev->memory_reuse_type = opt->memory_reuse_type;
-+ dev->partial_transfers_type = opt->partial_transfers_type;
-+ dev->partial_len = opt->partial_len;
-+
-+ dev->tst = opt->tst;
-+ dev->queue_alg = opt->queue_alg;
-+ dev->swp = opt->swp;
-+ dev->tas = opt->tas;
-+ dev->d_sense = opt->d_sense;
-+ dev->has_own_order_mgmt = opt->has_own_order_mgmt;
-+ if (dev->sdev != NULL) {
-+ dev->sdev->tst = opt->tst;
-+ dev->sdev->queue_alg = opt->queue_alg;
-+ dev->sdev->swp = opt->swp;
-+ dev->sdev->tas = opt->tas;
-+ dev->sdev->d_sense = opt->d_sense;
-+ dev->sdev->has_own_order_mgmt = opt->has_own_order_mgmt;
-+ }
-+
-+ dev_user_setup_functions(dev);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int dev_user_set_opt(struct file *file, const struct scst_user_opt *opt)
-+{
-+ int res;
-+ struct scst_user_dev *dev;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&dev_priv_mutex);
-+ dev = file->private_data;
-+ res = dev_user_check_reg(dev);
-+ if (res != 0) {
-+ mutex_unlock(&dev_priv_mutex);
-+ goto out;
-+ }
-+ down_read(&dev->dev_rwsem);
-+ mutex_unlock(&dev_priv_mutex);
-+
-+ res = scst_suspend_activity(true);
-+ if (res != 0)
-+ goto out_up;
-+
-+ res = __dev_user_set_opt(dev, opt);
-+
-+ scst_resume_activity();
-+
-+out_up:
-+ up_read(&dev->dev_rwsem);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int dev_user_get_opt(struct file *file, void __user *arg)
-+{
-+ int res, rc;
-+ struct scst_user_dev *dev;
-+ struct scst_user_opt opt;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&dev_priv_mutex);
-+ dev = file->private_data;
-+ res = dev_user_check_reg(dev);
-+ if (res != 0) {
-+ mutex_unlock(&dev_priv_mutex);
-+ goto out;
-+ }
-+ down_read(&dev->dev_rwsem);
-+ mutex_unlock(&dev_priv_mutex);
-+
-+ opt.parse_type = dev->parse_type;
-+ opt.on_free_cmd_type = dev->on_free_cmd_type;
-+ opt.memory_reuse_type = dev->memory_reuse_type;
-+ opt.partial_transfers_type = dev->partial_transfers_type;
-+ opt.partial_len = dev->partial_len;
-+ opt.tst = dev->tst;
-+ opt.queue_alg = dev->queue_alg;
-+ opt.tas = dev->tas;
-+ opt.swp = dev->swp;
-+ opt.d_sense = dev->d_sense;
-+ opt.has_own_order_mgmt = dev->has_own_order_mgmt;
-+
-+ TRACE_DBG("dev %s, parse_type %x, on_free_cmd_type %x, "
-+ "memory_reuse_type %x, partial_transfers_type %x, "
-+ "partial_len %d", dev->name, opt.parse_type,
-+ opt.on_free_cmd_type, opt.memory_reuse_type,
-+ opt.partial_transfers_type, opt.partial_len);
-+
-+ rc = copy_to_user(arg, &opt, sizeof(opt));
-+ if (unlikely(rc != 0)) {
-+		PRINT_ERROR("Failed to copy %d bytes to user", rc);
-+ res = -EFAULT;
-+ goto out_up;
-+ }
-+
-+out_up:
-+ up_read(&dev->dev_rwsem);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
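Since SCST_USER_SET_OPTIONS overwrites every field, the usual user-space
pattern is a read-modify-write through SCST_USER_GET_OPTIONS first. A sketch:

#include <sys/ioctl.h>

/* Switch the device to unrestricted command reordering */
static int set_unrestricted_reorder(int fd)
{
	struct scst_user_opt opt;

	if (ioctl(fd, SCST_USER_GET_OPTIONS, &opt) != 0)
		return -1;

	opt.queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER;

	return ioctl(fd, SCST_USER_SET_OPTIONS, &opt);
}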
-+static int dev_usr_parse(struct scst_cmd *cmd)
-+{
-+ BUG();
-+ return SCST_CMD_STATE_DEFAULT;
-+}
-+
-+static int dev_user_exit_dev(struct scst_user_dev *dev)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE(TRACE_MGMT, "Releasing dev %s", dev->name);
-+
-+ spin_lock(&dev_list_lock);
-+ list_del(&dev->dev_list_entry);
-+ spin_unlock(&dev_list_lock);
-+
-+ dev->blocking = 0;
-+ wake_up_all(&dev->udev_cmd_threads.cmd_list_waitQ);
-+
-+ spin_lock(&cleanup_lock);
-+ list_add_tail(&dev->cleanup_list_entry, &cleanup_list);
-+ spin_unlock(&cleanup_lock);
-+
-+ wake_up(&cleanup_list_waitQ);
-+
-+ scst_unregister_virtual_device(dev->virt_id);
-+ scst_unregister_virtual_dev_driver(&dev->devtype);
-+
-+ sgv_pool_flush(dev->pool_clust);
-+ sgv_pool_flush(dev->pool);
-+
-+ TRACE_MGMT_DBG("Unregistering finished (dev %p)", dev);
-+
-+ dev->cleanup_done = 1;
-+
-+ wake_up(&cleanup_list_waitQ);
-+ wake_up(&dev->udev_cmd_threads.cmd_list_waitQ);
-+
-+ wait_for_completion(&dev->cleanup_cmpl);
-+
-+ sgv_pool_del(dev->pool_clust);
-+ sgv_pool_del(dev->pool);
-+
-+ scst_deinit_threads(&dev->udev_cmd_threads);
-+
-+ TRACE_MGMT_DBG("Releasing completed (dev %p)", dev);
-+
-+ module_put(THIS_MODULE);
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+static int __dev_user_release(void *arg)
-+{
-+ struct scst_user_dev *dev = arg;
-+ dev_user_exit_dev(dev);
-+ kfree(dev);
-+ return 0;
-+}
-+
-+static int dev_user_release(struct inode *inode, struct file *file)
-+{
-+ struct scst_user_dev *dev;
-+ struct task_struct *t;
-+
-+ TRACE_ENTRY();
-+
-+ dev = file->private_data;
-+ if (dev == NULL)
-+ goto out;
-+ file->private_data = NULL;
-+
-+ TRACE_MGMT_DBG("Going to release dev %s", dev->name);
-+
-+ t = kthread_run(__dev_user_release, dev, "scst_usr_released");
-+ if (IS_ERR(t)) {
-+ PRINT_CRIT_ERROR("kthread_run() failed (%ld), releasing device "
-+ "%p directly. If you have several devices under load "
-+ "it might deadlock!", PTR_ERR(t), dev);
-+ __dev_user_release(dev);
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+static int dev_user_process_cleanup(struct scst_user_dev *dev)
-+{
-+ struct scst_user_cmd *ucmd;
-+ int rc = 0, res = 1;
-+
-+ TRACE_ENTRY();
-+
-+ BUG_ON(dev->blocking);
-+ wake_up_all(&dev->udev_cmd_threads.cmd_list_waitQ); /* just in case */
-+
-+ while (1) {
-+ int rc1;
-+
-+		TRACE_DBG("Cleaning up dev %p", dev);
-+
-+ rc1 = dev_user_unjam_dev(dev);
-+ if ((rc1 == 0) && (rc == -EAGAIN) && dev->cleanup_done)
-+ break;
-+
-+ spin_lock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+
-+ rc = dev_user_get_next_cmd(dev, &ucmd);
-+ if (rc == 0)
-+ dev_user_unjam_cmd(ucmd, 1, NULL);
-+
-+ spin_unlock_irq(&dev->udev_cmd_threads.cmd_list_lock);
-+
-+ if (rc == -EAGAIN) {
-+ if (!dev->cleanup_done) {
-+ TRACE_DBG("No more commands (dev %p)", dev);
-+ goto out;
-+ }
-+ }
-+ }
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+{
-+ int i;
-+ for (i = 0; i < (int)ARRAY_SIZE(dev->ucmd_hash); i++) {
-+ struct list_head *head = &dev->ucmd_hash[i];
-+ struct scst_user_cmd *ucmd2;
-+again:
-+ list_for_each_entry(ucmd2, head, hash_list_entry) {
-+ PRINT_ERROR("Lost ucmd %p (state %x, ref %d)", ucmd2,
-+ ucmd2->state, atomic_read(&ucmd2->ucmd_ref));
-+ ucmd_put(ucmd2);
-+ goto again;
-+ }
-+ }
-+}
-+#endif
-+
-+	TRACE_DBG("Cleanup done (dev %p)", dev);
-+ complete_all(&dev->cleanup_cmpl);
-+ res = 0;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t dev_user_sysfs_commands_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos = 0, ppos, i;
-+ struct scst_device *dev;
-+ struct scst_user_dev *udev;
-+ unsigned long flags;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ udev = dev->dh_priv;
-+
-+ spin_lock_irqsave(&udev->udev_cmd_threads.cmd_list_lock, flags);
-+ for (i = 0; i < (int)ARRAY_SIZE(udev->ucmd_hash); i++) {
-+ struct list_head *head = &udev->ucmd_hash[i];
-+ struct scst_user_cmd *ucmd;
-+ list_for_each_entry(ucmd, head, hash_list_entry) {
-+ ppos = pos;
-+ pos += scnprintf(&buf[pos],
-+ SCST_SYSFS_BLOCK_SIZE - pos,
-+ "ucmd %p (state %x, ref %d), "
-+ "sent_to_user %d, seen_by_user %d, "
-+ "aborted %d, jammed %d, scst_cmd %p\n",
-+ ucmd, ucmd->state,
-+ atomic_read(&ucmd->ucmd_ref),
-+ ucmd->sent_to_user, ucmd->seen_by_user,
-+ ucmd->aborted, ucmd->jammed, ucmd->cmd);
-+ if (pos >= SCST_SYSFS_BLOCK_SIZE-1) {
-+ ppos += scnprintf(&buf[ppos],
-+ SCST_SYSFS_BLOCK_SIZE - ppos, "...\n");
-+ pos = ppos;
-+ break;
-+ }
-+ }
-+ }
-+ spin_unlock_irqrestore(&udev->udev_cmd_threads.cmd_list_lock, flags);
-+
-+ TRACE_EXIT_RES(pos);
-+ return pos;
-+}
-+
-+static inline int test_cleanup_list(void)
-+{
-+ int res = !list_empty(&cleanup_list) ||
-+ unlikely(kthread_should_stop());
-+ return res;
-+}
-+
-+static int dev_user_cleanup_thread(void *arg)
-+{
-+ TRACE_ENTRY();
-+
-+ PRINT_INFO("Cleanup thread started, PID %d", current->pid);
-+
-+ current->flags |= PF_NOFREEZE;
-+
-+ spin_lock(&cleanup_lock);
-+ while (!kthread_should_stop()) {
-+ wait_queue_t wait;
-+ init_waitqueue_entry(&wait, current);
-+
-+ if (!test_cleanup_list()) {
-+ add_wait_queue_exclusive(&cleanup_list_waitQ, &wait);
-+ for (;;) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ if (test_cleanup_list())
-+ break;
-+ spin_unlock(&cleanup_lock);
-+ schedule();
-+ spin_lock(&cleanup_lock);
-+ }
-+ set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&cleanup_list_waitQ, &wait);
-+ }
-+
-+		/*
-+		 * We have to poll the devices, because commands can arrive
-+		 * from the SCST core on cmd_list_waitQ and we have no
-+		 * practical way to detect them otherwise.
-+		 */
-+
-+ while (1) {
-+ struct scst_user_dev *dev;
-+ LIST_HEAD(cl_devs);
-+
-+ while (!list_empty(&cleanup_list)) {
-+ int rc;
-+
-+ dev = list_entry(cleanup_list.next,
-+ typeof(*dev), cleanup_list_entry);
-+ list_del(&dev->cleanup_list_entry);
-+
-+ spin_unlock(&cleanup_lock);
-+ rc = dev_user_process_cleanup(dev);
-+ spin_lock(&cleanup_lock);
-+
-+ if (rc != 0)
-+ list_add_tail(&dev->cleanup_list_entry,
-+ &cl_devs);
-+ }
-+
-+ if (list_empty(&cl_devs))
-+ break;
-+
-+ spin_unlock(&cleanup_lock);
-+ msleep(100);
-+ spin_lock(&cleanup_lock);
-+
-+ while (!list_empty(&cl_devs)) {
-+ dev = list_entry(cl_devs.next, typeof(*dev),
-+ cleanup_list_entry);
-+ list_move_tail(&dev->cleanup_list_entry,
-+ &cleanup_list);
-+ }
-+ }
-+ }
-+ spin_unlock(&cleanup_lock);
-+
-+	/*
-+	 * If kthread_should_stop() is true, we are guaranteed to be in
-+	 * module unload, so cleanup_list must be empty.
-+	 */
-+ BUG_ON(!list_empty(&cleanup_list));
-+
-+ PRINT_INFO("Cleanup thread PID %d finished", current->pid);
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+static int __init init_scst_user(void)
-+{
-+ int res = 0;
-+ struct max_get_reply {
-+ union {
-+ struct scst_user_get_cmd g;
-+ struct scst_user_reply_cmd r;
-+ };
-+ };
-+ struct device *dev;
-+
-+ TRACE_ENTRY();
-+
-+ user_cmd_cachep = KMEM_CACHE(scst_user_cmd, SCST_SLAB_FLAGS);
-+ if (user_cmd_cachep == NULL) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ user_get_cmd_cachep = KMEM_CACHE(max_get_reply, SCST_SLAB_FLAGS);
-+ if (user_get_cmd_cachep == NULL) {
-+ res = -ENOMEM;
-+ goto out_cache;
-+ }
-+
-+ dev_user_devtype.module = THIS_MODULE;
-+
-+ res = scst_register_virtual_dev_driver(&dev_user_devtype);
-+ if (res < 0)
-+ goto out_cache1;
-+
-+ dev_user_sysfs_class = class_create(THIS_MODULE, DEV_USER_NAME);
-+ if (IS_ERR(dev_user_sysfs_class)) {
-+		PRINT_ERROR("%s", "Unable to create sysfs class for SCST "
-+			"user space handler");
-+ res = PTR_ERR(dev_user_sysfs_class);
-+ goto out_unreg;
-+ }
-+
-+ dev_user_major = register_chrdev(0, DEV_USER_NAME, &dev_user_fops);
-+ if (dev_user_major < 0) {
-+		res = dev_user_major;
-+		PRINT_ERROR("register_chrdev() failed: %d", res);
-+ goto out_class;
-+ }
-+
-+ dev = device_create(dev_user_sysfs_class, NULL,
-+ MKDEV(dev_user_major, 0),
-+ NULL,
-+ DEV_USER_NAME);
-+ if (IS_ERR(dev)) {
-+ res = PTR_ERR(dev);
-+ goto out_chrdev;
-+ }
-+
-+ cleanup_thread = kthread_run(dev_user_cleanup_thread, NULL,
-+ "scst_usr_cleanupd");
-+ if (IS_ERR(cleanup_thread)) {
-+ res = PTR_ERR(cleanup_thread);
-+ PRINT_ERROR("kthread_create() failed: %d", res);
-+ goto out_dev;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_dev:
-+ device_destroy(dev_user_sysfs_class, MKDEV(dev_user_major, 0));
-+
-+out_chrdev:
-+ unregister_chrdev(dev_user_major, DEV_USER_NAME);
-+
-+out_class:
-+ class_destroy(dev_user_sysfs_class);
-+
-+out_unreg:
-+	scst_unregister_virtual_dev_driver(&dev_user_devtype);
-+
-+out_cache1:
-+ kmem_cache_destroy(user_get_cmd_cachep);
-+
-+out_cache:
-+ kmem_cache_destroy(user_cmd_cachep);
-+ goto out;
-+}
-+
-+static void __exit exit_scst_user(void)
-+{
-+ int rc;
-+
-+ TRACE_ENTRY();
-+
-+ rc = kthread_stop(cleanup_thread);
-+ if (rc < 0)
-+ TRACE_MGMT_DBG("kthread_stop() failed: %d", rc);
-+
-+ unregister_chrdev(dev_user_major, DEV_USER_NAME);
-+ device_destroy(dev_user_sysfs_class, MKDEV(dev_user_major, 0));
-+ class_destroy(dev_user_sysfs_class);
-+
-+ scst_unregister_virtual_dev_driver(&dev_user_devtype);
-+
-+ kmem_cache_destroy(user_get_cmd_cachep);
-+ kmem_cache_destroy(user_cmd_cachep);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+module_init(init_scst_user);
-+module_exit(exit_scst_user);
-+
-+MODULE_AUTHOR("Vladislav Bolkhovitin");
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("User space device handler for SCST");
-+MODULE_VERSION(SCST_VERSION_STRING);
-diff -uprN orig/linux-3.2/drivers/scst/dev_handlers/scst_vdisk.c linux-3.2/drivers/scst/dev_handlers/scst_vdisk.c
---- orig/linux-3.2/drivers/scst/dev_handlers/scst_vdisk.c
-+++ linux-3.2/drivers/scst/dev_handlers/scst_vdisk.c
-@@ -0,0 +1,4529 @@
-+/*
-+ * scst_vdisk.c
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 Ming Zhang <blackmagic02881 at gmail dot com>
-+ * Copyright (C) 2007 Ross Walker <rswwalker at hotmail dot com>
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * SCSI disk (type 0) and CDROM (type 5) dev handler using files
-+ * on file systems or block devices (VDISK)
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/unistd.h>
-+#include <linux/spinlock.h>
-+#include <linux/init.h>
-+#include <linux/uio.h>
-+#include <linux/list.h>
-+#include <linux/ctype.h>
-+#include <linux/writeback.h>
-+#include <linux/vmalloc.h>
-+#include <asm/atomic.h>
-+#include <linux/kthread.h>
-+#include <linux/sched.h>
-+#include <linux/delay.h>
-+#include <asm/div64.h>
-+#include <asm/unaligned.h>
-+#include <linux/slab.h>
-+#include <linux/bio.h>
-+#include <linux/crc32c.h>
-+
-+#define LOG_PREFIX "dev_vdisk"
-+
-+#include <scst/scst.h>
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+
-+#define TRACE_ORDER 0x80000000
-+
-+static struct scst_trace_log vdisk_local_trace_tbl[] = {
-+ { TRACE_ORDER, "order" },
-+ { 0, NULL }
-+};
-+#define trace_log_tbl vdisk_local_trace_tbl
-+
-+#define VDISK_TRACE_TBL_HELP ", order"
-+
-+#endif
-+
-+#include "scst_dev_handler.h"
-+
-+/* 8 byte ASCII Vendor */
-+#define SCST_FIO_VENDOR "SCST_FIO"
-+#define SCST_BIO_VENDOR "SCST_BIO"
-+/* 4 byte ASCII Product Revision Level - left aligned */
-+#define SCST_FIO_REV " 220"
-+
-+#define MAX_USN_LEN (20+1) /* For '\0' */
-+
-+#define INQ_BUF_SZ 256
-+#define EVPD 0x01
-+#define CMDDT 0x02
-+
-+#define MSENSE_BUF_SZ 256
-+#define DBD 0x08 /* disable block descriptor */
-+#define WP 0x80 /* write protect */
-+#define DPOFUA 0x10 /* DPOFUA bit */
-+#define WCE 0x04 /* write cache enable */
-+
-+#define PF 0x10 /* page format */
-+#define SP 0x01 /* save pages */
-+#define PS 0x80 /* parameter saveable */
-+
-+#define BYTE 8
-+#define DEF_DISK_BLOCKSIZE_SHIFT 9
-+#define DEF_DISK_BLOCKSIZE (1 << DEF_DISK_BLOCKSIZE_SHIFT)
-+#define DEF_CDROM_BLOCKSIZE_SHIFT 11
-+#define DEF_CDROM_BLOCKSIZE (1 << DEF_CDROM_BLOCKSIZE_SHIFT)
-+#define DEF_SECTORS 56
-+#define DEF_HEADS 255
-+#define LEN_MEM (32 * 1024)
-+#define DEF_RD_ONLY 0
-+#define DEF_WRITE_THROUGH 0
-+#define DEF_NV_CACHE 0
-+#define DEF_O_DIRECT 0
-+#define DEF_REMOVABLE 0
-+#define DEF_THIN_PROVISIONED 0
-+
-+#define VDISK_NULLIO_SIZE (3LL*1024*1024*1024*1024/2)
-+
-+#define DEF_TST SCST_CONTR_MODE_SEP_TASK_SETS
-+
-+/*
-+ * Since we can't control the backing storage device's reordering, we have to
-+ * always report unrestricted reordering.
-+ */
-+#define DEF_QUEUE_ALG_WT SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER
-+#define DEF_QUEUE_ALG SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER
-+#define DEF_SWP 0
-+#define DEF_TAS 0
-+
-+#define DEF_DSENSE SCST_CONTR_MODE_FIXED_SENSE
-+
-+struct scst_vdisk_dev {
-+ uint32_t block_size;
-+ uint64_t nblocks;
-+ int block_shift;
-+ loff_t file_size; /* in bytes */
-+
-+	/*
-+	 * This lock can be taken in both SIRQ and thread context, but for
-+	 * each particular instance it is taken consistently in either SIRQ
-+	 * or thread context. Mixing the two is forbidden.
-+	 */
-+ spinlock_t flags_lock;
-+
-+ /*
-+ * Below flags are protected by flags_lock or suspended activity
-+ * with scst_vdisk_mutex.
-+ */
-+ unsigned int rd_only:1;
-+ unsigned int wt_flag:1;
-+ unsigned int nv_cache:1;
-+ unsigned int o_direct_flag:1;
-+ unsigned int media_changed:1;
-+ unsigned int prevent_allow_medium_removal:1;
-+ unsigned int nullio:1;
-+ unsigned int blockio:1;
-+ unsigned int cdrom_empty:1;
-+ unsigned int removable:1;
-+ unsigned int thin_provisioned:1;
-+
-+ int virt_id;
-+ char name[16+1]; /* Name of the virtual device,
-+ must be <= SCSI Model + 1 */
-+ char *filename; /* File name, protected by
-+ scst_mutex and suspended activities */
-+ uint16_t command_set_version;
-+
-+ /* All 4 protected by vdisk_serial_rwlock */
-+ unsigned int t10_dev_id_set:1; /* true if t10_dev_id manually set */
-+ unsigned int usn_set:1; /* true if usn manually set */
-+ char t10_dev_id[16+8+2]; /* T10 device ID */
-+ char usn[MAX_USN_LEN];
-+
-+ struct scst_device *dev;
-+ struct list_head vdev_list_entry;
-+
-+ struct scst_dev_type *vdev_devt;
-+};
-+
-+struct scst_vdisk_thr {
-+ struct scst_thr_data_hdr hdr;
-+ struct file *fd;
-+ struct block_device *bdev;
-+ struct iovec *iv;
-+ int iv_count;
-+};
-+
-+/* The context RA patch is supposed to be applied to the kernel */
-+#define DEF_NUM_THREADS 8
-+static int num_threads = DEF_NUM_THREADS;
-+
-+module_param_named(num_threads, num_threads, int, S_IRUGO);
-+MODULE_PARM_DESC(num_threads, "vdisk threads count");
-+
-+static int vdisk_attach(struct scst_device *dev);
-+static void vdisk_detach(struct scst_device *dev);
-+static int vdisk_attach_tgt(struct scst_tgt_dev *tgt_dev);
-+static void vdisk_detach_tgt(struct scst_tgt_dev *tgt_dev);
-+static int vdisk_parse(struct scst_cmd *);
-+static int vdisk_do_job(struct scst_cmd *cmd);
-+static int vcdrom_parse(struct scst_cmd *);
-+static int vcdrom_exec(struct scst_cmd *cmd);
-+static void vdisk_exec_read(struct scst_cmd *cmd,
-+ struct scst_vdisk_thr *thr, loff_t loff);
-+static void vdisk_exec_write(struct scst_cmd *cmd,
-+ struct scst_vdisk_thr *thr, loff_t loff);
-+static void blockio_exec_rw(struct scst_cmd *cmd, struct scst_vdisk_thr *thr,
-+ u64 lba_start, int write);
-+static int vdisk_blockio_flush(struct block_device *bdev, gfp_t gfp_mask,
-+ bool report_error);
-+static void vdisk_exec_verify(struct scst_cmd *cmd,
-+ struct scst_vdisk_thr *thr, loff_t loff);
-+static void vdisk_exec_read_capacity(struct scst_cmd *cmd);
-+static void vdisk_exec_read_capacity16(struct scst_cmd *cmd);
-+static void vdisk_exec_report_tpgs(struct scst_cmd *cmd);
-+static void vdisk_exec_inquiry(struct scst_cmd *cmd);
-+static void vdisk_exec_request_sense(struct scst_cmd *cmd);
-+static void vdisk_exec_mode_sense(struct scst_cmd *cmd);
-+static void vdisk_exec_mode_select(struct scst_cmd *cmd);
-+static void vdisk_exec_log(struct scst_cmd *cmd);
-+static void vdisk_exec_read_toc(struct scst_cmd *cmd);
-+static void vdisk_exec_prevent_allow_medium_removal(struct scst_cmd *cmd);
-+static void vdisk_exec_unmap(struct scst_cmd *cmd, struct scst_vdisk_thr *thr);
-+static int vdisk_fsync(struct scst_vdisk_thr *thr, loff_t loff,
-+ loff_t len, struct scst_cmd *cmd, struct scst_device *dev);
-+static ssize_t vdisk_add_fileio_device(const char *device_name, char *params);
-+static ssize_t vdisk_add_blockio_device(const char *device_name, char *params);
-+static ssize_t vdisk_add_nullio_device(const char *device_name, char *params);
-+static ssize_t vdisk_del_device(const char *device_name);
-+static ssize_t vcdrom_add_device(const char *device_name, char *params);
-+static ssize_t vcdrom_del_device(const char *device_name);
-+static int vdisk_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
-+ struct scst_tgt_dev *tgt_dev);
-+static uint64_t vdisk_gen_dev_id_num(const char *virt_dev_name);
-+
-+/** SYSFS **/
-+
-+static ssize_t vdev_sysfs_size_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf);
-+static ssize_t vdisk_sysfs_blocksize_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf);
-+static ssize_t vdisk_sysfs_rd_only_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf);
-+static ssize_t vdisk_sysfs_wt_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf);
-+static ssize_t vdisk_sysfs_tp_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf);
-+static ssize_t vdisk_sysfs_nv_cache_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf);
-+static ssize_t vdisk_sysfs_o_direct_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf);
-+static ssize_t vdisk_sysfs_removable_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf);
-+static ssize_t vdev_sysfs_filename_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf);
-+static ssize_t vdisk_sysfs_resync_size_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count);
-+static ssize_t vdev_sysfs_t10_dev_id_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count);
-+static ssize_t vdev_sysfs_t10_dev_id_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf);
-+static ssize_t vdev_sysfs_usn_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count);
-+static ssize_t vdev_sysfs_usn_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf);
-+
-+static ssize_t vcdrom_sysfs_filename_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count);
-+
-+static struct kobj_attribute vdev_size_attr =
-+ __ATTR(size_mb, S_IRUGO, vdev_sysfs_size_show, NULL);
-+static struct kobj_attribute vdisk_blocksize_attr =
-+ __ATTR(blocksize, S_IRUGO, vdisk_sysfs_blocksize_show, NULL);
-+static struct kobj_attribute vdisk_rd_only_attr =
-+ __ATTR(read_only, S_IRUGO, vdisk_sysfs_rd_only_show, NULL);
-+static struct kobj_attribute vdisk_wt_attr =
-+ __ATTR(write_through, S_IRUGO, vdisk_sysfs_wt_show, NULL);
-+static struct kobj_attribute vdisk_tp_attr =
-+ __ATTR(thin_provisioned, S_IRUGO, vdisk_sysfs_tp_show, NULL);
-+static struct kobj_attribute vdisk_nv_cache_attr =
-+ __ATTR(nv_cache, S_IRUGO, vdisk_sysfs_nv_cache_show, NULL);
-+static struct kobj_attribute vdisk_o_direct_attr =
-+ __ATTR(o_direct, S_IRUGO, vdisk_sysfs_o_direct_show, NULL);
-+static struct kobj_attribute vdisk_removable_attr =
-+ __ATTR(removable, S_IRUGO, vdisk_sysfs_removable_show, NULL);
-+static struct kobj_attribute vdisk_filename_attr =
-+ __ATTR(filename, S_IRUGO, vdev_sysfs_filename_show, NULL);
-+static struct kobj_attribute vdisk_resync_size_attr =
-+ __ATTR(resync_size, S_IWUSR, NULL, vdisk_sysfs_resync_size_store);
-+static struct kobj_attribute vdev_t10_dev_id_attr =
-+ __ATTR(t10_dev_id, S_IWUSR|S_IRUGO, vdev_sysfs_t10_dev_id_show,
-+ vdev_sysfs_t10_dev_id_store);
-+static struct kobj_attribute vdev_usn_attr =
-+ __ATTR(usn, S_IWUSR|S_IRUGO, vdev_sysfs_usn_show, vdev_sysfs_usn_store);
-+
-+static struct kobj_attribute vcdrom_filename_attr =
-+ __ATTR(filename, S_IRUGO|S_IWUSR, vdev_sysfs_filename_show,
-+ vcdrom_sysfs_filename_store);
-+
-+static const struct attribute *vdisk_fileio_attrs[] = {
-+ &vdev_size_attr.attr,
-+ &vdisk_blocksize_attr.attr,
-+ &vdisk_rd_only_attr.attr,
-+ &vdisk_wt_attr.attr,
-+ &vdisk_tp_attr.attr,
-+ &vdisk_nv_cache_attr.attr,
-+ &vdisk_o_direct_attr.attr,
-+ &vdisk_removable_attr.attr,
-+ &vdisk_filename_attr.attr,
-+ &vdisk_resync_size_attr.attr,
-+ &vdev_t10_dev_id_attr.attr,
-+ &vdev_usn_attr.attr,
-+ NULL,
-+};
-+
-+static const struct attribute *vdisk_blockio_attrs[] = {
-+ &vdev_size_attr.attr,
-+ &vdisk_blocksize_attr.attr,
-+ &vdisk_rd_only_attr.attr,
-+ &vdisk_nv_cache_attr.attr,
-+ &vdisk_removable_attr.attr,
-+ &vdisk_filename_attr.attr,
-+ &vdisk_resync_size_attr.attr,
-+ &vdev_t10_dev_id_attr.attr,
-+ &vdev_usn_attr.attr,
-+ &vdisk_tp_attr.attr,
-+ NULL,
-+};
-+
-+static const struct attribute *vdisk_nullio_attrs[] = {
-+ &vdev_size_attr.attr,
-+ &vdisk_blocksize_attr.attr,
-+ &vdisk_rd_only_attr.attr,
-+ &vdisk_removable_attr.attr,
-+ &vdev_t10_dev_id_attr.attr,
-+ &vdev_usn_attr.attr,
-+ NULL,
-+};
-+
-+static const struct attribute *vcdrom_attrs[] = {
-+ &vdev_size_attr.attr,
-+ &vcdrom_filename_attr.attr,
-+ &vdev_t10_dev_id_attr.attr,
-+ &vdev_usn_attr.attr,
-+ NULL,
-+};
-+
-+/* Protects vdisks addition/deletion and related activities, like search */
-+static DEFINE_MUTEX(scst_vdisk_mutex);
-+
-+/* Protects devices t10_dev_id and usn */
-+static DEFINE_RWLOCK(vdisk_serial_rwlock);
-+
-+/* Protected by scst_vdisk_mutex */
-+static LIST_HEAD(vdev_list);
-+
-+static struct kmem_cache *vdisk_thr_cachep;
-+
-+/*
-+ * Be careful when changing the "name" field, since it is the name of the
-+ * corresponding /sys/kernel/scst_tgt entry and hence part of the user space
-+ * ABI.
-+ */
-+
-+static struct scst_dev_type vdisk_file_devtype = {
-+ .name = "vdisk_fileio",
-+ .type = TYPE_DISK,
-+ .exec_sync = 1,
-+ .threads_num = -1,
-+ .parse_atomic = 1,
-+ .dev_done_atomic = 1,
-+ .attach = vdisk_attach,
-+ .detach = vdisk_detach,
-+ .attach_tgt = vdisk_attach_tgt,
-+ .detach_tgt = vdisk_detach_tgt,
-+ .parse = vdisk_parse,
-+ .exec = vdisk_do_job,
-+ .task_mgmt_fn = vdisk_task_mgmt_fn,
-+ .add_device = vdisk_add_fileio_device,
-+ .del_device = vdisk_del_device,
-+ .dev_attrs = vdisk_fileio_attrs,
-+ .add_device_parameters = "filename, blocksize, write_through, "
-+ "nv_cache, o_direct, read_only, removable, thin_provisioned",
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
-+ .trace_flags = &trace_flag,
-+ .trace_tbl = vdisk_local_trace_tbl,
-+ .trace_tbl_help = VDISK_TRACE_TBL_HELP,
-+#endif
-+};
-+
-+static struct kmem_cache *blockio_work_cachep;
-+
-+static struct scst_dev_type vdisk_blk_devtype = {
-+ .name = "vdisk_blockio",
-+ .type = TYPE_DISK,
-+ .threads_num = 1,
-+ .parse_atomic = 1,
-+ .dev_done_atomic = 1,
-+ .attach = vdisk_attach,
-+ .detach = vdisk_detach,
-+ .attach_tgt = vdisk_attach_tgt,
-+ .detach_tgt = vdisk_detach_tgt,
-+ .parse = vdisk_parse,
-+ .exec = vdisk_do_job,
-+ .task_mgmt_fn = vdisk_task_mgmt_fn,
-+ .add_device = vdisk_add_blockio_device,
-+ .del_device = vdisk_del_device,
-+ .dev_attrs = vdisk_blockio_attrs,
-+ .add_device_parameters = "filename, blocksize, nv_cache, read_only, "
-+ "removable, thin_provisioned",
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
-+ .trace_flags = &trace_flag,
-+ .trace_tbl = vdisk_local_trace_tbl,
-+ .trace_tbl_help = VDISK_TRACE_TBL_HELP,
-+#endif
-+};
-+
-+static struct scst_dev_type vdisk_null_devtype = {
-+ .name = "vdisk_nullio",
-+ .type = TYPE_DISK,
-+ .threads_num = 0,
-+ .parse_atomic = 1,
-+ .dev_done_atomic = 1,
-+ .attach = vdisk_attach,
-+ .detach = vdisk_detach,
-+ .attach_tgt = vdisk_attach_tgt,
-+ .detach_tgt = vdisk_detach_tgt,
-+ .parse = vdisk_parse,
-+ .exec = vdisk_do_job,
-+ .task_mgmt_fn = vdisk_task_mgmt_fn,
-+ .add_device = vdisk_add_nullio_device,
-+ .del_device = vdisk_del_device,
-+ .dev_attrs = vdisk_nullio_attrs,
-+ .add_device_parameters = "blocksize, read_only, removable",
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
-+ .trace_flags = &trace_flag,
-+ .trace_tbl = vdisk_local_trace_tbl,
-+ .trace_tbl_help = VDISK_TRACE_TBL_HELP,
-+#endif
-+};
-+
-+static struct scst_dev_type vcdrom_devtype = {
-+ .name = "vcdrom",
-+ .type = TYPE_ROM,
-+ .exec_sync = 1,
-+ .threads_num = -1,
-+ .parse_atomic = 1,
-+ .dev_done_atomic = 1,
-+ .attach = vdisk_attach,
-+ .detach = vdisk_detach,
-+ .attach_tgt = vdisk_attach_tgt,
-+ .detach_tgt = vdisk_detach_tgt,
-+ .parse = vcdrom_parse,
-+ .exec = vcdrom_exec,
-+ .task_mgmt_fn = vdisk_task_mgmt_fn,
-+ .add_device = vcdrom_add_device,
-+ .del_device = vcdrom_del_device,
-+ .dev_attrs = vcdrom_attrs,
-+ .add_device_parameters = NULL,
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
-+ .trace_flags = &trace_flag,
-+ .trace_tbl = vdisk_local_trace_tbl,
-+ .trace_tbl_help = VDISK_TRACE_TBL_HELP,
-+#endif
-+};
-+
-+static struct scst_vdisk_thr nullio_thr_data;
-+
-+static const char *vdev_get_filename(const struct scst_vdisk_dev *virt_dev)
-+{
-+ if (virt_dev->filename != NULL)
-+ return virt_dev->filename;
-+ else
-+ return "none";
-+}
-+
-+/* Returns fd, use IS_ERR(fd) to get error status */
-+static struct file *vdev_open_fd(const struct scst_vdisk_dev *virt_dev)
-+{
-+ int open_flags = 0;
-+ struct file *fd;
-+
-+ TRACE_ENTRY();
-+
-+ if (virt_dev->dev->rd_only)
-+ open_flags |= O_RDONLY;
-+ else
-+ open_flags |= O_RDWR;
-+ if (virt_dev->o_direct_flag)
-+ open_flags |= O_DIRECT;
-+ if (virt_dev->wt_flag && !virt_dev->nv_cache)
-+ open_flags |= O_SYNC;
-+ TRACE_DBG("Opening file %s, flags 0x%x",
-+ virt_dev->filename, open_flags);
-+ fd = filp_open(virt_dev->filename, O_LARGEFILE | open_flags, 0600);
-+
-+ TRACE_EXIT();
-+ return fd;
-+}
-+
-+static void vdisk_blockio_check_flush_support(struct scst_vdisk_dev *virt_dev)
-+{
-+ struct inode *inode;
-+ struct file *fd;
-+
-+ TRACE_ENTRY();
-+
-+ if (!virt_dev->blockio || virt_dev->rd_only || virt_dev->nv_cache)
-+ goto out;
-+
-+ fd = filp_open(virt_dev->filename, O_LARGEFILE, 0600);
-+ if (IS_ERR(fd)) {
-+ PRINT_ERROR("filp_open(%s) returned error %ld",
-+ virt_dev->filename, PTR_ERR(fd));
-+ goto out;
-+ }
-+
-+ inode = fd->f_dentry->d_inode;
-+
-+ if (!S_ISBLK(inode->i_mode)) {
-+ PRINT_ERROR("%s is NOT a block device", virt_dev->filename);
-+ goto out_close;
-+ }
-+
-+ if (vdisk_blockio_flush(inode->i_bdev, GFP_KERNEL, false) != 0) {
-+ PRINT_WARNING("Device %s doesn't support barriers, switching "
-+ "to NV_CACHE mode. Read README for more details.",
-+ virt_dev->filename);
-+ virt_dev->nv_cache = 1;
-+ }
-+
-+out_close:
-+ filp_close(fd, NULL);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void vdisk_check_tp_support(struct scst_vdisk_dev *virt_dev)
-+{
-+ struct inode *inode;
-+ struct file *fd;
-+ bool supported = false;
-+
-+ TRACE_ENTRY();
-+
-+ if (virt_dev->rd_only || !virt_dev->thin_provisioned)
-+ goto out;
-+
-+ fd = filp_open(virt_dev->filename, O_LARGEFILE, 0600);
-+ if (IS_ERR(fd)) {
-+ PRINT_ERROR("filp_open(%s) returned error %ld",
-+ virt_dev->filename, PTR_ERR(fd));
-+ goto out;
-+ }
-+
-+ inode = fd->f_dentry->d_inode;
-+
-+ if (virt_dev->blockio) {
-+ if (!S_ISBLK(inode->i_mode)) {
-+ PRINT_ERROR("%s is NOT a block device",
-+ virt_dev->filename);
-+ goto out_close;
-+ }
-+ supported = blk_queue_discard(bdev_get_queue(inode->i_bdev));
-+
-+ } else {
-+ /*
-+ * truncate_range() was chosen as a sample. In the future, when
-+ * unmapping a range of blocks in a file becomes standard, we
-+ * will just switch to the new call.
-+ */
-+ supported = (inode->i_op->truncate_range != NULL);
-+ }
-+
-+ if (!supported) {
-+ PRINT_WARNING("Device %s doesn't support thin "
-+ "provisioning, disabling it.",
-+ virt_dev->filename);
-+ virt_dev->thin_provisioned = 0;
-+ }
-+
-+out_close:
-+ filp_close(fd, NULL);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Returns 0 on success and file size in *file_size, error code otherwise */
-+static int vdisk_get_file_size(const char *filename, bool blockio,
-+ loff_t *file_size)
-+{
-+ struct inode *inode;
-+ int res = 0;
-+ struct file *fd;
-+
-+ TRACE_ENTRY();
-+
-+ *file_size = 0;
-+
-+ fd = filp_open(filename, O_LARGEFILE | O_RDONLY, 0600);
-+ if (IS_ERR(fd)) {
-+ res = PTR_ERR(fd);
-+ PRINT_ERROR("filp_open(%s) returned error %d", filename, res);
-+ goto out;
-+ }
-+
-+ inode = fd->f_dentry->d_inode;
-+
-+ if (blockio && !S_ISBLK(inode->i_mode)) {
-+ PRINT_ERROR("File %s is NOT a block device", filename);
-+ res = -EINVAL;
-+ goto out_close;
-+ }
-+
-+ if (S_ISREG(inode->i_mode))
-+ /* Nothing to do */;
-+ else if (S_ISBLK(inode->i_mode))
-+ inode = inode->i_bdev->bd_inode;
-+ else {
-+ res = -EINVAL;
-+ goto out_close;
-+ }
-+
-+ *file_size = inode->i_size;
-+
-+out_close:
-+ filp_close(fd, NULL);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* scst_vdisk_mutex supposed to be held */
-+static struct scst_vdisk_dev *vdev_find(const char *name)
-+{
-+ struct scst_vdisk_dev *res, *vv;
-+
-+ TRACE_ENTRY();
-+
-+ res = NULL;
-+ list_for_each_entry(vv, &vdev_list, vdev_list_entry) {
-+ if (strcmp(vv->name, name) == 0) {
-+ res = vv;
-+ break;
-+ }
-+ }
-+
-+ TRACE_EXIT_HRES((unsigned long)res);
-+ return res;
-+}
-+
-+static int vdisk_attach(struct scst_device *dev)
-+{
-+ int res = 0;
-+ loff_t err;
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("virt_id %d (%s)", dev->virt_id, dev->virt_name);
-+
-+ if (dev->virt_id == 0) {
-+ PRINT_ERROR("%s", "Not a virtual device");
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * scst_vdisk_mutex must be already taken before
-+ * scst_register_virtual_device()
-+ */
-+ virt_dev = vdev_find(dev->virt_name);
-+ if (virt_dev == NULL) {
-+ PRINT_ERROR("Device %s not found", dev->virt_name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ virt_dev->dev = dev;
-+
-+ dev->rd_only = virt_dev->rd_only;
-+
-+ if (!virt_dev->cdrom_empty) {
-+ if (virt_dev->nullio)
-+ err = VDISK_NULLIO_SIZE;
-+ else {
-+ res = vdisk_get_file_size(virt_dev->filename,
-+ virt_dev->blockio, &err);
-+ if (res != 0)
-+ goto out;
-+ }
-+ virt_dev->file_size = err;
-+
-+ TRACE_DBG("size of file: %lld", (long long unsigned int)err);
-+
-+ vdisk_blockio_check_flush_support(virt_dev);
-+ vdisk_check_tp_support(virt_dev);
-+ } else
-+ virt_dev->file_size = 0;
-+
-+ virt_dev->nblocks = virt_dev->file_size >> virt_dev->block_shift;
-+
-+ if (!virt_dev->cdrom_empty) {
-+ PRINT_INFO("Attached SCSI target virtual %s %s "
-+ "(file=\"%s\", fs=%lldMB, bs=%d, nblocks=%lld,"
-+ " cyln=%lld%s)",
-+ (dev->type == TYPE_DISK) ? "disk" : "cdrom",
-+ virt_dev->name, vdev_get_filename(virt_dev),
-+ virt_dev->file_size >> 20, virt_dev->block_size,
-+ (long long unsigned int)virt_dev->nblocks,
-+ (long long unsigned int)virt_dev->nblocks/64/32,
-+ virt_dev->nblocks < 64*32
-+ ? " !WARNING! cyln less than 1" : "");
-+ } else {
-+ PRINT_INFO("Attached empty SCSI target virtual cdrom %s",
-+ virt_dev->name);
-+ }
-+
-+ dev->dh_priv = virt_dev;
-+
-+ dev->tst = DEF_TST;
-+ dev->d_sense = DEF_DSENSE;
-+ if (virt_dev->wt_flag && !virt_dev->nv_cache)
-+ dev->queue_alg = DEF_QUEUE_ALG_WT;
-+ else
-+ dev->queue_alg = DEF_QUEUE_ALG;
-+ dev->swp = DEF_SWP;
-+ dev->tas = DEF_TAS;
-+
-+out:
-+ TRACE_EXIT();
-+ return res;
-+}
-+
-+/* scst_mutex supposed to be held */
-+static void vdisk_detach(struct scst_device *dev)
-+{
-+ struct scst_vdisk_dev *virt_dev = dev->dh_priv;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("virt_id %d", dev->virt_id);
-+
-+ PRINT_INFO("Detached virtual device %s (\"%s\")",
-+ virt_dev->name, vdev_get_filename(virt_dev));
-+
-+ /* virt_dev will be freed by the caller */
-+ dev->dh_priv = NULL;
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void vdisk_free_thr_data(struct scst_thr_data_hdr *d)
-+{
-+ struct scst_vdisk_thr *thr =
-+ container_of(d, struct scst_vdisk_thr, hdr);
-+
-+ TRACE_ENTRY();
-+
-+ if (thr->fd)
-+ filp_close(thr->fd, NULL);
-+
-+ kfree(thr->iv);
-+
-+ kmem_cache_free(vdisk_thr_cachep, thr);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static struct scst_vdisk_thr *vdisk_init_thr_data(
-+ struct scst_tgt_dev *tgt_dev, gfp_t gfp_mask)
-+{
-+ struct scst_vdisk_thr *res;
-+ struct scst_vdisk_dev *virt_dev = tgt_dev->dev->dh_priv;
-+
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(virt_dev->nullio);
-+
-+ res = kmem_cache_zalloc(vdisk_thr_cachep, gfp_mask);
-+ if (res == NULL) {
-+ PRINT_ERROR("Unable to allocate struct scst_vdisk_thr"
-+ " (size %zd)", sizeof(*res));
-+ goto out;
-+ }
-+
-+ if (!virt_dev->cdrom_empty) {
-+ res->fd = vdev_open_fd(virt_dev);
-+ if (IS_ERR(res->fd)) {
-+ PRINT_ERROR("filp_open(%s) returned an error %ld",
-+ virt_dev->filename, PTR_ERR(res->fd));
-+ goto out_free;
-+ }
-+ if (virt_dev->blockio)
-+ res->bdev = res->fd->f_dentry->d_inode->i_bdev;
-+ else
-+ res->bdev = NULL;
-+ } else
-+ res->fd = NULL;
-+
-+ scst_add_thr_data(tgt_dev, &res->hdr, vdisk_free_thr_data);
-+
-+out:
-+ TRACE_EXIT_HRES((unsigned long)res);
-+ return res;
-+
-+out_free:
-+ kmem_cache_free(vdisk_thr_cachep, res);
-+ res = NULL;
-+ goto out;
-+}
-+
-+static int vdisk_attach_tgt(struct scst_tgt_dev *tgt_dev)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ /* Nothing to do */
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void vdisk_detach_tgt(struct scst_tgt_dev *tgt_dev)
-+{
-+ TRACE_ENTRY();
-+
-+ scst_del_all_thr_data(tgt_dev);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int vdisk_do_job(struct scst_cmd *cmd)
-+{
-+ int rc, res;
-+ uint64_t lba_start = 0;
-+ loff_t data_len = 0;
-+ uint8_t *cdb = cmd->cdb;
-+ int opcode = cdb[0];
-+ loff_t loff;
-+ struct scst_device *dev = cmd->dev;
-+ struct scst_tgt_dev *tgt_dev = cmd->tgt_dev;
-+ struct scst_vdisk_dev *virt_dev = dev->dh_priv;
-+ struct scst_thr_data_hdr *d;
-+ struct scst_vdisk_thr *thr = NULL;
-+ int fua = 0;
-+
-+ TRACE_ENTRY();
-+
-+ switch (cmd->queue_type) {
-+ case SCST_CMD_QUEUE_ORDERED:
-+ TRACE(TRACE_ORDER, "ORDERED cmd %p (op %x)", cmd, cmd->cdb[0]);
-+ break;
-+ case SCST_CMD_QUEUE_HEAD_OF_QUEUE:
-+ TRACE(TRACE_ORDER, "HQ cmd %p (op %x)", cmd, cmd->cdb[0]);
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ rc = scst_check_local_events(cmd);
-+ if (unlikely(rc != 0))
-+ goto out_done;
-+
-+ cmd->status = 0;
-+ cmd->msg_status = 0;
-+ cmd->host_status = DID_OK;
-+ cmd->driver_status = 0;
-+
-+ if (!virt_dev->nullio) {
-+ d = scst_find_thr_data(tgt_dev);
-+ if (unlikely(d == NULL)) {
-+ thr = vdisk_init_thr_data(tgt_dev,
-+ cmd->noio_mem_alloc ? GFP_NOIO : GFP_KERNEL);
-+ if (thr == NULL) {
-+ scst_set_busy(cmd);
-+ goto out_compl;
-+ }
-+ scst_thr_data_get(&thr->hdr);
-+ } else
-+ thr = container_of(d, struct scst_vdisk_thr, hdr);
-+ } else {
-+ thr = &nullio_thr_data;
-+ scst_thr_data_get(&thr->hdr);
-+ }
-+
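-+ /*
-+ * Decode the LBA and transfer length from the CDB. E.g., the
-+ * 6-byte READ/WRITE CDBs carry a 21-bit LBA in bytes 1..3 (the
-+ * top bits of byte 1 are masked off), while the 10/12-byte and
-+ * 16-byte variants carry big-endian 32-bit and 64-bit LBAs in
-+ * bytes 2..5 and 2..9 respectively.
-+ */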
-+ switch (opcode) {
-+ case READ_6:
-+ case WRITE_6:
-+ case VERIFY_6:
-+ lba_start = (((cdb[1] & 0x1f) << (BYTE * 2)) +
-+ (cdb[2] << (BYTE * 1)) +
-+ (cdb[3] << (BYTE * 0)));
-+ data_len = cmd->bufflen;
-+ break;
-+ case READ_10:
-+ case READ_12:
-+ case WRITE_10:
-+ case WRITE_12:
-+ case VERIFY:
-+ case WRITE_VERIFY:
-+ case WRITE_VERIFY_12:
-+ case VERIFY_12:
-+ lba_start |= ((u64)cdb[2]) << 24;
-+ lba_start |= ((u64)cdb[3]) << 16;
-+ lba_start |= ((u64)cdb[4]) << 8;
-+ lba_start |= ((u64)cdb[5]);
-+ data_len = cmd->bufflen;
-+ break;
-+ case READ_16:
-+ case WRITE_16:
-+ case WRITE_VERIFY_16:
-+ case VERIFY_16:
-+ lba_start |= ((u64)cdb[2]) << 56;
-+ lba_start |= ((u64)cdb[3]) << 48;
-+ lba_start |= ((u64)cdb[4]) << 40;
-+ lba_start |= ((u64)cdb[5]) << 32;
-+ lba_start |= ((u64)cdb[6]) << 24;
-+ lba_start |= ((u64)cdb[7]) << 16;
-+ lba_start |= ((u64)cdb[8]) << 8;
-+ lba_start |= ((u64)cdb[9]);
-+ data_len = cmd->bufflen;
-+ break;
-+ case SYNCHRONIZE_CACHE:
-+ lba_start |= ((u64)cdb[2]) << 24;
-+ lba_start |= ((u64)cdb[3]) << 16;
-+ lba_start |= ((u64)cdb[4]) << 8;
-+ lba_start |= ((u64)cdb[5]);
-+ data_len = ((cdb[7] << (BYTE * 1)) + (cdb[8] << (BYTE * 0)))
-+ << virt_dev->block_shift;
-+ if (data_len == 0)
-+ data_len = virt_dev->file_size -
-+ ((loff_t)lba_start << virt_dev->block_shift);
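-+ /*
-+ * Per SBC, a zero NUMBER OF BLOCKS in SYNCHRONIZE CACHE means
-+ * "from the LBA to the end of the medium", hence the
-+ * adjustment above.
-+ */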
-+ break;
-+ }
-+
-+ loff = (loff_t)lba_start << virt_dev->block_shift;
-+ TRACE_DBG("cmd %p, lba_start %lld, loff %lld, data_len %lld", cmd,
-+ (long long unsigned int)lba_start,
-+ (long long unsigned int)loff,
-+ (long long unsigned int)data_len);
-+ if (unlikely(loff < 0) || unlikely(data_len < 0) ||
-+ unlikely((loff + data_len) > virt_dev->file_size)) {
-+ PRINT_INFO("Access beyond the end of the device "
-+ "(%lld of %lld, len %lld)",
-+ (long long unsigned int)loff,
-+ (long long unsigned int)virt_dev->file_size,
-+ (long long unsigned int)data_len);
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
-+ scst_sense_block_out_range_error));
-+ goto out_compl;
-+ }
-+
-+ switch (opcode) {
-+ case WRITE_10:
-+ case WRITE_12:
-+ case WRITE_16:
-+ fua = (cdb[1] & 0x8);
-+ if (fua) {
-+ TRACE(TRACE_ORDER, "FUA: loff=%lld, "
-+ "data_len=%lld", (long long unsigned int)loff,
-+ (long long unsigned int)data_len);
-+ }
-+ break;
-+ }
-+
-+ switch (opcode) {
-+ case READ_6:
-+ case READ_10:
-+ case READ_12:
-+ case READ_16:
-+ if (virt_dev->blockio) {
-+ blockio_exec_rw(cmd, thr, lba_start, 0);
-+ goto out_thr;
-+ } else
-+ vdisk_exec_read(cmd, thr, loff);
-+ break;
-+ case WRITE_6:
-+ case WRITE_10:
-+ case WRITE_12:
-+ case WRITE_16:
-+ {
-+ if (virt_dev->blockio) {
-+ blockio_exec_rw(cmd, thr, lba_start, 1);
-+ goto out_thr;
-+ } else
-+ vdisk_exec_write(cmd, thr, loff);
-+ /* O_SYNC flag is used for WT devices */
-+ if (fua)
-+ vdisk_fsync(thr, loff, data_len, cmd, dev);
-+ break;
-+ }
-+ case WRITE_VERIFY:
-+ case WRITE_VERIFY_12:
-+ case WRITE_VERIFY_16:
-+ {
-+ /* ToDo: BLOCKIO VERIFY */
-+ vdisk_exec_write(cmd, thr, loff);
-+ /* O_SYNC flag is used for WT devices */
-+ if (scsi_status_is_good(cmd->status))
-+ vdisk_exec_verify(cmd, thr, loff);
-+ break;
-+ }
-+ case SYNCHRONIZE_CACHE:
-+ {
-+ int immed = cdb[1] & 0x2;
-+ TRACE(TRACE_ORDER, "SYNCHRONIZE_CACHE: "
-+ "loff=%lld, data_len=%lld, immed=%d",
-+ (long long unsigned int)loff,
-+ (long long unsigned int)data_len, immed);
-+ if (immed) {
-+ scst_cmd_get(cmd); /* to protect dev */
-+ cmd->completed = 1;
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT,
-+ SCST_CONTEXT_SAME);
-+ vdisk_fsync(thr, loff, data_len, NULL, dev);
-+ /* ToDo: vdisk_fsync() error processing */
-+ scst_cmd_put(cmd);
-+ goto out_thr;
-+ } else {
-+ vdisk_fsync(thr, loff, data_len, cmd, dev);
-+ break;
-+ }
-+ }
-+ case VERIFY_6:
-+ case VERIFY:
-+ case VERIFY_12:
-+ case VERIFY_16:
-+ vdisk_exec_verify(cmd, thr, loff);
-+ break;
-+ case MODE_SENSE:
-+ case MODE_SENSE_10:
-+ vdisk_exec_mode_sense(cmd);
-+ break;
-+ case MODE_SELECT:
-+ case MODE_SELECT_10:
-+ vdisk_exec_mode_select(cmd);
-+ break;
-+ case LOG_SELECT:
-+ case LOG_SENSE:
-+ vdisk_exec_log(cmd);
-+ break;
-+ case ALLOW_MEDIUM_REMOVAL:
-+ vdisk_exec_prevent_allow_medium_removal(cmd);
-+ break;
-+ case READ_TOC:
-+ vdisk_exec_read_toc(cmd);
-+ break;
-+ case START_STOP:
-+ vdisk_fsync(thr, 0, virt_dev->file_size, cmd, dev);
-+ break;
-+ case RESERVE:
-+ case RESERVE_10:
-+ case RELEASE:
-+ case RELEASE_10:
-+ case TEST_UNIT_READY:
-+ break;
-+ case INQUIRY:
-+ vdisk_exec_inquiry(cmd);
-+ break;
-+ case REQUEST_SENSE:
-+ vdisk_exec_request_sense(cmd);
-+ break;
-+ case READ_CAPACITY:
-+ vdisk_exec_read_capacity(cmd);
-+ break;
-+ case SERVICE_ACTION_IN:
-+ if ((cmd->cdb[1] & 0x1f) == SAI_READ_CAPACITY_16) {
-+ vdisk_exec_read_capacity16(cmd);
-+ break;
-+ }
-+ goto out_invalid_opcode;
-+ case UNMAP:
-+ vdisk_exec_unmap(cmd, thr);
-+ break;
-+ case MAINTENANCE_IN:
-+ switch (cmd->cdb[1] & 0x1f) {
-+ case MI_REPORT_TARGET_PGS:
-+ vdisk_exec_report_tpgs(cmd);
-+ break;
-+ default:
-+ goto out_invalid_opcode;
-+ }
-+ break;
-+ case REPORT_LUNS:
-+ default:
-+ goto out_invalid_opcode;
-+ }
-+
-+out_compl:
-+ cmd->completed = 1;
-+
-+out_done:
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
-+
-+out_thr:
-+ if (likely(thr != NULL))
-+ scst_thr_data_put(&thr->hdr);
-+
-+ res = SCST_EXEC_COMPLETED;
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_invalid_opcode:
-+ TRACE_DBG("Invalid opcode %d", opcode);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
-+ goto out_compl;
-+}
-+
-+static int vdisk_get_block_shift(struct scst_cmd *cmd)
-+{
-+ struct scst_vdisk_dev *virt_dev = cmd->dev->dh_priv;
-+ return virt_dev->block_shift;
-+}
-+
-+static int vdisk_parse(struct scst_cmd *cmd)
-+{
-+ scst_sbc_generic_parse(cmd, vdisk_get_block_shift);
-+ return SCST_CMD_STATE_DEFAULT;
-+}
-+
-+static int vcdrom_parse(struct scst_cmd *cmd)
-+{
-+ scst_cdrom_generic_parse(cmd, vdisk_get_block_shift);
-+ return SCST_CMD_STATE_DEFAULT;
-+}
-+
-+static int vcdrom_exec(struct scst_cmd *cmd)
-+{
-+ int res = SCST_EXEC_COMPLETED;
-+ int opcode = cmd->cdb[0];
-+ struct scst_vdisk_dev *virt_dev = cmd->dev->dh_priv;
-+
-+ TRACE_ENTRY();
-+
-+ cmd->status = 0;
-+ cmd->msg_status = 0;
-+ cmd->host_status = DID_OK;
-+ cmd->driver_status = 0;
-+
-+ if (virt_dev->cdrom_empty && (opcode != INQUIRY)) {
-+ TRACE_DBG("%s", "CDROM empty");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_not_ready));
-+ goto out_done;
-+ }
-+
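-+ /*
-+ * The lockless media_changed test here is only a fast path;
-+ * the flag is re-checked under flags_lock below before the UA
-+ * is actually reported.
-+ */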
-+ if (virt_dev->media_changed && scst_is_ua_command(cmd)) {
-+ spin_lock(&virt_dev->flags_lock);
-+ if (virt_dev->media_changed) {
-+ virt_dev->media_changed = 0;
-+ TRACE_DBG("%s", "Reporting media changed");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_medium_changed_UA));
-+ spin_unlock(&virt_dev->flags_lock);
-+ goto out_done;
-+ }
-+ spin_unlock(&virt_dev->flags_lock);
-+ }
-+
-+ res = vdisk_do_job(cmd);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_done:
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
-+ goto out;
-+}
-+
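-+/*
-+ * The 64-bit device ID is the global setup ID in the upper 32 bits and
-+ * a crc32c of the device name (including its terminating NUL) in the
-+ * lower 32 bits, e.g. with setup ID 0 it is simply the crc32c of the
-+ * name.
-+ */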
-+static uint64_t vdisk_gen_dev_id_num(const char *virt_dev_name)
-+{
-+ uint32_t dev_id_num;
-+
-+ dev_id_num = crc32c(0, virt_dev_name, strlen(virt_dev_name)+1);
-+
-+ return ((uint64_t)scst_get_setup_id() << 32) | dev_id_num;
-+}
-+
-+static void vdisk_exec_unmap(struct scst_cmd *cmd, struct scst_vdisk_thr *thr)
-+{
-+ struct scst_vdisk_dev *virt_dev = cmd->dev->dh_priv;
-+ ssize_t length = 0;
-+ struct file *fd = thr->fd;
-+ struct inode *inode;
-+ uint8_t *address;
-+ int offset, descriptor_len, total_len;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(!virt_dev->thin_provisioned)) {
-+ TRACE_DBG("%s", "Invalid opcode UNMAP");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
-+ goto out;
-+ }
-+
-+ if (unlikely(cmd->cdb[1] & 1)) {
-+ /* ANCHOR not supported */
-+ TRACE_DBG("%s", "Invalid ANCHOR field");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out;
-+ }
-+
-+ length = scst_get_buf_full(cmd, &address);
-+ if (unlikely(length <= 0)) {
-+ if (length == 0)
-+ goto out_put;
-+ else if (length == -ENOMEM)
-+ scst_set_busy(cmd);
-+ else
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out;
-+ }
-+
-+ inode = fd->f_dentry->d_inode;
-+
-+ total_len = cmd->cdb[7] << 8 | cmd->cdb[8]; /* length */
-+ offset = 8;
-+
-+ descriptor_len = address[2] << 8 | address[3];
-+
-+ TRACE_DBG("total_len %d, descriptor_len %d", total_len, descriptor_len);
-+
-+ if (descriptor_len == 0)
-+ goto out_put;
-+
-+ if (unlikely((descriptor_len > (total_len - 8)) ||
-+ ((descriptor_len % 16) != 0))) {
-+ PRINT_ERROR("Bad descriptor length: %d < %d - 8",
-+ descriptor_len, total_len);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_parm_list));
-+ goto out_put;
-+ }
-+
-+ while ((offset - 8) < descriptor_len) {
-+ int err;
-+ uint64_t start;
-+ uint32_t len;
-+ start = be64_to_cpu(get_unaligned((__be64 *)&address[offset]));
-+ offset += 8;
-+ len = be32_to_cpu(get_unaligned((__be32 *)&address[offset]));
-+ offset += 8;
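-+ /*
-+ * Each UNMAP block descriptor is 16 bytes: an 8-byte LBA,
-+ * a 4-byte block count and 4 reserved bytes, so the second
-+ * "offset += 8" skips both the count and the reserved bytes.
-+ */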
-+
-+ if ((start > virt_dev->nblocks) ||
-+ ((start + len) > virt_dev->nblocks)) {
-+ PRINT_ERROR("Device %s: attempt to write beyond max "
-+ "size", virt_dev->name);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_put;
-+ }
-+
-+ TRACE_DBG("Unmapping lba %lld (blocks %d)",
-+ (unsigned long long)start, len);
-+
-+ if (virt_dev->blockio) {
-+ gfp_t gfp = cmd->noio_mem_alloc ? GFP_NOIO : GFP_KERNEL;
-+ err = blkdev_issue_discard(inode->i_bdev, start, len,
-+ gfp, 0);
-+ if (unlikely(err != 0)) {
-+ PRINT_ERROR("blkdev_issue_discard() for "
-+ "LBA %lld len %d failed with err %d",
-+ (unsigned long long)start, len, err);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_write_error));
-+ goto out_put;
-+ }
-+ } else {
-+ const int block_shift = virt_dev->block_shift;
-+ const loff_t a0 = start << block_shift;
-+ const loff_t a2 = (start + len) << block_shift;
-+ const loff_t a1 = max_t(loff_t, a2 & PAGE_CACHE_MASK,
-+ a0);
-+
-+ /*
-+ * The SCSI UNMAP command discards a range of blocks
-+ * of size (1 << block_shift) while the Linux VFS
-+ * truncate_range() function discards a range of blocks
-+ * of size PAGE_CACHE_SIZE. Hence pass range [a0, a1)
-+ * to truncate_range() instead of range [a0,
-+ * a2). Note: since we do not set TPRZ it is not
-+ * necessary to overwrite the range [a1, a2) with
-+ * zeroes.
-+ */
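-+ /*
-+ * For example, with 512-byte blocks (block_shift 9)
-+ * and 4K pages: start 1, len 9 gives a0 = 512,
-+ * a2 = 5120 and a1 = 4096, so only [512, 4096) is
-+ * truncated.
-+ */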
-+ WARN_ON(!(a0 <= a1 && a1 <= a2));
-+ WARN_ON(!((a1 & (PAGE_CACHE_SIZE - 1)) == 0 ||
-+ a0 == a1));
-+ if (a0 < a1)
-+ inode->i_op->truncate_range(inode, a0, a1 - 1);
-+ }
-+ }
-+
-+out_put:
-+ scst_put_buf_full(cmd, address);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void vdisk_exec_inquiry(struct scst_cmd *cmd)
-+{
-+ int32_t length, i, resp_len = 0;
-+ uint8_t *address;
-+ uint8_t *buf;
-+ struct scst_vdisk_dev *virt_dev = cmd->dev->dh_priv;
-+ uint16_t tg_id;
-+
-+ TRACE_ENTRY();
-+
-+ buf = kzalloc(INQ_BUF_SZ, GFP_KERNEL);
-+ if (buf == NULL) {
-+ scst_set_busy(cmd);
-+ goto out;
-+ }
-+
-+ length = scst_get_buf_full(cmd, &address);
-+ TRACE_DBG("length %d", length);
-+ if (unlikely(length <= 0)) {
-+ if (length < 0) {
-+ PRINT_ERROR("scst_get_buf_full() failed: %d", length);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ }
-+ goto out_free;
-+ }
-+
-+ if (cmd->cdb[1] & CMDDT) {
-+ TRACE_DBG("%s", "INQUIRY: CMDDT is unsupported");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_put;
-+ }
-+
-+ buf[0] = cmd->dev->type; /* type dev */
-+ /* Vital Product */
-+ if (cmd->cdb[1] & EVPD) {
-+ if (0 == cmd->cdb[2]) {
-+ /* supported vital product data pages */
-+ buf[3] = 3;
-+ buf[4] = 0x0; /* this page */
-+ buf[5] = 0x80; /* unit serial number */
-+ buf[6] = 0x83; /* device identification */
-+ if (virt_dev->dev->type == TYPE_DISK) {
-+ buf[3] += 1;
-+ buf[7] = 0xB0; /* block limits */
-+ if (virt_dev->thin_provisioned) {
-+ buf[3] += 1;
-+ buf[8] = 0xB2; /* thin provisioning */
-+ }
-+ }
-+ resp_len = buf[3] + 4;
-+ } else if (0x80 == cmd->cdb[2]) {
-+ /* unit serial number */
-+ buf[1] = 0x80;
-+ if (cmd->tgtt->get_serial) {
-+ buf[3] = cmd->tgtt->get_serial(cmd->tgt_dev,
-+ &buf[4], INQ_BUF_SZ - 4);
-+ } else {
-+ int usn_len;
-+ read_lock(&vdisk_serial_rwlock);
-+ usn_len = strlen(virt_dev->usn);
-+ buf[3] = usn_len;
-+ strncpy(&buf[4], virt_dev->usn, usn_len);
-+ read_unlock(&vdisk_serial_rwlock);
-+ }
-+ resp_len = buf[3] + 4;
-+ } else if (0x83 == cmd->cdb[2]) {
-+ /* device identification */
-+ int num = 4;
-+
-+ buf[1] = 0x83;
-+ /* T10 vendor identifier field format (faked) */
-+ buf[num + 0] = 0x2; /* ASCII */
-+ buf[num + 1] = 0x1; /* Vendor ID */
-+ if (cmd->tgtt->vendor)
-+ memcpy(&buf[num + 4], cmd->tgtt->vendor, 8);
-+ else if (virt_dev->blockio)
-+ memcpy(&buf[num + 4], SCST_BIO_VENDOR, 8);
-+ else
-+ memcpy(&buf[num + 4], SCST_FIO_VENDOR, 8);
-+
-+ read_lock(&vdisk_serial_rwlock);
-+ i = strlen(virt_dev->t10_dev_id);
-+ memcpy(&buf[num + 12], virt_dev->t10_dev_id, i);
-+ read_unlock(&vdisk_serial_rwlock);
-+
-+ buf[num + 3] = 8 + i;
-+ num += buf[num + 3];
-+
-+ num += 4;
-+
-+ /*
-+ * Relative target port identifier
-+ */
-+ buf[num + 0] = 0x01; /* binary */
-+ /* Relative target port id */
-+ buf[num + 1] = 0x10 | 0x04;
-+
-+ put_unaligned(cpu_to_be16(cmd->tgt->rel_tgt_id),
-+ (__be16 *)&buf[num + 4 + 2]);
-+
-+ buf[num + 3] = 4;
-+ num += buf[num + 3];
-+
-+ num += 4;
-+
-+ tg_id = scst_lookup_tg_id(cmd->dev, cmd->tgt);
-+ if (tg_id) {
-+ /*
-+ * Target port group designator
-+ */
-+ buf[num + 0] = 0x01; /* binary */
-+ /* Target port group id */
-+ buf[num + 1] = 0x10 | 0x05;
-+
-+ put_unaligned(cpu_to_be16(tg_id),
-+ (__be16 *)&buf[num + 4 + 2]);
-+
-+ buf[num + 3] = 4;
-+ num += 4 + buf[num + 3];
-+ }
-+
-+ /*
-+ * IEEE id
-+ */
-+ buf[num + 0] = 0x01; /* binary */
-+
-+ /* EUI-64 */
-+ buf[num + 1] = 0x02;
-+ buf[num + 2] = 0x00;
-+ buf[num + 3] = 0x08;
-+
-+ /* IEEE id */
-+ buf[num + 4] = virt_dev->t10_dev_id[0];
-+ buf[num + 5] = virt_dev->t10_dev_id[1];
-+ buf[num + 6] = virt_dev->t10_dev_id[2];
-+
-+ /* IEEE ext id */
-+ buf[num + 7] = virt_dev->t10_dev_id[3];
-+ buf[num + 8] = virt_dev->t10_dev_id[4];
-+ buf[num + 9] = virt_dev->t10_dev_id[5];
-+ buf[num + 10] = virt_dev->t10_dev_id[6];
-+ buf[num + 11] = virt_dev->t10_dev_id[7];
-+ num += buf[num + 3];
-+
-+ resp_len = num;
-+ buf[2] = (resp_len >> 8) & 0xFF;
-+ buf[3] = resp_len & 0xFF;
-+ resp_len += 4;
-+ } else if ((0xB0 == cmd->cdb[2]) &&
-+ (virt_dev->dev->type == TYPE_DISK)) {
-+ /* Block Limits */
-+ int max_transfer;
-+ buf[1] = 0xB0;
-+ buf[3] = 0x3C;
-+ /* Optimal transfer granularity is PAGE_SIZE */
-+ put_unaligned(cpu_to_be16(max_t(int,
-+ PAGE_SIZE/virt_dev->block_size, 1)),
-+ (uint16_t *)&buf[6]);
-+ /* Max transfer len is min of sg limit and 8M */
-+ max_transfer = min_t(int,
-+ cmd->tgt_dev->max_sg_cnt << PAGE_SHIFT,
-+ 8*1024*1024) / virt_dev->block_size;
-+ put_unaligned(cpu_to_be32(max_transfer),
-+ (uint32_t *)&buf[8]);
-+ /*
-+ * Let's have an optimal transfer len of 512KB. It would be
-+ * better not to set it at all, because we have no such limit,
-+ * but some initiators may not understand that (?). On the
-+ * other hand, too big transfers are not optimal either,
-+ * because the SGV cache supports only <4M buffers.
-+ */
-+ put_unaligned(cpu_to_be32(min_t(int,
-+ max_transfer,
-+ 512*1024 / virt_dev->block_size)),
-+ (uint32_t *)&buf[12]);
-+ if (virt_dev->thin_provisioned) {
-+ /* MAXIMUM UNMAP LBA COUNT is UNLIMITED */
-+ put_unaligned(__constant_cpu_to_be32(0xFFFFFFFF),
-+ (uint32_t *)&buf[20]);
-+ /* MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT is UNLIMITED */
-+ put_unaligned(__constant_cpu_to_be32(0xFFFFFFFF),
-+ (uint32_t *)&buf[24]);
-+ /* OPTIMAL UNMAP GRANULARITY is 1 */
-+ put_unaligned(__constant_cpu_to_be32(1),
-+ (uint32_t *)&buf[28]);
-+ }
-+ resp_len = buf[3] + 4;
-+ } else if ((0xB2 == cmd->cdb[2]) &&
-+ (virt_dev->dev->type == TYPE_DISK) &&
-+ virt_dev->thin_provisioned) {
-+ /* Thin Provisioning */
-+ buf[1] = 0xB2;
-+ buf[3] = 2;
-+ buf[5] = 0x80;
-+ resp_len = buf[3] + 4;
-+ } else {
-+ TRACE_DBG("INQUIRY: Unsupported EVPD page %x",
-+ cmd->cdb[2]);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_put;
-+ }
-+ } else {
-+ int len, num;
-+
-+ if (cmd->cdb[2] != 0) {
-+ TRACE_DBG("INQUIRY: Unsupported page %x", cmd->cdb[2]);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_put;
-+ }
-+
-+ if (virt_dev->removable)
-+ buf[1] = 0x80; /* removable */
-+ buf[2] = 5; /* Device complies to SPC-3 */
-+ buf[3] = 0x02; /* Data in format specified in SPC */
-+ if (cmd->tgtt->fake_aca)
-+ buf[3] |= 0x20;
-+ buf[4] = 31; /* n - 4 = 35 - 4 = 31 for full 36 byte data */
-+ if (scst_impl_alua_configured(cmd->dev))
-+ buf[5] = SCST_INQ_TPGS_MODE_IMPLICIT;
-+ buf[6] = 0x10; /* MultiP 1 */
-+ buf[7] = 2; /* CMDQUE 1, BQue 0 => commands queuing supported */
-+
-+ /*
-+ * 8 byte ASCII Vendor Identification of the target
-+ * - left aligned.
-+ */
-+ if (cmd->tgtt->vendor)
-+ memcpy(&buf[8], cmd->tgtt->vendor, 8);
-+ else if (virt_dev->blockio)
-+ memcpy(&buf[8], SCST_BIO_VENDOR, 8);
-+ else
-+ memcpy(&buf[8], SCST_FIO_VENDOR, 8);
-+
-+ /*
-+ * 16 byte ASCII Product Identification of the target - left
-+ * aligned.
-+ */
-+ memset(&buf[16], ' ', 16);
-+ if (cmd->tgtt->get_product_id)
-+ cmd->tgtt->get_product_id(cmd->tgt_dev, &buf[16], 16);
-+ else {
-+ len = min_t(size_t, strlen(virt_dev->name), 16);
-+ memcpy(&buf[16], virt_dev->name, len);
-+ }
-+
-+ /*
-+ * 4 byte ASCII Product Revision Level of the target - left
-+ * aligned.
-+ */
-+ if (cmd->tgtt->revision)
-+ memcpy(&buf[32], cmd->tgtt->revision, 4);
-+ else
-+ memcpy(&buf[32], SCST_FIO_REV, 4);
-+
-+ /** Version descriptors **/
-+
-+ buf[4] += 58 - 36;
-+ num = 0;
-+
-+ /* SAM-3 T10/1561-D revision 14 */
-+ buf[58 + num] = 0x0;
-+ buf[58 + num + 1] = 0x76;
-+ num += 2;
-+
-+ /* Physical transport */
-+ if (cmd->tgtt->get_phys_transport_version != NULL) {
-+ uint16_t v = cmd->tgtt->get_phys_transport_version(cmd->tgt);
-+ if (v != 0) {
-+ *((__be16 *)&buf[58 + num]) = cpu_to_be16(v);
-+ num += 2;
-+ }
-+ }
-+
-+ /* SCSI transport */
-+ if (cmd->tgtt->get_scsi_transport_version != NULL) {
-+ *((__be16 *)&buf[58 + num]) =
-+ cpu_to_be16(cmd->tgtt->get_scsi_transport_version(cmd->tgt));
-+ num += 2;
-+ }
-+
-+ /* SPC-3 T10/1416-D revision 23 */
-+ buf[58 + num] = 0x3;
-+ buf[58 + num + 1] = 0x12;
-+ num += 2;
-+
-+ /* Device command set */
-+ if (virt_dev->command_set_version != 0) {
-+ *((__be16 *)&buf[58 + num]) =
-+ cpu_to_be16(virt_dev->command_set_version);
-+ num += 2;
-+ }
-+
-+ /* Vendor specific information. */
-+ if (cmd->tgtt->get_vend_specific) {
-+ /* Skip to byte 96. */
-+ num = 96 - 58;
-+ num += cmd->tgtt->get_vend_specific(cmd->tgt_dev,
-+ &buf[96], INQ_BUF_SZ - 96);
-+ }
-+
-+ buf[4] += num;
-+ resp_len = buf[4] + 5;
-+ }
-+
-+ BUG_ON(resp_len >= INQ_BUF_SZ);
-+
-+ if (length > resp_len)
-+ length = resp_len;
-+ memcpy(address, buf, length);
-+
-+out_put:
-+ scst_put_buf_full(cmd, address);
-+ if (length < cmd->resp_data_len)
-+ scst_set_resp_data_len(cmd, length);
-+
-+out_free:
-+ kfree(buf);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void vdisk_exec_request_sense(struct scst_cmd *cmd)
-+{
-+ int32_t length, sl;
-+ uint8_t *address;
-+ uint8_t b[SCST_STANDARD_SENSE_LEN];
-+
-+ TRACE_ENTRY();
-+
-+ sl = scst_set_sense(b, sizeof(b), cmd->dev->d_sense,
-+ SCST_LOAD_SENSE(scst_sense_no_sense));
-+
-+ length = scst_get_buf_full(cmd, &address);
-+ TRACE_DBG("length %d", length);
-+ if (length < 0) {
-+ PRINT_ERROR("scst_get_buf_full() failed: %d)", length);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out;
-+ }
-+
-+ length = min(sl, length);
-+ memcpy(address, b, length);
-+ scst_set_resp_data_len(cmd, length);
-+
-+ scst_put_buf_full(cmd, address);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * <<Following mode pages info copied from ST318451LW with some corrections>>
-+ *
-+ * ToDo: revise them
-+ */
-+static int vdisk_err_recov_pg(unsigned char *p, int pcontrol,
-+ struct scst_vdisk_dev *virt_dev)
-+{ /* Read-Write Error Recovery page for mode_sense */
-+ const unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
-+ 5, 0, 0xff, 0xff};
-+
-+ memcpy(p, err_recov_pg, sizeof(err_recov_pg));
-+ if (1 == pcontrol)
-+ memset(p + 2, 0, sizeof(err_recov_pg) - 2);
-+ return sizeof(err_recov_pg);
-+}
-+
-+static int vdisk_disconnect_pg(unsigned char *p, int pcontrol,
-+ struct scst_vdisk_dev *virt_dev)
-+{ /* Disconnect-Reconnect page for mode_sense */
-+ const unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
-+ 0, 0, 0, 0, 0, 0, 0, 0};
-+
-+ memcpy(p, disconnect_pg, sizeof(disconnect_pg));
-+ if (1 == pcontrol)
-+ memset(p + 2, 0, sizeof(disconnect_pg) - 2);
-+ return sizeof(disconnect_pg);
-+}
-+
-+static int vdisk_rigid_geo_pg(unsigned char *p, int pcontrol,
-+ struct scst_vdisk_dev *virt_dev)
-+{
-+ unsigned char geo_m_pg[] = {0x04, 0x16, 0, 0, 0, DEF_HEADS, 0, 0,
-+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-+ 0x3a, 0x98/* 15K RPM */, 0, 0};
-+ int32_t ncyl, n, rem;
-+ uint64_t dividend;
-+
-+ memcpy(p, geo_m_pg, sizeof(geo_m_pg));
-+ /*
-+ * Divide virt_dev->nblocks by (DEF_HEADS * DEF_SECTORS) and store
-+ * the quotient in ncyl and the remainder in rem.
-+ */
-+ dividend = virt_dev->nblocks;
-+ rem = do_div(dividend, DEF_HEADS * DEF_SECTORS);
-+ ncyl = dividend;
-+ if (rem != 0)
-+ ncyl++;
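-+ /*
-+ * E.g., assuming DEF_HEADS * DEF_SECTORS == 64 * 32 == 2048 (as the
-+ * cylinder count printed at attach time suggests), nblocks == 1000000
-+ * gives ncyl == 489: 488 full cylinders plus one partial.
-+ */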
-+ memcpy(&n, p + 2, sizeof(u32));
-+ n = n | ((__force u32)cpu_to_be32(ncyl) >> 8);
-+ memcpy(p + 2, &n, sizeof(u32));
-+ if (1 == pcontrol)
-+ memset(p + 2, 0, sizeof(geo_m_pg) - 2);
-+ return sizeof(geo_m_pg);
-+}
-+
-+static int vdisk_format_pg(unsigned char *p, int pcontrol,
-+ struct scst_vdisk_dev *virt_dev)
-+{ /* Format device page for mode_sense */
-+ const unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
-+ 0, 0, 0, 0, 0, 0, 0, 0,
-+ 0, 0, 0, 0, 0x40, 0, 0, 0};
-+
-+ memcpy(p, format_pg, sizeof(format_pg));
-+ p[10] = (DEF_SECTORS >> 8) & 0xff;
-+ p[11] = DEF_SECTORS & 0xff;
-+ p[12] = (virt_dev->block_size >> 8) & 0xff;
-+ p[13] = virt_dev->block_size & 0xff;
-+ if (1 == pcontrol)
-+ memset(p + 2, 0, sizeof(format_pg) - 2);
-+ return sizeof(format_pg);
-+}
-+
-+static int vdisk_caching_pg(unsigned char *p, int pcontrol,
-+ struct scst_vdisk_dev *virt_dev)
-+{ /* Caching page for mode_sense */
-+ const unsigned char caching_pg[] = {0x8, 18, 0x10, 0, 0xff, 0xff, 0, 0,
-+ 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
-+
-+ memcpy(p, caching_pg, sizeof(caching_pg));
-+ p[2] |= !(virt_dev->wt_flag || virt_dev->nv_cache) ? WCE : 0;
-+ if (1 == pcontrol)
-+ memset(p + 2, 0, sizeof(caching_pg) - 2);
-+ return sizeof(caching_pg);
-+}
-+
-+static int vdisk_ctrl_m_pg(unsigned char *p, int pcontrol,
-+ struct scst_vdisk_dev *virt_dev)
-+{ /* Control mode page for mode_sense */
-+ const unsigned char ctrl_m_pg[] = {0xa, 0xa, 0, 0, 0, 0, 0, 0,
-+ 0, 0, 0x2, 0x4b};
-+
-+ memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
-+ switch (pcontrol) {
-+ case 0:
-+ p[2] |= virt_dev->dev->tst << 5;
-+ p[2] |= virt_dev->dev->d_sense << 2;
-+ p[3] |= virt_dev->dev->queue_alg << 4;
-+ p[4] |= virt_dev->dev->swp << 3;
-+ p[5] |= virt_dev->dev->tas << 6;
-+ break;
-+ case 1:
-+ memset(p + 2, 0, sizeof(ctrl_m_pg) - 2);
-+#if 0 /*
-+ * It's too early to implement it, since we can't control the
-+ * backstorage device parameters. ToDo
-+ */
-+ p[2] |= 7 << 5; /* TST */
-+ p[3] |= 0xF << 4; /* QUEUE ALGORITHM MODIFIER */
-+#endif
-+ p[2] |= 1 << 2; /* D_SENSE */
-+ p[4] |= 1 << 3; /* SWP */
-+ p[5] |= 1 << 6; /* TAS */
-+ break;
-+ case 2:
-+ p[2] |= DEF_TST << 5;
-+ p[2] |= DEF_DSENSE << 2;
-+ if (virt_dev->wt_flag || virt_dev->nv_cache)
-+ p[3] |= DEF_QUEUE_ALG_WT << 4;
-+ else
-+ p[3] |= DEF_QUEUE_ALG << 4;
-+ p[4] |= DEF_SWP << 3;
-+ p[5] |= DEF_TAS << 6;
-+ break;
-+ default:
-+ BUG();
-+ }
-+ return sizeof(ctrl_m_pg);
-+}
-+
-+static int vdisk_iec_m_pg(unsigned char *p, int pcontrol,
-+ struct scst_vdisk_dev *virt_dev)
-+{ /* Informational Exceptions control mode page for mode_sense */
-+ const unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
-+ 0, 0, 0x0, 0x0};
-+ memcpy(p, iec_m_pg, sizeof(iec_m_pg));
-+ if (1 == pcontrol)
-+ memset(p + 2, 0, sizeof(iec_m_pg) - 2);
-+ return sizeof(iec_m_pg);
-+}
-+
-+static void vdisk_exec_mode_sense(struct scst_cmd *cmd)
-+{
-+ int32_t length;
-+ uint8_t *address;
-+ uint8_t *buf;
-+ struct scst_vdisk_dev *virt_dev;
-+ uint32_t blocksize;
-+ uint64_t nblocks;
-+ unsigned char dbd, type;
-+ int pcontrol, pcode, subpcode;
-+ unsigned char dev_spec;
-+ int msense_6, offset = 0, len;
-+ unsigned char *bp;
-+
-+ TRACE_ENTRY();
-+
-+ buf = kzalloc(MSENSE_BUF_SZ, GFP_KERNEL);
-+ if (buf == NULL) {
-+ scst_set_busy(cmd);
-+ goto out;
-+ }
-+
-+ virt_dev = cmd->dev->dh_priv;
-+ blocksize = virt_dev->block_size;
-+ nblocks = virt_dev->nblocks;
-+
-+ type = cmd->dev->type; /* type dev */
-+ dbd = cmd->cdb[1] & DBD;
-+ pcontrol = (cmd->cdb[2] & 0xc0) >> 6;
-+ pcode = cmd->cdb[2] & 0x3f;
-+ subpcode = cmd->cdb[3];
-+ msense_6 = (MODE_SENSE == cmd->cdb[0]);
-+ dev_spec = (virt_dev->dev->rd_only ||
-+ cmd->tgt_dev->acg_dev->rd_only) ? WP : 0;
-+
-+ if (!virt_dev->blockio)
-+ dev_spec |= DPOFUA;
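-+ /*
-+ * BLOCKIO does not report DPOFUA here; for FILEIO, FUA writes are
-+ * backed by vdisk_fsync() in vdisk_do_job().
-+ */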
-+
-+ length = scst_get_buf_full(cmd, &address);
-+ if (unlikely(length <= 0)) {
-+ if (length < 0) {
-+ PRINT_ERROR("scst_get_buf_full() failed: %d", length);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ }
-+ goto out_free;
-+ }
-+
-+ if (0x3 == pcontrol) {
-+ TRACE_DBG("%s", "MODE SENSE: Saving values not supported");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_saving_params_unsup));
-+ goto out_put;
-+ }
-+
-+ if (msense_6) {
-+ buf[1] = type;
-+ buf[2] = dev_spec;
-+ offset = 4;
-+ } else {
-+ buf[2] = type;
-+ buf[3] = dev_spec;
-+ offset = 8;
-+ }
-+
-+ if (0 != subpcode) {
-+ /* TODO: Control Extension page */
-+ TRACE_DBG("%s", "MODE SENSE: Only subpage 0 is supported");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_put;
-+ }
-+
-+ if (!dbd) {
-+ /* Create block descriptor */
-+ buf[offset - 1] = 0x08; /* block descriptor length */
-+ if (nblocks >> 32) {
-+ buf[offset + 0] = 0xFF;
-+ buf[offset + 1] = 0xFF;
-+ buf[offset + 2] = 0xFF;
-+ buf[offset + 3] = 0xFF;
-+ } else {
-+ /* num blks */
-+ buf[offset + 0] = (nblocks >> (BYTE * 3)) & 0xFF;
-+ buf[offset + 1] = (nblocks >> (BYTE * 2)) & 0xFF;
-+ buf[offset + 2] = (nblocks >> (BYTE * 1)) & 0xFF;
-+ buf[offset + 3] = (nblocks >> (BYTE * 0)) & 0xFF;
-+ }
-+ buf[offset + 4] = 0; /* density code */
-+ buf[offset + 5] = (blocksize >> (BYTE * 2)) & 0xFF;/* blklen */
-+ buf[offset + 6] = (blocksize >> (BYTE * 1)) & 0xFF;
-+ buf[offset + 7] = (blocksize >> (BYTE * 0)) & 0xFF;
-+
-+ offset += 8; /* increment offset */
-+ }
-+
-+ bp = buf + offset;
-+
-+ switch (pcode) {
-+ case 0x1: /* Read-Write error recovery page, direct access */
-+ len = vdisk_err_recov_pg(bp, pcontrol, virt_dev);
-+ break;
-+ case 0x2: /* Disconnect-Reconnect page, all devices */
-+ len = vdisk_disconnect_pg(bp, pcontrol, virt_dev);
-+ break;
-+ case 0x3: /* Format device page, direct access */
-+ len = vdisk_format_pg(bp, pcontrol, virt_dev);
-+ break;
-+ case 0x4: /* Rigid disk geometry */
-+ len = vdisk_rigid_geo_pg(bp, pcontrol, virt_dev);
-+ break;
-+ case 0x8: /* Caching page, direct access */
-+ len = vdisk_caching_pg(bp, pcontrol, virt_dev);
-+ break;
-+ case 0xa: /* Control Mode page, all devices */
-+ len = vdisk_ctrl_m_pg(bp, pcontrol, virt_dev);
-+ break;
-+ case 0x1c: /* Informational Exceptions Mode page, all devices */
-+ len = vdisk_iec_m_pg(bp, pcontrol, virt_dev);
-+ break;
-+ case 0x3f: /* Read all Mode pages */
-+ len = vdisk_err_recov_pg(bp, pcontrol, virt_dev);
-+ len += vdisk_disconnect_pg(bp + len, pcontrol, virt_dev);
-+ len += vdisk_format_pg(bp + len, pcontrol, virt_dev);
-+ len += vdisk_caching_pg(bp + len, pcontrol, virt_dev);
-+ len += vdisk_ctrl_m_pg(bp + len, pcontrol, virt_dev);
-+ len += vdisk_iec_m_pg(bp + len, pcontrol, virt_dev);
-+ len += vdisk_rigid_geo_pg(bp + len, pcontrol, virt_dev);
-+ break;
-+ default:
-+ TRACE_DBG("MODE SENSE: Unsupported page %x", pcode);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_put;
-+ }
-+
-+ offset += len;
-+
-+ if (msense_6)
-+ buf[0] = offset - 1;
-+ else {
-+ buf[0] = ((offset - 2) >> 8) & 0xff;
-+ buf[1] = (offset - 2) & 0xff;
-+ }
-+
-+ if (offset > length)
-+ offset = length;
-+ memcpy(address, buf, offset);
-+
-+out_put:
-+ scst_put_buf_full(cmd, address);
-+ if (offset < cmd->resp_data_len)
-+ scst_set_resp_data_len(cmd, offset);
-+
-+out_free:
-+ kfree(buf);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int vdisk_set_wt(struct scst_vdisk_dev *virt_dev, int wt)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if ((virt_dev->wt_flag == wt) || virt_dev->nullio || virt_dev->nv_cache)
-+ goto out;
-+
-+ spin_lock(&virt_dev->flags_lock);
-+ virt_dev->wt_flag = wt;
-+ spin_unlock(&virt_dev->flags_lock);
-+
-+ scst_dev_del_all_thr_data(virt_dev->dev);
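-+ /*
-+ * Drop all per-thread file handles so that subsequent commands
-+ * reopen the backing file with the new O_SYNC setting (see
-+ * vdev_open_fd()).
-+ */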
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void vdisk_ctrl_m_pg_select(unsigned char *p,
-+ struct scst_vdisk_dev *virt_dev, struct scst_cmd *cmd)
-+{
-+ struct scst_device *dev = virt_dev->dev;
-+ int old_swp = dev->swp, old_tas = dev->tas, old_dsense = dev->d_sense;
-+
-+#if 0 /* Not implemented yet, see comment in vdisk_ctrl_m_pg() */
-+ dev->tst = (p[2] >> 5) & 1;
-+ dev->queue_alg = p[3] >> 4;
-+#else
-+ if ((dev->tst != ((p[2] >> 5) & 1)) || (dev->queue_alg != (p[3] >> 4))) {
-+ TRACE(TRACE_MINOR|TRACE_SCSI, "%s", "MODE SELECT: Changing of "
-+ "TST and QUEUE ALGORITHM not supported");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ return;
-+ }
-+#endif
-+ dev->swp = (p[4] & 0x8) >> 3;
-+ dev->tas = (p[5] & 0x40) >> 6;
-+ dev->d_sense = (p[2] & 0x4) >> 2;
-+
-+ PRINT_INFO("Device %s: new control mode page parameters: SWP %x "
-+ "(was %x), TAS %x (was %x), D_SENSE %d (was %d)",
-+ virt_dev->name, dev->swp, old_swp, dev->tas, old_tas,
-+ dev->d_sense, old_dsense);
-+ return;
-+}
-+
-+static void vdisk_exec_mode_select(struct scst_cmd *cmd)
-+{
-+ int32_t length;
-+ uint8_t *address;
-+ struct scst_vdisk_dev *virt_dev;
-+ int mselect_6, offset;
-+
-+ TRACE_ENTRY();
-+
-+ virt_dev = cmd->dev->dh_priv;
-+ mselect_6 = (MODE_SELECT == cmd->cdb[0]);
-+
-+ length = scst_get_buf_full(cmd, &address);
-+ if (unlikely(length <= 0)) {
-+ if (length < 0) {
-+ PRINT_ERROR("scst_get_buf_full() failed: %d", length);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ }
-+ goto out;
-+ }
-+
-+ if (!(cmd->cdb[1] & PF) || (cmd->cdb[1] & SP)) {
-+ TRACE(TRACE_MINOR|TRACE_SCSI, "MODE SELECT: Unsupported "
-+ "value(s) of PF and/or SP bits (cdb[1]=%x)",
-+ cmd->cdb[1]);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_put;
-+ }
-+
-+ if (mselect_6)
-+ offset = 4;
-+ else
-+ offset = 8;
-+
-+ if (address[offset - 1] == 8) {
-+ offset += 8;
-+ } else if (address[offset - 1] != 0) {
-+ PRINT_ERROR("%s", "MODE SELECT: Wrong parameters list "
-+ "lenght");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_parm_list));
-+ goto out_put;
-+ }
-+
-+ while (length > offset + 2) {
-+ if (address[offset] & PS) {
-+ PRINT_ERROR("%s", "MODE SELECT: Illegal PS bit");
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
-+ scst_sense_invalid_field_in_parm_list));
-+ goto out_put;
-+ }
-+ if ((address[offset] & 0x3f) == 0x8) {
-+ /* Caching page */
-+ if (address[offset + 1] != 18) {
-+ PRINT_ERROR("%s", "MODE SELECT: Invalid "
-+ "caching page request");
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
-+ scst_sense_invalid_field_in_parm_list));
-+ goto out_put;
-+ }
-+ if (vdisk_set_wt(virt_dev,
-+ (address[offset + 2] & WCE) ? 0 : 1) != 0) {
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out_put;
-+ }
-+ break;
-+ } else if ((address[offset] & 0x3f) == 0xA) {
-+ /* Control page */
-+ if (address[offset + 1] != 0xA) {
-+ PRINT_ERROR("%s", "MODE SELECT: Invalid "
-+ "control page request");
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
-+ scst_sense_invalid_field_in_parm_list));
-+ goto out_put;
-+ }
-+ vdisk_ctrl_m_pg_select(&address[offset], virt_dev, cmd);
-+ } else {
-+ PRINT_ERROR("MODE SELECT: Invalid request %x",
-+ address[offset] & 0x3f);
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(
-+ scst_sense_invalid_field_in_parm_list));
-+ goto out_put;
-+ }
-+ offset += address[offset + 1] + 2; /* page length excludes the 2-byte header */
-+ }
-+
-+out_put:
-+ scst_put_buf_full(cmd, address);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void vdisk_exec_log(struct scst_cmd *cmd)
-+{
-+ TRACE_ENTRY();
-+
-+ /* No log pages are supported */
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void vdisk_exec_read_capacity(struct scst_cmd *cmd)
-+{
-+ int32_t length;
-+ uint8_t *address;
-+ struct scst_vdisk_dev *virt_dev;
-+ uint32_t blocksize;
-+ uint64_t nblocks;
-+ uint8_t buffer[8];
-+
-+ TRACE_ENTRY();
-+
-+ virt_dev = cmd->dev->dh_priv;
-+ blocksize = virt_dev->block_size;
-+ nblocks = virt_dev->nblocks;
-+
-+ if ((cmd->cdb[8] & 1) == 0) {
-+ uint64_t lba = be64_to_cpu(get_unaligned((__be64 *)&cmd->cdb[2]));
-+ if (lba != 0) {
-+ TRACE_DBG("PMI zero and LBA not zero (cmd %p)", cmd);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out;
-+ }
-+ }
-+
-+ /* Last block on the virt_dev is (nblocks-1) */
-+ memset(buffer, 0, sizeof(buffer));
-+
-+ /*
-+ * If we are thinly provisioned, we must ensure that the initiator
-+ * issues a READ_CAPACITY(16) so we can return the TPE bit. By
-+ * returning 0xFFFFFFFF we do that.
-+ */
-+ if (nblocks >> 32 || virt_dev->thin_provisioned) {
-+ buffer[0] = 0xFF;
-+ buffer[1] = 0xFF;
-+ buffer[2] = 0xFF;
-+ buffer[3] = 0xFF;
-+ } else {
-+ buffer[0] = ((nblocks - 1) >> (BYTE * 3)) & 0xFF;
-+ buffer[1] = ((nblocks - 1) >> (BYTE * 2)) & 0xFF;
-+ buffer[2] = ((nblocks - 1) >> (BYTE * 1)) & 0xFF;
-+ buffer[3] = ((nblocks - 1) >> (BYTE * 0)) & 0xFF;
-+ }
-+ buffer[4] = (blocksize >> (BYTE * 3)) & 0xFF;
-+ buffer[5] = (blocksize >> (BYTE * 2)) & 0xFF;
-+ buffer[6] = (blocksize >> (BYTE * 1)) & 0xFF;
-+ buffer[7] = (blocksize >> (BYTE * 0)) & 0xFF;
-+
-+ length = scst_get_buf_full(cmd, &address);
-+ if (unlikely(length <= 0)) {
-+ if (length < 0) {
-+ PRINT_ERROR("scst_get_buf_full() failed: %d", length);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ }
-+ goto out;
-+ }
-+
-+ length = min_t(int, length, sizeof(buffer));
-+
-+ memcpy(address, buffer, length);
-+
-+ scst_put_buf_full(cmd, address);
-+
-+ if (length < cmd->resp_data_len)
-+ scst_set_resp_data_len(cmd, length);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void vdisk_exec_read_capacity16(struct scst_cmd *cmd)
-+{
-+ int32_t length;
-+ uint8_t *address;
-+ struct scst_vdisk_dev *virt_dev;
-+ uint32_t blocksize;
-+ uint64_t nblocks;
-+ uint8_t buffer[32];
-+
-+ TRACE_ENTRY();
-+
-+ virt_dev = cmd->dev->dh_priv;
-+ blocksize = virt_dev->block_size;
-+ nblocks = virt_dev->nblocks - 1;
-+
-+ if ((cmd->cdb[14] & 1) == 0) {
-+ uint64_t lba = be64_to_cpu(get_unaligned((__be64 *)&cmd->cdb[2]));
-+ if (lba != 0) {
-+ TRACE_DBG("PMI zero and LBA not zero (cmd %p)", cmd);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out;
-+ }
-+ }
-+
-+ memset(buffer, 0, sizeof(buffer));
-+
-+ buffer[0] = nblocks >> 56;
-+ buffer[1] = (nblocks >> 48) & 0xFF;
-+ buffer[2] = (nblocks >> 40) & 0xFF;
-+ buffer[3] = (nblocks >> 32) & 0xFF;
-+ buffer[4] = (nblocks >> 24) & 0xFF;
-+ buffer[5] = (nblocks >> 16) & 0xFF;
-+ buffer[6] = (nblocks >> 8) & 0xFF;
-+ buffer[7] = nblocks & 0xFF;
-+
-+ buffer[8] = (blocksize >> (BYTE * 3)) & 0xFF;
-+ buffer[9] = (blocksize >> (BYTE * 2)) & 0xFF;
-+ buffer[10] = (blocksize >> (BYTE * 1)) & 0xFF;
-+ buffer[11] = (blocksize >> (BYTE * 0)) & 0xFF;
-+
-+ switch (blocksize) {
-+ case 512:
-+ buffer[13] = 3;
-+ break;
-+ case 1024:
-+ buffer[13] = 2;
-+ break;
-+ case 2048:
-+ buffer[13] = 1;
-+ break;
-+ case 4096:
-+ default:
-+ buffer[13] = 0;
-+ break;
-+ }
-+
-+ if (virt_dev->thin_provisioned) {
-+ buffer[14] |= 0x80; /* Add TPE */
-+#if 0 /*
-+ * Might be a big performance and functionality win, but might be
-+ * dangerous as well. Generally it should nearly always be set,
-+ * because nearly all devices should return zero for unmapped blocks.
-+ * But let's be on the safe side and disable it for now.
-+ */
-+ buffer[14] |= 0x40; /* Add TPRZ */
-+#endif
-+ }
-+
-+ length = scst_get_buf_full(cmd, &address);
-+ if (unlikely(length <= 0)) {
-+ if (length < 0) {
-+ PRINT_ERROR("scst_get_buf_full() failed: %d", length);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ }
-+ goto out;
-+ }
-+
-+ length = min_t(int, length, sizeof(buffer));
-+
-+ memcpy(address, buffer, length);
-+
-+ scst_put_buf_full(cmd, address);
-+
-+ if (length < cmd->resp_data_len)
-+ scst_set_resp_data_len(cmd, length);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* SPC-4 REPORT TARGET PORT GROUPS command */
-+static void vdisk_exec_report_tpgs(struct scst_cmd *cmd)
-+{
-+ struct scst_device *dev;
-+ uint8_t *address;
-+ void *buf;
-+ int32_t buf_len;
-+ uint32_t allocation_length, data_length, length;
-+ uint8_t data_format;
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ buf_len = scst_get_buf_full(cmd, &address);
-+ if (buf_len < 0) {
-+ PRINT_ERROR("scst_get_buf_full() failed: %d", buf_len);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out;
-+ }
-+
-+ if (cmd->cdb_len < 12)
-+ PRINT_WARNING("received invalid REPORT TARGET PORT GROUPS "
-+ "command - length %d is too small (should be at "
-+ "least 12 bytes)", cmd->cdb_len);
-+
-+ dev = cmd->dev;
-+ data_format = cmd->cdb_len > 1 ? cmd->cdb[1] >> 5 : 0;
-+ allocation_length = cmd->cdb_len >= 10 ?
-+ be32_to_cpu(get_unaligned((__be32 *)(cmd->cdb + 6))) : 1024;
-+
-+ res = scst_tg_get_group_info(&buf, &data_length, dev, data_format);
-+ if (res == -ENOMEM) {
-+ scst_set_busy(cmd);
-+ goto out_put;
-+ } else if (res < 0) {
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out_put;
-+ }
-+
-+ length = min_t(uint32_t, min(allocation_length, data_length), buf_len);
-+ memcpy(address, buf, length);
-+ kfree(buf);
-+ if (length < cmd->resp_data_len)
-+ scst_set_resp_data_len(cmd, length);
-+
-+out_put:
-+ scst_put_buf_full(cmd, address);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void vdisk_exec_read_toc(struct scst_cmd *cmd)
-+{
-+ int32_t length, off = 0;
-+ uint8_t *address;
-+ struct scst_vdisk_dev *virt_dev;
-+ uint32_t nblocks;
-+ uint8_t buffer[4+8+8] = { 0x00, 0x0a, 0x01, 0x01, 0x00, 0x14,
-+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
-+
-+ TRACE_ENTRY();
-+
-+ if (cmd->dev->type != TYPE_ROM) {
-+ PRINT_ERROR("%s", "READ TOC for non-CDROM device");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_opcode));
-+ goto out;
-+ }
-+
-+ if (cmd->cdb[2] & 0x0e/*Format*/) {
-+ PRINT_ERROR("%s", "READ TOC: invalid requested data format");
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out;
-+ }
-+
-+ if ((cmd->cdb[6] != 0 && (cmd->cdb[2] & 0x01)) ||
-+ (cmd->cdb[6] > 1 && cmd->cdb[6] != 0xAA)) {
-+ PRINT_ERROR("READ TOC: invalid requested track number %x",
-+ cmd->cdb[6]);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ goto out;
-+ }
-+
-+ length = scst_get_buf_full(cmd, &address);
-+ if (unlikely(length <= 0)) {
-+ if (length < 0) {
-+ PRINT_ERROR("scst_get_buf_full() failed: %d", length);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ }
-+ goto out;
-+ }
-+
-+ virt_dev = cmd->dev->dh_priv;
-+ /* ToDo: revisit when a > 8TB ROM device exists. */
-+ nblocks = (uint32_t)virt_dev->nblocks;
-+
-+ /* Header */
-+ memset(buffer, 0, sizeof(buffer));
-+ buffer[2] = 0x01; /* First Track/Session */
-+ buffer[3] = 0x01; /* Last Track/Session */
-+ off = 4;
-+ if (cmd->cdb[6] <= 1) {
-+ /* First TOC Track Descriptor */
-+ /* ADDR 0x10 - Q Sub-channel encodes current position data
-+ CONTROL 0x04 - Data track, recorded uninterrupted */
-+ buffer[off+1] = 0x14;
-+ /* Track Number */
-+ buffer[off+2] = 0x01;
-+ off += 8;
-+ }
-+ if (!(cmd->cdb[2] & 0x01)) {
-+ /* Lead-out area TOC Track Descriptor */
-+ buffer[off+1] = 0x14;
-+ /* Track Number */
-+ buffer[off+2] = 0xAA;
-+ /* Track Start Address */
-+ buffer[off+4] = (nblocks >> (BYTE * 3)) & 0xFF;
-+ buffer[off+5] = (nblocks >> (BYTE * 2)) & 0xFF;
-+ buffer[off+6] = (nblocks >> (BYTE * 1)) & 0xFF;
-+ buffer[off+7] = (nblocks >> (BYTE * 0)) & 0xFF;
-+ off += 8;
-+ }
-+
-+ buffer[1] = off - 2; /* Data Length */
-+
-+ if (off > length)
-+ off = length;
-+ memcpy(address, buffer, off);
-+
-+ scst_put_buf_full(cmd, address);
-+
-+ if (off < cmd->resp_data_len)
-+ scst_set_resp_data_len(cmd, off);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void vdisk_exec_prevent_allow_medium_removal(struct scst_cmd *cmd)
-+{
-+ struct scst_vdisk_dev *virt_dev = cmd->dev->dh_priv;
-+
-+ TRACE_DBG("PERSIST/PREVENT 0x%02x", cmd->cdb[4]);
-+
-+ spin_lock(&virt_dev->flags_lock);
-+ virt_dev->prevent_allow_medium_removal = cmd->cdb[4] & 0x01 ? 1 : 0;
-+ spin_unlock(&virt_dev->flags_lock);
-+
-+ return;
-+}
-+
-+static int vdisk_fsync(struct scst_vdisk_thr *thr, loff_t loff,
-+ loff_t len, struct scst_cmd *cmd, struct scst_device *dev)
-+{
-+ int res = 0;
-+ struct scst_vdisk_dev *virt_dev = dev->dh_priv;
-+ struct file *file;
-+
-+ TRACE_ENTRY();
-+
-+ /* Hopefully, the compiler will generate a single comparison */
-+ if (virt_dev->nv_cache || virt_dev->wt_flag ||
-+ virt_dev->o_direct_flag || virt_dev->nullio)
-+ goto out;
-+
-+ if (virt_dev->blockio) {
-+ res = vdisk_blockio_flush(thr->bdev,
-+ (cmd->noio_mem_alloc ? GFP_NOIO : GFP_KERNEL), true);
-+ goto out;
-+ }
-+
-+ file = thr->fd;
-+
-+#if 0 /* For sparse files we might need to sync metadata as well */
-+ res = generic_write_sync(file, loff, len);
-+#else
-+ res = filemap_write_and_wait_range(file->f_mapping, loff, len);
-+#endif
-+ if (unlikely(res != 0)) {
-+ PRINT_ERROR("sync range failed (%d)", res);
-+ if (cmd != NULL) {
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_write_error));
-+ }
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct iovec *vdisk_alloc_iv(struct scst_cmd *cmd,
-+ struct scst_vdisk_thr *thr)
-+{
-+ int iv_count;
-+
-+ iv_count = min_t(int, scst_get_buf_count(cmd), UIO_MAXIOV);
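-+ /*
-+ * The iovec array is cached in the per-thread data and only ever
-+ * grown, so commands of similar size reuse the same allocation.
-+ */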
-+ if (iv_count > thr->iv_count) {
-+ kfree(thr->iv);
-+ /* It can't be called in atomic context */
-+ thr->iv = kmalloc(sizeof(*thr->iv) * iv_count, GFP_KERNEL);
-+ if (thr->iv == NULL) {
-+ PRINT_ERROR("Unable to allocate iv (%d)", iv_count);
-+ scst_set_busy(cmd);
-+ goto out;
-+ }
-+ thr->iv_count = iv_count;
-+ }
-+
-+out:
-+ return thr->iv;
-+}
-+
-+static void vdisk_exec_read(struct scst_cmd *cmd,
-+ struct scst_vdisk_thr *thr, loff_t loff)
-+{
-+ mm_segment_t old_fs;
-+ loff_t err;
-+ ssize_t length, full_len;
-+ uint8_t __user *address;
-+ struct scst_vdisk_dev *virt_dev = cmd->dev->dh_priv;
-+ struct file *fd = thr->fd;
-+ struct iovec *iv;
-+ int iv_count, i;
-+ bool finished = false;
-+
-+ TRACE_ENTRY();
-+
-+ if (virt_dev->nullio)
-+ goto out;
-+
-+ iv = vdisk_alloc_iv(cmd, thr);
-+ if (iv == NULL)
-+ goto out;
-+
-+ length = scst_get_buf_first(cmd, (uint8_t __force **)&address);
-+ if (unlikely(length < 0)) {
-+ PRINT_ERROR("scst_get_buf_first() failed: %zd", length);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out;
-+ }
-+
-+ old_fs = get_fs();
-+ set_fs(get_ds());
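-+ /*
-+ * The get_fs()/set_fs() dance lets vfs_readv() accept the
-+ * kernel-space iovecs built below as if they were user-space
-+ * pointers.
-+ */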
-+
-+ while (1) {
-+ iv_count = 0;
-+ full_len = 0;
-+ i = -1;
-+ while (length > 0) {
-+ full_len += length;
-+ i++;
-+ iv_count++;
-+ iv[i].iov_base = address;
-+ iv[i].iov_len = length;
-+ if (iv_count == UIO_MAXIOV)
-+ break;
-+ length = scst_get_buf_next(cmd,
-+ (uint8_t __force **)&address);
-+ }
-+ if (length == 0) {
-+ finished = true;
-+ if (unlikely(iv_count == 0))
-+ break;
-+ } else if (unlikely(length < 0)) {
-+ PRINT_ERROR("scst_get_buf_next() failed: %zd", length);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out_set_fs;
-+ }
-+
-+ TRACE_DBG("(iv_count %d, full_len %zd)", iv_count, full_len);
-+ /* SEEK */
-+ if (fd->f_op->llseek)
-+ err = fd->f_op->llseek(fd, loff, 0/*SEEK_SET*/);
-+ else
-+ err = default_llseek(fd, loff, 0/*SEEK_SET*/);
-+ if (err != loff) {
-+ PRINT_ERROR("lseek trouble %lld != %lld",
-+ (long long unsigned int)err,
-+ (long long unsigned int)loff);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out_set_fs;
-+ }
-+
-+ /* READ */
-+ err = vfs_readv(fd, (struct iovec __force __user *)iv, iv_count,
-+ &fd->f_pos);
-+
-+ if ((err < 0) || (err < full_len)) {
-+ PRINT_ERROR("readv() returned %lld from %zd",
-+ (long long unsigned int)err,
-+ full_len);
-+ if (err == -EAGAIN)
-+ scst_set_busy(cmd);
-+ else {
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_read_error));
-+ }
-+ goto out_set_fs;
-+ }
-+
-+ for (i = 0; i < iv_count; i++)
-+ scst_put_buf(cmd, (void __force *)(iv[i].iov_base));
-+
-+ if (finished)
-+ break;
-+
-+ loff += full_len;
-+ length = scst_get_buf_next(cmd, (uint8_t __force **)&address);
-+	}
-+
-+ set_fs(old_fs);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+
-+out_set_fs:
-+ set_fs(old_fs);
-+ for (i = 0; i < iv_count; i++)
-+ scst_put_buf(cmd, (void __force *)(iv[i].iov_base));
-+ goto out;
-+}
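-+
-+/*
-+ * Illustrative sketch, not part of the original code above: the gather
-+ * loop in vdisk_exec_read() batches the command's scatter-gather
-+ * segments into at most UIO_MAXIOV iovecs per vfs_readv() call. All
-+ * names below except struct iovec, UIO_MAXIOV and vfs_readv() are
-+ * hypothetical.
-+ */
-+static ssize_t example_readv_batch(struct file *f, struct iovec *iv,
-+	uint8_t *segs[], size_t lens[], int nsegs)
-+{
-+	int i, cnt = (nsegs < UIO_MAXIOV) ? nsegs : UIO_MAXIOV;
-+
-+	for (i = 0; i < cnt; i++) {
-+		iv[i].iov_base = segs[i];	/* one iovec per segment */
-+		iv[i].iov_len = lens[i];
-+	}
-+	/* The caller repeats this for the remaining segments, if any */
-+	return vfs_readv(f, (struct iovec __force __user *)iv, cnt,
-+		&f->f_pos);
-+}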
-+
-+static void vdisk_exec_write(struct scst_cmd *cmd,
-+ struct scst_vdisk_thr *thr, loff_t loff)
-+{
-+ mm_segment_t old_fs;
-+ loff_t err;
-+ ssize_t length, full_len, saved_full_len;
-+ uint8_t __user *address;
-+ struct scst_vdisk_dev *virt_dev = cmd->dev->dh_priv;
-+ struct file *fd = thr->fd;
-+ struct iovec *iv, *eiv;
-+ int i, iv_count, eiv_count;
-+ bool finished = false;
-+
-+ TRACE_ENTRY();
-+
-+ if (virt_dev->nullio)
-+ goto out;
-+
-+ iv = vdisk_alloc_iv(cmd, thr);
-+ if (iv == NULL)
-+ goto out;
-+
-+ length = scst_get_buf_first(cmd, (uint8_t __force **)&address);
-+ if (unlikely(length < 0)) {
-+ PRINT_ERROR("scst_get_buf_first() failed: %zd", length);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out;
-+ }
-+
-+ old_fs = get_fs();
-+ set_fs(get_ds());
-+
-+ while (1) {
-+ iv_count = 0;
-+ full_len = 0;
-+ i = -1;
-+ while (length > 0) {
-+ full_len += length;
-+ i++;
-+ iv_count++;
-+ iv[i].iov_base = address;
-+ iv[i].iov_len = length;
-+ if (iv_count == UIO_MAXIOV)
-+ break;
-+ length = scst_get_buf_next(cmd,
-+ (uint8_t __force **)&address);
-+ }
-+ if (length == 0) {
-+ finished = true;
-+ if (unlikely(iv_count == 0))
-+ break;
-+ } else if (unlikely(length < 0)) {
-+ PRINT_ERROR("scst_get_buf_next() failed: %zd", length);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out_set_fs;
-+ }
-+
-+ saved_full_len = full_len;
-+ eiv = iv;
-+ eiv_count = iv_count;
-+restart:
-+ TRACE_DBG("writing(eiv_count %d, full_len %zd)", eiv_count, full_len);
-+
-+ /* SEEK */
-+ if (fd->f_op->llseek)
-+ err = fd->f_op->llseek(fd, loff, 0 /*SEEK_SET */);
-+ else
-+ err = default_llseek(fd, loff, 0 /*SEEK_SET */);
-+ if (err != loff) {
-+ PRINT_ERROR("lseek trouble %lld != %lld",
-+ (long long unsigned int)err,
-+ (long long unsigned int)loff);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out_set_fs;
-+ }
-+
-+ /* WRITE */
-+ err = vfs_writev(fd, (struct iovec __force __user *)eiv, eiv_count,
-+ &fd->f_pos);
-+
-+ if (err < 0) {
-+ PRINT_ERROR("write() returned %lld from %zd",
-+ (long long unsigned int)err,
-+ full_len);
-+ if (err == -EAGAIN)
-+ scst_set_busy(cmd);
-+ else {
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_write_error));
-+ }
-+ goto out_set_fs;
-+ } else if (err < full_len) {
-+			/*
-+			 * Probably that's wrong, but sometimes write() returns
-+			 * a value less than requested. Let's restart.
-+			 */
-+ int e = eiv_count;
-+ TRACE_MGMT_DBG("write() returned %d from %zd "
-+ "(iv_count=%d)", (int)err, full_len,
-+ eiv_count);
-+ if (err == 0) {
-+ PRINT_INFO("Suspicious: write() returned 0 from "
-+ "%zd (iv_count=%d)", full_len, eiv_count);
-+ }
-+ full_len -= err;
-+ for (i = 0; i < e; i++) {
-+ if ((long long)eiv->iov_len < err) {
-+ err -= eiv->iov_len;
-+ eiv++;
-+ eiv_count--;
-+ } else {
-+ eiv->iov_base =
-+ (uint8_t __force __user *)eiv->iov_base + err;
-+ eiv->iov_len -= err;
-+ break;
-+ }
-+ }
-+ goto restart;
-+ }
-+
-+ for (i = 0; i < iv_count; i++)
-+ scst_put_buf(cmd, (void __force *)(iv[i].iov_base));
-+
-+ if (finished)
-+ break;
-+
-+ loff += saved_full_len;
-+ length = scst_get_buf_next(cmd, (uint8_t __force **)&address);
-+ }
-+
-+ set_fs(old_fs);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+
-+out_set_fs:
-+ set_fs(old_fs);
-+ for (i = 0; i < iv_count; i++)
-+ scst_put_buf(cmd, (void __force *)(iv[i].iov_base));
-+ goto out;
-+}
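-+
-+/*
-+ * Illustrative sketch, not part of the original code above: how the
-+ * "restart:" path in vdisk_exec_write() resumes a short write by
-+ * advancing the iovec array past the bytes already written. All names
-+ * are hypothetical except struct iovec.
-+ */
-+static void example_advance_iov(struct iovec **piv, int *pcnt, size_t done)
-+{
-+	struct iovec *iv = *piv;
-+	int cnt = *pcnt;
-+
-+	while (cnt > 0 && done >= iv->iov_len) {
-+		done -= iv->iov_len;	/* this element fully written */
-+		iv++;
-+		cnt--;
-+	}
-+	if (cnt > 0 && done > 0) {
-+		/* Partially written element: shift its base forward */
-+		iv->iov_base = (uint8_t *)iv->iov_base + done;
-+		iv->iov_len -= done;
-+	}
-+	*piv = iv;
-+	*pcnt = cnt;
-+}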
-+
-+struct scst_blockio_work {
-+ atomic_t bios_inflight;
-+ struct scst_cmd *cmd;
-+};
-+
-+static inline void blockio_check_finish(struct scst_blockio_work *blockio_work)
-+{
-+ /* Decrement the bios in processing, and if zero signal completion */
-+ if (atomic_dec_and_test(&blockio_work->bios_inflight)) {
-+ blockio_work->cmd->completed = 1;
-+ blockio_work->cmd->scst_cmd_done(blockio_work->cmd,
-+ SCST_CMD_STATE_DEFAULT, scst_estimate_context());
-+ kmem_cache_free(blockio_work_cachep, blockio_work);
-+ }
-+ return;
-+}
-+
-+static void blockio_endio(struct bio *bio, int error)
-+{
-+ struct scst_blockio_work *blockio_work = bio->bi_private;
-+
-+ if (unlikely(!bio_flagged(bio, BIO_UPTODATE))) {
-+ if (error == 0) {
-+			PRINT_ERROR("Bio not up to date, but error is 0 for "
-+				"cmd %p; returning -EIO", blockio_work->cmd);
-+ error = -EIO;
-+ }
-+ }
-+
-+ if (unlikely(error != 0)) {
-+ static DEFINE_SPINLOCK(blockio_endio_lock);
-+ unsigned long flags;
-+
-+ PRINT_ERROR("cmd %p returned error %d", blockio_work->cmd,
-+ error);
-+
-+ /* To protect from several bios finishing simultaneously */
-+ spin_lock_irqsave(&blockio_endio_lock, flags);
-+
-+ if (bio->bi_rw & REQ_WRITE)
-+ scst_set_cmd_error(blockio_work->cmd,
-+ SCST_LOAD_SENSE(scst_sense_write_error));
-+ else
-+ scst_set_cmd_error(blockio_work->cmd,
-+ SCST_LOAD_SENSE(scst_sense_read_error));
-+
-+ spin_unlock_irqrestore(&blockio_endio_lock, flags);
-+ }
-+
-+ blockio_check_finish(blockio_work);
-+
-+ bio_put(bio);
-+ return;
-+}
-+
-+static void blockio_exec_rw(struct scst_cmd *cmd, struct scst_vdisk_thr *thr,
-+ u64 lba_start, int write)
-+{
-+ struct scst_vdisk_dev *virt_dev = cmd->dev->dh_priv;
-+ struct block_device *bdev = thr->bdev;
-+ struct request_queue *q = bdev_get_queue(bdev);
-+ int length, max_nr_vecs = 0, offset;
-+ struct page *page;
-+ struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
-+ int need_new_bio;
-+ struct scst_blockio_work *blockio_work;
-+ int bios = 0;
-+ gfp_t gfp_mask = (cmd->noio_mem_alloc ? GFP_NOIO : GFP_KERNEL);
-+ struct blk_plug plug;
-+
-+ TRACE_ENTRY();
-+
-+ if (virt_dev->nullio)
-+ goto out;
-+
-+ /* Allocate and initialize blockio_work struct */
-+ blockio_work = kmem_cache_alloc(blockio_work_cachep, gfp_mask);
-+ if (blockio_work == NULL)
-+ goto out_no_mem;
-+
-+ blockio_work->cmd = cmd;
-+
-+ if (q)
-+ max_nr_vecs = min(bio_get_nr_vecs(bdev), BIO_MAX_PAGES);
-+ else
-+ max_nr_vecs = 1;
-+
-+ need_new_bio = 1;
-+
-+ length = scst_get_sg_page_first(cmd, &page, &offset);
-+ while (length > 0) {
-+ int len, bytes, off, thislen;
-+ struct page *pg;
-+ u64 lba_start0;
-+
-+ pg = page;
-+ len = length;
-+ off = offset;
-+ thislen = 0;
-+ lba_start0 = lba_start;
-+
-+ while (len > 0) {
-+ int rc;
-+
-+ if (need_new_bio) {
-+ bio = bio_kmalloc(gfp_mask, max_nr_vecs);
-+ if (!bio) {
-+ PRINT_ERROR("Failed to create bio "
-+ "for data segment %d (cmd %p)",
-+ cmd->get_sg_buf_entry_num, cmd);
-+ goto out_no_bio;
-+ }
-+
-+ bios++;
-+ need_new_bio = 0;
-+ bio->bi_end_io = blockio_endio;
-+ bio->bi_sector = lba_start0 <<
-+ (virt_dev->block_shift - 9);
-+ bio->bi_bdev = bdev;
-+ bio->bi_private = blockio_work;
-+ /*
-+ * Better to fail fast w/o any local recovery
-+ * and retries.
-+ */
-+ bio->bi_rw |= REQ_FAILFAST_DEV |
-+ REQ_FAILFAST_TRANSPORT |
-+ REQ_FAILFAST_DRIVER;
-+#if 0 /* It could be a win, but might not be, so a performance study is needed */
-+ bio->bi_rw |= REQ_SYNC;
-+#endif
-+ if (!hbio)
-+ hbio = tbio = bio;
-+ else
-+ tbio = tbio->bi_next = bio;
-+ }
-+
-+ bytes = min_t(unsigned int, len, PAGE_SIZE - off);
-+
-+ rc = bio_add_page(bio, pg, bytes, off);
-+ if (rc < bytes) {
-+ BUG_ON(rc != 0);
-+ need_new_bio = 1;
-+ lba_start0 += thislen >> virt_dev->block_shift;
-+ thislen = 0;
-+ continue;
-+ }
-+
-+ pg++;
-+ thislen += bytes;
-+ len -= bytes;
-+ off = 0;
-+ }
-+
-+ lba_start += length >> virt_dev->block_shift;
-+
-+ scst_put_sg_page(cmd, page, offset);
-+ length = scst_get_sg_page_next(cmd, &page, &offset);
-+ }
-+
-+	/* +1 to prevent the command from erroneously completing too early */
-+ atomic_set(&blockio_work->bios_inflight, bios+1);
-+
-+ blk_start_plug(&plug);
-+
-+ while (hbio) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio->bi_next = NULL;
-+ submit_bio((write != 0), bio);
-+ }
-+
-+ blk_finish_plug(&plug);
-+
-+ blockio_check_finish(blockio_work);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+
-+out_no_bio:
-+ while (hbio) {
-+ bio = hbio;
-+ hbio = hbio->bi_next;
-+ bio_put(bio);
-+ }
-+ kmem_cache_free(blockio_work_cachep, blockio_work);
-+
-+out_no_mem:
-+ scst_set_busy(cmd);
-+ goto out;
-+}
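-+
-+/*
-+ * Illustrative sketch, not part of the original code above: the "+1"
-+ * bias on bios_inflight in blockio_exec_rw(). The submitter holds one
-+ * extra count so the command cannot complete while bios are still
-+ * being queued; whoever decrements the counter to zero signals
-+ * completion. Names other than the atomic_*() helpers are hypothetical.
-+ */
-+static void example_submit_all(atomic_t *inflight, int nr_bios)
-+{
-+	atomic_set(inflight, nr_bios + 1);	/* +1 = submitter's bias */
-+
-+	/* ... submit nr_bios bios; each completion decrements once ... */
-+
-+	if (atomic_dec_and_test(inflight)) {	/* drop the bias */
-+		/* All bios already finished: complete the command here */
-+	}
-+}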
-+
-+static int vdisk_blockio_flush(struct block_device *bdev, gfp_t gfp_mask,
-+ bool report_error)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ res = blkdev_issue_flush(bdev, gfp_mask, NULL);
-+ if ((res != 0) && report_error)
-+ PRINT_ERROR("blkdev_issue_flush() failed: %d", res);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void vdisk_exec_verify(struct scst_cmd *cmd,
-+ struct scst_vdisk_thr *thr, loff_t loff)
-+{
-+ mm_segment_t old_fs;
-+ loff_t err;
-+ ssize_t length, len_mem = 0;
-+ uint8_t *address_sav, *address;
-+ int compare;
-+ struct scst_vdisk_dev *virt_dev = cmd->dev->dh_priv;
-+ struct file *fd = thr->fd;
-+ uint8_t *mem_verify = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ if (vdisk_fsync(thr, loff, cmd->bufflen, cmd, cmd->dev) != 0)
-+ goto out;
-+
-+	/*
-+	 * Until the cache is cleared prior to verifying, there is not
-+	 * much point in this code. ToDo.
-+	 *
-+	 * Nevertheless, this code is valuable if the data have not been
-+	 * read from the file/disk yet.
-+	 */
-+
-+ /* SEEK */
-+ old_fs = get_fs();
-+ set_fs(get_ds());
-+
-+ if (!virt_dev->nullio) {
-+ if (fd->f_op->llseek)
-+ err = fd->f_op->llseek(fd, loff, 0/*SEEK_SET*/);
-+ else
-+ err = default_llseek(fd, loff, 0/*SEEK_SET*/);
-+ if (err != loff) {
-+ PRINT_ERROR("lseek trouble %lld != %lld",
-+ (long long unsigned int)err,
-+ (long long unsigned int)loff);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out_set_fs;
-+ }
-+ }
-+
-+ mem_verify = vmalloc(LEN_MEM);
-+ if (mem_verify == NULL) {
-+		PRINT_ERROR("Unable to allocate %d bytes for verify",
-+			LEN_MEM);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out_set_fs;
-+ }
-+
-+ length = scst_get_buf_first(cmd, &address);
-+ address_sav = address;
-+ if (!length && cmd->data_len) {
-+ length = cmd->data_len;
-+ compare = 0;
-+ } else
-+ compare = 1;
-+
-+ while (length > 0) {
-+ len_mem = (length > LEN_MEM) ? LEN_MEM : length;
-+ TRACE_DBG("Verify: length %zd - len_mem %zd", length, len_mem);
-+
-+ if (!virt_dev->nullio)
-+ err = vfs_read(fd, (char __force __user *)mem_verify,
-+ len_mem, &fd->f_pos);
-+ else
-+ err = len_mem;
-+ if ((err < 0) || (err < len_mem)) {
-+ PRINT_ERROR("verify() returned %lld from %zd",
-+ (long long unsigned int)err, len_mem);
-+ if (err == -EAGAIN)
-+ scst_set_busy(cmd);
-+ else {
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_read_error));
-+ }
-+ if (compare)
-+ scst_put_buf(cmd, address_sav);
-+ goto out_set_fs;
-+ }
-+ if (compare && memcmp(address, mem_verify, len_mem) != 0) {
-+ TRACE_DBG("Verify: error memcmp length %zd", length);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_miscompare_error));
-+ scst_put_buf(cmd, address_sav);
-+ goto out_set_fs;
-+ }
-+ length -= len_mem;
-+ address += len_mem;
-+ if (compare && length <= 0) {
-+ scst_put_buf(cmd, address_sav);
-+ length = scst_get_buf_next(cmd, &address);
-+ address_sav = address;
-+ }
-+ }
-+
-+ if (length < 0) {
-+ PRINT_ERROR("scst_get_buf_() failed: %zd", length);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ }
-+
-+out_set_fs:
-+ set_fs(old_fs);
-+ if (mem_verify)
-+ vfree(mem_verify);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
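-+
-+/*
-+ * Illustrative sketch, not part of the original code above: the verify
-+ * loop compares the command buffer against the backing file in
-+ * LEN_MEM-sized chunks, so a single bounce buffer suffices no matter
-+ * how large the verified range is. All names are hypothetical.
-+ */
-+static int example_compare_chunked(const uint8_t *buf,
-+	const uint8_t *file_data, size_t total, size_t chunk)
-+{
-+	size_t off, len;
-+
-+	for (off = 0; off < total; off += len) {
-+		len = min(chunk, total - off);
-+		if (memcmp(buf + off, file_data + off, len) != 0)
-+			return -1;	/* MISCOMPARE in this chunk */
-+	}
-+	return 0;
-+}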
-+
-+static int vdisk_task_mgmt_fn(struct scst_mgmt_cmd *mcmd,
-+ struct scst_tgt_dev *tgt_dev)
-+{
-+ TRACE_ENTRY();
-+
-+ if ((mcmd->fn == SCST_LUN_RESET) || (mcmd->fn == SCST_TARGET_RESET)) {
-+ /* Restore default values */
-+ struct scst_device *dev = tgt_dev->dev;
-+ struct scst_vdisk_dev *virt_dev = dev->dh_priv;
-+
-+ dev->tst = DEF_TST;
-+ dev->d_sense = DEF_DSENSE;
-+ if (virt_dev->wt_flag && !virt_dev->nv_cache)
-+ dev->queue_alg = DEF_QUEUE_ALG_WT;
-+ else
-+ dev->queue_alg = DEF_QUEUE_ALG;
-+ dev->swp = DEF_SWP;
-+ dev->tas = DEF_TAS;
-+
-+ spin_lock(&virt_dev->flags_lock);
-+ virt_dev->prevent_allow_medium_removal = 0;
-+ spin_unlock(&virt_dev->flags_lock);
-+ } else if (mcmd->fn == SCST_PR_ABORT_ALL) {
-+ struct scst_device *dev = tgt_dev->dev;
-+ struct scst_vdisk_dev *virt_dev = dev->dh_priv;
-+ spin_lock(&virt_dev->flags_lock);
-+ virt_dev->prevent_allow_medium_removal = 0;
-+ spin_unlock(&virt_dev->flags_lock);
-+ }
-+
-+ TRACE_EXIT();
-+ return SCST_DEV_TM_NOT_COMPLETED;
-+}
-+
-+static void vdisk_report_registering(const struct scst_vdisk_dev *virt_dev)
-+{
-+ char buf[128];
-+ int i, j;
-+
-+ i = snprintf(buf, sizeof(buf), "Registering virtual %s device %s ",
-+ virt_dev->vdev_devt->name, virt_dev->name);
-+ j = i;
-+
-+ if (virt_dev->wt_flag)
-+ i += snprintf(&buf[i], sizeof(buf) - i, "(WRITE_THROUGH");
-+
-+ if (virt_dev->nv_cache)
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sNV_CACHE",
-+ (j == i) ? "(" : ", ");
-+
-+ if (virt_dev->rd_only)
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sREAD_ONLY",
-+ (j == i) ? "(" : ", ");
-+
-+ if (virt_dev->o_direct_flag)
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sO_DIRECT",
-+ (j == i) ? "(" : ", ");
-+
-+ if (virt_dev->nullio)
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sNULLIO",
-+ (j == i) ? "(" : ", ");
-+
-+ if (virt_dev->blockio)
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sBLOCKIO",
-+ (j == i) ? "(" : ", ");
-+
-+ if (virt_dev->removable)
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sREMOVABLE",
-+ (j == i) ? "(" : ", ");
-+
-+ if (virt_dev->thin_provisioned)
-+ i += snprintf(&buf[i], sizeof(buf) - i, "%sTHIN PROVISIONED",
-+ (j == i) ? "(" : ", ");
-+
-+ if (j == i)
-+ PRINT_INFO("%s", buf);
-+ else
-+ PRINT_INFO("%s)", buf);
-+
-+ return;
-+}
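-+
-+/*
-+ * For illustration (hypothetical device names and flag combinations;
-+ * the handler name comes from virt_dev->vdev_devt->name), the function
-+ * above prints lines such as:
-+ *
-+ *   Registering virtual vdisk_fileio device disk1 (WRITE_THROUGH, NV_CACHE)
-+ *
-+ * or, with no flags set, simply:
-+ *
-+ *   Registering virtual vdisk_blockio device disk2
-+ */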
-+
-+static int vdisk_resync_size(struct scst_vdisk_dev *virt_dev)
-+{
-+ loff_t file_size;
-+ int res = 0;
-+
-+ BUG_ON(virt_dev->nullio);
-+
-+ res = vdisk_get_file_size(virt_dev->filename,
-+ virt_dev->blockio, &file_size);
-+ if (res != 0)
-+ goto out;
-+
-+ if (file_size == virt_dev->file_size) {
-+ PRINT_INFO("Size of virtual disk %s remained the same",
-+ virt_dev->name);
-+ goto out;
-+ }
-+
-+ res = scst_suspend_activity(true);
-+ if (res != 0)
-+ goto out;
-+
-+ virt_dev->file_size = file_size;
-+ virt_dev->nblocks = virt_dev->file_size >> virt_dev->block_shift;
-+
-+ scst_dev_del_all_thr_data(virt_dev->dev);
-+
-+ PRINT_INFO("New size of SCSI target virtual disk %s "
-+ "(fs=%lldMB, bs=%d, nblocks=%lld, cyln=%lld%s)",
-+ virt_dev->name, virt_dev->file_size >> 20,
-+ virt_dev->block_size,
-+ (long long unsigned int)virt_dev->nblocks,
-+ (long long unsigned int)virt_dev->nblocks/64/32,
-+ virt_dev->nblocks < 64*32 ? " !WARNING! cyln less "
-+ "than 1" : "");
-+
-+ scst_capacity_data_changed(virt_dev->dev);
-+
-+ scst_resume_activity();
-+
-+out:
-+ return res;
-+}
-+
-+static int vdev_create(struct scst_dev_type *devt,
-+ const char *name, struct scst_vdisk_dev **res_virt_dev)
-+{
-+ int res;
-+ struct scst_vdisk_dev *virt_dev;
-+ uint64_t dev_id_num;
-+
-+ res = -EEXIST;
-+ if (vdev_find(name))
-+ goto out;
-+
-+ virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
-+ if (virt_dev == NULL) {
-+ PRINT_ERROR("Allocation of virtual device %s failed",
-+ devt->name);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ spin_lock_init(&virt_dev->flags_lock);
-+ virt_dev->vdev_devt = devt;
-+
-+ virt_dev->rd_only = DEF_RD_ONLY;
-+ virt_dev->removable = DEF_REMOVABLE;
-+ virt_dev->thin_provisioned = DEF_THIN_PROVISIONED;
-+
-+ virt_dev->block_size = DEF_DISK_BLOCKSIZE;
-+ virt_dev->block_shift = DEF_DISK_BLOCKSIZE_SHIFT;
-+
-+ if (strlen(name) >= sizeof(virt_dev->name)) {
-+ PRINT_ERROR("Name %s is too long (max allowed %zd)", name,
-+ sizeof(virt_dev->name)-1);
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+ strcpy(virt_dev->name, name);
-+
-+ dev_id_num = vdisk_gen_dev_id_num(virt_dev->name);
-+
-+ snprintf(virt_dev->t10_dev_id, sizeof(virt_dev->t10_dev_id),
-+ "%llx-%s", dev_id_num, virt_dev->name);
-+ TRACE_DBG("t10_dev_id %s", virt_dev->t10_dev_id);
-+
-+ scnprintf(virt_dev->usn, sizeof(virt_dev->usn), "%llx", dev_id_num);
-+ TRACE_DBG("usn %s", virt_dev->usn);
-+
-+ *res_virt_dev = virt_dev;
-+ res = 0;
-+
-+out:
-+ return res;
-+
-+out_free:
-+ kfree(virt_dev);
-+ goto out;
-+}
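-+
-+/*
-+ * For illustration (hypothetical hash value): for a device named
-+ * "disk1" whose vdisk_gen_dev_id_num() result is 0x3f2ab1c4d5e6f708,
-+ * the identifiers built above would be
-+ *
-+ *   t10_dev_id = "3f2ab1c4d5e6f708-disk1"
-+ *   usn        = "3f2ab1c4d5e6f708"
-+ */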
-+
-+static void vdev_destroy(struct scst_vdisk_dev *virt_dev)
-+{
-+ kfree(virt_dev->filename);
-+ kfree(virt_dev);
-+ return;
-+}
-+
-+static int vdev_parse_add_dev_params(struct scst_vdisk_dev *virt_dev,
-+ char *params, const char *allowed_params[])
-+{
-+ int res = 0;
-+ unsigned long val;
-+ char *param, *p, *pp;
-+
-+ TRACE_ENTRY();
-+
-+ while (1) {
-+ param = scst_get_next_token_str(&params);
-+ if (param == NULL)
-+ break;
-+
-+ p = scst_get_next_lexem(&param);
-+ if (*p == '\0') {
-+ PRINT_ERROR("Syntax error at %s (device %s)",
-+ param, virt_dev->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (allowed_params != NULL) {
-+ const char **a = allowed_params;
-+ bool allowed = false;
-+
-+ while (*a != NULL) {
-+ if (!strcasecmp(*a, p)) {
-+ allowed = true;
-+ break;
-+ }
-+ a++;
-+ }
-+
-+ if (!allowed) {
-+ PRINT_ERROR("Unknown parameter %s (device %s)", p,
-+ virt_dev->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ }
-+
-+ pp = scst_get_next_lexem(&param);
-+ if (*pp == '\0') {
-+			PRINT_ERROR("Parameter %s value missing for device %s",
-+ p, virt_dev->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (scst_get_next_lexem(&param)[0] != '\0') {
-+			PRINT_ERROR("Too many values for parameter %s (device %s)",
-+ p, virt_dev->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (!strcasecmp("filename", p)) {
-+ if (*pp != '/') {
-+ PRINT_ERROR("Filename %s must be global "
-+ "(device %s)", pp, virt_dev->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ virt_dev->filename = kstrdup(pp, GFP_KERNEL);
-+ if (virt_dev->filename == NULL) {
-+ PRINT_ERROR("Unable to duplicate file name %s "
-+ "(device %s)", pp, virt_dev->name);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ continue;
-+ }
-+
-+ res = strict_strtoul(pp, 0, &val);
-+ if (res != 0) {
-+ PRINT_ERROR("strict_strtoul() for %s failed: %d "
-+ "(device %s)", pp, res, virt_dev->name);
-+ goto out;
-+ }
-+
-+ if (!strcasecmp("write_through", p)) {
-+ virt_dev->wt_flag = val;
-+ TRACE_DBG("WRITE THROUGH %d", virt_dev->wt_flag);
-+ } else if (!strcasecmp("nv_cache", p)) {
-+ virt_dev->nv_cache = val;
-+ TRACE_DBG("NON-VOLATILE CACHE %d", virt_dev->nv_cache);
-+ } else if (!strcasecmp("o_direct", p)) {
-+#if 0
-+ virt_dev->o_direct_flag = val;
-+ TRACE_DBG("O_DIRECT %d", virt_dev->o_direct_flag);
-+#else
-+			PRINT_INFO("O_DIRECT flag doesn't currently "
-+				"work; ignoring it. Use fileio_tgt "
-+				"in O_DIRECT mode instead (device %s)", virt_dev->name);
-+#endif
-+ } else if (!strcasecmp("read_only", p)) {
-+ virt_dev->rd_only = val;
-+ TRACE_DBG("READ ONLY %d", virt_dev->rd_only);
-+ } else if (!strcasecmp("removable", p)) {
-+ virt_dev->removable = val;
-+ TRACE_DBG("REMOVABLE %d", virt_dev->removable);
-+ } else if (!strcasecmp("thin_provisioned", p)) {
-+ virt_dev->thin_provisioned = val;
-+ TRACE_DBG("THIN PROVISIONED %d",
-+ virt_dev->thin_provisioned);
-+ } else if (!strcasecmp("blocksize", p)) {
-+ virt_dev->block_size = val;
-+ virt_dev->block_shift = scst_calc_block_shift(
-+ virt_dev->block_size);
-+ if (virt_dev->block_shift < 9) {
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ TRACE_DBG("block_size %d, block_shift %d",
-+ virt_dev->block_size,
-+ virt_dev->block_shift);
-+ } else {
-+ PRINT_ERROR("Unknown parameter %s (device %s)", p,
-+ virt_dev->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
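-+
-+/*
-+ * For illustration (hypothetical values; see the parser above): the
-+ * params string is a sequence of semicolon-separated "name=value"
-+ * tokens, e.g.
-+ *
-+ *   filename=/vdisks/disk1; blocksize=4096; nv_cache=1; read_only=0
-+ *
-+ * Each token is split into a name and a value lexem, and every value
-+ * except filename must parse as an unsigned integer.
-+ */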
-+
-+/* scst_vdisk_mutex supposed to be held */
-+static int vdev_fileio_add_device(const char *device_name, char *params)
-+{
-+ int res = 0;
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ res = vdev_create(&vdisk_file_devtype, device_name, &virt_dev);
-+ if (res != 0)
-+ goto out;
-+
-+ virt_dev->command_set_version = 0x04C0; /* SBC-3 */
-+
-+ virt_dev->wt_flag = DEF_WRITE_THROUGH;
-+ virt_dev->nv_cache = DEF_NV_CACHE;
-+ virt_dev->o_direct_flag = DEF_O_DIRECT;
-+
-+ res = vdev_parse_add_dev_params(virt_dev, params, NULL);
-+ if (res != 0)
-+ goto out_destroy;
-+
-+ if (virt_dev->rd_only && (virt_dev->wt_flag || virt_dev->nv_cache)) {
-+ PRINT_ERROR("Write options on read only device %s",
-+ virt_dev->name);
-+ res = -EINVAL;
-+ goto out_destroy;
-+ }
-+
-+ if (virt_dev->filename == NULL) {
-+ PRINT_ERROR("File name required (device %s)", virt_dev->name);
-+ res = -EINVAL;
-+ goto out_destroy;
-+ }
-+
-+ list_add_tail(&virt_dev->vdev_list_entry, &vdev_list);
-+
-+ vdisk_report_registering(virt_dev);
-+
-+ virt_dev->virt_id = scst_register_virtual_device(virt_dev->vdev_devt,
-+ virt_dev->name);
-+ if (virt_dev->virt_id < 0) {
-+ res = virt_dev->virt_id;
-+ goto out_del;
-+ }
-+
-+ TRACE_DBG("Registered virt_dev %s with id %d", virt_dev->name,
-+ virt_dev->virt_id);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_del:
-+ list_del(&virt_dev->vdev_list_entry);
-+
-+out_destroy:
-+ vdev_destroy(virt_dev);
-+ goto out;
-+}
-+
-+/* scst_vdisk_mutex supposed to be held */
-+static int vdev_blockio_add_device(const char *device_name, char *params)
-+{
-+ int res = 0;
-+ const char *allowed_params[] = { "filename", "read_only", "removable",
-+ "blocksize", "nv_cache",
-+ "thin_provisioned", NULL };
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ res = vdev_create(&vdisk_blk_devtype, device_name, &virt_dev);
-+ if (res != 0)
-+ goto out;
-+
-+ virt_dev->command_set_version = 0x04C0; /* SBC-3 */
-+
-+ virt_dev->blockio = 1;
-+
-+ res = vdev_parse_add_dev_params(virt_dev, params, allowed_params);
-+ if (res != 0)
-+ goto out_destroy;
-+
-+ if (virt_dev->filename == NULL) {
-+ PRINT_ERROR("File name required (device %s)", virt_dev->name);
-+ res = -EINVAL;
-+ goto out_destroy;
-+ }
-+
-+ list_add_tail(&virt_dev->vdev_list_entry, &vdev_list);
-+
-+ vdisk_report_registering(virt_dev);
-+
-+ virt_dev->virt_id = scst_register_virtual_device(virt_dev->vdev_devt,
-+ virt_dev->name);
-+ if (virt_dev->virt_id < 0) {
-+ res = virt_dev->virt_id;
-+ goto out_del;
-+ }
-+
-+ TRACE_DBG("Registered virt_dev %s with id %d", virt_dev->name,
-+ virt_dev->virt_id);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_del:
-+ list_del(&virt_dev->vdev_list_entry);
-+
-+out_destroy:
-+ vdev_destroy(virt_dev);
-+ goto out;
-+}
-+
-+/* scst_vdisk_mutex supposed to be held */
-+static int vdev_nullio_add_device(const char *device_name, char *params)
-+{
-+ int res = 0;
-+ const char *allowed_params[] = { "read_only", "removable",
-+ "blocksize", NULL };
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ res = vdev_create(&vdisk_null_devtype, device_name, &virt_dev);
-+ if (res != 0)
-+ goto out;
-+
-+ virt_dev->command_set_version = 0x04C0; /* SBC-3 */
-+
-+ virt_dev->nullio = 1;
-+
-+ res = vdev_parse_add_dev_params(virt_dev, params, allowed_params);
-+ if (res != 0)
-+ goto out_destroy;
-+
-+ list_add_tail(&virt_dev->vdev_list_entry, &vdev_list);
-+
-+ vdisk_report_registering(virt_dev);
-+
-+ virt_dev->virt_id = scst_register_virtual_device(virt_dev->vdev_devt,
-+ virt_dev->name);
-+ if (virt_dev->virt_id < 0) {
-+ res = virt_dev->virt_id;
-+ goto out_del;
-+ }
-+
-+ TRACE_DBG("Registered virt_dev %s with id %d", virt_dev->name,
-+ virt_dev->virt_id);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_del:
-+ list_del(&virt_dev->vdev_list_entry);
-+
-+out_destroy:
-+ vdev_destroy(virt_dev);
-+ goto out;
-+}
-+
-+static ssize_t vdisk_add_fileio_device(const char *device_name, char *params)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_vdisk_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ res = vdev_fileio_add_device(device_name, params);
-+
-+ mutex_unlock(&scst_vdisk_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t vdisk_add_blockio_device(const char *device_name, char *params)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_vdisk_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ res = vdev_blockio_add_device(device_name, params);
-+
-+ mutex_unlock(&scst_vdisk_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+	return res;
-+}
-+
-+static ssize_t vdisk_add_nullio_device(const char *device_name, char *params)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_vdisk_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ res = vdev_nullio_add_device(device_name, params);
-+
-+ mutex_unlock(&scst_vdisk_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+	return res;
-+}
-+
-+/* scst_vdisk_mutex supposed to be held */
-+static void vdev_del_device(struct scst_vdisk_dev *virt_dev)
-+{
-+ TRACE_ENTRY();
-+
-+ scst_unregister_virtual_device(virt_dev->virt_id);
-+
-+ list_del(&virt_dev->vdev_list_entry);
-+
-+ PRINT_INFO("Virtual device %s unregistered", virt_dev->name);
-+ TRACE_DBG("virt_id %d unregistered", virt_dev->virt_id);
-+
-+ vdev_destroy(virt_dev);
-+
-+ return;
-+}
-+
-+static ssize_t vdisk_del_device(const char *device_name)
-+{
-+ int res = 0;
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_vdisk_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ virt_dev = vdev_find(device_name);
-+ if (virt_dev == NULL) {
-+ PRINT_ERROR("Device %s not found", device_name);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ vdev_del_device(virt_dev);
-+
-+out_unlock:
-+ mutex_unlock(&scst_vdisk_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* scst_vdisk_mutex supposed to be held */
-+static ssize_t __vcdrom_add_device(const char *device_name, char *params)
-+{
-+ int res = 0;
-+ const char *allowed_params[] = { NULL }; /* no params */
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ res = vdev_create(&vcdrom_devtype, device_name, &virt_dev);
-+ if (res != 0)
-+ goto out;
-+
-+#if 0 /*
-+       * Our implementation is pretty minimalistic and doesn't support all
-+       * mandatory commands, so it's better not to claim any standard
-+       * conformance.
-+       */
-+ virt_dev->command_set_version = 0x02A0; /* MMC-3 */
-+#endif
-+
-+ virt_dev->rd_only = 1;
-+ virt_dev->removable = 1;
-+ virt_dev->cdrom_empty = 1;
-+
-+ virt_dev->block_size = DEF_CDROM_BLOCKSIZE;
-+ virt_dev->block_shift = DEF_CDROM_BLOCKSIZE_SHIFT;
-+
-+ res = vdev_parse_add_dev_params(virt_dev, params, allowed_params);
-+ if (res != 0)
-+ goto out_destroy;
-+
-+ list_add_tail(&virt_dev->vdev_list_entry, &vdev_list);
-+
-+ vdisk_report_registering(virt_dev);
-+
-+ virt_dev->virt_id = scst_register_virtual_device(virt_dev->vdev_devt,
-+ virt_dev->name);
-+ if (virt_dev->virt_id < 0) {
-+ res = virt_dev->virt_id;
-+ goto out_del;
-+ }
-+
-+ TRACE_DBG("Registered virt_dev %s with id %d", virt_dev->name,
-+ virt_dev->virt_id);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_del:
-+ list_del(&virt_dev->vdev_list_entry);
-+
-+out_destroy:
-+ vdev_destroy(virt_dev);
-+ goto out;
-+}
-+
-+static ssize_t vcdrom_add_device(const char *device_name, char *params)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_vdisk_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ res = __vcdrom_add_device(device_name, params);
-+
-+ mutex_unlock(&scst_vdisk_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+	return res;
-+}
-+
-+static ssize_t vcdrom_del_device(const char *device_name)
-+{
-+ int res = 0;
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_vdisk_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ virt_dev = vdev_find(device_name);
-+ if (virt_dev == NULL) {
-+ PRINT_ERROR("Device %s not found", device_name);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ vdev_del_device(virt_dev);
-+
-+out_unlock:
-+ mutex_unlock(&scst_vdisk_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int vcdrom_change(struct scst_vdisk_dev *virt_dev,
-+ char *buffer)
-+{
-+ loff_t err;
-+ char *old_fn, *p, *pp;
-+ const char *filename = NULL;
-+ int length = strlen(buffer);
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ p = buffer;
-+
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+ filename = p;
-+ p = &buffer[length-1];
-+ pp = &buffer[length];
-+ while (isspace(*p) && (*p != '\0')) {
-+ pp = p;
-+ p--;
-+ }
-+ *pp = '\0';
-+
-+ res = scst_suspend_activity(true);
-+ if (res != 0)
-+ goto out;
-+
-+ /* To sync with detach*() functions */
-+ mutex_lock(&scst_mutex);
-+
-+ if (*filename == '\0') {
-+ virt_dev->cdrom_empty = 1;
-+ TRACE_DBG("%s", "No media");
-+ } else if (*filename != '/') {
-+ PRINT_ERROR("File path \"%s\" is not absolute", filename);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ } else
-+ virt_dev->cdrom_empty = 0;
-+
-+ old_fn = virt_dev->filename;
-+
-+ if (!virt_dev->cdrom_empty) {
-+ char *fn = kstrdup(filename, GFP_KERNEL);
-+ if (fn == NULL) {
-+ PRINT_ERROR("%s", "Allocation of filename failed");
-+ res = -ENOMEM;
-+ goto out_unlock;
-+ }
-+
-+ virt_dev->filename = fn;
-+
-+ res = vdisk_get_file_size(virt_dev->filename,
-+ virt_dev->blockio, &err);
-+ if (res != 0)
-+ goto out_free_fn;
-+ } else {
-+ err = 0;
-+ virt_dev->filename = NULL;
-+ }
-+
-+ if (virt_dev->prevent_allow_medium_removal) {
-+		PRINT_ERROR("Medium removal is prevented for "
-+			"virtual device %s, cannot change media", virt_dev->name);
-+ res = -EINVAL;
-+ goto out_free_fn;
-+ }
-+
-+ virt_dev->file_size = err;
-+ virt_dev->nblocks = virt_dev->file_size >> virt_dev->block_shift;
-+ if (!virt_dev->cdrom_empty)
-+ virt_dev->media_changed = 1;
-+
-+ mutex_unlock(&scst_mutex);
-+
-+ scst_dev_del_all_thr_data(virt_dev->dev);
-+
-+ if (!virt_dev->cdrom_empty) {
-+ PRINT_INFO("Changed SCSI target virtual cdrom %s "
-+ "(file=\"%s\", fs=%lldMB, bs=%d, nblocks=%lld,"
-+ " cyln=%lld%s)", virt_dev->name,
-+ vdev_get_filename(virt_dev),
-+ virt_dev->file_size >> 20, virt_dev->block_size,
-+ (long long unsigned int)virt_dev->nblocks,
-+ (long long unsigned int)virt_dev->nblocks/64/32,
-+ virt_dev->nblocks < 64*32 ? " !WARNING! cyln less "
-+ "than 1" : "");
-+ } else {
-+ PRINT_INFO("Removed media from SCSI target virtual cdrom %s",
-+ virt_dev->name);
-+ }
-+
-+ kfree(old_fn);
-+
-+out_resume:
-+ scst_resume_activity();
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free_fn:
-+ kfree(virt_dev->filename);
-+ virt_dev->filename = old_fn;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+ goto out_resume;
-+}
-+
-+static int vcdrom_sysfs_process_filename_store(struct scst_sysfs_work_item *work)
-+{
-+ int res;
-+ struct scst_device *dev = work->dev;
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+	/* It's safe: we hold a dev_kobj reference and dh_priv was set in attach() */
-+ virt_dev = dev->dh_priv;
-+
-+ res = vcdrom_change(virt_dev, work->buf);
-+
-+ kobject_put(&dev->dev_kobj);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t vcdrom_sysfs_filename_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ char *i_buf;
-+ struct scst_sysfs_work_item *work;
-+ struct scst_device *dev;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+
-+ i_buf = kasprintf(GFP_KERNEL, "%.*s", (int)count, buf);
-+ if (i_buf == NULL) {
-+ PRINT_ERROR("Unable to alloc intermediate buffer with size %zd",
-+ count+1);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ res = scst_alloc_sysfs_work(vcdrom_sysfs_process_filename_store,
-+ false, &work);
-+ if (res != 0)
-+ goto out_free;
-+
-+ work->buf = i_buf;
-+ work->dev = dev;
-+
-+ kobject_get(&dev->dev_kobj);
-+
-+ res = scst_sysfs_queue_wait_work(work);
-+ if (res == 0)
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free:
-+ kfree(i_buf);
-+ goto out;
-+}
-+
-+static ssize_t vdev_sysfs_size_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos = 0;
-+ struct scst_device *dev;
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = dev->dh_priv;
-+
-+ pos = sprintf(buf, "%lld\n", virt_dev->file_size / 1024 / 1024);
-+
-+ TRACE_EXIT_RES(pos);
-+ return pos;
-+}
-+
-+static ssize_t vdisk_sysfs_blocksize_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos = 0;
-+ struct scst_device *dev;
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = dev->dh_priv;
-+
-+ pos = sprintf(buf, "%d\n%s", (int)virt_dev->block_size,
-+ (virt_dev->block_size == DEF_DISK_BLOCKSIZE) ? "" :
-+ SCST_SYSFS_KEY_MARK "\n");
-+
-+ TRACE_EXIT_RES(pos);
-+ return pos;
-+}
-+
-+static ssize_t vdisk_sysfs_rd_only_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos = 0;
-+ struct scst_device *dev;
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = dev->dh_priv;
-+
-+ pos = sprintf(buf, "%d\n%s", virt_dev->rd_only ? 1 : 0,
-+ (virt_dev->rd_only == DEF_RD_ONLY) ? "" :
-+ SCST_SYSFS_KEY_MARK "\n");
-+
-+ TRACE_EXIT_RES(pos);
-+ return pos;
-+}
-+
-+static ssize_t vdisk_sysfs_wt_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos = 0;
-+ struct scst_device *dev;
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = dev->dh_priv;
-+
-+ pos = sprintf(buf, "%d\n%s", virt_dev->wt_flag ? 1 : 0,
-+ (virt_dev->wt_flag == DEF_WRITE_THROUGH) ? "" :
-+ SCST_SYSFS_KEY_MARK "\n");
-+
-+ TRACE_EXIT_RES(pos);
-+ return pos;
-+}
-+
-+static ssize_t vdisk_sysfs_tp_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos = 0;
-+ struct scst_device *dev;
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = dev->dh_priv;
-+
-+ pos = sprintf(buf, "%d\n%s", virt_dev->thin_provisioned ? 1 : 0,
-+ (virt_dev->thin_provisioned == DEF_THIN_PROVISIONED) ? "" :
-+ SCST_SYSFS_KEY_MARK "\n");
-+
-+ TRACE_EXIT_RES(pos);
-+ return pos;
-+}
-+
-+static ssize_t vdisk_sysfs_nv_cache_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos = 0;
-+ struct scst_device *dev;
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = dev->dh_priv;
-+
-+ pos = sprintf(buf, "%d\n%s", virt_dev->nv_cache ? 1 : 0,
-+ (virt_dev->nv_cache == DEF_NV_CACHE) ? "" :
-+ SCST_SYSFS_KEY_MARK "\n");
-+
-+ TRACE_EXIT_RES(pos);
-+ return pos;
-+}
-+
-+static ssize_t vdisk_sysfs_o_direct_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos = 0;
-+ struct scst_device *dev;
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = dev->dh_priv;
-+
-+ pos = sprintf(buf, "%d\n%s", virt_dev->o_direct_flag ? 1 : 0,
-+ (virt_dev->o_direct_flag == DEF_O_DIRECT) ? "" :
-+ SCST_SYSFS_KEY_MARK "\n");
-+
-+ TRACE_EXIT_RES(pos);
-+ return pos;
-+}
-+
-+static ssize_t vdisk_sysfs_removable_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos = 0;
-+ struct scst_device *dev;
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = dev->dh_priv;
-+
-+ pos = sprintf(buf, "%d\n", virt_dev->removable ? 1 : 0);
-+
-+ if ((virt_dev->dev->type != TYPE_ROM) &&
-+ (virt_dev->removable != DEF_REMOVABLE))
-+ pos += sprintf(&buf[pos], "%s\n", SCST_SYSFS_KEY_MARK);
-+
-+ TRACE_EXIT_RES(pos);
-+ return pos;
-+}
-+
-+static int vdev_sysfs_process_get_filename(struct scst_sysfs_work_item *work)
-+{
-+ int res = 0;
-+ struct scst_device *dev;
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ dev = work->dev;
-+
-+	/*
-+	 * Since we have a get() on dev->dev_kobj, we cannot simply mutex_lock
-+	 * scst_vdisk_mutex, because otherwise we could fall into a deadlock
-+	 * with vdisk_del_device(), which waits for the last ref to dev_kobj
-+	 * under scst_vdisk_mutex.
-+	 */
-+ while (!mutex_trylock(&scst_vdisk_mutex)) {
-+ if ((volatile bool)(dev->dev_unregistering)) {
-+ TRACE_MGMT_DBG("Skipping being unregistered dev %s",
-+ dev->virt_name);
-+ res = -ENOENT;
-+ goto out_put;
-+ }
-+ if (signal_pending(current)) {
-+ res = -EINTR;
-+ goto out_put;
-+ }
-+ msleep(100);
-+ }
-+
-+ virt_dev = dev->dh_priv;
-+
-+ if (virt_dev == NULL)
-+ goto out_unlock;
-+
-+ if (virt_dev->filename != NULL)
-+ work->res_buf = kasprintf(GFP_KERNEL, "%s\n%s\n",
-+ vdev_get_filename(virt_dev), SCST_SYSFS_KEY_MARK);
-+ else
-+ work->res_buf = kasprintf(GFP_KERNEL, "%s\n",
-+ vdev_get_filename(virt_dev));
-+
-+out_unlock:
-+ mutex_unlock(&scst_vdisk_mutex);
-+
-+out_put:
-+ kobject_put(&dev->dev_kobj);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
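-+
-+/*
-+ * Illustrative sketch, not part of the original code above: the
-+ * trylock-poll pattern used in vdev_sysfs_process_get_filename().
-+ * Blocking on the mutex could deadlock with the deletion path, which
-+ * holds the mutex while waiting for our kobject reference to go away,
-+ * so poll with trylock and bail out if the object is being deleted.
-+ * All names except the mutex/signal/msleep helpers are hypothetical.
-+ */
-+static int example_poll_lock(struct mutex *m, const bool *unregistering)
-+{
-+	while (!mutex_trylock(m)) {
-+		if (*unregistering)
-+			return -ENOENT;	/* object is going away */
-+		if (signal_pending(current))
-+			return -EINTR;	/* let the user give up */
-+		msleep(100);		/* back off, then retry */
-+	}
-+	return 0;			/* mutex acquired */
-+}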
-+
-+static ssize_t vdev_sysfs_filename_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int res = 0;
-+ struct scst_device *dev;
-+ struct scst_sysfs_work_item *work;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+
-+ res = scst_alloc_sysfs_work(vdev_sysfs_process_get_filename,
-+ true, &work);
-+ if (res != 0)
-+ goto out;
-+
-+ work->dev = dev;
-+
-+ kobject_get(&dev->dev_kobj);
-+
-+ scst_sysfs_work_get(work);
-+
-+ res = scst_sysfs_queue_wait_work(work);
-+ if (res != 0)
-+ goto out_put;
-+
-+ res = snprintf(buf, SCST_SYSFS_BLOCK_SIZE, "%s\n", work->res_buf);
-+
-+out_put:
-+ scst_sysfs_work_put(work);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int vdisk_sysfs_process_resync_size_store(
-+ struct scst_sysfs_work_item *work)
-+{
-+ int res;
-+ struct scst_device *dev = work->dev;
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+	/* It's safe: we hold a dev_kobj reference and dh_priv was set in attach() */
-+ virt_dev = dev->dh_priv;
-+
-+ res = vdisk_resync_size(virt_dev);
-+
-+ kobject_put(&dev->dev_kobj);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t vdisk_sysfs_resync_size_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_device *dev;
-+ struct scst_sysfs_work_item *work;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+
-+ res = scst_alloc_sysfs_work(vdisk_sysfs_process_resync_size_store,
-+ false, &work);
-+ if (res != 0)
-+ goto out;
-+
-+ work->dev = dev;
-+
-+ kobject_get(&dev->dev_kobj);
-+
-+ res = scst_sysfs_queue_wait_work(work);
-+ if (res == 0)
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t vdev_sysfs_t10_dev_id_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res, i;
-+ struct scst_device *dev;
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = dev->dh_priv;
-+
-+ write_lock(&vdisk_serial_rwlock);
-+
-+ if ((count > sizeof(virt_dev->t10_dev_id)) ||
-+ ((count == sizeof(virt_dev->t10_dev_id)) &&
-+ (buf[count-1] != '\n'))) {
-+ PRINT_ERROR("T10 device id is too long (max %zd "
-+ "characters)", sizeof(virt_dev->t10_dev_id)-1);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ memset(virt_dev->t10_dev_id, 0, sizeof(virt_dev->t10_dev_id));
-+ memcpy(virt_dev->t10_dev_id, buf, count);
-+
-+ i = 0;
-+ while (i < sizeof(virt_dev->t10_dev_id)) {
-+ if (virt_dev->t10_dev_id[i] == '\n') {
-+ virt_dev->t10_dev_id[i] = '\0';
-+ break;
-+ }
-+ i++;
-+ }
-+
-+ virt_dev->t10_dev_id_set = 1;
-+
-+ res = count;
-+
-+ PRINT_INFO("T10 device id for device %s changed to %s", virt_dev->name,
-+ virt_dev->t10_dev_id);
-+
-+out_unlock:
-+ write_unlock(&vdisk_serial_rwlock);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t vdev_sysfs_t10_dev_id_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos = 0;
-+ struct scst_device *dev;
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = dev->dh_priv;
-+
-+ read_lock(&vdisk_serial_rwlock);
-+ pos = sprintf(buf, "%s\n%s", virt_dev->t10_dev_id,
-+ virt_dev->t10_dev_id_set ? SCST_SYSFS_KEY_MARK "\n" : "");
-+ read_unlock(&vdisk_serial_rwlock);
-+
-+ TRACE_EXIT_RES(pos);
-+ return pos;
-+}
-+
-+static ssize_t vdev_sysfs_usn_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res, i;
-+ struct scst_device *dev;
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = dev->dh_priv;
-+
-+ write_lock(&vdisk_serial_rwlock);
-+
-+ if ((count > sizeof(virt_dev->usn)) ||
-+ ((count == sizeof(virt_dev->usn)) &&
-+ (buf[count-1] != '\n'))) {
-+ PRINT_ERROR("USN is too long (max %zd "
-+ "characters)", sizeof(virt_dev->usn)-1);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ memset(virt_dev->usn, 0, sizeof(virt_dev->usn));
-+ memcpy(virt_dev->usn, buf, count);
-+
-+ i = 0;
-+ while (i < sizeof(virt_dev->usn)) {
-+ if (virt_dev->usn[i] == '\n') {
-+ virt_dev->usn[i] = '\0';
-+ break;
-+ }
-+ i++;
-+ }
-+
-+ virt_dev->usn_set = 1;
-+
-+ res = count;
-+
-+ PRINT_INFO("USN for device %s changed to %s", virt_dev->name,
-+ virt_dev->usn);
-+
-+out_unlock:
-+ write_unlock(&vdisk_serial_rwlock);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t vdev_sysfs_usn_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos = 0;
-+ struct scst_device *dev;
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ TRACE_ENTRY();
-+
-+ dev = container_of(kobj, struct scst_device, dev_kobj);
-+ virt_dev = dev->dh_priv;
-+
-+ read_lock(&vdisk_serial_rwlock);
-+ pos = sprintf(buf, "%s\n%s", virt_dev->usn,
-+ virt_dev->usn_set ? SCST_SYSFS_KEY_MARK "\n" : "");
-+ read_unlock(&vdisk_serial_rwlock);
-+
-+ TRACE_EXIT_RES(pos);
-+ return pos;
-+}
-+
-+static int __init init_scst_vdisk(struct scst_dev_type *devtype)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ devtype->module = THIS_MODULE;
-+
-+ res = scst_register_virtual_dev_driver(devtype);
-+ if (res < 0)
-+ goto out;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+	return res;
-+}
-+
-+static void exit_scst_vdisk(struct scst_dev_type *devtype)
-+{
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_vdisk_mutex);
-+ while (1) {
-+ struct scst_vdisk_dev *virt_dev;
-+
-+ if (list_empty(&vdev_list))
-+ break;
-+
-+ virt_dev = list_entry(vdev_list.next, typeof(*virt_dev),
-+ vdev_list_entry);
-+
-+ vdev_del_device(virt_dev);
-+ }
-+ mutex_unlock(&scst_vdisk_mutex);
-+
-+ scst_unregister_virtual_dev_driver(devtype);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int __init init_scst_vdisk_driver(void)
-+{
-+ int res;
-+
-+ vdisk_thr_cachep = KMEM_CACHE(scst_vdisk_thr, SCST_SLAB_FLAGS);
-+ if (vdisk_thr_cachep == NULL) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ blockio_work_cachep = KMEM_CACHE(scst_blockio_work, SCST_SLAB_FLAGS);
-+ if (blockio_work_cachep == NULL) {
-+ res = -ENOMEM;
-+ goto out_free_vdisk_cache;
-+ }
-+
-+ if (num_threads < 1) {
-+		PRINT_ERROR("num_threads cannot be less than 1, using "
-+			"default %d", DEF_NUM_THREADS);
-+ num_threads = DEF_NUM_THREADS;
-+ }
-+
-+ vdisk_file_devtype.threads_num = num_threads;
-+ vcdrom_devtype.threads_num = num_threads;
-+
-+ atomic_set(&nullio_thr_data.hdr.ref, 1); /* never destroy it */
-+
-+ res = init_scst_vdisk(&vdisk_file_devtype);
-+ if (res != 0)
-+ goto out_free_slab;
-+
-+ res = init_scst_vdisk(&vdisk_blk_devtype);
-+ if (res != 0)
-+ goto out_free_vdisk;
-+
-+ res = init_scst_vdisk(&vdisk_null_devtype);
-+ if (res != 0)
-+ goto out_free_blk;
-+
-+ res = init_scst_vdisk(&vcdrom_devtype);
-+ if (res != 0)
-+ goto out_free_null;
-+
-+out:
-+ return res;
-+
-+out_free_null:
-+ exit_scst_vdisk(&vdisk_null_devtype);
-+
-+out_free_blk:
-+ exit_scst_vdisk(&vdisk_blk_devtype);
-+
-+out_free_vdisk:
-+ exit_scst_vdisk(&vdisk_file_devtype);
-+
-+out_free_slab:
-+ kmem_cache_destroy(blockio_work_cachep);
-+
-+out_free_vdisk_cache:
-+ kmem_cache_destroy(vdisk_thr_cachep);
-+ goto out;
-+}
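-+
-+/*
-+ * Illustrative sketch, not part of the original code above: the goto
-+ * unwind ladder used in init_scst_vdisk_driver(). Each failing step
-+ * jumps to a label that undoes, in reverse order, only the steps that
-+ * already succeeded. All names are hypothetical.
-+ */
-+static int __init example_init(void)
-+{
-+	int res;
-+
-+	res = init_a();
-+	if (res != 0)
-+		goto out;
-+	res = init_b();
-+	if (res != 0)
-+		goto undo_a;
-+	res = init_c();
-+	if (res != 0)
-+		goto undo_b;
-+
-+out:
-+	return res;
-+
-+undo_b:
-+	exit_b();
-+undo_a:
-+	exit_a();
-+	goto out;
-+}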
-+
-+static void __exit exit_scst_vdisk_driver(void)
-+{
-+ exit_scst_vdisk(&vdisk_null_devtype);
-+ exit_scst_vdisk(&vdisk_blk_devtype);
-+ exit_scst_vdisk(&vdisk_file_devtype);
-+ exit_scst_vdisk(&vcdrom_devtype);
-+
-+ kmem_cache_destroy(blockio_work_cachep);
-+ kmem_cache_destroy(vdisk_thr_cachep);
-+}
-+
-+module_init(init_scst_vdisk_driver);
-+module_exit(exit_scst_vdisk_driver);
-+
-+MODULE_AUTHOR("Vladislav Bolkhovitin & Leonid Stoljar");
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("SCSI disk (type 0) and CDROM (type 5) dev handler for "
-+ "SCST using files on file systems or block devices");
-+MODULE_VERSION(SCST_VERSION_STRING);
-diff -uprN orig/linux-3.2/drivers/scst/scst_tg.c linux-3.2/drivers/scst/scst_tg.c
---- orig/linux-3.2/drivers/scst/scst_tg.c
-+++ linux-3.2/drivers/scst/scst_tg.c
-@@ -0,0 +1,809 @@
-+/*
-+ * scst_tg.c
-+ *
-+ * SCSI target group related code.
-+ *
-+ * Copyright (C) 2011 Bart Van Assche <bvanassche@acm.org>.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * version 2 as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <asm/unaligned.h>
-+#include <scst/scst.h>
-+#include "scst_priv.h"
-+
-+static struct list_head scst_dev_group_list;
-+
-+/* Look up a device by name. */
-+static struct scst_device *__lookup_dev(const char *name)
-+{
-+ struct scst_device *dev;
-+
-+ list_for_each_entry(dev, &scst_dev_list, dev_list_entry)
-+ if (strcmp(dev->virt_name, name) == 0)
-+ return dev;
-+
-+ return NULL;
-+}
-+
-+/* Look up a target by name. */
-+static struct scst_tgt *__lookup_tgt(const char *name)
-+{
-+ struct scst_tgt_template *t;
-+ struct scst_tgt *tgt;
-+
-+ list_for_each_entry(t, &scst_template_list, scst_template_list_entry)
-+ list_for_each_entry(tgt, &t->tgt_list, tgt_list_entry)
-+ if (strcmp(tgt->tgt_name, name) == 0)
-+ return tgt;
-+
-+ return NULL;
-+}
-+
-+/* Look up a target by name in the given device group. */
-+static struct scst_tg_tgt *__lookup_dg_tgt(struct scst_dev_group *dg,
-+ const char *tgt_name)
-+{
-+ struct scst_target_group *tg;
-+ struct scst_tg_tgt *tg_tgt;
-+
-+ BUG_ON(!dg);
-+ BUG_ON(!tgt_name);
-+ list_for_each_entry(tg, &dg->tg_list, entry)
-+ list_for_each_entry(tg_tgt, &tg->tgt_list, entry)
-+ if (strcmp(tg_tgt->name, tgt_name) == 0)
-+ return tg_tgt;
-+
-+ return NULL;
-+}
-+
-+/* Look up a target group by name in the given device group. */
-+static struct scst_target_group *
-+__lookup_tg_by_name(struct scst_dev_group *dg, const char *name)
-+{
-+ struct scst_target_group *tg;
-+
-+ list_for_each_entry(tg, &dg->tg_list, entry)
-+ if (strcmp(tg->name, name) == 0)
-+ return tg;
-+
-+ return NULL;
-+}
-+
-+/* Look up a device node by device pointer in the given device group. */
-+static struct scst_dg_dev *__lookup_dg_dev_by_dev(struct scst_dev_group *dg,
-+ struct scst_device *dev)
-+{
-+ struct scst_dg_dev *dgd;
-+
-+ list_for_each_entry(dgd, &dg->dev_list, entry)
-+ if (dgd->dev == dev)
-+ return dgd;
-+
-+ return NULL;
-+}
-+
-+/* Look up a device node by name in the given device group. */
-+static struct scst_dg_dev *__lookup_dg_dev_by_name(struct scst_dev_group *dg,
-+ const char *name)
-+{
-+ struct scst_dg_dev *dgd;
-+
-+ list_for_each_entry(dgd, &dg->dev_list, entry)
-+ if (strcmp(dgd->dev->virt_name, name) == 0)
-+ return dgd;
-+
-+ return NULL;
-+}
-+
-+/* Look up a device node by name in any device group. */
-+static struct scst_dg_dev *__global_lookup_dg_dev_by_name(const char *name)
-+{
-+ struct scst_dev_group *dg;
-+ struct scst_dg_dev *dgd;
-+
-+ list_for_each_entry(dg, &scst_dev_group_list, entry) {
-+ dgd = __lookup_dg_dev_by_name(dg, name);
-+ if (dgd)
-+ return dgd;
-+ }
-+ return NULL;
-+}
-+
-+/* Look up a device group by name. */
-+static struct scst_dev_group *__lookup_dg_by_name(const char *name)
-+{
-+ struct scst_dev_group *dg;
-+
-+ list_for_each_entry(dg, &scst_dev_group_list, entry)
-+ if (strcmp(dg->name, name) == 0)
-+ return dg;
-+
-+ return NULL;
-+}
-+
-+/* Look up a device group by device pointer. */
-+static struct scst_dev_group *__lookup_dg_by_dev(struct scst_device *dev)
-+{
-+ struct scst_dev_group *dg;
-+
-+ list_for_each_entry(dg, &scst_dev_group_list, entry)
-+ if (__lookup_dg_dev_by_dev(dg, dev))
-+ return dg;
-+
-+ return NULL;
-+}
-+
-+/*
-+ * Target group contents management.
-+ */
-+
-+static void scst_release_tg_tgt(struct kobject *kobj)
-+{
-+ struct scst_tg_tgt *tg_tgt;
-+
-+ tg_tgt = container_of(kobj, struct scst_tg_tgt, kobj);
-+ kfree(tg_tgt->name);
-+ kfree(tg_tgt);
-+}
-+
-+static struct kobj_type scst_tg_tgt_ktype = {
-+ .sysfs_ops = &scst_sysfs_ops,
-+ .release = scst_release_tg_tgt,
-+};
-+
-+/**
-+ * scst_tg_tgt_add() - Add a target to a target group.
-+ */
-+int scst_tg_tgt_add(struct scst_target_group *tg, const char *name)
-+{
-+ struct scst_tg_tgt *tg_tgt;
-+ struct scst_tgt *tgt;
-+ int res;
-+
-+ TRACE_ENTRY();
-+ BUG_ON(!tg);
-+ BUG_ON(!name);
-+ res = -ENOMEM;
-+ tg_tgt = kzalloc(sizeof *tg_tgt, GFP_KERNEL);
-+ if (!tg_tgt)
-+ goto out;
-+ tg_tgt->tg = tg;
-+ kobject_init(&tg_tgt->kobj, &scst_tg_tgt_ktype);
-+ tg_tgt->name = kstrdup(name, GFP_KERNEL);
-+ if (!tg_tgt->name)
-+ goto out_put;
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res)
-+ goto out_put;
-+ res = -EEXIST;
-+ tgt = __lookup_tgt(name);
-+ if (__lookup_dg_tgt(tg->dg, name))
-+ goto out_unlock;
-+ tg_tgt->tgt = tgt;
-+ res = scst_tg_tgt_sysfs_add(tg, tg_tgt);
-+ if (res)
-+ goto out_unlock;
-+ list_add_tail(&tg_tgt->entry, &tg->tgt_list);
-+ res = 0;
-+ mutex_unlock(&scst_mutex);
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+out_put:
-+ kobject_put(&tg_tgt->kobj);
-+ goto out;
-+}
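-+
-+/*
-+ * Note on the error handling above (illustrative): once kobject_init()
-+ * has run, every failure path must release the object with
-+ * kobject_put() rather than kfree(), so that scst_release_tg_tgt()
-+ * frees tg_tgt->name and tg_tgt exactly once. The same pattern is used
-+ * for target groups and device groups below.
-+ */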
-+
-+static void __scst_tg_tgt_remove(struct scst_target_group *tg,
-+ struct scst_tg_tgt *tg_tgt)
-+{
-+ TRACE_ENTRY();
-+ list_del(&tg_tgt->entry);
-+ scst_tg_tgt_sysfs_del(tg, tg_tgt);
-+ kobject_put(&tg_tgt->kobj);
-+ TRACE_EXIT();
-+}
-+
-+/**
-+ * scst_tg_tgt_remove_by_name() - Remove a target from a target group.
-+ */
-+int scst_tg_tgt_remove_by_name(struct scst_target_group *tg, const char *name)
-+{
-+ struct scst_tg_tgt *tg_tgt;
-+ int res;
-+
-+ TRACE_ENTRY();
-+ BUG_ON(!tg);
-+ BUG_ON(!name);
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res)
-+ goto out;
-+ res = -EINVAL;
-+ tg_tgt = __lookup_dg_tgt(tg->dg, name);
-+ if (!tg_tgt)
-+ goto out_unlock;
-+ __scst_tg_tgt_remove(tg, tg_tgt);
-+ res = 0;
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* Caller must hold scst_mutex. Called from the target removal code. */
-+void scst_tg_tgt_remove_by_tgt(struct scst_tgt *tgt)
-+{
-+ struct scst_dev_group *dg;
-+ struct scst_target_group *tg;
-+ struct scst_tg_tgt *t, *t2;
-+
-+ BUG_ON(!tgt);
-+ list_for_each_entry(dg, &scst_dev_group_list, entry)
-+ list_for_each_entry(tg, &dg->tg_list, entry)
-+ list_for_each_entry_safe(t, t2, &tg->tgt_list, entry)
-+ if (t->tgt == tgt)
-+ __scst_tg_tgt_remove(tg, t);
-+}
-+
-+/*
-+ * Target group management.
-+ */
-+
-+static void scst_release_tg(struct kobject *kobj)
-+{
-+ struct scst_target_group *tg;
-+
-+ tg = container_of(kobj, struct scst_target_group, kobj);
-+ kfree(tg->name);
-+ kfree(tg);
-+}
-+
-+static struct kobj_type scst_tg_ktype = {
-+ .sysfs_ops = &scst_sysfs_ops,
-+ .release = scst_release_tg,
-+};
-+
-+/**
-+ * scst_tg_add() - Add a target group.
-+ */
-+int scst_tg_add(struct scst_dev_group *dg, const char *name)
-+{
-+ struct scst_target_group *tg;
-+ int res;
-+
-+ TRACE_ENTRY();
-+ res = -ENOMEM;
-+ tg = kzalloc(sizeof *tg, GFP_KERNEL);
-+ if (!tg)
-+ goto out;
-+ kobject_init(&tg->kobj, &scst_tg_ktype);
-+ tg->name = kstrdup(name, GFP_KERNEL);
-+ if (!tg->name)
-+ goto out_put;
-+ tg->dg = dg;
-+ tg->state = SCST_TG_STATE_OPTIMIZED;
-+ INIT_LIST_HEAD(&tg->tgt_list);
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res)
-+ goto out_put;
-+ res = -EEXIST;
-+ if (__lookup_tg_by_name(dg, name))
-+ goto out_unlock;
-+ res = scst_tg_sysfs_add(dg, tg);
-+ if (res)
-+ goto out_unlock;
-+ list_add_tail(&tg->entry, &dg->tg_list);
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+out_put:
-+ kobject_put(&tg->kobj);
-+ goto out;
-+}
-+
-+static void __scst_tg_remove(struct scst_dev_group *dg,
-+ struct scst_target_group *tg)
-+{
-+ struct scst_tg_tgt *tg_tgt;
-+
-+ TRACE_ENTRY();
-+ BUG_ON(!dg);
-+ BUG_ON(!tg);
-+ while (!list_empty(&tg->tgt_list)) {
-+ tg_tgt = list_first_entry(&tg->tgt_list, struct scst_tg_tgt,
-+ entry);
-+ __scst_tg_tgt_remove(tg, tg_tgt);
-+ }
-+ list_del(&tg->entry);
-+ scst_tg_sysfs_del(tg);
-+ kobject_put(&tg->kobj);
-+ TRACE_EXIT();
-+}
-+
-+/**
-+ * scst_tg_remove_by_name() - Remove a target group.
-+ */
-+int scst_tg_remove_by_name(struct scst_dev_group *dg, const char *name)
-+{
-+ struct scst_target_group *tg;
-+ int res;
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res)
-+ goto out;
-+ res = -EINVAL;
-+ tg = __lookup_tg_by_name(dg, name);
-+ if (!tg)
-+ goto out_unlock;
-+ __scst_tg_remove(dg, tg);
-+ res = 0;
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+out:
-+ return res;
-+}
-+
-+int scst_tg_set_state(struct scst_target_group *tg, enum scst_tg_state state)
-+{
-+ struct scst_dg_dev *dg_dev;
-+ struct scst_device *dev;
-+ struct scst_tgt_dev *tgt_dev;
-+ int res;
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res)
-+ goto out;
-+
-+ tg->state = state;
-+
-+ list_for_each_entry(dg_dev, &tg->dg->dev_list, entry) {
-+ dev = dg_dev->dev;
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ TRACE_MGMT_DBG("ALUA state of tgt_dev %p has changed",
-+ tgt_dev);
-+ scst_gen_aen_or_ua(tgt_dev,
-+ SCST_LOAD_SENSE(scst_sense_asym_access_state_changed));
-+ }
-+ }
-+ mutex_unlock(&scst_mutex);
-+out:
-+ return res;
-+}
-+
-+/*
-+ * Device group contents manipulation.
-+ */
-+
-+/**
-+ * scst_dg_dev_add() - Add a device to a device group.
-+ *
-+ * It is verified whether 'name' refers to an existing device and whether that
-+ * device has not yet been added to any other device group.
-+ */
-+int scst_dg_dev_add(struct scst_dev_group *dg, const char *name)
-+{
-+ struct scst_dg_dev *dgdev;
-+ struct scst_device *dev;
-+ int res;
-+
-+ res = -ENOMEM;
-+ dgdev = kzalloc(sizeof *dgdev, GFP_KERNEL);
-+ if (!dgdev)
-+ goto out;
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res)
-+ goto out_free;
-+ res = -EEXIST;
-+ if (__global_lookup_dg_dev_by_name(name))
-+ goto out_unlock;
-+ res = -EINVAL;
-+ dev = __lookup_dev(name);
-+ if (!dev)
-+ goto out_unlock;
-+ dgdev->dev = dev;
-+ res = scst_dg_dev_sysfs_add(dg, dgdev);
-+ if (res)
-+ goto out_unlock;
-+ list_add_tail(&dgdev->entry, &dg->dev_list);
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ return res;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+out_free:
-+ kfree(dgdev);
-+ goto out;
-+}
-+
-+static void __scst_dg_dev_remove(struct scst_dev_group *dg,
-+ struct scst_dg_dev *dgdev)
-+{
-+ list_del(&dgdev->entry);
-+ scst_dg_dev_sysfs_del(dg, dgdev);
-+ kfree(dgdev);
-+}
-+
-+/**
-+ * scst_dg_dev_remove_by_name() - Remove a device from a device group.
-+ */
-+int scst_dg_dev_remove_by_name(struct scst_dev_group *dg, const char *name)
-+{
-+ struct scst_dg_dev *dgdev;
-+ int res;
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res)
-+ goto out;
-+ res = -EINVAL;
-+ dgdev = __lookup_dg_dev_by_name(dg, name);
-+ if (!dgdev)
-+ goto out_unlock;
-+ __scst_dg_dev_remove(dg, dgdev);
-+ res = 0;
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+out:
-+ return res;
-+}
-+
-+/* Caller must hold scst_mutex. Called from the device removal code. */
-+int scst_dg_dev_remove_by_dev(struct scst_device *dev)
-+{
-+ struct scst_dev_group *dg;
-+ struct scst_dg_dev *dgdev;
-+ int res;
-+
-+ res = -EINVAL;
-+ dg = __lookup_dg_by_dev(dev);
-+ if (!dg)
-+ goto out;
-+ dgdev = __lookup_dg_dev_by_dev(dg, dev);
-+ BUG_ON(!dgdev);
-+ __scst_dg_dev_remove(dg, dgdev);
-+ res = 0;
-+out:
-+ return res;
-+}
-+
-+/*
-+ * Device group management.
-+ */
-+
-+static void scst_release_dg(struct kobject *kobj)
-+{
-+ struct scst_dev_group *dg;
-+
-+ dg = container_of(kobj, struct scst_dev_group, kobj);
-+ kfree(dg->name);
-+ kfree(dg);
-+}
-+
-+static struct kobj_type scst_dg_ktype = {
-+ .sysfs_ops = &scst_sysfs_ops,
-+ .release = scst_release_dg,
-+};
-+
-+/**
-+ * scst_dg_add() - Add a new device group object and make it visible in sysfs.
-+ */
-+int scst_dg_add(struct kobject *parent, const char *name)
-+{
-+ struct scst_dev_group *dg;
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ res = -ENOMEM;
-+ dg = kzalloc(sizeof(*dg), GFP_KERNEL);
-+ if (!dg)
-+ goto out;
-+ kobject_init(&dg->kobj, &scst_dg_ktype);
-+ dg->name = kstrdup(name, GFP_KERNEL);
-+ if (!dg->name)
-+ goto out_put;
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res)
-+ goto out_put;
-+ res = -EEXIST;
-+ if (__lookup_dg_by_name(name))
-+ goto out_unlock;
-+ res = -ENOMEM;
-+ INIT_LIST_HEAD(&dg->dev_list);
-+ INIT_LIST_HEAD(&dg->tg_list);
-+ res = scst_dg_sysfs_add(parent, dg);
-+ if (res)
-+ goto out_unlock;
-+ list_add_tail(&dg->entry, &scst_dev_group_list);
-+ mutex_unlock(&scst_mutex);
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+out_put:
-+ kobject_put(&dg->kobj);
-+ goto out;
-+}
-+
-+static void __scst_dg_remove(struct scst_dev_group *dg)
-+{
-+ struct scst_dg_dev *dgdev;
-+ struct scst_target_group *tg;
-+
-+ list_del(&dg->entry);
-+ scst_dg_sysfs_del(dg);
-+ while (!list_empty(&dg->dev_list)) {
-+ dgdev = list_first_entry(&dg->dev_list, struct scst_dg_dev,
-+ entry);
-+ __scst_dg_dev_remove(dg, dgdev);
-+ }
-+ while (!list_empty(&dg->tg_list)) {
-+ tg = list_first_entry(&dg->tg_list, struct scst_target_group,
-+ entry);
-+ __scst_tg_remove(dg, tg);
-+ }
-+ kobject_put(&dg->kobj);
-+}
-+
-+int scst_dg_remove(const char *name)
-+{
-+ struct scst_dev_group *dg;
-+ int res;
-+
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res)
-+ goto out;
-+ res = -EINVAL;
-+ dg = __lookup_dg_by_name(name);
-+ if (!dg)
-+ goto out_unlock;
-+ __scst_dg_remove(dg);
-+ res = 0;
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+out:
-+ return res;
-+}
-+
-+/*
-+ * Given a pointer to a device_groups/<dg>/devices or
-+ * device_groups/<dg>/target_groups kobject, return the pointer to the
-+ * corresponding device group.
-+ *
-+ * Note: The caller must hold a reference on the kobject to prevent it from
-+ * disappearing before the caller stops using the device group pointer.
-+ */
-+struct scst_dev_group *scst_lookup_dg_by_kobj(struct kobject *kobj)
-+{
-+ int res;
-+ struct scst_dev_group *dg;
-+
-+ dg = NULL;
-+ res = mutex_lock_interruptible(&scst_mutex);
-+ if (res)
-+ goto out;
-+ list_for_each_entry(dg, &scst_dev_group_list, entry)
-+ if (dg->dev_kobj == kobj || dg->tg_kobj == kobj)
-+ goto out_unlock;
-+ dg = NULL;
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+out:
-+ return dg;
-+}
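-+
-+/*
-+ * Illustrative sketch, not part of the original patch: sysfs pins the
-+ * kobject for the duration of a ->show() callback, which satisfies the
-+ * reference requirement described above. The attribute callback below is
-+ * hypothetical and only shows the intended calling pattern.
-+ */
-+#if 0
-+static ssize_t example_dg_show(struct kobject *kobj,
-+	struct kobj_attribute *attr, char *buf)
-+{
-+	struct scst_dev_group *dg = scst_lookup_dg_by_kobj(kobj);
-+
-+	if (dg == NULL)
-+		return -ENOENT;
-+	return scnprintf(buf, PAGE_SIZE, "%s\n", dg->name);
-+}
-+#endif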
-+
-+/*
-+ * Target group module management.
-+ */
-+
-+void scst_tg_init(void)
-+{
-+ INIT_LIST_HEAD(&scst_dev_group_list);
-+}
-+
-+void scst_tg_cleanup(void)
-+{
-+ struct scst_dev_group *tg;
-+
-+ mutex_lock(&scst_mutex);
-+ while (!list_empty(&scst_dev_group_list)) {
-+ tg = list_first_entry(&scst_dev_group_list,
-+ struct scst_dev_group, entry);
-+ __scst_dg_remove(tg);
-+ }
-+ mutex_unlock(&scst_mutex);
-+}
-+
-+/*
-+ * Functions for target group related SCSI command support.
-+ */
-+
-+/**
-+ * scst_lookup_tg_id() - Look up a target port group identifier.
-+ * @dev: SCST device.
-+ * @tgt: SCST target.
-+ *
-+ * Returns the target port group identifier if the lookup succeeded and
-+ * zero if it did not.
-+ */
-+uint16_t scst_lookup_tg_id(struct scst_device *dev, struct scst_tgt *tgt)
-+{
-+ struct scst_dev_group *dg;
-+ struct scst_target_group *tg;
-+ struct scst_tg_tgt *tg_tgt;
-+ uint16_t tg_id = 0;
-+
-+ TRACE_ENTRY();
-+ mutex_lock(&scst_mutex);
-+ dg = __lookup_dg_by_dev(dev);
-+ if (!dg)
-+ goto out_unlock;
-+ tg_tgt = __lookup_dg_tgt(dg, tgt->tgt_name);
-+ if (!tg_tgt)
-+ goto out_unlock;
-+ tg = tg_tgt->tg;
-+ BUG_ON(!tg);
-+ tg_id = tg->group_id;
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+ TRACE_EXIT_RES(tg_id);
-+ return tg_id;
-+}
-+EXPORT_SYMBOL_GPL(scst_lookup_tg_id);
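-+
-+/*
-+ * Illustrative sketch, not part of the original patch: one way a target
-+ * driver might embed the looked-up identifier in a big-endian TARGET PORT
-+ * GROUP field, e.g. while building an INQUIRY device identification
-+ * descriptor. The helper name and buffer handling are hypothetical.
-+ */
-+#if 0
-+static void example_fill_tg_id(uint8_t *p, struct scst_device *dev,
-+	struct scst_tgt *tgt)
-+{
-+	uint16_t tg_id = scst_lookup_tg_id(dev, tgt);
-+
-+	/* Zero means no target port group is configured for this device. */
-+	put_unaligned(cpu_to_be16(tg_id), (__be16 *)p);
-+}
-+#endif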
-+
-+/**
-+ * scst_impl_alua_configured() - Whether implicit ALUA has been configured.
-+ * @dev: Pointer to the SCST device to verify.
-+ */
-+bool scst_impl_alua_configured(struct scst_device *dev)
-+{
-+ struct scst_dev_group *dg;
-+ bool res;
-+
-+ mutex_lock(&scst_mutex);
-+ dg = __lookup_dg_by_dev(dev);
-+ res = dg != NULL;
-+ mutex_unlock(&scst_mutex);
-+
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(scst_impl_alua_configured);
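-+
-+/*
-+ * Illustrative sketch, not part of the original patch: a dev handler could
-+ * gate ALUA-specific processing on this check. The command context shown
-+ * here is hypothetical.
-+ */
-+#if 0
-+static bool example_cmd_needs_alua(struct scst_cmd *cmd)
-+{
-+	/* Only consult target group state if the device is in a group. */
-+	return scst_impl_alua_configured(cmd->dev);
-+}
-+#endif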
-+
-+/**
-+ * scst_tg_get_group_info() - Build REPORT TARGET GROUPS response.
-+ * @buf: Output; set to point at the newly allocated response buffer.
-+ * @length: Response length, including the "RETURN DATA LENGTH" field.
-+ * @dev: Pointer to the SCST device for which to obtain group information.
-+ * @data_format: Three-bit response data format specification.
-+ */
-+int scst_tg_get_group_info(void **buf, uint32_t *length,
-+ struct scst_device *dev, uint8_t data_format)
-+{
-+ struct scst_dev_group *dg;
-+ struct scst_target_group *tg;
-+ struct scst_tg_tgt *tgtgt;
-+ struct scst_tgt *tgt;
-+ uint8_t *p;
-+ uint32_t ret_data_len;
-+ uint16_t rel_tgt_id;
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ BUG_ON(!buf);
-+ BUG_ON(!length);
-+
-+ ret_data_len = 0;
-+
-+ res = -EINVAL;
-+ switch (data_format) {
-+ case 0:
-+ break;
-+ case 1:
-+ /* Extended header */
-+ ret_data_len += 4;
-+ break;
-+ default:
-+ goto out;
-+ }
-+
-+ *length = 4;
-+
-+ mutex_lock(&scst_mutex);
-+
-+ dg = __lookup_dg_by_dev(dev);
-+ if (dg) {
-+ list_for_each_entry(tg, &dg->tg_list, entry) {
-+ /* Target port group descriptor header. */
-+ ret_data_len += 8;
-+ list_for_each_entry(tgtgt, &tg->tgt_list, entry) {
-+ /* Target port descriptor. */
-+ ret_data_len += 4;
-+ }
-+ }
-+ }
-+
-+ *length += ret_data_len;
-+
-+ res = -ENOMEM;
-+ *buf = kzalloc(*length, GFP_KERNEL);
-+ if (!*buf)
-+ goto out_unlock;
-+
-+ p = *buf;
-+ /* Return data length. */
-+ put_unaligned(cpu_to_be32(ret_data_len), (__be32 *)p);
-+ p += 4;
-+ if (data_format == 1) {
-+ /* Extended header */
-+ *p++ = 0x10; /* format = 1 */
-+ *p++ = 0x00; /* implicit transition time = 0 */
-+ p += 2; /* reserved */
-+ }
-+
-+ if (!dg)
-+ goto done;
-+
-+ list_for_each_entry(tg, &dg->tg_list, entry) {
-+ /* Target port group descriptor header. */
-+ *p++ = (tg->preferred ? SCST_TG_PREFERRED : 0) | tg->state;
-+ *p++ = SCST_TG_SUP_OPTIMIZED
-+ | SCST_TG_SUP_NONOPTIMIZED
-+ | SCST_TG_SUP_STANDBY
-+ | SCST_TG_SUP_UNAVAILABLE;
-+ put_unaligned(cpu_to_be16(tg->group_id), (__be16 *)p);
-+ p += 2;
-+ p++; /* reserved */
-+ *p++ = 2; /* status code: implicit transition */
-+ p++; /* vendor specific */
-+ list_for_each_entry(tgtgt, &tg->tgt_list, entry)
-+ (*p)++; /* target port count */
-+ p++;
-+ list_for_each_entry(tgtgt, &tg->tgt_list, entry) {
-+ tgt = tgtgt->tgt;
-+ rel_tgt_id = tgt ? tgt->rel_tgt_id : tgtgt->rel_tgt_id;
-+ /* Target port descriptor. */
-+ p += 2; /* reserved */
-+ /* Relative target port identifier. */
-+ put_unaligned(cpu_to_be16(rel_tgt_id),
-+ (__be16 *)p);
-+ p += 2;
-+ }
-+ }
-+
-+done:
-+ WARN_ON(p - (uint8_t *)*buf != *length);
-+
-+ res = 0;
-+
-+out_unlock:
-+ mutex_unlock(&scst_mutex);
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(scst_tg_get_group_info);
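-+
-+/*
-+ * Illustrative sketch, not part of the original patch: a typical caller
-+ * lets this function allocate the response, copies out at most the
-+ * allocated length and then frees the buffer. Error handling and the
-+ * copy-out step are elided.
-+ */
-+#if 0
-+static int example_report_tpgs(struct scst_device *dev, uint8_t data_format)
-+{
-+	void *buf;
-+	uint32_t len;
-+	int res;
-+
-+	res = scst_tg_get_group_info(&buf, &len, dev, data_format);
-+	if (res != 0)
-+		return res;
-+	/* ... copy min(len, allocation length) bytes to the data-in buffer ... */
-+	kfree(buf);
-+	return 0;
-+}
-+#endif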
-diff -uprN orig/linux-3.2/drivers/scst/scst_proc.c linux-3.2/drivers/scst/scst_proc.c
---- orig/linux-3.2/drivers/scst/scst_proc.c
-+++ linux-3.2/drivers/scst/scst_proc.c
-@@ -0,0 +1,2716 @@
-+/*
-+ * scst_proc.c
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/module.h>
-+
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <linux/slab.h>
-+#include <linux/sched.h>
-+#include <linux/unistd.h>
-+#include <linux/string.h>
-+#include <linux/proc_fs.h>
-+#include <linux/seq_file.h>
-+
-+#include <scst/scst.h>
-+#include "scst_priv.h"
-+#include "scst_mem.h"
-+#include "scst_pres.h"
-+
-+static int scst_proc_init_groups(void);
-+static void scst_proc_cleanup_groups(void);
-+static int scst_proc_assign_handler(char *buf);
-+static int scst_proc_group_add(const char *p, unsigned int addr_method);
-+static int scst_proc_del_free_acg(struct scst_acg *acg, int remove_proc);
-+
-+static struct scst_proc_data scst_version_proc_data;
-+static struct scst_proc_data scst_help_proc_data;
-+static struct scst_proc_data scst_sgv_proc_data;
-+static struct scst_proc_data scst_groups_names_proc_data;
-+static struct scst_proc_data scst_groups_devices_proc_data;
-+static struct scst_proc_data scst_groups_addr_method_proc_data;
-+static struct scst_proc_data scst_sessions_proc_data;
-+static struct scst_proc_data scst_dev_handler_type_proc_data;
-+static struct scst_proc_data scst_tgt_proc_data;
-+static struct scst_proc_data scst_threads_proc_data;
-+static struct scst_proc_data scst_scsi_tgt_proc_data;
-+static struct scst_proc_data scst_dev_handler_proc_data;
-+
-+/*
-+ * Must be less than the 4K page size, since our output routines
-+ * use some slack for overruns.
-+ */
-+#define SCST_PROC_BLOCK_SIZE (PAGE_SIZE - 512)
-+
-+#define SCST_PROC_LOG_ENTRY_NAME "trace_level"
-+#define SCST_PROC_DEV_HANDLER_TYPE_ENTRY_NAME "type"
-+#define SCST_PROC_VERSION_NAME "version"
-+#define SCST_PROC_SESSIONS_NAME "sessions"
-+#define SCST_PROC_HELP_NAME "help"
-+#define SCST_PROC_THREADS_NAME "threads"
-+#define SCST_PROC_GROUPS_ENTRY_NAME "groups"
-+#define SCST_PROC_GROUPS_DEVICES_ENTRY_NAME "devices"
-+#define SCST_PROC_GROUPS_USERS_ENTRY_NAME "names"
-+#define SCST_PROC_GROUPS_ADDR_METHOD_ENTRY_NAME "addr_method"
-+
-+#ifdef CONFIG_SCST_MEASURE_LATENCY
-+#define SCST_PROC_LAT_ENTRY_NAME "latency"
-+#endif
-+
-+#define SCST_PROC_ACTION_ALL 1
-+#define SCST_PROC_ACTION_NONE 2
-+#define SCST_PROC_ACTION_DEFAULT 3
-+#define SCST_PROC_ACTION_ADD 4
-+#define SCST_PROC_ACTION_CLEAR 5
-+#define SCST_PROC_ACTION_MOVE 6
-+#define SCST_PROC_ACTION_DEL 7
-+#define SCST_PROC_ACTION_REPLACE 8
-+#define SCST_PROC_ACTION_VALUE 9
-+#define SCST_PROC_ACTION_ASSIGN 10
-+#define SCST_PROC_ACTION_ADD_GROUP 11
-+#define SCST_PROC_ACTION_DEL_GROUP 12
-+#define SCST_PROC_ACTION_RENAME_GROUP 13
-+#define SCST_PROC_ACTION_DUMP_PRS 14
-+
-+static struct proc_dir_entry *scst_proc_scsi_tgt;
-+static struct proc_dir_entry *scst_proc_groups_root;
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+static struct scst_proc_data scst_log_proc_data;
-+
-+static struct scst_trace_log scst_proc_trace_tbl[] = {
-+ { TRACE_OUT_OF_MEM, "out_of_mem" },
-+ { TRACE_MINOR, "minor" },
-+ { TRACE_SG_OP, "sg" },
-+ { TRACE_MEMORY, "mem" },
-+ { TRACE_BUFF, "buff" },
-+#ifndef GENERATING_UPSTREAM_PATCH
-+ { TRACE_ENTRYEXIT, "entryexit" },
-+#endif
-+ { TRACE_PID, "pid" },
-+ { TRACE_LINE, "line" },
-+ { TRACE_FUNCTION, "function" },
-+ { TRACE_DEBUG, "debug" },
-+ { TRACE_SPECIAL, "special" },
-+ { TRACE_SCSI, "scsi" },
-+ { TRACE_MGMT, "mgmt" },
-+ { TRACE_MGMT_DEBUG, "mgmt_dbg" },
-+ { TRACE_FLOW_CONTROL, "flow_control" },
-+ { TRACE_PRES, "pr" },
-+ { 0, NULL }
-+};
-+
-+static struct scst_trace_log scst_proc_local_trace_tbl[] = {
-+ { TRACE_RTRY, "retry" },
-+ { TRACE_SCSI_SERIALIZING, "scsi_serializing" },
-+ { TRACE_RCV_BOT, "recv_bot" },
-+ { TRACE_SND_BOT, "send_bot" },
-+ { TRACE_RCV_TOP, "recv_top" },
-+ { TRACE_SND_TOP, "send_top" },
-+ { 0, NULL }
-+};
-+#endif
-+
-+static char *scst_proc_help_string =
-+" echo \"assign H:C:I:L HANDLER_NAME\" >/proc/scsi_tgt/scsi_tgt\n"
-+"\n"
-+" echo \"add_group GROUP_NAME [FLAT]\" >/proc/scsi_tgt/scsi_tgt\n"
-+" echo \"add_group GROUP_NAME [LUN]\" >/proc/scsi_tgt/scsi_tgt\n"
-+" echo \"del_group GROUP_NAME\" >/proc/scsi_tgt/scsi_tgt\n"
-+" echo \"rename_group OLD_NAME NEW_NAME\" >/proc/scsi_tgt/scsi_tgt\n"
-+"\n"
-+" echo \"add|del H:C:I:L lun [READ_ONLY]\""
-+" >/proc/scsi_tgt/groups/GROUP_NAME/devices\n"
-+" echo \"replace H:C:I:L lun [READ_ONLY]\""
-+" >/proc/scsi_tgt/groups/GROUP_NAME/devices\n"
-+" echo \"add|del V_NAME lun [READ_ONLY]\""
-+" >/proc/scsi_tgt/groups/GROUP_NAME/devices\n"
-+" echo \"replace V_NAME lun [READ_ONLY]\""
-+" >/proc/scsi_tgt/groups/GROUP_NAME/devices\n"
-+" echo \"clear\" >/proc/scsi_tgt/groups/GROUP_NAME/devices\n"
-+"\n"
-+" echo \"add|del NAME\" >/proc/scsi_tgt/groups/GROUP_NAME/names\n"
-+" echo \"move NAME NEW_GROUP_NAME\" >/proc/scsi_tgt/groups/OLD_GROUP_NAME/names\n"
-+" echo \"clear\" >/proc/scsi_tgt/groups/GROUP_NAME/names\n"
-+"\n"
-+" echo \"DEC|0xHEX|0OCT\" >/proc/scsi_tgt/threads\n"
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+"\n"
-+" echo \"all|none|default\" >/proc/scsi_tgt/[DEV_HANDLER_NAME/]trace_level\n"
-+" echo \"value DEC|0xHEX|0OCT\""
-+" >/proc/scsi_tgt/[DEV_HANDLER_NAME/]trace_level\n"
-+" echo \"set|add|del TOKEN\""
-+" >/proc/scsi_tgt/[DEV_HANDLER_NAME/]trace_level\n"
-+" where TOKEN is one of [debug, function, line, pid, entryexit,\n"
-+" buff, mem, sg, out_of_mem, special, scsi,\n"
-+" mgmt, minor, mgmt_dbg]\n"
-+" Additionally for /proc/scsi_tgt/trace_level there are these TOKENs\n"
-+" [scsi_serializing, retry, recv_bot, send_bot, recv_top, send_top]\n"
-+" echo \"dump_prs dev_name\" >/proc/scsi_tgt/trace_level\n"
-+#endif
-+;
-+
-+static char *scst_proc_dev_handler_type[] = {
-+ "Direct-access device (e.g., magnetic disk)",
-+ "Sequential-access device (e.g., magnetic tape)",
-+ "Printer device",
-+ "Processor device",
-+ "Write-once device (e.g., some optical disks)",
-+ "CD-ROM device",
-+ "Scanner device (obsolete)",
-+ "Optical memory device (e.g., some optical disks)",
-+ "Medium changer device (e.g., jukeboxes)",
-+ "Communications device (obsolete)",
-+ "Defined by ASC IT8 (Graphic arts pre-press devices)",
-+ "Defined by ASC IT8 (Graphic arts pre-press devices)",
-+ "Storage array controller device (e.g., RAID)",
-+ "Enclosure services device",
-+ "Simplified direct-access device (e.g., magnetic disk)",
-+ "Optical card reader/writer device"
-+};
-+
-+static DEFINE_MUTEX(scst_proc_mutex);
-+
-+#include <linux/ctype.h>
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+
-+static DEFINE_MUTEX(scst_log_mutex);
-+
-+int scst_proc_log_entry_write(struct file *file, const char __user *buf,
-+ unsigned long length, unsigned long *log_level,
-+ unsigned long default_level, const struct scst_trace_log *tbl)
-+{
-+ int res = length;
-+ int action;
-+ unsigned long level = 0, oldlevel;
-+ char *buffer, *p, *e;
-+ const struct scst_trace_log *t;
-+ char *data = (char *)PDE(file->f_dentry->d_inode)->data;
-+
-+ TRACE_ENTRY();
-+
-+ if (length > SCST_PROC_BLOCK_SIZE) {
-+ res = -EOVERFLOW;
-+ goto out;
-+ }
-+ if (!buf) {
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ buffer = (char *)__get_free_page(GFP_KERNEL);
-+ if (!buffer) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ if (copy_from_user(buffer, buf, length)) {
-+ res = -EFAULT;
-+ goto out_free;
-+ }
-+ if (length < PAGE_SIZE) {
-+ buffer[length] = '\0';
-+ } else if (buffer[PAGE_SIZE-1]) {
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ /*
-+ * Usage:
-+ * echo "all|none|default" >/proc/scsi_tgt/trace_level
-+ * echo "value DEC|0xHEX|0OCT" >/proc/scsi_tgt/trace_level
-+ * echo "add|del TOKEN" >/proc/scsi_tgt/trace_level
-+ */
-+ p = buffer;
-+ if (!strncasecmp("all", p, 3)) {
-+ action = SCST_PROC_ACTION_ALL;
-+ } else if (!strncasecmp("none", p, 4) || !strncasecmp("null", p, 4)) {
-+ action = SCST_PROC_ACTION_NONE;
-+ } else if (!strncasecmp("default", p, 7)) {
-+ action = SCST_PROC_ACTION_DEFAULT;
-+ } else if (!strncasecmp("add ", p, 4)) {
-+ p += 4;
-+ action = SCST_PROC_ACTION_ADD;
-+ } else if (!strncasecmp("del ", p, 4)) {
-+ p += 4;
-+ action = SCST_PROC_ACTION_DEL;
-+ } else if (!strncasecmp("value ", p, 6)) {
-+ p += 6;
-+ action = SCST_PROC_ACTION_VALUE;
-+ } else if (!strncasecmp("dump_prs ", p, 9)) {
-+ p += 9;
-+ action = SCST_PROC_ACTION_DUMP_PRS;
-+ } else {
-+ if (p[strlen(p) - 1] == '\n')
-+ p[strlen(p) - 1] = '\0';
-+ PRINT_ERROR("Unknown action \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ switch (action) {
-+ case SCST_PROC_ACTION_ALL:
-+ level = TRACE_ALL;
-+ break;
-+ case SCST_PROC_ACTION_DEFAULT:
-+ level = default_level;
-+ break;
-+ case SCST_PROC_ACTION_NONE:
-+ level = TRACE_NULL;
-+ break;
-+ case SCST_PROC_ACTION_ADD:
-+ case SCST_PROC_ACTION_DEL:
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+ e = p;
-+ while (!isspace(*e) && *e != '\0')
-+ e++;
-+ *e = 0;
-+ if (tbl) {
-+ t = tbl;
-+ while (t->token) {
-+ if (!strcasecmp(p, t->token)) {
-+ level = t->val;
-+ break;
-+ }
-+ t++;
-+ }
-+ }
-+ if (level == 0) {
-+ t = scst_proc_trace_tbl;
-+ while (t->token) {
-+ if (!strcasecmp(p, t->token)) {
-+ level = t->val;
-+ break;
-+ }
-+ t++;
-+ }
-+ }
-+ if (level == 0) {
-+ PRINT_ERROR("Unknown token \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+ break;
-+ case SCST_PROC_ACTION_VALUE:
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+ level = simple_strtoul(p, NULL, 0);
-+ break;
-+ case SCST_PROC_ACTION_DUMP_PRS:
-+ {
-+ struct scst_device *dev;
-+
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+ e = p;
-+ while (!isspace(*e) && *e != '\0')
-+ e++;
-+ *e = '\0';
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out_free;
-+ }
-+
-+ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
-+ if (strcmp(dev->virt_name, p) == 0) {
-+ scst_pr_dump_prs(dev, true);
-+ goto out_up;
-+ }
-+ }
-+
-+ PRINT_ERROR("Device %s not found", p);
-+ res = -ENOENT;
-+out_up:
-+ mutex_unlock(&scst_mutex);
-+ goto out_free;
-+ }
-+ }
-+
-+ oldlevel = *log_level;
-+
-+ switch (action) {
-+ case SCST_PROC_ACTION_ADD:
-+ *log_level |= level;
-+ break;
-+ case SCST_PROC_ACTION_DEL:
-+ *log_level &= ~level;
-+ break;
-+ default:
-+ *log_level = level;
-+ break;
-+ }
-+
-+ PRINT_INFO("Changed trace level for \"%s\": "
-+ "old 0x%08lx, new 0x%08lx",
-+ (char *)data, oldlevel, *log_level);
-+
-+out_free:
-+ free_page((unsigned long)buffer);
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(scst_proc_log_entry_write);
-+
-+static ssize_t scst_proc_scsi_tgt_gen_write_log(struct file *file,
-+ const char __user *buf,
-+ size_t length, loff_t *off)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_log_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ res = scst_proc_log_entry_write(file, buf, length,
-+ &trace_flag, SCST_DEFAULT_LOG_FLAGS,
-+ scst_proc_local_trace_tbl);
-+
-+ mutex_unlock(&scst_log_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
-+
-+#ifdef CONFIG_SCST_MEASURE_LATENCY
-+
-+static char *scst_io_size_names[] = {
-+ "<=8K ",
-+ "<=32K ",
-+ "<=128K",
-+ "<=512K",
-+ ">512K "
-+};
-+
-+static int lat_info_show(struct seq_file *seq, void *v)
-+{
-+ int res = 0;
-+ struct scst_acg *acg;
-+ struct scst_session *sess;
-+ char buf[50];
-+
-+ TRACE_ENTRY();
-+
-+ BUILD_BUG_ON(SCST_LATENCY_STATS_NUM != ARRAY_SIZE(scst_io_size_names));
-+ BUILD_BUG_ON(SCST_LATENCY_STATS_NUM != ARRAY_SIZE(sess->sess_latency_stat));
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ list_for_each_entry(acg, &scst_acg_list, acg_list_entry) {
-+ bool header_printed = false;
-+
-+ list_for_each_entry(sess, &acg->acg_sess_list,
-+ acg_sess_list_entry) {
-+ unsigned int i;
-+ int t;
-+ uint64_t scst_time, tgt_time, dev_time;
-+ unsigned int processed_cmds;
-+
-+ if (!header_printed) {
-+ seq_printf(seq, "%-15s %-15s %-46s %-46s %-46s\n",
-+ "T-L names", "Total commands", "SCST latency",
-+ "Target latency", "Dev latency (min/avg/max/all ns)");
-+ header_printed = true;
-+ }
-+
-+ seq_printf(seq, "Target name: %s\nInitiator name: %s\n",
-+ sess->tgt->tgtt->name,
-+ sess->initiator_name);
-+
-+ spin_lock_bh(&sess->lat_lock);
-+
-+ for (i = 0; i < SCST_LATENCY_STATS_NUM ; i++) {
-+ uint64_t scst_time_wr, tgt_time_wr, dev_time_wr;
-+ unsigned int processed_cmds_wr;
-+ uint64_t scst_time_rd, tgt_time_rd, dev_time_rd;
-+ unsigned int processed_cmds_rd;
-+ struct scst_ext_latency_stat *latency_stat;
-+
-+ latency_stat = &sess->sess_latency_stat[i];
-+ scst_time_wr = latency_stat->scst_time_wr;
-+ scst_time_rd = latency_stat->scst_time_rd;
-+ tgt_time_wr = latency_stat->tgt_time_wr;
-+ tgt_time_rd = latency_stat->tgt_time_rd;
-+ dev_time_wr = latency_stat->dev_time_wr;
-+ dev_time_rd = latency_stat->dev_time_rd;
-+ processed_cmds_wr = latency_stat->processed_cmds_wr;
-+ processed_cmds_rd = latency_stat->processed_cmds_rd;
-+
-+ seq_printf(seq, "%-5s %-9s %-15lu ",
-+ "Write", scst_io_size_names[i],
-+ (unsigned long)processed_cmds_wr);
-+ if (processed_cmds_wr == 0)
-+ processed_cmds_wr = 1;
-+
-+ do_div(scst_time_wr, processed_cmds_wr);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_scst_time_wr,
-+ (unsigned long)scst_time_wr,
-+ (unsigned long)latency_stat->max_scst_time_wr,
-+ (unsigned long)latency_stat->scst_time_wr);
-+ seq_printf(seq, "%-47s", buf);
-+
-+ do_div(tgt_time_wr, processed_cmds_wr);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_tgt_time_wr,
-+ (unsigned long)tgt_time_wr,
-+ (unsigned long)latency_stat->max_tgt_time_wr,
-+ (unsigned long)latency_stat->tgt_time_wr);
-+ seq_printf(seq, "%-47s", buf);
-+
-+ do_div(dev_time_wr, processed_cmds_wr);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_dev_time_wr,
-+ (unsigned long)dev_time_wr,
-+ (unsigned long)latency_stat->max_dev_time_wr,
-+ (unsigned long)latency_stat->dev_time_wr);
-+ seq_printf(seq, "%-47s\n", buf);
-+
-+ seq_printf(seq, "%-5s %-9s %-15lu ",
-+ "Read", scst_io_size_names[i],
-+ (unsigned long)processed_cmds_rd);
-+ if (processed_cmds_rd == 0)
-+ processed_cmds_rd = 1;
-+
-+ do_div(scst_time_rd, processed_cmds_rd);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_scst_time_rd,
-+ (unsigned long)scst_time_rd,
-+ (unsigned long)latency_stat->max_scst_time_rd,
-+ (unsigned long)latency_stat->scst_time_rd);
-+ seq_printf(seq, "%-47s", buf);
-+
-+ do_div(tgt_time_rd, processed_cmds_rd);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_tgt_time_rd,
-+ (unsigned long)tgt_time_rd,
-+ (unsigned long)latency_stat->max_tgt_time_rd,
-+ (unsigned long)latency_stat->tgt_time_rd);
-+ seq_printf(seq, "%-47s", buf);
-+
-+ do_div(dev_time_rd, processed_cmds_rd);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_dev_time_rd,
-+ (unsigned long)dev_time_rd,
-+ (unsigned long)latency_stat->max_dev_time_rd,
-+ (unsigned long)latency_stat->dev_time_rd);
-+ seq_printf(seq, "%-47s\n", buf);
-+ }
-+
-+ for (t = SESS_TGT_DEV_LIST_HASH_SIZE-1; t >= 0; t--) {
-+ struct list_head *head =
-+ &sess->sess_tgt_dev_list[t];
-+ struct scst_tgt_dev *tgt_dev;
-+ list_for_each_entry(tgt_dev, head,
-+ sess_tgt_dev_list_entry) {
-+
-+ seq_printf(seq, "\nLUN: %llu\n", tgt_dev->lun);
-+
-+ for (i = 0; i < SCST_LATENCY_STATS_NUM ; i++) {
-+ uint64_t scst_time_wr, tgt_time_wr, dev_time_wr;
-+ unsigned int processed_cmds_wr;
-+ uint64_t scst_time_rd, tgt_time_rd, dev_time_rd;
-+ unsigned int processed_cmds_rd;
-+ struct scst_ext_latency_stat *latency_stat;
-+
-+ latency_stat = &tgt_dev->dev_latency_stat[i];
-+ scst_time_wr = latency_stat->scst_time_wr;
-+ scst_time_rd = latency_stat->scst_time_rd;
-+ tgt_time_wr = latency_stat->tgt_time_wr;
-+ tgt_time_rd = latency_stat->tgt_time_rd;
-+ dev_time_wr = latency_stat->dev_time_wr;
-+ dev_time_rd = latency_stat->dev_time_rd;
-+ processed_cmds_wr = latency_stat->processed_cmds_wr;
-+ processed_cmds_rd = latency_stat->processed_cmds_rd;
-+
-+ seq_printf(seq, "%-5s %-9s %-15lu ",
-+ "Write", scst_io_size_names[i],
-+ (unsigned long)processed_cmds_wr);
-+ if (processed_cmds_wr == 0)
-+ processed_cmds_wr = 1;
-+
-+ do_div(scst_time_wr, processed_cmds_wr);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_scst_time_wr,
-+ (unsigned long)scst_time_wr,
-+ (unsigned long)latency_stat->max_scst_time_wr,
-+ (unsigned long)latency_stat->scst_time_wr);
-+ seq_printf(seq, "%-47s", buf);
-+
-+ do_div(tgt_time_wr, processed_cmds_wr);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_tgt_time_wr,
-+ (unsigned long)tgt_time_wr,
-+ (unsigned long)latency_stat->max_tgt_time_wr,
-+ (unsigned long)latency_stat->tgt_time_wr);
-+ seq_printf(seq, "%-47s", buf);
-+
-+ do_div(dev_time_wr, processed_cmds_wr);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_dev_time_wr,
-+ (unsigned long)dev_time_wr,
-+ (unsigned long)latency_stat->max_dev_time_wr,
-+ (unsigned long)latency_stat->dev_time_wr);
-+ seq_printf(seq, "%-47s\n", buf);
-+
-+ seq_printf(seq, "%-5s %-9s %-15lu ",
-+ "Read", scst_io_size_names[i],
-+ (unsigned long)processed_cmds_rd);
-+ if (processed_cmds_rd == 0)
-+ processed_cmds_rd = 1;
-+
-+ do_div(scst_time_rd, processed_cmds_rd);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_scst_time_rd,
-+ (unsigned long)scst_time_rd,
-+ (unsigned long)latency_stat->max_scst_time_rd,
-+ (unsigned long)latency_stat->scst_time_rd);
-+ seq_printf(seq, "%-47s", buf);
-+
-+ do_div(tgt_time_rd, processed_cmds_rd);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_tgt_time_rd,
-+ (unsigned long)tgt_time_rd,
-+ (unsigned long)latency_stat->max_tgt_time_rd,
-+ (unsigned long)latency_stat->tgt_time_rd);
-+ seq_printf(seq, "%-47s", buf);
-+
-+ do_div(dev_time_rd, processed_cmds_rd);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)latency_stat->min_dev_time_rd,
-+ (unsigned long)dev_time_rd,
-+ (unsigned long)latency_stat->max_dev_time_rd,
-+ (unsigned long)latency_stat->dev_time_rd);
-+ seq_printf(seq, "%-47s\n", buf);
-+ }
-+ }
-+ }
-+
-+ scst_time = sess->scst_time;
-+ tgt_time = sess->tgt_time;
-+ dev_time = sess->dev_time;
-+ processed_cmds = sess->processed_cmds;
-+
-+ seq_printf(seq, "\n%-15s %-16d", "Overall ",
-+ processed_cmds);
-+
-+ if (processed_cmds == 0)
-+ processed_cmds = 1;
-+
-+ do_div(scst_time, processed_cmds);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)sess->min_scst_time,
-+ (unsigned long)scst_time,
-+ (unsigned long)sess->max_scst_time,
-+ (unsigned long)sess->scst_time);
-+ seq_printf(seq, "%-47s", buf);
-+
-+ do_div(tgt_time, processed_cmds);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)sess->min_tgt_time,
-+ (unsigned long)tgt_time,
-+ (unsigned long)sess->max_tgt_time,
-+ (unsigned long)sess->tgt_time);
-+ seq_printf(seq, "%-47s", buf);
-+
-+ do_div(dev_time, processed_cmds);
-+ snprintf(buf, sizeof(buf), "%lu/%lu/%lu/%lu",
-+ (unsigned long)sess->min_dev_time,
-+ (unsigned long)dev_time,
-+ (unsigned long)sess->max_dev_time,
-+ (unsigned long)sess->dev_time);
-+ seq_printf(seq, "%-47s\n\n", buf);
-+
-+ spin_unlock_bh(&sess->lat_lock);
-+ }
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t scst_proc_scsi_tgt_gen_write_lat(struct file *file,
-+ const char __user *buf,
-+ size_t length, loff_t *off)
-+{
-+ int res = length, t;
-+ struct scst_acg *acg;
-+ struct scst_session *sess;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ list_for_each_entry(acg, &scst_acg_list, acg_list_entry) {
-+ list_for_each_entry(sess, &acg->acg_sess_list,
-+ acg_sess_list_entry) {
-+ PRINT_INFO("Zeroing latency statistics for initiator "
-+ "%s", sess->initiator_name);
-+ spin_lock_bh(&sess->lat_lock);
-+
-+ sess->scst_time = 0;
-+ sess->tgt_time = 0;
-+ sess->dev_time = 0;
-+ sess->min_scst_time = 0;
-+ sess->min_tgt_time = 0;
-+ sess->min_dev_time = 0;
-+ sess->max_scst_time = 0;
-+ sess->max_tgt_time = 0;
-+ sess->max_dev_time = 0;
-+ sess->processed_cmds = 0;
-+ memset(sess->sess_latency_stat, 0,
-+ sizeof(sess->sess_latency_stat));
-+
-+ for (t = SESS_TGT_DEV_LIST_HASH_SIZE-1; t >= 0; t--) {
-+ struct list_head *head =
-+ &sess->sess_tgt_dev_list[t];
-+ struct scst_tgt_dev *tgt_dev;
-+ list_for_each_entry(tgt_dev, head,
-+ sess_tgt_dev_list_entry) {
-+ tgt_dev->scst_time = 0;
-+ tgt_dev->tgt_time = 0;
-+ tgt_dev->dev_time = 0;
-+ tgt_dev->processed_cmds = 0;
-+ memset(tgt_dev->dev_latency_stat, 0,
-+ sizeof(tgt_dev->dev_latency_stat));
-+ }
-+ }
-+
-+ spin_unlock_bh(&sess->lat_lock);
-+ }
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct scst_proc_data scst_lat_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(scst_proc_scsi_tgt_gen_write_lat)
-+ .show = lat_info_show,
-+ .data = "scsi_tgt",
-+};
-+
-+#endif /* CONFIG_SCST_MEASURE_LATENCY */
-+
-+static int __init scst_proc_init_module_log(void)
-+{
-+ int res = 0;
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) || \
-+ defined(CONFIG_SCST_MEASURE_LATENCY)
-+ struct proc_dir_entry *generic;
-+#endif
-+
-+ TRACE_ENTRY();
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
-+ SCST_PROC_LOG_ENTRY_NAME,
-+ &scst_log_proc_data);
-+ if (!generic) {
-+ PRINT_ERROR("cannot init /proc/%s/%s",
-+ SCST_PROC_ENTRY_NAME, SCST_PROC_LOG_ENTRY_NAME);
-+ res = -ENOMEM;
-+ }
-+#endif
-+
-+#ifdef CONFIG_SCST_MEASURE_LATENCY
-+ if (res == 0) {
-+ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
-+ SCST_PROC_LAT_ENTRY_NAME,
-+ &scst_lat_proc_data);
-+ if (!generic) {
-+ PRINT_ERROR("cannot init /proc/%s/%s",
-+ SCST_PROC_ENTRY_NAME,
-+ SCST_PROC_LAT_ENTRY_NAME);
-+ res = -ENOMEM;
-+ }
-+ }
-+#endif
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void scst_proc_cleanup_module_log(void)
-+{
-+ TRACE_ENTRY();
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ remove_proc_entry(SCST_PROC_LOG_ENTRY_NAME, scst_proc_scsi_tgt);
-+#endif
-+
-+#ifdef CONFIG_SCST_MEASURE_LATENCY
-+ remove_proc_entry(SCST_PROC_LAT_ENTRY_NAME, scst_proc_scsi_tgt);
-+#endif
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int scst_proc_group_add_tree(struct scst_acg *acg, const char *name)
-+{
-+ int res = 0;
-+ struct proc_dir_entry *generic;
-+
-+ TRACE_ENTRY();
-+
-+ acg->acg_proc_root = proc_mkdir(name, scst_proc_groups_root);
-+ if (acg->acg_proc_root == NULL) {
-+ PRINT_ERROR("Not enough memory to register %s entry in "
-+ "/proc/%s/%s", name, SCST_PROC_ENTRY_NAME,
-+ SCST_PROC_GROUPS_ENTRY_NAME);
-+ goto out;
-+ }
-+
-+ scst_groups_addr_method_proc_data.data = acg;
-+ generic = scst_create_proc_entry(acg->acg_proc_root,
-+ SCST_PROC_GROUPS_ADDR_METHOD_ENTRY_NAME,
-+ &scst_groups_addr_method_proc_data);
-+ if (!generic) {
-+ PRINT_ERROR("Cannot init /proc/%s/%s/%s/%s",
-+ SCST_PROC_ENTRY_NAME,
-+ SCST_PROC_GROUPS_ENTRY_NAME,
-+ name, SCST_PROC_GROUPS_ADDR_METHOD_ENTRY_NAME);
-+ res = -ENOMEM;
-+ goto out_remove;
-+ }
-+
-+ scst_groups_devices_proc_data.data = acg;
-+ generic = scst_create_proc_entry(acg->acg_proc_root,
-+ SCST_PROC_GROUPS_DEVICES_ENTRY_NAME,
-+ &scst_groups_devices_proc_data);
-+ if (!generic) {
-+ PRINT_ERROR("Cannot init /proc/%s/%s/%s/%s",
-+ SCST_PROC_ENTRY_NAME,
-+ SCST_PROC_GROUPS_ENTRY_NAME,
-+ name, SCST_PROC_GROUPS_DEVICES_ENTRY_NAME);
-+ res = -ENOMEM;
-+ goto out_remove0;
-+ }
-+
-+ scst_groups_names_proc_data.data = acg;
-+ generic = scst_create_proc_entry(acg->acg_proc_root,
-+ SCST_PROC_GROUPS_USERS_ENTRY_NAME,
-+ &scst_groups_names_proc_data);
-+ if (!generic) {
-+ PRINT_ERROR("Cannot init /proc/%s/%s/%s/%s",
-+ SCST_PROC_ENTRY_NAME,
-+ SCST_PROC_GROUPS_ENTRY_NAME,
-+ name, SCST_PROC_GROUPS_USERS_ENTRY_NAME);
-+ res = -ENOMEM;
-+ goto out_remove1;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_remove1:
-+ remove_proc_entry(SCST_PROC_GROUPS_DEVICES_ENTRY_NAME,
-+ acg->acg_proc_root);
-+
-+out_remove0:
-+ remove_proc_entry(SCST_PROC_GROUPS_ADDR_METHOD_ENTRY_NAME,
-+ acg->acg_proc_root);
-+out_remove:
-+ remove_proc_entry(name, scst_proc_groups_root);
-+ goto out;
-+}
-+
-+static void scst_proc_del_acg_tree(struct proc_dir_entry *acg_proc_root,
-+ const char *name)
-+{
-+ TRACE_ENTRY();
-+
-+ remove_proc_entry(SCST_PROC_GROUPS_ADDR_METHOD_ENTRY_NAME, acg_proc_root);
-+ remove_proc_entry(SCST_PROC_GROUPS_USERS_ENTRY_NAME, acg_proc_root);
-+ remove_proc_entry(SCST_PROC_GROUPS_DEVICES_ENTRY_NAME, acg_proc_root);
-+ remove_proc_entry(name, scst_proc_groups_root);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* The activity is supposed to be suspended and scst_mutex held */
-+static int scst_proc_group_add(const char *p, unsigned int addr_method)
-+{
-+ int res = 0, len = strlen(p) + 1;
-+ struct scst_acg *acg;
-+ char *name = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ name = kmalloc(len, GFP_KERNEL);
-+ if (name == NULL) {
-+ PRINT_ERROR("Allocation of new name (size %d) failed", len);
-+ goto out_nomem;
-+ }
-+ strlcpy(name, p, len);
-+
-+ acg = scst_alloc_add_acg(NULL, name, false);
-+ if (acg == NULL) {
-+ PRINT_ERROR("scst_alloc_add_acg() (name %s) failed", name);
-+ goto out_free;
-+ }
-+
-+ acg->addr_method = addr_method;
-+
-+ res = scst_proc_group_add_tree(acg, p);
-+ if (res != 0)
-+ goto out_free_acg;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free_acg:
-+ scst_proc_del_free_acg(acg, 0);
-+
-+out_free:
-+ kfree(name);
-+
-+out_nomem:
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+/* The activity is supposed to be suspended and scst_mutex held */
-+static int scst_proc_del_free_acg(struct scst_acg *acg, int remove_proc)
-+{
-+ struct proc_dir_entry *acg_proc_root = acg->acg_proc_root;
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (acg != scst_default_acg) {
-+ if (!scst_acg_sess_is_empty(acg)) {
-+ PRINT_ERROR("%s", "Session is not empty");
-+ res = -EBUSY;
-+ goto out;
-+ }
-+ if (remove_proc)
-+ scst_proc_del_acg_tree(acg_proc_root, acg->acg_name);
-+ scst_del_free_acg(acg);
-+ }
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* The activity is supposed to be suspended and scst_mutex held */
-+static int scst_proc_rename_acg(struct scst_acg *acg, const char *new_name)
-+{
-+ int res = 0, len = strlen(new_name) + 1;
-+ char *name;
-+ struct proc_dir_entry *old_acg_proc_root = acg->acg_proc_root;
-+
-+ TRACE_ENTRY();
-+
-+ name = kmalloc(len, GFP_KERNEL);
-+ if (name == NULL) {
-+ PRINT_ERROR("Allocation of new name (size %d) failed", len);
-+ goto out_nomem;
-+ }
-+ strlcpy(name, new_name, len);
-+
-+ res = scst_proc_group_add_tree(acg, new_name);
-+ if (res != 0)
-+ goto out_free;
-+
-+ scst_proc_del_acg_tree(old_acg_proc_root, acg->acg_name);
-+
-+ kfree(acg->acg_name);
-+ acg->acg_name = name;
-+
-+ scst_check_reassign_sessions();
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free:
-+ kfree(name);
-+
-+out_nomem:
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static int __init scst_proc_init_groups(void)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ /* create the proc directory entry for the device */
-+ scst_proc_groups_root = proc_mkdir(SCST_PROC_GROUPS_ENTRY_NAME,
-+ scst_proc_scsi_tgt);
-+ if (scst_proc_groups_root == NULL) {
-+ PRINT_ERROR("Not enough memory to register %s entry in "
-+ "/proc/%s", SCST_PROC_GROUPS_ENTRY_NAME,
-+ SCST_PROC_ENTRY_NAME);
-+ goto out_nomem;
-+ }
-+
-+ res = scst_proc_group_add_tree(scst_default_acg,
-+ SCST_DEFAULT_ACG_NAME);
-+ if (res != 0)
-+ goto out_remove;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_remove:
-+ remove_proc_entry(SCST_PROC_GROUPS_ENTRY_NAME, scst_proc_scsi_tgt);
-+
-+out_nomem:
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+static void scst_proc_cleanup_groups(void)
-+{
-+ struct scst_acg *acg_tmp, *acg;
-+
-+ TRACE_ENTRY();
-+
-+ /* remove all groups (dir & entries) */
-+ list_for_each_entry_safe(acg, acg_tmp, &scst_acg_list,
-+ acg_list_entry) {
-+ scst_proc_del_free_acg(acg, 1);
-+ }
-+
-+ scst_proc_del_acg_tree(scst_default_acg->acg_proc_root,
-+ SCST_DEFAULT_ACG_NAME);
-+ TRACE_DBG("remove_proc_entry(%s, %p)",
-+ SCST_PROC_GROUPS_ENTRY_NAME, scst_proc_scsi_tgt);
-+ remove_proc_entry(SCST_PROC_GROUPS_ENTRY_NAME, scst_proc_scsi_tgt);
-+
-+ TRACE_EXIT();
-+}
-+
-+static int __init scst_proc_init_sgv(void)
-+{
-+ int res = 0;
-+ struct proc_dir_entry *pr;
-+
-+ TRACE_ENTRY();
-+
-+ pr = scst_create_proc_entry(scst_proc_scsi_tgt, "sgv",
-+ &scst_sgv_proc_data);
-+ if (pr == NULL) {
-+ PRINT_ERROR("%s", "cannot create sgv /proc entry");
-+ res = -ENOMEM;
-+ }
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void __exit scst_proc_cleanup_sgv(void)
-+{
-+ TRACE_ENTRY();
-+ remove_proc_entry("sgv", scst_proc_scsi_tgt);
-+ TRACE_EXIT();
-+}
-+
-+int __init scst_proc_init_module(void)
-+{
-+ int res = 0;
-+ struct proc_dir_entry *generic;
-+
-+ TRACE_ENTRY();
-+
-+ scst_proc_scsi_tgt = proc_mkdir(SCST_PROC_ENTRY_NAME, NULL);
-+ if (!scst_proc_scsi_tgt) {
-+ PRINT_ERROR("cannot init /proc/%s", SCST_PROC_ENTRY_NAME);
-+ goto out_nomem;
-+ }
-+
-+ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
-+ SCST_PROC_ENTRY_NAME,
-+ &scst_tgt_proc_data);
-+ if (!generic) {
-+ PRINT_ERROR("cannot init /proc/%s/%s",
-+ SCST_PROC_ENTRY_NAME, SCST_PROC_ENTRY_NAME);
-+ goto out_remove;
-+ }
-+
-+ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
-+ SCST_PROC_VERSION_NAME,
-+ &scst_version_proc_data);
-+ if (!generic) {
-+ PRINT_ERROR("cannot init /proc/%s/%s",
-+ SCST_PROC_ENTRY_NAME, SCST_PROC_VERSION_NAME);
-+ goto out_remove1;
-+ }
-+
-+ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
-+ SCST_PROC_SESSIONS_NAME,
-+ &scst_sessions_proc_data);
-+ if (!generic) {
-+ PRINT_ERROR("cannot init /proc/%s/%s",
-+ SCST_PROC_ENTRY_NAME, SCST_PROC_SESSIONS_NAME);
-+ goto out_remove2;
-+ }
-+
-+ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
-+ SCST_PROC_HELP_NAME,
-+ &scst_help_proc_data);
-+ if (!generic) {
-+ PRINT_ERROR("cannot init /proc/%s/%s",
-+ SCST_PROC_ENTRY_NAME, SCST_PROC_HELP_NAME);
-+ goto out_remove3;
-+ }
-+
-+ generic = scst_create_proc_entry(scst_proc_scsi_tgt,
-+ SCST_PROC_THREADS_NAME,
-+ &scst_threads_proc_data);
-+ if (!generic) {
-+ PRINT_ERROR("cannot init /proc/%s/%s",
-+ SCST_PROC_ENTRY_NAME, SCST_PROC_THREADS_NAME);
-+ goto out_remove4;
-+ }
-+
-+ if (scst_proc_init_module_log() < 0)
-+ goto out_remove5;
-+
-+ if (scst_proc_init_groups() < 0)
-+ goto out_remove6;
-+
-+ if (scst_proc_init_sgv() < 0)
-+ goto out_remove7;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_remove7:
-+ scst_proc_cleanup_groups();
-+
-+out_remove6:
-+ scst_proc_cleanup_module_log();
-+
-+out_remove5:
-+ remove_proc_entry(SCST_PROC_THREADS_NAME, scst_proc_scsi_tgt);
-+
-+out_remove4:
-+ remove_proc_entry(SCST_PROC_HELP_NAME, scst_proc_scsi_tgt);
-+
-+out_remove3:
-+ remove_proc_entry(SCST_PROC_SESSIONS_NAME, scst_proc_scsi_tgt);
-+
-+out_remove2:
-+ remove_proc_entry(SCST_PROC_VERSION_NAME, scst_proc_scsi_tgt);
-+
-+out_remove1:
-+ remove_proc_entry(SCST_PROC_ENTRY_NAME, scst_proc_scsi_tgt);
-+
-+out_remove:
-+ remove_proc_entry(SCST_PROC_ENTRY_NAME, NULL);
-+
-+out_nomem:
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+void __exit scst_proc_cleanup_module(void)
-+{
-+ TRACE_ENTRY();
-+
-+	/* We need not bother about locks here */
-+ scst_proc_cleanup_sgv();
-+ scst_proc_cleanup_groups();
-+ scst_proc_cleanup_module_log();
-+ remove_proc_entry(SCST_PROC_THREADS_NAME, scst_proc_scsi_tgt);
-+ remove_proc_entry(SCST_PROC_HELP_NAME, scst_proc_scsi_tgt);
-+ remove_proc_entry(SCST_PROC_SESSIONS_NAME, scst_proc_scsi_tgt);
-+ remove_proc_entry(SCST_PROC_VERSION_NAME, scst_proc_scsi_tgt);
-+ remove_proc_entry(SCST_PROC_ENTRY_NAME, scst_proc_scsi_tgt);
-+ remove_proc_entry(SCST_PROC_ENTRY_NAME, NULL);
-+
-+ TRACE_EXIT();
-+}
-+
-+static ssize_t scst_proc_threads_write(struct file *file,
-+ const char __user *buf,
-+ size_t length, loff_t *off)
-+{
-+ int res = length;
-+ int oldtn, newtn, delta;
-+ char *buffer;
-+
-+ TRACE_ENTRY();
-+
-+ if (length > SCST_PROC_BLOCK_SIZE) {
-+ res = -EOVERFLOW;
-+ goto out;
-+ }
-+ if (!buf) {
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ buffer = (char *)__get_free_page(GFP_KERNEL);
-+ if (!buffer) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ if (copy_from_user(buffer, buf, length)) {
-+ res = -EFAULT;
-+ goto out_free;
-+ }
-+ if (length < PAGE_SIZE) {
-+ buffer[length] = '\0';
-+ } else if (buffer[PAGE_SIZE-1]) {
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ if (mutex_lock_interruptible(&scst_proc_mutex) != 0) {
-+ res = -EINTR;
-+ goto out_free;
-+ }
-+
-+ mutex_lock(&scst_mutex);
-+
-+ oldtn = scst_main_cmd_threads.nr_threads;
-+ newtn = simple_strtoul(buffer, NULL, 0);
-+ if (newtn <= 0) {
-+		PRINT_ERROR("Invalid number of threads %d", newtn);
-+ res = -EINVAL;
-+ goto out_up_thr_free;
-+ }
-+ delta = newtn - oldtn;
-+ if (delta < 0)
-+ scst_del_threads(&scst_main_cmd_threads, -delta);
-+ else {
-+ int rc = scst_add_threads(&scst_main_cmd_threads, NULL, NULL,
-+ delta);
-+ if (rc != 0)
-+ res = rc;
-+ }
-+
-+ PRINT_INFO("Changed cmd threads num: old %d, new %d", oldtn, newtn);
-+
-+out_up_thr_free:
-+ mutex_unlock(&scst_mutex);
-+
-+ mutex_unlock(&scst_proc_mutex);
-+
-+out_free:
-+ free_page((unsigned long)buffer);
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+int scst_build_proc_target_dir_entries(struct scst_tgt_template *vtt)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ /* create the proc directory entry for the device */
-+ vtt->proc_tgt_root = proc_mkdir(vtt->name, scst_proc_scsi_tgt);
-+ if (vtt->proc_tgt_root == NULL) {
-+ PRINT_ERROR("Not enough memory to register SCSI target %s "
-+ "in /proc/%s", vtt->name, SCST_PROC_ENTRY_NAME);
-+ goto out_nomem;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_nomem:
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+void scst_cleanup_proc_target_dir_entries(struct scst_tgt_template *vtt)
-+{
-+ TRACE_ENTRY();
-+
-+ remove_proc_entry(vtt->name, scst_proc_scsi_tgt);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Called under scst_mutex */
-+int scst_build_proc_target_entries(struct scst_tgt *vtt)
-+{
-+ int res = 0;
-+ struct proc_dir_entry *p;
-+ char name[20];
-+
-+ TRACE_ENTRY();
-+
-+ if (vtt->tgtt->read_proc || vtt->tgtt->write_proc) {
-+ /* create the proc file entry for the device */
-+ scnprintf(name, sizeof(name), "%d", vtt->tgtt->proc_dev_num);
-+ scst_scsi_tgt_proc_data.data = (void *)vtt;
-+ p = scst_create_proc_entry(vtt->tgtt->proc_tgt_root,
-+ name,
-+ &scst_scsi_tgt_proc_data);
-+ if (p == NULL) {
-+ PRINT_ERROR("Not enough memory to register SCSI "
-+ "target entry %s in /proc/%s/%s", name,
-+ SCST_PROC_ENTRY_NAME, vtt->tgtt->name);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ vtt->proc_num = vtt->tgtt->proc_dev_num;
-+ vtt->tgtt->proc_dev_num++;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+void scst_cleanup_proc_target_entries(struct scst_tgt *vtt)
-+{
-+ char name[20];
-+
-+ TRACE_ENTRY();
-+
-+ if (vtt->tgtt->read_proc || vtt->tgtt->write_proc) {
-+ scnprintf(name, sizeof(name), "%d", vtt->proc_num);
-+ remove_proc_entry(name, vtt->tgtt->proc_tgt_root);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static ssize_t scst_proc_scsi_tgt_write(struct file *file,
-+ const char __user *buf,
-+ size_t length, loff_t *off)
-+{
-+ struct scst_tgt *vtt =
-+ (struct scst_tgt *)PDE(file->f_dentry->d_inode)->data;
-+ ssize_t res = 0;
-+ char *buffer;
-+ char *start;
-+ int eof = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (vtt->tgtt->write_proc == NULL) {
-+ res = -ENOSYS;
-+ goto out;
-+ }
-+
-+ if (length > SCST_PROC_BLOCK_SIZE) {
-+ res = -EOVERFLOW;
-+ goto out;
-+ }
-+ if (!buf) {
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ buffer = (char *)__get_free_page(GFP_KERNEL);
-+ if (!buffer) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ if (copy_from_user(buffer, buf, length)) {
-+ res = -EFAULT;
-+ goto out_free;
-+ }
-+ if (length < PAGE_SIZE) {
-+ buffer[length] = '\0';
-+ } else if (buffer[PAGE_SIZE-1]) {
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ TRACE_BUFFER("Buffer", buffer, length);
-+
-+ if (mutex_lock_interruptible(&scst_proc_mutex) != 0) {
-+ res = -EINTR;
-+ goto out_free;
-+ }
-+
-+ res = vtt->tgtt->write_proc(buffer, &start, 0, length, &eof, vtt);
-+
-+ mutex_unlock(&scst_proc_mutex);
-+
-+out_free:
-+ free_page((unsigned long)buffer);
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+int scst_build_proc_dev_handler_dir_entries(struct scst_dev_type *dev_type)
-+{
-+ int res = 0;
-+ struct proc_dir_entry *p;
-+ const char *name; /* workaround to keep /proc ABI intact */
-+
-+ TRACE_ENTRY();
-+
-+ BUG_ON(dev_type->proc_dev_type_root);
-+
-+ if (strcmp(dev_type->name, "vdisk_fileio") == 0)
-+ name = "vdisk";
-+ else
-+ name = dev_type->name;
-+
-+ /* create the proc directory entry for the dev type handler */
-+ dev_type->proc_dev_type_root = proc_mkdir(name,
-+ scst_proc_scsi_tgt);
-+ if (dev_type->proc_dev_type_root == NULL) {
-+ PRINT_ERROR("Not enough memory to register dev handler dir "
-+ "%s in /proc/%s", name, SCST_PROC_ENTRY_NAME);
-+ goto out_nomem;
-+ }
-+
-+ scst_dev_handler_type_proc_data.data = dev_type;
-+ if (dev_type->type >= 0) {
-+ p = scst_create_proc_entry(dev_type->proc_dev_type_root,
-+ SCST_PROC_DEV_HANDLER_TYPE_ENTRY_NAME,
-+ &scst_dev_handler_type_proc_data);
-+ if (p == NULL) {
-+ PRINT_ERROR("Not enough memory to register dev "
-+ "handler entry %s in /proc/%s/%s",
-+ SCST_PROC_DEV_HANDLER_TYPE_ENTRY_NAME,
-+ SCST_PROC_ENTRY_NAME, name);
-+ goto out_remove;
-+ }
-+ }
-+
-+ if (dev_type->read_proc || dev_type->write_proc) {
-+ /* create the proc file entry for the dev type handler */
-+ scst_dev_handler_proc_data.data = (void *)dev_type;
-+ p = scst_create_proc_entry(dev_type->proc_dev_type_root,
-+ name,
-+ &scst_dev_handler_proc_data);
-+ if (p == NULL) {
-+ PRINT_ERROR("Not enough memory to register dev "
-+ "handler entry %s in /proc/%s/%s", name,
-+ SCST_PROC_ENTRY_NAME, name);
-+ goto out_remove1;
-+ }
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_remove1:
-+ if (dev_type->type >= 0)
-+ remove_proc_entry(SCST_PROC_DEV_HANDLER_TYPE_ENTRY_NAME,
-+ dev_type->proc_dev_type_root);
-+
-+out_remove:
-+ remove_proc_entry(name, scst_proc_scsi_tgt);
-+
-+out_nomem:
-+ res = -ENOMEM;
-+ goto out;
-+}
-+
-+void scst_cleanup_proc_dev_handler_dir_entries(struct scst_dev_type *dev_type)
-+{
-+ /* Workaround to keep /proc ABI intact */
-+ const char *name;
-+
-+ TRACE_ENTRY();
-+
-+ BUG_ON(dev_type->proc_dev_type_root == NULL);
-+
-+ if (strcmp(dev_type->name, "vdisk_fileio") == 0)
-+ name = "vdisk";
-+ else
-+ name = dev_type->name;
-+
-+ if (dev_type->type >= 0) {
-+ remove_proc_entry(SCST_PROC_DEV_HANDLER_TYPE_ENTRY_NAME,
-+ dev_type->proc_dev_type_root);
-+ }
-+ if (dev_type->read_proc || dev_type->write_proc)
-+ remove_proc_entry(name, dev_type->proc_dev_type_root);
-+ remove_proc_entry(name, scst_proc_scsi_tgt);
-+ dev_type->proc_dev_type_root = NULL;
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static ssize_t scst_proc_scsi_dev_handler_write(struct file *file,
-+ const char __user *buf,
-+ size_t length, loff_t *off)
-+{
-+ struct scst_dev_type *dev_type =
-+ (struct scst_dev_type *)PDE(file->f_dentry->d_inode)->data;
-+ ssize_t res = 0;
-+ char *buffer;
-+ char *start;
-+ int eof = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (dev_type->write_proc == NULL) {
-+ res = -ENOSYS;
-+ goto out;
-+ }
-+
-+ if (length > SCST_PROC_BLOCK_SIZE) {
-+ res = -EOVERFLOW;
-+ goto out;
-+ }
-+ if (!buf) {
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ buffer = (char *)__get_free_page(GFP_KERNEL);
-+ if (!buffer) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ if (copy_from_user(buffer, buf, length)) {
-+ res = -EFAULT;
-+ goto out_free;
-+ }
-+ if (length < PAGE_SIZE) {
-+ buffer[length] = '\0';
-+ } else if (buffer[PAGE_SIZE-1]) {
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ TRACE_BUFFER("Buffer", buffer, length);
-+
-+ if (mutex_lock_interruptible(&scst_proc_mutex) != 0) {
-+ res = -EINTR;
-+ goto out_free;
-+ }
-+
-+ res = dev_type->write_proc(buffer, &start, 0, length, &eof, dev_type);
-+
-+ mutex_unlock(&scst_proc_mutex);
-+
-+out_free:
-+ free_page((unsigned long)buffer);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t scst_proc_scsi_tgt_gen_write(struct file *file,
-+ const char __user *buf,
-+ size_t length, loff_t *off)
-+{
-+ int res, rc = 0, action;
-+ char *buffer, *p, *pp, *ppp;
-+ struct scst_acg *a, *acg = NULL;
-+ unsigned int addr_method = SCST_LUN_ADDR_METHOD_PERIPHERAL;
-+
-+ TRACE_ENTRY();
-+
-+ if (length > SCST_PROC_BLOCK_SIZE) {
-+ res = -EOVERFLOW;
-+ goto out;
-+ }
-+ if (!buf) {
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ buffer = (char *)__get_free_page(GFP_KERNEL);
-+ if (!buffer) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ if (copy_from_user(buffer, buf, length)) {
-+ res = -EFAULT;
-+ goto out_free;
-+ }
-+ if (length < PAGE_SIZE) {
-+ buffer[length] = '\0';
-+ } else if (buffer[PAGE_SIZE-1]) {
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ /*
-+ * Usage: echo "add_group GROUP_NAME [FLAT]" >/proc/scsi_tgt/scsi_tgt
-+ * or echo "add_group GROUP_NAME [LUN]" >/proc/scsi_tgt/scsi_tgt
-+ * or echo "del_group GROUP_NAME" >/proc/scsi_tgt/scsi_tgt
-+ * or echo "rename_group OLD_NAME NEW_NAME" >/proc/scsi_tgt/scsi_tgt"
-+ * or echo "assign H:C:I:L HANDLER_NAME" >/proc/scsi_tgt/scsi_tgt
-+ */
-+ p = buffer;
-+ if (p[strlen(p) - 1] == '\n')
-+ p[strlen(p) - 1] = '\0';
-+ if (!strncasecmp("assign ", p, 7)) {
-+ p += 7;
-+ action = SCST_PROC_ACTION_ASSIGN;
-+ } else if (!strncasecmp("add_group ", p, 10)) {
-+ p += 10;
-+ action = SCST_PROC_ACTION_ADD_GROUP;
-+ } else if (!strncasecmp("del_group ", p, 10)) {
-+ p += 10;
-+ action = SCST_PROC_ACTION_DEL_GROUP;
-+ } else if (!strncasecmp("rename_group ", p, 13)) {
-+ p += 13;
-+ action = SCST_PROC_ACTION_RENAME_GROUP;
-+ } else {
-+ PRINT_ERROR("Unknown action \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ res = scst_suspend_activity(true);
-+ if (res != 0)
-+ goto out_free;
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out_free_resume;
-+ }
-+
-+ res = length;
-+
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+
-+ switch (action) {
-+ case SCST_PROC_ACTION_ADD_GROUP:
-+ case SCST_PROC_ACTION_DEL_GROUP:
-+ case SCST_PROC_ACTION_RENAME_GROUP:
-+ pp = p;
-+ while (!isspace(*pp) && *pp != '\0')
-+ pp++;
-+ if (*pp != '\0') {
-+ *pp = '\0';
-+ pp++;
-+ while (isspace(*pp) && *pp != '\0')
-+ pp++;
-+ if (*pp != '\0') {
-+ switch (action) {
-+ case SCST_PROC_ACTION_ADD_GROUP:
-+ ppp = pp;
-+ while (!isspace(*ppp) && *ppp != '\0')
-+ ppp++;
-+ if (*ppp != '\0') {
-+ *ppp = '\0';
-+ ppp++;
-+ while (isspace(*ppp) && *ppp != '\0')
-+ ppp++;
-+ if (*ppp != '\0') {
-+ PRINT_ERROR("%s", "Too many "
-+ "arguments");
-+ res = -EINVAL;
-+ goto out_up_free;
-+ }
-+ }
-+ if (strcasecmp(pp, "FLAT") == 0)
-+ addr_method = SCST_LUN_ADDR_METHOD_FLAT;
-+ else if (strcasecmp(pp, "LUN") == 0)
-+ addr_method = SCST_LUN_ADDR_METHOD_LUN;
-+ else {
-+ PRINT_ERROR("Unexpected "
-+ "argument %s", pp);
-+ res = -EINVAL;
-+ goto out_up_free;
-+ }
-+ break;
-+ case SCST_PROC_ACTION_DEL_GROUP:
-+ PRINT_ERROR("%s", "Too many "
-+ "arguments");
-+ res = -EINVAL;
-+ goto out_up_free;
-+ }
-+ }
-+ }
-+
-+ if (strcmp(p, SCST_DEFAULT_ACG_NAME) == 0) {
-+ PRINT_ERROR("Attempt to add/delete/rename predefined "
-+ "group \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_up_free;
-+ }
-+
-+ list_for_each_entry(a, &scst_acg_list, acg_list_entry) {
-+ if (strcmp(a->acg_name, p) == 0) {
-+ TRACE_DBG("group (acg) %p %s found",
-+ a, a->acg_name);
-+ acg = a;
-+ break;
-+ }
-+ }
-+
-+ switch (action) {
-+ case SCST_PROC_ACTION_ADD_GROUP:
-+ if (acg) {
-+				PRINT_ERROR("acg name %s already exists", p);
-+ res = -EINVAL;
-+ goto out_up_free;
-+ }
-+ rc = scst_proc_group_add(p, addr_method);
-+ break;
-+ case SCST_PROC_ACTION_DEL_GROUP:
-+ if (acg == NULL) {
-+ PRINT_ERROR("acg name %s not found", p);
-+ res = -EINVAL;
-+ goto out_up_free;
-+ }
-+ rc = scst_proc_del_free_acg(acg, 1);
-+ break;
-+ case SCST_PROC_ACTION_RENAME_GROUP:
-+ if (acg == NULL) {
-+ PRINT_ERROR("acg name %s not found", p);
-+ res = -EINVAL;
-+ goto out_up_free;
-+ }
-+
-+ p = pp;
-+ while (!isspace(*pp) && *pp != '\0')
-+ pp++;
-+ if (*pp != '\0') {
-+ *pp = '\0';
-+ pp++;
-+ while (isspace(*pp) && *pp != '\0')
-+ pp++;
-+ if (*pp != '\0') {
-+ PRINT_ERROR("%s", "Too many arguments");
-+ res = -EINVAL;
-+ goto out_up_free;
-+ }
-+ }
-+ rc = scst_proc_rename_acg(acg, p);
-+ break;
-+ }
-+ break;
-+ case SCST_PROC_ACTION_ASSIGN:
-+ rc = scst_proc_assign_handler(p);
-+ break;
-+ }
-+
-+ if (rc != 0)
-+ res = rc;
-+
-+out_up_free:
-+ mutex_unlock(&scst_mutex);
-+
-+out_free_resume:
-+ scst_resume_activity();
-+
-+out_free:
-+ free_page((unsigned long)buffer);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* The activity is supposed to be suspended and scst_mutex held */
-+static int scst_proc_assign_handler(char *buf)
-+{
-+ int res = 0;
-+ char *p = buf, *e, *ee;
-+ unsigned long host, channel = 0, id = 0, lun = 0;
-+ struct scst_device *d, *dev = NULL;
-+ struct scst_dev_type *dt, *handler = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+
-+ host = simple_strtoul(p, &p, 0);
-+ if ((host == ULONG_MAX) || (*p != ':'))
-+ goto out_synt_err;
-+ p++;
-+ channel = simple_strtoul(p, &p, 0);
-+ if ((channel == ULONG_MAX) || (*p != ':'))
-+ goto out_synt_err;
-+ p++;
-+ id = simple_strtoul(p, &p, 0);
-+	if ((id == ULONG_MAX) || (*p != ':'))
-+ goto out_synt_err;
-+ p++;
-+ lun = simple_strtoul(p, &p, 0);
-+ if (lun == ULONG_MAX)
-+ goto out_synt_err;
-+
-+ e = p;
-+ e++;
-+ while (isspace(*e) && *e != '\0')
-+ e++;
-+ ee = e;
-+ while (!isspace(*ee) && *ee != '\0')
-+ ee++;
-+ *ee = '\0';
-+
-+ TRACE_DBG("Dev %ld:%ld:%ld:%ld, handler %s", host, channel, id, lun, e);
-+
-+ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
-+ if ((d->virt_id == 0) &&
-+ d->scsi_dev->host->host_no == host &&
-+ d->scsi_dev->channel == channel &&
-+ d->scsi_dev->id == id &&
-+ d->scsi_dev->lun == lun) {
-+ dev = d;
-+ TRACE_DBG("Dev %p (%ld:%ld:%ld:%ld) found",
-+ dev, host, channel, id, lun);
-+ break;
-+ }
-+ }
-+
-+ if (dev == NULL) {
-+ PRINT_ERROR("Device %ld:%ld:%ld:%ld not found",
-+ host, channel, id, lun);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ list_for_each_entry(dt, &scst_dev_type_list, dev_type_list_entry) {
-+ if (!strcmp(dt->name, e)) {
-+ handler = dt;
-+ TRACE_DBG("Dev handler %p with name %s found",
-+ dt, dt->name);
-+ break;
-+ }
-+ }
-+
-+ if (handler == NULL) {
-+ PRINT_ERROR("Handler %s not found", e);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (dev->scsi_dev->type != handler->type) {
-+ PRINT_ERROR("Type %d of device %s differs from type "
-+ "%d of dev handler %s", dev->type,
-+ dev->handler->name, handler->type, handler->name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ res = scst_assign_dev_handler(dev, handler);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_synt_err:
-+ PRINT_ERROR("Syntax error on %s", p);
-+ res = -EINVAL;
-+ goto out;
-+}
-+
-+static ssize_t scst_proc_groups_devices_write(struct file *file,
-+ const char __user *buf,
-+ size_t length, loff_t *off)
-+{
-+ int res, action, rc, read_only = 0;
-+ char *buffer, *p, *e = NULL;
-+ unsigned int virt_lun;
-+ struct scst_acg *acg =
-+ (struct scst_acg *)PDE(file->f_dentry->d_inode)->data;
-+ struct scst_acg_dev *acg_dev = NULL, *acg_dev_tmp;
-+ struct scst_device *d, *dev = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ if (length > SCST_PROC_BLOCK_SIZE) {
-+ res = -EOVERFLOW;
-+ goto out;
-+ }
-+ if (!buf) {
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ buffer = (char *)__get_free_page(GFP_KERNEL);
-+ if (!buffer) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ if (copy_from_user(buffer, buf, length)) {
-+ res = -EFAULT;
-+ goto out_free;
-+ }
-+ if (length < PAGE_SIZE) {
-+ buffer[length] = '\0';
-+ } else if (buffer[PAGE_SIZE-1]) {
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ /*
-+ * Usage: echo "add|del H:C:I:L lun [READ_ONLY]" \
-+ * >/proc/scsi_tgt/groups/GROUP_NAME/devices
-+ * or echo "replace H:C:I:L lun [READ_ONLY]" \
-+ * >/proc/scsi_tgt/groups/GROUP_NAME/devices
-+ * or echo "add|del V_NAME lun [READ_ONLY]" \
-+ * >/proc/scsi_tgt/groups/GROUP_NAME/devices
-+ * or echo "replace V_NAME lun [READ_ONLY]" \
-+ * >/proc/scsi_tgt/groups/GROUP_NAME/devices
-+ * or echo "clear" >/proc/scsi_tgt/groups/GROUP_NAME/devices
-+ */
-+ p = buffer;
-+	if (strlen(p) > 0 && p[strlen(p) - 1] == '\n')
-+		p[strlen(p) - 1] = '\0';
-+ if (!strncasecmp("clear", p, 5)) {
-+ action = SCST_PROC_ACTION_CLEAR;
-+ } else if (!strncasecmp("add ", p, 4)) {
-+ p += 4;
-+ action = SCST_PROC_ACTION_ADD;
-+ } else if (!strncasecmp("del ", p, 4)) {
-+ p += 4;
-+ action = SCST_PROC_ACTION_DEL;
-+ } else if (!strncasecmp("replace ", p, 8)) {
-+ p += 8;
-+ action = SCST_PROC_ACTION_REPLACE;
-+ } else {
-+ PRINT_ERROR("Unknown action \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ res = scst_suspend_activity(true);
-+ if (res != 0)
-+ goto out_free;
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out_free_resume;
-+ }
-+
-+ res = length;
-+
-+ switch (action) {
-+ case SCST_PROC_ACTION_ADD:
-+ case SCST_PROC_ACTION_DEL:
-+ case SCST_PROC_ACTION_REPLACE:
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+ e = p; /* save p */
-+ while (!isspace(*e) && *e != '\0')
-+ e++;
-+ *e = 0;
-+
-+ list_for_each_entry(d, &scst_dev_list, dev_list_entry) {
-+ if (!strcmp(d->virt_name, p)) {
-+ dev = d;
-+ TRACE_DBG("Device %p (%s) found", dev, p);
-+ break;
-+ }
-+ }
-+ if (dev == NULL) {
-+ PRINT_ERROR("Device %s not found", p);
-+ res = -EINVAL;
-+ goto out_free_up;
-+ }
-+ break;
-+ }
-+
-+ /* ToDo: create separate functions */
-+
-+ switch (action) {
-+ case SCST_PROC_ACTION_ADD:
-+ case SCST_PROC_ACTION_REPLACE:
-+ {
-+ bool dev_replaced = false;
-+
-+ e++;
-+ while (isspace(*e) && *e != '\0')
-+ e++;
-+
-+ virt_lun = simple_strtoul(e, &e, 0);
-+ if (virt_lun > SCST_MAX_LUN) {
-+ PRINT_ERROR("Too big LUN %d (max %d)", virt_lun,
-+ SCST_MAX_LUN);
-+ res = -EINVAL;
-+ goto out_free_up;
-+ }
-+
-+ while (isspace(*e) && *e != '\0')
-+ e++;
-+
-+ if (*e != '\0') {
-+ if (!strncasecmp("READ_ONLY", e, 9))
-+ read_only = 1;
-+ else {
-+ PRINT_ERROR("Unknown option \"%s\"", e);
-+ res = -EINVAL;
-+ goto out_free_up;
-+ }
-+ }
-+
-+ list_for_each_entry(acg_dev_tmp, &acg->acg_dev_list,
-+ acg_dev_list_entry) {
-+ if (acg_dev_tmp->lun == virt_lun) {
-+ acg_dev = acg_dev_tmp;
-+ break;
-+ }
-+ }
-+ if (acg_dev != NULL) {
-+ if (action == SCST_PROC_ACTION_ADD) {
-+ PRINT_ERROR("virt lun %d already exists in "
-+ "group %s", virt_lun, acg->acg_name);
-+ res = -EEXIST;
-+ goto out_free_up;
-+ } else {
-+ /* Replace */
-+ rc = scst_acg_del_lun(acg, acg_dev->lun,
-+ false);
-+ if (rc) {
-+ res = rc;
-+ goto out_free_up;
-+ }
-+ dev_replaced = true;
-+ }
-+ }
-+
-+ rc = scst_acg_add_lun(acg, NULL, dev, virt_lun, read_only,
-+ false, NULL);
-+ if (rc) {
-+ res = rc;
-+ goto out_free_up;
-+ }
-+
-+ if (action == SCST_PROC_ACTION_ADD)
-+ scst_report_luns_changed(acg);
-+
-+ if (dev_replaced) {
-+ struct scst_tgt_dev *tgt_dev;
-+
-+ list_for_each_entry(tgt_dev, &dev->dev_tgt_dev_list,
-+ dev_tgt_dev_list_entry) {
-+ if ((tgt_dev->acg_dev->acg == acg) &&
-+ (tgt_dev->lun == virt_lun)) {
-+ TRACE_MGMT_DBG("INQUIRY DATA HAS CHANGED"
-+ " on tgt_dev %p", tgt_dev);
-+ scst_gen_aen_or_ua(tgt_dev,
-+ SCST_LOAD_SENSE(scst_sense_inquery_data_changed));
-+ }
-+ }
-+ }
-+ break;
-+ }
-+ case SCST_PROC_ACTION_DEL:
-+ {
-+ /*
-+		 * This code doesn't handle the case where there are >1 LUNs
-+		 * for the same device in the group; instead, it always
-+		 * deletes the first entry. It wasn't fixed, for compatibility
-+		 * reasons, because procfs is now obsolete.
-+ */
-+ struct scst_acg_dev *a;
-+ list_for_each_entry(a, &acg->acg_dev_list, acg_dev_list_entry) {
-+ if (a->dev == dev) {
-+ rc = scst_acg_del_lun(acg, a->lun, true);
-+ if (rc)
-+ res = rc;
-+ goto out_free_up;
-+ }
-+ }
-+ PRINT_ERROR("Device is not found in group %s", acg->acg_name);
-+ break;
-+ }
-+ case SCST_PROC_ACTION_CLEAR:
-+ list_for_each_entry_safe(acg_dev, acg_dev_tmp,
-+ &acg->acg_dev_list,
-+ acg_dev_list_entry) {
-+ rc = scst_acg_del_lun(acg, acg_dev->lun,
-+ list_is_last(&acg_dev->acg_dev_list_entry,
-+ &acg->acg_dev_list));
-+ if (rc) {
-+ res = rc;
-+ goto out_free_up;
-+ }
-+ }
-+ break;
-+ }
-+
-+out_free_up:
-+ mutex_unlock(&scst_mutex);
-+
-+out_free_resume:
-+ scst_resume_activity();
-+
-+out_free:
-+ free_page((unsigned long)buffer);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t scst_proc_groups_names_write(struct file *file,
-+ const char __user *buf,
-+ size_t length, loff_t *off)
-+{
-+ int res = length, rc = 0, action;
-+ char *buffer, *p, *pp = NULL;
-+ struct scst_acg *acg =
-+ (struct scst_acg *)PDE(file->f_dentry->d_inode)->data;
-+ struct scst_acn *n, *nn;
-+
-+ TRACE_ENTRY();
-+
-+ if (length > SCST_PROC_BLOCK_SIZE) {
-+ res = -EOVERFLOW;
-+ goto out;
-+ }
-+ if (!buf) {
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ buffer = (char *)__get_free_page(GFP_KERNEL);
-+ if (!buffer) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ if (copy_from_user(buffer, buf, length)) {
-+ res = -EFAULT;
-+ goto out_free;
-+ }
-+ if (length < PAGE_SIZE) {
-+ buffer[length] = '\0';
-+ } else if (buffer[PAGE_SIZE-1]) {
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ /*
-+ * Usage: echo "add|del NAME" >/proc/scsi_tgt/groups/GROUP_NAME/names
-+ * or echo "move NAME NEW_GROUP_NAME" >/proc/scsi_tgt/groups/OLD_GROUP_NAME/names"
-+ * or echo "clear" >/proc/scsi_tgt/groups/GROUP_NAME/names
-+ */
-+ p = buffer;
-+	if (strlen(p) > 0 && p[strlen(p) - 1] == '\n')
-+		p[strlen(p) - 1] = '\0';
-+ if (!strncasecmp("clear", p, 5)) {
-+ action = SCST_PROC_ACTION_CLEAR;
-+ } else if (!strncasecmp("add ", p, 4)) {
-+ p += 4;
-+ action = SCST_PROC_ACTION_ADD;
-+ } else if (!strncasecmp("del ", p, 4)) {
-+ p += 4;
-+ action = SCST_PROC_ACTION_DEL;
-+ } else if (!strncasecmp("move ", p, 5)) {
-+ p += 5;
-+ action = SCST_PROC_ACTION_MOVE;
-+ } else {
-+ PRINT_ERROR("Unknown action \"%s\"", p);
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+
-+ switch (action) {
-+ case SCST_PROC_ACTION_ADD:
-+ case SCST_PROC_ACTION_DEL:
-+ case SCST_PROC_ACTION_MOVE:
-+ while (isspace(*p) && *p != '\0')
-+ p++;
-+ pp = p;
-+ while (!isspace(*pp) && *pp != '\0')
-+ pp++;
-+ if (*pp != '\0') {
-+ *pp = '\0';
-+ pp++;
-+ while (isspace(*pp) && *pp != '\0')
-+ pp++;
-+ if (*pp != '\0') {
-+ switch (action) {
-+ case SCST_PROC_ACTION_ADD:
-+ case SCST_PROC_ACTION_DEL:
-+ PRINT_ERROR("%s", "Too many "
-+ "arguments");
-+ res = -EINVAL;
-+ goto out_free;
-+ }
-+ }
-+ }
-+ break;
-+ }
-+
-+ rc = scst_suspend_activity(true);
-+ if (rc != 0)
-+ goto out_free;
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out_free_resume;
-+ }
-+
-+ switch (action) {
-+ case SCST_PROC_ACTION_ADD:
-+ rc = scst_acg_add_acn(acg, p);
-+ break;
-+ case SCST_PROC_ACTION_DEL:
-+ rc = scst_acg_remove_name(acg, p, true);
-+ break;
-+ case SCST_PROC_ACTION_MOVE:
-+ {
-+ struct scst_acg *a, *new_acg = NULL;
-+ char *name = p;
-+ p = pp;
-+ while (!isspace(*pp) && *pp != '\0')
-+ pp++;
-+ if (*pp != '\0') {
-+ *pp = '\0';
-+ pp++;
-+ while (isspace(*pp) && *pp != '\0')
-+ pp++;
-+ if (*pp != '\0') {
-+ PRINT_ERROR("%s", "Too many arguments");
-+ res = -EINVAL;
-+ goto out_free_unlock;
-+ }
-+ }
-+ list_for_each_entry(a, &scst_acg_list, acg_list_entry) {
-+ if (strcmp(a->acg_name, p) == 0) {
-+ TRACE_DBG("group (acg) %p %s found",
-+ a, a->acg_name);
-+ new_acg = a;
-+ break;
-+ }
-+ }
-+ if (new_acg == NULL) {
-+ PRINT_ERROR("Group %s not found", p);
-+ res = -EINVAL;
-+ goto out_free_unlock;
-+ }
-+ rc = scst_acg_remove_name(acg, name, false);
-+ if (rc != 0)
-+ goto out_free_unlock;
-+ rc = scst_acg_add_acn(new_acg, name);
-+ if (rc != 0)
-+ scst_acg_add_acn(acg, name);
-+ break;
-+ }
-+ case SCST_PROC_ACTION_CLEAR:
-+ list_for_each_entry_safe(n, nn, &acg->acn_list,
-+ acn_list_entry) {
-+ scst_del_free_acn(n, false);
-+ }
-+ scst_check_reassign_sessions();
-+ break;
-+ }
-+
-+out_free_unlock:
-+ mutex_unlock(&scst_mutex);
-+
-+out_free_resume:
-+ scst_resume_activity();
-+
-+out_free:
-+ free_page((unsigned long)buffer);
-+
-+out:
-+ if (rc < 0)
-+ res = rc;
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
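-+
-+/*
-+ * Example usage matching the Usage comment above (the initiator and
-+ * group names are illustrative):
-+ *
-+ *   echo "add iqn.2006-10.net.vlnb:ini" \
-+ *     >/proc/scsi_tgt/groups/GROUP1/names
-+ *   echo "move iqn.2006-10.net.vlnb:ini GROUP2" \
-+ *     >/proc/scsi_tgt/groups/GROUP1/names
-+ */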
-+
-+static int scst_version_info_show(struct seq_file *seq, void *v)
-+{
-+ TRACE_ENTRY();
-+
-+ seq_printf(seq, "%s\n", SCST_VERSION_STRING);
-+
-+#ifdef CONFIG_SCST_STRICT_SERIALIZING
-+ seq_printf(seq, "STRICT_SERIALIZING\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ seq_printf(seq, "EXTRACHECKS\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_TRACING
-+ seq_printf(seq, "TRACING\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG
-+ seq_printf(seq, "DEBUG\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_TM
-+ seq_printf(seq, "DEBUG_TM\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_RETRY
-+ seq_printf(seq, "DEBUG_RETRY\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_OOM
-+ seq_printf(seq, "DEBUG_OOM\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG_SN
-+ seq_printf(seq, "DEBUG_SN\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_USE_EXPECTED_VALUES
-+ seq_printf(seq, "USE_EXPECTED_VALUES\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_TEST_IO_IN_SIRQ
-+ seq_printf(seq, "TEST_IO_IN_SIRQ\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_STRICT_SECURITY
-+ seq_printf(seq, "STRICT_SECURITY\n");
-+#endif
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+static struct scst_proc_data scst_version_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(NULL)
-+ .show = scst_version_info_show,
-+};
-+
-+static int scst_help_info_show(struct seq_file *seq, void *v)
-+{
-+ TRACE_ENTRY();
-+
-+ seq_printf(seq, "%s\n", scst_proc_help_string);
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+static struct scst_proc_data scst_help_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(NULL)
-+ .show = scst_help_info_show,
-+};
-+
-+static int scst_dev_handler_type_info_show(struct seq_file *seq, void *v)
-+{
-+ struct scst_dev_type *dev_type = (struct scst_dev_type *)seq->private;
-+
-+ TRACE_ENTRY();
-+
-+ seq_printf(seq, "%d - %s\n", dev_type->type,
-+		(dev_type->type < 0 ||
-+		 dev_type->type >= (int)ARRAY_SIZE(scst_proc_dev_handler_type))
-+		? "unknown" : scst_proc_dev_handler_type[dev_type->type]);
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+static struct scst_proc_data scst_dev_handler_type_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(NULL)
-+ .show = scst_dev_handler_type_info_show,
-+};
-+
-+static int scst_sessions_info_show(struct seq_file *seq, void *v)
-+{
-+ int res = 0;
-+ struct scst_acg *acg;
-+ struct scst_session *sess;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ seq_printf(seq, "%-20s %-45s %-35s %-15s\n",
-+ "Target name", "Initiator name",
-+ "Group name", "Active/All Commands Count");
-+
-+ list_for_each_entry(acg, &scst_acg_list, acg_list_entry) {
-+ list_for_each_entry(sess, &acg->acg_sess_list,
-+ acg_sess_list_entry) {
-+ int active_cmds = 0, t;
-+ for (t = SESS_TGT_DEV_LIST_HASH_SIZE-1; t >= 0; t--) {
-+ struct list_head *head =
-+ &sess->sess_tgt_dev_list[t];
-+ struct scst_tgt_dev *tgt_dev;
-+ list_for_each_entry(tgt_dev, head,
-+ sess_tgt_dev_list_entry) {
-+ active_cmds += atomic_read(&tgt_dev->tgt_dev_cmd_count);
-+ }
-+ }
-+ seq_printf(seq, "%-20s %-45s %-35s %d/%d\n",
-+ sess->tgt->tgtt->name,
-+ sess->initiator_name,
-+ acg->acg_name, active_cmds,
-+ atomic_read(&sess->sess_cmd_count));
-+ }
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct scst_proc_data scst_sessions_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(NULL)
-+ .show = scst_sessions_info_show,
-+};
-+
-+static struct scst_proc_data scst_sgv_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(NULL)
-+ .show = sgv_procinfo_show,
-+};
-+
-+static int scst_groups_names_show(struct seq_file *seq, void *v)
-+{
-+ int res = 0;
-+ struct scst_acg *acg = (struct scst_acg *)seq->private;
-+ struct scst_acn *name;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ list_for_each_entry(name, &acg->acn_list, acn_list_entry) {
-+ seq_printf(seq, "%s\n", name->name);
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct scst_proc_data scst_groups_names_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(scst_proc_groups_names_write)
-+ .show = scst_groups_names_show,
-+};
-+
-+static int scst_groups_addr_method_show(struct seq_file *seq, void *v)
-+{
-+ int res = 0;
-+ struct scst_acg *acg = (struct scst_acg *)seq->private;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ switch (acg->addr_method) {
-+ case SCST_LUN_ADDR_METHOD_FLAT:
-+ seq_printf(seq, "%s\n", "FLAT");
-+ break;
-+ case SCST_LUN_ADDR_METHOD_PERIPHERAL:
-+ seq_printf(seq, "%s\n", "PERIPHERAL");
-+ break;
-+ case SCST_LUN_ADDR_METHOD_LUN:
-+ seq_printf(seq, "%s\n", "LUN");
-+ break;
-+ default:
-+ seq_printf(seq, "%s\n", "UNKNOWN");
-+ break;
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+static struct scst_proc_data scst_groups_addr_method_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(NULL)
-+ .show = scst_groups_addr_method_show,
-+};
-+static int scst_groups_devices_show(struct seq_file *seq, void *v)
-+{
-+ int res = 0;
-+ struct scst_acg *acg = (struct scst_acg *)seq->private;
-+ struct scst_acg_dev *acg_dev;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ seq_printf(seq, "%-60s%-13s%s\n", "Device (host:ch:id:lun or name)",
-+ "LUN", "Options");
-+
-+ list_for_each_entry(acg_dev, &acg->acg_dev_list, acg_dev_list_entry) {
-+ seq_printf(seq, "%-60s%-13lld%s\n",
-+ acg_dev->dev->virt_name,
-+ (long long unsigned int)acg_dev->lun,
-+ acg_dev->rd_only ? "RO" : "");
-+ }
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct scst_proc_data scst_groups_devices_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(scst_proc_groups_devices_write)
-+ .show = scst_groups_devices_show,
-+};
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+
-+static int scst_proc_read_tbl(const struct scst_trace_log *tbl,
-+ struct seq_file *seq,
-+ unsigned long log_level, int *first)
-+{
-+ const struct scst_trace_log *t = tbl;
-+ int res = 0;
-+
-+ while (t->token) {
-+ if (log_level & t->val) {
-+ seq_printf(seq, "%s%s", *first ? "" : " | ", t->token);
-+ *first = 0;
-+ }
-+ t++;
-+ }
-+ return res;
-+}
-+
-+int scst_proc_log_entry_read(struct seq_file *seq, unsigned long log_level,
-+ const struct scst_trace_log *tbl)
-+{
-+ int res = 0, first = 1;
-+
-+ TRACE_ENTRY();
-+
-+ scst_proc_read_tbl(scst_proc_trace_tbl, seq, log_level, &first);
-+
-+ if (tbl)
-+ scst_proc_read_tbl(tbl, seq, log_level, &first);
-+
-+ seq_printf(seq, "%s\n", first ? "none" : "");
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+EXPORT_SYMBOL_GPL(scst_proc_log_entry_read);
-+
-+static int log_info_show(struct seq_file *seq, void *v)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_log_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ res = scst_proc_log_entry_read(seq, trace_flag,
-+ scst_proc_local_trace_tbl);
-+
-+ mutex_unlock(&scst_log_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct scst_proc_data scst_log_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(scst_proc_scsi_tgt_gen_write_log)
-+ .show = log_info_show,
-+ .data = "scsi_tgt",
-+};
-+
-+#endif
-+
-+static int scst_tgt_info_show(struct seq_file *seq, void *v)
-+{
-+ int res = 0;
-+ struct scst_device *dev;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ seq_printf(seq, "%-60s%s\n", "Device (host:ch:id:lun or name)",
-+ "Device handler");
-+ list_for_each_entry(dev, &scst_dev_list, dev_list_entry) {
-+ seq_printf(seq, "%-60s%s\n",
-+ dev->virt_name, dev->handler->name);
-+ }
-+
-+ mutex_unlock(&scst_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct scst_proc_data scst_tgt_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(scst_proc_scsi_tgt_gen_write)
-+ .show = scst_tgt_info_show,
-+};
-+
-+static int scst_threads_info_show(struct seq_file *seq, void *v)
-+{
-+ TRACE_ENTRY();
-+
-+ seq_printf(seq, "%d\n", scst_main_cmd_threads.nr_threads);
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+static struct scst_proc_data scst_threads_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(scst_proc_threads_write)
-+ .show = scst_threads_info_show,
-+};
-+
-+static int scst_scsi_tgtinfo_show(struct seq_file *seq, void *v)
-+{
-+ struct scst_tgt *vtt = seq->private;
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_proc_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ if (vtt->tgtt->read_proc)
-+ res = vtt->tgtt->read_proc(seq, vtt);
-+
-+ mutex_unlock(&scst_proc_mutex);
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct scst_proc_data scst_scsi_tgt_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(scst_proc_scsi_tgt_write)
-+ .show = scst_scsi_tgtinfo_show,
-+};
-+
-+static int scst_dev_handler_info_show(struct seq_file *seq, void *v)
-+{
-+ struct scst_dev_type *dev_type = seq->private;
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (mutex_lock_interruptible(&scst_proc_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ if (dev_type->read_proc)
-+ res = dev_type->read_proc(seq, dev_type);
-+
-+ mutex_unlock(&scst_proc_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct scst_proc_data scst_dev_handler_proc_data = {
-+ SCST_DEF_RW_SEQ_OP(scst_proc_scsi_dev_handler_write)
-+ .show = scst_dev_handler_info_show,
-+};
-+
-+struct proc_dir_entry *scst_create_proc_entry(struct proc_dir_entry *root,
-+ const char *name, struct scst_proc_data *pdata)
-+{
-+ struct proc_dir_entry *p = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ if (root) {
-+ mode_t mode;
-+
-+ mode = S_IFREG | S_IRUGO | (pdata->seq_op.write ? S_IWUSR : 0);
-+ p = create_proc_entry(name, mode, root);
-+ if (p == NULL) {
-+ PRINT_ERROR("Fail to create entry %s in /proc", name);
-+ } else {
-+ p->proc_fops = &pdata->seq_op;
-+ p->data = pdata->data;
-+ }
-+ }
-+
-+ TRACE_EXIT();
-+ return p;
-+}
-+EXPORT_SYMBOL_GPL(scst_create_proc_entry);
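-+
-+/*
-+ * Usage sketch for a target driver (names hypothetical), following the
-+ * scst_proc_data pattern used throughout this file:
-+ *
-+ *	static struct scst_proc_data my_proc_data = {
-+ *		SCST_DEF_RW_SEQ_OP(NULL)
-+ *		.show = my_info_show,
-+ *	};
-+ *
-+ *	p = scst_create_proc_entry(scst_proc_get_tgt_root(vtt), "info",
-+ *				   &my_proc_data);
-+ */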
-+
-+int scst_single_seq_open(struct inode *inode, struct file *file)
-+{
-+ struct scst_proc_data *pdata = container_of(PDE(inode)->proc_fops,
-+ struct scst_proc_data, seq_op);
-+ return single_open(file, pdata->show, PDE(inode)->data);
-+}
-+EXPORT_SYMBOL_GPL(scst_single_seq_open);
-+
-+struct proc_dir_entry *scst_proc_get_tgt_root(
-+ struct scst_tgt_template *vtt)
-+{
-+ return vtt->proc_tgt_root;
-+}
-+EXPORT_SYMBOL_GPL(scst_proc_get_tgt_root);
-+
-+struct proc_dir_entry *scst_proc_get_dev_type_root(
-+ struct scst_dev_type *dtt)
-+{
-+ return dtt->proc_dev_type_root;
-+}
-+EXPORT_SYMBOL_GPL(scst_proc_get_dev_type_root);
-diff -uprN orig/linux-3.2/Documentation/scst/README.scst linux-3.2/Documentation/scst/README.scst
---- orig/linux-3.2/Documentation/scst/README.scst
-+++ linux-3.2/Documentation/scst/README.scst
-@@ -0,0 +1,1535 @@
-+Generic SCSI target mid-level for Linux (SCST)
-+==============================================
-+
-+SCST is designed to provide a unified, consistent interface between
-+SCSI target drivers and the Linux kernel and to simplify target driver
-+development as much as possible. A detailed description of SCST's
-+features and internals can be found on its web page
-+http://scst.sourceforge.net.
-+
-+SCST supports the following I/O modes:
-+
-+ * Pass-through mode with one to many relationship, i.e. when multiple
-+ initiators can connect to the exported pass-through devices, for
-+ the following SCSI devices types: disks (type 0), tapes (type 1),
-+ processors (type 3), CDROMs (type 5), MO disks (type 7), medium
-+ changers (type 8) and RAID controllers (type 0xC).
-+
-+ * FILEIO mode, which allows using files on file systems or block
-+   devices as virtual, remotely available SCSI disks or CDROMs with
-+   the benefits of the Linux page cache.
-+
-+ * BLOCKIO mode, which performs direct block IO with a block device,
-+ bypassing page-cache for all operations. This mode works ideally with
-+ high-end storage HBAs and for applications that either do not need
-+ caching between application and disk or need the large block
-+ throughput.
-+
-+ * "Performance" device handlers, which provide in pseudo pass-through
-+ mode a way for direct performance measurements without overhead of
-+ actual data transferring from/to underlying SCSI device.
-+
-+In addition, SCST supports advanced per-initiator access and device
-+visibility management, so that different initiators can see different
-+sets of devices with different access permissions. See below for
-+details.
-+
-+A full list of SCST features and a comparison with other Linux targets
-+can be found at http://scst.sourceforge.net/comparison.html.
-+
-+
-+Installation
-+------------
-+
-+To see your devices remotely, you need to add a corresponding LUN for
-+them (see below for how). By default, no local devices are seen
-+remotely. There must be a LUN 0 in each set of LUNs (security group),
-+i.e. LU numbering must not start from, e.g., 1. Otherwise you will see
-+no devices on remote initiators and the SCST core will write into the
-+kernel log the message: "tgt_dev for LUN 0 not found, command to
-+unexisting LU?"
-+
-+It is highly recommended to use scstadmin utility for configuring
-+devices and security groups.
-+
-+The flow of SCST initialization should be as follows:
-+
-+1. Load of SCST modules with necessary module parameters, if needed.
-+
-+2. Configure targets, devices, LUNs, etc. using either scstadmin
-+(recommended), or the sysfs interface directly as described below.
-+
-+If you experience problems during modules load or running, check your
-+kernel logs (or run dmesg command for the few most recent messages).
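-+
-+For instance, a minimal start-up with the VDISK handler (module names
-+as used elsewhere in this document; adjust to the handlers you need)
-+could look like:
-+
-+# modprobe scst
-+# modprobe scst_vdisk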
-+
-+IMPORTANT: Without loading an appropriate device handler, the
-+========= corresponding devices will be invisible to remote initiators,
-+           which can lead to holes in the LUN addressing, so automatic
-+           device scanning by the remote SCSI mid-level may not notice
-+           the devices. Therefore you will have to add them manually via
-+           'echo "- - -" >/sys/class/scsi_host/hostX/scan',
-+           where X is the host number.
-+
-+IMPORTANT: Running the target and an initiator on the same host is
-+========= supported, except in the following 2 cases: swap over a target exported
-+ device and using a writable mmap over a file from target
-+ exported device. The latter means you can't mount a file
-+ system over target exported device. In other words, you can
-+ freely use any sg, sd, st, etc. devices imported from target
-+ on the same host, but you can't mount file systems or put
-+ swap on them. This is a limitation of Linux memory/cache
-+ manager, because in this case a memory allocation deadlock is
-+ possible like: system needs some memory -> it decides to
-+ clear some cache -> the cache is needed to be written on a
-+ target exported device -> initiator sends request to the
-+ target located on the same system -> the target needs memory
-+ -> the system needs even more memory -> deadlock.
-+
-+IMPORTANT: In the current version simultaneous access to local SCSI devices
-+========= via standard high-level SCSI drivers (sd, st, sg, etc.) and
-+           SCST's target drivers is unsupported. This is especially
-+           important for commands executed via sg and st that change
-+ the state of devices and their parameters, because that could
-+ lead to data corruption. If any such command is done, at
-+ least related device handler(s) must be restarted. For block
-+ devices READ/WRITE commands using direct disk handler are
-+ generally safe.
-+
-+
-+Usage in failover mode
-+----------------------
-+
-+It is recommended to use TEST UNIT READY ("tur") command to check if
-+SCST target is alive in MPIO configurations.
-+
-+
-+Device handlers
-+---------------
-+
-+Device-specific drivers (device handlers) are plugins for SCST, which
-+help SCST analyze incoming requests and determine parameters specific
-+to various types of devices. If an appropriate device handler for a
-+SCSI device type isn't loaded, SCST doesn't know how to handle devices
-+of this type, so they will be invisible to remote initiators (more
-+precisely, the "LUN not supported" sense code will be returned).
-+
-+In addition to device handlers for real devices, there are VDISK, user
-+space and "performance" device handlers.
-+
-+The VDISK device handler works over files on file systems and turns
-+them into virtual, remotely available SCSI disks or CDROMs. In
-+addition, it allows working directly over a block device, e.g. a local
-+IDE or SCSI disk or even a disk partition, avoiding file system
-+overhead. Compared to sending SCSI commands directly to the SCSI
-+mid-level via scsi_do_req()/scsi_execute_async(), using block devices
-+has the advantage that data is transferred via the system cache, so it
-+is possible to fully benefit from the caching and read-ahead performed
-+by Linux's VM subsystem. The only disadvantage is that in FILEIO mode
-+there is superfluous data copying between the cache and SCST's buffers.
-+This issue is going to be addressed in one of the future releases.
-+Virtual CDROMs are useful for remote installation. See below for
-+details on how to set up and use the VDISK device handler.
-+
-+"Performance" device handlers for disks, MO disks and tapes in their
-+exec() method skip (pretend to execute) all READ and WRITE operations
-+and thus provide a way for direct link performance measurements without
-+overhead of actual data transferring from/to underlying SCSI device.
-+
-+NOTE: Since "perf" device handlers on READ operations don't touch the
-+==== commands' data buffer, it is returned to remote initiators as it
-+ was allocated, without even being zeroed. Thus, "perf" device
-+ handlers impose some security risk, so use them with caution.
-+
-+
-+Compilation options
-+-------------------
-+
-+There are the following compilation options, which can be changed using
-+your favorite kernel configuration Makefile target, e.g. "make xconfig":
-+
-+ - CONFIG_SCST_DEBUG - if defined, turns on some debugging code,
-+ including some logging. Makes the driver considerably bigger and slower,
-+ producing large amount of log data.
-+
-+ - CONFIG_SCST_TRACING - if defined, turns on ability to log events. Makes the
-+ driver considerably bigger and leads to some performance loss.
-+
-+ - CONFIG_SCST_EXTRACHECKS - if defined, adds extra validity checks in
-+ the various places.
-+
-+ - CONFIG_SCST_USE_EXPECTED_VALUES - if not defined (default), initiator
-+ supplied expected data transfer length and direction will be used
-+ only for verification purposes to return error or warn in case if one
-+ of them is invalid. Instead, locally decoded from SCSI command values
-+ will be used. This is necessary for security reasons, because
-+ otherwise a faulty initiator can crash target by supplying invalid
-+ value in one of those parameters. This is especially important in
-+ case of pass-through mode. If CONFIG_SCST_USE_EXPECTED_VALUES is
-+ defined, initiator supplied expected data transfer length and
-+ direction will override the locally decoded values. This might be
-+ necessary if internal SCST commands translation table doesn't contain
-+ SCSI command, which is used in your environment. You can know that if
-+ you enable "minor" trace level and have messages like "Unknown
-+ opcode XX for YY. Should you update scst_scsi_op_table?" in your
-+ kernel log and your initiator returns an error. Also report those
-+ messages in the SCST mailing list scst-devel@lists.sourceforge.net.
-+ Note, that not all SCSI transports support supplying expected values.
-+   You should try enabling this option if you have a pass-through
-+   device that does not work with SCST, for instance, a SATA CDROM.
-+
-+ - CONFIG_SCST_DEBUG_TM - if defined, turns on task management functions
-+ debugging, when on LUN 6 some of the commands will be delayed for
-+ about 60 sec., so making the remote initiator send TM functions, eg
-+ ABORT TASK and TARGET RESET. Also define
-+ CONFIG_SCST_TM_DBG_GO_OFFLINE symbol in the Makefile if you want that
-+ the device eventually become completely unresponsive, or otherwise to
-+ circle around ABORTs and RESETs code. Needs CONFIG_SCST_DEBUG turned
-+ on.
-+
-+ - CONFIG_SCST_STRICT_SERIALIZING - if defined, makes SCST send all commands to
-+ underlying SCSI device synchronously, one after one. This makes task
-+ management more reliable, with cost of some performance penalty. This
-+   is mostly relevant for stateful SCSI devices like tapes, where the
-+   result of a command's execution depends on device settings defined
-+   by previous commands. Disk and RAID devices are stateless in most
-+   cases. The current SCSI core in Linux doesn't allow aborting all
-+   commands reliably if they were sent asynchronously to a stateful
-+   device.
-+ Turned off by default, turn it on if you use stateful device(s) and
-+ need as much error recovery reliability as possible. As a side effect
-+ of CONFIG_SCST_STRICT_SERIALIZING, on kernels below 2.6.30 no kernel
-+ patching is necessary for pass-through device handlers (scst_disk,
-+ etc.).
-+
-+ - CONFIG_SCST_TEST_IO_IN_SIRQ - if defined, allows SCST to submit selected
-+ SCSI commands (TUR and READ/WRITE) from soft-IRQ context (tasklets).
-+ Enabling it will decrease amount of context switches and slightly
-+ improve performance. The goal of this option is to be able to measure
-+ overhead of the context switches. If after enabling this option you
-+ don't see under load in vmstat output on the target significant
-+ decrease of amount of context switches, then your target driver
-+ doesn't submit commands to SCST in IRQ context. For instance,
-+ iSCSI-SCST doesn't do that, but qla2x00t with
-+ CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD disabled - does. This option is
-+ designed to be used with vdisk NULLIO backend.
-+
-+ WARNING! Using this option enabled with other backend than vdisk
-+ NULLIO is unsafe and can lead you to a kernel crash!
-+
-+ - CONFIG_SCST_STRICT_SECURITY - if defined, makes SCST zero allocated data
-+ buffers. Undefining it (default) considerably improves performance
-+ and eases CPU load, but could create a security hole (information
-+ leakage), so enable it, if you have strict security requirements.
-+
-+ - CONFIG_SCST_ABORT_CONSIDER_FINISHED_TASKS_AS_NOT_EXISTING - if defined,
-+ in case when TASK MANAGEMENT function ABORT TASK is trying to abort a
-+ command, which has already finished, remote initiator, which sent the
-+ ABORT TASK request, will receive TASK NOT EXIST (or ABORT FAILED)
-+   response for the ABORT TASK request. This is the more logical
-+   response, since the command had already finished and the attempt to
-+   abort it therefore failed, but some initiators, particularly the
-+   VMware iSCSI initiator, treat a TASK NOT EXIST response as if the
-+   target went crazy and try to RESET it, then sometimes go crazy
-+   themselves. So, this option is disabled by default.
-+
-+ - CONFIG_SCST_MEASURE_LATENCY - if defined, provides in "latency" files
-+ global and per-LUN average commands processing latency statistic. You
-+ can clear already measured results by writing 0 in each file. Note,
-+ you need a non-preemptible kernel to have correct results.
-+
-+HIGHMEM kernel configurations are fully supported, but not recommended
-+for performance reasons.
-+
-+
-+Module parameters
-+-----------------
-+
-+Module scst supports the following parameters:
-+
-+ - scst_threads - allows to set count of SCST's threads. By default it
-+ is CPU count.
-+
-+ - scst_max_cmd_mem - sets maximum amount of memory in MB allowed to be
-+ consumed by the SCST commands for data buffers at any given time. By
-+ default it is approximately TotalMem/4.
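-+
-+For example, to load SCST with 8 threads and at most 512 MB of memory
-+for command data buffers (the values are illustrative):
-+
-+# modprobe scst scst_threads=8 scst_max_cmd_mem=512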
-+
-+
-+SCST sysfs interface
-+--------------------
-+
-+The SCST sysfs interface is designed to be self-descriptive and
-+self-contained. This means that a high-level management tool for it can
-+be written once and automatically support any future sysfs interface
-+changes (attribute additions or removals, new target drivers and dev
-+handlers, etc.) without any modifications. Scstadmin is an example of
-+such a management tool.
-+
-+To achieve that, a management tool should not be implemented around
-+drivers and their attributes, but around the common rules those drivers
-+and attributes follow. You can find those rules in the SysfsRules file.
-+For instance, each SCST sysfs file (attribute) can contain in its last
-+line the mark "[key]". It is added automatically to allow scstadmin and
-+other management tools to see which attributes they should save in the
-+config file. If you are doing manual attribute manipulations, you can
-+ignore this mark.
-+
-+Root of SCST sysfs interface is /sys/kernel/scst_tgt. It has the
-+following entries:
-+
-+ - devices - this is a root subdirectory for all SCST devices
-+
-+ - handlers - this is a root subdirectory for all SCST dev handlers
-+
-+ - max_tasklet_cmd - specifies how many commands at max can be queued in
-+ the SCST core simultaneously on a single CPU from all connected
-+ initiators to allow processing commands on this CPU in soft-IRQ
-+ context in tasklets. If the count of the commands exceeds this value,
-+   then all of them will be processed only in SCST threads. This is to
-+   prevent possible starvation, under heavy load, of processes on the
-+   CPUs serving soft IRQs, and in some cases to improve performance by
-+   spreading load more evenly over the available CPUs.
-+
-+ - sgv - this is a root subdirectory for all SCST SGV caches
-+
-+ - targets - this is a root subdirectory for all SCST targets
-+
-+ - setup_id - allows to read and write SCST setup ID. This ID can be
-+ used in cases, when the same SCST configuration should be installed
-+   on several targets, but the devices exported from those targets
-+   should have different IDs and SNs. For instance, the VDISK dev
-+   handler uses this
-+ ID to generate T10 vendor specific identifier and SN of the devices.
-+
-+ - threads - allows reading and setting the number of global SCST I/O
-+   threads. Those threads are used with async dev handlers, for
-+   instance, vdisk BLOCKIO or NULLIO.
-+
-+ - trace_level - allows to enable and disable various tracing
-+ facilities. See content of this file for help how to use it. See also
-+ section "Dealing with massive logs" for more info how to make correct
-+ logs when you enabled trace levels producing a lot of logs data.
-+
-+ - version - read-only attribute, which allows to see version of
-+ SCST and enabled optional features.
-+
-+ - last_sysfs_mgmt_res - read-only attribute returning completion status
-+ of the last management command. In the sysfs implementation there are
-+ some problems between internal sysfs and internal SCST locking. To
-+ avoid them in some cases sysfs calls can return error with errno
-+   EAGAIN. This doesn't mean the operation failed. It only means that
-+   the operation was queued and has not yet completed. To wait for it
-+   to complete, a management tool should poll this file. If the
-+   operation hasn't yet completed, reading it will also return EAGAIN.
-+   But after it's
-+ completed, it will return the result of this operation (0 for success
-+ or -errno for error).
-+
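-+A minimal polling sketch for a management tool (the path follows the
-+layout above; the sleep interval is arbitrary):
-+
-+# until res=$(cat /sys/kernel/scst_tgt/last_sysfs_mgmt_res 2>/dev/null); do sleep 0.1; done
-+# echo "$res"
-+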
-+"Devices" subdirectory contains subdirectories for each SCST devices.
-+
-+Content of each device's subdirectory is dev handler specific. See
-+documentation for your dev handlers for more info about it as well as
-+SysfsRules file for more info about common to all dev handlers rules.
-+SCST dev handlers can have the following common entries:
-+
-+ - exported - subdirectory containing links to all LUNs where this
-+ device was exported.
-+
-+ - handler - if a dev handler is assigned to this device, this link
-+   points to it. The handler may be unassigned for pass-through devices.
-+
-+ - threads_num - shows and allows setting the number of threads in this
-+   device's threads pool. If 0, no threads will be created and the
-+   global SCST threads pool will be used. If <0, creation of the threads
-+   pool is prohibited.
-+
-+ - threads_pool_type - shows and allows setting the threads pool type.
-+ Possible values: "per_initiator" and "shared". When the value is
-+ "per_initiator" (default), each session from each initiator will use
-+ separate dedicated pool of threads. When the value is "shared", all
-+ sessions from all initiators will share the same per-device pool of
-+ threads. Valid only if threads_num attribute >0.
-+
-+ - dump_prs - allows to dump persistent reservations information in the
-+ kernel log.
-+
-+ - type - SCSI type of this device
-+
-+See below for more information about other entries of this subdirectory
-+of the standard SCST dev handlers.
-+
-+"Handlers" subdirectory contains subdirectories for each SCST dev
-+handler.
-+
-+Content of each handler's subdirectory is dev handler specific. See
-+documentation for your dev handlers for more info about it as well as
-+SysfsRules file for more info about common to all dev handlers rules.
-+SCST dev handlers can have the following common entries:
-+
-+ - mgmt - this entry allows creating virtual devices and setting their
-+   attributes (for virtual device dev handlers) or assigning/unassigning
-+   real SCSI devices to/from this dev handler (for pass-through dev
-+   handlers).
-+
-+ - trace_level - allows to enable and disable various tracing
-+ facilities. See content of this file for help how to use it. See also
-+ section "Dealing with massive logs" for more info how to make correct
-+ logs when you enabled trace levels producing a lot of logs data.
-+
-+ - type - SCSI type of devices served by this dev handler.
-+
-+See below for more information about other entries of this subdirectory
-+of the standard SCST dev handlers.
-+
-+"Sgv" subdirectory contains statistic information of SCST SGV caches. It
-+has the following entries:
-+
-+ - None, one or more subdirectories for each existing SGV cache.
-+
-+ - global_stats - file containing global SGV caches statistics.
-+
-+Each SGV cache's subdirectory has the following item:
-+
-+ - stats - file containing statistics for this SGV cache.
-+
-+"Targets" subdirectory contains subdirectories for each SCST target.
-+
-+Content of each target's subdirectory is target specific. See
-+documentation for your target for more info about it as well as
-+SysfsRules file for more info about common to all targets rules.
-+Every target should have at least the following entries:
-+
-+ - ini_groups - subdirectory, which contains the initiator-oriented
-+   access control information and allows defining it; see below.
-+
-+ - luns - subdirectory, which contains the list of available LUNs in
-+   the target-oriented access control and allows defining it; see below.
-+
-+ - sessions - subdirectory containing the sessions connected to this target.
-+
-+ - comment - this attribute can be used to store any human readable info
-+ to help identify target. For instance, to help identify the target's
-+   mapping to the corresponding hardware port. It isn't used by SCST in
-+   any way.
-+
-+ - enabled - using this attribute you can enable or disable this target.
-+   It allows you to finish configuring it before it starts accepting new
-+   connections. 0 by default.
-+
-+ - addr_method - used LUNs addressing method. Possible values:
-+ "Peripheral" and "Flat". Most initiators work well with Peripheral
-+ addressing method (default), but some (HP-UX, for instance) may
-+ require Flat method. This attribute is also available in the
-+ initiators security groups, so you can assign the addressing method
-+ on per-initiator basis.
-+
-+ - cpu_mask - defines CPU affinity mask for threads serving this target.
-+ For threads serving LUNs it is used only for devices with
-+ threads_pool_type "per_initiator".
-+
-+ - io_grouping_type - defines how I/O from sessions to this target are
-+ grouped together. This I/O grouping is very important for
-+ performance. By setting this attribute in a right value, you can
-+ considerably increase performance of your setup. This grouping is
-+ performed only if you use CFQ I/O scheduler on the target and for
-+ devices with threads_num >= 0 and, if threads_num > 0, with
-+ threads_pool_type "per_initiator". Possible values:
-+ "this_group_only", "never", "auto", or I/O group number >0. When the
-+ value is "this_group_only" all I/O from all sessions in this target
-+ will be grouped together. When the value is "never", I/O from
-+ different sessions will not be grouped together, i.e. all sessions in
-+ this target will have separate dedicated I/O groups. When the value
-+ is "auto" (default), all I/O from initiators with the same name
-+ (iSCSI initiator name, for instance) in all targets will be grouped
-+ together with a separate dedicated I/O group for each initiator name.
-+ For iSCSI this mode works well, but other transports usually use
-+ different initiator names for different sessions, so using such
-+ transports in MPIO configurations you should either use value
-+ "this_group_only", or an explicit I/O group number. This attribute is
-+ also available in the initiators security groups, so you can assign
-+ the I/O grouping on per-initiator basis. See below for more info how
-+ to use this attribute.
-+
-+ - rel_tgt_id - allows to read or write SCSI Relative Target Port
-+ Identifier attribute. This identifier is used to identify SCSI Target
-+ Ports by some SCSI commands, mainly by Persistent Reservations
-+ commands. This identifier must be unique among all SCST targets, but
-+   for convenience SCST allows disabled targets to have a non-unique
-+   rel_tgt_id. In this case SCST will not allow enabling the target
-+   until its rel_tgt_id becomes unique. By default SCST initializes this
-+   attribute to a unique value.
-+
-+A target driver may have also the following entries:
-+
-+ - "hw_target" - if the target driver supports both hardware and virtual
-+ targets (for instance, an FC adapter supporting NPIV, which has
-+ hardware targets for its physical ports as well as virtual NPIV
-+   targets), this read-only attribute will exist for all hardware
-+   targets and contain the value 1.
-+
-+Subdirectory "sessions" contains one subdirectory for each connected
-+session with name equal to name of the connected initiator.
-+
-+Each session subdirectory contains the following entries:
-+
-+ - initiator_name - contains initiator name
-+
-+ - force_close - optional write-only attribute, which allows to force
-+ close this session.
-+
-+ - active_commands - contains the number of active SCSI commands in this
-+   session, i.e. commands not yet executed or currently being executed.
-+
-+ - commands - contains overall number of SCSI commands in this session.
-+
-+ - latency - if CONFIG_SCST_MEASURE_LATENCY enabled, contains latency
-+ statistics for this session.
-+
-+ - luns - a link pointing to the corresponding set of LUNs (security
-+   group) this session was attached to.
-+
-+ - One or more "lunX" subdirectories, where 'X' is a number, for each LUN
-+ this session has (see below).
-+
-+ - other target driver specific attributes and subdirectories.
-+
-+See below description of the VDISK's sysfs interface for samples.
-+
-+
-+Access and devices visibility management (LUN masking)
-+------------------------------------------------------
-+
-+Access and device visibility management allows an initiator or a group
-+of initiators to see different devices with different LUNs and the
-+necessary access permissions.
-+
-+SCST supports two modes of access control:
-+
-+1. Target-oriented. In this mode you define for each target a default
-+set of LUNs, which are accessible to all initiators, connected to that
-+target. This is a regular access control mode, which people usually mean
-+thinking about access control in general. For instance, in IET this is
-+the only supported mode.
-+
-+2. Initiator-oriented. In this mode you define which LUNs are accessible
-+for each initiator. In this mode, for each set of one or more initiators
-+that should access the same set of devices with the same LUNs, you
-+create a separate security group, then add to it the devices and the
-+names of the allowed initiator(s).
-+
-+Both modes can be used simultaneously. In this case the
-+initiator-oriented mode has higher priority than the target-oriented
-+one, i.e. initiators are first searched for in all security groups
-+defined for this target and, if none matches, the target's default set
-+of LUNs is used. This set of LUNs might be empty, in which case the
-+initiator will not see any LUNs from the target.
-+
-+You can at any time find out which set of LUNs each session is assigned
-+to by looking where link
-+/sys/kernel/scst_tgt/targets/target_driver/target_name/sessions/initiator_name/luns
-+points to.
-+
-+To configure the target-oriented access control SCST provides the
-+following interface. Each target's sysfs subdirectory
-+(/sys/kernel/scst_tgt/targets/target_driver/target_name) has "luns"
-+subdirectory. This subdirectory contains the list of already defined
-+target-oriented access control LUNs for this target as well as file
-+"mgmt". This file has the following commands, which you can send to it,
-+for instance, using "echo" shell command. You can always get a small
-+help about supported commands by looking inside this file. "Parameters"
-+are one or more param_name=value pairs separated by ';'.
-+
-+ - "add H:C:I:L lun [parameters]" - adds a pass-through device with
-+ host:channel:id:lun with LUN "lun". Optionally, the device could be
-+ marked as read only by using parameter "read_only". The recommended
-+ way to find out H:C:I:L numbers is use of lsscsi utility.
-+
-+ - "replace H:C:I:L lun [parameters]" - replaces by pass-through device
-+ with host:channel:id:lun existing with LUN "lun" device with
-+ generation of INQUIRY DATA HAS CHANGED Unit Attention. If the old
-+ device doesn't exist, this command acts as the "add" command.
-+ Optionally, the device could be marked as read only by using
-+ parameter "read_only". The recommended way to find out H:C:I:L
-+ numbers is use of lsscsi utility.
-+
-+ - "add VNAME lun [parameters]" - adds a virtual device with name VNAME
-+ with LUN "lun". Optionally, the device could be marked as read only
-+ by using parameter "read_only".
-+
-+ - "replace VNAME lun [parameters]" - replaces by virtual device
-+ with name VNAME existing with LUN "lun" device with generation of
-+ INQUIRY DATA HAS CHANGED Unit Attention. If the old device doesn't
-+ exist, this command acts as the "add" command. Optionally, the device
-+ could be marked as read only by using parameter "read_only".
-+
-+ - "del lun" - deletes LUN lun
-+
-+ - "clear" - clears the list of devices
-+
-+To configure the initiator-oriented access control SCST provides the
-+following interface. Each target's sysfs subdirectory
-+(/sys/kernel/scst_tgt/targets/target_driver/target_name) has "ini_groups"
-+subdirectory. This subdirectory contains the list of already defined
-+security groups for this target as well as the file "mgmt". This file
-+accepts the following commands, which you can send to it, for instance,
-+using the "echo" shell command. You can always get brief help about the
-+supported commands by looking inside this file.
-+
-+ - "create GROUP_NAME" - creates a new security group.
-+
-+ - "del GROUP_NAME" - deletes a new security group.
-+
-+Each security group's subdirectory contains 2 subdirectories: initiators
-+and luns as well as the following attributes: addr_method, cpu_mask and
-+io_grouping_type. See above description of them.
-+
-+Each "initiators" subdirectory contains list of added to this groups
-+initiator as well as as well as file "mgmt". This file has the following
-+commands, which you can send to it, for instance, using "echo" shell
-+command. You can always get a small help about supported commands by
-+looking inside this file.
-+
-+ - "add INITIATOR_NAME" - adds initiator with name INITIATOR_NAME to the
-+ group.
-+
-+ - "del INITIATOR_NAME" - deletes initiator with name INITIATOR_NAME
-+ from the group.
-+
-+ - "move INITIATOR_NAME DEST_GROUP_NAME" moves initiator with name
-+ INITIATOR_NAME from the current group to group with name
-+ DEST_GROUP_NAME.
-+
-+ - "clear" - deletes all initiators from this group.
-+
-+For "add" and "del" commands INITIATOR_NAME can be a simple DOS-type
-+patterns, containing '*' and '?' symbols. '*' means match all any
-+symbols, '?' means match only any single symbol. For instance,
-+"blah.xxx" will match "bl?h.*". Additionally, you can use negative sign
-+'!' to revert the value of the pattern. For instance, "ah.xxx" will
-+match "!bl?h.*".
-+
-+Each "luns" subdirectory contains the list of already defined LUNs for
-+this group as well as the file "mgmt". The content of this file and the
-+list of commands it accepts are fully identical to those of the "luns"
-+subdirectory of the target-oriented access control.
-+
-+Examples:
-+
-+ - echo "create INI" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/ini_groups/mgmt -
-+ creates security group INI for target iqn.2006-10.net.vlnb:tgt1.
-+
-+ - echo "add 2:0:1:0 11" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/ini_groups/INI/luns/mgmt -
-+ adds a pass-through device sitting on host 2, channel 0, ID 1, LUN 0
-+ to group with name INI as LUN 11.
-+
-+ - echo "add disk1 0" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/ini_groups/INI/luns/mgmt -
-+ adds a virtual disk with name disk1 to group with name INI as LUN 0.
-+
-+ - echo "add 21:*:e0:?b:83:*" >/sys/kernel/scst_tgt/targets/21:00:00:a0:8c:54:52:12/ini_groups/INI/initiators/mgmt -
-+ adds a pattern to group with name INI to Fibre Channel target with
-+ WWN 21:00:00:a0:8c:54:52:12, which matches WWNs of Fibre Channel
-+ initiator ports.
-+
-+Consider you need to have an iSCSI target with name
-+"iqn.2007-05.com.example:storage.disk1.sys1.xyz", which should export
-+virtual device "dev1" with LUN 0 and virtual device "dev2" with LUN 1,
-+but initiator with name
-+"iqn.2007-05.com.example:storage.disk1.spec_ini.xyz" should see only
-+virtual device "dev2" read only with LUN 0. To achieve that you should
-+do the following commands:
-+
-+# echo "iqn.2007-05.com.example:storage.disk1.sys1.xyz" >/sys/kernel/scst_tgt/targets/iscsi/mgmt
-+# echo "add dev1 0" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2007-05.com.example:storage.disk1.sys1.xyz/luns/mgmt
-+# echo "add dev2 1" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2007-05.com.example:storage.disk1.sys1.xyz/luns/mgmt
-+# echo "create SPEC_INI" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2007-05.com.example:storage.disk1.sys1.xyz/ini_groups/mgmt
-+# echo "add dev2 0 read_only=1" \
-+ >/sys/kernel/scst_tgt/targets/iscsi/iqn.2007-05.com.example:storage.disk1.sys1.xyz/ini_groups/SPEC_INI/luns/mgmt
-+# echo "iqn.2007-05.com.example:storage.disk1.spec_ini.xyz" \
-+ >/sys/kernel/scst_tgt/targets/iscsi/iqn.2007-05.com.example:storage.disk1.sys1.xyz/ini_groups/SPEC_INI/initiators/mgmt
-+
-+For Fibre Channel or SAS in the above example you should use target's
-+and initiator ports WWNs instead of iSCSI names.
-+
-+It is highly recommended to use the scstadmin utility instead of the
-+low-level interface described in this section.
-+
-+IMPORTANT
-+=========
-+
-+There must be a LUN 0 in each set of LUNs, i.e. LU numbering must not
-+start from, e.g., 1. Otherwise you will see no devices on remote
-+initiators and SCST core will write into the kernel log message: "tgt_dev
-+for LUN 0 not found, command to unexisting LU?"
-+
-+IMPORTANT
-+=========
-+
-+All the access control must be fully configured BEFORE the corresponding
-+target is enabled. When you enable a target, it will immediately start
-+accepting new connections, hence creating new sessions, and those new
-+sessions will be assigned to security groups according to the
-+*currently* configured access control settings. For instance, a session
-+created before you have created security group "HOST004" will be
-+assigned to the default target's set of LUNs, instead of the "HOST004"
-+group as you may need, because "HOST004" doesn't exist yet. So, you must
-+configure all the security groups before new connections from the
-+initiators are created, i.e. before the target is enabled.
-+
-+
-+VDISK device handler
-+--------------------
-+
-+VDISK has 4 built-in dev handlers: vdisk_fileio, vdisk_blockio,
-+vdisk_nullio and vcdrom. Roots of their sysfs interface are
-+/sys/kernel/scst_tgt/handlers/handler_name, e.g. for vdisk_fileio:
-+/sys/kernel/scst_tgt/handlers/vdisk_fileio. Each root has the following
-+entries:
-+
-+ - None, one or more links to devices with name equal to names
-+ of the corresponding devices.
-+
-+ - trace_level - allows to enable and disable various tracing
-+ facilities. See content of this file for help how to use it. See also
-+ section "Dealing with massive logs" for more info how to make correct
-+ logs when you enabled trace levels producing a lot of logs data.
-+
-+ - mgmt - main management entry, which allows adding/deleting VDISK
-+   devices of the corresponding type.
-+
-+The "mgmt" file has the following commands, which you can send to it,
-+for instance, using "echo" shell command. You can always get a small
-+help about supported commands by looking inside this file. "Parameters"
-+are one or more param_name=value pairs separated by ';'.
-+
-+ - echo "add_device device_name [parameters]" - adds a virtual device
-+ with name device_name and specified parameters (see below)
-+
-+ - echo "del_device device_name" - deletes a virtual device with name
-+ device_name.
-+
-+Handler vdisk_fileio provides FILEIO mode to create virtual devices.
-+This mode uses files as the backend and accesses them using regular
-+read()/write() file calls, which allows using the full power of the
-+Linux page cache. The following parameters are possible for
-+vdisk_fileio:
-+
-+ - filename - specifies path and file name of the backend file. The path
-+ must be absolute.
-+
-+ - blocksize - specifies block size used by this virtual device. The
-+ block size must be power of 2 and >= 512 bytes. Default is 512.
-+
-+ - write_through - disables write-back caching. Note, this option makes
-+   sense only if you also *manually* disable the write-back cache in
-+ *all* your backstorage devices and make sure it's actually disabled,
-+ since many devices are known to lie about this mode to get better
-+ benchmark results. Default is 0.
-+
-+ - read_only - read only. Default is 0.
-+
-+ - o_direct - disables both read and write caching. This mode isn't
-+ currently fully implemented, you should use user space fileio_tgt
-+ program in O_DIRECT mode instead (see below).
-+
-+ - nv_cache - enables "non-volatile cache" mode. In this mode it is
-+   assumed that the target has a good UPS with the ability to cleanly
-+   shut down the target in case of power failure, and that it is free
-+   of software/hardware bugs, i.e. all data from the target's cache are
-+   guaranteed sooner or later to reach the media. Hence all data
-+   synchronization with media operations, like SYNCHRONIZE_CACHE, is
-+   ignored to improve performance. Also in this mode the target reports
-+   to
-+ initiators that the corresponding device has write-through cache to
-+ disable all write-back cache workarounds used by initiators. Use with
-+   extreme caution, since in this mode, after a crash of the target,
-+   journaled file systems don't guarantee consistency after journal
-+   recovery, therefore a manual fsck MUST be run. Note that, since the
-+   journal barrier protection (see the "IMPORTANT" note below) is
-+   usually turned off, enabling NV_CACHE may change nothing from a data
-+   protection
-+ point of view, since no data synchronization with media operations
-+ will go from the initiator. This option overrides "write_through"
-+ option. Disabled by default.
-+
-+ - thin_provisioned - enables the thin provisioning facility, whereby
-+   remote initiators can unmap blocks of storage they no longer need.
-+   The backend storage must also support this facility.
-+
-+ - removable - with this flag set the device is reported to remote
-+ initiators as removable.
-+
-+Handler vdisk_blockio provides BLOCKIO mode to create virtual devices.
-+This mode performs direct block I/O with a block device, bypassing the
-+page cache for all operations. This mode works ideally with high-end
-+storage HBAs and for applications that either do not need caching
-+between application and disk or need the large block throughput. See
-+below for more info.
-+
-+The following parameters are possible for vdisk_blockio: filename,
-+blocksize, nv_cache, read_only, removable, thin_provisioned. See
-+vdisk_fileio above for a description of those parameters.
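-+
-+For example, assuming /dev/sdc is the block device you want to export:
-+
-+echo "add_device disk2 filename=/dev/sdc" >/sys/kernel/scst_tgt/handlers/vdisk_blockio/mgmt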
-+
-+Handler vdisk_nullio provides NULLIO mode to create virtual devices. In
-+this mode no real I/O is done, but success is returned to initiators.
-+It is intended to be used for performance measurements in the same way
-+as the "*_perf" handlers. The following parameters are possible for
-+vdisk_nullio: blocksize, read_only, removable. See vdisk_fileio above
-+for a description of those parameters.
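-+
-+For example (the device name is arbitrary):
-+
-+echo "add_device nullio1 blocksize=4096" >/sys/kernel/scst_tgt/handlers/vdisk_nullio/mgmt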
-+
-+Handler vcdrom allows emulation of a virtual CDROM device using an ISO
-+file as backend. It doesn't have any parameters.
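-+
-+For example, to create the virtual CDROM device "cdrom" referred to in
-+the filename examples below:
-+
-+echo "add_device cdrom" >/sys/kernel/scst_tgt/handlers/vcdrom/mgmt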
-+
-+For example:
-+
-+echo "add_device disk1 filename=/disk1; blocksize=4096; nv_cache=1" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
-+
-+will create a FILEIO virtual device disk1 with backend file /disk1,
-+block size 4K and NV_CACHE enabled.
-+
-+Each vdisk_fileio's device has the following attributes in
-+/sys/kernel/scst_tgt/devices/device_name:
-+
-+ - filename - contains path and file name of the backend file.
-+
-+ - blocksize - contains block size used by this virtual device.
-+
-+ - write_through - contains status of write back caching of this virtual
-+ device.
-+
-+ - read_only - contains read only status of this virtual device.
-+
-+ - o_direct - contains O_DIRECT status of this virtual device.
-+
-+ - nv_cache - contains NV_CACHE status of this virtual device.
-+
-+ - thin_provisioned - contains thin provisioning status of this virtual
-+ device.
-+
-+ - removable - contains removable status of this virtual device.
-+
-+ - size_mb - contains size of this virtual device in MB.
-+
-+ - t10_dev_id - contains and allows one to set the T10 vendor specific
-+ identifier for the Device Identification VPD page (0x83) of INQUIRY
-+ data. By default the VDISK handler always generates t10_dev_id for
-+ every newly created device at creation time, based on the device name
-+ and the scst_vdisk_ID scst_vdisk.ko module parameter (see below).
-+
-+ - usn - contains the virtual device's serial number reported in
-+ INQUIRY data. It is created at device creation time based on the
-+ device name and the scst_vdisk_ID scst_vdisk.ko module parameter (see
-+ below).
-+
-+ - type - contains SCSI type of this virtual device.
-+
-+ - resync_size - write only attribute, which makes vdisk_fileio
-+ rescan the size of the backend file. It is useful if the file has
-+ changed, for instance, if you resized it.
-+
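-+For illustration, triggering the rescan could look like this (the
-+value written and the device name disk1 are illustrative):
-+
-+echo 1 >/sys/kernel/scst_tgt/devices/disk1/resync_size
-+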
-+For example:
-+
-+/sys/kernel/scst_tgt/devices/disk1
-+|-- blocksize
-+|-- exported
-+| |-- export0 -> ../../../targets/iscsi/iqn.2006-10.net.vlnb:tgt/luns/0
-+| |-- export1 -> ../../../targets/iscsi/iqn.2006-10.net.vlnb:tgt/ini_groups/INI/luns/0
-+| |-- export2 -> ../../../targets/iscsi/iqn.2006-10.net.vlnb:tgt1/luns/0
-+| |-- export3 -> ../../../targets/iscsi/iqn.2006-10.net.vlnb:tgt1/ini_groups/INI1/luns/0
-+| |-- export4 -> ../../../targets/iscsi/iqn.2006-10.net.vlnb:tgt1/ini_groups/INI2/luns/0
-+|-- filename
-+|-- handler -> ../../handlers/vdisk_fileio
-+|-- nv_cache
-+|-- o_direct
-+|-- read_only
-+|-- removable
-+|-- resync_size
-+|-- size_mb
-+|-- t10_dev_id
-+|-- thin_provisioned
-+|-- threads_num
-+|-- threads_pool_type
-+|-- type
-+|-- usn
-+`-- write_through
-+
-+Each vdisk_blockio's device has the following attributes in
-+/sys/kernel/scst_tgt/devices/device_name: blocksize, filename, nv_cache,
-+read_only, removable, resync_size, size_mb, t10_dev_id,
-+thin_provisioned, threads_num, threads_pool_type, type, usn. See the
-+above description of those parameters.
-+
-+Each vdisk_nullio's device has the following attributes in
-+/sys/kernel/scst_tgt/devices/device_name: blocksize, read_only,
-+removable, size_mb, t10_dev_id, threads_num, threads_pool_type, type,
-+usn. See the above description of those parameters.
-+
-+Each vcdrom's device has the following attributes in
-+/sys/kernel/scst_tgt/devices/device_name: filename, size_mb,
-+t10_dev_id, threads_num, threads_pool_type, type, usn. See the above
-+description of those parameters. The exception is the filename
-+attribute: for vcdrom it is writable. Writing to it allows one to
-+virtually insert or change virtual CD media in the virtual CDROM
-+device. For example:
-+
-+ - echo "/image.iso" >/sys/kernel/scst_tgt/devices/cdrom/filename - will
-+ insert file /image.iso as virtual media into the virtual CDROM cdrom.
-+
-+ - echo "" >/sys/kernel/scst_tgt/devices/cdrom/filename - will remove
-+ "media" from the virtual CDROM cdrom.
-+
-+Additionally the VDISK handler has a module parameter "num_threads",
-+which specifies the count of I/O threads for each FILEIO VDISK or
-+VCDROM device. If you have a workload which tends to produce rather
-+random accesses (e.g. DB-like), you should increase this count to a
-+bigger value, like 32. If you have a rather sequential workload, you
-+should decrease it to a lower value, like the number of CPUs on the
-+target or even 1. Due to some limitations of the Linux I/O subsystem,
-+increasing the number of I/O threads too much leads to a sequential
-+performance drop, especially with the deadline scheduler, so decreasing
-+it can improve sequential performance. The default provides a good
-+compromise between random and sequential accesses.
-+
-+You shouldn't be afraid of having too many VDISK I/O threads if you
-+have many VDISK devices. Kernel threads consume a very small amount of
-+resources (several KBs) and only the necessary threads will be used by
-+SCST, so the threads will not trash your system.
-+
-+CAUTION: If you partitioned/formatted your device with block size X, *NEVER*
-+======== ever try to export and then mount it (even accidentally) with another
-+ block size. Otherwise you can *instantly* damage it pretty
-+ badly as well as all your data on it. Messages on the
-+ initiator like "attempt to access beyond end of device" are
-+ the sign of such damage.
-+
-+ Moreover, if you want to compare how well different block sizes
-+ work for you, you **MUST** EVERY TIME AFTER CHANGING BLOCK SIZE
-+ **COMPLETELY** **WIPE OFF** ALL THE DATA FROM THE DEVICE. In
-+ other words, THE **WHOLE** DEVICE **MUST** HAVE ONLY **ZEROS**
-+ AS THE DATA AFTER YOU SWITCH TO THE NEW BLOCK SIZE. Switching
-+ block sizes isn't like switching between FILEIO and BLOCKIO:
-+ after changing the block size, all data previously written
-+ with another block size MUST BE ERASED. Otherwise you will see
-+ a full set of very weird behaviors, because block addressing
-+ will have changed, but in most cases initiators will have no
-+ possibility to detect that old addresses written on the device
-+ in, e.g., the partition table no longer refer to what they are
-+ intended to refer.
-+
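-+A hedged sketch of such a wipe (device and file names as well as the
-+size are illustrative):
-+
-+# for a backend block device: zero it completely
-+dd if=/dev/zero of=/dev/sdb bs=1M
-+# for a backend file: simply recreate it, e.g. 10GB of zeros
-+dd if=/dev/zero of=/disk1 bs=1M count=10240
-+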
-+IMPORTANT: Some disk and partition table management utilities don't support
-+========= block sizes >512 bytes, therefore make sure that your favorite one
-+ supports them. Currently only cfdisk is known to work only
-+ with 512 byte blocks, while other utilities like fdisk on
-+ Linux or the standard disk manager on Windows are proven to
-+ work well with non-512 byte blocks. Note that if you export a
-+ disk file or device with some block size different from the
-+ one with which it was already partitioned, you could get
-+ various weird things like utilities hanging up or other
-+ unexpected behavior. Hence, to be sure, zero the exported file
-+ or device before the first access to it from the remote
-+ initiator with another block size. On a Windows initiator make
-+ sure you "Set Signature" in the disk manager on the drive
-+ imported from the target before doing any other partitioning
-+ on it. After you have successfully mounted a file system over
-+ a non-512 byte block size device, the block size stops
-+ mattering; any program will work with files on such a file
-+ system.
-+
-+
-+Dealing with massive logs
-+-------------------------
-+
-+If you want to enable via the "trace_level" file logging levels which
-+produce a lot of events, like "debug", then to not lose logged events
-+you should also:
-+
-+ * Increase the CONFIG_LOG_BUF_SHIFT variable in the .config of your
-+ kernel to a much bigger value, then recompile the kernel. For
-+ example, value 25 will provide good protection from logging overflow
-+ even under a high volume of logging events. To use it you will need
-+ to modify the maximum allowed value for CONFIG_LOG_BUF_SHIFT in the
-+ corresponding Kconfig file to 25 as well (see the sketch after this
-+ list).
-+
-+ * Change /etc/syslog.conf or the other config file of your favorite
-+ logging program to store kernel logs in an asynchronous manner. For
-+ example, you can add to rsyslog.conf the line
-+ "kern.info -/var/log/kernel" and add "kern.none" to the line for
-+ /var/log/messages, so the resulting line would look like:
-+
-+ "*.info;kern.none;mail.none;authpriv.none;cron.none /var/log/messages"
-+
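-+A sketch of the two edits from the first bullet (the Kconfig location
-+is for mainline kernels; adjust for yours):
-+
-+# in the kernel .config, before recompiling:
-+CONFIG_LOG_BUF_SHIFT=25
-+# and in init/Kconfig, raise the upper bound of the "range" line
-+# under "config LOG_BUF_SHIFT" to 25
-+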
-+
-+Persistent Reservations
-+-----------------------
-+
-+SCST implements Persistent Reservations with the full set of
-+capabilities, including "Persistence Through Power Loss".
-+
-+The "Persistence Through Power Loss" data are saved in /var/lib/scst/pr
-+in files with the same names as the names of the corresponding devices.
-+This directory also contains backup versions of those files with the
-+suffix ".1". Those backup files are used in case of a power or other
-+failure to prevent the Persistent Reservation information from being
-+corrupted during an update.
-+
-+Persistent Reservations are available on all transports implementing
-+the get_initiator_port_transport_id() callback. Transports not
-+implementing this callback will act in one of 2 possible scenarios
-+("all or nothing"):
-+
-+1. If a device has such a transport connected and doesn't have
-+persistent reservations, it will refuse Persistent Reservations
-+commands as if it doesn't support them.
-+
-+2. If a device has persistent reservations, all initiators newly
-+connecting via such transports will not see this device. After all
-+persistent reservations on this device are released, the initiators
-+will see it upon reconnect.
-+
-+
-+Caching
-+-------
-+
-+By default, for performance reasons, VDISK FILEIO devices use the write
-+back caching policy.
-+
-+Generally, write back caching is safe to use and its danger is greatly
-+overestimated, because most modern (especially Enterprise level)
-+applications are well prepared to work with write back cached storage.
-+In particular, all transaction-based applications are. Those
-+applications flush the cache to completely avoid ANY data loss on a
-+crash or power failure. For instance, journaled file systems flush the
-+cache on each metadata update, so they survive power/hardware/software
-+failures pretty well.
-+
-+Since write back caching is always on locally on initiators, if an
-+application cares about its data consistency, it flushes the cache
-+when necessary or on every write, if it opens files with O_SYNC. If it
-+doesn't care, it doesn't flush the cache. As long as the cache flushes
-+are propagated to the storage, write back caching on it doesn't make
-+any difference. If an application doesn't flush the cache, it's doomed
-+to lose data in case of a crash or power failure, no matter where this
-+cache is located, locally or on the storage.
-+
-+To illustrate that, consider, for example, a user who wants to copy the
-+/src directory to the /dst directory reliably, i.e. so that after the
-+copy finishes no power failure or software/hardware crash could lead to
-+a loss of the data in /dst. There are 2 ways to achieve this. Let's
-+suppose for simplicity that cp opens files for writing with the O_SYNC
-+flag, hence bypassing the local cache.
-+
-+1. Slow. Make the device behind /dst work in write through caching
-+mode and then run "cp -a /src /dst".
-+
-+2. Fast. Let the device behind /dst work in write back caching mode
-+and then run "cp -a /src /dst; sync". The reliability of the result is
-+the same, but it's much faster than (1). Nobody would care if a crash
-+happened during the copy, because after recovery the leftovers from
-+the uncompleted attempt would simply be deleted and the operation would
-+be restarted from the very beginning.
-+
-+So, you can see that in (2) there is no danger of ANY data loss from
-+write back caching. Moreover, since in practice cp doesn't open files
-+for writing with the O_SYNC flag, to get the copy done reliably the
-+sync command must be called after cp anyway, so enabling write back
-+caching wouldn't make any difference for reliability.
-+
-+You can also consider it from another side. Modern HDDs have at least
-+16MB of cache working in write back mode by default, so for a 10-drive
-+RAID that is 160MB of write back cache. How many people are happy with
-+it and how many have disabled the write back cache of their HDDs?
-+Almost all and almost nobody correspondingly. Moreover, many HDDs lie
-+about the state of their cache and report write through while working
-+in write back mode. They are also successfully used.
-+
-+Note, the Linux I/O subsystem guarantees to propagate cache flushes to
-+the storage only when using data protection barriers, which are usually
-+turned off by default (see http://lwn.net/Articles/283161). Without
-+barriers enabled, Linux doesn't provide a guarantee that after
-+sync()/fsync() all written data have really hit permanent storage. They
-+can be stored in the cache of your backstorage devices and, hence, lost
-+on a power failure event. Thus, even with write-through cache mode, you
-+still either need to enable barriers on your backend file system on the
-+target (for direct /dev/sdX devices this is, indeed, impossible), or
-+need a good UPS to protect yourself from uncommitted data loss. Some
-+info about barriers from the XFS point of view can be found at
-+http://oss.sgi.com/projects/xfs/faq.html#wcache. On Linux initiators,
-+for Ext3 and ReiserFS file systems the barrier protection can be turned
-+on using the "barrier=1" and "barrier=flush" mount options
-+correspondingly. You can check whether the barriers are turned on or
-+off by looking in /proc/mounts. Windows and, AFAIK, other UNIXes don't
-+need any special explicit options and do the necessary barrier actions
-+on write-back caching devices by default.
-+
-+To limit this data loss with write back caching you can use the files
-+in /proc/sys/vm to limit the amount of unflushed data in the system
-+cache (see the sketch below).
-+
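-+A minimal sketch; the percentage values are illustrative, not
-+recommendations:
-+
-+# start background writeback after 5% of RAM is dirty (unflushed)
-+echo 5 >/proc/sys/vm/dirty_background_ratio
-+# throttle writers once 10% of RAM is dirty
-+echo 10 >/proc/sys/vm/dirty_ratio
-+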
-+If you for some reason have to use VDISK FILEIO devices in write
-+through caching mode, don't forget to disable internal caching on their
-+backend devices or make sure they have an additional battery or
-+supercapacitor power supply on board. Otherwise, on a power failure you
-+would still lose all the not yet saved data in the devices' internal
-+cache.
-+
-+Note, on some real-life workloads write through caching might perform
-+better than write back caching with the barrier protection turned on.
-+
-+
-+BLOCKIO VDISK mode
-+------------------
-+
-+This module works best for these types of scenarios:
-+
-+1) Data that are not aligned to 4K sector boundaries and <4K block sizes
-+are used, which is normally found in virtualization environments where
-+operating systems start partitions on odd sectors (Windows and its
-+sector 63).
-+
-+2) Large block data transfers normally found in database loads/dumps and
-+streaming media.
-+
-+3) Advanced relational database systems that perform their own caching
-+which prefer or demand direct IO access and, because of the nature of
-+their data access, can actually see worse performance with
-+indiscriminate caching.
-+
-+4) Multiple layers of targets where the secondary and above layers need
-+to have a consistent view of the primary targets in order to preserve
-+data integrity which a page cache backed IO type might not provide
-+reliably.
-+
-+It also has an advantage over FILEIO in that it doesn't copy data
-+between the system cache and the commands' data buffers, so it saves a
-+considerable amount of CPU power and memory bandwidth.
-+
-+IMPORTANT: Since data in BLOCKIO and FILEIO modes are not consistent between
-+========= each other, if you try to use a device in both those modes
-+ simultaneously, you will almost instantly corrupt your data
-+ on that device.
-+
-+IMPORTANT: SCST 1.x BLOCKIO worked by default in NV_CACHE mode, where
-+========= each device was reported to remote initiators as having write
-+ through caching. But if your backend block device has an
-+ internal write back cache, that creates a possibility of
-+ losing the data cached in the internal cache in case of a
-+ power failure. Starting from SCST 2.0, BLOCKIO works by
-+ default in non-NV_CACHE mode, where each device is reported
-+ to remote initiators as having write back caching, and the
-+ internal device's cache is synchronized on each
-+ SYNCHRONIZE_CACHE command from the initiators. This might
-+ lead to some PERFORMANCE LOSS, so if you are sure about your
-+ power supply and want to restore the 1.x behavior, you should
-+ recreate your BLOCKIO devices in NV_CACHE mode.
-+
-+
-+Pass-through mode
-+-----------------
-+
-+In the pass-through mode (i.e. using the pass-through device handlers
-+scst_disk, scst_tape, etc) SCSI commands, coming from remote initiators,
-+are passed to local SCSI devices on the target as is, without any
-+modifications.
-+
-+SCST supports 1-to-many pass-through, where several initiators can
-+safely connect to a single pass-through device (a tape, for instance).
-+cases SCST emulates all the necessary functionality.
-+
-+In the sysfs interface all real SCSI devices are listed in
-+/sys/kernel/scst_tgt/devices in the form of host:channel:id:lun
-+numbers, for instance 1:0:0:0. The recommended way to match those
-+numbers to your devices is to use the lsscsi utility.
-+
-+Each pass-through dev handler has a "mgmt" file in its root
-+subdirectory /sys/kernel/scst_tgt/handlers/handler_name, e.g.
-+/sys/kernel/scst_tgt/handlers/dev_disk. It allows the following
-+commands. They can be sent to it using, e.g., the echo command.
-+
-+ - "add_device" - this command assigns SCSI device with
-+host:channel:id:lun numbers to this dev handler.
-+
-+echo "add_device 1:0:0:0" >/sys/kernel/scst_tgt/handlers/dev_disk/mgmt
-+
-+will assign SCSI device 1:0:0:0 to this dev handler.
-+
-+ - "del_device" - this command unassigns SCSI device with
-+host:channel:id:lun numbers from this dev handler.
-+
-+As usual, on read the "mgmt" file returns a small help text about the
-+available commands.
-+
-+You need to manually assign each of your real SCSI devices to the
-+corresponding pass-through dev handler using the "add_device" command,
-+otherwise the real SCSI devices will not be visible remotely. The
-+assignment isn't done automatically, because it could lead to load and
-+initialization problems of the pass-through dev handlers if any of the
-+local real SCSI devices are malfunctioning.
-+
-+Like any other hardware, the local SCSI hardware cannot handle commands
-+with an amount of data and/or a segment count in the scatter-gather
-+array bigger than some values. Therefore, when using the pass-through
-+mode you should note that the values for the maximum number of segments
-+and the maximum amount of transferred data (max_sectors) for each SCSI
-+command on devices on initiators cannot be bigger than the
-+corresponding values of the corresponding SCSI devices on the target.
-+Otherwise you will see symptoms like small transfers working well while
-+large ones stall, and messages like "Unable to complete command due to
-+SG IO count limitation" are printed in the kernel logs.
-+
-+You can't control the limit of the scatter-gather segments from user
-+space, but for block devices it is usually sufficient if you set
-+/sys/block/DEVICE_NAME/queue/max_sectors_kb on the initiators to the
-+same or a lower value than /sys/block/DEVICE_NAME/queue/max_hw_sectors_kb
-+for the corresponding devices on the target (see the sketch below).
-+
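-+A minimal sketch of that matching (the device name sdb and the value
-+are assumptions for illustration):
-+
-+# on the target: check the hardware limit of the backing device
-+cat /sys/block/sdb/queue/max_hw_sectors_kb
-+# on the initiator: set the same or a lower value for the imported disk
-+echo 512 >/sys/block/sdb/queue/max_sectors_kb
-+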
-+For non-block devices SCSI commands are usually generated directly by
-+applications, so, if you experience large transfer stalls, you should
-+check the documentation of your application for how to limit the
-+transfer sizes.
-+
-+Another way to solve this issue is to build SG entries with more than 1
-+page each. See the following patch as an example:
-+http://scst.sourceforge.net/sgv_big_order_alloc.diff
-+
-+
-+Performance
-+-----------
-+
-+SCST from the very beginning has been designed and implemented to
-+provide the best possible performance. Since there is no "one size fits
-+all" best performance configuration for different setups and loads,
-+SCST provides an extensive set of settings to allow tuning it for the
-+best performance in each particular case. You don't necessarily have to
-+use those settings. If you don't, SCST will do a very good job of
-+autotuning for you, so the resulting performance will, on average, be
-+better (sometimes, much better) than with other SCSI targets. But in
-+some cases you can improve it even more by manual tuning.
-+
-+Before doing any performance measurements note that performance results
-+are very much dependent on your type of load, so it is crucial that
-+you choose the access mode (FILEIO, BLOCKIO, O_DIRECT, pass-through)
-+which suits your needs the best.
-+
-+In order to get the maximum performance you should:
-+
-+1. For SCST:
-+
-+ - Disable in Makefile CONFIG_SCST_STRICT_SERIALIZING, CONFIG_SCST_EXTRACHECKS,
-+ CONFIG_SCST_TRACING, CONFIG_SCST_DEBUG*, CONFIG_SCST_STRICT_SECURITY,
-+ CONFIG_SCST_MEASURE_LATENCY
-+
-+2. For target drivers:
-+
-+ - Disable in Makefiles CONFIG_SCST_EXTRACHECKS, CONFIG_SCST_TRACING,
-+ CONFIG_SCST_DEBUG*
-+
-+3. For device handlers, including VDISK:
-+
-+ - Disable in Makefile CONFIG_SCST_TRACING and CONFIG_SCST_DEBUG.
-+
-+4. Make sure you have the io_grouping_type option set correctly,
-+especially in the following cases:
-+
-+ - Several initiators share your target's backstorage. It can be a
-+ shared LU using some cluster FS, like VMFS, as well as different
-+ LUs located on the same backstorage (RAID array). For instance,
-+ if you have 3 initiators and each of them uses its own dedicated
-+ FILEIO device file from the same RAID-6 array on the target.
-+
-+ In this case for the best performance you should have the
-+ io_grouping_type option set to the value "never" in all the LUNs'
-+ targets and security groups.
-+
-+ - Your initiator is connected to your target in MPIO mode. In this
-+ case for the best performance you should:
-+
-+ * Either connect all the sessions from the initiator to a single
-+ target or security group and have the io_grouping_type option set
-+ to the value "this_group_only" in the target or security group,
-+
-+ * Or, if it isn't possible to connect all the sessions from the
-+ initiator to a single target or security group, assign the same
-+ numeric io_grouping_type value to each target/security group this
-+ initiator is connected to. The exact value itself doesn't matter;
-+ it is only important that all the targets/security groups use the
-+ same value.
-+
-+Don't forget, io_grouping_type makes sense only if you use the CFQ I/O
-+scheduler on the target, and only for devices with threads_num >= 0
-+and, if threads_num > 0, with threads_pool_type "per_initiator".
-+
-+You can check whether io_grouping_type is set correctly in your setup,
-+as well as whether the "auto" io_grouping_type value works for you, by
-+tests like the following:
-+
-+ - For the non-MPIO case you can run single thread sequential reading,
-+ e.g. using buffered dd, from one initiator, then run the same single
-+ thread sequential reading from the second initiator in parallel. If
-+ io_grouping_type is set correctly, the aggregate throughput measured
-+ on the target should only slightly decrease, and all initiators
-+ should have a nearly equal share of it. If io_grouping_type is not
-+ set correctly, the aggregate throughput and/or the throughput on any
-+ initiator will decrease significantly, by 2 times or even more. For
-+ instance, suppose you have 80MB/s single thread sequential reading
-+ from the target on any initiator. When both initiators then read in
-+ parallel you should see on the target an aggregate throughput of
-+ something like 70-75MB/s with a correct io_grouping_type, and
-+ something like 35-40MB/s or 8-10MB/s on any initiator with an
-+ incorrect one.
-+
-+ - For the MPIO case it's much easier. With an incorrect
-+ io_grouping_type you simply won't see a performance increase from
-+ adding the second session (assuming your hardware is capable of
-+ transferring data through both sessions in parallel), or can even
-+ see a performance decrease.
-+
-+5. If you are going to use your target in a VM environment, for
-+instance as shared storage with VMware, make sure all your VMs are
-+connected to the target via *separate* sessions. For instance, for
-+iSCSI it means that each VM has its own connection to the target, not
-+all VMs connected using a single connection. You can check it using the
-+SCST sysfs interface (see the sketch below). For other transports you
-+should use the available facilities, like NPIV for Fibre Channel, to
-+make separate sessions for each VM. If you miss this, you can greatly
-+lose performance of parallel access to your target from different VMs.
-+This isn't related to the case where your VMs are using the same shared
-+storage, like with VMFS, for instance. In that case all your VM hosts
-+will be connected to the target via separate sessions, which is enough.
-+
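-+For instance, a hedged check via sysfs (the target name is an
-+assumption; each VM should show up as its own session):
-+
-+ls /sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/sessions/
-+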
-+6. For other target and initiator software parts:
-+
-+ - Make sure you applied to your kernel all available SCST patches.
-+ If no such patch exists for your kernel version, it is strongly
-+ recommended to upgrade your kernel to a version for which it
-+ exists.
-+
-+ - Don't enable debug/hacking features in the kernel, i.e. use them as
-+ they are by default.
-+
-+ - The default kernel read-ahead and queuing settings are optimized
-+ for locally attached disks, therefore they are not optimal if the
-+ disks are attached remotely (the SCSI target case), which sometimes
-+ could lead to unexpectedly low throughput. You should increase the
-+ read-ahead size to at least 512KB or even more on all initiators and
-+ the target.
-+
-+ You should also limit on all initiators the maximum amount of sectors
-+ per SCSI command. This tuning is also recommended on targets with
-+ large read-ahead values. To do it on Linux, run:
-+
-+ echo "64" > /sys/block/sdX/queue/max_sectors_kb
-+
-+ where instead of X specify the letter of your device imported from
-+ the target, like 'b', i.e. sdb.
-+
-+ To increase read-ahead size on Linux, run:
-+
-+ blockdev --setra N /dev/sdX
-+
-+ where N is a read-ahead number in 512-byte sectors and X is a device
-+ letter like above.
-+
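-+ For instance, for the 512KB read-ahead suggested above (1024 sectors
-+ * 512 bytes = 512KB; the device letter is an assumption):
-+
-+ blockdev --setra 1024 /dev/sdb
-+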
-+ Note: you need to set the read-ahead setting for device sdX again
-+ after you change the maximum amount of sectors per SCSI command for
-+ that device.
-+
-+ Note2: you need to restart SCST after you change the read-ahead
-+ settings on the target. It is a limitation of the Linux read-ahead
-+ implementation. It reads RA values for each file only when the file
-+ is opened and does not update them when the global RA parameters
-+ change. Hence the need for vdisk to reopen all its files/devices.
-+
-+ - You may need to increase the amount of requests that the OS on the
-+ initiator sends to the target device. To do it on Linux initiators,
-+ run
-+
-+ echo "64" > /sys/block/sdX/queue/nr_requests
-+
-+ where X is a device letter like above.
-+
-+ You may also experiment with other parameters in the /sys/block/sdX
-+ directory; they also affect performance. If you find the best
-+ values, please share them with us.
-+
-+ - On the target use the CFQ IO scheduler. In most cases it has a
-+ performance advantage over other IO schedulers, sometimes a huge one
-+ (2+ times aggregate throughput increase).
-+
-+ - It is recommended to turn the kernel preemption off, i.e. set
-+ the kernel preemption model to "No Forced Preemption (Server)".
-+
-+ - XFS looks like the best filesystem on the target to store device
-+ files, because it allows considerably better linear write throughput
-+ than ext3.
-+
-+7. For hardware on the target:
-+
-+ - Make sure that your target hardware (e.g. target FC or network card)
-+ and underlying IO hardware (e.g. IO card, like SATA, SCSI or RAID,
-+ to which your disks are connected) don't share the same PCI bus. You
-+ can check it using the lspci utility. They have to work in parallel,
-+ so it is better if they don't compete for the bus. The problem is
-+ not only in the bandwidth, which they have to share, but also in the
-+ interaction between the cards during that competition. This is very
-+ important, because in some cases, if the target and backend storage
-+ controllers share the same PCI bus, it could lead to 5-10 times less
-+ performance than expected. Moreover, some motherboards (by
-+ Supermicro, particularly) have serious stability issues if there are
-+ several high speed devices on the same bus working in parallel. If
-+ you have no choice but PCI bus sharing, set the PCI latency in the
-+ BIOS as low as possible.
-+
-+8. If you use the VDISK IO module in FILEIO mode, the NV_CACHE option
-+will provide you the best performance. But when using it, make sure you
-+use a good UPS with the ability to shut down the target on a power
-+failure.
-+
-+Baseline performance numbers can be found in these measurements:
-+http://lkml.org/lkml/2009/3/30/283.
-+
-+IMPORTANT: If you use on the initiator some versions of Windows (at least
-+========= W2K) you can't get good write performance for VDISK FILEIO
-+ devices with the default 512 byte block size. You could get
-+ about 10% of the expected performance. This is because of the
-+ partition alignment, which is (simplifying) incompatible with
-+ how the Linux page cache works, so for each write the
-+ corresponding block must be read first. Use a 4096 byte block
-+ size for VDISK devices and you will have the expected write
-+ performance. Actually, any OS on initiators, not only Windows,
-+ will benefit from a block size of max(PAGE_SIZE,
-+ BLOCK_SIZE_ON_UNDERLYING_FS), where PAGE_SIZE is the page size
-+ and BLOCK_SIZE_ON_UNDERLYING_FS is the block size of the
-+ underlying FS on which the device file is located, or 0 if a
-+ device node is used. Both values are from the target. See also
-+ the important notes about setting block sizes >512 bytes for
-+ VDISK FILEIO devices above.
-+
-+9. In some cases, for instance when working with SSD devices, which
-+consume 100% of a single CPU's load for data transfers in their
-+internal threads, to maximize IOPS it may be necessary to assign
-+dedicated CPUs to those threads. Consider using the cpu_mask attribute
-+for devices with threads_pool_type "per_initiator", or the Linux CPU
-+affinity facilities for other threads_pool_types. No IRQ processing
-+should be done on those CPUs. Check that using /proc/interrupts. See
-+the taskset command and Documentation/IRQ-affinity.txt in your kernel's
-+source tree for how to assign affinity to tasks and IRQs (a sketch
-+follows below).
-+
-+The reason for this is that processing of incoming commands in SIRQ
-+context might be done on the same CPUs as the SSD devices' threads
-+doing data transfers. As a result, those threads won't receive all the
-+processing power of those CPUs and will perform worse.
-+
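-+A minimal sketch, assuming CPUs 0-1 are left for IRQ processing and
-+CPUs 2-3 are reserved for the data transfer threads (the IRQ number
-+and PID are hypothetical):
-+
-+# route IRQ 24 to CPUs 0-1 only (CPU bitmask 0x3)
-+echo 3 >/proc/irq/24/smp_affinity
-+# pin the I/O thread with PID 1234 to CPUs 2-3 (CPU bitmask 0xc)
-+taskset -p 0xc 1234
-+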
-+
-+Work if target's backstorage or link is too slow
-+------------------------------------------------
-+
-+Under high I/O load, when your target's backstorage gets overloaded, or
-+when working over a slow link between initiator and target, when the
-+link can't serve all the queued commands on time, you can experience
-+I/O stalls or see abort or reset messages in the kernel log.
-+
-+At first, consider the case of too slow target backstorage. On some
-+seek intensive workloads even fast disks or RAIDs, which are able to
-+serve a continuous data stream at 500+ MB/s, can be as slow as 0.3
-+MB/s. Another possible cause can be MD/LVM/RAID on your target, as in
-+http://lkml.org/lkml/2008/2/27/96 (check the whole thread as well).
-+
-+Thus, in such situations simply processing one or more commands takes
-+too long, hence the initiator decides that they are stuck on the target
-+and tries to recover. Particularly, it is known that the default amount
-+of simultaneously queued commands (48) is sometimes too high if you do
-+intensive writes from VMware on a target disk which uses LVM in
-+snapshot mode. In this case a value like 16, or even 8-10 depending on
-+your backstorage speed, could be more appropriate.
-+
-+Unfortunately, SCST currently lacks dynamic I/O flow control, where the
-+queue depth on the target is dynamically decreased/increased based on
-+how slow/fast the backstorage is compared to the target link. So, there
-+are 6 possible actions which you can take to work around or fix this
-+issue in this case:
-+
-+1. Ignore incoming task management (TM) commands. It's fine if there are
-+not too many of them, so average performance isn't hurt and the
-+corresponding device isn't getting put offline, i.e. if the backstorage
-+isn't too slow.
-+
-+2. Decrease /sys/block/sdX/device/queue_depth on the initiator, in
-+case it's Linux (see below how), and/or the SCST_MAX_TGT_DEV_COMMANDS
-+constant in the scst_priv.h file, until you stop seeing incoming TM
-+commands. The iSCSI-SCST driver also has its own iSCSI specific
-+parameter for that; see its README file.
-+
-+To decrease the device queue depth on Linux initiators you can run the
-+command:
-+
-+# echo Y >/sys/block/sdX/device/queue_depth
-+
-+where Y is the new number of simultaneously queued commands and X is
-+your imported device letter, like 'a' for the sda device. There are no
-+special limitations on the Y value, it can be any value from 1 to the
-+possible maximum (usually 32), so start by dividing the current value
-+by 2, i.e. set 16 if /sys/block/sdX/device/queue_depth contains 32.
-+
-+3. Increase the corresponding timeout on the initiator. For Linux it is
-+located in
-+/sys/devices/platform/host*/session*/target*:0:0/*:0:0:1/timeout. It can
-+be done automatically by a udev rule. For instance, the following
-+rule will increase it to 300 seconds:
-+
-+SUBSYSTEM=="scsi", KERNEL=="[0-9]*:[0-9]*", ACTION=="add", ATTR{type}=="0|7|14", ATTR{timeout}="300"
-+
-+By default, this timeout is 30 or 60 seconds, depending on your distribution.
-+
-+4. Try to avoid such seek intensive workloads.
-+
-+5. Increase the speed of the target's backstorage.
-+
-+6. Implement dynamic I/O flow control in SCST. This will be the
-+ultimate solution. See the "Dynamic I/O flow control" section on the
-+http://scst.sourceforge.net/contributing.html page for a possible
-+implementation idea.
-+
-+Next, consider the case of a too slow link between initiator and
-+target, when the initiator tries to simultaneously push N commands to
-+the target over it. In this case the time to serve those commands, i.e.
-+to send or receive the data for them over the link, can be more than
-+the timeout for any single command, hence one or more commands at the
-+tail of the queue cannot be served within the timeout, so the initiator
-+will decide that they are stuck on the target and will try to recover.
-+
-+To work around/fix this issue in this case you can use ways 1, 2, 3, 6
-+above or (7): increase the speed of the link between target and
-+initiator. But for some initiator implementations of WRITE commands
-+there might be cases when the target has no way to detect the issue, so
-+dynamic I/O flow control will not be able to help. In those cases you
-+may also need, on the initiator(s), to either decrease the queue depth
-+(way 2) or increase the corresponding timeout (way 3).
-+
-+Note that logged messages about QUEUE_FULL status are quite different
-+in nature. This is normal operation, just SCSI flow control in action.
-+Simply don't enable the "mgmt_minor" logging level, or, alternatively,
-+if you are confident in the worst case performance of your back-end
-+storage or initiator-target link, you can increase
-+SCST_MAX_TGT_DEV_COMMANDS in scst_priv.h to 64. Usually initiators
-+don't try to push more commands onto the target.
-+
-+
-+Credits
-+-------
-+
-+Thanks to:
-+
-+ * Mark Buechler <mark.buechler@gmail.com> for a lot of useful
-+ suggestions, bug reports and help in debugging.
-+
-+ * Ming Zhang <mingz@ele.uri.edu> for fixes and comments.
-+
-+ * Nathaniel Clark <nate@misrule.us> for fixes and comments.
-+
-+ * Calvin Morrow <calvin.morrow@comcast.net> for testing and useful
-+ suggestions.
-+
-+ * Hu Gang <hugang@soulinfo.com> for the original version of the
-+ LSI target driver.
-+
-+ * Erik Habbinga <erikhabbinga@inphase-tech.com> for fixes and support
-+ of the LSI target driver.
-+
-+ * Ross S. W. Walker <rswwalker@hotmail.com> for BLOCKIO inspiration
-+ and Vu Pham <huongvp@yahoo.com> who implemented it for VDISK dev handler.
-+
-+ * Alessandro Premoli <a.premoli@andxor.it> for fixes.
-+
-+ * Nathan Bullock <nbullock@yottayotta.com> for fixes.
-+
-+ * Terry Greeniaus <tgreeniaus@yottayotta.com> for fixes.
-+
-+ * Krzysztof Blaszkowski <kb@sysmikro.com.pl> for many fixes and bug reports.
-+
-+ * Jianxi Chen <pacers@users.sourceforge.net> for fixing a problem
-+ with devices >2TB in size.
-+
-+ * Bart Van Assche <bvanassche@acm.org> for a lot of help.
-+
-+ * Daniel Debonzi <debonzi@linux.vnet.ibm.com> for a big part of the
-+ initial SCST sysfs tree implementation.
-+
-+
-+Vladislav Bolkhovitin <vst@vlnb.net>, http://scst.sourceforge.net
-diff -uprN orig/linux-3.2/Documentation/scst/SysfsRules linux-3.2/Documentation/scst/SysfsRules
---- orig/linux-3.2/Documentation/scst/SysfsRules
-+++ linux-3.2/Documentation/scst/SysfsRules
-@@ -0,0 +1,942 @@
-+ SCST SYSFS interface rules
-+ ==========================
-+
-+This file describes the SYSFS interface rules, which all SCST target
-+drivers, dev handlers and management utilities MUST follow. This allows
-+for a simple, self-documented management interface that is independent
-+of target drivers and dev handlers.
-+
-+Words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
-+"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
-+document are to be interpreted as described in RFC 2119.
-+
-+In this document "key attribute" means a configuration attribute with a
-+non-default value, which must be configured during the target driver's
-+initialization. A key attribute MUST have the keyword "[key]" in its
-+last line. If a default value is set to a key attribute, it becomes a
-+regular non-key attribute. For instance, the iSCSI target has the
-+attribute DataDigest. The default value for this attribute is "None".
-+If the value "CRC32C" is set to this attribute, it will become a key
-+attribute. If the value "None" is set again, this attribute will become
-+a non-key attribute again.
-+
-+Each user configurable attribute with a non-default value MUST be
-+marked as a key attribute.
-+
-+Key attributes SHOULD NOT have sysfs names ending in digits, because
-+such names SHOULD be used to store several attributes with the same
-+name in the sysfs tree, where duplicate names are not allowed. For
-+instance, iSCSI targets can have several incoming user names, so the
-+corresponding attribute should have the sysfs name "IncomingUser". If
-+there are 2 user names, they should have sysfs names "IncomingUser" and
-+"IncomingUser1". In other words, all "IncomingUser[0-9]*" names should
-+be considered as different instances of the same "IncomingUser"
-+attribute.
-+
-+
-+I. Rules for target drivers
-+===========================
-+
-+SCST core for each target driver (struct scst_tgt_template) creates a
-+root subdirectory in /sys/kernel/scst_tgt/targets with name
-+scst_tgt_template.name (called "target_driver_name" further in this
-+document).
-+
-+For each target (struct scst_tgt) SCST core creates a root subdirectory
-+in /sys/kernel/scst_tgt/targets/target_driver_name with name
-+scst_tgt.tgt_name (called "target_name" further in this document).
-+
-+There are 2 types of targets possible: hardware and virtual targets.
-+Hardware targets are targets corresponding to real hardware, for
-+instance, a Fibre Channel adapter's port. Virtual targets are hardware
-+independent targets, which can be dynamically added or removed, for
-+instance, an iSCSI target or an NPIV Fibre Channel target.
-+
-+A target driver supporting virtual targets MUST support "mgmt" attribute
-+and "add_target"/"del_target" commands.
-+
-+If target driver supports both hardware and virtual targets (for
-+instance, an FC adapter supporting NPIV, which has hardware targets for
-+its physical ports as well as virtual NPIV targets), it MUST create each
-+hardware target with hw_target mark to make SCST core create "hw_target"
-+attribute (see below).
-+
-+Attributes for target drivers
-+-----------------------------
-+
-+A target driver MAY support in its root subdirectory the following
-+optional attributes. Target drivers MAY also support there other
-+read-only or read-writable attributes.
-+
-+1. "enabled" - this attribute MUST allow to enable and disable target
-+driver as a whole, i.e. if disabled, the target driver MUST NOT accept
-+new connections. The goal of this attribute is to allow the target
-+driver's initial configuration. For instance, iSCSI target may need to
-+have discovery user names and passwords set before it starts serving
-+discovery connections.
-+
-+This attribute MUST have read and write permissions for superuser and be
-+read-only for other users.
-+
-+On read it MUST return 0, if the target driver is disabled, and 1, if it
-+is enabled.
-+
-+On write it MUST accept '0' character as request to disable and '1' as
-+request to enable, but MAY also accept other driver specific commands.
-+
-+During disabling the target driver MAY close already connected sessions
-+in all targets, but this is OPTIONAL.
-+
-+MUST be 0 by default.
-+
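-+For illustration, enabling a configured target driver could look like
-+this (the iscsi driver name is an assumption):
-+
-+echo 1 >/sys/kernel/scst_tgt/targets/iscsi/enabled
-+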
-+2. "trace_level" - this attribute SHOULD allow to change log level of this
-+driver.
-+
-+This attribute SHOULD have read and write permissions for superuser and be
-+read-only for other users.
-+
-+On read it SHOULD return a help text about available commands and log levels.
-+
-+On write it SHOULD accept commands to change log levels according to the
-+help text.
-+
-+For example:
-+
-+out_of_mem | minor | pid | line | function | special | mgmt | mgmt_dbg | flow_control | conn
-+
-+Usage:
-+ echo "all|none|default" >trace_level
-+ echo "value DEC|0xHEX|0OCT" >trace_level
-+ echo "add|del TOKEN" >trace_level
-+
-+where TOKEN is one of [debug, function, line, pid,
-+ entryexit, buff, mem, sg, out_of_mem,
-+ special, scsi, mgmt, minor,
-+ mgmt_dbg, scsi_serializing,
-+ retry, recv_bot, send_bot, recv_top,
-+ send_top, d_read, d_write, conn, conn_dbg, iov, pdu, net_page]
-+
-+
-+3. "version" - this read-only for all attribute SHOULD return version of
-+the target driver and some info about its enabled compile time facilities.
-+
-+For example:
-+
-+2.0.0
-+EXTRACHECKS
-+DEBUG
-+
-+4. "mgmt" - if supported this attribute MUST allow to add and delete
-+targets, if virtual targets are supported by this driver, as well as it
-+MAY allow to add and delete the target driver's or its targets'
-+attributes.
-+
-+This attribute MUST have read and write permissions for superuser and be
-+read-only for other users.
-+
-+On read it MUST return a help string describing available commands,
-+parameters and attributes.
-+
-+To achieve that, the target driver should just correctly set the
-+following fields in its struct scst_tgt_template: mgmt_cmd_help,
-+add_target_parameters, tgtt_optional_attributes and
-+tgt_optional_attributes.
-+
-+For example:
-+
-+Usage: echo "add_target target_name [parameters]" >mgmt
-+ echo "del_target target_name" >mgmt
-+ echo "add_attribute <attribute> <value>" >mgmt
-+ echo "del_attribute <attribute> <value>" >mgmt
-+ echo "add_target_attribute target_name <attribute> <value>" >mgmt
-+ echo "del_target_attribute target_name <attribute> <value>" >mgmt
-+
-+where parameters are one or more param_name=value pairs separated by ';'
-+
-+The following target driver attributes available: IncomingUser, OutgoingUser
-+The following target attributes available: IncomingUser, OutgoingUser, allowed_portal
-+
-+4.1. "add_target" - if supported, this command MUST add new target with
-+name "target_name" and specified optional or required parameters. Each
-+parameter MUST be in form "parameter=value". All parameters MUST be
-+separated by ';' symbol.
-+
-+All target drivers supporting creation of virtual targets MUST support
-+this command.
-+
-+All target drivers supporting "add_target" command MUST support all
-+read-only targets' key attributes as parameters to "add_target" command
-+with the attributes' names as parameters' names and the attributes'
-+values as parameters' values.
-+
-+For example:
-+
-+echo "add_target TARGET1 parameter1=1; parameter2=2" >mgmt
-+
-+will add target with name "TARGET1" and parameters with names
-+"parameter1" and "parameter2" with values 1 and 2 correspondingly.
-+
-+4.2. "del_target" - if supported, this command MUST delete target with
-+name "target_name". If "add_target" command is supported "del_target"
-+MUST also be supported.
-+
-+4.3. "add_attribute" - if supported, this command MUST add a target
-+driver's attribute with the specified name and one or more values.
-+
-+All target drivers supporting run time creation of the target driver's
-+key attributes MUST support this command.
-+
-+For example, for iSCSI target:
-+
-+echo "add_attribute IncomingUser name password" >mgmt
-+
-+will add for discovery sessions an incoming user (attribute
-+/sys/kernel/scst_tgt/targets/iscsi/IncomingUser) with name "name" and
-+password "password".
-+
-+4.4. "del_attribute" - if supported, this command MUST delete target
-+driver's attribute with the specified name and values. The values MUST
-+be specified, because in some cases attributes MAY internally be
-+distinguished by values. For instance, iSCSI target might have several
-+incoming users. If not needed, target driver might ignore the values.
-+
-+If "add_attribute" command is supported "del_attribute" MUST
-+also be supported.
-+
-+4.5. "add_target_attribute" - if supported, this command MUST add new
-+attribute for the specified target with the specified name and one or
-+more values.
-+
-+All target drivers supporting run time creation of targets' key
-+attributes MUST support this command.
-+
-+For example:
-+
-+echo "add_target_attribute iqn.2006-10.net.vlnb:tgt IncomingUser name password" >mgmt
-+
-+will add for target with name "iqn.2006-10.net.vlnb:tgt" an incoming
-+user (attribute
-+/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/IncomingUser)
-+with name "name" and password "password".
-+
-+4.6. "del_target_attribute" - if supported, this command MUST delete
-+target's attribute with the specified name and values. The values MUST
-+be specified, because in some cases attributes MAY internally be
-+distinguished by values. For instance, iSCSI target might have several
-+incoming users. If not needed, target driver might ignore the values.
-+
-+If "add_target_attribute" command is supported "del_target_attribute"
-+MUST also be supported.
-+
-+Attributes for targets
-+----------------------
-+
-+Each target MAY support in its root subdirectory the following optional
-+attributes. Target drivers MAY also support there other read-only or
-+read-writable attributes.
-+
-+1. "enabled" - this attribute MUST allow to enable and disable the
-+corresponding target, i.e. if disabled, the target MUST NOT accept new
-+connections. The goal of this attribute is to allow the target's initial
-+configuration. For instance, each target needs to have its LUNs setup
-+before it starts serving initiators. Another example is iSCSI target,
-+which may need to have initialized a number of iSCSI parameters before
-+it starts accepting new iSCSI connections.
-+
-+This attribute MUST have read and write permissions for superuser and be
-+read-only for other users.
-+
-+On read it MUST return 0, if the target is disabled, and 1, if it is
-+enabled.
-+
-+On write it MUST accept '0' character as request to disable and '1' as
-+request to enable. Other requests MUST be rejected.
-+
-+SCST core provides some facilities, which MUST be used to implement this
-+attribute.
-+
-+During disabling the target driver MAY close already connected sessions
-+to the target, but this is OPTIONAL.
-+
-+MUST be 0 by default.
-+
-+SCST core will automatically create for all targets the following
-+attributes:
-+
-+1. "rel_tgt_id" - allows to read or write SCSI Relative Target Port
-+Identifier attribute.
-+
-+2. "hw_target" - allows to distinguish hardware and virtual targets, if
-+the target driver supports both.
-+
-+To provide the OPTIONAL force close session functionality, target
-+drivers MUST implement it using a write only session attribute
-+"force_close", a write to which MUST close the corresponding session
-+(see the sketch below).
-+
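-+For illustration, a hedged sketch (the driver, target and session names
-+are hypothetical):
-+
-+echo 1 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/sessions/SESSION_NAME/force_close
-+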
-+See SCST core's README for more info about those attributes.
-+
-+
-+II. Rules for dev handlers
-+==========================
-+
-+There are 2 types of dev handlers: parent dev handlers and children dev
-+handlers. The children dev handlers depend on the parent dev handlers.
-+
-+SCST core for each parent dev handler (struct scst_dev_type with
-+parent member with value NULL) creates a root subdirectory in
-+/sys/kernel/scst_tgt/handlers with name scst_dev_type.name (called
-+"dev_handler_name" further in this document).
-+
-+Parent dev handlers can have one or more subdirectories for children
-+dev handlers, named after their scst_dev_type.name.
-+
-+Only one level of the dev handlers' parent/children hierarchy is
-+allowed. Parent dev handlers, which support children dev handlers, MUST
-+NOT handle devices and MUST be only placeholders for the children dev
-+handlers.
-+
-+Further in this document children dev handlers or parent dev handlers,
-+which don't support children, will be called "end level dev handlers".
-+
-+End level dev handlers can be recognized by the existence of the "mgmt"
-+attribute.
-+
-+For each device (struct scst_device) SCST core creates a root
-+subdirectory in /sys/kernel/scst_tgt/devices/device_name with name
-+scst_device.virt_name (called "device_name" further in this document).
-+
-+Attributes for dev handlers
-+---------------------------
-+
-+Each dev handler MUST have a "mgmt" attribute in its root subdirectory,
-+which MUST support the "add_device" and "del_device" commands as
-+described below.
-+
-+Parent dev handlers and end level dev handlers without parents MAY
-+support the following optional attributes in their root subdirectories.
-+They MAY also support other read-only or read-writable attributes
-+there.
-+
-+1. "trace_level" - this attribute SHOULD allow to change log level of this
-+driver.
-+
-+This attribute SHOULD have read and write permissions for superuser and be
-+read-only for other users.
-+
-+On read it SHOULD return a help text about available commands and log levels.
-+
-+On write it SHOULD accept commands to change log levels according to the
-+help text.
-+
-+For example:
-+
-+out_of_mem | minor | pid | line | function | special | mgmt | mgmt_dbg
-+
-+
-+Usage:
-+ echo "all|none|default" >trace_level
-+ echo "value DEC|0xHEX|0OCT" >trace_level
-+ echo "add|del TOKEN" >trace_level
-+
-+where TOKEN is one of [debug, function, line, pid,
-+ entryexit, buff, mem, sg, out_of_mem,
-+ special, scsi, mgmt, minor,
-+ mgmt_dbg, scsi_serializing,
-+ retry, recv_bot, send_bot, recv_top,
-+ send_top]
-+
-+2. "version" - this read-only for all attribute SHOULD return version of
-+the dev handler and some info about its enabled compile time facilities.
-+
-+For example:
-+
-+2.0.0
-+EXTRACHECKS
-+DEBUG
-+
-+End level dev handlers MUST support the "mgmt" attribute in their root
-+subdirectories and MAY support other read-only or read-writable
-+attributes. This attribute MUST have read and write permissions for the
-+superuser and be read-only for other users.
-+
-+Attribute "mgmt" for virtual devices dev handlers
-+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-+
-+For virtual device dev handlers the "mgmt" attribute MUST allow adding
-+and deleting devices, and it MAY also allow adding and deleting the dev
-+handler's or its devices' attributes.
-+
-+On read it MUST return a help string describing available commands and
-+parameters.
-+
-+To achieve that, the dev handler should just correctly set the
-+following fields in its struct scst_dev_type: mgmt_cmd_help,
-+add_device_parameters, devt_optional_attributes and
-+dev_optional_attributes.
-+
-+For example:
-+
-+Usage: echo "add_device device_name [parameters]" >mgmt
-+ echo "del_device device_name" >mgmt
-+ echo "add_attribute <attribute> <value>" >mgmt
-+ echo "del_attribute <attribute> <value>" >mgmt
-+ echo "add_device_attribute device_name <attribute> <value>" >mgmt
-+ echo "del_device_attribute device_name <attribute> <value>" >mgmt
-+
-+where parameters are one or more param_name=value pairs separated by ';'
-+
-+The following parameters available: filename, blocksize, write_through, nv_cache, o_direct, read_only, removable
-+The following device driver attributes available: AttributeX, AttributeY
-+The following device attributes available: AttributeDX, AttributeDY
-+
-+1. "add_device" - this command MUST add new device with name
-+"device_name" and specified optional or required parameters. Each
-+parameter MUST be in form "parameter=value". All parameters MUST be
-+separated by ';' symbol.
-+
-+All dev handlers supporting "add_device" command MUST support all
-+read-only devices' key attributes as parameters to "add_device" command
-+with the attributes' names as parameters' names and the attributes'
-+values as parameters' values.
-+
-+For example:
-+
-+echo "add_device device1 parameter1=1; parameter2=2" >mgmt
-+
-+will add device with name "device1" and parameters with names
-+"parameter1" and "parameter2" with values 1 and 2 correspondingly.
-+
-+2. "del_device" - this command MUST delete device with name
-+"device_name".
-+
-+3. "add_attribute" - if supported, this command MUST add a device
-+driver's attribute with the specified name and one or more values.
-+
-+All dev handlers supporting run time creation of the dev handler's
-+key attributes MUST support this command.
-+
-+For example:
-+
-+echo "add_attribute AttributeX ValueX" >mgmt
-+
-+will add attribute
-+/sys/kernel/scst_tgt/handlers/dev_handler_name/AttributeX with value ValueX.
-+
-+4. "del_attribute" - if supported, this command MUST delete device
-+driver's attribute with the specified name and values. The values MUST
-+be specified, because in some cases attributes MAY internally be
-+distinguished by values. If not needed, dev handler might ignore the
-+values.
-+
-+If "add_attribute" command is supported "del_attribute" MUST also be
-+supported.
-+
-+5. "add_device_attribute" - if supported, this command MUST add new
-+attribute for the specified device with the specified name and one or
-+more values.
-+
-+All dev handlers supporting run time creation of devices' key attributes
-+MUST support this command.
-+
-+For example:
-+
-+echo "add_device_attribute device1 AttributeDX ValueDX" >mgmt
-+
-+will add for the device with name "device1" the attribute
-+/sys/kernel/scst_tgt/devices/device_name/AttributeDX with value
-+ValueDX.
-+
-+6. "del_device_attribute" - if supported, this command MUST delete
-+device's attribute with the specified name and values. The values MUST
-+be specified, because in some cases attributes MAY internally be
-+distinguished by values. If not needed, dev handler might ignore the
-+values.
-+
-+If "add_device_attribute" command is supported "del_device_attribute"
-+MUST also be supported.
-+
-+Attribute "mgmt" for pass-through devices dev handlers
-+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-+
-+For pass-through device dev handlers the "mgmt" attribute MUST allow
-+assigning and unassigning existing SCSI devices to/from this dev
-+handler via the "add_device" and "del_device" commands correspondingly.
-+
-+On read it MUST return a help string describing available commands and
-+parameters.
-+
-+For example:
-+
-+Usage: echo "add_device H:C:I:L" >mgmt
-+ echo "del_device H:C:I:L" >mgmt
-+
-+1. "add_device" - this command MUST assign SCSI device with
-+host:channel:id:lun numbers to this dev handler.
-+
-+All pass-through dev handlers MUST support this command.
-+
-+For example:
-+
-+echo "add_device 1:0:0:0" >mgmt
-+
-+will assign SCSI device 1:0:0:0 to this dev handler.
-+
-+2. "del_device" - this command MUST unassign SCSI device with
-+host:channel:id:lun numbers from this dev handler.
-+
-+SCST core will automatically create for all dev handlers the following
-+attributes:
-+
-+1. "type" - SCSI type of device this dev handler can handle.
-+
-+See SCST core's README for more info about those attributes.
-+
-+Attributes for devices
-+----------------------
-+
-+Each device MAY support in its root subdirectory any read-only or
-+read-writable attributes.
-+
-+SCST core will automatically create for all devices the following
-+attributes:
-+
-+1. "type" - SCSI type of this device
-+
-+See SCST core's README for more info about those attributes.
-+
-+
-+III. Rules for management utilities
-+===================================
-+
-+Rules summary
-+-------------
-+
-+A management utility (scstadmin) SHOULD NOT keep any knowledge specific
-+to any device, dev handler, target or target driver. It SHOULD only know
-+the common SCST SYSFS rules, which all dev handlers and target drivers
-+MUST follow. Namely:
-+
-+Common rules:
-+~~~~~~~~~~~~~
-+
-+1. All key attributes MUST be marked with the "[key]" mark in the last
-+line of the attribute.
-+
-+2. All non-key attributes don't matter and SHOULD be ignored.
-+
-+For target drivers and targets:
-+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-+
-+1. If a target driver supports adding new targets, it MUST have a
-+"mgmt" attribute, which MUST support the "add_target" and "del_target"
-+commands as specified above.
-+
-+2. If a target driver supports adding new key attributes at run time,
-+it MUST have a "mgmt" attribute, which MUST support the "add_attribute"
-+and "del_attribute" commands as specified above.
-+
-+3. If a target driver supports both hardware and virtual targets, all
-+its hardware targets MUST have a "hw_target" attribute with value 1.
-+
-+4. If a target has read-only key attributes, the "add_target" command
-+MUST support them as parameters.
-+
-+5. If a target supports adding new key attributes at run time, the
-+target driver MUST have a "mgmt" attribute, which MUST support the
-+"add_target_attribute" and "del_target_attribute" commands as specified
-+above.
-+
-+6. Both target drivers and targets MAY support the "enable" attribute.
-+If supported, after configuring the corresponding target driver or
-+target, "1" MUST be written to this attribute in the following order:
-+first for all targets of the target driver, then for the target driver
-+itself, as in the sketch below.
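-+
-+A minimal sketch of the required order, assuming a target driver
-+"target_driver" with two targets (all names are illustrative) and that
-+the attribute appears as "enabled" in SYSFS, as in the save algorithm
-+below:
-+
-+echo 1 >/sys/kernel/scst_tgt/targets/target_driver/target1/enabled
-+echo 1 >/sys/kernel/scst_tgt/targets/target_driver/target2/enabled
-+echo 1 >/sys/kernel/scst_tgt/targets/target_driver/enabled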
-+
-+For devices and dev handlers:
-+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-+
-+1. Each dev handler MUST have a "mgmt" attribute in its root
-+subdirectory.
-+
-+2. Each dev handler MUST support the "add_device" and "del_device"
-+commands in its "mgmt" attribute as specified above.
-+
-+3. If a dev handler supports adding new key attributes at run time, its
-+"mgmt" attribute MUST support the "add_attribute" and "del_attribute"
-+commands as specified above.
-+
-+4. All device handlers have links in the root subdirectory pointing to
-+their devices.
-+
-+5. If a device has read-only key attributes, the "add_device" command
-+MUST support them as parameters.
-+
-+6. If a device supports adding new key attributes at run time, its dev
-+handler's "mgmt" attribute MUST support the "add_device_attribute" and
-+"del_device_attribute" commands as specified above.
-+
-+7. Each device has a "handler" link to its dev handler's root
-+subdirectory.
-+
-+How to distinguish and process different types of attributes
-+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-+
-+Since management utilities are only interested in key attributes, they
-+should simply ignore all non-key attributes, such as
-+devices/device_name/type or targets/target_driver/target_name/version,
-+no matter whether they are read-only or writable. So, the word "key"
-+will be omitted in the rest of this section.
-+
-+First of all, any attribute can be a key attribute, no matter how it
-+was created.
-+
-+All attributes existing at configuration save time should be treated
-+the same. Management utilities shouldn't try to separate them in any
-+way in config files.
-+
-+1. Always existing attributes
-+-----------------------------
-+
-+There are 2 types of them:
-+
-+1.1. Writable, like devices/device_name/t10_dev_id or
-+targets/qla2x00tgt/target_name/explicit_confirmation. They are the
-+simplest: their values can simply be read from and written to them.
-+
-+At configuration save time they can be distinguished as existing.
-+
-+At configuration write time they can be distinguished as existing and
-+writable.
-+
-+1.2. Read-only, like devices/fileio_device_name/filename or
-+devices/fileio_device_name/block_size. They are also easy to
-+distinguish by looking at the permissions.
-+
-+At configuration save time they can be distinguished as existing, the
-+same as in (1.1).
-+
-+At configuration write time they can be distinguished as existing and
-+read-only. They all should be passed as parameters to the "add_target"
-+or "add_device" command for virtual targets and devices respectively.
-+To apply changes to them, the whole corresponding object
-+(fileio_device_name in this example) should be removed and then
-+recreated, as in the sketch below.
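-+
-+An illustrative sketch, assuming a vdisk_fileio device "disk1" whose
-+read-only "filename" attribute should be changed (the exact parameter
-+syntax of "add_device" is dev handler specific):
-+
-+echo "del_device disk1" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
-+echo "add_device disk1 filename=/new/path" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt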
-+
-+2. Optional
-+-----------
-+
-+For instance, targets/iscsi/IncomingUser or
-+targets/iscsi/target_name/IncomingUser. There are 4 types of them:
-+
-+2.1. Global for target drivers and dev handlers
-+-----------------------------------------------
-+
-+For instance, targets/iscsi/IncomingUser or handlers/vdisk_fileio/XX
-+(none at the moment).
-+
-+At configuration save time they can be distinguished the same as in
-+(1.1).
-+
-+At configuration write time they fall into one of 4 categories:
-+
-+2.1.1. Existing and writable. In this case they should be treated as
-+(1.1).
-+
-+2.1.2. Existing and read-only. In this case they should be treated as
-+(1.2).
-+
-+2.1.3. Not existing. In this case they should be added using
-+"add_attribute" command.
-+
-+2.1.4. Existing in the sysfs tree and not existing in the config file.
-+In this case they should be deleted using "del_attribute" command.
-+
-+2.2. Global for targets
-+-----------------------
-+
-+For instance, targets/iscsi/target_name/IncomingUser.
-+
-+At configuration save time they can be distinguished the same as in (1.1).
-+
-+At configuration write time they fall into one of 4 categories:
-+
-+2.2.1. Existing and writable. In this case they should be treated as
-+(1.1).
-+
-+2.2.2. Existing and read-only. In this case they should be treated as
-+(1.2).
-+
-+2.2.3. Not existing. In this case they should be added using
-+"add_target_attribute" command.
-+
-+2.2.4. Existing in the sysfs tree and not existing in the config file.
-+In this case they should be deleted using "del_target_attribute"
-+command.
-+
-+2.3. Global for devices
-+-----------------------
-+
-+For instance, devices/nullio/t10_dev_id.
-+
-+At configuration save time they can be distinguished the same as in (1.1).
-+
-+At configuration write time they fall into one of 4 categories:
-+
-+2.3.1. Existing and writable. In this case they should be treated as
-+(1.1).
-+
-+2.3.2. Existing and read-only. In this case they should be treated as
-+(1.2).
-+
-+2.3.3. Not existing. In this case they should be added using
-+"add_device_attribute" command for the corresponding handler, e.g.
-+devices/nullio/handler/.
-+
-+2.3.4. Existing in the sysfs tree and not existing in the config file.
-+In this case they should be deleted using "del_device_attribute"
-+command for the corresponding handler, e.g. devices/nullio/handler/.
-+
-+Thus, a management utility should implement only 8 procedures: (1.1),
-+(1.2), (2.1.3), (2.1.4), (2.2.3), (2.2.4), (2.3.3), (2.3.4).
-+
-+
-+How to distinguish hardware and virtual targets
-+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-+
-+A target is a hardware target:
-+
-+ * if both the "hw_target" attribute and the "mgmt" management file exist
-+
-+ * or if neither of them exists
-+
-+A target is a virtual target if the "mgmt" file exists and the
-+"hw_target" attribute doesn't.
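-+
-+A minimal shell sketch of this check (paths per the layout described
-+above; "mgmt" is an attribute of the target driver, "hw_target" of the
-+target):
-+
-+is_hw_target() {	# $1 = target driver name, $2 = target name
-+	tgts=/sys/kernel/scst_tgt/targets
-+	hw=n; mgmt=n
-+	[ -e "$tgts/$1/$2/hw_target" ] && hw=y
-+	[ -e "$tgts/$1/mgmt" ] && mgmt=y
-+	# hardware if both exist or neither does
-+	[ "$hw" = "$mgmt" ]
-+}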
-+
-+
-+Algorithm to convert current SCST configuration to config file
-+--------------------------------------------------------------
-+
-+A management utility SHOULD use the following algorithm when converting
-+the current SCST configuration to a config file.
-+
-+For all attributes whose names end in digits, the digits part should be
-+omitted from the attribute names when storing. For instance,
-+"IncomingUser1" should be stored as "IncomingUser".
-+
-+1. Scan all attributes in /sys/kernel/scst_tgt (not recursive) and store
-+all found key attributes.
-+
-+2. Scan all subdirectories of /sys/kernel/scst_tgt/handlers. Each
-+subdirectory containing a "mgmt" attribute is the root subdirectory of
-+a dev handler whose name is the name of the subdirectory. For each
-+found dev handler do the following:
-+
-+2.1. Store the dev handler's name. Also store the path to its root
-+subdirectory, if it isn't the default (/sys/kernel/scst_tgt/handlers/handler_name).
-+
-+2.2. Store all dev handler's key attributes.
-+
-+2.3. Go through all links in the root subdirectory pointing to
-+/sys/kernel/scst_tgt/devices and for each device:
-+
-+2.3.1. For virtual devices dev handlers:
-+
-+2.3.1.1. Store the name of the device.
-+
-+2.3.1.2. Store all key attributes. Mark all read-only key attributes
-+during storing; they will be parameters for the device's creation.
-+
-+2.3.2. For pass-through devices dev handlers:
-+
-+2.3.2.1. Store the H:C:I:L name of the device. Optionally, instead of
-+the name, the unique T10 vendor device ID found using the command:
-+
-+sg_inq -p 0x83 /dev/sdX
-+
-+can be stored. It allows this device to be reliably found again if it
-+has different host:channel:id:lun numbers after the next reboot. The
-+sdX device can be found as the last letters after ':' in
-+/sys/kernel/scst_tgt/devices/H:C:I:L/scsi_device/device/block:sdX, as
-+in the sketch below.
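-+
-+A sketch of the lookup, assuming exactly one block:sdX entry exists for
-+the device (the H:C:I:L value is an example):
-+
-+dev=1:0:0:0
-+for e in /sys/kernel/scst_tgt/devices/$dev/scsi_device/device/block:*; do
-+	sd=${e##*:}	# strip everything up to the last ':'
-+done
-+sg_inq -p 0x83 /dev/$sd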
-+
-+3. Go through all subdirectories in /sys/kernel/scst_tgt/targets. For
-+each target driver:
-+
-+3.1. Store the name of the target driver.
-+
-+3.2. Store all its key attributes.
-+
-+3.3. Go through all the target driver's target subdirectories. For each target:
-+
-+3.3.1. Store the name of the target.
-+
-+3.3.2. Mark whether the target is a hardware or a virtual target. The
-+target is a hardware target if it has the "hw_target" attribute or its
-+target driver doesn't have a "mgmt" attribute.
-+
-+3.3.3. Store all key attributes. Mark all read-only key attributes
-+during storing; they will be parameters for the target's creation.
-+
-+3.3.4. Scan the whole "luns" subdirectory and store:
-+
-+ - LUN.
-+
-+ - LU's device name.
-+
-+ - Key attributes.
-+
-+3.3.5. Scan all "ini_groups" subdirectories. For each group store the following:
-+
-+ - The group's name.
-+
-+ - The group's LUNs (the same info as for 3.3.4).
-+
-+ - The group's initiators.
-+
-+3.3.6. Store the value of the "enabled" attribute, if it exists.
-+
-+3.4. Store the value of the "enabled" attribute, if it exists.
-+
-+
-+Algorithm to initialize SCST from config file
-+---------------------------------------------
-+
-+A management utility SHOULD use the following algorithm when doing the
-+initial SCST configuration from a config file. All necessary kernel
-+modules and user space programs are supposed to be already loaded;
-+hence, all dev handlers' entries in /sys/kernel/scst_tgt/handlers, as
-+well as all entries for hardware targets, are already created.
-+
-+1. Set stored values for all stored global (/sys/kernel/scst_tgt)
-+attributes.
-+
-+2. For each dev handler:
-+
-+2.1. Set stored values for all already existing stored attributes.
-+
-+2.2. Create stored attributes that don't exist yet using the "add_attribute" command.
-+
-+2.3. For virtual devices dev handlers, for each stored device:
-+
-+2.3.1. Create the device using the "add_device" command with the marked
-+read-only attributes as parameters.
-+
-+2.3.2. Set stored values for all already existing stored attributes.
-+
-+2.3.3. Create stored attributes that don't exist yet using the
-+"add_device_attribute" command.
-+
-+2.4. For pass-through dev handlers, for each stored device:
-+
-+2.4.1. Assign the corresponding pass-through device to this dev handler
-+using "add_device" command.
-+
-+3. For each target driver:
-+
-+3.1. Set stored values for all already existing stored attributes.
-+
-+3.2. Create stored attributes that don't exist yet using the "add_attribute" command.
-+
-+3.3. For each target:
-+
-+3.3.1. For virtual targets:
-+
-+3.3.1.1. Create the target using the "add_target" command with the
-+marked read-only attributes as parameters.
-+
-+3.3.1.2. Set stored values for all already existing stored attributes.
-+
-+3.3.1.3. Create stored attributes that don't exist yet using the
-+"add_target_attribute" command.
-+
-+3.3.2. For hardware targets, for each target:
-+
-+3.3.2.1. Set stored values for all already existing stored attributes.
-+
-+3.3.2.2. Create stored attributes that don't exist yet using the
-+"add_target_attribute" command.
-+
-+3.3.3. Set up LUNs.
-+
-+3.3.4. Set up ini_groups, their LUNs and initiators' names.
-+
-+3.3.5. If this target supports enabling, enable it.
-+
-+3.4. If this target driver supports enabling, enable it.
-+
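-+A condensed, illustrative sketch of this ordering for one vdisk_fileio
-+device and one iSCSI target (all names, the IQN and the parameter
-+syntax are examples, not a fixed interface):
-+
-+echo "add_device disk1 filename=/disk1" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
-+echo "add_target iqn.2006-10.net.vlnb:tgt" >/sys/kernel/scst_tgt/targets/iscsi/mgmt
-+echo "add disk1 0" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/luns/mgmt
-+echo 1 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/enabled
-+echo 1 >/sys/kernel/scst_tgt/targets/iscsi/enabled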
-+
-+Algorithm to apply changes in config file to currently running SCST
-+-------------------------------------------------------------------
-+
-+A management utility SHOULD use the following algorithm when applying
-+changes in a config file to the currently running SCST.
-+
-+Not all changes can be applied to enabled targets or enabled target
-+drivers. On the other hand, for some target drivers enabling/disabling
-+is a very long and disruptive operation, which should be performed as
-+rarely as possible. Thus, the management utility SHOULD support an
-+additional option which, if set, makes it disable all affected targets
-+before making any change to them.
-+
-+1. Scan all attributes in /sys/kernel/scst_tgt (not recursive) and
-+compare stored and actual key attributes. Apply all changes.
-+
-+2. Scan all subdirectories of /sys/kernel/scst_tgt/handlers. Each
-+subdirectory containing a "mgmt" attribute is the root subdirectory of
-+a dev handler whose name is the name of the subdirectory. For each
-+found dev handler do the following:
-+
-+2.1. Compare stored and actual key attributes. Apply all changes.
-+Create new attributes using "add_attribute" commands and delete no
-+longer needed attributes using "del_attribute" commands.
-+
-+2.2. Compare existing devices (links in the root subdirectory pointing
-+to /sys/kernel/scst_tgt/devices) and stored devices in the config file.
-+Delete all devices that are no longer needed and create new ones.
-+
-+2.3. For all existing devices:
-+
-+2.3.1. Compare stored and actual key attributes. Apply all changes.
-+Create new attributes using "add_device_attribute" commands and delete
-+no longer needed attributes using "del_device_attribute" commands.
-+
-+2.3.2. If any read-only key attribute of a virtual device should be
-+changed, delete the device and recreate it.
-+
-+3. Go through all subdirectories in /sys/kernel/scst_tgt/targets. For
-+each target driver:
-+
-+3.1. If this target driver should be disabled, disable it.
-+
-+3.2. Compare stored and actual key attributes. Apply all changes.
-+Create new attributes using "add_attribute" commands and delete no
-+longer needed attributes using "del_attribute" commands.
-+
-+3.3. Go through all the target driver's target subdirectories. Compare
-+existing and stored targets. Delete all targets that are no longer
-+needed and create new ones.
-+
-+3.4. For all existing targets:
-+
-+3.4.1. If this target should be disabled, disable it.
-+
-+3.4.2. Compare stored and actual key attributes. Apply all changes.
-+Create new attributes using "add_target_attribute" commands and delete
-+no longer needed attributes using "del_target_attribute" commands.
-+
-+3.4.3. If any read-only key attribute of a virtual target should be
-+changed, delete the target and recreate it.
-+
-+3.4.4. Scan the whole "luns" subdirectory and apply the necessary
-+changes, using "replace" commands to replace one LUN with another, if
-+needed.
-+
-+3.4.5. Scan all "ini_groups" subdirectories and apply the necessary
-+changes, using "replace" commands to replace one LUN with another and
-+the "move" command to move an initiator from one group to another, if
-+needed. It MUST be done in the following order (see the sketch after
-+this algorithm):
-+
-+ - Necessary initiators are deleted, if they aren't going to be moved
-+
-+ - LUNs are updated
-+
-+ - Necessary initiators are added or moved
-+
-+3.4.6. If this target should be enabled, enable it.
-+
-+3.5. If this target driver should be enabled, enable it.
-+
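-+An illustrative sketch of the 3.4.5 ordering, assuming groups "grp1"
-+and "grp2" under an iSCSI target and the "del"/"replace"/"move"
-+commands of the corresponding "mgmt" files (all names are examples):
-+
-+tgt=/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt
-+# 1) delete initiators that aren't going to be moved
-+echo "del iqn.2005-03.org.open-iscsi:cacdcd2520" >$tgt/ini_groups/grp1/initiators/mgmt
-+# 2) update the LUNs
-+echo "replace disk2 0" >$tgt/ini_groups/grp1/luns/mgmt
-+# 3) add or move initiators
-+echo "move iqn.2005-03.org.open-iscsi:1b0e0f0dd3b5 grp1" >$tgt/ini_groups/grp2/initiators/mgmt
-+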
-diff -uprN orig/linux-3.2/drivers/scst/dev_handlers/Makefile linux-3.2/drivers/scst/dev_handlers/Makefile
---- orig/linux-3.2/drivers/scst/dev_handlers/Makefile
-+++ linux-3.2/drivers/scst/dev_handlers/Makefile
-@@ -0,0 +1,14 @@
-+ccflags-y += -Wno-unused-parameter
-+
-+obj-m := scst_cdrom.o scst_changer.o scst_disk.o scst_modisk.o scst_tape.o \
-+ scst_vdisk.o scst_raid.o scst_processor.o scst_user.o
-+
-+obj-$(CONFIG_SCST_DISK) += scst_disk.o
-+obj-$(CONFIG_SCST_TAPE) += scst_tape.o
-+obj-$(CONFIG_SCST_CDROM) += scst_cdrom.o
-+obj-$(CONFIG_SCST_MODISK) += scst_modisk.o
-+obj-$(CONFIG_SCST_CHANGER) += scst_changer.o
-+obj-$(CONFIG_SCST_RAID) += scst_raid.o
-+obj-$(CONFIG_SCST_PROCESSOR) += scst_processor.o
-+obj-$(CONFIG_SCST_VDISK) += scst_vdisk.o
-+obj-$(CONFIG_SCST_USER) += scst_user.o
-diff -uprN orig/linux-3.2/drivers/scst/dev_handlers/scst_cdrom.c linux-3.2/drivers/scst/dev_handlers/scst_cdrom.c
---- orig/linux-3.2/drivers/scst/dev_handlers/scst_cdrom.c
-+++ linux-3.2/drivers/scst/dev_handlers/scst_cdrom.c
-@@ -0,0 +1,263 @@
-+/*
-+ * scst_cdrom.c
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * SCSI CDROM (type 5) dev handler
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/cdrom.h>
-+#include <scsi/scsi_host.h>
-+#include <linux/slab.h>
-+
-+#define LOG_PREFIX "dev_cdrom"
-+
-+#include <scst/scst.h>
-+#include "scst_dev_handler.h"
-+
-+#define CDROM_NAME "dev_cdrom"
-+
-+#define CDROM_DEF_BLOCK_SHIFT 11
-+
-+struct cdrom_params {
-+ int block_shift;
-+};
-+
-+static int cdrom_attach(struct scst_device *);
-+static void cdrom_detach(struct scst_device *);
-+static int cdrom_parse(struct scst_cmd *);
-+static int cdrom_done(struct scst_cmd *);
-+
-+static struct scst_dev_type cdrom_devtype = {
-+ .name = CDROM_NAME,
-+ .type = TYPE_ROM,
-+ .threads_num = 1,
-+ .parse_atomic = 1,
-+ .dev_done_atomic = 1,
-+ .attach = cdrom_attach,
-+ .detach = cdrom_detach,
-+ .parse = cdrom_parse,
-+ .dev_done = cdrom_done,
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
-+ .trace_flags = &trace_flag,
-+#endif
-+};
-+
-+static int cdrom_attach(struct scst_device *dev)
-+{
-+ int res, rc;
-+ uint8_t cmd[10];
-+ const int buffer_size = 512;
-+ uint8_t *buffer = NULL;
-+ int retries;
-+ unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
-+ enum dma_data_direction data_dir;
-+ struct cdrom_params *params;
-+
-+ TRACE_ENTRY();
-+
-+ if (dev->scsi_dev == NULL ||
-+ dev->scsi_dev->type != dev->type) {
-+ PRINT_ERROR("%s", "SCSI device not define or illegal type");
-+ res = -ENODEV;
-+ goto out;
-+ }
-+
-+ params = kzalloc(sizeof(*params), GFP_KERNEL);
-+ if (params == NULL) {
-+ PRINT_ERROR("Unable to allocate struct cdrom_params (size %zd)",
-+ sizeof(*params));
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ buffer = kmalloc(buffer_size, GFP_KERNEL);
-+ if (!buffer) {
-+ PRINT_ERROR("Buffer memory allocation (size %d) failure",
-+ buffer_size);
-+ res = -ENOMEM;
-+ goto out_free_params;
-+ }
-+
-+ /* Clear any existing UA's and get cdrom capacity (cdrom block size) */
-+ memset(cmd, 0, sizeof(cmd));
-+ cmd[0] = READ_CAPACITY;
-+ cmd[1] = (dev->scsi_dev->scsi_level <= SCSI_2) ?
-+ ((dev->scsi_dev->lun << 5) & 0xe0) : 0;
-+ retries = SCST_DEV_UA_RETRIES;
-+ while (1) {
-+ memset(buffer, 0, buffer_size);
-+ memset(sense_buffer, 0, sizeof(sense_buffer));
-+ data_dir = SCST_DATA_READ;
-+
-+ TRACE_DBG("%s", "Doing READ_CAPACITY");
-+ rc = scsi_execute(dev->scsi_dev, cmd, data_dir, buffer,
-+ buffer_size, sense_buffer,
-+ SCST_GENERIC_CDROM_REG_TIMEOUT, 3, 0
-+ , NULL
-+ );
-+
-+ TRACE_DBG("READ_CAPACITY done: %x", rc);
-+
-+ if ((rc == 0) ||
-+ !scst_analyze_sense(sense_buffer,
-+ sizeof(sense_buffer), SCST_SENSE_KEY_VALID,
-+ UNIT_ATTENTION, 0, 0))
-+ break;
-+
-+ if (!--retries) {
-+ PRINT_ERROR("UA not cleared after %d retries",
-+ SCST_DEV_UA_RETRIES);
-+ params->block_shift = CDROM_DEF_BLOCK_SHIFT;
-+ res = -ENODEV;
-+ goto out_free_buf;
-+ }
-+ }
-+
-+ if (rc == 0) {
-+ int sector_size = ((buffer[4] << 24) | (buffer[5] << 16) |
-+ (buffer[6] << 8) | (buffer[7] << 0));
-+ if (sector_size == 0)
-+ params->block_shift = CDROM_DEF_BLOCK_SHIFT;
-+ else
-+ params->block_shift =
-+ scst_calc_block_shift(sector_size);
-+ TRACE_DBG("Sector size is %i scsi_level %d(SCSI_2 %d)",
-+ sector_size, dev->scsi_dev->scsi_level, SCSI_2);
-+ } else {
-+ params->block_shift = CDROM_DEF_BLOCK_SHIFT;
-+ TRACE(TRACE_MINOR, "Read capacity failed: %x, using default "
-+ "sector size %d", rc, params->block_shift);
-+ PRINT_BUFF_FLAG(TRACE_MINOR, "Returned sense", sense_buffer,
-+ sizeof(sense_buffer));
-+ }
-+
-+ res = scst_obtain_device_parameters(dev);
-+ if (res != 0) {
-+ PRINT_ERROR("Failed to obtain control parameters for device "
-+ "%s", dev->virt_name);
-+ goto out_free_buf;
-+ }
-+
-+out_free_buf:
-+ kfree(buffer);
-+
-+out_free_params:
-+ if (res == 0)
-+ dev->dh_priv = params;
-+ else
-+ kfree(params);
-+
-+out:
-+ TRACE_EXIT();
-+ return res;
-+}
-+
-+static void cdrom_detach(struct scst_device *dev)
-+{
-+ struct cdrom_params *params =
-+ (struct cdrom_params *)dev->dh_priv;
-+
-+ TRACE_ENTRY();
-+
-+ kfree(params);
-+ dev->dh_priv = NULL;
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int cdrom_get_block_shift(struct scst_cmd *cmd)
-+{
-+ struct cdrom_params *params = (struct cdrom_params *)cmd->dev->dh_priv;
-+ /*
-+ * No need for locks here, since *_detach() can not be
-+ * called, when there are existing commands.
-+ */
-+ return params->block_shift;
-+}
-+
-+static int cdrom_parse(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_DEFAULT;
-+
-+ scst_cdrom_generic_parse(cmd, cdrom_get_block_shift);
-+
-+ cmd->retries = SCST_PASSTHROUGH_RETRIES;
-+
-+ return res;
-+}
-+
-+static void cdrom_set_block_shift(struct scst_cmd *cmd, int block_shift)
-+{
-+ struct cdrom_params *params = (struct cdrom_params *)cmd->dev->dh_priv;
-+ /*
-+ * No need for locks here, since *_detach() can not be
-+ * called, when there are existing commands.
-+ */
-+ if (block_shift != 0)
-+ params->block_shift = block_shift;
-+ else
-+ params->block_shift = CDROM_DEF_BLOCK_SHIFT;
-+ return;
-+}
-+
-+static int cdrom_done(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_DEFAULT;
-+
-+ TRACE_ENTRY();
-+
-+ res = scst_block_generic_dev_done(cmd, cdrom_set_block_shift);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int __init cdrom_init(void)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ cdrom_devtype.module = THIS_MODULE;
-+
-+ res = scst_register_dev_driver(&cdrom_devtype);
-+ if (res < 0)
-+ goto out;
-+
-+out:
-+ TRACE_EXIT();
-+ return res;
-+
-+}
-+
-+static void __exit cdrom_exit(void)
-+{
-+ TRACE_ENTRY();
-+ scst_unregister_dev_driver(&cdrom_devtype);
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+module_init(cdrom_init);
-+module_exit(cdrom_exit);
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Vladislav Bolkhovitin & Leonid Stoljar");
-+MODULE_DESCRIPTION("SCSI CDROM (type 5) dev handler for SCST");
-+MODULE_VERSION(SCST_VERSION_STRING);
-diff -uprN orig/linux-3.2/drivers/scst/dev_handlers/scst_changer.c linux-3.2/drivers/scst/dev_handlers/scst_changer.c
---- orig/linux-3.2/drivers/scst/dev_handlers/scst_changer.c
-+++ linux-3.2/drivers/scst/dev_handlers/scst_changer.c
-@@ -0,0 +1,183 @@
-+/*
-+ * scst_changer.c
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * SCSI medium changer (type 8) dev handler
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <scsi/scsi_host.h>
-+#include <linux/slab.h>
-+
-+#define LOG_PREFIX "dev_changer"
-+
-+#include <scst/scst.h>
-+#include "scst_dev_handler.h"
-+
-+#define CHANGER_NAME "dev_changer"
-+
-+#define CHANGER_RETRIES 2
-+
-+static int changer_attach(struct scst_device *);
-+/* static void changer_detach(struct scst_device *); */
-+static int changer_parse(struct scst_cmd *);
-+/* static int changer_done(struct scst_cmd *); */
-+
-+static struct scst_dev_type changer_devtype = {
-+ .name = CHANGER_NAME,
-+ .type = TYPE_MEDIUM_CHANGER,
-+ .threads_num = 1,
-+ .parse_atomic = 1,
-+/* .dev_done_atomic = 1, */
-+ .attach = changer_attach,
-+/* .detach = changer_detach, */
-+ .parse = changer_parse,
-+/* .dev_done = changer_done */
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
-+ .trace_flags = &trace_flag,
-+#endif
-+};
-+
-+static int changer_attach(struct scst_device *dev)
-+{
-+ int res, rc;
-+ int retries;
-+
-+ TRACE_ENTRY();
-+
-+ if (dev->scsi_dev == NULL ||
-+ dev->scsi_dev->type != dev->type) {
-+ PRINT_ERROR("%s", "SCSI device not define or illegal type");
-+ res = -ENODEV;
-+ goto out;
-+ }
-+
-+ /*
-+ * If the device is offline, don't try to read capacity or any
-+ * of the other stuff
-+ */
-+ if (dev->scsi_dev->sdev_state == SDEV_OFFLINE) {
-+ TRACE_DBG("%s", "Device is offline");
-+ res = -ENODEV;
-+ goto out;
-+ }
-+
-+ retries = SCST_DEV_UA_RETRIES;
-+ do {
-+ TRACE_DBG("%s", "Doing TEST_UNIT_READY");
-+ rc = scsi_test_unit_ready(dev->scsi_dev,
-+ SCST_GENERIC_CHANGER_TIMEOUT, CHANGER_RETRIES
-+ , NULL);
-+ TRACE_DBG("TEST_UNIT_READY done: %x", rc);
-+ } while ((--retries > 0) && rc);
-+
-+ if (rc) {
-+ PRINT_WARNING("Unit not ready: %x", rc);
-+ /* Let's try not to be too smart and continue processing */
-+ }
-+
-+ res = scst_obtain_device_parameters(dev);
-+ if (res != 0) {
-+ PRINT_ERROR("Failed to obtain control parameters for device "
-+ "%s", dev->virt_name);
-+ goto out;
-+ }
-+
-+out:
-+ TRACE_EXIT_HRES(res);
-+ return res;
-+}
-+
-+#if 0
-+void changer_detach(struct scst_device *dev)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+#endif
-+
-+static int changer_parse(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_DEFAULT;
-+
-+ scst_changer_generic_parse(cmd, NULL);
-+
-+ cmd->retries = SCST_PASSTHROUGH_RETRIES;
-+
-+ return res;
-+}
-+
-+#if 0
-+int changer_done(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_DEFAULT;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+ * SCST sets good defaults for cmd->is_send_status and
-+ * cmd->resp_data_len based on cmd->status and cmd->data_direction,
-+ * therefore change them only if necessary
-+ */
-+
-+#if 0
-+ switch (cmd->cdb[0]) {
-+ default:
-+ /* It's all good */
-+ break;
-+ }
-+#endif
-+
-+ TRACE_EXIT();
-+ return res;
-+}
-+#endif
-+
-+static int __init changer_init(void)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ changer_devtype.module = THIS_MODULE;
-+
-+ res = scst_register_dev_driver(&changer_devtype);
-+ if (res < 0)
-+ goto out;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void __exit changer_exit(void)
-+{
-+ TRACE_ENTRY();
-+ scst_unregister_dev_driver(&changer_devtype);
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+module_init(changer_init);
-+module_exit(changer_exit);
-+
-+MODULE_AUTHOR("Vladislav Bolkhovitin & Leonid Stoljar");
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("SCSI medium changer (type 8) dev handler for SCST");
-+MODULE_VERSION(SCST_VERSION_STRING);
-diff -uprN orig/linux-3.2/drivers/scst/dev_handlers/scst_dev_handler.h linux-3.2/drivers/scst/dev_handlers/scst_dev_handler.h
---- orig/linux-3.2/drivers/scst/dev_handlers/scst_dev_handler.h
-+++ linux-3.2/drivers/scst/dev_handlers/scst_dev_handler.h
-@@ -0,0 +1,27 @@
-+#ifndef __SCST_DEV_HANDLER_H
-+#define __SCST_DEV_HANDLER_H
-+
-+#include <linux/module.h>
-+#include <scsi/scsi_eh.h>
-+#include <scst/scst_debug.h>
-+
-+#define SCST_DEV_UA_RETRIES 5
-+#define SCST_PASSTHROUGH_RETRIES 0
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+
-+#ifdef CONFIG_SCST_DEBUG
-+#define SCST_DEFAULT_DEV_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_PID | \
-+ TRACE_LINE | TRACE_FUNCTION | TRACE_MGMT | TRACE_MINOR | \
-+ TRACE_MGMT_DEBUG | TRACE_SPECIAL)
-+#else
-+#define SCST_DEFAULT_DEV_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MGMT | \
-+ TRACE_SPECIAL)
-+#endif
-+
-+static unsigned long dh_trace_flag = SCST_DEFAULT_DEV_LOG_FLAGS;
-+#define trace_flag dh_trace_flag
-+
-+#endif /* defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING) */
-+
-+#endif /* __SCST_DEV_HANDLER_H */
-diff -uprN orig/linux-3.2/drivers/scst/dev_handlers/scst_disk.c linux-3.2/drivers/scst/dev_handlers/scst_disk.c
---- orig/linux-3.2/drivers/scst/dev_handlers/scst_disk.c
-+++ linux-3.2/drivers/scst/dev_handlers/scst_disk.c
-@@ -0,0 +1,692 @@
-+/*
-+ * scst_disk.c
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * SCSI disk (type 0) dev handler
-+ * &
-+ * SCSI disk (type 0) "performance" device handler (skip all READ and WRITE
-+ * operations).
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/blkdev.h>
-+#include <scsi/scsi_host.h>
-+#include <linux/slab.h>
-+#include <asm/unaligned.h>
-+
-+#define LOG_PREFIX "dev_disk"
-+
-+#include <scst/scst.h>
-+#include "scst_dev_handler.h"
-+
-+# define DISK_NAME "dev_disk"
-+# define DISK_PERF_NAME "dev_disk_perf"
-+
-+#define DISK_DEF_BLOCK_SHIFT 9
-+
-+struct disk_params {
-+ int block_shift;
-+};
-+
-+static int disk_attach(struct scst_device *dev);
-+static void disk_detach(struct scst_device *dev);
-+static int disk_parse(struct scst_cmd *cmd);
-+static int disk_perf_exec(struct scst_cmd *cmd);
-+static int disk_done(struct scst_cmd *cmd);
-+static int disk_exec(struct scst_cmd *cmd);
-+static bool disk_on_sg_tablesize_low(struct scst_cmd *cmd);
-+
-+static struct scst_dev_type disk_devtype = {
-+ .name = DISK_NAME,
-+ .type = TYPE_DISK,
-+ .threads_num = 1,
-+ .parse_atomic = 1,
-+ .dev_done_atomic = 1,
-+ .attach = disk_attach,
-+ .detach = disk_detach,
-+ .parse = disk_parse,
-+ .exec = disk_exec,
-+ .on_sg_tablesize_low = disk_on_sg_tablesize_low,
-+ .dev_done = disk_done,
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
-+ .trace_flags = &trace_flag,
-+#endif
-+};
-+
-+static struct scst_dev_type disk_devtype_perf = {
-+ .name = DISK_PERF_NAME,
-+ .type = TYPE_DISK,
-+ .parse_atomic = 1,
-+ .dev_done_atomic = 1,
-+ .attach = disk_attach,
-+ .detach = disk_detach,
-+ .parse = disk_parse,
-+ .exec = disk_perf_exec,
-+ .dev_done = disk_done,
-+ .on_sg_tablesize_low = disk_on_sg_tablesize_low,
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
-+ .trace_flags = &trace_flag,
-+#endif
-+};
-+
-+static int __init init_scst_disk_driver(void)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ disk_devtype.module = THIS_MODULE;
-+
-+ res = scst_register_dev_driver(&disk_devtype);
-+ if (res < 0)
-+ goto out;
-+
-+ disk_devtype_perf.module = THIS_MODULE;
-+
-+ res = scst_register_dev_driver(&disk_devtype_perf);
-+ if (res < 0)
-+ goto out_unreg;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_unreg:
-+ scst_unregister_dev_driver(&disk_devtype);
-+ goto out;
-+}
-+
-+static void __exit exit_scst_disk_driver(void)
-+{
-+ TRACE_ENTRY();
-+
-+ scst_unregister_dev_driver(&disk_devtype_perf);
-+ scst_unregister_dev_driver(&disk_devtype);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+module_init(init_scst_disk_driver);
-+module_exit(exit_scst_disk_driver);
-+
-+static int disk_attach(struct scst_device *dev)
-+{
-+ int res, rc;
-+ uint8_t cmd[10];
-+ const int buffer_size = 512;
-+ uint8_t *buffer = NULL;
-+ int retries;
-+ unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
-+ enum dma_data_direction data_dir;
-+ struct disk_params *params;
-+
-+ TRACE_ENTRY();
-+
-+ if (dev->scsi_dev == NULL ||
-+ dev->scsi_dev->type != dev->type) {
-+ PRINT_ERROR("%s", "SCSI device not define or illegal type");
-+ res = -ENODEV;
-+ goto out;
-+ }
-+
-+ params = kzalloc(sizeof(*params), GFP_KERNEL);
-+ if (params == NULL) {
-+ PRINT_ERROR("Unable to allocate struct disk_params (size %zd)",
-+ sizeof(*params));
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ buffer = kmalloc(buffer_size, GFP_KERNEL);
-+ if (!buffer) {
-+ PRINT_ERROR("Buffer memory allocation (size %d) failure",
-+ buffer_size);
-+ res = -ENOMEM;
-+ goto out_free_params;
-+ }
-+
-+ /* Clear any existing UA's and get disk capacity (disk block size) */
-+ memset(cmd, 0, sizeof(cmd));
-+ cmd[0] = READ_CAPACITY;
-+ cmd[1] = (dev->scsi_dev->scsi_level <= SCSI_2) ?
-+ ((dev->scsi_dev->lun << 5) & 0xe0) : 0;
-+ retries = SCST_DEV_UA_RETRIES;
-+ while (1) {
-+ memset(buffer, 0, buffer_size);
-+ memset(sense_buffer, 0, sizeof(sense_buffer));
-+ data_dir = SCST_DATA_READ;
-+
-+ TRACE_DBG("%s", "Doing READ_CAPACITY");
-+ rc = scsi_execute(dev->scsi_dev, cmd, data_dir, buffer,
-+ buffer_size, sense_buffer,
-+ SCST_GENERIC_DISK_REG_TIMEOUT, 3, 0
-+ , NULL
-+ );
-+
-+ TRACE_DBG("READ_CAPACITY done: %x", rc);
-+
-+ if ((rc == 0) ||
-+ !scst_analyze_sense(sense_buffer,
-+ sizeof(sense_buffer), SCST_SENSE_KEY_VALID,
-+ UNIT_ATTENTION, 0, 0))
-+ break;
-+ if (!--retries) {
-+ PRINT_ERROR("UA not clear after %d retries",
-+ SCST_DEV_UA_RETRIES);
-+ res = -ENODEV;
-+ goto out_free_buf;
-+ }
-+ }
-+ if (rc == 0) {
-+ int sector_size = ((buffer[4] << 24) | (buffer[5] << 16) |
-+ (buffer[6] << 8) | (buffer[7] << 0));
-+ if (sector_size == 0)
-+ params->block_shift = DISK_DEF_BLOCK_SHIFT;
-+ else
-+ params->block_shift =
-+ scst_calc_block_shift(sector_size);
-+ } else {
-+ params->block_shift = DISK_DEF_BLOCK_SHIFT;
-+ TRACE(TRACE_MINOR, "Read capacity failed: %x, using default "
-+ "sector size %d", rc, params->block_shift);
-+ PRINT_BUFF_FLAG(TRACE_MINOR, "Returned sense", sense_buffer,
-+ sizeof(sense_buffer));
-+ }
-+
-+ res = scst_obtain_device_parameters(dev);
-+ if (res != 0) {
-+ PRINT_ERROR("Failed to obtain control parameters for device "
-+ "%s", dev->virt_name);
-+ goto out_free_buf;
-+ }
-+
-+out_free_buf:
-+ kfree(buffer);
-+
-+out_free_params:
-+ if (res == 0)
-+ dev->dh_priv = params;
-+ else
-+ kfree(params);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void disk_detach(struct scst_device *dev)
-+{
-+ struct disk_params *params =
-+ (struct disk_params *)dev->dh_priv;
-+
-+ TRACE_ENTRY();
-+
-+ kfree(params);
-+ dev->dh_priv = NULL;
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int disk_get_block_shift(struct scst_cmd *cmd)
-+{
-+ struct disk_params *params = (struct disk_params *)cmd->dev->dh_priv;
-+ /*
-+ * No need for locks here, since *_detach() can not be
-+ * called, when there are existing commands.
-+ */
-+ return params->block_shift;
-+}
-+
-+static int disk_parse(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_DEFAULT;
-+
-+ scst_sbc_generic_parse(cmd, disk_get_block_shift);
-+
-+ cmd->retries = SCST_PASSTHROUGH_RETRIES;
-+
-+ return res;
-+}
-+
-+static void disk_set_block_shift(struct scst_cmd *cmd, int block_shift)
-+{
-+ struct disk_params *params = (struct disk_params *)cmd->dev->dh_priv;
-+ /*
-+ * No need for locks here, since *_detach() can not be
-+ * called, when there are existing commands.
-+ */
-+ if (block_shift != 0)
-+ params->block_shift = block_shift;
-+ else
-+ params->block_shift = DISK_DEF_BLOCK_SHIFT;
-+ return;
-+}
-+
-+static int disk_done(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_DEFAULT;
-+
-+ TRACE_ENTRY();
-+
-+ res = scst_block_generic_dev_done(cmd, disk_set_block_shift);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static bool disk_on_sg_tablesize_low(struct scst_cmd *cmd)
-+{
-+ bool res;
-+
-+ TRACE_ENTRY();
-+
-+ switch (cmd->cdb[0]) {
-+ case WRITE_6:
-+ case READ_6:
-+ case WRITE_10:
-+ case READ_10:
-+ case WRITE_VERIFY:
-+ case WRITE_12:
-+ case READ_12:
-+ case WRITE_VERIFY_12:
-+ case WRITE_16:
-+ case READ_16:
-+ case WRITE_VERIFY_16:
-+ res = true;
-+ /* See comment in disk_exec */
-+ cmd->inc_expected_sn_on_done = 1;
-+ break;
-+ default:
-+ res = false;
-+ break;
-+ }
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+struct disk_work {
-+ struct scst_cmd *cmd;
-+ struct completion disk_work_cmpl;
-+ int result;
-+ unsigned int left;
-+ uint64_t save_lba;
-+ unsigned int save_len;
-+ struct scatterlist *save_sg;
-+ int save_sg_cnt;
-+};
-+
-+static int disk_cdb_get_transfer_data(const uint8_t *cdb,
-+ uint64_t *out_lba, unsigned int *out_length)
-+{
-+ int res;
-+ uint64_t lba;
-+ unsigned int len;
-+
-+ TRACE_ENTRY();
-+
-+ switch (cdb[0]) {
-+ case WRITE_6:
-+ case READ_6:
-+ lba = be16_to_cpu(get_unaligned((__be16 *)&cdb[2]));
-+ len = cdb[4];
-+ break;
-+ case WRITE_10:
-+ case READ_10:
-+ case WRITE_VERIFY:
-+ lba = be32_to_cpu(get_unaligned((__be32 *)&cdb[2]));
-+ len = be16_to_cpu(get_unaligned((__be16 *)&cdb[7]));
-+ break;
-+ case WRITE_12:
-+ case READ_12:
-+ case WRITE_VERIFY_12:
-+ lba = be32_to_cpu(get_unaligned((__be32 *)&cdb[2]));
-+ len = be32_to_cpu(get_unaligned((__be32 *)&cdb[6]));
-+ break;
-+ case WRITE_16:
-+ case READ_16:
-+ case WRITE_VERIFY_16:
-+ lba = be64_to_cpu(get_unaligned((__be64 *)&cdb[2]));
-+ len = be32_to_cpu(get_unaligned((__be32 *)&cdb[10]));
-+ break;
-+ default:
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ res = 0;
-+ *out_lba = lba;
-+ *out_length = len;
-+
-+ TRACE_DBG("LBA %lld, length %d", (unsigned long long)lba, len);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int disk_cdb_set_transfer_data(uint8_t *cdb,
-+ uint64_t lba, unsigned int len)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ switch (cdb[0]) {
-+ case WRITE_6:
-+ case READ_6:
-+ put_unaligned(cpu_to_be16(lba), (__be16 *)&cdb[2]);
-+ cdb[4] = len;
-+ break;
-+ case WRITE_10:
-+ case READ_10:
-+ case WRITE_VERIFY:
-+ put_unaligned(cpu_to_be32(lba), (__be32 *)&cdb[2]);
-+ put_unaligned(cpu_to_be16(len), (__be16 *)&cdb[7]);
-+ break;
-+ case WRITE_12:
-+ case READ_12:
-+ case WRITE_VERIFY_12:
-+ put_unaligned(cpu_to_be32(lba), (__be32 *)&cdb[2]);
-+ put_unaligned(cpu_to_be32(len), (__be32 *)&cdb[6]);
-+ break;
-+ case WRITE_16:
-+ case READ_16:
-+ case WRITE_VERIFY_16:
-+ put_unaligned(cpu_to_be64(lba), (__be64 *)&cdb[2]);
-+ put_unaligned(cpu_to_be32(len), (__be32 *)&cdb[10]);
-+ break;
-+ default:
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ res = 0;
-+
-+ TRACE_DBG("LBA %lld, length %d", (unsigned long long)lba, len);
-+ TRACE_BUFFER("New CDB", cdb, SCST_MAX_CDB_SIZE);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void disk_restore_sg(struct disk_work *work)
-+{
-+ disk_cdb_set_transfer_data(work->cmd->cdb, work->save_lba, work->save_len);
-+ work->cmd->sg = work->save_sg;
-+ work->cmd->sg_cnt = work->save_sg_cnt;
-+ return;
-+}
-+
-+static void disk_cmd_done(void *data, char *sense, int result, int resid)
-+{
-+ struct disk_work *work = data;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("work %p, cmd %p, left %d, result %d, sense %p, resid %d",
-+ work, work->cmd, work->left, result, sense, resid);
-+
-+ if (result == SAM_STAT_GOOD)
-+ goto out_complete;
-+
-+ work->result = result;
-+
-+ disk_restore_sg(work);
-+
-+ scst_pass_through_cmd_done(work->cmd, sense, result, resid + work->left);
-+
-+out_complete:
-+ complete_all(&work->disk_work_cmpl);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Executes command and split CDB, if necessary */
-+static int disk_exec(struct scst_cmd *cmd)
-+{
-+ int res, rc;
-+ struct disk_params *params = (struct disk_params *)cmd->dev->dh_priv;
-+ struct disk_work work;
-+ unsigned int offset, cur_len; /* in blocks */
-+ struct scatterlist *sg, *start_sg;
-+ int cur_sg_cnt;
-+ int sg_tablesize = cmd->dev->scsi_dev->host->sg_tablesize;
-+ int max_sectors;
-+ int num, j;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+ * For PC requests, max_hw_sectors is used instead of max_sectors.
-+ */
-+ max_sectors = queue_max_hw_sectors(cmd->dev->scsi_dev->request_queue);
-+
-+ if (unlikely(((max_sectors << params->block_shift) & ~PAGE_MASK) != 0)) {
-+ int mlen = max_sectors << params->block_shift;
-+ int pg = ((mlen >> PAGE_SHIFT) + ((mlen & ~PAGE_MASK) != 0)) - 1;
-+ int adj_len = pg << PAGE_SHIFT;
-+ max_sectors = adj_len >> params->block_shift;
-+ if (max_sectors == 0) {
-+ PRINT_ERROR("Too low max sectors %d", max_sectors);
-+ goto out_error;
-+ }
-+ }
-+
-+ if (unlikely((cmd->bufflen >> params->block_shift) > max_sectors)) {
-+ if ((cmd->out_bufflen >> params->block_shift) > max_sectors) {
-+ PRINT_ERROR("Too limited max_sectors %d for "
-+ "bidirectional cmd %x (out_bufflen %d)",
-+ max_sectors, cmd->cdb[0], cmd->out_bufflen);
-+ /* Let lower level handle it */
-+ res = SCST_EXEC_NOT_COMPLETED;
-+ goto out;
-+ }
-+ goto split;
-+ }
-+
-+ if (likely(cmd->sg_cnt <= sg_tablesize)) {
-+ res = SCST_EXEC_NOT_COMPLETED;
-+ goto out;
-+ }
-+
-+split:
-+ BUG_ON(cmd->out_sg_cnt > sg_tablesize);
-+ BUG_ON((cmd->out_bufflen >> params->block_shift) > max_sectors);
-+
-+ /*
-+ * We don't support changing BIDI CDBs (see disk_on_sg_tablesize_low()),
-+ * so use only sg_cnt
-+ */
-+
-+ memset(&work, 0, sizeof(work));
-+ work.cmd = cmd;
-+ work.save_sg = cmd->sg;
-+ work.save_sg_cnt = cmd->sg_cnt;
-+ rc = disk_cdb_get_transfer_data(cmd->cdb, &work.save_lba,
-+ &work.save_len);
-+ if (rc != 0)
-+ goto out_error;
-+
-+ rc = scst_check_local_events(cmd);
-+ if (unlikely(rc != 0))
-+ goto out_done;
-+
-+ cmd->status = 0;
-+ cmd->msg_status = 0;
-+ cmd->host_status = DID_OK;
-+ cmd->driver_status = 0;
-+
-+ TRACE_DBG("cmd %p, save_sg %p, save_sg_cnt %d, save_lba %lld, "
-+ "save_len %d (sg_tablesize %d, max_sectors %d, block_shift %d, "
-+ "sizeof(*sg) 0x%zx)", cmd, work.save_sg, work.save_sg_cnt,
-+ (unsigned long long)work.save_lba, work.save_len,
-+ sg_tablesize, max_sectors, params->block_shift, sizeof(*sg));
-+
-+ /*
-+ * If we submit all chunks asynchronously, it will be very non-trivial
-+ * to decide what to do if several of them finish with sense or
-+ * residual. So, let's
-+ * do it synchronously.
-+ */
-+
-+ num = 1;
-+ j = 0;
-+ offset = 0;
-+ cur_len = 0;
-+ sg = work.save_sg;
-+ start_sg = sg;
-+ cur_sg_cnt = 0;
-+ while (1) {
-+ unsigned int l;
-+
-+ if (unlikely(sg_is_chain(&sg[j]))) {
-+ bool reset_start_sg = (start_sg == &sg[j]);
-+ sg = sg_chain_ptr(&sg[j]);
-+ j = 0;
-+ if (reset_start_sg)
-+ start_sg = sg;
-+ }
-+
-+ l = sg[j].length >> params->block_shift;
-+ cur_len += l;
-+ cur_sg_cnt++;
-+
-+ TRACE_DBG("l %d, j %d, num %d, offset %d, cur_len %d, "
-+ "cur_sg_cnt %d, start_sg %p", l, j, num, offset,
-+ cur_len, cur_sg_cnt, start_sg);
-+
-+ if (((num % sg_tablesize) == 0) ||
-+ (num == work.save_sg_cnt) ||
-+ (cur_len >= max_sectors)) {
-+ TRACE_DBG("%s", "Execing...");
-+
-+ disk_cdb_set_transfer_data(cmd->cdb,
-+ work.save_lba + offset, cur_len);
-+ cmd->sg = start_sg;
-+ cmd->sg_cnt = cur_sg_cnt;
-+
-+ work.left = work.save_len - (offset + cur_len);
-+ init_completion(&work.disk_work_cmpl);
-+
-+ rc = scst_scsi_exec_async(cmd, &work, disk_cmd_done);
-+ if (unlikely(rc != 0)) {
-+ PRINT_ERROR("scst_scsi_exec_async() failed: %d",
-+ rc);
-+ goto out_err_restore;
-+ }
-+
-+ wait_for_completion(&work.disk_work_cmpl);
-+
-+ if (work.result != SAM_STAT_GOOD) {
-+ /* cmd can be already dead */
-+ res = SCST_EXEC_COMPLETED;
-+ goto out;
-+ }
-+
-+ offset += cur_len;
-+ cur_len = 0;
-+ cur_sg_cnt = 0;
-+ start_sg = &sg[j+1];
-+
-+ if (num == work.save_sg_cnt)
-+ break;
-+ }
-+ num++;
-+ j++;
-+ }
-+
-+ cmd->completed = 1;
-+
-+out_restore:
-+ disk_restore_sg(&work);
-+
-+out_done:
-+ res = SCST_EXEC_COMPLETED;
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_err_restore:
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out_restore;
-+
-+out_error:
-+ scst_set_cmd_error(cmd, SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out_done;
-+}
-+
-+static int disk_perf_exec(struct scst_cmd *cmd)
-+{
-+ int res, rc;
-+ int opcode = cmd->cdb[0];
-+
-+ TRACE_ENTRY();
-+
-+ rc = scst_check_local_events(cmd);
-+ if (unlikely(rc != 0))
-+ goto out_done;
-+
-+ cmd->status = 0;
-+ cmd->msg_status = 0;
-+ cmd->host_status = DID_OK;
-+ cmd->driver_status = 0;
-+
-+ switch (opcode) {
-+ case WRITE_6:
-+ case WRITE_10:
-+ case WRITE_12:
-+ case WRITE_16:
-+ case READ_6:
-+ case READ_10:
-+ case READ_12:
-+ case READ_16:
-+ case WRITE_VERIFY:
-+ case WRITE_VERIFY_12:
-+ case WRITE_VERIFY_16:
-+ goto out_complete;
-+ }
-+
-+ res = SCST_EXEC_NOT_COMPLETED;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_complete:
-+ cmd->completed = 1;
-+
-+out_done:
-+ res = SCST_EXEC_COMPLETED;
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
-+ goto out;
-+}
-+
-+MODULE_AUTHOR("Vladislav Bolkhovitin & Leonid Stoljar");
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("SCSI disk (type 0) dev handler for SCST");
-+MODULE_VERSION(SCST_VERSION_STRING);
-+
-diff -uprN orig/linux-3.2/drivers/scst/dev_handlers/scst_modisk.c linux-3.2/drivers/scst/dev_handlers/scst_modisk.c
---- orig/linux-3.2/drivers/scst/dev_handlers/scst_modisk.c
-+++ linux-3.2/drivers/scst/dev_handlers/scst_modisk.c
-@@ -0,0 +1,350 @@
-+/*
-+ * scst_modisk.c
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * SCSI MO disk (type 7) dev handler
-+ * &
-+ * SCSI MO disk (type 7) "performance" device handler (skip all READ and WRITE
-+ * operations).
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <scsi/scsi_host.h>
-+#include <linux/slab.h>
-+
-+#define LOG_PREFIX "dev_modisk"
-+
-+#include <scst/scst.h>
-+#include "scst_dev_handler.h"
-+
-+# define MODISK_NAME "dev_modisk"
-+# define MODISK_PERF_NAME "dev_modisk_perf"
-+
-+#define MODISK_DEF_BLOCK_SHIFT 10
-+
-+struct modisk_params {
-+ int block_shift;
-+};
-+
-+static int modisk_attach(struct scst_device *);
-+static void modisk_detach(struct scst_device *);
-+static int modisk_parse(struct scst_cmd *);
-+static int modisk_done(struct scst_cmd *);
-+static int modisk_perf_exec(struct scst_cmd *);
-+
-+static struct scst_dev_type modisk_devtype = {
-+ .name = MODISK_NAME,
-+ .type = TYPE_MOD,
-+ .threads_num = 1,
-+ .parse_atomic = 1,
-+ .dev_done_atomic = 1,
-+ .attach = modisk_attach,
-+ .detach = modisk_detach,
-+ .parse = modisk_parse,
-+ .dev_done = modisk_done,
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
-+ .trace_flags = &trace_flag,
-+#endif
-+};
-+
-+static struct scst_dev_type modisk_devtype_perf = {
-+ .name = MODISK_PERF_NAME,
-+ .type = TYPE_MOD,
-+ .parse_atomic = 1,
-+ .dev_done_atomic = 1,
-+ .attach = modisk_attach,
-+ .detach = modisk_detach,
-+ .parse = modisk_parse,
-+ .dev_done = modisk_done,
-+ .exec = modisk_perf_exec,
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
-+ .trace_flags = &trace_flag,
-+#endif
-+};
-+
-+static int __init init_scst_modisk_driver(void)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ modisk_devtype.module = THIS_MODULE;
-+
-+ res = scst_register_dev_driver(&modisk_devtype);
-+ if (res < 0)
-+ goto out;
-+
-+ modisk_devtype_perf.module = THIS_MODULE;
-+
-+ res = scst_register_dev_driver(&modisk_devtype_perf);
-+ if (res < 0)
-+ goto out_unreg;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_unreg:
-+ scst_unregister_dev_driver(&modisk_devtype);
-+ goto out;
-+}
-+
-+static void __exit exit_scst_modisk_driver(void)
-+{
-+ TRACE_ENTRY();
-+
-+ scst_unregister_dev_driver(&modisk_devtype_perf);
-+ scst_unregister_dev_driver(&modisk_devtype);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+module_init(init_scst_modisk_driver);
-+module_exit(exit_scst_modisk_driver);
-+
-+static int modisk_attach(struct scst_device *dev)
-+{
-+ int res, rc;
-+ uint8_t cmd[10];
-+ const int buffer_size = 512;
-+ uint8_t *buffer = NULL;
-+ int retries;
-+ unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
-+ enum dma_data_direction data_dir;
-+ struct modisk_params *params;
-+
-+ TRACE_ENTRY();
-+
-+ if (dev->scsi_dev == NULL ||
-+ dev->scsi_dev->type != dev->type) {
-+ PRINT_ERROR("%s", "SCSI device not define or illegal type");
-+ res = -ENODEV;
-+ goto out;
-+ }
-+
-+ params = kzalloc(sizeof(*params), GFP_KERNEL);
-+ if (params == NULL) {
-+ PRINT_ERROR("Unable to allocate struct modisk_params (size %zd)",
-+ sizeof(*params));
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ params->block_shift = MODISK_DEF_BLOCK_SHIFT;
-+
-+ /*
-+ * If the device is offline, don't try to read capacity or any
-+ * of the other stuff
-+ */
-+ if (dev->scsi_dev->sdev_state == SDEV_OFFLINE) {
-+ TRACE_DBG("%s", "Device is offline");
-+ res = -ENODEV;
-+ goto out_free_params;
-+ }
-+
-+ buffer = kmalloc(buffer_size, GFP_KERNEL);
-+ if (!buffer) {
-+ PRINT_ERROR("Buffer memory allocation (size %d) failure",
-+ buffer_size);
-+ res = -ENOMEM;
-+ goto out_free_params;
-+ }
-+
-+ /*
-+ * Clear any existing UA's and get modisk capacity (modisk block
-+ * size).
-+ */
-+ memset(cmd, 0, sizeof(cmd));
-+ cmd[0] = READ_CAPACITY;
-+ cmd[1] = (dev->scsi_dev->scsi_level <= SCSI_2) ?
-+ ((dev->scsi_dev->lun << 5) & 0xe0) : 0;
-+ retries = SCST_DEV_UA_RETRIES;
-+ while (1) {
-+ memset(buffer, 0, buffer_size);
-+ memset(sense_buffer, 0, sizeof(sense_buffer));
-+ data_dir = SCST_DATA_READ;
-+
-+ TRACE_DBG("%s", "Doing READ_CAPACITY");
-+ rc = scsi_execute(dev->scsi_dev, cmd, data_dir, buffer,
-+ buffer_size, sense_buffer,
-+ SCST_GENERIC_MODISK_REG_TIMEOUT, 3, 0
-+ , NULL
-+ );
-+
-+ TRACE_DBG("READ_CAPACITY done: %x", rc);
-+
-+ if (!rc || !scst_analyze_sense(sense_buffer,
-+ sizeof(sense_buffer), SCST_SENSE_KEY_VALID,
-+ UNIT_ATTENTION, 0, 0))
-+ break;
-+
-+ if (!--retries) {
-+ PRINT_ERROR("UA not cleared after %d retries",
-+ SCST_DEV_UA_RETRIES);
-+ res = -ENODEV;
-+ goto out_free_buf;
-+ }
-+ }
-+
-+ if (rc == 0) {
-+ int sector_size = ((buffer[4] << 24) | (buffer[5] << 16) |
-+ (buffer[6] << 8) | (buffer[7] << 0));
-+ if (sector_size == 0)
-+ params->block_shift = MODISK_DEF_BLOCK_SHIFT;
-+ else
-+ params->block_shift =
-+ scst_calc_block_shift(sector_size);
-+ TRACE_DBG("Sector size is %i scsi_level %d(SCSI_2 %d)",
-+ sector_size, dev->scsi_dev->scsi_level, SCSI_2);
-+ } else {
-+ params->block_shift = MODISK_DEF_BLOCK_SHIFT;
-+ TRACE(TRACE_MINOR, "Read capacity failed: %x, using default "
-+ "sector size %d", rc, params->block_shift);
-+ PRINT_BUFF_FLAG(TRACE_MINOR, "Returned sense", sense_buffer,
-+ sizeof(sense_buffer));
-+ }
-+
-+ res = scst_obtain_device_parameters(dev);
-+ if (res != 0) {
-+ PRINT_ERROR("Failed to obtain control parameters for device "
-+ "%s: %x", dev->virt_name, res);
-+ goto out_free_buf;
-+ }
-+
-+out_free_buf:
-+ kfree(buffer);
-+
-+out_free_params:
-+ if (res == 0)
-+ dev->dh_priv = params;
-+ else
-+ kfree(params);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void modisk_detach(struct scst_device *dev)
-+{
-+ struct modisk_params *params =
-+ (struct modisk_params *)dev->dh_priv;
-+
-+ TRACE_ENTRY();
-+
-+ kfree(params);
-+ dev->dh_priv = NULL;
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int modisk_get_block_shift(struct scst_cmd *cmd)
-+{
-+ struct modisk_params *params =
-+ (struct modisk_params *)cmd->dev->dh_priv;
-+ /*
-+ * No need for locks here, since *_detach() can not be
-+ * called, when there are existing commands.
-+ */
-+ return params->block_shift;
-+}
-+
-+static int modisk_parse(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_DEFAULT;
-+
-+ scst_modisk_generic_parse(cmd, modisk_get_block_shift);
-+
-+ cmd->retries = SCST_PASSTHROUGH_RETRIES;
-+
-+ return res;
-+}
-+
-+static void modisk_set_block_shift(struct scst_cmd *cmd, int block_shift)
-+{
-+ struct modisk_params *params =
-+ (struct modisk_params *)cmd->dev->dh_priv;
-+ /*
-+ * No need for locks here, since *_detach() can not be
-+ * called, when there are existing commands.
-+ */
-+ if (block_shift != 0)
-+ params->block_shift = block_shift;
-+ else
-+ params->block_shift = MODISK_DEF_BLOCK_SHIFT;
-+ return;
-+}
-+
-+static int modisk_done(struct scst_cmd *cmd)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ res = scst_block_generic_dev_done(cmd, modisk_set_block_shift);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int modisk_perf_exec(struct scst_cmd *cmd)
-+{
-+ int res = SCST_EXEC_NOT_COMPLETED, rc;
-+ int opcode = cmd->cdb[0];
-+
-+ TRACE_ENTRY();
-+
-+ rc = scst_check_local_events(cmd);
-+ if (unlikely(rc != 0))
-+ goto out_done;
-+
-+ cmd->status = 0;
-+ cmd->msg_status = 0;
-+ cmd->host_status = DID_OK;
-+ cmd->driver_status = 0;
-+
-+ switch (opcode) {
-+ case WRITE_6:
-+ case WRITE_10:
-+ case WRITE_12:
-+ case WRITE_16:
-+ case READ_6:
-+ case READ_10:
-+ case READ_12:
-+ case READ_16:
-+ cmd->completed = 1;
-+ goto out_done;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_done:
-+ res = SCST_EXEC_COMPLETED;
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
-+ goto out;
-+}
-+
-+MODULE_AUTHOR("Vladislav Bolkhovitin & Leonid Stoljar");
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("SCSI MO disk (type 7) dev handler for SCST");
-+MODULE_VERSION(SCST_VERSION_STRING);
-diff -uprN orig/linux-3.2/drivers/scst/dev_handlers/scst_processor.c linux-3.2/drivers/scst/dev_handlers/scst_processor.c
---- orig/linux-3.2/drivers/scst/dev_handlers/scst_processor.c
-+++ linux-3.2/drivers/scst/dev_handlers/scst_processor.c
-@@ -0,0 +1,183 @@
-+/*
-+ * scst_processor.c
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * SCSI medium processor (type 3) dev handler
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <scsi/scsi_host.h>
-+#include <linux/slab.h>
-+
-+#define LOG_PREFIX "dev_processor"
-+
-+#include <scst/scst.h>
-+#include "scst_dev_handler.h"
-+
-+#define PROCESSOR_NAME "dev_processor"
-+
-+#define PROCESSOR_RETRIES 2
-+
-+static int processor_attach(struct scst_device *);
-+/*static void processor_detach(struct scst_device *);*/
-+static int processor_parse(struct scst_cmd *);
-+/*static int processor_done(struct scst_cmd *);*/
-+
-+static struct scst_dev_type processor_devtype = {
-+ .name = PROCESSOR_NAME,
-+ .type = TYPE_PROCESSOR,
-+ .threads_num = 1,
-+ .parse_atomic = 1,
-+/* .dev_done_atomic = 1,*/
-+ .attach = processor_attach,
-+/* .detach = processor_detach,*/
-+ .parse = processor_parse,
-+/* .dev_done = processor_done*/
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
-+ .trace_flags = &trace_flag,
-+#endif
-+};
-+
-+static int processor_attach(struct scst_device *dev)
-+{
-+ int res, rc;
-+ int retries;
-+
-+ TRACE_ENTRY();
-+
-+ if (dev->scsi_dev == NULL ||
-+ dev->scsi_dev->type != dev->type) {
-+ PRINT_ERROR("%s", "SCSI device not define or illegal type");
-+ res = -ENODEV;
-+ goto out;
-+ }
-+
-+ /*
-+ * If the device is offline, don't try to read capacity or any
-+ * of the other stuff
-+ */
-+ if (dev->scsi_dev->sdev_state == SDEV_OFFLINE) {
-+ TRACE_DBG("%s", "Device is offline");
-+ res = -ENODEV;
-+ goto out;
-+ }
-+
-+ retries = SCST_DEV_UA_RETRIES;
-+ do {
-+ TRACE_DBG("%s", "Doing TEST_UNIT_READY");
-+ rc = scsi_test_unit_ready(dev->scsi_dev,
-+ SCST_GENERIC_PROCESSOR_TIMEOUT, PROCESSOR_RETRIES
-+ , NULL);
-+ TRACE_DBG("TEST_UNIT_READY done: %x", rc);
-+ } while ((--retries > 0) && rc);
-+
-+ if (rc) {
-+ PRINT_WARNING("Unit not ready: %x", rc);
-+ /* Let's try not to be too smart and continue processing */
-+ }
-+
-+ res = scst_obtain_device_parameters(dev);
-+ if (res != 0) {
-+ PRINT_ERROR("Failed to obtain control parameters for device "
-+ "%s", dev->virt_name);
-+ goto out;
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return res;
-+}
-+
-+#if 0
-+void processor_detach(struct scst_device *dev)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+#endif
-+
-+static int processor_parse(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_DEFAULT;
-+
-+ scst_processor_generic_parse(cmd, NULL);
-+
-+ cmd->retries = SCST_PASSTHROUGH_RETRIES;
-+
-+ return res;
-+}
-+
-+#if 0
-+int processor_done(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_DEFAULT;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+ * SCST sets good defaults for cmd->is_send_status and
-+ * cmd->resp_data_len based on cmd->status and cmd->data_direction,
-+ * therefore change them only if necessary.
-+ */
-+
-+#if 0
-+ switch (cmd->cdb[0]) {
-+ default:
-+ /* It's all good */
-+ break;
-+ }
-+#endif
-+
-+ TRACE_EXIT();
-+ return res;
-+}
-+#endif
-+
-+static int __init processor_init(void)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ processor_devtype.module = THIS_MODULE;
-+
-+ res = scst_register_dev_driver(&processor_devtype);
-+ if (res < 0)
-+ goto out;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void __exit processor_exit(void)
-+{
-+ TRACE_ENTRY();
-+ scst_unregister_dev_driver(&processor_devtype);
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+module_init(processor_init);
-+module_exit(processor_exit);
-+
-+MODULE_AUTHOR("Vladislav Bolkhovitin & Leonid Stoljar");
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("SCSI medium processor (type 3) dev handler for SCST");
-+MODULE_VERSION(SCST_VERSION_STRING);
-diff -uprN orig/linux-3.2/drivers/scst/dev_handlers/scst_raid.c linux-3.2/drivers/scst/dev_handlers/scst_raid.c
---- orig/linux-3.2/drivers/scst/dev_handlers/scst_raid.c
-+++ linux-3.2/drivers/scst/dev_handlers/scst_raid.c
-@@ -0,0 +1,184 @@
-+/*
-+ * scst_raid.c
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * SCSI RAID controller (type 0xC) dev handler
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#define LOG_PREFIX "dev_raid"
-+
-+#include <scsi/scsi_host.h>
-+#include <linux/slab.h>
-+
-+#include <scst/scst.h>
-+#include "scst_dev_handler.h"
-+
-+#define RAID_NAME "dev_raid"
-+
-+#define RAID_RETRIES 2
-+
-+static int raid_attach(struct scst_device *);
-+/* static void raid_detach(struct scst_device *); */
-+static int raid_parse(struct scst_cmd *);
-+/* static int raid_done(struct scst_cmd *); */
-+
-+static struct scst_dev_type raid_devtype = {
-+ .name = RAID_NAME,
-+ .type = TYPE_RAID,
-+ .threads_num = 1,
-+ .parse_atomic = 1,
-+/* .dev_done_atomic = 1,*/
-+ .attach = raid_attach,
-+/* .detach = raid_detach,*/
-+ .parse = raid_parse,
-+/* .dev_done = raid_done,*/
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
-+ .trace_flags = &trace_flag,
-+#endif
-+};
-+
-+static int raid_attach(struct scst_device *dev)
-+{
-+ int res, rc;
-+ int retries;
-+
-+ TRACE_ENTRY();
-+
-+ if (dev->scsi_dev == NULL ||
-+ dev->scsi_dev->type != dev->type) {
-+ PRINT_ERROR("%s", "SCSI device not define or illegal type");
-+ res = -ENODEV;
-+ goto out;
-+ }
-+
-+ /*
-+ * If the device is offline, don't try to read capacity or any
-+ * of the other stuff
-+ */
-+ if (dev->scsi_dev->sdev_state == SDEV_OFFLINE) {
-+ TRACE_DBG("%s", "Device is offline");
-+ res = -ENODEV;
-+ goto out;
-+ }
-+
-+ retries = SCST_DEV_UA_RETRIES;
-+ do {
-+ TRACE_DBG("%s", "Doing TEST_UNIT_READY");
-+ rc = scsi_test_unit_ready(dev->scsi_dev,
-+ SCST_GENERIC_RAID_TIMEOUT,
-+ RAID_RETRIES, NULL);
-+ TRACE_DBG("TEST_UNIT_READY done: %x", rc);
-+ } while ((--retries > 0) && rc);
-+
-+ if (rc) {
-+ PRINT_WARNING("Unit not ready: %x", rc);
-+ /* Let's try not to be too smart and continue processing */
-+ }
-+
-+ res = scst_obtain_device_parameters(dev);
-+ if (res != 0) {
-+ PRINT_ERROR("Failed to obtain control parameters for device "
-+ "%s", dev->virt_name);
-+ goto out;
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return res;
-+}
-+
-+#if 0
-+void raid_detach(struct scst_device *dev)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+#endif
-+
-+static int raid_parse(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_DEFAULT;
-+
-+ scst_raid_generic_parse(cmd, NULL);
-+
-+ cmd->retries = SCST_PASSTHROUGH_RETRIES;
-+
-+ return res;
-+}
-+
-+#if 0
-+int raid_done(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_DEFAULT;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+ * SCST sets good defaults for cmd->is_send_status and
-+ * cmd->resp_data_len based on cmd->status and cmd->data_direction,
-+ * therefore change them only if necessary.
-+ */
-+
-+#if 0
-+ switch (cmd->cdb[0]) {
-+ default:
-+ /* It's all good */
-+ break;
-+ }
-+#endif
-+
-+ TRACE_EXIT();
-+ return res;
-+}
-+#endif
-+
-+static int __init raid_init(void)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ raid_devtype.module = THIS_MODULE;
-+
-+ res = scst_register_dev_driver(&raid_devtype);
-+ if (res < 0)
-+ goto out;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void __exit raid_exit(void)
-+{
-+ TRACE_ENTRY();
-+ scst_unregister_dev_driver(&raid_devtype);
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+module_init(raid_init);
-+module_exit(raid_exit);
-+
-+MODULE_AUTHOR("Vladislav Bolkhovitin & Leonid Stoljar");
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("SCSI raid(controller) (type 0xC) dev handler for SCST");
-+MODULE_VERSION(SCST_VERSION_STRING);
-diff -uprN orig/linux-3.2/drivers/scst/dev_handlers/scst_tape.c linux-3.2/drivers/scst/dev_handlers/scst_tape.c
---- orig/linux-3.2/drivers/scst/dev_handlers/scst_tape.c
-+++ linux-3.2/drivers/scst/dev_handlers/scst_tape.c
-@@ -0,0 +1,383 @@
-+/*
-+ * scst_tape.c
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * SCSI tape (type 1) dev handler
-+ * &
-+ * SCSI tape (type 1) "performance" device handler (skip all READ and WRITE
-+ * operations).
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <scsi/scsi_host.h>
-+#include <linux/slab.h>
-+
-+#define LOG_PREFIX "dev_tape"
-+
-+#include <scst/scst.h>
-+#include "scst_dev_handler.h"
-+
-+#define TAPE_NAME "dev_tape"
-+#define TAPE_PERF_NAME "dev_tape_perf"
-+
-+#define TAPE_RETRIES 2
-+
-+#define TAPE_DEF_BLOCK_SIZE 512
-+
-+/* The SILI (Suppress Incorrect Length Indicator) bit in READ(6) */
-+#define SILI_BIT 2
-+
-+struct tape_params {
-+ int block_size;
-+};
-+
-+static int tape_attach(struct scst_device *);
-+static void tape_detach(struct scst_device *);
-+static int tape_parse(struct scst_cmd *);
-+static int tape_done(struct scst_cmd *);
-+static int tape_perf_exec(struct scst_cmd *);
-+
-+static struct scst_dev_type tape_devtype = {
-+ .name = TAPE_NAME,
-+ .type = TYPE_TAPE,
-+ .threads_num = 1,
-+ .parse_atomic = 1,
-+ .dev_done_atomic = 1,
-+ .attach = tape_attach,
-+ .detach = tape_detach,
-+ .parse = tape_parse,
-+ .dev_done = tape_done,
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
-+ .trace_flags = &trace_flag,
-+#endif
-+};
-+
-+static struct scst_dev_type tape_devtype_perf = {
-+ .name = TAPE_PERF_NAME,
-+ .type = TYPE_TAPE,
-+ .parse_atomic = 1,
-+ .dev_done_atomic = 1,
-+ .attach = tape_attach,
-+ .detach = tape_detach,
-+ .parse = tape_parse,
-+ .dev_done = tape_done,
-+ .exec = tape_perf_exec,
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ .default_trace_flags = SCST_DEFAULT_DEV_LOG_FLAGS,
-+ .trace_flags = &trace_flag,
-+#endif
-+};
-+
-+static int __init init_scst_tape_driver(void)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ tape_devtype.module = THIS_MODULE;
-+
-+ res = scst_register_dev_driver(&tape_devtype);
-+ if (res < 0)
-+ goto out;
-+
-+ tape_devtype_perf.module = THIS_MODULE;
-+
-+ res = scst_register_dev_driver(&tape_devtype_perf);
-+ if (res < 0)
-+ goto out_unreg;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_unreg:
-+ scst_unregister_dev_driver(&tape_devtype);
-+ goto out;
-+}
-+
-+static void __exit exit_scst_tape_driver(void)
-+{
-+ TRACE_ENTRY();
-+
-+ scst_unregister_dev_driver(&tape_devtype_perf);
-+ scst_unregister_dev_driver(&tape_devtype);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+module_init(init_scst_tape_driver);
-+module_exit(exit_scst_tape_driver);
-+
-+static int tape_attach(struct scst_device *dev)
-+{
-+ int res, rc;
-+ int retries;
-+ struct scsi_mode_data data;
-+ const int buffer_size = 512;
-+ uint8_t *buffer = NULL;
-+ struct tape_params *params;
-+
-+ TRACE_ENTRY();
-+
-+ if (dev->scsi_dev == NULL ||
-+ dev->scsi_dev->type != dev->type) {
-+ PRINT_ERROR("%s", "SCSI device not define or illegal type");
-+ res = -ENODEV;
-+ goto out;
-+ }
-+
-+ params = kzalloc(sizeof(*params), GFP_KERNEL);
-+ if (params == NULL) {
-+ PRINT_ERROR("Unable to allocate struct tape_params (size %zd)",
-+ sizeof(*params));
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ params->block_size = TAPE_DEF_BLOCK_SIZE;
-+
-+ buffer = kmalloc(buffer_size, GFP_KERNEL);
-+ if (!buffer) {
-+ PRINT_ERROR("Buffer memory allocation (size %d) failure",
-+ buffer_size);
-+ res = -ENOMEM;
-+ goto out_free_req;
-+ }
-+
-+ retries = SCST_DEV_UA_RETRIES;
-+ do {
-+ TRACE_DBG("%s", "Doing TEST_UNIT_READY");
-+ rc = scsi_test_unit_ready(dev->scsi_dev,
-+ SCST_GENERIC_TAPE_SMALL_TIMEOUT,
-+ TAPE_RETRIES, NULL);
-+ TRACE_DBG("TEST_UNIT_READY done: %x", rc);
-+ } while ((--retries > 0) && rc);
-+
-+ if (rc) {
-+ PRINT_WARNING("Unit not ready: %x", rc);
-+ /* Let's try not to be too smart and continue processing */
-+ goto obtain;
-+ }
-+
-+ TRACE_DBG("%s", "Doing MODE_SENSE");
-+ rc = scsi_mode_sense(dev->scsi_dev,
-+ ((dev->scsi_dev->scsi_level <= SCSI_2) ?
-+ ((dev->scsi_dev->lun << 5) & 0xe0) : 0),
-+ 0 /* Mode Page 0 */,
-+ buffer, buffer_size,
-+ SCST_GENERIC_TAPE_SMALL_TIMEOUT, TAPE_RETRIES,
-+ &data, NULL);
-+ TRACE_DBG("MODE_SENSE done: %x", rc);
-+
-+ if (rc == 0) {
-+ int medium_type, mode, speed, density;
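-+ /*
-+ * MODE SENSE(6) data: 4-byte parameter header (byte 3 is the
-+ * block descriptor length) followed by an 8-byte block
-+ * descriptor whose last 3 bytes (buffer[9..11]) hold the
-+ * block length.
-+ */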
-+ if (buffer[3] == 8) {
-+ params->block_size = ((buffer[9] << 16) |
-+ (buffer[10] << 8) |
-+ (buffer[11] << 0));
-+ } else
-+ params->block_size = TAPE_DEF_BLOCK_SIZE;
-+ medium_type = buffer[1];
-+ mode = (buffer[2] & 0x70) >> 4;
-+ speed = buffer[2] & 0x0f;
-+ density = buffer[4];
-+ TRACE_DBG("Tape: lun %d. bs %d. type 0x%02x mode 0x%02x "
-+ "speed 0x%02x dens 0x%02x", dev->scsi_dev->lun,
-+ params->block_size, medium_type, mode, speed, density);
-+ } else {
-+ PRINT_ERROR("MODE_SENSE failed: %x", rc);
-+ res = -ENODEV;
-+ goto out_free_buf;
-+ }
-+
-+obtain:
-+ res = scst_obtain_device_parameters(dev);
-+ if (res != 0) {
-+ PRINT_ERROR("Failed to obtain control parameters for device "
-+ "%s", dev->virt_name);
-+ goto out_free_buf;
-+ }
-+
-+out_free_buf:
-+ kfree(buffer);
-+
-+out_free_req:
-+ if (res == 0)
-+ dev->dh_priv = params;
-+ else
-+ kfree(params);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void tape_detach(struct scst_device *dev)
-+{
-+ struct tape_params *params =
-+ (struct tape_params *)dev->dh_priv;
-+
-+ TRACE_ENTRY();
-+
-+ kfree(params);
-+ dev->dh_priv = NULL;
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int tape_get_block_size(struct scst_cmd *cmd)
-+{
-+ struct tape_params *params = (struct tape_params *)cmd->dev->dh_priv;
-+ /*
-+ * No need for locks here, since *_detach() can not be called,
-+ * when there are existing commands.
-+ */
-+ return params->block_size;
-+}
-+
-+static int tape_parse(struct scst_cmd *cmd)
-+{
-+ int res = SCST_CMD_STATE_DEFAULT;
-+
-+ scst_tape_generic_parse(cmd, tape_get_block_size);
-+
-+ cmd->retries = SCST_PASSTHROUGH_RETRIES;
-+
-+ return res;
-+}
-+
-+static void tape_set_block_size(struct scst_cmd *cmd, int block_size)
-+{
-+ struct tape_params *params = (struct tape_params *)cmd->dev->dh_priv;
-+ /*
-+ * No need for locks here, since *_detach() can not be called, when
-+ * there are existing commands.
-+ */
-+ params->block_size = block_size;
-+ return;
-+}
-+
-+static int tape_done(struct scst_cmd *cmd)
-+{
-+ int opcode = cmd->cdb[0];
-+ int status = cmd->status;
-+ int res = SCST_CMD_STATE_DEFAULT;
-+
-+ TRACE_ENTRY();
-+
-+ if ((status == SAM_STAT_GOOD) || (status == SAM_STAT_CONDITION_MET))
-+ res = scst_tape_generic_dev_done(cmd, tape_set_block_size);
-+ else if ((status == SAM_STAT_CHECK_CONDITION) &&
-+ SCST_SENSE_VALID(cmd->sense)) {
-+ struct tape_params *params;
-+
-+ TRACE_DBG("Extended sense %x", cmd->sense[0] & 0x7F);
-+
-+ if ((cmd->sense[0] & 0x7F) != 0x70) {
-+ PRINT_ERROR("Sense format 0x%x is not supported",
-+ cmd->sense[0] & 0x7F);
-+ scst_set_cmd_error(cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ goto out;
-+ }
-+
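-+ /*
-+ * Fixed format sense, byte 2: bit 7 Filemark, bit 6 EOM,
-+ * bit 5 ILI; the 0xe0 mask below tests all three.
-+ */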
-+ if (opcode == READ_6 && !(cmd->cdb[1] & SILI_BIT) &&
-+ (cmd->sense[2] & 0xe0)) {
-+ /* EOF, EOM, or ILI */
-+ int TransferLength, Residue = 0;
-+ if ((cmd->sense[2] & 0x0f) == BLANK_CHECK)
-+ /* No need for EOM in this case */
-+ cmd->sense[2] &= 0xcf;
-+ TransferLength = ((cmd->cdb[2] << 16) |
-+ (cmd->cdb[3] << 8) | cmd->cdb[4]);
-+ /* Compute the residual count */
-+ if ((cmd->sense[0] & 0x80) != 0) {
-+ Residue = ((cmd->sense[3] << 24) |
-+ (cmd->sense[4] << 16) |
-+ (cmd->sense[5] << 8) |
-+ cmd->sense[6]);
-+ }
-+ TRACE_DBG("Checking the sense key "
-+ "sn[2]=%x cmd->cdb[0,1]=%x,%x TransLen/Resid"
-+ " %d/%d", (int)cmd->sense[2], cmd->cdb[0],
-+ cmd->cdb[1], TransferLength, Residue);
-+ if (TransferLength > Residue) {
-+ int resp_data_len = TransferLength - Residue;
-+ if (cmd->cdb[1] & SCST_TRANSFER_LEN_TYPE_FIXED) {
-+ /*
-+ * No need for locks here, since
-+ * *_detach() can not be called, when
-+ * there are existing commands.
-+ */
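-+ /* FIXED bit set: the transfer length is in blocks, not bytes */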
-+ params = (struct tape_params *)
-+ cmd->dev->dh_priv;
-+ resp_data_len *= params->block_size;
-+ }
-+ scst_set_resp_data_len(cmd, resp_data_len);
-+ }
-+ }
-+ }
-+
-+out:
-+ TRACE_DBG("cmd->is_send_status=%x, cmd->resp_data_len=%d, "
-+ "res=%d", cmd->is_send_status, cmd->resp_data_len, res);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int tape_perf_exec(struct scst_cmd *cmd)
-+{
-+ int res = SCST_EXEC_NOT_COMPLETED, rc;
-+ int opcode = cmd->cdb[0];
-+
-+ TRACE_ENTRY();
-+
-+ rc = scst_check_local_events(cmd);
-+ if (unlikely(rc != 0))
-+ goto out_done;
-+
-+ cmd->status = 0;
-+ cmd->msg_status = 0;
-+ cmd->host_status = DID_OK;
-+ cmd->driver_status = 0;
-+
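-+ /* Complete READ/WRITE right away; the perf handler transfers no data */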
-+ switch (opcode) {
-+ case WRITE_6:
-+ case READ_6:
-+ cmd->completed = 1;
-+ goto out_done;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_done:
-+ res = SCST_EXEC_COMPLETED;
-+ cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME);
-+ goto out;
-+}
-+
-+MODULE_AUTHOR("Vladislav Bolkhovitin & Leonid Stoljar");
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("SCSI tape (type 1) dev handler for SCST");
-+MODULE_VERSION(SCST_VERSION_STRING);
-diff -uprN orig/linux-3.2/drivers/scst/fcst/Makefile linux-3.2/drivers/scst/fcst/Makefile
---- orig/linux-3.2/drivers/scst/fcst/Makefile
-+++ linux-3.2/drivers/scst/fcst/Makefile
-@@ -0,0 +1,7 @@
-+obj-$(CONFIG_FCST) += fcst.o
-+
-+fcst-objs := \
-+ ft_cmd.o \
-+ ft_io.o \
-+ ft_scst.o \
-+ ft_sess.o
-diff -uprN orig/linux-3.2/drivers/scst/fcst/Kconfig linux-3.2/drivers/scst/fcst/Kconfig
---- orig/linux-3.2/drivers/scst/fcst/Kconfig
-+++ linux-3.2/drivers/scst/fcst/Kconfig
-@@ -0,0 +1,5 @@
-+config FCST
-+ tristate "SCST target module for Fibre Channel using libfc"
-+ depends on LIBFC && SCST
-+ ---help---
-+ Supports using libfc HBAs as target adapters with SCST
-diff -uprN orig/linux-3.2/drivers/scst/fcst/fcst.h linux-3.2/drivers/scst/fcst/fcst.h
---- orig/linux-3.2/drivers/scst/fcst/fcst.h
-+++ linux-3.2/drivers/scst/fcst/fcst.h
-@@ -0,0 +1,151 @@
-+/*
-+ * Copyright (c) 2010 Cisco Systems, Inc.
-+ *
-+ * This program is free software; you may redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; version 2 of the License.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ * $Id$
-+ */
-+#ifndef __SCSI_FCST_H__
-+#define __SCSI_FCST_H__
-+
-+#include <scst/scst.h>
-+
-+#define FT_VERSION "0.3"
-+#define FT_MODULE "fcst"
-+
-+#define FT_MAX_HW_PENDING_TIME 20 /* max I/O time in seconds */
-+
-+/*
-+ * Debug options.
-+ */
-+#define FT_DEBUG_CONF 0x01 /* configuration messages */
-+#define FT_DEBUG_SESS 0x02 /* session messages */
-+#define FT_DEBUG_IO 0x04 /* I/O operations */
-+
-+extern unsigned int ft_debug_logging; /* debug options */
-+
-+#define FT_ERR(fmt, args...) \
-+ printk(KERN_ERR FT_MODULE ": %s: " fmt, __func__, ##args)
-+
-+#define FT_DEBUG(mask, fmt, args...) \
-+ do { \
-+ if (ft_debug_logging & (mask)) \
-+ printk(KERN_INFO FT_MODULE ": %s: " fmt, \
-+ __func__, ##args); \
-+ } while (0)
-+
-+#define FT_CONF_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_CONF, fmt, ##args)
-+#define FT_SESS_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_SESS, fmt, ##args)
-+#define FT_IO_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_IO, fmt, ##args)
-+
-+#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */
-+
-+/*
-+ * Session (remote port).
-+ */
-+struct ft_sess {
-+ u32 port_id; /* for hash lookup use only */
-+ u32 params;
-+ u16 max_payload; /* max transmitted payload size */
-+ u32 max_lso_payload; /* max offloaded payload size */
-+ u64 port_name; /* port name for transport ID */
-+ struct ft_tport *tport;
-+ struct hlist_node hash; /* linkage in ft_sess_hash table */
-+ struct rcu_head rcu;
-+ struct kref kref; /* ref for hash and outstanding I/Os */
-+ struct scst_session *scst_sess;
-+};
-+
-+/*
-+ * Hash table of sessions per local port.
-+ * Hash lookup by remote port FC_ID.
-+ */
-+#define FT_SESS_HASH_BITS 6
-+#define FT_SESS_HASH_SIZE (1 << FT_SESS_HASH_BITS)
-+
-+/*
-+ * Per local port data.
-+ * This is created when the first session logs into the local port.
-+ * Deleted when tpg is deleted or last session is logged off.
-+ */
-+struct ft_tport {
-+ u32 sess_count; /* number of sessions in hash */
-+ u8 enabled:1;
-+ struct rcu_head rcu;
-+ struct hlist_head hash[FT_SESS_HASH_SIZE]; /* list of sessions */
-+ struct fc_lport *lport;
-+ struct scst_tgt *tgt;
-+};
-+
-+/*
-+ * Commands
-+ */
-+struct ft_cmd {
-+ int serial; /* order received, for debugging */
-+ struct fc_seq *seq; /* sequence in exchange mgr */
-+ struct fc_frame *req_frame; /* original request frame */
-+ u32 write_data_len; /* data received from initiator */
-+ u32 read_data_len; /* data sent to initiator */
-+ u32 xfer_rdy_len; /* max xfer ready offset */
-+ u32 max_lso_payload; /* max offloaded (LSO) data payload */
-+ u16 max_payload; /* max transmitted data payload */
-+ struct scst_cmd *scst_cmd;
-+};
-+
-+extern struct list_head ft_lport_list;
-+extern struct mutex ft_lport_lock;
-+extern struct scst_tgt_template ft_scst_template;
-+
-+/*
-+ * libfc interface.
-+ */
-+int ft_prli(struct fc_rport_priv *, u32 spp_len,
-+ const struct fc_els_spp *, struct fc_els_spp *);
-+void ft_prlo(struct fc_rport_priv *);
-+void ft_recv(struct fc_lport *, struct fc_frame *);
-+
-+/*
-+ * SCST interface.
-+ */
-+int ft_send_response(struct scst_cmd *);
-+int ft_send_xfer_rdy(struct scst_cmd *);
-+void ft_cmd_timeout(struct scst_cmd *);
-+void ft_cmd_free(struct scst_cmd *);
-+void ft_cmd_tm_done(struct scst_mgmt_cmd *);
-+int ft_tgt_detect(struct scst_tgt_template *);
-+int ft_tgt_release(struct scst_tgt *);
-+int ft_tgt_enable(struct scst_tgt *, bool);
-+bool ft_tgt_enabled(struct scst_tgt *);
-+int ft_report_aen(struct scst_aen *);
-+int ft_get_transport_id(struct scst_tgt *, struct scst_session *, uint8_t **);
-+
-+/*
-+ * Session interface.
-+ */
-+int ft_lport_notify(struct notifier_block *, unsigned long, void *);
-+void ft_lport_add(struct fc_lport *, void *);
-+void ft_lport_del(struct fc_lport *, void *);
-+
-+/*
-+ * Other internal functions.
-+ */
-+int ft_thread(void *);
-+void ft_recv_req(struct ft_sess *, struct fc_frame *);
-+void ft_recv_write_data(struct scst_cmd *, struct fc_frame *);
-+int ft_send_read_data(struct scst_cmd *);
-+struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
-+struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
-+void ft_cmd_dump(struct scst_cmd *, const char *);
-+
-+#endif /* __SCSI_FCST_H__ */
-diff -uprN orig/linux-3.2/drivers/scst/fcst/ft_cmd.c linux-3.2/drivers/scst/fcst/ft_cmd.c
---- orig/linux-3.2/drivers/scst/fcst/ft_cmd.c
-+++ linux-3.2/drivers/scst/fcst/ft_cmd.c
-@@ -0,0 +1,685 @@
-+/*
-+ * Copyright (c) 2010 Cisco Systems, Inc.
-+ *
-+ * This program is free software; you may redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; version 2 of the License.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ */
-+#include <linux/kernel.h>
-+#include <linux/types.h>
-+#include <scsi/libfc.h>
-+#include <scsi/fc_encode.h>
-+#include "fcst.h"
-+
-+/*
-+ * Append string to buffer safely.
-+ * Also prepends a space if there's already something in the buf.
-+ */
-+static void ft_cmd_flag(char *buf, size_t len, const char *desc)
-+{
-+ if (buf[0])
-+ strlcat(buf, " ", len);
-+ strlcat(buf, desc, len);
-+}
-+
-+/*
-+ * Debug: dump command.
-+ */
-+void ft_cmd_dump(struct scst_cmd *cmd, const char *caller)
-+{
-+ static atomic_t serial;
-+ struct ft_cmd *fcmd;
-+ struct fc_frame_header *fh;
-+ char prefix[30];
-+ char buf[150];
-+
-+ if (!(ft_debug_logging & FT_DEBUG_IO))
-+ return;
-+
-+ fcmd = scst_cmd_get_tgt_priv(cmd);
-+ fh = fc_frame_header_get(fcmd->req_frame);
-+ snprintf(prefix, sizeof(prefix), FT_MODULE ": cmd %2x",
-+ atomic_inc_return(&serial) & 0xff);
-+
-+ printk(KERN_INFO "%s %s oid %x oxid %x resp_len %u\n",
-+ prefix, caller, ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id),
-+ scst_cmd_get_resp_data_len(cmd));
-+ printk(KERN_INFO "%s scst_cmd %p wlen %u rlen %u\n",
-+ prefix, cmd, fcmd->write_data_len, fcmd->read_data_len);
-+ printk(KERN_INFO "%s exp_dir %x exp_xfer_len %d exp_in_len %d\n",
-+ prefix, cmd->expected_data_direction,
-+ cmd->expected_transfer_len, cmd->expected_out_transfer_len);
-+ printk(KERN_INFO "%s dir %x data_len %d bufflen %d out_bufflen %d\n",
-+ prefix, cmd->data_direction, cmd->data_len,
-+ cmd->bufflen, cmd->out_bufflen);
-+ printk(KERN_INFO "%s sg_cnt reg %d in %d tgt %d tgt_in %d\n",
-+ prefix, cmd->sg_cnt, cmd->out_sg_cnt,
-+ cmd->tgt_sg_cnt, cmd->tgt_out_sg_cnt);
-+
-+ buf[0] = '\0';
-+ if (cmd->sent_for_exec)
-+ ft_cmd_flag(buf, sizeof(buf), "sent");
-+ if (cmd->completed)
-+ ft_cmd_flag(buf, sizeof(buf), "comp");
-+ if (cmd->ua_ignore)
-+ ft_cmd_flag(buf, sizeof(buf), "ua_ign");
-+ if (cmd->atomic)
-+ ft_cmd_flag(buf, sizeof(buf), "atom");
-+ if (cmd->double_ua_possible)
-+ ft_cmd_flag(buf, sizeof(buf), "dbl_ua_poss");
-+ if (cmd->is_send_status)
-+ ft_cmd_flag(buf, sizeof(buf), "send_stat");
-+ if (cmd->retry)
-+ ft_cmd_flag(buf, sizeof(buf), "retry");
-+ if (cmd->internal)
-+ ft_cmd_flag(buf, sizeof(buf), "internal");
-+ if (cmd->unblock_dev)
-+ ft_cmd_flag(buf, sizeof(buf), "unblock_dev");
-+ if (cmd->cmd_hw_pending)
-+ ft_cmd_flag(buf, sizeof(buf), "hw_pend");
-+ if (cmd->tgt_need_alloc_data_buf)
-+ ft_cmd_flag(buf, sizeof(buf), "tgt_need_alloc");
-+ if (cmd->tgt_data_buf_alloced)
-+ ft_cmd_flag(buf, sizeof(buf), "tgt_alloced");
-+ if (cmd->dh_data_buf_alloced)
-+ ft_cmd_flag(buf, sizeof(buf), "dh_alloced");
-+ if (cmd->expected_values_set)
-+ ft_cmd_flag(buf, sizeof(buf), "exp_val");
-+ if (cmd->sg_buff_modified)
-+ ft_cmd_flag(buf, sizeof(buf), "sg_buf_mod");
-+ if (cmd->preprocessing_only)
-+ ft_cmd_flag(buf, sizeof(buf), "pre_only");
-+ if (cmd->sn_set)
-+ ft_cmd_flag(buf, sizeof(buf), "sn_set");
-+ if (cmd->hq_cmd_inced)
-+ ft_cmd_flag(buf, sizeof(buf), "hq_cmd_inc");
-+ if (cmd->set_sn_on_restart_cmd)
-+ ft_cmd_flag(buf, sizeof(buf), "set_sn_on_restart");
-+ if (cmd->no_sgv)
-+ ft_cmd_flag(buf, sizeof(buf), "no_sgv");
-+ if (cmd->may_need_dma_sync)
-+ ft_cmd_flag(buf, sizeof(buf), "dma_sync");
-+ if (cmd->out_of_sn)
-+ ft_cmd_flag(buf, sizeof(buf), "oo_sn");
-+ if (cmd->inc_expected_sn_on_done)
-+ ft_cmd_flag(buf, sizeof(buf), "inc_sn_exp");
-+ if (cmd->done)
-+ ft_cmd_flag(buf, sizeof(buf), "done");
-+ if (cmd->finished)
-+ ft_cmd_flag(buf, sizeof(buf), "fin");
-+
-+ printk(KERN_INFO "%s flags %s\n", prefix, buf);
-+ printk(KERN_INFO "%s lun %lld sn %d tag %lld cmd_flags %lx\n",
-+ prefix, cmd->lun, cmd->sn, cmd->tag, cmd->cmd_flags);
-+ printk(KERN_INFO "%s tgt_sn %d op_flags %x op %s\n",
-+ prefix, cmd->tgt_sn, cmd->op_flags, cmd->op_name);
-+ printk(KERN_INFO "%s status %x msg_status %x "
-+ "host_status %x driver_status %x\n",
-+ prefix, cmd->status, cmd->msg_status,
-+ cmd->host_status, cmd->driver_status);
-+ printk(KERN_INFO "%s cdb_len %d\n", prefix, cmd->cdb_len);
-+ snprintf(buf, sizeof(buf), "%s cdb ", prefix);
-+ print_hex_dump(KERN_INFO, buf, DUMP_PREFIX_NONE,
-+ 16, 4, cmd->cdb, SCST_MAX_CDB_SIZE, 0);
-+}
-+
-+/*
-+ * Debug: dump mgmt command.
-+ */
-+static void ft_cmd_tm_dump(struct scst_mgmt_cmd *mcmd, const char *caller)
-+{
-+ struct ft_cmd *fcmd;
-+ struct fc_frame_header *fh;
-+ char prefix[30];
-+ char buf[150];
-+
-+ if (!(ft_debug_logging & FT_DEBUG_IO))
-+ return;
-+ fcmd = scst_mgmt_cmd_get_tgt_priv(mcmd);
-+ fh = fc_frame_header_get(fcmd->req_frame);
-+
-+ snprintf(prefix, sizeof(prefix), FT_MODULE ": mcmd");
-+
-+ printk(KERN_INFO "%s %s oid %x oxid %x lun %lld\n",
-+ prefix, caller, ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id),
-+ (unsigned long long)mcmd->lun);
-+ printk(KERN_INFO "%s state %d fn %d fin_wait %d done_wait %d comp %d\n",
-+ prefix, mcmd->state, mcmd->fn,
-+ mcmd->cmd_finish_wait_count, mcmd->cmd_done_wait_count,
-+ mcmd->completed_cmd_count);
-+ buf[0] = '\0';
-+ if (mcmd->needs_unblocking)
-+ ft_cmd_flag(buf, sizeof(buf), "needs_unblock");
-+ if (mcmd->lun_set)
-+ ft_cmd_flag(buf, sizeof(buf), "lun_set");
-+ if (mcmd->cmd_sn_set)
-+ ft_cmd_flag(buf, sizeof(buf), "cmd_sn_set");
-+ printk(KERN_INFO "%s flags %s\n", prefix, buf);
-+ if (mcmd->cmd_to_abort)
-+ ft_cmd_dump(mcmd->cmd_to_abort, caller);
-+}
-+
-+/*
-+ * Free command and associated frame.
-+ */
-+static void ft_cmd_done(struct ft_cmd *fcmd)
-+{
-+ struct fc_frame *fp = fcmd->req_frame;
-+ struct fc_lport *lport;
-+
-+ lport = fr_dev(fp);
-+ if (fr_seq(fp))
-+ lport->tt.seq_release(fr_seq(fp));
-+
-+ fc_frame_free(fp);
-+ kfree(fcmd);
-+}
-+
-+/*
-+ * Free command - callback from SCST.
-+ */
-+void ft_cmd_free(struct scst_cmd *cmd)
-+{
-+ struct ft_cmd *fcmd;
-+
-+ fcmd = scst_cmd_get_tgt_priv(cmd);
-+ if (fcmd) {
-+ scst_cmd_set_tgt_priv(cmd, NULL);
-+ ft_cmd_done(fcmd);
-+ }
-+}
-+
-+/*
-+ * Send response, after data if applicable.
-+ */
-+int ft_send_response(struct scst_cmd *cmd)
-+{
-+ struct ft_cmd *fcmd;
-+ struct fc_frame *fp;
-+ struct fcp_resp_with_ext *fcp;
-+ struct fc_lport *lport;
-+ struct fc_exch *ep;
-+ unsigned int slen;
-+ size_t len;
-+ int resid = 0;
-+ int bi_resid = 0;
-+ int error;
-+ int dir;
-+ u32 status;
-+
-+ ft_cmd_dump(cmd, __func__);
-+ fcmd = scst_cmd_get_tgt_priv(cmd);
-+ ep = fc_seq_exch(fcmd->seq);
-+ lport = ep->lp;
-+
-+ if (scst_cmd_aborted(cmd)) {
-+ FT_IO_DBG("cmd aborted did %x oxid %x\n", ep->did, ep->oxid);
-+ scst_set_delivery_status(cmd, SCST_CMD_DELIVERY_ABORTED);
-+ goto done;
-+ }
-+
-+ if (!scst_cmd_get_is_send_status(cmd)) {
-+ FT_IO_DBG("send status not set. feature not implemented\n");
-+ return SCST_TGT_RES_FATAL_ERROR;
-+ }
-+
-+ status = scst_cmd_get_status(cmd);
-+ dir = scst_cmd_get_data_direction(cmd);
-+
-+ slen = scst_cmd_get_sense_buffer_len(cmd);
-+ len = sizeof(*fcp) + slen;
-+
-+ /*
-+ * Send read data and set underflow/overflow residual count.
-+ * For bi-directional commands, the bi_resid is for the read direction.
-+ */
-+ if (dir & SCST_DATA_WRITE)
-+ resid = (signed)scst_cmd_get_bufflen(cmd) -
-+ fcmd->write_data_len;
-+ if (dir & SCST_DATA_READ) {
-+ error = ft_send_read_data(cmd);
-+ if (error) {
-+ FT_ERR("ft_send_read_data returned %d\n", error);
-+ return error;
-+ }
-+
-+ if (dir == SCST_DATA_BIDI) {
-+ bi_resid = (signed)scst_cmd_get_out_bufflen(cmd) -
-+ scst_cmd_get_resp_data_len(cmd);
-+ if (bi_resid)
-+ len += sizeof(__be32);
-+ } else
-+ resid = (signed)scst_cmd_get_bufflen(cmd) -
-+ scst_cmd_get_resp_data_len(cmd);
-+ }
-+
-+ fp = fc_frame_alloc(lport, len);
-+ if (!fp)
-+ return SCST_TGT_RES_QUEUE_FULL;
-+
-+ fcp = fc_frame_payload_get(fp, len);
-+ memset(fcp, 0, sizeof(*fcp));
-+ fcp->resp.fr_status = status;
-+
-+ if (slen) {
-+ fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
-+ fcp->ext.fr_sns_len = htonl(slen);
-+ memcpy(fcp + 1, scst_cmd_get_sense_buffer(cmd), slen);
-+ }
-+ if (bi_resid) {
-+ if (bi_resid < 0) {
-+ fcp->resp.fr_flags |= FCP_BIDI_READ_OVER;
-+ bi_resid = -bi_resid;
-+ } else
-+ fcp->resp.fr_flags |= FCP_BIDI_READ_UNDER;
-+ *(__be32 *)((u8 *)(fcp + 1) + slen) = htonl(bi_resid);
-+ }
-+ if (resid) {
-+ if (resid < 0) {
-+ resid = -resid;
-+ fcp->resp.fr_flags |= FCP_RESID_OVER;
-+ } else
-+ fcp->resp.fr_flags |= FCP_RESID_UNDER;
-+ fcp->ext.fr_resid = htonl(resid);
-+ }
-+ FT_IO_DBG("response did %x oxid %x\n", ep->did, ep->oxid);
-+
-+ /*
-+ * Send response.
-+ */
-+ fcmd->seq = lport->tt.seq_start_next(fcmd->seq);
-+ fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
-+ FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
-+
-+ lport->tt.seq_send(lport, fcmd->seq, fp);
-+done:
-+ lport->tt.exch_done(fcmd->seq);
-+ scst_tgt_cmd_done(cmd, SCST_CONTEXT_SAME);
-+ return SCST_TGT_RES_SUCCESS;
-+}
-+
-+/*
-+ * FC sequence response handler for follow-on sequences (data) and aborts.
-+ */
-+static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
-+{
-+ struct scst_cmd *cmd = arg;
-+ struct fc_frame_header *fh;
-+
-+ /*
-+ * If an error is being reported, it must be FC_EX_CLOSED.
-+ * Timeouts don't occur on incoming requests, and there are
-+ * currently no other errors.
-+ * The PRLO handler will be also called by libfc to delete
-+ * the session and all pending commands, so we ignore this response.
-+ */
-+ if (IS_ERR(fp)) {
-+ FT_IO_DBG("exchange error %ld - not handled\n", -PTR_ERR(fp));
-+ return;
-+ }
-+
-+ fh = fc_frame_header_get(fp);
-+ switch (fh->fh_r_ctl) {
-+ case FC_RCTL_DD_SOL_DATA: /* write data */
-+ ft_recv_write_data(cmd, fp);
-+ break;
-+ case FC_RCTL_DD_UNSOL_CTL: /* command */
-+ case FC_RCTL_DD_SOL_CTL: /* transfer ready */
-+ case FC_RCTL_DD_DATA_DESC: /* transfer ready */
-+ default:
-+ printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
-+ __func__, fh->fh_r_ctl);
-+ fc_frame_free(fp);
-+ break;
-+ }
-+}
-+
-+/*
-+ * Command timeout.
-+ * SCST calls this when the command has taken too long in the device handler.
-+ */
-+void ft_cmd_timeout(struct scst_cmd *cmd)
-+{
-+ FT_IO_DBG("timeout not implemented\n"); /* XXX TBD */
-+}
-+
-+/*
-+ * Send TX_RDY (transfer ready).
-+ */
-+static int ft_send_xfer_rdy_off(struct scst_cmd *cmd, u32 offset, u32 len)
-+{
-+ struct ft_cmd *fcmd;
-+ struct fc_frame *fp;
-+ struct fcp_txrdy *txrdy;
-+ struct fc_lport *lport;
-+ struct fc_exch *ep;
-+
-+ fcmd = scst_cmd_get_tgt_priv(cmd);
-+ if (fcmd->xfer_rdy_len < len + offset)
-+ fcmd->xfer_rdy_len = len + offset;
-+
-+ ep = fc_seq_exch(fcmd->seq);
-+ lport = ep->lp;
-+ fp = fc_frame_alloc(lport, sizeof(*txrdy));
-+ if (!fp)
-+ return SCST_TGT_RES_QUEUE_FULL;
-+
-+ txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
-+ memset(txrdy, 0, sizeof(*txrdy));
-+ txrdy->ft_data_ro = htonl(offset);
-+ txrdy->ft_burst_len = htonl(len);
-+
-+ fcmd->seq = lport->tt.seq_start_next(fcmd->seq);
-+ fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
-+ FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
-+ lport->tt.seq_send(lport, fcmd->seq, fp);
-+ return SCST_TGT_RES_SUCCESS;
-+}
-+
-+/*
-+ * Send TX_RDY (transfer ready).
-+ */
-+int ft_send_xfer_rdy(struct scst_cmd *cmd)
-+{
-+ return ft_send_xfer_rdy_off(cmd, 0, scst_cmd_get_bufflen(cmd));
-+}
-+
-+/*
-+ * Send a FCP response including SCSI status and optional FCP rsp_code.
-+ * status is SAM_STAT_GOOD (zero) if code is valid.
-+ * This is used in error cases, such as allocation failures.
-+ */
-+static void ft_send_resp_status(struct fc_frame *rx_fp, u32 status,
-+ enum fcp_resp_rsp_codes code)
-+{
-+ struct fc_frame *fp;
-+ struct fc_frame_header *fh;
-+ size_t len;
-+ struct fcp_resp_with_ext *fcp;
-+ struct fcp_resp_rsp_info *info;
-+ struct fc_lport *lport;
-+ struct fc_seq *sp;
-+
-+ sp = fr_seq(rx_fp);
-+ fh = fc_frame_header_get(rx_fp);
-+ FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n",
-+ ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
-+ lport = fr_dev(rx_fp);
-+ len = sizeof(*fcp);
-+ if (status == SAM_STAT_GOOD)
-+ len += sizeof(*info);
-+ fp = fc_frame_alloc(lport, len);
-+ if (!fp)
-+ return;
-+ fcp = fc_frame_payload_get(fp, len);
-+ memset(fcp, 0, len);
-+ fcp->resp.fr_status = status;
-+ if (status == SAM_STAT_GOOD) {
-+ fcp->ext.fr_rsp_len = htonl(sizeof(*info));
-+ fcp->resp.fr_flags |= FCP_RSP_LEN_VAL;
-+ info = (struct fcp_resp_rsp_info *)(fcp + 1);
-+ info->rsp_code = code;
-+ }
-+
-+ fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
-+ if (sp)
-+ lport->tt.seq_send(lport, sp, fp);
-+ else
-+ lport->tt.frame_send(lport, fp);
-+}
-+
-+/*
-+ * Send error or task management response.
-+ * Always frees the fcmd and associated state.
-+ */
-+static void ft_send_resp_code(struct ft_cmd *fcmd, enum fcp_resp_rsp_codes code)
-+{
-+ ft_send_resp_status(fcmd->req_frame, SAM_STAT_GOOD, code);
-+ ft_cmd_done(fcmd);
-+}
-+
-+void ft_cmd_tm_done(struct scst_mgmt_cmd *mcmd)
-+{
-+ struct ft_cmd *fcmd;
-+ enum fcp_resp_rsp_codes code;
-+
-+ ft_cmd_tm_dump(mcmd, __func__);
-+ fcmd = scst_mgmt_cmd_get_tgt_priv(mcmd);
-+ switch (scst_mgmt_cmd_get_status(mcmd)) {
-+ case SCST_MGMT_STATUS_SUCCESS:
-+ code = FCP_TMF_CMPL;
-+ break;
-+ case SCST_MGMT_STATUS_REJECTED:
-+ code = FCP_TMF_REJECTED;
-+ break;
-+ case SCST_MGMT_STATUS_LUN_NOT_EXIST:
-+ code = FCP_TMF_INVALID_LUN;
-+ break;
-+ case SCST_MGMT_STATUS_TASK_NOT_EXIST:
-+ case SCST_MGMT_STATUS_FN_NOT_SUPPORTED:
-+ case SCST_MGMT_STATUS_FAILED:
-+ default:
-+ code = FCP_TMF_FAILED;
-+ break;
-+ }
-+ FT_IO_DBG("tm cmd done fn %d code %d\n", mcmd->fn, code);
-+ ft_send_resp_code(fcmd, code);
-+}
-+
-+/*
-+ * Handle an incoming FCP task management command frame.
-+ * Note that this may be called directly from the softirq context.
-+ */
-+static void ft_recv_tm(struct scst_session *scst_sess,
-+ struct ft_cmd *fcmd, struct fcp_cmnd *fcp)
-+{
-+ struct scst_rx_mgmt_params params;
-+ int ret;
-+
-+ memset(&params, 0, sizeof(params));
-+ params.lun = fcp->fc_lun;
-+ params.lun_len = sizeof(fcp->fc_lun);
-+ params.lun_set = 1;
-+ params.atomic = SCST_ATOMIC;
-+ params.tgt_priv = fcmd;
-+
-+ switch (fcp->fc_tm_flags) {
-+ case FCP_TMF_LUN_RESET:
-+ params.fn = SCST_LUN_RESET;
-+ break;
-+ case FCP_TMF_TGT_RESET:
-+ params.fn = SCST_TARGET_RESET;
-+ params.lun_set = 0;
-+ break;
-+ case FCP_TMF_CLR_TASK_SET:
-+ params.fn = SCST_CLEAR_TASK_SET;
-+ break;
-+ case FCP_TMF_ABT_TASK_SET:
-+ params.fn = SCST_ABORT_TASK_SET;
-+ break;
-+ case FCP_TMF_CLR_ACA:
-+ params.fn = SCST_CLEAR_ACA;
-+ break;
-+ default:
-+ /*
-+ * FCP4r01 indicates having a combination of
-+ * tm_flags set is invalid.
-+ */
-+ FT_IO_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
-+ ft_send_resp_code(fcmd, FCP_CMND_FIELDS_INVALID);
-+ return;
-+ }
-+ FT_IO_DBG("submit tm cmd fn %d\n", params.fn);
-+ ret = scst_rx_mgmt_fn(scst_sess, &params);
-+ FT_IO_DBG("scst_rx_mgmt_fn ret %d\n", ret);
-+ if (ret)
-+ ft_send_resp_code(fcmd, FCP_TMF_FAILED);
-+}
-+
-+/*
-+ * Handle an incoming FCP command frame.
-+ * Note that this may be called directly from the softirq context.
-+ */
-+static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
-+{
-+ static atomic_t serial;
-+ struct fc_seq *sp;
-+ struct scst_cmd *cmd;
-+ struct ft_cmd *fcmd;
-+ struct fcp_cmnd *fcp;
-+ struct fc_lport *lport;
-+ int data_dir;
-+ u32 data_len;
-+ int cdb_len;
-+
-+ lport = sess->tport->lport;
-+ fcmd = kzalloc(sizeof(*fcmd), GFP_ATOMIC);
-+ if (!fcmd)
-+ goto busy;
-+ fcmd->serial = atomic_inc_return(&serial); /* debug only */
-+ fcmd->max_payload = sess->max_payload;
-+ fcmd->max_lso_payload = sess->max_lso_payload;
-+ fcmd->req_frame = fp;
-+
-+ fcp = fc_frame_payload_get(fp, sizeof(*fcp));
-+ if (!fcp)
-+ goto err;
-+ if (fcp->fc_tm_flags) {
-+ ft_recv_tm(sess->scst_sess, fcmd, fcp);
-+ return;
-+ }
-+
-+ /*
-+ * re-check length including specified CDB length.
-+ * data_len is just after the CDB.
-+ */
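-+ /* Bits 7:2 of fc_flags carry the additional CDB length in 4-byte
-+ * words, so masking with FCP_CFL_LEN_MASK yields it in bytes. */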
-+ cdb_len = fcp->fc_flags & FCP_CFL_LEN_MASK;
-+ fcp = fc_frame_payload_get(fp, sizeof(*fcp) + cdb_len);
-+ if (!fcp)
-+ goto err;
-+ cdb_len += sizeof(fcp->fc_cdb);
-+ data_len = ntohl(*(__be32 *)(fcp->fc_cdb + cdb_len));
-+
-+ cmd = scst_rx_cmd(sess->scst_sess, fcp->fc_lun, sizeof(fcp->fc_lun),
-+ fcp->fc_cdb, cdb_len, SCST_ATOMIC);
-+ if (!cmd)
-+ goto busy;
-+ fcmd->scst_cmd = cmd;
-+ scst_cmd_set_tgt_priv(cmd, fcmd);
-+
-+ sp = lport->tt.seq_assign(lport, fp);
-+ if (!sp)
-+ goto busy;
-+ fcmd->seq = sp;
-+ lport->tt.seq_set_resp(sp, ft_recv_seq, cmd);
-+
-+ switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
-+ case 0:
-+ default:
-+ data_dir = SCST_DATA_NONE;
-+ break;
-+ case FCP_CFL_RDDATA:
-+ data_dir = SCST_DATA_READ;
-+ break;
-+ case FCP_CFL_WRDATA:
-+ data_dir = SCST_DATA_WRITE;
-+ break;
-+ case FCP_CFL_RDDATA | FCP_CFL_WRDATA:
-+ data_dir = SCST_DATA_BIDI;
-+ break;
-+ }
-+ scst_cmd_set_expected(cmd, data_dir, data_len);
-+
-+ switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
-+ case FCP_PTA_SIMPLE:
-+ scst_cmd_set_queue_type(cmd, SCST_CMD_QUEUE_SIMPLE);
-+ break;
-+ case FCP_PTA_HEADQ:
-+ scst_cmd_set_queue_type(cmd, SCST_CMD_QUEUE_HEAD_OF_QUEUE);
-+ break;
-+ case FCP_PTA_ACA:
-+ scst_cmd_set_queue_type(cmd, SCST_CMD_QUEUE_ACA);
-+ break;
-+ case FCP_PTA_ORDERED:
-+ default:
-+ scst_cmd_set_queue_type(cmd, SCST_CMD_QUEUE_ORDERED);
-+ break;
-+ }
-+
-+ scst_cmd_init_done(cmd, SCST_CONTEXT_THREAD);
-+ return;
-+
-+err:
-+ ft_send_resp_code(fcmd, FCP_CMND_FIELDS_INVALID);
-+ return;
-+
-+busy:
-+ FT_IO_DBG("cmd allocation failure - sending BUSY\n");
-+ ft_send_resp_status(fp, SAM_STAT_BUSY, 0);
-+ ft_cmd_done(fcmd);
-+}
-+
-+/*
-+ * Send FCP ELS-4 Reject.
-+ */
-+static void ft_cmd_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
-+ enum fc_els_rjt_explan explan)
-+{
-+ struct fc_seq_els_data rjt_data;
-+ struct fc_lport *lport;
-+
-+ lport = fr_dev(rx_fp);
-+ rjt_data.reason = reason;
-+ rjt_data.explan = explan;
-+ lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
-+}
-+
-+/*
-+ * Handle an incoming FCP ELS-4 command frame.
-+ * Note that this may be called directly from the softirq context.
-+ */
-+static void ft_recv_els4(struct ft_sess *sess, struct fc_frame *fp)
-+{
-+ u8 op = fc_frame_payload_op(fp);
-+
-+ switch (op) {
-+ case ELS_SRR: /* TBD */
-+ default:
-+ FT_IO_DBG("unsupported ELS-4 op %x\n", op);
-+ ft_cmd_ls_rjt(fp, ELS_RJT_INVAL, ELS_EXPL_NONE);
-+ fc_frame_free(fp);
-+ break;
-+ }
-+}
-+
-+/*
-+ * Handle an incoming FCP frame.
-+ * Note that this may be called directly from the softirq context.
-+ */
-+void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
-+{
-+ struct fc_frame_header *fh = fc_frame_header_get(fp);
-+
-+ switch (fh->fh_r_ctl) {
-+ case FC_RCTL_DD_UNSOL_CMD:
-+ ft_recv_cmd(sess, fp);
-+ break;
-+ case FC_RCTL_ELS4_REQ:
-+ ft_recv_els4(sess, fp);
-+ break;
-+ default:
-+ printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
-+ __func__, fh->fh_r_ctl);
-+ fc_frame_free(fp);
-+ break;
-+ }
-+}
-diff -uprN orig/linux-3.2/drivers/scst/fcst/ft_io.c linux-3.2/drivers/scst/fcst/ft_io.c
---- orig/linux-3.2/drivers/scst/fcst/ft_io.c
-+++ linux-3.2/drivers/scst/fcst/ft_io.c
-@@ -0,0 +1,276 @@
-+/*
-+ * Copyright (c) 2010 Cisco Systems, Inc.
-+ *
-+ * Portions based on drivers/scsi/libfc/fc_fcp.c and subject to the following:
-+ *
-+ * Copyright (c) 2007 Intel Corporation. All rights reserved.
-+ * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
-+ * Copyright (c) 2008 Mike Christie
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ */
-+#include <linux/kernel.h>
-+#include <linux/types.h>
-+#include <scsi/libfc.h>
-+#include <scsi/fc_encode.h>
-+#include "fcst.h"
-+
-+/*
-+ * Receive write data frame.
-+ */
-+void ft_recv_write_data(struct scst_cmd *cmd, struct fc_frame *fp)
-+{
-+ struct ft_cmd *fcmd;
-+ struct fc_frame_header *fh;
-+ unsigned int bufflen;
-+ u32 rel_off;
-+ size_t frame_len;
-+ size_t mem_len;
-+ size_t tlen;
-+ void *from;
-+ void *to;
-+ int dir;
-+ u8 *buf;
-+
-+ dir = scst_cmd_get_data_direction(cmd);
-+ if (dir == SCST_DATA_BIDI) {
-+ mem_len = scst_get_out_buf_first(cmd, &buf);
-+ bufflen = scst_cmd_get_out_bufflen(cmd);
-+ } else {
-+ mem_len = scst_get_buf_first(cmd, &buf);
-+ bufflen = scst_cmd_get_bufflen(cmd);
-+ }
-+ to = buf;
-+
-+ fcmd = scst_cmd_get_tgt_priv(cmd);
-+ fh = fc_frame_header_get(fp);
-+ frame_len = fr_len(fp);
-+ rel_off = ntohl(fh->fh_parm_offset);
-+
-+ FT_IO_DBG("sid %x oxid %x payload_len %zd rel_off %x\n",
-+ ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id),
-+ frame_len - sizeof(*fh), rel_off);
-+
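-+ /* Only frames carrying a valid relative offset can be placed */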
-+ if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
-+ goto drop;
-+ if (frame_len <= sizeof(*fh))
-+ goto drop;
-+ frame_len -= sizeof(*fh);
-+ from = fc_frame_payload_get(fp, 0);
-+
-+ if (rel_off >= bufflen)
-+ goto drop;
-+ if (frame_len + rel_off > bufflen)
-+ frame_len = bufflen - rel_off;
-+
-+ while (frame_len) {
-+ if (!mem_len) {
-+ if (dir == SCST_DATA_BIDI) {
-+ scst_put_out_buf(cmd, buf);
-+ mem_len = scst_get_out_buf_next(cmd, &buf);
-+ } else {
-+ scst_put_buf(cmd, buf);
-+ mem_len = scst_get_buf_next(cmd, &buf);
-+ }
-+ to = buf;
-+ if (!mem_len)
-+ break;
-+ }
-+ if (rel_off) {
-+ if (rel_off >= mem_len) {
-+ rel_off -= mem_len;
-+ mem_len = 0;
-+ continue;
-+ }
-+ mem_len -= rel_off;
-+ to += rel_off;
-+ rel_off = 0;
-+ }
-+
-+ tlen = min(mem_len, frame_len);
-+ memcpy(to, from, tlen);
-+
-+ from += tlen;
-+ frame_len -= tlen;
-+ mem_len -= tlen;
-+ to += tlen;
-+ fcmd->write_data_len += tlen;
-+ }
-+ if (mem_len) {
-+ if (dir == SCST_DATA_BIDI)
-+ scst_put_out_buf(cmd, buf);
-+ else
-+ scst_put_buf(cmd, buf);
-+ }
-+ if (fcmd->write_data_len == cmd->data_len)
-+ scst_rx_data(cmd, SCST_RX_STATUS_SUCCESS, SCST_CONTEXT_THREAD);
-+drop:
-+ fc_frame_free(fp);
-+}
-+
-+/*
-+ * Send read data back to initiator.
-+ */
-+int ft_send_read_data(struct scst_cmd *cmd)
-+{
-+ struct ft_cmd *fcmd;
-+ struct fc_frame *fp = NULL;
-+ struct fc_exch *ep;
-+ struct fc_lport *lport;
-+ size_t remaining;
-+ u32 fh_off = 0;
-+ u32 frame_off;
-+ size_t frame_len = 0;
-+ size_t mem_len;
-+ u32 mem_off;
-+ size_t tlen;
-+ struct page *page;
-+ int use_sg;
-+ int error;
-+ void *to = NULL;
-+ u8 *from = NULL;
-+ int loop_limit = 10000;
-+
-+ fcmd = scst_cmd_get_tgt_priv(cmd);
-+ ep = fc_seq_exch(fcmd->seq);
-+ lport = ep->lp;
-+
-+ frame_off = fcmd->read_data_len;
-+ tlen = scst_cmd_get_resp_data_len(cmd);
-+ FT_IO_DBG("oid %x oxid %x resp_len %zd frame_off %u\n",
-+ ep->oid, ep->oxid, tlen, frame_off);
-+ if (tlen <= frame_off)
-+ return SCST_TGT_RES_SUCCESS;
-+ remaining = tlen - frame_off;
-+ if (remaining > UINT_MAX)
-+ FT_ERR("oid %x oxid %x resp_len %zd frame_off %u\n",
-+ ep->oid, ep->oxid, tlen, frame_off);
-+
-+ mem_len = scst_get_buf_first(cmd, &from);
-+ mem_off = 0;
-+ if (!mem_len) {
-+ FT_IO_DBG("mem_len 0\n");
-+ return SCST_TGT_RES_SUCCESS;
-+ }
-+ FT_IO_DBG("sid %x oxid %x mem_len %zd frame_off %u remaining %zd\n",
-+ ep->sid, ep->oxid, mem_len, frame_off, remaining);
-+
-+ /*
-+ * If we've already transferred some of the data, skip through
-+ * the buffer over the data already sent and continue with the
-+ * same sequence. Otherwise, get a new sequence for the data.
-+ */
-+ if (frame_off) {
-+ tlen = frame_off;
-+ while (mem_len <= tlen) {
-+ tlen -= mem_len;
-+ scst_put_buf(cmd, from);
-+ mem_len = scst_get_buf_next(cmd, &from);
-+ if (!mem_len)
-+ return SCST_TGT_RES_SUCCESS;
-+ }
-+ mem_len -= tlen;
-+ mem_off = tlen;
-+ } else
-+ fcmd->seq = lport->tt.seq_start_next(fcmd->seq);
-+
-+ /* no scatter/gather in skb for odd word length due to fc_seq_send() */
-+ use_sg = !(remaining % 4) && lport->sg_supp;
-+
-+ while (remaining) {
-+ if (!loop_limit) {
-+ FT_ERR("hit loop limit. remaining %zx mem_len %zx "
-+ "frame_len %zx tlen %zx\n",
-+ remaining, mem_len, frame_len, tlen);
-+ break;
-+ }
-+ loop_limit--;
-+ if (!mem_len) {
-+ scst_put_buf(cmd, from);
-+ mem_len = scst_get_buf_next(cmd, &from);
-+ mem_off = 0;
-+ if (!mem_len) {
-+ FT_ERR("mem_len 0 from get_buf_next\n");
-+ break;
-+ }
-+ }
-+ if (!frame_len) {
-+ frame_len = fcmd->max_lso_payload;
-+ frame_len = min(frame_len, remaining);
-+ fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
-+ if (!fp) {
-+ FT_IO_DBG("frame_alloc failed. "
-+ "use_sg %d frame_len %zd\n",
-+ use_sg, frame_len);
-+ break;
-+ }
-+ fr_max_payload(fp) = fcmd->max_payload;
-+ to = fc_frame_payload_get(fp, 0);
-+ fh_off = frame_off;
-+ }
-+ tlen = min(mem_len, frame_len);
-+ BUG_ON(!tlen);
-+ BUG_ON(tlen > remaining);
-+ BUG_ON(tlen > mem_len);
-+ BUG_ON(tlen > frame_len);
-+
-+ if (use_sg) {
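-+ /*
-+ * Hand the buffer page to the skb as a fragment; get_page()
-+ * takes a reference since the skb releases the page on free.
-+ */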
-+ page = virt_to_page(from + mem_off);
-+ get_page(page);
-+ tlen = min_t(size_t, tlen,
-+ PAGE_SIZE - (mem_off & ~PAGE_MASK));
-+ skb_fill_page_desc(fp_skb(fp),
-+ skb_shinfo(fp_skb(fp))->nr_frags,
-+ page, offset_in_page(from + mem_off),
-+ tlen);
-+ fr_len(fp) += tlen;
-+ fp_skb(fp)->data_len += tlen;
-+ fp_skb(fp)->truesize +=
-+ PAGE_SIZE << compound_order(page);
-+ frame_len -= tlen;
-+ if (skb_shinfo(fp_skb(fp))->nr_frags >= FC_FRAME_SG_LEN)
-+ frame_len = 0;
-+ } else {
-+ memcpy(to, from + mem_off, tlen);
-+ to += tlen;
-+ frame_len -= tlen;
-+ }
-+
-+ mem_len -= tlen;
-+ mem_off += tlen;
-+ remaining -= tlen;
-+ frame_off += tlen;
-+
-+ if (frame_len)
-+ continue;
-+ fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
-+ FC_TYPE_FCP,
-+ remaining ? (FC_FC_EX_CTX | FC_FC_REL_OFF) :
-+ (FC_FC_EX_CTX | FC_FC_REL_OFF | FC_FC_END_SEQ),
-+ fh_off);
-+ error = lport->tt.seq_send(lport, fcmd->seq, fp);
-+ if (error) {
-+ WARN_ON(1);
-+ /* XXX For now, initiator will retry */
-+ } else
-+ fcmd->read_data_len = frame_off;
-+ }
-+ if (mem_len)
-+ scst_put_buf(cmd, from);
-+ if (remaining) {
-+ FT_IO_DBG("remaining read data %zd\n", remaining);
-+ return SCST_TGT_RES_QUEUE_FULL;
-+ }
-+ return SCST_TGT_RES_SUCCESS;
-+}
-diff -uprN orig/linux-3.2/drivers/scst/fcst/ft_scst.c linux-3.2/drivers/scst/fcst/ft_scst.c
---- orig/linux-3.2/drivers/scst/fcst/ft_scst.c
-+++ linux-3.2/drivers/scst/fcst/ft_scst.c
-@@ -0,0 +1,96 @@
-+/*
-+ * Copyright (c) 2010 Cisco Systems, Inc.
-+ *
-+ * This program is free software; you may redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; version 2 of the License.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ */
-+#include <linux/kernel.h>
-+#include <linux/types.h>
-+#include <scsi/libfc.h>
-+#include "fcst.h"
-+
-+MODULE_AUTHOR("Joe Eykholt <jeykholt@cisco.com>");
-+MODULE_DESCRIPTION("Fibre-Channel SCST target");
-+MODULE_LICENSE("GPL v2");
-+
-+unsigned int ft_debug_logging;
-+module_param_named(debug_logging, ft_debug_logging, int, S_IRUGO | S_IWUSR);
-+MODULE_PARM_DESC(debug_logging, "log levels bitmask");
-+
-+DEFINE_MUTEX(ft_lport_lock);
-+
-+/*
-+ * Provider ops for libfc.
-+ */
-+static struct fc4_prov ft_prov = {
-+ .prli = ft_prli,
-+ .prlo = ft_prlo,
-+ .recv = ft_recv,
-+ .module = THIS_MODULE,
-+};
-+
-+static struct notifier_block ft_notifier = {
-+ .notifier_call = ft_lport_notify
-+};
-+
-+/*
-+ * SCST target ops and configuration.
-+ * XXX - re-check uninitialized fields
-+ */
-+struct scst_tgt_template ft_scst_template = {
-+ .sg_tablesize = 128, /* XXX get true limit from libfc */
-+ .xmit_response_atomic = 1,
-+ .rdy_to_xfer_atomic = 1,
-+ .xmit_response = ft_send_response,
-+ .rdy_to_xfer = ft_send_xfer_rdy,
-+ .on_hw_pending_cmd_timeout = ft_cmd_timeout,
-+ .on_free_cmd = ft_cmd_free,
-+ .task_mgmt_fn_done = ft_cmd_tm_done,
-+ .detect = ft_tgt_detect,
-+ .release = ft_tgt_release,
-+ .report_aen = ft_report_aen,
-+ .enable_target = ft_tgt_enable,
-+ .is_target_enabled = ft_tgt_enabled,
-+ .get_initiator_port_transport_id = ft_get_transport_id,
-+ .max_hw_pending_time = FT_MAX_HW_PENDING_TIME,
-+ .name = FT_MODULE,
-+};
-+
-+static int __init ft_module_init(void)
-+{
-+ int err;
-+
-+ err = scst_register_target_template(&ft_scst_template);
-+ if (err)
-+ return err;
-+ err = fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov);
-+ if (err) {
-+ scst_unregister_target_template(&ft_scst_template);
-+ return err;
-+ }
-+ blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier);
-+ fc_lport_iterate(ft_lport_add, NULL);
-+ return 0;
-+}
-+module_init(ft_module_init);
-+
-+static void __exit ft_module_exit(void)
-+{
-+ blocking_notifier_chain_unregister(&fc_lport_notifier_head,
-+ &ft_notifier);
-+ fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov);
-+ fc_lport_iterate(ft_lport_del, NULL);
-+ scst_unregister_target_template(&ft_scst_template);
-+ synchronize_rcu();
-+}
-+module_exit(ft_module_exit);
-diff -uprN orig/linux-3.2/drivers/scst/fcst/ft_sess.c linux-3.2/drivers/scst/fcst/ft_sess.c
---- orig/linux-3.2/drivers/scst/fcst/ft_sess.c
-+++ linux-3.2/drivers/scst/fcst/ft_sess.c
-@@ -0,0 +1,585 @@
-+/*
-+ * Copyright (c) 2010 Cisco Systems, Inc.
-+ *
-+ * This program is free software; you may redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; version 2 of the License.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ */
-+#include <linux/kernel.h>
-+#include <linux/types.h>
-+#include <linux/mutex.h>
-+#include <linux/hash.h>
-+#include <asm/unaligned.h>
-+#include <scsi/libfc.h>
-+#include <scsi/fc/fc_els.h>
-+#include "fcst.h"
-+
-+static int ft_tport_count;
-+
-+static ssize_t ft_format_wwn(char *buf, size_t len, u64 wwn)
-+{
-+ u8 b[8];
-+
-+ put_unaligned_be64(wwn, b);
-+ return snprintf(buf, len,
-+ "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
-+ b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
-+}
-+
-+/*
-+ * Lookup or allocate target local port.
-+ * Caller holds ft_lport_lock.
-+ */
-+static struct ft_tport *ft_tport_create(struct fc_lport *lport)
-+{
-+ struct ft_tport *tport;
-+ char name[FT_NAMELEN];
-+ int i;
-+
-+ ft_format_wwn(name, sizeof(name), lport->wwpn);
-+ FT_SESS_DBG("create %s\n", name);
-+
-+ tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
-+ if (tport) {
-+ FT_SESS_DBG("tport alloc %s - already setup\n", name);
-+ return tport;
-+ }
-+
-+ tport = kzalloc(sizeof(*tport), GFP_KERNEL);
-+ if (!tport) {
-+ FT_SESS_DBG("tport alloc %s failed\n", name);
-+ return NULL;
-+ }
-+
-+ tport->tgt = scst_register_target(&ft_scst_template, name);
-+ if (!tport->tgt) {
-+ FT_SESS_DBG("register_target %s failed\n", name);
-+ kfree(tport);
-+ return NULL;
-+ }
-+ scst_tgt_set_tgt_priv(tport->tgt, tport);
-+ ft_tport_count++;
-+
-+ tport->lport = lport;
-+ for (i = 0; i < FT_SESS_HASH_SIZE; i++)
-+ INIT_HLIST_HEAD(&tport->hash[i]);
-+
-+ rcu_assign_pointer(lport->prov[FC_TYPE_FCP], tport);
-+ FT_SESS_DBG("register_target %s succeeded\n", name);
-+ return tport;
-+}
-+
-+/*
-+ * Free tport via RCU.
-+ */
-+static void ft_tport_rcu_free(struct rcu_head *rcu)
-+{
-+ struct ft_tport *tport = container_of(rcu, struct ft_tport, rcu);
-+
-+ kfree(tport);
-+}
-+
-+/*
-+ * Delete target local port, if any, associated with the local port.
-+ * Caller holds ft_lport_lock.
-+ */
-+static void ft_tport_delete(struct ft_tport *tport)
-+{
-+ struct fc_lport *lport;
-+ struct scst_tgt *tgt;
-+
-+ tgt = tport->tgt;
-+ BUG_ON(!tgt);
-+ FT_SESS_DBG("delete %s\n", scst_get_tgt_name(tgt));
-+ scst_unregister_target(tgt);
-+ lport = tport->lport;
-+ BUG_ON(tport != lport->prov[FC_TYPE_FCP]);
-+ rcu_assign_pointer(lport->prov[FC_TYPE_FCP], NULL);
-+ tport->lport = NULL;
-+ call_rcu(&tport->rcu, ft_tport_rcu_free);
-+ ft_tport_count--;
-+}
-+
-+/*
-+ * Add local port.
-+ * Called thru fc_lport_iterate().
-+ */
-+void ft_lport_add(struct fc_lport *lport, void *arg)
-+{
-+ mutex_lock(&ft_lport_lock);
-+ ft_tport_create(lport);
-+ mutex_unlock(&ft_lport_lock);
-+}
-+
-+/*
-+ * Delete local port.
-+ * Called thru fc_lport_iterate().
-+ */
-+void ft_lport_del(struct fc_lport *lport, void *arg)
-+{
-+ struct ft_tport *tport;
-+
-+ mutex_lock(&ft_lport_lock);
-+ tport = lport->prov[FC_TYPE_FCP];
-+ if (tport)
-+ ft_tport_delete(tport);
-+ mutex_unlock(&ft_lport_lock);
-+}
-+
-+/*
-+ * Notification of local port change from libfc.
-+ * Create or delete local port and associated tport.
-+ */
-+int ft_lport_notify(struct notifier_block *nb, unsigned long event, void *arg)
-+{
-+ struct fc_lport *lport = arg;
-+
-+ switch (event) {
-+ case FC_LPORT_EV_ADD:
-+ ft_lport_add(lport, NULL);
-+ break;
-+ case FC_LPORT_EV_DEL:
-+ ft_lport_del(lport, NULL);
-+ break;
-+ }
-+ return NOTIFY_DONE;
-+}
-+
-+/*
-+ * Find session in local port.
-+ * Sessions and hash lists are RCU-protected.
-+ * A reference is taken which must be eventually freed.
-+ */
-+static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
-+{
-+ struct ft_tport *tport;
-+ struct hlist_head *head;
-+ struct hlist_node *pos;
-+ struct ft_sess *sess = NULL;
-+
-+ rcu_read_lock();
-+ tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
-+ if (!tport)
-+ goto out;
-+
-+ head = &tport->hash[hash_32(port_id, FT_SESS_HASH_BITS)];
-+ hlist_for_each_entry_rcu(sess, pos, head, hash) {
-+ if (sess->port_id == port_id) {
-+ kref_get(&sess->kref);
-+ rcu_read_unlock();
-+ FT_SESS_DBG("port_id %x found %p\n", port_id, sess);
-+ return sess;
-+ }
-+ }
-+out:
-+ rcu_read_unlock();
-+ FT_SESS_DBG("port_id %x not found\n", port_id);
-+ return NULL;
-+}
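-+
-+/*
-+ * Usage sketch: a successful ft_sess_get() must be balanced by
-+ * ft_sess_put() once the caller is done with the session, as
-+ * ft_recv() below does:
-+ *
-+ *	sess = ft_sess_get(lport, sid);
-+ *	if (sess) {
-+ *		ft_recv_req(sess, fp);
-+ *		ft_sess_put(sess);
-+ *	}
-+ */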
-+
-+/*
-+ * Allocate session and enter it in the hash for the local port.
-+ * Caller holds ft_lport_lock.
-+ */
-+static int ft_sess_create(struct ft_tport *tport, struct fc_rport_priv *rdata,
-+ u32 fcp_parm)
-+{
-+ struct ft_sess *sess;
-+ struct scst_session *scst_sess;
-+ struct hlist_head *head;
-+ struct hlist_node *pos;
-+ u32 port_id;
-+ char name[FT_NAMELEN];
-+
-+ port_id = rdata->ids.port_id;
-+ if (!rdata->maxframe_size) {
-+ FT_SESS_DBG("port_id %x maxframe_size 0\n", port_id);
-+ return FC_SPP_RESP_CONF;
-+ }
-+
-+ head = &tport->hash[hash_32(port_id, FT_SESS_HASH_BITS)];
-+ hlist_for_each_entry_rcu(sess, pos, head, hash) {
-+ if (sess->port_id == port_id) {
-+ sess->params = fcp_parm;
-+ return 0;
-+ }
-+ }
-+
-+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
-+ if (!sess)
-+ return FC_SPP_RESP_RES; /* out of resources */
-+
-+ sess->port_name = rdata->ids.port_name;
-+ sess->max_payload = rdata->maxframe_size;
-+ sess->max_lso_payload = rdata->maxframe_size;
-+ if (tport->lport->seq_offload)
-+ sess->max_lso_payload = tport->lport->lso_max;
-+ sess->params = fcp_parm;
-+ sess->tport = tport;
-+ sess->port_id = port_id;
-+ kref_init(&sess->kref); /* ref for table entry */
-+
-+ ft_format_wwn(name, sizeof(name), rdata->ids.port_name);
-+ FT_SESS_DBG("register %s\n", name);
-+ scst_sess = scst_register_session(tport->tgt, 0, name, sess, NULL,
-+ NULL);
-+ if (!scst_sess) {
-+ kfree(sess);
-+ return FC_SPP_RESP_RES; /* out of resources */
-+ }
-+ sess->scst_sess = scst_sess;
-+ hlist_add_head_rcu(&sess->hash, head);
-+ tport->sess_count++;
-+
-+ FT_SESS_DBG("port_id %x sess %p\n", port_id, sess);
-+
-+ rdata->prli_count++;
-+ return 0;
-+}
-+
-+/*
-+ * Unhash the session.
-+ * Caller holds ft_lport_lock.
-+ */
-+static void ft_sess_unhash(struct ft_sess *sess)
-+{
-+ struct ft_tport *tport = sess->tport;
-+
-+ hlist_del_rcu(&sess->hash);
-+ BUG_ON(!tport->sess_count);
-+ tport->sess_count--;
-+ sess->port_id = -1;
-+ sess->params = 0;
-+}
-+
-+/*
-+ * Delete session from hash.
-+ * Caller holds ft_lport_lock.
-+ */
-+static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
-+{
-+ struct hlist_head *head;
-+ struct hlist_node *pos;
-+ struct ft_sess *sess;
-+
-+ head = &tport->hash[hash_32(port_id, FT_SESS_HASH_BITS)];
-+ hlist_for_each_entry_rcu(sess, pos, head, hash) {
-+ if (sess->port_id == port_id) {
-+ ft_sess_unhash(sess);
-+ return sess;
-+ }
-+ }
-+ return NULL;
-+}
-+
-+/*
-+ * Remove session (sending a PRLO to the rport is still a TODO, see below).
-+ * This is called when the target is being deleted.
-+ * Caller holds ft_lport_lock.
-+ */
-+static void ft_sess_close(struct ft_sess *sess)
-+{
-+ struct fc_lport *lport;
-+ u32 port_id;
-+
-+ lport = sess->tport->lport;
-+ port_id = sess->port_id;
-+ if (port_id == -1)
-+ return;
-+ FT_SESS_DBG("port_id %x\n", port_id);
-+ ft_sess_unhash(sess);
-+ /* XXX should send LOGO or PRLO to rport */
-+}
-+
-+/*
-+ * Allocate and fill in the SPC Transport ID for persistent reservations.
-+ */
-+int ft_get_transport_id(struct scst_tgt *tgt, struct scst_session *scst_sess,
-+ uint8_t **result)
-+{
-+ struct ft_sess *sess;
-+ struct {
-+ u8 format_proto; /* format and protocol ID (0 for FC) */
-+ u8 __resv1[7];
-+ __be64 port_name; /* N_Port Name */
-+ u8 __resv2[8];
-+ } __attribute__((__packed__)) *id;
-+
-+ if (!scst_sess)
-+ return SCSI_TRANSPORTID_PROTOCOLID_FCP2;
-+
-+ id = kzalloc(sizeof(*id), GFP_KERNEL);
-+ if (!id)
-+ return -ENOMEM;
-+
-+ sess = scst_sess_get_tgt_priv(scst_sess);
-+ id->port_name = cpu_to_be64(sess->port_name);
-+ id->format_proto = SCSI_TRANSPORTID_PROTOCOLID_FCP2;
-+ *result = (uint8_t *)id;
-+ return 0;
-+}
-+
-+/*
-+ * libfc ops involving sessions.
-+ */
-+
-+/*
-+ * Handle PRLI (process login) request.
-+ * This could be a PRLI we're sending or receiving.
-+ * Caller holds ft_lport_lock.
-+ */
-+static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
-+ const struct fc_els_spp *rspp, struct fc_els_spp *spp)
-+{
-+ struct ft_tport *tport;
-+ u32 fcp_parm;
-+ int ret;
-+
-+ if (!rspp)
-+ goto fill;
-+
-+ if (rspp->spp_flags & (FC_SPP_OPA_VAL | FC_SPP_RPA_VAL))
-+ return FC_SPP_RESP_NO_PA;
-+
-+ /*
-+ * If both target and initiator bits are off, the SPP is invalid.
-+ */
-+ fcp_parm = ntohl(rspp->spp_params); /* requested parameters */
-+ if (!(fcp_parm & (FCP_SPPF_INIT_FCN | FCP_SPPF_TARG_FCN)))
-+ return FC_SPP_RESP_INVL;
-+
-+ /*
-+ * Create session (image pair) only if requested by
-+ * EST_IMG_PAIR flag and if the requestor is an initiator.
-+ */
-+ if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR) {
-+ spp->spp_flags |= FC_SPP_EST_IMG_PAIR;
-+
-+ if (!(fcp_parm & FCP_SPPF_INIT_FCN))
-+ return FC_SPP_RESP_CONF;
-+ tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]);
-+ if (!tport) {
-+ /* not a target for this local port */
-+ return FC_SPP_RESP_CONF;
-+ }
-+ if (!tport->enabled) {
-+ pr_err("Refused login from %#x because target port %s"
-+ " not yet enabled", rdata->ids.port_id,
-+ tport->tgt->tgt_name);
-+ return FC_SPP_RESP_CONF;
-+ }
-+ ret = ft_sess_create(tport, rdata, fcp_parm);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ /*
-+ * OR in our service parameters with those of the other provider (initiator), if any.
-+ * If the initiator indicates RETRY, we must support that, too.
-+ * Don't force RETRY on the initiator, though.
-+ */
-+fill:
-+ fcp_parm = ntohl(spp->spp_params); /* response parameters */
-+ spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
-+ return FC_SPP_RESP_ACK;
-+}
-+
-+/**
-+ * ft_prli() - Handle incoming or outgoing PRLI for the FCP target
-+ * @rdata: remote port private
-+ * @spp_len: service parameter page length
-+ * @rspp: received service parameter page (NULL for outgoing PRLI)
-+ * @spp: response service parameter page
-+ *
-+ * Returns spp response code.
-+ */
-+int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
-+ const struct fc_els_spp *rspp, struct fc_els_spp *spp)
-+{
-+ int ret;
-+
-+ FT_SESS_DBG("starting PRLI port_id %x\n", rdata->ids.port_id);
-+ mutex_lock(&ft_lport_lock);
-+ ret = ft_prli_locked(rdata, spp_len, rspp, spp);
-+ mutex_unlock(&ft_lport_lock);
-+ FT_SESS_DBG("port_id %x flags %x parms %x ret %x\n", rdata->ids.port_id,
-+ rspp ? rspp->spp_flags : 0, ntohl(spp->spp_params), ret);
-+ return ret;
-+}
-+
-+static void ft_sess_rcu_free(struct rcu_head *rcu)
-+{
-+ struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu);
-+
-+ kfree(sess);
-+}
-+
-+static void ft_sess_free(struct kref *kref)
-+{
-+ struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
-+ struct scst_session *scst_sess;
-+
-+ scst_sess = sess->scst_sess;
-+ FT_SESS_DBG("unregister %s\n", scst_sess->initiator_name);
-+ scst_unregister_session(scst_sess, 0, NULL);
-+ call_rcu(&sess->rcu, ft_sess_rcu_free);
-+}
-+
-+static void ft_sess_put(struct ft_sess *sess)
-+{
-+ int sess_held = atomic_read(&sess->kref.refcount);
-+
-+ BUG_ON(!sess_held);
-+ kref_put(&sess->kref, ft_sess_free);
-+}
-+
-+/*
-+ * Delete ft_sess for PRLO.
-+ * Called with ft_lport_lock held.
-+ */
-+static struct ft_sess *ft_sess_lookup_delete(struct fc_rport_priv *rdata)
-+{
-+ struct ft_sess *sess;
-+ struct ft_tport *tport;
-+
-+ tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]);
-+ if (!tport)
-+ return NULL;
-+ sess = ft_sess_delete(tport, rdata->ids.port_id);
-+ if (sess)
-+ sess->params = 0;
-+ return sess;
-+}
-+
-+/*
-+ * Handle PRLO.
-+ */
-+void ft_prlo(struct fc_rport_priv *rdata)
-+{
-+ struct ft_sess *sess;
-+
-+ mutex_lock(&ft_lport_lock);
-+ sess = ft_sess_lookup_delete(rdata);
-+ mutex_unlock(&ft_lport_lock);
-+ if (!sess)
-+ return;
-+
-+ /*
-+ * Release the session hold from the table.
-+ * When all command-starting threads have returned,
-+ * kref will call ft_sess_free which will unregister
-+ * the session.
-+ * fcmds referencing the session are safe.
-+ */
-+ ft_sess_put(sess); /* release from table */
-+ rdata->prli_count--;
-+}
-+
-+/*
-+ * Handle incoming FCP request.
-+ *
-+ * Caller has verified that the frame is type FCP.
-+ * Note that this may be called directly from the softirq context.
-+ */
-+void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
-+{
-+ struct ft_sess *sess;
-+ struct fc_frame_header *fh;
-+ u32 sid;
-+
-+ fh = fc_frame_header_get(fp);
-+ sid = ntoh24(fh->fh_s_id);
-+
-+ FT_SESS_DBG("sid %x preempt %x\n", sid, preempt_count());
-+
-+ sess = ft_sess_get(lport, sid);
-+ if (!sess) {
-+ FT_SESS_DBG("sid %x sess lookup failed\n", sid);
-+ /* TBD XXX - if FCP_CMND, send LOGO */
-+ fc_frame_free(fp);
-+ return;
-+ }
-+ FT_SESS_DBG("sid %x sess lookup returned %p preempt %x\n",
-+ sid, sess, preempt_count());
-+ ft_recv_req(sess, fp);
-+ ft_sess_put(sess);
-+}
-+
-+/*
-+ * Release all sessions for a target.
-+ * Called through scst_unregister_target() as well as directly.
-+ * Caller holds ft_lport_lock.
-+ */
-+int ft_tgt_release(struct scst_tgt *tgt)
-+{
-+ struct ft_tport *tport;
-+ struct hlist_head *head;
-+ struct hlist_node *pos;
-+ struct ft_sess *sess;
-+
-+ tport = scst_tgt_get_tgt_priv(tgt);
-+ tport->enabled = 0;
-+ tport->lport->service_params &= ~FCP_SPPF_TARG_FCN;
-+
-+ for (head = tport->hash; head < &tport->hash[FT_SESS_HASH_SIZE]; head++)
-+ hlist_for_each_entry_rcu(sess, pos, head, hash)
-+ ft_sess_close(sess);
-+
-+ synchronize_rcu();
-+ return 0;
-+}
-+
-+int ft_tgt_enable(struct scst_tgt *tgt, bool enable)
-+{
-+ struct ft_tport *tport;
-+ int ret = 0;
-+
-+ mutex_lock(&ft_lport_lock);
-+ if (enable) {
-+ FT_SESS_DBG("enable tgt %s\n", tgt->tgt_name);
-+ tport = scst_tgt_get_tgt_priv(tgt);
-+ tport->enabled = 1;
-+ tport->lport->service_params |= FCP_SPPF_TARG_FCN;
-+ } else {
-+ FT_SESS_DBG("disable tgt %s\n", tgt->tgt_name);
-+ ft_tgt_release(tgt);
-+ }
-+ mutex_unlock(&ft_lport_lock);
-+ return ret;
-+}
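-+
-+/*
-+ * Note: this is expected to be wired up as the enable callback of
-+ * ft_scst_template, which backs the per-target sysfs "enabled"
-+ * attribute used in README.fcst ("echo 1 > enabled").
-+ */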
-+
-+bool ft_tgt_enabled(struct scst_tgt *tgt)
-+{
-+ struct ft_tport *tport;
-+
-+ tport = scst_tgt_get_tgt_priv(tgt);
-+ return tport->enabled;
-+}
-+
-+int ft_tgt_detect(struct scst_tgt_template *tt)
-+{
-+ return ft_tport_count;
-+}
-+
-+/*
-+ * Report AEN (Asynchronous Event Notification) from device to initiator.
-+ * See notes in scst.h.
-+ */
-+int ft_report_aen(struct scst_aen *aen)
-+{
-+ struct ft_sess *sess;
-+
-+ sess = scst_sess_get_tgt_priv(scst_aen_get_sess(aen));
-+ FT_SESS_DBG("AEN event %d sess to %x lun %lld\n",
-+ aen->event_fn, sess->port_id, scst_aen_get_lun(aen));
-+ return SCST_AEN_RES_FAILED; /* XXX TBD */
-+}
-diff -uprN orig/linux-3.2/Documentation/scst/README.fcst linux-3.2/Documentation/scst/README.fcst
---- orig/linux-3.2/Documentation/scst/README.fcst
-+++ linux-3.2/Documentation/scst/README.fcst
-@@ -0,0 +1,114 @@
-+About fcst
-+==========
-+
-+The fcst kernel module implements an SCST target driver for the FCoE protocol.
-+FCoE, or Fibre Channel over Ethernet, is a protocol that carries Fibre
-+Channel frames over an Ethernet network. Since the FCoE protocol requires
-+a lossless Ethernet network, special network adapters and switches are
-+needed. Ethernet network adapters that support FCoE are called Converged
-+Network Adapters (CNAs). The standard that makes lossless Ethernet
-+communication possible is called DCB, or Data Center Bridging.
-+
-+Since FCoE frames are a kind of Ethernet frame, communication between FCoE
-+clients and servers is limited to a single Ethernet broadcast domain.
-+
-+
-+Building and Installing
-+=======================
-+
-+FCST is a kernel module that depends on libfc and SCST to provide FC target
-+support.
-+
-+To build for linux-2.6.34, do:
-+
-+1. Get the kernel source:
-+
-+ KERNEL=linux-2.6.34
-+
-+ cd /usr/src/kernels
-+ URL_DIR=http://www.kernel.org/pub/linux/kernel/v2.6
-+ TARFILE=$KERNEL.tar.bz2
-+ wget -O $TARFILE $URL_DIR/$TARFILE
-+ tar xfj $TARFILE
-+ cd $KERNEL
-+
-+2. Apply patches needed for libfc target hooks and point-to-point fixes:
-+
-+ KDIR=/usr/src/kernels/$KERNEL
-+ PDIR=/usr/src/scst/trunk/fcst/linux-patches # use your dir here
-+
-+ cd $PDIR
-+ for patch in `grep -v '^#' series-2.6.34`
-+ do
-+ (cd $KDIR; patch -p1) < $patch
-+ done
-+
-+3. Apply SCST patches to the kernel
-+ See trunk/scst/README
-+ The readahead patches are not needed in 2.6.33 or later.
-+
-+4. Configure, make, and install your kernel
-+
-+5. Install SCST
-+ See trunk/scst/README. Make sure you build the sysfs variant of SCST,
-+ because FCST supports only that variant. You need to do
-+
-+ cd trunk/scst
-+ make
-+ make install
-+
-+6. Make FCST
-+ In the directory containing this README, just do
-+ make
-+ make install
-+
-+7. Install the FCoE admin tools, including dcbd and fcoeadm.
-+ Some distros may have these.
-+ You should be able to use the source at
-+ http://www.open-fcoe.org/openfc/downloads/2.6.34/open-fcoe-2.6.34.tar.gz
-+
-+8. Bring up SCST and configure the devices.
-+
-+9. Bring up an FCoE initiator (we'll enable target mode on it later):
-+ modprobe fcoe
-+ fcoeadm -c eth3
-+
-+ The other end can be an initiator as well, in point-to-point mode
-+ over a full-duplex lossless link (enable pause on both sides).
-+ Alternatively, the other end can be an FCoE switch.
-+
-+10. Use fcc (part of the open-fcoe contrib tools in step 7) to see the
-+ initiator setup. To get the FCoE port name for eth3:
-+
-+ # fcc
-+ FC HBAs:
-+ HBA Port Name Port ID State Device
-+ host4 20:00:00:1b:21:06:58:21 01:01:02 Online eth3
-+
-+ host4 Remote Ports:
-+ Path Port Name Port ID State Roles
-+ 4:0-0 10:00:50:41:4c:4f:3b:00 01:01:01 Online FCP Initiator
-+
-+ In the above example, there's one local host on eth3, and it's in
-+ a point-to-point connection with the remote initiator whose Port ID is 010101.
-+
-+11. Load fcst
-+
-+ modprobe fcst
-+
-+12. Add any disks (configured in step 8) you want to export
-+ Note that you must have a LUN 0.
-+
-+ LPORT=20:00:00:1b:21:06:58:21 # the local Port_Name
-+
-+ cd /sys/kernel/scst_tgt/targets/fcst/$LPORT
-+ echo add disk-name 0 > luns/mgmt
-+ echo add disk-name 1 > luns/mgmt
-+
-+13. Enable target mode on the local port (the command below assumes you
-+ are still in the $LPORT directory from step 12):
-+
-+ echo 1 > enabled
-+
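-+ To verify, read the attributes back (an illustrative check; it uses
-+ only the sysfs layout shown in step 12):
-+
-+ cat enabled # should now print 1
-+ ls luns # should list the LUNs added in step 12
-+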
-+14. As a temporary workaround, you may need to reset the interface
-+ on the initiator side so it sees the SCST device as a target and
-+ discovers LUNs. You can avoid this by bringing up the initiator last.
-diff -uprN orig/linux-3.2/include/scst/iscsi_scst.h linux-3.2/include/scst/iscsi_scst.h
---- orig/linux-3.2/include/scst/iscsi_scst.h
-+++ linux-3.2/include/scst/iscsi_scst.h
-@@ -0,0 +1,226 @@
-+/*
-+ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#ifndef _ISCSI_SCST_U_H
-+#define _ISCSI_SCST_U_H
-+
-+#ifndef __KERNEL__
-+#include <sys/uio.h>
-+#endif
-+
-+#include "iscsi_scst_ver.h"
-+#include "iscsi_scst_itf_ver.h"
-+
-+/* The RFC defines a maximum iSCSI name length of 223 bytes. */
-+#define ISCSI_NAME_LEN 256
-+
-+#define ISCSI_PORTAL_LEN 64
-+
-+/* Full name is iSCSI name + connected portal */
-+#define ISCSI_FULL_NAME_LEN (ISCSI_NAME_LEN + ISCSI_PORTAL_LEN)
-+
-+#define ISCSI_LISTEN_PORT 3260
-+
-+#define SCSI_ID_LEN 24
-+
-+#ifndef aligned_u64
-+#define aligned_u64 uint64_t __attribute__((aligned(8)))
-+#endif
-+
-+#define ISCSI_MAX_ATTR_NAME_LEN 50
-+#define ISCSI_MAX_ATTR_VALUE_LEN 512
-+
-+enum {
-+ key_initial_r2t,
-+ key_immediate_data,
-+ key_max_connections,
-+ key_max_recv_data_length,
-+ key_max_xmit_data_length,
-+ key_max_burst_length,
-+ key_first_burst_length,
-+ key_default_wait_time,
-+ key_default_retain_time,
-+ key_max_outstanding_r2t,
-+ key_data_pdu_inorder,
-+ key_data_sequence_inorder,
-+ key_error_recovery_level,
-+ key_header_digest,
-+ key_data_digest,
-+ key_ofmarker,
-+ key_ifmarker,
-+ key_ofmarkint,
-+ key_ifmarkint,
-+ session_key_last,
-+};
-+
-+enum {
-+ key_queued_cmnds,
-+ key_rsp_timeout,
-+ key_nop_in_interval,
-+ key_nop_in_timeout,
-+ key_max_sessions,
-+ target_key_last,
-+};
-+
-+enum {
-+ key_session,
-+ key_target,
-+};
-+
-+struct iscsi_kern_target_info {
-+ u32 tid;
-+ u32 cookie;
-+ char name[ISCSI_NAME_LEN];
-+ u32 attrs_num;
-+ aligned_u64 attrs_ptr;
-+};
-+
-+struct iscsi_kern_session_info {
-+ u32 tid;
-+ aligned_u64 sid;
-+ char initiator_name[ISCSI_NAME_LEN];
-+ char full_initiator_name[ISCSI_FULL_NAME_LEN];
-+ u32 exp_cmd_sn;
-+ s32 session_params[session_key_last];
-+ s32 target_params[target_key_last];
-+};
-+
-+#define DIGEST_ALL (DIGEST_NONE | DIGEST_CRC32C)
-+#define DIGEST_NONE (1 << 0)
-+#define DIGEST_CRC32C (1 << 1)
-+
-+struct iscsi_kern_conn_info {
-+ u32 tid;
-+ aligned_u64 sid;
-+
-+ u32 cid;
-+ u32 stat_sn;
-+ u32 exp_stat_sn;
-+ int fd;
-+};
-+
-+struct iscsi_kern_attr {
-+ u32 mode;
-+ char name[ISCSI_MAX_ATTR_NAME_LEN];
-+};
-+
-+struct iscsi_kern_mgmt_cmd_res_info {
-+ u32 tid;
-+ u32 cookie;
-+ u32 req_cmd;
-+ u32 result;
-+ char value[ISCSI_MAX_ATTR_VALUE_LEN];
-+};
-+
-+struct iscsi_kern_params_info {
-+ u32 tid;
-+ aligned_u64 sid;
-+
-+ u32 params_type;
-+ u32 partial;
-+
-+ s32 session_params[session_key_last];
-+ s32 target_params[target_key_last];
-+};
-+
-+enum iscsi_kern_event_code {
-+ E_ADD_TARGET,
-+ E_DEL_TARGET,
-+ E_MGMT_CMD,
-+ E_ENABLE_TARGET,
-+ E_DISABLE_TARGET,
-+ E_GET_ATTR_VALUE,
-+ E_SET_ATTR_VALUE,
-+ E_CONN_CLOSE,
-+};
-+
-+struct iscsi_kern_event {
-+ u32 tid;
-+ aligned_u64 sid;
-+ u32 cid;
-+ u32 code;
-+ u32 cookie;
-+ char target_name[ISCSI_NAME_LEN];
-+ u32 param1_size;
-+ u32 param2_size;
-+};
-+
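-+/*
-+ * On REGISTER_USERD input, version holds a user space pointer to the
-+ * interface version string; on successful return the kernel overwrites
-+ * this union with the negotiated limits (see iscsi_register() in
-+ * drivers/scst/iscsi-scst/config.c).
-+ */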
-+struct iscsi_kern_register_info {
-+ union {
-+ aligned_u64 version;
-+ struct {
-+ int max_data_seg_len;
-+ int max_queued_cmds;
-+ };
-+ };
-+};
-+
-+struct iscsi_kern_attr_info {
-+ u32 tid;
-+ u32 cookie;
-+ struct iscsi_kern_attr attr;
-+};
-+
-+struct iscsi_kern_initiator_info {
-+ u32 tid;
-+ char full_initiator_name[ISCSI_FULL_NAME_LEN];
-+};
-+
-+#define DEFAULT_NR_QUEUED_CMNDS 32
-+#define MIN_NR_QUEUED_CMNDS 1
-+#define MAX_NR_QUEUED_CMNDS 256
-+
-+#define DEFAULT_RSP_TIMEOUT 90
-+#define MIN_RSP_TIMEOUT 2
-+#define MAX_RSP_TIMEOUT 65535
-+
-+#define DEFAULT_NOP_IN_INTERVAL 30
-+#define MIN_NOP_IN_INTERVAL 0
-+#define MAX_NOP_IN_INTERVAL 65535
-+
-+#define DEFAULT_NOP_IN_TIMEOUT 30
-+#define MIN_NOP_IN_TIMEOUT 2
-+#define MAX_NOP_IN_TIMEOUT 65535
-+
-+#define NETLINK_ISCSI_SCST 25
-+
-+#define REGISTER_USERD _IOWR('s', 0, struct iscsi_kern_register_info)
-+#define ADD_TARGET _IOW('s', 1, struct iscsi_kern_target_info)
-+#define DEL_TARGET _IOW('s', 2, struct iscsi_kern_target_info)
-+#define ADD_SESSION _IOW('s', 3, struct iscsi_kern_session_info)
-+#define DEL_SESSION _IOW('s', 4, struct iscsi_kern_session_info)
-+#define ADD_CONN _IOW('s', 5, struct iscsi_kern_conn_info)
-+#define DEL_CONN _IOW('s', 6, struct iscsi_kern_conn_info)
-+#define ISCSI_PARAM_SET _IOW('s', 7, struct iscsi_kern_params_info)
-+#define ISCSI_PARAM_GET _IOWR('s', 8, struct iscsi_kern_params_info)
-+
-+#define ISCSI_ATTR_ADD _IOW('s', 9, struct iscsi_kern_attr_info)
-+#define ISCSI_ATTR_DEL _IOW('s', 10, struct iscsi_kern_attr_info)
-+#define MGMT_CMD_CALLBACK _IOW('s', 11, struct iscsi_kern_mgmt_cmd_res_info)
-+
-+#define ISCSI_INITIATOR_ALLOWED _IOW('s', 12, struct iscsi_kern_initiator_info)
-+
-+static inline int iscsi_is_key_internal(int key)
-+{
-+ switch (key) {
-+ case key_max_xmit_data_length:
-+ return 1;
-+ default:
-+ return 0;
-+ }
-+}
-+
-+#endif
-diff -uprN orig/linux-3.2/include/scst/iscsi_scst_ver.h linux-3.2/include/scst/iscsi_scst_ver.h
---- orig/linux-3.2/include/scst/iscsi_scst_ver.h
-+++ linux-3.2/include/scst/iscsi_scst_ver.h
-@@ -0,0 +1,20 @@
-+/*
-+ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+
-+#define ISCSI_VERSION_STRING_SUFFIX
-+
-+#define ISCSI_VERSION_STRING "2.2.0" ISCSI_VERSION_STRING_SUFFIX
-diff -uprN orig/linux-3.2/include/scst/iscsi_scst_itf_ver.h linux-3.2/include/scst/iscsi_scst_itf_ver.h
---- orig/linux-3.2/include/scst/iscsi_scst_itf_ver.h
-+++ linux-3.2/include/scst/iscsi_scst_itf_ver.h
-@@ -0,0 +1,3 @@
-+/* Autogenerated, don't edit */
-+
-+#define ISCSI_SCST_INTERFACE_VERSION ISCSI_VERSION_STRING "_" "6e5293bf78ac2fa099a12c932a10afb091dc7731"
-diff -uprN orig/linux-3.2/drivers/scst/iscsi-scst/Makefile linux-3.2/drivers/scst/iscsi-scst/Makefile
---- orig/linux-3.2/drivers/scst/iscsi-scst/Makefile
-+++ linux-3.2/drivers/scst/iscsi-scst/Makefile
-@@ -0,0 +1,4 @@
-+iscsi-scst-y := iscsi.o nthread.o config.o digest.o \
-+ conn.o session.o target.o event.o param.o
-+
-+obj-$(CONFIG_SCST_ISCSI) += iscsi-scst.o
-diff -uprN orig/linux-3.2/drivers/scst/iscsi-scst/Kconfig linux-3.2/drivers/scst/iscsi-scst/Kconfig
---- orig/linux-3.2/drivers/scst/iscsi-scst/Kconfig
-+++ linux-3.2/drivers/scst/iscsi-scst/Kconfig
-@@ -0,0 +1,25 @@
-+config SCST_ISCSI
-+ tristate "ISCSI Target"
-+ depends on SCST && INET && LIBCRC32C
-+ default SCST
-+ help
-+ iSCSI target driver for the SCST framework. The iSCSI protocol is
-+ defined in RFC 3720. To use this driver you should download its
-+ user space part from http://scst.sourceforge.net.
-+
-+config SCST_ISCSI_DEBUG_DIGEST_FAILURES
-+ bool "Simulate iSCSI digest failures"
-+ depends on SCST_ISCSI
-+ help
-+ Simulates iSCSI digest failures in random places. Even when iSCSI
-+ traffic is sent over a TCP connection, the 16-bit TCP checksum is too
-+ weak for the requirements of a storage protocol. Furthermore, there
-+ are also instances where the TCP checksum does not protect iSCSI
-+ data, as when data is corrupted while being transferred on a PCI bus
-+ or while in memory. The iSCSI protocol therefore defines a 32-bit CRC
-+ digest on iSCSI packets in order to detect data corruption on an
-+ end-to-end basis. CRCs can be used on iSCSI PDU headers and/or data.
-+ Enabling this option makes it possible to test digest failure
-+ recovery in the iSCSI initiator that is talking to SCST.
-+
-+ If unsure, say "N".
-diff -uprN orig/linux-3.2/drivers/scst/iscsi-scst/config.c linux-3.2/drivers/scst/iscsi-scst/config.c
---- orig/linux-3.2/drivers/scst/iscsi-scst/config.c
-+++ linux-3.2/drivers/scst/iscsi-scst/config.c
-@@ -0,0 +1,1034 @@
-+/*
-+ * Copyright (C) 2004 - 2005 FUJITA Tomonori <tomof@acm.org>
-+ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/module.h>
-+#include "iscsi.h"
-+
-+/* Protected by target_mgmt_mutex */
-+int ctr_open_state;
-+
-+/* Protected by target_mgmt_mutex */
-+static LIST_HEAD(iscsi_attrs_list);
-+
-+static ssize_t iscsi_version_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ TRACE_ENTRY();
-+
-+ sprintf(buf, "%s\n", ISCSI_VERSION_STRING);
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ strcat(buf, "EXTRACHECKS\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_TRACING
-+ strcat(buf, "TRACING\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG
-+ strcat(buf, "DEBUG\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_ISCSI_DEBUG_DIGEST_FAILURES
-+ strcat(buf, "DEBUG_DIGEST_FAILURES\n");
-+#endif
-+
-+ TRACE_EXIT();
-+ return strlen(buf);
-+}
-+
-+static struct kobj_attribute iscsi_version_attr =
-+ __ATTR(version, S_IRUGO, iscsi_version_show, NULL);
-+
-+static ssize_t iscsi_open_state_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ switch (ctr_open_state) {
-+ case ISCSI_CTR_OPEN_STATE_CLOSED:
-+ sprintf(buf, "%s\n", "closed");
-+ break;
-+ case ISCSI_CTR_OPEN_STATE_OPEN:
-+ sprintf(buf, "%s\n", "open");
-+ break;
-+ case ISCSI_CTR_OPEN_STATE_CLOSING:
-+ sprintf(buf, "%s\n", "closing");
-+ break;
-+ default:
-+ sprintf(buf, "%s\n", "unknown");
-+ break;
-+ }
-+
-+ return strlen(buf);
-+}
-+
-+static struct kobj_attribute iscsi_open_state_attr =
-+ __ATTR(open_state, S_IRUGO, iscsi_open_state_show, NULL);
-+
-+const struct attribute *iscsi_attrs[] = {
-+ &iscsi_version_attr.attr,
-+ &iscsi_open_state_attr.attr,
-+ NULL,
-+};
-+
-+/* target_mgmt_mutex supposed to be locked */
-+static int add_conn(void __user *ptr)
-+{
-+ int err, rc;
-+ struct iscsi_session *session;
-+ struct iscsi_kern_conn_info info;
-+ struct iscsi_target *target;
-+
-+ TRACE_ENTRY();
-+
-+ rc = copy_from_user(&info, ptr, sizeof(info));
-+ if (rc != 0) {
-+ PRINT_ERROR("Failed to copy %d user's bytes", rc);
-+ err = -EFAULT;
-+ goto out;
-+ }
-+
-+ target = target_lookup_by_id(info.tid);
-+ if (target == NULL) {
-+ PRINT_ERROR("Target %d not found", info.tid);
-+ err = -ENOENT;
-+ goto out;
-+ }
-+
-+ mutex_lock(&target->target_mutex);
-+
-+ session = session_lookup(target, info.sid);
-+ if (!session) {
-+ PRINT_ERROR("Session %lld not found",
-+ (long long unsigned int)info.tid);
-+ err = -ENOENT;
-+ goto out_unlock;
-+ }
-+
-+ err = __add_conn(session, &info);
-+
-+out_unlock:
-+ mutex_unlock(&target->target_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(err);
-+ return err;
-+}
-+
-+/* target_mgmt_mutex supposed to be locked */
-+static int del_conn(void __user *ptr)
-+{
-+ int err, rc;
-+ struct iscsi_session *session;
-+ struct iscsi_kern_conn_info info;
-+ struct iscsi_target *target;
-+
-+ TRACE_ENTRY();
-+
-+ rc = copy_from_user(&info, ptr, sizeof(info));
-+ if (rc != 0) {
-+ PRINT_ERROR("Failed to copy %d user's bytes", rc);
-+ err = -EFAULT;
-+ goto out;
-+ }
-+
-+ target = target_lookup_by_id(info.tid);
-+ if (target == NULL) {
-+ PRINT_ERROR("Target %d not found", info.tid);
-+ err = -ENOENT;
-+ goto out;
-+ }
-+
-+ mutex_lock(&target->target_mutex);
-+
-+ session = session_lookup(target, info.sid);
-+ if (!session) {
-+ PRINT_ERROR("Session %llx not found",
-+ (long long unsigned int)info.sid);
-+ err = -ENOENT;
-+ goto out_unlock;
-+ }
-+
-+ err = __del_conn(session, &info);
-+
-+out_unlock:
-+ mutex_unlock(&target->target_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(err);
-+ return err;
-+}
-+
-+/* target_mgmt_mutex supposed to be locked */
-+static int add_session(void __user *ptr)
-+{
-+ int err, rc;
-+ struct iscsi_kern_session_info *info;
-+ struct iscsi_target *target;
-+
-+ TRACE_ENTRY();
-+
-+ info = kzalloc(sizeof(*info), GFP_KERNEL);
-+ if (info == NULL) {
-+ PRINT_ERROR("Can't alloc info (size %zd)", sizeof(*info));
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ rc = copy_from_user(info, ptr, sizeof(*info));
-+ if (rc != 0) {
-+ PRINT_ERROR("Failed to copy %d user's bytes", rc);
-+ err = -EFAULT;
-+ goto out_free;
-+ }
-+
-+ info->initiator_name[sizeof(info->initiator_name)-1] = '\0';
-+ info->full_initiator_name[sizeof(info->full_initiator_name)-1] = '\0';
-+
-+ target = target_lookup_by_id(info->tid);
-+ if (target == NULL) {
-+ PRINT_ERROR("Target %d not found", info->tid);
-+ err = -ENOENT;
-+ goto out_free;
-+ }
-+
-+ err = __add_session(target, info);
-+
-+out_free:
-+ kfree(info);
-+
-+out:
-+ TRACE_EXIT_RES(err);
-+ return err;
-+}
-+
-+/* target_mgmt_mutex supposed to be locked */
-+static int del_session(void __user *ptr)
-+{
-+ int err, rc;
-+ struct iscsi_kern_session_info *info;
-+ struct iscsi_target *target;
-+
-+ TRACE_ENTRY();
-+
-+ info = kzalloc(sizeof(*info), GFP_KERNEL);
-+ if (info == NULL) {
-+ PRINT_ERROR("Can't alloc info (size %zd)", sizeof(*info));
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ rc = copy_from_user(info, ptr, sizeof(*info));
-+ if (rc != 0) {
-+ PRINT_ERROR("Failed to copy %d user's bytes", rc);
-+ err = -EFAULT;
-+ goto out_free;
-+ }
-+
-+ info->initiator_name[sizeof(info->initiator_name)-1] = '\0';
-+
-+ target = target_lookup_by_id(info->tid);
-+ if (target == NULL) {
-+ PRINT_ERROR("Target %d not found", info->tid);
-+ err = -ENOENT;
-+ goto out_free;
-+ }
-+
-+ mutex_lock(&target->target_mutex);
-+ err = __del_session(target, info->sid);
-+ mutex_unlock(&target->target_mutex);
-+
-+out_free:
-+ kfree(info);
-+
-+out:
-+ TRACE_EXIT_RES(err);
-+ return err;
-+}
-+
-+/* target_mgmt_mutex supposed to be locked */
-+static int iscsi_params_config(void __user *ptr, int set)
-+{
-+ int err, rc;
-+ struct iscsi_kern_params_info info;
-+ struct iscsi_target *target;
-+
-+ TRACE_ENTRY();
-+
-+ rc = copy_from_user(&info, ptr, sizeof(info));
-+ if (rc != 0) {
-+ PRINT_ERROR("Failed to copy %d user's bytes", rc);
-+ err = -EFAULT;
-+ goto out;
-+ }
-+
-+ target = target_lookup_by_id(info.tid);
-+ if (target == NULL) {
-+ PRINT_ERROR("Target %d not found", info.tid);
-+ err = -ENOENT;
-+ goto out;
-+ }
-+
-+ mutex_lock(&target->target_mutex);
-+ err = iscsi_params_set(target, &info, set);
-+ mutex_unlock(&target->target_mutex);
-+
-+ if (err < 0)
-+ goto out;
-+
-+ if (!set) {
-+ rc = copy_to_user(ptr, &info, sizeof(info));
-+ if (rc != 0) {
-+ PRINT_ERROR("Failed to copy to user %d bytes", rc);
-+ err = -EFAULT;
-+ goto out;
-+ }
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(err);
-+ return err;
-+}
-+
-+/* target_mgmt_mutex supposed to be locked */
-+static int iscsi_initiator_allowed(void __user *ptr)
-+{
-+ int err = 0, rc;
-+ struct iscsi_kern_initiator_info cinfo;
-+ struct iscsi_target *target;
-+
-+ TRACE_ENTRY();
-+
-+ rc = copy_from_user(&cinfo, ptr, sizeof(cinfo));
-+ if (rc != 0) {
-+ PRINT_ERROR("Failed to copy %d user's bytes", rc);
-+ err = -EFAULT;
-+ goto out;
-+ }
-+
-+ cinfo.full_initiator_name[sizeof(cinfo.full_initiator_name)-1] = '\0';
-+
-+ target = target_lookup_by_id(cinfo.tid);
-+ if (target == NULL) {
-+ PRINT_ERROR("Target %d not found", cinfo.tid);
-+ err = -ENOENT;
-+ goto out;
-+ }
-+
-+ err = scst_initiator_has_luns(target->scst_tgt,
-+ cinfo.full_initiator_name);
-+
-+out:
-+ TRACE_EXIT_RES(err);
-+ return err;
-+}
-+
-+/* target_mgmt_mutex supposed to be locked */
-+static int mgmt_cmd_callback(void __user *ptr)
-+{
-+ int err = 0, rc;
-+ struct iscsi_kern_mgmt_cmd_res_info cinfo;
-+ struct scst_sysfs_user_info *info;
-+
-+ TRACE_ENTRY();
-+
-+ rc = copy_from_user(&cinfo, ptr, sizeof(cinfo));
-+ if (rc != 0) {
-+ PRINT_ERROR("Failed to copy %d user's bytes", rc);
-+ err = -EFAULT;
-+ goto out;
-+ }
-+
-+ cinfo.value[sizeof(cinfo.value)-1] = '\0';
-+
-+ info = scst_sysfs_user_get_info(cinfo.cookie);
-+ TRACE_DBG("cookie %u, info %p, result %d", cinfo.cookie, info,
-+ cinfo.result);
-+ if (info == NULL) {
-+ err = -EINVAL;
-+ goto out;
-+ }
-+
-+ info->info_status = 0;
-+
-+ if (cinfo.result != 0) {
-+ info->info_status = cinfo.result;
-+ goto out_complete;
-+ }
-+
-+ switch (cinfo.req_cmd) {
-+ case E_ENABLE_TARGET:
-+ case E_DISABLE_TARGET:
-+ {
-+ struct iscsi_target *target;
-+
-+ target = target_lookup_by_id(cinfo.tid);
-+ if (target == NULL) {
-+ PRINT_ERROR("Target %d not found", cinfo.tid);
-+ err = -ENOENT;
-+ goto out_status;
-+ }
-+
-+ target->tgt_enabled = (cinfo.req_cmd == E_ENABLE_TARGET) ? 1 : 0;
-+ break;
-+ }
-+
-+ case E_GET_ATTR_VALUE:
-+ info->data = kstrdup(cinfo.value, GFP_KERNEL);
-+ if (info->data == NULL) {
-+ PRINT_ERROR("Can't dublicate value %s", cinfo.value);
-+ info->info_status = -ENOMEM;
-+ goto out_complete;
-+ }
-+ break;
-+ }
-+
-+out_complete:
-+ complete(&info->info_completion);
-+
-+out:
-+ TRACE_EXIT_RES(err);
-+ return err;
-+
-+out_status:
-+ info->info_status = err;
-+ goto out_complete;
-+}
-+
-+static ssize_t iscsi_attr_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos;
-+ struct iscsi_attr *tgt_attr;
-+ void *value;
-+
-+ TRACE_ENTRY();
-+
-+ tgt_attr = container_of(attr, struct iscsi_attr, attr);
-+
-+ pos = iscsi_sysfs_send_event(
-+ (tgt_attr->target != NULL) ? tgt_attr->target->tid : 0,
-+ E_GET_ATTR_VALUE, tgt_attr->name, NULL, &value);
-+
-+ if (pos != 0)
-+ goto out;
-+
-+ pos = scnprintf(buf, SCST_SYSFS_BLOCK_SIZE, "%s\n", (char *)value);
-+
-+ kfree(value);
-+
-+out:
-+ TRACE_EXIT_RES(pos);
-+ return pos;
-+}
-+
-+static ssize_t iscsi_attr_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ char *buffer;
-+ struct iscsi_attr *tgt_attr;
-+
-+ TRACE_ENTRY();
-+
-+ buffer = kzalloc(count+1, GFP_KERNEL);
-+ if (buffer == NULL) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ memcpy(buffer, buf, count);
-+ buffer[count] = '\0';
-+
-+ tgt_attr = container_of(attr, struct iscsi_attr, attr);
-+
-+ TRACE_DBG("attr %s, buffer %s", tgt_attr->attr.attr.name, buffer);
-+
-+ res = iscsi_sysfs_send_event(
-+ (tgt_attr->target != NULL) ? tgt_attr->target->tid : 0,
-+ E_SET_ATTR_VALUE, tgt_attr->name, buffer, NULL);
-+
-+ kfree(buffer);
-+
-+ if (res == 0)
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/*
-+ * target_mgmt_mutex supposed to be locked. If target != 0, target_mutex
-+ * supposed to be locked as well.
-+ */
-+int iscsi_add_attr(struct iscsi_target *target,
-+ const struct iscsi_kern_attr *attr_info)
-+{
-+ int res = 0;
-+ struct iscsi_attr *tgt_attr;
-+ struct list_head *attrs_list;
-+ const char *name;
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+ static struct lock_class_key __key;
-+#endif
-+
-+ TRACE_ENTRY();
-+
-+ if (target != NULL) {
-+ attrs_list = &target->attrs_list;
-+ name = target->name;
-+ } else {
-+ attrs_list = &iscsi_attrs_list;
-+ name = "global";
-+ }
-+
-+ list_for_each_entry(tgt_attr, attrs_list, attrs_list_entry) {
-+ /* Both for sure NULL-terminated */
-+ if (strcmp(tgt_attr->name, attr_info->name) == 0) {
-+ PRINT_ERROR("Attribute %s for %s already exist",
-+ attr_info->name, name);
-+ res = -EEXIST;
-+ goto out;
-+ }
-+ }
-+
-+ TRACE_DBG("Adding %s's attr %s with mode %x", name,
-+ attr_info->name, attr_info->mode);
-+
-+ tgt_attr = kzalloc(sizeof(*tgt_attr), GFP_KERNEL);
-+ if (tgt_attr == NULL) {
-+ PRINT_ERROR("Unable to allocate user (size %zd)",
-+ sizeof(*tgt_attr));
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ tgt_attr->target = target;
-+
-+ tgt_attr->name = kstrdup(attr_info->name, GFP_KERNEL);
-+ if (tgt_attr->name == NULL) {
-+ PRINT_ERROR("Unable to allocate attr %s name/value (target %s)",
-+ attr_info->name, name);
-+ res = -ENOMEM;
-+ goto out_free;
-+ }
-+
-+ list_add(&tgt_attr->attrs_list_entry, attrs_list);
-+
-+ tgt_attr->attr.attr.name = tgt_attr->name;
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+ tgt_attr->attr.attr.key = &__key;
-+#endif
-+ tgt_attr->attr.attr.mode = attr_info->mode & (S_IRUGO | S_IWUGO);
-+ tgt_attr->attr.show = iscsi_attr_show;
-+ tgt_attr->attr.store = iscsi_attr_store;
-+
-+ TRACE_DBG("tgt_attr %p, attr %p", tgt_attr, &tgt_attr->attr.attr);
-+
-+ res = sysfs_create_file(
-+ (target != NULL) ? scst_sysfs_get_tgt_kobj(target->scst_tgt) :
-+ scst_sysfs_get_tgtt_kobj(&iscsi_template),
-+ &tgt_attr->attr.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Unable to create file '%s' for target '%s'",
-+ tgt_attr->attr.attr.name, name);
-+ goto out_del;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_del:
-+ list_del(&tgt_attr->attrs_list_entry);
-+
-+out_free:
-+ kfree(tgt_attr->name);
-+ kfree(tgt_attr);
-+ goto out;
-+}
-+
-+void __iscsi_del_attr(struct iscsi_target *target,
-+ struct iscsi_attr *tgt_attr)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Deleting attr %s (target %s, tgt_attr %p, attr %p)",
-+ tgt_attr->name, (target != NULL) ? target->name : "global",
-+ tgt_attr, &tgt_attr->attr.attr);
-+
-+ list_del(&tgt_attr->attrs_list_entry);
-+
-+ sysfs_remove_file((target != NULL) ?
-+ scst_sysfs_get_tgt_kobj(target->scst_tgt) :
-+ scst_sysfs_get_tgtt_kobj(&iscsi_template),
-+ &tgt_attr->attr.attr);
-+
-+ kfree(tgt_attr->name);
-+ kfree(tgt_attr);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * target_mgmt_mutex supposed to be locked. If target != 0, target_mutex
-+ * supposed to be locked as well.
-+ */
-+static int iscsi_del_attr(struct iscsi_target *target,
-+ const char *attr_name)
-+{
-+ int res = 0;
-+ struct iscsi_attr *tgt_attr, *a;
-+ struct list_head *attrs_list;
-+
-+ TRACE_ENTRY();
-+
-+ if (target != NULL)
-+ attrs_list = &target->attrs_list;
-+ else
-+ attrs_list = &iscsi_attrs_list;
-+
-+ tgt_attr = NULL;
-+ list_for_each_entry(a, attrs_list, attrs_list_entry) {
-+ /* Both for sure NULL-terminated */
-+ if (strcmp(a->name, attr_name) == 0) {
-+ tgt_attr = a;
-+ break;
-+ }
-+ }
-+
-+ if (tgt_attr == NULL) {
-+ PRINT_ERROR("attr %s not found (target %s)", attr_name,
-+ (target != NULL) ? target->name : "global");
-+ res = -ENOENT;
-+ goto out;
-+ }
-+
-+ __iscsi_del_attr(target, tgt_attr);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* target_mgmt_mutex supposed to be locked */
-+static int iscsi_attr_cmd(void __user *ptr, unsigned int cmd)
-+{
-+ int rc, err = 0;
-+ struct iscsi_kern_attr_info info;
-+ struct iscsi_target *target;
-+ struct scst_sysfs_user_info *i = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ rc = copy_from_user(&info, ptr, sizeof(info));
-+ if (rc != 0) {
-+ PRINT_ERROR("Failed to copy %d user's bytes", rc);
-+ err = -EFAULT;
-+ goto out;
-+ }
-+
-+ info.attr.name[sizeof(info.attr.name)-1] = '\0';
-+
-+ if (info.cookie != 0) {
-+ i = scst_sysfs_user_get_info(info.cookie);
-+ TRACE_DBG("cookie %u, uinfo %p", info.cookie, i);
-+ if (i == NULL) {
-+ err = -EINVAL;
-+ goto out;
-+ }
-+ }
-+
-+ target = target_lookup_by_id(info.tid);
-+
-+ if (target != NULL)
-+ mutex_lock(&target->target_mutex);
-+
-+ switch (cmd) {
-+ case ISCSI_ATTR_ADD:
-+ err = iscsi_add_attr(target, &info.attr);
-+ break;
-+ case ISCSI_ATTR_DEL:
-+ err = iscsi_del_attr(target, info.attr.name);
-+ break;
-+ default:
-+ BUG();
-+ }
-+
-+ if (target != NULL)
-+ mutex_unlock(&target->target_mutex);
-+
-+ if (i != NULL) {
-+ i->info_status = err;
-+ complete(&i->info_completion);
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(err);
-+ return err;
-+}
-+
-+/* target_mgmt_mutex supposed to be locked */
-+static int add_target(void __user *ptr)
-+{
-+ int err, rc;
-+ struct iscsi_kern_target_info *info;
-+ struct scst_sysfs_user_info *uinfo;
-+
-+ TRACE_ENTRY();
-+
-+ info = kzalloc(sizeof(*info), GFP_KERNEL);
-+ if (info == NULL) {
-+ PRINT_ERROR("Can't alloc info (size %zd)", sizeof(*info));
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ rc = copy_from_user(info, ptr, sizeof(*info));
-+ if (rc != 0) {
-+ PRINT_ERROR("Failed to copy %d user's bytes", rc);
-+ err = -EFAULT;
-+ goto out_free;
-+ }
-+
-+ if (target_lookup_by_id(info->tid) != NULL) {
-+ PRINT_ERROR("Target %u already exist!", info->tid);
-+ err = -EEXIST;
-+ goto out_free;
-+ }
-+
-+ info->name[sizeof(info->name)-1] = '\0';
-+
-+ if (info->cookie != 0) {
-+ uinfo = scst_sysfs_user_get_info(info->cookie);
-+ TRACE_DBG("cookie %u, uinfo %p", info->cookie, uinfo);
-+ if (uinfo == NULL) {
-+ err = -EINVAL;
-+ goto out_free;
-+ }
-+ } else
-+ uinfo = NULL;
-+
-+ err = __add_target(info);
-+
-+ if (uinfo != NULL) {
-+ uinfo->info_status = err;
-+ complete(&uinfo->info_completion);
-+ }
-+
-+out_free:
-+ kfree(info);
-+
-+out:
-+ TRACE_EXIT_RES(err);
-+ return err;
-+}
-+
-+/* target_mgmt_mutex supposed to be locked */
-+static int del_target(void __user *ptr)
-+{
-+ int err, rc;
-+ struct iscsi_kern_target_info info;
-+ struct scst_sysfs_user_info *uinfo;
-+
-+ TRACE_ENTRY();
-+
-+ rc = copy_from_user(&info, ptr, sizeof(info));
-+ if (rc != 0) {
-+ PRINT_ERROR("Failed to copy %d user's bytes", rc);
-+ err = -EFAULT;
-+ goto out;
-+ }
-+
-+ info.name[sizeof(info.name)-1] = '\0';
-+
-+ if (info.cookie != 0) {
-+ uinfo = scst_sysfs_user_get_info(info.cookie);
-+ TRACE_DBG("cookie %u, uinfo %p", info.cookie, uinfo);
-+ if (uinfo == NULL) {
-+ err = -EINVAL;
-+ goto out;
-+ }
-+ } else
-+ uinfo = NULL;
-+
-+ err = __del_target(info.tid);
-+
-+ if (uinfo != NULL) {
-+ uinfo->info_status = err;
-+ complete(&uinfo->info_completion);
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(err);
-+ return err;
-+}
-+
-+static int iscsi_register(void __user *arg)
-+{
-+ struct iscsi_kern_register_info reg;
-+ char ver[sizeof(ISCSI_SCST_INTERFACE_VERSION)+1];
-+ int res, rc;
-+
-+ TRACE_ENTRY();
-+
-+ rc = copy_from_user(&reg, arg, sizeof(reg));
-+ if (rc != 0) {
-+ PRINT_ERROR("%s", "Unable to get register info");
-+ res = -EFAULT;
-+ goto out;
-+ }
-+
-+ rc = copy_from_user(ver, (void __user *)(unsigned long)reg.version,
-+ sizeof(ver));
-+ if (rc != 0) {
-+ PRINT_ERROR("%s", "Unable to get version string");
-+ res = -EFAULT;
-+ goto out;
-+ }
-+ ver[sizeof(ver)-1] = '\0';
-+
-+ if (strcmp(ver, ISCSI_SCST_INTERFACE_VERSION) != 0) {
-+ PRINT_ERROR("Incorrect version of user space %s (expected %s)",
-+ ver, ISCSI_SCST_INTERFACE_VERSION);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ memset(&reg, 0, sizeof(reg));
-+ reg.max_data_seg_len = ISCSI_CONN_IOV_MAX << PAGE_SHIFT;
-+ reg.max_queued_cmds = scst_get_max_lun_commands(NULL, NO_SUCH_LUN);
-+
-+ res = 0;
-+
-+ rc = copy_to_user(arg, &reg, sizeof(reg));
-+ if (rc != 0) {
-+ PRINT_ERROR("Failed to copy to user %d bytes", rc);
-+ res = -EFAULT;
-+ goto out;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
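-+
-+/*
-+ * A minimal sketch of the matching user space call (not part of this
-+ * module; the control device path below is hypothetical):
-+ *
-+ *	struct iscsi_kern_register_info reg;
-+ *	int fd = open("/dev/iscsi-scst-ctl", O_RDWR);
-+ *
-+ *	memset(&reg, 0, sizeof(reg));
-+ *	reg.version = (unsigned long)ISCSI_SCST_INTERFACE_VERSION;
-+ *	if (fd >= 0 && ioctl(fd, REGISTER_USERD, &reg) == 0)
-+ *		printf("max_data_seg_len %d, max_queued_cmds %d\n",
-+ *		       reg.max_data_seg_len, reg.max_queued_cmds);
-+ */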
-+
-+static long ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-+{
-+ long err;
-+
-+ TRACE_ENTRY();
-+
-+ if (cmd == REGISTER_USERD) {
-+ err = iscsi_register((void __user *)arg);
-+ goto out;
-+ }
-+
-+ err = mutex_lock_interruptible(&target_mgmt_mutex);
-+ if (err < 0)
-+ goto out;
-+
-+ switch (cmd) {
-+ case ADD_TARGET:
-+ err = add_target((void __user *)arg);
-+ break;
-+
-+ case DEL_TARGET:
-+ err = del_target((void __user *)arg);
-+ break;
-+
-+ case ISCSI_ATTR_ADD:
-+ case ISCSI_ATTR_DEL:
-+ err = iscsi_attr_cmd((void __user *)arg, cmd);
-+ break;
-+
-+ case MGMT_CMD_CALLBACK:
-+ err = mgmt_cmd_callback((void __user *)arg);
-+ break;
-+
-+ case ISCSI_INITIATOR_ALLOWED:
-+ err = iscsi_initiator_allowed((void __user *)arg);
-+ break;
-+
-+ case ADD_SESSION:
-+ err = add_session((void __user *)arg);
-+ break;
-+
-+ case DEL_SESSION:
-+ err = del_session((void __user *)arg);
-+ break;
-+
-+ case ISCSI_PARAM_SET:
-+ err = iscsi_params_config((void __user *)arg, 1);
-+ break;
-+
-+ case ISCSI_PARAM_GET:
-+ err = iscsi_params_config((void __user *)arg, 0);
-+ break;
-+
-+ case ADD_CONN:
-+ err = add_conn((void __user *)arg);
-+ break;
-+
-+ case DEL_CONN:
-+ err = del_conn((void __user *)arg);
-+ break;
-+
-+ default:
-+ PRINT_ERROR("Invalid ioctl cmd %x", cmd);
-+ err = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+out_unlock:
-+ mutex_unlock(&target_mgmt_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(err);
-+ return err;
-+}
-+
-+static int open(struct inode *inode, struct file *file)
-+{
-+ bool already;
-+
-+ mutex_lock(&target_mgmt_mutex);
-+ already = (ctr_open_state != ISCSI_CTR_OPEN_STATE_CLOSED);
-+ if (!already)
-+ ctr_open_state = ISCSI_CTR_OPEN_STATE_OPEN;
-+ mutex_unlock(&target_mgmt_mutex);
-+
-+ if (already) {
-+ PRINT_WARNING("%s", "Attempt to second open the control "
-+ "device!");
-+ return -EBUSY;
-+ } else
-+ return 0;
-+}
-+
-+static int release(struct inode *inode, struct file *filp)
-+{
-+ struct iscsi_attr *attr, *t;
-+
-+ TRACE(TRACE_MGMT, "%s", "Releasing allocated resources");
-+
-+ mutex_lock(&target_mgmt_mutex);
-+ ctr_open_state = ISCSI_CTR_OPEN_STATE_CLOSING;
-+ mutex_unlock(&target_mgmt_mutex);
-+
-+ target_del_all();
-+
-+ mutex_lock(&target_mgmt_mutex);
-+
-+ list_for_each_entry_safe(attr, t, &iscsi_attrs_list,
-+ attrs_list_entry) {
-+ __iscsi_del_attr(NULL, attr);
-+ }
-+
-+ ctr_open_state = ISCSI_CTR_OPEN_STATE_CLOSED;
-+
-+ mutex_unlock(&target_mgmt_mutex);
-+
-+ return 0;
-+}
-+
-+const struct file_operations ctr_fops = {
-+ .owner = THIS_MODULE,
-+ .unlocked_ioctl = ioctl,
-+ .compat_ioctl = ioctl,
-+ .open = open,
-+ .release = release,
-+};
-+
-+#ifdef CONFIG_SCST_DEBUG
-+static void iscsi_dump_char(int ch, unsigned char *text, int *pos)
-+{
-+ int i = *pos;
-+
-+ if (ch < 0) {
-+ while ((i % 16) != 0) {
-+ printk(KERN_CONT " ");
-+ text[i] = ' ';
-+ i++;
-+ if ((i % 16) == 0)
-+ printk(KERN_CONT " | %.16s |\n", text);
-+ else if ((i % 4) == 0)
-+ printk(KERN_CONT " |");
-+ }
-+ i = 0;
-+ goto out;
-+ }
-+
-+ text[i] = (ch < 0x20 || (ch >= 0x80 && ch <= 0xa0)) ? ' ' : ch;
-+ printk(KERN_CONT " %02x", ch);
-+ i++;
-+ if ((i % 16) == 0) {
-+ printk(KERN_CONT " | %.16s |\n", text);
-+ i = 0;
-+ } else if ((i % 4) == 0)
-+ printk(KERN_CONT " |");
-+
-+out:
-+ *pos = i;
-+ return;
-+}
-+
-+void iscsi_dump_pdu(struct iscsi_pdu *pdu)
-+{
-+ unsigned char text[16];
-+ int pos = 0;
-+
-+ if (trace_flag & TRACE_D_DUMP_PDU) {
-+ unsigned char *buf;
-+ int i;
-+
-+ buf = (void *)&pdu->bhs;
-+ printk(KERN_DEBUG "BHS: (%p,%zd)\n", buf, sizeof(pdu->bhs));
-+ for (i = 0; i < (int)sizeof(pdu->bhs); i++)
-+ iscsi_dump_char(*buf++, text, &pos);
-+ iscsi_dump_char(-1, text, &pos);
-+
-+ buf = (void *)pdu->ahs;
-+ printk(KERN_DEBUG "AHS: (%p,%d)\n", buf, pdu->ahssize);
-+ for (i = 0; i < pdu->ahssize; i++)
-+ iscsi_dump_char(*buf++, text, &pos);
-+ iscsi_dump_char(-1, text, &pos);
-+
-+ printk(KERN_DEBUG "Data: (%d)\n", pdu->datasize);
-+ }
-+}
-+
-+unsigned long iscsi_get_flow_ctrl_or_mgmt_dbg_log_flag(struct iscsi_cmnd *cmnd)
-+{
-+ unsigned long flag;
-+
-+ if (cmnd->cmd_req != NULL)
-+ cmnd = cmnd->cmd_req;
-+
-+ if (cmnd->scst_cmd == NULL)
-+ flag = TRACE_MGMT_DEBUG;
-+ else {
-+ int status = scst_cmd_get_status(cmnd->scst_cmd);
-+ if ((status == SAM_STAT_TASK_SET_FULL) ||
-+ (status == SAM_STAT_BUSY))
-+ flag = TRACE_FLOW_CONTROL;
-+ else
-+ flag = TRACE_MGMT_DEBUG;
-+ }
-+ return flag;
-+}
-+
-+#endif /* CONFIG_SCST_DEBUG */
-diff -uprN orig/linux-3.2/drivers/scst/iscsi-scst/conn.c linux-3.2/drivers/scst/iscsi-scst/conn.c
---- orig/linux-3.2/drivers/scst/iscsi-scst/conn.c
-+++ linux-3.2/drivers/scst/iscsi-scst/conn.c
-@@ -0,0 +1,945 @@
-+/*
-+ * Copyright (C) 2002 - 2003 Ardis Technolgies <roman@ardistech.com>
-+ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/file.h>
-+#include <linux/ip.h>
-+#include <net/tcp.h>
-+
-+#include "iscsi.h"
-+#include "digest.h"
-+
-+static int print_conn_state(char *p, size_t size, struct iscsi_conn *conn)
-+{
-+ int pos = 0;
-+
-+ if (conn->closing) {
-+ pos += scnprintf(p, size, "%s", "closing");
-+ goto out;
-+ }
-+
-+ switch (conn->rd_state) {
-+ case ISCSI_CONN_RD_STATE_PROCESSING:
-+ pos += scnprintf(&p[pos], size - pos, "%s", "read_processing ");
-+ break;
-+ case ISCSI_CONN_RD_STATE_IN_LIST:
-+ pos += scnprintf(&p[pos], size - pos, "%s", "in_read_list ");
-+ break;
-+ }
-+
-+ switch (conn->wr_state) {
-+ case ISCSI_CONN_WR_STATE_PROCESSING:
-+ pos += scnprintf(&p[pos], size - pos, "%s", "write_processing ");
-+ break;
-+ case ISCSI_CONN_WR_STATE_IN_LIST:
-+ pos += scnprintf(&p[pos], size - pos, "%s", "in_write_list ");
-+ break;
-+ case ISCSI_CONN_WR_STATE_SPACE_WAIT:
-+ pos += scnprintf(&p[pos], size - pos, "%s", "space_waiting ");
-+ break;
-+ }
-+
-+ if (test_bit(ISCSI_CONN_REINSTATING, &conn->conn_aflags))
-+ pos += scnprintf(&p[pos], size - pos, "%s", "reinstating ");
-+ else if (pos == 0)
-+ pos += scnprintf(&p[pos], size - pos, "%s", "established idle ");
-+
-+out:
-+ return pos;
-+}
-+
-+static void iscsi_conn_release(struct kobject *kobj)
-+{
-+ struct iscsi_conn *conn;
-+
-+ TRACE_ENTRY();
-+
-+ conn = container_of(kobj, struct iscsi_conn, conn_kobj);
-+ if (conn->conn_kobj_release_cmpl != NULL)
-+ complete_all(conn->conn_kobj_release_cmpl);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+struct kobj_type iscsi_conn_ktype = {
-+ .release = iscsi_conn_release,
-+};
-+
-+static ssize_t iscsi_get_initiator_ip(struct iscsi_conn *conn,
-+ char *buf, int size)
-+{
-+ int pos;
-+ struct sock *sk;
-+
-+ TRACE_ENTRY();
-+
-+ sk = conn->sock->sk;
-+ switch (sk->sk_family) {
-+ case AF_INET:
-+ pos = scnprintf(buf, size,
-+ "%pI4", &inet_sk(sk)->inet_daddr);
-+ break;
-+ case AF_INET6:
-+ pos = scnprintf(buf, size, "[%p6]",
-+ &inet6_sk(sk)->daddr);
-+ break;
-+ default:
-+ pos = scnprintf(buf, size, "Unknown family %d",
-+ sk->sk_family);
-+ break;
-+ }
-+
-+ TRACE_EXIT_RES(pos);
-+ return pos;
-+}
-+
-+static ssize_t iscsi_conn_ip_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos;
-+ struct iscsi_conn *conn;
-+
-+ TRACE_ENTRY();
-+
-+ conn = container_of(kobj, struct iscsi_conn, conn_kobj);
-+
-+ pos = iscsi_get_initiator_ip(conn, buf, SCST_SYSFS_BLOCK_SIZE);
-+
-+ TRACE_EXIT_RES(pos);
-+ return pos;
-+}
-+
-+static struct kobj_attribute iscsi_conn_ip_attr =
-+ __ATTR(ip, S_IRUGO, iscsi_conn_ip_show, NULL);
-+
-+static ssize_t iscsi_conn_cid_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos;
-+ struct iscsi_conn *conn;
-+
-+ TRACE_ENTRY();
-+
-+ conn = container_of(kobj, struct iscsi_conn, conn_kobj);
-+
-+ pos = sprintf(buf, "%u", conn->cid);
-+
-+ TRACE_EXIT_RES(pos);
-+ return pos;
-+}
-+
-+static struct kobj_attribute iscsi_conn_cid_attr =
-+ __ATTR(cid, S_IRUGO, iscsi_conn_cid_show, NULL);
-+
-+static ssize_t iscsi_conn_state_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos;
-+ struct iscsi_conn *conn;
-+
-+ TRACE_ENTRY();
-+
-+ conn = container_of(kobj, struct iscsi_conn, conn_kobj);
-+
-+ pos = print_conn_state(buf, SCST_SYSFS_BLOCK_SIZE, conn);
-+
-+ TRACE_EXIT_RES(pos);
-+ return pos;
-+}
-+
-+static struct kobj_attribute iscsi_conn_state_attr =
-+ __ATTR(state, S_IRUGO, iscsi_conn_state_show, NULL);
-+
-+static void conn_sysfs_del(struct iscsi_conn *conn)
-+{
-+ int rc;
-+ DECLARE_COMPLETION_ONSTACK(c);
-+
-+ TRACE_ENTRY();
-+
-+ conn->conn_kobj_release_cmpl = &c;
-+
-+ kobject_del(&conn->conn_kobj);
-+ kobject_put(&conn->conn_kobj);
-+
-+ rc = wait_for_completion_timeout(conn->conn_kobj_release_cmpl, HZ);
-+ if (rc == 0) {
-+ PRINT_INFO("Waiting for releasing sysfs entry "
-+ "for conn %p (%d refs)...", conn,
-+ atomic_read(&conn->conn_kobj.kref.refcount));
-+ wait_for_completion(conn->conn_kobj_release_cmpl);
-+ PRINT_INFO("Done waiting for releasing sysfs "
-+ "entry for conn %p", conn);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int conn_sysfs_add(struct iscsi_conn *conn)
-+{
-+ int res;
-+ struct iscsi_session *session = conn->session;
-+ struct iscsi_conn *c;
-+ int n = 1;
-+ char addr[64];
-+
-+ TRACE_ENTRY();
-+
-+ iscsi_get_initiator_ip(conn, addr, sizeof(addr));
-+
-+restart:
-+ list_for_each_entry(c, &session->conn_list, conn_list_entry) {
-+ if (strcmp(addr, kobject_name(&c->conn_kobj)) == 0) {
-+ char c_addr[64];
-+
-+ iscsi_get_initiator_ip(conn, c_addr, sizeof(c_addr));
-+
-+ TRACE_DBG("Duplicated conn from the same initiator "
-+ "%s found", c_addr);
-+
-+ snprintf(addr, sizeof(addr), "%s_%d", c_addr, n);
-+ n++;
-+ goto restart;
-+ }
-+ }
-+
-+ res = kobject_init_and_add(&conn->conn_kobj, &iscsi_conn_ktype,
-+ scst_sysfs_get_sess_kobj(session->scst_sess), addr);
-+ if (res != 0) {
-+ PRINT_ERROR("Unable create sysfs entries for conn %s",
-+ addr);
-+ goto out;
-+ }
-+
-+ TRACE_DBG("conn %p, conn_kobj %p", conn, &conn->conn_kobj);
-+
-+ res = sysfs_create_file(&conn->conn_kobj,
-+ &iscsi_conn_state_attr.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Unable create sysfs attribute %s for conn %s",
-+ iscsi_conn_state_attr.attr.name, addr);
-+ goto out_err;
-+ }
-+
-+ res = sysfs_create_file(&conn->conn_kobj,
-+ &iscsi_conn_cid_attr.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Unable create sysfs attribute %s for conn %s",
-+ iscsi_conn_cid_attr.attr.name, addr);
-+ goto out_err;
-+ }
-+
-+ res = sysfs_create_file(&conn->conn_kobj,
-+ &iscsi_conn_ip_attr.attr);
-+ if (res != 0) {
-+ PRINT_ERROR("Unable create sysfs attribute %s for conn %s",
-+ iscsi_conn_ip_attr.attr.name, addr);
-+ goto out_err;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_err:
-+ conn_sysfs_del(conn);
-+ goto out;
-+}
-+
-+/* target_mutex supposed to be locked */
-+struct iscsi_conn *conn_lookup(struct iscsi_session *session, u16 cid)
-+{
-+ struct iscsi_conn *conn;
-+
-+ /*
-+ * We need to find the latest conn to correctly handle
-+ * multi-reinstatements
-+ */
-+ list_for_each_entry_reverse(conn, &session->conn_list,
-+ conn_list_entry) {
-+ if (conn->cid == cid)
-+ return conn;
-+ }
-+ return NULL;
-+}
-+
-+void iscsi_make_conn_rd_active(struct iscsi_conn *conn)
-+{
-+ struct iscsi_thread_pool *p = conn->conn_thr_pool;
-+
-+ TRACE_ENTRY();
-+
-+ spin_lock_bh(&p->rd_lock);
-+
-+ TRACE_DBG("conn %p, rd_state %x, rd_data_ready %d", conn,
-+ conn->rd_state, conn->rd_data_ready);
-+
-+ /*
-+ * Start processing ASAP instead of waiting for all the expected data
-+ * to be received, even if several wakeup iterations are needed to
-+ * receive it all, because starting ASAP, i.e. in parallel, is better
-+ * for performance, especially on multi-CPU/core systems.
-+ */
-+
-+ conn->rd_data_ready = 1;
-+
-+ if (conn->rd_state == ISCSI_CONN_RD_STATE_IDLE) {
-+ list_add_tail(&conn->rd_list_entry, &p->rd_list);
-+ conn->rd_state = ISCSI_CONN_RD_STATE_IN_LIST;
-+ wake_up(&p->rd_waitQ);
-+ }
-+
-+ spin_unlock_bh(&p->rd_lock);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+void iscsi_make_conn_wr_active(struct iscsi_conn *conn)
-+{
-+ struct iscsi_thread_pool *p = conn->conn_thr_pool;
-+
-+ TRACE_ENTRY();
-+
-+ spin_lock_bh(&p->wr_lock);
-+
-+ TRACE_DBG("conn %p, wr_state %x, wr_space_ready %d", conn,
-+ conn->wr_state, conn->wr_space_ready);
-+
-+ /*
-+ * Start sending the queued data ASAP, even if not all the needed
-+ * buffers are ready yet and several wakeup iterations are needed to
-+ * send it all, because starting ASAP, i.e. in parallel, is better
-+ * for performance, especially on multi-CPU/core systems.
-+ */
-+
-+ if (conn->wr_state == ISCSI_CONN_WR_STATE_IDLE) {
-+ list_add_tail(&conn->wr_list_entry, &p->wr_list);
-+ conn->wr_state = ISCSI_CONN_WR_STATE_IN_LIST;
-+ wake_up(&p->wr_waitQ);
-+ }
-+
-+ spin_unlock_bh(&p->wr_lock);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+void __mark_conn_closed(struct iscsi_conn *conn, int flags)
-+{
-+ spin_lock_bh(&conn->conn_thr_pool->rd_lock);
-+ conn->closing = 1;
-+ if (flags & ISCSI_CONN_ACTIVE_CLOSE)
-+ conn->active_close = 1;
-+ if (flags & ISCSI_CONN_DELETING)
-+ conn->deleting = 1;
-+ spin_unlock_bh(&conn->conn_thr_pool->rd_lock);
-+
-+ iscsi_make_conn_rd_active(conn);
-+}
-+
-+void mark_conn_closed(struct iscsi_conn *conn)
-+{
-+ __mark_conn_closed(conn, ISCSI_CONN_ACTIVE_CLOSE);
-+}
-+
-+static void __iscsi_state_change(struct sock *sk)
-+{
-+ struct iscsi_conn *conn = sk->sk_user_data;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
-+ if (!conn->closing) {
-+ PRINT_ERROR("Connection with initiator %s "
-+ "unexpectedly closed!",
-+ conn->session->initiator_name);
-+ TRACE_MGMT_DBG("conn %p, sk state %d", conn,
-+ sk->sk_state);
-+ __mark_conn_closed(conn, 0);
-+ }
-+ } else
-+ iscsi_make_conn_rd_active(conn);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void iscsi_state_change(struct sock *sk)
-+{
-+ struct iscsi_conn *conn = sk->sk_user_data;
-+
-+ __iscsi_state_change(sk);
-+ conn->old_state_change(sk);
-+
-+ return;
-+}
-+
-+static void iscsi_data_ready(struct sock *sk, int len)
-+{
-+ struct iscsi_conn *conn = sk->sk_user_data;
-+
-+ TRACE_ENTRY();
-+
-+ iscsi_make_conn_rd_active(conn);
-+
-+ conn->old_data_ready(sk, len);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+void __iscsi_write_space_ready(struct iscsi_conn *conn)
-+{
-+ struct iscsi_thread_pool *p = conn->conn_thr_pool;
-+
-+ TRACE_ENTRY();
-+
-+ spin_lock_bh(&p->wr_lock);
-+ conn->wr_space_ready = 1;
-+ if (conn->wr_state == ISCSI_CONN_WR_STATE_SPACE_WAIT) {
-+ TRACE_DBG("wr space ready (conn %p)", conn);
-+ list_add_tail(&conn->wr_list_entry, &p->wr_list);
-+ conn->wr_state = ISCSI_CONN_WR_STATE_IN_LIST;
-+ wake_up(&p->wr_waitQ);
-+ }
-+ spin_unlock_bh(&p->wr_lock);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void iscsi_write_space_ready(struct sock *sk)
-+{
-+ struct iscsi_conn *conn = sk->sk_user_data;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Write space ready for conn %p", conn);
-+
-+ __iscsi_write_space_ready(conn);
-+
-+ conn->old_write_space(sk);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void conn_rsp_timer_fn(unsigned long arg)
-+{
-+ struct iscsi_conn *conn = (struct iscsi_conn *)arg;
-+ struct iscsi_cmnd *cmnd;
-+ unsigned long j = jiffies;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Timer (conn %p)", conn);
-+
-+ spin_lock_bh(&conn->write_list_lock);
-+
-+ if (!list_empty(&conn->write_timeout_list)) {
-+ unsigned long timeout_time;
-+ cmnd = list_entry(conn->write_timeout_list.next,
-+ struct iscsi_cmnd, write_timeout_list_entry);
-+
-+ timeout_time = j + iscsi_get_timeout(cmnd) + ISCSI_ADD_SCHED_TIME;
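-+ /*
-+ * ISCSI_ADD_SCHED_TIME pads the re-arm time so the timer fires
-+ * slightly after the command's real deadline, presumably so the
-+ * expiry check below then succeeds despite scheduling latency.
-+ */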
-+
-+ if (unlikely(time_after_eq(j, iscsi_get_timeout_time(cmnd)))) {
-+ if (!conn->closing) {
-+ PRINT_ERROR("Timeout %ld sec sending data/waiting "
-+ "for reply to/from initiator "
-+ "%s (SID %llx), closing connection",
-+ iscsi_get_timeout(cmnd)/HZ,
-+ conn->session->initiator_name,
-+ (long long unsigned int)
-+ conn->session->sid);
-+ /*
-+ * We must call mark_conn_closed() outside of
-+ * write_list_lock or we will have a circular
-+ * locking dependency with rd_lock.
-+ */
-+ spin_unlock_bh(&conn->write_list_lock);
-+ mark_conn_closed(conn);
-+ goto out;
-+ }
-+ } else if (!timer_pending(&conn->rsp_timer) ||
-+ time_after(conn->rsp_timer.expires, timeout_time)) {
-+ TRACE_DBG("Restarting timer on %ld (conn %p)",
-+ timeout_time, conn);
-+ /*
-+ * The timer might have been restarted while we were
-+ * entering here.
-+ *
-+ * Since write_timeout_list is not empty, it is safe
-+ * to restart the timer, because we cannot race with
-+ * del_timer_sync() in conn_free().
-+ */
-+ mod_timer(&conn->rsp_timer, timeout_time);
-+ }
-+ }
-+
-+ spin_unlock_bh(&conn->write_list_lock);
-+
-+ if (unlikely(conn->conn_tm_active)) {
-+ TRACE_MGMT_DBG("TM active: making conn %p RD active", conn);
-+ iscsi_make_conn_rd_active(conn);
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void conn_nop_in_delayed_work_fn(struct delayed_work *work)
-+{
-+ struct iscsi_conn *conn = container_of(work, struct iscsi_conn,
-+ nop_in_delayed_work);
-+
-+ TRACE_ENTRY();
-+
-+ if (time_after_eq(jiffies, conn->last_rcv_time +
-+ conn->nop_in_interval)) {
-+ iscsi_send_nop_in(conn);
-+ }
-+
-+ if ((conn->nop_in_interval > 0) &&
-+ !test_bit(ISCSI_CONN_SHUTTINGDOWN, &conn->conn_aflags)) {
-+ TRACE_DBG("Reschedule Nop-In work for conn %p", conn);
-+ schedule_delayed_work(&conn->nop_in_delayed_work,
-+ conn->nop_in_interval + ISCSI_ADD_SCHED_TIME);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Must be called from rd thread only */
-+void iscsi_check_tm_data_wait_timeouts(struct iscsi_conn *conn, bool force)
-+{
-+ struct iscsi_cmnd *cmnd;
-+ unsigned long j = jiffies;
-+ bool aborted_cmds_pending;
-+ unsigned long timeout_time = j + ISCSI_TM_DATA_WAIT_TIMEOUT +
-+ ISCSI_ADD_SCHED_TIME;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG_FLAG(TRACE_MGMT_DEBUG, "conn %p, read_cmnd %p, read_state "
-+ "%d, j %ld (TIMEOUT %d, force %d)", conn, conn->read_cmnd,
-+ conn->read_state, j,
-+ ISCSI_TM_DATA_WAIT_TIMEOUT + ISCSI_ADD_SCHED_TIME, force);
-+
-+ iscsi_extracheck_is_rd_thread(conn);
-+
-+again:
-+ spin_lock_bh(&conn->conn_thr_pool->rd_lock);
-+ spin_lock(&conn->write_list_lock);
-+
-+ aborted_cmds_pending = false;
-+ list_for_each_entry(cmnd, &conn->write_timeout_list,
-+ write_timeout_list_entry) {
-+ /*
-+ * This should not happen, because DATA OUT commands can't get
-+ * into write_timeout_list.
-+ */
-+ BUG_ON(cmnd->cmd_req != NULL);
-+
-+ if (test_bit(ISCSI_CMD_ABORTED, &cmnd->prelim_compl_flags)) {
-+ TRACE_MGMT_DBG("Checking aborted cmnd %p (scst_state "
-+ "%d, on_write_timeout_list %d, write_start "
-+ "%ld, r2t_len_to_receive %d)", cmnd,
-+ cmnd->scst_state, cmnd->on_write_timeout_list,
-+ cmnd->write_start, cmnd->r2t_len_to_receive);
-+ if ((cmnd == conn->read_cmnd) ||
-+ cmnd->data_out_in_data_receiving) {
-+ BUG_ON((cmnd == conn->read_cmnd) && force);
-+ /*
-+ * We can't abort a command waiting for data from
-+ * the net, because otherwise we risk getting out
-+ * of sync with the sender, so we have to wait
-+ * until the timeout timer fires and closes this
-+ * connection.
-+ */
-+ TRACE_MGMT_DBG("Aborted cmnd %p is %s, "
-+ "keep waiting", cmnd,
-+ (cmnd == conn->read_cmnd) ? "RX cmnd" :
-+ "waiting for DATA OUT data");
-+ goto cont;
-+ }
-+ if ((cmnd->r2t_len_to_receive != 0) &&
-+ (time_after_eq(j, cmnd->write_start + ISCSI_TM_DATA_WAIT_TIMEOUT) ||
-+ force)) {
-+ spin_unlock(&conn->write_list_lock);
-+ spin_unlock_bh(&conn->conn_thr_pool->rd_lock);
-+ iscsi_fail_data_waiting_cmnd(cmnd);
-+ goto again;
-+ }
-+cont:
-+ aborted_cmds_pending = true;
-+ }
-+ }
-+
-+ if (aborted_cmds_pending) {
-+ if (!force &&
-+ (!timer_pending(&conn->rsp_timer) ||
-+ time_after(conn->rsp_timer.expires, timeout_time))) {
-+ TRACE_MGMT_DBG("Mod timer on %ld (conn %p)",
-+ timeout_time, conn);
-+ mod_timer(&conn->rsp_timer, timeout_time);
-+ }
-+ } else {
-+ TRACE_MGMT_DBG("Clearing conn_tm_active for conn %p", conn);
-+ conn->conn_tm_active = 0;
-+ }
-+
-+ spin_unlock(&conn->write_list_lock);
-+ spin_unlock_bh(&conn->conn_thr_pool->rd_lock);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* target_mutex supposed to be locked */
-+void conn_reinst_finished(struct iscsi_conn *conn)
-+{
-+ struct iscsi_cmnd *cmnd, *t;
-+
-+ TRACE_ENTRY();
-+
-+ clear_bit(ISCSI_CONN_REINSTATING, &conn->conn_aflags);
-+
-+ list_for_each_entry_safe(cmnd, t, &conn->reinst_pending_cmd_list,
-+ reinst_pending_cmd_list_entry) {
-+ TRACE_MGMT_DBG("Restarting reinst pending cmnd %p",
-+ cmnd);
-+
-+ list_del(&cmnd->reinst_pending_cmd_list_entry);
-+
-+ /* Restore the state for preliminary completion/cmnd_done() */
-+ cmnd->scst_state = ISCSI_CMD_STATE_AFTER_PREPROC;
-+
-+ iscsi_restart_cmnd(cmnd);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void conn_activate(struct iscsi_conn *conn)
-+{
-+ TRACE_MGMT_DBG("Enabling conn %p", conn);
-+
-+ /* Catch double bind */
-+ BUG_ON(conn->sock->sk->sk_state_change == iscsi_state_change);
-+
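-+ /*
-+ * Hook the socket callbacks under sk_callback_lock so that TCP state
-+ * changes, incoming data and write-space events wake this connection's
-+ * rd/wr threads. The original callbacks are saved so the replacements
-+ * can chain to them (see iscsi_state_change() and friends).
-+ */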
-+ write_lock_bh(&conn->sock->sk->sk_callback_lock);
-+
-+ conn->old_state_change = conn->sock->sk->sk_state_change;
-+ conn->sock->sk->sk_state_change = iscsi_state_change;
-+
-+ conn->old_data_ready = conn->sock->sk->sk_data_ready;
-+ conn->sock->sk->sk_data_ready = iscsi_data_ready;
-+
-+ conn->old_write_space = conn->sock->sk->sk_write_space;
-+ conn->sock->sk->sk_write_space = iscsi_write_space_ready;
-+
-+ write_unlock_bh(&conn->sock->sk->sk_callback_lock);
-+
-+ /*
-+ * Check if the conn was closed while we were initializing it.
-+ * This function will make the conn rd_active, if necessary.
-+ */
-+ __iscsi_state_change(conn->sock->sk);
-+
-+ return;
-+}
-+
-+/*
-+ * Note: the code below passes a kernel space pointer (&opt) to setsockopt()
-+ * while the declaration of setsockopt specifies that it expects a user space
-+ * pointer. This seems to work fine, and this approach is also used in some
-+ * other parts of the Linux kernel (see e.g. fs/ocfs2/cluster/tcp.c).
-+ */
-+static int conn_setup_sock(struct iscsi_conn *conn)
-+{
-+ int res = 0;
-+ int opt = 1;
-+ mm_segment_t oldfs;
-+ struct iscsi_session *session = conn->session;
-+
-+ TRACE_DBG("%llx", (long long unsigned int)session->sid);
-+
-+ conn->sock = SOCKET_I(conn->file->f_dentry->d_inode);
-+
-+ if (conn->sock->ops->sendpage == NULL) {
-+ PRINT_ERROR("Socket for sid %llx doesn't support sendpage()",
-+ (long long unsigned int)session->sid);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+#if 0
-+ conn->sock->sk->sk_allocation = GFP_NOIO;
-+#endif
-+ conn->sock->sk->sk_user_data = conn;
-+
-+ oldfs = get_fs();
-+ set_fs(get_ds());
-+ conn->sock->ops->setsockopt(conn->sock, SOL_TCP, TCP_NODELAY,
-+ (void __force __user *)&opt, sizeof(opt));
-+ set_fs(oldfs);
-+
-+out:
-+ return res;
-+}
-+
-+/* target_mutex supposed to be locked */
-+int conn_free(struct iscsi_conn *conn)
-+{
-+ struct iscsi_session *session = conn->session;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Freeing conn %p (sess=%p, %#Lx %u)", conn,
-+ session, (long long unsigned int)session->sid, conn->cid);
-+
-+ del_timer_sync(&conn->rsp_timer);
-+
-+ conn_sysfs_del(conn);
-+
-+ BUG_ON(atomic_read(&conn->conn_ref_cnt) != 0);
-+ BUG_ON(!list_empty(&conn->cmd_list));
-+ BUG_ON(!list_empty(&conn->write_list));
-+ BUG_ON(!list_empty(&conn->write_timeout_list));
-+ BUG_ON(conn->conn_reinst_successor != NULL);
-+ BUG_ON(!test_bit(ISCSI_CONN_SHUTTINGDOWN, &conn->conn_aflags));
-+
-+ /* Just in case the new conn gets freed before the old one */
-+ if (test_bit(ISCSI_CONN_REINSTATING, &conn->conn_aflags)) {
-+ struct iscsi_conn *c;
-+ TRACE_MGMT_DBG("Freeing being reinstated conn %p", conn);
-+ list_for_each_entry(c, &session->conn_list,
-+ conn_list_entry) {
-+ if (c->conn_reinst_successor == conn) {
-+ c->conn_reinst_successor = NULL;
-+ break;
-+ }
-+ }
-+ }
-+
-+ list_del(&conn->conn_list_entry);
-+
-+ fput(conn->file);
-+ conn->file = NULL;
-+ conn->sock = NULL;
-+
-+ free_page((unsigned long)conn->read_iov);
-+
-+ kfree(conn);
-+
-+ if (list_empty(&session->conn_list)) {
-+ BUG_ON(session->sess_reinst_successor != NULL);
-+ session_free(session, true);
-+ }
-+
-+ return 0;
-+}
-+
-+/* target_mutex supposed to be locked */
-+static int iscsi_conn_alloc(struct iscsi_session *session,
-+ struct iscsi_kern_conn_info *info, struct iscsi_conn **new_conn)
-+{
-+ struct iscsi_conn *conn;
-+ int res = 0;
-+
-+ conn = kzalloc(sizeof(*conn), GFP_KERNEL);
-+ if (!conn) {
-+ res = -ENOMEM;
-+ goto out_err;
-+ }
-+
-+ TRACE_MGMT_DBG("Creating connection %p for sid %#Lx, cid %u", conn,
-+ (long long unsigned int)session->sid, info->cid);
-+
-+ /* When changing this, change ISCSI_CONN_IOV_MAX as well! */
-+ conn->read_iov = (struct iovec *)get_zeroed_page(GFP_KERNEL);
-+ if (conn->read_iov == NULL) {
-+ res = -ENOMEM;
-+ goto out_err_free_conn;
-+ }
-+
-+ atomic_set(&conn->conn_ref_cnt, 0);
-+ conn->session = session;
-+ if (session->sess_reinstating)
-+ __set_bit(ISCSI_CONN_REINSTATING, &conn->conn_aflags);
-+ conn->cid = info->cid;
-+ conn->stat_sn = info->stat_sn;
-+ conn->exp_stat_sn = info->exp_stat_sn;
-+ conn->rd_state = ISCSI_CONN_RD_STATE_IDLE;
-+ conn->wr_state = ISCSI_CONN_WR_STATE_IDLE;
-+
-+ conn->hdigest_type = session->sess_params.header_digest;
-+ conn->ddigest_type = session->sess_params.data_digest;
-+ res = digest_init(conn);
-+ if (res != 0)
-+ goto out_free_iov;
-+
-+ conn->target = session->target;
-+ spin_lock_init(&conn->cmd_list_lock);
-+ INIT_LIST_HEAD(&conn->cmd_list);
-+ spin_lock_init(&conn->write_list_lock);
-+ INIT_LIST_HEAD(&conn->write_list);
-+ INIT_LIST_HEAD(&conn->write_timeout_list);
-+ setup_timer(&conn->rsp_timer, conn_rsp_timer_fn, (unsigned long)conn);
-+ init_waitqueue_head(&conn->read_state_waitQ);
-+ init_completion(&conn->ready_to_free);
-+ INIT_LIST_HEAD(&conn->reinst_pending_cmd_list);
-+ INIT_LIST_HEAD(&conn->nop_req_list);
-+ spin_lock_init(&conn->nop_req_list_lock);
-+
-+ conn->conn_thr_pool = session->sess_thr_pool;
-+
-+ conn->nop_in_ttt = 0;
-+ INIT_DELAYED_WORK(&conn->nop_in_delayed_work,
-+ (void (*)(struct work_struct *))conn_nop_in_delayed_work_fn);
-+ conn->last_rcv_time = jiffies;
-+ conn->data_rsp_timeout = session->tgt_params.rsp_timeout * HZ;
-+ conn->nop_in_interval = session->tgt_params.nop_in_interval * HZ;
-+ conn->nop_in_timeout = session->tgt_params.nop_in_timeout * HZ;
-+ if (conn->nop_in_interval > 0) {
-+ TRACE_DBG("Schedule Nop-In work for conn %p", conn);
-+ schedule_delayed_work(&conn->nop_in_delayed_work,
-+ conn->nop_in_interval + ISCSI_ADD_SCHED_TIME);
-+ }
-+
-+ conn->file = fget(info->fd);
-+
-+ res = conn_setup_sock(conn);
-+ if (res != 0)
-+ goto out_fput;
-+
-+ res = conn_sysfs_add(conn);
-+ if (res != 0)
-+ goto out_fput;
-+
-+ list_add_tail(&conn->conn_list_entry, &session->conn_list);
-+
-+ *new_conn = conn;
-+
-+out:
-+ return res;
-+
-+out_fput:
-+ fput(conn->file);
-+
-+out_free_iov:
-+ free_page((unsigned long)conn->read_iov);
-+
-+out_err_free_conn:
-+ kfree(conn);
-+
-+out_err:
-+ goto out;
-+}
-+
-+/* target_mutex supposed to be locked */
-+int __add_conn(struct iscsi_session *session, struct iscsi_kern_conn_info *info)
-+{
-+ struct iscsi_conn *conn, *new_conn = NULL;
-+ int err;
-+ bool reinstatement = false;
-+
-+ conn = conn_lookup(session, info->cid);
-+ if ((conn != NULL) &&
-+ !test_bit(ISCSI_CONN_SHUTTINGDOWN, &conn->conn_aflags)) {
-+ /* conn reinstatement */
-+ reinstatement = true;
-+ } else if (!list_empty(&session->conn_list)) {
-+ err = -EEXIST;
-+ goto out;
-+ }
-+
-+ err = iscsi_conn_alloc(session, info, &new_conn);
-+ if (err != 0)
-+ goto out;
-+
-+ if (reinstatement) {
-+ TRACE_MGMT_DBG("Reinstating conn (old %p, new %p)", conn,
-+ new_conn);
-+ conn->conn_reinst_successor = new_conn;
-+ __set_bit(ISCSI_CONN_REINSTATING, &new_conn->conn_aflags);
-+ __mark_conn_closed(conn, 0);
-+ }
-+
-+ conn_activate(new_conn);
-+
-+out:
-+ return err;
-+}
-+
-+/* target_mutex supposed to be locked */
-+int __del_conn(struct iscsi_session *session, struct iscsi_kern_conn_info *info)
-+{
-+ struct iscsi_conn *conn;
-+ int err = -EEXIST;
-+
-+ conn = conn_lookup(session, info->cid);
-+ if (!conn) {
-+ PRINT_WARNING("Connection %d not found", info->cid);
-+ return err;
-+ }
-+
-+ PRINT_INFO("Deleting connection with initiator %s (%p)",
-+ conn->session->initiator_name, conn);
-+
-+ __mark_conn_closed(conn, ISCSI_CONN_ACTIVE_CLOSE|ISCSI_CONN_DELETING);
-+
-+ return 0;
-+}
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+
-+void iscsi_extracheck_is_rd_thread(struct iscsi_conn *conn)
-+{
-+ if (unlikely(current != conn->rd_task)) {
-+ printk(KERN_EMERG "conn %p rd_task != current %p (pid %d)\n",
-+ conn, current, current->pid);
-+ while (in_softirq())
-+ local_bh_enable();
-+ printk(KERN_EMERG "rd_state %x\n", conn->rd_state);
-+ printk(KERN_EMERG "rd_task %p\n", conn->rd_task);
-+ printk(KERN_EMERG "rd_task->pid %d\n", conn->rd_task->pid);
-+ BUG();
-+ }
-+}
-+
-+void iscsi_extracheck_is_wr_thread(struct iscsi_conn *conn)
-+{
-+ if (unlikely(current != conn->wr_task)) {
-+ printk(KERN_EMERG "conn %p wr_task != current %p (pid %d)\n",
-+ conn, current, current->pid);
-+ while (in_softirq())
-+ local_bh_enable();
-+ printk(KERN_EMERG "wr_state %x\n", conn->wr_state);
-+ printk(KERN_EMERG "wr_task %p\n", conn->wr_task);
-+ printk(KERN_EMERG "wr_task->pid %d\n", conn->wr_task->pid);
-+ BUG();
-+ }
-+}
-+
-+#endif /* CONFIG_SCST_EXTRACHECKS */
-diff -uprN orig/linux-3.2/drivers/scst/iscsi-scst/digest.c linux-3.2/drivers/scst/iscsi-scst/digest.c
---- orig/linux-3.2/drivers/scst/iscsi-scst/digest.c
-+++ linux-3.2/drivers/scst/iscsi-scst/digest.c
-@@ -0,0 +1,245 @@
-+/*
-+ * iSCSI digest handling.
-+ *
-+ * Copyright (C) 2004 - 2006 Xiranet Communications GmbH
-+ * <arne.redlich@xiranet.com>
-+ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/types.h>
-+#include <linux/scatterlist.h>
-+
-+#include "iscsi.h"
-+#include "digest.h"
-+#include <linux/crc32c.h>
-+
-+void digest_alg_available(int *val)
-+{
-+#if defined(CONFIG_LIBCRC32C_MODULE) || defined(CONFIG_LIBCRC32C)
-+ int crc32c = 1;
-+#else
-+ int crc32c = 0;
-+#endif
-+
-+ if ((*val & DIGEST_CRC32C) && !crc32c) {
-+ PRINT_ERROR("%s", "CRC32C digest algorithm not available "
-+ "in kernel");
-+ /* Clear the unsupported algorithm bit ('|=' here was a bug) */
-+ *val &= ~DIGEST_CRC32C;
-+ }
-+}
-+
-+/**
-+ * digest_init - initialize support for digest calculation
-+ * @conn: ptr to connection to make use of digests
-+ *
-+ * Returns 0 on success, < 0 on error.
-+ */
-+int digest_init(struct iscsi_conn *conn)
-+{
-+ if (!(conn->hdigest_type & DIGEST_ALL))
-+ conn->hdigest_type = DIGEST_NONE;
-+
-+ if (!(conn->ddigest_type & DIGEST_ALL))
-+ conn->ddigest_type = DIGEST_NONE;
-+
-+ return 0;
-+}
-+
-+static __be32 evaluate_crc32_from_sg(struct scatterlist *sg, int nbytes,
-+ uint32_t padding)
-+{
-+ u32 crc = ~0;
-+ int pad_bytes = ((nbytes + 3) & -4) - nbytes;
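-+ /*
-+ * iSCSI digests cover the segment padded to a 4-byte boundary, so
-+ * round nbytes up to the next multiple of 4; e.g. nbytes = 13 gives
-+ * pad_bytes = 3.
-+ */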
-+
-+#ifdef CONFIG_SCST_ISCSI_DEBUG_DIGEST_FAILURES
-+ if ((scst_random() % 100000) == 752) {
-+ PRINT_INFO("%s", "Simulating digest failure");
-+ return 0;
-+ }
-+#endif
-+
-+#if defined(CONFIG_LIBCRC32C_MODULE) || defined(CONFIG_LIBCRC32C)
-+ while (nbytes > 0) {
-+ int d = min(nbytes, (int)(sg->length));
-+ crc = crc32c(crc, sg_virt(sg), d);
-+ nbytes -= d;
-+ sg++;
-+ }
-+
-+ if (pad_bytes)
-+ crc = crc32c(crc, (u8 *)&padding, pad_bytes);
-+#endif
-+
-+ return (__force __be32)~cpu_to_le32(crc);
-+}
-+
-+static __be32 digest_header(struct iscsi_pdu *pdu)
-+{
-+ struct scatterlist sg[2];
-+ unsigned int nbytes = sizeof(struct iscsi_hdr);
-+ int asize = (pdu->ahssize + 3) & -4;
-+
-+ sg_init_table(sg, 2);
-+
-+ sg_set_buf(&sg[0], &pdu->bhs, nbytes);
-+ if (pdu->ahssize) {
-+ sg_set_buf(&sg[1], pdu->ahs, asize);
-+ nbytes += asize;
-+ }
-+ EXTRACHECKS_BUG_ON((nbytes & 3) != 0);
-+ return evaluate_crc32_from_sg(sg, nbytes, 0);
-+}
-+
-+static __be32 digest_data(struct iscsi_cmnd *cmd, u32 size, u32 offset,
-+ uint32_t padding)
-+{
-+ struct scatterlist *sg = cmd->sg;
-+ int idx, count;
-+ struct scatterlist saved_sg;
-+ __be32 crc;
-+
-+ offset += sg[0].offset;
-+ idx = offset >> PAGE_SHIFT;
-+ offset &= ~PAGE_MASK;
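-+ /*
-+ * The byte offset is split into a page index and an in-page offset;
-+ * e.g. with 4 KB pages, offset 9000 becomes idx 2, offset 808.
-+ */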
-+
-+ count = get_pgcnt(size, offset);
-+
-+ TRACE_DBG("req %p, idx %d, count %d, sg_cnt %d, size %d, "
-+ "offset %d", cmd, idx, count, cmd->sg_cnt, size, offset);
-+ BUG_ON(idx + count > cmd->sg_cnt);
-+
-+ saved_sg = sg[idx];
-+ sg[idx].offset = offset;
-+ sg[idx].length -= offset - saved_sg.offset;
-+
-+ crc = evaluate_crc32_from_sg(sg + idx, size, padding);
-+
-+ sg[idx] = saved_sg;
-+ return crc;
-+}
-+
-+int digest_rx_header(struct iscsi_cmnd *cmnd)
-+{
-+ __be32 crc;
-+
-+ crc = digest_header(&cmnd->pdu);
-+ if (unlikely(crc != cmnd->hdigest)) {
-+ PRINT_ERROR("%s", "RX header digest failed");
-+ return -EIO;
-+ } else
-+ TRACE_DBG("RX header digest OK for cmd %p", cmnd);
-+
-+ return 0;
-+}
-+
-+void digest_tx_header(struct iscsi_cmnd *cmnd)
-+{
-+ cmnd->hdigest = digest_header(&cmnd->pdu);
-+ TRACE_DBG("TX header digest for cmd %p: %x", cmnd, cmnd->hdigest);
-+}
-+
-+int digest_rx_data(struct iscsi_cmnd *cmnd)
-+{
-+ struct iscsi_cmnd *req;
-+ struct iscsi_data_out_hdr *req_hdr;
-+ u32 offset;
-+ __be32 crc;
-+ int res = 0;
-+
-+ switch (cmnd_opcode(cmnd)) {
-+ case ISCSI_OP_SCSI_DATA_OUT:
-+ req = cmnd->cmd_req;
-+ if (unlikely(req == NULL)) {
-+ /* It can be for prelim completed commands */
-+ req = cmnd;
-+ goto out;
-+ }
-+ req_hdr = (struct iscsi_data_out_hdr *)&cmnd->pdu.bhs;
-+ offset = be32_to_cpu(req_hdr->buffer_offset);
-+ break;
-+
-+ default:
-+ req = cmnd;
-+ offset = 0;
-+ }
-+
-+ /*
-+ * We need to skip the digest check for prelim completed commands,
-+ * because we use a shared data buffer for them, so the check would
-+ * most likely fail. Plus, for such commands sg_cnt is sometimes not
-+ * set correctly (cmnd_prepare_get_rejected_cmd_data() doesn't do it).
-+ */
-+ if (unlikely(req->prelim_compl_flags != 0))
-+ goto out;
-+
-+ /*
-+ * Temporary workaround to avoid crashing on write residual overflows.
-+ * ToDo. Until then, let's always treat data digests as successful for
-+ * such overflows. Ideally, we should allocate one or more additional
-+ * sg's for the overflowed data and free them here or on req release.
-+ * That's not trivial for such a virtually never used case, so let's
-+ * do it when it becomes needed.
-+ */
-+ if (unlikely(offset + cmnd->pdu.datasize > req->bufflen)) {
-+ PRINT_WARNING("Skipping RX data digest check for residual "
-+ "overflow command op %x (data size %d, buffer size %d)",
-+ cmnd_hdr(req)->scb[0], offset + cmnd->pdu.datasize,
-+ req->bufflen);
-+ goto out;
-+ }
-+
-+ crc = digest_data(req, cmnd->pdu.datasize, offset,
-+ cmnd->conn->rpadding);
-+
-+ if (unlikely(crc != cmnd->ddigest)) {
-+ TRACE(TRACE_MINOR|TRACE_MGMT_DEBUG, "%s", "RX data digest "
-+ "failed");
-+ TRACE_MGMT_DBG("Calculated crc %x, ddigest %x, offset %d", crc,
-+ cmnd->ddigest, offset);
-+ iscsi_dump_pdu(&cmnd->pdu);
-+ res = -EIO;
-+ } else
-+ TRACE_DBG("RX data digest OK for cmd %p", cmnd);
-+
-+out:
-+ return res;
-+}
-+
-+void digest_tx_data(struct iscsi_cmnd *cmnd)
-+{
-+ struct iscsi_data_in_hdr *hdr;
-+ u32 offset;
-+
-+ TRACE_DBG("%s:%d req %p, own_sg %d, sg %p, sgcnt %d cmnd %p, "
-+ "own_sg %d, sg %p, sgcnt %d", __func__, __LINE__,
-+ cmnd->parent_req, cmnd->parent_req->own_sg,
-+ cmnd->parent_req->sg, cmnd->parent_req->sg_cnt,
-+ cmnd, cmnd->own_sg, cmnd->sg, cmnd->sg_cnt);
-+
-+ switch (cmnd_opcode(cmnd)) {
-+ case ISCSI_OP_SCSI_DATA_IN:
-+ hdr = (struct iscsi_data_in_hdr *)&cmnd->pdu.bhs;
-+ offset = be32_to_cpu(hdr->buffer_offset);
-+ break;
-+ default:
-+ offset = 0;
-+ }
-+
-+ cmnd->ddigest = digest_data(cmnd, cmnd->pdu.datasize, offset, 0);
-+ TRACE_DBG("TX data digest for cmd %p: %x (offset %d, opcode %x)", cmnd,
-+ cmnd->ddigest, offset, cmnd_opcode(cmnd));
-+}
-diff -uprN orig/linux-3.2/drivers/scst/iscsi-scst/digest.h linux-3.2/drivers/scst/iscsi-scst/digest.h
---- orig/linux-3.2/drivers/scst/iscsi-scst/digest.h
-+++ linux-3.2/drivers/scst/iscsi-scst/digest.h
-@@ -0,0 +1,32 @@
-+/*
-+ * iSCSI digest handling.
-+ *
-+ * Copyright (C) 2004 Xiranet Communications GmbH <arne.redlich@xiranet.com>
-+ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#ifndef __ISCSI_DIGEST_H__
-+#define __ISCSI_DIGEST_H__
-+
-+extern void digest_alg_available(int *val);
-+
-+extern int digest_init(struct iscsi_conn *conn);
-+
-+extern int digest_rx_header(struct iscsi_cmnd *cmnd);
-+extern int digest_rx_data(struct iscsi_cmnd *cmnd);
-+
-+extern void digest_tx_header(struct iscsi_cmnd *cmnd);
-+extern void digest_tx_data(struct iscsi_cmnd *cmnd);
-+
-+#endif /* __ISCSI_DIGEST_H__ */
-diff -uprN orig/linux-3.2/drivers/scst/iscsi-scst/event.c linux-3.2/drivers/scst/iscsi-scst/event.c
---- orig/linux-3.2/drivers/scst/iscsi-scst/event.c
-+++ linux-3.2/drivers/scst/iscsi-scst/event.c
-@@ -0,0 +1,163 @@
-+/*
-+ * Event notification code.
-+ *
-+ * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
-+ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ */
-+
-+#include <linux/module.h>
-+#include <net/tcp.h>
-+#include <scst/iscsi_scst.h>
-+#include "iscsi.h"
-+
-+static struct sock *nl;
-+static u32 iscsid_pid;
-+
-+static int event_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
-+{
-+ u32 pid;
-+
-+ pid = NETLINK_CB(skb).pid;
-+ WARN_ON(pid == 0);
-+
-+ iscsid_pid = pid;
-+
-+ return 0;
-+}
-+
-+static void event_recv_skb(struct sk_buff *skb)
-+{
-+ int err;
-+ struct nlmsghdr *nlh;
-+ u32 rlen;
-+
-+ while (skb->len >= NLMSG_SPACE(0)) {
-+ nlh = (struct nlmsghdr *)skb->data;
-+ if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
-+ goto out;
-+ rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-+ if (rlen > skb->len)
-+ rlen = skb->len;
-+ err = event_recv_msg(skb, nlh);
-+ if (err)
-+ netlink_ack(skb, nlh, -err);
-+ else if (nlh->nlmsg_flags & NLM_F_ACK)
-+ netlink_ack(skb, nlh, 0);
-+ skb_pull(skb, rlen);
-+ }
-+
-+out:
-+ return;
-+}
-+
-+/* event_mutex supposed to be held */
-+static int __event_send(const void *buf, int buf_len)
-+{
-+ int res = 0, len;
-+ struct sk_buff *skb;
-+ struct nlmsghdr *nlh;
-+ static u32 seq; /* protected by event_mutex */
-+
-+ TRACE_ENTRY();
-+
-+ if (ctr_open_state != ISCSI_CTR_OPEN_STATE_OPEN)
-+ goto out;
-+
-+ len = NLMSG_SPACE(buf_len);
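-+ /* NLMSG_SPACE() accounts for the aligned netlink header + payload */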
-+
-+ skb = alloc_skb(len, GFP_KERNEL);
-+ if (skb == NULL) {
-+ PRINT_ERROR("alloc_skb() failed (len %d)", len);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ nlh = __nlmsg_put(skb, iscsid_pid, seq++, NLMSG_DONE, buf_len, 0);
-+
-+ memcpy(NLMSG_DATA(nlh), buf, buf_len);
-+ res = netlink_unicast(nl, skb, iscsid_pid, 0);
-+ if (res <= 0) {
-+ if (res != -ECONNREFUSED)
-+ PRINT_ERROR("netlink_unicast() failed: %d", res);
-+ else
-+ TRACE(TRACE_MINOR, "netlink_unicast() failed: %s. "
-+ "Is user space not running?",
-+ "Connection refused");
-+ goto out;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+int event_send(u32 tid, u64 sid, u32 cid, u32 cookie,
-+ enum iscsi_kern_event_code code,
-+ const char *param1, const char *param2)
-+{
-+ int err;
-+ static DEFINE_MUTEX(event_mutex);
-+ struct iscsi_kern_event event;
-+ int param1_size, param2_size;
-+
-+ param1_size = (param1 != NULL) ? strlen(param1) : 0;
-+ param2_size = (param2 != NULL) ? strlen(param2) : 0;
-+
-+ event.tid = tid;
-+ event.sid = sid;
-+ event.cid = cid;
-+ event.code = code;
-+ event.cookie = cookie;
-+ event.param1_size = param1_size;
-+ event.param2_size = param2_size;
-+
-+ mutex_lock(&event_mutex);
-+
-+ err = __event_send(&event, sizeof(event));
-+ if (err <= 0)
-+ goto out_unlock;
-+
-+ if (param1_size > 0) {
-+ err = __event_send(param1, param1_size);
-+ if (err <= 0)
-+ goto out_unlock;
-+ }
-+
-+ if (param2_size > 0) {
-+ err = __event_send(param2, param2_size);
-+ if (err <= 0)
-+ goto out_unlock;
-+ }
-+
-+out_unlock:
-+ mutex_unlock(&event_mutex);
-+ return err;
-+}
-+
-+int __init event_init(void)
-+{
-+ nl = netlink_kernel_create(&init_net, NETLINK_ISCSI_SCST, 1,
-+ event_recv_skb, NULL, THIS_MODULE);
-+ if (!nl) {
-+ PRINT_ERROR("%s", "netlink_kernel_create() failed");
-+ return -ENOMEM;
-+ } else
-+ return 0;
-+}
-+
-+void event_exit(void)
-+{
-+ netlink_kernel_release(nl);
-+}
-diff -uprN orig/linux-3.2/drivers/scst/iscsi-scst/iscsi.c linux-3.2/drivers/scst/iscsi-scst/iscsi.c
---- orig/linux-3.2/drivers/scst/iscsi-scst/iscsi.c
-+++ linux-3.2/drivers/scst/iscsi-scst/iscsi.c
-@@ -0,0 +1,4137 @@
-+/*
-+ * Copyright (C) 2002 - 2003 Ardis Technolgies <roman@ardistech.com>
-+ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/hash.h>
-+#include <linux/kthread.h>
-+#include <linux/scatterlist.h>
-+#include <linux/ctype.h>
-+#include <net/tcp.h>
-+#include <scsi/scsi.h>
-+#include <asm/byteorder.h>
-+#include <asm/unaligned.h>
-+
-+#include "iscsi.h"
-+#include "digest.h"
-+
-+#ifndef GENERATING_UPSTREAM_PATCH
-+#if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
-+#warning Patch put_page_callback-<kernel-version>.patch not applied on your \
-+kernel or CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION \
-+config option not set. ISCSI-SCST will work with degraded \
-+performance. Refer to the README file for details.
-+#endif
-+#endif
-+
-+#define ISCSI_INIT_WRITE_WAKE 0x1
-+
-+static int ctr_major;
-+static const char ctr_name[] = "iscsi-scst-ctl";
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+unsigned long iscsi_trace_flag = ISCSI_DEFAULT_LOG_FLAGS;
-+#endif
-+
-+static struct kmem_cache *iscsi_cmnd_cache;
-+
-+static DEFINE_MUTEX(iscsi_threads_pool_mutex);
-+static LIST_HEAD(iscsi_thread_pools_list);
-+
-+static struct iscsi_thread_pool *iscsi_main_thread_pool;
-+
-+static struct page *dummy_page;
-+static struct scatterlist dummy_sg;
-+
-+static void cmnd_remove_data_wait_hash(struct iscsi_cmnd *cmnd);
-+static void iscsi_send_task_mgmt_resp(struct iscsi_cmnd *req, int status);
-+static void iscsi_check_send_delayed_tm_resp(struct iscsi_session *sess);
-+static void req_cmnd_release(struct iscsi_cmnd *req);
-+static int cmnd_insert_data_wait_hash(struct iscsi_cmnd *cmnd);
-+static void iscsi_cmnd_init_write(struct iscsi_cmnd *rsp, int flags);
-+static void iscsi_set_resid_no_scst_cmd(struct iscsi_cmnd *rsp);
-+static void iscsi_set_resid(struct iscsi_cmnd *rsp);
-+
-+static void iscsi_set_not_received_data_len(struct iscsi_cmnd *req,
-+ unsigned int not_received)
-+{
-+ req->not_received_data_len = not_received;
-+ if (req->scst_cmd != NULL)
-+ scst_cmd_set_write_not_received_data_len(req->scst_cmd,
-+ not_received);
-+ return;
-+}
-+
-+static void req_del_from_write_timeout_list(struct iscsi_cmnd *req)
-+{
-+ struct iscsi_conn *conn;
-+
-+ TRACE_ENTRY();
-+
-+ if (!req->on_write_timeout_list)
-+ goto out;
-+
-+ conn = req->conn;
-+
-+ TRACE_DBG("Deleting cmd %p from conn %p write_timeout_list",
-+ req, conn);
-+
-+ spin_lock_bh(&conn->write_list_lock);
-+
-+ /* Recheck, since it can be changed behind us */
-+ if (unlikely(!req->on_write_timeout_list))
-+ goto out_unlock;
-+
-+ list_del(&req->write_timeout_list_entry);
-+ req->on_write_timeout_list = 0;
-+
-+out_unlock:
-+ spin_unlock_bh(&conn->write_list_lock);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static inline u32 cmnd_write_size(struct iscsi_cmnd *cmnd)
-+{
-+ struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
-+
-+ if (hdr->flags & ISCSI_CMD_WRITE)
-+ return be32_to_cpu(hdr->data_length);
-+ return 0;
-+}
-+
-+static inline int cmnd_read_size(struct iscsi_cmnd *cmnd)
-+{
-+ struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
-+
-+ if (hdr->flags & ISCSI_CMD_READ) {
-+ struct iscsi_ahs_hdr *ahdr;
-+
-+ if (!(hdr->flags & ISCSI_CMD_WRITE))
-+ return be32_to_cpu(hdr->data_length);
-+
-+ ahdr = (struct iscsi_ahs_hdr *)cmnd->pdu.ahs;
-+ if (ahdr != NULL) {
-+ uint8_t *p = (uint8_t *)ahdr;
-+ unsigned int size = 0;
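-+ /*
-+ * Walk the AHS chain looking for an Expected Bidirectional
-+ * Read Data Length AHS (RFC 3720); each AHS is length-prefixed
-+ * and padded to a 4-byte boundary.
-+ */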
-+ do {
-+ int s;
-+
-+ ahdr = (struct iscsi_ahs_hdr *)p;
-+
-+ if (ahdr->ahstype == ISCSI_AHSTYPE_RLENGTH) {
-+ struct iscsi_rlength_ahdr *rh =
-+ (struct iscsi_rlength_ahdr *)ahdr;
-+ return be32_to_cpu(rh->read_length);
-+ }
-+
-+ s = 3 + be16_to_cpu(ahdr->ahslength);
-+ s = (s + 3) & -4;
-+ size += s;
-+ p += s;
-+ } while (size < cmnd->pdu.ahssize);
-+ }
-+ return -1;
-+ }
-+ return 0;
-+}
-+
-+void iscsi_restart_cmnd(struct iscsi_cmnd *cmnd)
-+{
-+ int status;
-+
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(cmnd->r2t_len_to_receive != 0);
-+ EXTRACHECKS_BUG_ON(cmnd->r2t_len_to_send != 0);
-+
-+ req_del_from_write_timeout_list(cmnd);
-+
-+ /*
-+ * Remove the cmnd from the hash early to keep the hash smaller.
-+ * Also, a hashed req has to be removed from the hash before sending
-+ * the response. Otherwise there can be a race: if for some reason the
-+ * cmd's release (and, hence, removal from the hash) is delayed until
-+ * after the transmission and the initiator sends a cmd with the same
-+ * ITT, the new command will be erroneously rejected as a duplicate.
-+ */
-+ if (cmnd->hashed)
-+ cmnd_remove_data_wait_hash(cmnd);
-+
-+ if (unlikely(test_bit(ISCSI_CONN_REINSTATING,
-+ &cmnd->conn->conn_aflags))) {
-+ struct iscsi_target *target = cmnd->conn->session->target;
-+ bool get_out;
-+
-+ mutex_lock(&target->target_mutex);
-+
-+ get_out = test_bit(ISCSI_CONN_REINSTATING,
-+ &cmnd->conn->conn_aflags);
-+ /* Let's not look dead */
-+ if (scst_cmd_get_cdb(cmnd->scst_cmd)[0] == TEST_UNIT_READY)
-+ get_out = false;
-+
-+ if (!get_out)
-+ goto unlock_cont;
-+
-+ TRACE_MGMT_DBG("Pending cmnd %p, because conn %p is "
-+ "reinstated", cmnd, cmnd->conn);
-+
-+ cmnd->scst_state = ISCSI_CMD_STATE_REINST_PENDING;
-+ list_add_tail(&cmnd->reinst_pending_cmd_list_entry,
-+ &cmnd->conn->reinst_pending_cmd_list);
-+
-+unlock_cont:
-+ mutex_unlock(&target->target_mutex);
-+
-+ if (get_out)
-+ goto out;
-+ }
-+
-+ if (unlikely(cmnd->prelim_compl_flags != 0)) {
-+ if (test_bit(ISCSI_CMD_ABORTED, &cmnd->prelim_compl_flags)) {
-+ TRACE_MGMT_DBG("cmnd %p (scst_cmd %p) aborted", cmnd,
-+ cmnd->scst_cmd);
-+ req_cmnd_release_force(cmnd);
-+ goto out;
-+ }
-+
-+ if (cmnd->scst_cmd == NULL) {
-+ TRACE_MGMT_DBG("Finishing preliminary completed cmd %p "
-+ "with NULL scst_cmd", cmnd);
-+ req_cmnd_release(cmnd);
-+ goto out;
-+ }
-+
-+ status = SCST_PREPROCESS_STATUS_ERROR_SENSE_SET;
-+ } else
-+ status = SCST_PREPROCESS_STATUS_SUCCESS;
-+
-+ cmnd->scst_state = ISCSI_CMD_STATE_RESTARTED;
-+
-+ scst_restart_cmd(cmnd->scst_cmd, status, SCST_CONTEXT_THREAD);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static struct iscsi_cmnd *iscsi_create_tm_clone(struct iscsi_cmnd *cmnd)
-+{
-+ struct iscsi_cmnd *tm_clone;
-+
-+ TRACE_ENTRY();
-+
-+ tm_clone = cmnd_alloc(cmnd->conn, NULL);
-+ if (tm_clone != NULL) {
-+ set_bit(ISCSI_CMD_ABORTED, &tm_clone->prelim_compl_flags);
-+ tm_clone->pdu = cmnd->pdu;
-+
-+ TRACE_MGMT_DBG("TM clone %p for cmnd %p created",
-+ tm_clone, cmnd);
-+ } else
-+ PRINT_ERROR("Failed to create TM clone for cmnd %p", cmnd);
-+
-+ TRACE_EXIT_HRES((unsigned long)tm_clone);
-+ return tm_clone;
-+}
-+
-+void iscsi_fail_data_waiting_cmnd(struct iscsi_cmnd *cmnd)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Failing data waiting cmnd %p (data_out_in_data_receiving %d)",
-+ cmnd, cmnd->data_out_in_data_receiving);
-+
-+ /*
-+ * There is no race with conn_abort(), since all these functions
-+ * are called from a single read thread.
-+ */
-+ iscsi_extracheck_is_rd_thread(cmnd->conn);
-+
-+ /* This cmnd is going to die without response */
-+ cmnd->r2t_len_to_receive = 0;
-+ cmnd->r2t_len_to_send = 0;
-+
-+ if (cmnd->pending) {
-+ struct iscsi_session *session = cmnd->conn->session;
-+ struct iscsi_cmnd *tm_clone;
-+
-+ TRACE_MGMT_DBG("Unpending cmnd %p (sn %u, exp_cmd_sn %u)", cmnd,
-+ cmnd->pdu.bhs.sn, session->exp_cmd_sn);
-+
-+ /*
-+ * If cmnd is pending, then the next command, if any, must be
-+ * pending too. So, just insert a clone instead of cmnd to
-+ * fill the hole in SNs. Then we can release cmnd.
-+ */
-+
-+ tm_clone = iscsi_create_tm_clone(cmnd);
-+
-+ spin_lock(&session->sn_lock);
-+
-+ if (tm_clone != NULL) {
-+ TRACE_MGMT_DBG("Adding tm_clone %p after its cmnd",
-+ tm_clone);
-+ list_add(&tm_clone->pending_list_entry,
-+ &cmnd->pending_list_entry);
-+ }
-+
-+ list_del(&cmnd->pending_list_entry);
-+ cmnd->pending = 0;
-+
-+ spin_unlock(&session->sn_lock);
-+ }
-+
-+ req_cmnd_release_force(cmnd);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+struct iscsi_cmnd *cmnd_alloc(struct iscsi_conn *conn,
-+ struct iscsi_cmnd *parent)
-+{
-+ struct iscsi_cmnd *cmnd;
-+
-+ /* ToDo: __GFP_NOFAIL?? */
-+ cmnd = kmem_cache_zalloc(iscsi_cmnd_cache, GFP_KERNEL|__GFP_NOFAIL);
-+
-+ atomic_set(&cmnd->ref_cnt, 1);
-+ cmnd->scst_state = ISCSI_CMD_STATE_NEW;
-+ cmnd->conn = conn;
-+ cmnd->parent_req = parent;
-+
-+ if (parent == NULL) {
-+ conn_get(conn);
-+
-+#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
-+ atomic_set(&cmnd->net_ref_cnt, 0);
-+#endif
-+ INIT_LIST_HEAD(&cmnd->rsp_cmd_list);
-+ INIT_LIST_HEAD(&cmnd->rx_ddigest_cmd_list);
-+ cmnd->target_task_tag = ISCSI_RESERVED_TAG_CPU32;
-+
-+ spin_lock_bh(&conn->cmd_list_lock);
-+ list_add_tail(&cmnd->cmd_list_entry, &conn->cmd_list);
-+ spin_unlock_bh(&conn->cmd_list_lock);
-+ }
-+
-+ TRACE_DBG("conn %p, parent %p, cmnd %p", conn, parent, cmnd);
-+ return cmnd;
-+}
-+
-+/* Frees a command. Also frees the additional header. */
-+static void cmnd_free(struct iscsi_cmnd *cmnd)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("cmnd %p", cmnd);
-+
-+ if (unlikely(test_bit(ISCSI_CMD_ABORTED, &cmnd->prelim_compl_flags))) {
-+ TRACE_MGMT_DBG("Free aborted cmd %p (scst cmd %p, state %d, "
-+ "parent_req %p)", cmnd, cmnd->scst_cmd,
-+ cmnd->scst_state, cmnd->parent_req);
-+ }
-+
-+ /* Catch users from cmd_list or rsp_cmd_list */
-+ EXTRACHECKS_BUG_ON(atomic_read(&cmnd->ref_cnt) != 0);
-+
-+ kfree(cmnd->pdu.ahs);
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ if (unlikely(cmnd->on_write_list || cmnd->on_write_timeout_list)) {
-+ struct iscsi_scsi_cmd_hdr *req = cmnd_hdr(cmnd);
-+
-+ PRINT_CRIT_ERROR("cmnd %p still on some list?, %x, %x, %x, "
-+ "%x, %x, %x, %x", cmnd, req->opcode, req->scb[0],
-+ req->flags, req->itt, be32_to_cpu(req->data_length),
-+ req->cmd_sn, be32_to_cpu((__force __be32)(cmnd->pdu.datasize)));
-+
-+ if (unlikely(cmnd->parent_req)) {
-+ struct iscsi_scsi_cmd_hdr *preq =
-+ cmnd_hdr(cmnd->parent_req);
-+ PRINT_CRIT_ERROR("%p %x %u", preq, preq->opcode,
-+ preq->scb[0]);
-+ }
-+ BUG();
-+ }
-+#endif
-+
-+ kmem_cache_free(iscsi_cmnd_cache, cmnd);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void iscsi_dec_active_cmds(struct iscsi_cmnd *req)
-+{
-+ struct iscsi_session *sess = req->conn->session;
-+
-+ TRACE_DBG("Decrementing active_cmds (req %p, sess %p, "
-+ "new value %d)", req, sess,
-+ atomic_read(&sess->active_cmds)-1);
-+
-+ EXTRACHECKS_BUG_ON(!req->dec_active_cmds);
-+
-+ atomic_dec(&sess->active_cmds);
-+ smp_mb__after_atomic_dec();
-+ req->dec_active_cmds = 0;
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ if (unlikely(atomic_read(&sess->active_cmds) < 0)) {
-+ PRINT_CRIT_ERROR("active_cmds < 0 (%d)!!",
-+ atomic_read(&sess->active_cmds));
-+ BUG();
-+ }
-+#endif
-+ return;
-+}
-+
-+/* Might be called under some lock and on SIRQ */
-+void cmnd_done(struct iscsi_cmnd *cmnd)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("cmnd %p", cmnd);
-+
-+ if (unlikely(test_bit(ISCSI_CMD_ABORTED, &cmnd->prelim_compl_flags))) {
-+ TRACE_MGMT_DBG("Done aborted cmd %p (scst cmd %p, state %d, "
-+ "parent_req %p)", cmnd, cmnd->scst_cmd,
-+ cmnd->scst_state, cmnd->parent_req);
-+ }
-+
-+ EXTRACHECKS_BUG_ON(cmnd->on_rx_digest_list);
-+ EXTRACHECKS_BUG_ON(cmnd->hashed);
-+ EXTRACHECKS_BUG_ON(cmnd->cmd_req);
-+ EXTRACHECKS_BUG_ON(cmnd->data_out_in_data_receiving);
-+
-+ req_del_from_write_timeout_list(cmnd);
-+
-+ if (cmnd->parent_req == NULL) {
-+ struct iscsi_conn *conn = cmnd->conn;
-+ struct iscsi_cmnd *rsp, *t;
-+
-+ TRACE_DBG("Deleting req %p from conn %p", cmnd, conn);
-+
-+ spin_lock_bh(&conn->cmd_list_lock);
-+ list_del(&cmnd->cmd_list_entry);
-+ spin_unlock_bh(&conn->cmd_list_lock);
-+
-+ conn_put(conn);
-+
-+ EXTRACHECKS_BUG_ON(!list_empty(&cmnd->rx_ddigest_cmd_list));
-+
-+ /* The order between the code above and below is important! */
-+
-+ if ((cmnd->scst_cmd != NULL) || (cmnd->scst_aen != NULL)) {
-+ switch (cmnd->scst_state) {
-+ case ISCSI_CMD_STATE_PROCESSED:
-+ TRACE_DBG("cmd %p PROCESSED", cmnd);
-+ scst_tgt_cmd_done(cmnd->scst_cmd,
-+ SCST_CONTEXT_DIRECT_ATOMIC);
-+ break;
-+
-+ case ISCSI_CMD_STATE_AFTER_PREPROC:
-+ {
-+ /* It can be for some aborted commands */
-+ struct scst_cmd *scst_cmd = cmnd->scst_cmd;
-+ TRACE_DBG("cmd %p AFTER_PREPROC", cmnd);
-+ cmnd->scst_state = ISCSI_CMD_STATE_RESTARTED;
-+ cmnd->scst_cmd = NULL;
-+ scst_restart_cmd(scst_cmd,
-+ SCST_PREPROCESS_STATUS_ERROR_FATAL,
-+ SCST_CONTEXT_THREAD);
-+ break;
-+ }
-+
-+ case ISCSI_CMD_STATE_AEN:
-+ TRACE_DBG("cmd %p AEN PROCESSED", cmnd);
-+ scst_aen_done(cmnd->scst_aen);
-+ break;
-+
-+ case ISCSI_CMD_STATE_OUT_OF_SCST_PRELIM_COMPL:
-+ break;
-+
-+ default:
-+ PRINT_CRIT_ERROR("Unexpected cmnd scst state "
-+ "%d", cmnd->scst_state);
-+ BUG();
-+ break;
-+ }
-+ }
-+
-+ if (cmnd->own_sg) {
-+ TRACE_DBG("own_sg for req %p", cmnd);
-+ if (cmnd->sg != &dummy_sg)
-+ scst_free(cmnd->sg, cmnd->sg_cnt);
-+#ifdef CONFIG_SCST_DEBUG
-+ cmnd->own_sg = 0;
-+ cmnd->sg = NULL;
-+ cmnd->sg_cnt = -1;
-+#endif
-+ }
-+
-+ if (unlikely(cmnd->dec_active_cmds))
-+ iscsi_dec_active_cmds(cmnd);
-+
-+ list_for_each_entry_safe(rsp, t, &cmnd->rsp_cmd_list,
-+ rsp_cmd_list_entry) {
-+ cmnd_free(rsp);
-+ }
-+
-+ cmnd_free(cmnd);
-+ } else {
-+ struct iscsi_cmnd *parent = cmnd->parent_req;
-+
-+ if (cmnd->own_sg) {
-+ TRACE_DBG("own_sg for rsp %p", cmnd);
-+ if ((cmnd->sg != &dummy_sg) && (cmnd->sg != cmnd->rsp_sg))
-+ scst_free(cmnd->sg, cmnd->sg_cnt);
-+#ifdef CONFIG_SCST_DEBUG
-+ cmnd->own_sg = 0;
-+ cmnd->sg = NULL;
-+ cmnd->sg_cnt = -1;
-+#endif
-+ }
-+
-+ EXTRACHECKS_BUG_ON(cmnd->dec_active_cmds);
-+
-+ if (cmnd == parent->main_rsp) {
-+ TRACE_DBG("Finishing main rsp %p (req %p)", cmnd,
-+ parent);
-+ parent->main_rsp = NULL;
-+ }
-+
-+ cmnd_put(parent);
-+ /*
-+ * cmnd is freed on the last put of its parent and may already
-+ * have been freed here!
-+ */
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * The corresponding conn may also get destroyed after this function,
-+ * unless it's called from the read thread!
-+ *
-+ * It must not be called in parallel with iscsi_cmnds_init_write()!
-+ */
-+void req_cmnd_release_force(struct iscsi_cmnd *req)
-+{
-+ struct iscsi_cmnd *rsp, *t;
-+ struct iscsi_conn *conn = req->conn;
-+ LIST_HEAD(cmds_list);
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("req %p", req);
-+
-+ BUG_ON(req == conn->read_cmnd);
-+
-+ spin_lock_bh(&conn->write_list_lock);
-+ list_for_each_entry_safe(rsp, t, &conn->write_list, write_list_entry) {
-+ if (rsp->parent_req != req)
-+ continue;
-+
-+ cmd_del_from_write_list(rsp);
-+
-+ list_add_tail(&rsp->write_list_entry, &cmds_list);
-+ }
-+ spin_unlock_bh(&conn->write_list_lock);
-+
-+ list_for_each_entry_safe(rsp, t, &cmds_list, write_list_entry) {
-+ TRACE_MGMT_DBG("Putting write rsp %p", rsp);
-+ list_del(&rsp->write_list_entry);
-+ cmnd_put(rsp);
-+ }
-+
-+ /* Supposedly nobody can add responses to the list anymore */
-+ list_for_each_entry_reverse(rsp, &req->rsp_cmd_list,
-+ rsp_cmd_list_entry) {
-+ bool r;
-+
-+ if (rsp->force_cleanup_done)
-+ continue;
-+
-+ rsp->force_cleanup_done = 1;
-+
-+ if (cmnd_get_check(rsp))
-+ continue;
-+
-+ spin_lock_bh(&conn->write_list_lock);
-+ r = rsp->on_write_list || rsp->write_processing_started;
-+ spin_unlock_bh(&conn->write_list_lock);
-+
-+ cmnd_put(rsp);
-+
-+ if (r)
-+ continue;
-+
-+ /*
-+ * If neither on_write_list nor write_processing_started is set,
-+ * we can safely put() rsp.
-+ */
-+ TRACE_MGMT_DBG("Putting rsp %p", rsp);
-+ cmnd_put(rsp);
-+ }
-+
-+ if (req->main_rsp != NULL) {
-+ TRACE_MGMT_DBG("Putting main rsp %p", req->main_rsp);
-+ cmnd_put(req->main_rsp);
-+ req->main_rsp = NULL;
-+ }
-+
-+ req_cmnd_release(req);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void req_cmnd_pre_release(struct iscsi_cmnd *req)
-+{
-+ struct iscsi_cmnd *c, *t;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("req %p", req);
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ BUG_ON(req->release_called);
-+ req->release_called = 1;
-+#endif
-+
-+ if (unlikely(test_bit(ISCSI_CMD_ABORTED, &req->prelim_compl_flags))) {
-+ TRACE_MGMT_DBG("Release aborted req cmd %p (scst cmd %p, "
-+ "state %d)", req, req->scst_cmd, req->scst_state);
-+ }
-+
-+ BUG_ON(req->parent_req != NULL);
-+
-+ if (unlikely(req->hashed)) {
-+ /* It can sometimes happen during error recovery */
-+ TRACE_MGMT_DBG("Removing req %p from hash", req);
-+ cmnd_remove_data_wait_hash(req);
-+ }
-+
-+ if (unlikely(req->cmd_req)) {
-+ /* It can sometimes happen during error recovery */
-+ TRACE_MGMT_DBG("Putting cmd_req %p (req %p)", req->cmd_req, req);
-+ req->cmd_req->data_out_in_data_receiving = 0;
-+ cmnd_put(req->cmd_req);
-+ req->cmd_req = NULL;
-+ }
-+
-+ if (unlikely(req->main_rsp != NULL)) {
-+ TRACE_DBG("Sending main rsp %p", req->main_rsp);
-+ if (cmnd_opcode(req) == ISCSI_OP_SCSI_CMD) {
-+ if (req->scst_cmd != NULL)
-+ iscsi_set_resid(req->main_rsp);
-+ else
-+ iscsi_set_resid_no_scst_cmd(req->main_rsp);
-+ }
-+ iscsi_cmnd_init_write(req->main_rsp, ISCSI_INIT_WRITE_WAKE);
-+ req->main_rsp = NULL;
-+ }
-+
-+ list_for_each_entry_safe(c, t, &req->rx_ddigest_cmd_list,
-+ rx_ddigest_cmd_list_entry) {
-+ cmd_del_from_rx_ddigest_list(c);
-+ cmnd_put(c);
-+ }
-+
-+ EXTRACHECKS_BUG_ON(req->pending);
-+
-+ if (unlikely(req->dec_active_cmds))
-+ iscsi_dec_active_cmds(req);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * The corresponding conn may also get destroyed after this function,
-+ * unless it's called from the read thread!
-+ */
-+static void req_cmnd_release(struct iscsi_cmnd *req)
-+{
-+ TRACE_ENTRY();
-+
-+ req_cmnd_pre_release(req);
-+ cmnd_put(req);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * The corresponding conn may also get destroyed after this function,
-+ * unless it's called from the read thread!
-+ */
-+void rsp_cmnd_release(struct iscsi_cmnd *cmnd)
-+{
-+ TRACE_DBG("%p", cmnd);
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ BUG_ON(cmnd->release_called);
-+ cmnd->release_called = 1;
-+#endif
-+
-+ EXTRACHECKS_BUG_ON(cmnd->parent_req == NULL);
-+
-+ cmnd_put(cmnd);
-+ return;
-+}
-+
-+static struct iscsi_cmnd *iscsi_alloc_rsp(struct iscsi_cmnd *parent)
-+{
-+ struct iscsi_cmnd *rsp;
-+
-+ TRACE_ENTRY();
-+
-+ rsp = cmnd_alloc(parent->conn, parent);
-+
-+ TRACE_DBG("Adding rsp %p to parent %p", rsp, parent);
-+ list_add_tail(&rsp->rsp_cmd_list_entry, &parent->rsp_cmd_list);
-+
-+ cmnd_get(parent);
-+
-+ TRACE_EXIT_HRES((unsigned long)rsp);
-+ return rsp;
-+}
-+
-+static inline struct iscsi_cmnd *iscsi_alloc_main_rsp(struct iscsi_cmnd *parent)
-+{
-+ struct iscsi_cmnd *rsp;
-+
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(parent->main_rsp != NULL);
-+
-+ rsp = iscsi_alloc_rsp(parent);
-+ parent->main_rsp = rsp;
-+
-+ TRACE_EXIT_HRES((unsigned long)rsp);
-+ return rsp;
-+}
-+
-+static void iscsi_cmnds_init_write(struct list_head *send, int flags)
-+{
-+ struct iscsi_cmnd *rsp = list_entry(send->next, struct iscsi_cmnd,
-+ write_list_entry);
-+ struct iscsi_conn *conn = rsp->conn;
-+ struct list_head *pos, *next;
-+
-+ BUG_ON(list_empty(send));
-+
-+ if (!(conn->ddigest_type & DIGEST_NONE)) {
-+ list_for_each(pos, send) {
-+ rsp = list_entry(pos, struct iscsi_cmnd,
-+ write_list_entry);
-+
-+ if (rsp->pdu.datasize != 0) {
-+ TRACE_DBG("Doing data digest (%p:%x)", rsp,
-+ cmnd_opcode(rsp));
-+ digest_tx_data(rsp);
-+ }
-+ }
-+ }
-+
-+ spin_lock_bh(&conn->write_list_lock);
-+ list_for_each_safe(pos, next, send) {
-+ rsp = list_entry(pos, struct iscsi_cmnd, write_list_entry);
-+
-+ TRACE_DBG("%p:%x", rsp, cmnd_opcode(rsp));
-+
-+ BUG_ON(conn != rsp->conn);
-+
-+ list_del(&rsp->write_list_entry);
-+ cmd_add_on_write_list(conn, rsp);
-+ }
-+ spin_unlock_bh(&conn->write_list_lock);
-+
-+ if (flags & ISCSI_INIT_WRITE_WAKE)
-+ iscsi_make_conn_wr_active(conn);
-+
-+ return;
-+}
-+
-+static void iscsi_cmnd_init_write(struct iscsi_cmnd *rsp, int flags)
-+{
-+ LIST_HEAD(head);
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ if (unlikely(rsp->on_write_list)) {
-+ PRINT_CRIT_ERROR("cmd already on write list (%x %x %x "
-+ "%u %u %d %d", rsp->pdu.bhs.itt,
-+ cmnd_opcode(rsp), cmnd_scsicode(rsp),
-+ rsp->hdigest, rsp->ddigest,
-+ list_empty(&rsp->rsp_cmd_list), rsp->hashed);
-+ BUG();
-+ }
-+#endif
-+ list_add_tail(&rsp->write_list_entry, &head);
-+ iscsi_cmnds_init_write(&head, flags);
-+ return;
-+}
-+
-+static void iscsi_set_resid_no_scst_cmd(struct iscsi_cmnd *rsp)
-+{
-+ struct iscsi_cmnd *req = rsp->parent_req;
-+ struct iscsi_scsi_cmd_hdr *req_hdr = cmnd_hdr(req);
-+ struct iscsi_scsi_rsp_hdr *rsp_hdr = (struct iscsi_scsi_rsp_hdr *)&rsp->pdu.bhs;
-+ int resid, out_resid;
-+
-+ TRACE_ENTRY();
-+
-+ BUG_ON(req->scst_cmd != NULL);
-+
-+ TRACE_DBG("req %p, rsp %p, outstanding_r2t %d, r2t_len_to_receive %d, "
-+ "r2t_len_to_send %d, not_received_data_len %d", req, rsp,
-+ req->outstanding_r2t, req->r2t_len_to_receive,
-+ req->r2t_len_to_send, req->not_received_data_len);
-+
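-+ /*
-+ * Residual example: if the initiator announced a 4096-byte write
-+ * but 1024 bytes were never received, RESIDUAL_UNDERFLOW is set
-+ * with residual_count = 1024 so the initiator knows how much of
-+ * the transfer was not carried out.
-+ */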
-+ if ((req_hdr->flags & ISCSI_CMD_READ) &&
-+ (req_hdr->flags & ISCSI_CMD_WRITE)) {
-+ out_resid = req->not_received_data_len;
-+ if (out_resid > 0) {
-+ rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
-+ rsp_hdr->residual_count = cpu_to_be32(out_resid);
-+ } else if (out_resid < 0) {
-+ out_resid = -out_resid;
-+ rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_OVERFLOW;
-+ rsp_hdr->residual_count = cpu_to_be32(out_resid);
-+ }
-+
-+ resid = cmnd_read_size(req);
-+ if (resid > 0) {
-+ rsp_hdr->flags |= ISCSI_FLG_BIRESIDUAL_UNDERFLOW;
-+ rsp_hdr->bi_residual_count = cpu_to_be32(resid);
-+ } else if (resid < 0) {
-+ resid = -resid;
-+ rsp_hdr->flags |= ISCSI_FLG_BIRESIDUAL_OVERFLOW;
-+ rsp_hdr->bi_residual_count = cpu_to_be32(resid);
-+ }
-+ } else if (req_hdr->flags & ISCSI_CMD_READ) {
-+ resid = be32_to_cpu(req_hdr->data_length);
-+ if (resid > 0) {
-+ rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
-+ rsp_hdr->residual_count = cpu_to_be32(resid);
-+ }
-+ } else if (req_hdr->flags & ISCSI_CMD_WRITE) {
-+ resid = req->not_received_data_len;
-+ if (resid > 0) {
-+ rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
-+ rsp_hdr->residual_count = cpu_to_be32(resid);
-+ }
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void iscsi_set_resid(struct iscsi_cmnd *rsp)
-+{
-+ struct iscsi_cmnd *req = rsp->parent_req;
-+ struct scst_cmd *scst_cmd = req->scst_cmd;
-+ struct iscsi_scsi_cmd_hdr *req_hdr;
-+ struct iscsi_scsi_rsp_hdr *rsp_hdr;
-+ int resid, out_resid;
-+
-+ TRACE_ENTRY();
-+
-+ if (likely(!scst_get_resid(scst_cmd, &resid, &out_resid))) {
-+ TRACE_DBG("No residuals for req %p", req);
-+ goto out;
-+ }
-+
-+ TRACE_DBG("req %p, resid %d, out_resid %d", req, resid, out_resid);
-+
-+ req_hdr = cmnd_hdr(req);
-+ rsp_hdr = (struct iscsi_scsi_rsp_hdr *)&rsp->pdu.bhs;
-+
-+ if ((req_hdr->flags & ISCSI_CMD_READ) &&
-+ (req_hdr->flags & ISCSI_CMD_WRITE)) {
-+ if (out_resid > 0) {
-+ rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
-+ rsp_hdr->residual_count = cpu_to_be32(out_resid);
-+ } else if (out_resid < 0) {
-+ out_resid = -out_resid;
-+ rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_OVERFLOW;
-+ rsp_hdr->residual_count = cpu_to_be32(out_resid);
-+ }
-+
-+ if (resid > 0) {
-+ rsp_hdr->flags |= ISCSI_FLG_BIRESIDUAL_UNDERFLOW;
-+ rsp_hdr->bi_residual_count = cpu_to_be32(resid);
-+ } else if (resid < 0) {
-+ resid = -resid;
-+ rsp_hdr->flags |= ISCSI_FLG_BIRESIDUAL_OVERFLOW;
-+ rsp_hdr->bi_residual_count = cpu_to_be32(resid);
-+ }
-+ } else {
-+ if (resid > 0) {
-+ rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_UNDERFLOW;
-+ rsp_hdr->residual_count = cpu_to_be32(resid);
-+ } else if (resid < 0) {
-+ resid = -resid;
-+ rsp_hdr->flags |= ISCSI_FLG_RESIDUAL_OVERFLOW;
-+ rsp_hdr->residual_count = cpu_to_be32(resid);
-+ }
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void send_data_rsp(struct iscsi_cmnd *req, u8 status, int send_status)
-+{
-+ struct iscsi_cmnd *rsp;
-+ struct iscsi_scsi_cmd_hdr *req_hdr = cmnd_hdr(req);
-+ struct iscsi_data_in_hdr *rsp_hdr;
-+ u32 pdusize, size, offset, sn;
-+ LIST_HEAD(send);
-+
-+ TRACE_DBG("req %p", req);
-+
-+ pdusize = req->conn->session->sess_params.max_xmit_data_length;
-+ size = req->bufflen;
-+ offset = 0;
-+ sn = 0;
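-+ /*
-+ * Slice the read buffer into Data-In PDUs of at most
-+ * max_xmit_data_length bytes; e.g. bufflen = 200 KB with a 64 KB
-+ * limit yields three 64 KB PDUs plus a final 8 KB PDU that, when
-+ * requested, also carries the F bit and the SCSI status.
-+ */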
-+
-+ while (1) {
-+ rsp = iscsi_alloc_rsp(req);
-+ TRACE_DBG("rsp %p", rsp);
-+ rsp->sg = req->sg;
-+ rsp->sg_cnt = req->sg_cnt;
-+ rsp->bufflen = req->bufflen;
-+ rsp_hdr = (struct iscsi_data_in_hdr *)&rsp->pdu.bhs;
-+
-+ rsp_hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
-+ rsp_hdr->itt = req_hdr->itt;
-+ rsp_hdr->ttt = ISCSI_RESERVED_TAG;
-+ rsp_hdr->buffer_offset = cpu_to_be32(offset);
-+ rsp_hdr->data_sn = cpu_to_be32(sn);
-+
-+ if (size <= pdusize) {
-+ TRACE_DBG("offset %d, size %d", offset, size);
-+ rsp->pdu.datasize = size;
-+ if (send_status) {
-+ TRACE_DBG("status %x", status);
-+
-+ EXTRACHECKS_BUG_ON((cmnd_hdr(req)->flags & ISCSI_CMD_WRITE) != 0);
-+
-+ rsp_hdr->flags = ISCSI_FLG_FINAL | ISCSI_FLG_STATUS;
-+ rsp_hdr->cmd_status = status;
-+
-+ iscsi_set_resid(rsp);
-+ }
-+ list_add_tail(&rsp->write_list_entry, &send);
-+ break;
-+ }
-+
-+ TRACE_DBG("pdusize %d, offset %d, size %d", pdusize, offset,
-+ size);
-+
-+ rsp->pdu.datasize = pdusize;
-+
-+ size -= pdusize;
-+ offset += pdusize;
-+ sn++;
-+
-+ list_add_tail(&rsp->write_list_entry, &send);
-+ }
-+ iscsi_cmnds_init_write(&send, 0);
-+ return;
-+}
-+
-+static void iscsi_init_status_rsp(struct iscsi_cmnd *rsp,
-+ int status, const u8 *sense_buf, int sense_len)
-+{
-+ struct iscsi_cmnd *req = rsp->parent_req;
-+ struct iscsi_scsi_rsp_hdr *rsp_hdr;
-+ struct scatterlist *sg;
-+
-+ TRACE_ENTRY();
-+
-+ rsp_hdr = (struct iscsi_scsi_rsp_hdr *)&rsp->pdu.bhs;
-+ rsp_hdr->opcode = ISCSI_OP_SCSI_RSP;
-+ rsp_hdr->flags = ISCSI_FLG_FINAL;
-+ rsp_hdr->response = ISCSI_RESPONSE_COMMAND_COMPLETED;
-+ rsp_hdr->cmd_status = status;
-+ rsp_hdr->itt = cmnd_hdr(req)->itt;
-+
-+ if (SCST_SENSE_VALID(sense_buf)) {
-+ TRACE_DBG("%s", "SENSE VALID");
-+
-+ sg = rsp->sg = rsp->rsp_sg;
-+ rsp->sg_cnt = 2;
-+ rsp->own_sg = 1;
-+
-+ sg_init_table(sg, 2);
-+ sg_set_buf(&sg[0], &rsp->sense_hdr, sizeof(rsp->sense_hdr));
-+ sg_set_buf(&sg[1], sense_buf, sense_len);
-+
-+ rsp->sense_hdr.length = cpu_to_be16(sense_len);
-+
-+ rsp->pdu.datasize = sizeof(rsp->sense_hdr) + sense_len;
-+ rsp->bufflen = rsp->pdu.datasize;
-+ } else {
-+ rsp->pdu.datasize = 0;
-+ rsp->bufflen = 0;
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static inline struct iscsi_cmnd *create_status_rsp(struct iscsi_cmnd *req,
-+ int status, const u8 *sense_buf, int sense_len)
-+{
-+ struct iscsi_cmnd *rsp;
-+
-+ TRACE_ENTRY();
-+
-+ rsp = iscsi_alloc_rsp(req);
-+ TRACE_DBG("rsp %p", rsp);
-+
-+ iscsi_init_status_rsp(rsp, status, sense_buf, sense_len);
-+ iscsi_set_resid(rsp);
-+
-+ TRACE_EXIT_HRES((unsigned long)rsp);
-+ return rsp;
-+}
-+
-+/*
-+ * Initializes data receive fields. Can be called only when they have not been
-+ * initialized yet.
-+ */
-+static int iscsi_set_prelim_r2t_len_to_receive(struct iscsi_cmnd *req)
-+{
-+ struct iscsi_scsi_cmd_hdr *req_hdr = (struct iscsi_scsi_cmd_hdr *)&req->pdu.bhs;
-+ int res = 0;
-+ unsigned int not_received;
-+
-+ TRACE_ENTRY();
-+
-+ if (req_hdr->flags & ISCSI_CMD_FINAL) {
-+ if (req_hdr->flags & ISCSI_CMD_WRITE)
-+ iscsi_set_not_received_data_len(req,
-+ be32_to_cpu(req_hdr->data_length) -
-+ req->pdu.datasize);
-+ goto out;
-+ }
-+
-+ BUG_ON(req->outstanding_r2t != 0);
-+
-+ res = cmnd_insert_data_wait_hash(req);
-+ if (res != 0) {
-+ /*
-+ * We have to close the connection, because otherwise data
-+ * corruption is possible if we allow data for this request
-+ * to be received by another request with a duplicated ITT.
-+ */
-+ mark_conn_closed(req->conn);
-+ goto out;
-+ }
-+
-+ /*
-+ * We need to wait for one or more PDUs. To simplify the
-+ * other code, pretend we need to receive 1 byte;
-+ * data_out_start() will correct it.
-+ */
-+ req->outstanding_r2t = 1;
-+ req_add_to_write_timeout_list(req);
-+ req->r2t_len_to_receive = 1;
-+ req->r2t_len_to_send = 0;
-+
-+ not_received = be32_to_cpu(req_hdr->data_length) - req->pdu.datasize;
-+ not_received -= min_t(unsigned int, not_received,
-+ req->conn->session->sess_params.first_burst_length);
-+ iscsi_set_not_received_data_len(req, not_received);
-+
-+ TRACE_DBG("req %p, op %x, outstanding_r2t %d, r2t_len_to_receive %d, "
-+ "r2t_len_to_send %d, not_received_data_len %d", req,
-+ cmnd_opcode(req), req->outstanding_r2t, req->r2t_len_to_receive,
-+ req->r2t_len_to_send, req->not_received_data_len);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int create_preliminary_no_scst_rsp(struct iscsi_cmnd *req,
-+ int status, const u8 *sense_buf, int sense_len)
-+{
-+ struct iscsi_cmnd *rsp;
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (req->prelim_compl_flags != 0) {
-+ TRACE_MGMT_DBG("req %p already prelim completed", req);
-+ goto out;
-+ }
-+
-+ req->scst_state = ISCSI_CMD_STATE_OUT_OF_SCST_PRELIM_COMPL;
-+
-+ BUG_ON(req->scst_cmd != NULL);
-+
-+ res = iscsi_preliminary_complete(req, req, true);
-+
-+ rsp = iscsi_alloc_main_rsp(req);
-+ TRACE_DBG("main rsp %p", rsp);
-+
-+ iscsi_init_status_rsp(rsp, status, sense_buf, sense_len);
-+
-+ /* Resid will be set in req_cmnd_release() */
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+int set_scst_preliminary_status_rsp(struct iscsi_cmnd *req,
-+ bool get_data, int key, int asc, int ascq)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (req->scst_cmd == NULL) {
-+		/* An error must already be set */
-+ goto complete;
-+ }
-+
-+ scst_set_cmd_error(req->scst_cmd, key, asc, ascq);
-+
-+complete:
-+ res = iscsi_preliminary_complete(req, req, get_data);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int create_reject_rsp(struct iscsi_cmnd *req, int reason, bool get_data)
-+{
-+ int res = 0;
-+ struct iscsi_cmnd *rsp;
-+ struct iscsi_reject_hdr *rsp_hdr;
-+ struct scatterlist *sg;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Reject: req %p, reason %x", req, reason);
-+
-+ if (cmnd_opcode(req) == ISCSI_OP_SCSI_CMD) {
-+ if (req->scst_cmd == NULL) {
-+			/* BUSY status must already be set */
-+ struct iscsi_scsi_rsp_hdr *rsp_hdr1;
-+ rsp_hdr1 = (struct iscsi_scsi_rsp_hdr *)&req->main_rsp->pdu.bhs;
-+ BUG_ON(rsp_hdr1->cmd_status == 0);
-+ /*
-+ * Let's not send REJECT here. The initiator will retry
-+ * and, hopefully, next time we will not fail allocating
-+ * scst_cmd, so we will then send the REJECT.
-+ */
-+ goto out;
-+ } else {
-+ /*
-+ * "In all the cases in which a pre-instantiated SCSI
-+ * task is terminated because of the reject, the target
-+ * MUST issue a proper SCSI command response with CHECK
-+ * CONDITION as described in Section 10.4.3 Response" -
-+ * RFC 3720.
-+ */
-+ set_scst_preliminary_status_rsp(req, get_data,
-+ SCST_LOAD_SENSE(scst_sense_invalid_message));
-+ }
-+ }
-+
-+ rsp = iscsi_alloc_main_rsp(req);
-+ rsp_hdr = (struct iscsi_reject_hdr *)&rsp->pdu.bhs;
-+
-+ rsp_hdr->opcode = ISCSI_OP_REJECT;
-+ rsp_hdr->ffffffff = ISCSI_RESERVED_TAG;
-+ rsp_hdr->reason = reason;
-+
-+ sg = rsp->sg = rsp->rsp_sg;
-+ rsp->sg_cnt = 1;
-+ rsp->own_sg = 1;
-+ sg_init_one(sg, &req->pdu.bhs, sizeof(struct iscsi_hdr));
-+ rsp->bufflen = rsp->pdu.datasize = sizeof(struct iscsi_hdr);
-+
-+ res = iscsi_preliminary_complete(req, req, true);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
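-+/*
-+ * Returns how many more commands the session may accept, i.e. the width of
-+ * the command window advertised via MaxCmdSN. May return -1, which makes
-+ * MaxCmdSN = ExpCmdSN - 1, i.e. a closed command window.
-+ */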
-+static inline int iscsi_get_allowed_cmds(struct iscsi_session *sess)
-+{
-+	int res = max(-1, (int)sess->tgt_params.queued_cmnds -
-+		atomic_read(&sess->active_cmds) - 1);
-+ TRACE_DBG("allowed cmds %d (sess %p, active_cmds %d)", res,
-+ sess, atomic_read(&sess->active_cmds));
-+ return res;
-+}
-+
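-+/*
-+ * Fills the SN fields in the BHS of an outgoing PDU under sn_lock. StatSN
-+ * is advanced only when set_stat_sn is true (e.g. R2T and target-initiated
-+ * Nop-In PDUs don't consume StatSN), while ExpCmdSN and MaxCmdSN always
-+ * advertise the current command window. Returns the resulting StatSN in
-+ * wire format.
-+ */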
-+static __be32 cmnd_set_sn(struct iscsi_cmnd *cmnd, int set_stat_sn)
-+{
-+ struct iscsi_conn *conn = cmnd->conn;
-+ struct iscsi_session *sess = conn->session;
-+ __be32 res;
-+
-+ spin_lock(&sess->sn_lock);
-+
-+ if (set_stat_sn)
-+ cmnd->pdu.bhs.sn = (__force u32)cpu_to_be32(conn->stat_sn++);
-+ cmnd->pdu.bhs.exp_sn = (__force u32)cpu_to_be32(sess->exp_cmd_sn);
-+ cmnd->pdu.bhs.max_sn = (__force u32)cpu_to_be32(sess->exp_cmd_sn +
-+ iscsi_get_allowed_cmds(sess));
-+
-+ res = cpu_to_be32(conn->stat_sn);
-+
-+ spin_unlock(&sess->sn_lock);
-+ return res;
-+}
-+
-+/* Called from the read thread */
-+static void update_stat_sn(struct iscsi_cmnd *cmnd)
-+{
-+ struct iscsi_conn *conn = cmnd->conn;
-+ u32 exp_stat_sn;
-+
-+ cmnd->pdu.bhs.exp_sn = exp_stat_sn = be32_to_cpu((__force __be32)cmnd->pdu.bhs.exp_sn);
-+ TRACE_DBG("%x,%x", cmnd_opcode(cmnd), exp_stat_sn);
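-+	/*
-+	 * Serial-number arithmetic: accept the new ExpStatSN only if it
-+	 * moves forward and does not overtake our current StatSN.
-+	 */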
-+ if ((int)(exp_stat_sn - conn->exp_stat_sn) > 0 &&
-+ (int)(exp_stat_sn - conn->stat_sn) <= 0) {
-+ /* free pdu resources */
-+ cmnd->conn->exp_stat_sn = exp_stat_sn;
-+ }
-+ return;
-+}
-+
-+static struct iscsi_cmnd *cmnd_find_itt_get(struct iscsi_conn *conn, __be32 itt)
-+{
-+ struct iscsi_cmnd *cmnd, *found_cmnd = NULL;
-+
-+ spin_lock_bh(&conn->cmd_list_lock);
-+ list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
-+ if ((cmnd->pdu.bhs.itt == itt) && !cmnd_get_check(cmnd)) {
-+ found_cmnd = cmnd;
-+ break;
-+ }
-+ }
-+ spin_unlock_bh(&conn->cmd_list_lock);
-+
-+ return found_cmnd;
-+}
-+
-+/**
-+ ** We use the ITT hash only to find the original request PDU for
-+ ** subsequent Data-Out PDUs.
-+ **/
-+
-+/* Must be called under cmnd_data_wait_hash_lock */
-+static struct iscsi_cmnd *__cmnd_find_data_wait_hash(struct iscsi_conn *conn,
-+ __be32 itt)
-+{
-+ struct list_head *head;
-+ struct iscsi_cmnd *cmnd;
-+
-+ head = &conn->session->cmnd_data_wait_hash[cmnd_hashfn((__force u32)itt)];
-+
-+ list_for_each_entry(cmnd, head, hash_list_entry) {
-+ if (cmnd->pdu.bhs.itt == itt)
-+ return cmnd;
-+ }
-+ return NULL;
-+}
-+
-+static struct iscsi_cmnd *cmnd_find_data_wait_hash(struct iscsi_conn *conn,
-+ __be32 itt)
-+{
-+ struct iscsi_cmnd *res;
-+ struct iscsi_session *session = conn->session;
-+
-+ spin_lock(&session->cmnd_data_wait_hash_lock);
-+ res = __cmnd_find_data_wait_hash(conn, itt);
-+ spin_unlock(&session->cmnd_data_wait_hash_lock);
-+
-+ return res;
-+}
-+
-+static inline u32 get_next_ttt(struct iscsi_conn *conn)
-+{
-+ u32 ttt;
-+ struct iscsi_session *session = conn->session;
-+
-+ /* Not compatible with MC/S! */
-+
-+ iscsi_extracheck_is_rd_thread(conn);
-+
-+ if (unlikely(session->next_ttt == ISCSI_RESERVED_TAG_CPU32))
-+ session->next_ttt++;
-+ ttt = session->next_ttt++;
-+
-+ return ttt;
-+}
-+
-+static int cmnd_insert_data_wait_hash(struct iscsi_cmnd *cmnd)
-+{
-+ struct iscsi_session *session = cmnd->conn->session;
-+ struct iscsi_cmnd *tmp;
-+ struct list_head *head;
-+ int err = 0;
-+ __be32 itt = cmnd->pdu.bhs.itt;
-+
-+ if (unlikely(cmnd->hashed)) {
-+ /*
-+ * It can be for preliminary completed commands, when this
-+ * function already failed.
-+ */
-+ goto out;
-+ }
-+
-+ /*
-+	 * We don't need a TTT, because the ITT/buffer_offset pair is
-+	 * sufficient to find the original request and buffer for Data-Out
-+	 * PDUs, but the crazy iSCSI spec requires us to send this
-+	 * superfluous field in R2T PDUs and some initiators may rely on it.
-+ */
-+ cmnd->target_task_tag = get_next_ttt(cmnd->conn);
-+
-+ TRACE_DBG("%p:%x", cmnd, itt);
-+ if (unlikely(itt == ISCSI_RESERVED_TAG)) {
-+ PRINT_ERROR("%s", "ITT is RESERVED_TAG");
-+ PRINT_BUFFER("Incorrect BHS", &cmnd->pdu.bhs,
-+ sizeof(cmnd->pdu.bhs));
-+ err = -ISCSI_REASON_PROTOCOL_ERROR;
-+ goto out;
-+ }
-+
-+ spin_lock(&session->cmnd_data_wait_hash_lock);
-+
-+ head = &session->cmnd_data_wait_hash[cmnd_hashfn((__force u32)itt)];
-+
-+ tmp = __cmnd_find_data_wait_hash(cmnd->conn, itt);
-+ if (likely(!tmp)) {
-+ TRACE_DBG("Adding cmnd %p to the hash (ITT %x)", cmnd,
-+ cmnd->pdu.bhs.itt);
-+ list_add_tail(&cmnd->hash_list_entry, head);
-+ cmnd->hashed = 1;
-+ } else {
-+ PRINT_ERROR("Task %x in progress, cmnd %p", itt, cmnd);
-+ err = -ISCSI_REASON_TASK_IN_PROGRESS;
-+ }
-+
-+ spin_unlock(&session->cmnd_data_wait_hash_lock);
-+
-+out:
-+ return err;
-+}
-+
-+static void cmnd_remove_data_wait_hash(struct iscsi_cmnd *cmnd)
-+{
-+ struct iscsi_session *session = cmnd->conn->session;
-+ struct iscsi_cmnd *tmp;
-+
-+ spin_lock(&session->cmnd_data_wait_hash_lock);
-+
-+ tmp = __cmnd_find_data_wait_hash(cmnd->conn, cmnd->pdu.bhs.itt);
-+
-+ if (likely(tmp && tmp == cmnd)) {
-+ TRACE_DBG("Deleting cmnd %p from the hash (ITT %x)", cmnd,
-+ cmnd->pdu.bhs.itt);
-+ list_del(&cmnd->hash_list_entry);
-+ cmnd->hashed = 0;
-+ } else
-+ PRINT_ERROR("%p:%x not found", cmnd, cmnd->pdu.bhs.itt);
-+
-+ spin_unlock(&session->cmnd_data_wait_hash_lock);
-+
-+ return;
-+}
-+
-+static void cmnd_prepare_get_rejected_immed_data(struct iscsi_cmnd *cmnd)
-+{
-+ struct iscsi_conn *conn = cmnd->conn;
-+ struct scatterlist *sg = cmnd->sg;
-+ char __user *addr;
-+ u32 size;
-+ unsigned int i;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG_FLAG(iscsi_get_flow_ctrl_or_mgmt_dbg_log_flag(cmnd),
-+ "Skipping (cmnd %p, ITT %x, op %x, cmd op %x, "
-+ "datasize %u, scst_cmd %p, scst state %d)", cmnd,
-+ cmnd->pdu.bhs.itt, cmnd_opcode(cmnd), cmnd_hdr(cmnd)->scb[0],
-+ cmnd->pdu.datasize, cmnd->scst_cmd, cmnd->scst_state);
-+
-+ iscsi_extracheck_is_rd_thread(conn);
-+
-+ size = cmnd->pdu.datasize;
-+ if (!size)
-+ goto out;
-+
-+ /* We already checked pdu.datasize in check_segment_length() */
-+
-+ /*
-+	 * Concurrent accesses to dummy_page in dummy_sg are safe,
-+	 * since the data will only be read and then discarded.
-+ */
-+ sg = &dummy_sg;
-+ if (cmnd->sg == NULL) {
-+ /* just in case */
-+ cmnd->sg = sg;
-+ cmnd->bufflen = PAGE_SIZE;
-+ cmnd->own_sg = 1;
-+ }
-+
-+ addr = (char __force __user *)(page_address(sg_page(&sg[0])));
-+ conn->read_size = size;
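-+	/*
-+	 * Build an iovec that points at the same dummy page for every
-+	 * PAGE_SIZE chunk: the payload is received and immediately
-+	 * discarded, so overlapping reads into one page are harmless.
-+	 */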
-+ for (i = 0; size > PAGE_SIZE; i++, size -= PAGE_SIZE) {
-+ /* We already checked pdu.datasize in check_segment_length() */
-+ BUG_ON(i >= ISCSI_CONN_IOV_MAX);
-+ conn->read_iov[i].iov_base = addr;
-+ conn->read_iov[i].iov_len = PAGE_SIZE;
-+ }
-+ conn->read_iov[i].iov_base = addr;
-+ conn->read_iov[i].iov_len = size;
-+ conn->read_msg.msg_iov = conn->read_iov;
-+ conn->read_msg.msg_iovlen = ++i;
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+int iscsi_preliminary_complete(struct iscsi_cmnd *req,
-+ struct iscsi_cmnd *orig_req, bool get_data)
-+{
-+ int res = 0;
-+ bool set_r2t_len;
-+ struct iscsi_hdr *orig_req_hdr = &orig_req->pdu.bhs;
-+
-+ TRACE_ENTRY();
-+
-+#ifdef CONFIG_SCST_DEBUG
-+ {
-+ struct iscsi_hdr *req_hdr = &req->pdu.bhs;
-+ TRACE_DBG_FLAG(iscsi_get_flow_ctrl_or_mgmt_dbg_log_flag(orig_req),
-+ "Prelim completed req %p, orig_req %p (FINAL %x, "
-+ "outstanding_r2t %d)", req, orig_req,
-+ (req_hdr->flags & ISCSI_CMD_FINAL),
-+ orig_req->outstanding_r2t);
-+ }
-+#endif
-+
-+ iscsi_extracheck_is_rd_thread(req->conn);
-+ BUG_ON(req->parent_req != NULL);
-+
-+ if (test_bit(ISCSI_CMD_PRELIM_COMPLETED, &req->prelim_compl_flags)) {
-+ TRACE_MGMT_DBG("req %p already prelim completed", req);
-+ /* To not try to get data twice */
-+ get_data = false;
-+ }
-+
-+ /*
-+	 * We need to receive all outstanding PDUs, even if the direction
-+	 * isn't WRITE. The PRELIM_COMPLETED test is needed, because
-+	 * iscsi_set_prelim_r2t_len_to_receive() could also have failed
-+	 * before.
-+ */
-+ set_r2t_len = !orig_req->hashed &&
-+ (cmnd_opcode(orig_req) == ISCSI_OP_SCSI_CMD) &&
-+ !test_bit(ISCSI_CMD_PRELIM_COMPLETED,
-+ &orig_req->prelim_compl_flags);
-+
-+ TRACE_DBG("get_data %d, set_r2t_len %d", get_data, set_r2t_len);
-+
-+ if (get_data)
-+ cmnd_prepare_get_rejected_immed_data(req);
-+
-+ if (test_bit(ISCSI_CMD_PRELIM_COMPLETED, &orig_req->prelim_compl_flags))
-+ goto out_set;
-+
-+ if (set_r2t_len)
-+ res = iscsi_set_prelim_r2t_len_to_receive(orig_req);
-+ else if (orig_req_hdr->flags & ISCSI_CMD_WRITE) {
-+ /*
-+		 * We get here if orig_req was preliminarily completed in the
-+		 * middle of data receiving. We won't send more R2Ts, so
-+		 * r2t_len_to_send is final and won't be updated anymore.
-+ */
-+ iscsi_set_not_received_data_len(orig_req,
-+ orig_req->r2t_len_to_send);
-+ }
-+
-+out_set:
-+ set_bit(ISCSI_CMD_PRELIM_COMPLETED, &orig_req->prelim_compl_flags);
-+ set_bit(ISCSI_CMD_PRELIM_COMPLETED, &req->prelim_compl_flags);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int cmnd_prepare_recv_pdu(struct iscsi_conn *conn,
-+ struct iscsi_cmnd *cmd, u32 offset, u32 size)
-+{
-+ struct scatterlist *sg = cmd->sg;
-+ unsigned int bufflen = cmd->bufflen;
-+ unsigned int idx, i, buff_offs;
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("cmd %p, sg %p, offset %u, size %u", cmd, cmd->sg,
-+ offset, size);
-+
-+ iscsi_extracheck_is_rd_thread(conn);
-+
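-+	/*
-+	 * Split the buffer offset into a starting sg index and an in-page
-+	 * offset; this assumes the sg vector is built from page-sized
-+	 * entries (the first one possibly offset within its page).
-+	 */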
-+ buff_offs = offset;
-+ idx = (offset + sg[0].offset) >> PAGE_SHIFT;
-+ offset &= ~PAGE_MASK;
-+
-+ conn->read_msg.msg_iov = conn->read_iov;
-+ conn->read_size = size;
-+
-+ i = 0;
-+ while (1) {
-+ unsigned int sg_len;
-+ char __user *addr;
-+
-+ if (unlikely(buff_offs >= bufflen)) {
-+ TRACE_DBG("Residual overflow (cmd %p, buff_offs %d, "
-+ "bufflen %d)", cmd, buff_offs, bufflen);
-+ idx = 0;
-+ sg = &dummy_sg;
-+ offset = 0;
-+ }
-+
-+ addr = (char __force __user *)(sg_virt(&sg[idx]));
-+ EXTRACHECKS_BUG_ON(addr == NULL);
-+ sg_len = sg[idx].length - offset;
-+
-+ conn->read_iov[i].iov_base = addr + offset;
-+
-+ if (size <= sg_len) {
-+ TRACE_DBG("idx=%d, i=%d, offset=%u, size=%d, addr=%p",
-+ idx, i, offset, size, addr);
-+ conn->read_iov[i].iov_len = size;
-+ conn->read_msg.msg_iovlen = i+1;
-+ break;
-+ }
-+ conn->read_iov[i].iov_len = sg_len;
-+
-+ TRACE_DBG("idx=%d, i=%d, offset=%u, size=%d, sg_len=%u, "
-+ "addr=%p", idx, i, offset, size, sg_len, addr);
-+
-+ size -= sg_len;
-+ buff_offs += sg_len;
-+
-+ i++;
-+ if (unlikely(i >= ISCSI_CONN_IOV_MAX)) {
-+ PRINT_ERROR("Initiator %s violated negotiated "
-+ "parameters by sending too much data (size "
-+ "left %d)", conn->session->initiator_name,
-+ size);
-+ mark_conn_closed(conn);
-+ res = -EINVAL;
-+ break;
-+ }
-+
-+ idx++;
-+ offset = 0;
-+ }
-+
-+ TRACE_DBG("msg_iov=%p, msg_iovlen=%zd",
-+ conn->read_msg.msg_iov, conn->read_msg.msg_iovlen);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void send_r2t(struct iscsi_cmnd *req)
-+{
-+ struct iscsi_session *sess = req->conn->session;
-+ struct iscsi_cmnd *rsp;
-+ struct iscsi_r2t_hdr *rsp_hdr;
-+ u32 offset, burst;
-+ LIST_HEAD(send);
-+
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(req->r2t_len_to_send == 0);
-+
-+ /*
-+	 * There is no race with data_out_start() and conn_abort(), since
-+	 * all these functions are called from a single read thread
-+ */
-+ iscsi_extracheck_is_rd_thread(req->conn);
-+
-+ /*
-+	 * We don't need to check for PRELIM_COMPLETED here, because for such
-+	 * commands we set r2t_len_to_send to 0, which guarantees we won't
-+	 * be called here.
-+ */
-+
-+ EXTRACHECKS_BUG_ON(req->outstanding_r2t >
-+ sess->sess_params.max_outstanding_r2t);
-+
-+ if (req->outstanding_r2t == sess->sess_params.max_outstanding_r2t)
-+ goto out;
-+
-+ burst = sess->sess_params.max_burst_length;
-+ offset = be32_to_cpu(cmnd_hdr(req)->data_length) -
-+ req->r2t_len_to_send;
-+
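-+	/*
-+	 * Slice the data still to be requested into R2Ts of at most
-+	 * MaxBurstLength bytes each, stopping at MaxOutstandingR2T; any
-+	 * remainder is requested by later send_r2t() calls from
-+	 * data_out_end().
-+	 */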
-+ do {
-+ rsp = iscsi_alloc_rsp(req);
-+ rsp->pdu.bhs.ttt = (__force __be32)req->target_task_tag;
-+ rsp_hdr = (struct iscsi_r2t_hdr *)&rsp->pdu.bhs;
-+ rsp_hdr->opcode = ISCSI_OP_R2T;
-+ rsp_hdr->flags = ISCSI_FLG_FINAL;
-+ rsp_hdr->lun = cmnd_hdr(req)->lun;
-+ rsp_hdr->itt = cmnd_hdr(req)->itt;
-+ rsp_hdr->r2t_sn = (__force u32)cpu_to_be32(req->r2t_sn++);
-+ rsp_hdr->buffer_offset = cpu_to_be32(offset);
-+ if (req->r2t_len_to_send > burst) {
-+ rsp_hdr->data_length = cpu_to_be32(burst);
-+ req->r2t_len_to_send -= burst;
-+ offset += burst;
-+ } else {
-+ rsp_hdr->data_length = cpu_to_be32(req->r2t_len_to_send);
-+ req->r2t_len_to_send = 0;
-+ }
-+
-+ TRACE_WRITE("req %p, data_length %u, buffer_offset %u, "
-+ "r2t_sn %u, outstanding_r2t %u", req,
-+ be32_to_cpu(rsp_hdr->data_length),
-+ be32_to_cpu(rsp_hdr->buffer_offset),
-+ be32_to_cpu((__force __be32)rsp_hdr->r2t_sn), req->outstanding_r2t);
-+
-+ list_add_tail(&rsp->write_list_entry, &send);
-+ req->outstanding_r2t++;
-+
-+ } while ((req->outstanding_r2t < sess->sess_params.max_outstanding_r2t) &&
-+ (req->r2t_len_to_send != 0));
-+
-+ iscsi_cmnds_init_write(&send, ISCSI_INIT_WRITE_WAKE);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int iscsi_pre_exec(struct scst_cmd *scst_cmd)
-+{
-+ int res = SCST_PREPROCESS_STATUS_SUCCESS;
-+ struct iscsi_cmnd *req = (struct iscsi_cmnd *)
-+ scst_cmd_get_tgt_priv(scst_cmd);
-+ struct iscsi_cmnd *c, *t;
-+
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(scst_cmd_atomic(scst_cmd));
-+
-+ /* If data digest isn't used this list will be empty */
-+ list_for_each_entry_safe(c, t, &req->rx_ddigest_cmd_list,
-+ rx_ddigest_cmd_list_entry) {
-+ TRACE_DBG("Checking digest of RX ddigest cmd %p", c);
-+ if (digest_rx_data(c) != 0) {
-+ scst_set_cmd_error(scst_cmd,
-+ SCST_LOAD_SENSE(iscsi_sense_crc_error));
-+ res = SCST_PREPROCESS_STATUS_ERROR_SENSE_SET;
-+ /*
-+ * The rest of rx_ddigest_cmd_list will be freed
-+ * in req_cmnd_release()
-+ */
-+ goto out;
-+ }
-+ cmd_del_from_rx_ddigest_list(c);
-+ cmnd_put(c);
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int nop_out_start(struct iscsi_cmnd *cmnd)
-+{
-+ struct iscsi_conn *conn = cmnd->conn;
-+ struct iscsi_hdr *req_hdr = &cmnd->pdu.bhs;
-+ u32 size, tmp;
-+ int i, err = 0;
-+
-+ TRACE_DBG("%p", cmnd);
-+
-+ iscsi_extracheck_is_rd_thread(conn);
-+
-+ if (!(req_hdr->flags & ISCSI_FLG_FINAL)) {
-+		PRINT_ERROR("%s", "Initiator sent Nop-Out that is not a "
-+			"single PDU");
-+ err = -ISCSI_REASON_PROTOCOL_ERROR;
-+ goto out;
-+ }
-+
-+ if (cmnd->pdu.bhs.itt == ISCSI_RESERVED_TAG) {
-+ if (unlikely(!(cmnd->pdu.bhs.opcode & ISCSI_OP_IMMEDIATE)))
-+ PRINT_ERROR("%s", "Initiator sent RESERVED tag for "
-+ "non-immediate Nop-Out command");
-+ }
-+
-+ update_stat_sn(cmnd);
-+
-+ size = cmnd->pdu.datasize;
-+
-+ if (size) {
-+ conn->read_msg.msg_iov = conn->read_iov;
-+ if (cmnd->pdu.bhs.itt != ISCSI_RESERVED_TAG) {
-+ struct scatterlist *sg;
-+
-+ cmnd->sg = sg = scst_alloc(size, GFP_KERNEL,
-+ &cmnd->sg_cnt);
-+ if (sg == NULL) {
-+				TRACE(TRACE_OUT_OF_MEM, "Allocation of buffer "
-+					"for %d bytes of Nop-Out payload "
-+					"failed", size);
-+ err = -ISCSI_REASON_OUT_OF_RESOURCES;
-+ goto out;
-+ }
-+
-+ /* We already checked it in check_segment_length() */
-+ BUG_ON(cmnd->sg_cnt > (signed)ISCSI_CONN_IOV_MAX);
-+
-+ cmnd->own_sg = 1;
-+ cmnd->bufflen = size;
-+
-+ for (i = 0; i < cmnd->sg_cnt; i++) {
-+ conn->read_iov[i].iov_base =
-+ (void __force __user *)(page_address(sg_page(&sg[i])));
-+ tmp = min_t(u32, size, PAGE_SIZE);
-+ conn->read_iov[i].iov_len = tmp;
-+ conn->read_size += tmp;
-+ size -= tmp;
-+ }
-+ BUG_ON(size != 0);
-+ } else {
-+ /*
-+			 * Concurrent accesses to dummy_page are safe, since
-+			 * for ISCSI_RESERVED_TAG the data is only read and
-+			 * then discarded.
-+ */
-+ for (i = 0; i < (signed)ISCSI_CONN_IOV_MAX; i++) {
-+ conn->read_iov[i].iov_base =
-+ (void __force __user *)(page_address(dummy_page));
-+ tmp = min_t(u32, size, PAGE_SIZE);
-+ conn->read_iov[i].iov_len = tmp;
-+ conn->read_size += tmp;
-+ size -= tmp;
-+ }
-+
-+ /* We already checked size in check_segment_length() */
-+ BUG_ON(size != 0);
-+ }
-+
-+ conn->read_msg.msg_iovlen = i;
-+ TRACE_DBG("msg_iov=%p, msg_iovlen=%zd", conn->read_msg.msg_iov,
-+ conn->read_msg.msg_iovlen);
-+ }
-+
-+out:
-+ return err;
-+}
-+
-+int cmnd_rx_continue(struct iscsi_cmnd *req)
-+{
-+ struct iscsi_conn *conn = req->conn;
-+ struct iscsi_session *session = conn->session;
-+ struct iscsi_scsi_cmd_hdr *req_hdr = cmnd_hdr(req);
-+ struct scst_cmd *scst_cmd = req->scst_cmd;
-+ scst_data_direction dir;
-+ bool unsolicited_data_expected = false;
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("scsi command: %x", req_hdr->scb[0]);
-+
-+ EXTRACHECKS_BUG_ON(req->scst_state != ISCSI_CMD_STATE_AFTER_PREPROC);
-+
-+ dir = scst_cmd_get_data_direction(scst_cmd);
-+
-+ /*
-+	 * Check for preliminary completion here to save R2Ts. For QUEUE
-+	 * FULL statuses that might be a big performance win.
-+ */
-+	if (unlikely(scst_cmd_prelim_completed(scst_cmd) ||
-+		     (req->prelim_compl_flags != 0))) {
-+ /*
-+ * If necessary, ISCSI_CMD_ABORTED will be set by
-+ * iscsi_xmit_response().
-+ */
-+ res = iscsi_preliminary_complete(req, req, true);
-+ goto trace;
-+ }
-+
-+	/* For prelim completed commands sg and bufflen can already be set! */
-+
-+ if (dir & SCST_DATA_WRITE) {
-+ req->bufflen = scst_cmd_get_write_fields(scst_cmd, &req->sg,
-+ &req->sg_cnt);
-+ unsolicited_data_expected = !(req_hdr->flags & ISCSI_CMD_FINAL);
-+
-+ if (unlikely(session->sess_params.initial_r2t &&
-+ unsolicited_data_expected)) {
-+ PRINT_ERROR("Initiator %s violated negotiated "
-+ "parameters: initial R2T is required (ITT %x, "
-+ "op %x)", session->initiator_name,
-+ req->pdu.bhs.itt, req_hdr->scb[0]);
-+ goto out_close;
-+ }
-+
-+ if (unlikely(!session->sess_params.immediate_data &&
-+ req->pdu.datasize)) {
-+ PRINT_ERROR("Initiator %s violated negotiated "
-+ "parameters: forbidden immediate data sent "
-+ "(ITT %x, op %x)", session->initiator_name,
-+ req->pdu.bhs.itt, req_hdr->scb[0]);
-+ goto out_close;
-+ }
-+
-+ if (unlikely(session->sess_params.first_burst_length < req->pdu.datasize)) {
-+ PRINT_ERROR("Initiator %s violated negotiated "
-+ "parameters: immediate data len (%d) > "
-+ "first_burst_length (%d) (ITT %x, op %x)",
-+ session->initiator_name,
-+ req->pdu.datasize,
-+ session->sess_params.first_burst_length,
-+ req->pdu.bhs.itt, req_hdr->scb[0]);
-+ goto out_close;
-+ }
-+
-+ req->r2t_len_to_receive = be32_to_cpu(req_hdr->data_length) -
-+ req->pdu.datasize;
-+
-+ /*
-+ * In case of residual overflow req->r2t_len_to_receive and
-+ * req->pdu.datasize might be > req->bufflen
-+ */
-+
-+ res = cmnd_insert_data_wait_hash(req);
-+ if (unlikely(res != 0)) {
-+ /*
-+			 * We have to close the connection, because otherwise
-+			 * data corruption is possible if we allowed data for
-+			 * this request to be received in another request with
-+			 * a duplicated ITT.
-+ */
-+ goto out_close;
-+ }
-+
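-+		/*
-+		 * The initiator may still send up to FirstBurstLength bytes
-+		 * of unsolicited Data-Out, so only the remainder beyond that
-+		 * burst needs to be requested via R2Ts.
-+		 */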
-+ if (unsolicited_data_expected) {
-+ req->outstanding_r2t = 1;
-+ req->r2t_len_to_send = req->r2t_len_to_receive -
-+ min_t(unsigned int,
-+ session->sess_params.first_burst_length -
-+ req->pdu.datasize,
-+ req->r2t_len_to_receive);
-+ } else
-+ req->r2t_len_to_send = req->r2t_len_to_receive;
-+
-+ req_add_to_write_timeout_list(req);
-+
-+ if (req->pdu.datasize) {
-+ res = cmnd_prepare_recv_pdu(conn, req, 0, req->pdu.datasize);
-+ /* For performance better to send R2Ts ASAP */
-+ if (likely(res == 0) && (req->r2t_len_to_send != 0))
-+ send_r2t(req);
-+ }
-+ } else {
-+ req->sg = scst_cmd_get_sg(scst_cmd);
-+ req->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
-+ req->bufflen = scst_cmd_get_bufflen(scst_cmd);
-+
-+ if (unlikely(!(req_hdr->flags & ISCSI_CMD_FINAL) ||
-+ req->pdu.datasize)) {
-+ PRINT_ERROR("Unexpected unsolicited data (ITT %x "
-+ "CDB %x)", req->pdu.bhs.itt, req_hdr->scb[0]);
-+ set_scst_preliminary_status_rsp(req, true,
-+ SCST_LOAD_SENSE(iscsi_sense_unexpected_unsolicited_data));
-+ }
-+ }
-+
-+trace:
-+ TRACE_DBG("req=%p, dir=%d, unsolicited_data_expected=%d, "
-+ "r2t_len_to_receive=%d, r2t_len_to_send=%d, bufflen=%d, "
-+ "own_sg %d", req, dir, unsolicited_data_expected,
-+ req->r2t_len_to_receive, req->r2t_len_to_send, req->bufflen,
-+ req->own_sg);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_close:
-+ mark_conn_closed(conn);
-+ res = -EINVAL;
-+ goto out;
-+}
-+
-+static int scsi_cmnd_start(struct iscsi_cmnd *req)
-+{
-+ struct iscsi_conn *conn = req->conn;
-+ struct iscsi_session *session = conn->session;
-+ struct iscsi_scsi_cmd_hdr *req_hdr = cmnd_hdr(req);
-+ struct scst_cmd *scst_cmd;
-+ scst_data_direction dir;
-+ struct iscsi_ahs_hdr *ahdr;
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("scsi command: %x", req_hdr->scb[0]);
-+
-+ TRACE_DBG("Incrementing active_cmds (cmd %p, sess %p, "
-+ "new value %d)", req, session,
-+ atomic_read(&session->active_cmds)+1);
-+ atomic_inc(&session->active_cmds);
-+ req->dec_active_cmds = 1;
-+
-+ scst_cmd = scst_rx_cmd(session->scst_sess,
-+ (uint8_t *)&req_hdr->lun, sizeof(req_hdr->lun),
-+ req_hdr->scb, sizeof(req_hdr->scb), SCST_NON_ATOMIC);
-+ if (scst_cmd == NULL) {
-+ res = create_preliminary_no_scst_rsp(req, SAM_STAT_BUSY,
-+ NULL, 0);
-+ goto out;
-+ }
-+
-+ req->scst_cmd = scst_cmd;
-+ scst_cmd_set_tag(scst_cmd, (__force u32)req_hdr->itt);
-+ scst_cmd_set_tgt_priv(scst_cmd, req);
-+
-+ if ((req_hdr->flags & ISCSI_CMD_READ) &&
-+ (req_hdr->flags & ISCSI_CMD_WRITE)) {
-+ int sz = cmnd_read_size(req);
-+ if (unlikely(sz < 0)) {
-+			PRINT_ERROR("%s", "BIDI data transfer, but initiator "
-+				"did not supply Bidirectional Read Expected "
-+				"Data Transfer Length AHS");
-+ set_scst_preliminary_status_rsp(req, true,
-+ SCST_LOAD_SENSE(scst_sense_parameter_value_invalid));
-+ } else {
-+ dir = SCST_DATA_BIDI;
-+ scst_cmd_set_expected(scst_cmd, dir, sz);
-+ scst_cmd_set_expected_out_transfer_len(scst_cmd,
-+ be32_to_cpu(req_hdr->data_length));
-+#if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
-+ scst_cmd_set_tgt_need_alloc_data_buf(scst_cmd);
-+#endif
-+ }
-+ } else if (req_hdr->flags & ISCSI_CMD_READ) {
-+ dir = SCST_DATA_READ;
-+ scst_cmd_set_expected(scst_cmd, dir,
-+ be32_to_cpu(req_hdr->data_length));
-+#if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
-+ scst_cmd_set_tgt_need_alloc_data_buf(scst_cmd);
-+#endif
-+ } else if (req_hdr->flags & ISCSI_CMD_WRITE) {
-+ dir = SCST_DATA_WRITE;
-+ scst_cmd_set_expected(scst_cmd, dir,
-+ be32_to_cpu(req_hdr->data_length));
-+ } else {
-+ dir = SCST_DATA_NONE;
-+ scst_cmd_set_expected(scst_cmd, dir, 0);
-+ }
-+
-+ switch (req_hdr->flags & ISCSI_CMD_ATTR_MASK) {
-+ case ISCSI_CMD_SIMPLE:
-+ scst_cmd_set_queue_type(scst_cmd, SCST_CMD_QUEUE_SIMPLE);
-+ break;
-+ case ISCSI_CMD_HEAD_OF_QUEUE:
-+ scst_cmd_set_queue_type(scst_cmd, SCST_CMD_QUEUE_HEAD_OF_QUEUE);
-+ break;
-+ case ISCSI_CMD_ORDERED:
-+ scst_cmd_set_queue_type(scst_cmd, SCST_CMD_QUEUE_ORDERED);
-+ break;
-+ case ISCSI_CMD_ACA:
-+ scst_cmd_set_queue_type(scst_cmd, SCST_CMD_QUEUE_ACA);
-+ break;
-+ case ISCSI_CMD_UNTAGGED:
-+ scst_cmd_set_queue_type(scst_cmd, SCST_CMD_QUEUE_UNTAGGED);
-+ break;
-+ default:
-+		PRINT_ERROR("Unknown task code %x, using ORDERED instead",
-+ req_hdr->flags & ISCSI_CMD_ATTR_MASK);
-+ scst_cmd_set_queue_type(scst_cmd, SCST_CMD_QUEUE_ORDERED);
-+ break;
-+ }
-+
-+ scst_cmd_set_tgt_sn(scst_cmd, req_hdr->cmd_sn);
-+
-+ ahdr = (struct iscsi_ahs_hdr *)req->pdu.ahs;
-+ if (ahdr != NULL) {
-+ uint8_t *p = (uint8_t *)ahdr;
-+ unsigned int size = 0;
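-+		/*
-+		 * Walk the AHS list looking for an Extended CDB AHS. Each
-+		 * entry occupies ahslength + 3 bytes (the length field does
-+		 * not count its own 3-byte header), rounded up to a 4-byte
-+		 * boundary:
-+		 *
-+		 *   | AHSLength (2) | AHSType (1) | data (AHSLength) | pad |
-+		 */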
-+ do {
-+ int s;
-+
-+ ahdr = (struct iscsi_ahs_hdr *)p;
-+
-+ if (ahdr->ahstype == ISCSI_AHSTYPE_CDB) {
-+ struct iscsi_cdb_ahdr *eca =
-+ (struct iscsi_cdb_ahdr *)ahdr;
-+ scst_cmd_set_ext_cdb(scst_cmd, eca->cdb,
-+ be16_to_cpu(ahdr->ahslength) - 1,
-+ GFP_KERNEL);
-+ break;
-+ }
-+ s = 3 + be16_to_cpu(ahdr->ahslength);
-+ s = (s + 3) & -4;
-+ size += s;
-+ p += s;
-+ } while (size < req->pdu.ahssize);
-+ }
-+
-+ TRACE_DBG("START Command (itt %x, queue_type %d)",
-+ req_hdr->itt, scst_cmd_get_queue_type(scst_cmd));
-+ req->scst_state = ISCSI_CMD_STATE_RX_CMD;
-+ conn->rx_task = current;
-+ scst_cmd_init_stage1_done(scst_cmd, SCST_CONTEXT_DIRECT, 0);
-+
-+ if (req->scst_state != ISCSI_CMD_STATE_RX_CMD)
-+ res = cmnd_rx_continue(req);
-+ else {
-+ TRACE_DBG("Delaying req %p post processing (scst_state %d)",
-+ req, req->scst_state);
-+ res = 1;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int data_out_start(struct iscsi_cmnd *cmnd)
-+{
-+ struct iscsi_conn *conn = cmnd->conn;
-+ struct iscsi_data_out_hdr *req_hdr =
-+ (struct iscsi_data_out_hdr *)&cmnd->pdu.bhs;
-+ struct iscsi_cmnd *orig_req;
-+#if 0
-+ struct iscsi_hdr *orig_req_hdr;
-+#endif
-+ u32 offset = be32_to_cpu(req_hdr->buffer_offset);
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+ * There is no race with send_r2t(), conn_abort() and
-+ * iscsi_check_tm_data_wait_timeouts(), since
-+	 * all these functions are called from a single read thread
-+ */
-+ iscsi_extracheck_is_rd_thread(cmnd->conn);
-+
-+ update_stat_sn(cmnd);
-+
-+ orig_req = cmnd_find_data_wait_hash(conn, req_hdr->itt);
-+ cmnd->cmd_req = orig_req;
-+ if (unlikely(orig_req == NULL)) {
-+ /*
-+		 * This shouldn't happen, since we don't abort any request
-+		 * until we have received all related PDUs from the initiator
-+		 * or timed them out. Let's quietly drop such PDUs.
-+ */
-+ TRACE_MGMT_DBG("Unable to find scsi task ITT %x",
-+ cmnd->pdu.bhs.itt);
-+ res = iscsi_preliminary_complete(cmnd, cmnd, true);
-+ goto out;
-+ }
-+
-+ cmnd_get(orig_req);
-+
-+ if (unlikely(orig_req->r2t_len_to_receive < cmnd->pdu.datasize)) {
-+ if (orig_req->prelim_compl_flags != 0) {
-+ /* We can have fake r2t_len_to_receive */
-+ goto go;
-+ }
-+ PRINT_ERROR("Data size (%d) > R2T length to receive (%d)",
-+ cmnd->pdu.datasize, orig_req->r2t_len_to_receive);
-+ set_scst_preliminary_status_rsp(orig_req, false,
-+ SCST_LOAD_SENSE(iscsi_sense_incorrect_amount_of_data));
-+ goto go;
-+ }
-+
-+ /* Crazy iSCSI spec requires us to make this unneeded check */
-+#if 0 /* ...but some initiators (Windows) don't care to correctly set it */
-+ orig_req_hdr = &orig_req->pdu.bhs;
-+ if (unlikely(orig_req_hdr->lun != req_hdr->lun)) {
-+ PRINT_ERROR("Wrong LUN (%lld) in Data-Out PDU (expected %lld), "
-+ "orig_req %p, cmnd %p", (unsigned long long)req_hdr->lun,
-+ (unsigned long long)orig_req_hdr->lun, orig_req, cmnd);
-+ create_reject_rsp(orig_req, ISCSI_REASON_PROTOCOL_ERROR, false);
-+ goto go;
-+ }
-+#endif
-+
-+go:
-+ if (req_hdr->flags & ISCSI_FLG_FINAL)
-+ orig_req->outstanding_r2t--;
-+
-+ EXTRACHECKS_BUG_ON(orig_req->data_out_in_data_receiving);
-+ orig_req->data_out_in_data_receiving = 1;
-+
-+ TRACE_WRITE("cmnd %p, orig_req %p, offset %u, datasize %u", cmnd,
-+ orig_req, offset, cmnd->pdu.datasize);
-+
-+ if (unlikely(orig_req->prelim_compl_flags != 0))
-+ res = iscsi_preliminary_complete(cmnd, orig_req, true);
-+ else
-+ res = cmnd_prepare_recv_pdu(conn, orig_req, offset, cmnd->pdu.datasize);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static void data_out_end(struct iscsi_cmnd *cmnd)
-+{
-+ struct iscsi_data_out_hdr *req_hdr =
-+ (struct iscsi_data_out_hdr *)&cmnd->pdu.bhs;
-+ struct iscsi_cmnd *req;
-+
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_BUG_ON(cmnd == NULL);
-+ req = cmnd->cmd_req;
-+ if (unlikely(req == NULL))
-+ goto out;
-+
-+ TRACE_DBG("cmnd %p, req %p", cmnd, req);
-+
-+ iscsi_extracheck_is_rd_thread(cmnd->conn);
-+
-+ req->data_out_in_data_receiving = 0;
-+
-+ if (!(cmnd->conn->ddigest_type & DIGEST_NONE) &&
-+ !cmnd->ddigest_checked) {
-+ cmd_add_on_rx_ddigest_list(req, cmnd);
-+ cmnd_get(cmnd);
-+ }
-+
-+ /*
-+	 * Now that we have received the data, we can adjust
-+	 * r2t_len_to_receive of the orig req. We couldn't do it earlier,
-+	 * because that would break data receiving error recovery (calls of
-+	 * iscsi_fail_data_waiting_cmnd()).
-+ */
-+ req->r2t_len_to_receive -= cmnd->pdu.datasize;
-+
-+ if (unlikely(req->prelim_compl_flags != 0)) {
-+ /*
-+		 * We need to call iscsi_preliminary_complete() again to
-+		 * handle the case where we have just been aborted. This call
-+		 * must be done before zeroing r2t_len_to_send to correctly
-+		 * calculate the residual.
-+ */
-+ iscsi_preliminary_complete(cmnd, req, false);
-+
-+ /*
-+ * We might need to wait for one or more PDUs. Let's simplify
-+ * other code and not perform exact r2t_len_to_receive
-+ * calculation.
-+ */
-+ req->r2t_len_to_receive = req->outstanding_r2t;
-+ req->r2t_len_to_send = 0;
-+ }
-+
-+ TRACE_DBG("req %p, FINAL %x, outstanding_r2t %d, r2t_len_to_receive %d,"
-+ " r2t_len_to_send %d", req, req_hdr->flags & ISCSI_FLG_FINAL,
-+ req->outstanding_r2t, req->r2t_len_to_receive,
-+ req->r2t_len_to_send);
-+
-+ if (!(req_hdr->flags & ISCSI_FLG_FINAL))
-+ goto out_put;
-+
-+ if (req->r2t_len_to_receive == 0) {
-+ if (!req->pending)
-+ iscsi_restart_cmnd(req);
-+ } else if (req->r2t_len_to_send != 0)
-+ send_r2t(req);
-+
-+out_put:
-+ cmnd_put(req);
-+ cmnd->cmd_req = NULL;
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Might be called under target_mutex and cmd_list_lock */
-+static void __cmnd_abort(struct iscsi_cmnd *cmnd)
-+{
-+ unsigned long timeout_time = jiffies + ISCSI_TM_DATA_WAIT_TIMEOUT +
-+ ISCSI_ADD_SCHED_TIME;
-+ struct iscsi_conn *conn = cmnd->conn;
-+
-+ TRACE_MGMT_DBG("Aborting cmd %p, scst_cmd %p (scst state %x, "
-+ "ref_cnt %d, on_write_timeout_list %d, write_start %ld, ITT %x, "
-+ "sn %u, op %x, r2t_len_to_receive %d, r2t_len_to_send %d, "
-+ "CDB op %x, size to write %u, outstanding_r2t %d, "
-+ "sess->exp_cmd_sn %u, conn %p, rd_task %p, read_cmnd %p, "
-+ "read_state %d)", cmnd, cmnd->scst_cmd, cmnd->scst_state,
-+ atomic_read(&cmnd->ref_cnt), cmnd->on_write_timeout_list,
-+ cmnd->write_start, cmnd->pdu.bhs.itt, cmnd->pdu.bhs.sn,
-+ cmnd_opcode(cmnd), cmnd->r2t_len_to_receive,
-+ cmnd->r2t_len_to_send, cmnd_scsicode(cmnd),
-+ cmnd_write_size(cmnd), cmnd->outstanding_r2t,
-+ cmnd->conn->session->exp_cmd_sn, cmnd->conn,
-+ cmnd->conn->rd_task, cmnd->conn->read_cmnd,
-+ cmnd->conn->read_state);
-+
-+#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
-+ TRACE_MGMT_DBG("net_ref_cnt %d", atomic_read(&cmnd->net_ref_cnt));
-+#endif
-+
-+ /*
-+ * Lock to sync with iscsi_check_tm_data_wait_timeouts(), including
-+ * CMD_ABORTED bit set.
-+ */
-+ spin_lock_bh(&conn->conn_thr_pool->rd_lock);
-+
-+ /*
-+	 * We assume that preliminary completion of a command is tested by
-+	 * comparing prelim_compl_flags with 0. Otherwise a race is possible:
-+	 * a command could be sent to the SCST core as PRELIM_COMPLETED before
-+	 * being aborted there, with the result that a wrong success status
-+	 * is sent to the initiator.
-+ */
-+ set_bit(ISCSI_CMD_ABORTED, &cmnd->prelim_compl_flags);
-+
-+ TRACE_MGMT_DBG("Setting conn_tm_active for conn %p", conn);
-+ conn->conn_tm_active = 1;
-+
-+ spin_unlock_bh(&conn->conn_thr_pool->rd_lock);
-+
-+ /*
-+ * We need the lock to sync with req_add_to_write_timeout_list() and
-+ * close races for rsp_timer.expires.
-+ */
-+ spin_lock_bh(&conn->write_list_lock);
-+ if (!timer_pending(&conn->rsp_timer) ||
-+ time_after(conn->rsp_timer.expires, timeout_time)) {
-+ TRACE_MGMT_DBG("Mod timer on %ld (conn %p)", timeout_time,
-+ conn);
-+ mod_timer(&conn->rsp_timer, timeout_time);
-+ } else
-+ TRACE_MGMT_DBG("Timer for conn %p is going to fire on %ld "
-+ "(timeout time %ld)", conn, conn->rsp_timer.expires,
-+ timeout_time);
-+ spin_unlock_bh(&conn->write_list_lock);
-+
-+ return;
-+}
-+
-+/* Must be called from the read or conn close thread */
-+static int cmnd_abort_pre_checks(struct iscsi_cmnd *req, int *status)
-+{
-+ struct iscsi_task_mgt_hdr *req_hdr =
-+ (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
-+ struct iscsi_cmnd *cmnd;
-+ int res = -1;
-+
-+ req_hdr->ref_cmd_sn = be32_to_cpu((__force __be32)req_hdr->ref_cmd_sn);
-+
-+ if (!before(req_hdr->ref_cmd_sn, req_hdr->cmd_sn)) {
-+		TRACE(TRACE_MGMT, "ABORT TASK: RefCmdSN(%u) >= CmdSN(%u)",
-+ req_hdr->ref_cmd_sn, req_hdr->cmd_sn);
-+ *status = ISCSI_RESPONSE_UNKNOWN_TASK;
-+ goto out;
-+ }
-+
-+ cmnd = cmnd_find_itt_get(req->conn, req_hdr->rtt);
-+ if (cmnd) {
-+ struct iscsi_scsi_cmd_hdr *hdr = cmnd_hdr(cmnd);
-+
-+ if (req_hdr->lun != hdr->lun) {
-+ PRINT_ERROR("ABORT TASK: LUN mismatch: req LUN "
-+ "%llx, cmd LUN %llx, rtt %u",
-+ (long long unsigned)be64_to_cpu(req_hdr->lun),
-+ (long long unsigned)be64_to_cpu(hdr->lun),
-+ req_hdr->rtt);
-+ *status = ISCSI_RESPONSE_FUNCTION_REJECTED;
-+ goto out_put;
-+ }
-+
-+ if (cmnd->pdu.bhs.opcode & ISCSI_OP_IMMEDIATE) {
-+ if (req_hdr->ref_cmd_sn != req_hdr->cmd_sn) {
-+ PRINT_ERROR("ABORT TASK: RefCmdSN(%u) != TM "
-+ "cmd CmdSN(%u) for immediate command "
-+ "%p", req_hdr->ref_cmd_sn,
-+ req_hdr->cmd_sn, cmnd);
-+ *status = ISCSI_RESPONSE_FUNCTION_REJECTED;
-+ goto out_put;
-+ }
-+ } else {
-+ if (req_hdr->ref_cmd_sn != hdr->cmd_sn) {
-+ PRINT_ERROR("ABORT TASK: RefCmdSN(%u) != "
-+ "CmdSN(%u) for command %p",
-+ req_hdr->ref_cmd_sn, req_hdr->cmd_sn,
-+ cmnd);
-+ *status = ISCSI_RESPONSE_FUNCTION_REJECTED;
-+ goto out_put;
-+ }
-+ }
-+
-+ if (before(req_hdr->cmd_sn, hdr->cmd_sn) ||
-+ (req_hdr->cmd_sn == hdr->cmd_sn)) {
-+ PRINT_ERROR("ABORT TASK: SN mismatch: req SN %x, "
-+ "cmd SN %x, rtt %u", req_hdr->cmd_sn,
-+ hdr->cmd_sn, req_hdr->rtt);
-+ *status = ISCSI_RESPONSE_FUNCTION_REJECTED;
-+ goto out_put;
-+ }
-+
-+ cmnd_put(cmnd);
-+ res = 0;
-+ } else {
-+ TRACE_MGMT_DBG("cmd RTT %x not found", req_hdr->rtt);
-+ /*
-+ * iSCSI RFC:
-+ *
-+ * b) If the Referenced Task Tag does not identify an existing task,
-+ * but if the CmdSN indicated by the RefCmdSN field in the Task
-+ * Management function request is within the valid CmdSN window
-+ * and less than the CmdSN of the Task Management function
-+ * request itself, then targets must consider the CmdSN received
-+ * and return the "Function complete" response.
-+ *
-+ * c) If the Referenced Task Tag does not identify an existing task
-+ * and if the CmdSN indicated by the RefCmdSN field in the Task
-+ * Management function request is outside the valid CmdSN window,
-+ * then targets must return the "Task does not exist" response.
-+ *
-+ * 128 seems to be a good "window".
-+ */
-+ if (between(req_hdr->ref_cmd_sn, req_hdr->cmd_sn - 128,
-+ req_hdr->cmd_sn)) {
-+ *status = ISCSI_RESPONSE_FUNCTION_COMPLETE;
-+ res = 0;
-+ } else
-+ *status = ISCSI_RESPONSE_UNKNOWN_TASK;
-+ }
-+
-+out:
-+ return res;
-+
-+out_put:
-+ cmnd_put(cmnd);
-+ goto out;
-+}
-+
-+struct iscsi_cmnd_abort_params {
-+ struct work_struct iscsi_cmnd_abort_work;
-+ struct scst_cmd *scst_cmd;
-+};
-+
-+static mempool_t *iscsi_cmnd_abort_mempool;
-+
-+static void iscsi_cmnd_abort_fn(struct work_struct *work)
-+{
-+ struct iscsi_cmnd_abort_params *params = container_of(work,
-+ struct iscsi_cmnd_abort_params, iscsi_cmnd_abort_work);
-+ struct scst_cmd *scst_cmd = params->scst_cmd;
-+ struct iscsi_session *session = scst_sess_get_tgt_priv(scst_cmd->sess);
-+ struct iscsi_conn *conn;
-+ struct iscsi_cmnd *cmnd = scst_cmd_get_tgt_priv(scst_cmd);
-+ bool done = false;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Checking aborted scst_cmd %p (cmnd %p)", scst_cmd, cmnd);
-+
-+ mutex_lock(&session->target->target_mutex);
-+
-+ /*
-+	 * The cmnd pointer is valid only under cmd_list_lock, but we can't
-+	 * know the corresponding conn without dereferencing cmnd first, so
-+	 * let's check all conns and cmnds to find out if our cmnd is still
-+	 * valid under the lock.
-+ */
-+ list_for_each_entry(conn, &session->conn_list, conn_list_entry) {
-+ struct iscsi_cmnd *c;
-+ spin_lock_bh(&conn->cmd_list_lock);
-+ list_for_each_entry(c, &conn->cmd_list, cmd_list_entry) {
-+ if (c == cmnd) {
-+ __cmnd_abort(cmnd);
-+ done = true;
-+ break;
-+ }
-+ }
-+ spin_unlock_bh(&conn->cmd_list_lock);
-+ if (done)
-+ break;
-+ }
-+
-+ mutex_unlock(&session->target->target_mutex);
-+
-+ scst_cmd_put(scst_cmd);
-+
-+ mempool_free(params, iscsi_cmnd_abort_mempool);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void iscsi_on_abort_cmd(struct scst_cmd *scst_cmd)
-+{
-+ struct iscsi_cmnd_abort_params *params;
-+
-+ TRACE_ENTRY();
-+
-+ params = mempool_alloc(iscsi_cmnd_abort_mempool, GFP_ATOMIC);
-+ if (params == NULL) {
-+ PRINT_CRIT_ERROR("Unable to create iscsi_cmnd_abort_params, "
-+ "iSCSI cmnd for scst_cmd %p may not be aborted",
-+ scst_cmd);
-+ goto out;
-+ }
-+
-+ memset(params, 0, sizeof(*params));
-+ INIT_WORK(&params->iscsi_cmnd_abort_work, iscsi_cmnd_abort_fn);
-+ params->scst_cmd = scst_cmd;
-+
-+ scst_cmd_get(scst_cmd);
-+
-+ TRACE_MGMT_DBG("Scheduling abort check for scst_cmd %p", scst_cmd);
-+
-+ schedule_work(&params->iscsi_cmnd_abort_work);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Must be called from the read or conn close thread */
-+void conn_abort(struct iscsi_conn *conn)
-+{
-+ struct iscsi_cmnd *cmnd, *r, *t;
-+
-+ TRACE_MGMT_DBG("Aborting conn %p", conn);
-+
-+ iscsi_extracheck_is_rd_thread(conn);
-+
-+ cancel_delayed_work_sync(&conn->nop_in_delayed_work);
-+
-+ /* No locks, we are the only user */
-+ list_for_each_entry_safe(r, t, &conn->nop_req_list,
-+ nop_req_list_entry) {
-+ list_del(&r->nop_req_list_entry);
-+ cmnd_put(r);
-+ }
-+
-+ spin_lock_bh(&conn->cmd_list_lock);
-+again:
-+ list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
-+ __cmnd_abort(cmnd);
-+ if (cmnd->r2t_len_to_receive != 0) {
-+ if (!cmnd_get_check(cmnd)) {
-+ spin_unlock_bh(&conn->cmd_list_lock);
-+
-+ /* ToDo: this is racy for MC/S */
-+ iscsi_fail_data_waiting_cmnd(cmnd);
-+
-+ cmnd_put(cmnd);
-+
-+ /*
-+				 * We are in the read thread, so we need not
-+				 * worry that conn gets released together
-+				 * with the cmnd release.
-+ */
-+ spin_lock_bh(&conn->cmd_list_lock);
-+ goto again;
-+ }
-+ }
-+ }
-+ spin_unlock_bh(&conn->cmd_list_lock);
-+
-+ return;
-+}
-+
-+static void execute_task_management(struct iscsi_cmnd *req)
-+{
-+ struct iscsi_conn *conn = req->conn;
-+ struct iscsi_session *sess = conn->session;
-+ struct iscsi_task_mgt_hdr *req_hdr =
-+ (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
-+ int rc, status = ISCSI_RESPONSE_FUNCTION_REJECTED;
-+ int function = req_hdr->function & ISCSI_FUNCTION_MASK;
-+ struct scst_rx_mgmt_params params;
-+
-+ TRACE(TRACE_MGMT, "iSCSI TM fn %d", function);
-+
-+ TRACE_MGMT_DBG("TM req %p, ITT %x, RTT %x, sn %u, con %p", req,
-+ req->pdu.bhs.itt, req_hdr->rtt, req_hdr->cmd_sn, conn);
-+
-+ iscsi_extracheck_is_rd_thread(conn);
-+
-+ spin_lock(&sess->sn_lock);
-+ sess->tm_active++;
-+ sess->tm_sn = req_hdr->cmd_sn;
-+ if (sess->tm_rsp != NULL) {
-+ struct iscsi_cmnd *tm_rsp = sess->tm_rsp;
-+
-+ TRACE_MGMT_DBG("Dropping delayed TM rsp %p", tm_rsp);
-+
-+ sess->tm_rsp = NULL;
-+ sess->tm_active--;
-+
-+ spin_unlock(&sess->sn_lock);
-+
-+ BUG_ON(sess->tm_active < 0);
-+
-+ rsp_cmnd_release(tm_rsp);
-+ } else
-+ spin_unlock(&sess->sn_lock);
-+
-+ memset(&params, 0, sizeof(params));
-+ params.atomic = SCST_NON_ATOMIC;
-+ params.tgt_priv = req;
-+
-+ if ((function != ISCSI_FUNCTION_ABORT_TASK) &&
-+ (req_hdr->rtt != ISCSI_RESERVED_TAG)) {
-+ PRINT_ERROR("Invalid RTT %x (TM fn %d)", req_hdr->rtt,
-+ function);
-+ rc = -1;
-+ status = ISCSI_RESPONSE_FUNCTION_REJECTED;
-+ goto reject;
-+ }
-+
-+	/* cmd_sn was already converted to CPU format in cmnd_rx_start() */
-+
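-+	/*
-+	 * Map the iSCSI TM function onto the corresponding SCST management
-+	 * function and hand it to the SCST core. The status set in each
-+	 * branch is used only on the failure path below; on success the
-+	 * response is sent later, when SCST completes the function.
-+	 */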
-+ switch (function) {
-+ case ISCSI_FUNCTION_ABORT_TASK:
-+ rc = cmnd_abort_pre_checks(req, &status);
-+ if (rc == 0) {
-+ params.fn = SCST_ABORT_TASK;
-+ params.tag = (__force u32)req_hdr->rtt;
-+ params.tag_set = 1;
-+ params.lun = (uint8_t *)&req_hdr->lun;
-+ params.lun_len = sizeof(req_hdr->lun);
-+ params.lun_set = 1;
-+ params.cmd_sn = req_hdr->cmd_sn;
-+ params.cmd_sn_set = 1;
-+ rc = scst_rx_mgmt_fn(conn->session->scst_sess,
-+ &params);
-+ status = ISCSI_RESPONSE_FUNCTION_REJECTED;
-+ }
-+ break;
-+ case ISCSI_FUNCTION_ABORT_TASK_SET:
-+ params.fn = SCST_ABORT_TASK_SET;
-+ params.lun = (uint8_t *)&req_hdr->lun;
-+ params.lun_len = sizeof(req_hdr->lun);
-+ params.lun_set = 1;
-+ params.cmd_sn = req_hdr->cmd_sn;
-+ params.cmd_sn_set = 1;
-+ rc = scst_rx_mgmt_fn(conn->session->scst_sess,
-+ &params);
-+ status = ISCSI_RESPONSE_FUNCTION_REJECTED;
-+ break;
-+ case ISCSI_FUNCTION_CLEAR_TASK_SET:
-+ params.fn = SCST_CLEAR_TASK_SET;
-+ params.lun = (uint8_t *)&req_hdr->lun;
-+ params.lun_len = sizeof(req_hdr->lun);
-+ params.lun_set = 1;
-+ params.cmd_sn = req_hdr->cmd_sn;
-+ params.cmd_sn_set = 1;
-+ rc = scst_rx_mgmt_fn(conn->session->scst_sess,
-+ &params);
-+ status = ISCSI_RESPONSE_FUNCTION_REJECTED;
-+ break;
-+ case ISCSI_FUNCTION_CLEAR_ACA:
-+ params.fn = SCST_CLEAR_ACA;
-+ params.lun = (uint8_t *)&req_hdr->lun;
-+ params.lun_len = sizeof(req_hdr->lun);
-+ params.lun_set = 1;
-+ params.cmd_sn = req_hdr->cmd_sn;
-+ params.cmd_sn_set = 1;
-+ rc = scst_rx_mgmt_fn(conn->session->scst_sess,
-+ &params);
-+ status = ISCSI_RESPONSE_FUNCTION_REJECTED;
-+ break;
-+ case ISCSI_FUNCTION_TARGET_COLD_RESET:
-+ case ISCSI_FUNCTION_TARGET_WARM_RESET:
-+ params.fn = SCST_TARGET_RESET;
-+ params.cmd_sn = req_hdr->cmd_sn;
-+ params.cmd_sn_set = 1;
-+ rc = scst_rx_mgmt_fn(conn->session->scst_sess,
-+ &params);
-+ status = ISCSI_RESPONSE_FUNCTION_REJECTED;
-+ break;
-+ case ISCSI_FUNCTION_LOGICAL_UNIT_RESET:
-+ params.fn = SCST_LUN_RESET;
-+ params.lun = (uint8_t *)&req_hdr->lun;
-+ params.lun_len = sizeof(req_hdr->lun);
-+ params.lun_set = 1;
-+ params.cmd_sn = req_hdr->cmd_sn;
-+ params.cmd_sn_set = 1;
-+ rc = scst_rx_mgmt_fn(conn->session->scst_sess,
-+ &params);
-+ status = ISCSI_RESPONSE_FUNCTION_REJECTED;
-+ break;
-+ case ISCSI_FUNCTION_TASK_REASSIGN:
-+ rc = -1;
-+ status = ISCSI_RESPONSE_ALLEGIANCE_REASSIGNMENT_UNSUPPORTED;
-+ break;
-+ default:
-+ PRINT_ERROR("Unknown TM function %d", function);
-+ rc = -1;
-+ status = ISCSI_RESPONSE_FUNCTION_REJECTED;
-+ break;
-+ }
-+
-+reject:
-+ if (rc != 0)
-+ iscsi_send_task_mgmt_resp(req, status);
-+
-+ return;
-+}
-+
-+static void nop_out_exec(struct iscsi_cmnd *req)
-+{
-+ struct iscsi_cmnd *rsp;
-+ struct iscsi_nop_in_hdr *rsp_hdr;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("%p", req);
-+
-+ if (req->pdu.bhs.itt != ISCSI_RESERVED_TAG) {
-+ rsp = iscsi_alloc_main_rsp(req);
-+
-+ rsp_hdr = (struct iscsi_nop_in_hdr *)&rsp->pdu.bhs;
-+ rsp_hdr->opcode = ISCSI_OP_NOP_IN;
-+ rsp_hdr->flags = ISCSI_FLG_FINAL;
-+ rsp_hdr->itt = req->pdu.bhs.itt;
-+ rsp_hdr->ttt = ISCSI_RESERVED_TAG;
-+
-+ if (req->pdu.datasize)
-+ BUG_ON(req->sg == NULL);
-+ else
-+ BUG_ON(req->sg != NULL);
-+
-+ if (req->sg) {
-+ rsp->sg = req->sg;
-+ rsp->sg_cnt = req->sg_cnt;
-+ rsp->bufflen = req->bufflen;
-+ }
-+
-+ /* We already checked it in check_segment_length() */
-+ BUG_ON(get_pgcnt(req->pdu.datasize, 0) > ISCSI_CONN_IOV_MAX);
-+
-+ rsp->pdu.datasize = req->pdu.datasize;
-+ } else {
-+ bool found = false;
-+ struct iscsi_cmnd *r;
-+ struct iscsi_conn *conn = req->conn;
-+
-+		TRACE_DBG("Received Nop-In response (ttt 0x%08x)",
-+ be32_to_cpu(req->pdu.bhs.ttt));
-+
-+ spin_lock_bh(&conn->nop_req_list_lock);
-+ list_for_each_entry(r, &conn->nop_req_list,
-+ nop_req_list_entry) {
-+ if (req->pdu.bhs.ttt == r->pdu.bhs.ttt) {
-+ list_del(&r->nop_req_list_entry);
-+ found = true;
-+ break;
-+ }
-+ }
-+ spin_unlock_bh(&conn->nop_req_list_lock);
-+
-+ if (found)
-+ cmnd_put(r);
-+ else
-+ TRACE_MGMT_DBG("%s", "Got Nop-out response without "
-+ "corresponding Nop-In request");
-+ }
-+
-+ req_cmnd_release(req);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void logout_exec(struct iscsi_cmnd *req)
-+{
-+ struct iscsi_logout_req_hdr *req_hdr;
-+ struct iscsi_cmnd *rsp;
-+ struct iscsi_logout_rsp_hdr *rsp_hdr;
-+
-+ PRINT_INFO("Logout received from initiator %s",
-+ req->conn->session->initiator_name);
-+ TRACE_DBG("%p", req);
-+
-+ req_hdr = (struct iscsi_logout_req_hdr *)&req->pdu.bhs;
-+ rsp = iscsi_alloc_main_rsp(req);
-+ rsp_hdr = (struct iscsi_logout_rsp_hdr *)&rsp->pdu.bhs;
-+ rsp_hdr->opcode = ISCSI_OP_LOGOUT_RSP;
-+ rsp_hdr->flags = ISCSI_FLG_FINAL;
-+ rsp_hdr->itt = req_hdr->itt;
-+ rsp->should_close_conn = 1;
-+
-+ req_cmnd_release(req);
-+
-+ return;
-+}
-+
-+static void iscsi_cmnd_exec(struct iscsi_cmnd *cmnd)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("cmnd %p, op %x, SN %u", cmnd, cmnd_opcode(cmnd),
-+ cmnd->pdu.bhs.sn);
-+
-+ iscsi_extracheck_is_rd_thread(cmnd->conn);
-+
-+ if (cmnd_opcode(cmnd) == ISCSI_OP_SCSI_CMD) {
-+ if (cmnd->r2t_len_to_receive == 0)
-+ iscsi_restart_cmnd(cmnd);
-+ else if (cmnd->r2t_len_to_send != 0)
-+ send_r2t(cmnd);
-+ goto out;
-+ }
-+
-+ if (cmnd->prelim_compl_flags != 0) {
-+ TRACE_MGMT_DBG("Terminating prelim completed non-SCSI cmnd %p "
-+ "(op %x)", cmnd, cmnd_opcode(cmnd));
-+ req_cmnd_release(cmnd);
-+ goto out;
-+ }
-+
-+ switch (cmnd_opcode(cmnd)) {
-+ case ISCSI_OP_NOP_OUT:
-+ nop_out_exec(cmnd);
-+ break;
-+ case ISCSI_OP_SCSI_TASK_MGT_MSG:
-+ execute_task_management(cmnd);
-+ break;
-+ case ISCSI_OP_LOGOUT_CMD:
-+ logout_exec(cmnd);
-+ break;
-+ default:
-+ PRINT_CRIT_ERROR("Unexpected cmnd op %x", cmnd_opcode(cmnd));
-+ BUG();
-+ break;
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
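-+/*
-+ * Toggles TCP_CORK around PDU transmission so that the BHS and the payload
-+ * are coalesced into full frames; the get_fs()/set_fs() dance lets us pass
-+ * a kernel pointer to the sockopt handler.
-+ */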
-+static void set_cork(struct socket *sock, int on)
-+{
-+ int opt = on;
-+ mm_segment_t oldfs;
-+
-+ oldfs = get_fs();
-+ set_fs(get_ds());
-+ sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK,
-+ (void __force __user *)&opt, sizeof(opt));
-+ set_fs(oldfs);
-+ return;
-+}
-+
-+void cmnd_tx_start(struct iscsi_cmnd *cmnd)
-+{
-+ struct iscsi_conn *conn = cmnd->conn;
-+
-+ TRACE_DBG("conn %p, cmnd %p, opcode %x", conn, cmnd, cmnd_opcode(cmnd));
-+ iscsi_cmnd_set_length(&cmnd->pdu);
-+
-+ iscsi_extracheck_is_wr_thread(conn);
-+
-+ set_cork(conn->sock, 1);
-+
-+ conn->write_iop = conn->write_iov;
-+ conn->write_iop->iov_base = (void __force __user *)(&cmnd->pdu.bhs);
-+ conn->write_iop->iov_len = sizeof(cmnd->pdu.bhs);
-+ conn->write_iop_used = 1;
-+ conn->write_size = sizeof(cmnd->pdu.bhs) + cmnd->pdu.datasize;
-+ conn->write_offset = 0;
-+
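-+	/*
-+	 * R2T and target-initiated Nop-In PDUs carry the current StatSN
-+	 * without advancing it (set_stat_sn = 0), Data-In advances StatSN
-+	 * only on its final PDU, and all other response PDUs always
-+	 * advance it.
-+	 */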
-+ switch (cmnd_opcode(cmnd)) {
-+ case ISCSI_OP_NOP_IN:
-+ if (cmnd->pdu.bhs.itt == ISCSI_RESERVED_TAG)
-+ cmnd->pdu.bhs.sn = (__force u32)cmnd_set_sn(cmnd, 0);
-+ else
-+ cmnd_set_sn(cmnd, 1);
-+ break;
-+ case ISCSI_OP_SCSI_RSP:
-+ cmnd_set_sn(cmnd, 1);
-+ break;
-+ case ISCSI_OP_SCSI_TASK_MGT_RSP:
-+ cmnd_set_sn(cmnd, 1);
-+ break;
-+ case ISCSI_OP_TEXT_RSP:
-+ cmnd_set_sn(cmnd, 1);
-+ break;
-+ case ISCSI_OP_SCSI_DATA_IN:
-+ {
-+ struct iscsi_data_in_hdr *rsp =
-+ (struct iscsi_data_in_hdr *)&cmnd->pdu.bhs;
-+ u32 offset = be32_to_cpu(rsp->buffer_offset);
-+
-+ TRACE_DBG("cmnd %p, offset %u, datasize %u, bufflen %u", cmnd,
-+ offset, cmnd->pdu.datasize, cmnd->bufflen);
-+
-+ BUG_ON(offset > cmnd->bufflen);
-+ BUG_ON(offset + cmnd->pdu.datasize > cmnd->bufflen);
-+
-+ conn->write_offset = offset;
-+
-+ cmnd_set_sn(cmnd, (rsp->flags & ISCSI_FLG_FINAL) ? 1 : 0);
-+ break;
-+ }
-+ case ISCSI_OP_LOGOUT_RSP:
-+ cmnd_set_sn(cmnd, 1);
-+ break;
-+ case ISCSI_OP_R2T:
-+ cmnd->pdu.bhs.sn = (__force u32)cmnd_set_sn(cmnd, 0);
-+ break;
-+ case ISCSI_OP_ASYNC_MSG:
-+ cmnd_set_sn(cmnd, 1);
-+ break;
-+ case ISCSI_OP_REJECT:
-+ cmnd_set_sn(cmnd, 1);
-+ break;
-+ default:
-+ PRINT_ERROR("Unexpected cmnd op %x", cmnd_opcode(cmnd));
-+ break;
-+ }
-+
-+ iscsi_dump_pdu(&cmnd->pdu);
-+ return;
-+}
-+
-+void cmnd_tx_end(struct iscsi_cmnd *cmnd)
-+{
-+ struct iscsi_conn *conn = cmnd->conn;
-+
-+ TRACE_DBG("%p:%x (should_close_conn %d, should_close_all_conn %d)",
-+ cmnd, cmnd_opcode(cmnd), cmnd->should_close_conn,
-+ cmnd->should_close_all_conn);
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ switch (cmnd_opcode(cmnd)) {
-+ case ISCSI_OP_NOP_IN:
-+ case ISCSI_OP_SCSI_RSP:
-+ case ISCSI_OP_SCSI_TASK_MGT_RSP:
-+ case ISCSI_OP_TEXT_RSP:
-+ case ISCSI_OP_R2T:
-+ case ISCSI_OP_ASYNC_MSG:
-+ case ISCSI_OP_REJECT:
-+ case ISCSI_OP_SCSI_DATA_IN:
-+ case ISCSI_OP_LOGOUT_RSP:
-+ break;
-+ default:
-+		PRINT_CRIT_ERROR("Unexpected cmnd op %x", cmnd_opcode(cmnd));
-+ BUG();
-+ break;
-+ }
-+#endif
-+
-+ if (unlikely(cmnd->should_close_conn)) {
-+ if (cmnd->should_close_all_conn) {
-+ PRINT_INFO("Closing all connections for target %x at "
-+ "initiator's %s request",
-+ cmnd->conn->session->target->tid,
-+ conn->session->initiator_name);
-+ target_del_all_sess(cmnd->conn->session->target, 0);
-+ } else {
-+ PRINT_INFO("Closing connection at initiator's %s "
-+ "request", conn->session->initiator_name);
-+ mark_conn_closed(conn);
-+ }
-+ }
-+
-+ set_cork(cmnd->conn->sock, 0);
-+ return;
-+}
-+
-+/*
-+ * Push the command for execution. This function reorders commands.
-+ * Called from the read thread.
-+ *
-+ * Basically, since we don't support MC/S and TCP guarantees data delivery
-+ * order, all that SN stuff isn't needed at all (the command delivery order
-+ * is the natural command execution order), but the insane iSCSI spec
-+ * requires us to check it and we have to, because some crazy initiators can
-+ * rely on the SN-based order and reorder requests during sending. For all
-+ * other, normal initiators all this code is a NOP.
-+ */
-+static void iscsi_push_cmnd(struct iscsi_cmnd *cmnd)
-+{
-+ struct iscsi_session *session = cmnd->conn->session;
-+ struct list_head *entry;
-+ u32 cmd_sn;
-+
-+ TRACE_DBG("cmnd %p, iSCSI opcode %x, sn %u, exp sn %u", cmnd,
-+ cmnd_opcode(cmnd), cmnd->pdu.bhs.sn, session->exp_cmd_sn);
-+
-+ iscsi_extracheck_is_rd_thread(cmnd->conn);
-+
-+ BUG_ON(cmnd->parent_req != NULL);
-+
-+ if (cmnd->pdu.bhs.opcode & ISCSI_OP_IMMEDIATE) {
-+ TRACE_DBG("Immediate cmd %p (cmd_sn %u)", cmnd,
-+ cmnd->pdu.bhs.sn);
-+ iscsi_cmnd_exec(cmnd);
-+ goto out;
-+ }
-+
-+ spin_lock(&session->sn_lock);
-+
-+ cmd_sn = cmnd->pdu.bhs.sn;
-+ if (cmd_sn == session->exp_cmd_sn) {
-+ while (1) {
-+ session->exp_cmd_sn = ++cmd_sn;
-+
-+ if (unlikely(session->tm_active > 0)) {
-+ if (before(cmd_sn, session->tm_sn)) {
-+ struct iscsi_conn *conn = cmnd->conn;
-+
-+ spin_unlock(&session->sn_lock);
-+
-+ spin_lock_bh(&conn->cmd_list_lock);
-+ __cmnd_abort(cmnd);
-+ spin_unlock_bh(&conn->cmd_list_lock);
-+
-+ spin_lock(&session->sn_lock);
-+ }
-+ iscsi_check_send_delayed_tm_resp(session);
-+ }
-+
-+ spin_unlock(&session->sn_lock);
-+
-+ iscsi_cmnd_exec(cmnd);
-+
-+ spin_lock(&session->sn_lock);
-+
-+ if (list_empty(&session->pending_list))
-+ break;
-+ cmnd = list_entry(session->pending_list.next,
-+ struct iscsi_cmnd,
-+ pending_list_entry);
-+ if (cmnd->pdu.bhs.sn != cmd_sn)
-+ break;
-+
-+ list_del(&cmnd->pending_list_entry);
-+ cmnd->pending = 0;
-+
-+ TRACE_MGMT_DBG("Processing pending cmd %p (cmd_sn %u)",
-+ cmnd, cmd_sn);
-+ }
-+ } else {
-+ int drop = 0;
-+
-+ TRACE_DBG("Pending cmd %p (cmd_sn %u, exp_cmd_sn %u)",
-+ cmnd, cmd_sn, session->exp_cmd_sn);
-+
-+ /*
-+		 * iSCSI RFC 3720: "The target MUST silently ignore any
-+		 * non-immediate command outside of [from ExpCmdSN to MaxCmdSN
-+		 * inclusive] range". But we won't honor the MaxCmdSN
-+		 * requirement, because we adjust MaxCmdSN from a separate
-+		 * write thread, so in rare cases an initiator can legally
-+		 * send a command with CmdSN > MaxCmdSN. It won't hurt
-+		 * anything; in the worst case it will lead to an additional
-+		 * QUEUE FULL status.
-+ */
-+
-+ if (unlikely(before(cmd_sn, session->exp_cmd_sn))) {
-+ TRACE_MGMT_DBG("Ignoring out of expected range cmd_sn "
-+ "(sn %u, exp_sn %u, cmd %p, op %x, CDB op %x)",
-+ cmd_sn, session->exp_cmd_sn, cmnd,
-+ cmnd_opcode(cmnd), cmnd_scsicode(cmnd));
-+ drop = 1;
-+ }
-+
-+#if 0
-+ if (unlikely(after(cmd_sn, session->exp_cmd_sn +
-+ iscsi_get_allowed_cmds(session)))) {
-+ TRACE_MGMT_DBG("Too large cmd_sn %u (exp_cmd_sn %u, "
-+ "max_sn %u)", cmd_sn, session->exp_cmd_sn,
-+ iscsi_get_allowed_cmds(session));
-+ drop = 1;
-+ }
-+#endif
-+
-+ spin_unlock(&session->sn_lock);
-+
-+ if (unlikely(drop)) {
-+ req_cmnd_release_force(cmnd);
-+ goto out;
-+ }
-+
-+ if (unlikely(test_bit(ISCSI_CMD_ABORTED,
-+ &cmnd->prelim_compl_flags))) {
-+ struct iscsi_cmnd *tm_clone;
-+
-+ TRACE_MGMT_DBG("Aborted pending cmnd %p, creating TM "
-+ "clone (scst cmd %p, state %d)", cmnd,
-+ cmnd->scst_cmd, cmnd->scst_state);
-+
-+ tm_clone = iscsi_create_tm_clone(cmnd);
-+ if (tm_clone != NULL) {
-+ iscsi_cmnd_exec(cmnd);
-+ cmnd = tm_clone;
-+ }
-+ }
-+
-+ TRACE_MGMT_DBG("Pending cmnd %p (op %x, sn %u, exp sn %u)",
-+ cmnd, cmnd_opcode(cmnd), cmd_sn, session->exp_cmd_sn);
-+
-+ spin_lock(&session->sn_lock);
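-+		/*
-+		 * Keep the pending list sorted by CmdSN, so the resume loop
-+		 * above can drain it in order as exp_cmd_sn advances.
-+		 */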
-+ list_for_each(entry, &session->pending_list) {
-+ struct iscsi_cmnd *tmp =
-+ list_entry(entry, struct iscsi_cmnd,
-+ pending_list_entry);
-+ if (before(cmd_sn, tmp->pdu.bhs.sn))
-+ break;
-+ }
-+ list_add_tail(&cmnd->pending_list_entry, entry);
-+ cmnd->pending = 1;
-+ }
-+
-+ spin_unlock(&session->sn_lock);
-+out:
-+ return;
-+}
-+
-+static int check_segment_length(struct iscsi_cmnd *cmnd)
-+{
-+ struct iscsi_conn *conn = cmnd->conn;
-+ struct iscsi_session *session = conn->session;
-+
-+ if (unlikely(cmnd->pdu.datasize > session->sess_params.max_recv_data_length)) {
-+ PRINT_ERROR("Initiator %s violated negotiated parameters: "
-+ "data too long (ITT %x, datasize %u, "
-+			"max_recv_data_length %u)", session->initiator_name,
-+ cmnd->pdu.bhs.itt, cmnd->pdu.datasize,
-+ session->sess_params.max_recv_data_length);
-+ mark_conn_closed(conn);
-+ return -EINVAL;
-+ }
-+ return 0;
-+}
-+
-+int cmnd_rx_start(struct iscsi_cmnd *cmnd)
-+{
-+ int res, rc = 0;
-+
-+ iscsi_dump_pdu(&cmnd->pdu);
-+
-+ res = check_segment_length(cmnd);
-+ if (res != 0)
-+ goto out;
-+
-+ cmnd->pdu.bhs.sn = be32_to_cpu((__force __be32)cmnd->pdu.bhs.sn);
-+
-+ switch (cmnd_opcode(cmnd)) {
-+ case ISCSI_OP_SCSI_CMD:
-+ res = scsi_cmnd_start(cmnd);
-+ if (unlikely(res < 0))
-+ goto out;
-+ update_stat_sn(cmnd);
-+ break;
-+ case ISCSI_OP_SCSI_DATA_OUT:
-+ res = data_out_start(cmnd);
-+ goto out;
-+ case ISCSI_OP_NOP_OUT:
-+ rc = nop_out_start(cmnd);
-+ break;
-+ case ISCSI_OP_SCSI_TASK_MGT_MSG:
-+ case ISCSI_OP_LOGOUT_CMD:
-+ update_stat_sn(cmnd);
-+ break;
-+ case ISCSI_OP_TEXT_CMD:
-+ case ISCSI_OP_SNACK_CMD:
-+ default:
-+ rc = -ISCSI_REASON_UNSUPPORTED_COMMAND;
-+ break;
-+ }
-+
-+ if (unlikely(rc < 0)) {
-+ PRINT_ERROR("Error %d (iSCSI opcode %x, ITT %x)", rc,
-+ cmnd_opcode(cmnd), cmnd->pdu.bhs.itt);
-+ res = create_reject_rsp(cmnd, -rc, true);
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+void cmnd_rx_end(struct iscsi_cmnd *cmnd)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("cmnd %p, opcode %x", cmnd, cmnd_opcode(cmnd));
-+
-+ cmnd->conn->last_rcv_time = jiffies;
-+ TRACE_DBG("Updated last_rcv_time %ld", cmnd->conn->last_rcv_time);
-+
-+ switch (cmnd_opcode(cmnd)) {
-+ case ISCSI_OP_SCSI_CMD:
-+ case ISCSI_OP_NOP_OUT:
-+ case ISCSI_OP_SCSI_TASK_MGT_MSG:
-+ case ISCSI_OP_LOGOUT_CMD:
-+ iscsi_push_cmnd(cmnd);
-+ goto out;
-+ case ISCSI_OP_SCSI_DATA_OUT:
-+ data_out_end(cmnd);
-+ break;
-+ default:
-+ PRINT_ERROR("Unexpected cmnd op %x", cmnd_opcode(cmnd));
-+ break;
-+ }
-+
-+ req_cmnd_release(cmnd);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+#if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
-+static int iscsi_alloc_data_buf(struct scst_cmd *cmd)
-+{
-+ /*
-+	 * sock->ops->sendpage() is an async zero-copy operation, so we
-+	 * must be sure not to free and reuse the command's buffer before
-+	 * the sending has been completed by the network layers. That is
-+	 * guaranteed only if we don't use the SGV cache.
-+ */
-+ EXTRACHECKS_BUG_ON(!(scst_cmd_get_data_direction(cmd) & SCST_DATA_READ));
-+ scst_cmd_set_no_sgv(cmd);
-+ return 1;
-+}
-+#endif
-+
-+static void iscsi_preprocessing_done(struct scst_cmd *scst_cmd)
-+{
-+ struct iscsi_cmnd *req = (struct iscsi_cmnd *)
-+ scst_cmd_get_tgt_priv(scst_cmd);
-+
-+ TRACE_DBG("req %p", req);
-+
-+ if (req->conn->rx_task == current)
-+ req->scst_state = ISCSI_CMD_STATE_AFTER_PREPROC;
-+ else {
-+ /*
-+ * We wait for the state change without any protection, so
-+ * without cmnd_get() it is possible that req will die
-+ * "immediately" after the state assignment and
-+ * iscsi_make_conn_rd_active() will operate on dead data.
-+ * We use the ordered version of cmnd_get(), because "get"
-+ * must be done before the state assignment.
-+ *
-+		 * We are protected from a race on calling cmnd_rx_continue(),
-+		 * because there can be only one read thread processing a
-+		 * connection.
-+ */
-+ cmnd_get(req);
-+ req->scst_state = ISCSI_CMD_STATE_AFTER_PREPROC;
-+ iscsi_make_conn_rd_active(req->conn);
-+ if (unlikely(req->conn->closing)) {
-+ TRACE_DBG("Waking up closing conn %p", req->conn);
-+ wake_up(&req->conn->read_state_waitQ);
-+ }
-+ cmnd_put(req);
-+ }
-+
-+ return;
-+}
-+
-+/* No locks */
-+static void iscsi_try_local_processing(struct iscsi_cmnd *req)
-+{
-+ struct iscsi_conn *conn = req->conn;
-+ struct iscsi_thread_pool *p = conn->conn_thr_pool;
-+ bool local;
-+
-+ TRACE_ENTRY();
-+
-+ spin_lock_bh(&p->wr_lock);
-+ switch (conn->wr_state) {
-+ case ISCSI_CONN_WR_STATE_IN_LIST:
-+ list_del(&conn->wr_list_entry);
-+		/* fall through */
-+ case ISCSI_CONN_WR_STATE_IDLE:
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ conn->wr_task = current;
-+#endif
-+ conn->wr_state = ISCSI_CONN_WR_STATE_PROCESSING;
-+ conn->wr_space_ready = 0;
-+ local = true;
-+ break;
-+ default:
-+ local = false;
-+ break;
-+ }
-+ spin_unlock_bh(&p->wr_lock);
-+
-+ if (local) {
-+ int rc = 1;
-+
-+ do {
-+ rc = iscsi_send(conn);
-+ if (rc <= 0)
-+ break;
-+ } while (req->not_processed_rsp_cnt != 0);
-+
-+ spin_lock_bh(&p->wr_lock);
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ conn->wr_task = NULL;
-+#endif
-+ if ((rc == -EAGAIN) && !conn->wr_space_ready) {
-+ TRACE_DBG("EAGAIN, setting WR_STATE_SPACE_WAIT "
-+ "(conn %p)", conn);
-+ conn->wr_state = ISCSI_CONN_WR_STATE_SPACE_WAIT;
-+ } else if (test_write_ready(conn)) {
-+ list_add_tail(&conn->wr_list_entry, &p->wr_list);
-+ conn->wr_state = ISCSI_CONN_WR_STATE_IN_LIST;
-+ wake_up(&p->wr_waitQ);
-+ } else
-+ conn->wr_state = ISCSI_CONN_WR_STATE_IDLE;
-+ spin_unlock_bh(&p->wr_lock);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int iscsi_xmit_response(struct scst_cmd *scst_cmd)
-+{
-+ int is_send_status = scst_cmd_get_is_send_status(scst_cmd);
-+ struct iscsi_cmnd *req = (struct iscsi_cmnd *)
-+ scst_cmd_get_tgt_priv(scst_cmd);
-+ struct iscsi_conn *conn = req->conn;
-+ int status = scst_cmd_get_status(scst_cmd);
-+ u8 *sense = scst_cmd_get_sense_buffer(scst_cmd);
-+ int sense_len = scst_cmd_get_sense_buffer_len(scst_cmd);
-+ struct iscsi_cmnd *wr_rsp, *our_rsp;
-+
-+ EXTRACHECKS_BUG_ON(scst_cmd_atomic(scst_cmd));
-+
-+ scst_cmd_set_tgt_priv(scst_cmd, NULL);
-+
-+ EXTRACHECKS_BUG_ON(req->scst_state != ISCSI_CMD_STATE_RESTARTED);
-+
-+ if (unlikely(scst_cmd_aborted(scst_cmd)))
-+ set_bit(ISCSI_CMD_ABORTED, &req->prelim_compl_flags);
-+
-+ if (unlikely(req->prelim_compl_flags != 0)) {
-+ if (test_bit(ISCSI_CMD_ABORTED, &req->prelim_compl_flags)) {
-+ TRACE_MGMT_DBG("req %p (scst_cmd %p) aborted", req,
-+ req->scst_cmd);
-+ scst_set_delivery_status(req->scst_cmd,
-+ SCST_CMD_DELIVERY_ABORTED);
-+ req->scst_state = ISCSI_CMD_STATE_PROCESSED;
-+ req_cmnd_release_force(req);
-+ goto out;
-+ }
-+
-+ TRACE_DBG("Prelim completed req %p", req);
-+
-+ /*
-+		 * We could have preliminarily completed req before we
-+		 * knew its device, so check that we return the correct
-+		 * sense format.
-+ */
-+ scst_check_convert_sense(scst_cmd);
-+
-+ if (!req->own_sg) {
-+ req->sg = scst_cmd_get_sg(scst_cmd);
-+ req->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
-+ }
-+ } else {
-+ EXTRACHECKS_BUG_ON(req->own_sg);
-+ req->sg = scst_cmd_get_sg(scst_cmd);
-+ req->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
-+ }
-+
-+ req->bufflen = scst_cmd_get_adjusted_resp_data_len(scst_cmd);
-+
-+ req->scst_state = ISCSI_CMD_STATE_PROCESSED;
-+
-+ TRACE_DBG("req %p, is_send_status=%x, req->bufflen=%d, req->sg=%p, "
-+ "req->sg_cnt %d", req, is_send_status, req->bufflen, req->sg,
-+ req->sg_cnt);
-+
-+ EXTRACHECKS_BUG_ON(req->hashed);
-+ if (req->main_rsp != NULL)
-+ EXTRACHECKS_BUG_ON(cmnd_opcode(req->main_rsp) != ISCSI_OP_REJECT);
-+
-+ if (unlikely((req->bufflen != 0) && !is_send_status)) {
-+ PRINT_CRIT_ERROR("%s", "Sending DATA without STATUS is "
-+ "unsupported");
-+ scst_set_cmd_error(scst_cmd,
-+ SCST_LOAD_SENSE(scst_sense_hardw_error));
-+ BUG(); /* ToDo */
-+ }
-+
-+ /*
-+ * We need to decrement active_cmds before adding any responses into
-+	 * the write queue to eliminate a race in which all responses could
-+	 * be sent with a wrong MaxCmdSN.
-+ */
-+ if (likely(req->dec_active_cmds))
-+ iscsi_dec_active_cmds(req);
-+
-+ if (req->bufflen != 0) {
-+ /*
-+		 * The check above makes sure that is_send_status is set,
-+		 * so status is valid here, but in the future that could change.
-+ * ToDo
-+ */
-+ if ((status != SAM_STAT_CHECK_CONDITION) &&
-+ ((cmnd_hdr(req)->flags & (ISCSI_CMD_WRITE|ISCSI_CMD_READ)) !=
-+ (ISCSI_CMD_WRITE|ISCSI_CMD_READ))) {
-+ send_data_rsp(req, status, is_send_status);
-+ } else {
-+ struct iscsi_cmnd *rsp;
-+ send_data_rsp(req, 0, 0);
-+ if (is_send_status) {
-+ rsp = create_status_rsp(req, status, sense,
-+ sense_len);
-+ iscsi_cmnd_init_write(rsp, 0);
-+ }
-+ }
-+ } else if (is_send_status) {
-+ struct iscsi_cmnd *rsp;
-+ rsp = create_status_rsp(req, status, sense, sense_len);
-+ iscsi_cmnd_init_write(rsp, 0);
-+ }
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ else
-+ BUG();
-+#endif
-+
-+ /*
-+ * There's no need for protection, since we are not going to
-+ * dereference them.
-+ */
-+ wr_rsp = list_entry(conn->write_list.next, struct iscsi_cmnd,
-+ write_list_entry);
-+ our_rsp = list_entry(req->rsp_cmd_list.next, struct iscsi_cmnd,
-+ rsp_cmd_list_entry);
-+ if (wr_rsp == our_rsp) {
-+ /*
-+ * This is our rsp, so let's try to process it locally to
-+ * decrease latency. We need to call pre_release before
-+ * processing to handle some error recovery cases.
-+ */
-+ if (scst_get_active_cmd_count(scst_cmd) <= 2) {
-+ req_cmnd_pre_release(req);
-+ iscsi_try_local_processing(req);
-+ cmnd_put(req);
-+ } else {
-+ /*
-+ * There's too much backend activity, so it could be
-+ * better to push it to the write thread.
-+ */
-+ goto out_push_to_wr_thread;
-+ }
-+ } else
-+ goto out_push_to_wr_thread;
-+
-+out:
-+ return SCST_TGT_RES_SUCCESS;
-+
-+out_push_to_wr_thread:
-+ TRACE_DBG("Waking up write thread (conn %p)", conn);
-+ req_cmnd_release(req);
-+ iscsi_make_conn_wr_active(conn);
-+ goto out;
-+}
-+
-+/* Called under sn_lock */
-+static bool iscsi_is_delay_tm_resp(struct iscsi_cmnd *rsp)
-+{
-+	bool res = false;
-+ struct iscsi_task_mgt_hdr *req_hdr =
-+ (struct iscsi_task_mgt_hdr *)&rsp->parent_req->pdu.bhs;
-+ int function = req_hdr->function & ISCSI_FUNCTION_MASK;
-+ struct iscsi_session *sess = rsp->conn->session;
-+
-+ TRACE_ENTRY();
-+
-+ /* This should be checked for immediate TM commands as well */
-+
-+ switch (function) {
-+ default:
-+ if (before(sess->exp_cmd_sn, req_hdr->cmd_sn))
-+			res = true;
-+ break;
-+ }
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* Called under sn_lock, but might drop it inside, then reacquire */
-+static void iscsi_check_send_delayed_tm_resp(struct iscsi_session *sess)
-+ __acquires(&sn_lock)
-+ __releases(&sn_lock)
-+{
-+ struct iscsi_cmnd *tm_rsp = sess->tm_rsp;
-+
-+ TRACE_ENTRY();
-+
-+ if (tm_rsp == NULL)
-+ goto out;
-+
-+ if (iscsi_is_delay_tm_resp(tm_rsp))
-+ goto out;
-+
-+ TRACE_MGMT_DBG("Sending delayed rsp %p", tm_rsp);
-+
-+ sess->tm_rsp = NULL;
-+ sess->tm_active--;
-+
-+ spin_unlock(&sess->sn_lock);
-+
-+ BUG_ON(sess->tm_active < 0);
-+
-+ iscsi_cmnd_init_write(tm_rsp, ISCSI_INIT_WRITE_WAKE);
-+
-+ spin_lock(&sess->sn_lock);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void iscsi_send_task_mgmt_resp(struct iscsi_cmnd *req, int status)
-+{
-+ struct iscsi_cmnd *rsp;
-+ struct iscsi_task_mgt_hdr *req_hdr =
-+ (struct iscsi_task_mgt_hdr *)&req->pdu.bhs;
-+ struct iscsi_task_rsp_hdr *rsp_hdr;
-+ struct iscsi_session *sess = req->conn->session;
-+ int fn = req_hdr->function & ISCSI_FUNCTION_MASK;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("TM req %p finished", req);
-+ TRACE(TRACE_MGMT, "iSCSI TM fn %d finished, status %d", fn, status);
-+
-+ rsp = iscsi_alloc_rsp(req);
-+ rsp_hdr = (struct iscsi_task_rsp_hdr *)&rsp->pdu.bhs;
-+
-+ rsp_hdr->opcode = ISCSI_OP_SCSI_TASK_MGT_RSP;
-+ rsp_hdr->flags = ISCSI_FLG_FINAL;
-+ rsp_hdr->itt = req_hdr->itt;
-+ rsp_hdr->response = status;
-+
-+ if (fn == ISCSI_FUNCTION_TARGET_COLD_RESET) {
-+ rsp->should_close_conn = 1;
-+ rsp->should_close_all_conn = 1;
-+ }
-+
-+ BUG_ON(sess->tm_rsp != NULL);
-+
-+ spin_lock(&sess->sn_lock);
-+ if (iscsi_is_delay_tm_resp(rsp)) {
-+ TRACE_MGMT_DBG("Delaying TM fn %d response %p "
-+ "(req %p), because not all affected commands "
-+ "received (TM cmd sn %u, exp sn %u)",
-+ req_hdr->function & ISCSI_FUNCTION_MASK, rsp, req,
-+ req_hdr->cmd_sn, sess->exp_cmd_sn);
-+ sess->tm_rsp = rsp;
-+ spin_unlock(&sess->sn_lock);
-+ goto out_release;
-+ }
-+ sess->tm_active--;
-+ spin_unlock(&sess->sn_lock);
-+
-+ BUG_ON(sess->tm_active < 0);
-+
-+ iscsi_cmnd_init_write(rsp, ISCSI_INIT_WRITE_WAKE);
-+
-+out_release:
-+ req_cmnd_release(req);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static inline int iscsi_get_mgmt_response(int status)
-+{
-+ switch (status) {
-+ case SCST_MGMT_STATUS_SUCCESS:
-+ return ISCSI_RESPONSE_FUNCTION_COMPLETE;
-+
-+ case SCST_MGMT_STATUS_TASK_NOT_EXIST:
-+ return ISCSI_RESPONSE_UNKNOWN_TASK;
-+
-+ case SCST_MGMT_STATUS_LUN_NOT_EXIST:
-+ return ISCSI_RESPONSE_UNKNOWN_LUN;
-+
-+ case SCST_MGMT_STATUS_FN_NOT_SUPPORTED:
-+ return ISCSI_RESPONSE_FUNCTION_UNSUPPORTED;
-+
-+ case SCST_MGMT_STATUS_REJECTED:
-+ case SCST_MGMT_STATUS_FAILED:
-+ default:
-+ return ISCSI_RESPONSE_FUNCTION_REJECTED;
-+ }
-+}
-+
-+static void iscsi_task_mgmt_fn_done(struct scst_mgmt_cmd *scst_mcmd)
-+{
-+ int fn = scst_mgmt_cmd_get_fn(scst_mcmd);
-+ struct iscsi_cmnd *req = (struct iscsi_cmnd *)
-+ scst_mgmt_cmd_get_tgt_priv(scst_mcmd);
-+ int status =
-+ iscsi_get_mgmt_response(scst_mgmt_cmd_get_status(scst_mcmd));
-+
-+ if ((status == ISCSI_RESPONSE_UNKNOWN_TASK) &&
-+ (fn == SCST_ABORT_TASK)) {
-+ /* If we are here, we found the task, so must succeed */
-+ status = ISCSI_RESPONSE_FUNCTION_COMPLETE;
-+ }
-+
-+ TRACE_MGMT_DBG("req %p, scst_mcmd %p, fn %d, scst status %d, status %d",
-+ req, scst_mcmd, fn, scst_mgmt_cmd_get_status(scst_mcmd),
-+ status);
-+
-+ switch (fn) {
-+ case SCST_NEXUS_LOSS_SESS:
-+ case SCST_ABORT_ALL_TASKS_SESS:
-+ /* They are internal */
-+ break;
-+ default:
-+ iscsi_send_task_mgmt_resp(req, status);
-+ scst_mgmt_cmd_set_tgt_priv(scst_mcmd, NULL);
-+ break;
-+ }
-+ return;
-+}
-+
-+static int iscsi_scsi_aen(struct scst_aen *aen)
-+{
-+ int res = SCST_AEN_RES_SUCCESS;
-+ __be64 lun = scst_aen_get_lun(aen);
-+ const uint8_t *sense = scst_aen_get_sense(aen);
-+ int sense_len = scst_aen_get_sense_len(aen);
-+ struct iscsi_session *sess = scst_sess_get_tgt_priv(
-+ scst_aen_get_sess(aen));
-+ struct iscsi_conn *conn;
-+ bool found;
-+ struct iscsi_cmnd *fake_req, *rsp;
-+ struct iscsi_async_msg_hdr *rsp_hdr;
-+ struct scatterlist *sg;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("SCSI AEN to sess %p (initiator %s)", sess,
-+ sess->initiator_name);
-+
-+ mutex_lock(&sess->target->target_mutex);
-+
-+ found = false;
-+ list_for_each_entry_reverse(conn, &sess->conn_list, conn_list_entry) {
-+ if (!test_bit(ISCSI_CONN_SHUTTINGDOWN, &conn->conn_aflags) &&
-+ (conn->conn_reinst_successor == NULL)) {
-+ found = true;
-+ break;
-+ }
-+ }
-+ if (!found) {
-+ TRACE_MGMT_DBG("Unable to find alive conn for sess %p", sess);
-+ goto out_err;
-+ }
-+
-+ /* Create a fake request */
-+ fake_req = cmnd_alloc(conn, NULL);
-+ if (fake_req == NULL) {
-+ PRINT_ERROR("%s", "Unable to alloc fake AEN request");
-+ goto out_err;
-+ }
-+
-+ mutex_unlock(&sess->target->target_mutex);
-+
-+ rsp = iscsi_alloc_main_rsp(fake_req);
-+ if (rsp == NULL) {
-+ PRINT_ERROR("%s", "Unable to alloc AEN rsp");
-+ goto out_err_free_req;
-+ }
-+
-+ fake_req->scst_state = ISCSI_CMD_STATE_AEN;
-+ fake_req->scst_aen = aen;
-+
-+ rsp_hdr = (struct iscsi_async_msg_hdr *)&rsp->pdu.bhs;
-+
-+ rsp_hdr->opcode = ISCSI_OP_ASYNC_MSG;
-+ rsp_hdr->flags = ISCSI_FLG_FINAL;
-+ rsp_hdr->lun = lun; /* it's already in SCSI form */
-+ rsp_hdr->ffffffff = __constant_cpu_to_be32(0xffffffff);
-+ rsp_hdr->async_event = ISCSI_ASYNC_SCSI;
-+
-+ sg = rsp->sg = rsp->rsp_sg;
-+ rsp->sg_cnt = 2;
-+ rsp->own_sg = 1;
-+
-+ sg_init_table(sg, 2);
-+ sg_set_buf(&sg[0], &rsp->sense_hdr, sizeof(rsp->sense_hdr));
-+ sg_set_buf(&sg[1], sense, sense_len);
-+
-+ rsp->sense_hdr.length = cpu_to_be16(sense_len);
-+ rsp->pdu.datasize = sizeof(rsp->sense_hdr) + sense_len;
-+ rsp->bufflen = rsp->pdu.datasize;
-+
-+ req_cmnd_release(fake_req);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_err_free_req:
-+ req_cmnd_release(fake_req);
-+
-+out_err:
-+ mutex_unlock(&sess->target->target_mutex);
-+ res = SCST_AEN_RES_FAILED;
-+ goto out;
-+}
-+
-+static int iscsi_cpu_mask_changed_aen(struct scst_aen *aen)
-+{
-+ int res = SCST_AEN_RES_SUCCESS;
-+ struct scst_session *scst_sess = scst_aen_get_sess(aen);
-+ struct iscsi_session *sess = scst_sess_get_tgt_priv(scst_sess);
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("CPU mask changed AEN to sess %p (initiator %s)", sess,
-+ sess->initiator_name);
-+
-+ mutex_lock(&sess->target->target_mutex);
-+ iscsi_sess_force_close(sess);
-+ mutex_unlock(&sess->target->target_mutex);
-+
-+ scst_aen_done(aen);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int iscsi_report_aen(struct scst_aen *aen)
-+{
-+ int res;
-+ int event_fn = scst_aen_get_event_fn(aen);
-+
-+ TRACE_ENTRY();
-+
-+ switch (event_fn) {
-+ case SCST_AEN_SCSI:
-+ res = iscsi_scsi_aen(aen);
-+ break;
-+ case SCST_AEN_CPU_MASK_CHANGED:
-+ res = iscsi_cpu_mask_changed_aen(aen);
-+ break;
-+ default:
-+ TRACE_MGMT_DBG("Unsupported AEN %d", event_fn);
-+ res = SCST_AEN_RES_NOT_SUPPORTED;
-+ break;
-+ }
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int iscsi_get_initiator_port_transport_id(struct scst_tgt *tgt,
-+ struct scst_session *scst_sess, uint8_t **transport_id)
-+{
-+ struct iscsi_session *sess;
-+ int res = 0;
-+ union iscsi_sid sid;
-+ int tr_id_size;
-+ uint8_t *tr_id;
-+ uint8_t q;
-+
-+ TRACE_ENTRY();
-+
-+ if (scst_sess == NULL) {
-+ res = SCSI_TRANSPORTID_PROTOCOLID_ISCSI;
-+ goto out;
-+ }
-+
-+ sess = (struct iscsi_session *)scst_sess_get_tgt_priv(scst_sess);
-+
-+ sid = *(union iscsi_sid *)&sess->sid;
-+ sid.id.tsih = 0;
-+
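-+	/*
-+	 * Note: the snprintf() into the 1-byte buffer q below is used only
-+	 * to compute the length of the printed SID: its return value is the
-+	 * number of characters that would have been written.
-+	 */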
-+ tr_id_size = 4 + strlen(sess->initiator_name) + 5 +
-+ snprintf(&q, sizeof(q), "%llx", sid.id64) + 1;
-+ tr_id_size = (tr_id_size + 3) & -4;
-+
-+ tr_id = kzalloc(tr_id_size, GFP_KERNEL);
-+ if (tr_id == NULL) {
-+ PRINT_ERROR("Allocation of TransportID (size %d) failed",
-+ tr_id_size);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ tr_id[0] = 0x40 | SCSI_TRANSPORTID_PROTOCOLID_ISCSI;
-+ sprintf(&tr_id[4], "%s,i,0x%llx", sess->initiator_name, sid.id64);
-+
-+ put_unaligned(cpu_to_be16(tr_id_size - 4),
-+ (__be16 *)&tr_id[2]);
-+
-+ *transport_id = tr_id;
-+
-+ TRACE_DBG("Created tid '%s'", &tr_id[4]);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
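-+
-+/*
-+ * Illustrative sketch, not part of the driver: the buffer built above
-+ * follows the SPC iSCSI TransportID format. With the hypothetical
-+ * initiator name "iqn.2005-03.org.example:host1" and SID 0x23d000002,
-+ * the result would be laid out as:
-+ *
-+ *	byte 0:    0x40 | SCSI_TRANSPORTID_PROTOCOLID_ISCSI
-+ *	           (format code 01b, iSCSI protocol identifier)
-+ *	byte 1:    0x00 (reserved)
-+ *	bytes 2-3: ADDITIONAL LENGTH, big-endian size of the text field
-+ *	bytes 4-n: "iqn.2005-03.org.example:host1,i,0x23d000002", NUL
-+ *	           terminated and zero padded to a multiple of 4 bytes
-+ */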
-+
-+void iscsi_send_nop_in(struct iscsi_conn *conn)
-+{
-+ struct iscsi_cmnd *req, *rsp;
-+ struct iscsi_nop_in_hdr *rsp_hdr;
-+
-+ TRACE_ENTRY();
-+
-+ req = cmnd_alloc(conn, NULL);
-+ if (req == NULL) {
-+ PRINT_ERROR("%s", "Unable to alloc fake Nop-In request");
-+ goto out_err;
-+ }
-+
-+ rsp = iscsi_alloc_main_rsp(req);
-+ if (rsp == NULL) {
-+ PRINT_ERROR("%s", "Unable to alloc Nop-In rsp");
-+ goto out_err_free_req;
-+ }
-+
-+ cmnd_get(rsp);
-+
-+ rsp_hdr = (struct iscsi_nop_in_hdr *)&rsp->pdu.bhs;
-+ rsp_hdr->opcode = ISCSI_OP_NOP_IN;
-+ rsp_hdr->flags = ISCSI_FLG_FINAL;
-+ rsp_hdr->itt = ISCSI_RESERVED_TAG;
-+ rsp_hdr->ttt = (__force __be32)conn->nop_in_ttt++;
-+
-+ if (conn->nop_in_ttt == ISCSI_RESERVED_TAG_CPU32)
-+ conn->nop_in_ttt = 0;
-+
-+	/* It is assumed that all other fields are zeroed */
-+
-+ TRACE_DBG("Sending Nop-In request (ttt 0x%08x)", rsp_hdr->ttt);
-+ spin_lock_bh(&conn->nop_req_list_lock);
-+ list_add_tail(&rsp->nop_req_list_entry, &conn->nop_req_list);
-+ spin_unlock_bh(&conn->nop_req_list_lock);
-+
-+out_err_free_req:
-+ req_cmnd_release(req);
-+
-+out_err:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int iscsi_target_detect(struct scst_tgt_template *templ)
-+{
-+ /* Nothing to do */
-+ return 0;
-+}
-+
-+static int iscsi_target_release(struct scst_tgt *scst_tgt)
-+{
-+ /* Nothing to do */
-+ return 0;
-+}
-+
-+static struct scst_trace_log iscsi_local_trace_tbl[] = {
-+ { TRACE_D_WRITE, "d_write" },
-+ { TRACE_CONN_OC, "conn" },
-+ { TRACE_CONN_OC_DBG, "conn_dbg" },
-+ { TRACE_D_IOV, "iov" },
-+ { TRACE_D_DUMP_PDU, "pdu" },
-+ { TRACE_NET_PG, "net_page" },
-+ { 0, NULL }
-+};
-+
-+#define ISCSI_TRACE_TBL_HELP ", d_write, conn, conn_dbg, iov, pdu, net_page"
-+
-+static uint16_t iscsi_get_scsi_transport_version(struct scst_tgt *scst_tgt)
-+{
-+ return 0x0960; /* iSCSI */
-+}
-+
-+struct scst_tgt_template iscsi_template = {
-+ .name = "iscsi",
-+ .sg_tablesize = 0xFFFF /* no limit */,
-+ .threads_num = 0,
-+ .no_clustering = 1,
-+ .xmit_response_atomic = 0,
-+ .tgtt_attrs = iscsi_attrs,
-+ .tgt_attrs = iscsi_tgt_attrs,
-+ .sess_attrs = iscsi_sess_attrs,
-+ .enable_target = iscsi_enable_target,
-+ .is_target_enabled = iscsi_is_target_enabled,
-+ .add_target = iscsi_sysfs_add_target,
-+ .del_target = iscsi_sysfs_del_target,
-+ .mgmt_cmd = iscsi_sysfs_mgmt_cmd,
-+ .tgtt_optional_attributes = "IncomingUser, OutgoingUser",
-+ .tgt_optional_attributes = "IncomingUser, OutgoingUser, allowed_portal",
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ .default_trace_flags = ISCSI_DEFAULT_LOG_FLAGS,
-+ .trace_flags = &trace_flag,
-+ .trace_tbl = iscsi_local_trace_tbl,
-+ .trace_tbl_help = ISCSI_TRACE_TBL_HELP,
-+#endif
-+ .detect = iscsi_target_detect,
-+ .release = iscsi_target_release,
-+ .xmit_response = iscsi_xmit_response,
-+#if !defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
-+ .alloc_data_buf = iscsi_alloc_data_buf,
-+#endif
-+ .preprocessing_done = iscsi_preprocessing_done,
-+ .pre_exec = iscsi_pre_exec,
-+ .task_mgmt_affected_cmds_done = iscsi_task_mgmt_affected_cmds_done,
-+ .task_mgmt_fn_done = iscsi_task_mgmt_fn_done,
-+ .on_abort_cmd = iscsi_on_abort_cmd,
-+ .report_aen = iscsi_report_aen,
-+ .get_initiator_port_transport_id = iscsi_get_initiator_port_transport_id,
-+ .get_scsi_transport_version = iscsi_get_scsi_transport_version,
-+};
-+
-+int iscsi_threads_pool_get(const cpumask_t *cpu_mask,
-+ struct iscsi_thread_pool **out_pool)
-+{
-+ int res;
-+ struct iscsi_thread_pool *p;
-+ struct iscsi_thread *t, *tt;
-+ int i, j, count;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&iscsi_threads_pool_mutex);
-+
-+ list_for_each_entry(p, &iscsi_thread_pools_list,
-+ thread_pools_list_entry) {
-+ if ((cpu_mask == NULL) ||
-+ __cpus_equal(cpu_mask, &p->cpu_mask, nr_cpumask_bits)) {
-+ p->thread_pool_ref++;
-+ TRACE_DBG("iSCSI thread pool %p found (new ref %d)",
-+ p, p->thread_pool_ref);
-+ res = 0;
-+ goto out_unlock;
-+ }
-+ }
-+
-+ TRACE_DBG("%s", "Creating new iSCSI thread pool");
-+
-+ p = kzalloc(sizeof(*p), GFP_KERNEL);
-+ if (p == NULL) {
-+ PRINT_ERROR("Unable to allocate iSCSI thread pool (size %zd)",
-+ sizeof(*p));
-+ res = -ENOMEM;
-+ if (!list_empty(&iscsi_thread_pools_list)) {
-+ PRINT_WARNING("%s", "Using global iSCSI thread pool "
-+ "instead");
-+ p = list_entry(iscsi_thread_pools_list.next,
-+ struct iscsi_thread_pool,
-+ thread_pools_list_entry);
-+ } else
-+ res = -ENOMEM;
-+ goto out_unlock;
-+ }
-+
-+ spin_lock_init(&p->rd_lock);
-+ INIT_LIST_HEAD(&p->rd_list);
-+ init_waitqueue_head(&p->rd_waitQ);
-+ spin_lock_init(&p->wr_lock);
-+ INIT_LIST_HEAD(&p->wr_list);
-+ init_waitqueue_head(&p->wr_waitQ);
-+ if (cpu_mask == NULL)
-+ cpus_setall(p->cpu_mask);
-+ else {
-+ cpus_clear(p->cpu_mask);
-+ for_each_cpu(i, cpu_mask)
-+ cpu_set(i, p->cpu_mask);
-+ }
-+ p->thread_pool_ref = 1;
-+ INIT_LIST_HEAD(&p->threads_list);
-+
-+ if (cpu_mask == NULL)
-+ count = max((int)num_online_cpus(), 2);
-+ else {
-+ count = 0;
-+ for_each_cpu(i, cpu_mask)
-+ count++;
-+ }
-+
-+ for (j = 0; j < 2; j++) {
-+ int (*fn)(void *);
-+ char name[25];
-+ static int major;
-+
-+ if (j == 0)
-+ fn = istrd;
-+ else
-+ fn = istwr;
-+
-+ for (i = 0; i < count; i++) {
-+ if (j == 0) {
-+ major++;
-+ if (cpu_mask == NULL)
-+ snprintf(name, sizeof(name), "iscsird%d", i);
-+ else
-+ snprintf(name, sizeof(name), "iscsird%d_%d",
-+ major, i);
-+ } else {
-+ if (cpu_mask == NULL)
-+ snprintf(name, sizeof(name), "iscsiwr%d", i);
-+ else
-+ snprintf(name, sizeof(name), "iscsiwr%d_%d",
-+ major, i);
-+ }
-+
-+ t = kmalloc(sizeof(*t), GFP_KERNEL);
-+ if (t == NULL) {
-+ res = -ENOMEM;
-+ PRINT_ERROR("Failed to allocate thread %s "
-+ "(size %zd)", name, sizeof(*t));
-+ goto out_free;
-+ }
-+
-+ t->thr = kthread_run(fn, p, name);
-+ if (IS_ERR(t->thr)) {
-+ res = PTR_ERR(t->thr);
-+ PRINT_ERROR("kthread_run() for thread %s failed: %d",
-+ name, res);
-+ kfree(t);
-+ goto out_free;
-+ }
-+ list_add_tail(&t->threads_list_entry, &p->threads_list);
-+ }
-+ }
-+
-+ list_add_tail(&p->thread_pools_list_entry, &iscsi_thread_pools_list);
-+ res = 0;
-+
-+ TRACE_DBG("Created iSCSI thread pool %p", p);
-+
-+out_unlock:
-+ mutex_unlock(&iscsi_threads_pool_mutex);
-+
-+ if (out_pool != NULL)
-+ *out_pool = p;
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free:
-+ list_for_each_entry_safe(t, tt, &p->threads_list, threads_list_entry) {
-+ kthread_stop(t->thr);
-+ list_del(&t->threads_list_entry);
-+ kfree(t);
-+ }
-+ goto out_unlock;
-+}
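-+
-+/*
-+ * Usage sketch (illustrative only, not a quote from the driver): a
-+ * caller wanting dedicated read/write threads bound to a CPU set could
-+ * do something like
-+ *
-+ *	struct iscsi_thread_pool *pool;
-+ *	int rc = iscsi_threads_pool_get(&some_cpumask, &pool);
-+ *	if (rc == 0) {
-+ *		...	-- threads now service pool->rd_list/pool->wr_list
-+ *		iscsi_threads_pool_put(pool);
-+ *	}
-+ *
-+ * where some_cpumask is a hypothetical cpumask_t. Passing a NULL
-+ * cpu_mask returns the shared pool spanning all CPUs, as iscsi_init()
-+ * below does for iscsi_main_thread_pool.
-+ */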
-+
-+void iscsi_threads_pool_put(struct iscsi_thread_pool *p)
-+{
-+ struct iscsi_thread *t, *tt;
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&iscsi_threads_pool_mutex);
-+
-+ p->thread_pool_ref--;
-+ if (p->thread_pool_ref > 0) {
-+		TRACE_DBG("iSCSI thread pool %p still has %d references",
-+ p, p->thread_pool_ref);
-+ goto out_unlock;
-+ }
-+
-+ TRACE_DBG("Freeing iSCSI thread pool %p", p);
-+
-+ list_for_each_entry_safe(t, tt, &p->threads_list, threads_list_entry) {
-+ kthread_stop(t->thr);
-+ list_del(&t->threads_list_entry);
-+ kfree(t);
-+ }
-+
-+ list_del(&p->thread_pools_list_entry);
-+
-+ kfree(p);
-+
-+out_unlock:
-+ mutex_unlock(&iscsi_threads_pool_mutex);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int __init iscsi_init(void)
-+{
-+ int err = 0;
-+
-+ PRINT_INFO("iSCSI SCST Target - version %s", ISCSI_VERSION_STRING);
-+
-+ dummy_page = alloc_pages(GFP_KERNEL, 0);
-+ if (dummy_page == NULL) {
-+ PRINT_ERROR("%s", "Dummy page allocation failed");
-+ goto out;
-+ }
-+
-+ sg_init_table(&dummy_sg, 1);
-+ sg_set_page(&dummy_sg, dummy_page, PAGE_SIZE, 0);
-+
-+ iscsi_cmnd_abort_mempool = mempool_create_kmalloc_pool(2500,
-+ sizeof(struct iscsi_cmnd_abort_params));
-+ if (iscsi_cmnd_abort_mempool == NULL) {
-+ err = -ENOMEM;
-+ goto out_free_dummy;
-+ }
-+
-+#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
-+ err = net_set_get_put_page_callbacks(iscsi_get_page_callback,
-+ iscsi_put_page_callback);
-+ if (err != 0) {
-+		PRINT_INFO("Unable to set page callbacks: %d", err);
-+ goto out_destroy_mempool;
-+ }
-+#else
-+#ifndef GENERATING_UPSTREAM_PATCH
-+ PRINT_WARNING("%s",
-+ "CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION "
-+		"not enabled in your kernel. ISCSI-SCST will work with "
-+		"reduced performance. Refer to the README file for details.");
-+#endif
-+#endif
-+
-+ ctr_major = register_chrdev(0, ctr_name, &ctr_fops);
-+ if (ctr_major < 0) {
-+		PRINT_ERROR("Failed to register the control device: %d",
-+ ctr_major);
-+ err = ctr_major;
-+ goto out_callb;
-+ }
-+
-+ err = event_init();
-+ if (err < 0)
-+ goto out_reg;
-+
-+ iscsi_cmnd_cache = KMEM_CACHE(iscsi_cmnd, SCST_SLAB_FLAGS);
-+ if (!iscsi_cmnd_cache) {
-+ err = -ENOMEM;
-+ goto out_event;
-+ }
-+
-+ err = scst_register_target_template(&iscsi_template);
-+ if (err < 0)
-+ goto out_kmem;
-+
-+ iscsi_conn_ktype.sysfs_ops = scst_sysfs_get_sysfs_ops();
-+
-+ err = iscsi_threads_pool_get(NULL, &iscsi_main_thread_pool);
-+ if (err != 0)
-+ goto out_thr;
-+
-+out:
-+ return err;
-+
-+out_thr:
-+	scst_unregister_target_template(&iscsi_template);
-+
-+out_kmem:
-+ kmem_cache_destroy(iscsi_cmnd_cache);
-+
-+out_event:
-+ event_exit();
-+
-+out_reg:
-+ unregister_chrdev(ctr_major, ctr_name);
-+
-+out_callb:
-+#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
-+ net_set_get_put_page_callbacks(NULL, NULL);
-+
-+out_destroy_mempool:
-+ mempool_destroy(iscsi_cmnd_abort_mempool);
-+#endif
-+
-+out_free_dummy:
-+ __free_pages(dummy_page, 0);
-+ goto out;
-+}
-+
-+static void __exit iscsi_exit(void)
-+{
-+ iscsi_threads_pool_put(iscsi_main_thread_pool);
-+
-+ BUG_ON(!list_empty(&iscsi_thread_pools_list));
-+
-+ unregister_chrdev(ctr_major, ctr_name);
-+
-+ event_exit();
-+
-+ kmem_cache_destroy(iscsi_cmnd_cache);
-+
-+ scst_unregister_target_template(&iscsi_template);
-+
-+#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
-+ net_set_get_put_page_callbacks(NULL, NULL);
-+#endif
-+
-+ mempool_destroy(iscsi_cmnd_abort_mempool);
-+
-+ __free_pages(dummy_page, 0);
-+ return;
-+}
-+
-+module_init(iscsi_init);
-+module_exit(iscsi_exit);
-+
-+MODULE_VERSION(ISCSI_VERSION_STRING);
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("SCST iSCSI Target");
-diff -uprN orig/linux-3.2/drivers/scst/iscsi-scst/iscsi.h linux-3.2/drivers/scst/iscsi-scst/iscsi.h
---- orig/linux-3.2/drivers/scst/iscsi-scst/iscsi.h
-+++ linux-3.2/drivers/scst/iscsi-scst/iscsi.h
-@@ -0,0 +1,789 @@
-+/*
-+ * Copyright (C) 2002 - 2003 Ardis Technolgies <roman@ardistech.com>
-+ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#ifndef __ISCSI_H__
-+#define __ISCSI_H__
-+
-+#include <linux/pagemap.h>
-+#include <linux/mm.h>
-+#include <linux/net.h>
-+#include <linux/module.h>
-+#include <net/sock.h>
-+
-+#include <scst/scst.h>
-+#include <scst/iscsi_scst.h>
-+#include "iscsi_hdr.h"
-+#include "iscsi_dbg.h"
-+
-+#define iscsi_sense_crc_error ABORTED_COMMAND, 0x47, 0x05
-+#define iscsi_sense_unexpected_unsolicited_data ABORTED_COMMAND, 0x0C, 0x0C
-+#define iscsi_sense_incorrect_amount_of_data ABORTED_COMMAND, 0x0C, 0x0D
-+
-+struct iscsi_sess_params {
-+ int initial_r2t;
-+ int immediate_data;
-+ int max_connections;
-+ unsigned int max_recv_data_length;
-+ unsigned int max_xmit_data_length;
-+ unsigned int max_burst_length;
-+ unsigned int first_burst_length;
-+ int default_wait_time;
-+ int default_retain_time;
-+ unsigned int max_outstanding_r2t;
-+ int data_pdu_inorder;
-+ int data_sequence_inorder;
-+ int error_recovery_level;
-+ int header_digest;
-+ int data_digest;
-+ int ofmarker;
-+ int ifmarker;
-+ int ofmarkint;
-+ int ifmarkint;
-+};
-+
-+struct iscsi_tgt_params {
-+ int queued_cmnds;
-+ unsigned int rsp_timeout;
-+ unsigned int nop_in_interval;
-+ unsigned int nop_in_timeout;
-+};
-+
-+struct iscsi_thread {
-+ struct task_struct *thr;
-+ struct list_head threads_list_entry;
-+};
-+
-+struct iscsi_thread_pool {
-+ spinlock_t rd_lock;
-+ struct list_head rd_list;
-+ wait_queue_head_t rd_waitQ;
-+
-+ spinlock_t wr_lock;
-+ struct list_head wr_list;
-+ wait_queue_head_t wr_waitQ;
-+
-+ cpumask_t cpu_mask;
-+
-+ int thread_pool_ref;
-+
-+ struct list_head threads_list;
-+
-+ struct list_head thread_pools_list_entry;
-+};
-+
-+struct iscsi_target;
-+struct iscsi_cmnd;
-+
-+struct iscsi_attr {
-+ struct list_head attrs_list_entry;
-+ struct kobj_attribute attr;
-+ struct iscsi_target *target;
-+ const char *name;
-+};
-+
-+struct iscsi_target {
-+ struct scst_tgt *scst_tgt;
-+
-+ struct mutex target_mutex;
-+
-+ struct list_head session_list; /* protected by target_mutex */
-+
-+ struct list_head target_list_entry;
-+ u32 tid;
-+
-+ unsigned int tgt_enabled:1;
-+
-+ /* Protected by target_mutex */
-+ struct list_head attrs_list;
-+
-+ char name[ISCSI_NAME_LEN];
-+};
-+
-+#define ISCSI_HASH_ORDER 8
-+#define cmnd_hashfn(itt) hash_32(itt, ISCSI_HASH_ORDER)
-+
-+struct iscsi_session {
-+ struct iscsi_target *target;
-+ struct scst_session *scst_sess;
-+
-+ struct list_head pending_list; /* protected by sn_lock */
-+
-+ /* Unprotected, since accessed only from a single read thread */
-+ u32 next_ttt;
-+
-+ /* Read only, if there are connection(s) */
-+ struct iscsi_tgt_params tgt_params;
-+ atomic_t active_cmds;
-+
-+ spinlock_t sn_lock;
-+ u32 exp_cmd_sn; /* protected by sn_lock */
-+
-+ /* All 3 protected by sn_lock */
-+ int tm_active;
-+ u32 tm_sn;
-+ struct iscsi_cmnd *tm_rsp;
-+
-+ /* Read only, if there are connection(s) */
-+ struct iscsi_sess_params sess_params;
-+
-+ /*
-+	 * In some corner cases commands can be deleted from the hash by a
-+	 * thread other than the corresponding read thread. So, to simplify
-+	 * error recovery, have this lock.
-+ */
-+ spinlock_t cmnd_data_wait_hash_lock;
-+ struct list_head cmnd_data_wait_hash[1 << ISCSI_HASH_ORDER];
-+
-+ struct list_head conn_list; /* protected by target_mutex */
-+
-+ struct list_head session_list_entry;
-+
-+ /* All protected by target_mutex, where necessary */
-+ struct iscsi_session *sess_reinst_successor;
-+ unsigned int sess_reinstating:1;
-+ unsigned int sess_shutting_down:1;
-+
-+ struct iscsi_thread_pool *sess_thr_pool;
-+
-+ /* All don't need any protection */
-+ char *initiator_name;
-+ u64 sid;
-+};
-+
-+#define ISCSI_CONN_IOV_MAX (PAGE_SIZE/sizeof(struct iovec))
-+
-+#define ISCSI_CONN_RD_STATE_IDLE 0
-+#define ISCSI_CONN_RD_STATE_IN_LIST 1
-+#define ISCSI_CONN_RD_STATE_PROCESSING 2
-+
-+#define ISCSI_CONN_WR_STATE_IDLE 0
-+#define ISCSI_CONN_WR_STATE_IN_LIST 1
-+#define ISCSI_CONN_WR_STATE_SPACE_WAIT 2
-+#define ISCSI_CONN_WR_STATE_PROCESSING 3
-+
-+struct iscsi_conn {
-+ struct iscsi_session *session; /* owning session */
-+
-+ /* Both protected by session->sn_lock */
-+ u32 stat_sn;
-+ u32 exp_stat_sn;
-+
-+#define ISCSI_CONN_REINSTATING 1
-+#define ISCSI_CONN_SHUTTINGDOWN 2
-+ unsigned long conn_aflags;
-+
-+ spinlock_t cmd_list_lock; /* BH lock */
-+
-+ /* Protected by cmd_list_lock */
-+	struct list_head cmd_list; /* incoming/outgoing PDUs */
-+
-+ atomic_t conn_ref_cnt;
-+
-+ spinlock_t write_list_lock;
-+ /* List of data pdus to be sent. Protected by write_list_lock */
-+ struct list_head write_list;
-+ /* List of data pdus being sent. Protected by write_list_lock */
-+ struct list_head write_timeout_list;
-+
-+ /* Protected by write_list_lock */
-+ struct timer_list rsp_timer;
-+ unsigned int data_rsp_timeout; /* in jiffies */
-+
-+ /*
-+	 * Both protected by wr_lock. Modified independently of the
-+	 * fields above, hence the alignment.
-+ */
-+ unsigned short wr_state __attribute__((aligned(sizeof(long))));
-+ unsigned short wr_space_ready:1;
-+
-+ struct list_head wr_list_entry;
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ struct task_struct *wr_task;
-+#endif
-+
-+ /*
-+ * All are unprotected, since accessed only from a single write
-+ * thread.
-+ */
-+ struct iscsi_cmnd *write_cmnd;
-+ struct iovec *write_iop;
-+ int write_iop_used;
-+ struct iovec write_iov[2];
-+ u32 write_size;
-+ u32 write_offset;
-+ int write_state;
-+
-+ /* Both don't need any protection */
-+ struct file *file;
-+ struct socket *sock;
-+
-+ void (*old_state_change)(struct sock *);
-+ void (*old_data_ready)(struct sock *, int);
-+ void (*old_write_space)(struct sock *);
-+
-+ /* Both read only. Stay here for better CPU cache locality. */
-+ int hdigest_type;
-+ int ddigest_type;
-+
-+ struct iscsi_thread_pool *conn_thr_pool;
-+
-+ /* All 6 protected by rd_lock */
-+ unsigned short rd_state;
-+ unsigned short rd_data_ready:1;
-+ /* Let's save some cache footprint by putting them here */
-+ unsigned short closing:1;
-+ unsigned short active_close:1;
-+ unsigned short deleting:1;
-+ unsigned short conn_tm_active:1;
-+
-+ struct list_head rd_list_entry;
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ struct task_struct *rd_task;
-+#endif
-+
-+ unsigned long last_rcv_time;
-+
-+ /*
-+ * All are unprotected, since accessed only from a single read
-+ * thread.
-+ */
-+ struct iscsi_cmnd *read_cmnd;
-+ struct msghdr read_msg;
-+ u32 read_size;
-+ int read_state;
-+ struct iovec *read_iov;
-+ struct task_struct *rx_task;
-+ uint32_t rpadding;
-+
-+ struct iscsi_target *target;
-+
-+ struct list_head conn_list_entry; /* list entry in session conn_list */
-+
-+ /* All protected by target_mutex, where necessary */
-+ struct iscsi_conn *conn_reinst_successor;
-+ struct list_head reinst_pending_cmd_list;
-+
-+ wait_queue_head_t read_state_waitQ;
-+ struct completion ready_to_free;
-+
-+ /* Doesn't need any protection */
-+ u16 cid;
-+
-+ struct delayed_work nop_in_delayed_work;
-+ unsigned int nop_in_interval; /* in jiffies */
-+ unsigned int nop_in_timeout; /* in jiffies */
-+ struct list_head nop_req_list;
-+ spinlock_t nop_req_list_lock;
-+ u32 nop_in_ttt;
-+
-+ /* Don't need any protection */
-+ struct kobject conn_kobj;
-+ struct completion *conn_kobj_release_cmpl;
-+};
-+
-+struct iscsi_pdu {
-+ struct iscsi_hdr bhs;
-+ void *ahs;
-+ unsigned int ahssize;
-+ unsigned int datasize;
-+};
-+
-+typedef void (iscsi_show_info_t)(struct seq_file *seq,
-+ struct iscsi_target *target);
-+
-+/** Commands' states **/
-+
-+/* New command and SCST processes it */
-+#define ISCSI_CMD_STATE_NEW 0
-+
-+/* SCST processes cmd after scst_rx_cmd() */
-+#define ISCSI_CMD_STATE_RX_CMD 1
-+
-+/* The command returned from preprocessing_done() */
-+#define ISCSI_CMD_STATE_AFTER_PREPROC 2
-+
-+/* The command is waiting for session or connection reinstatement finished */
-+#define ISCSI_CMD_STATE_REINST_PENDING 3
-+
-+/* scst_restart_cmd() called and SCST processing it */
-+#define ISCSI_CMD_STATE_RESTARTED 4
-+
-+/* SCST done processing */
-+#define ISCSI_CMD_STATE_PROCESSED 5
-+
-+/* AEN processing */
-+#define ISCSI_CMD_STATE_AEN 6
-+
-+/* Out of SCST core preliminary completed */
-+#define ISCSI_CMD_STATE_OUT_OF_SCST_PRELIM_COMPL 7
-+
-+/*
-+ * Most of the fields don't need any protection, since accessed from only a
-+ * single thread, except where noted.
-+ *
-+ * ToDo: Eventually divide request and response structures in 2 separate
-+ * structures and stop this IET-derived garbage.
-+ */
-+struct iscsi_cmnd {
-+ struct iscsi_conn *conn;
-+
-+ /*
-+	 * Some flags are used under conn->write_list_lock, but all are
-+	 * modified only from a single read thread or when there are no
-+	 * references to the cmd.
-+ */
-+ unsigned int hashed:1;
-+ unsigned int should_close_conn:1;
-+ unsigned int should_close_all_conn:1;
-+ unsigned int pending:1;
-+ unsigned int own_sg:1;
-+ unsigned int on_write_list:1;
-+ unsigned int write_processing_started:1;
-+ unsigned int force_cleanup_done:1;
-+ unsigned int dec_active_cmds:1;
-+ unsigned int ddigest_checked:1;
-+ /*
-+ * Used to prevent release of original req while its related DATA OUT
-+ * cmd is receiving data, i.e. stays between data_out_start() and
-+ * data_out_end(). Ref counting can't be used for that, because
-+ * req_cmnd_release() supposed to be called only once.
-+	 * req_cmnd_release() is supposed to be called only once.
-+ unsigned int data_out_in_data_receiving:1;
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ unsigned int on_rx_digest_list:1;
-+ unsigned int release_called:1;
-+#endif
-+
-+ /*
-+	 * We suppose that preliminary completion of commands is tested by
-+	 * comparing prelim_compl_flags with 0. Otherwise, because of the
-+	 * gap between setting different flags, a race is possible, e.g.
-+	 * sending a command to the SCST core as PRELIM_COMPLETED while it
-+	 * was not yet aborted there, with a wrong success status sent to
-+	 * the initiator as the result.
-+ */
-+#define ISCSI_CMD_ABORTED 0
-+#define ISCSI_CMD_PRELIM_COMPLETED 1
-+ unsigned long prelim_compl_flags;
-+
-+ struct list_head hash_list_entry;
-+
-+ /*
-+ * Unions are for readability and grepability and to save some
-+ * cache footprint.
-+ */
-+
-+ union {
-+ /*
-+		 * Used only to abort not yet sent responses. Usage in
-+		 * cmnd_done() is only a side effect of having lockless
-+		 * access to this list from only a single thread at any
-+		 * time. So, all responses live in the parent until it
-+		 * has its last reference put.
-+ */
-+ struct list_head rsp_cmd_list;
-+ struct list_head rsp_cmd_list_entry;
-+ };
-+
-+ union {
-+ struct list_head pending_list_entry;
-+ struct list_head reinst_pending_cmd_list_entry;
-+ };
-+
-+ union {
-+ struct list_head write_list_entry;
-+ struct list_head write_timeout_list_entry;
-+ };
-+
-+ /* Both protected by conn->write_list_lock */
-+ unsigned int on_write_timeout_list:1;
-+ unsigned long write_start;
-+
-+ /*
-+ * All unprotected, since could be accessed from only a single
-+	 * All unprotected, since they can be accessed from only a single
-+	 * thread at a time
-+ struct iscsi_cmnd *parent_req;
-+ struct iscsi_cmnd *cmd_req;
-+
-+ /*
-+	 * All unprotected, since they can be accessed from only a single
-+	 * thread at a time
-+ */
-+ union {
-+ /* Request only fields */
-+ struct {
-+ struct list_head rx_ddigest_cmd_list;
-+ struct list_head rx_ddigest_cmd_list_entry;
-+
-+ int scst_state;
-+ union {
-+ struct scst_cmd *scst_cmd;
-+ struct scst_aen *scst_aen;
-+ };
-+
-+ struct iscsi_cmnd *main_rsp;
-+
-+ /*
-+			 * Protected on modification by conn->write_list_lock,
-+			 * and thus modified independently of the above field,
-+			 * hence the alignment.
-+ */
-+ int not_processed_rsp_cnt
-+ __attribute__((aligned(sizeof(long))));
-+ };
-+
-+ /* Response only fields */
-+ struct {
-+ struct scatterlist rsp_sg[2];
-+ struct iscsi_sense_data sense_hdr;
-+ };
-+ };
-+
-+ atomic_t ref_cnt;
-+#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
-+ atomic_t net_ref_cnt;
-+#endif
-+
-+ struct iscsi_pdu pdu;
-+
-+ struct scatterlist *sg;
-+ int sg_cnt;
-+ unsigned int bufflen;
-+ u32 r2t_sn;
-+ unsigned int r2t_len_to_receive;
-+ unsigned int r2t_len_to_send;
-+ unsigned int outstanding_r2t;
-+ u32 target_task_tag;
-+ __be32 hdigest;
-+ __be32 ddigest;
-+
-+ struct list_head cmd_list_entry;
-+ struct list_head nop_req_list_entry;
-+
-+ unsigned int not_received_data_len;
-+};
-+
-+/* Max time to wait for our response to be satisfied for aborted commands */
-+#define ISCSI_TM_DATA_WAIT_TIMEOUT (10 * HZ)
-+
-+/*
-+ * Addition needed to all timeouts to complete a burst of commands at once.
-+ * Otherwise, part of the burst could be timed out only after double the
-+ * timeout time.
-+ */
-+#define ISCSI_ADD_SCHED_TIME HZ
-+
-+#define ISCSI_CTR_OPEN_STATE_CLOSED 0
-+#define ISCSI_CTR_OPEN_STATE_OPEN 1
-+#define ISCSI_CTR_OPEN_STATE_CLOSING 2
-+
-+extern struct mutex target_mgmt_mutex;
-+
-+extern int ctr_open_state;
-+extern const struct file_operations ctr_fops;
-+
-+/* iscsi.c */
-+extern struct iscsi_cmnd *cmnd_alloc(struct iscsi_conn *,
-+ struct iscsi_cmnd *parent);
-+extern int cmnd_rx_start(struct iscsi_cmnd *);
-+extern int cmnd_rx_continue(struct iscsi_cmnd *req);
-+extern void cmnd_rx_end(struct iscsi_cmnd *);
-+extern void cmnd_tx_start(struct iscsi_cmnd *);
-+extern void cmnd_tx_end(struct iscsi_cmnd *);
-+extern void req_cmnd_release_force(struct iscsi_cmnd *req);
-+extern void rsp_cmnd_release(struct iscsi_cmnd *);
-+extern void cmnd_done(struct iscsi_cmnd *cmnd);
-+extern void conn_abort(struct iscsi_conn *conn);
-+extern void iscsi_restart_cmnd(struct iscsi_cmnd *cmnd);
-+extern void iscsi_fail_data_waiting_cmnd(struct iscsi_cmnd *cmnd);
-+extern void iscsi_send_nop_in(struct iscsi_conn *conn);
-+extern int iscsi_preliminary_complete(struct iscsi_cmnd *req,
-+ struct iscsi_cmnd *orig_req, bool get_data);
-+extern int set_scst_preliminary_status_rsp(struct iscsi_cmnd *req,
-+ bool get_data, int key, int asc, int ascq);
-+extern int iscsi_threads_pool_get(const cpumask_t *cpu_mask,
-+ struct iscsi_thread_pool **out_pool);
-+extern void iscsi_threads_pool_put(struct iscsi_thread_pool *p);
-+
-+/* conn.c */
-+extern struct kobj_type iscsi_conn_ktype;
-+extern struct iscsi_conn *conn_lookup(struct iscsi_session *, u16);
-+extern void conn_reinst_finished(struct iscsi_conn *);
-+extern int __add_conn(struct iscsi_session *, struct iscsi_kern_conn_info *);
-+extern int __del_conn(struct iscsi_session *, struct iscsi_kern_conn_info *);
-+extern int conn_free(struct iscsi_conn *);
-+extern void iscsi_make_conn_rd_active(struct iscsi_conn *conn);
-+#define ISCSI_CONN_ACTIVE_CLOSE 1
-+#define ISCSI_CONN_DELETING 2
-+extern void __mark_conn_closed(struct iscsi_conn *, int);
-+extern void mark_conn_closed(struct iscsi_conn *);
-+extern void iscsi_make_conn_wr_active(struct iscsi_conn *);
-+extern void iscsi_check_tm_data_wait_timeouts(struct iscsi_conn *conn,
-+ bool force);
-+extern void __iscsi_write_space_ready(struct iscsi_conn *conn);
-+
-+/* nthread.c */
-+extern int iscsi_send(struct iscsi_conn *conn);
-+#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
-+extern void iscsi_get_page_callback(struct page *page);
-+extern void iscsi_put_page_callback(struct page *page);
-+#endif
-+extern int istrd(void *arg);
-+extern int istwr(void *arg);
-+extern void iscsi_task_mgmt_affected_cmds_done(struct scst_mgmt_cmd *scst_mcmd);
-+extern void req_add_to_write_timeout_list(struct iscsi_cmnd *req);
-+
-+/* target.c */
-+extern const struct attribute *iscsi_tgt_attrs[];
-+extern int iscsi_enable_target(struct scst_tgt *scst_tgt, bool enable);
-+extern bool iscsi_is_target_enabled(struct scst_tgt *scst_tgt);
-+extern ssize_t iscsi_sysfs_send_event(uint32_t tid,
-+ enum iscsi_kern_event_code code,
-+ const char *param1, const char *param2, void **data);
-+extern struct iscsi_target *target_lookup_by_id(u32);
-+extern int __add_target(struct iscsi_kern_target_info *);
-+extern int __del_target(u32 id);
-+extern ssize_t iscsi_sysfs_add_target(const char *target_name, char *params);
-+extern ssize_t iscsi_sysfs_del_target(const char *target_name);
-+extern ssize_t iscsi_sysfs_mgmt_cmd(char *cmd);
-+extern void target_del_session(struct iscsi_target *target,
-+ struct iscsi_session *session, int flags);
-+extern void target_del_all_sess(struct iscsi_target *target, int flags);
-+extern void target_del_all(void);
-+
-+/* config.c */
-+extern const struct attribute *iscsi_attrs[];
-+extern int iscsi_add_attr(struct iscsi_target *target,
-+ const struct iscsi_kern_attr *user_info);
-+extern void __iscsi_del_attr(struct iscsi_target *target,
-+ struct iscsi_attr *tgt_attr);
-+
-+/* session.c */
-+extern const struct attribute *iscsi_sess_attrs[];
-+extern const struct file_operations session_seq_fops;
-+extern struct iscsi_session *session_lookup(struct iscsi_target *, u64);
-+extern void sess_reinst_finished(struct iscsi_session *);
-+extern int __add_session(struct iscsi_target *,
-+ struct iscsi_kern_session_info *);
-+extern int __del_session(struct iscsi_target *, u64);
-+extern int session_free(struct iscsi_session *session, bool del);
-+extern void iscsi_sess_force_close(struct iscsi_session *sess);
-+
-+/* params.c */
-+extern const char *iscsi_get_digest_name(int val, char *res);
-+extern const char *iscsi_get_bool_value(int val);
-+extern int iscsi_params_set(struct iscsi_target *,
-+ struct iscsi_kern_params_info *, int);
-+
-+/* event.c */
-+extern int event_send(u32, u64, u32, u32, enum iscsi_kern_event_code,
-+ const char *param1, const char *param2);
-+extern int event_init(void);
-+extern void event_exit(void);
-+
-+#define get_pgcnt(size, offset) \
-+ ((((size) + ((offset) & ~PAGE_MASK)) + PAGE_SIZE - 1) >> PAGE_SHIFT)
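-+/*
-+ * For example, with PAGE_SIZE == 4096 a 5000-byte buffer starting at
-+ * offset 300 into its first page covers bytes 300..5299 and so spans
-+ * get_pgcnt(5000, 300) == (5300 + 4095) >> 12 == 2 pages.
-+ */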
-+
-+static inline void iscsi_cmnd_get_length(struct iscsi_pdu *pdu)
-+{
-+#if defined(__BIG_ENDIAN)
-+ pdu->ahssize = pdu->bhs.length.ahslength * 4;
-+ pdu->datasize = pdu->bhs.length.datalength;
-+#elif defined(__LITTLE_ENDIAN)
-+ pdu->ahssize = ((__force __u32)pdu->bhs.length & 0xff) * 4;
-+ pdu->datasize = be32_to_cpu((__force __be32)((__force __u32)pdu->bhs.length & ~0xff));
-+#else
-+#error
-+#endif
-+}
-+
-+static inline void iscsi_cmnd_set_length(struct iscsi_pdu *pdu)
-+{
-+#if defined(__BIG_ENDIAN)
-+ pdu->bhs.length.ahslength = pdu->ahssize / 4;
-+ pdu->bhs.length.datalength = pdu->datasize;
-+#elif defined(__LITTLE_ENDIAN)
-+ pdu->bhs.length = cpu_to_be32(pdu->datasize) | (__force __be32)(pdu->ahssize / 4);
-+#else
-+#error
-+#endif
-+}
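-+
-+/*
-+ * Worked example of the encoding above: for ahssize == 8 (two 4-byte AHS
-+ * words) and datasize == 0x123456, bytes 4..7 of the BHS become
-+ * 0x02 0x12 0x34 0x56 on the wire: TotalAHSLength in the first byte and
-+ * the 24-bit big-endian DataSegmentLength in the remaining three,
-+ * whichever of the two #if branches is compiled.
-+ */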
-+
-+extern struct scst_tgt_template iscsi_template;
-+
-+/*
-+ * Skip this command if the result is true. Must be called under the
-+ * corresponding lock.
-+ */
-+static inline bool cmnd_get_check(struct iscsi_cmnd *cmnd)
-+{
-+ int r = atomic_inc_return(&cmnd->ref_cnt);
-+ int res;
-+ if (unlikely(r == 1)) {
-+ TRACE_DBG("cmnd %p is being destroyed", cmnd);
-+ atomic_dec(&cmnd->ref_cnt);
-+ res = 1;
-+ /* Necessary code is serialized by locks in cmnd_done() */
-+ } else {
-+ TRACE_DBG("cmnd %p, new ref_cnt %d", cmnd,
-+ atomic_read(&cmnd->ref_cnt));
-+ res = 0;
-+ }
-+ return res;
-+}
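-+
-+/*
-+ * Typical usage sketch (illustrative, not a quote from the driver):
-+ * walk a hash list under its lock and take a reference only on commands
-+ * that are not already being destroyed:
-+ *
-+ *	spin_lock(&session->cmnd_data_wait_hash_lock);
-+ *	list_for_each_entry(cmnd, head, hash_list_entry) {
-+ *		if (cmnd_get_check(cmnd))
-+ *			continue;	-- ref_cnt was 0, cmnd is dying
-+ *		...			-- cmnd is now safely referenced
-+ *	}
-+ *	spin_unlock(&session->cmnd_data_wait_hash_lock);
-+ */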
-+
-+static inline void cmnd_get(struct iscsi_cmnd *cmnd)
-+{
-+ atomic_inc(&cmnd->ref_cnt);
-+ TRACE_DBG("cmnd %p, new cmnd->ref_cnt %d", cmnd,
-+ atomic_read(&cmnd->ref_cnt));
-+ /*
-+ * For the same reason as in kref_get(). Let's be safe and
-+ * always do it.
-+ */
-+ smp_mb__after_atomic_inc();
-+}
-+
-+static inline void cmnd_put(struct iscsi_cmnd *cmnd)
-+{
-+ TRACE_DBG("cmnd %p, new ref_cnt %d", cmnd,
-+ atomic_read(&cmnd->ref_cnt)-1);
-+
-+ EXTRACHECKS_BUG_ON(atomic_read(&cmnd->ref_cnt) == 0);
-+
-+ if (atomic_dec_and_test(&cmnd->ref_cnt))
-+ cmnd_done(cmnd);
-+}
-+
-+/* conn->write_list_lock supposed to be locked and BHs off */
-+static inline void cmd_add_on_write_list(struct iscsi_conn *conn,
-+ struct iscsi_cmnd *cmnd)
-+{
-+ struct iscsi_cmnd *parent = cmnd->parent_req;
-+
-+ TRACE_DBG("cmnd %p", cmnd);
-+ /* See comment in iscsi_restart_cmnd() */
-+ EXTRACHECKS_BUG_ON(cmnd->parent_req->hashed &&
-+ (cmnd_opcode(cmnd) != ISCSI_OP_R2T));
-+ list_add_tail(&cmnd->write_list_entry, &conn->write_list);
-+ cmnd->on_write_list = 1;
-+
-+ parent->not_processed_rsp_cnt++;
-+ TRACE_DBG("not processed rsp cnt %d (parent %p)",
-+ parent->not_processed_rsp_cnt, parent);
-+}
-+
-+/* conn->write_list_lock supposed to be locked and BHs off */
-+static inline void cmd_del_from_write_list(struct iscsi_cmnd *cmnd)
-+{
-+ struct iscsi_cmnd *parent = cmnd->parent_req;
-+
-+ TRACE_DBG("%p", cmnd);
-+ list_del(&cmnd->write_list_entry);
-+ cmnd->on_write_list = 0;
-+
-+ parent->not_processed_rsp_cnt--;
-+ TRACE_DBG("not processed rsp cnt %d (parent %p)",
-+ parent->not_processed_rsp_cnt, parent);
-+ EXTRACHECKS_BUG_ON(parent->not_processed_rsp_cnt < 0);
-+}
-+
-+static inline void cmd_add_on_rx_ddigest_list(struct iscsi_cmnd *req,
-+ struct iscsi_cmnd *cmnd)
-+{
-+ TRACE_DBG("Adding RX ddigest cmd %p to digest list "
-+ "of req %p", cmnd, req);
-+ list_add_tail(&cmnd->rx_ddigest_cmd_list_entry,
-+ &req->rx_ddigest_cmd_list);
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ cmnd->on_rx_digest_list = 1;
-+#endif
-+}
-+
-+static inline void cmd_del_from_rx_ddigest_list(struct iscsi_cmnd *cmnd)
-+{
-+ TRACE_DBG("Deleting RX digest cmd %p from digest list", cmnd);
-+ list_del(&cmnd->rx_ddigest_cmd_list_entry);
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ cmnd->on_rx_digest_list = 0;
-+#endif
-+}
-+
-+static inline unsigned long iscsi_get_timeout(struct iscsi_cmnd *req)
-+{
-+ unsigned long res;
-+
-+ res = (cmnd_opcode(req) == ISCSI_OP_NOP_OUT) ?
-+ req->conn->nop_in_timeout : req->conn->data_rsp_timeout;
-+
-+ if (unlikely(test_bit(ISCSI_CMD_ABORTED, &req->prelim_compl_flags)))
-+ res = min_t(unsigned long, res, ISCSI_TM_DATA_WAIT_TIMEOUT);
-+
-+ return res;
-+}
-+
-+static inline unsigned long iscsi_get_timeout_time(struct iscsi_cmnd *req)
-+{
-+ return req->write_start + iscsi_get_timeout(req);
-+}
-+
-+static inline int test_write_ready(struct iscsi_conn *conn)
-+{
-+ /*
-+	 * No need for write_list protection; in the worst case we will be
-+	 * restarted again.
-+ */
-+ return !list_empty(&conn->write_list) || conn->write_cmnd;
-+}
-+
-+static inline void conn_get(struct iscsi_conn *conn)
-+{
-+ atomic_inc(&conn->conn_ref_cnt);
-+ TRACE_DBG("conn %p, new conn_ref_cnt %d", conn,
-+ atomic_read(&conn->conn_ref_cnt));
-+ /*
-+ * For the same reason as in kref_get(). Let's be safe and
-+ * always do it.
-+ */
-+ smp_mb__after_atomic_inc();
-+}
-+
-+static inline void conn_put(struct iscsi_conn *conn)
-+{
-+ TRACE_DBG("conn %p, new conn_ref_cnt %d", conn,
-+ atomic_read(&conn->conn_ref_cnt)-1);
-+ BUG_ON(atomic_read(&conn->conn_ref_cnt) == 0);
-+
-+ /*
-+	 * Make it always ordered to protect from undesired side effects,
-+	 * like accessing a conn just destroyed by close_conn(), caused by
-+	 * reordering of this atomic_dec().
-+ */
-+ smp_mb__before_atomic_dec();
-+ atomic_dec(&conn->conn_ref_cnt);
-+}
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+extern void iscsi_extracheck_is_rd_thread(struct iscsi_conn *conn);
-+extern void iscsi_extracheck_is_wr_thread(struct iscsi_conn *conn);
-+#else
-+static inline void iscsi_extracheck_is_rd_thread(struct iscsi_conn *conn) {}
-+static inline void iscsi_extracheck_is_wr_thread(struct iscsi_conn *conn) {}
-+#endif
-+
-+#endif /* __ISCSI_H__ */
-diff -uprN orig/linux-3.2/drivers/scst/iscsi-scst/iscsi_dbg.h linux-3.2/drivers/scst/iscsi-scst/iscsi_dbg.h
---- orig/linux-3.2/drivers/scst/iscsi-scst/iscsi_dbg.h
-+++ linux-3.2/drivers/scst/iscsi-scst/iscsi_dbg.h
-@@ -0,0 +1,61 @@
-+/*
-+ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#ifndef ISCSI_DBG_H
-+#define ISCSI_DBG_H
-+
-+#define LOG_PREFIX "iscsi-scst"
-+
-+#include <scst/scst_debug.h>
-+
-+#define TRACE_D_WRITE 0x80000000
-+#define TRACE_CONN_OC 0x40000000
-+#define TRACE_D_IOV 0x20000000
-+#define TRACE_D_DUMP_PDU 0x10000000
-+#define TRACE_NET_PG 0x08000000
-+#define TRACE_CONN_OC_DBG 0x04000000
-+
-+#ifdef CONFIG_SCST_DEBUG
-+#define ISCSI_DEFAULT_LOG_FLAGS (TRACE_FUNCTION | TRACE_LINE | TRACE_PID | \
-+ TRACE_OUT_OF_MEM | TRACE_MGMT | TRACE_MGMT_DEBUG | \
-+ TRACE_MINOR | TRACE_SPECIAL | TRACE_CONN_OC)
-+#else
-+#define ISCSI_DEFAULT_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MGMT | \
-+ TRACE_SPECIAL)
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG
-+struct iscsi_pdu;
-+struct iscsi_cmnd;
-+extern void iscsi_dump_pdu(struct iscsi_pdu *pdu);
-+extern unsigned long iscsi_get_flow_ctrl_or_mgmt_dbg_log_flag(
-+ struct iscsi_cmnd *cmnd);
-+#else
-+#define iscsi_dump_pdu(x) do {} while (0)
-+#define iscsi_get_flow_ctrl_or_mgmt_dbg_log_flag(x) do {} while (0)
-+#endif
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+extern unsigned long iscsi_trace_flag;
-+#define trace_flag iscsi_trace_flag
-+#endif
-+
-+#define TRACE_CONN_CLOSE(args...) TRACE_DBG_FLAG(TRACE_DEBUG|TRACE_CONN_OC, args)
-+#define TRACE_CONN_CLOSE_DBG(args...) TRACE(TRACE_CONN_OC_DBG, args)
-+#define TRACE_NET_PAGE(args...) TRACE_DBG_FLAG(TRACE_NET_PG, args)
-+#define TRACE_WRITE(args...) TRACE_DBG_FLAG(TRACE_DEBUG|TRACE_D_WRITE, args)
-+
-+#endif
-diff -uprN orig/linux-3.2/drivers/scst/iscsi-scst/iscsi_hdr.h linux-3.2/drivers/scst/iscsi-scst/iscsi_hdr.h
---- orig/linux-3.2/drivers/scst/iscsi-scst/iscsi_hdr.h
-+++ linux-3.2/drivers/scst/iscsi-scst/iscsi_hdr.h
-@@ -0,0 +1,526 @@
-+/*
-+ * Copyright (C) 2002 - 2003 Ardis Technolgies <roman@ardistech.com>
-+ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#ifndef __ISCSI_HDR_H__
-+#define __ISCSI_HDR_H__
-+
-+#include <linux/types.h>
-+#include <asm/byteorder.h>
-+
-+#define ISCSI_VERSION 0
-+
-+#ifndef __packed
-+#define __packed __attribute__ ((packed))
-+#endif
-+
-+/* iSCSI command PDU header. See also section 10.3 in RFC 3720. */
-+struct iscsi_hdr {
-+ u8 opcode; /* 0 */
-+ u8 flags;
-+ u8 spec1[2];
-+#if defined(__BIG_ENDIAN_BITFIELD)
-+ struct { /* 4 */
-+ unsigned ahslength:8;
-+ unsigned datalength:24;
-+ } length;
-+#elif defined(__LITTLE_ENDIAN_BITFIELD)
-+ __be32 length; /* 4 */
-+#endif
-+ __be64 lun; /* 8 */
-+ __be32 itt; /* 16 */
-+ __be32 ttt; /* 20 */
-+
-+ /*
-+ * SN fields most time stay converted to the CPU form and only received
-+ * and send in the BE form.
-+ */
-+ u32 sn; /* 24 */
-+ u32 exp_sn; /* 28 */
-+ u32 max_sn; /* 32 */
-+
-+ __be32 spec3[3]; /* 36 */
-+} __packed; /* 48 */
-+
-+/* Opcode encoding bits */
-+#define ISCSI_OP_RETRY 0x80
-+#define ISCSI_OP_IMMEDIATE 0x40
-+#define ISCSI_OPCODE_MASK 0x3F
-+
-+/* Client to Server Message Opcode values */
-+#define ISCSI_OP_NOP_OUT 0x00
-+#define ISCSI_OP_SCSI_CMD 0x01
-+#define ISCSI_OP_SCSI_TASK_MGT_MSG 0x02
-+#define ISCSI_OP_LOGIN_CMD 0x03
-+#define ISCSI_OP_TEXT_CMD 0x04
-+#define ISCSI_OP_SCSI_DATA_OUT 0x05
-+#define ISCSI_OP_LOGOUT_CMD 0x06
-+#define ISCSI_OP_SNACK_CMD 0x10
-+
-+/* Server to Client Message Opcode values */
-+#define ISCSI_OP_NOP_IN 0x20
-+#define ISCSI_OP_SCSI_RSP 0x21
-+#define ISCSI_OP_SCSI_TASK_MGT_RSP 0x22
-+#define ISCSI_OP_LOGIN_RSP 0x23
-+#define ISCSI_OP_TEXT_RSP 0x24
-+#define ISCSI_OP_SCSI_DATA_IN 0x25
-+#define ISCSI_OP_LOGOUT_RSP 0x26
-+#define ISCSI_OP_R2T 0x31
-+#define ISCSI_OP_ASYNC_MSG 0x32
-+#define ISCSI_OP_REJECT 0x3f
-+
-+struct iscsi_ahs_hdr {
-+ __be16 ahslength;
-+ u8 ahstype;
-+} __packed;
-+
-+#define ISCSI_AHSTYPE_CDB 1
-+#define ISCSI_AHSTYPE_RLENGTH 2
-+
-+union iscsi_sid {
-+ struct {
-+ u8 isid[6]; /* Initiator Session ID */
-+ __be16 tsih; /* Target Session ID */
-+ } id;
-+ __be64 id64;
-+} __packed;
-+
-+struct iscsi_scsi_cmd_hdr {
-+ u8 opcode;
-+ u8 flags;
-+ __be16 rsvd1;
-+ u8 ahslength;
-+ u8 datalength[3];
-+ __be64 lun;
-+ __be32 itt;
-+ __be32 data_length;
-+ u32 cmd_sn;
-+ u32 exp_stat_sn;
-+ u8 scb[16];
-+} __packed;
-+
-+#define ISCSI_CMD_FINAL 0x80
-+#define ISCSI_CMD_READ 0x40
-+#define ISCSI_CMD_WRITE 0x20
-+#define ISCSI_CMD_ATTR_MASK 0x07
-+#define ISCSI_CMD_UNTAGGED 0x00
-+#define ISCSI_CMD_SIMPLE 0x01
-+#define ISCSI_CMD_ORDERED 0x02
-+#define ISCSI_CMD_HEAD_OF_QUEUE 0x03
-+#define ISCSI_CMD_ACA 0x04
-+
-+struct iscsi_cdb_ahdr {
-+ __be16 ahslength;
-+ u8 ahstype;
-+ u8 reserved;
-+ u8 cdb[0];
-+} __packed;
-+
-+struct iscsi_rlength_ahdr {
-+ __be16 ahslength;
-+ u8 ahstype;
-+ u8 reserved;
-+ __be32 read_length;
-+} __packed;
-+
-+struct iscsi_scsi_rsp_hdr {
-+ u8 opcode;
-+ u8 flags;
-+ u8 response;
-+ u8 cmd_status;
-+ u8 ahslength;
-+ u8 datalength[3];
-+ u32 rsvd1[2];
-+ __be32 itt;
-+ __be32 snack;
-+ u32 stat_sn;
-+ u32 exp_cmd_sn;
-+ u32 max_cmd_sn;
-+ u32 exp_data_sn;
-+ __be32 bi_residual_count;
-+ __be32 residual_count;
-+} __packed;
-+
-+#define ISCSI_FLG_RESIDUAL_UNDERFLOW 0x02
-+#define ISCSI_FLG_RESIDUAL_OVERFLOW 0x04
-+#define ISCSI_FLG_BIRESIDUAL_UNDERFLOW 0x08
-+#define ISCSI_FLG_BIRESIDUAL_OVERFLOW 0x10
-+
-+#define ISCSI_RESPONSE_COMMAND_COMPLETED 0x00
-+#define ISCSI_RESPONSE_TARGET_FAILURE 0x01
-+
-+struct iscsi_sense_data {
-+ __be16 length;
-+ u8 data[0];
-+} __packed;
-+
-+struct iscsi_task_mgt_hdr {
-+ u8 opcode;
-+ u8 function;
-+ __be16 rsvd1;
-+ u8 ahslength;
-+ u8 datalength[3];
-+ __be64 lun;
-+ __be32 itt;
-+ __be32 rtt;
-+ u32 cmd_sn;
-+ u32 exp_stat_sn;
-+ u32 ref_cmd_sn;
-+ u32 exp_data_sn;
-+ u32 rsvd2[2];
-+} __packed;
-+
-+#define ISCSI_FUNCTION_MASK 0x7f
-+
-+#define ISCSI_FUNCTION_ABORT_TASK 1
-+#define ISCSI_FUNCTION_ABORT_TASK_SET 2
-+#define ISCSI_FUNCTION_CLEAR_ACA 3
-+#define ISCSI_FUNCTION_CLEAR_TASK_SET 4
-+#define ISCSI_FUNCTION_LOGICAL_UNIT_RESET 5
-+#define ISCSI_FUNCTION_TARGET_WARM_RESET 6
-+#define ISCSI_FUNCTION_TARGET_COLD_RESET 7
-+#define ISCSI_FUNCTION_TASK_REASSIGN 8
-+
-+struct iscsi_task_rsp_hdr {
-+ u8 opcode;
-+ u8 flags;
-+ u8 response;
-+ u8 rsvd1;
-+ u8 ahslength;
-+ u8 datalength[3];
-+ u32 rsvd2[2];
-+ __be32 itt;
-+ u32 rsvd3;
-+ u32 stat_sn;
-+ u32 exp_cmd_sn;
-+ u32 max_cmd_sn;
-+ u32 rsvd4[3];
-+} __packed;
-+
-+#define ISCSI_RESPONSE_FUNCTION_COMPLETE 0
-+#define ISCSI_RESPONSE_UNKNOWN_TASK 1
-+#define ISCSI_RESPONSE_UNKNOWN_LUN 2
-+#define ISCSI_RESPONSE_TASK_ALLEGIANT 3
-+#define ISCSI_RESPONSE_ALLEGIANCE_REASSIGNMENT_UNSUPPORTED 4
-+#define ISCSI_RESPONSE_FUNCTION_UNSUPPORTED 5
-+#define ISCSI_RESPONSE_NO_AUTHORIZATION 6
-+#define ISCSI_RESPONSE_FUNCTION_REJECTED 255
-+
-+struct iscsi_data_out_hdr {
-+ u8 opcode;
-+ u8 flags;
-+ u16 rsvd1;
-+ u8 ahslength;
-+ u8 datalength[3];
-+ __be64 lun;
-+ __be32 itt;
-+ __be32 ttt;
-+ u32 rsvd2;
-+ u32 exp_stat_sn;
-+ u32 rsvd3;
-+ __be32 data_sn;
-+ __be32 buffer_offset;
-+ u32 rsvd4;
-+} __packed;
-+
-+struct iscsi_data_in_hdr {
-+ u8 opcode;
-+ u8 flags;
-+ u8 rsvd1;
-+ u8 cmd_status;
-+ u8 ahslength;
-+ u8 datalength[3];
-+ u32 rsvd2[2];
-+ __be32 itt;
-+ __be32 ttt;
-+ u32 stat_sn;
-+ u32 exp_cmd_sn;
-+ u32 max_cmd_sn;
-+ __be32 data_sn;
-+ __be32 buffer_offset;
-+ __be32 residual_count;
-+} __packed;
-+
-+#define ISCSI_FLG_STATUS 0x01
-+
-+struct iscsi_r2t_hdr {
-+ u8 opcode;
-+ u8 flags;
-+ u16 rsvd1;
-+ u8 ahslength;
-+ u8 datalength[3];
-+ __be64 lun;
-+ __be32 itt;
-+ __be32 ttt;
-+ u32 stat_sn;
-+ u32 exp_cmd_sn;
-+ u32 max_cmd_sn;
-+ u32 r2t_sn;
-+ __be32 buffer_offset;
-+ __be32 data_length;
-+} __packed;
-+
-+struct iscsi_async_msg_hdr {
-+ u8 opcode;
-+ u8 flags;
-+ u16 rsvd1;
-+ u8 ahslength;
-+ u8 datalength[3];
-+ __be64 lun;
-+ __be32 ffffffff;
-+ u32 rsvd2;
-+ u32 stat_sn;
-+ u32 exp_cmd_sn;
-+ u32 max_cmd_sn;
-+ u8 async_event;
-+ u8 async_vcode;
-+ __be16 param1;
-+ __be16 param2;
-+ __be16 param3;
-+ u32 rsvd3;
-+} __packed;
-+
-+#define ISCSI_ASYNC_SCSI 0
-+#define ISCSI_ASYNC_LOGOUT 1
-+#define ISCSI_ASYNC_DROP_CONNECTION 2
-+#define ISCSI_ASYNC_DROP_SESSION 3
-+#define ISCSI_ASYNC_PARAM_REQUEST 4
-+#define ISCSI_ASYNC_VENDOR 255
-+
-+struct iscsi_text_req_hdr {
-+ u8 opcode;
-+ u8 flags;
-+ u16 rsvd1;
-+ u8 ahslength;
-+ u8 datalength[3];
-+ u32 rsvd2[2];
-+ __be32 itt;
-+ __be32 ttt;
-+ u32 cmd_sn;
-+ u32 exp_stat_sn;
-+ u32 rsvd3[4];
-+} __packed;
-+
-+struct iscsi_text_rsp_hdr {
-+ u8 opcode;
-+ u8 flags;
-+ u16 rsvd1;
-+ u8 ahslength;
-+ u8 datalength[3];
-+ u32 rsvd2[2];
-+ __be32 itt;
-+ __be32 ttt;
-+ u32 stat_sn;
-+ u32 exp_cmd_sn;
-+ u32 max_cmd_sn;
-+ u32 rsvd3[3];
-+} __packed;
-+
-+struct iscsi_login_req_hdr {
-+ u8 opcode;
-+ u8 flags;
-+ u8 max_version; /* Max. version supported */
-+ u8 min_version; /* Min. version supported */
-+ u8 ahslength;
-+ u8 datalength[3];
-+ union iscsi_sid sid;
-+ __be32 itt; /* Initiator Task Tag */
-+ __be16 cid; /* Connection ID */
-+ u16 rsvd1;
-+ u32 cmd_sn;
-+ u32 exp_stat_sn;
-+ u32 rsvd2[4];
-+} __packed;
-+
-+struct iscsi_login_rsp_hdr {
-+ u8 opcode;
-+ u8 flags;
-+ u8 max_version; /* Max. version supported */
-+ u8 active_version; /* Active version */
-+ u8 ahslength;
-+ u8 datalength[3];
-+ union iscsi_sid sid;
-+ __be32 itt; /* Initiator Task Tag */
-+ u32 rsvd1;
-+ u32 stat_sn;
-+ u32 exp_cmd_sn;
-+ u32 max_cmd_sn;
-+ u8 status_class; /* see Login RSP Status classes below */
-+ u8 status_detail; /* see Login RSP Status details below */
-+ u8 rsvd2[10];
-+} __packed;
-+
-+#define ISCSI_FLG_FINAL 0x80
-+#define ISCSI_FLG_TRANSIT 0x80
-+#define ISCSI_FLG_CSG_SECURITY 0x00
-+#define ISCSI_FLG_CSG_LOGIN 0x04
-+#define ISCSI_FLG_CSG_FULL_FEATURE 0x0c
-+#define ISCSI_FLG_CSG_MASK 0x0c
-+#define ISCSI_FLG_NSG_SECURITY 0x00
-+#define ISCSI_FLG_NSG_LOGIN 0x01
-+#define ISCSI_FLG_NSG_FULL_FEATURE 0x03
-+#define ISCSI_FLG_NSG_MASK 0x03
-+
-+/* Login Status response classes */
-+#define ISCSI_STATUS_SUCCESS 0x00
-+#define ISCSI_STATUS_REDIRECT 0x01
-+#define ISCSI_STATUS_INITIATOR_ERR 0x02
-+#define ISCSI_STATUS_TARGET_ERR 0x03
-+
-+/* Login Status response detail codes */
-+/* Class-0 (Success) */
-+#define ISCSI_STATUS_ACCEPT 0x00
-+
-+/* Class-1 (Redirection) */
-+#define ISCSI_STATUS_TGT_MOVED_TEMP 0x01
-+#define ISCSI_STATUS_TGT_MOVED_PERM 0x02
-+
-+/* Class-2 (Initiator Error) */
-+#define ISCSI_STATUS_INIT_ERR 0x00
-+#define ISCSI_STATUS_AUTH_FAILED 0x01
-+#define ISCSI_STATUS_TGT_FORBIDDEN 0x02
-+#define ISCSI_STATUS_TGT_NOT_FOUND 0x03
-+#define ISCSI_STATUS_TGT_REMOVED 0x04
-+#define ISCSI_STATUS_NO_VERSION 0x05
-+#define ISCSI_STATUS_TOO_MANY_CONN 0x06
-+#define ISCSI_STATUS_MISSING_FIELDS 0x07
-+#define ISCSI_STATUS_CONN_ADD_FAILED 0x08
-+#define ISCSI_STATUS_INV_SESSION_TYPE 0x09
-+#define ISCSI_STATUS_SESSION_NOT_FOUND 0x0a
-+#define ISCSI_STATUS_INV_REQ_TYPE 0x0b
-+
-+/* Class-3 (Target Error) */
-+#define ISCSI_STATUS_TARGET_ERROR 0x00
-+#define ISCSI_STATUS_SVC_UNAVAILABLE 0x01
-+#define ISCSI_STATUS_NO_RESOURCES 0x02
-+
-+struct iscsi_logout_req_hdr {
-+ u8 opcode;
-+ u8 flags;
-+ u16 rsvd1;
-+ u8 ahslength;
-+ u8 datalength[3];
-+ u32 rsvd2[2];
-+ __be32 itt;
-+ __be16 cid;
-+ u16 rsvd3;
-+ u32 cmd_sn;
-+ u32 exp_stat_sn;
-+ u32 rsvd4[4];
-+} __packed;
-+
-+struct iscsi_logout_rsp_hdr {
-+ u8 opcode;
-+ u8 flags;
-+ u8 response;
-+ u8 rsvd1;
-+ u8 ahslength;
-+ u8 datalength[3];
-+ u32 rsvd2[2];
-+ __be32 itt;
-+ u32 rsvd3;
-+ u32 stat_sn;
-+ u32 exp_cmd_sn;
-+ u32 max_cmd_sn;
-+ u32 rsvd4;
-+ __be16 time2wait;
-+ __be16 time2retain;
-+ u32 rsvd5;
-+} __packed;
-+
-+struct iscsi_snack_req_hdr {
-+ u8 opcode;
-+ u8 flags;
-+ u16 rsvd1;
-+ u8 ahslength;
-+ u8 datalength[3];
-+ u32 rsvd2[2];
-+ __be32 itt;
-+ __be32 ttt;
-+ u32 rsvd3;
-+ u32 exp_stat_sn;
-+ u32 rsvd4[2];
-+ __be32 beg_run;
-+ __be32 run_length;
-+} __packed;
-+
-+struct iscsi_reject_hdr {
-+ u8 opcode;
-+ u8 flags;
-+ u8 reason;
-+ u8 rsvd1;
-+ u8 ahslength;
-+ u8 datalength[3];
-+ u32 rsvd2[2];
-+ __be32 ffffffff;
-+ __be32 rsvd3;
-+ u32 stat_sn;
-+ u32 exp_cmd_sn;
-+ u32 max_cmd_sn;
-+ __be32 data_sn;
-+ u32 rsvd4[2];
-+} __packed;
-+
-+#define ISCSI_REASON_RESERVED 0x01
-+#define ISCSI_REASON_DATA_DIGEST_ERROR 0x02
-+#define ISCSI_REASON_DATA_SNACK_REJECT 0x03
-+#define ISCSI_REASON_PROTOCOL_ERROR 0x04
-+#define ISCSI_REASON_UNSUPPORTED_COMMAND 0x05
-+#define ISCSI_REASON_IMMEDIATE_COMMAND_REJECT 0x06
-+#define ISCSI_REASON_TASK_IN_PROGRESS 0x07
-+#define ISCSI_REASON_INVALID_DATA_ACK 0x08
-+#define ISCSI_REASON_INVALID_PDU_FIELD 0x09
-+#define ISCSI_REASON_OUT_OF_RESOURCES 0x0a
-+#define ISCSI_REASON_NEGOTIATION_RESET 0x0b
-+#define ISCSI_REASON_WAITING_LOGOUT 0x0c
-+
-+struct iscsi_nop_out_hdr {
-+ u8 opcode;
-+ u8 flags;
-+ u16 rsvd1;
-+ u8 ahslength;
-+ u8 datalength[3];
-+ __be64 lun;
-+ __be32 itt;
-+ __be32 ttt;
-+ u32 cmd_sn;
-+ u32 exp_stat_sn;
-+ u32 rsvd2[4];
-+} __packed;
-+
-+struct iscsi_nop_in_hdr {
-+ u8 opcode;
-+ u8 flags;
-+ u16 rsvd1;
-+ u8 ahslength;
-+ u8 datalength[3];
-+ __be64 lun;
-+ __be32 itt;
-+ __be32 ttt;
-+ u32 stat_sn;
-+ u32 exp_cmd_sn;
-+ u32 max_cmd_sn;
-+ u32 rsvd2[3];
-+} __packed;
-+
-+#define ISCSI_RESERVED_TAG_CPU32 (0xffffffffU)
-+#define ISCSI_RESERVED_TAG (__constant_cpu_to_be32(ISCSI_RESERVED_TAG_CPU32))
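-+
-+/*
-+ * Per RFC 3720, an all-ones target transfer tag means "no tag"; e.g.
-+ * unsolicited Data-Out PDUs carry ttt == ISCSI_RESERVED_TAG.
-+ */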
-+
-+#define cmnd_hdr(cmnd) ((struct iscsi_scsi_cmd_hdr *) (&((cmnd)->pdu.bhs)))
-+#define cmnd_opcode(cmnd) ((cmnd)->pdu.bhs.opcode & ISCSI_OPCODE_MASK)
-+#define cmnd_scsicode(cmnd) (cmnd_hdr((cmnd))->scb[0])
-+
-+#endif /* __ISCSI_HDR_H__ */
-diff -uprN orig/linux-3.2/drivers/scst/iscsi-scst/nthread.c linux-3.2/drivers/scst/iscsi-scst/nthread.c
---- orig/linux-3.2/drivers/scst/iscsi-scst/nthread.c
-+++ linux-3.2/drivers/scst/iscsi-scst/nthread.c
-@@ -0,0 +1,1891 @@
-+/*
-+ * Network threads.
-+ *
-+ * Copyright (C) 2004 - 2005 FUJITA Tomonori <tomof@acm.org>
-+ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/file.h>
-+#include <linux/kthread.h>
-+#include <linux/delay.h>
-+#include <net/tcp.h>
-+
-+#include "iscsi.h"
-+#include "digest.h"
-+
-+/* Read data states */
-+enum rx_state {
-+ RX_INIT_BHS, /* Must be zero for better "switch" optimization. */
-+ RX_BHS,
-+ RX_CMD_START,
-+ RX_DATA,
-+ RX_END,
-+
-+ RX_CMD_CONTINUE,
-+ RX_INIT_HDIGEST,
-+ RX_CHECK_HDIGEST,
-+ RX_INIT_DDIGEST,
-+ RX_CHECK_DDIGEST,
-+ RX_AHS,
-+ RX_PADDING,
-+};
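-+
-+/*
-+ * Typical RX progression, as driven by process_read_io() below:
-+ *   RX_INIT_BHS -> RX_BHS [-> RX_AHS] [-> RX_INIT_HDIGEST ->
-+ *   RX_CHECK_HDIGEST] -> RX_CMD_START [-> RX_CMD_CONTINUE] -> RX_DATA
-+ *   [-> RX_PADDING] [-> RX_INIT_DDIGEST -> RX_CHECK_DDIGEST] -> RX_END
-+ */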
-+
-+enum tx_state {
-+ TX_INIT = 0, /* Must be zero for better "switch" optimization. */
-+ TX_BHS_DATA,
-+ TX_INIT_PADDING,
-+ TX_PADDING,
-+ TX_INIT_DDIGEST,
-+ TX_DDIGEST,
-+ TX_END,
-+};
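-+
-+/*
-+ * Typical TX progression, as driven by iscsi_send() below:
-+ *   TX_INIT -> TX_BHS_DATA [-> TX_INIT_PADDING [-> TX_PADDING]]
-+ *   [-> TX_INIT_DDIGEST -> TX_DDIGEST] -> TX_END, then back to TX_INIT
-+ */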
-+
-+#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
-+static void iscsi_check_closewait(struct iscsi_conn *conn)
-+{
-+ struct iscsi_cmnd *cmnd;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_CONN_CLOSE_DBG("conn %p, sk_state %d", conn,
-+ conn->sock->sk->sk_state);
-+
-+ if (conn->sock->sk->sk_state != TCP_CLOSE) {
-+ TRACE_CONN_CLOSE_DBG("conn %p, skipping", conn);
-+ goto out;
-+ }
-+
-+	/*
-+	 * No data is going to be sent, so all queued buffers can be freed
-+	 * now. In many cases TCP does that only in close(), but we can't
-+	 * rely on user space to call it.
-+	 */
-+
-+again:
-+ spin_lock_bh(&conn->cmd_list_lock);
-+ list_for_each_entry(cmnd, &conn->cmd_list, cmd_list_entry) {
-+ struct iscsi_cmnd *rsp;
-+ int restart = 0;
-+
-+ TRACE_CONN_CLOSE_DBG("cmd %p, scst_state %x, "
-+ "r2t_len_to_receive %d, ref_cnt %d, parent_req %p, "
-+ "net_ref_cnt %d, sg %p", cmnd, cmnd->scst_state,
-+ cmnd->r2t_len_to_receive, atomic_read(&cmnd->ref_cnt),
-+ cmnd->parent_req, atomic_read(&cmnd->net_ref_cnt),
-+ cmnd->sg);
-+
-+ BUG_ON(cmnd->parent_req != NULL);
-+
-+ if (cmnd->sg != NULL) {
-+ int i;
-+
-+ if (cmnd_get_check(cmnd))
-+ continue;
-+
-+ for (i = 0; i < cmnd->sg_cnt; i++) {
-+ struct page *page = sg_page(&cmnd->sg[i]);
-+ TRACE_CONN_CLOSE_DBG("page %p, net_priv %p, "
-+ "_count %d", page, page->net_priv,
-+ atomic_read(&page->_count));
-+
-+ if (page->net_priv != NULL) {
-+ if (restart == 0) {
-+ spin_unlock_bh(&conn->cmd_list_lock);
-+ restart = 1;
-+ }
-+ while (page->net_priv != NULL)
-+ iscsi_put_page_callback(page);
-+ }
-+ }
-+ cmnd_put(cmnd);
-+
-+ if (restart)
-+ goto again;
-+ }
-+
-+ list_for_each_entry(rsp, &cmnd->rsp_cmd_list,
-+ rsp_cmd_list_entry) {
-+ TRACE_CONN_CLOSE_DBG(" rsp %p, ref_cnt %d, "
-+ "net_ref_cnt %d, sg %p",
-+ rsp, atomic_read(&rsp->ref_cnt),
-+ atomic_read(&rsp->net_ref_cnt), rsp->sg);
-+
-+ if ((rsp->sg != cmnd->sg) && (rsp->sg != NULL)) {
-+ int i;
-+
-+ if (cmnd_get_check(rsp))
-+ continue;
-+
-+ for (i = 0; i < rsp->sg_cnt; i++) {
-+ struct page *page =
-+ sg_page(&rsp->sg[i]);
-+ TRACE_CONN_CLOSE_DBG(
-+ " page %p, net_priv %p, "
-+ "_count %d",
-+ page, page->net_priv,
-+ atomic_read(&page->_count));
-+
-+ if (page->net_priv != NULL) {
-+ if (restart == 0) {
-+ spin_unlock_bh(&conn->cmd_list_lock);
-+ restart = 1;
-+ }
-+ while (page->net_priv != NULL)
-+ iscsi_put_page_callback(page);
-+ }
-+ }
-+ cmnd_put(rsp);
-+
-+ if (restart)
-+ goto again;
-+ }
-+ }
-+ }
-+ spin_unlock_bh(&conn->cmd_list_lock);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+#else
-+static inline void iscsi_check_closewait(struct iscsi_conn *conn) {}
-+#endif
-+
-+static void free_pending_commands(struct iscsi_conn *conn)
-+{
-+ struct iscsi_session *session = conn->session;
-+ struct list_head *pending_list = &session->pending_list;
-+ int req_freed;
-+ struct iscsi_cmnd *cmnd;
-+
-+ spin_lock(&session->sn_lock);
-+ do {
-+ req_freed = 0;
-+ list_for_each_entry(cmnd, pending_list, pending_list_entry) {
-+			TRACE_CONN_CLOSE_DBG("Pending cmd %p "
-+ "(conn %p, cmd_sn %u, exp_cmd_sn %u)",
-+ cmnd, conn, cmnd->pdu.bhs.sn,
-+ session->exp_cmd_sn);
-+ if ((cmnd->conn == conn) &&
-+ (session->exp_cmd_sn == cmnd->pdu.bhs.sn)) {
-+ TRACE_MGMT_DBG("Freeing pending cmd %p "
-+ "(cmd_sn %u, exp_cmd_sn %u)",
-+ cmnd, cmnd->pdu.bhs.sn,
-+ session->exp_cmd_sn);
-+
-+ list_del(&cmnd->pending_list_entry);
-+ cmnd->pending = 0;
-+
-+ session->exp_cmd_sn++;
-+
-+ spin_unlock(&session->sn_lock);
-+
-+ req_cmnd_release_force(cmnd);
-+
-+ req_freed = 1;
-+ spin_lock(&session->sn_lock);
-+ break;
-+ }
-+ }
-+ } while (req_freed);
-+ spin_unlock(&session->sn_lock);
-+
-+ return;
-+}
-+
-+static void free_orphaned_pending_commands(struct iscsi_conn *conn)
-+{
-+ struct iscsi_session *session = conn->session;
-+ struct list_head *pending_list = &session->pending_list;
-+ int req_freed;
-+ struct iscsi_cmnd *cmnd;
-+
-+ spin_lock(&session->sn_lock);
-+ do {
-+ req_freed = 0;
-+ list_for_each_entry(cmnd, pending_list, pending_list_entry) {
-+			TRACE_CONN_CLOSE_DBG("Pending cmd %p "
-+ "(conn %p, cmd_sn %u, exp_cmd_sn %u)",
-+ cmnd, conn, cmnd->pdu.bhs.sn,
-+ session->exp_cmd_sn);
-+ if (cmnd->conn == conn) {
-+ TRACE_MGMT_DBG("Freeing orphaned pending "
-+ "cmnd %p (cmd_sn %u, exp_cmd_sn %u)",
-+ cmnd, cmnd->pdu.bhs.sn,
-+ session->exp_cmd_sn);
-+
-+ list_del(&cmnd->pending_list_entry);
-+ cmnd->pending = 0;
-+
-+ if (session->exp_cmd_sn == cmnd->pdu.bhs.sn)
-+ session->exp_cmd_sn++;
-+
-+ spin_unlock(&session->sn_lock);
-+
-+ req_cmnd_release_force(cmnd);
-+
-+ req_freed = 1;
-+ spin_lock(&session->sn_lock);
-+ break;
-+ }
-+ }
-+ } while (req_freed);
-+ spin_unlock(&session->sn_lock);
-+
-+ return;
-+}
-+
-+#ifdef CONFIG_SCST_DEBUG
-+static void trace_conn_close(struct iscsi_conn *conn)
-+{
-+ struct iscsi_cmnd *cmnd;
-+#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
-+ struct iscsi_cmnd *rsp;
-+#endif
-+
-+#if 0
-+ if (time_after(jiffies, start_waiting + 10*HZ))
-+ trace_flag |= TRACE_CONN_OC_DBG;
-+#endif
-+
-+ spin_lock_bh(&conn->cmd_list_lock);
-+ list_for_each_entry(cmnd, &conn->cmd_list,
-+ cmd_list_entry) {
-+ TRACE_CONN_CLOSE_DBG(
-+ "cmd %p, scst_cmd %p, scst_state %x, scst_cmd state "
-+ "%d, r2t_len_to_receive %d, ref_cnt %d, sn %u, "
-+ "parent_req %p, pending %d",
-+ cmnd, cmnd->scst_cmd, cmnd->scst_state,
-+ ((cmnd->parent_req == NULL) && cmnd->scst_cmd) ?
-+ cmnd->scst_cmd->state : -1,
-+ cmnd->r2t_len_to_receive, atomic_read(&cmnd->ref_cnt),
-+ cmnd->pdu.bhs.sn, cmnd->parent_req, cmnd->pending);
-+#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
-+ TRACE_CONN_CLOSE_DBG("net_ref_cnt %d, sg %p",
-+ atomic_read(&cmnd->net_ref_cnt),
-+ cmnd->sg);
-+ if (cmnd->sg != NULL) {
-+ int i;
-+ for (i = 0; i < cmnd->sg_cnt; i++) {
-+ struct page *page = sg_page(&cmnd->sg[i]);
-+ TRACE_CONN_CLOSE_DBG("page %p, "
-+ "net_priv %p, _count %d",
-+ page, page->net_priv,
-+ atomic_read(&page->_count));
-+ }
-+ }
-+
-+ BUG_ON(cmnd->parent_req != NULL);
-+
-+ list_for_each_entry(rsp, &cmnd->rsp_cmd_list,
-+ rsp_cmd_list_entry) {
-+ TRACE_CONN_CLOSE_DBG(" rsp %p, "
-+ "ref_cnt %d, net_ref_cnt %d, sg %p",
-+ rsp, atomic_read(&rsp->ref_cnt),
-+ atomic_read(&rsp->net_ref_cnt), rsp->sg);
-+ if (rsp->sg != cmnd->sg && rsp->sg) {
-+ int i;
-+ for (i = 0; i < rsp->sg_cnt; i++) {
-+ TRACE_CONN_CLOSE_DBG(" page %p, "
-+ "net_priv %p, _count %d",
-+ sg_page(&rsp->sg[i]),
-+ sg_page(&rsp->sg[i])->net_priv,
-+ atomic_read(&sg_page(&rsp->sg[i])->
-+ _count));
-+ }
-+ }
-+ }
-+#endif /* CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION */
-+ }
-+ spin_unlock_bh(&conn->cmd_list_lock);
-+ return;
-+}
-+#else /* CONFIG_SCST_DEBUG */
-+static void trace_conn_close(struct iscsi_conn *conn) {}
-+#endif /* CONFIG_SCST_DEBUG */
-+
-+void iscsi_task_mgmt_affected_cmds_done(struct scst_mgmt_cmd *scst_mcmd)
-+{
-+ int fn = scst_mgmt_cmd_get_fn(scst_mcmd);
-+ void *priv = scst_mgmt_cmd_get_tgt_priv(scst_mcmd);
-+
-+ TRACE_MGMT_DBG("scst_mcmd %p, fn %d, priv %p", scst_mcmd, fn, priv);
-+
-+ switch (fn) {
-+ case SCST_NEXUS_LOSS_SESS:
-+ case SCST_ABORT_ALL_TASKS_SESS:
-+ {
-+ struct iscsi_conn *conn = (struct iscsi_conn *)priv;
-+ struct iscsi_session *sess = conn->session;
-+ struct iscsi_conn *c;
-+
-+ if (sess->sess_reinst_successor != NULL)
-+ scst_reassign_persistent_sess_states(
-+ sess->sess_reinst_successor->scst_sess,
-+ sess->scst_sess);
-+
-+ mutex_lock(&sess->target->target_mutex);
-+
-+		/*
-+		 * We can't mark sess as shutting down earlier, because until
-+		 * now it might have had pending commands. Otherwise, in case
-+		 * of reinstatement, that might lead to data corruption,
-+		 * because commands in the session being reinstated could be
-+		 * executed after commands in the new session.
-+		 */
-+ sess->sess_shutting_down = 1;
-+ list_for_each_entry(c, &sess->conn_list, conn_list_entry) {
-+ if (!test_bit(ISCSI_CONN_SHUTTINGDOWN, &c->conn_aflags)) {
-+ sess->sess_shutting_down = 0;
-+ break;
-+ }
-+ }
-+
-+ if (conn->conn_reinst_successor != NULL) {
-+ BUG_ON(!test_bit(ISCSI_CONN_REINSTATING,
-+ &conn->conn_reinst_successor->conn_aflags));
-+ conn_reinst_finished(conn->conn_reinst_successor);
-+ conn->conn_reinst_successor = NULL;
-+ } else if (sess->sess_reinst_successor != NULL) {
-+ sess_reinst_finished(sess->sess_reinst_successor);
-+ sess->sess_reinst_successor = NULL;
-+ }
-+ mutex_unlock(&sess->target->target_mutex);
-+
-+ complete_all(&conn->ready_to_free);
-+ break;
-+ }
-+ default:
-+ /* Nothing to do */
-+ break;
-+ }
-+
-+ return;
-+}
-+
-+/* No locks */
-+static void close_conn(struct iscsi_conn *conn)
-+{
-+ struct iscsi_session *session = conn->session;
-+ struct iscsi_target *target = conn->target;
-+ typeof(jiffies) start_waiting = jiffies;
-+ typeof(jiffies) shut_start_waiting = start_waiting;
-+ bool pending_reported = 0, wait_expired = 0, shut_expired = 0;
-+ bool reinst;
-+ uint32_t tid, cid;
-+ uint64_t sid;
-+
-+#define CONN_PENDING_TIMEOUT ((typeof(jiffies))10*HZ)
-+#define CONN_WAIT_TIMEOUT ((typeof(jiffies))10*HZ)
-+#define CONN_REG_SHUT_TIMEOUT ((typeof(jiffies))125*HZ)
-+#define CONN_DEL_SHUT_TIMEOUT ((typeof(jiffies))10*HZ)
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Closing connection %p (conn_ref_cnt=%d)", conn,
-+ atomic_read(&conn->conn_ref_cnt));
-+
-+ iscsi_extracheck_is_rd_thread(conn);
-+
-+ BUG_ON(!conn->closing);
-+
-+ if (conn->active_close) {
-+		/* We want all our already sent operations to complete */
-+ conn->sock->ops->shutdown(conn->sock, RCV_SHUTDOWN);
-+ } else {
-+ conn->sock->ops->shutdown(conn->sock,
-+ RCV_SHUTDOWN|SEND_SHUTDOWN);
-+ }
-+
-+ mutex_lock(&session->target->target_mutex);
-+
-+ set_bit(ISCSI_CONN_SHUTTINGDOWN, &conn->conn_aflags);
-+ reinst = (conn->conn_reinst_successor != NULL);
-+
-+ mutex_unlock(&session->target->target_mutex);
-+
-+ if (reinst) {
-+ int rc;
-+ int lun = 0;
-+
-+ /* Abort all outstanding commands */
-+ rc = scst_rx_mgmt_fn_lun(session->scst_sess,
-+ SCST_ABORT_ALL_TASKS_SESS, (uint8_t *)&lun, sizeof(lun),
-+ SCST_NON_ATOMIC, conn);
-+ if (rc != 0)
-+ PRINT_ERROR("SCST_ABORT_ALL_TASKS_SESS failed %d", rc);
-+ } else {
-+ int rc;
-+ int lun = 0;
-+
-+ rc = scst_rx_mgmt_fn_lun(session->scst_sess,
-+ SCST_NEXUS_LOSS_SESS, (uint8_t *)&lun, sizeof(lun),
-+ SCST_NON_ATOMIC, conn);
-+ if (rc != 0)
-+ PRINT_ERROR("SCST_NEXUS_LOSS_SESS failed %d", rc);
-+ }
-+
-+ if (conn->read_state != RX_INIT_BHS) {
-+ struct iscsi_cmnd *cmnd = conn->read_cmnd;
-+
-+ if (cmnd->scst_state == ISCSI_CMD_STATE_RX_CMD) {
-+ TRACE_CONN_CLOSE_DBG("Going to wait for cmnd %p to "
-+ "change state from RX_CMD", cmnd);
-+ }
-+ wait_event(conn->read_state_waitQ,
-+ cmnd->scst_state != ISCSI_CMD_STATE_RX_CMD);
-+
-+ TRACE_CONN_CLOSE_DBG("Releasing conn->read_cmnd %p (conn %p)",
-+ conn->read_cmnd, conn);
-+
-+ conn->read_cmnd = NULL;
-+ conn->read_state = RX_INIT_BHS;
-+ req_cmnd_release_force(cmnd);
-+ }
-+
-+ conn_abort(conn);
-+
-+ /* ToDo: not the best way to wait */
-+ while (atomic_read(&conn->conn_ref_cnt) != 0) {
-+ if (conn->conn_tm_active)
-+ iscsi_check_tm_data_wait_timeouts(conn, true);
-+
-+ mutex_lock(&target->target_mutex);
-+ spin_lock(&session->sn_lock);
-+ if (session->tm_rsp && session->tm_rsp->conn == conn) {
-+ struct iscsi_cmnd *tm_rsp = session->tm_rsp;
-+ TRACE_MGMT_DBG("Dropping delayed TM rsp %p", tm_rsp);
-+ session->tm_rsp = NULL;
-+ session->tm_active--;
-+ WARN_ON(session->tm_active < 0);
-+ spin_unlock(&session->sn_lock);
-+ mutex_unlock(&target->target_mutex);
-+
-+ rsp_cmnd_release(tm_rsp);
-+ } else {
-+ spin_unlock(&session->sn_lock);
-+ mutex_unlock(&target->target_mutex);
-+ }
-+
-+ /* It's safe to check it without sn_lock */
-+ if (!list_empty(&session->pending_list)) {
-+ TRACE_CONN_CLOSE_DBG("Disposing pending commands on "
-+ "connection %p (conn_ref_cnt=%d)", conn,
-+ atomic_read(&conn->conn_ref_cnt));
-+
-+ free_pending_commands(conn);
-+
-+ if (time_after(jiffies,
-+ start_waiting + CONN_PENDING_TIMEOUT)) {
-+ if (!pending_reported) {
-+ TRACE_CONN_CLOSE("%s",
-+ "Pending wait time expired");
-+ pending_reported = 1;
-+ }
-+ free_orphaned_pending_commands(conn);
-+ }
-+ }
-+
-+ iscsi_make_conn_wr_active(conn);
-+
-+ /* That's for active close only, actually */
-+ if (time_after(jiffies, start_waiting + CONN_WAIT_TIMEOUT) &&
-+ !wait_expired) {
-+ TRACE_CONN_CLOSE("Wait time expired (conn %p, "
-+ "sk_state %d)",
-+ conn, conn->sock->sk->sk_state);
-+ conn->sock->ops->shutdown(conn->sock, SEND_SHUTDOWN);
-+ wait_expired = 1;
-+ shut_start_waiting = jiffies;
-+ }
-+
-+		if (wait_expired && !shut_expired &&
-+		    time_after(jiffies, shut_start_waiting +
-+				(conn->deleting ? CONN_DEL_SHUT_TIMEOUT :
-+						CONN_REG_SHUT_TIMEOUT))) {
-+ TRACE_CONN_CLOSE("Wait time after shutdown expired "
-+ "(conn %p, sk_state %d)", conn,
-+ conn->sock->sk->sk_state);
-+ conn->sock->sk->sk_prot->disconnect(conn->sock->sk, 0);
-+ shut_expired = 1;
-+ }
-+
-+ if (conn->deleting)
-+ msleep(200);
-+ else
-+ msleep(1000);
-+
-+ TRACE_CONN_CLOSE_DBG("conn %p, conn_ref_cnt %d left, "
-+ "wr_state %d, exp_cmd_sn %u",
-+ conn, atomic_read(&conn->conn_ref_cnt),
-+ conn->wr_state, session->exp_cmd_sn);
-+
-+ trace_conn_close(conn);
-+
-+		/* It might never be called for a conn being closed */
-+ __iscsi_write_space_ready(conn);
-+
-+ iscsi_check_closewait(conn);
-+ }
-+
-+ write_lock_bh(&conn->sock->sk->sk_callback_lock);
-+ conn->sock->sk->sk_state_change = conn->old_state_change;
-+ conn->sock->sk->sk_data_ready = conn->old_data_ready;
-+ conn->sock->sk->sk_write_space = conn->old_write_space;
-+ write_unlock_bh(&conn->sock->sk->sk_callback_lock);
-+
-+ while (1) {
-+ bool t;
-+
-+ spin_lock_bh(&conn->conn_thr_pool->wr_lock);
-+ t = (conn->wr_state == ISCSI_CONN_WR_STATE_IDLE);
-+ spin_unlock_bh(&conn->conn_thr_pool->wr_lock);
-+
-+ if (t && (atomic_read(&conn->conn_ref_cnt) == 0))
-+ break;
-+
-+ TRACE_CONN_CLOSE_DBG("Waiting for wr thread (conn %p), "
-+ "wr_state %x", conn, conn->wr_state);
-+ msleep(50);
-+ }
-+
-+ wait_for_completion(&conn->ready_to_free);
-+
-+ tid = target->tid;
-+ sid = session->sid;
-+ cid = conn->cid;
-+
-+ mutex_lock(&target->target_mutex);
-+ conn_free(conn);
-+ mutex_unlock(&target->target_mutex);
-+
-+ /*
-+ * We can't send E_CONN_CLOSE earlier, because otherwise we would have
-+ * a race, when the user space tried to destroy session, which still
-+ * has connections.
-+ *
-+ * !! All target, session and conn can be already dead here !!
-+ */
-+ TRACE_CONN_CLOSE("Notifying user space about closing connection %p",
-+ conn);
-+ event_send(tid, sid, cid, 0, E_CONN_CLOSE, NULL, NULL);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int close_conn_thr(void *arg)
-+{
-+ struct iscsi_conn *conn = (struct iscsi_conn *)arg;
-+
-+ TRACE_ENTRY();
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+	/*
-+	 * To satisfy iscsi_extracheck_is_rd_thread() in functions called
-+	 * during connection close. It is safe, because at this point conn
-+	 * can't be used by any other thread.
-+	 */
-+ conn->rd_task = current;
-+#endif
-+ close_conn(conn);
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+/* No locks */
-+static void start_close_conn(struct iscsi_conn *conn)
-+{
-+ struct task_struct *t;
-+
-+ TRACE_ENTRY();
-+
-+ t = kthread_run(close_conn_thr, conn, "iscsi_conn_cleanup");
-+ if (IS_ERR(t)) {
-+ PRINT_ERROR("kthread_run() failed (%ld), closing conn %p "
-+ "directly", PTR_ERR(t), conn);
-+ close_conn(conn);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static inline void iscsi_conn_init_read(struct iscsi_conn *conn,
-+ void __user *data, size_t len)
-+{
-+ conn->read_iov[0].iov_base = data;
-+ conn->read_iov[0].iov_len = len;
-+ conn->read_msg.msg_iov = conn->read_iov;
-+ conn->read_msg.msg_iovlen = 1;
-+ conn->read_size = len;
-+ return;
-+}
-+
-+static void iscsi_conn_prepare_read_ahs(struct iscsi_conn *conn,
-+ struct iscsi_cmnd *cmnd)
-+{
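-+	/* Round the AHS size up to a multiple of 4 bytes */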
-+ int asize = (cmnd->pdu.ahssize + 3) & -4;
-+
-+ /* ToDo: __GFP_NOFAIL ?? */
-+ cmnd->pdu.ahs = kmalloc(asize, __GFP_NOFAIL|GFP_KERNEL);
-+ BUG_ON(cmnd->pdu.ahs == NULL);
-+ iscsi_conn_init_read(conn, (void __force __user *)cmnd->pdu.ahs, asize);
-+ return;
-+}
-+
-+static struct iscsi_cmnd *iscsi_get_send_cmnd(struct iscsi_conn *conn)
-+{
-+ struct iscsi_cmnd *cmnd = NULL;
-+
-+ spin_lock_bh(&conn->write_list_lock);
-+ if (!list_empty(&conn->write_list)) {
-+ cmnd = list_entry(conn->write_list.next, struct iscsi_cmnd,
-+ write_list_entry);
-+ cmd_del_from_write_list(cmnd);
-+ cmnd->write_processing_started = 1;
-+ } else {
-+ spin_unlock_bh(&conn->write_list_lock);
-+ goto out;
-+ }
-+ spin_unlock_bh(&conn->write_list_lock);
-+
-+ if (unlikely(test_bit(ISCSI_CMD_ABORTED,
-+ &cmnd->parent_req->prelim_compl_flags))) {
-+ TRACE_MGMT_DBG("Going to send acmd %p (scst cmd %p, "
-+ "state %d, parent_req %p)", cmnd, cmnd->scst_cmd,
-+ cmnd->scst_state, cmnd->parent_req);
-+ }
-+
-+ if (unlikely(cmnd_opcode(cmnd) == ISCSI_OP_SCSI_TASK_MGT_RSP)) {
-+#ifdef CONFIG_SCST_DEBUG
-+ struct iscsi_task_mgt_hdr *req_hdr =
-+ (struct iscsi_task_mgt_hdr *)&cmnd->parent_req->pdu.bhs;
-+ struct iscsi_task_rsp_hdr *rsp_hdr =
-+ (struct iscsi_task_rsp_hdr *)&cmnd->pdu.bhs;
-+ TRACE_MGMT_DBG("Going to send TM response %p (status %d, "
-+ "fn %d, parent_req %p)", cmnd, rsp_hdr->response,
-+ req_hdr->function & ISCSI_FUNCTION_MASK,
-+ cmnd->parent_req);
-+#endif
-+ }
-+
-+out:
-+ return cmnd;
-+}
-+
-+/* Returns number of bytes left to receive or <0 for error */
-+static int do_recv(struct iscsi_conn *conn)
-+{
-+ int res;
-+ mm_segment_t oldfs;
-+ struct msghdr msg;
-+ int first_len;
-+
-+ EXTRACHECKS_BUG_ON(conn->read_cmnd == NULL);
-+
-+ if (unlikely(conn->closing)) {
-+ res = -EIO;
-+ goto out;
-+ }
-+
-+	/*
-+	 * We assume that if sock_recvmsg() returned less data than requested,
-+	 * then next time it will return -EAGAIN, so there's no point in
-+	 * calling it again.
-+	 */
-+
-+restart:
-+ memset(&msg, 0, sizeof(msg));
-+ msg.msg_iov = conn->read_msg.msg_iov;
-+ msg.msg_iovlen = conn->read_msg.msg_iovlen;
-+ first_len = msg.msg_iov->iov_len;
-+
-+ oldfs = get_fs();
-+ set_fs(get_ds());
-+ res = sock_recvmsg(conn->sock, &msg, conn->read_size,
-+ MSG_DONTWAIT | MSG_NOSIGNAL);
-+ set_fs(oldfs);
-+
-+ TRACE_DBG("msg_iovlen %zd, first_len %d, read_size %d, res %d",
-+ msg.msg_iovlen, first_len, conn->read_size, res);
-+
-+ if (res > 0) {
-+		/*
-+		 * To save considerable effort and CPU power, we
-+		 * assume that the TCP functions adjust
-+		 * conn->read_msg.msg_iov and conn->read_msg.msg_iovlen
-+		 * by the amount of copied data. This BUG_ON is intended
-+		 * to catch any future change in that behavior.
-+		 */
-+ BUG_ON((res >= first_len) &&
-+ (conn->read_msg.msg_iov->iov_len != 0));
-+ conn->read_size -= res;
-+ if (conn->read_size != 0) {
-+ if (res >= first_len) {
-+ int done = 1 + ((res - first_len) >> PAGE_SHIFT);
-+ TRACE_DBG("done %d", done);
-+ conn->read_msg.msg_iov += done;
-+ conn->read_msg.msg_iovlen -= done;
-+ }
-+ }
-+ res = conn->read_size;
-+ } else {
-+ switch (res) {
-+ case -EAGAIN:
-+ TRACE_DBG("EAGAIN received for conn %p", conn);
-+ res = conn->read_size;
-+ break;
-+ case -ERESTARTSYS:
-+ TRACE_DBG("ERESTARTSYS received for conn %p", conn);
-+ goto restart;
-+ default:
-+ if (!conn->closing) {
-+ PRINT_ERROR("sock_recvmsg() failed: %d", res);
-+ mark_conn_closed(conn);
-+ }
-+ if (res == 0)
-+ res = -EIO;
-+ break;
-+ }
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int iscsi_rx_check_ddigest(struct iscsi_conn *conn)
-+{
-+ struct iscsi_cmnd *cmnd = conn->read_cmnd;
-+ int res;
-+
-+ res = do_recv(conn);
-+ if (res == 0) {
-+ conn->read_state = RX_END;
-+
-+ if (cmnd->pdu.datasize <= 16*1024) {
-+			/*
-+			 * The data is still cache hot, so let's compute the
-+			 * digest inline. The trade-off is what will expose
-+			 * more latency: possible cache misses or the digest
-+			 * calculation.
-+			 */
-+ TRACE_DBG("cmnd %p, opcode %x: checking RX "
-+ "ddigest inline", cmnd, cmnd_opcode(cmnd));
-+ cmnd->ddigest_checked = 1;
-+ res = digest_rx_data(cmnd);
-+ if (unlikely(res != 0)) {
-+ struct iscsi_cmnd *orig_req;
-+ if (cmnd_opcode(cmnd) == ISCSI_OP_SCSI_DATA_OUT)
-+ orig_req = cmnd->cmd_req;
-+ else
-+ orig_req = cmnd;
-+ if (unlikely(orig_req->scst_cmd == NULL)) {
-+ /* Just drop it */
-+ iscsi_preliminary_complete(cmnd, orig_req, false);
-+ } else {
-+ set_scst_preliminary_status_rsp(orig_req, false,
-+ SCST_LOAD_SENSE(iscsi_sense_crc_error));
-+ /*
-+ * Let's prelim complete cmnd too to
-+ * handle the DATA OUT case
-+ */
-+ iscsi_preliminary_complete(cmnd, orig_req, false);
-+ }
-+ res = 0;
-+ }
-+ } else if (cmnd_opcode(cmnd) == ISCSI_OP_SCSI_CMD) {
-+ cmd_add_on_rx_ddigest_list(cmnd, cmnd);
-+ cmnd_get(cmnd);
-+ } else if (cmnd_opcode(cmnd) != ISCSI_OP_SCSI_DATA_OUT) {
-+			/*
-+			 * We can get here only for a Nop-Out. The iSCSI RFC
-+			 * doesn't specify how to deal with digest errors in
-+			 * this case, so let's just drop the command.
-+			 */
-+ TRACE_DBG("cmnd %p, opcode %x: checking NOP RX "
-+ "ddigest", cmnd, cmnd_opcode(cmnd));
-+ res = digest_rx_data(cmnd);
-+ if (unlikely(res != 0)) {
-+ iscsi_preliminary_complete(cmnd, cmnd, false);
-+ res = 0;
-+ }
-+ }
-+ }
-+
-+ return res;
-+}
-+
-+/* No locks, conn is rd processing */
-+static int process_read_io(struct iscsi_conn *conn, int *closed)
-+{
-+ struct iscsi_cmnd *cmnd = conn->read_cmnd;
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ /* In case of error cmnd will be freed in close_conn() */
-+
-+ do {
-+ switch (conn->read_state) {
-+ case RX_INIT_BHS:
-+ EXTRACHECKS_BUG_ON(conn->read_cmnd != NULL);
-+ cmnd = cmnd_alloc(conn, NULL);
-+ conn->read_cmnd = cmnd;
-+ iscsi_conn_init_read(cmnd->conn,
-+ (void __force __user *)&cmnd->pdu.bhs,
-+ sizeof(cmnd->pdu.bhs));
-+ conn->read_state = RX_BHS;
-+			/* fall through */
-+
-+ case RX_BHS:
-+ res = do_recv(conn);
-+ if (res == 0) {
-+				/*
-+				 * This command had not yet been received at
-+				 * abort time, so it shouldn't be affected by
-+				 * any abort.
-+				 */
-+ EXTRACHECKS_BUG_ON(cmnd->prelim_compl_flags != 0);
-+
-+ iscsi_cmnd_get_length(&cmnd->pdu);
-+
-+ if (cmnd->pdu.ahssize == 0) {
-+ if ((conn->hdigest_type & DIGEST_NONE) == 0)
-+ conn->read_state = RX_INIT_HDIGEST;
-+ else
-+ conn->read_state = RX_CMD_START;
-+ } else {
-+ iscsi_conn_prepare_read_ahs(conn, cmnd);
-+ conn->read_state = RX_AHS;
-+ }
-+ }
-+ break;
-+
-+ case RX_CMD_START:
-+ res = cmnd_rx_start(cmnd);
-+ if (res == 0) {
-+ if (cmnd->pdu.datasize == 0)
-+ conn->read_state = RX_END;
-+ else
-+ conn->read_state = RX_DATA;
-+ } else if (res > 0)
-+ conn->read_state = RX_CMD_CONTINUE;
-+ else
-+ BUG_ON(!conn->closing);
-+ break;
-+
-+ case RX_CMD_CONTINUE:
-+ if (cmnd->scst_state == ISCSI_CMD_STATE_RX_CMD) {
-+ TRACE_DBG("cmnd %p is still in RX_CMD state",
-+ cmnd);
-+ res = 1;
-+ break;
-+ }
-+ res = cmnd_rx_continue(cmnd);
-+ if (unlikely(res != 0))
-+ BUG_ON(!conn->closing);
-+ else {
-+ if (cmnd->pdu.datasize == 0)
-+ conn->read_state = RX_END;
-+ else
-+ conn->read_state = RX_DATA;
-+ }
-+ break;
-+
-+ case RX_DATA:
-+ res = do_recv(conn);
-+ if (res == 0) {
-+ int psz = ((cmnd->pdu.datasize + 3) & -4) - cmnd->pdu.datasize;
-+ if (psz != 0) {
-+ TRACE_DBG("padding %d bytes", psz);
-+ iscsi_conn_init_read(conn,
-+ (void __force __user *)&conn->rpadding, psz);
-+ conn->read_state = RX_PADDING;
-+ } else if ((conn->ddigest_type & DIGEST_NONE) != 0)
-+ conn->read_state = RX_END;
-+ else
-+ conn->read_state = RX_INIT_DDIGEST;
-+ }
-+ break;
-+
-+ case RX_END:
-+ if (unlikely(conn->read_size != 0)) {
-+ PRINT_CRIT_ERROR("conn read_size !=0 on RX_END "
-+ "(conn %p, op %x, read_size %d)", conn,
-+ cmnd_opcode(cmnd), conn->read_size);
-+ BUG();
-+ }
-+ conn->read_cmnd = NULL;
-+ conn->read_state = RX_INIT_BHS;
-+
-+ cmnd_rx_end(cmnd);
-+
-+ EXTRACHECKS_BUG_ON(conn->read_size != 0);
-+
-+			/*
-+			 * Exit to maintain fairness. Res must be 0 here
-+			 * anyway; the assignment only silences a compiler
-+			 * warning about an uninitialized variable.
-+			 */
-+ res = 0;
-+ goto out;
-+
-+ case RX_INIT_HDIGEST:
-+ iscsi_conn_init_read(conn,
-+ (void __force __user *)&cmnd->hdigest, sizeof(u32));
-+ conn->read_state = RX_CHECK_HDIGEST;
-+			/* fall through */
-+
-+ case RX_CHECK_HDIGEST:
-+ res = do_recv(conn);
-+ if (res == 0) {
-+ res = digest_rx_header(cmnd);
-+ if (unlikely(res != 0)) {
-+ PRINT_ERROR("rx header digest for "
-+ "initiator %s failed (%d)",
-+ conn->session->initiator_name,
-+ res);
-+ mark_conn_closed(conn);
-+ } else
-+ conn->read_state = RX_CMD_START;
-+ }
-+ break;
-+
-+ case RX_INIT_DDIGEST:
-+ iscsi_conn_init_read(conn,
-+ (void __force __user *)&cmnd->ddigest,
-+ sizeof(u32));
-+ conn->read_state = RX_CHECK_DDIGEST;
-+			/* fall through */
-+
-+ case RX_CHECK_DDIGEST:
-+ res = iscsi_rx_check_ddigest(conn);
-+ break;
-+
-+ case RX_AHS:
-+ res = do_recv(conn);
-+ if (res == 0) {
-+ if ((conn->hdigest_type & DIGEST_NONE) == 0)
-+ conn->read_state = RX_INIT_HDIGEST;
-+ else
-+ conn->read_state = RX_CMD_START;
-+ }
-+ break;
-+
-+ case RX_PADDING:
-+ res = do_recv(conn);
-+ if (res == 0) {
-+ if ((conn->ddigest_type & DIGEST_NONE) == 0)
-+ conn->read_state = RX_INIT_DDIGEST;
-+ else
-+ conn->read_state = RX_END;
-+ }
-+ break;
-+
-+ default:
-+ PRINT_CRIT_ERROR("%d %x", conn->read_state, cmnd_opcode(cmnd));
-+ res = -1; /* to keep compiler happy */
-+ BUG();
-+ }
-+ } while (res == 0);
-+
-+ if (unlikely(conn->closing)) {
-+ start_close_conn(conn);
-+ *closed = 1;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/*
-+ * Called under rd_lock and BHs disabled, but will drop it inside,
-+ * then reacquire.
-+ */
-+static void scst_do_job_rd(struct iscsi_thread_pool *p)
-+ __acquires(&rd_lock)
-+ __releases(&rd_lock)
-+{
-+ TRACE_ENTRY();
-+
-+	/*
-+	 * We delete connections and re-add them to the list tail to
-+	 * maintain fairness between them.
-+	 */
-+
-+ while (!list_empty(&p->rd_list)) {
-+ int closed = 0, rc;
-+ struct iscsi_conn *conn = list_entry(p->rd_list.next,
-+ typeof(*conn), rd_list_entry);
-+
-+ list_del(&conn->rd_list_entry);
-+
-+ BUG_ON(conn->rd_state == ISCSI_CONN_RD_STATE_PROCESSING);
-+ conn->rd_data_ready = 0;
-+ conn->rd_state = ISCSI_CONN_RD_STATE_PROCESSING;
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ conn->rd_task = current;
-+#endif
-+ spin_unlock_bh(&p->rd_lock);
-+
-+ rc = process_read_io(conn, &closed);
-+
-+ spin_lock_bh(&p->rd_lock);
-+
-+ if (unlikely(closed))
-+ continue;
-+
-+ if (unlikely(conn->conn_tm_active)) {
-+ spin_unlock_bh(&p->rd_lock);
-+ iscsi_check_tm_data_wait_timeouts(conn, false);
-+ spin_lock_bh(&p->rd_lock);
-+ }
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ conn->rd_task = NULL;
-+#endif
-+ if ((rc == 0) || conn->rd_data_ready) {
-+ list_add_tail(&conn->rd_list_entry, &p->rd_list);
-+ conn->rd_state = ISCSI_CONN_RD_STATE_IN_LIST;
-+ } else
-+ conn->rd_state = ISCSI_CONN_RD_STATE_IDLE;
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static inline int test_rd_list(struct iscsi_thread_pool *p)
-+{
-+ int res = !list_empty(&p->rd_list) ||
-+ unlikely(kthread_should_stop());
-+ return res;
-+}
-+
-+int istrd(void *arg)
-+{
-+ struct iscsi_thread_pool *p = arg;
-+ int rc;
-+
-+ TRACE_ENTRY();
-+
-+ PRINT_INFO("Read thread for pool %p started, PID %d", p, current->pid);
-+
-+ current->flags |= PF_NOFREEZE;
-+ rc = set_cpus_allowed_ptr(current, &p->cpu_mask);
-+ if (rc != 0)
-+ PRINT_ERROR("Setting CPU affinity failed: %d", rc);
-+
-+ spin_lock_bh(&p->rd_lock);
-+ while (!kthread_should_stop()) {
-+ wait_queue_t wait;
-+ init_waitqueue_entry(&wait, current);
-+
-+ if (!test_rd_list(p)) {
-+ add_wait_queue_exclusive_head(&p->rd_waitQ, &wait);
-+ for (;;) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ if (test_rd_list(p))
-+ break;
-+ spin_unlock_bh(&p->rd_lock);
-+ schedule();
-+ spin_lock_bh(&p->rd_lock);
-+ }
-+ set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&p->rd_waitQ, &wait);
-+ }
-+ scst_do_job_rd(p);
-+ }
-+ spin_unlock_bh(&p->rd_lock);
-+
-+	/*
-+	 * If kthread_should_stop() is true, we are guaranteed to be
-+	 * in module unload, so rd_list must be empty.
-+	 */
-+ BUG_ON(!list_empty(&p->rd_list));
-+
-+	PRINT_INFO("Read thread PID %d for pool %p finished", current->pid, p);
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
-+static inline void __iscsi_get_page_callback(struct iscsi_cmnd *cmd)
-+{
-+ int v;
-+
-+ TRACE_NET_PAGE("cmd %p, new net_ref_cnt %d",
-+ cmd, atomic_read(&cmd->net_ref_cnt)+1);
-+
-+ v = atomic_inc_return(&cmd->net_ref_cnt);
-+ if (v == 1) {
-+ TRACE_NET_PAGE("getting cmd %p", cmd);
-+ cmnd_get(cmd);
-+ }
-+ return;
-+}
-+
-+void iscsi_get_page_callback(struct page *page)
-+{
-+ struct iscsi_cmnd *cmd = (struct iscsi_cmnd *)page->net_priv;
-+
-+ TRACE_NET_PAGE("page %p, _count %d", page,
-+ atomic_read(&page->_count));
-+
-+ __iscsi_get_page_callback(cmd);
-+ return;
-+}
-+
-+static inline void __iscsi_put_page_callback(struct iscsi_cmnd *cmd)
-+{
-+ TRACE_NET_PAGE("cmd %p, new net_ref_cnt %d", cmd,
-+ atomic_read(&cmd->net_ref_cnt)-1);
-+
-+ if (atomic_dec_and_test(&cmd->net_ref_cnt)) {
-+ int i, sg_cnt = cmd->sg_cnt;
-+ for (i = 0; i < sg_cnt; i++) {
-+ struct page *page = sg_page(&cmd->sg[i]);
-+ TRACE_NET_PAGE("Clearing page %p", page);
-+ if (page->net_priv == cmd)
-+ page->net_priv = NULL;
-+ }
-+ cmnd_put(cmd);
-+ }
-+ return;
-+}
-+
-+void iscsi_put_page_callback(struct page *page)
-+{
-+ struct iscsi_cmnd *cmd = (struct iscsi_cmnd *)page->net_priv;
-+
-+ TRACE_NET_PAGE("page %p, _count %d", page,
-+ atomic_read(&page->_count));
-+
-+ __iscsi_put_page_callback(cmd);
-+ return;
-+}
-+
-+static void check_net_priv(struct iscsi_cmnd *cmd, struct page *page)
-+{
-+ if ((atomic_read(&cmd->net_ref_cnt) == 1) && (page->net_priv == cmd)) {
-+ TRACE_DBG("sendpage() not called get_page(), zeroing net_priv "
-+ "%p (page %p)", page->net_priv, page);
-+ page->net_priv = NULL;
-+ }
-+ return;
-+}
-+#else
-+static inline void check_net_priv(struct iscsi_cmnd *cmd, struct page *page) {}
-+static inline void __iscsi_get_page_callback(struct iscsi_cmnd *cmd) {}
-+static inline void __iscsi_put_page_callback(struct iscsi_cmnd *cmd) {}
-+#endif
-+
-+void req_add_to_write_timeout_list(struct iscsi_cmnd *req)
-+{
-+ struct iscsi_conn *conn;
-+ bool set_conn_tm_active = false;
-+
-+ TRACE_ENTRY();
-+
-+ if (req->on_write_timeout_list)
-+ goto out;
-+
-+ conn = req->conn;
-+
-+ TRACE_DBG("Adding req %p to conn %p write_timeout_list",
-+ req, conn);
-+
-+ spin_lock_bh(&conn->write_list_lock);
-+
-+ /* Recheck, since it can be changed behind us */
-+ if (unlikely(req->on_write_timeout_list)) {
-+ spin_unlock_bh(&conn->write_list_lock);
-+ goto out;
-+ }
-+
-+ req->on_write_timeout_list = 1;
-+ req->write_start = jiffies;
-+
-+ if (unlikely(cmnd_opcode(req) == ISCSI_OP_NOP_OUT)) {
-+ unsigned long req_tt = iscsi_get_timeout_time(req);
-+ struct iscsi_cmnd *r;
-+ bool inserted = false;
-+ list_for_each_entry(r, &conn->write_timeout_list,
-+ write_timeout_list_entry) {
-+ unsigned long tt = iscsi_get_timeout_time(r);
-+ if (time_after(tt, req_tt)) {
-+ TRACE_DBG("Add NOP IN req %p (tt %ld) before "
-+ "req %p (tt %ld)", req, req_tt, r, tt);
-+ list_add_tail(&req->write_timeout_list_entry,
-+ &r->write_timeout_list_entry);
-+ inserted = true;
-+ break;
-+ } else
-+ TRACE_DBG("Skipping op %x req %p (tt %ld)",
-+ cmnd_opcode(r), r, tt);
-+ }
-+ if (!inserted) {
-+ TRACE_DBG("Add NOP IN req %p in the tail", req);
-+ list_add_tail(&req->write_timeout_list_entry,
-+ &conn->write_timeout_list);
-+ }
-+
-+ /* We suppose that nop_in_timeout must be <= data_rsp_timeout */
-+ req_tt += ISCSI_ADD_SCHED_TIME;
-+ if (timer_pending(&conn->rsp_timer) &&
-+ time_after(conn->rsp_timer.expires, req_tt)) {
-+ TRACE_DBG("Timer adjusted for sooner expired NOP IN "
-+ "req %p", req);
-+ mod_timer(&conn->rsp_timer, req_tt);
-+ }
-+ } else
-+ list_add_tail(&req->write_timeout_list_entry,
-+ &conn->write_timeout_list);
-+
-+ if (!timer_pending(&conn->rsp_timer)) {
-+ unsigned long timeout_time;
-+ if (unlikely(conn->conn_tm_active ||
-+ test_bit(ISCSI_CMD_ABORTED,
-+ &req->prelim_compl_flags))) {
-+ set_conn_tm_active = true;
-+ timeout_time = req->write_start +
-+ ISCSI_TM_DATA_WAIT_TIMEOUT;
-+ } else
-+ timeout_time = iscsi_get_timeout_time(req);
-+
-+ timeout_time += ISCSI_ADD_SCHED_TIME;
-+
-+ TRACE_DBG("Starting timer on %ld (con %p, write_start %ld)",
-+ timeout_time, conn, req->write_start);
-+
-+ conn->rsp_timer.expires = timeout_time;
-+ add_timer(&conn->rsp_timer);
-+ } else if (unlikely(test_bit(ISCSI_CMD_ABORTED,
-+ &req->prelim_compl_flags))) {
-+ unsigned long timeout_time = jiffies +
-+ ISCSI_TM_DATA_WAIT_TIMEOUT + ISCSI_ADD_SCHED_TIME;
-+ set_conn_tm_active = true;
-+ if (time_after(conn->rsp_timer.expires, timeout_time)) {
-+ TRACE_MGMT_DBG("Mod timer on %ld (conn %p)",
-+ timeout_time, conn);
-+ mod_timer(&conn->rsp_timer, timeout_time);
-+ }
-+ }
-+
-+ spin_unlock_bh(&conn->write_list_lock);
-+
-+	/*
-+	 * conn_tm_active may already have been cleared by
-+	 * iscsi_check_tm_data_wait_timeouts(). write_list_lock is an inner
-+	 * lock of rd_lock.
-+	 */
-+ if (unlikely(set_conn_tm_active)) {
-+ spin_lock_bh(&conn->conn_thr_pool->rd_lock);
-+ TRACE_MGMT_DBG("Setting conn_tm_active for conn %p", conn);
-+ conn->conn_tm_active = 1;
-+ spin_unlock_bh(&conn->conn_thr_pool->rd_lock);
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int write_data(struct iscsi_conn *conn)
-+{
-+ mm_segment_t oldfs;
-+ struct file *file;
-+ struct iovec *iop;
-+ struct socket *sock;
-+ ssize_t (*sock_sendpage)(struct socket *, struct page *, int, size_t,
-+ int);
-+ ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
-+ struct iscsi_cmnd *write_cmnd = conn->write_cmnd;
-+ struct iscsi_cmnd *ref_cmd;
-+ struct page *page;
-+ struct scatterlist *sg;
-+ int saved_size, size, sendsize;
-+ int length, offset, idx;
-+ int flags, res, count, sg_size;
-+ bool do_put = false, ref_cmd_to_parent;
-+
-+ TRACE_ENTRY();
-+
-+ iscsi_extracheck_is_wr_thread(conn);
-+
-+ if (!write_cmnd->own_sg) {
-+ ref_cmd = write_cmnd->parent_req;
-+ ref_cmd_to_parent = true;
-+ } else {
-+ ref_cmd = write_cmnd;
-+ ref_cmd_to_parent = false;
-+ }
-+
-+ req_add_to_write_timeout_list(write_cmnd->parent_req);
-+
-+ file = conn->file;
-+ size = conn->write_size;
-+ saved_size = size;
-+ iop = conn->write_iop;
-+ count = conn->write_iop_used;
-+
-+ if (iop) {
-+ while (1) {
-+ loff_t off = 0;
-+ int rest;
-+
-+ BUG_ON(count > (signed)(sizeof(conn->write_iov) /
-+ sizeof(conn->write_iov[0])));
-+retry:
-+ oldfs = get_fs();
-+ set_fs(KERNEL_DS);
-+ res = vfs_writev(file,
-+ (struct iovec __force __user *)iop,
-+ count, &off);
-+ set_fs(oldfs);
-+ TRACE_WRITE("sid %#Lx, cid %u, res %d, iov_len %ld",
-+ (long long unsigned int)conn->session->sid,
-+ conn->cid, res, (long)iop->iov_len);
-+ if (unlikely(res <= 0)) {
-+ if (res == -EAGAIN) {
-+ conn->write_iop = iop;
-+ conn->write_iop_used = count;
-+ goto out_iov;
-+ } else if (res == -EINTR)
-+ goto retry;
-+ goto out_err;
-+ }
-+
-+ rest = res;
-+ size -= res;
-+ while ((typeof(rest))iop->iov_len <= rest && rest) {
-+ rest -= iop->iov_len;
-+ iop++;
-+ count--;
-+ }
-+ if (count == 0) {
-+ conn->write_iop = NULL;
-+ conn->write_iop_used = 0;
-+ if (size)
-+ break;
-+ goto out_iov;
-+ }
-+ BUG_ON(iop > conn->write_iov + sizeof(conn->write_iov)
-+ /sizeof(conn->write_iov[0]));
-+ iop->iov_base += rest;
-+ iop->iov_len -= rest;
-+ }
-+ }
-+
-+ sg = write_cmnd->sg;
-+ if (unlikely(sg == NULL)) {
-+ PRINT_INFO("WARNING: Data missed (cmd %p)!", write_cmnd);
-+ res = 0;
-+ goto out;
-+ }
-+
-+	/* Protect against a too-early transfer completion race */
-+ __iscsi_get_page_callback(ref_cmd);
-+ do_put = true;
-+
-+ sock = conn->sock;
-+
-+#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
-+ sock_sendpage = sock->ops->sendpage;
-+#else
-+ if ((write_cmnd->parent_req->scst_cmd != NULL) &&
-+ scst_cmd_get_dh_data_buff_alloced(write_cmnd->parent_req->scst_cmd))
-+ sock_sendpage = sock_no_sendpage;
-+ else
-+ sock_sendpage = sock->ops->sendpage;
-+#endif
-+
-+ flags = MSG_DONTWAIT;
-+ sg_size = size;
-+
-+ if (sg != write_cmnd->rsp_sg) {
-+ offset = conn->write_offset + sg[0].offset;
-+ idx = offset >> PAGE_SHIFT;
-+ offset &= ~PAGE_MASK;
-+ length = min(size, (int)PAGE_SIZE - offset);
-+ TRACE_WRITE("write_offset %d, sg_size %d, idx %d, offset %d, "
-+ "length %d", conn->write_offset, sg_size, idx, offset,
-+ length);
-+ } else {
-+ idx = 0;
-+ offset = conn->write_offset;
-+ while (offset >= sg[idx].length) {
-+ offset -= sg[idx].length;
-+ idx++;
-+ }
-+ length = sg[idx].length - offset;
-+ offset += sg[idx].offset;
-+ sock_sendpage = sock_no_sendpage;
-+ TRACE_WRITE("rsp_sg: write_offset %d, sg_size %d, idx %d, "
-+ "offset %d, length %d", conn->write_offset, sg_size,
-+ idx, offset, length);
-+ }
-+ page = sg_page(&sg[idx]);
-+
-+ while (1) {
-+ sendpage = sock_sendpage;
-+
-+#if defined(CONFIG_TCP_ZERO_COPY_TRANSFER_COMPLETION_NOTIFICATION)
-+ {
-+ static DEFINE_SPINLOCK(net_priv_lock);
-+ spin_lock(&net_priv_lock);
-+ if (unlikely(page->net_priv != NULL)) {
-+ if (page->net_priv != ref_cmd) {
-+				/*
-+				 * This might happen if user space
-+				 * supplies the same pages to scst_user
-+				 * in different commands, or in case of
-+				 * zero-copy FILEIO, when several
-+				 * initiators request the same data
-+				 * simultaneously.
-+				 */
-+ TRACE_DBG("net_priv isn't NULL and != "
-+ "ref_cmd (write_cmnd %p, ref_cmd "
-+ "%p, sg %p, idx %d, page %p, "
-+ "net_priv %p)",
-+ write_cmnd, ref_cmd, sg, idx,
-+ page, page->net_priv);
-+ sendpage = sock_no_sendpage;
-+ }
-+ } else
-+ page->net_priv = ref_cmd;
-+ spin_unlock(&net_priv_lock);
-+ }
-+#endif
-+ sendsize = min(size, length);
-+ if (size <= sendsize) {
-+retry2:
-+ res = sendpage(sock, page, offset, size, flags);
-+ TRACE_WRITE("Final %s sid %#Lx, cid %u, res %d (page "
-+ "index %lu, offset %u, size %u, cmd %p, "
-+ "page %p)", (sendpage != sock_no_sendpage) ?
-+ "sendpage" : "sock_no_sendpage",
-+ (long long unsigned int)conn->session->sid,
-+ conn->cid, res, page->index,
-+ offset, size, write_cmnd, page);
-+ if (unlikely(res <= 0)) {
-+ if (res == -EINTR)
-+ goto retry2;
-+ else
-+ goto out_res;
-+ }
-+
-+ check_net_priv(ref_cmd, page);
-+ if (res == size) {
-+ conn->write_size = 0;
-+ res = saved_size;
-+ goto out_put;
-+ }
-+
-+ offset += res;
-+ size -= res;
-+ goto retry2;
-+ }
-+
-+retry1:
-+ res = sendpage(sock, page, offset, sendsize, flags | MSG_MORE);
-+ TRACE_WRITE("%s sid %#Lx, cid %u, res %d (page index %lu, "
-+ "offset %u, sendsize %u, size %u, cmd %p, page %p)",
-+ (sendpage != sock_no_sendpage) ? "sendpage" :
-+ "sock_no_sendpage",
-+ (unsigned long long)conn->session->sid, conn->cid,
-+ res, page->index, offset, sendsize, size,
-+ write_cmnd, page);
-+ if (unlikely(res <= 0)) {
-+ if (res == -EINTR)
-+ goto retry1;
-+ else
-+ goto out_res;
-+ }
-+
-+ check_net_priv(ref_cmd, page);
-+
-+ size -= res;
-+
-+ if (res == sendsize) {
-+ idx++;
-+ EXTRACHECKS_BUG_ON(idx >= ref_cmd->sg_cnt);
-+ page = sg_page(&sg[idx]);
-+ length = sg[idx].length;
-+ offset = sg[idx].offset;
-+ } else {
-+ offset += res;
-+ sendsize -= res;
-+ goto retry1;
-+ }
-+ }
-+
-+out_off:
-+ conn->write_offset += sg_size - size;
-+
-+out_iov:
-+ conn->write_size = size;
-+ if ((saved_size == size) && res == -EAGAIN)
-+ goto out_put;
-+
-+ res = saved_size - size;
-+
-+out_put:
-+ if (do_put)
-+ __iscsi_put_page_callback(ref_cmd);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_res:
-+ check_net_priv(ref_cmd, page);
-+ if (res == -EAGAIN)
-+ goto out_off;
-+	/* else fall through */
-+
-+out_err:
-+#ifndef CONFIG_SCST_DEBUG
-+ if (!conn->closing)
-+#endif
-+ {
-+ PRINT_ERROR("error %d at sid:cid %#Lx:%u, cmnd %p", res,
-+ (long long unsigned int)conn->session->sid,
-+ conn->cid, conn->write_cmnd);
-+ }
-+ if (ref_cmd_to_parent &&
-+ ((ref_cmd->scst_cmd != NULL) || (ref_cmd->scst_aen != NULL))) {
-+ if (ref_cmd->scst_state == ISCSI_CMD_STATE_AEN)
-+ scst_set_aen_delivery_status(ref_cmd->scst_aen,
-+ SCST_AEN_RES_FAILED);
-+ else
-+ scst_set_delivery_status(ref_cmd->scst_cmd,
-+ SCST_CMD_DELIVERY_FAILED);
-+ }
-+ goto out_put;
-+}
-+
-+static int exit_tx(struct iscsi_conn *conn, int res)
-+{
-+ iscsi_extracheck_is_wr_thread(conn);
-+
-+ switch (res) {
-+ case -EAGAIN:
-+ case -ERESTARTSYS:
-+ break;
-+ default:
-+#ifndef CONFIG_SCST_DEBUG
-+ if (!conn->closing)
-+#endif
-+ {
-+ PRINT_ERROR("Sending data failed: initiator %s, "
-+ "write_size %d, write_state %d, res %d",
-+ conn->session->initiator_name,
-+ conn->write_size,
-+ conn->write_state, res);
-+ }
-+ conn->write_state = TX_END;
-+ conn->write_size = 0;
-+ mark_conn_closed(conn);
-+ break;
-+ }
-+ return res;
-+}
-+
-+static int tx_ddigest(struct iscsi_cmnd *cmnd, int state)
-+{
-+ int res, rest = cmnd->conn->write_size;
-+ struct msghdr msg = {.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT};
-+ struct kvec iov;
-+
-+ iscsi_extracheck_is_wr_thread(cmnd->conn);
-+
-+ TRACE_DBG("Sending data digest %x (cmd %p)", cmnd->ddigest, cmnd);
-+
-+ iov.iov_base = (char *)(&cmnd->ddigest) + (sizeof(u32) - rest);
-+ iov.iov_len = rest;
-+
-+ res = kernel_sendmsg(cmnd->conn->sock, &msg, &iov, 1, rest);
-+ if (res > 0) {
-+ cmnd->conn->write_size -= res;
-+ if (!cmnd->conn->write_size)
-+ cmnd->conn->write_state = state;
-+ } else
-+ res = exit_tx(cmnd->conn, res);
-+
-+ return res;
-+}
-+
-+static void init_tx_hdigest(struct iscsi_cmnd *cmnd)
-+{
-+ struct iscsi_conn *conn = cmnd->conn;
-+ struct iovec *iop;
-+
-+ iscsi_extracheck_is_wr_thread(conn);
-+
-+ digest_tx_header(cmnd);
-+
-+ BUG_ON(conn->write_iop_used >=
-+ (signed)(sizeof(conn->write_iov)/sizeof(conn->write_iov[0])));
-+
-+ iop = &conn->write_iop[conn->write_iop_used];
-+ conn->write_iop_used++;
-+ iop->iov_base = (void __force __user *)&(cmnd->hdigest);
-+ iop->iov_len = sizeof(u32);
-+ conn->write_size += sizeof(u32);
-+
-+ return;
-+}
-+
-+static int tx_padding(struct iscsi_cmnd *cmnd, int state)
-+{
-+ int res, rest = cmnd->conn->write_size;
-+ struct msghdr msg = {.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT};
-+ struct kvec iov;
-+ static const uint32_t padding;
-+
-+ iscsi_extracheck_is_wr_thread(cmnd->conn);
-+
-+ TRACE_DBG("Sending %d padding bytes (cmd %p)", rest, cmnd);
-+
-+ iov.iov_base = (char *)(&padding) + (sizeof(uint32_t) - rest);
-+ iov.iov_len = rest;
-+
-+ res = kernel_sendmsg(cmnd->conn->sock, &msg, &iov, 1, rest);
-+ if (res > 0) {
-+ cmnd->conn->write_size -= res;
-+ if (!cmnd->conn->write_size)
-+ cmnd->conn->write_state = state;
-+ } else
-+ res = exit_tx(cmnd->conn, res);
-+
-+ return res;
-+}
-+
-+static int iscsi_do_send(struct iscsi_conn *conn, int state)
-+{
-+ int res;
-+
-+ iscsi_extracheck_is_wr_thread(conn);
-+
-+ res = write_data(conn);
-+ if (res > 0) {
-+ if (!conn->write_size)
-+ conn->write_state = state;
-+ } else
-+ res = exit_tx(conn, res);
-+
-+ return res;
-+}
-+
-+/*
-+ * No locks, conn is wr processing.
-+ *
-+ * IMPORTANT! Connection conn must be protected by an additional conn_get()
-+ * before entering this function, because otherwise it could be destroyed
-+ * inside it as a result of cmnd release.
-+ */
-+int iscsi_send(struct iscsi_conn *conn)
-+{
-+ struct iscsi_cmnd *cmnd = conn->write_cmnd;
-+ int ddigest, res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("conn %p, write_cmnd %p", conn, cmnd);
-+
-+ iscsi_extracheck_is_wr_thread(conn);
-+
-+ ddigest = conn->ddigest_type != DIGEST_NONE ? 1 : 0;
-+
-+ switch (conn->write_state) {
-+ case TX_INIT:
-+ BUG_ON(cmnd != NULL);
-+ cmnd = conn->write_cmnd = iscsi_get_send_cmnd(conn);
-+ if (!cmnd)
-+ goto out;
-+ cmnd_tx_start(cmnd);
-+ if (!(conn->hdigest_type & DIGEST_NONE))
-+ init_tx_hdigest(cmnd);
-+ conn->write_state = TX_BHS_DATA;
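-+		/* fall through */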
-+ case TX_BHS_DATA:
-+ res = iscsi_do_send(conn, cmnd->pdu.datasize ?
-+ TX_INIT_PADDING : TX_END);
-+ if (res <= 0 || conn->write_state != TX_INIT_PADDING)
-+ break;
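-+		/* else fall through */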
-+ case TX_INIT_PADDING:
-+ cmnd->conn->write_size = ((cmnd->pdu.datasize + 3) & -4) -
-+ cmnd->pdu.datasize;
-+ if (cmnd->conn->write_size != 0)
-+ conn->write_state = TX_PADDING;
-+ else if (ddigest)
-+ conn->write_state = TX_INIT_DDIGEST;
-+ else
-+ conn->write_state = TX_END;
-+ break;
-+ case TX_PADDING:
-+ res = tx_padding(cmnd, ddigest ? TX_INIT_DDIGEST : TX_END);
-+ if (res <= 0 || conn->write_state != TX_INIT_DDIGEST)
-+ break;
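-+		/* else fall through */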
-+ case TX_INIT_DDIGEST:
-+ cmnd->conn->write_size = sizeof(u32);
-+ conn->write_state = TX_DDIGEST;
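-+		/* fall through */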
-+ case TX_DDIGEST:
-+ res = tx_ddigest(cmnd, TX_END);
-+ break;
-+ default:
-+ PRINT_CRIT_ERROR("%d %d %x", res, conn->write_state,
-+ cmnd_opcode(cmnd));
-+ BUG();
-+ }
-+
-+ if (res == 0)
-+ goto out;
-+
-+ if (conn->write_state != TX_END)
-+ goto out;
-+
-+ if (unlikely(conn->write_size)) {
-+ PRINT_CRIT_ERROR("%d %x %u", res, cmnd_opcode(cmnd),
-+ conn->write_size);
-+ BUG();
-+ }
-+ cmnd_tx_end(cmnd);
-+
-+ rsp_cmnd_release(cmnd);
-+
-+ conn->write_cmnd = NULL;
-+ conn->write_state = TX_INIT;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/*
-+ * Called under wr_lock and BHs disabled, but will drop it inside,
-+ * then reacquire.
-+ */
-+static void scst_do_job_wr(struct iscsi_thread_pool *p)
-+ __acquires(&wr_lock)
-+ __releases(&wr_lock)
-+{
-+ TRACE_ENTRY();
-+
-+	/*
-+	 * We delete connections and re-add them to the list tail to
-+	 * maintain fairness between them.
-+	 */
-+
-+ while (!list_empty(&p->wr_list)) {
-+ int rc;
-+ struct iscsi_conn *conn = list_entry(p->wr_list.next,
-+ typeof(*conn), wr_list_entry);
-+
-+ TRACE_DBG("conn %p, wr_state %x, wr_space_ready %d, "
-+ "write ready %d", conn, conn->wr_state,
-+ conn->wr_space_ready, test_write_ready(conn));
-+
-+ list_del(&conn->wr_list_entry);
-+
-+ BUG_ON(conn->wr_state == ISCSI_CONN_WR_STATE_PROCESSING);
-+
-+ conn->wr_state = ISCSI_CONN_WR_STATE_PROCESSING;
-+ conn->wr_space_ready = 0;
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ conn->wr_task = current;
-+#endif
-+ spin_unlock_bh(&p->wr_lock);
-+
-+ conn_get(conn);
-+
-+ rc = iscsi_send(conn);
-+
-+ spin_lock_bh(&p->wr_lock);
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ conn->wr_task = NULL;
-+#endif
-+ if ((rc == -EAGAIN) && !conn->wr_space_ready) {
-+ TRACE_DBG("EAGAIN, setting WR_STATE_SPACE_WAIT "
-+ "(conn %p)", conn);
-+ conn->wr_state = ISCSI_CONN_WR_STATE_SPACE_WAIT;
-+ } else if (test_write_ready(conn)) {
-+ list_add_tail(&conn->wr_list_entry, &p->wr_list);
-+ conn->wr_state = ISCSI_CONN_WR_STATE_IN_LIST;
-+ } else
-+ conn->wr_state = ISCSI_CONN_WR_STATE_IDLE;
-+
-+ conn_put(conn);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static inline int test_wr_list(struct iscsi_thread_pool *p)
-+{
-+ int res = !list_empty(&p->wr_list) ||
-+ unlikely(kthread_should_stop());
-+ return res;
-+}
-+
-+int istwr(void *arg)
-+{
-+ struct iscsi_thread_pool *p = arg;
-+ int rc;
-+
-+ TRACE_ENTRY();
-+
-+ PRINT_INFO("Write thread for pool %p started, PID %d", p, current->pid);
-+
-+ current->flags |= PF_NOFREEZE;
-+ rc = set_cpus_allowed_ptr(current, &p->cpu_mask);
-+ if (rc != 0)
-+ PRINT_ERROR("Setting CPU affinity failed: %d", rc);
-+
-+ spin_lock_bh(&p->wr_lock);
-+ while (!kthread_should_stop()) {
-+ wait_queue_t wait;
-+ init_waitqueue_entry(&wait, current);
-+
-+ if (!test_wr_list(p)) {
-+ add_wait_queue_exclusive_head(&p->wr_waitQ, &wait);
-+ for (;;) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ if (test_wr_list(p))
-+ break;
-+ spin_unlock_bh(&p->wr_lock);
-+ schedule();
-+ spin_lock_bh(&p->wr_lock);
-+ }
-+ set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&p->wr_waitQ, &wait);
-+ }
-+ scst_do_job_wr(p);
-+ }
-+ spin_unlock_bh(&p->wr_lock);
-+
-+	/*
-+	 * If kthread_should_stop() is true, we are guaranteed to be
-+	 * in module unload, so wr_list must be empty.
-+	 */
-+ BUG_ON(!list_empty(&p->wr_list));
-+
-+ PRINT_INFO("Write thread PID %d for pool %p finished", current->pid, p);
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-diff -uprN orig/linux-3.2/drivers/scst/iscsi-scst/param.c linux-3.2/drivers/scst/iscsi-scst/param.c
---- orig/linux-3.2/drivers/scst/iscsi-scst/param.c
-+++ linux-3.2/drivers/scst/iscsi-scst/param.c
-@@ -0,0 +1,342 @@
-+/*
-+ * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
-+ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include "iscsi.h"
-+#include "digest.h"
-+
-+#define CHECK_PARAM(info, iparams, word, min, max) \
-+do { \
-+ if (!(info)->partial || ((info)->partial & 1 << key_##word)) { \
-+ TRACE_DBG("%s: %u", #word, (iparams)[key_##word]); \
-+ if ((iparams)[key_##word] < (min) || \
-+ (iparams)[key_##word] > (max)) { \
-+ if ((iparams)[key_##word] < (min)) { \
-+				PRINT_WARNING("%s: %u is too small, resetting " \
-+					"it to allowed min %u",	\
-+					#word, (iparams)[key_##word], (min)); \
-+				(iparams)[key_##word] = (min);	\
-+ } else { \
-+ PRINT_WARNING("%s: %u is too big, resetting " \
-+ "it to allowed max %u", \
-+ #word, (iparams)[key_##word], (max)); \
-+ (iparams)[key_##word] = (max); \
-+ } \
-+ } \
-+ } \
-+} while (0)
-+
-+#define SET_PARAM(params, info, iparams, word) \
-+({ \
-+ int changed = 0; \
-+ if (!(info)->partial || ((info)->partial & 1 << key_##word)) { \
-+ if ((params)->word != (iparams)[key_##word]) \
-+ changed = 1; \
-+ (params)->word = (iparams)[key_##word]; \
-+ TRACE_DBG("%s set to %u", #word, (params)->word); \
-+ } \
-+ changed; \
-+})
-+
-+#define GET_PARAM(params, info, iparams, word) \
-+do { \
-+ (iparams)[key_##word] = (params)->word; \
-+} while (0)
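-+
-+/*
-+ * The macros above rely on token pasting: each parameter name "word"
-+ * must have a matching key_##word index into the corresponding
-+ * *_params[] array. For example, SET_PARAM(params, info, iparams,
-+ * initial_r2t) copies iparams[key_initial_r2t] into
-+ * params->initial_r2t, honoring the "partial" mask, and evaluates to 1
-+ * if the value changed.
-+ */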
-+
-+const char *iscsi_get_bool_value(int val)
-+{
-+ if (val)
-+ return "Yes";
-+ else
-+ return "No";
-+}
-+
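-+/*
-+ * Formats the digest bitmask as a human-readable list: e.g., a val of
-+ * DIGEST_NONE|DIGEST_CRC32C yields "None, CRC32C" in the caller-supplied
-+ * buffer res (callers in this file use a 64-byte buffer).
-+ */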
-+const char *iscsi_get_digest_name(int val, char *res)
-+{
-+ int pos = 0;
-+
-+ if (val & DIGEST_NONE)
-+ pos = sprintf(&res[pos], "%s", "None");
-+
-+ if (val & DIGEST_CRC32C)
-+ pos += sprintf(&res[pos], "%s%s", (pos != 0) ? ", " : "",
-+ "CRC32C");
-+
-+ if (pos == 0)
-+ sprintf(&res[pos], "%s", "Unknown");
-+
-+ return res;
-+}
-+
-+static void log_params(struct iscsi_sess_params *params)
-+{
-+ char digest_name[64];
-+
-+ PRINT_INFO("Negotiated parameters: InitialR2T %s, ImmediateData %s, "
-+ "MaxConnections %d, MaxRecvDataSegmentLength %d, "
-+ "MaxXmitDataSegmentLength %d, ",
-+ iscsi_get_bool_value(params->initial_r2t),
-+ iscsi_get_bool_value(params->immediate_data), params->max_connections,
-+ params->max_recv_data_length, params->max_xmit_data_length);
-+ PRINT_INFO(" MaxBurstLength %d, FirstBurstLength %d, "
-+ "DefaultTime2Wait %d, DefaultTime2Retain %d, ",
-+ params->max_burst_length, params->first_burst_length,
-+ params->default_wait_time, params->default_retain_time);
-+ PRINT_INFO(" MaxOutstandingR2T %d, DataPDUInOrder %s, "
-+ "DataSequenceInOrder %s, ErrorRecoveryLevel %d, ",
-+ params->max_outstanding_r2t,
-+ iscsi_get_bool_value(params->data_pdu_inorder),
-+ iscsi_get_bool_value(params->data_sequence_inorder),
-+ params->error_recovery_level);
-+ PRINT_INFO(" HeaderDigest %s, DataDigest %s, OFMarker %s, "
-+ "IFMarker %s, OFMarkInt %d, IFMarkInt %d",
-+ iscsi_get_digest_name(params->header_digest, digest_name),
-+ iscsi_get_digest_name(params->data_digest, digest_name),
-+ iscsi_get_bool_value(params->ofmarker),
-+ iscsi_get_bool_value(params->ifmarker),
-+ params->ofmarkint, params->ifmarkint);
-+}
-+
-+/* target_mutex supposed to be locked */
-+static void sess_params_check(struct iscsi_kern_params_info *info)
-+{
-+ int32_t *iparams = info->session_params;
-+ const int max_len = ISCSI_CONN_IOV_MAX * PAGE_SIZE;
-+
-+ /*
-+	 * These are only kernel sanity checks. The actual data validity
-+	 * checks are performed in user space.
-+ */
-+
-+ CHECK_PARAM(info, iparams, initial_r2t, 0, 1);
-+ CHECK_PARAM(info, iparams, immediate_data, 0, 1);
-+ CHECK_PARAM(info, iparams, max_connections, 1, 1);
-+ CHECK_PARAM(info, iparams, max_recv_data_length, 512, max_len);
-+ CHECK_PARAM(info, iparams, max_xmit_data_length, 512, max_len);
-+ CHECK_PARAM(info, iparams, max_burst_length, 512, max_len);
-+ CHECK_PARAM(info, iparams, first_burst_length, 512, max_len);
-+ CHECK_PARAM(info, iparams, max_outstanding_r2t, 1, 65535);
-+ CHECK_PARAM(info, iparams, error_recovery_level, 0, 0);
-+ CHECK_PARAM(info, iparams, data_pdu_inorder, 0, 1);
-+ CHECK_PARAM(info, iparams, data_sequence_inorder, 0, 1);
-+
-+ digest_alg_available(&iparams[key_header_digest]);
-+ digest_alg_available(&iparams[key_data_digest]);
-+
-+ CHECK_PARAM(info, iparams, ofmarker, 0, 0);
-+ CHECK_PARAM(info, iparams, ifmarker, 0, 0);
-+
-+ return;
-+}
-+
-+/* target_mutex supposed to be locked */
-+static void sess_params_set(struct iscsi_sess_params *params,
-+ struct iscsi_kern_params_info *info)
-+{
-+ int32_t *iparams = info->session_params;
-+
-+ SET_PARAM(params, info, iparams, initial_r2t);
-+ SET_PARAM(params, info, iparams, immediate_data);
-+ SET_PARAM(params, info, iparams, max_connections);
-+ SET_PARAM(params, info, iparams, max_recv_data_length);
-+ SET_PARAM(params, info, iparams, max_xmit_data_length);
-+ SET_PARAM(params, info, iparams, max_burst_length);
-+ SET_PARAM(params, info, iparams, first_burst_length);
-+ SET_PARAM(params, info, iparams, default_wait_time);
-+ SET_PARAM(params, info, iparams, default_retain_time);
-+ SET_PARAM(params, info, iparams, max_outstanding_r2t);
-+ SET_PARAM(params, info, iparams, data_pdu_inorder);
-+ SET_PARAM(params, info, iparams, data_sequence_inorder);
-+ SET_PARAM(params, info, iparams, error_recovery_level);
-+ SET_PARAM(params, info, iparams, header_digest);
-+ SET_PARAM(params, info, iparams, data_digest);
-+ SET_PARAM(params, info, iparams, ofmarker);
-+ SET_PARAM(params, info, iparams, ifmarker);
-+ SET_PARAM(params, info, iparams, ofmarkint);
-+ SET_PARAM(params, info, iparams, ifmarkint);
-+ return;
-+}
-+
-+static void sess_params_get(struct iscsi_sess_params *params,
-+ struct iscsi_kern_params_info *info)
-+{
-+ int32_t *iparams = info->session_params;
-+
-+ GET_PARAM(params, info, iparams, initial_r2t);
-+ GET_PARAM(params, info, iparams, immediate_data);
-+ GET_PARAM(params, info, iparams, max_connections);
-+ GET_PARAM(params, info, iparams, max_recv_data_length);
-+ GET_PARAM(params, info, iparams, max_xmit_data_length);
-+ GET_PARAM(params, info, iparams, max_burst_length);
-+ GET_PARAM(params, info, iparams, first_burst_length);
-+ GET_PARAM(params, info, iparams, default_wait_time);
-+ GET_PARAM(params, info, iparams, default_retain_time);
-+ GET_PARAM(params, info, iparams, max_outstanding_r2t);
-+ GET_PARAM(params, info, iparams, data_pdu_inorder);
-+ GET_PARAM(params, info, iparams, data_sequence_inorder);
-+ GET_PARAM(params, info, iparams, error_recovery_level);
-+ GET_PARAM(params, info, iparams, header_digest);
-+ GET_PARAM(params, info, iparams, data_digest);
-+ GET_PARAM(params, info, iparams, ofmarker);
-+ GET_PARAM(params, info, iparams, ifmarker);
-+ GET_PARAM(params, info, iparams, ofmarkint);
-+ GET_PARAM(params, info, iparams, ifmarkint);
-+ return;
-+}
-+
-+/* target_mutex supposed to be locked */
-+static void tgt_params_check(struct iscsi_session *session,
-+ struct iscsi_kern_params_info *info)
-+{
-+ int32_t *iparams = info->target_params;
-+ unsigned int rsp_timeout, nop_in_timeout;
-+
-+ /*
-+	 * These are only kernel sanity checks. The actual data validity
-+	 * checks are performed in user space.
-+ */
-+
-+ CHECK_PARAM(info, iparams, queued_cmnds, MIN_NR_QUEUED_CMNDS,
-+ min_t(int, MAX_NR_QUEUED_CMNDS,
-+ scst_get_max_lun_commands(session->scst_sess, NO_SUCH_LUN)));
-+ CHECK_PARAM(info, iparams, rsp_timeout, MIN_RSP_TIMEOUT,
-+ MAX_RSP_TIMEOUT);
-+ CHECK_PARAM(info, iparams, nop_in_interval, MIN_NOP_IN_INTERVAL,
-+ MAX_NOP_IN_INTERVAL);
-+ CHECK_PARAM(info, iparams, nop_in_timeout, MIN_NOP_IN_TIMEOUT,
-+ MAX_NOP_IN_TIMEOUT);
-+
-+ /*
-+	 * We adjust an overly long timeout in req_add_to_write_timeout_list()
-+ * only for NOPs, so check and warn if this assumption isn't honored.
-+ */
-+ if (!info->partial || (info->partial & 1 << key_rsp_timeout))
-+ rsp_timeout = iparams[key_rsp_timeout];
-+ else
-+ rsp_timeout = session->tgt_params.rsp_timeout;
-+ if (!info->partial || (info->partial & 1 << key_nop_in_timeout))
-+ nop_in_timeout = iparams[key_nop_in_timeout];
-+ else
-+ nop_in_timeout = session->tgt_params.nop_in_timeout;
-+ if (nop_in_timeout > rsp_timeout)
-+ PRINT_WARNING("%s", "RspTimeout should be >= NopInTimeout, "
-+			"otherwise a data transfer failure could take up to "
-+			"NopInTimeout to detect");
-+
-+ return;
-+}
-+
-+/* target_mutex supposed to be locked */
-+static int iscsi_tgt_params_set(struct iscsi_session *session,
-+ struct iscsi_kern_params_info *info, int set)
-+{
-+ struct iscsi_tgt_params *params = &session->tgt_params;
-+ int32_t *iparams = info->target_params;
-+
-+ if (set) {
-+ struct iscsi_conn *conn;
-+
-+ tgt_params_check(session, info);
-+
-+ SET_PARAM(params, info, iparams, queued_cmnds);
-+ SET_PARAM(params, info, iparams, rsp_timeout);
-+ SET_PARAM(params, info, iparams, nop_in_interval);
-+ SET_PARAM(params, info, iparams, nop_in_timeout);
-+
-+ PRINT_INFO("Target parameters set for session %llx: "
-+ "QueuedCommands %d, Response timeout %d, Nop-In "
-+ "interval %d, Nop-In timeout %d", session->sid,
-+ params->queued_cmnds, params->rsp_timeout,
-+ params->nop_in_interval, params->nop_in_timeout);
-+
-+ list_for_each_entry(conn, &session->conn_list,
-+ conn_list_entry) {
-+ conn->data_rsp_timeout = session->tgt_params.rsp_timeout * HZ;
-+ conn->nop_in_interval = session->tgt_params.nop_in_interval * HZ;
-+ conn->nop_in_timeout = session->tgt_params.nop_in_timeout * HZ;
-+ spin_lock_bh(&conn->conn_thr_pool->rd_lock);
-+ if (!conn->closing && (conn->nop_in_interval > 0)) {
-+ TRACE_DBG("Schedule Nop-In work for conn %p", conn);
-+ schedule_delayed_work(&conn->nop_in_delayed_work,
-+ conn->nop_in_interval + ISCSI_ADD_SCHED_TIME);
-+ }
-+ spin_unlock_bh(&conn->conn_thr_pool->rd_lock);
-+ }
-+ } else {
-+ GET_PARAM(params, info, iparams, queued_cmnds);
-+ GET_PARAM(params, info, iparams, rsp_timeout);
-+ GET_PARAM(params, info, iparams, nop_in_interval);
-+ GET_PARAM(params, info, iparams, nop_in_timeout);
-+ }
-+
-+ return 0;
-+}
-+
-+/* target_mutex supposed to be locked */
-+static int iscsi_sess_params_set(struct iscsi_session *session,
-+ struct iscsi_kern_params_info *info, int set)
-+{
-+ struct iscsi_sess_params *params;
-+
-+ if (set)
-+ sess_params_check(info);
-+
-+ params = &session->sess_params;
-+
-+ if (set) {
-+ sess_params_set(params, info);
-+ log_params(params);
-+ } else
-+ sess_params_get(params, info);
-+
-+ return 0;
-+}
-+
-+/* target_mutex supposed to be locked */
-+int iscsi_params_set(struct iscsi_target *target,
-+ struct iscsi_kern_params_info *info, int set)
-+{
-+ int err;
-+ struct iscsi_session *session;
-+
-+ if (info->sid == 0) {
-+ PRINT_ERROR("sid must not be %d", 0);
-+ err = -EINVAL;
-+ goto out;
-+ }
-+
-+ session = session_lookup(target, info->sid);
-+ if (session == NULL) {
-+ PRINT_ERROR("Session for sid %llx not found", info->sid);
-+ err = -ENOENT;
-+ goto out;
-+ }
-+
-+ if (set && !list_empty(&session->conn_list) &&
-+ (info->params_type != key_target)) {
-+ err = -EBUSY;
-+ goto out;
-+ }
-+
-+ if (info->params_type == key_session)
-+ err = iscsi_sess_params_set(session, info, set);
-+ else if (info->params_type == key_target)
-+ err = iscsi_tgt_params_set(session, info, set);
-+ else
-+ err = -EINVAL;
-+
-+out:
-+ return err;
-+}
-diff -uprN orig/linux-3.2/drivers/scst/iscsi-scst/session.c linux-3.2/drivers/scst/iscsi-scst/session.c
---- orig/linux-3.2/drivers/scst/iscsi-scst/session.c
-+++ linux-3.2/drivers/scst/iscsi-scst/session.c
-@@ -0,0 +1,527 @@
-+/*
-+ * Copyright (C) 2002 - 2003 Ardis Technolgies <roman@ardistech.com>
-+ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include "iscsi.h"
-+
-+#include <linux/export.h>
-+
-+/* target_mutex supposed to be locked */
-+struct iscsi_session *session_lookup(struct iscsi_target *target, u64 sid)
-+{
-+ struct iscsi_session *session;
-+
-+ list_for_each_entry(session, &target->session_list,
-+ session_list_entry) {
-+ if (session->sid == sid)
-+ return session;
-+ }
-+ return NULL;
-+}
-+
-+/* target_mgmt_mutex supposed to be locked */
-+static int iscsi_session_alloc(struct iscsi_target *target,
-+ struct iscsi_kern_session_info *info, struct iscsi_session **result)
-+{
-+ int err;
-+ unsigned int i;
-+ struct iscsi_session *session;
-+ char *name = NULL;
-+
-+ session = kzalloc(sizeof(*session), GFP_KERNEL);
-+ if (!session)
-+ return -ENOMEM;
-+
-+ session->target = target;
-+ session->sid = info->sid;
-+ atomic_set(&session->active_cmds, 0);
-+ session->exp_cmd_sn = info->exp_cmd_sn;
-+
-+ session->initiator_name = kstrdup(info->initiator_name, GFP_KERNEL);
-+ if (!session->initiator_name) {
-+ err = -ENOMEM;
-+ goto err;
-+ }
-+
-+ name = info->full_initiator_name;
-+
-+ INIT_LIST_HEAD(&session->conn_list);
-+ INIT_LIST_HEAD(&session->pending_list);
-+
-+ spin_lock_init(&session->sn_lock);
-+
-+ spin_lock_init(&session->cmnd_data_wait_hash_lock);
-+ for (i = 0; i < ARRAY_SIZE(session->cmnd_data_wait_hash); i++)
-+ INIT_LIST_HEAD(&session->cmnd_data_wait_hash[i]);
-+
-+ session->next_ttt = 1;
-+
-+ session->scst_sess = scst_register_session(target->scst_tgt, 0,
-+ name, session, NULL, NULL);
-+ if (session->scst_sess == NULL) {
-+ PRINT_ERROR("%s", "scst_register_session() failed");
-+ err = -ENOMEM;
-+ goto err;
-+ }
-+
-+ err = iscsi_threads_pool_get(&session->scst_sess->acg->acg_cpu_mask,
-+ &session->sess_thr_pool);
-+ if (err != 0)
-+ goto err_unreg;
-+
-+ TRACE_MGMT_DBG("Session %p created: target %p, tid %u, sid %#Lx",
-+ session, target, target->tid, info->sid);
-+
-+ *result = session;
-+ return 0;
-+
-+err_unreg:
-+ scst_unregister_session(session->scst_sess, 1, NULL);
-+
-+err:
-+ if (session) {
-+ kfree(session->initiator_name);
-+ kfree(session);
-+ }
-+ return err;
-+}
-+
-+/* target_mutex supposed to be locked */
-+void sess_reinst_finished(struct iscsi_session *sess)
-+{
-+ struct iscsi_conn *c;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Enabling reinstate successor sess %p", sess);
-+
-+ BUG_ON(!sess->sess_reinstating);
-+
-+ list_for_each_entry(c, &sess->conn_list, conn_list_entry) {
-+ conn_reinst_finished(c);
-+ }
-+ sess->sess_reinstating = 0;
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* target_mgmt_mutex supposed to be locked */
-+int __add_session(struct iscsi_target *target,
-+ struct iscsi_kern_session_info *info)
-+{
-+ struct iscsi_session *new_sess = NULL, *sess, *old_sess;
-+ int err = 0, i;
-+ union iscsi_sid sid;
-+ bool reinstatement = false;
-+ struct iscsi_kern_params_info *params_info;
-+
-+ TRACE_MGMT_DBG("Adding session SID %llx", info->sid);
-+
-+ err = iscsi_session_alloc(target, info, &new_sess);
-+ if (err != 0)
-+ goto out;
-+
-+ mutex_lock(&target->target_mutex);
-+
-+ sess = session_lookup(target, info->sid);
-+ if (sess != NULL) {
-+ PRINT_ERROR("Attempt to add session with existing SID %llx",
-+ info->sid);
-+ err = -EEXIST;
-+ goto out_err_unlock;
-+ }
-+
-+ params_info = kmalloc(sizeof(*params_info), GFP_KERNEL);
-+ if (params_info == NULL) {
-+ PRINT_ERROR("Unable to allocate params info (size %zd)",
-+ sizeof(*params_info));
-+ err = -ENOMEM;
-+ goto out_err_unlock;
-+ }
-+
-+ sid = *(union iscsi_sid *)&info->sid;
-+ sid.id.tsih = 0;
-+ old_sess = NULL;
-+
-+ /*
-+ * We need to find the latest session to correctly handle
-+ * multi-reinstatements
-+ */
-+ list_for_each_entry_reverse(sess, &target->session_list,
-+ session_list_entry) {
-+ union iscsi_sid s = *(union iscsi_sid *)&sess->sid;
-+ s.id.tsih = 0;
-+ if ((sid.id64 == s.id64) &&
-+ (strcmp(info->initiator_name, sess->initiator_name) == 0)) {
-+ if (!sess->sess_shutting_down) {
-+ /* session reinstatement */
-+ old_sess = sess;
-+ }
-+ break;
-+ }
-+ }
-+ sess = NULL;
-+
-+ list_add_tail(&new_sess->session_list_entry, &target->session_list);
-+
-+ memset(params_info, 0, sizeof(*params_info));
-+ params_info->tid = target->tid;
-+ params_info->sid = info->sid;
-+ params_info->params_type = key_session;
-+ for (i = 0; i < session_key_last; i++)
-+ params_info->session_params[i] = info->session_params[i];
-+
-+ err = iscsi_params_set(target, params_info, 1);
-+ if (err != 0)
-+ goto out_del;
-+
-+ memset(params_info, 0, sizeof(*params_info));
-+ params_info->tid = target->tid;
-+ params_info->sid = info->sid;
-+ params_info->params_type = key_target;
-+ for (i = 0; i < target_key_last; i++)
-+ params_info->target_params[i] = info->target_params[i];
-+
-+ err = iscsi_params_set(target, params_info, 1);
-+ if (err != 0)
-+ goto out_del;
-+
-+ kfree(params_info);
-+ params_info = NULL;
-+
-+ if (old_sess != NULL) {
-+ reinstatement = true;
-+
-+ TRACE_MGMT_DBG("Reinstating sess %p with SID %llx (old %p, "
-+ "SID %llx)", new_sess, new_sess->sid, old_sess,
-+ old_sess->sid);
-+
-+ new_sess->sess_reinstating = 1;
-+ old_sess->sess_reinst_successor = new_sess;
-+
-+ target_del_session(old_sess->target, old_sess, 0);
-+ }
-+
-+ mutex_unlock(&target->target_mutex);
-+
-+ if (reinstatement) {
-+ /*
-+		 * Mutex target_mgmt_mutex won't allow adding connections to
-+		 * the new session after target_mutex is dropped, so it's safe
-+ * to replace the initial UA without it. We can't do it under
-+ * target_mutex, because otherwise we can establish a
-+ * circular locking dependency between target_mutex and
-+ * scst_mutex in SCST core (iscsi_report_aen() called by
-+ * SCST core under scst_mutex).
-+ */
-+ scst_set_initial_UA(new_sess->scst_sess,
-+ SCST_LOAD_SENSE(scst_sense_nexus_loss_UA));
-+ }
-+
-+out:
-+ return err;
-+
-+out_del:
-+ list_del(&new_sess->session_list_entry);
-+ kfree(params_info);
-+
-+out_err_unlock:
-+ mutex_unlock(&target->target_mutex);
-+
-+ scst_unregister_session(new_sess->scst_sess, 1, NULL);
-+ new_sess->scst_sess = NULL;
-+
-+ mutex_lock(&target->target_mutex);
-+ session_free(new_sess, false);
-+ mutex_unlock(&target->target_mutex);
-+ goto out;
-+}
-+
-+static void __session_free(struct iscsi_session *session)
-+{
-+ kfree(session->initiator_name);
-+ kfree(session);
-+}
-+
-+static void iscsi_unreg_sess_done(struct scst_session *scst_sess)
-+{
-+ struct iscsi_session *session;
-+
-+ TRACE_ENTRY();
-+
-+ session = (struct iscsi_session *)scst_sess_get_tgt_priv(scst_sess);
-+
-+ session->scst_sess = NULL;
-+ __session_free(session);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* target_mutex supposed to be locked */
-+int session_free(struct iscsi_session *session, bool del)
-+{
-+ unsigned int i;
-+
-+ TRACE_MGMT_DBG("Freeing session %p (SID %llx)",
-+ session, session->sid);
-+
-+ BUG_ON(!list_empty(&session->conn_list));
-+ if (unlikely(atomic_read(&session->active_cmds) != 0)) {
-+ PRINT_CRIT_ERROR("active_cmds not 0 (%d)!!",
-+ atomic_read(&session->active_cmds));
-+ BUG();
-+ }
-+
-+ for (i = 0; i < ARRAY_SIZE(session->cmnd_data_wait_hash); i++)
-+ BUG_ON(!list_empty(&session->cmnd_data_wait_hash[i]));
-+
-+ if (session->sess_reinst_successor != NULL)
-+ sess_reinst_finished(session->sess_reinst_successor);
-+
-+ if (session->sess_reinstating) {
-+ struct iscsi_session *s;
-+ TRACE_MGMT_DBG("Freeing being reinstated sess %p", session);
-+ list_for_each_entry(s, &session->target->session_list,
-+ session_list_entry) {
-+ if (s->sess_reinst_successor == session) {
-+ s->sess_reinst_successor = NULL;
-+ break;
-+ }
-+ }
-+ }
-+
-+ if (del)
-+ list_del(&session->session_list_entry);
-+
-+ if (session->sess_thr_pool != NULL) {
-+ iscsi_threads_pool_put(session->sess_thr_pool);
-+ session->sess_thr_pool = NULL;
-+ }
-+
-+ if (session->scst_sess != NULL) {
-+ /*
-+ * We must NOT call scst_unregister_session() in the waiting
-+ * mode, since we are under target_mutex. Otherwise we can
-+ * establish a circular locking dependency between target_mutex
-+ * and scst_mutex in SCST core (iscsi_report_aen() called by
-+ * SCST core under scst_mutex).
-+ */
-+ scst_unregister_session(session->scst_sess, 0,
-+ iscsi_unreg_sess_done);
-+ } else
-+ __session_free(session);
-+
-+ return 0;
-+}
-+
-+/* target_mutex supposed to be locked */
-+int __del_session(struct iscsi_target *target, u64 sid)
-+{
-+ struct iscsi_session *session;
-+
-+ session = session_lookup(target, sid);
-+ if (!session)
-+ return -ENOENT;
-+
-+ if (!list_empty(&session->conn_list)) {
-+		PRINT_ERROR("Session %llx still has connections",
-+ (long long unsigned int)session->sid);
-+ return -EBUSY;
-+ }
-+
-+ return session_free(session, true);
-+}
-+
-+/* Must be called under target_mutex */
-+void iscsi_sess_force_close(struct iscsi_session *sess)
-+{
-+ struct iscsi_conn *conn;
-+
-+ TRACE_ENTRY();
-+
-+ PRINT_INFO("Deleting session %llx with initiator %s (%p)",
-+ (long long unsigned int)sess->sid, sess->initiator_name, sess);
-+
-+ list_for_each_entry(conn, &sess->conn_list, conn_list_entry) {
-+		TRACE_MGMT_DBG("Deleting connection %p", conn);
-+ __mark_conn_closed(conn, ISCSI_CONN_ACTIVE_CLOSE|ISCSI_CONN_DELETING);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+#define ISCSI_SESS_BOOL_PARAM_ATTR(name, exported_name) \
-+static ssize_t iscsi_sess_show_##name(struct kobject *kobj, \
-+ struct kobj_attribute *attr, char *buf) \
-+{ \
-+ int pos; \
-+ struct scst_session *scst_sess; \
-+ struct iscsi_session *sess; \
-+ \
-+ scst_sess = container_of(kobj, struct scst_session, sess_kobj); \
-+ sess = (struct iscsi_session *)scst_sess_get_tgt_priv(scst_sess); \
-+ \
-+ pos = sprintf(buf, "%s\n", \
-+ iscsi_get_bool_value(sess->sess_params.name)); \
-+ \
-+ return pos; \
-+} \
-+ \
-+static struct kobj_attribute iscsi_sess_attr_##name = \
-+ __ATTR(exported_name, S_IRUGO, iscsi_sess_show_##name, NULL);
-+
-+#define ISCSI_SESS_INT_PARAM_ATTR(name, exported_name) \
-+static ssize_t iscsi_sess_show_##name(struct kobject *kobj, \
-+ struct kobj_attribute *attr, char *buf) \
-+{ \
-+ int pos; \
-+ struct scst_session *scst_sess; \
-+ struct iscsi_session *sess; \
-+ \
-+ scst_sess = container_of(kobj, struct scst_session, sess_kobj); \
-+ sess = (struct iscsi_session *)scst_sess_get_tgt_priv(scst_sess); \
-+ \
-+ pos = sprintf(buf, "%d\n", sess->sess_params.name); \
-+ \
-+ return pos; \
-+} \
-+ \
-+static struct kobj_attribute iscsi_sess_attr_##name = \
-+ __ATTR(exported_name, S_IRUGO, iscsi_sess_show_##name, NULL);
-+
-+#define ISCSI_SESS_DIGEST_PARAM_ATTR(name, exported_name) \
-+static ssize_t iscsi_sess_show_##name(struct kobject *kobj, \
-+ struct kobj_attribute *attr, char *buf) \
-+{ \
-+ int pos; \
-+ struct scst_session *scst_sess; \
-+ struct iscsi_session *sess; \
-+ char digest_name[64]; \
-+ \
-+ scst_sess = container_of(kobj, struct scst_session, sess_kobj); \
-+ sess = (struct iscsi_session *)scst_sess_get_tgt_priv(scst_sess); \
-+ \
-+ pos = sprintf(buf, "%s\n", iscsi_get_digest_name( \
-+ sess->sess_params.name, digest_name)); \
-+ \
-+ return pos; \
-+} \
-+ \
-+static struct kobj_attribute iscsi_sess_attr_##name = \
-+ __ATTR(exported_name, S_IRUGO, iscsi_sess_show_##name, NULL);
-+
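-+/*
-+ * Each invocation below expands to a read-only sysfs show function plus
-+ * a kobj_attribute named iscsi_sess_attr_<name>, exported under the
-+ * session's kobject as the given iSCSI parameter name.
-+ */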
-+ISCSI_SESS_BOOL_PARAM_ATTR(initial_r2t, InitialR2T);
-+ISCSI_SESS_BOOL_PARAM_ATTR(immediate_data, ImmediateData);
-+ISCSI_SESS_INT_PARAM_ATTR(max_recv_data_length, MaxRecvDataSegmentLength);
-+ISCSI_SESS_INT_PARAM_ATTR(max_xmit_data_length, MaxXmitDataSegmentLength);
-+ISCSI_SESS_INT_PARAM_ATTR(max_burst_length, MaxBurstLength);
-+ISCSI_SESS_INT_PARAM_ATTR(first_burst_length, FirstBurstLength);
-+ISCSI_SESS_INT_PARAM_ATTR(max_outstanding_r2t, MaxOutstandingR2T);
-+ISCSI_SESS_DIGEST_PARAM_ATTR(header_digest, HeaderDigest);
-+ISCSI_SESS_DIGEST_PARAM_ATTR(data_digest, DataDigest);
-+
-+static ssize_t iscsi_sess_sid_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos;
-+ struct scst_session *scst_sess;
-+ struct iscsi_session *sess;
-+
-+ TRACE_ENTRY();
-+
-+ scst_sess = container_of(kobj, struct scst_session, sess_kobj);
-+ sess = (struct iscsi_session *)scst_sess_get_tgt_priv(scst_sess);
-+
-+ pos = sprintf(buf, "%llx\n", sess->sid);
-+
-+ TRACE_EXIT_RES(pos);
-+ return pos;
-+}
-+
-+static struct kobj_attribute iscsi_attr_sess_sid =
-+ __ATTR(sid, S_IRUGO, iscsi_sess_sid_show, NULL);
-+
-+static ssize_t iscsi_sess_reinstating_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos;
-+ struct scst_session *scst_sess;
-+ struct iscsi_session *sess;
-+
-+ TRACE_ENTRY();
-+
-+ scst_sess = container_of(kobj, struct scst_session, sess_kobj);
-+ sess = (struct iscsi_session *)scst_sess_get_tgt_priv(scst_sess);
-+
-+ pos = sprintf(buf, "%d\n", sess->sess_reinstating ? 1 : 0);
-+
-+ TRACE_EXIT_RES(pos);
-+ return pos;
-+}
-+
-+static struct kobj_attribute iscsi_sess_attr_reinstating =
-+ __ATTR(reinstating, S_IRUGO, iscsi_sess_reinstating_show, NULL);
-+
-+static ssize_t iscsi_sess_force_close_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf, size_t count)
-+{
-+ int res;
-+ struct scst_session *scst_sess;
-+ struct iscsi_session *sess;
-+
-+ TRACE_ENTRY();
-+
-+ scst_sess = container_of(kobj, struct scst_session, sess_kobj);
-+ sess = (struct iscsi_session *)scst_sess_get_tgt_priv(scst_sess);
-+
-+ if (mutex_lock_interruptible(&sess->target->target_mutex) != 0) {
-+ res = -EINTR;
-+ goto out;
-+ }
-+
-+ iscsi_sess_force_close(sess);
-+
-+ mutex_unlock(&sess->target->target_mutex);
-+
-+ res = count;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static struct kobj_attribute iscsi_sess_attr_force_close =
-+ __ATTR(force_close, S_IWUSR, NULL, iscsi_sess_force_close_store);
-+
-+const struct attribute *iscsi_sess_attrs[] = {
-+ &iscsi_sess_attr_initial_r2t.attr,
-+ &iscsi_sess_attr_immediate_data.attr,
-+ &iscsi_sess_attr_max_recv_data_length.attr,
-+ &iscsi_sess_attr_max_xmit_data_length.attr,
-+ &iscsi_sess_attr_max_burst_length.attr,
-+ &iscsi_sess_attr_first_burst_length.attr,
-+ &iscsi_sess_attr_max_outstanding_r2t.attr,
-+ &iscsi_sess_attr_header_digest.attr,
-+ &iscsi_sess_attr_data_digest.attr,
-+ &iscsi_attr_sess_sid.attr,
-+ &iscsi_sess_attr_reinstating.attr,
-+ &iscsi_sess_attr_force_close.attr,
-+ NULL,
-+};
-+
-diff -uprN orig/linux-3.2/drivers/scst/iscsi-scst/target.c linux-3.2/drivers/scst/iscsi-scst/target.c
---- orig/linux-3.2/drivers/scst/iscsi-scst/target.c
-+++ linux-3.2/drivers/scst/iscsi-scst/target.c
-@@ -0,0 +1,533 @@
-+/*
-+ * Copyright (C) 2002 - 2003 Ardis Technolgies <roman@ardistech.com>
-+ * Copyright (C) 2007 - 2011 Vladislav Bolkhovitin
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/delay.h>
-+#include <linux/module.h>
-+
-+#include "iscsi.h"
-+#include "digest.h"
-+
-+#define MAX_NR_TARGETS (1UL << 30)
-+
-+DEFINE_MUTEX(target_mgmt_mutex);
-+
-+/* All 3 protected by target_mgmt_mutex */
-+static LIST_HEAD(target_list);
-+static u32 next_target_id;
-+static u32 nr_targets;
-+
-+/* target_mgmt_mutex supposed to be locked */
-+struct iscsi_target *target_lookup_by_id(u32 id)
-+{
-+ struct iscsi_target *target;
-+
-+ list_for_each_entry(target, &target_list, target_list_entry) {
-+ if (target->tid == id)
-+ return target;
-+ }
-+ return NULL;
-+}
-+
-+/* target_mgmt_mutex supposed to be locked */
-+static struct iscsi_target *target_lookup_by_name(const char *name)
-+{
-+ struct iscsi_target *target;
-+
-+ list_for_each_entry(target, &target_list, target_list_entry) {
-+ if (!strcmp(target->name, name))
-+ return target;
-+ }
-+ return NULL;
-+}
-+
-+/* target_mgmt_mutex supposed to be locked */
-+static int iscsi_target_create(struct iscsi_kern_target_info *info, u32 tid,
-+ struct iscsi_target **out_target)
-+{
-+ int err = -EINVAL, len;
-+ char *name = info->name;
-+ struct iscsi_target *target;
-+
-+ TRACE_MGMT_DBG("Creating target tid %u, name %s", tid, name);
-+
-+ len = strlen(name);
-+ if (!len) {
-+		PRINT_ERROR("Target name is empty (tid %u)", tid);
-+ goto out;
-+ }
-+
-+ if (!try_module_get(THIS_MODULE)) {
-+		PRINT_ERROR("Failed to get module reference (tid %u)", tid);
-+ goto out;
-+ }
-+
-+ target = kzalloc(sizeof(*target), GFP_KERNEL);
-+ if (!target) {
-+ err = -ENOMEM;
-+ goto out_put;
-+ }
-+
-+ target->tid = info->tid = tid;
-+
-+ strlcpy(target->name, name, sizeof(target->name));
-+
-+ mutex_init(&target->target_mutex);
-+ INIT_LIST_HEAD(&target->session_list);
-+ INIT_LIST_HEAD(&target->attrs_list);
-+
-+ target->scst_tgt = scst_register_target(&iscsi_template, target->name);
-+ if (!target->scst_tgt) {
-+ PRINT_ERROR("%s", "scst_register_target() failed");
-+ err = -EBUSY;
-+ goto out_free;
-+ }
-+
-+ scst_tgt_set_tgt_priv(target->scst_tgt, target);
-+
-+ list_add_tail(&target->target_list_entry, &target_list);
-+
-+ *out_target = target;
-+
-+ return 0;
-+
-+out_free:
-+ kfree(target);
-+
-+out_put:
-+ module_put(THIS_MODULE);
-+
-+out:
-+ return err;
-+}
-+
-+/* target_mgmt_mutex supposed to be locked */
-+int __add_target(struct iscsi_kern_target_info *info)
-+{
-+ int err;
-+ u32 tid = info->tid;
-+ struct iscsi_target *target = NULL; /* to calm down sparse */
-+ struct iscsi_kern_attr *attr_info;
-+ union add_info_union {
-+ struct iscsi_kern_params_info params_info;
-+ struct iscsi_kern_attr attr_info;
-+ } *add_info;
-+ int i, rc;
-+ unsigned long attrs_ptr_long;
-+ struct iscsi_kern_attr __user *attrs_ptr;
-+
-+ if (nr_targets > MAX_NR_TARGETS) {
-+ err = -EBUSY;
-+ goto out;
-+ }
-+
-+ if (target_lookup_by_name(info->name)) {
-+		PRINT_ERROR("Target %s already exists!", info->name);
-+ err = -EEXIST;
-+ goto out;
-+ }
-+
-+ if (tid && target_lookup_by_id(tid)) {
-+		PRINT_ERROR("Target %u already exists!", tid);
-+ err = -EEXIST;
-+ goto out;
-+ }
-+
-+ add_info = kmalloc(sizeof(*add_info), GFP_KERNEL);
-+ if (add_info == NULL) {
-+ PRINT_ERROR("Unable to allocate additional info (size %zd)",
-+ sizeof(*add_info));
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+ attr_info = (struct iscsi_kern_attr *)add_info;
-+
-+ if (tid == 0) {
-+ do {
-+ if (!++next_target_id)
-+ ++next_target_id;
-+ } while (target_lookup_by_id(next_target_id));
-+
-+ tid = next_target_id;
-+ }
-+
-+ err = iscsi_target_create(info, tid, &target);
-+ if (err != 0)
-+ goto out_free;
-+
-+ nr_targets++;
-+
-+ mutex_lock(&target->target_mutex);
-+
-+ attrs_ptr_long = info->attrs_ptr;
-+ attrs_ptr = (struct iscsi_kern_attr __user *)attrs_ptr_long;
-+ for (i = 0; i < info->attrs_num; i++) {
-+ memset(attr_info, 0, sizeof(*attr_info));
-+
-+ rc = copy_from_user(attr_info, attrs_ptr, sizeof(*attr_info));
-+ if (rc != 0) {
-+			PRINT_ERROR("Failed to copy attribute of target %s "
-+				"from user space", info->name);
-+ err = -EFAULT;
-+ goto out_del_unlock;
-+ }
-+
-+ attr_info->name[sizeof(attr_info->name)-1] = '\0';
-+
-+ err = iscsi_add_attr(target, attr_info);
-+ if (err != 0)
-+ goto out_del_unlock;
-+
-+ attrs_ptr++;
-+ }
-+
-+ mutex_unlock(&target->target_mutex);
-+
-+ err = tid;
-+
-+out_free:
-+ kfree(add_info);
-+
-+out:
-+ return err;
-+
-+out_del_unlock:
-+ mutex_unlock(&target->target_mutex);
-+ __del_target(tid);
-+ goto out_free;
-+}
-+
-+static void target_destroy(struct iscsi_target *target)
-+{
-+ struct iscsi_attr *attr, *t;
-+
-+ TRACE_MGMT_DBG("Destroying target tid %u", target->tid);
-+
-+ list_for_each_entry_safe(attr, t, &target->attrs_list,
-+ attrs_list_entry) {
-+ __iscsi_del_attr(target, attr);
-+ }
-+
-+ scst_unregister_target(target->scst_tgt);
-+
-+ kfree(target);
-+
-+ module_put(THIS_MODULE);
-+ return;
-+}
-+
-+/* target_mgmt_mutex supposed to be locked */
-+int __del_target(u32 id)
-+{
-+ struct iscsi_target *target;
-+ int err;
-+
-+ target = target_lookup_by_id(id);
-+ if (!target) {
-+ err = -ENOENT;
-+ goto out;
-+ }
-+
-+ mutex_lock(&target->target_mutex);
-+
-+ if (!list_empty(&target->session_list)) {
-+ err = -EBUSY;
-+ goto out_unlock;
-+ }
-+
-+ list_del(&target->target_list_entry);
-+ nr_targets--;
-+
-+ mutex_unlock(&target->target_mutex);
-+
-+ target_destroy(target);
-+ return 0;
-+
-+out_unlock:
-+ mutex_unlock(&target->target_mutex);
-+
-+out:
-+ return err;
-+}
-+
-+/* target_mutex supposed to be locked */
-+void target_del_session(struct iscsi_target *target,
-+ struct iscsi_session *session, int flags)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Deleting session %p", session);
-+
-+ if (!list_empty(&session->conn_list)) {
-+ struct iscsi_conn *conn, *tc;
-+ list_for_each_entry_safe(conn, tc, &session->conn_list,
-+ conn_list_entry) {
-+ TRACE_MGMT_DBG("Mark conn %p closing", conn);
-+ __mark_conn_closed(conn, flags);
-+ }
-+ } else {
-+ TRACE_MGMT_DBG("Freeing session %p without connections",
-+ session);
-+ __del_session(target, session->sid);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* target_mutex supposed to be locked */
-+void target_del_all_sess(struct iscsi_target *target, int flags)
-+{
-+ struct iscsi_session *session, *ts;
-+
-+ TRACE_ENTRY();
-+
-+ if (!list_empty(&target->session_list)) {
-+ TRACE_MGMT_DBG("Deleting all sessions from target %p", target);
-+ list_for_each_entry_safe(session, ts, &target->session_list,
-+ session_list_entry) {
-+ target_del_session(target, session, flags);
-+ }
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+void target_del_all(void)
-+{
-+ struct iscsi_target *target, *t;
-+ bool first = true;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("%s", "Deleting all targets");
-+
-+ /* Not the best, ToDo */
-+ while (1) {
-+ mutex_lock(&target_mgmt_mutex);
-+
-+ if (list_empty(&target_list))
-+ break;
-+
-+ /*
-+		 * In the first iteration we don't delete any targets. Instead,
-+		 * we first go through all sessions of all targets and close
-+		 * their connections. Otherwise, a target's unregistration
-+		 * could block for a noticeable time while activities are
-+		 * being suspended over an active connection. This gets
-+		 * especially bad if such a connection is itself stuck waiting
-+		 * for something and can be recovered only by closing it. For
-+		 * such cases, rather than waiting for the connection to
-+		 * recover by itself, we act in advance.
-+ */
-+
-+ list_for_each_entry_safe(target, t, &target_list,
-+ target_list_entry) {
-+ mutex_lock(&target->target_mutex);
-+
-+ if (!list_empty(&target->session_list)) {
-+ target_del_all_sess(target,
-+ ISCSI_CONN_ACTIVE_CLOSE |
-+ ISCSI_CONN_DELETING);
-+ } else if (!first) {
-+ TRACE_MGMT_DBG("Deleting target %p", target);
-+ list_del(&target->target_list_entry);
-+ nr_targets--;
-+ mutex_unlock(&target->target_mutex);
-+ target_destroy(target);
-+ continue;
-+ }
-+
-+ mutex_unlock(&target->target_mutex);
-+ }
-+ mutex_unlock(&target_mgmt_mutex);
-+ msleep(100);
-+
-+ first = false;
-+ }
-+
-+ mutex_unlock(&target_mgmt_mutex);
-+
-+ TRACE_MGMT_DBG("%s", "Deleting all targets finished");
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static ssize_t iscsi_tgt_tid_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int pos;
-+ struct scst_tgt *scst_tgt;
-+ struct iscsi_target *tgt;
-+
-+ TRACE_ENTRY();
-+
-+ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ tgt = (struct iscsi_target *)scst_tgt_get_tgt_priv(scst_tgt);
-+
-+ pos = sprintf(buf, "%u\n", tgt->tid);
-+
-+ TRACE_EXIT_RES(pos);
-+ return pos;
-+}
-+
-+static struct kobj_attribute iscsi_tgt_attr_tid =
-+ __ATTR(tid, S_IRUGO, iscsi_tgt_tid_show, NULL);
-+
-+const struct attribute *iscsi_tgt_attrs[] = {
-+ &iscsi_tgt_attr_tid.attr,
-+ NULL,
-+};
-+
-+ssize_t iscsi_sysfs_send_event(uint32_t tid, enum iscsi_kern_event_code code,
-+ const char *param1, const char *param2, void **data)
-+{
-+ int res;
-+ struct scst_sysfs_user_info *info;
-+
-+ TRACE_ENTRY();
-+
-+ if (ctr_open_state != ISCSI_CTR_OPEN_STATE_OPEN) {
-+ PRINT_ERROR("%s", "User space process not connected");
-+ res = -EPERM;
-+ goto out;
-+ }
-+
-+ res = scst_sysfs_user_add_info(&info);
-+ if (res != 0)
-+ goto out;
-+
-+ TRACE_DBG("Sending event %d (tid %d, param1 %s, param2 %s, cookie %d, "
-+		"info %p)", code, tid, param1, param2, info->info_cookie, info);
-+
-+ res = event_send(tid, 0, 0, info->info_cookie, code, param1, param2);
-+ if (res <= 0) {
-+ PRINT_ERROR("event_send() failed: %d", res);
-+ if (res == 0)
-+ res = -EFAULT;
-+ goto out_free;
-+ }
-+
-+ /*
-+	 * It may wait 30 secs in a blocking connect to an unreachable
-+ * iSNS server. It must be fixed, but not now. ToDo.
-+ */
-+ res = scst_wait_info_completion(info, 31 * HZ);
-+
-+ if (data != NULL)
-+ *data = info->data;
-+
-+out_free:
-+ scst_sysfs_user_del_info(info);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+int iscsi_enable_target(struct scst_tgt *scst_tgt, bool enable)
-+{
-+ struct iscsi_target *tgt =
-+ (struct iscsi_target *)scst_tgt_get_tgt_priv(scst_tgt);
-+ int res;
-+ uint32_t type;
-+
-+ TRACE_ENTRY();
-+
-+ if (enable)
-+ type = E_ENABLE_TARGET;
-+ else
-+ type = E_DISABLE_TARGET;
-+
-+ TRACE_DBG("%s target %d", enable ? "Enabling" : "Disabling", tgt->tid);
-+
-+ res = iscsi_sysfs_send_event(tgt->tid, type, NULL, NULL, NULL);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+bool iscsi_is_target_enabled(struct scst_tgt *scst_tgt)
-+{
-+ struct iscsi_target *tgt =
-+ (struct iscsi_target *)scst_tgt_get_tgt_priv(scst_tgt);
-+
-+ return tgt->tgt_enabled;
-+}
-+
-+ssize_t iscsi_sysfs_add_target(const char *target_name, char *params)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ res = iscsi_sysfs_send_event(0, E_ADD_TARGET, target_name,
-+ params, NULL);
-+ if (res > 0) {
-+ /* It's tid */
-+ res = 0;
-+ }
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+ssize_t iscsi_sysfs_del_target(const char *target_name)
-+{
-+ int res = 0, tid;
-+
-+ TRACE_ENTRY();
-+
-+ /* We don't want to have tgt visible after the mutex unlock */
-+ {
-+ struct iscsi_target *tgt;
-+ mutex_lock(&target_mgmt_mutex);
-+ tgt = target_lookup_by_name(target_name);
-+ if (tgt == NULL) {
-+ PRINT_ERROR("Target %s not found", target_name);
-+ mutex_unlock(&target_mgmt_mutex);
-+ res = -ENOENT;
-+ goto out;
-+ }
-+ tid = tgt->tid;
-+ mutex_unlock(&target_mgmt_mutex);
-+ }
-+
-+ TRACE_DBG("Deleting target %s (tid %d)", target_name, tid);
-+
-+ res = iscsi_sysfs_send_event(tid, E_DEL_TARGET, NULL, NULL, NULL);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+ssize_t iscsi_sysfs_mgmt_cmd(char *cmd)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Sending mgmt cmd %s", cmd);
-+
-+ res = iscsi_sysfs_send_event(0, E_MGMT_CMD, cmd, NULL, NULL);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-diff -uprN orig/linux-3.2/Documentation/scst/README.iscsi linux-3.2/Documentation/scst/README.iscsi
---- orig/linux-3.2/Documentation/scst/README.iscsi
-+++ linux-3.2/Documentation/scst/README.iscsi
-@@ -0,0 +1,748 @@
-+iSCSI SCST target driver
-+========================
-+
-+ISCSI-SCST is a deeply reworked fork of iSCSI Enterprise Target (IET)
-+(http://iscsitarget.sourceforge.net). The reasons for the fork were:
-+
-+ - To be able to use the full power of the SCST core.
-+
-+ - To fix all the problems, corner-case issues and iSCSI standard
-+   violations that IET has.
-+
-+For more info see http://iscsi-scst.sourceforge.net.
-+
-+Usage
-+-----
-+
-+See http://iscsi-scst.sourceforge.net/iscsi-scst-howto.txt for how to
-+configure iSCSI-SCST.
-+
-+If you want to use Intel CRC32 offload and have the corresponding
-+hardware, you should load the crc32c-intel module. iSCSI-SCST will then
-+do all digest calculations using this facility.
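-+
-+For example, to load it before starting iSCSI-SCST:
-+
-+modprobe crc32c-intel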
-+
-+As of 2.0.0, use of iscsi-scstd.conf as well as the iscsi-scst-adm
-+utility is obsolete. Use the sysfs interface facilities instead.
-+
-+The flow of iSCSI-SCST initialization should be as follows:
-+
-+1. Load the SCST and iSCSI-SCST kernel modules with the necessary module
-+parameters, if needed.
-+
-+2. Start the iSCSI-SCST service.
-+
-+3. Configure targets, devices, LUNs, etc. either using scstadmin
-+(recommended), or using the sysfs interface directly as described below.
-+
-+It is recommended to use the TEST UNIT READY ("tur") command to check
-+whether an iSCSI-SCST target is alive in MPIO configurations.
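-+
-+For example, from the initiator side with the sg3_utils package
-+installed, assuming the exported LUN shows up there as the hypothetical
-+device /dev/sdc:
-+
-+sg_turs /dev/sdc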
-+
-+Also see the SCST README file for how to tune for the best performance.
-+
-+CAUTION: Running the target and an initiator on the same host isn't fully
-+=======  supported. See the SCST README file for details.
-+
-+
-+Sysfs interface
-+---------------
-+
-+The root of the SCST sysfs interface is /sys/kernel/scst_tgt. The root of
-+iSCSI-SCST is /sys/kernel/scst_tgt/targets/iscsi. It has the following entries:
-+
-+ - None, one or more subdirectories for targets, each named after the
-+   corresponding target.
-+
-+ - IncomingUser[num] - one or more optional attributes containing the
-+   user name and password of an incoming discovery user. They do not
-+   exist by default and can be added through the "mgmt" entry, see below.
-+
-+ - OutgoingUser - optional attribute containing the user name and
-+   password of the outgoing discovery user. It does not exist by default
-+   and can be added through the "mgmt" entry, see below.
-+
-+ - iSNSServer - contains the name or IP address of the iSNS server, with
-+   the optional "AccessControl" attribute, which enables iSNS access
-+   control. Empty by default.
-+
-+ - allowed_portal[num] - optional attribute which specifies on which
-+   portals (target's IP addresses) this target will be available. If not
-+   specified (the default), the target will be available on all portals.
-+   As soon as at least one allowed_portal is specified, the target will
-+   be accessible to initiators only on the specified portals. There may
-+   be any number of allowed_portal attributes. The portal specifications
-+   in the allowed_portal attributes can be simple DOS-type patterns
-+   containing the '*' and '?' symbols: '*' matches any number of
-+   characters, '?' matches any single character. For instance,
-+   "10.170.77.2" will match "10.170.7?.*". Additionally, you can use the
-+   negation sign '!' to invert the value of the pattern. For instance,
-+   "10.170.67.2" will match "!10.170.7?.*". See examples below.
-+
-+ - enabled - using this attribute you can enable or disable iSCSI-SCST
-+   accepting new connections. It allows you to finish configuring the
-+   global iSCSI-SCST attributes before it starts accepting new
-+   connections. 0 by default.
-+
-+ - open_state - read-only attribute which shows whether the user space
-+   part of iSCSI-SCST is connected to the kernel part.
-+
-+ - per_portal_acl - if set, makes iSCSI-SCST work in the per-portal
-+   access control mode. In this mode iSCSI-SCST registers all initiators
-+   in the SCST core using the "initiator_name#portal_IP_address"
-+   pattern, e.g. "iqn.2006-10.net.vlnb:ini#10.170.77.2" for initiator
-+   iqn.2006-10.net.vlnb connected through portal 10.170.77.2. This mode
-+   allows restricting particular initiators to particular portals on the
-+   target, so they neither see the other portals nor can connect through
-+   them. See below for more details and the example after this list.
-+
-+ - trace_level - allows enabling and disabling various tracing
-+   facilities. See the content of this file for help on how to use it.
-+
-+ - version - read-only attribute which shows the version of iSCSI-SCST
-+   and the enabled optional features.
-+
-+ - mgmt - main management entry, which allows configuring iSCSI-SCST,
-+   namely adding/deleting targets as well as adding/deleting optional
-+   global and per-target attributes. See the content of this file for
-+   help on how to use it.
-+
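-+For illustration, the per-portal access control mode mentioned above can
-+be enabled per target by writing to the boolean per_portal_acl attribute
-+(path as in the sysfs hierarchy shown later in this README):
-+
-+echo 1 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/per_portal_acl
-+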
-+Each iSCSI-SCST sysfs file (attribute) can contain the mark "[key]" in
-+its last line. This automatically added mark allows scstadmin to see
-+which attributes it should save in the config file. You can ignore it.
-+
-+Each target subdirectory contains the following entries:
-+
-+ - ini_groups - subdirectory defining initiator groups for this target,
-+ used to define per-initiator access control. See SCST core README for
-+ more details.
-+
-+ - luns - subdirectory defining LUNs of this target. See SCST core
-+ README for more details.
-+
-+ - sessions - subdirectory containing the sessions connected to this target.
-+
-+ - IncomingUser[num] - one or more optional attributes containing the
-+   user name and password of an incoming user. They do not exist by
-+   default and can be added through the "mgmt" entry, see above.
-+
-+ - OutgoingUser - optional attribute containing the user name and
-+   password of the outgoing user. It does not exist by default and can
-+   be added through the "mgmt" entry, see above.
-+
-+ - Entries defining the default iSCSI parameter values used during iSCSI
-+   parameter negotiation. Only entries which can be changed or make
-+   sense are listed there.
-+
-+ - QueuedCommands - defines the maximum number of commands queued to any
-+   session of this target. The default is 32 commands.
-+
-+ - NopInInterval - defines the interval between NOP-In requests, which
-+   the target will send on idle connections to check if the initiator
-+   is still alive. If there is no NOP-Out reply from the initiator
-+   within RspTimeout time, the corresponding connection will be closed.
-+   The default is 30 seconds. If it's set to 0, NOP-In requests are
-+   disabled. See the setting example after this list.
-+
-+ - NopInTimeout - defines the maximum time in seconds a NOP-In request
-+   can wait for a response from the initiator, after which the
-+   corresponding connection will be closed. The default is 30 seconds.
-+
-+ - RspTimeout - defines the maximum time in seconds a command can wait
-+   for a response from the initiator, after which the corresponding
-+   connection will be closed. The default is 90 seconds.
-+
-+ - enabled - using this attribute you can enable or disable iSCSI-SCST
-+   accepting new connections to this target. It allows you to finish
-+   configuring the target before it starts accepting new connections. 0
-+   by default.
-+
-+ - redirect - allows temporarily or permanently redirecting logins to
-+   the target to another portal. Discovery sessions will not be
-+   impacted, but normal sessions will be redirected before security
-+   negotiation. The destination should be specified using the format
-+   "<ip_addr>[:port] temp|perm". IPv6 addresses need to be enclosed in
-+   [] brackets. To remove redirection, provide an empty string. For
-+   example:
-+ echo "10.170.77.2:32600 temp" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/redirect
-+ will temporarily redirect login to portal 10.170.77.2 and port 32600.
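-+   Correspondingly, to remove the redirection again, write an empty
-+   string:
-+   echo "" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/redirect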
-+
-+ - tid - TID of this target.
-+
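-+As an illustration of changing the per-target entries above through
-+sysfs (using the sample target name from the scripts below; like
-+QueuedCommands in the advanced example, a new value is set by writing it
-+to the attribute):
-+
-+echo 60 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/NopInInterval
-+echo 120 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/RspTimeout
-+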
-+The "sessions" subdirectory contains one subdirectory for each connected
-+session, named after the connected initiator.
-+
-+Each session subdirectory contains the following entries:
-+
-+ - One subdirectory for each TCP connection in this session. ISCSI-SCST
-+   supports 1 connection per session, but the session subdirectory can
-+   contain several connections: one active and the others being closed.
-+
-+ - Entries defining negotiated iSCSI parameters. Only parameters which
-+ can be changed or make sense are listed there.
-+
-+ - initiator_name - contains the initiator name.
-+
-+ - sid - contains the SID of this session.
-+
-+ - reinstating - contains the reinstatement state of this session.
-+
-+ - force_close - write-only attribute, which allows force-closing this
-+   session. This is the only writable session attribute.
-+
-+ - active_commands - contains the number of active SCSI commands in
-+   this session, i.e. those not yet executed or still being executed.
-+
-+ - commands - contains the overall number of SCSI commands in this session.
-+
-+Each connection subdirectory contains the following entries:
-+
-+ - cid - contains the CID of this connection.
-+
-+ - ip - contains the IP address of the connected initiator.
-+
-+ - state - contains the processing state of this connection.
-+
-+See SCST README for info about other attributes.
-+
-+Below is a sample script which configures one virtual disk "disk1" using
-+the /disk1 image and one target iqn.2006-10.net.vlnb:tgt with all default
-+parameters:
-+
-+#!/bin/bash
-+
-+modprobe scst
-+modprobe scst_vdisk
-+
-+echo "add_device disk1 filename=/disk1; nv_cache=1" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
-+
-+service iscsi-scst start
-+
-+echo "add_target iqn.2006-10.net.vlnb:tgt" >/sys/kernel/scst_tgt/targets/iscsi/mgmt
-+echo "add disk1 0" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/luns/mgmt
-+
-+echo 1 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/enabled
-+echo 1 >/sys/kernel/scst_tgt/targets/iscsi/enabled
-+
-+Below is another sample script which configures one real local SCSI disk
-+0:0:1:0 and one target iqn.2006-10.net.vlnb:tgt with all default parameters:
-+
-+#!/bin/bash
-+
-+modprobe scst
-+modprobe scst_disk
-+
-+echo "add_device 0:0:1:0" >/sys/kernel/scst_tgt/handlers/dev_disk/mgmt
-+
-+service iscsi-scst start
-+
-+echo "add_target iqn.2006-10.net.vlnb:tgt" >/sys/kernel/scst_tgt/targets/iscsi/mgmt
-+echo "add 0:0:1:0 0" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/luns/mgmt
-+
-+echo 1 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/enabled
-+echo 1 >/sys/kernel/scst_tgt/targets/iscsi/enabled
-+
-+Below is an advanced sample script which configures more virtual devices
-+of various types, including a virtual CDROM, and 2 targets: one with all
-+default parameters, and another with some non-default parameters,
-+incoming and outgoing user names for CHAP authentication, and special
-+permissions for initiator iqn.2005-03.org.open-iscsi:cacdcd2520, which
-+will see another set of devices. This sample also configures CHAP
-+authentication for discovery sessions and an iSNS server with access
-+control.
-+
-+#!/bin/bash
-+
-+modprobe scst
-+modprobe scst_vdisk
-+
-+echo "add_device disk1 filename=/disk1; nv_cache=1" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
-+echo "add_device disk2 filename=/disk2; blocksize=4096; nv_cache=1" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
-+echo "add_device blockio filename=/dev/sda5" >/sys/kernel/scst_tgt/handlers/vdisk_blockio/mgmt
-+echo "add_device nullio" >/sys/kernel/scst_tgt/handlers/vdisk_nullio/mgmt
-+echo "add_device cdrom" >/sys/kernel/scst_tgt/handlers/vcdrom/mgmt
-+
-+service iscsi-scst start
-+
-+echo "192.168.1.16 AccessControl" >/sys/kernel/scst_tgt/targets/iscsi/iSNSServer
-+echo "add_attribute IncomingUser joeD 12charsecret" >/sys/kernel/scst_tgt/targets/iscsi/mgmt
-+echo "add_attribute OutgoingUser jackD 12charsecret1" >/sys/kernel/scst_tgt/targets/iscsi/mgmt
-+
-+echo "add_target iqn.2006-10.net.vlnb:tgt" >/sys/kernel/scst_tgt/targets/iscsi/mgmt
-+
-+echo "add disk1 0" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/luns/mgmt
-+echo "add cdrom 1" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/luns/mgmt
-+
-+echo "add_target iqn.2006-10.net.vlnb:tgt1" >/sys/kernel/scst_tgt/targets/iscsi/mgmt
-+echo "add_target_attribute iqn.2006-10.net.vlnb:tgt1 IncomingUser1 joe2 12charsecret2" >/sys/kernel/scst_tgt/targets/iscsi/mgmt
-+echo "add_target_attribute iqn.2006-10.net.vlnb:tgt1 IncomingUser joe 12charsecret" >/sys/kernel/scst_tgt/targets/iscsi/mgmt
-+echo "add_target_attribute iqn.2006-10.net.vlnb:tgt1 OutgoingUser jim1 12charpasswd" >/sys/kernel/scst_tgt/targets/iscsi/mgmt
-+echo "No" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/InitialR2T
-+echo "Yes" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/ImmediateData
-+echo "8192" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/MaxRecvDataSegmentLength
-+echo "8192" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/MaxXmitDataSegmentLength
-+echo "131072" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/MaxBurstLength
-+echo "32768" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/FirstBurstLength
-+echo "1" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/MaxOutstandingR2T
-+echo "CRC32C,None" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/HeaderDigest
-+echo "CRC32C,None" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/DataDigest
-+echo "32" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/QueuedCommands
-+
-+echo "add disk2 0" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/luns/mgmt
-+echo "add nullio 26" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/luns/mgmt
-+
-+echo "create special_ini" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/ini_groups/mgmt
-+echo "add blockio 0 read_only=1" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/ini_groups/special_ini/luns/mgmt
-+echo "add iqn.2005-03.org.open-iscsi:cacdcd2520" >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/ini_groups/special_ini/initiators/mgmt
-+
-+echo 1 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/enabled
-+echo 1 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/enabled
-+
-+echo 1 >/sys/kernel/scst_tgt/targets/iscsi/enabled
-+
-+The resulting overall SCST sysfs hierarchy with an initiator connected to
-+both iSCSI-SCST targets will look like this:
-+
-+/sys/kernel/scst_tgt
-+|-- devices
-+| |-- blockio
-+| | |-- blocksize
-+| | |-- exported
-+| | | `-- export0 -> ../../../targets/iscsi/iqn.2006-10.net.vlnb:tgt1/ini_groups/special_ini/luns/0
-+| | |-- filename
-+| | |-- handler -> ../../handlers/vdisk_blockio
-+| | |-- nv_cache
-+| | |-- read_only
-+| | |-- removable
-+| | |-- resync_size
-+| | |-- size_mb
-+| | |-- t10_dev_id
-+| | |-- threads_num
-+| | |-- threads_pool_type
-+| | |-- type
-+| | `-- usn
-+| |-- cdrom
-+| | |-- exported
-+| | | `-- export0 -> ../../../targets/iscsi/iqn.2006-10.net.vlnb:tgt/luns/1
-+| | |-- filename
-+| | |-- handler -> ../../handlers/vcdrom
-+| | |-- size_mb
-+| | |-- t10_dev_id
-+| | |-- threads_num
-+| | |-- threads_pool_type
-+| | |-- type
-+| | `-- usn
-+| |-- disk1
-+| | |-- blocksize
-+| | |-- exported
-+| | | `-- export0 -> ../../../targets/iscsi/iqn.2006-10.net.vlnb:tgt/luns/0
-+| | |-- filename
-+| | |-- handler -> ../../handlers/vdisk_fileio
-+| | |-- nv_cache
-+| | |-- o_direct
-+| | |-- read_only
-+| | |-- removable
-+| | |-- resync_size
-+| | |-- size_mb
-+| | |-- t10_dev_id
-+| | |-- type
-+| | |-- usn
-+| | `-- write_through
-+| |-- disk2
-+| | |-- blocksize
-+| | |-- exported
-+| | | `-- export0 -> ../../../targets/iscsi/iqn.2006-10.net.vlnb:tgt1/luns/0
-+| | |-- filename
-+| | |-- handler -> ../../handlers/vdisk_fileio
-+| | |-- nv_cache
-+| | |-- o_direct
-+| | |-- read_only
-+| | |-- removable
-+| | |-- resync_size
-+| | |-- size_mb
-+| | |-- t10_dev_id
-+| | |-- threads_num
-+| | |-- threads_pool_type
-+| | |-- type
-+| | |-- usn
-+| | `-- write_through
-+| `-- nullio
-+| |-- blocksize
-+| |-- exported
-+| | `-- export0 -> ../../../targets/iscsi/iqn.2006-10.net.vlnb:tgt1/luns/26
-+| |-- handler -> ../../handlers/vdisk_nullio
-+| |-- read_only
-+| |-- removable
-+| |-- size_mb
-+| |-- t10_dev_id
-+| |-- threads_num
-+| |-- threads_pool_type
-+| |-- type
-+| `-- usn
-+|-- handlers
-+| |-- vcdrom
-+| | |-- cdrom -> ../../devices/cdrom
-+| | |-- mgmt
-+| | |-- trace_level
-+| | `-- type
-+| |-- vdisk_blockio
-+| | |-- blockio -> ../../devices/blockio
-+| | |-- mgmt
-+| | |-- trace_level
-+| | `-- type
-+| |-- vdisk_fileio
-+| | |-- disk1 -> ../../devices/disk1
-+| | |-- disk2 -> ../../devices/disk2
-+| | |-- mgmt
-+| | |-- trace_level
-+| | `-- type
-+| `-- vdisk_nullio
-+| |-- mgmt
-+| |-- nullio -> ../../devices/nullio
-+| |-- trace_level
-+| `-- type
-+|-- sgv
-+| |-- global_stats
-+| |-- sgv
-+| | `-- stats
-+| |-- sgv-clust
-+| | `-- stats
-+| `-- sgv-dma
-+| `-- stats
-+|-- targets
-+| `-- iscsi
-+| |-- IncomingUser
-+| |-- OutgoingUser
-+| |-- enabled
-+| |-- iSNSServer
-+| |-- iqn.2006-10.net.vlnb:tgt
-+| | |-- DataDigest
-+| | |-- FirstBurstLength
-+| | |-- HeaderDigest
-+| | |-- ImmediateData
-+| | |-- InitialR2T
-+| | |-- MaxBurstLength
-+| | |-- MaxOutstandingR2T
-+| | |-- MaxRecvDataSegmentLength
-+| | |-- MaxXmitDataSegmentLength
-+| | |-- NopInInterval
-+| | |-- QueuedCommands
-+| | |-- RspTimeout
-+| | |-- enabled
-+| | |-- ini_groups
-+| | | `-- mgmt
-+| | |-- luns
-+| | | |-- 0
-+| | | | |-- device -> ../../../../../devices/disk1
-+| | | | `-- read_only
-+| | | |-- 1
-+| | | | |-- device -> ../../../../../devices/cdrom
-+| | | | `-- read_only
-+| | | `-- mgmt
-+| | |-- per_portal_acl
-+| | |-- redirect
-+| | |-- rel_tgt_id
-+| | |-- sessions
-+| | | `-- iqn.2005-03.org.open-iscsi:cacdcd2520
-+| | | |-- 10.170.75.2
-+| | | | |-- cid
-+| | | | |-- ip
-+| | | | `-- state
-+| | | |-- DataDigest
-+| | | |-- FirstBurstLength
-+| | | |-- HeaderDigest
-+| | | |-- ImmediateData
-+| | | |-- InitialR2T
-+| | | |-- MaxBurstLength
-+| | | |-- MaxOutstandingR2T
-+| | | |-- MaxRecvDataSegmentLength
-+| | | |-- MaxXmitDataSegmentLength
-+| | | |-- active_commands
-+| | | |-- commands
-+| | | |-- force_close
-+| | | |-- initiator_name
-+| | | |-- luns -> ../../luns
-+| | | |-- reinstating
-+| | | `-- sid
-+| | `-- tid
-+| |-- iqn.2006-10.net.vlnb:tgt1
-+| | |-- DataDigest
-+| | |-- FirstBurstLength
-+| | |-- HeaderDigest
-+| | |-- ImmediateData
-+| | |-- IncomingUser
-+| | |-- IncomingUser1
-+| | |-- InitialR2T
-+| | |-- MaxBurstLength
-+| | |-- MaxOutstandingR2T
-+| | |-- MaxRecvDataSegmentLength
-+| | |-- MaxXmitDataSegmentLength
-+| | |-- NopInInterval
-+| | |-- OutgoingUser
-+| | |-- QueuedCommands
-+| | |-- RspTimeout
-+| | |-- enabled
-+| | |-- ini_groups
-+| | | |-- mgmt
-+| | | `-- special_ini
-+| | | |-- initiators
-+| | | | |-- iqn.2005-03.org.open-iscsi:cacdcd2520
-+| | | | `-- mgmt
-+| | | `-- luns
-+| | | |-- 0
-+| | | | |-- device -> ../../../../../../../devices/blockio
-+| | | | `-- read_only
-+| | | `-- mgmt
-+| | |-- luns
-+| | | |-- 0
-+| | | | |-- device -> ../../../../../devices/disk2
-+| | | | `-- read_only
-+| | | |-- 26
-+| | | | |-- device -> ../../../../../devices/nullio
-+| | | | `-- read_only
-+| | | `-- mgmt
-+| | |-- per_portal_acl
-+| | |-- redirect
-+| | |-- rel_tgt_id
-+| | |-- sessions
-+| | | `-- iqn.2005-03.org.open-iscsi:cacdcd2520
-+| | | |-- 10.170.75.2
-+| | | | |-- cid
-+| | | | |-- ip
-+| | | | `-- state
-+| | | |-- DataDigest
-+| | | |-- FirstBurstLength
-+| | | |-- HeaderDigest
-+| | | |-- ImmediateData
-+| | | |-- InitialR2T
-+| | | |-- MaxBurstLength
-+| | | |-- MaxOutstandingR2T
-+| | | |-- MaxRecvDataSegmentLength
-+| | | |-- MaxXmitDataSegmentLength
-+| | | |-- active_commands
-+| | | |-- commands
-+| | | |-- force_close
-+| | | |-- initiator_name
-+| | | |-- luns -> ../../ini_groups/special_ini/luns
-+| | | |-- reinstating
-+| | | `-- sid
-+| | `-- tid
-+| |-- mgmt
-+| |-- open_state
-+| |-- trace_level
-+| `-- version
-+|-- threads
-+|-- trace_level
-+`-- version
-+
-+
-+Advanced initiators access control
-+----------------------------------
-+
-+iSCSI-SCST optionally allows you to control the visibility and
-+accessibility of your target and its portals (IP addresses) to remote
-+initiators. This control covers both SendTargets discovery of the
-+target's portals and regular LUN access.
-+
-+This facility supersedes the obsolete initiators.[allow,deny] method,
-+which will be removed in a future version.
-+
-+This facility is available only in the sysfs build of iSCSI-SCST.
-+
-+By default, all portals are available for the initiators.
-+
-+1. If you want to enable or disable one or more of the target's portals
-+for all initiators, define one or more allowed_portal attributes.
-+For example:
-+
-+echo 'add_target_attribute iqn.2006-10.net.vlnb:tgt allowed_portal 10.170.77.2' >/sys/kernel/scst_tgt/targets/iscsi/mgmt
-+
-+will enable only portal 10.170.77.2 and disable all other portals.
-+
-+echo 'add_target_attribute iqn.2006-10.net.vlnb:tgt allowed_portal 10.170.77.2' >/sys/kernel/scst_tgt/targets/iscsi/mgmt
-+echo 'add_target_attribute iqn.2006-10.net.vlnb:tgt allowed_portal 10.170.75.2' >/sys/kernel/scst_tgt/targets/iscsi/mgmt
-+
-+will enable only portals 10.170.77.2 and 10.170.75.2 and disable all
-+other portals.
-+
-+echo 'add_target_attribute iqn.2006-10.net.vlnb:tgt allowed_portal 10.170.7?.2' >/sys/kernel/scst_tgt/targets/iscsi/mgmt
-+
-+will enable only portals 10.170.7x.2 and disable all other portals.
-+
-+echo 'add_target_attribute iqn.2006-10.net.vlnb:tgt allowed_portal !*' >/sys/kernel/scst_tgt/targets/iscsi/mgmt
-+
-+will disable all portals.
-+
-+2. If you want to allow only a specific set of initiators to connect to
-+your target, don't add any default LUNs for the target and instead
-+create a security group to which the allowed initiators will be
-+assigned.
-+
-+For example, suppose we want initiator iqn.2005-03.org.vlnb:cacdcd2520,
-+and only it, to be able to access target iqn.2006-10.net.vlnb:tgt:
-+
-+echo 'add_target iqn.2006-10.net.vlnb:tgt' >/sys/kernel/scst_tgt/targets/iscsi/mgmt
-+echo 'create allowed_ini' >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/ini_groups/mgmt
-+echo 'add dev1 0' >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/ini_groups/allowed_ini/luns/mgmt
-+echo 'add iqn.2005-03.org.vlnb:cacdcd2520' >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/ini_groups/allowed_ini/initiators/mgmt
-+echo 1 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/enabled
-+
-+Since there will be no default LUNs for the target, all initiators other
-+than iqn.2005-03.org.vlnb:cacdcd2520 will be blocked from accessing it.
-+
-+Alternatively, you can create a security group without any LUNs and
-+assign to it all initiators except the allowed one:
-+
-+echo 'add_target iqn.2006-10.net.vlnb:tgt' >/sys/kernel/scst_tgt/targets/iscsi/mgmt
-+echo 'add dev1 0' >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/luns/mgmt
-+echo 'create denied_inis' >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/ini_groups/mgmt
-+echo 'add !iqn.2005-03.org.vlnb:cacdcd2520' >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/ini_groups/denied_inis/initiators/mgmt
-+echo 1 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/enabled
-+
-+3. If you want to enable or disable one or more of the target's portals
-+for particular initiators, set the per_portal_acl attribute to 1 and
-+configure SCST access control for those initiators. If an SCST security
-+group doesn't have any LUNs, the initiators assigned to it will not see
-+this target and/or its portal. For example:
-+
-+(We assume that an empty group "BLOCKING_GROUP" has already been created
-+for target iqn.2006-10.net.vlnb:tgt by the command (see above for more
-+information):
-+"echo 'create BLOCKING_GROUP' >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/ini_groups/mgmt")
-+
-+echo 'add iqn.2005-03.org.vlnb:cacdcd2520#10.170.77.2' >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/ini_groups/BLOCKING_GROUP/initiators/mgmt
-+
-+will block access of initiator iqn.2005-03.org.vlnb:cacdcd2520 to
-+target iqn.2006-10.net.vlnb:tgt portal 10.170.77.2.
-+
-+Another example:
-+
-+echo 'add iqn.2005-03.org.vlnb:cacdcd2520*' >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/ini_groups/BLOCKING_GROUP/initiators/mgmt
-+
-+will block access of initiator iqn.2005-03.org.vlnb:cacdcd2520 to
-+all target iqn.2006-10.net.vlnb:tgt portals.
-+
-+
-+Troubleshooting
-+---------------
-+
-+If you have any problems, start troubleshooting by looking at the
-+kernel and system logs. iSCSI-SCST and the SCST core send their messages
-+to the kernel log, while iscsi-scstd sends its messages to the system
-+log. In most Linux distributions both logs end up in the
-+/var/log/messages file.
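-+
-+For example, a quick first look (assuming a syslog setup that writes to
-+/var/log/messages; adjust the path for your distribution):
-+
-+dmesg | grep -i scst | tail -n 50
-+grep iscsi-scstd /var/log/messages | tail -n 50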
-+
-+Then, it might be helpful to increase the logging level. For the kernel
-+modules, make a debug build by enabling CONFIG_SCST_DEBUG.
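-+
-+If tracing is compiled in, you can also inspect the current trace
-+settings at runtime via the trace_level attributes visible in the
-+hierarchy above, for example:
-+
-+cat /sys/kernel/scst_tgt/trace_level
-+cat /sys/kernel/scst_tgt/targets/iscsi/trace_level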
-+
-+If after looking at the logs the reason for your problem is still
-+unclear, report it to the SCST mailing list
-+scst-devel@lists.sourceforge.net.
-+
-+
-+Work if target's backstorage or link is too slow
-+------------------------------------------------
-+
-+In some cases you may experience I/O stalls or see abort or reset
-+messages in the kernel log. This can happen under high I/O load, when
-+your target's backstorage gets overloaded, or when working over a slow
-+link that can't serve all the queued commands in time.
-+
-+To work around it you can reduce the QueuedCommands parameter for the
-+corresponding target to some lower value, like 8 (the default is 32).
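-+
-+For example, using the example target name from above (iSCSI parameters
-+may need the target to be disabled, or to have no active sessions, while
-+they are changed):
-+
-+echo 8 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/QueuedCommands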
-+
-+Also see SCST README file for more details about that issue and ways to
-+prevent it.
-+
-+
-+Performance advice
-+------------------
-+
-+1. If you use Windows XP or Windows 2003+ as initiators, consider
-+decreasing the TcpAckFrequency parameter to 1. See
-+http://support.microsoft.com/kb/328890/ or search for "TcpAckFrequency"
-+for more details.
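-+
-+Per the KB article above, TcpAckFrequency is a per-interface DWORD
-+registry value (the interface GUID below is a placeholder):
-+
-+HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces\{interface-GUID}\TcpAckFrequency = 1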
-+
-+2. See how to get the maximum throughput from iSCSI, for instance, at
-+http://virtualgeek.typepad.com/virtual_geek/2009/01/a-multivendor-post-to-help-our-mutual-iscsi-customers-using-vmware.html.
-+It's about VMware, but its recommendations apply to other environments
-+as well.
-+
-+3. The iSCSI initiators shipped with pre-CentOS/RHEL 5 distributions
-+have been reported to have some performance problems. If you use one of
-+them, upgrading is strongly advised.
-+
-+4. If you are going to use your target in a VM environment, for
-+instance as shared storage with VMware, make sure all your VMs connect
-+to the target via *separate* sessions, i.e. each VM has its own
-+connection to the target, rather than all VMs sharing a single
-+connection. You can check this using the SCST sysfs interface, as shown
-+below. If you miss it, you can lose a great deal of parallel-access
-+performance from different VMs. This doesn't apply if your VMs
-+are using the same shared storage, like VMFS, for instance. In that
-+case all your VM hosts will be connected to the target via separate
-+sessions, which is enough.
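-+
-+For example, each connected initiator should appear as a separate
-+session under the target (using the example target name from above):
-+
-+ls /sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/sessions/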
-+
-+5. Many dual-port network adapters are not able to transfer data
-+simultaneously on both ports, i.e. they transfer data via both ports at
-+the same speed as via any single port. Thus, using such adapters in an
-+MPIO configuration can't improve performance. To allow MPIO to double
-+performance you should either use separate network adapters, or find a
-+dual-port adapter capable of transferring data simultaneously on both
-+ports. You can check this by running 2 iperf instances through both
-+ports in parallel, as sketched below.
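-+
-+A minimal sketch (the IP addresses are placeholders for the two ports;
-+start "iperf -s" on the receiving side first):
-+
-+iperf -c 192.168.1.1 -t 30 &
-+iperf -c 192.168.2.1 -t 30 &
-+wait
-+
-+If the summed throughput is about the same as for a single port, the
-+adapter can't drive both ports at full speed simultaneously.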
-+
-+6. Since network offload works much better in the write direction than
-+for reading (simplifying, in the read direction there's often an
-+additional data copy), in many cases with 10GbE in a single
-+initiator-target pair the initiator's CPU is the bottleneck, so you may
-+see the initiator read data at a much slower rate than it can write. You
-+can check this by watching *each particular* CPU's load, as shown below,
-+to find out if any of them is close to 100%, including IRQ processing
-+load. Note that many tools like vmstat give the aggregate load over all
-+CPUs, so with 4 cores 25% corresponds to 100% load on a single CPU.
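-+
-+For example, mpstat from the sysstat package shows per-CPU load,
-+including the hard and soft IRQ shares:
-+
-+mpstat -P ALL 1
-+
-+Alternatively, run top and press "1" to get a per-CPU breakdown.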
-+
-+7. For high-speed network adapters it can be better to configure them
-+to serve connections, e.g., from initiator X on CPU0 and from initiator
-+Y on CPU1. Then you can also bind the threads processing them to CPU0
-+and CPU1 respectively, using the cpu_mask attribute of their targets or
-+security groups, as sketched below. In NUMA-like configurations this can
-+significantly boost IOPS performance.
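-+
-+A sketch, assuming your SCST build exposes the cpu_mask attribute
-+mentioned above (the values are hex CPU bitmasks: 1 = CPU0, 2 = CPU1):
-+
-+echo 1 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt/cpu_mask
-+echo 2 >/sys/kernel/scst_tgt/targets/iscsi/iqn.2006-10.net.vlnb:tgt1/cpu_mask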
-+
-+8. See the SCST core's README for more advice. In particular, make sure
-+the io_grouping_type option is set correctly.
-+
-+
-+Compilation options
-+-------------------
-+
-+The following compilation options can be commented in/out in the
-+kernel module's Makefile (see the sketch after the list):
-+
-+ - CONFIG_SCST_DEBUG - turns on some debugging code, including some logging.
-+ Makes the driver considerably bigger and slower, producing a large amount
-+ of log data.
-+
-+ - CONFIG_SCST_TRACING - turns on the ability to log events. Makes the
-+ driver considerably bigger and leads to some performance loss.
-+
-+ - CONFIG_SCST_EXTRACHECKS - adds extra validity checks in various places.
-+
-+ - CONFIG_SCST_ISCSI_DEBUG_DIGEST_FAILURES - simulates digest failures in
-+ random places.
-+
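-+In the in-kernel build provided by this patch these options correspond
-+to regular Kconfig symbols, so a sketch for enabling them (assuming a
-+standard kernel source tree) would be:
-+
-+./scripts/config --enable CONFIG_SCST_DEBUG --enable CONFIG_SCST_TRACING
-+make oldconfig && make modules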
-+
-+Credits
-+-------
-+
-+Thanks to:
-+
-+ * Ming Zhang <blackmagic02881@gmail.com> for fixes
-+
-+ * Krzysztof Blaszkowski <kb@sysmikro.com.pl> for many fixes
-+
-+ * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> for comments and help in
-+ debugging
-+
-+ * Tomasz Chmielewski <mangoo@wpkg.org> for testing and suggestions
-+
-+ * Bart Van Assche <bvanassche@acm.org> for a lot of help
-+
-+Vladislav Bolkhovitin <vst@vlnb.net>, http://scst.sourceforge.net
-+
-diff -uprN orig/linux-3.2/drivers/scsi/qla2xxx/qla2x_tgt.h linux-3.2/drivers/scsi/qla2xxx/qla2x_tgt.h
---- orig/linux-3.2/drivers/scsi/qla2xxx/qla2x_tgt.h
-+++ linux-3.2/drivers/scsi/qla2xxx/qla2x_tgt.h
-@@ -0,0 +1,137 @@
-+/*
-+ * qla2x_tgt.h
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * Additional file for the target driver support.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation; either version 2
-+ * of the License, or (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+/*
-+ * This should be included only from within qla2xxx module.
-+ */
-+
-+#ifndef __QLA2X_TGT_H
-+#define __QLA2X_TGT_H
-+
-+#include <linux/version.h>
-+
-+extern request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);
-+
-+#ifdef CONFIG_SCSI_QLA2XXX_TARGET
-+
-+#include "qla2x_tgt_def.h"
-+
-+extern struct qla_tgt_data qla_target;
-+
-+void qla_set_tgt_mode(scsi_qla_host_t *ha);
-+void qla_clear_tgt_mode(scsi_qla_host_t *ha);
-+
-+static inline bool qla_tgt_mode_enabled(scsi_qla_host_t *ha)
-+{
-+ return ha->host->active_mode & MODE_TARGET;
-+}
-+
-+static inline bool qla_ini_mode_enabled(scsi_qla_host_t *ha)
-+{
-+ return ha->host->active_mode & MODE_INITIATOR;
-+}
-+
-+static inline void qla_reverse_ini_mode(scsi_qla_host_t *ha)
-+{
-+ if (ha->host->active_mode & MODE_INITIATOR)
-+ ha->host->active_mode &= ~MODE_INITIATOR;
-+ else
-+ ha->host->active_mode |= MODE_INITIATOR;
-+}
-+
-+/********************************************************************\
-+ * ISP Queue types left out of new QLogic driver (from old version)
-+\********************************************************************/
-+
-+/*
-+ * __qla2x00_send_enable_lun
-+ * Issue enable or disable LUN entry IOCB.
-+ *
-+ * Input:
-+ * ha = adapter block pointer.
-+ *
-+ * Caller MUST have hardware lock held. This function might release it,
-+ * then reacquire.
-+ */
-+static inline void
-+__qla2x00_send_enable_lun(scsi_qla_host_t *ha, int enable)
-+{
-+ elun_entry_t *pkt;
-+
-+ BUG_ON(IS_FWI2_CAPABLE(ha));
-+
-+ pkt = (elun_entry_t *)qla2x00_req_pkt(ha);
-+ if (pkt != NULL) {
-+ pkt->entry_type = ENABLE_LUN_TYPE;
-+ if (enable) {
-+ pkt->command_count = QLA2X00_COMMAND_COUNT_INIT;
-+ pkt->immed_notify_count = QLA2X00_IMMED_NOTIFY_COUNT_INIT;
-+ pkt->timeout = 0xffff;
-+ } else {
-+ pkt->command_count = 0;
-+ pkt->immed_notify_count = 0;
-+ pkt->timeout = 0;
-+ }
-+ DEBUG2(printk(KERN_DEBUG
-+ "scsi%lu:ENABLE_LUN IOCB imm %u cmd %u timeout %u\n",
-+ ha->host_no, pkt->immed_notify_count,
-+ pkt->command_count, pkt->timeout));
-+
-+ /* Issue command to ISP */
-+ qla2x00_isp_cmd(ha);
-+
-+ } else
-+ qla_clear_tgt_mode(ha);
-+#if defined(QL_DEBUG_LEVEL_2) || defined(QL_DEBUG_LEVEL_3)
-+ if (!pkt)
-+ printk(KERN_ERR "%s: **** FAILED ****\n", __func__);
-+#endif
-+
-+ return;
-+}
-+
-+/*
-+ * qla2x00_send_enable_lun
-+ * Issue enable LUN entry IOCB.
-+ *
-+ * Input:
-+ * ha = adapter block pointer.
-+ * enable = enable/disable flag.
-+ */
-+static inline void
-+qla2x00_send_enable_lun(scsi_qla_host_t *ha, bool enable)
-+{
-+ if (!IS_FWI2_CAPABLE(ha)) {
-+ unsigned long flags;
-+ spin_lock_irqsave(&ha->hardware_lock, flags);
-+ __qla2x00_send_enable_lun(ha, enable);
-+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
-+ }
-+}
-+
-+extern void qla2xxx_add_targets(void);
-+extern size_t
-+qla2xxx_add_vtarget(u64 *port_name, u64 *node_name, u64 *parent_host);
-+extern size_t qla2xxx_del_vtarget(u64 *port_name);
-+
-+#endif /* CONFIG_SCSI_QLA2XXX_TARGET */
-+
-+#endif /* __QLA2X_TGT_H */
-diff -uprN orig/linux-3.2/drivers/scsi/qla2xxx/qla2x_tgt_def.h linux-3.2/drivers/scsi/qla2xxx/qla2x_tgt_def.h
---- orig/linux-3.2/drivers/scsi/qla2xxx/qla2x_tgt_def.h
-+++ linux-3.2/drivers/scsi/qla2xxx/qla2x_tgt_def.h
-@@ -0,0 +1,771 @@
-+/*
-+ * qla2x_tgt_def.h
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
-+ * Copyright (C) 2007 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * Additional file for the target driver support.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation; either version 2
-+ * of the License, or (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+/*
-+ * This is the global def file that is useful for including from the
-+ * target portion.
-+ */
-+
-+#ifndef __QLA2X_TGT_DEF_H
-+#define __QLA2X_TGT_DEF_H
-+
-+#include "qla_def.h"
-+
-+#ifndef CONFIG_SCSI_QLA2XXX_TARGET
-+#error __FILE__ " included without CONFIG_SCSI_QLA2XXX_TARGET"
-+#endif
-+
-+#ifndef ENTER
-+#define ENTER(a)
-+#endif
-+
-+#ifndef LEAVE
-+#define LEAVE(a)
-+#endif
-+
-+/*
-+ * Must be changed on any change in any initiator visible interfaces or
-+ * data in the target add-on
-+ */
-+#define QLA2X_TARGET_MAGIC 270
-+
-+/*
-+ * Must be changed on any change in any target visible interfaces or
-+ * data in the initiator
-+ */
-+#define QLA2X_INITIATOR_MAGIC 57224
-+
-+#define QLA2X_INI_MODE_STR_EXCLUSIVE "exclusive"
-+#define QLA2X_INI_MODE_STR_DISABLED "disabled"
-+#define QLA2X_INI_MODE_STR_ENABLED "enabled"
-+
-+#define QLA2X_INI_MODE_EXCLUSIVE 0
-+#define QLA2X_INI_MODE_DISABLED 1
-+#define QLA2X_INI_MODE_ENABLED 2
-+
-+#define QLA2X00_COMMAND_COUNT_INIT 250
-+#define QLA2X00_IMMED_NOTIFY_COUNT_INIT 250
-+
-+/*
-+ * Used to mark which completion handles (for RIO Status's) are for CTIO's
-+ * vs. regular (non-target) info. This is checked for in
-+ * qla2x00_process_response_queue() to see if a handle coming back in a
-+ * multi-complete should come to the tgt driver or be handled there by qla2xxx
-+ */
-+#define CTIO_COMPLETION_HANDLE_MARK BIT_29
-+#if (CTIO_COMPLETION_HANDLE_MARK <= MAX_OUTSTANDING_COMMANDS)
-+#error "Hackish CTIO_COMPLETION_HANDLE_MARK no longer larger than MAX_OUTSTANDING_COMMANDS"
-+#endif
-+#define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK)
-+
-+/* Used to mark CTIO as intermediate */
-+#define CTIO_INTERMEDIATE_HANDLE_MARK BIT_30
-+
-+#ifndef OF_SS_MODE_0
-+/*
-+ * ISP target entries - Flags bit definitions.
-+ */
-+#define OF_SS_MODE_0 0
-+#define OF_SS_MODE_1 1
-+#define OF_SS_MODE_2 2
-+#define OF_SS_MODE_3 3
-+
-+#define OF_EXPL_CONF BIT_5 /* Explicit Confirmation Requested */
-+#define OF_DATA_IN BIT_6 /* Data in to initiator */
-+ /* (data from target to initiator) */
-+#define OF_DATA_OUT BIT_7 /* Data out from initiator */
-+ /* (data from initiator to target) */
-+#define OF_NO_DATA (BIT_7 | BIT_6)
-+#define OF_INC_RC BIT_8 /* Increment command resource count */
-+#define OF_FAST_POST BIT_9 /* Enable mailbox fast posting. */
-+#define OF_CONF_REQ BIT_13 /* Confirmation Requested */
-+#define OF_TERM_EXCH BIT_14 /* Terminate exchange */
-+#define OF_SSTS BIT_15 /* Send SCSI status */
-+#endif
-+
-+#ifndef DATASEGS_PER_COMMAND32
-+#define DATASEGS_PER_COMMAND32 3
-+#define DATASEGS_PER_CONT32 7
-+#define QLA_MAX_SG32(ql) \
-+ (((ql) > 0) ? \
-+ (DATASEGS_PER_COMMAND32 + DATASEGS_PER_CONT32 * ((ql) - 1)) : 0)
-+
-+#define DATASEGS_PER_COMMAND64 2
-+#define DATASEGS_PER_CONT64 5
-+#define QLA_MAX_SG64(ql) \
-+ (((ql) > 0) ? \
-+ (DATASEGS_PER_COMMAND64 + DATASEGS_PER_CONT64 * ((ql) - 1)) : 0)
-+#endif
-+
-+#ifndef DATASEGS_PER_COMMAND_24XX
-+#define DATASEGS_PER_COMMAND_24XX 1
-+#define DATASEGS_PER_CONT_24XX 5
-+#define QLA_MAX_SG_24XX(ql) \
-+ (min(1270, ((ql) > 0) ? \
-+ (DATASEGS_PER_COMMAND_24XX + DATASEGS_PER_CONT_24XX * ((ql) - 1)) : 0))
-+#endif
-+
-+/********************************************************************\
-+ * ISP Queue types left out of new QLogic driver (from old version)
-+\********************************************************************/
-+
-+#ifndef ENABLE_LUN_TYPE
-+#define ENABLE_LUN_TYPE 0x0B /* Enable LUN entry. */
-+/*
-+ * ISP queue - enable LUN entry structure definition.
-+ */
-+typedef struct {
-+ uint8_t entry_type; /* Entry type. */
-+ uint8_t entry_count; /* Entry count. */
-+ uint8_t sys_define; /* System defined. */
-+ uint8_t entry_status; /* Entry Status. */
-+ uint32_t sys_define_2; /* System defined. */
-+ uint8_t reserved_8;
-+ uint8_t reserved_1;
-+ uint16_t reserved_2;
-+ uint32_t reserved_3;
-+ uint8_t status;
-+ uint8_t reserved_4;
-+ uint8_t command_count; /* Number of ATIOs allocated. */
-+ uint8_t immed_notify_count; /* Number of Immediate Notify entries allocated. */
-+ uint16_t reserved_5;
-+ uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */
-+ uint16_t reserved_6[20];
-+} __attribute__((packed)) elun_entry_t;
-+#define ENABLE_LUN_SUCCESS 0x01
-+#define ENABLE_LUN_RC_NONZERO 0x04
-+#define ENABLE_LUN_INVALID_REQUEST 0x06
-+#define ENABLE_LUN_ALREADY_ENABLED 0x3E
-+#endif
-+
-+#ifndef MODIFY_LUN_TYPE
-+#define MODIFY_LUN_TYPE 0x0C /* Modify LUN entry. */
-+/*
-+ * ISP queue - modify LUN entry structure definition.
-+ */
-+typedef struct {
-+ uint8_t entry_type; /* Entry type. */
-+ uint8_t entry_count; /* Entry count. */
-+ uint8_t sys_define; /* System defined. */
-+ uint8_t entry_status; /* Entry Status. */
-+ uint32_t sys_define_2; /* System defined. */
-+ uint8_t reserved_8;
-+ uint8_t reserved_1;
-+ uint8_t operators;
-+ uint8_t reserved_2;
-+ uint32_t reserved_3;
-+ uint8_t status;
-+ uint8_t reserved_4;
-+ uint8_t command_count; /* Number of ATIOs allocated. */
-+ uint8_t immed_notify_count; /* Number of Immediate Notify */
-+ /* entries allocated. */
-+ uint16_t reserved_5;
-+ uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */
-+ uint16_t reserved_7[20];
-+} __attribute__((packed)) modify_lun_entry_t;
-+#define MODIFY_LUN_SUCCESS 0x01
-+#define MODIFY_LUN_CMD_ADD BIT_0
-+#define MODIFY_LUN_CMD_SUB BIT_1
-+#define MODIFY_LUN_IMM_ADD BIT_2
-+#define MODIFY_LUN_IMM_SUB BIT_3
-+#endif
-+
-+#define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \
-+ ? le16_to_cpu((iocb)->target.extended) \
-+ : (uint16_t)(iocb)->target.id.standard)
-+
-+#ifndef IMMED_NOTIFY_TYPE
-+#define IMMED_NOTIFY_TYPE 0x0D /* Immediate notify entry. */
-+/*
-+ * ISP queue - immediate notify entry structure definition.
-+ */
-+typedef struct {
-+ uint8_t entry_type; /* Entry type. */
-+ uint8_t entry_count; /* Entry count. */
-+ uint8_t sys_define; /* System defined. */
-+ uint8_t entry_status; /* Entry Status. */
-+ uint32_t sys_define_2; /* System defined. */
-+ target_id_t target;
-+ uint16_t lun;
-+ uint8_t target_id;
-+ uint8_t reserved_1;
-+ uint16_t status_modifier;
-+ uint16_t status;
-+ uint16_t task_flags;
-+ uint16_t seq_id;
-+ uint16_t srr_rx_id;
-+ uint32_t srr_rel_offs;
-+ uint16_t srr_ui;
-+#define SRR_IU_DATA_IN 0x1
-+#define SRR_IU_DATA_OUT 0x5
-+#define SRR_IU_STATUS 0x7
-+ uint16_t srr_ox_id;
-+ uint8_t reserved_2[30];
-+ uint16_t ox_id;
-+} __attribute__((packed)) notify_entry_t;
-+#endif
-+
-+#ifndef NOTIFY_ACK_TYPE
-+#define NOTIFY_ACK_TYPE 0x0E /* Notify acknowledge entry. */
-+/*
-+ * ISP queue - notify acknowledge entry structure definition.
-+ */
-+typedef struct {
-+ uint8_t entry_type; /* Entry type. */
-+ uint8_t entry_count; /* Entry count. */
-+ uint8_t sys_define; /* System defined. */
-+ uint8_t entry_status; /* Entry Status. */
-+ uint32_t sys_define_2; /* System defined. */
-+ target_id_t target;
-+ uint8_t target_id;
-+ uint8_t reserved_1;
-+ uint16_t flags;
-+ uint16_t resp_code;
-+ uint16_t status;
-+ uint16_t task_flags;
-+ uint16_t seq_id;
-+ uint16_t srr_rx_id;
-+ uint32_t srr_rel_offs;
-+ uint16_t srr_ui;
-+ uint16_t srr_flags;
-+ uint16_t srr_reject_code;
-+ uint8_t srr_reject_vendor_uniq;
-+ uint8_t srr_reject_code_expl;
-+ uint8_t reserved_2[26];
-+ uint16_t ox_id;
-+} __attribute__((packed)) nack_entry_t;
-+#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
-+#define NOTIFY_ACK_SRR_FLAGS_REJECT 1
-+
-+#define NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM 0x9
-+
-+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL 0
-+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_UNABLE_TO_SUPPLY_DATA 0x2a
-+
-+#define NOTIFY_ACK_SUCCESS 0x01
-+#endif
-+
-+#ifndef ACCEPT_TGT_IO_TYPE
-+#define ACCEPT_TGT_IO_TYPE 0x16 /* Accept target I/O entry. */
-+/*
-+ * ISP queue - Accept Target I/O (ATIO) entry structure definition.
-+ */
-+typedef struct {
-+ uint8_t entry_type; /* Entry type. */
-+ uint8_t entry_count; /* Entry count. */
-+ uint8_t sys_define; /* System defined. */
-+ uint8_t entry_status; /* Entry Status. */
-+ uint32_t sys_define_2; /* System defined. */
-+ target_id_t target;
-+ uint16_t rx_id;
-+ uint16_t flags;
-+ uint16_t status;
-+ uint8_t command_ref;
-+ uint8_t task_codes;
-+ uint8_t task_flags;
-+ uint8_t execution_codes;
-+ uint8_t cdb[MAX_CMDSZ];
-+ uint32_t data_length;
-+ uint16_t lun;
-+ uint8_t initiator_port_name[WWN_SIZE]; /* on qla23xx */
-+ uint16_t reserved_32[6];
-+ uint16_t ox_id;
-+} __attribute__((packed)) atio_entry_t;
-+#endif
-+
-+#ifndef CONTINUE_TGT_IO_TYPE
-+#define CONTINUE_TGT_IO_TYPE 0x17
-+/*
-+ * ISP queue - Continue Target I/O (CTIO) entry for status mode 0
-+ * structure definition.
-+ */
-+typedef struct {
-+ uint8_t entry_type; /* Entry type. */
-+ uint8_t entry_count; /* Entry count. */
-+ uint8_t sys_define; /* System defined. */
-+ uint8_t entry_status; /* Entry Status. */
-+ uint32_t handle; /* System defined handle */
-+ target_id_t target;
-+ uint16_t rx_id;
-+ uint16_t flags;
-+ uint16_t status;
-+ uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */
-+ uint16_t dseg_count; /* Data segment count. */
-+ uint32_t relative_offset;
-+ uint32_t residual;
-+ uint16_t reserved_1[3];
-+ uint16_t scsi_status;
-+ uint32_t transfer_length;
-+ uint32_t dseg_0_address[0];
-+} __attribute__((packed)) ctio_common_entry_t;
-+#define ATIO_PATH_INVALID 0x07
-+#define ATIO_CANT_PROV_CAP 0x16
-+#define ATIO_CDB_VALID 0x3D
-+
-+#define ATIO_EXEC_READ BIT_1
-+#define ATIO_EXEC_WRITE BIT_0
-+#endif
-+
-+#ifndef CTIO_A64_TYPE
-+#define CTIO_A64_TYPE 0x1F
-+typedef struct {
-+ ctio_common_entry_t common;
-+ uint32_t dseg_0_address; /* Data segment 0 address. */
-+ uint32_t dseg_0_length; /* Data segment 0 length. */
-+ uint32_t dseg_1_address; /* Data segment 1 address. */
-+ uint32_t dseg_1_length; /* Data segment 1 length. */
-+ uint32_t dseg_2_address; /* Data segment 2 address. */
-+ uint32_t dseg_2_length; /* Data segment 2 length. */
-+} __attribute__((packed)) ctio_entry_t;
-+#define CTIO_SUCCESS 0x01
-+#define CTIO_ABORTED 0x02
-+#define CTIO_INVALID_RX_ID 0x08
-+#define CTIO_TIMEOUT 0x0B
-+#define CTIO_LIP_RESET 0x0E
-+#define CTIO_TARGET_RESET 0x17
-+#define CTIO_PORT_UNAVAILABLE 0x28
-+#define CTIO_PORT_LOGGED_OUT 0x29
-+#define CTIO_PORT_CONF_CHANGED 0x2A
-+#define CTIO_SRR_RECEIVED 0x45
-+
-+#endif
-+
-+#ifndef CTIO_RET_TYPE
-+#define CTIO_RET_TYPE 0x17 /* CTIO return entry */
-+/*
-+ * ISP queue - CTIO returned entry structure definition.
-+ */
-+typedef struct {
-+ uint8_t entry_type; /* Entry type. */
-+ uint8_t entry_count; /* Entry count. */
-+ uint8_t sys_define; /* System defined. */
-+ uint8_t entry_status; /* Entry Status. */
-+ uint32_t handle; /* System defined handle. */
-+ target_id_t target;
-+ uint16_t rx_id;
-+ uint16_t flags;
-+ uint16_t status;
-+ uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */
-+ uint16_t dseg_count; /* Data segment count. */
-+ uint32_t relative_offset;
-+ uint32_t residual;
-+ uint16_t reserved_1[2];
-+ uint16_t sense_length;
-+ uint16_t scsi_status;
-+ uint16_t response_length;
-+ uint8_t sense_data[26];
-+} __attribute__((packed)) ctio_ret_entry_t;
-+#endif
-+
-+#define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */
-+
-+typedef struct {
-+ uint8_t r_ctl;
-+ uint8_t d_id[3];
-+ uint8_t cs_ctl;
-+ uint8_t s_id[3];
-+ uint8_t type;
-+ uint8_t f_ctl[3];
-+ uint8_t seq_id;
-+ uint8_t df_ctl;
-+ uint16_t seq_cnt;
-+ uint16_t ox_id;
-+ uint16_t rx_id;
-+ uint32_t parameter;
-+} __attribute__((packed)) fcp_hdr_t;
-+
-+typedef struct {
-+ uint8_t d_id[3];
-+ uint8_t r_ctl;
-+ uint8_t s_id[3];
-+ uint8_t cs_ctl;
-+ uint8_t f_ctl[3];
-+ uint8_t type;
-+ uint16_t seq_cnt;
-+ uint8_t df_ctl;
-+ uint8_t seq_id;
-+ uint16_t rx_id;
-+ uint16_t ox_id;
-+ uint32_t parameter;
-+} __attribute__((packed)) fcp_hdr_le_t;
-+
-+#define F_CTL_EXCH_CONTEXT_RESP BIT_23
-+#define F_CTL_SEQ_CONTEXT_RESIP BIT_22
-+#define F_CTL_LAST_SEQ BIT_20
-+#define F_CTL_END_SEQ BIT_19
-+#define F_CTL_SEQ_INITIATIVE BIT_16
-+
-+#define R_CTL_BASIC_LINK_SERV 0x80
-+#define R_CTL_B_ACC 0x4
-+#define R_CTL_B_RJT 0x5
-+
-+typedef struct {
-+ uint64_t lun;
-+ uint8_t cmnd_ref;
-+#ifdef __LITTLE_ENDIAN
-+ uint8_t task_attr:3;
-+ uint8_t reserved:5;
-+#else
-+ uint8_t reserved:5;
-+ uint8_t task_attr:3;
-+#endif
-+ uint8_t task_mgmt_flags;
-+#define FCP_CMND_TASK_MGMT_CLEAR_ACA 6
-+#define FCP_CMND_TASK_MGMT_TARGET_RESET 5
-+#define FCP_CMND_TASK_MGMT_LU_RESET 4
-+#define FCP_CMND_TASK_MGMT_CLEAR_TASK_SET 2
-+#define FCP_CMND_TASK_MGMT_ABORT_TASK_SET 1
-+#ifdef __LITTLE_ENDIAN
-+ uint8_t wrdata:1;
-+ uint8_t rddata:1;
-+ uint8_t add_cdb_len:6;
-+#else
-+ uint8_t add_cdb_len:6;
-+ uint8_t rddata:1;
-+ uint8_t wrdata:1;
-+#endif
-+ uint8_t cdb[16];
-+ /*
-+ * add_cdb is optional and can be absent from fcp_cmnd_t. Size 4 only to
-+ * make sizeof(fcp_cmnd_t) be as expected by BUILD_BUG_ON() in
-+ * q2t_init().
-+ */
-+ uint8_t add_cdb[4];
-+ /* uint32_t data_length; */
-+} __attribute__((packed)) fcp_cmnd_t;
-+
-+/*
-+ * ISP queue - Accept Target I/O (ATIO) type 7 entry for 24xx structure
-+ * definition.
-+ */
-+typedef struct {
-+ uint8_t entry_type; /* Entry type. */
-+ uint8_t entry_count; /* Entry count. */
-+ uint8_t fcp_cmnd_len_low;
-+#ifdef __LITTLE_ENDIAN
-+ uint8_t fcp_cmnd_len_high:4;
-+ uint8_t attr:4;
-+#else
-+ uint8_t attr:4;
-+ uint8_t fcp_cmnd_len_high:4;
-+#endif
-+ uint32_t exchange_addr;
-+#define ATIO_EXCHANGE_ADDRESS_UNKNOWN 0xFFFFFFFF
-+ fcp_hdr_t fcp_hdr;
-+ fcp_cmnd_t fcp_cmnd;
-+} __attribute__((packed)) atio7_entry_t;
-+
-+#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
-+
-+/*
-+ * ISP queue - Continue Target I/O (ATIO) type 7 entry (for 24xx) structure
-+ * definition.
-+ */
-+
-+typedef struct {
-+ uint8_t entry_type; /* Entry type. */
-+ uint8_t entry_count; /* Entry count. */
-+ uint8_t sys_define; /* System defined. */
-+ uint8_t entry_status; /* Entry Status. */
-+ uint32_t handle; /* System defined handle */
-+ uint16_t nport_handle;
-+#define CTIO7_NHANDLE_UNRECOGNIZED 0xFFFF
-+ uint16_t timeout;
-+ uint16_t dseg_count; /* Data segment count. */
-+ uint8_t vp_index;
-+ uint8_t add_flags;
-+ uint8_t initiator_id[3];
-+ uint8_t reserved;
-+ uint32_t exchange_addr;
-+} __attribute__((packed)) ctio7_common_entry_t;
-+
-+typedef struct {
-+ ctio7_common_entry_t common;
-+ uint16_t reserved1;
-+ uint16_t flags;
-+ uint32_t residual;
-+ uint16_t ox_id;
-+ uint16_t scsi_status;
-+ uint32_t relative_offset;
-+ uint32_t reserved2;
-+ uint32_t transfer_length;
-+ uint32_t reserved3;
-+ uint32_t dseg_0_address[2]; /* Data segment 0 address. */
-+ uint32_t dseg_0_length; /* Data segment 0 length. */
-+} __attribute__((packed)) ctio7_status0_entry_t;
-+
-+typedef struct {
-+ ctio7_common_entry_t common;
-+ uint16_t sense_length;
-+ uint16_t flags;
-+ uint32_t residual;
-+ uint16_t ox_id;
-+ uint16_t scsi_status;
-+ uint16_t response_len;
-+ uint16_t reserved;
-+ uint8_t sense_data[24];
-+} __attribute__((packed)) ctio7_status1_entry_t;
-+
-+typedef struct {
-+ uint8_t entry_type; /* Entry type. */
-+ uint8_t entry_count; /* Entry count. */
-+ uint8_t sys_define; /* System defined. */
-+ uint8_t entry_status; /* Entry Status. */
-+ uint32_t handle; /* System defined handle */
-+ uint16_t status;
-+ uint16_t timeout;
-+ uint16_t dseg_count; /* Data segment count. */
-+ uint8_t vp_index;
-+ uint8_t reserved1[5];
-+ uint32_t exchange_address;
-+ uint16_t reserved2;
-+ uint16_t flags;
-+ uint32_t residual;
-+ uint16_t ox_id;
-+ uint16_t reserved3;
-+ uint32_t relative_offset;
-+ uint8_t reserved4[24];
-+} __attribute__((packed)) ctio7_fw_entry_t;
-+
-+/* CTIO7 flags values */
-+#define CTIO7_FLAGS_SEND_STATUS BIT_15
-+#define CTIO7_FLAGS_TERMINATE BIT_14
-+#define CTIO7_FLAGS_CONFORM_REQ BIT_13
-+#define CTIO7_FLAGS_DONT_RET_CTIO BIT_8
-+#define CTIO7_FLAGS_STATUS_MODE_0 0
-+#define CTIO7_FLAGS_STATUS_MODE_1 BIT_6
-+#define CTIO7_FLAGS_EXPLICIT_CONFORM BIT_5
-+#define CTIO7_FLAGS_CONFIRM_SATISF BIT_4
-+#define CTIO7_FLAGS_DSD_PTR BIT_2
-+#define CTIO7_FLAGS_DATA_IN BIT_1
-+#define CTIO7_FLAGS_DATA_OUT BIT_0
-+
-+/*
-+ * ISP queue - immediate notify entry structure definition for 24xx.
-+ */
-+typedef struct {
-+ uint8_t entry_type; /* Entry type. */
-+ uint8_t entry_count; /* Entry count. */
-+ uint8_t sys_define; /* System defined. */
-+ uint8_t entry_status; /* Entry Status. */
-+ uint32_t reserved;
-+ uint16_t nport_handle;
-+ uint16_t reserved_2;
-+ uint16_t flags;
-+#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1
-+#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0
-+ uint16_t srr_rx_id;
-+ uint16_t status;
-+ uint8_t status_subcode;
-+ uint8_t reserved_3;
-+ uint32_t exchange_address;
-+ uint32_t srr_rel_offs;
-+ uint16_t srr_ui;
-+ uint16_t srr_ox_id;
-+ uint8_t reserved_4[19];
-+ uint8_t vp_index;
-+ uint32_t reserved_5;
-+ uint8_t port_id[3];
-+ uint8_t reserved_6;
-+ uint16_t reserved_7;
-+ uint16_t ox_id;
-+} __attribute__((packed)) notify24xx_entry_t;
-+
-+#define ELS_PLOGI 0x3
-+#define ELS_FLOGI 0x4
-+#define ELS_LOGO 0x5
-+#define ELS_PRLI 0x20
-+#define ELS_PRLO 0x21
-+#define ELS_TPRLO 0x24
-+#define ELS_PDISC 0x50
-+#define ELS_ADISC 0x52
-+
-+/*
-+ * ISP queue - notify acknowledge entry structure definition for 24xx.
-+ */
-+typedef struct {
-+ uint8_t entry_type; /* Entry type. */
-+ uint8_t entry_count; /* Entry count. */
-+ uint8_t sys_define; /* System defined. */
-+ uint8_t entry_status; /* Entry Status. */
-+ uint32_t handle;
-+ uint16_t nport_handle;
-+ uint16_t reserved_1;
-+ uint16_t flags;
-+ uint16_t srr_rx_id;
-+ uint16_t status;
-+ uint8_t status_subcode;
-+ uint8_t reserved_3;
-+ uint32_t exchange_address;
-+ uint32_t srr_rel_offs;
-+ uint16_t srr_ui;
-+ uint16_t srr_flags;
-+ uint8_t reserved_4[19];
-+ uint8_t vp_index;
-+ uint8_t srr_reject_vendor_uniq;
-+ uint8_t srr_reject_code_expl;
-+ uint8_t srr_reject_code;
-+ uint8_t reserved_5[7];
-+ uint16_t ox_id;
-+} __attribute__((packed)) nack24xx_entry_t;
-+
-+/*
-+ * ISP queue - ABTS received/response entries structure definition for 24xx.
-+ */
-+#define ABTS_RECV_24XX 0x54 /* ABTS received (for 24xx) */
-+#define ABTS_RESP_24XX 0x55 /* ABTS response (for 24xx) */
-+
-+typedef struct {
-+ uint8_t entry_type; /* Entry type. */
-+ uint8_t entry_count; /* Entry count. */
-+ uint8_t sys_define; /* System defined. */
-+ uint8_t entry_status; /* Entry Status. */
-+ uint8_t reserved_1[6];
-+ uint16_t nport_handle;
-+ uint8_t reserved_2[2];
-+ uint8_t vp_index;
-+#ifdef __LITTLE_ENDIAN
-+ uint8_t reserved_3:4;
-+ uint8_t sof_type:4;
-+#else
-+ uint8_t sof_type:4;
-+ uint8_t reserved_3:4;
-+#endif
-+ uint32_t exchange_address;
-+ fcp_hdr_le_t fcp_hdr_le;
-+ uint8_t reserved_4[16];
-+ uint32_t exchange_addr_to_abort;
-+} __attribute__((packed)) abts24_recv_entry_t;
-+
-+#define ABTS_PARAM_ABORT_SEQ BIT_0
-+
-+typedef struct {
-+ uint16_t reserved;
-+ uint8_t seq_id_last;
-+ uint8_t seq_id_valid;
-+#define SEQ_ID_VALID 0x80
-+#define SEQ_ID_INVALID 0x00
-+ uint16_t rx_id;
-+ uint16_t ox_id;
-+ uint16_t high_seq_cnt;
-+ uint16_t low_seq_cnt;
-+} __attribute__((packed)) ba_acc_le_t;
-+
-+typedef struct {
-+ uint8_t vendor_uniq;
-+ uint8_t reason_expl;
-+ uint8_t reason_code;
-+#define BA_RJT_REASON_CODE_INVALID_COMMAND 0x1
-+#define BA_RJT_REASON_CODE_UNABLE_TO_PERFORM 0x9
-+ uint8_t reserved;
-+} __attribute__((packed)) ba_rjt_le_t;
-+
-+typedef struct {
-+ uint8_t entry_type; /* Entry type. */
-+ uint8_t entry_count; /* Entry count. */
-+ uint8_t sys_define; /* System defined. */
-+ uint8_t entry_status; /* Entry Status. */
-+ uint32_t handle;
-+ uint16_t reserved_1;
-+ uint16_t nport_handle;
-+ uint16_t control_flags;
-+#define ABTS_CONTR_FLG_TERM_EXCHG BIT_0
-+ uint8_t vp_index;
-+#ifdef __LITTLE_ENDIAN
-+ uint8_t reserved_3:4;
-+ uint8_t sof_type:4;
-+#else
-+ uint8_t sof_type:4;
-+ uint8_t reserved_3:4;
-+#endif
-+ uint32_t exchange_address;
-+ fcp_hdr_le_t fcp_hdr_le;
-+ union {
-+ ba_acc_le_t ba_acct;
-+ ba_rjt_le_t ba_rjt;
-+ } __attribute__((packed)) payload;
-+ uint32_t reserved_4;
-+ uint32_t exchange_addr_to_abort;
-+} __attribute__((packed)) abts24_resp_entry_t;
-+
-+typedef struct {
-+ uint8_t entry_type; /* Entry type. */
-+ uint8_t entry_count; /* Entry count. */
-+ uint8_t sys_define; /* System defined. */
-+ uint8_t entry_status; /* Entry Status. */
-+ uint32_t handle;
-+ uint16_t compl_status;
-+#define ABTS_RESP_COMPL_SUCCESS 0
-+#define ABTS_RESP_COMPL_SUBCODE_ERROR 0x31
-+ uint16_t nport_handle;
-+ uint16_t reserved_1;
-+ uint8_t reserved_2;
-+#ifdef __LITTLE_ENDIAN
-+ uint8_t reserved_3:4;
-+ uint8_t sof_type:4;
-+#else
-+ uint8_t sof_type:4;
-+ uint8_t reserved_3:4;
-+#endif
-+ uint32_t exchange_address;
-+ fcp_hdr_le_t fcp_hdr_le;
-+ uint8_t reserved_4[8];
-+ uint32_t error_subcode1;
-+#define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM 0x1E
-+ uint32_t error_subcode2;
-+ uint32_t exchange_addr_to_abort;
-+} __attribute__((packed)) abts24_resp_fw_entry_t;
-+
-+/********************************************************************\
-+ * Type Definitions used by initiator & target halves
-+\********************************************************************/
-+
-+typedef enum {
-+ ADD_TARGET = 0,
-+ REMOVE_TARGET,
-+ DISABLE_TARGET_MODE,
-+ ENABLE_TARGET_MODE,
-+} qla2x_tgt_host_action_t;
-+
-+/* When changing this, don't forget to change QLA2X_TARGET_MAGIC! */
-+struct qla_tgt_data {
-+ int magic;
-+
-+ /* Callbacks */
-+ void (*tgt24_atio_pkt)(scsi_qla_host_t *ha, atio7_entry_t *pkt);
-+ void (*tgt_response_pkt)(scsi_qla_host_t *ha, response_t *pkt);
-+ void (*tgt2x_ctio_completion)(scsi_qla_host_t *ha, uint32_t handle);
-+ void (*tgt_async_event)(uint16_t code, scsi_qla_host_t *ha,
-+ uint16_t *mailbox);
-+ int (*tgt_host_action)(scsi_qla_host_t *ha, qla2x_tgt_host_action_t
-+ action);
-+ void (*tgt_fc_port_added)(scsi_qla_host_t *ha, fc_port_t *fcport);
-+ void (*tgt_fc_port_deleted)(scsi_qla_host_t *ha, fc_port_t *fcport);
-+};
-+
-+int qla2xxx_tgt_register_driver(struct qla_tgt_data *tgt);
-+
-+void qla2xxx_tgt_unregister_driver(void);
-+
-+int qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha);
-+int qla2x00_wait_for_hba_online(scsi_qla_host_t *ha);
-+
-+#endif /* __QLA2X_TGT_DEF_H */
-diff -uprN orig/linux-3.2/drivers/scst/qla2xxx-target/Makefile linux-3.2/drivers/scst/qla2xxx-target/Makefile
---- orig/linux-3.2/drivers/scst/qla2xxx-target/Makefile
-+++ linux-3.2/drivers/scst/qla2xxx-target/Makefile
-@@ -0,0 +1,5 @@
-+ccflags-y += -Idrivers/scsi/qla2xxx
-+
-+qla2x00tgt-y := qla2x00t.o
-+
-+obj-$(CONFIG_SCST_QLA_TGT_ADDON) += qla2x00tgt.o
-diff -uprN orig/linux-3.2/drivers/scst/qla2xxx-target/Kconfig linux-3.2/drivers/scst/qla2xxx-target/Kconfig
---- orig/linux-3.2/drivers/scst/qla2xxx-target/Kconfig
-+++ linux-3.2/drivers/scst/qla2xxx-target/Kconfig
-@@ -0,0 +1,30 @@
-+config SCST_QLA_TGT_ADDON
-+ tristate "QLogic 2XXX Target Mode Add-On"
-+ depends on SCST && SCSI_QLA_FC && SCSI_QLA2XXX_TARGET
-+ default SCST
-+ help
-+ Target mode add-on driver for QLogic 2xxx Fibre Channel host adapters.
-+ Visit http://scst.sourceforge.net for more info about this driver.
-+
-+config QLA_TGT_DEBUG_WORK_IN_THREAD
-+ bool "Use threads context only"
-+ depends on SCST_QLA_TGT_ADDON
-+ help
-+ Makes SCST process incoming commands from the qla2x00t target
-+ driver and call the driver's callbacks in internal SCST
-+ threads context instead of the SIRQ context where these commands
-+ were received. Useful for debugging, but leads to some
-+ performance loss.
-+
-+ If unsure, say "N".
-+
-+config QLA_TGT_DEBUG_SRR
-+ bool "SRR debugging"
-+ depends on SCST_QLA_TGT_ADDON
-+ help
-+ Turns on retransmitted packets (SRR) debugging. In this mode
-+ some CTIOs will be "broken" to force the initiator to issue a
-+ retransmit request. Useful for debugging, but leads to a big
-+ performance loss.
-+
-+ If unsure, say "N".
-diff -uprN orig/linux-3.2/drivers/scst/qla2xxx-target/qla2x00t.c linux-3.2/drivers/scst/qla2xxx-target/qla2x00t.c
---- orig/linux-3.2/drivers/scst/qla2xxx-target/qla2x00t.c
-+++ linux-3.2/drivers/scst/qla2xxx-target/qla2x00t.c
-@@ -0,0 +1,6448 @@
-+/*
-+ * qla2x00t.c
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
-+ * Copyright (C) 2006 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * QLogic 22xx/23xx/24xx/25xx FC target driver.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/types.h>
-+#include <linux/version.h>
-+#include <linux/blkdev.h>
-+#include <linux/interrupt.h>
-+#include <scsi/scsi.h>
-+#include <scsi/scsi_host.h>
-+#include <linux/pci.h>
-+#include <linux/delay.h>
-+#include <linux/list.h>
-+#include <asm/unaligned.h>
-+
-+#include <scst/scst.h>
-+
-+#include "qla2x00t.h"
-+
-+/*
-+ * This driver calls qla2x00_req_pkt() and qla2x00_issue_marker(), which
-+ * must be called under HW lock and could unlock/lock it inside.
-+ * It isn't an issue, since in the current implementation, at the time when
-+ * those functions are called:
-+ *
-+ * - Either context is IRQ and only IRQ handler can modify HW data,
-+ * including rings related fields,
-+ *
-+ * - Or access to target mode variables from struct q2t_tgt doesn't
-+ * cross those functions' boundaries, except tgt_stop, which is
-+ * additionally protected by irq_cmd_count.
-+ */
-+
-+#ifndef CONFIG_SCSI_QLA2XXX_TARGET
-+#error "CONFIG_SCSI_QLA2XXX_TARGET is NOT DEFINED"
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG
-+#define Q2T_DEFAULT_LOG_FLAGS (TRACE_FUNCTION | TRACE_LINE | TRACE_PID | \
-+ TRACE_OUT_OF_MEM | TRACE_MGMT | TRACE_MGMT_DEBUG | \
-+ TRACE_MINOR | TRACE_SPECIAL)
-+#else
-+# ifdef CONFIG_SCST_TRACING
-+#define Q2T_DEFAULT_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MGMT | \
-+ TRACE_SPECIAL)
-+# endif
-+#endif
-+
-+static int q2t_target_detect(struct scst_tgt_template *templ);
-+static int q2t_target_release(struct scst_tgt *scst_tgt);
-+static int q2x_xmit_response(struct scst_cmd *scst_cmd);
-+static int __q24_xmit_response(struct q2t_cmd *cmd, int xmit_type);
-+static int q2t_rdy_to_xfer(struct scst_cmd *scst_cmd);
-+static void q2t_on_free_cmd(struct scst_cmd *scst_cmd);
-+static void q2t_task_mgmt_fn_done(struct scst_mgmt_cmd *mcmd);
-+static int q2t_get_initiator_port_transport_id(struct scst_tgt *tgt,
-+ struct scst_session *scst_sess, uint8_t **transport_id);
-+
-+/* Predefs for callbacks handed to qla2xxx(target) */
-+static void q24_atio_pkt(scsi_qla_host_t *ha, atio7_entry_t *pkt);
-+static void q2t_response_pkt(scsi_qla_host_t *ha, response_t *pkt);
-+static void q2t_async_event(uint16_t code, scsi_qla_host_t *ha,
-+ uint16_t *mailbox);
-+static void q2x_ctio_completion(scsi_qla_host_t *ha, uint32_t handle);
-+static int q2t_host_action(scsi_qla_host_t *ha,
-+ qla2x_tgt_host_action_t action);
-+static void q2t_fc_port_added(scsi_qla_host_t *ha, fc_port_t *fcport);
-+static void q2t_fc_port_deleted(scsi_qla_host_t *ha, fc_port_t *fcport);
-+static int q2t_issue_task_mgmt(struct q2t_sess *sess, uint8_t *lun,
-+ int lun_size, int fn, void *iocb, int flags);
-+static void q2x_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
-+ atio_entry_t *atio, int ha_locked);
-+static void q24_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
-+ atio7_entry_t *atio, int ha_locked);
-+static void q2t_reject_free_srr_imm(scsi_qla_host_t *ha, struct srr_imm *imm,
-+ int ha_lock);
-+static int q2t_cut_cmd_data_head(struct q2t_cmd *cmd, unsigned int offset);
-+static void q2t_clear_tgt_db(struct q2t_tgt *tgt, bool local_only);
-+static void q2t_on_hw_pending_cmd_timeout(struct scst_cmd *scst_cmd);
-+static int q2t_unreg_sess(struct q2t_sess *sess);
-+static uint16_t q2t_get_scsi_transport_version(struct scst_tgt *scst_tgt);
-+static uint16_t q2t_get_phys_transport_version(struct scst_tgt *scst_tgt);
-+
-+/** SYSFS **/
-+
-+static ssize_t q2t_version_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf);
-+
-+struct kobj_attribute q2t_version_attr =
-+ __ATTR(version, S_IRUGO, q2t_version_show, NULL);
-+
-+static const struct attribute *q2t_attrs[] = {
-+ &q2t_version_attr.attr,
-+ NULL,
-+};
-+
-+static ssize_t q2t_show_expl_conf_enabled(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buffer);
-+static ssize_t q2t_store_expl_conf_enabled(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buffer, size_t size);
-+
-+struct kobj_attribute q2t_expl_conf_attr =
-+ __ATTR(explicit_confirmation, S_IRUGO|S_IWUSR,
-+ q2t_show_expl_conf_enabled, q2t_store_expl_conf_enabled);
-+
-+static ssize_t q2t_abort_isp_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buffer, size_t size);
-+
-+struct kobj_attribute q2t_abort_isp_attr =
-+ __ATTR(abort_isp, S_IWUSR, NULL, q2t_abort_isp_store);
-+
-+static ssize_t q2t_hw_target_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf);
-+
-+static struct kobj_attribute q2t_hw_target_attr =
-+ __ATTR(hw_target, S_IRUGO, q2t_hw_target_show, NULL);
-+
-+static ssize_t q2t_node_name_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf);
-+
-+static struct kobj_attribute q2t_vp_node_name_attr =
-+ __ATTR(node_name, S_IRUGO, q2t_node_name_show, NULL);
-+
-+static ssize_t q2t_node_name_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buffer, size_t size);
-+
-+static struct kobj_attribute q2t_hw_node_name_attr =
-+ __ATTR(node_name, S_IRUGO|S_IWUSR, q2t_node_name_show,
-+ q2t_node_name_store);
-+
-+static ssize_t q2t_vp_parent_host_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf);
-+
-+static struct kobj_attribute q2t_vp_parent_host_attr =
-+ __ATTR(parent_host, S_IRUGO, q2t_vp_parent_host_show, NULL);
-+
-+static const struct attribute *q2t_tgt_attrs[] = {
-+ &q2t_expl_conf_attr.attr,
-+ &q2t_abort_isp_attr.attr,
-+ NULL,
-+};
-+
-+static int q2t_enable_tgt(struct scst_tgt *tgt, bool enable);
-+static bool q2t_is_tgt_enabled(struct scst_tgt *tgt);
-+static ssize_t q2t_add_vtarget(const char *target_name, char *params);
-+static ssize_t q2t_del_vtarget(const char *target_name);
-+
-+/*
-+ * Global Variables
-+ */
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+#define trace_flag q2t_trace_flag
-+static unsigned long q2t_trace_flag = Q2T_DEFAULT_LOG_FLAGS;
-+#endif
-+
-+static struct scst_tgt_template tgt2x_template = {
-+ .name = "qla2x00t",
-+ .sg_tablesize = 0,
-+ .use_clustering = 1,
-+#ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
-+ .xmit_response_atomic = 0,
-+ .rdy_to_xfer_atomic = 0,
-+#else
-+ .xmit_response_atomic = 1,
-+ .rdy_to_xfer_atomic = 1,
-+#endif
-+ .max_hw_pending_time = Q2T_MAX_HW_PENDING_TIME,
-+ .detect = q2t_target_detect,
-+ .release = q2t_target_release,
-+ .xmit_response = q2x_xmit_response,
-+ .rdy_to_xfer = q2t_rdy_to_xfer,
-+ .on_free_cmd = q2t_on_free_cmd,
-+ .task_mgmt_fn_done = q2t_task_mgmt_fn_done,
-+ .get_initiator_port_transport_id = q2t_get_initiator_port_transport_id,
-+ .get_scsi_transport_version = q2t_get_scsi_transport_version,
-+ .get_phys_transport_version = q2t_get_phys_transport_version,
-+ .on_hw_pending_cmd_timeout = q2t_on_hw_pending_cmd_timeout,
-+ .enable_target = q2t_enable_tgt,
-+ .is_target_enabled = q2t_is_tgt_enabled,
-+ .add_target = q2t_add_vtarget,
-+ .del_target = q2t_del_vtarget,
-+ .add_target_parameters = "node_name, parent_host",
-+ .tgtt_attrs = q2t_attrs,
-+ .tgt_attrs = q2t_tgt_attrs,
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ .default_trace_flags = Q2T_DEFAULT_LOG_FLAGS,
-+ .trace_flags = &trace_flag,
-+#endif
-+};
-+
-+static struct kmem_cache *q2t_cmd_cachep;
-+static struct kmem_cache *q2t_mgmt_cmd_cachep;
-+static mempool_t *q2t_mgmt_cmd_mempool;
-+
-+static DECLARE_RWSEM(q2t_unreg_rwsem);
-+
-+/* It's not yet supported */
-+static inline int scst_cmd_get_ppl_offset(struct scst_cmd *scst_cmd)
-+{
-+ return 0;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static inline void q2t_sess_get(struct q2t_sess *sess)
-+{
-+ sess->sess_ref++;
-+ TRACE_DBG("sess %p, new sess_ref %d", sess, sess->sess_ref);
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static inline void q2t_sess_put(struct q2t_sess *sess)
-+{
-+ TRACE_DBG("sess %p, new sess_ref %d", sess, sess->sess_ref-1);
-+ BUG_ON(sess->sess_ref == 0);
-+
-+ sess->sess_ref--;
-+ if (sess->sess_ref == 0)
-+ q2t_unreg_sess(sess);
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
-+static inline struct q2t_sess *q2t_find_sess_by_loop_id(struct q2t_tgt *tgt,
-+ uint16_t loop_id)
-+{
-+ struct q2t_sess *sess;
-+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
-+ if ((loop_id == sess->loop_id) && !sess->deleted)
-+ return sess;
-+ }
-+ return NULL;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
-+static inline struct q2t_sess *q2t_find_sess_by_s_id_include_deleted(
-+ struct q2t_tgt *tgt, const uint8_t *s_id)
-+{
-+ struct q2t_sess *sess;
-+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
-+ if ((sess->s_id.b.al_pa == s_id[2]) &&
-+ (sess->s_id.b.area == s_id[1]) &&
-+ (sess->s_id.b.domain == s_id[0]))
-+ return sess;
-+ }
-+ return NULL;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
-+static inline struct q2t_sess *q2t_find_sess_by_s_id(struct q2t_tgt *tgt,
-+ const uint8_t *s_id)
-+{
-+ struct q2t_sess *sess;
-+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
-+ if ((sess->s_id.b.al_pa == s_id[2]) &&
-+ (sess->s_id.b.area == s_id[1]) &&
-+ (sess->s_id.b.domain == s_id[0]) &&
-+ !sess->deleted)
-+ return sess;
-+ }
-+ return NULL;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
-+static inline struct q2t_sess *q2t_find_sess_by_s_id_le(struct q2t_tgt *tgt,
-+ const uint8_t *s_id)
-+{
-+ struct q2t_sess *sess;
-+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
-+ if ((sess->s_id.b.al_pa == s_id[0]) &&
-+ (sess->s_id.b.area == s_id[1]) &&
-+ (sess->s_id.b.domain == s_id[2]) &&
-+ !sess->deleted)
-+ return sess;
-+ }
-+ return NULL;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
-+static inline struct q2t_sess *q2t_find_sess_by_port_name(struct q2t_tgt *tgt,
-+ const uint8_t *port_name)
-+{
-+ struct q2t_sess *sess;
-+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
-+ if ((sess->port_name[0] == port_name[0]) &&
-+ (sess->port_name[1] == port_name[1]) &&
-+ (sess->port_name[2] == port_name[2]) &&
-+ (sess->port_name[3] == port_name[3]) &&
-+ (sess->port_name[4] == port_name[4]) &&
-+ (sess->port_name[5] == port_name[5]) &&
-+ (sess->port_name[6] == port_name[6]) &&
-+ (sess->port_name[7] == port_name[7]))
-+ return sess;
-+ }
-+ return NULL;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static inline void q2t_exec_queue(scsi_qla_host_t *ha)
-+{
-+ qla2x00_isp_cmd(to_qla_parent(ha));
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static inline request_t *q2t_req_pkt(scsi_qla_host_t *ha)
-+{
-+ return qla2x00_req_pkt(to_qla_parent(ha));
-+}
-+
-+/* Might release hw lock, then reacquire!! */
-+static inline int q2t_issue_marker(scsi_qla_host_t *ha, int ha_locked)
-+{
-+ /* Send marker if required */
-+ if (unlikely(to_qla_parent(ha)->marker_needed != 0)) {
-+ int rc = qla2x00_issue_marker(ha, ha_locked);
-+ if (rc != QLA_SUCCESS) {
-+ PRINT_ERROR("qla2x00t(%ld): issue_marker() "
-+ "failed", ha->instance);
-+ }
-+ return rc;
-+ }
-+ return QLA_SUCCESS;
-+}
-+
-+static inline
-+scsi_qla_host_t *q2t_find_host_by_d_id(scsi_qla_host_t *ha, uint8_t *d_id)
-+{
-+ if ((ha->d_id.b.area != d_id[1]) || (ha->d_id.b.domain != d_id[0]))
-+ return NULL;
-+
-+ if (ha->d_id.b.al_pa == d_id[2])
-+ return ha;
-+
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ uint8_t vp_idx;
-+ BUG_ON(ha->tgt_vp_map == NULL);
-+ vp_idx = ha->tgt_vp_map[d_id[2]].idx;
-+ if (likely(test_bit(vp_idx, ha->vp_idx_map)))
-+ return ha->tgt_vp_map[vp_idx].vha;
-+ }
-+
-+ return NULL;
-+}
-+
-+static inline
-+scsi_qla_host_t *q2t_find_host_by_vp_idx(scsi_qla_host_t *ha, uint16_t vp_idx)
-+{
-+ if (ha->vp_idx == vp_idx)
-+ return ha;
-+
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ BUG_ON(ha->tgt_vp_map == NULL);
-+ if (likely(test_bit(vp_idx, ha->vp_idx_map)))
-+ return ha->tgt_vp_map[vp_idx].vha;
-+ }
-+
-+ return NULL;
-+}
-+
-+static void q24_atio_pkt_all_vps(scsi_qla_host_t *ha, atio7_entry_t *atio)
-+{
-+ TRACE_ENTRY();
-+
-+ BUG_ON(ha == NULL);
-+
-+ switch (atio->entry_type) {
-+ case ATIO_TYPE7:
-+ {
-+ scsi_qla_host_t *host = q2t_find_host_by_d_id(ha, atio->fcp_hdr.d_id);
-+ if (unlikely(NULL == host)) {
-+ /*
-+ * It might happen because there is a small gap between
-+ * requesting the DPC thread to update the loop and the
-+ * actual update. It is harmless and should work well on
-+ * the next retry.
-+ */
-+ PRINT_WARNING("qla2x00t(%ld): Received ATIO_TYPE7 "
-+ "with unknown d_id %x:%x:%x", ha->instance,
-+ atio->fcp_hdr.d_id[0], atio->fcp_hdr.d_id[1],
-+ atio->fcp_hdr.d_id[2]);
-+ break;
-+ }
-+ q24_atio_pkt(host, atio);
-+ break;
-+ }
-+
-+ case IMMED_NOTIFY_TYPE:
-+ {
-+ scsi_qla_host_t *host = ha;
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ notify24xx_entry_t *entry = (notify24xx_entry_t *)atio;
-+ if ((entry->vp_index != 0xFF) &&
-+ (entry->nport_handle != 0xFFFF)) {
-+ host = q2t_find_host_by_vp_idx(ha,
-+ entry->vp_index);
-+ if (unlikely(!host)) {
-+ PRINT_ERROR("qla2x00t(%ld): Received "
-+ "ATIO (IMMED_NOTIFY_TYPE) "
-+ "with unknown vp_index %d",
-+ ha->instance, entry->vp_index);
-+ break;
-+ }
-+ }
-+ }
-+ q24_atio_pkt(host, atio);
-+ break;
-+ }
-+
-+ default:
-+ PRINT_ERROR("qla2x00t(%ld): Received unknown ATIO atio "
-+ "type %x", ha->instance, atio->entry_type);
-+ break;
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void q2t_response_pkt_all_vps(scsi_qla_host_t *ha, response_t *pkt)
-+{
-+ TRACE_ENTRY();
-+
-+ BUG_ON(ha == NULL);
-+
-+ switch (pkt->entry_type) {
-+ case CTIO_TYPE7:
-+ {
-+ ctio7_fw_entry_t *entry = (ctio7_fw_entry_t *)pkt;
-+ scsi_qla_host_t *host = q2t_find_host_by_vp_idx(ha,
-+ entry->vp_index);
-+ if (unlikely(!host)) {
-+ PRINT_ERROR("qla2x00t(%ld): Response pkt (CTIO_TYPE7) "
-+ "received, with unknown vp_index %d",
-+ ha->instance, entry->vp_index);
-+ break;
-+ }
-+ q2t_response_pkt(host, pkt);
-+ break;
-+ }
-+
-+ case IMMED_NOTIFY_TYPE:
-+ {
-+ scsi_qla_host_t *host = ha;
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ notify24xx_entry_t *entry = (notify24xx_entry_t *)pkt;
-+ host = q2t_find_host_by_vp_idx(ha, entry->vp_index);
-+ if (unlikely(!host)) {
-+ PRINT_ERROR("qla2x00t(%ld): Response pkt "
-+ "(IMMED_NOTIFY_TYPE) received, "
-+ "with unknown vp_index %d",
-+ ha->instance, entry->vp_index);
-+ break;
-+ }
-+ }
-+ q2t_response_pkt(host, pkt);
-+ break;
-+ }
-+
-+ case NOTIFY_ACK_TYPE:
-+ {
-+ scsi_qla_host_t *host = ha;
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ nack24xx_entry_t *entry = (nack24xx_entry_t *)pkt;
-+ if (0xFF != entry->vp_index) {
-+ host = q2t_find_host_by_vp_idx(ha,
-+ entry->vp_index);
-+ if (unlikely(!host)) {
-+ PRINT_ERROR("qla2x00t(%ld): Response "
-+ "pkt (NOTIFY_ACK_TYPE) "
-+ "received, with unknown "
-+ "vp_index %d", ha->instance,
-+ entry->vp_index);
-+ break;
-+ }
-+ }
-+ }
-+ q2t_response_pkt(host, pkt);
-+ break;
-+ }
-+
-+ case ABTS_RECV_24XX:
-+ {
-+ abts24_recv_entry_t *entry = (abts24_recv_entry_t *)pkt;
-+ scsi_qla_host_t *host = q2t_find_host_by_vp_idx(ha,
-+ entry->vp_index);
-+ if (unlikely(!host)) {
-+ PRINT_ERROR("qla2x00t(%ld): Response pkt "
-+ "(ABTS_RECV_24XX) received, with unknown "
-+ "vp_index %d", ha->instance, entry->vp_index);
-+ break;
-+ }
-+ q2t_response_pkt(host, pkt);
-+ break;
-+ }
-+
-+ case ABTS_RESP_24XX:
-+ {
-+ abts24_resp_entry_t *entry = (abts24_resp_entry_t *)pkt;
-+ scsi_qla_host_t *host = q2t_find_host_by_vp_idx(ha,
-+ entry->vp_index);
-+ if (unlikely(!host)) {
-+ PRINT_ERROR("qla2x00t(%ld): Response pkt "
-+ "(ABTS_RECV_24XX) received, with unknown "
-+ "vp_index %d", ha->instance, entry->vp_index);
-+ break;
-+ }
-+ q2t_response_pkt(host, pkt);
-+ break;
-+ }
-+
-+ default:
-+ q2t_response_pkt(ha, pkt);
-+ break;
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * Registers with the initiator driver (but target mode isn't enabled
-+ * until it's turned on via sysfs)
-+ */
-+static int q2t_target_detect(struct scst_tgt_template *tgtt)
-+{
-+ int res, rc;
-+ struct qla_tgt_data t = {
-+ .magic = QLA2X_TARGET_MAGIC,
-+ .tgt24_atio_pkt = q24_atio_pkt_all_vps,
-+ .tgt_response_pkt = q2t_response_pkt_all_vps,
-+ .tgt2x_ctio_completion = q2x_ctio_completion,
-+ .tgt_async_event = q2t_async_event,
-+ .tgt_host_action = q2t_host_action,
-+ .tgt_fc_port_added = q2t_fc_port_added,
-+ .tgt_fc_port_deleted = q2t_fc_port_deleted,
-+ };
-+
-+ TRACE_ENTRY();
-+
-+ rc = qla2xxx_tgt_register_driver(&t);
-+ if (rc < 0) {
-+ res = rc;
-+ PRINT_ERROR("qla2x00t: Unable to register driver: %d", res);
-+ goto out;
-+ }
-+
-+ if (rc != QLA2X_INITIATOR_MAGIC) {
-+ PRINT_ERROR("qla2x00t: Wrong version of the initiator part: "
-+ "%d", rc);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ qla2xxx_add_targets();
-+
-+ res = 0;
-+
-+ PRINT_INFO("qla2x00t: %s", "Target mode driver for QLogic 2x00 controller "
-+ "registered successfully");
-+
-+out:
-+ TRACE_EXIT();
-+ return res;
-+}
-+
-+static void q2t_free_session_done(struct scst_session *scst_sess)
-+{
-+ struct q2t_sess *sess;
-+ struct q2t_tgt *tgt;
-+ scsi_qla_host_t *ha, *pha;
-+ unsigned long flags;
-+
-+ TRACE_ENTRY();
-+
-+ BUG_ON(scst_sess == NULL);
-+ sess = (struct q2t_sess *)scst_sess_get_tgt_priv(scst_sess);
-+ BUG_ON(sess == NULL);
-+ tgt = sess->tgt;
-+
-+ TRACE_MGMT_DBG("Unregistration of sess %p finished", sess);
-+
-+ kfree(sess);
-+
-+ if (tgt == NULL)
-+ goto out;
-+
-+ TRACE_DBG("empty(sess_list) %d sess_count %d",
-+ list_empty(&tgt->sess_list), tgt->sess_count);
-+
-+ ha = tgt->ha;
-+ pha = to_qla_parent(ha);
-+
-+	/*
-+	 * We need to protect against the race when tgt is freed before
-+	 * or inside wake_up()
-+	 */
-+ spin_lock_irqsave(&pha->hardware_lock, flags);
-+ tgt->sess_count--;
-+ if (tgt->sess_count == 0)
-+ wake_up_all(&tgt->waitQ);
-+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static int q2t_unreg_sess(struct q2t_sess *sess)
-+{
-+ int res = 1;
-+
-+ TRACE_ENTRY();
-+
-+ BUG_ON(sess == NULL);
-+ BUG_ON(sess->sess_ref != 0);
-+
-+ TRACE_MGMT_DBG("Deleting sess %p from tgt %p", sess, sess->tgt);
-+ list_del(&sess->sess_list_entry);
-+
-+ if (sess->deleted)
-+ list_del(&sess->del_list_entry);
-+
-+ PRINT_INFO("qla2x00t(%ld): %ssession for loop_id %d deleted",
-+ sess->tgt->ha->instance, sess->local ? "local " : "",
-+ sess->loop_id);
-+
-+ scst_unregister_session(sess->scst_sess, 0, q2t_free_session_done);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static int q2t_reset(scsi_qla_host_t *ha, void *iocb, int mcmd)
-+{
-+ struct q2t_sess *sess;
-+ int loop_id;
-+ uint16_t lun = 0;
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ notify24xx_entry_t *n = (notify24xx_entry_t *)iocb;
-+ loop_id = le16_to_cpu(n->nport_handle);
-+ } else
-+ loop_id = GET_TARGET_ID(ha, (notify_entry_t *)iocb);
-+
-+ if (loop_id == 0xFFFF) {
-+ /* Global event */
-+ atomic_inc(&ha->tgt->tgt_global_resets_count);
-+ q2t_clear_tgt_db(ha->tgt, 1);
-+ if (!list_empty(&ha->tgt->sess_list)) {
-+ sess = list_entry(ha->tgt->sess_list.next,
-+ typeof(*sess), sess_list_entry);
-+ switch (mcmd) {
-+ case Q2T_NEXUS_LOSS_SESS:
-+ mcmd = Q2T_NEXUS_LOSS;
-+ break;
-+
-+ case Q2T_ABORT_ALL_SESS:
-+ mcmd = Q2T_ABORT_ALL;
-+ break;
-+
-+ case Q2T_NEXUS_LOSS:
-+ case Q2T_ABORT_ALL:
-+ break;
-+
-+ default:
-+ PRINT_ERROR("qla2x00t(%ld): Not allowed "
-+ "command %x in %s", ha->instance,
-+ mcmd, __func__);
-+ sess = NULL;
-+ break;
-+ }
-+ } else
-+ sess = NULL;
-+ } else
-+ sess = q2t_find_sess_by_loop_id(ha->tgt, loop_id);
-+
-+ if (sess == NULL) {
-+ res = -ESRCH;
-+ ha->tgt->tm_to_unknown = 1;
-+ goto out;
-+ }
-+
-+ TRACE_MGMT_DBG("scsi(%ld): resetting (session %p from port "
-+ "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
-+ "mcmd %x, loop_id %d)", ha->host_no, sess,
-+ sess->port_name[0], sess->port_name[1],
-+ sess->port_name[2], sess->port_name[3],
-+ sess->port_name[4], sess->port_name[5],
-+ sess->port_name[6], sess->port_name[7],
-+ mcmd, loop_id);
-+
-+ res = q2t_issue_task_mgmt(sess, (uint8_t *)&lun, sizeof(lun),
-+ mcmd, iocb, Q24_MGMT_SEND_NACK);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static void q2t_schedule_sess_for_deletion(struct q2t_sess *sess)
-+{
-+ struct q2t_tgt *tgt = sess->tgt;
-+ uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
-+ bool schedule;
-+
-+ TRACE_ENTRY();
-+
-+ if (sess->deleted)
-+ goto out;
-+
-+	/*
-+	 * If the list is empty, the deletion work is most likely not
-+	 * scheduled yet.
-+	 */
-+ schedule = list_empty(&tgt->del_sess_list);
-+
-+ TRACE_MGMT_DBG("Scheduling sess %p for deletion (schedule %d)", sess,
-+ schedule);
-+ list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
-+ sess->deleted = 1;
-+ sess->expires = jiffies + dev_loss_tmo * HZ;
-+
-+ PRINT_INFO("qla2x00t(%ld): session for port %02x:%02x:%02x:"
-+ "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
-+ "deletion in %d secs", tgt->ha->instance,
-+ sess->port_name[0], sess->port_name[1],
-+ sess->port_name[2], sess->port_name[3],
-+ sess->port_name[4], sess->port_name[5],
-+ sess->port_name[6], sess->port_name[7],
-+ sess->loop_id, dev_loss_tmo);
-+
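-+	/*
-+	 * The delayed work below is scheduled with the relative timeout
-+	 * (sess->expires - jiffies), i.e. dev_loss_tmo seconds from now
-+	 * (for example, 35 seconds for a port_down_retry_count of 30).
-+	 */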
-+ if (schedule)
-+		schedule_delayed_work(&tgt->sess_del_work,
-+			sess->expires - jiffies);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static void q2t_clear_tgt_db(struct q2t_tgt *tgt, bool local_only)
-+{
-+ struct q2t_sess *sess, *sess_tmp;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE(TRACE_MGMT, "qla2x00t: Clearing targets DB for target %p", tgt);
-+
-+ list_for_each_entry_safe(sess, sess_tmp, &tgt->sess_list,
-+ sess_list_entry) {
-+ if (local_only) {
-+ if (!sess->local)
-+ continue;
-+ q2t_schedule_sess_for_deletion(sess);
-+ } else
-+ q2t_sess_put(sess);
-+ }
-+
-+ /* At this point tgt could be already dead */
-+
-+ TRACE_MGMT_DBG("Finished clearing tgt %p DB", tgt);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Called in a thread context */
-+static void q2t_alloc_session_done(struct scst_session *scst_sess,
-+ void *data, int result)
-+{
-+ TRACE_ENTRY();
-+
-+ if (result != 0) {
-+ struct q2t_sess *sess = (struct q2t_sess *)data;
-+ struct q2t_tgt *tgt = sess->tgt;
-+ scsi_qla_host_t *ha = tgt->ha;
-+ scsi_qla_host_t *pha = to_qla_parent(ha);
-+ unsigned long flags;
-+
-+ PRINT_INFO("qla2x00t(%ld): Session initialization failed",
-+ ha->instance);
-+
-+ spin_lock_irqsave(&pha->hardware_lock, flags);
-+ q2t_sess_put(sess);
-+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int q24_get_loop_id(scsi_qla_host_t *ha, const uint8_t *s_id,
-+ uint16_t *loop_id)
-+{
-+ dma_addr_t gid_list_dma;
-+ struct gid_list_info *gid_list;
-+ char *id_iter;
-+ int res, rc, i, retries = 0;
-+ uint16_t entries;
-+
-+ TRACE_ENTRY();
-+
-+ gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
-+ &gid_list_dma, GFP_KERNEL);
-+ if (gid_list == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): DMA Alloc failed of %zd",
-+ ha->instance, GID_LIST_SIZE);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ /* Get list of logged in devices */
-+retry:
-+ rc = qla2x00_get_id_list(ha, gid_list, gid_list_dma, &entries);
-+ if (rc != QLA_SUCCESS) {
-+ if (rc == QLA_FW_NOT_READY) {
-+ retries++;
-+ if (retries < 3) {
-+ msleep(1000);
-+ goto retry;
-+ }
-+ }
-+ TRACE_MGMT_DBG("qla2x00t(%ld): get_id_list() failed: %x",
-+ ha->instance, rc);
-+ res = -rc;
-+ goto out_free_id_list;
-+ }
-+
-+ id_iter = (char *)gid_list;
-+ res = -1;
-+ for (i = 0; i < entries; i++) {
-+ struct gid_list_info *gid = (struct gid_list_info *)id_iter;
-+ if ((gid->al_pa == s_id[2]) &&
-+ (gid->area == s_id[1]) &&
-+ (gid->domain == s_id[0])) {
-+ *loop_id = le16_to_cpu(gid->loop_id);
-+ res = 0;
-+ break;
-+ }
-+ id_iter += ha->gid_list_info_size;
-+ }
-+
-+out_free_id_list:
-+ dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, gid_list, gid_list_dma);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static bool q2t_check_fcport_exist(scsi_qla_host_t *ha, struct q2t_sess *sess)
-+{
-+ bool res, found = false;
-+ int rc, i;
-+	uint16_t loop_id = 0xFFFF; /* to silence a compiler warning */
-+ uint16_t entries;
-+ void *pmap;
-+ int pmap_len;
-+ fc_port_t *fcport;
-+ int global_resets;
-+
-+ TRACE_ENTRY();
-+
-+retry:
-+ global_resets = atomic_read(&ha->tgt->tgt_global_resets_count);
-+
-+ rc = qla2x00_get_node_name_list(ha, &pmap, &pmap_len);
-+ if (rc != QLA_SUCCESS) {
-+ res = false;
-+ goto out;
-+ }
-+
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ struct qla_port24_data *pmap24 = pmap;
-+
-+ entries = pmap_len/sizeof(*pmap24);
-+
-+ for (i = 0; i < entries; ++i) {
-+ if ((sess->port_name[0] == pmap24[i].port_name[0]) &&
-+ (sess->port_name[1] == pmap24[i].port_name[1]) &&
-+ (sess->port_name[2] == pmap24[i].port_name[2]) &&
-+ (sess->port_name[3] == pmap24[i].port_name[3]) &&
-+ (sess->port_name[4] == pmap24[i].port_name[4]) &&
-+ (sess->port_name[5] == pmap24[i].port_name[5]) &&
-+ (sess->port_name[6] == pmap24[i].port_name[6]) &&
-+ (sess->port_name[7] == pmap24[i].port_name[7])) {
-+ loop_id = le16_to_cpu(pmap24[i].loop_id);
-+ found = true;
-+ break;
-+ }
-+ }
-+ } else {
-+ struct qla_port23_data *pmap2x = pmap;
-+
-+ entries = pmap_len/sizeof(*pmap2x);
-+
-+ for (i = 0; i < entries; ++i) {
-+ if ((sess->port_name[0] == pmap2x[i].port_name[0]) &&
-+ (sess->port_name[1] == pmap2x[i].port_name[1]) &&
-+ (sess->port_name[2] == pmap2x[i].port_name[2]) &&
-+ (sess->port_name[3] == pmap2x[i].port_name[3]) &&
-+ (sess->port_name[4] == pmap2x[i].port_name[4]) &&
-+ (sess->port_name[5] == pmap2x[i].port_name[5]) &&
-+ (sess->port_name[6] == pmap2x[i].port_name[6]) &&
-+ (sess->port_name[7] == pmap2x[i].port_name[7])) {
-+ loop_id = le16_to_cpu(pmap2x[i].loop_id);
-+ found = true;
-+ break;
-+ }
-+ }
-+ }
-+
-+ kfree(pmap);
-+
-+ if (!found) {
-+ res = false;
-+ goto out;
-+ }
-+
-+ TRACE_MGMT_DBG("loop_id %d", loop_id);
-+
-+ fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
-+ if (fcport == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): Allocation of tmp FC port failed",
-+ ha->instance);
-+ res = false;
-+ goto out;
-+ }
-+
-+ fcport->loop_id = loop_id;
-+
-+ rc = qla2x00_get_port_database(ha, fcport, 0);
-+ if (rc != QLA_SUCCESS) {
-+ PRINT_ERROR("qla2x00t(%ld): Failed to retrieve fcport "
-+ "information -- get_port_database() returned %x "
-+ "(loop_id=0x%04x)", ha->instance, rc, loop_id);
-+ res = false;
-+ goto out_free_fcport;
-+ }
-+
-+ if (global_resets != atomic_read(&ha->tgt->tgt_global_resets_count)) {
-+ TRACE_MGMT_DBG("qla2x00t(%ld): global reset during session "
-+ "discovery (counter was %d, new %d), retrying",
-+ ha->instance, global_resets,
-+ atomic_read(&ha->tgt->tgt_global_resets_count));
-+ goto retry;
-+ }
-+
-+ TRACE_MGMT_DBG("Updating sess %p s_id %x:%x:%x, "
-+ "loop_id %d) to d_id %x:%x:%x, loop_id %d", sess,
-+ sess->s_id.b.domain, sess->s_id.b.area,
-+ sess->s_id.b.al_pa, sess->loop_id, fcport->d_id.b.domain,
-+ fcport->d_id.b.area, fcport->d_id.b.al_pa, fcport->loop_id);
-+
-+ sess->s_id = fcport->d_id;
-+ sess->loop_id = fcport->loop_id;
-+ sess->conf_compl_supported = fcport->conf_compl_supported;
-+
-+ res = true;
-+
-+out_free_fcport:
-+ kfree(fcport);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
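-+
-+/*
-+ * Note: the retry loop above guards against a global reset racing with the
-+ * mailbox calls; if tgt_global_resets_count changed while the port database
-+ * was being read, the whole lookup is restarted.
-+ */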
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static void q2t_undelete_sess(struct q2t_sess *sess)
-+{
-+ BUG_ON(!sess->deleted);
-+
-+ list_del(&sess->del_list_entry);
-+ sess->deleted = 0;
-+}
-+
-+static void q2t_del_sess_work_fn(struct delayed_work *work)
-+{
-+ struct q2t_tgt *tgt = container_of(work, struct q2t_tgt,
-+ sess_del_work);
-+ scsi_qla_host_t *ha = tgt->ha;
-+ scsi_qla_host_t *pha = to_qla_parent(ha);
-+ struct q2t_sess *sess;
-+ unsigned long flags;
-+
-+ TRACE_ENTRY();
-+
-+ spin_lock_irqsave(&pha->hardware_lock, flags);
-+ while (!list_empty(&tgt->del_sess_list)) {
-+ sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
-+ del_list_entry);
-+ if (time_after_eq(jiffies, sess->expires)) {
-+ bool cancel;
-+
-+ q2t_undelete_sess(sess);
-+
-+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
-+ cancel = q2t_check_fcport_exist(ha, sess);
-+ spin_lock_irqsave(&pha->hardware_lock, flags);
-+
-+ if (cancel) {
-+ if (sess->deleted) {
-+ /*
-+ * sess was again deleted while we were
-+ * discovering it
-+ */
-+ continue;
-+ }
-+
-+ PRINT_INFO("qla2x00t(%ld): cancel deletion of "
-+ "session for port %02x:%02x:%02x:"
-+ "%02x:%02x:%02x:%02x:%02x (loop ID %d), "
-+ "because it isn't deleted by firmware",
-+ ha->instance,
-+ sess->port_name[0], sess->port_name[1],
-+ sess->port_name[2], sess->port_name[3],
-+ sess->port_name[4], sess->port_name[5],
-+ sess->port_name[6], sess->port_name[7],
-+ sess->loop_id);
-+ } else {
-+ TRACE_MGMT_DBG("Timeout: sess %p about to be "
-+ "deleted", sess);
-+ q2t_sess_put(sess);
-+ }
-+ } else {
-+			schedule_delayed_work(&tgt->sess_del_work,
-+				sess->expires - jiffies);
-+ break;
-+ }
-+ }
-+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * Must be called under tgt_mutex.
-+ *
-+ * Adds an extra ref to allow the caller to drop the HW lock after adding
-+ * sess to the list. The caller must put this ref.
-+ */
-+static struct q2t_sess *q2t_create_sess(scsi_qla_host_t *ha, fc_port_t *fcport,
-+ bool local)
-+{
-+ char *wwn_str;
-+ const int wwn_str_len = 3*WWN_SIZE+2;
-+ struct q2t_tgt *tgt = ha->tgt;
-+ struct q2t_sess *sess;
-+ scsi_qla_host_t *pha = to_qla_parent(ha);
-+
-+ TRACE_ENTRY();
-+
-+ /* Check to avoid double sessions */
-+ spin_lock_irq(&pha->hardware_lock);
-+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
-+ if ((sess->port_name[0] == fcport->port_name[0]) &&
-+ (sess->port_name[1] == fcport->port_name[1]) &&
-+ (sess->port_name[2] == fcport->port_name[2]) &&
-+ (sess->port_name[3] == fcport->port_name[3]) &&
-+ (sess->port_name[4] == fcport->port_name[4]) &&
-+ (sess->port_name[5] == fcport->port_name[5]) &&
-+ (sess->port_name[6] == fcport->port_name[6]) &&
-+ (sess->port_name[7] == fcport->port_name[7])) {
-+ TRACE_MGMT_DBG("Double sess %p found (s_id %x:%x:%x, "
-+ "loop_id %d), updating to d_id %x:%x:%x, "
-+ "loop_id %d", sess, sess->s_id.b.domain,
-+ sess->s_id.b.area, sess->s_id.b.al_pa,
-+ sess->loop_id, fcport->d_id.b.domain,
-+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
-+ fcport->loop_id);
-+
-+ if (sess->deleted)
-+ q2t_undelete_sess(sess);
-+
-+ q2t_sess_get(sess);
-+ sess->s_id = fcport->d_id;
-+ sess->loop_id = fcport->loop_id;
-+ sess->conf_compl_supported = fcport->conf_compl_supported;
-+ if (sess->local && !local)
-+ sess->local = 0;
-+ spin_unlock_irq(&pha->hardware_lock);
-+ goto out;
-+ }
-+ }
-+ spin_unlock_irq(&pha->hardware_lock);
-+
-+ /* We are under tgt_mutex, so a new sess can't be added behind us */
-+
-+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
-+ if (sess == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): session allocation failed, "
-+ "all commands from port %02x:%02x:%02x:%02x:"
-+ "%02x:%02x:%02x:%02x will be refused", ha->instance,
-+ fcport->port_name[0], fcport->port_name[1],
-+ fcport->port_name[2], fcport->port_name[3],
-+ fcport->port_name[4], fcport->port_name[5],
-+ fcport->port_name[6], fcport->port_name[7]);
-+ goto out;
-+ }
-+
-+ sess->sess_ref = 2; /* plus 1 extra ref, see above */
-+ sess->tgt = ha->tgt;
-+ sess->s_id = fcport->d_id;
-+ sess->loop_id = fcport->loop_id;
-+ sess->conf_compl_supported = fcport->conf_compl_supported;
-+ sess->local = local;
-+ BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
-+ memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
-+
-+ wwn_str = kmalloc(wwn_str_len, GFP_KERNEL);
-+ if (wwn_str == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): Allocation of wwn_str failed. "
-+ "All commands from port %02x:%02x:%02x:%02x:%02x:%02x:"
-+ "%02x:%02x will be refused", ha->instance,
-+ fcport->port_name[0], fcport->port_name[1],
-+ fcport->port_name[2], fcport->port_name[3],
-+ fcport->port_name[4], fcport->port_name[5],
-+ fcport->port_name[6], fcport->port_name[7]);
-+ goto out_free_sess;
-+ }
-+
-+ sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
-+ fcport->port_name[0], fcport->port_name[1],
-+ fcport->port_name[2], fcport->port_name[3],
-+ fcport->port_name[4], fcport->port_name[5],
-+ fcport->port_name[6], fcport->port_name[7]);
-+
-+ /* Let's do the session creation async'ly */
-+ sess->scst_sess = scst_register_session(tgt->scst_tgt, 1, wwn_str,
-+ sess, sess, q2t_alloc_session_done);
-+ if (sess->scst_sess == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): scst_register_session() "
-+ "failed for host %ld (wwn %s, loop_id %d), all "
-+ "commands from it will be refused", ha->instance,
-+ ha->host_no, wwn_str, fcport->loop_id);
-+ goto out_free_sess_wwn;
-+ }
-+
-+ TRACE_MGMT_DBG("Adding sess %p to tgt %p", sess, tgt);
-+
-+ spin_lock_irq(&pha->hardware_lock);
-+ list_add_tail(&sess->sess_list_entry, &tgt->sess_list);
-+ tgt->sess_count++;
-+ spin_unlock_irq(&pha->hardware_lock);
-+
-+ PRINT_INFO("qla2x00t(%ld): %ssession for wwn %s (loop_id %d, "
-+ "s_id %x:%x:%x, confirmed completion %ssupported) added",
-+ ha->instance, local ? "local " : "", wwn_str, fcport->loop_id,
-+ sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
-+ sess->conf_compl_supported ? "" : "not ");
-+
-+ kfree(wwn_str);
-+
-+out:
-+ TRACE_EXIT_HRES(sess);
-+ return sess;
-+
-+out_free_sess_wwn:
-+ kfree(wwn_str);
-+	/* fall through */
-+
-+out_free_sess:
-+ kfree(sess);
-+ sess = NULL;
-+ goto out;
-+}
-+
-+static void q2t_fc_port_added(scsi_qla_host_t *ha, fc_port_t *fcport)
-+{
-+ struct q2t_tgt *tgt;
-+ struct q2t_sess *sess;
-+ scsi_qla_host_t *pha = to_qla_parent(ha);
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&ha->tgt_mutex);
-+
-+ tgt = ha->tgt;
-+
-+ if ((tgt == NULL) || (fcport->port_type != FCT_INITIATOR))
-+ goto out_unlock;
-+
-+ if (tgt->tgt_stop)
-+ goto out_unlock;
-+
-+ spin_lock_irq(&pha->hardware_lock);
-+
-+ sess = q2t_find_sess_by_port_name(tgt, fcport->port_name);
-+ if (sess == NULL) {
-+ spin_unlock_irq(&pha->hardware_lock);
-+ sess = q2t_create_sess(ha, fcport, false);
-+ spin_lock_irq(&pha->hardware_lock);
-+ if (sess != NULL)
-+ q2t_sess_put(sess); /* put the extra creation ref */
-+ } else {
-+ if (sess->deleted) {
-+ q2t_undelete_sess(sess);
-+
-+ PRINT_INFO("qla2x00t(%ld): %ssession for port %02x:"
-+ "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
-+ "reappeared", sess->tgt->ha->instance,
-+ sess->local ? "local " : "", sess->port_name[0],
-+ sess->port_name[1], sess->port_name[2],
-+ sess->port_name[3], sess->port_name[4],
-+ sess->port_name[5], sess->port_name[6],
-+ sess->port_name[7], sess->loop_id);
-+
-+ TRACE_MGMT_DBG("Reappeared sess %p", sess);
-+ }
-+ sess->s_id = fcport->d_id;
-+ sess->loop_id = fcport->loop_id;
-+ sess->conf_compl_supported = fcport->conf_compl_supported;
-+ }
-+
-+ if (sess->local) {
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): local session for "
-+ "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
-+ "(loop ID %d) became global", ha->instance,
-+ fcport->port_name[0], fcport->port_name[1],
-+ fcport->port_name[2], fcport->port_name[3],
-+ fcport->port_name[4], fcport->port_name[5],
-+ fcport->port_name[6], fcport->port_name[7],
-+ sess->loop_id);
-+ sess->local = 0;
-+ }
-+
-+ spin_unlock_irq(&pha->hardware_lock);
-+
-+out_unlock:
-+ mutex_unlock(&ha->tgt_mutex);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void q2t_fc_port_deleted(scsi_qla_host_t *ha, fc_port_t *fcport)
-+{
-+ struct q2t_tgt *tgt;
-+ struct q2t_sess *sess;
-+ scsi_qla_host_t *pha = to_qla_parent(ha);
-+
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&ha->tgt_mutex);
-+
-+ tgt = ha->tgt;
-+
-+ if ((tgt == NULL) || (fcport->port_type != FCT_INITIATOR))
-+ goto out_unlock;
-+
-+ if (tgt->tgt_stop)
-+ goto out_unlock;
-+
-+ spin_lock_irq(&pha->hardware_lock);
-+
-+ sess = q2t_find_sess_by_port_name(tgt, fcport->port_name);
-+ if (sess == NULL)
-+ goto out_unlock_ha;
-+
-+ TRACE_MGMT_DBG("sess %p", sess);
-+
-+ sess->local = 1;
-+ q2t_schedule_sess_for_deletion(sess);
-+
-+out_unlock_ha:
-+ spin_unlock_irq(&pha->hardware_lock);
-+
-+out_unlock:
-+ mutex_unlock(&ha->tgt_mutex);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static inline int test_tgt_sess_count(struct q2t_tgt *tgt)
-+{
-+ unsigned long flags;
-+ int res;
-+ scsi_qla_host_t *pha = to_qla_parent(tgt->ha);
-+
-+	/*
-+	 * We need to protect against the race when tgt is freed before
-+	 * or inside wake_up()
-+	 */
-+ spin_lock_irqsave(&pha->hardware_lock, flags);
-+ TRACE_DBG("tgt %p, empty(sess_list)=%d sess_count=%d",
-+ tgt, list_empty(&tgt->sess_list), tgt->sess_count);
-+ res = (tgt->sess_count == 0);
-+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
-+
-+ return res;
-+}
-+
-+/* Must be called under tgt_host_action_mutex or q2t_unreg_rwsem write locked */
-+static void q2t_target_stop(struct scst_tgt *scst_tgt)
-+{
-+ struct q2t_tgt *tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
-+ scsi_qla_host_t *ha = tgt->ha;
-+ scsi_qla_host_t *pha = to_qla_parent(ha);
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Stopping target for host %ld(%p)", ha->host_no, ha);
-+
-+	/*
-+	 * The mutex is needed to sync with q2t_fc_port_[added,deleted]().
-+	 * The lock is needed, because we can still get an incoming packet.
-+	 */
-+
-+ mutex_lock(&ha->tgt_mutex);
-+ spin_lock_irq(&pha->hardware_lock);
-+ tgt->tgt_stop = 1;
-+ q2t_clear_tgt_db(tgt, false);
-+ spin_unlock_irq(&pha->hardware_lock);
-+ mutex_unlock(&ha->tgt_mutex);
-+
-+ cancel_delayed_work_sync(&tgt->sess_del_work);
-+
-+ TRACE_MGMT_DBG("Waiting for sess works (tgt %p)", tgt);
-+ spin_lock_irq(&tgt->sess_work_lock);
-+ while (!list_empty(&tgt->sess_works_list)) {
-+ spin_unlock_irq(&tgt->sess_work_lock);
-+ flush_scheduled_work();
-+ spin_lock_irq(&tgt->sess_work_lock);
-+ }
-+ spin_unlock_irq(&tgt->sess_work_lock);
-+
-+ TRACE_MGMT_DBG("Waiting for tgt %p: list_empty(sess_list)=%d "
-+ "sess_count=%d", tgt, list_empty(&tgt->sess_list),
-+ tgt->sess_count);
-+
-+ wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
-+
-+ /* Big hammer */
-+ if (!pha->host_shutting_down && qla_tgt_mode_enabled(ha))
-+ qla2x00_disable_tgt_mode(ha);
-+
-+ /* Wait for sessions to clear out (just in case) */
-+ wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
-+
-+ TRACE_MGMT_DBG("Waiting for %d IRQ commands to complete (tgt %p)",
-+ tgt->irq_cmd_count, tgt);
-+
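-+	/*
-+	 * Poll irq_cmd_count under the HW lock, dropping it briefly between
-+	 * polls, until the IRQ path has no commands in flight; only then is
-+	 * it safe to detach the target from ha below.
-+	 */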
-+ mutex_lock(&ha->tgt_mutex);
-+ spin_lock_irq(&pha->hardware_lock);
-+ while (tgt->irq_cmd_count != 0) {
-+ spin_unlock_irq(&pha->hardware_lock);
-+ udelay(2);
-+ spin_lock_irq(&pha->hardware_lock);
-+ }
-+ ha->tgt = NULL;
-+ spin_unlock_irq(&pha->hardware_lock);
-+ mutex_unlock(&ha->tgt_mutex);
-+
-+ TRACE_MGMT_DBG("Stop of tgt %p finished", tgt);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Must be called under tgt_host_action_mutex or q2t_unreg_rwsem write locked */
-+static int q2t_target_release(struct scst_tgt *scst_tgt)
-+{
-+ struct q2t_tgt *tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
-+ scsi_qla_host_t *ha = tgt->ha;
-+
-+ TRACE_ENTRY();
-+
-+ q2t_target_stop(scst_tgt);
-+
-+ ha->q2t_tgt = NULL;
-+ scst_tgt_set_tgt_priv(scst_tgt, NULL);
-+
-+ TRACE_MGMT_DBG("Release of tgt %p finished", tgt);
-+
-+ kfree(tgt);
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static int q2t_sched_sess_work(struct q2t_tgt *tgt, int type,
-+ const void *param, unsigned int param_size)
-+{
-+ int res;
-+ struct q2t_sess_work_param *prm;
-+ unsigned long flags;
-+
-+ TRACE_ENTRY();
-+
-+ prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
-+ if (prm == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): Unable to create session "
-+ "work, command will be refused", tgt->ha->instance);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ TRACE_MGMT_DBG("Scheduling work (type %d, prm %p) to find session for "
-+ "param %p (size %d, tgt %p)", type, prm, param, param_size, tgt);
-+
-+ BUG_ON(param_size > (sizeof(*prm) -
-+ offsetof(struct q2t_sess_work_param, cmd)));
-+
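-+	/*
-+	 * prm->cmd serves as an opaque buffer for the copied IOCB; the
-+	 * BUG_ON above relies on everything from offsetof(..., cmd) to the
-+	 * end of the structure being available as payload space.
-+	 */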
-+ prm->type = type;
-+ memcpy(&prm->cmd, param, param_size);
-+
-+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
-+ if (!tgt->sess_works_pending)
-+ tgt->tm_to_unknown = 0;
-+ list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
-+ tgt->sess_works_pending = 1;
-+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
-+
-+ schedule_work(&tgt->sess_work);
-+
-+ res = 0;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/*
-+ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
-+ */
-+static void q2x_modify_command_count(scsi_qla_host_t *ha, int cmd_count,
-+ int imm_count)
-+{
-+ modify_lun_entry_t *pkt;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Sending MODIFY_LUN (ha=%p, cmd=%d, imm=%d)",
-+ ha, cmd_count, imm_count);
-+
-+	/* Sending a marker isn't necessary, since we are called from the ISR */
-+
-+ pkt = (modify_lun_entry_t *)q2t_req_pkt(ha);
-+ if (pkt == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
-+ "request packet", ha->instance, __func__);
-+ goto out;
-+ }
-+
-+ ha->tgt->modify_lun_expected++;
-+
-+ pkt->entry_type = MODIFY_LUN_TYPE;
-+ pkt->entry_count = 1;
-+ if (cmd_count < 0) {
-+ pkt->operators = MODIFY_LUN_CMD_SUB; /* Subtract from command count */
-+ pkt->command_count = -cmd_count;
-+ } else if (cmd_count > 0) {
-+ pkt->operators = MODIFY_LUN_CMD_ADD; /* Add to command count */
-+ pkt->command_count = cmd_count;
-+ }
-+
-+ if (imm_count < 0) {
-+ pkt->operators |= MODIFY_LUN_IMM_SUB;
-+ pkt->immed_notify_count = -imm_count;
-+ } else if (imm_count > 0) {
-+ pkt->operators |= MODIFY_LUN_IMM_ADD;
-+ pkt->immed_notify_count = imm_count;
-+ }
-+
-+ pkt->timeout = 0; /* Use default */
-+
-+ TRACE_BUFFER("MODIFY LUN packet data", pkt, REQUEST_ENTRY_SIZE);
-+
-+ q2t_exec_queue(ha);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
-+ */
-+static void q2x_send_notify_ack(scsi_qla_host_t *ha, notify_entry_t *iocb,
-+ uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
-+ uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
-+{
-+ nack_entry_t *ntfy;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Sending NOTIFY_ACK (ha=%p)", ha);
-+
-+ /* Send marker if required */
-+ if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
-+ goto out;
-+
-+			"(ABTS_RESP_24XX) received, with unknown "
-+ if (ntfy == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
-+ "request packet", ha->instance, __func__);
-+ goto out;
-+ }
-+
-+ if (ha->tgt != NULL)
-+ ha->tgt->notify_ack_expected++;
-+
-+ ntfy->entry_type = NOTIFY_ACK_TYPE;
-+ ntfy->entry_count = 1;
-+ SET_TARGET_ID(ha, ntfy->target, GET_TARGET_ID(ha, iocb));
-+ ntfy->status = iocb->status;
-+ ntfy->task_flags = iocb->task_flags;
-+ ntfy->seq_id = iocb->seq_id;
-+ /* Do not increment here, the chip isn't decrementing */
-+ /* ntfy->flags = __constant_cpu_to_le16(NOTIFY_ACK_RES_COUNT); */
-+ ntfy->flags |= cpu_to_le16(add_flags);
-+ ntfy->srr_rx_id = iocb->srr_rx_id;
-+ ntfy->srr_rel_offs = iocb->srr_rel_offs;
-+ ntfy->srr_ui = iocb->srr_ui;
-+ ntfy->srr_flags = cpu_to_le16(srr_flags);
-+ ntfy->srr_reject_code = cpu_to_le16(srr_reject_code);
-+ ntfy->srr_reject_code_expl = srr_explan;
-+ ntfy->ox_id = iocb->ox_id;
-+
-+ if (resp_code_valid) {
-+ ntfy->resp_code = cpu_to_le16(resp_code);
-+ ntfy->flags |= __constant_cpu_to_le16(
-+ NOTIFY_ACK_TM_RESP_CODE_VALID);
-+ }
-+
-+ TRACE(TRACE_SCSI, "qla2x00t(%ld): Sending Notify Ack Seq %#x -> I %#x "
-+ "St %#x RC %#x", ha->instance,
-+ le16_to_cpu(iocb->seq_id), GET_TARGET_ID(ha, iocb),
-+ le16_to_cpu(iocb->status), le16_to_cpu(ntfy->resp_code));
-+ TRACE_BUFFER("Notify Ack packet data", ntfy, REQUEST_ENTRY_SIZE);
-+
-+ q2t_exec_queue(ha);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
-+ */
-+static void q24_send_abts_resp(scsi_qla_host_t *ha,
-+ const abts24_recv_entry_t *abts, uint32_t status, bool ids_reversed)
-+{
-+ abts24_resp_entry_t *resp;
-+ uint32_t f_ctl;
-+ uint8_t *p;
-+
-+ TRACE_ENTRY();
-+
-+	TRACE_DBG("Sending task mgmt ABTS response (ha=%p, abts=%p, "
-+		"status=%x)", ha, abts, status);
-+
-+ /* Send marker if required */
-+ if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
-+ goto out;
-+
-+ resp = (abts24_resp_entry_t *)q2t_req_pkt(ha);
-+ if (resp == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
-+ "request packet", ha->instance, __func__);
-+ goto out;
-+ }
-+
-+ resp->entry_type = ABTS_RESP_24XX;
-+ resp->entry_count = 1;
-+ resp->nport_handle = abts->nport_handle;
-+ resp->vp_index = ha->vp_idx;
-+ resp->sof_type = abts->sof_type;
-+ resp->exchange_address = abts->exchange_address;
-+ resp->fcp_hdr_le = abts->fcp_hdr_le;
-+ f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
-+ F_CTL_LAST_SEQ | F_CTL_END_SEQ |
-+ F_CTL_SEQ_INITIATIVE);
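-+	/*
-+	 * F_CTL is a 24-bit field, so copy only the three low-order bytes
-+	 * of the little-endian value built above.
-+	 */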
-+ p = (uint8_t *)&f_ctl;
-+ resp->fcp_hdr_le.f_ctl[0] = *p++;
-+ resp->fcp_hdr_le.f_ctl[1] = *p++;
-+ resp->fcp_hdr_le.f_ctl[2] = *p;
-+ if (ids_reversed) {
-+ resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
-+ resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
-+ resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
-+ resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
-+ resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
-+ resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
-+ } else {
-+ resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
-+ resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
-+ resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
-+ resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
-+ resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
-+ resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
-+ }
-+ resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
-+ if (status == SCST_MGMT_STATUS_SUCCESS) {
-+ resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
-+ resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
-+ resp->payload.ba_acct.low_seq_cnt = 0x0000;
-+ resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
-+ resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
-+ resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
-+ } else {
-+ resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
-+ resp->payload.ba_rjt.reason_code =
-+ BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
-+ /* Other bytes are zero */
-+ }
-+
-+ TRACE_BUFFER("ABTS RESP packet data", resp, REQUEST_ENTRY_SIZE);
-+
-+ ha->tgt->abts_resp_expected++;
-+
-+ q2t_exec_queue(ha);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
-+ */
-+static void q24_retry_term_exchange(scsi_qla_host_t *ha,
-+ abts24_resp_fw_entry_t *entry)
-+{
-+ ctio7_status1_entry_t *ctio;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Sending retry TERM EXCH CTIO7 (ha=%p)", ha);
-+
-+ /* Send marker if required */
-+ if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
-+ goto out;
-+
-+ ctio = (ctio7_status1_entry_t *)q2t_req_pkt(ha);
-+ if (ctio == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
-+ "request packet", ha->instance, __func__);
-+ goto out;
-+ }
-+
-+	/*
-+	 * On entry we have the firmware's response to the ABTS response
-+	 * we generated, so the ID fields in it are reversed.
-+	 */
-+
-+ ctio->common.entry_type = CTIO_TYPE7;
-+ ctio->common.entry_count = 1;
-+ ctio->common.nport_handle = entry->nport_handle;
-+ ctio->common.handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
-+ ctio->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
-+ ctio->common.vp_index = ha->vp_idx;
-+ ctio->common.initiator_id[0] = entry->fcp_hdr_le.d_id[0];
-+ ctio->common.initiator_id[1] = entry->fcp_hdr_le.d_id[1];
-+ ctio->common.initiator_id[2] = entry->fcp_hdr_le.d_id[2];
-+ ctio->common.exchange_addr = entry->exchange_addr_to_abort;
-+ ctio->flags = __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
-+ ctio->ox_id = entry->fcp_hdr_le.ox_id;
-+
-+ TRACE_BUFFER("CTIO7 retry TERM EXCH packet data", ctio, REQUEST_ENTRY_SIZE);
-+
-+ q2t_exec_queue(ha);
-+
-+ q24_send_abts_resp(ha, (abts24_recv_entry_t *)entry,
-+ SCST_MGMT_STATUS_SUCCESS, true);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static int __q24_handle_abts(scsi_qla_host_t *ha, abts24_recv_entry_t *abts,
-+ struct q2t_sess *sess)
-+{
-+ int res;
-+ uint32_t tag = abts->exchange_addr_to_abort;
-+ struct q2t_mgmt_cmd *mcmd;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("qla2x00t(%ld): task abort (tag=%d)", ha->instance,
-+ tag);
-+
-+ mcmd = mempool_alloc(q2t_mgmt_cmd_mempool, GFP_ATOMIC);
-+ if (mcmd == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): %s: Allocation of ABORT cmd failed",
-+ ha->instance, __func__);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ memset(mcmd, 0, sizeof(*mcmd));
-+
-+ mcmd->sess = sess;
-+ memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
-+
-+ res = scst_rx_mgmt_fn_tag(sess->scst_sess, SCST_ABORT_TASK, tag,
-+ SCST_ATOMIC, mcmd);
-+ if (res != 0) {
-+ PRINT_ERROR("qla2x00t(%ld): scst_rx_mgmt_fn_tag() failed: %d",
-+ ha->instance, res);
-+ goto out_free;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free:
-+ mempool_free(mcmd, q2t_mgmt_cmd_mempool);
-+ goto out;
-+}
-+
-+/*
-+ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
-+ */
-+static void q24_handle_abts(scsi_qla_host_t *ha, abts24_recv_entry_t *abts)
-+{
-+ int rc;
-+ uint32_t tag = abts->exchange_addr_to_abort;
-+ struct q2t_sess *sess;
-+
-+ TRACE_ENTRY();
-+
-+ if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
-+ PRINT_ERROR("qla2x00t(%ld): ABTS: Abort Sequence not "
-+ "supported", ha->instance);
-+ goto out_err;
-+ }
-+
-+ if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
-+ TRACE_MGMT_DBG("qla2x00t(%ld): ABTS: Unknown Exchange "
-+ "Address received", ha->instance);
-+ goto out_err;
-+ }
-+
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): task abort (s_id=%x:%x:%x, "
-+ "tag=%d, param=%x)", ha->instance, abts->fcp_hdr_le.s_id[2],
-+ abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
-+ le32_to_cpu(abts->fcp_hdr_le.parameter));
-+
-+ sess = q2t_find_sess_by_s_id_le(ha->tgt, abts->fcp_hdr_le.s_id);
-+ if (sess == NULL) {
-+		TRACE_MGMT_DBG("qla2x00t(%ld): task abort for nonexistent "
-+			"session", ha->instance);
-+ rc = q2t_sched_sess_work(ha->tgt, Q2T_SESS_WORK_ABORT, abts,
-+ sizeof(*abts));
-+ if (rc != 0) {
-+ ha->tgt->tm_to_unknown = 1;
-+ goto out_err;
-+ }
-+ goto out;
-+ }
-+
-+ rc = __q24_handle_abts(ha, abts, sess);
-+ if (rc != 0) {
-+ PRINT_ERROR("qla2x00t(%ld): scst_rx_mgmt_fn_tag() failed: %d",
-+ ha->instance, rc);
-+ goto out_err;
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+
-+out_err:
-+ q24_send_abts_resp(ha, abts, SCST_MGMT_STATUS_REJECTED, false);
-+ goto out;
-+}
-+
-+/*
-+ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
-+ */
-+static void q24_send_task_mgmt_ctio(scsi_qla_host_t *ha,
-+ struct q2t_mgmt_cmd *mcmd, uint32_t resp_code)
-+{
-+ const atio7_entry_t *atio = &mcmd->orig_iocb.atio7;
-+ ctio7_status1_entry_t *ctio;
-+
-+ TRACE_ENTRY();
-+
-+	TRACE_DBG("Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)",
-+		ha, atio, resp_code);
-+
-+ /* Send marker if required */
-+ if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
-+ goto out;
-+
-+ ctio = (ctio7_status1_entry_t *)q2t_req_pkt(ha);
-+ if (ctio == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
-+ "request packet", ha->instance, __func__);
-+ goto out;
-+ }
-+
-+ ctio->common.entry_type = CTIO_TYPE7;
-+ ctio->common.entry_count = 1;
-+ ctio->common.handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
-+ ctio->common.nport_handle = mcmd->sess->loop_id;
-+ ctio->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
-+ ctio->common.vp_index = ha->vp_idx;
-+ ctio->common.initiator_id[0] = atio->fcp_hdr.s_id[2];
-+ ctio->common.initiator_id[1] = atio->fcp_hdr.s_id[1];
-+ ctio->common.initiator_id[2] = atio->fcp_hdr.s_id[0];
-+ ctio->common.exchange_addr = atio->exchange_addr;
-+ ctio->flags = (atio->attr << 9) | __constant_cpu_to_le16(
-+ CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
-+ ctio->ox_id = swab16(atio->fcp_hdr.ox_id);
-+ ctio->scsi_status = __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
-+ ctio->response_len = __constant_cpu_to_le16(8);
-+ ((uint32_t *)ctio->sense_data)[0] = cpu_to_be32(resp_code);
-+
-+ TRACE_BUFFER("CTIO7 TASK MGMT packet data", ctio, REQUEST_ENTRY_SIZE);
-+
-+ q2t_exec_queue(ha);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
-+ */
-+static void q24_send_notify_ack(scsi_qla_host_t *ha,
-+ notify24xx_entry_t *iocb, uint16_t srr_flags,
-+ uint8_t srr_reject_code, uint8_t srr_explan)
-+{
-+ nack24xx_entry_t *nack;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Sending NOTIFY_ACK24 (ha=%p)", ha);
-+
-+ /* Send marker if required */
-+ if (q2t_issue_marker(ha, 1) != QLA_SUCCESS)
-+ goto out;
-+
-+ if (ha->tgt != NULL)
-+ ha->tgt->notify_ack_expected++;
-+
-+ nack = (nack24xx_entry_t *)q2t_req_pkt(ha);
-+ if (nack == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
-+ "request packet", ha->instance, __func__);
-+ goto out;
-+ }
-+
-+ nack->entry_type = NOTIFY_ACK_TYPE;
-+ nack->entry_count = 1;
-+ nack->nport_handle = iocb->nport_handle;
-+ if (le16_to_cpu(iocb->status) == IMM_NTFY_ELS) {
-+ nack->flags = iocb->flags &
-+ __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
-+ }
-+ nack->srr_rx_id = iocb->srr_rx_id;
-+ nack->status = iocb->status;
-+ nack->status_subcode = iocb->status_subcode;
-+ nack->exchange_address = iocb->exchange_address;
-+ nack->srr_rel_offs = iocb->srr_rel_offs;
-+ nack->srr_ui = iocb->srr_ui;
-+ nack->srr_flags = cpu_to_le16(srr_flags);
-+ nack->srr_reject_code = srr_reject_code;
-+ nack->srr_reject_code_expl = srr_explan;
-+ nack->ox_id = iocb->ox_id;
-+ nack->vp_index = iocb->vp_index;
-+
-+ TRACE(TRACE_SCSI, "qla2x00t(%ld): Sending 24xx Notify Ack %d",
-+ ha->instance, nack->status);
-+ TRACE_BUFFER("24xx Notify Ack packet data", nack, sizeof(*nack));
-+
-+ q2t_exec_queue(ha);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static uint32_t q2t_convert_to_fc_tm_status(int scst_mstatus)
-+{
-+ uint32_t res;
-+
-+ switch (scst_mstatus) {
-+ case SCST_MGMT_STATUS_SUCCESS:
-+ res = FC_TM_SUCCESS;
-+ break;
-+ case SCST_MGMT_STATUS_TASK_NOT_EXIST:
-+ res = FC_TM_BAD_CMD;
-+ break;
-+ case SCST_MGMT_STATUS_FN_NOT_SUPPORTED:
-+ case SCST_MGMT_STATUS_REJECTED:
-+ res = FC_TM_REJECT;
-+ break;
-+ case SCST_MGMT_STATUS_LUN_NOT_EXIST:
-+ case SCST_MGMT_STATUS_FAILED:
-+ default:
-+ res = FC_TM_FAILED;
-+ break;
-+ }
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* SCST Callback */
-+static void q2t_task_mgmt_fn_done(struct scst_mgmt_cmd *scst_mcmd)
-+{
-+ struct q2t_mgmt_cmd *mcmd;
-+ unsigned long flags;
-+ scsi_qla_host_t *ha, *pha;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("scst_mcmd (%p) status %#x state %#x", scst_mcmd,
-+ scst_mcmd->status, scst_mcmd->state);
-+
-+ mcmd = scst_mgmt_cmd_get_tgt_priv(scst_mcmd);
-+ if (unlikely(mcmd == NULL)) {
-+		PRINT_ERROR("qla2x00t: scst_mcmd %p tgt_priv is NULL", scst_mcmd);
-+ goto out;
-+ }
-+
-+ ha = mcmd->sess->tgt->ha;
-+ pha = to_qla_parent(ha);
-+
-+ spin_lock_irqsave(&pha->hardware_lock, flags);
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ if (mcmd->flags == Q24_MGMT_SEND_NACK) {
-+ q24_send_notify_ack(ha,
-+ &mcmd->orig_iocb.notify_entry24, 0, 0, 0);
-+ } else {
-+ if (scst_mcmd->fn == SCST_ABORT_TASK)
-+ q24_send_abts_resp(ha, &mcmd->orig_iocb.abts,
-+ scst_mgmt_cmd_get_status(scst_mcmd),
-+ false);
-+ else
-+ q24_send_task_mgmt_ctio(ha, mcmd,
-+ q2t_convert_to_fc_tm_status(
-+ scst_mgmt_cmd_get_status(scst_mcmd)));
-+ }
-+ } else {
-+ uint32_t resp_code = q2t_convert_to_fc_tm_status(
-+ scst_mgmt_cmd_get_status(scst_mcmd));
-+ q2x_send_notify_ack(ha, &mcmd->orig_iocb.notify_entry, 0,
-+ resp_code, 1, 0, 0, 0);
-+ }
-+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
-+
-+ scst_mgmt_cmd_set_tgt_priv(scst_mcmd, NULL);
-+ mempool_free(mcmd, q2t_mgmt_cmd_mempool);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* No locks */
-+static int q2t_pci_map_calc_cnt(struct q2t_prm *prm)
-+{
-+ int res = 0;
-+
-+ BUG_ON(prm->cmd->sg_cnt == 0);
-+
-+ prm->sg = (struct scatterlist *)prm->cmd->sg;
-+ prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, prm->cmd->sg,
-+ prm->cmd->sg_cnt, prm->cmd->dma_data_direction);
-+ if (unlikely(prm->seg_cnt == 0))
-+ goto out_err;
-+
-+ prm->cmd->sg_mapped = 1;
-+
-+	/*
-+	 * If there are more S/G entries than fit into the command IOCB
-+	 * (datasegs_per_cmd), we need to allocate continuation entries.
-+	 */
-+ if (prm->seg_cnt > prm->tgt->datasegs_per_cmd) {
-+ prm->req_cnt += (uint16_t)(prm->seg_cnt -
-+ prm->tgt->datasegs_per_cmd) /
-+ prm->tgt->datasegs_per_cont;
-+ if (((uint16_t)(prm->seg_cnt - prm->tgt->datasegs_per_cmd)) %
-+ prm->tgt->datasegs_per_cont)
-+ prm->req_cnt++;
-+ }
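-+	/*
-+	 * Illustration of the arithmetic above (values are hypothetical):
-+	 * with datasegs_per_cmd == 3 and datasegs_per_cont == 5, a
-+	 * 12-segment S/G list adds (12 - 3) / 5 == 1 continuation entry
-+	 * plus one more for the remainder of 4, i.e. req_cnt += 2.
-+	 */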
-+
-+out:
-+ TRACE_DBG("seg_cnt=%d, req_cnt=%d, res=%d", prm->seg_cnt,
-+ prm->req_cnt, res);
-+ return res;
-+
-+out_err:
-+ PRINT_ERROR("qla2x00t(%ld): PCI mapping failed: sg_cnt=%d",
-+ prm->tgt->ha->instance, prm->cmd->sg_cnt);
-+ res = -1;
-+ goto out;
-+}
-+
-+static inline void q2t_unmap_sg(scsi_qla_host_t *ha, struct q2t_cmd *cmd)
-+{
-+ EXTRACHECKS_BUG_ON(!cmd->sg_mapped);
-+ pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
-+ cmd->sg_mapped = 0;
-+}
-+
-+static int q2t_check_reserve_free_req(scsi_qla_host_t *ha, uint32_t req_cnt)
-+{
-+ int res = SCST_TGT_RES_SUCCESS;
-+ device_reg_t __iomem *reg;
-+ uint32_t cnt;
-+
-+ TRACE_ENTRY();
-+
-+ ha = to_qla_parent(ha);
-+ reg = ha->iobase;
-+
-+ if (ha->req_q_cnt < (req_cnt + 2)) {
-+ if (IS_FWI2_CAPABLE(ha))
-+ cnt = (uint16_t)RD_REG_DWORD(
-+ &reg->isp24.req_q_out);
-+ else
-+ cnt = qla2x00_debounce_register(
-+ ISP_REQ_Q_OUT(ha, &reg->isp));
-+ TRACE_DBG("Request ring circled: cnt=%d, "
-+ "ha->req_ring_index=%d, ha->req_q_cnt=%d, req_cnt=%d",
-+ cnt, ha->req_ring_index, ha->req_q_cnt, req_cnt);
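-+		/*
-+		 * The free space in the ring is the distance from our
-+		 * produce index (req_ring_index) to the firmware's consume
-+		 * index (cnt), taking wrap-around into account.
-+		 */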
-+ if (ha->req_ring_index < cnt)
-+ ha->req_q_cnt = cnt - ha->req_ring_index;
-+ else
-+ ha->req_q_cnt = ha->request_q_length -
-+ (ha->req_ring_index - cnt);
-+ }
-+
-+ if (unlikely(ha->req_q_cnt < (req_cnt + 2))) {
-+ TRACE(TRACE_OUT_OF_MEM, "qla2x00t(%ld): There is no room in the "
-+ "request ring: ha->req_ring_index=%d, ha->req_q_cnt=%d, "
-+ "req_cnt=%d", ha->instance, ha->req_ring_index,
-+ ha->req_q_cnt, req_cnt);
-+ res = SCST_TGT_RES_QUEUE_FULL;
-+ goto out;
-+ }
-+
-+ ha->req_q_cnt -= req_cnt;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/*
-+ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
-+ */
-+static inline void *q2t_get_req_pkt(scsi_qla_host_t *ha)
-+{
-+ ha = to_qla_parent(ha);
-+
-+ /* Adjust ring index. */
-+ ha->req_ring_index++;
-+ if (ha->req_ring_index == ha->request_q_length) {
-+ ha->req_ring_index = 0;
-+ ha->request_ring_ptr = ha->request_ring;
-+ } else {
-+ ha->request_ring_ptr++;
-+ }
-+ return (cont_entry_t *)ha->request_ring_ptr;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static inline uint32_t q2t_make_handle(scsi_qla_host_t *ha)
-+{
-+ uint32_t h;
-+
-+ h = ha->current_handle;
-+ /* always increment cmd handle */
-+ do {
-+ ++h;
-+ if (h > MAX_OUTSTANDING_COMMANDS)
-+ h = 1; /* 0 is Q2T_NULL_HANDLE */
-+ if (h == ha->current_handle) {
-+ TRACE(TRACE_OUT_OF_MEM, "qla2x00t(%ld): Ran out of "
-+ "empty cmd slots in ha %p", ha->instance, ha);
-+ h = Q2T_NULL_HANDLE;
-+ break;
-+ }
-+ } while ((h == Q2T_NULL_HANDLE) ||
-+ (h == Q2T_SKIP_HANDLE) ||
-+ (ha->cmds[h-1] != NULL));
-+
-+ if (h != Q2T_NULL_HANDLE)
-+ ha->current_handle = h;
-+
-+ return h;
-+}
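-+
-+/*
-+ * Note: handle 0 (Q2T_NULL_HANDLE) and Q2T_SKIP_HANDLE are reserved, so
-+ * valid handles are 1..MAX_OUTSTANDING_COMMANDS and ha->cmds[] is indexed
-+ * with h - 1 by the callers below.
-+ */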
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static void q2x_build_ctio_pkt(struct q2t_prm *prm)
-+{
-+ uint32_t h;
-+ ctio_entry_t *pkt;
-+ scsi_qla_host_t *ha = prm->tgt->ha;
-+
-+ pkt = (ctio_entry_t *)ha->request_ring_ptr;
-+ prm->pkt = pkt;
-+ memset(pkt, 0, sizeof(*pkt));
-+
-+ if (prm->tgt->tgt_enable_64bit_addr)
-+ pkt->common.entry_type = CTIO_A64_TYPE;
-+ else
-+ pkt->common.entry_type = CONTINUE_TGT_IO_TYPE;
-+
-+ pkt->common.entry_count = (uint8_t)prm->req_cnt;
-+
-+ h = q2t_make_handle(ha);
-+ if (h != Q2T_NULL_HANDLE)
-+ ha->cmds[h-1] = prm->cmd;
-+
-+ pkt->common.handle = h | CTIO_COMPLETION_HANDLE_MARK;
-+ pkt->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
-+
-+ /* Set initiator ID */
-+ h = GET_TARGET_ID(ha, &prm->cmd->atio.atio2x);
-+ SET_TARGET_ID(ha, pkt->common.target, h);
-+
-+ pkt->common.rx_id = prm->cmd->atio.atio2x.rx_id;
-+ pkt->common.relative_offset = cpu_to_le32(prm->cmd->offset);
-+
-+ TRACE(TRACE_DEBUG|TRACE_SCSI, "qla2x00t(%ld): handle(scst_cmd) -> %08x, "
-+ "timeout %d L %#x -> I %#x E %#x", ha->instance,
-+ pkt->common.handle, Q2T_TIMEOUT,
-+ le16_to_cpu(prm->cmd->atio.atio2x.lun),
-+ GET_TARGET_ID(ha, &pkt->common), pkt->common.rx_id);
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static int q24_build_ctio_pkt(struct q2t_prm *prm)
-+{
-+ uint32_t h;
-+ ctio7_status0_entry_t *pkt;
-+ scsi_qla_host_t *ha = prm->tgt->ha;
-+ atio7_entry_t *atio = &prm->cmd->atio.atio7;
-+ int res = SCST_TGT_RES_SUCCESS;
-+
-+ TRACE_ENTRY();
-+
-+ pkt = (ctio7_status0_entry_t *)to_qla_parent(ha)->request_ring_ptr;
-+ prm->pkt = pkt;
-+ memset(pkt, 0, sizeof(*pkt));
-+
-+ pkt->common.entry_type = CTIO_TYPE7;
-+ pkt->common.entry_count = (uint8_t)prm->req_cnt;
-+ pkt->common.vp_index = ha->vp_idx;
-+
-+ h = q2t_make_handle(ha);
-+ if (unlikely(h == Q2T_NULL_HANDLE)) {
-+		/*
-+		 * A CTIO type 7 completion from the firmware doesn't
-+		 * provide a way to know the initiator's LOOP ID, hence
-+		 * we couldn't find the session and, with it, the command.
-+		 */
-+ res = SCST_TGT_RES_QUEUE_FULL;
-+ goto out;
-+ } else
-+ ha->cmds[h-1] = prm->cmd;
-+
-+ pkt->common.handle = h | CTIO_COMPLETION_HANDLE_MARK;
-+ pkt->common.nport_handle = cpu_to_le16(prm->cmd->loop_id);
-+ pkt->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
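-+	/*
-+	 * The FCP header carries s_id in big-endian wire order; the CTIO
-+	 * expects it byte-reversed (AL_PA first), hence the swap below.
-+	 */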
-+ pkt->common.initiator_id[0] = atio->fcp_hdr.s_id[2];
-+ pkt->common.initiator_id[1] = atio->fcp_hdr.s_id[1];
-+ pkt->common.initiator_id[2] = atio->fcp_hdr.s_id[0];
-+ pkt->common.exchange_addr = atio->exchange_addr;
-+ pkt->flags |= (atio->attr << 9);
-+ pkt->ox_id = swab16(atio->fcp_hdr.ox_id);
-+ pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
-+
-+out:
-+ TRACE(TRACE_DEBUG|TRACE_SCSI, "qla2x00t(%ld): handle(scst_cmd) -> %08x, "
-+ "timeout %d, ox_id %#x", ha->instance, pkt->common.handle,
-+ Q2T_TIMEOUT, le16_to_cpu(pkt->ox_id));
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/*
-+ * pha->hardware_lock supposed to be held on entry. We have already made sure
-+ * that there is a sufficient number of request entries to not drop it.
-+ */
-+static void q2t_load_cont_data_segments(struct q2t_prm *prm)
-+{
-+ int cnt;
-+ uint32_t *dword_ptr;
-+ int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
-+
-+ TRACE_ENTRY();
-+
-+ /* Build continuation packets */
-+ while (prm->seg_cnt > 0) {
-+ cont_a64_entry_t *cont_pkt64 =
-+ (cont_a64_entry_t *)q2t_get_req_pkt(prm->tgt->ha);
-+
-+		/*
-+		 * Make sure that none of the 64-bit specific fields of
-+		 * cont_pkt64 are used for 32-bit addressing; cast to
-+		 * (cont_entry_t *) for that.
-+		 */
-+
-+ memset(cont_pkt64, 0, sizeof(*cont_pkt64));
-+
-+ cont_pkt64->entry_count = 1;
-+ cont_pkt64->sys_define = 0;
-+
-+ if (enable_64bit_addressing) {
-+ cont_pkt64->entry_type = CONTINUE_A64_TYPE;
-+ dword_ptr =
-+ (uint32_t *)&cont_pkt64->dseg_0_address;
-+ } else {
-+ cont_pkt64->entry_type = CONTINUE_TYPE;
-+ dword_ptr =
-+ (uint32_t *)&((cont_entry_t *)
-+ cont_pkt64)->dseg_0_address;
-+ }
-+
-+ /* Load continuation entry data segments */
-+ for (cnt = 0;
-+ cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
-+ cnt++, prm->seg_cnt--) {
-+ *dword_ptr++ =
-+ cpu_to_le32(pci_dma_lo32
-+ (sg_dma_address(prm->sg)));
-+ if (enable_64bit_addressing) {
-+ *dword_ptr++ =
-+ cpu_to_le32(pci_dma_hi32
-+ (sg_dma_address
-+ (prm->sg)));
-+ }
-+ *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
-+
-+ TRACE_SG("S/G Segment Cont. phys_addr=%llx:%llx, len=%d",
-+ (long long unsigned int)pci_dma_hi32(sg_dma_address(prm->sg)),
-+ (long long unsigned int)pci_dma_lo32(sg_dma_address(prm->sg)),
-+ (int)sg_dma_len(prm->sg));
-+
-+ prm->sg++;
-+ }
-+
-+ TRACE_BUFFER("Continuation packet data",
-+ cont_pkt64, REQUEST_ENTRY_SIZE);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * pha->hardware_lock supposed to be held on entry. We have already made sure
-+ * that there is a sufficient number of request entries to not drop it.
-+ */
-+static void q2x_load_data_segments(struct q2t_prm *prm)
-+{
-+ int cnt;
-+ uint32_t *dword_ptr;
-+ int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
-+ ctio_common_entry_t *pkt = (ctio_common_entry_t *)prm->pkt;
-+
-+ TRACE_DBG("iocb->scsi_status=%x, iocb->flags=%x",
-+ le16_to_cpu(pkt->scsi_status), le16_to_cpu(pkt->flags));
-+
-+ pkt->transfer_length = cpu_to_le32(prm->cmd->bufflen);
-+
-+ /* Setup packet address segment pointer */
-+ dword_ptr = pkt->dseg_0_address;
-+
-+ if (prm->seg_cnt == 0) {
-+ /* No data transfer */
-+ *dword_ptr++ = 0;
-+ *dword_ptr = 0;
-+
-+ TRACE_BUFFER("No data, CTIO packet data", pkt,
-+ REQUEST_ENTRY_SIZE);
-+ goto out;
-+ }
-+
-+ /* Set total data segment count */
-+ pkt->dseg_count = cpu_to_le16(prm->seg_cnt);
-+
-+ /* If scatter gather */
-+ TRACE_SG("%s", "Building S/G data segments...");
-+ /* Load command entry data segments */
-+ for (cnt = 0;
-+ (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
-+ cnt++, prm->seg_cnt--) {
-+ *dword_ptr++ =
-+ cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
-+ if (enable_64bit_addressing) {
-+ *dword_ptr++ =
-+ cpu_to_le32(pci_dma_hi32
-+ (sg_dma_address(prm->sg)));
-+ }
-+ *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
-+
-+ TRACE_SG("S/G Segment phys_addr=%llx:%llx, len=%d",
-+ (long long unsigned int)pci_dma_hi32(sg_dma_address(prm->sg)),
-+ (long long unsigned int)pci_dma_lo32(sg_dma_address(prm->sg)),
-+ (int)sg_dma_len(prm->sg));
-+
-+ prm->sg++;
-+ }
-+
-+ TRACE_BUFFER("Scatter/gather, CTIO packet data", pkt,
-+ REQUEST_ENTRY_SIZE);
-+
-+ q2t_load_cont_data_segments(prm);
-+
-+out:
-+ return;
-+}
-+
-+/*
-+ * pha->hardware_lock supposed to be held on entry. We have already made sure
-+ * that there is a sufficient number of request entries to not drop it.
-+ */
-+static void q24_load_data_segments(struct q2t_prm *prm)
-+{
-+ int cnt;
-+ uint32_t *dword_ptr;
-+ int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
-+ ctio7_status0_entry_t *pkt = (ctio7_status0_entry_t *)prm->pkt;
-+
-+ TRACE_DBG("iocb->scsi_status=%x, iocb->flags=%x",
-+ le16_to_cpu(pkt->scsi_status), le16_to_cpu(pkt->flags));
-+
-+ pkt->transfer_length = cpu_to_le32(prm->cmd->bufflen);
-+
-+ /* Setup packet address segment pointer */
-+ dword_ptr = pkt->dseg_0_address;
-+
-+ if (prm->seg_cnt == 0) {
-+ /* No data transfer */
-+ *dword_ptr++ = 0;
-+ *dword_ptr = 0;
-+
-+ TRACE_BUFFER("No data, CTIO7 packet data", pkt,
-+ REQUEST_ENTRY_SIZE);
-+ goto out;
-+ }
-+
-+ /* Set total data segment count */
-+ pkt->common.dseg_count = cpu_to_le16(prm->seg_cnt);
-+
-+ /* If scatter gather */
-+ TRACE_SG("%s", "Building S/G data segments...");
-+ /* Load command entry data segments */
-+ for (cnt = 0;
-+ (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
-+ cnt++, prm->seg_cnt--) {
-+ *dword_ptr++ =
-+ cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
-+ if (enable_64bit_addressing) {
-+ *dword_ptr++ =
-+ cpu_to_le32(pci_dma_hi32(
-+ sg_dma_address(prm->sg)));
-+ }
-+ *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
-+
-+ TRACE_SG("S/G Segment phys_addr=%llx:%llx, len=%d",
-+ (long long unsigned int)pci_dma_hi32(sg_dma_address(
-+ prm->sg)),
-+ (long long unsigned int)pci_dma_lo32(sg_dma_address(
-+ prm->sg)),
-+ (int)sg_dma_len(prm->sg));
-+
-+ prm->sg++;
-+ }
-+
-+ q2t_load_cont_data_segments(prm);
-+
-+out:
-+ return;
-+}
-+
-+static inline int q2t_has_data(struct q2t_cmd *cmd)
-+{
-+ return cmd->bufflen > 0;
-+}
-+
-+static int q2t_pre_xmit_response(struct q2t_cmd *cmd,
-+ struct q2t_prm *prm, int xmit_type, unsigned long *flags)
-+{
-+ int res;
-+ struct q2t_tgt *tgt = cmd->tgt;
-+ scsi_qla_host_t *ha = tgt->ha;
-+ scsi_qla_host_t *pha = to_qla_parent(ha);
-+ uint16_t full_req_cnt;
-+ struct scst_cmd *scst_cmd = cmd->scst_cmd;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(cmd->aborted)) {
-+ TRACE_MGMT_DBG("qla2x00t(%ld): terminating exchange "
-+ "for aborted cmd=%p (scst_cmd=%p, tag=%d)",
-+ ha->instance, cmd, scst_cmd, cmd->tag);
-+
-+ cmd->state = Q2T_STATE_ABORTED;
-+ scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_ABORTED);
-+
-+ if (IS_FWI2_CAPABLE(ha))
-+ q24_send_term_exchange(ha, cmd, &cmd->atio.atio7, 0);
-+ else
-+ q2x_send_term_exchange(ha, cmd, &cmd->atio.atio2x, 0);
-+ /* !! At this point cmd could be already freed !! */
-+ res = Q2T_PRE_XMIT_RESP_CMD_ABORTED;
-+ goto out;
-+ }
-+
-+ TRACE(TRACE_SCSI, "qla2x00t(%ld): tag=%lld", ha->instance,
-+ scst_cmd_get_tag(scst_cmd));
-+
-+ prm->cmd = cmd;
-+ prm->tgt = tgt;
-+ prm->rq_result = scst_cmd_get_status(scst_cmd);
-+ prm->sense_buffer = scst_cmd_get_sense_buffer(scst_cmd);
-+ prm->sense_buffer_len = scst_cmd_get_sense_buffer_len(scst_cmd);
-+ prm->sg = NULL;
-+ prm->seg_cnt = -1;
-+ prm->req_cnt = 1;
-+ prm->add_status_pkt = 0;
-+
-+ TRACE_DBG("rq_result=%x, xmit_type=%x", prm->rq_result, xmit_type);
-+ if (prm->rq_result != 0)
-+ TRACE_BUFFER("Sense", prm->sense_buffer, prm->sense_buffer_len);
-+
-+ /* Send marker if required */
-+ if (q2t_issue_marker(ha, 0) != QLA_SUCCESS) {
-+ res = SCST_TGT_RES_FATAL_ERROR;
-+ goto out;
-+ }
-+
-+ TRACE_DBG("CTIO start: ha(%d)", (int)ha->instance);
-+
-+ if ((xmit_type & Q2T_XMIT_DATA) && q2t_has_data(cmd)) {
-+ if (q2t_pci_map_calc_cnt(prm) != 0) {
-+ res = SCST_TGT_RES_QUEUE_FULL;
-+ goto out;
-+ }
-+ }
-+
-+ full_req_cnt = prm->req_cnt;
-+
-+ if (xmit_type & Q2T_XMIT_STATUS) {
-+ /* Bidirectional transfers not supported (yet) */
-+ if (unlikely(scst_get_resid(scst_cmd, &prm->residual, NULL))) {
-+ if (prm->residual > 0) {
-+ TRACE_DBG("Residual underflow: %d (tag %lld, "
-+ "op %x, bufflen %d, rq_result %x)",
-+ prm->residual, scst_cmd->tag,
-+ scst_cmd->cdb[0], cmd->bufflen,
-+ prm->rq_result);
-+ prm->rq_result |= SS_RESIDUAL_UNDER;
-+ } else if (prm->residual < 0) {
-+ TRACE_DBG("Residual overflow: %d (tag %lld, "
-+ "op %x, bufflen %d, rq_result %x)",
-+ prm->residual, scst_cmd->tag,
-+ scst_cmd->cdb[0], cmd->bufflen,
-+ prm->rq_result);
-+ prm->rq_result |= SS_RESIDUAL_OVER;
-+ prm->residual = -prm->residual;
-+ }
-+ }
-+
-+ /*
-+ * If Q2T_XMIT_DATA is not set, add_status_pkt will be ignored
-+ * in *xmit_response() below
-+ */
-+ if (q2t_has_data(cmd)) {
-+ if (SCST_SENSE_VALID(prm->sense_buffer) ||
-+ (IS_FWI2_CAPABLE(ha) &&
-+ (prm->rq_result != 0))) {
-+ prm->add_status_pkt = 1;
-+ full_req_cnt++;
-+ }
-+ }
-+ }
-+
-+ TRACE_DBG("req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d",
-+ prm->req_cnt, full_req_cnt, prm->add_status_pkt);
-+
-+ /* Acquire ring specific lock */
-+ spin_lock_irqsave(&pha->hardware_lock, *flags);
-+
-+	/* Does the F/W have enough IOCBs for this request? */
-+ res = q2t_check_reserve_free_req(ha, full_req_cnt);
-+ if (unlikely(res != SCST_TGT_RES_SUCCESS) &&
-+ (xmit_type & Q2T_XMIT_DATA))
-+ goto out_unlock_free_unmap;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_unlock_free_unmap:
-+ if (cmd->sg_mapped)
-+ q2t_unmap_sg(ha, cmd);
-+
-+ /* Release ring specific lock */
-+ spin_unlock_irqrestore(&pha->hardware_lock, *flags);
-+ goto out;
-+}
-+
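-+/*
-+ * Explicit confirmation is never used with class 2 service; for sense
-+ * delivery it depends only on the initiator's conf_compl_supported bit,
-+ * otherwise the enable_explicit_conf knob must be set as well.
-+ */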
-+static inline int q2t_need_explicit_conf(scsi_qla_host_t *ha,
-+ struct q2t_cmd *cmd, int sending_sense)
-+{
-+ if (ha->enable_class_2)
-+ return 0;
-+
-+ if (sending_sense)
-+ return cmd->conf_compl_supported;
-+ else
-+ return ha->enable_explicit_conf && cmd->conf_compl_supported;
-+}
-+
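-+/*
-+ * Fills a 2xxx mode-1 (status only) CTIO: SCSI status, residual and, when
-+ * valid, up to sizeof(ctio_m1->sense_data) bytes of sense data.
-+ */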
-+static void q2x_init_ctio_ret_entry(ctio_ret_entry_t *ctio_m1,
-+ struct q2t_prm *prm)
-+{
-+ TRACE_ENTRY();
-+
-+ prm->sense_buffer_len = min((uint32_t)prm->sense_buffer_len,
-+ (uint32_t)sizeof(ctio_m1->sense_data));
-+
-+ ctio_m1->flags = __constant_cpu_to_le16(OF_SSTS | OF_FAST_POST |
-+ OF_NO_DATA | OF_SS_MODE_1);
-+ ctio_m1->flags |= __constant_cpu_to_le16(OF_INC_RC);
-+ if (q2t_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
-+ ctio_m1->flags |= __constant_cpu_to_le16(OF_EXPL_CONF |
-+ OF_CONF_REQ);
-+ }
-+ ctio_m1->scsi_status = cpu_to_le16(prm->rq_result);
-+ ctio_m1->residual = cpu_to_le32(prm->residual);
-+ if (SCST_SENSE_VALID(prm->sense_buffer)) {
-+ if (q2t_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
-+ ctio_m1->flags |= __constant_cpu_to_le16(OF_EXPL_CONF |
-+ OF_CONF_REQ);
-+ }
-+ ctio_m1->scsi_status |= __constant_cpu_to_le16(
-+ SS_SENSE_LEN_VALID);
-+ ctio_m1->sense_length = cpu_to_le16(prm->sense_buffer_len);
-+ memcpy(ctio_m1->sense_data, prm->sense_buffer,
-+ prm->sense_buffer_len);
-+ } else {
-+ memset(ctio_m1->sense_data, 0, sizeof(ctio_m1->sense_data));
-+ ctio_m1->sense_length = 0;
-+ }
-+
-+	/* Can sense longer than 26 bytes ever happen here? */
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int __q2x_xmit_response(struct q2t_cmd *cmd, int xmit_type)
-+{
-+ int res;
-+ unsigned long flags;
-+ scsi_qla_host_t *ha, *pha;
-+ struct q2t_prm prm;
-+ ctio_common_entry_t *pkt;
-+
-+ TRACE_ENTRY();
-+
-+ memset(&prm, 0, sizeof(prm));
-+
-+ res = q2t_pre_xmit_response(cmd, &prm, xmit_type, &flags);
-+ if (unlikely(res != SCST_TGT_RES_SUCCESS)) {
-+ if (res == Q2T_PRE_XMIT_RESP_CMD_ABORTED)
-+ res = SCST_TGT_RES_SUCCESS;
-+ goto out;
-+ }
-+
-+ /* Here pha->hardware_lock already locked */
-+
-+ ha = prm.tgt->ha;
-+ pha = to_qla_parent(ha);
-+
-+ q2x_build_ctio_pkt(&prm);
-+ pkt = (ctio_common_entry_t *)prm.pkt;
-+
-+ if (q2t_has_data(cmd) && (xmit_type & Q2T_XMIT_DATA)) {
-+ pkt->flags |= __constant_cpu_to_le16(OF_FAST_POST | OF_DATA_IN);
-+ pkt->flags |= __constant_cpu_to_le16(OF_INC_RC);
-+
-+ q2x_load_data_segments(&prm);
-+
-+ if (prm.add_status_pkt == 0) {
-+ if (xmit_type & Q2T_XMIT_STATUS) {
-+ pkt->scsi_status = cpu_to_le16(prm.rq_result);
-+ pkt->residual = cpu_to_le32(prm.residual);
-+ pkt->flags |= __constant_cpu_to_le16(OF_SSTS);
-+ if (q2t_need_explicit_conf(ha, cmd, 0)) {
-+ pkt->flags |= __constant_cpu_to_le16(
-+ OF_EXPL_CONF |
-+ OF_CONF_REQ);
-+ }
-+ }
-+ } else {
-+ /*
-+ * We have already made sure that there is sufficient
-+ * amount of request entries to not drop HW lock in
-+ * req_pkt().
-+ */
-+ ctio_ret_entry_t *ctio_m1 =
-+ (ctio_ret_entry_t *)q2t_get_req_pkt(ha);
-+
-+ TRACE_DBG("%s", "Building additional status packet");
-+
-+ memcpy(ctio_m1, pkt, sizeof(*ctio_m1));
-+ ctio_m1->entry_count = 1;
-+ ctio_m1->dseg_count = 0;
-+
-+ /* Real finish is ctio_m1's finish */
-+ pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
-+ pkt->flags &= ~__constant_cpu_to_le16(OF_INC_RC);
-+
-+ q2x_init_ctio_ret_entry(ctio_m1, &prm);
-+ TRACE_BUFFER("Status CTIO packet data", ctio_m1,
-+ REQUEST_ENTRY_SIZE);
-+ }
-+ } else
-+ q2x_init_ctio_ret_entry((ctio_ret_entry_t *)pkt, &prm);
-+
-+ cmd->state = Q2T_STATE_PROCESSED; /* Mid-level is done processing */
-+
-+ TRACE_BUFFER("Xmitting", pkt, REQUEST_ENTRY_SIZE);
-+
-+ q2t_exec_queue(ha);
-+
-+ /* Release ring specific lock */
-+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+#ifdef CONFIG_QLA_TGT_DEBUG_SRR
-+static void q2t_check_srr_debug(struct q2t_cmd *cmd, int *xmit_type)
-+{
-+#if 0 /* This is not a real lost status packet, so it won't lead to SRR */
-+ if ((*xmit_type & Q2T_XMIT_STATUS) && (scst_random() % 200) == 50) {
-+ *xmit_type &= ~Q2T_XMIT_STATUS;
-+ TRACE_MGMT_DBG("Dropping cmd %p (tag %d) status", cmd,
-+ cmd->tag);
-+ }
-+#endif
-+
-+ if (q2t_has_data(cmd) && (cmd->sg_cnt > 1) &&
-+ ((scst_random() % 100) == 20)) {
-+ int i, leave = 0;
-+ unsigned int tot_len = 0;
-+
-+ while (leave == 0)
-+ leave = scst_random() % cmd->sg_cnt;
-+
-+ for (i = 0; i < leave; i++)
-+ tot_len += cmd->sg[i].length;
-+
-+ TRACE_MGMT_DBG("Cutting cmd %p (tag %d) buffer tail to len %d, "
-+ "sg_cnt %d (cmd->bufflen %d, cmd->sg_cnt %d)", cmd,
-+ cmd->tag, tot_len, leave, cmd->bufflen, cmd->sg_cnt);
-+
-+ cmd->bufflen = tot_len;
-+ cmd->sg_cnt = leave;
-+ }
-+
-+ if (q2t_has_data(cmd) && ((scst_random() % 100) == 70)) {
-+ unsigned int offset = scst_random() % cmd->bufflen;
-+
-+ TRACE_MGMT_DBG("Cutting cmd %p (tag %d) buffer head "
-+ "to offset %d (cmd->bufflen %d)", cmd, cmd->tag,
-+ offset, cmd->bufflen);
-+ if (offset == 0)
-+ *xmit_type &= ~Q2T_XMIT_DATA;
-+ else if (q2t_cut_cmd_data_head(cmd, offset)) {
-+ TRACE_MGMT_DBG("q2t_cut_cmd_data_head() failed (tag %d)",
-+ cmd->tag);
-+ }
-+ }
-+}
-+#else
-+static inline void q2t_check_srr_debug(struct q2t_cmd *cmd, int *xmit_type) {}
-+#endif
-+
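-+/*
-+ * SCST xmit_response() callback: caches the response S/G list and data
-+ * direction in the q2t_cmd, optionally mangles the buffer for SRR
-+ * debugging, then dispatches to the 24xx or 2xxx flavor.
-+ */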
-+static int q2x_xmit_response(struct scst_cmd *scst_cmd)
-+{
-+ int xmit_type = Q2T_XMIT_DATA, res;
-+ int is_send_status = scst_cmd_get_is_send_status(scst_cmd);
-+ struct q2t_cmd *cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ BUG_ON(!q2t_has_data(cmd) && !is_send_status);
-+#endif
-+
-+#ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
-+ EXTRACHECKS_BUG_ON(scst_cmd_atomic(scst_cmd));
-+#endif
-+
-+ if (is_send_status)
-+ xmit_type |= Q2T_XMIT_STATUS;
-+
-+ cmd->bufflen = scst_cmd_get_adjusted_resp_data_len(scst_cmd);
-+ cmd->sg = scst_cmd_get_sg(scst_cmd);
-+ cmd->sg_cnt = scst_cmd_get_sg_cnt(scst_cmd);
-+ cmd->data_direction = scst_cmd_get_data_direction(scst_cmd);
-+ cmd->dma_data_direction = scst_to_tgt_dma_dir(cmd->data_direction);
-+ cmd->offset = scst_cmd_get_ppl_offset(scst_cmd);
-+ cmd->aborted = scst_cmd_aborted(scst_cmd);
-+
-+ q2t_check_srr_debug(cmd, &xmit_type);
-+
-+ TRACE_DBG("is_send_status=%x, cmd->bufflen=%d, cmd->sg_cnt=%d, "
-+ "cmd->data_direction=%d", is_send_status, cmd->bufflen,
-+ cmd->sg_cnt, cmd->data_direction);
-+
-+ if (IS_FWI2_CAPABLE(cmd->tgt->ha))
-+ res = __q24_xmit_response(cmd, xmit_type);
-+ else
-+ res = __q2x_xmit_response(cmd, xmit_type);
-+
-+ return res;
-+}
-+
-+static void q24_init_ctio_ret_entry(ctio7_status0_entry_t *ctio,
-+ struct q2t_prm *prm)
-+{
-+ ctio7_status1_entry_t *ctio1;
-+
-+ TRACE_ENTRY();
-+
-+ prm->sense_buffer_len = min((uint32_t)prm->sense_buffer_len,
-+ (uint32_t)sizeof(ctio1->sense_data));
-+ ctio->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
-+ if (q2t_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
-+ ctio->flags |= __constant_cpu_to_le16(
-+ CTIO7_FLAGS_EXPLICIT_CONFORM |
-+ CTIO7_FLAGS_CONFORM_REQ);
-+ }
-+ ctio->residual = cpu_to_le32(prm->residual);
-+ ctio->scsi_status = cpu_to_le16(prm->rq_result);
-+ if (SCST_SENSE_VALID(prm->sense_buffer)) {
-+ int i;
-+ ctio1 = (ctio7_status1_entry_t *)ctio;
-+ if (q2t_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
-+ ctio1->flags |= __constant_cpu_to_le16(
-+ CTIO7_FLAGS_EXPLICIT_CONFORM |
-+ CTIO7_FLAGS_CONFORM_REQ);
-+ }
-+ ctio1->flags &= ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
-+ ctio1->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
-+ ctio1->scsi_status |= __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
-+ ctio1->sense_length = cpu_to_le16(prm->sense_buffer_len);
-+ for (i = 0; i < prm->sense_buffer_len/4; i++)
-+ ((uint32_t *)ctio1->sense_data)[i] =
-+ cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
-+#if 0
-+ if (unlikely((prm->sense_buffer_len % 4) != 0)) {
-+ static int q;
-+ if (q < 10) {
-+ PRINT_INFO("qla2x00t(%ld): %d bytes of sense "
-+ "lost", prm->tgt->ha->instance,
-+ prm->sense_buffer_len % 4);
-+ q++;
-+ }
-+ }
-+#endif
-+ } else {
-+ ctio1 = (ctio7_status1_entry_t *)ctio;
-+ ctio1->flags &= ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
-+ ctio1->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
-+ ctio1->sense_length = 0;
-+ memset(ctio1->sense_data, 0, sizeof(ctio1->sense_data));
-+ }
-+
-+	/* Can sense longer than 24 bytes ever happen here? */
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int __q24_xmit_response(struct q2t_cmd *cmd, int xmit_type)
-+{
-+ int res;
-+ unsigned long flags;
-+ scsi_qla_host_t *ha, *pha;
-+ struct q2t_prm prm;
-+ ctio7_status0_entry_t *pkt;
-+
-+ TRACE_ENTRY();
-+
-+ memset(&prm, 0, sizeof(prm));
-+
-+ res = q2t_pre_xmit_response(cmd, &prm, xmit_type, &flags);
-+ if (unlikely(res != SCST_TGT_RES_SUCCESS)) {
-+ if (res == Q2T_PRE_XMIT_RESP_CMD_ABORTED)
-+ res = SCST_TGT_RES_SUCCESS;
-+ goto out;
-+ }
-+
-+ /* Here pha->hardware_lock already locked */
-+
-+ ha = prm.tgt->ha;
-+ pha = to_qla_parent(ha);
-+
-+ res = q24_build_ctio_pkt(&prm);
-+ if (unlikely(res != SCST_TGT_RES_SUCCESS))
-+ goto out_unmap_unlock;
-+
-+ pkt = (ctio7_status0_entry_t *)prm.pkt;
-+
-+ if (q2t_has_data(cmd) && (xmit_type & Q2T_XMIT_DATA)) {
-+ pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
-+ CTIO7_FLAGS_STATUS_MODE_0);
-+
-+ q24_load_data_segments(&prm);
-+
-+ if (prm.add_status_pkt == 0) {
-+ if (xmit_type & Q2T_XMIT_STATUS) {
-+ pkt->scsi_status = cpu_to_le16(prm.rq_result);
-+ pkt->residual = cpu_to_le32(prm.residual);
-+ pkt->flags |= __constant_cpu_to_le16(
-+ CTIO7_FLAGS_SEND_STATUS);
-+ if (q2t_need_explicit_conf(ha, cmd, 0)) {
-+ pkt->flags |= __constant_cpu_to_le16(
-+ CTIO7_FLAGS_EXPLICIT_CONFORM |
-+ CTIO7_FLAGS_CONFORM_REQ);
-+ }
-+ }
-+ } else {
-+ /*
-+ * We have already made sure that there is sufficient
-+ * amount of request entries to not drop HW lock in
-+ * req_pkt().
-+ */
-+ ctio7_status1_entry_t *ctio =
-+ (ctio7_status1_entry_t *)q2t_get_req_pkt(ha);
-+
-+ TRACE_DBG("%s", "Building additional status packet");
-+
-+ memcpy(ctio, pkt, sizeof(*ctio));
-+ ctio->common.entry_count = 1;
-+ ctio->common.dseg_count = 0;
-+ ctio->flags &= ~__constant_cpu_to_le16(
-+ CTIO7_FLAGS_DATA_IN);
-+
-+			/* Real finish is the status CTIO's finish */
-+ pkt->common.handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
-+ pkt->flags |= __constant_cpu_to_le16(
-+ CTIO7_FLAGS_DONT_RET_CTIO);
-+ q24_init_ctio_ret_entry((ctio7_status0_entry_t *)ctio,
-+ &prm);
-+ TRACE_BUFFER("Status CTIO7", ctio, REQUEST_ENTRY_SIZE);
-+ }
-+ } else
-+ q24_init_ctio_ret_entry(pkt, &prm);
-+
-+ cmd->state = Q2T_STATE_PROCESSED; /* Mid-level is done processing */
-+
-+ TRACE_BUFFER("Xmitting CTIO7", pkt, REQUEST_ENTRY_SIZE);
-+
-+ q2t_exec_queue(ha);
-+
-+out_unlock:
-+ /* Release ring specific lock */
-+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_unmap_unlock:
-+ if (cmd->sg_mapped)
-+ q2t_unmap_sg(ha, cmd);
-+ goto out_unlock;
-+}
-+
-+static int __q2t_rdy_to_xfer(struct q2t_cmd *cmd)
-+{
-+ int res = SCST_TGT_RES_SUCCESS;
-+ unsigned long flags;
-+ scsi_qla_host_t *ha, *pha;
-+ struct q2t_tgt *tgt = cmd->tgt;
-+ struct q2t_prm prm;
-+ void *p;
-+
-+ TRACE_ENTRY();
-+
-+ memset(&prm, 0, sizeof(prm));
-+ prm.cmd = cmd;
-+ prm.tgt = tgt;
-+ prm.sg = NULL;
-+ prm.req_cnt = 1;
-+ ha = tgt->ha;
-+ pha = to_qla_parent(ha);
-+
-+ /* Send marker if required */
-+ if (q2t_issue_marker(ha, 0) != QLA_SUCCESS) {
-+ res = SCST_TGT_RES_FATAL_ERROR;
-+ goto out;
-+ }
-+
-+ TRACE_DBG("CTIO_start: ha(%d)", (int)ha->instance);
-+
-+ /* Calculate number of entries and segments required */
-+ if (q2t_pci_map_calc_cnt(&prm) != 0) {
-+ res = SCST_TGT_RES_QUEUE_FULL;
-+ goto out;
-+ }
-+
-+ /* Acquire ring specific lock */
-+ spin_lock_irqsave(&pha->hardware_lock, flags);
-+
-+	/* Does the F/W have enough IOCBs for this request? */
-+ res = q2t_check_reserve_free_req(ha, prm.req_cnt);
-+ if (res != SCST_TGT_RES_SUCCESS)
-+ goto out_unlock_free_unmap;
-+
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ ctio7_status0_entry_t *pkt;
-+ res = q24_build_ctio_pkt(&prm);
-+ if (unlikely(res != SCST_TGT_RES_SUCCESS))
-+ goto out_unlock_free_unmap;
-+ pkt = (ctio7_status0_entry_t *)prm.pkt;
-+ pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
-+ CTIO7_FLAGS_STATUS_MODE_0);
-+ q24_load_data_segments(&prm);
-+ p = pkt;
-+ } else {
-+ ctio_common_entry_t *pkt;
-+ q2x_build_ctio_pkt(&prm);
-+ pkt = (ctio_common_entry_t *)prm.pkt;
-+ pkt->flags = __constant_cpu_to_le16(OF_FAST_POST | OF_DATA_OUT);
-+ q2x_load_data_segments(&prm);
-+ p = pkt;
-+ }
-+
-+ cmd->state = Q2T_STATE_NEED_DATA;
-+
-+ TRACE_BUFFER("Xfering", p, REQUEST_ENTRY_SIZE);
-+
-+ q2t_exec_queue(ha);
-+
-+out_unlock:
-+ /* Release ring specific lock */
-+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_unlock_free_unmap:
-+ if (cmd->sg_mapped)
-+ q2t_unmap_sg(ha, cmd);
-+ goto out_unlock;
-+}
-+
-+static int q2t_rdy_to_xfer(struct scst_cmd *scst_cmd)
-+{
-+ int res;
-+ struct q2t_cmd *cmd;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE(TRACE_SCSI, "qla2x00t: tag=%lld", scst_cmd_get_tag(scst_cmd));
-+
-+ cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
-+ cmd->bufflen = scst_cmd_get_write_fields(scst_cmd, &cmd->sg,
-+ &cmd->sg_cnt);
-+ cmd->data_direction = scst_cmd_get_data_direction(scst_cmd);
-+ cmd->dma_data_direction = scst_to_tgt_dma_dir(cmd->data_direction);
-+
-+ res = __q2t_rdy_to_xfer(cmd);
-+
-+ TRACE_EXIT();
-+ return res;
-+}
-+
-+/* If hardware_lock held on entry, might drop it, then reacquire */
-+static void q2x_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
-+ atio_entry_t *atio, int ha_locked)
-+{
-+ ctio_ret_entry_t *ctio;
-+ unsigned long flags = 0; /* to stop compiler's warning */
-+ int do_tgt_cmd_done = 0;
-+ scsi_qla_host_t *pha = to_qla_parent(ha);
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Sending TERM EXCH CTIO (ha=%p)", ha);
-+
-+ /* Send marker if required */
-+ if (q2t_issue_marker(ha, ha_locked) != QLA_SUCCESS)
-+ goto out;
-+
-+ if (!ha_locked)
-+ spin_lock_irqsave(&pha->hardware_lock, flags);
-+
-+ ctio = (ctio_ret_entry_t *)q2t_req_pkt(ha);
-+ if (ctio == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
-+ "request packet", ha->instance, __func__);
-+ goto out_unlock;
-+ }
-+
-+ ctio->entry_type = CTIO_RET_TYPE;
-+ ctio->entry_count = 1;
-+ if (cmd != NULL) {
-+ if (cmd->state < Q2T_STATE_PROCESSED) {
-+ PRINT_ERROR("qla2x00t(%ld): Terminating cmd %p with "
-+ "incorrect state %d", ha->instance, cmd,
-+ cmd->state);
-+ } else
-+ do_tgt_cmd_done = 1;
-+ }
-+ ctio->handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
-+
-+ /* Set IDs */
-+ SET_TARGET_ID(ha, ctio->target, GET_TARGET_ID(ha, atio));
-+ ctio->rx_id = atio->rx_id;
-+
-+ /* Most likely, it isn't needed */
-+ ctio->residual = atio->data_length;
-+ if (ctio->residual != 0)
-+ ctio->scsi_status |= SS_RESIDUAL_UNDER;
-+
-+ ctio->flags = __constant_cpu_to_le16(OF_FAST_POST | OF_TERM_EXCH |
-+ OF_NO_DATA | OF_SS_MODE_1);
-+ ctio->flags |= __constant_cpu_to_le16(OF_INC_RC);
-+
-+ TRACE_BUFFER("CTIO TERM EXCH packet data", ctio, REQUEST_ENTRY_SIZE);
-+
-+ q2t_exec_queue(ha);
-+
-+out_unlock:
-+ if (!ha_locked)
-+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
-+
-+ if (do_tgt_cmd_done) {
-+ if (!ha_locked && !in_interrupt()) {
-+ msleep(250); /* just in case */
-+ scst_tgt_cmd_done(cmd->scst_cmd, SCST_CONTEXT_DIRECT);
-+ } else
-+ scst_tgt_cmd_done(cmd->scst_cmd, SCST_CONTEXT_TASKLET);
-+ /* !! At this point cmd could be already freed !! */
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* If hardware_lock held on entry, might drop it, then reacquire */
-+static void q24_send_term_exchange(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
-+ atio7_entry_t *atio, int ha_locked)
-+{
-+ ctio7_status1_entry_t *ctio;
-+ unsigned long flags = 0; /* to stop compiler's warning */
-+ int do_tgt_cmd_done = 0;
-+ scsi_qla_host_t *pha = to_qla_parent(ha);
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Sending TERM EXCH CTIO7 (ha=%p)", ha);
-+
-+ /* Send marker if required */
-+ if (q2t_issue_marker(ha, ha_locked) != QLA_SUCCESS)
-+ goto out;
-+
-+ if (!ha_locked)
-+ spin_lock_irqsave(&pha->hardware_lock, flags);
-+
-+ ctio = (ctio7_status1_entry_t *)q2t_req_pkt(ha);
-+ if (ctio == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
-+ "request packet", ha->instance, __func__);
-+ goto out_unlock;
-+ }
-+
-+ ctio->common.entry_type = CTIO_TYPE7;
-+ ctio->common.entry_count = 1;
-+ if (cmd != NULL) {
-+ ctio->common.nport_handle = cmd->loop_id;
-+ if (cmd->state < Q2T_STATE_PROCESSED) {
-+ PRINT_ERROR("qla2x00t(%ld): Terminating cmd %p with "
-+ "incorrect state %d", ha->instance, cmd,
-+ cmd->state);
-+ } else
-+ do_tgt_cmd_done = 1;
-+ } else
-+ ctio->common.nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
-+ ctio->common.handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
-+ ctio->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
-+ ctio->common.vp_index = ha->vp_idx;
-+ ctio->common.initiator_id[0] = atio->fcp_hdr.s_id[2];
-+ ctio->common.initiator_id[1] = atio->fcp_hdr.s_id[1];
-+ ctio->common.initiator_id[2] = atio->fcp_hdr.s_id[0];
-+ ctio->common.exchange_addr = atio->exchange_addr;
-+ ctio->flags = (atio->attr << 9) | __constant_cpu_to_le16(
-+ CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
-+ ctio->ox_id = swab16(atio->fcp_hdr.ox_id);
-+
-+ /* Most likely, it isn't needed */
-+ ctio->residual = get_unaligned((uint32_t *)
-+ &atio->fcp_cmnd.add_cdb[atio->fcp_cmnd.add_cdb_len]);
-+ if (ctio->residual != 0)
-+ ctio->scsi_status |= SS_RESIDUAL_UNDER;
-+
-+ TRACE_BUFFER("CTIO7 TERM EXCH packet data", ctio, REQUEST_ENTRY_SIZE);
-+
-+ q2t_exec_queue(ha);
-+
-+out_unlock:
-+ if (!ha_locked)
-+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
-+
-+ if (do_tgt_cmd_done) {
-+ if (!ha_locked && !in_interrupt()) {
-+ msleep(250); /* just in case */
-+ scst_tgt_cmd_done(cmd->scst_cmd, SCST_CONTEXT_DIRECT);
-+ } else
-+ scst_tgt_cmd_done(cmd->scst_cmd, SCST_CONTEXT_TASKLET);
-+ /* !! At this point cmd could be already freed !! */
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static inline void q2t_free_cmd(struct q2t_cmd *cmd)
-+{
-+ EXTRACHECKS_BUG_ON(cmd->sg_mapped);
-+
-+ if (unlikely(cmd->free_sg))
-+ kfree(cmd->sg);
-+ kmem_cache_free(q2t_cmd_cachep, cmd);
-+}
-+
-+static void q2t_on_free_cmd(struct scst_cmd *scst_cmd)
-+{
-+ struct q2t_cmd *cmd;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE(TRACE_SCSI, "qla2x00t: Freeing command %p, tag %lld",
-+ scst_cmd, scst_cmd_get_tag(scst_cmd));
-+
-+ cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
-+ scst_cmd_set_tgt_priv(scst_cmd, NULL);
-+
-+ q2t_free_cmd(cmd);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
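-+/*
-+ * An SRR arrives in two halves: a CTIO completed with CTIO_SRR_RECEIVED
-+ * and an immediate notify IOCB.  Both halves are queued with the same
-+ * srr_id; once a matching pair is on srr_ctio_list/srr_imm_list, srr_work
-+ * is scheduled to re-drive the command.
-+ */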
-+/* pha->hardware_lock supposed to be held on entry */
-+static int q2t_prepare_srr_ctio(scsi_qla_host_t *ha, struct q2t_cmd *cmd,
-+ void *ctio)
-+{
-+ struct srr_ctio *sc;
-+ struct q2t_tgt *tgt = ha->tgt;
-+ int res = 0;
-+ struct srr_imm *imm;
-+
-+ tgt->ctio_srr_id++;
-+
-+ TRACE_MGMT_DBG("qla2x00t(%ld): CTIO with SRR "
-+ "status received", ha->instance);
-+
-+ if (ctio == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): SRR CTIO, "
-+ "but ctio is NULL", ha->instance);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (cmd->scst_cmd != NULL)
-+ scst_update_hw_pending_start(cmd->scst_cmd);
-+
-+ sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
-+ if (sc != NULL) {
-+ sc->cmd = cmd;
-+ /* IRQ is already OFF */
-+ spin_lock(&tgt->srr_lock);
-+ sc->srr_id = tgt->ctio_srr_id;
-+ list_add_tail(&sc->srr_list_entry,
-+ &tgt->srr_ctio_list);
-+ TRACE_MGMT_DBG("CTIO SRR %p added (id %d)",
-+ sc, sc->srr_id);
-+ if (tgt->imm_srr_id == tgt->ctio_srr_id) {
-+ int found = 0;
-+ list_for_each_entry(imm, &tgt->srr_imm_list,
-+ srr_list_entry) {
-+ if (imm->srr_id == sc->srr_id) {
-+ found = 1;
-+ break;
-+ }
-+ }
-+ if (found) {
-+ TRACE_MGMT_DBG("%s", "Scheduling srr work");
-+ schedule_work(&tgt->srr_work);
-+ } else {
-+ PRINT_ERROR("qla2x00t(%ld): imm_srr_id "
-+ "== ctio_srr_id (%d), but there is no "
-+ "corresponding SRR IMM, deleting CTIO "
-+ "SRR %p", ha->instance, tgt->ctio_srr_id,
-+ sc);
-+ list_del(&sc->srr_list_entry);
-+ spin_unlock(&tgt->srr_lock);
-+
-+ kfree(sc);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ }
-+ spin_unlock(&tgt->srr_lock);
-+ } else {
-+ struct srr_imm *ti;
-+ PRINT_ERROR("qla2x00t(%ld): Unable to allocate SRR CTIO entry",
-+ ha->instance);
-+ spin_lock(&tgt->srr_lock);
-+ list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
-+ srr_list_entry) {
-+ if (imm->srr_id == tgt->ctio_srr_id) {
-+ TRACE_MGMT_DBG("IMM SRR %p deleted "
-+ "(id %d)", imm, imm->srr_id);
-+ list_del(&imm->srr_list_entry);
-+ q2t_reject_free_srr_imm(ha, imm, 1);
-+ }
-+ }
-+ spin_unlock(&tgt->srr_lock);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/*
-+ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
-+ */
-+static int q2t_term_ctio_exchange(scsi_qla_host_t *ha, void *ctio,
-+ struct q2t_cmd *cmd, uint32_t status)
-+{
-+ int term = 0;
-+
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ if (ctio != NULL) {
-+ ctio7_fw_entry_t *c = (ctio7_fw_entry_t *)ctio;
-+ term = !(c->flags &
-+ __constant_cpu_to_le16(OF_TERM_EXCH));
-+ } else
-+ term = 1;
-+ if (term) {
-+ q24_send_term_exchange(ha, cmd,
-+ &cmd->atio.atio7, 1);
-+ }
-+ } else {
-+ if (status != CTIO_SUCCESS)
-+ q2x_modify_command_count(ha, 1, 0);
-+#if 0 /* it seems this isn't needed */
-+ if (ctio != NULL) {
-+ ctio_common_entry_t *c = (ctio_common_entry_t *)ctio;
-+ term = !(c->flags &
-+ __constant_cpu_to_le16(
-+ CTIO7_FLAGS_TERMINATE));
-+ } else
-+ term = 1;
-+ if (term) {
-+ q2x_send_term_exchange(ha, cmd,
-+ &cmd->atio.atio2x, 1);
-+ }
-+#endif
-+ }
-+ return term;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static inline struct q2t_cmd *q2t_get_cmd(scsi_qla_host_t *ha, uint32_t handle)
-+{
-+ handle--;
-+ if (ha->cmds[handle] != NULL) {
-+ struct q2t_cmd *cmd = ha->cmds[handle];
-+ ha->cmds[handle] = NULL;
-+ return cmd;
-+ } else
-+ return NULL;
-+}
-+
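-+/*
-+ * Maps a completed CTIO back to its q2t_cmd: normally via the (1-based)
-+ * completion handle, otherwise, for 2xxx NULL-handle CTIOs, by finding
-+ * the SCST command by tag within the initiator's session.
-+ */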
-+/* pha->hardware_lock supposed to be held on entry */
-+static struct q2t_cmd *q2t_ctio_to_cmd(scsi_qla_host_t *ha, uint32_t handle,
-+ void *ctio)
-+{
-+ struct q2t_cmd *cmd = NULL;
-+
-+ /* Clear out internal marks */
-+ handle &= ~(CTIO_COMPLETION_HANDLE_MARK | CTIO_INTERMEDIATE_HANDLE_MARK);
-+
-+ if (handle != Q2T_NULL_HANDLE) {
-+ if (unlikely(handle == Q2T_SKIP_HANDLE)) {
-+ TRACE_DBG("%s", "SKIP_HANDLE CTIO");
-+ goto out;
-+ }
-+ /* handle-1 is actually used */
-+ if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
-+ PRINT_ERROR("qla2x00t(%ld): Wrong handle %x "
-+ "received", ha->instance, handle);
-+ goto out;
-+ }
-+ cmd = q2t_get_cmd(ha, handle);
-+ if (unlikely(cmd == NULL)) {
-+ PRINT_WARNING("qla2x00t(%ld): Suspicious: unable to "
-+ "find the command with handle %x",
-+ ha->instance, handle);
-+ goto out;
-+ }
-+ } else if (ctio != NULL) {
-+ uint16_t loop_id;
-+ int tag;
-+ struct q2t_sess *sess;
-+ struct scst_cmd *scst_cmd;
-+
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ /* We can't get loop ID from CTIO7 */
-+ PRINT_ERROR("qla2x00t(%ld): Wrong CTIO received: "
-+ "QLA24xx doesn't support NULL handles",
-+ ha->instance);
-+ goto out;
-+ } else {
-+ ctio_common_entry_t *c = (ctio_common_entry_t *)ctio;
-+ loop_id = GET_TARGET_ID(ha, c);
-+ tag = c->rx_id;
-+ }
-+
-+ sess = q2t_find_sess_by_loop_id(ha->tgt, loop_id);
-+ if (sess == NULL) {
-+ PRINT_WARNING("qla2x00t(%ld): Suspicious: "
-+ "ctio_completion for non-existing session "
-+ "(loop_id %d, tag %d)",
-+ ha->instance, loop_id, tag);
-+ goto out;
-+ }
-+
-+ scst_cmd = scst_find_cmd_by_tag(sess->scst_sess, tag);
-+ if (scst_cmd == NULL) {
-+ PRINT_WARNING("qla2x00t(%ld): Suspicious: unable to "
-+ "find the command with tag %d (loop_id %d)",
-+ ha->instance, tag, loop_id);
-+ goto out;
-+ }
-+
-+ cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
-+ TRACE_DBG("Found q2t_cmd %p (tag %d)", cmd, tag);
-+ }
-+
-+out:
-+ return cmd;
-+}
-+
-+/*
-+ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
-+ */
-+static void q2t_do_ctio_completion(scsi_qla_host_t *ha, uint32_t handle,
-+ uint32_t status, void *ctio)
-+{
-+ struct scst_cmd *scst_cmd;
-+ struct q2t_cmd *cmd;
-+ enum scst_exec_context context;
-+
-+ TRACE_ENTRY();
-+
-+#ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
-+ context = SCST_CONTEXT_THREAD;
-+#else
-+ context = SCST_CONTEXT_TASKLET;
-+#endif
-+
-+ TRACE(TRACE_DEBUG|TRACE_SCSI, "qla2x00t(%ld): handle(ctio %p "
-+ "status %#x) <- %08x", ha->instance, ctio, status, handle);
-+
-+ if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
-+ /* That could happen only in case of an error/reset/abort */
-+ if (status != CTIO_SUCCESS) {
-+ TRACE_MGMT_DBG("Intermediate CTIO received (status %x)",
-+ status);
-+ }
-+ goto out;
-+ }
-+
-+ cmd = q2t_ctio_to_cmd(ha, handle, ctio);
-+ if (cmd == NULL) {
-+ if (status != CTIO_SUCCESS)
-+ q2t_term_ctio_exchange(ha, ctio, NULL, status);
-+ goto out;
-+ }
-+
-+ scst_cmd = cmd->scst_cmd;
-+
-+ if (cmd->sg_mapped)
-+ q2t_unmap_sg(ha, cmd);
-+
-+ if (unlikely(status != CTIO_SUCCESS)) {
-+ switch (status & 0xFFFF) {
-+ case CTIO_LIP_RESET:
-+ case CTIO_TARGET_RESET:
-+ case CTIO_ABORTED:
-+ case CTIO_TIMEOUT:
-+ case CTIO_INVALID_RX_ID:
-+ /* They are OK */
-+ TRACE(TRACE_MINOR_AND_MGMT_DBG,
-+ "qla2x00t(%ld): CTIO with "
-+ "status %#x received, state %x, scst_cmd %p, "
-+ "op %x (LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
-+ "TIMEOUT=b, INVALID_RX_ID=8)", ha->instance,
-+ status, cmd->state, scst_cmd, scst_cmd->cdb[0]);
-+ break;
-+
-+ case CTIO_PORT_LOGGED_OUT:
-+ case CTIO_PORT_UNAVAILABLE:
-+ PRINT_INFO("qla2x00t(%ld): CTIO with PORT LOGGED "
-+ "OUT (29) or PORT UNAVAILABLE (28) status %x "
-+ "received (state %x, scst_cmd %p, op %x)",
-+ ha->instance, status, cmd->state, scst_cmd,
-+ scst_cmd->cdb[0]);
-+ break;
-+
-+ case CTIO_SRR_RECEIVED:
-+ if (q2t_prepare_srr_ctio(ha, cmd, ctio) != 0)
-+ break;
-+ else
-+ goto out;
-+
-+ default:
-+ PRINT_ERROR("qla2x00t(%ld): CTIO with error status "
-+ "0x%x received (state %x, scst_cmd %p, op %x)",
-+ ha->instance, status, cmd->state, scst_cmd,
-+ scst_cmd->cdb[0]);
-+ break;
-+ }
-+
-+ if (cmd->state != Q2T_STATE_NEED_DATA)
-+ if (q2t_term_ctio_exchange(ha, ctio, cmd, status))
-+ goto out;
-+ }
-+
-+ if (cmd->state == Q2T_STATE_PROCESSED) {
-+ TRACE_DBG("Command %p finished", cmd);
-+ } else if (cmd->state == Q2T_STATE_NEED_DATA) {
-+ int rx_status = SCST_RX_STATUS_SUCCESS;
-+
-+ cmd->state = Q2T_STATE_DATA_IN;
-+
-+ if (unlikely(status != CTIO_SUCCESS))
-+ rx_status = SCST_RX_STATUS_ERROR;
-+ else
-+ cmd->write_data_transferred = 1;
-+
-+ TRACE_DBG("Data received, context %x, rx_status %d",
-+ context, rx_status);
-+
-+ scst_rx_data(scst_cmd, rx_status, context);
-+ goto out;
-+ } else if (cmd->state == Q2T_STATE_ABORTED) {
-+ TRACE_MGMT_DBG("Aborted command %p (tag %d) finished", cmd,
-+ cmd->tag);
-+ } else {
-+ PRINT_ERROR("qla2x00t(%ld): A command in state (%d) should "
-+ "not return a CTIO complete", ha->instance, cmd->state);
-+ }
-+
-+ if (unlikely(status != CTIO_SUCCESS)) {
-+ TRACE_MGMT_DBG("%s", "Finishing failed CTIO");
-+ scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_FAILED);
-+ }
-+
-+ scst_tgt_cmd_done(scst_cmd, context);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+/* called via callback from qla2xxx */
-+static void q2x_ctio_completion(scsi_qla_host_t *ha, uint32_t handle)
-+{
-+ struct q2t_tgt *tgt = ha->tgt;
-+
-+ TRACE_ENTRY();
-+
-+ if (likely(tgt != NULL)) {
-+ tgt->irq_cmd_count++;
-+ q2t_do_ctio_completion(ha, handle, CTIO_SUCCESS, NULL);
-+ tgt->irq_cmd_count--;
-+ } else {
-+ TRACE_DBG("CTIO, but target mode not enabled (ha %p handle "
-+ "%#x)", ha, handle);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static int q2x_do_send_cmd_to_scst(struct q2t_cmd *cmd)
-+{
-+ int res = 0;
-+ struct q2t_sess *sess = cmd->sess;
-+ uint16_t lun;
-+ atio_entry_t *atio = &cmd->atio.atio2x;
-+ scst_data_direction dir;
-+ int context;
-+
-+ TRACE_ENTRY();
-+
-+	/* convert the LE wire value to the network byte order SCST expects */
-+ lun = swab16(le16_to_cpu(atio->lun));
-+ cmd->scst_cmd = scst_rx_cmd(sess->scst_sess, (uint8_t *)&lun,
-+ sizeof(lun), atio->cdb, Q2T_MAX_CDB_LEN,
-+ SCST_ATOMIC);
-+
-+ if (cmd->scst_cmd == NULL) {
-+ PRINT_ERROR("%s", "qla2x00t: scst_rx_cmd() failed");
-+ res = -EFAULT;
-+ goto out;
-+ }
-+
-+ cmd->tag = atio->rx_id;
-+ scst_cmd_set_tag(cmd->scst_cmd, cmd->tag);
-+ scst_cmd_set_tgt_priv(cmd->scst_cmd, cmd);
-+
-+ if ((atio->execution_codes & (ATIO_EXEC_READ | ATIO_EXEC_WRITE)) ==
-+ (ATIO_EXEC_READ | ATIO_EXEC_WRITE))
-+ dir = SCST_DATA_BIDI;
-+ else if (atio->execution_codes & ATIO_EXEC_READ)
-+ dir = SCST_DATA_READ;
-+ else if (atio->execution_codes & ATIO_EXEC_WRITE)
-+ dir = SCST_DATA_WRITE;
-+ else
-+ dir = SCST_DATA_NONE;
-+ scst_cmd_set_expected(cmd->scst_cmd, dir,
-+ le32_to_cpu(atio->data_length));
-+
-+ switch (atio->task_codes) {
-+ case ATIO_SIMPLE_QUEUE:
-+ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_SIMPLE);
-+ break;
-+ case ATIO_HEAD_OF_QUEUE:
-+ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_HEAD_OF_QUEUE);
-+ break;
-+ case ATIO_ORDERED_QUEUE:
-+ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_ORDERED);
-+ break;
-+ case ATIO_ACA_QUEUE:
-+ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_ACA);
-+ break;
-+ case ATIO_UNTAGGED:
-+ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_UNTAGGED);
-+ break;
-+ default:
-+		PRINT_ERROR("qla2x00t: unknown task code %x, using "
-+			"ORDERED instead", atio->task_codes);
-+ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_ORDERED);
-+ break;
-+ }
-+
-+#ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
-+ context = SCST_CONTEXT_THREAD;
-+#else
-+ context = SCST_CONTEXT_TASKLET;
-+#endif
-+
-+ TRACE_DBG("Context %x", context);
-+ TRACE(TRACE_SCSI, "qla2x00t: START Command (tag %d, queue_type %d)",
-+ cmd->tag, scst_cmd_get_queue_type(cmd->scst_cmd));
-+ scst_cmd_init_done(cmd->scst_cmd, context);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static int q24_do_send_cmd_to_scst(struct q2t_cmd *cmd)
-+{
-+ int res = 0;
-+ struct q2t_sess *sess = cmd->sess;
-+ atio7_entry_t *atio = &cmd->atio.atio7;
-+ scst_data_direction dir;
-+ int context;
-+
-+ TRACE_ENTRY();
-+
-+ cmd->scst_cmd = scst_rx_cmd(sess->scst_sess,
-+ (uint8_t *)&atio->fcp_cmnd.lun, sizeof(atio->fcp_cmnd.lun),
-+ atio->fcp_cmnd.cdb, sizeof(atio->fcp_cmnd.cdb) +
-+ atio->fcp_cmnd.add_cdb_len, SCST_ATOMIC);
-+
-+ if (cmd->scst_cmd == NULL) {
-+ PRINT_ERROR("%s", "qla2x00t: scst_rx_cmd() failed");
-+ res = -EFAULT;
-+ goto out;
-+ }
-+
-+ cmd->tag = atio->exchange_addr;
-+ scst_cmd_set_tag(cmd->scst_cmd, cmd->tag);
-+ scst_cmd_set_tgt_priv(cmd->scst_cmd, cmd);
-+
-+ if (atio->fcp_cmnd.rddata && atio->fcp_cmnd.wrdata)
-+ dir = SCST_DATA_BIDI;
-+ else if (atio->fcp_cmnd.rddata)
-+ dir = SCST_DATA_READ;
-+ else if (atio->fcp_cmnd.wrdata)
-+ dir = SCST_DATA_WRITE;
-+ else
-+ dir = SCST_DATA_NONE;
-+ scst_cmd_set_expected(cmd->scst_cmd, dir,
-+ be32_to_cpu(get_unaligned((uint32_t *)
-+ &atio->fcp_cmnd.add_cdb[atio->fcp_cmnd.add_cdb_len])));
-+
-+ switch (atio->fcp_cmnd.task_attr) {
-+ case ATIO_SIMPLE_QUEUE:
-+ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_SIMPLE);
-+ break;
-+ case ATIO_HEAD_OF_QUEUE:
-+ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_HEAD_OF_QUEUE);
-+ break;
-+ case ATIO_ORDERED_QUEUE:
-+ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_ORDERED);
-+ break;
-+ case ATIO_ACA_QUEUE:
-+ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_ACA);
-+ break;
-+ case ATIO_UNTAGGED:
-+ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_UNTAGGED);
-+ break;
-+ default:
-+		PRINT_ERROR("qla2x00t: unknown task code %x, using "
-+			"ORDERED instead", atio->fcp_cmnd.task_attr);
-+ scst_cmd_set_queue_type(cmd->scst_cmd, SCST_CMD_QUEUE_ORDERED);
-+ break;
-+ }
-+
-+#ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
-+ context = SCST_CONTEXT_THREAD;
-+#else
-+ context = SCST_CONTEXT_TASKLET;
-+#endif
-+
-+ TRACE_DBG("Context %x", context);
-+ TRACE(TRACE_SCSI, "qla2x00t: START Command %p (tag %d, queue type %x)",
-+ cmd, cmd->tag, scst_cmd_get_queue_type(cmd->scst_cmd));
-+ scst_cmd_init_done(cmd->scst_cmd, context);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static int q2t_do_send_cmd_to_scst(scsi_qla_host_t *ha,
-+ struct q2t_cmd *cmd, struct q2t_sess *sess)
-+{
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ cmd->sess = sess;
-+ cmd->loop_id = sess->loop_id;
-+ cmd->conf_compl_supported = sess->conf_compl_supported;
-+
-+ if (IS_FWI2_CAPABLE(ha))
-+ res = q24_do_send_cmd_to_scst(cmd);
-+ else
-+ res = q2x_do_send_cmd_to_scst(cmd);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
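-+/*
-+ * Entry point for a new ATIO: allocates a q2t_cmd, looks the session up by
-+ * s_id (24xx) or loop ID (2xxx) and passes the command to SCST.  Commands
-+ * for not yet known sessions are deferred to the sess_work thread.
-+ */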
-+/* pha->hardware_lock supposed to be held on entry */
-+static int q2t_send_cmd_to_scst(scsi_qla_host_t *ha, atio_t *atio)
-+{
-+ int res = 0;
-+ struct q2t_tgt *tgt = ha->tgt;
-+ struct q2t_sess *sess;
-+ struct q2t_cmd *cmd;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(tgt->tgt_stop)) {
-+		TRACE_MGMT_DBG("New command while target %p is shutting "
-+			"down", tgt);
-+ res = -EFAULT;
-+ goto out;
-+ }
-+
-+ cmd = kmem_cache_zalloc(q2t_cmd_cachep, GFP_ATOMIC);
-+ if (cmd == NULL) {
-+ TRACE(TRACE_OUT_OF_MEM, "qla2x00t(%ld): Allocation of cmd "
-+ "failed", ha->instance);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ memcpy(&cmd->atio.atio2x, atio, sizeof(*atio));
-+ cmd->state = Q2T_STATE_NEW;
-+ cmd->tgt = ha->tgt;
-+
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ atio7_entry_t *a = (atio7_entry_t *)atio;
-+ sess = q2t_find_sess_by_s_id(tgt, a->fcp_hdr.s_id);
-+ if (unlikely(sess == NULL)) {
-+ TRACE_MGMT_DBG("qla2x00t(%ld): Unable to find "
-+ "wwn login (s_id %x:%x:%x), trying to create "
-+ "it manually", ha->instance,
-+ a->fcp_hdr.s_id[0], a->fcp_hdr.s_id[1],
-+ a->fcp_hdr.s_id[2]);
-+ goto out_sched;
-+ }
-+ } else {
-+ sess = q2t_find_sess_by_loop_id(tgt,
-+ GET_TARGET_ID(ha, (atio_entry_t *)atio));
-+ if (unlikely(sess == NULL)) {
-+ TRACE_MGMT_DBG("qla2x00t(%ld): Unable to find "
-+ "wwn login (loop_id=%d), trying to create it "
-+ "manually", ha->instance,
-+ GET_TARGET_ID(ha, (atio_entry_t *)atio));
-+ goto out_sched;
-+ }
-+ }
-+
-+ res = q2t_do_send_cmd_to_scst(ha, cmd, sess);
-+ if (unlikely(res != 0))
-+ goto out_free_cmd;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free_cmd:
-+ q2t_free_cmd(cmd);
-+ goto out;
-+
-+out_sched:
-+ if (atio->entry_count > 1) {
-+		TRACE_MGMT_DBG("Dropping multi-entry cmd %p", cmd);
-+ res = -EBUSY;
-+ goto out_free_cmd;
-+ }
-+ res = q2t_sched_sess_work(tgt, Q2T_SESS_WORK_CMD, &cmd, sizeof(cmd));
-+ if (res != 0)
-+ goto out_free_cmd;
-+ goto out;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static int q2t_issue_task_mgmt(struct q2t_sess *sess, uint8_t *lun,
-+ int lun_size, int fn, void *iocb, int flags)
-+{
-+ int res = 0, rc = -1;
-+ struct q2t_mgmt_cmd *mcmd;
-+
-+ TRACE_ENTRY();
-+
-+ mcmd = mempool_alloc(q2t_mgmt_cmd_mempool, GFP_ATOMIC);
-+ if (mcmd == NULL) {
-+ PRINT_CRIT_ERROR("qla2x00t(%ld): Allocation of management "
-+ "command failed, some commands and their data could "
-+ "leak", sess->tgt->ha->instance);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ memset(mcmd, 0, sizeof(*mcmd));
-+
-+ mcmd->sess = sess;
-+ if (iocb) {
-+ memcpy(&mcmd->orig_iocb.notify_entry, iocb,
-+ sizeof(mcmd->orig_iocb.notify_entry));
-+ }
-+ mcmd->flags = flags;
-+
-+ switch (fn) {
-+ case Q2T_CLEAR_ACA:
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): CLEAR_ACA received",
-+ sess->tgt->ha->instance);
-+ rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_CLEAR_ACA,
-+ lun, lun_size, SCST_ATOMIC, mcmd);
-+ break;
-+
-+ case Q2T_TARGET_RESET:
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): TARGET_RESET received",
-+ sess->tgt->ha->instance);
-+ rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_TARGET_RESET,
-+ lun, lun_size, SCST_ATOMIC, mcmd);
-+ break;
-+
-+ case Q2T_LUN_RESET:
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): LUN_RESET received",
-+ sess->tgt->ha->instance);
-+ rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_LUN_RESET,
-+ lun, lun_size, SCST_ATOMIC, mcmd);
-+ break;
-+
-+ case Q2T_CLEAR_TS:
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): CLEAR_TS received",
-+ sess->tgt->ha->instance);
-+ rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_CLEAR_TASK_SET,
-+ lun, lun_size, SCST_ATOMIC, mcmd);
-+ break;
-+
-+ case Q2T_ABORT_TS:
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): ABORT_TS received",
-+ sess->tgt->ha->instance);
-+ rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_ABORT_TASK_SET,
-+ lun, lun_size, SCST_ATOMIC, mcmd);
-+ break;
-+
-+ case Q2T_ABORT_ALL:
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): Doing ABORT_ALL_TASKS",
-+ sess->tgt->ha->instance);
-+ rc = scst_rx_mgmt_fn_lun(sess->scst_sess,
-+ SCST_ABORT_ALL_TASKS,
-+ lun, lun_size, SCST_ATOMIC, mcmd);
-+ break;
-+
-+ case Q2T_ABORT_ALL_SESS:
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): Doing ABORT_ALL_TASKS_SESS",
-+ sess->tgt->ha->instance);
-+ rc = scst_rx_mgmt_fn_lun(sess->scst_sess,
-+ SCST_ABORT_ALL_TASKS_SESS,
-+ lun, lun_size, SCST_ATOMIC, mcmd);
-+ break;
-+
-+ case Q2T_NEXUS_LOSS_SESS:
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): Doing NEXUS_LOSS_SESS",
-+ sess->tgt->ha->instance);
-+ rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_NEXUS_LOSS_SESS,
-+ lun, lun_size, SCST_ATOMIC, mcmd);
-+ break;
-+
-+ case Q2T_NEXUS_LOSS:
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): Doing NEXUS_LOSS",
-+ sess->tgt->ha->instance);
-+ rc = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_NEXUS_LOSS,
-+ lun, lun_size, SCST_ATOMIC, mcmd);
-+ break;
-+
-+ default:
-+ PRINT_ERROR("qla2x00t(%ld): Unknown task mgmt fn 0x%x",
-+ sess->tgt->ha->instance, fn);
-+ rc = -1;
-+ break;
-+ }
-+
-+ if (rc != 0) {
-+ PRINT_ERROR("qla2x00t(%ld): scst_rx_mgmt_fn_lun() failed: %d",
-+ sess->tgt->ha->instance, rc);
-+ res = -EFAULT;
-+ goto out_free;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free:
-+ mempool_free(mcmd, q2t_mgmt_cmd_mempool);
-+ goto out;
-+}
-+
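-+/*
-+ * Extracts the LUN and task management function from the IOCB (ATIO7 on
-+ * 24xx, immediate notify on 2xxx) and routes it to SCST; TMs for unknown
-+ * sessions are deferred to the sess_work thread.
-+ */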
-+/* pha->hardware_lock supposed to be held on entry */
-+static int q2t_handle_task_mgmt(scsi_qla_host_t *ha, void *iocb)
-+{
-+ int res = 0;
-+ struct q2t_tgt *tgt;
-+ struct q2t_sess *sess;
-+ uint8_t *lun;
-+ uint16_t lun_data;
-+ int lun_size;
-+ int fn;
-+
-+ TRACE_ENTRY();
-+
-+ tgt = ha->tgt;
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ atio7_entry_t *a = (atio7_entry_t *)iocb;
-+ lun = (uint8_t *)&a->fcp_cmnd.lun;
-+ lun_size = sizeof(a->fcp_cmnd.lun);
-+ fn = a->fcp_cmnd.task_mgmt_flags;
-+ sess = q2t_find_sess_by_s_id(tgt, a->fcp_hdr.s_id);
-+ } else {
-+ notify_entry_t *n = (notify_entry_t *)iocb;
-+		/* convert the LE wire value to the network byte order SCST expects */
-+ lun_data = swab16(le16_to_cpu(n->lun));
-+ lun = (uint8_t *)&lun_data;
-+ lun_size = sizeof(lun_data);
-+ fn = n->task_flags >> IMM_NTFY_TASK_MGMT_SHIFT;
-+ sess = q2t_find_sess_by_loop_id(tgt, GET_TARGET_ID(ha, n));
-+ }
-+
-+ if (sess == NULL) {
-+ TRACE_MGMT_DBG("qla2x00t(%ld): task mgmt fn 0x%x for "
-+			"non-existent session", ha->instance, fn);
-+ res = q2t_sched_sess_work(tgt, Q2T_SESS_WORK_TM, iocb,
-+ IS_FWI2_CAPABLE(ha) ? sizeof(atio7_entry_t) :
-+ sizeof(notify_entry_t));
-+ if (res != 0)
-+ tgt->tm_to_unknown = 1;
-+ goto out;
-+ }
-+
-+ res = q2t_issue_task_mgmt(sess, lun, lun_size, fn, iocb, 0);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static int __q2t_abort_task(scsi_qla_host_t *ha, notify_entry_t *iocb,
-+ struct q2t_sess *sess)
-+{
-+ int res, rc;
-+ struct q2t_mgmt_cmd *mcmd;
-+
-+ TRACE_ENTRY();
-+
-+ mcmd = mempool_alloc(q2t_mgmt_cmd_mempool, GFP_ATOMIC);
-+ if (mcmd == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): %s: Allocation of ABORT cmd failed",
-+ ha->instance, __func__);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ memset(mcmd, 0, sizeof(*mcmd));
-+
-+ mcmd->sess = sess;
-+ memcpy(&mcmd->orig_iocb.notify_entry, iocb,
-+ sizeof(mcmd->orig_iocb.notify_entry));
-+
-+ rc = scst_rx_mgmt_fn_tag(sess->scst_sess, SCST_ABORT_TASK,
-+ le16_to_cpu(iocb->seq_id), SCST_ATOMIC, mcmd);
-+ if (rc != 0) {
-+ PRINT_ERROR("qla2x00t(%ld): scst_rx_mgmt_fn_tag() failed: %d",
-+ ha->instance, rc);
-+ res = -EFAULT;
-+ goto out_free;
-+ }
-+
-+ res = 0;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free:
-+ mempool_free(mcmd, q2t_mgmt_cmd_mempool);
-+ goto out;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static int q2t_abort_task(scsi_qla_host_t *ha, notify_entry_t *iocb)
-+{
-+ int res;
-+ struct q2t_sess *sess;
-+ int loop_id;
-+
-+ TRACE_ENTRY();
-+
-+ loop_id = GET_TARGET_ID(ha, iocb);
-+
-+ sess = q2t_find_sess_by_loop_id(ha->tgt, loop_id);
-+ if (sess == NULL) {
-+		/* sess is NULL here, so reference the target via ha->tgt */
-+		TRACE_MGMT_DBG("qla2x00t(%ld): task abort for non-existing "
-+			"session", ha->instance);
-+		res = q2t_sched_sess_work(ha->tgt, Q2T_SESS_WORK_ABORT, iocb,
-+			sizeof(*iocb));
-+		if (res != 0)
-+			ha->tgt->tm_to_unknown = 1;
-+ goto out;
-+ }
-+
-+ res = __q2t_abort_task(ha, iocb, sess);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/*
-+ * pha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
-+ */
-+static int q24_handle_els(scsi_qla_host_t *ha, notify24xx_entry_t *iocb)
-+{
-+ int res = 1; /* send notify ack */
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("qla2x00t(%ld): ELS opcode %x", ha->instance,
-+ iocb->status_subcode);
-+
-+ switch (iocb->status_subcode) {
-+ case ELS_PLOGI:
-+ case ELS_FLOGI:
-+ case ELS_PRLI:
-+ break;
-+
-+ case ELS_LOGO:
-+ case ELS_PRLO:
-+ res = q2t_reset(ha, iocb, Q2T_NEXUS_LOSS_SESS);
-+ break;
-+
-+ case ELS_PDISC:
-+ case ELS_ADISC:
-+ {
-+ struct q2t_tgt *tgt = ha->tgt;
-+ if (tgt->link_reinit_iocb_pending) {
-+ q24_send_notify_ack(ha, &tgt->link_reinit_iocb, 0, 0, 0);
-+ tgt->link_reinit_iocb_pending = 0;
-+ }
-+ break;
-+ }
-+
-+ default:
-+ PRINT_ERROR("qla2x00t(%ld): Unsupported ELS command %x "
-+ "received", ha->instance, iocb->status_subcode);
-+#if 0
-+ res = q2t_reset(ha, iocb, Q2T_NEXUS_LOSS_SESS);
-+#endif
-+ break;
-+ }
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
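-+/*
-+ * For an SRR with a non-zero relative offset, rebuilds the command's S/G
-+ * list so that it starts 'offset' bytes into the original buffer.  The new
-+ * list is allocated here and freed later via cmd->free_sg.
-+ */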
-+static int q2t_cut_cmd_data_head(struct q2t_cmd *cmd, unsigned int offset)
-+{
-+ int res = 0;
-+ int cnt, first_sg, first_page = 0, first_page_offs = 0, i;
-+ unsigned int l;
-+ int cur_dst, cur_src;
-+ struct scatterlist *sg;
-+ size_t bufflen = 0;
-+
-+ TRACE_ENTRY();
-+
-+ first_sg = -1;
-+ cnt = 0;
-+ l = 0;
-+ for (i = 0; i < cmd->sg_cnt; i++) {
-+ l += cmd->sg[i].length;
-+ if (l > offset) {
-+ int sg_offs = l - cmd->sg[i].length;
-+ first_sg = i;
-+ if (cmd->sg[i].offset == 0) {
-+ first_page_offs = offset % PAGE_SIZE;
-+ first_page = (offset - sg_offs) >> PAGE_SHIFT;
-+ } else {
-+ TRACE_SG("i=%d, sg[i].offset=%d, "
-+ "sg_offs=%d", i, cmd->sg[i].offset, sg_offs);
-+ if ((cmd->sg[i].offset + sg_offs) > offset) {
-+ first_page_offs = offset - sg_offs;
-+ first_page = 0;
-+ } else {
-+ int sec_page_offs = sg_offs +
-+ (PAGE_SIZE - cmd->sg[i].offset);
-+ first_page_offs = sec_page_offs % PAGE_SIZE;
-+ first_page = 1 +
-+ ((offset - sec_page_offs) >>
-+ PAGE_SHIFT);
-+ }
-+ }
-+ cnt = cmd->sg_cnt - i + (first_page_offs != 0);
-+ break;
-+ }
-+ }
-+ if (first_sg == -1) {
-+ PRINT_ERROR("qla2x00t(%ld): Wrong offset %d, buf length %d",
-+ cmd->tgt->ha->instance, offset, cmd->bufflen);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ TRACE_SG("offset=%d, first_sg=%d, first_page=%d, "
-+ "first_page_offs=%d, cmd->bufflen=%d, cmd->sg_cnt=%d", offset,
-+ first_sg, first_page, first_page_offs, cmd->bufflen,
-+ cmd->sg_cnt);
-+
-+ sg = kmalloc(cnt * sizeof(sg[0]), GFP_KERNEL);
-+ if (sg == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): Unable to allocate cut "
-+ "SG (len %zd)", cmd->tgt->ha->instance,
-+ cnt * sizeof(sg[0]));
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+ sg_init_table(sg, cnt);
-+
-+ cur_dst = 0;
-+ cur_src = first_sg;
-+ if (first_page_offs != 0) {
-+ int fpgs;
-+ sg_set_page(&sg[cur_dst], &sg_page(&cmd->sg[cur_src])[first_page],
-+ PAGE_SIZE - first_page_offs, first_page_offs);
-+ bufflen += sg[cur_dst].length;
-+ TRACE_SG("cur_dst=%d, cur_src=%d, sg[].page=%p, "
-+ "sg[].offset=%d, sg[].length=%d, bufflen=%zu",
-+ cur_dst, cur_src, sg_page(&sg[cur_dst]), sg[cur_dst].offset,
-+ sg[cur_dst].length, bufflen);
-+ cur_dst++;
-+
-+ fpgs = (cmd->sg[cur_src].length >> PAGE_SHIFT) +
-+ ((cmd->sg[cur_src].length & ~PAGE_MASK) != 0);
-+ first_page++;
-+ if (fpgs > first_page) {
-+ sg_set_page(&sg[cur_dst],
-+ &sg_page(&cmd->sg[cur_src])[first_page],
-+ cmd->sg[cur_src].length - PAGE_SIZE*first_page,
-+ 0);
-+ TRACE_SG("fpgs=%d, cur_dst=%d, cur_src=%d, "
-+ "sg[].page=%p, sg[].length=%d, bufflen=%zu",
-+ fpgs, cur_dst, cur_src, sg_page(&sg[cur_dst]),
-+ sg[cur_dst].length, bufflen);
-+ bufflen += sg[cur_dst].length;
-+ cur_dst++;
-+ }
-+ cur_src++;
-+ }
-+
-+ while (cur_src < cmd->sg_cnt) {
-+ sg_set_page(&sg[cur_dst], sg_page(&cmd->sg[cur_src]),
-+ cmd->sg[cur_src].length, cmd->sg[cur_src].offset);
-+ TRACE_SG("cur_dst=%d, cur_src=%d, "
-+ "sg[].page=%p, sg[].length=%d, sg[].offset=%d, "
-+ "bufflen=%zu", cur_dst, cur_src, sg_page(&sg[cur_dst]),
-+ sg[cur_dst].length, sg[cur_dst].offset, bufflen);
-+ bufflen += sg[cur_dst].length;
-+ cur_dst++;
-+ cur_src++;
-+ }
-+
-+ if (cmd->free_sg)
-+ kfree(cmd->sg);
-+
-+ cmd->sg = sg;
-+ cmd->free_sg = 1;
-+ cmd->sg_cnt = cur_dst;
-+ cmd->bufflen = bufflen;
-+ cmd->offset += offset;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static inline int q2t_srr_adjust_data(struct q2t_cmd *cmd,
-+ uint32_t srr_rel_offs, int *xmit_type)
-+{
-+ int res = 0;
-+ int rel_offs;
-+
-+ rel_offs = srr_rel_offs - cmd->offset;
-+ TRACE_MGMT_DBG("srr_rel_offs=%d, rel_offs=%d", srr_rel_offs, rel_offs);
-+
-+ *xmit_type = Q2T_XMIT_ALL;
-+
-+ if (rel_offs < 0) {
-+ PRINT_ERROR("qla2x00t(%ld): SRR rel_offs (%d) "
-+ "< 0", cmd->tgt->ha->instance, rel_offs);
-+ res = -1;
-+ } else if (rel_offs == cmd->bufflen)
-+ *xmit_type = Q2T_XMIT_STATUS;
-+ else if (rel_offs > 0)
-+ res = q2t_cut_cmd_data_head(cmd, rel_offs);
-+
-+ return res;
-+}
-+
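-+/*
-+ * Services an accepted 24xx SRR: status SRRs resend status, data SRRs
-+ * retransmit (IN) or re-request (OUT) data from the adjusted offset;
-+ * anything unservable is rejected and the exchange terminated.
-+ */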
-+/* No locks, thread context */
-+static void q24_handle_srr(scsi_qla_host_t *ha, struct srr_ctio *sctio,
-+ struct srr_imm *imm)
-+{
-+ notify24xx_entry_t *ntfy = &imm->imm.notify_entry24;
-+ struct q2t_cmd *cmd = sctio->cmd;
-+ scsi_qla_host_t *pha = to_qla_parent(ha);
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("SRR cmd %p, srr_ui %x", cmd, ntfy->srr_ui);
-+
-+ switch (ntfy->srr_ui) {
-+ case SRR_IU_STATUS:
-+ spin_lock_irq(&pha->hardware_lock);
-+ q24_send_notify_ack(ha, ntfy,
-+ NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
-+ spin_unlock_irq(&pha->hardware_lock);
-+ __q24_xmit_response(cmd, Q2T_XMIT_STATUS);
-+ break;
-+ case SRR_IU_DATA_IN:
-+ cmd->bufflen = scst_cmd_get_adjusted_resp_data_len(cmd->scst_cmd);
-+ if (q2t_has_data(cmd)) {
-+ uint32_t offset;
-+ int xmit_type;
-+ offset = le32_to_cpu(imm->imm.notify_entry24.srr_rel_offs);
-+ if (q2t_srr_adjust_data(cmd, offset, &xmit_type) != 0)
-+ goto out_reject;
-+ spin_lock_irq(&pha->hardware_lock);
-+ q24_send_notify_ack(ha, ntfy,
-+ NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
-+ spin_unlock_irq(&pha->hardware_lock);
-+ __q24_xmit_response(cmd, xmit_type);
-+ } else {
-+ PRINT_ERROR("qla2x00t(%ld): SRR for in data for cmd "
-+ "without them (tag %d, SCSI status %d), "
-+ "reject", ha->instance, cmd->tag,
-+ scst_cmd_get_status(cmd->scst_cmd));
-+ goto out_reject;
-+ }
-+ break;
-+ case SRR_IU_DATA_OUT:
-+ cmd->bufflen = scst_cmd_get_write_fields(cmd->scst_cmd,
-+ &cmd->sg, &cmd->sg_cnt);
-+ if (q2t_has_data(cmd)) {
-+ uint32_t offset;
-+ int xmit_type;
-+ offset = le32_to_cpu(imm->imm.notify_entry24.srr_rel_offs);
-+ if (q2t_srr_adjust_data(cmd, offset, &xmit_type) != 0)
-+ goto out_reject;
-+ spin_lock_irq(&pha->hardware_lock);
-+ q24_send_notify_ack(ha, ntfy,
-+ NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
-+ spin_unlock_irq(&pha->hardware_lock);
-+ if (xmit_type & Q2T_XMIT_DATA)
-+ __q2t_rdy_to_xfer(cmd);
-+ } else {
-+ PRINT_ERROR("qla2x00t(%ld): SRR for out data for cmd "
-+ "without them (tag %d, SCSI status %d), "
-+ "reject", ha->instance, cmd->tag,
-+ scst_cmd_get_status(cmd->scst_cmd));
-+ goto out_reject;
-+ }
-+ break;
-+ default:
-+ PRINT_ERROR("qla2x00t(%ld): Unknown srr_ui value %x",
-+ ha->instance, ntfy->srr_ui);
-+ goto out_reject;
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+
-+out_reject:
-+ spin_lock_irq(&pha->hardware_lock);
-+ q24_send_notify_ack(ha, ntfy, NOTIFY_ACK_SRR_FLAGS_REJECT,
-+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
-+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
-+ if (cmd->state == Q2T_STATE_NEED_DATA) {
-+ cmd->state = Q2T_STATE_DATA_IN;
-+ scst_rx_data(cmd->scst_cmd, SCST_RX_STATUS_ERROR,
-+ SCST_CONTEXT_THREAD);
-+ } else
-+ q24_send_term_exchange(ha, cmd, &cmd->atio.atio7, 1);
-+ spin_unlock_irq(&pha->hardware_lock);
-+ goto out;
-+}
-+
-+/* No locks, thread context */
-+static void q2x_handle_srr(scsi_qla_host_t *ha, struct srr_ctio *sctio,
-+ struct srr_imm *imm)
-+{
-+ notify_entry_t *ntfy = &imm->imm.notify_entry;
-+ struct q2t_cmd *cmd = sctio->cmd;
-+ scsi_qla_host_t *pha = to_qla_parent(ha);
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("SRR cmd %p, srr_ui %x", cmd, ntfy->srr_ui);
-+
-+ switch (ntfy->srr_ui) {
-+ case SRR_IU_STATUS:
-+ spin_lock_irq(&pha->hardware_lock);
-+ q2x_send_notify_ack(ha, ntfy, 0, 0, 0,
-+ NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
-+ spin_unlock_irq(&pha->hardware_lock);
-+ __q2x_xmit_response(cmd, Q2T_XMIT_STATUS);
-+ break;
-+ case SRR_IU_DATA_IN:
-+ cmd->bufflen = scst_cmd_get_adjusted_resp_data_len(cmd->scst_cmd);
-+ if (q2t_has_data(cmd)) {
-+ uint32_t offset;
-+ int xmit_type;
-+ offset = le32_to_cpu(imm->imm.notify_entry.srr_rel_offs);
-+ if (q2t_srr_adjust_data(cmd, offset, &xmit_type) != 0)
-+ goto out_reject;
-+ spin_lock_irq(&pha->hardware_lock);
-+ q2x_send_notify_ack(ha, ntfy, 0, 0, 0,
-+ NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
-+ spin_unlock_irq(&pha->hardware_lock);
-+ __q2x_xmit_response(cmd, xmit_type);
-+ } else {
-+ PRINT_ERROR("qla2x00t(%ld): SRR for in data for cmd "
-+ "without them (tag %d, SCSI status %d), "
-+ "reject", ha->instance, cmd->tag,
-+ scst_cmd_get_status(cmd->scst_cmd));
-+ goto out_reject;
-+ }
-+ break;
-+ case SRR_IU_DATA_OUT:
-+ cmd->bufflen = scst_cmd_get_write_fields(cmd->scst_cmd,
-+ &cmd->sg, &cmd->sg_cnt);
-+ if (q2t_has_data(cmd)) {
-+ uint32_t offset;
-+ int xmit_type;
-+ offset = le32_to_cpu(imm->imm.notify_entry.srr_rel_offs);
-+ if (q2t_srr_adjust_data(cmd, offset, &xmit_type) != 0)
-+ goto out_reject;
-+ spin_lock_irq(&pha->hardware_lock);
-+ q2x_send_notify_ack(ha, ntfy, 0, 0, 0,
-+ NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
-+ spin_unlock_irq(&pha->hardware_lock);
-+ if (xmit_type & Q2T_XMIT_DATA)
-+ __q2t_rdy_to_xfer(cmd);
-+ } else {
-+ PRINT_ERROR("qla2x00t(%ld): SRR for out data for cmd "
-+ "without them (tag %d, SCSI status %d), "
-+ "reject", ha->instance, cmd->tag,
-+ scst_cmd_get_status(cmd->scst_cmd));
-+ goto out_reject;
-+ }
-+ break;
-+ default:
-+ PRINT_ERROR("qla2x00t(%ld): Unknown srr_ui value %x",
-+ ha->instance, ntfy->srr_ui);
-+ goto out_reject;
-+ }
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+
-+out_reject:
-+ spin_lock_irq(&pha->hardware_lock);
-+ q2x_send_notify_ack(ha, ntfy, 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_REJECT,
-+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
-+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
-+ if (cmd->state == Q2T_STATE_NEED_DATA) {
-+ cmd->state = Q2T_STATE_DATA_IN;
-+ scst_rx_data(cmd->scst_cmd, SCST_RX_STATUS_ERROR,
-+ SCST_CONTEXT_THREAD);
-+ } else
-+ q2x_send_term_exchange(ha, cmd, &cmd->atio.atio2x, 1);
-+ spin_unlock_irq(&pha->hardware_lock);
-+ goto out;
-+}
-+
-+static void q2t_reject_free_srr_imm(scsi_qla_host_t *ha, struct srr_imm *imm,
-+ int ha_locked)
-+{
-+ scsi_qla_host_t *pha = to_qla_parent(ha);
-+
-+ if (!ha_locked)
-+ spin_lock_irq(&pha->hardware_lock);
-+
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ q24_send_notify_ack(ha, &imm->imm.notify_entry24,
-+ NOTIFY_ACK_SRR_FLAGS_REJECT,
-+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
-+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
-+ } else {
-+ q2x_send_notify_ack(ha, &imm->imm.notify_entry,
-+ 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_REJECT,
-+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
-+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
-+ }
-+
-+ if (!ha_locked)
-+ spin_unlock_irq(&pha->hardware_lock);
-+
-+ kfree(imm);
-+ return;
-+}
-+
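-+/*
-+ * srr_work handler: pairs each queued CTIO SRR with its IMM SRR by srr_id,
-+ * restores the command's original S/G list and offset, and re-drives it.
-+ * The list walk restarts from the top after every pair, because srr_lock
-+ * is dropped while the SRR is being handled.
-+ */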
-+static void q2t_handle_srr_work(struct work_struct *work)
-+{
-+ struct q2t_tgt *tgt = container_of(work, struct q2t_tgt, srr_work);
-+ scsi_qla_host_t *ha = tgt->ha;
-+ struct srr_ctio *sctio;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("SRR work (tgt %p)", tgt);
-+
-+restart:
-+ spin_lock_irq(&tgt->srr_lock);
-+ list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
-+ struct srr_imm *imm;
-+ struct q2t_cmd *cmd;
-+ struct srr_imm *i, *ti;
-+
-+ imm = NULL;
-+ list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
-+ srr_list_entry) {
-+ if (i->srr_id == sctio->srr_id) {
-+ list_del(&i->srr_list_entry);
-+ if (imm) {
-+ PRINT_ERROR("qla2x00t(%ld): There must "
-+ "be only one IMM SRR per CTIO SRR "
-+					"(IMM SRR %p, id %d, CTIO %p)",
-+ ha->instance, i, i->srr_id, sctio);
-+ q2t_reject_free_srr_imm(ha, i, 0);
-+ } else
-+ imm = i;
-+ }
-+ }
-+
-+ TRACE_MGMT_DBG("IMM SRR %p, CTIO SRR %p (id %d)", imm, sctio,
-+ sctio->srr_id);
-+
-+ if (imm == NULL) {
-+			TRACE_MGMT_DBG("No matching IMM found for SRR CTIO "
-+ "(id %d)", sctio->srr_id);
-+ continue;
-+ } else
-+ list_del(&sctio->srr_list_entry);
-+
-+ spin_unlock_irq(&tgt->srr_lock);
-+
-+ cmd = sctio->cmd;
-+
-+ /* Restore the originals, except bufflen */
-+ cmd->offset = scst_cmd_get_ppl_offset(cmd->scst_cmd);
-+ if (cmd->free_sg) {
-+ kfree(cmd->sg);
-+ cmd->free_sg = 0;
-+ }
-+ cmd->sg = scst_cmd_get_sg(cmd->scst_cmd);
-+ cmd->sg_cnt = scst_cmd_get_sg_cnt(cmd->scst_cmd);
-+
-+ TRACE_MGMT_DBG("SRR cmd %p (scst_cmd %p, tag %d, op %x), "
-+ "sg_cnt=%d, offset=%d", cmd, cmd->scst_cmd,
-+ cmd->tag, cmd->scst_cmd->cdb[0], cmd->sg_cnt,
-+ cmd->offset);
-+
-+ if (IS_FWI2_CAPABLE(ha))
-+ q24_handle_srr(ha, sctio, imm);
-+ else
-+ q2x_handle_srr(ha, sctio, imm);
-+
-+ kfree(imm);
-+ kfree(sctio);
-+ goto restart;
-+ }
-+ spin_unlock_irq(&tgt->srr_lock);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+static void q2t_prepare_srr_imm(scsi_qla_host_t *ha, void *iocb)
-+{
-+ struct srr_imm *imm;
-+ struct q2t_tgt *tgt = ha->tgt;
-+ notify_entry_t *iocb2x = (notify_entry_t *)iocb;
-+ notify24xx_entry_t *iocb24 = (notify24xx_entry_t *)iocb;
-+ struct srr_ctio *sctio;
-+
-+ tgt->imm_srr_id++;
-+
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): SRR received", ha->instance);
-+
-+ imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
-+ if (imm != NULL) {
-+ memcpy(&imm->imm.notify_entry, iocb,
-+ sizeof(imm->imm.notify_entry));
-+
-+ /* IRQ is already OFF */
-+ spin_lock(&tgt->srr_lock);
-+ imm->srr_id = tgt->imm_srr_id;
-+ list_add_tail(&imm->srr_list_entry,
-+ &tgt->srr_imm_list);
-+ TRACE_MGMT_DBG("IMM NTFY SRR %p added (id %d, ui %x)", imm,
-+ imm->srr_id, iocb24->srr_ui);
-+ if (tgt->imm_srr_id == tgt->ctio_srr_id) {
-+ int found = 0;
-+ list_for_each_entry(sctio, &tgt->srr_ctio_list,
-+ srr_list_entry) {
-+ if (sctio->srr_id == imm->srr_id) {
-+ found = 1;
-+ break;
-+ }
-+ }
-+ if (found) {
-+ TRACE_MGMT_DBG("%s", "Scheduling srr work");
-+ schedule_work(&tgt->srr_work);
-+ } else {
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): imm_srr_id "
-+ "== ctio_srr_id (%d), but there is no "
-+ "corresponding SRR CTIO, deleting IMM "
-+ "SRR %p", ha->instance, tgt->ctio_srr_id,
-+ imm);
-+ list_del(&imm->srr_list_entry);
-+
-+ kfree(imm);
-+
-+ spin_unlock(&tgt->srr_lock);
-+ goto out_reject;
-+ }
-+ }
-+ spin_unlock(&tgt->srr_lock);
-+ } else {
-+ struct srr_ctio *ts;
-+
-+ PRINT_ERROR("qla2x00t(%ld): Unable to allocate SRR IMM "
-+ "entry, SRR request will be rejected", ha->instance);
-+
-+ /* IRQ is already OFF */
-+ spin_lock(&tgt->srr_lock);
-+ list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
-+ srr_list_entry) {
-+ if (sctio->srr_id == tgt->imm_srr_id) {
-+ TRACE_MGMT_DBG("CTIO SRR %p deleted "
-+ "(id %d)", sctio, sctio->srr_id);
-+ list_del(&sctio->srr_list_entry);
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ q24_send_term_exchange(ha, sctio->cmd,
-+ &sctio->cmd->atio.atio7, 1);
-+ } else {
-+ q2x_send_term_exchange(ha, sctio->cmd,
-+ &sctio->cmd->atio.atio2x, 1);
-+ }
-+ kfree(sctio);
-+ }
-+ }
-+ spin_unlock(&tgt->srr_lock);
-+ goto out_reject;
-+ }
-+
-+out:
-+ return;
-+
-+out_reject:
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ q24_send_notify_ack(ha, iocb24,
-+ NOTIFY_ACK_SRR_FLAGS_REJECT,
-+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
-+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
-+ } else {
-+ q2x_send_notify_ack(ha, iocb2x,
-+ 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_REJECT,
-+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
-+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
-+ }
-+ goto out;
-+}
-+
-+/*
-+ * pha->hardware_lock is supposed to be held on entry. Might be dropped, then reacquired.
-+ */
-+static void q2t_handle_imm_notify(scsi_qla_host_t *ha, void *iocb)
-+{
-+ uint16_t status;
-+ uint32_t add_flags = 0;
-+ int send_notify_ack = 1;
-+ notify_entry_t *iocb2x = (notify_entry_t *)iocb;
-+ notify24xx_entry_t *iocb24 = (notify24xx_entry_t *)iocb;
-+
-+ TRACE_ENTRY();
-+
-+ status = le16_to_cpu(iocb2x->status);
-+
-+ TRACE_BUFF_FLAG(TRACE_BUFF, "IMMED Notify Coming Up",
-+ iocb, sizeof(*iocb2x));
-+
-+ switch (status) {
-+ case IMM_NTFY_LIP_RESET:
-+ {
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): LIP reset (loop %#x), "
-+ "subcode %x", ha->instance,
-+ le16_to_cpu(iocb24->nport_handle),
-+ iocb24->status_subcode);
-+ } else {
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): LIP reset (I %#x)",
-+ ha->instance, GET_TARGET_ID(ha, iocb2x));
-+ /* set the Clear LIP reset event flag */
-+ add_flags |= NOTIFY_ACK_CLEAR_LIP_RESET;
-+ }
-+ /*
-+		 * No additional resets or aborts are needed, because the
-+		 * firmware will, as required by FCP, either generate a TARGET
-+		 * RESET or reject all affected commands with LIP_RESET status.
-+ */
-+ break;
-+ }
-+
-+ case IMM_NTFY_LIP_LINK_REINIT:
-+ {
-+ struct q2t_tgt *tgt = ha->tgt;
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): LINK REINIT (loop %#x, "
-+ "subcode %x)", ha->instance,
-+ le16_to_cpu(iocb24->nport_handle),
-+ iocb24->status_subcode);
-+ if (tgt->link_reinit_iocb_pending)
-+ q24_send_notify_ack(ha, &tgt->link_reinit_iocb, 0, 0, 0);
-+ memcpy(&tgt->link_reinit_iocb, iocb24, sizeof(*iocb24));
-+ tgt->link_reinit_iocb_pending = 1;
-+ /*
-+		 * QLogic requires waiting after LINK REINIT for possible
-+ * PDISC or ADISC ELS commands
-+ */
-+ send_notify_ack = 0;
-+ break;
-+ }
-+
-+ case IMM_NTFY_PORT_LOGOUT:
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): Port logout (loop "
-+ "%#x, subcode %x)", ha->instance,
-+ le16_to_cpu(iocb24->nport_handle),
-+ iocb24->status_subcode);
-+ } else {
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): Port logout (S "
-+ "%08x -> L %#x)", ha->instance,
-+ le16_to_cpu(iocb2x->seq_id),
-+ le16_to_cpu(iocb2x->lun));
-+ }
-+ if (q2t_reset(ha, iocb, Q2T_NEXUS_LOSS_SESS) == 0)
-+ send_notify_ack = 0;
-+ /* The sessions will be cleared in the callback, if needed */
-+ break;
-+
-+ case IMM_NTFY_GLBL_TPRLO:
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): Global TPRLO (%x)",
-+ ha->instance, status);
-+ if (q2t_reset(ha, iocb, Q2T_NEXUS_LOSS) == 0)
-+ send_notify_ack = 0;
-+ /* The sessions will be cleared in the callback, if needed */
-+ break;
-+
-+ case IMM_NTFY_PORT_CONFIG:
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): Port config changed (%x)",
-+ ha->instance, status);
-+ break;
-+
-+ case IMM_NTFY_GLBL_LOGO:
-+ PRINT_WARNING("qla2x00t(%ld): Link failure detected",
-+ ha->instance);
-+ /* I_T nexus loss */
-+ if (q2t_reset(ha, iocb, Q2T_NEXUS_LOSS) == 0)
-+ send_notify_ack = 0;
-+ break;
-+
-+ case IMM_NTFY_IOCB_OVERFLOW:
-+ PRINT_ERROR("qla2x00t(%ld): Cannot provide requested "
-+ "capability (IOCB overflowed the immediate notify "
-+ "resource count)", ha->instance);
-+ break;
-+
-+ case IMM_NTFY_ABORT_TASK:
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): Abort Task (S %08x I %#x -> "
-+ "L %#x)", ha->instance, le16_to_cpu(iocb2x->seq_id),
-+ GET_TARGET_ID(ha, iocb2x), le16_to_cpu(iocb2x->lun));
-+ if (q2t_abort_task(ha, iocb2x) == 0)
-+ send_notify_ack = 0;
-+ break;
-+
-+ case IMM_NTFY_RESOURCE:
-+ PRINT_ERROR("qla2x00t(%ld): Out of resources, host %ld",
-+ ha->instance, ha->host_no);
-+ break;
-+
-+ case IMM_NTFY_MSG_RX:
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): Immediate notify task %x",
-+ ha->instance, iocb2x->task_flags);
-+ if (q2t_handle_task_mgmt(ha, iocb2x) == 0)
-+ send_notify_ack = 0;
-+ break;
-+
-+ case IMM_NTFY_ELS:
-+ if (q24_handle_els(ha, iocb24) == 0)
-+ send_notify_ack = 0;
-+ break;
-+
-+ case IMM_NTFY_SRR:
-+ q2t_prepare_srr_imm(ha, iocb);
-+ send_notify_ack = 0;
-+ break;
-+
-+ default:
-+ PRINT_ERROR("qla2x00t(%ld): Received unknown immediate "
-+ "notify status %x", ha->instance, status);
-+ break;
-+ }
-+
-+ if (send_notify_ack) {
-+ if (IS_FWI2_CAPABLE(ha))
-+ q24_send_notify_ack(ha, iocb24, 0, 0, 0);
-+ else
-+ q2x_send_notify_ack(ha, iocb2x, add_flags, 0, 0, 0,
-+ 0, 0);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * pha->hardware_lock is supposed to be held on entry. Might be dropped, then reacquired.
-+ */
-+static void q2x_send_busy(scsi_qla_host_t *ha, atio_entry_t *atio)
-+{
-+ ctio_ret_entry_t *ctio;
-+
-+ TRACE_ENTRY();
-+
-+	/* Sending a marker isn't necessary, since we are called from an ISR */
-+
-+ ctio = (ctio_ret_entry_t *)q2t_req_pkt(ha);
-+ if (ctio == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
-+ "request packet", ha->instance, __func__);
-+ goto out;
-+ }
-+
-+ ctio->entry_type = CTIO_RET_TYPE;
-+ ctio->entry_count = 1;
-+ ctio->handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
-+ ctio->scsi_status = __constant_cpu_to_le16(SAM_STAT_BUSY);
-+ ctio->residual = atio->data_length;
-+ if (ctio->residual != 0)
-+ ctio->scsi_status |= SS_RESIDUAL_UNDER;
-+
-+ /* Set IDs */
-+ SET_TARGET_ID(ha, ctio->target, GET_TARGET_ID(ha, atio));
-+ ctio->rx_id = atio->rx_id;
-+
-+ ctio->flags = __constant_cpu_to_le16(OF_SSTS | OF_FAST_POST |
-+ OF_NO_DATA | OF_SS_MODE_1);
-+ ctio->flags |= __constant_cpu_to_le16(OF_INC_RC);
-+ /*
-+	 * A CTIO from the firmware without an scst_cmd doesn't provide enough
-+	 * info to retry it if explicit confirmation is used.
-+ */
-+
-+ TRACE_BUFFER("CTIO BUSY packet data", ctio, REQUEST_ENTRY_SIZE);
-+
-+ q2t_exec_queue(ha);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * pha->hardware_lock is supposed to be held on entry. Might be dropped, then reacquired.
-+ */
-+static void q24_send_busy(scsi_qla_host_t *ha, atio7_entry_t *atio,
-+ uint16_t status)
-+{
-+ ctio7_status1_entry_t *ctio;
-+ struct q2t_sess *sess;
-+ uint16_t loop_id;
-+
-+ TRACE_ENTRY();
-+
-+ /*
-+ * In some cases, for instance for ATIO_EXCHANGE_ADDRESS_UNKNOWN, the
-+	 * spec requires issuing QUEUE FULL SCSI status. So, let's search among
-+	 * sessions being deleted as well and use CTIO7_NHANDLE_UNRECOGNIZED
-+	 * if we can't find the sess.
-+ */
-+ sess = q2t_find_sess_by_s_id_include_deleted(ha->tgt,
-+ atio->fcp_hdr.s_id);
-+ if (sess != NULL)
-+ loop_id = sess->loop_id;
-+ else
-+ loop_id = CTIO7_NHANDLE_UNRECOGNIZED;
-+
-+	/* Sending a marker isn't necessary, since we are called from an ISR */
-+
-+ ctio = (ctio7_status1_entry_t *)q2t_req_pkt(ha);
-+ if (ctio == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): %s failed: unable to allocate "
-+ "request packet", ha->instance, __func__);
-+ goto out;
-+ }
-+
-+ ctio->common.entry_type = CTIO_TYPE7;
-+ ctio->common.entry_count = 1;
-+ ctio->common.handle = Q2T_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
-+ ctio->common.nport_handle = loop_id;
-+ ctio->common.timeout = __constant_cpu_to_le16(Q2T_TIMEOUT);
-+ ctio->common.vp_index = ha->vp_idx;
-+ ctio->common.initiator_id[0] = atio->fcp_hdr.s_id[2];
-+ ctio->common.initiator_id[1] = atio->fcp_hdr.s_id[1];
-+ ctio->common.initiator_id[2] = atio->fcp_hdr.s_id[0];
-+ ctio->common.exchange_addr = atio->exchange_addr;
-+ ctio->flags = (atio->attr << 9) | __constant_cpu_to_le16(
-+ CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
-+ CTIO7_FLAGS_DONT_RET_CTIO);
-+ /*
-+	 * A CTIO from the firmware without an scst_cmd doesn't provide enough
-+	 * info to retry it if explicit confirmation is used.
-+ */
-+ ctio->ox_id = swab16(atio->fcp_hdr.ox_id);
-+ ctio->scsi_status = cpu_to_le16(status);
-+ ctio->residual = get_unaligned((uint32_t *)
-+ &atio->fcp_cmnd.add_cdb[atio->fcp_cmnd.add_cdb_len]);
-+ if (ctio->residual != 0)
-+ ctio->scsi_status |= SS_RESIDUAL_UNDER;
-+
-+ TRACE_BUFFER("CTIO7 BUSY packet data", ctio, REQUEST_ENTRY_SIZE);
-+
-+ q2t_exec_queue(ha);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+/* called via callback from qla2xxx */
-+static void q24_atio_pkt(scsi_qla_host_t *ha, atio7_entry_t *atio)
-+{
-+ int rc;
-+ struct q2t_tgt *tgt = ha->tgt;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(tgt == NULL)) {
-+ TRACE_MGMT_DBG("ATIO pkt, but no tgt (ha %p)", ha);
-+ goto out;
-+ }
-+
-+ TRACE(TRACE_SCSI, "qla2x00t(%ld): ATIO pkt %p: type %02x count %02x",
-+ ha->instance, atio, atio->entry_type, atio->entry_count);
-+
-+ /*
-+	 * In tgt_stop mode we should also allow all requests to pass.
-+	 * Otherwise, some commands can get stuck.
-+ */
-+
-+ tgt->irq_cmd_count++;
-+
-+ switch (atio->entry_type) {
-+ case ATIO_TYPE7:
-+ TRACE_DBG("ATIO_TYPE7 instance %ld, lun %Lx, read/write %d/%d, "
-+ "add_cdb_len %d, data_length %04x, s_id %x:%x:%x",
-+ ha->instance, atio->fcp_cmnd.lun, atio->fcp_cmnd.rddata,
-+ atio->fcp_cmnd.wrdata, atio->fcp_cmnd.add_cdb_len,
-+ be32_to_cpu(get_unaligned((uint32_t *)
-+ &atio->fcp_cmnd.add_cdb[atio->fcp_cmnd.add_cdb_len])),
-+ atio->fcp_hdr.s_id[0], atio->fcp_hdr.s_id[1],
-+ atio->fcp_hdr.s_id[2]);
-+ TRACE_BUFFER("Incoming ATIO7 packet data", atio,
-+ REQUEST_ENTRY_SIZE);
-+ PRINT_BUFF_FLAG(TRACE_SCSI, "FCP CDB", atio->fcp_cmnd.cdb,
-+ sizeof(atio->fcp_cmnd.cdb));
-+ if (unlikely(atio->exchange_addr ==
-+ ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
-+ TRACE(TRACE_OUT_OF_MEM, "qla2x00t(%ld): ATIO_TYPE7 "
-+ "received with UNKNOWN exchange address, "
-+ "sending QUEUE_FULL", ha->instance);
-+ q24_send_busy(ha, atio, SAM_STAT_TASK_SET_FULL);
-+ break;
-+ }
-+ if (likely(atio->fcp_cmnd.task_mgmt_flags == 0))
-+ rc = q2t_send_cmd_to_scst(ha, (atio_t *)atio);
-+ else
-+ rc = q2t_handle_task_mgmt(ha, atio);
-+ if (unlikely(rc != 0)) {
-+ if (rc == -ESRCH) {
-+#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
-+ q24_send_busy(ha, atio, SAM_STAT_BUSY);
-+#else
-+ q24_send_term_exchange(ha, NULL, atio, 1);
-+#endif
-+ } else {
-+ PRINT_INFO("qla2x00t(%ld): Unable to send "
-+ "command to SCST, sending BUSY status",
-+ ha->instance);
-+ q24_send_busy(ha, atio, SAM_STAT_BUSY);
-+ }
-+ }
-+ break;
-+
-+ case IMMED_NOTIFY_TYPE:
-+ {
-+ notify_entry_t *pkt = (notify_entry_t *)atio;
-+ if (unlikely(pkt->entry_status != 0)) {
-+ PRINT_ERROR("qla2x00t(%ld): Received ATIO packet %x "
-+ "with error status %x", ha->instance,
-+ pkt->entry_type, pkt->entry_status);
-+ break;
-+ }
-+ TRACE_DBG("%s", "IMMED_NOTIFY ATIO");
-+ q2t_handle_imm_notify(ha, pkt);
-+ break;
-+ }
-+
-+ default:
-+		PRINT_ERROR("qla2x00t(%ld): Received unknown ATIO packet "
-+ "type %x", ha->instance, atio->entry_type);
-+ break;
-+ }
-+
-+ tgt->irq_cmd_count--;
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* pha->hardware_lock supposed to be held on entry */
-+/* called via callback from qla2xxx */
-+static void q2t_response_pkt(scsi_qla_host_t *ha, response_t *pkt)
-+{
-+ struct q2t_tgt *tgt = ha->tgt;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(tgt == NULL)) {
-+ PRINT_ERROR("qla2x00t(%ld): Response pkt %x received, but no "
-+ "tgt (ha %p)", ha->instance, pkt->entry_type, ha);
-+ goto out;
-+ }
-+
-+ TRACE(TRACE_SCSI, "qla2x00t(%ld): pkt %p: T %02x C %02x S %02x "
-+ "handle %#x", ha->instance, pkt, pkt->entry_type,
-+ pkt->entry_count, pkt->entry_status, pkt->handle);
-+
-+ /*
-+	 * In tgt_stop mode we should also allow all requests to pass.
-+	 * Otherwise, some commands can get stuck.
-+ */
-+
-+ if (unlikely(pkt->entry_status != 0)) {
-+ PRINT_ERROR("qla2x00t(%ld): Received response packet %x "
-+ "with error status %x", ha->instance, pkt->entry_type,
-+ pkt->entry_status);
-+ switch (pkt->entry_type) {
-+ case ACCEPT_TGT_IO_TYPE:
-+ case IMMED_NOTIFY_TYPE:
-+ case ABTS_RECV_24XX:
-+ goto out;
-+ default:
-+ break;
-+ }
-+ }
-+
-+ tgt->irq_cmd_count++;
-+
-+ switch (pkt->entry_type) {
-+ case CTIO_TYPE7:
-+ {
-+ ctio7_fw_entry_t *entry = (ctio7_fw_entry_t *)pkt;
-+ TRACE_DBG("CTIO_TYPE7: instance %ld",
-+ ha->instance);
-+ TRACE_BUFFER("Incoming CTIO7 packet data", entry,
-+ REQUEST_ENTRY_SIZE);
-+ q2t_do_ctio_completion(ha, entry->handle,
-+ le16_to_cpu(entry->status)|(pkt->entry_status << 16),
-+ entry);
-+ break;
-+ }
-+
-+ case ACCEPT_TGT_IO_TYPE:
-+ {
-+ atio_entry_t *atio;
-+ int rc;
-+ atio = (atio_entry_t *)pkt;
-+ TRACE_DBG("ACCEPT_TGT_IO instance %ld status %04x "
-+ "lun %04x read/write %d data_length %04x "
-+ "target_id %02x rx_id %04x ",
-+ ha->instance, le16_to_cpu(atio->status),
-+ le16_to_cpu(atio->lun),
-+ atio->execution_codes,
-+ le32_to_cpu(atio->data_length),
-+ GET_TARGET_ID(ha, atio), atio->rx_id);
-+ TRACE_BUFFER("Incoming ATIO packet data", atio,
-+ REQUEST_ENTRY_SIZE);
-+ if (atio->status != __constant_cpu_to_le16(ATIO_CDB_VALID)) {
-+ PRINT_ERROR("qla2x00t(%ld): ATIO with error "
-+ "status %x received", ha->instance,
-+ le16_to_cpu(atio->status));
-+ break;
-+ }
-+ PRINT_BUFF_FLAG(TRACE_SCSI, "FCP CDB", atio->cdb,
-+ sizeof(atio->cdb));
-+ rc = q2t_send_cmd_to_scst(ha, (atio_t *)atio);
-+ if (unlikely(rc != 0)) {
-+ if (rc == -ESRCH) {
-+#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
-+ q2x_send_busy(ha, atio);
-+#else
-+ q2x_send_term_exchange(ha, NULL, atio, 1);
-+#endif
-+ } else {
-+ PRINT_INFO("qla2x00t(%ld): Unable to send "
-+ "command to SCST, sending BUSY status",
-+ ha->instance);
-+ q2x_send_busy(ha, atio);
-+ }
-+ }
-+ }
-+ break;
-+
-+ case CONTINUE_TGT_IO_TYPE:
-+ {
-+ ctio_common_entry_t *entry = (ctio_common_entry_t *)pkt;
-+ TRACE_DBG("CONTINUE_TGT_IO: instance %ld", ha->instance);
-+ TRACE_BUFFER("Incoming CTIO packet data", entry,
-+ REQUEST_ENTRY_SIZE);
-+ q2t_do_ctio_completion(ha, entry->handle,
-+ le16_to_cpu(entry->status)|(pkt->entry_status << 16),
-+ entry);
-+ break;
-+ }
-+
-+ case CTIO_A64_TYPE:
-+ {
-+ ctio_common_entry_t *entry = (ctio_common_entry_t *)pkt;
-+ TRACE_DBG("CTIO_A64: instance %ld", ha->instance);
-+ TRACE_BUFFER("Incoming CTIO_A64 packet data", entry,
-+ REQUEST_ENTRY_SIZE);
-+ q2t_do_ctio_completion(ha, entry->handle,
-+ le16_to_cpu(entry->status)|(pkt->entry_status << 16),
-+ entry);
-+ break;
-+ }
-+
-+ case IMMED_NOTIFY_TYPE:
-+ TRACE_DBG("%s", "IMMED_NOTIFY");
-+ q2t_handle_imm_notify(ha, (notify_entry_t *)pkt);
-+ break;
-+
-+ case NOTIFY_ACK_TYPE:
-+ if (tgt->notify_ack_expected > 0) {
-+ nack_entry_t *entry = (nack_entry_t *)pkt;
-+ TRACE_DBG("NOTIFY_ACK seq %08x status %x",
-+ le16_to_cpu(entry->seq_id),
-+ le16_to_cpu(entry->status));
-+ TRACE_BUFFER("Incoming NOTIFY_ACK packet data", pkt,
-+ RESPONSE_ENTRY_SIZE);
-+ tgt->notify_ack_expected--;
-+ if (entry->status != __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
-+ PRINT_ERROR("qla2x00t(%ld): NOTIFY_ACK "
-+ "failed %x", ha->instance,
-+ le16_to_cpu(entry->status));
-+ }
-+ } else {
-+ PRINT_ERROR("qla2x00t(%ld): Unexpected NOTIFY_ACK "
-+ "received", ha->instance);
-+ }
-+ break;
-+
-+ case ABTS_RECV_24XX:
-+ TRACE_DBG("ABTS_RECV_24XX: instance %ld", ha->instance);
-+ TRACE_BUFF_FLAG(TRACE_BUFF, "Incoming ABTS_RECV "
-+ "packet data", pkt, REQUEST_ENTRY_SIZE);
-+ q24_handle_abts(ha, (abts24_recv_entry_t *)pkt);
-+ break;
-+
-+ case ABTS_RESP_24XX:
-+ if (tgt->abts_resp_expected > 0) {
-+ abts24_resp_fw_entry_t *entry =
-+ (abts24_resp_fw_entry_t *)pkt;
-+ TRACE_DBG("ABTS_RESP_24XX: compl_status %x",
-+ entry->compl_status);
-+ TRACE_BUFF_FLAG(TRACE_BUFF, "Incoming ABTS_RESP "
-+ "packet data", pkt, REQUEST_ENTRY_SIZE);
-+ tgt->abts_resp_expected--;
-+ if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
-+ if ((entry->error_subcode1 == 0x1E) &&
-+ (entry->error_subcode2 == 0)) {
-+ /*
-+				 * We've got a race here: the aborted exchange
-+				 * was not terminated, i.e. the response for the
-+				 * aborted command was sent between when the
-+				 * abort request was received and when it was
-+				 * processed. Unfortunately, the firmware has a
-+				 * silly requirement that all aborted exchanges
-+				 * must be explicitly terminated, otherwise it
-+				 * refuses to send responses for the abort requests. So, we
-+ * have to (re)terminate the exchange and
-+ * retry the abort response.
-+ */
-+ q24_retry_term_exchange(ha, entry);
-+ } else
-+ PRINT_ERROR("qla2x00t(%ld): ABTS_RESP_24XX "
-+ "failed %x (subcode %x:%x)", ha->instance,
-+ entry->compl_status, entry->error_subcode1,
-+ entry->error_subcode2);
-+ }
-+ } else {
-+ PRINT_ERROR("qla2x00t(%ld): Unexpected ABTS_RESP_24XX "
-+ "received", ha->instance);
-+ }
-+ break;
-+
-+ case MODIFY_LUN_TYPE:
-+ if (tgt->modify_lun_expected > 0) {
-+ modify_lun_entry_t *entry = (modify_lun_entry_t *)pkt;
-+ TRACE_DBG("MODIFY_LUN %x, imm %c%d, cmd %c%d",
-+ entry->status,
-+ (entry->operators & MODIFY_LUN_IMM_ADD) ? '+'
-+ : (entry->operators & MODIFY_LUN_IMM_SUB) ? '-'
-+ : ' ',
-+ entry->immed_notify_count,
-+ (entry->operators & MODIFY_LUN_CMD_ADD) ? '+'
-+ : (entry->operators & MODIFY_LUN_CMD_SUB) ? '-'
-+ : ' ',
-+ entry->command_count);
-+ tgt->modify_lun_expected--;
-+ if (entry->status != MODIFY_LUN_SUCCESS) {
-+ PRINT_ERROR("qla2x00t(%ld): MODIFY_LUN "
-+ "failed %x", ha->instance,
-+ entry->status);
-+ }
-+ } else {
-+ PRINT_ERROR("qla2x00t(%ld): Unexpected MODIFY_LUN "
-+ "received", (ha != NULL) ? (long)ha->instance : -1);
-+ }
-+ break;
-+
-+ case ENABLE_LUN_TYPE:
-+ {
-+ elun_entry_t *entry = (elun_entry_t *)pkt;
-+ TRACE_DBG("ENABLE_LUN %x imm %u cmd %u ",
-+ entry->status, entry->immed_notify_count,
-+ entry->command_count);
-+ if (entry->status == ENABLE_LUN_ALREADY_ENABLED) {
-+ TRACE_DBG("LUN is already enabled: %#x",
-+ entry->status);
-+ entry->status = ENABLE_LUN_SUCCESS;
-+ } else if (entry->status == ENABLE_LUN_RC_NONZERO) {
-+ TRACE_DBG("ENABLE_LUN succeeded, but with "
-+ "error: %#x", entry->status);
-+ entry->status = ENABLE_LUN_SUCCESS;
-+ } else if (entry->status != ENABLE_LUN_SUCCESS) {
-+ PRINT_ERROR("qla2x00t(%ld): ENABLE_LUN "
-+ "failed %x", ha->instance, entry->status);
-+ qla_clear_tgt_mode(ha);
-+ } /* else success */
-+ break;
-+ }
-+
-+ default:
-+ PRINT_ERROR("qla2x00t(%ld): Received unknown response pkt "
-+ "type %x", ha->instance, pkt->entry_type);
-+ break;
-+ }
-+
-+ tgt->irq_cmd_count--;
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * pha->hardware_lock is supposed to be held on entry. Might be dropped, then reacquired.
-+ */
-+static void q2t_async_event(uint16_t code, scsi_qla_host_t *ha,
-+ uint16_t *mailbox)
-+{
-+ struct q2t_tgt *tgt = ha->tgt;
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(tgt == NULL)) {
-+ TRACE_DBG("ASYNC EVENT %#x, but no tgt (ha %p)", code, ha);
-+ goto out;
-+ }
-+
-+ /*
-+	 * In tgt_stop mode we should also allow all requests to pass.
-+	 * Otherwise, some commands can get stuck.
-+ */
-+
-+ tgt->irq_cmd_count++;
-+
-+ switch (code) {
-+ case MBA_RESET: /* Reset */
-+ case MBA_SYSTEM_ERR: /* System Error */
-+ case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
-+ case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
-+ case MBA_ATIO_TRANSFER_ERR: /* ATIO Queue Transfer Error */
-+ TRACE(TRACE_MGMT, "qla2x00t(%ld): System error async event %#x "
-+			"occurred", ha->instance, code);
-+ break;
-+
-+ case MBA_LOOP_UP:
-+		TRACE(TRACE_MGMT, "qla2x00t(%ld): Loop up occurred",
-+ ha->instance);
-+ if (tgt->link_reinit_iocb_pending) {
-+ q24_send_notify_ack(ha, &tgt->link_reinit_iocb, 0, 0, 0);
-+ tgt->link_reinit_iocb_pending = 0;
-+ }
-+ break;
-+
-+ case MBA_LIP_OCCURRED:
-+		TRACE(TRACE_MGMT, "qla2x00t(%ld): LIP occurred", ha->instance);
-+ break;
-+
-+ case MBA_LOOP_DOWN:
-+		TRACE(TRACE_MGMT, "qla2x00t(%ld): Loop down occurred",
-+ ha->instance);
-+ break;
-+
-+ case MBA_LIP_RESET:
-+		TRACE(TRACE_MGMT, "qla2x00t(%ld): LIP reset occurred",
-+ ha->instance);
-+ break;
-+
-+ case MBA_PORT_UPDATE:
-+ case MBA_RSCN_UPDATE:
-+ TRACE_MGMT_DBG("qla2x00t(%ld): Port update async event %#x "
-+			"occurred", ha->instance, code);
-+ /* .mark_all_devices_lost() is handled by the initiator driver */
-+ break;
-+
-+ default:
-+		TRACE(TRACE_MGMT, "qla2x00t(%ld): Async event %#x occurred: "
-+ "ignoring (m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)",
-+ ha->instance, code,
-+ le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
-+ le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
-+ break;
-+ }
-+
-+ tgt->irq_cmd_count--;
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
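-+/*
-+ * Formats an 8-byte WWN as a newly kmalloc'ed "xx:xx:xx:xx:xx:xx:xx:xx"
-+ * string, e.g. "50:06:0b:00:00:c2:62:2e" (hypothetical value); the caller
-+ * must kfree() the result.
-+ */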
-+static int q2t_get_target_name(uint8_t *wwn, char **ppwwn_name)
-+{
-+ const int wwn_len = 3*WWN_SIZE+2;
-+ int res = 0;
-+ char *name;
-+
-+ name = kmalloc(wwn_len, GFP_KERNEL);
-+ if (name == NULL) {
-+ PRINT_ERROR("qla2x00t: Allocation of tgt wwn name (size %d) "
-+ "failed", wwn_len);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ sprintf(name, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
-+ wwn[0], wwn[1], wwn[2], wwn[3],
-+ wwn[4], wwn[5], wwn[6], wwn[7]);
-+
-+ *ppwwn_name = name;
-+
-+out:
-+ return res;
-+}
-+
-+/* Must be called under tgt_mutex */
-+static struct q2t_sess *q2t_make_local_sess(scsi_qla_host_t *ha,
-+ const uint8_t *s_id, uint16_t loop_id)
-+{
-+ struct q2t_sess *sess = NULL;
-+ fc_port_t *fcport = NULL;
-+ int rc, global_resets;
-+
-+ TRACE_ENTRY();
-+
-+retry:
-+ global_resets = atomic_read(&ha->tgt->tgt_global_resets_count);
-+
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ BUG_ON(s_id == NULL);
-+
-+ rc = q24_get_loop_id(ha, s_id, &loop_id);
-+ if (rc != 0) {
-+ if ((s_id[0] == 0xFF) &&
-+ (s_id[1] == 0xFC)) {
-+ /*
-+				 * This is the Domain Controller, so it should be
-+ * OK to drop SCSI commands from it.
-+ */
-+ TRACE_MGMT_DBG("Unable to find initiator with "
-+ "S_ID %x:%x:%x", s_id[0], s_id[1],
-+ s_id[2]);
-+ } else
-+ PRINT_ERROR("qla2x00t(%ld): Unable to find "
-+ "initiator with S_ID %x:%x:%x",
-+ ha->instance, s_id[0], s_id[1],
-+ s_id[2]);
-+ goto out;
-+ }
-+ }
-+
-+ fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
-+ if (fcport == NULL) {
-+ PRINT_ERROR("qla2x00t(%ld): Allocation of tmp FC port failed",
-+ ha->instance);
-+ goto out;
-+ }
-+
-+ TRACE_MGMT_DBG("loop_id %d", loop_id);
-+
-+ fcport->loop_id = loop_id;
-+
-+ rc = qla2x00_get_port_database(ha, fcport, 0);
-+ if (rc != QLA_SUCCESS) {
-+ PRINT_ERROR("qla2x00t(%ld): Failed to retrieve fcport "
-+ "information -- get_port_database() returned %x "
-+ "(loop_id=0x%04x)", ha->instance, rc, loop_id);
-+ goto out_free_fcport;
-+ }
-+
-+ if (global_resets != atomic_read(&ha->tgt->tgt_global_resets_count)) {
-+ TRACE_MGMT_DBG("qla2x00t(%ld): global reset during session "
-+ "discovery (counter was %d, new %d), retrying",
-+ ha->instance, global_resets,
-+ atomic_read(&ha->tgt->tgt_global_resets_count));
-+ kfree(fcport);
-+ fcport = NULL;
-+ goto retry;
-+ }
-+
-+ sess = q2t_create_sess(ha, fcport, true);
-+
-+out_free_fcport:
-+ kfree(fcport);
-+
-+out:
-+ TRACE_EXIT_HRES((unsigned long)sess);
-+ return sess;
-+}
-+
-+static void q2t_exec_sess_work(struct q2t_tgt *tgt,
-+ struct q2t_sess_work_param *prm)
-+{
-+ scsi_qla_host_t *ha = tgt->ha;
-+ scsi_qla_host_t *pha = to_qla_parent(ha);
-+ int rc;
-+ struct q2t_sess *sess = NULL;
-+ uint8_t *s_id = NULL; /* to hide compiler warnings */
-+ uint8_t local_s_id[3];
-+ int loop_id = -1; /* to hide compiler warnings */
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("prm %p", prm);
-+
-+ mutex_lock(&ha->tgt_mutex);
-+ spin_lock_irq(&pha->hardware_lock);
-+
-+ if (tgt->tgt_stop)
-+ goto send;
-+
-+ switch (prm->type) {
-+ case Q2T_SESS_WORK_CMD:
-+ {
-+ struct q2t_cmd *cmd = prm->cmd;
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ atio7_entry_t *a = (atio7_entry_t *)&cmd->atio;
-+ s_id = a->fcp_hdr.s_id;
-+ } else
-+ loop_id = GET_TARGET_ID(ha, (atio_entry_t *)&cmd->atio);
-+ break;
-+ }
-+ case Q2T_SESS_WORK_ABORT:
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ sess = q2t_find_sess_by_s_id_le(tgt,
-+ prm->abts.fcp_hdr_le.s_id);
-+ if (sess == NULL) {
-+ s_id = local_s_id;
-+ s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
-+ s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
-+ s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
-+ }
-+ goto after_find;
-+ } else
-+ loop_id = GET_TARGET_ID(ha, &prm->tm_iocb);
-+ break;
-+ case Q2T_SESS_WORK_TM:
-+ if (IS_FWI2_CAPABLE(ha))
-+ s_id = prm->tm_iocb2.fcp_hdr.s_id;
-+ else
-+ loop_id = GET_TARGET_ID(ha, &prm->tm_iocb);
-+ break;
-+ default:
-+ BUG_ON(1);
-+ break;
-+ }
-+
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ BUG_ON(s_id == NULL);
-+ sess = q2t_find_sess_by_s_id(tgt, s_id);
-+ } else
-+ sess = q2t_find_sess_by_loop_id(tgt, loop_id);
-+
-+after_find:
-+ if (sess != NULL) {
-+ TRACE_MGMT_DBG("sess %p found", sess);
-+ q2t_sess_get(sess);
-+ } else {
-+ /*
-+ * We are under tgt_mutex, so a new sess can't be added
-+ * behind us.
-+ */
-+ spin_unlock_irq(&pha->hardware_lock);
-+ sess = q2t_make_local_sess(ha, s_id, loop_id);
-+ spin_lock_irq(&pha->hardware_lock);
-+ /* sess has got an extra creation ref */
-+ }
-+
-+send:
-+ if ((sess == NULL) || tgt->tgt_stop)
-+ goto out_term;
-+
-+ switch (prm->type) {
-+ case Q2T_SESS_WORK_CMD:
-+ {
-+ struct q2t_cmd *cmd = prm->cmd;
-+ if (tgt->tm_to_unknown) {
-+ /*
-+			 * The cmd might already have been aborted behind us, so
-+			 * be safe and abort it. It should be OK; the initiator
-+			 * will retry it.
-+ */
-+ goto out_term;
-+ }
-+ TRACE_MGMT_DBG("Sending work cmd %p to SCST", cmd);
-+ rc = q2t_do_send_cmd_to_scst(ha, cmd, sess);
-+ break;
-+ }
-+ case Q2T_SESS_WORK_ABORT:
-+ if (IS_FWI2_CAPABLE(ha))
-+ rc = __q24_handle_abts(ha, &prm->abts, sess);
-+ else
-+ rc = __q2t_abort_task(ha, &prm->tm_iocb, sess);
-+ break;
-+ case Q2T_SESS_WORK_TM:
-+ {
-+ uint8_t *lun;
-+ uint16_t lun_data;
-+ int lun_size, fn;
-+ void *iocb;
-+
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ atio7_entry_t *a = &prm->tm_iocb2;
-+ iocb = a;
-+ lun = (uint8_t *)&a->fcp_cmnd.lun;
-+ lun_size = sizeof(a->fcp_cmnd.lun);
-+ fn = a->fcp_cmnd.task_mgmt_flags;
-+ } else {
-+ notify_entry_t *n = &prm->tm_iocb;
-+ iocb = n;
-+ /* make it be in network byte order */
-+ lun_data = swab16(le16_to_cpu(n->lun));
-+ lun = (uint8_t *)&lun_data;
-+ lun_size = sizeof(lun_data);
-+ fn = n->task_flags >> IMM_NTFY_TASK_MGMT_SHIFT;
-+ }
-+ rc = q2t_issue_task_mgmt(sess, lun, lun_size, fn, iocb, 0);
-+ break;
-+ }
-+ default:
-+ BUG_ON(1);
-+ break;
-+ }
-+
-+ if (rc != 0)
-+ goto out_term;
-+
-+out_put:
-+ if (sess != NULL)
-+ q2t_sess_put(sess);
-+
-+ spin_unlock_irq(&pha->hardware_lock);
-+ mutex_unlock(&ha->tgt_mutex);
-+
-+ TRACE_EXIT();
-+ return;
-+
-+out_term:
-+ switch (prm->type) {
-+ case Q2T_SESS_WORK_CMD:
-+ {
-+ struct q2t_cmd *cmd = prm->cmd;
-+ TRACE_MGMT_DBG("Terminating work cmd %p", cmd);
-+ /*
-+		 * cmd has not been sent to SCST yet, so pass NULL as the second
-+ * argument
-+ */
-+ if (IS_FWI2_CAPABLE(ha))
-+ q24_send_term_exchange(ha, NULL, &cmd->atio.atio7, 1);
-+ else
-+ q2x_send_term_exchange(ha, NULL, &cmd->atio.atio2x, 1);
-+ q2t_free_cmd(cmd);
-+ break;
-+ }
-+ case Q2T_SESS_WORK_ABORT:
-+ if (IS_FWI2_CAPABLE(ha))
-+ q24_send_abts_resp(ha, &prm->abts,
-+ SCST_MGMT_STATUS_REJECTED, false);
-+ else
-+ q2x_send_notify_ack(ha, &prm->tm_iocb, 0,
-+ 0, 0, 0, 0, 0);
-+ break;
-+ case Q2T_SESS_WORK_TM:
-+ if (IS_FWI2_CAPABLE(ha))
-+ q24_send_term_exchange(ha, NULL, &prm->tm_iocb2, 1);
-+ else
-+ q2x_send_notify_ack(ha, &prm->tm_iocb, 0,
-+ 0, 0, 0, 0, 0);
-+ break;
-+ default:
-+ BUG_ON(1);
-+ break;
-+ }
-+ goto out_put;
-+}
-+
-+static void q2t_sess_work_fn(struct work_struct *work)
-+{
-+ struct q2t_tgt *tgt = container_of(work, struct q2t_tgt, sess_work);
-+ scsi_qla_host_t *pha = to_qla_parent(tgt->ha);
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Sess work (tgt %p)", tgt);
-+
-+ spin_lock_irq(&tgt->sess_work_lock);
-+ while (!list_empty(&tgt->sess_works_list)) {
-+ struct q2t_sess_work_param *prm = list_entry(
-+ tgt->sess_works_list.next, typeof(*prm),
-+ sess_works_list_entry);
-+
-+ /*
-+		 * This work can be scheduled on several CPUs at a time, so we
-+		 * must delete the entry to avoid double processing.
-+ */
-+ list_del(&prm->sess_works_list_entry);
-+
-+ spin_unlock_irq(&tgt->sess_work_lock);
-+
-+ q2t_exec_sess_work(tgt, prm);
-+
-+ spin_lock_irq(&tgt->sess_work_lock);
-+
-+ kfree(prm);
-+ }
-+ spin_unlock_irq(&tgt->sess_work_lock);
-+
-+ spin_lock_irq(&pha->hardware_lock);
-+ spin_lock(&tgt->sess_work_lock);
-+ if (list_empty(&tgt->sess_works_list)) {
-+ tgt->sess_works_pending = 0;
-+ tgt->tm_to_unknown = 0;
-+ }
-+ spin_unlock(&tgt->sess_work_lock);
-+ spin_unlock_irq(&pha->hardware_lock);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* pha->hardware_lock is supposed to be held and IRQs off */
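-+/*
-+ * Clears the firmware handle slot that still points at cmd, so that a late
-+ * CTIO completion cannot reference the command after it has been freed.
-+ */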
-+static void q2t_cleanup_hw_pending_cmd(scsi_qla_host_t *ha, struct q2t_cmd *cmd)
-+{
-+ uint32_t h;
-+
-+ for (h = 0; h < MAX_OUTSTANDING_COMMANDS; h++) {
-+ if (ha->cmds[h] == cmd) {
-+ TRACE_DBG("Clearing handle %d for cmd %p", h, cmd);
-+ ha->cmds[h] = NULL;
-+ break;
-+ }
-+ }
-+ return;
-+}
-+
-+static void q2t_on_hw_pending_cmd_timeout(struct scst_cmd *scst_cmd)
-+{
-+ struct q2t_cmd *cmd = (struct q2t_cmd *)scst_cmd_get_tgt_priv(scst_cmd);
-+ struct q2t_tgt *tgt = cmd->tgt;
-+ scsi_qla_host_t *ha = tgt->ha;
-+ scsi_qla_host_t *pha = to_qla_parent(ha);
-+ unsigned long flags;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Cmd %p HW pending for too long (state %x)", cmd,
-+ cmd->state);
-+
-+ spin_lock_irqsave(&pha->hardware_lock, flags);
-+
-+ if (cmd->sg_mapped)
-+ q2t_unmap_sg(ha, cmd);
-+
-+ if (cmd->state == Q2T_STATE_PROCESSED) {
-+ TRACE_MGMT_DBG("Force finishing cmd %p", cmd);
-+ } else if (cmd->state == Q2T_STATE_NEED_DATA) {
-+ TRACE_MGMT_DBG("Force rx_data cmd %p", cmd);
-+
-+ q2t_cleanup_hw_pending_cmd(ha, cmd);
-+
-+ scst_rx_data(scst_cmd, SCST_RX_STATUS_ERROR_FATAL,
-+ SCST_CONTEXT_THREAD);
-+ goto out_unlock;
-+ } else if (cmd->state == Q2T_STATE_ABORTED) {
-+ TRACE_MGMT_DBG("Force finishing aborted cmd %p (tag %d)",
-+ cmd, cmd->tag);
-+ } else {
-+ PRINT_ERROR("qla2x00t(%ld): A command in state (%d) should "
-+ "not be HW pending", ha->instance, cmd->state);
-+ goto out_unlock;
-+ }
-+
-+ q2t_cleanup_hw_pending_cmd(ha, cmd);
-+
-+ scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_FAILED);
-+ scst_tgt_cmd_done(scst_cmd, SCST_CONTEXT_THREAD);
-+
-+out_unlock:
-+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/* Must be called under tgt_host_action_mutex */
-+static int q2t_add_target(scsi_qla_host_t *ha)
-+{
-+ int res;
-+ int rc;
-+ char *wwn;
-+ int sg_tablesize;
-+ struct q2t_tgt *tgt;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Registering target for host %ld(%p)", ha->host_no, ha);
-+
-+ BUG_ON((ha->q2t_tgt != NULL) || (ha->tgt != NULL));
-+
-+ tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
-+ if (tgt == NULL) {
-+ PRINT_ERROR("qla2x00t: %s", "Allocation of tgt failed");
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ tgt->ha = ha;
-+ init_waitqueue_head(&tgt->waitQ);
-+ INIT_LIST_HEAD(&tgt->sess_list);
-+ INIT_LIST_HEAD(&tgt->del_sess_list);
-+ INIT_DELAYED_WORK(&tgt->sess_del_work,
-+ (void (*)(struct work_struct *))q2t_del_sess_work_fn);
-+ spin_lock_init(&tgt->sess_work_lock);
-+ INIT_WORK(&tgt->sess_work, q2t_sess_work_fn);
-+ INIT_LIST_HEAD(&tgt->sess_works_list);
-+ spin_lock_init(&tgt->srr_lock);
-+ INIT_LIST_HEAD(&tgt->srr_ctio_list);
-+ INIT_LIST_HEAD(&tgt->srr_imm_list);
-+ INIT_WORK(&tgt->srr_work, q2t_handle_srr_work);
-+ atomic_set(&tgt->tgt_global_resets_count, 0);
-+
-+ ha->q2t_tgt = tgt;
-+
-+ res = q2t_get_target_name(ha->port_name, &wwn);
-+ if (res != 0)
-+ goto out_free;
-+
-+ tgt->scst_tgt = scst_register_target(&tgt2x_template, wwn);
-+
-+ kfree(wwn);
-+
-+ if (!tgt->scst_tgt) {
-+ PRINT_ERROR("qla2x00t(%ld): scst_register_target() "
-+ "failed for host %ld(%p)", ha->instance,
-+ ha->host_no, ha);
-+ res = -ENOMEM;
-+ goto out_free;
-+ }
-+
-+ if (IS_FWI2_CAPABLE(ha)) {
-+ PRINT_INFO("qla2x00t(%ld): using 64 Bit PCI "
-+ "addressing", ha->instance);
-+ tgt->tgt_enable_64bit_addr = 1;
-+ /* 3 is reserved */
-+ sg_tablesize =
-+ QLA_MAX_SG_24XX(ha->request_q_length - 3);
-+ tgt->datasegs_per_cmd = DATASEGS_PER_COMMAND_24XX;
-+ tgt->datasegs_per_cont = DATASEGS_PER_CONT_24XX;
-+ } else {
-+ if (ha->flags.enable_64bit_addressing) {
-+ PRINT_INFO("qla2x00t(%ld): 64 Bit PCI "
-+ "addressing enabled", ha->instance);
-+ tgt->tgt_enable_64bit_addr = 1;
-+ /* 3 is reserved */
-+ sg_tablesize =
-+ QLA_MAX_SG64(ha->request_q_length - 3);
-+ tgt->datasegs_per_cmd = DATASEGS_PER_COMMAND64;
-+ tgt->datasegs_per_cont = DATASEGS_PER_CONT64;
-+ } else {
-+ PRINT_INFO("qla2x00t(%ld): Using 32 Bit "
-+ "PCI addressing", ha->instance);
-+ sg_tablesize =
-+ QLA_MAX_SG32(ha->request_q_length - 3);
-+ tgt->datasegs_per_cmd = DATASEGS_PER_COMMAND32;
-+ tgt->datasegs_per_cont = DATASEGS_PER_CONT32;
-+ }
-+ }
-+
-+ rc = sysfs_create_link(scst_sysfs_get_tgt_kobj(tgt->scst_tgt),
-+ &ha->host->shost_dev.kobj, "host");
-+ if (rc != 0)
-+ PRINT_ERROR("qla2x00t(%ld): Unable to create \"host\" link for "
-+ "target %s", ha->instance,
-+ scst_get_tgt_name(tgt->scst_tgt));
-+ if (!ha->parent) {
-+ rc = sysfs_create_file(scst_sysfs_get_tgt_kobj(tgt->scst_tgt),
-+ &q2t_hw_target_attr.attr);
-+ if (rc != 0)
-+ PRINT_ERROR("qla2x00t(%ld): Unable to create "
-+ "\"hw_target\" file for target %s",
-+ ha->instance, scst_get_tgt_name(tgt->scst_tgt));
-+
-+ rc = sysfs_create_file(scst_sysfs_get_tgt_kobj(tgt->scst_tgt),
-+ &q2t_hw_node_name_attr.attr);
-+ if (rc != 0)
-+ PRINT_ERROR("qla2x00t(%ld): Unable to create "
-+ "\"node_name\" file for HW target %s",
-+ ha->instance, scst_get_tgt_name(tgt->scst_tgt));
-+ } else {
-+ rc = sysfs_create_file(scst_sysfs_get_tgt_kobj(tgt->scst_tgt),
-+ &q2t_vp_node_name_attr.attr);
-+ if (rc != 0)
-+ PRINT_ERROR("qla2x00t(%ld): Unable to create "
-+ "\"node_name\" file for NPIV target %s",
-+ ha->instance, scst_get_tgt_name(tgt->scst_tgt));
-+
-+ rc = sysfs_create_file(scst_sysfs_get_tgt_kobj(tgt->scst_tgt),
-+ &q2t_vp_parent_host_attr.attr);
-+ if (rc != 0)
-+ PRINT_ERROR("qla2x00t(%ld): Unable to create "
-+ "\"parent_host\" file for NPIV target %s",
-+ ha->instance, scst_get_tgt_name(tgt->scst_tgt));
-+ }
-+
-+ scst_tgt_set_sg_tablesize(tgt->scst_tgt, sg_tablesize);
-+ scst_tgt_set_tgt_priv(tgt->scst_tgt, tgt);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free:
-+ ha->q2t_tgt = NULL;
-+ kfree(tgt);
-+ goto out;
-+}
-+
-+/* Must be called under tgt_host_action_mutex */
-+static int q2t_remove_target(scsi_qla_host_t *ha)
-+{
-+ TRACE_ENTRY();
-+
-+	if ((ha->q2t_tgt == NULL) || (ha->tgt != NULL)) {
-+		PRINT_ERROR("qla2x00t(%ld): Can't remove target: not "
-+			"registered or still enabled", ha->instance);
-+		return -EINVAL;
-+	}
-+
-+ TRACE_DBG("Unregistering target for host %ld(%p)", ha->host_no, ha);
-+ scst_unregister_target(ha->q2t_tgt->scst_tgt);
-+ /*
-+ * Free of tgt happens via callback q2t_target_release
-+ * called from scst_unregister_target, so we shouldn't touch
-+ * it again.
-+ */
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+static int q2t_host_action(scsi_qla_host_t *ha,
-+ qla2x_tgt_host_action_t action)
-+{
-+ int res = 0;
-+ scsi_qla_host_t *pha = to_qla_parent(ha);
-+
-+ TRACE_ENTRY();
-+
-+ BUG_ON(ha == NULL);
-+
-+ /* To sync with q2t_exit() */
-+ if (down_read_trylock(&q2t_unreg_rwsem) == 0)
-+ goto out;
-+
-+ mutex_lock(&ha->tgt_host_action_mutex);
-+
-+ switch (action) {
-+ case ADD_TARGET:
-+ res = q2t_add_target(ha);
-+ break;
-+ case REMOVE_TARGET:
-+ res = q2t_remove_target(ha);
-+ break;
-+ case ENABLE_TARGET_MODE:
-+ {
-+ fc_port_t *fcport;
-+
-+ if (qla_tgt_mode_enabled(ha)) {
-+ PRINT_INFO("qla2x00t(%ld): Target mode already "
-+ "enabled", ha->instance);
-+ break;
-+ }
-+
-+ if ((ha->q2t_tgt == NULL) || (ha->tgt != NULL)) {
-+ PRINT_ERROR("qla2x00t(%ld): Can't enable target mode "
-+				"for a nonexistent target", ha->instance);
-+ break;
-+ }
-+
-+ PRINT_INFO("qla2x00t(%ld): Enabling target mode",
-+ ha->instance);
-+
-+ spin_lock_irq(&pha->hardware_lock);
-+ ha->tgt = ha->q2t_tgt;
-+ ha->tgt->tgt_stop = 0;
-+ spin_unlock_irq(&pha->hardware_lock);
-+ list_for_each_entry_rcu(fcport, &ha->fcports, list) {
-+ q2t_fc_port_added(ha, fcport);
-+ }
-+ TRACE_DBG("Enable tgt mode for host %ld(%ld,%p)",
-+ ha->host_no, ha->instance, ha);
-+ qla2x00_enable_tgt_mode(ha);
-+ break;
-+ }
-+
-+ case DISABLE_TARGET_MODE:
-+ if (!qla_tgt_mode_enabled(ha)) {
-+ PRINT_INFO("qla2x00t(%ld): Target mode already "
-+ "disabled", ha->instance);
-+ break;
-+ }
-+
-+ PRINT_INFO("qla2x00t(%ld): Disabling target mode",
-+ ha->instance);
-+
-+ BUG_ON(ha->tgt == NULL);
-+
-+ q2t_target_stop(ha->tgt->scst_tgt);
-+ break;
-+
-+ default:
-+ PRINT_ERROR("qla2x00t(%ld): %s: unsupported action %d",
-+ ha->instance, __func__, action);
-+ res = -EINVAL;
-+ break;
-+ }
-+
-+ mutex_unlock(&ha->tgt_host_action_mutex);
-+
-+ up_read(&q2t_unreg_rwsem);
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int q2t_enable_tgt(struct scst_tgt *scst_tgt, bool enable)
-+{
-+ struct q2t_tgt *tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
-+ scsi_qla_host_t *ha = tgt->ha;
-+ int res;
-+
-+ if (enable)
-+ res = q2t_host_action(ha, ENABLE_TARGET_MODE);
-+ else
-+ res = q2t_host_action(ha, DISABLE_TARGET_MODE);
-+
-+ return res;
-+}
-+
-+static bool q2t_is_tgt_enabled(struct scst_tgt *scst_tgt)
-+{
-+ struct q2t_tgt *tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
-+ scsi_qla_host_t *ha = tgt->ha;
-+
-+ return qla_tgt_mode_enabled(ha);
-+}
-+
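-+/*
-+ * Parses a WWN of exactly 23 characters, i.e. eight colon-separated hex byte
-+ * pairs. A minimal usage sketch (hypothetical value):
-+ *
-+ *	u64 wwn;
-+ *	int rc = q2t_parse_wwn("50:06:0b:00:00:c2:62:2e", &wwn);
-+ *	// on success rc == 0 and wwn == 0x50060b0000c2622eULL
-+ */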
-+static int q2t_parse_wwn(const char *ns, u64 *nm)
-+{
-+ unsigned int i, j;
-+ u8 wwn[8];
-+
-+ /* validate we have enough characters for WWPN */
-+ if (strnlen(ns, 23) != 23)
-+ return -EINVAL;
-+
-+ memset(wwn, 0, sizeof(wwn));
-+
-+ /* Validate and store the new name */
-+ for (i = 0, j = 0; i < 16; i++) {
-+ if ((*ns >= 'a') && (*ns <= 'f'))
-+ j = ((j << 4) | ((*ns++ - 'a') + 10));
-+ else if ((*ns >= 'A') && (*ns <= 'F'))
-+ j = ((j << 4) | ((*ns++ - 'A') + 10));
-+ else if ((*ns >= '0') && (*ns <= '9'))
-+ j = ((j << 4) | (*ns++ - '0'));
-+ else
-+ return -EINVAL;
-+ if (i % 2) {
-+ wwn[i/2] = j & 0xff;
-+ j = 0;
-+ if ((i < 15) && (':' != *ns++))
-+ return -EINVAL;
-+ }
-+ }
-+
-+ *nm = wwn_to_u64(wwn);
-+
-+ return 0;
-+}
-+
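-+/*
-+ * A hypothetical usage sketch: q2t_add_vtarget() is reached through the SCST
-+ * sysfs mgmt interface with semicolon-separated "name value" parameters
-+ * (the WWNs and mgmt path below are placeholders):
-+ *
-+ *   echo "add_target 50:xx:xx:xx:xx:xx:xx:x1 \
-+ *         node_name 50:xx:xx:xx:xx:xx:xx:x2; \
-+ *         parent_host 50:xx:xx:xx:xx:xx:xx:x3" > .../targets/qla2x00t/mgmt
-+ */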
-+static ssize_t q2t_add_vtarget(const char *target_name, char *params)
-+{
-+ int res;
-+ char *param, *p, *pp;
-+ u64 port_name, node_name, *pnode_name = NULL;
-+ u64 parent_host, *pparent_host = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ res = q2t_parse_wwn(target_name, &port_name);
-+ if (res) {
-+ PRINT_ERROR("qla2x00t: Syntax error at target name %s",
-+ target_name);
-+ goto out;
-+ }
-+
-+ while (1) {
-+ param = scst_get_next_token_str(&params);
-+ if (param == NULL)
-+ break;
-+
-+ p = scst_get_next_lexem(&param);
-+ if (*p == '\0') {
-+ PRINT_ERROR("qla2x00t: Syntax error at %s (target %s)",
-+ param, target_name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ pp = scst_get_next_lexem(&param);
-+ if (*pp == '\0') {
-+			PRINT_ERROR("qla2x00t: Missing value for parameter %s "
-+				"(target %s)", p, target_name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (scst_get_next_lexem(&param)[0] != '\0') {
-+			PRINT_ERROR("qla2x00t: Too many values for parameter %s "
-+ "(target %s)", p, target_name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (!strcasecmp("node_name", p)) {
-+ res = q2t_parse_wwn(pp, &node_name);
-+ if (res) {
-+ PRINT_ERROR("qla2x00t: Illegal node_name %s "
-+ "(target %s)", pp, target_name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+ pnode_name = &node_name;
-+ continue;
-+ }
-+
-+ if (!strcasecmp("parent_host", p)) {
-+ res = q2t_parse_wwn(pp, &parent_host);
-+ if (res != 0) {
-+ PRINT_ERROR("qla2x00t: Illegal parent_host %s"
-+ " (target %s)", pp, target_name);
-+ goto out;
-+ }
-+ pparent_host = &parent_host;
-+ continue;
-+ }
-+
-+ PRINT_ERROR("qla2x00t: Unknown parameter %s (target %s)", p,
-+ target_name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (!pnode_name) {
-+ PRINT_ERROR("qla2x00t: Missing parameter node_name (target %s)",
-+ target_name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (!pparent_host) {
-+ PRINT_ERROR("qla2x00t: Missing parameter parent_host "
-+ "(target %s)", target_name);
-+ res = -EINVAL;
-+ goto out;
-+ }
-+
-+ res = qla2xxx_add_vtarget(&port_name, pnode_name, pparent_host);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t q2t_del_vtarget(const char *target_name)
-+{
-+ int res;
-+ u64 port_name;
-+
-+ TRACE_ENTRY();
-+
-+ res = q2t_parse_wwn(target_name, &port_name);
-+ if (res) {
-+ PRINT_ERROR("qla2x00t: Syntax error at target name %s",
-+ target_name);
-+ goto out;
-+ }
-+
-+ res = qla2xxx_del_vtarget(&port_name);
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
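-+/*
-+ * Builds a 24-byte FCP TransportID as laid out below: byte 0 carries the
-+ * protocol identifier and bytes 8-15 the initiator's N_Port name. When
-+ * called with a NULL session, the supported protocol ID is returned instead.
-+ */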
-+static int q2t_get_initiator_port_transport_id(struct scst_tgt *tgt,
-+ struct scst_session *scst_sess, uint8_t **transport_id)
-+{
-+ struct q2t_sess *sess;
-+ int res = 0;
-+ int tr_id_size;
-+ uint8_t *tr_id;
-+
-+ TRACE_ENTRY();
-+
-+ if (scst_sess == NULL) {
-+ res = SCSI_TRANSPORTID_PROTOCOLID_FCP2;
-+ goto out;
-+ }
-+
-+ sess = (struct q2t_sess *)scst_sess_get_tgt_priv(scst_sess);
-+
-+ tr_id_size = 24;
-+
-+ tr_id = kzalloc(tr_id_size, GFP_KERNEL);
-+ if (tr_id == NULL) {
-+ PRINT_ERROR("qla2x00t: Allocation of TransportID (size %d) "
-+ "failed", tr_id_size);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ tr_id[0] = SCSI_TRANSPORTID_PROTOCOLID_FCP2;
-+
-+ BUILD_BUG_ON(sizeof(sess->port_name) != 8);
-+ memcpy(&tr_id[8], sess->port_name, 8);
-+
-+ *transport_id = tr_id;
-+
-+ TRACE_BUFF_FLAG(TRACE_DEBUG, "Created tid", tr_id, tr_id_size);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t q2t_show_expl_conf_enabled(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buffer)
-+{
-+ struct scst_tgt *scst_tgt;
-+ struct q2t_tgt *tgt;
-+ scsi_qla_host_t *ha;
-+ ssize_t size;
-+
-+ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
-+ ha = tgt->ha;
-+
-+ size = scnprintf(buffer, PAGE_SIZE, "%d\n%s", ha->enable_explicit_conf,
-+ ha->enable_explicit_conf ? SCST_SYSFS_KEY_MARK "\n" : "");
-+
-+ return size;
-+}
-+
-+static ssize_t q2t_store_expl_conf_enabled(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buffer, size_t size)
-+{
-+ struct scst_tgt *scst_tgt;
-+ struct q2t_tgt *tgt;
-+ scsi_qla_host_t *ha, *pha;
-+ unsigned long flags;
-+
-+ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
-+ ha = tgt->ha;
-+ pha = to_qla_parent(ha);
-+
-+ spin_lock_irqsave(&pha->hardware_lock, flags);
-+
-+ switch (buffer[0]) {
-+ case '0':
-+ ha->enable_explicit_conf = 0;
-+		PRINT_INFO("qla2x00t(%ld): explicit confirmations disabled",
-+ ha->instance);
-+ break;
-+ case '1':
-+ ha->enable_explicit_conf = 1;
-+		PRINT_INFO("qla2x00t(%ld): explicit confirmations enabled",
-+ ha->instance);
-+ break;
-+ default:
-+ PRINT_ERROR("%s: qla2x00t(%ld): Requested action not "
-+ "understood: %s", __func__, ha->instance, buffer);
-+ break;
-+ }
-+
-+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
-+
-+ return size;
-+}
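-+/*
-+ * A usage sketch for the show/store handlers above (hypothetical sysfs path;
-+ * the attribute name and its kobject wiring are defined elsewhere in this
-+ * driver): writing '1' or '0' toggles explicit confirmations, e.g.
-+ *
-+ *   echo 1 > /sys/kernel/scst_tgt/targets/qla2x00t/<target>/<attribute>
-+ */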
-+
-+static ssize_t q2t_abort_isp_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buffer, size_t size)
-+{
-+ struct scst_tgt *scst_tgt;
-+ struct q2t_tgt *tgt;
-+ scsi_qla_host_t *ha;
-+
-+ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
-+ ha = tgt->ha;
-+
-+ PRINT_INFO("qla2x00t(%ld): Aborting ISP", ha->instance);
-+
-+ set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
-+ qla2x00_wait_for_hba_online(ha);
-+
-+ return size;
-+}
-+
-+static ssize_t q2t_version_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ sprintf(buf, "%s\n", Q2T_VERSION_STRING);
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ strcat(buf, "EXTRACHECKS\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_TRACING
-+ strcat(buf, "TRACING\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG
-+ strcat(buf, "DEBUG\n");
-+#endif
-+
-+#ifdef CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD
-+ strcat(buf, "QLA_TGT_DEBUG_WORK_IN_THREAD\n");
-+#endif
-+
-+ TRACE_EXIT();
-+ return strlen(buf);
-+}
-+
-+static ssize_t q2t_hw_target_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%d\n", 1);
-+}
-+
-+static ssize_t q2t_node_name_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_tgt *scst_tgt;
-+ struct q2t_tgt *tgt;
-+ scsi_qla_host_t *ha;
-+ ssize_t res;
-+ char *wwn;
-+ uint8_t *node_name;
-+
-+ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
-+ ha = tgt->ha;
-+
-+ if (ha->parent == NULL) {
-+ if (qla_tgt_mode_enabled(ha) || !ha->node_name_set)
-+ node_name = ha->node_name;
-+ else
-+ node_name = ha->tgt_node_name;
-+ } else
-+ node_name = ha->node_name;
-+
-+ res = q2t_get_target_name(node_name, &wwn);
-+ if (res != 0)
-+ goto out;
-+
-+ res = sprintf(buf, "%s\n", wwn);
-+ if ((ha->parent != NULL) || ha->node_name_set)
-+ res += sprintf(&buf[res], "%s\n", SCST_SYSFS_KEY_MARK);
-+
-+ kfree(wwn);
-+
-+out:
-+ return res;
-+}
-+
-+static ssize_t q2t_node_name_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buffer, size_t size)
-+{
-+ struct scst_tgt *scst_tgt;
-+ struct q2t_tgt *tgt;
-+ scsi_qla_host_t *ha;
-+ u64 node_name, old_node_name;
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
-+ ha = tgt->ha;
-+
-+ BUG_ON(ha->parent != NULL);
-+
-+ if (size == 0)
-+ goto out_default;
-+
-+ res = q2t_parse_wwn(buffer, &node_name);
-+ if (res != 0) {
-+ if ((buffer[0] == '\0') || (buffer[0] == '\n'))
-+ goto out_default;
-+ PRINT_ERROR("qla2x00t(%ld): Wrong node name", ha->instance);
-+ goto out;
-+ }
-+
-+ old_node_name = wwn_to_u64(ha->node_name);
-+ if (old_node_name == node_name)
-+ goto out_success;
-+
-+ u64_to_wwn(node_name, ha->tgt_node_name);
-+ ha->node_name_set = 1;
-+
-+abort:
-+ if (qla_tgt_mode_enabled(ha)) {
-+ set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
-+ qla2x00_wait_for_hba_online(ha);
-+ }
-+
-+out_success:
-+ res = size;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_default:
-+ ha->node_name_set = 0;
-+ goto abort;
-+}
-+
-+static ssize_t q2t_vp_parent_host_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_tgt *scst_tgt;
-+ struct q2t_tgt *tgt;
-+ scsi_qla_host_t *ha;
-+ ssize_t res;
-+ char *wwn;
-+
-+ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ tgt = (struct q2t_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
-+ ha = to_qla_parent(tgt->ha);
-+
-+ res = q2t_get_target_name(ha->port_name, &wwn);
-+ if (res != 0)
-+ goto out;
-+
-+ res = sprintf(buf, "%s\n%s\n", wwn, SCST_SYSFS_KEY_MARK);
-+
-+ kfree(wwn);
-+
-+out:
-+ return res;
-+}
-+
-+static uint16_t q2t_get_scsi_transport_version(struct scst_tgt *scst_tgt)
-+{
-+ /* FCP-2 */
-+ return 0x0900;
-+}
-+
-+static uint16_t q2t_get_phys_transport_version(struct scst_tgt *scst_tgt)
-+{
-+ return 0x0DA0; /* FC-FS */
-+}
-+
-+static int __init q2t_init(void)
-+{
-+ int res = 0;
-+
-+ TRACE_ENTRY();
-+
-+ BUILD_BUG_ON(sizeof(atio7_entry_t) != sizeof(atio_entry_t));
-+
-+ PRINT_INFO("qla2x00t: Initializing QLogic Fibre Channel HBA Driver "
-+ "target mode addon version %s", Q2T_VERSION_STRING);
-+
-+ q2t_cmd_cachep = KMEM_CACHE(q2t_cmd, SCST_SLAB_FLAGS);
-+ if (q2t_cmd_cachep == NULL) {
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ q2t_mgmt_cmd_cachep = KMEM_CACHE(q2t_mgmt_cmd, SCST_SLAB_FLAGS);
-+ if (q2t_mgmt_cmd_cachep == NULL) {
-+ res = -ENOMEM;
-+ goto out_cmd_free;
-+ }
-+
-+ q2t_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
-+ mempool_free_slab, q2t_mgmt_cmd_cachep);
-+ if (q2t_mgmt_cmd_mempool == NULL) {
-+ res = -ENOMEM;
-+ goto out_kmem_free;
-+ }
-+
-+ res = scst_register_target_template(&tgt2x_template);
-+ if (res < 0)
-+ goto out_mempool_free;
-+
-+ /*
-+ * qla2xxx_tgt_register_driver() happens in q2t_target_detect
-+ * called via scst_register_target_template()
-+ */
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_mempool_free:
-+ mempool_destroy(q2t_mgmt_cmd_mempool);
-+
-+out_kmem_free:
-+ kmem_cache_destroy(q2t_mgmt_cmd_cachep);
-+
-+out_cmd_free:
-+ kmem_cache_destroy(q2t_cmd_cachep);
-+ goto out;
-+}
-+
-+static void __exit q2t_exit(void)
-+{
-+ TRACE_ENTRY();
-+
-+ PRINT_INFO("qla2x00t: %s", "Unloading QLogic Fibre Channel HBA Driver "
-+ "target mode addon driver");
-+
-+ /* To sync with q2t_host_action() */
-+ down_write(&q2t_unreg_rwsem);
-+
-+ scst_unregister_target_template(&tgt2x_template);
-+
-+ /*
-+	 * Now target mode is disabled everywhere and there is no way to call
-+	 * us through sysfs, so we can safely remove all the references
-+ * to our functions.
-+ */
-+ qla2xxx_tgt_unregister_driver();
-+
-+ mempool_destroy(q2t_mgmt_cmd_mempool);
-+ kmem_cache_destroy(q2t_mgmt_cmd_cachep);
-+ kmem_cache_destroy(q2t_cmd_cachep);
-+
-+ /* Let's make lockdep happy */
-+ up_write(&q2t_unreg_rwsem);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+module_init(q2t_init);
-+module_exit(q2t_exit);
-+
-+MODULE_AUTHOR("Vladislav Bolkhovitin and others");
-+MODULE_DESCRIPTION("Target mode addon for qla2[2,3,4,5+]xx");
-+MODULE_LICENSE("GPL");
-+MODULE_VERSION(Q2T_VERSION_STRING);
-diff -uprN orig/linux-3.2/drivers/scst/qla2xxx-target/qla2x00t.h linux-3.2/drivers/scst/qla2xxx-target/qla2x00t.h
---- orig/linux-3.2/drivers/scst/qla2xxx-target/qla2x00t.h
-+++ linux-3.2/drivers/scst/qla2xxx-target/qla2x00t.h
-@@ -0,0 +1,287 @@
-+/*
-+ * qla2x00t.h
-+ *
-+ * Copyright (C) 2004 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ * Copyright (C) 2004 - 2005 Leonid Stoljar
-+ * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
-+ * Copyright (C) 2006 - 2010 ID7 Ltd.
-+ * Copyright (C) 2010 - 2011 SCST Ltd.
-+ *
-+ * QLogic 22xx/23xx/24xx/25xx FC target driver.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation, version 2
-+ * of the License.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ */
-+
-+#ifndef __QLA2X00T_H
-+#define __QLA2X00T_H
-+
-+#include <qla_def.h>
-+#include <qla2x_tgt.h>
-+#include <qla2x_tgt_def.h>
-+
-+#include <scst_debug.h>
-+
-+/* Version numbers, the same as for the kernel */
-+#define Q2T_VERSION(a, b, c, d) (((a) << 030) + ((b) << 020) + ((c) << 010) + (d))
-+#define Q2T_VERSION_CODE Q2T_VERSION(2, 2, 0, 0)
-+#define Q2T_VERSION_STRING "2.2.0"
-+#define Q2T_PROC_VERSION_NAME "version"
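-+/* e.g. Q2T_VERSION(2, 2, 0, 0) == 0x02020000 (note the octal shift counts) */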
-+
-+#define Q2T_MAX_CDB_LEN 16
-+#define Q2T_TIMEOUT 10 /* in seconds */
-+
-+#define Q2T_MAX_HW_PENDING_TIME 60 /* in seconds */
-+
-+/* Immediate notify status constants */
-+#define IMM_NTFY_LIP_RESET 0x000E
-+#define IMM_NTFY_LIP_LINK_REINIT 0x000F
-+#define IMM_NTFY_IOCB_OVERFLOW 0x0016
-+#define IMM_NTFY_ABORT_TASK 0x0020
-+#define IMM_NTFY_PORT_LOGOUT 0x0029
-+#define IMM_NTFY_PORT_CONFIG 0x002A
-+#define IMM_NTFY_GLBL_TPRLO 0x002D
-+#define IMM_NTFY_GLBL_LOGO 0x002E
-+#define IMM_NTFY_RESOURCE 0x0034
-+#define IMM_NTFY_MSG_RX 0x0036
-+#define IMM_NTFY_SRR 0x0045
-+#define IMM_NTFY_ELS 0x0046
-+
-+/* Immediate notify task flags */
-+#define IMM_NTFY_TASK_MGMT_SHIFT 8
-+
-+#define Q2T_CLEAR_ACA 0x40
-+#define Q2T_TARGET_RESET 0x20
-+#define Q2T_LUN_RESET 0x10
-+#define Q2T_CLEAR_TS 0x04
-+#define Q2T_ABORT_TS 0x02
-+#define Q2T_ABORT_ALL_SESS 0xFFFF
-+#define Q2T_ABORT_ALL 0xFFFE
-+#define Q2T_NEXUS_LOSS_SESS 0xFFFD
-+#define Q2T_NEXUS_LOSS 0xFFFC
-+
-+/* Notify Acknowledge flags */
-+#define NOTIFY_ACK_RES_COUNT BIT_8
-+#define NOTIFY_ACK_CLEAR_LIP_RESET BIT_5
-+#define NOTIFY_ACK_TM_RESP_CODE_VALID BIT_4
-+
-+/* Command's states */
-+#define Q2T_STATE_NEW 0 /* New command and SCST processing it */
-+#define Q2T_STATE_NEED_DATA 1 /* SCST needs data to continue */
-+#define Q2T_STATE_DATA_IN 2 /* Data arrived and SCST processing it */
-+#define Q2T_STATE_PROCESSED 3 /* SCST done processing */
-+#define Q2T_STATE_ABORTED 4 /* Command aborted */
-+
-+/* Special handles */
-+#define Q2T_NULL_HANDLE 0
-+#define Q2T_SKIP_HANDLE (0xFFFFFFFF & ~CTIO_COMPLETION_HANDLE_MARK)
-+
-+/* ATIO task_codes field */
-+#define ATIO_SIMPLE_QUEUE 0
-+#define ATIO_HEAD_OF_QUEUE 1
-+#define ATIO_ORDERED_QUEUE 2
-+#define ATIO_ACA_QUEUE 4
-+#define ATIO_UNTAGGED 5
-+
-+/* TM failed response codes, see FCP (9.4.11 FCP_RSP_INFO) */
-+#define FC_TM_SUCCESS 0
-+#define FC_TM_BAD_FCP_DATA 1
-+#define FC_TM_BAD_CMD 2
-+#define FC_TM_FCP_DATA_MISMATCH 3
-+#define FC_TM_REJECT 4
-+#define FC_TM_FAILED 5
-+
-+/*
-+ * Error code of q2t_pre_xmit_response() meaning that cmd's exchange was
-+ * terminated, so no further action is needed and success should be returned
-+ * to SCST. Must be different from any SCST_TGT_RES_* codes.
-+ */
-+#define Q2T_PRE_XMIT_RESP_CMD_ABORTED 0x1717
-+
-+#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
-+#define pci_dma_lo32(a) ((a) & 0xffffffff)
-+#define pci_dma_hi32(a) ((((a) >> 16) >> 16) & 0xffffffff)
-+#else
-+#define pci_dma_lo32(a) ((a) & 0xffffffff)
-+#define pci_dma_hi32(a) 0
-+#endif
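-+/*
-+ * Example (sketch) for the macros above on a 64-bit configuration:
-+ *
-+ *	dma_addr_t addr = 0x123456789abcdef0ULL;
-+ *	uint32_t lo = pci_dma_lo32(addr);	// 0x9abcdef0
-+ *	uint32_t hi = pci_dma_hi32(addr);	// 0x12345678
-+ */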
-+
-+struct q2t_tgt {
-+ struct scst_tgt *scst_tgt;
-+ scsi_qla_host_t *ha;
-+
-+ /*
-+ * To sync between IRQ handlers and q2t_target_release(). Needed,
-+ * because req_pkt() can drop/reacquire HW lock inside. Protected by
-+ * HW lock.
-+ */
-+ int irq_cmd_count;
-+
-+ int datasegs_per_cmd, datasegs_per_cont;
-+
-+ /* Target's flags, serialized by pha->hardware_lock */
-+	unsigned int tgt_enable_64bit_addr:1; /* 64-bit PCI addressing enabled */
-+ unsigned int link_reinit_iocb_pending:1;
-+ unsigned int tm_to_unknown:1; /* TM to unknown session was sent */
-+ unsigned int sess_works_pending:1; /* there are sess_work entries */
-+
-+ /*
-+ * Protected by tgt_mutex AND hardware_lock for writing and tgt_mutex
-+ * OR hardware_lock for reading.
-+ */
-+ unsigned long tgt_stop; /* the driver is being stopped */
-+
-+	/* Count of sessions referring to q2t_tgt. Protected by hardware_lock. */
-+ int sess_count;
-+
-+ /* Protected by hardware_lock. Addition also protected by tgt_mutex. */
-+ struct list_head sess_list;
-+
-+ /* Protected by hardware_lock */
-+ struct list_head del_sess_list;
-+ struct delayed_work sess_del_work;
-+
-+ spinlock_t sess_work_lock;
-+ struct list_head sess_works_list;
-+ struct work_struct sess_work;
-+
-+ notify24xx_entry_t link_reinit_iocb;
-+ wait_queue_head_t waitQ;
-+ int notify_ack_expected;
-+ int abts_resp_expected;
-+ int modify_lun_expected;
-+
-+ int ctio_srr_id;
-+ int imm_srr_id;
-+ spinlock_t srr_lock;
-+ struct list_head srr_ctio_list;
-+ struct list_head srr_imm_list;
-+ struct work_struct srr_work;
-+
-+ atomic_t tgt_global_resets_count;
-+
-+ struct list_head tgt_list_entry;
-+};
-+
-+/*
-+ * Equivalent to an I_T Nexus (Initiator-Target)
-+ */
-+struct q2t_sess {
-+ uint16_t loop_id;
-+ port_id_t s_id;
-+
-+ unsigned int conf_compl_supported:1;
-+ unsigned int deleted:1;
-+ unsigned int local:1;
-+
-+ struct scst_session *scst_sess;
-+ struct q2t_tgt *tgt;
-+
-+ int sess_ref; /* protected by hardware_lock */
-+
-+ struct list_head sess_list_entry;
-+ unsigned long expires;
-+ struct list_head del_list_entry;
-+
-+ uint8_t port_name[WWN_SIZE];
-+};
-+
-+struct q2t_cmd {
-+ struct q2t_sess *sess;
-+ int state;
-+ struct scst_cmd *scst_cmd;
-+
-+ unsigned int conf_compl_supported:1;/* to save extra sess dereferences */
-+ unsigned int sg_mapped:1;
-+ unsigned int free_sg:1;
-+ unsigned int aborted:1; /* Needed in case of SRR */
-+ unsigned int write_data_transferred:1;
-+
-+ struct scatterlist *sg; /* cmd data buffer SG vector */
-+ int sg_cnt; /* SG segments count */
-+ int bufflen; /* cmd buffer length */
-+ int offset;
-+ scst_data_direction data_direction;
-+ uint32_t tag;
-+ dma_addr_t dma_handle;
-+ enum dma_data_direction dma_data_direction;
-+
-+ uint16_t loop_id; /* to save extra sess dereferences */
-+ struct q2t_tgt *tgt; /* to save extra sess dereferences */
-+
-+ union {
-+ atio7_entry_t atio7;
-+ atio_entry_t atio2x;
-+ } __attribute__((packed)) atio;
-+};
-+
-+struct q2t_sess_work_param {
-+ struct list_head sess_works_list_entry;
-+
-+#define Q2T_SESS_WORK_CMD 0
-+#define Q2T_SESS_WORK_ABORT 1
-+#define Q2T_SESS_WORK_TM 2
-+ int type;
-+
-+ union {
-+ struct q2t_cmd *cmd;
-+ abts24_recv_entry_t abts;
-+ notify_entry_t tm_iocb;
-+ atio7_entry_t tm_iocb2;
-+ };
-+};
-+
-+struct q2t_mgmt_cmd {
-+ struct q2t_sess *sess;
-+ unsigned int flags;
-+#define Q24_MGMT_SEND_NACK 1
-+ union {
-+ atio7_entry_t atio7;
-+ notify_entry_t notify_entry;
-+ notify24xx_entry_t notify_entry24;
-+ abts24_recv_entry_t abts;
-+ } __attribute__((packed)) orig_iocb;
-+};
-+
-+struct q2t_prm {
-+ struct q2t_cmd *cmd;
-+ struct q2t_tgt *tgt;
-+ void *pkt;
-+ struct scatterlist *sg; /* cmd data buffer SG vector */
-+ int seg_cnt;
-+ int req_cnt;
-+ uint16_t rq_result;
-+ uint16_t scsi_status;
-+ unsigned char *sense_buffer;
-+ int sense_buffer_len;
-+ int residual;
-+ int add_status_pkt;
-+};
-+
-+struct srr_imm {
-+ struct list_head srr_list_entry;
-+ int srr_id;
-+ union {
-+ notify_entry_t notify_entry;
-+ notify24xx_entry_t notify_entry24;
-+ } __attribute__((packed)) imm;
-+};
-+
-+struct srr_ctio {
-+ struct list_head srr_list_entry;
-+ int srr_id;
-+ struct q2t_cmd *cmd;
-+};
-+
-+#define Q2T_XMIT_DATA 1
-+#define Q2T_XMIT_STATUS 2
-+#define Q2T_XMIT_ALL (Q2T_XMIT_STATUS|Q2T_XMIT_DATA)
-+
-+#endif /* __QLA2X00T_H */
-diff -uprN orig/linux-3.2/Documentation/scst/README.qla2x00t linux-3.2/Documentation/scst/README.qla2x00t
---- orig/linux-3.2/Documentation/scst/README.qla2x00t
-+++ linux-3.2/Documentation/scst/README.qla2x00t
-@@ -0,0 +1,572 @@
-+Target driver for QLogic 22xx/23xx/24xx/25xx Fibre Channel cards
-+================================================================
-+
-+Version 2.1.0
-+-------------
-+
-+This driver consists of two parts: the target mode driver itself and
-+the modified initiator driver from the Linux kernel, which, in
-+particular, performs all the initialization and shutdown tasks. The
-+initiator driver was changed to provide target mode support and all the
-+necessary callbacks, but it is still capable of working as an initiator
-+only. A mode in which a host acts as the initiator and the target
-+simultaneously is supported as well.
-+
-+This version is compatible with SCST core version 2.0.0 and higher and
-+Linux kernel 2.6.26 and higher. Kernels below 2.6.26 are not supported,
-+because backporting the modified initiator driver to older kernels
-+would be too hard.
-+
-+The original initiator driver was taken from kernel 2.6.26. The
-+following 2.6.26.x commits have also been applied to it (upstream IDs):
-+048feec5548c0582ee96148c61b87cccbcb5f9be,
-+031e134e5f95233d80fb1b62fdaf5e1be587597c,
-+5f3a9a207f1fccde476dd31b4c63ead2967d934f,
-+85821c906cf3563a00a3d98fa380a2581a7a5ff1,
-+3c01b4f9fbb43fc911acd33ea7a14ea7a4f9866b,
-+8eca3f39c4b11320787f7b216f63214aee8415a9,
-+0f19bc681ed0849a2b95778460a0a8132e3700e2.
-+
-+See also "ToDo" file for list of known issues and unimplemented
-+features.
-+
-+
-+Installation
-+------------
-+
-+Only vanilla kernels from kernel.org and RHEL/CentOS 5.2 kernels are
-+supported, but SCST should work on other (vendors') kernels, if you
-+manage to compile it on them successfully. The main problem with
-+vendors' kernels is that they often contain patches that will appear
-+only in the next version of the vanilla kernel, so it is quite hard to
-+track such changes. Thus, if during compilation for some vendor kernel
-+your compiler complains about a redefinition of some symbol, you should
-+either switch to a vanilla kernel, or add or change as necessary the
-+"#if LINUX_VERSION_CODE" statement corresponding to that symbol.
-+
-+Before installation make sure that the link
-+"/lib/modules/`your_kernel_version`/build" points to the source code of
-+your currently running kernel.
-+
-+If your kernel version is <2.6.28, then you should consider applying
-+kernel patch scst_fc_vport_create.patch from the "kernel" subdirectory.
-+Without it, creating and removing NPIV targets using SCST sysfs
-+interface will be disabled. NOTE: you will still be able to create and
-+remove NPIV targets using the standard Linux interface (i.e. echoing
-+wwpn:wwnn into /sys/class/fc_host/hostX/vport_create and
-+/sys/class/fc_host/hostX/vport_delete).
-+
-+Then you should replace (or symlink) the "qla2xxx" subdirectory in
-+kernel_source/drivers/scsi/ of the currently running kernel with the
-+initiator driver from this package and, using your favorite kernel
-+configuration tool, enable target mode support in the QLogic QLA2XXX
-+Fibre Channel driver (CONFIG_SCSI_QLA2XXX_TARGET). Then rebuild the
-+kernel and its modules. During this step you will compile the initiator
-+driver. To install it, install the built kernel and its modules.
-+
-+Then edit qla2x00-target/Makefile and set the SCST_INC_DIR variable to
-+point to the directory where SCST's public include files are located. If
-+you install the QLA2x00 target driver's source code in SCST's directory,
-+then SCST_INC_DIR will be set correctly for you.
-+
-+You can also set the SCST_DIR variable to the directory where SCST was
-+built, but this is optional. If you don't set it, or set it incorrectly,
-+you will get a bunch of harmless warnings during compilation like:
-+"WARNING: "scst_rx_data" [/XXX/qla2x00tgt.ko] undefined!"
-+
-+To compile the target driver, type 'make' in qla2x00-target/
-+subdirectory. It will build qla2x00tgt.ko module.
-+
-+To install the target driver, type 'make install' in the
-+qla2x00-target/ subdirectory. The target driver will be installed in
-+/lib/modules/`your_kernel_version`/extra. To uninstall it, type 'make
-+uninstall'.
-+
-+
-+Usage
-+-----
-+
-+After the drivers are loaded and the adapters are successfully
-+initialized by the initiator driver, including the firmware image load,
-+you should configure exported devices using the corresponding interface
-+of the SCST core. It is highly recommended to use the scstadmin utility
-+for that purpose.
-+
-+Then target mode should be enabled via the sysfs interface on a per-card
-+basis, like:
-+
-+echo "1" >/sys/kernel/scst_tgt/targets/qla2x00t/target/enabled
-+
-+See below for full description of the driver's sysfs interface.
-+
-+With the obsolete proc interface you should instead use
-+target_mode_enabled under the appropriate scsi_host entry, like:
-+
-+echo "1" >/sys/class/scsi_host/host0/target_mode_enabled
-+
-+You can find some installation and configuration HOWTOs in
-+http://scst.sourceforge.net/qla2x00t-howto.html and
-+https://forums.openfiler.com/viewtopic.php?id=3422.
-+
-+
-+IMPORTANT USAGE NOTES
-+---------------------
-+
-+1. It is strongly recommended to use firmware version 5.x or higher
-+for 24xx/25xx adapters. See
-+http://sourceforge.net/mailarchive/forum.php?thread_name=4B4CD39F.6020401%40vlnb.net&forum_name=scst-devel
-+for more details why.
-+
-+2. If you reload the qla2x00tgt module, you should also reload the
-+qla2xxx module, otherwise your initiators will not be able to see the
-+target when it is enabled after the qla2x00tgt module is loaded.
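-+
-+For example (assuming nothing is currently using the modules; qla2x00tgt
-+must be unloaded before qla2xxx, since it depends on it):
-+
-+rmmod qla2x00tgt
-+rmmod qla2xxx
-+modprobe qla2xxx
-+modprobe qla2x00tgt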
-+
-+3. You need to issue a LIP after you enable a target, if you enabled it
-+after one or more of its initiators had already started.
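-+
-+For example, assuming the target port is visible as hostX under
-+/sys/class/fc_host/ (the exact host number depends on your system), a
-+LIP can usually be issued via the standard Linux FC transport interface:
-+
-+echo 1 >/sys/class/fc_host/hostX/issue_lip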
-+
-+
-+Initiator and target modes
-+--------------------------
-+
-+When qla2xxx is compiled with CONFIG_SCSI_QLA2XXX_TARGET enabled, it has
-+the parameter "qlini_mode", which determines when initiator mode will be
-+enabled. Possible values:
-+
-+ - "exclusive" (default) - initiator mode will be enabled on load,
-+disabled on enabling target mode and then enabled back on disabling
-+target mode.
-+
-+ - "disabled" - initiator mode will never be enabled.
-+
-+ - "enabled" - initiator mode will always stay enabled.
-+
-+Usage of mode "disabled" is recommended, if you have incorrectly
-+functioning your target's initiators, which if once seen a port in
-+initiator mode, later refuse to see it as a target.
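-+
-+For example, to load the initiator driver with initiator mode
-+permanently disabled:
-+
-+modprobe qla2xxx qlini_mode=disabled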
-+
-+Use mode "enabled" if you need your QLA adapters to work in both
-+initiator and target modes at the same time.
-+
-+You can always see which modes are currently active in active_mode sysfs
-+attribute.
-+
-+In all the modes you can at any time use the sysfs attribute
-+ini_mode_force_reverse to forcibly enable or disable initiator mode on
-+any particular port. Setting this attribute to 1 will reverse the
-+current status of the initiator mode from enabled to disabled and vice
-+versa.
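-+
-+For example (hypothetical host number; this attribute lives under the
-+corresponding scsi_host entry, similar to target_mode_enabled above):
-+
-+echo 1 >/sys/class/scsi_host/host0/ini_mode_force_reverse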
-+
-+
-+Explicit confirmation
-+---------------------
-+
-+This option should (actually, almost always must) be enabled by echoing
-+"1" in /sys/kernel/scst_tgt/targets/qla2x00t/target/host/explicit_conform_enabled,
-+if a target card exports at least one stateful SCSI device, like a tape,
-+and class 2 isn't used; otherwise link-level errors could lead to loss
-+of the target/initiator state synchronization. Also check whether the
-+initiator supports this feature; it is reported in the kernel logs
-+("confirmed completion supported" or not). No major performance
-+degradation was noticed with it enabled. Supported only on 23xx+
-+adapters. Disabled by default.
-+
-+
-+Class 2
-+-------
-+
-+Class 2 is the close equivalent of TCP in the network world. If you
-+enable it, all the Fibre Channel packets will be acknowledged. By
-+default, class 3 is used, which is UDP-like. Enable class 2 by echoing
-+"1" in /sys/kernel/scst_tgt/targets/qla2x00t/target/host/class2_enabled.
-+This option needs a special firmware with class 2 support. Disabled by
-+default.
-+
-+
-+N_Port ID Virtualization
-+------------------------
-+
-+N_Port ID Virtualization (NPIV) is a Fibre Channel facility allowing
-+multiple N_Port IDs to share a single physical N_Port. NPIV is fully
-+supported by this driver. To use this facility you must have 24xx+ ISPs
-+with NPIV-supporting firmware and NPIV-capable switch(es).
-+
-+You can add NPIV targets by echoing:
-+
-+add_target target_name node_name=node_name_value; parent_host=parent_host_value
-+
-+in /sys/kernel/scst_tgt/targets/qla2x00t/mgmt.
-+
-+Removing NPIV targets is done by echoing:
-+
-+del_target target_name
-+
-+in /sys/kernel/scst_tgt/targets/qla2x00t/mgmt.
-+
-+Also, you can create and remove NPIV targets using the standard Linux
-+interface (i.e. echoing wwpn:wwnn into /sys/class/fc_host/hostX/vport_create
-+and /sys/class/fc_host/hostX/vport_delete).
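-+
-+For example, to create the NPIV target used in the sample scripts below
-+via the standard interface, following the wwpn:wwnn format described
-+above (the host number and the exact WWN formatting accepted depend on
-+your system and kernel version):
-+
-+echo "50:50:00:00:00:00:00:11:50:50:00:00:00:00:00:00" >/sys/class/fc_host/host4/vport_create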
-+
-+It is recommended to use scstadmin utility and its config file to
-+configure virtual NPIV targets instead of the above direct interface.
-+
-+
-+Compilation options
-+-------------------
-+
-+The following compilation options can be commented in/out in the
-+Makefile:
-+
-+ - CONFIG_SCST_DEBUG - turns on some debugging code, including some logging.
-+ Makes the driver considerably bigger and slower, producing a large amount
-+ of log data.
-+
-+ - CONFIG_SCST_TRACING - turns on the ability to log events. Makes the driver
-+ considerably bigger and leads to some performance loss.
-+
-+ - CONFIG_QLA_TGT_DEBUG_WORK_IN_THREAD - makes SCST process incoming
-+ commands from the qla2x00t target driver and call the driver's
-+ callbacks in internal SCST threads context instead of the SIRQ context
-+ where those commands were received. Useful for debugging, but leads to
-+ some performance loss.
-+
-+ - CONFIG_QLA_TGT_DEBUG_SRR - turns on retransmitting packets (SRR)
-+ debugging. In this mode some CTIOs will be "broken" to force the
-+ initiator to issue a retransmit request.
-+
-+
-+Sysfs interface
-+---------------
-+
-+Starting from version 2.0.0 this driver has a sysfs interface. The
-+procfs interface is obsolete since version 2.0.0 and will be removed in
-+one of the next versions.
-+
-+Root of SCST sysfs interface is /sys/kernel/scst_tgt. Root of this
-+driver is /sys/kernel/scst_tgt/targets/qla2x00t. It has the following
-+entries:
-+
-+ - None, one or more subdirectories for targets, with names equal to the
-+ port names of the corresponding targets.
-+
-+ - trace_level - allows you to enable and disable various tracing
-+ facilities. See the content of this file for help on how to use it.
-+
-+ - version - read-only attribute, which allows you to see the version of
-+ this driver and its enabled optional features.
-+
-+ - mgmt - main management entry, which allows you to configure NPIV targets.
-+ See the content of this file for help on how to use it.
-+
-+ - hw_target (hardware target only) - read-only attribute with value 1.
-+ It allows you to distinguish hardware and virtual targets.
-+
-+Each target subdirectory contains the following entries:
-+
-+ - host - link pointing to the corresponding scsi_host of the initiator
-+ driver
-+
-+ - ini_groups - subdirectory defining initiator groups for this target,
-+ used to define per-initiator access control. See the SCST core README
-+ for more details.
-+
-+ - luns - subdirectory defining LUNs of this target. See the SCST core
-+ README for more details.
-+
-+ - sessions - subdirectory containing the sessions connected to this target.
-+
-+ - enabled - using this attribute you can enable or disable target mode
-+ on this FC port. It allows you to finish configuring the target before
-+ it starts accepting new connections. 0 by default.
-+
-+ - explicit_confirmation - allows you to enable explicit confirmations,
-+ see above.
-+
-+ - rel_tgt_id - allows you to read or write the SCSI Relative Target Port
-+ Identifier attribute. This identifier is used to identify SCSI target
-+ ports by some SCSI commands, mainly by Persistent Reservations
-+ commands. This identifier must be unique among all SCST targets, but
-+ for convenience SCST allows disabled targets to have a non-unique
-+ rel_tgt_id. In this case SCST will not allow the target to be enabled
-+ until its rel_tgt_id becomes unique. By default this attribute is
-+ initialized by SCST to a unique value.
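-+
-+ For example, while a target is disabled you can change its relative
-+ target port id (sample hardware target WWPN from the scripts below):
-+
-+ echo 2 >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/rel_tgt_id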
-+
-+ - node_name (NPIV targets only) - read-only attribute, which allows you
-+ to see the target World Wide Node Name.
-+
-+ - parent_host (NPIV targets only) - read-only attribute, which allows you
-+ to see the parent HBA World Wide Port Name (WWPN).
-+
-+Subdirectory "sessions" contains one subdirectory for each connected
-+session with name equal to port name of the connected initiator.
-+
-+Each session subdirectory contains the following entries:
-+
-+ - initiator_name - contains initiator's port name
-+
-+ - active_commands - contains number of active, i.e. not yet or being
-+ executed, SCSI commands in this session.
-+
-+ - commands - contains overall number of SCSI commands in this session.
-+
-+Below is a sample script that configures two virtual disks: "disk1",
-+using the /disk1 image, for usage with the 25:00:00:f0:98:87:92:f3
-+hardware target, and "disk2", using the /disk2 image, for usage with the
-+50:50:00:00:00:00:00:11 NPIV target. All initiators connected to these
-+targets will see those devices.
-+
-+#!/bin/bash
-+
-+modprobe scst
-+modprobe scst_vdisk
-+
-+echo "add_device disk1 filename=/disk1; nv_cache=1" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
-+echo "add_device disk2 filename=/disk2; nv_cache=1" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
-+
-+modprobe qla2x00tgt
-+
-+echo "add_target 50:50:00:00:00:00:00:11 node_name=50:50:00:00:00:00:00:00;parent_host=25:00:00:f0:98:87:92:f3" >\
-+/sys/kernel/scst_tgt/targets/qla2x00t/mgmt
-+
-+echo "add disk1 0" >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/luns/mgmt
-+echo "add disk2 0" >/sys/kernel/scst_tgt/targets/qla2x00t/50:50:00:00:00:00:00:11/luns/mgmt
-+echo 1 >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/enabled
-+echo 1 >/sys/kernel/scst_tgt/targets/qla2x00t/50:50:00:00:00:00:00:11/enabled
-+
-+Below is another sample script, which configures 1 real local SCSI disk
-+0:0:1:0 for usage with 25:00:00:f0:98:87:92:f3 target:
-+
-+#!/bin/bash
-+
-+modprobe scst
-+modprobe scst_disk
-+
-+echo "add_device 0:0:1:0" >/sys/kernel/scst_tgt/handlers/dev_disk/mgmt
-+
-+modprobe qla2x00tgt
-+
-+echo "add 0:0:1:0 0" >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/luns/mgmt
-+echo 1 >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/enabled
-+
-+Below is an advanced sample script, which configures more virtual
-+devices of various types, including a virtual CDROM. In this script
-+initiator 25:00:00:f0:99:87:94:a3 will see the disk1 and disk2 devices;
-+all other initiators will see the read-only blockio, nullio and cdrom
-+devices.
-+
-+#!/bin/bash
-+
-+modprobe scst
-+modprobe scst_vdisk
-+
-+echo "add_device disk1 filename=/disk1; nv_cache=1" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
-+echo "add_device disk2 filename=/disk2; blocksize=4096; nv_cache=1" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
-+echo "add_device blockio filename=/dev/sda5" >/sys/kernel/scst_tgt/handlers/vdisk_blockio/mgmt
-+echo "add_device nullio" >/sys/kernel/scst_tgt/handlers/vdisk_nullio/mgmt
-+echo "add_device cdrom" >/sys/kernel/scst_tgt/handlers/vcdrom/mgmt
-+
-+modprobe qla2x00tgt
-+
-+echo "add blockio 0 read_only=1" >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/luns/mgmt
-+echo "add nullio 1" >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/luns/mgmt
-+echo "add cdrom 2" >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/luns/mgmt
-+
-+echo "create 25:00:00:f0:99:87:94:a3" >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/ini_groups/mgmt
-+echo "add disk1 0" >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/ini_groups/25:00:00:f0:99:87:94:a3/luns/mgmt
-+echo "add disk2 1" >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/ini_groups/25:00:00:f0:99:87:94:a3/luns/mgmt
-+echo "add 25:00:00:f0:99:87:94:a3" >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/ini_groups/25:00:00:f0:99:87:94:a3/initiators/mgmt
-+
-+echo 1 >/sys/kernel/scst_tgt/targets/qla2x00t/25:00:00:f0:98:87:92:f3/enabled
-+
-+The resulting overall SCST sysfs hierarchy with initiator
-+25:00:00:f0:99:87:94:a3 connected will look like:
-+
-+/sys/kernel/scst_tgt
-+|-- devices
-+| |-- blockio
-+| | |-- blocksize
-+| | |-- exported
-+| | | `-- export0 -> ../../../targets/qla2x00t/25:00:00:f0:98:87:92:f3/luns/0
-+| | |-- filename
-+| | |-- handler -> ../../handlers/vdisk_blockio
-+| | |-- nv_cache
-+| | |-- read_only
-+| | |-- removable
-+| | |-- resync_size
-+| | |-- size_mb
-+| | |-- t10_dev_id
-+| | |-- threads_num
-+| | |-- threads_pool_type
-+| | |-- type
-+| | `-- usn
-+| |-- cdrom
-+| | |-- exported
-+| | | `-- export0 -> ../../../targets/qla2x00t/25:00:00:f0:98:87:92:f3/luns/2
-+| | |-- filename
-+| | |-- handler -> ../../handlers/vcdrom
-+| | |-- size_mb
-+| | |-- t10_dev_id
-+| | |-- threads_num
-+| | |-- threads_pool_type
-+| | |-- type
-+| | `-- usn
-+| |-- disk1
-+| | |-- blocksize
-+| | |-- exported
-+| | | `-- export0 -> ../../../targets/qla2x00t/25:00:00:f0:98:87:92:f3/ini_groups/25:00:00:f0:99:87:94:a3/luns/0
-+| | |-- filename
-+| | |-- handler -> ../../handlers/vdisk_fileio
-+| | |-- nv_cache
-+| | |-- o_direct
-+| | |-- read_only
-+| | |-- removable
-+| | |-- resync_size
-+| | |-- size_mb
-+| | |-- t10_dev_id
-+| | |-- threads_num
-+| | |-- threads_pool_type
-+| | |-- type
-+| | |-- usn
-+| | `-- write_through
-+| |-- disk2
-+| | |-- blocksize
-+| | |-- exported
-+| | | `-- export0 -> ../../../targets/qla2x00t/25:00:00:f0:98:87:92:f3/ini_groups/25:00:00:f0:99:87:94:a3/luns/1
-+| | |-- filename
-+| | |-- handler -> ../../handlers/vdisk_fileio
-+| | |-- nv_cache
-+| | |-- o_direct
-+| | |-- read_only
-+| | |-- removable
-+| | |-- resync_size
-+| | |-- size_mb
-+| | |-- t10_dev_id
-+| | |-- threads_num
-+| | |-- threads_pool_type
-+| | |-- type
-+| | |-- usn
-+| | `-- write_through
-+| `-- nullio
-+| |-- blocksize
-+| |-- exported
-+| | `-- export0 -> ../../../targets/qla2x00t/25:00:00:f0:98:87:92:f3/luns/1
-+| |-- handler -> ../../handlers/vdisk_nullio
-+| |-- read_only
-+| |-- removable
-+| |-- size_mb
-+| |-- t10_dev_id
-+| |-- threads_num
-+| |-- threads_pool_type
-+| |-- type
-+| `-- usn
-+|-- handlers
-+| |-- vcdrom
-+| | |-- cdrom -> ../../devices/cdrom
-+| | |-- mgmt
-+| | |-- trace_level
-+| | `-- type
-+| |-- vdisk_blockio
-+| | |-- blockio -> ../../devices/blockio
-+| | |-- mgmt
-+| | |-- trace_level
-+| | `-- type
-+| |-- vdisk_fileio
-+| | |-- disk1 -> ../../devices/disk1
-+| | |-- disk2 -> ../../devices/disk2
-+| | |-- mgmt
-+| | |-- trace_level
-+| | `-- type
-+| `-- vdisk_nullio
-+| |-- mgmt
-+| |-- nullio -> ../../devices/nullio
-+| |-- trace_level
-+| `-- type
-+|-- sgv
-+| |-- global_stats
-+| |-- sgv
-+| | `-- stats
-+| |-- sgv-clust
-+| | `-- stats
-+| `-- sgv-dma
-+| `-- stats
-+|-- targets
-+| `-- qla2x00t
-+| |-- 25:00:00:f0:98:87:92:f3
-+| | |-- enabled
-+| | |-- explicit_confirmation
-+| | |-- host -> ../../../../../class/scsi_host/host4
-+| | |-- ini_groups
-+| | | |-- 25:00:00:f0:99:87:94:a3
-+| | | | |-- initiators
-+| | | | | |-- 25:00:00:f0:99:87:94:a3
-+| | | | | `-- mgmt
-+| | | | `-- luns
-+| | | | |-- 0
-+| | | | | |-- device -> ../../../../../../../devices/disk1
-+| | | | | `-- read_only
-+| | | | |-- 1
-+| | | | | |-- device -> ../../../../../../../devices/disk2
-+| | | | | `-- read_only
-+| | | | `-- mgmt
-+| | | `-- mgmt
-+| | |-- luns
-+| | | |-- 0
-+| | | | |-- device -> ../../../../../devices/blockio
-+| | | | `-- read_only
-+| | | |-- 1
-+| | | | |-- device -> ../../../../../devices/nullio
-+| | | | `-- read_only
-+| | | |-- 2
-+| | | | |-- device -> ../../../../../devices/cdrom
-+| | | | `-- read_only
-+| | | `-- mgmt
-+| | |-- rel_tgt_id
-+| | |-- hw_target
-+| | `-- sessions
-+| | `-- 25:00:00:f0:99:87:94:a3
-+| | |-- active_commands
-+| | |-- commands
-+| | |-- initiator_name
-+| | `-- luns -> ../../ini_groups/25:00:00:f0:99:87:94:a3/luns
-+| |-- trace_level
-+| |-- version
-+| `-- mgmt
-+|-- threads
-+|-- trace_level
-+`-- version
-+
-+
-+Performance advice
-+------------------
-+
-+1. If you are going to use your target in a VM environment, for
-+instance as shared storage with VMware, make sure all your VMs are
-+connected to the target via *separate* sessions. You can check this
-+using the SCST proc or sysfs interface. Use the available facilities,
-+like NPIV, to create separate sessions for each VM. If you don't, you
-+can greatly lose performance of parallel access to your target from
-+different VMs. This doesn't apply if your VMs are using the same shared
-+storage, like with VMFS, for instance. In that case all your VM hosts
-+will be connected to the target via separate sessions, which is
-+sufficient.
-+
-+2. See the SCST core's README for more advice. In particular, pay
-+attention to having the io_grouping_type option set correctly.
-+
-+
-+Credits
-+-------
-+
-+Thanks to:
-+
-+ * QLogic support for their invaluable help.
-+
-+ * Nathaniel Clark <nate@misrule.us> for porting the initiator driver
-+to the new 2.6 kernel.
-+
-+ * Mark Buechler <mark.buechler@gmail.com> for the original
-+WWN-based authentication, a lot of useful suggestions, bug reports and
-+help in debugging.
-+
-+ * Ming Zhang <mingz@ele.uri.edu> for fixes.
-+
-+ * Uri Yanai <Uri.Yanai@ngsoft.com> and Dorit Halsadi
-+<Dorit.Halsadi@dothill.com> for adding full NPIV support.
-+
-+Vladislav Bolkhovitin <vst@vlnb.net>, http://scst.sourceforge.net
-This patch adds the kernel module ib_srpt, which is a SCSI RDMA Protocol (SRP)
-target implementation. This driver uses the InfiniBand stack and the SCST core.
-
-It is a high performance driver capable of handling 600K+ 4K random write
-IOPS by a single target as well as 2.5+ GB/s sequential throughput over
-a single QDR IB port.
-
-It was originally developed by Vu Pham (Mellanox) and has been optimized by
-Bart Van Assche.
-
-Signed-off-by: Bart Van Assche <bvanassche@acm.org>
-Cc: Vu Pham <vu@mellanox.com>
-Cc: Roland Dreier <rdreier@cisco.com>
-Cc: David Dillow <dillowda@ornl.gov>
-diff -uprN orig/linux-3.2/Documentation/scst/README.srpt linux-3.2/Documentation/scst/README.srpt
---- orig/linux-3.2/Documentation/scst/README.srpt
-+++ linux-3.2/Documentation/scst/README.srpt
-@@ -0,0 +1,112 @@
-+SCSI RDMA Protocol (SRP) Target driver for Linux
-+=================================================
-+
-+The SRP Target driver is designed to work directly on top of the
-+OpenFabrics OFED-1.x software stack (http://www.openfabrics.org) or
-+the Infiniband drivers in the Linux kernel tree
-+(http://www.kernel.org). The SRP target driver also interfaces with
-+the generic SCSI target mid-level driver called SCST
-+(http://scst.sourceforge.net).
-+
-+How-to run
-+-----------
-+
-+A. On srp target machine
-+1. Please refer to SCST's README for loading the scst driver and its
-+dev_handlers drivers (scst_disk, scst_vdisk in block or file IO mode, nullio, ...)
-+
-+Example 1: working with real back-end scsi disks
-+a. modprobe scst
-+b. modprobe scst_disk
-+c. cat /proc/scsi_tgt/scsi_tgt
-+
-+ibstor00:~ # cat /proc/scsi_tgt/scsi_tgt
-+Device (host:ch:id:lun or name) Device handler
-+0:0:0:0 dev_disk
-+4:0:0:0 dev_disk
-+5:0:0:0 dev_disk
-+6:0:0:0 dev_disk
-+7:0:0:0 dev_disk
-+
-+Now, to exclude the first scsi disk and expose the last 4 scsi disks as
-+IB/SRP LUNs for I/O:
-+echo "add 4:0:0:0 0" >/proc/scsi_tgt/groups/Default/devices
-+echo "add 5:0:0:0 1" >/proc/scsi_tgt/groups/Default/devices
-+echo "add 6:0:0:0 2" >/proc/scsi_tgt/groups/Default/devices
-+echo "add 7:0:0:0 3" >/proc/scsi_tgt/groups/Default/devices
-+
-+Example 2: working with VDISK FILEIO mode (using md0 device and file 10G-file)
-+a. modprobe scst
-+b. modprobe scst_vdisk
-+c. echo "open vdisk0 /dev/md0" > /proc/scsi_tgt/vdisk/vdisk
-+d. echo "open vdisk1 /10G-file" > /proc/scsi_tgt/vdisk/vdisk
-+e. echo "add vdisk0 0" >/proc/scsi_tgt/groups/Default/devices
-+f. echo "add vdisk1 1" >/proc/scsi_tgt/groups/Default/devices
-+
-+Example 3: working with VDISK BLOCKIO mode (using md0 device, sda, and cciss/c1d0)
-+a. modprobe scst
-+b. modprobe scst_vdisk
-+c. echo "open vdisk0 /dev/md0 BLOCKIO" > /proc/scsi_tgt/vdisk/vdisk
-+d. echo "open vdisk1 /dev/sda BLOCKIO" > /proc/scsi_tgt/vdisk/vdisk
-+e. echo "open vdisk2 /dev/cciss/c1d0 BLOCKIO" > /proc/scsi_tgt/vdisk/vdisk
-+f. echo "add vdisk0 0" >/proc/scsi_tgt/groups/Default/devices
-+g. echo "add vdisk1 1" >/proc/scsi_tgt/groups/Default/devices
-+h. echo "add vdisk2 2" >/proc/scsi_tgt/groups/Default/devices
-+
-+2. modprobe ib_srpt
-+
-+
-+B. On initiator machines you can manually do the following steps:
-+1. modprobe ib_srp
-+2. ibsrpdm -c (to discover new SRP target)
-+3. echo <new target info> > /sys/class/infiniband_srp/srp-mthca0-1/add_target
-+4. fdisk -l (will show new discovered scsi disks)
-+
-+Example:
-+Assume that you use port 1 of the first HCA in the system, i.e. mthca0
-+
-+[root@lab104 ~]# ibsrpdm -c -d /dev/infiniband/umad0
-+id_ext=0002c90200226cf4,ioc_guid=0002c90200226cf4,
-+dgid=fe800000000000000002c90200226cf5,pkey=ffff,service_id=0002c90200226cf4
-+[root@lab104 ~]# echo id_ext=0002c90200226cf4,ioc_guid=0002c90200226cf4,
-+dgid=fe800000000000000002c90200226cf5,pkey=ffff,service_id=0002c90200226cf4 >
-+/sys/class/infiniband_srp/srp-mthca0-1/add_target
-+
-+OR
-+
-++ You can edit /etc/infiniband/openib.conf to load the srp driver and srp HA
-+daemon automatically, i.e. set SRP_LOAD=yes and SRPHA_ENABLE=yes
-++ To set up and use the high availability feature you need the dm-multipath
-+driver and the multipath tool
-++ Please refer to the OFED-1.x SRP user manual for more detailed instructions
-+on how to enable/use the HA feature
-+
-+To minimize QUEUE_FULL conditions, you can apply the scst_increase_max_tgt_cmds
-+patch from the SRPT package at http://sourceforge.net/project/showfiles.php?group_id=110471
-+
-+
-+Performance notes
-+-----------------
-+
-+In some cases, for instance when working with SSD devices that consume
-+100% of a single CPU for data transfers in their internal threads, it
-+can be necessary to assign dedicated CPUs to those threads using the
-+Linux CPU affinity facilities in order to maximize IOPS. No IRQ
-+processing should be done on those CPUs. Check that using
-+/proc/interrupts. See the taskset command and
-+Documentation/IRQ-affinity.txt in your kernel's source tree for how to
-+assign CPU affinity to tasks and IRQs.
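-+
-+For example, to pin an already running device thread with a hypothetical
-+PID of 1234 to CPU 2:
-+
-+taskset -pc 2 1234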
-+
-+The reason is that processing of incoming commands in SIRQ context can
-+be done on the same CPUs as the SSD devices' threads doing data
-+transfers. As a result, those threads won't receive all the CPU power
-+and will perform worse.
-+
-+As an alternative to CPU affinity assignment, you can try enabling the
-+SRP target's internal thread. It will allow the Linux CPU scheduler to
-+better distribute the load among the available CPUs. To enable the SRP
-+target driver's internal thread, load the ib_srpt module with the
-+parameter "thread=1".
-+
-+
-+Send questions about this driver to scst-devel@lists.sourceforge.net, CC:
-+Vu Pham <vuhuong@mellanox.com> and Bart Van Assche <bvanassche@acm.org>.
-diff -uprN orig/linux-3.2/drivers/scst/srpt/Kconfig linux-3.2/drivers/scst/srpt/Kconfig
---- orig/linux-3.2/drivers/scst/srpt/Kconfig
-+++ linux-3.2/drivers/scst/srpt/Kconfig
-@@ -0,0 +1,12 @@
-+config SCST_SRPT
-+ tristate "InfiniBand SCSI RDMA Protocol target support"
-+ depends on INFINIBAND && SCST
-+ ---help---
-+
-+ Support for the SCSI RDMA Protocol (SRP) Target driver. The
-+ SRP protocol is a protocol that allows an initiator to access
-+ a block storage device on another host (target) over a network
-+ that supports the RDMA protocol. Currently the RDMA protocol is
-+ supported by InfiniBand and by iWarp network hardware. More
-+ information about the SRP protocol can be found on the website
-+ of the INCITS T10 technical committee (http://www.t10.org/).
-diff -uprN orig/linux-3.2/drivers/scst/srpt/Makefile linux-3.2/drivers/scst/srpt/Makefile
---- orig/linux-3.2/drivers/scst/srpt/Makefile
-+++ linux-3.2/drivers/scst/srpt/Makefile
-@@ -0,0 +1,1 @@
-+obj-$(CONFIG_SCST_SRPT) += ib_srpt.o
-diff -uprN orig/linux-3.2/drivers/scst/srpt/ib_dm_mad.h linux-3.2/drivers/scst/srpt/ib_dm_mad.h
---- orig/linux-3.2/drivers/scst/srpt/ib_dm_mad.h
-+++ linux-3.2/drivers/scst/srpt/ib_dm_mad.h
-@@ -0,0 +1,139 @@
-+/*
-+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses. You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ * Redistribution and use in source and binary forms, with or
-+ * without modification, are permitted provided that the following
-+ * conditions are met:
-+ *
-+ * - Redistributions of source code must retain the above
-+ * copyright notice, this list of conditions and the following
-+ * disclaimer.
-+ *
-+ * - Redistributions in binary form must reproduce the above
-+ * copyright notice, this list of conditions and the following
-+ * disclaimer in the documentation and/or other materials
-+ * provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#ifndef IB_DM_MAD_H
-+#define IB_DM_MAD_H
-+
-+#include <linux/types.h>
-+
-+#include <rdma/ib_mad.h>
-+
-+enum {
-+ /*
-+ * See also section 13.4.7 Status Field, table 115 MAD Common Status
-+ * Field Bit Values and also section 16.3.1.1 Status Field in the
-+ * InfiniBand Architecture Specification.
-+ */
-+ DM_MAD_STATUS_UNSUP_METHOD = 0x0008,
-+ DM_MAD_STATUS_UNSUP_METHOD_ATTR = 0x000c,
-+ DM_MAD_STATUS_INVALID_FIELD = 0x001c,
-+ DM_MAD_STATUS_NO_IOC = 0x0100,
-+
-+ /*
-+ * See also the Device Management chapter, section 16.3.3 Attributes,
-+ * table 279 Device Management Attributes in the InfiniBand
-+ * Architecture Specification.
-+ */
-+ DM_ATTR_CLASS_PORT_INFO = 0x01,
-+ DM_ATTR_IOU_INFO = 0x10,
-+ DM_ATTR_IOC_PROFILE = 0x11,
-+ DM_ATTR_SVC_ENTRIES = 0x12
-+};
-+
-+struct ib_dm_hdr {
-+ u8 reserved[28];
-+};
-+
-+/*
-+ * Structure of management datagram sent by the SRP target implementation.
-+ * Contains a management datagram header, reliable multi-packet transaction
-+ * protocol (RMPP) header and ib_dm_hdr. Notes:
-+ * - The SRP target implementation does not use RMPP or ib_dm_hdr when sending
-+ * management datagrams.
-+ * - The header size must be exactly 64 bytes (IB_MGMT_DEVICE_HDR), since this
-+ * is the header size that is passed to ib_create_send_mad() in ib_srpt.c.
-+ * - The maximum supported size for a management datagram when not using RMPP
-+ * is 256 bytes -- 64 bytes header and 192 (IB_MGMT_DEVICE_DATA) bytes data.
-+ */
-+struct ib_dm_mad {
-+ struct ib_mad_hdr mad_hdr;
-+ struct ib_rmpp_hdr rmpp_hdr;
-+ struct ib_dm_hdr dm_hdr;
-+ u8 data[IB_MGMT_DEVICE_DATA];
-+};
-+
-+/*
-+ * IOUnitInfo as defined in section 16.3.3.3 IOUnitInfo of the InfiniBand
-+ * Architecture Specification.
-+ */
-+struct ib_dm_iou_info {
-+ __be16 change_id;
-+ u8 max_controllers;
-+ u8 op_rom;
-+ u8 controller_list[128];
-+};
-+
-+/*
-+ * IOControllerprofile as defined in section 16.3.3.4 IOControllerProfile of
-+ * the InfiniBand Architecture Specification.
-+ */
-+struct ib_dm_ioc_profile {
-+ __be64 guid;
-+ __be32 vendor_id;
-+ __be32 device_id;
-+ __be16 device_version;
-+ __be16 reserved1;
-+ __be32 subsys_vendor_id;
-+ __be32 subsys_device_id;
-+ __be16 io_class;
-+ __be16 io_subclass;
-+ __be16 protocol;
-+ __be16 protocol_version;
-+ __be16 service_conn;
-+ __be16 initiators_supported;
-+ __be16 send_queue_depth;
-+ u8 reserved2;
-+ u8 rdma_read_depth;
-+ __be32 send_size;
-+ __be32 rdma_size;
-+ u8 op_cap_mask;
-+ u8 svc_cap_mask;
-+ u8 num_svc_entries;
-+ u8 reserved3[9];
-+ u8 id_string[64];
-+};
-+
-+struct ib_dm_svc_entry {
-+ u8 name[40];
-+ __be64 id;
-+};
-+
-+/*
-+ * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
-+ * Specification. See also section B.7, table B.8 in the T10 SRP r16a document.
-+ */
-+struct ib_dm_svc_entries {
-+ struct ib_dm_svc_entry service_entries[4];
-+};
-+
-+#endif
-diff -uprN orig/linux-3.2/drivers/scst/srpt/ib_srpt.c linux-3.2/drivers/scst/srpt/ib_srpt.c
---- orig/linux-3.2/drivers/scst/srpt/ib_srpt.c
-+++ linux-3.2/drivers/scst/srpt/ib_srpt.c
-@@ -0,0 +1,3850 @@
-+/*
-+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
-+ * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
-+ * Copyright (C) 2008 Vladislav Bolkhovitin <vst@vlnb.net>
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses. You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ * Redistribution and use in source and binary forms, with or
-+ * without modification, are permitted provided that the following
-+ * conditions are met:
-+ *
-+ * - Redistributions of source code must retain the above
-+ * copyright notice, this list of conditions and the following
-+ * disclaimer.
-+ *
-+ * - Redistributions in binary form must reproduce the above
-+ * copyright notice, this list of conditions and the following
-+ * disclaimer in the documentation and/or other materials
-+ * provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/slab.h>
-+#include <linux/err.h>
-+#include <linux/ctype.h>
-+#include <linux/kthread.h>
-+#include <linux/string.h>
-+#include <linux/delay.h>
-+#include <asm/atomic.h>
-+#include "ib_srpt.h"
-+#define LOG_PREFIX "ib_srpt" /* Prefix for SCST tracing macros. */
-+#include <scst/scst_debug.h>
-+
-+/* Name of this kernel module. */
-+#define DRV_NAME "ib_srpt"
-+#define DRV_VERSION "2.2.1-pre"
-+#define DRV_RELDATE "(not yet released)"
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+/* Flags to be used in SCST debug tracing statements. */
-+#define DEFAULT_SRPT_TRACE_FLAGS (TRACE_OUT_OF_MEM | TRACE_MINOR \
-+ | TRACE_MGMT | TRACE_SPECIAL)
-+/* Name of the entry that will be created under /proc/scsi_tgt/ib_srpt. */
-+#define SRPT_PROC_TRACE_LEVEL_NAME "trace_level"
-+#endif
-+
-+#define SRPT_ID_STRING "SCST SRP target"
-+
-+MODULE_AUTHOR("Vu Pham and Bart Van Assche");
-+MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
-+ "v" DRV_VERSION " (" DRV_RELDATE ")");
-+MODULE_LICENSE("Dual BSD/GPL");
-+
-+/*
-+ * Global Variables
-+ */
-+
-+static u64 srpt_service_guid;
-+/* List of srpt_device structures. */
-+static atomic_t srpt_device_count;
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+static unsigned long trace_flag = DEFAULT_SRPT_TRACE_FLAGS;
-+module_param(trace_flag, long, 0644);
-+MODULE_PARM_DESC(trace_flag, "SCST trace flags.");
-+#endif
-+
-+static unsigned srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
-+module_param(srp_max_rdma_size, int, 0644);
-+MODULE_PARM_DESC(srp_max_rdma_size,
-+ "Maximum size of SRP RDMA transfers for new connections.");
-+
-+static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
-+module_param(srp_max_req_size, int, 0444);
-+MODULE_PARM_DESC(srp_max_req_size,
-+ "Maximum size of SRP request messages in bytes.");
-+
-+static unsigned int srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
-+module_param(srp_max_rsp_size, int, S_IRUGO | S_IWUSR);
-+MODULE_PARM_DESC(srp_max_rsp_size,
-+ "Maximum size of SRP response messages in bytes.");
-+
-+static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
-+module_param(srpt_srq_size, int, S_IRUGO | S_IWUSR);
-+MODULE_PARM_DESC(srpt_srq_size,
-+ "Shared receive queue (SRQ) size.");
-+
-+static int srpt_sq_size = DEF_SRPT_SQ_SIZE;
-+module_param(srpt_sq_size, int, 0444);
-+MODULE_PARM_DESC(srpt_sq_size,
-+ "Per-channel send queue (SQ) size.");
-+
-+static bool use_port_guid_in_session_name;
-+module_param(use_port_guid_in_session_name, bool, 0444);
-+MODULE_PARM_DESC(use_port_guid_in_session_name,
-+ "Use target port ID in the session name such that"
-+ " redundant paths between multiport systems can be masked.");
-+
-+static bool use_node_guid_in_target_name;
-+module_param(use_node_guid_in_target_name, bool, 0444);
-+MODULE_PARM_DESC(use_node_guid_in_target_name,
-+ "Use target node GUIDs of HCAs as SCST target names.");
-+
-+static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
-+{
-+ return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
-+}
-+module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
-+ 0444);
-+MODULE_PARM_DESC(srpt_service_guid,
-+ "Using this value for ioc_guid, id_ext, and cm_listen_id"
-+ " instead of using the node_guid of the first HCA.");
-+
-+static struct ib_client srpt_client;
-+static void srpt_unregister_mad_agent(struct srpt_device *sdev);
-+static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
-+ struct srpt_send_ioctx *ioctx);
-+static void srpt_drain_channel(struct ib_cm_id *cm_id);
-+static void srpt_free_ch(struct scst_session *sess);
-+
-+static enum rdma_ch_state srpt_set_ch_state_to_disc(struct srpt_rdma_ch *ch)
-+{
-+ unsigned long flags;
-+ enum rdma_ch_state prev;
-+
-+ spin_lock_irqsave(&ch->spinlock, flags);
-+ prev = ch->state;
-+ switch (prev) {
-+ case CH_CONNECTING:
-+ case CH_LIVE:
-+ ch->state = CH_DISCONNECTING;
-+ wake_up_process(ch->thread);
-+ break;
-+ default:
-+ break;
-+ }
-+ spin_unlock_irqrestore(&ch->spinlock, flags);
-+
-+ return prev;
-+}
-+
-+static bool srpt_set_ch_state_to_draining(struct srpt_rdma_ch *ch)
-+{
-+ unsigned long flags;
-+ bool changed = false;
-+
-+ spin_lock_irqsave(&ch->spinlock, flags);
-+ switch (ch->state) {
-+ case CH_CONNECTING:
-+ case CH_LIVE:
-+ case CH_DISCONNECTING:
-+ ch->state = CH_DRAINING;
-+ wake_up_process(ch->thread);
-+ changed = true;
-+ break;
-+ default:
-+ break;
-+ }
-+ spin_unlock_irqrestore(&ch->spinlock, flags);
-+
-+ return changed;
-+}
-+
-+/**
-+ * srpt_test_and_set_ch_state() - Test and set the channel state.
-+ *
-+ * Returns true if and only if the channel state has been set to the new state.
-+ */
-+static bool srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch,
-+ enum rdma_ch_state old,
-+ enum rdma_ch_state new)
-+{
-+ unsigned long flags;
-+ bool changed = false;
-+
-+ spin_lock_irqsave(&ch->spinlock, flags);
-+ if (ch->state == old) {
-+ ch->state = new;
-+ wake_up_process(ch->thread);
-+ changed = true;
-+ }
-+ spin_unlock_irqrestore(&ch->spinlock, flags);
-+
-+ return changed;
-+}
-+
-+/**
-+ * srpt_adjust_req_lim() - Adjust ch->req_lim and ch->req_lim_delta atomically.
-+ *
-+ * Returns the new value of ch->req_lim.
-+ */
-+static int srpt_adjust_req_lim(struct srpt_rdma_ch *ch, int req_lim_change,
-+ int req_lim_delta_change)
-+{
-+ int req_lim;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ch->spinlock, flags);
-+ ch->req_lim += req_lim_change;
-+ req_lim = ch->req_lim;
-+ ch->req_lim_delta += req_lim_delta_change;
-+ spin_unlock_irqrestore(&ch->spinlock, flags);
-+
-+ return req_lim;
-+}
-+
-+/**
-+ * srpt_inc_req_lim() - Increase ch->req_lim and decrease ch->req_lim_delta.
-+ *
-+ * Returns one more than the previous value of ch->req_lim_delta.
-+ */
-+static int srpt_inc_req_lim(struct srpt_rdma_ch *ch)
-+{
-+ int req_lim_delta;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ch->spinlock, flags);
-+ req_lim_delta = ch->req_lim_delta + 1;
-+ ch->req_lim += req_lim_delta;
-+ ch->req_lim_delta = 0;
-+ spin_unlock_irqrestore(&ch->spinlock, flags);
-+
-+ return req_lim_delta;
-+}
-+
-+/**
-+ * srpt_undo_inc_req_lim() - Undo the effect of srpt_inc_req_lim.
-+ */
-+static int srpt_undo_inc_req_lim(struct srpt_rdma_ch *ch, int req_lim_delta)
-+{
-+ return srpt_adjust_req_lim(ch, -req_lim_delta, req_lim_delta - 1);
-+}
-+
-+/**
-+ * srpt_event_handler() - Asynchronous IB event callback function.
-+ *
-+ * Callback function called by the InfiniBand core when an asynchronous IB
-+ * event occurs. This callback may occur in interrupt context. See also
-+ * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
-+ * Architecture Specification.
-+ */
-+static void srpt_event_handler(struct ib_event_handler *handler,
-+ struct ib_event *event)
-+{
-+ struct srpt_device *sdev;
-+ struct srpt_port *sport;
-+ u8 port_num;
-+
-+ TRACE_ENTRY();
-+
-+ sdev = ib_get_client_data(event->device, &srpt_client);
-+ if (!sdev || sdev->device != event->device)
-+ return;
-+
-+ TRACE_DBG("ASYNC event= %d on device= %s",
-+ event->event, sdev->device->name);
-+
-+ switch (event->event) {
-+ case IB_EVENT_PORT_ERR:
-+ port_num = event->element.port_num - 1;
-+ if (port_num < sdev->device->phys_port_cnt) {
-+ sport = &sdev->port[port_num];
-+ sport->lid = 0;
-+ sport->sm_lid = 0;
-+ } else {
-+ WARN(true, "event %d: port_num %d out of range 1..%d\n",
-+ event->event, port_num + 1,
-+ sdev->device->phys_port_cnt);
-+ }
-+ break;
-+ case IB_EVENT_PORT_ACTIVE:
-+ case IB_EVENT_LID_CHANGE:
-+ case IB_EVENT_PKEY_CHANGE:
-+ case IB_EVENT_SM_CHANGE:
-+ case IB_EVENT_CLIENT_REREGISTER:
-+ /* Refresh port data asynchronously. */
-+ port_num = event->element.port_num - 1;
-+ if (port_num < sdev->device->phys_port_cnt) {
-+ sport = &sdev->port[port_num];
-+ if (!sport->lid && !sport->sm_lid)
-+ schedule_work(&sport->work);
-+ } else {
-+ WARN(true, "event %d: port_num %d out of range 1..%d\n",
-+ event->event, port_num + 1,
-+ sdev->device->phys_port_cnt);
-+ }
-+ break;
-+ default:
-+ PRINT_ERROR("received unrecognized IB event %d", event->event);
-+ break;
-+ }
-+
-+ TRACE_EXIT();
-+}
-+
-+/**
-+ * srpt_srq_event() - IB SRQ event callback function.
-+ */
-+static void srpt_srq_event(struct ib_event *event, void *ctx)
-+{
-+ TRACE_DBG("SRQ event %d", event->event);
-+}
-+
-+static const char *get_ch_state_name(enum rdma_ch_state s)
-+{
-+ switch (s) {
-+ case CH_CONNECTING:
-+ return "connecting";
-+ case CH_LIVE:
-+ return "live";
-+ case CH_DISCONNECTING:
-+ return "disconnecting";
-+ case CH_DRAINING:
-+ return "draining";
-+ case CH_FREEING:
-+ return "freeing";
-+ }
-+ return "???";
-+}
-+
-+/**
-+ * srpt_qp_event() - IB QP event callback function.
-+ */
-+static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
-+{
-+ TRACE_DBG("QP event %d on cm_id=%p sess_name=%s state=%s",
-+ event->event, ch->cm_id, ch->sess_name,
-+ get_ch_state_name(ch->state));
-+
-+ switch (event->event) {
-+ case IB_EVENT_COMM_EST:
-+ ib_cm_notify(ch->cm_id, event->event);
-+ break;
-+ case IB_EVENT_QP_LAST_WQE_REACHED:
-+ TRACE_DBG("%s, state %s: received Last WQE event.",
-+ ch->sess_name, get_ch_state_name(ch->state));
-+ ch->last_wqe_received = true;
-+ BUG_ON(!ch->thread);
-+ wake_up_process(ch->thread);
-+ break;
-+ default:
-+ PRINT_ERROR("received unrecognized IB QP event %d",
-+ event->event);
-+ break;
-+ }
-+}
-+
-+/**
-+ * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
-+ *
-+ * @slot: one-based slot number.
-+ * @value: four-bit value.
-+ *
-+ * Copies the lowest four bits of value in element slot of the array of four
-+ * bit elements called c_list (controller list). The index slot is one-based.
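-+ * For example, slot 1 is stored in the upper four bits of c_list[0] and
-+ * slot 2 in the lower four bits of c_list[0].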
-+ */
-+static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
-+{
-+ u16 id;
-+ u8 tmp;
-+
-+ id = (slot - 1) / 2;
-+ if (slot & 0x1) {
-+ tmp = c_list[id] & 0xf;
-+ c_list[id] = (value << 4) | tmp;
-+ } else {
-+ tmp = c_list[id] & 0xf0;
-+ c_list[id] = (value & 0xf) | tmp;
-+ }
-+}
-+
-+/**
-+ * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
-+ *
-+ * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
-+ * Specification.
-+ */
-+static void srpt_get_class_port_info(struct ib_dm_mad *mad)
-+{
-+ struct ib_class_port_info *cif;
-+
-+ cif = (struct ib_class_port_info *)mad->data;
-+ memset(cif, 0, sizeof *cif);
-+ cif->base_version = 1;
-+ cif->class_version = 1;
-+ cif->resp_time_value = 20;
-+
-+ mad->mad_hdr.status = 0;
-+}
-+
-+/**
-+ * srpt_get_iou() - Write IOUnitInfo to a management datagram.
-+ *
-+ * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
-+ * Specification. See also section B.7, table B.6 in the SRP r16a document.
-+ */
-+static void srpt_get_iou(struct ib_dm_mad *mad)
-+{
-+ struct ib_dm_iou_info *ioui;
-+ u8 slot;
-+ int i;
-+
-+ ioui = (struct ib_dm_iou_info *)mad->data;
-+ ioui->change_id = cpu_to_be16(1);
-+ ioui->max_controllers = 16;
-+
-+ /* set present for slot 1 and empty for the rest */
-+ srpt_set_ioc(ioui->controller_list, 1, 1);
-+ for (i = 1, slot = 2; i < 16; i++, slot++)
-+ srpt_set_ioc(ioui->controller_list, slot, 0);
-+
-+ mad->mad_hdr.status = 0;
-+}
-+
-+/**
-+ * srpt_get_ioc() - Write IOControllerprofile to a management datagram.
-+ *
-+ * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
-+ * Architecture Specification. See also section B.7, table B.7 in the SRP
-+ * r16a document.
-+ */
-+static void srpt_get_ioc(struct srpt_device *sdev, u32 slot,
-+ struct ib_dm_mad *mad)
-+{
-+ struct ib_dm_ioc_profile *iocp;
-+
-+ iocp = (struct ib_dm_ioc_profile *)mad->data;
-+
-+ if (!slot || slot > 16) {
-+ mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
-+ return;
-+ }
-+
-+ if (slot > 2) {
-+ mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
-+ return;
-+ }
-+
-+ memset(iocp, 0, sizeof *iocp);
-+ strcpy(iocp->id_string, SRPT_ID_STRING);
-+ iocp->guid = cpu_to_be64(srpt_service_guid);
-+ iocp->vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
-+ iocp->device_id = cpu_to_be32(sdev->dev_attr.vendor_part_id);
-+ iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
-+ iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
-+ iocp->subsys_device_id = 0x0;
-+ iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
-+ iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
-+ iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
-+ iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
-+ iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
-+ iocp->rdma_read_depth = 4;
-+ iocp->send_size = cpu_to_be32(srp_max_req_size);
-+ iocp->rdma_size = cpu_to_be32(min(max(srp_max_rdma_size, 256U),
-+ 1U << 24));
-+ iocp->num_svc_entries = 1;
-+ iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
-+ SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
-+
-+ mad->mad_hdr.status = 0;
-+}
-+
-+/**
-+ * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
-+ *
-+ * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
-+ * Specification. See also section B.7, table B.8 in the SRP r16a document.
-+ */
-+static void srpt_get_svc_entries(u64 ioc_guid,
-+ u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
-+{
-+ struct ib_dm_svc_entries *svc_entries;
-+
-+ WARN_ON(!ioc_guid);
-+
-+ if (!slot || slot > 16) {
-+ mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
-+ return;
-+ }
-+
-+ if (slot > 2 || lo > hi || hi > 1) {
-+ mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
-+ return;
-+ }
-+
-+ svc_entries = (struct ib_dm_svc_entries *)mad->data;
-+ memset(svc_entries, 0, sizeof *svc_entries);
-+ svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
-+ snprintf(svc_entries->service_entries[0].name,
-+ sizeof(svc_entries->service_entries[0].name),
-+ "%s%016llx",
-+ SRP_SERVICE_NAME_PREFIX,
-+ ioc_guid);
-+
-+ mad->mad_hdr.status = 0;
-+}
-+
-+/**
-+ * srpt_mgmt_method_get() - Process a received management datagram.
-+ * @sp: source port through which the MAD has been received.
-+ * @rq_mad: received MAD.
-+ * @rsp_mad: response MAD.
-+ */
-+static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
-+ struct ib_dm_mad *rsp_mad)
-+{
-+ u16 attr_id;
-+ u32 slot;
-+ u8 hi, lo;
-+
-+ attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
-+ switch (attr_id) {
-+ case DM_ATTR_CLASS_PORT_INFO:
-+ srpt_get_class_port_info(rsp_mad);
-+ break;
-+ case DM_ATTR_IOU_INFO:
-+ srpt_get_iou(rsp_mad);
-+ break;
-+ case DM_ATTR_IOC_PROFILE:
-+ slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
-+ srpt_get_ioc(sp->sdev, slot, rsp_mad);
-+ break;
-+ case DM_ATTR_SVC_ENTRIES:
-+ slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
-+ hi = (u8) ((slot >> 8) & 0xff);
-+ lo = (u8) (slot & 0xff);
-+ slot = (u16) ((slot >> 16) & 0xffff);
-+ srpt_get_svc_entries(srpt_service_guid,
-+ slot, hi, lo, rsp_mad);
-+ break;
-+ default:
-+ rsp_mad->mad_hdr.status =
-+ cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
-+ break;
-+ }
-+}
-+
-+/**
-+ * srpt_mad_send_handler() - Post MAD-send callback function.
-+ */
-+static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
-+ struct ib_mad_send_wc *mad_wc)
-+{
-+ ib_destroy_ah(mad_wc->send_buf->ah);
-+ ib_free_send_mad(mad_wc->send_buf);
-+}
-+
-+/**
-+ * srpt_mad_recv_handler() - MAD reception callback function.
-+ */
-+static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
-+ struct ib_mad_recv_wc *mad_wc)
-+{
-+ struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
-+ struct ib_ah *ah;
-+ struct ib_mad_send_buf *rsp;
-+ struct ib_dm_mad *dm_mad;
-+
-+ if (!mad_wc || !mad_wc->recv_buf.mad)
-+ return;
-+
-+ ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
-+ mad_wc->recv_buf.grh, mad_agent->port_num);
-+ if (IS_ERR(ah))
-+ goto err;
-+
-+ BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
-+
-+ rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
-+ mad_wc->wc->pkey_index, 0,
-+ IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
-+ GFP_KERNEL);
-+ if (IS_ERR(rsp))
-+ goto err_rsp;
-+
-+ rsp->ah = ah;
-+
-+ dm_mad = rsp->mad;
-+ memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
-+ dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
-+ dm_mad->mad_hdr.status = 0;
-+
-+ switch (mad_wc->recv_buf.mad->mad_hdr.method) {
-+ case IB_MGMT_METHOD_GET:
-+ srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
-+ break;
-+ case IB_MGMT_METHOD_SET:
-+ dm_mad->mad_hdr.status =
-+ cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
-+ break;
-+ default:
-+ dm_mad->mad_hdr.status =
-+ cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
-+ break;
-+ }
-+
-+ if (!ib_post_send_mad(rsp, NULL)) {
-+ ib_free_recv_mad(mad_wc);
-+ /* will destroy_ah & free_send_mad in send completion */
-+ return;
-+ }
-+
-+ ib_free_send_mad(rsp);
-+
-+err_rsp:
-+ ib_destroy_ah(ah);
-+err:
-+ ib_free_recv_mad(mad_wc);
-+}
-+
-+/**
-+ * srpt_refresh_port() - Configure a HCA port.
-+ *
-+ * Enable InfiniBand management datagram processing, update the cached sm_lid,
-+ * lid and gid values, and register a callback function for processing MADs
-+ * on the specified port.
-+ *
-+ * Note: It is safe to call this function more than once for the same port.
-+ */
-+static int srpt_refresh_port(struct srpt_port *sport)
-+{
-+ struct ib_mad_reg_req reg_req;
-+ struct ib_port_modify port_modify;
-+ struct ib_port_attr port_attr;
-+ int ret;
-+
-+ TRACE_ENTRY();
-+
-+ memset(&port_modify, 0, sizeof port_modify);
-+ port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
-+ port_modify.clr_port_cap_mask = 0;
-+
-+ ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
-+ if (ret)
-+ goto err_mod_port;
-+
-+ ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
-+ if (ret)
-+ goto err_query_port;
-+
-+ sport->sm_lid = port_attr.sm_lid;
-+ sport->lid = port_attr.lid;
-+
-+ ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
-+ if (ret)
-+ goto err_query_port;
-+
-+ if (!sport->mad_agent) {
-+ memset(&reg_req, 0, sizeof reg_req);
-+ reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
-+ reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
-+ set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
-+ set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
-+
-+ sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
-+ sport->port,
-+ IB_QPT_GSI,
-+ &reg_req, 0,
-+ srpt_mad_send_handler,
-+ srpt_mad_recv_handler,
-+ sport);
-+ if (IS_ERR(sport->mad_agent)) {
-+ ret = PTR_ERR(sport->mad_agent);
-+ sport->mad_agent = NULL;
-+ goto err_query_port;
-+ }
-+ }
-+
-+ TRACE_EXIT_RES(0);
-+
-+ return 0;
-+
-+err_query_port:
-+ port_modify.set_port_cap_mask = 0;
-+ port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
-+ ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
-+
-+err_mod_port:
-+ TRACE_EXIT_RES(ret);
-+
-+ return ret;
-+}
-+
-+/**
-+ * srpt_unregister_mad_agent() - Unregister MAD callback functions.
-+ *
-+ * Note: It is safe to call this function more than once for the same device.
-+ */
-+static void srpt_unregister_mad_agent(struct srpt_device *sdev)
-+{
-+ struct ib_port_modify port_modify = {
-+ .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
-+ };
-+ struct srpt_port *sport;
-+ int i;
-+
-+ for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
-+ sport = &sdev->port[i - 1];
-+ WARN_ON(sport->port != i);
-+ if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
-+ PRINT_ERROR("%s", "disabling MAD processing failed.");
-+ if (sport->mad_agent) {
-+ ib_unregister_mad_agent(sport->mad_agent);
-+ sport->mad_agent = NULL;
-+ }
-+ }
-+}
-+
-+/**
-+ * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
-+ */
-+static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
-+ int ioctx_size, int dma_size,
-+ enum dma_data_direction dir)
-+{
-+ struct srpt_ioctx *ioctx;
-+
-+ ioctx = kmalloc(ioctx_size, GFP_KERNEL);
-+ if (!ioctx)
-+ goto err;
-+
-+ ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
-+ if (!ioctx->buf)
-+ goto err_free_ioctx;
-+
-+ ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
-+ if (ib_dma_mapping_error(sdev->device, ioctx->dma))
-+ goto err_free_buf;
-+
-+ return ioctx;
-+
-+err_free_buf:
-+ kfree(ioctx->buf);
-+err_free_ioctx:
-+ kfree(ioctx);
-+err:
-+ return NULL;
-+}
-+
-+/**
-+ * srpt_free_ioctx() - Free an SRPT I/O context structure.
-+ */
-+static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
-+ int dma_size, enum dma_data_direction dir)
-+{
-+ if (!ioctx)
-+ return;
-+
-+ ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
-+ kfree(ioctx->buf);
-+ kfree(ioctx);
-+}
-+
-+/**
-+ * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
-+ * @sdev: Device to allocate the I/O context ring for.
-+ * @ring_size: Number of elements in the I/O context ring.
-+ * @ioctx_size: I/O context size.
-+ * @dma_size: DMA buffer size.
-+ * @dir: DMA data direction.
-+ */
-+static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
-+ int ring_size, int ioctx_size,
-+ int dma_size, enum dma_data_direction dir)
-+{
-+ struct srpt_ioctx **ring;
-+ int i;
-+
-+ TRACE_ENTRY();
-+
-+ WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx) &&
-+ ioctx_size != sizeof(struct srpt_send_ioctx));
-+
-+ ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
-+ if (!ring)
-+ goto out;
-+ for (i = 0; i < ring_size; ++i) {
-+ ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
-+ if (!ring[i])
-+ goto err;
-+ ring[i]->index = i;
-+ }
-+ goto out;
-+
-+err:
-+ while (--i >= 0)
-+ srpt_free_ioctx(sdev, ring[i], dma_size, dir);
-+ kfree(ring);
-+ ring = NULL;
-+out:
-+ TRACE_EXIT_HRES(ring);
-+ return ring;
-+}
-+
-+/**
-+ * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
-+ */
-+static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
-+ struct srpt_device *sdev, int ring_size,
-+ int dma_size, enum dma_data_direction dir)
-+{
-+ int i;
-+
-+ for (i = 0; i < ring_size; ++i)
-+ srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
-+ kfree(ioctx_ring);
-+}
-+
-+/**
-+ * srpt_set_cmd_state() - Set the state of a SCSI command.
-+ * @ioctx: I/O context of the command.
-+ * @new: New state.
-+ *
-+ * Does not modify the state of commands that have already reached the state
-+ * SRPT_STATE_DONE. Returns the previous command state.
-+ */
-+static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
-+ enum srpt_command_state new)
-+{
-+ enum srpt_command_state previous;
-+
-+ BUG_ON(!ioctx);
-+
-+ spin_lock(&ioctx->spinlock);
-+ previous = ioctx->state;
-+ if (previous != SRPT_STATE_DONE)
-+ ioctx->state = new;
-+ spin_unlock(&ioctx->spinlock);
-+
-+ return previous;
-+}
-+
-+/**
-+ * srpt_test_and_set_cmd_state() - Test and set the state of a command.
-+ *
-+ * Returns true if and only if the previous command state was equal to 'old'.
-+ */
-+static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
-+ enum srpt_command_state old,
-+ enum srpt_command_state new)
-+{
-+ enum srpt_command_state previous;
-+
-+ WARN_ON(!ioctx);
-+ WARN_ON(old == SRPT_STATE_DONE);
-+ WARN_ON(new == SRPT_STATE_NEW);
-+
-+ spin_lock(&ioctx->spinlock);
-+ previous = ioctx->state;
-+ if (previous == old)
-+ ioctx->state = new;
-+ spin_unlock(&ioctx->spinlock);
-+
-+ return previous == old;
-+}
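-+
-+/*
-+ * Example of the compare-and-swap pattern above, as used by the RDMA read
-+ * completion path in srpt_handle_rdma_comp(): a command is advanced from
-+ * SRPT_STATE_NEED_DATA to SRPT_STATE_DATA_IN, and a false return value
-+ * means that another context (e.g. the abort path) changed the state first:
-+ *
-+ *	if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
-+ *					SRPT_STATE_DATA_IN))
-+ *		scst_rx_data(ioctx->scmnd, SCST_RX_STATUS_SUCCESS, context);
-+ */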
-+
-+/**
-+ * srpt_post_recv() - Post an IB receive request.
-+ */
-+static int srpt_post_recv(struct srpt_device *sdev,
-+ struct srpt_recv_ioctx *ioctx)
-+{
-+ struct ib_sge list;
-+ struct ib_recv_wr wr, *bad_wr;
-+
-+ BUG_ON(!sdev);
-+ wr.wr_id = encode_wr_id(SRPT_RECV, ioctx->ioctx.index);
-+
-+ list.addr = ioctx->ioctx.dma;
-+ list.length = srp_max_req_size;
-+ list.lkey = sdev->mr->lkey;
-+
-+ wr.next = NULL;
-+ wr.sg_list = &list;
-+ wr.num_sge = 1;
-+
-+ return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
-+}
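-+
-+/*
-+ * Note: receive work requests are posted on the SRQ that is shared by all
-+ * channels of an HCA. The wr_id encodes both the opcode (SRPT_RECV) and the
-+ * ring index, which allows srpt_process_rcv_completion() to look up the
-+ * receive I/O context again via idx_from_wr_id().
-+ */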
-+
-+static int srpt_adjust_srq_wr_avail(struct srpt_rdma_ch *ch, int delta)
-+{
-+ int res;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ch->spinlock, flags);
-+ ch->sq_wr_avail += delta;
-+ res = ch->sq_wr_avail;
-+ spin_unlock_irqrestore(&ch->spinlock, flags);
-+
-+ return res;
-+}
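-+
-+/*
-+ * sq_wr_avail implements credit accounting for the IB send queue:
-+ * srpt_post_send() reserves one credit per response and returns it when
-+ * posting fails or when the send completes, and srpt_perform_rdmas()
-+ * reserves n_rdma credits before posting the work requests of an RDMA read
-+ * sequence.
-+ */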
-+
-+/**
-+ * srpt_post_send() - Post an IB send request.
-+ *
-+ * Returns zero upon success and a non-zero value upon failure.
-+ */
-+static int srpt_post_send(struct srpt_rdma_ch *ch,
-+ struct srpt_send_ioctx *ioctx, int len)
-+{
-+ struct ib_sge list;
-+ struct ib_send_wr wr, *bad_wr;
-+ struct srpt_device *sdev = ch->sport->sdev;
-+ int ret;
-+
-+ ret = -ENOMEM;
-+ if (srpt_adjust_srq_wr_avail(ch, -1) < 0) {
-+ PRINT_WARNING("%s", "IB send queue full (needed 1)");
-+ goto out;
-+ }
-+
-+ ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
-+ DMA_TO_DEVICE);
-+
-+ list.addr = ioctx->ioctx.dma;
-+ list.length = len;
-+ list.lkey = sdev->mr->lkey;
-+
-+ wr.next = NULL;
-+ wr.wr_id = encode_wr_id(SRPT_SEND, ioctx->ioctx.index);
-+ wr.sg_list = &list;
-+ wr.num_sge = 1;
-+ wr.opcode = IB_WR_SEND;
-+ wr.send_flags = IB_SEND_SIGNALED;
-+
-+ ret = ib_post_send(ch->qp, &wr, &bad_wr);
-+
-+out:
-+ if (ret < 0)
-+ srpt_adjust_srq_wr_avail(ch, 1);
-+ return ret;
-+}
-+
-+/**
-+ * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
-+ * @ioctx: Pointer to the I/O context associated with the request.
-+ * @srp_cmd: Pointer to the SRP_CMD request data.
-+ * @dir: Pointer to the variable to which the transfer direction will be
-+ * written.
-+ * @data_len: Pointer to the variable to which the total data length of all
-+ * descriptors in the SRP_CMD request will be written.
-+ *
-+ * This function initializes ioctx->n_rbuf and ioctx->rbufs.
-+ *
-+ * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
-+ * -ENOMEM when memory allocation fails and zero upon success.
-+ */
-+static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
-+ struct srp_cmd *srp_cmd,
-+ scst_data_direction *dir, u64 *data_len)
-+{
-+ struct srp_indirect_buf *idb;
-+ struct srp_direct_buf *db;
-+ unsigned add_cdb_offset;
-+ int ret;
-+
-+ /*
-+ * The pointer computations below will only be compiled correctly
-+ * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
-+ * whether srp_cmd::add_data has been declared as a byte pointer.
-+ */
-+ BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
-+ && !__same_type(srp_cmd->add_data[0], (u8)0));
-+
-+ BUG_ON(!dir);
-+ BUG_ON(!data_len);
-+
-+ ret = 0;
-+ *data_len = 0;
-+
-+ /*
-+ * The lower four bits of the buffer format field contain the DATA-IN
-+ * buffer descriptor format, and the highest four bits contain the
-+ * DATA-OUT buffer descriptor format.
-+ */
-+ *dir = SCST_DATA_NONE;
-+ if (srp_cmd->buf_fmt & 0xf)
-+ /* DATA-IN: transfer data from target to initiator (read). */
-+ *dir = SCST_DATA_READ;
-+ else if (srp_cmd->buf_fmt >> 4)
-+ /* DATA-OUT: transfer data from initiator to target (write). */
-+ *dir = SCST_DATA_WRITE;
-+
-+ /*
-+ * According to the SRP spec, the lower two bits of the 'ADDITIONAL
-+ * CDB LENGTH' field are reserved and the size in bytes of this field
-+ * is four times the value specified in bits 3..7. Hence the "& ~3".
-+ */
-+ add_cdb_offset = srp_cmd->add_cdb_len & ~3;
-+ if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
-+ ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
-+ ioctx->n_rbuf = 1;
-+ ioctx->rbufs = &ioctx->single_rbuf;
-+
-+ db = (struct srp_direct_buf *)(srp_cmd->add_data
-+ + add_cdb_offset);
-+ memcpy(ioctx->rbufs, db, sizeof *db);
-+ *data_len = be32_to_cpu(db->len);
-+ } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
-+ ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
-+ idb = (struct srp_indirect_buf *)(srp_cmd->add_data
-+ + add_cdb_offset);
-+
-+ ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;
-+
-+ if (ioctx->n_rbuf >
-+ (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
-+ PRINT_ERROR("received unsupported SRP_CMD request type"
-+ " (%u out + %u in != %u / %zu)",
-+ srp_cmd->data_out_desc_cnt,
-+ srp_cmd->data_in_desc_cnt,
-+ be32_to_cpu(idb->table_desc.len),
-+ sizeof(*db));
-+ ioctx->n_rbuf = 0;
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (ioctx->n_rbuf == 1)
-+ ioctx->rbufs = &ioctx->single_rbuf;
-+ else {
-+ ioctx->rbufs =
-+ kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
-+ if (!ioctx->rbufs) {
-+ ioctx->n_rbuf = 0;
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+ }
-+
-+ db = idb->desc_list;
-+ memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
-+ *data_len = be32_to_cpu(idb->len);
-+ }
-+out:
-+ return ret;
-+}
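-+
-+/*
-+ * Example, assuming the usual <scsi/srp.h> encoding (SRP_DATA_DESC_DIRECT
-+ * == 1 and SRP_DATA_DESC_INDIRECT == 2): buf_fmt == 0x01 announces a direct
-+ * DATA-IN descriptor (read), buf_fmt == 0x20 an indirect DATA-OUT
-+ * descriptor (write) and buf_fmt == 0x00 a command without a data phase.
-+ */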
-+
-+/**
-+ * srpt_init_ch_qp() - Initialize queue pair attributes.
-+ *
-+ * Initializes the attributes of queue pair 'qp' by allowing local write,
-+ * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
-+ */
-+static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
-+{
-+ struct ib_qp_attr *attr;
-+ int ret;
-+
-+ attr = kzalloc(sizeof *attr, GFP_KERNEL);
-+ if (!attr)
-+ return -ENOMEM;
-+
-+ attr->qp_state = IB_QPS_INIT;
-+ attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
-+ IB_ACCESS_REMOTE_WRITE;
-+ attr->port_num = ch->sport->port;
-+ attr->pkey_index = 0;
-+
-+ ret = ib_modify_qp(qp, attr,
-+ IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
-+ IB_QP_PKEY_INDEX);
-+
-+ kfree(attr);
-+ return ret;
-+}
-+
-+/**
-+ * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
-+ * @ch: channel of the queue pair.
-+ * @qp: queue pair to change the state of.
-+ *
-+ * Returns zero upon success and a negative value upon failure.
-+ */
-+static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
-+{
-+ struct ib_qp_attr *attr;
-+ int attr_mask;
-+ int ret;
-+
-+ attr = kzalloc(sizeof *attr, GFP_KERNEL);
-+ if (!attr)
-+ return -ENOMEM;
-+
-+ attr->qp_state = IB_QPS_RTR;
-+ ret = ib_cm_init_qp_attr(ch->cm_id, attr, &attr_mask);
-+ if (ret)
-+ goto out;
-+
-+ attr->max_dest_rd_atomic = 4;
-+ TRACE_DBG("qp timeout = %d", attr->timeout);
-+
-+ ret = ib_modify_qp(qp, attr, attr_mask);
-+
-+out:
-+ kfree(attr);
-+ return ret;
-+}
-+
-+/**
-+ * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
-+ * @ch: channel of the queue pair.
-+ * @qp: queue pair to change the state of.
-+ *
-+ * Returns zero upon success and a negative value upon failure.
-+ */
-+static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
-+{
-+ struct ib_qp_attr *attr;
-+ int attr_mask;
-+ int ret;
-+ uint64_t T_tr_ns;
-+ uint32_t T_tr_ms, max_compl_time_ms;
-+
-+ attr = kzalloc(sizeof *attr, GFP_KERNEL);
-+ if (!attr)
-+ return -ENOMEM;
-+
-+ attr->qp_state = IB_QPS_RTS;
-+ ret = ib_cm_init_qp_attr(ch->cm_id, attr, &attr_mask);
-+ if (ret)
-+ goto out;
-+
-+ attr->max_rd_atomic = 4;
-+
-+ /*
-+ * From IBTA C9-140: Transport Timer timeout interval
-+ * T_tr = 4.096 us * 2**(local ACK timeout) where the local ACK timeout
-+ * is a five-bit value, with zero meaning that the timer is disabled.
-+ */
-+ WARN_ON(attr->timeout >= (1 << 5));
-+ if (attr->timeout) {
-+ T_tr_ns = 1ULL << (12 + attr->timeout);
-+ max_compl_time_ms = attr->retry_cnt * 4 * T_tr_ns;
-+ do_div(max_compl_time_ms, 1000000);
-+ T_tr_ms = T_tr_ns;
-+ do_div(T_tr_ms, 1000000);
-+ TRACE_DBG("Session %s: QP local ack timeout = %d or T_tr ="
-+ " %u ms; retry_cnt = %d; max compl. time = %d ms",
-+ ch->sess_name, attr->timeout, T_tr_ms,
-+ attr->retry_cnt, max_compl_time_ms);
-+
-+ if (max_compl_time_ms >= RDMA_COMPL_TIMEOUT_S * 1000) {
-+ PRINT_ERROR("Maximum RDMA completion time (%d ms)"
-+ " exceeds ib_srpt timeout (%d ms)",
-+ max_compl_time_ms,
-+ 1000 * RDMA_COMPL_TIMEOUT_S);
-+ }
-+ }
-+
-+ ret = ib_modify_qp(qp, attr, attr_mask);
-+
-+out:
-+ kfree(attr);
-+ return ret;
-+}
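-+
-+/*
-+ * Worked example for the computation above: a local ACK timeout of 14
-+ * yields T_tr = 2**(12 + 14) ns = 2**26 ns or about 67 ms, so with
-+ * retry_cnt == 7 the maximum completion time is about 7 * 4 * 67 ms,
-+ * i.e. roughly 1.9 seconds.
-+ */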
-+
-+/**
-+ * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
-+ */
-+static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
-+{
-+ struct ib_qp_attr *attr;
-+ int ret;
-+
-+ attr = kzalloc(sizeof *attr, GFP_KERNEL);
-+ if (!attr)
-+ return -ENOMEM;
-+
-+ attr->qp_state = IB_QPS_ERR;
-+ ret = ib_modify_qp(ch->qp, attr, IB_QP_STATE);
-+ kfree(attr);
-+ return ret;
-+}
-+
-+/**
-+ * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
-+ */
-+static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
-+{
-+ struct srpt_send_ioctx *ioctx;
-+ unsigned long flags;
-+
-+ BUG_ON(!ch);
-+
-+ ioctx = NULL;
-+ spin_lock_irqsave(&ch->spinlock, flags);
-+ if (!list_empty(&ch->free_list)) {
-+ ioctx = list_first_entry(&ch->free_list,
-+ struct srpt_send_ioctx, free_list);
-+ list_del(&ioctx->free_list);
-+ }
-+ spin_unlock_irqrestore(&ch->spinlock, flags);
-+
-+ if (!ioctx)
-+ return ioctx;
-+
-+ BUG_ON(ioctx->ch != ch);
-+ spin_lock_init(&ioctx->spinlock);
-+ ioctx->state = SRPT_STATE_NEW;
-+ ioctx->n_rbuf = 0;
-+ ioctx->rbufs = NULL;
-+ ioctx->n_rdma = 0;
-+ ioctx->n_rdma_ius = 0;
-+ ioctx->rdma_ius = NULL;
-+ ioctx->mapped_sg_count = 0;
-+ ioctx->scmnd = NULL;
-+
-+ return ioctx;
-+}
-+
-+/**
-+ * srpt_put_send_ioctx() - Free up resources.
-+ */
-+static void srpt_put_send_ioctx(struct srpt_send_ioctx *ioctx)
-+{
-+ struct srpt_rdma_ch *ch;
-+ unsigned long flags;
-+
-+ BUG_ON(!ioctx);
-+ ch = ioctx->ch;
-+ BUG_ON(!ch);
-+
-+ ioctx->scmnd = NULL;
-+
-+ /*
-+ * If the WARN_ON() below gets triggered this means that
-+ * srpt_unmap_sg_to_ib_sge() has not been called before
-+ * scst_tgt_cmd_done().
-+ */
-+ WARN_ON(ioctx->mapped_sg_count);
-+
-+ if (ioctx->n_rbuf > 1) {
-+ kfree(ioctx->rbufs);
-+ ioctx->rbufs = NULL;
-+ ioctx->n_rbuf = 0;
-+ }
-+
-+ spin_lock_irqsave(&ch->spinlock, flags);
-+ list_add(&ioctx->free_list, &ch->free_list);
-+ spin_unlock_irqrestore(&ch->spinlock, flags);
-+}
-+
-+/**
-+ * srpt_abort_cmd() - Abort a SCSI command.
-+ * @ioctx: I/O context associated with the SCSI command.
-+ * @context: Preferred execution context.
-+ */
-+static void srpt_abort_cmd(struct srpt_send_ioctx *ioctx,
-+ enum scst_exec_context context)
-+{
-+ struct scst_cmd *scmnd;
-+ enum srpt_command_state state;
-+
-+ TRACE_ENTRY();
-+
-+ BUG_ON(!ioctx);
-+
-+ /*
-+ * If the command is in a state where the target core is waiting for
-+ * the ib_srpt driver, change the state to the next state. Changing
-+ * the state of the command from SRPT_STATE_NEED_DATA to
-+ * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this
-+ * function a second time.
-+ */
-+ spin_lock(&ioctx->spinlock);
-+ state = ioctx->state;
-+ switch (state) {
-+ case SRPT_STATE_NEED_DATA:
-+ ioctx->state = SRPT_STATE_DATA_IN;
-+ break;
-+ case SRPT_STATE_DATA_IN:
-+ case SRPT_STATE_CMD_RSP_SENT:
-+ case SRPT_STATE_MGMT_RSP_SENT:
-+ ioctx->state = SRPT_STATE_DONE;
-+ break;
-+ default:
-+ break;
-+ }
-+ spin_unlock(&ioctx->spinlock);
-+
-+ if (state == SRPT_STATE_DONE)
-+ goto out;
-+
-+ scmnd = ioctx->scmnd;
-+ WARN_ON(!scmnd);
-+ if (!scmnd)
-+ goto out;
-+
-+ WARN_ON(ioctx != scst_cmd_get_tgt_priv(scmnd));
-+
-+ TRACE_DBG("Aborting cmd with state %d and tag %lld",
-+ state, scst_cmd_get_tag(scmnd));
-+
-+ switch (state) {
-+ case SRPT_STATE_NEW:
-+ case SRPT_STATE_DATA_IN:
-+ case SRPT_STATE_MGMT:
-+ /*
-+ * Do nothing - defer abort processing until
-+ * srpt_xmit_response() is invoked.
-+ */
-+ WARN_ON(!scst_cmd_aborted(scmnd));
-+ break;
-+ case SRPT_STATE_NEED_DATA:
-+ /* SCST_DATA_WRITE - RDMA read error or RDMA read timeout. */
-+ scst_rx_data(ioctx->scmnd, SCST_RX_STATUS_ERROR, context);
-+ break;
-+ case SRPT_STATE_CMD_RSP_SENT:
-+ /*
-+ * SRP_RSP sending failed or the SRP_RSP send completion has
-+ * not been received in time.
-+ */
-+ srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
-+ srpt_put_send_ioctx(ioctx);
-+ scst_set_delivery_status(scmnd, SCST_CMD_DELIVERY_ABORTED);
-+ scst_tgt_cmd_done(scmnd, context);
-+ break;
-+ case SRPT_STATE_MGMT_RSP_SENT:
-+ /*
-+ * Management command response sending failed. This state is
-+ * never reached since there is no scmnd associated with
-+ * management commands. Note: the SCST core frees these
-+		 * commands immediately after srpt_tsk_mgmt_done() has returned.
-+ */
-+ WARN_ON("ERROR: unexpected command state");
-+ break;
-+ default:
-+ WARN_ON("ERROR: unexpected command state");
-+ break;
-+ }
-+
-+out:
-+ ;
-+
-+ TRACE_EXIT();
-+}
-+
-+/**
-+ * srpt_handle_send_err_comp() - Process an IB_WC_SEND error completion.
-+ */
-+static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id,
-+ enum scst_exec_context context)
-+{
-+ struct srpt_send_ioctx *ioctx;
-+ enum srpt_command_state state;
-+ struct scst_cmd *scmnd;
-+ u32 index;
-+
-+ srpt_adjust_srq_wr_avail(ch, 1);
-+
-+ index = idx_from_wr_id(wr_id);
-+ ioctx = ch->ioctx_ring[index];
-+ state = ioctx->state;
-+ scmnd = ioctx->scmnd;
-+
-+ EXTRACHECKS_WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
-+ && state != SRPT_STATE_MGMT_RSP_SENT
-+ && state != SRPT_STATE_NEED_DATA
-+ && state != SRPT_STATE_DONE);
-+
-+ /*
-+ * If SRP_RSP sending failed, undo the ch->req_lim and ch->req_lim_delta
-+ * changes.
-+ */
-+ if (state == SRPT_STATE_CMD_RSP_SENT
-+ || state == SRPT_STATE_MGMT_RSP_SENT)
-+ srpt_undo_inc_req_lim(ch, ioctx->req_lim_delta);
-+ if (state != SRPT_STATE_DONE) {
-+ if (scmnd)
-+ srpt_abort_cmd(ioctx, context);
-+ else
-+ srpt_put_send_ioctx(ioctx);
-+ } else
-+ PRINT_ERROR("Received more than one IB error completion"
-+ " for wr_id = %u.", (unsigned)index);
-+}
-+
-+/**
-+ * srpt_handle_send_comp() - Process an IB send completion notification.
-+ */
-+static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
-+ struct srpt_send_ioctx *ioctx,
-+ enum scst_exec_context context)
-+{
-+ enum srpt_command_state state;
-+
-+ srpt_adjust_srq_wr_avail(ch, 1);
-+
-+ state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
-+
-+ EXTRACHECKS_WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
-+ && state != SRPT_STATE_MGMT_RSP_SENT
-+ && state != SRPT_STATE_DONE);
-+
-+ if (state != SRPT_STATE_DONE) {
-+ struct scst_cmd *scmnd;
-+
-+ scmnd = ioctx->scmnd;
-+ EXTRACHECKS_WARN_ON((state == SRPT_STATE_MGMT_RSP_SENT)
-+ != (scmnd == NULL));
-+ if (scmnd) {
-+ srpt_unmap_sg_to_ib_sge(ch, ioctx);
-+ srpt_put_send_ioctx(ioctx);
-+ scst_tgt_cmd_done(scmnd, context);
-+ } else
-+ srpt_put_send_ioctx(ioctx);
-+ } else {
-+ PRINT_ERROR("IB completion has been received too late for"
-+ " wr_id = %u.", ioctx->ioctx.index);
-+ }
-+}
-+
-+/**
-+ * srpt_handle_rdma_comp() - Process an IB RDMA completion notification.
-+ */
-+static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
-+ struct srpt_send_ioctx *ioctx,
-+ enum srpt_opcode opcode,
-+ enum scst_exec_context context)
-+{
-+ struct scst_cmd *scmnd;
-+
-+ EXTRACHECKS_WARN_ON(ioctx->n_rdma <= 0);
-+ srpt_adjust_srq_wr_avail(ch, ioctx->n_rdma);
-+
-+ scmnd = ioctx->scmnd;
-+ if (opcode == SRPT_RDMA_READ_LAST && scmnd) {
-+ if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
-+ SRPT_STATE_DATA_IN))
-+ scst_rx_data(ioctx->scmnd, SCST_RX_STATUS_SUCCESS,
-+ context);
-+ else
-+ PRINT_ERROR("%s[%d]: wrong state = %d", __func__,
-+ __LINE__, ioctx->state);
-+ } else if (opcode == SRPT_RDMA_ABORT) {
-+ ioctx->rdma_aborted = true;
-+ } else {
-+ WARN(true, "scmnd == NULL (opcode %d)", opcode);
-+ }
-+}
-+
-+/**
-+ * srpt_handle_rdma_err_comp() - Process an IB RDMA error completion.
-+ */
-+static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
-+ struct srpt_send_ioctx *ioctx,
-+ enum srpt_opcode opcode,
-+ enum scst_exec_context context)
-+{
-+ struct scst_cmd *scmnd;
-+ enum srpt_command_state state;
-+
-+ scmnd = ioctx->scmnd;
-+ state = ioctx->state;
-+ if (scmnd) {
-+ switch (opcode) {
-+ case SRPT_RDMA_READ_LAST:
-+ if (ioctx->n_rdma <= 0) {
-+ PRINT_ERROR("Received invalid RDMA read error"
-+ " completion with idx %d",
-+ ioctx->ioctx.index);
-+ break;
-+ }
-+ srpt_adjust_srq_wr_avail(ch, ioctx->n_rdma);
-+ if (state == SRPT_STATE_NEED_DATA)
-+ srpt_abort_cmd(ioctx, context);
-+ else
-+ PRINT_ERROR("%s[%d]: wrong state = %d",
-+ __func__, __LINE__, state);
-+ break;
-+ case SRPT_RDMA_WRITE_LAST:
-+ scst_set_delivery_status(scmnd,
-+ SCST_CMD_DELIVERY_ABORTED);
-+ break;
-+ default:
-+ PRINT_ERROR("%s[%d]: opcode = %u", __func__, __LINE__,
-+ opcode);
-+ break;
-+ }
-+ } else
-+ PRINT_ERROR("%s[%d]: scmnd == NULL", __func__, __LINE__);
-+}
-+
-+/**
-+ * srpt_build_cmd_rsp() - Build an SRP_RSP response.
-+ * @ch: RDMA channel through which the request has been received.
-+ * @ioctx: I/O context associated with the SRP_CMD request. The response will
-+ * be built in the buffer ioctx->ioctx.buf points at.
-+ * @tag: tag of the request for which this response is being generated.
-+ * @status: value for the STATUS field of the SRP_RSP information unit.
-+ * @sense_data: pointer to sense data to be included in the response.
-+ * @sense_data_len: length in bytes of the sense data.
-+ *
-+ * Returns the size in bytes of the SRP_RSP response.
-+ *
-+ * An SRP_RSP response contains a SCSI status or service response. See also
-+ * section 6.9 in the SRP r16a document for the format of an SRP_RSP
-+ * response. See also SPC-2 for more information about sense data.
-+ */
-+static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
-+ struct srpt_send_ioctx *ioctx, u64 tag,
-+ int status, const u8 *sense_data,
-+ int sense_data_len)
-+{
-+ struct srp_rsp *srp_rsp;
-+ int max_sense_len;
-+
-+ /*
-+ * The lowest bit of all SAM-3 status codes is zero (see also
-+ * paragraph 5.3 in SAM-3).
-+ */
-+ EXTRACHECKS_WARN_ON(status & 1);
-+
-+ srp_rsp = ioctx->ioctx.buf;
-+ BUG_ON(!srp_rsp);
-+ memset(srp_rsp, 0, sizeof *srp_rsp);
-+
-+ srp_rsp->opcode = SRP_RSP;
-+ srp_rsp->req_lim_delta = cpu_to_be32(ioctx->req_lim_delta);
-+ srp_rsp->tag = tag;
-+ srp_rsp->status = status;
-+
-+ if (!SCST_SENSE_VALID(sense_data))
-+ sense_data_len = 0;
-+ else {
-+ BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
-+ max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
-+ if (sense_data_len > max_sense_len) {
-+ PRINT_WARNING("truncated sense data from %d to %d"
-+ " bytes", sense_data_len, max_sense_len);
-+ sense_data_len = max_sense_len;
-+ }
-+
-+ srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
-+ srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
-+ memcpy(srp_rsp + 1, sense_data, sense_data_len);
-+ }
-+
-+ return sizeof(*srp_rsp) + sense_data_len;
-+}
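-+
-+/*
-+ * Layout example, assuming the 36-byte struct srp_rsp from <scsi/srp.h>:
-+ * for a CHECK CONDITION status with 18 bytes of sense data, the sense bytes
-+ * are copied to srp_rsp + 1, i.e. directly behind the header, and
-+ * 36 + 18 = 54 is returned as the length of the information unit.
-+ */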
-+
-+/**
-+ * srpt_build_tskmgmt_rsp() - Build a task management response.
-+ * @ch: RDMA channel through which the request has been received.
-+ * @ioctx: I/O context in which the SRP_RSP response will be built.
-+ * @rsp_code: RSP_CODE that will be stored in the response.
-+ * @tag: Tag of the request for which this response is being generated.
-+ *
-+ * Returns the size in bytes of the SRP_RSP response.
-+ *
-+ * An SRP_RSP response contains a SCSI status or service response. See also
-+ * section 6.9 in the SRP r16a document for the format of an SRP_RSP
-+ * response.
-+ */
-+static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
-+ struct srpt_send_ioctx *ioctx,
-+ u8 rsp_code, u64 tag)
-+{
-+ struct srp_rsp *srp_rsp;
-+ int resp_data_len;
-+ int resp_len;
-+
-+ resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4;
-+ resp_len = sizeof(*srp_rsp) + resp_data_len;
-+
-+ srp_rsp = ioctx->ioctx.buf;
-+ BUG_ON(!srp_rsp);
-+ memset(srp_rsp, 0, sizeof *srp_rsp);
-+
-+ srp_rsp->opcode = SRP_RSP;
-+ srp_rsp->req_lim_delta = cpu_to_be32(ioctx->req_lim_delta);
-+ srp_rsp->tag = tag;
-+
-+ if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
-+ srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
-+ srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
-+ srp_rsp->data[3] = rsp_code;
-+ }
-+
-+ return resp_len;
-+}
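-+
-+/*
-+ * Example: for rsp_code == SRP_TSK_MGMT_FAILED the response carries four
-+ * bytes of response data with the RSP_CODE in the last byte (data[3]),
-+ * while a successful task management function yields a bare SRP_RSP header
-+ * without any response data.
-+ */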
-+
-+/**
-+ * srpt_handle_cmd() - Process SRP_CMD.
-+ */
-+static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
-+ struct srpt_recv_ioctx *recv_ioctx,
-+ struct srpt_send_ioctx *send_ioctx,
-+ enum scst_exec_context context)
-+{
-+ struct scst_cmd *scmnd;
-+ struct srp_cmd *srp_cmd;
-+ scst_data_direction dir;
-+ u64 data_len;
-+ int ret;
-+ int atomic;
-+
-+ BUG_ON(!send_ioctx);
-+
-+ srp_cmd = recv_ioctx->ioctx.buf;
-+
-+ atomic = context == SCST_CONTEXT_TASKLET ? SCST_ATOMIC
-+ : SCST_NON_ATOMIC;
-+ scmnd = scst_rx_cmd(ch->scst_sess, (u8 *) &srp_cmd->lun,
-+ sizeof srp_cmd->lun, srp_cmd->cdb,
-+ sizeof srp_cmd->cdb, atomic);
-+ if (!scmnd) {
-+ PRINT_ERROR("0x%llx: allocation of an SCST command failed",
-+ srp_cmd->tag);
-+ goto err;
-+ }
-+
-+ send_ioctx->scmnd = scmnd;
-+
-+ ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
-+ if (ret) {
-+ PRINT_ERROR("0x%llx: parsing SRP descriptor table failed.",
-+ srp_cmd->tag);
-+ scst_set_cmd_error(scmnd,
-+ SCST_LOAD_SENSE(scst_sense_invalid_field_in_cdb));
-+ }
-+
-+ switch (srp_cmd->task_attr) {
-+ case SRP_CMD_HEAD_OF_Q:
-+ scst_cmd_set_queue_type(scmnd, SCST_CMD_QUEUE_HEAD_OF_QUEUE);
-+ break;
-+ case SRP_CMD_ORDERED_Q:
-+ scst_cmd_set_queue_type(scmnd, SCST_CMD_QUEUE_ORDERED);
-+ break;
-+ case SRP_CMD_SIMPLE_Q:
-+ scst_cmd_set_queue_type(scmnd, SCST_CMD_QUEUE_SIMPLE);
-+ break;
-+ case SRP_CMD_ACA:
-+ scst_cmd_set_queue_type(scmnd, SCST_CMD_QUEUE_ACA);
-+ break;
-+ default:
-+ scst_cmd_set_queue_type(scmnd, SCST_CMD_QUEUE_ORDERED);
-+ break;
-+ }
-+
-+ scst_cmd_set_tag(scmnd, srp_cmd->tag);
-+ scst_cmd_set_tgt_priv(scmnd, send_ioctx);
-+ scst_cmd_set_expected(scmnd, dir, data_len);
-+ scst_cmd_init_done(scmnd, context);
-+
-+ return 0;
-+
-+err:
-+ srpt_put_send_ioctx(send_ioctx);
-+ return -1;
-+}
-+
-+/**
-+ * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
-+ *
-+ * Returns SCST_MGMT_STATUS_SUCCESS upon success.
-+ *
-+ * Each task management function is performed by calling one of the
-+ * scst_rx_mgmt_fn*() functions. These functions will either report failure
-+ * or process the task management function asynchronously. The function
-+ * srpt_tsk_mgmt_done() will be called by the SCST core upon completion of the
-+ * task management function. When srpt_handle_tsk_mgmt() reports failure
-+ * (i.e. returns a value other than SCST_MGMT_STATUS_SUCCESS) a response will
-+ * have been built in ioctx->buf. This information unit has to be sent back
-+ * by the caller.
-+ *
-+ * For more information about SRP_TSK_MGMT information units, see also section
-+ * 6.7 in the SRP r16a document.
-+ */
-+static u8 srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
-+ struct srpt_recv_ioctx *recv_ioctx,
-+ struct srpt_send_ioctx *send_ioctx)
-+{
-+ struct srp_tsk_mgmt *srp_tsk;
-+ int ret;
-+
-+ ret = SCST_MGMT_STATUS_FAILED;
-+
-+ BUG_ON(!send_ioctx);
-+ BUG_ON(send_ioctx->ch != ch);
-+
-+ srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
-+
-+ srp_tsk = recv_ioctx->ioctx.buf;
-+
-+ TRACE_DBG("recv_tsk_mgmt= %d for task_tag= %lld"
-+ " using tag= %lld cm_id= %p sess= %p",
-+ srp_tsk->tsk_mgmt_func, srp_tsk->task_tag, srp_tsk->tag,
-+ ch->cm_id, ch->scst_sess);
-+
-+ send_ioctx->tsk_mgmt.tag = srp_tsk->tag;
-+
-+ switch (srp_tsk->tsk_mgmt_func) {
-+ case SRP_TSK_ABORT_TASK:
-+ TRACE_DBG("%s", "Processing SRP_TSK_ABORT_TASK");
-+ ret = scst_rx_mgmt_fn_tag(ch->scst_sess,
-+ SCST_ABORT_TASK,
-+ srp_tsk->task_tag,
-+ SCST_ATOMIC, send_ioctx);
-+ break;
-+ case SRP_TSK_ABORT_TASK_SET:
-+ TRACE_DBG("%s", "Processing SRP_TSK_ABORT_TASK_SET");
-+ ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
-+ SCST_ABORT_TASK_SET,
-+ (u8 *) &srp_tsk->lun,
-+ sizeof srp_tsk->lun,
-+ SCST_ATOMIC, send_ioctx);
-+ break;
-+ case SRP_TSK_CLEAR_TASK_SET:
-+ TRACE_DBG("%s", "Processing SRP_TSK_CLEAR_TASK_SET");
-+ ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
-+ SCST_CLEAR_TASK_SET,
-+ (u8 *) &srp_tsk->lun,
-+ sizeof srp_tsk->lun,
-+ SCST_ATOMIC, send_ioctx);
-+ break;
-+ case SRP_TSK_LUN_RESET:
-+ TRACE_DBG("%s", "Processing SRP_TSK_LUN_RESET");
-+ ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
-+ SCST_LUN_RESET,
-+ (u8 *) &srp_tsk->lun,
-+ sizeof srp_tsk->lun,
-+ SCST_ATOMIC, send_ioctx);
-+ break;
-+ case SRP_TSK_CLEAR_ACA:
-+ TRACE_DBG("%s", "Processing SRP_TSK_CLEAR_ACA");
-+ ret = scst_rx_mgmt_fn_lun(ch->scst_sess,
-+ SCST_CLEAR_ACA,
-+ (u8 *) &srp_tsk->lun,
-+ sizeof srp_tsk->lun,
-+ SCST_ATOMIC, send_ioctx);
-+ break;
-+ default:
-+ TRACE_DBG("%s", "Unsupported task management function.");
-+ ret = SCST_MGMT_STATUS_FN_NOT_SUPPORTED;
-+ }
-+
-+ if (ret != SCST_MGMT_STATUS_SUCCESS)
-+ srpt_put_send_ioctx(send_ioctx);
-+
-+ return ret;
-+}
-+
-+static u8 scst_to_srp_tsk_mgmt_status(const int scst_mgmt_status)
-+{
-+ switch (scst_mgmt_status) {
-+ case SCST_MGMT_STATUS_SUCCESS:
-+ return SRP_TSK_MGMT_SUCCESS;
-+ case SCST_MGMT_STATUS_FN_NOT_SUPPORTED:
-+ return SRP_TSK_MGMT_FUNC_NOT_SUPP;
-+ case SCST_MGMT_STATUS_TASK_NOT_EXIST:
-+ case SCST_MGMT_STATUS_LUN_NOT_EXIST:
-+ case SCST_MGMT_STATUS_REJECTED:
-+ case SCST_MGMT_STATUS_FAILED:
-+ default:
-+ break;
-+ }
-+ return SRP_TSK_MGMT_FAILED;
-+}
-+
-+/**
-+ * srpt_handle_new_iu() - Process a newly received information unit.
-+ * @ch: RDMA channel through which the information unit has been received.
-+ * @ioctx: SRPT I/O context associated with the information unit.
-+ */
-+static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
-+ struct srpt_recv_ioctx *recv_ioctx,
-+ struct srpt_send_ioctx *send_ioctx,
-+ enum scst_exec_context context)
-+{
-+ struct srp_cmd *srp_cmd;
-+ enum rdma_ch_state ch_state;
-+
-+ BUG_ON(!ch);
-+ BUG_ON(!recv_ioctx);
-+
-+ ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
-+ recv_ioctx->ioctx.dma, srp_max_req_size,
-+ DMA_FROM_DEVICE);
-+
-+ ch_state = ch->state;
-+ srp_cmd = recv_ioctx->ioctx.buf;
-+ if (unlikely(ch_state == CH_CONNECTING)) {
-+ list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
-+ goto out;
-+ }
-+
-+ if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
-+ if (!send_ioctx)
-+ send_ioctx = srpt_get_send_ioctx(ch);
-+ if (unlikely(!send_ioctx)) {
-+ list_add_tail(&recv_ioctx->wait_list,
-+ &ch->cmd_wait_list);
-+ goto out;
-+ }
-+ }
-+
-+ switch (srp_cmd->opcode) {
-+ case SRP_CMD:
-+ srpt_handle_cmd(ch, recv_ioctx, send_ioctx, context);
-+ break;
-+ case SRP_TSK_MGMT:
-+ srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
-+ break;
-+ case SRP_I_LOGOUT:
-+ PRINT_ERROR("%s", "Not yet implemented: SRP_I_LOGOUT");
-+ break;
-+ case SRP_CRED_RSP:
-+ TRACE_DBG("%s", "received SRP_CRED_RSP");
-+ break;
-+ case SRP_AER_RSP:
-+ TRACE_DBG("%s", "received SRP_AER_RSP");
-+ break;
-+ case SRP_RSP:
-+ PRINT_ERROR("%s", "Received SRP_RSP");
-+ break;
-+ default:
-+ PRINT_ERROR("received IU with unknown opcode 0x%x",
-+ srp_cmd->opcode);
-+ break;
-+ }
-+
-+ srpt_post_recv(ch->sport->sdev, recv_ioctx);
-+out:
-+ return;
-+}
-+
-+static void srpt_process_rcv_completion(struct ib_cq *cq,
-+ struct srpt_rdma_ch *ch,
-+ enum scst_exec_context context,
-+ struct ib_wc *wc)
-+{
-+ struct srpt_device *sdev = ch->sport->sdev;
-+ struct srpt_recv_ioctx *ioctx;
-+ u32 index;
-+
-+ index = idx_from_wr_id(wc->wr_id);
-+ if (wc->status == IB_WC_SUCCESS) {
-+ int req_lim;
-+
-+ req_lim = srpt_adjust_req_lim(ch, -1, 0);
-+ if (unlikely(req_lim < 0))
-+ PRINT_ERROR("req_lim = %d < 0", req_lim);
-+ ioctx = sdev->ioctx_ring[index];
-+ srpt_handle_new_iu(ch, ioctx, NULL, context);
-+ } else {
-+ PRINT_INFO("receiving failed for idx %u with status %d",
-+ index, wc->status);
-+ }
-+}
-+
-+/**
-+ * srpt_process_send_completion() - Process an IB send completion.
-+ *
-+ * Note: Although this has not yet been observed during tests, at least in
-+ * theory it is possible that the srpt_get_send_ioctx() call invoked by
-+ * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
-+ * value in each response is set to at least one, and it is possible that this
-+ * response makes the initiator send a new request before the send completion
-+ * for that response has been processed. This could e.g. happen if the call to
-+ * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
-+ * if IB retransmission causes generation of the send completion to be
-+ * delayed. Incoming information units for which srpt_get_send_ioctx() fails
-+ * are queued on cmd_wait_list. The code below processes these delayed
-+ * requests one at a time.
-+ */
-+static void srpt_process_send_completion(struct ib_cq *cq,
-+ struct srpt_rdma_ch *ch,
-+ enum scst_exec_context context,
-+ struct ib_wc *wc)
-+{
-+ struct srpt_send_ioctx *send_ioctx;
-+ uint32_t index;
-+ enum srpt_opcode opcode;
-+
-+ index = idx_from_wr_id(wc->wr_id);
-+ opcode = opcode_from_wr_id(wc->wr_id);
-+ send_ioctx = ch->ioctx_ring[index];
-+ if (wc->status == IB_WC_SUCCESS) {
-+ if (opcode == SRPT_SEND)
-+ srpt_handle_send_comp(ch, send_ioctx, context);
-+ else {
-+ EXTRACHECKS_WARN_ON(opcode != SRPT_RDMA_ABORT &&
-+ wc->opcode != IB_WC_RDMA_READ);
-+ srpt_handle_rdma_comp(ch, send_ioctx, opcode, context);
-+ }
-+ } else {
-+ if (opcode == SRPT_SEND) {
-+ PRINT_INFO("sending response for idx %u failed with"
-+ " status %d", index, wc->status);
-+ srpt_handle_send_err_comp(ch, wc->wr_id, context);
-+ } else if (opcode != SRPT_RDMA_MID) {
-+			PRINT_INFO("RDMA op %d for idx %u failed with status %d",
-+ opcode, index, wc->status);
-+ srpt_handle_rdma_err_comp(ch, send_ioctx, opcode,
-+ context);
-+ }
-+ }
-+
-+ while (unlikely(opcode == SRPT_SEND
-+ && !list_empty(&ch->cmd_wait_list)
-+ && ch->state == CH_LIVE
-+ && (send_ioctx = srpt_get_send_ioctx(ch)) != NULL)) {
-+ struct srpt_recv_ioctx *recv_ioctx;
-+
-+ recv_ioctx = list_first_entry(&ch->cmd_wait_list,
-+ struct srpt_recv_ioctx,
-+ wait_list);
-+ list_del(&recv_ioctx->wait_list);
-+ srpt_handle_new_iu(ch, recv_ioctx, send_ioctx, context);
-+ }
-+}
-+
-+static void srpt_process_completion(struct ib_cq *cq,
-+ struct srpt_rdma_ch *ch,
-+ enum scst_exec_context rcv_context,
-+ enum scst_exec_context send_context)
-+{
-+ struct ib_wc *const wc = ch->wc;
-+ int i, n;
-+
-+ EXTRACHECKS_WARN_ON(cq != ch->cq);
-+
-+ ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
-+ while ((n = ib_poll_cq(cq, ARRAY_SIZE(ch->wc), wc)) > 0) {
-+ for (i = 0; i < n; i++) {
-+ if (opcode_from_wr_id(wc[i].wr_id) == SRPT_RECV)
-+ srpt_process_rcv_completion(cq, ch, rcv_context,
-+ &wc[i]);
-+ else
-+ srpt_process_send_completion(cq, ch,
-+ send_context,
-+ &wc[i]);
-+ }
-+ }
-+}
-+
-+/**
-+ * srpt_completion() - IB completion queue callback function.
-+ */
-+static void srpt_completion(struct ib_cq *cq, void *ctx)
-+{
-+ struct srpt_rdma_ch *ch = ctx;
-+
-+ BUG_ON(!ch->thread);
-+ wake_up_process(ch->thread);
-+}
-+
-+static int srpt_compl_thread(void *arg)
-+{
-+ struct srpt_rdma_ch *ch;
-+
-+ /* Hibernation / freezing of the SRPT kernel thread is not supported. */
-+ current->flags |= PF_NOFREEZE;
-+
-+ ch = arg;
-+ BUG_ON(!ch);
-+ while (!kthread_should_stop() && !ch->last_wqe_received) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ srpt_process_completion(ch->cq, ch, SCST_CONTEXT_THREAD,
-+ SCST_CONTEXT_DIRECT);
-+ schedule();
-+ }
-+ set_current_state(TASK_RUNNING);
-+
-+ srpt_process_completion(ch->cq, ch, SCST_CONTEXT_THREAD,
-+ SCST_CONTEXT_DIRECT);
-+
-+ /*
-+ * Note: scst_unregister_session() must only be invoked after the last
-+ * WQE event has been received.
-+ */
-+ TRACE_DBG("ch %s: about to invoke scst_unregister_session()",
-+ ch->sess_name);
-+ scst_unregister_session(ch->scst_sess, false, srpt_free_ch);
-+
-+ /*
-+ * Some HCAs can queue send completions after the Last WQE
-+ * event. Make sure to process these work completions.
-+ */
-+ while (ch->state < CH_FREEING) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ srpt_process_completion(ch->cq, ch, SCST_CONTEXT_THREAD,
-+ SCST_CONTEXT_DIRECT);
-+ schedule();
-+ }
-+
-+ complete(&ch->finished_processing_completions);
-+
-+	set_current_state(TASK_INTERRUPTIBLE);
-+	while (!kthread_should_stop()) {
-+		schedule();
-+		set_current_state(TASK_INTERRUPTIBLE);
-+	}
-+	__set_current_state(TASK_RUNNING);
-+
-+ return 0;
-+}
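-+
-+/*
-+ * Lifecycle note: the completion thread is started by srpt_cm_req_recv()
-+ * via kthread_run(), woken up from the srpt_completion() CQ callback and
-+ * only stopped in srpt_free_ch() after the SCST session has been
-+ * unregistered.
-+ */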
-+
-+/**
-+ * srpt_create_ch_ib() - Create a channel's completion queue and queue pair.
-+ */
-+static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
-+{
-+ struct ib_qp_init_attr *qp_init;
-+ struct srpt_device *sdev = ch->sport->sdev;
-+ int ret;
-+
-+ EXTRACHECKS_WARN_ON(ch->rq_size < 1);
-+
-+ ret = -ENOMEM;
-+ qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
-+ if (!qp_init)
-+ goto out;
-+
-+ ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
-+ ch->rq_size + srpt_sq_size, 0);
-+ if (IS_ERR(ch->cq)) {
-+ ret = PTR_ERR(ch->cq);
-+ PRINT_ERROR("failed to create CQ cqe= %d ret= %d",
-+ ch->rq_size + srpt_sq_size, ret);
-+ goto out;
-+ }
-+
-+ qp_init->qp_context = (void *)ch;
-+ qp_init->event_handler
-+ = (void(*)(struct ib_event *, void*))srpt_qp_event;
-+ qp_init->send_cq = ch->cq;
-+ qp_init->recv_cq = ch->cq;
-+ qp_init->srq = sdev->srq;
-+ qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
-+ qp_init->qp_type = IB_QPT_RC;
-+ qp_init->cap.max_send_wr = srpt_sq_size;
-+ /*
-+ * A quote from the OFED 1.5.3.1 release notes
-+ * (docs/release_notes/mthca_release_notes.txt), section "Known Issues":
-+ * In mem-free devices, RC QPs can be created with a maximum of
-+ * (max_sge - 1) entries only; UD QPs can be created with a maximum of
-+ * (max_sge - 3) entries.
-+ * A quote from the OFED 1.2.5 release notes
-+ * (docs/mthca_release_notes.txt), section "Known Issues":
-+ * In mem-free devices, RC QPs can be created with a maximum of
-+ * (max_sge - 3) entries only.
-+ */
-+ ch->max_sge = sdev->dev_attr.max_sge - 3;
-+ WARN_ON(ch->max_sge < 1);
-+ qp_init->cap.max_send_sge = ch->max_sge;
-+
-+ ch->qp = ib_create_qp(sdev->pd, qp_init);
-+ if (IS_ERR(ch->qp)) {
-+ ret = PTR_ERR(ch->qp);
-+ PRINT_ERROR("failed to create_qp ret= %d", ret);
-+ goto err_destroy_cq;
-+ }
-+
-+ ch->sq_wr_avail = qp_init->cap.max_send_wr;
-+
-+ TRACE_DBG("%s: max_cqe= %d max_sge= %d sq_size = %d"
-+ " cm_id= %p", __func__, ch->cq->cqe,
-+ qp_init->cap.max_send_sge, qp_init->cap.max_send_wr,
-+ ch->cm_id);
-+
-+ ret = srpt_init_ch_qp(ch, ch->qp);
-+ if (ret) {
-+ PRINT_ERROR("srpt_init_ch_qp() failed (%d)", ret);
-+ goto err_destroy_qp;
-+ }
-+
-+out:
-+ kfree(qp_init);
-+ return ret;
-+
-+err_destroy_qp:
-+ ib_destroy_qp(ch->qp);
-+err_destroy_cq:
-+ ib_destroy_cq(ch->cq);
-+ goto out;
-+}
-+
-+static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
-+{
-+ TRACE_ENTRY();
-+
-+ while (ib_poll_cq(ch->cq, ARRAY_SIZE(ch->wc), ch->wc) > 0)
-+ ;
-+
-+ ib_destroy_qp(ch->qp);
-+ ib_destroy_cq(ch->cq);
-+
-+ TRACE_EXIT();
-+}
-+
-+/**
-+ * __srpt_close_ch() - Initiate closing of an RDMA channel.
-+ *
-+ * Send a CM REJ and/or DREQ to the initiator and make sure all resources
-+ * associated with the channel will be deallocated at an appropriate time.
-+ *
-+ * Returns true if and only if the channel state has been modified from
-+ * CH_CONNECTING or CH_LIVE into CH_DISCONNECTING.
-+ *
-+ * Note: The caller must hold ch->sport->sdev->spinlock.
-+ */
-+static bool __srpt_close_ch(struct srpt_rdma_ch *ch)
-+{
-+ struct srpt_device *sdev;
-+ enum rdma_ch_state prev_state;
-+ bool was_live;
-+
-+ sdev = ch->sport->sdev;
-+ was_live = false;
-+
-+ prev_state = srpt_set_ch_state_to_disc(ch);
-+
-+ switch (prev_state) {
-+ case CH_CONNECTING:
-+ ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
-+ NULL, 0);
-+ /* fall through */
-+ case CH_LIVE:
-+ was_live = true;
-+ if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
-+ PRINT_ERROR("%s", "sending CM DREQ failed.");
-+ break;
-+ case CH_DISCONNECTING:
-+ case CH_DRAINING:
-+ case CH_FREEING:
-+ break;
-+ }
-+
-+ return was_live;
-+}
-+
-+/**
-+ * srpt_close_ch() - Close an RDMA channel.
-+ */
-+static void srpt_close_ch(struct srpt_rdma_ch *ch)
-+{
-+ struct srpt_device *sdev;
-+
-+ sdev = ch->sport->sdev;
-+ spin_lock_irq(&sdev->spinlock);
-+ __srpt_close_ch(ch);
-+ spin_unlock_irq(&sdev->spinlock);
-+}
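-+
-+/*
-+ * Shutdown sequence sketch: __srpt_close_ch() sends a CM REJ (if the
-+ * channel was still connecting) and/or a CM DREQ to the initiator; once the
-+ * initiator answers with a DREP, srpt_cm_drep_recv() or, after the timewait
-+ * state, srpt_cm_timewait_exit() drains the channel by moving the queue
-+ * pair into the error state.
-+ */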
-+
-+/**
-+ * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
-+ * @cm_id: Pointer to the CM ID of the channel to be drained.
-+ *
-+ * Note: Must be called from inside srpt_cm_handler to avoid a race between
-+ * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
-+ * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
-+ * waits until all target sessions for the associated IB device have been
-+ * unregistered and target session registration involves a call to
-+ * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
-+ * this function has finished).
-+ */
-+static void srpt_drain_channel(struct ib_cm_id *cm_id)
-+{
-+ struct srpt_rdma_ch *ch;
-+ int ret;
-+
-+ WARN_ON_ONCE(irqs_disabled());
-+
-+ ch = cm_id->context;
-+ if (srpt_set_ch_state_to_draining(ch)) {
-+ ret = srpt_ch_qp_err(ch);
-+ if (ret < 0)
-+ PRINT_ERROR("Setting queue pair in error state"
-+ " failed: %d", ret);
-+ }
-+}
-+
-+static void srpt_free_ch(struct scst_session *sess)
-+{
-+ struct srpt_rdma_ch *ch;
-+ struct srpt_device *sdev;
-+
-+ TRACE_ENTRY();
-+
-+ ch = scst_sess_get_tgt_priv(sess);
-+ BUG_ON(ch->scst_sess != sess);
-+ sdev = ch->sport->sdev;
-+ BUG_ON(!sdev);
-+
-+ WARN_ON(!srpt_test_and_set_ch_state(ch, CH_DRAINING, CH_FREEING));
-+ WARN_ON(!ch->last_wqe_received);
-+
-+ BUG_ON(!ch->thread);
-+ BUG_ON(ch->thread == current);
-+
-+ while (wait_for_completion_timeout(&ch->finished_processing_completions,
-+ 10 * HZ) == 0)
-+ PRINT_INFO("%s", "Waiting for completion processing thread ...");
-+
-+ srpt_destroy_ch_ib(ch);
-+
-+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
-+ sdev, ch->rq_size,
-+ ch->max_rsp_size, DMA_TO_DEVICE);
-+
-+ spin_lock_irq(&sdev->spinlock);
-+ list_del(&ch->list);
-+ spin_unlock_irq(&sdev->spinlock);
-+
-+ ib_destroy_cm_id(ch->cm_id);
-+
-+ kthread_stop(ch->thread);
-+ ch->thread = NULL;
-+
-+ kfree(ch);
-+
-+ wake_up(&sdev->ch_releaseQ);
-+
-+ TRACE_EXIT();
-+}
-+
-+/**
-+ * srpt_enable_target() - Enable or disable a target via sysfs.
-+ */
-+static int srpt_enable_target(struct scst_tgt *scst_tgt, bool enable)
-+{
-+ struct srpt_device *sdev = scst_tgt_get_tgt_priv(scst_tgt);
-+
-+ EXTRACHECKS_WARN_ON_ONCE(irqs_disabled());
-+
-+ if (!sdev)
-+ return -ENOENT;
-+
-+ TRACE_DBG("%s target %s", enable ? "Enabling" : "Disabling",
-+ sdev->device->name);
-+
-+ spin_lock_irq(&sdev->spinlock);
-+ sdev->enabled = enable;
-+ spin_unlock_irq(&sdev->spinlock);
-+
-+ return 0;
-+}
-+
-+/**
-+ * srpt_is_target_enabled() - Query a target's enabled/disabled state via sysfs.
-+ */
-+static bool srpt_is_target_enabled(struct scst_tgt *scst_tgt)
-+{
-+ struct srpt_device *sdev = scst_tgt_get_tgt_priv(scst_tgt);
-+ bool res;
-+
-+ EXTRACHECKS_WARN_ON_ONCE(irqs_disabled());
-+
-+ if (!sdev)
-+ return false;
-+
-+ spin_lock_irq(&sdev->spinlock);
-+ res = sdev->enabled;
-+ spin_unlock_irq(&sdev->spinlock);
-+ return res;
-+}
-+
-+/**
-+ * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
-+ *
-+ * Ownership of the cm_id is transferred to the SCST session if this function
-+ * returns zero. Otherwise the caller remains the owner of cm_id.
-+ */
-+static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
-+ struct ib_cm_req_event_param *param,
-+ void *private_data)
-+{
-+ struct srpt_device *sdev = cm_id->context;
-+ struct srp_login_req *req;
-+ struct srp_login_rsp *rsp;
-+ struct srp_login_rej *rej;
-+ struct ib_cm_rep_param *rep_param;
-+ struct srpt_rdma_ch *ch;
-+ struct task_struct *thread;
-+ u32 it_iu_len;
-+ int i;
-+ int ret = 0;
-+
-+ EXTRACHECKS_WARN_ON_ONCE(irqs_disabled());
-+
-+ if (WARN_ON(!sdev || !private_data))
-+ return -EINVAL;
-+
-+ req = (struct srp_login_req *)private_data;
-+
-+ it_iu_len = be32_to_cpu(req->req_it_iu_len);
-+
-+ PRINT_INFO("Received SRP_LOGIN_REQ with"
-+ " i_port_id 0x%llx:0x%llx, t_port_id 0x%llx:0x%llx and it_iu_len %d"
-+ " on port %d (guid=0x%llx:0x%llx)",
-+ be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
-+ be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
-+ be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
-+ be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
-+ it_iu_len,
-+ param->port,
-+ be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
-+ be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
-+
-+ rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
-+ rej = kzalloc(sizeof *rej, GFP_KERNEL);
-+ rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
-+
-+ if (!rsp || !rej || !rep_param) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+ if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
-+ rej->reason = cpu_to_be32(
-+ SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
-+ ret = -EINVAL;
-+ PRINT_ERROR("rejected SRP_LOGIN_REQ because its"
-+ " length (%d bytes) is out of range (%d .. %d)",
-+ it_iu_len, 64, srp_max_req_size);
-+ goto reject;
-+ }
-+
-+ if (!srpt_is_target_enabled(sdev->scst_tgt)) {
-+ rej->reason = cpu_to_be32(
-+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
-+ ret = -EINVAL;
-+ PRINT_ERROR("rejected SRP_LOGIN_REQ because the target %s (%s)"
-+ " has not yet been enabled",
-+ sdev->scst_tgt->tgt_name, sdev->device->name);
-+ goto reject;
-+ }
-+
-+ if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
-+ || *(__be64 *)(req->target_port_id + 8) !=
-+ cpu_to_be64(srpt_service_guid)) {
-+ rej->reason = cpu_to_be32(
-+ SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
-+ ret = -ENOMEM;
-+ PRINT_ERROR("%s", "rejected SRP_LOGIN_REQ because it"
-+ " has an invalid target port identifier.");
-+ goto reject;
-+ }
-+
-+ ch = kzalloc(sizeof *ch, GFP_KERNEL);
-+ if (!ch) {
-+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
-+ PRINT_ERROR("%s",
-+ "rejected SRP_LOGIN_REQ because out of memory.");
-+ ret = -ENOMEM;
-+ goto reject;
-+ }
-+
-+ memcpy(ch->i_port_id, req->initiator_port_id, 16);
-+ memcpy(ch->t_port_id, req->target_port_id, 16);
-+ ch->sport = &sdev->port[param->port - 1];
-+ ch->cm_id = cm_id;
-+ cm_id->context = ch;
-+ /*
-+ * Avoid QUEUE_FULL conditions by limiting the number of buffers used
-+ * for the SRP protocol to the SCST SCSI command queue size.
-+ */
-+ ch->rq_size = min(SRPT_RQ_SIZE, scst_get_max_lun_commands(NULL, 0));
-+ spin_lock_init(&ch->spinlock);
-+ ch->state = CH_CONNECTING;
-+ INIT_LIST_HEAD(&ch->cmd_wait_list);
-+ init_waitqueue_head(&ch->state_wq);
-+ init_completion(&ch->finished_processing_completions);
-+ ch->max_rsp_size = max_t(uint32_t, srp_max_rsp_size, MIN_MAX_RSP_SIZE);
-+ ch->ioctx_ring = (struct srpt_send_ioctx **)
-+ srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
-+ sizeof(*ch->ioctx_ring[0]),
-+ ch->max_rsp_size, DMA_TO_DEVICE);
-+ if (!ch->ioctx_ring) {
-+		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
-+		ret = -ENOMEM;
-+		goto free_ch;
-+ }
-+
-+ INIT_LIST_HEAD(&ch->free_list);
-+ for (i = 0; i < ch->rq_size; i++) {
-+ ch->ioctx_ring[i]->ch = ch;
-+ list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
-+ }
-+
-+ ret = srpt_create_ch_ib(ch);
-+ if (ret) {
-+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
-+ PRINT_ERROR("%s", "rejected SRP_LOGIN_REQ because creating"
-+ " a new RDMA channel failed.");
-+ goto free_ring;
-+ }
-+
-+ if (use_port_guid_in_session_name) {
-+ /*
-+ * If the kernel module parameter use_port_guid_in_session_name
-+ * has been specified, use a combination of the target port
-+ * GUID and the initiator port ID as the session name. This
-+ * was the original behavior of the SRP target implementation
-+ * (i.e. before the SRPT was included in OFED 1.3).
-+ */
-+ snprintf(ch->sess_name, sizeof(ch->sess_name),
-+ "0x%016llx%016llx",
-+ be64_to_cpu(*(__be64 *)
-+ &sdev->port[param->port - 1].gid.raw[8]),
-+ be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
-+ } else {
-+ /*
-+		 * Default behavior: use the initiator port identifier as the
-+ * session name.
-+ */
-+ snprintf(ch->sess_name, sizeof(ch->sess_name),
-+ "0x%016llx%016llx",
-+ be64_to_cpu(*(__be64 *)ch->i_port_id),
-+ be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
-+ }
-+
-+ TRACE_DBG("registering session %s", ch->sess_name);
-+
-+ BUG_ON(!sdev->scst_tgt);
-+ ch->scst_sess = scst_register_session(sdev->scst_tgt, 0, ch->sess_name,
-+ ch, NULL, NULL);
-+ if (!ch->scst_sess) {
-+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
-+		ret = -ENOMEM;
-+		TRACE_DBG("%s", "Failed to create SCST session");
-+		goto destroy_ib;
-+ }
-+
-+ thread = kthread_run(srpt_compl_thread, ch, "srpt_%s",
-+ ch->sport->sdev->device->name);
-+ if (IS_ERR(thread)) {
-+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
-+		ret = PTR_ERR(thread);
-+		PRINT_ERROR("failed to create kernel thread %ld",
-+			    PTR_ERR(thread));
-+ goto unreg_ch;
-+ }
-+
-+ spin_lock_irq(&sdev->spinlock);
-+ if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
-+ struct srpt_rdma_ch *ch2;
-+
-+ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
-+ list_for_each_entry(ch2, &sdev->rch_list, list) {
-+ if (!memcmp(ch2->i_port_id, req->initiator_port_id, 16)
-+ && !memcmp(ch2->t_port_id, req->target_port_id, 16)
-+ && param->port == ch2->sport->port
-+ && param->listen_id == ch2->sport->sdev->cm_id
-+ && ch2->cm_id) {
-+ if (!__srpt_close_ch(ch2))
-+ continue;
-+
-+ PRINT_INFO("Relogin - closed existing channel"
-+ " %s; cm_id = %p", ch2->sess_name,
-+ ch2->cm_id);
-+
-+ rsp->rsp_flags =
-+ SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
-+ }
-+ }
-+ } else {
-+ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
-+ }
-+ list_add_tail(&ch->list, &sdev->rch_list);
-+ ch->thread = thread;
-+ spin_unlock_irq(&sdev->spinlock);
-+
-+ ret = srpt_ch_qp_rtr(ch, ch->qp);
-+ if (ret) {
-+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
-+ PRINT_ERROR("rejected SRP_LOGIN_REQ because enabling"
-+ " RTR failed (error code = %d)", ret);
-+ goto reject_and_release;
-+ }
-+
-+ TRACE_DBG("Establish connection sess=%p name=%s cm_id=%p",
-+ ch->scst_sess, ch->sess_name, ch->cm_id);
-+
-+ /* create srp_login_response */
-+ rsp->opcode = SRP_LOGIN_RSP;
-+ rsp->tag = req->tag;
-+ rsp->max_it_iu_len = req->req_it_iu_len;
-+ rsp->max_ti_iu_len = req->req_it_iu_len;
-+ ch->max_ti_iu_len = it_iu_len;
-+ rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
-+ SRP_BUF_FORMAT_INDIRECT);
-+ rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
-+ ch->req_lim = ch->rq_size;
-+ ch->req_lim_delta = 0;
-+
-+ /* create cm reply */
-+ rep_param->qp_num = ch->qp->qp_num;
-+ rep_param->private_data = (void *)rsp;
-+ rep_param->private_data_len = sizeof *rsp;
-+ rep_param->rnr_retry_count = 7;
-+ rep_param->flow_control = 1;
-+ rep_param->failover_accepted = 0;
-+ rep_param->srq = 1;
-+ rep_param->responder_resources = 4;
-+ rep_param->initiator_depth = 4;
-+
-+ spin_lock_irq(&sdev->spinlock);
-+ if (ch->state == CH_CONNECTING)
-+ ret = ib_send_cm_rep(cm_id, rep_param);
-+ else
-+ ret = -ECONNABORTED;
-+ spin_unlock_irq(&sdev->spinlock);
-+
-+ switch (ret) {
-+ case 0:
-+ break;
-+ case -ECONNABORTED:
-+ goto out_keep_cm_id;
-+ default:
-+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
-+ PRINT_ERROR("sending SRP_LOGIN_REQ response failed"
-+ " (error code = %d)", ret);
-+ goto reject_and_release;
-+ }
-+
-+ goto out;
-+
-+reject_and_release:
-+ PRINT_INFO("Rejecting login with reason %#x", be32_to_cpu(rej->reason));
-+ rej->opcode = SRP_LOGIN_REJ;
-+ rej->tag = req->tag;
-+ rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
-+ SRP_BUF_FORMAT_INDIRECT);
-+ ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
-+ (void *)rej, sizeof *rej);
-+
-+ srpt_close_ch(ch);
-+out_keep_cm_id:
-+ /*
-+ * Tell the caller not to free cm_id since srpt_free_ch() will do that.
-+ */
-+ ret = 0;
-+ goto out;
-+
-+unreg_ch:
-+ scst_unregister_session(ch->scst_sess, true, NULL);
-+
-+destroy_ib:
-+ srpt_destroy_ch_ib(ch);
-+
-+free_ring:
-+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
-+ ch->sport->sdev, ch->rq_size,
-+ ch->max_rsp_size, DMA_TO_DEVICE);
-+
-+free_ch:
-+ cm_id->context = NULL;
-+ kfree(ch);
-+
-+reject:
-+ PRINT_INFO("Rejecting login with reason %#x", be32_to_cpu(rej->reason));
-+ rej->opcode = SRP_LOGIN_REJ;
-+ rej->tag = req->tag;
-+ rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
-+ SRP_BUF_FORMAT_INDIRECT);
-+ ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
-+ (void *)rej, sizeof *rej);
-+
-+out:
-+ kfree(rep_param);
-+ kfree(rsp);
-+ kfree(rej);
-+
-+ return ret;
-+}
-+
-+static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
-+{
-+ PRINT_INFO("Received InfiniBand REJ packet for cm_id %p.", cm_id);
-+ srpt_drain_channel(cm_id);
-+}
-+
-+/**
-+ * srpt_cm_rtu_recv() - Process IB CM RTU_RECEIVED and USER_ESTABLISHED events.
-+ *
-+ * An IB_CM_RTU_RECEIVED message indicates that the connection is established
-+ * and that the recipient may begin transmitting (RTU = ready to use).
-+ */
-+static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
-+{
-+ struct srpt_rdma_ch *ch;
-+ int ret;
-+
-+ ch = cm_id->context;
-+ BUG_ON(!ch);
-+
-+ if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) {
-+ struct srpt_recv_ioctx *ioctx, *ioctx_tmp;
-+
-+ ret = srpt_ch_qp_rts(ch, ch->qp);
-+
-+ list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
-+ wait_list) {
-+ list_del(&ioctx->wait_list);
-+ srpt_handle_new_iu(ch, ioctx, NULL,
-+ SCST_CONTEXT_THREAD);
-+ }
-+ if (ret)
-+ srpt_close_ch(ch);
-+ }
-+}
-+
-+static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
-+{
-+ PRINT_INFO("Received InfiniBand TimeWait exit for cm_id %p.", cm_id);
-+ srpt_drain_channel(cm_id);
-+}
-+
-+static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
-+{
-+ PRINT_INFO("Received InfiniBand REP error for cm_id %p.", cm_id);
-+ srpt_drain_channel(cm_id);
-+}
-+
-+/**
-+ * srpt_cm_dreq_recv() - Process reception of a DREQ message.
-+ */
-+static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
-+{
-+ struct srpt_rdma_ch *ch;
-+
-+ ch = cm_id->context;
-+
-+ switch (srpt_set_ch_state_to_disc(ch)) {
-+ case CH_CONNECTING:
-+ case CH_LIVE:
-+ if (ib_send_cm_drep(ch->cm_id, NULL, 0) >= 0)
-+ PRINT_INFO("Received DREQ and sent DREP for session %s",
-+ ch->sess_name);
-+ else
-+ PRINT_ERROR("%s", "Sending DREP failed");
-+ break;
-+ default:
-+ WARN_ON(true);
-+ break;
-+ }
-+}
-+
-+/**
-+ * srpt_cm_drep_recv() - Process reception of a DREP message.
-+ */
-+static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
-+{
-+ PRINT_INFO("Received InfiniBand DREP message for cm_id %p.", cm_id);
-+ srpt_drain_channel(cm_id);
-+}
-+
-+/**
-+ * srpt_cm_handler() - IB connection manager callback function.
-+ *
-+ * A non-zero return value will cause the caller to destroy the CM ID.
-+ *
-+ * Note: srpt_cm_handler() must only return a non-zero value when ownership
-+ * of the cm_id has not been transferred to a channel, i.e. when
-+ * srpt_cm_req_recv() failed. Returning
-+ * a non-zero value in any other case will trigger a race with the
-+ * ib_destroy_cm_id() call in srpt_free_ch().
-+ */
-+static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
-+{
-+ int ret;
-+
-+ BUG_ON(!cm_id->context);
-+
-+ ret = 0;
-+ switch (event->event) {
-+ case IB_CM_REQ_RECEIVED:
-+ ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
-+ event->private_data);
-+ break;
-+ case IB_CM_REJ_RECEIVED:
-+ srpt_cm_rej_recv(cm_id);
-+ break;
-+ case IB_CM_RTU_RECEIVED:
-+ case IB_CM_USER_ESTABLISHED:
-+ srpt_cm_rtu_recv(cm_id);
-+ break;
-+ case IB_CM_DREQ_RECEIVED:
-+ srpt_cm_dreq_recv(cm_id);
-+ break;
-+ case IB_CM_DREP_RECEIVED:
-+ srpt_cm_drep_recv(cm_id);
-+ break;
-+ case IB_CM_TIMEWAIT_EXIT:
-+ srpt_cm_timewait_exit(cm_id);
-+ break;
-+ case IB_CM_REP_ERROR:
-+ srpt_cm_rep_error(cm_id);
-+ break;
-+ case IB_CM_DREQ_ERROR:
-+ PRINT_INFO("%s", "Received IB DREQ ERROR event.");
-+ break;
-+ case IB_CM_MRA_RECEIVED:
-+ PRINT_INFO("%s", "Received IB MRA event");
-+ break;
-+ default:
-+ PRINT_ERROR("received unrecognized IB CM event %d",
-+ event->event);
-+ break;
-+ }
-+
-+ return ret;
-+}
-+
-+/**
-+ * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
-+ */
-+static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
-+ struct srpt_send_ioctx *ioctx,
-+ struct scst_cmd *scmnd)
-+{
-+ struct scatterlist *sg;
-+ int sg_cnt;
-+ scst_data_direction dir;
-+ struct rdma_iu *riu;
-+ struct srp_direct_buf *db;
-+ dma_addr_t dma_addr;
-+ struct ib_sge *sge_array, *sge;
-+ u64 raddr;
-+ u32 rsize;
-+ u32 tsize;
-+ u32 dma_len;
-+ int count;
-+ int i, j, k;
-+ int max_sge, nsge;
-+
-+ BUG_ON(!ch);
-+ BUG_ON(!ioctx);
-+ BUG_ON(!scmnd);
-+ max_sge = ch->max_sge;
-+ dir = scst_cmd_get_data_direction(scmnd);
-+ BUG_ON(dir == SCST_DATA_NONE);
-+ /*
-+ * Cache 'dir' because it is needed in srpt_unmap_sg_to_ib_sge()
-+ * and because scst_set_cmd_error_status() resets scmnd->data_direction.
-+ */
-+ ioctx->dir = dir;
-+ if (dir == SCST_DATA_WRITE) {
-+ scst_cmd_get_write_fields(scmnd, &sg, &sg_cnt);
-+ WARN_ON(!sg);
-+ } else {
-+ sg = scst_cmd_get_sg(scmnd);
-+ sg_cnt = scst_cmd_get_sg_cnt(scmnd);
-+ WARN_ON(!sg);
-+ }
-+ ioctx->sg = sg;
-+ ioctx->sg_cnt = sg_cnt;
-+ count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
-+ scst_to_tgt_dma_dir(dir));
-+ if (unlikely(!count))
-+ return -EBUSY;
-+
-+ ioctx->mapped_sg_count = count;
-+
-+ {
-+ int size, nrdma;
-+
-+ nrdma = (count + max_sge - 1) / max_sge + ioctx->n_rbuf;
-+ nsge = count + ioctx->n_rbuf;
-+ size = nrdma * sizeof(*riu) + nsge * sizeof(*sge);
-+ ioctx->rdma_ius = size <= sizeof(ioctx->rdma_ius_buf) ?
-+ ioctx->rdma_ius_buf : kmalloc(size,
-+ scst_cmd_atomic(scmnd) ? GFP_ATOMIC : GFP_KERNEL);
-+ if (!ioctx->rdma_ius)
-+ goto free_mem;
-+
-+ ioctx->n_rdma_ius = nrdma;
-+ sge_array = (struct ib_sge *)(ioctx->rdma_ius + nrdma);
-+ }
-+
-+ db = ioctx->rbufs;
-+ tsize = (dir == SCST_DATA_READ)
-+ ? scst_cmd_get_adjusted_resp_data_len(scmnd)
-+ : scst_cmd_get_bufflen(scmnd);
-+ dma_len = sg_dma_len(&sg[0]);
-+ riu = ioctx->rdma_ius;
-+ sge = sge_array;
-+
-+	/*
-+	 * For each remote descriptor, calculate the number of ib_sge entries
-+	 * that are needed. If that number does not exceed max_sge, a single
-+	 * rdma_iu (one RDMA work request) suffices for the descriptor;
-+	 * otherwise extra rdma_iu entries are allocated to carry the
-+	 * remaining ib_sge entries in additional RDMA work requests.
-+	 */
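-+	/*
-+	 * Worked example: with max_sge == 4, a single remote buffer
-+	 * (n_rbuf == 1) and count == 10 mapped segments, nrdma above is
-+	 * (10 + 3) / 4 + 1 == 4 and the loop below fills in three rdma_iu
-+	 * entries with 4, 4 and 2 SGEs respectively.
-+	 */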
-+ for (i = 0, j = 0;
-+ j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
-+ rsize = be32_to_cpu(db->len);
-+ raddr = be64_to_cpu(db->va);
-+ riu->raddr = raddr;
-+ riu->rkey = be32_to_cpu(db->key);
-+ riu->sge_cnt = 0;
-+ riu->sge = sge;
-+
-+ /* calculate how many sge required for this remote_buf */
-+ while (rsize > 0 && tsize > 0) {
-+
-+ if (rsize >= dma_len) {
-+ tsize -= dma_len;
-+ rsize -= dma_len;
-+ raddr += dma_len;
-+
-+ if (tsize > 0) {
-+ ++j;
-+ if (j < count)
-+ dma_len = sg_dma_len(&sg[j]);
-+ }
-+ } else {
-+ tsize -= rsize;
-+ dma_len -= rsize;
-+ rsize = 0;
-+ }
-+
-+ ++riu->sge_cnt;
-+ ++sge;
-+
-+ if (rsize > 0 && riu->sge_cnt == max_sge) {
-+ ++riu;
-+ riu->raddr = raddr;
-+ riu->rkey = be32_to_cpu(db->key);
-+ riu->sge_cnt = 0;
-+ riu->sge = sge;
-+ }
-+ }
-+ }
-+
-+ ioctx->n_rdma = riu - ioctx->rdma_ius;
-+ EXTRACHECKS_WARN_ON(ioctx->n_rdma > ioctx->n_rdma_ius);
-+ EXTRACHECKS_WARN_ON(sge - sge_array > nsge);
-+
-+ db = ioctx->rbufs;
-+ tsize = (dir == SCST_DATA_READ)
-+ ? scst_cmd_get_adjusted_resp_data_len(scmnd)
-+ : scst_cmd_get_bufflen(scmnd);
-+ riu = ioctx->rdma_ius;
-+ dma_len = sg_dma_len(&sg[0]);
-+ dma_addr = sg_dma_address(&sg[0]);
-+
-+	/* The second loop maps the sg addresses onto the ib_sge entries. */
-+ for (i = 0, j = 0;
-+ j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
-+ rsize = be32_to_cpu(db->len);
-+ sge = riu->sge;
-+ k = 0;
-+
-+ while (rsize > 0 && tsize > 0) {
-+ sge->addr = dma_addr;
-+ sge->lkey = ch->sport->sdev->mr->lkey;
-+
-+ if (rsize >= dma_len) {
-+ sge->length =
-+ (tsize < dma_len) ? tsize : dma_len;
-+ tsize -= dma_len;
-+ rsize -= dma_len;
-+
-+ if (tsize > 0) {
-+ ++j;
-+ if (j < count) {
-+ dma_len = sg_dma_len(&sg[j]);
-+ dma_addr =
-+ sg_dma_address(&sg[j]);
-+ }
-+ }
-+ } else {
-+ sge->length = (tsize < rsize) ? tsize : rsize;
-+ tsize -= rsize;
-+ dma_len -= rsize;
-+ dma_addr += rsize;
-+ rsize = 0;
-+ }
-+
-+ ++k;
-+ if (k == riu->sge_cnt && rsize > 0 && tsize > 0) {
-+ ++riu;
-+ sge = riu->sge;
-+ k = 0;
-+ } else if (rsize > 0 && tsize > 0)
-+ ++sge;
-+ }
-+ }
-+
-+ EXTRACHECKS_WARN_ON(riu - ioctx->rdma_ius != ioctx->n_rdma);
-+
-+ return 0;
-+
-+free_mem:
-+ srpt_unmap_sg_to_ib_sge(ch, ioctx);
-+
-+ return -ENOMEM;
-+}
-+
-+/**
-+ * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
-+ */
-+static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
-+ struct srpt_send_ioctx *ioctx)
-+{
-+ struct scatterlist *sg;
-+ scst_data_direction dir;
-+
-+ EXTRACHECKS_BUG_ON(!ch);
-+ EXTRACHECKS_BUG_ON(!ioctx);
-+ EXTRACHECKS_BUG_ON(ioctx->n_rdma && !ioctx->rdma_ius);
-+
-+ if (ioctx->rdma_ius != (void *)ioctx->rdma_ius_buf)
-+ kfree(ioctx->rdma_ius);
-+ ioctx->rdma_ius = NULL;
-+ ioctx->n_rdma = 0;
-+
-+ if (ioctx->mapped_sg_count) {
-+ EXTRACHECKS_BUG_ON(!ioctx->scmnd);
-+ EXTRACHECKS_WARN_ON(ioctx
-+ != scst_cmd_get_tgt_priv(ioctx->scmnd));
-+ sg = ioctx->sg;
-+ EXTRACHECKS_WARN_ON(!sg);
-+ dir = ioctx->dir;
-+ EXTRACHECKS_BUG_ON(dir == SCST_DATA_NONE);
-+ ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
-+ scst_to_tgt_dma_dir(dir));
-+ ioctx->mapped_sg_count = 0;
-+ }
-+}
-+
-+/**
-+ * srpt_perform_rdmas() - Perform IB RDMA.
-+ *
-+ * Returns zero upon success or a negative number upon failure.
-+ */
-+static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
-+ struct srpt_send_ioctx *ioctx,
-+ scst_data_direction dir)
-+{
-+ struct ib_send_wr wr;
-+ struct ib_send_wr *bad_wr;
-+ struct rdma_iu *riu;
-+ int i;
-+ int ret;
-+ int sq_wr_avail;
-+ const int n_rdma = ioctx->n_rdma;
-+
-+ if (dir == SCST_DATA_WRITE) {
-+ ret = -ENOMEM;
-+ sq_wr_avail = srpt_adjust_srq_wr_avail(ch, -n_rdma);
-+ if (sq_wr_avail < 0) {
-+ PRINT_WARNING("IB send queue full (needed %d)",
-+ n_rdma);
-+ goto out;
-+ }
-+ }
-+
-+ ioctx->rdma_aborted = false;
-+ ret = 0;
-+ riu = ioctx->rdma_ius;
-+ memset(&wr, 0, sizeof wr);
-+
-+ for (i = 0; i < n_rdma; ++i, ++riu) {
-+ if (dir == SCST_DATA_READ) {
-+ wr.opcode = IB_WR_RDMA_WRITE;
-+ wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
-+ SRPT_RDMA_WRITE_LAST :
-+ SRPT_RDMA_MID,
-+ ioctx->ioctx.index);
-+ } else {
-+ wr.opcode = IB_WR_RDMA_READ;
-+ wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
-+ SRPT_RDMA_READ_LAST :
-+ SRPT_RDMA_MID,
-+ ioctx->ioctx.index);
-+ }
-+ wr.next = NULL;
-+ wr.wr.rdma.remote_addr = riu->raddr;
-+ wr.wr.rdma.rkey = riu->rkey;
-+ wr.num_sge = riu->sge_cnt;
-+ wr.sg_list = riu->sge;
-+
-+		/*
-+		 * Get a completion event only for the last RDMA WR: send
-+		 * queue WRs complete in order, so one signal suffices.
-+		 */
-+ if (i == (n_rdma - 1) && dir == SCST_DATA_WRITE)
-+ wr.send_flags = IB_SEND_SIGNALED;
-+
-+ ret = ib_post_send(ch->qp, &wr, &bad_wr);
-+ if (ret)
-+ break;
-+ }
-+
-+ if (ret)
-+ PRINT_ERROR("%s[%d]: ib_post_send() returned %d for %d/%d",
-+ __func__, __LINE__, ret, i, n_rdma);
-+ if (ret && i > 0) {
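-+		/*
-+		 * Some WRs have already been posted; post one more signaled,
-+		 * zero-length abort WR so that its completion tells us when
-+		 * the earlier, unsignaled WRs have drained.
-+		 */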
-+ wr.num_sge = 0;
-+ wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index);
-+ wr.send_flags = IB_SEND_SIGNALED;
-+ while (ch->state == CH_LIVE &&
-+ ib_post_send(ch->qp, &wr, &bad_wr) != 0) {
-+ PRINT_INFO("Trying to abort failed RDMA transfer [%d]",
-+ ioctx->ioctx.index);
-+ msleep(1000);
-+ }
-+ while (ch->state != CH_DRAINING && !ioctx->rdma_aborted) {
-+ PRINT_INFO("Waiting until RDMA abort finished [%d]",
-+ ioctx->ioctx.index);
-+ msleep(1000);
-+ }
-+ PRINT_INFO("%s[%d]: done", __func__, __LINE__);
-+ }
-+
-+out:
-+ if (unlikely(dir == SCST_DATA_WRITE && ret < 0))
-+ srpt_adjust_srq_wr_avail(ch, n_rdma);
-+ return ret;
-+}
-+
-+/**
-+ * srpt_xfer_data() - Start data transfer from initiator to target.
-+ *
-+ * Returns an SCST_TGT_RES_... status code.
-+ *
-+ * Note: Must not block.
-+ */
-+static int srpt_xfer_data(struct srpt_rdma_ch *ch,
-+ struct srpt_send_ioctx *ioctx,
-+ struct scst_cmd *scmnd)
-+{
-+ int ret;
-+
-+ ret = srpt_map_sg_to_ib_sge(ch, ioctx, scmnd);
-+ if (ret) {
-+ PRINT_ERROR("%s[%d] ret=%d", __func__, __LINE__, ret);
-+ ret = SCST_TGT_RES_QUEUE_FULL;
-+ goto out;
-+ }
-+
-+ ret = srpt_perform_rdmas(ch, ioctx, scst_cmd_get_data_direction(scmnd));
-+ if (ret) {
-+ if (ret == -EAGAIN || ret == -ENOMEM) {
-+ PRINT_INFO("%s[%d] queue full -- ret=%d",
-+ __func__, __LINE__, ret);
-+ ret = SCST_TGT_RES_QUEUE_FULL;
-+ } else {
-+ PRINT_ERROR("%s[%d] fatal error -- ret=%d",
-+ __func__, __LINE__, ret);
-+ ret = SCST_TGT_RES_FATAL_ERROR;
-+ }
-+ goto out_unmap;
-+ }
-+
-+ ret = SCST_TGT_RES_SUCCESS;
-+
-+out:
-+ return ret;
-+out_unmap:
-+ srpt_unmap_sg_to_ib_sge(ch, ioctx);
-+ goto out;
-+}
-+
-+/**
-+ * srpt_pending_cmd_timeout() - SCST command HCA processing timeout callback.
-+ *
-+ * Called by the SCST core if no IB completion notification has been received
-+ * within RDMA_COMPL_TIMEOUT_S seconds.
-+ */
-+static void srpt_pending_cmd_timeout(struct scst_cmd *scmnd)
-+{
-+ struct srpt_send_ioctx *ioctx;
-+ enum srpt_command_state state;
-+
-+ ioctx = scst_cmd_get_tgt_priv(scmnd);
-+ BUG_ON(!ioctx);
-+
-+ state = ioctx->state;
-+ switch (state) {
-+ case SRPT_STATE_NEW:
-+ case SRPT_STATE_DATA_IN:
-+ case SRPT_STATE_DONE:
-+ /*
-+ * srpt_pending_cmd_timeout() should never be invoked for
-+ * commands in this state.
-+ */
-+ PRINT_ERROR("Processing SCST command %p (SRPT state %d) took"
-+ " too long -- aborting", scmnd, state);
-+ break;
-+ case SRPT_STATE_NEED_DATA:
-+ case SRPT_STATE_CMD_RSP_SENT:
-+ case SRPT_STATE_MGMT_RSP_SENT:
-+ default:
-+ PRINT_ERROR("Command %p: IB completion for idx %u has not"
-+ " been received in time (SRPT command state %d)",
-+ scmnd, ioctx->ioctx.index, state);
-+ break;
-+ }
-+
-+ srpt_abort_cmd(ioctx, SCST_CONTEXT_SAME);
-+}
-+
-+/**
-+ * srpt_rdy_to_xfer() - Transfers data from initiator to target.
-+ *
-+ * Called by the SCST core to transfer data from the initiator to the target
-+ * (SCST_DATA_WRITE). Must not block.
-+ */
-+static int srpt_rdy_to_xfer(struct scst_cmd *scmnd)
-+{
-+ struct srpt_send_ioctx *ioctx;
-+ enum srpt_command_state prev_cmd_state;
-+ int ret;
-+
-+ ioctx = scst_cmd_get_tgt_priv(scmnd);
-+ prev_cmd_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
-+ ret = srpt_xfer_data(ioctx->ch, ioctx, scmnd);
-+ if (unlikely(ret != SCST_TGT_RES_SUCCESS))
-+ srpt_set_cmd_state(ioctx, prev_cmd_state);
-+
-+ return ret;
-+}
-+
-+/**
-+ * srpt_xmit_response() - Transmits the response to a SCSI command.
-+ *
-+ * Callback function called by the SCST core. Must not block. Must ensure that
-+ * scst_tgt_cmd_done() will get invoked when returning SCST_TGT_RES_SUCCESS.
-+ */
-+static int srpt_xmit_response(struct scst_cmd *scmnd)
-+{
-+ struct srpt_rdma_ch *ch;
-+ struct srpt_send_ioctx *ioctx;
-+ enum srpt_command_state state;
-+ int ret;
-+ scst_data_direction dir;
-+ int resp_len;
-+
-+ ret = SCST_TGT_RES_SUCCESS;
-+
-+ ioctx = scst_cmd_get_tgt_priv(scmnd);
-+ BUG_ON(!ioctx);
-+
-+ ch = scst_sess_get_tgt_priv(scst_cmd_get_session(scmnd));
-+ BUG_ON(!ch);
-+
-+ spin_lock(&ioctx->spinlock);
-+ state = ioctx->state;
-+ switch (state) {
-+ case SRPT_STATE_NEW:
-+ case SRPT_STATE_DATA_IN:
-+ ioctx->state = SRPT_STATE_CMD_RSP_SENT;
-+ break;
-+ default:
-+ WARN(true, "Unexpected command state %d", state);
-+ break;
-+ }
-+ spin_unlock(&ioctx->spinlock);
-+
-+ if (unlikely(scst_cmd_aborted(scmnd))) {
-+ srpt_adjust_req_lim(ch, 0, 1);
-+ srpt_abort_cmd(ioctx, SCST_CONTEXT_SAME);
-+ goto out;
-+ }
-+
-+ EXTRACHECKS_BUG_ON(scst_cmd_atomic(scmnd));
-+
-+ dir = scst_cmd_get_data_direction(scmnd);
-+
-+ /* For read commands, transfer the data to the initiator. */
-+ if (dir == SCST_DATA_READ
-+ && scst_cmd_get_adjusted_resp_data_len(scmnd)) {
-+ ret = srpt_xfer_data(ch, ioctx, scmnd);
-+ if (unlikely(ret != SCST_TGT_RES_SUCCESS)) {
-+ srpt_set_cmd_state(ioctx, state);
-+ PRINT_WARNING("xfer_data failed for tag %llu"
-+ " - %s", scst_cmd_get_tag(scmnd),
-+ ret == SCST_TGT_RES_QUEUE_FULL ?
-+ "retrying" : "failing");
-+ goto out;
-+ }
-+ }
-+
-+ ioctx->req_lim_delta = srpt_inc_req_lim(ch);
-+ resp_len = srpt_build_cmd_rsp(ch, ioctx,
-+ scst_cmd_get_tag(scmnd),
-+ scst_cmd_get_status(scmnd),
-+ scst_cmd_get_sense_buffer(scmnd),
-+ scst_cmd_get_sense_buffer_len(scmnd));
-+
-+ if (srpt_post_send(ch, ioctx, resp_len)) {
-+ srpt_unmap_sg_to_ib_sge(ch, ioctx);
-+ srpt_set_cmd_state(ioctx, state);
-+ srpt_undo_inc_req_lim(ch, ioctx->req_lim_delta);
-+ PRINT_WARNING("sending response failed for tag %llu - retrying",
-+ scst_cmd_get_tag(scmnd));
-+ ret = SCST_TGT_RES_QUEUE_FULL;
-+ }
-+
-+out:
-+ return ret;
-+}
-+
-+/**
-+ * srpt_tsk_mgmt_done() - SCST callback function that sends back the response
-+ * for a task management request.
-+ *
-+ * Must not block.
-+ */
-+static void srpt_tsk_mgmt_done(struct scst_mgmt_cmd *mcmnd)
-+{
-+ struct srpt_rdma_ch *ch;
-+ struct srpt_send_ioctx *ioctx;
-+ int rsp_len;
-+
-+ ioctx = scst_mgmt_cmd_get_tgt_priv(mcmnd);
-+ BUG_ON(!ioctx);
-+
-+ ch = ioctx->ch;
-+ BUG_ON(!ch);
-+
-+ TRACE_DBG("%s: tsk_mgmt_done for tag= %lld status=%d",
-+ __func__, ioctx->tsk_mgmt.tag,
-+ scst_mgmt_cmd_get_status(mcmnd));
-+
-+ WARN_ON(in_irq());
-+
-+ srpt_set_cmd_state(ioctx, SRPT_STATE_MGMT_RSP_SENT);
-+ WARN_ON(ioctx->state == SRPT_STATE_DONE);
-+
-+ ioctx->req_lim_delta = srpt_inc_req_lim(ch);
-+ rsp_len = srpt_build_tskmgmt_rsp(ch, ioctx,
-+ scst_to_srp_tsk_mgmt_status(
-+ scst_mgmt_cmd_get_status(mcmnd)),
-+ ioctx->tsk_mgmt.tag);
-+ /*
-+ * Note: the srpt_post_send() call below sends the task management
-+ * response asynchronously. It is possible that the SCST core has
-+ * already freed the struct scst_mgmt_cmd structure before the
-+ * response is sent. This is fine however.
-+ */
-+ if (srpt_post_send(ch, ioctx, rsp_len)) {
-+ PRINT_ERROR("%s", "Sending SRP_RSP response failed.");
-+ srpt_put_send_ioctx(ioctx);
-+ srpt_undo_inc_req_lim(ch, ioctx->req_lim_delta);
-+ }
-+}
-+
-+/**
-+ * srpt_get_initiator_port_transport_id() - SCST TransportID callback function.
-+ *
-+ * See also SPC-3, section 7.5.4.5, TransportID for initiator ports using SRP.
-+ */
-+static int srpt_get_initiator_port_transport_id(struct scst_tgt *tgt,
-+ struct scst_session *scst_sess, uint8_t **transport_id)
-+{
-+ struct srpt_rdma_ch *ch;
-+ struct spc_rdma_transport_id {
-+ uint8_t protocol_identifier;
-+ uint8_t reserved[7];
-+ uint8_t i_port_id[16];
-+ };
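-+	/*
-+	 * Per SPC-3 the SRP TransportID is 24 bytes: byte 0 carries the
-+	 * protocol identifier, bytes 1..7 are reserved and bytes 8..23 hold
-+	 * the 128-bit initiator port identifier (hence the BUILD_BUG_ON
-+	 * below).
-+	 */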
-+ struct spc_rdma_transport_id *tr_id;
-+ int res;
-+
-+ TRACE_ENTRY();
-+
-+ if (!scst_sess) {
-+ res = SCSI_TRANSPORTID_PROTOCOLID_SRP;
-+ goto out;
-+ }
-+
-+ ch = scst_sess_get_tgt_priv(scst_sess);
-+ BUG_ON(!ch);
-+
-+ BUILD_BUG_ON(sizeof(*tr_id) != 24);
-+
-+ tr_id = kzalloc(sizeof(struct spc_rdma_transport_id), GFP_KERNEL);
-+ if (!tr_id) {
-+ PRINT_ERROR("%s", "Allocation of TransportID failed");
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ res = 0;
-+ tr_id->protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP;
-+ memcpy(tr_id->i_port_id, ch->i_port_id, sizeof(ch->i_port_id));
-+
-+ *transport_id = (uint8_t *)tr_id;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/**
-+ * srpt_on_free_cmd() - Free command-private data.
-+ *
-+ * Called by the SCST core. May be called in IRQ context.
-+ */
-+static void srpt_on_free_cmd(struct scst_cmd *scmnd)
-+{
-+}
-+
-+static void srpt_refresh_port_work(struct work_struct *work)
-+{
-+ struct srpt_port *sport = container_of(work, struct srpt_port, work);
-+
-+ srpt_refresh_port(sport);
-+}
-+
-+/**
-+ * srpt_detect() - Returns the number of target adapters.
-+ *
-+ * Callback function called by the SCST core.
-+ */
-+static int srpt_detect(struct scst_tgt_template *tp)
-+{
-+ int device_count;
-+
-+ TRACE_ENTRY();
-+
-+ device_count = atomic_read(&srpt_device_count);
-+
-+ TRACE_EXIT_RES(device_count);
-+
-+ return device_count;
-+}
-+
-+static int srpt_ch_list_empty(struct srpt_device *sdev)
-+{
-+ int res;
-+
-+ spin_lock_irq(&sdev->spinlock);
-+ res = list_empty(&sdev->rch_list);
-+ spin_unlock_irq(&sdev->spinlock);
-+
-+ return res;
-+}
-+
-+/**
-+ * srpt_release_sdev() - Free channel resources associated with a target.
-+ */
-+static int srpt_release_sdev(struct srpt_device *sdev)
-+{
-+ struct srpt_rdma_ch *ch, *next_ch;
-+
-+ TRACE_ENTRY();
-+
-+ WARN_ON_ONCE(irqs_disabled());
-+ BUG_ON(!sdev);
-+
-+ spin_lock_irq(&sdev->spinlock);
-+ list_for_each_entry_safe(ch, next_ch, &sdev->rch_list, list)
-+ __srpt_close_ch(ch);
-+ spin_unlock_irq(&sdev->spinlock);
-+
-+ while (wait_event_timeout(sdev->ch_releaseQ,
-+ srpt_ch_list_empty(sdev), 5 * HZ) <= 0) {
-+ PRINT_INFO("%s: waiting for session unregistration ...",
-+ sdev->device->name);
-+ spin_lock_irq(&sdev->spinlock);
-+ list_for_each_entry_safe(ch, next_ch, &sdev->rch_list, list) {
-+ PRINT_INFO("%s: state %s; %d commands in progress",
-+ ch->sess_name, get_ch_state_name(ch->state),
-+ atomic_read(&ch->scst_sess->sess_cmd_count));
-+ }
-+ spin_unlock_irq(&sdev->spinlock);
-+ }
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+/**
-+ * srpt_release() - Free the resources associated with an SCST target.
-+ *
-+ * Callback function called by the SCST core from scst_unregister_target().
-+ */
-+static int srpt_release(struct scst_tgt *scst_tgt)
-+{
-+ struct srpt_device *sdev = scst_tgt_get_tgt_priv(scst_tgt);
-+
-+ TRACE_ENTRY();
-+
-+ EXTRACHECKS_WARN_ON_ONCE(irqs_disabled());
-+
-+ BUG_ON(!scst_tgt);
-+ if (WARN_ON(!sdev))
-+ return -ENODEV;
-+
-+ srpt_release_sdev(sdev);
-+
-+ scst_tgt_set_tgt_priv(scst_tgt, NULL);
-+
-+ TRACE_EXIT();
-+
-+ return 0;
-+}
-+
-+/**
-+ * srpt_get_scsi_transport_version() - Returns the SCSI transport version.
-+ * This function is called from scst_pres.c, the code that implements
-+ * persistent reservation support.
-+ */
-+static uint16_t srpt_get_scsi_transport_version(struct scst_tgt *scst_tgt)
-+{
-+ return 0x0940; /* SRP */
-+}
-+
-+static ssize_t show_login_info(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_tgt *scst_tgt;
-+ struct srpt_device *sdev;
-+ struct srpt_port *sport;
-+ int i;
-+ int len;
-+
-+ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ sdev = scst_tgt_get_tgt_priv(scst_tgt);
-+ len = 0;
-+ for (i = 0; i < sdev->device->phys_port_cnt; i++) {
-+ sport = &sdev->port[i];
-+
-+ len += sprintf(buf + len,
-+ "tid_ext=%016llx,ioc_guid=%016llx,pkey=ffff,"
-+ "dgid=%04x%04x%04x%04x%04x%04x%04x%04x,"
-+ "service_id=%016llx\n",
-+ srpt_service_guid,
-+ srpt_service_guid,
-+ be16_to_cpu(((__be16 *) sport->gid.raw)[0]),
-+ be16_to_cpu(((__be16 *) sport->gid.raw)[1]),
-+ be16_to_cpu(((__be16 *) sport->gid.raw)[2]),
-+ be16_to_cpu(((__be16 *) sport->gid.raw)[3]),
-+ be16_to_cpu(((__be16 *) sport->gid.raw)[4]),
-+ be16_to_cpu(((__be16 *) sport->gid.raw)[5]),
-+ be16_to_cpu(((__be16 *) sport->gid.raw)[6]),
-+ be16_to_cpu(((__be16 *) sport->gid.raw)[7]),
-+ srpt_service_guid);
-+ }
-+
-+ return len;
-+}
-+
-+static struct kobj_attribute srpt_show_login_info_attr =
-+ __ATTR(login_info, S_IRUGO, show_login_info, NULL);
-+
-+static const struct attribute *srpt_tgt_attrs[] = {
-+ &srpt_show_login_info_attr.attr,
-+ NULL
-+};
-+
-+static ssize_t show_req_lim(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_session *scst_sess;
-+ struct srpt_rdma_ch *ch;
-+
-+ scst_sess = container_of(kobj, struct scst_session, sess_kobj);
-+ ch = scst_sess_get_tgt_priv(scst_sess);
-+ if (!ch)
-+ return -ENOENT;
-+ return sprintf(buf, "%d\n", ch->req_lim);
-+}
-+
-+static ssize_t show_req_lim_delta(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_session *scst_sess;
-+ struct srpt_rdma_ch *ch;
-+
-+ scst_sess = container_of(kobj, struct scst_session, sess_kobj);
-+ ch = scst_sess_get_tgt_priv(scst_sess);
-+ if (!ch)
-+ return -ENOENT;
-+ return sprintf(buf, "%d\n", ch->req_lim_delta);
-+}
-+
-+static ssize_t show_ch_state(struct kobject *kobj, struct kobj_attribute *attr,
-+ char *buf)
-+{
-+ struct scst_session *scst_sess;
-+ struct srpt_rdma_ch *ch;
-+
-+ scst_sess = container_of(kobj, struct scst_session, sess_kobj);
-+ ch = scst_sess_get_tgt_priv(scst_sess);
-+ if (!ch)
-+ return -ENOENT;
-+ return sprintf(buf, "%s\n", get_ch_state_name(ch->state));
-+}
-+
-+static const struct kobj_attribute srpt_req_lim_attr =
-+ __ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
-+static const struct kobj_attribute srpt_req_lim_delta_attr =
-+ __ATTR(req_lim_delta, S_IRUGO, show_req_lim_delta, NULL);
-+static const struct kobj_attribute srpt_ch_state_attr =
-+ __ATTR(ch_state, S_IRUGO, show_ch_state, NULL);
-+
-+static const struct attribute *srpt_sess_attrs[] = {
-+ &srpt_req_lim_attr.attr,
-+ &srpt_req_lim_delta_attr.attr,
-+ &srpt_ch_state_attr.attr,
-+ NULL
-+};
-+
-+/* SCST target template for the SRP target implementation. */
-+static struct scst_tgt_template srpt_template = {
-+ .name = DRV_NAME,
-+ .sg_tablesize = SRPT_DEF_SG_TABLESIZE,
-+ .max_hw_pending_time = RDMA_COMPL_TIMEOUT_S,
-+ .enable_target = srpt_enable_target,
-+ .is_target_enabled = srpt_is_target_enabled,
-+ .tgt_attrs = srpt_tgt_attrs,
-+ .sess_attrs = srpt_sess_attrs,
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ .default_trace_flags = DEFAULT_SRPT_TRACE_FLAGS,
-+ .trace_flags = &trace_flag,
-+#endif
-+ .detect = srpt_detect,
-+ .release = srpt_release,
-+ .xmit_response = srpt_xmit_response,
-+ .rdy_to_xfer = srpt_rdy_to_xfer,
-+ .on_hw_pending_cmd_timeout = srpt_pending_cmd_timeout,
-+ .on_free_cmd = srpt_on_free_cmd,
-+ .task_mgmt_fn_done = srpt_tsk_mgmt_done,
-+ .get_initiator_port_transport_id = srpt_get_initiator_port_transport_id,
-+ .get_scsi_transport_version = srpt_get_scsi_transport_version,
-+};
-+
-+/**
-+ * srpt_add_one() - InfiniBand device addition callback function.
-+ */
-+static void srpt_add_one(struct ib_device *device)
-+{
-+ struct srpt_device *sdev;
-+ struct srpt_port *sport;
-+ struct ib_srq_init_attr srq_attr;
-+ char tgt_name[24];
-+ int i, ret;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("device = %p, device->dma_ops = %p", device, device->dma_ops);
-+
-+ sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
-+ if (!sdev)
-+ goto err;
-+
-+ sdev->device = device;
-+ INIT_LIST_HEAD(&sdev->rch_list);
-+ init_waitqueue_head(&sdev->ch_releaseQ);
-+ spin_lock_init(&sdev->spinlock);
-+
-+ if (use_node_guid_in_target_name) {
-+ snprintf(tgt_name, sizeof(tgt_name), "%04x:%04x:%04x:%04x",
-+ be16_to_cpu(((__be16 *)&device->node_guid)[0]),
-+ be16_to_cpu(((__be16 *)&device->node_guid)[1]),
-+ be16_to_cpu(((__be16 *)&device->node_guid)[2]),
-+ be16_to_cpu(((__be16 *)&device->node_guid)[3]));
-+ sdev->scst_tgt = scst_register_target(&srpt_template, tgt_name);
-+ } else
-+ sdev->scst_tgt = scst_register_target(&srpt_template, NULL);
-+ if (!sdev->scst_tgt) {
-+ PRINT_ERROR("SCST registration failed for %s.",
-+ sdev->device->name);
-+ goto free_dev;
-+ }
-+
-+ scst_tgt_set_tgt_priv(sdev->scst_tgt, sdev);
-+
-+ ret = ib_query_device(device, &sdev->dev_attr);
-+ if (ret) {
-+ PRINT_ERROR("ib_query_device() failed: %d", ret);
-+ goto unregister_tgt;
-+ }
-+
-+ sdev->pd = ib_alloc_pd(device);
-+ if (IS_ERR(sdev->pd)) {
-+ PRINT_ERROR("ib_alloc_pd() failed: %ld", PTR_ERR(sdev->pd));
-+ goto unregister_tgt;
-+ }
-+
-+ sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE);
-+ if (IS_ERR(sdev->mr)) {
-+ PRINT_ERROR("ib_get_dma_mr() failed: %ld", PTR_ERR(sdev->mr));
-+ goto err_pd;
-+ }
-+
-+ sdev->srq_size = min(max(srpt_srq_size, MIN_SRPT_SRQ_SIZE),
-+ sdev->dev_attr.max_srq_wr);
-+
-+ memset(&srq_attr, 0, sizeof(srq_attr));
-+ srq_attr.event_handler = srpt_srq_event;
-+ srq_attr.srq_context = (void *)sdev;
-+ srq_attr.attr.max_wr = sdev->srq_size;
-+ srq_attr.attr.max_sge = 1;
-+ srq_attr.attr.srq_limit = 0;
-+ srq_attr.srq_type = IB_SRQT_BASIC;
-+
-+ sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
-+ if (IS_ERR(sdev->srq)) {
-+ PRINT_ERROR("ib_create_srq() failed: %ld", PTR_ERR(sdev->srq));
-+ goto err_mr;
-+ }
-+
-+ TRACE_DBG("%s: create SRQ #wr= %d max_allow=%d dev= %s", __func__,
-+ sdev->srq_size, sdev->dev_attr.max_srq_wr, device->name);
-+
-+ if (!srpt_service_guid)
-+ srpt_service_guid = be64_to_cpu(device->node_guid) &
-+ ~be64_to_cpu(IB_SERVICE_ID_AGN_MASK);
-+
-+ sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
-+ if (IS_ERR(sdev->cm_id)) {
-+ PRINT_ERROR("ib_create_cm_id() failed: %ld",
-+ PTR_ERR(sdev->cm_id));
-+ goto err_srq;
-+ }
-+
-+ /* print out target login information */
-+ TRACE_DBG("Target login info: id_ext=%016llx,"
-+ "ioc_guid=%016llx,pkey=ffff,service_id=%016llx",
-+ srpt_service_guid, srpt_service_guid, srpt_service_guid);
-+
-+ /*
-+	 * We do not have a consistent service_id (i.e. also the id_ext of the
-+	 * target_id) to identify this target. We currently use the GUID of the
-+	 * first HCA in the system as the service_id; therefore, the target_id
-+	 * will change if this HCA goes bad and is replaced by a different HCA.
-+ */
-+ ret = ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0,
-+ NULL);
-+ if (ret) {
-+ PRINT_ERROR("ib_cm_listen() failed: %d (cm_id state = %d)",
-+ ret, sdev->cm_id->state);
-+ goto err_cm;
-+ }
-+
-+ INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
-+ srpt_event_handler);
-+ ret = ib_register_event_handler(&sdev->event_handler);
-+ if (ret) {
-+ PRINT_ERROR("ib_register_event_handler() failed: %d", ret);
-+ goto err_cm;
-+ }
-+
-+ sdev->ioctx_ring = (struct srpt_recv_ioctx **)
-+ srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
-+ sizeof(*sdev->ioctx_ring[0]),
-+ srp_max_req_size, DMA_FROM_DEVICE);
-+ if (!sdev->ioctx_ring) {
-+ PRINT_ERROR("%s", "srpt_alloc_ioctx_ring() failed");
-+ goto err_event;
-+ }
-+
-+ for (i = 0; i < sdev->srq_size; ++i)
-+ srpt_post_recv(sdev, sdev->ioctx_ring[i]);
-+
-+ WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port));
-+
-+ for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
-+ sport = &sdev->port[i - 1];
-+ sport->sdev = sdev;
-+ sport->port = i;
-+ INIT_WORK(&sport->work, srpt_refresh_port_work);
-+ if (srpt_refresh_port(sport)) {
-+ PRINT_ERROR("MAD registration failed for %s-%d.",
-+ sdev->device->name, i);
-+ goto err_ring;
-+ }
-+ }
-+
-+ atomic_inc(&srpt_device_count);
-+out:
-+ ib_set_client_data(device, &srpt_client, sdev);
-+
-+ TRACE_EXIT();
-+ return;
-+
-+err_ring:
-+ srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
-+ sdev->srq_size, srp_max_req_size,
-+ DMA_FROM_DEVICE);
-+err_event:
-+ ib_unregister_event_handler(&sdev->event_handler);
-+err_cm:
-+ ib_destroy_cm_id(sdev->cm_id);
-+err_srq:
-+ ib_destroy_srq(sdev->srq);
-+err_mr:
-+ ib_dereg_mr(sdev->mr);
-+err_pd:
-+ ib_dealloc_pd(sdev->pd);
-+unregister_tgt:
-+ scst_unregister_target(sdev->scst_tgt);
-+free_dev:
-+ kfree(sdev);
-+err:
-+ sdev = NULL;
-+ PRINT_INFO("%s(%s) failed.", __func__, device->name);
-+ goto out;
-+}
-+
-+/**
-+ * srpt_remove_one() - InfiniBand device removal callback function.
-+ */
-+static void srpt_remove_one(struct ib_device *device)
-+{
-+ int i;
-+ struct srpt_device *sdev;
-+
-+ TRACE_ENTRY();
-+
-+ sdev = ib_get_client_data(device, &srpt_client);
-+ if (!sdev) {
-+ PRINT_INFO("%s(%s): nothing to do.", __func__, device->name);
-+ return;
-+ }
-+
-+ srpt_unregister_mad_agent(sdev);
-+
-+ ib_unregister_event_handler(&sdev->event_handler);
-+
-+ /* Cancel any work queued by the just unregistered IB event handler. */
-+ for (i = 0; i < sdev->device->phys_port_cnt; i++)
-+ cancel_work_sync(&sdev->port[i].work);
-+
-+ ib_destroy_cm_id(sdev->cm_id);
-+
-+ /*
-+ * Unregistering an SCST target must happen after destroying sdev->cm_id
-+ * such that no new SRP_LOGIN_REQ information units can arrive while
-+ * destroying the SCST target.
-+ */
-+ scst_unregister_target(sdev->scst_tgt);
-+ sdev->scst_tgt = NULL;
-+
-+ ib_destroy_srq(sdev->srq);
-+ ib_dereg_mr(sdev->mr);
-+ ib_dealloc_pd(sdev->pd);
-+
-+ srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
-+ sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
-+ sdev->ioctx_ring = NULL;
-+ kfree(sdev);
-+
-+ TRACE_EXIT();
-+}
-+
-+static struct ib_client srpt_client = {
-+ .name = DRV_NAME,
-+ .add = srpt_add_one,
-+ .remove = srpt_remove_one
-+};
-+
-+/**
-+ * srpt_init_module() - Kernel module initialization.
-+ *
-+ * Note: Since ib_register_client() registers callback functions, and since at
-+ * least one of these callback functions (srpt_add_one()) calls SCST functions,
-+ * the SCST target template must be registered before ib_register_client() is
-+ * called.
-+ */
-+static int __init srpt_init_module(void)
-+{
-+ int ret;
-+
-+ ret = -EINVAL;
-+ if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
-+ PRINT_ERROR("invalid value %d for kernel module parameter"
-+ " srp_max_req_size -- must be at least %d.",
-+ srp_max_req_size,
-+ MIN_MAX_REQ_SIZE);
-+ goto out;
-+ }
-+
-+ if (srp_max_rsp_size < MIN_MAX_RSP_SIZE) {
-+ PRINT_ERROR("invalid value %d for kernel module parameter"
-+ " srp_max_rsp_size -- must be at least %d.",
-+ srp_max_rsp_size,
-+ MIN_MAX_RSP_SIZE);
-+ goto out;
-+ }
-+
-+ if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
-+ || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
-+ PRINT_ERROR("invalid value %d for kernel module parameter"
-+ " srpt_srq_size -- must be in the range [%d..%d].",
-+ srpt_srq_size, MIN_SRPT_SRQ_SIZE,
-+ MAX_SRPT_SRQ_SIZE);
-+ goto out;
-+ }
-+
-+ if (srpt_sq_size < MIN_SRPT_SQ_SIZE) {
-+ PRINT_ERROR("invalid value %d for kernel module parameter"
-+ " srpt_sq_size -- must be at least %d.",
-+			    srpt_sq_size, MIN_SRPT_SQ_SIZE);
-+ goto out;
-+ }
-+
-+ if (!use_node_guid_in_target_name)
-+ PRINT_WARNING("%s", "Usage of HCA numbers as SCST target names "
-+ "is deprecated and will be removed in one of the next "
-+			"versions. It is strongly recommended to set the "
-+			"use_node_guid_in_target_name parameter to 1 and "
-+			"update your SCST config file accordingly to use HCA "
-+			"GUIDs.");
-+
-+ ret = scst_register_target_template(&srpt_template);
-+ if (ret < 0) {
-+ PRINT_ERROR("%s", "couldn't register with scst");
-+ ret = -ENODEV;
-+ goto out;
-+ }
-+
-+ ret = ib_register_client(&srpt_client);
-+ if (ret) {
-+ PRINT_ERROR("%s", "couldn't register IB client");
-+ goto out_unregister_target;
-+ }
-+
-+ return 0;
-+
-+out_unregister_target:
-+ scst_unregister_target_template(&srpt_template);
-+out:
-+ return ret;
-+}
-+
-+static void __exit srpt_cleanup_module(void)
-+{
-+ TRACE_ENTRY();
-+
-+ ib_unregister_client(&srpt_client);
-+ scst_unregister_target_template(&srpt_template);
-+
-+ TRACE_EXIT();
-+}
-+
-+module_init(srpt_init_module);
-+module_exit(srpt_cleanup_module);
-+
-+/*
-+ * Local variables:
-+ * c-basic-offset: 8
-+ * indent-tabs-mode: t
-+ * End:
-+ */
-diff -uprN orig/linux-3.2/drivers/scst/srpt/ib_srpt.h linux-3.2/drivers/scst/srpt/ib_srpt.h
---- orig/linux-3.2/drivers/scst/srpt/ib_srpt.h
-+++ linux-3.2/drivers/scst/srpt/ib_srpt.h
-@@ -0,0 +1,407 @@
-+/*
-+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
-+ * Copyright (C) 2009 - 2011 Bart Van Assche <bvanassche@acm.org>.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses. You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ * Redistribution and use in source and binary forms, with or
-+ * without modification, are permitted provided that the following
-+ * conditions are met:
-+ *
-+ * - Redistributions of source code must retain the above
-+ * copyright notice, this list of conditions and the following
-+ * disclaimer.
-+ *
-+ * - Redistributions in binary form must reproduce the above
-+ * copyright notice, this list of conditions and the following
-+ * disclaimer in the documentation and/or other materials
-+ * provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#ifndef IB_SRPT_H
-+#define IB_SRPT_H
-+
-+#include <linux/types.h>
-+#include <linux/list.h>
-+#include <linux/wait.h>
-+#include <rdma/ib_verbs.h>
-+#include <rdma/ib_sa.h>
-+#include <rdma/ib_cm.h>
-+#include <scsi/srp.h>
-+#include <scst/scst.h>
-+#include "ib_dm_mad.h"
-+
-+/*
-+ * The prefix the ServiceName field must start with in the device management
-+ * ServiceEntries attribute pair. See also the SRP specification.
-+ */
-+#define SRP_SERVICE_NAME_PREFIX "SRP.T10:"
-+
-+enum {
-+ /*
-+ * SRP IOControllerProfile attributes for SRP target ports that have
-+ * not been defined in <scsi/srp.h>. Source: section B.7, table B.7
-+ * in the SRP specification.
-+ */
-+ SRP_PROTOCOL = 0x0108,
-+ SRP_PROTOCOL_VERSION = 0x0001,
-+ SRP_IO_SUBCLASS = 0x609e,
-+ SRP_SEND_TO_IOC = 0x01,
-+ SRP_SEND_FROM_IOC = 0x02,
-+ SRP_RDMA_READ_FROM_IOC = 0x08,
-+ SRP_RDMA_WRITE_FROM_IOC = 0x20,
-+
-+ /*
-+ * srp_login_cmd.req_flags bitmasks. See also table 9 in the SRP r16a
-+ * document.
-+ */
-+ SRP_MTCH_ACTION = 0x03, /* MULTI-CHANNEL ACTION */
-+ SRP_LOSOLNT = 0x10, /* logout solicited notification */
-+ SRP_CRSOLNT = 0x20, /* credit request solicited notification */
-+ SRP_AESOLNT = 0x40, /* asynchronous event solicited notification */
-+
-+ /*
-+ * srp_cmd.sol_nt / srp_tsk_mgmt.sol_not bitmasks. See also tables
-+ * 18 and 20 in the SRP specification.
-+ */
-+ SRP_SCSOLNT = 0x02, /* SCSOLNT = successful solicited notification */
-+ SRP_UCSOLNT = 0x04, /* UCSOLNT = unsuccessful solicited notification */
-+
-+ /*
-+ * srp_rsp.sol_not / srp_t_logout.sol_not bitmasks. See also tables
-+ * 16 and 22 in the SRP specification.
-+ */
-+ SRP_SOLNT = 0x01, /* SOLNT = solicited notification */
-+
-+ /* See also table 24 in the SRP specification. */
-+ SRP_TSK_MGMT_SUCCESS = 0x00,
-+ SRP_TSK_MGMT_FUNC_NOT_SUPP = 0x04,
-+ SRP_TSK_MGMT_FAILED = 0x05,
-+
-+ /* See also table 21 in the SRP specification. */
-+ SRP_CMD_SIMPLE_Q = 0x0,
-+ SRP_CMD_HEAD_OF_Q = 0x1,
-+ SRP_CMD_ORDERED_Q = 0x2,
-+ SRP_CMD_ACA = 0x4,
-+
-+ SRP_LOGIN_RSP_MULTICHAN_NO_CHAN = 0x0,
-+ SRP_LOGIN_RSP_MULTICHAN_TERMINATED = 0x1,
-+ SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
-+
-+ SRPT_DEF_SG_TABLESIZE = 128,
-+
-+ MIN_SRPT_SQ_SIZE = 16,
-+ DEF_SRPT_SQ_SIZE = 4096,
-+ SRPT_RQ_SIZE = 128,
-+ MIN_SRPT_SRQ_SIZE = 4,
-+ DEFAULT_SRPT_SRQ_SIZE = 4095,
-+ MAX_SRPT_SRQ_SIZE = 65535,
-+
-+ MIN_MAX_REQ_SIZE = 996,
-+ DEFAULT_MAX_REQ_SIZE
-+ = sizeof(struct srp_cmd)/*48*/
-+ + sizeof(struct srp_indirect_buf)/*20*/
-+ + 255 * sizeof(struct srp_direct_buf)/*16*/,
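-+	/*
-+	 * Worked out: 48 + 20 + 255 * 16 = 4148 bytes, i.e. room for an
-+	 * SRP_CMD IU with an indirect descriptor list of up to 255 data
-+	 * buffer descriptors.
-+	 */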
-+
-+ MIN_MAX_RSP_SIZE = sizeof(struct srp_rsp)/*36*/ + 4,
-+ DEFAULT_MAX_RSP_SIZE = 256, /* leaves 220 bytes for sense data */
-+
-+ DEFAULT_MAX_RDMA_SIZE = 65536,
-+
-+ RDMA_COMPL_TIMEOUT_S = 80,
-+};
-+
-+enum srpt_opcode {
-+ SRPT_RECV,
-+ SRPT_SEND,
-+ SRPT_RDMA_MID,
-+ SRPT_RDMA_ABORT,
-+ SRPT_RDMA_READ_LAST,
-+ SRPT_RDMA_WRITE_LAST,
-+};
-+
-+static inline u64 encode_wr_id(enum srpt_opcode opcode, u32 idx)
-+{
-+ return ((u64)opcode << 32) | idx;
-+}
-+
-+static inline enum srpt_opcode opcode_from_wr_id(u64 wr_id)
-+{
-+ return wr_id >> 32;
-+}
-+
-+static inline u32 idx_from_wr_id(u64 wr_id)
-+{
-+ return (u32)wr_id;
-+}
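-+
-+/*
-+ * A minimal usage sketch of the three helpers above: the opcode lives in
-+ * the upper 32 bits of the work request ID and the ioctx index in the
-+ * lower 32 bits, so both survive the round trip through the IB stack:
-+ *
-+ *	u64 wr_id = encode_wr_id(SRPT_SEND, 5);
-+ *	opcode_from_wr_id(wr_id) == SRPT_SEND
-+ *	idx_from_wr_id(wr_id) == 5
-+ */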
-+
-+struct rdma_iu {
-+ u64 raddr;
-+ u32 rkey;
-+ struct ib_sge *sge;
-+ u32 sge_cnt;
-+};
-+
-+/**
-+ * enum srpt_command_state - SCSI command state managed by SRPT.
-+ * @SRPT_STATE_NEW: New command arrived and is being processed.
-+ * @SRPT_STATE_NEED_DATA: Processing a write or bidir command and waiting
-+ * for data arrival.
-+ * @SRPT_STATE_DATA_IN: Data for the write or bidir command arrived and is
-+ * being processed.
-+ * @SRPT_STATE_CMD_RSP_SENT: SRP_RSP for SRP_CMD has been sent.
-+ * @SRPT_STATE_MGMT: Processing a SCSI task management command.
-+ * @SRPT_STATE_MGMT_RSP_SENT: SRP_RSP for SRP_TSK_MGMT has been sent.
-+ * @SRPT_STATE_DONE: Command processing finished successfully, command
-+ * processing has been aborted or command processing
-+ * failed.
-+ */
-+enum srpt_command_state {
-+ SRPT_STATE_NEW = 0,
-+ SRPT_STATE_NEED_DATA = 1,
-+ SRPT_STATE_DATA_IN = 2,
-+ SRPT_STATE_CMD_RSP_SENT = 3,
-+ SRPT_STATE_MGMT = 4,
-+ SRPT_STATE_MGMT_RSP_SENT = 5,
-+ SRPT_STATE_DONE = 6,
-+};
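-+
-+/*
-+ * Typical state sequence for a write command, as a sketch inferred from
-+ * the callbacks in ib_srpt.c (not an exhaustive transition list):
-+ * SRPT_STATE_NEW -> SRPT_STATE_NEED_DATA -> SRPT_STATE_DATA_IN ->
-+ * SRPT_STATE_CMD_RSP_SENT -> SRPT_STATE_DONE.
-+ */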
-+
-+/**
-+ * struct srpt_ioctx - Shared SRPT I/O context information.
-+ * @buf: Pointer to the buffer.
-+ * @dma: DMA address of the buffer.
-+ * @index: Index of the I/O context in its ioctx_ring array.
-+ */
-+struct srpt_ioctx {
-+ void *buf;
-+ dma_addr_t dma;
-+ uint32_t index;
-+};
-+
-+/**
-+ * struct srpt_recv_ioctx - SRPT receive I/O context.
-+ * @ioctx: See above.
-+ * @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list.
-+ */
-+struct srpt_recv_ioctx {
-+ struct srpt_ioctx ioctx;
-+ struct list_head wait_list;
-+};
-+
-+/**
-+ * struct srpt_tsk_mgmt - SCST management command context information.
-+ * @tag: SCSI tag of the management command.
-+ */
-+struct srpt_tsk_mgmt {
-+ u64 tag;
-+};
-+
-+/**
-+ * struct srpt_send_ioctx - SRPT send I/O context.
-+ * @ioctx: See above.
-+ * @ch: Channel pointer.
-+ * @rdma_ius: Array with information about the RDMA mapping.
-+ * @rbufs: Pointer to SRP data buffer array.
-+ * @single_rbuf: SRP data buffer if the command has only a single buffer.
-+ * @sg: Pointer to sg-list associated with this I/O context.
-+ * @spinlock: Protects 'state'.
-+ * @state: I/O context state.
-+ * @rdma_aborted: If initiating a multipart RDMA transfer failed, whether
-+ * the already initiated transfers have finished.
-+ * @scmnd: SCST command data structure.
-+ * @dir:		Data direction of the command, cached because SCST
-+ *			can reset scmnd->data_direction.
-+ * @free_list: Node in srpt_rdma_ch.free_list.
-+ * @sg_cnt: SG-list size.
-+ * @mapped_sg_count: ib_dma_map_sg() return value.
-+ * @n_rdma_ius: Size of the rdma_ius array.
-+ * @n_rdma: Number of elements used of the rdma_ius array.
-+ * @n_rbuf: Number of data buffers in the received SRP command.
-+ * @req_lim_delta:	Value of the req_lim_delta field in the latest
-+ *			SRP response sent.
-+ * @tsk_mgmt:		Task management command context (see struct srpt_tsk_mgmt).
-+ */
-+struct srpt_send_ioctx {
-+ struct srpt_ioctx ioctx;
-+ struct srpt_rdma_ch *ch;
-+ struct rdma_iu *rdma_ius;
-+ struct srp_direct_buf *rbufs;
-+ struct srp_direct_buf single_rbuf;
-+ struct scatterlist *sg;
-+ struct list_head free_list;
-+ spinlock_t spinlock;
-+ enum srpt_command_state state;
-+ bool rdma_aborted;
-+ struct scst_cmd *scmnd;
-+ scst_data_direction dir;
-+ int sg_cnt;
-+ int mapped_sg_count;
-+ u16 n_rdma_ius;
-+ u8 n_rdma;
-+ u8 n_rbuf;
-+ int req_lim_delta;
-+ struct srpt_tsk_mgmt tsk_mgmt;
-+ u8 rdma_ius_buf[2 * sizeof(struct rdma_iu)
-+ + 2 * sizeof(struct ib_sge)]
-+ __aligned(sizeof(uint64_t));
-+};
-+
-+/**
-+ * enum rdma_ch_state - SRP channel state.
-+ * @CH_CONNECTING: QP is in RTR state; waiting for RTU.
-+ * @CH_LIVE: QP is in RTS state.
-+ * @CH_DISCONNECTING: DREQ has been received and waiting for DREP or DREQ has
-+ * been sent and waiting for DREP or channel is being closed
-+ * for another reason.
-+ * @CH_DRAINING: QP is in ERR state.
-+ * @CH_FREEING: QP resources are being freed.
-+ */
-+enum rdma_ch_state {
-+ CH_CONNECTING,
-+ CH_LIVE,
-+ CH_DISCONNECTING,
-+ CH_DRAINING,
-+ CH_FREEING,
-+};
-+
-+/**
-+ * struct srpt_rdma_ch - RDMA channel.
-+ * @thread: Kernel thread that processes the IB queues associated with
-+ * the channel.
-+ * @cm_id: IB CM ID associated with the channel.
-+ * @qp: IB queue pair used for communicating over this channel.
-+ * @cq: IB completion queue for this channel.
-+ * @rq_size: IB receive queue size.
-+ * @max_sge: Maximum length of RDMA scatter list.
-+ * @sq_wr_avail: number of work requests available in the send queue.
-+ * @sport: pointer to the information of the HCA port used by this
-+ * channel.
-+ * @i_port_id: 128-bit initiator port identifier copied from SRP_LOGIN_REQ.
-+ * @t_port_id: 128-bit target port identifier copied from SRP_LOGIN_REQ.
-+ * @max_ti_iu_len: maximum target-to-initiator information unit length.
-+ * @req_lim: request limit: maximum number of requests that may be sent
-+ * by the initiator without having received a response.
-+ * @req_lim_delta: One less than the req_lim_delta value that will be included
-+ * in the next reply sent to the initiator. See also the SRP
-+ * credit algorithm in the SRP spec.
-+ * @spinlock: Protects free_list.
-+ * @free_list: Head of list with free send I/O contexts.
-+ * @ioctx_ring:	Ring of send I/O contexts for this channel.
-+ * @wc:		Work completion array used for polling the completion queue.
-+ * @state: channel state. See also enum rdma_ch_state.
-+ * @list: node for insertion in the srpt_device.rch_list list.
-+ * @cmd_wait_list: list of SCST commands that arrived before the RTU event. This
-+ * list contains struct srpt_ioctx elements and is protected
-+ * against concurrent modification by the cm_id spinlock.
-+ * @scst_sess: SCST session information associated with this SRP channel.
-+ * @sess_name: SCST session name.
-+ */
-+struct srpt_rdma_ch {
-+ struct task_struct *thread;
-+ struct ib_cm_id *cm_id;
-+ struct ib_qp *qp;
-+ struct ib_cq *cq;
-+ int rq_size;
-+ int max_sge;
-+ int max_rsp_size;
-+ int sq_wr_avail;
-+ struct srpt_port *sport;
-+ u8 i_port_id[16];
-+ u8 t_port_id[16];
-+ int max_ti_iu_len;
-+ int req_lim;
-+ int req_lim_delta;
-+ spinlock_t spinlock;
-+ struct list_head free_list;
-+ struct srpt_send_ioctx **ioctx_ring;
-+ struct ib_wc wc[16];
-+ enum rdma_ch_state state;
-+ wait_queue_head_t state_wq;
-+ struct list_head list;
-+ struct list_head cmd_wait_list;
-+ struct completion finished_processing_completions;
-+ bool last_wqe_received;
-+
-+ struct scst_session *scst_sess;
-+ u8 sess_name[36];
-+};
-+
-+/**
-+ * struct srpt_port - Information associated by SRPT with a single IB port.
-+ * @sdev: backpointer to the HCA information.
-+ * @mad_agent: per-port management datagram processing information.
-+ * @port: one-based port number.
-+ * @sm_lid: cached value of the port's sm_lid.
-+ * @lid: cached value of the port's lid.
-+ * @gid: cached value of the port's gid.
-+ * @work: work structure for refreshing the aforementioned cached values.
-+ */
-+struct srpt_port {
-+ struct srpt_device *sdev;
-+ struct ib_mad_agent *mad_agent;
-+ u8 port;
-+ u16 sm_lid;
-+ u16 lid;
-+ union ib_gid gid;
-+ struct work_struct work;
-+};
-+
-+/**
-+ * struct srpt_device - Information associated by SRPT with a single HCA.
-+ * @device: Backpointer to the struct ib_device managed by the IB core.
-+ * @pd: IB protection domain.
-+ * @mr: L_Key (local key) with write access to all local memory.
-+ * @srq: Per-HCA SRQ (shared receive queue).
-+ * @cm_id: Connection identifier.
-+ * @dev_attr: Attributes of the InfiniBand device as obtained during the
-+ * ib_client.add() callback.
-+ * @srq_size: SRQ size.
-+ * @ioctx_ring:	Per-HCA ring of receive I/O contexts.
-+ * @rch_list: Per-device channel list -- see also srpt_rdma_ch.list.
-+ * @ch_releaseQ: Enables waiting for removal from rch_list.
-+ * @spinlock: Protects rch_list.
-+ * @port: Information about the ports owned by this HCA.
-+ * @event_handler: Per-HCA asynchronous IB event handler.
-+ * @scst_tgt: SCST target information associated with this HCA.
-+ * @enabled: Whether or not this SCST target is enabled.
-+ */
-+struct srpt_device {
-+ struct ib_device *device;
-+ struct ib_pd *pd;
-+ struct ib_mr *mr;
-+ struct ib_srq *srq;
-+ struct ib_cm_id *cm_id;
-+ struct ib_device_attr dev_attr;
-+ int srq_size;
-+ struct srpt_recv_ioctx **ioctx_ring;
-+ struct list_head rch_list;
-+ wait_queue_head_t ch_releaseQ;
-+ spinlock_t spinlock;
-+ struct srpt_port port[2];
-+ struct ib_event_handler event_handler;
-+ struct scst_tgt *scst_tgt;
-+ bool enabled;
-+};
-+
-+#endif /* IB_SRPT_H */
-+
-+/*
-+ * Local variables:
-+ * c-basic-offset: 8
-+ * indent-tabs-mode: t
-+ * End:
-+ */
-diff -uprN orig/linux-3.2/Documentation/scst/README.scst_local linux-3.2/Documentation/scst/README.scst_local
---- orig/linux-3.2/Documentation/scst/README.scst_local
-+++ linux-3.2/Documentation/scst/README.scst_local
-@@ -0,0 +1,286 @@
-+SCST Local ...
-+Richard Sharpe, 30-Nov-2008
-+
-+This is the SCST Local driver. Its function is to allow you to access devices
-+that are exported via SCST directly on the same Linux system that they are
-+exported from.
-+
-+No assumptions are made in the code about the device types on the target, so
-+any device handlers that you load in SCST should be visible, including tapes
-+and so forth.
-+
-+You can freely use any sg, sd, st, etc. devices imported from the
-+target, with one exception: for all dev handlers other than BLOCKIO and
-+pass-through you cannot mount file systems or put swap on them, because
-+doing so can lead to a recursive memory allocation deadlock. This is a
-+limitation of the Linux memory/cache manager. See the SCST README file
-+for details. For the BLOCKIO and pass-through dev handlers there is no
-+such limitation, so you can freely mount file systems over them.
-+
-+To build, simply issue 'make' in the scst_local directory.
-+
-+Try 'modinfo scst_local' for a listing of module parameters so far.
-+
-+Here is how I have used it so far:
-+
-+1. Load up scst:
-+
-+ modprobe scst
-+ modprobe scst_vdisk
-+
-+2. Create a virtual disk (or your own device handler):
-+
-+ dd if=/dev/zero of=/some/path/vdisk1.img bs=16384 count=1000000
-+ echo "add_device vm_disk1 filename=/some/path/vdisk1.img" >/sys/kernel/scst_tgt/handlers/vdisk_fileio/mgmt
-+
-+3. Load the scst_local driver:
-+
-+ insmod scst_local
-+ echo "add vm_disk1 0" >/sys/kernel/scst_tgt/targets/scst_local/scst_local_tgt/luns/mgmt
-+
-+4. Check what you have
-+
-+ cat /proc/scsi/scsi
-+ Attached devices:
-+ Host: scsi0 Channel: 00 Id: 00 Lun: 00
-+ Vendor: ATA Model: ST9320320AS Rev: 0303
-+ Type: Direct-Access ANSI SCSI revision: 05
-+ Host: scsi4 Channel: 00 Id: 00 Lun: 00
-+ Vendor: TSSTcorp Model: CD/DVDW TS-L632D Rev: TO04
-+ Type: CD-ROM ANSI SCSI revision: 05
-+ Host: scsi7 Channel: 00 Id: 00 Lun: 00
-+ Vendor: SCST_FIO Model: vm_disk1 Rev: 200
-+ Type: Direct-Access ANSI SCSI revision: 04
-+
-+Or instead of manually "add_device" in (2) and step (3) write a
-+scstadmin config:
-+
-+HANDLER vdisk_fileio {
-+ DEVICE vm_disk1 {
-+ filename /some/path/vdisk1.img
-+ }
-+}
-+
-+TARGET_DRIVER scst_local {
-+ TARGET scst_local_tgt {
-+ LUN 0 vm_disk1
-+ }
-+}
-+
-+then:
-+
-+ insmod scst_local
-+ scstadmin -config conf_file.cfg
-+
-+More advanced examples:
-+
-+For (3) you can:
-+
-+ insmod scst_local add_default_tgt=0
-+ echo "add_target scst_local_tgt session_name=scst_local_host" >/sys/kernel/scst_tgt/targets/scst_local//mgmt
-+ echo "add vm_disk1 0" >/sys/kernel/scst_tgt/targets/scst_local/scst_local_tgt/luns/mgmt
-+
-+The scst_local module parameter add_default_tgt=0 disables creation of
-+the default target "scst_local_tgt" and session "scst_local_host", so
-+you need to create them manually.
-+
-+There can be any number of targets and sessions created. Each SCST
-+session corresponds to a SCSI host. You can change which LUNs are
-+assigned to each session by using SCST access control. This mode is
-+intended for user space target drivers (see below).
-+
-+Alternatively, you can write an scstadmin config file conf_file.cfg:
-+
-+HANDLER vdisk_fileio {
-+ DEVICE vm_disk1 {
-+ filename /some/path/vdisk1.img
-+ }
-+}
-+
-+TARGET_DRIVER scst_local {
-+ TARGET scst_local_tgt {
-+ session_name scst_local_host
-+
-+ LUN 0 vm_disk1
-+ }
-+}
-+
-+then:
-+
-+ insmod scst_local add_default_tgt=0
-+ scstadmin -config conf_file.cfg
-+
-+NOTE! Although scstadmin allows creating scst_local sessions using the
-+"session_name" expression, it does not save existing sessions when
-+writing the config file with the "write_config" command. If you need
-+this functionality, feel free to request it on the SCST development
-+mailing list.
-+
-+5. Have fun.
-+
-+Some of this was coded while in Santa Clara, some in Bangalore, and some in
-+Hyderabad. No doubt some will be coded on the way back to Santa Clara.
-+
-+The code still has bugs, so if you encounter any, email me the fixes at:
-+
-+ realrichardsharpe@gmail.com
-+
-+I am thinking of renaming this to something more interesting.
-+
-+
-+Sysfs interface
-+===============
-+
-+See SCST's README for a common SCST sysfs description.
-+
-+Root of this driver is /sys/kernel/scst_tgt/targets/scst_local. It has
-+the following additional entry:
-+
-+ - stats - read-only attribute with some statistical information.
-+
-+Each target subdirectory contains the following additional entries:
-+
-+ - phys_transport_version - contains and allows changing the physical
-+ transport version descriptor. It determines which physical interface
-+ this target appears as. See SPC for more details. By default, it is
-+ not defined (0).
-+
-+ - scsi_transport_version - contains and allows changing the SCSI
-+ transport version descriptor. It determines which SCSI transport
-+ this target appears as. See SPC for more details. By default, it is
-+ SAS.
-+
-+Each session subdirectory contains the following additional entries:
-+
-+ - transport_id - contains this host's TransportID. This TransportID is
-+ used to identify the initiator in Persistent Reservation commands. If
-+ you change scsi_transport_version for a target, make sure you set a
-+ correct TransportID for all its sessions. See SPC for more details.
-+
-+ - host - links to the corresponding SCSI host. Using it you can find
-+ the local sg/bsg/sd/etc. devices of this session. For instance, if
-+ this link points to host12, you can find your sg devices with:
-+
-+$ lsscsi -g|grep "\[12:"
-+[12:0:0:0] disk SCST_FIO rd1 200 /dev/sdc /dev/sg2
-+[12:0:0:1] disk SCST_FIO nullio 200 /dev/sdd /dev/sg3
-+
-+They are /dev/sg2 and /dev/sg3.
-+
-+The following management commands are available via /sys/kernel/scst_tgt/targets/scst_local/mgmt:
-+
-+ - add_target target_name [session_name=sess_name; [session_name=sess_name1;] [...]] -
-+ creates a target with optionally one or more sessions.
-+
-+ - del_target target_name - deletes a target.
-+
-+ - add_session target_name session_name - adds a session (SCSI host)
-+ with name session_name to target target_name.
-+
-+ - del_session target_name session_name - deletes session session_name
-+ from target target_name.
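-+
-+For example, a complete add/remove cycle (the target and session names
-+here are illustrative):
-+
-+ echo "add_target tgt1 session_name=host1" >/sys/kernel/scst_tgt/targets/scst_local/mgmt
-+ echo "add_session tgt1 host2" >/sys/kernel/scst_tgt/targets/scst_local/mgmt
-+ echo "del_session tgt1 host2" >/sys/kernel/scst_tgt/targets/scst_local/mgmt
-+ echo "del_target tgt1" >/sys/kernel/scst_tgt/targets/scst_local/mgmt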
-+
-+
-+Note on performance
-+===================
-+
-+Although this driver is implemented in a performance-effective way,
-+including zero-copy data passing between the SCSI/block subsystems and
-+SCST, in many cases it is NOT suited for measuring performance as a
-+NULL link. For example, it is not suited for maximum IOPS measurements,
-+because in such cases the bottleneck is not the performance of the link
-+between the target and initiator, but the CPU or memory speed on the
-+target or initiator. With scst_local both initiator and target run on
-+the same system, which leaves each of them with considerably less CPU
-+and memory to work with.
-+
-+
-+User space target drivers
-+=========================
-+
-+Scst_local can be used to write full featured SCST target drivers in
-+user space:
-+
-+1. For each SCSI target the user space target driver should create an
-+ scst_local target using the "add_target" command.
-+
-+2. Then the user space target driver should, if needed, set its SCSI and
-+ physical transport version descriptors using the attributes
-+ scsi_transport_version and phys_transport_version respectively in the
-+ /sys/kernel/scst_tgt/targets/scst_local/target_name directory.
-+
-+3. For each incoming session (I_T nexus) from an initiator the user space
-+ target driver should create an scst_local session using the
-+ "add_session" command.
-+
-+4. Then, if needed, the user space target driver should set TransportID
-+ for this session (I_T nexus) using attribute
-+ /sys/kernel/scst_tgt/targets/scst_local/target_name/sessions/session_name/transport_id
-+
-+5. Then the user space target driver should find the sg/bsg devices for
-+ the LUNs of the created session using the link
-+ /sys/kernel/scst_tgt/targets/scst_local/target_name/sessions/session_name/host
-+ as described above.
-+
-+6. Then the user space target driver can start serving the initiator
-+ using the sg/bsg devices found in step 5.
-+
-+For each additional connected initiator, steps 3-6 should be repeated;
-+see the sketch below.
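-+
-+A minimal sketch of steps 1-5 from the shell (all names illustrative):
-+
-+ echo "add_target my_tgt" >/sys/kernel/scst_tgt/targets/scst_local/mgmt
-+ echo "add_session my_tgt nexus_1" >/sys/kernel/scst_tgt/targets/scst_local/mgmt
-+ readlink /sys/kernel/scst_tgt/targets/scst_local/my_tgt/sessions/nexus_1/host
-+ lsscsi -g | grep "\[12:" # assuming the link above pointed to host12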
-+
-+
-+Compilation options
-+===================
-+
-+There are the following compilation options, that could be commented
-+in/out in Makefile:
-+
-+ - CONFIG_SCST_LOCAL_FORCE_DIRECT_PROCESSING - by default, when this option
-+ is not defined, scst_local reschedules all commands for processing in
-+ one of the SCST threads. If this option is defined, scst_local tries,
-+ where possible, to process commands in the submitter's context instead
-+ (queuecommand() is sometimes called with various locks held, so this is
-+ not always possible). This is meant to increase performance, but since
-+ the Linux block layer on 2.6.37 and below does not support this kind of
-+ reentrance, the option is disabled by default. Note! At the moment
-+ returning DIRECT contexts is disabled in scst_estimate_context*(), so
-+ this option does not have any real effect.
-+
-+
-+Change log
-+==========
-+
-+V0.1 24-Sep-2008 (Hyderabad) Initial coding, pretty chatty and messy,
-+ but worked.
-+
-+V0.2 25-Sep-2008 (Hong Kong) Cleaned up the code a lot, reduced the log
-+ chatter, fixed a bug where multiple LUNs did not
-+ work. Also, added logging control. Tested with
-+ five virtual disks. They all came up as /dev/sdb
-+ through /dev/sdf and I could dd to them. Also
-+ fixed a bug preventing multiple adapters.
-+
-+V0.3 26-Sep-2008 (Santa Clara) Added back a copyright plus cleaned up some
-+ unused functions and structures.
-+
-+V0.4 5-Oct-2008 (Santa Clara) Changed name to scst_local as suggested, cleaned
-+ up some unused variables (made them used) and
-+ change allocation to a kmem_cache pool.
-+
-+V0.5 5-Oct-2008 (Santa Clara) Added mgmt commands to handle dev reset and
-+ aborts. Not sure if aborts works. Also corrected
-+ the version info and renamed readme to README.
-+
-+V0.6 7-Oct-2008 (Santa Clara) Removed some redundant code and made some
-+ changes suggested by Vladislav.
-+
-+V0.7 11-Oct-2008 (Santa Clara) Moved into the scst tree. Cleaned up some
-+ unused functions, used TRACE macros etc.
-+
-+V0.9 30-Nov-2008 (Mtn View) Cleaned up an additional problem with symbols not
-+ being defined in older versions of the kernel. Also
-+ fixed some English and cleaned up this doc.
-+
-+V1.0 10-Sep-2010 (Moscow) Sysfs management added. Reviewed and cleaned up.
-+
-+V2.1 Update for kernels up to 3.0. Cleanups.
-+
-diff -uprN orig/linux-3.2/drivers/scst/scst_local/Kconfig linux-3.2/drivers/scst/scst_local/Kconfig
---- orig/linux-3.2/drivers/scst/scst_local/Kconfig
-+++ linux-3.2/drivers/scst/scst_local/Kconfig
-@@ -0,0 +1,22 @@
-+config SCST_LOCAL
-+ tristate "SCST Local driver"
-+ depends on SCST && !HIGHMEM4G && !HIGHMEM64G
-+ ---help---
-+ This module provides a LLD SCSI driver that connects to
-+ the SCST target mode subsystem in a loop-back manner.
-+ It allows you to test target-mode device-handlers locally.
-+ You will need the SCST subsystem as well.
-+
-+ If unsure whether you really want or need this, say N.
-+
-+config SCST_LOCAL_FORCE_DIRECT_PROCESSING
-+ bool "Force local processing"
-+ depends on SCST_LOCAL
-+ help
-+	  This experimental option forces scst_local to make SCST process
-+	  SCSI commands in the same context in which they were submitted.
-+ Otherwise, they will be processed in SCST threads. Setting this
-+ option to "Y" will give some performance increase, but might be
-+ unsafe.
-+
-+ If unsure, say "N".
-diff -uprN orig/linux-3.2/drivers/scst/scst_local/Makefile linux-3.2/drivers/scst/scst_local/Makefile
---- orig/linux-3.2/drivers/scst/scst_local/Makefile
-+++ linux-3.2/drivers/scst/scst_local/Makefile
-@@ -0,0 +1,2 @@
-+obj-$(CONFIG_SCST_LOCAL) += scst_local.o
-+
-diff -uprN orig/linux-3.2/drivers/scst/scst_local/scst_local.c linux-3.2/drivers/scst/scst_local/scst_local.c
---- orig/linux-3.2/drivers/scst/scst_local/scst_local.c
-+++ linux-3.2/drivers/scst/scst_local/scst_local.c
-@@ -0,0 +1,1587 @@
-+/*
-+ * Copyright (C) 2008 - 2010 Richard Sharpe
-+ * Copyright (C) 1992 Eric Youngdale
-+ * Copyright (C) 2008 - 2011 Vladislav Bolkhovitin <vst@vlnb.net>
-+ *
-+ * Simulate a host adapter and an SCST target adapter back to back
-+ *
-+ * Based on the scsi_debug.c driver originally by Eric Youngdale and
-+ * others, including D Gilbert et al
-+ *
-+ */
-+
-+#include <linux/module.h>
-+
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/types.h>
-+#include <linux/init.h>
-+#include <linux/moduleparam.h>
-+#include <linux/scatterlist.h>
-+#include <linux/slab.h>
-+#include <linux/completion.h>
-+#include <linux/spinlock.h>
-+
-+#include <scsi/scsi.h>
-+#include <scsi/scsi_cmnd.h>
-+#include <scsi/scsi_host.h>
-+#include <scsi/scsi_tcq.h>
-+
-+#define LOG_PREFIX "scst_local"
-+
-+/* SCST includes ... */
-+#include <scst/scst.h>
-+#include <scst/scst_debug.h>
-+
-+#ifdef CONFIG_SCST_DEBUG
-+#define SCST_LOCAL_DEFAULT_LOG_FLAGS (TRACE_FUNCTION | TRACE_PID | \
-+ TRACE_LINE | TRACE_OUT_OF_MEM | TRACE_MGMT | TRACE_MGMT_DEBUG | \
-+ TRACE_MINOR | TRACE_SPECIAL)
-+#else
-+# ifdef CONFIG_SCST_TRACING
-+#define SCST_LOCAL_DEFAULT_LOG_FLAGS (TRACE_OUT_OF_MEM | TRACE_MGMT | \
-+ TRACE_SPECIAL)
-+# endif
-+#endif
-+
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+#define trace_flag scst_local_trace_flag
-+static unsigned long scst_local_trace_flag = SCST_LOCAL_DEFAULT_LOG_FLAGS;
-+#endif
-+
-+#define SCST_LOCAL_VERSION "2.2.0"
-+static const char *scst_local_version_date = "20110901";
-+
-+/* Some statistics */
-+static atomic_t num_aborts = ATOMIC_INIT(0);
-+static atomic_t num_dev_resets = ATOMIC_INIT(0);
-+static atomic_t num_target_resets = ATOMIC_INIT(0);
-+
-+static bool scst_local_add_default_tgt = true;
-+module_param_named(add_default_tgt, scst_local_add_default_tgt, bool, S_IRUGO);
-+MODULE_PARM_DESC(add_default_tgt, "whether to create at startup the default "
-+	"target scst_local_tgt with the default session scst_local_host");
-+
-+struct scst_aen_work_item {
-+ struct list_head work_list_entry;
-+ struct scst_aen *aen;
-+};
-+
-+struct scst_local_tgt {
-+ struct scst_tgt *scst_tgt;
-+ struct list_head sessions_list; /* protected by scst_local_mutex */
-+ struct list_head tgts_list_entry;
-+
-+ /* SCSI version descriptors */
-+ uint16_t scsi_transport_version;
-+ uint16_t phys_transport_version;
-+};
-+
-+struct scst_local_sess {
-+ struct scst_session *scst_sess;
-+
-+ unsigned int unregistering:1;
-+
-+ struct device dev;
-+ struct Scsi_Host *shost;
-+ struct scst_local_tgt *tgt;
-+
-+ int number;
-+
-+ struct mutex tr_id_mutex;
-+ uint8_t *transport_id;
-+ int transport_id_len;
-+
-+ struct work_struct aen_work;
-+ spinlock_t aen_lock;
-+ struct list_head aen_work_list; /* protected by aen_lock */
-+
-+ struct list_head sessions_list_entry;
-+};
-+
-+#define to_scst_lcl_sess(d) \
-+ container_of(d, struct scst_local_sess, dev)
-+
-+static int __scst_local_add_adapter(struct scst_local_tgt *tgt,
-+ const char *initiator_name, bool locked);
-+static int scst_local_add_adapter(struct scst_local_tgt *tgt,
-+ const char *initiator_name);
-+static void scst_local_remove_adapter(struct scst_local_sess *sess);
-+static int scst_local_add_target(const char *target_name,
-+ struct scst_local_tgt **out_tgt);
-+static void __scst_local_remove_target(struct scst_local_tgt *tgt);
-+static void scst_local_remove_target(struct scst_local_tgt *tgt);
-+
-+static atomic_t scst_local_sess_num = ATOMIC_INIT(0);
-+
-+static LIST_HEAD(scst_local_tgts_list);
-+static DEFINE_MUTEX(scst_local_mutex);
-+
-+static DECLARE_RWSEM(scst_local_exit_rwsem);
-+
-+MODULE_AUTHOR("Richard Sharpe, Vladislav Bolkhovitin + ideas from SCSI_DEBUG");
-+MODULE_DESCRIPTION("SCSI+SCST local adapter driver");
-+MODULE_LICENSE("GPL");
-+MODULE_VERSION(SCST_LOCAL_VERSION);
-+
-+static int scst_local_get_sas_transport_id(struct scst_local_sess *sess,
-+ uint8_t **transport_id, int *len)
-+{
-+ int res = 0;
-+ int tr_id_size = 0;
-+ uint8_t *tr_id = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ tr_id_size = 24; /* A SAS TransportID */
-+
-+ tr_id = kzalloc(tr_id_size, GFP_KERNEL);
-+ if (tr_id == NULL) {
-+ PRINT_ERROR("Allocation of TransportID (size %d) failed",
-+ tr_id_size);
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ tr_id[0] = 0x00 | SCSI_TRANSPORTID_PROTOCOLID_SAS;
-+
-+ /*
-+ * Assemble a valid-looking SAS address in NAA IEEE Registered format
-+ * (0x5 | OUI | vendor-specific ID) ... Does SCST have a real one we
-+ * could use?
-+ */
-+
-+ tr_id[4] = 0x5F;
-+ tr_id[5] = 0xEE;
-+ tr_id[6] = 0xDE;
-+ tr_id[7] = 0x40 | ((sess->number >> 4) & 0x0F);
-+ tr_id[8] = 0x0F | (sess->number & 0xF0);
-+ tr_id[9] = 0xAD;
-+ tr_id[10] = 0xE0;
-+ tr_id[11] = 0x50;
-+
-+ *transport_id = tr_id;
-+ *len = tr_id_size;
-+
-+ TRACE_DBG("Created tid '%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X'",
-+ tr_id[4], tr_id[5], tr_id[6], tr_id[7],
-+ tr_id[8], tr_id[9], tr_id[10], tr_id[11]);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
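-+
-+/*
-+ * For reference (a hedged summary of SPC-4): a SAS TransportID is 24
-+ * bytes; byte 0 carries the format code (00b here) and the protocol
-+ * identifier (SAS), and bytes 4..11 carry the 8-byte SAS address
-+ * fabricated above from sess->number.
-+ */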
-+
-+static int scst_local_get_initiator_port_transport_id(
-+ struct scst_tgt *tgt, struct scst_session *scst_sess,
-+ uint8_t **transport_id)
-+{
-+ int res = 0;
-+ int tr_id_size = 0;
-+ uint8_t *tr_id = NULL;
-+ struct scst_local_sess *sess;
-+
-+ TRACE_ENTRY();
-+
-+ if (scst_sess == NULL) {
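-+ /*
-+ * Per the SCST target template API, a NULL session means the caller
-+ * only wants the TransportID protocol identifier.
-+ */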
-+ res = SCSI_TRANSPORTID_PROTOCOLID_SAS;
-+ goto out;
-+ }
-+
-+ sess = (struct scst_local_sess *)scst_sess_get_tgt_priv(scst_sess);
-+
-+ mutex_lock(&sess->tr_id_mutex);
-+
-+ if (sess->transport_id == NULL) {
-+ res = scst_local_get_sas_transport_id(sess,
-+ transport_id, &tr_id_size);
-+ goto out_unlock;
-+ }
-+
-+ tr_id_size = sess->transport_id_len;
-+ BUG_ON(tr_id_size == 0);
-+
-+ tr_id = kzalloc(tr_id_size, GFP_KERNEL);
-+ if (tr_id == NULL) {
-+ PRINT_ERROR("Allocation of TransportID (size %d) failed",
-+ tr_id_size);
-+ res = -ENOMEM;
-+ goto out_unlock;
-+ }
-+
-+ memcpy(tr_id, sess->transport_id, sess->transport_id_len);
-+
-+ *transport_id = tr_id;
-+
-+out_unlock:
-+ mutex_unlock(&sess->tr_id_mutex);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/**
-+ ** Tgtt attributes
-+ **/
-+
-+static ssize_t scst_local_version_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ sprintf(buf, "%s/%s\n", SCST_LOCAL_VERSION, scst_local_version_date);
-+
-+#ifdef CONFIG_SCST_EXTRACHECKS
-+ strcat(buf, "EXTRACHECKS\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_TRACING
-+ strcat(buf, "TRACING\n");
-+#endif
-+
-+#ifdef CONFIG_SCST_DEBUG
-+ strcat(buf, "DEBUG\n");
-+#endif
-+
-+ TRACE_EXIT();
-+ return strlen(buf);
-+}
-+
-+static struct kobj_attribute scst_local_version_attr =
-+ __ATTR(version, S_IRUGO, scst_local_version_show, NULL);
-+
-+static ssize_t scst_local_stats_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+
-+{
-+ return sprintf(buf, "Aborts: %d, Device Resets: %d, Target Resets: %d\n",
-+ atomic_read(&num_aborts), atomic_read(&num_dev_resets),
-+ atomic_read(&num_target_resets));
-+}
-+
-+static struct kobj_attribute scst_local_stats_attr =
-+ __ATTR(stats, S_IRUGO, scst_local_stats_show, NULL);
-+
-+static const struct attribute *scst_local_tgtt_attrs[] = {
-+ &scst_local_version_attr.attr,
-+ &scst_local_stats_attr.attr,
-+ NULL,
-+};
-+
-+/**
-+ ** Tgt attributes
-+ **/
-+
-+static ssize_t scst_local_scsi_transport_version_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_tgt *scst_tgt;
-+ struct scst_local_tgt *tgt;
-+ ssize_t res = -ENOENT;
-+
-+ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
-+ goto out;
-+
-+ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ tgt = scst_tgt_get_tgt_priv(scst_tgt);
-+ if (!tgt)
-+ goto out_up;
-+
-+ if (tgt->scsi_transport_version != 0)
-+ res = sprintf(buf, "0x%x\n%s", tgt->scsi_transport_version,
-+ SCST_SYSFS_KEY_MARK "\n");
-+ else
-+ res = sprintf(buf, "0x%x\n", 0x0BE0); /* SAS */
-+
-+out_up:
-+ up_read(&scst_local_exit_rwsem);
-+out:
-+ return res;
-+}
-+
-+static ssize_t scst_local_scsi_transport_version_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buffer, size_t size)
-+{
-+ ssize_t res = -ENOENT;
-+ struct scst_tgt *scst_tgt;
-+ struct scst_local_tgt *tgt;
-+ unsigned long val;
-+
-+ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
-+ goto out;
-+
-+ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ tgt = scst_tgt_get_tgt_priv(scst_tgt);
-+ if (!tgt)
-+ goto out_up;
-+
-+ res = strict_strtoul(buffer, 0, &val);
-+ if (res != 0) {
-+ PRINT_ERROR("strict_strtoul() for %s failed: %zd", buffer, res);
-+ goto out_up;
-+ }
-+
-+ tgt->scsi_transport_version = val;
-+
-+ res = size;
-+
-+out_up:
-+ up_read(&scst_local_exit_rwsem);
-+out:
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_local_scsi_transport_version_attr =
-+ __ATTR(scsi_transport_version, S_IRUGO | S_IWUSR,
-+ scst_local_scsi_transport_version_show,
-+ scst_local_scsi_transport_version_store);
-+
-+static ssize_t scst_local_phys_transport_version_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ struct scst_tgt *scst_tgt;
-+ struct scst_local_tgt *tgt;
-+ ssize_t res = -ENOENT;
-+
-+ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
-+ goto out;
-+
-+ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ tgt = scst_tgt_get_tgt_priv(scst_tgt);
-+ if (!tgt)
-+ goto out_up;
-+
-+ res = sprintf(buf, "0x%x\n%s", tgt->phys_transport_version,
-+ (tgt->phys_transport_version != 0) ?
-+ SCST_SYSFS_KEY_MARK "\n" : "");
-+
-+out_up:
-+ up_read(&scst_local_exit_rwsem);
-+out:
-+ return res;
-+}
-+
-+static ssize_t scst_local_phys_transport_version_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buffer, size_t size)
-+{
-+ ssize_t res = -ENOENT;
-+ struct scst_tgt *scst_tgt;
-+ struct scst_local_tgt *tgt;
-+ unsigned long val;
-+
-+ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
-+ goto out;
-+
-+ scst_tgt = container_of(kobj, struct scst_tgt, tgt_kobj);
-+ tgt = scst_tgt_get_tgt_priv(scst_tgt);
-+ if (!tgt)
-+ goto out_up;
-+
-+ res = strict_strtoul(buffer, 0, &val);
-+ if (res != 0) {
-+ PRINT_ERROR("strict_strtoul() for %s failed: %zd", buffer, res);
-+ goto out_up;
-+ }
-+
-+ tgt->phys_transport_version = val;
-+
-+ res = size;
-+
-+out_up:
-+ up_read(&scst_local_exit_rwsem);
-+out:
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_local_phys_transport_version_attr =
-+ __ATTR(phys_transport_version, S_IRUGO | S_IWUSR,
-+ scst_local_phys_transport_version_show,
-+ scst_local_phys_transport_version_store);
-+
-+static const struct attribute *scst_local_tgt_attrs[] = {
-+ &scst_local_scsi_transport_version_attr.attr,
-+ &scst_local_phys_transport_version_attr.attr,
-+ NULL,
-+};
-+
-+/**
-+ ** Session attributes
-+ **/
-+
-+static ssize_t scst_local_transport_id_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ ssize_t res;
-+ struct scst_session *scst_sess;
-+ struct scst_local_sess *sess;
-+ uint8_t *tr_id;
-+ int tr_id_len, i;
-+
-+ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
-+ return -ENOENT;
-+
-+ scst_sess = container_of(kobj, struct scst_session, sess_kobj);
-+ sess = (struct scst_local_sess *)scst_sess_get_tgt_priv(scst_sess);
-+
-+ mutex_lock(&sess->tr_id_mutex);
-+
-+ if (sess->transport_id != NULL) {
-+ tr_id = sess->transport_id;
-+ tr_id_len = sess->transport_id_len;
-+ } else {
-+ res = scst_local_get_sas_transport_id(sess, &tr_id, &tr_id_len);
-+ if (res != 0)
-+ goto out_unlock;
-+ }
-+
-+ res = 0;
-+ for (i = 0; i < tr_id_len; i++)
-+ res += sprintf(&buf[res], "%c", tr_id[i]);
-+
-+ if (sess->transport_id == NULL)
-+ kfree(tr_id);
-+
-+out_unlock:
-+ mutex_unlock(&sess->tr_id_mutex);
-+ up_read(&scst_local_exit_rwsem);
-+ return res;
-+}
-+
-+static ssize_t scst_local_transport_id_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buffer, size_t size)
-+{
-+ ssize_t res;
-+ struct scst_session *scst_sess;
-+ struct scst_local_sess *sess;
-+
-+ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
-+ return -ENOENT;
-+
-+ scst_sess = container_of(kobj, struct scst_session, sess_kobj);
-+ sess = (struct scst_local_sess *)scst_sess_get_tgt_priv(scst_sess);
-+
-+ mutex_lock(&sess->tr_id_mutex);
-+
-+ if (sess->transport_id != NULL) {
-+ kfree(sess->transport_id);
-+ sess->transport_id = NULL;
-+ sess->transport_id_len = 0;
-+ }
-+
-+ if (size == 0)
-+ goto out_res;
-+
-+ sess->transport_id = kzalloc(size, GFP_KERNEL);
-+ if (sess->transport_id == NULL) {
-+ PRINT_ERROR("Allocation of transport_id (size %zd) failed",
-+ size);
-+ res = -ENOMEM;
-+ goto out_unlock;
-+ }
-+
-+ sess->transport_id_len = size;
-+
-+ memcpy(sess->transport_id, buffer, sess->transport_id_len);
-+
-+out_res:
-+ res = size;
-+
-+out_unlock:
-+ mutex_unlock(&sess->tr_id_mutex);
-+ up_read(&scst_local_exit_rwsem);
-+ return res;
-+}
-+
-+static struct kobj_attribute scst_local_transport_id_attr =
-+ __ATTR(transport_id, S_IRUGO | S_IWUSR,
-+ scst_local_transport_id_show,
-+ scst_local_transport_id_store);
-+
-+static const struct attribute *scst_local_sess_attrs[] = {
-+ &scst_local_transport_id_attr.attr,
-+ NULL,
-+};
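-+
-+/*
-+ * Example (hedged; the exact sysfs location is defined by the SCST core,
-+ * typically /sys/kernel/scst_tgt/targets/scst_local/<tgt>/sessions/<sess>/):
-+ *
-+ * cat transport_id           # read the stored or fabricated TransportID
-+ * cat tid.bin >transport_id  # store a raw binary TransportID
-+ */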
-+
-+static ssize_t scst_local_sysfs_add_target(const char *target_name, char *params)
-+{
-+ int res;
-+ struct scst_local_tgt *tgt;
-+ char *param, *p;
-+
-+ TRACE_ENTRY();
-+
-+ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
-+ return -ENOENT;
-+
-+ res = scst_local_add_target(target_name, &tgt);
-+ if (res != 0)
-+ goto out_up;
-+
-+ while (1) {
-+ param = scst_get_next_token_str(&params);
-+ if (param == NULL)
-+ break;
-+
-+ p = scst_get_next_lexem(&param);
-+ if (*p == '\0')
-+ break;
-+
-+ if (strcasecmp("session_name", p) != 0) {
-+ PRINT_ERROR("Unknown parameter %s", p);
-+ res = -EINVAL;
-+ goto out_remove;
-+ }
-+
-+ p = scst_get_next_lexem(&param);
-+ if (*p == '\0') {
-+ PRINT_ERROR("Wrong session name %s", p);
-+ res = -EINVAL;
-+ goto out_remove;
-+ }
-+
-+ res = scst_local_add_adapter(tgt, p);
-+ if (res != 0)
-+ goto out_remove;
-+ }
-+
-+out_up:
-+ up_read(&scst_local_exit_rwsem);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_remove:
-+ scst_local_remove_target(tgt);
-+ goto out_up;
-+}
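-+
-+/*
-+ * Example invocation (a sketch; the mgmt file is created by the SCST core
-+ * for this template, typically /sys/kernel/scst_tgt/targets/scst_local/mgmt):
-+ *
-+ * echo "add_target tgt1 session_name=ini1" >mgmt
-+ */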
-+
-+static ssize_t scst_local_sysfs_del_target(const char *target_name)
-+{
-+ int res;
-+ struct scst_local_tgt *tgt;
-+ bool deleted = false;
-+
-+ TRACE_ENTRY();
-+
-+ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
-+ return -ENOENT;
-+
-+ mutex_lock(&scst_local_mutex);
-+ list_for_each_entry(tgt, &scst_local_tgts_list, tgts_list_entry) {
-+ if (strcmp(target_name, tgt->scst_tgt->tgt_name) == 0) {
-+ __scst_local_remove_target(tgt);
-+ deleted = true;
-+ break;
-+ }
-+ }
-+ mutex_unlock(&scst_local_mutex);
-+
-+ if (!deleted) {
-+ PRINT_ERROR("Target %s not found", target_name);
-+ res = -ENOENT;
-+ goto out_up;
-+ }
-+
-+ res = 0;
-+
-+out_up:
-+ up_read(&scst_local_exit_rwsem);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static ssize_t scst_local_sysfs_mgmt_cmd(char *buf)
-+{
-+ ssize_t res;
-+ char *command, *target_name, *session_name;
-+ struct scst_local_tgt *t, *tgt;
-+
-+ TRACE_ENTRY();
-+
-+ if (down_read_trylock(&scst_local_exit_rwsem) == 0)
-+ return -ENOENT;
-+
-+ command = scst_get_next_lexem(&buf);
-+
-+ target_name = scst_get_next_lexem(&buf);
-+ if (*target_name == '\0') {
-+ PRINT_ERROR("%s", "Target name required");
-+ res = -EINVAL;
-+ goto out_up;
-+ }
-+
-+ mutex_lock(&scst_local_mutex);
-+
-+ tgt = NULL;
-+ list_for_each_entry(t, &scst_local_tgts_list, tgts_list_entry) {
-+ if (strcmp(t->scst_tgt->tgt_name, target_name) == 0) {
-+ tgt = t;
-+ break;
-+ }
-+ }
-+ if (tgt == NULL) {
-+ PRINT_ERROR("Target %s not found", target_name);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ session_name = scst_get_next_lexem(&buf);
-+ if (*session_name == '\0') {
-+ PRINT_ERROR("%s", "Session name required");
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ if (strcasecmp("add_session", command) == 0) {
-+ res = __scst_local_add_adapter(tgt, session_name, true);
-+ } else if (strcasecmp("del_session", command) == 0) {
-+ struct scst_local_sess *s, *sess = NULL;
-+ list_for_each_entry(s, &tgt->sessions_list,
-+ sessions_list_entry) {
-+ if (strcmp(s->scst_sess->initiator_name, session_name) == 0) {
-+ sess = s;
-+ break;
-+ }
-+ }
-+ if (sess == NULL) {
-+ PRINT_ERROR("Session %s not found (target %s)",
-+ session_name, target_name);
-+ res = -EINVAL;
-+ goto out_unlock;
-+ }
-+ scst_local_remove_adapter(sess);
-+ }
-+
-+ res = 0;
-+
-+out_unlock:
-+ mutex_unlock(&scst_local_mutex);
-+
-+out_up:
-+ up_read(&scst_local_exit_rwsem);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
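-+
-+/*
-+ * Example invocations, matching mgmt_cmd_help below:
-+ *
-+ * echo "add_session tgt1 ini2" >mgmt
-+ * echo "del_session tgt1 ini2" >mgmt
-+ */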
-+
-+static int scst_local_abort(struct scsi_cmnd *SCpnt)
-+{
-+ struct scst_local_sess *sess;
-+ int ret;
-+ DECLARE_COMPLETION_ONSTACK(dev_reset_completion);
-+
-+ TRACE_ENTRY();
-+
-+ sess = to_scst_lcl_sess(scsi_get_device(SCpnt->device->host));
-+
-+ ret = scst_rx_mgmt_fn_tag(sess->scst_sess, SCST_ABORT_TASK, SCpnt->tag,
-+ false, &dev_reset_completion);
-+
-+ /* Now wait for the completion ... */
-+ wait_for_completion_interruptible(&dev_reset_completion);
-+
-+ atomic_inc(&num_aborts);
-+
-+ if (ret == 0)
-+ ret = SUCCESS;
-+
-+ TRACE_EXIT_RES(ret);
-+ return ret;
-+}
-+
-+static int scst_local_device_reset(struct scsi_cmnd *SCpnt)
-+{
-+ struct scst_local_sess *sess;
-+ __be16 lun;
-+ int ret;
-+ DECLARE_COMPLETION_ONSTACK(dev_reset_completion);
-+
-+ TRACE_ENTRY();
-+
-+ sess = to_scst_lcl_sess(scsi_get_device(SCpnt->device->host));
-+
-+ lun = cpu_to_be16(SCpnt->device->lun);
-+
-+ ret = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_LUN_RESET,
-+ (const uint8_t *)&lun, sizeof(lun), false,
-+ &dev_reset_completion);
-+
-+ /* Now wait for the completion ... */
-+ wait_for_completion_interruptible(&dev_reset_completion);
-+
-+ atomic_inc(&num_dev_resets);
-+
-+ if (ret == 0)
-+ ret = SUCCESS;
-+
-+ TRACE_EXIT_RES(ret);
-+ return ret;
-+}
-+
-+static int scst_local_target_reset(struct scsi_cmnd *SCpnt)
-+{
-+ struct scst_local_sess *sess;
-+ __be16 lun;
-+ int ret;
-+ DECLARE_COMPLETION_ONSTACK(dev_reset_completion);
-+
-+ TRACE_ENTRY();
-+
-+ sess = to_scst_lcl_sess(scsi_get_device(SCpnt->device->host));
-+
-+ lun = cpu_to_be16(SCpnt->device->lun);
-+
-+ ret = scst_rx_mgmt_fn_lun(sess->scst_sess, SCST_TARGET_RESET,
-+ (const uint8_t *)&lun, sizeof(lun), false,
-+ &dev_reset_completion);
-+
-+ /* Now wait for the completion ... */
-+ wait_for_completion_interruptible(&dev_reset_completion);
-+
-+ atomic_inc(&num_target_resets);
-+
-+ if (ret == 0)
-+ ret = SUCCESS;
-+
-+ TRACE_EXIT_RES(ret);
-+ return ret;
-+}
-+
-+static void copy_sense(struct scsi_cmnd *cmnd, struct scst_cmd *scst_cmnd)
-+{
-+ int scst_cmnd_sense_len = scst_cmd_get_sense_buffer_len(scst_cmnd);
-+
-+ TRACE_ENTRY();
-+
-+ scst_cmnd_sense_len = (SCSI_SENSE_BUFFERSIZE < scst_cmnd_sense_len ?
-+ SCSI_SENSE_BUFFERSIZE : scst_cmnd_sense_len);
-+ memcpy(cmnd->sense_buffer, scst_cmd_get_sense_buffer(scst_cmnd),
-+ scst_cmnd_sense_len);
-+
-+ TRACE_BUFFER("Sense set", cmnd->sense_buffer, scst_cmnd_sense_len);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+/*
-+ * Utility function to handle processing of done and allow
-+ * easy insertion of error injection if desired
-+ */
-+static int scst_local_send_resp(struct scsi_cmnd *cmnd,
-+ struct scst_cmd *scst_cmnd,
-+ void (*done)(struct scsi_cmnd *),
-+ int scsi_result)
-+{
-+ int ret = 0;
-+
-+ TRACE_ENTRY();
-+
-+ if (scst_cmnd) {
-+ /* The buffer isn't ours, so let's be safe and restore it */
-+ scst_check_restore_sg_buff(scst_cmnd);
-+
-+ /* Simulate autosense by this driver */
-+ if (unlikely(SCST_SENSE_VALID(scst_cmnd->sense)))
-+ copy_sense(cmnd, scst_cmnd);
-+ }
-+
-+ cmnd->result = scsi_result;
-+
-+ done(cmnd);
-+
-+ TRACE_EXIT_RES(ret);
-+ return ret;
-+}
-+
-+/*
-+ * This does the heavy lifting ... we pass all the commands on to the
-+ * target driver and have it do its magic ...
-+ */
-+#ifdef CONFIG_SCST_LOCAL_FORCE_DIRECT_PROCESSING
-+static int scst_local_queuecommand(struct Scsi_Host *host,
-+ struct scsi_cmnd *SCpnt)
-+#else
-+static int scst_local_queuecommand_lck(struct scsi_cmnd *SCpnt,
-+ void (*done)(struct scsi_cmnd *))
-+ __acquires(&h->host_lock)
-+ __releases(&h->host_lock)
-+#endif
-+{
-+ struct scst_local_sess *sess;
-+ struct scatterlist *sgl = NULL;
-+ int sgl_count = 0;
-+ __be16 lun;
-+ struct scst_cmd *scst_cmd = NULL;
-+ scst_data_direction dir;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("lun %d, cmd: 0x%02X", SCpnt->device->lun, SCpnt->cmnd[0]);
-+
-+ sess = to_scst_lcl_sess(scsi_get_device(SCpnt->device->host));
-+
-+ scsi_set_resid(SCpnt, 0);
-+
-+ /*
-+ * Tell the target that we have a command ... but first we need
-+ * to get the LUN into a format that SCST understands.
-+ *
-+ * NOTE! We must call it with the atomic parameter true to avoid
-+ * a memory allocation deadlock when mounting file systems over
-+ * our devices.
-+ */
-+ lun = cpu_to_be16(SCpnt->device->lun);
-+ scst_cmd = scst_rx_cmd(sess->scst_sess, (const uint8_t *)&lun,
-+ sizeof(lun), SCpnt->cmnd, SCpnt->cmd_len, true);
-+ if (!scst_cmd) {
-+ PRINT_ERROR("%s", "scst_rx_cmd() failed");
-+ return SCSI_MLQUEUE_HOST_BUSY;
-+ }
-+
-+ scst_cmd_set_tag(scst_cmd, SCpnt->tag);
-+ switch (scsi_get_tag_type(SCpnt->device)) {
-+ case MSG_SIMPLE_TAG:
-+ scst_cmd_set_queue_type(scst_cmd, SCST_CMD_QUEUE_SIMPLE);
-+ break;
-+ case MSG_HEAD_TAG:
-+ scst_cmd_set_queue_type(scst_cmd, SCST_CMD_QUEUE_HEAD_OF_QUEUE);
-+ break;
-+ case MSG_ORDERED_TAG:
-+ scst_cmd_set_queue_type(scst_cmd, SCST_CMD_QUEUE_ORDERED);
-+ break;
-+ case SCSI_NO_TAG:
-+ default:
-+ scst_cmd_set_queue_type(scst_cmd, SCST_CMD_QUEUE_UNTAGGED);
-+ break;
-+ }
-+
-+ sgl = scsi_sglist(SCpnt);
-+ sgl_count = scsi_sg_count(SCpnt);
-+
-+ dir = SCST_DATA_NONE;
-+ switch (SCpnt->sc_data_direction) {
-+ case DMA_TO_DEVICE:
-+ dir = SCST_DATA_WRITE;
-+ scst_cmd_set_expected(scst_cmd, dir, scsi_bufflen(SCpnt));
-+ scst_cmd_set_noio_mem_alloc(scst_cmd);
-+ scst_cmd_set_tgt_sg(scst_cmd, sgl, sgl_count);
-+ break;
-+ case DMA_FROM_DEVICE:
-+ dir = SCST_DATA_READ;
-+ scst_cmd_set_expected(scst_cmd, dir, scsi_bufflen(SCpnt));
-+ scst_cmd_set_noio_mem_alloc(scst_cmd);
-+ scst_cmd_set_tgt_sg(scst_cmd, sgl, sgl_count);
-+ break;
-+ case DMA_BIDIRECTIONAL:
-+ /* Some of these symbols are only defined after 2.6.24 */
-+ dir = SCST_DATA_BIDI;
-+ scst_cmd_set_expected(scst_cmd, dir, scsi_bufflen(SCpnt));
-+ scst_cmd_set_expected_out_transfer_len(scst_cmd,
-+ scsi_in(SCpnt)->length);
-+ scst_cmd_set_noio_mem_alloc(scst_cmd);
-+ scst_cmd_set_tgt_sg(scst_cmd, scsi_in(SCpnt)->table.sgl,
-+ scsi_in(SCpnt)->table.nents);
-+ scst_cmd_set_tgt_out_sg(scst_cmd, sgl, sgl_count);
-+ break;
-+ case DMA_NONE:
-+ default:
-+ dir = SCST_DATA_NONE;
-+ scst_cmd_set_expected(scst_cmd, dir, 0);
-+ break;
-+ }
-+
-+ /* Save the SCSI command so xmit_response() can find and complete it */
-+ scst_cmd_set_tgt_priv(scst_cmd, SCpnt);
-+
-+/*
-+ * Although, starting from 2.6.37, queuecommand() is called with no host_lock
-+ * held, in fact it doesn't work without DEF_SCSI_QCMD() and leads to various
-+ * problems like hangs under high load. Most likely, this is caused by some
-+ * non-reentrant block layer function(s). So, until that changes, we have to
-+ * go ahead with the extra context switch. In this regard it doesn't matter
-+ * much whether we are under host_lock or not (although we absolutely don't
-+ * need this lock), so let's keep the code simpler with DEF_SCSI_QCMD().
-+ */
-+#ifdef CONFIG_SCST_LOCAL_FORCE_DIRECT_PROCESSING
-+ scst_cmd_init_done(scst_cmd, SCST_CONTEXT_DIRECT);
-+#else
-+ /*
-+ * We called with IRQs disabled, so have no choice,
-+ * except to pass to the thread context.
-+ */
-+ scst_cmd_init_done(scst_cmd, SCST_CONTEXT_THREAD);
-+#endif
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+#if !defined(CONFIG_SCST_LOCAL_FORCE_DIRECT_PROCESSING)
-+/*
-+ * See comment in scst_local_queuecommand_lck() near
-+ * CONFIG_SCST_LOCAL_FORCE_DIRECT_PROCESSING
-+ */
-+static DEF_SCSI_QCMD(scst_local_queuecommand)
-+#endif
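-+
-+/*
-+ * For reference, a sketch of what DEF_SCSI_QCMD() expands to in 3.2-era
-+ * <scsi/scsi_host.h> (paraphrased; not part of this driver):
-+ *
-+ * int fn(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
-+ * {
-+ *         unsigned long irq_flags;
-+ *         int rc;
-+ *
-+ *         spin_lock_irqsave(shost->host_lock, irq_flags);
-+ *         scsi_cmd_get_serial(shost, cmd);
-+ *         rc = fn##_lck(cmd, cmd->scsi_done);
-+ *         spin_unlock_irqrestore(shost->host_lock, irq_flags);
-+ *         return rc;
-+ * }
-+ *
-+ * So the _lck variant above runs under host_lock with IRQs disabled, which
-+ * is why scst_cmd_init_done() is invoked with SCST_CONTEXT_THREAD.
-+ */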
-+
-+static int scst_local_targ_pre_exec(struct scst_cmd *scst_cmd)
-+{
-+ int res = SCST_PREPROCESS_STATUS_SUCCESS;
-+
-+ TRACE_ENTRY();
-+
-+ if (scst_cmd_get_dh_data_buff_alloced(scst_cmd) &&
-+ (scst_cmd_get_data_direction(scst_cmd) & SCST_DATA_WRITE))
-+ scst_copy_sg(scst_cmd, SCST_SG_COPY_FROM_TARGET);
-+
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+/* Must be called under sess->aen_lock. Drops then reacquires it inside. */
-+static void scst_process_aens(struct scst_local_sess *sess,
-+ bool cleanup_only)
-+ __releases(&sess->aen_lock)
-+ __acquires(&sess->aen_lock)
-+{
-+ struct scst_aen_work_item *work_item = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_DBG("Target work sess %p", sess);
-+
-+ while (!list_empty(&sess->aen_work_list)) {
-+ work_item = list_entry(sess->aen_work_list.next,
-+ struct scst_aen_work_item, work_list_entry);
-+ list_del(&work_item->work_list_entry);
-+
-+ spin_unlock(&sess->aen_lock);
-+
-+ if (cleanup_only)
-+ goto done;
-+
-+ BUG_ON(work_item->aen->event_fn != SCST_AEN_SCSI);
-+
-+ /* Let's always rescan */
-+ scsi_scan_target(&sess->shost->shost_gendev, 0, 0,
-+ SCAN_WILD_CARD, 1);
-+
-+done:
-+ scst_aen_done(work_item->aen);
-+ kfree(work_item);
-+
-+ spin_lock(&sess->aen_lock);
-+ }
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void scst_aen_work_fn(struct work_struct *work)
-+{
-+ struct scst_local_sess *sess =
-+ container_of(work, struct scst_local_sess, aen_work);
-+
-+ TRACE_ENTRY();
-+
-+ TRACE_MGMT_DBG("Target work %p", sess);
-+
-+ spin_lock(&sess->aen_lock);
-+ scst_process_aens(sess, false);
-+ spin_unlock(&sess->aen_lock);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
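-+/*
-+ * AEN flow: scst_local_report_aen() queues a work item under aen_lock and
-+ * schedules aen_work; scst_aen_work_fn() drains the list through
-+ * scst_process_aens(), which rescans the SCSI target and acknowledges each
-+ * AEN with scst_aen_done().
-+ */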
-+static int scst_local_report_aen(struct scst_aen *aen)
-+{
-+ int res = 0;
-+ int event_fn = scst_aen_get_event_fn(aen);
-+ struct scst_local_sess *sess;
-+ struct scst_aen_work_item *work_item = NULL;
-+
-+ TRACE_ENTRY();
-+
-+ sess = (struct scst_local_sess *)scst_sess_get_tgt_priv(
-+ scst_aen_get_sess(aen));
-+ switch (event_fn) {
-+ case SCST_AEN_SCSI:
-+ /*
-+ * Allocate a work item and place it on the queue
-+ */
-+ work_item = kzalloc(sizeof(*work_item), GFP_KERNEL);
-+ if (!work_item) {
-+ PRINT_ERROR("%s", "Unable to allocate work item "
-+ "to handle AEN!");
-+ return -ENOMEM;
-+ }
-+
-+ spin_lock(&sess->aen_lock);
-+
-+ if (unlikely(sess->unregistering)) {
-+ spin_unlock(&sess->aen_lock);
-+ kfree(work_item);
-+ res = SCST_AEN_RES_NOT_SUPPORTED;
-+ goto out;
-+ }
-+
-+ list_add_tail(&work_item->work_list_entry, &sess->aen_work_list);
-+ work_item->aen = aen;
-+
-+ spin_unlock(&sess->aen_lock);
-+
-+ schedule_work(&sess->aen_work);
-+ break;
-+
-+ default:
-+ TRACE_MGMT_DBG("Unsupported AEN %d", event_fn);
-+ res = SCST_AEN_RES_NOT_SUPPORTED;
-+ break;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+}
-+
-+static int scst_local_targ_detect(struct scst_tgt_template *tgt_template)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+static int scst_local_targ_release(struct scst_tgt *tgt)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+static int scst_local_targ_xmit_response(struct scst_cmd *scst_cmd)
-+{
-+ struct scsi_cmnd *SCpnt = NULL;
-+ void (*done)(struct scsi_cmnd *);
-+
-+ TRACE_ENTRY();
-+
-+ if (unlikely(scst_cmd_aborted(scst_cmd))) {
-+ scst_set_delivery_status(scst_cmd, SCST_CMD_DELIVERY_ABORTED);
-+ scst_tgt_cmd_done(scst_cmd, SCST_CONTEXT_SAME);
-+ return SCST_TGT_RES_SUCCESS;
-+ }
-+
-+ if (scst_cmd_get_dh_data_buff_alloced(scst_cmd) &&
-+ (scst_cmd_get_data_direction(scst_cmd) & SCST_DATA_READ))
-+ scst_copy_sg(scst_cmd, SCST_SG_COPY_TO_TARGET);
-+
-+ SCpnt = scst_cmd_get_tgt_priv(scst_cmd);
-+ done = SCpnt->scsi_done;
-+
-+ /*
-+ * This might have to change to use the two status flags
-+ */
-+ if (scst_cmd_get_is_send_status(scst_cmd)) {
-+ int resid = 0, out_resid = 0;
-+
-+ /* Calculate the residual ... */
-+ if (likely(!scst_get_resid(scst_cmd, &resid, &out_resid))) {
-+ TRACE_DBG("No residuals for request %p", SCpnt);
-+ } else {
-+ if (out_resid != 0)
-+ PRINT_ERROR("Unable to return OUT residual %d "
-+ "(op %02x)", out_resid, SCpnt->cmnd[0]);
-+ }
-+
-+ scsi_set_resid(SCpnt, resid);
-+
-+ /*
-+ * It seems like there is no way to set out_resid ...
-+ */
-+
-+ (void)scst_local_send_resp(SCpnt, scst_cmd, done,
-+ scst_cmd_get_status(scst_cmd));
-+ }
-+
-+ /* Now tell SCST that the command is done ... */
-+ scst_tgt_cmd_done(scst_cmd, SCST_CONTEXT_SAME);
-+
-+ TRACE_EXIT();
-+ return SCST_TGT_RES_SUCCESS;
-+}
-+
-+static void scst_local_targ_task_mgmt_done(struct scst_mgmt_cmd *mgmt_cmd)
-+{
-+ struct completion *compl;
-+
-+ TRACE_ENTRY();
-+
-+ compl = (struct completion *)scst_mgmt_cmd_get_tgt_priv(mgmt_cmd);
-+ if (compl)
-+ complete(compl);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static uint16_t scst_local_get_scsi_transport_version(struct scst_tgt *scst_tgt)
-+{
-+ struct scst_local_tgt *tgt;
-+
-+ tgt = (struct scst_local_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
-+
-+ if (tgt->scsi_transport_version == 0)
-+ return 0x0BE0; /* SAS */
-+ else
-+ return tgt->scsi_transport_version;
-+}
-+
-+static uint16_t scst_local_get_phys_transport_version(struct scst_tgt *scst_tgt)
-+{
-+ struct scst_local_tgt *tgt;
-+
-+ tgt = (struct scst_local_tgt *)scst_tgt_get_tgt_priv(scst_tgt);
-+
-+ return tgt->phys_transport_version;
-+}
-+
-+static struct scst_tgt_template scst_local_targ_tmpl = {
-+ .name = "scst_local",
-+ .sg_tablesize = 0xffff,
-+ .xmit_response_atomic = 1,
-+ .enabled_attr_not_needed = 1,
-+ .tgtt_attrs = scst_local_tgtt_attrs,
-+ .tgt_attrs = scst_local_tgt_attrs,
-+ .sess_attrs = scst_local_sess_attrs,
-+ .add_target = scst_local_sysfs_add_target,
-+ .del_target = scst_local_sysfs_del_target,
-+ .mgmt_cmd = scst_local_sysfs_mgmt_cmd,
-+ .add_target_parameters = "session_name",
-+ .mgmt_cmd_help = " echo \"add_session target_name session_name\" >mgmt\n"
-+ " echo \"del_session target_name session_name\" >mgmt\n",
-+ .detect = scst_local_targ_detect,
-+ .release = scst_local_targ_release,
-+ .pre_exec = scst_local_targ_pre_exec,
-+ .xmit_response = scst_local_targ_xmit_response,
-+ .task_mgmt_fn_done = scst_local_targ_task_mgmt_done,
-+ .report_aen = scst_local_report_aen,
-+ .get_initiator_port_transport_id = scst_local_get_initiator_port_transport_id,
-+ .get_scsi_transport_version = scst_local_get_scsi_transport_version,
-+ .get_phys_transport_version = scst_local_get_phys_transport_version,
-+#if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
-+ .default_trace_flags = SCST_LOCAL_DEFAULT_LOG_FLAGS,
-+ .trace_flags = &trace_flag,
-+#endif
-+};
-+
-+static struct scsi_host_template scst_lcl_ini_driver_template = {
-+ .name = SCST_LOCAL_NAME,
-+ .queuecommand = scst_local_queuecommand,
-+ .eh_abort_handler = scst_local_abort,
-+ .eh_device_reset_handler = scst_local_device_reset,
-+ .eh_target_reset_handler = scst_local_target_reset,
-+ .can_queue = 256,
-+ .this_id = -1,
-+ .sg_tablesize = 0xFFFF,
-+ .cmd_per_lun = 32,
-+ .max_sectors = 0xffff,
-+ /* Possible pass-through backend device may not support clustering */
-+ .use_clustering = DISABLE_CLUSTERING,
-+ .skip_settle_delay = 1,
-+ .module = THIS_MODULE,
-+};
-+
-+/*
-+ * LLD Bus and functions
-+ */
-+
-+static int scst_local_driver_probe(struct device *dev)
-+{
-+ int ret;
-+ struct scst_local_sess *sess;
-+ struct Scsi_Host *hpnt;
-+
-+ TRACE_ENTRY();
-+
-+ sess = to_scst_lcl_sess(dev);
-+
-+ TRACE_DBG("sess %p", sess);
-+
-+ hpnt = scsi_host_alloc(&scst_lcl_ini_driver_template, sizeof(*sess));
-+ if (NULL == hpnt) {
-+ PRINT_ERROR("%s", "scsi_host_alloc() failed");
-+ ret = -ENODEV;
-+ goto out;
-+ }
-+
-+ sess->shost = hpnt;
-+
-+ hpnt->max_id = 0; /* Don't want more than one id */
-+ hpnt->max_lun = 0xFFFF;
-+
-+ /*
-+ * The size of this field changed at 2.6.26. The out-of-tree driver
-+ * wraps this assignment in a kernel version check so it also builds
-+ * on earlier kernels, where max_cmd_len would be truncated to 4 (with
-+ * a compiler warning) and a scan would never occur. This in-kernel
-+ * build targets 2.6.26+ only.
-+ */
-+ hpnt->max_cmd_len = 260;
-+
-+ ret = scsi_add_host(hpnt, &sess->dev);
-+ if (ret) {
-+ PRINT_ERROR("%s", "scsi_add_host() failed");
-+ ret = -ENODEV;
-+ scsi_host_put(hpnt);
-+ goto out;
-+ }
-+
-+out:
-+ TRACE_EXIT_RES(ret);
-+ return ret;
-+}
-+
-+static int scst_local_driver_remove(struct device *dev)
-+{
-+ struct scst_local_sess *sess;
-+
-+ TRACE_ENTRY();
-+
-+ sess = to_scst_lcl_sess(dev);
-+ if (!sess) {
-+ PRINT_ERROR("%s", "Unable to locate sess info");
-+ return -ENODEV;
-+ }
-+
-+ scsi_remove_host(sess->shost);
-+ scsi_host_put(sess->shost);
-+
-+ TRACE_EXIT();
-+ return 0;
-+}
-+
-+static int scst_local_bus_match(struct device *dev,
-+ struct device_driver *dev_driver)
-+{
-+ TRACE_ENTRY();
-+
-+ TRACE_EXIT();
-+ return 1;
-+}
-+
-+static struct bus_type scst_local_lld_bus = {
-+ .name = "scst_local_bus",
-+ .match = scst_local_bus_match,
-+ .probe = scst_local_driver_probe,
-+ .remove = scst_local_driver_remove,
-+};
-+
-+static struct device_driver scst_local_driver = {
-+ .name = SCST_LOCAL_NAME,
-+ .bus = &scst_local_lld_bus,
-+};
-+
-+static struct device *scst_local_root;
-+
-+static void scst_local_release_adapter(struct device *dev)
-+{
-+ struct scst_local_sess *sess;
-+
-+ TRACE_ENTRY();
-+
-+ sess = to_scst_lcl_sess(dev);
-+ if (sess == NULL)
-+ goto out;
-+
-+ spin_lock(&sess->aen_lock);
-+ sess->unregistering = 1;
-+ scst_process_aens(sess, true);
-+ spin_unlock(&sess->aen_lock);
-+
-+ cancel_work_sync(&sess->aen_work);
-+
-+ scst_unregister_session(sess->scst_sess, true, NULL);
-+
-+ kfree(sess);
-+
-+out:
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int __scst_local_add_adapter(struct scst_local_tgt *tgt,
-+ const char *initiator_name, bool locked)
-+{
-+ int res;
-+ struct scst_local_sess *sess;
-+
-+ TRACE_ENTRY();
-+
-+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
-+ if (NULL == sess) {
-+ PRINT_ERROR("Unable to alloc struct scst_local_sess (size %zu)",
-+ sizeof(*sess));
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ sess->tgt = tgt;
-+ sess->number = atomic_inc_return(&scst_local_sess_num);
-+ mutex_init(&sess->tr_id_mutex);
-+
-+ /*
-+ * Init this stuff we need for scheduling AEN work
-+ */
-+ INIT_WORK(&sess->aen_work, scst_aen_work_fn);
-+ spin_lock_init(&sess->aen_lock);
-+ INIT_LIST_HEAD(&sess->aen_work_list);
-+
-+ sess->scst_sess = scst_register_session(tgt->scst_tgt, 0,
-+ initiator_name, (void *)sess, NULL, NULL);
-+ if (sess->scst_sess == NULL) {
-+ PRINT_ERROR("%s", "scst_register_session failed");
-+ res = -EFAULT;
-+ goto out_free;
-+ }
-+
-+ sess->dev.bus = &scst_local_lld_bus;
-+ sess->dev.parent = scst_local_root;
-+ sess->dev.release = &scst_local_release_adapter;
-+ sess->dev.init_name = kobject_name(&sess->scst_sess->sess_kobj);
-+
-+ res = device_register(&sess->dev);
-+ if (res != 0)
-+ goto unregister_session;
-+
-+ res = sysfs_create_link(scst_sysfs_get_sess_kobj(sess->scst_sess),
-+ &sess->shost->shost_dev.kobj, "host");
-+ if (res != 0) {
-+ PRINT_ERROR("Unable to create \"host\" link for target "
-+ "%s", scst_get_tgt_name(tgt->scst_tgt));
-+ goto unregister_dev;
-+ }
-+
-+ if (!locked)
-+ mutex_lock(&scst_local_mutex);
-+ list_add_tail(&sess->sessions_list_entry, &tgt->sessions_list);
-+ if (!locked)
-+ mutex_unlock(&scst_local_mutex);
-+
-+ if (scst_initiator_has_luns(tgt->scst_tgt, initiator_name))
-+ scsi_scan_target(&sess->shost->shost_gendev, 0, 0,
-+ SCAN_WILD_CARD, 1);
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+unregister_dev:
-+ device_unregister(&sess->dev);
-+
-+unregister_session:
-+ scst_unregister_session(sess->scst_sess, true, NULL);
-+
-+out_free:
-+ kfree(sess);
-+ goto out;
-+}
-+
-+static int scst_local_add_adapter(struct scst_local_tgt *tgt,
-+ const char *initiator_name)
-+{
-+ return __scst_local_add_adapter(tgt, initiator_name, false);
-+}
-+
-+/* Must be called under scst_local_mutex */
-+static void scst_local_remove_adapter(struct scst_local_sess *sess)
-+{
-+ TRACE_ENTRY();
-+
-+ list_del(&sess->sessions_list_entry);
-+
-+ device_unregister(&sess->dev);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int scst_local_add_target(const char *target_name,
-+ struct scst_local_tgt **out_tgt)
-+{
-+ int res;
-+ struct scst_local_tgt *tgt;
-+
-+ TRACE_ENTRY();
-+
-+ tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
-+ if (NULL == tgt) {
-+ PRINT_ERROR("Unable to alloc tgt (size %zu)", sizeof(*tgt));
-+ res = -ENOMEM;
-+ goto out;
-+ }
-+
-+ INIT_LIST_HEAD(&tgt->sessions_list);
-+
-+ tgt->scst_tgt = scst_register_target(&scst_local_targ_tmpl, target_name);
-+ if (tgt->scst_tgt == NULL) {
-+ PRINT_ERROR("%s", "scst_register_target() failed");
-+ res = -EFAULT;
-+ goto out_free;
-+ }
-+
-+ scst_tgt_set_tgt_priv(tgt->scst_tgt, tgt);
-+
-+ mutex_lock(&scst_local_mutex);
-+ list_add_tail(&tgt->tgts_list_entry, &scst_local_tgts_list);
-+ mutex_unlock(&scst_local_mutex);
-+
-+ if (out_tgt != NULL)
-+ *out_tgt = tgt;
-+
-+ res = 0;
-+
-+out:
-+ TRACE_EXIT_RES(res);
-+ return res;
-+
-+out_free:
-+ kfree(tgt);
-+ goto out;
-+}
-+
-+/* Must be called under scst_local_mutex */
-+static void __scst_local_remove_target(struct scst_local_tgt *tgt)
-+{
-+ struct scst_local_sess *sess, *ts;
-+
-+ TRACE_ENTRY();
-+
-+ list_for_each_entry_safe(sess, ts, &tgt->sessions_list,
-+ sessions_list_entry) {
-+ scst_local_remove_adapter(sess);
-+ }
-+
-+ list_del(&tgt->tgts_list_entry);
-+
-+ scst_unregister_target(tgt->scst_tgt);
-+
-+ kfree(tgt);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static void scst_local_remove_target(struct scst_local_tgt *tgt)
-+{
-+ TRACE_ENTRY();
-+
-+ mutex_lock(&scst_local_mutex);
-+ __scst_local_remove_target(tgt);
-+ mutex_unlock(&scst_local_mutex);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+static int __init scst_local_init(void)
-+{
-+ int ret;
-+ struct scst_local_tgt *tgt;
-+
-+ TRACE_ENTRY();
-+
-+ scst_local_root = root_device_register(SCST_LOCAL_NAME);
-+ if (IS_ERR(scst_local_root)) {
-+ ret = PTR_ERR(scst_local_root);
-+ goto out;
-+ }
-+
-+ ret = bus_register(&scst_local_lld_bus);
-+ if (ret < 0) {
-+ PRINT_ERROR("bus_register() error: %d", ret);
-+ goto dev_unreg;
-+ }
-+
-+ ret = driver_register(&scst_local_driver);
-+ if (ret < 0) {
-+ PRINT_ERROR("driver_register() error: %d", ret);
-+ goto bus_unreg;
-+ }
-+
-+ ret = scst_register_target_template(&scst_local_targ_tmpl);
-+ if (ret != 0) {
-+ PRINT_ERROR("Unable to register target template: %d", ret);
-+ goto driver_unreg;
-+ }
-+
-+ /*
-+ * With sysfs, don't add a default target unless we are told to do so
-+ * via the add_default_tgt module parameter. (With procfs a default
-+ * target was always added, as in the earliest versions.)
-+ */
-+ if (!scst_local_add_default_tgt)
-+ goto out;
-+
-+ ret = scst_local_add_target("scst_local_tgt", &tgt);
-+ if (ret != 0)
-+ goto tgt_templ_unreg;
-+
-+ ret = scst_local_add_adapter(tgt, "scst_local_host");
-+ if (ret != 0)
-+ goto tgt_unreg;
-+
-+out:
-+ TRACE_EXIT_RES(ret);
-+ return ret;
-+
-+tgt_unreg:
-+ scst_local_remove_target(tgt);
-+
-+tgt_templ_unreg:
-+ scst_unregister_target_template(&scst_local_targ_tmpl);
-+
-+driver_unreg:
-+ driver_unregister(&scst_local_driver);
-+
-+bus_unreg:
-+ bus_unregister(&scst_local_lld_bus);
-+
-+dev_unreg:
-+ root_device_unregister(scst_local_root);
-+
-+ goto out;
-+}
-+
-+static void __exit scst_local_exit(void)
-+{
-+ struct scst_local_tgt *tgt, *tt;
-+
-+ TRACE_ENTRY();
-+
-+ down_write(&scst_local_exit_rwsem);
-+
-+ mutex_lock(&scst_local_mutex);
-+ list_for_each_entry_safe(tgt, tt, &scst_local_tgts_list,
-+ tgts_list_entry) {
-+ __scst_local_remove_target(tgt);
-+ }
-+ mutex_unlock(&scst_local_mutex);
-+
-+ driver_unregister(&scst_local_driver);
-+ bus_unregister(&scst_local_lld_bus);
-+ root_device_unregister(scst_local_root);
-+
-+ /* Now unregister the target template */
-+ scst_unregister_target_template(&scst_local_targ_tmpl);
-+
-+ /* To make lockdep happy */
-+ up_write(&scst_local_exit_rwsem);
-+
-+ TRACE_EXIT();
-+ return;
-+}
-+
-+device_initcall(scst_local_init);
-+module_exit(scst_local_exit);