Diffstat (limited to 'main/linux-vserver')
-rw-r--r--   main/linux-vserver/APKBUILD                          8
-rw-r--r--   main/linux-vserver/patch-3.6.11-vs2.3.4.6.diff   26053
2 files changed, 26057 insertions, 4 deletions
diff --git a/main/linux-vserver/APKBUILD b/main/linux-vserver/APKBUILD
index 6c0000c704..3b6b7b41cc 100644
--- a/main/linux-vserver/APKBUILD
+++ b/main/linux-vserver/APKBUILD
@@ -2,7 +2,7 @@
_flavor=vserver
pkgname=linux-${_flavor}
-pkgver=3.6.10
+pkgver=3.6.11
pkgrel=0
_vsver=vs2.3.4.6
@@ -21,7 +21,7 @@ _config=${config:-kernelconfig.${CARCH}}
install=
source="http://www.kernel.org/pub/linux/kernel/v3.x/linux-$_kernver.tar.xz
http://www.kernel.org/pub/linux/kernel/v3.x/patch-$pkgver.xz
- http://vserver.13thfloor.at/Experimental/patch-$pkgver-$_vsver.diff
+ patch-$pkgver-$_vsver.diff
kernelconfig.x86
kernelconfig.x86_64
"
@@ -136,7 +136,7 @@ dev() {
}
md5sums="1a1760420eac802c541a20ab51a093d1 linux-3.6.tar.xz
-406a52f90a2ddc78a3ecdf4fe46e7cf7 patch-3.6.10.xz
-ec3f4d86c1d2d174bf212d978e4b1f26 patch-3.6.10-vs2.3.4.6.diff
+bd4bba74093405887d521309a74c19e9 patch-3.6.11.xz
+27809872b8bc0dffce4e383d68d1c989 patch-3.6.11-vs2.3.4.6.diff
b28263bb0d529a80d4f13fbdd76520b2 kernelconfig.x86
1e9e12d5bd63552127331b875a554992 kernelconfig.x86_64"
diff --git a/main/linux-vserver/patch-3.6.11-vs2.3.4.6.diff b/main/linux-vserver/patch-3.6.11-vs2.3.4.6.diff
new file mode 100644
index 0000000000..f6813e4940
--- /dev/null
+++ b/main/linux-vserver/patch-3.6.11-vs2.3.4.6.diff
@@ -0,0 +1,26053 @@
+diff -NurpP --minimal linux-3.6.10/Documentation/vserver/debug.txt linux-3.6.10-vs2.3.4.6/Documentation/vserver/debug.txt
+--- linux-3.6.10/Documentation/vserver/debug.txt 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/Documentation/vserver/debug.txt 2012-10-04 16:46:59.000000000 +0000
+@@ -0,0 +1,154 @@
++
++debug_cvirt:
++
++ 2 4 "vx_map_tgid: %p/%llx: %d -> %d"
++ "vx_rmap_tgid: %p/%llx: %d -> %d"
++
++debug_dlim:
++
++ 0 1 "ALLOC (%p,#%d)%c inode (%d)"
++ "FREE (%p,#%d)%c inode"
++ 1 2 "ALLOC (%p,#%d)%c %lld bytes (%d)"
++ "FREE (%p,#%d)%c %lld bytes"
++ 2 4 "ADJUST: %lld,%lld on %ld,%ld [mult=%d]"
++ 3 8 "ext3_has_free_blocks(%p): %lu<%lu+1, %c, %u!=%u r=%d"
++ "ext3_has_free_blocks(%p): free=%lu, root=%lu"
++ "rcu_free_dl_info(%p)"
++ 4 10 "alloc_dl_info(%p,%d) = %p"
++ "dealloc_dl_info(%p)"
++ "get_dl_info(%p[#%d.%d])"
++ "put_dl_info(%p[#%d.%d])"
++ 5 20 "alloc_dl_info(%p,%d)*"
++ 6 40 "__hash_dl_info: %p[#%d]"
++ "__unhash_dl_info: %p[#%d]"
++ 7 80 "locate_dl_info(%p,#%d) = %p"
++
++debug_misc:
++
++ 0 1 "destroy_dqhash: %p [#0x%08x] c=%d"
++ "new_dqhash: %p [#0x%08x]"
++ "vroot[%d]_clr_dev: dev=%p[%lu,%d:%d]"
++ "vroot[%d]_get_real_bdev: dev=%p[%lu,%d:%d]"
++ "vroot[%d]_set_dev: dev=%p[%lu,%d:%d]"
++ "vroot_get_real_bdev not set"
++ 1 2 "cow_break_link(»%s«)"
++ "temp copy »%s«"
++ 2 4 "dentry_open(new): %p"
++ "dentry_open(old): %p"
++ "lookup_create(new): %p"
++ "old path »%s«"
++ "path_lookup(old): %d"
++ "vfs_create(new): %d"
++ "vfs_rename: %d"
++ "vfs_sendfile: %d"
++ 3 8 "fput(new_file=%p[#%d])"
++ "fput(old_file=%p[#%d])"
++ 4 10 "vx_info_kill(%p[#%d],%d,%d) = %d"
++ "vx_info_kill(%p[#%d],%d,%d)*"
++ 5 20 "vs_reboot(%p[#%d],%d)"
++ 6 40 "dropping task %p[#%u,%u] for %p[#%u,%u]"
++
++debug_net:
++
++ 2 4 "nx_addr_conflict(%p,%p) %d.%d,%d.%d"
++ 3 8 "inet_bind(%p) %d.%d.%d.%d, %d.%d.%d.%d, %d.%d.%d.%d"
++ "inet_bind(%p)* %p,%p;%lx %d.%d.%d.%d"
++ 4 10 "ip_route_connect(%p) %p,%p;%lx"
++ 5 20 "__addr_in_socket(%p,%d.%d.%d.%d) %p:%d.%d.%d.%d %p;%lx"
++ 6 40 "sk,egf: %p [#%d] (from %d)"
++ "sk,egn: %p [#%d] (from %d)"
++ "sk,req: %p [#%d] (from %d)"
++ "sk: %p [#%d] (from %d)"
++ "tw: %p [#%d] (from %d)"
++ 7 80 "__sock_recvmsg: %p[%p,%p,%p;%d]:%d/%d"
++ "__sock_sendmsg: %p[%p,%p,%p;%d]:%d/%d"
++
++debug_nid:
++
++ 0 1 "__lookup_nx_info(#%u): %p[#%u]"
++ "alloc_nx_info(%d) = %p"
++ "create_nx_info(%d) (dynamic rejected)"
++ "create_nx_info(%d) = %p (already there)"
++ "create_nx_info(%d) = %p (new)"
++ "dealloc_nx_info(%p)"
++ 1 2 "alloc_nx_info(%d)*"
++ "create_nx_info(%d)*"
++ 2 4 "get_nx_info(%p[#%d.%d])"
++ "put_nx_info(%p[#%d.%d])"
++ 3 8 "claim_nx_info(%p[#%d.%d.%d]) %p"
++ "clr_nx_info(%p[#%d.%d])"
++ "init_nx_info(%p[#%d.%d])"
++ "release_nx_info(%p[#%d.%d.%d]) %p"
++ "set_nx_info(%p[#%d.%d])"
++ 4 10 "__hash_nx_info: %p[#%d]"
++ "__nx_dynamic_id: [#%d]"
++ "__unhash_nx_info: %p[#%d.%d.%d]"
++ 5 20 "moved task %p into nxi:%p[#%d]"
++ "nx_migrate_task(%p,%p[#%d.%d.%d])"
++ "task_get_nx_info(%p)"
++ 6 40 "nx_clear_persistent(%p[#%d])"
++
++debug_quota:
++
++ 0 1 "quota_sync_dqh(%p,%d) discard inode %p"
++ 1 2 "quota_sync_dqh(%p,%d)"
++ "sync_dquots(%p,%d)"
++ "sync_dquots_dqh(%p,%d)"
++ 3 8 "do_quotactl(%p,%d,cmd=%d,id=%d,%p)"
++
++debug_switch:
++
++ 0 1 "vc: VCMD_%02d_%d[%d], %d,%p [%d,%d,%x,%x]"
++ 1 2 "vc: VCMD_%02d_%d[%d] = %08lx(%ld) [%d,%d]"
++ 4 10 "%s: (%s %s) returned %s with %d"
++
++debug_tag:
++
++ 7 80 "dx_parse_tag(»%s«): %d:#%d"
++ "dx_propagate_tag(%p[#%lu.%d]): %d,%d"
++
++debug_xid:
++
++ 0 1 "__lookup_vx_info(#%u): %p[#%u]"
++ "alloc_vx_info(%d) = %p"
++ "alloc_vx_info(%d)*"
++ "create_vx_info(%d) (dynamic rejected)"
++ "create_vx_info(%d) = %p (already there)"
++ "create_vx_info(%d) = %p (new)"
++ "dealloc_vx_info(%p)"
++ "loc_vx_info(%d) = %p (found)"
++ "loc_vx_info(%d) = %p (new)"
++ "loc_vx_info(%d) = %p (not available)"
++ 1 2 "create_vx_info(%d)*"
++ "loc_vx_info(%d)*"
++ 2 4 "get_vx_info(%p[#%d.%d])"
++ "put_vx_info(%p[#%d.%d])"
++ 3 8 "claim_vx_info(%p[#%d.%d.%d]) %p"
++ "clr_vx_info(%p[#%d.%d])"
++ "init_vx_info(%p[#%d.%d])"
++ "release_vx_info(%p[#%d.%d.%d]) %p"
++ "set_vx_info(%p[#%d.%d])"
++ 4 10 "__hash_vx_info: %p[#%d]"
++ "__unhash_vx_info: %p[#%d.%d.%d]"
++ "__vx_dynamic_id: [#%d]"
++ 5 20 "enter_vx_info(%p[#%d],%p) %p[#%d,%p]"
++ "leave_vx_info(%p[#%d,%p]) %p[#%d,%p]"
++ "moved task %p into vxi:%p[#%d]"
++ "task_get_vx_info(%p)"
++ "vx_migrate_task(%p,%p[#%d.%d])"
++ 6 40 "vx_clear_persistent(%p[#%d])"
++ "vx_exit_init(%p[#%d],%p[#%d,%d,%d])"
++ "vx_set_init(%p[#%d],%p[#%d,%d,%d])"
++ "vx_set_persistent(%p[#%d])"
++ "vx_set_reaper(%p[#%d],%p[#%d,%d])"
++ 7 80 "vx_child_reaper(%p[#%u,%u]) = %p[#%u,%u]"
++
++
++debug_limit:
++
++ n 2^n "vx_acc_cres[%5d,%s,%2d]: %5d%s"
++ "vx_cres_avail[%5d,%s,%2d]: %5ld > %5d + %5d"
++
++ m 2^m "vx_acc_page[%5d,%s,%2d]: %5d%s"
++ "vx_acc_pages[%5d,%s,%2d]: %5d += %5d"
++ "vx_pages_avail[%5d,%s,%2d]: %5ld > %5d + %5d"
+diff -NurpP --minimal linux-3.6.10/Makefile linux-3.6.10-vs2.3.4.6/Makefile
+--- linux-3.6.10/Makefile 2012-12-11 11:36:49.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/Makefile 2012-12-12 12:58:35.000000000 +0000
+@@ -1,7 +1,7 @@
+ VERSION = 3
+ PATCHLEVEL = 6
+ SUBLEVEL = 11
+-EXTRAVERSION =
++EXTRAVERSION = -vs2.3.4.6
+ NAME = Terrified Chipmunk
+
+ # *DOCUMENTATION*
+diff -NurpP --minimal linux-3.6.10/arch/alpha/Kconfig linux-3.6.10-vs2.3.4.6/arch/alpha/Kconfig
+--- linux-3.6.10/arch/alpha/Kconfig 2012-10-04 13:26:43.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/alpha/Kconfig 2012-10-04 16:46:59.000000000 +0000
+@@ -664,6 +664,8 @@ config DUMMY_CONSOLE
+ depends on VGA_HOSE
+ default y
+
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+
+ source "crypto/Kconfig"
+diff -NurpP --minimal linux-3.6.10/arch/alpha/kernel/ptrace.c linux-3.6.10-vs2.3.4.6/arch/alpha/kernel/ptrace.c
+--- linux-3.6.10/arch/alpha/kernel/ptrace.c 2012-05-21 16:06:12.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/alpha/kernel/ptrace.c 2012-10-04 16:46:59.000000000 +0000
+@@ -13,6 +13,7 @@
+ #include <linux/user.h>
+ #include <linux/security.h>
+ #include <linux/signal.h>
++#include <linux/vs_base.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+diff -NurpP --minimal linux-3.6.10/arch/alpha/kernel/systbls.S linux-3.6.10-vs2.3.4.6/arch/alpha/kernel/systbls.S
+--- linux-3.6.10/arch/alpha/kernel/systbls.S 2012-10-04 13:26:43.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/alpha/kernel/systbls.S 2012-10-04 16:46:59.000000000 +0000
+@@ -446,7 +446,7 @@ sys_call_table:
+ .quad sys_stat64 /* 425 */
+ .quad sys_lstat64
+ .quad sys_fstat64
+- .quad sys_ni_syscall /* sys_vserver */
++ .quad sys_vserver /* sys_vserver */
+ .quad sys_ni_syscall /* sys_mbind */
+ .quad sys_ni_syscall /* sys_get_mempolicy */
+ .quad sys_ni_syscall /* sys_set_mempolicy */
+diff -NurpP --minimal linux-3.6.10/arch/alpha/kernel/traps.c linux-3.6.10-vs2.3.4.6/arch/alpha/kernel/traps.c
+--- linux-3.6.10/arch/alpha/kernel/traps.c 2012-05-21 16:06:12.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/alpha/kernel/traps.c 2012-10-04 16:46:59.000000000 +0000
+@@ -184,7 +184,8 @@ die_if_kernel(char * str, struct pt_regs
+ #ifdef CONFIG_SMP
+ printk("CPU %d ", hard_smp_processor_id());
+ #endif
+- printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
++ printk("%s(%d[#%u]): %s %ld\n", current->comm,
++ task_pid_nr(current), current->xid, str, err);
+ dik_show_regs(regs, r9_15);
+ add_taint(TAINT_DIE);
+ dik_show_trace((unsigned long *)(regs+1));
+diff -NurpP --minimal linux-3.6.10/arch/arm/Kconfig linux-3.6.10-vs2.3.4.6/arch/arm/Kconfig
+--- linux-3.6.10/arch/arm/Kconfig 2012-12-11 11:36:49.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/arm/Kconfig 2012-12-11 11:45:19.000000000 +0000
+@@ -2342,6 +2342,8 @@ source "fs/Kconfig"
+
+ source "arch/arm/Kconfig.debug"
+
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+
+ source "crypto/Kconfig"
+diff -NurpP --minimal linux-3.6.10/arch/arm/kernel/calls.S linux-3.6.10-vs2.3.4.6/arch/arm/kernel/calls.S
+--- linux-3.6.10/arch/arm/kernel/calls.S 2012-10-04 13:26:43.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/arm/kernel/calls.S 2012-10-04 16:46:59.000000000 +0000
+@@ -322,7 +322,7 @@
+ /* 310 */ CALL(sys_request_key)
+ CALL(sys_keyctl)
+ CALL(ABI(sys_semtimedop, sys_oabi_semtimedop))
+-/* vserver */ CALL(sys_ni_syscall)
++ CALL(sys_vserver)
+ CALL(sys_ioprio_set)
+ /* 315 */ CALL(sys_ioprio_get)
+ CALL(sys_inotify_init)
+diff -NurpP --minimal linux-3.6.10/arch/arm/kernel/process.c linux-3.6.10-vs2.3.4.6/arch/arm/kernel/process.c
+--- linux-3.6.10/arch/arm/kernel/process.c 2012-10-04 13:26:43.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/arm/kernel/process.c 2012-10-04 16:46:59.000000000 +0000
+@@ -337,7 +337,8 @@ void __show_regs(struct pt_regs *regs)
+ void show_regs(struct pt_regs * regs)
+ {
+ printk("\n");
+- printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
++ printk("Pid: %d[#%u], comm: %20s\n",
++ task_pid_nr(current), current->xid, current->comm);
+ __show_regs(regs);
+ dump_stack();
+ }
+diff -NurpP --minimal linux-3.6.10/arch/arm/kernel/traps.c linux-3.6.10-vs2.3.4.6/arch/arm/kernel/traps.c
+--- linux-3.6.10/arch/arm/kernel/traps.c 2012-10-04 13:26:44.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/arm/kernel/traps.c 2012-10-04 17:03:56.000000000 +0000
+@@ -249,8 +249,8 @@ static int __die(const char *str, int er
+
+ print_modules();
+ __show_regs(regs);
+- printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
+- TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));
++ printk(KERN_EMERG "Process %.*s (pid: %d:#%u, stack limit = 0x%p)\n",
++ TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), tsk->xid, end_of_stack(tsk));
+
+ if (!user_mode(regs) || in_interrupt()) {
+ dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
+diff -NurpP --minimal linux-3.6.10/arch/cris/Kconfig linux-3.6.10-vs2.3.4.6/arch/cris/Kconfig
+--- linux-3.6.10/arch/cris/Kconfig 2012-10-04 13:26:51.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/cris/Kconfig 2012-10-04 16:46:59.000000000 +0000
+@@ -673,6 +673,8 @@ source "drivers/staging/Kconfig"
+
+ source "arch/cris/Kconfig.debug"
+
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+
+ source "crypto/Kconfig"
+diff -NurpP --minimal linux-3.6.10/arch/frv/kernel/kernel_thread.S linux-3.6.10-vs2.3.4.6/arch/frv/kernel/kernel_thread.S
+--- linux-3.6.10/arch/frv/kernel/kernel_thread.S 2012-10-04 13:26:51.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/frv/kernel/kernel_thread.S 2012-10-04 16:46:59.000000000 +0000
+@@ -37,7 +37,7 @@ kernel_thread:
+
+ # start by forking the current process, but with shared VM
+ setlos.p #__NR_clone,gr7 ; syscall number
+- ori gr10,#CLONE_VM,gr8 ; first syscall arg [clone_flags]
++ ori gr10,#CLONE_KT,gr8 ; first syscall arg [clone_flags]
+ sethi.p #0xe4e4,gr9 ; second syscall arg [newsp]
+ setlo #0xe4e4,gr9
+ setlos.p #0,gr10 ; third syscall arg [parent_tidptr]
+diff -NurpP --minimal linux-3.6.10/arch/h8300/Kconfig linux-3.6.10-vs2.3.4.6/arch/h8300/Kconfig
+--- linux-3.6.10/arch/h8300/Kconfig 2012-10-04 13:26:51.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/h8300/Kconfig 2012-10-04 16:46:59.000000000 +0000
+@@ -215,6 +215,8 @@ source "fs/Kconfig"
+
+ source "arch/h8300/Kconfig.debug"
+
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+
+ source "crypto/Kconfig"
+diff -NurpP --minimal linux-3.6.10/arch/ia64/Kconfig linux-3.6.10-vs2.3.4.6/arch/ia64/Kconfig
+--- linux-3.6.10/arch/ia64/Kconfig 2012-10-04 13:26:51.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/ia64/Kconfig 2012-10-04 16:46:59.000000000 +0000
+@@ -652,6 +652,8 @@ source "fs/Kconfig"
+
+ source "arch/ia64/Kconfig.debug"
+
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+
+ source "crypto/Kconfig"
+diff -NurpP --minimal linux-3.6.10/arch/ia64/kernel/entry.S linux-3.6.10-vs2.3.4.6/arch/ia64/kernel/entry.S
+--- linux-3.6.10/arch/ia64/kernel/entry.S 2012-03-19 18:46:40.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/ia64/kernel/entry.S 2012-10-04 16:46:59.000000000 +0000
+@@ -1714,7 +1714,7 @@ sys_call_table:
+ data8 sys_mq_notify
+ data8 sys_mq_getsetattr
+ data8 sys_kexec_load
+- data8 sys_ni_syscall // reserved for vserver
++ data8 sys_vserver
+ data8 sys_waitid // 1270
+ data8 sys_add_key
+ data8 sys_request_key
+diff -NurpP --minimal linux-3.6.10/arch/ia64/kernel/process.c linux-3.6.10-vs2.3.4.6/arch/ia64/kernel/process.c
+--- linux-3.6.10/arch/ia64/kernel/process.c 2012-12-11 11:36:49.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/ia64/kernel/process.c 2012-11-06 17:43:40.000000000 +0000
+@@ -111,8 +111,8 @@ show_regs (struct pt_regs *regs)
+ unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
+
+ print_modules();
+- printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current),
+- smp_processor_id(), current->comm);
++ printk("\nPid: %d[#%u], CPU %d, comm: %20s\n", task_pid_nr(current),
++ current->xid, smp_processor_id(), current->comm);
+ printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s (%s)\n",
+ regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(),
+ init_utsname()->release);
+diff -NurpP --minimal linux-3.6.10/arch/ia64/kernel/ptrace.c linux-3.6.10-vs2.3.4.6/arch/ia64/kernel/ptrace.c
+--- linux-3.6.10/arch/ia64/kernel/ptrace.c 2012-05-21 16:06:26.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/ia64/kernel/ptrace.c 2012-10-04 16:46:59.000000000 +0000
+@@ -21,6 +21,7 @@
+ #include <linux/regset.h>
+ #include <linux/elf.h>
+ #include <linux/tracehook.h>
++#include <linux/vs_base.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/processor.h>
+diff -NurpP --minimal linux-3.6.10/arch/ia64/kernel/traps.c linux-3.6.10-vs2.3.4.6/arch/ia64/kernel/traps.c
+--- linux-3.6.10/arch/ia64/kernel/traps.c 2012-05-21 16:06:26.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/ia64/kernel/traps.c 2012-10-04 16:47:00.000000000 +0000
+@@ -60,8 +60,9 @@ die (const char *str, struct pt_regs *re
+ put_cpu();
+
+ if (++die.lock_owner_depth < 3) {
+- printk("%s[%d]: %s %ld [%d]\n",
+- current->comm, task_pid_nr(current), str, err, ++die_counter);
++ printk("%s[%d[#%u]]: %s %ld [%d]\n",
++ current->comm, task_pid_nr(current), current->xid,
++ str, err, ++die_counter);
+ if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV)
+ != NOTIFY_STOP)
+ show_regs(regs);
+@@ -324,8 +325,9 @@ handle_fpu_swa (int fp_fault, struct pt_
+ if ((last.count & 15) < 5 && (ia64_fetchadd(1, &last.count, acq) & 15) < 5) {
+ last.time = current_jiffies + 5 * HZ;
+ printk(KERN_WARNING
+- "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
+- current->comm, task_pid_nr(current), regs->cr_iip + ia64_psr(regs)->ri, isr);
++ "%s(%d[#%u]): floating-point assist fault at ip %016lx, isr %016lx\n",
++ current->comm, task_pid_nr(current), current->xid,
++ regs->cr_iip + ia64_psr(regs)->ri, isr);
+ }
+ }
+ }
+diff -NurpP --minimal linux-3.6.10/arch/m32r/kernel/traps.c linux-3.6.10-vs2.3.4.6/arch/m32r/kernel/traps.c
+--- linux-3.6.10/arch/m32r/kernel/traps.c 2012-05-21 16:06:26.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/m32r/kernel/traps.c 2012-10-04 16:47:00.000000000 +0000
+@@ -195,8 +195,9 @@ static void show_registers(struct pt_reg
+ } else {
+ printk("SPI: %08lx\n", sp);
+ }
+- printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)",
+- current->comm, task_pid_nr(current), 0xffff & i, 4096+(unsigned long)current);
++ printk("Process %s (pid: %d[#%u], process nr: %d, stackpage=%08lx)",
++ current->comm, task_pid_nr(current), current->xid,
++ 0xffff & i, 4096+(unsigned long)current);
+
+ /*
+ * When in-kernel, we also print out the stack and code at the
+diff -NurpP --minimal linux-3.6.10/arch/m68k/Kconfig linux-3.6.10-vs2.3.4.6/arch/m68k/Kconfig
+--- linux-3.6.10/arch/m68k/Kconfig 2012-10-04 13:26:51.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/m68k/Kconfig 2012-10-04 16:47:00.000000000 +0000
+@@ -129,6 +129,8 @@ source "fs/Kconfig"
+
+ source "arch/m68k/Kconfig.debug"
+
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+
+ source "crypto/Kconfig"
+diff -NurpP --minimal linux-3.6.10/arch/mips/Kconfig linux-3.6.10-vs2.3.4.6/arch/mips/Kconfig
+--- linux-3.6.10/arch/mips/Kconfig 2012-10-04 13:26:52.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/mips/Kconfig 2012-10-04 16:47:00.000000000 +0000
+@@ -2554,6 +2554,8 @@ source "fs/Kconfig"
+
+ source "arch/mips/Kconfig.debug"
+
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+
+ source "crypto/Kconfig"
+diff -NurpP --minimal linux-3.6.10/arch/mips/kernel/ptrace.c linux-3.6.10-vs2.3.4.6/arch/mips/kernel/ptrace.c
+--- linux-3.6.10/arch/mips/kernel/ptrace.c 2012-07-22 21:38:52.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/mips/kernel/ptrace.c 2012-10-04 16:47:00.000000000 +0000
+@@ -25,6 +25,7 @@
+ #include <linux/security.h>
+ #include <linux/audit.h>
+ #include <linux/seccomp.h>
++#include <linux/vs_base.h>
+
+ #include <asm/byteorder.h>
+ #include <asm/cpu.h>
+@@ -262,6 +263,9 @@ long arch_ptrace(struct task_struct *chi
+ void __user *datavp = (void __user *) data;
+ unsigned long __user *datalp = (void __user *) data;
+
++ if (!vx_check(vx_task_xid(child), VS_WATCH_P | VS_IDENT))
++ goto out;
++
+ switch (request) {
+ /* when I and D space are separate, these will need to be fixed. */
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+diff -NurpP --minimal linux-3.6.10/arch/mips/kernel/scall32-o32.S linux-3.6.10-vs2.3.4.6/arch/mips/kernel/scall32-o32.S
+--- linux-3.6.10/arch/mips/kernel/scall32-o32.S 2012-01-09 15:14:05.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/mips/kernel/scall32-o32.S 2012-10-04 16:47:00.000000000 +0000
+@@ -523,7 +523,7 @@ einval: li v0, -ENOSYS
+ sys sys_mq_timedreceive 5
+ sys sys_mq_notify 2 /* 4275 */
+ sys sys_mq_getsetattr 3
+- sys sys_ni_syscall 0 /* sys_vserver */
++ sys sys_vserver 3
+ sys sys_waitid 5
+ sys sys_ni_syscall 0 /* available, was setaltroot */
+ sys sys_add_key 5 /* 4280 */
+diff -NurpP --minimal linux-3.6.10/arch/mips/kernel/scall64-64.S linux-3.6.10-vs2.3.4.6/arch/mips/kernel/scall64-64.S
+--- linux-3.6.10/arch/mips/kernel/scall64-64.S 2012-01-09 15:14:05.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/mips/kernel/scall64-64.S 2012-10-04 16:47:00.000000000 +0000
+@@ -362,7 +362,7 @@ sys_call_table:
+ PTR sys_mq_timedreceive
+ PTR sys_mq_notify
+ PTR sys_mq_getsetattr /* 5235 */
+- PTR sys_ni_syscall /* sys_vserver */
++ PTR sys_vserver
+ PTR sys_waitid
+ PTR sys_ni_syscall /* available, was setaltroot */
+ PTR sys_add_key
+diff -NurpP --minimal linux-3.6.10/arch/mips/kernel/scall64-n32.S linux-3.6.10-vs2.3.4.6/arch/mips/kernel/scall64-n32.S
+--- linux-3.6.10/arch/mips/kernel/scall64-n32.S 2012-01-09 15:14:05.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/mips/kernel/scall64-n32.S 2012-10-04 16:47:00.000000000 +0000
+@@ -361,7 +361,7 @@ EXPORT(sysn32_call_table)
+ PTR compat_sys_mq_timedreceive
+ PTR compat_sys_mq_notify
+ PTR compat_sys_mq_getsetattr
+- PTR sys_ni_syscall /* 6240, sys_vserver */
++ PTR sys32_vserver /* 6240 */
+ PTR compat_sys_waitid
+ PTR sys_ni_syscall /* available, was setaltroot */
+ PTR sys_add_key
+diff -NurpP --minimal linux-3.6.10/arch/mips/kernel/scall64-o32.S linux-3.6.10-vs2.3.4.6/arch/mips/kernel/scall64-o32.S
+--- linux-3.6.10/arch/mips/kernel/scall64-o32.S 2012-01-09 15:14:05.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/mips/kernel/scall64-o32.S 2012-10-04 16:47:00.000000000 +0000
+@@ -480,7 +480,7 @@ sys_call_table:
+ PTR compat_sys_mq_timedreceive
+ PTR compat_sys_mq_notify /* 4275 */
+ PTR compat_sys_mq_getsetattr
+- PTR sys_ni_syscall /* sys_vserver */
++ PTR sys32_vserver
+ PTR sys_32_waitid
+ PTR sys_ni_syscall /* available, was setaltroot */
+ PTR sys_add_key /* 4280 */
+diff -NurpP --minimal linux-3.6.10/arch/mips/kernel/traps.c linux-3.6.10-vs2.3.4.6/arch/mips/kernel/traps.c
+--- linux-3.6.10/arch/mips/kernel/traps.c 2012-10-04 13:26:53.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/mips/kernel/traps.c 2012-10-04 16:47:00.000000000 +0000
+@@ -347,9 +347,10 @@ void show_registers(struct pt_regs *regs
+
+ __show_regs(regs);
+ print_modules();
+- printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
+- current->comm, current->pid, current_thread_info(), current,
+- field, current_thread_info()->tp_value);
++ printk("Process %s (pid: %d:#%u, threadinfo=%p, task=%p, tls=%0*lx)\n",
++ current->comm, task_pid_nr(current), current->xid,
++ current_thread_info(), current,
++ field, current_thread_info()->tp_value);
+ if (cpu_has_userlocal) {
+ unsigned long tls;
+
+diff -NurpP --minimal linux-3.6.10/arch/parisc/Kconfig linux-3.6.10-vs2.3.4.6/arch/parisc/Kconfig
+--- linux-3.6.10/arch/parisc/Kconfig 2012-07-22 21:38:52.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/parisc/Kconfig 2012-10-04 16:47:00.000000000 +0000
+@@ -281,6 +281,8 @@ source "fs/Kconfig"
+
+ source "arch/parisc/Kconfig.debug"
+
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+
+ source "crypto/Kconfig"
+diff -NurpP --minimal linux-3.6.10/arch/parisc/kernel/syscall_table.S linux-3.6.10-vs2.3.4.6/arch/parisc/kernel/syscall_table.S
+--- linux-3.6.10/arch/parisc/kernel/syscall_table.S 2011-10-24 16:45:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/parisc/kernel/syscall_table.S 2012-10-04 16:47:00.000000000 +0000
+@@ -361,7 +361,7 @@
+ ENTRY_COMP(mbind) /* 260 */
+ ENTRY_COMP(get_mempolicy)
+ ENTRY_COMP(set_mempolicy)
+- ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */
++ ENTRY_DIFF(vserver)
+ ENTRY_SAME(add_key)
+ ENTRY_SAME(request_key) /* 265 */
+ ENTRY_SAME(keyctl)
+diff -NurpP --minimal linux-3.6.10/arch/parisc/kernel/traps.c linux-3.6.10-vs2.3.4.6/arch/parisc/kernel/traps.c
+--- linux-3.6.10/arch/parisc/kernel/traps.c 2012-05-21 16:06:28.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/parisc/kernel/traps.c 2012-10-04 16:47:00.000000000 +0000
+@@ -235,8 +235,9 @@ void die_if_kernel(char *str, struct pt_
+ if (err == 0)
+ return; /* STFU */
+
+- printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
+- current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
++ printk(KERN_CRIT "%s (pid %d:#%u): %s (code %ld) at " RFMT "\n",
++ current->comm, task_pid_nr(current), current->xid,
++ str, err, regs->iaoq[0]);
+ #ifdef PRINT_USER_FAULTS
+ /* XXX for debugging only */
+ show_regs(regs);
+@@ -269,8 +270,8 @@ void die_if_kernel(char *str, struct pt_
+ pdc_console_restart();
+
+ if (err)
+- printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
+- current->comm, task_pid_nr(current), str, err);
++ printk(KERN_CRIT "%s (pid %d:#%u): %s (code %ld)\n",
++ current->comm, task_pid_nr(current), current->xid, str, err);
+
+ /* Wot's wrong wif bein' racy? */
+ if (current->thread.flags & PARISC_KERNEL_DEATH) {
+diff -NurpP --minimal linux-3.6.10/arch/parisc/mm/fault.c linux-3.6.10-vs2.3.4.6/arch/parisc/mm/fault.c
+--- linux-3.6.10/arch/parisc/mm/fault.c 2010-08-02 14:52:06.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/parisc/mm/fault.c 2012-10-04 16:47:00.000000000 +0000
+@@ -237,8 +237,9 @@ bad_area:
+
+ #ifdef PRINT_USER_FAULTS
+ printk(KERN_DEBUG "\n");
+- printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n",
+- task_pid_nr(tsk), tsk->comm, code, address);
++ printk(KERN_DEBUG "do_page_fault() pid=%d:#%u "
++ "command='%s' type=%lu address=0x%08lx\n",
++ task_pid_nr(tsk), tsk->xid, tsk->comm, code, address);
+ if (vma) {
+ printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n",
+ vma->vm_start, vma->vm_end);
+diff -NurpP --minimal linux-3.6.10/arch/powerpc/Kconfig linux-3.6.10-vs2.3.4.6/arch/powerpc/Kconfig
+--- linux-3.6.10/arch/powerpc/Kconfig 2012-10-04 13:26:53.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/powerpc/Kconfig 2012-10-04 16:47:00.000000000 +0000
+@@ -996,6 +996,8 @@ source "lib/Kconfig"
+
+ source "arch/powerpc/Kconfig.debug"
+
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+
+ config KEYS_COMPAT
+diff -NurpP --minimal linux-3.6.10/arch/powerpc/include/asm/unistd.h linux-3.6.10-vs2.3.4.6/arch/powerpc/include/asm/unistd.h
+--- linux-3.6.10/arch/powerpc/include/asm/unistd.h 2012-10-04 13:26:53.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/powerpc/include/asm/unistd.h 2012-10-04 16:47:00.000000000 +0000
+@@ -275,7 +275,7 @@
+ #endif
+ #define __NR_rtas 255
+ #define __NR_sys_debug_setcontext 256
+-/* Number 257 is reserved for vserver */
++#define __NR_vserver 257
+ #define __NR_migrate_pages 258
+ #define __NR_mbind 259
+ #define __NR_get_mempolicy 260
+diff -NurpP --minimal linux-3.6.10/arch/powerpc/kernel/process.c linux-3.6.10-vs2.3.4.6/arch/powerpc/kernel/process.c
+--- linux-3.6.10/arch/powerpc/kernel/process.c 2012-10-04 13:26:54.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/powerpc/kernel/process.c 2012-10-04 16:47:00.000000000 +0000
+@@ -661,8 +661,9 @@ void show_regs(struct pt_regs * regs)
+ #else
+ printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
+ #endif
+- printk("TASK = %p[%d] '%s' THREAD: %p",
+- current, task_pid_nr(current), current->comm, task_thread_info(current));
++ printk("TASK = %p[%d,#%u] '%s' THREAD: %p",
++ current, task_pid_nr(current), current->xid,
++ current->comm, task_thread_info(current));
+
+ #ifdef CONFIG_SMP
+ printk(" CPU: %d", raw_smp_processor_id());
+diff -NurpP --minimal linux-3.6.10/arch/powerpc/kernel/traps.c linux-3.6.10-vs2.3.4.6/arch/powerpc/kernel/traps.c
+--- linux-3.6.10/arch/powerpc/kernel/traps.c 2012-10-04 13:26:54.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/powerpc/kernel/traps.c 2012-10-04 16:47:00.000000000 +0000
+@@ -1119,8 +1119,9 @@ void nonrecoverable_exception(struct pt_
+
+ void trace_syscall(struct pt_regs *regs)
+ {
+- printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
+- current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
++ printk("Task: %p(%d[#%u]), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
++ current, task_pid_nr(current), current->xid,
++ regs->nip, regs->link, regs->gpr[0],
+ regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
+ }
+
+diff -NurpP --minimal linux-3.6.10/arch/s390/Kconfig linux-3.6.10-vs2.3.4.6/arch/s390/Kconfig
+--- linux-3.6.10/arch/s390/Kconfig 2012-10-04 13:26:54.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/s390/Kconfig 2012-10-04 16:47:00.000000000 +0000
+@@ -634,6 +634,8 @@ source "fs/Kconfig"
+
+ source "arch/s390/Kconfig.debug"
+
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+
+ source "crypto/Kconfig"
+diff -NurpP --minimal linux-3.6.10/arch/s390/include/asm/tlb.h linux-3.6.10-vs2.3.4.6/arch/s390/include/asm/tlb.h
+--- linux-3.6.10/arch/s390/include/asm/tlb.h 2012-07-22 21:38:59.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/s390/include/asm/tlb.h 2012-10-04 16:47:00.000000000 +0000
+@@ -24,6 +24,7 @@
+ #include <linux/mm.h>
+ #include <linux/pagemap.h>
+ #include <linux/swap.h>
++
+ #include <asm/processor.h>
+ #include <asm/pgalloc.h>
+ #include <asm/tlbflush.h>
+diff -NurpP --minimal linux-3.6.10/arch/s390/include/asm/unistd.h linux-3.6.10-vs2.3.4.6/arch/s390/include/asm/unistd.h
+--- linux-3.6.10/arch/s390/include/asm/unistd.h 2012-10-04 13:26:55.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/s390/include/asm/unistd.h 2012-10-04 16:47:00.000000000 +0000
+@@ -200,7 +200,7 @@
+ #define __NR_clock_gettime (__NR_timer_create+6)
+ #define __NR_clock_getres (__NR_timer_create+7)
+ #define __NR_clock_nanosleep (__NR_timer_create+8)
+-/* Number 263 is reserved for vserver */
++#define __NR_vserver 263
+ #define __NR_statfs64 265
+ #define __NR_fstatfs64 266
+ #define __NR_remap_file_pages 267
+diff -NurpP --minimal linux-3.6.10/arch/s390/kernel/ptrace.c linux-3.6.10-vs2.3.4.6/arch/s390/kernel/ptrace.c
+--- linux-3.6.10/arch/s390/kernel/ptrace.c 2012-10-04 13:26:55.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/s390/kernel/ptrace.c 2012-10-04 16:47:00.000000000 +0000
+@@ -21,6 +21,7 @@
+ #include <linux/tracehook.h>
+ #include <linux/seccomp.h>
+ #include <linux/compat.h>
++#include <linux/vs_base.h>
+ #include <trace/syscall.h>
+ #include <asm/segment.h>
+ #include <asm/page.h>
+diff -NurpP --minimal linux-3.6.10/arch/s390/kernel/syscalls.S linux-3.6.10-vs2.3.4.6/arch/s390/kernel/syscalls.S
+--- linux-3.6.10/arch/s390/kernel/syscalls.S 2012-01-09 15:14:06.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/s390/kernel/syscalls.S 2012-10-04 16:47:00.000000000 +0000
+@@ -271,7 +271,7 @@ SYSCALL(sys_clock_settime,sys_clock_sett
+ SYSCALL(sys_clock_gettime,sys_clock_gettime,sys32_clock_gettime_wrapper) /* 260 */
+ SYSCALL(sys_clock_getres,sys_clock_getres,sys32_clock_getres_wrapper)
+ SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,sys32_clock_nanosleep_wrapper)
+-NI_SYSCALL /* reserved for vserver */
++SYSCALL(sys_vserver,sys_vserver,sys32_vserver)
+ SYSCALL(sys_s390_fadvise64_64,sys_ni_syscall,sys32_fadvise64_64_wrapper)
+ SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64_wrapper)
+ SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64_wrapper)
+diff -NurpP --minimal linux-3.6.10/arch/sh/Kconfig linux-3.6.10-vs2.3.4.6/arch/sh/Kconfig
+--- linux-3.6.10/arch/sh/Kconfig 2012-10-04 13:26:55.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/sh/Kconfig 2012-10-04 16:47:00.000000000 +0000
+@@ -940,6 +940,8 @@ source "fs/Kconfig"
+
+ source "arch/sh/Kconfig.debug"
+
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+
+ source "crypto/Kconfig"
+diff -NurpP --minimal linux-3.6.10/arch/sh/kernel/irq.c linux-3.6.10-vs2.3.4.6/arch/sh/kernel/irq.c
+--- linux-3.6.10/arch/sh/kernel/irq.c 2012-10-04 13:26:56.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/sh/kernel/irq.c 2012-10-04 16:47:00.000000000 +0000
+@@ -14,6 +14,7 @@
+ #include <linux/ftrace.h>
+ #include <linux/delay.h>
+ #include <linux/ratelimit.h>
++// #include <linux/vs_context.h>
+ #include <asm/processor.h>
+ #include <asm/machvec.h>
+ #include <asm/uaccess.h>
+diff -NurpP --minimal linux-3.6.10/arch/sparc/Kconfig linux-3.6.10-vs2.3.4.6/arch/sparc/Kconfig
+--- linux-3.6.10/arch/sparc/Kconfig 2012-10-04 13:26:58.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/sparc/Kconfig 2012-10-04 16:47:00.000000000 +0000
+@@ -593,6 +593,8 @@ source "fs/Kconfig"
+
+ source "arch/sparc/Kconfig.debug"
+
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+
+ source "crypto/Kconfig"
+diff -NurpP --minimal linux-3.6.10/arch/sparc/include/asm/unistd.h linux-3.6.10-vs2.3.4.6/arch/sparc/include/asm/unistd.h
+--- linux-3.6.10/arch/sparc/include/asm/unistd.h 2012-10-04 13:26:59.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/sparc/include/asm/unistd.h 2012-10-04 16:47:00.000000000 +0000
+@@ -335,7 +335,7 @@
+ #define __NR_timer_getoverrun 264
+ #define __NR_timer_delete 265
+ #define __NR_timer_create 266
+-/* #define __NR_vserver 267 Reserved for VSERVER */
++#define __NR_vserver 267
+ #define __NR_io_setup 268
+ #define __NR_io_destroy 269
+ #define __NR_io_submit 270
+diff -NurpP --minimal linux-3.6.10/arch/sparc/kernel/systbls_32.S linux-3.6.10-vs2.3.4.6/arch/sparc/kernel/systbls_32.S
+--- linux-3.6.10/arch/sparc/kernel/systbls_32.S 2012-01-09 15:14:09.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/sparc/kernel/systbls_32.S 2012-10-04 16:47:00.000000000 +0000
+@@ -70,7 +70,7 @@ sys_call_table:
+ /*250*/ .long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_ni_syscall
+ /*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
+ /*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
+-/*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
++/*265*/ .long sys_timer_delete, sys_timer_create, sys_vserver, sys_io_setup, sys_io_destroy
+ /*270*/ .long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
+ /*275*/ .long sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
+ /*280*/ .long sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat
+diff -NurpP --minimal linux-3.6.10/arch/sparc/kernel/systbls_64.S linux-3.6.10-vs2.3.4.6/arch/sparc/kernel/systbls_64.S
+--- linux-3.6.10/arch/sparc/kernel/systbls_64.S 2012-07-22 21:39:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/sparc/kernel/systbls_64.S 2012-10-04 16:47:00.000000000 +0000
+@@ -71,7 +71,7 @@ sys_call_table32:
+ /*250*/ .word sys_mremap, compat_sys_sysctl, sys32_getsid, sys_fdatasync, sys_nis_syscall
+ .word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
+ /*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
+- .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
++ .word sys_timer_delete, compat_sys_timer_create, sys32_vserver, compat_sys_io_setup, sys_io_destroy
+ /*270*/ .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink
+ .word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
+ /*280*/ .word sys32_tee, sys_add_key, sys_request_key, compat_sys_keyctl, compat_sys_openat
+@@ -148,7 +148,7 @@ sys_call_table:
+ /*250*/ .word sys_64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nis_syscall
+ .word sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
+ /*260*/ .word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
+- .word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy
++ .word sys_timer_delete, sys_timer_create, sys_vserver, sys_io_setup, sys_io_destroy
+ /*270*/ .word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
+ .word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
+ /*280*/ .word sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat
+diff -NurpP --minimal linux-3.6.10/arch/um/Kconfig.rest linux-3.6.10-vs2.3.4.6/arch/um/Kconfig.rest
+--- linux-3.6.10/arch/um/Kconfig.rest 2012-01-09 15:14:09.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/um/Kconfig.rest 2012-10-04 16:47:00.000000000 +0000
+@@ -12,6 +12,8 @@ source "arch/um/Kconfig.net"
+
+ source "fs/Kconfig"
+
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+
+ source "crypto/Kconfig"
+diff -NurpP --minimal linux-3.6.10/arch/um/include/shared/kern_constants.h linux-3.6.10-vs2.3.4.6/arch/um/include/shared/kern_constants.h
+--- linux-3.6.10/arch/um/include/shared/kern_constants.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/um/include/shared/kern_constants.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1 @@
++#include "../../../../include/generated/asm-offsets.h"
+diff -NurpP --minimal linux-3.6.10/arch/um/include/shared/user_constants.h linux-3.6.10-vs2.3.4.6/arch/um/include/shared/user_constants.h
+--- linux-3.6.10/arch/um/include/shared/user_constants.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/um/include/shared/user_constants.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,40 @@
++/*
++ * DO NOT MODIFY.
++ *
++ * This file was generated by arch/um/Makefile
++ *
++ */
++
++#define HOST_SC_CR2 176 /* offsetof(struct sigcontext, cr2) # */
++#define HOST_SC_ERR 152 /* offsetof(struct sigcontext, err) # */
++#define HOST_SC_TRAPNO 160 /* offsetof(struct sigcontext, trapno) # */
++#define HOST_FP_SIZE 64 /* sizeof(struct _fpstate) / sizeof(unsigned long) # */
++#define HOST_RBX 5 /* RBX # */
++#define HOST_RCX 11 /* RCX # */
++#define HOST_RDI 14 /* RDI # */
++#define HOST_RSI 13 /* RSI # */
++#define HOST_RDX 12 /* RDX # */
++#define HOST_RBP 4 /* RBP # */
++#define HOST_RAX 10 /* RAX # */
++#define HOST_R8 9 /* R8 # */
++#define HOST_R9 8 /* R9 # */
++#define HOST_R10 7 /* R10 # */
++#define HOST_R11 6 /* R11 # */
++#define HOST_R12 3 /* R12 # */
++#define HOST_R13 2 /* R13 # */
++#define HOST_R14 1 /* R14 # */
++#define HOST_R15 0 /* R15 # */
++#define HOST_ORIG_RAX 15 /* ORIG_RAX # */
++#define HOST_CS 17 /* CS # */
++#define HOST_SS 20 /* SS # */
++#define HOST_EFLAGS 18 /* EFLAGS # */
++#define HOST_IP 16 /* RIP # */
++#define HOST_SP 19 /* RSP # */
++#define UM_FRAME_SIZE 216 /* sizeof(struct user_regs_struct) # */
++#define UM_POLLIN 1 /* POLLIN # */
++#define UM_POLLPRI 2 /* POLLPRI # */
++#define UM_POLLOUT 4 /* POLLOUT # */
++#define UM_PROT_READ 1 /* PROT_READ # */
++#define UM_PROT_WRITE 2 /* PROT_WRITE # */
++#define UM_PROT_EXEC 4 /* PROT_EXEC # */
++
+diff -NurpP --minimal linux-3.6.10/arch/x86/Kconfig linux-3.6.10-vs2.3.4.6/arch/x86/Kconfig
+--- linux-3.6.10/arch/x86/Kconfig 2012-10-04 13:27:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/x86/Kconfig 2012-10-04 16:47:00.000000000 +0000
+@@ -2228,6 +2228,8 @@ source "fs/Kconfig"
+
+ source "arch/x86/Kconfig.debug"
+
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+
+ source "crypto/Kconfig"
+diff -NurpP --minimal linux-3.6.10/arch/x86/syscalls/syscall_32.tbl linux-3.6.10-vs2.3.4.6/arch/x86/syscalls/syscall_32.tbl
+--- linux-3.6.10/arch/x86/syscalls/syscall_32.tbl 2012-07-22 21:39:02.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/x86/syscalls/syscall_32.tbl 2012-10-04 16:47:00.000000000 +0000
+@@ -279,7 +279,7 @@
+ 270 i386 tgkill sys_tgkill
+ 271 i386 utimes sys_utimes compat_sys_utimes
+ 272 i386 fadvise64_64 sys_fadvise64_64 sys32_fadvise64_64
+-273 i386 vserver
++273 i386 vserver sys_vserver sys32_vserver
+ 274 i386 mbind sys_mbind
+ 275 i386 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
+ 276 i386 set_mempolicy sys_set_mempolicy
+diff -NurpP --minimal linux-3.6.10/arch/x86/syscalls/syscall_64.tbl linux-3.6.10-vs2.3.4.6/arch/x86/syscalls/syscall_64.tbl
+--- linux-3.6.10/arch/x86/syscalls/syscall_64.tbl 2012-10-04 13:27:01.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/arch/x86/syscalls/syscall_64.tbl 2012-10-04 16:47:00.000000000 +0000
+@@ -242,7 +242,7 @@
+ 233 common epoll_ctl sys_epoll_ctl
+ 234 common tgkill sys_tgkill
+ 235 common utimes sys_utimes
+-236 64 vserver
++236 64 vserver sys_vserver
+ 237 common mbind sys_mbind
+ 238 common set_mempolicy sys_set_mempolicy
+ 239 common get_mempolicy sys_get_mempolicy
+diff -NurpP --minimal linux-3.6.10/drivers/block/Kconfig linux-3.6.10-vs2.3.4.6/drivers/block/Kconfig
+--- linux-3.6.10/drivers/block/Kconfig 2012-05-21 16:06:43.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/drivers/block/Kconfig 2012-10-04 16:47:00.000000000 +0000
+@@ -290,6 +290,13 @@ config BLK_DEV_CRYPTOLOOP
+
+ source "drivers/block/drbd/Kconfig"
+
++config BLK_DEV_VROOT
++ tristate "Virtual Root device support"
++ depends on QUOTACTL
++ ---help---
++ Saying Y here will allow you to use quota/fs ioctls on a shared
++ partition within a virtual server without compromising security.
++
+ config BLK_DEV_NBD
+ tristate "Network block device support"
+ depends on NET
+diff -NurpP --minimal linux-3.6.10/drivers/block/Makefile linux-3.6.10-vs2.3.4.6/drivers/block/Makefile
+--- linux-3.6.10/drivers/block/Makefile 2012-03-19 18:46:52.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/drivers/block/Makefile 2012-10-04 16:47:00.000000000 +0000
+@@ -35,6 +35,7 @@ obj-$(CONFIG_VIODASD) += viodasd.o
+ obj-$(CONFIG_BLK_DEV_SX8) += sx8.o
+ obj-$(CONFIG_BLK_DEV_UB) += ub.o
+ obj-$(CONFIG_BLK_DEV_HD) += hd.o
++obj-$(CONFIG_BLK_DEV_VROOT) += vroot.o
+
+ obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
+ obj-$(CONFIG_XEN_BLKDEV_BACKEND) += xen-blkback/
+diff -NurpP --minimal linux-3.6.10/drivers/block/loop.c linux-3.6.10-vs2.3.4.6/drivers/block/loop.c
+--- linux-3.6.10/drivers/block/loop.c 2012-07-22 21:39:02.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/drivers/block/loop.c 2012-10-04 16:47:00.000000000 +0000
+@@ -76,6 +76,7 @@
+ #include <linux/sysfs.h>
+ #include <linux/miscdevice.h>
+ #include <linux/falloc.h>
++#include <linux/vs_context.h>
+
+ #include <asm/uaccess.h>
+
+@@ -869,6 +870,7 @@ static int loop_set_fd(struct loop_devic
+ lo->lo_blocksize = lo_blocksize;
+ lo->lo_device = bdev;
+ lo->lo_flags = lo_flags;
++ lo->lo_xid = vx_current_xid();
+ lo->lo_backing_file = file;
+ lo->transfer = transfer_none;
+ lo->ioctl = NULL;
+@@ -1001,6 +1003,7 @@ static int loop_clr_fd(struct loop_devic
+ lo->lo_sizelimit = 0;
+ lo->lo_encrypt_key_size = 0;
+ lo->lo_thread = NULL;
++ lo->lo_xid = 0;
+ memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
+ memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
+ memset(lo->lo_file_name, 0, LO_NAME_SIZE);
+@@ -1042,7 +1045,7 @@ loop_set_status(struct loop_device *lo,
+
+ if (lo->lo_encrypt_key_size &&
+ lo->lo_key_owner != uid &&
+- !capable(CAP_SYS_ADMIN))
++ !vx_capable(CAP_SYS_ADMIN, VXC_ADMIN_CLOOP))
+ return -EPERM;
+ if (lo->lo_state != Lo_bound)
+ return -ENXIO;
+@@ -1132,7 +1135,8 @@ loop_get_status(struct loop_device *lo,
+ memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
+ info->lo_encrypt_type =
+ lo->lo_encryption ? lo->lo_encryption->number : 0;
+- if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
++ if (lo->lo_encrypt_key_size &&
++ vx_capable(CAP_SYS_ADMIN, VXC_ADMIN_CLOOP)) {
+ info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
+ memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
+ lo->lo_encrypt_key_size);
+@@ -1492,6 +1496,11 @@ static int lo_open(struct block_device *
+ goto out;
+ }
+
++ if (!vx_check(lo->lo_xid, VS_IDENT|VS_HOSTID|VS_ADMIN_P)) {
++ err = -EACCES;
++ goto out;
++ }
++
+ mutex_lock(&lo->lo_ctl_mutex);
+ lo->lo_refcnt++;
+ mutex_unlock(&lo->lo_ctl_mutex);
+diff -NurpP --minimal linux-3.6.10/drivers/block/vroot.c linux-3.6.10-vs2.3.4.6/drivers/block/vroot.c
+--- linux-3.6.10/drivers/block/vroot.c 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/drivers/block/vroot.c 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,291 @@
++/*
++ * linux/drivers/block/vroot.c
++ *
++ * written by Herbert Pötzl, 9/11/2002
++ * ported to 2.6.10 by Herbert Pötzl, 30/12/2004
++ *
++ * based on the loop.c code by Theodore Ts'o.
++ *
++ * Copyright (C) 2002-2007 by Herbert Pötzl.
++ * Redistribution of this file is permitted under the
++ * GNU General Public License.
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/file.h>
++#include <linux/major.h>
++#include <linux/blkdev.h>
++#include <linux/slab.h>
++
++#include <linux/vroot.h>
++#include <linux/vs_context.h>
++
++
++static int max_vroot = 8;
++
++static struct vroot_device *vroot_dev;
++static struct gendisk **disks;
++
++
++static int vroot_set_dev(
++ struct vroot_device *vr,
++ struct block_device *bdev,
++ unsigned int arg)
++{
++ struct block_device *real_bdev;
++ struct file *file;
++ struct inode *inode;
++ int error;
++
++ error = -EBUSY;
++ if (vr->vr_state != Vr_unbound)
++ goto out;
++
++ error = -EBADF;
++ file = fget(arg);
++ if (!file)
++ goto out;
++
++ error = -EINVAL;
++ inode = file->f_dentry->d_inode;
++
++
++ if (S_ISBLK(inode->i_mode)) {
++ real_bdev = inode->i_bdev;
++ vr->vr_device = real_bdev;
++ __iget(real_bdev->bd_inode);
++ } else
++ goto out_fput;
++
++ vxdprintk(VXD_CBIT(misc, 0),
++ "vroot[%d]_set_dev: dev=" VXF_DEV,
++ vr->vr_number, VXD_DEV(real_bdev));
++
++ vr->vr_state = Vr_bound;
++ error = 0;
++
++ out_fput:
++ fput(file);
++ out:
++ return error;
++}
++
++static int vroot_clr_dev(
++ struct vroot_device *vr,
++ struct block_device *bdev)
++{
++ struct block_device *real_bdev;
++
++ if (vr->vr_state != Vr_bound)
++ return -ENXIO;
++ if (vr->vr_refcnt > 1) /* we needed one fd for the ioctl */
++ return -EBUSY;
++
++ real_bdev = vr->vr_device;
++
++ vxdprintk(VXD_CBIT(misc, 0),
++ "vroot[%d]_clr_dev: dev=" VXF_DEV,
++ vr->vr_number, VXD_DEV(real_bdev));
++
++ bdput(real_bdev);
++ vr->vr_state = Vr_unbound;
++ vr->vr_device = NULL;
++ return 0;
++}
++
++
++static int vr_ioctl(struct block_device *bdev, fmode_t mode,
++ unsigned int cmd, unsigned long arg)
++{
++ struct vroot_device *vr = bdev->bd_disk->private_data;
++ int err;
++
++ down(&vr->vr_ctl_mutex);
++ switch (cmd) {
++ case VROOT_SET_DEV:
++ err = vroot_set_dev(vr, bdev, arg);
++ break;
++ case VROOT_CLR_DEV:
++ err = vroot_clr_dev(vr, bdev);
++ break;
++ default:
++ err = -EINVAL;
++ break;
++ }
++ up(&vr->vr_ctl_mutex);
++ return err;
++}
++
++static int vr_open(struct block_device *bdev, fmode_t mode)
++{
++ struct vroot_device *vr = bdev->bd_disk->private_data;
++
++ down(&vr->vr_ctl_mutex);
++ vr->vr_refcnt++;
++ up(&vr->vr_ctl_mutex);
++ return 0;
++}
++
++static int vr_release(struct gendisk *disk, fmode_t mode)
++{
++ struct vroot_device *vr = disk->private_data;
++
++ down(&vr->vr_ctl_mutex);
++ --vr->vr_refcnt;
++ up(&vr->vr_ctl_mutex);
++ return 0;
++}
++
++static struct block_device_operations vr_fops = {
++ .owner = THIS_MODULE,
++ .open = vr_open,
++ .release = vr_release,
++ .ioctl = vr_ioctl,
++};
++
++static void vroot_make_request(struct request_queue *q, struct bio *bio)
++{
++ printk("vroot_make_request %p, %p\n", q, bio);
++ bio_io_error(bio);
++}
++
++struct block_device *__vroot_get_real_bdev(struct block_device *bdev)
++{
++ struct inode *inode = bdev->bd_inode;
++ struct vroot_device *vr;
++ struct block_device *real_bdev;
++ int minor = iminor(inode);
++
++ vr = &vroot_dev[minor];
++ real_bdev = vr->vr_device;
++
++ vxdprintk(VXD_CBIT(misc, 0),
++ "vroot[%d]_get_real_bdev: dev=" VXF_DEV,
++ vr->vr_number, VXD_DEV(real_bdev));
++
++ if (vr->vr_state != Vr_bound)
++ return ERR_PTR(-ENXIO);
++
++ __iget(real_bdev->bd_inode);
++ return real_bdev;
++}
++
++
++
++/*
++ * And now the modules code and kernel interface.
++ */
++
++module_param(max_vroot, int, 0);
++
++MODULE_PARM_DESC(max_vroot, "Maximum number of vroot devices (1-256)");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS_BLOCKDEV_MAJOR(VROOT_MAJOR);
++
++MODULE_AUTHOR ("Herbert Pötzl");
++MODULE_DESCRIPTION ("Virtual Root Device Mapper");
++
++
++int __init vroot_init(void)
++{
++ int err, i;
++
++ if (max_vroot < 1 || max_vroot > 256) {
++ max_vroot = MAX_VROOT_DEFAULT;
++ printk(KERN_WARNING "vroot: invalid max_vroot "
++ "(must be between 1 and 256), "
++ "using default (%d)\n", max_vroot);
++ }
++
++ if (register_blkdev(VROOT_MAJOR, "vroot"))
++ return -EIO;
++
++ err = -ENOMEM;
++ vroot_dev = kmalloc(max_vroot * sizeof(struct vroot_device), GFP_KERNEL);
++ if (!vroot_dev)
++ goto out_mem1;
++ memset(vroot_dev, 0, max_vroot * sizeof(struct vroot_device));
++
++ disks = kmalloc(max_vroot * sizeof(struct gendisk *), GFP_KERNEL);
++ if (!disks)
++ goto out_mem2;
++
++ for (i = 0; i < max_vroot; i++) {
++ disks[i] = alloc_disk(1);
++ if (!disks[i])
++ goto out_mem3;
++ disks[i]->queue = blk_alloc_queue(GFP_KERNEL);
++ if (!disks[i]->queue)
++ goto out_mem3;
++ blk_queue_make_request(disks[i]->queue, vroot_make_request);
++ }
++
++ for (i = 0; i < max_vroot; i++) {
++ struct vroot_device *vr = &vroot_dev[i];
++ struct gendisk *disk = disks[i];
++
++ memset(vr, 0, sizeof(*vr));
++ sema_init(&vr->vr_ctl_mutex, 1);
++ vr->vr_number = i;
++ disk->major = VROOT_MAJOR;
++ disk->first_minor = i;
++ disk->fops = &vr_fops;
++ sprintf(disk->disk_name, "vroot%d", i);
++ disk->private_data = vr;
++ }
++
++ err = register_vroot_grb(&__vroot_get_real_bdev);
++ if (err)
++ goto out_mem3;
++
++ for (i = 0; i < max_vroot; i++)
++ add_disk(disks[i]);
++ printk(KERN_INFO "vroot: loaded (max %d devices)\n", max_vroot);
++ return 0;
++
++out_mem3:
++ while (i--)
++ put_disk(disks[i]);
++ kfree(disks);
++out_mem2:
++ kfree(vroot_dev);
++out_mem1:
++ unregister_blkdev(VROOT_MAJOR, "vroot");
++ printk(KERN_ERR "vroot: ran out of memory\n");
++ return err;
++}
++
++void vroot_exit(void)
++{
++ int i;
++
++ if (unregister_vroot_grb(&__vroot_get_real_bdev))
++ printk(KERN_WARNING "vroot: cannot unregister grb\n");
++
++ for (i = 0; i < max_vroot; i++) {
++ del_gendisk(disks[i]);
++ put_disk(disks[i]);
++ }
++ unregister_blkdev(VROOT_MAJOR, "vroot");
++
++ kfree(disks);
++ kfree(vroot_dev);
++}
++
++module_init(vroot_init);
++module_exit(vroot_exit);
++
++#ifndef MODULE
++
++static int __init max_vroot_setup(char *str)
++{
++ max_vroot = simple_strtol(str, NULL, 0);
++ return 1;
++}
++
++__setup("max_vroot=", max_vroot_setup);
++
++#endif
++
+diff -NurpP --minimal linux-3.6.10/drivers/infiniband/Kconfig linux-3.6.10-vs2.3.4.6/drivers/infiniband/Kconfig
+--- linux-3.6.10/drivers/infiniband/Kconfig 2012-07-22 21:39:06.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/drivers/infiniband/Kconfig 2012-10-04 16:47:00.000000000 +0000
+@@ -39,7 +39,7 @@ config INFINIBAND_USER_MEM
+ config INFINIBAND_ADDR_TRANS
+ bool
+ depends on INET
+- depends on !(INFINIBAND = y && IPV6 = m)
++ depends on !(INFINIBAND = y && IPV6 = y)
+ default y
+
+ source "drivers/infiniband/hw/mthca/Kconfig"
+diff -NurpP --minimal linux-3.6.10/drivers/infiniband/core/addr.c linux-3.6.10-vs2.3.4.6/drivers/infiniband/core/addr.c
+--- linux-3.6.10/drivers/infiniband/core/addr.c 2012-10-04 13:27:10.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/drivers/infiniband/core/addr.c 2012-10-04 16:47:00.000000000 +0000
+@@ -263,7 +263,7 @@ static int addr6_resolve(struct sockaddr
+
+ if (ipv6_addr_any(&fl6.saddr)) {
+ ret = ipv6_dev_get_saddr(&init_net, ip6_dst_idev(dst)->dev,
+- &fl6.daddr, 0, &fl6.saddr);
++ &fl6.daddr, 0, &fl6.saddr, NULL);
+ if (ret)
+ goto put;
+
+diff -NurpP --minimal linux-3.6.10/drivers/md/dm-ioctl.c linux-3.6.10-vs2.3.4.6/drivers/md/dm-ioctl.c
+--- linux-3.6.10/drivers/md/dm-ioctl.c 2012-10-04 13:27:11.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/drivers/md/dm-ioctl.c 2012-10-04 16:47:00.000000000 +0000
+@@ -16,6 +16,7 @@
+ #include <linux/dm-ioctl.h>
+ #include <linux/hdreg.h>
+ #include <linux/compat.h>
++#include <linux/vs_context.h>
+
+ #include <asm/uaccess.h>
+
+@@ -106,7 +107,8 @@ static struct hash_cell *__get_name_cell
+ unsigned int h = hash_str(str);
+
+ list_for_each_entry (hc, _name_buckets + h, name_list)
+- if (!strcmp(hc->name, str)) {
++ if (vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT) &&
++ !strcmp(hc->name, str)) {
+ dm_get(hc->md);
+ return hc;
+ }
+@@ -120,7 +122,8 @@ static struct hash_cell *__get_uuid_cell
+ unsigned int h = hash_str(str);
+
+ list_for_each_entry (hc, _uuid_buckets + h, uuid_list)
+- if (!strcmp(hc->uuid, str)) {
++ if (vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT) &&
++ !strcmp(hc->uuid, str)) {
+ dm_get(hc->md);
+ return hc;
+ }
+@@ -131,13 +134,15 @@ static struct hash_cell *__get_uuid_cell
+ static struct hash_cell *__get_dev_cell(uint64_t dev)
+ {
+ struct mapped_device *md;
+- struct hash_cell *hc;
++ struct hash_cell *hc = NULL;
+
+ md = dm_get_md(huge_decode_dev(dev));
+ if (!md)
+ return NULL;
+
+- hc = dm_get_mdptr(md);
++ if (vx_check(dm_get_xid(md), VS_WATCH_P | VS_IDENT))
++ hc = dm_get_mdptr(md);
++
+ if (!hc) {
+ dm_put(md);
+ return NULL;
+@@ -445,6 +450,9 @@ typedef int (*ioctl_fn)(struct dm_ioctl
+
+ static int remove_all(struct dm_ioctl *param, size_t param_size)
+ {
++ if (!vx_check(0, VS_ADMIN))
++ return -EPERM;
++
+ dm_hash_remove_all(1);
+ param->data_size = 0;
+ return 0;
+@@ -492,6 +500,8 @@ static int list_devices(struct dm_ioctl
+ */
+ for (i = 0; i < NUM_BUCKETS; i++) {
+ list_for_each_entry (hc, _name_buckets + i, name_list) {
++ if (!vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT))
++ continue;
+ needed += sizeof(struct dm_name_list);
+ needed += strlen(hc->name) + 1;
+ needed += ALIGN_MASK;
+@@ -515,6 +525,8 @@ static int list_devices(struct dm_ioctl
+ */
+ for (i = 0; i < NUM_BUCKETS; i++) {
+ list_for_each_entry (hc, _name_buckets + i, name_list) {
++ if (!vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT))
++ continue;
+ if (old_nl)
+ old_nl->next = (uint32_t) ((void *) nl -
+ (void *) old_nl);
+@@ -1619,8 +1631,8 @@ static int ctl_ioctl(uint command, struc
+ ioctl_fn fn = NULL;
+ size_t input_param_size;
+
+- /* only root can play with this */
+- if (!capable(CAP_SYS_ADMIN))
++ /* only root and certain contexts can play with this */
++ if (!vx_capable(CAP_SYS_ADMIN, VXC_ADMIN_MAPPER))
+ return -EACCES;
+
+ if (_IOC_TYPE(command) != DM_IOCTL)
+diff -NurpP --minimal linux-3.6.10/drivers/md/dm.c linux-3.6.10-vs2.3.4.6/drivers/md/dm.c
+--- linux-3.6.10/drivers/md/dm.c 2012-12-11 11:36:52.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/drivers/md/dm.c 2012-12-08 00:36:32.000000000 +0000
+@@ -19,6 +19,7 @@
+ #include <linux/idr.h>
+ #include <linux/hdreg.h>
+ #include <linux/delay.h>
++#include <linux/vs_base.h>
+
+ #include <trace/events/block.h>
+
+@@ -131,6 +132,7 @@ struct mapped_device {
+ rwlock_t map_lock;
+ atomic_t holders;
+ atomic_t open_count;
++ xid_t xid;
+
+ unsigned long flags;
+
+@@ -343,6 +345,7 @@ int dm_deleting_md(struct mapped_device
+ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
+ {
+ struct mapped_device *md;
++ int ret = -ENXIO;
+
+ spin_lock(&_minor_lock);
+
+@@ -351,18 +354,19 @@ static int dm_blk_open(struct block_devi
+ goto out;
+
+ if (test_bit(DMF_FREEING, &md->flags) ||
+- dm_deleting_md(md)) {
+- md = NULL;
++ dm_deleting_md(md))
++ goto out;
++
++ ret = -EACCES;
++ if (!vx_check(md->xid, VS_IDENT|VS_HOSTID))
+ goto out;
+- }
+
+ dm_get(md);
+ atomic_inc(&md->open_count);
+-
++ ret = 0;
+ out:
+ spin_unlock(&_minor_lock);
+-
+- return md ? 0 : -ENXIO;
++ return ret;
+ }
+
+ static int dm_blk_close(struct gendisk *disk, fmode_t mode)
+@@ -583,6 +587,14 @@ int dm_set_geometry(struct mapped_device
+ return 0;
+ }
+
++/*
++ * Get the xid associated with a dm device
++ */
++xid_t dm_get_xid(struct mapped_device *md)
++{
++ return md->xid;
++}
++
+ /*-----------------------------------------------------------------
+ * CRUD START:
+ * A more elegant soln is in the works that uses the queue
+@@ -1898,6 +1910,7 @@ static struct mapped_device *alloc_dev(i
+ INIT_LIST_HEAD(&md->uevent_list);
+ spin_lock_init(&md->uevent_lock);
+
++ md->xid = vx_current_xid();
+ md->queue = blk_alloc_queue(GFP_KERNEL);
+ if (!md->queue)
+ goto bad_queue;
+diff -NurpP --minimal linux-3.6.10/drivers/md/dm.h linux-3.6.10-vs2.3.4.6/drivers/md/dm.h
+--- linux-3.6.10/drivers/md/dm.h 2012-10-04 13:27:11.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/drivers/md/dm.h 2012-10-04 16:47:00.000000000 +0000
+@@ -46,6 +46,8 @@ struct dm_dev_internal {
+ struct dm_table;
+ struct dm_md_mempools;
+
++xid_t dm_get_xid(struct mapped_device *md);
++
+ /*-----------------------------------------------------------------
+ * Internal table functions.
+ *---------------------------------------------------------------*/
+diff -NurpP --minimal linux-3.6.10/drivers/net/tun.c linux-3.6.10-vs2.3.4.6/drivers/net/tun.c
+--- linux-3.6.10/drivers/net/tun.c 2012-10-04 13:27:20.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/drivers/net/tun.c 2012-10-04 16:47:00.000000000 +0000
+@@ -64,6 +64,7 @@
+ #include <linux/nsproxy.h>
+ #include <linux/virtio_net.h>
+ #include <linux/rcupdate.h>
++#include <linux/vs_network.h>
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
+ #include <net/rtnetlink.h>
+@@ -122,6 +123,7 @@ struct tun_struct {
+ unsigned int flags;
+ uid_t owner;
+ gid_t group;
++ nid_t nid;
+
+ struct net_device *dev;
+ netdev_features_t set_features;
+@@ -1033,6 +1035,7 @@ static void tun_setup(struct net_device
+
+ tun->owner = -1;
+ tun->group = -1;
++ tun->nid = current->nid;
+
+ dev->ethtool_ops = &tun_ethtool_ops;
+ dev->destructor = tun_free_netdev;
+@@ -1191,7 +1194,7 @@ static int tun_set_iff(struct net *net,
+
+ if (((tun->owner != -1 && cred->euid != tun->owner) ||
+ (tun->group != -1 && !in_egroup_p(tun->group))) &&
+- !capable(CAP_NET_ADMIN))
++ !cap_raised(current_cap(), CAP_NET_ADMIN))
+ return -EPERM;
+ err = security_tun_dev_attach(tun->socket.sk);
+ if (err < 0)
+@@ -1205,7 +1208,7 @@ static int tun_set_iff(struct net *net,
+ char *name;
+ unsigned long flags = 0;
+
+- if (!capable(CAP_NET_ADMIN))
++ if (!nx_capable(CAP_NET_ADMIN, NXC_TUN_CREATE))
+ return -EPERM;
+ err = security_tun_dev_create();
+ if (err < 0)
+@@ -1276,6 +1279,9 @@ static int tun_set_iff(struct net *net,
+
+ sk->sk_destruct = tun_sock_destruct;
+
++ if (!nx_check(tun->nid, VS_IDENT | VS_HOSTID | VS_ADMIN_P))
++ return -EPERM;
++
+ err = tun_attach(tun, file);
+ if (err < 0)
+ goto failed;
+@@ -1459,6 +1465,16 @@ static long __tun_chr_ioctl(struct file
+ tun_debug(KERN_INFO, tun, "group set to %d\n", tun->group);
+ break;
+
++ case TUNSETNID:
++ if (!capable(CAP_CONTEXT))
++ return -EPERM;
++
++ /* Set nid owner of the device */
++ tun->nid = (nid_t) arg;
++
++ tun_debug(KERN_INFO, tun, "nid owner set to %u\n", tun->nid);
++ break;
++
+ case TUNSETLINK:
+ /* Only allow setting the type when the interface is down */
+ if (tun->dev->flags & IFF_UP) {
+diff -NurpP --minimal linux-3.6.10/drivers/staging/csr/csr_wifi_hip_xbv.c linux-3.6.10-vs2.3.4.6/drivers/staging/csr/csr_wifi_hip_xbv.c
+--- linux-3.6.10/drivers/staging/csr/csr_wifi_hip_xbv.c 2012-10-04 13:27:30.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/drivers/staging/csr/csr_wifi_hip_xbv.c 2012-12-10 16:33:14.000000000 +0000
+@@ -55,7 +55,7 @@ typedef struct
+ {
+ char t_name[4];
+ u32 t_len;
+-} tag_t;
++} ctag_t;
+
+
+ #define TAG_EQ(i, v) (((i)[0] == (v)[0]) && \
+@@ -90,7 +90,7 @@ typedef struct
+ u32 ptr;
+ } xbv_stack_t;
+
+-static s32 read_tag(card_t *card, ct_t *ct, tag_t *tag);
++static s32 read_tag(card_t *card, ct_t *ct, ctag_t *tag);
+ static s32 read_bytes(card_t *card, ct_t *ct, void *buf, u32 len);
+ static s32 read_uint(card_t *card, ct_t *ct, u32 *u, u32 len);
+ static s32 xbv_check(xbv1_t *fwinfo, const xbv_stack_t *stack,
+@@ -160,7 +160,7 @@ static u32 write_fwdl_to_ptdl(void *buf,
+ CsrResult xbv1_parse(card_t *card, fwreadfn_t readfn, void *dlpriv, xbv1_t *fwinfo)
+ {
+ ct_t ct;
+- tag_t tag;
++ ctag_t tag;
+ xbv_stack_t stack;
+
+ ct.dlpriv = dlpriv;
+@@ -505,7 +505,7 @@ static u32 xbv2uint(u8 *ptr, s32 len)
+ }
+
+
+-static s32 read_tag(card_t *card, ct_t *ct, tag_t *tag)
++static s32 read_tag(card_t *card, ct_t *ct, ctag_t *tag)
+ {
+ u8 buf[8];
+ s32 n;
+diff -NurpP --minimal linux-3.6.10/drivers/tty/sysrq.c linux-3.6.10-vs2.3.4.6/drivers/tty/sysrq.c
+--- linux-3.6.10/drivers/tty/sysrq.c 2012-05-21 16:07:16.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/drivers/tty/sysrq.c 2012-10-04 16:47:00.000000000 +0000
+@@ -41,6 +41,7 @@
+ #include <linux/slab.h>
+ #include <linux/input.h>
+ #include <linux/uaccess.h>
++#include <linux/vserver/debug.h>
+
+ #include <asm/ptrace.h>
+ #include <asm/irq_regs.h>
+@@ -398,6 +399,21 @@ static struct sysrq_key_op sysrq_unrt_op
+ .enable_mask = SYSRQ_ENABLE_RTNICE,
+ };
+
++
++#ifdef CONFIG_VSERVER_DEBUG
++static void sysrq_handle_vxinfo(int key)
++{
++ dump_vx_info_inactive((key == 'x') ? 0 : 1);
++}
++
++static struct sysrq_key_op sysrq_showvxinfo_op = {
++ .handler = sysrq_handle_vxinfo,
++ .help_msg = "conteXt",
++ .action_msg = "Show Context Info",
++ .enable_mask = SYSRQ_ENABLE_DUMP,
++};
++#endif
++
+ /* Key Operations table and lock */
+ static DEFINE_SPINLOCK(sysrq_key_table_lock);
+
+@@ -452,7 +468,11 @@ static struct sysrq_key_op *sysrq_key_ta
+ NULL, /* v */
+ &sysrq_showstate_blocked_op, /* w */
+ /* x: May be registered on ppc/powerpc for xmon */
++#ifdef CONFIG_VSERVER_DEBUG
++ &sysrq_showvxinfo_op, /* x */
++#else
+ NULL, /* x */
++#endif
+ /* y: May be registered on sparc64 for global register dump */
+ NULL, /* y */
+ &sysrq_ftrace_dump_op, /* z */
+@@ -467,6 +487,8 @@ static int sysrq_key_table_key2index(int
+ retval = key - '0';
+ else if ((key >= 'a') && (key <= 'z'))
+ retval = key + 10 - 'a';
++ else if ((key >= 'A') && (key <= 'Z'))
++ retval = key + 10 - 'A';
+ else
+ retval = -1;
+ return retval;
+diff -NurpP --minimal linux-3.6.10/drivers/tty/tty_io.c linux-3.6.10-vs2.3.4.6/drivers/tty/tty_io.c
+--- linux-3.6.10/drivers/tty/tty_io.c 2012-07-22 21:39:32.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/drivers/tty/tty_io.c 2012-10-04 16:47:00.000000000 +0000
+@@ -104,6 +104,7 @@
+
+ #include <linux/kmod.h>
+ #include <linux/nsproxy.h>
++#include <linux/vs_pid.h>
+
+ #undef TTY_DEBUG_HANGUP
+
+@@ -2123,7 +2124,8 @@ static int tiocsti(struct tty_struct *tt
+ char ch, mbz = 0;
+ struct tty_ldisc *ld;
+
+- if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
++ if (((current->signal->tty != tty) &&
++ !vx_capable(CAP_SYS_ADMIN, VXC_TIOCSTI)))
+ return -EPERM;
+ if (get_user(ch, p))
+ return -EFAULT;
+@@ -2411,6 +2413,7 @@ static int tiocspgrp(struct tty_struct *
+ return -ENOTTY;
+ if (get_user(pgrp_nr, p))
+ return -EFAULT;
++ pgrp_nr = vx_rmap_pid(pgrp_nr);
+ if (pgrp_nr < 0)
+ return -EINVAL;
+ rcu_read_lock();
+diff -NurpP --minimal linux-3.6.10/fs/attr.c linux-3.6.10-vs2.3.4.6/fs/attr.c
+--- linux-3.6.10/fs/attr.c 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/attr.c 2012-10-04 16:47:00.000000000 +0000
+@@ -14,6 +14,9 @@
+ #include <linux/fcntl.h>
+ #include <linux/security.h>
+ #include <linux/evm.h>
++#include <linux/proc_fs.h>
++#include <linux/devpts_fs.h>
++#include <linux/vs_tag.h>
+
+ /**
+ * inode_change_ok - check if attribute changes to an inode are allowed
+@@ -74,6 +77,10 @@ int inode_change_ok(const struct inode *
+ return -EPERM;
+ }
+
++ /* check for inode tag permission */
++ if (dx_permission(inode, MAY_WRITE))
++ return -EACCES;
++
+ return 0;
+ }
+ EXPORT_SYMBOL(inode_change_ok);
+@@ -144,6 +151,8 @@ void setattr_copy(struct inode *inode, c
+ inode->i_uid = attr->ia_uid;
+ if (ia_valid & ATTR_GID)
+ inode->i_gid = attr->ia_gid;
++ if ((ia_valid & ATTR_TAG) && IS_TAGGED(inode))
++ inode->i_tag = attr->ia_tag;
+ if (ia_valid & ATTR_ATIME)
+ inode->i_atime = timespec_trunc(attr->ia_atime,
+ inode->i_sb->s_time_gran);
+@@ -173,7 +182,8 @@ int notify_change(struct dentry * dentry
+
+ WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex));
+
+- if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_TIMES_SET)) {
++ if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID |
++ ATTR_TAG | ATTR_TIMES_SET)) {
+ if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
+ return -EPERM;
+ }
+diff -NurpP --minimal linux-3.6.10/fs/block_dev.c linux-3.6.10-vs2.3.4.6/fs/block_dev.c
+--- linux-3.6.10/fs/block_dev.c 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/block_dev.c 2012-10-04 16:47:00.000000000 +0000
+@@ -27,6 +27,7 @@
+ #include <linux/namei.h>
+ #include <linux/log2.h>
+ #include <linux/cleancache.h>
++#include <linux/vs_device.h>
+ #include <asm/uaccess.h>
+ #include "internal.h"
+
+@@ -581,6 +582,7 @@ struct block_device *bdget(dev_t dev)
+ bdev->bd_invalidated = 0;
+ inode->i_mode = S_IFBLK;
+ inode->i_rdev = dev;
++ inode->i_mdev = dev;
+ inode->i_bdev = bdev;
+ inode->i_data.a_ops = &def_blk_aops;
+ mapping_set_gfp_mask(&inode->i_data, GFP_USER);
+@@ -627,6 +629,11 @@ EXPORT_SYMBOL(bdput);
+ static struct block_device *bd_acquire(struct inode *inode)
+ {
+ struct block_device *bdev;
++ dev_t mdev;
++
++ if (!vs_map_blkdev(inode->i_rdev, &mdev, DATTR_OPEN))
++ return NULL;
++ inode->i_mdev = mdev;
+
+ spin_lock(&bdev_lock);
+ bdev = inode->i_bdev;
+@@ -637,7 +644,7 @@ static struct block_device *bd_acquire(s
+ }
+ spin_unlock(&bdev_lock);
+
+- bdev = bdget(inode->i_rdev);
++ bdev = bdget(mdev);
+ if (bdev) {
+ spin_lock(&bdev_lock);
+ if (!inode->i_bdev) {
+diff -NurpP --minimal linux-3.6.10/fs/btrfs/ctree.h linux-3.6.10-vs2.3.4.6/fs/btrfs/ctree.h
+--- linux-3.6.10/fs/btrfs/ctree.h 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/btrfs/ctree.h 2012-10-04 16:47:00.000000000 +0000
+@@ -674,11 +674,14 @@ struct btrfs_inode_item {
+ /* modification sequence number for NFS */
+ __le64 sequence;
+
++ __le16 tag;
+ /*
+ * a little future expansion, for more than this we can
+ * just grow the inode item and version it
+ */
+- __le64 reserved[4];
++ __le16 reserved16;
++ __le32 reserved32;
++ __le64 reserved[3];
+ struct btrfs_timespec atime;
+ struct btrfs_timespec ctime;
+ struct btrfs_timespec mtime;
+@@ -1727,6 +1730,8 @@ struct btrfs_ioctl_defrag_range_args {
+ #define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21)
+ #define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR (1 << 22)
+
++#define BTRFS_MOUNT_TAGGED (1 << 24)
++
+ #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
+ #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
+ #define btrfs_test_opt(root, opt) ((root)->fs_info->mount_opt & \
+@@ -1988,6 +1993,7 @@ BTRFS_SETGET_FUNCS(inode_block_group, st
+ BTRFS_SETGET_FUNCS(inode_nlink, struct btrfs_inode_item, nlink, 32);
+ BTRFS_SETGET_FUNCS(inode_uid, struct btrfs_inode_item, uid, 32);
+ BTRFS_SETGET_FUNCS(inode_gid, struct btrfs_inode_item, gid, 32);
++BTRFS_SETGET_FUNCS(inode_tag, struct btrfs_inode_item, tag, 16);
+ BTRFS_SETGET_FUNCS(inode_mode, struct btrfs_inode_item, mode, 32);
+ BTRFS_SETGET_FUNCS(inode_rdev, struct btrfs_inode_item, rdev, 64);
+ BTRFS_SETGET_FUNCS(inode_flags, struct btrfs_inode_item, flags, 64);
+@@ -2041,6 +2047,10 @@ BTRFS_SETGET_FUNCS(extent_flags, struct
+
+ BTRFS_SETGET_FUNCS(extent_refs_v0, struct btrfs_extent_item_v0, refs, 32);
+
++#define BTRFS_INODE_IXUNLINK (1 << 24)
++#define BTRFS_INODE_BARRIER (1 << 25)
++#define BTRFS_INODE_COW (1 << 26)
++
+
+ BTRFS_SETGET_FUNCS(tree_block_level, struct btrfs_tree_block_info, level, 8);
+
+@@ -3305,6 +3315,7 @@ extern const struct dentry_operations bt
+ long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+ void btrfs_update_iflags(struct inode *inode);
+ void btrfs_inherit_iflags(struct inode *inode, struct inode *dir);
++int btrfs_sync_flags(struct inode *inode, int, int);
+ int btrfs_defrag_file(struct inode *inode, struct file *file,
+ struct btrfs_ioctl_defrag_range_args *range,
+ u64 newer_than, unsigned long max_pages);
+diff -NurpP --minimal linux-3.6.10/fs/btrfs/disk-io.c linux-3.6.10-vs2.3.4.6/fs/btrfs/disk-io.c
+--- linux-3.6.10/fs/btrfs/disk-io.c 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/btrfs/disk-io.c 2012-10-04 16:47:00.000000000 +0000
+@@ -2187,6 +2187,9 @@ int open_ctree(struct super_block *sb,
+ goto fail_alloc;
+ }
+
++ if (btrfs_test_opt(tree_root, TAGGED))
++ sb->s_flags |= MS_TAGGED;
++
+ features = btrfs_super_incompat_flags(disk_super) &
+ ~BTRFS_FEATURE_INCOMPAT_SUPP;
+ if (features) {
+diff -NurpP --minimal linux-3.6.10/fs/btrfs/inode.c linux-3.6.10-vs2.3.4.6/fs/btrfs/inode.c
+--- linux-3.6.10/fs/btrfs/inode.c 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/btrfs/inode.c 2012-10-04 16:47:00.000000000 +0000
+@@ -39,6 +39,7 @@
+ #include <linux/slab.h>
+ #include <linux/ratelimit.h>
+ #include <linux/mount.h>
++#include <linux/vs_tag.h>
+ #include "compat.h"
+ #include "ctree.h"
+ #include "disk-io.h"
+@@ -2545,6 +2546,8 @@ static void btrfs_read_locked_inode(stru
+ struct btrfs_key location;
+ int maybe_acls;
+ u32 rdev;
++ uid_t uid;
++ gid_t gid;
+ int ret;
+ bool filled = false;
+
+@@ -2572,8 +2575,13 @@ static void btrfs_read_locked_inode(stru
+ struct btrfs_inode_item);
+ inode->i_mode = btrfs_inode_mode(leaf, inode_item);
+ set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
+- inode->i_uid = btrfs_inode_uid(leaf, inode_item);
+- inode->i_gid = btrfs_inode_gid(leaf, inode_item);
++
++ uid = btrfs_inode_uid(leaf, inode_item);
++ gid = btrfs_inode_gid(leaf, inode_item);
++ inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
++ inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
++ inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid,
++ btrfs_inode_tag(leaf, inode_item));
+ btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
+
+ tspec = btrfs_inode_atime(inode_item);
+@@ -2651,8 +2659,14 @@ static void fill_inode_item(struct btrfs
+ struct btrfs_inode_item *item,
+ struct inode *inode)
+ {
+- btrfs_set_inode_uid(leaf, item, inode->i_uid);
+- btrfs_set_inode_gid(leaf, item, inode->i_gid);
++ uid_t uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag);
++ gid_t gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag);
++
++ btrfs_set_inode_uid(leaf, item, uid);
++ btrfs_set_inode_gid(leaf, item, gid);
++#ifdef CONFIG_TAGGING_INTERN
++ btrfs_set_inode_tag(leaf, item, inode->i_tag);
++#endif
+ btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
+ btrfs_set_inode_mode(leaf, item, inode->i_mode);
+ btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
+@@ -7636,11 +7650,13 @@ static const struct inode_operations btr
+ .listxattr = btrfs_listxattr,
+ .removexattr = btrfs_removexattr,
+ .permission = btrfs_permission,
++ .sync_flags = btrfs_sync_flags,
+ .get_acl = btrfs_get_acl,
+ };
+ static const struct inode_operations btrfs_dir_ro_inode_operations = {
+ .lookup = btrfs_lookup,
+ .permission = btrfs_permission,
++ .sync_flags = btrfs_sync_flags,
+ .get_acl = btrfs_get_acl,
+ };
+
+diff -NurpP --minimal linux-3.6.10/fs/btrfs/ioctl.c linux-3.6.10-vs2.3.4.6/fs/btrfs/ioctl.c
+--- linux-3.6.10/fs/btrfs/ioctl.c 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/btrfs/ioctl.c 2012-10-04 16:47:00.000000000 +0000
+@@ -74,10 +74,13 @@ static unsigned int btrfs_flags_to_ioctl
+ {
+ unsigned int iflags = 0;
+
+- if (flags & BTRFS_INODE_SYNC)
+- iflags |= FS_SYNC_FL;
+ if (flags & BTRFS_INODE_IMMUTABLE)
+ iflags |= FS_IMMUTABLE_FL;
++ if (flags & BTRFS_INODE_IXUNLINK)
++ iflags |= FS_IXUNLINK_FL;
++
++ if (flags & BTRFS_INODE_SYNC)
++ iflags |= FS_SYNC_FL;
+ if (flags & BTRFS_INODE_APPEND)
+ iflags |= FS_APPEND_FL;
+ if (flags & BTRFS_INODE_NODUMP)
+@@ -94,28 +97,78 @@ static unsigned int btrfs_flags_to_ioctl
+ else if (flags & BTRFS_INODE_NOCOMPRESS)
+ iflags |= FS_NOCOMP_FL;
+
++ if (flags & BTRFS_INODE_BARRIER)
++ iflags |= FS_BARRIER_FL;
++ if (flags & BTRFS_INODE_COW)
++ iflags |= FS_COW_FL;
+ return iflags;
+ }
+
+ /*
+- * Update inode->i_flags based on the btrfs internal flags.
++ * Update inode->i_(v)flags based on the btrfs internal flags.
+ */
+ void btrfs_update_iflags(struct inode *inode)
+ {
+ struct btrfs_inode *ip = BTRFS_I(inode);
+
+- inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
++ inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
++ S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
+
+- if (ip->flags & BTRFS_INODE_SYNC)
+- inode->i_flags |= S_SYNC;
+ if (ip->flags & BTRFS_INODE_IMMUTABLE)
+ inode->i_flags |= S_IMMUTABLE;
++ if (ip->flags & BTRFS_INODE_IXUNLINK)
++ inode->i_flags |= S_IXUNLINK;
++
++ if (ip->flags & BTRFS_INODE_SYNC)
++ inode->i_flags |= S_SYNC;
+ if (ip->flags & BTRFS_INODE_APPEND)
+ inode->i_flags |= S_APPEND;
+ if (ip->flags & BTRFS_INODE_NOATIME)
+ inode->i_flags |= S_NOATIME;
+ if (ip->flags & BTRFS_INODE_DIRSYNC)
+ inode->i_flags |= S_DIRSYNC;
++
++ inode->i_vflags &= ~(V_BARRIER | V_COW);
++
++ if (ip->flags & BTRFS_INODE_BARRIER)
++ inode->i_vflags |= V_BARRIER;
++ if (ip->flags & BTRFS_INODE_COW)
++ inode->i_vflags |= V_COW;
++}
++
++/*
++ * Update btrfs internal flags from inode->i_(v)flags.
++ */
++void btrfs_update_flags(struct inode *inode)
++{
++ struct btrfs_inode *ip = BTRFS_I(inode);
++
++ unsigned int flags = inode->i_flags;
++ unsigned int vflags = inode->i_vflags;
++
++ ip->flags &= ~(BTRFS_INODE_SYNC | BTRFS_INODE_APPEND |
++ BTRFS_INODE_IMMUTABLE | BTRFS_INODE_IXUNLINK |
++ BTRFS_INODE_NOATIME | BTRFS_INODE_DIRSYNC |
++ BTRFS_INODE_BARRIER | BTRFS_INODE_COW);
++
++ if (flags & S_IMMUTABLE)
++ ip->flags |= BTRFS_INODE_IMMUTABLE;
++ if (flags & S_IXUNLINK)
++ ip->flags |= BTRFS_INODE_IXUNLINK;
++
++ if (flags & S_SYNC)
++ ip->flags |= BTRFS_INODE_SYNC;
++ if (flags & S_APPEND)
++ ip->flags |= BTRFS_INODE_APPEND;
++ if (flags & S_NOATIME)
++ ip->flags |= BTRFS_INODE_NOATIME;
++ if (flags & S_DIRSYNC)
++ ip->flags |= BTRFS_INODE_DIRSYNC;
++
++ if (vflags & V_BARRIER)
++ ip->flags |= BTRFS_INODE_BARRIER;
++ if (vflags & V_COW)
++ ip->flags |= BTRFS_INODE_COW;
+ }
+
+ /*
+@@ -131,6 +184,7 @@ void btrfs_inherit_iflags(struct inode *
+ return;
+
+ flags = BTRFS_I(dir)->flags;
++ flags &= ~BTRFS_INODE_BARRIER;
+
+ if (flags & BTRFS_INODE_NOCOMPRESS) {
+ BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
+@@ -146,6 +200,30 @@ void btrfs_inherit_iflags(struct inode *
+ btrfs_update_iflags(inode);
+ }
+
++int btrfs_sync_flags(struct inode *inode, int flags, int vflags)
++{
++ struct btrfs_inode *ip = BTRFS_I(inode);
++ struct btrfs_root *root = ip->root;
++ struct btrfs_trans_handle *trans;
++ int ret;
++
++ trans = btrfs_join_transaction(root);
++ BUG_ON(!trans);
++
++ inode->i_flags = flags;
++ inode->i_vflags = vflags;
++ btrfs_update_flags(inode);
++
++ ret = btrfs_update_inode(trans, root, inode);
++ BUG_ON(ret);
++
++ btrfs_update_iflags(inode);
++ inode->i_ctime = CURRENT_TIME;
++ btrfs_end_transaction(trans, root);
++
++ return 0;
++}
++
+ static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
+ {
+ struct btrfs_inode *ip = BTRFS_I(file->f_path.dentry->d_inode);
+@@ -206,21 +284,27 @@ static int btrfs_ioctl_setflags(struct f
+
+ flags = btrfs_mask_flags(inode->i_mode, flags);
+ oldflags = btrfs_flags_to_ioctl(ip->flags);
+- if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
++ if ((flags ^ oldflags) & (FS_APPEND_FL |
++ FS_IMMUTABLE_FL | FS_IXUNLINK_FL)) {
+ if (!capable(CAP_LINUX_IMMUTABLE)) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+ }
+
+- if (flags & FS_SYNC_FL)
+- ip->flags |= BTRFS_INODE_SYNC;
+- else
+- ip->flags &= ~BTRFS_INODE_SYNC;
+ if (flags & FS_IMMUTABLE_FL)
+ ip->flags |= BTRFS_INODE_IMMUTABLE;
+ else
+ ip->flags &= ~BTRFS_INODE_IMMUTABLE;
++ if (flags & FS_IXUNLINK_FL)
++ ip->flags |= BTRFS_INODE_IXUNLINK;
++ else
++ ip->flags &= ~BTRFS_INODE_IXUNLINK;
++
++ if (flags & FS_SYNC_FL)
++ ip->flags |= BTRFS_INODE_SYNC;
++ else
++ ip->flags &= ~BTRFS_INODE_SYNC;
+ if (flags & FS_APPEND_FL)
+ ip->flags |= BTRFS_INODE_APPEND;
+ else
+diff -NurpP --minimal linux-3.6.10/fs/btrfs/super.c linux-3.6.10-vs2.3.4.6/fs/btrfs/super.c
+--- linux-3.6.10/fs/btrfs/super.c 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/btrfs/super.c 2012-10-04 16:47:00.000000000 +0000
+@@ -306,7 +306,7 @@ enum {
+ Opt_no_space_cache, Opt_recovery, Opt_skip_balance,
+ Opt_check_integrity, Opt_check_integrity_including_extent_data,
+ Opt_check_integrity_print_mask, Opt_fatal_errors,
+- Opt_err,
++ Opt_tag, Opt_notag, Opt_tagid, Opt_err,
+ };
+
+ static match_table_t tokens = {
+@@ -346,6 +346,9 @@ static match_table_t tokens = {
+ {Opt_check_integrity_including_extent_data, "check_int_data"},
+ {Opt_check_integrity_print_mask, "check_int_print_mask=%d"},
+ {Opt_fatal_errors, "fatal_errors=%s"},
++ {Opt_tag, "tag"},
++ {Opt_notag, "notag"},
++ {Opt_tagid, "tagid=%u"},
+ {Opt_err, NULL},
+ };
+
+@@ -596,6 +599,22 @@ int btrfs_parse_options(struct btrfs_roo
+ goto out;
+ }
+ break;
++#ifndef CONFIG_TAGGING_NONE
++ case Opt_tag:
++ printk(KERN_INFO "btrfs: use tagging\n");
++ btrfs_set_opt(info->mount_opt, TAGGED);
++ break;
++ case Opt_notag:
++ printk(KERN_INFO "btrfs: disabled tagging\n");
++ btrfs_clear_opt(info->mount_opt, TAGGED);
++ break;
++#endif
++#ifdef CONFIG_PROPAGATE
++ case Opt_tagid:
++ /* use args[0] */
++ btrfs_set_opt(info->mount_opt, TAGGED);
++ break;
++#endif
+ case Opt_err:
+ printk(KERN_INFO "btrfs: unrecognized mount option "
+ "'%s'\n", p);
+@@ -1196,6 +1215,12 @@ static int btrfs_remount(struct super_bl
+ btrfs_resize_thread_pool(fs_info,
+ fs_info->thread_pool_size, old_thread_pool_size);
+
++ if (btrfs_test_opt(root, TAGGED) && !(sb->s_flags & MS_TAGGED)) {
++ printk("btrfs: %s: tagging not permitted on remount.\n",
++ sb->s_id);
++ return -EINVAL;
++ }
++
+ if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
+ return 0;
+
+diff -NurpP --minimal linux-3.6.10/fs/char_dev.c linux-3.6.10-vs2.3.4.6/fs/char_dev.c
+--- linux-3.6.10/fs/char_dev.c 2012-03-19 18:47:25.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/char_dev.c 2012-10-04 16:47:00.000000000 +0000
+@@ -21,6 +21,8 @@
+ #include <linux/mutex.h>
+ #include <linux/backing-dev.h>
+ #include <linux/tty.h>
++#include <linux/vs_context.h>
++#include <linux/vs_device.h>
+
+ #include "internal.h"
+
+@@ -371,14 +373,21 @@ static int chrdev_open(struct inode *ino
+ struct cdev *p;
+ struct cdev *new = NULL;
+ int ret = 0;
++ dev_t mdev;
++
++ if (!vs_map_chrdev(inode->i_rdev, &mdev, DATTR_OPEN))
++ return -EPERM;
++ inode->i_mdev = mdev;
+
+ spin_lock(&cdev_lock);
+ p = inode->i_cdev;
+ if (!p) {
+ struct kobject *kobj;
+ int idx;
++
+ spin_unlock(&cdev_lock);
+- kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
++
++ kobj = kobj_lookup(cdev_map, mdev, &idx);
+ if (!kobj)
+ return -ENXIO;
+ new = container_of(kobj, struct cdev, kobj);
+diff -NurpP --minimal linux-3.6.10/fs/dcache.c linux-3.6.10-vs2.3.4.6/fs/dcache.c
+--- linux-3.6.10/fs/dcache.c 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/dcache.c 2012-12-07 21:49:54.000000000 +0000
+@@ -37,6 +37,7 @@
+ #include <linux/rculist_bl.h>
+ #include <linux/prefetch.h>
+ #include <linux/ratelimit.h>
++#include <linux/vs_limit.h>
+ #include "internal.h"
+ #include "mount.h"
+
+@@ -617,6 +618,8 @@ int d_invalidate(struct dentry * dentry)
+ spin_lock(&dentry->d_lock);
+ }
+
++ vx_dentry_dec(dentry);
++
+ /*
+ * Somebody else still using it?
+ *
+@@ -646,6 +649,7 @@ EXPORT_SYMBOL(d_invalidate);
+ static inline void __dget_dlock(struct dentry *dentry)
+ {
+ dentry->d_count++;
++ vx_dentry_inc(dentry);
+ }
+
+ static inline void __dget(struct dentry *dentry)
+@@ -1276,6 +1280,9 @@ struct dentry *__d_alloc(struct super_bl
+ struct dentry *dentry;
+ char *dname;
+
++ if (!vx_dentry_avail(1))
++ return NULL;
++
+ dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
+ if (!dentry)
+ return NULL;
+@@ -1308,6 +1315,7 @@ struct dentry *__d_alloc(struct super_bl
+
+ dentry->d_count = 1;
+ dentry->d_flags = 0;
++ vx_dentry_inc(dentry);
+ spin_lock_init(&dentry->d_lock);
+ seqcount_init(&dentry->d_seq);
+ dentry->d_inode = NULL;
+@@ -2012,6 +2020,7 @@ struct dentry *__d_lookup(struct dentry
+ }
+
+ dentry->d_count++;
++ vx_dentry_inc(dentry);
+ found = dentry;
+ spin_unlock(&dentry->d_lock);
+ break;
+diff -NurpP --minimal linux-3.6.10/fs/devpts/inode.c linux-3.6.10-vs2.3.4.6/fs/devpts/inode.c
+--- linux-3.6.10/fs/devpts/inode.c 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/devpts/inode.c 2012-10-04 16:47:00.000000000 +0000
+@@ -25,6 +25,7 @@
+ #include <linux/parser.h>
+ #include <linux/fsnotify.h>
+ #include <linux/seq_file.h>
++#include <linux/vs_base.h>
+
+ #define DEVPTS_DEFAULT_MODE 0600
+ /*
+@@ -36,6 +37,21 @@
+ #define DEVPTS_DEFAULT_PTMX_MODE 0000
+ #define PTMX_MINOR 2
+
++static int devpts_permission(struct inode *inode, int mask)
++{
++ int ret = -EACCES;
++
++ /* devpts is xid tagged */
++ if (vx_check((xid_t)inode->i_tag, VS_WATCH_P | VS_IDENT))
++ ret = generic_permission(inode, mask);
++ return ret;
++}
++
++static struct inode_operations devpts_file_inode_operations = {
++ .permission = devpts_permission,
++};
++
++
+ /*
+ * sysctl support for setting limits on the number of Unix98 ptys allocated.
+ * Otherwise one can eat up all kernel memory by opening /dev/ptmx repeatedly.
+@@ -336,6 +352,34 @@ static int devpts_show_options(struct se
+ return 0;
+ }
+
++static int devpts_filter(struct dentry *de)
++{
++ xid_t xid = 0;
++
++ /* devpts is xid tagged */
++ if (de && de->d_inode)
++ xid = (xid_t)de->d_inode->i_tag;
++#ifdef CONFIG_VSERVER_WARN_DEVPTS
++ else
++ vxwprintk_task(1, "devpts " VS_Q("%.*s") " without inode.",
++ de->d_name.len, de->d_name.name);
++#endif
++ return vx_check(xid, VS_WATCH_P | VS_IDENT);
++}
++
++static int devpts_readdir(struct file * filp, void * dirent, filldir_t filldir)
++{
++ return dcache_readdir_filter(filp, dirent, filldir, devpts_filter);
++}
++
++static struct file_operations devpts_dir_operations = {
++ .open = dcache_dir_open,
++ .release = dcache_dir_close,
++ .llseek = dcache_dir_lseek,
++ .read = generic_read_dir,
++ .readdir = devpts_readdir,
++};
++
+ static const struct super_operations devpts_sops = {
+ .statfs = simple_statfs,
+ .remount_fs = devpts_remount,
+@@ -379,8 +423,10 @@ devpts_fill_super(struct super_block *s,
+ inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+ inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
+ inode->i_op = &simple_dir_inode_operations;
+- inode->i_fop = &simple_dir_operations;
++ inode->i_fop = &devpts_dir_operations;
+ set_nlink(inode, 2);
++ /* devpts is xid tagged */
++ inode->i_tag = (tag_t)vx_current_xid();
+
+ s->s_root = d_make_root(inode);
+ if (s->s_root)
+@@ -572,6 +618,9 @@ int devpts_pty_new(struct inode *ptmx_in
+ inode->i_gid = opts->setgid ? opts->gid : current_fsgid();
+ inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+ init_special_inode(inode, S_IFCHR|opts->mode, device);
++ /* devpts is xid tagged */
++ inode->i_tag = (tag_t)vx_current_xid();
++ inode->i_op = &devpts_file_inode_operations;
+ inode->i_private = tty;
+ tty->driver_data = inode;
+
+diff -NurpP --minimal linux-3.6.10/fs/ext2/balloc.c linux-3.6.10-vs2.3.4.6/fs/ext2/balloc.c
+--- linux-3.6.10/fs/ext2/balloc.c 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext2/balloc.c 2012-10-04 16:47:00.000000000 +0000
+@@ -699,7 +699,6 @@ ext2_try_to_allocate(struct super_block
+ start = 0;
+ end = EXT2_BLOCKS_PER_GROUP(sb);
+ }
+-
+ BUG_ON(start > EXT2_BLOCKS_PER_GROUP(sb));
+
+ repeat:
+diff -NurpP --minimal linux-3.6.10/fs/ext2/ext2.h linux-3.6.10-vs2.3.4.6/fs/ext2/ext2.h
+--- linux-3.6.10/fs/ext2/ext2.h 2012-07-22 21:39:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext2/ext2.h 2012-10-04 16:47:00.000000000 +0000
+@@ -244,8 +244,12 @@ struct ext2_group_desc
+ #define EXT2_NOTAIL_FL FS_NOTAIL_FL /* file tail should not be merged */
+ #define EXT2_DIRSYNC_FL FS_DIRSYNC_FL /* dirsync behaviour (directories only) */
+ #define EXT2_TOPDIR_FL FS_TOPDIR_FL /* Top of directory hierarchies*/
++#define EXT2_IXUNLINK_FL FS_IXUNLINK_FL /* Immutable invert on unlink */
+ #define EXT2_RESERVED_FL FS_RESERVED_FL /* reserved for ext2 lib */
+
++#define EXT2_BARRIER_FL FS_BARRIER_FL /* Barrier for chroot() */
++#define EXT2_COW_FL FS_COW_FL /* Copy on Write marker */
++
+ #define EXT2_FL_USER_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */
+ #define EXT2_FL_USER_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */
+
+@@ -329,7 +333,8 @@ struct ext2_inode {
+ __u16 i_pad1;
+ __le16 l_i_uid_high; /* these 2 fields */
+ __le16 l_i_gid_high; /* were reserved2[0] */
+- __u32 l_i_reserved2;
++ __le16 l_i_tag; /* Context Tag */
++ __u16 l_i_reserved2;
+ } linux2;
+ struct {
+ __u8 h_i_frag; /* Fragment number */
+@@ -357,6 +362,7 @@ struct ext2_inode {
+ #define i_gid_low i_gid
+ #define i_uid_high osd2.linux2.l_i_uid_high
+ #define i_gid_high osd2.linux2.l_i_gid_high
++#define i_raw_tag osd2.linux2.l_i_tag
+ #define i_reserved2 osd2.linux2.l_i_reserved2
+
+ /*
+@@ -384,6 +390,7 @@ struct ext2_inode {
+ #define EXT2_MOUNT_USRQUOTA 0x020000 /* user quota */
+ #define EXT2_MOUNT_GRPQUOTA 0x040000 /* group quota */
+ #define EXT2_MOUNT_RESERVATION 0x080000 /* Preallocation */
++#define EXT2_MOUNT_TAGGED (1<<24) /* Enable Context Tags */
+
+
+ #define clear_opt(o, opt) o &= ~EXT2_MOUNT_##opt
+@@ -757,6 +764,7 @@ extern void ext2_set_inode_flags(struct
+ extern void ext2_get_inode_flags(struct ext2_inode_info *);
+ extern int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 len);
++extern int ext2_sync_flags(struct inode *, int, int);
+
+ /* ioctl.c */
+ extern long ext2_ioctl(struct file *, unsigned int, unsigned long);
+diff -NurpP --minimal linux-3.6.10/fs/ext2/file.c linux-3.6.10-vs2.3.4.6/fs/ext2/file.c
+--- linux-3.6.10/fs/ext2/file.c 2011-10-24 16:45:27.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext2/file.c 2012-10-04 16:47:00.000000000 +0000
+@@ -104,4 +104,5 @@ const struct inode_operations ext2_file_
+ .setattr = ext2_setattr,
+ .get_acl = ext2_get_acl,
+ .fiemap = ext2_fiemap,
++ .sync_flags = ext2_sync_flags,
+ };
+diff -NurpP --minimal linux-3.6.10/fs/ext2/ialloc.c linux-3.6.10-vs2.3.4.6/fs/ext2/ialloc.c
+--- linux-3.6.10/fs/ext2/ialloc.c 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext2/ialloc.c 2012-10-04 16:47:00.000000000 +0000
+@@ -17,6 +17,7 @@
+ #include <linux/backing-dev.h>
+ #include <linux/buffer_head.h>
+ #include <linux/random.h>
++#include <linux/vs_tag.h>
+ #include "ext2.h"
+ #include "xattr.h"
+ #include "acl.h"
+@@ -547,6 +548,7 @@ got:
+ inode->i_mode = mode;
+ inode->i_uid = current_fsuid();
+ inode->i_gid = dir->i_gid;
++ inode->i_tag = dx_current_fstag(sb);
+ } else
+ inode_init_owner(inode, dir, mode);
+
+diff -NurpP --minimal linux-3.6.10/fs/ext2/inode.c linux-3.6.10-vs2.3.4.6/fs/ext2/inode.c
+--- linux-3.6.10/fs/ext2/inode.c 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext2/inode.c 2012-10-04 16:47:00.000000000 +0000
+@@ -31,6 +31,7 @@
+ #include <linux/mpage.h>
+ #include <linux/fiemap.h>
+ #include <linux/namei.h>
++#include <linux/vs_tag.h>
+ #include "ext2.h"
+ #include "acl.h"
+ #include "xip.h"
+@@ -1165,7 +1166,7 @@ static void ext2_truncate_blocks(struct
+ return;
+ if (ext2_inode_is_fast_symlink(inode))
+ return;
+- if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
++ if (IS_APPEND(inode) || IS_IXORUNLINK(inode))
+ return;
+ __ext2_truncate_blocks(inode, offset);
+ }
+@@ -1256,36 +1257,61 @@ void ext2_set_inode_flags(struct inode *
+ {
+ unsigned int flags = EXT2_I(inode)->i_flags;
+
+- inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
++ inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
++ S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
++
++
++ if (flags & EXT2_IMMUTABLE_FL)
++ inode->i_flags |= S_IMMUTABLE;
++ if (flags & EXT2_IXUNLINK_FL)
++ inode->i_flags |= S_IXUNLINK;
++
+ if (flags & EXT2_SYNC_FL)
+ inode->i_flags |= S_SYNC;
+ if (flags & EXT2_APPEND_FL)
+ inode->i_flags |= S_APPEND;
+- if (flags & EXT2_IMMUTABLE_FL)
+- inode->i_flags |= S_IMMUTABLE;
+ if (flags & EXT2_NOATIME_FL)
+ inode->i_flags |= S_NOATIME;
+ if (flags & EXT2_DIRSYNC_FL)
+ inode->i_flags |= S_DIRSYNC;
++
++ inode->i_vflags &= ~(V_BARRIER | V_COW);
++
++ if (flags & EXT2_BARRIER_FL)
++ inode->i_vflags |= V_BARRIER;
++ if (flags & EXT2_COW_FL)
++ inode->i_vflags |= V_COW;
+ }
+
+ /* Propagate flags from i_flags to EXT2_I(inode)->i_flags */
+ void ext2_get_inode_flags(struct ext2_inode_info *ei)
+ {
+ unsigned int flags = ei->vfs_inode.i_flags;
++ unsigned int vflags = ei->vfs_inode.i_vflags;
++
++ ei->i_flags &= ~(EXT2_SYNC_FL | EXT2_APPEND_FL |
++ EXT2_IMMUTABLE_FL | EXT2_IXUNLINK_FL |
++ EXT2_NOATIME_FL | EXT2_DIRSYNC_FL |
++ EXT2_BARRIER_FL | EXT2_COW_FL);
++
++ if (flags & S_IMMUTABLE)
++ ei->i_flags |= EXT2_IMMUTABLE_FL;
++ if (flags & S_IXUNLINK)
++ ei->i_flags |= EXT2_IXUNLINK_FL;
+
+- ei->i_flags &= ~(EXT2_SYNC_FL|EXT2_APPEND_FL|
+- EXT2_IMMUTABLE_FL|EXT2_NOATIME_FL|EXT2_DIRSYNC_FL);
+ if (flags & S_SYNC)
+ ei->i_flags |= EXT2_SYNC_FL;
+ if (flags & S_APPEND)
+ ei->i_flags |= EXT2_APPEND_FL;
+- if (flags & S_IMMUTABLE)
+- ei->i_flags |= EXT2_IMMUTABLE_FL;
+ if (flags & S_NOATIME)
+ ei->i_flags |= EXT2_NOATIME_FL;
+ if (flags & S_DIRSYNC)
+ ei->i_flags |= EXT2_DIRSYNC_FL;
++
++ if (vflags & V_BARRIER)
++ ei->i_flags |= EXT2_BARRIER_FL;
++ if (vflags & V_COW)
++ ei->i_flags |= EXT2_COW_FL;
+ }
+
+ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
+@@ -1321,8 +1347,10 @@ struct inode *ext2_iget (struct super_bl
+ i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
+ i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
+ }
+- i_uid_write(inode, i_uid);
+- i_gid_write(inode, i_gid);
++ i_uid_write(inode, INOTAG_UID(DX_TAG(inode), i_uid, i_gid));
++ i_gid_write(inode, INOTAG_GID(DX_TAG(inode), i_uid, i_gid));
++ inode->i_tag = INOTAG_TAG(DX_TAG(inode), i_uid, i_gid,
++ le16_to_cpu(raw_inode->i_raw_tag));
+ set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
+ inode->i_size = le32_to_cpu(raw_inode->i_size);
+ inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
+@@ -1420,8 +1448,8 @@ static int __ext2_write_inode(struct ino
+ struct ext2_inode_info *ei = EXT2_I(inode);
+ struct super_block *sb = inode->i_sb;
+ ino_t ino = inode->i_ino;
+- uid_t uid = i_uid_read(inode);
+- gid_t gid = i_gid_read(inode);
++ uid_t uid = TAGINO_UID(DX_TAG(inode), i_uid_read(inode), inode->i_tag);
++ gid_t gid = TAGINO_GID(DX_TAG(inode), i_gid_read(inode), inode->i_tag);
+ struct buffer_head * bh;
+ struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh);
+ int n;
+@@ -1457,6 +1485,9 @@ static int __ext2_write_inode(struct ino
+ raw_inode->i_uid_high = 0;
+ raw_inode->i_gid_high = 0;
+ }
++#ifdef CONFIG_TAGGING_INTERN
++ raw_inode->i_raw_tag = cpu_to_le16(inode->i_tag);
++#endif
+ raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
+ raw_inode->i_size = cpu_to_le32(inode->i_size);
+ raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
+@@ -1537,7 +1568,8 @@ int ext2_setattr(struct dentry *dentry,
+ if (is_quota_modification(inode, iattr))
+ dquot_initialize(inode);
+ if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
+- (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
++ (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid)) ||
++ (iattr->ia_valid & ATTR_TAG && iattr->ia_tag != inode->i_tag)) {
+ error = dquot_transfer(inode, iattr);
+ if (error)
+ return error;
+diff -NurpP --minimal linux-3.6.10/fs/ext2/ioctl.c linux-3.6.10-vs2.3.4.6/fs/ext2/ioctl.c
+--- linux-3.6.10/fs/ext2/ioctl.c 2012-03-19 18:47:25.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext2/ioctl.c 2012-10-04 16:47:00.000000000 +0000
+@@ -17,6 +17,16 @@
+ #include <asm/uaccess.h>
+
+
++int ext2_sync_flags(struct inode *inode, int flags, int vflags)
++{
++ inode->i_flags = flags;
++ inode->i_vflags = vflags;
++ ext2_get_inode_flags(EXT2_I(inode));
++ inode->i_ctime = CURRENT_TIME_SEC;
++ mark_inode_dirty(inode);
++ return 0;
++}
++
+ long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+ struct inode *inode = filp->f_dentry->d_inode;
+@@ -51,6 +61,11 @@ long ext2_ioctl(struct file *filp, unsig
+
+ flags = ext2_mask_flags(inode->i_mode, flags);
+
++ if (IS_BARRIER(inode)) {
++ vxwprintk_task(1, "messing with the barrier.");
++ return -EACCES;
++ }
++
+ mutex_lock(&inode->i_mutex);
+ /* Is it quota file? Do not allow user to mess with it */
+ if (IS_NOQUOTA(inode)) {
+@@ -66,7 +81,9 @@ long ext2_ioctl(struct file *filp, unsig
+ *
+ * This test looks nicer. Thanks to Pauline Middelink
+ */
+- if ((flags ^ oldflags) & (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL)) {
++ if ((oldflags & EXT2_IMMUTABLE_FL) ||
++ ((flags ^ oldflags) & (EXT2_APPEND_FL |
++ EXT2_IMMUTABLE_FL | EXT2_IXUNLINK_FL))) {
+ if (!capable(CAP_LINUX_IMMUTABLE)) {
+ mutex_unlock(&inode->i_mutex);
+ ret = -EPERM;
+@@ -74,7 +91,7 @@ long ext2_ioctl(struct file *filp, unsig
+ }
+ }
+
+- flags = flags & EXT2_FL_USER_MODIFIABLE;
++ flags &= EXT2_FL_USER_MODIFIABLE;
+ flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE;
+ ei->i_flags = flags;
+
+diff -NurpP --minimal linux-3.6.10/fs/ext2/namei.c linux-3.6.10-vs2.3.4.6/fs/ext2/namei.c
+--- linux-3.6.10/fs/ext2/namei.c 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext2/namei.c 2012-10-04 16:47:00.000000000 +0000
+@@ -32,6 +32,7 @@
+
+ #include <linux/pagemap.h>
+ #include <linux/quotaops.h>
++#include <linux/vs_tag.h>
+ #include "ext2.h"
+ #include "xattr.h"
+ #include "acl.h"
+@@ -73,6 +74,7 @@ static struct dentry *ext2_lookup(struct
+ (unsigned long) ino);
+ return ERR_PTR(-EIO);
+ }
++ dx_propagate_tag(nd, inode);
+ }
+ return d_splice_alias(inode, dentry);
+ }
+@@ -397,6 +399,7 @@ const struct inode_operations ext2_dir_i
+ .removexattr = generic_removexattr,
+ #endif
+ .setattr = ext2_setattr,
++ .sync_flags = ext2_sync_flags,
+ .get_acl = ext2_get_acl,
+ };
+
+diff -NurpP --minimal linux-3.6.10/fs/ext2/super.c linux-3.6.10-vs2.3.4.6/fs/ext2/super.c
+--- linux-3.6.10/fs/ext2/super.c 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext2/super.c 2012-10-04 16:47:00.000000000 +0000
+@@ -390,7 +390,8 @@ enum {
+ Opt_err_ro, Opt_nouid32, Opt_nocheck, Opt_debug,
+ Opt_oldalloc, Opt_orlov, Opt_nobh, Opt_user_xattr, Opt_nouser_xattr,
+ Opt_acl, Opt_noacl, Opt_xip, Opt_ignore, Opt_err, Opt_quota,
+- Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation
++ Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation,
++ Opt_tag, Opt_notag, Opt_tagid
+ };
+
+ static const match_table_t tokens = {
+@@ -418,6 +419,9 @@ static const match_table_t tokens = {
+ {Opt_acl, "acl"},
+ {Opt_noacl, "noacl"},
+ {Opt_xip, "xip"},
++ {Opt_tag, "tag"},
++ {Opt_notag, "notag"},
++ {Opt_tagid, "tagid=%u"},
+ {Opt_grpquota, "grpquota"},
+ {Opt_ignore, "noquota"},
+ {Opt_quota, "quota"},
+@@ -501,6 +505,20 @@ static int parse_options(char *options,
+ case Opt_nouid32:
+ set_opt (sbi->s_mount_opt, NO_UID32);
+ break;
++#ifndef CONFIG_TAGGING_NONE
++ case Opt_tag:
++ set_opt (sbi->s_mount_opt, TAGGED);
++ break;
++ case Opt_notag:
++ clear_opt (sbi->s_mount_opt, TAGGED);
++ break;
++#endif
++#ifdef CONFIG_PROPAGATE
++ case Opt_tagid:
++ /* use args[0] */
++ set_opt (sbi->s_mount_opt, TAGGED);
++ break;
++#endif
+ case Opt_nocheck:
+ clear_opt (sbi->s_mount_opt, CHECK);
+ break;
+@@ -859,6 +877,8 @@ static int ext2_fill_super(struct super_
+ if (!parse_options((char *) data, sb))
+ goto failed_mount;
+
++ if (EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_TAGGED)
++ sb->s_flags |= MS_TAGGED;
+ sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ ((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
+ MS_POSIXACL : 0);
+@@ -1264,6 +1284,14 @@ static int ext2_remount (struct super_bl
+ err = -EINVAL;
+ goto restore_opts;
+ }
++
++ if ((sbi->s_mount_opt & EXT2_MOUNT_TAGGED) &&
++ !(sb->s_flags & MS_TAGGED)) {
++ printk("EXT2-fs: %s: tagging not permitted on remount.\n",
++ sb->s_id);
++ err = -EINVAL;
++ goto restore_opts;
++ }
+
+ sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+diff -NurpP --minimal linux-3.6.10/fs/ext3/ext3.h linux-3.6.10-vs2.3.4.6/fs/ext3/ext3.h
+--- linux-3.6.10/fs/ext3/ext3.h 2012-07-22 21:39:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext3/ext3.h 2012-10-04 16:47:00.000000000 +0000
+@@ -151,10 +151,14 @@ struct ext3_group_desc
+ #define EXT3_NOTAIL_FL 0x00008000 /* file tail should not be merged */
+ #define EXT3_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
+ #define EXT3_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
++#define EXT3_IXUNLINK_FL 0x08000000 /* Immutable invert on unlink */
+ #define EXT3_RESERVED_FL 0x80000000 /* reserved for ext3 lib */
+
+-#define EXT3_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
+-#define EXT3_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
++#define EXT3_BARRIER_FL 0x04000000 /* Barrier for chroot() */
++#define EXT3_COW_FL 0x20000000 /* Copy on Write marker */
++
++#define EXT3_FL_USER_VISIBLE 0x0103DFFF /* User visible flags */
++#define EXT3_FL_USER_MODIFIABLE 0x010380FF /* User modifiable flags */
+
+ /* Flags that should be inherited by new inodes from their parent. */
+ #define EXT3_FL_INHERITED (EXT3_SECRM_FL | EXT3_UNRM_FL | EXT3_COMPR_FL |\
+@@ -290,7 +294,8 @@ struct ext3_inode {
+ __u16 i_pad1;
+ __le16 l_i_uid_high; /* these 2 fields */
+ __le16 l_i_gid_high; /* were reserved2[0] */
+- __u32 l_i_reserved2;
++ __le16 l_i_tag; /* Context Tag */
++ __u16 l_i_reserved2;
+ } linux2;
+ struct {
+ __u8 h_i_frag; /* Fragment number */
+@@ -320,6 +325,7 @@ struct ext3_inode {
+ #define i_gid_low i_gid
+ #define i_uid_high osd2.linux2.l_i_uid_high
+ #define i_gid_high osd2.linux2.l_i_gid_high
++#define i_raw_tag osd2.linux2.l_i_tag
+ #define i_reserved2 osd2.linux2.l_i_reserved2
+
+ /*
+@@ -364,6 +370,7 @@ struct ext3_inode {
+ #define EXT3_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */
+ #define EXT3_MOUNT_DATA_ERR_ABORT 0x400000 /* Abort on file data write
+ * error in ordered mode */
++#define EXT3_MOUNT_TAGGED (1<<24) /* Enable Context Tags */
+
+ /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
+ #ifndef _LINUX_EXT2_FS_H
+@@ -1061,6 +1068,7 @@ extern void ext3_get_inode_flags(struct
+ extern void ext3_set_aops(struct inode *inode);
+ extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 len);
++extern int ext3_sync_flags(struct inode *, int, int);
+
+ /* ioctl.c */
+ extern long ext3_ioctl(struct file *, unsigned int, unsigned long);
+diff -NurpP --minimal linux-3.6.10/fs/ext3/file.c linux-3.6.10-vs2.3.4.6/fs/ext3/file.c
+--- linux-3.6.10/fs/ext3/file.c 2012-05-21 16:07:20.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext3/file.c 2012-10-04 16:47:00.000000000 +0000
+@@ -76,5 +76,6 @@ const struct inode_operations ext3_file_
+ #endif
+ .get_acl = ext3_get_acl,
+ .fiemap = ext3_fiemap,
++ .sync_flags = ext3_sync_flags,
+ };
+
+diff -NurpP --minimal linux-3.6.10/fs/ext3/ialloc.c linux-3.6.10-vs2.3.4.6/fs/ext3/ialloc.c
+--- linux-3.6.10/fs/ext3/ialloc.c 2012-07-22 21:39:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext3/ialloc.c 2012-10-04 16:47:00.000000000 +0000
+@@ -14,6 +14,7 @@
+
+ #include <linux/quotaops.h>
+ #include <linux/random.h>
++#include <linux/vs_tag.h>
+
+ #include "ext3.h"
+ #include "xattr.h"
+@@ -469,6 +470,7 @@ got:
+ inode->i_mode = mode;
+ inode->i_uid = current_fsuid();
+ inode->i_gid = dir->i_gid;
++ inode->i_tag = dx_current_fstag(sb);
+ } else
+ inode_init_owner(inode, dir, mode);
+
+diff -NurpP --minimal linux-3.6.10/fs/ext3/inode.c linux-3.6.10-vs2.3.4.6/fs/ext3/inode.c
+--- linux-3.6.10/fs/ext3/inode.c 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext3/inode.c 2012-10-04 16:47:00.000000000 +0000
+@@ -27,6 +27,8 @@
+ #include <linux/writeback.h>
+ #include <linux/mpage.h>
+ #include <linux/namei.h>
++#include <linux/vs_tag.h>
++
+ #include "ext3.h"
+ #include "xattr.h"
+ #include "acl.h"
+@@ -2848,36 +2850,60 @@ void ext3_set_inode_flags(struct inode *
+ {
+ unsigned int flags = EXT3_I(inode)->i_flags;
+
+- inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
++ inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
++ S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
++
++ if (flags & EXT3_IMMUTABLE_FL)
++ inode->i_flags |= S_IMMUTABLE;
++ if (flags & EXT3_IXUNLINK_FL)
++ inode->i_flags |= S_IXUNLINK;
++
+ if (flags & EXT3_SYNC_FL)
+ inode->i_flags |= S_SYNC;
+ if (flags & EXT3_APPEND_FL)
+ inode->i_flags |= S_APPEND;
+- if (flags & EXT3_IMMUTABLE_FL)
+- inode->i_flags |= S_IMMUTABLE;
+ if (flags & EXT3_NOATIME_FL)
+ inode->i_flags |= S_NOATIME;
+ if (flags & EXT3_DIRSYNC_FL)
+ inode->i_flags |= S_DIRSYNC;
++
++ inode->i_vflags &= ~(V_BARRIER | V_COW);
++
++ if (flags & EXT3_BARRIER_FL)
++ inode->i_vflags |= V_BARRIER;
++ if (flags & EXT3_COW_FL)
++ inode->i_vflags |= V_COW;
+ }
+
+ /* Propagate flags from i_flags to EXT3_I(inode)->i_flags */
+ void ext3_get_inode_flags(struct ext3_inode_info *ei)
+ {
+ unsigned int flags = ei->vfs_inode.i_flags;
++ unsigned int vflags = ei->vfs_inode.i_vflags;
++
++ ei->i_flags &= ~(EXT3_SYNC_FL | EXT3_APPEND_FL |
++ EXT3_IMMUTABLE_FL | EXT3_IXUNLINK_FL |
++ EXT3_NOATIME_FL | EXT3_DIRSYNC_FL |
++ EXT3_BARRIER_FL | EXT3_COW_FL);
++
++ if (flags & S_IMMUTABLE)
++ ei->i_flags |= EXT3_IMMUTABLE_FL;
++ if (flags & S_IXUNLINK)
++ ei->i_flags |= EXT3_IXUNLINK_FL;
+
+- ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL|
+- EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL);
+ if (flags & S_SYNC)
+ ei->i_flags |= EXT3_SYNC_FL;
+ if (flags & S_APPEND)
+ ei->i_flags |= EXT3_APPEND_FL;
+- if (flags & S_IMMUTABLE)
+- ei->i_flags |= EXT3_IMMUTABLE_FL;
+ if (flags & S_NOATIME)
+ ei->i_flags |= EXT3_NOATIME_FL;
+ if (flags & S_DIRSYNC)
+ ei->i_flags |= EXT3_DIRSYNC_FL;
++
++ if (vflags & V_BARRIER)
++ ei->i_flags |= EXT3_BARRIER_FL;
++ if (vflags & V_COW)
++ ei->i_flags |= EXT3_COW_FL;
+ }
+
+ struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
+@@ -2915,8 +2941,10 @@ struct inode *ext3_iget(struct super_blo
+ i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
+ i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
+ }
+- i_uid_write(inode, i_uid);
+- i_gid_write(inode, i_gid);
++ i_uid_write(inode, INOTAG_UID(DX_TAG(inode), i_uid, i_gid));
++ i_gid_write(inode, INOTAG_GID(DX_TAG(inode), i_uid, i_gid));
++ inode->i_tag = INOTAG_TAG(DX_TAG(inode), i_uid, i_gid,
++ le16_to_cpu(raw_inode->i_raw_tag));
+ set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
+ inode->i_size = le32_to_cpu(raw_inode->i_size);
+ inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
+@@ -3088,8 +3116,8 @@ again:
+
+ ext3_get_inode_flags(ei);
+ raw_inode->i_mode = cpu_to_le16(inode->i_mode);
+- i_uid = i_uid_read(inode);
+- i_gid = i_gid_read(inode);
++ i_uid = TAGINO_UID(DX_TAG(inode), i_uid_read(inode), inode->i_tag);
++ i_gid = TAGINO_GID(DX_TAG(inode), i_gid_read(inode), inode->i_tag);
+ if(!(test_opt(inode->i_sb, NO_UID32))) {
+ raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
+ raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
+@@ -3114,6 +3142,9 @@ again:
+ raw_inode->i_uid_high = 0;
+ raw_inode->i_gid_high = 0;
+ }
++#ifdef CONFIG_TAGGING_INTERN
++ raw_inode->i_raw_tag = cpu_to_le16(inode->i_tag);
++#endif
+ raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
+ disksize = cpu_to_le32(ei->i_disksize);
+ if (disksize != raw_inode->i_size) {
+@@ -3282,7 +3313,8 @@ int ext3_setattr(struct dentry *dentry,
+ if (is_quota_modification(inode, attr))
+ dquot_initialize(inode);
+ if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
+- (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
++ (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)) ||
++ (ia_valid & ATTR_TAG && attr->ia_tag != inode->i_tag)) {
+ handle_t *handle;
+
+ /* (user+group)*(old+new) structure, inode write (sb,
+@@ -3304,6 +3336,8 @@ int ext3_setattr(struct dentry *dentry,
+ inode->i_uid = attr->ia_uid;
+ if (attr->ia_valid & ATTR_GID)
+ inode->i_gid = attr->ia_gid;
++ if ((attr->ia_valid & ATTR_TAG) && IS_TAGGED(inode))
++ inode->i_tag = attr->ia_tag;
+ error = ext3_mark_inode_dirty(handle, inode);
+ ext3_journal_stop(handle);
+ }
+diff -NurpP --minimal linux-3.6.10/fs/ext3/ioctl.c linux-3.6.10-vs2.3.4.6/fs/ext3/ioctl.c
+--- linux-3.6.10/fs/ext3/ioctl.c 2012-05-21 16:07:20.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext3/ioctl.c 2012-10-04 16:47:00.000000000 +0000
+@@ -12,6 +12,34 @@
+ #include <asm/uaccess.h>
+ #include "ext3.h"
+
++
++int ext3_sync_flags(struct inode *inode, int flags, int vflags)
++{
++ handle_t *handle = NULL;
++ struct ext3_iloc iloc;
++ int err;
++
++ handle = ext3_journal_start(inode, 1);
++ if (IS_ERR(handle))
++ return PTR_ERR(handle);
++
++ if (IS_SYNC(inode))
++ handle->h_sync = 1;
++ err = ext3_reserve_inode_write(handle, inode, &iloc);
++ if (err)
++ goto flags_err;
++
++ inode->i_flags = flags;
++ inode->i_vflags = vflags;
++ ext3_get_inode_flags(EXT3_I(inode));
++ inode->i_ctime = CURRENT_TIME_SEC;
++
++ err = ext3_mark_iloc_dirty(handle, inode, &iloc);
++flags_err:
++ ext3_journal_stop(handle);
++ return err;
++}
++
+ long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+ struct inode *inode = filp->f_dentry->d_inode;
+@@ -45,6 +73,11 @@ long ext3_ioctl(struct file *filp, unsig
+
+ flags = ext3_mask_flags(inode->i_mode, flags);
+
++ if (IS_BARRIER(inode)) {
++ vxwprintk_task(1, "messing with the barrier.");
++ return -EACCES;
++ }
++
+ mutex_lock(&inode->i_mutex);
+
+ /* Is it quota file? Do not allow user to mess with it */
+@@ -63,7 +96,9 @@ long ext3_ioctl(struct file *filp, unsig
+ *
+ * This test looks nicer. Thanks to Pauline Middelink
+ */
+- if ((flags ^ oldflags) & (EXT3_APPEND_FL | EXT3_IMMUTABLE_FL)) {
++ if ((oldflags & EXT3_IMMUTABLE_FL) ||
++ ((flags ^ oldflags) & (EXT3_APPEND_FL |
++ EXT3_IMMUTABLE_FL | EXT3_IXUNLINK_FL))) {
+ if (!capable(CAP_LINUX_IMMUTABLE))
+ goto flags_out;
+ }
+@@ -88,7 +123,7 @@ long ext3_ioctl(struct file *filp, unsig
+ if (err)
+ goto flags_err;
+
+- flags = flags & EXT3_FL_USER_MODIFIABLE;
++ flags &= EXT3_FL_USER_MODIFIABLE;
+ flags |= oldflags & ~EXT3_FL_USER_MODIFIABLE;
+ ei->i_flags = flags;
+
+diff -NurpP --minimal linux-3.6.10/fs/ext3/namei.c linux-3.6.10-vs2.3.4.6/fs/ext3/namei.c
+--- linux-3.6.10/fs/ext3/namei.c 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext3/namei.c 2012-10-04 16:47:00.000000000 +0000
+@@ -25,6 +25,8 @@
+ */
+
+ #include <linux/quotaops.h>
++#include <linux/vs_tag.h>
++
+ #include "ext3.h"
+ #include "namei.h"
+ #include "xattr.h"
+@@ -915,6 +917,7 @@ restart:
+ submit_bh(READ | REQ_META | REQ_PRIO,
+ bh);
+ }
++ dx_propagate_tag(nd, inode);
+ }
+ }
+ if ((bh = bh_use[ra_ptr++]) == NULL)
+@@ -2526,6 +2529,7 @@ const struct inode_operations ext3_dir_i
+ .listxattr = ext3_listxattr,
+ .removexattr = generic_removexattr,
+ #endif
++ .sync_flags = ext3_sync_flags,
+ .get_acl = ext3_get_acl,
+ };
+
+diff -NurpP --minimal linux-3.6.10/fs/ext3/super.c linux-3.6.10-vs2.3.4.6/fs/ext3/super.c
+--- linux-3.6.10/fs/ext3/super.c 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext3/super.c 2012-10-04 16:47:00.000000000 +0000
+@@ -811,7 +811,8 @@ enum {
+ Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
+ Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
+ Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err,
+- Opt_resize, Opt_usrquota, Opt_grpquota
++ Opt_resize, Opt_usrquota, Opt_grpquota,
++ Opt_tag, Opt_notag, Opt_tagid
+ };
+
+ static const match_table_t tokens = {
+@@ -868,6 +869,9 @@ static const match_table_t tokens = {
+ {Opt_barrier, "barrier"},
+ {Opt_nobarrier, "nobarrier"},
+ {Opt_resize, "resize"},
++ {Opt_tag, "tag"},
++ {Opt_notag, "notag"},
++ {Opt_tagid, "tagid=%u"},
+ {Opt_err, NULL},
+ };
+
+@@ -1033,6 +1037,20 @@ static int parse_options (char *options,
+ case Opt_nouid32:
+ set_opt (sbi->s_mount_opt, NO_UID32);
+ break;
++#ifndef CONFIG_TAGGING_NONE
++ case Opt_tag:
++ set_opt (sbi->s_mount_opt, TAGGED);
++ break;
++ case Opt_notag:
++ clear_opt (sbi->s_mount_opt, TAGGED);
++ break;
++#endif
++#ifdef CONFIG_PROPAGATE
++ case Opt_tagid:
++ /* use args[0] */
++ set_opt (sbi->s_mount_opt, TAGGED);
++ break;
++#endif
+ case Opt_nocheck:
+ clear_opt (sbi->s_mount_opt, CHECK);
+ break;
+@@ -1731,6 +1749,9 @@ static int ext3_fill_super (struct super
+ NULL, 0))
+ goto failed_mount;
+
++ if (EXT3_SB(sb)->s_mount_opt & EXT3_MOUNT_TAGGED)
++ sb->s_flags |= MS_TAGGED;
++
+ sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
+
+@@ -2618,6 +2639,14 @@ static int ext3_remount (struct super_bl
+ if (test_opt(sb, ABORT))
+ ext3_abort(sb, __func__, "Abort forced by user");
+
++ if ((sbi->s_mount_opt & EXT3_MOUNT_TAGGED) &&
++ !(sb->s_flags & MS_TAGGED)) {
++ printk("EXT3-fs: %s: tagging not permitted on remount.\n",
++ sb->s_id);
++ err = -EINVAL;
++ goto restore_opts;
++ }
++
+ sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
+
+diff -NurpP --minimal linux-3.6.10/fs/ext4/ext4.h linux-3.6.10-vs2.3.4.6/fs/ext4/ext4.h
+--- linux-3.6.10/fs/ext4/ext4.h 2012-12-11 11:36:57.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext4/ext4.h 2012-12-08 00:36:33.000000000 +0000
+@@ -393,8 +393,12 @@ struct flex_groups {
+ #define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */
+ #define EXT4_EA_INODE_FL 0x00200000 /* Inode used for large EA */
+ #define EXT4_EOFBLOCKS_FL 0x00400000 /* Blocks allocated beyond EOF */
++#define EXT4_IXUNLINK_FL 0x08000000 /* Immutable invert on unlink */
+ #define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */
+
++#define EXT4_BARRIER_FL 0x04000000 /* Barrier for chroot() */
++#define EXT4_COW_FL 0x20000000 /* Copy on Write marker */
++
+ #define EXT4_FL_USER_VISIBLE 0x004BDFFF /* User visible flags */
+ #define EXT4_FL_USER_MODIFIABLE 0x004B80FF /* User modifiable flags */
+
+@@ -666,7 +670,7 @@ struct ext4_inode {
+ __le16 l_i_uid_high; /* these 2 fields */
+ __le16 l_i_gid_high; /* were reserved2[0] */
+ __le16 l_i_checksum_lo;/* crc32c(uuid+inum+inode) LE */
+- __le16 l_i_reserved;
++ __le16 l_i_tag; /* Context Tag */
+ } linux2;
+ struct {
+ __le16 h_i_reserved1; /* Obsoleted fragment number/size which are removed in ext4 */
+@@ -784,6 +788,7 @@ do { \
+ #define i_gid_low i_gid
+ #define i_uid_high osd2.linux2.l_i_uid_high
+ #define i_gid_high osd2.linux2.l_i_gid_high
++#define i_raw_tag osd2.linux2.l_i_tag
+ #define i_checksum_lo osd2.linux2.l_i_checksum_lo
+
+ #elif defined(__GNU__)
+@@ -964,6 +969,7 @@ struct ext4_inode_info {
+ #define EXT4_MOUNT_POSIX_ACL 0x08000 /* POSIX Access Control Lists */
+ #define EXT4_MOUNT_NO_AUTO_DA_ALLOC 0x10000 /* No auto delalloc mapping */
+ #define EXT4_MOUNT_BARRIER 0x20000 /* Use block barriers */
++#define EXT4_MOUNT_TAGGED 0x40000 /* Enable Context Tags */
+ #define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */
+ #define EXT4_MOUNT_USRQUOTA 0x100000 /* "old" user quota */
+ #define EXT4_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */
+@@ -2392,6 +2398,7 @@ extern int ext4_map_blocks(handle_t *han
+ struct ext4_map_blocks *map, int flags);
+ extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ __u64 start, __u64 len);
++extern int ext4_sync_flags(struct inode *, int, int);
+ /* move_extent.c */
+ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ __u64 start_orig, __u64 start_donor,
+diff -NurpP --minimal linux-3.6.10/fs/ext4/file.c linux-3.6.10-vs2.3.4.6/fs/ext4/file.c
+--- linux-3.6.10/fs/ext4/file.c 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext4/file.c 2012-10-04 16:47:00.000000000 +0000
+@@ -334,5 +334,6 @@ const struct inode_operations ext4_file_
+ #endif
+ .get_acl = ext4_get_acl,
+ .fiemap = ext4_fiemap,
++ .sync_flags = ext4_sync_flags,
+ };
+
+diff -NurpP --minimal linux-3.6.10/fs/ext4/ialloc.c linux-3.6.10-vs2.3.4.6/fs/ext4/ialloc.c
+--- linux-3.6.10/fs/ext4/ialloc.c 2012-12-11 11:36:57.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext4/ialloc.c 2012-11-06 17:43:41.000000000 +0000
+@@ -22,6 +22,7 @@
+ #include <linux/random.h>
+ #include <linux/bitops.h>
+ #include <linux/blkdev.h>
++#include <linux/vs_tag.h>
+ #include <asm/byteorder.h>
+
+ #include "ext4.h"
+@@ -839,6 +840,7 @@ got:
+ inode->i_mode = mode;
+ inode->i_uid = current_fsuid();
+ inode->i_gid = dir->i_gid;
++ inode->i_tag = dx_current_fstag(sb);
+ } else
+ inode_init_owner(inode, dir, mode);
+
+diff -NurpP --minimal linux-3.6.10/fs/ext4/inode.c linux-3.6.10-vs2.3.4.6/fs/ext4/inode.c
+--- linux-3.6.10/fs/ext4/inode.c 2012-12-11 11:36:57.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext4/inode.c 2012-11-06 17:43:41.000000000 +0000
+@@ -37,6 +37,7 @@
+ #include <linux/printk.h>
+ #include <linux/slab.h>
+ #include <linux/ratelimit.h>
++#include <linux/vs_tag.h>
+
+ #include "ext4_jbd2.h"
+ #include "xattr.h"
+@@ -3715,41 +3716,64 @@ void ext4_set_inode_flags(struct inode *
+ {
+ unsigned int flags = EXT4_I(inode)->i_flags;
+
+- inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
++ inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
++ S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
++
++ if (flags & EXT4_IMMUTABLE_FL)
++ inode->i_flags |= S_IMMUTABLE;
++ if (flags & EXT4_IXUNLINK_FL)
++ inode->i_flags |= S_IXUNLINK;
++
+ if (flags & EXT4_SYNC_FL)
+ inode->i_flags |= S_SYNC;
+ if (flags & EXT4_APPEND_FL)
+ inode->i_flags |= S_APPEND;
+- if (flags & EXT4_IMMUTABLE_FL)
+- inode->i_flags |= S_IMMUTABLE;
+ if (flags & EXT4_NOATIME_FL)
+ inode->i_flags |= S_NOATIME;
+ if (flags & EXT4_DIRSYNC_FL)
+ inode->i_flags |= S_DIRSYNC;
++
++ inode->i_vflags &= ~(V_BARRIER | V_COW);
++
++ if (flags & EXT4_BARRIER_FL)
++ inode->i_vflags |= V_BARRIER;
++ if (flags & EXT4_COW_FL)
++ inode->i_vflags |= V_COW;
+ }
+
+ /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
+ void ext4_get_inode_flags(struct ext4_inode_info *ei)
+ {
+- unsigned int vfs_fl;
++ unsigned int vfs_fl, vfs_vf;
+ unsigned long old_fl, new_fl;
+
+ do {
+ vfs_fl = ei->vfs_inode.i_flags;
++ vfs_vf = ei->vfs_inode.i_vflags;
+ old_fl = ei->i_flags;
+ new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
+ EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
+- EXT4_DIRSYNC_FL);
++ EXT4_DIRSYNC_FL|EXT4_BARRIER_FL|
++ EXT4_COW_FL);
++
++ if (vfs_fl & S_IMMUTABLE)
++ new_fl |= EXT4_IMMUTABLE_FL;
++ if (vfs_fl & S_IXUNLINK)
++ new_fl |= EXT4_IXUNLINK_FL;
++
+ if (vfs_fl & S_SYNC)
+ new_fl |= EXT4_SYNC_FL;
+ if (vfs_fl & S_APPEND)
+ new_fl |= EXT4_APPEND_FL;
+- if (vfs_fl & S_IMMUTABLE)
+- new_fl |= EXT4_IMMUTABLE_FL;
+ if (vfs_fl & S_NOATIME)
+ new_fl |= EXT4_NOATIME_FL;
+ if (vfs_fl & S_DIRSYNC)
+ new_fl |= EXT4_DIRSYNC_FL;
++
++ if (vfs_vf & V_BARRIER)
++ new_fl |= EXT4_BARRIER_FL;
++ if (vfs_vf & V_COW)
++ new_fl |= EXT4_COW_FL;
+ } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
+ }
+
+@@ -3841,8 +3865,10 @@ struct inode *ext4_iget(struct super_blo
+ i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
+ i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
+ }
+- i_uid_write(inode, i_uid);
+- i_gid_write(inode, i_gid);
++ i_uid_write(inode, INOTAG_UID(DX_TAG(inode), i_uid, i_gid));
++ i_gid_write(inode, INOTAG_GID(DX_TAG(inode), i_uid, i_gid));
++ inode->i_tag = INOTAG_TAG(DX_TAG(inode), i_uid, i_gid,
++ le16_to_cpu(raw_inode->i_raw_tag));
+ set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
+
+ ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
+@@ -4066,8 +4092,8 @@ static int ext4_do_update_inode(handle_t
+
+ ext4_get_inode_flags(ei);
+ raw_inode->i_mode = cpu_to_le16(inode->i_mode);
+- i_uid = i_uid_read(inode);
+- i_gid = i_gid_read(inode);
++ i_uid = TAGINO_UID(DX_TAG(inode), i_uid_read(inode), inode->i_tag);
++ i_gid = TAGINO_GID(DX_TAG(inode), i_gid_read(inode), inode->i_tag);
+ if (!(test_opt(inode->i_sb, NO_UID32))) {
+ raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
+ raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
+@@ -4090,6 +4116,9 @@ static int ext4_do_update_inode(handle_t
+ raw_inode->i_uid_high = 0;
+ raw_inode->i_gid_high = 0;
+ }
++#ifdef CONFIG_TAGGING_INTERN
++ raw_inode->i_raw_tag = cpu_to_le16(inode->i_tag);
++#endif
+ raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
+
+ EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
+@@ -4278,7 +4307,8 @@ int ext4_setattr(struct dentry *dentry,
+ if (is_quota_modification(inode, attr))
+ dquot_initialize(inode);
+ if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
+- (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
++ (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)) ||
++ (ia_valid & ATTR_TAG && attr->ia_tag != inode->i_tag)) {
+ handle_t *handle;
+
+ /* (user+group)*(old+new) structure, inode write (sb,
+@@ -4300,6 +4330,8 @@ int ext4_setattr(struct dentry *dentry,
+ inode->i_uid = attr->ia_uid;
+ if (attr->ia_valid & ATTR_GID)
+ inode->i_gid = attr->ia_gid;
++ if ((attr->ia_valid & ATTR_TAG) && IS_TAGGED(inode))
++ inode->i_tag = attr->ia_tag;
+ error = ext4_mark_inode_dirty(handle, inode);
+ ext4_journal_stop(handle);
+ }
+diff -NurpP --minimal linux-3.6.10/fs/ext4/ioctl.c linux-3.6.10-vs2.3.4.6/fs/ext4/ioctl.c
+--- linux-3.6.10/fs/ext4/ioctl.c 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext4/ioctl.c 2012-10-04 16:47:00.000000000 +0000
+@@ -14,12 +14,40 @@
+ #include <linux/compat.h>
+ #include <linux/mount.h>
+ #include <linux/file.h>
++#include <linux/vs_tag.h>
+ #include <asm/uaccess.h>
+ #include "ext4_jbd2.h"
+ #include "ext4.h"
+
+ #define MAX_32_NUM ((((unsigned long long) 1) << 32) - 1)
+
++int ext4_sync_flags(struct inode *inode, int flags, int vflags)
++{
++ handle_t *handle = NULL;
++ struct ext4_iloc iloc;
++ int err;
++
++ handle = ext4_journal_start(inode, 1);
++ if (IS_ERR(handle))
++ return PTR_ERR(handle);
++
++ if (IS_SYNC(inode))
++ ext4_handle_sync(handle);
++ err = ext4_reserve_inode_write(handle, inode, &iloc);
++ if (err)
++ goto flags_err;
++
++ inode->i_flags = flags;
++ inode->i_vflags = vflags;
++ ext4_get_inode_flags(EXT4_I(inode));
++ inode->i_ctime = ext4_current_time(inode);
++
++ err = ext4_mark_iloc_dirty(handle, inode, &iloc);
++flags_err:
++ ext4_journal_stop(handle);
++ return err;
++}
++
+ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+ struct inode *inode = filp->f_dentry->d_inode;
+@@ -53,6 +81,11 @@ long ext4_ioctl(struct file *filp, unsig
+
+ flags = ext4_mask_flags(inode->i_mode, flags);
+
++ if (IS_BARRIER(inode)) {
++ vxwprintk_task(1, "messing with the barrier.");
++ return -EACCES;
++ }
++
+ err = -EPERM;
+ mutex_lock(&inode->i_mutex);
+ /* Is it quota file? Do not allow user to mess with it */
+@@ -70,7 +103,9 @@ long ext4_ioctl(struct file *filp, unsig
+ *
+ * This test looks nicer. Thanks to Pauline Middelink
+ */
+- if ((flags ^ oldflags) & (EXT4_APPEND_FL | EXT4_IMMUTABLE_FL)) {
++ if ((oldflags & EXT4_IMMUTABLE_FL) ||
++ ((flags ^ oldflags) & (EXT4_APPEND_FL |
++ EXT4_IMMUTABLE_FL | EXT4_IXUNLINK_FL))) {
+ if (!capable(CAP_LINUX_IMMUTABLE))
+ goto flags_out;
+ }
+diff -NurpP --minimal linux-3.6.10/fs/ext4/namei.c linux-3.6.10-vs2.3.4.6/fs/ext4/namei.c
+--- linux-3.6.10/fs/ext4/namei.c 2012-12-11 11:36:57.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext4/namei.c 2012-11-06 17:43:41.000000000 +0000
+@@ -34,6 +34,7 @@
+ #include <linux/quotaops.h>
+ #include <linux/buffer_head.h>
+ #include <linux/bio.h>
++#include <linux/vs_tag.h>
+ #include "ext4.h"
+ #include "ext4_jbd2.h"
+
+@@ -1199,6 +1200,7 @@ restart:
+ ll_rw_block(READ | REQ_META | REQ_PRIO,
+ 1, &bh);
+ }
++ dx_propagate_tag(nd, inode);
+ }
+ if ((bh = bh_use[ra_ptr++]) == NULL)
+ goto next;
+@@ -2982,6 +2984,7 @@ const struct inode_operations ext4_dir_i
+ #endif
+ .get_acl = ext4_get_acl,
+ .fiemap = ext4_fiemap,
++ .sync_flags = ext4_sync_flags,
+ };
+
+ const struct inode_operations ext4_special_inode_operations = {
+diff -NurpP --minimal linux-3.6.10/fs/ext4/super.c linux-3.6.10-vs2.3.4.6/fs/ext4/super.c
+--- linux-3.6.10/fs/ext4/super.c 2012-12-11 11:36:57.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ext4/super.c 2012-12-08 00:36:33.000000000 +0000
+@@ -1220,6 +1220,7 @@ enum {
+ Opt_inode_readahead_blks, Opt_journal_ioprio,
+ Opt_dioread_nolock, Opt_dioread_lock,
+ Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
++ Opt_tag, Opt_notag, Opt_tagid
+ };
+
+ static const match_table_t tokens = {
+@@ -1298,6 +1299,9 @@ static const match_table_t tokens = {
+ {Opt_removed, "reservation"}, /* mount option from ext2/3 */
+ {Opt_removed, "noreservation"}, /* mount option from ext2/3 */
+ {Opt_removed, "journal=%u"}, /* mount option from ext2/3 */
++ {Opt_tag, "tag"},
++ {Opt_notag, "notag"},
++ {Opt_tagid, "tagid=%u"},
+ {Opt_err, NULL},
+ };
+
+@@ -1544,6 +1548,20 @@ static int handle_mount_opt(struct super
+ return -1;
+ *journal_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
+ return 1;
++#ifndef CONFIG_TAGGING_NONE
++ case Opt_tag:
++ set_opt(sb, TAGGED);
++ return 1;
++ case Opt_notag:
++ clear_opt(sb, TAGGED);
++ return 1;
++#endif
++#ifdef CONFIG_PROPAGATE
++ case Opt_tagid:
++ /* use args[0] */
++ set_opt(sb, TAGGED);
++ return 1;
++#endif
+ }
+
+ for (m = ext4_mount_opts; m->token != Opt_err; m++) {
+@@ -3418,6 +3436,9 @@ static int ext4_fill_super(struct super_
+ }
+ }
+
++ if (EXT4_SB(sb)->s_mount_opt & EXT4_MOUNT_TAGGED)
++ sb->s_flags |= MS_TAGGED;
++
+ sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
+
+@@ -4583,6 +4604,14 @@ static int ext4_remount(struct super_blo
+ if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
+ ext4_abort(sb, "Abort forced by user");
+
++ if ((sbi->s_mount_opt & EXT4_MOUNT_TAGGED) &&
++ !(sb->s_flags & MS_TAGGED)) {
++ printk("EXT4-fs: %s: tagging not permitted on remount.\n",
++ sb->s_id);
++ err = -EINVAL;
++ goto restore_opts;
++ }
++
+ sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
+
+diff -NurpP --minimal linux-3.6.10/fs/fcntl.c linux-3.6.10-vs2.3.4.6/fs/fcntl.c
+--- linux-3.6.10/fs/fcntl.c 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/fcntl.c 2012-10-04 17:05:02.000000000 +0000
+@@ -21,6 +21,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/pid_namespace.h>
+ #include <linux/user_namespace.h>
++#include <linux/vs_limit.h>
+
+ #include <asm/poll.h>
+ #include <asm/siginfo.h>
+@@ -104,6 +105,8 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldf
+
+ if (tofree)
+ filp_close(tofree, files);
++ else
++ vx_openfd_inc(newfd); /* fd was unused */
+
+ return newfd;
+
+@@ -477,6 +480,8 @@ SYSCALL_DEFINE3(fcntl, unsigned int, fd,
+ filp = fget_raw_light(fd, &fput_needed);
+ if (!filp)
+ goto out;
++ if (!vx_files_avail(1))
++ goto out;
+
+ if (unlikely(filp->f_mode & FMODE_PATH)) {
+ if (!check_fcntl_cmd(cmd))
+diff -NurpP --minimal linux-3.6.10/fs/file.c linux-3.6.10-vs2.3.4.6/fs/file.c
+--- linux-3.6.10/fs/file.c 2012-05-21 16:07:20.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/file.c 2012-10-04 16:47:00.000000000 +0000
+@@ -21,6 +21,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/rcupdate.h>
+ #include <linux/workqueue.h>
++#include <linux/vs_limit.h>
+
+ struct fdtable_defer {
+ spinlock_t lock;
+@@ -358,6 +359,8 @@ struct files_struct *dup_fd(struct files
+ struct file *f = *old_fds++;
+ if (f) {
+ get_file(f);
++ /* TODO: sum it first for check and performance */
++ vx_openfd_inc(open_files - i);
+ } else {
+ /*
+ * The fd may be claimed in the fd bitmap but not yet
+@@ -464,6 +467,7 @@ repeat:
+ else
+ __clear_close_on_exec(fd, fdt);
+ error = fd;
++ vx_openfd_inc(fd);
+ #if 1
+ /* Sanity check */
+ if (rcu_dereference_raw(fdt->fd[fd]) != NULL) {
+diff -NurpP --minimal linux-3.6.10/fs/file_table.c linux-3.6.10-vs2.3.4.6/fs/file_table.c
+--- linux-3.6.10/fs/file_table.c 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/file_table.c 2012-11-17 13:36:19.000000000 +0000
+@@ -26,6 +26,8 @@
+ #include <linux/hardirq.h>
+ #include <linux/task_work.h>
+ #include <linux/ima.h>
++#include <linux/vs_limit.h>
++#include <linux/vs_context.h>
+
+ #include <linux/atomic.h>
+
+@@ -136,6 +138,8 @@ struct file *get_empty_filp(void)
+ spin_lock_init(&f->f_lock);
+ eventpoll_init_file(f);
+ /* f->f_version: 0 */
++ f->f_xid = vx_current_xid();
++ vx_files_inc(f);
+ return f;
+
+ over:
+@@ -257,6 +261,8 @@ static void __fput(struct file *file)
+ i_readcount_dec(inode);
+ if (file->f_mode & FMODE_WRITE)
+ drop_file_write_access(file);
++ vx_files_dec(file);
++ file->f_xid = 0;
+ file->f_path.dentry = NULL;
+ file->f_path.mnt = NULL;
+ file_free(file);
+@@ -449,6 +455,8 @@ void put_filp(struct file *file)
+ {
+ if (atomic_long_dec_and_test(&file->f_count)) {
+ security_file_free(file);
++ vx_files_dec(file);
++ file->f_xid = 0;
+ file_sb_list_del(file);
+ file_free(file);
+ }
+diff -NurpP --minimal linux-3.6.10/fs/fs_struct.c linux-3.6.10-vs2.3.4.6/fs/fs_struct.c
+--- linux-3.6.10/fs/fs_struct.c 2012-10-04 13:27:39.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/fs_struct.c 2012-10-04 17:08:56.000000000 +0000
+@@ -4,6 +4,7 @@
+ #include <linux/path.h>
+ #include <linux/slab.h>
+ #include <linux/fs_struct.h>
++#include <linux/vserver/global.h>
+ #include "internal.h"
+
+ /*
+@@ -87,6 +88,7 @@ void free_fs_struct(struct fs_struct *fs
+ {
+ path_put(&fs->root);
+ path_put(&fs->pwd);
++ atomic_dec(&vs_global_fs);
+ kmem_cache_free(fs_cachep, fs);
+ }
+
+@@ -124,6 +126,7 @@ struct fs_struct *copy_fs_struct(struct
+ fs->pwd = old->pwd;
+ path_get(&fs->pwd);
+ spin_unlock(&old->lock);
++ atomic_inc(&vs_global_fs);
+ }
+ return fs;
+ }
+diff -NurpP --minimal linux-3.6.10/fs/gfs2/file.c linux-3.6.10-vs2.3.4.6/fs/gfs2/file.c
+--- linux-3.6.10/fs/gfs2/file.c 2012-12-11 11:36:57.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/gfs2/file.c 2012-12-08 00:36:33.000000000 +0000
+@@ -143,6 +143,9 @@ static const u32 fsflags_to_gfs2[32] = {
+ [12] = GFS2_DIF_EXHASH,
+ [14] = GFS2_DIF_INHERIT_JDATA,
+ [17] = GFS2_DIF_TOPDIR,
++ [27] = GFS2_DIF_IXUNLINK,
++ [26] = GFS2_DIF_BARRIER,
++ [29] = GFS2_DIF_COW,
+ };
+
+ static const u32 gfs2_to_fsflags[32] = {
+@@ -153,6 +156,9 @@ static const u32 gfs2_to_fsflags[32] = {
+ [gfs2fl_ExHash] = FS_INDEX_FL,
+ [gfs2fl_TopLevel] = FS_TOPDIR_FL,
+ [gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
++ [gfs2fl_IXUnlink] = FS_IXUNLINK_FL,
++ [gfs2fl_Barrier] = FS_BARRIER_FL,
++ [gfs2fl_Cow] = FS_COW_FL,
+ };
+
+ static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
+@@ -183,12 +189,18 @@ void gfs2_set_inode_flags(struct inode *
+ {
+ struct gfs2_inode *ip = GFS2_I(inode);
+ unsigned int flags = inode->i_flags;
++ unsigned int vflags = inode->i_vflags;
++
++ flags &= ~(S_IMMUTABLE | S_IXUNLINK |
++ S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC | S_NOSEC);
+
+- flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
+ if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
+ inode->i_flags |= S_NOSEC;
+ if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
+ flags |= S_IMMUTABLE;
++ if (ip->i_diskflags & GFS2_DIF_IXUNLINK)
++ flags |= S_IXUNLINK;
++
+ if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
+ flags |= S_APPEND;
+ if (ip->i_diskflags & GFS2_DIF_NOATIME)
+@@ -196,6 +208,43 @@ void gfs2_set_inode_flags(struct inode *
+ if (ip->i_diskflags & GFS2_DIF_SYNC)
+ flags |= S_SYNC;
+ inode->i_flags = flags;
++
++ vflags &= ~(V_BARRIER | V_COW);
++
++ if (ip->i_diskflags & GFS2_DIF_BARRIER)
++ vflags |= V_BARRIER;
++ if (ip->i_diskflags & GFS2_DIF_COW)
++ vflags |= V_COW;
++ inode->i_vflags = vflags;
++}
++
++void gfs2_get_inode_flags(struct inode *inode)
++{
++ struct gfs2_inode *ip = GFS2_I(inode);
++ unsigned int flags = inode->i_flags;
++ unsigned int vflags = inode->i_vflags;
++
++ ip->i_diskflags &= ~(GFS2_DIF_APPENDONLY |
++ GFS2_DIF_NOATIME | GFS2_DIF_SYNC |
++ GFS2_DIF_IMMUTABLE | GFS2_DIF_IXUNLINK |
++ GFS2_DIF_BARRIER | GFS2_DIF_COW);
++
++ if (flags & S_IMMUTABLE)
++ ip->i_diskflags |= GFS2_DIF_IMMUTABLE;
++ if (flags & S_IXUNLINK)
++ ip->i_diskflags |= GFS2_DIF_IXUNLINK;
++
++ if (flags & S_APPEND)
++ ip->i_diskflags |= GFS2_DIF_APPENDONLY;
++ if (flags & S_NOATIME)
++ ip->i_diskflags |= GFS2_DIF_NOATIME;
++ if (flags & S_SYNC)
++ ip->i_diskflags |= GFS2_DIF_SYNC;
++
++ if (vflags & V_BARRIER)
++ ip->i_diskflags |= GFS2_DIF_BARRIER;
++ if (vflags & V_COW)
++ ip->i_diskflags |= GFS2_DIF_COW;
+ }
+
+ /* Flags that can be set by user space */
+@@ -309,6 +358,37 @@ static int gfs2_set_flags(struct file *f
+ return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
+ }
+
++int gfs2_sync_flags(struct inode *inode, int flags, int vflags)
++{
++ struct gfs2_inode *ip = GFS2_I(inode);
++ struct gfs2_sbd *sdp = GFS2_SB(inode);
++ struct buffer_head *bh;
++ struct gfs2_holder gh;
++ int error;
++
++ error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
++ if (error)
++ return error;
++ error = gfs2_trans_begin(sdp, RES_DINODE, 0);
++ if (error)
++ goto out;
++ error = gfs2_meta_inode_buffer(ip, &bh);
++ if (error)
++ goto out_trans_end;
++ gfs2_trans_add_bh(ip->i_gl, bh, 1);
++ inode->i_flags = flags;
++ inode->i_vflags = vflags;
++ gfs2_get_inode_flags(inode);
++ gfs2_dinode_out(ip, bh->b_data);
++ brelse(bh);
++ gfs2_set_aops(inode);
++out_trans_end:
++ gfs2_trans_end(sdp);
++out:
++ gfs2_glock_dq_uninit(&gh);
++ return error;
++}
++
+ static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+ switch(cmd) {
+diff -NurpP --minimal linux-3.6.10/fs/gfs2/inode.h linux-3.6.10-vs2.3.4.6/fs/gfs2/inode.h
+--- linux-3.6.10/fs/gfs2/inode.h 2012-07-22 21:39:40.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/gfs2/inode.h 2012-10-04 16:47:00.000000000 +0000
+@@ -117,6 +117,7 @@ extern const struct file_operations gfs2
+ extern const struct file_operations gfs2_dir_fops_nolock;
+
+ extern void gfs2_set_inode_flags(struct inode *inode);
++extern int gfs2_sync_flags(struct inode *inode, int flags, int vflags);
+
+ #ifdef CONFIG_GFS2_FS_LOCKING_DLM
+ extern const struct file_operations gfs2_file_fops;
+diff -NurpP --minimal linux-3.6.10/fs/inode.c linux-3.6.10-vs2.3.4.6/fs/inode.c
+--- linux-3.6.10/fs/inode.c 2012-12-11 11:36:58.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/inode.c 2012-12-08 00:36:33.000000000 +0000
+@@ -17,6 +17,7 @@
+ #include <linux/prefetch.h>
+ #include <linux/buffer_head.h> /* for inode_has_buffers */
+ #include <linux/ratelimit.h>
++#include <linux/vs_tag.h>
+ #include "internal.h"
+
+ /*
+@@ -128,6 +129,9 @@ int inode_init_always(struct super_block
+ struct address_space *const mapping = &inode->i_data;
+
+ inode->i_sb = sb;
++
++ /* essential because of inode slab reuse */
++ inode->i_tag = 0;
+ inode->i_blkbits = sb->s_blocksize_bits;
+ inode->i_flags = 0;
+ atomic_set(&inode->i_count, 1);
+@@ -149,6 +153,7 @@ int inode_init_always(struct super_block
+ inode->i_bdev = NULL;
+ inode->i_cdev = NULL;
+ inode->i_rdev = 0;
++ inode->i_mdev = 0;
+ inode->dirtied_when = 0;
+
+ if (security_inode_alloc(inode))
+@@ -483,6 +488,8 @@ void __insert_inode_hash(struct inode *i
+ }
+ EXPORT_SYMBOL(__insert_inode_hash);
+
++EXPORT_SYMBOL_GPL(__iget);
++
+ /**
+ * __remove_inode_hash - remove an inode from the hash
+ * @inode: inode to unhash
+@@ -1804,9 +1811,11 @@ void init_special_inode(struct inode *in
+ if (S_ISCHR(mode)) {
+ inode->i_fop = &def_chr_fops;
+ inode->i_rdev = rdev;
++ inode->i_mdev = rdev;
+ } else if (S_ISBLK(mode)) {
+ inode->i_fop = &def_blk_fops;
+ inode->i_rdev = rdev;
++ inode->i_mdev = rdev;
+ } else if (S_ISFIFO(mode))
+ inode->i_fop = &def_fifo_fops;
+ else if (S_ISSOCK(mode))
+@@ -1835,6 +1844,7 @@ void inode_init_owner(struct inode *inod
+ } else
+ inode->i_gid = current_fsgid();
+ inode->i_mode = mode;
++ inode->i_tag = dx_current_fstag(inode->i_sb);
+ }
+ EXPORT_SYMBOL(inode_init_owner);
+
+diff -NurpP --minimal linux-3.6.10/fs/ioctl.c linux-3.6.10-vs2.3.4.6/fs/ioctl.c
+--- linux-3.6.10/fs/ioctl.c 2012-05-21 16:07:24.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ioctl.c 2012-10-04 16:47:00.000000000 +0000
+@@ -15,6 +15,9 @@
+ #include <linux/writeback.h>
+ #include <linux/buffer_head.h>
+ #include <linux/falloc.h>
++#include <linux/proc_fs.h>
++#include <linux/vserver/inode.h>
++#include <linux/vs_tag.h>
+
+ #include <asm/ioctls.h>
+
+diff -NurpP --minimal linux-3.6.10/fs/ioprio.c linux-3.6.10-vs2.3.4.6/fs/ioprio.c
+--- linux-3.6.10/fs/ioprio.c 2012-07-22 21:39:40.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ioprio.c 2012-10-04 16:47:00.000000000 +0000
+@@ -28,6 +28,7 @@
+ #include <linux/syscalls.h>
+ #include <linux/security.h>
+ #include <linux/pid_namespace.h>
++#include <linux/vs_base.h>
+
+ int set_task_ioprio(struct task_struct *task, int ioprio)
+ {
+@@ -105,6 +106,8 @@ SYSCALL_DEFINE3(ioprio_set, int, which,
+ else
+ pgrp = find_vpid(who);
+ do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
++ if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
++ continue;
+ ret = set_task_ioprio(p, ioprio);
+ if (ret)
+ break;
+@@ -198,6 +201,8 @@ SYSCALL_DEFINE2(ioprio_get, int, which,
+ else
+ pgrp = find_vpid(who);
+ do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
++ if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
++ continue;
+ tmpio = get_task_ioprio(p);
+ if (tmpio < 0)
+ continue;
+diff -NurpP --minimal linux-3.6.10/fs/jfs/file.c linux-3.6.10-vs2.3.4.6/fs/jfs/file.c
+--- linux-3.6.10/fs/jfs/file.c 2011-10-24 16:45:27.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/jfs/file.c 2012-10-04 16:47:00.000000000 +0000
+@@ -109,7 +109,8 @@ int jfs_setattr(struct dentry *dentry, s
+ if (is_quota_modification(inode, iattr))
+ dquot_initialize(inode);
+ if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
+- (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
++ (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid) ||
++ (iattr->ia_valid & ATTR_TAG && iattr->ia_tag != inode->i_tag)) {
+ rc = dquot_transfer(inode, iattr);
+ if (rc)
+ return rc;
+@@ -142,6 +143,7 @@ const struct inode_operations jfs_file_i
+ #ifdef CONFIG_JFS_POSIX_ACL
+ .get_acl = jfs_get_acl,
+ #endif
++ .sync_flags = jfs_sync_flags,
+ };
+
+ const struct file_operations jfs_file_operations = {
+diff -NurpP --minimal linux-3.6.10/fs/jfs/ioctl.c linux-3.6.10-vs2.3.4.6/fs/jfs/ioctl.c
+--- linux-3.6.10/fs/jfs/ioctl.c 2012-03-19 18:47:25.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/jfs/ioctl.c 2012-10-04 16:47:00.000000000 +0000
+@@ -11,6 +11,7 @@
+ #include <linux/mount.h>
+ #include <linux/time.h>
+ #include <linux/sched.h>
++#include <linux/mount.h>
+ #include <asm/current.h>
+ #include <asm/uaccess.h>
+
+@@ -52,6 +53,16 @@ static long jfs_map_ext2(unsigned long f
+ }
+
+
++int jfs_sync_flags(struct inode *inode, int flags, int vflags)
++{
++ inode->i_flags = flags;
++ inode->i_vflags = vflags;
++ jfs_get_inode_flags(JFS_IP(inode));
++ inode->i_ctime = CURRENT_TIME_SEC;
++ mark_inode_dirty(inode);
++ return 0;
++}
++
+ long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+ struct inode *inode = filp->f_dentry->d_inode;
+@@ -85,6 +96,11 @@ long jfs_ioctl(struct file *filp, unsign
+ if (!S_ISDIR(inode->i_mode))
+ flags &= ~JFS_DIRSYNC_FL;
+
++ if (IS_BARRIER(inode)) {
++ vxwprintk_task(1, "messing with the barrier.");
++ return -EACCES;
++ }
++
+ /* Is it quota file? Do not allow user to mess with it */
+ if (IS_NOQUOTA(inode)) {
+ err = -EPERM;
+@@ -102,8 +118,8 @@ long jfs_ioctl(struct file *filp, unsign
+ * the relevant capability.
+ */
+ if ((oldflags & JFS_IMMUTABLE_FL) ||
+- ((flags ^ oldflags) &
+- (JFS_APPEND_FL | JFS_IMMUTABLE_FL))) {
++ ((flags ^ oldflags) & (JFS_APPEND_FL |
++ JFS_IMMUTABLE_FL | JFS_IXUNLINK_FL))) {
+ if (!capable(CAP_LINUX_IMMUTABLE)) {
+ mutex_unlock(&inode->i_mutex);
+ err = -EPERM;
+@@ -111,7 +127,7 @@ long jfs_ioctl(struct file *filp, unsign
+ }
+ }
+
+- flags = flags & JFS_FL_USER_MODIFIABLE;
++ flags &= JFS_FL_USER_MODIFIABLE;
+ flags |= oldflags & ~JFS_FL_USER_MODIFIABLE;
+ jfs_inode->mode2 = flags;
+
+diff -NurpP --minimal linux-3.6.10/fs/jfs/jfs_dinode.h linux-3.6.10-vs2.3.4.6/fs/jfs/jfs_dinode.h
+--- linux-3.6.10/fs/jfs/jfs_dinode.h 2008-12-24 23:26:37.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/jfs/jfs_dinode.h 2012-10-04 16:47:00.000000000 +0000
+@@ -161,9 +161,13 @@ struct dinode {
+
+ #define JFS_APPEND_FL 0x01000000 /* writes to file may only append */
+ #define JFS_IMMUTABLE_FL 0x02000000 /* Immutable file */
++#define JFS_IXUNLINK_FL 0x08000000 /* Immutable invert on unlink */
+
+-#define JFS_FL_USER_VISIBLE 0x03F80000
+-#define JFS_FL_USER_MODIFIABLE 0x03F80000
++#define JFS_BARRIER_FL 0x04000000 /* Barrier for chroot() */
++#define JFS_COW_FL 0x20000000 /* Copy on Write marker */
++
++#define JFS_FL_USER_VISIBLE 0x07F80000
++#define JFS_FL_USER_MODIFIABLE 0x07F80000
+ #define JFS_FL_INHERIT 0x03C80000
+
+ /* These are identical to EXT[23]_IOC_GETFLAGS/SETFLAGS */
+diff -NurpP --minimal linux-3.6.10/fs/jfs/jfs_filsys.h linux-3.6.10-vs2.3.4.6/fs/jfs/jfs_filsys.h
+--- linux-3.6.10/fs/jfs/jfs_filsys.h 2008-12-24 23:26:37.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/jfs/jfs_filsys.h 2012-10-04 16:47:00.000000000 +0000
+@@ -263,6 +263,7 @@
+ #define JFS_NAME_MAX 255
+ #define JFS_PATH_MAX BPSIZE
+
++#define JFS_TAGGED 0x00800000 /* Context Tagging */
+
+ /*
+ * file system state (superblock state)
+diff -NurpP --minimal linux-3.6.10/fs/jfs/jfs_imap.c linux-3.6.10-vs2.3.4.6/fs/jfs/jfs_imap.c
+--- linux-3.6.10/fs/jfs/jfs_imap.c 2012-01-09 15:14:54.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/jfs/jfs_imap.c 2012-10-04 16:47:00.000000000 +0000
+@@ -46,6 +46,7 @@
+ #include <linux/pagemap.h>
+ #include <linux/quotaops.h>
+ #include <linux/slab.h>
++#include <linux/vs_tag.h>
+
+ #include "jfs_incore.h"
+ #include "jfs_inode.h"
+@@ -3058,6 +3059,8 @@ static int copy_from_dinode(struct dinod
+ {
+ struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+ struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
++ uid_t uid;
++ gid_t gid;
+
+ jfs_ip->fileset = le32_to_cpu(dip->di_fileset);
+ jfs_ip->mode2 = le32_to_cpu(dip->di_mode);
+@@ -3078,14 +3081,18 @@ static int copy_from_dinode(struct dinod
+ }
+ set_nlink(ip, le32_to_cpu(dip->di_nlink));
+
+- jfs_ip->saved_uid = le32_to_cpu(dip->di_uid);
++ uid = le32_to_cpu(dip->di_uid);
++ gid = le32_to_cpu(dip->di_gid);
++ ip->i_tag = INOTAG_TAG(DX_TAG(ip), uid, gid, 0);
++
++ jfs_ip->saved_uid = INOTAG_UID(DX_TAG(ip), uid, gid);
+ if (sbi->uid == -1)
+ ip->i_uid = jfs_ip->saved_uid;
+ else {
+ ip->i_uid = sbi->uid;
+ }
+
+- jfs_ip->saved_gid = le32_to_cpu(dip->di_gid);
++ jfs_ip->saved_gid = INOTAG_GID(DX_TAG(ip), uid, gid);
+ if (sbi->gid == -1)
+ ip->i_gid = jfs_ip->saved_gid;
+ else {
+@@ -3150,14 +3157,12 @@ static void copy_to_dinode(struct dinode
+ dip->di_size = cpu_to_le64(ip->i_size);
+ dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks));
+ dip->di_nlink = cpu_to_le32(ip->i_nlink);
+- if (sbi->uid == -1)
+- dip->di_uid = cpu_to_le32(ip->i_uid);
+- else
+- dip->di_uid = cpu_to_le32(jfs_ip->saved_uid);
+- if (sbi->gid == -1)
+- dip->di_gid = cpu_to_le32(ip->i_gid);
+- else
+- dip->di_gid = cpu_to_le32(jfs_ip->saved_gid);
++
++ dip->di_uid = cpu_to_le32(TAGINO_UID(DX_TAG(ip),
++ (sbi->uid == -1) ? ip->i_uid : jfs_ip->saved_uid, ip->i_tag));
++ dip->di_gid = cpu_to_le32(TAGINO_GID(DX_TAG(ip),
++ (sbi->gid == -1) ? ip->i_gid : jfs_ip->saved_gid, ip->i_tag));
++
+ jfs_get_inode_flags(jfs_ip);
+ /*
+ * mode2 is only needed for storing the higher order bits.
+diff -NurpP --minimal linux-3.6.10/fs/jfs/jfs_inode.c linux-3.6.10-vs2.3.4.6/fs/jfs/jfs_inode.c
+--- linux-3.6.10/fs/jfs/jfs_inode.c 2012-01-09 15:14:54.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/jfs/jfs_inode.c 2012-10-04 16:47:00.000000000 +0000
+@@ -18,6 +18,7 @@
+
+ #include <linux/fs.h>
+ #include <linux/quotaops.h>
++#include <linux/vs_tag.h>
+ #include "jfs_incore.h"
+ #include "jfs_inode.h"
+ #include "jfs_filsys.h"
+@@ -30,29 +31,46 @@ void jfs_set_inode_flags(struct inode *i
+ {
+ unsigned int flags = JFS_IP(inode)->mode2;
+
+- inode->i_flags &= ~(S_IMMUTABLE | S_APPEND |
+- S_NOATIME | S_DIRSYNC | S_SYNC);
++ inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
++ S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
+
+ if (flags & JFS_IMMUTABLE_FL)
+ inode->i_flags |= S_IMMUTABLE;
++ if (flags & JFS_IXUNLINK_FL)
++ inode->i_flags |= S_IXUNLINK;
++
++ if (flags & JFS_SYNC_FL)
++ inode->i_flags |= S_SYNC;
+ if (flags & JFS_APPEND_FL)
+ inode->i_flags |= S_APPEND;
+ if (flags & JFS_NOATIME_FL)
+ inode->i_flags |= S_NOATIME;
+ if (flags & JFS_DIRSYNC_FL)
+ inode->i_flags |= S_DIRSYNC;
+- if (flags & JFS_SYNC_FL)
+- inode->i_flags |= S_SYNC;
++
++ inode->i_vflags &= ~(V_BARRIER | V_COW);
++
++ if (flags & JFS_BARRIER_FL)
++ inode->i_vflags |= V_BARRIER;
++ if (flags & JFS_COW_FL)
++ inode->i_vflags |= V_COW;
+ }
+
+ void jfs_get_inode_flags(struct jfs_inode_info *jfs_ip)
+ {
+ unsigned int flags = jfs_ip->vfs_inode.i_flags;
++ unsigned int vflags = jfs_ip->vfs_inode.i_vflags;
++
++ jfs_ip->mode2 &= ~(JFS_IMMUTABLE_FL | JFS_IXUNLINK_FL |
++ JFS_APPEND_FL | JFS_NOATIME_FL |
++ JFS_DIRSYNC_FL | JFS_SYNC_FL |
++ JFS_BARRIER_FL | JFS_COW_FL);
+
+- jfs_ip->mode2 &= ~(JFS_IMMUTABLE_FL | JFS_APPEND_FL | JFS_NOATIME_FL |
+- JFS_DIRSYNC_FL | JFS_SYNC_FL);
+ if (flags & S_IMMUTABLE)
+ jfs_ip->mode2 |= JFS_IMMUTABLE_FL;
++ if (flags & S_IXUNLINK)
++ jfs_ip->mode2 |= JFS_IXUNLINK_FL;
++
+ if (flags & S_APPEND)
+ jfs_ip->mode2 |= JFS_APPEND_FL;
+ if (flags & S_NOATIME)
+@@ -61,6 +79,11 @@ void jfs_get_inode_flags(struct jfs_inod
+ jfs_ip->mode2 |= JFS_DIRSYNC_FL;
+ if (flags & S_SYNC)
+ jfs_ip->mode2 |= JFS_SYNC_FL;
++
++ if (vflags & V_BARRIER)
++ jfs_ip->mode2 |= JFS_BARRIER_FL;
++ if (vflags & V_COW)
++ jfs_ip->mode2 |= JFS_COW_FL;
+ }
+
+ /*
+diff -NurpP --minimal linux-3.6.10/fs/jfs/jfs_inode.h linux-3.6.10-vs2.3.4.6/fs/jfs/jfs_inode.h
+--- linux-3.6.10/fs/jfs/jfs_inode.h 2011-10-24 16:45:27.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/jfs/jfs_inode.h 2012-10-04 16:47:00.000000000 +0000
+@@ -39,6 +39,7 @@ extern struct dentry *jfs_fh_to_dentry(s
+ extern struct dentry *jfs_fh_to_parent(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type);
+ extern void jfs_set_inode_flags(struct inode *);
++extern int jfs_sync_flags(struct inode *, int, int);
+ extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
+ extern int jfs_setattr(struct dentry *, struct iattr *);
+
+diff -NurpP --minimal linux-3.6.10/fs/jfs/namei.c linux-3.6.10-vs2.3.4.6/fs/jfs/namei.c
+--- linux-3.6.10/fs/jfs/namei.c 2012-10-04 13:27:40.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/jfs/namei.c 2012-10-04 16:47:00.000000000 +0000
+@@ -22,6 +22,7 @@
+ #include <linux/ctype.h>
+ #include <linux/quotaops.h>
+ #include <linux/exportfs.h>
++#include <linux/vs_tag.h>
+ #include "jfs_incore.h"
+ #include "jfs_superblock.h"
+ #include "jfs_inode.h"
+@@ -1461,6 +1462,7 @@ static struct dentry *jfs_lookup(struct
+ jfs_err("jfs_lookup: iget failed on inum %d", (uint)inum);
+ }
+
++ dx_propagate_tag(nd, ip);
+ return d_splice_alias(ip, dentry);
+ }
+
+@@ -1525,6 +1527,7 @@ const struct inode_operations jfs_dir_in
+ #ifdef CONFIG_JFS_POSIX_ACL
+ .get_acl = jfs_get_acl,
+ #endif
++ .sync_flags = jfs_sync_flags,
+ };
+
+ const struct file_operations jfs_dir_operations = {
+diff -NurpP --minimal linux-3.6.10/fs/jfs/super.c linux-3.6.10-vs2.3.4.6/fs/jfs/super.c
+--- linux-3.6.10/fs/jfs/super.c 2012-10-04 13:27:40.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/jfs/super.c 2012-10-04 16:47:00.000000000 +0000
+@@ -197,7 +197,8 @@ static void jfs_put_super(struct super_b
+ enum {
+ Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
+ Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
+- Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask
++ Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask,
++ Opt_tag, Opt_notag, Opt_tagid
+ };
+
+ static const match_table_t tokens = {
+@@ -207,6 +208,10 @@ static const match_table_t tokens = {
+ {Opt_resize, "resize=%u"},
+ {Opt_resize_nosize, "resize"},
+ {Opt_errors, "errors=%s"},
++ {Opt_tag, "tag"},
++ {Opt_notag, "notag"},
++ {Opt_tagid, "tagid=%u"},
++ {Opt_tag, "tagxid"},
+ {Opt_ignore, "noquota"},
+ {Opt_ignore, "quota"},
+ {Opt_usrquota, "usrquota"},
+@@ -341,6 +346,20 @@ static int parse_options(char *options,
+ }
+ break;
+ }
++#ifndef CONFIG_TAGGING_NONE
++ case Opt_tag:
++ *flag |= JFS_TAGGED;
++ break;
++ case Opt_notag:
++		*flag &= ~JFS_TAGGED;
++ break;
++#endif
++#ifdef CONFIG_PROPAGATE
++ case Opt_tagid:
++ /* use args[0] */
++ *flag |= JFS_TAGGED;
++ break;
++#endif
+ default:
+ printk("jfs: Unrecognized mount option \"%s\" "
+ " or missing value\n", p);
+@@ -372,6 +391,12 @@ static int jfs_remount(struct super_bloc
+ return -EINVAL;
+ }
+
++ if ((flag & JFS_TAGGED) && !(sb->s_flags & MS_TAGGED)) {
++ printk(KERN_ERR "JFS: %s: tagging not permitted on remount.\n",
++ sb->s_id);
++ return -EINVAL;
++ }
++
+ if (newLVSize) {
+ if (sb->s_flags & MS_RDONLY) {
+ printk(KERN_ERR
+@@ -455,6 +480,9 @@ static int jfs_fill_super(struct super_b
+ #ifdef CONFIG_JFS_POSIX_ACL
+ sb->s_flags |= MS_POSIXACL;
+ #endif
++ /* map mount option tagxid */
++ if (sbi->flag & JFS_TAGGED)
++ sb->s_flags |= MS_TAGGED;
+
+ if (newLVSize) {
+ printk(KERN_ERR "resize option for remount only\n");
+diff -NurpP --minimal linux-3.6.10/fs/libfs.c linux-3.6.10-vs2.3.4.6/fs/libfs.c
+--- linux-3.6.10/fs/libfs.c 2012-10-04 13:27:40.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/libfs.c 2012-10-04 16:47:00.000000000 +0000
+@@ -135,7 +135,8 @@ static inline unsigned char dt_type(stru
+ * both impossible due to the lock on directory.
+ */
+
+-int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
++static inline int do_dcache_readdir_filter(struct file *filp,
++ void *dirent, filldir_t filldir, int (*filter)(struct dentry *dentry))
+ {
+ struct dentry *dentry = filp->f_path.dentry;
+ struct dentry *cursor = filp->private_data;
+@@ -166,6 +167,8 @@ int dcache_readdir(struct file * filp, v
+ for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
+ struct dentry *next;
+ next = list_entry(p, struct dentry, d_u.d_child);
++ if (filter && !filter(next))
++ continue;
+ spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
+ if (!simple_positive(next)) {
+ spin_unlock(&next->d_lock);
+@@ -192,6 +195,17 @@ int dcache_readdir(struct file * filp, v
+ return 0;
+ }
+
++int dcache_readdir(struct file *filp, void *dirent, filldir_t filldir)
++{
++ return do_dcache_readdir_filter(filp, dirent, filldir, NULL);
++}
++
++int dcache_readdir_filter(struct file *filp, void *dirent, filldir_t filldir,
++ int (*filter)(struct dentry *))
++{
++ return do_dcache_readdir_filter(filp, dirent, filldir, filter);
++}
++
+ ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos)
+ {
+ return -EISDIR;
+@@ -983,6 +997,7 @@ EXPORT_SYMBOL(dcache_dir_close);
+ EXPORT_SYMBOL(dcache_dir_lseek);
+ EXPORT_SYMBOL(dcache_dir_open);
+ EXPORT_SYMBOL(dcache_readdir);
++EXPORT_SYMBOL(dcache_readdir_filter);
+ EXPORT_SYMBOL(generic_read_dir);
+ EXPORT_SYMBOL(mount_pseudo);
+ EXPORT_SYMBOL(simple_write_begin);
+diff -NurpP --minimal linux-3.6.10/fs/locks.c linux-3.6.10-vs2.3.4.6/fs/locks.c
+--- linux-3.6.10/fs/locks.c 2012-10-04 13:27:40.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/locks.c 2012-10-04 16:47:00.000000000 +0000
+@@ -126,6 +126,8 @@
+ #include <linux/time.h>
+ #include <linux/rcupdate.h>
+ #include <linux/pid_namespace.h>
++#include <linux/vs_base.h>
++#include <linux/vs_limit.h>
+
+ #include <asm/uaccess.h>
+
+@@ -184,11 +186,17 @@ static void locks_init_lock_heads(struct
+ /* Allocate an empty lock structure. */
+ struct file_lock *locks_alloc_lock(void)
+ {
+- struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
++ struct file_lock *fl;
+
+- if (fl)
+- locks_init_lock_heads(fl);
++ if (!vx_locks_avail(1))
++ return NULL;
+
++ fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
++
++ if (fl) {
++ locks_init_lock_heads(fl);
++ fl->fl_xid = -1;
++ }
+ return fl;
+ }
+ EXPORT_SYMBOL_GPL(locks_alloc_lock);
+@@ -212,6 +220,7 @@ void locks_free_lock(struct file_lock *f
+ BUG_ON(!list_empty(&fl->fl_block));
+ BUG_ON(!list_empty(&fl->fl_link));
+
++ vx_locks_dec(fl);
+ locks_release_private(fl);
+ kmem_cache_free(filelock_cache, fl);
+ }
+@@ -221,6 +230,7 @@ void locks_init_lock(struct file_lock *f
+ {
+ memset(fl, 0, sizeof(struct file_lock));
+ locks_init_lock_heads(fl);
++ fl->fl_xid = -1;
+ }
+
+ EXPORT_SYMBOL(locks_init_lock);
+@@ -261,6 +271,7 @@ void locks_copy_lock(struct file_lock *n
+ new->fl_file = fl->fl_file;
+ new->fl_ops = fl->fl_ops;
+ new->fl_lmops = fl->fl_lmops;
++ new->fl_xid = fl->fl_xid;
+
+ locks_copy_private(new, fl);
+ }
+@@ -299,6 +310,11 @@ static int flock_make_lock(struct file *
+ fl->fl_flags = FL_FLOCK;
+ fl->fl_type = type;
+ fl->fl_end = OFFSET_MAX;
++
++ vxd_assert(filp->f_xid == vx_current_xid(),
++ "f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid());
++ fl->fl_xid = filp->f_xid;
++ vx_locks_inc(fl);
+
+ *lock = fl;
+ return 0;
+@@ -438,6 +454,7 @@ static int lease_init(struct file *filp,
+
+ fl->fl_owner = current->files;
+ fl->fl_pid = current->tgid;
++ fl->fl_xid = vx_current_xid();
+
+ fl->fl_file = filp;
+ fl->fl_flags = FL_LEASE;
+@@ -457,6 +474,11 @@ static struct file_lock *lease_alloc(str
+ if (fl == NULL)
+ return ERR_PTR(error);
+
++ fl->fl_xid = vx_current_xid();
++ if (filp)
++ vxd_assert(filp->f_xid == fl->fl_xid,
++ "f_xid(%d) == fl_xid(%d)", filp->f_xid, fl->fl_xid);
++ vx_locks_inc(fl);
+ error = lease_init(filp, type, fl);
+ if (error) {
+ locks_free_lock(fl);
+@@ -753,6 +775,7 @@ static int flock_lock_file(struct file *
+ lock_flocks();
+ }
+
++ new_fl->fl_xid = -1;
+ find_conflict:
+ for_each_lock(inode, before) {
+ struct file_lock *fl = *before;
+@@ -773,6 +796,7 @@ find_conflict:
+ goto out;
+ locks_copy_lock(new_fl, request);
+ locks_insert_lock(before, new_fl);
++ vx_locks_inc(new_fl);
+ new_fl = NULL;
+ error = 0;
+
+@@ -783,7 +807,8 @@ out:
+ return error;
+ }
+
+-static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
++static int __posix_lock_file(struct inode *inode, struct file_lock *request,
++ struct file_lock *conflock, xid_t xid)
+ {
+ struct file_lock *fl;
+ struct file_lock *new_fl = NULL;
+@@ -793,6 +818,8 @@ static int __posix_lock_file(struct inod
+ struct file_lock **before;
+ int error, added = 0;
+
++ vxd_assert(xid == vx_current_xid(),
++ "xid(%d) == current(%d)", xid, vx_current_xid());
+ /*
+ * We may need two file_lock structures for this operation,
+ * so we get them in advance to avoid races.
+@@ -803,7 +830,11 @@ static int __posix_lock_file(struct inod
+ (request->fl_type != F_UNLCK ||
+ request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
+ new_fl = locks_alloc_lock();
++ new_fl->fl_xid = xid;
++ vx_locks_inc(new_fl);
+ new_fl2 = locks_alloc_lock();
++ new_fl2->fl_xid = xid;
++ vx_locks_inc(new_fl2);
+ }
+
+ lock_flocks();
+@@ -1002,7 +1033,8 @@ static int __posix_lock_file(struct inod
+ int posix_lock_file(struct file *filp, struct file_lock *fl,
+ struct file_lock *conflock)
+ {
+- return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
++ return __posix_lock_file(filp->f_path.dentry->d_inode,
++ fl, conflock, filp->f_xid);
+ }
+ EXPORT_SYMBOL(posix_lock_file);
+
+@@ -1092,7 +1124,7 @@ int locks_mandatory_area(int read_write,
+ fl.fl_end = offset + count - 1;
+
+ for (;;) {
+- error = __posix_lock_file(inode, &fl, NULL);
++ error = __posix_lock_file(inode, &fl, NULL, filp->f_xid);
+ if (error != FILE_LOCK_DEFERRED)
+ break;
+ error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
+@@ -1397,6 +1429,7 @@ int generic_add_lease(struct file *filp,
+ goto out;
+
+ locks_insert_lock(before, lease);
++ vx_locks_inc(lease);
+ return 0;
+
+ out:
+@@ -1838,6 +1871,11 @@ int fcntl_setlk(unsigned int fd, struct
+ if (file_lock == NULL)
+ return -ENOLCK;
+
++ vxd_assert(filp->f_xid == vx_current_xid(),
++ "f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid());
++ file_lock->fl_xid = filp->f_xid;
++ vx_locks_inc(file_lock);
++
+ /*
+ * This might block, so we do it before checking the inode.
+ */
+@@ -1956,6 +1994,11 @@ int fcntl_setlk64(unsigned int fd, struc
+ if (file_lock == NULL)
+ return -ENOLCK;
+
++ vxd_assert(filp->f_xid == vx_current_xid(),
++ "f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid());
++ file_lock->fl_xid = filp->f_xid;
++ vx_locks_inc(file_lock);
++
+ /*
+ * This might block, so we do it before checking the inode.
+ */
+@@ -2221,8 +2264,11 @@ static int locks_show(struct seq_file *f
+
+ lock_get_status(f, fl, *((loff_t *)f->private), "");
+
+- list_for_each_entry(bfl, &fl->fl_block, fl_block)
++ list_for_each_entry(bfl, &fl->fl_block, fl_block) {
++ if (!vx_check(fl->fl_xid, VS_WATCH_P | VS_IDENT))
++ continue;
+ lock_get_status(f, bfl, *((loff_t *)f->private), " ->");
++ }
+
+ return 0;
+ }
+diff -NurpP --minimal linux-3.6.10/fs/mount.h linux-3.6.10-vs2.3.4.6/fs/mount.h
+--- linux-3.6.10/fs/mount.h 2012-10-04 13:27:40.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/mount.h 2012-10-04 16:47:00.000000000 +0000
+@@ -46,6 +46,7 @@ struct mount {
+ int mnt_expiry_mark; /* true if marked for expiry */
+ int mnt_pinned;
+ int mnt_ghosts;
++ tag_t mnt_tag; /* tagging used for vfsmount */
+ };
+
+ #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
+diff -NurpP --minimal linux-3.6.10/fs/namei.c linux-3.6.10-vs2.3.4.6/fs/namei.c
+--- linux-3.6.10/fs/namei.c 2012-12-11 11:36:58.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/namei.c 2012-12-08 00:08:53.000000000 +0000
+@@ -34,6 +34,14 @@
+ #include <linux/device_cgroup.h>
+ #include <linux/fs_struct.h>
+ #include <linux/posix_acl.h>
++#include <linux/proc_fs.h>
++#include <linux/vserver/inode.h>
++#include <linux/vs_base.h>
++#include <linux/vs_tag.h>
++#include <linux/vs_cowbl.h>
++#include <linux/vs_device.h>
++#include <linux/vs_context.h>
++#include <linux/pid_namespace.h>
+ #include <asm/uaccess.h>
+
+ #include "internal.h"
+@@ -212,6 +220,89 @@ static int check_acl(struct inode *inode
+ return -EAGAIN;
+ }
+
++static inline int dx_barrier(const struct inode *inode)
++{
++ if (IS_BARRIER(inode) && !vx_check(0, VS_ADMIN | VS_WATCH)) {
++ vxwprintk_task(1, "did hit the barrier.");
++ return 1;
++ }
++ return 0;
++}
++
++static int __dx_permission(const struct inode *inode, int mask)
++{
++ if (dx_barrier(inode))
++ return -EACCES;
++
++ if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) {
++ /* devpts is xid tagged */
++ if (S_ISDIR(inode->i_mode) ||
++ vx_check((xid_t)inode->i_tag, VS_IDENT | VS_WATCH_P))
++ return 0;
++
++ /* just pretend we didn't find anything */
++ return -ENOENT;
++ }
++ else if (inode->i_sb->s_magic == PROC_SUPER_MAGIC) {
++ struct proc_dir_entry *de = PDE(inode);
++
++ if (de && !vx_hide_check(0, de->vx_flags))
++ goto out;
++
++ if ((mask & (MAY_WRITE | MAY_APPEND))) {
++ struct pid *pid;
++ struct task_struct *tsk;
++
++ if (vx_check(0, VS_ADMIN | VS_WATCH_P) ||
++ vx_flags(VXF_STATE_SETUP, 0))
++ return 0;
++
++ pid = PROC_I(inode)->pid;
++ if (!pid)
++ goto out;
++
++ rcu_read_lock();
++ tsk = pid_task(pid, PIDTYPE_PID);
++ vxdprintk(VXD_CBIT(tag, 0), "accessing %p[#%u]",
++ tsk, (tsk ? vx_task_xid(tsk) : 0));
++ if (tsk &&
++ vx_check(vx_task_xid(tsk), VS_IDENT | VS_WATCH_P)) {
++ rcu_read_unlock();
++ return 0;
++ }
++ rcu_read_unlock();
++ }
++ else {
++ /* FIXME: Should we block some entries here? */
++ return 0;
++ }
++ }
++ else {
++ if (dx_notagcheck(inode->i_sb) ||
++ dx_check(inode->i_tag, DX_HOSTID | DX_ADMIN | DX_WATCH |
++ DX_IDENT))
++ return 0;
++ }
++
++out:
++ return -EACCES;
++}
++
++int dx_permission(const struct inode *inode, int mask)
++{
++ int ret = __dx_permission(inode, mask);
++ if (unlikely(ret)) {
++#ifndef CONFIG_VSERVER_WARN_DEVPTS
++ if (inode->i_sb->s_magic != DEVPTS_SUPER_MAGIC)
++#endif
++ vxwprintk_task(1,
++ "denied [0x%x] access to inode %s:%p[#%d,%lu]",
++ mask, inode->i_sb->s_id, inode, inode->i_tag,
++ inode->i_ino);
++ }
++ return ret;
++}
++
+ /*
+ * This does the basic permission checking
+ */
+@@ -334,10 +425,14 @@ int __inode_permission(struct inode *ino
+ /*
+ * Nobody gets write access to an immutable file.
+ */
+- if (IS_IMMUTABLE(inode))
++ if (IS_IMMUTABLE(inode) && !IS_COW(inode))
+ return -EACCES;
+ }
+
++ retval = dx_permission(inode, mask);
++ if (retval)
++ return retval;
++
+ retval = do_inode_permission(inode, mask);
+ if (retval)
+ return retval;
+@@ -1189,7 +1284,8 @@ static void follow_dotdot(struct nameida
+
+ if (nd->path.dentry == nd->root.dentry &&
+ nd->path.mnt == nd->root.mnt) {
+- break;
++ /* for sane '/' avoid follow_mount() */
++ return;
+ }
+ if (nd->path.dentry != nd->path.mnt->mnt_root) {
+ /* rare case of legitimate dget_parent()... */
+@@ -1338,6 +1434,9 @@ static int lookup_fast(struct nameidata
+ goto unlazy;
+ }
+ }
++
++ /* FIXME: check dx permission */
++
+ path->mnt = mnt;
+ path->dentry = dentry;
+ if (unlikely(!__follow_mount_rcu(nd, path, inode)))
+@@ -1373,6 +1472,8 @@ unlazy:
+ }
+ }
+
++ /* FIXME: check dx permission */
++
+ path->mnt = mnt;
+ path->dentry = dentry;
+ err = follow_managed(path, nd->flags);
+@@ -2192,7 +2293,7 @@ static int may_delete(struct inode *dir,
+ if (IS_APPEND(dir))
+ return -EPERM;
+ if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)||
+- IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
++ IS_IXORUNLINK(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
+ return -EPERM;
+ if (isdir) {
+ if (!S_ISDIR(victim->d_inode->i_mode))
+@@ -2271,19 +2372,25 @@ int vfs_create(struct inode *dir, struct
+ bool want_excl)
+ {
+ int error = may_create(dir, dentry);
+- if (error)
+- return error;
++ if (error) {
++ vxdprintk(VXD_CBIT(misc, 3), "may_create failed with %d", error);
++ return error;
++ }
+
+ if (!dir->i_op->create)
+ return -EACCES; /* shouldn't it be ENOSYS? */
+ mode &= S_IALLUGO;
+ mode |= S_IFREG;
+ error = security_inode_create(dir, dentry, mode);
+- if (error)
+- return error;
++ if (error) {
++ vxdprintk(VXD_CBIT(misc, 3), "security_inode_create failed with %d", error);
++ return error;
++ }
+ error = dir->i_op->create(dir, dentry, mode, want_excl);
+ if (!error)
+ fsnotify_create(dir, dentry);
++ else
++ vxdprintk(VXD_CBIT(misc, 3), "i_op->create failed with %d", error);
+ return error;
+ }
+
+@@ -2318,6 +2425,15 @@ static int may_open(struct path *path, i
+ break;
+ }
+
++#ifdef CONFIG_VSERVER_COWBL
++ if (IS_COW(inode) &&
++ ((flag & O_ACCMODE) != O_RDONLY)) {
++ if (IS_COW_LINK(inode))
++ return -EMLINK;
++ inode->i_flags &= ~(S_IXUNLINK|S_IMMUTABLE);
++ mark_inode_dirty(inode);
++ }
++#endif
+ error = inode_permission(inode, acc_mode);
+ if (error)
+ return error;
+@@ -2820,6 +2936,16 @@ finish_open:
+ }
+ finish_open_created:
+ error = may_open(&nd->path, acc_mode, open_flag);
++#ifdef CONFIG_VSERVER_COWBL
++ if (error == -EMLINK) {
++ struct dentry *dentry;
++ dentry = cow_break_link(pathname);
++ if (IS_ERR(dentry))
++ error = PTR_ERR(dentry);
++ else
++ dput(dentry);
++ }
++#endif
+ if (error)
+ goto out;
+ file->f_path.mnt = nd->path.mnt;
+@@ -2884,6 +3010,7 @@ static struct file *path_openat(int dfd,
+ int opened = 0;
+ int error;
+
++restart:
+ file = get_empty_filp();
+ if (!file)
+ return ERR_PTR(-ENFILE);
+@@ -2920,6 +3047,16 @@ static struct file *path_openat(int dfd,
+ error = do_last(nd, &path, file, op, &opened, pathname);
+ put_link(nd, &link, cookie);
+ }
++
++#ifdef CONFIG_VSERVER_COWBL
++ if (error == -EMLINK) {
++ if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT))
++ path_put(&nd->root);
++ if (base)
++ fput(base);
++ goto restart;
++ }
++#endif
+ out:
+ if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT))
+ path_put(&nd->root);
+@@ -3023,6 +3160,11 @@ struct dentry *kern_path_create(int dfd,
+ goto fail;
+ }
+ *path = nd.path;
++ vxdprintk(VXD_CBIT(misc, 3), "kern_path_create path.dentry = %p (%.*s), dentry = %p (%.*s), d_inode = %p",
++ path->dentry, path->dentry->d_name.len,
++ path->dentry->d_name.name, dentry,
++ dentry->d_name.len, dentry->d_name.name,
++ path->dentry->d_inode);
+ return dentry;
+ fail:
+ dput(dentry);
+@@ -3489,7 +3631,7 @@ int vfs_link(struct dentry *old_dentry,
+ /*
+ * A link to an append-only or immutable file cannot be created.
+ */
+- if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
++ if (IS_APPEND(inode) || IS_IXORUNLINK(inode))
+ return -EPERM;
+ if (!dir->i_op->link)
+ return -EPERM;
+@@ -3874,6 +4016,275 @@ int vfs_follow_link(struct nameidata *nd
+ return __vfs_follow_link(nd, link);
+ }
+
++
++#ifdef CONFIG_VSERVER_COWBL
++
++static inline
++long do_cow_splice(struct file *in, struct file *out, size_t len)
++{
++ loff_t ppos = 0;
++
++ return do_splice_direct(in, &ppos, out, len, 0);
++}
++
++struct dentry *cow_break_link(const char *pathname)
++{
++ int ret, mode, pathlen, redo = 0;
++ struct nameidata old_nd, dir_nd;
++ struct path dir_path, *old_path, *new_path;
++ struct dentry *dir, *old_dentry, *new_dentry = NULL;
++ struct file *old_file;
++ struct file *new_file;
++ char *to, *path, pad='\251';
++ loff_t size;
++
++ vxdprintk(VXD_CBIT(misc, 1),
++ "cow_break_link(" VS_Q("%s") ")", pathname);
++
++ path = kmalloc(PATH_MAX, GFP_KERNEL);
++ ret = -ENOMEM;
++ if (!path)
++ goto out;
++
++ /* old_nd.path will have refs to dentry and mnt */
++ ret = do_path_lookup(AT_FDCWD, pathname, LOOKUP_FOLLOW, &old_nd);
++ vxdprintk(VXD_CBIT(misc, 2),
++ "do_path_lookup(old): %d", ret);
++ if (ret < 0)
++ goto out_free_path;
++
++ /* dentry/mnt refs handed over to old_path */
++ old_path = &old_nd.path;
++ /* no explicit reference for old_dentry here */
++ old_dentry = old_path->dentry;
++
++ mode = old_dentry->d_inode->i_mode;
++ to = d_path(old_path, path, PATH_MAX-2);
++ pathlen = strlen(to);
++ vxdprintk(VXD_CBIT(misc, 2),
++ "old path " VS_Q("%s") " [%p:" VS_Q("%.*s") ":%d]", to,
++ old_dentry,
++ old_dentry->d_name.len, old_dentry->d_name.name,
++ old_dentry->d_name.len);
++
++ to[pathlen + 1] = 0;
++retry:
++ new_dentry = NULL;
++ to[pathlen] = pad--;
++ ret = -ELOOP;
++ if (pad <= '\240')
++ goto out_rel_old;
++
++ vxdprintk(VXD_CBIT(misc, 1), "temp copy " VS_Q("%s"), to);
++
++ /* dir_nd.path will have refs to dentry and mnt */
++ ret = do_path_lookup(AT_FDCWD, to,
++ LOOKUP_PARENT | LOOKUP_OPEN | LOOKUP_CREATE, &dir_nd);
++ vxdprintk(VXD_CBIT(misc, 2), "do_path_lookup(new): %d", ret);
++ if (ret < 0)
++ goto retry;
++
++ /* this puppy downs the dir inode mutex if successful.
++ dir_path will hold refs to dentry and mnt and
++ we'll have get write access to the mnt */
++ new_dentry = kern_path_create(AT_FDCWD, to, &dir_path, 0);
++ if (!new_dentry || IS_ERR(new_dentry)) {
++ path_put(&dir_nd.path);
++ vxdprintk(VXD_CBIT(misc, 2),
++ "kern_path_create(new) failed with %ld",
++ PTR_ERR(new_dentry));
++ goto retry;
++ }
++ vxdprintk(VXD_CBIT(misc, 2),
++ "kern_path_create(new): %p [" VS_Q("%.*s") ":%d]",
++ new_dentry,
++ new_dentry->d_name.len, new_dentry->d_name.name,
++ new_dentry->d_name.len);
++
++ /* take a reference on new_dentry */
++ dget(new_dentry);
++
++ /* dentry/mnt refs handed over to new_path */
++ new_path = &dir_path;
++
++ /* dentry for old/new dir */
++ dir = dir_nd.path.dentry;
++
++ /* give up reference on dir */
++ dput(new_path->dentry);
++
++ /* new_dentry already has a reference */
++ new_path->dentry = new_dentry;
++
++ ret = vfs_create(dir->d_inode, new_dentry, mode, 1);
++ vxdprintk(VXD_CBIT(misc, 2),
++ "vfs_create(new): %d", ret);
++ if (ret == -EEXIST) {
++ mutex_unlock(&dir->d_inode->i_mutex);
++ path_put(&dir_nd.path);
++ mnt_drop_write(new_path->mnt);
++ path_put(new_path);
++ new_dentry = NULL;
++ goto retry;
++ }
++ else if (ret < 0)
++ goto out_unlock_new;
++
++ /* drop out early, ret passes ENOENT */
++ ret = -ENOENT;
++ if ((redo = d_unhashed(old_dentry)))
++ goto out_unlock_new;
++
++ /* doesn't change refs for old_path */
++ old_file = dentry_open(old_path, O_RDONLY, current_cred());
++ vxdprintk(VXD_CBIT(misc, 2),
++ "dentry_open(old): %p", old_file);
++ if (IS_ERR(old_file)) {
++ ret = PTR_ERR(old_file);
++ goto out_unlock_new;
++ }
++
++ /* doesn't change refs for new_path */
++ new_file = dentry_open(new_path, O_WRONLY, current_cred());
++ vxdprintk(VXD_CBIT(misc, 2),
++ "dentry_open(new): %p", new_file);
++ if (IS_ERR(new_file)) {
++ ret = PTR_ERR(new_file);
++ goto out_fput_old;
++ }
++
++ size = i_size_read(old_file->f_dentry->d_inode);
++ ret = do_cow_splice(old_file, new_file, size);
++ vxdprintk(VXD_CBIT(misc, 2), "do_splice_direct: %d", ret);
++ if (ret < 0) {
++ goto out_fput_both;
++ } else if (ret < size) {
++ ret = -ENOSPC;
++ goto out_fput_both;
++ } else {
++ struct inode *old_inode = old_dentry->d_inode;
++ struct inode *new_inode = new_dentry->d_inode;
++ struct iattr attr = {
++ .ia_uid = old_inode->i_uid,
++ .ia_gid = old_inode->i_gid,
++ .ia_valid = ATTR_UID | ATTR_GID
++ };
++
++ setattr_copy(new_inode, &attr);
++ mark_inode_dirty(new_inode);
++ }
++
++ /* lock rename mutex */
++ mutex_lock(&old_dentry->d_inode->i_sb->s_vfs_rename_mutex);
++
++ /* drop out late */
++ ret = -ENOENT;
++ if ((redo = d_unhashed(old_dentry)))
++ goto out_unlock;
++
++ vxdprintk(VXD_CBIT(misc, 2),
++ "vfs_rename: [" VS_Q("%*s") ":%d] -> [" VS_Q("%*s") ":%d]",
++ new_dentry->d_name.len, new_dentry->d_name.name,
++ new_dentry->d_name.len,
++ old_dentry->d_name.len, old_dentry->d_name.name,
++ old_dentry->d_name.len);
++ ret = vfs_rename(dir_nd.path.dentry->d_inode, new_dentry,
++ old_dentry->d_parent->d_inode, old_dentry);
++ vxdprintk(VXD_CBIT(misc, 2), "vfs_rename: %d", ret);
++
++out_unlock:
++ mutex_unlock(&old_dentry->d_inode->i_sb->s_vfs_rename_mutex);
++
++out_fput_both:
++ vxdprintk(VXD_CBIT(misc, 3),
++ "fput(new_file=%p[#%ld])", new_file,
++ atomic_long_read(&new_file->f_count));
++ fput(new_file);
++
++out_fput_old:
++ vxdprintk(VXD_CBIT(misc, 3),
++ "fput(old_file=%p[#%ld])", old_file,
++ atomic_long_read(&old_file->f_count));
++ fput(old_file);
++
++out_unlock_new:
++ /* drop references from dir_nd.path */
++ path_put(&dir_nd.path);
++
++ /* drop write access to mnt */
++ mnt_drop_write(new_path->mnt);
++
++ /* unlock the inode mutex from kern_path_create() */
++ mutex_unlock(&dir->d_inode->i_mutex);
++ if (!ret)
++ goto out_redo;
++
++ /* error path cleanup */
++ vfs_unlink(dir->d_inode, new_dentry);
++
++out_redo:
++ if (!redo)
++ goto out_rel_both;
++
++ /* lookup dentry once again
++ old_nd.path will be freed as old_path in out_rel_old */
++ ret = do_path_lookup(AT_FDCWD, pathname, LOOKUP_FOLLOW, &old_nd);
++ if (ret)
++ goto out_rel_both;
++
++ /* drop reference on new_dentry */
++ dput(new_dentry);
++ new_dentry = old_path->dentry;
++ dget(new_dentry);
++ vxdprintk(VXD_CBIT(misc, 2),
++ "do_path_lookup(redo): %p [" VS_Q("%.*s") ":%d]",
++ new_dentry,
++ new_dentry->d_name.len, new_dentry->d_name.name,
++ new_dentry->d_name.len);
++
++out_rel_both:
++ if (new_path)
++ path_put(new_path);
++out_rel_old:
++ path_put(old_path);
++out_free_path:
++ kfree(path);
++out:
++ if (ret) {
++ dput(new_dentry);
++ new_dentry = ERR_PTR(ret);
++ }
++ vxdprintk(VXD_CBIT(misc, 3),
++ "cow_break_link returning with %p", new_dentry);
++ return new_dentry;
++}
++
++#endif
++
++int vx_info_mnt_namespace(struct mnt_namespace *ns, char *buffer)
++{
++ struct path path;
++ struct vfsmount *vmnt;
++ char *pstr, *root;
++ int length = 0;
++
++ pstr = kmalloc(PATH_MAX, GFP_KERNEL);
++ if (!pstr)
++ return 0;
++
++ vmnt = &ns->root->mnt;
++ path.mnt = vmnt;
++ path.dentry = vmnt->mnt_root;
++ root = d_path(&path, pstr, PATH_MAX - 2);
++ length = sprintf(buffer + length,
++ "Namespace:\t%p [#%u]\n"
++ "RootPath:\t%s\n",
++ ns, atomic_read(&ns->count),
++ root);
++ kfree(pstr);
++ return length;
++}
++
+ /* get the link contents into pagecache */
+ static char *page_getlink(struct dentry * dentry, struct page **ppage)
+ {
+@@ -3998,3 +4409,4 @@ EXPORT_SYMBOL(vfs_symlink);
+ EXPORT_SYMBOL(vfs_unlink);
+ EXPORT_SYMBOL(dentry_unhash);
+ EXPORT_SYMBOL(generic_readlink);
++EXPORT_SYMBOL(vx_info_mnt_namespace);
+diff -NurpP --minimal linux-3.6.10/fs/namespace.c linux-3.6.10-vs2.3.4.6/fs/namespace.c
+--- linux-3.6.10/fs/namespace.c 2012-10-04 13:27:40.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/namespace.c 2012-12-08 00:10:23.000000000 +0000
+@@ -20,6 +20,11 @@
+ #include <linux/fs_struct.h> /* get_fs_root et.al. */
+ #include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */
+ #include <linux/uaccess.h>
++#include <linux/vs_base.h>
++#include <linux/vs_context.h>
++#include <linux/vs_tag.h>
++#include <linux/vserver/space.h>
++#include <linux/vserver/global.h>
+ #include "pnode.h"
+ #include "internal.h"
+
+@@ -749,6 +754,10 @@ vfs_kern_mount(struct file_system_type *
+ if (!type)
+ return ERR_PTR(-ENODEV);
+
++ if ((type->fs_flags & FS_BINARY_MOUNTDATA) &&
++ !vx_capable(CAP_SYS_ADMIN, VXC_BINARY_MOUNT))
++ return ERR_PTR(-EPERM);
++
+ mnt = alloc_vfsmnt(name);
+ if (!mnt)
+ return ERR_PTR(-ENOMEM);
+@@ -801,6 +810,7 @@ static struct mount *clone_mnt(struct mo
+ mnt->mnt.mnt_root = dget(root);
+ mnt->mnt_mountpoint = mnt->mnt.mnt_root;
+ mnt->mnt_parent = mnt;
++ mnt->mnt_tag = old->mnt_tag;
+ br_write_lock(&vfsmount_lock);
+ list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
+ br_write_unlock(&vfsmount_lock);
+@@ -1266,7 +1276,7 @@ SYSCALL_DEFINE2(umount, char __user *, n
+ goto dput_and_out;
+
+ retval = -EPERM;
+- if (!capable(CAP_SYS_ADMIN))
++ if (!vx_capable(CAP_SYS_ADMIN, VXC_SECURE_MOUNT))
+ goto dput_and_out;
+
+ retval = do_umount(mnt, flags);
+@@ -1292,7 +1302,7 @@ SYSCALL_DEFINE1(oldumount, char __user *
+
+ static int mount_is_safe(struct path *path)
+ {
+- if (capable(CAP_SYS_ADMIN))
++ if (vx_capable(CAP_SYS_ADMIN, VXC_SECURE_MOUNT))
+ return 0;
+ return -EPERM;
+ #ifdef notyet
+@@ -1610,7 +1620,7 @@ static int do_change_type(struct path *p
+ int type;
+ int err = 0;
+
+- if (!capable(CAP_SYS_ADMIN))
++ if (!vx_capable(CAP_SYS_ADMIN, VXC_NAMESPACE))
+ return -EPERM;
+
+ if (path->dentry != path->mnt->mnt_root)
+@@ -1626,6 +1636,7 @@ static int do_change_type(struct path *p
+ if (err)
+ goto out_unlock;
+ }
++ // mnt->mnt_flags = mnt_flags;
+
+ br_write_lock(&vfsmount_lock);
+ for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
+@@ -1641,12 +1652,14 @@ static int do_change_type(struct path *p
+ * do loopback mount.
+ */
+ static int do_loopback(struct path *path, char *old_name,
+- int recurse)
++ tag_t tag, unsigned long flags, int mnt_flags)
+ {
+ LIST_HEAD(umount_list);
+ struct path old_path;
+ struct mount *mnt = NULL, *old;
+ int err = mount_is_safe(path);
++ int recurse = flags & MS_REC;
++
+ if (err)
+ return err;
+ if (!old_name || !*old_name)
+@@ -1715,13 +1728,13 @@ static int change_mount_flags(struct vfs
+ * on it - tough luck.
+ */
+ static int do_remount(struct path *path, int flags, int mnt_flags,
+- void *data)
++ void *data, xid_t xid)
+ {
+ int err;
+ struct super_block *sb = path->mnt->mnt_sb;
+ struct mount *mnt = real_mount(path->mnt);
+
+- if (!capable(CAP_SYS_ADMIN))
++ if (!vx_capable(CAP_SYS_ADMIN, VXC_SECURE_REMOUNT))
+ return -EPERM;
+
+ if (!check_mnt(mnt))
+@@ -1770,7 +1783,7 @@ static int do_move_mount(struct path *pa
+ struct mount *p;
+ struct mount *old;
+ int err = 0;
+- if (!capable(CAP_SYS_ADMIN))
++ if (!vx_capable(CAP_SYS_ADMIN, VXC_SECURE_MOUNT))
+ return -EPERM;
+ if (!old_name || !*old_name)
+ return -EINVAL;
+@@ -1927,7 +1940,7 @@ static int do_new_mount(struct path *pat
+ return -EINVAL;
+
+ /* we need capabilities... */
+- if (!capable(CAP_SYS_ADMIN))
++ if (!vx_capable(CAP_SYS_ADMIN, VXC_SECURE_MOUNT))
+ return -EPERM;
+
+ mnt = do_kern_mount(type, flags, name, data);
+@@ -2197,6 +2210,7 @@ long do_mount(char *dev_name, char *dir_
+ struct path path;
+ int retval = 0;
+ int mnt_flags = 0;
++ tag_t tag = 0;
+
+ /* Discard magic */
+ if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
+@@ -2224,6 +2238,12 @@ long do_mount(char *dev_name, char *dir_
+ if (!(flags & MS_NOATIME))
+ mnt_flags |= MNT_RELATIME;
+
++ if (dx_parse_tag(data_page, &tag, 1, &mnt_flags, &flags)) {
++ /* FIXME: bind and re-mounts get the tag flag? */
++ if (flags & (MS_BIND|MS_REMOUNT))
++ flags |= MS_TAGID;
++ }
++
+ /* Separate the per-mountpoint flags */
+ if (flags & MS_NOSUID)
+ mnt_flags |= MNT_NOSUID;
+@@ -2240,15 +2260,17 @@ long do_mount(char *dev_name, char *dir_
+ if (flags & MS_RDONLY)
+ mnt_flags |= MNT_READONLY;
+
++ if (!capable(CAP_SYS_ADMIN))
++ mnt_flags |= MNT_NODEV;
+ flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
+ MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
+ MS_STRICTATIME);
+
+ if (flags & MS_REMOUNT)
+ retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
+- data_page);
++ data_page, tag);
+ else if (flags & MS_BIND)
+- retval = do_loopback(&path, dev_name, flags & MS_REC);
++ retval = do_loopback(&path, dev_name, tag, flags, mnt_flags);
+ else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
+ retval = do_change_type(&path, flags);
+ else if (flags & MS_MOVE)
+@@ -2329,6 +2351,7 @@ static struct mnt_namespace *dup_mnt_ns(
+ q = next_mnt(q, new);
+ }
+ up_write(&namespace_sem);
++ atomic_inc(&vs_global_mnt_ns);
+
+ if (rootmnt)
+ mntput(rootmnt);
+@@ -2524,9 +2547,10 @@ SYSCALL_DEFINE2(pivot_root, const char _
+ error = -EINVAL;
+ new_mnt = real_mount(new.mnt);
+ root_mnt = real_mount(root.mnt);
+- if (IS_MNT_SHARED(real_mount(old.mnt)) ||
++ if ((IS_MNT_SHARED(real_mount(old.mnt)) ||
+ IS_MNT_SHARED(new_mnt->mnt_parent) ||
+- IS_MNT_SHARED(root_mnt->mnt_parent))
++ IS_MNT_SHARED(root_mnt->mnt_parent)) &&
++ !vx_flags(VXF_STATE_SETUP, 0))
+ goto out4;
+ if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
+ goto out4;
+@@ -2647,6 +2671,7 @@ void put_mnt_ns(struct mnt_namespace *ns
+ br_write_unlock(&vfsmount_lock);
+ up_write(&namespace_sem);
+ release_mounts(&umount_list);
++ atomic_dec(&vs_global_mnt_ns);
+ kfree(ns);
+ }
+
+diff -NurpP --minimal linux-3.6.10/fs/nfs/client.c linux-3.6.10-vs2.3.4.6/fs/nfs/client.c
+--- linux-3.6.10/fs/nfs/client.c 2012-12-11 11:36:58.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/nfs/client.c 2012-11-06 17:43:41.000000000 +0000
+@@ -692,6 +692,9 @@ int nfs_init_server_rpcclient(struct nfs
+ if (server->flags & NFS_MOUNT_SOFT)
+ server->client->cl_softrtry = 1;
+
++ server->client->cl_tag = 0;
++ if (server->flags & NFS_MOUNT_TAGGED)
++ server->client->cl_tag = 1;
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(nfs_init_server_rpcclient);
+@@ -869,6 +872,10 @@ static void nfs_server_set_fsinfo(struct
+ server->acdirmin = server->acdirmax = 0;
+ }
+
++ /* FIXME: needs fsinfo
++ if (server->flags & NFS_MOUNT_TAGGED)
++ sb->s_flags |= MS_TAGGED; */
++
+ server->maxfilesize = fsinfo->maxfilesize;
+
+ server->time_delta = fsinfo->time_delta;
+diff -NurpP --minimal linux-3.6.10/fs/nfs/dir.c linux-3.6.10-vs2.3.4.6/fs/nfs/dir.c
+--- linux-3.6.10/fs/nfs/dir.c 2012-10-04 13:27:40.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/nfs/dir.c 2012-10-04 16:47:00.000000000 +0000
+@@ -36,6 +36,7 @@
+ #include <linux/sched.h>
+ #include <linux/kmemleak.h>
+ #include <linux/xattr.h>
++#include <linux/vs_tag.h>
+
+ #include "delegation.h"
+ #include "iostat.h"
+@@ -1251,6 +1252,7 @@ struct dentry *nfs_lookup(struct inode *
+ /* Success: notify readdir to use READDIRPLUS */
+ nfs_advise_use_readdirplus(dir);
+
++ dx_propagate_tag(nd, inode);
+ no_entry:
+ res = d_materialise_unique(dentry, inode);
+ if (res != NULL) {
+diff -NurpP --minimal linux-3.6.10/fs/nfs/inode.c linux-3.6.10-vs2.3.4.6/fs/nfs/inode.c
+--- linux-3.6.10/fs/nfs/inode.c 2012-10-04 13:27:40.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/nfs/inode.c 2012-10-04 16:47:00.000000000 +0000
+@@ -39,6 +39,7 @@
+ #include <linux/compat.h>
+ #include <linux/freezer.h>
+ #include <linux/crc32.h>
++#include <linux/vs_tag.h>
+
+ #include <asm/uaccess.h>
+
+@@ -279,6 +280,8 @@ nfs_fhget(struct super_block *sb, struct
+ if (inode->i_state & I_NEW) {
+ struct nfs_inode *nfsi = NFS_I(inode);
+ unsigned long now = jiffies;
++ uid_t uid;
++ gid_t gid;
+
+ /* We set i_ino for the few things that still rely on it,
+ * such as stat(2) */
+@@ -323,8 +326,8 @@ nfs_fhget(struct super_block *sb, struct
+ inode->i_version = 0;
+ inode->i_size = 0;
+ clear_nlink(inode);
+- inode->i_uid = -2;
+- inode->i_gid = -2;
++ uid = -2;
++ gid = -2;
+ inode->i_blocks = 0;
+ memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
+ nfsi->write_io = 0;
+@@ -358,11 +361,11 @@ nfs_fhget(struct super_block *sb, struct
+ else if (nfs_server_capable(inode, NFS_CAP_NLINK))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+ if (fattr->valid & NFS_ATTR_FATTR_OWNER)
+- inode->i_uid = fattr->uid;
++ uid = fattr->uid;
+ else if (nfs_server_capable(inode, NFS_CAP_OWNER))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+ if (fattr->valid & NFS_ATTR_FATTR_GROUP)
+- inode->i_gid = fattr->gid;
++ gid = fattr->gid;
+ else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+ if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
+@@ -373,6 +376,11 @@ nfs_fhget(struct super_block *sb, struct
+ */
+ inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
+ }
++ inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
++ inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
++ inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid, 0);
++ /* maybe fattr->xid someday */
++
+ nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
+ nfsi->attrtimeo_timestamp = now;
+ nfsi->access_cache = RB_ROOT;
+@@ -494,6 +502,8 @@ void nfs_setattr_update_inode(struct ino
+ inode->i_uid = attr->ia_uid;
+ if ((attr->ia_valid & ATTR_GID) != 0)
+ inode->i_gid = attr->ia_gid;
++ if ((attr->ia_valid & ATTR_TAG) && IS_TAGGED(inode))
++ inode->i_tag = attr->ia_tag;
+ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ spin_unlock(&inode->i_lock);
+ }
+@@ -965,6 +975,9 @@ static int nfs_check_inode_attributes(st
+ struct nfs_inode *nfsi = NFS_I(inode);
+ loff_t cur_size, new_isize;
+ unsigned long invalid = 0;
++ uid_t uid;
++ gid_t gid;
++ tag_t tag;
+
+
+ if (nfs_have_delegated_attributes(inode))
+@@ -990,13 +1003,18 @@ static int nfs_check_inode_attributes(st
+ invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+ }
+
++ uid = INOTAG_UID(DX_TAG(inode), fattr->uid, fattr->gid);
++ gid = INOTAG_GID(DX_TAG(inode), fattr->uid, fattr->gid);
++ tag = INOTAG_TAG(DX_TAG(inode), fattr->uid, fattr->gid, 0);
++
+ /* Have any file permissions changed? */
+ if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
+ invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
+- if ((fattr->valid & NFS_ATTR_FATTR_OWNER) && inode->i_uid != fattr->uid)
++ if ((fattr->valid & NFS_ATTR_FATTR_OWNER) && uid != fattr->uid)
+ invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
+- if ((fattr->valid & NFS_ATTR_FATTR_GROUP) && inode->i_gid != fattr->gid)
++ if ((fattr->valid & NFS_ATTR_FATTR_GROUP) && gid != fattr->gid)
+ invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
++ /* maybe check for tag too? */
+
+ /* Has the link count changed? */
+ if ((fattr->valid & NFS_ATTR_FATTR_NLINK) && inode->i_nlink != fattr->nlink)
+@@ -1300,6 +1318,9 @@ static int nfs_update_inode(struct inode
+ unsigned long invalid = 0;
+ unsigned long now = jiffies;
+ unsigned long save_cache_validity;
++ uid_t uid;
++ gid_t gid;
++ tag_t tag;
+
+ dfprintk(VFS, "NFS: %s(%s/%ld fh_crc=0x%08x ct=%d info=0x%x)\n",
+ __func__, inode->i_sb->s_id, inode->i_ino,
+@@ -1401,6 +1422,9 @@ static int nfs_update_inode(struct inode
+ | NFS_INO_REVAL_PAGECACHE
+ | NFS_INO_REVAL_FORCED);
+
++ uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag);
++ gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag);
++ tag = inode->i_tag;
+
+ if (fattr->valid & NFS_ATTR_FATTR_ATIME)
+ memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
+@@ -1422,9 +1446,9 @@ static int nfs_update_inode(struct inode
+ | NFS_INO_REVAL_FORCED);
+
+ if (fattr->valid & NFS_ATTR_FATTR_OWNER) {
+- if (inode->i_uid != fattr->uid) {
++ if (uid != fattr->uid) {
+ invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+- inode->i_uid = fattr->uid;
++ uid = fattr->uid;
+ }
+ } else if (server->caps & NFS_CAP_OWNER)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+@@ -1433,9 +1457,9 @@ static int nfs_update_inode(struct inode
+ | NFS_INO_REVAL_FORCED);
+
+ if (fattr->valid & NFS_ATTR_FATTR_GROUP) {
+- if (inode->i_gid != fattr->gid) {
++ if (gid != fattr->gid) {
+ invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+- inode->i_gid = fattr->gid;
++ gid = fattr->gid;
+ }
+ } else if (server->caps & NFS_CAP_OWNER_GROUP)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+@@ -1443,6 +1467,10 @@ static int nfs_update_inode(struct inode
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_REVAL_FORCED);
+
++ inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
++ inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
++ inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid, tag);
++
+ if (fattr->valid & NFS_ATTR_FATTR_NLINK) {
+ if (inode->i_nlink != fattr->nlink) {
+ invalid |= NFS_INO_INVALID_ATTR;
+diff -NurpP --minimal linux-3.6.10/fs/nfs/nfs3xdr.c linux-3.6.10-vs2.3.4.6/fs/nfs/nfs3xdr.c
+--- linux-3.6.10/fs/nfs/nfs3xdr.c 2012-10-04 13:27:40.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/nfs/nfs3xdr.c 2012-10-04 16:47:00.000000000 +0000
+@@ -20,6 +20,7 @@
+ #include <linux/nfs3.h>
+ #include <linux/nfs_fs.h>
+ #include <linux/nfsacl.h>
++#include <linux/vs_tag.h>
+ #include "internal.h"
+
+ #define NFSDBG_FACILITY NFSDBG_XDR
+@@ -560,7 +561,8 @@ static __be32 *xdr_decode_nfstime3(__be3
+ * set_mtime mtime;
+ * };
+ */
+-static void encode_sattr3(struct xdr_stream *xdr, const struct iattr *attr)
++static void encode_sattr3(struct xdr_stream *xdr,
++ const struct iattr *attr, int tag)
+ {
+ u32 nbytes;
+ __be32 *p;
+@@ -592,15 +594,19 @@ static void encode_sattr3(struct xdr_str
+ } else
+ *p++ = xdr_zero;
+
+- if (attr->ia_valid & ATTR_UID) {
++ if (attr->ia_valid & ATTR_UID ||
++ (tag && (attr->ia_valid & ATTR_TAG))) {
+ *p++ = xdr_one;
+- *p++ = cpu_to_be32(attr->ia_uid);
++ *p++ = cpu_to_be32(TAGINO_UID(tag,
++ attr->ia_uid, attr->ia_tag));
+ } else
+ *p++ = xdr_zero;
+
+- if (attr->ia_valid & ATTR_GID) {
++ if (attr->ia_valid & ATTR_GID ||
++ (tag && (attr->ia_valid & ATTR_TAG))) {
+ *p++ = xdr_one;
+- *p++ = cpu_to_be32(attr->ia_gid);
++ *p++ = cpu_to_be32(TAGINO_GID(tag,
++ attr->ia_gid, attr->ia_tag));
+ } else
+ *p++ = xdr_zero;
+
+@@ -879,7 +885,7 @@ static void nfs3_xdr_enc_setattr3args(st
+ const struct nfs3_sattrargs *args)
+ {
+ encode_nfs_fh3(xdr, args->fh);
+- encode_sattr3(xdr, args->sattr);
++ encode_sattr3(xdr, args->sattr, req->rq_task->tk_client->cl_tag);
+ encode_sattrguard3(xdr, args);
+ }
+
+@@ -1029,13 +1035,13 @@ static void nfs3_xdr_enc_write3args(stru
+ * };
+ */
+ static void encode_createhow3(struct xdr_stream *xdr,
+- const struct nfs3_createargs *args)
++ const struct nfs3_createargs *args, int tag)
+ {
+ encode_uint32(xdr, args->createmode);
+ switch (args->createmode) {
+ case NFS3_CREATE_UNCHECKED:
+ case NFS3_CREATE_GUARDED:
+- encode_sattr3(xdr, args->sattr);
++ encode_sattr3(xdr, args->sattr, tag);
+ break;
+ case NFS3_CREATE_EXCLUSIVE:
+ encode_createverf3(xdr, args->verifier);
+@@ -1050,7 +1056,7 @@ static void nfs3_xdr_enc_create3args(str
+ const struct nfs3_createargs *args)
+ {
+ encode_diropargs3(xdr, args->fh, args->name, args->len);
+- encode_createhow3(xdr, args);
++ encode_createhow3(xdr, args, req->rq_task->tk_client->cl_tag);
+ }
+
+ /*
+@@ -1066,7 +1072,7 @@ static void nfs3_xdr_enc_mkdir3args(stru
+ const struct nfs3_mkdirargs *args)
+ {
+ encode_diropargs3(xdr, args->fh, args->name, args->len);
+- encode_sattr3(xdr, args->sattr);
++ encode_sattr3(xdr, args->sattr, req->rq_task->tk_client->cl_tag);
+ }
+
+ /*
+@@ -1083,9 +1089,9 @@ static void nfs3_xdr_enc_mkdir3args(stru
+ * };
+ */
+ static void encode_symlinkdata3(struct xdr_stream *xdr,
+- const struct nfs3_symlinkargs *args)
++ const struct nfs3_symlinkargs *args, int tag)
+ {
+- encode_sattr3(xdr, args->sattr);
++ encode_sattr3(xdr, args->sattr, tag);
+ encode_nfspath3(xdr, args->pages, args->pathlen);
+ }
+
+@@ -1094,7 +1100,7 @@ static void nfs3_xdr_enc_symlink3args(st
+ const struct nfs3_symlinkargs *args)
+ {
+ encode_diropargs3(xdr, args->fromfh, args->fromname, args->fromlen);
+- encode_symlinkdata3(xdr, args);
++ encode_symlinkdata3(xdr, args, req->rq_task->tk_client->cl_tag);
+ }
+
+ /*
+@@ -1122,24 +1128,24 @@ static void nfs3_xdr_enc_symlink3args(st
+ * };
+ */
+ static void encode_devicedata3(struct xdr_stream *xdr,
+- const struct nfs3_mknodargs *args)
++ const struct nfs3_mknodargs *args, int tag)
+ {
+- encode_sattr3(xdr, args->sattr);
++ encode_sattr3(xdr, args->sattr, tag);
+ encode_specdata3(xdr, args->rdev);
+ }
+
+ static void encode_mknoddata3(struct xdr_stream *xdr,
+- const struct nfs3_mknodargs *args)
++ const struct nfs3_mknodargs *args, int tag)
+ {
+ encode_ftype3(xdr, args->type);
+ switch (args->type) {
+ case NF3CHR:
+ case NF3BLK:
+- encode_devicedata3(xdr, args);
++ encode_devicedata3(xdr, args, tag);
+ break;
+ case NF3SOCK:
+ case NF3FIFO:
+- encode_sattr3(xdr, args->sattr);
++ encode_sattr3(xdr, args->sattr, tag);
+ break;
+ case NF3REG:
+ case NF3DIR:
+@@ -1154,7 +1160,7 @@ static void nfs3_xdr_enc_mknod3args(stru
+ const struct nfs3_mknodargs *args)
+ {
+ encode_diropargs3(xdr, args->fh, args->name, args->len);
+- encode_mknoddata3(xdr, args);
++ encode_mknoddata3(xdr, args, req->rq_task->tk_client->cl_tag);
+ }
+
+ /*
+diff -NurpP --minimal linux-3.6.10/fs/nfs/super.c linux-3.6.10-vs2.3.4.6/fs/nfs/super.c
+--- linux-3.6.10/fs/nfs/super.c 2012-12-11 11:36:58.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/nfs/super.c 2012-12-08 00:36:33.000000000 +0000
+@@ -54,6 +54,7 @@
+ #include <linux/parser.h>
+ #include <linux/nsproxy.h>
+ #include <linux/rcupdate.h>
++#include <linux/vs_tag.h>
+
+ #include <asm/uaccess.h>
+
+@@ -88,6 +89,7 @@ enum {
+ Opt_sharecache, Opt_nosharecache,
+ Opt_resvport, Opt_noresvport,
+ Opt_fscache, Opt_nofscache,
++ Opt_tag, Opt_notag,
+
+ /* Mount options that take integer arguments */
+ Opt_port,
+@@ -100,6 +102,7 @@ enum {
+ Opt_mountport,
+ Opt_mountvers,
+ Opt_minorversion,
++ Opt_tagid,
+
+ /* Mount options that take string arguments */
+ Opt_nfsvers,
+@@ -182,6 +185,10 @@ static const match_table_t nfs_mount_opt
+ /* The following needs to be listed after all other options */
+ { Opt_nfsvers, "v%s" },
+
++ { Opt_tag, "tag" },
++ { Opt_notag, "notag" },
++ { Opt_tagid, "tagid=%u" },
++
+ { Opt_err, NULL }
+ };
+
+@@ -626,6 +633,7 @@ static void nfs_show_mount_options(struc
+ { NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" },
+ { NFS_MOUNT_UNSHARED, ",nosharecache", "" },
+ { NFS_MOUNT_NORESVPORT, ",noresvport", "" },
++ { NFS_MOUNT_TAGGED, ",tag", "" },
+ { 0, NULL, NULL }
+ };
+ const struct proc_nfs_info *nfs_infop;
+@@ -1243,6 +1251,14 @@ static int nfs_parse_mount_options(char
+ kfree(mnt->fscache_uniq);
+ mnt->fscache_uniq = NULL;
+ break;
++#ifndef CONFIG_TAGGING_NONE
++ case Opt_tag:
++ mnt->flags |= NFS_MOUNT_TAGGED;
++ break;
++ case Opt_notag:
++ mnt->flags &= ~NFS_MOUNT_TAGGED;
++ break;
++#endif
+
+ /*
+ * options that take numeric values
+@@ -1329,6 +1345,12 @@ static int nfs_parse_mount_options(char
+ goto out_invalid_value;
+ mnt->minorversion = option;
+ break;
++#ifdef CONFIG_PROPAGATE
++ case Opt_tagid:
++ /* use args[0] */
++ nfs_data.flags |= NFS_MOUNT_TAGGED;
++ break;
++#endif
+
+ /*
+ * options that take text values
+diff -NurpP --minimal linux-3.6.10/fs/nfsd/auth.c linux-3.6.10-vs2.3.4.6/fs/nfsd/auth.c
+--- linux-3.6.10/fs/nfsd/auth.c 2012-07-22 21:39:41.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/nfsd/auth.c 2012-10-04 16:47:00.000000000 +0000
+@@ -2,6 +2,7 @@
+
+ #include <linux/sched.h>
+ #include <linux/user_namespace.h>
++#include <linux/vs_tag.h>
+ #include "nfsd.h"
+ #include "auth.h"
+
+@@ -37,6 +38,9 @@ int nfsd_setuser(struct svc_rqst *rqstp,
+
+ new->fsuid = rqstp->rq_cred.cr_uid;
+ new->fsgid = rqstp->rq_cred.cr_gid;
++ /* FIXME: this desperately needs a tag :)
++ new->xid = (xid_t)INOTAG_TAG(DX_TAG_NFSD, cred.cr_uid, cred.cr_gid, 0);
++ */
+
+ rqgi = rqstp->rq_cred.cr_group_info;
+
+diff -NurpP --minimal linux-3.6.10/fs/nfsd/nfs3xdr.c linux-3.6.10-vs2.3.4.6/fs/nfsd/nfs3xdr.c
+--- linux-3.6.10/fs/nfsd/nfs3xdr.c 2012-05-21 16:07:26.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/nfsd/nfs3xdr.c 2012-10-04 16:47:00.000000000 +0000
+@@ -7,6 +7,7 @@
+ */
+
+ #include <linux/namei.h>
++#include <linux/vs_tag.h>
+ #include "xdr3.h"
+ #include "auth.h"
+
+@@ -95,6 +96,8 @@ static __be32 *
+ decode_sattr3(__be32 *p, struct iattr *iap)
+ {
+ u32 tmp;
++ uid_t uid = 0;
++ gid_t gid = 0;
+
+ iap->ia_valid = 0;
+
+@@ -104,12 +107,15 @@ decode_sattr3(__be32 *p, struct iattr *i
+ }
+ if (*p++) {
+ iap->ia_valid |= ATTR_UID;
+- iap->ia_uid = ntohl(*p++);
++ uid = ntohl(*p++);
+ }
+ if (*p++) {
+ iap->ia_valid |= ATTR_GID;
+- iap->ia_gid = ntohl(*p++);
++ gid = ntohl(*p++);
+ }
++ iap->ia_uid = INOTAG_UID(DX_TAG_NFSD, uid, gid);
++ iap->ia_gid = INOTAG_GID(DX_TAG_NFSD, uid, gid);
++ iap->ia_tag = INOTAG_TAG(DX_TAG_NFSD, uid, gid, 0);
+ if (*p++) {
+ u64 newsize;
+
+@@ -165,8 +171,12 @@ encode_fattr3(struct svc_rqst *rqstp, __
+ *p++ = htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]);
+ *p++ = htonl((u32) stat->mode);
+ *p++ = htonl((u32) stat->nlink);
+- *p++ = htonl((u32) nfsd_ruid(rqstp, stat->uid));
+- *p++ = htonl((u32) nfsd_rgid(rqstp, stat->gid));
++ *p++ = htonl((u32) nfsd_ruid(rqstp,
++ TAGINO_UID(0 /* FIXME: DX_TAG(dentry->d_inode) */,
++ stat->uid, stat->tag)));
++ *p++ = htonl((u32) nfsd_rgid(rqstp,
++ TAGINO_GID(0 /* FIXME: DX_TAG(dentry->d_inode) */,
++ stat->gid, stat->tag)));
+ if (S_ISLNK(stat->mode) && stat->size > NFS3_MAXPATHLEN) {
+ p = xdr_encode_hyper(p, (u64) NFS3_MAXPATHLEN);
+ } else {
+diff -NurpP --minimal linux-3.6.10/fs/nfsd/nfs4xdr.c linux-3.6.10-vs2.3.4.6/fs/nfsd/nfs4xdr.c
+--- linux-3.6.10/fs/nfsd/nfs4xdr.c 2012-10-04 13:27:40.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/nfsd/nfs4xdr.c 2012-10-04 16:47:00.000000000 +0000
+@@ -46,6 +46,7 @@
+ #include <linux/utsname.h>
+ #include <linux/pagemap.h>
+ #include <linux/sunrpc/svcauth_gss.h>
++#include <linux/vs_tag.h>
+
+ #include "idmap.h"
+ #include "acl.h"
+@@ -2351,14 +2352,18 @@ out_acl:
+ WRITE32(stat.nlink);
+ }
+ if (bmval1 & FATTR4_WORD1_OWNER) {
+- status = nfsd4_encode_user(rqstp, stat.uid, &p, &buflen);
++ status = nfsd4_encode_user(rqstp,
++ TAGINO_UID(DX_TAG(dentry->d_inode),
++ stat.uid, stat.tag), &p, &buflen);
+ if (status == nfserr_resource)
+ goto out_resource;
+ if (status)
+ goto out;
+ }
+ if (bmval1 & FATTR4_WORD1_OWNER_GROUP) {
+- status = nfsd4_encode_group(rqstp, stat.gid, &p, &buflen);
++ status = nfsd4_encode_group(rqstp,
++ TAGINO_GID(DX_TAG(dentry->d_inode),
++ stat.gid, stat.tag), &p, &buflen);
+ if (status == nfserr_resource)
+ goto out_resource;
+ if (status)
+diff -NurpP --minimal linux-3.6.10/fs/nfsd/nfsxdr.c linux-3.6.10-vs2.3.4.6/fs/nfsd/nfsxdr.c
+--- linux-3.6.10/fs/nfsd/nfsxdr.c 2011-05-22 14:17:53.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/nfsd/nfsxdr.c 2012-10-04 16:47:00.000000000 +0000
+@@ -6,6 +6,7 @@
+
+ #include "xdr.h"
+ #include "auth.h"
++#include <linux/vs_tag.h>
+
+ #define NFSDDBG_FACILITY NFSDDBG_XDR
+
+@@ -88,6 +89,8 @@ static __be32 *
+ decode_sattr(__be32 *p, struct iattr *iap)
+ {
+ u32 tmp, tmp1;
++ uid_t uid = 0;
++ gid_t gid = 0;
+
+ iap->ia_valid = 0;
+
+@@ -101,12 +104,15 @@ decode_sattr(__be32 *p, struct iattr *ia
+ }
+ if ((tmp = ntohl(*p++)) != (u32)-1) {
+ iap->ia_valid |= ATTR_UID;
+- iap->ia_uid = tmp;
++ uid = tmp;
+ }
+ if ((tmp = ntohl(*p++)) != (u32)-1) {
+ iap->ia_valid |= ATTR_GID;
+- iap->ia_gid = tmp;
++ gid = tmp;
+ }
++ iap->ia_uid = INOTAG_UID(DX_TAG_NFSD, uid, gid);
++ iap->ia_gid = INOTAG_GID(DX_TAG_NFSD, uid, gid);
++ iap->ia_tag = INOTAG_TAG(DX_TAG_NFSD, uid, gid, 0);
+ if ((tmp = ntohl(*p++)) != (u32)-1) {
+ iap->ia_valid |= ATTR_SIZE;
+ iap->ia_size = tmp;
+@@ -151,8 +157,10 @@ encode_fattr(struct svc_rqst *rqstp, __b
+ *p++ = htonl(nfs_ftypes[type >> 12]);
+ *p++ = htonl((u32) stat->mode);
+ *p++ = htonl((u32) stat->nlink);
+- *p++ = htonl((u32) nfsd_ruid(rqstp, stat->uid));
+- *p++ = htonl((u32) nfsd_rgid(rqstp, stat->gid));
++ *p++ = htonl((u32) nfsd_ruid(rqstp,
++ TAGINO_UID(DX_TAG(dentry->d_inode), stat->uid, stat->tag)));
++ *p++ = htonl((u32) nfsd_rgid(rqstp,
++ TAGINO_GID(DX_TAG(dentry->d_inode), stat->gid, stat->tag)));
+
+ if (S_ISLNK(type) && stat->size > NFS_MAXPATHLEN) {
+ *p++ = htonl(NFS_MAXPATHLEN);
+diff -NurpP --minimal linux-3.6.10/fs/ocfs2/dlmglue.c linux-3.6.10-vs2.3.4.6/fs/ocfs2/dlmglue.c
+--- linux-3.6.10/fs/ocfs2/dlmglue.c 2012-07-22 21:39:41.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ocfs2/dlmglue.c 2012-10-04 16:47:00.000000000 +0000
+@@ -2047,6 +2047,7 @@ static void __ocfs2_stuff_meta_lvb(struc
+ lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
+ lvb->lvb_iuid = cpu_to_be32(inode->i_uid);
+ lvb->lvb_igid = cpu_to_be32(inode->i_gid);
++ lvb->lvb_itag = cpu_to_be16(inode->i_tag);
+ lvb->lvb_imode = cpu_to_be16(inode->i_mode);
+ lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
+ lvb->lvb_iatime_packed =
+@@ -2097,6 +2098,7 @@ static void ocfs2_refresh_inode_from_lvb
+
+ inode->i_uid = be32_to_cpu(lvb->lvb_iuid);
+ inode->i_gid = be32_to_cpu(lvb->lvb_igid);
++ inode->i_tag = be16_to_cpu(lvb->lvb_itag);
+ inode->i_mode = be16_to_cpu(lvb->lvb_imode);
+ set_nlink(inode, be16_to_cpu(lvb->lvb_inlink));
+ ocfs2_unpack_timespec(&inode->i_atime,
+diff -NurpP --minimal linux-3.6.10/fs/ocfs2/dlmglue.h linux-3.6.10-vs2.3.4.6/fs/ocfs2/dlmglue.h
+--- linux-3.6.10/fs/ocfs2/dlmglue.h 2010-10-21 11:07:50.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ocfs2/dlmglue.h 2012-10-04 16:47:00.000000000 +0000
+@@ -46,7 +46,8 @@ struct ocfs2_meta_lvb {
+ __be16 lvb_inlink;
+ __be32 lvb_iattr;
+ __be32 lvb_igeneration;
+- __be32 lvb_reserved2;
++ __be16 lvb_itag;
++ __be16 lvb_reserved2;
+ };
+
+ #define OCFS2_QINFO_LVB_VERSION 1
+diff -NurpP --minimal linux-3.6.10/fs/ocfs2/file.c linux-3.6.10-vs2.3.4.6/fs/ocfs2/file.c
+--- linux-3.6.10/fs/ocfs2/file.c 2012-10-04 13:27:41.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ocfs2/file.c 2012-10-04 16:47:00.000000000 +0000
+@@ -1123,7 +1123,7 @@ int ocfs2_setattr(struct dentry *dentry,
+ attr->ia_valid &= ~ATTR_SIZE;
+
+ #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
+- | ATTR_GID | ATTR_UID | ATTR_MODE)
++ | ATTR_GID | ATTR_UID | ATTR_TAG | ATTR_MODE)
+ if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
+ return 0;
+
+diff -NurpP --minimal linux-3.6.10/fs/ocfs2/inode.c linux-3.6.10-vs2.3.4.6/fs/ocfs2/inode.c
+--- linux-3.6.10/fs/ocfs2/inode.c 2012-07-22 21:39:41.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ocfs2/inode.c 2012-10-04 16:47:00.000000000 +0000
+@@ -28,6 +28,7 @@
+ #include <linux/highmem.h>
+ #include <linux/pagemap.h>
+ #include <linux/quotaops.h>
++#include <linux/vs_tag.h>
+
+ #include <asm/byteorder.h>
+
+@@ -78,11 +79,13 @@ void ocfs2_set_inode_flags(struct inode
+ {
+ unsigned int flags = OCFS2_I(inode)->ip_attr;
+
+- inode->i_flags &= ~(S_IMMUTABLE |
++ inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
+ S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
+
+ if (flags & OCFS2_IMMUTABLE_FL)
+ inode->i_flags |= S_IMMUTABLE;
++ if (flags & OCFS2_IXUNLINK_FL)
++ inode->i_flags |= S_IXUNLINK;
+
+ if (flags & OCFS2_SYNC_FL)
+ inode->i_flags |= S_SYNC;
+@@ -92,25 +95,44 @@ void ocfs2_set_inode_flags(struct inode
+ inode->i_flags |= S_NOATIME;
+ if (flags & OCFS2_DIRSYNC_FL)
+ inode->i_flags |= S_DIRSYNC;
++
++ inode->i_vflags &= ~(V_BARRIER | V_COW);
++
++ if (flags & OCFS2_BARRIER_FL)
++ inode->i_vflags |= V_BARRIER;
++ if (flags & OCFS2_COW_FL)
++ inode->i_vflags |= V_COW;
+ }
+
+ /* Propagate flags from i_flags to OCFS2_I(inode)->ip_attr */
+ void ocfs2_get_inode_flags(struct ocfs2_inode_info *oi)
+ {
+ unsigned int flags = oi->vfs_inode.i_flags;
++ unsigned int vflags = oi->vfs_inode.i_vflags;
++
++ oi->ip_attr &= ~(OCFS2_SYNC_FL | OCFS2_APPEND_FL |
++ OCFS2_IMMUTABLE_FL | OCFS2_IXUNLINK_FL |
++ OCFS2_NOATIME_FL | OCFS2_DIRSYNC_FL |
++ OCFS2_BARRIER_FL | OCFS2_COW_FL);
++
++ if (flags & S_IMMUTABLE)
++ oi->ip_attr |= OCFS2_IMMUTABLE_FL;
++ if (flags & S_IXUNLINK)
++ oi->ip_attr |= OCFS2_IXUNLINK_FL;
+
+- oi->ip_attr &= ~(OCFS2_SYNC_FL|OCFS2_APPEND_FL|
+- OCFS2_IMMUTABLE_FL|OCFS2_NOATIME_FL|OCFS2_DIRSYNC_FL);
+ if (flags & S_SYNC)
+ oi->ip_attr |= OCFS2_SYNC_FL;
+ if (flags & S_APPEND)
+ oi->ip_attr |= OCFS2_APPEND_FL;
+- if (flags & S_IMMUTABLE)
+- oi->ip_attr |= OCFS2_IMMUTABLE_FL;
+ if (flags & S_NOATIME)
+ oi->ip_attr |= OCFS2_NOATIME_FL;
+ if (flags & S_DIRSYNC)
+ oi->ip_attr |= OCFS2_DIRSYNC_FL;
++
++ if (vflags & V_BARRIER)
++ oi->ip_attr |= OCFS2_BARRIER_FL;
++ if (vflags & V_COW)
++ oi->ip_attr |= OCFS2_COW_FL;
+ }
+
+ struct inode *ocfs2_ilookup(struct super_block *sb, u64 blkno)
+@@ -241,6 +263,8 @@ void ocfs2_populate_inode(struct inode *
+ struct super_block *sb;
+ struct ocfs2_super *osb;
+ int use_plocks = 1;
++ uid_t uid;
++ gid_t gid;
+
+ sb = inode->i_sb;
+ osb = OCFS2_SB(sb);
+@@ -269,8 +293,12 @@ void ocfs2_populate_inode(struct inode *
+ inode->i_generation = le32_to_cpu(fe->i_generation);
+ inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev));
+ inode->i_mode = le16_to_cpu(fe->i_mode);
+- inode->i_uid = le32_to_cpu(fe->i_uid);
+- inode->i_gid = le32_to_cpu(fe->i_gid);
++ uid = le32_to_cpu(fe->i_uid);
++ gid = le32_to_cpu(fe->i_gid);
++ inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
++ inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
++ inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid,
++ /* le16_to_cpu(raw_inode->i_raw_tag)i */ 0);
+
+ /* Fast symlinks will have i_size but no allocated clusters. */
+ if (S_ISLNK(inode->i_mode) && !fe->i_clusters) {
+diff -NurpP --minimal linux-3.6.10/fs/ocfs2/inode.h linux-3.6.10-vs2.3.4.6/fs/ocfs2/inode.h
+--- linux-3.6.10/fs/ocfs2/inode.h 2012-01-09 15:14:55.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ocfs2/inode.h 2012-10-04 16:47:00.000000000 +0000
+@@ -154,6 +154,7 @@ struct buffer_head *ocfs2_bread(struct i
+
+ void ocfs2_set_inode_flags(struct inode *inode);
+ void ocfs2_get_inode_flags(struct ocfs2_inode_info *oi);
++int ocfs2_sync_flags(struct inode *inode, int, int);
+
+ static inline blkcnt_t ocfs2_inode_sector_count(struct inode *inode)
+ {
+diff -NurpP --minimal linux-3.6.10/fs/ocfs2/ioctl.c linux-3.6.10-vs2.3.4.6/fs/ocfs2/ioctl.c
+--- linux-3.6.10/fs/ocfs2/ioctl.c 2012-10-04 13:27:41.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ocfs2/ioctl.c 2012-10-04 16:47:00.000000000 +0000
+@@ -76,7 +76,41 @@ static int ocfs2_get_inode_attr(struct i
+ return status;
+ }
+
+-static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
++int ocfs2_sync_flags(struct inode *inode, int flags, int vflags)
++{
++ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
++ struct buffer_head *bh = NULL;
++ handle_t *handle = NULL;
++ int status;
++
++ status = ocfs2_inode_lock(inode, &bh, 1);
++ if (status < 0) {
++ mlog_errno(status);
++ return status;
++ }
++ handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
++ if (IS_ERR(handle)) {
++ status = PTR_ERR(handle);
++ mlog_errno(status);
++ goto bail_unlock;
++ }
++
++ inode->i_flags = flags;
++ inode->i_vflags = vflags;
++ ocfs2_get_inode_flags(OCFS2_I(inode));
++
++ status = ocfs2_mark_inode_dirty(handle, inode, bh);
++ if (status < 0)
++ mlog_errno(status);
++
++ ocfs2_commit_trans(osb, handle);
++bail_unlock:
++ ocfs2_inode_unlock(inode, 1);
++ brelse(bh);
++ return status;
++}
++
++int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
+ unsigned mask)
+ {
+ struct ocfs2_inode_info *ocfs2_inode = OCFS2_I(inode);
+@@ -101,6 +135,11 @@ static int ocfs2_set_inode_attr(struct i
+ if (!S_ISDIR(inode->i_mode))
+ flags &= ~OCFS2_DIRSYNC_FL;
+
++ if (IS_BARRIER(inode)) {
++ vxwprintk_task(1, "messing with the barrier.");
++ goto bail_unlock;
++ }
++
+ handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+@@ -879,6 +918,7 @@ bail:
+ return status;
+ }
+
++
+ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+ struct inode *inode = filp->f_path.dentry->d_inode;
+diff -NurpP --minimal linux-3.6.10/fs/ocfs2/namei.c linux-3.6.10-vs2.3.4.6/fs/ocfs2/namei.c
+--- linux-3.6.10/fs/ocfs2/namei.c 2012-10-04 13:27:41.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ocfs2/namei.c 2012-10-04 16:47:00.000000000 +0000
+@@ -41,6 +41,7 @@
+ #include <linux/slab.h>
+ #include <linux/highmem.h>
+ #include <linux/quotaops.h>
++#include <linux/vs_tag.h>
+
+ #include <cluster/masklog.h>
+
+@@ -475,6 +476,7 @@ static int __ocfs2_mknod_locked(struct i
+ struct ocfs2_dinode *fe = NULL;
+ struct ocfs2_extent_list *fel;
+ u16 feat;
++ tag_t tag;
+
+ *new_fe_bh = NULL;
+
+@@ -512,8 +514,11 @@ static int __ocfs2_mknod_locked(struct i
+ fe->i_suballoc_loc = cpu_to_le64(suballoc_loc);
+ fe->i_suballoc_bit = cpu_to_le16(suballoc_bit);
+ fe->i_suballoc_slot = cpu_to_le16(inode_ac->ac_alloc_slot);
+- fe->i_uid = cpu_to_le32(inode->i_uid);
+- fe->i_gid = cpu_to_le32(inode->i_gid);
++
++ tag = dx_current_fstag(osb->sb);
++ fe->i_uid = cpu_to_le32(TAGINO_UID(DX_TAG(inode), inode->i_uid, tag));
++ fe->i_gid = cpu_to_le32(TAGINO_GID(DX_TAG(inode), inode->i_gid, tag));
++ inode->i_tag = tag;
+ fe->i_mode = cpu_to_le16(inode->i_mode);
+ if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
+ fe->id1.dev1.i_rdev = cpu_to_le64(huge_encode_dev(dev));
+diff -NurpP --minimal linux-3.6.10/fs/ocfs2/ocfs2.h linux-3.6.10-vs2.3.4.6/fs/ocfs2/ocfs2.h
+--- linux-3.6.10/fs/ocfs2/ocfs2.h 2012-01-09 15:14:55.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ocfs2/ocfs2.h 2012-10-04 16:47:00.000000000 +0000
+@@ -272,6 +272,7 @@ enum ocfs2_mount_options
+ writes */
+ OCFS2_MOUNT_HB_NONE = 1 << 13, /* No heartbeat */
+ OCFS2_MOUNT_HB_GLOBAL = 1 << 14, /* Global heartbeat */
++ OCFS2_MOUNT_TAGGED = 1 << 15, /* use tagging */
+ };
+
+ #define OCFS2_OSB_SOFT_RO 0x0001
+diff -NurpP --minimal linux-3.6.10/fs/ocfs2/ocfs2_fs.h linux-3.6.10-vs2.3.4.6/fs/ocfs2/ocfs2_fs.h
+--- linux-3.6.10/fs/ocfs2/ocfs2_fs.h 2011-05-22 14:17:53.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ocfs2/ocfs2_fs.h 2012-10-04 16:47:00.000000000 +0000
+@@ -266,6 +266,11 @@
+ #define OCFS2_TOPDIR_FL FS_TOPDIR_FL /* Top of directory hierarchies*/
+ #define OCFS2_RESERVED_FL FS_RESERVED_FL /* reserved for ext2 lib */
+
++#define OCFS2_IXUNLINK_FL FS_IXUNLINK_FL /* Immutable invert on unlink */
++
++#define OCFS2_BARRIER_FL FS_BARRIER_FL /* Barrier for chroot() */
++#define OCFS2_COW_FL FS_COW_FL /* Copy on Write marker */
++
+ #define OCFS2_FL_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */
+ #define OCFS2_FL_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */
+
+diff -NurpP --minimal linux-3.6.10/fs/ocfs2/super.c linux-3.6.10-vs2.3.4.6/fs/ocfs2/super.c
+--- linux-3.6.10/fs/ocfs2/super.c 2012-05-21 16:07:26.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/ocfs2/super.c 2012-10-04 16:47:00.000000000 +0000
+@@ -185,6 +185,7 @@ enum {
+ Opt_coherency_full,
+ Opt_resv_level,
+ Opt_dir_resv_level,
++ Opt_tag, Opt_notag, Opt_tagid,
+ Opt_err,
+ };
+
+@@ -216,6 +217,9 @@ static const match_table_t tokens = {
+ {Opt_coherency_full, "coherency=full"},
+ {Opt_resv_level, "resv_level=%u"},
+ {Opt_dir_resv_level, "dir_resv_level=%u"},
++ {Opt_tag, "tag"},
++ {Opt_notag, "notag"},
++ {Opt_tagid, "tagid=%u"},
+ {Opt_err, NULL}
+ };
+
+@@ -662,6 +666,13 @@ static int ocfs2_remount(struct super_bl
+ goto out;
+ }
+
++ if ((osb->s_mount_opt & OCFS2_MOUNT_TAGGED) !=
++ (parsed_options.mount_opt & OCFS2_MOUNT_TAGGED)) {
++ ret = -EINVAL;
++ mlog(ML_ERROR, "Cannot change tagging on remount\n");
++ goto out;
++ }
++
+ /* We're going to/from readonly mode. */
+ if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
+ /* Disable quota accounting before remounting RO */
+@@ -1177,6 +1188,9 @@ static int ocfs2_fill_super(struct super
+
+ ocfs2_complete_mount_recovery(osb);
+
++ if (osb->s_mount_opt & OCFS2_MOUNT_TAGGED)
++ sb->s_flags |= MS_TAGGED;
++
+ if (ocfs2_mount_local(osb))
+ snprintf(nodestr, sizeof(nodestr), "local");
+ else
+@@ -1503,6 +1517,20 @@ static int ocfs2_parse_options(struct su
+ option < OCFS2_MAX_RESV_LEVEL)
+ mopt->dir_resv_level = option;
+ break;
++#ifndef CONFIG_TAGGING_NONE
++ case Opt_tag:
++ mopt->mount_opt |= OCFS2_MOUNT_TAGGED;
++ break;
++ case Opt_notag:
++ mopt->mount_opt &= ~OCFS2_MOUNT_TAGGED;
++ break;
++#endif
++#ifdef CONFIG_PROPAGATE
++ case Opt_tagid:
++ /* use args[0] */
++ mopt->mount_opt |= OCFS2_MOUNT_TAGGED;
++ break;
++#endif
+ default:
+ mlog(ML_ERROR,
+ "Unrecognized mount option \"%s\" "
+diff -NurpP --minimal linux-3.6.10/fs/open.c linux-3.6.10-vs2.3.4.6/fs/open.c
+--- linux-3.6.10/fs/open.c 2012-10-04 13:27:41.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/open.c 2012-10-04 16:47:00.000000000 +0000
+@@ -30,6 +30,11 @@
+ #include <linux/fs_struct.h>
+ #include <linux/ima.h>
+ #include <linux/dnotify.h>
++#include <linux/vs_base.h>
++#include <linux/vs_limit.h>
++#include <linux/vs_tag.h>
++#include <linux/vs_cowbl.h>
++#include <linux/vserver/dlimit.h>
+
+ #include "internal.h"
+
+@@ -74,6 +79,12 @@ static long do_sys_truncate(const char _
+ error = user_path(pathname, &path);
+ if (error)
+ goto out;
++
++#ifdef CONFIG_VSERVER_COWBL
++ error = cow_check_and_break(&path);
++ if (error)
++ goto dput_and_out;
++#endif
+ inode = path.dentry->d_inode;
+
+ /* For directories it's -EISDIR, for other non-regulars - -EINVAL */
+@@ -495,6 +506,10 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
+
+ error = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path);
+ if (!error) {
++#ifdef CONFIG_VSERVER_COWBL
++ error = cow_check_and_break(&path);
++ if (!error)
++#endif
+ error = chmod_common(&path, mode);
+ path_put(&path);
+ }
+@@ -522,13 +537,13 @@ static int chown_common(struct path *pat
+ if (!uid_valid(uid))
+ return -EINVAL;
+ newattrs.ia_valid |= ATTR_UID;
+- newattrs.ia_uid = uid;
++ newattrs.ia_uid = dx_map_uid(user);
+ }
+ if (group != (gid_t) -1) {
+ if (!gid_valid(gid))
+ return -EINVAL;
+ newattrs.ia_valid |= ATTR_GID;
+- newattrs.ia_gid = gid;
++ newattrs.ia_gid = dx_map_gid(group);
+ }
+ if (!S_ISDIR(inode->i_mode))
+ newattrs.ia_valid |=
+@@ -561,6 +576,18 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
+ error = mnt_want_write(path.mnt);
+ if (error)
+ goto out_release;
++#ifdef CONFIG_VSERVER_COWBL
++ error = cow_check_and_break(&path);
++ if (!error)
++#endif
++#ifdef CONFIG_VSERVER_COWBL
++ error = cow_check_and_break(&path);
++ if (!error)
++#endif
++#ifdef CONFIG_VSERVER_COWBL
++ error = cow_check_and_break(&path);
++ if (!error)
++#endif
+ error = chown_common(&path, user, group);
+ mnt_drop_write(path.mnt);
+ out_release:
+@@ -809,6 +836,7 @@ static void __put_unused_fd(struct files
+ __clear_open_fd(fd, fdt);
+ if (fd < files->next_fd)
+ files->next_fd = fd;
++ vx_openfd_dec(fd);
+ }
+
+ void put_unused_fd(unsigned int fd)
+diff -NurpP --minimal linux-3.6.10/fs/proc/array.c linux-3.6.10-vs2.3.4.6/fs/proc/array.c
+--- linux-3.6.10/fs/proc/array.c 2012-07-22 21:39:42.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/proc/array.c 2012-10-04 16:47:00.000000000 +0000
+@@ -82,6 +82,8 @@
+ #include <linux/ptrace.h>
+ #include <linux/tracehook.h>
+ #include <linux/user_namespace.h>
++#include <linux/vs_context.h>
++#include <linux/vs_network.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/processor.h>
+@@ -172,6 +174,9 @@ static inline void task_state(struct seq
+ rcu_read_lock();
+ ppid = pid_alive(p) ?
+ task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0;
++ if (unlikely(vx_current_initpid(p->pid)))
++ ppid = 0;
++
+ tpid = 0;
+ if (pid_alive(p)) {
+ struct task_struct *tracer = ptrace_parent(p);
+@@ -296,7 +301,7 @@ static inline void task_sig(struct seq_f
+ }
+
+ static void render_cap_t(struct seq_file *m, const char *header,
+- kernel_cap_t *a)
++ struct vx_info *vxi, kernel_cap_t *a)
+ {
+ unsigned __capi;
+
+@@ -321,10 +326,11 @@ static inline void task_cap(struct seq_f
+ cap_bset = cred->cap_bset;
+ rcu_read_unlock();
+
+- render_cap_t(m, "CapInh:\t", &cap_inheritable);
+- render_cap_t(m, "CapPrm:\t", &cap_permitted);
+- render_cap_t(m, "CapEff:\t", &cap_effective);
+- render_cap_t(m, "CapBnd:\t", &cap_bset);
++ /* FIXME: maybe move the p->vx_info masking to __task_cred() ? */
++ render_cap_t(m, "CapInh:\t", p->vx_info, &cap_inheritable);
++ render_cap_t(m, "CapPrm:\t", p->vx_info, &cap_permitted);
++ render_cap_t(m, "CapEff:\t", p->vx_info, &cap_effective);
++ render_cap_t(m, "CapBnd:\t", p->vx_info, &cap_bset);
+ }
+
+ static inline void task_context_switch_counts(struct seq_file *m,
+@@ -346,6 +352,42 @@ static void task_cpus_allowed(struct seq
+ seq_putc(m, '\n');
+ }
+
++int proc_pid_nsproxy(struct seq_file *m, struct pid_namespace *ns,
++ struct pid *pid, struct task_struct *task)
++{
++ seq_printf(m, "Proxy:\t%p(%c)\n"
++ "Count:\t%u\n"
++ "uts:\t%p(%c)\n"
++ "ipc:\t%p(%c)\n"
++ "mnt:\t%p(%c)\n"
++ "pid:\t%p(%c)\n"
++ "net:\t%p(%c)\n",
++ task->nsproxy,
++ (task->nsproxy == init_task.nsproxy ? 'I' : '-'),
++ atomic_read(&task->nsproxy->count),
++ task->nsproxy->uts_ns,
++ (task->nsproxy->uts_ns == init_task.nsproxy->uts_ns ? 'I' : '-'),
++ task->nsproxy->ipc_ns,
++ (task->nsproxy->ipc_ns == init_task.nsproxy->ipc_ns ? 'I' : '-'),
++ task->nsproxy->mnt_ns,
++ (task->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns ? 'I' : '-'),
++ task->nsproxy->pid_ns,
++ (task->nsproxy->pid_ns == init_task.nsproxy->pid_ns ? 'I' : '-'),
++ task->nsproxy->net_ns,
++ (task->nsproxy->net_ns == init_task.nsproxy->net_ns ? 'I' : '-'));
++ return 0;
++}
++
++void task_vs_id(struct seq_file *m, struct task_struct *task)
++{
++ if (task_vx_flags(task, VXF_HIDE_VINFO, 0))
++ return;
++
++ seq_printf(m, "VxID: %d\n", vx_task_xid(task));
++ seq_printf(m, "NxID: %d\n", nx_task_nid(task));
++}
++
++
+ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task)
+ {
+@@ -362,6 +404,7 @@ int proc_pid_status(struct seq_file *m,
+ task_cap(m, task);
+ task_cpus_allowed(m, task);
+ cpuset_task_status_allowed(m, task);
++ task_vs_id(m, task);
+ task_context_switch_counts(m, task);
+ return 0;
+ }
+@@ -471,6 +514,17 @@ static int do_task_stat(struct seq_file
+ /* convert nsec -> ticks */
+ start_time = nsec_to_clock_t(start_time);
+
++ /* fixup start time for virt uptime */
++ if (vx_flags(VXF_VIRT_UPTIME, 0)) {
++ unsigned long long bias =
++ current->vx_info->cvirt.bias_clock;
++
++ if (start_time > bias)
++ start_time -= bias;
++ else
++ start_time = 0;
++ }
++
+ seq_printf(m, "%d (%s) %c", pid_nr_ns(pid, ns), tcomm, state);
+ seq_put_decimal_ll(m, ' ', ppid);
+ seq_put_decimal_ll(m, ' ', pgid);
+diff -NurpP --minimal linux-3.6.10/fs/proc/base.c linux-3.6.10-vs2.3.4.6/fs/proc/base.c
+--- linux-3.6.10/fs/proc/base.c 2012-10-04 13:27:41.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/proc/base.c 2012-10-04 16:47:00.000000000 +0000
+@@ -85,6 +85,8 @@
+ #include <linux/fs_struct.h>
+ #include <linux/slab.h>
+ #include <linux/flex_array.h>
++#include <linux/vs_context.h>
++#include <linux/vs_network.h>
+ #ifdef CONFIG_HARDWALL
+ #include <asm/hardwall.h>
+ #endif
+@@ -941,11 +943,16 @@ static ssize_t oom_adjust_write(struct f
+ goto err_task_lock;
+ }
+
+- if (oom_adjust < task->signal->oom_adj && !capable(CAP_SYS_RESOURCE)) {
++ if (oom_adjust < task->signal->oom_adj &&
++ !vx_capable(CAP_SYS_RESOURCE, VXC_OOM_ADJUST)) {
+ err = -EACCES;
+ goto err_sighand;
+ }
+
++ /* prevent guest processes from circumventing the oom killer */
++ if (vx_current_xid() && (oom_adjust == OOM_DISABLE))
++ oom_adjust = OOM_ADJUST_MIN;
++
+ /*
+ * Warn that /proc/pid/oom_adj is deprecated, see
+ * Documentation/feature-removal-schedule.txt.
+@@ -1548,6 +1555,8 @@ struct inode *proc_pid_make_inode(struct
+ inode->i_gid = cred->egid;
+ rcu_read_unlock();
+ }
++ /* procfs is xid tagged */
++ inode->i_tag = (tag_t)vx_task_xid(task);
+ security_task_to_inode(task, inode);
+
+ out:
+@@ -1593,6 +1602,8 @@ int pid_getattr(struct vfsmount *mnt, st
+
+ /* dentry stuff */
+
++static unsigned name_to_int(struct dentry *dentry);
++
+ /*
+ * Exceptional case: normally we are not allowed to unhash a busy
+ * directory. In this case, however, we can do it - no aliasing problems
+@@ -1621,6 +1632,12 @@ int pid_revalidate(struct dentry *dentry
+ task = get_proc_task(inode);
+
+ if (task) {
++ unsigned pid = name_to_int(dentry);
++
++ if (pid != ~0U && pid != vx_map_pid(task->pid)) {
++ put_task_struct(task);
++ goto drop;
++ }
+ if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
+ task_dumpable(task)) {
+ rcu_read_lock();
+@@ -1637,6 +1654,7 @@ int pid_revalidate(struct dentry *dentry
+ put_task_struct(task);
+ return 1;
+ }
++drop:
+ d_drop(dentry);
+ return 0;
+ }
+@@ -2457,6 +2475,13 @@ static struct dentry *proc_pident_lookup
+ if (!task)
+ goto out_no_task;
+
++ /* TODO: maybe we can come up with a generic approach? */
++ if (task_vx_flags(task, VXF_HIDE_VINFO, 0) &&
++ (dentry->d_name.len == 5) &&
++ (!memcmp(dentry->d_name.name, "vinfo", 5) ||
++ !memcmp(dentry->d_name.name, "ninfo", 5)))
++ goto out;
++
+ /*
+ * Yes, it does not scale. And it should not. Don't add
+ * new entries into /proc/<tgid>/ without very good reasons.
+@@ -2842,7 +2867,7 @@ out_iput:
+ static struct dentry *proc_base_lookup(struct inode *dir, struct dentry *dentry)
+ {
+ struct dentry *error;
+- struct task_struct *task = get_proc_task(dir);
++ struct task_struct *task = get_proc_task_real(dir);
+ const struct pid_entry *p, *last;
+
+ error = ERR_PTR(-ENOENT);
+@@ -3017,6 +3042,9 @@ static int proc_pid_personality(struct s
+ static const struct file_operations proc_task_operations;
+ static const struct inode_operations proc_task_inode_operations;
+
++extern int proc_pid_vx_info(struct task_struct *, char *);
++extern int proc_pid_nx_info(struct task_struct *, char *);
++
+ static const struct pid_entry tgid_base_stuff[] = {
+ DIR("task", S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations),
+ DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
+@@ -3083,6 +3111,8 @@ static const struct pid_entry tgid_base_
+ #ifdef CONFIG_CGROUPS
+ REG("cgroup", S_IRUGO, proc_cgroup_operations),
+ #endif
++ INF("vinfo", S_IRUGO, proc_pid_vx_info),
++ INF("ninfo", S_IRUGO, proc_pid_nx_info),
+ INF("oom_score", S_IRUGO, proc_oom_score),
+ REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
+ REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
+@@ -3106,6 +3136,7 @@ static const struct pid_entry tgid_base_
+ REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
+ REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
+ #endif
++ ONE("nsproxy", S_IRUGO, proc_pid_nsproxy),
+ };
+
+ static int proc_tgid_base_readdir(struct file * filp,
+@@ -3300,7 +3331,7 @@ retry:
+ iter.task = NULL;
+ pid = find_ge_pid(iter.tgid, ns);
+ if (pid) {
+- iter.tgid = pid_nr_ns(pid, ns);
++ iter.tgid = pid_unmapped_nr_ns(pid, ns);
+ iter.task = pid_task(pid, PIDTYPE_PID);
+ /* What we to know is if the pid we have find is the
+ * pid of a thread_group_leader. Testing for task
+@@ -3330,7 +3361,7 @@ static int proc_pid_fill_cache(struct fi
+ struct tgid_iter iter)
+ {
+ char name[PROC_NUMBUF];
+- int len = snprintf(name, sizeof(name), "%d", iter.tgid);
++ int len = snprintf(name, sizeof(name), "%d", vx_map_tgid(iter.tgid));
+ return proc_fill_cache(filp, dirent, filldir, name, len,
+ proc_pid_instantiate, iter.task, NULL);
+ }
+@@ -3354,7 +3385,7 @@ int proc_pid_readdir(struct file * filp,
+ goto out_no_task;
+ nr = filp->f_pos - FIRST_PROCESS_ENTRY;
+
+- reaper = get_proc_task(filp->f_path.dentry->d_inode);
++ reaper = get_proc_task_real(filp->f_path.dentry->d_inode);
+ if (!reaper)
+ goto out_no_task;
+
+@@ -3376,6 +3407,8 @@ int proc_pid_readdir(struct file * filp,
+ __filldir = fake_filldir;
+
+ filp->f_pos = iter.tgid + TGID_OFFSET;
++ if (!vx_proc_task_visible(iter.task))
++ continue;
+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
+ put_task_struct(iter.task);
+ goto out;
+@@ -3537,6 +3570,8 @@ static struct dentry *proc_task_lookup(s
+ tid = name_to_int(dentry);
+ if (tid == ~0U)
+ goto out;
++ if (vx_current_initpid(tid))
++ goto out;
+
+ ns = dentry->d_sb->s_fs_info;
+ rcu_read_lock();
+diff -NurpP --minimal linux-3.6.10/fs/proc/generic.c linux-3.6.10-vs2.3.4.6/fs/proc/generic.c
+--- linux-3.6.10/fs/proc/generic.c 2012-10-04 13:27:41.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/proc/generic.c 2012-10-04 16:47:00.000000000 +0000
+@@ -22,6 +22,7 @@
+ #include <linux/bitops.h>
+ #include <linux/spinlock.h>
+ #include <linux/completion.h>
++#include <linux/vserver/inode.h>
+ #include <asm/uaccess.h>
+
+ #include "internal.h"
+@@ -424,11 +425,15 @@ struct dentry *proc_lookup_de(struct pro
+ for (de = de->subdir; de ; de = de->next) {
+ if (de->namelen != dentry->d_name.len)
+ continue;
++ if (!vx_hide_check(0, de->vx_flags))
++ continue;
+ if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
+ pde_get(de);
+ spin_unlock(&proc_subdir_lock);
+ error = -EINVAL;
+ inode = proc_get_inode(dir->i_sb, de);
++ /* generic proc entries belong to the host */
++ inode->i_tag = 0;
+ goto out_unlock;
+ }
+ }
+@@ -506,6 +511,8 @@ int proc_readdir_de(struct proc_dir_entr
+
+ /* filldir passes info to user space */
+ pde_get(de);
++ if (!vx_hide_check(0, de->vx_flags))
++ goto skip;
+ spin_unlock(&proc_subdir_lock);
+ if (filldir(dirent, de->name, de->namelen, filp->f_pos,
+ de->low_ino, de->mode >> 12) < 0) {
+@@ -513,6 +520,7 @@ int proc_readdir_de(struct proc_dir_entr
+ goto out;
+ }
+ spin_lock(&proc_subdir_lock);
++ skip:
+ filp->f_pos++;
+ next = de->next;
+ pde_put(de);
+@@ -626,6 +634,7 @@ static struct proc_dir_entry *__proc_cre
+ ent->nlink = nlink;
+ atomic_set(&ent->count, 1);
+ ent->pde_users = 0;
++ ent->vx_flags = IATTR_PROC_DEFAULT;
+ spin_lock_init(&ent->pde_unload_lock);
+ ent->pde_unload_completion = NULL;
+ INIT_LIST_HEAD(&ent->pde_openers);
+@@ -649,7 +658,8 @@ struct proc_dir_entry *proc_symlink(cons
+ kfree(ent->data);
+ kfree(ent);
+ ent = NULL;
+- }
++ } else
++ ent->vx_flags = IATTR_PROC_SYMLINK;
+ } else {
+ kfree(ent);
+ ent = NULL;
+diff -NurpP --minimal linux-3.6.10/fs/proc/inode.c linux-3.6.10-vs2.3.4.6/fs/proc/inode.c
+--- linux-3.6.10/fs/proc/inode.c 2012-07-22 21:39:42.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/proc/inode.c 2012-10-04 16:47:00.000000000 +0000
+@@ -458,6 +458,8 @@ struct inode *proc_get_inode(struct supe
+ inode->i_uid = de->uid;
+ inode->i_gid = de->gid;
+ }
++ if (de->vx_flags)
++ PROC_I(inode)->vx_flags = de->vx_flags;
+ if (de->size)
+ inode->i_size = de->size;
+ if (de->nlink)
+diff -NurpP --minimal linux-3.6.10/fs/proc/internal.h linux-3.6.10-vs2.3.4.6/fs/proc/internal.h
+--- linux-3.6.10/fs/proc/internal.h 2012-10-04 13:27:41.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/proc/internal.h 2012-10-04 16:47:00.000000000 +0000
+@@ -10,6 +10,8 @@
+ */
+
+ #include <linux/proc_fs.h>
++#include <linux/vs_pid.h>
++
+ struct ctl_table_header;
+
+ extern struct proc_dir_entry proc_root;
+@@ -52,6 +54,9 @@ extern int proc_pid_status(struct seq_fi
+ struct pid *pid, struct task_struct *task);
+ extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task);
++extern int proc_pid_nsproxy(struct seq_file *m, struct pid_namespace *ns,
++ struct pid *pid, struct task_struct *task);
++
+ extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
+
+ extern const struct file_operations proc_tid_children_operations;
+@@ -81,11 +86,16 @@ static inline struct pid *proc_pid(struc
+ return PROC_I(inode)->pid;
+ }
+
+-static inline struct task_struct *get_proc_task(struct inode *inode)
++static inline struct task_struct *get_proc_task_real(struct inode *inode)
+ {
+ return get_pid_task(proc_pid(inode), PIDTYPE_PID);
+ }
+
++static inline struct task_struct *get_proc_task(struct inode *inode)
++{
++ return vx_get_proc_task(inode, proc_pid(inode));
++}
++
+ static inline int proc_fd(struct inode *inode)
+ {
+ return PROC_I(inode)->fd;
+diff -NurpP --minimal linux-3.6.10/fs/proc/loadavg.c linux-3.6.10-vs2.3.4.6/fs/proc/loadavg.c
+--- linux-3.6.10/fs/proc/loadavg.c 2009-09-10 13:26:23.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/proc/loadavg.c 2012-10-04 16:47:00.000000000 +0000
+@@ -12,15 +12,27 @@
+
+ static int loadavg_proc_show(struct seq_file *m, void *v)
+ {
++ unsigned long running;
++ unsigned int threads;
+ unsigned long avnrun[3];
+
+ get_avenrun(avnrun, FIXED_1/200, 0);
+
++ if (vx_flags(VXF_VIRT_LOAD, 0)) {
++ struct vx_info *vxi = current_vx_info();
++
++ running = atomic_read(&vxi->cvirt.nr_running);
++ threads = atomic_read(&vxi->cvirt.nr_threads);
++ } else {
++ running = nr_running();
++ threads = nr_threads;
++ }
++
+ seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %ld/%d %d\n",
+ LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
+ LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
+ LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]),
+- nr_running(), nr_threads,
++ running, threads,
+ task_active_pid_ns(current)->last_pid);
+ return 0;
+ }
+diff -NurpP --minimal linux-3.6.10/fs/proc/meminfo.c linux-3.6.10-vs2.3.4.6/fs/proc/meminfo.c
+--- linux-3.6.10/fs/proc/meminfo.c 2012-01-09 15:14:55.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/proc/meminfo.c 2012-10-04 16:47:00.000000000 +0000
+@@ -39,7 +39,8 @@ static int meminfo_proc_show(struct seq_
+ allowed = ((totalram_pages - hugetlb_total_pages())
+ * sysctl_overcommit_ratio / 100) + total_swap_pages;
+
+- cached = global_page_state(NR_FILE_PAGES) -
++ cached = vx_flags(VXF_VIRT_MEM, 0) ?
++ vx_vsi_cached(&i) : global_page_state(NR_FILE_PAGES) -
+ total_swapcache_pages - i.bufferram;
+ if (cached < 0)
+ cached = 0;
+diff -NurpP --minimal linux-3.6.10/fs/proc/root.c linux-3.6.10-vs2.3.4.6/fs/proc/root.c
+--- linux-3.6.10/fs/proc/root.c 2012-10-04 13:27:41.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/proc/root.c 2012-10-04 16:47:00.000000000 +0000
+@@ -19,9 +19,14 @@
+ #include <linux/mount.h>
+ #include <linux/pid_namespace.h>
+ #include <linux/parser.h>
++#include <linux/vserver/inode.h>
+
+ #include "internal.h"
+
++struct proc_dir_entry *proc_virtual;
++
++extern void proc_vx_init(void);
++
+ static int proc_test_super(struct super_block *sb, void *data)
+ {
+ return sb->s_fs_info == data;
+@@ -189,6 +194,7 @@ void __init proc_root_init(void)
+ #endif
+ proc_mkdir("bus", NULL);
+ proc_sys_init();
++ proc_vx_init();
+ }
+
+ static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat
+@@ -255,6 +261,7 @@ struct proc_dir_entry proc_root = {
+ .proc_iops = &proc_root_inode_operations,
+ .proc_fops = &proc_root_operations,
+ .parent = &proc_root,
++ .vx_flags = IATTR_ADMIN | IATTR_WATCH,
+ .name = "/proc",
+ };
+
+diff -NurpP --minimal linux-3.6.10/fs/proc/stat.c linux-3.6.10-vs2.3.4.6/fs/proc/stat.c
+--- linux-3.6.10/fs/proc/stat.c 2012-12-11 11:36:58.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/proc/stat.c 2012-11-06 17:43:41.000000000 +0000
+@@ -9,6 +9,7 @@
+ #include <linux/slab.h>
+ #include <linux/time.h>
+ #include <linux/irqnr.h>
++#include <linux/vserver/cvirt.h>
+ #include <asm/cputime.h>
+ #include <linux/tick.h>
+
+@@ -92,6 +93,10 @@ static int show_stat(struct seq_file *p,
+ irq = softirq = steal = 0;
+ guest = guest_nice = 0;
+ getboottime(&boottime);
++
++ if (vx_flags(VXF_VIRT_UPTIME, 0))
++ vx_vsi_boottime(&boottime);
++
+ jif = boottime.tv_sec;
+
+ for_each_possible_cpu(i) {
+diff -NurpP --minimal linux-3.6.10/fs/proc/uptime.c linux-3.6.10-vs2.3.4.6/fs/proc/uptime.c
+--- linux-3.6.10/fs/proc/uptime.c 2012-03-19 18:47:26.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/proc/uptime.c 2012-10-04 16:47:00.000000000 +0000
+@@ -5,6 +5,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/time.h>
+ #include <linux/kernel_stat.h>
++#include <linux/vserver/cvirt.h>
+ #include <asm/cputime.h>
+
+ static int uptime_proc_show(struct seq_file *m, void *v)
+@@ -25,6 +26,10 @@ static int uptime_proc_show(struct seq_f
+ nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC;
+ idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
+ idle.tv_nsec = rem;
++
++ if (vx_flags(VXF_VIRT_UPTIME, 0))
++ vx_vsi_uptime(&uptime, &idle);
++
+ seq_printf(m, "%lu.%02lu %lu.%02lu\n",
+ (unsigned long) uptime.tv_sec,
+ (uptime.tv_nsec / (NSEC_PER_SEC / 100)),
+diff -NurpP --minimal linux-3.6.10/fs/proc_namespace.c linux-3.6.10-vs2.3.4.6/fs/proc_namespace.c
+--- linux-3.6.10/fs/proc_namespace.c 2012-10-04 13:27:41.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/proc_namespace.c 2012-12-10 17:17:40.000000000 +0000
+@@ -44,6 +44,8 @@ static int show_sb_opts(struct seq_file
+ { MS_SYNCHRONOUS, ",sync" },
+ { MS_DIRSYNC, ",dirsync" },
+ { MS_MANDLOCK, ",mand" },
++ { MS_TAGGED, ",tag" },
++ { MS_NOTAGCHECK, ",notagcheck" },
+ { 0, NULL }
+ };
+ const struct proc_fs_info *fs_infop;
+@@ -80,6 +82,40 @@ static inline void mangle(struct seq_fil
+ seq_escape(m, s, " \t\n\\");
+ }
+
++#ifdef CONFIG_VSERVER_EXTRA_MNT_CHECK
++
++static int mnt_is_reachable(struct vfsmount *vfsmnt)
++{
++ struct path root;
++ struct dentry *point;
++ struct mount *mnt = real_mount(vfsmnt);
++ struct mount *root_mnt;
++ int ret;
++
++ if (mnt == mnt->mnt_ns->root)
++ return 1;
++
++ br_read_lock(&vfsmount_lock);
++ root = current->fs->root;
++ root_mnt = real_mount(root.mnt);
++ point = root.dentry;
++
++ while ((mnt != mnt->mnt_parent) && (mnt != root_mnt)) {
++ point = mnt->mnt_mountpoint;
++ mnt = mnt->mnt_parent;
++ }
++
++ ret = (mnt == root_mnt) && is_subdir(point, root.dentry);
++
++ br_read_unlock(&vfsmount_lock);
++
++ return ret;
++}
++
++#else
++#define mnt_is_reachable(v) (1)
++#endif
++
+ static void show_type(struct seq_file *m, struct super_block *sb)
+ {
+ mangle(m, sb->s_type->name);
+@@ -96,6 +132,17 @@ static int show_vfsmnt(struct seq_file *
+ struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
+ struct super_block *sb = mnt_path.dentry->d_sb;
+
++ if (vx_flags(VXF_HIDE_MOUNT, 0))
++ return SEQ_SKIP;
++ if (!mnt_is_reachable(mnt) && !vx_check(0, VS_WATCH_P))
++ return SEQ_SKIP;
++
++ if (!vx_check(0, VS_ADMIN|VS_WATCH) &&
++ mnt == current->fs->root.mnt) {
++ seq_puts(m, "/dev/root / ");
++ goto type;
++ }
++
+ if (sb->s_op->show_devname) {
+ err = sb->s_op->show_devname(m, mnt_path.dentry);
+ if (err)
+@@ -106,6 +153,7 @@ static int show_vfsmnt(struct seq_file *
+ seq_putc(m, ' ');
+ seq_path(m, &mnt_path, " \t\n\\");
+ seq_putc(m, ' ');
++type:
+ show_type(m, sb);
+ seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
+ err = show_sb_opts(m, sb);
+@@ -128,6 +176,11 @@ static int show_mountinfo(struct seq_fil
+ struct path root = p->root;
+ int err = 0;
+
++ if (vx_flags(VXF_HIDE_MOUNT, 0))
++ return SEQ_SKIP;
++ if (!mnt_is_reachable(mnt) && !vx_check(0, VS_WATCH_P))
++ return SEQ_SKIP;
++
+ seq_printf(m, "%i %i %u:%u ", r->mnt_id, r->mnt_parent->mnt_id,
+ MAJOR(sb->s_dev), MINOR(sb->s_dev));
+ if (sb->s_op->show_path)
+@@ -187,6 +240,17 @@ static int show_vfsstat(struct seq_file
+ struct super_block *sb = mnt_path.dentry->d_sb;
+ int err = 0;
+
++ if (vx_flags(VXF_HIDE_MOUNT, 0))
++ return SEQ_SKIP;
++ if (!mnt_is_reachable(mnt) && !vx_check(0, VS_WATCH_P))
++ return SEQ_SKIP;
++
++ if (!vx_check(0, VS_ADMIN|VS_WATCH) &&
++ mnt == current->fs->root.mnt) {
++ seq_puts(m, "device /dev/root mounted on / ");
++ goto type;
++ }
++
+ /* device */
+ if (sb->s_op->show_devname) {
+ seq_puts(m, "device ");
+@@ -203,7 +267,7 @@ static int show_vfsstat(struct seq_file
+ seq_puts(m, " mounted on ");
+ seq_path(m, &mnt_path, " \t\n\\");
+ seq_putc(m, ' ');
+-
++type:
+ /* file system type */
+ seq_puts(m, "with fstype ");
+ show_type(m, sb);
+diff -NurpP --minimal linux-3.6.10/fs/quota/dquot.c linux-3.6.10-vs2.3.4.6/fs/quota/dquot.c
+--- linux-3.6.10/fs/quota/dquot.c 2012-10-04 13:27:41.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/quota/dquot.c 2012-10-04 16:47:00.000000000 +0000
+@@ -1580,6 +1580,9 @@ int __dquot_alloc_space(struct inode *in
+ struct dquot **dquots = inode->i_dquot;
+ int reserve = flags & DQUOT_SPACE_RESERVE;
+
++ if ((ret = dl_alloc_space(inode, number)))
++ return ret;
++
+ /*
+ * First test before acquiring mutex - solves deadlocks when we
+ * re-enter the quota code and are already holding the mutex
+@@ -1635,6 +1638,9 @@ int dquot_alloc_inode(const struct inode
+ struct dquot_warn warn[MAXQUOTAS];
+ struct dquot * const *dquots = inode->i_dquot;
+
++ if ((ret = dl_alloc_inode(inode)))
++ return ret;
++
+ /* First test before acquiring mutex - solves deadlocks when we
+ * re-enter the quota code and are already holding the mutex */
+ if (!dquot_active(inode))
+@@ -1706,6 +1712,8 @@ void __dquot_free_space(struct inode *in
+ struct dquot **dquots = inode->i_dquot;
+ int reserve = flags & DQUOT_SPACE_RESERVE;
+
++ dl_free_space(inode, number);
++
+ /* First test before acquiring mutex - solves deadlocks when we
+ * re-enter the quota code and are already holding the mutex */
+ if (!dquot_active(inode)) {
+@@ -1750,6 +1758,8 @@ void dquot_free_inode(const struct inode
+ struct dquot_warn warn[MAXQUOTAS];
+ struct dquot * const *dquots = inode->i_dquot;
+
++ dl_free_inode(inode);
++
+ /* First test before acquiring mutex - solves deadlocks when we
+ * re-enter the quota code and are already holding the mutex */
+ if (!dquot_active(inode))
+diff -NurpP --minimal linux-3.6.10/fs/quota/quota.c linux-3.6.10-vs2.3.4.6/fs/quota/quota.c
+--- linux-3.6.10/fs/quota/quota.c 2012-10-04 13:27:41.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/quota/quota.c 2012-10-04 16:47:00.000000000 +0000
+@@ -8,6 +8,7 @@
+ #include <linux/fs.h>
+ #include <linux/namei.h>
+ #include <linux/slab.h>
++#include <linux/vs_context.h>
+ #include <asm/current.h>
+ #include <linux/uaccess.h>
+ #include <linux/kernel.h>
+@@ -37,7 +38,7 @@ static int check_quotactl_permission(str
+ break;
+ /*FALLTHROUGH*/
+ default:
+- if (!capable(CAP_SYS_ADMIN))
++ if (!vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
+ return -EPERM;
+ }
+
+@@ -291,6 +292,46 @@ static int do_quotactl(struct super_bloc
+ }
+ }
+
++#if defined(CONFIG_BLK_DEV_VROOT) || defined(CONFIG_BLK_DEV_VROOT_MODULE)
++
++#include <linux/vroot.h>
++#include <linux/major.h>
++#include <linux/module.h>
++#include <linux/kallsyms.h>
++#include <linux/vserver/debug.h>
++
++static vroot_grb_func *vroot_get_real_bdev = NULL;
++
++static DEFINE_SPINLOCK(vroot_grb_lock);
++
++int register_vroot_grb(vroot_grb_func *func) {
++ int ret = -EBUSY;
++
++ spin_lock(&vroot_grb_lock);
++ if (!vroot_get_real_bdev) {
++ vroot_get_real_bdev = func;
++ ret = 0;
++ }
++ spin_unlock(&vroot_grb_lock);
++ return ret;
++}
++EXPORT_SYMBOL(register_vroot_grb);
++
++int unregister_vroot_grb(vroot_grb_func *func) {
++ int ret = -EINVAL;
++
++ spin_lock(&vroot_grb_lock);
++ if (vroot_get_real_bdev) {
++ vroot_get_real_bdev = NULL;
++ ret = 0;
++ }
++ spin_unlock(&vroot_grb_lock);
++ return ret;
++}
++EXPORT_SYMBOL(unregister_vroot_grb);
++
++#endif
++
+ /* Return 1 if 'cmd' will block on frozen filesystem */
+ static int quotactl_cmd_write(int cmd)
+ {
+@@ -323,6 +364,22 @@ static struct super_block *quotactl_bloc
+ putname(tmp);
+ if (IS_ERR(bdev))
+ return ERR_CAST(bdev);
++#if defined(CONFIG_BLK_DEV_VROOT) || defined(CONFIG_BLK_DEV_VROOT_MODULE)
++ if (bdev && bdev->bd_inode &&
++ imajor(bdev->bd_inode) == VROOT_MAJOR) {
++ struct block_device *bdnew = (void *)-EINVAL;
++
++ if (vroot_get_real_bdev)
++ bdnew = vroot_get_real_bdev(bdev);
++ else
++ vxdprintk(VXD_CBIT(misc, 0),
++ "vroot_get_real_bdev not set");
++ bdput(bdev);
++ if (IS_ERR(bdnew))
++ return ERR_PTR(PTR_ERR(bdnew));
++ bdev = bdnew;
++ }
++#endif
+ if (quotactl_cmd_write(cmd))
+ sb = get_super_thawed(bdev);
+ else
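+
The quota.c hunk lets the vroot driver hand the quota code a single "get real block device" callback: register_vroot_grb() installs it under a spinlock and returns -EBUSY if one is already set, unregister_vroot_grb() clears it, and quotactl_block() routes VROOT_MAJOR devices through it when present. A user-space sketch of that single-slot registration pattern follows; hook_fn, register_hook, unregister_hook, demo_hook and the pthread mutex (standing in for the spinlock) are all invented names.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

typedef int hook_fn(int device);	/* stand-in for vroot_grb_func */

static hook_fn *registered_hook;	/* NULL while no provider is loaded */
static pthread_mutex_t hook_lock = PTHREAD_MUTEX_INITIALIZER;

static int register_hook(hook_fn *fn)
{
	int ret = -EBUSY;		/* only one provider may register */

	pthread_mutex_lock(&hook_lock);
	if (!registered_hook) {
		registered_hook = fn;
		ret = 0;
	}
	pthread_mutex_unlock(&hook_lock);
	return ret;
}

static int unregister_hook(hook_fn *fn)
{
	int ret = -EINVAL;		/* nothing registered */

	(void)fn;	/* like the hunk above, the argument is not compared */
	pthread_mutex_lock(&hook_lock);
	if (registered_hook) {
		registered_hook = NULL;
		ret = 0;
	}
	pthread_mutex_unlock(&hook_lock);
	return ret;
}

static int demo_hook(int device) { return device + 1; }

int main(void)
{
	printf("first register:  %d\n", register_hook(demo_hook));	/* 0 */
	printf("second register: %d\n", register_hook(demo_hook));	/* -EBUSY */
	printf("unregister:      %d\n", unregister_hook(demo_hook));	/* 0 */
	return 0;
}
+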
+diff -NurpP --minimal linux-3.6.10/fs/reiserfs/file.c linux-3.6.10-vs2.3.4.6/fs/reiserfs/file.c
+--- linux-3.6.10/fs/reiserfs/file.c 2012-05-21 16:07:26.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/reiserfs/file.c 2012-10-04 16:47:00.000000000 +0000
+@@ -319,5 +319,6 @@ const struct inode_operations reiserfs_f
+ .listxattr = reiserfs_listxattr,
+ .removexattr = reiserfs_removexattr,
+ .permission = reiserfs_permission,
++ .sync_flags = reiserfs_sync_flags,
+ .get_acl = reiserfs_get_acl,
+ };
+diff -NurpP --minimal linux-3.6.10/fs/reiserfs/inode.c linux-3.6.10-vs2.3.4.6/fs/reiserfs/inode.c
+--- linux-3.6.10/fs/reiserfs/inode.c 2012-12-11 11:36:58.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/reiserfs/inode.c 2012-12-08 00:36:33.000000000 +0000
+@@ -18,6 +18,7 @@
+ #include <linux/writeback.h>
+ #include <linux/quotaops.h>
+ #include <linux/swap.h>
++#include <linux/vs_tag.h>
+
+ int reiserfs_commit_write(struct file *f, struct page *page,
+ unsigned from, unsigned to);
+@@ -1131,6 +1132,8 @@ static void init_inode(struct inode *ino
+ struct buffer_head *bh;
+ struct item_head *ih;
+ __u32 rdev;
++ uid_t uid;
++ gid_t gid;
+ //int version = ITEM_VERSION_1;
+
+ bh = PATH_PLAST_BUFFER(path);
+@@ -1151,12 +1154,13 @@ static void init_inode(struct inode *ino
+ (struct stat_data_v1 *)B_I_PITEM(bh, ih);
+ unsigned long blocks;
+
++ uid = sd_v1_uid(sd);
++ gid = sd_v1_gid(sd);
++
+ set_inode_item_key_version(inode, KEY_FORMAT_3_5);
+ set_inode_sd_version(inode, STAT_DATA_V1);
+ inode->i_mode = sd_v1_mode(sd);
+ set_nlink(inode, sd_v1_nlink(sd));
+- inode->i_uid = sd_v1_uid(sd);
+- inode->i_gid = sd_v1_gid(sd);
+ inode->i_size = sd_v1_size(sd);
+ inode->i_atime.tv_sec = sd_v1_atime(sd);
+ inode->i_mtime.tv_sec = sd_v1_mtime(sd);
+@@ -1198,11 +1202,12 @@ static void init_inode(struct inode *ino
+ // (directories and symlinks)
+ struct stat_data *sd = (struct stat_data *)B_I_PITEM(bh, ih);
+
++ uid = sd_v2_uid(sd);
++ gid = sd_v2_gid(sd);
++
+ inode->i_mode = sd_v2_mode(sd);
+ set_nlink(inode, sd_v2_nlink(sd));
+- inode->i_uid = sd_v2_uid(sd);
+ inode->i_size = sd_v2_size(sd);
+- inode->i_gid = sd_v2_gid(sd);
+ inode->i_mtime.tv_sec = sd_v2_mtime(sd);
+ inode->i_atime.tv_sec = sd_v2_atime(sd);
+ inode->i_ctime.tv_sec = sd_v2_ctime(sd);
+@@ -1232,6 +1237,10 @@ static void init_inode(struct inode *ino
+ sd_attrs_to_i_attrs(sd_v2_attrs(sd), inode);
+ }
+
++ inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
++ inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
++ inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid, 0);
++
+ pathrelse(path);
+ if (S_ISREG(inode->i_mode)) {
+ inode->i_op = &reiserfs_file_inode_operations;
+@@ -1254,13 +1263,15 @@ static void init_inode(struct inode *ino
+ static void inode2sd(void *sd, struct inode *inode, loff_t size)
+ {
+ struct stat_data *sd_v2 = (struct stat_data *)sd;
++ uid_t uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag);
++ gid_t gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag);
+ __u16 flags;
+
++ set_sd_v2_uid(sd_v2, uid);
++ set_sd_v2_gid(sd_v2, gid);
+ set_sd_v2_mode(sd_v2, inode->i_mode);
+ set_sd_v2_nlink(sd_v2, inode->i_nlink);
+- set_sd_v2_uid(sd_v2, inode->i_uid);
+ set_sd_v2_size(sd_v2, size);
+- set_sd_v2_gid(sd_v2, inode->i_gid);
+ set_sd_v2_mtime(sd_v2, inode->i_mtime.tv_sec);
+ set_sd_v2_atime(sd_v2, inode->i_atime.tv_sec);
+ set_sd_v2_ctime(sd_v2, inode->i_ctime.tv_sec);
+@@ -2869,14 +2880,19 @@ int reiserfs_commit_write(struct file *f
+ void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode)
+ {
+ if (reiserfs_attrs(inode->i_sb)) {
+- if (sd_attrs & REISERFS_SYNC_FL)
+- inode->i_flags |= S_SYNC;
+- else
+- inode->i_flags &= ~S_SYNC;
+ if (sd_attrs & REISERFS_IMMUTABLE_FL)
+ inode->i_flags |= S_IMMUTABLE;
+ else
+ inode->i_flags &= ~S_IMMUTABLE;
++ if (sd_attrs & REISERFS_IXUNLINK_FL)
++ inode->i_flags |= S_IXUNLINK;
++ else
++ inode->i_flags &= ~S_IXUNLINK;
++
++ if (sd_attrs & REISERFS_SYNC_FL)
++ inode->i_flags |= S_SYNC;
++ else
++ inode->i_flags &= ~S_SYNC;
+ if (sd_attrs & REISERFS_APPEND_FL)
+ inode->i_flags |= S_APPEND;
+ else
+@@ -2889,6 +2905,15 @@ void sd_attrs_to_i_attrs(__u16 sd_attrs,
+ REISERFS_I(inode)->i_flags |= i_nopack_mask;
+ else
+ REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
++
++ if (sd_attrs & REISERFS_BARRIER_FL)
++ inode->i_vflags |= V_BARRIER;
++ else
++ inode->i_vflags &= ~V_BARRIER;
++ if (sd_attrs & REISERFS_COW_FL)
++ inode->i_vflags |= V_COW;
++ else
++ inode->i_vflags &= ~V_COW;
+ }
+ }
+
+@@ -2899,6 +2924,11 @@ void i_attrs_to_sd_attrs(struct inode *i
+ *sd_attrs |= REISERFS_IMMUTABLE_FL;
+ else
+ *sd_attrs &= ~REISERFS_IMMUTABLE_FL;
++ if (inode->i_flags & S_IXUNLINK)
++ *sd_attrs |= REISERFS_IXUNLINK_FL;
++ else
++ *sd_attrs &= ~REISERFS_IXUNLINK_FL;
++
+ if (inode->i_flags & S_SYNC)
+ *sd_attrs |= REISERFS_SYNC_FL;
+ else
+@@ -2911,6 +2941,15 @@ void i_attrs_to_sd_attrs(struct inode *i
+ *sd_attrs |= REISERFS_NOTAIL_FL;
+ else
+ *sd_attrs &= ~REISERFS_NOTAIL_FL;
++
++ if (inode->i_vflags & V_BARRIER)
++ *sd_attrs |= REISERFS_BARRIER_FL;
++ else
++ *sd_attrs &= ~REISERFS_BARRIER_FL;
++ if (inode->i_vflags & V_COW)
++ *sd_attrs |= REISERFS_COW_FL;
++ else
++ *sd_attrs &= ~REISERFS_COW_FL;
+ }
+ }
+
+@@ -3155,7 +3194,8 @@ int reiserfs_setattr(struct dentry *dent
+ }
+
+ if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
+- (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
++ (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) ||
++ (ia_valid & ATTR_TAG && attr->ia_tag != inode->i_tag)) {
+ struct reiserfs_transaction_handle th;
+ int jbegin_count =
+ 2 *
+@@ -3186,6 +3226,9 @@ int reiserfs_setattr(struct dentry *dent
+ inode->i_uid = attr->ia_uid;
+ if (attr->ia_valid & ATTR_GID)
+ inode->i_gid = attr->ia_gid;
++ if ((attr->ia_valid & ATTR_TAG) &&
++ IS_TAGGED(inode))
++ inode->i_tag = attr->ia_tag;
+ mark_inode_dirty(inode);
+ error = journal_end(&th, inode->i_sb, jbegin_count);
+ if (error)
+diff -NurpP --minimal linux-3.6.10/fs/reiserfs/ioctl.c linux-3.6.10-vs2.3.4.6/fs/reiserfs/ioctl.c
+--- linux-3.6.10/fs/reiserfs/ioctl.c 2012-05-21 16:07:27.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/reiserfs/ioctl.c 2012-10-04 16:47:00.000000000 +0000
+@@ -11,6 +11,21 @@
+ #include <linux/pagemap.h>
+ #include <linux/compat.h>
+
++
++int reiserfs_sync_flags(struct inode *inode, int flags, int vflags)
++{
++ __u16 sd_attrs = 0;
++
++ inode->i_flags = flags;
++ inode->i_vflags = vflags;
++
++ i_attrs_to_sd_attrs(inode, &sd_attrs);
++ REISERFS_I(inode)->i_attrs = sd_attrs;
++ inode->i_ctime = CURRENT_TIME_SEC;
++ mark_inode_dirty(inode);
++ return 0;
++}
++
+ /*
+ * reiserfs_ioctl - handler for ioctl for inode
+ * supported commands:
+@@ -22,7 +37,7 @@
+ long reiserfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+ struct inode *inode = filp->f_path.dentry->d_inode;
+- unsigned int flags;
++ unsigned int flags, oldflags;
+ int err = 0;
+
+ reiserfs_write_lock(inode->i_sb);
+@@ -47,6 +62,7 @@ long reiserfs_ioctl(struct file *filp, u
+
+ flags = REISERFS_I(inode)->i_attrs;
+ i_attrs_to_sd_attrs(inode, (__u16 *) & flags);
++ flags &= REISERFS_FL_USER_VISIBLE;
+ err = put_user(flags, (int __user *)arg);
+ break;
+ case REISERFS_IOC_SETFLAGS:{
+@@ -67,6 +83,10 @@ long reiserfs_ioctl(struct file *filp, u
+ err = -EFAULT;
+ goto setflags_out;
+ }
++ if (IS_BARRIER(inode)) {
++ vxwprintk_task(1, "messing with the barrier.");
++ return -EACCES;
++ }
+ /*
+ * Is it quota file? Do not allow user to mess with it
+ */
+@@ -91,6 +111,10 @@ long reiserfs_ioctl(struct file *filp, u
+ goto setflags_out;
+ }
+ }
++
++ oldflags = REISERFS_I(inode)->i_attrs;
++ flags &= REISERFS_FL_USER_MODIFIABLE;
++ flags |= oldflags & ~REISERFS_FL_USER_MODIFIABLE;
+ sd_attrs_to_i_attrs(flags, inode);
+ REISERFS_I(inode)->i_attrs = flags;
+ inode->i_ctime = CURRENT_TIME_SEC;
+diff -NurpP --minimal linux-3.6.10/fs/reiserfs/namei.c linux-3.6.10-vs2.3.4.6/fs/reiserfs/namei.c
+--- linux-3.6.10/fs/reiserfs/namei.c 2012-10-04 13:27:41.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/reiserfs/namei.c 2012-10-04 16:47:00.000000000 +0000
+@@ -18,6 +18,7 @@
+ #include "acl.h"
+ #include "xattr.h"
+ #include <linux/quotaops.h>
++#include <linux/vs_tag.h>
+
+ #define INC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) { inc_nlink(i); if (i->i_nlink >= REISERFS_LINK_MAX) set_nlink(i, 1); }
+ #define DEC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) drop_nlink(i);
+@@ -362,6 +363,7 @@ static struct dentry *reiserfs_lookup(st
+ if (retval == IO_ERROR) {
+ return ERR_PTR(-EIO);
+ }
++ dx_propagate_tag(nd, inode);
+
+ return d_splice_alias(inode, dentry);
+ }
+diff -NurpP --minimal linux-3.6.10/fs/reiserfs/reiserfs.h linux-3.6.10-vs2.3.4.6/fs/reiserfs/reiserfs.h
+--- linux-3.6.10/fs/reiserfs/reiserfs.h 2012-07-22 21:39:42.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/reiserfs/reiserfs.h 2012-10-04 16:47:00.000000000 +0000
+@@ -549,6 +549,7 @@ enum reiserfs_mount_options {
+ REISERFS_EXPOSE_PRIVROOT,
+ REISERFS_BARRIER_NONE,
+ REISERFS_BARRIER_FLUSH,
++ REISERFS_TAGGED,
+
+ /* Actions on error */
+ REISERFS_ERROR_PANIC,
+@@ -1548,6 +1549,11 @@ struct stat_data_v1 {
+ #define REISERFS_COMPR_FL FS_COMPR_FL
+ #define REISERFS_NOTAIL_FL FS_NOTAIL_FL
+
++/* unfortunately the reiserfs sd_attrs field is only 16 bits */
++#define REISERFS_IXUNLINK_FL (FS_IXUNLINK_FL >> 16)
++#define REISERFS_BARRIER_FL (FS_BARRIER_FL >> 16)
++#define REISERFS_COW_FL (FS_COW_FL >> 16)
++
+ /* persistent flags that file inherits from the parent directory */
+ #define REISERFS_INHERIT_MASK ( REISERFS_IMMUTABLE_FL | \
+ REISERFS_SYNC_FL | \
+@@ -1557,6 +1563,9 @@ struct stat_data_v1 {
+ REISERFS_COMPR_FL | \
+ REISERFS_NOTAIL_FL )
+
++#define REISERFS_FL_USER_VISIBLE 0x80FF
++#define REISERFS_FL_USER_MODIFIABLE 0x80FF
++
+ /* Stat Data on disk (reiserfs version of UFS disk inode minus the
+ address blocks) */
+ struct stat_data {
+@@ -2647,6 +2656,7 @@ static inline void reiserfs_update_sd(st
+ void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode);
+ void i_attrs_to_sd_attrs(struct inode *inode, __u16 * sd_attrs);
+ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr);
++int reiserfs_sync_flags(struct inode *inode, int, int);
+
+ int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len);
+
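Because the reiserfs on-disk attribute word is only 16 bits wide, the reiserfs.h hunk above maps the new 32-bit generic flags into it by shifting right by 16 (REISERFS_IXUNLINK_FL, REISERFS_BARRIER_FL, REISERFS_COW_FL). With the FS_* values this same patch adds to include/linux/fs.h (0x08000000, 0x04000000, 0x20000000), the shifted bits are 0x0800, 0x0400 and 0x2000, all of which fit in a __u16. A quick compile-time check of that arithmetic; only the demo main() is invented, the constants are copied from the patch.

#include <stdint.h>
#include <stdio.h>

/* Values from the include/linux/fs.h hunk of this patch. */
#define FS_IXUNLINK_FL	0x08000000	/* Immutable invert on unlink */
#define FS_BARRIER_FL	0x04000000	/* Barrier for chroot() */
#define FS_COW_FL	0x20000000	/* Copy on Write marker */

/* The reiserfs sd_attrs field is only 16 bits, hence the >> 16 mapping. */
#define REISERFS_IXUNLINK_FL	(FS_IXUNLINK_FL >> 16)
#define REISERFS_BARRIER_FL	(FS_BARRIER_FL >> 16)
#define REISERFS_COW_FL		(FS_COW_FL >> 16)

/* C11 static assertions: every shifted value fits in a __u16. */
_Static_assert(REISERFS_IXUNLINK_FL == 0x0800, "IXUNLINK fits in 16 bits");
_Static_assert(REISERFS_BARRIER_FL  == 0x0400, "BARRIER fits in 16 bits");
_Static_assert(REISERFS_COW_FL      == 0x2000, "COW fits in 16 bits");

int main(void)
{
	printf("IXUNLINK=%#06x BARRIER=%#06x COW=%#06x\n",
	       (unsigned)REISERFS_IXUNLINK_FL,
	       (unsigned)REISERFS_BARRIER_FL,
	       (unsigned)REISERFS_COW_FL);
	return 0;
}
+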
+diff -NurpP --minimal linux-3.6.10/fs/reiserfs/super.c linux-3.6.10-vs2.3.4.6/fs/reiserfs/super.c
+--- linux-3.6.10/fs/reiserfs/super.c 2012-12-11 11:36:58.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/reiserfs/super.c 2012-12-08 00:36:33.000000000 +0000
+@@ -1020,6 +1020,14 @@ static int reiserfs_parse_options(struct
+ {"user_xattr",.setmask = 1 << REISERFS_UNSUPPORTED_OPT},
+ {"nouser_xattr",.clrmask = 1 << REISERFS_UNSUPPORTED_OPT},
+ #endif
++#ifndef CONFIG_TAGGING_NONE
++ {"tagxid",.setmask = 1 << REISERFS_TAGGED},
++ {"tag",.setmask = 1 << REISERFS_TAGGED},
++ {"notag",.clrmask = 1 << REISERFS_TAGGED},
++#endif
++#ifdef CONFIG_PROPAGATE
++ {"tag",.arg_required = 'T',.values = NULL},
++#endif
+ #ifdef CONFIG_REISERFS_FS_POSIX_ACL
+ {"acl",.setmask = 1 << REISERFS_POSIXACL},
+ {"noacl",.clrmask = 1 << REISERFS_POSIXACL},
+@@ -1338,6 +1346,14 @@ static int reiserfs_remount(struct super
+ handle_quota_files(s, qf_names, &qfmt);
+ #endif
+
++ if ((mount_options & (1 << REISERFS_TAGGED)) &&
++ !(s->s_flags & MS_TAGGED)) {
++ reiserfs_warning(s, "super-vs01",
++ "reiserfs: tagging not permitted on remount.");
++ err = -EINVAL;
++ goto out_err;
++ }
++
+ handle_attrs(s);
+
+ /* Add options that are safe here */
+@@ -1831,6 +1847,10 @@ static int reiserfs_fill_super(struct su
+ goto error_unlocked;
+ }
+
++ /* map mount option tagxid */
++ if (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_TAGGED))
++ s->s_flags |= MS_TAGGED;
++
+ rs = SB_DISK_SUPER_BLOCK(s);
+ /* Let's do basic sanity check to verify that underlying device is not
+ smaller than the filesystem. If the check fails then abort and scream,
+diff -NurpP --minimal linux-3.6.10/fs/reiserfs/xattr.c linux-3.6.10-vs2.3.4.6/fs/reiserfs/xattr.c
+--- linux-3.6.10/fs/reiserfs/xattr.c 2012-10-04 13:27:41.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/reiserfs/xattr.c 2012-10-04 16:47:00.000000000 +0000
+@@ -40,6 +40,7 @@
+ #include <linux/errno.h>
+ #include <linux/gfp.h>
+ #include <linux/fs.h>
++#include <linux/mount.h>
+ #include <linux/file.h>
+ #include <linux/pagemap.h>
+ #include <linux/xattr.h>
+diff -NurpP --minimal linux-3.6.10/fs/stat.c linux-3.6.10-vs2.3.4.6/fs/stat.c
+--- linux-3.6.10/fs/stat.c 2012-10-04 13:27:41.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/stat.c 2012-10-04 16:47:00.000000000 +0000
+@@ -26,6 +26,7 @@ void generic_fillattr(struct inode *inod
+ stat->nlink = inode->i_nlink;
+ stat->uid = inode->i_uid;
+ stat->gid = inode->i_gid;
++ stat->tag = inode->i_tag;
+ stat->rdev = inode->i_rdev;
+ stat->size = i_size_read(inode);
+ stat->atime = inode->i_atime;
+diff -NurpP --minimal linux-3.6.10/fs/statfs.c linux-3.6.10-vs2.3.4.6/fs/statfs.c
+--- linux-3.6.10/fs/statfs.c 2012-07-22 21:39:42.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/statfs.c 2012-10-04 16:47:00.000000000 +0000
+@@ -7,6 +7,8 @@
+ #include <linux/statfs.h>
+ #include <linux/security.h>
+ #include <linux/uaccess.h>
++#include <linux/vs_base.h>
++#include <linux/vs_dlimit.h>
+ #include "internal.h"
+
+ static int flags_by_mnt(int mnt_flags)
+@@ -60,6 +62,8 @@ static int statfs_by_dentry(struct dentr
+ retval = dentry->d_sb->s_op->statfs(dentry, buf);
+ if (retval == 0 && buf->f_frsize == 0)
+ buf->f_frsize = buf->f_bsize;
++ if (!vx_check(0, VS_ADMIN|VS_WATCH))
++ vx_vsi_statfs(dentry->d_sb, buf);
+ return retval;
+ }
+
+diff -NurpP --minimal linux-3.6.10/fs/super.c linux-3.6.10-vs2.3.4.6/fs/super.c
+--- linux-3.6.10/fs/super.c 2012-10-04 13:27:41.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/super.c 2012-10-04 17:06:05.000000000 +0000
+@@ -34,6 +34,8 @@
+ #include <linux/cleancache.h>
+ #include <linux/fsnotify.h>
+ #include <linux/lockdep.h>
++#include <linux/magic.h>
++#include <linux/vs_context.h>
+ #include "internal.h"
+
+
+@@ -1148,6 +1150,13 @@ mount_fs(struct file_system_type *type,
+ WARN_ON(sb->s_bdi == &default_backing_dev_info);
+ sb->s_flags |= MS_BORN;
+
++ error = -EPERM;
++ if (!vx_capable(CAP_SYS_ADMIN, VXC_BINARY_MOUNT) &&
++ !sb->s_bdev &&
++ (sb->s_magic != PROC_SUPER_MAGIC) &&
++ (sb->s_magic != DEVPTS_SUPER_MAGIC))
++ goto out_sb;
++
+ error = security_sb_kern_mount(sb, flags, secdata);
+ if (error)
+ goto out_sb;
+diff -NurpP --minimal linux-3.6.10/fs/sysfs/mount.c linux-3.6.10-vs2.3.4.6/fs/sysfs/mount.c
+--- linux-3.6.10/fs/sysfs/mount.c 2012-10-04 13:27:41.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/sysfs/mount.c 2012-10-04 16:47:00.000000000 +0000
+@@ -47,7 +47,7 @@ static int sysfs_fill_super(struct super
+
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+- sb->s_magic = SYSFS_MAGIC;
++ sb->s_magic = SYSFS_SUPER_MAGIC;
+ sb->s_op = &sysfs_ops;
+ sb->s_time_gran = 1;
+
+diff -NurpP --minimal linux-3.6.10/fs/utimes.c linux-3.6.10-vs2.3.4.6/fs/utimes.c
+--- linux-3.6.10/fs/utimes.c 2012-07-22 21:39:42.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/utimes.c 2012-10-04 16:47:00.000000000 +0000
+@@ -8,6 +8,8 @@
+ #include <linux/stat.h>
+ #include <linux/utime.h>
+ #include <linux/syscalls.h>
++#include <linux/mount.h>
++#include <linux/vs_cowbl.h>
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
+
+@@ -52,12 +54,18 @@ static int utimes_common(struct path *pa
+ {
+ int error;
+ struct iattr newattrs;
+- struct inode *inode = path->dentry->d_inode;
++ struct inode *inode;
+
+ error = mnt_want_write(path->mnt);
+ if (error)
+ goto out;
+
++ error = cow_check_and_break(path);
++ if (error)
++ goto mnt_drop_write_and_out;
++
++ inode = path->dentry->d_inode;
++
+ if (times && times[0].tv_nsec == UTIME_NOW &&
+ times[1].tv_nsec == UTIME_NOW)
+ times = NULL;
+diff -NurpP --minimal linux-3.6.10/fs/xattr.c linux-3.6.10-vs2.3.4.6/fs/xattr.c
+--- linux-3.6.10/fs/xattr.c 2012-10-04 13:27:44.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/xattr.c 2012-10-04 16:47:00.000000000 +0000
+@@ -20,6 +20,7 @@
+ #include <linux/fsnotify.h>
+ #include <linux/audit.h>
+ #include <linux/vmalloc.h>
++#include <linux/mount.h>
+
+ #include <asm/uaccess.h>
+
+@@ -51,7 +52,7 @@ xattr_permission(struct inode *inode, co
+ * The trusted.* namespace can only be accessed by privileged users.
+ */
+ if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN)) {
+- if (!capable(CAP_SYS_ADMIN))
++ if (!vx_capable(CAP_SYS_ADMIN, VXC_FS_TRUSTED))
+ return (mask & MAY_WRITE) ? -EPERM : -ENODATA;
+ return 0;
+ }
+diff -NurpP --minimal linux-3.6.10/fs/xfs/xfs_dinode.h linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_dinode.h
+--- linux-3.6.10/fs/xfs/xfs_dinode.h 2012-10-04 13:27:44.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_dinode.h 2012-10-04 16:47:00.000000000 +0000
+@@ -51,7 +51,9 @@ typedef struct xfs_dinode {
+ __be32 di_nlink; /* number of links to file */
+ __be16 di_projid_lo; /* lower part of owner's project id */
+ __be16 di_projid_hi; /* higher part owner's project id */
+- __u8 di_pad[6]; /* unused, zeroed space */
++ __u8 di_pad[2]; /* unused, zeroed space */
++ __be16 di_tag; /* context tagging */
++ __be16 di_vflags; /* vserver specific flags */
+ __be16 di_flushiter; /* incremented on flush */
+ xfs_timestamp_t di_atime; /* time last accessed */
+ xfs_timestamp_t di_mtime; /* time last modified */
+@@ -184,6 +186,8 @@ static inline void xfs_dinode_put_rdev(s
+ #define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */
+ #define XFS_DIFLAG_NODEFRAG_BIT 13 /* do not reorganize/defragment */
+ #define XFS_DIFLAG_FILESTREAM_BIT 14 /* use filestream allocator */
++#define XFS_DIFLAG_IXUNLINK_BIT 15 /* Immutable invert on unlink */
++
+ #define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT)
+ #define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT)
+ #define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT)
+@@ -199,6 +203,7 @@ static inline void xfs_dinode_put_rdev(s
+ #define XFS_DIFLAG_EXTSZINHERIT (1 << XFS_DIFLAG_EXTSZINHERIT_BIT)
+ #define XFS_DIFLAG_NODEFRAG (1 << XFS_DIFLAG_NODEFRAG_BIT)
+ #define XFS_DIFLAG_FILESTREAM (1 << XFS_DIFLAG_FILESTREAM_BIT)
++#define XFS_DIFLAG_IXUNLINK (1 << XFS_DIFLAG_IXUNLINK_BIT)
+
+ #ifdef CONFIG_XFS_RT
+ #define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME)
+@@ -211,6 +216,10 @@ static inline void xfs_dinode_put_rdev(s
+ XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \
+ XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \
+ XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS | XFS_DIFLAG_EXTSIZE | \
+- XFS_DIFLAG_EXTSZINHERIT | XFS_DIFLAG_NODEFRAG | XFS_DIFLAG_FILESTREAM)
++ XFS_DIFLAG_EXTSZINHERIT | XFS_DIFLAG_NODEFRAG | XFS_DIFLAG_FILESTREAM | \
++ XFS_DIFLAG_IXUNLINK)
++
++#define XFS_DIVFLAG_BARRIER 0x01
++#define XFS_DIVFLAG_COW 0x02
+
+ #endif /* __XFS_DINODE_H__ */
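+
The xfs_dinode.h hunk above carves di_tag and di_vflags out of the old 6-byte di_pad area (2 pad bytes plus two __be16 fields), so the on-disk inode layout keeps its size; the same split is applied to the in-core xfs_icdinode later in the patch. A toy size check follows; pad_before and pad_after are invented stand-ins for just the padding region, not the real struct xfs_dinode, and plain uint16_t is used where the on-disk fields are big-endian.

#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for the padding region of xfs_dinode, before and after. */
struct pad_before { uint8_t di_pad[6]; };
struct pad_after  { uint8_t di_pad[2]; uint16_t di_tag; uint16_t di_vflags; };

/* The repacking must not change the on-disk layout: 2 + 2 + 2 == 6 bytes. */
_Static_assert(sizeof(struct pad_before) == sizeof(struct pad_after),
	       "di_tag/di_vflags fit exactly into the old padding");

int main(void)
{
	printf("before: %zu bytes, after: %zu bytes\n",
	       sizeof(struct pad_before), sizeof(struct pad_after));
	return 0;
}
+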
+diff -NurpP --minimal linux-3.6.10/fs/xfs/xfs_fs.h linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_fs.h
+--- linux-3.6.10/fs/xfs/xfs_fs.h 2011-10-24 16:45:31.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_fs.h 2012-10-04 16:47:00.000000000 +0000
+@@ -67,6 +67,9 @@ struct fsxattr {
+ #define XFS_XFLAG_EXTSZINHERIT 0x00001000 /* inherit inode extent size */
+ #define XFS_XFLAG_NODEFRAG 0x00002000 /* do not defragment */
+ #define XFS_XFLAG_FILESTREAM 0x00004000 /* use filestream allocator */
++#define XFS_XFLAG_IXUNLINK 0x00008000 /* immutable invert on unlink */
++#define XFS_XFLAG_BARRIER 0x10000000 /* chroot() barrier */
++#define XFS_XFLAG_COW 0x20000000 /* copy on write mark */
+ #define XFS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */
+
+ /*
+@@ -302,7 +305,8 @@ typedef struct xfs_bstat {
+ #define bs_projid bs_projid_lo /* (previously just bs_projid) */
+ __u16 bs_forkoff; /* inode fork offset in bytes */
+ __u16 bs_projid_hi; /* higher part of project id */
+- unsigned char bs_pad[10]; /* pad space, unused */
++ unsigned char bs_pad[8]; /* pad space, unused */
++ __u16 bs_tag; /* context tagging */
+ __u32 bs_dmevmask; /* DMIG event mask */
+ __u16 bs_dmstate; /* DMIG state info */
+ __u16 bs_aextents; /* attribute number of extents */
+diff -NurpP --minimal linux-3.6.10/fs/xfs/xfs_ialloc.c linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_ialloc.c
+--- linux-3.6.10/fs/xfs/xfs_ialloc.c 2012-10-04 13:27:44.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_ialloc.c 2012-10-04 16:47:00.000000000 +0000
+@@ -37,7 +37,6 @@
+ #include "xfs_error.h"
+ #include "xfs_bmap.h"
+
+-
+ /*
+ * Allocation group level functions.
+ */
+diff -NurpP --minimal linux-3.6.10/fs/xfs/xfs_inode.c linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_inode.c
+--- linux-3.6.10/fs/xfs/xfs_inode.c 2012-10-04 13:27:44.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_inode.c 2012-11-16 21:43:16.000000000 +0000
+@@ -16,6 +16,7 @@
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+ #include <linux/log2.h>
++#include <linux/vs_tag.h>
+
+ #include "xfs.h"
+ #include "xfs_fs.h"
+@@ -563,15 +564,25 @@ xfs_iformat_btree(
+ STATIC void
+ xfs_dinode_from_disk(
+ xfs_icdinode_t *to,
+- xfs_dinode_t *from)
++ xfs_dinode_t *from,
++ int tagged)
+ {
++ uint32_t uid, gid, tag;
++
+ to->di_magic = be16_to_cpu(from->di_magic);
+ to->di_mode = be16_to_cpu(from->di_mode);
+ to->di_version = from ->di_version;
+ to->di_format = from->di_format;
+ to->di_onlink = be16_to_cpu(from->di_onlink);
+- to->di_uid = be32_to_cpu(from->di_uid);
+- to->di_gid = be32_to_cpu(from->di_gid);
++
++ uid = be32_to_cpu(from->di_uid);
++ gid = be32_to_cpu(from->di_gid);
++ tag = be16_to_cpu(from->di_tag);
++
++ to->di_uid = INOTAG_UID(tagged, uid, gid);
++ to->di_gid = INOTAG_GID(tagged, uid, gid);
++ to->di_tag = INOTAG_TAG(tagged, uid, gid, tag);
++
+ to->di_nlink = be32_to_cpu(from->di_nlink);
+ to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
+ to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
+@@ -593,21 +604,26 @@ xfs_dinode_from_disk(
+ to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
+ to->di_dmstate = be16_to_cpu(from->di_dmstate);
+ to->di_flags = be16_to_cpu(from->di_flags);
++ to->di_vflags = be16_to_cpu(from->di_vflags);
+ to->di_gen = be32_to_cpu(from->di_gen);
+ }
+
+ void
+ xfs_dinode_to_disk(
+ xfs_dinode_t *to,
+- xfs_icdinode_t *from)
++ xfs_icdinode_t *from,
++ int tagged)
+ {
+ to->di_magic = cpu_to_be16(from->di_magic);
+ to->di_mode = cpu_to_be16(from->di_mode);
+ to->di_version = from ->di_version;
+ to->di_format = from->di_format;
+ to->di_onlink = cpu_to_be16(from->di_onlink);
+- to->di_uid = cpu_to_be32(from->di_uid);
+- to->di_gid = cpu_to_be32(from->di_gid);
++
++ to->di_uid = cpu_to_be32(TAGINO_UID(tagged, from->di_uid, from->di_tag));
++ to->di_gid = cpu_to_be32(TAGINO_GID(tagged, from->di_gid, from->di_tag));
++ to->di_tag = cpu_to_be16(TAGINO_TAG(tagged, from->di_tag));
++
+ to->di_nlink = cpu_to_be32(from->di_nlink);
+ to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
+ to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
+@@ -629,12 +645,14 @@ xfs_dinode_to_disk(
+ to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
+ to->di_dmstate = cpu_to_be16(from->di_dmstate);
+ to->di_flags = cpu_to_be16(from->di_flags);
++ to->di_vflags = cpu_to_be16(from->di_vflags);
+ to->di_gen = cpu_to_be32(from->di_gen);
+ }
+
+ STATIC uint
+ _xfs_dic2xflags(
+- __uint16_t di_flags)
++ __uint16_t di_flags,
++ __uint16_t di_vflags)
+ {
+ uint flags = 0;
+
+@@ -645,6 +663,8 @@ _xfs_dic2xflags(
+ flags |= XFS_XFLAG_PREALLOC;
+ if (di_flags & XFS_DIFLAG_IMMUTABLE)
+ flags |= XFS_XFLAG_IMMUTABLE;
++ if (di_flags & XFS_DIFLAG_IXUNLINK)
++ flags |= XFS_XFLAG_IXUNLINK;
+ if (di_flags & XFS_DIFLAG_APPEND)
+ flags |= XFS_XFLAG_APPEND;
+ if (di_flags & XFS_DIFLAG_SYNC)
+@@ -669,6 +689,10 @@ _xfs_dic2xflags(
+ flags |= XFS_XFLAG_FILESTREAM;
+ }
+
++ if (di_vflags & XFS_DIVFLAG_BARRIER)
++ flags |= FS_BARRIER_FL;
++ if (di_vflags & XFS_DIVFLAG_COW)
++ flags |= FS_COW_FL;
+ return flags;
+ }
+
+@@ -678,7 +702,7 @@ xfs_ip2xflags(
+ {
+ xfs_icdinode_t *dic = &ip->i_d;
+
+- return _xfs_dic2xflags(dic->di_flags) |
++ return _xfs_dic2xflags(dic->di_flags, dic->di_vflags) |
+ (XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
+ }
+
+@@ -686,7 +710,8 @@ uint
+ xfs_dic2xflags(
+ xfs_dinode_t *dip)
+ {
+- return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
++ return _xfs_dic2xflags(be16_to_cpu(dip->di_flags),
++ be16_to_cpu(dip->di_vflags)) |
+ (XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
+ }
+
+@@ -740,7 +765,8 @@ xfs_iread(
+ * Otherwise, just get the truly permanent information.
+ */
+ if (dip->di_mode) {
+- xfs_dinode_from_disk(&ip->i_d, dip);
++ xfs_dinode_from_disk(&ip->i_d, dip,
++ mp->m_flags & XFS_MOUNT_TAGGED);
+ error = xfs_iformat(ip, dip);
+ if (error) {
+ #ifdef DEBUG
+@@ -927,6 +953,7 @@ xfs_ialloc(
+ ASSERT(ip->i_d.di_nlink == nlink);
+ ip->i_d.di_uid = current_fsuid();
+ ip->i_d.di_gid = current_fsgid();
++ ip->i_d.di_tag = current_fstag(&ip->i_vnode);
+ xfs_set_projid(ip, prid);
+ memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
+
+@@ -986,6 +1013,7 @@ xfs_ialloc(
+ ip->i_d.di_dmevmask = 0;
+ ip->i_d.di_dmstate = 0;
+ ip->i_d.di_flags = 0;
++ ip->i_d.di_vflags = 0;
+ flags = XFS_ILOG_CORE;
+ switch (mode & S_IFMT) {
+ case S_IFIFO:
+@@ -1667,6 +1695,7 @@ xfs_ifree(
+ }
+ ip->i_d.di_mode = 0; /* mark incore inode as free */
+ ip->i_d.di_flags = 0;
++ ip->i_d.di_vflags = 0;
+ ip->i_d.di_dmevmask = 0;
+ ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
+ ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
+@@ -1833,6 +1862,7 @@ xfs_iroot_realloc(
+ return;
+ }
+
++#include <linux/vs_tag.h>
+
+ /*
+ * This is called when the amount of space needed for if_data
+@@ -2521,7 +2551,8 @@ xfs_iflush_int(
+ * because if the inode is dirty at all the core must
+ * be.
+ */
+- xfs_dinode_to_disk(dip, &ip->i_d);
++ xfs_dinode_to_disk(dip, &ip->i_d,
++ mp->m_flags & XFS_MOUNT_TAGGED);
+
+ /* Wrap, we never let the log put out DI_MAX_FLUSH */
+ if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
+diff -NurpP --minimal linux-3.6.10/fs/xfs/xfs_inode.h linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_inode.h
+--- linux-3.6.10/fs/xfs/xfs_inode.h 2012-10-04 13:27:44.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_inode.h 2012-10-04 16:47:00.000000000 +0000
+@@ -134,7 +134,9 @@ typedef struct xfs_icdinode {
+ __uint32_t di_nlink; /* number of links to file */
+ __uint16_t di_projid_lo; /* lower part of owner's project id */
+ __uint16_t di_projid_hi; /* higher part of owner's project id */
+- __uint8_t di_pad[6]; /* unused, zeroed space */
++ __uint8_t di_pad[2]; /* unused, zeroed space */
++ __uint16_t di_tag; /* context tagging */
++ __uint16_t di_vflags; /* vserver specific flags */
+ __uint16_t di_flushiter; /* incremented on flush */
+ xfs_ictimestamp_t di_atime; /* time last accessed */
+ xfs_ictimestamp_t di_mtime; /* time last modified */
+@@ -561,7 +563,7 @@ int xfs_imap_to_bp(struct xfs_mount *,
+ int xfs_iread(struct xfs_mount *, struct xfs_trans *,
+ struct xfs_inode *, uint);
+ void xfs_dinode_to_disk(struct xfs_dinode *,
+- struct xfs_icdinode *);
++ struct xfs_icdinode *, int);
+ void xfs_idestroy_fork(struct xfs_inode *, int);
+ void xfs_idata_realloc(struct xfs_inode *, int, int);
+ void xfs_iroot_realloc(struct xfs_inode *, int, int);
+diff -NurpP --minimal linux-3.6.10/fs/xfs/xfs_ioctl.c linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_ioctl.c
+--- linux-3.6.10/fs/xfs/xfs_ioctl.c 2012-10-04 13:27:44.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_ioctl.c 2012-10-04 16:47:00.000000000 +0000
+@@ -26,7 +26,7 @@
+ #include "xfs_bmap_btree.h"
+ #include "xfs_dinode.h"
+ #include "xfs_inode.h"
+-#include "xfs_ioctl.h"
++// #include "xfs_ioctl.h"
+ #include "xfs_rtalloc.h"
+ #include "xfs_itable.h"
+ #include "xfs_error.h"
+@@ -762,6 +762,10 @@ xfs_merge_ioc_xflags(
+ xflags |= XFS_XFLAG_IMMUTABLE;
+ else
+ xflags &= ~XFS_XFLAG_IMMUTABLE;
++ if (flags & FS_IXUNLINK_FL)
++ xflags |= XFS_XFLAG_IXUNLINK;
++ else
++ xflags &= ~XFS_XFLAG_IXUNLINK;
+ if (flags & FS_APPEND_FL)
+ xflags |= XFS_XFLAG_APPEND;
+ else
+@@ -790,6 +794,8 @@ xfs_di2lxflags(
+
+ if (di_flags & XFS_DIFLAG_IMMUTABLE)
+ flags |= FS_IMMUTABLE_FL;
++ if (di_flags & XFS_DIFLAG_IXUNLINK)
++ flags |= FS_IXUNLINK_FL;
+ if (di_flags & XFS_DIFLAG_APPEND)
+ flags |= FS_APPEND_FL;
+ if (di_flags & XFS_DIFLAG_SYNC)
+@@ -850,6 +856,8 @@ xfs_set_diflags(
+ di_flags = (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);
+ if (xflags & XFS_XFLAG_IMMUTABLE)
+ di_flags |= XFS_DIFLAG_IMMUTABLE;
++ if (xflags & XFS_XFLAG_IXUNLINK)
++ di_flags |= XFS_DIFLAG_IXUNLINK;
+ if (xflags & XFS_XFLAG_APPEND)
+ di_flags |= XFS_DIFLAG_APPEND;
+ if (xflags & XFS_XFLAG_SYNC)
+@@ -892,6 +900,10 @@ xfs_diflags_to_linux(
+ inode->i_flags |= S_IMMUTABLE;
+ else
+ inode->i_flags &= ~S_IMMUTABLE;
++ if (xflags & XFS_XFLAG_IXUNLINK)
++ inode->i_flags |= S_IXUNLINK;
++ else
++ inode->i_flags &= ~S_IXUNLINK;
+ if (xflags & XFS_XFLAG_APPEND)
+ inode->i_flags |= S_APPEND;
+ else
+@@ -1396,10 +1408,18 @@ xfs_file_ioctl(
+ case XFS_IOC_FSGETXATTRA:
+ return xfs_ioc_fsgetxattr(ip, 1, arg);
+ case XFS_IOC_FSSETXATTR:
++ if (IS_BARRIER(inode)) {
++ vxwprintk_task(1, "messing with the barrier.");
++ return -XFS_ERROR(EACCES);
++ }
+ return xfs_ioc_fssetxattr(ip, filp, arg);
+ case XFS_IOC_GETXFLAGS:
+ return xfs_ioc_getxflags(ip, arg);
+ case XFS_IOC_SETXFLAGS:
++ if (IS_BARRIER(inode)) {
++ vxwprintk_task(1, "messing with the barrier.");
++ return -XFS_ERROR(EACCES);
++ }
+ return xfs_ioc_setxflags(ip, filp, arg);
+
+ case XFS_IOC_FSSETDM: {
+diff -NurpP --minimal linux-3.6.10/fs/xfs/xfs_ioctl.h linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_ioctl.h
+--- linux-3.6.10/fs/xfs/xfs_ioctl.h 2011-10-24 16:45:31.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_ioctl.h 2012-10-04 16:47:00.000000000 +0000
+@@ -70,6 +70,12 @@ xfs_handle_to_dentry(
+ void __user *uhandle,
+ u32 hlen);
+
++extern int
++xfs_sync_flags(
++ struct inode *inode,
++ int flags,
++ int vflags);
++
+ extern long
+ xfs_file_ioctl(
+ struct file *filp,
+diff -NurpP --minimal linux-3.6.10/fs/xfs/xfs_iops.c linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_iops.c
+--- linux-3.6.10/fs/xfs/xfs_iops.c 2012-10-04 13:27:44.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_iops.c 2012-11-16 21:14:53.000000000 +0000
+@@ -28,6 +28,7 @@
+ #include "xfs_bmap_btree.h"
+ #include "xfs_dinode.h"
+ #include "xfs_inode.h"
++#include "xfs_ioctl.h"
+ #include "xfs_bmap.h"
+ #include "xfs_rtalloc.h"
+ #include "xfs_error.h"
+@@ -46,6 +47,7 @@
+ #include <linux/security.h>
+ #include <linux/fiemap.h>
+ #include <linux/slab.h>
++#include <linux/vs_tag.h>
+
+ static int
+ xfs_initxattrs(
+@@ -421,6 +423,7 @@ xfs_vn_getattr(
+ stat->nlink = ip->i_d.di_nlink;
+ stat->uid = ip->i_d.di_uid;
+ stat->gid = ip->i_d.di_gid;
++ stat->tag = ip->i_d.di_tag;
+ stat->ino = ip->i_ino;
+ stat->atime = inode->i_atime;
+ stat->mtime = inode->i_mtime;
+@@ -1033,6 +1036,7 @@ static const struct inode_operations xfs
+ .listxattr = xfs_vn_listxattr,
+ .fiemap = xfs_vn_fiemap,
+ .update_time = xfs_vn_update_time,
++ .sync_flags = xfs_sync_flags,
+ };
+
+ static const struct inode_operations xfs_dir_inode_operations = {
+@@ -1059,6 +1063,7 @@ static const struct inode_operations xfs
+ .removexattr = generic_removexattr,
+ .listxattr = xfs_vn_listxattr,
+ .update_time = xfs_vn_update_time,
++ .sync_flags = xfs_sync_flags,
+ };
+
+ static const struct inode_operations xfs_dir_ci_inode_operations = {
+@@ -1110,6 +1115,10 @@ xfs_diflags_to_iflags(
+ inode->i_flags |= S_IMMUTABLE;
+ else
+ inode->i_flags &= ~S_IMMUTABLE;
++ if (ip->i_d.di_flags & XFS_DIFLAG_IXUNLINK)
++ inode->i_flags |= S_IXUNLINK;
++ else
++ inode->i_flags &= ~S_IXUNLINK;
+ if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
+ inode->i_flags |= S_APPEND;
+ else
+@@ -1122,6 +1131,15 @@ xfs_diflags_to_iflags(
+ inode->i_flags |= S_NOATIME;
+ else
+ inode->i_flags &= ~S_NOATIME;
++
++ if (ip->i_d.di_vflags & XFS_DIVFLAG_BARRIER)
++ inode->i_vflags |= V_BARRIER;
++ else
++ inode->i_vflags &= ~V_BARRIER;
++ if (ip->i_d.di_vflags & XFS_DIVFLAG_COW)
++ inode->i_vflags |= V_COW;
++ else
++ inode->i_vflags &= ~V_COW;
+ }
+
+ /*
+@@ -1153,6 +1171,7 @@ xfs_setup_inode(
+ set_nlink(inode, ip->i_d.di_nlink);
+ inode->i_uid = ip->i_d.di_uid;
+ inode->i_gid = ip->i_d.di_gid;
++ inode->i_tag = ip->i_d.di_tag;
+
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFBLK:
+diff -NurpP --minimal linux-3.6.10/fs/xfs/xfs_itable.c linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_itable.c
+--- linux-3.6.10/fs/xfs/xfs_itable.c 2012-10-04 13:27:44.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_itable.c 2012-10-04 16:47:00.000000000 +0000
+@@ -96,6 +96,7 @@ xfs_bulkstat_one_int(
+ buf->bs_mode = dic->di_mode;
+ buf->bs_uid = dic->di_uid;
+ buf->bs_gid = dic->di_gid;
++ buf->bs_tag = dic->di_tag;
+ buf->bs_size = dic->di_size;
+ buf->bs_atime.tv_sec = dic->di_atime.t_sec;
+ buf->bs_atime.tv_nsec = dic->di_atime.t_nsec;
+diff -NurpP --minimal linux-3.6.10/fs/xfs/xfs_linux.h linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_linux.h
+--- linux-3.6.10/fs/xfs/xfs_linux.h 2011-10-24 16:45:31.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_linux.h 2012-10-04 16:47:00.000000000 +0000
+@@ -121,6 +121,7 @@
+
+ #define current_cpu() (raw_smp_processor_id())
+ #define current_pid() (current->pid)
++#define current_fstag(vp) (dx_current_fstag((vp)->i_sb))
+ #define current_test_flags(f) (current->flags & (f))
+ #define current_set_flags_nested(sp, f) \
+ (*(sp) = current->flags, current->flags |= (f))
+diff -NurpP --minimal linux-3.6.10/fs/xfs/xfs_log_recover.c linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_log_recover.c
+--- linux-3.6.10/fs/xfs/xfs_log_recover.c 2012-12-11 11:36:58.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_log_recover.c 2012-12-08 00:36:33.000000000 +0000
+@@ -2359,7 +2359,8 @@ xlog_recover_inode_pass2(
+ }
+
+ /* The core is in in-core format */
+- xfs_dinode_to_disk(dip, item->ri_buf[1].i_addr);
++ xfs_dinode_to_disk(dip, item->ri_buf[1].i_addr,
++ mp->m_flags & XFS_MOUNT_TAGGED);
+
+ /* the rest is in on-disk format */
+ if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) {
+diff -NurpP --minimal linux-3.6.10/fs/xfs/xfs_mount.h linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_mount.h
+--- linux-3.6.10/fs/xfs/xfs_mount.h 2012-10-04 13:27:44.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_mount.h 2012-10-04 16:47:00.000000000 +0000
+@@ -251,6 +251,7 @@ typedef struct xfs_mount {
+ allocator */
+ #define XFS_MOUNT_NOATTR2 (1ULL << 25) /* disable use of attr2 format */
+
++#define XFS_MOUNT_TAGGED (1ULL << 31) /* context tagging */
+
+ /*
+ * Default minimum read and write sizes.
+diff -NurpP --minimal linux-3.6.10/fs/xfs/xfs_super.c linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_super.c
+--- linux-3.6.10/fs/xfs/xfs_super.c 2012-10-04 13:27:44.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_super.c 2012-10-04 16:47:00.000000000 +0000
+@@ -112,6 +112,9 @@ mempool_t *xfs_ioend_pool;
+ #define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed logging disabled */
+ #define MNTOPT_DISCARD "discard" /* Discard unused blocks */
+ #define MNTOPT_NODISCARD "nodiscard" /* Do not discard unused blocks */
++#define MNTOPT_TAGXID "tagxid" /* context tagging for inodes */
++#define MNTOPT_TAGGED "tag" /* context tagging for inodes */
++#define MNTOPT_NOTAGTAG "notag" /* do not use context tagging */
+
+ /*
+ * Table driven mount option parser.
+@@ -120,10 +123,14 @@ mempool_t *xfs_ioend_pool;
+ * in the future, too.
+ */
+ enum {
++ Opt_tag, Opt_notag,
+ Opt_barrier, Opt_nobarrier, Opt_err
+ };
+
+ static const match_table_t tokens = {
++ {Opt_tag, "tagxid"},
++ {Opt_tag, "tag"},
++ {Opt_notag, "notag"},
+ {Opt_barrier, "barrier"},
+ {Opt_nobarrier, "nobarrier"},
+ {Opt_err, NULL}
+@@ -371,6 +378,19 @@ xfs_parseargs(
+ } else if (!strcmp(this_char, "irixsgid")) {
+ xfs_warn(mp,
+ "irixsgid is now a sysctl(2) variable, option is deprecated.");
++#ifndef CONFIG_TAGGING_NONE
++ } else if (!strcmp(this_char, MNTOPT_TAGGED)) {
++ mp->m_flags |= XFS_MOUNT_TAGGED;
++ } else if (!strcmp(this_char, MNTOPT_NOTAGTAG)) {
++ mp->m_flags &= ~XFS_MOUNT_TAGGED;
++ } else if (!strcmp(this_char, MNTOPT_TAGXID)) {
++ mp->m_flags |= XFS_MOUNT_TAGGED;
++#endif
++#ifdef CONFIG_PROPAGATE
++ } else if (!strcmp(this_char, MNTOPT_TAGGED)) {
++ /* use value */
++ mp->m_flags |= XFS_MOUNT_TAGGED;
++#endif
+ } else {
+ xfs_warn(mp, "unknown mount option [%s].", this_char);
+ return EINVAL;
+@@ -1056,6 +1076,16 @@ xfs_fs_remount(
+ case Opt_nobarrier:
+ mp->m_flags &= ~XFS_MOUNT_BARRIER;
+ break;
++ case Opt_tag:
++ if (!(sb->s_flags & MS_TAGGED)) {
++ printk(KERN_INFO
++ "XFS: %s: tagging not permitted on remount.\n",
++ sb->s_id);
++ return -EINVAL;
++ }
++ break;
++ case Opt_notag:
++ break;
+ default:
+ /*
+ * Logically we would return an error here to prevent
+@@ -1275,6 +1305,9 @@ xfs_fs_fill_super(
+ if (error)
+ goto out_free_sb;
+
++ if (mp->m_flags & XFS_MOUNT_TAGGED)
++ sb->s_flags |= MS_TAGGED;
++
+ /*
+ * we must configure the block size in the superblock before we run the
+ * full mount process as the mount process can lookup and cache inodes.
+diff -NurpP --minimal linux-3.6.10/fs/xfs/xfs_vnodeops.c linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_vnodeops.c
+--- linux-3.6.10/fs/xfs/xfs_vnodeops.c 2012-10-04 13:27:44.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/fs/xfs/xfs_vnodeops.c 2012-10-04 16:47:00.000000000 +0000
+@@ -103,6 +103,77 @@ xfs_readlink_bmap(
+ return error;
+ }
+
++
++STATIC void
++xfs_get_inode_flags(
++ xfs_inode_t *ip)
++{
++ struct inode *inode = VFS_I(ip);
++ unsigned int flags = inode->i_flags;
++ unsigned int vflags = inode->i_vflags;
++
++ if (flags & S_IMMUTABLE)
++ ip->i_d.di_flags |= XFS_DIFLAG_IMMUTABLE;
++ else
++ ip->i_d.di_flags &= ~XFS_DIFLAG_IMMUTABLE;
++ if (flags & S_IXUNLINK)
++ ip->i_d.di_flags |= XFS_DIFLAG_IXUNLINK;
++ else
++ ip->i_d.di_flags &= ~XFS_DIFLAG_IXUNLINK;
++
++ if (vflags & V_BARRIER)
++ ip->i_d.di_vflags |= XFS_DIVFLAG_BARRIER;
++ else
++ ip->i_d.di_vflags &= ~XFS_DIVFLAG_BARRIER;
++ if (vflags & V_COW)
++ ip->i_d.di_vflags |= XFS_DIVFLAG_COW;
++ else
++ ip->i_d.di_vflags &= ~XFS_DIVFLAG_COW;
++}
++
++int
++xfs_sync_flags(
++ struct inode *inode,
++ int flags,
++ int vflags)
++{
++ struct xfs_inode *ip = XFS_I(inode);
++ struct xfs_mount *mp = ip->i_mount;
++ struct xfs_trans *tp;
++ unsigned int lock_flags = 0;
++ int code;
++
++ tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
++ code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
++ if (code)
++ goto error_out;
++
++ xfs_ilock(ip, XFS_ILOCK_EXCL);
++ xfs_trans_ijoin(tp, ip, 0);
++
++ inode->i_flags = flags;
++ inode->i_vflags = vflags;
++ xfs_get_inode_flags(ip);
++
++ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
++ xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
++
++ XFS_STATS_INC(xs_ig_attrchg);
++
++ if (mp->m_flags & XFS_MOUNT_WSYNC)
++ xfs_trans_set_sync(tp);
++ code = xfs_trans_commit(tp, 0);
++ xfs_iunlock(ip, XFS_ILOCK_EXCL);
++ return code;
++
++error_out:
++ xfs_trans_cancel(tp, 0);
++ if (lock_flags)
++ xfs_iunlock(ip, XFS_ILOCK_EXCL);
++ return code;
++}
++
++
+ int
+ xfs_readlink(
+ xfs_inode_t *ip,
+diff -NurpP --minimal linux-3.6.10/include/linux/Kbuild linux-3.6.10-vs2.3.4.6/include/linux/Kbuild
+--- linux-3.6.10/include/linux/Kbuild 2012-10-04 13:27:45.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/Kbuild 2012-10-04 16:47:00.000000000 +0000
+@@ -18,6 +18,7 @@ header-y += netfilter_bridge/
+ header-y += netfilter_ipv4/
+ header-y += netfilter_ipv6/
+ header-y += usb/
++header-y += vserver/
+ header-y += wimax/
+
+ objhdr-y += version.h
+diff -NurpP --minimal linux-3.6.10/include/linux/capability.h linux-3.6.10-vs2.3.4.6/include/linux/capability.h
+--- linux-3.6.10/include/linux/capability.h 2012-07-22 21:39:43.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/capability.h 2012-10-04 16:47:00.000000000 +0000
+@@ -280,6 +280,7 @@ struct cpu_vfs_cap_data {
+ arbitrary SCSI commands */
+ /* Allow setting encryption key on loopback filesystem */
+ /* Allow setting zone reclaim policy */
++/* Allow the selection of a security context */
+
+ #define CAP_SYS_ADMIN 21
+
+@@ -366,7 +367,12 @@ struct cpu_vfs_cap_data {
+
+ #define CAP_LAST_CAP CAP_BLOCK_SUSPEND
+
+-#define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP)
++/* Allow context manipulations */
++/* Allow changing context info on files */
++
++#define CAP_CONTEXT 63
++
++#define cap_valid(x) ((x) >= 0 && ((x) <= CAP_LAST_CAP || (x) == CAP_CONTEXT))
+
+ /*
+ * Bit location of each capability (used by user-space library and kernel)
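+
The capability.h hunk reserves bit 63 as CAP_CONTEXT for context (xid) manipulation and widens cap_valid() so that 63 passes even though it lies beyond CAP_LAST_CAP. A stand-alone sketch of the resulting predicate follows; the numeric value of CAP_BLOCK_SUSPEND (36) is taken from the mainline 3.6 headers and is an assumption of this sketch, the rest mirrors the hunk.

#include <stdio.h>

#define CAP_BLOCK_SUSPEND 36	/* last upstream capability in 3.6 (assumed) */
#define CAP_LAST_CAP CAP_BLOCK_SUSPEND
#define CAP_CONTEXT 63		/* added by this patch for context manipulation */

/* cap_valid() as extended above: normal range, plus the CAP_CONTEXT slot. */
#define cap_valid(x) ((x) >= 0 && ((x) <= CAP_LAST_CAP || (x) == CAP_CONTEXT))

int main(void)
{
	printf("cap_valid(CAP_SYS_ADMIN=21): %d\n", cap_valid(21));	/* 1 */
	printf("cap_valid(40):               %d\n", cap_valid(40));	/* 0: beyond CAP_LAST_CAP */
	printf("cap_valid(CAP_CONTEXT=63):   %d\n", cap_valid(63));	/* 1: special-cased */
	return 0;
}
+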
+diff -NurpP --minimal linux-3.6.10/include/linux/cred.h linux-3.6.10-vs2.3.4.6/include/linux/cred.h
+--- linux-3.6.10/include/linux/cred.h 2012-07-22 21:39:43.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/cred.h 2012-10-04 16:47:00.000000000 +0000
+@@ -157,6 +157,7 @@ extern void exit_creds(struct task_struc
+ extern int copy_creds(struct task_struct *, unsigned long);
+ extern const struct cred *get_task_cred(struct task_struct *);
+ extern struct cred *cred_alloc_blank(void);
++extern struct cred *__prepare_creds(const struct cred *);
+ extern struct cred *prepare_creds(void);
+ extern struct cred *prepare_exec_creds(void);
+ extern int commit_creds(struct cred *);
+@@ -210,6 +211,31 @@ static inline void validate_process_cred
+ }
+ #endif
+
++static inline void set_cred_subscribers(struct cred *cred, int n)
++{
++#ifdef CONFIG_DEBUG_CREDENTIALS
++ atomic_set(&cred->subscribers, n);
++#endif
++}
++
++static inline int read_cred_subscribers(const struct cred *cred)
++{
++#ifdef CONFIG_DEBUG_CREDENTIALS
++ return atomic_read(&cred->subscribers);
++#else
++ return 0;
++#endif
++}
++
++static inline void alter_cred_subscribers(const struct cred *_cred, int n)
++{
++#ifdef CONFIG_DEBUG_CREDENTIALS
++ struct cred *cred = (struct cred *) _cred;
++
++ atomic_add(n, &cred->subscribers);
++#endif
++}
++
+ /**
+ * get_new_cred - Get a reference on a new set of credentials
+ * @cred: The new credentials to reference
+diff -NurpP --minimal linux-3.6.10/include/linux/devpts_fs.h linux-3.6.10-vs2.3.4.6/include/linux/devpts_fs.h
+--- linux-3.6.10/include/linux/devpts_fs.h 2008-12-24 23:26:37.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/devpts_fs.h 2012-10-04 16:47:00.000000000 +0000
+@@ -45,5 +45,4 @@ static inline void devpts_pty_kill(struc
+
+ #endif
+
+-
+ #endif /* _LINUX_DEVPTS_FS_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/fs.h linux-3.6.10-vs2.3.4.6/include/linux/fs.h
+--- linux-3.6.10/include/linux/fs.h 2012-10-04 13:27:45.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/fs.h 2012-10-04 16:47:00.000000000 +0000
+@@ -225,6 +225,9 @@ struct inodes_stat_t {
+ #define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
+ #define MS_I_VERSION (1<<23) /* Update inode I_version field */
+ #define MS_STRICTATIME (1<<24) /* Always perform atime updates */
++#define MS_TAGGED (1<<25) /* use generic inode tagging */
++#define MS_TAGID (1<<26) /* use specific tag for this mount */
++#define MS_NOTAGCHECK (1<<27) /* don't check tags */
+ #define MS_NOSEC (1<<28)
+ #define MS_BORN (1<<29)
+ #define MS_ACTIVE (1<<30)
+@@ -256,6 +259,14 @@ struct inodes_stat_t {
+ #define S_IMA 1024 /* Inode has an associated IMA struct */
+ #define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */
+ #define S_NOSEC 4096 /* no suid or xattr security attributes */
++#define S_IXUNLINK 8192 /* Immutable Invert on unlink */
++
++/* Linux-VServer related Inode flags */
++
++#define V_VALID 1
++#define V_XATTR 2
++#define V_BARRIER 4 /* Barrier for chroot() */
++#define V_COW 8 /* Copy on Write */
+
+ /*
+ * Note that nosuid etc flags are inode-specific: setting some file-system
+@@ -278,12 +289,15 @@ struct inodes_stat_t {
+ #define IS_DIRSYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \
+ ((inode)->i_flags & (S_SYNC|S_DIRSYNC)))
+ #define IS_MANDLOCK(inode) __IS_FLG(inode, MS_MANDLOCK)
+-#define IS_NOATIME(inode) __IS_FLG(inode, MS_RDONLY|MS_NOATIME)
+-#define IS_I_VERSION(inode) __IS_FLG(inode, MS_I_VERSION)
++#define IS_NOATIME(inode) __IS_FLG(inode, MS_RDONLY|MS_NOATIME)
++#define IS_I_VERSION(inode) __IS_FLG(inode, MS_I_VERSION)
++#define IS_TAGGED(inode) __IS_FLG(inode, MS_TAGGED)
+
+ #define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA)
+ #define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
+ #define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
++#define IS_IXUNLINK(inode) ((inode)->i_flags & S_IXUNLINK)
++#define IS_IXORUNLINK(inode) ((IS_IXUNLINK(inode) ? S_IMMUTABLE : 0) ^ IS_IMMUTABLE(inode))
+ #define IS_POSIXACL(inode) __IS_FLG(inode, MS_POSIXACL)
+
+ #define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
+@@ -294,6 +308,16 @@ struct inodes_stat_t {
+ #define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
+ #define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
+
++#define IS_BARRIER(inode) (S_ISDIR((inode)->i_mode) && ((inode)->i_vflags & V_BARRIER))
++
++#ifdef CONFIG_VSERVER_COWBL
++# define IS_COW(inode) (IS_IXUNLINK(inode) && IS_IMMUTABLE(inode))
++# define IS_COW_LINK(inode) (S_ISREG((inode)->i_mode) && ((inode)->i_nlink > 1))
++#else
++# define IS_COW(inode) (0)
++# define IS_COW_LINK(inode) (0)
++#endif
++
+ /* the read-only stuff doesn't really belong here, but any other place is
+ probably as bad and I don't want to create yet another include file. */
+
+@@ -380,11 +404,14 @@ struct inodes_stat_t {
+ #define FS_EXTENT_FL 0x00080000 /* Extents */
+ #define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */
+ #define FS_NOCOW_FL 0x00800000 /* Do not cow file */
++#define FS_IXUNLINK_FL 0x08000000 /* Immutable invert on unlink */
+ #define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */
+
+-#define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
+-#define FS_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
++#define FS_BARRIER_FL 0x04000000 /* Barrier for chroot() */
++#define FS_COW_FL 0x20000000 /* Copy on Write marker */
+
++#define FS_FL_USER_VISIBLE 0x0103DFFF /* User visible flags */
++#define FS_FL_USER_MODIFIABLE 0x010380FF /* User modifiable flags */
+
+ #define SYNC_FILE_RANGE_WAIT_BEFORE 1
+ #define SYNC_FILE_RANGE_WRITE 2
+@@ -472,6 +499,7 @@ typedef void (dio_iodone_t)(struct kiocb
+ #define ATTR_KILL_PRIV (1 << 14)
+ #define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */
+ #define ATTR_TIMES_SET (1 << 16)
++#define ATTR_TAG (1 << 17)
+
+ /*
+ * This is the Inode Attributes structure, used for notify_change(). It
+@@ -487,6 +515,7 @@ struct iattr {
+ umode_t ia_mode;
+ kuid_t ia_uid;
+ kgid_t ia_gid;
++ tag_t ia_tag;
+ loff_t ia_size;
+ struct timespec ia_atime;
+ struct timespec ia_mtime;
+@@ -500,6 +529,9 @@ struct iattr {
+ struct file *ia_file;
+ };
+
++#define ATTR_FLAG_BARRIER 512 /* Barrier for chroot() */
++#define ATTR_FLAG_IXUNLINK 1024 /* Immutable invert on unlink */
++
+ /*
+ * Includes for diskquotas.
+ */
+@@ -784,7 +816,9 @@ struct inode {
+ unsigned short i_opflags;
+ kuid_t i_uid;
+ kgid_t i_gid;
+- unsigned int i_flags;
++ tag_t i_tag;
++ unsigned short i_flags;
++ unsigned short i_vflags;
+
+ #ifdef CONFIG_FS_POSIX_ACL
+ struct posix_acl *i_acl;
+@@ -813,6 +847,7 @@ struct inode {
+ unsigned int __i_nlink;
+ };
+ dev_t i_rdev;
++ dev_t i_mdev;
+ loff_t i_size;
+ struct timespec i_atime;
+ struct timespec i_mtime;
+@@ -975,12 +1010,12 @@ static inline void i_gid_write(struct in
+
+ static inline unsigned iminor(const struct inode *inode)
+ {
+- return MINOR(inode->i_rdev);
++ return MINOR(inode->i_mdev);
+ }
+
+ static inline unsigned imajor(const struct inode *inode)
+ {
+- return MAJOR(inode->i_rdev);
++ return MAJOR(inode->i_mdev);
+ }
+
+ extern struct block_device *I_BDEV(struct inode *inode);
+@@ -1047,6 +1082,7 @@ struct file {
+ loff_t f_pos;
+ struct fown_struct f_owner;
+ const struct cred *f_cred;
++ xid_t f_xid;
+ struct file_ra_state f_ra;
+
+ u64 f_version;
+@@ -1194,6 +1230,7 @@ struct file_lock {
+ struct file *fl_file;
+ loff_t fl_start;
+ loff_t fl_end;
++ xid_t fl_xid;
+
+ struct fasync_struct * fl_fasync; /* for lease break notifications */
+ /* for lease breaks: */
+@@ -1829,6 +1866,7 @@ struct inode_operations {
+ ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
+ ssize_t (*listxattr) (struct dentry *, char *, size_t);
+ int (*removexattr) (struct dentry *, const char *);
++ int (*sync_flags) (struct inode *, int, int);
+ int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
+ u64 len);
+ int (*update_time)(struct inode *, struct timespec *, int);
+@@ -1850,6 +1888,7 @@ extern ssize_t vfs_readv(struct file *,
+ unsigned long, loff_t *);
+ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
+ unsigned long, loff_t *);
++ssize_t vfs_sendfile(struct file *, struct file *, loff_t *, size_t, loff_t);
+
+ struct super_operations {
+ struct inode *(*alloc_inode)(struct super_block *sb);
+@@ -2692,6 +2731,7 @@ extern int dcache_dir_open(struct inode
+ extern int dcache_dir_close(struct inode *, struct file *);
+ extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
+ extern int dcache_readdir(struct file *, void *, filldir_t);
++extern int dcache_readdir_filter(struct file *, void *, filldir_t, int (*)(struct dentry *));
+ extern int simple_setattr(struct dentry *, struct iattr *);
+ extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+ extern int simple_statfs(struct dentry *, struct kstatfs *);
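+
The fs.h hunks above add the S_IXUNLINK inode flag and the V_BARRIER/V_COW i_vflags, and define IS_IXORUNLINK() as immutable XOR ixunlink and, under CONFIG_VSERVER_COWBL, IS_COW() as immutable AND ixunlink. A small truth-table sketch of those two predicates follows; it takes a plain flags word instead of an inode, S_IXUNLINK (8192) comes from this hunk, and S_IMMUTABLE (8) is the mainline value, which is not shown here and is an assumption of the sketch.

#include <stdio.h>

#define S_IMMUTABLE	8	/* mainline fs.h value (assumed) */
#define S_IXUNLINK	8192	/* added by this patch */

#define IS_IMMUTABLE(f)		((f) & S_IMMUTABLE)
#define IS_IXUNLINK(f)		((f) & S_IXUNLINK)
/* As in the hunk: the immutable bit is flipped when ixunlink is set. */
#define IS_IXORUNLINK(f)	((IS_IXUNLINK(f) ? S_IMMUTABLE : 0) ^ IS_IMMUTABLE(f))
/* CONFIG_VSERVER_COWBL variant: copy-on-write break = immutable + ixunlink. */
#define IS_COW(f)		(IS_IXUNLINK(f) && IS_IMMUTABLE(f))

int main(void)
{
	unsigned int cases[] = { 0, S_IMMUTABLE, S_IXUNLINK, S_IMMUTABLE | S_IXUNLINK };

	for (unsigned int i = 0; i < 4; i++) {
		unsigned int f = cases[i];
		printf("immutable=%d ixunlink=%d -> ixorunlink=%d cow=%d\n",
		       !!IS_IMMUTABLE(f), !!IS_IXUNLINK(f),
		       !!IS_IXORUNLINK(f), !!IS_COW(f));
	}
	return 0;
}

Only the last case (both bits set) reports cow=1, and IS_IXORUNLINK() is true exactly when one of the two bits is set.
+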
+diff -NurpP --minimal linux-3.6.10/include/linux/gfs2_ondisk.h linux-3.6.10-vs2.3.4.6/include/linux/gfs2_ondisk.h
+--- linux-3.6.10/include/linux/gfs2_ondisk.h 2012-10-04 13:27:45.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/gfs2_ondisk.h 2012-10-04 16:54:29.000000000 +0000
+@@ -225,6 +225,9 @@ enum {
+ gfs2fl_Sync = 8,
+ gfs2fl_System = 9,
+ gfs2fl_TopLevel = 10,
++ gfs2fl_IXUnlink = 16,
++ gfs2fl_Barrier = 17,
++ gfs2fl_Cow = 18,
+ gfs2fl_TruncInProg = 29,
+ gfs2fl_InheritDirectio = 30,
+ gfs2fl_InheritJdata = 31,
+@@ -242,6 +245,9 @@ enum {
+ #define GFS2_DIF_SYNC 0x00000100
+ #define GFS2_DIF_SYSTEM 0x00000200 /* New in gfs2 */
+ #define GFS2_DIF_TOPDIR 0x00000400 /* New in gfs2 */
++#define GFS2_DIF_IXUNLINK 0x00010000
++#define GFS2_DIF_BARRIER 0x00020000
++#define GFS2_DIF_COW 0x00040000
+ #define GFS2_DIF_TRUNC_IN_PROG 0x20000000 /* New in gfs2 */
+ #define GFS2_DIF_INHERIT_DIRECTIO 0x40000000 /* only in gfs1 */
+ #define GFS2_DIF_INHERIT_JDATA 0x80000000
+diff -NurpP --minimal linux-3.6.10/include/linux/if_tun.h linux-3.6.10-vs2.3.4.6/include/linux/if_tun.h
+--- linux-3.6.10/include/linux/if_tun.h 2010-08-02 14:52:54.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/if_tun.h 2012-10-04 16:47:00.000000000 +0000
+@@ -53,6 +53,7 @@
+ #define TUNDETACHFILTER _IOW('T', 214, struct sock_fprog)
+ #define TUNGETVNETHDRSZ _IOR('T', 215, int)
+ #define TUNSETVNETHDRSZ _IOW('T', 216, int)
++#define TUNSETNID _IOW('T', 217, int)
+
+ /* TUNSETIFF ifr flags */
+ #define IFF_TUN 0x0001
+diff -NurpP --minimal linux-3.6.10/include/linux/init_task.h linux-3.6.10-vs2.3.4.6/include/linux/init_task.h
+--- linux-3.6.10/include/linux/init_task.h 2012-10-04 13:27:45.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/init_task.h 2012-10-04 16:47:00.000000000 +0000
+@@ -210,6 +210,10 @@ extern struct task_group root_task_group
+ INIT_TRACE_RECURSION \
+ INIT_TASK_RCU_PREEMPT(tsk) \
+ INIT_CPUSET_SEQ \
++ .xid = 0, \
++ .vx_info = NULL, \
++ .nid = 0, \
++ .nx_info = NULL, \
+ }
+
+
+diff -NurpP --minimal linux-3.6.10/include/linux/ipc.h linux-3.6.10-vs2.3.4.6/include/linux/ipc.h
+--- linux-3.6.10/include/linux/ipc.h 2012-03-19 18:47:28.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/ipc.h 2012-10-04 16:47:00.000000000 +0000
+@@ -91,6 +91,7 @@ struct kern_ipc_perm
+ key_t key;
+ uid_t uid;
+ gid_t gid;
++ xid_t xid;
+ uid_t cuid;
+ gid_t cgid;
+ umode_t mode;
+diff -NurpP --minimal linux-3.6.10/include/linux/ipc_namespace.h linux-3.6.10-vs2.3.4.6/include/linux/ipc_namespace.h
+--- linux-3.6.10/include/linux/ipc_namespace.h 2012-07-22 21:39:43.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/ipc_namespace.h 2012-10-04 16:47:00.000000000 +0000
+@@ -133,7 +133,8 @@ static inline int mq_init_ns(struct ipc_
+
+ #if defined(CONFIG_IPC_NS)
+ extern struct ipc_namespace *copy_ipcs(unsigned long flags,
+- struct task_struct *tsk);
++ struct ipc_namespace *old_ns,
++ struct user_namespace *user_ns);
+ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
+ {
+ if (ns)
+@@ -144,12 +145,13 @@ static inline struct ipc_namespace *get_
+ extern void put_ipc_ns(struct ipc_namespace *ns);
+ #else
+ static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
+- struct task_struct *tsk)
++ struct ipc_namespace *old_ns,
++ struct user_namespace *user_ns)
+ {
+ if (flags & CLONE_NEWIPC)
+ return ERR_PTR(-EINVAL);
+
+- return tsk->nsproxy->ipc_ns;
++ return old_ns;
+ }
+
+ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
+diff -NurpP --minimal linux-3.6.10/include/linux/loop.h linux-3.6.10-vs2.3.4.6/include/linux/loop.h
+--- linux-3.6.10/include/linux/loop.h 2012-01-09 15:14:58.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/loop.h 2012-10-04 16:47:00.000000000 +0000
+@@ -45,6 +45,7 @@ struct loop_device {
+ struct loop_func_table *lo_encryption;
+ __u32 lo_init[2];
+ uid_t lo_key_owner; /* Who set the key */
++ xid_t lo_xid;
+ int (*ioctl)(struct loop_device *, int cmd,
+ unsigned long arg);
+
+diff -NurpP --minimal linux-3.6.10/include/linux/magic.h linux-3.6.10-vs2.3.4.6/include/linux/magic.h
+--- linux-3.6.10/include/linux/magic.h 2012-05-21 16:07:31.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/magic.h 2012-10-04 16:47:00.000000000 +0000
+@@ -3,7 +3,7 @@
+
+ #define ADFS_SUPER_MAGIC 0xadf5
+ #define AFFS_SUPER_MAGIC 0xadff
+-#define AFS_SUPER_MAGIC 0x5346414F
++#define AFS_SUPER_MAGIC 0x5346414F
+ #define AUTOFS_SUPER_MAGIC 0x0187
+ #define CODA_SUPER_MAGIC 0x73757245
+ #define CRAMFS_MAGIC 0x28cd3d45 /* some random number */
+diff -NurpP --minimal linux-3.6.10/include/linux/major.h linux-3.6.10-vs2.3.4.6/include/linux/major.h
+--- linux-3.6.10/include/linux/major.h 2009-09-10 13:26:25.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/major.h 2012-10-04 16:47:00.000000000 +0000
+@@ -15,6 +15,7 @@
+ #define HD_MAJOR IDE0_MAJOR
+ #define PTY_SLAVE_MAJOR 3
+ #define TTY_MAJOR 4
++#define VROOT_MAJOR 4
+ #define TTYAUX_MAJOR 5
+ #define LP_MAJOR 6
+ #define VCS_MAJOR 7
+diff -NurpP --minimal linux-3.6.10/include/linux/memcontrol.h linux-3.6.10-vs2.3.4.6/include/linux/memcontrol.h
+--- linux-3.6.10/include/linux/memcontrol.h 2012-10-04 13:27:45.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/memcontrol.h 2012-10-04 16:47:00.000000000 +0000
+@@ -83,6 +83,13 @@ extern struct mem_cgroup *try_get_mem_cg
+ extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
+ extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);
+
++extern u64 mem_cgroup_res_read_u64(struct mem_cgroup *mem, int member);
++extern u64 mem_cgroup_memsw_read_u64(struct mem_cgroup *mem, int member);
++
++extern s64 mem_cgroup_stat_read_cache(struct mem_cgroup *mem);
++extern s64 mem_cgroup_stat_read_anon(struct mem_cgroup *mem);
++extern s64 mem_cgroup_stat_read_mapped(struct mem_cgroup *mem);
++
+ static inline
+ int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
+ {
+diff -NurpP --minimal linux-3.6.10/include/linux/mm_types.h linux-3.6.10-vs2.3.4.6/include/linux/mm_types.h
+--- linux-3.6.10/include/linux/mm_types.h 2012-10-04 13:27:46.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/mm_types.h 2012-10-04 16:47:00.000000000 +0000
+@@ -370,6 +370,7 @@ struct mm_struct {
+
+ /* Architecture-specific MM context */
+ mm_context_t context;
++ struct vx_info *mm_vx_info;
+
+ unsigned long flags; /* Must use atomic bitops to access the bits */
+
+diff -NurpP --minimal linux-3.6.10/include/linux/mmzone.h linux-3.6.10-vs2.3.4.6/include/linux/mmzone.h
+--- linux-3.6.10/include/linux/mmzone.h 2012-12-11 11:36:59.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/mmzone.h 2012-12-08 00:36:33.000000000 +0000
+@@ -727,6 +727,13 @@ typedef struct pglist_data {
+ __pgdat->node_start_pfn + __pgdat->node_spanned_pages;\
+ })
+
++#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
++
++#define node_end_pfn(nid) ({\
++ pg_data_t *__pgdat = NODE_DATA(nid);\
++ __pgdat->node_start_pfn + __pgdat->node_spanned_pages;\
++})
++
+ #include <linux/memory_hotplug.h>
+
+ extern struct mutex zonelists_mutex;
+diff -NurpP --minimal linux-3.6.10/include/linux/mount.h linux-3.6.10-vs2.3.4.6/include/linux/mount.h
+--- linux-3.6.10/include/linux/mount.h 2012-03-19 18:47:28.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/mount.h 2012-10-04 16:47:00.000000000 +0000
+@@ -47,6 +47,9 @@ struct mnt_namespace;
+
+ #define MNT_INTERNAL 0x4000
+
++#define MNT_TAGID 0x10000
++#define MNT_NOTAG 0x20000
++
+ struct vfsmount {
+ struct dentry *mnt_root; /* root of the mounted tree */
+ struct super_block *mnt_sb; /* pointer to superblock */
+diff -NurpP --minimal linux-3.6.10/include/linux/net.h linux-3.6.10-vs2.3.4.6/include/linux/net.h
+--- linux-3.6.10/include/linux/net.h 2012-10-04 13:27:46.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/net.h 2012-10-04 16:47:00.000000000 +0000
+@@ -73,6 +73,7 @@ struct net;
+ #define SOCK_PASSCRED 3
+ #define SOCK_PASSSEC 4
+ #define SOCK_EXTERNALLY_ALLOCATED 5
++#define SOCK_USER_SOCKET 6
+
+ #ifndef ARCH_HAS_SOCKET_TYPES
+ /**
+diff -NurpP --minimal linux-3.6.10/include/linux/netdevice.h linux-3.6.10-vs2.3.4.6/include/linux/netdevice.h
+--- linux-3.6.10/include/linux/netdevice.h 2012-10-04 13:27:46.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/netdevice.h 2012-10-04 16:47:00.000000000 +0000
+@@ -1651,6 +1651,7 @@ extern void netdev_resync_ops(struct ne
+
+ extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
+ extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
++extern struct net_device *dev_get_by_index_real_rcu(struct net *net, int ifindex);
+ extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
+ extern int dev_restart(struct net_device *dev);
+ #ifdef CONFIG_NETPOLL_TRAP
+diff -NurpP --minimal linux-3.6.10/include/linux/nfs_mount.h linux-3.6.10-vs2.3.4.6/include/linux/nfs_mount.h
+--- linux-3.6.10/include/linux/nfs_mount.h 2011-01-05 20:50:31.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/nfs_mount.h 2012-10-04 16:47:00.000000000 +0000
+@@ -63,7 +63,8 @@ struct nfs_mount_data {
+ #define NFS_MOUNT_SECFLAVOUR 0x2000 /* 5 */
+ #define NFS_MOUNT_NORDIRPLUS 0x4000 /* 5 */
+ #define NFS_MOUNT_UNSHARED 0x8000 /* 5 */
+-#define NFS_MOUNT_FLAGMASK 0xFFFF
++#define NFS_MOUNT_TAGGED 0x10000 /* context tagging */
++#define NFS_MOUNT_FLAGMASK 0x1FFFF
+
+ /* The following are for internal use only */
+ #define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000
+diff -NurpP --minimal linux-3.6.10/include/linux/nsproxy.h linux-3.6.10-vs2.3.4.6/include/linux/nsproxy.h
+--- linux-3.6.10/include/linux/nsproxy.h 2011-10-24 16:45:32.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/nsproxy.h 2012-10-04 16:47:00.000000000 +0000
+@@ -3,6 +3,7 @@
+
+ #include <linux/spinlock.h>
+ #include <linux/sched.h>
++#include <linux/vserver/debug.h>
+
+ struct mnt_namespace;
+ struct uts_namespace;
+@@ -63,6 +64,7 @@ static inline struct nsproxy *task_nspro
+ }
+
+ int copy_namespaces(unsigned long flags, struct task_struct *tsk);
++struct nsproxy *copy_nsproxy(struct nsproxy *orig);
+ void exit_task_namespaces(struct task_struct *tsk);
+ void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new);
+ void free_nsproxy(struct nsproxy *ns);
+@@ -70,16 +72,26 @@ int unshare_nsproxy_namespaces(unsigned
+ struct fs_struct *);
+ int __init nsproxy_cache_init(void);
+
+-static inline void put_nsproxy(struct nsproxy *ns)
++#define get_nsproxy(n) __get_nsproxy(n, __FILE__, __LINE__)
++
++static inline void __get_nsproxy(struct nsproxy *ns,
++ const char *_file, int _line)
+ {
+- if (atomic_dec_and_test(&ns->count)) {
+- free_nsproxy(ns);
+- }
++ vxlprintk(VXD_CBIT(space, 0), "get_nsproxy(%p[%u])",
++ ns, atomic_read(&ns->count), _file, _line);
++ atomic_inc(&ns->count);
+ }
+
+-static inline void get_nsproxy(struct nsproxy *ns)
++#define put_nsproxy(n) __put_nsproxy(n, __FILE__, __LINE__)
++
++static inline void __put_nsproxy(struct nsproxy *ns,
++ const char *_file, int _line)
+ {
+- atomic_inc(&ns->count);
++ vxlprintk(VXD_CBIT(space, 0), "put_nsproxy(%p[%u])",
++ ns, atomic_read(&ns->count), _file, _line);
++ if (atomic_dec_and_test(&ns->count)) {
++ free_nsproxy(ns);
++ }
+ }
+
+ #endif
+diff -NurpP --minimal linux-3.6.10/include/linux/pid.h linux-3.6.10-vs2.3.4.6/include/linux/pid.h
+--- linux-3.6.10/include/linux/pid.h 2011-07-22 09:18:11.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/pid.h 2012-10-04 16:47:00.000000000 +0000
+@@ -8,7 +8,8 @@ enum pid_type
+ PIDTYPE_PID,
+ PIDTYPE_PGID,
+ PIDTYPE_SID,
+- PIDTYPE_MAX
++ PIDTYPE_MAX,
++ PIDTYPE_REALPID
+ };
+
+ /*
+@@ -171,6 +172,7 @@ static inline pid_t pid_nr(struct pid *p
+ }
+
+ pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns);
++pid_t pid_unmapped_nr_ns(struct pid *pid, struct pid_namespace *ns);
+ pid_t pid_vnr(struct pid *pid);
+
+ #define do_each_pid_task(pid, type, task) \
+diff -NurpP --minimal linux-3.6.10/include/linux/proc_fs.h linux-3.6.10-vs2.3.4.6/include/linux/proc_fs.h
+--- linux-3.6.10/include/linux/proc_fs.h 2012-07-22 21:39:44.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/proc_fs.h 2012-10-04 16:47:00.000000000 +0000
+@@ -54,6 +54,7 @@ struct proc_dir_entry {
+ nlink_t nlink;
+ kuid_t uid;
+ kgid_t gid;
++ int vx_flags;
+ loff_t size;
+ const struct inode_operations *proc_iops;
+ /*
+@@ -252,12 +253,18 @@ extern const struct proc_ns_operations n
+ extern const struct proc_ns_operations utsns_operations;
+ extern const struct proc_ns_operations ipcns_operations;
+
++struct vx_info;
++struct nx_info;
++
+ union proc_op {
+ int (*proc_get_link)(struct dentry *, struct path *);
+ int (*proc_read)(struct task_struct *task, char *page);
+ int (*proc_show)(struct seq_file *m,
+ struct pid_namespace *ns, struct pid *pid,
+ struct task_struct *task);
++ int (*proc_vs_read)(char *page);
++ int (*proc_vxi_read)(struct vx_info *vxi, char *page);
++ int (*proc_nxi_read)(struct nx_info *nxi, char *page);
+ };
+
+ struct ctl_table_header;
+@@ -265,6 +272,7 @@ struct ctl_table;
+
+ struct proc_inode {
+ struct pid *pid;
++ int vx_flags;
+ int fd;
+ union proc_op op;
+ struct proc_dir_entry *pde;
+diff -NurpP --minimal linux-3.6.10/include/linux/quotaops.h linux-3.6.10-vs2.3.4.6/include/linux/quotaops.h
+--- linux-3.6.10/include/linux/quotaops.h 2012-10-04 13:27:46.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/quotaops.h 2012-10-04 16:47:00.000000000 +0000
+@@ -8,6 +8,7 @@
+ #define _LINUX_QUOTAOPS_
+
+ #include <linux/fs.h>
++#include <linux/vs_dlimit.h>
+
+ #define DQUOT_SPACE_WARN 0x1
+ #define DQUOT_SPACE_RESERVE 0x2
+@@ -205,11 +206,12 @@ static inline void dquot_drop(struct ino
+
+ static inline int dquot_alloc_inode(const struct inode *inode)
+ {
+- return 0;
++ return dl_alloc_inode(inode);
+ }
+
+ static inline void dquot_free_inode(const struct inode *inode)
+ {
++ dl_free_inode(inode);
+ }
+
+ static inline int dquot_transfer(struct inode *inode, struct iattr *iattr)
+@@ -220,6 +222,10 @@ static inline int dquot_transfer(struct
+ static inline int __dquot_alloc_space(struct inode *inode, qsize_t number,
+ int flags)
+ {
++ int ret = 0;
++
++ if ((ret = dl_alloc_space(inode, number)))
++ return ret;
+ if (!(flags & DQUOT_SPACE_RESERVE))
+ inode_add_bytes(inode, number);
+ return 0;
+@@ -230,6 +236,7 @@ static inline void __dquot_free_space(st
+ {
+ if (!(flags & DQUOT_SPACE_RESERVE))
+ inode_sub_bytes(inode, number);
++ dl_free_space(inode, number);
+ }
+
+ static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
+diff -NurpP --minimal linux-3.6.10/include/linux/reboot.h linux-3.6.10-vs2.3.4.6/include/linux/reboot.h
+--- linux-3.6.10/include/linux/reboot.h 2011-10-24 16:45:32.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/reboot.h 2012-10-04 16:47:00.000000000 +0000
+@@ -33,6 +33,7 @@
+ #define LINUX_REBOOT_CMD_RESTART2 0xA1B2C3D4
+ #define LINUX_REBOOT_CMD_SW_SUSPEND 0xD000FCE2
+ #define LINUX_REBOOT_CMD_KEXEC 0x45584543
++#define LINUX_REBOOT_CMD_OOM 0xDEADBEEF
+
+
+ #ifdef __KERNEL__
+diff -NurpP --minimal linux-3.6.10/include/linux/sched.h linux-3.6.10-vs2.3.4.6/include/linux/sched.h
+--- linux-3.6.10/include/linux/sched.h 2012-10-04 13:27:46.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/sched.h 2012-10-04 16:47:00.000000000 +0000
+@@ -1422,6 +1422,14 @@ struct task_struct {
+ #endif
+ struct seccomp seccomp;
+
++/* vserver context data */
++ struct vx_info *vx_info;
++ struct nx_info *nx_info;
++
++ xid_t xid;
++ nid_t nid;
++ tag_t tag;
++
+ /* Thread group tracking */
+ u32 parent_exec_id;
+ u32 self_exec_id;
+@@ -1668,6 +1676,11 @@ struct pid_namespace;
+ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+ struct pid_namespace *ns);
+
++#include <linux/vserver/base.h>
++#include <linux/vserver/context.h>
++#include <linux/vserver/debug.h>
++#include <linux/vserver/pid.h>
++
+ static inline pid_t task_pid_nr(struct task_struct *tsk)
+ {
+ return tsk->pid;
+@@ -1681,7 +1694,8 @@ static inline pid_t task_pid_nr_ns(struc
+
+ static inline pid_t task_pid_vnr(struct task_struct *tsk)
+ {
+- return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
++ // return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
++ return vx_map_pid(__task_pid_nr_ns(tsk, PIDTYPE_PID, NULL));
+ }
+
+
+@@ -1694,7 +1708,7 @@ pid_t task_tgid_nr_ns(struct task_struct
+
+ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
+ {
+- return pid_vnr(task_tgid(tsk));
++ return vx_map_tgid(pid_vnr(task_tgid(tsk)));
+ }
+
+
+diff -NurpP --minimal linux-3.6.10/include/linux/shmem_fs.h linux-3.6.10-vs2.3.4.6/include/linux/shmem_fs.h
+--- linux-3.6.10/include/linux/shmem_fs.h 2012-07-22 21:39:44.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/shmem_fs.h 2012-10-04 16:47:00.000000000 +0000
+@@ -8,6 +8,9 @@
+
+ /* inode in-kernel data */
+
++#define TMPFS_SUPER_MAGIC 0x01021994
++
++
+ struct shmem_inode_info {
+ spinlock_t lock;
+ unsigned long flags;
+diff -NurpP --minimal linux-3.6.10/include/linux/stat.h linux-3.6.10-vs2.3.4.6/include/linux/stat.h
+--- linux-3.6.10/include/linux/stat.h 2012-07-22 21:39:44.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/stat.h 2012-10-04 16:47:00.000000000 +0000
+@@ -67,6 +67,7 @@ struct kstat {
+ unsigned int nlink;
+ kuid_t uid;
+ kgid_t gid;
++ tag_t tag;
+ dev_t rdev;
+ loff_t size;
+ struct timespec atime;
+diff -NurpP --minimal linux-3.6.10/include/linux/sunrpc/auth.h linux-3.6.10-vs2.3.4.6/include/linux/sunrpc/auth.h
+--- linux-3.6.10/include/linux/sunrpc/auth.h 2012-10-04 13:27:46.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/sunrpc/auth.h 2012-10-04 16:47:00.000000000 +0000
+@@ -25,6 +25,7 @@
+ struct auth_cred {
+ uid_t uid;
+ gid_t gid;
++ tag_t tag;
+ struct group_info *group_info;
+ const char *principal;
+ unsigned char machine_cred : 1;
+diff -NurpP --minimal linux-3.6.10/include/linux/sunrpc/clnt.h linux-3.6.10-vs2.3.4.6/include/linux/sunrpc/clnt.h
+--- linux-3.6.10/include/linux/sunrpc/clnt.h 2012-05-21 16:07:32.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/sunrpc/clnt.h 2012-10-04 16:47:00.000000000 +0000
+@@ -49,7 +49,8 @@ struct rpc_clnt {
+ unsigned int cl_softrtry : 1,/* soft timeouts */
+ cl_discrtry : 1,/* disconnect before retry */
+ cl_autobind : 1,/* use getport() */
+- cl_chatty : 1;/* be verbose */
++ cl_chatty : 1,/* be verbose */
++ cl_tag : 1;/* context tagging */
+
+ struct rpc_rtt * cl_rtt; /* RTO estimator data */
+ const struct rpc_timeout *cl_timeout; /* Timeout strategy */
+diff -NurpP --minimal linux-3.6.10/include/linux/sysctl.h linux-3.6.10-vs2.3.4.6/include/linux/sysctl.h
+--- linux-3.6.10/include/linux/sysctl.h 2012-05-21 16:07:32.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/sysctl.h 2012-10-04 16:47:00.000000000 +0000
+@@ -60,6 +60,7 @@ enum
+ CTL_ABI=9, /* Binary emulation */
+ CTL_CPU=10, /* CPU stuff (speed scaling, etc) */
+ CTL_ARLAN=254, /* arlan wireless driver */
++ CTL_VSERVER=4242, /* Linux-VServer debug */
+ CTL_S390DBF=5677, /* s390 debug */
+ CTL_SUNRPC=7249, /* sunrpc debug */
+ CTL_PM=9899, /* frv power management */
+@@ -94,6 +95,7 @@ enum
+
+ KERN_PANIC=15, /* int: panic timeout */
+ KERN_REALROOTDEV=16, /* real root device to mount after initrd */
++ KERN_VSHELPER=17, /* string: path to vshelper policy agent */
+
+ KERN_SPARC_REBOOT=21, /* reboot command on Sparc */
+ KERN_CTLALTDEL=22, /* int: allow ctl-alt-del to reboot */
+diff -NurpP --minimal linux-3.6.10/include/linux/sysfs.h linux-3.6.10-vs2.3.4.6/include/linux/sysfs.h
+--- linux-3.6.10/include/linux/sysfs.h 2012-07-22 21:39:44.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/sysfs.h 2012-10-04 16:47:00.000000000 +0000
+@@ -19,6 +19,8 @@
+ #include <linux/kobject_ns.h>
+ #include <linux/atomic.h>
+
++#define SYSFS_SUPER_MAGIC 0x62656572
++
+ struct kobject;
+ struct module;
+ enum kobj_ns_type;
+diff -NurpP --minimal linux-3.6.10/include/linux/time.h linux-3.6.10-vs2.3.4.6/include/linux/time.h
+--- linux-3.6.10/include/linux/time.h 2012-10-04 13:27:46.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/time.h 2012-10-04 16:47:00.000000000 +0000
+@@ -280,6 +280,8 @@ static __always_inline void timespec_add
+ a->tv_nsec = ns;
+ }
+
++#include <linux/vs_time.h>
++
+ #endif /* __KERNEL__ */
+
+ /*
+diff -NurpP --minimal linux-3.6.10/include/linux/types.h linux-3.6.10-vs2.3.4.6/include/linux/types.h
+--- linux-3.6.10/include/linux/types.h 2012-10-04 13:27:47.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/types.h 2012-10-04 16:47:00.000000000 +0000
+@@ -41,6 +41,9 @@ typedef __kernel_uid32_t uid_t;
+ typedef __kernel_gid32_t gid_t;
+ typedef __kernel_uid16_t uid16_t;
+ typedef __kernel_gid16_t gid16_t;
++typedef unsigned int xid_t;
++typedef unsigned int nid_t;
++typedef unsigned int tag_t;
+
+ typedef unsigned long uintptr_t;
+
+diff -NurpP --minimal linux-3.6.10/include/linux/utsname.h linux-3.6.10-vs2.3.4.6/include/linux/utsname.h
+--- linux-3.6.10/include/linux/utsname.h 2012-01-09 15:14:59.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/utsname.h 2012-10-04 16:47:00.000000000 +0000
+@@ -62,7 +62,8 @@ static inline void get_uts_ns(struct uts
+ }
+
+ extern struct uts_namespace *copy_utsname(unsigned long flags,
+- struct task_struct *tsk);
++ struct uts_namespace *old_ns,
++ struct user_namespace *user_ns);
+ extern void free_uts_ns(struct kref *kref);
+
+ static inline void put_uts_ns(struct uts_namespace *ns)
+@@ -79,12 +80,13 @@ static inline void put_uts_ns(struct uts
+ }
+
+ static inline struct uts_namespace *copy_utsname(unsigned long flags,
+- struct task_struct *tsk)
++ struct uts_namespace *old_ns,
++ struct user_namespace *user_ns)
+ {
+ if (flags & CLONE_NEWUTS)
+ return ERR_PTR(-EINVAL);
+
+- return tsk->nsproxy->uts_ns;
++ return old_ns;
+ }
+ #endif
+
+diff -NurpP --minimal linux-3.6.10/include/linux/vroot.h linux-3.6.10-vs2.3.4.6/include/linux/vroot.h
+--- linux-3.6.10/include/linux/vroot.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vroot.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,51 @@
++
++/*
++ * include/linux/vroot.h
++ *
++ * written by Herbert Pötzl, 9/11/2002
++ * ported to 2.6 by Herbert Pötzl, 30/12/2004
++ *
++ * Copyright (C) 2002-2007 by Herbert Pötzl.
++ * Redistribution of this file is permitted under the
++ * GNU General Public License.
++ */
++
++#ifndef _LINUX_VROOT_H
++#define _LINUX_VROOT_H
++
++
++#ifdef __KERNEL__
++
++/* Possible states of device */
++enum {
++ Vr_unbound,
++ Vr_bound,
++};
++
++struct vroot_device {
++ int vr_number;
++ int vr_refcnt;
++
++ struct semaphore vr_ctl_mutex;
++ struct block_device *vr_device;
++ int vr_state;
++};
++
++
++typedef struct block_device *(vroot_grb_func)(struct block_device *);
++
++extern int register_vroot_grb(vroot_grb_func *);
++extern int unregister_vroot_grb(vroot_grb_func *);
++
++#endif /* __KERNEL__ */
++
++#define MAX_VROOT_DEFAULT 8
++
++/*
++ * IOCTL commands --- we will commandeer 0x56 ('V')
++ */
++
++#define VROOT_SET_DEV 0x5600
++#define VROOT_CLR_DEV 0x5601
++
++#endif /* _LINUX_VROOT_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vs_base.h linux-3.6.10-vs2.3.4.6/include/linux/vs_base.h
+--- linux-3.6.10/include/linux/vs_base.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vs_base.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,10 @@
++#ifndef _VS_BASE_H
++#define _VS_BASE_H
++
++#include "vserver/base.h"
++#include "vserver/check.h"
++#include "vserver/debug.h"
++
++#else
++#warning duplicate inclusion
++#endif
+diff -NurpP --minimal linux-3.6.10/include/linux/vs_context.h linux-3.6.10-vs2.3.4.6/include/linux/vs_context.h
+--- linux-3.6.10/include/linux/vs_context.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vs_context.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,242 @@
++#ifndef _VS_CONTEXT_H
++#define _VS_CONTEXT_H
++
++#include "vserver/base.h"
++#include "vserver/check.h"
++#include "vserver/context.h"
++#include "vserver/history.h"
++#include "vserver/debug.h"
++
++#include <linux/sched.h>
++
++
++#define get_vx_info(i) __get_vx_info(i, __FILE__, __LINE__, __HERE__)
++
++static inline struct vx_info *__get_vx_info(struct vx_info *vxi,
++ const char *_file, int _line, void *_here)
++{
++ if (!vxi)
++ return NULL;
++
++ vxlprintk(VXD_CBIT(xid, 2), "get_vx_info(%p[#%d.%d])",
++ vxi, vxi ? vxi->vx_id : 0,
++ vxi ? atomic_read(&vxi->vx_usecnt) : 0,
++ _file, _line);
++ __vxh_get_vx_info(vxi, _here);
++
++ atomic_inc(&vxi->vx_usecnt);
++ return vxi;
++}
++
++
++extern void free_vx_info(struct vx_info *);
++
++#define put_vx_info(i) __put_vx_info(i, __FILE__, __LINE__, __HERE__)
++
++static inline void __put_vx_info(struct vx_info *vxi,
++ const char *_file, int _line, void *_here)
++{
++ if (!vxi)
++ return;
++
++ vxlprintk(VXD_CBIT(xid, 2), "put_vx_info(%p[#%d.%d])",
++ vxi, vxi ? vxi->vx_id : 0,
++ vxi ? atomic_read(&vxi->vx_usecnt) : 0,
++ _file, _line);
++ __vxh_put_vx_info(vxi, _here);
++
++ if (atomic_dec_and_test(&vxi->vx_usecnt))
++ free_vx_info(vxi);
++}
++
++
++#define init_vx_info(p, i) \
++ __init_vx_info(p, i, __FILE__, __LINE__, __HERE__)
++
++static inline void __init_vx_info(struct vx_info **vxp, struct vx_info *vxi,
++ const char *_file, int _line, void *_here)
++{
++ if (vxi) {
++ vxlprintk(VXD_CBIT(xid, 3),
++ "init_vx_info(%p[#%d.%d])",
++ vxi, vxi ? vxi->vx_id : 0,
++ vxi ? atomic_read(&vxi->vx_usecnt) : 0,
++ _file, _line);
++ __vxh_init_vx_info(vxi, vxp, _here);
++
++ atomic_inc(&vxi->vx_usecnt);
++ }
++ *vxp = vxi;
++}
++
++
++#define set_vx_info(p, i) \
++ __set_vx_info(p, i, __FILE__, __LINE__, __HERE__)
++
++static inline void __set_vx_info(struct vx_info **vxp, struct vx_info *vxi,
++ const char *_file, int _line, void *_here)
++{
++ struct vx_info *vxo;
++
++ if (!vxi)
++ return;
++
++ vxlprintk(VXD_CBIT(xid, 3), "set_vx_info(%p[#%d.%d])",
++ vxi, vxi ? vxi->vx_id : 0,
++ vxi ? atomic_read(&vxi->vx_usecnt) : 0,
++ _file, _line);
++ __vxh_set_vx_info(vxi, vxp, _here);
++
++ atomic_inc(&vxi->vx_usecnt);
++ vxo = xchg(vxp, vxi);
++ BUG_ON(vxo);
++}
++
++
++#define clr_vx_info(p) __clr_vx_info(p, __FILE__, __LINE__, __HERE__)
++
++static inline void __clr_vx_info(struct vx_info **vxp,
++ const char *_file, int _line, void *_here)
++{
++ struct vx_info *vxo;
++
++ vxo = xchg(vxp, NULL);
++ if (!vxo)
++ return;
++
++ vxlprintk(VXD_CBIT(xid, 3), "clr_vx_info(%p[#%d.%d])",
++ vxo, vxo ? vxo->vx_id : 0,
++ vxo ? atomic_read(&vxo->vx_usecnt) : 0,
++ _file, _line);
++ __vxh_clr_vx_info(vxo, vxp, _here);
++
++ if (atomic_dec_and_test(&vxo->vx_usecnt))
++ free_vx_info(vxo);
++}
++
++
++#define claim_vx_info(v, p) \
++ __claim_vx_info(v, p, __FILE__, __LINE__, __HERE__)
++
++static inline void __claim_vx_info(struct vx_info *vxi,
++ struct task_struct *task,
++ const char *_file, int _line, void *_here)
++{
++ vxlprintk(VXD_CBIT(xid, 3), "claim_vx_info(%p[#%d.%d.%d]) %p",
++ vxi, vxi ? vxi->vx_id : 0,
++ vxi ? atomic_read(&vxi->vx_usecnt) : 0,
++ vxi ? atomic_read(&vxi->vx_tasks) : 0,
++ task, _file, _line);
++ __vxh_claim_vx_info(vxi, task, _here);
++
++ atomic_inc(&vxi->vx_tasks);
++}
++
++
++extern void unhash_vx_info(struct vx_info *);
++
++#define release_vx_info(v, p) \
++ __release_vx_info(v, p, __FILE__, __LINE__, __HERE__)
++
++static inline void __release_vx_info(struct vx_info *vxi,
++ struct task_struct *task,
++ const char *_file, int _line, void *_here)
++{
++ vxlprintk(VXD_CBIT(xid, 3), "release_vx_info(%p[#%d.%d.%d]) %p",
++ vxi, vxi ? vxi->vx_id : 0,
++ vxi ? atomic_read(&vxi->vx_usecnt) : 0,
++ vxi ? atomic_read(&vxi->vx_tasks) : 0,
++ task, _file, _line);
++ __vxh_release_vx_info(vxi, task, _here);
++
++ might_sleep();
++
++ if (atomic_dec_and_test(&vxi->vx_tasks))
++ unhash_vx_info(vxi);
++}
++
++
++#define task_get_vx_info(p) \
++ __task_get_vx_info(p, __FILE__, __LINE__, __HERE__)
++
++static inline struct vx_info *__task_get_vx_info(struct task_struct *p,
++ const char *_file, int _line, void *_here)
++{
++ struct vx_info *vxi;
++
++ task_lock(p);
++ vxlprintk(VXD_CBIT(xid, 5), "task_get_vx_info(%p)",
++ p, _file, _line);
++ vxi = __get_vx_info(p->vx_info, _file, _line, _here);
++ task_unlock(p);
++ return vxi;
++}
++
++
++static inline void __wakeup_vx_info(struct vx_info *vxi)
++{
++ if (waitqueue_active(&vxi->vx_wait))
++ wake_up_interruptible(&vxi->vx_wait);
++}
++
++
++#define enter_vx_info(v, s) __enter_vx_info(v, s, __FILE__, __LINE__)
++
++static inline void __enter_vx_info(struct vx_info *vxi,
++ struct vx_info_save *vxis, const char *_file, int _line)
++{
++ vxlprintk(VXD_CBIT(xid, 5), "enter_vx_info(%p[#%d],%p) %p[#%d,%p]",
++ vxi, vxi ? vxi->vx_id : 0, vxis, current,
++ current->xid, current->vx_info, _file, _line);
++ vxis->vxi = xchg(&current->vx_info, vxi);
++ vxis->xid = current->xid;
++ current->xid = vxi ? vxi->vx_id : 0;
++}
++
++#define leave_vx_info(s) __leave_vx_info(s, __FILE__, __LINE__)
++
++static inline void __leave_vx_info(struct vx_info_save *vxis,
++ const char *_file, int _line)
++{
++ vxlprintk(VXD_CBIT(xid, 5), "leave_vx_info(%p[#%d,%p]) %p[#%d,%p]",
++ vxis, vxis->xid, vxis->vxi, current,
++ current->xid, current->vx_info, _file, _line);
++ (void)xchg(&current->vx_info, vxis->vxi);
++ current->xid = vxis->xid;
++}
++
++
++static inline void __enter_vx_admin(struct vx_info_save *vxis)
++{
++ vxis->vxi = xchg(&current->vx_info, NULL);
++ vxis->xid = xchg(&current->xid, (xid_t)0);
++}
++
++static inline void __leave_vx_admin(struct vx_info_save *vxis)
++{
++ (void)xchg(&current->xid, vxis->xid);
++ (void)xchg(&current->vx_info, vxis->vxi);
++}
++
++#define task_is_init(p) \
++ __task_is_init(p, __FILE__, __LINE__, __HERE__)
++
++static inline int __task_is_init(struct task_struct *p,
++ const char *_file, int _line, void *_here)
++{
++ int is_init = is_global_init(p);
++
++ task_lock(p);
++ if (p->vx_info)
++ is_init = p->vx_info->vx_initpid == p->pid;
++ task_unlock(p);
++ return is_init;
++}
++
++extern void exit_vx_info(struct task_struct *, int);
++extern void exit_vx_info_early(struct task_struct *, int);
++
++
++#else
++#warning duplicate inclusion
++#endif
+diff -NurpP --minimal linux-3.6.10/include/linux/vs_cowbl.h linux-3.6.10-vs2.3.4.6/include/linux/vs_cowbl.h
+--- linux-3.6.10/include/linux/vs_cowbl.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vs_cowbl.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,48 @@
++#ifndef _VS_COWBL_H
++#define _VS_COWBL_H
++
++#include <linux/fs.h>
++#include <linux/dcache.h>
++#include <linux/namei.h>
++#include <linux/slab.h>
++
++extern struct dentry *cow_break_link(const char *pathname);
++
++static inline int cow_check_and_break(struct path *path)
++{
++ struct inode *inode = path->dentry->d_inode;
++ int error = 0;
++
++ /* do we need this check? */
++ if (IS_RDONLY(inode))
++ return -EROFS;
++
++ if (IS_COW(inode)) {
++ if (IS_COW_LINK(inode)) {
++ struct dentry *new_dentry, *old_dentry = path->dentry;
++ char *pp, *buf;
++
++ buf = kmalloc(PATH_MAX, GFP_KERNEL);
++ if (!buf) {
++ return -ENOMEM;
++ }
++ pp = d_path(path, buf, PATH_MAX);
++ new_dentry = cow_break_link(pp);
++ kfree(buf);
++ if (!IS_ERR(new_dentry)) {
++ path->dentry = new_dentry;
++ dput(old_dentry);
++ } else
++ error = PTR_ERR(new_dentry);
++ } else {
++ inode->i_flags &= ~(S_IXUNLINK | S_IMMUTABLE);
++ inode->i_ctime = CURRENT_TIME;
++ mark_inode_dirty(inode);
++ }
++ }
++ return error;
++}
++
++#else
++#warning duplicate inclusion
++#endif
+diff -NurpP --minimal linux-3.6.10/include/linux/vs_cvirt.h linux-3.6.10-vs2.3.4.6/include/linux/vs_cvirt.h
+--- linux-3.6.10/include/linux/vs_cvirt.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vs_cvirt.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,50 @@
++#ifndef _VS_CVIRT_H
++#define _VS_CVIRT_H
++
++#include "vserver/cvirt.h"
++#include "vserver/context.h"
++#include "vserver/base.h"
++#include "vserver/check.h"
++#include "vserver/debug.h"
++
++
++static inline void vx_activate_task(struct task_struct *p)
++{
++ struct vx_info *vxi;
++
++ if ((vxi = p->vx_info)) {
++ vx_update_load(vxi);
++ atomic_inc(&vxi->cvirt.nr_running);
++ }
++}
++
++static inline void vx_deactivate_task(struct task_struct *p)
++{
++ struct vx_info *vxi;
++
++ if ((vxi = p->vx_info)) {
++ vx_update_load(vxi);
++ atomic_dec(&vxi->cvirt.nr_running);
++ }
++}
++
++static inline void vx_uninterruptible_inc(struct task_struct *p)
++{
++ struct vx_info *vxi;
++
++ if ((vxi = p->vx_info))
++ atomic_inc(&vxi->cvirt.nr_uninterruptible);
++}
++
++static inline void vx_uninterruptible_dec(struct task_struct *p)
++{
++ struct vx_info *vxi;
++
++ if ((vxi = p->vx_info))
++ atomic_dec(&vxi->cvirt.nr_uninterruptible);
++}
++
++
++#else
++#warning duplicate inclusion
++#endif
+diff -NurpP --minimal linux-3.6.10/include/linux/vs_device.h linux-3.6.10-vs2.3.4.6/include/linux/vs_device.h
+--- linux-3.6.10/include/linux/vs_device.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vs_device.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,45 @@
++#ifndef _VS_DEVICE_H
++#define _VS_DEVICE_H
++
++#include "vserver/base.h"
++#include "vserver/device.h"
++#include "vserver/debug.h"
++
++
++#ifdef CONFIG_VSERVER_DEVICE
++
++int vs_map_device(struct vx_info *, dev_t, dev_t *, umode_t);
++
++#define vs_device_perm(v, d, m, p) \
++ ((vs_map_device(current_vx_info(), d, NULL, m) & (p)) == (p))
++
++#else
++
++static inline
++int vs_map_device(struct vx_info *vxi,
++ dev_t device, dev_t *target, umode_t mode)
++{
++ if (target)
++ *target = device;
++ return ~0;
++}
++
++#define vs_device_perm(v, d, m, p) ((p) == (p))
++
++#endif
++
++
++#define vs_map_chrdev(d, t, p) \
++ ((vs_map_device(current_vx_info(), d, t, S_IFCHR) & (p)) == (p))
++#define vs_map_blkdev(d, t, p) \
++ ((vs_map_device(current_vx_info(), d, t, S_IFBLK) & (p)) == (p))
++
++#define vs_chrdev_perm(d, p) \
++ vs_device_perm(current_vx_info(), d, S_IFCHR, p)
++#define vs_blkdev_perm(d, p) \
++ vs_device_perm(current_vx_info(), d, S_IFBLK, p)
++
++
++#else
++#warning duplicate inclusion
++#endif
+diff -NurpP --minimal linux-3.6.10/include/linux/vs_dlimit.h linux-3.6.10-vs2.3.4.6/include/linux/vs_dlimit.h
+--- linux-3.6.10/include/linux/vs_dlimit.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vs_dlimit.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,215 @@
++#ifndef _VS_DLIMIT_H
++#define _VS_DLIMIT_H
++
++#include <linux/fs.h>
++
++#include "vserver/dlimit.h"
++#include "vserver/base.h"
++#include "vserver/debug.h"
++
++
++#define get_dl_info(i) __get_dl_info(i, __FILE__, __LINE__)
++
++static inline struct dl_info *__get_dl_info(struct dl_info *dli,
++ const char *_file, int _line)
++{
++ if (!dli)
++ return NULL;
++ vxlprintk(VXD_CBIT(dlim, 4), "get_dl_info(%p[#%d.%d])",
++ dli, dli ? dli->dl_tag : 0,
++ dli ? atomic_read(&dli->dl_usecnt) : 0,
++ _file, _line);
++ atomic_inc(&dli->dl_usecnt);
++ return dli;
++}
++
++
++#define free_dl_info(i) \
++ call_rcu(&(i)->dl_rcu, rcu_free_dl_info)
++
++#define put_dl_info(i) __put_dl_info(i, __FILE__, __LINE__)
++
++static inline void __put_dl_info(struct dl_info *dli,
++ const char *_file, int _line)
++{
++ if (!dli)
++ return;
++ vxlprintk(VXD_CBIT(dlim, 4), "put_dl_info(%p[#%d.%d])",
++ dli, dli ? dli->dl_tag : 0,
++ dli ? atomic_read(&dli->dl_usecnt) : 0,
++ _file, _line);
++ if (atomic_dec_and_test(&dli->dl_usecnt))
++ free_dl_info(dli);
++}
++
++
++#define __dlimit_char(d) ((d) ? '*' : ' ')
++
++static inline int __dl_alloc_space(struct super_block *sb,
++ tag_t tag, dlsize_t nr, const char *file, int line)
++{
++ struct dl_info *dli = NULL;
++ int ret = 0;
++
++ if (nr == 0)
++ goto out;
++ dli = locate_dl_info(sb, tag);
++ if (!dli)
++ goto out;
++
++ spin_lock(&dli->dl_lock);
++ ret = (dli->dl_space_used + nr > dli->dl_space_total);
++ if (!ret)
++ dli->dl_space_used += nr;
++ spin_unlock(&dli->dl_lock);
++ put_dl_info(dli);
++out:
++ vxlprintk(VXD_CBIT(dlim, 1),
++ "ALLOC (%p,#%d)%c %lld bytes (%d)",
++ sb, tag, __dlimit_char(dli), (long long)nr,
++ ret, file, line);
++ return ret ? -ENOSPC : 0;
++}
++
++static inline void __dl_free_space(struct super_block *sb,
++ tag_t tag, dlsize_t nr, const char *_file, int _line)
++{
++ struct dl_info *dli = NULL;
++
++ if (nr == 0)
++ goto out;
++ dli = locate_dl_info(sb, tag);
++ if (!dli)
++ goto out;
++
++ spin_lock(&dli->dl_lock);
++ if (dli->dl_space_used > nr)
++ dli->dl_space_used -= nr;
++ else
++ dli->dl_space_used = 0;
++ spin_unlock(&dli->dl_lock);
++ put_dl_info(dli);
++out:
++ vxlprintk(VXD_CBIT(dlim, 1),
++ "FREE (%p,#%d)%c %lld bytes",
++ sb, tag, __dlimit_char(dli), (long long)nr,
++ _file, _line);
++}
++
++static inline int __dl_alloc_inode(struct super_block *sb,
++ tag_t tag, const char *_file, int _line)
++{
++ struct dl_info *dli;
++ int ret = 0;
++
++ dli = locate_dl_info(sb, tag);
++ if (!dli)
++ goto out;
++
++ spin_lock(&dli->dl_lock);
++ dli->dl_inodes_used++;
++ ret = (dli->dl_inodes_used > dli->dl_inodes_total);
++ spin_unlock(&dli->dl_lock);
++ put_dl_info(dli);
++out:
++ vxlprintk(VXD_CBIT(dlim, 0),
++ "ALLOC (%p,#%d)%c inode (%d)",
++ sb, tag, __dlimit_char(dli), ret, _file, _line);
++ return ret ? -ENOSPC : 0;
++}
++
++static inline void __dl_free_inode(struct super_block *sb,
++ tag_t tag, const char *_file, int _line)
++{
++ struct dl_info *dli;
++
++ dli = locate_dl_info(sb, tag);
++ if (!dli)
++ goto out;
++
++ spin_lock(&dli->dl_lock);
++ if (dli->dl_inodes_used > 1)
++ dli->dl_inodes_used--;
++ else
++ dli->dl_inodes_used = 0;
++ spin_unlock(&dli->dl_lock);
++ put_dl_info(dli);
++out:
++ vxlprintk(VXD_CBIT(dlim, 0),
++ "FREE (%p,#%d)%c inode",
++ sb, tag, __dlimit_char(dli), _file, _line);
++}
++
++static inline void __dl_adjust_block(struct super_block *sb, tag_t tag,
++ unsigned long long *free_blocks, unsigned long long *root_blocks,
++ const char *_file, int _line)
++{
++ struct dl_info *dli;
++ uint64_t broot, bfree;
++
++ dli = locate_dl_info(sb, tag);
++ if (!dli)
++ return;
++
++ spin_lock(&dli->dl_lock);
++ broot = (dli->dl_space_total -
++ (dli->dl_space_total >> 10) * dli->dl_nrlmult)
++ >> sb->s_blocksize_bits;
++ bfree = (dli->dl_space_total - dli->dl_space_used)
++ >> sb->s_blocksize_bits;
++ spin_unlock(&dli->dl_lock);
++
++ vxlprintk(VXD_CBIT(dlim, 2),
++ "ADJUST: %lld,%lld on %lld,%lld [mult=%d]",
++ (long long)bfree, (long long)broot,
++ *free_blocks, *root_blocks, dli->dl_nrlmult,
++ _file, _line);
++ if (free_blocks) {
++ if (*free_blocks > bfree)
++ *free_blocks = bfree;
++ }
++ if (root_blocks) {
++ if (*root_blocks > broot)
++ *root_blocks = broot;
++ }
++ put_dl_info(dli);
++}
++
++#define dl_prealloc_space(in, bytes) \
++ __dl_alloc_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \
++ __FILE__, __LINE__ )
++
++#define dl_alloc_space(in, bytes) \
++ __dl_alloc_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \
++ __FILE__, __LINE__ )
++
++#define dl_reserve_space(in, bytes) \
++ __dl_alloc_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \
++ __FILE__, __LINE__ )
++
++#define dl_claim_space(in, bytes) (0)
++
++#define dl_release_space(in, bytes) \
++ __dl_free_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \
++ __FILE__, __LINE__ )
++
++#define dl_free_space(in, bytes) \
++ __dl_free_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \
++ __FILE__, __LINE__ )
++
++
++
++#define dl_alloc_inode(in) \
++ __dl_alloc_inode((in)->i_sb, (in)->i_tag, __FILE__, __LINE__ )
++
++#define dl_free_inode(in) \
++ __dl_free_inode((in)->i_sb, (in)->i_tag, __FILE__, __LINE__ )
++
++
++#define dl_adjust_block(sb, tag, fb, rb) \
++ __dl_adjust_block(sb, tag, fb, rb, __FILE__, __LINE__ )
++
++
++#else
++#warning duplicate inclusion
++#endif
+diff -NurpP --minimal linux-3.6.10/include/linux/vs_inet.h linux-3.6.10-vs2.3.4.6/include/linux/vs_inet.h
+--- linux-3.6.10/include/linux/vs_inet.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vs_inet.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,353 @@
++#ifndef _VS_INET_H
++#define _VS_INET_H
++
++#include "vserver/base.h"
++#include "vserver/network.h"
++#include "vserver/debug.h"
++
++#define IPI_LOOPBACK htonl(INADDR_LOOPBACK)
++
++#define NXAV4(a) NIPQUAD((a)->ip[0]), NIPQUAD((a)->ip[1]), \
++ NIPQUAD((a)->mask), (a)->type
++#define NXAV4_FMT "[" NIPQUAD_FMT "-" NIPQUAD_FMT "/" NIPQUAD_FMT ":%04x]"
++
++#define NIPQUAD(addr) \
++ ((unsigned char *)&addr)[0], \
++ ((unsigned char *)&addr)[1], \
++ ((unsigned char *)&addr)[2], \
++ ((unsigned char *)&addr)[3]
++
++#define NIPQUAD_FMT "%u.%u.%u.%u"
++
++
++static inline
++int v4_addr_match(struct nx_addr_v4 *nxa, __be32 addr, uint16_t tmask)
++{
++ __be32 ip = nxa->ip[0].s_addr;
++ __be32 mask = nxa->mask.s_addr;
++ __be32 bcast = ip | ~mask;
++ int ret = 0;
++
++ switch (nxa->type & tmask) {
++ case NXA_TYPE_MASK:
++ ret = (ip == (addr & mask));
++ break;
++ case NXA_TYPE_ADDR:
++ ret = 3;
++ if (addr == ip)
++ break;
++ /* fall through to broadcast */
++ case NXA_MOD_BCAST:
++ ret = ((tmask & NXA_MOD_BCAST) && (addr == bcast));
++ break;
++ case NXA_TYPE_RANGE:
++ ret = ((nxa->ip[0].s_addr <= addr) &&
++ (nxa->ip[1].s_addr > addr));
++ break;
++ case NXA_TYPE_ANY:
++ ret = 2;
++ break;
++ }
++
++ vxdprintk(VXD_CBIT(net, 0),
++ "v4_addr_match(%p" NXAV4_FMT "," NIPQUAD_FMT ",%04x) = %d",
++ nxa, NXAV4(nxa), NIPQUAD(addr), tmask, ret);
++ return ret;
++}
++
++static inline
++int v4_addr_in_nx_info(struct nx_info *nxi, __be32 addr, uint16_t tmask)
++{
++ struct nx_addr_v4 *nxa;
++ int ret = 1;
++
++ if (!nxi)
++ goto out;
++
++ ret = 2;
++ /* allow 127.0.0.1 when remapping lback */
++ if ((tmask & NXA_LOOPBACK) &&
++ (addr == IPI_LOOPBACK) &&
++ nx_info_flags(nxi, NXF_LBACK_REMAP, 0))
++ goto out;
++ ret = 3;
++ /* check for lback address */
++ if ((tmask & NXA_MOD_LBACK) &&
++ (nxi->v4_lback.s_addr == addr))
++ goto out;
++ ret = 4;
++ /* check for broadcast address */
++ if ((tmask & NXA_MOD_BCAST) &&
++ (nxi->v4_bcast.s_addr == addr))
++ goto out;
++ ret = 5;
++ /* check for v4 addresses */
++ for (nxa = &nxi->v4; nxa; nxa = nxa->next)
++ if (v4_addr_match(nxa, addr, tmask))
++ goto out;
++ ret = 0;
++out:
++ vxdprintk(VXD_CBIT(net, 0),
++ "v4_addr_in_nx_info(%p[#%u]," NIPQUAD_FMT ",%04x) = %d",
++ nxi, nxi ? nxi->nx_id : 0, NIPQUAD(addr), tmask, ret);
++ return ret;
++}
++
++static inline
++int v4_nx_addr_match(struct nx_addr_v4 *nxa, struct nx_addr_v4 *addr, uint16_t mask)
++{
++ /* FIXME: needs full range checks */
++ return v4_addr_match(nxa, addr->ip[0].s_addr, mask);
++}
++
++static inline
++int v4_nx_addr_in_nx_info(struct nx_info *nxi, struct nx_addr_v4 *nxa, uint16_t mask)
++{
++ struct nx_addr_v4 *ptr;
++
++ for (ptr = &nxi->v4; ptr; ptr = ptr->next)
++ if (v4_nx_addr_match(ptr, nxa, mask))
++ return 1;
++ return 0;
++}
++
++#include <net/inet_sock.h>
++
++/*
++ * Check if a given address matches for a socket
++ *
++ * nxi: the socket's nx_info if any
++ * addr: to be verified address
++ */
++static inline
++int v4_sock_addr_match (
++ struct nx_info *nxi,
++ struct inet_sock *inet,
++ __be32 addr)
++{
++ __be32 saddr = inet->inet_rcv_saddr;
++ __be32 bcast = nxi ? nxi->v4_bcast.s_addr : INADDR_BROADCAST;
++
++ if (addr && (saddr == addr || bcast == addr))
++ return 1;
++ if (!saddr)
++ return v4_addr_in_nx_info(nxi, addr, NXA_MASK_BIND);
++ return 0;
++}
++
++
++/* inet related checks and helpers */
++
++
++struct in_ifaddr;
++struct net_device;
++struct sock;
++
++#ifdef CONFIG_INET
++
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <net/inet_sock.h>
++#include <net/inet_timewait_sock.h>
++
++
++int dev_in_nx_info(struct net_device *, struct nx_info *);
++int v4_dev_in_nx_info(struct net_device *, struct nx_info *);
++int nx_v4_addr_conflict(struct nx_info *, struct nx_info *);
++
++
++/*
++ * check if address is covered by socket
++ *
++ * sk: the socket to check against
++ * addr: the address in question (must be != 0)
++ */
++
++static inline
++int __v4_addr_match_socket(const struct sock *sk, struct nx_addr_v4 *nxa)
++{
++ struct nx_info *nxi = sk->sk_nx_info;
++ __be32 saddr = sk_rcv_saddr(sk);
++
++ vxdprintk(VXD_CBIT(net, 5),
++ "__v4_addr_in_socket(%p," NXAV4_FMT ") %p:" NIPQUAD_FMT " %p;%lx",
++ sk, NXAV4(nxa), nxi, NIPQUAD(saddr), sk->sk_socket,
++ (sk->sk_socket?sk->sk_socket->flags:0));
++
++ if (saddr) { /* direct address match */
++ return v4_addr_match(nxa, saddr, -1);
++ } else if (nxi) { /* match against nx_info */
++ return v4_nx_addr_in_nx_info(nxi, nxa, -1);
++ } else { /* unrestricted any socket */
++ return 1;
++ }
++}
++
++
++
++static inline
++int nx_dev_visible(struct nx_info *nxi, struct net_device *dev)
++{
++ vxdprintk(VXD_CBIT(net, 1),
++ "nx_dev_visible(%p[#%u],%p " VS_Q("%s") ") %d",
++ nxi, nxi ? nxi->nx_id : 0, dev, dev->name,
++ nxi ? dev_in_nx_info(dev, nxi) : 0);
++
++ if (!nx_info_flags(nxi, NXF_HIDE_NETIF, 0))
++ return 1;
++ if (dev_in_nx_info(dev, nxi))
++ return 1;
++ return 0;
++}
++
++
++static inline
++int v4_ifa_in_nx_info(struct in_ifaddr *ifa, struct nx_info *nxi)
++{
++ if (!nxi)
++ return 1;
++ if (!ifa)
++ return 0;
++ return v4_addr_in_nx_info(nxi, ifa->ifa_local, NXA_MASK_SHOW);
++}
++
++static inline
++int nx_v4_ifa_visible(struct nx_info *nxi, struct in_ifaddr *ifa)
++{
++ vxdprintk(VXD_CBIT(net, 1), "nx_v4_ifa_visible(%p[#%u],%p) %d",
++ nxi, nxi ? nxi->nx_id : 0, ifa,
++ nxi ? v4_ifa_in_nx_info(ifa, nxi) : 0);
++
++ if (!nx_info_flags(nxi, NXF_HIDE_NETIF, 0))
++ return 1;
++ if (v4_ifa_in_nx_info(ifa, nxi))
++ return 1;
++ return 0;
++}
++
++
++struct nx_v4_sock_addr {
++ __be32 saddr; /* Address used for validation */
++ __be32 baddr; /* Address used for socket bind */
++};
++
++static inline
++int v4_map_sock_addr(struct inet_sock *inet, struct sockaddr_in *addr,
++ struct nx_v4_sock_addr *nsa)
++{
++ struct sock *sk = &inet->sk;
++ struct nx_info *nxi = sk->sk_nx_info;
++ __be32 saddr = addr->sin_addr.s_addr;
++ __be32 baddr = saddr;
++
++ vxdprintk(VXD_CBIT(net, 3),
++ "inet_bind(%p)* %p,%p;%lx " NIPQUAD_FMT,
++ sk, sk->sk_nx_info, sk->sk_socket,
++ (sk->sk_socket ? sk->sk_socket->flags : 0),
++ NIPQUAD(saddr));
++
++ if (nxi) {
++ if (saddr == INADDR_ANY) {
++ if (nx_info_flags(nxi, NXF_SINGLE_IP, 0))
++ baddr = nxi->v4.ip[0].s_addr;
++ } else if (saddr == IPI_LOOPBACK) {
++ if (nx_info_flags(nxi, NXF_LBACK_REMAP, 0))
++ baddr = nxi->v4_lback.s_addr;
++ } else if (!ipv4_is_multicast(saddr) ||
++ !nx_info_ncaps(nxi, NXC_MULTICAST)) {
++ /* normal address bind */
++ if (!v4_addr_in_nx_info(nxi, saddr, NXA_MASK_BIND))
++ return -EADDRNOTAVAIL;
++ }
++ }
++
++ vxdprintk(VXD_CBIT(net, 3),
++ "inet_bind(%p) " NIPQUAD_FMT ", " NIPQUAD_FMT,
++ sk, NIPQUAD(saddr), NIPQUAD(baddr));
++
++ nsa->saddr = saddr;
++ nsa->baddr = baddr;
++ return 0;
++}
++
++static inline
++void v4_set_sock_addr(struct inet_sock *inet, struct nx_v4_sock_addr *nsa)
++{
++ inet->inet_saddr = nsa->baddr;
++ inet->inet_rcv_saddr = nsa->baddr;
++}
++
++
++/*
++ * helper to simplify inet_lookup_listener
++ *
++ * nxi: the socket's nx_info if any
++ * addr: to be verified address
++ * saddr: socket address
++ */
++static inline int v4_inet_addr_match (
++ struct nx_info *nxi,
++ __be32 addr,
++ __be32 saddr)
++{
++ if (addr && (saddr == addr))
++ return 1;
++ if (!saddr)
++ return nxi ? v4_addr_in_nx_info(nxi, addr, NXA_MASK_BIND) : 1;
++ return 0;
++}
++
++static inline __be32 nx_map_sock_lback(struct nx_info *nxi, __be32 addr)
++{
++ if (nx_info_flags(nxi, NXF_HIDE_LBACK, 0) &&
++ (addr == nxi->v4_lback.s_addr))
++ return IPI_LOOPBACK;
++ return addr;
++}
++
++static inline
++int nx_info_has_v4(struct nx_info *nxi)
++{
++ if (!nxi)
++ return 1;
++ if (NX_IPV4(nxi))
++ return 1;
++ if (nx_info_flags(nxi, NXF_LBACK_REMAP, 0))
++ return 1;
++ return 0;
++}
++
++#else /* CONFIG_INET */
++
++static inline
++int nx_dev_visible(struct nx_info *n, struct net_device *d)
++{
++ return 1;
++}
++
++static inline
++int nx_v4_addr_conflict(struct nx_info *n, uint32_t a, const struct sock *s)
++{
++ return 1;
++}
++
++static inline
++int v4_ifa_in_nx_info(struct in_ifaddr *a, struct nx_info *n)
++{
++ return 1;
++}
++
++static inline
++int nx_info_has_v4(struct nx_info *nxi)
++{
++ return 0;
++}
++
++#endif /* CONFIG_INET */
++
++#define current_nx_info_has_v4() \
++ nx_info_has_v4(current_nx_info())
++
++#else
++// #warning duplicate inclusion
++#endif
+diff -NurpP --minimal linux-3.6.10/include/linux/vs_inet6.h linux-3.6.10-vs2.3.4.6/include/linux/vs_inet6.h
+--- linux-3.6.10/include/linux/vs_inet6.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vs_inet6.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,246 @@
++#ifndef _VS_INET6_H
++#define _VS_INET6_H
++
++#include "vserver/base.h"
++#include "vserver/network.h"
++#include "vserver/debug.h"
++
++#include <net/ipv6.h>
++
++#define NXAV6(a) &(a)->ip, &(a)->mask, (a)->prefix, (a)->type
++#define NXAV6_FMT "[%pI6/%pI6/%d:%04x]"
++
++
++#ifdef CONFIG_IPV6
++
++static inline
++int v6_addr_match(struct nx_addr_v6 *nxa,
++ const struct in6_addr *addr, uint16_t mask)
++{
++ int ret = 0;
++
++ switch (nxa->type & mask) {
++ case NXA_TYPE_MASK:
++ ret = ipv6_masked_addr_cmp(&nxa->ip, &nxa->mask, addr);
++ break;
++ case NXA_TYPE_ADDR:
++ ret = ipv6_addr_equal(&nxa->ip, addr);
++ break;
++ case NXA_TYPE_ANY:
++ ret = 1;
++ break;
++ }
++ vxdprintk(VXD_CBIT(net, 0),
++ "v6_addr_match(%p" NXAV6_FMT ",%pI6,%04x) = %d",
++ nxa, NXAV6(nxa), addr, mask, ret);
++ return ret;
++}
++
++static inline
++int v6_addr_in_nx_info(struct nx_info *nxi,
++ const struct in6_addr *addr, uint16_t mask)
++{
++ struct nx_addr_v6 *nxa;
++ int ret = 1;
++
++ if (!nxi)
++ goto out;
++ for (nxa = &nxi->v6; nxa; nxa = nxa->next)
++ if (v6_addr_match(nxa, addr, mask))
++ goto out;
++ ret = 0;
++out:
++ vxdprintk(VXD_CBIT(net, 0),
++ "v6_addr_in_nx_info(%p[#%u],%pI6,%04x) = %d",
++ nxi, nxi ? nxi->nx_id : 0, addr, mask, ret);
++ return ret;
++}
++
++static inline
++int v6_nx_addr_match(struct nx_addr_v6 *nxa, struct nx_addr_v6 *addr, uint16_t mask)
++{
++ /* FIXME: needs full range checks */
++ return v6_addr_match(nxa, &addr->ip, mask);
++}
++
++static inline
++int v6_nx_addr_in_nx_info(struct nx_info *nxi, struct nx_addr_v6 *nxa, uint16_t mask)
++{
++ struct nx_addr_v6 *ptr;
++
++ for (ptr = &nxi->v6; ptr; ptr = ptr->next)
++ if (v6_nx_addr_match(ptr, nxa, mask))
++ return 1;
++ return 0;
++}
++
++
++/*
++ * Check if a given address matches for a socket
++ *
++ * nxi: the socket's nx_info if any
++ * addr: to be verified address
++ */
++static inline
++int v6_sock_addr_match (
++ struct nx_info *nxi,
++ struct inet_sock *inet,
++ struct in6_addr *addr)
++{
++ struct sock *sk = &inet->sk;
++ struct in6_addr *saddr = inet6_rcv_saddr(sk);
++
++ if (!ipv6_addr_any(addr) &&
++ ipv6_addr_equal(saddr, addr))
++ return 1;
++ if (ipv6_addr_any(saddr))
++ return v6_addr_in_nx_info(nxi, addr, -1);
++ return 0;
++}
++
++/*
++ * check if address is covered by socket
++ *
++ * sk: the socket to check against
++ * addr: the address in question (must be != 0)
++ */
++
++static inline
++int __v6_addr_match_socket(const struct sock *sk, struct nx_addr_v6 *nxa)
++{
++ struct nx_info *nxi = sk->sk_nx_info;
++ struct in6_addr *saddr = inet6_rcv_saddr(sk);
++
++ vxdprintk(VXD_CBIT(net, 5),
++ "__v6_addr_in_socket(%p," NXAV6_FMT ") %p:%pI6 %p;%lx",
++ sk, NXAV6(nxa), nxi, saddr, sk->sk_socket,
++ (sk->sk_socket?sk->sk_socket->flags:0));
++
++ if (!ipv6_addr_any(saddr)) { /* direct address match */
++ return v6_addr_match(nxa, saddr, -1);
++ } else if (nxi) { /* match against nx_info */
++ return v6_nx_addr_in_nx_info(nxi, nxa, -1);
++ } else { /* unrestricted any socket */
++ return 1;
++ }
++}
++
++
++/* inet related checks and helpers */
++
++
++struct in_ifaddr;
++struct net_device;
++struct sock;
++
++
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <net/inet_timewait_sock.h>
++
++
++int dev_in_nx_info(struct net_device *, struct nx_info *);
++int v6_dev_in_nx_info(struct net_device *, struct nx_info *);
++int nx_v6_addr_conflict(struct nx_info *, struct nx_info *);
++
++
++
++static inline
++int v6_ifa_in_nx_info(struct inet6_ifaddr *ifa, struct nx_info *nxi)
++{
++ if (!nxi)
++ return 1;
++ if (!ifa)
++ return 0;
++ return v6_addr_in_nx_info(nxi, &ifa->addr, -1);
++}
++
++static inline
++int nx_v6_ifa_visible(struct nx_info *nxi, struct inet6_ifaddr *ifa)
++{
++ vxdprintk(VXD_CBIT(net, 1), "nx_v6_ifa_visible(%p[#%u],%p) %d",
++ nxi, nxi ? nxi->nx_id : 0, ifa,
++ nxi ? v6_ifa_in_nx_info(ifa, nxi) : 0);
++
++ if (!nx_info_flags(nxi, NXF_HIDE_NETIF, 0))
++ return 1;
++ if (v6_ifa_in_nx_info(ifa, nxi))
++ return 1;
++ return 0;
++}
++
++
++struct nx_v6_sock_addr {
++ struct in6_addr saddr; /* Address used for validation */
++ struct in6_addr baddr; /* Address used for socket bind */
++};
++
++static inline
++int v6_map_sock_addr(struct inet_sock *inet, struct sockaddr_in6 *addr,
++ struct nx_v6_sock_addr *nsa)
++{
++ // struct sock *sk = &inet->sk;
++ // struct nx_info *nxi = sk->sk_nx_info;
++ struct in6_addr saddr = addr->sin6_addr;
++ struct in6_addr baddr = saddr;
++
++ nsa->saddr = saddr;
++ nsa->baddr = baddr;
++ return 0;
++}
++
++static inline
++void v6_set_sock_addr(struct inet_sock *inet, struct nx_v6_sock_addr *nsa)
++{
++ // struct sock *sk = &inet->sk;
++ // struct in6_addr *saddr = inet6_rcv_saddr(sk);
++
++ // *saddr = nsa->baddr;
++ // inet->inet_saddr = nsa->baddr;
++}
++
++static inline
++int nx_info_has_v6(struct nx_info *nxi)
++{
++ if (!nxi)
++ return 1;
++ if (NX_IPV6(nxi))
++ return 1;
++ return 0;
++}
++
++#else /* CONFIG_IPV6 */
++
++static inline
++int nx_v6_dev_visible(struct nx_info *n, struct net_device *d)
++{
++ return 1;
++}
++
++
++static inline
++int nx_v6_addr_conflict(struct nx_info *n, uint32_t a, const struct sock *s)
++{
++ return 1;
++}
++
++static inline
++int v6_ifa_in_nx_info(struct in_ifaddr *a, struct nx_info *n)
++{
++ return 1;
++}
++
++static inline
++int nx_info_has_v6(struct nx_info *nxi)
++{
++ return 0;
++}
++
++#endif /* CONFIG_IPV6 */
++
++#define current_nx_info_has_v6() \
++ nx_info_has_v6(current_nx_info())
++
++#else
++#warning duplicate inclusion
++#endif
+diff -NurpP --minimal linux-3.6.10/include/linux/vs_limit.h linux-3.6.10-vs2.3.4.6/include/linux/vs_limit.h
+--- linux-3.6.10/include/linux/vs_limit.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vs_limit.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,140 @@
++#ifndef _VS_LIMIT_H
++#define _VS_LIMIT_H
++
++#include "vserver/limit.h"
++#include "vserver/base.h"
++#include "vserver/context.h"
++#include "vserver/debug.h"
++#include "vserver/context.h"
++#include "vserver/limit_int.h"
++
++
++#define vx_acc_cres(v, d, p, r) \
++ __vx_acc_cres(v, r, d, p, __FILE__, __LINE__)
++
++#define vx_acc_cres_cond(x, d, p, r) \
++ __vx_acc_cres(((x) == vx_current_xid()) ? current_vx_info() : 0, \
++ r, d, p, __FILE__, __LINE__)
++
++
++#define vx_add_cres(v, a, p, r) \
++ __vx_add_cres(v, r, a, p, __FILE__, __LINE__)
++#define vx_sub_cres(v, a, p, r) vx_add_cres(v, -(a), p, r)
++
++#define vx_add_cres_cond(x, a, p, r) \
++ __vx_add_cres(((x) == vx_current_xid()) ? current_vx_info() : 0, \
++ r, a, p, __FILE__, __LINE__)
++#define vx_sub_cres_cond(x, a, p, r) vx_add_cres_cond(x, -(a), p, r)
++
++
++/* process and file limits */
++
++#define vx_nproc_inc(p) \
++ vx_acc_cres((p)->vx_info, 1, p, RLIMIT_NPROC)
++
++#define vx_nproc_dec(p) \
++ vx_acc_cres((p)->vx_info,-1, p, RLIMIT_NPROC)
++
++#define vx_files_inc(f) \
++ vx_acc_cres_cond((f)->f_xid, 1, f, RLIMIT_NOFILE)
++
++#define vx_files_dec(f) \
++ vx_acc_cres_cond((f)->f_xid,-1, f, RLIMIT_NOFILE)
++
++#define vx_locks_inc(l) \
++ vx_acc_cres_cond((l)->fl_xid, 1, l, RLIMIT_LOCKS)
++
++#define vx_locks_dec(l) \
++ vx_acc_cres_cond((l)->fl_xid,-1, l, RLIMIT_LOCKS)
++
++#define vx_openfd_inc(f) \
++ vx_acc_cres(current_vx_info(), 1, (void *)(long)(f), VLIMIT_OPENFD)
++
++#define vx_openfd_dec(f) \
++ vx_acc_cres(current_vx_info(),-1, (void *)(long)(f), VLIMIT_OPENFD)
++
++
++#define vx_cres_avail(v, n, r) \
++ __vx_cres_avail(v, r, n, __FILE__, __LINE__)
++
++
++#define vx_nproc_avail(n) \
++ vx_cres_avail(current_vx_info(), n, RLIMIT_NPROC)
++
++#define vx_files_avail(n) \
++ vx_cres_avail(current_vx_info(), n, RLIMIT_NOFILE)
++
++#define vx_locks_avail(n) \
++ vx_cres_avail(current_vx_info(), n, RLIMIT_LOCKS)
++
++#define vx_openfd_avail(n) \
++ vx_cres_avail(current_vx_info(), n, VLIMIT_OPENFD)
++
++
++/* dentry limits */
++
++#define vx_dentry_inc(d) do { \
++ if ((d)->d_count == 1) \
++ vx_acc_cres(current_vx_info(), 1, d, VLIMIT_DENTRY); \
++ } while (0)
++
++#define vx_dentry_dec(d) do { \
++ if ((d)->d_count == 0) \
++ vx_acc_cres(current_vx_info(),-1, d, VLIMIT_DENTRY); \
++ } while (0)
++
++#define vx_dentry_avail(n) \
++ vx_cres_avail(current_vx_info(), n, VLIMIT_DENTRY)
++
++
++/* socket limits */
++
++#define vx_sock_inc(s) \
++ vx_acc_cres((s)->sk_vx_info, 1, s, VLIMIT_NSOCK)
++
++#define vx_sock_dec(s) \
++ vx_acc_cres((s)->sk_vx_info,-1, s, VLIMIT_NSOCK)
++
++#define vx_sock_avail(n) \
++ vx_cres_avail(current_vx_info(), n, VLIMIT_NSOCK)
++
++
++/* ipc resource limits */
++
++#define vx_ipcmsg_add(v, u, a) \
++ vx_add_cres(v, a, u, RLIMIT_MSGQUEUE)
++
++#define vx_ipcmsg_sub(v, u, a) \
++ vx_sub_cres(v, a, u, RLIMIT_MSGQUEUE)
++
++#define vx_ipcmsg_avail(v, a) \
++ vx_cres_avail(v, a, RLIMIT_MSGQUEUE)
++
++
++#define vx_ipcshm_add(v, k, a) \
++ vx_add_cres(v, a, (void *)(long)(k), VLIMIT_SHMEM)
++
++#define vx_ipcshm_sub(v, k, a) \
++ vx_sub_cres(v, a, (void *)(long)(k), VLIMIT_SHMEM)
++
++#define vx_ipcshm_avail(v, a) \
++ vx_cres_avail(v, a, VLIMIT_SHMEM)
++
++
++#define vx_semary_inc(a) \
++ vx_acc_cres(current_vx_info(), 1, a, VLIMIT_SEMARY)
++
++#define vx_semary_dec(a) \
++ vx_acc_cres(current_vx_info(), -1, a, VLIMIT_SEMARY)
++
++
++#define vx_nsems_add(a,n) \
++ vx_add_cres(current_vx_info(), n, a, VLIMIT_NSEMS)
++
++#define vx_nsems_sub(a,n) \
++ vx_sub_cres(current_vx_info(), n, a, VLIMIT_NSEMS)
++
++
++#else
++#warning duplicate inclusion
++#endif
+diff -NurpP --minimal linux-3.6.10/include/linux/vs_network.h linux-3.6.10-vs2.3.4.6/include/linux/vs_network.h
+--- linux-3.6.10/include/linux/vs_network.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vs_network.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,169 @@
++#ifndef _NX_VS_NETWORK_H
++#define _NX_VS_NETWORK_H
++
++#include "vserver/context.h"
++#include "vserver/network.h"
++#include "vserver/base.h"
++#include "vserver/check.h"
++#include "vserver/debug.h"
++
++#include <linux/sched.h>
++
++
++#define get_nx_info(i) __get_nx_info(i, __FILE__, __LINE__)
++
++static inline struct nx_info *__get_nx_info(struct nx_info *nxi,
++ const char *_file, int _line)
++{
++ if (!nxi)
++ return NULL;
++
++ vxlprintk(VXD_CBIT(nid, 2), "get_nx_info(%p[#%d.%d])",
++ nxi, nxi ? nxi->nx_id : 0,
++ nxi ? atomic_read(&nxi->nx_usecnt) : 0,
++ _file, _line);
++
++ atomic_inc(&nxi->nx_usecnt);
++ return nxi;
++}
++
++
++extern void free_nx_info(struct nx_info *);
++
++#define put_nx_info(i) __put_nx_info(i, __FILE__, __LINE__)
++
++static inline void __put_nx_info(struct nx_info *nxi, const char *_file, int _line)
++{
++ if (!nxi)
++ return;
++
++ vxlprintk(VXD_CBIT(nid, 2), "put_nx_info(%p[#%d.%d])",
++ nxi, nxi ? nxi->nx_id : 0,
++ nxi ? atomic_read(&nxi->nx_usecnt) : 0,
++ _file, _line);
++
++ if (atomic_dec_and_test(&nxi->nx_usecnt))
++ free_nx_info(nxi);
++}
++
++
++#define init_nx_info(p, i) __init_nx_info(p, i, __FILE__, __LINE__)
++
++static inline void __init_nx_info(struct nx_info **nxp, struct nx_info *nxi,
++ const char *_file, int _line)
++{
++ if (nxi) {
++ vxlprintk(VXD_CBIT(nid, 3),
++ "init_nx_info(%p[#%d.%d])",
++ nxi, nxi ? nxi->nx_id : 0,
++ nxi ? atomic_read(&nxi->nx_usecnt) : 0,
++ _file, _line);
++
++ atomic_inc(&nxi->nx_usecnt);
++ }
++ *nxp = nxi;
++}
++
++
++#define set_nx_info(p, i) __set_nx_info(p, i, __FILE__, __LINE__)
++
++static inline void __set_nx_info(struct nx_info **nxp, struct nx_info *nxi,
++ const char *_file, int _line)
++{
++ struct nx_info *nxo;
++
++ if (!nxi)
++ return;
++
++ vxlprintk(VXD_CBIT(nid, 3), "set_nx_info(%p[#%d.%d])",
++ nxi, nxi ? nxi->nx_id : 0,
++ nxi ? atomic_read(&nxi->nx_usecnt) : 0,
++ _file, _line);
++
++ atomic_inc(&nxi->nx_usecnt);
++ nxo = xchg(nxp, nxi);
++ BUG_ON(nxo);
++}
++
++#define clr_nx_info(p) __clr_nx_info(p, __FILE__, __LINE__)
++
++static inline void __clr_nx_info(struct nx_info **nxp,
++ const char *_file, int _line)
++{
++ struct nx_info *nxo;
++
++ nxo = xchg(nxp, NULL);
++ if (!nxo)
++ return;
++
++ vxlprintk(VXD_CBIT(nid, 3), "clr_nx_info(%p[#%d.%d])",
++ nxo, nxo ? nxo->nx_id : 0,
++ nxo ? atomic_read(&nxo->nx_usecnt) : 0,
++ _file, _line);
++
++ if (atomic_dec_and_test(&nxo->nx_usecnt))
++ free_nx_info(nxo);
++}
++
++
++#define claim_nx_info(v, p) __claim_nx_info(v, p, __FILE__, __LINE__)
++
++static inline void __claim_nx_info(struct nx_info *nxi,
++ struct task_struct *task, const char *_file, int _line)
++{
++ vxlprintk(VXD_CBIT(nid, 3), "claim_nx_info(%p[#%d.%d.%d]) %p",
++ nxi, nxi ? nxi->nx_id : 0,
++ nxi?atomic_read(&nxi->nx_usecnt):0,
++ nxi?atomic_read(&nxi->nx_tasks):0,
++ task, _file, _line);
++
++ atomic_inc(&nxi->nx_tasks);
++}
++
++
++extern void unhash_nx_info(struct nx_info *);
++
++#define release_nx_info(v, p) __release_nx_info(v, p, __FILE__, __LINE__)
++
++static inline void __release_nx_info(struct nx_info *nxi,
++ struct task_struct *task, const char *_file, int _line)
++{
++ vxlprintk(VXD_CBIT(nid, 3), "release_nx_info(%p[#%d.%d.%d]) %p",
++ nxi, nxi ? nxi->nx_id : 0,
++ nxi ? atomic_read(&nxi->nx_usecnt) : 0,
++ nxi ? atomic_read(&nxi->nx_tasks) : 0,
++ task, _file, _line);
++
++ might_sleep();
++
++ if (atomic_dec_and_test(&nxi->nx_tasks))
++ unhash_nx_info(nxi);
++}
++
++
++#define task_get_nx_info(i) __task_get_nx_info(i, __FILE__, __LINE__)
++
++static __inline__ struct nx_info *__task_get_nx_info(struct task_struct *p,
++ const char *_file, int _line)
++{
++ struct nx_info *nxi;
++
++ task_lock(p);
++ vxlprintk(VXD_CBIT(nid, 5), "task_get_nx_info(%p)",
++ p, _file, _line);
++ nxi = __get_nx_info(p->nx_info, _file, _line);
++ task_unlock(p);
++ return nxi;
++}
++
++
++static inline void exit_nx_info(struct task_struct *p)
++{
++ if (p->nx_info)
++ release_nx_info(p->nx_info, p);
++}
++
++
++#else
++#warning duplicate inclusion
++#endif
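The vs_network.h hunk above is built around a get/put reference count on nx_info: every accessor bumps nx_usecnt with atomic_inc(), and the object is only freed once atomic_dec_and_test() sees the last reference drop. Below is a minimal user-space sketch of that idiom using C11 atomics; the names (demo_info, demo_get, demo_put) are invented for the illustration and are not part of the patch.

#include <stdatomic.h>
#include <stdlib.h>

/* hypothetical stand-in for struct nx_info */
struct demo_info {
	atomic_int usecnt;	/* like nx_usecnt */
	int id;			/* like nx_id */
};

static struct demo_info *demo_get(struct demo_info *di)
{
	if (!di)
		return NULL;
	atomic_fetch_add(&di->usecnt, 1);	/* mirrors atomic_inc(&nxi->nx_usecnt) */
	return di;
}

static void demo_put(struct demo_info *di)
{
	if (!di)
		return;
	/* mirrors atomic_dec_and_test(): free when the last reference drops */
	if (atomic_fetch_sub(&di->usecnt, 1) == 1)
		free(di);
}

int main(void)
{
	struct demo_info *di = calloc(1, sizeof(*di));

	atomic_init(&di->usecnt, 1);	/* creator holds the first reference */
	di->id = 42;			/* arbitrary example id */
	demo_get(di);			/* second reference */
	demo_put(di);
	demo_put(di);			/* last put frees the object */
	return 0;
}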
+diff -NurpP --minimal linux-3.6.10/include/linux/vs_pid.h linux-3.6.10-vs2.3.4.6/include/linux/vs_pid.h
+--- linux-3.6.10/include/linux/vs_pid.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vs_pid.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,50 @@
++#ifndef _VS_PID_H
++#define _VS_PID_H
++
++#include "vserver/base.h"
++#include "vserver/check.h"
++#include "vserver/context.h"
++#include "vserver/debug.h"
++#include "vserver/pid.h"
++#include <linux/pid_namespace.h>
++
++
++#define VXF_FAKE_INIT (VXF_INFO_INIT | VXF_STATE_INIT)
++
++static inline
++int vx_proc_task_visible(struct task_struct *task)
++{
++ if ((task->pid == 1) &&
++ !vx_flags(VXF_FAKE_INIT, VXF_FAKE_INIT))
++ /* show a blend through init */
++ goto visible;
++ if (vx_check(vx_task_xid(task), VS_WATCH | VS_IDENT))
++ goto visible;
++ return 0;
++visible:
++ return 1;
++}
++
++#define find_task_by_real_pid(pid) find_task_by_pid_ns(pid, &init_pid_ns)
++
++
++static inline
++struct task_struct *vx_get_proc_task(struct inode *inode, struct pid *pid)
++{
++ struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);
++
++ if (task && !vx_proc_task_visible(task)) {
++ vxdprintk(VXD_CBIT(misc, 6),
++ "dropping task (get) %p[#%u,%u] for %p[#%u,%u]",
++ task, task->xid, task->pid,
++ current, current->xid, current->pid);
++ put_task_struct(task);
++ task = NULL;
++ }
++ return task;
++}
++
++
++#else
++#warning duplicate inclusion
++#endif
+diff -NurpP --minimal linux-3.6.10/include/linux/vs_sched.h linux-3.6.10-vs2.3.4.6/include/linux/vs_sched.h
+--- linux-3.6.10/include/linux/vs_sched.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vs_sched.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,40 @@
++#ifndef _VS_SCHED_H
++#define _VS_SCHED_H
++
++#include "vserver/base.h"
++#include "vserver/context.h"
++#include "vserver/sched.h"
++
++
++#define MAX_PRIO_BIAS 20
++#define MIN_PRIO_BIAS -20
++
++static inline
++int vx_adjust_prio(struct task_struct *p, int prio, int max_user)
++{
++ struct vx_info *vxi = p->vx_info;
++
++ if (vxi)
++ prio += vx_cpu(vxi, sched_pc).prio_bias;
++ return prio;
++}
++
++static inline void vx_account_user(struct vx_info *vxi,
++ cputime_t cputime, int nice)
++{
++ if (!vxi)
++ return;
++ vx_cpu(vxi, sched_pc).user_ticks += cputime;
++}
++
++static inline void vx_account_system(struct vx_info *vxi,
++ cputime_t cputime, int idle)
++{
++ if (!vxi)
++ return;
++ vx_cpu(vxi, sched_pc).sys_ticks += cputime;
++}
++
++#else
++#warning duplicate inclusion
++#endif
+diff -NurpP --minimal linux-3.6.10/include/linux/vs_socket.h linux-3.6.10-vs2.3.4.6/include/linux/vs_socket.h
+--- linux-3.6.10/include/linux/vs_socket.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vs_socket.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,67 @@
++#ifndef _VS_SOCKET_H
++#define _VS_SOCKET_H
++
++#include "vserver/debug.h"
++#include "vserver/base.h"
++#include "vserver/cacct.h"
++#include "vserver/context.h"
++#include "vserver/tag.h"
++
++
++/* socket accounting */
++
++#include <linux/socket.h>
++
++static inline int vx_sock_type(int family)
++{
++ switch (family) {
++ case PF_UNSPEC:
++ return VXA_SOCK_UNSPEC;
++ case PF_UNIX:
++ return VXA_SOCK_UNIX;
++ case PF_INET:
++ return VXA_SOCK_INET;
++ case PF_INET6:
++ return VXA_SOCK_INET6;
++ case PF_PACKET:
++ return VXA_SOCK_PACKET;
++ default:
++ return VXA_SOCK_OTHER;
++ }
++}
++
++#define vx_acc_sock(v, f, p, s) \
++ __vx_acc_sock(v, f, p, s, __FILE__, __LINE__)
++
++static inline void __vx_acc_sock(struct vx_info *vxi,
++ int family, int pos, int size, char *file, int line)
++{
++ if (vxi) {
++ int type = vx_sock_type(family);
++
++ atomic_long_inc(&vxi->cacct.sock[type][pos].count);
++ atomic_long_add(size, &vxi->cacct.sock[type][pos].total);
++ }
++}
++
++#define vx_sock_recv(sk, s) \
++ vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 0, s)
++#define vx_sock_send(sk, s) \
++ vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 1, s)
++#define vx_sock_fail(sk, s) \
++ vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 2, s)
++
++
++#define sock_vx_init(s) do { \
++ (s)->sk_xid = 0; \
++ (s)->sk_vx_info = NULL; \
++ } while (0)
++
++#define sock_nx_init(s) do { \
++ (s)->sk_nid = 0; \
++ (s)->sk_nx_info = NULL; \
++ } while (0)
++
++#else
++#warning duplicate inclusion
++#endif
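vs_socket.h folds each socket family into one of six accounting slots (vx_sock_type) and keeps a count/total pair per slot and direction (recv/send/fail). A rough stand-alone sketch of that bookkeeping follows; it uses hypothetical names, plain arrays instead of the atomic counters in _vx_cacct, and assumes a Linux libc for PF_PACKET.

#include <assert.h>
#include <sys/socket.h>

enum { ACC_UNSPEC, ACC_UNIX, ACC_INET, ACC_INET6, ACC_PACKET, ACC_OTHER, ACC_SIZE };

/* user-space analogue of vx_sock_type() */
static int acc_type(int family)
{
	switch (family) {
	case PF_UNSPEC:	return ACC_UNSPEC;
	case PF_UNIX:	return ACC_UNIX;
	case PF_INET:	return ACC_INET;
	case PF_INET6:	return ACC_INET6;
	case PF_PACKET:	return ACC_PACKET;
	default:	return ACC_OTHER;
	}
}

int main(void)
{
	long count[ACC_SIZE][3] = { 0 };	/* [type][recv/send/fail], like cacct.sock */
	long total[ACC_SIZE][3] = { 0 };

	/* account 512 bytes received on an IPv4 socket (pos 0 = recv) */
	count[acc_type(PF_INET)][0]++;
	total[acc_type(PF_INET)][0] += 512;

	assert(count[ACC_INET][0] == 1 && total[ACC_INET][0] == 512);
	return 0;
}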
+diff -NurpP --minimal linux-3.6.10/include/linux/vs_tag.h linux-3.6.10-vs2.3.4.6/include/linux/vs_tag.h
+--- linux-3.6.10/include/linux/vs_tag.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vs_tag.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,47 @@
++#ifndef _VS_TAG_H
++#define _VS_TAG_H
++
++#include <linux/vserver/tag.h>
++
++/* check conditions */
++
++#define DX_ADMIN 0x0001
++#define DX_WATCH 0x0002
++#define DX_HOSTID 0x0008
++
++#define DX_IDENT 0x0010
++
++#define DX_ARG_MASK 0x0010
++
++
++#define dx_task_tag(t) ((t)->tag)
++
++#define dx_current_tag() dx_task_tag(current)
++
++#define dx_check(c, m) __dx_check(dx_current_tag(), c, m)
++
++#define dx_weak_check(c, m) ((m) ? dx_check(c, m) : 1)
++
++
++/*
++ * check current context for ADMIN/WATCH and
++ * optionally against supplied argument
++ */
++static inline int __dx_check(tag_t cid, tag_t id, unsigned int mode)
++{
++ if (mode & DX_ARG_MASK) {
++ if ((mode & DX_IDENT) && (id == cid))
++ return 1;
++ }
++ return (((mode & DX_ADMIN) && (cid == 0)) ||
++ ((mode & DX_WATCH) && (cid == 1)) ||
++ ((mode & DX_HOSTID) && (id == 0)));
++}
++
++struct inode;
++int dx_permission(const struct inode *inode, int mask);
++
++
++#else
++#warning duplicate inclusion
++#endif
+diff -NurpP --minimal linux-3.6.10/include/linux/vs_time.h linux-3.6.10-vs2.3.4.6/include/linux/vs_time.h
+--- linux-3.6.10/include/linux/vs_time.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vs_time.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,19 @@
++#ifndef _VS_TIME_H
++#define _VS_TIME_H
++
++
++/* time faking stuff */
++
++#ifdef CONFIG_VSERVER_VTIME
++
++extern void vx_adjust_timespec(struct timespec *ts);
++extern int vx_settimeofday(const struct timespec *ts);
++
++#else
++#define vx_adjust_timespec(t) do { } while (0)
++#define vx_settimeofday(t) do_settimeofday(t)
++#endif
++
++#else
++#warning duplicate inclusion
++#endif
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/Kbuild linux-3.6.10-vs2.3.4.6/include/linux/vserver/Kbuild
+--- linux-3.6.10/include/linux/vserver/Kbuild 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/Kbuild 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,8 @@
++
++header-y += context_cmd.h network_cmd.h space_cmd.h \
++ cacct_cmd.h cvirt_cmd.h limit_cmd.h dlimit_cmd.h \
++ inode_cmd.h tag_cmd.h sched_cmd.h signal_cmd.h \
++ debug_cmd.h device_cmd.h
++
++header-y += switch.h network.h monitor.h inode.h device.h
++
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/base.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/base.h
+--- linux-3.6.10/include/linux/vserver/base.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/base.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,178 @@
++#ifndef _VX_BASE_H
++#define _VX_BASE_H
++
++
++/* context state changes */
++
++enum {
++ VSC_STARTUP = 1,
++ VSC_SHUTDOWN,
++
++ VSC_NETUP,
++ VSC_NETDOWN,
++};
++
++
++
++#define vx_task_xid(t) ((t)->xid)
++
++#define vx_current_xid() vx_task_xid(current)
++
++#define current_vx_info() (current->vx_info)
++
++
++#define nx_task_nid(t) ((t)->nid)
++
++#define nx_current_nid() nx_task_nid(current)
++
++#define current_nx_info() (current->nx_info)
++
++
++/* generic flag merging */
++
++#define vs_check_flags(v, m, f) (((v) & (m)) ^ (f))
++
++#define vs_mask_flags(v, f, m) (((v) & ~(m)) | ((f) & (m)))
++
++#define vs_mask_mask(v, f, m) (((v) & ~(m)) | ((v) & (f) & (m)))
++
++#define vs_check_bit(v, n) ((v) & (1LL << (n)))
++
++
++/* context flags */
++
++#define __vx_flags(v) ((v) ? (v)->vx_flags : 0)
++
++#define vx_current_flags() __vx_flags(current_vx_info())
++
++#define vx_info_flags(v, m, f) \
++ vs_check_flags(__vx_flags(v), m, f)
++
++#define task_vx_flags(t, m, f) \
++ ((t) && vx_info_flags((t)->vx_info, m, f))
++
++#define vx_flags(m, f) vx_info_flags(current_vx_info(), m, f)
++
++
++/* context caps */
++
++#define __vx_ccaps(v) ((v) ? (v)->vx_ccaps : 0)
++
++#define vx_current_ccaps() __vx_ccaps(current_vx_info())
++
++#define vx_info_ccaps(v, c) (__vx_ccaps(v) & (c))
++
++#define vx_ccaps(c) vx_info_ccaps(current_vx_info(), (c))
++
++
++
++/* network flags */
++
++#define __nx_flags(n) ((n) ? (n)->nx_flags : 0)
++
++#define nx_current_flags() __nx_flags(current_nx_info())
++
++#define nx_info_flags(n, m, f) \
++ vs_check_flags(__nx_flags(n), m, f)
++
++#define task_nx_flags(t, m, f) \
++ ((t) && nx_info_flags((t)->nx_info, m, f))
++
++#define nx_flags(m, f) nx_info_flags(current_nx_info(), m, f)
++
++
++/* network caps */
++
++#define __nx_ncaps(n) ((n) ? (n)->nx_ncaps : 0)
++
++#define nx_current_ncaps() __nx_ncaps(current_nx_info())
++
++#define nx_info_ncaps(n, c) (__nx_ncaps(n) & (c))
++
++#define nx_ncaps(c) nx_info_ncaps(current_nx_info(), c)
++
++
++/* context mask capabilities */
++
++#define __vx_mcaps(v) ((v) ? (v)->vx_ccaps >> 32UL : ~0 )
++
++#define vx_info_mcaps(v, c) (__vx_mcaps(v) & (c))
++
++#define vx_mcaps(c) vx_info_mcaps(current_vx_info(), c)
++
++
++/* context bcap mask */
++
++#define __vx_bcaps(v) ((v)->vx_bcaps)
++
++#define vx_current_bcaps() __vx_bcaps(current_vx_info())
++
++
++/* mask given bcaps */
++
++#define vx_info_mbcaps(v, c) ((v) ? cap_intersect(__vx_bcaps(v), c) : c)
++
++#define vx_mbcaps(c) vx_info_mbcaps(current_vx_info(), c)
++
++
++/* masked cap_bset */
++
++#define vx_info_cap_bset(v) vx_info_mbcaps(v, current->cap_bset)
++
++#define vx_current_cap_bset() vx_info_cap_bset(current_vx_info())
++
++#if 0
++#define vx_info_mbcap(v, b) \
++ (!vx_info_flags(v, VXF_STATE_SETUP, 0) ? \
++ vx_info_bcaps(v, b) : (b))
++
++#define task_vx_mbcap(t, b) \
++ vx_info_mbcap((t)->vx_info, (t)->b)
++
++#define vx_mbcap(b) task_vx_mbcap(current, b)
++#endif
++
++#define vx_cap_raised(v, c, f) cap_raised(vx_info_mbcaps(v, c), f)
++
++#define vx_capable(b, c) (capable(b) || \
++ (cap_raised(current_cap(), b) && vx_ccaps(c)))
++
++#define vx_ns_capable(n, b, c) (ns_capable(n, b) || \
++ (cap_raised(current_cap(), b) && vx_ccaps(c)))
++
++#define nx_capable(b, c) (capable(b) || \
++ (cap_raised(current_cap(), b) && nx_ncaps(c)))
++
++#define vx_task_initpid(t, n) \
++ ((t)->vx_info && \
++ ((t)->vx_info->vx_initpid == (n)))
++
++#define vx_current_initpid(n) vx_task_initpid(current, n)
++
++
++/* context unshare mask */
++
++#define __vx_umask(v) ((v)->vx_umask)
++
++#define vx_current_umask() __vx_umask(current_vx_info())
++
++#define vx_can_unshare(b, f) (capable(b) || \
++ (cap_raised(current_cap(), b) && \
++ !((f) & ~vx_current_umask())))
++
++
++#define __vx_wmask(v) ((v)->vx_wmask)
++
++#define vx_current_wmask() __vx_wmask(current_vx_info())
++
++
++#define __vx_state(v) ((v) ? ((v)->vx_state) : 0)
++
++#define vx_info_state(v, m) (__vx_state(v) & (m))
++
++
++#define __nx_state(n) ((n) ? ((n)->nx_state) : 0)
++
++#define nx_info_state(n, m) (__nx_state(n) & (m))
++
++#endif
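The flag helpers in base.h are pure bit arithmetic: vs_check_flags() yields zero exactly when the bits selected by the mask equal the expected flags, and vs_mask_flags() rewrites only the masked bits. A small self-contained check of that behaviour; the flag values used here are made up for the demo.

#include <assert.h>
#include <stdint.h>

#define vs_check_flags(v, m, f)	(((v) & (m)) ^ (f))
#define vs_mask_flags(v, f, m)	(((v) & ~(m)) | ((f) & (m)))

int main(void)
{
	uint64_t flags = 0x0101;	/* hypothetical flag word */

	/* match: bit 0x0001 is set, so masked value equals expectation -> 0 */
	assert(vs_check_flags(flags, 0x0001, 0x0001) == 0);
	/* mismatch: bit 0x0010 is clear -> non-zero result */
	assert(vs_check_flags(flags, 0x0010, 0x0010) != 0);

	/* set bit 0x0010 without touching anything outside the mask */
	flags = vs_mask_flags(flags, 0x0010, 0x0010);
	assert(flags == 0x0111);
	return 0;
}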
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/cacct.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/cacct.h
+--- linux-3.6.10/include/linux/vserver/cacct.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/cacct.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,15 @@
++#ifndef _VX_CACCT_H
++#define _VX_CACCT_H
++
++
++enum sock_acc_field {
++ VXA_SOCK_UNSPEC = 0,
++ VXA_SOCK_UNIX,
++ VXA_SOCK_INET,
++ VXA_SOCK_INET6,
++ VXA_SOCK_PACKET,
++ VXA_SOCK_OTHER,
++ VXA_SOCK_SIZE /* array size */
++};
++
++#endif /* _VX_CACCT_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/cacct_cmd.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/cacct_cmd.h
+--- linux-3.6.10/include/linux/vserver/cacct_cmd.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/cacct_cmd.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,23 @@
++#ifndef _VX_CACCT_CMD_H
++#define _VX_CACCT_CMD_H
++
++
++/* virtual host info name commands */
++
++#define VCMD_sock_stat VC_CMD(VSTAT, 5, 0)
++
++struct vcmd_sock_stat_v0 {
++ uint32_t field;
++ uint32_t count[3];
++ uint64_t total[3];
++};
++
++
++#ifdef __KERNEL__
++
++#include <linux/compiler.h>
++
++extern int vc_sock_stat(struct vx_info *, void __user *);
++
++#endif /* __KERNEL__ */
++#endif /* _VX_CACCT_CMD_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/cacct_def.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/cacct_def.h
+--- linux-3.6.10/include/linux/vserver/cacct_def.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/cacct_def.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,43 @@
++#ifndef _VX_CACCT_DEF_H
++#define _VX_CACCT_DEF_H
++
++#include <asm/atomic.h>
++#include <linux/vserver/cacct.h>
++
++
++struct _vx_sock_acc {
++ atomic_long_t count;
++ atomic_long_t total;
++};
++
++/* context sub struct */
++
++struct _vx_cacct {
++ struct _vx_sock_acc sock[VXA_SOCK_SIZE][3];
++ atomic_t slab[8];
++ atomic_t page[6][8];
++};
++
++#ifdef CONFIG_VSERVER_DEBUG
++
++static inline void __dump_vx_cacct(struct _vx_cacct *cacct)
++{
++ int i, j;
++
++ printk("\t_vx_cacct:");
++ for (i = 0; i < 6; i++) {
++ struct _vx_sock_acc *ptr = cacct->sock[i];
++
++ printk("\t [%d] =", i);
++ for (j = 0; j < 3; j++) {
++ printk(" [%d] = %8lu, %8lu", j,
++ atomic_long_read(&ptr[j].count),
++ atomic_long_read(&ptr[j].total));
++ }
++ printk("\n");
++ }
++}
++
++#endif
++
++#endif /* _VX_CACCT_DEF_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/cacct_int.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/cacct_int.h
+--- linux-3.6.10/include/linux/vserver/cacct_int.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/cacct_int.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,21 @@
++#ifndef _VX_CACCT_INT_H
++#define _VX_CACCT_INT_H
++
++
++#ifdef __KERNEL__
++
++static inline
++unsigned long vx_sock_count(struct _vx_cacct *cacct, int type, int pos)
++{
++ return atomic_long_read(&cacct->sock[type][pos].count);
++}
++
++
++static inline
++unsigned long vx_sock_total(struct _vx_cacct *cacct, int type, int pos)
++{
++ return atomic_long_read(&cacct->sock[type][pos].total);
++}
++
++#endif /* __KERNEL__ */
++#endif /* _VX_CACCT_INT_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/check.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/check.h
+--- linux-3.6.10/include/linux/vserver/check.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/check.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,89 @@
++#ifndef _VS_CHECK_H
++#define _VS_CHECK_H
++
++
++#define MAX_S_CONTEXT 65535 /* Arbitrary limit */
++
++#ifdef CONFIG_VSERVER_DYNAMIC_IDS
++#define MIN_D_CONTEXT 49152 /* dynamic contexts start here */
++#else
++#define MIN_D_CONTEXT 65536
++#endif
++
++/* check conditions */
++
++#define VS_ADMIN 0x0001
++#define VS_WATCH 0x0002
++#define VS_HIDE 0x0004
++#define VS_HOSTID 0x0008
++
++#define VS_IDENT 0x0010
++#define VS_EQUIV 0x0020
++#define VS_PARENT 0x0040
++#define VS_CHILD 0x0080
++
++#define VS_ARG_MASK 0x00F0
++
++#define VS_DYNAMIC 0x0100
++#define VS_STATIC 0x0200
++
++#define VS_ATR_MASK 0x0F00
++
++#ifdef CONFIG_VSERVER_PRIVACY
++#define VS_ADMIN_P (0)
++#define VS_WATCH_P (0)
++#else
++#define VS_ADMIN_P VS_ADMIN
++#define VS_WATCH_P VS_WATCH
++#endif
++
++#define VS_HARDIRQ 0x1000
++#define VS_SOFTIRQ 0x2000
++#define VS_IRQ 0x4000
++
++#define VS_IRQ_MASK 0xF000
++
++#include <linux/hardirq.h>
++
++/*
++ * check current context for ADMIN/WATCH and
++ * optionally against supplied argument
++ */
++static inline int __vs_check(int cid, int id, unsigned int mode)
++{
++ if (mode & VS_ARG_MASK) {
++ if ((mode & VS_IDENT) && (id == cid))
++ return 1;
++ }
++ if (mode & VS_ATR_MASK) {
++ if ((mode & VS_DYNAMIC) &&
++ (id >= MIN_D_CONTEXT) &&
++ (id <= MAX_S_CONTEXT))
++ return 1;
++ if ((mode & VS_STATIC) &&
++ (id > 1) && (id < MIN_D_CONTEXT))
++ return 1;
++ }
++ if (mode & VS_IRQ_MASK) {
++ if ((mode & VS_IRQ) && unlikely(in_interrupt()))
++ return 1;
++ if ((mode & VS_HARDIRQ) && unlikely(in_irq()))
++ return 1;
++ if ((mode & VS_SOFTIRQ) && unlikely(in_softirq()))
++ return 1;
++ }
++ return (((mode & VS_ADMIN) && (cid == 0)) ||
++ ((mode & VS_WATCH) && (cid == 1)) ||
++ ((mode & VS_HOSTID) && (id == 0)));
++}
++
++#define vx_check(c, m) __vs_check(vx_current_xid(), c, (m) | VS_IRQ)
++
++#define vx_weak_check(c, m) ((m) ? vx_check(c, m) : 1)
++
++
++#define nx_check(c, m) __vs_check(nx_current_nid(), c, m)
++
++#define nx_weak_check(c, m) ((m) ? nx_check(c, m) : 1)
++
++#endif
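In __vs_check(), context 0 acts as the admin context, context 1 as the watch context, and VS_IDENT compares the caller's own id against the argument. The sketch below is a reduced user-space copy of the function with a few worked cases; the IRQ and static/dynamic range handling is dropped because it needs kernel headers.

#include <assert.h>

#define VS_ADMIN	0x0001
#define VS_WATCH	0x0002
#define VS_HOSTID	0x0008
#define VS_IDENT	0x0010
#define VS_ARG_MASK	0x00F0

static int demo_vs_check(int cid, int id, unsigned int mode)
{
	if (mode & VS_ARG_MASK) {
		if ((mode & VS_IDENT) && (id == cid))
			return 1;
	}
	return (((mode & VS_ADMIN) && (cid == 0)) ||
		((mode & VS_WATCH) && (cid == 1)) ||
		((mode & VS_HOSTID) && (id == 0)));
}

int main(void)
{
	/* the admin context (id 0) passes any VS_ADMIN check */
	assert(demo_vs_check(0, 42, VS_ADMIN | VS_IDENT));
	/* an ordinary context only passes when the ids are identical */
	assert(demo_vs_check(42, 42, VS_ADMIN | VS_IDENT));
	assert(!demo_vs_check(42, 43, VS_ADMIN | VS_IDENT));
	return 0;
}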
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/context.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/context.h
+--- linux-3.6.10/include/linux/vserver/context.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/context.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,188 @@
++#ifndef _VX_CONTEXT_H
++#define _VX_CONTEXT_H
++
++#include <linux/types.h>
++#include <linux/capability.h>
++
++
++/* context flags */
++
++#define VXF_INFO_SCHED 0x00000002
++#define VXF_INFO_NPROC 0x00000004
++#define VXF_INFO_PRIVATE 0x00000008
++
++#define VXF_INFO_INIT 0x00000010
++#define VXF_INFO_HIDE 0x00000020
++#define VXF_INFO_ULIMIT 0x00000040
++#define VXF_INFO_NSPACE 0x00000080
++
++#define VXF_SCHED_HARD 0x00000100
++#define VXF_SCHED_PRIO 0x00000200
++#define VXF_SCHED_PAUSE 0x00000400
++
++#define VXF_VIRT_MEM 0x00010000
++#define VXF_VIRT_UPTIME 0x00020000
++#define VXF_VIRT_CPU 0x00040000
++#define VXF_VIRT_LOAD 0x00080000
++#define VXF_VIRT_TIME 0x00100000
++
++#define VXF_HIDE_MOUNT 0x01000000
++/* was VXF_HIDE_NETIF 0x02000000 */
++#define VXF_HIDE_VINFO 0x04000000
++
++#define VXF_STATE_SETUP (1ULL << 32)
++#define VXF_STATE_INIT (1ULL << 33)
++#define VXF_STATE_ADMIN (1ULL << 34)
++
++#define VXF_SC_HELPER (1ULL << 36)
++#define VXF_REBOOT_KILL (1ULL << 37)
++#define VXF_PERSISTENT (1ULL << 38)
++
++#define VXF_FORK_RSS (1ULL << 48)
++#define VXF_PROLIFIC (1ULL << 49)
++
++#define VXF_IGNEG_NICE (1ULL << 52)
++
++#define VXF_ONE_TIME (0x0007ULL << 32)
++
++#define VXF_INIT_SET (VXF_STATE_SETUP | VXF_STATE_INIT | VXF_STATE_ADMIN)
++
++
++/* context migration */
++
++#define VXM_SET_INIT 0x00000001
++#define VXM_SET_REAPER 0x00000002
++
++/* context caps */
++
++#define VXC_SET_UTSNAME 0x00000001
++#define VXC_SET_RLIMIT 0x00000002
++#define VXC_FS_SECURITY 0x00000004
++#define VXC_FS_TRUSTED 0x00000008
++#define VXC_TIOCSTI 0x00000010
++
++/* was VXC_RAW_ICMP 0x00000100 */
++#define VXC_SYSLOG 0x00001000
++#define VXC_OOM_ADJUST 0x00002000
++#define VXC_AUDIT_CONTROL 0x00004000
++
++#define VXC_SECURE_MOUNT 0x00010000
++#define VXC_SECURE_REMOUNT 0x00020000
++#define VXC_BINARY_MOUNT 0x00040000
++
++#define VXC_QUOTA_CTL 0x00100000
++#define VXC_ADMIN_MAPPER 0x00200000
++#define VXC_ADMIN_CLOOP 0x00400000
++
++#define VXC_KTHREAD 0x01000000
++#define VXC_NAMESPACE 0x02000000
++
++
++#ifdef __KERNEL__
++
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/rcupdate.h>
++
++#include "limit_def.h"
++#include "sched_def.h"
++#include "cvirt_def.h"
++#include "cacct_def.h"
++#include "device_def.h"
++
++#define VX_SPACES 2
++
++struct _vx_info_pc {
++ struct _vx_sched_pc sched_pc;
++ struct _vx_cvirt_pc cvirt_pc;
++};
++
++struct _vx_space {
++ unsigned long vx_nsmask; /* assignment mask */
++ struct nsproxy *vx_nsproxy; /* private namespaces */
++ struct fs_struct *vx_fs; /* private namespace fs */
++ const struct cred *vx_cred; /* task credentials */
++};
++
++struct vx_info {
++ struct hlist_node vx_hlist; /* linked list of contexts */
++ xid_t vx_id; /* context id */
++ atomic_t vx_usecnt; /* usage count */
++ atomic_t vx_tasks; /* tasks count */
++ struct vx_info *vx_parent; /* parent context */
++ int vx_state; /* context state */
++
++ struct _vx_space space[VX_SPACES]; /* namespace store */
++
++ uint64_t vx_flags; /* context flags */
++ uint64_t vx_ccaps; /* context caps (vserver) */
++ uint64_t vx_umask; /* unshare mask (guest) */
++ uint64_t vx_wmask; /* warn mask (guest) */
++ kernel_cap_t vx_bcaps; /* bounding caps (system) */
++
++ struct task_struct *vx_reaper; /* guest reaper process */
++ pid_t vx_initpid; /* PID of guest init */
++ int64_t vx_badness_bias; /* OOM points bias */
++
++ struct _vx_limit limit; /* vserver limits */
++ struct _vx_sched sched; /* vserver scheduler */
++ struct _vx_cvirt cvirt; /* virtual/bias stuff */
++ struct _vx_cacct cacct; /* context accounting */
++
++ struct _vx_device dmap; /* default device map targets */
++
++#ifndef CONFIG_SMP
++ struct _vx_info_pc info_pc; /* per cpu data */
++#else
++ struct _vx_info_pc *ptr_pc; /* per cpu array */
++#endif
++
++ wait_queue_head_t vx_wait; /* context exit waitqueue */
++ int reboot_cmd; /* last sys_reboot() cmd */
++ int exit_code; /* last process exit code */
++
++ char vx_name[65]; /* vserver name */
++};
++
++#ifndef CONFIG_SMP
++#define vx_ptr_pc(vxi) (&(vxi)->info_pc)
++#define vx_per_cpu(vxi, v, id) vx_ptr_pc(vxi)->v
++#else
++#define vx_ptr_pc(vxi) ((vxi)->ptr_pc)
++#define vx_per_cpu(vxi, v, id) per_cpu_ptr(vx_ptr_pc(vxi), id)->v
++#endif
++
++#define vx_cpu(vxi, v) vx_per_cpu(vxi, v, smp_processor_id())
++
++
++struct vx_info_save {
++ struct vx_info *vxi;
++ xid_t xid;
++};
++
++
++/* status flags */
++
++#define VXS_HASHED 0x0001
++#define VXS_PAUSED 0x0010
++#define VXS_SHUTDOWN 0x0100
++#define VXS_HELPER 0x1000
++#define VXS_RELEASED 0x8000
++
++
++extern void claim_vx_info(struct vx_info *, struct task_struct *);
++extern void release_vx_info(struct vx_info *, struct task_struct *);
++
++extern struct vx_info *lookup_vx_info(int);
++extern struct vx_info *lookup_or_create_vx_info(int);
++
++extern int get_xid_list(int, unsigned int *, int);
++extern int xid_is_hashed(xid_t);
++
++extern int vx_migrate_task(struct task_struct *, struct vx_info *, int);
++
++extern long vs_state_change(struct vx_info *, unsigned int);
++
++
++#endif /* __KERNEL__ */
++#endif /* _VX_CONTEXT_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/context_cmd.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/context_cmd.h
+--- linux-3.6.10/include/linux/vserver/context_cmd.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/context_cmd.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,162 @@
++#ifndef _VX_CONTEXT_CMD_H
++#define _VX_CONTEXT_CMD_H
++
++
++/* vinfo commands */
++
++#define VCMD_task_xid VC_CMD(VINFO, 1, 0)
++
++#ifdef __KERNEL__
++extern int vc_task_xid(uint32_t);
++
++#endif /* __KERNEL__ */
++
++#define VCMD_vx_info VC_CMD(VINFO, 5, 0)
++
++struct vcmd_vx_info_v0 {
++ uint32_t xid;
++ uint32_t initpid;
++ /* more to come */
++};
++
++#ifdef __KERNEL__
++extern int vc_vx_info(struct vx_info *, void __user *);
++
++#endif /* __KERNEL__ */
++
++#define VCMD_ctx_stat VC_CMD(VSTAT, 0, 0)
++
++struct vcmd_ctx_stat_v0 {
++ uint32_t usecnt;
++ uint32_t tasks;
++ /* more to come */
++};
++
++#ifdef __KERNEL__
++extern int vc_ctx_stat(struct vx_info *, void __user *);
++
++#endif /* __KERNEL__ */
++
++/* context commands */
++
++#define VCMD_ctx_create_v0 VC_CMD(VPROC, 1, 0)
++#define VCMD_ctx_create VC_CMD(VPROC, 1, 1)
++
++struct vcmd_ctx_create {
++ uint64_t flagword;
++};
++
++#define VCMD_ctx_migrate_v0 VC_CMD(PROCMIG, 1, 0)
++#define VCMD_ctx_migrate VC_CMD(PROCMIG, 1, 1)
++
++struct vcmd_ctx_migrate {
++ uint64_t flagword;
++};
++
++#ifdef __KERNEL__
++extern int vc_ctx_create(uint32_t, void __user *);
++extern int vc_ctx_migrate(struct vx_info *, void __user *);
++
++#endif /* __KERNEL__ */
++
++
++/* flag commands */
++
++#define VCMD_get_cflags VC_CMD(FLAGS, 1, 0)
++#define VCMD_set_cflags VC_CMD(FLAGS, 2, 0)
++
++struct vcmd_ctx_flags_v0 {
++ uint64_t flagword;
++ uint64_t mask;
++};
++
++#ifdef __KERNEL__
++extern int vc_get_cflags(struct vx_info *, void __user *);
++extern int vc_set_cflags(struct vx_info *, void __user *);
++
++#endif /* __KERNEL__ */
++
++
++/* context caps commands */
++
++#define VCMD_get_ccaps VC_CMD(FLAGS, 3, 1)
++#define VCMD_set_ccaps VC_CMD(FLAGS, 4, 1)
++
++struct vcmd_ctx_caps_v1 {
++ uint64_t ccaps;
++ uint64_t cmask;
++};
++
++#ifdef __KERNEL__
++extern int vc_get_ccaps(struct vx_info *, void __user *);
++extern int vc_set_ccaps(struct vx_info *, void __user *);
++
++#endif /* __KERNEL__ */
++
++
++/* bcaps commands */
++
++#define VCMD_get_bcaps VC_CMD(FLAGS, 9, 0)
++#define VCMD_set_bcaps VC_CMD(FLAGS, 10, 0)
++
++struct vcmd_bcaps {
++ uint64_t bcaps;
++ uint64_t bmask;
++};
++
++#ifdef __KERNEL__
++extern int vc_get_bcaps(struct vx_info *, void __user *);
++extern int vc_set_bcaps(struct vx_info *, void __user *);
++
++#endif /* __KERNEL__ */
++
++
++/* umask commands */
++
++#define VCMD_get_umask VC_CMD(FLAGS, 13, 0)
++#define VCMD_set_umask VC_CMD(FLAGS, 14, 0)
++
++struct vcmd_umask {
++ uint64_t umask;
++ uint64_t mask;
++};
++
++#ifdef __KERNEL__
++extern int vc_get_umask(struct vx_info *, void __user *);
++extern int vc_set_umask(struct vx_info *, void __user *);
++
++#endif /* __KERNEL__ */
++
++
++/* wmask commands */
++
++#define VCMD_get_wmask VC_CMD(FLAGS, 15, 0)
++#define VCMD_set_wmask VC_CMD(FLAGS, 16, 0)
++
++struct vcmd_wmask {
++ uint64_t wmask;
++ uint64_t mask;
++};
++
++#ifdef __KERNEL__
++extern int vc_get_wmask(struct vx_info *, void __user *);
++extern int vc_set_wmask(struct vx_info *, void __user *);
++
++#endif /* __KERNEL__ */
++
++
++/* OOM badness */
++
++#define VCMD_get_badness VC_CMD(MEMCTRL, 5, 0)
++#define VCMD_set_badness VC_CMD(MEMCTRL, 6, 0)
++
++struct vcmd_badness_v0 {
++ int64_t bias;
++};
++
++#ifdef __KERNEL__
++extern int vc_get_badness(struct vx_info *, void __user *);
++extern int vc_set_badness(struct vx_info *, void __user *);
++
++#endif /* __KERNEL__ */
++#endif /* _VX_CONTEXT_CMD_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/cvirt.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/cvirt.h
+--- linux-3.6.10/include/linux/vserver/cvirt.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/cvirt.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,22 @@
++#ifndef _VX_CVIRT_H
++#define _VX_CVIRT_H
++
++
++#ifdef __KERNEL__
++
++struct timespec;
++
++void vx_vsi_boottime(struct timespec *);
++
++void vx_vsi_uptime(struct timespec *, struct timespec *);
++
++
++struct vx_info;
++
++void vx_update_load(struct vx_info *);
++
++
++int vx_do_syslog(int, char __user *, int);
++
++#endif /* __KERNEL__ */
++#endif /* _VX_CVIRT_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/cvirt_cmd.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/cvirt_cmd.h
+--- linux-3.6.10/include/linux/vserver/cvirt_cmd.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/cvirt_cmd.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,53 @@
++#ifndef _VX_CVIRT_CMD_H
++#define _VX_CVIRT_CMD_H
++
++
++/* virtual host info name commands */
++
++#define VCMD_set_vhi_name VC_CMD(VHOST, 1, 0)
++#define VCMD_get_vhi_name VC_CMD(VHOST, 2, 0)
++
++struct vcmd_vhi_name_v0 {
++ uint32_t field;
++ char name[65];
++};
++
++
++enum vhi_name_field {
++ VHIN_CONTEXT = 0,
++ VHIN_SYSNAME,
++ VHIN_NODENAME,
++ VHIN_RELEASE,
++ VHIN_VERSION,
++ VHIN_MACHINE,
++ VHIN_DOMAINNAME,
++};
++
++
++#ifdef __KERNEL__
++
++#include <linux/compiler.h>
++
++extern int vc_set_vhi_name(struct vx_info *, void __user *);
++extern int vc_get_vhi_name(struct vx_info *, void __user *);
++
++#endif /* __KERNEL__ */
++
++#define VCMD_virt_stat VC_CMD(VSTAT, 3, 0)
++
++struct vcmd_virt_stat_v0 {
++ uint64_t offset;
++ uint64_t uptime;
++ uint32_t nr_threads;
++ uint32_t nr_running;
++ uint32_t nr_uninterruptible;
++ uint32_t nr_onhold;
++ uint32_t nr_forks;
++ uint32_t load[3];
++};
++
++#ifdef __KERNEL__
++extern int vc_virt_stat(struct vx_info *, void __user *);
++
++#endif /* __KERNEL__ */
++#endif /* _VX_CVIRT_CMD_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/cvirt_def.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/cvirt_def.h
+--- linux-3.6.10/include/linux/vserver/cvirt_def.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/cvirt_def.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,80 @@
++#ifndef _VX_CVIRT_DEF_H
++#define _VX_CVIRT_DEF_H
++
++#include <linux/jiffies.h>
++#include <linux/spinlock.h>
++#include <linux/wait.h>
++#include <linux/time.h>
++#include <asm/atomic.h>
++
++
++struct _vx_usage_stat {
++ uint64_t user;
++ uint64_t nice;
++ uint64_t system;
++ uint64_t softirq;
++ uint64_t irq;
++ uint64_t idle;
++ uint64_t iowait;
++};
++
++struct _vx_syslog {
++ wait_queue_head_t log_wait;
++ spinlock_t logbuf_lock; /* lock for the log buffer */
++
++ unsigned long log_start; /* next char to be read by syslog() */
++ unsigned long con_start; /* next char to be sent to consoles */
++ unsigned long log_end; /* most-recently-written-char + 1 */
++ unsigned long logged_chars; /* #chars since last read+clear operation */
++
++ char log_buf[1024];
++};
++
++
++/* context sub struct */
++
++struct _vx_cvirt {
++ atomic_t nr_threads; /* number of current threads */
++ atomic_t nr_running; /* number of running threads */
++ atomic_t nr_uninterruptible; /* number of uninterruptible threads */
++
++ atomic_t nr_onhold; /* processes on hold */
++ uint32_t onhold_last; /* jiffies when put on hold */
++
++ struct timespec bias_ts; /* time offset to the host */
++ struct timespec bias_idle;
++ struct timespec bias_uptime; /* context creation point */
++ uint64_t bias_clock; /* offset in clock_t */
++
++ spinlock_t load_lock; /* lock for the load averages */
++ atomic_t load_updates; /* nr of load updates done so far */
++ uint32_t load_last; /* last time load was calculated */
++ uint32_t load[3]; /* load averages 1,5,15 */
++
++ atomic_t total_forks; /* number of forks so far */
++
++ struct _vx_syslog syslog;
++};
++
++struct _vx_cvirt_pc {
++ struct _vx_usage_stat cpustat;
++};
++
++
++#ifdef CONFIG_VSERVER_DEBUG
++
++static inline void __dump_vx_cvirt(struct _vx_cvirt *cvirt)
++{
++ printk("\t_vx_cvirt:\n");
++ printk("\t threads: %4d, %4d, %4d, %4d\n",
++ atomic_read(&cvirt->nr_threads),
++ atomic_read(&cvirt->nr_running),
++ atomic_read(&cvirt->nr_uninterruptible),
++ atomic_read(&cvirt->nr_onhold));
++ /* add rest here */
++ printk("\t total_forks = %d\n", atomic_read(&cvirt->total_forks));
++}
++
++#endif
++
++#endif /* _VX_CVIRT_DEF_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/debug.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/debug.h
+--- linux-3.6.10/include/linux/vserver/debug.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/debug.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,145 @@
++#ifndef _VX_DEBUG_H
++#define _VX_DEBUG_H
++
++
++#define VXD_CBIT(n, m) (vs_debug_ ## n & (1 << (m)))
++#define VXD_CMIN(n, m) (vs_debug_ ## n > (m))
++#define VXD_MASK(n, m) (vs_debug_ ## n & (m))
++
++#define VXD_DEV(d) (d), (d)->bd_inode->i_ino, \
++ imajor((d)->bd_inode), iminor((d)->bd_inode)
++#define VXF_DEV "%p[%lu,%d:%d]"
++
++#if defined(CONFIG_QUOTES_UTF8)
++#define VS_Q_LQM "\xc2\xbb"
++#define VS_Q_RQM "\xc2\xab"
++#elif defined(CONFIG_QUOTES_ASCII)
++#define VS_Q_LQM "\x27"
++#define VS_Q_RQM "\x27"
++#else
++#define VS_Q_LQM "\xbb"
++#define VS_Q_RQM "\xab"
++#endif
++
++#define VS_Q(f) VS_Q_LQM f VS_Q_RQM
++
++
++#define vxd_path(p) \
++ ({ static char _buffer[PATH_MAX]; \
++ d_path(p, _buffer, sizeof(_buffer)); })
++
++#define vxd_cond_path(n) \
++ ((n) ? vxd_path(&(n)->path) : "<null>" )
++
++
++#ifdef CONFIG_VSERVER_DEBUG
++
++extern unsigned int vs_debug_switch;
++extern unsigned int vs_debug_xid;
++extern unsigned int vs_debug_nid;
++extern unsigned int vs_debug_tag;
++extern unsigned int vs_debug_net;
++extern unsigned int vs_debug_limit;
++extern unsigned int vs_debug_cres;
++extern unsigned int vs_debug_dlim;
++extern unsigned int vs_debug_quota;
++extern unsigned int vs_debug_cvirt;
++extern unsigned int vs_debug_space;
++extern unsigned int vs_debug_perm;
++extern unsigned int vs_debug_misc;
++
++
++#define VX_LOGLEVEL "vxD: "
++#define VX_PROC_FMT "%p: "
++#define VX_PROCESS current
++
++#define vxdprintk(c, f, x...) \
++ do { \
++ if (c) \
++ printk(VX_LOGLEVEL VX_PROC_FMT f "\n", \
++ VX_PROCESS , ##x); \
++ } while (0)
++
++#define vxlprintk(c, f, x...) \
++ do { \
++ if (c) \
++ printk(VX_LOGLEVEL f " @%s:%d\n", x); \
++ } while (0)
++
++#define vxfprintk(c, f, x...) \
++ do { \
++ if (c) \
++ printk(VX_LOGLEVEL f " %s@%s:%d\n", x); \
++ } while (0)
++
++
++struct vx_info;
++
++void dump_vx_info(struct vx_info *, int);
++void dump_vx_info_inactive(int);
++
++#else /* CONFIG_VSERVER_DEBUG */
++
++#define vs_debug_switch 0
++#define vs_debug_xid 0
++#define vs_debug_nid 0
++#define vs_debug_tag 0
++#define vs_debug_net 0
++#define vs_debug_limit 0
++#define vs_debug_cres 0
++#define vs_debug_dlim 0
++#define vs_debug_quota 0
++#define vs_debug_cvirt 0
++#define vs_debug_space 0
++#define vs_debug_perm 0
++#define vs_debug_misc 0
++
++#define vxdprintk(x...) do { } while (0)
++#define vxlprintk(x...) do { } while (0)
++#define vxfprintk(x...) do { } while (0)
++
++#endif /* CONFIG_VSERVER_DEBUG */
++
++
++#ifdef CONFIG_VSERVER_WARN
++
++#define VX_WARNLEVEL KERN_WARNING "vxW: "
++#define VX_WARN_TASK "[" VS_Q("%s") ",%u:#%u|%u|%u] "
++#define VX_WARN_XID "[xid #%u] "
++#define VX_WARN_NID "[nid #%u] "
++#define VX_WARN_TAG "[tag #%u] "
++
++#define vxwprintk(c, f, x...) \
++ do { \
++ if (c) \
++ printk(VX_WARNLEVEL f "\n", ##x); \
++ } while (0)
++
++#else /* CONFIG_VSERVER_WARN */
++
++#define vxwprintk(x...) do { } while (0)
++
++#endif /* CONFIG_VSERVER_WARN */
++
++#define vxwprintk_task(c, f, x...) \
++ vxwprintk(c, VX_WARN_TASK f, \
++ current->comm, current->pid, \
++ current->xid, current->nid, current->tag, ##x)
++#define vxwprintk_xid(c, f, x...) \
++ vxwprintk(c, VX_WARN_XID f, current->xid, x)
++#define vxwprintk_nid(c, f, x...) \
++ vxwprintk(c, VX_WARN_NID f, current->nid, x)
++#define vxwprintk_tag(c, f, x...) \
++ vxwprintk(c, VX_WARN_TAG f, current->tag, x)
++
++#ifdef CONFIG_VSERVER_DEBUG
++#define vxd_assert_lock(l) assert_spin_locked(l)
++#define vxd_assert(c, f, x...) vxlprintk(!(c), \
++ "assertion [" f "] failed.", ##x, __FILE__, __LINE__)
++#else
++#define vxd_assert_lock(l) do { } while (0)
++#define vxd_assert(c, f, x...) do { } while (0)
++#endif
++
++
++#endif /* _VX_DEBUG_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/debug_cmd.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/debug_cmd.h
+--- linux-3.6.10/include/linux/vserver/debug_cmd.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/debug_cmd.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,58 @@
++#ifndef _VX_DEBUG_CMD_H
++#define _VX_DEBUG_CMD_H
++
++
++/* debug commands */
++
++#define VCMD_dump_history VC_CMD(DEBUG, 1, 0)
++
++#define VCMD_read_history VC_CMD(DEBUG, 5, 0)
++#define VCMD_read_monitor VC_CMD(DEBUG, 6, 0)
++
++struct vcmd_read_history_v0 {
++ uint32_t index;
++ uint32_t count;
++ char __user *data;
++};
++
++struct vcmd_read_monitor_v0 {
++ uint32_t index;
++ uint32_t count;
++ char __user *data;
++};
++
++
++#ifdef __KERNEL__
++
++#ifdef CONFIG_COMPAT
++
++#include <asm/compat.h>
++
++struct vcmd_read_history_v0_x32 {
++ uint32_t index;
++ uint32_t count;
++ compat_uptr_t data_ptr;
++};
++
++struct vcmd_read_monitor_v0_x32 {
++ uint32_t index;
++ uint32_t count;
++ compat_uptr_t data_ptr;
++};
++
++#endif /* CONFIG_COMPAT */
++
++extern int vc_dump_history(uint32_t);
++
++extern int vc_read_history(uint32_t, void __user *);
++extern int vc_read_monitor(uint32_t, void __user *);
++
++#ifdef CONFIG_COMPAT
++
++extern int vc_read_history_x32(uint32_t, void __user *);
++extern int vc_read_monitor_x32(uint32_t, void __user *);
++
++#endif /* CONFIG_COMPAT */
++
++#endif /* __KERNEL__ */
++#endif /* _VX_DEBUG_CMD_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/device.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/device.h
+--- linux-3.6.10/include/linux/vserver/device.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/device.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,15 @@
++#ifndef _VX_DEVICE_H
++#define _VX_DEVICE_H
++
++
++#define DATTR_CREATE 0x00000001
++#define DATTR_OPEN 0x00000002
++
++#define DATTR_REMAP 0x00000010
++
++#define DATTR_MASK 0x00000013
++
++
++#else /* _VX_DEVICE_H */
++#warning duplicate inclusion
++#endif /* _VX_DEVICE_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/device_cmd.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/device_cmd.h
+--- linux-3.6.10/include/linux/vserver/device_cmd.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/device_cmd.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,44 @@
++#ifndef _VX_DEVICE_CMD_H
++#define _VX_DEVICE_CMD_H
++
++
++/* device vserver commands */
++
++#define VCMD_set_mapping VC_CMD(DEVICE, 1, 0)
++#define VCMD_unset_mapping VC_CMD(DEVICE, 2, 0)
++
++struct vcmd_set_mapping_v0 {
++ const char __user *device;
++ const char __user *target;
++ uint32_t flags;
++};
++
++
++#ifdef __KERNEL__
++
++#ifdef CONFIG_COMPAT
++
++#include <asm/compat.h>
++
++struct vcmd_set_mapping_v0_x32 {
++ compat_uptr_t device_ptr;
++ compat_uptr_t target_ptr;
++ uint32_t flags;
++};
++
++#endif /* CONFIG_COMPAT */
++
++#include <linux/compiler.h>
++
++extern int vc_set_mapping(struct vx_info *, void __user *);
++extern int vc_unset_mapping(struct vx_info *, void __user *);
++
++#ifdef CONFIG_COMPAT
++
++extern int vc_set_mapping_x32(struct vx_info *, void __user *);
++extern int vc_unset_mapping_x32(struct vx_info *, void __user *);
++
++#endif /* CONFIG_COMPAT */
++
++#endif /* __KERNEL__ */
++#endif /* _VX_DEVICE_CMD_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/device_def.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/device_def.h
+--- linux-3.6.10/include/linux/vserver/device_def.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/device_def.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,17 @@
++#ifndef _VX_DEVICE_DEF_H
++#define _VX_DEVICE_DEF_H
++
++#include <linux/types.h>
++
++struct vx_dmap_target {
++ dev_t target;
++ uint32_t flags;
++};
++
++struct _vx_device {
++#ifdef CONFIG_VSERVER_DEVICE
++ struct vx_dmap_target targets[2];
++#endif
++};
++
++#endif /* _VX_DEVICE_DEF_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/dlimit.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/dlimit.h
+--- linux-3.6.10/include/linux/vserver/dlimit.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/dlimit.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,54 @@
++#ifndef _VX_DLIMIT_H
++#define _VX_DLIMIT_H
++
++#include "switch.h"
++
++
++#ifdef __KERNEL__
++
++/* keep in sync with CDLIM_INFINITY */
++
++#define DLIM_INFINITY (~0ULL)
++
++#include <linux/spinlock.h>
++#include <linux/rcupdate.h>
++
++struct super_block;
++
++struct dl_info {
++ struct hlist_node dl_hlist; /* linked list of contexts */
++ struct rcu_head dl_rcu; /* the rcu head */
++ tag_t dl_tag; /* context tag */
++ atomic_t dl_usecnt; /* usage count */
++ atomic_t dl_refcnt; /* reference count */
++
++ struct super_block *dl_sb; /* associated superblock */
++
++ spinlock_t dl_lock; /* protect the values */
++
++ unsigned long long dl_space_used; /* used space in bytes */
++ unsigned long long dl_space_total; /* maximum space in bytes */
++ unsigned long dl_inodes_used; /* used inodes */
++ unsigned long dl_inodes_total; /* maximum inodes */
++
++ unsigned int dl_nrlmult; /* non root limit mult */
++};
++
++struct rcu_head;
++
++extern void rcu_free_dl_info(struct rcu_head *);
++extern void unhash_dl_info(struct dl_info *);
++
++extern struct dl_info *locate_dl_info(struct super_block *, tag_t);
++
++
++struct kstatfs;
++
++extern void vx_vsi_statfs(struct super_block *, struct kstatfs *);
++
++typedef uint64_t dlsize_t;
++
++#endif /* __KERNEL__ */
++#else /* _VX_DLIMIT_H */
++#warning duplicate inclusion
++#endif /* _VX_DLIMIT_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/dlimit_cmd.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/dlimit_cmd.h
+--- linux-3.6.10/include/linux/vserver/dlimit_cmd.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/dlimit_cmd.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,109 @@
++#ifndef _VX_DLIMIT_CMD_H
++#define _VX_DLIMIT_CMD_H
++
++
++/* dlimit vserver commands */
++
++#define VCMD_add_dlimit VC_CMD(DLIMIT, 1, 0)
++#define VCMD_rem_dlimit VC_CMD(DLIMIT, 2, 0)
++
++#define VCMD_set_dlimit VC_CMD(DLIMIT, 5, 0)
++#define VCMD_get_dlimit VC_CMD(DLIMIT, 6, 0)
++
++struct vcmd_ctx_dlimit_base_v0 {
++ const char __user *name;
++ uint32_t flags;
++};
++
++struct vcmd_ctx_dlimit_v0 {
++ const char __user *name;
++ uint32_t space_used; /* used space in kbytes */
++ uint32_t space_total; /* maximum space in kbytes */
++ uint32_t inodes_used; /* used inodes */
++ uint32_t inodes_total; /* maximum inodes */
++ uint32_t reserved; /* reserved for root in % */
++ uint32_t flags;
++};
++
++#define CDLIM_UNSET ((uint32_t)0UL)
++#define CDLIM_INFINITY ((uint32_t)~0UL)
++#define CDLIM_KEEP ((uint32_t)~1UL)
++
++#define DLIME_UNIT 0
++#define DLIME_KILO 1
++#define DLIME_MEGA 2
++#define DLIME_GIGA 3
++
++#define DLIMF_SHIFT 0x10
++
++#define DLIMS_USED 0
++#define DLIMS_TOTAL 2
++
++static inline
++uint64_t dlimit_space_32to64(uint32_t val, uint32_t flags, int shift)
++{
++ int exp = (flags & DLIMF_SHIFT) ?
++ (flags >> shift) & DLIME_GIGA : DLIME_KILO;
++ return ((uint64_t)val) << (10 * exp);
++}
++
++static inline
++uint32_t dlimit_space_64to32(uint64_t val, uint32_t *flags, int shift)
++{
++ int exp = 0;
++
++ if (*flags & DLIMF_SHIFT) {
++ while (val > (1LL << 32) && (exp < 3)) {
++ val >>= 10;
++ exp++;
++ }
++ *flags &= ~(DLIME_GIGA << shift);
++ *flags |= exp << shift;
++ } else
++ val >>= 10;
++ return val;
++}
++
++#ifdef __KERNEL__
++
++#ifdef CONFIG_COMPAT
++
++#include <asm/compat.h>
++
++struct vcmd_ctx_dlimit_base_v0_x32 {
++ compat_uptr_t name_ptr;
++ uint32_t flags;
++};
++
++struct vcmd_ctx_dlimit_v0_x32 {
++ compat_uptr_t name_ptr;
++ uint32_t space_used; /* used space in kbytes */
++ uint32_t space_total; /* maximum space in kbytes */
++ uint32_t inodes_used; /* used inodes */
++ uint32_t inodes_total; /* maximum inodes */
++ uint32_t reserved; /* reserved for root in % */
++ uint32_t flags;
++};
++
++#endif /* CONFIG_COMPAT */
++
++#include <linux/compiler.h>
++
++extern int vc_add_dlimit(uint32_t, void __user *);
++extern int vc_rem_dlimit(uint32_t, void __user *);
++
++extern int vc_set_dlimit(uint32_t, void __user *);
++extern int vc_get_dlimit(uint32_t, void __user *);
++
++#ifdef CONFIG_COMPAT
++
++extern int vc_add_dlimit_x32(uint32_t, void __user *);
++extern int vc_rem_dlimit_x32(uint32_t, void __user *);
++
++extern int vc_set_dlimit_x32(uint32_t, void __user *);
++extern int vc_get_dlimit_x32(uint32_t, void __user *);
++
++#endif /* CONFIG_COMPAT */
++
++#endif /* __KERNEL__ */
++#endif /* _VX_DLIMIT_CMD_H */
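dlimit_cmd.h packs 64-bit byte counts into 32-bit command fields by storing a unit exponent (kilo/mega/giga) in the flags word when DLIMF_SHIFT is set; without it everything is expressed in kilobytes. A stand-alone round-trip check using copies of the two inline helpers above; the 5 TiB input value is an arbitrary example.

#include <assert.h>
#include <stdint.h>

#define DLIME_KILO	1
#define DLIME_GIGA	3
#define DLIMF_SHIFT	0x10
#define DLIMS_USED	0	/* shift position for the "used" exponent */

/* user-space copies of the helpers from dlimit_cmd.h */
static uint64_t space_32to64(uint32_t val, uint32_t flags, int shift)
{
	int exp = (flags & DLIMF_SHIFT) ?
		(flags >> shift) & DLIME_GIGA : DLIME_KILO;
	return ((uint64_t)val) << (10 * exp);
}

static uint32_t space_64to32(uint64_t val, uint32_t *flags, int shift)
{
	int exp = 0;

	if (*flags & DLIMF_SHIFT) {
		while (val > (1LL << 32) && (exp < 3)) {
			val >>= 10;
			exp++;
		}
		*flags &= ~(DLIME_GIGA << shift);
		*flags |= exp << shift;
	} else
		val >>= 10;
	return val;
}

int main(void)
{
	uint64_t bytes = 5ULL << 40;	/* 5 TiB */
	uint32_t flags = DLIMF_SHIFT;
	uint32_t packed = space_64to32(bytes, &flags, DLIMS_USED);

	/* the chosen exponent (mega here) makes the conversion exact */
	assert(space_32to64(packed, flags, DLIMS_USED) == bytes);
	return 0;
}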
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/global.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/global.h
+--- linux-3.6.10/include/linux/vserver/global.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/global.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,19 @@
++#ifndef _VX_GLOBAL_H
++#define _VX_GLOBAL_H
++
++
++extern atomic_t vx_global_ctotal;
++extern atomic_t vx_global_cactive;
++
++extern atomic_t nx_global_ctotal;
++extern atomic_t nx_global_cactive;
++
++extern atomic_t vs_global_nsproxy;
++extern atomic_t vs_global_fs;
++extern atomic_t vs_global_mnt_ns;
++extern atomic_t vs_global_uts_ns;
++extern atomic_t vs_global_user_ns;
++extern atomic_t vs_global_pid_ns;
++
++
++#endif /* _VX_GLOBAL_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/history.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/history.h
+--- linux-3.6.10/include/linux/vserver/history.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/history.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,197 @@
++#ifndef _VX_HISTORY_H
++#define _VX_HISTORY_H
++
++
++enum {
++ VXH_UNUSED = 0,
++ VXH_THROW_OOPS = 1,
++
++ VXH_GET_VX_INFO,
++ VXH_PUT_VX_INFO,
++ VXH_INIT_VX_INFO,
++ VXH_SET_VX_INFO,
++ VXH_CLR_VX_INFO,
++ VXH_CLAIM_VX_INFO,
++ VXH_RELEASE_VX_INFO,
++ VXH_ALLOC_VX_INFO,
++ VXH_DEALLOC_VX_INFO,
++ VXH_HASH_VX_INFO,
++ VXH_UNHASH_VX_INFO,
++ VXH_LOC_VX_INFO,
++ VXH_LOOKUP_VX_INFO,
++ VXH_CREATE_VX_INFO,
++};
++
++struct _vxhe_vxi {
++ struct vx_info *ptr;
++ unsigned xid;
++ unsigned usecnt;
++ unsigned tasks;
++};
++
++struct _vxhe_set_clr {
++ void *data;
++};
++
++struct _vxhe_loc_lookup {
++ unsigned arg;
++};
++
++struct _vx_hist_entry {
++ void *loc;
++ unsigned short seq;
++ unsigned short type;
++ struct _vxhe_vxi vxi;
++ union {
++ struct _vxhe_set_clr sc;
++ struct _vxhe_loc_lookup ll;
++ };
++};
++
++#ifdef CONFIG_VSERVER_HISTORY
++
++extern unsigned volatile int vxh_active;
++
++struct _vx_hist_entry *vxh_advance(void *loc);
++
++
++static inline
++void __vxh_copy_vxi(struct _vx_hist_entry *entry, struct vx_info *vxi)
++{
++ entry->vxi.ptr = vxi;
++ if (vxi) {
++ entry->vxi.usecnt = atomic_read(&vxi->vx_usecnt);
++ entry->vxi.tasks = atomic_read(&vxi->vx_tasks);
++ entry->vxi.xid = vxi->vx_id;
++ }
++}
++
++
++#define __HERE__ current_text_addr()
++
++#define __VXH_BODY(__type, __data, __here) \
++ struct _vx_hist_entry *entry; \
++ \
++ preempt_disable(); \
++ entry = vxh_advance(__here); \
++ __data; \
++ entry->type = __type; \
++ preempt_enable();
++
++
++ /* pass vxi only */
++
++#define __VXH_SMPL \
++ __vxh_copy_vxi(entry, vxi)
++
++static inline
++void __vxh_smpl(struct vx_info *vxi, int __type, void *__here)
++{
++ __VXH_BODY(__type, __VXH_SMPL, __here)
++}
++
++ /* pass vxi and data (void *) */
++
++#define __VXH_DATA \
++ __vxh_copy_vxi(entry, vxi); \
++ entry->sc.data = data
++
++static inline
++void __vxh_data(struct vx_info *vxi, void *data,
++ int __type, void *__here)
++{
++ __VXH_BODY(__type, __VXH_DATA, __here)
++}
++
++ /* pass vxi and arg (long) */
++
++#define __VXH_LONG \
++ __vxh_copy_vxi(entry, vxi); \
++ entry->ll.arg = arg
++
++static inline
++void __vxh_long(struct vx_info *vxi, long arg,
++ int __type, void *__here)
++{
++ __VXH_BODY(__type, __VXH_LONG, __here)
++}
++
++
++static inline
++void __vxh_throw_oops(void *__here)
++{
++ __VXH_BODY(VXH_THROW_OOPS, {}, __here);
++ /* prevent further acquisition */
++ vxh_active = 0;
++}
++
++
++#define vxh_throw_oops() __vxh_throw_oops(__HERE__);
++
++#define __vxh_get_vx_info(v, h) __vxh_smpl(v, VXH_GET_VX_INFO, h);
++#define __vxh_put_vx_info(v, h) __vxh_smpl(v, VXH_PUT_VX_INFO, h);
++
++#define __vxh_init_vx_info(v, d, h) \
++ __vxh_data(v, d, VXH_INIT_VX_INFO, h);
++#define __vxh_set_vx_info(v, d, h) \
++ __vxh_data(v, d, VXH_SET_VX_INFO, h);
++#define __vxh_clr_vx_info(v, d, h) \
++ __vxh_data(v, d, VXH_CLR_VX_INFO, h);
++
++#define __vxh_claim_vx_info(v, d, h) \
++ __vxh_data(v, d, VXH_CLAIM_VX_INFO, h);
++#define __vxh_release_vx_info(v, d, h) \
++ __vxh_data(v, d, VXH_RELEASE_VX_INFO, h);
++
++#define vxh_alloc_vx_info(v) \
++ __vxh_smpl(v, VXH_ALLOC_VX_INFO, __HERE__);
++#define vxh_dealloc_vx_info(v) \
++ __vxh_smpl(v, VXH_DEALLOC_VX_INFO, __HERE__);
++
++#define vxh_hash_vx_info(v) \
++ __vxh_smpl(v, VXH_HASH_VX_INFO, __HERE__);
++#define vxh_unhash_vx_info(v) \
++ __vxh_smpl(v, VXH_UNHASH_VX_INFO, __HERE__);
++
++#define vxh_loc_vx_info(v, l) \
++ __vxh_long(v, l, VXH_LOC_VX_INFO, __HERE__);
++#define vxh_lookup_vx_info(v, l) \
++ __vxh_long(v, l, VXH_LOOKUP_VX_INFO, __HERE__);
++#define vxh_create_vx_info(v, l) \
++ __vxh_long(v, l, VXH_CREATE_VX_INFO, __HERE__);
++
++extern void vxh_dump_history(void);
++
++
++#else /* CONFIG_VSERVER_HISTORY */
++
++#define __HERE__ 0
++
++#define vxh_throw_oops() do { } while (0)
++
++#define __vxh_get_vx_info(v, h) do { } while (0)
++#define __vxh_put_vx_info(v, h) do { } while (0)
++
++#define __vxh_init_vx_info(v, d, h) do { } while (0)
++#define __vxh_set_vx_info(v, d, h) do { } while (0)
++#define __vxh_clr_vx_info(v, d, h) do { } while (0)
++
++#define __vxh_claim_vx_info(v, d, h) do { } while (0)
++#define __vxh_release_vx_info(v, d, h) do { } while (0)
++
++#define vxh_alloc_vx_info(v) do { } while (0)
++#define vxh_dealloc_vx_info(v) do { } while (0)
++
++#define vxh_hash_vx_info(v) do { } while (0)
++#define vxh_unhash_vx_info(v) do { } while (0)
++
++#define vxh_loc_vx_info(v, l) do { } while (0)
++#define vxh_lookup_vx_info(v, l) do { } while (0)
++#define vxh_create_vx_info(v, l) do { } while (0)
++
++#define vxh_dump_history() do { } while (0)
++
++
++#endif /* CONFIG_VSERVER_HISTORY */
++
++#endif /* _VX_HISTORY_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/inode.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/inode.h
+--- linux-3.6.10/include/linux/vserver/inode.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/inode.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,39 @@
++#ifndef _VX_INODE_H
++#define _VX_INODE_H
++
++
++#define IATTR_TAG 0x01000000
++
++#define IATTR_ADMIN 0x00000001
++#define IATTR_WATCH 0x00000002
++#define IATTR_HIDE 0x00000004
++#define IATTR_FLAGS 0x00000007
++
++#define IATTR_BARRIER 0x00010000
++#define IATTR_IXUNLINK 0x00020000
++#define IATTR_IMMUTABLE 0x00040000
++#define IATTR_COW 0x00080000
++
++#ifdef __KERNEL__
++
++
++#ifdef CONFIG_VSERVER_PROC_SECURE
++#define IATTR_PROC_DEFAULT ( IATTR_ADMIN | IATTR_HIDE )
++#define IATTR_PROC_SYMLINK ( IATTR_ADMIN )
++#else
++#define IATTR_PROC_DEFAULT ( IATTR_ADMIN )
++#define IATTR_PROC_SYMLINK ( IATTR_ADMIN )
++#endif
++
++#define vx_hide_check(c, m) (((m) & IATTR_HIDE) ? vx_check(c, m) : 1)
++
++#endif /* __KERNEL__ */
++
++/* inode ioctls */
++
++#define FIOC_GETXFLG _IOR('x', 5, long)
++#define FIOC_SETXFLG _IOW('x', 6, long)
++
++#else /* _VX_INODE_H */
++#warning duplicate inclusion
++#endif /* _VX_INODE_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/inode_cmd.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/inode_cmd.h
+--- linux-3.6.10/include/linux/vserver/inode_cmd.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/inode_cmd.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,59 @@
++#ifndef _VX_INODE_CMD_H
++#define _VX_INODE_CMD_H
++
++
++/* inode vserver commands */
++
++#define VCMD_get_iattr VC_CMD(INODE, 1, 1)
++#define VCMD_set_iattr VC_CMD(INODE, 2, 1)
++
++#define VCMD_fget_iattr VC_CMD(INODE, 3, 0)
++#define VCMD_fset_iattr VC_CMD(INODE, 4, 0)
++
++struct vcmd_ctx_iattr_v1 {
++ const char __user *name;
++ uint32_t tag;
++ uint32_t flags;
++ uint32_t mask;
++};
++
++struct vcmd_ctx_fiattr_v0 {
++ uint32_t tag;
++ uint32_t flags;
++ uint32_t mask;
++};
++
++
++#ifdef __KERNEL__
++
++
++#ifdef CONFIG_COMPAT
++
++#include <asm/compat.h>
++
++struct vcmd_ctx_iattr_v1_x32 {
++ compat_uptr_t name_ptr;
++ uint32_t tag;
++ uint32_t flags;
++ uint32_t mask;
++};
++
++#endif /* CONFIG_COMPAT */
++
++#include <linux/compiler.h>
++
++extern int vc_get_iattr(void __user *);
++extern int vc_set_iattr(void __user *);
++
++extern int vc_fget_iattr(uint32_t, void __user *);
++extern int vc_fset_iattr(uint32_t, void __user *);
++
++#ifdef CONFIG_COMPAT
++
++extern int vc_get_iattr_x32(void __user *);
++extern int vc_set_iattr_x32(void __user *);
++
++#endif /* CONFIG_COMPAT */
++
++#endif /* __KERNEL__ */
++#endif /* _VX_INODE_CMD_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/limit.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/limit.h
+--- linux-3.6.10/include/linux/vserver/limit.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/limit.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,71 @@
++#ifndef _VX_LIMIT_H
++#define _VX_LIMIT_H
++
++#define VLIMIT_NSOCK 16
++#define VLIMIT_OPENFD 17
++#define VLIMIT_ANON 18
++#define VLIMIT_SHMEM 19
++#define VLIMIT_SEMARY 20
++#define VLIMIT_NSEMS 21
++#define VLIMIT_DENTRY 22
++#define VLIMIT_MAPPED 23
++
++
++#ifdef __KERNEL__
++
++#define VLIM_NOCHECK ((1L << VLIMIT_DENTRY) | (1L << RLIMIT_RSS))
++
++/* keep in sync with CRLIM_INFINITY */
++
++#define VLIM_INFINITY (~0ULL)
++
++#include <asm/atomic.h>
++#include <asm/resource.h>
++
++#ifndef RLIM_INFINITY
++#warning RLIM_INFINITY is undefined
++#endif
++
++#define __rlim_val(l, r, v) ((l)->res[r].v)
++
++#define __rlim_soft(l, r) __rlim_val(l, r, soft)
++#define __rlim_hard(l, r) __rlim_val(l, r, hard)
++
++#define __rlim_rcur(l, r) __rlim_val(l, r, rcur)
++#define __rlim_rmin(l, r) __rlim_val(l, r, rmin)
++#define __rlim_rmax(l, r) __rlim_val(l, r, rmax)
++
++#define __rlim_lhit(l, r) __rlim_val(l, r, lhit)
++#define __rlim_hit(l, r) atomic_inc(&__rlim_lhit(l, r))
++
++typedef atomic_long_t rlim_atomic_t;
++typedef unsigned long rlim_t;
++
++#define __rlim_get(l, r) atomic_long_read(&__rlim_rcur(l, r))
++#define __rlim_set(l, r, v) atomic_long_set(&__rlim_rcur(l, r), v)
++#define __rlim_inc(l, r) atomic_long_inc(&__rlim_rcur(l, r))
++#define __rlim_dec(l, r) atomic_long_dec(&__rlim_rcur(l, r))
++#define __rlim_add(l, r, v) atomic_long_add(v, &__rlim_rcur(l, r))
++#define __rlim_sub(l, r, v) atomic_long_sub(v, &__rlim_rcur(l, r))
++
++
++#if (RLIM_INFINITY == VLIM_INFINITY)
++#define VX_VLIM(r) ((long long)(long)(r))
++#define VX_RLIM(v) ((rlim_t)(v))
++#else
++#define VX_VLIM(r) (((r) == RLIM_INFINITY) \
++ ? VLIM_INFINITY : (long long)(r))
++#define VX_RLIM(v) (((v) == VLIM_INFINITY) \
++ ? RLIM_INFINITY : (rlim_t)(v))
++#endif
++
++struct sysinfo;
++
++void vx_vsi_meminfo(struct sysinfo *);
++void vx_vsi_swapinfo(struct sysinfo *);
++long vx_vsi_cached(struct sysinfo *);
++
++#define NUM_LIMITS 24
++
++#endif /* __KERNEL__ */
++#endif /* _VX_LIMIT_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/limit_cmd.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/limit_cmd.h
+--- linux-3.6.10/include/linux/vserver/limit_cmd.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/limit_cmd.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,71 @@
++#ifndef _VX_LIMIT_CMD_H
++#define _VX_LIMIT_CMD_H
++
++
++/* rlimit vserver commands */
++
++#define VCMD_get_rlimit VC_CMD(RLIMIT, 1, 0)
++#define VCMD_set_rlimit VC_CMD(RLIMIT, 2, 0)
++#define VCMD_get_rlimit_mask VC_CMD(RLIMIT, 3, 0)
++#define VCMD_reset_hits VC_CMD(RLIMIT, 7, 0)
++#define VCMD_reset_minmax VC_CMD(RLIMIT, 9, 0)
++
++struct vcmd_ctx_rlimit_v0 {
++ uint32_t id;
++ uint64_t minimum;
++ uint64_t softlimit;
++ uint64_t maximum;
++};
++
++struct vcmd_ctx_rlimit_mask_v0 {
++ uint32_t minimum;
++ uint32_t softlimit;
++ uint32_t maximum;
++};
++
++#define VCMD_rlimit_stat VC_CMD(VSTAT, 1, 0)
++
++struct vcmd_rlimit_stat_v0 {
++ uint32_t id;
++ uint32_t hits;
++ uint64_t value;
++ uint64_t minimum;
++ uint64_t maximum;
++};
++
++#define CRLIM_UNSET (0ULL)
++#define CRLIM_INFINITY (~0ULL)
++#define CRLIM_KEEP (~1ULL)
++
++#ifdef __KERNEL__
++
++#ifdef CONFIG_IA32_EMULATION
++
++struct vcmd_ctx_rlimit_v0_x32 {
++ uint32_t id;
++ uint64_t minimum;
++ uint64_t softlimit;
++ uint64_t maximum;
++} __attribute__ ((packed));
++
++#endif /* CONFIG_IA32_EMULATION */
++
++#include <linux/compiler.h>
++
++extern int vc_get_rlimit_mask(uint32_t, void __user *);
++extern int vc_get_rlimit(struct vx_info *, void __user *);
++extern int vc_set_rlimit(struct vx_info *, void __user *);
++extern int vc_reset_hits(struct vx_info *, void __user *);
++extern int vc_reset_minmax(struct vx_info *, void __user *);
++
++extern int vc_rlimit_stat(struct vx_info *, void __user *);
++
++#ifdef CONFIG_IA32_EMULATION
++
++extern int vc_get_rlimit_x32(struct vx_info *, void __user *);
++extern int vc_set_rlimit_x32(struct vx_info *, void __user *);
++
++#endif /* CONFIG_IA32_EMULATION */
++
++#endif /* __KERNEL__ */
++#endif /* _VX_LIMIT_CMD_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/limit_def.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/limit_def.h
+--- linux-3.6.10/include/linux/vserver/limit_def.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/limit_def.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,47 @@
++#ifndef _VX_LIMIT_DEF_H
++#define _VX_LIMIT_DEF_H
++
++#include <asm/atomic.h>
++#include <asm/resource.h>
++
++#include "limit.h"
++
++
++struct _vx_res_limit {
++ rlim_t soft; /* Context soft limit */
++ rlim_t hard; /* Context hard limit */
++
++ rlim_atomic_t rcur; /* Current value */
++ rlim_t rmin; /* Context minimum */
++ rlim_t rmax; /* Context maximum */
++
++ atomic_t lhit; /* Limit hits */
++};
++
++/* context sub struct */
++
++struct _vx_limit {
++ struct _vx_res_limit res[NUM_LIMITS];
++};
++
++#ifdef CONFIG_VSERVER_DEBUG
++
++static inline void __dump_vx_limit(struct _vx_limit *limit)
++{
++ int i;
++
++ printk("\t_vx_limit:");
++ for (i = 0; i < NUM_LIMITS; i++) {
++ printk("\t [%2d] = %8lu %8lu/%8lu, %8ld/%8ld, %8d\n",
++ i, (unsigned long)__rlim_get(limit, i),
++ (unsigned long)__rlim_rmin(limit, i),
++ (unsigned long)__rlim_rmax(limit, i),
++ (long)__rlim_soft(limit, i),
++ (long)__rlim_hard(limit, i),
++ atomic_read(&__rlim_lhit(limit, i)));
++ }
++}
++
++#endif
++
++#endif /* _VX_LIMIT_DEF_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/limit_int.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/limit_int.h
+--- linux-3.6.10/include/linux/vserver/limit_int.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/limit_int.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,198 @@
++#ifndef _VX_LIMIT_INT_H
++#define _VX_LIMIT_INT_H
++
++#include "context.h"
++
++#ifdef __KERNEL__
++
++#define VXD_RCRES_COND(r) VXD_CBIT(cres, r)
++#define VXD_RLIMIT_COND(r) VXD_CBIT(limit, r)
++
++extern const char *vlimit_name[NUM_LIMITS];
++
++static inline void __vx_acc_cres(struct vx_info *vxi,
++ int res, int dir, void *_data, char *_file, int _line)
++{
++ if (VXD_RCRES_COND(res))
++ vxlprintk(1, "vx_acc_cres[%5d,%s,%2d]: %5ld%s (%p)",
++ (vxi ? vxi->vx_id : -1), vlimit_name[res], res,
++ (vxi ? (long)__rlim_get(&vxi->limit, res) : 0),
++ (dir > 0) ? "++" : "--", _data, _file, _line);
++ if (!vxi)
++ return;
++
++ if (dir > 0)
++ __rlim_inc(&vxi->limit, res);
++ else
++ __rlim_dec(&vxi->limit, res);
++}
++
++static inline void __vx_add_cres(struct vx_info *vxi,
++ int res, int amount, void *_data, char *_file, int _line)
++{
++ if (VXD_RCRES_COND(res))
++ vxlprintk(1, "vx_add_cres[%5d,%s,%2d]: %5ld += %5d (%p)",
++ (vxi ? vxi->vx_id : -1), vlimit_name[res], res,
++ (vxi ? (long)__rlim_get(&vxi->limit, res) : 0),
++ amount, _data, _file, _line);
++ if (amount == 0)
++ return;
++ if (!vxi)
++ return;
++ __rlim_add(&vxi->limit, res, amount);
++}
++
++static inline
++int __vx_cres_adjust_max(struct _vx_limit *limit, int res, rlim_t value)
++{
++ int cond = (value > __rlim_rmax(limit, res));
++
++ if (cond)
++ __rlim_rmax(limit, res) = value;
++ return cond;
++}
++
++static inline
++int __vx_cres_adjust_min(struct _vx_limit *limit, int res, rlim_t value)
++{
++ int cond = (value < __rlim_rmin(limit, res));
++
++ if (cond)
++ __rlim_rmin(limit, res) = value;
++ return cond;
++}
++
++static inline
++void __vx_cres_fixup(struct _vx_limit *limit, int res, rlim_t value)
++{
++ if (!__vx_cres_adjust_max(limit, res, value))
++ __vx_cres_adjust_min(limit, res, value);
++}
++
++
++/* return values:
++ +1 ... no limit hit
++ -1 ... over soft limit
++ 0 ... over hard limit */
++
++static inline int __vx_cres_avail(struct vx_info *vxi,
++ int res, int num, char *_file, int _line)
++{
++ struct _vx_limit *limit;
++ rlim_t value;
++
++ if (VXD_RLIMIT_COND(res))
++ vxlprintk(1, "vx_cres_avail[%5d,%s,%2d]: %5ld/%5ld > %5ld + %5d",
++ (vxi ? vxi->vx_id : -1), vlimit_name[res], res,
++ (vxi ? (long)__rlim_soft(&vxi->limit, res) : -1),
++ (vxi ? (long)__rlim_hard(&vxi->limit, res) : -1),
++ (vxi ? (long)__rlim_get(&vxi->limit, res) : 0),
++ num, _file, _line);
++ if (!vxi)
++ return 1;
++
++ limit = &vxi->limit;
++ value = __rlim_get(limit, res);
++
++ if (!__vx_cres_adjust_max(limit, res, value))
++ __vx_cres_adjust_min(limit, res, value);
++
++ if (num == 0)
++ return 1;
++
++ if (__rlim_soft(limit, res) == RLIM_INFINITY)
++ return -1;
++ if (value + num <= __rlim_soft(limit, res))
++ return -1;
++
++ if (__rlim_hard(limit, res) == RLIM_INFINITY)
++ return 1;
++ if (value + num <= __rlim_hard(limit, res))
++ return 1;
++
++ __rlim_hit(limit, res);
++ return 0;
++}
++
++
++static const int VLA_RSS[] = { RLIMIT_RSS, VLIMIT_ANON, VLIMIT_MAPPED, 0 };
++
++static inline
++rlim_t __vx_cres_array_sum(struct _vx_limit *limit, const int *array)
++{
++ rlim_t value, sum = 0;
++ int res;
++
++ while ((res = *array++)) {
++ value = __rlim_get(limit, res);
++ __vx_cres_fixup(limit, res, value);
++ sum += value;
++ }
++ return sum;
++}
++
++static inline
++rlim_t __vx_cres_array_fixup(struct _vx_limit *limit, const int *array)
++{
++ rlim_t value = __vx_cres_array_sum(limit, array + 1);
++ int res = *array;
++
++ if (value == __rlim_get(limit, res))
++ return value;
++
++ __rlim_set(limit, res, value);
++ /* now adjust min/max */
++ if (!__vx_cres_adjust_max(limit, res, value))
++ __vx_cres_adjust_min(limit, res, value);
++
++ return value;
++}
++
++static inline int __vx_cres_array_avail(struct vx_info *vxi,
++ const int *array, int num, char *_file, int _line)
++{
++ struct _vx_limit *limit;
++ rlim_t value = 0;
++ int res;
++
++ if (num == 0)
++ return 1;
++ if (!vxi)
++ return 1;
++
++ limit = &vxi->limit;
++ res = *array;
++ value = __vx_cres_array_sum(limit, array + 1);
++
++ __rlim_set(limit, res, value);
++ __vx_cres_fixup(limit, res, value);
++
++ return __vx_cres_avail(vxi, res, num, _file, _line);
++}
++
++
++static inline void vx_limit_fixup(struct _vx_limit *limit, int id)
++{
++ rlim_t value;
++ int res;
++
++ /* complex resources first */
++ if ((id < 0) || (id == RLIMIT_RSS))
++ __vx_cres_array_fixup(limit, VLA_RSS);
++
++ for (res = 0; res < NUM_LIMITS; res++) {
++ if ((id > 0) && (res != id))
++ continue;
++
++ value = __rlim_get(limit, res);
++ __vx_cres_fixup(limit, res, value);
++
++ /* not supposed to happen, maybe warn? */
++ if (__rlim_rmax(limit, res) > __rlim_hard(limit, res))
++ __rlim_rmax(limit, res) = __rlim_hard(limit, res);
++ }
++}
++
++
++#endif /* __KERNEL__ */
++#endif /* _VX_LIMIT_INT_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/monitor.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/monitor.h
+--- linux-3.6.10/include/linux/vserver/monitor.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/monitor.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,96 @@
++#ifndef _VX_MONITOR_H
++#define _VX_MONITOR_H
++
++#include <linux/types.h>
++
++enum {
++ VXM_UNUSED = 0,
++
++ VXM_SYNC = 0x10,
++
++ VXM_UPDATE = 0x20,
++ VXM_UPDATE_1,
++ VXM_UPDATE_2,
++
++ VXM_RQINFO_1 = 0x24,
++ VXM_RQINFO_2,
++
++ VXM_ACTIVATE = 0x40,
++ VXM_DEACTIVATE,
++ VXM_IDLE,
++
++ VXM_HOLD = 0x44,
++ VXM_UNHOLD,
++
++ VXM_MIGRATE = 0x48,
++ VXM_RESCHED,
++
++ /* all other bits are flags */
++ VXM_SCHED = 0x80,
++};
++
++struct _vxm_update_1 {
++ uint32_t tokens_max;
++ uint32_t fill_rate;
++ uint32_t interval;
++};
++
++struct _vxm_update_2 {
++ uint32_t tokens_min;
++ uint32_t fill_rate;
++ uint32_t interval;
++};
++
++struct _vxm_rqinfo_1 {
++ uint16_t running;
++ uint16_t onhold;
++ uint16_t iowait;
++ uint16_t uintr;
++ uint32_t idle_tokens;
++};
++
++struct _vxm_rqinfo_2 {
++ uint32_t norm_time;
++ uint32_t idle_time;
++ uint32_t idle_skip;
++};
++
++struct _vxm_sched {
++ uint32_t tokens;
++ uint32_t norm_time;
++ uint32_t idle_time;
++};
++
++struct _vxm_task {
++ uint16_t pid;
++ uint16_t state;
++};
++
++struct _vxm_event {
++ uint32_t jif;
++ union {
++ uint32_t seq;
++ uint32_t sec;
++ };
++ union {
++ uint32_t tokens;
++ uint32_t nsec;
++ struct _vxm_task tsk;
++ };
++};
++
++struct _vx_mon_entry {
++ uint16_t type;
++ uint16_t xid;
++ union {
++ struct _vxm_event ev;
++ struct _vxm_sched sd;
++ struct _vxm_update_1 u1;
++ struct _vxm_update_2 u2;
++ struct _vxm_rqinfo_1 q1;
++ struct _vxm_rqinfo_2 q2;
++ };
++};
++
++
++#endif /* _VX_MONITOR_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/network.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/network.h
+--- linux-3.6.10/include/linux/vserver/network.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/network.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,148 @@
++#ifndef _VX_NETWORK_H
++#define _VX_NETWORK_H
++
++#include <linux/types.h>
++
++
++#define MAX_N_CONTEXT 65535 /* Arbitrary limit */
++
++
++/* network flags */
++
++#define NXF_INFO_PRIVATE 0x00000008
++
++#define NXF_SINGLE_IP 0x00000100
++#define NXF_LBACK_REMAP 0x00000200
++#define NXF_LBACK_ALLOW 0x00000400
++
++#define NXF_HIDE_NETIF 0x02000000
++#define NXF_HIDE_LBACK 0x04000000
++
++#define NXF_STATE_SETUP (1ULL << 32)
++#define NXF_STATE_ADMIN (1ULL << 34)
++
++#define NXF_SC_HELPER (1ULL << 36)
++#define NXF_PERSISTENT (1ULL << 38)
++
++#define NXF_ONE_TIME (0x0005ULL << 32)
++
++
++#define NXF_INIT_SET (__nxf_init_set())
++
++static inline uint64_t __nxf_init_set(void) {
++ return NXF_STATE_ADMIN
++#ifdef CONFIG_VSERVER_AUTO_LBACK
++ | NXF_LBACK_REMAP
++ | NXF_HIDE_LBACK
++#endif
++#ifdef CONFIG_VSERVER_AUTO_SINGLE
++ | NXF_SINGLE_IP
++#endif
++ | NXF_HIDE_NETIF;
++}
++
++
++/* network caps */
++
++#define NXC_TUN_CREATE 0x00000001
++
++#define NXC_RAW_ICMP 0x00000100
++
++#define NXC_MULTICAST 0x00001000
++
++
++/* address types */
++
++#define NXA_TYPE_IPV4 0x0001
++#define NXA_TYPE_IPV6 0x0002
++
++#define NXA_TYPE_NONE 0x0000
++#define NXA_TYPE_ANY 0x00FF
++
++#define NXA_TYPE_ADDR 0x0010
++#define NXA_TYPE_MASK 0x0020
++#define NXA_TYPE_RANGE 0x0040
++
++#define NXA_MASK_ALL (NXA_TYPE_ADDR | NXA_TYPE_MASK | NXA_TYPE_RANGE)
++
++#define NXA_MOD_BCAST 0x0100
++#define NXA_MOD_LBACK 0x0200
++
++#define NXA_LOOPBACK 0x1000
++
++#define NXA_MASK_BIND (NXA_MASK_ALL | NXA_MOD_BCAST | NXA_MOD_LBACK)
++#define NXA_MASK_SHOW (NXA_MASK_ALL | NXA_LOOPBACK)
++
++#ifdef __KERNEL__
++
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/rcupdate.h>
++#include <linux/in.h>
++#include <linux/in6.h>
++#include <asm/atomic.h>
++
++struct nx_addr_v4 {
++ struct nx_addr_v4 *next;
++ struct in_addr ip[2];
++ struct in_addr mask;
++ uint16_t type;
++ uint16_t flags;
++};
++
++struct nx_addr_v6 {
++ struct nx_addr_v6 *next;
++ struct in6_addr ip;
++ struct in6_addr mask;
++ uint32_t prefix;
++ uint16_t type;
++ uint16_t flags;
++};
++
++struct nx_info {
++ struct hlist_node nx_hlist; /* linked list of nxinfos */
++ nid_t nx_id; /* vnet id */
++ atomic_t nx_usecnt; /* usage count */
++ atomic_t nx_tasks; /* tasks count */
++ int nx_state; /* context state */
++
++ uint64_t nx_flags; /* network flag word */
++ uint64_t nx_ncaps; /* network capabilities */
++
++ struct in_addr v4_lback; /* Loopback address */
++ struct in_addr v4_bcast; /* Broadcast address */
++ struct nx_addr_v4 v4; /* First/Single ipv4 address */
++#ifdef CONFIG_IPV6
++ struct nx_addr_v6 v6; /* First/Single ipv6 address */
++#endif
++ char nx_name[65]; /* network context name */
++};
++
++
++/* status flags */
++
++#define NXS_HASHED 0x0001
++#define NXS_SHUTDOWN 0x0100
++#define NXS_RELEASED 0x8000
++
++extern struct nx_info *lookup_nx_info(int);
++
++extern int get_nid_list(int, unsigned int *, int);
++extern int nid_is_hashed(nid_t);
++
++extern int nx_migrate_task(struct task_struct *, struct nx_info *);
++
++extern long vs_net_change(struct nx_info *, unsigned int);
++
++struct sock;
++
++
++#define NX_IPV4(n) ((n)->v4.type != NXA_TYPE_NONE)
++#ifdef CONFIG_IPV6
++#define NX_IPV6(n) ((n)->v6.type != NXA_TYPE_NONE)
++#else
++#define NX_IPV6(n) (0)
++#endif
++
++#endif /* __KERNEL__ */
++#endif /* _VX_NETWORK_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/network_cmd.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/network_cmd.h
+--- linux-3.6.10/include/linux/vserver/network_cmd.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/network_cmd.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,164 @@
++#ifndef _VX_NETWORK_CMD_H
++#define _VX_NETWORK_CMD_H
++
++
++/* vinfo commands */
++
++#define VCMD_task_nid VC_CMD(VINFO, 2, 0)
++
++#ifdef __KERNEL__
++extern int vc_task_nid(uint32_t);
++
++#endif /* __KERNEL__ */
++
++#define VCMD_nx_info VC_CMD(VINFO, 6, 0)
++
++struct vcmd_nx_info_v0 {
++ uint32_t nid;
++ /* more to come */
++};
++
++#ifdef __KERNEL__
++extern int vc_nx_info(struct nx_info *, void __user *);
++
++#endif /* __KERNEL__ */
++
++#include <linux/in.h>
++#include <linux/in6.h>
++
++#define VCMD_net_create_v0 VC_CMD(VNET, 1, 0)
++#define VCMD_net_create VC_CMD(VNET, 1, 1)
++
++struct vcmd_net_create {
++ uint64_t flagword;
++};
++
++#define VCMD_net_migrate VC_CMD(NETMIG, 1, 0)
++
++#define VCMD_net_add VC_CMD(NETALT, 1, 0)
++#define VCMD_net_remove VC_CMD(NETALT, 2, 0)
++
++struct vcmd_net_addr_v0 {
++ uint16_t type;
++ uint16_t count;
++ struct in_addr ip[4];
++ struct in_addr mask[4];
++};
++
++#define VCMD_net_add_ipv4_v1 VC_CMD(NETALT, 1, 1)
++#define VCMD_net_rem_ipv4_v1 VC_CMD(NETALT, 2, 1)
++
++struct vcmd_net_addr_ipv4_v1 {
++ uint16_t type;
++ uint16_t flags;
++ struct in_addr ip;
++ struct in_addr mask;
++};
++
++#define VCMD_net_add_ipv4 VC_CMD(NETALT, 1, 2)
++#define VCMD_net_rem_ipv4 VC_CMD(NETALT, 2, 2)
++
++struct vcmd_net_addr_ipv4_v2 {
++ uint16_t type;
++ uint16_t flags;
++ struct in_addr ip;
++ struct in_addr ip2;
++ struct in_addr mask;
++};
++
++#define VCMD_net_add_ipv6 VC_CMD(NETALT, 3, 1)
++#define VCMD_net_remove_ipv6 VC_CMD(NETALT, 4, 1)
++
++struct vcmd_net_addr_ipv6_v1 {
++ uint16_t type;
++ uint16_t flags;
++ uint32_t prefix;
++ struct in6_addr ip;
++ struct in6_addr mask;
++};
++
++#define VCMD_add_match_ipv4 VC_CMD(NETALT, 5, 0)
++#define VCMD_get_match_ipv4 VC_CMD(NETALT, 6, 0)
++
++struct vcmd_match_ipv4_v0 {
++ uint16_t type;
++ uint16_t flags;
++ uint16_t parent;
++ uint16_t prefix;
++ struct in_addr ip;
++ struct in_addr ip2;
++ struct in_addr mask;
++};
++
++#define VCMD_add_match_ipv6 VC_CMD(NETALT, 7, 0)
++#define VCMD_get_match_ipv6 VC_CMD(NETALT, 8, 0)
++
++struct vcmd_match_ipv6_v0 {
++ uint16_t type;
++ uint16_t flags;
++ uint16_t parent;
++ uint16_t prefix;
++ struct in6_addr ip;
++ struct in6_addr ip2;
++ struct in6_addr mask;
++};
++
++
++#ifdef __KERNEL__
++extern int vc_net_create(uint32_t, void __user *);
++extern int vc_net_migrate(struct nx_info *, void __user *);
++
++extern int vc_net_add(struct nx_info *, void __user *);
++extern int vc_net_remove(struct nx_info *, void __user *);
++
++extern int vc_net_add_ipv4_v1(struct nx_info *, void __user *);
++extern int vc_net_add_ipv4(struct nx_info *, void __user *);
++
++extern int vc_net_rem_ipv4_v1(struct nx_info *, void __user *);
++extern int vc_net_rem_ipv4(struct nx_info *, void __user *);
++
++extern int vc_net_add_ipv6(struct nx_info *, void __user *);
++extern int vc_net_remove_ipv6(struct nx_info *, void __user *);
++
++extern int vc_add_match_ipv4(struct nx_info *, void __user *);
++extern int vc_get_match_ipv4(struct nx_info *, void __user *);
++
++extern int vc_add_match_ipv6(struct nx_info *, void __user *);
++extern int vc_get_match_ipv6(struct nx_info *, void __user *);
++
++#endif /* __KERNEL__ */
++
++
++/* flag commands */
++
++#define VCMD_get_nflags VC_CMD(FLAGS, 5, 0)
++#define VCMD_set_nflags VC_CMD(FLAGS, 6, 0)
++
++struct vcmd_net_flags_v0 {
++ uint64_t flagword;
++ uint64_t mask;
++};
++
++#ifdef __KERNEL__
++extern int vc_get_nflags(struct nx_info *, void __user *);
++extern int vc_set_nflags(struct nx_info *, void __user *);
++
++#endif /* __KERNEL__ */
++
++
++/* network caps commands */
++
++#define VCMD_get_ncaps VC_CMD(FLAGS, 7, 0)
++#define VCMD_set_ncaps VC_CMD(FLAGS, 8, 0)
++
++struct vcmd_net_caps_v0 {
++ uint64_t ncaps;
++ uint64_t cmask;
++};
++
++#ifdef __KERNEL__
++extern int vc_get_ncaps(struct nx_info *, void __user *);
++extern int vc_set_ncaps(struct nx_info *, void __user *);
++
++#endif /* __KERNEL__ */
++#endif /* _VX_NETWORK_CMD_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/percpu.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/percpu.h
+--- linux-3.6.10/include/linux/vserver/percpu.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/percpu.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,14 @@
++#ifndef _VX_PERCPU_H
++#define _VX_PERCPU_H
++
++#include "cvirt_def.h"
++#include "sched_def.h"
++
++struct _vx_percpu {
++ struct _vx_cvirt_pc cvirt;
++ struct _vx_sched_pc sched;
++};
++
++#define PERCPU_PERCTX (sizeof(struct _vx_percpu))
++
++#endif /* _VX_PERCPU_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/pid.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/pid.h
+--- linux-3.6.10/include/linux/vserver/pid.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/pid.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,51 @@
++#ifndef _VSERVER_PID_H
++#define _VSERVER_PID_H
++
++/* pid faking stuff */
++
++#define vx_info_map_pid(v, p) \
++ __vx_info_map_pid((v), (p), __func__, __FILE__, __LINE__)
++#define vx_info_map_tgid(v,p) vx_info_map_pid(v,p)
++#define vx_map_pid(p) vx_info_map_pid(current_vx_info(), p)
++#define vx_map_tgid(p) vx_map_pid(p)
++
++static inline int __vx_info_map_pid(struct vx_info *vxi, int pid,
++ const char *func, const char *file, int line)
++{
++ if (vx_info_flags(vxi, VXF_INFO_INIT, 0)) {
++ vxfprintk(VXD_CBIT(cvirt, 2),
++ "vx_map_tgid: %p/%llx: %d -> %d",
++ vxi, (long long)vxi->vx_flags, pid,
++ (pid && pid == vxi->vx_initpid) ? 1 : pid,
++ func, file, line);
++ if (pid == 0)
++ return 0;
++ if (pid == vxi->vx_initpid)
++ return 1;
++ }
++ return pid;
++}
++
++#define vx_info_rmap_pid(v, p) \
++ __vx_info_rmap_pid((v), (p), __func__, __FILE__, __LINE__)
++#define vx_rmap_pid(p) vx_info_rmap_pid(current_vx_info(), p)
++#define vx_rmap_tgid(p) vx_rmap_pid(p)
++
++static inline int __vx_info_rmap_pid(struct vx_info *vxi, int pid,
++ const char *func, const char *file, int line)
++{
++ if (vx_info_flags(vxi, VXF_INFO_INIT, 0)) {
++ vxfprintk(VXD_CBIT(cvirt, 2),
++ "vx_rmap_tgid: %p/%llx: %d -> %d",
++ vxi, (long long)vxi->vx_flags, pid,
++ (pid == 1) ? vxi->vx_initpid : pid,
++ func, file, line);
++ if ((pid == 1) && vxi->vx_initpid)
++ return vxi->vx_initpid;
++ if (pid == vxi->vx_initpid)
++ return ~0U;
++ }
++ return pid;
++}
++
++#endif
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/sched.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/sched.h
+--- linux-3.6.10/include/linux/vserver/sched.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/sched.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,23 @@
++#ifndef _VX_SCHED_H
++#define _VX_SCHED_H
++
++
++#ifdef __KERNEL__
++
++struct timespec;
++
++void vx_vsi_uptime(struct timespec *, struct timespec *);
++
++
++struct vx_info;
++
++void vx_update_load(struct vx_info *);
++
++
++void vx_update_sched_param(struct _vx_sched *sched,
++ struct _vx_sched_pc *sched_pc);
++
++#endif /* __KERNEL__ */
++#else /* _VX_SCHED_H */
++#warning duplicate inclusion
++#endif /* _VX_SCHED_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/sched_cmd.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/sched_cmd.h
+--- linux-3.6.10/include/linux/vserver/sched_cmd.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/sched_cmd.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,21 @@
++#ifndef _VX_SCHED_CMD_H
++#define _VX_SCHED_CMD_H
++
++
++struct vcmd_prio_bias {
++ int32_t cpu_id;
++ int32_t prio_bias;
++};
++
++#define VCMD_set_prio_bias VC_CMD(SCHED, 4, 0)
++#define VCMD_get_prio_bias VC_CMD(SCHED, 5, 0)
++
++#ifdef __KERNEL__
++
++#include <linux/compiler.h>
++
++extern int vc_set_prio_bias(struct vx_info *, void __user *);
++extern int vc_get_prio_bias(struct vx_info *, void __user *);
++
++#endif /* __KERNEL__ */
++#endif /* _VX_SCHED_CMD_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/sched_def.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/sched_def.h
+--- linux-3.6.10/include/linux/vserver/sched_def.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/sched_def.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,38 @@
++#ifndef _VX_SCHED_DEF_H
++#define _VX_SCHED_DEF_H
++
++#include <linux/spinlock.h>
++#include <linux/jiffies.h>
++#include <linux/cpumask.h>
++#include <asm/atomic.h>
++#include <asm/param.h>
++
++
++/* context sub struct */
++
++struct _vx_sched {
++ int prio_bias; /* bias offset for priority */
++
++ cpumask_t update; /* CPUs which should update */
++};
++
++struct _vx_sched_pc {
++ int prio_bias; /* bias offset for priority */
++
++ uint64_t user_ticks; /* token tick events */
++ uint64_t sys_ticks; /* token tick events */
++ uint64_t hold_ticks; /* token ticks paused */
++};
++
++
++#ifdef CONFIG_VSERVER_DEBUG
++
++static inline void __dump_vx_sched(struct _vx_sched *sched)
++{
++ printk("\t_vx_sched:\n");
++ printk("\t priority = %4d\n", sched->prio_bias);
++}
++
++#endif
++
++#endif /* _VX_SCHED_DEF_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/signal.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/signal.h
+--- linux-3.6.10/include/linux/vserver/signal.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/signal.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,14 @@
++#ifndef _VX_SIGNAL_H
++#define _VX_SIGNAL_H
++
++
++#ifdef __KERNEL__
++
++struct vx_info;
++
++int vx_info_kill(struct vx_info *, int, int);
++
++#endif /* __KERNEL__ */
++#else /* _VX_SIGNAL_H */
++#warning duplicate inclusion
++#endif /* _VX_SIGNAL_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/signal_cmd.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/signal_cmd.h
+--- linux-3.6.10/include/linux/vserver/signal_cmd.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/signal_cmd.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,43 @@
++#ifndef _VX_SIGNAL_CMD_H
++#define _VX_SIGNAL_CMD_H
++
++
++/* signalling vserver commands */
++
++#define VCMD_ctx_kill VC_CMD(PROCTRL, 1, 0)
++#define VCMD_wait_exit VC_CMD(EVENT, 99, 0)
++
++struct vcmd_ctx_kill_v0 {
++ int32_t pid;
++ int32_t sig;
++};
++
++struct vcmd_wait_exit_v0 {
++ int32_t reboot_cmd;
++ int32_t exit_code;
++};
++
++#ifdef __KERNEL__
++
++extern int vc_ctx_kill(struct vx_info *, void __user *);
++extern int vc_wait_exit(struct vx_info *, void __user *);
++
++#endif /* __KERNEL__ */
++
++/* process alteration commands */
++
++#define VCMD_get_pflags VC_CMD(PROCALT, 5, 0)
++#define VCMD_set_pflags VC_CMD(PROCALT, 6, 0)
++
++struct vcmd_pflags_v0 {
++ uint32_t flagword;
++ uint32_t mask;
++};
++
++#ifdef __KERNEL__
++
++extern int vc_get_pflags(uint32_t pid, void __user *);
++extern int vc_set_pflags(uint32_t pid, void __user *);
++
++#endif /* __KERNEL__ */
++#endif /* _VX_SIGNAL_CMD_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/space.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/space.h
+--- linux-3.6.10/include/linux/vserver/space.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/space.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,12 @@
++#ifndef _VX_SPACE_H
++#define _VX_SPACE_H
++
++#include <linux/types.h>
++
++struct vx_info;
++
++int vx_set_space(struct vx_info *vxi, unsigned long mask, unsigned index);
++
++#else /* _VX_SPACE_H */
++#warning duplicate inclusion
++#endif /* _VX_SPACE_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/space_cmd.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/space_cmd.h
+--- linux-3.6.10/include/linux/vserver/space_cmd.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/space_cmd.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,38 @@
++#ifndef _VX_SPACE_CMD_H
++#define _VX_SPACE_CMD_H
++
++
++#define VCMD_enter_space_v0 VC_CMD(PROCALT, 1, 0)
++#define VCMD_enter_space_v1 VC_CMD(PROCALT, 1, 1)
++#define VCMD_enter_space VC_CMD(PROCALT, 1, 2)
++
++#define VCMD_set_space_v0 VC_CMD(PROCALT, 3, 0)
++#define VCMD_set_space_v1 VC_CMD(PROCALT, 3, 1)
++#define VCMD_set_space VC_CMD(PROCALT, 3, 2)
++
++#define VCMD_get_space_mask_v0 VC_CMD(PROCALT, 4, 0)
++
++#define VCMD_get_space_mask VC_CMD(VSPACE, 0, 1)
++#define VCMD_get_space_default VC_CMD(VSPACE, 1, 0)
++
++
++struct vcmd_space_mask_v1 {
++ uint64_t mask;
++};
++
++struct vcmd_space_mask_v2 {
++ uint64_t mask;
++ uint32_t index;
++};
++
++
++#ifdef __KERNEL__
++
++extern int vc_enter_space_v1(struct vx_info *, void __user *);
++extern int vc_set_space_v1(struct vx_info *, void __user *);
++extern int vc_enter_space(struct vx_info *, void __user *);
++extern int vc_set_space(struct vx_info *, void __user *);
++extern int vc_get_space_mask(void __user *, int);
++
++#endif /* __KERNEL__ */
++#endif /* _VX_SPACE_CMD_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/switch.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/switch.h
+--- linux-3.6.10/include/linux/vserver/switch.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/switch.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,98 @@
++#ifndef _VX_SWITCH_H
++#define _VX_SWITCH_H
++
++#include <linux/types.h>
++
++
++#define VC_CATEGORY(c) (((c) >> 24) & 0x3F)
++#define VC_COMMAND(c) (((c) >> 16) & 0xFF)
++#define VC_VERSION(c) ((c) & 0xFFF)
++
++#define VC_CMD(c, i, v) ((((VC_CAT_ ## c) & 0x3F) << 24) \
++ | (((i) & 0xFF) << 16) | ((v) & 0xFFF))
++
++/*
++
++ Syscall Matrix V2.8
++
++ |VERSION|CREATE |MODIFY |MIGRATE|CONTROL|EXPERIM| |SPECIAL|SPECIAL|
++ |STATS |DESTROY|ALTER |CHANGE |LIMIT |TEST | | | |
++ |INFO |SETUP | |MOVE | | | | | |
++ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
++ SYSTEM |VERSION|VSETUP |VHOST | | | | |DEVICE | |
++ HOST | 00| 01| 02| 03| 04| 05| | 06| 07|
++ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
++ CPU | |VPROC |PROCALT|PROCMIG|PROCTRL| | |SCHED. | |
++ PROCESS| 08| 09| 10| 11| 12| 13| | 14| 15|
++ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
++ MEMORY | | | | |MEMCTRL| | |SWAP | |
++ | 16| 17| 18| 19| 20| 21| | 22| 23|
++ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
++ NETWORK| |VNET |NETALT |NETMIG |NETCTL | | |SERIAL | |
++ | 24| 25| 26| 27| 28| 29| | 30| 31|
++ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
++ DISK | | | |TAGMIG |DLIMIT | | |INODE | |
++ VFS | 32| 33| 34| 35| 36| 37| | 38| 39|
++ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
++ OTHER |VSTAT | | | | | | |VINFO | |
++ | 40| 41| 42| 43| 44| 45| | 46| 47|
++ =======+=======+=======+=======+=======+=======+=======+ +=======+=======+
++ SPECIAL|EVENT | | | |FLAGS | | |VSPACE | |
++ | 48| 49| 50| 51| 52| 53| | 54| 55|
++ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
++ SPECIAL|DEBUG | | | |RLIMIT |SYSCALL| | |COMPAT |
++ | 56| 57| 58| 59| 60|TEST 61| | 62| 63|
++ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
++
++*/
++
++#define VC_CAT_VERSION 0
++
++#define VC_CAT_VSETUP 1
++#define VC_CAT_VHOST 2
++
++#define VC_CAT_DEVICE 6
++
++#define VC_CAT_VPROC 9
++#define VC_CAT_PROCALT 10
++#define VC_CAT_PROCMIG 11
++#define VC_CAT_PROCTRL 12
++
++#define VC_CAT_SCHED 14
++#define VC_CAT_MEMCTRL 20
++
++#define VC_CAT_VNET 25
++#define VC_CAT_NETALT 26
++#define VC_CAT_NETMIG 27
++#define VC_CAT_NETCTRL 28
++
++#define VC_CAT_TAGMIG 35
++#define VC_CAT_DLIMIT 36
++#define VC_CAT_INODE 38
++
++#define VC_CAT_VSTAT 40
++#define VC_CAT_VINFO 46
++#define VC_CAT_EVENT 48
++
++#define VC_CAT_FLAGS 52
++#define VC_CAT_VSPACE 54
++#define VC_CAT_DEBUG 56
++#define VC_CAT_RLIMIT 60
++
++#define VC_CAT_SYSTEST 61
++#define VC_CAT_COMPAT 63
++
++/* query version */
++
++#define VCMD_get_version VC_CMD(VERSION, 0, 0)
++#define VCMD_get_vci VC_CMD(VERSION, 1, 0)
++
++
++#ifdef __KERNEL__
++
++#include <linux/errno.h>
++
++#endif /* __KERNEL__ */
++
++#endif /* _VX_SWITCH_H */
++
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/tag.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/tag.h
+--- linux-3.6.10/include/linux/vserver/tag.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/tag.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,143 @@
++#ifndef _DX_TAG_H
++#define _DX_TAG_H
++
++#include <linux/types.h>
++
++
++#define DX_TAG(in) (IS_TAGGED(in))
++
++
++#ifdef CONFIG_TAG_NFSD
++#define DX_TAG_NFSD 1
++#else
++#define DX_TAG_NFSD 0
++#endif
++
++
++#ifdef CONFIG_TAGGING_NONE
++
++#define MAX_UID 0xFFFFFFFF
++#define MAX_GID 0xFFFFFFFF
++
++#define INOTAG_TAG(cond, uid, gid, tag) (0)
++
++#define TAGINO_UID(cond, uid, tag) (uid)
++#define TAGINO_GID(cond, gid, tag) (gid)
++
++#endif
++
++
++#ifdef CONFIG_TAGGING_GID16
++
++#define MAX_UID 0xFFFFFFFF
++#define MAX_GID 0x0000FFFF
++
++#define INOTAG_TAG(cond, uid, gid, tag) \
++ ((cond) ? (((gid) >> 16) & 0xFFFF) : 0)
++
++#define TAGINO_UID(cond, uid, tag) (uid)
++#define TAGINO_GID(cond, gid, tag) \
++ ((cond) ? (((gid) & 0xFFFF) | ((tag) << 16)) : (gid))
++
++#endif
++
++
++#ifdef CONFIG_TAGGING_ID24
++
++#define MAX_UID 0x00FFFFFF
++#define MAX_GID 0x00FFFFFF
++
++#define INOTAG_TAG(cond, uid, gid, tag) \
++ ((cond) ? ((((uid) >> 16) & 0xFF00) | (((gid) >> 24) & 0xFF)) : 0)
++
++#define TAGINO_UID(cond, uid, tag) \
++ ((cond) ? (((uid) & 0xFFFFFF) | (((tag) & 0xFF00) << 16)) : (uid))
++#define TAGINO_GID(cond, gid, tag) \
++ ((cond) ? (((gid) & 0xFFFFFF) | (((tag) & 0x00FF) << 24)) : (gid))
++
++#endif
++
++
++#ifdef CONFIG_TAGGING_UID16
++
++#define MAX_UID 0x0000FFFF
++#define MAX_GID 0xFFFFFFFF
++
++#define INOTAG_TAG(cond, uid, gid, tag) \
++ ((cond) ? (((uid) >> 16) & 0xFFFF) : 0)
++
++#define TAGINO_UID(cond, uid, tag) \
++ ((cond) ? (((uid) & 0xFFFF) | ((tag) << 16)) : (uid))
++#define TAGINO_GID(cond, gid, tag) (gid)
++
++#endif
++
++
++#ifdef CONFIG_TAGGING_INTERN
++
++#define MAX_UID 0xFFFFFFFF
++#define MAX_GID 0xFFFFFFFF
++
++#define INOTAG_TAG(cond, uid, gid, tag) \
++ ((cond) ? (tag) : 0)
++
++#define TAGINO_UID(cond, uid, tag) (uid)
++#define TAGINO_GID(cond, gid, tag) (gid)
++
++#endif
++
++
++#ifndef CONFIG_TAGGING_NONE
++#define dx_current_fstag(sb) \
++ ((sb)->s_flags & MS_TAGGED ? dx_current_tag() : 0)
++#else
++#define dx_current_fstag(sb) (0)
++#endif
++
++#ifndef CONFIG_TAGGING_INTERN
++#define TAGINO_TAG(cond, tag) (0)
++#else
++#define TAGINO_TAG(cond, tag) ((cond) ? (tag) : 0)
++#endif
++
++#define INOTAG_UID(cond, uid, gid) \
++ ((cond) ? ((uid) & MAX_UID) : (uid))
++#define INOTAG_GID(cond, uid, gid) \
++ ((cond) ? ((gid) & MAX_GID) : (gid))
++
++
++static inline uid_t dx_map_uid(uid_t uid)
++{
++ if ((uid > MAX_UID) && (uid != -1))
++ uid = -2;
++ return (uid & MAX_UID);
++}
++
++static inline gid_t dx_map_gid(gid_t gid)
++{
++ if ((gid > MAX_GID) && (gid != -1))
++ gid = -2;
++ return (gid & MAX_GID);
++}
++
++struct peer_tag {
++ int32_t xid;
++ int32_t nid;
++};
++
++#define dx_notagcheck(sb) ((sb) && ((sb)->s_flags & MS_NOTAGCHECK))
++
++int dx_parse_tag(char *string, tag_t *tag, int remove, int *mnt_flags,
++ unsigned long *flags);
++
++#ifdef CONFIG_PROPAGATE
++
++void __dx_propagate_tag(struct nameidata *nd, struct inode *inode);
++
++#define dx_propagate_tag(n, i) __dx_propagate_tag(n, i)
++
++#else
++#define dx_propagate_tag(n, i) do { } while (0)
++#endif
++
++#endif /* _DX_TAG_H */
+diff -NurpP --minimal linux-3.6.10/include/linux/vserver/tag_cmd.h linux-3.6.10-vs2.3.4.6/include/linux/vserver/tag_cmd.h
+--- linux-3.6.10/include/linux/vserver/tag_cmd.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/linux/vserver/tag_cmd.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,22 @@
++#ifndef _VX_TAG_CMD_H
++#define _VX_TAG_CMD_H
++
++
++/* vinfo commands */
++
++#define VCMD_task_tag VC_CMD(VINFO, 3, 0)
++
++#ifdef __KERNEL__
++extern int vc_task_tag(uint32_t);
++
++#endif /* __KERNEL__ */
++
++/* context commands */
++
++#define VCMD_tag_migrate VC_CMD(TAGMIG, 1, 0)
++
++#ifdef __KERNEL__
++extern int vc_tag_migrate(uint32_t);
++
++#endif /* __KERNEL__ */
++#endif /* _VX_TAG_CMD_H */
+diff -NurpP --minimal linux-3.6.10/include/net/addrconf.h linux-3.6.10-vs2.3.4.6/include/net/addrconf.h
+--- linux-3.6.10/include/net/addrconf.h 2012-10-04 13:27:47.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/net/addrconf.h 2012-10-04 16:47:00.000000000 +0000
+@@ -81,7 +81,8 @@ extern int ipv6_dev_get_saddr(struct n
+ struct net_device *dev,
+ const struct in6_addr *daddr,
+ unsigned int srcprefs,
+- struct in6_addr *saddr);
++ struct in6_addr *saddr,
++ struct nx_info *nxi);
+ extern int ipv6_get_lladdr(struct net_device *dev,
+ struct in6_addr *addr,
+ unsigned char banned_flags);
+diff -NurpP --minimal linux-3.6.10/include/net/af_unix.h linux-3.6.10-vs2.3.4.6/include/net/af_unix.h
+--- linux-3.6.10/include/net/af_unix.h 2012-10-04 13:27:47.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/net/af_unix.h 2012-10-04 16:47:00.000000000 +0000
+@@ -4,6 +4,7 @@
+ #include <linux/socket.h>
+ #include <linux/un.h>
+ #include <linux/mutex.h>
++#include <linux/vs_base.h>
+ #include <net/sock.h>
+
+ extern void unix_inflight(struct file *fp);
+diff -NurpP --minimal linux-3.6.10/include/net/inet_timewait_sock.h linux-3.6.10-vs2.3.4.6/include/net/inet_timewait_sock.h
+--- linux-3.6.10/include/net/inet_timewait_sock.h 2012-03-19 18:47:29.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/net/inet_timewait_sock.h 2012-10-04 16:47:00.000000000 +0000
+@@ -112,6 +112,10 @@ struct inet_timewait_sock {
+ #define tw_net __tw_common.skc_net
+ #define tw_daddr __tw_common.skc_daddr
+ #define tw_rcv_saddr __tw_common.skc_rcv_saddr
++#define tw_xid __tw_common.skc_xid
++#define tw_vx_info __tw_common.skc_vx_info
++#define tw_nid __tw_common.skc_nid
++#define tw_nx_info __tw_common.skc_nx_info
+ int tw_timeout;
+ volatile unsigned char tw_substate;
+ unsigned char tw_rcv_wscale;
+diff -NurpP --minimal linux-3.6.10/include/net/ip6_route.h linux-3.6.10-vs2.3.4.6/include/net/ip6_route.h
+--- linux-3.6.10/include/net/ip6_route.h 2012-10-04 13:27:47.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/net/ip6_route.h 2012-10-04 16:47:00.000000000 +0000
+@@ -97,7 +97,8 @@ extern int ip6_route_get_saddr(struct
+ struct rt6_info *rt,
+ const struct in6_addr *daddr,
+ unsigned int prefs,
+- struct in6_addr *saddr);
++ struct in6_addr *saddr,
++ struct nx_info *nxi);
+
+ extern struct rt6_info *rt6_lookup(struct net *net,
+ const struct in6_addr *daddr,
+diff -NurpP --minimal linux-3.6.10/include/net/route.h linux-3.6.10-vs2.3.4.6/include/net/route.h
+--- linux-3.6.10/include/net/route.h 2012-12-11 11:36:59.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/net/route.h 2012-11-06 17:43:41.000000000 +0000
+@@ -204,6 +204,9 @@ static inline void ip_rt_put(struct rtab
+ dst_release(&rt->dst);
+ }
+
++#include <linux/vs_base.h>
++#include <linux/vs_inet.h>
++
+ #define IPTOS_RT_MASK (IPTOS_TOS_MASK & ~3)
+
+ extern const __u8 ip_tos2prio[16];
+@@ -253,6 +256,9 @@ static inline void ip_route_connect_init
+ protocol, flow_flags, dst, src, dport, sport);
+ }
+
++extern struct rtable *ip_v4_find_src(struct net *net, struct nx_info *,
++ struct flowi4 *);
++
+ static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
+ __be32 dst, __be32 src, u32 tos,
+ int oif, u8 protocol,
+@@ -261,11 +267,25 @@ static inline struct rtable *ip_route_co
+ {
+ struct net *net = sock_net(sk);
+ struct rtable *rt;
++ struct nx_info *nx_info = current_nx_info();
+
+ ip_route_connect_init(fl4, dst, src, tos, oif, protocol,
+ sport, dport, sk, can_sleep);
+
+- if (!dst || !src) {
++ if (sk)
++ nx_info = sk->sk_nx_info;
++
++ vxdprintk(VXD_CBIT(net, 4),
++ "ip_route_connect(%p) %p,%p;%lx",
++ sk, nx_info, sk->sk_socket,
++ (sk->sk_socket?sk->sk_socket->flags:0));
++
++ rt = ip_v4_find_src(net, nx_info, fl4);
++ if (IS_ERR(rt))
++ return rt;
++ ip_rt_put(rt);
++
++ if (!fl4->daddr || !fl4->saddr) {
+ rt = __ip_route_output_key(net, fl4);
+ if (IS_ERR(rt))
+ return rt;
+diff -NurpP --minimal linux-3.6.10/include/net/sock.h linux-3.6.10-vs2.3.4.6/include/net/sock.h
+--- linux-3.6.10/include/net/sock.h 2012-10-04 13:27:47.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/include/net/sock.h 2012-10-04 16:47:00.000000000 +0000
+@@ -171,6 +171,10 @@ struct sock_common {
+ #ifdef CONFIG_NET_NS
+ struct net *skc_net;
+ #endif
++ xid_t skc_xid;
++ struct vx_info *skc_vx_info;
++ nid_t skc_nid;
++ struct nx_info *skc_nx_info;
+ /*
+ * fields between dontcopy_begin/dontcopy_end
+ * are not copied in sock_copy()
+@@ -284,6 +288,10 @@ struct sock {
+ #define sk_bind_node __sk_common.skc_bind_node
+ #define sk_prot __sk_common.skc_prot
+ #define sk_net __sk_common.skc_net
++#define sk_xid __sk_common.skc_xid
++#define sk_vx_info __sk_common.skc_vx_info
++#define sk_nid __sk_common.skc_nid
++#define sk_nx_info __sk_common.skc_nx_info
+ socket_lock_t sk_lock;
+ struct sk_buff_head sk_receive_queue;
+ /*
+diff -NurpP --minimal linux-3.6.10/init/Kconfig linux-3.6.10-vs2.3.4.6/init/Kconfig
+--- linux-3.6.10/init/Kconfig 2012-10-04 13:27:48.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/init/Kconfig 2012-10-04 16:47:00.000000000 +0000
+@@ -624,6 +624,7 @@ config HAVE_UNSTABLE_SCHED_CLOCK
+ menuconfig CGROUPS
+ boolean "Control Group support"
+ depends on EVENTFD
++ default y
+ help
+ This option adds support for grouping sets of processes together, for
+ use with process control subsystems such as Cpusets, CFS, memory
+@@ -889,6 +890,7 @@ config USER_NS
+ bool "User namespace (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on UIDGID_CONVERTED
++ depends on VSERVER_DISABLED
+ select UIDGID_STRICT_TYPE_CHECKS
+
+ default n
+diff -NurpP --minimal linux-3.6.10/init/main.c linux-3.6.10-vs2.3.4.6/init/main.c
+--- linux-3.6.10/init/main.c 2012-12-11 11:36:59.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/init/main.c 2012-11-06 17:43:41.000000000 +0000
+@@ -69,6 +69,7 @@
+ #include <linux/slab.h>
+ #include <linux/perf_event.h>
+ #include <linux/file.h>
++#include <linux/vserver/percpu.h>
+
+ #include <asm/io.h>
+ #include <asm/bugs.h>
+diff -NurpP --minimal linux-3.6.10/ipc/mqueue.c linux-3.6.10-vs2.3.4.6/ipc/mqueue.c
+--- linux-3.6.10/ipc/mqueue.c 2012-10-04 13:27:48.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/ipc/mqueue.c 2012-10-04 16:47:00.000000000 +0000
+@@ -35,6 +35,8 @@
+ #include <linux/ipc_namespace.h>
+ #include <linux/user_namespace.h>
+ #include <linux/slab.h>
++#include <linux/vs_context.h>
++#include <linux/vs_limit.h>
+
+ #include <net/sock.h>
+ #include "util.h"
+@@ -76,6 +78,7 @@ struct mqueue_inode_info {
+ struct pid* notify_owner;
+ struct user_namespace *notify_user_ns;
+ struct user_struct *user; /* user who created, for accounting */
++ struct vx_info *vxi;
+ struct sock *notify_sock;
+ struct sk_buff *notify_cookie;
+
+@@ -235,6 +238,7 @@ static struct inode *mqueue_get_inode(st
+ if (S_ISREG(mode)) {
+ struct mqueue_inode_info *info;
+ unsigned long mq_bytes, mq_treesize;
++ struct vx_info *vxi = current_vx_info();
+
+ inode->i_fop = &mqueue_file_operations;
+ inode->i_size = FILENT_SIZE;
+@@ -248,6 +252,7 @@ static struct inode *mqueue_get_inode(st
+ info->notify_user_ns = NULL;
+ info->qsize = 0;
+ info->user = NULL; /* set when all is ok */
++ info->vxi = NULL;
+ info->msg_tree = RB_ROOT;
+ info->node_cache = NULL;
+ memset(&info->attr, 0, sizeof(info->attr));
+@@ -281,17 +286,20 @@ static struct inode *mqueue_get_inode(st
+
+ spin_lock(&mq_lock);
+ if (u->mq_bytes + mq_bytes < u->mq_bytes ||
+- u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
++ u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE) ||
++ !vx_ipcmsg_avail(vxi, mq_bytes)) {
+ spin_unlock(&mq_lock);
+ /* mqueue_evict_inode() releases info->messages */
+ ret = -EMFILE;
+ goto out_inode;
+ }
+ u->mq_bytes += mq_bytes;
++ vx_ipcmsg_add(vxi, u, mq_bytes);
+ spin_unlock(&mq_lock);
+
+ /* all is ok */
+ info->user = get_uid(u);
++ info->vxi = get_vx_info(vxi);
+ } else if (S_ISDIR(mode)) {
+ inc_nlink(inode);
+ /* Some things misbehave if size == 0 on a directory */
+@@ -395,8 +403,11 @@ static void mqueue_evict_inode(struct in
+
+ user = info->user;
+ if (user) {
++ struct vx_info *vxi = info->vxi;
++
+ spin_lock(&mq_lock);
+ user->mq_bytes -= mq_bytes;
++ vx_ipcmsg_sub(vxi, user, mq_bytes);
+ /*
+ * get_ns_from_inode() ensures that the
+ * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
+@@ -406,6 +417,7 @@ static void mqueue_evict_inode(struct in
+ if (ipc_ns)
+ ipc_ns->mq_queues_count--;
+ spin_unlock(&mq_lock);
++ put_vx_info(vxi);
+ free_uid(user);
+ }
+ if (ipc_ns)
+diff -NurpP --minimal linux-3.6.10/ipc/msg.c linux-3.6.10-vs2.3.4.6/ipc/msg.c
+--- linux-3.6.10/ipc/msg.c 2011-05-22 14:17:59.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/ipc/msg.c 2012-10-04 16:47:00.000000000 +0000
+@@ -37,6 +37,7 @@
+ #include <linux/rwsem.h>
+ #include <linux/nsproxy.h>
+ #include <linux/ipc_namespace.h>
++#include <linux/vs_base.h>
+
+ #include <asm/current.h>
+ #include <asm/uaccess.h>
+@@ -190,6 +191,7 @@ static int newque(struct ipc_namespace *
+
+ msq->q_perm.mode = msgflg & S_IRWXUGO;
+ msq->q_perm.key = key;
++ msq->q_perm.xid = vx_current_xid();
+
+ msq->q_perm.security = NULL;
+ retval = security_msg_queue_alloc(msq);
+diff -NurpP --minimal linux-3.6.10/ipc/namespace.c linux-3.6.10-vs2.3.4.6/ipc/namespace.c
+--- linux-3.6.10/ipc/namespace.c 2012-07-22 21:39:46.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/ipc/namespace.c 2012-10-04 16:47:00.000000000 +0000
+@@ -13,11 +13,12 @@
+ #include <linux/mount.h>
+ #include <linux/user_namespace.h>
+ #include <linux/proc_fs.h>
++#include <linux/vs_base.h>
++#include <linux/vserver/global.h>
+
+ #include "util.h"
+
+-static struct ipc_namespace *create_ipc_ns(struct task_struct *tsk,
+- struct ipc_namespace *old_ns)
++static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns)
+ {
+ struct ipc_namespace *ns;
+ int err;
+@@ -46,19 +47,19 @@ static struct ipc_namespace *create_ipc_
+ ipcns_notify(IPCNS_CREATED);
+ register_ipcns_notifier(ns);
+
+- ns->user_ns = get_user_ns(task_cred_xxx(tsk, user_ns));
++ // ns->user_ns = get_user_ns(task_cred_xxx(tsk, user_ns));
++ ns->user_ns = get_user_ns(user_ns);
+
+ return ns;
+ }
+
+ struct ipc_namespace *copy_ipcs(unsigned long flags,
+- struct task_struct *tsk)
++ struct ipc_namespace *old_ns,
++ struct user_namespace *user_ns)
+ {
+- struct ipc_namespace *ns = tsk->nsproxy->ipc_ns;
+-
+ if (!(flags & CLONE_NEWIPC))
+- return get_ipc_ns(ns);
+- return create_ipc_ns(tsk, ns);
++ return get_ipc_ns(old_ns);
++ return create_ipc_ns(user_ns);
+ }
+
+ /*
+diff -NurpP --minimal linux-3.6.10/ipc/sem.c linux-3.6.10-vs2.3.4.6/ipc/sem.c
+--- linux-3.6.10/ipc/sem.c 2012-01-09 15:14:59.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/ipc/sem.c 2012-10-04 16:47:00.000000000 +0000
+@@ -86,6 +86,8 @@
+ #include <linux/rwsem.h>
+ #include <linux/nsproxy.h>
+ #include <linux/ipc_namespace.h>
++#include <linux/vs_base.h>
++#include <linux/vs_limit.h>
+
+ #include <asm/uaccess.h>
+ #include "util.h"
+@@ -306,6 +308,7 @@ static int newary(struct ipc_namespace *
+
+ sma->sem_perm.mode = (semflg & S_IRWXUGO);
+ sma->sem_perm.key = key;
++ sma->sem_perm.xid = vx_current_xid();
+
+ sma->sem_perm.security = NULL;
+ retval = security_sem_alloc(sma);
+@@ -321,6 +324,9 @@ static int newary(struct ipc_namespace *
+ return id;
+ }
+ ns->used_sems += nsems;
++ /* FIXME: obsoleted? */
++ vx_semary_inc(sma);
++ vx_nsems_add(sma, nsems);
+
+ sma->sem_base = (struct sem *) &sma[1];
+
+@@ -770,6 +776,9 @@ static void freeary(struct ipc_namespace
+
+ wake_up_sem_queue_do(&tasks);
+ ns->used_sems -= sma->sem_nsems;
++ /* FIXME: obsoleted? */
++ vx_nsems_sub(sma, sma->sem_nsems);
++ vx_semary_dec(sma);
+ security_sem_free(sma);
+ ipc_rcu_putref(sma);
+ }
+diff -NurpP --minimal linux-3.6.10/ipc/shm.c linux-3.6.10-vs2.3.4.6/ipc/shm.c
+--- linux-3.6.10/ipc/shm.c 2012-10-04 13:27:48.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/ipc/shm.c 2012-10-04 16:47:00.000000000 +0000
+@@ -39,6 +39,8 @@
+ #include <linux/nsproxy.h>
+ #include <linux/mount.h>
+ #include <linux/ipc_namespace.h>
++#include <linux/vs_context.h>
++#include <linux/vs_limit.h>
+
+ #include <asm/uaccess.h>
+
+@@ -187,7 +189,12 @@ static void shm_open(struct vm_area_stru
+ */
+ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
+ {
+- ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ struct vx_info *vxi = lookup_vx_info(shp->shm_perm.xid);
++ int numpages = (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
++
++ vx_ipcshm_sub(vxi, shp, numpages);
++ ns->shm_tot -= numpages;
++
+ shm_rmid(ns, shp);
+ shm_unlock(shp);
+ if (!is_file_hugepages(shp->shm_file))
+@@ -197,6 +204,7 @@ static void shm_destroy(struct ipc_names
+ shp->mlock_user);
+ fput (shp->shm_file);
+ security_shm_free(shp);
++ put_vx_info(vxi);
+ ipc_rcu_putref(shp);
+ }
+
+@@ -474,11 +482,15 @@ static int newseg(struct ipc_namespace *
+ if (ns->shm_tot + numpages > ns->shm_ctlall)
+ return -ENOSPC;
+
++ if (!vx_ipcshm_avail(current_vx_info(), numpages))
++ return -ENOSPC;
++
+ shp = ipc_rcu_alloc(sizeof(*shp));
+ if (!shp)
+ return -ENOMEM;
+
+ shp->shm_perm.key = key;
++ shp->shm_perm.xid = vx_current_xid();
+ shp->shm_perm.mode = (shmflg & S_IRWXUGO);
+ shp->mlock_user = NULL;
+
+@@ -533,6 +545,7 @@ static int newseg(struct ipc_namespace *
+ ns->shm_tot += numpages;
+ error = shp->shm_perm.id;
+ shm_unlock(shp);
++ vx_ipcshm_add(current_vx_info(), key, numpages);
+ return error;
+
+ no_id:
+diff -NurpP --minimal linux-3.6.10/kernel/Makefile linux-3.6.10-vs2.3.4.6/kernel/Makefile
+--- linux-3.6.10/kernel/Makefile 2012-07-22 21:39:46.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/Makefile 2012-10-04 16:47:00.000000000 +0000
+@@ -24,6 +24,7 @@ endif
+
+ obj-y += sched/
+ obj-y += power/
++obj-y += vserver/
+
+ ifeq ($(CONFIG_CHECKPOINT_RESTORE),y)
+ obj-$(CONFIG_X86) += kcmp.o
+diff -NurpP --minimal linux-3.6.10/kernel/auditsc.c linux-3.6.10-vs2.3.4.6/kernel/auditsc.c
+--- linux-3.6.10/kernel/auditsc.c 2012-07-22 21:39:46.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/auditsc.c 2012-10-04 16:47:00.000000000 +0000
+@@ -2309,7 +2309,7 @@ int audit_set_loginuid(uid_t loginuid)
+ if (task->loginuid != -1)
+ return -EPERM;
+ #else /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
+- if (!capable(CAP_AUDIT_CONTROL))
++ if (!vx_capable(CAP_AUDIT_CONTROL, VXC_AUDIT_CONTROL))
+ return -EPERM;
+ #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
+
+diff -NurpP --minimal linux-3.6.10/kernel/capability.c linux-3.6.10-vs2.3.4.6/kernel/capability.c
+--- linux-3.6.10/kernel/capability.c 2012-07-22 21:39:46.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/capability.c 2012-10-04 16:47:00.000000000 +0000
+@@ -15,6 +15,7 @@
+ #include <linux/syscalls.h>
+ #include <linux/pid_namespace.h>
+ #include <linux/user_namespace.h>
++#include <linux/vs_context.h>
+ #include <asm/uaccess.h>
+
+ /*
+@@ -116,6 +117,7 @@ static int cap_validate_magic(cap_user_h
+ return 0;
+ }
+
++
+ /*
+ * The only thing that can change the capabilities of the current
+ * process is the current process. As such, we can't be in this code
+@@ -349,6 +351,8 @@ bool has_ns_capability_noaudit(struct ta
+ return (ret == 0);
+ }
+
++#include <linux/vserver/base.h>
++
+ /**
+ * has_capability_noaudit - Does a task have a capability (unaudited) in the
+ * initial user ns
+diff -NurpP --minimal linux-3.6.10/kernel/compat.c linux-3.6.10-vs2.3.4.6/kernel/compat.c
+--- linux-3.6.10/kernel/compat.c 2012-07-22 21:39:46.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/compat.c 2012-10-04 16:47:00.000000000 +0000
+@@ -1054,7 +1054,7 @@ asmlinkage long compat_sys_stime(compat_
+ if (err)
+ return err;
+
+- do_settimeofday(&tv);
++ vx_settimeofday(&tv);
+ return 0;
+ }
+
+diff -NurpP --minimal linux-3.6.10/kernel/cred.c linux-3.6.10-vs2.3.4.6/kernel/cred.c
+--- linux-3.6.10/kernel/cred.c 2012-07-22 21:39:46.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/cred.c 2012-10-04 16:47:00.000000000 +0000
+@@ -70,31 +70,6 @@ struct cred init_cred = {
+ #endif
+ };
+
+-static inline void set_cred_subscribers(struct cred *cred, int n)
+-{
+-#ifdef CONFIG_DEBUG_CREDENTIALS
+- atomic_set(&cred->subscribers, n);
+-#endif
+-}
+-
+-static inline int read_cred_subscribers(const struct cred *cred)
+-{
+-#ifdef CONFIG_DEBUG_CREDENTIALS
+- return atomic_read(&cred->subscribers);
+-#else
+- return 0;
+-#endif
+-}
+-
+-static inline void alter_cred_subscribers(const struct cred *_cred, int n)
+-{
+-#ifdef CONFIG_DEBUG_CREDENTIALS
+- struct cred *cred = (struct cred *) _cred;
+-
+- atomic_add(n, &cred->subscribers);
+-#endif
+-}
+-
+ /*
+ * Dispose of the shared task group credentials
+ */
+@@ -284,21 +259,16 @@ error:
+ *
+ * Call commit_creds() or abort_creds() to clean up.
+ */
+-struct cred *prepare_creds(void)
++struct cred *__prepare_creds(const struct cred *old)
+ {
+- struct task_struct *task = current;
+- const struct cred *old;
+ struct cred *new;
+
+- validate_process_creds();
+-
+ new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ kdebug("prepare_creds() alloc %p", new);
+
+- old = task->cred;
+ memcpy(new, old, sizeof(struct cred));
+
+ atomic_set(&new->usage, 1);
+@@ -326,6 +296,13 @@ error:
+ abort_creds(new);
+ return NULL;
+ }
++
++struct cred *prepare_creds(void)
++{
++ validate_process_creds();
++
++ return __prepare_creds(current->cred);
++}
+ EXPORT_SYMBOL(prepare_creds);
+
+ /*
+diff -NurpP --minimal linux-3.6.10/kernel/exit.c linux-3.6.10-vs2.3.4.6/kernel/exit.c
+--- linux-3.6.10/kernel/exit.c 2012-10-04 13:27:48.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/exit.c 2012-12-08 00:20:39.000000000 +0000
+@@ -48,6 +48,10 @@
+ #include <linux/fs_struct.h>
+ #include <linux/init_task.h>
+ #include <linux/perf_event.h>
++#include <linux/vs_limit.h>
++#include <linux/vs_context.h>
++#include <linux/vs_network.h>
++#include <linux/vs_pid.h>
+ #include <trace/events/sched.h>
+ #include <linux/hw_breakpoint.h>
+ #include <linux/oom.h>
+@@ -494,9 +498,11 @@ static void close_files(struct files_str
+ filp_close(file, files);
+ cond_resched();
+ }
++ vx_openfd_dec(i);
+ }
+ i++;
+ set >>= 1;
++ cond_resched();
+ }
+ }
+ }
+@@ -711,15 +717,25 @@ static struct task_struct *find_new_reap
+ __acquires(&tasklist_lock)
+ {
+ struct pid_namespace *pid_ns = task_active_pid_ns(father);
+- struct task_struct *thread;
++ struct vx_info *vxi = task_get_vx_info(father);
++ struct task_struct *thread = father;
++ struct task_struct *reaper;
+
+- thread = father;
+ while_each_thread(father, thread) {
+ if (thread->flags & PF_EXITING)
+ continue;
+ if (unlikely(pid_ns->child_reaper == father))
+ pid_ns->child_reaper = thread;
+- return thread;
++ reaper = thread;
++ goto out_put;
++ }
++
++ reaper = pid_ns->child_reaper;
++ if (vxi) {
++ BUG_ON(!vxi->vx_reaper);
++ if (vxi->vx_reaper != init_pid_ns.child_reaper &&
++ vxi->vx_reaper != father)
++ reaper = vxi->vx_reaper;
+ }
+
+ if (unlikely(pid_ns->child_reaper == father)) {
+@@ -757,7 +773,9 @@ static struct task_struct *find_new_reap
+ }
+ }
+
+- return pid_ns->child_reaper;
++out_put:
++ put_vx_info(vxi);
++ return reaper;
+ }
+
+ /*
+@@ -808,10 +826,15 @@ static void forget_original_parent(struc
+ list_for_each_entry_safe(p, n, &father->children, sibling) {
+ struct task_struct *t = p;
+ do {
+- t->real_parent = reaper;
++ struct task_struct *new_parent = reaper;
++
++ if (unlikely(p == reaper))
++ new_parent = task_active_pid_ns(p)->child_reaper;
++
++ t->real_parent = new_parent;
+ if (t->parent == father) {
+ BUG_ON(t->ptrace);
+- t->parent = t->real_parent;
++ t->parent = new_parent;
+ }
+ if (t->pdeath_signal)
+ group_send_sig_info(t->pdeath_signal,
+@@ -1018,6 +1041,9 @@ void do_exit(long code)
+ */
+ ptrace_put_breakpoints(tsk);
+
++ /* needs to stay before exit_notify() */
++ exit_vx_info_early(tsk, code);
++
+ exit_notify(tsk, group_dead);
+ #ifdef CONFIG_NUMA
+ task_lock(tsk);
+@@ -1068,10 +1094,15 @@ void do_exit(long code)
+ smp_mb();
+ raw_spin_unlock_wait(&tsk->pi_lock);
+
++ /* needs to stay after exit_notify() */
++ exit_vx_info(tsk, code);
++ exit_nx_info(tsk);
++
+ /* causes final put_task_struct in finish_task_switch(). */
+ tsk->state = TASK_DEAD;
+ tsk->flags |= PF_NOFREEZE; /* tell freezer to ignore us */
+ schedule();
++ printk("bad task: %p [%lx]\n", current, current->state);
+ BUG();
+ /* Avoid "noreturn function does return". */
+ for (;;)
+diff -NurpP --minimal linux-3.6.10/kernel/fork.c linux-3.6.10-vs2.3.4.6/kernel/fork.c
+--- linux-3.6.10/kernel/fork.c 2012-10-04 13:27:48.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/fork.c 2012-10-04 16:47:00.000000000 +0000
+@@ -70,6 +70,9 @@
+ #include <linux/khugepaged.h>
+ #include <linux/signalfd.h>
+ #include <linux/uprobes.h>
++#include <linux/vs_context.h>
++#include <linux/vs_network.h>
++#include <linux/vs_limit.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/pgalloc.h>
+@@ -210,6 +213,8 @@ void free_task(struct task_struct *tsk)
+ arch_release_thread_info(tsk->stack);
+ free_thread_info(tsk->stack);
+ rt_mutex_debug_task_free(tsk);
++ clr_vx_info(&tsk->vx_info);
++ clr_nx_info(&tsk->nx_info);
+ ftrace_graph_exit_task(tsk);
+ put_seccomp_filter(tsk);
+ arch_release_task_struct(tsk);
+@@ -541,6 +546,7 @@ static struct mm_struct *mm_init(struct
+ if (likely(!mm_alloc_pgd(mm))) {
+ mm->def_flags = 0;
+ mmu_notifier_mm_init(mm);
++ set_vx_info(&mm->mm_vx_info, p->vx_info);
+ return mm;
+ }
+
+@@ -593,6 +599,7 @@ void __mmdrop(struct mm_struct *mm)
+ destroy_context(mm);
+ mmu_notifier_mm_destroy(mm);
+ check_mm(mm);
++ clr_vx_info(&mm->mm_vx_info);
+ free_mm(mm);
+ }
+ EXPORT_SYMBOL_GPL(__mmdrop);
+@@ -834,6 +841,7 @@ struct mm_struct *dup_mm(struct task_str
+ goto fail_nomem;
+
+ memcpy(mm, oldmm, sizeof(*mm));
++ mm->mm_vx_info = NULL;
+ mm_init_cpumask(mm);
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+@@ -874,6 +882,7 @@ fail_nocontext:
+ * If init_new_context() failed, we cannot use mmput() to free the mm
+ * because it calls destroy_context()
+ */
++ clr_vx_info(&mm->mm_vx_info);
+ mm_free_pgd(mm);
+ free_mm(mm);
+ return NULL;
+@@ -1157,6 +1166,8 @@ static struct task_struct *copy_process(
+ int retval;
+ struct task_struct *p;
+ int cgroup_callbacks_done = 0;
++ struct vx_info *vxi;
++ struct nx_info *nxi;
+
+ if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
+ return ERR_PTR(-EINVAL);
+@@ -1204,7 +1215,12 @@ static struct task_struct *copy_process(
+ DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
+ DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
+ #endif
++ init_vx_info(&p->vx_info, current_vx_info());
++ init_nx_info(&p->nx_info, current_nx_info());
++
+ retval = -EAGAIN;
++ if (!vx_nproc_avail(1))
++ goto bad_fork_free;
+ if (atomic_read(&p->real_cred->user->processes) >=
+ task_rlimit(p, RLIMIT_NPROC)) {
+ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
+@@ -1483,6 +1499,18 @@ static struct task_struct *copy_process(
+
+ total_forks++;
+ spin_unlock(&current->sighand->siglock);
++
++ /* p is copy of current */
++ vxi = p->vx_info;
++ if (vxi) {
++ claim_vx_info(vxi, p);
++ atomic_inc(&vxi->cvirt.nr_threads);
++ atomic_inc(&vxi->cvirt.total_forks);
++ vx_nproc_inc(p);
++ }
++ nxi = p->nx_info;
++ if (nxi)
++ claim_nx_info(nxi, p);
+ write_unlock_irq(&tasklist_lock);
+ proc_fork_connector(p);
+ cgroup_post_fork(p);
+diff -NurpP --minimal linux-3.6.10/kernel/kthread.c linux-3.6.10-vs2.3.4.6/kernel/kthread.c
+--- linux-3.6.10/kernel/kthread.c 2012-10-04 13:27:48.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/kthread.c 2012-10-04 16:47:00.000000000 +0000
+@@ -16,6 +16,7 @@
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
+ #include <linux/freezer.h>
++#include <linux/vs_pid.h>
+ #include <trace/events/sched.h>
+
+ static DEFINE_SPINLOCK(kthread_create_lock);
+diff -NurpP --minimal linux-3.6.10/kernel/nsproxy.c linux-3.6.10-vs2.3.4.6/kernel/nsproxy.c
+--- linux-3.6.10/kernel/nsproxy.c 2012-01-09 15:15:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/nsproxy.c 2012-10-04 16:47:00.000000000 +0000
+@@ -20,11 +20,14 @@
+ #include <linux/mnt_namespace.h>
+ #include <linux/utsname.h>
+ #include <linux/pid_namespace.h>
++#include <linux/vserver/global.h>
++#include <linux/vserver/debug.h>
+ #include <net/net_namespace.h>
+ #include <linux/ipc_namespace.h>
+ #include <linux/proc_fs.h>
+ #include <linux/file.h>
+ #include <linux/syscalls.h>
++#include "../fs/mount.h"
+
+ static struct kmem_cache *nsproxy_cachep;
+
+@@ -46,8 +49,11 @@ static inline struct nsproxy *create_nsp
+ struct nsproxy *nsproxy;
+
+ nsproxy = kmem_cache_alloc(nsproxy_cachep, GFP_KERNEL);
+- if (nsproxy)
++ if (nsproxy) {
+ atomic_set(&nsproxy->count, 1);
++ atomic_inc(&vs_global_nsproxy);
++ }
++ vxdprintk(VXD_CBIT(space, 2), "create_nsproxy = %p[1]", nsproxy);
+ return nsproxy;
+ }
+
+@@ -56,8 +62,11 @@ static inline struct nsproxy *create_nsp
+ * Return the newly created nsproxy. Do not attach this to the task,
+ * leave it to the caller to do proper locking and attach it to task.
+ */
+-static struct nsproxy *create_new_namespaces(unsigned long flags,
+- struct task_struct *tsk, struct fs_struct *new_fs)
++static struct nsproxy *unshare_namespaces(unsigned long flags,
++ struct nsproxy *orig,
++ struct fs_struct *new_fs,
++ struct user_namespace *new_user,
++ struct pid_namespace *new_pid)
+ {
+ struct nsproxy *new_nsp;
+ int err;
+@@ -66,31 +75,31 @@ static struct nsproxy *create_new_namesp
+ if (!new_nsp)
+ return ERR_PTR(-ENOMEM);
+
+- new_nsp->mnt_ns = copy_mnt_ns(flags, tsk->nsproxy->mnt_ns, new_fs);
++ new_nsp->mnt_ns = copy_mnt_ns(flags, orig->mnt_ns, new_fs);
+ if (IS_ERR(new_nsp->mnt_ns)) {
+ err = PTR_ERR(new_nsp->mnt_ns);
+ goto out_ns;
+ }
+
+- new_nsp->uts_ns = copy_utsname(flags, tsk);
++ new_nsp->uts_ns = copy_utsname(flags, orig->uts_ns, new_user);
+ if (IS_ERR(new_nsp->uts_ns)) {
+ err = PTR_ERR(new_nsp->uts_ns);
+ goto out_uts;
+ }
+
+- new_nsp->ipc_ns = copy_ipcs(flags, tsk);
++ new_nsp->ipc_ns = copy_ipcs(flags, orig->ipc_ns, new_user);
+ if (IS_ERR(new_nsp->ipc_ns)) {
+ err = PTR_ERR(new_nsp->ipc_ns);
+ goto out_ipc;
+ }
+
+- new_nsp->pid_ns = copy_pid_ns(flags, task_active_pid_ns(tsk));
++ new_nsp->pid_ns = copy_pid_ns(flags, new_pid);
+ if (IS_ERR(new_nsp->pid_ns)) {
+ err = PTR_ERR(new_nsp->pid_ns);
+ goto out_pid;
+ }
+
+- new_nsp->net_ns = copy_net_ns(flags, tsk->nsproxy->net_ns);
++ new_nsp->net_ns = copy_net_ns(flags, orig->net_ns);
+ if (IS_ERR(new_nsp->net_ns)) {
+ err = PTR_ERR(new_nsp->net_ns);
+ goto out_net;
+@@ -115,6 +124,40 @@ out_ns:
+ return ERR_PTR(err);
+ }
+
++static struct nsproxy *create_new_namespaces(unsigned long flags,
++ struct task_struct *tsk, struct fs_struct *new_fs)
++{
++ return unshare_namespaces(flags, tsk->nsproxy,
++ new_fs, task_cred_xxx(tsk, user_ns),
++ task_active_pid_ns(tsk));
++}
++
++/*
++ * copies the nsproxy, setting refcount to 1, and grabbing a
++ * reference to all contained namespaces.
++ */
++struct nsproxy *copy_nsproxy(struct nsproxy *orig)
++{
++ struct nsproxy *ns = create_nsproxy();
++
++ if (ns) {
++ memcpy(ns, orig, sizeof(struct nsproxy));
++ atomic_set(&ns->count, 1);
++
++ if (ns->mnt_ns)
++ get_mnt_ns(ns->mnt_ns);
++ if (ns->uts_ns)
++ get_uts_ns(ns->uts_ns);
++ if (ns->ipc_ns)
++ get_ipc_ns(ns->ipc_ns);
++ if (ns->pid_ns)
++ get_pid_ns(ns->pid_ns);
++ if (ns->net_ns)
++ get_net(ns->net_ns);
++ }
++ return ns;
++}
++
+ /*
+ * called from clone. This now handles copy for nsproxy and all
+ * namespaces therein.
+@@ -122,9 +165,12 @@ out_ns:
+ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
+ {
+ struct nsproxy *old_ns = tsk->nsproxy;
+- struct nsproxy *new_ns;
++ struct nsproxy *new_ns = NULL;
+ int err = 0;
+
++ vxdprintk(VXD_CBIT(space, 7), "copy_namespaces(0x%08lx,%p[%p])",
++ flags, tsk, old_ns);
++
+ if (!old_ns)
+ return 0;
+
+@@ -134,7 +180,7 @@ int copy_namespaces(unsigned long flags,
+ CLONE_NEWPID | CLONE_NEWNET)))
+ return 0;
+
+- if (!capable(CAP_SYS_ADMIN)) {
++ if (!vx_can_unshare(CAP_SYS_ADMIN, flags)) {
+ err = -EPERM;
+ goto out;
+ }
+@@ -161,6 +207,9 @@ int copy_namespaces(unsigned long flags,
+
+ out:
+ put_nsproxy(old_ns);
++ vxdprintk(VXD_CBIT(space, 3),
++ "copy_namespaces(0x%08lx,%p[%p]) = %d [%p]",
++ flags, tsk, old_ns, err, new_ns);
+ return err;
+ }
+
+@@ -174,7 +223,9 @@ void free_nsproxy(struct nsproxy *ns)
+ put_ipc_ns(ns->ipc_ns);
+ if (ns->pid_ns)
+ put_pid_ns(ns->pid_ns);
+- put_net(ns->net_ns);
++ if (ns->net_ns)
++ put_net(ns->net_ns);
++ atomic_dec(&vs_global_nsproxy);
+ kmem_cache_free(nsproxy_cachep, ns);
+ }
+
+@@ -187,11 +238,15 @@ int unshare_nsproxy_namespaces(unsigned
+ {
+ int err = 0;
+
++ vxdprintk(VXD_CBIT(space, 4),
++ "unshare_nsproxy_namespaces(0x%08lx,[%p])",
++ unshare_flags, current->nsproxy);
++
+ if (!(unshare_flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |
+ CLONE_NEWNET)))
+ return 0;
+
+- if (!capable(CAP_SYS_ADMIN))
++ if (!vx_can_unshare(CAP_SYS_ADMIN, unshare_flags))
+ return -EPERM;
+
+ *new_nsp = create_new_namespaces(unshare_flags, current,
+diff -NurpP --minimal linux-3.6.10/kernel/pid.c linux-3.6.10-vs2.3.4.6/kernel/pid.c
+--- linux-3.6.10/kernel/pid.c 2012-07-22 21:39:46.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/pid.c 2012-10-04 16:47:00.000000000 +0000
+@@ -36,6 +36,7 @@
+ #include <linux/pid_namespace.h>
+ #include <linux/init_task.h>
+ #include <linux/syscalls.h>
++#include <linux/vs_pid.h>
+
+ #define pid_hashfn(nr, ns) \
+ hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
+@@ -344,7 +345,7 @@ EXPORT_SYMBOL_GPL(find_pid_ns);
+
+ struct pid *find_vpid(int nr)
+ {
+- return find_pid_ns(nr, current->nsproxy->pid_ns);
++ return find_pid_ns(vx_rmap_pid(nr), current->nsproxy->pid_ns);
+ }
+ EXPORT_SYMBOL_GPL(find_vpid);
+
+@@ -404,6 +405,9 @@ void transfer_pid(struct task_struct *ol
+ struct task_struct *pid_task(struct pid *pid, enum pid_type type)
+ {
+ struct task_struct *result = NULL;
++
++ if (type == PIDTYPE_REALPID)
++ type = PIDTYPE_PID;
+ if (pid) {
+ struct hlist_node *first;
+ first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
+@@ -423,7 +427,7 @@ struct task_struct *find_task_by_pid_ns(
+ rcu_lockdep_assert(rcu_read_lock_held(),
+ "find_task_by_pid_ns() needs rcu_read_lock()"
+ " protection");
+- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
++ return pid_task(find_pid_ns(vx_rmap_pid(nr), ns), PIDTYPE_PID);
+ }
+
+ struct task_struct *find_task_by_vpid(pid_t vnr)
+@@ -467,7 +471,7 @@ struct pid *find_get_pid(pid_t nr)
+ }
+ EXPORT_SYMBOL_GPL(find_get_pid);
+
+-pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
++pid_t pid_unmapped_nr_ns(struct pid *pid, struct pid_namespace *ns)
+ {
+ struct upid *upid;
+ pid_t nr = 0;
+@@ -480,6 +484,11 @@ pid_t pid_nr_ns(struct pid *pid, struct
+ return nr;
+ }
+
++pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
++{
++ return vx_map_pid(pid_unmapped_nr_ns(pid, ns));
++}
++
+ pid_t pid_vnr(struct pid *pid)
+ {
+ return pid_nr_ns(pid, current->nsproxy->pid_ns);
+diff -NurpP --minimal linux-3.6.10/kernel/pid_namespace.c linux-3.6.10-vs2.3.4.6/kernel/pid_namespace.c
+--- linux-3.6.10/kernel/pid_namespace.c 2012-10-04 13:27:48.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/pid_namespace.c 2012-10-04 16:47:00.000000000 +0000
+@@ -16,6 +16,7 @@
+ #include <linux/slab.h>
+ #include <linux/proc_fs.h>
+ #include <linux/reboot.h>
++#include <linux/vserver/global.h>
+
+ #define BITS_PER_PAGE (PAGE_SIZE*8)
+
+@@ -89,6 +90,7 @@ static struct pid_namespace *create_pid_
+ goto out_free_map;
+
+ kref_init(&ns->kref);
++ atomic_inc(&vs_global_pid_ns);
+ ns->level = level;
+ ns->parent = get_pid_ns(parent_pid_ns);
+
+@@ -120,6 +122,7 @@ static void destroy_pid_namespace(struct
+
+ for (i = 0; i < PIDMAP_ENTRIES; i++)
+ kfree(ns->pidmap[i].page);
++ atomic_dec(&vs_global_pid_ns);
+ kmem_cache_free(pid_ns_cachep, ns);
+ }
+
+diff -NurpP --minimal linux-3.6.10/kernel/posix-timers.c linux-3.6.10-vs2.3.4.6/kernel/posix-timers.c
+--- linux-3.6.10/kernel/posix-timers.c 2012-01-09 15:15:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/posix-timers.c 2012-10-04 16:47:00.000000000 +0000
+@@ -47,6 +47,7 @@
+ #include <linux/wait.h>
+ #include <linux/workqueue.h>
+ #include <linux/export.h>
++#include <linux/vs_context.h>
+
+ /*
+ * Management arrays for POSIX timers. Timers are kept in slab memory
+@@ -340,6 +341,7 @@ int posix_timer_event(struct k_itimer *t
+ {
+ struct task_struct *task;
+ int shared, ret = -1;
++
+ /*
+ * FIXME: if ->sigq is queued we can race with
+ * dequeue_signal()->do_schedule_next_timer().
+@@ -356,10 +358,18 @@ int posix_timer_event(struct k_itimer *t
+ rcu_read_lock();
+ task = pid_task(timr->it_pid, PIDTYPE_PID);
+ if (task) {
++ struct vx_info_save vxis;
++ struct vx_info *vxi;
++
++ vxi = get_vx_info(task->vx_info);
++ enter_vx_info(vxi, &vxis);
+ shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
+ ret = send_sigqueue(timr->sigq, task, shared);
++ leave_vx_info(&vxis);
++ put_vx_info(vxi);
+ }
+ rcu_read_unlock();
++
+ /* If we failed to send the signal the timer stops. */
+ return ret > 0;
+ }
+diff -NurpP --minimal linux-3.6.10/kernel/printk.c linux-3.6.10-vs2.3.4.6/kernel/printk.c
+--- linux-3.6.10/kernel/printk.c 2012-10-04 13:27:48.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/printk.c 2012-10-04 16:47:00.000000000 +0000
+@@ -42,6 +42,7 @@
+ #include <linux/notifier.h>
+ #include <linux/rculist.h>
+ #include <linux/poll.h>
++#include <linux/vs_cvirt.h>
+
+ #include <asm/uaccess.h>
+
+@@ -818,7 +819,7 @@ static int check_syslog_permissions(int
+ return 0;
+
+ if (syslog_action_restricted(type)) {
+- if (capable(CAP_SYSLOG))
++ if (vx_capable(CAP_SYSLOG, VXC_SYSLOG))
+ return 0;
+ /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
+ if (capable(CAP_SYS_ADMIN)) {
+@@ -1111,12 +1112,9 @@ int do_syslog(int type, char __user *buf
+ if (error)
+ return error;
+
+- switch (type) {
+- case SYSLOG_ACTION_CLOSE: /* Close log */
+- break;
+- case SYSLOG_ACTION_OPEN: /* Open log */
+- break;
+- case SYSLOG_ACTION_READ: /* Read from log */
++ if ((type == SYSLOG_ACTION_READ) ||
++ (type == SYSLOG_ACTION_READ_ALL) ||
++ (type == SYSLOG_ACTION_READ_CLEAR)) {
+ error = -EINVAL;
+ if (!buf || len < 0)
+ goto out;
+@@ -1127,6 +1125,16 @@ int do_syslog(int type, char __user *buf
+ error = -EFAULT;
+ goto out;
+ }
++ }
++ if (!vx_check(0, VS_ADMIN|VS_WATCH))
++ return vx_do_syslog(type, buf, len);
++
++ switch (type) {
++ case SYSLOG_ACTION_CLOSE: /* Close log */
++ break;
++ case SYSLOG_ACTION_OPEN: /* Open log */
++ break;
++ case SYSLOG_ACTION_READ: /* Read from log */
+ error = wait_event_interruptible(log_wait,
+ syslog_seq != log_next_seq);
+ if (error)
+@@ -1139,16 +1147,6 @@ int do_syslog(int type, char __user *buf
+ /* FALL THRU */
+ /* Read last kernel messages */
+ case SYSLOG_ACTION_READ_ALL:
+- error = -EINVAL;
+- if (!buf || len < 0)
+- goto out;
+- error = 0;
+- if (!len)
+- goto out;
+- if (!access_ok(VERIFY_WRITE, buf, len)) {
+- error = -EFAULT;
+- goto out;
+- }
+ error = syslog_print_all(buf, len, clear);
+ break;
+ /* Clear ring buffer */
+diff -NurpP --minimal linux-3.6.10/kernel/ptrace.c linux-3.6.10-vs2.3.4.6/kernel/ptrace.c
+--- linux-3.6.10/kernel/ptrace.c 2012-07-22 21:39:46.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/ptrace.c 2012-10-04 16:47:00.000000000 +0000
+@@ -22,6 +22,7 @@
+ #include <linux/syscalls.h>
+ #include <linux/uaccess.h>
+ #include <linux/regset.h>
++#include <linux/vs_context.h>
+ #include <linux/hw_breakpoint.h>
+ #include <linux/cn_proc.h>
+
+@@ -216,6 +217,11 @@ ok:
+ dumpable = get_dumpable(task->mm);
+ if (!dumpable && !ptrace_has_cap(task_user_ns(task), mode))
+ return -EPERM;
++ if (!vx_check(task->xid, VS_ADMIN_P|VS_WATCH_P|VS_IDENT))
++ return -EPERM;
++ if (!vx_check(task->xid, VS_IDENT) &&
++ !task_vx_flags(task, VXF_STATE_ADMIN, 0))
++ return -EACCES;
+
+ return security_ptrace_access_check(task, mode);
+ }
+diff -NurpP --minimal linux-3.6.10/kernel/sched/core.c linux-3.6.10-vs2.3.4.6/kernel/sched/core.c
+--- linux-3.6.10/kernel/sched/core.c 2012-12-11 11:37:02.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/sched/core.c 2012-11-06 17:43:41.000000000 +0000
+@@ -72,6 +72,8 @@
+ #include <linux/slab.h>
+ #include <linux/init_task.h>
+ #include <linux/binfmts.h>
++#include <linux/vs_sched.h>
++#include <linux/vs_cvirt.h>
+
+ #include <asm/switch_to.h>
+ #include <asm/tlb.h>
+@@ -2223,9 +2225,17 @@ EXPORT_SYMBOL(avenrun); /* should be rem
+ */
+ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+ {
+- loads[0] = (avenrun[0] + offset) << shift;
+- loads[1] = (avenrun[1] + offset) << shift;
+- loads[2] = (avenrun[2] + offset) << shift;
++ if (vx_flags(VXF_VIRT_LOAD, 0)) {
++ struct vx_info *vxi = current_vx_info();
++
++ loads[0] = (vxi->cvirt.load[0] + offset) << shift;
++ loads[1] = (vxi->cvirt.load[1] + offset) << shift;
++ loads[2] = (vxi->cvirt.load[2] + offset) << shift;
++ } else {
++ loads[0] = (avenrun[0] + offset) << shift;
++ loads[1] = (avenrun[1] + offset) << shift;
++ loads[2] = (avenrun[2] + offset) << shift;
++ }
+ }
+
+ static long calc_load_fold_active(struct rq *this_rq)
+@@ -2854,14 +2864,17 @@ static inline void task_group_account_fi
+ void account_user_time(struct task_struct *p, cputime_t cputime,
+ cputime_t cputime_scaled)
+ {
++ struct vx_info *vxi = p->vx_info; /* p is _always_ current */
++ int nice = (TASK_NICE(p) > 0);
+ int index;
+
+ /* Add user time to process. */
+ p->utime += cputime;
+ p->utimescaled += cputime_scaled;
++ vx_account_user(vxi, cputime, nice);
+ account_group_user_time(p, cputime);
+
+- index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
++ index = (nice) ? CPUTIME_NICE : CPUTIME_USER;
+
+ /* Add user time to cpustat. */
+ task_group_account_field(p, index, (__force u64) cputime);
+@@ -2908,9 +2921,12 @@ static inline
+ void __account_system_time(struct task_struct *p, cputime_t cputime,
+ cputime_t cputime_scaled, int index)
+ {
++ struct vx_info *vxi = p->vx_info; /* p is _always_ current */
++
+ /* Add system time to process. */
+ p->stime += cputime;
+ p->stimescaled += cputime_scaled;
++ vx_account_system(vxi, cputime, 0 /* do we have idle time? */);
+ account_group_system_time(p, cputime);
+
+ /* Add system time to cpustat. */
+@@ -4137,7 +4153,7 @@ SYSCALL_DEFINE1(nice, int, increment)
+ nice = 19;
+
+ if (increment < 0 && !can_nice(current, nice))
+- return -EPERM;
++ return vx_flags(VXF_IGNEG_NICE, 0) ? 0 : -EPERM;
+
+ retval = security_task_setnice(current, nice);
+ if (retval)
+diff -NurpP --minimal linux-3.6.10/kernel/sched/fair.c linux-3.6.10-vs2.3.4.6/kernel/sched/fair.c
+--- linux-3.6.10/kernel/sched/fair.c 2012-10-04 13:27:48.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/sched/fair.c 2012-10-04 16:47:00.000000000 +0000
+@@ -26,6 +26,7 @@
+ #include <linux/slab.h>
+ #include <linux/profile.h>
+ #include <linux/interrupt.h>
++#include <linux/vs_cvirt.h>
+
+ #include <trace/events/sched.h>
+
+@@ -1111,6 +1112,8 @@ enqueue_entity(struct cfs_rq *cfs_rq, st
+ __enqueue_entity(cfs_rq, se);
+ se->on_rq = 1;
+
++ if (entity_is_task(se))
++ vx_activate_task(task_of(se));
+ if (cfs_rq->nr_running == 1) {
+ list_add_leaf_cfs_rq(cfs_rq);
+ check_enqueue_throttle(cfs_rq);
+@@ -1191,6 +1194,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, st
+ if (se != cfs_rq->curr)
+ __dequeue_entity(cfs_rq, se);
+ se->on_rq = 0;
++ if (entity_is_task(se))
++ vx_deactivate_task(task_of(se));
+ update_cfs_load(cfs_rq, 0);
+ account_entity_dequeue(cfs_rq, se);
+
+diff -NurpP --minimal linux-3.6.10/kernel/signal.c linux-3.6.10-vs2.3.4.6/kernel/signal.c
+--- linux-3.6.10/kernel/signal.c 2012-10-04 13:27:48.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/signal.c 2012-10-04 16:47:00.000000000 +0000
+@@ -30,6 +30,8 @@
+ #include <linux/nsproxy.h>
+ #include <linux/user_namespace.h>
+ #include <linux/uprobes.h>
++#include <linux/vs_context.h>
++#include <linux/vs_pid.h>
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/signal.h>
+
+@@ -790,9 +792,18 @@ static int check_kill_permission(int sig
+ struct pid *sid;
+ int error;
+
++ vxdprintk(VXD_CBIT(misc, 7),
++ "check_kill_permission(%d,%p,%p[#%u,%u])",
++ sig, info, t, vx_task_xid(t), t->pid);
++
+ if (!valid_signal(sig))
+ return -EINVAL;
+
++/* FIXME: needed? if so, why?
++ if ((info != SEND_SIG_NOINFO) &&
++ (is_si_special(info) || !si_fromuser(info)))
++ goto skip; */
++
+ if (!si_fromuser(info))
+ return 0;
+
+@@ -816,6 +827,20 @@ static int check_kill_permission(int sig
+ }
+ }
+
++ error = -EPERM;
++ if (t->pid == 1 && current->xid)
++ return error;
++
++ error = -ESRCH;
++ /* FIXME: we shouldn't return ESRCH ever, to avoid
++ loops, maybe ENOENT or EACCES? */
++ if (!vx_check(vx_task_xid(t), VS_WATCH_P | VS_IDENT)) {
++ vxdprintk(current->xid || VXD_CBIT(misc, 7),
++ "signal %d[%p] xid mismatch %p[#%u,%u] xid=#%u",
++ sig, info, t, vx_task_xid(t), t->pid, current->xid);
++ return error;
++ }
++/* skip: */
+ return security_task_kill(t, info, sig, 0);
+ }
+
+@@ -1351,7 +1376,7 @@ int kill_pid_info(int sig, struct siginf
+ rcu_read_lock();
+ retry:
+ p = pid_task(pid, PIDTYPE_PID);
+- if (p) {
++ if (p && vx_check(vx_task_xid(p), VS_IDENT)) {
+ error = group_send_sig_info(sig, info, p);
+ if (unlikely(error == -ESRCH))
+ /*
+@@ -1399,7 +1424,7 @@ int kill_pid_info_as_cred(int sig, struc
+
+ rcu_read_lock();
+ p = pid_task(pid, PIDTYPE_PID);
+- if (!p) {
++ if (!p || !vx_check(vx_task_xid(p), VS_IDENT)) {
+ ret = -ESRCH;
+ goto out_unlock;
+ }
+@@ -1451,8 +1476,10 @@ static int kill_something_info(int sig,
+ struct task_struct * p;
+
+ for_each_process(p) {
+- if (task_pid_vnr(p) > 1 &&
+- !same_thread_group(p, current)) {
++ if (vx_check(vx_task_xid(p), VS_ADMIN|VS_IDENT) &&
++ task_pid_vnr(p) > 1 &&
++ !same_thread_group(p, current) &&
++ !vx_current_initpid(p->pid)) {
+ int err = group_send_sig_info(sig, info, p);
+ ++count;
+ if (err != -EPERM)
+@@ -2317,6 +2344,11 @@ relock:
+ !sig_kernel_only(signr))
+ continue;
+
++ /* virtual init is protected against user signals */
++ if ((info->si_code == SI_USER) &&
++ vx_current_initpid(current->pid))
++ continue;
++
+ if (sig_kernel_stop(signr)) {
+ /*
+ * The default action is to stop all threads in
+diff -NurpP --minimal linux-3.6.10/kernel/softirq.c linux-3.6.10-vs2.3.4.6/kernel/softirq.c
+--- linux-3.6.10/kernel/softirq.c 2012-10-04 13:27:48.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/softirq.c 2012-10-04 16:47:00.000000000 +0000
+@@ -24,6 +24,7 @@
+ #include <linux/ftrace.h>
+ #include <linux/smp.h>
+ #include <linux/tick.h>
++#include <linux/vs_context.h>
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/irq.h>
+diff -NurpP --minimal linux-3.6.10/kernel/sys.c linux-3.6.10-vs2.3.4.6/kernel/sys.c
+--- linux-3.6.10/kernel/sys.c 2012-12-11 11:37:02.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/sys.c 2012-11-06 17:43:41.000000000 +0000
+@@ -47,6 +47,7 @@
+ #include <linux/syscalls.h>
+ #include <linux/kprobes.h>
+ #include <linux/user_namespace.h>
++#include <linux/vs_pid.h>
+
+ #include <linux/kmsg_dump.h>
+ /* Move somewhere else to avoid recompiling? */
+@@ -154,7 +155,10 @@ static int set_one_prio(struct task_stru
+ goto out;
+ }
+ if (niceval < task_nice(p) && !can_nice(p, niceval)) {
+- error = -EACCES;
++ if (vx_flags(VXF_IGNEG_NICE, 0))
++ error = 0;
++ else
++ error = -EACCES;
+ goto out;
+ }
+ no_nice = security_task_setnice(p, niceval);
+@@ -205,6 +209,8 @@ SYSCALL_DEFINE3(setpriority, int, which,
+ else
+ pgrp = task_pgrp(current);
+ do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
++ if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
++ continue;
+ error = set_one_prio(p, niceval, error);
+ } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
+ break;
+@@ -270,6 +276,8 @@ SYSCALL_DEFINE2(getpriority, int, which,
+ else
+ pgrp = task_pgrp(current);
+ do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
++ if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
++ continue;
+ niceval = 20 - task_nice(p);
+ if (niceval > retval)
+ retval = niceval;
+@@ -422,6 +430,8 @@ EXPORT_SYMBOL_GPL(kernel_power_off);
+
+ static DEFINE_MUTEX(reboot_mutex);
+
++long vs_reboot(unsigned int, void __user *);
++
+ /*
+ * Reboot system call: for obvious reasons only root may call it,
+ * and even root needs to set up some magic numbers in the registers
+@@ -463,6 +473,9 @@ SYSCALL_DEFINE4(reboot, int, magic1, int
+ if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
+ cmd = LINUX_REBOOT_CMD_HALT;
+
++ if (!vx_check(0, VS_ADMIN|VS_WATCH))
++ return vs_reboot(cmd, arg);
++
+ mutex_lock(&reboot_mutex);
+ switch (cmd) {
+ case LINUX_REBOOT_CMD_RESTART:
+@@ -1370,7 +1383,8 @@ SYSCALL_DEFINE2(sethostname, char __user
+ int errno;
+ char tmp[__NEW_UTS_LEN];
+
+- if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
++ if (!vx_ns_capable(current->nsproxy->uts_ns->user_ns,
++ CAP_SYS_ADMIN, VXC_SET_UTSNAME))
+ return -EPERM;
+
+ if (len < 0 || len > __NEW_UTS_LEN)
+@@ -1421,7 +1435,8 @@ SYSCALL_DEFINE2(setdomainname, char __us
+ int errno;
+ char tmp[__NEW_UTS_LEN];
+
+- if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
++ if (!vx_ns_capable(current->nsproxy->uts_ns->user_ns,
++ CAP_SYS_ADMIN, VXC_SET_UTSNAME))
+ return -EPERM;
+ if (len < 0 || len > __NEW_UTS_LEN)
+ return -EINVAL;
+@@ -1540,7 +1555,7 @@ int do_prlimit(struct task_struct *tsk,
+ /* Keep the capable check against init_user_ns until
+ cgroups can contain all limits */
+ if (new_rlim->rlim_max > rlim->rlim_max &&
+- !capable(CAP_SYS_RESOURCE))
++ !vx_capable(CAP_SYS_RESOURCE, VXC_SET_RLIMIT))
+ retval = -EPERM;
+ if (!retval)
+ retval = security_task_setrlimit(tsk->group_leader,
+@@ -1593,7 +1608,8 @@ static int check_prlimit_permission(stru
+ gid_eq(cred->gid, tcred->sgid) &&
+ gid_eq(cred->gid, tcred->gid))
+ return 0;
+- if (ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
++ if (vx_ns_capable(tcred->user_ns,
++ CAP_SYS_RESOURCE, VXC_SET_RLIMIT))
+ return 0;
+
+ return -EPERM;
+diff -NurpP --minimal linux-3.6.10/kernel/sysctl.c linux-3.6.10-vs2.3.4.6/kernel/sysctl.c
+--- linux-3.6.10/kernel/sysctl.c 2012-10-04 13:27:48.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/sysctl.c 2012-10-04 16:47:00.000000000 +0000
+@@ -82,6 +82,7 @@
+ #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_LOCK_STAT)
+ #include <linux/lockdep.h>
+ #endif
++extern char vshelper_path[];
+ #ifdef CONFIG_CHR_DEV_SG
+ #include <scsi/sg.h>
+ #endif
+@@ -568,6 +569,13 @@ static struct ctl_table kern_table[] = {
+ .proc_handler = proc_dostring,
+ },
+ #endif
++ {
++ .procname = "vshelper",
++ .data = &vshelper_path,
++ .maxlen = 256,
++ .mode = 0644,
++ .proc_handler = &proc_dostring,
++ },
+ #ifdef CONFIG_CHR_DEV_SG
+ {
+ .procname = "sg-big-buff",
+diff -NurpP --minimal linux-3.6.10/kernel/sysctl_binary.c linux-3.6.10-vs2.3.4.6/kernel/sysctl_binary.c
+--- linux-3.6.10/kernel/sysctl_binary.c 2012-10-04 13:27:48.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/sysctl_binary.c 2012-10-04 16:47:00.000000000 +0000
+@@ -73,6 +73,7 @@ static const struct bin_table bin_kern_t
+
+ { CTL_INT, KERN_PANIC, "panic" },
+ { CTL_INT, KERN_REALROOTDEV, "real-root-dev" },
++ { CTL_STR, KERN_VSHELPER, "vshelper" },
+
+ { CTL_STR, KERN_SPARC_REBOOT, "reboot-cmd" },
+ { CTL_INT, KERN_CTLALTDEL, "ctrl-alt-del" },
+diff -NurpP --minimal linux-3.6.10/kernel/time/timekeeping.c linux-3.6.10-vs2.3.4.6/kernel/time/timekeeping.c
+--- linux-3.6.10/kernel/time/timekeeping.c 2012-12-11 11:37:02.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/time/timekeeping.c 2012-11-06 17:43:41.000000000 +0000
+@@ -309,6 +309,7 @@ void getnstimeofday(struct timespec *ts)
+
+ ts->tv_nsec = 0;
+ timespec_add_ns(ts, nsecs);
++ vx_adjust_timespec(ts);
+ }
+ EXPORT_SYMBOL(getnstimeofday);
+
+diff -NurpP --minimal linux-3.6.10/kernel/time.c linux-3.6.10-vs2.3.4.6/kernel/time.c
+--- linux-3.6.10/kernel/time.c 2012-05-21 16:07:34.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/time.c 2012-10-04 16:47:00.000000000 +0000
+@@ -92,7 +92,7 @@ SYSCALL_DEFINE1(stime, time_t __user *,
+ if (err)
+ return err;
+
+- do_settimeofday(&tv);
++ vx_settimeofday(&tv);
+ return 0;
+ }
+
+@@ -172,7 +172,7 @@ int do_sys_settimeofday(const struct tim
+ }
+ }
+ if (tv)
+- return do_settimeofday(tv);
++ return vx_settimeofday(tv);
+ return 0;
+ }
+
+diff -NurpP --minimal linux-3.6.10/kernel/timer.c linux-3.6.10-vs2.3.4.6/kernel/timer.c
+--- linux-3.6.10/kernel/timer.c 2012-12-11 11:37:02.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/timer.c 2012-11-06 17:43:41.000000000 +0000
+@@ -40,6 +40,10 @@
+ #include <linux/irq_work.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
++#include <linux/vs_base.h>
++#include <linux/vs_cvirt.h>
++#include <linux/vs_pid.h>
++#include <linux/vserver/sched.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
+diff -NurpP --minimal linux-3.6.10/kernel/user_namespace.c linux-3.6.10-vs2.3.4.6/kernel/user_namespace.c
+--- linux-3.6.10/kernel/user_namespace.c 2012-07-22 21:39:47.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/user_namespace.c 2012-10-04 16:47:00.000000000 +0000
+@@ -19,6 +19,7 @@
+ #include <linux/fs.h>
+ #include <linux/uaccess.h>
+ #include <linux/ctype.h>
++#include <linux/vserver/global.h>
+
+ static struct kmem_cache *user_ns_cachep __read_mostly;
+
+@@ -52,6 +53,7 @@ int create_user_ns(struct cred *new)
+ return -ENOMEM;
+
+ kref_init(&ns->kref);
++ atomic_inc(&vs_global_user_ns);
+ ns->parent = parent_ns;
+ ns->owner = owner;
+ ns->group = group;
+@@ -84,6 +86,9 @@ void free_user_ns(struct kref *kref)
+
+ parent = ns->parent;
+ kmem_cache_free(user_ns_cachep, ns);
++
++ /* FIXME: maybe move into destroyer? */
++ atomic_dec(&vs_global_user_ns);
+ put_user_ns(parent);
+ }
+ EXPORT_SYMBOL(free_user_ns);
+diff -NurpP --minimal linux-3.6.10/kernel/utsname.c linux-3.6.10-vs2.3.4.6/kernel/utsname.c
+--- linux-3.6.10/kernel/utsname.c 2012-07-22 21:39:47.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/utsname.c 2012-10-04 16:47:00.000000000 +0000
+@@ -16,14 +16,17 @@
+ #include <linux/slab.h>
+ #include <linux/user_namespace.h>
+ #include <linux/proc_fs.h>
++#include <linux/vserver/global.h>
+
+ static struct uts_namespace *create_uts_ns(void)
+ {
+ struct uts_namespace *uts_ns;
+
+ uts_ns = kmalloc(sizeof(struct uts_namespace), GFP_KERNEL);
+- if (uts_ns)
++ if (uts_ns) {
+ kref_init(&uts_ns->kref);
++ atomic_inc(&vs_global_uts_ns);
++ }
+ return uts_ns;
+ }
+
+@@ -32,8 +35,8 @@ static struct uts_namespace *create_uts_
+ * @old_ns: namespace to clone
+ * Return NULL on error (failure to kmalloc), new ns otherwise
+ */
+-static struct uts_namespace *clone_uts_ns(struct task_struct *tsk,
+- struct uts_namespace *old_ns)
++static struct uts_namespace *clone_uts_ns(struct uts_namespace *old_ns,
++ struct user_namespace *old_user)
+ {
+ struct uts_namespace *ns;
+
+@@ -43,7 +46,7 @@ static struct uts_namespace *clone_uts_n
+
+ down_read(&uts_sem);
+ memcpy(&ns->name, &old_ns->name, sizeof(ns->name));
+- ns->user_ns = get_user_ns(task_cred_xxx(tsk, user_ns));
++ ns->user_ns = get_user_ns(old_user);
+ up_read(&uts_sem);
+ return ns;
+ }
+@@ -55,9 +58,9 @@ static struct uts_namespace *clone_uts_n
+ * versa.
+ */
+ struct uts_namespace *copy_utsname(unsigned long flags,
+- struct task_struct *tsk)
++ struct uts_namespace *old_ns,
++ struct user_namespace *user_ns)
+ {
+- struct uts_namespace *old_ns = tsk->nsproxy->uts_ns;
+ struct uts_namespace *new_ns;
+
+ BUG_ON(!old_ns);
+@@ -66,7 +69,7 @@ struct uts_namespace *copy_utsname(unsig
+ if (!(flags & CLONE_NEWUTS))
+ return old_ns;
+
+- new_ns = clone_uts_ns(tsk, old_ns);
++ new_ns = clone_uts_ns(old_ns, user_ns);
+
+ put_uts_ns(old_ns);
+ return new_ns;
+@@ -78,6 +81,7 @@ void free_uts_ns(struct kref *kref)
+
+ ns = container_of(kref, struct uts_namespace, kref);
+ put_user_ns(ns->user_ns);
++ atomic_dec(&vs_global_uts_ns);
+ kfree(ns);
+ }
+
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/Kconfig linux-3.6.10-vs2.3.4.6/kernel/vserver/Kconfig
+--- linux-3.6.10/kernel/vserver/Kconfig 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/Kconfig 2012-12-10 17:21:15.000000000 +0000
+@@ -0,0 +1,233 @@
++#
++# Linux VServer configuration
++#
++
++menu "Linux VServer"
++
++config VSERVER_AUTO_LBACK
++ bool "Automatically Assign Loopback IP"
++ default y
++ help
++	  Automatically assign a guest-specific loopback
++ IP and add it to the kernel network stack on
++ startup.
++
++config VSERVER_AUTO_SINGLE
++ bool "Automatic Single IP Special Casing"
++ depends on EXPERIMENTAL
++ default y
++ help
++ This allows network contexts with a single IP to
++ automatically remap 0.0.0.0 bindings to that IP,
++ avoiding further network checks and improving
++ performance.
++
++	  (note: such guests do not allow changing the IP
++ on the fly and do not show loopback addresses)
++
++config VSERVER_COWBL
++ bool "Enable COW Immutable Link Breaking"
++ default y
++ help
++ This enables the COW (Copy-On-Write) link break code.
++ It allows you to treat unified files like normal files
++	  when writing to them (which will implicitly break the
++	  link and create a copy of the unified file).
++
++config VSERVER_VTIME
++ bool "Enable Virtualized Guest Time"
++ depends on EXPERIMENTAL
++ default n
++ help
++ This enables per guest time offsets to allow for
++ adjusting the system clock individually per guest.
++	  This adds some overhead to the time functions and
++ therefore should not be enabled without good reason.
++
++config VSERVER_DEVICE
++ bool "Enable Guest Device Mapping"
++ depends on EXPERIMENTAL
++ default n
++ help
++ This enables generic device remapping.
++
++config VSERVER_PROC_SECURE
++ bool "Enable Proc Security"
++ depends on PROC_FS
++ default y
++ help
++ This configures ProcFS security to initially hide
++ non-process entries for all contexts except the main and
++ spectator context (i.e. for all guests), which is a secure
++ default.
++
++ (note: on 1.2x the entries were visible by default)
++
++choice
++ prompt "Persistent Inode Tagging"
++ default TAGGING_ID24
++ help
++ This adds persistent context information to filesystems
++ mounted with the tagxid option. Tagging is a requirement
++ for per-context disk limits and per-context quota.
++
++
++config TAGGING_NONE
++ bool "Disabled"
++ help
++ do not store per-context information in inodes.
++
++config TAGGING_UID16
++ bool "UID16/GID32"
++ help
++ reduces UID to 16 bit, but leaves GID at 32 bit.
++
++config TAGGING_GID16
++ bool "UID32/GID16"
++ help
++ reduces GID to 16 bit, but leaves UID at 32 bit.
++
++config TAGGING_ID24
++ bool "UID24/GID24"
++ help
++	  uses the upper 8 bits of UID and GID for XID tagging,
++	  which leaves 24 bits each for UID and GID; this should be
++	  more than sufficient for normal use.
++
++config TAGGING_INTERN
++ bool "UID32/GID32"
++ help
++ this uses otherwise reserved inode fields in the on
++ disk representation, which limits the use to a few
++ filesystems (currently ext2 and ext3)
++
++endchoice
++
++config TAG_NFSD
++ bool "Tag NFSD User Auth and Files"
++ default n
++	  Enable this if you want the in-kernel NFS
++	  server to use the tagging specified above.
++ Server to use the tagging specified above.
++ (will require patched clients too)
++
++config VSERVER_PRIVACY
++ bool "Honor Privacy Aspects of Guests"
++ default n
++ help
++ When enabled, most context checks will disallow
++ access to structures assigned to a specific context,
++ like ptys or loop devices.
++
++config VSERVER_CONTEXTS
++ int "Maximum number of Contexts (1-65533)" if EMBEDDED
++ range 1 65533
++ default "768" if 64BIT
++ default "256"
++ help
++ This setting will optimize certain data structures
++ and memory allocations according to the expected
++ maximum.
++
++ note: this is not a strict upper limit.
++
++config VSERVER_WARN
++ bool "VServer Warnings"
++ default y
++ help
++ This enables various runtime warnings, which will
++ notify about potential manipulation attempts or
++	  resource shortages. It is generally considered a
++	  good idea to keep this enabled.
++
++config VSERVER_WARN_DEVPTS
++ bool "VServer DevPTS Warnings"
++ depends on VSERVER_WARN
++ default y
++	  This enables DevPTS-related warnings, issued when a
++	  process inside a context tries to look up or access
++ process inside a context tries to lookup or access
++ a dynamic pts from the host or a different context.
++
++config VSERVER_DEBUG
++ bool "VServer Debugging Code"
++ default n
++ help
++ Set this to yes if you want to be able to activate
++ debugging output at runtime. It adds a very small
++	  overhead to all vserver-related functions and
++ increases the kernel size by about 20k.
++
++config VSERVER_HISTORY
++ bool "VServer History Tracing"
++ depends on VSERVER_DEBUG
++ default n
++ help
++ Set this to yes if you want to record the history of
++ linux-vserver activities, so they can be replayed in
++ the event of a kernel panic or oops.
++
++config VSERVER_HISTORY_SIZE
++ int "Per-CPU History Size (32-65536)"
++ depends on VSERVER_HISTORY
++ range 32 65536
++ default 64
++ help
++ This allows you to specify the number of entries in
++ the per-CPU history buffer.
++
++config VSERVER_EXTRA_MNT_CHECK
++ bool "Extra Checks for Reachability"
++ default n
++ help
++ Set this to yes if you want to do extra checks for
++ vfsmount reachability in the proc filesystem code.
++ This shouldn't be required on any setup utilizing
++ mnt namespaces.
++
++choice
++ prompt "Quotes used in debug and warn messages"
++ default QUOTES_ISO8859
++
++config QUOTES_ISO8859
++ bool "Extended ASCII (ISO 8859) angle quotes"
++ help
++ This uses the extended ASCII characters \xbb
++ and \xab for quoting file and process names.
++
++config QUOTES_UTF8
++ bool "UTF-8 angle quotes"
++ help
++	  This uses the UTF-8 sequences for angle
++ quotes to quote file and process names.
++
++config QUOTES_ASCII
++ bool "ASCII single quotes"
++ help
++ This uses the ASCII single quote character
++ (\x27) to quote file and process names.
++
++endchoice
++
++endmenu
++
++
++config VSERVER
++ bool
++ default y
++ select NAMESPACES
++ select UTS_NS
++ select IPC_NS
++# select USER_NS
++ select SYSVIPC
++
++config VSERVER_SECURITY
++ bool
++ depends on SECURITY
++ default y
++ select SECURITY_CAPABILITIES
++
++config VSERVER_DISABLED
++ bool
++ default n
++
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/Makefile linux-3.6.10-vs2.3.4.6/kernel/vserver/Makefile
+--- linux-3.6.10/kernel/vserver/Makefile 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/Makefile 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,18 @@
++#
++# Makefile for the Linux vserver routines.
++#
++
++
++obj-y += vserver.o
++
++vserver-y := switch.o context.o space.o sched.o network.o inode.o \
++ limit.o cvirt.o cacct.o signal.o helper.o init.o \
++ dlimit.o tag.o
++
++vserver-$(CONFIG_INET) += inet.o
++vserver-$(CONFIG_PROC_FS) += proc.o
++vserver-$(CONFIG_VSERVER_DEBUG) += sysctl.o debug.o
++vserver-$(CONFIG_VSERVER_HISTORY) += history.o
++vserver-$(CONFIG_VSERVER_MONITOR) += monitor.o
++vserver-$(CONFIG_VSERVER_DEVICE) += device.o
++
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/cacct.c linux-3.6.10-vs2.3.4.6/kernel/vserver/cacct.c
+--- linux-3.6.10/kernel/vserver/cacct.c 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/cacct.c 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,42 @@
++/*
++ * linux/kernel/vserver/cacct.c
++ *
++ * Virtual Server: Context Accounting
++ *
++ * Copyright (C) 2006-2007 Herbert Pötzl
++ *
++ * V0.01 added accounting stats
++ *
++ */
++
++#include <linux/types.h>
++#include <linux/vs_context.h>
++#include <linux/vserver/cacct_cmd.h>
++#include <linux/vserver/cacct_int.h>
++
++#include <asm/errno.h>
++#include <asm/uaccess.h>
++
++
++int vc_sock_stat(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_sock_stat_v0 vc_data;
++ int j, field;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ field = vc_data.field;
++ if ((field < 0) || (field >= VXA_SOCK_SIZE))
++ return -EINVAL;
++
++ for (j = 0; j < 3; j++) {
++ vc_data.count[j] = vx_sock_count(&vxi->cacct, field, j);
++ vc_data.total[j] = vx_sock_total(&vxi->cacct, field, j);
++ }
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return 0;
++}
++
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/cacct_init.h linux-3.6.10-vs2.3.4.6/kernel/vserver/cacct_init.h
+--- linux-3.6.10/kernel/vserver/cacct_init.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/cacct_init.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,25 @@
++
++
++static inline void vx_info_init_cacct(struct _vx_cacct *cacct)
++{
++ int i, j;
++
++
++ for (i = 0; i < VXA_SOCK_SIZE; i++) {
++ for (j = 0; j < 3; j++) {
++ atomic_long_set(&cacct->sock[i][j].count, 0);
++ atomic_long_set(&cacct->sock[i][j].total, 0);
++ }
++ }
++ for (i = 0; i < 8; i++)
++ atomic_set(&cacct->slab[i], 0);
++ for (i = 0; i < 5; i++)
++ for (j = 0; j < 4; j++)
++ atomic_set(&cacct->page[i][j], 0);
++}
++
++static inline void vx_info_exit_cacct(struct _vx_cacct *cacct)
++{
++ return;
++}
++
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/cacct_proc.h linux-3.6.10-vs2.3.4.6/kernel/vserver/cacct_proc.h
+--- linux-3.6.10/kernel/vserver/cacct_proc.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/cacct_proc.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,53 @@
++#ifndef _VX_CACCT_PROC_H
++#define _VX_CACCT_PROC_H
++
++#include <linux/vserver/cacct_int.h>
++
++
++#define VX_SOCKA_TOP \
++ "Type\t recv #/bytes\t\t send #/bytes\t\t fail #/bytes\n"
++
++static inline int vx_info_proc_cacct(struct _vx_cacct *cacct, char *buffer)
++{
++ int i, j, length = 0;
++ static char *type[VXA_SOCK_SIZE] = {
++ "UNSPEC", "UNIX", "INET", "INET6", "PACKET", "OTHER"
++ };
++
++ length += sprintf(buffer + length, VX_SOCKA_TOP);
++ for (i = 0; i < VXA_SOCK_SIZE; i++) {
++ length += sprintf(buffer + length, "%s:", type[i]);
++ for (j = 0; j < 3; j++) {
++ length += sprintf(buffer + length,
++ "\t%10lu/%-10lu",
++ vx_sock_count(cacct, i, j),
++ vx_sock_total(cacct, i, j));
++ }
++ buffer[length++] = '\n';
++ }
++
++ length += sprintf(buffer + length, "\n");
++ length += sprintf(buffer + length,
++ "slab:\t %8u %8u %8u %8u\n",
++ atomic_read(&cacct->slab[1]),
++ atomic_read(&cacct->slab[4]),
++ atomic_read(&cacct->slab[0]),
++ atomic_read(&cacct->slab[2]));
++
++ length += sprintf(buffer + length, "\n");
++ for (i = 0; i < 5; i++) {
++ length += sprintf(buffer + length,
++ "page[%d]: %8u %8u %8u %8u\t %8u %8u %8u %8u\n", i,
++ atomic_read(&cacct->page[i][0]),
++ atomic_read(&cacct->page[i][1]),
++ atomic_read(&cacct->page[i][2]),
++ atomic_read(&cacct->page[i][3]),
++ atomic_read(&cacct->page[i][4]),
++ atomic_read(&cacct->page[i][5]),
++ atomic_read(&cacct->page[i][6]),
++ atomic_read(&cacct->page[i][7]));
++ }
++ return length;
++}
++
++#endif /* _VX_CACCT_PROC_H */
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/context.c linux-3.6.10-vs2.3.4.6/kernel/vserver/context.c
+--- linux-3.6.10/kernel/vserver/context.c 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/context.c 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,1119 @@
++/*
++ * linux/kernel/vserver/context.c
++ *
++ * Virtual Server: Context Support
++ *
++ * Copyright (C) 2003-2011 Herbert Pötzl
++ *
++ * V0.01 context helper
++ * V0.02 vx_ctx_kill syscall command
++ * V0.03 replaced context_info calls
++ * V0.04 redesign of struct (de)alloc
++ * V0.05 rlimit basic implementation
++ * V0.06 task_xid and info commands
++ * V0.07 context flags and caps
++ * V0.08 switch to RCU based hash
++ * V0.09 revert to non RCU for now
++ * V0.10 and back to working RCU hash
++ * V0.11 and back to locking again
++ * V0.12 referenced context store
++ * V0.13 separate per cpu data
++ * V0.14 changed vcmds to vxi arg
++ * V0.15 added context stat
++ * V0.16 have __create claim() the vxi
++ * V0.17 removed older and legacy stuff
++ * V0.18 added user credentials
++ * V0.19 added warn mask
++ *
++ */
++
++#include <linux/slab.h>
++#include <linux/types.h>
++#include <linux/security.h>
++#include <linux/pid_namespace.h>
++#include <linux/capability.h>
++
++#include <linux/vserver/context.h>
++#include <linux/vserver/network.h>
++#include <linux/vserver/debug.h>
++#include <linux/vserver/limit.h>
++#include <linux/vserver/limit_int.h>
++#include <linux/vserver/space.h>
++#include <linux/init_task.h>
++#include <linux/fs_struct.h>
++#include <linux/cred.h>
++
++#include <linux/vs_context.h>
++#include <linux/vs_limit.h>
++#include <linux/vs_pid.h>
++#include <linux/vserver/context_cmd.h>
++
++#include "cvirt_init.h"
++#include "cacct_init.h"
++#include "limit_init.h"
++#include "sched_init.h"
++
++
++atomic_t vx_global_ctotal = ATOMIC_INIT(0);
++atomic_t vx_global_cactive = ATOMIC_INIT(0);
++
++
++/* now inactive context structures */
++
++static struct hlist_head vx_info_inactive = HLIST_HEAD_INIT;
++
++static DEFINE_SPINLOCK(vx_info_inactive_lock);
++
++
++/* __alloc_vx_info()
++
++ * allocate an initialized vx_info struct
++ * doesn't make it visible (hash) */
++
++static struct vx_info *__alloc_vx_info(xid_t xid)
++{
++ struct vx_info *new = NULL;
++ int cpu, index;
++
++ vxdprintk(VXD_CBIT(xid, 0), "alloc_vx_info(%d)*", xid);
++
++ /* would this benefit from a slab cache? */
++ new = kmalloc(sizeof(struct vx_info), GFP_KERNEL);
++ if (!new)
++ return 0;
++
++ memset(new, 0, sizeof(struct vx_info));
++#ifdef CONFIG_SMP
++ new->ptr_pc = alloc_percpu(struct _vx_info_pc);
++ if (!new->ptr_pc)
++ goto error;
++#endif
++ new->vx_id = xid;
++ INIT_HLIST_NODE(&new->vx_hlist);
++ atomic_set(&new->vx_usecnt, 0);
++ atomic_set(&new->vx_tasks, 0);
++ new->vx_parent = NULL;
++ new->vx_state = 0;
++ init_waitqueue_head(&new->vx_wait);
++
++ /* prepare reaper */
++ get_task_struct(init_pid_ns.child_reaper);
++ new->vx_reaper = init_pid_ns.child_reaper;
++ new->vx_badness_bias = 0;
++
++ /* rest of init goes here */
++ vx_info_init_limit(&new->limit);
++ vx_info_init_sched(&new->sched);
++ vx_info_init_cvirt(&new->cvirt);
++ vx_info_init_cacct(&new->cacct);
++
++ /* per cpu data structures */
++ for_each_possible_cpu(cpu) {
++ vx_info_init_sched_pc(
++ &vx_per_cpu(new, sched_pc, cpu), cpu);
++ vx_info_init_cvirt_pc(
++ &vx_per_cpu(new, cvirt_pc, cpu), cpu);
++ }
++
++ new->vx_flags = VXF_INIT_SET;
++ new->vx_bcaps = CAP_FULL_SET; // maybe ~CAP_SETPCAP
++ new->vx_ccaps = 0;
++ new->vx_umask = 0;
++ new->vx_wmask = 0;
++
++ new->reboot_cmd = 0;
++ new->exit_code = 0;
++
++ // preconfig spaces
++ for (index = 0; index < VX_SPACES; index++) {
++ struct _vx_space *space = &new->space[index];
++
++ // filesystem
++ spin_lock(&init_fs.lock);
++ init_fs.users++;
++ spin_unlock(&init_fs.lock);
++ space->vx_fs = &init_fs;
++
++ /* FIXME: do we want defaults? */
++ // space->vx_real_cred = 0;
++ // space->vx_cred = 0;
++ }
++
++
++ vxdprintk(VXD_CBIT(xid, 0),
++ "alloc_vx_info(%d) = %p", xid, new);
++ vxh_alloc_vx_info(new);
++ atomic_inc(&vx_global_ctotal);
++ return new;
++#ifdef CONFIG_SMP
++error:
++ kfree(new);
++ return 0;
++#endif
++}
++
++/* __dealloc_vx_info()
++
++ * final disposal of vx_info */
++
++static void __dealloc_vx_info(struct vx_info *vxi)
++{
++#ifdef CONFIG_VSERVER_WARN
++ struct vx_info_save vxis;
++ int cpu;
++#endif
++ vxdprintk(VXD_CBIT(xid, 0),
++ "dealloc_vx_info(%p)", vxi);
++ vxh_dealloc_vx_info(vxi);
++
++#ifdef CONFIG_VSERVER_WARN
++ enter_vx_info(vxi, &vxis);
++ vx_info_exit_limit(&vxi->limit);
++ vx_info_exit_sched(&vxi->sched);
++ vx_info_exit_cvirt(&vxi->cvirt);
++ vx_info_exit_cacct(&vxi->cacct);
++
++ for_each_possible_cpu(cpu) {
++ vx_info_exit_sched_pc(
++ &vx_per_cpu(vxi, sched_pc, cpu), cpu);
++ vx_info_exit_cvirt_pc(
++ &vx_per_cpu(vxi, cvirt_pc, cpu), cpu);
++ }
++ leave_vx_info(&vxis);
++#endif
++
++ vxi->vx_id = -1;
++ vxi->vx_state |= VXS_RELEASED;
++
++#ifdef CONFIG_SMP
++ free_percpu(vxi->ptr_pc);
++#endif
++ kfree(vxi);
++ atomic_dec(&vx_global_ctotal);
++}
++
++static void __shutdown_vx_info(struct vx_info *vxi)
++{
++ struct nsproxy *nsproxy;
++ struct fs_struct *fs;
++ struct cred *cred;
++ int index, kill;
++
++ might_sleep();
++
++ vxi->vx_state |= VXS_SHUTDOWN;
++ vs_state_change(vxi, VSC_SHUTDOWN);
++
++ for (index = 0; index < VX_SPACES; index++) {
++ struct _vx_space *space = &vxi->space[index];
++
++ nsproxy = xchg(&space->vx_nsproxy, NULL);
++ if (nsproxy)
++ put_nsproxy(nsproxy);
++
++ fs = xchg(&space->vx_fs, NULL);
++ spin_lock(&fs->lock);
++ kill = !--fs->users;
++ spin_unlock(&fs->lock);
++ if (kill)
++ free_fs_struct(fs);
++
++ cred = (struct cred *)xchg(&space->vx_cred, NULL);
++ if (cred)
++ abort_creds(cred);
++ }
++}
++
++/* exported stuff */
++
++void free_vx_info(struct vx_info *vxi)
++{
++ unsigned long flags;
++ unsigned index;
++
++ /* check for reference counts first */
++ BUG_ON(atomic_read(&vxi->vx_usecnt));
++ BUG_ON(atomic_read(&vxi->vx_tasks));
++
++ /* context must not be hashed */
++ BUG_ON(vx_info_state(vxi, VXS_HASHED));
++
++ /* context shutdown is mandatory */
++ BUG_ON(!vx_info_state(vxi, VXS_SHUTDOWN));
++
++ /* spaces check */
++ for (index = 0; index < VX_SPACES; index++) {
++ struct _vx_space *space = &vxi->space[index];
++
++ BUG_ON(space->vx_nsproxy);
++ BUG_ON(space->vx_fs);
++ // BUG_ON(space->vx_real_cred);
++ // BUG_ON(space->vx_cred);
++ }
++
++ spin_lock_irqsave(&vx_info_inactive_lock, flags);
++ hlist_del(&vxi->vx_hlist);
++ spin_unlock_irqrestore(&vx_info_inactive_lock, flags);
++
++ __dealloc_vx_info(vxi);
++}
++
++
++/* hash table for vx_info hash */
++
++#define VX_HASH_SIZE 13
++
++static struct hlist_head vx_info_hash[VX_HASH_SIZE] =
++ { [0 ... VX_HASH_SIZE-1] = HLIST_HEAD_INIT };
++
++static DEFINE_SPINLOCK(vx_info_hash_lock);
++
++
++static inline unsigned int __hashval(xid_t xid)
++{
++ return (xid % VX_HASH_SIZE);
++}
++
++
++
++/* __hash_vx_info()
++
++ * add the vxi to the global hash table
++ * requires the hash_lock to be held */
++
++static inline void __hash_vx_info(struct vx_info *vxi)
++{
++ struct hlist_head *head;
++
++ vxd_assert_lock(&vx_info_hash_lock);
++ vxdprintk(VXD_CBIT(xid, 4),
++ "__hash_vx_info: %p[#%d]", vxi, vxi->vx_id);
++ vxh_hash_vx_info(vxi);
++
++ /* context must not be hashed */
++ BUG_ON(vx_info_state(vxi, VXS_HASHED));
++
++ vxi->vx_state |= VXS_HASHED;
++ head = &vx_info_hash[__hashval(vxi->vx_id)];
++ hlist_add_head(&vxi->vx_hlist, head);
++ atomic_inc(&vx_global_cactive);
++}
++
++/* __unhash_vx_info()
++
++ * remove the vxi from the global hash table
++ * requires the hash_lock to be held */
++
++static inline void __unhash_vx_info(struct vx_info *vxi)
++{
++ unsigned long flags;
++
++ vxd_assert_lock(&vx_info_hash_lock);
++ vxdprintk(VXD_CBIT(xid, 4),
++ "__unhash_vx_info: %p[#%d.%d.%d]", vxi, vxi->vx_id,
++ atomic_read(&vxi->vx_usecnt), atomic_read(&vxi->vx_tasks));
++ vxh_unhash_vx_info(vxi);
++
++ /* context must be hashed */
++ BUG_ON(!vx_info_state(vxi, VXS_HASHED));
++ /* but without tasks */
++ BUG_ON(atomic_read(&vxi->vx_tasks));
++
++ vxi->vx_state &= ~VXS_HASHED;
++ hlist_del_init(&vxi->vx_hlist);
++ spin_lock_irqsave(&vx_info_inactive_lock, flags);
++ hlist_add_head(&vxi->vx_hlist, &vx_info_inactive);
++ spin_unlock_irqrestore(&vx_info_inactive_lock, flags);
++ atomic_dec(&vx_global_cactive);
++}
++
++
++/* __lookup_vx_info()
++
++ * requires the hash_lock to be held
++ * doesn't increment the vx_refcnt */
++
++static inline struct vx_info *__lookup_vx_info(xid_t xid)
++{
++ struct hlist_head *head = &vx_info_hash[__hashval(xid)];
++ struct hlist_node *pos;
++ struct vx_info *vxi;
++
++ vxd_assert_lock(&vx_info_hash_lock);
++ hlist_for_each(pos, head) {
++ vxi = hlist_entry(pos, struct vx_info, vx_hlist);
++
++ if (vxi->vx_id == xid)
++ goto found;
++ }
++ vxi = NULL;
++found:
++ vxdprintk(VXD_CBIT(xid, 0),
++ "__lookup_vx_info(#%u): %p[#%u]",
++ xid, vxi, vxi ? vxi->vx_id : 0);
++ vxh_lookup_vx_info(vxi, xid);
++ return vxi;
++}
++
++
++/* __create_vx_info()
++
++ * create the requested context
++ * get(), claim() and hash it */
++
++static struct vx_info *__create_vx_info(int id)
++{
++ struct vx_info *new, *vxi = NULL;
++
++ vxdprintk(VXD_CBIT(xid, 1), "create_vx_info(%d)*", id);
++
++ if (!(new = __alloc_vx_info(id)))
++ return ERR_PTR(-ENOMEM);
++
++ /* required to make dynamic xids unique */
++ spin_lock(&vx_info_hash_lock);
++
++ /* static context requested */
++ if ((vxi = __lookup_vx_info(id))) {
++ vxdprintk(VXD_CBIT(xid, 0),
++ "create_vx_info(%d) = %p (already there)", id, vxi);
++ if (vx_info_flags(vxi, VXF_STATE_SETUP, 0))
++ vxi = ERR_PTR(-EBUSY);
++ else
++ vxi = ERR_PTR(-EEXIST);
++ goto out_unlock;
++ }
++ /* new context */
++ vxdprintk(VXD_CBIT(xid, 0),
++ "create_vx_info(%d) = %p (new)", id, new);
++ claim_vx_info(new, NULL);
++ __hash_vx_info(get_vx_info(new));
++ vxi = new, new = NULL;
++
++out_unlock:
++ spin_unlock(&vx_info_hash_lock);
++ vxh_create_vx_info(IS_ERR(vxi) ? NULL : vxi, id);
++ if (new)
++ __dealloc_vx_info(new);
++ return vxi;
++}
++
++
++/* exported stuff */
++
++
++void unhash_vx_info(struct vx_info *vxi)
++{
++ spin_lock(&vx_info_hash_lock);
++ __unhash_vx_info(vxi);
++ spin_unlock(&vx_info_hash_lock);
++ __shutdown_vx_info(vxi);
++ __wakeup_vx_info(vxi);
++}
++
++
++/* lookup_vx_info()
++
++ * search for a vx_info and get() it
++ * negative id means current */
++
++struct vx_info *lookup_vx_info(int id)
++{
++ struct vx_info *vxi = NULL;
++
++ if (id < 0) {
++ vxi = get_vx_info(current_vx_info());
++ } else if (id > 1) {
++ spin_lock(&vx_info_hash_lock);
++ vxi = get_vx_info(__lookup_vx_info(id));
++ spin_unlock(&vx_info_hash_lock);
++ }
++ return vxi;
++}
++
++/* xid_is_hashed()
++
++ * verify that xid is still hashed */
++
++int xid_is_hashed(xid_t xid)
++{
++ int hashed;
++
++ spin_lock(&vx_info_hash_lock);
++ hashed = (__lookup_vx_info(xid) != NULL);
++ spin_unlock(&vx_info_hash_lock);
++ return hashed;
++}
++
++#ifdef CONFIG_PROC_FS
++
++/* get_xid_list()
++
++ * get a subset of hashed xids for proc
++ * assumes size is at least one */
++
++int get_xid_list(int index, unsigned int *xids, int size)
++{
++ int hindex, nr_xids = 0;
++
++ /* only show current and children */
++ if (!vx_check(0, VS_ADMIN | VS_WATCH)) {
++ if (index > 0)
++ return 0;
++ xids[nr_xids] = vx_current_xid();
++ return 1;
++ }
++
++ for (hindex = 0; hindex < VX_HASH_SIZE; hindex++) {
++ struct hlist_head *head = &vx_info_hash[hindex];
++ struct hlist_node *pos;
++
++ spin_lock(&vx_info_hash_lock);
++ hlist_for_each(pos, head) {
++ struct vx_info *vxi;
++
++ if (--index > 0)
++ continue;
++
++ vxi = hlist_entry(pos, struct vx_info, vx_hlist);
++ xids[nr_xids] = vxi->vx_id;
++ if (++nr_xids >= size) {
++ spin_unlock(&vx_info_hash_lock);
++ goto out;
++ }
++ }
++ /* keep the lock time short */
++ spin_unlock(&vx_info_hash_lock);
++ }
++out:
++ return nr_xids;
++}
++#endif
++
++#ifdef CONFIG_VSERVER_DEBUG
++
++void dump_vx_info_inactive(int level)
++{
++ struct hlist_node *entry, *next;
++
++ hlist_for_each_safe(entry, next, &vx_info_inactive) {
++ struct vx_info *vxi =
++ list_entry(entry, struct vx_info, vx_hlist);
++
++ dump_vx_info(vxi, level);
++ }
++}
++
++#endif
++
++#if 0
++int vx_migrate_user(struct task_struct *p, struct vx_info *vxi)
++{
++ struct user_struct *new_user, *old_user;
++
++ if (!p || !vxi)
++ BUG();
++
++ if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0))
++ return -EACCES;
++
++ new_user = alloc_uid(vxi->vx_id, p->uid);
++ if (!new_user)
++ return -ENOMEM;
++
++ old_user = p->user;
++ if (new_user != old_user) {
++ atomic_inc(&new_user->processes);
++ atomic_dec(&old_user->processes);
++ p->user = new_user;
++ }
++ free_uid(old_user);
++ return 0;
++}
++#endif
++
++#if 0
++void vx_mask_cap_bset(struct vx_info *vxi, struct task_struct *p)
++{
++ // p->cap_effective &= vxi->vx_cap_bset;
++ p->cap_effective =
++ cap_intersect(p->cap_effective, vxi->cap_bset);
++ // p->cap_inheritable &= vxi->vx_cap_bset;
++ p->cap_inheritable =
++ cap_intersect(p->cap_inheritable, vxi->cap_bset);
++ // p->cap_permitted &= vxi->vx_cap_bset;
++ p->cap_permitted =
++ cap_intersect(p->cap_permitted, vxi->cap_bset);
++}
++#endif
++
++
++#include <linux/file.h>
++#include <linux/fdtable.h>
++
++static int vx_openfd_task(struct task_struct *tsk)
++{
++ struct files_struct *files = tsk->files;
++ struct fdtable *fdt;
++ const unsigned long *bptr;
++ int count, total;
++
++ /* no rcu_read_lock() because of spin_lock() */
++ spin_lock(&files->file_lock);
++ fdt = files_fdtable(files);
++ bptr = fdt->open_fds;
++ count = fdt->max_fds / (sizeof(unsigned long) * 8);
++ for (total = 0; count > 0; count--) {
++ if (*bptr)
++ total += hweight_long(*bptr);
++ bptr++;
++ }
++ spin_unlock(&files->file_lock);
++ return total;
++}
++
++
++/* for *space compatibility */
++
++asmlinkage long sys_unshare(unsigned long);
++
++/*
++ * migrate task to new context
++ * gets vxi, puts old_vxi on change
++ * optionally unshares namespaces (hack)
++ */
++
++int vx_migrate_task(struct task_struct *p, struct vx_info *vxi, int unshare)
++{
++ struct vx_info *old_vxi;
++ int ret = 0;
++
++ if (!p || !vxi)
++ BUG();
++
++ vxdprintk(VXD_CBIT(xid, 5),
++ "vx_migrate_task(%p,%p[#%d.%d])", p, vxi,
++ vxi->vx_id, atomic_read(&vxi->vx_usecnt));
++
++ if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0) &&
++ !vx_info_flags(vxi, VXF_STATE_SETUP, 0))
++ return -EACCES;
++
++ if (vx_info_state(vxi, VXS_SHUTDOWN))
++ return -EFAULT;
++
++ old_vxi = task_get_vx_info(p);
++ if (old_vxi == vxi)
++ goto out;
++
++// if (!(ret = vx_migrate_user(p, vxi))) {
++ {
++ int openfd;
++
++ task_lock(p);
++ openfd = vx_openfd_task(p);
++
++ if (old_vxi) {
++ atomic_dec(&old_vxi->cvirt.nr_threads);
++ atomic_dec(&old_vxi->cvirt.nr_running);
++ __rlim_dec(&old_vxi->limit, RLIMIT_NPROC);
++ /* FIXME: what about the struct files here? */
++ __rlim_sub(&old_vxi->limit, VLIMIT_OPENFD, openfd);
++ /* account for the executable */
++ __rlim_dec(&old_vxi->limit, VLIMIT_DENTRY);
++ }
++ atomic_inc(&vxi->cvirt.nr_threads);
++ atomic_inc(&vxi->cvirt.nr_running);
++ __rlim_inc(&vxi->limit, RLIMIT_NPROC);
++ /* FIXME: what about the struct files here? */
++ __rlim_add(&vxi->limit, VLIMIT_OPENFD, openfd);
++ /* account for the executable */
++ __rlim_inc(&vxi->limit, VLIMIT_DENTRY);
++
++ if (old_vxi) {
++ release_vx_info(old_vxi, p);
++ clr_vx_info(&p->vx_info);
++ }
++ claim_vx_info(vxi, p);
++ set_vx_info(&p->vx_info, vxi);
++ p->xid = vxi->vx_id;
++
++ vxdprintk(VXD_CBIT(xid, 5),
++ "moved task %p into vxi:%p[#%d]",
++ p, vxi, vxi->vx_id);
++
++ // vx_mask_cap_bset(vxi, p);
++ task_unlock(p);
++
++ /* hack for *spaces to provide compatibility */
++ if (unshare) {
++ struct nsproxy *old_nsp, *new_nsp;
++
++ ret = unshare_nsproxy_namespaces(
++ CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWUSER,
++ &new_nsp, NULL);
++ if (ret)
++ goto out;
++
++ old_nsp = xchg(&p->nsproxy, new_nsp);
++ vx_set_space(vxi,
++ CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWUSER, 0);
++ put_nsproxy(old_nsp);
++ }
++ }
++out:
++ put_vx_info(old_vxi);
++ return ret;
++}
++
++int vx_set_reaper(struct vx_info *vxi, struct task_struct *p)
++{
++ struct task_struct *old_reaper;
++ struct vx_info *reaper_vxi;
++
++ if (!vxi)
++ return -EINVAL;
++
++ vxdprintk(VXD_CBIT(xid, 6),
++ "vx_set_reaper(%p[#%d],%p[#%d,%d])",
++ vxi, vxi->vx_id, p, p->xid, p->pid);
++
++ old_reaper = vxi->vx_reaper;
++ if (old_reaper == p)
++ return 0;
++
++ reaper_vxi = task_get_vx_info(p);
++ if (reaper_vxi && reaper_vxi != vxi) {
++ vxwprintk(1,
++ "Unsuitable reaper [" VS_Q("%s") ",%u:#%u] "
++ "for [xid #%u]",
++ p->comm, p->pid, p->xid, vx_current_xid());
++ goto out;
++ }
++
++ /* set new child reaper */
++ get_task_struct(p);
++ vxi->vx_reaper = p;
++ put_task_struct(old_reaper);
++out:
++ put_vx_info(reaper_vxi);
++ return 0;
++}
++
++int vx_set_init(struct vx_info *vxi, struct task_struct *p)
++{
++ if (!vxi)
++ return -EINVAL;
++
++ vxdprintk(VXD_CBIT(xid, 6),
++ "vx_set_init(%p[#%d],%p[#%d,%d,%d])",
++ vxi, vxi->vx_id, p, p->xid, p->pid, p->tgid);
++
++ vxi->vx_flags &= ~VXF_STATE_INIT;
++ // vxi->vx_initpid = p->tgid;
++ vxi->vx_initpid = p->pid;
++ return 0;
++}
++
++void vx_exit_init(struct vx_info *vxi, struct task_struct *p, int code)
++{
++ vxdprintk(VXD_CBIT(xid, 6),
++ "vx_exit_init(%p[#%d],%p[#%d,%d,%d])",
++ vxi, vxi->vx_id, p, p->xid, p->pid, p->tgid);
++
++ vxi->exit_code = code;
++ vxi->vx_initpid = 0;
++}
++
++
++void vx_set_persistent(struct vx_info *vxi)
++{
++ vxdprintk(VXD_CBIT(xid, 6),
++ "vx_set_persistent(%p[#%d])", vxi, vxi->vx_id);
++
++ get_vx_info(vxi);
++ claim_vx_info(vxi, NULL);
++}
++
++void vx_clear_persistent(struct vx_info *vxi)
++{
++ vxdprintk(VXD_CBIT(xid, 6),
++ "vx_clear_persistent(%p[#%d])", vxi, vxi->vx_id);
++
++ release_vx_info(vxi, NULL);
++ put_vx_info(vxi);
++}
++
++void vx_update_persistent(struct vx_info *vxi)
++{
++ if (vx_info_flags(vxi, VXF_PERSISTENT, 0))
++ vx_set_persistent(vxi);
++ else
++ vx_clear_persistent(vxi);
++}
++
++
++/* task must be current or locked */
++
++void exit_vx_info(struct task_struct *p, int code)
++{
++ struct vx_info *vxi = p->vx_info;
++
++ if (vxi) {
++ atomic_dec(&vxi->cvirt.nr_threads);
++ vx_nproc_dec(p);
++
++ vxi->exit_code = code;
++ release_vx_info(vxi, p);
++ }
++}
++
++void exit_vx_info_early(struct task_struct *p, int code)
++{
++ struct vx_info *vxi = p->vx_info;
++
++ if (vxi) {
++ if (vxi->vx_initpid == p->pid)
++ vx_exit_init(vxi, p, code);
++ if (vxi->vx_reaper == p)
++ vx_set_reaper(vxi, init_pid_ns.child_reaper);
++ }
++}
++
++
++/* vserver syscall commands below here */
++
++/* task xid and vx_info functions */
++
++#include <asm/uaccess.h>
++
++
++int vc_task_xid(uint32_t id)
++{
++ xid_t xid;
++
++ if (id) {
++ struct task_struct *tsk;
++
++ rcu_read_lock();
++ tsk = find_task_by_real_pid(id);
++ xid = (tsk) ? tsk->xid : -ESRCH;
++ rcu_read_unlock();
++ } else
++ xid = vx_current_xid();
++ return xid;
++}
++
++
++int vc_vx_info(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_vx_info_v0 vc_data;
++
++ vc_data.xid = vxi->vx_id;
++ vc_data.initpid = vxi->vx_initpid;
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return 0;
++}
++
++
++int vc_ctx_stat(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_ctx_stat_v0 vc_data;
++
++ vc_data.usecnt = atomic_read(&vxi->vx_usecnt);
++ vc_data.tasks = atomic_read(&vxi->vx_tasks);
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return 0;
++}
++
++
++/* context functions */
++
++int vc_ctx_create(uint32_t xid, void __user *data)
++{
++ struct vcmd_ctx_create vc_data = { .flagword = VXF_INIT_SET };
++ struct vx_info *new_vxi;
++ int ret;
++
++ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ if ((xid > MAX_S_CONTEXT) || (xid < 2))
++ return -EINVAL;
++
++ new_vxi = __create_vx_info(xid);
++ if (IS_ERR(new_vxi))
++ return PTR_ERR(new_vxi);
++
++ /* initial flags */
++ new_vxi->vx_flags = vc_data.flagword;
++
++ ret = -ENOEXEC;
++ if (vs_state_change(new_vxi, VSC_STARTUP))
++ goto out;
++
++ ret = vx_migrate_task(current, new_vxi, (!data));
++ if (ret)
++ goto out;
++
++ /* return context id on success */
++ ret = new_vxi->vx_id;
++
++ /* get a reference for persistent contexts */
++ if ((vc_data.flagword & VXF_PERSISTENT))
++ vx_set_persistent(new_vxi);
++out:
++ release_vx_info(new_vxi, NULL);
++ put_vx_info(new_vxi);
++ return ret;
++}
++
++
++int vc_ctx_migrate(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_ctx_migrate vc_data = { .flagword = 0 };
++ int ret;
++
++ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ ret = vx_migrate_task(current, vxi, 0);
++ if (ret)
++ return ret;
++ if (vc_data.flagword & VXM_SET_INIT)
++ ret = vx_set_init(vxi, current);
++ if (ret)
++ return ret;
++ if (vc_data.flagword & VXM_SET_REAPER)
++ ret = vx_set_reaper(vxi, current);
++ return ret;
++}
++
++
++int vc_get_cflags(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_ctx_flags_v0 vc_data;
++
++ vc_data.flagword = vxi->vx_flags;
++
++ /* special STATE flag handling */
++ vc_data.mask = vs_mask_flags(~0ULL, vxi->vx_flags, VXF_ONE_TIME);
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return 0;
++}
++
++int vc_set_cflags(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_ctx_flags_v0 vc_data;
++ uint64_t mask, trigger;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ /* special STATE flag handling */
++ mask = vs_mask_mask(vc_data.mask, vxi->vx_flags, VXF_ONE_TIME);
++ trigger = (mask & vxi->vx_flags) ^ (mask & vc_data.flagword);
++
++ if (vxi == current_vx_info()) {
++ /* if (trigger & VXF_STATE_SETUP)
++ vx_mask_cap_bset(vxi, current); */
++ if (trigger & VXF_STATE_INIT) {
++ int ret;
++
++ ret = vx_set_init(vxi, current);
++ if (ret)
++ return ret;
++ ret = vx_set_reaper(vxi, current);
++ if (ret)
++ return ret;
++ }
++ }
++
++ vxi->vx_flags = vs_mask_flags(vxi->vx_flags,
++ vc_data.flagword, mask);
++ if (trigger & VXF_PERSISTENT)
++ vx_update_persistent(vxi);
++
++ return 0;
++}
++
++
++static inline uint64_t caps_from_cap_t(kernel_cap_t c)
++{
++ uint64_t v = c.cap[0] | ((uint64_t)c.cap[1] << 32);
++
++ // printk("caps_from_cap_t(%08x:%08x) = %016llx\n", c.cap[1], c.cap[0], v);
++ return v;
++}
++
++static inline kernel_cap_t cap_t_from_caps(uint64_t v)
++{
++ kernel_cap_t c = __cap_empty_set;
++
++ c.cap[0] = v & 0xFFFFFFFF;
++ c.cap[1] = (v >> 32) & 0xFFFFFFFF;
++
++ // printk("cap_t_from_caps(%016llx) = %08x:%08x\n", v, c.cap[1], c.cap[0]);
++ return c;
++}
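
The two helpers above pack the kernel's two 32-bit capability words into a single uint64_t and back. A minimal user-space sketch of that round trip, not part of the patch, modelling kernel_cap_t as a plain two-word struct (an assumption that matches the two-u32 capability layout used here):

    #include <stdio.h>
    #include <stdint.h>

    struct fake_cap { uint32_t cap[2]; };   /* stand-in for kernel_cap_t */

    /* same packing as caps_from_cap_t() in the patch */
    static uint64_t caps_from(struct fake_cap c)
    {
            return c.cap[0] | ((uint64_t)c.cap[1] << 32);
    }

    /* same unpacking as cap_t_from_caps() in the patch */
    static struct fake_cap caps_to(uint64_t v)
    {
            struct fake_cap c = { { (uint32_t)(v & 0xFFFFFFFF),
                                    (uint32_t)(v >> 32) } };
            return c;
    }

    int main(void)
    {
            struct fake_cap c = { { 0x000000ffu, 0x00000001u } };
            uint64_t v = caps_from(c);
            struct fake_cap back = caps_to(v);

            printf("packed = %016llx\n", (unsigned long long)v);
            printf("round trip ok: %d\n",
                   back.cap[0] == c.cap[0] && back.cap[1] == c.cap[1]);
            return 0;
    }
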
++
++
++static int do_get_caps(struct vx_info *vxi, uint64_t *bcaps, uint64_t *ccaps)
++{
++ if (bcaps)
++ *bcaps = caps_from_cap_t(vxi->vx_bcaps);
++ if (ccaps)
++ *ccaps = vxi->vx_ccaps;
++
++ return 0;
++}
++
++int vc_get_ccaps(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_ctx_caps_v1 vc_data;
++ int ret;
++
++ ret = do_get_caps(vxi, NULL, &vc_data.ccaps);
++ if (ret)
++ return ret;
++ vc_data.cmask = ~0ULL;
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return 0;
++}
++
++static int do_set_caps(struct vx_info *vxi,
++ uint64_t bcaps, uint64_t bmask, uint64_t ccaps, uint64_t cmask)
++{
++ uint64_t bcold = caps_from_cap_t(vxi->vx_bcaps);
++
++#if 0
++ printk("do_set_caps(%16llx, %16llx, %16llx, %16llx)\n",
++ bcaps, bmask, ccaps, cmask);
++#endif
++ vxi->vx_bcaps = cap_t_from_caps(
++ vs_mask_flags(bcold, bcaps, bmask));
++ vxi->vx_ccaps = vs_mask_flags(vxi->vx_ccaps, ccaps, cmask);
++
++ return 0;
++}
++
++int vc_set_ccaps(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_ctx_caps_v1 vc_data;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ return do_set_caps(vxi, 0, 0, vc_data.ccaps, vc_data.cmask);
++}
++
++int vc_get_bcaps(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_bcaps vc_data;
++ int ret;
++
++ ret = do_get_caps(vxi, &vc_data.bcaps, NULL);
++ if (ret)
++ return ret;
++ vc_data.bmask = ~0ULL;
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return 0;
++}
++
++int vc_set_bcaps(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_bcaps vc_data;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ return do_set_caps(vxi, vc_data.bcaps, vc_data.bmask, 0, 0);
++}
++
++
++int vc_get_umask(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_umask vc_data;
++
++ vc_data.umask = vxi->vx_umask;
++ vc_data.mask = ~0ULL;
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return 0;
++}
++
++int vc_set_umask(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_umask vc_data;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ vxi->vx_umask = vs_mask_flags(vxi->vx_umask,
++ vc_data.umask, vc_data.mask);
++ return 0;
++}
++
++
++int vc_get_wmask(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_wmask vc_data;
++
++ vc_data.wmask = vxi->vx_wmask;
++ vc_data.mask = ~0ULL;
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return 0;
++}
++
++int vc_set_wmask(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_wmask vc_data;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ vxi->vx_wmask = vs_mask_flags(vxi->vx_wmask,
++ vc_data.wmask, vc_data.mask);
++ return 0;
++}
++
++
++int vc_get_badness(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_badness_v0 vc_data;
++
++ vc_data.bias = vxi->vx_badness_bias;
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return 0;
++}
++
++int vc_set_badness(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_badness_v0 vc_data;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ vxi->vx_badness_bias = vc_data.bias;
++ return 0;
++}
++
++#include <linux/module.h>
++
++EXPORT_SYMBOL_GPL(free_vx_info);
++
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/cvirt.c linux-3.6.10-vs2.3.4.6/kernel/vserver/cvirt.c
+--- linux-3.6.10/kernel/vserver/cvirt.c 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/cvirt.c 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,313 @@
++/*
++ * linux/kernel/vserver/cvirt.c
++ *
++ * Virtual Server: Context Virtualization
++ *
++ * Copyright (C) 2004-2007 Herbert Pötzl
++ *
++ * V0.01 broken out from limit.c
++ * V0.02 added utsname stuff
++ * V0.03 changed vcmds to vxi arg
++ *
++ */
++
++#include <linux/types.h>
++#include <linux/utsname.h>
++#include <linux/vs_cvirt.h>
++#include <linux/vserver/switch.h>
++#include <linux/vserver/cvirt_cmd.h>
++
++#include <asm/uaccess.h>
++
++
++void vx_vsi_boottime(struct timespec *boottime)
++{
++ struct vx_info *vxi = current_vx_info();
++
++ set_normalized_timespec(boottime,
++ boottime->tv_sec + vxi->cvirt.bias_uptime.tv_sec,
++ boottime->tv_nsec + vxi->cvirt.bias_uptime.tv_nsec);
++ return;
++}
++
++void vx_vsi_uptime(struct timespec *uptime, struct timespec *idle)
++{
++ struct vx_info *vxi = current_vx_info();
++
++ set_normalized_timespec(uptime,
++ uptime->tv_sec - vxi->cvirt.bias_uptime.tv_sec,
++ uptime->tv_nsec - vxi->cvirt.bias_uptime.tv_nsec);
++ if (!idle)
++ return;
++ set_normalized_timespec(idle,
++ idle->tv_sec - vxi->cvirt.bias_idle.tv_sec,
++ idle->tv_nsec - vxi->cvirt.bias_idle.tv_nsec);
++ return;
++}
++
++uint64_t vx_idle_jiffies(void)
++{
++ return init_task.utime + init_task.stime;
++}
++
++
++
++static inline uint32_t __update_loadavg(uint32_t load,
++ int wsize, int delta, int n)
++{
++ unsigned long long calc, prev;
++
++ /* just set it to n */
++ if (unlikely(delta >= wsize))
++ return (n << FSHIFT);
++
++ calc = delta * n;
++ calc <<= FSHIFT;
++ prev = (wsize - delta);
++ prev *= load;
++ calc += prev;
++ do_div(calc, wsize);
++ return calc;
++}
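
__update_loadavg() blends the previous fixed-point load with the current task count, weighting each part by how much of the averaging window has elapsed: load' = ((wsize - delta) * load + (delta * n << FSHIFT)) / wsize. A minimal user-space sketch of the same arithmetic, not part of the patch, assuming FSHIFT = 11 as in <linux/sched.h> and HZ = 100 for the example numbers:

    #include <stdio.h>
    #include <stdint.h>

    #define FSHIFT  11
    #define FIXED_1 (1 << FSHIFT)

    /* same fixed-point update as __update_loadavg() above */
    static uint32_t update_loadavg(uint32_t load, int wsize, int delta, int n)
    {
            unsigned long long calc, prev;

            if (delta >= wsize)             /* window fully elapsed: jump to n */
                    return n << FSHIFT;

            calc = (unsigned long long)delta * n << FSHIFT;
            prev = (unsigned long long)(wsize - delta) * load;
            return (uint32_t)((calc + prev) / wsize);
    }

    int main(void)
    {
            /* 1-minute window (60*HZ), 5s between updates, 3 runnable tasks */
            uint32_t load = 0;
            int i;

            for (i = 0; i < 12; i++)
                    load = update_loadavg(load, 60 * 100, 5 * 100, 3);

            /* printed the same way vx_info_proc_cvirt() formats loadavg */
            printf("loadavg ~ %u.%02u\n", load >> FSHIFT,
                   ((load & (FIXED_1 - 1)) * 100) >> FSHIFT);
            return 0;
    }
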
++
++
++void vx_update_load(struct vx_info *vxi)
++{
++ uint32_t now, last, delta;
++ unsigned int nr_running, nr_uninterruptible;
++ unsigned int total;
++ unsigned long flags;
++
++ spin_lock_irqsave(&vxi->cvirt.load_lock, flags);
++
++ now = jiffies;
++ last = vxi->cvirt.load_last;
++ delta = now - last;
++
++ if (delta < 5*HZ)
++ goto out;
++
++ nr_running = atomic_read(&vxi->cvirt.nr_running);
++ nr_uninterruptible = atomic_read(&vxi->cvirt.nr_uninterruptible);
++ total = nr_running + nr_uninterruptible;
++
++ vxi->cvirt.load[0] = __update_loadavg(vxi->cvirt.load[0],
++ 60*HZ, delta, total);
++ vxi->cvirt.load[1] = __update_loadavg(vxi->cvirt.load[1],
++ 5*60*HZ, delta, total);
++ vxi->cvirt.load[2] = __update_loadavg(vxi->cvirt.load[2],
++ 15*60*HZ, delta, total);
++
++ vxi->cvirt.load_last = now;
++out:
++ atomic_inc(&vxi->cvirt.load_updates);
++ spin_unlock_irqrestore(&vxi->cvirt.load_lock, flags);
++}
++
++
++/*
++ * Commands to do_syslog:
++ *
++ * 0 -- Close the log. Currently a NOP.
++ * 1 -- Open the log. Currently a NOP.
++ * 2 -- Read from the log.
++ * 3 -- Read all messages remaining in the ring buffer.
++ * 4 -- Read and clear all messages remaining in the ring buffer
++ * 5 -- Clear ring buffer.
++ * 6 -- Disable printk's to console
++ * 7 -- Enable printk's to console
++ * 8 -- Set level of messages printed to console
++ * 9 -- Return number of unread characters in the log buffer
++ * 10 -- Return size of the log buffer
++ */
++int vx_do_syslog(int type, char __user *buf, int len)
++{
++ int error = 0;
++ int do_clear = 0;
++ struct vx_info *vxi = current_vx_info();
++ struct _vx_syslog *log;
++
++ if (!vxi)
++ return -EINVAL;
++ log = &vxi->cvirt.syslog;
++
++ switch (type) {
++ case 0: /* Close log */
++ case 1: /* Open log */
++ break;
++ case 2: /* Read from log */
++ error = wait_event_interruptible(log->log_wait,
++ (log->log_start - log->log_end));
++ if (error)
++ break;
++ spin_lock_irq(&log->logbuf_lock);
++ spin_unlock_irq(&log->logbuf_lock);
++ break;
++ case 4: /* Read/clear last kernel messages */
++ do_clear = 1;
++ /* fall through */
++ case 3: /* Read last kernel messages */
++ return 0;
++
++ case 5: /* Clear ring buffer */
++ return 0;
++
++ case 6: /* Disable logging to console */
++ case 7: /* Enable logging to console */
++ case 8: /* Set level of messages printed to console */
++ break;
++
++ case 9: /* Number of chars in the log buffer */
++ return 0;
++ case 10: /* Size of the log buffer */
++ return 0;
++ default:
++ error = -EINVAL;
++ break;
++ }
++ return error;
++}
++
++
++/* virtual host info names */
++
++static char *vx_vhi_name(struct vx_info *vxi, int id)
++{
++ struct nsproxy *nsproxy;
++ struct uts_namespace *uts;
++
++ if (id == VHIN_CONTEXT)
++ return vxi->vx_name;
++
++ nsproxy = vxi->space[0].vx_nsproxy;
++ if (!nsproxy)
++ return NULL;
++
++ uts = nsproxy->uts_ns;
++ if (!uts)
++ return NULL;
++
++ switch (id) {
++ case VHIN_SYSNAME:
++ return uts->name.sysname;
++ case VHIN_NODENAME:
++ return uts->name.nodename;
++ case VHIN_RELEASE:
++ return uts->name.release;
++ case VHIN_VERSION:
++ return uts->name.version;
++ case VHIN_MACHINE:
++ return uts->name.machine;
++ case VHIN_DOMAINNAME:
++ return uts->name.domainname;
++ default:
++ return NULL;
++ }
++ return NULL;
++}
++
++int vc_set_vhi_name(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_vhi_name_v0 vc_data;
++ char *name;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ name = vx_vhi_name(vxi, vc_data.field);
++ if (!name)
++ return -EINVAL;
++
++ memcpy(name, vc_data.name, 65);
++ return 0;
++}
++
++int vc_get_vhi_name(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_vhi_name_v0 vc_data;
++ char *name;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ name = vx_vhi_name(vxi, vc_data.field);
++ if (!name)
++ return -EINVAL;
++
++ memcpy(vc_data.name, name, 65);
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return 0;
++}
++
++
++int vc_virt_stat(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_virt_stat_v0 vc_data;
++ struct _vx_cvirt *cvirt = &vxi->cvirt;
++ struct timespec uptime;
++
++ do_posix_clock_monotonic_gettime(&uptime);
++ set_normalized_timespec(&uptime,
++ uptime.tv_sec - cvirt->bias_uptime.tv_sec,
++ uptime.tv_nsec - cvirt->bias_uptime.tv_nsec);
++
++ vc_data.offset = timespec_to_ns(&cvirt->bias_ts);
++ vc_data.uptime = timespec_to_ns(&uptime);
++ vc_data.nr_threads = atomic_read(&cvirt->nr_threads);
++ vc_data.nr_running = atomic_read(&cvirt->nr_running);
++ vc_data.nr_uninterruptible = atomic_read(&cvirt->nr_uninterruptible);
++ vc_data.nr_onhold = atomic_read(&cvirt->nr_onhold);
++ vc_data.nr_forks = atomic_read(&cvirt->total_forks);
++ vc_data.load[0] = cvirt->load[0];
++ vc_data.load[1] = cvirt->load[1];
++ vc_data.load[2] = cvirt->load[2];
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return 0;
++}
++
++
++#ifdef CONFIG_VSERVER_VTIME
++
++/* virtualized time base */
++
++void vx_adjust_timespec(struct timespec *ts)
++{
++ struct vx_info *vxi;
++
++ if (!vx_flags(VXF_VIRT_TIME, 0))
++ return;
++
++ vxi = current_vx_info();
++ ts->tv_sec += vxi->cvirt.bias_ts.tv_sec;
++ ts->tv_nsec += vxi->cvirt.bias_ts.tv_nsec;
++
++ if (ts->tv_nsec >= NSEC_PER_SEC) {
++ ts->tv_sec++;
++ ts->tv_nsec -= NSEC_PER_SEC;
++ } else if (ts->tv_nsec < 0) {
++ ts->tv_sec--;
++ ts->tv_nsec += NSEC_PER_SEC;
++ }
++}
++
++int vx_settimeofday(const struct timespec *ts)
++{
++ struct timespec ats, delta;
++ struct vx_info *vxi;
++
++ if (!vx_flags(VXF_VIRT_TIME, 0))
++ return do_settimeofday(ts);
++
++ getnstimeofday(&ats);
++ delta = timespec_sub(*ts, ats);
++
++ vxi = current_vx_info();
++ vxi->cvirt.bias_ts = timespec_add(vxi->cvirt.bias_ts, delta);
++ return 0;
++}
++
++#endif
++
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/cvirt_init.h linux-3.6.10-vs2.3.4.6/kernel/vserver/cvirt_init.h
+--- linux-3.6.10/kernel/vserver/cvirt_init.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/cvirt_init.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,70 @@
++
++
++extern uint64_t vx_idle_jiffies(void);
++
++static inline void vx_info_init_cvirt(struct _vx_cvirt *cvirt)
++{
++ uint64_t idle_jiffies = vx_idle_jiffies();
++ uint64_t nsuptime;
++
++ do_posix_clock_monotonic_gettime(&cvirt->bias_uptime);
++ nsuptime = (unsigned long long)cvirt->bias_uptime.tv_sec
++ * NSEC_PER_SEC + cvirt->bias_uptime.tv_nsec;
++ cvirt->bias_clock = nsec_to_clock_t(nsuptime);
++ cvirt->bias_ts.tv_sec = 0;
++ cvirt->bias_ts.tv_nsec = 0;
++
++ jiffies_to_timespec(idle_jiffies, &cvirt->bias_idle);
++ atomic_set(&cvirt->nr_threads, 0);
++ atomic_set(&cvirt->nr_running, 0);
++ atomic_set(&cvirt->nr_uninterruptible, 0);
++ atomic_set(&cvirt->nr_onhold, 0);
++
++ spin_lock_init(&cvirt->load_lock);
++ cvirt->load_last = jiffies;
++ atomic_set(&cvirt->load_updates, 0);
++ cvirt->load[0] = 0;
++ cvirt->load[1] = 0;
++ cvirt->load[2] = 0;
++ atomic_set(&cvirt->total_forks, 0);
++
++ spin_lock_init(&cvirt->syslog.logbuf_lock);
++ init_waitqueue_head(&cvirt->syslog.log_wait);
++ cvirt->syslog.log_start = 0;
++ cvirt->syslog.log_end = 0;
++ cvirt->syslog.con_start = 0;
++ cvirt->syslog.logged_chars = 0;
++}
++
++static inline
++void vx_info_init_cvirt_pc(struct _vx_cvirt_pc *cvirt_pc, int cpu)
++{
++ // cvirt_pc->cpustat = { 0 };
++}
++
++static inline void vx_info_exit_cvirt(struct _vx_cvirt *cvirt)
++{
++#ifdef CONFIG_VSERVER_WARN
++ int value;
++#endif
++ vxwprintk_xid((value = atomic_read(&cvirt->nr_threads)),
++ "!!! cvirt: %p[nr_threads] = %d on exit.",
++ cvirt, value);
++ vxwprintk_xid((value = atomic_read(&cvirt->nr_running)),
++ "!!! cvirt: %p[nr_running] = %d on exit.",
++ cvirt, value);
++ vxwprintk_xid((value = atomic_read(&cvirt->nr_uninterruptible)),
++ "!!! cvirt: %p[nr_uninterruptible] = %d on exit.",
++ cvirt, value);
++ vxwprintk_xid((value = atomic_read(&cvirt->nr_onhold)),
++ "!!! cvirt: %p[nr_onhold] = %d on exit.",
++ cvirt, value);
++ return;
++}
++
++static inline
++void vx_info_exit_cvirt_pc(struct _vx_cvirt_pc *cvirt_pc, int cpu)
++{
++ return;
++}
++
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/cvirt_proc.h linux-3.6.10-vs2.3.4.6/kernel/vserver/cvirt_proc.h
+--- linux-3.6.10/kernel/vserver/cvirt_proc.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/cvirt_proc.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,123 @@
++#ifndef _VX_CVIRT_PROC_H
++#define _VX_CVIRT_PROC_H
++
++#include <linux/nsproxy.h>
++#include <linux/mnt_namespace.h>
++#include <linux/ipc_namespace.h>
++#include <linux/utsname.h>
++#include <linux/ipc.h>
++
++extern int vx_info_mnt_namespace(struct mnt_namespace *, char *);
++
++static inline
++int vx_info_proc_nsproxy(struct nsproxy *nsproxy, char *buffer)
++{
++ struct mnt_namespace *ns;
++ struct uts_namespace *uts;
++ struct ipc_namespace *ipc;
++ int length = 0;
++
++ if (!nsproxy)
++ goto out;
++
++ length += sprintf(buffer + length,
++ "NSProxy:\t%p [%p,%p,%p]\n",
++ nsproxy, nsproxy->mnt_ns,
++ nsproxy->uts_ns, nsproxy->ipc_ns);
++
++ ns = nsproxy->mnt_ns;
++ if (!ns)
++ goto skip_ns;
++
++ length += vx_info_mnt_namespace(ns, buffer + length);
++
++skip_ns:
++
++ uts = nsproxy->uts_ns;
++ if (!uts)
++ goto skip_uts;
++
++ length += sprintf(buffer + length,
++ "SysName:\t%.*s\n"
++ "NodeName:\t%.*s\n"
++ "Release:\t%.*s\n"
++ "Version:\t%.*s\n"
++ "Machine:\t%.*s\n"
++ "DomainName:\t%.*s\n",
++ __NEW_UTS_LEN, uts->name.sysname,
++ __NEW_UTS_LEN, uts->name.nodename,
++ __NEW_UTS_LEN, uts->name.release,
++ __NEW_UTS_LEN, uts->name.version,
++ __NEW_UTS_LEN, uts->name.machine,
++ __NEW_UTS_LEN, uts->name.domainname);
++skip_uts:
++
++ ipc = nsproxy->ipc_ns;
++ if (!ipc)
++ goto skip_ipc;
++
++ length += sprintf(buffer + length,
++ "SEMS:\t\t%d %d %d %d %d\n"
++ "MSG:\t\t%d %d %d\n"
++ "SHM:\t\t%lu %lu %d %d\n",
++ ipc->sem_ctls[0], ipc->sem_ctls[1],
++ ipc->sem_ctls[2], ipc->sem_ctls[3],
++ ipc->used_sems,
++ ipc->msg_ctlmax, ipc->msg_ctlmnb, ipc->msg_ctlmni,
++ (unsigned long)ipc->shm_ctlmax,
++ (unsigned long)ipc->shm_ctlall,
++ ipc->shm_ctlmni, ipc->shm_tot);
++skip_ipc:
++out:
++ return length;
++}
++
++
++#include <linux/sched.h>
++
++#define LOAD_INT(x) ((x) >> FSHIFT)
++#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)
++
++static inline
++int vx_info_proc_cvirt(struct _vx_cvirt *cvirt, char *buffer)
++{
++ int length = 0;
++ int a, b, c;
++
++ length += sprintf(buffer + length,
++ "BiasUptime:\t%lu.%02lu\n",
++ (unsigned long)cvirt->bias_uptime.tv_sec,
++ (cvirt->bias_uptime.tv_nsec / (NSEC_PER_SEC / 100)));
++
++ a = cvirt->load[0] + (FIXED_1 / 200);
++ b = cvirt->load[1] + (FIXED_1 / 200);
++ c = cvirt->load[2] + (FIXED_1 / 200);
++ length += sprintf(buffer + length,
++ "nr_threads:\t%d\n"
++ "nr_running:\t%d\n"
++ "nr_unintr:\t%d\n"
++ "nr_onhold:\t%d\n"
++ "load_updates:\t%d\n"
++ "loadavg:\t%d.%02d %d.%02d %d.%02d\n"
++ "total_forks:\t%d\n",
++ atomic_read(&cvirt->nr_threads),
++ atomic_read(&cvirt->nr_running),
++ atomic_read(&cvirt->nr_uninterruptible),
++ atomic_read(&cvirt->nr_onhold),
++ atomic_read(&cvirt->load_updates),
++ LOAD_INT(a), LOAD_FRAC(a),
++ LOAD_INT(b), LOAD_FRAC(b),
++ LOAD_INT(c), LOAD_FRAC(c),
++ atomic_read(&cvirt->total_forks));
++ return length;
++}
++
++static inline
++int vx_info_proc_cvirt_pc(struct _vx_cvirt_pc *cvirt_pc,
++ char *buffer, int cpu)
++{
++ int length = 0;
++ return length;
++}
++
++#endif /* _VX_CVIRT_PROC_H */
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/debug.c linux-3.6.10-vs2.3.4.6/kernel/vserver/debug.c
+--- linux-3.6.10/kernel/vserver/debug.c 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/debug.c 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,32 @@
++/*
++ * kernel/vserver/debug.c
++ *
++ * Copyright (C) 2005-2007 Herbert Pötzl
++ *
++ * V0.01 vx_info dump support
++ *
++ */
++
++#include <linux/module.h>
++
++#include <linux/vserver/context.h>
++
++
++void dump_vx_info(struct vx_info *vxi, int level)
++{
++ printk("vx_info %p[#%d, %d.%d, %4x]\n", vxi, vxi->vx_id,
++ atomic_read(&vxi->vx_usecnt),
++ atomic_read(&vxi->vx_tasks),
++ vxi->vx_state);
++ if (level > 0) {
++ __dump_vx_limit(&vxi->limit);
++ __dump_vx_sched(&vxi->sched);
++ __dump_vx_cvirt(&vxi->cvirt);
++ __dump_vx_cacct(&vxi->cacct);
++ }
++ printk("---\n");
++}
++
++
++EXPORT_SYMBOL_GPL(dump_vx_info);
++
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/device.c linux-3.6.10-vs2.3.4.6/kernel/vserver/device.c
+--- linux-3.6.10/kernel/vserver/device.c 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/device.c 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,443 @@
++/*
++ * linux/kernel/vserver/device.c
++ *
++ * Linux-VServer: Device Support
++ *
++ * Copyright (C) 2006 Herbert Pötzl
++ * Copyright (C) 2007 Daniel Hokka Zakrisson
++ *
++ * V0.01 device mapping basics
++ * V0.02 added defaults
++ *
++ */
++
++#include <linux/slab.h>
++#include <linux/rcupdate.h>
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/hash.h>
++
++#include <asm/errno.h>
++#include <asm/uaccess.h>
++#include <linux/vserver/base.h>
++#include <linux/vserver/debug.h>
++#include <linux/vserver/context.h>
++#include <linux/vserver/device.h>
++#include <linux/vserver/device_cmd.h>
++
++
++#define DMAP_HASH_BITS 4
++
++
++struct vs_mapping {
++ union {
++ struct hlist_node hlist;
++ struct list_head list;
++ } u;
++#define dm_hlist u.hlist
++#define dm_list u.list
++ xid_t xid;
++ dev_t device;
++ struct vx_dmap_target target;
++};
++
++
++static struct hlist_head dmap_main_hash[1 << DMAP_HASH_BITS];
++
++static DEFINE_SPINLOCK(dmap_main_hash_lock);
++
++static struct vx_dmap_target dmap_defaults[2] = {
++ { .flags = DATTR_OPEN },
++ { .flags = DATTR_OPEN },
++};
++
++
++struct kmem_cache *dmap_cachep __read_mostly;
++
++int __init dmap_cache_init(void)
++{
++ dmap_cachep = kmem_cache_create("dmap_cache",
++ sizeof(struct vs_mapping), 0,
++ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
++ return 0;
++}
++
++__initcall(dmap_cache_init);
++
++
++static inline unsigned int __hashval(dev_t dev, int bits)
++{
++ return hash_long((unsigned long)dev, bits);
++}
++
++
++/* __hash_mapping()
++ * add the mapping to the hash table
++ */
++static inline void __hash_mapping(struct vx_info *vxi, struct vs_mapping *vdm)
++{
++ spinlock_t *hash_lock = &dmap_main_hash_lock;
++ struct hlist_head *head, *hash = dmap_main_hash;
++ int device = vdm->device;
++
++ spin_lock(hash_lock);
++ vxdprintk(VXD_CBIT(misc, 8), "__hash_mapping: %p[#%d] %08x:%08x",
++ vxi, vxi ? vxi->vx_id : 0, device, vdm->target.target);
++
++ head = &hash[__hashval(device, DMAP_HASH_BITS)];
++ hlist_add_head(&vdm->dm_hlist, head);
++ spin_unlock(hash_lock);
++}
++
++
++static inline int __mode_to_default(umode_t mode)
++{
++ switch (mode) {
++ case S_IFBLK:
++ return 0;
++ case S_IFCHR:
++ return 1;
++ default:
++ BUG();
++ }
++}
++
++
++/* __set_default()
++ * set a default
++ */
++static inline void __set_default(struct vx_info *vxi, umode_t mode,
++ struct vx_dmap_target *vdmt)
++{
++ spinlock_t *hash_lock = &dmap_main_hash_lock;
++ spin_lock(hash_lock);
++
++ if (vxi)
++ vxi->dmap.targets[__mode_to_default(mode)] = *vdmt;
++ else
++ dmap_defaults[__mode_to_default(mode)] = *vdmt;
++
++
++ spin_unlock(hash_lock);
++
++ vxdprintk(VXD_CBIT(misc, 8), "__set_default: %p[#%u] %08x %04x",
++ vxi, vxi ? vxi->vx_id : 0, vdmt->target, vdmt->flags);
++}
++
++
++/* __remove_default()
++ * remove a default
++ */
++static inline int __remove_default(struct vx_info *vxi, umode_t mode)
++{
++ spinlock_t *hash_lock = &dmap_main_hash_lock;
++ spin_lock(hash_lock);
++
++ if (vxi)
++ vxi->dmap.targets[__mode_to_default(mode)].flags = 0;
++ else /* remove == reset */
++ dmap_defaults[__mode_to_default(mode)].flags = DATTR_OPEN | mode;
++
++ spin_unlock(hash_lock);
++ return 0;
++}
++
++
++/* __find_mapping()
++ * find a mapping in the hash table
++ *
++ * caller must hold hash_lock
++ */
++static inline int __find_mapping(xid_t xid, dev_t device, umode_t mode,
++ struct vs_mapping **local, struct vs_mapping **global)
++{
++ struct hlist_head *hash = dmap_main_hash;
++ struct hlist_head *head = &hash[__hashval(device, DMAP_HASH_BITS)];
++ struct hlist_node *pos;
++ struct vs_mapping *vdm;
++
++ *local = NULL;
++ if (global)
++ *global = NULL;
++
++ hlist_for_each(pos, head) {
++ vdm = hlist_entry(pos, struct vs_mapping, dm_hlist);
++
++ if ((vdm->device == device) &&
++ !((vdm->target.flags ^ mode) & S_IFMT)) {
++ if (vdm->xid == xid) {
++ *local = vdm;
++ return 1;
++ } else if (global && vdm->xid == 0)
++ *global = vdm;
++ }
++ }
++
++ if (global && *global)
++ return 0;
++ else
++ return -ENOENT;
++}
++
++
++/* __lookup_mapping()
++ * find a mapping and store the result in target and flags
++ */
++static inline int __lookup_mapping(struct vx_info *vxi,
++ dev_t device, dev_t *target, int *flags, umode_t mode)
++{
++ spinlock_t *hash_lock = &dmap_main_hash_lock;
++ struct vs_mapping *vdm, *global;
++ struct vx_dmap_target *vdmt;
++ int ret = 0;
++ xid_t xid = vxi->vx_id;
++ int index;
++
++ spin_lock(hash_lock);
++ if (__find_mapping(xid, device, mode, &vdm, &global) > 0) {
++ ret = 1;
++ vdmt = &vdm->target;
++ goto found;
++ }
++
++ index = __mode_to_default(mode);
++ if (vxi && vxi->dmap.targets[index].flags) {
++ ret = 2;
++ vdmt = &vxi->dmap.targets[index];
++ } else if (global) {
++ ret = 3;
++ vdmt = &global->target;
++ goto found;
++ } else {
++ ret = 4;
++ vdmt = &dmap_defaults[index];
++ }
++
++found:
++ if (target && (vdmt->flags & DATTR_REMAP))
++ *target = vdmt->target;
++ else if (target)
++ *target = device;
++ if (flags)
++ *flags = vdmt->flags;
++
++ spin_unlock(hash_lock);
++
++ return ret;
++}
++
++
++/* __remove_mapping()
++ * remove a mapping from the hash table
++ */
++static inline int __remove_mapping(struct vx_info *vxi, dev_t device,
++ umode_t mode)
++{
++ spinlock_t *hash_lock = &dmap_main_hash_lock;
++ struct vs_mapping *vdm = NULL;
++ int ret = 0;
++
++ spin_lock(hash_lock);
++
++ ret = __find_mapping((vxi ? vxi->vx_id : 0), device, mode, &vdm,
++ NULL);
++ vxdprintk(VXD_CBIT(misc, 8), "__remove_mapping: %p[#%d] %08x %04x",
++ vxi, vxi ? vxi->vx_id : 0, device, mode);
++ if (ret < 0)
++ goto out;
++ hlist_del(&vdm->dm_hlist);
++
++out:
++ spin_unlock(hash_lock);
++ if (vdm)
++ kmem_cache_free(dmap_cachep, vdm);
++ return ret;
++}
++
++
++
++int vs_map_device(struct vx_info *vxi,
++ dev_t device, dev_t *target, umode_t mode)
++{
++ int ret, flags = DATTR_MASK;
++
++ if (!vxi) {
++ if (target)
++ *target = device;
++ goto out;
++ }
++ ret = __lookup_mapping(vxi, device, target, &flags, mode);
++ vxdprintk(VXD_CBIT(misc, 8), "vs_map_device: %08x target: %08x flags: %04x mode: %04x mapped=%d",
++ device, target ? *target : 0, flags, mode, ret);
++out:
++ return (flags & DATTR_MASK);
++}
++
++
++
++static int do_set_mapping(struct vx_info *vxi,
++ dev_t device, dev_t target, int flags, umode_t mode)
++{
++ if (device) {
++ struct vs_mapping *new;
++
++ new = kmem_cache_alloc(dmap_cachep, GFP_KERNEL);
++ if (!new)
++ return -ENOMEM;
++
++ INIT_HLIST_NODE(&new->dm_hlist);
++ new->device = device;
++ new->target.target = target;
++ new->target.flags = flags | mode;
++ new->xid = (vxi ? vxi->vx_id : 0);
++
++ vxdprintk(VXD_CBIT(misc, 8), "do_set_mapping: %08x target: %08x flags: %04x", device, target, flags);
++ __hash_mapping(vxi, new);
++ } else {
++ struct vx_dmap_target new = {
++ .target = target,
++ .flags = flags | mode,
++ };
++ __set_default(vxi, mode, &new);
++ }
++ return 0;
++}
++
++
++static int do_unset_mapping(struct vx_info *vxi,
++ dev_t device, dev_t target, int flags, umode_t mode)
++{
++ int ret = -EINVAL;
++
++ if (device) {
++ ret = __remove_mapping(vxi, device, mode);
++ if (ret < 0)
++ goto out;
++ } else {
++ ret = __remove_default(vxi, mode);
++ if (ret < 0)
++ goto out;
++ }
++
++out:
++ return ret;
++}
++
++
++static inline int __user_device(const char __user *name, dev_t *dev,
++ umode_t *mode)
++{
++ struct nameidata nd;
++ int ret;
++
++ if (!name) {
++ *dev = 0;
++ return 0;
++ }
++ ret = user_lpath(name, &nd.path);
++ if (ret)
++ return ret;
++ if (nd.path.dentry->d_inode) {
++ *dev = nd.path.dentry->d_inode->i_rdev;
++ *mode = nd.path.dentry->d_inode->i_mode;
++ }
++ path_put(&nd.path);
++ return 0;
++}
++
++static inline int __mapping_mode(dev_t device, dev_t target,
++ umode_t device_mode, umode_t target_mode, umode_t *mode)
++{
++ if (device)
++ *mode = device_mode & S_IFMT;
++ else if (target)
++ *mode = target_mode & S_IFMT;
++ else
++ return -EINVAL;
++
++ /* if both given, device and target mode have to match */
++ if (device && target &&
++ ((device_mode ^ target_mode) & S_IFMT))
++ return -EINVAL;
++ return 0;
++}
++
++
++static inline int do_mapping(struct vx_info *vxi, const char __user *device_path,
++ const char __user *target_path, int flags, int set)
++{
++ dev_t device = ~0, target = ~0;
++ umode_t device_mode = 0, target_mode = 0, mode;
++ int ret;
++
++ ret = __user_device(device_path, &device, &device_mode);
++ if (ret)
++ return ret;
++ ret = __user_device(target_path, &target, &target_mode);
++ if (ret)
++ return ret;
++
++ ret = __mapping_mode(device, target,
++ device_mode, target_mode, &mode);
++ if (ret)
++ return ret;
++
++ if (set)
++ return do_set_mapping(vxi, device, target,
++ flags, mode);
++ else
++ return do_unset_mapping(vxi, device, target,
++ flags, mode);
++}
++
++
++int vc_set_mapping(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_set_mapping_v0 vc_data;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ return do_mapping(vxi, vc_data.device, vc_data.target,
++ vc_data.flags, 1);
++}
++
++int vc_unset_mapping(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_set_mapping_v0 vc_data;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ return do_mapping(vxi, vc_data.device, vc_data.target,
++ vc_data.flags, 0);
++}
++
++
++#ifdef CONFIG_COMPAT
++
++int vc_set_mapping_x32(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_set_mapping_v0_x32 vc_data;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ return do_mapping(vxi, compat_ptr(vc_data.device_ptr),
++ compat_ptr(vc_data.target_ptr), vc_data.flags, 1);
++}
++
++int vc_unset_mapping_x32(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_set_mapping_v0_x32 vc_data;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ return do_mapping(vxi, compat_ptr(vc_data.device_ptr),
++ compat_ptr(vc_data.target_ptr), vc_data.flags, 0);
++}
++
++#endif /* CONFIG_COMPAT */
++
++
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/dlimit.c linux-3.6.10-vs2.3.4.6/kernel/vserver/dlimit.c
+--- linux-3.6.10/kernel/vserver/dlimit.c 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/dlimit.c 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,531 @@
++/*
++ * linux/kernel/vserver/dlimit.c
++ *
++ * Virtual Server: Context Disk Limits
++ *
++ * Copyright (C) 2004-2009 Herbert Pötzl
++ *
++ * V0.01 initial version
++ * V0.02 compat32 splitup
++ * V0.03 extended interface
++ *
++ */
++
++#include <linux/statfs.h>
++#include <linux/sched.h>
++#include <linux/namei.h>
++#include <linux/vs_tag.h>
++#include <linux/vs_dlimit.h>
++#include <linux/vserver/dlimit_cmd.h>
++#include <linux/slab.h>
++// #include <linux/gfp.h>
++
++#include <asm/uaccess.h>
++
++/* __alloc_dl_info()
++
++ * allocate an initialized dl_info struct
++ * doesn't make it visible (hash) */
++
++static struct dl_info *__alloc_dl_info(struct super_block *sb, tag_t tag)
++{
++ struct dl_info *new = NULL;
++
++ vxdprintk(VXD_CBIT(dlim, 5),
++ "alloc_dl_info(%p,%d)*", sb, tag);
++
++ /* would this benefit from a slab cache? */
++ new = kmalloc(sizeof(struct dl_info), GFP_KERNEL);
++ if (!new)
++ return 0;
++
++ memset(new, 0, sizeof(struct dl_info));
++ new->dl_tag = tag;
++ new->dl_sb = sb;
++ // INIT_RCU_HEAD(&new->dl_rcu);
++ INIT_HLIST_NODE(&new->dl_hlist);
++ spin_lock_init(&new->dl_lock);
++ atomic_set(&new->dl_refcnt, 0);
++ atomic_set(&new->dl_usecnt, 0);
++
++ /* rest of init goes here */
++
++ vxdprintk(VXD_CBIT(dlim, 4),
++ "alloc_dl_info(%p,%d) = %p", sb, tag, new);
++ return new;
++}
++
++/* __dealloc_dl_info()
++
++ * final disposal of dl_info */
++
++static void __dealloc_dl_info(struct dl_info *dli)
++{
++ vxdprintk(VXD_CBIT(dlim, 4),
++ "dealloc_dl_info(%p)", dli);
++
++ dli->dl_hlist.next = LIST_POISON1;
++ dli->dl_tag = -1;
++ dli->dl_sb = 0;
++
++ BUG_ON(atomic_read(&dli->dl_usecnt));
++ BUG_ON(atomic_read(&dli->dl_refcnt));
++
++ kfree(dli);
++}
++
++
++/* hash table for dl_info hash */
++
++#define DL_HASH_SIZE 13
++
++struct hlist_head dl_info_hash[DL_HASH_SIZE];
++
++static DEFINE_SPINLOCK(dl_info_hash_lock);
++
++
++static inline unsigned int __hashval(struct super_block *sb, tag_t tag)
++{
++ return ((tag ^ (unsigned long)sb) % DL_HASH_SIZE);
++}
++
++
++
++/* __hash_dl_info()
++
++ * add the dli to the global hash table
++ * requires the hash_lock to be held */
++
++static inline void __hash_dl_info(struct dl_info *dli)
++{
++ struct hlist_head *head;
++
++ vxdprintk(VXD_CBIT(dlim, 6),
++ "__hash_dl_info: %p[#%d]", dli, dli->dl_tag);
++ get_dl_info(dli);
++ head = &dl_info_hash[__hashval(dli->dl_sb, dli->dl_tag)];
++ hlist_add_head_rcu(&dli->dl_hlist, head);
++}
++
++/* __unhash_dl_info()
++
++ * remove the dli from the global hash table
++ * requires the hash_lock to be held */
++
++static inline void __unhash_dl_info(struct dl_info *dli)
++{
++ vxdprintk(VXD_CBIT(dlim, 6),
++ "__unhash_dl_info: %p[#%d]", dli, dli->dl_tag);
++ hlist_del_rcu(&dli->dl_hlist);
++ put_dl_info(dli);
++}
++
++
++/* __lookup_dl_info()
++
++ * requires the rcu_read_lock()
++ * doesn't increment the dl_refcnt */
++
++static inline struct dl_info *__lookup_dl_info(struct super_block *sb, tag_t tag)
++{
++ struct hlist_head *head = &dl_info_hash[__hashval(sb, tag)];
++ struct hlist_node *pos;
++ struct dl_info *dli;
++
++ hlist_for_each_entry_rcu(dli, pos, head, dl_hlist) {
++
++ if (dli->dl_tag == tag && dli->dl_sb == sb) {
++ return dli;
++ }
++ }
++ return NULL;
++}
++
++
++struct dl_info *locate_dl_info(struct super_block *sb, tag_t tag)
++{
++ struct dl_info *dli;
++
++ rcu_read_lock();
++ dli = get_dl_info(__lookup_dl_info(sb, tag));
++ vxdprintk(VXD_CBIT(dlim, 7),
++ "locate_dl_info(%p,#%d) = %p", sb, tag, dli);
++ rcu_read_unlock();
++ return dli;
++}
++
++void rcu_free_dl_info(struct rcu_head *head)
++{
++ struct dl_info *dli = container_of(head, struct dl_info, dl_rcu);
++ int usecnt, refcnt;
++
++ BUG_ON(!dli || !head);
++
++ usecnt = atomic_read(&dli->dl_usecnt);
++ BUG_ON(usecnt < 0);
++
++ refcnt = atomic_read(&dli->dl_refcnt);
++ BUG_ON(refcnt < 0);
++
++ vxdprintk(VXD_CBIT(dlim, 3),
++ "rcu_free_dl_info(%p)", dli);
++ if (!usecnt)
++ __dealloc_dl_info(dli);
++ else
++ printk("!!! rcu didn't free\n");
++}
++
++
++
++
++static int do_addrem_dlimit(uint32_t id, const char __user *name,
++ uint32_t flags, int add)
++{
++ struct path path;
++ int ret;
++
++ ret = user_lpath(name, &path);
++ if (!ret) {
++ struct super_block *sb;
++ struct dl_info *dli;
++
++ ret = -EINVAL;
++ if (!path.dentry->d_inode)
++ goto out_release;
++ if (!(sb = path.dentry->d_inode->i_sb))
++ goto out_release;
++
++ if (add) {
++ dli = __alloc_dl_info(sb, id);
++ spin_lock(&dl_info_hash_lock);
++
++ ret = -EEXIST;
++ if (__lookup_dl_info(sb, id))
++ goto out_unlock;
++ __hash_dl_info(dli);
++ dli = NULL;
++ } else {
++ spin_lock(&dl_info_hash_lock);
++ dli = __lookup_dl_info(sb, id);
++
++ ret = -ESRCH;
++ if (!dli)
++ goto out_unlock;
++ __unhash_dl_info(dli);
++ }
++ ret = 0;
++ out_unlock:
++ spin_unlock(&dl_info_hash_lock);
++ if (add && dli)
++ __dealloc_dl_info(dli);
++ out_release:
++ path_put(&path);
++ }
++ return ret;
++}
++
++int vc_add_dlimit(uint32_t id, void __user *data)
++{
++ struct vcmd_ctx_dlimit_base_v0 vc_data;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 1);
++}
++
++int vc_rem_dlimit(uint32_t id, void __user *data)
++{
++ struct vcmd_ctx_dlimit_base_v0 vc_data;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 0);
++}
++
++#ifdef CONFIG_COMPAT
++
++int vc_add_dlimit_x32(uint32_t id, void __user *data)
++{
++ struct vcmd_ctx_dlimit_base_v0_x32 vc_data;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ return do_addrem_dlimit(id,
++ compat_ptr(vc_data.name_ptr), vc_data.flags, 1);
++}
++
++int vc_rem_dlimit_x32(uint32_t id, void __user *data)
++{
++ struct vcmd_ctx_dlimit_base_v0_x32 vc_data;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ return do_addrem_dlimit(id,
++ compat_ptr(vc_data.name_ptr), vc_data.flags, 0);
++}
++
++#endif /* CONFIG_COMPAT */
++
++
++static inline
++int do_set_dlimit(uint32_t id, const char __user *name,
++ uint32_t space_used, uint32_t space_total,
++ uint32_t inodes_used, uint32_t inodes_total,
++ uint32_t reserved, uint32_t flags)
++{
++ struct path path;
++ int ret;
++
++ ret = user_lpath(name, &path);
++ if (!ret) {
++ struct super_block *sb;
++ struct dl_info *dli;
++
++ ret = -EINVAL;
++ if (!path.dentry->d_inode)
++ goto out_release;
++ if (!(sb = path.dentry->d_inode->i_sb))
++ goto out_release;
++
++ /* sanity checks */
++ if ((reserved != CDLIM_KEEP &&
++ reserved > 100) ||
++ (inodes_used != CDLIM_KEEP &&
++ inodes_used > inodes_total) ||
++ (space_used != CDLIM_KEEP &&
++ space_used > space_total))
++ goto out_release;
++
++ ret = -ESRCH;
++ dli = locate_dl_info(sb, id);
++ if (!dli)
++ goto out_release;
++
++ spin_lock(&dli->dl_lock);
++
++ if (inodes_used != CDLIM_KEEP)
++ dli->dl_inodes_used = inodes_used;
++ if (inodes_total != CDLIM_KEEP)
++ dli->dl_inodes_total = inodes_total;
++ if (space_used != CDLIM_KEEP)
++ dli->dl_space_used = dlimit_space_32to64(
++ space_used, flags, DLIMS_USED);
++
++ if (space_total == CDLIM_INFINITY)
++ dli->dl_space_total = DLIM_INFINITY;
++ else if (space_total != CDLIM_KEEP)
++ dli->dl_space_total = dlimit_space_32to64(
++ space_total, flags, DLIMS_TOTAL);
++
++ if (reserved != CDLIM_KEEP)
++ dli->dl_nrlmult = (1 << 10) * (100 - reserved) / 100;
++
++ spin_unlock(&dli->dl_lock);
++
++ put_dl_info(dli);
++ ret = 0;
++
++ out_release:
++ path_put(&path);
++ }
++ return ret;
++}
++
++int vc_set_dlimit(uint32_t id, void __user *data)
++{
++ struct vcmd_ctx_dlimit_v0 vc_data;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ return do_set_dlimit(id, vc_data.name,
++ vc_data.space_used, vc_data.space_total,
++ vc_data.inodes_used, vc_data.inodes_total,
++ vc_data.reserved, vc_data.flags);
++}
++
++#ifdef CONFIG_COMPAT
++
++int vc_set_dlimit_x32(uint32_t id, void __user *data)
++{
++ struct vcmd_ctx_dlimit_v0_x32 vc_data;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ return do_set_dlimit(id, compat_ptr(vc_data.name_ptr),
++ vc_data.space_used, vc_data.space_total,
++ vc_data.inodes_used, vc_data.inodes_total,
++ vc_data.reserved, vc_data.flags);
++}
++
++#endif /* CONFIG_COMPAT */
++
++
++static inline
++int do_get_dlimit(uint32_t id, const char __user *name,
++ uint32_t *space_used, uint32_t *space_total,
++ uint32_t *inodes_used, uint32_t *inodes_total,
++ uint32_t *reserved, uint32_t *flags)
++{
++ struct path path;
++ int ret;
++
++ ret = user_lpath(name, &path);
++ if (!ret) {
++ struct super_block *sb;
++ struct dl_info *dli;
++
++ ret = -EINVAL;
++ if (!path.dentry->d_inode)
++ goto out_release;
++ if (!(sb = path.dentry->d_inode->i_sb))
++ goto out_release;
++
++ ret = -ESRCH;
++ dli = locate_dl_info(sb, id);
++ if (!dli)
++ goto out_release;
++
++ spin_lock(&dli->dl_lock);
++ *inodes_used = dli->dl_inodes_used;
++ *inodes_total = dli->dl_inodes_total;
++
++ *space_used = dlimit_space_64to32(
++ dli->dl_space_used, flags, DLIMS_USED);
++
++ if (dli->dl_space_total == DLIM_INFINITY)
++ *space_total = CDLIM_INFINITY;
++ else
++ *space_total = dlimit_space_64to32(
++ dli->dl_space_total, flags, DLIMS_TOTAL);
++
++ *reserved = 100 - ((dli->dl_nrlmult * 100 + 512) >> 10);
++ spin_unlock(&dli->dl_lock);
++
++ put_dl_info(dli);
++ ret = -EFAULT;
++
++ ret = 0;
++ out_release:
++ path_put(&path);
++ }
++ return ret;
++}
++
++
++int vc_get_dlimit(uint32_t id, void __user *data)
++{
++ struct vcmd_ctx_dlimit_v0 vc_data;
++ int ret;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ ret = do_get_dlimit(id, vc_data.name,
++ &vc_data.space_used, &vc_data.space_total,
++ &vc_data.inodes_used, &vc_data.inodes_total,
++ &vc_data.reserved, &vc_data.flags);
++ if (ret)
++ return ret;
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return 0;
++}
++
++#ifdef CONFIG_COMPAT
++
++int vc_get_dlimit_x32(uint32_t id, void __user *data)
++{
++ struct vcmd_ctx_dlimit_v0_x32 vc_data;
++ int ret;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ ret = do_get_dlimit(id, compat_ptr(vc_data.name_ptr),
++ &vc_data.space_used, &vc_data.space_total,
++ &vc_data.inodes_used, &vc_data.inodes_total,
++ &vc_data.reserved, &vc_data.flags);
++ if (ret)
++ return ret;
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return 0;
++}
++
++#endif /* CONFIG_COMPAT */
++
++
++void vx_vsi_statfs(struct super_block *sb, struct kstatfs *buf)
++{
++ struct dl_info *dli;
++ __u64 blimit, bfree, bavail;
++ __u32 ifree;
++
++ dli = locate_dl_info(sb, dx_current_tag());
++ if (!dli)
++ return;
++
++ spin_lock(&dli->dl_lock);
++ if (dli->dl_inodes_total == (unsigned long)DLIM_INFINITY)
++ goto no_ilim;
++
++ /* reduce max inodes available to limit */
++ if (buf->f_files > dli->dl_inodes_total)
++ buf->f_files = dli->dl_inodes_total;
++
++ ifree = dli->dl_inodes_total - dli->dl_inodes_used;
++ /* reduce free inodes to min */
++ if (ifree < buf->f_ffree)
++ buf->f_ffree = ifree;
++
++no_ilim:
++ if (dli->dl_space_total == DLIM_INFINITY)
++ goto no_blim;
++
++ blimit = dli->dl_space_total >> sb->s_blocksize_bits;
++
++ if (dli->dl_space_total < dli->dl_space_used)
++ bfree = 0;
++ else
++ bfree = (dli->dl_space_total - dli->dl_space_used)
++ >> sb->s_blocksize_bits;
++
++ bavail = ((dli->dl_space_total >> 10) * dli->dl_nrlmult);
++ if (bavail < dli->dl_space_used)
++ bavail = 0;
++ else
++ bavail = (bavail - dli->dl_space_used)
++ >> sb->s_blocksize_bits;
++
++ /* reduce max space available to limit */
++ if (buf->f_blocks > blimit)
++ buf->f_blocks = blimit;
++
++ /* reduce free space to min */
++ if (bfree < buf->f_bfree)
++ buf->f_bfree = bfree;
++
++ /* reduce avail space to min */
++ if (bavail < buf->f_bavail)
++ buf->f_bavail = bavail;
++
++no_blim:
++ spin_unlock(&dli->dl_lock);
++ put_dl_info(dli);
++
++ return;
++}
++
++#include <linux/module.h>
++
++EXPORT_SYMBOL_GPL(locate_dl_info);
++EXPORT_SYMBOL_GPL(rcu_free_dl_info);
++
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/helper.c linux-3.6.10-vs2.3.4.6/kernel/vserver/helper.c
+--- linux-3.6.10/kernel/vserver/helper.c 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/helper.c 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,229 @@
++/*
++ * linux/kernel/vserver/helper.c
++ *
++ * Virtual Context Support
++ *
++ * Copyright (C) 2004-2007 Herbert Pötzl
++ *
++ * V0.01 basic helper
++ *
++ */
++
++#include <linux/kmod.h>
++#include <linux/reboot.h>
++#include <linux/vs_context.h>
++#include <linux/vs_network.h>
++#include <linux/vserver/signal.h>
++
++
++char vshelper_path[255] = "/sbin/vshelper";
++
++static int vshelper_init(struct subprocess_info *info, struct cred *new_cred)
++{
++ current->flags &= ~PF_THREAD_BOUND;
++ return 0;
++}
++
++static int do_vshelper(char *name, char *argv[], char *envp[], int sync)
++{
++ int ret;
++
++ if ((ret = call_usermodehelper_fns(name, argv, envp,
++ sync ? UMH_WAIT_PROC : UMH_WAIT_EXEC,
++ vshelper_init, NULL, NULL))) {
++ printk(KERN_WARNING "%s: (%s %s) returned %s with %d\n",
++ name, argv[1], argv[2],
++ sync ? "sync" : "async", ret);
++ }
++ vxdprintk(VXD_CBIT(switch, 4),
++ "%s: (%s %s) returned %s with %d",
++ name, argv[1], argv[2], sync ? "sync" : "async", ret);
++ return ret;
++}
++
++/*
++ * vshelper path is set via /proc/sys
++ * invoked by vserver sys_reboot(), with
++ * the following arguments
++ *
++ * argv [0] = vshelper_path;
++ * argv [1] = action: "restart", "halt", "poweroff", ...
++ * argv [2] = context identifier
++ *
++ * envp [*] = type-specific parameters
++ */
++
++long vs_reboot_helper(struct vx_info *vxi, int cmd, void __user *arg)
++{
++ char id_buf[8], cmd_buf[16];
++ char uid_buf[16], pid_buf[16];
++ int ret;
++
++ char *argv[] = {vshelper_path, NULL, id_buf, 0};
++ char *envp[] = {"HOME=/", "TERM=linux",
++ "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
++ uid_buf, pid_buf, cmd_buf, 0};
++
++ if (vx_info_state(vxi, VXS_HELPER))
++ return -EAGAIN;
++ vxi->vx_state |= VXS_HELPER;
++
++ snprintf(id_buf, sizeof(id_buf), "%d", vxi->vx_id);
++
++ snprintf(cmd_buf, sizeof(cmd_buf), "VS_CMD=%08x", cmd);
++ snprintf(uid_buf, sizeof(uid_buf), "VS_UID=%d", current_uid());
++ snprintf(pid_buf, sizeof(pid_buf), "VS_PID=%d", current->pid);
++
++ switch (cmd) {
++ case LINUX_REBOOT_CMD_RESTART:
++ argv[1] = "restart";
++ break;
++
++ case LINUX_REBOOT_CMD_HALT:
++ argv[1] = "halt";
++ break;
++
++ case LINUX_REBOOT_CMD_POWER_OFF:
++ argv[1] = "poweroff";
++ break;
++
++ case LINUX_REBOOT_CMD_SW_SUSPEND:
++ argv[1] = "swsusp";
++ break;
++
++ case LINUX_REBOOT_CMD_OOM:
++ argv[1] = "oom";
++ break;
++
++ default:
++ vxi->vx_state &= ~VXS_HELPER;
++ return 0;
++ }
++
++ ret = do_vshelper(vshelper_path, argv, envp, 0);
++ vxi->vx_state &= ~VXS_HELPER;
++ __wakeup_vx_info(vxi);
++ return (ret) ? -EPERM : 0;
++}
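
vs_reboot_helper() passes the action name in argv[1], the context id in argv[2], and VS_CMD/VS_UID/VS_PID through the environment, then maps a non-zero exit status to -EPERM. A hedged stand-in for such a helper, written in C purely for illustration (real setups typically use a shell script at /sbin/vshelper; the messages printed here are assumptions):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(int argc, char *argv[])
    {
            const char *action = argc > 1 ? argv[1] : "";   /* "restart", "halt", ... */
            const char *xid    = argc > 2 ? argv[2] : "?";  /* context identifier */
            const char *cmd    = getenv("VS_CMD");          /* type-specific parameter */

            if (!strcmp(action, "restart") || !strcmp(action, "halt") ||
                !strcmp(action, "poweroff"))
                    fprintf(stderr, "vshelper: %s requested for xid %s (VS_CMD=%s)\n",
                            action, xid, cmd ? cmd : "unset");

            /* a non-zero exit here makes vs_reboot_helper() return -EPERM */
            return 0;
    }
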
++
++
++long vs_reboot(unsigned int cmd, void __user *arg)
++{
++ struct vx_info *vxi = current_vx_info();
++ long ret = 0;
++
++ vxdprintk(VXD_CBIT(misc, 5),
++ "vs_reboot(%p[#%d],%u)",
++ vxi, vxi ? vxi->vx_id : 0, cmd);
++
++ ret = vs_reboot_helper(vxi, cmd, arg);
++ if (ret)
++ return ret;
++
++ vxi->reboot_cmd = cmd;
++ if (vx_info_flags(vxi, VXF_REBOOT_KILL, 0)) {
++ switch (cmd) {
++ case LINUX_REBOOT_CMD_RESTART:
++ case LINUX_REBOOT_CMD_HALT:
++ case LINUX_REBOOT_CMD_POWER_OFF:
++ vx_info_kill(vxi, 0, SIGKILL);
++ vx_info_kill(vxi, 1, SIGKILL);
++ default:
++ break;
++ }
++ }
++ return 0;
++}
++
++long vs_oom_action(unsigned int cmd)
++{
++ struct vx_info *vxi = current_vx_info();
++ long ret = 0;
++
++ vxdprintk(VXD_CBIT(misc, 5),
++ "vs_oom_action(%p[#%d],%u)",
++ vxi, vxi ? vxi->vx_id : 0, cmd);
++
++ ret = vs_reboot_helper(vxi, cmd, NULL);
++ if (ret)
++ return ret;
++
++ vxi->reboot_cmd = cmd;
++ if (vx_info_flags(vxi, VXF_REBOOT_KILL, 0)) {
++ vx_info_kill(vxi, 0, SIGKILL);
++ vx_info_kill(vxi, 1, SIGKILL);
++ }
++ return 0;
++}
++
++/*
++ * argv [0] = vshelper_path;
++ * argv [1] = action: "startup", "shutdown"
++ * argv [2] = context identifier
++ *
++ * envp [*] = type-specific parameters
++ */
++
++long vs_state_change(struct vx_info *vxi, unsigned int cmd)
++{
++ char id_buf[8], cmd_buf[16];
++ char *argv[] = {vshelper_path, NULL, id_buf, 0};
++ char *envp[] = {"HOME=/", "TERM=linux",
++ "PATH=/sbin:/usr/sbin:/bin:/usr/bin", cmd_buf, 0};
++
++ if (!vx_info_flags(vxi, VXF_SC_HELPER, 0))
++ return 0;
++
++ snprintf(id_buf, sizeof(id_buf), "%d", vxi->vx_id);
++ snprintf(cmd_buf, sizeof(cmd_buf), "VS_CMD=%08x", cmd);
++
++ switch (cmd) {
++ case VSC_STARTUP:
++ argv[1] = "startup";
++ break;
++ case VSC_SHUTDOWN:
++ argv[1] = "shutdown";
++ break;
++ default:
++ return 0;
++ }
++
++ return do_vshelper(vshelper_path, argv, envp, 1);
++}
++
++
++/*
++ * argv [0] = vshelper_path;
++ * argv [1] = action: "netup", "netdown"
++ * argv [2] = context identifier
++ *
++ * envp [*] = type-specific parameters
++ */
++
++long vs_net_change(struct nx_info *nxi, unsigned int cmd)
++{
++ char id_buf[8], cmd_buf[16];
++ char *argv[] = {vshelper_path, NULL, id_buf, 0};
++ char *envp[] = {"HOME=/", "TERM=linux",
++ "PATH=/sbin:/usr/sbin:/bin:/usr/bin", cmd_buf, 0};
++
++ if (!nx_info_flags(nxi, NXF_SC_HELPER, 0))
++ return 0;
++
++ snprintf(id_buf, sizeof(id_buf), "%d", nxi->nx_id);
++ snprintf(cmd_buf, sizeof(cmd_buf), "VS_CMD=%08x", cmd);
++
++ switch (cmd) {
++ case VSC_NETUP:
++ argv[1] = "netup";
++ break;
++ case VSC_NETDOWN:
++ argv[1] = "netdown";
++ break;
++ default:
++ return 0;
++ }
++
++ return do_vshelper(vshelper_path, argv, envp, 1);
++}
++
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/history.c linux-3.6.10-vs2.3.4.6/kernel/vserver/history.c
+--- linux-3.6.10/kernel/vserver/history.c 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/history.c 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,258 @@
++/*
++ * kernel/vserver/history.c
++ *
++ * Virtual Context History Backtrace
++ *
++ * Copyright (C) 2004-2007 Herbert Pötzl
++ *
++ * V0.01 basic structure
++ * V0.02 hash/unhash and trace
++ * V0.03 preemption fixes
++ *
++ */
++
++#include <linux/module.h>
++#include <asm/uaccess.h>
++
++#include <linux/vserver/context.h>
++#include <linux/vserver/debug.h>
++#include <linux/vserver/debug_cmd.h>
++#include <linux/vserver/history.h>
++
++
++#ifdef CONFIG_VSERVER_HISTORY
++#define VXH_SIZE CONFIG_VSERVER_HISTORY_SIZE
++#else
++#define VXH_SIZE 64
++#endif
++
++struct _vx_history {
++ unsigned int counter;
++
++ struct _vx_hist_entry entry[VXH_SIZE + 1];
++};
++
++
++DEFINE_PER_CPU(struct _vx_history, vx_history_buffer);
++
++unsigned volatile int vxh_active = 1;
++
++static atomic_t sequence = ATOMIC_INIT(0);
++
++
++/* vxh_advance()
++
++ * requires disabled preemption */
++
++struct _vx_hist_entry *vxh_advance(void *loc)
++{
++ unsigned int cpu = smp_processor_id();
++ struct _vx_history *hist = &per_cpu(vx_history_buffer, cpu);
++ struct _vx_hist_entry *entry;
++ unsigned int index;
++
++ index = vxh_active ? (hist->counter++ % VXH_SIZE) : VXH_SIZE;
++ entry = &hist->entry[index];
++
++ entry->seq = atomic_inc_return(&sequence);
++ entry->loc = loc;
++ return entry;
++}
++
++EXPORT_SYMBOL_GPL(vxh_advance);
++
++
++#define VXH_LOC_FMTS "(#%04x,*%d):%p"
++
++#define VXH_LOC_ARGS(e) (e)->seq, cpu, (e)->loc
++
++
++#define VXH_VXI_FMTS "%p[#%d,%d.%d]"
++
++#define VXH_VXI_ARGS(e) (e)->vxi.ptr, \
++ (e)->vxi.ptr ? (e)->vxi.xid : 0, \
++ (e)->vxi.ptr ? (e)->vxi.usecnt : 0, \
++ (e)->vxi.ptr ? (e)->vxi.tasks : 0
++
++void vxh_dump_entry(struct _vx_hist_entry *e, unsigned cpu)
++{
++ switch (e->type) {
++ case VXH_THROW_OOPS:
++ printk( VXH_LOC_FMTS " oops \n", VXH_LOC_ARGS(e));
++ break;
++
++ case VXH_GET_VX_INFO:
++ case VXH_PUT_VX_INFO:
++ printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS "\n",
++ VXH_LOC_ARGS(e),
++ (e->type == VXH_GET_VX_INFO) ? "get" : "put",
++ VXH_VXI_ARGS(e));
++ break;
++
++ case VXH_INIT_VX_INFO:
++ case VXH_SET_VX_INFO:
++ case VXH_CLR_VX_INFO:
++ printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS " @%p\n",
++ VXH_LOC_ARGS(e),
++ (e->type == VXH_INIT_VX_INFO) ? "init" :
++ ((e->type == VXH_SET_VX_INFO) ? "set" : "clr"),
++ VXH_VXI_ARGS(e), e->sc.data);
++ break;
++
++ case VXH_CLAIM_VX_INFO:
++ case VXH_RELEASE_VX_INFO:
++ printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS " @%p\n",
++ VXH_LOC_ARGS(e),
++ (e->type == VXH_CLAIM_VX_INFO) ? "claim" : "release",
++ VXH_VXI_ARGS(e), e->sc.data);
++ break;
++
++ case VXH_ALLOC_VX_INFO:
++ case VXH_DEALLOC_VX_INFO:
++ printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS "\n",
++ VXH_LOC_ARGS(e),
++ (e->type == VXH_ALLOC_VX_INFO) ? "alloc" : "dealloc",
++ VXH_VXI_ARGS(e));
++ break;
++
++ case VXH_HASH_VX_INFO:
++ case VXH_UNHASH_VX_INFO:
++ printk( VXH_LOC_FMTS " __%s_vx_info " VXH_VXI_FMTS "\n",
++ VXH_LOC_ARGS(e),
++ (e->type == VXH_HASH_VX_INFO) ? "hash" : "unhash",
++ VXH_VXI_ARGS(e));
++ break;
++
++ case VXH_LOC_VX_INFO:
++ case VXH_LOOKUP_VX_INFO:
++ case VXH_CREATE_VX_INFO:
++ printk( VXH_LOC_FMTS " __%s_vx_info [#%d] -> " VXH_VXI_FMTS "\n",
++ VXH_LOC_ARGS(e),
++ (e->type == VXH_CREATE_VX_INFO) ? "create" :
++ ((e->type == VXH_LOC_VX_INFO) ? "loc" : "lookup"),
++ e->ll.arg, VXH_VXI_ARGS(e));
++ break;
++ }
++}
++
++static void __vxh_dump_history(void)
++{
++ unsigned int i, cpu;
++
++ printk("History:\tSEQ: %8x\tNR_CPUS: %d\n",
++ atomic_read(&sequence), NR_CPUS);
++
++ for (i = 0; i < VXH_SIZE; i++) {
++ for_each_online_cpu(cpu) {
++ struct _vx_history *hist =
++ &per_cpu(vx_history_buffer, cpu);
++ unsigned int index = (hist->counter - i) % VXH_SIZE;
++ struct _vx_hist_entry *entry = &hist->entry[index];
++
++ vxh_dump_entry(entry, cpu);
++ }
++ }
++}
++
++void vxh_dump_history(void)
++{
++ vxh_active = 0;
++#ifdef CONFIG_SMP
++ local_irq_enable();
++ smp_send_stop();
++ local_irq_disable();
++#endif
++ __vxh_dump_history();
++}
++
++
++/* vserver syscall commands below here */
++
++
++int vc_dump_history(uint32_t id)
++{
++ vxh_active = 0;
++ __vxh_dump_history();
++ vxh_active = 1;
++
++ return 0;
++}
++
++
++int do_read_history(struct __user _vx_hist_entry *data,
++ int cpu, uint32_t *index, uint32_t *count)
++{
++ int pos, ret = 0;
++ struct _vx_history *hist = &per_cpu(vx_history_buffer, cpu);
++ int end = hist->counter;
++ int start = end - VXH_SIZE + 2;
++ int idx = *index;
++
++ /* special case: get current pos */
++ if (!*count) {
++ *index = end;
++ return 0;
++ }
++
++ /* have we lost some data? */
++ if (idx < start)
++ idx = start;
++
++ for (pos = 0; (pos < *count) && (idx < end); pos++, idx++) {
++ struct _vx_hist_entry *entry =
++ &hist->entry[idx % VXH_SIZE];
++
++ /* send entry to userspace */
++ ret = copy_to_user(&data[pos], entry, sizeof(*entry));
++ if (ret)
++ break;
++ }
++ /* save new index and count */
++ *index = idx;
++ *count = pos;
++ return ret ? ret : (*index < end);
++}
++
++int vc_read_history(uint32_t id, void __user *data)
++{
++ struct vcmd_read_history_v0 vc_data;
++ int ret;
++
++ if (id >= NR_CPUS)
++ return -EINVAL;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ ret = do_read_history((struct __user _vx_hist_entry *)vc_data.data,
++ id, &vc_data.index, &vc_data.count);
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return ret;
++}
++
++#ifdef CONFIG_COMPAT
++
++int vc_read_history_x32(uint32_t id, void __user *data)
++{
++ struct vcmd_read_history_v0_x32 vc_data;
++ int ret;
++
++ if (id >= NR_CPUS)
++ return -EINVAL;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ ret = do_read_history((struct __user _vx_hist_entry *)
++ compat_ptr(vc_data.data_ptr),
++ id, &vc_data.index, &vc_data.count);
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return ret;
++}
++
++#endif /* CONFIG_COMPAT */
++
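
do_read_history() above implements a small user/kernel protocol over a per-cpu ring buffer: the caller passes an index and a count, gets back at most that many entries, and is clamped forward when the writer has already wrapped past its position (a count of zero just reports the current position). A minimal user-space sketch of the same protocol follows; names and sizes are illustrative only and not part of the patch.

    #include <stdio.h>

    #define RING_SIZE 64                    /* stand-in for VXH_SIZE */

    struct entry { int seq; };

    static struct entry ring[RING_SIZE];
    static int counter;                     /* monotonically increasing write position */

    /* copy up to *count entries starting at *index, clamping the reader
     * forward when the ring has already wrapped past its position;
     * returns nonzero while more entries remain */
    static int read_history(struct entry *out, int *index, int *count)
    {
        int end = counter;
        int start = end - RING_SIZE + 2;
        int idx = *index, pos;

        if (!*count) {                      /* special case: report current position */
            *index = end;
            return 0;
        }
        if (idx < start)                    /* lost data, skip ahead */
            idx = start;
        for (pos = 0; pos < *count && idx < end; pos++, idx++)
            out[pos] = ring[idx % RING_SIZE];
        *index = idx;
        *count = pos;
        return idx < end;
    }

    int main(void)
    {
        struct entry buf[8];
        int idx = 0, cnt = 8;

        for (counter = 0; counter < 100; counter++)
            ring[counter % RING_SIZE].seq = counter;

        while (read_history(buf, &idx, &cnt)) {
            printf("got %d entries, next index %d\n", cnt, idx);
            cnt = 8;
        }
        printf("got %d entries, next index %d\n", cnt, idx);
        return 0;
    }
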
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/inet.c linux-3.6.10-vs2.3.4.6/kernel/vserver/inet.c
+--- linux-3.6.10/kernel/vserver/inet.c 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/inet.c 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,226 @@
++
++#include <linux/in.h>
++#include <linux/inetdevice.h>
++#include <linux/export.h>
++#include <linux/vs_inet.h>
++#include <linux/vs_inet6.h>
++#include <linux/vserver/debug.h>
++#include <net/route.h>
++#include <net/addrconf.h>
++
++
++int nx_v4_addr_conflict(struct nx_info *nxi1, struct nx_info *nxi2)
++{
++ int ret = 0;
++
++ if (!nxi1 || !nxi2 || nxi1 == nxi2)
++ ret = 1;
++ else {
++ struct nx_addr_v4 *ptr;
++
++ for (ptr = &nxi1->v4; ptr; ptr = ptr->next) {
++ if (v4_nx_addr_in_nx_info(nxi2, ptr, -1)) {
++ ret = 1;
++ break;
++ }
++ }
++ }
++
++ vxdprintk(VXD_CBIT(net, 2),
++ "nx_v4_addr_conflict(%p,%p): %d",
++ nxi1, nxi2, ret);
++
++ return ret;
++}
++
++
++#ifdef CONFIG_IPV6
++
++int nx_v6_addr_conflict(struct nx_info *nxi1, struct nx_info *nxi2)
++{
++ int ret = 0;
++
++ if (!nxi1 || !nxi2 || nxi1 == nxi2)
++ ret = 1;
++ else {
++ struct nx_addr_v6 *ptr;
++
++ for (ptr = &nxi1->v6; ptr; ptr = ptr->next) {
++ if (v6_nx_addr_in_nx_info(nxi2, ptr, -1)) {
++ ret = 1;
++ break;
++ }
++ }
++ }
++
++ vxdprintk(VXD_CBIT(net, 2),
++ "nx_v6_addr_conflict(%p,%p): %d",
++ nxi1, nxi2, ret);
++
++ return ret;
++}
++
++#endif
++
++int v4_dev_in_nx_info(struct net_device *dev, struct nx_info *nxi)
++{
++ struct in_device *in_dev;
++ struct in_ifaddr **ifap;
++ struct in_ifaddr *ifa;
++ int ret = 0;
++
++ if (!dev)
++ goto out;
++ in_dev = in_dev_get(dev);
++ if (!in_dev)
++ goto out;
++
++ for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
++ ifap = &ifa->ifa_next) {
++ if (v4_addr_in_nx_info(nxi, ifa->ifa_local, NXA_MASK_SHOW)) {
++ ret = 1;
++ break;
++ }
++ }
++ in_dev_put(in_dev);
++out:
++ return ret;
++}
++
++
++#ifdef CONFIG_IPV6
++
++int v6_dev_in_nx_info(struct net_device *dev, struct nx_info *nxi)
++{
++ struct inet6_dev *in_dev;
++ struct inet6_ifaddr *ifa;
++ int ret = 0;
++
++ if (!dev)
++ goto out;
++ in_dev = in6_dev_get(dev);
++ if (!in_dev)
++ goto out;
++
++ // for (ifap = &in_dev->addr_list; (ifa = *ifap) != NULL;
++ list_for_each_entry(ifa, &in_dev->addr_list, if_list) {
++ if (v6_addr_in_nx_info(nxi, &ifa->addr, -1)) {
++ ret = 1;
++ break;
++ }
++ }
++ in6_dev_put(in_dev);
++out:
++ return ret;
++}
++
++#endif
++
++int dev_in_nx_info(struct net_device *dev, struct nx_info *nxi)
++{
++ int ret = 1;
++
++ if (!nxi)
++ goto out;
++ if (nxi->v4.type && v4_dev_in_nx_info(dev, nxi))
++ goto out;
++#ifdef CONFIG_IPV6
++ ret = 2;
++ if (nxi->v6.type && v6_dev_in_nx_info(dev, nxi))
++ goto out;
++#endif
++ ret = 0;
++out:
++ vxdprintk(VXD_CBIT(net, 3),
++ "dev_in_nx_info(%p,%p[#%d]) = %d",
++ dev, nxi, nxi ? nxi->nx_id : 0, ret);
++ return ret;
++}
++
++struct rtable *ip_v4_find_src(struct net *net, struct nx_info *nxi,
++ struct flowi4 *fl4)
++{
++ struct rtable *rt;
++
++ if (!nxi)
++ return NULL;
++
++ /* FIXME: handle lback only case */
++ if (!NX_IPV4(nxi))
++ return ERR_PTR(-EPERM);
++
++ vxdprintk(VXD_CBIT(net, 4),
++ "ip_v4_find_src(%p[#%u]) " NIPQUAD_FMT " -> " NIPQUAD_FMT,
++ nxi, nxi ? nxi->nx_id : 0,
++ NIPQUAD(fl4->saddr), NIPQUAD(fl4->daddr));
++
++ /* single IP is unconditional */
++ if (nx_info_flags(nxi, NXF_SINGLE_IP, 0) &&
++ (fl4->saddr == INADDR_ANY))
++ fl4->saddr = nxi->v4.ip[0].s_addr;
++
++ if (fl4->saddr == INADDR_ANY) {
++ struct nx_addr_v4 *ptr;
++ __be32 found = 0;
++
++ rt = __ip_route_output_key(net, fl4);
++ if (!IS_ERR(rt)) {
++ found = fl4->saddr;
++ ip_rt_put(rt);
++ vxdprintk(VXD_CBIT(net, 4),
++ "ip_v4_find_src(%p[#%u]) rok[%u]: " NIPQUAD_FMT,
++ nxi, nxi ? nxi->nx_id : 0, fl4->flowi4_oif, NIPQUAD(found));
++ if (v4_addr_in_nx_info(nxi, found, NXA_MASK_BIND))
++ goto found;
++ }
++
++ for (ptr = &nxi->v4; ptr; ptr = ptr->next) {
++ __be32 primary = ptr->ip[0].s_addr;
++ __be32 mask = ptr->mask.s_addr;
++ __be32 neta = primary & mask;
++
++ vxdprintk(VXD_CBIT(net, 4), "ip_v4_find_src(%p[#%u]) chk: "
++ NIPQUAD_FMT "/" NIPQUAD_FMT "/" NIPQUAD_FMT,
++ nxi, nxi ? nxi->nx_id : 0, NIPQUAD(primary),
++ NIPQUAD(mask), NIPQUAD(neta));
++ if ((found & mask) != neta)
++ continue;
++
++ fl4->saddr = primary;
++ rt = __ip_route_output_key(net, fl4);
++ vxdprintk(VXD_CBIT(net, 4),
++ "ip_v4_find_src(%p[#%u]) rok[%u]: " NIPQUAD_FMT,
++ nxi, nxi ? nxi->nx_id : 0, fl4->flowi4_oif, NIPQUAD(primary));
++ if (!IS_ERR(rt)) {
++ found = fl4->saddr;
++ ip_rt_put(rt);
++ if (found == primary)
++ goto found;
++ }
++ }
++ /* still no source ip? */
++ found = ipv4_is_loopback(fl4->daddr)
++ ? IPI_LOOPBACK : nxi->v4.ip[0].s_addr;
++ found:
++ /* assign src ip to flow */
++ fl4->saddr = found;
++
++ } else {
++ if (!v4_addr_in_nx_info(nxi, fl4->saddr, NXA_MASK_BIND))
++ return ERR_PTR(-EPERM);
++ }
++
++ if (nx_info_flags(nxi, NXF_LBACK_REMAP, 0)) {
++ if (ipv4_is_loopback(fl4->daddr))
++ fl4->daddr = nxi->v4_lback.s_addr;
++ if (ipv4_is_loopback(fl4->saddr))
++ fl4->saddr = nxi->v4_lback.s_addr;
++ } else if (ipv4_is_loopback(fl4->daddr) &&
++ !nx_info_flags(nxi, NXF_LBACK_ALLOW, 0))
++ return ERR_PTR(-EPERM);
++
++ return NULL;
++}
++
++EXPORT_SYMBOL_GPL(ip_v4_find_src);
++
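
ip_v4_find_src() above accepts a routing-derived source address only if it falls inside one of the context's configured prefixes, via the test (found & mask) != neta. The same check in a stand-alone, host-byte-order form, with made-up addresses that are not part of the patch:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    /* does addr fall inside the prefix described by primary/mask? */
    static int addr_in_prefix(uint32_t addr, uint32_t primary, uint32_t mask)
    {
        return (addr & mask) == (primary & mask);
    }

    int main(void)
    {
        uint32_t addr    = ntohl(inet_addr("192.168.1.42"));
        uint32_t primary = ntohl(inet_addr("192.168.1.1"));
        uint32_t mask    = ntohl(inet_addr("255.255.255.0"));

        printf("in prefix: %d\n", addr_in_prefix(addr, primary, mask));
        return 0;
    }
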
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/init.c linux-3.6.10-vs2.3.4.6/kernel/vserver/init.c
+--- linux-3.6.10/kernel/vserver/init.c 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/init.c 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,45 @@
++/*
++ * linux/kernel/vserver/init.c
++ *
++ * Virtual Server Init
++ *
++ * Copyright (C) 2004-2007 Herbert Pötzl
++ *
++ * V0.01 basic structure
++ *
++ */
++
++#include <linux/init.h>
++
++int vserver_register_sysctl(void);
++void vserver_unregister_sysctl(void);
++
++
++static int __init init_vserver(void)
++{
++ int ret = 0;
++
++#ifdef CONFIG_VSERVER_DEBUG
++ vserver_register_sysctl();
++#endif
++ return ret;
++}
++
++
++static void __exit exit_vserver(void)
++{
++
++#ifdef CONFIG_VSERVER_DEBUG
++ vserver_unregister_sysctl();
++#endif
++ return;
++}
++
++/* FIXME: GFP_ZONETYPES gone
++long vx_slab[GFP_ZONETYPES]; */
++long vx_area;
++
++
++module_init(init_vserver);
++module_exit(exit_vserver);
++
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/inode.c linux-3.6.10-vs2.3.4.6/kernel/vserver/inode.c
+--- linux-3.6.10/kernel/vserver/inode.c 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/inode.c 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,437 @@
++/*
++ * linux/kernel/vserver/inode.c
++ *
++ * Virtual Server: File System Support
++ *
++ * Copyright (C) 2004-2007 Herbert Pötzl
++ *
++ * V0.01 separated from vcontext V0.05
++ * V0.02 moved to tag (instead of xid)
++ *
++ */
++
++#include <linux/tty.h>
++#include <linux/proc_fs.h>
++#include <linux/devpts_fs.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/mount.h>
++#include <linux/parser.h>
++#include <linux/namei.h>
++#include <linux/vserver/inode.h>
++#include <linux/vserver/inode_cmd.h>
++#include <linux/vs_base.h>
++#include <linux/vs_tag.h>
++
++#include <asm/uaccess.h>
++
++
++static int __vc_get_iattr(struct inode *in, uint32_t *tag, uint32_t *flags, uint32_t *mask)
++{
++ struct proc_dir_entry *entry;
++
++ if (!in || !in->i_sb)
++ return -ESRCH;
++
++ *flags = IATTR_TAG
++ | (IS_IMMUTABLE(in) ? IATTR_IMMUTABLE : 0)
++ | (IS_IXUNLINK(in) ? IATTR_IXUNLINK : 0)
++ | (IS_BARRIER(in) ? IATTR_BARRIER : 0)
++ | (IS_COW(in) ? IATTR_COW : 0);
++ *mask = IATTR_IXUNLINK | IATTR_IMMUTABLE | IATTR_COW;
++
++ if (S_ISDIR(in->i_mode))
++ *mask |= IATTR_BARRIER;
++
++ if (IS_TAGGED(in)) {
++ *tag = in->i_tag;
++ *mask |= IATTR_TAG;
++ }
++
++ switch (in->i_sb->s_magic) {
++ case PROC_SUPER_MAGIC:
++ entry = PROC_I(in)->pde;
++
++ /* check for specific inodes? */
++ if (entry)
++ *mask |= IATTR_FLAGS;
++ if (entry)
++ *flags |= (entry->vx_flags & IATTR_FLAGS);
++ else
++ *flags |= (PROC_I(in)->vx_flags & IATTR_FLAGS);
++ break;
++
++ case DEVPTS_SUPER_MAGIC:
++ *tag = in->i_tag;
++ *mask |= IATTR_TAG;
++ break;
++
++ default:
++ break;
++ }
++ return 0;
++}
++
++int vc_get_iattr(void __user *data)
++{
++ struct path path;
++ struct vcmd_ctx_iattr_v1 vc_data = { .tag = -1 };
++ int ret;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ ret = user_lpath(vc_data.name, &path);
++ if (!ret) {
++ ret = __vc_get_iattr(path.dentry->d_inode,
++ &vc_data.tag, &vc_data.flags, &vc_data.mask);
++ path_put(&path);
++ }
++ if (ret)
++ return ret;
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ ret = -EFAULT;
++ return ret;
++}
++
++#ifdef CONFIG_COMPAT
++
++int vc_get_iattr_x32(void __user *data)
++{
++ struct path path;
++ struct vcmd_ctx_iattr_v1_x32 vc_data = { .tag = -1 };
++ int ret;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ ret = user_lpath(compat_ptr(vc_data.name_ptr), &path);
++ if (!ret) {
++ ret = __vc_get_iattr(path.dentry->d_inode,
++ &vc_data.tag, &vc_data.flags, &vc_data.mask);
++ path_put(&path);
++ }
++ if (ret)
++ return ret;
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ ret = -EFAULT;
++ return ret;
++}
++
++#endif /* CONFIG_COMPAT */
++
++
++int vc_fget_iattr(uint32_t fd, void __user *data)
++{
++ struct file *filp;
++ struct vcmd_ctx_fiattr_v0 vc_data = { .tag = -1 };
++ int ret;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ filp = fget(fd);
++ if (!filp || !filp->f_dentry || !filp->f_dentry->d_inode)
++ return -EBADF;
++
++ ret = __vc_get_iattr(filp->f_dentry->d_inode,
++ &vc_data.tag, &vc_data.flags, &vc_data.mask);
++
++ fput(filp);
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ ret = -EFAULT;
++ return ret;
++}
++
++
++static int __vc_set_iattr(struct dentry *de, uint32_t *tag, uint32_t *flags, uint32_t *mask)
++{
++ struct inode *in = de->d_inode;
++ int error = 0, is_proc = 0, has_tag = 0;
++ struct iattr attr = { 0 };
++
++ if (!in || !in->i_sb)
++ return -ESRCH;
++
++ is_proc = (in->i_sb->s_magic == PROC_SUPER_MAGIC);
++ if ((*mask & IATTR_FLAGS) && !is_proc)
++ return -EINVAL;
++
++ has_tag = IS_TAGGED(in) ||
++ (in->i_sb->s_magic == DEVPTS_SUPER_MAGIC);
++ if ((*mask & IATTR_TAG) && !has_tag)
++ return -EINVAL;
++
++ mutex_lock(&in->i_mutex);
++ if (*mask & IATTR_TAG) {
++ attr.ia_tag = *tag;
++ attr.ia_valid |= ATTR_TAG;
++ }
++
++ if (*mask & IATTR_FLAGS) {
++ struct proc_dir_entry *entry = PROC_I(in)->pde;
++ unsigned int iflags = PROC_I(in)->vx_flags;
++
++ iflags = (iflags & ~(*mask & IATTR_FLAGS))
++ | (*flags & IATTR_FLAGS);
++ PROC_I(in)->vx_flags = iflags;
++ if (entry)
++ entry->vx_flags = iflags;
++ }
++
++ if (*mask & (IATTR_IMMUTABLE | IATTR_IXUNLINK |
++ IATTR_BARRIER | IATTR_COW)) {
++ int iflags = in->i_flags;
++ int vflags = in->i_vflags;
++
++ if (*mask & IATTR_IMMUTABLE) {
++ if (*flags & IATTR_IMMUTABLE)
++ iflags |= S_IMMUTABLE;
++ else
++ iflags &= ~S_IMMUTABLE;
++ }
++ if (*mask & IATTR_IXUNLINK) {
++ if (*flags & IATTR_IXUNLINK)
++ iflags |= S_IXUNLINK;
++ else
++ iflags &= ~S_IXUNLINK;
++ }
++ if (S_ISDIR(in->i_mode) && (*mask & IATTR_BARRIER)) {
++ if (*flags & IATTR_BARRIER)
++ vflags |= V_BARRIER;
++ else
++ vflags &= ~V_BARRIER;
++ }
++ if (S_ISREG(in->i_mode) && (*mask & IATTR_COW)) {
++ if (*flags & IATTR_COW)
++ vflags |= V_COW;
++ else
++ vflags &= ~V_COW;
++ }
++ if (in->i_op && in->i_op->sync_flags) {
++ error = in->i_op->sync_flags(in, iflags, vflags);
++ if (error)
++ goto out;
++ }
++ }
++
++ if (attr.ia_valid) {
++ if (in->i_op && in->i_op->setattr)
++ error = in->i_op->setattr(de, &attr);
++ else {
++ error = inode_change_ok(in, &attr);
++ if (!error) {
++ setattr_copy(in, &attr);
++ mark_inode_dirty(in);
++ }
++ }
++ }
++
++out:
++ mutex_unlock(&in->i_mutex);
++ return error;
++}
++
++int vc_set_iattr(void __user *data)
++{
++ struct path path;
++ struct vcmd_ctx_iattr_v1 vc_data;
++ int ret;
++
++ if (!capable(CAP_LINUX_IMMUTABLE))
++ return -EPERM;
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ ret = user_lpath(vc_data.name, &path);
++ if (!ret) {
++ ret = __vc_set_iattr(path.dentry,
++ &vc_data.tag, &vc_data.flags, &vc_data.mask);
++ path_put(&path);
++ }
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ ret = -EFAULT;
++ return ret;
++}
++
++#ifdef CONFIG_COMPAT
++
++int vc_set_iattr_x32(void __user *data)
++{
++ struct path path;
++ struct vcmd_ctx_iattr_v1_x32 vc_data;
++ int ret;
++
++ if (!capable(CAP_LINUX_IMMUTABLE))
++ return -EPERM;
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ ret = user_lpath(compat_ptr(vc_data.name_ptr), &path);
++ if (!ret) {
++ ret = __vc_set_iattr(path.dentry,
++ &vc_data.tag, &vc_data.flags, &vc_data.mask);
++ path_put(&path);
++ }
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ ret = -EFAULT;
++ return ret;
++}
++
++#endif /* CONFIG_COMPAT */
++
++int vc_fset_iattr(uint32_t fd, void __user *data)
++{
++ struct file *filp;
++ struct vcmd_ctx_fiattr_v0 vc_data;
++ int ret;
++
++ if (!capable(CAP_LINUX_IMMUTABLE))
++ return -EPERM;
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ filp = fget(fd);
++ if (!filp || !filp->f_dentry || !filp->f_dentry->d_inode)
++ return -EBADF;
++
++ ret = __vc_set_iattr(filp->f_dentry, &vc_data.tag,
++ &vc_data.flags, &vc_data.mask);
++
++ fput(filp);
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return ret;
++}
++
++
++enum { Opt_notagcheck, Opt_tag, Opt_notag, Opt_tagid, Opt_err };
++
++static match_table_t tokens = {
++ {Opt_notagcheck, "notagcheck"},
++#ifdef CONFIG_PROPAGATE
++ {Opt_notag, "notag"},
++ {Opt_tag, "tag"},
++ {Opt_tagid, "tagid=%u"},
++#endif
++ {Opt_err, NULL}
++};
++
++
++static void __dx_parse_remove(char *string, char *opt)
++{
++ char *p = strstr(string, opt);
++ char *q = p;
++
++ if (p) {
++ while (*q != '\0' && *q != ',')
++ q++;
++ while (*q)
++ *p++ = *q++;
++ while (*p)
++ *p++ = '\0';
++ }
++}
++
++int dx_parse_tag(char *string, tag_t *tag, int remove, int *mnt_flags,
++ unsigned long *flags)
++{
++ int set = 0;
++ substring_t args[MAX_OPT_ARGS];
++ int token;
++ char *s, *p, *opts;
++#if defined(CONFIG_PROPAGATE) || defined(CONFIG_VSERVER_DEBUG)
++ int option = 0;
++#endif
++
++ if (!string)
++ return 0;
++ s = kstrdup(string, GFP_KERNEL | GFP_ATOMIC);
++ if (!s)
++ return 0;
++
++ opts = s;
++ while ((p = strsep(&opts, ",")) != NULL) {
++ token = match_token(p, tokens, args);
++
++ switch (token) {
++#ifdef CONFIG_PROPAGATE
++ case Opt_tag:
++ if (tag)
++ *tag = 0;
++ if (remove)
++ __dx_parse_remove(s, "tag");
++ *mnt_flags |= MNT_TAGID;
++ set |= MNT_TAGID;
++ break;
++ case Opt_notag:
++ if (remove)
++ __dx_parse_remove(s, "notag");
++ *mnt_flags |= MNT_NOTAG;
++ set |= MNT_NOTAG;
++ break;
++ case Opt_tagid:
++ if (tag && !match_int(args, &option))
++ *tag = option;
++ if (remove)
++ __dx_parse_remove(s, "tagid");
++ *mnt_flags |= MNT_TAGID;
++ set |= MNT_TAGID;
++ break;
++#endif /* CONFIG_PROPAGATE */
++ case Opt_notagcheck:
++ if (remove)
++ __dx_parse_remove(s, "notagcheck");
++ *flags |= MS_NOTAGCHECK;
++ set |= MS_NOTAGCHECK;
++ break;
++ }
++ vxdprintk(VXD_CBIT(tag, 7),
++ "dx_parse_tag(" VS_Q("%s") "): %d:#%d",
++ p, token, option);
++ }
++ if (set)
++ strcpy(string, s);
++ kfree(s);
++ return set;
++}
++
++#ifdef CONFIG_PROPAGATE
++
++void __dx_propagate_tag(struct nameidata *nd, struct inode *inode)
++{
++ tag_t new_tag = 0;
++ struct vfsmount *mnt;
++ int propagate;
++
++ if (!nd)
++ return;
++ mnt = nd->path.mnt;
++ if (!mnt)
++ return;
++
++ propagate = (mnt->mnt_flags & MNT_TAGID);
++ if (propagate)
++ new_tag = mnt->mnt_tag;
++
++ vxdprintk(VXD_CBIT(tag, 7),
++ "dx_propagate_tag(%p[#%lu.%d]): %d,%d",
++ inode, inode->i_ino, inode->i_tag,
++ new_tag, (propagate) ? 1 : 0);
++
++ if (propagate)
++ inode->i_tag = new_tag;
++}
++
++#include <linux/module.h>
++
++EXPORT_SYMBOL_GPL(__dx_propagate_tag);
++
++#endif /* CONFIG_PROPAGATE */
++
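
__dx_parse_remove() above strips a matched option from the comma-separated mount option string in place; dx_parse_tag() then copies the shortened string back if anything was consumed. A user-space sketch of the same in-place removal (the example option string is illustrative):

    #include <stdio.h>
    #include <string.h>

    /* strip the first option starting with `opt` from a comma-separated
     * option string, in place */
    static void strip_option(char *string, const char *opt)
    {
        char *p = strstr(string, opt);
        char *q = p;

        if (!p)
            return;
        while (*q != '\0' && *q != ',')     /* skip over the option itself */
            q++;
        while (*q)                          /* shift the tail left */
            *p++ = *q++;
        while (*p)                          /* blank out the leftover */
            *p++ = '\0';
    }

    int main(void)
    {
        char opts[] = "rw,tagid=42,noatime";

        strip_option(opts, "tagid");
        printf("%s\n", opts);   /* "rw,,noatime" -- preceding comma is left in place */
        return 0;
    }
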
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/limit.c linux-3.6.10-vs2.3.4.6/kernel/vserver/limit.c
+--- linux-3.6.10/kernel/vserver/limit.c 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/limit.c 2012-12-12 12:09:04.000000000 +0000
+@@ -0,0 +1,345 @@
++/*
++ * linux/kernel/vserver/limit.c
++ *
++ * Virtual Server: Context Limits
++ *
++ * Copyright (C) 2004-2010 Herbert Pötzl
++ *
++ * V0.01 broken out from vcontext V0.05
++ * V0.02 changed vcmds to vxi arg
++ * V0.03 added memory cgroup support
++ *
++ */
++
++#include <linux/sched.h>
++#include <linux/module.h>
++#include <linux/memcontrol.h>
++#include <linux/res_counter.h>
++#include <linux/vs_limit.h>
++#include <linux/vserver/limit.h>
++#include <linux/vserver/limit_cmd.h>
++
++#include <asm/uaccess.h>
++
++
++const char *vlimit_name[NUM_LIMITS] = {
++ [RLIMIT_CPU] = "CPU",
++ [RLIMIT_NPROC] = "NPROC",
++ [RLIMIT_NOFILE] = "NOFILE",
++ [RLIMIT_LOCKS] = "LOCKS",
++ [RLIMIT_SIGPENDING] = "SIGP",
++ [RLIMIT_MSGQUEUE] = "MSGQ",
++
++ [VLIMIT_NSOCK] = "NSOCK",
++ [VLIMIT_OPENFD] = "OPENFD",
++ [VLIMIT_SHMEM] = "SHMEM",
++ [VLIMIT_DENTRY] = "DENTRY",
++};
++
++EXPORT_SYMBOL_GPL(vlimit_name);
++
++#define MASK_ENTRY(x) (1 << (x))
++
++const struct vcmd_ctx_rlimit_mask_v0 vlimit_mask = {
++ /* minimum */
++ 0
++ , /* softlimit */
++ 0
++ , /* maximum */
++ MASK_ENTRY( RLIMIT_NPROC ) |
++ MASK_ENTRY( RLIMIT_NOFILE ) |
++ MASK_ENTRY( RLIMIT_LOCKS ) |
++ MASK_ENTRY( RLIMIT_MSGQUEUE ) |
++
++ MASK_ENTRY( VLIMIT_NSOCK ) |
++ MASK_ENTRY( VLIMIT_OPENFD ) |
++ MASK_ENTRY( VLIMIT_SHMEM ) |
++ MASK_ENTRY( VLIMIT_DENTRY ) |
++ 0
++};
++ /* accounting only */
++uint32_t account_mask =
++ MASK_ENTRY( VLIMIT_SEMARY ) |
++ MASK_ENTRY( VLIMIT_NSEMS ) |
++ MASK_ENTRY( VLIMIT_MAPPED ) |
++ 0;
++
++
++static int is_valid_vlimit(int id)
++{
++ uint32_t mask = vlimit_mask.minimum |
++ vlimit_mask.softlimit | vlimit_mask.maximum;
++ return mask & (1 << id);
++}
++
++static int is_accounted_vlimit(int id)
++{
++ if (is_valid_vlimit(id))
++ return 1;
++ return account_mask & (1 << id);
++}
++
++
++static inline uint64_t vc_get_soft(struct vx_info *vxi, int id)
++{
++ rlim_t limit = __rlim_soft(&vxi->limit, id);
++ return VX_VLIM(limit);
++}
++
++static inline uint64_t vc_get_hard(struct vx_info *vxi, int id)
++{
++ rlim_t limit = __rlim_hard(&vxi->limit, id);
++ return VX_VLIM(limit);
++}
++
++static int do_get_rlimit(struct vx_info *vxi, uint32_t id,
++ uint64_t *minimum, uint64_t *softlimit, uint64_t *maximum)
++{
++ if (!is_valid_vlimit(id))
++ return -EINVAL;
++
++ if (minimum)
++ *minimum = CRLIM_UNSET;
++ if (softlimit)
++ *softlimit = vc_get_soft(vxi, id);
++ if (maximum)
++ *maximum = vc_get_hard(vxi, id);
++ return 0;
++}
++
++int vc_get_rlimit(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_ctx_rlimit_v0 vc_data;
++ int ret;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ ret = do_get_rlimit(vxi, vc_data.id,
++ &vc_data.minimum, &vc_data.softlimit, &vc_data.maximum);
++ if (ret)
++ return ret;
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return 0;
++}
++
++static int do_set_rlimit(struct vx_info *vxi, uint32_t id,
++ uint64_t minimum, uint64_t softlimit, uint64_t maximum)
++{
++ if (!is_valid_vlimit(id))
++ return -EINVAL;
++
++ if (maximum != CRLIM_KEEP)
++ __rlim_hard(&vxi->limit, id) = VX_RLIM(maximum);
++ if (softlimit != CRLIM_KEEP)
++ __rlim_soft(&vxi->limit, id) = VX_RLIM(softlimit);
++
++ /* clamp soft limit */
++ if (__rlim_soft(&vxi->limit, id) > __rlim_hard(&vxi->limit, id))
++ __rlim_soft(&vxi->limit, id) = __rlim_hard(&vxi->limit, id);
++
++ return 0;
++}
++
++int vc_set_rlimit(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_ctx_rlimit_v0 vc_data;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ return do_set_rlimit(vxi, vc_data.id,
++ vc_data.minimum, vc_data.softlimit, vc_data.maximum);
++}
++
++#ifdef CONFIG_IA32_EMULATION
++
++int vc_set_rlimit_x32(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_ctx_rlimit_v0_x32 vc_data;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ return do_set_rlimit(vxi, vc_data.id,
++ vc_data.minimum, vc_data.softlimit, vc_data.maximum);
++}
++
++int vc_get_rlimit_x32(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_ctx_rlimit_v0_x32 vc_data;
++ int ret;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ ret = do_get_rlimit(vxi, vc_data.id,
++ &vc_data.minimum, &vc_data.softlimit, &vc_data.maximum);
++ if (ret)
++ return ret;
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return 0;
++}
++
++#endif /* CONFIG_IA32_EMULATION */
++
++
++int vc_get_rlimit_mask(uint32_t id, void __user *data)
++{
++ if (copy_to_user(data, &vlimit_mask, sizeof(vlimit_mask)))
++ return -EFAULT;
++ return 0;
++}
++
++
++static inline void vx_reset_hits(struct _vx_limit *limit)
++{
++ int lim;
++
++ for (lim = 0; lim < NUM_LIMITS; lim++) {
++ atomic_set(&__rlim_lhit(limit, lim), 0);
++ }
++}
++
++int vc_reset_hits(struct vx_info *vxi, void __user *data)
++{
++ vx_reset_hits(&vxi->limit);
++ return 0;
++}
++
++static inline void vx_reset_minmax(struct _vx_limit *limit)
++{
++ rlim_t value;
++ int lim;
++
++ for (lim = 0; lim < NUM_LIMITS; lim++) {
++ value = __rlim_get(limit, lim);
++ __rlim_rmax(limit, lim) = value;
++ __rlim_rmin(limit, lim) = value;
++ }
++}
++
++int vc_reset_minmax(struct vx_info *vxi, void __user *data)
++{
++ vx_reset_minmax(&vxi->limit);
++ return 0;
++}
++
++
++int vc_rlimit_stat(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_rlimit_stat_v0 vc_data;
++ struct _vx_limit *limit = &vxi->limit;
++ int id;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ id = vc_data.id;
++ if (!is_accounted_vlimit(id))
++ return -EINVAL;
++
++ vx_limit_fixup(limit, id);
++ vc_data.hits = atomic_read(&__rlim_lhit(limit, id));
++ vc_data.value = __rlim_get(limit, id);
++ vc_data.minimum = __rlim_rmin(limit, id);
++ vc_data.maximum = __rlim_rmax(limit, id);
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return 0;
++}
++
++
++void vx_vsi_meminfo(struct sysinfo *val)
++{
++#ifdef CONFIG_MEMCG
++ struct mem_cgroup *mcg;
++ u64 res_limit, res_usage;
++
++ rcu_read_lock();
++ mcg = mem_cgroup_from_task(current);
++ rcu_read_unlock();
++ if (!mcg)
++ goto out;
++
++ res_limit = mem_cgroup_res_read_u64(mcg, RES_LIMIT);
++ res_usage = mem_cgroup_res_read_u64(mcg, RES_USAGE);
++
++ if (res_limit != RESOURCE_MAX)
++ val->totalram = (res_limit >> PAGE_SHIFT);
++ val->freeram = val->totalram - (res_usage >> PAGE_SHIFT);
++ val->bufferram = 0;
++ val->totalhigh = 0;
++ val->freehigh = 0;
++out:
++#endif /* CONFIG_MEMCG */
++ return;
++}
++
++void vx_vsi_swapinfo(struct sysinfo *val)
++{
++#ifdef CONFIG_MEMCG
++#ifdef CONFIG_MEMCG_SWAP
++ struct mem_cgroup *mcg;
++ u64 res_limit, res_usage, memsw_limit, memsw_usage;
++ s64 swap_limit, swap_usage;
++
++ rcu_read_lock();
++ mcg = mem_cgroup_from_task(current);
++ rcu_read_unlock();
++ if (!mcg)
++ goto out;
++
++ res_limit = mem_cgroup_res_read_u64(mcg, RES_LIMIT);
++ res_usage = mem_cgroup_res_read_u64(mcg, RES_USAGE);
++ memsw_limit = mem_cgroup_memsw_read_u64(mcg, RES_LIMIT);
++ memsw_usage = mem_cgroup_memsw_read_u64(mcg, RES_USAGE);
++
++ /* memory unlimited */
++ if (res_limit == RESOURCE_MAX)
++ goto out;
++
++ swap_limit = memsw_limit - res_limit;
++ /* we have a swap limit? */
++ if (memsw_limit != RESOURCE_MAX)
++ val->totalswap = swap_limit >> PAGE_SHIFT;
++
++ /* calculate swap part */
++ swap_usage = (memsw_usage > res_usage) ?
++ memsw_usage - res_usage : 0;
++
++ /* total shown minus usage gives free swap */
++ val->freeswap = (swap_usage < swap_limit) ?
++ val->totalswap - (swap_usage >> PAGE_SHIFT) : 0;
++out:
++#else /* !CONFIG_MEMCG_SWAP */
++ val->totalswap = 0;
++ val->freeswap = 0;
++#endif /* !CONFIG_MEMCG_SWAP */
++#endif /* CONFIG_MEMCG */
++ return;
++}
++
++long vx_vsi_cached(struct sysinfo *val)
++{
++ long cache = 0;
++#ifdef CONFIG_MEMCG
++ struct mem_cgroup *mcg;
++
++ rcu_read_lock();
++ mcg = mem_cgroup_from_task(current);
++ rcu_read_unlock();
++ if (!mcg)
++ goto out;
++
++ cache = mem_cgroup_stat_read_cache(mcg);
++out:
++#endif
++ return cache;
++}
++
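
vx_vsi_swapinfo() above derives per-guest swap figures from the memcg counters: the swap limit is the mem+swap limit minus the memory limit, and swap usage is the clamped difference of the two usage counters. The same arithmetic as a stand-alone sketch, with made-up limits that are not part of the patch:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    /* derive guest swap totals from memcg mem and mem+swap counters */
    static void guest_swapinfo(uint64_t mem_limit, uint64_t mem_usage,
                               uint64_t memsw_limit, uint64_t memsw_usage,
                               uint64_t *totalswap, uint64_t *freeswap)
    {
        uint64_t swap_limit = memsw_limit - mem_limit;
        uint64_t swap_usage = (memsw_usage > mem_usage) ?
                              memsw_usage - mem_usage : 0;

        *totalswap = swap_limit >> PAGE_SHIFT;
        *freeswap  = (swap_usage < swap_limit) ?
                     *totalswap - (swap_usage >> PAGE_SHIFT) : 0;
    }

    int main(void)
    {
        uint64_t total, freep;

        /* 512 MiB memory limit, 768 MiB mem+swap limit, some usage */
        guest_swapinfo(512ULL << 20, 300ULL << 20,
                       768ULL << 20, 400ULL << 20, &total, &freep);
        printf("totalswap=%llu pages, freeswap=%llu pages\n",
               (unsigned long long)total, (unsigned long long)freep);
        return 0;
    }
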
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/limit_init.h linux-3.6.10-vs2.3.4.6/kernel/vserver/limit_init.h
+--- linux-3.6.10/kernel/vserver/limit_init.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/limit_init.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,31 @@
++
++
++static inline void vx_info_init_limit(struct _vx_limit *limit)
++{
++ int lim;
++
++ for (lim = 0; lim < NUM_LIMITS; lim++) {
++ __rlim_soft(limit, lim) = RLIM_INFINITY;
++ __rlim_hard(limit, lim) = RLIM_INFINITY;
++ __rlim_set(limit, lim, 0);
++ atomic_set(&__rlim_lhit(limit, lim), 0);
++ __rlim_rmin(limit, lim) = 0;
++ __rlim_rmax(limit, lim) = 0;
++ }
++}
++
++static inline void vx_info_exit_limit(struct _vx_limit *limit)
++{
++ rlim_t value;
++ int lim;
++
++ for (lim = 0; lim < NUM_LIMITS; lim++) {
++ if ((1 << lim) & VLIM_NOCHECK)
++ continue;
++ value = __rlim_get(limit, lim);
++ vxwprintk_xid(value,
++ "!!! limit: %p[%s,%d] = %ld on exit.",
++ limit, vlimit_name[lim], lim, (long)value);
++ }
++}
++
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/limit_proc.h linux-3.6.10-vs2.3.4.6/kernel/vserver/limit_proc.h
+--- linux-3.6.10/kernel/vserver/limit_proc.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/limit_proc.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,57 @@
++#ifndef _VX_LIMIT_PROC_H
++#define _VX_LIMIT_PROC_H
++
++#include <linux/vserver/limit_int.h>
++
++
++#define VX_LIMIT_FMT ":\t%8ld\t%8ld/%8ld\t%8lld/%8lld\t%6d\n"
++#define VX_LIMIT_TOP \
++ "Limit\t current\t min/max\t\t soft/hard\t\thits\n"
++
++#define VX_LIMIT_ARG(r) \
++ (unsigned long)__rlim_get(limit, r), \
++ (unsigned long)__rlim_rmin(limit, r), \
++ (unsigned long)__rlim_rmax(limit, r), \
++ VX_VLIM(__rlim_soft(limit, r)), \
++ VX_VLIM(__rlim_hard(limit, r)), \
++ atomic_read(&__rlim_lhit(limit, r))
++
++static inline int vx_info_proc_limit(struct _vx_limit *limit, char *buffer)
++{
++ vx_limit_fixup(limit, -1);
++ return sprintf(buffer, VX_LIMIT_TOP
++ "PROC" VX_LIMIT_FMT
++ "VM" VX_LIMIT_FMT
++ "VML" VX_LIMIT_FMT
++ "RSS" VX_LIMIT_FMT
++ "ANON" VX_LIMIT_FMT
++ "RMAP" VX_LIMIT_FMT
++ "FILES" VX_LIMIT_FMT
++ "OFD" VX_LIMIT_FMT
++ "LOCKS" VX_LIMIT_FMT
++ "SOCK" VX_LIMIT_FMT
++ "MSGQ" VX_LIMIT_FMT
++ "SHM" VX_LIMIT_FMT
++ "SEMA" VX_LIMIT_FMT
++ "SEMS" VX_LIMIT_FMT
++ "DENT" VX_LIMIT_FMT,
++ VX_LIMIT_ARG(RLIMIT_NPROC),
++ VX_LIMIT_ARG(RLIMIT_AS),
++ VX_LIMIT_ARG(RLIMIT_MEMLOCK),
++ VX_LIMIT_ARG(RLIMIT_RSS),
++ VX_LIMIT_ARG(VLIMIT_ANON),
++ VX_LIMIT_ARG(VLIMIT_MAPPED),
++ VX_LIMIT_ARG(RLIMIT_NOFILE),
++ VX_LIMIT_ARG(VLIMIT_OPENFD),
++ VX_LIMIT_ARG(RLIMIT_LOCKS),
++ VX_LIMIT_ARG(VLIMIT_NSOCK),
++ VX_LIMIT_ARG(RLIMIT_MSGQUEUE),
++ VX_LIMIT_ARG(VLIMIT_SHMEM),
++ VX_LIMIT_ARG(VLIMIT_SEMARY),
++ VX_LIMIT_ARG(VLIMIT_NSEMS),
++ VX_LIMIT_ARG(VLIMIT_DENTRY));
++}
++
++#endif /* _VX_LIMIT_PROC_H */
++
++
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/network.c linux-3.6.10-vs2.3.4.6/kernel/vserver/network.c
+--- linux-3.6.10/kernel/vserver/network.c 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/network.c 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,912 @@
++/*
++ * linux/kernel/vserver/network.c
++ *
++ * Virtual Server: Network Support
++ *
++ * Copyright (C) 2003-2007 Herbert Pötzl
++ *
++ * V0.01 broken out from vcontext V0.05
++ * V0.02 cleaned up implementation
++ * V0.03 added equiv nx commands
++ * V0.04 switch to RCU based hash
++ * V0.05 and back to locking again
++ * V0.06 changed vcmds to nxi arg
++ * V0.07 have __create claim() the nxi
++ *
++ */
++
++#include <linux/err.h>
++#include <linux/slab.h>
++#include <linux/rcupdate.h>
++
++#include <linux/vs_network.h>
++#include <linux/vs_pid.h>
++#include <linux/vserver/network_cmd.h>
++
++
++atomic_t nx_global_ctotal = ATOMIC_INIT(0);
++atomic_t nx_global_cactive = ATOMIC_INIT(0);
++
++static struct kmem_cache *nx_addr_v4_cachep = NULL;
++static struct kmem_cache *nx_addr_v6_cachep = NULL;
++
++
++static int __init init_network(void)
++{
++ nx_addr_v4_cachep = kmem_cache_create("nx_v4_addr_cache",
++ sizeof(struct nx_addr_v4), 0,
++ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
++ nx_addr_v6_cachep = kmem_cache_create("nx_v6_addr_cache",
++ sizeof(struct nx_addr_v6), 0,
++ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
++ return 0;
++}
++
++
++/* __alloc_nx_addr_v4() */
++
++static inline struct nx_addr_v4 *__alloc_nx_addr_v4(void)
++{
++ struct nx_addr_v4 *nxa = kmem_cache_alloc(
++ nx_addr_v4_cachep, GFP_KERNEL);
++
++ if (!IS_ERR(nxa))
++ memset(nxa, 0, sizeof(*nxa));
++ return nxa;
++}
++
++/* __dealloc_nx_addr_v4() */
++
++static inline void __dealloc_nx_addr_v4(struct nx_addr_v4 *nxa)
++{
++ kmem_cache_free(nx_addr_v4_cachep, nxa);
++}
++
++/* __dealloc_nx_addr_v4_all() */
++
++static inline void __dealloc_nx_addr_v4_all(struct nx_addr_v4 *nxa)
++{
++ while (nxa) {
++ struct nx_addr_v4 *next = nxa->next;
++
++ __dealloc_nx_addr_v4(nxa);
++ nxa = next;
++ }
++}
++
++
++#ifdef CONFIG_IPV6
++
++/* __alloc_nx_addr_v6() */
++
++static inline struct nx_addr_v6 *__alloc_nx_addr_v6(void)
++{
++ struct nx_addr_v6 *nxa = kmem_cache_alloc(
++ nx_addr_v6_cachep, GFP_KERNEL);
++
++ if (!IS_ERR(nxa))
++ memset(nxa, 0, sizeof(*nxa));
++ return nxa;
++}
++
++/* __dealloc_nx_addr_v6() */
++
++static inline void __dealloc_nx_addr_v6(struct nx_addr_v6 *nxa)
++{
++ kmem_cache_free(nx_addr_v6_cachep, nxa);
++}
++
++/* __dealloc_nx_addr_v6_all() */
++
++static inline void __dealloc_nx_addr_v6_all(struct nx_addr_v6 *nxa)
++{
++ while (nxa) {
++ struct nx_addr_v6 *next = nxa->next;
++
++ __dealloc_nx_addr_v6(nxa);
++ nxa = next;
++ }
++}
++
++#endif /* CONFIG_IPV6 */
++
++/* __alloc_nx_info()
++
++ * allocate an initialized nx_info struct
++ * doesn't make it visible (hash) */
++
++static struct nx_info *__alloc_nx_info(nid_t nid)
++{
++ struct nx_info *new = NULL;
++
++ vxdprintk(VXD_CBIT(nid, 1), "alloc_nx_info(%d)*", nid);
++
++ /* would this benefit from a slab cache? */
++ new = kmalloc(sizeof(struct nx_info), GFP_KERNEL);
++ if (!new)
++		return NULL;
++
++ memset(new, 0, sizeof(struct nx_info));
++ new->nx_id = nid;
++ INIT_HLIST_NODE(&new->nx_hlist);
++ atomic_set(&new->nx_usecnt, 0);
++ atomic_set(&new->nx_tasks, 0);
++ new->nx_state = 0;
++
++ new->nx_flags = NXF_INIT_SET;
++
++ /* rest of init goes here */
++
++ new->v4_lback.s_addr = htonl(INADDR_LOOPBACK);
++ new->v4_bcast.s_addr = htonl(INADDR_BROADCAST);
++
++ vxdprintk(VXD_CBIT(nid, 0),
++ "alloc_nx_info(%d) = %p", nid, new);
++ atomic_inc(&nx_global_ctotal);
++ return new;
++}
++
++/* __dealloc_nx_info()
++
++ * final disposal of nx_info */
++
++static void __dealloc_nx_info(struct nx_info *nxi)
++{
++ vxdprintk(VXD_CBIT(nid, 0),
++ "dealloc_nx_info(%p)", nxi);
++
++ nxi->nx_hlist.next = LIST_POISON1;
++ nxi->nx_id = -1;
++
++ BUG_ON(atomic_read(&nxi->nx_usecnt));
++ BUG_ON(atomic_read(&nxi->nx_tasks));
++
++ __dealloc_nx_addr_v4_all(nxi->v4.next);
++
++ nxi->nx_state |= NXS_RELEASED;
++ kfree(nxi);
++ atomic_dec(&nx_global_ctotal);
++}
++
++static void __shutdown_nx_info(struct nx_info *nxi)
++{
++ nxi->nx_state |= NXS_SHUTDOWN;
++ vs_net_change(nxi, VSC_NETDOWN);
++}
++
++/* exported stuff */
++
++void free_nx_info(struct nx_info *nxi)
++{
++ /* context shutdown is mandatory */
++ BUG_ON(nxi->nx_state != NXS_SHUTDOWN);
++
++ /* context must not be hashed */
++ BUG_ON(nxi->nx_state & NXS_HASHED);
++
++ BUG_ON(atomic_read(&nxi->nx_usecnt));
++ BUG_ON(atomic_read(&nxi->nx_tasks));
++
++ __dealloc_nx_info(nxi);
++}
++
++
++void __nx_set_lback(struct nx_info *nxi)
++{
++ int nid = nxi->nx_id;
++ __be32 lback = htonl(INADDR_LOOPBACK ^ ((nid & 0xFFFF) << 8));
++
++ nxi->v4_lback.s_addr = lback;
++}
++
++extern int __nx_inet_add_lback(__be32 addr);
++extern int __nx_inet_del_lback(__be32 addr);
++
++
++/* hash table for nx_info hash */
++
++#define NX_HASH_SIZE 13
++
++struct hlist_head nx_info_hash[NX_HASH_SIZE];
++
++static DEFINE_SPINLOCK(nx_info_hash_lock);
++
++
++static inline unsigned int __hashval(nid_t nid)
++{
++ return (nid % NX_HASH_SIZE);
++}
++
++
++
++/* __hash_nx_info()
++
++ * add the nxi to the global hash table
++ * requires the hash_lock to be held */
++
++static inline void __hash_nx_info(struct nx_info *nxi)
++{
++ struct hlist_head *head;
++
++ vxd_assert_lock(&nx_info_hash_lock);
++ vxdprintk(VXD_CBIT(nid, 4),
++ "__hash_nx_info: %p[#%d]", nxi, nxi->nx_id);
++
++ /* context must not be hashed */
++ BUG_ON(nx_info_state(nxi, NXS_HASHED));
++
++ nxi->nx_state |= NXS_HASHED;
++ head = &nx_info_hash[__hashval(nxi->nx_id)];
++ hlist_add_head(&nxi->nx_hlist, head);
++ atomic_inc(&nx_global_cactive);
++}
++
++/* __unhash_nx_info()
++
++ * remove the nxi from the global hash table
++ * requires the hash_lock to be held */
++
++static inline void __unhash_nx_info(struct nx_info *nxi)
++{
++ vxd_assert_lock(&nx_info_hash_lock);
++ vxdprintk(VXD_CBIT(nid, 4),
++ "__unhash_nx_info: %p[#%d.%d.%d]", nxi, nxi->nx_id,
++ atomic_read(&nxi->nx_usecnt), atomic_read(&nxi->nx_tasks));
++
++ /* context must be hashed */
++ BUG_ON(!nx_info_state(nxi, NXS_HASHED));
++ /* but without tasks */
++ BUG_ON(atomic_read(&nxi->nx_tasks));
++
++ nxi->nx_state &= ~NXS_HASHED;
++ hlist_del(&nxi->nx_hlist);
++ atomic_dec(&nx_global_cactive);
++}
++
++
++/* __lookup_nx_info()
++
++ * requires the hash_lock to be held
++ * doesn't increment the nx_refcnt */
++
++static inline struct nx_info *__lookup_nx_info(nid_t nid)
++{
++ struct hlist_head *head = &nx_info_hash[__hashval(nid)];
++ struct hlist_node *pos;
++ struct nx_info *nxi;
++
++ vxd_assert_lock(&nx_info_hash_lock);
++ hlist_for_each(pos, head) {
++ nxi = hlist_entry(pos, struct nx_info, nx_hlist);
++
++ if (nxi->nx_id == nid)
++ goto found;
++ }
++ nxi = NULL;
++found:
++ vxdprintk(VXD_CBIT(nid, 0),
++ "__lookup_nx_info(#%u): %p[#%u]",
++ nid, nxi, nxi ? nxi->nx_id : 0);
++ return nxi;
++}
++
++
++/* __create_nx_info()
++
++ * create the requested context
++ * get(), claim() and hash it */
++
++static struct nx_info *__create_nx_info(int id)
++{
++ struct nx_info *new, *nxi = NULL;
++
++ vxdprintk(VXD_CBIT(nid, 1), "create_nx_info(%d)*", id);
++
++ if (!(new = __alloc_nx_info(id)))
++ return ERR_PTR(-ENOMEM);
++
++	/* required to make dynamic nids unique */
++ spin_lock(&nx_info_hash_lock);
++
++ /* static context requested */
++ if ((nxi = __lookup_nx_info(id))) {
++ vxdprintk(VXD_CBIT(nid, 0),
++ "create_nx_info(%d) = %p (already there)", id, nxi);
++ if (nx_info_flags(nxi, NXF_STATE_SETUP, 0))
++ nxi = ERR_PTR(-EBUSY);
++ else
++ nxi = ERR_PTR(-EEXIST);
++ goto out_unlock;
++ }
++ /* new context */
++ vxdprintk(VXD_CBIT(nid, 0),
++ "create_nx_info(%d) = %p (new)", id, new);
++ claim_nx_info(new, NULL);
++ __nx_set_lback(new);
++ __hash_nx_info(get_nx_info(new));
++ nxi = new, new = NULL;
++
++out_unlock:
++ spin_unlock(&nx_info_hash_lock);
++ if (new)
++ __dealloc_nx_info(new);
++ return nxi;
++}
++
++
++
++/* exported stuff */
++
++
++void unhash_nx_info(struct nx_info *nxi)
++{
++ __shutdown_nx_info(nxi);
++ spin_lock(&nx_info_hash_lock);
++ __unhash_nx_info(nxi);
++ spin_unlock(&nx_info_hash_lock);
++}
++
++/* lookup_nx_info()
++
++ * search for a nx_info and get() it
++ * negative id means current */
++
++struct nx_info *lookup_nx_info(int id)
++{
++ struct nx_info *nxi = NULL;
++
++ if (id < 0) {
++ nxi = get_nx_info(current_nx_info());
++ } else if (id > 1) {
++ spin_lock(&nx_info_hash_lock);
++ nxi = get_nx_info(__lookup_nx_info(id));
++ spin_unlock(&nx_info_hash_lock);
++ }
++ return nxi;
++}
++
++/* nid_is_hashed()
++
++ * verify that nid is still hashed */
++
++int nid_is_hashed(nid_t nid)
++{
++ int hashed;
++
++ spin_lock(&nx_info_hash_lock);
++ hashed = (__lookup_nx_info(nid) != NULL);
++ spin_unlock(&nx_info_hash_lock);
++ return hashed;
++}
++
++
++#ifdef CONFIG_PROC_FS
++
++/* get_nid_list()
++
++ * get a subset of hashed nids for proc
++ * assumes size is at least one */
++
++int get_nid_list(int index, unsigned int *nids, int size)
++{
++ int hindex, nr_nids = 0;
++
++ /* only show current and children */
++ if (!nx_check(0, VS_ADMIN | VS_WATCH)) {
++ if (index > 0)
++ return 0;
++ nids[nr_nids] = nx_current_nid();
++ return 1;
++ }
++
++ for (hindex = 0; hindex < NX_HASH_SIZE; hindex++) {
++ struct hlist_head *head = &nx_info_hash[hindex];
++ struct hlist_node *pos;
++
++ spin_lock(&nx_info_hash_lock);
++ hlist_for_each(pos, head) {
++ struct nx_info *nxi;
++
++ if (--index > 0)
++ continue;
++
++ nxi = hlist_entry(pos, struct nx_info, nx_hlist);
++ nids[nr_nids] = nxi->nx_id;
++ if (++nr_nids >= size) {
++ spin_unlock(&nx_info_hash_lock);
++ goto out;
++ }
++ }
++ /* keep the lock time short */
++ spin_unlock(&nx_info_hash_lock);
++ }
++out:
++ return nr_nids;
++}
++#endif
++
++
++/*
++ * migrate task to new network
++ * gets nxi, puts old_nxi on change
++ */
++
++int nx_migrate_task(struct task_struct *p, struct nx_info *nxi)
++{
++ struct nx_info *old_nxi;
++ int ret = 0;
++
++ if (!p || !nxi)
++ BUG();
++
++ vxdprintk(VXD_CBIT(nid, 5),
++ "nx_migrate_task(%p,%p[#%d.%d.%d])",
++ p, nxi, nxi->nx_id,
++ atomic_read(&nxi->nx_usecnt),
++ atomic_read(&nxi->nx_tasks));
++
++ if (nx_info_flags(nxi, NXF_INFO_PRIVATE, 0) &&
++ !nx_info_flags(nxi, NXF_STATE_SETUP, 0))
++ return -EACCES;
++
++ if (nx_info_state(nxi, NXS_SHUTDOWN))
++ return -EFAULT;
++
++ /* maybe disallow this completely? */
++ old_nxi = task_get_nx_info(p);
++ if (old_nxi == nxi)
++ goto out;
++
++ task_lock(p);
++ if (old_nxi)
++ clr_nx_info(&p->nx_info);
++ claim_nx_info(nxi, p);
++ set_nx_info(&p->nx_info, nxi);
++ p->nid = nxi->nx_id;
++ task_unlock(p);
++
++ vxdprintk(VXD_CBIT(nid, 5),
++ "moved task %p into nxi:%p[#%d]",
++ p, nxi, nxi->nx_id);
++
++ if (old_nxi)
++ release_nx_info(old_nxi, p);
++ ret = 0;
++out:
++ put_nx_info(old_nxi);
++ return ret;
++}
++
++
++void nx_set_persistent(struct nx_info *nxi)
++{
++ vxdprintk(VXD_CBIT(nid, 6),
++ "nx_set_persistent(%p[#%d])", nxi, nxi->nx_id);
++
++ get_nx_info(nxi);
++ claim_nx_info(nxi, NULL);
++}
++
++void nx_clear_persistent(struct nx_info *nxi)
++{
++ vxdprintk(VXD_CBIT(nid, 6),
++ "nx_clear_persistent(%p[#%d])", nxi, nxi->nx_id);
++
++ release_nx_info(nxi, NULL);
++ put_nx_info(nxi);
++}
++
++void nx_update_persistent(struct nx_info *nxi)
++{
++ if (nx_info_flags(nxi, NXF_PERSISTENT, 0))
++ nx_set_persistent(nxi);
++ else
++ nx_clear_persistent(nxi);
++}
++
++/* vserver syscall commands below here */
++
++/* task nid and nx_info functions */
++
++#include <asm/uaccess.h>
++
++
++int vc_task_nid(uint32_t id)
++{
++ nid_t nid;
++
++ if (id) {
++ struct task_struct *tsk;
++
++ rcu_read_lock();
++ tsk = find_task_by_real_pid(id);
++ nid = (tsk) ? tsk->nid : -ESRCH;
++ rcu_read_unlock();
++ } else
++ nid = nx_current_nid();
++ return nid;
++}
++
++
++int vc_nx_info(struct nx_info *nxi, void __user *data)
++{
++ struct vcmd_nx_info_v0 vc_data;
++
++ vc_data.nid = nxi->nx_id;
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return 0;
++}
++
++
++/* network functions */
++
++int vc_net_create(uint32_t nid, void __user *data)
++{
++ struct vcmd_net_create vc_data = { .flagword = NXF_INIT_SET };
++ struct nx_info *new_nxi;
++ int ret;
++
++ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ if ((nid > MAX_S_CONTEXT) || (nid < 2))
++ return -EINVAL;
++
++ new_nxi = __create_nx_info(nid);
++ if (IS_ERR(new_nxi))
++ return PTR_ERR(new_nxi);
++
++ /* initial flags */
++ new_nxi->nx_flags = vc_data.flagword;
++
++ ret = -ENOEXEC;
++ if (vs_net_change(new_nxi, VSC_NETUP))
++ goto out;
++
++ ret = nx_migrate_task(current, new_nxi);
++ if (ret)
++ goto out;
++
++ /* return context id on success */
++ ret = new_nxi->nx_id;
++
++ /* get a reference for persistent contexts */
++ if ((vc_data.flagword & NXF_PERSISTENT))
++ nx_set_persistent(new_nxi);
++out:
++ release_nx_info(new_nxi, NULL);
++ put_nx_info(new_nxi);
++ return ret;
++}
++
++
++int vc_net_migrate(struct nx_info *nxi, void __user *data)
++{
++ return nx_migrate_task(current, nxi);
++}
++
++
++
++int do_add_v4_addr(struct nx_info *nxi, __be32 ip, __be32 ip2, __be32 mask,
++ uint16_t type, uint16_t flags)
++{
++ struct nx_addr_v4 *nxa = &nxi->v4;
++
++ if (NX_IPV4(nxi)) {
++ /* locate last entry */
++ for (; nxa->next; nxa = nxa->next);
++ nxa->next = __alloc_nx_addr_v4();
++ nxa = nxa->next;
++
++ if (IS_ERR(nxa))
++ return PTR_ERR(nxa);
++ }
++
++ if (nxi->v4.next)
++		/* clear the single-ip flag when using an ip list */
++ nxi->nx_flags &= ~NXF_SINGLE_IP;
++
++ nxa->ip[0].s_addr = ip;
++ nxa->ip[1].s_addr = ip2;
++ nxa->mask.s_addr = mask;
++ nxa->type = type;
++ nxa->flags = flags;
++ return 0;
++}
++
++int do_remove_v4_addr(struct nx_info *nxi, __be32 ip, __be32 ip2, __be32 mask,
++ uint16_t type, uint16_t flags)
++{
++ struct nx_addr_v4 *nxa = &nxi->v4;
++
++ switch (type) {
++/* case NXA_TYPE_ADDR:
++ break; */
++
++ case NXA_TYPE_ANY:
++ __dealloc_nx_addr_v4_all(xchg(&nxa->next, NULL));
++ memset(nxa, 0, sizeof(*nxa));
++ break;
++
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++
++int vc_net_add(struct nx_info *nxi, void __user *data)
++{
++ struct vcmd_net_addr_v0 vc_data;
++ int index, ret = 0;
++
++ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ switch (vc_data.type) {
++ case NXA_TYPE_IPV4:
++ if ((vc_data.count < 1) || (vc_data.count > 4))
++ return -EINVAL;
++
++ index = 0;
++ while (index < vc_data.count) {
++ ret = do_add_v4_addr(nxi, vc_data.ip[index].s_addr, 0,
++ vc_data.mask[index].s_addr, NXA_TYPE_ADDR, 0);
++ if (ret)
++ return ret;
++ index++;
++ }
++ ret = index;
++ break;
++
++ case NXA_TYPE_IPV4|NXA_MOD_BCAST:
++ nxi->v4_bcast = vc_data.ip[0];
++ ret = 1;
++ break;
++
++ case NXA_TYPE_IPV4|NXA_MOD_LBACK:
++ nxi->v4_lback = vc_data.ip[0];
++ ret = 1;
++ break;
++
++ default:
++ ret = -EINVAL;
++ break;
++ }
++ return ret;
++}
++
++int vc_net_remove(struct nx_info *nxi, void __user *data)
++{
++ struct vcmd_net_addr_v0 vc_data;
++
++ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ switch (vc_data.type) {
++ case NXA_TYPE_ANY:
++ __dealloc_nx_addr_v4_all(xchg(&nxi->v4.next, NULL));
++ memset(&nxi->v4, 0, sizeof(nxi->v4));
++ break;
++
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++
++int vc_net_add_ipv4_v1(struct nx_info *nxi, void __user *data)
++{
++ struct vcmd_net_addr_ipv4_v1 vc_data;
++
++ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ switch (vc_data.type) {
++ case NXA_TYPE_ADDR:
++ case NXA_TYPE_MASK:
++ return do_add_v4_addr(nxi, vc_data.ip.s_addr, 0,
++ vc_data.mask.s_addr, vc_data.type, vc_data.flags);
++
++ case NXA_TYPE_ADDR | NXA_MOD_BCAST:
++ nxi->v4_bcast = vc_data.ip;
++ break;
++
++ case NXA_TYPE_ADDR | NXA_MOD_LBACK:
++ nxi->v4_lback = vc_data.ip;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++int vc_net_add_ipv4(struct nx_info *nxi, void __user *data)
++{
++ struct vcmd_net_addr_ipv4_v2 vc_data;
++
++ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ switch (vc_data.type) {
++ case NXA_TYPE_ADDR:
++ case NXA_TYPE_MASK:
++ case NXA_TYPE_RANGE:
++ return do_add_v4_addr(nxi, vc_data.ip.s_addr, vc_data.ip2.s_addr,
++ vc_data.mask.s_addr, vc_data.type, vc_data.flags);
++
++ case NXA_TYPE_ADDR | NXA_MOD_BCAST:
++ nxi->v4_bcast = vc_data.ip;
++ break;
++
++ case NXA_TYPE_ADDR | NXA_MOD_LBACK:
++ nxi->v4_lback = vc_data.ip;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++int vc_net_rem_ipv4_v1(struct nx_info *nxi, void __user *data)
++{
++ struct vcmd_net_addr_ipv4_v1 vc_data;
++
++ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ return do_remove_v4_addr(nxi, vc_data.ip.s_addr, 0,
++ vc_data.mask.s_addr, vc_data.type, vc_data.flags);
++}
++
++int vc_net_rem_ipv4(struct nx_info *nxi, void __user *data)
++{
++ struct vcmd_net_addr_ipv4_v2 vc_data;
++
++ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ return do_remove_v4_addr(nxi, vc_data.ip.s_addr, vc_data.ip2.s_addr,
++ vc_data.mask.s_addr, vc_data.type, vc_data.flags);
++}
++
++#ifdef CONFIG_IPV6
++
++int do_add_v6_addr(struct nx_info *nxi,
++ struct in6_addr *ip, struct in6_addr *mask,
++ uint32_t prefix, uint16_t type, uint16_t flags)
++{
++ struct nx_addr_v6 *nxa = &nxi->v6;
++
++ if (NX_IPV6(nxi)) {
++ /* locate last entry */
++ for (; nxa->next; nxa = nxa->next);
++ nxa->next = __alloc_nx_addr_v6();
++ nxa = nxa->next;
++
++ if (IS_ERR(nxa))
++ return PTR_ERR(nxa);
++ }
++
++ nxa->ip = *ip;
++ nxa->mask = *mask;
++ nxa->prefix = prefix;
++ nxa->type = type;
++ nxa->flags = flags;
++ return 0;
++}
++
++
++int vc_net_add_ipv6(struct nx_info *nxi, void __user *data)
++{
++ struct vcmd_net_addr_ipv6_v1 vc_data;
++
++ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ switch (vc_data.type) {
++ case NXA_TYPE_ADDR:
++ memset(&vc_data.mask, ~0, sizeof(vc_data.mask));
++ /* fallthrough */
++ case NXA_TYPE_MASK:
++ return do_add_v6_addr(nxi, &vc_data.ip, &vc_data.mask,
++ vc_data.prefix, vc_data.type, vc_data.flags);
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++int vc_net_remove_ipv6(struct nx_info *nxi, void __user *data)
++{
++ struct vcmd_net_addr_ipv6_v1 vc_data;
++
++ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ switch (vc_data.type) {
++ case NXA_TYPE_ANY:
++ __dealloc_nx_addr_v6_all(xchg(&nxi->v6.next, NULL));
++ memset(&nxi->v6, 0, sizeof(nxi->v6));
++ break;
++
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++#endif /* CONFIG_IPV6 */
++
++
++int vc_get_nflags(struct nx_info *nxi, void __user *data)
++{
++ struct vcmd_net_flags_v0 vc_data;
++
++ vc_data.flagword = nxi->nx_flags;
++
++ /* special STATE flag handling */
++ vc_data.mask = vs_mask_flags(~0ULL, nxi->nx_flags, NXF_ONE_TIME);
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return 0;
++}
++
++int vc_set_nflags(struct nx_info *nxi, void __user *data)
++{
++ struct vcmd_net_flags_v0 vc_data;
++ uint64_t mask, trigger;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ /* special STATE flag handling */
++ mask = vs_mask_mask(vc_data.mask, nxi->nx_flags, NXF_ONE_TIME);
++ trigger = (mask & nxi->nx_flags) ^ (mask & vc_data.flagword);
++
++ nxi->nx_flags = vs_mask_flags(nxi->nx_flags,
++ vc_data.flagword, mask);
++ if (trigger & NXF_PERSISTENT)
++ nx_update_persistent(nxi);
++
++ return 0;
++}
++
++int vc_get_ncaps(struct nx_info *nxi, void __user *data)
++{
++ struct vcmd_net_caps_v0 vc_data;
++
++ vc_data.ncaps = nxi->nx_ncaps;
++ vc_data.cmask = ~0ULL;
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return 0;
++}
++
++int vc_set_ncaps(struct nx_info *nxi, void __user *data)
++{
++ struct vcmd_net_caps_v0 vc_data;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ nxi->nx_ncaps = vs_mask_flags(nxi->nx_ncaps,
++ vc_data.ncaps, vc_data.cmask);
++ return 0;
++}
++
++
++#include <linux/module.h>
++
++module_init(init_network);
++
++EXPORT_SYMBOL_GPL(free_nx_info);
++EXPORT_SYMBOL_GPL(unhash_nx_info);
++
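
__nx_set_lback() above gives each network context its own loopback address by XORing the low 16 bits of the nid, shifted left by 8, into 127.0.0.1, so context 42 sees 127.0.42.1 and nid 0 keeps plain 127.0.0.1. A small sketch of that mapping (the constant name is illustrative):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    #define LOOPBACK_H 0x7f000001u          /* 127.0.0.1 in host byte order */

    /* compute the per-context loopback address */
    static uint32_t nid_loopback(uint16_t nid)
    {
        return LOOPBACK_H ^ ((uint32_t)(nid & 0xFFFF) << 8);
    }

    int main(void)
    {
        uint16_t nid = 42;
        struct in_addr a = { .s_addr = htonl(nid_loopback(nid)) };

        printf("nid %u -> %s\n", (unsigned)nid, inet_ntoa(a));   /* 127.0.42.1 */
        return 0;
    }
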
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/proc.c linux-3.6.10-vs2.3.4.6/kernel/vserver/proc.c
+--- linux-3.6.10/kernel/vserver/proc.c 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/proc.c 2012-10-09 12:19:59.000000000 +0000
+@@ -0,0 +1,1110 @@
++/*
++ * linux/kernel/vserver/proc.c
++ *
++ * Virtual Context Support
++ *
++ * Copyright (C) 2003-2011 Herbert Pötzl
++ *
++ * V0.01 basic structure
++ * V0.02 adaptation vs1.3.0
++ * V0.03 proc permissions
++ * V0.04 locking/generic
++ * V0.05 next generation procfs
++ * V0.06 inode validation
++ * V0.07 generic rewrite vid
++ * V0.08 remove inode type
++ * V0.09 added u/wmask info
++ *
++ */
++
++#include <linux/proc_fs.h>
++#include <linux/fs_struct.h>
++#include <linux/mount.h>
++#include <linux/namei.h>
++#include <asm/unistd.h>
++
++#include <linux/vs_context.h>
++#include <linux/vs_network.h>
++#include <linux/vs_cvirt.h>
++
++#include <linux/in.h>
++#include <linux/inetdevice.h>
++#include <linux/vs_inet.h>
++#include <linux/vs_inet6.h>
++
++#include <linux/vserver/global.h>
++
++#include "cvirt_proc.h"
++#include "cacct_proc.h"
++#include "limit_proc.h"
++#include "sched_proc.h"
++#include "vci_config.h"
++
++
++static inline char *print_cap_t(char *buffer, kernel_cap_t *c)
++{
++ unsigned __capi;
++
++ CAP_FOR_EACH_U32(__capi) {
++ buffer += sprintf(buffer, "%08x",
++ c->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
++ }
++ return buffer;
++}
++
++
++static struct proc_dir_entry *proc_virtual;
++
++static struct proc_dir_entry *proc_virtnet;
++
++
++/* first the actual feeds */
++
++
++static int proc_vci(char *buffer)
++{
++ return sprintf(buffer,
++ "VCIVersion:\t%04x:%04x\n"
++ "VCISyscall:\t%d\n"
++ "VCIKernel:\t%08x\n",
++ VCI_VERSION >> 16,
++ VCI_VERSION & 0xFFFF,
++ __NR_vserver,
++ vci_kernel_config());
++}
++
++static int proc_virtual_info(char *buffer)
++{
++ return proc_vci(buffer);
++}
++
++static int proc_virtual_status(char *buffer)
++{
++ return sprintf(buffer,
++ "#CTotal:\t%d\n"
++ "#CActive:\t%d\n"
++ "#NSProxy:\t%d\t%d %d %d %d %d %d\n"
++ "#InitTask:\t%d\t%d %d\n",
++ atomic_read(&vx_global_ctotal),
++ atomic_read(&vx_global_cactive),
++ atomic_read(&vs_global_nsproxy),
++ atomic_read(&vs_global_fs),
++ atomic_read(&vs_global_mnt_ns),
++ atomic_read(&vs_global_uts_ns),
++ atomic_read(&nr_ipc_ns),
++ atomic_read(&vs_global_user_ns),
++ atomic_read(&vs_global_pid_ns),
++ atomic_read(&init_task.usage),
++ atomic_read(&init_task.nsproxy->count),
++ init_task.fs->users);
++}
++
++
++int proc_vxi_info(struct vx_info *vxi, char *buffer)
++{
++ int length;
++
++ length = sprintf(buffer,
++ "ID:\t%d\n"
++ "Info:\t%p\n"
++ "Init:\t%d\n"
++ "OOM:\t%lld\n",
++ vxi->vx_id,
++ vxi,
++ vxi->vx_initpid,
++ vxi->vx_badness_bias);
++ return length;
++}
++
++int proc_vxi_status(struct vx_info *vxi, char *buffer)
++{
++ char *orig = buffer;
++
++ buffer += sprintf(buffer,
++ "UseCnt:\t%d\n"
++ "Tasks:\t%d\n"
++ "Flags:\t%016llx\n",
++ atomic_read(&vxi->vx_usecnt),
++ atomic_read(&vxi->vx_tasks),
++ (unsigned long long)vxi->vx_flags);
++
++ buffer += sprintf(buffer, "BCaps:\t");
++ buffer = print_cap_t(buffer, &vxi->vx_bcaps);
++ buffer += sprintf(buffer, "\n");
++
++ buffer += sprintf(buffer,
++ "CCaps:\t%016llx\n"
++ "Umask:\t%16llx\n"
++ "Wmask:\t%16llx\n"
++ "Spaces:\t%08lx %08lx\n",
++ (unsigned long long)vxi->vx_ccaps,
++ (unsigned long long)vxi->vx_umask,
++ (unsigned long long)vxi->vx_wmask,
++ vxi->space[0].vx_nsmask, vxi->space[1].vx_nsmask);
++ return buffer - orig;
++}
++
++int proc_vxi_limit(struct vx_info *vxi, char *buffer)
++{
++ return vx_info_proc_limit(&vxi->limit, buffer);
++}
++
++int proc_vxi_sched(struct vx_info *vxi, char *buffer)
++{
++ int cpu, length;
++
++ length = vx_info_proc_sched(&vxi->sched, buffer);
++ for_each_online_cpu(cpu) {
++ length += vx_info_proc_sched_pc(
++ &vx_per_cpu(vxi, sched_pc, cpu),
++ buffer + length, cpu);
++ }
++ return length;
++}
++
++int proc_vxi_nsproxy0(struct vx_info *vxi, char *buffer)
++{
++ return vx_info_proc_nsproxy(vxi->space[0].vx_nsproxy, buffer);
++}
++
++int proc_vxi_nsproxy1(struct vx_info *vxi, char *buffer)
++{
++ return vx_info_proc_nsproxy(vxi->space[1].vx_nsproxy, buffer);
++}
++
++int proc_vxi_cvirt(struct vx_info *vxi, char *buffer)
++{
++ int cpu, length;
++
++ vx_update_load(vxi);
++ length = vx_info_proc_cvirt(&vxi->cvirt, buffer);
++ for_each_online_cpu(cpu) {
++ length += vx_info_proc_cvirt_pc(
++ &vx_per_cpu(vxi, cvirt_pc, cpu),
++ buffer + length, cpu);
++ }
++ return length;
++}
++
++int proc_vxi_cacct(struct vx_info *vxi, char *buffer)
++{
++ return vx_info_proc_cacct(&vxi->cacct, buffer);
++}
++
++
++static int proc_virtnet_info(char *buffer)
++{
++ return proc_vci(buffer);
++}
++
++static int proc_virtnet_status(char *buffer)
++{
++ return sprintf(buffer,
++ "#CTotal:\t%d\n"
++ "#CActive:\t%d\n",
++ atomic_read(&nx_global_ctotal),
++ atomic_read(&nx_global_cactive));
++}
++
++int proc_nxi_info(struct nx_info *nxi, char *buffer)
++{
++ struct nx_addr_v4 *v4a;
++#ifdef CONFIG_IPV6
++ struct nx_addr_v6 *v6a;
++#endif
++ int length, i;
++
++ length = sprintf(buffer,
++ "ID:\t%d\n"
++ "Info:\t%p\n"
++ "Bcast:\t" NIPQUAD_FMT "\n"
++ "Lback:\t" NIPQUAD_FMT "\n",
++ nxi->nx_id,
++ nxi,
++ NIPQUAD(nxi->v4_bcast.s_addr),
++ NIPQUAD(nxi->v4_lback.s_addr));
++
++ if (!NX_IPV4(nxi))
++ goto skip_v4;
++ for (i = 0, v4a = &nxi->v4; v4a; i++, v4a = v4a->next)
++ length += sprintf(buffer + length, "%d:\t" NXAV4_FMT "\n",
++ i, NXAV4(v4a));
++skip_v4:
++#ifdef CONFIG_IPV6
++ if (!NX_IPV6(nxi))
++ goto skip_v6;
++ for (i = 0, v6a = &nxi->v6; v6a; i++, v6a = v6a->next)
++ length += sprintf(buffer + length, "%d:\t" NXAV6_FMT "\n",
++ i, NXAV6(v6a));
++skip_v6:
++#endif
++ return length;
++}
++
++int proc_nxi_status(struct nx_info *nxi, char *buffer)
++{
++ int length;
++
++ length = sprintf(buffer,
++ "UseCnt:\t%d\n"
++ "Tasks:\t%d\n"
++ "Flags:\t%016llx\n"
++ "NCaps:\t%016llx\n",
++ atomic_read(&nxi->nx_usecnt),
++ atomic_read(&nxi->nx_tasks),
++ (unsigned long long)nxi->nx_flags,
++ (unsigned long long)nxi->nx_ncaps);
++ return length;
++}
++
++
++
++/* here the inode helpers */
++
++struct vs_entry {
++ int len;
++ char *name;
++ mode_t mode;
++ struct inode_operations *iop;
++ struct file_operations *fop;
++ union proc_op op;
++};
++
++static struct inode *vs_proc_make_inode(struct super_block *sb, struct vs_entry *p)
++{
++ struct inode *inode = new_inode(sb);
++
++ if (!inode)
++ goto out;
++
++ inode->i_mode = p->mode;
++ if (p->iop)
++ inode->i_op = p->iop;
++ if (p->fop)
++ inode->i_fop = p->fop;
++
++ set_nlink(inode, (p->mode & S_IFDIR) ? 2 : 1);
++ inode->i_flags |= S_IMMUTABLE;
++
++ inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
++
++ inode->i_uid = 0;
++ inode->i_gid = 0;
++ inode->i_tag = 0;
++out:
++ return inode;
++}
++
++static struct dentry *vs_proc_instantiate(struct inode *dir,
++ struct dentry *dentry, int id, void *ptr)
++{
++ struct vs_entry *p = ptr;
++ struct inode *inode = vs_proc_make_inode(dir->i_sb, p);
++ struct dentry *error = ERR_PTR(-EINVAL);
++
++ if (!inode)
++ goto out;
++
++ PROC_I(inode)->op = p->op;
++ PROC_I(inode)->fd = id;
++ d_add(dentry, inode);
++ error = NULL;
++out:
++ return error;
++}
++
++/* Lookups */
++
++typedef struct dentry *instantiate_t(struct inode *, struct dentry *, int, void *);
++
++/*
++ * Fill a directory entry.
++ *
++ * If possible create the dcache entry and derive our inode number and
++ * file type from dcache entry.
++ *
++ * Since all of the proc inode numbers are dynamically generated, the inode
++ * numbers do not exist until the inode is cached. This means creating
++ * the dcache entry in readdir is necessary to keep the inode numbers
++ * reported by readdir in sync with the inode numbers reported
++ * by stat.
++ */
++static int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
++ char *name, int len, instantiate_t instantiate, int id, void *ptr)
++{
++ struct dentry *child, *dir = filp->f_dentry;
++ struct inode *inode;
++ struct qstr qname;
++ ino_t ino = 0;
++ unsigned type = DT_UNKNOWN;
++
++ qname.name = name;
++ qname.len = len;
++ qname.hash = full_name_hash(name, len);
++
++ child = d_lookup(dir, &qname);
++ if (!child) {
++ struct dentry *new;
++ new = d_alloc(dir, &qname);
++ if (new) {
++ child = instantiate(dir->d_inode, new, id, ptr);
++ if (child)
++ dput(new);
++ else
++ child = new;
++ }
++ }
++ if (!child || IS_ERR(child) || !child->d_inode)
++ goto end_instantiate;
++ inode = child->d_inode;
++ if (inode) {
++ ino = inode->i_ino;
++ type = inode->i_mode >> 12;
++ }
++ dput(child);
++end_instantiate:
++ if (!ino)
++ ino = find_inode_number(dir, &qname);
++ if (!ino)
++ ino = 1;
++ return filldir(dirent, name, len, filp->f_pos, ino, type);
++}
++
++
++
++/* get and revalidate vx_info/xid */
++
++static inline
++struct vx_info *get_proc_vx_info(struct inode *inode)
++{
++ return lookup_vx_info(PROC_I(inode)->fd);
++}
++
++static int proc_xid_revalidate(struct dentry *dentry, unsigned int flags)
++{
++ struct inode *inode = dentry->d_inode;
++ xid_t xid = PROC_I(inode)->fd;
++
++ if (flags & LOOKUP_RCU) /* FIXME: can be dropped? */
++ return -ECHILD;
++
++ if (!xid || xid_is_hashed(xid))
++ return 1;
++ d_drop(dentry);
++ return 0;
++}
++
++
++/* get and revalidate nx_info/nid */
++
++static int proc_nid_revalidate(struct dentry *dentry, unsigned int flags)
++{
++ struct inode *inode = dentry->d_inode;
++ nid_t nid = PROC_I(inode)->fd;
++
++ if (flags & LOOKUP_RCU) /* FIXME: can be dropped? */
++ return -ECHILD;
++
++ if (!nid || nid_is_hashed(nid))
++ return 1;
++ d_drop(dentry);
++ return 0;
++}
++
++
++
++#define PROC_BLOCK_SIZE (PAGE_SIZE - 1024)
++
++static ssize_t proc_vs_info_read(struct file *file, char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ struct inode *inode = file->f_dentry->d_inode;
++ unsigned long page;
++ ssize_t length = 0;
++
++ if (count > PROC_BLOCK_SIZE)
++ count = PROC_BLOCK_SIZE;
++
++ /* fade that out as soon as stable */
++ WARN_ON(PROC_I(inode)->fd);
++
++ if (!(page = __get_free_page(GFP_KERNEL)))
++ return -ENOMEM;
++
++ BUG_ON(!PROC_I(inode)->op.proc_vs_read);
++ length = PROC_I(inode)->op.proc_vs_read((char *)page);
++
++ if (length >= 0)
++ length = simple_read_from_buffer(buf, count, ppos,
++ (char *)page, length);
++
++ free_page(page);
++ return length;
++}
++
++static ssize_t proc_vx_info_read(struct file *file, char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ struct inode *inode = file->f_dentry->d_inode;
++ struct vx_info *vxi = NULL;
++ xid_t xid = PROC_I(inode)->fd;
++ unsigned long page;
++ ssize_t length = 0;
++
++ if (count > PROC_BLOCK_SIZE)
++ count = PROC_BLOCK_SIZE;
++
++ /* fade that out as soon as stable */
++ WARN_ON(!xid);
++ vxi = lookup_vx_info(xid);
++ if (!vxi)
++ goto out;
++
++ length = -ENOMEM;
++ if (!(page = __get_free_page(GFP_KERNEL)))
++ goto out_put;
++
++ BUG_ON(!PROC_I(inode)->op.proc_vxi_read);
++ length = PROC_I(inode)->op.proc_vxi_read(vxi, (char *)page);
++
++ if (length >= 0)
++ length = simple_read_from_buffer(buf, count, ppos,
++ (char *)page, length);
++
++ free_page(page);
++out_put:
++ put_vx_info(vxi);
++out:
++ return length;
++}
++
++static ssize_t proc_nx_info_read(struct file *file, char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ struct inode *inode = file->f_dentry->d_inode;
++ struct nx_info *nxi = NULL;
++ nid_t nid = PROC_I(inode)->fd;
++ unsigned long page;
++ ssize_t length = 0;
++
++ if (count > PROC_BLOCK_SIZE)
++ count = PROC_BLOCK_SIZE;
++
++ /* fade that out as soon as stable */
++ WARN_ON(!nid);
++ nxi = lookup_nx_info(nid);
++ if (!nxi)
++ goto out;
++
++ length = -ENOMEM;
++ if (!(page = __get_free_page(GFP_KERNEL)))
++ goto out_put;
++
++ BUG_ON(!PROC_I(inode)->op.proc_nxi_read);
++ length = PROC_I(inode)->op.proc_nxi_read(nxi, (char *)page);
++
++ if (length >= 0)
++ length = simple_read_from_buffer(buf, count, ppos,
++ (char *)page, length);
++
++ free_page(page);
++out_put:
++ put_nx_info(nxi);
++out:
++ return length;
++}
++
++
++
++/* here comes the lower level */
++
++
++#define NOD(NAME, MODE, IOP, FOP, OP) { \
++ .len = sizeof(NAME) - 1, \
++ .name = (NAME), \
++ .mode = MODE, \
++ .iop = IOP, \
++ .fop = FOP, \
++ .op = OP, \
++}
++
++
++#define DIR(NAME, MODE, OTYPE) \
++ NOD(NAME, (S_IFDIR | (MODE)), \
++ &proc_ ## OTYPE ## _inode_operations, \
++ &proc_ ## OTYPE ## _file_operations, { } )
++
++#define INF(NAME, MODE, OTYPE) \
++ NOD(NAME, (S_IFREG | (MODE)), NULL, \
++ &proc_vs_info_file_operations, \
++ { .proc_vs_read = &proc_##OTYPE } )
++
++#define VINF(NAME, MODE, OTYPE) \
++ NOD(NAME, (S_IFREG | (MODE)), NULL, \
++ &proc_vx_info_file_operations, \
++ { .proc_vxi_read = &proc_##OTYPE } )
++
++#define NINF(NAME, MODE, OTYPE) \
++ NOD(NAME, (S_IFREG | (MODE)), NULL, \
++ &proc_nx_info_file_operations, \
++ { .proc_nxi_read = &proc_##OTYPE } )
++
++
++static struct file_operations proc_vs_info_file_operations = {
++ .read = proc_vs_info_read,
++};
++
++static struct file_operations proc_vx_info_file_operations = {
++ .read = proc_vx_info_read,
++};
++
++static struct dentry_operations proc_xid_dentry_operations = {
++ .d_revalidate = proc_xid_revalidate,
++};
++
++static struct vs_entry vx_base_stuff[] = {
++ VINF("info", S_IRUGO, vxi_info),
++ VINF("status", S_IRUGO, vxi_status),
++ VINF("limit", S_IRUGO, vxi_limit),
++ VINF("sched", S_IRUGO, vxi_sched),
++ VINF("nsproxy", S_IRUGO, vxi_nsproxy0),
++ VINF("nsproxy1",S_IRUGO, vxi_nsproxy1),
++ VINF("cvirt", S_IRUGO, vxi_cvirt),
++ VINF("cacct", S_IRUGO, vxi_cacct),
++ {}
++};
++
++
++
++
++static struct dentry *proc_xid_instantiate(struct inode *dir,
++ struct dentry *dentry, int id, void *ptr)
++{
++ dentry->d_op = &proc_xid_dentry_operations;
++ return vs_proc_instantiate(dir, dentry, id, ptr);
++}
++
++static struct dentry *proc_xid_lookup(struct inode *dir,
++ struct dentry *dentry, unsigned int flags)
++{
++ struct vs_entry *p = vx_base_stuff;
++ struct dentry *error = ERR_PTR(-ENOENT);
++
++ for (; p->name; p++) {
++ if (p->len != dentry->d_name.len)
++ continue;
++ if (!memcmp(dentry->d_name.name, p->name, p->len))
++ break;
++ }
++ if (!p->name)
++ goto out;
++
++ error = proc_xid_instantiate(dir, dentry, PROC_I(dir)->fd, p);
++out:
++ return error;
++}
++
++static int proc_xid_readdir(struct file *filp,
++ void *dirent, filldir_t filldir)
++{
++ struct dentry *dentry = filp->f_dentry;
++ struct inode *inode = dentry->d_inode;
++ struct vs_entry *p = vx_base_stuff;
++ int size = sizeof(vx_base_stuff) / sizeof(struct vs_entry);
++ int pos, index;
++ u64 ino;
++
++ pos = filp->f_pos;
++ switch (pos) {
++ case 0:
++ ino = inode->i_ino;
++ if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
++ goto out;
++ pos++;
++ /* fall through */
++ case 1:
++ ino = parent_ino(dentry);
++ if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
++ goto out;
++ pos++;
++ /* fall through */
++ default:
++ index = pos - 2;
++ if (index >= size)
++ goto out;
++ for (p += index; p->name; p++) {
++ if (proc_fill_cache(filp, dirent, filldir, p->name, p->len,
++ vs_proc_instantiate, PROC_I(inode)->fd, p))
++ goto out;
++ pos++;
++ }
++ }
++out:
++ filp->f_pos = pos;
++ return 1;
++}
++
++
++
++static struct file_operations proc_nx_info_file_operations = {
++ .read = proc_nx_info_read,
++};
++
++static struct dentry_operations proc_nid_dentry_operations = {
++ .d_revalidate = proc_nid_revalidate,
++};
++
++static struct vs_entry nx_base_stuff[] = {
++ NINF("info", S_IRUGO, nxi_info),
++ NINF("status", S_IRUGO, nxi_status),
++ {}
++};
++
++
++static struct dentry *proc_nid_instantiate(struct inode *dir,
++ struct dentry *dentry, int id, void *ptr)
++{
++ dentry->d_op = &proc_nid_dentry_operations;
++ return vs_proc_instantiate(dir, dentry, id, ptr);
++}
++
++static struct dentry *proc_nid_lookup(struct inode *dir,
++ struct dentry *dentry, unsigned int flags)
++{
++ struct vs_entry *p = nx_base_stuff;
++ struct dentry *error = ERR_PTR(-ENOENT);
++
++ for (; p->name; p++) {
++ if (p->len != dentry->d_name.len)
++ continue;
++ if (!memcmp(dentry->d_name.name, p->name, p->len))
++ break;
++ }
++ if (!p->name)
++ goto out;
++
++ error = proc_nid_instantiate(dir, dentry, PROC_I(dir)->fd, p);
++out:
++ return error;
++}
++
++static int proc_nid_readdir(struct file *filp,
++ void *dirent, filldir_t filldir)
++{
++ struct dentry *dentry = filp->f_dentry;
++ struct inode *inode = dentry->d_inode;
++ struct vs_entry *p = nx_base_stuff;
++ int size = sizeof(nx_base_stuff) / sizeof(struct vs_entry);
++ int pos, index;
++ u64 ino;
++
++ pos = filp->f_pos;
++ switch (pos) {
++ case 0:
++ ino = inode->i_ino;
++ if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
++ goto out;
++ pos++;
++ /* fall through */
++ case 1:
++ ino = parent_ino(dentry);
++ if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
++ goto out;
++ pos++;
++ /* fall through */
++ default:
++ index = pos - 2;
++ if (index >= size)
++ goto out;
++ for (p += index; p->name; p++) {
++ if (proc_fill_cache(filp, dirent, filldir, p->name, p->len,
++ vs_proc_instantiate, PROC_I(inode)->fd, p))
++ goto out;
++ pos++;
++ }
++ }
++out:
++ filp->f_pos = pos;
++ return 1;
++}
++
++
++#define MAX_MULBY10 ((~0U - 9) / 10)
++
++static inline int atovid(const char *str, int len)
++{
++ int vid, c;
++
++ vid = 0;
++ while (len-- > 0) {
++ c = *str - '0';
++ str++;
++ if (c > 9)
++ return -1;
++ if (vid >= MAX_MULBY10)
++ return -1;
++ vid *= 10;
++ vid += c;
++ if (!vid)
++ return -1;
++ }
++ return vid;
++}
++
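/*
 * Worked example (illustrative sketch, not part of the patch): how atovid()
 * above maps /proc/virtual and /proc/virtnet entry names to context ids.
 * The helper name below is made up; it only exercises atovid() as defined here.
 */
static inline void atovid_examples(void)
{
	int a = atovid("42", 2);	/* plain decimal        -> 42 */
	int b = atovid("4a", 2);	/* non-digit rejected   -> -1 */
	int c = atovid("042", 3);	/* running value hits 0 -> -1 */
	int d = atovid("0", 1);		/* id 0 is never returned -> -1 */

	(void)a; (void)b; (void)c; (void)d;
}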
++/* now the upper level (virtual) */
++
++
++static struct file_operations proc_xid_file_operations = {
++ .read = generic_read_dir,
++ .readdir = proc_xid_readdir,
++};
++
++static struct inode_operations proc_xid_inode_operations = {
++ .lookup = proc_xid_lookup,
++};
++
++static struct vs_entry vx_virtual_stuff[] = {
++ INF("info", S_IRUGO, virtual_info),
++ INF("status", S_IRUGO, virtual_status),
++ DIR(NULL, S_IRUGO | S_IXUGO, xid),
++};
++
++
++static struct dentry *proc_virtual_lookup(struct inode *dir,
++ struct dentry *dentry, unsigned int flags)
++{
++ struct vs_entry *p = vx_virtual_stuff;
++ struct dentry *error = ERR_PTR(-ENOENT);
++ int id = 0;
++
++ for (; p->name; p++) {
++ if (p->len != dentry->d_name.len)
++ continue;
++ if (!memcmp(dentry->d_name.name, p->name, p->len))
++ break;
++ }
++ if (p->name)
++ goto instantiate;
++
++ id = atovid(dentry->d_name.name, dentry->d_name.len);
++ if ((id < 0) || !xid_is_hashed(id))
++ goto out;
++
++instantiate:
++ error = proc_xid_instantiate(dir, dentry, id, p);
++out:
++ return error;
++}
++
++static struct file_operations proc_nid_file_operations = {
++ .read = generic_read_dir,
++ .readdir = proc_nid_readdir,
++};
++
++static struct inode_operations proc_nid_inode_operations = {
++ .lookup = proc_nid_lookup,
++};
++
++static struct vs_entry nx_virtnet_stuff[] = {
++ INF("info", S_IRUGO, virtnet_info),
++ INF("status", S_IRUGO, virtnet_status),
++ DIR(NULL, S_IRUGO | S_IXUGO, nid),
++};
++
++
++static struct dentry *proc_virtnet_lookup(struct inode *dir,
++ struct dentry *dentry, unsigned int flags)
++{
++ struct vs_entry *p = nx_virtnet_stuff;
++ struct dentry *error = ERR_PTR(-ENOENT);
++ int id = 0;
++
++ for (; p->name; p++) {
++ if (p->len != dentry->d_name.len)
++ continue;
++ if (!memcmp(dentry->d_name.name, p->name, p->len))
++ break;
++ }
++ if (p->name)
++ goto instantiate;
++
++ id = atovid(dentry->d_name.name, dentry->d_name.len);
++ if ((id < 0) || !nid_is_hashed(id))
++ goto out;
++
++instantiate:
++ error = proc_nid_instantiate(dir, dentry, id, p);
++out:
++ return error;
++}
++
++
++#define PROC_MAXVIDS 32
++
++int proc_virtual_readdir(struct file *filp,
++ void *dirent, filldir_t filldir)
++{
++ struct dentry *dentry = filp->f_dentry;
++ struct inode *inode = dentry->d_inode;
++ struct vs_entry *p = vx_virtual_stuff;
++ int size = sizeof(vx_virtual_stuff) / sizeof(struct vs_entry);
++ int pos, index;
++ unsigned int xid_array[PROC_MAXVIDS];
++ char buf[PROC_NUMBUF];
++ unsigned int nr_xids, i;
++ u64 ino;
++
++ pos = filp->f_pos;
++ switch (pos) {
++ case 0:
++ ino = inode->i_ino;
++ if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
++ goto out;
++ pos++;
++ /* fall through */
++ case 1:
++ ino = parent_ino(dentry);
++ if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
++ goto out;
++ pos++;
++ /* fall through */
++ default:
++ index = pos - 2;
++ if (index >= size)
++ goto entries;
++ for (p += index; p->name; p++) {
++ if (proc_fill_cache(filp, dirent, filldir, p->name, p->len,
++ vs_proc_instantiate, 0, p))
++ goto out;
++ pos++;
++ }
++ entries:
++ index = pos - size;
++ p = &vx_virtual_stuff[size - 1];
++ nr_xids = get_xid_list(index, xid_array, PROC_MAXVIDS);
++ for (i = 0; i < nr_xids; i++) {
++ int n, xid = xid_array[i];
++ unsigned int j = PROC_NUMBUF;
++
++ n = xid;
++ do
++ buf[--j] = '0' + (n % 10);
++ while (n /= 10);
++
++ if (proc_fill_cache(filp, dirent, filldir,
++ buf + j, PROC_NUMBUF - j,
++ vs_proc_instantiate, xid, p))
++ goto out;
++ pos++;
++ }
++ }
++out:
++ filp->f_pos = pos;
++ return 0;
++}
++
++static int proc_virtual_getattr(struct vfsmount *mnt,
++ struct dentry *dentry, struct kstat *stat)
++{
++ struct inode *inode = dentry->d_inode;
++
++ generic_fillattr(inode, stat);
++ stat->nlink = 2 + atomic_read(&vx_global_cactive);
++ return 0;
++}
++
++static struct file_operations proc_virtual_dir_operations = {
++ .read = generic_read_dir,
++ .readdir = proc_virtual_readdir,
++};
++
++static struct inode_operations proc_virtual_dir_inode_operations = {
++ .getattr = proc_virtual_getattr,
++ .lookup = proc_virtual_lookup,
++};
++
++
++
++
++
++int proc_virtnet_readdir(struct file *filp,
++ void *dirent, filldir_t filldir)
++{
++ struct dentry *dentry = filp->f_dentry;
++ struct inode *inode = dentry->d_inode;
++ struct vs_entry *p = nx_virtnet_stuff;
++ int size = sizeof(nx_virtnet_stuff) / sizeof(struct vs_entry);
++ int pos, index;
++ unsigned int nid_array[PROC_MAXVIDS];
++ char buf[PROC_NUMBUF];
++ unsigned int nr_nids, i;
++ u64 ino;
++
++ pos = filp->f_pos;
++ switch (pos) {
++ case 0:
++ ino = inode->i_ino;
++ if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
++ goto out;
++ pos++;
++ /* fall through */
++ case 1:
++ ino = parent_ino(dentry);
++ if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
++ goto out;
++ pos++;
++ /* fall through */
++ default:
++ index = pos - 2;
++ if (index >= size)
++ goto entries;
++ for (p += index; p->name; p++) {
++ if (proc_fill_cache(filp, dirent, filldir, p->name, p->len,
++ vs_proc_instantiate, 0, p))
++ goto out;
++ pos++;
++ }
++ entries:
++ index = pos - size;
++ p = &nx_virtnet_stuff[size - 1];
++ nr_nids = get_nid_list(index, nid_array, PROC_MAXVIDS);
++ for (i = 0; i < nr_nids; i++) {
++ int n, nid = nid_array[i];
++ unsigned int j = PROC_NUMBUF;
++
++ n = nid;
++ do
++ buf[--j] = '0' + (n % 10);
++ while (n /= 10);
++
++ if (proc_fill_cache(filp, dirent, filldir,
++ buf + j, PROC_NUMBUF - j,
++ vs_proc_instantiate, nid, p))
++ goto out;
++ pos++;
++ }
++ }
++out:
++ filp->f_pos = pos;
++ return 0;
++}
++
++static int proc_virtnet_getattr(struct vfsmount *mnt,
++ struct dentry *dentry, struct kstat *stat)
++{
++ struct inode *inode = dentry->d_inode;
++
++ generic_fillattr(inode, stat);
++ stat->nlink = 2 + atomic_read(&nx_global_cactive);
++ return 0;
++}
++
++static struct file_operations proc_virtnet_dir_operations = {
++ .read = generic_read_dir,
++ .readdir = proc_virtnet_readdir,
++};
++
++static struct inode_operations proc_virtnet_dir_inode_operations = {
++ .getattr = proc_virtnet_getattr,
++ .lookup = proc_virtnet_lookup,
++};
++
++
++
++void proc_vx_init(void)
++{
++ struct proc_dir_entry *ent;
++
++ ent = proc_mkdir("virtual", 0);
++ if (ent) {
++ ent->proc_fops = &proc_virtual_dir_operations;
++ ent->proc_iops = &proc_virtual_dir_inode_operations;
++ }
++ proc_virtual = ent;
++
++ ent = proc_mkdir("virtnet", 0);
++ if (ent) {
++ ent->proc_fops = &proc_virtnet_dir_operations;
++ ent->proc_iops = &proc_virtnet_dir_inode_operations;
++ }
++ proc_virtnet = ent;
++}
++
++
++
++
++/* per pid info */
++
++
++int proc_pid_vx_info(struct task_struct *p, char *buffer)
++{
++ struct vx_info *vxi;
++ char *orig = buffer;
++
++ buffer += sprintf(buffer, "XID:\t%d\n", vx_task_xid(p));
++
++ vxi = task_get_vx_info(p);
++ if (!vxi)
++ goto out;
++
++ buffer += sprintf(buffer, "BCaps:\t");
++ buffer = print_cap_t(buffer, &vxi->vx_bcaps);
++ buffer += sprintf(buffer, "\n");
++ buffer += sprintf(buffer, "CCaps:\t%016llx\n",
++ (unsigned long long)vxi->vx_ccaps);
++ buffer += sprintf(buffer, "CFlags:\t%016llx\n",
++ (unsigned long long)vxi->vx_flags);
++ buffer += sprintf(buffer, "CIPid:\t%d\n", vxi->vx_initpid);
++
++ put_vx_info(vxi);
++out:
++ return buffer - orig;
++}
++
++
++int proc_pid_nx_info(struct task_struct *p, char *buffer)
++{
++ struct nx_info *nxi;
++ struct nx_addr_v4 *v4a;
++#ifdef CONFIG_IPV6
++ struct nx_addr_v6 *v6a;
++#endif
++ char *orig = buffer;
++ int i;
++
++ buffer += sprintf(buffer, "NID:\t%d\n", nx_task_nid(p));
++
++ nxi = task_get_nx_info(p);
++ if (!nxi)
++ goto out;
++
++ buffer += sprintf(buffer, "NCaps:\t%016llx\n",
++ (unsigned long long)nxi->nx_ncaps);
++ buffer += sprintf(buffer, "NFlags:\t%016llx\n",
++ (unsigned long long)nxi->nx_flags);
++
++ buffer += sprintf(buffer,
++ "V4Root[bcast]:\t" NIPQUAD_FMT "\n",
++ NIPQUAD(nxi->v4_bcast.s_addr));
++ buffer += sprintf (buffer,
++ "V4Root[lback]:\t" NIPQUAD_FMT "\n",
++ NIPQUAD(nxi->v4_lback.s_addr));
++ if (!NX_IPV4(nxi))
++ goto skip_v4;
++ for (i = 0, v4a = &nxi->v4; v4a; i++, v4a = v4a->next)
++ buffer += sprintf(buffer, "V4Root[%d]:\t" NXAV4_FMT "\n",
++ i, NXAV4(v4a));
++skip_v4:
++#ifdef CONFIG_IPV6
++ if (!NX_IPV6(nxi))
++ goto skip_v6;
++ for (i = 0, v6a = &nxi->v6; v6a; i++, v6a = v6a->next)
++ buffer += sprintf(buffer, "V6Root[%d]:\t" NXAV6_FMT "\n",
++ i, NXAV6(v6a));
++skip_v6:
++#endif
++ put_nx_info(nxi);
++out:
++ return buffer - orig;
++}
++
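/*
 * Illustrative sketch (not part of the patch): shape of the output rendered
 * by proc_pid_vx_info() and proc_pid_nx_info() above, following their
 * sprintf format strings; the values below are placeholders, not real data.
 *
 *	XID:	42
 *	BCaps:	<rendered by print_cap_t>
 *	CCaps:	<16-digit hex>
 *	CFlags:	<16-digit hex>
 *	CIPid:	<vx_initpid>
 *
 *	NID:	42
 *	NCaps:	<16-digit hex>
 *	NFlags:	<16-digit hex>
 *	V4Root[bcast]:	<NIPQUAD_FMT address>
 *	V4Root[lback]:	<NIPQUAD_FMT address>
 *	V4Root[0]:	<NXAV4_FMT entry, one line per v4 address>
 */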
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/sched.c linux-3.6.10-vs2.3.4.6/kernel/vserver/sched.c
+--- linux-3.6.10/kernel/vserver/sched.c 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/sched.c 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,83 @@
++/*
++ * linux/kernel/vserver/sched.c
++ *
++ * Virtual Server: Scheduler Support
++ *
++ * Copyright (C) 2004-2010 Herbert Pötzl
++ *
++ * V0.01 adapted Sam Vilain's version to 2.6.3
++ * V0.02 removed legacy interface
++ * V0.03 changed vcmds to vxi arg
++ * V0.04 removed older and legacy interfaces
++ * V0.05 removed scheduler code/commands
++ *
++ */
++
++#include <linux/vs_context.h>
++#include <linux/vs_sched.h>
++#include <linux/cpumask.h>
++#include <linux/vserver/sched_cmd.h>
++
++#include <asm/uaccess.h>
++
++
++void vx_update_sched_param(struct _vx_sched *sched,
++ struct _vx_sched_pc *sched_pc)
++{
++ sched_pc->prio_bias = sched->prio_bias;
++}
++
++static int do_set_prio_bias(struct vx_info *vxi, struct vcmd_prio_bias *data)
++{
++ int cpu;
++
++ if (data->prio_bias > MAX_PRIO_BIAS)
++ data->prio_bias = MAX_PRIO_BIAS;
++ if (data->prio_bias < MIN_PRIO_BIAS)
++ data->prio_bias = MIN_PRIO_BIAS;
++
++ if (data->cpu_id != ~0) {
++ vxi->sched.update = cpumask_of_cpu(data->cpu_id);
++ cpumask_and(&vxi->sched.update, &vxi->sched.update,
++ cpu_online_mask);
++ } else
++ cpumask_copy(&vxi->sched.update, cpu_online_mask);
++
++ for_each_cpu_mask(cpu, vxi->sched.update)
++ vx_update_sched_param(&vxi->sched,
++ &vx_per_cpu(vxi, sched_pc, cpu));
++ return 0;
++}
++
++int vc_set_prio_bias(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_prio_bias vc_data;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ return do_set_prio_bias(vxi, &vc_data);
++}
++
++int vc_get_prio_bias(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_prio_bias vc_data;
++ struct _vx_sched_pc *pcd;
++ int cpu;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ cpu = vc_data.cpu_id;
++
++ if (!cpu_possible(cpu))
++ return -EINVAL;
++
++ pcd = &vx_per_cpu(vxi, sched_pc, cpu);
++ vc_data.prio_bias = pcd->prio_bias;
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ return -EFAULT;
++ return 0;
++}
++
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/sched_init.h linux-3.6.10-vs2.3.4.6/kernel/vserver/sched_init.h
+--- linux-3.6.10/kernel/vserver/sched_init.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/sched_init.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,27 @@
++
++static inline void vx_info_init_sched(struct _vx_sched *sched)
++{
++ /* scheduling; hard code starting values as constants */
++ sched->prio_bias = 0;
++}
++
++static inline
++void vx_info_init_sched_pc(struct _vx_sched_pc *sched_pc, int cpu)
++{
++ sched_pc->prio_bias = 0;
++
++ sched_pc->user_ticks = 0;
++ sched_pc->sys_ticks = 0;
++ sched_pc->hold_ticks = 0;
++}
++
++static inline void vx_info_exit_sched(struct _vx_sched *sched)
++{
++ return;
++}
++
++static inline
++void vx_info_exit_sched_pc(struct _vx_sched_pc *sched_pc, int cpu)
++{
++ return;
++}
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/sched_proc.h linux-3.6.10-vs2.3.4.6/kernel/vserver/sched_proc.h
+--- linux-3.6.10/kernel/vserver/sched_proc.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/sched_proc.h 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,32 @@
++#ifndef _VX_SCHED_PROC_H
++#define _VX_SCHED_PROC_H
++
++
++static inline
++int vx_info_proc_sched(struct _vx_sched *sched, char *buffer)
++{
++ int length = 0;
++
++ length += sprintf(buffer,
++ "PrioBias:\t%8d\n",
++ sched->prio_bias);
++ return length;
++}
++
++static inline
++int vx_info_proc_sched_pc(struct _vx_sched_pc *sched_pc,
++ char *buffer, int cpu)
++{
++ int length = 0;
++
++ length += sprintf(buffer + length,
++ "cpu %d: %lld %lld %lld", cpu,
++ (unsigned long long)sched_pc->user_ticks,
++ (unsigned long long)sched_pc->sys_ticks,
++ (unsigned long long)sched_pc->hold_ticks);
++ length += sprintf(buffer + length,
++ " %d\n", sched_pc->prio_bias);
++ return length;
++}
++
++#endif /* _VX_SCHED_PROC_H */
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/signal.c linux-3.6.10-vs2.3.4.6/kernel/vserver/signal.c
+--- linux-3.6.10/kernel/vserver/signal.c 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/signal.c 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,134 @@
++/*
++ * linux/kernel/vserver/signal.c
++ *
++ * Virtual Server: Signal Support
++ *
++ * Copyright (C) 2003-2007 Herbert Pötzl
++ *
++ * V0.01 broken out from vcontext V0.05
++ * V0.02 changed vcmds to vxi arg
++ * V0.03 adjusted siginfo for kill
++ *
++ */
++
++#include <asm/uaccess.h>
++
++#include <linux/vs_context.h>
++#include <linux/vs_pid.h>
++#include <linux/vserver/signal_cmd.h>
++
++
++int vx_info_kill(struct vx_info *vxi, int pid, int sig)
++{
++ int retval, count = 0;
++ struct task_struct *p;
++ struct siginfo *sip = SEND_SIG_PRIV;
++
++ retval = -ESRCH;
++ vxdprintk(VXD_CBIT(misc, 4),
++ "vx_info_kill(%p[#%d],%d,%d)*",
++ vxi, vxi->vx_id, pid, sig);
++ read_lock(&tasklist_lock);
++ switch (pid) {
++ case 0:
++ case -1:
++ for_each_process(p) {
++ int err = 0;
++
++ if (vx_task_xid(p) != vxi->vx_id || p->pid <= 1 ||
++ (pid && vxi->vx_initpid == p->pid))
++ continue;
++
++ err = group_send_sig_info(sig, sip, p);
++ ++count;
++ if (err != -EPERM)
++ retval = err;
++ }
++ break;
++
++ case 1:
++ if (vxi->vx_initpid) {
++ pid = vxi->vx_initpid;
++ /* for now, only SIGINT to private init ... */
++ if (!vx_info_flags(vxi, VXF_STATE_ADMIN, 0) &&
++ /* ... as long as there are tasks left */
++ (atomic_read(&vxi->vx_tasks) > 1))
++ sig = SIGINT;
++ }
++ /* fallthrough */
++ default:
++ rcu_read_lock();
++ p = find_task_by_real_pid(pid);
++ rcu_read_unlock();
++ if (p) {
++ if (vx_task_xid(p) == vxi->vx_id)
++ retval = group_send_sig_info(sig, sip, p);
++ }
++ break;
++ }
++ read_unlock(&tasklist_lock);
++ vxdprintk(VXD_CBIT(misc, 4),
++ "vx_info_kill(%p[#%d],%d,%d,%ld) = %d",
++ vxi, vxi->vx_id, pid, sig, (long)sip, retval);
++ return retval;
++}
++
++int vc_ctx_kill(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_ctx_kill_v0 vc_data;
++
++ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ /* special check to allow guest shutdown */
++ if (!vx_info_flags(vxi, VXF_STATE_ADMIN, 0) &&
++ /* forbid killall pid=0 when init is present */
++ (((vc_data.pid < 1) && vxi->vx_initpid) ||
++ (vc_data.pid > 1)))
++ return -EACCES;
++
++ return vx_info_kill(vxi, vc_data.pid, vc_data.sig);
++}
++
++
++static int __wait_exit(struct vx_info *vxi)
++{
++ DECLARE_WAITQUEUE(wait, current);
++ int ret = 0;
++
++ add_wait_queue(&vxi->vx_wait, &wait);
++ set_current_state(TASK_INTERRUPTIBLE);
++
++wait:
++ if (vx_info_state(vxi,
++ VXS_SHUTDOWN | VXS_HASHED | VXS_HELPER) == VXS_SHUTDOWN)
++ goto out;
++ if (signal_pending(current)) {
++ ret = -ERESTARTSYS;
++ goto out;
++ }
++ schedule();
++ goto wait;
++
++out:
++ set_current_state(TASK_RUNNING);
++ remove_wait_queue(&vxi->vx_wait, &wait);
++ return ret;
++}
++
++
++
++int vc_wait_exit(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_wait_exit_v0 vc_data;
++ int ret;
++
++ ret = __wait_exit(vxi);
++ vc_data.reboot_cmd = vxi->reboot_cmd;
++ vc_data.exit_code = vxi->exit_code;
++
++ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++ ret = -EFAULT;
++ return ret;
++}
++
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/space.c linux-3.6.10-vs2.3.4.6/kernel/vserver/space.c
+--- linux-3.6.10/kernel/vserver/space.c 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/space.c 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,436 @@
++/*
++ * linux/kernel/vserver/space.c
++ *
++ * Virtual Server: Context Space Support
++ *
++ * Copyright (C) 2003-2010 Herbert Pötzl
++ *
++ * V0.01 broken out from context.c 0.07
++ * V0.02 added task locking for namespace
++ * V0.03 broken out vx_enter_namespace
++ * V0.04 added *space support and commands
++ * V0.05 added credential support
++ *
++ */
++
++#include <linux/utsname.h>
++#include <linux/nsproxy.h>
++#include <linux/err.h>
++#include <linux/fs_struct.h>
++#include <linux/cred.h>
++#include <asm/uaccess.h>
++
++#include <linux/vs_context.h>
++#include <linux/vserver/space.h>
++#include <linux/vserver/space_cmd.h>
++
++atomic_t vs_global_nsproxy = ATOMIC_INIT(0);
++atomic_t vs_global_fs = ATOMIC_INIT(0);
++atomic_t vs_global_mnt_ns = ATOMIC_INIT(0);
++atomic_t vs_global_uts_ns = ATOMIC_INIT(0);
++atomic_t vs_global_user_ns = ATOMIC_INIT(0);
++atomic_t vs_global_pid_ns = ATOMIC_INIT(0);
++
++
++/* namespace functions */
++
++#include <linux/mnt_namespace.h>
++#include <linux/user_namespace.h>
++#include <linux/pid_namespace.h>
++#include <linux/ipc_namespace.h>
++#include <net/net_namespace.h>
++#include "../fs/mount.h"
++
++
++static const struct vcmd_space_mask_v1 space_mask_v0 = {
++ .mask = CLONE_FS |
++ CLONE_NEWNS |
++#ifdef CONFIG_UTS_NS
++ CLONE_NEWUTS |
++#endif
++#ifdef CONFIG_IPC_NS
++ CLONE_NEWIPC |
++#endif
++#ifdef CONFIG_USER_NS
++ CLONE_NEWUSER |
++#endif
++ 0
++};
++
++static const struct vcmd_space_mask_v1 space_mask = {
++ .mask = CLONE_FS |
++ CLONE_NEWNS |
++#ifdef CONFIG_UTS_NS
++ CLONE_NEWUTS |
++#endif
++#ifdef CONFIG_IPC_NS
++ CLONE_NEWIPC |
++#endif
++#ifdef CONFIG_USER_NS
++ CLONE_NEWUSER |
++#endif
++#ifdef CONFIG_PID_NS
++ CLONE_NEWPID |
++#endif
++#ifdef CONFIG_NET_NS
++ CLONE_NEWNET |
++#endif
++ 0
++};
++
++static const struct vcmd_space_mask_v1 default_space_mask = {
++ .mask = CLONE_FS |
++ CLONE_NEWNS |
++#ifdef CONFIG_UTS_NS
++ CLONE_NEWUTS |
++#endif
++#ifdef CONFIG_IPC_NS
++ CLONE_NEWIPC |
++#endif
++#ifdef CONFIG_USER_NS
++ CLONE_NEWUSER |
++#endif
++#ifdef CONFIG_PID_NS
++// CLONE_NEWPID |
++#endif
++ 0
++};
++
++/*
++ * build a new nsproxy mix
++ * assumes that both proxies are 'const'
++ * does not touch nsproxy refcounts
++ * will hold a reference on the result.
++ */
++
++struct nsproxy *vs_mix_nsproxy(struct nsproxy *old_nsproxy,
++ struct nsproxy *new_nsproxy, unsigned long mask)
++{
++ struct mnt_namespace *old_ns;
++ struct uts_namespace *old_uts;
++ struct ipc_namespace *old_ipc;
++#ifdef CONFIG_PID_NS
++ struct pid_namespace *old_pid;
++#endif
++#ifdef CONFIG_NET_NS
++ struct net *old_net;
++#endif
++ struct nsproxy *nsproxy;
++
++ nsproxy = copy_nsproxy(old_nsproxy);
++ if (!nsproxy)
++ goto out;
++
++ if (mask & CLONE_NEWNS) {
++ old_ns = nsproxy->mnt_ns;
++ nsproxy->mnt_ns = new_nsproxy->mnt_ns;
++ if (nsproxy->mnt_ns)
++ get_mnt_ns(nsproxy->mnt_ns);
++ } else
++ old_ns = NULL;
++
++ if (mask & CLONE_NEWUTS) {
++ old_uts = nsproxy->uts_ns;
++ nsproxy->uts_ns = new_nsproxy->uts_ns;
++ if (nsproxy->uts_ns)
++ get_uts_ns(nsproxy->uts_ns);
++ } else
++ old_uts = NULL;
++
++ if (mask & CLONE_NEWIPC) {
++ old_ipc = nsproxy->ipc_ns;
++ nsproxy->ipc_ns = new_nsproxy->ipc_ns;
++ if (nsproxy->ipc_ns)
++ get_ipc_ns(nsproxy->ipc_ns);
++ } else
++ old_ipc = NULL;
++
++#ifdef CONFIG_PID_NS
++ if (mask & CLONE_NEWPID) {
++ old_pid = nsproxy->pid_ns;
++ nsproxy->pid_ns = new_nsproxy->pid_ns;
++ if (nsproxy->pid_ns)
++ get_pid_ns(nsproxy->pid_ns);
++ } else
++ old_pid = NULL;
++#endif
++#ifdef CONFIG_NET_NS
++ if (mask & CLONE_NEWNET) {
++ old_net = nsproxy->net_ns;
++ nsproxy->net_ns = new_nsproxy->net_ns;
++ if (nsproxy->net_ns)
++ get_net(nsproxy->net_ns);
++ } else
++ old_net = NULL;
++#endif
++ if (old_ns)
++ put_mnt_ns(old_ns);
++ if (old_uts)
++ put_uts_ns(old_uts);
++ if (old_ipc)
++ put_ipc_ns(old_ipc);
++#ifdef CONFIG_PID_NS
++ if (old_pid)
++ put_pid_ns(old_pid);
++#endif
++#ifdef CONFIG_NET_NS
++ if (old_net)
++ put_net(old_net);
++#endif
++out:
++ return nsproxy;
++}
++
++
++/*
++ * merge two nsproxy structs into a new one.
++ * will hold a reference on the result.
++ */
++
++static inline
++struct nsproxy *__vs_merge_nsproxy(struct nsproxy *old,
++ struct nsproxy *proxy, unsigned long mask)
++{
++ struct nsproxy null_proxy = { .mnt_ns = NULL };
++
++ if (!proxy)
++ return NULL;
++
++ if (mask) {
++ /* vs_mix_nsproxy returns with reference */
++ return vs_mix_nsproxy(old ? old : &null_proxy,
++ proxy, mask);
++ }
++ get_nsproxy(proxy);
++ return proxy;
++}
++
++
++int vx_enter_space(struct vx_info *vxi, unsigned long mask, unsigned index)
++{
++ struct nsproxy *proxy, *proxy_cur, *proxy_new;
++ struct fs_struct *fs_cur, *fs = NULL;
++ struct _vx_space *space;
++ int ret, kill = 0;
++
++ vxdprintk(VXD_CBIT(space, 8), "vx_enter_space(%p[#%u],0x%08lx,%d)",
++ vxi, vxi->vx_id, mask, index);
++
++ if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0))
++ return -EACCES;
++
++ if (index >= VX_SPACES)
++ return -EINVAL;
++
++ space = &vxi->space[index];
++
++ if (!mask)
++ mask = space->vx_nsmask;
++
++ if ((mask & space->vx_nsmask) != mask)
++ return -EINVAL;
++
++ if (mask & CLONE_FS) {
++ fs = copy_fs_struct(space->vx_fs);
++ if (!fs)
++ return -ENOMEM;
++ }
++ proxy = space->vx_nsproxy;
++
++ vxdprintk(VXD_CBIT(space, 9),
++ "vx_enter_space(%p[#%u],0x%08lx,%d) -> (%p,%p)",
++ vxi, vxi->vx_id, mask, index, proxy, fs);
++
++ task_lock(current);
++ fs_cur = current->fs;
++
++ if (mask & CLONE_FS) {
++ spin_lock(&fs_cur->lock);
++ current->fs = fs;
++ kill = !--fs_cur->users;
++ spin_unlock(&fs_cur->lock);
++ }
++
++ proxy_cur = current->nsproxy;
++ get_nsproxy(proxy_cur);
++ task_unlock(current);
++
++ if (kill)
++ free_fs_struct(fs_cur);
++
++ proxy_new = __vs_merge_nsproxy(proxy_cur, proxy, mask);
++ if (IS_ERR(proxy_new)) {
++ ret = PTR_ERR(proxy_new);
++ goto out_put;
++ }
++
++ proxy_new = xchg(&current->nsproxy, proxy_new);
++
++ if (mask & CLONE_NEWUSER) {
++ struct cred *cred;
++
++ vxdprintk(VXD_CBIT(space, 10),
++ "vx_enter_space(%p[#%u],%p) cred (%p,%p)",
++ vxi, vxi->vx_id, space->vx_cred,
++ current->real_cred, current->cred);
++
++ if (space->vx_cred) {
++ cred = __prepare_creds(space->vx_cred);
++ if (cred)
++ commit_creds(cred);
++ }
++ }
++
++ ret = 0;
++
++ if (proxy_new)
++ put_nsproxy(proxy_new);
++out_put:
++ if (proxy_cur)
++ put_nsproxy(proxy_cur);
++ return ret;
++}
++
++
++int vx_set_space(struct vx_info *vxi, unsigned long mask, unsigned index)
++{
++ struct nsproxy *proxy_vxi, *proxy_cur, *proxy_new;
++ struct fs_struct *fs_vxi, *fs;
++ struct _vx_space *space;
++ int ret, kill = 0;
++
++ vxdprintk(VXD_CBIT(space, 8), "vx_set_space(%p[#%u],0x%08lx,%d)",
++ vxi, vxi->vx_id, mask, index);
++
++ if ((mask & space_mask.mask) != mask)
++ return -EINVAL;
++
++ if (index >= VX_SPACES)
++ return -EINVAL;
++
++ space = &vxi->space[index];
++
++ proxy_vxi = space->vx_nsproxy;
++ fs_vxi = space->vx_fs;
++
++ if (mask & CLONE_FS) {
++ fs = copy_fs_struct(current->fs);
++ if (!fs)
++ return -ENOMEM;
++ }
++
++ task_lock(current);
++
++ if (mask & CLONE_FS) {
++ spin_lock(&fs_vxi->lock);
++ space->vx_fs = fs;
++ kill = !--fs_vxi->users;
++ spin_unlock(&fs_vxi->lock);
++ }
++
++ proxy_cur = current->nsproxy;
++ get_nsproxy(proxy_cur);
++ task_unlock(current);
++
++ if (kill)
++ free_fs_struct(fs_vxi);
++
++ proxy_new = __vs_merge_nsproxy(proxy_vxi, proxy_cur, mask);
++ if (IS_ERR(proxy_new)) {
++ ret = PTR_ERR(proxy_new);
++ goto out_put;
++ }
++
++ proxy_new = xchg(&space->vx_nsproxy, proxy_new);
++ space->vx_nsmask |= mask;
++
++ if (mask & CLONE_NEWUSER) {
++ struct cred *cred;
++
++ vxdprintk(VXD_CBIT(space, 10),
++ "vx_set_space(%p[#%u],%p) cred (%p,%p)",
++ vxi, vxi->vx_id, space->vx_cred,
++ current->real_cred, current->cred);
++
++ cred = prepare_creds();
++ cred = (struct cred *)xchg(&space->vx_cred, cred);
++ if (cred)
++ abort_creds(cred);
++ }
++
++ ret = 0;
++
++ if (proxy_new)
++ put_nsproxy(proxy_new);
++out_put:
++ if (proxy_cur)
++ put_nsproxy(proxy_cur);
++ return ret;
++}
++
++
++int vc_enter_space_v1(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_space_mask_v1 vc_data = { .mask = 0 };
++
++ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ return vx_enter_space(vxi, vc_data.mask, 0);
++}
++
++int vc_enter_space(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_space_mask_v2 vc_data = { .mask = 0 };
++
++ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ if (vc_data.index >= VX_SPACES)
++ return -EINVAL;
++
++ return vx_enter_space(vxi, vc_data.mask, vc_data.index);
++}
++
++int vc_set_space_v1(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_space_mask_v1 vc_data = { .mask = 0 };
++
++ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ return vx_set_space(vxi, vc_data.mask, 0);
++}
++
++int vc_set_space(struct vx_info *vxi, void __user *data)
++{
++ struct vcmd_space_mask_v2 vc_data = { .mask = 0 };
++
++ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++ return -EFAULT;
++
++ if (vc_data.index >= VX_SPACES)
++ return -EINVAL;
++
++ return vx_set_space(vxi, vc_data.mask, vc_data.index);
++}
++
++int vc_get_space_mask(void __user *data, int type)
++{
++ const struct vcmd_space_mask_v1 *mask;
++
++ if (type == 0)
++ mask = &space_mask_v0;
++ else if (type == 1)
++ mask = &space_mask;
++ else
++ mask = &default_space_mask;
++
++ vxdprintk(VXD_CBIT(space, 10),
++ "vc_get_space_mask(%d) = %08llx", type, mask->mask);
++
++ if (copy_to_user(data, mask, sizeof(*mask)))
++ return -EFAULT;
++ return 0;
++}
++
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/switch.c linux-3.6.10-vs2.3.4.6/kernel/vserver/switch.c
+--- linux-3.6.10/kernel/vserver/switch.c 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/switch.c 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,556 @@
++/*
++ * linux/kernel/vserver/switch.c
++ *
++ * Virtual Server: Syscall Switch
++ *
++ * Copyright (C) 2003-2011 Herbert Pötzl
++ *
++ * V0.01 syscall switch
++ * V0.02 added signal to context
++ * V0.03 added rlimit functions
++ * V0.04 added iattr, task/xid functions
++ * V0.05 added debug/history stuff
++ * V0.06 added compat32 layer
++ * V0.07 vcmd args and perms
++ * V0.08 added status commands
++ * V0.09 added tag commands
++ * V0.10 added oom bias
++ * V0.11 added device commands
++ * V0.12 added warn mask
++ *
++ */
++
++#include <linux/vs_context.h>
++#include <linux/vs_network.h>
++#include <linux/vserver/switch.h>
++
++#include "vci_config.h"
++
++
++static inline
++int vc_get_version(uint32_t id)
++{
++ return VCI_VERSION;
++}
++
++static inline
++int vc_get_vci(uint32_t id)
++{
++ return vci_kernel_config();
++}
++
++#include <linux/vserver/context_cmd.h>
++#include <linux/vserver/cvirt_cmd.h>
++#include <linux/vserver/cacct_cmd.h>
++#include <linux/vserver/limit_cmd.h>
++#include <linux/vserver/network_cmd.h>
++#include <linux/vserver/sched_cmd.h>
++#include <linux/vserver/debug_cmd.h>
++#include <linux/vserver/inode_cmd.h>
++#include <linux/vserver/dlimit_cmd.h>
++#include <linux/vserver/signal_cmd.h>
++#include <linux/vserver/space_cmd.h>
++#include <linux/vserver/tag_cmd.h>
++#include <linux/vserver/device_cmd.h>
++
++#include <linux/vserver/inode.h>
++#include <linux/vserver/dlimit.h>
++
++
++#ifdef CONFIG_COMPAT
++#define __COMPAT(name, id, data, compat) \
++ (compat) ? name ## _x32(id, data) : name(id, data)
++#define __COMPAT_NO_ID(name, data, compat) \
++ (compat) ? name ## _x32(data) : name(data)
++#else
++#define __COMPAT(name, id, data, compat) \
++ name(id, data)
++#define __COMPAT_NO_ID(name, data, compat) \
++ name(data)
++#endif
++
++
++static inline
++long do_vcmd(uint32_t cmd, uint32_t id,
++ struct vx_info *vxi, struct nx_info *nxi,
++ void __user *data, int compat)
++{
++ switch (cmd) {
++
++ case VCMD_get_version:
++ return vc_get_version(id);
++ case VCMD_get_vci:
++ return vc_get_vci(id);
++
++ case VCMD_task_xid:
++ return vc_task_xid(id);
++ case VCMD_vx_info:
++ return vc_vx_info(vxi, data);
++
++ case VCMD_task_nid:
++ return vc_task_nid(id);
++ case VCMD_nx_info:
++ return vc_nx_info(nxi, data);
++
++ case VCMD_task_tag:
++ return vc_task_tag(id);
++
++ case VCMD_set_space_v1:
++ return vc_set_space_v1(vxi, data);
++ /* this is version 2 */
++ case VCMD_set_space:
++ return vc_set_space(vxi, data);
++
++ case VCMD_get_space_mask_v0:
++ return vc_get_space_mask(data, 0);
++ /* this is version 1 */
++ case VCMD_get_space_mask:
++ return vc_get_space_mask(data, 1);
++
++ case VCMD_get_space_default:
++ return vc_get_space_mask(data, -1);
++
++ case VCMD_set_umask:
++ return vc_set_umask(vxi, data);
++
++ case VCMD_get_umask:
++ return vc_get_umask(vxi, data);
++
++ case VCMD_set_wmask:
++ return vc_set_wmask(vxi, data);
++
++ case VCMD_get_wmask:
++ return vc_get_wmask(vxi, data);
++#ifdef CONFIG_IA32_EMULATION
++ case VCMD_get_rlimit:
++ return __COMPAT(vc_get_rlimit, vxi, data, compat);
++ case VCMD_set_rlimit:
++ return __COMPAT(vc_set_rlimit, vxi, data, compat);
++#else
++ case VCMD_get_rlimit:
++ return vc_get_rlimit(vxi, data);
++ case VCMD_set_rlimit:
++ return vc_set_rlimit(vxi, data);
++#endif
++ case VCMD_get_rlimit_mask:
++ return vc_get_rlimit_mask(id, data);
++ case VCMD_reset_hits:
++ return vc_reset_hits(vxi, data);
++ case VCMD_reset_minmax:
++ return vc_reset_minmax(vxi, data);
++
++ case VCMD_get_vhi_name:
++ return vc_get_vhi_name(vxi, data);
++ case VCMD_set_vhi_name:
++ return vc_set_vhi_name(vxi, data);
++
++ case VCMD_ctx_stat:
++ return vc_ctx_stat(vxi, data);
++ case VCMD_virt_stat:
++ return vc_virt_stat(vxi, data);
++ case VCMD_sock_stat:
++ return vc_sock_stat(vxi, data);
++ case VCMD_rlimit_stat:
++ return vc_rlimit_stat(vxi, data);
++
++ case VCMD_set_cflags:
++ return vc_set_cflags(vxi, data);
++ case VCMD_get_cflags:
++ return vc_get_cflags(vxi, data);
++
++ /* this is version 1 */
++ case VCMD_set_ccaps:
++ return vc_set_ccaps(vxi, data);
++ /* this is version 1 */
++ case VCMD_get_ccaps:
++ return vc_get_ccaps(vxi, data);
++ case VCMD_set_bcaps:
++ return vc_set_bcaps(vxi, data);
++ case VCMD_get_bcaps:
++ return vc_get_bcaps(vxi, data);
++
++ case VCMD_set_badness:
++ return vc_set_badness(vxi, data);
++ case VCMD_get_badness:
++ return vc_get_badness(vxi, data);
++
++ case VCMD_set_nflags:
++ return vc_set_nflags(nxi, data);
++ case VCMD_get_nflags:
++ return vc_get_nflags(nxi, data);
++
++ case VCMD_set_ncaps:
++ return vc_set_ncaps(nxi, data);
++ case VCMD_get_ncaps:
++ return vc_get_ncaps(nxi, data);
++
++ case VCMD_set_prio_bias:
++ return vc_set_prio_bias(vxi, data);
++ case VCMD_get_prio_bias:
++ return vc_get_prio_bias(vxi, data);
++ case VCMD_add_dlimit:
++ return __COMPAT(vc_add_dlimit, id, data, compat);
++ case VCMD_rem_dlimit:
++ return __COMPAT(vc_rem_dlimit, id, data, compat);
++ case VCMD_set_dlimit:
++ return __COMPAT(vc_set_dlimit, id, data, compat);
++ case VCMD_get_dlimit:
++ return __COMPAT(vc_get_dlimit, id, data, compat);
++
++ case VCMD_ctx_kill:
++ return vc_ctx_kill(vxi, data);
++
++ case VCMD_wait_exit:
++ return vc_wait_exit(vxi, data);
++
++ case VCMD_get_iattr:
++ return __COMPAT_NO_ID(vc_get_iattr, data, compat);
++ case VCMD_set_iattr:
++ return __COMPAT_NO_ID(vc_set_iattr, data, compat);
++
++ case VCMD_fget_iattr:
++ return vc_fget_iattr(id, data);
++ case VCMD_fset_iattr:
++ return vc_fset_iattr(id, data);
++
++ case VCMD_enter_space_v0:
++ return vc_enter_space_v1(vxi, NULL);
++ case VCMD_enter_space_v1:
++ return vc_enter_space_v1(vxi, data);
++ /* this is version 2 */
++ case VCMD_enter_space:
++ return vc_enter_space(vxi, data);
++
++ case VCMD_ctx_create_v0:
++ return vc_ctx_create(id, NULL);
++ case VCMD_ctx_create:
++ return vc_ctx_create(id, data);
++ case VCMD_ctx_migrate_v0:
++ return vc_ctx_migrate(vxi, NULL);
++ case VCMD_ctx_migrate:
++ return vc_ctx_migrate(vxi, data);
++
++ case VCMD_net_create_v0:
++ return vc_net_create(id, NULL);
++ case VCMD_net_create:
++ return vc_net_create(id, data);
++ case VCMD_net_migrate:
++ return vc_net_migrate(nxi, data);
++
++ case VCMD_tag_migrate:
++ return vc_tag_migrate(id);
++
++ case VCMD_net_add:
++ return vc_net_add(nxi, data);
++ case VCMD_net_remove:
++ return vc_net_remove(nxi, data);
++
++ case VCMD_net_add_ipv4_v1:
++ return vc_net_add_ipv4_v1(nxi, data);
++ /* this is version 2 */
++ case VCMD_net_add_ipv4:
++ return vc_net_add_ipv4(nxi, data);
++
++ case VCMD_net_rem_ipv4_v1:
++ return vc_net_rem_ipv4_v1(nxi, data);
++ /* this is version 2 */
++ case VCMD_net_rem_ipv4:
++ return vc_net_rem_ipv4(nxi, data);
++#ifdef CONFIG_IPV6
++ case VCMD_net_add_ipv6:
++ return vc_net_add_ipv6(nxi, data);
++ case VCMD_net_remove_ipv6:
++ return vc_net_remove_ipv6(nxi, data);
++#endif
++/* case VCMD_add_match_ipv4:
++ return vc_add_match_ipv4(nxi, data);
++ case VCMD_get_match_ipv4:
++ return vc_get_match_ipv4(nxi, data);
++#ifdef CONFIG_IPV6
++ case VCMD_add_match_ipv6:
++ return vc_add_match_ipv6(nxi, data);
++ case VCMD_get_match_ipv6:
++ return vc_get_match_ipv6(nxi, data);
++#endif */
++
++#ifdef CONFIG_VSERVER_DEVICE
++ case VCMD_set_mapping:
++ return __COMPAT(vc_set_mapping, vxi, data, compat);
++ case VCMD_unset_mapping:
++ return __COMPAT(vc_unset_mapping, vxi, data, compat);
++#endif
++#ifdef CONFIG_VSERVER_HISTORY
++ case VCMD_dump_history:
++ return vc_dump_history(id);
++ case VCMD_read_history:
++ return __COMPAT(vc_read_history, id, data, compat);
++#endif
++ default:
++ vxwprintk_task(1, "unimplemented VCMD_%02d_%d[%d]",
++ VC_CATEGORY(cmd), VC_COMMAND(cmd), VC_VERSION(cmd));
++ }
++ return -ENOSYS;
++}
++
++
++#define __VCMD(vcmd, _perm, _args, _flags) \
++ case VCMD_ ## vcmd: perm = _perm; \
++ args = _args; flags = _flags; break
++
++
++#define VCA_NONE 0x00
++#define VCA_VXI 0x01
++#define VCA_NXI 0x02
++
++#define VCF_NONE 0x00
++#define VCF_INFO 0x01
++#define VCF_ADMIN 0x02
++#define VCF_ARES 0x06 /* includes admin */
++#define VCF_SETUP 0x08
++
++#define VCF_ZIDOK 0x10 /* zero id okay */
++
++
++static inline
++long do_vserver(uint32_t cmd, uint32_t id, void __user *data, int compat)
++{
++ long ret;
++ int permit = -1, state = 0;
++ int perm = -1, args = 0, flags = 0;
++ struct vx_info *vxi = NULL;
++ struct nx_info *nxi = NULL;
++
++ switch (cmd) {
++ /* unprivileged commands */
++ __VCMD(get_version, 0, VCA_NONE, 0);
++ __VCMD(get_vci, 0, VCA_NONE, 0);
++ __VCMD(get_rlimit_mask, 0, VCA_NONE, 0);
++ __VCMD(get_space_mask_v0,0, VCA_NONE, 0);
++ __VCMD(get_space_mask, 0, VCA_NONE, 0);
++ __VCMD(get_space_default,0, VCA_NONE, 0);
++
++ /* info commands */
++ __VCMD(task_xid, 2, VCA_NONE, 0);
++ __VCMD(reset_hits, 2, VCA_VXI, 0);
++ __VCMD(reset_minmax, 2, VCA_VXI, 0);
++ __VCMD(vx_info, 3, VCA_VXI, VCF_INFO);
++ __VCMD(get_bcaps, 3, VCA_VXI, VCF_INFO);
++ __VCMD(get_ccaps, 3, VCA_VXI, VCF_INFO);
++ __VCMD(get_cflags, 3, VCA_VXI, VCF_INFO);
++ __VCMD(get_umask, 3, VCA_VXI, VCF_INFO);
++ __VCMD(get_wmask, 3, VCA_VXI, VCF_INFO);
++ __VCMD(get_badness, 3, VCA_VXI, VCF_INFO);
++ __VCMD(get_vhi_name, 3, VCA_VXI, VCF_INFO);
++ __VCMD(get_rlimit, 3, VCA_VXI, VCF_INFO);
++
++ __VCMD(ctx_stat, 3, VCA_VXI, VCF_INFO);
++ __VCMD(virt_stat, 3, VCA_VXI, VCF_INFO);
++ __VCMD(sock_stat, 3, VCA_VXI, VCF_INFO);
++ __VCMD(rlimit_stat, 3, VCA_VXI, VCF_INFO);
++
++ __VCMD(task_nid, 2, VCA_NONE, 0);
++ __VCMD(nx_info, 3, VCA_NXI, VCF_INFO);
++ __VCMD(get_ncaps, 3, VCA_NXI, VCF_INFO);
++ __VCMD(get_nflags, 3, VCA_NXI, VCF_INFO);
++
++ __VCMD(task_tag, 2, VCA_NONE, 0);
++
++ __VCMD(get_iattr, 2, VCA_NONE, 0);
++ __VCMD(fget_iattr, 2, VCA_NONE, 0);
++ __VCMD(get_dlimit, 3, VCA_NONE, VCF_INFO);
++ __VCMD(get_prio_bias, 3, VCA_VXI, VCF_INFO);
++
++ /* lower admin commands */
++ __VCMD(wait_exit, 4, VCA_VXI, VCF_INFO);
++ __VCMD(ctx_create_v0, 5, VCA_NONE, 0);
++ __VCMD(ctx_create, 5, VCA_NONE, 0);
++ __VCMD(ctx_migrate_v0, 5, VCA_VXI, VCF_ADMIN);
++ __VCMD(ctx_migrate, 5, VCA_VXI, VCF_ADMIN);
++ __VCMD(enter_space_v0, 5, VCA_VXI, VCF_ADMIN);
++ __VCMD(enter_space_v1, 5, VCA_VXI, VCF_ADMIN);
++ __VCMD(enter_space, 5, VCA_VXI, VCF_ADMIN);
++
++ __VCMD(net_create_v0, 5, VCA_NONE, 0);
++ __VCMD(net_create, 5, VCA_NONE, 0);
++ __VCMD(net_migrate, 5, VCA_NXI, VCF_ADMIN);
++
++ __VCMD(tag_migrate, 5, VCA_NONE, VCF_ADMIN);
++
++ /* higher admin commands */
++ __VCMD(ctx_kill, 6, VCA_VXI, VCF_ARES);
++ __VCMD(set_space_v1, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
++ __VCMD(set_space, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
++
++ __VCMD(set_ccaps, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
++ __VCMD(set_bcaps, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
++ __VCMD(set_cflags, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
++ __VCMD(set_umask, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
++ __VCMD(set_wmask, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
++ __VCMD(set_badness, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
++
++ __VCMD(set_vhi_name, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
++ __VCMD(set_rlimit, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
++ __VCMD(set_prio_bias, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
++
++ __VCMD(set_ncaps, 7, VCA_NXI, VCF_ARES | VCF_SETUP);
++ __VCMD(set_nflags, 7, VCA_NXI, VCF_ARES | VCF_SETUP);
++ __VCMD(net_add, 8, VCA_NXI, VCF_ARES | VCF_SETUP);
++ __VCMD(net_remove, 8, VCA_NXI, VCF_ARES | VCF_SETUP);
++ __VCMD(net_add_ipv4_v1, 8, VCA_NXI, VCF_ARES | VCF_SETUP);
++ __VCMD(net_rem_ipv4_v1, 8, VCA_NXI, VCF_ARES | VCF_SETUP);
++ __VCMD(net_add_ipv4, 8, VCA_NXI, VCF_ARES | VCF_SETUP);
++ __VCMD(net_rem_ipv4, 8, VCA_NXI, VCF_ARES | VCF_SETUP);
++#ifdef CONFIG_IPV6
++ __VCMD(net_add_ipv6, 8, VCA_NXI, VCF_ARES | VCF_SETUP);
++ __VCMD(net_remove_ipv6, 8, VCA_NXI, VCF_ARES | VCF_SETUP);
++#endif
++ __VCMD(set_iattr, 7, VCA_NONE, 0);
++ __VCMD(fset_iattr, 7, VCA_NONE, 0);
++ __VCMD(set_dlimit, 7, VCA_NONE, VCF_ARES);
++ __VCMD(add_dlimit, 8, VCA_NONE, VCF_ARES);
++ __VCMD(rem_dlimit, 8, VCA_NONE, VCF_ARES);
++
++#ifdef CONFIG_VSERVER_DEVICE
++ __VCMD(set_mapping, 8, VCA_VXI, VCF_ARES|VCF_ZIDOK);
++ __VCMD(unset_mapping, 8, VCA_VXI, VCF_ARES|VCF_ZIDOK);
++#endif
++ /* debug level admin commands */
++#ifdef CONFIG_VSERVER_HISTORY
++ __VCMD(dump_history, 9, VCA_NONE, 0);
++ __VCMD(read_history, 9, VCA_NONE, 0);
++#endif
++
++ default:
++ perm = -1;
++ }
++
++ vxdprintk(VXD_CBIT(switch, 0),
++ "vc: VCMD_%02d_%d[%d], %d,%p [%d,%d,%x,%x]",
++ VC_CATEGORY(cmd), VC_COMMAND(cmd),
++ VC_VERSION(cmd), id, data, compat,
++ perm, args, flags);
++
++ ret = -ENOSYS;
++ if (perm < 0)
++ goto out;
++
++ state = 1;
++ if (!capable(CAP_CONTEXT))
++ goto out;
++
++ state = 2;
++ /* moved here from the individual commands */
++ ret = -EPERM;
++ if ((perm > 1) && !capable(CAP_SYS_ADMIN))
++ goto out;
++
++ state = 3;
++ /* vcmd involves resource management */
++ ret = -EPERM;
++ if ((flags & VCF_ARES) && !capable(CAP_SYS_RESOURCE))
++ goto out;
++
++ state = 4;
++ /* various legacy exceptions */
++ switch (cmd) {
++ /* will go away when spectator is a cap */
++ case VCMD_ctx_migrate_v0:
++ case VCMD_ctx_migrate:
++ if (id == 1) {
++ current->xid = 1;
++ ret = 1;
++ goto out;
++ }
++ break;
++
++ /* will go away when spectator is a cap */
++ case VCMD_net_migrate:
++ if (id == 1) {
++ current->nid = 1;
++ ret = 1;
++ goto out;
++ }
++ break;
++ }
++
++ /* vcmds are fine by default */
++ permit = 1;
++
++ /* admin type vcmds require admin ... */
++ if (flags & VCF_ADMIN)
++ permit = vx_check(0, VS_ADMIN) ? 1 : 0;
++
++ /* ... but setup type vcmds override that */
++ if (!permit && (flags & VCF_SETUP))
++ permit = vx_flags(VXF_STATE_SETUP, 0) ? 2 : 0;
++
++ state = 5;
++ ret = -EPERM;
++ if (!permit)
++ goto out;
++
++ state = 6;
++ if (!id && (flags & VCF_ZIDOK))
++ goto skip_id;
++
++ ret = -ESRCH;
++ if (args & VCA_VXI) {
++ vxi = lookup_vx_info(id);
++ if (!vxi)
++ goto out;
++
++ if ((flags & VCF_ADMIN) &&
++ /* special case kill for shutdown */
++ (cmd != VCMD_ctx_kill) &&
++ /* can context be administrated? */
++ !vx_info_flags(vxi, VXF_STATE_ADMIN, 0)) {
++ ret = -EACCES;
++ goto out_vxi;
++ }
++ }
++ state = 7;
++ if (args & VCA_NXI) {
++ nxi = lookup_nx_info(id);
++ if (!nxi)
++ goto out_vxi;
++
++ if ((flags & VCF_ADMIN) &&
++ /* can context be administrated? */
++ !nx_info_flags(nxi, NXF_STATE_ADMIN, 0)) {
++ ret = -EACCES;
++ goto out_nxi;
++ }
++ }
++skip_id:
++ state = 8;
++ ret = do_vcmd(cmd, id, vxi, nxi, data, compat);
++
++out_nxi:
++ if ((args & VCA_NXI) && nxi)
++ put_nx_info(nxi);
++out_vxi:
++ if ((args & VCA_VXI) && vxi)
++ put_vx_info(vxi);
++out:
++ vxdprintk(VXD_CBIT(switch, 1),
++ "vc: VCMD_%02d_%d[%d] = %08lx(%ld) [%d,%d]",
++ VC_CATEGORY(cmd), VC_COMMAND(cmd),
++ VC_VERSION(cmd), ret, ret, state, permit);
++ return ret;
++}
++
++asmlinkage long
++sys_vserver(uint32_t cmd, uint32_t id, void __user *data)
++{
++ return do_vserver(cmd, id, data, 0);
++}
++
++#ifdef CONFIG_COMPAT
++
++asmlinkage long
++sys32_vserver(uint32_t cmd, uint32_t id, void __user *data)
++{
++ return do_vserver(cmd, id, data, 1);
++}
++
++#endif /* CONFIG_COMPAT */
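/*
 * Illustrative sketch (not part of the patch): a minimal userspace wrapper
 * for the switch above. It assumes __NR_vserver is wired up for the target
 * architecture and that the VCMD_* encodings (e.g. VCMD_get_vci, which
 * do_vcmd() routes to vc_get_vci()) are available from the vserver userspace
 * headers; both are provided by other parts of the patch and its tooling,
 * not by this hunk.
 */
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

static long vserver_syscall(uint32_t cmd, uint32_t id, void *data)
{
	return syscall(__NR_vserver, cmd, id, data);
}

/* e.g.: long vci = vserver_syscall(VCMD_get_vci, 0, NULL); */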
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/sysctl.c linux-3.6.10-vs2.3.4.6/kernel/vserver/sysctl.c
+--- linux-3.6.10/kernel/vserver/sysctl.c 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/sysctl.c 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,247 @@
++/*
++ * kernel/vserver/sysctl.c
++ *
++ * Virtual Context Support
++ *
++ * Copyright (C) 2004-2007 Herbert Pötzl
++ *
++ * V0.01 basic structure
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/ctype.h>
++#include <linux/sysctl.h>
++#include <linux/parser.h>
++#include <asm/uaccess.h>
++
++enum {
++ CTL_DEBUG_ERROR = 0,
++ CTL_DEBUG_SWITCH = 1,
++ CTL_DEBUG_XID,
++ CTL_DEBUG_NID,
++ CTL_DEBUG_TAG,
++ CTL_DEBUG_NET,
++ CTL_DEBUG_LIMIT,
++ CTL_DEBUG_CRES,
++ CTL_DEBUG_DLIM,
++ CTL_DEBUG_QUOTA,
++ CTL_DEBUG_CVIRT,
++ CTL_DEBUG_SPACE,
++ CTL_DEBUG_PERM,
++ CTL_DEBUG_MISC,
++};
++
++
++unsigned int vs_debug_switch = 0;
++unsigned int vs_debug_xid = 0;
++unsigned int vs_debug_nid = 0;
++unsigned int vs_debug_tag = 0;
++unsigned int vs_debug_net = 0;
++unsigned int vs_debug_limit = 0;
++unsigned int vs_debug_cres = 0;
++unsigned int vs_debug_dlim = 0;
++unsigned int vs_debug_quota = 0;
++unsigned int vs_debug_cvirt = 0;
++unsigned int vs_debug_space = 0;
++unsigned int vs_debug_perm = 0;
++unsigned int vs_debug_misc = 0;
++
++
++static struct ctl_table_header *vserver_table_header;
++static ctl_table vserver_root_table[];
++
++
++void vserver_register_sysctl(void)
++{
++ if (!vserver_table_header) {
++ vserver_table_header = register_sysctl_table(vserver_root_table);
++ }
++
++}
++
++void vserver_unregister_sysctl(void)
++{
++ if (vserver_table_header) {
++ unregister_sysctl_table(vserver_table_header);
++ vserver_table_header = NULL;
++ }
++}
++
++
++static int proc_dodebug(ctl_table *table, int write,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ char tmpbuf[20], *p, c;
++ unsigned int value;
++ size_t left, len;
++
++ if ((*ppos && !write) || !*lenp) {
++ *lenp = 0;
++ return 0;
++ }
++
++ left = *lenp;
++
++ if (write) {
++ if (!access_ok(VERIFY_READ, buffer, left))
++ return -EFAULT;
++ p = (char *)buffer;
++ while (left && __get_user(c, p) >= 0 && isspace(c))
++ left--, p++;
++ if (!left)
++ goto done;
++
++ if (left > sizeof(tmpbuf) - 1)
++ return -EINVAL;
++ if (copy_from_user(tmpbuf, p, left))
++ return -EFAULT;
++ tmpbuf[left] = '\0';
++
++ for (p = tmpbuf, value = 0; '0' <= *p && *p <= '9'; p++, left--)
++ value = 10 * value + (*p - '0');
++ if (*p && !isspace(*p))
++ return -EINVAL;
++ while (left && isspace(*p))
++ left--, p++;
++ *(unsigned int *)table->data = value;
++ } else {
++ if (!access_ok(VERIFY_WRITE, buffer, left))
++ return -EFAULT;
++ len = sprintf(tmpbuf, "%d", *(unsigned int *)table->data);
++ if (len > left)
++ len = left;
++ if (__copy_to_user(buffer, tmpbuf, len))
++ return -EFAULT;
++ if ((left -= len) > 0) {
++ if (put_user('\n', (char *)buffer + len))
++ return -EFAULT;
++ left--;
++ }
++ }
++
++done:
++ *lenp -= left;
++ *ppos += *lenp;
++ return 0;
++}
++
++static int zero;
++
++#define CTL_ENTRY(ctl, name) \
++ { \
++ .procname = #name, \
++ .data = &vs_ ## name, \
++ .maxlen = sizeof(int), \
++ .mode = 0644, \
++ .proc_handler = &proc_dodebug, \
++ .extra1 = &zero, \
++ .extra2 = &zero, \
++ }
++
++static ctl_table vserver_debug_table[] = {
++ CTL_ENTRY(CTL_DEBUG_SWITCH, debug_switch),
++ CTL_ENTRY(CTL_DEBUG_XID, debug_xid),
++ CTL_ENTRY(CTL_DEBUG_NID, debug_nid),
++ CTL_ENTRY(CTL_DEBUG_TAG, debug_tag),
++ CTL_ENTRY(CTL_DEBUG_NET, debug_net),
++ CTL_ENTRY(CTL_DEBUG_LIMIT, debug_limit),
++ CTL_ENTRY(CTL_DEBUG_CRES, debug_cres),
++ CTL_ENTRY(CTL_DEBUG_DLIM, debug_dlim),
++ CTL_ENTRY(CTL_DEBUG_QUOTA, debug_quota),
++ CTL_ENTRY(CTL_DEBUG_CVIRT, debug_cvirt),
++ CTL_ENTRY(CTL_DEBUG_SPACE, debug_space),
++ CTL_ENTRY(CTL_DEBUG_PERM, debug_perm),
++ CTL_ENTRY(CTL_DEBUG_MISC, debug_misc),
++ { 0 }
++};
++
++static ctl_table vserver_root_table[] = {
++ {
++ .procname = "vserver",
++ .mode = 0555,
++ .child = vserver_debug_table
++ },
++ { 0 }
++};
++
++
++static match_table_t tokens = {
++ { CTL_DEBUG_SWITCH, "switch=%x" },
++ { CTL_DEBUG_XID, "xid=%x" },
++ { CTL_DEBUG_NID, "nid=%x" },
++ { CTL_DEBUG_TAG, "tag=%x" },
++ { CTL_DEBUG_NET, "net=%x" },
++ { CTL_DEBUG_LIMIT, "limit=%x" },
++ { CTL_DEBUG_CRES, "cres=%x" },
++ { CTL_DEBUG_DLIM, "dlim=%x" },
++ { CTL_DEBUG_QUOTA, "quota=%x" },
++ { CTL_DEBUG_CVIRT, "cvirt=%x" },
++ { CTL_DEBUG_SPACE, "space=%x" },
++ { CTL_DEBUG_PERM, "perm=%x" },
++ { CTL_DEBUG_MISC, "misc=%x" },
++ { CTL_DEBUG_ERROR, NULL }
++};
++
++#define HANDLE_CASE(id, name, val) \
++ case CTL_DEBUG_ ## id: \
++ vs_debug_ ## name = val; \
++ printk("vs_debug_" #name "=0x%x\n", val); \
++ break
++
++
++static int __init vs_debug_setup(char *str)
++{
++ char *p;
++ int token;
++
++ printk("vs_debug_setup(%s)\n", str);
++ while ((p = strsep(&str, ",")) != NULL) {
++ substring_t args[MAX_OPT_ARGS];
++ unsigned int value;
++
++ if (!*p)
++ continue;
++
++ token = match_token(p, tokens, args);
++ value = (token > 0) ? simple_strtoul(args[0].from, NULL, 0) : 0;
++
++ switch (token) {
++ HANDLE_CASE(SWITCH, switch, value);
++ HANDLE_CASE(XID, xid, value);
++ HANDLE_CASE(NID, nid, value);
++ HANDLE_CASE(TAG, tag, value);
++ HANDLE_CASE(NET, net, value);
++ HANDLE_CASE(LIMIT, limit, value);
++ HANDLE_CASE(CRES, cres, value);
++ HANDLE_CASE(DLIM, dlim, value);
++ HANDLE_CASE(QUOTA, quota, value);
++ HANDLE_CASE(CVIRT, cvirt, value);
++ HANDLE_CASE(SPACE, space, value);
++ HANDLE_CASE(PERM, perm, value);
++ HANDLE_CASE(MISC, misc, value);
++ default:
++ return -EINVAL;
++ break;
++ }
++ }
++ return 1;
++}
++
++__setup("vsdebug=", vs_debug_setup);
++
++
++
++EXPORT_SYMBOL_GPL(vs_debug_switch);
++EXPORT_SYMBOL_GPL(vs_debug_xid);
++EXPORT_SYMBOL_GPL(vs_debug_nid);
++EXPORT_SYMBOL_GPL(vs_debug_net);
++EXPORT_SYMBOL_GPL(vs_debug_limit);
++EXPORT_SYMBOL_GPL(vs_debug_cres);
++EXPORT_SYMBOL_GPL(vs_debug_dlim);
++EXPORT_SYMBOL_GPL(vs_debug_quota);
++EXPORT_SYMBOL_GPL(vs_debug_cvirt);
++EXPORT_SYMBOL_GPL(vs_debug_space);
++EXPORT_SYMBOL_GPL(vs_debug_perm);
++EXPORT_SYMBOL_GPL(vs_debug_misc);
++
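/*
 * Usage sketch (illustrative, not part of the patch), derived from the
 * tables above:
 *
 * At boot, the masks can be seeded through the "vsdebug=" parameter parsed
 * by vs_debug_setup(); entries are comma separated and, per the match_table_t
 * patterns, hexadecimal (re-parsed by simple_strtoul() with base 0, so use a
 * 0x prefix):
 *
 *	vsdebug=switch=1,misc=0xff
 *
 * At runtime, proc_dodebug() exposes the same masks as decimal values under
 * /proc/sys/vserver/ (names come from the CTL_ENTRY() procname field):
 *
 *	cat /proc/sys/vserver/debug_switch
 *	echo 255 > /proc/sys/vserver/debug_misc
 */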
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/tag.c linux-3.6.10-vs2.3.4.6/kernel/vserver/tag.c
+--- linux-3.6.10/kernel/vserver/tag.c 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/tag.c 2012-10-04 16:47:00.000000000 +0000
+@@ -0,0 +1,63 @@
++/*
++ * linux/kernel/vserver/tag.c
++ *
++ * Virtual Server: Shallow Tag Space
++ *
++ * Copyright (C) 2007 Herbert Pötzl
++ *
++ * V0.01 basic implementation
++ *
++ */
++
++#include <linux/sched.h>
++#include <linux/vserver/debug.h>
++#include <linux/vs_pid.h>
++#include <linux/vs_tag.h>
++
++#include <linux/vserver/tag_cmd.h>
++
++
++int dx_migrate_task(struct task_struct *p, tag_t tag)
++{
++ if (!p)
++ BUG();
++
++ vxdprintk(VXD_CBIT(tag, 5),
++ "dx_migrate_task(%p[#%d],#%d)", p, p->tag, tag);
++
++ task_lock(p);
++ p->tag = tag;
++ task_unlock(p);
++
++ vxdprintk(VXD_CBIT(tag, 5),
++ "moved task %p into [#%d]", p, tag);
++ return 0;
++}
++
++/* vserver syscall commands below here */
++
++/* task xid and vx_info functions */
++
++
++int vc_task_tag(uint32_t id)
++{
++ tag_t tag;
++
++ if (id) {
++ struct task_struct *tsk;
++ rcu_read_lock();
++ tsk = find_task_by_real_pid(id);
++ tag = (tsk) ? tsk->tag : -ESRCH;
++ rcu_read_unlock();
++ } else
++ tag = dx_current_tag();
++ return tag;
++}
++
++
++int vc_tag_migrate(uint32_t tag)
++{
++ return dx_migrate_task(current, tag & 0xFFFF);
++}
++
++
+diff -NurpP --minimal linux-3.6.10/kernel/vserver/vci_config.h linux-3.6.10-vs2.3.4.6/kernel/vserver/vci_config.h
+--- linux-3.6.10/kernel/vserver/vci_config.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/kernel/vserver/vci_config.h 2012-12-12 12:10:25.000000000 +0000
+@@ -0,0 +1,80 @@
++
++/* interface version */
++
++#define VCI_VERSION 0x00020308
++
++
++enum {
++ VCI_KCBIT_NO_DYNAMIC = 0,
++
++ VCI_KCBIT_PROC_SECURE = 4,
++ /* VCI_KCBIT_HARDCPU = 5, */
++ /* VCI_KCBIT_IDLELIMIT = 6, */
++ /* VCI_KCBIT_IDLETIME = 7, */
++
++ VCI_KCBIT_COWBL = 8,
++ VCI_KCBIT_FULLCOWBL = 9,
++ VCI_KCBIT_SPACES = 10,
++ VCI_KCBIT_NETV2 = 11,
++ VCI_KCBIT_MEMCG = 12,
++ VCI_KCBIT_MEMCG_SWAP = 13,
++
++ VCI_KCBIT_DEBUG = 16,
++ VCI_KCBIT_HISTORY = 20,
++ VCI_KCBIT_TAGGED = 24,
++ VCI_KCBIT_PPTAG = 28,
++
++ VCI_KCBIT_MORE = 31,
++};
++
++
++static inline uint32_t vci_kernel_config(void)
++{
++ return
++ (1 << VCI_KCBIT_NO_DYNAMIC) |
++
++ /* configured features */
++#ifdef CONFIG_VSERVER_PROC_SECURE
++ (1 << VCI_KCBIT_PROC_SECURE) |
++#endif
++#ifdef CONFIG_VSERVER_COWBL
++ (1 << VCI_KCBIT_COWBL) |
++ (1 << VCI_KCBIT_FULLCOWBL) |
++#endif
++ (1 << VCI_KCBIT_SPACES) |
++ (1 << VCI_KCBIT_NETV2) |
++#ifdef CONFIG_MEMCG
++ (1 << VCI_KCBIT_MEMCG) |
++#endif
++#ifdef CONFIG_MEMCG_SWAP
++ (1 << VCI_KCBIT_MEMCG_SWAP) |
++#endif
++
++ /* debug options */
++#ifdef CONFIG_VSERVER_DEBUG
++ (1 << VCI_KCBIT_DEBUG) |
++#endif
++#ifdef CONFIG_VSERVER_HISTORY
++ (1 << VCI_KCBIT_HISTORY) |
++#endif
++
++ /* inode context tagging */
++#if defined(CONFIG_TAGGING_NONE)
++ (0 << VCI_KCBIT_TAGGED) |
++#elif defined(CONFIG_TAGGING_UID16)
++ (1 << VCI_KCBIT_TAGGED) |
++#elif defined(CONFIG_TAGGING_GID16)
++ (2 << VCI_KCBIT_TAGGED) |
++#elif defined(CONFIG_TAGGING_ID24)
++ (3 << VCI_KCBIT_TAGGED) |
++#elif defined(CONFIG_TAGGING_INTERN)
++ (4 << VCI_KCBIT_TAGGED) |
++#elif defined(CONFIG_TAGGING_RUNTIME)
++ (5 << VCI_KCBIT_TAGGED) |
++#else
++ (7 << VCI_KCBIT_TAGGED) |
++#endif
++ (1 << VCI_KCBIT_PPTAG) |
++ 0;
++}
++
+diff -NurpP --minimal linux-3.6.10/mm/memcontrol.c linux-3.6.10-vs2.3.4.6/mm/memcontrol.c
+--- linux-3.6.10/mm/memcontrol.c 2012-12-11 11:37:02.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/mm/memcontrol.c 2012-12-08 00:36:33.000000000 +0000
+@@ -885,6 +885,31 @@ struct mem_cgroup *mem_cgroup_from_task(
+ return mem_cgroup_from_css(task_subsys_state(p, mem_cgroup_subsys_id));
+ }
+
++u64 mem_cgroup_res_read_u64(struct mem_cgroup *mem, int member)
++{
++ return res_counter_read_u64(&mem->res, member);
++}
++
++u64 mem_cgroup_memsw_read_u64(struct mem_cgroup *mem, int member)
++{
++ return res_counter_read_u64(&mem->memsw, member);
++}
++
++s64 mem_cgroup_stat_read_cache(struct mem_cgroup *mem)
++{
++ return mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
++}
++
++s64 mem_cgroup_stat_read_anon(struct mem_cgroup *mem)
++{
++ return mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
++}
++
++s64 mem_cgroup_stat_read_mapped(struct mem_cgroup *mem)
++{
++ return mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
++}
++
+ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
+ {
+ struct mem_cgroup *memcg = NULL;
+diff -NurpP --minimal linux-3.6.10/mm/oom_kill.c linux-3.6.10-vs2.3.4.6/mm/oom_kill.c
+--- linux-3.6.10/mm/oom_kill.c 2012-10-04 13:27:48.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/mm/oom_kill.c 2012-10-04 16:53:46.000000000 +0000
+@@ -35,6 +35,8 @@
+ #include <linux/freezer.h>
+ #include <linux/ftrace.h>
+ #include <linux/ratelimit.h>
++#include <linux/reboot.h>
++#include <linux/vs_context.h>
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/oom.h>
+@@ -155,11 +157,18 @@ struct task_struct *find_lock_task_mm(st
+ static bool oom_unkillable_task(struct task_struct *p,
+ const struct mem_cgroup *memcg, const nodemask_t *nodemask)
+ {
+- if (is_global_init(p))
++ unsigned xid = vx_current_xid();
++
++ /* skip the init task, global and per guest */
++ if (task_is_init(p))
+ return true;
+ if (p->flags & PF_KTHREAD)
+ return true;
+
++ /* skip other guest and host processes if oom in guest */
++ if (xid && vx_task_xid(p) != xid)
++ return true;
++
+ /* When mem_cgroup_out_of_memory() and p is not member of the group */
+ if (memcg && !task_in_mem_cgroup(p, memcg))
+ return true;
+@@ -472,8 +481,8 @@ void oom_kill_process(struct task_struct
+ dump_header(p, gfp_mask, order, memcg, nodemask);
+
+ task_lock(p);
+- pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
+- message, task_pid_nr(p), p->comm, points);
++ pr_err("%s: Kill process %d:#%u (%s) score %d or sacrifice child\n",
++ message, task_pid_nr(p), p->xid, p->comm, points);
+ task_unlock(p);
+
+ /*
+@@ -518,8 +527,8 @@ void oom_kill_process(struct task_struct
+
+ /* mm cannot safely be dereferenced after task_unlock(victim) */
+ mm = victim->mm;
+- pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
+- task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
++ pr_err("Killed process %d:#%u (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
++ task_pid_nr(victim), victim->xid, victim->comm, K(victim->mm->total_vm),
+ K(get_mm_counter(victim->mm, MM_ANONPAGES)),
+ K(get_mm_counter(victim->mm, MM_FILEPAGES)));
+ task_unlock(victim);
+@@ -589,6 +598,8 @@ int unregister_oom_notifier(struct notif
+ }
+ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
+
++long vs_oom_action(unsigned int);
++
+ /*
+ * Try to acquire the OOM killer lock for the zones in zonelist. Returns zero
+ * if a parallel OOM killing is already taking place that includes a zone in
+@@ -738,7 +749,12 @@ void out_of_memory(struct zonelist *zone
+ /* Found nothing?!?! Either we hang forever, or we panic. */
+ if (!p) {
+ dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
+- panic("Out of memory and no killable processes...\n");
++
++ /* avoid panic for guest OOM */
++ if (current->xid)
++ vs_oom_action(LINUX_REBOOT_CMD_OOM);
++ else
++ panic("Out of memory and no killable processes...\n");
+ }
+ if (PTR_ERR(p) != -1UL) {
+ oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
+diff -NurpP --minimal linux-3.6.10/mm/page_alloc.c linux-3.6.10-vs2.3.4.6/mm/page_alloc.c
+--- linux-3.6.10/mm/page_alloc.c 2012-12-11 11:37:02.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/mm/page_alloc.c 2012-12-08 00:36:33.000000000 +0000
+@@ -58,6 +58,8 @@
+ #include <linux/prefetch.h>
+ #include <linux/migrate.h>
+ #include <linux/page-debug-flags.h>
++#include <linux/vs_base.h>
++#include <linux/vs_limit.h>
+
+ #include <asm/tlbflush.h>
+ #include <asm/div64.h>
+@@ -2773,6 +2775,9 @@ void si_meminfo(struct sysinfo *val)
+ val->totalhigh = totalhigh_pages;
+ val->freehigh = nr_free_highpages();
+ val->mem_unit = PAGE_SIZE;
++
++ if (vx_flags(VXF_VIRT_MEM, 0))
++ vx_vsi_meminfo(val);
+ }
+
+ EXPORT_SYMBOL(si_meminfo);
+@@ -2793,6 +2798,9 @@ void si_meminfo_node(struct sysinfo *val
+ val->freehigh = 0;
+ #endif
+ val->mem_unit = PAGE_SIZE;
++
++ if (vx_flags(VXF_VIRT_MEM, 0))
++ vx_vsi_meminfo(val);
+ }
+ #endif
+
+diff -NurpP --minimal linux-3.6.10/mm/pgtable-generic.c linux-3.6.10-vs2.3.4.6/mm/pgtable-generic.c
+--- linux-3.6.10/mm/pgtable-generic.c 2012-07-22 21:39:47.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/mm/pgtable-generic.c 2012-10-04 16:47:00.000000000 +0000
+@@ -6,6 +6,8 @@
+ * Copyright (C) 2010 Linus Torvalds
+ */
+
++#include <linux/mm.h>
++
+ #include <linux/pagemap.h>
+ #include <asm/tlb.h>
+ #include <asm-generic/pgtable.h>
+diff -NurpP --minimal linux-3.6.10/mm/shmem.c linux-3.6.10-vs2.3.4.6/mm/shmem.c
+--- linux-3.6.10/mm/shmem.c 2012-12-11 11:37:02.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/mm/shmem.c 2012-12-08 00:36:33.000000000 +0000
+@@ -1826,7 +1826,7 @@ static int shmem_statfs(struct dentry *d
+ {
+ struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
+
+- buf->f_type = TMPFS_MAGIC;
++ buf->f_type = TMPFS_SUPER_MAGIC;
+ buf->f_bsize = PAGE_CACHE_SIZE;
+ buf->f_namelen = NAME_MAX;
+ if (sbinfo->max_blocks) {
+@@ -2644,7 +2644,7 @@ int shmem_fill_super(struct super_block
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+- sb->s_magic = TMPFS_MAGIC;
++ sb->s_magic = TMPFS_SUPER_MAGIC;
+ sb->s_op = &shmem_ops;
+ sb->s_time_gran = 1;
+ #ifdef CONFIG_TMPFS_XATTR
+diff -NurpP --minimal linux-3.6.10/mm/slab.c linux-3.6.10-vs2.3.4.6/mm/slab.c
+--- linux-3.6.10/mm/slab.c 2012-12-11 11:37:03.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/mm/slab.c 2012-11-06 17:43:41.000000000 +0000
+@@ -445,6 +445,8 @@ static void kmem_list3_init(struct kmem_
+ #define STATS_INC_FREEMISS(x) do { } while (0)
+ #endif
+
++#include "slab_vs.h"
++
+ #if DEBUG
+
+ /*
+@@ -3539,6 +3541,7 @@ retry:
+
+ obj = slab_get_obj(cachep, slabp, nodeid);
+ check_slabp(cachep, slabp);
++ vx_slab_alloc(cachep, flags);
+ l3->free_objects--;
+ /* move slabp to correct slabp list: */
+ list_del(&slabp->list);
+@@ -3616,6 +3619,7 @@ __cache_alloc_node(struct kmem_cache *ca
+ /* ___cache_alloc_node can fall back to other nodes */
+ ptr = ____cache_alloc_node(cachep, flags, nodeid);
+ out:
++ vx_slab_alloc(cachep, flags);
+ local_irq_restore(save_flags);
+ ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
+ kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
+@@ -3806,6 +3810,7 @@ static inline void __cache_free(struct k
+ check_irq_off();
+ kmemleak_free_recursive(objp, cachep->flags);
+ objp = cache_free_debugcheck(cachep, objp, caller);
++ vx_slab_free(cachep);
+
+ kmemcheck_slab_free(cachep, objp, cachep->object_size);
+
+diff -NurpP --minimal linux-3.6.10/mm/slab_vs.h linux-3.6.10-vs2.3.4.6/mm/slab_vs.h
+--- linux-3.6.10/mm/slab_vs.h 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/mm/slab_vs.h 2012-10-09 14:28:11.000000000 +0000
+@@ -0,0 +1,29 @@
++
++#include <linux/vserver/context.h>
++
++#include <linux/vs_context.h>
++
++static inline
++void vx_slab_alloc(struct kmem_cache *cachep, gfp_t flags)
++{
++ int what = gfp_zone(cachep->allocflags);
++ struct vx_info *vxi = current_vx_info();
++
++ if (!vxi)
++ return;
++
++ atomic_add(cachep->size, &vxi->cacct.slab[what]);
++}
++
++static inline
++void vx_slab_free(struct kmem_cache *cachep)
++{
++ int what = gfp_zone(cachep->allocflags);
++ struct vx_info *vxi = current_vx_info();
++
++ if (!vxi)
++ return;
++
++ atomic_sub(cachep->size, &vxi->cacct.slab[what]);
++}
++
+diff -NurpP --minimal linux-3.6.10/mm/swapfile.c linux-3.6.10-vs2.3.4.6/mm/swapfile.c
+--- linux-3.6.10/mm/swapfile.c 2012-10-04 13:27:48.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/mm/swapfile.c 2012-10-04 16:47:00.000000000 +0000
+@@ -39,6 +39,7 @@
+ #include <asm/tlbflush.h>
+ #include <linux/swapops.h>
+ #include <linux/page_cgroup.h>
++#include <linux/vs_base.h>
+
+ static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
+ unsigned char);
+@@ -1686,6 +1687,16 @@ static int swap_show(struct seq_file *sw
+
+ if (si == SEQ_START_TOKEN) {
+ seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
++ if (vx_flags(VXF_VIRT_MEM, 0)) {
++ struct sysinfo si;
++
++ vx_vsi_swapinfo(&si);
++ if (si.totalswap < (1 << 10))
++ return 0;
++ seq_printf(swap, "%s\t\t\t\t\t%s\t%lu\t%lu\t%d\n",
++ "hdv0", "partition", si.totalswap >> 10,
++ (si.totalswap - si.freeswap) >> 10, -1);
++ }
+ return 0;
+ }
+
+@@ -2113,6 +2124,8 @@ void si_swapinfo(struct sysinfo *val)
+ val->freeswap = nr_swap_pages + nr_to_be_unused;
+ val->totalswap = total_swap_pages + nr_to_be_unused;
+ spin_unlock(&swap_lock);
++ if (vx_flags(VXF_VIRT_MEM, 0))
++ vx_vsi_swapinfo(val);
+ }
+
+ /*
+diff -NurpP --minimal linux-3.6.10/net/bridge/br_multicast.c linux-3.6.10-vs2.3.4.6/net/bridge/br_multicast.c
+--- linux-3.6.10/net/bridge/br_multicast.c 2012-10-04 13:27:49.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/bridge/br_multicast.c 2012-10-04 16:47:00.000000000 +0000
+@@ -447,7 +447,7 @@ static struct sk_buff *br_ip6_multicast_
+ ip6h->hop_limit = 1;
+ ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
+ if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
+- &ip6h->saddr)) {
++ &ip6h->saddr, NULL)) {
+ kfree_skb(skb);
+ return NULL;
+ }
+diff -NurpP --minimal linux-3.6.10/net/core/dev.c linux-3.6.10-vs2.3.4.6/net/core/dev.c
+--- linux-3.6.10/net/core/dev.c 2012-12-11 11:37:03.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/core/dev.c 2012-12-08 00:36:33.000000000 +0000
+@@ -126,6 +126,7 @@
+ #include <linux/in.h>
+ #include <linux/jhash.h>
+ #include <linux/random.h>
++#include <linux/vs_inet.h>
+ #include <trace/events/napi.h>
+ #include <trace/events/net.h>
+ #include <trace/events/skb.h>
+@@ -620,7 +621,8 @@ struct net_device *__dev_get_by_name(str
+ struct hlist_head *head = dev_name_hash(net, name);
+
+ hlist_for_each_entry(dev, p, head, name_hlist)
+- if (!strncmp(dev->name, name, IFNAMSIZ))
++ if (!strncmp(dev->name, name, IFNAMSIZ) &&
++ nx_dev_visible(current_nx_info(), dev))
+ return dev;
+
+ return NULL;
+@@ -646,7 +648,8 @@ struct net_device *dev_get_by_name_rcu(s
+ struct hlist_head *head = dev_name_hash(net, name);
+
+ hlist_for_each_entry_rcu(dev, p, head, name_hlist)
+- if (!strncmp(dev->name, name, IFNAMSIZ))
++ if (!strncmp(dev->name, name, IFNAMSIZ) &&
++ nx_dev_visible(current_nx_info(), dev))
+ return dev;
+
+ return NULL;
+@@ -697,7 +700,8 @@ struct net_device *__dev_get_by_index(st
+ struct hlist_head *head = dev_index_hash(net, ifindex);
+
+ hlist_for_each_entry(dev, p, head, index_hlist)
+- if (dev->ifindex == ifindex)
++ if ((dev->ifindex == ifindex) &&
++ nx_dev_visible(current_nx_info(), dev))
+ return dev;
+
+ return NULL;
+@@ -715,7 +719,7 @@ EXPORT_SYMBOL(__dev_get_by_index);
+ * about locking. The caller must hold RCU lock.
+ */
+
+-struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
++struct net_device *dev_get_by_index_real_rcu(struct net *net, int ifindex)
+ {
+ struct hlist_node *p;
+ struct net_device *dev;
+@@ -727,6 +731,16 @@ struct net_device *dev_get_by_index_rcu(
+
+ return NULL;
+ }
++EXPORT_SYMBOL(dev_get_by_index_real_rcu);
++
++struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
++{
++ struct net_device *dev = dev_get_by_index_real_rcu(net, ifindex);
++
++ if (nx_dev_visible(current_nx_info(), dev))
++ return dev;
++ return NULL;
++}
+ EXPORT_SYMBOL(dev_get_by_index_rcu);
+
+
+@@ -775,7 +789,8 @@ struct net_device *dev_getbyhwaddr_rcu(s
+
+ for_each_netdev_rcu(net, dev)
+ if (dev->type == type &&
+- !memcmp(dev->dev_addr, ha, dev->addr_len))
++ !memcmp(dev->dev_addr, ha, dev->addr_len) &&
++ nx_dev_visible(current_nx_info(), dev))
+ return dev;
+
+ return NULL;
+@@ -787,9 +802,11 @@ struct net_device *__dev_getfirstbyhwtyp
+ struct net_device *dev;
+
+ ASSERT_RTNL();
+- for_each_netdev(net, dev)
+- if (dev->type == type)
++ for_each_netdev(net, dev) {
++ if ((dev->type == type) &&
++ nx_dev_visible(current_nx_info(), dev))
+ return dev;
++ }
+
+ return NULL;
+ }
+@@ -907,6 +924,8 @@ static int __dev_alloc_name(struct net *
+ continue;
+ if (i < 0 || i >= max_netdevices)
+ continue;
++ if (!nx_dev_visible(current_nx_info(), d))
++ continue;
+
+ /* avoid cases where sscanf is not exact inverse of printf */
+ snprintf(buf, IFNAMSIZ, name, i);
+@@ -4076,6 +4095,8 @@ static int dev_ifconf(struct net *net, c
+
+ total = 0;
+ for_each_netdev(net, dev) {
++ if (!nx_dev_visible(current_nx_info(), dev))
++ continue;
+ for (i = 0; i < NPROTO; i++) {
+ if (gifconf_list[i]) {
+ int done;
+@@ -4178,6 +4199,10 @@ static void dev_seq_printf_stats(struct
+ struct rtnl_link_stats64 temp;
+ const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
+
++ /* device visible inside network context? */
++ if (!nx_dev_visible(current_nx_info(), dev))
++ return;
++
+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
+ dev->name, stats->rx_bytes, stats->rx_packets,
+diff -NurpP --minimal linux-3.6.10/net/core/rtnetlink.c linux-3.6.10-vs2.3.4.6/net/core/rtnetlink.c
+--- linux-3.6.10/net/core/rtnetlink.c 2012-10-04 13:27:49.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/core/rtnetlink.c 2012-10-04 16:47:00.000000000 +0000
+@@ -1080,6 +1080,8 @@ static int rtnl_dump_ifinfo(struct sk_bu
+ hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+ if (idx < s_idx)
+ goto cont;
++ if (!nx_dev_visible(skb->sk->sk_nx_info, dev))
++ continue;
+ if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, 0,
+@@ -1969,6 +1971,9 @@ void rtmsg_ifinfo(int type, struct net_d
+ int err = -ENOBUFS;
+ size_t if_info_size;
+
++ if (!nx_dev_visible(current_nx_info(), dev))
++ return;
++
+ skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), GFP_KERNEL);
+ if (skb == NULL)
+ goto errout;
+diff -NurpP --minimal linux-3.6.10/net/core/sock.c linux-3.6.10-vs2.3.4.6/net/core/sock.c
+--- linux-3.6.10/net/core/sock.c 2012-10-04 13:27:49.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/core/sock.c 2012-10-04 16:47:00.000000000 +0000
+@@ -132,6 +132,10 @@
+ #include <net/netprio_cgroup.h>
+
+ #include <linux/filter.h>
++#include <linux/vs_socket.h>
++#include <linux/vs_limit.h>
++#include <linux/vs_context.h>
++#include <linux/vs_network.h>
+
+ #include <trace/events/sock.h>
+
+@@ -1194,6 +1198,8 @@ static struct sock *sk_prot_alloc(struct
+ goto out_free_sec;
+ sk_tx_queue_clear(sk);
+ }
++ sock_vx_init(sk);
++ sock_nx_init(sk);
+
+ return sk;
+
+@@ -1302,6 +1308,11 @@ static void __sk_free(struct sock *sk)
+ put_cred(sk->sk_peer_cred);
+ put_pid(sk->sk_peer_pid);
+ put_net(sock_net(sk));
++ vx_sock_dec(sk);
++ clr_vx_info(&sk->sk_vx_info);
++ sk->sk_xid = -1;
++ clr_nx_info(&sk->sk_nx_info);
++ sk->sk_nid = -1;
+ sk_prot_free(sk->sk_prot_creator, sk);
+ }
+
+@@ -1362,6 +1373,8 @@ struct sock *sk_clone_lock(const struct
+
+ /* SANITY */
+ get_net(sock_net(newsk));
++ sock_vx_init(newsk);
++ sock_nx_init(newsk);
+ sk_node_init(&newsk->sk_node);
+ sock_lock_init(newsk);
+ bh_lock_sock(newsk);
+@@ -1418,6 +1431,12 @@ struct sock *sk_clone_lock(const struct
+ smp_wmb();
+ atomic_set(&newsk->sk_refcnt, 2);
+
++ set_vx_info(&newsk->sk_vx_info, sk->sk_vx_info);
++ newsk->sk_xid = sk->sk_xid;
++ vx_sock_inc(newsk);
++ set_nx_info(&newsk->sk_nx_info, sk->sk_nx_info);
++ newsk->sk_nid = sk->sk_nid;
++
+ /*
+ * Increment the counter in the same struct proto as the master
+ * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
+@@ -2187,6 +2206,12 @@ void sock_init_data(struct socket *sock,
+
+ sk->sk_stamp = ktime_set(-1L, 0);
+
++ set_vx_info(&sk->sk_vx_info, current_vx_info());
++ sk->sk_xid = vx_current_xid();
++ vx_sock_inc(sk);
++ set_nx_info(&sk->sk_nx_info, current_nx_info());
++ sk->sk_nid = nx_current_nid();
++
+ /*
+ * Before updating sk_refcnt, we must commit prior changes to memory
+ * (Documentation/RCU/rculist_nulls.txt for details)
+diff -NurpP --minimal linux-3.6.10/net/ipv4/af_inet.c linux-3.6.10-vs2.3.4.6/net/ipv4/af_inet.c
+--- linux-3.6.10/net/ipv4/af_inet.c 2012-10-04 13:27:49.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv4/af_inet.c 2012-10-04 16:47:00.000000000 +0000
+@@ -118,6 +118,7 @@
+ #ifdef CONFIG_IP_MROUTE
+ #include <linux/mroute.h>
+ #endif
++#include <linux/vs_limit.h>
+
+
+ /* The inetsw table contains everything that inet_create needs to
+@@ -326,9 +327,13 @@ lookup_protocol:
+ }
+
+ err = -EPERM;
++ if ((protocol == IPPROTO_ICMP) &&
++ nx_capable(CAP_NET_RAW, NXC_RAW_ICMP))
++ goto override;
++
+ if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
+ goto out_rcu_unlock;
+-
++override:
+ err = -EAFNOSUPPORT;
+ if (!inet_netns_ok(net, protocol))
+ goto out_rcu_unlock;
+@@ -453,6 +458,7 @@ int inet_bind(struct socket *sock, struc
+ struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
+ struct sock *sk = sock->sk;
+ struct inet_sock *inet = inet_sk(sk);
++ struct nx_v4_sock_addr nsa;
+ unsigned short snum;
+ int chk_addr_ret;
+ int err;
+@@ -476,7 +482,11 @@ int inet_bind(struct socket *sock, struc
+ goto out;
+ }
+
+- chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
++ err = v4_map_sock_addr(inet, addr, &nsa);
++ if (err)
++ goto out;
++
++ chk_addr_ret = inet_addr_type(sock_net(sk), nsa.saddr);
+
+ /* Not specified by any standard per-se, however it breaks too
+ * many applications when removed. It is unfortunate since
+@@ -488,7 +498,7 @@ int inet_bind(struct socket *sock, struc
+ err = -EADDRNOTAVAIL;
+ if (!sysctl_ip_nonlocal_bind &&
+ !(inet->freebind || inet->transparent) &&
+- addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
++ nsa.saddr != htonl(INADDR_ANY) &&
+ chk_addr_ret != RTN_LOCAL &&
+ chk_addr_ret != RTN_MULTICAST &&
+ chk_addr_ret != RTN_BROADCAST)
+@@ -513,7 +523,7 @@ int inet_bind(struct socket *sock, struc
+ if (sk->sk_state != TCP_CLOSE || inet->inet_num)
+ goto out_release_sock;
+
+- inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
++ v4_set_sock_addr(inet, &nsa);
+ if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
+ inet->inet_saddr = 0; /* Use device */
+
+@@ -731,11 +741,13 @@ int inet_getname(struct socket *sock, st
+ peer == 1))
+ return -ENOTCONN;
+ sin->sin_port = inet->inet_dport;
+- sin->sin_addr.s_addr = inet->inet_daddr;
++ sin->sin_addr.s_addr =
++ nx_map_sock_lback(sk->sk_nx_info, inet->inet_daddr);
+ } else {
+ __be32 addr = inet->inet_rcv_saddr;
+ if (!addr)
+ addr = inet->inet_saddr;
++ addr = nx_map_sock_lback(sk->sk_nx_info, addr);
+ sin->sin_port = inet->inet_sport;
+ sin->sin_addr.s_addr = addr;
+ }
+diff -NurpP --minimal linux-3.6.10/net/ipv4/arp.c linux-3.6.10-vs2.3.4.6/net/ipv4/arp.c
+--- linux-3.6.10/net/ipv4/arp.c 2012-10-04 13:27:49.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv4/arp.c 2012-10-04 16:47:00.000000000 +0000
+@@ -1318,6 +1318,7 @@ static void arp_format_neigh_entry(struc
+ struct net_device *dev = n->dev;
+ int hatype = dev->type;
+
++ /* FIXME: check for network context */
+ read_lock(&n->lock);
+ /* Convert hardware address to XX:XX:XX:XX ... form. */
+ #if IS_ENABLED(CONFIG_AX25)
+@@ -1349,6 +1350,7 @@ static void arp_format_pneigh_entry(stru
+ int hatype = dev ? dev->type : 0;
+ char tbuf[16];
+
++ /* FIXME: check for network context */
+ sprintf(tbuf, "%pI4", n->key);
+ seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n",
+ tbuf, hatype, ATF_PUBL | ATF_PERM, "00:00:00:00:00:00",
+diff -NurpP --minimal linux-3.6.10/net/ipv4/devinet.c linux-3.6.10-vs2.3.4.6/net/ipv4/devinet.c
+--- linux-3.6.10/net/ipv4/devinet.c 2012-10-04 13:27:49.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv4/devinet.c 2012-10-04 16:47:00.000000000 +0000
+@@ -516,6 +516,7 @@ struct in_device *inetdev_by_index(struc
+ }
+ EXPORT_SYMBOL(inetdev_by_index);
+
++
+ /* Called only from RTNL semaphored context. No locks. */
+
+ struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
+@@ -757,6 +758,8 @@ int devinet_ioctl(struct net *net, unsig
+
+ in_dev = __in_dev_get_rtnl(dev);
+ if (in_dev) {
++ struct nx_info *nxi = current_nx_info();
++
+ if (tryaddrmatch) {
+ /* Matthias Andree */
+ /* compare label and address (4.4BSD style) */
+@@ -765,6 +768,8 @@ int devinet_ioctl(struct net *net, unsig
+ This is checked above. */
+ for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
+ ifap = &ifa->ifa_next) {
++ if (!nx_v4_ifa_visible(nxi, ifa))
++ continue;
+ if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
+ sin_orig.sin_addr.s_addr ==
+ ifa->ifa_local) {
+@@ -777,9 +782,12 @@ int devinet_ioctl(struct net *net, unsig
+ comparing just the label */
+ if (!ifa) {
+ for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
+- ifap = &ifa->ifa_next)
++ ifap = &ifa->ifa_next) {
++ if (!nx_v4_ifa_visible(nxi, ifa))
++ continue;
+ if (!strcmp(ifr.ifr_name, ifa->ifa_label))
+ break;
++ }
+ }
+ }
+
+@@ -932,6 +940,8 @@ static int inet_gifconf(struct net_devic
+ goto out;
+
+ for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
++ if (!nx_v4_ifa_visible(current_nx_info(), ifa))
++ continue;
+ if (!buf) {
+ done += sizeof(ifr);
+ continue;
+@@ -1291,6 +1301,7 @@ static int inet_dump_ifaddr(struct sk_bu
+ struct net_device *dev;
+ struct in_device *in_dev;
+ struct in_ifaddr *ifa;
++ struct sock *sk = skb->sk;
+ struct hlist_head *head;
+ struct hlist_node *node;
+
+@@ -1313,6 +1324,8 @@ static int inet_dump_ifaddr(struct sk_bu
+
+ for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
+ ifa = ifa->ifa_next, ip_idx++) {
++ if (sk && !nx_v4_ifa_visible(sk->sk_nx_info, ifa))
++ continue;
+ if (ip_idx < s_ip_idx)
+ continue;
+ if (inet_fill_ifaddr(skb, ifa,
+diff -NurpP --minimal linux-3.6.10/net/ipv4/fib_trie.c linux-3.6.10-vs2.3.4.6/net/ipv4/fib_trie.c
+--- linux-3.6.10/net/ipv4/fib_trie.c 2012-10-04 13:27:49.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv4/fib_trie.c 2012-10-04 16:47:00.000000000 +0000
+@@ -2554,6 +2554,7 @@ static int fib_route_seq_show(struct seq
+ || fa->fa_type == RTN_MULTICAST)
+ continue;
+
++ /* FIXME: check for network context? */
+ if (fi)
+ seq_printf(seq,
+ "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
+diff -NurpP --minimal linux-3.6.10/net/ipv4/inet_connection_sock.c linux-3.6.10-vs2.3.4.6/net/ipv4/inet_connection_sock.c
+--- linux-3.6.10/net/ipv4/inet_connection_sock.c 2012-12-11 11:37:03.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv4/inet_connection_sock.c 2012-11-06 17:43:41.000000000 +0000
+@@ -53,6 +53,37 @@ void inet_get_local_port_range(int *low,
+ }
+ EXPORT_SYMBOL(inet_get_local_port_range);
+
++int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
++{
++ __be32 sk1_rcv_saddr = sk_rcv_saddr(sk1),
++ sk2_rcv_saddr = sk_rcv_saddr(sk2);
++
++ if (inet_v6_ipv6only(sk2))
++ return 0;
++
++ if (sk1_rcv_saddr &&
++ sk2_rcv_saddr &&
++ sk1_rcv_saddr == sk2_rcv_saddr)
++ return 1;
++
++ if (sk1_rcv_saddr &&
++ !sk2_rcv_saddr &&
++ v4_addr_in_nx_info(sk2->sk_nx_info, sk1_rcv_saddr, NXA_MASK_BIND))
++ return 1;
++
++ if (sk2_rcv_saddr &&
++ !sk1_rcv_saddr &&
++ v4_addr_in_nx_info(sk1->sk_nx_info, sk2_rcv_saddr, NXA_MASK_BIND))
++ return 1;
++
++ if (!sk1_rcv_saddr &&
++ !sk2_rcv_saddr &&
++ nx_v4_addr_conflict(sk1->sk_nx_info, sk2->sk_nx_info))
++ return 1;
++
++ return 0;
++}
++
+ int inet_csk_bind_conflict(const struct sock *sk,
+ const struct inet_bind_bucket *tb, bool relax)
+ {
+@@ -75,9 +106,7 @@ int inet_csk_bind_conflict(const struct
+ sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
+ if (!reuse || !sk2->sk_reuse ||
+ sk2->sk_state == TCP_LISTEN) {
+- const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
+- if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
+- sk2_rcv_saddr == sk_rcv_saddr(sk))
++ if (ipv4_rcv_saddr_equal(sk, sk2))
+ break;
+ }
+ if (!relax && reuse && sk2->sk_reuse &&
+diff -NurpP --minimal linux-3.6.10/net/ipv4/inet_diag.c linux-3.6.10-vs2.3.4.6/net/ipv4/inet_diag.c
+--- linux-3.6.10/net/ipv4/inet_diag.c 2012-12-11 11:37:03.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv4/inet_diag.c 2012-12-08 00:36:33.000000000 +0000
+@@ -31,6 +31,8 @@
+
+ #include <linux/inet.h>
+ #include <linux/stddef.h>
++#include <linux/vs_network.h>
++#include <linux/vs_inet.h>
+
+ #include <linux/inet_diag.h>
+ #include <linux/sock_diag.h>
+@@ -101,8 +103,10 @@ int inet_sk_diag_fill(struct sock *sk, s
+
+ r->id.idiag_sport = inet->inet_sport;
+ r->id.idiag_dport = inet->inet_dport;
+- r->id.idiag_src[0] = inet->inet_rcv_saddr;
+- r->id.idiag_dst[0] = inet->inet_daddr;
++ r->id.idiag_src[0] = nx_map_sock_lback(sk->sk_nx_info,
++ inet->inet_rcv_saddr);
++ r->id.idiag_dst[0] = nx_map_sock_lback(sk->sk_nx_info,
++ inet->inet_daddr);
+
+ /* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
+ * hence this needs to be included regardless of socket family.
+@@ -233,8 +237,8 @@ static int inet_twsk_diag_fill(struct in
+ sock_diag_save_cookie(tw, r->id.idiag_cookie);
+ r->id.idiag_sport = tw->tw_sport;
+ r->id.idiag_dport = tw->tw_dport;
+- r->id.idiag_src[0] = tw->tw_rcv_saddr;
+- r->id.idiag_dst[0] = tw->tw_daddr;
++ r->id.idiag_src[0] = nx_map_sock_lback(tw->tw_nx_info, tw->tw_rcv_saddr);
++ r->id.idiag_dst[0] = nx_map_sock_lback(tw->tw_nx_info, tw->tw_daddr);
+ r->idiag_state = tw->tw_substate;
+ r->idiag_timer = 3;
+ r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ);
+@@ -276,12 +280,14 @@ int inet_diag_dump_one_icsk(struct inet_
+
+ err = -EINVAL;
+ if (req->sdiag_family == AF_INET) {
++ /* TODO: lback */
+ sk = inet_lookup(net, hashinfo, req->id.idiag_dst[0],
+ req->id.idiag_dport, req->id.idiag_src[0],
+ req->id.idiag_sport, req->id.idiag_if);
+ }
+ #if IS_ENABLED(CONFIG_IPV6)
+ else if (req->sdiag_family == AF_INET6) {
++ /* TODO: lback */
+ sk = inet6_lookup(net, hashinfo,
+ (struct in6_addr *)req->id.idiag_dst,
+ req->id.idiag_dport,
+@@ -476,6 +482,7 @@ int inet_diag_bc_sk(const struct nlattr
+ } else
+ #endif
+ {
++ /* TODO: lback */
+ entry.saddr = &inet->inet_rcv_saddr;
+ entry.daddr = &inet->inet_daddr;
+ }
+@@ -574,6 +581,7 @@ static int inet_twsk_diag_dump(struct in
+ } else
+ #endif
+ {
++ /* TODO: lback */
+ entry.saddr = &tw->tw_rcv_saddr;
+ entry.daddr = &tw->tw_daddr;
+ }
+@@ -620,8 +628,8 @@ static int inet_diag_fill_req(struct sk_
+
+ r->id.idiag_sport = inet->inet_sport;
+ r->id.idiag_dport = ireq->rmt_port;
+- r->id.idiag_src[0] = ireq->loc_addr;
+- r->id.idiag_dst[0] = ireq->rmt_addr;
++ r->id.idiag_src[0] = nx_map_sock_lback(sk->sk_nx_info, ireq->loc_addr);
++ r->id.idiag_dst[0] = nx_map_sock_lback(sk->sk_nx_info, ireq->rmt_addr);
+ r->idiag_expires = jiffies_to_msecs(tmo);
+ r->idiag_rqueue = 0;
+ r->idiag_wqueue = 0;
+@@ -748,6 +757,8 @@ void inet_diag_dump_icsk(struct inet_has
+ if (!net_eq(sock_net(sk), net))
+ continue;
+
++ if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
++ continue;
+ if (num < s_num) {
+ num++;
+ continue;
+@@ -820,6 +831,8 @@ skip_listen_ht:
+
+ if (!net_eq(sock_net(sk), net))
+ continue;
++ if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
++ continue;
+ if (num < s_num)
+ goto next_normal;
+ if (!(r->idiag_states & (1 << sk->sk_state)))
+@@ -848,7 +861,8 @@ next_normal:
+ &head->twchain) {
+ if (!net_eq(twsk_net(tw), net))
+ continue;
+-
++ if (!nx_check(tw->tw_nid, VS_WATCH_P | VS_IDENT))
++ continue;
+ if (num < s_num)
+ goto next_dying;
+ if (r->sdiag_family != AF_UNSPEC &&
+diff -NurpP --minimal linux-3.6.10/net/ipv4/inet_hashtables.c linux-3.6.10-vs2.3.4.6/net/ipv4/inet_hashtables.c
+--- linux-3.6.10/net/ipv4/inet_hashtables.c 2012-07-22 21:39:48.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv4/inet_hashtables.c 2012-10-04 16:47:00.000000000 +0000
+@@ -22,6 +22,7 @@
+ #include <net/inet_connection_sock.h>
+ #include <net/inet_hashtables.h>
+ #include <net/secure_seq.h>
++#include <net/route.h>
+ #include <net/ip.h>
+
+ /*
+@@ -156,6 +157,11 @@ static inline int compute_score(struct s
+ if (rcv_saddr != daddr)
+ return -1;
+ score += 2;
++ } else {
++ /* block non nx_info ips */
++ if (!v4_addr_in_nx_info(sk->sk_nx_info,
++ daddr, NXA_MASK_BIND))
++ return -1;
+ }
+ if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if != dif)
+@@ -173,7 +179,6 @@ static inline int compute_score(struct s
+ * wildcarded during the search since they can never be otherwise.
+ */
+
+-
+ struct sock *__inet_lookup_listener(struct net *net,
+ struct inet_hashinfo *hashinfo,
+ const __be32 daddr, const unsigned short hnum,
+@@ -196,6 +201,7 @@ begin:
+ hiscore = score;
+ }
+ }
++
+ /*
+ * if the nulls value we got at the end of this lookup is
+ * not the expected one, we must restart lookup.
+diff -NurpP --minimal linux-3.6.10/net/ipv4/netfilter/nf_nat_helper.c linux-3.6.10-vs2.3.4.6/net/ipv4/netfilter/nf_nat_helper.c
+--- linux-3.6.10/net/ipv4/netfilter/nf_nat_helper.c 2012-10-04 13:27:50.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv4/netfilter/nf_nat_helper.c 2012-10-04 16:47:00.000000000 +0000
+@@ -20,6 +20,7 @@
+ #include <net/route.h>
+
+ #include <linux/netfilter_ipv4.h>
++#include <net/route.h>
+ #include <net/netfilter/nf_conntrack.h>
+ #include <net/netfilter/nf_conntrack_helper.h>
+ #include <net/netfilter/nf_conntrack_ecache.h>
+diff -NurpP --minimal linux-3.6.10/net/ipv4/netfilter.c linux-3.6.10-vs2.3.4.6/net/ipv4/netfilter.c
+--- linux-3.6.10/net/ipv4/netfilter.c 2012-07-22 21:39:48.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv4/netfilter.c 2012-10-04 16:47:00.000000000 +0000
+@@ -6,7 +6,7 @@
+ #include <linux/skbuff.h>
+ #include <linux/gfp.h>
+ #include <linux/export.h>
+-#include <net/route.h>
++// #include <net/route.h>
+ #include <net/xfrm.h>
+ #include <net/ip.h>
+ #include <net/netfilter/nf_queue.h>
+diff -NurpP --minimal linux-3.6.10/net/ipv4/raw.c linux-3.6.10-vs2.3.4.6/net/ipv4/raw.c
+--- linux-3.6.10/net/ipv4/raw.c 2012-10-04 13:27:50.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv4/raw.c 2012-10-04 16:47:00.000000000 +0000
+@@ -118,7 +118,7 @@ static struct sock *__raw_v4_lookup(stru
+
+ if (net_eq(sock_net(sk), net) && inet->inet_num == num &&
+ !(inet->inet_daddr && inet->inet_daddr != raddr) &&
+- !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
++ v4_sock_addr_match(sk->sk_nx_info, inet, laddr) &&
+ !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+ goto found; /* gotcha */
+ }
+@@ -397,6 +397,12 @@ static int raw_send_hdrinc(struct sock *
+ icmp_out_count(net, ((struct icmphdr *)
+ skb_transport_header(skb))->type);
+
++ err = -EPERM;
++ if (!nx_check(0, VS_ADMIN) && !capable(CAP_NET_RAW) &&
++ sk->sk_nx_info &&
++ !v4_addr_in_nx_info(sk->sk_nx_info, iph->saddr, NXA_MASK_BIND))
++ goto error_free;
++
+ err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
+ rt->dst.dev, dst_output);
+ if (err > 0)
+@@ -582,6 +588,16 @@ static int raw_sendmsg(struct kiocb *ioc
+ goto done;
+ }
+
++ if (sk->sk_nx_info) {
++ rt = ip_v4_find_src(sock_net(sk), sk->sk_nx_info, &fl4);
++ if (IS_ERR(rt)) {
++ err = PTR_ERR(rt);
++ rt = NULL;
++ goto done;
++ }
++ ip_rt_put(rt);
++ }
++
+ security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
+ rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
+ if (IS_ERR(rt)) {
+@@ -658,17 +674,19 @@ static int raw_bind(struct sock *sk, str
+ {
+ struct inet_sock *inet = inet_sk(sk);
+ struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
++ struct nx_v4_sock_addr nsa = { 0 };
+ int ret = -EINVAL;
+ int chk_addr_ret;
+
+ if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
+ goto out;
+- chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
++ v4_map_sock_addr(inet, addr, &nsa);
++ chk_addr_ret = inet_addr_type(sock_net(sk), nsa.saddr);
+ ret = -EADDRNOTAVAIL;
+- if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
++ if (nsa.saddr && chk_addr_ret != RTN_LOCAL &&
+ chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
+ goto out;
+- inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
++ v4_set_sock_addr(inet, &nsa);
+ if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
+ inet->inet_saddr = 0; /* Use device */
+ sk_dst_reset(sk);
+@@ -720,7 +738,8 @@ static int raw_recvmsg(struct kiocb *ioc
+ /* Copy the address. */
+ if (sin) {
+ sin->sin_family = AF_INET;
+- sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
++ sin->sin_addr.s_addr =
++ nx_map_sock_lback(sk->sk_nx_info, ip_hdr(skb)->saddr);
+ sin->sin_port = 0;
+ memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
+ }
+@@ -916,7 +935,8 @@ static struct sock *raw_get_first(struct
+ struct hlist_node *node;
+
+ sk_for_each(sk, node, &state->h->ht[state->bucket])
+- if (sock_net(sk) == seq_file_net(seq))
++ if ((sock_net(sk) == seq_file_net(seq)) &&
++ nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+ goto found;
+ }
+ sk = NULL;
+@@ -932,7 +952,8 @@ static struct sock *raw_get_next(struct
+ sk = sk_next(sk);
+ try_again:
+ ;
+- } while (sk && sock_net(sk) != seq_file_net(seq));
++ } while (sk && ((sock_net(sk) != seq_file_net(seq)) ||
++ !nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)));
+
+ if (!sk && ++state->bucket < RAW_HTABLE_SIZE) {
+ sk = sk_head(&state->h->ht[state->bucket]);
+diff -NurpP --minimal linux-3.6.10/net/ipv4/route.c linux-3.6.10-vs2.3.4.6/net/ipv4/route.c
+--- linux-3.6.10/net/ipv4/route.c 2012-12-11 11:37:03.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv4/route.c 2012-12-08 00:36:33.000000000 +0000
+@@ -1949,7 +1949,7 @@ struct rtable *__ip_route_output_key(str
+
+
+ if (fl4->flowi4_oif) {
+- dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
++ dev_out = dev_get_by_index_real_rcu(net, fl4->flowi4_oif);
+ rth = ERR_PTR(-ENODEV);
+ if (dev_out == NULL)
+ goto out;
+diff -NurpP --minimal linux-3.6.10/net/ipv4/tcp.c linux-3.6.10-vs2.3.4.6/net/ipv4/tcp.c
+--- linux-3.6.10/net/ipv4/tcp.c 2012-12-11 11:37:03.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv4/tcp.c 2012-12-08 00:36:33.000000000 +0000
+@@ -268,6 +268,7 @@
+ #include <linux/crypto.h>
+ #include <linux/time.h>
+ #include <linux/slab.h>
++#include <linux/in.h>
+
+ #include <net/icmp.h>
+ #include <net/inet_common.h>
+diff -NurpP --minimal linux-3.6.10/net/ipv4/tcp_ipv4.c linux-3.6.10-vs2.3.4.6/net/ipv4/tcp_ipv4.c
+--- linux-3.6.10/net/ipv4/tcp_ipv4.c 2012-12-11 11:37:03.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv4/tcp_ipv4.c 2012-11-06 17:43:41.000000000 +0000
+@@ -2034,6 +2034,12 @@ static void *listening_get_next(struct s
+ req = req->dl_next;
+ while (1) {
+ while (req) {
++ vxdprintk(VXD_CBIT(net, 6),
++ "sk,req: %p [#%d] (from %d)", req->sk,
++ (req->sk)?req->sk->sk_nid:0, nx_current_nid());
++ if (req->sk &&
++ !nx_check(req->sk->sk_nid, VS_WATCH_P | VS_IDENT))
++ continue;
+ if (req->rsk_ops->family == st->family) {
+ cur = req;
+ goto out;
+@@ -2058,6 +2064,10 @@ get_req:
+ }
+ get_sk:
+ sk_nulls_for_each_from(sk, node) {
++ vxdprintk(VXD_CBIT(net, 6), "sk: %p [#%d] (from %d)",
++ sk, sk->sk_nid, nx_current_nid());
++ if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
++ continue;
+ if (!net_eq(sock_net(sk), net))
+ continue;
+ if (sk->sk_family == st->family) {
+@@ -2134,6 +2144,11 @@ static void *established_get_first(struc
+
+ spin_lock_bh(lock);
+ sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
++ vxdprintk(VXD_CBIT(net, 6),
++ "sk,egf: %p [#%d] (from %d)",
++ sk, sk->sk_nid, nx_current_nid());
++ if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
++ continue;
+ if (sk->sk_family != st->family ||
+ !net_eq(sock_net(sk), net)) {
+ continue;
+@@ -2144,6 +2159,11 @@ static void *established_get_first(struc
+ st->state = TCP_SEQ_STATE_TIME_WAIT;
+ inet_twsk_for_each(tw, node,
+ &tcp_hashinfo.ehash[st->bucket].twchain) {
++ vxdprintk(VXD_CBIT(net, 6),
++ "tw: %p [#%d] (from %d)",
++ tw, tw->tw_nid, nx_current_nid());
++ if (!nx_check(tw->tw_nid, VS_WATCH_P | VS_IDENT))
++ continue;
+ if (tw->tw_family != st->family ||
+ !net_eq(twsk_net(tw), net)) {
+ continue;
+@@ -2173,7 +2193,9 @@ static void *established_get_next(struct
+ tw = cur;
+ tw = tw_next(tw);
+ get_tw:
+- while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
++ while (tw && (tw->tw_family != st->family ||
++ !net_eq(twsk_net(tw), net) ||
++ !nx_check(tw->tw_nid, VS_WATCH_P | VS_IDENT))) {
+ tw = tw_next(tw);
+ }
+ if (tw) {
+@@ -2197,6 +2219,11 @@ get_tw:
+ sk = sk_nulls_next(sk);
+
+ sk_nulls_for_each_from(sk, node) {
++ vxdprintk(VXD_CBIT(net, 6),
++ "sk,egn: %p [#%d] (from %d)",
++ sk, sk->sk_nid, nx_current_nid());
++ if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
++ continue;
+ if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
+ goto found;
+ }
+@@ -2402,9 +2429,9 @@ static void get_openreq4(const struct so
+ seq_printf(f, "%4d: %08X:%04X %08X:%04X"
+ " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
+ i,
+- ireq->loc_addr,
++ nx_map_sock_lback(current_nx_info(), ireq->loc_addr),
+ ntohs(inet_sk(sk)->inet_sport),
+- ireq->rmt_addr,
++ nx_map_sock_lback(current_nx_info(), ireq->rmt_addr),
+ ntohs(ireq->rmt_port),
+ TCP_SYN_RECV,
+ 0, 0, /* could print option size, but that is af dependent. */
+@@ -2426,8 +2453,8 @@ static void get_tcp4_sock(struct sock *s
+ const struct tcp_sock *tp = tcp_sk(sk);
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+ const struct inet_sock *inet = inet_sk(sk);
+- __be32 dest = inet->inet_daddr;
+- __be32 src = inet->inet_rcv_saddr;
++ __be32 dest = nx_map_sock_lback(current_nx_info(), inet->inet_daddr);
++ __be32 src = nx_map_sock_lback(current_nx_info(), inet->inet_rcv_saddr);
+ __u16 destp = ntohs(inet->inet_dport);
+ __u16 srcp = ntohs(inet->inet_sport);
+ int rx_queue;
+@@ -2484,8 +2511,8 @@ static void get_timewait4_sock(const str
+ if (ttd < 0)
+ ttd = 0;
+
+- dest = tw->tw_daddr;
+- src = tw->tw_rcv_saddr;
++ dest = nx_map_sock_lback(current_nx_info(), tw->tw_daddr);
++ src = nx_map_sock_lback(current_nx_info(), tw->tw_rcv_saddr);
+ destp = ntohs(tw->tw_dport);
+ srcp = ntohs(tw->tw_sport);
+
+diff -NurpP --minimal linux-3.6.10/net/ipv4/tcp_minisocks.c linux-3.6.10-vs2.3.4.6/net/ipv4/tcp_minisocks.c
+--- linux-3.6.10/net/ipv4/tcp_minisocks.c 2012-10-04 13:27:50.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv4/tcp_minisocks.c 2012-10-04 16:47:00.000000000 +0000
+@@ -23,6 +23,9 @@
+ #include <linux/slab.h>
+ #include <linux/sysctl.h>
+ #include <linux/workqueue.h>
++#include <linux/vs_limit.h>
++#include <linux/vs_socket.h>
++#include <linux/vs_context.h>
+ #include <net/tcp.h>
+ #include <net/inet_common.h>
+ #include <net/xfrm.h>
+@@ -287,6 +290,11 @@ void tcp_time_wait(struct sock *sk, int
+ tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
+ tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
+
++ tw->tw_xid = sk->sk_xid;
++ tw->tw_vx_info = NULL;
++ tw->tw_nid = sk->sk_nid;
++ tw->tw_nx_info = NULL;
++
+ #if IS_ENABLED(CONFIG_IPV6)
+ if (tw->tw_family == PF_INET6) {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+diff -NurpP --minimal linux-3.6.10/net/ipv4/udp.c linux-3.6.10-vs2.3.4.6/net/ipv4/udp.c
+--- linux-3.6.10/net/ipv4/udp.c 2012-10-04 13:27:50.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv4/udp.c 2012-10-04 16:47:00.000000000 +0000
+@@ -300,14 +300,7 @@ fail:
+ }
+ EXPORT_SYMBOL(udp_lib_get_port);
+
+-static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
+-{
+- struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
+-
+- return (!ipv6_only_sock(sk2) &&
+- (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr ||
+- inet1->inet_rcv_saddr == inet2->inet_rcv_saddr));
+-}
++extern int ipv4_rcv_saddr_equal(const struct sock *, const struct sock *);
+
+ static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr,
+ unsigned int port)
+@@ -342,6 +335,11 @@ static inline int compute_score(struct s
+ if (inet->inet_rcv_saddr != daddr)
+ return -1;
+ score += 2;
++ } else {
++ /* block non nx_info ips */
++ if (!v4_addr_in_nx_info(sk->sk_nx_info,
++ daddr, NXA_MASK_BIND))
++ return -1;
+ }
+ if (inet->inet_daddr) {
+ if (inet->inet_daddr != saddr)
+@@ -445,6 +443,7 @@ exact_match:
+ return result;
+ }
+
++
+ /* UDP is nearly always wildcards out the wazoo, it makes no sense to try
+ * harder than this. -DaveM
+ */
+@@ -490,6 +489,11 @@ begin:
+ sk_nulls_for_each_rcu(sk, node, &hslot->head) {
+ score = compute_score(sk, net, saddr, hnum, sport,
+ daddr, dport, dif);
++ /* FIXME: disabled?
++ if (score == 9) {
++ result = sk;
++ break;
++ } else */
+ if (score > badness) {
+ result = sk;
+ badness = score;
+@@ -503,6 +507,7 @@ begin:
+ if (get_nulls_value(node) != slot)
+ goto begin;
+
++
+ if (result) {
+ if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
+ result = NULL;
+@@ -512,6 +517,7 @@ begin:
+ goto begin;
+ }
+ }
++
+ rcu_read_unlock();
+ return result;
+ }
+@@ -555,8 +561,7 @@ static inline struct sock *udp_v4_mcast_
+ udp_sk(s)->udp_port_hash != hnum ||
+ (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
+ (inet->inet_dport != rmt_port && inet->inet_dport) ||
+- (inet->inet_rcv_saddr &&
+- inet->inet_rcv_saddr != loc_addr) ||
++ !v4_sock_addr_match(sk->sk_nx_info, inet, loc_addr) ||
+ ipv6_only_sock(s) ||
+ (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
+ continue;
+@@ -939,6 +944,16 @@ int udp_sendmsg(struct kiocb *iocb, stru
+ inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP,
+ faddr, saddr, dport, inet->inet_sport);
+
++ if (sk->sk_nx_info) {
++ rt = ip_v4_find_src(net, sk->sk_nx_info, fl4);
++ if (IS_ERR(rt)) {
++ err = PTR_ERR(rt);
++ rt = NULL;
++ goto out;
++ }
++ ip_rt_put(rt);
++ }
++
+ security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
+ rt = ip_route_output_flow(net, fl4, sk);
+ if (IS_ERR(rt)) {
+@@ -1244,7 +1259,8 @@ try_again:
+ if (sin) {
+ sin->sin_family = AF_INET;
+ sin->sin_port = udp_hdr(skb)->source;
+- sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
++ sin->sin_addr.s_addr = nx_map_sock_lback(
++ skb->sk->sk_nx_info, ip_hdr(skb)->saddr);
+ memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+ }
+ if (inet->cmsg_flags)
+@@ -1992,6 +2008,8 @@ static struct sock *udp_get_first(struct
+ sk_nulls_for_each(sk, node, &hslot->head) {
+ if (!net_eq(sock_net(sk), net))
+ continue;
++ if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
++ continue;
+ if (sk->sk_family == state->family)
+ goto found;
+ }
+@@ -2009,7 +2027,9 @@ static struct sock *udp_get_next(struct
+
+ do {
+ sk = sk_nulls_next(sk);
+- } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
++ } while (sk && (!net_eq(sock_net(sk), net) ||
++ sk->sk_family != state->family ||
++ !nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)));
+
+ if (!sk) {
+ if (state->bucket <= state->udp_table->mask)
+@@ -2105,8 +2125,8 @@ static void udp4_format_sock(struct sock
+ int bucket, int *len)
+ {
+ struct inet_sock *inet = inet_sk(sp);
+- __be32 dest = inet->inet_daddr;
+- __be32 src = inet->inet_rcv_saddr;
++ __be32 dest = nx_map_sock_lback(current_nx_info(), inet->inet_daddr);
++ __be32 src = nx_map_sock_lback(current_nx_info(), inet->inet_rcv_saddr);
+ __u16 destp = ntohs(inet->inet_dport);
+ __u16 srcp = ntohs(inet->inet_sport);
+
+diff -NurpP --minimal linux-3.6.10/net/ipv6/Kconfig linux-3.6.10-vs2.3.4.6/net/ipv6/Kconfig
+--- linux-3.6.10/net/ipv6/Kconfig 2012-07-22 21:39:48.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv6/Kconfig 2012-10-04 16:47:00.000000000 +0000
+@@ -4,8 +4,8 @@
+
+ # IPv6 as module will cause a CRASH if you try to unload it
+ menuconfig IPV6
+- tristate "The IPv6 protocol"
+- default m
++ bool "The IPv6 protocol"
++ default n
+ ---help---
+ This is complemental support for the IP version 6.
+ You will still be able to do traditional IPv4 networking as well.
+diff -NurpP --minimal linux-3.6.10/net/ipv6/addrconf.c linux-3.6.10-vs2.3.4.6/net/ipv6/addrconf.c
+--- linux-3.6.10/net/ipv6/addrconf.c 2012-12-11 11:37:04.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv6/addrconf.c 2012-11-06 17:43:41.000000000 +0000
+@@ -92,6 +92,8 @@
+ #include <linux/proc_fs.h>
+ #include <linux/seq_file.h>
+ #include <linux/export.h>
++#include <linux/vs_network.h>
++#include <linux/vs_inet6.h>
+
+ /* Set to 3 to get tracing... */
+ #define ACONF_DEBUG 2
+@@ -1101,7 +1103,7 @@ out:
+
+ int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
+ const struct in6_addr *daddr, unsigned int prefs,
+- struct in6_addr *saddr)
++ struct in6_addr *saddr, struct nx_info *nxi)
+ {
+ struct ipv6_saddr_score scores[2],
+ *score = &scores[0], *hiscore = &scores[1];
+@@ -1173,6 +1175,8 @@ int ipv6_dev_get_saddr(struct net *net,
+ dev->name);
+ continue;
+ }
++ if (!v6_addr_in_nx_info(nxi, &score->ifa->addr, -1))
++ continue;
+
+ score->rule = -1;
+ bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
+@@ -3159,7 +3163,10 @@ static void if6_seq_stop(struct seq_file
+ static int if6_seq_show(struct seq_file *seq, void *v)
+ {
+ struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
+- seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
++
++ if (nx_check(0, VS_ADMIN|VS_WATCH) ||
++ v6_addr_in_nx_info(current_nx_info(), &ifp->addr, -1))
++ seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
+ &ifp->addr,
+ ifp->idev->dev->ifindex,
+ ifp->prefix_len,
+@@ -3665,6 +3672,11 @@ static int in6_dump_addrs(struct inet6_d
+ struct ifacaddr6 *ifaca;
+ int err = 1;
+ int ip_idx = *p_ip_idx;
++ struct nx_info *nxi = skb->sk ? skb->sk->sk_nx_info : NULL;
++
++ /* disable ipv6 on non v6 guests */
++ if (nxi && !nx_info_has_v6(nxi))
++ return skb->len;
+
+ read_lock_bh(&idev->lock);
+ switch (type) {
+@@ -3675,6 +3687,8 @@ static int in6_dump_addrs(struct inet6_d
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ if (++ip_idx < s_ip_idx)
+ continue;
++ if (!v6_addr_in_nx_info(nxi, &ifa->addr, -1))
++ continue;
+ err = inet6_fill_ifaddr(skb, ifa,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+@@ -3691,6 +3705,8 @@ static int in6_dump_addrs(struct inet6_d
+ ifmca = ifmca->next, ip_idx++) {
+ if (ip_idx < s_ip_idx)
+ continue;
++ if (!v6_addr_in_nx_info(nxi, &ifmca->mca_addr, -1))
++ continue;
+ err = inet6_fill_ifmcaddr(skb, ifmca,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+@@ -3706,6 +3722,8 @@ static int in6_dump_addrs(struct inet6_d
+ ifaca = ifaca->aca_next, ip_idx++) {
+ if (ip_idx < s_ip_idx)
+ continue;
++ if (!v6_addr_in_nx_info(nxi, &ifaca->aca_addr, -1))
++ continue;
+ err = inet6_fill_ifacaddr(skb, ifaca,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+@@ -4089,6 +4107,11 @@ static int inet6_dump_ifinfo(struct sk_b
+ struct inet6_dev *idev;
+ struct hlist_head *head;
+ struct hlist_node *node;
++ struct nx_info *nxi = skb->sk ? skb->sk->sk_nx_info : NULL;
++
++ /* FIXME: maybe disable ipv6 on non v6 guests?
++ if (skb->sk && skb->sk->sk_vx_info)
++ return skb->len; */
+
+ s_h = cb->args[0];
+ s_idx = cb->args[1];
+@@ -4100,6 +4123,8 @@ static int inet6_dump_ifinfo(struct sk_b
+ hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+ if (idx < s_idx)
+ goto cont;
++ if (!v6_dev_in_nx_info(dev, nxi))
++ goto cont;
+ idev = __in6_dev_get(dev);
+ if (!idev)
+ goto cont;
+diff -NurpP --minimal linux-3.6.10/net/ipv6/af_inet6.c linux-3.6.10-vs2.3.4.6/net/ipv6/af_inet6.c
+--- linux-3.6.10/net/ipv6/af_inet6.c 2012-12-11 11:37:04.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv6/af_inet6.c 2012-11-06 17:43:41.000000000 +0000
+@@ -43,6 +43,8 @@
+ #include <linux/netdevice.h>
+ #include <linux/icmpv6.h>
+ #include <linux/netfilter_ipv6.h>
++#include <linux/vs_inet.h>
++#include <linux/vs_inet6.h>
+
+ #include <net/ip.h>
+ #include <net/ipv6.h>
+@@ -160,9 +162,12 @@ lookup_protocol:
+ }
+
+ err = -EPERM;
++ if ((protocol == IPPROTO_ICMPV6) &&
++ nx_capable(CAP_NET_RAW, NXC_RAW_ICMP))
++ goto override;
+ if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
+ goto out_rcu_unlock;
+-
++override:
+ sock->ops = answer->ops;
+ answer_prot = answer->prot;
+ answer_no_check = answer->no_check;
+@@ -262,6 +267,7 @@ int inet6_bind(struct socket *sock, stru
+ struct inet_sock *inet = inet_sk(sk);
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct net *net = sock_net(sk);
++ struct nx_v6_sock_addr nsa;
+ __be32 v4addr = 0;
+ unsigned short snum;
+ int addr_type = 0;
+@@ -277,6 +283,10 @@ int inet6_bind(struct socket *sock, stru
+ if (addr->sin6_family != AF_INET6)
+ return -EAFNOSUPPORT;
+
++ err = v6_map_sock_addr(inet, addr, &nsa);
++ if (err)
++ return err;
++
+ addr_type = ipv6_addr_type(&addr->sin6_addr);
+ if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM)
+ return -EINVAL;
+@@ -308,6 +318,7 @@ int inet6_bind(struct socket *sock, stru
+ /* Reproduce AF_INET checks to make the bindings consistent */
+ v4addr = addr->sin6_addr.s6_addr32[3];
+ chk_addr_ret = inet_addr_type(net, v4addr);
++
+ if (!sysctl_ip_nonlocal_bind &&
+ !(inet->freebind || inet->transparent) &&
+ v4addr != htonl(INADDR_ANY) &&
+@@ -317,6 +328,10 @@ int inet6_bind(struct socket *sock, stru
+ err = -EADDRNOTAVAIL;
+ goto out;
+ }
++ if (!v4_addr_in_nx_info(sk->sk_nx_info, v4addr, NXA_MASK_BIND)) {
++ err = -EADDRNOTAVAIL;
++ goto out;
++ }
+ } else {
+ if (addr_type != IPV6_ADDR_ANY) {
+ struct net_device *dev = NULL;
+@@ -343,6 +358,11 @@ int inet6_bind(struct socket *sock, stru
+ }
+ }
+
++ if (!v6_addr_in_nx_info(sk->sk_nx_info, &addr->sin6_addr, -1)) {
++ err = -EADDRNOTAVAIL;
++ goto out_unlock;
++ }
++
+ /* ipv4 addr of the socket is invalid. Only the
+ * unspecified and mapped address have a v4 equivalent.
+ */
+@@ -359,6 +379,9 @@ int inet6_bind(struct socket *sock, stru
+ }
+ }
+
++ /* what's that for? */
++ v6_set_sock_addr(inet, &nsa);
++
+ inet->inet_rcv_saddr = v4addr;
+ inet->inet_saddr = v4addr;
+
+@@ -460,9 +483,11 @@ int inet6_getname(struct socket *sock, s
+ return -ENOTCONN;
+ sin->sin6_port = inet->inet_dport;
+ sin->sin6_addr = np->daddr;
++ /* FIXME: remap lback? */
+ if (np->sndflow)
+ sin->sin6_flowinfo = np->flow_label;
+ } else {
++ /* FIXME: remap lback? */
+ if (ipv6_addr_any(&np->rcv_saddr))
+ sin->sin6_addr = np->saddr;
+ else
+diff -NurpP --minimal linux-3.6.10/net/ipv6/datagram.c linux-3.6.10-vs2.3.4.6/net/ipv6/datagram.c
+--- linux-3.6.10/net/ipv6/datagram.c 2012-07-22 21:39:48.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv6/datagram.c 2012-10-04 16:47:00.000000000 +0000
+@@ -645,7 +645,7 @@ int datagram_send_ctl(struct net *net, s
+
+ rcu_read_lock();
+ if (fl6->flowi6_oif) {
+- dev = dev_get_by_index_rcu(net, fl6->flowi6_oif);
++ dev = dev_get_by_index_real_rcu(net, fl6->flowi6_oif);
+ if (!dev) {
+ rcu_read_unlock();
+ return -ENODEV;
+diff -NurpP --minimal linux-3.6.10/net/ipv6/fib6_rules.c linux-3.6.10-vs2.3.4.6/net/ipv6/fib6_rules.c
+--- linux-3.6.10/net/ipv6/fib6_rules.c 2012-07-22 21:39:48.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv6/fib6_rules.c 2012-10-04 16:47:00.000000000 +0000
+@@ -90,7 +90,7 @@ static int fib6_rule_action(struct fib_r
+ ip6_dst_idev(&rt->dst)->dev,
+ &flp6->daddr,
+ rt6_flags2srcprefs(flags),
+- &saddr))
++ &saddr, NULL))
+ goto again;
+ if (!ipv6_prefix_equal(&saddr, &r->src.addr,
+ r->src.plen))
+diff -NurpP --minimal linux-3.6.10/net/ipv6/inet6_hashtables.c linux-3.6.10-vs2.3.4.6/net/ipv6/inet6_hashtables.c
+--- linux-3.6.10/net/ipv6/inet6_hashtables.c 2011-10-24 16:45:34.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv6/inet6_hashtables.c 2012-10-04 16:47:00.000000000 +0000
+@@ -16,6 +16,7 @@
+
+ #include <linux/module.h>
+ #include <linux/random.h>
++#include <linux/vs_inet6.h>
+
+ #include <net/inet_connection_sock.h>
+ #include <net/inet_hashtables.h>
+@@ -83,7 +84,6 @@ struct sock *__inet6_lookup_established(
+ unsigned int slot = hash & hashinfo->ehash_mask;
+ struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
+
+-
+ rcu_read_lock();
+ begin:
+ sk_nulls_for_each_rcu(sk, node, &head->chain) {
+@@ -95,7 +95,7 @@ begin:
+ sock_put(sk);
+ goto begin;
+ }
+- goto out;
++ goto out;
+ }
+ }
+ if (get_nulls_value(node) != slot)
+@@ -141,6 +141,9 @@ static inline int compute_score(struct s
+ if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
+ return -1;
+ score++;
++ } else {
++ if (!v6_addr_in_nx_info(sk->sk_nx_info, daddr, -1))
++ return -1;
+ }
+ if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if != dif)
+diff -NurpP --minimal linux-3.6.10/net/ipv6/ip6_output.c linux-3.6.10-vs2.3.4.6/net/ipv6/ip6_output.c
+--- linux-3.6.10/net/ipv6/ip6_output.c 2012-10-04 13:27:50.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv6/ip6_output.c 2012-10-04 16:47:00.000000000 +0000
+@@ -966,7 +966,8 @@ static int ip6_dst_lookup_tail(struct so
+ struct rt6_info *rt = (struct rt6_info *) *dst;
+ err = ip6_route_get_saddr(net, rt, &fl6->daddr,
+ sk ? inet6_sk(sk)->srcprefs : 0,
+- &fl6->saddr);
++ &fl6->saddr,
++ sk ? sk->sk_nx_info : NULL);
+ if (err)
+ goto out_err_release;
+ }
+diff -NurpP --minimal linux-3.6.10/net/ipv6/ndisc.c linux-3.6.10-vs2.3.4.6/net/ipv6/ndisc.c
+--- linux-3.6.10/net/ipv6/ndisc.c 2012-12-11 11:37:04.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv6/ndisc.c 2012-12-08 00:36:33.000000000 +0000
+@@ -517,7 +517,7 @@ static void ndisc_send_na(struct net_dev
+ } else {
+ if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr,
+ inet6_sk(dev_net(dev)->ipv6.ndisc_sk)->srcprefs,
+- &tmpaddr))
++ &tmpaddr, NULL))
+ return;
+ src_addr = &tmpaddr;
+ }
+diff -NurpP --minimal linux-3.6.10/net/ipv6/raw.c linux-3.6.10-vs2.3.4.6/net/ipv6/raw.c
+--- linux-3.6.10/net/ipv6/raw.c 2012-10-04 13:27:50.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv6/raw.c 2012-10-04 16:47:00.000000000 +0000
+@@ -30,6 +30,7 @@
+ #include <linux/icmpv6.h>
+ #include <linux/netfilter.h>
+ #include <linux/netfilter_ipv6.h>
++#include <linux/vs_inet6.h>
+ #include <linux/skbuff.h>
+ #include <linux/compat.h>
+ #include <asm/uaccess.h>
+@@ -284,6 +285,13 @@ static int rawv6_bind(struct sock *sk, s
+ goto out_unlock;
+ }
+
++ if (!v6_addr_in_nx_info(sk->sk_nx_info, &addr->sin6_addr, -1)) {
++ err = -EADDRNOTAVAIL;
++ if (dev)
++ dev_put(dev);
++ goto out;
++ }
++
+ /* ipv4 addr of the socket is invalid. Only the
+ * unspecified and mapped address have a v4 equivalent.
+ */
+diff -NurpP --minimal linux-3.6.10/net/ipv6/route.c linux-3.6.10-vs2.3.4.6/net/ipv6/route.c
+--- linux-3.6.10/net/ipv6/route.c 2012-12-11 11:37:04.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv6/route.c 2012-12-08 00:36:33.000000000 +0000
+@@ -57,6 +57,7 @@
+ #include <net/xfrm.h>
+ #include <net/netevent.h>
+ #include <net/netlink.h>
++#include <linux/vs_inet6.h>
+
+ #include <asm/uaccess.h>
+
+@@ -2112,15 +2113,17 @@ int ip6_route_get_saddr(struct net *net,
+ struct rt6_info *rt,
+ const struct in6_addr *daddr,
+ unsigned int prefs,
+- struct in6_addr *saddr)
++ struct in6_addr *saddr,
++ struct nx_info *nxi)
+ {
+ struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt);
+ int err = 0;
+- if (rt->rt6i_prefsrc.plen)
++ if (rt->rt6i_prefsrc.plen && (!nxi ||
++ v6_addr_in_nx_info(nxi, &rt->rt6i_prefsrc.addr, NXA_TYPE_ADDR)))
+ *saddr = rt->rt6i_prefsrc.addr;
+ else
+ err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
+- daddr, prefs, saddr);
++ daddr, prefs, saddr, nxi);
+ return err;
+ }
+
+@@ -2456,7 +2459,8 @@ static int rt6_fill_node(struct net *net
+ goto nla_put_failure;
+ } else if (dst) {
+ struct in6_addr saddr_buf;
+- if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
++ if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf,
++ (skb->sk ? skb->sk->sk_nx_info : NULL)) == 0 &&
+ nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
+ goto nla_put_failure;
+ }
+@@ -2674,6 +2678,7 @@ static int rt6_info_route(struct rt6_inf
+ struct seq_file *m = p_arg;
+ struct neighbour *n;
+
++ /* FIXME: check for network context? */
+ seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
+
+ #ifdef CONFIG_IPV6_SUBTREES
+diff -NurpP --minimal linux-3.6.10/net/ipv6/tcp_ipv6.c linux-3.6.10-vs2.3.4.6/net/ipv6/tcp_ipv6.c
+--- linux-3.6.10/net/ipv6/tcp_ipv6.c 2012-12-11 11:37:04.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv6/tcp_ipv6.c 2012-11-06 17:43:41.000000000 +0000
+@@ -71,6 +71,7 @@
+
+ #include <linux/crypto.h>
+ #include <linux/scatterlist.h>
++#include <linux/vs_inet6.h>
+
+ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
+ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+@@ -175,8 +176,15 @@ static int tcp_v6_connect(struct sock *s
+ * connect() to INADDR_ANY means loopback (BSD'ism).
+ */
+
+- if(ipv6_addr_any(&usin->sin6_addr))
+- usin->sin6_addr.s6_addr[15] = 0x1;
++ if(ipv6_addr_any(&usin->sin6_addr)) {
++ struct nx_info *nxi = sk->sk_nx_info;
++
++ if (nxi && nx_info_has_v6(nxi))
++ /* FIXME: remap lback? */
++ usin->sin6_addr = nxi->v6.ip;
++ else
++ usin->sin6_addr.s6_addr[15] = 0x1;
++ }
+
+ addr_type = ipv6_addr_type(&usin->sin6_addr);
+
+diff -NurpP --minimal linux-3.6.10/net/ipv6/udp.c linux-3.6.10-vs2.3.4.6/net/ipv6/udp.c
+--- linux-3.6.10/net/ipv6/udp.c 2012-10-04 13:27:50.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv6/udp.c 2012-10-04 16:56:03.000000000 +0000
+@@ -45,42 +45,68 @@
+ #include <net/tcp_states.h>
+ #include <net/ip6_checksum.h>
+ #include <net/xfrm.h>
++#include <linux/vs_inet6.h>
+
+ #include <linux/proc_fs.h>
+ #include <linux/seq_file.h>
+ #include <trace/events/skb.h>
+ #include "udp_impl.h"
+
+-int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
++int ipv6_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
+ {
+- const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
++ const struct in6_addr *sk1_rcv_saddr6 = &inet6_sk(sk1)->rcv_saddr;
+ const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
+- __be32 sk1_rcv_saddr = sk_rcv_saddr(sk);
++ __be32 sk1_rcv_saddr = sk_rcv_saddr(sk1);
+ __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
+- int sk_ipv6only = ipv6_only_sock(sk);
++ int sk1_ipv6only = ipv6_only_sock(sk1);
+ int sk2_ipv6only = inet_v6_ipv6only(sk2);
+- int addr_type = ipv6_addr_type(sk_rcv_saddr6);
++ int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
+ int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
+
+ /* if both are mapped, treat as IPv4 */
+- if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
+- return (!sk2_ipv6only &&
++ if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
++ if (!sk2_ipv6only &&
+ (!sk1_rcv_saddr || !sk2_rcv_saddr ||
+- sk1_rcv_saddr == sk2_rcv_saddr));
++ sk1_rcv_saddr == sk2_rcv_saddr))
++ goto vs_v4;
++ else
++ return 0;
++ }
+
+ if (addr_type2 == IPV6_ADDR_ANY &&
+ !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
+- return 1;
++ goto vs;
+
+ if (addr_type == IPV6_ADDR_ANY &&
+- !(sk_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
+- return 1;
++ !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
++ goto vs;
+
+ if (sk2_rcv_saddr6 &&
+- ipv6_addr_equal(sk_rcv_saddr6, sk2_rcv_saddr6))
+- return 1;
++ ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
++ goto vs;
+
+ return 0;
++
++vs_v4:
++ if (!sk1_rcv_saddr && !sk2_rcv_saddr)
++ return nx_v4_addr_conflict(sk1->sk_nx_info, sk2->sk_nx_info);
++ if (!sk2_rcv_saddr)
++ return v4_addr_in_nx_info(sk1->sk_nx_info, sk2_rcv_saddr, -1);
++ if (!sk1_rcv_saddr)
++ return v4_addr_in_nx_info(sk2->sk_nx_info, sk1_rcv_saddr, -1);
++ return 1;
++vs:
++ if (addr_type2 == IPV6_ADDR_ANY && addr_type == IPV6_ADDR_ANY)
++ return nx_v6_addr_conflict(sk1->sk_nx_info, sk2->sk_nx_info);
++ else if (addr_type2 == IPV6_ADDR_ANY)
++ return v6_addr_in_nx_info(sk2->sk_nx_info, sk1_rcv_saddr6, -1);
++ else if (addr_type == IPV6_ADDR_ANY) {
++ if (addr_type2 == IPV6_ADDR_MAPPED)
++ return nx_v4_addr_conflict(sk1->sk_nx_info, sk2->sk_nx_info);
++ else
++ return v6_addr_in_nx_info(sk1->sk_nx_info, sk2_rcv_saddr6, -1);
++ }
++ return 1;
+ }
+
+ static unsigned int udp6_portaddr_hash(struct net *net,
+@@ -144,6 +170,10 @@ static inline int compute_score(struct s
+ if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
+ return -1;
+ score++;
++ } else {
++ /* block non nx_info ips */
++ if (!v6_addr_in_nx_info(sk->sk_nx_info, daddr, -1))
++ return -1;
+ }
+ if (!ipv6_addr_any(&np->daddr)) {
+ if (!ipv6_addr_equal(&np->daddr, saddr))
+diff -NurpP --minimal linux-3.6.10/net/ipv6/xfrm6_policy.c linux-3.6.10-vs2.3.4.6/net/ipv6/xfrm6_policy.c
+--- linux-3.6.10/net/ipv6/xfrm6_policy.c 2012-10-04 13:27:50.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/ipv6/xfrm6_policy.c 2012-10-04 16:47:00.000000000 +0000
+@@ -63,7 +63,7 @@ static int xfrm6_get_saddr(struct net *n
+ dev = ip6_dst_idev(dst)->dev;
+ ipv6_dev_get_saddr(dev_net(dev), dev,
+ (struct in6_addr *)&daddr->a6, 0,
+- (struct in6_addr *)&saddr->a6);
++ (struct in6_addr *)&saddr->a6, NULL);
+ dst_release(dst);
+ return 0;
+ }
+diff -NurpP --minimal linux-3.6.10/net/netfilter/ipvs/ip_vs_xmit.c linux-3.6.10-vs2.3.4.6/net/netfilter/ipvs/ip_vs_xmit.c
+--- linux-3.6.10/net/netfilter/ipvs/ip_vs_xmit.c 2012-12-11 11:37:04.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/netfilter/ipvs/ip_vs_xmit.c 2012-11-06 17:43:41.000000000 +0000
+@@ -231,7 +231,7 @@ __ip_vs_route_output_v6(struct net *net,
+ return dst;
+ if (ipv6_addr_any(&fl6.saddr) &&
+ ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
+- &fl6.daddr, 0, &fl6.saddr) < 0)
++ &fl6.daddr, 0, &fl6.saddr, NULL) < 0)
+ goto out_err;
+ if (do_xfrm) {
+ dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+diff -NurpP --minimal linux-3.6.10/net/netlink/af_netlink.c linux-3.6.10-vs2.3.4.6/net/netlink/af_netlink.c
+--- linux-3.6.10/net/netlink/af_netlink.c 2012-12-11 11:37:04.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/netlink/af_netlink.c 2012-12-08 00:36:33.000000000 +0000
+@@ -55,6 +55,9 @@
+ #include <linux/types.h>
+ #include <linux/audit.h>
+ #include <linux/mutex.h>
++#include <linux/vs_context.h>
++#include <linux/vs_network.h>
++#include <linux/vs_limit.h>
+
+ #include <net/net_namespace.h>
+ #include <net/sock.h>
+@@ -1983,6 +1986,8 @@ static struct sock *netlink_seq_socket_i
+ sk_for_each(s, node, &hash->table[j]) {
+ if (sock_net(s) != seq_file_net(seq))
+ continue;
++ if (!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT))
++ continue;
+ if (off == pos) {
+ iter->link = i;
+ iter->hash_idx = j;
+@@ -2017,7 +2022,8 @@ static void *netlink_seq_next(struct seq
+ s = v;
+ do {
+ s = sk_next(s);
+- } while (s && sock_net(s) != seq_file_net(seq));
++ } while (s && (sock_net(s) != seq_file_net(seq) ||
++ !nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT)));
+ if (s)
+ return s;
+
+@@ -2029,7 +2035,8 @@ static void *netlink_seq_next(struct seq
+
+ for (; j <= hash->mask; j++) {
+ s = sk_head(&hash->table[j]);
+- while (s && sock_net(s) != seq_file_net(seq))
++ while (s && (sock_net(s) != seq_file_net(seq) ||
++ !nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT)))
+ s = sk_next(s);
+ if (s) {
+ iter->link = i;
+diff -NurpP --minimal linux-3.6.10/net/socket.c linux-3.6.10-vs2.3.4.6/net/socket.c
+--- linux-3.6.10/net/socket.c 2012-10-04 13:27:51.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/socket.c 2012-10-04 16:47:00.000000000 +0000
+@@ -98,6 +98,10 @@
+
+ #include <net/sock.h>
+ #include <linux/netfilter.h>
++#include <linux/vs_base.h>
++#include <linux/vs_socket.h>
++#include <linux/vs_inet.h>
++#include <linux/vs_inet6.h>
+
+ #include <linux/if_tun.h>
+ #include <linux/ipv6_route.h>
+@@ -552,6 +556,7 @@ static inline int __sock_sendmsg_nosec(s
+ struct msghdr *msg, size_t size)
+ {
+ struct sock_iocb *si = kiocb_to_siocb(iocb);
++ size_t len;
+
+ sock_update_classid(sock->sk);
+
+@@ -560,7 +565,22 @@ static inline int __sock_sendmsg_nosec(s
+ si->msg = msg;
+ si->size = size;
+
+- return sock->ops->sendmsg(iocb, sock, msg, size);
++ len = sock->ops->sendmsg(iocb, sock, msg, size);
++ if (sock->sk) {
++ if (len == size)
++ vx_sock_send(sock->sk, size);
++ else
++ vx_sock_fail(sock->sk, size);
++ }
++ vxdprintk(VXD_CBIT(net, 7),
++ "__sock_sendmsg: %p[%p,%p,%p;%d/%d]:%d/%zu",
++ sock, sock->sk,
++ (sock->sk)?sock->sk->sk_nx_info:0,
++ (sock->sk)?sock->sk->sk_vx_info:0,
++ (sock->sk)?sock->sk->sk_xid:0,
++ (sock->sk)?sock->sk->sk_nid:0,
++ (unsigned int)size, len);
++ return len;
+ }
+
+ static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
+@@ -716,6 +736,7 @@ static inline int __sock_recvmsg_nosec(s
+ struct msghdr *msg, size_t size, int flags)
+ {
+ struct sock_iocb *si = kiocb_to_siocb(iocb);
++ int len;
+
+ sock_update_classid(sock->sk);
+
+@@ -725,7 +746,18 @@ static inline int __sock_recvmsg_nosec(s
+ si->size = size;
+ si->flags = flags;
+
+- return sock->ops->recvmsg(iocb, sock, msg, size, flags);
++ len = sock->ops->recvmsg(iocb, sock, msg, size, flags);
++ if ((len >= 0) && sock->sk)
++ vx_sock_recv(sock->sk, len);
++ vxdprintk(VXD_CBIT(net, 7),
++ "__sock_recvmsg: %p[%p,%p,%p;%d/%d]:%d/%d",
++ sock, sock->sk,
++ (sock->sk)?sock->sk->sk_nx_info:0,
++ (sock->sk)?sock->sk->sk_vx_info:0,
++ (sock->sk)?sock->sk->sk_xid:0,
++ (sock->sk)?sock->sk->sk_nid:0,
++ (unsigned int)size, len);
++ return len;
+ }
+
+ static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+@@ -1210,6 +1242,13 @@ int __sock_create(struct net *net, int f
+ if (type < 0 || type >= SOCK_MAX)
+ return -EINVAL;
+
++ if (!nx_check(0, VS_ADMIN)) {
++ if (family == PF_INET && !current_nx_info_has_v4())
++ return -EAFNOSUPPORT;
++ if (family == PF_INET6 && !current_nx_info_has_v6())
++ return -EAFNOSUPPORT;
++ }
++
+ /* Compatibility.
+
+ This uglymoron is moved from INET layer to here to avoid
+@@ -1344,6 +1383,7 @@ SYSCALL_DEFINE3(socket, int, family, int
+ if (retval < 0)
+ goto out;
+
++ set_bit(SOCK_USER_SOCKET, &sock->flags);
+ retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
+ if (retval < 0)
+ goto out_release;
+@@ -1385,10 +1425,12 @@ SYSCALL_DEFINE4(socketpair, int, family,
+ err = sock_create(family, type, protocol, &sock1);
+ if (err < 0)
+ goto out;
++ set_bit(SOCK_USER_SOCKET, &sock1->flags);
+
+ err = sock_create(family, type, protocol, &sock2);
+ if (err < 0)
+ goto out_release_1;
++ set_bit(SOCK_USER_SOCKET, &sock2->flags);
+
+ err = sock1->ops->socketpair(sock1, sock2);
+ if (err < 0)
+diff -NurpP --minimal linux-3.6.10/net/sunrpc/auth.c linux-3.6.10-vs2.3.4.6/net/sunrpc/auth.c
+--- linux-3.6.10/net/sunrpc/auth.c 2012-10-04 13:27:51.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/sunrpc/auth.c 2012-10-04 16:47:00.000000000 +0000
+@@ -15,6 +15,7 @@
+ #include <linux/sunrpc/clnt.h>
+ #include <linux/sunrpc/gss_api.h>
+ #include <linux/spinlock.h>
++#include <linux/vs_tag.h>
+
+ #ifdef RPC_DEBUG
+ # define RPCDBG_FACILITY RPCDBG_AUTH
+@@ -481,6 +482,7 @@ rpcauth_lookupcred(struct rpc_auth *auth
+ memset(&acred, 0, sizeof(acred));
+ acred.uid = cred->fsuid;
+ acred.gid = cred->fsgid;
++ acred.tag = dx_current_tag();
+ acred.group_info = get_group_info(((struct cred *)cred)->group_info);
+
+ ret = auth->au_ops->lookup_cred(auth, &acred, flags);
+@@ -521,6 +523,7 @@ rpcauth_bind_root_cred(struct rpc_task *
+ struct auth_cred acred = {
+ .uid = 0,
+ .gid = 0,
++ .tag = dx_current_tag(),
+ };
+
+ dprintk("RPC: %5u looking up %s cred\n",
+diff -NurpP --minimal linux-3.6.10/net/sunrpc/auth_unix.c linux-3.6.10-vs2.3.4.6/net/sunrpc/auth_unix.c
+--- linux-3.6.10/net/sunrpc/auth_unix.c 2012-07-22 21:39:49.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/sunrpc/auth_unix.c 2012-10-04 16:47:00.000000000 +0000
+@@ -13,11 +13,13 @@
+ #include <linux/sunrpc/clnt.h>
+ #include <linux/sunrpc/auth.h>
+ #include <linux/user_namespace.h>
++#include <linux/vs_tag.h>
+
+ #define NFS_NGROUPS 16
+
+ struct unx_cred {
+ struct rpc_cred uc_base;
++ tag_t uc_tag;
+ gid_t uc_gid;
+ gid_t uc_gids[NFS_NGROUPS];
+ };
+@@ -79,6 +81,7 @@ unx_create_cred(struct rpc_auth *auth, s
+ groups = NFS_NGROUPS;
+
+ cred->uc_gid = acred->gid;
++ cred->uc_tag = acred->tag;
+ for (i = 0; i < groups; i++) {
+ gid_t gid;
+ gid = from_kgid(&init_user_ns, GROUP_AT(acred->group_info, i));
+@@ -123,7 +126,9 @@ unx_match(struct auth_cred *acred, struc
+ unsigned int i;
+
+
+- if (cred->uc_uid != acred->uid || cred->uc_gid != acred->gid)
++ if (cred->uc_uid != acred->uid ||
++ cred->uc_gid != acred->gid ||
++ cred->uc_tag != acred->tag)
+ return 0;
+
+ if (acred->group_info != NULL)
+@@ -152,7 +157,7 @@ unx_marshal(struct rpc_task *task, __be3
+ struct rpc_clnt *clnt = task->tk_client;
+ struct unx_cred *cred = container_of(task->tk_rqstp->rq_cred, struct unx_cred, uc_base);
+ __be32 *base, *hold;
+- int i;
++ int i, tag;
+
+ *p++ = htonl(RPC_AUTH_UNIX);
+ base = p++;
+@@ -162,9 +167,12 @@ unx_marshal(struct rpc_task *task, __be3
+ * Copy the UTS nodename captured when the client was created.
+ */
+ p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen);
++ tag = task->tk_client->cl_tag;
+
+- *p++ = htonl((u32) cred->uc_uid);
+- *p++ = htonl((u32) cred->uc_gid);
++ *p++ = htonl((u32) TAGINO_UID(tag,
++ cred->uc_uid, cred->uc_tag));
++ *p++ = htonl((u32) TAGINO_GID(tag,
++ cred->uc_gid, cred->uc_tag));
+ hold = p++;
+ for (i = 0; i < 16 && cred->uc_gids[i] != (gid_t) NOGROUP; i++)
+ *p++ = htonl((u32) cred->uc_gids[i]);
+diff -NurpP --minimal linux-3.6.10/net/sunrpc/clnt.c linux-3.6.10-vs2.3.4.6/net/sunrpc/clnt.c
+--- linux-3.6.10/net/sunrpc/clnt.c 2012-10-04 13:27:51.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/sunrpc/clnt.c 2012-10-04 16:47:00.000000000 +0000
+@@ -31,6 +31,7 @@
+ #include <linux/in6.h>
+ #include <linux/un.h>
+ #include <linux/rcupdate.h>
++#include <linux/vs_cvirt.h>
+
+ #include <linux/sunrpc/clnt.h>
+ #include <linux/sunrpc/rpc_pipe_fs.h>
+@@ -481,6 +482,9 @@ struct rpc_clnt *rpc_create(struct rpc_c
+ if (!(args->flags & RPC_CLNT_CREATE_QUIET))
+ clnt->cl_chatty = 1;
+
++ /* TODO: handle RPC_CLNT_CREATE_TAGGED
++ if (args->flags & RPC_CLNT_CREATE_TAGGED)
++ clnt->cl_tag = 1; */
+ return clnt;
+ }
+ EXPORT_SYMBOL_GPL(rpc_create);
+diff -NurpP --minimal linux-3.6.10/net/unix/af_unix.c linux-3.6.10-vs2.3.4.6/net/unix/af_unix.c
+--- linux-3.6.10/net/unix/af_unix.c 2012-10-04 13:27:51.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/net/unix/af_unix.c 2012-10-04 16:59:36.000000000 +0000
+@@ -114,6 +114,8 @@
+ #include <linux/mount.h>
+ #include <net/checksum.h>
+ #include <linux/security.h>
++#include <linux/vs_context.h>
++#include <linux/vs_limit.h>
+
+ struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
+ EXPORT_SYMBOL_GPL(unix_socket_table);
+@@ -271,6 +273,8 @@ static struct sock *__unix_find_socket_b
+ if (!net_eq(sock_net(s), net))
+ continue;
+
++ if (!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT))
++ continue;
+ if (u->addr->len == len &&
+ !memcmp(u->addr->name, sunname, len))
+ goto found;
+@@ -2258,6 +2262,8 @@ static struct sock *unix_from_bucket(str
+ for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
+ if (sock_net(sk) != seq_file_net(seq))
+ continue;
++ if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
++ continue;
+ if (++count == offset)
+ break;
+ }
+@@ -2275,6 +2281,8 @@ static struct sock *unix_next_socket(str
+ sk = sk_next(sk);
+ if (!sk)
+ goto next_bucket;
++ if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
++ continue;
+ if (sock_net(sk) == seq_file_net(seq))
+ return sk;
+ }
+diff -NurpP --minimal linux-3.6.10/scripts/checksyscalls.sh linux-3.6.10-vs2.3.4.6/scripts/checksyscalls.sh
+--- linux-3.6.10/scripts/checksyscalls.sh 2012-10-04 13:27:51.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/scripts/checksyscalls.sh 2012-10-04 16:47:00.000000000 +0000
+@@ -193,7 +193,6 @@ cat << EOF
+ #define __IGNORE_afs_syscall
+ #define __IGNORE_getpmsg
+ #define __IGNORE_putpmsg
+-#define __IGNORE_vserver
+ EOF
+ }
+
+diff -NurpP --minimal linux-3.6.10/security/commoncap.c linux-3.6.10-vs2.3.4.6/security/commoncap.c
+--- linux-3.6.10/security/commoncap.c 2012-07-22 21:39:51.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/security/commoncap.c 2012-10-04 16:47:00.000000000 +0000
+@@ -76,14 +76,20 @@ int cap_netlink_send(struct sock *sk, st
+ int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
+ int cap, int audit)
+ {
++ struct vx_info *vxi = current_vx_info(); /* FIXME: get vxi from cred? */
++
+ for (;;) {
+ /* The owner of the user namespace has all caps. */
+ if (targ_ns != &init_user_ns && uid_eq(targ_ns->owner, cred->euid))
+ return 0;
+
+ /* Do we have the necessary capabilities? */
+- if (targ_ns == cred->user_ns)
+- return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;
++ if (targ_ns == cred->user_ns) {
++ if (vx_info_flags(vxi, VXF_STATE_SETUP, 0) &&
++ cap_raised(cred->cap_effective, cap))
++ return 0;
++ return vx_cap_raised(vxi, cred->cap_effective, cap) ? 0 : -EPERM;
++ }
+
+ /* Have we tried all of the parent namespaces? */
+ if (targ_ns == &init_user_ns)
+@@ -619,7 +625,7 @@ int cap_inode_setxattr(struct dentry *de
+
+ if (!strncmp(name, XATTR_SECURITY_PREFIX,
+ sizeof(XATTR_SECURITY_PREFIX) - 1) &&
+- !capable(CAP_SYS_ADMIN))
++ !vx_capable(CAP_SYS_ADMIN, VXC_FS_SECURITY))
+ return -EPERM;
+ return 0;
+ }
+@@ -645,7 +651,7 @@ int cap_inode_removexattr(struct dentry
+
+ if (!strncmp(name, XATTR_SECURITY_PREFIX,
+ sizeof(XATTR_SECURITY_PREFIX) - 1) &&
+- !capable(CAP_SYS_ADMIN))
++ !vx_capable(CAP_SYS_ADMIN, VXC_FS_SECURITY))
+ return -EPERM;
+ return 0;
+ }
+diff -NurpP --minimal linux-3.6.10/security/selinux/hooks.c linux-3.6.10-vs2.3.4.6/security/selinux/hooks.c
+--- linux-3.6.10/security/selinux/hooks.c 2012-10-04 13:27:53.000000000 +0000
++++ linux-3.6.10-vs2.3.4.6/security/selinux/hooks.c 2012-10-04 16:47:00.000000000 +0000
+@@ -66,7 +66,6 @@
+ #include <linux/dccp.h>
+ #include <linux/quota.h>
+ #include <linux/un.h> /* for Unix socket types */
+-#include <net/af_unix.h> /* for Unix socket types */
+ #include <linux/parser.h>
+ #include <linux/nfs_mount.h>
+ #include <net/ipv6.h>