diff -NurpP --minimal linux-3.10.19/Documentation/vserver/debug.txt linux-3.10.19-vs2.3.6.8/Documentation/vserver/debug.txt
--- linux-3.10.19/Documentation/vserver/debug.txt	1970-01-01 00:00:00.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/Documentation/vserver/debug.txt	2013-08-22 20:29:59.000000000 +0000
@@ -0,0 +1,154 @@
+
+debug_cvirt:
+
+ 2   4	"vx_map_tgid: %p/%llx: %d -> %d"
+	"vx_rmap_tgid: %p/%llx: %d -> %d"
+
+debug_dlim:
+
+ 0   1	"ALLOC (%p,#%d)%c inode (%d)"
+	"FREE  (%p,#%d)%c inode"
+ 1   2	"ALLOC (%p,#%d)%c %lld bytes (%d)"
+	"FREE  (%p,#%d)%c %lld bytes"
+ 2   4	"ADJUST: %lld,%lld on %ld,%ld [mult=%d]"
+ 3   8	"ext3_has_free_blocks(%p): %lu<%lu+1, %c, %u!=%u r=%d"
+	"ext3_has_free_blocks(%p): free=%lu, root=%lu"
+	"rcu_free_dl_info(%p)"
+ 4  10	"alloc_dl_info(%p,%d) = %p"
+	"dealloc_dl_info(%p)"
+	"get_dl_info(%p[#%d.%d])"
+	"put_dl_info(%p[#%d.%d])"
+ 5  20	"alloc_dl_info(%p,%d)*"
+ 6  40	"__hash_dl_info: %p[#%d]"
+	"__unhash_dl_info: %p[#%d]"
+ 7  80	"locate_dl_info(%p,#%d) = %p"
+
+debug_misc:
+
+ 0   1	"destroy_dqhash: %p [#0x%08x] c=%d"
+	"new_dqhash: %p [#0x%08x]"
+	"vroot[%d]_clr_dev: dev=%p[%lu,%d:%d]"
+	"vroot[%d]_get_real_bdev: dev=%p[%lu,%d:%d]"
+	"vroot[%d]_set_dev: dev=%p[%lu,%d:%d]"
+	"vroot_get_real_bdev not set"
+ 1   2	"cow_break_link(»%s«)"
+	"temp copy »%s«"
+ 2   4	"dentry_open(new): %p"
+	"dentry_open(old): %p"
+	"lookup_create(new): %p"
+	"old path »%s«"
+	"path_lookup(old): %d"
+	"vfs_create(new): %d"
+	"vfs_rename: %d"
+	"vfs_sendfile: %d"
+ 3   8	"fput(new_file=%p[#%d])"
+	"fput(old_file=%p[#%d])"
+ 4  10	"vx_info_kill(%p[#%d],%d,%d) = %d"
+	"vx_info_kill(%p[#%d],%d,%d)*"
+ 5  20	"vs_reboot(%p[#%d],%d)"
+ 6  40	"dropping task %p[#%u,%u] for %p[#%u,%u]"
+
+debug_net:
+
+ 2   4	"nx_addr_conflict(%p,%p) %d.%d,%d.%d"
+ 3   8	"inet_bind(%p) %d.%d.%d.%d, %d.%d.%d.%d, %d.%d.%d.%d"
+	"inet_bind(%p)* %p,%p;%lx %d.%d.%d.%d"
+ 4  10	"ip_route_connect(%p) %p,%p;%lx"
+ 5  20	"__addr_in_socket(%p,%d.%d.%d.%d) %p:%d.%d.%d.%d %p;%lx"
+ 6  40	"sk,egf: %p [#%d] (from %d)"
+	"sk,egn: %p [#%d] (from %d)"
+	"sk,req: %p [#%d] (from %d)"
+	"sk: %p [#%d] (from %d)"
+	"tw: %p [#%d] (from %d)"
+ 7  80	"__sock_recvmsg: %p[%p,%p,%p;%d]:%d/%d"
+	"__sock_sendmsg: %p[%p,%p,%p;%d]:%d/%d"
+
+debug_nid:
+
+ 0   1	"__lookup_nx_info(#%u): %p[#%u]"
+	"alloc_nx_info(%d) = %p"
+	"create_nx_info(%d) (dynamic rejected)"
+	"create_nx_info(%d) = %p (already there)"
+	"create_nx_info(%d) = %p (new)"
+	"dealloc_nx_info(%p)"
+ 1   2	"alloc_nx_info(%d)*"
+	"create_nx_info(%d)*"
+ 2   4	"get_nx_info(%p[#%d.%d])"
+	"put_nx_info(%p[#%d.%d])"
+ 3   8	"claim_nx_info(%p[#%d.%d.%d]) %p"
+	"clr_nx_info(%p[#%d.%d])"
+	"init_nx_info(%p[#%d.%d])"
+	"release_nx_info(%p[#%d.%d.%d]) %p"
+	"set_nx_info(%p[#%d.%d])"
+ 4  10	"__hash_nx_info: %p[#%d]"
+	"__nx_dynamic_id: [#%d]"
+	"__unhash_nx_info: %p[#%d.%d.%d]"
+ 5  20	"moved task %p into nxi:%p[#%d]"
+	"nx_migrate_task(%p,%p[#%d.%d.%d])"
+	"task_get_nx_info(%p)"
+ 6  40	"nx_clear_persistent(%p[#%d])"
+
+debug_quota:
+
+ 0   1	"quota_sync_dqh(%p,%d) discard inode %p"
+ 1   2	"quota_sync_dqh(%p,%d)"
+	"sync_dquots(%p,%d)"
+	"sync_dquots_dqh(%p,%d)"
+ 3   8	"do_quotactl(%p,%d,cmd=%d,id=%d,%p)"
+
+debug_switch:
+
+ 0   1	"vc: VCMD_%02d_%d[%d], %d,%p [%d,%d,%x,%x]"
+ 1   2	"vc: VCMD_%02d_%d[%d] = %08lx(%ld) [%d,%d]"
+ 4  10	"%s: (%s %s) returned %s with %d"
+
+debug_tag:
+
+ 7  80	"dx_parse_tag(»%s«): %d:#%d"
+	"dx_propagate_tag(%p[#%lu.%d]): %d,%d"
+
+debug_xid:
+
+ 0   1	"__lookup_vx_info(#%u): %p[#%u]"
+	"alloc_vx_info(%d) = %p"
+	"alloc_vx_info(%d)*"
+	"create_vx_info(%d) (dynamic rejected)"
+	"create_vx_info(%d) = %p (already there)"
+	"create_vx_info(%d) = %p (new)"
+	"dealloc_vx_info(%p)"
+	"loc_vx_info(%d) = %p (found)"
+	"loc_vx_info(%d) = %p (new)"
+	"loc_vx_info(%d) = %p (not available)"
+ 1   2	"create_vx_info(%d)*"
+	"loc_vx_info(%d)*"
+ 2   4	"get_vx_info(%p[#%d.%d])"
+	"put_vx_info(%p[#%d.%d])"
+ 3   8	"claim_vx_info(%p[#%d.%d.%d]) %p"
+	"clr_vx_info(%p[#%d.%d])"
+	"init_vx_info(%p[#%d.%d])"
+	"release_vx_info(%p[#%d.%d.%d]) %p"
+	"set_vx_info(%p[#%d.%d])"
+ 4  10	"__hash_vx_info: %p[#%d]"
+	"__unhash_vx_info: %p[#%d.%d.%d]"
+	"__vx_dynamic_id: [#%d]"
+ 5  20	"enter_vx_info(%p[#%d],%p) %p[#%d,%p]"
+	"leave_vx_info(%p[#%d,%p]) %p[#%d,%p]"
+	"moved task %p into vxi:%p[#%d]"
+	"task_get_vx_info(%p)"
+	"vx_migrate_task(%p,%p[#%d.%d])"
+ 6  40	"vx_clear_persistent(%p[#%d])"
+	"vx_exit_init(%p[#%d],%p[#%d,%d,%d])"
+	"vx_set_init(%p[#%d],%p[#%d,%d,%d])"
+	"vx_set_persistent(%p[#%d])"
+	"vx_set_reaper(%p[#%d],%p[#%d,%d])"
+ 7  80	"vx_child_reaper(%p[#%u,%u]) = %p[#%u,%u]"
+
+
+debug_limit:
+
+ n 2^n	"vx_acc_cres[%5d,%s,%2d]: %5d%s"
+	"vx_cres_avail[%5d,%s,%2d]: %5ld > %5d + %5d"
+
+ m 2^m	"vx_acc_page[%5d,%s,%2d]: %5d%s"
+	"vx_acc_pages[%5d,%s,%2d]: %5d += %5d"
+	"vx_pages_avail[%5d,%s,%2d]: %5ld > %5d + %5d"
diff -NurpP --minimal linux-3.10.19/Makefile linux-3.10.19-vs2.3.6.8/Makefile
--- linux-3.10.19/Makefile	2013-11-13 17:21:12.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/Makefile	2013-11-14 04:32:32.000000000 +0000
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 10
 SUBLEVEL = 19
-EXTRAVERSION =
+EXTRAVERSION = -vs2.3.6.8
 NAME = TOSSUG Baby Fish
 
 # *DOCUMENTATION*
diff -NurpP --minimal linux-3.10.19/arch/alpha/Kconfig linux-3.10.19-vs2.3.6.8/arch/alpha/Kconfig
--- linux-3.10.19/arch/alpha/Kconfig	2013-07-14 17:00:13.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/arch/alpha/Kconfig	2013-08-22 20:29:59.000000000 +0000
@@ -665,6 +665,8 @@ config DUMMY_CONSOLE
 	depends on VGA_HOSE
 	default y
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -NurpP --minimal linux-3.10.19/arch/alpha/kernel/systbls.S linux-3.10.19-vs2.3.6.8/arch/alpha/kernel/systbls.S
--- linux-3.10.19/arch/alpha/kernel/systbls.S	2013-02-19 13:56:11.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/arch/alpha/kernel/systbls.S	2013-08-22 20:29:59.000000000 +0000
@@ -446,7 +446,7 @@ sys_call_table:
 	.quad sys_stat64			/* 425 */
 	.quad sys_lstat64
 	.quad sys_fstat64
-	.quad sys_ni_syscall			/* sys_vserver */
+	.quad sys_vserver			/* sys_vserver */
 	.quad sys_ni_syscall			/* sys_mbind */
 	.quad sys_ni_syscall			/* sys_get_mempolicy */
 	.quad sys_ni_syscall			/* sys_set_mempolicy */
diff -NurpP --minimal linux-3.10.19/arch/alpha/kernel/traps.c linux-3.10.19-vs2.3.6.8/arch/alpha/kernel/traps.c
--- linux-3.10.19/arch/alpha/kernel/traps.c	2013-07-14 17:00:13.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/arch/alpha/kernel/traps.c	2013-08-22 20:29:59.000000000 +0000
@@ -177,7 +177,8 @@ die_if_kernel(char * str, struct pt_regs
 #ifdef CONFIG_SMP
 	printk("CPU %d ", hard_smp_processor_id());
 #endif
-	printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
+	printk("%s(%d:#%u): %s %ld\n", current->comm,
+		task_pid_nr(current), current->xid, str, err);
 	dik_show_regs(regs, r9_15);
 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 	dik_show_trace((unsigned long *)(regs+1));
diff -NurpP --minimal linux-3.10.19/arch/arm/Kconfig linux-3.10.19-vs2.3.6.8/arch/arm/Kconfig
--- linux-3.10.19/arch/arm/Kconfig	2013-11-13 17:21:12.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/arch/arm/Kconfig	2013-11-13 17:17:15.000000000 +0000
@@ -2238,6 +2238,8 @@ source "fs/Kconfig"
"fs/Kconfig" source "arch/arm/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -NurpP --minimal linux-3.10.19/arch/arm/kernel/calls.S linux-3.10.19-vs2.3.6.8/arch/arm/kernel/calls.S --- linux-3.10.19/arch/arm/kernel/calls.S 2013-05-31 13:44:29.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/arm/kernel/calls.S 2013-08-22 20:29:59.000000000 +0000 @@ -322,7 +322,7 @@ /* 310 */ CALL(sys_request_key) CALL(sys_keyctl) CALL(ABI(sys_semtimedop, sys_oabi_semtimedop)) -/* vserver */ CALL(sys_ni_syscall) + CALL(sys_vserver) CALL(sys_ioprio_set) /* 315 */ CALL(sys_ioprio_get) CALL(sys_inotify_init) diff -NurpP --minimal linux-3.10.19/arch/arm/kernel/traps.c linux-3.10.19-vs2.3.6.8/arch/arm/kernel/traps.c --- linux-3.10.19/arch/arm/kernel/traps.c 2013-11-13 17:21:12.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/arm/kernel/traps.c 2013-11-13 17:17:15.000000000 +0000 @@ -240,8 +240,8 @@ static int __die(const char *str, int er print_modules(); __show_regs(regs); - printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n", - TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk)); + printk(KERN_EMERG "Process %.*s (pid: %d:#%u, stack limit = 0x%p)\n", + TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), tsk->xid, end_of_stack(tsk)); if (!user_mode(regs) || in_interrupt()) { dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp, diff -NurpP --minimal linux-3.10.19/arch/cris/Kconfig linux-3.10.19-vs2.3.6.8/arch/cris/Kconfig --- linux-3.10.19/arch/cris/Kconfig 2013-07-14 17:00:25.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/cris/Kconfig 2013-08-22 20:29:59.000000000 +0000 @@ -673,6 +673,8 @@ source "drivers/staging/Kconfig" source "arch/cris/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -NurpP --minimal linux-3.10.19/arch/h8300/Kconfig linux-3.10.19-vs2.3.6.8/arch/h8300/Kconfig --- linux-3.10.19/arch/h8300/Kconfig 2013-07-14 17:00:25.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/h8300/Kconfig 2013-08-22 20:29:59.000000000 +0000 @@ -218,6 +218,8 @@ source "fs/Kconfig" source "arch/h8300/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -NurpP --minimal linux-3.10.19/arch/ia64/Kconfig linux-3.10.19-vs2.3.6.8/arch/ia64/Kconfig --- linux-3.10.19/arch/ia64/Kconfig 2013-07-14 17:00:25.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/ia64/Kconfig 2013-08-22 20:29:59.000000000 +0000 @@ -643,6 +643,8 @@ source "fs/Kconfig" source "arch/ia64/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -NurpP --minimal linux-3.10.19/arch/ia64/kernel/entry.S linux-3.10.19-vs2.3.6.8/arch/ia64/kernel/entry.S --- linux-3.10.19/arch/ia64/kernel/entry.S 2013-05-31 13:44:38.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/ia64/kernel/entry.S 2013-08-22 20:29:59.000000000 +0000 @@ -1719,7 +1719,7 @@ sys_call_table: data8 sys_mq_notify data8 sys_mq_getsetattr data8 sys_kexec_load - data8 sys_ni_syscall // reserved for vserver + data8 sys_vserver data8 sys_waitid // 1270 data8 sys_add_key data8 sys_request_key diff -NurpP --minimal linux-3.10.19/arch/ia64/kernel/ptrace.c linux-3.10.19-vs2.3.6.8/arch/ia64/kernel/ptrace.c --- linux-3.10.19/arch/ia64/kernel/ptrace.c 2013-02-19 13:56:51.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/ia64/kernel/ptrace.c 2013-08-22 20:29:59.000000000 +0000 @@ -21,6 +21,7 @@ #include #include #include +#include #include #include diff -NurpP --minimal 
linux-3.10.19/arch/ia64/kernel/traps.c linux-3.10.19-vs2.3.6.8/arch/ia64/kernel/traps.c --- linux-3.10.19/arch/ia64/kernel/traps.c 2013-05-31 13:44:38.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/ia64/kernel/traps.c 2013-08-22 20:29:59.000000000 +0000 @@ -60,8 +60,9 @@ die (const char *str, struct pt_regs *re put_cpu(); if (++die.lock_owner_depth < 3) { - printk("%s[%d]: %s %ld [%d]\n", - current->comm, task_pid_nr(current), str, err, ++die_counter); + printk("%s[%d:#%u]: %s %ld [%d]\n", + current->comm, task_pid_nr(current), current->xid, + str, err, ++die_counter); if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) != NOTIFY_STOP) show_regs(regs); @@ -324,8 +325,9 @@ handle_fpu_swa (int fp_fault, struct pt_ if ((last.count & 15) < 5 && (ia64_fetchadd(1, &last.count, acq) & 15) < 5) { last.time = current_jiffies + 5 * HZ; printk(KERN_WARNING - "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n", - current->comm, task_pid_nr(current), regs->cr_iip + ia64_psr(regs)->ri, isr); + "%s(%d:#%u): floating-point assist fault at ip %016lx, isr %016lx\n", + current->comm, task_pid_nr(current), current->xid, + regs->cr_iip + ia64_psr(regs)->ri, isr); } } } diff -NurpP --minimal linux-3.10.19/arch/m32r/kernel/traps.c linux-3.10.19-vs2.3.6.8/arch/m32r/kernel/traps.c --- linux-3.10.19/arch/m32r/kernel/traps.c 2013-07-14 17:00:26.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/m32r/kernel/traps.c 2013-08-22 20:29:59.000000000 +0000 @@ -184,8 +184,9 @@ static void show_registers(struct pt_reg } else { printk("SPI: %08lx\n", sp); } - printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)", - current->comm, task_pid_nr(current), 0xffff & i, 4096+(unsigned long)current); + printk("Process %s (pid: %d:#%u, process nr: %d, stackpage=%08lx)", + current->comm, task_pid_nr(current), current->xid, + 0xffff & i, 4096+(unsigned long)current); /* * When in-kernel, we also print out the stack and code at the diff -NurpP --minimal linux-3.10.19/arch/m68k/Kconfig linux-3.10.19-vs2.3.6.8/arch/m68k/Kconfig --- linux-3.10.19/arch/m68k/Kconfig 2013-07-14 17:00:26.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/m68k/Kconfig 2013-08-22 20:29:59.000000000 +0000 @@ -134,6 +134,8 @@ source "fs/Kconfig" source "arch/m68k/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -NurpP --minimal linux-3.10.19/arch/mips/Kconfig linux-3.10.19-vs2.3.6.8/arch/mips/Kconfig --- linux-3.10.19/arch/mips/Kconfig 2013-11-13 17:21:12.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/mips/Kconfig 2013-11-13 17:17:15.000000000 +0000 @@ -2583,6 +2583,8 @@ source "fs/Kconfig" source "arch/mips/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -NurpP --minimal linux-3.10.19/arch/mips/kernel/ptrace.c linux-3.10.19-vs2.3.6.8/arch/mips/kernel/ptrace.c --- linux-3.10.19/arch/mips/kernel/ptrace.c 2013-05-31 13:44:42.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/mips/kernel/ptrace.c 2013-08-22 20:29:59.000000000 +0000 @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -262,6 +263,9 @@ long arch_ptrace(struct task_struct *chi void __user *datavp = (void __user *) data; unsigned long __user *datalp = (void __user *) data; + if (!vx_check(vx_task_xid(child), VS_WATCH_P | VS_IDENT)) + goto out; + switch (request) { /* when I and D space are separate, these will need to be fixed. */ case PTRACE_PEEKTEXT: /* read word at location addr. 
*/ diff -NurpP --minimal linux-3.10.19/arch/mips/kernel/scall32-o32.S linux-3.10.19-vs2.3.6.8/arch/mips/kernel/scall32-o32.S --- linux-3.10.19/arch/mips/kernel/scall32-o32.S 2013-07-14 17:00:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/mips/kernel/scall32-o32.S 2013-08-22 20:29:59.000000000 +0000 @@ -521,7 +521,7 @@ einval: li v0, -ENOSYS sys sys_mq_timedreceive 5 sys sys_mq_notify 2 /* 4275 */ sys sys_mq_getsetattr 3 - sys sys_ni_syscall 0 /* sys_vserver */ + sys sys_vserver 3 sys sys_waitid 5 sys sys_ni_syscall 0 /* available, was setaltroot */ sys sys_add_key 5 /* 4280 */ diff -NurpP --minimal linux-3.10.19/arch/mips/kernel/scall64-64.S linux-3.10.19-vs2.3.6.8/arch/mips/kernel/scall64-64.S --- linux-3.10.19/arch/mips/kernel/scall64-64.S 2013-07-14 17:00:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/mips/kernel/scall64-64.S 2013-08-22 20:29:59.000000000 +0000 @@ -351,7 +351,7 @@ sys_call_table: PTR sys_mq_timedreceive PTR sys_mq_notify PTR sys_mq_getsetattr /* 5235 */ - PTR sys_ni_syscall /* sys_vserver */ + PTR sys_vserver PTR sys_waitid PTR sys_ni_syscall /* available, was setaltroot */ PTR sys_add_key diff -NurpP --minimal linux-3.10.19/arch/mips/kernel/scall64-n32.S linux-3.10.19-vs2.3.6.8/arch/mips/kernel/scall64-n32.S --- linux-3.10.19/arch/mips/kernel/scall64-n32.S 2013-07-14 17:00:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/mips/kernel/scall64-n32.S 2013-08-22 20:29:59.000000000 +0000 @@ -344,7 +344,7 @@ EXPORT(sysn32_call_table) PTR compat_sys_mq_timedreceive PTR compat_sys_mq_notify PTR compat_sys_mq_getsetattr - PTR sys_ni_syscall /* 6240, sys_vserver */ + PTR sys32_vserver /* 6240 */ PTR compat_sys_waitid PTR sys_ni_syscall /* available, was setaltroot */ PTR sys_add_key diff -NurpP --minimal linux-3.10.19/arch/mips/kernel/scall64-o32.S linux-3.10.19-vs2.3.6.8/arch/mips/kernel/scall64-o32.S --- linux-3.10.19/arch/mips/kernel/scall64-o32.S 2013-07-14 17:00:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/mips/kernel/scall64-o32.S 2013-08-22 20:29:59.000000000 +0000 @@ -469,7 +469,7 @@ sys_call_table: PTR compat_sys_mq_timedreceive PTR compat_sys_mq_notify /* 4275 */ PTR compat_sys_mq_getsetattr - PTR sys_ni_syscall /* sys_vserver */ + PTR sys32_vserver PTR compat_sys_waitid PTR sys_ni_syscall /* available, was setaltroot */ PTR sys_add_key /* 4280 */ diff -NurpP --minimal linux-3.10.19/arch/mips/kernel/traps.c linux-3.10.19-vs2.3.6.8/arch/mips/kernel/traps.c --- linux-3.10.19/arch/mips/kernel/traps.c 2013-07-14 17:00:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/mips/kernel/traps.c 2013-08-22 20:29:59.000000000 +0000 @@ -331,9 +331,10 @@ void show_registers(struct pt_regs *regs __show_regs(regs); print_modules(); - printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n", - current->comm, current->pid, current_thread_info(), current, - field, current_thread_info()->tp_value); + printk("Process %s (pid: %d:#%u, threadinfo=%p, task=%p, tls=%0*lx)\n", + current->comm, task_pid_nr(current), current->xid, + current_thread_info(), current, + field, current_thread_info()->tp_value); if (cpu_has_userlocal) { unsigned long tls; diff -NurpP --minimal linux-3.10.19/arch/parisc/Kconfig linux-3.10.19-vs2.3.6.8/arch/parisc/Kconfig --- linux-3.10.19/arch/parisc/Kconfig 2013-07-14 17:00:29.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/parisc/Kconfig 2013-08-22 20:29:59.000000000 +0000 @@ -318,6 +318,8 @@ source "fs/Kconfig" source "arch/parisc/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff 
-NurpP --minimal linux-3.10.19/arch/parisc/kernel/syscall_table.S linux-3.10.19-vs2.3.6.8/arch/parisc/kernel/syscall_table.S --- linux-3.10.19/arch/parisc/kernel/syscall_table.S 2013-07-14 17:00:31.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/parisc/kernel/syscall_table.S 2013-08-22 20:29:59.000000000 +0000 @@ -358,7 +358,7 @@ ENTRY_COMP(mbind) /* 260 */ ENTRY_COMP(get_mempolicy) ENTRY_COMP(set_mempolicy) - ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */ + ENTRY_DIFF(vserver) ENTRY_SAME(add_key) ENTRY_SAME(request_key) /* 265 */ ENTRY_SAME(keyctl) diff -NurpP --minimal linux-3.10.19/arch/parisc/kernel/traps.c linux-3.10.19-vs2.3.6.8/arch/parisc/kernel/traps.c --- linux-3.10.19/arch/parisc/kernel/traps.c 2013-11-13 17:21:12.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/parisc/kernel/traps.c 2013-11-13 17:17:15.000000000 +0000 @@ -229,8 +229,9 @@ void die_if_kernel(char *str, struct pt_ if (err == 0) return; /* STFU */ - printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n", - current->comm, task_pid_nr(current), str, err, regs->iaoq[0]); + printk(KERN_CRIT "%s (pid %d:#%u): %s (code %ld) at " RFMT "\n", + current->comm, task_pid_nr(current), current->xid, + str, err, regs->iaoq[0]); #ifdef PRINT_USER_FAULTS /* XXX for debugging only */ show_regs(regs); @@ -263,8 +264,8 @@ void die_if_kernel(char *str, struct pt_ pdc_console_restart(); if (err) - printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n", - current->comm, task_pid_nr(current), str, err); + printk(KERN_CRIT "%s (pid %d:#%u): %s (code %ld)\n", + current->comm, task_pid_nr(current), current->xid, str, err); /* Wot's wrong wif bein' racy? */ if (current->thread.flags & PARISC_KERNEL_DEATH) { diff -NurpP --minimal linux-3.10.19/arch/parisc/mm/fault.c linux-3.10.19-vs2.3.6.8/arch/parisc/mm/fault.c --- linux-3.10.19/arch/parisc/mm/fault.c 2013-05-31 13:44:44.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/parisc/mm/fault.c 2013-08-22 20:29:59.000000000 +0000 @@ -257,8 +257,9 @@ bad_area: #ifdef PRINT_USER_FAULTS printk(KERN_DEBUG "\n"); - printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n", - task_pid_nr(tsk), tsk->comm, code, address); + printk(KERN_DEBUG "do_page_fault() pid=%d:#%u " + "command='%s' type=%lu address=0x%08lx\n", + task_pid_nr(tsk), tsk->xid, tsk->comm, code, address); if (vma) { printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n", vma->vm_start, vma->vm_end); diff -NurpP --minimal linux-3.10.19/arch/powerpc/Kconfig linux-3.10.19-vs2.3.6.8/arch/powerpc/Kconfig --- linux-3.10.19/arch/powerpc/Kconfig 2013-11-13 17:21:12.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/powerpc/Kconfig 2013-11-13 17:17:15.000000000 +0000 @@ -1010,6 +1010,8 @@ source "lib/Kconfig" source "arch/powerpc/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" config KEYS_COMPAT diff -NurpP --minimal linux-3.10.19/arch/powerpc/include/uapi/asm/unistd.h linux-3.10.19-vs2.3.6.8/arch/powerpc/include/uapi/asm/unistd.h --- linux-3.10.19/arch/powerpc/include/uapi/asm/unistd.h 2013-05-31 13:44:44.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/powerpc/include/uapi/asm/unistd.h 2013-08-22 20:29:59.000000000 +0000 @@ -275,7 +275,7 @@ #endif #define __NR_rtas 255 #define __NR_sys_debug_setcontext 256 -/* Number 257 is reserved for vserver */ +#define __NR_vserver 257 #define __NR_migrate_pages 258 #define __NR_mbind 259 #define __NR_get_mempolicy 260 diff -NurpP --minimal linux-3.10.19/arch/powerpc/kernel/traps.c linux-3.10.19-vs2.3.6.8/arch/powerpc/kernel/traps.c --- 
linux-3.10.19/arch/powerpc/kernel/traps.c 2013-11-13 17:21:12.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/powerpc/kernel/traps.c 2013-11-13 17:17:15.000000000 +0000 @@ -1231,8 +1231,9 @@ void nonrecoverable_exception(struct pt_ void trace_syscall(struct pt_regs *regs) { - printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n", - current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0], + printk("Task: %p(%d:#%u), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n", + current, task_pid_nr(current), current->xid, + regs->nip, regs->link, regs->gpr[0], regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted()); } diff -NurpP --minimal linux-3.10.19/arch/s390/Kconfig linux-3.10.19-vs2.3.6.8/arch/s390/Kconfig --- linux-3.10.19/arch/s390/Kconfig 2013-11-13 17:21:12.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/s390/Kconfig 2013-11-13 17:17:15.000000000 +0000 @@ -579,6 +579,8 @@ source "fs/Kconfig" source "arch/s390/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -NurpP --minimal linux-3.10.19/arch/s390/include/asm/tlb.h linux-3.10.19-vs2.3.6.8/arch/s390/include/asm/tlb.h --- linux-3.10.19/arch/s390/include/asm/tlb.h 2013-11-13 17:21:12.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/s390/include/asm/tlb.h 2013-11-13 17:17:15.000000000 +0000 @@ -24,6 +24,7 @@ #include #include #include + #include #include #include diff -NurpP --minimal linux-3.10.19/arch/s390/include/uapi/asm/unistd.h linux-3.10.19-vs2.3.6.8/arch/s390/include/uapi/asm/unistd.h --- linux-3.10.19/arch/s390/include/uapi/asm/unistd.h 2013-02-19 13:57:16.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/s390/include/uapi/asm/unistd.h 2013-08-22 20:29:59.000000000 +0000 @@ -200,7 +200,7 @@ #define __NR_clock_gettime (__NR_timer_create+6) #define __NR_clock_getres (__NR_timer_create+7) #define __NR_clock_nanosleep (__NR_timer_create+8) -/* Number 263 is reserved for vserver */ +#define __NR_vserver 263 #define __NR_statfs64 265 #define __NR_fstatfs64 266 #define __NR_remap_file_pages 267 diff -NurpP --minimal linux-3.10.19/arch/s390/kernel/ptrace.c linux-3.10.19-vs2.3.6.8/arch/s390/kernel/ptrace.c --- linux-3.10.19/arch/s390/kernel/ptrace.c 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/s390/kernel/ptrace.c 2013-08-22 20:29:59.000000000 +0000 @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include diff -NurpP --minimal linux-3.10.19/arch/s390/kernel/syscalls.S linux-3.10.19-vs2.3.6.8/arch/s390/kernel/syscalls.S --- linux-3.10.19/arch/s390/kernel/syscalls.S 2013-07-14 17:00:34.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/s390/kernel/syscalls.S 2013-08-22 20:29:59.000000000 +0000 @@ -271,7 +271,7 @@ SYSCALL(sys_clock_settime,sys_clock_sett SYSCALL(sys_clock_gettime,sys_clock_gettime,sys32_clock_gettime_wrapper) /* 260 */ SYSCALL(sys_clock_getres,sys_clock_getres,sys32_clock_getres_wrapper) SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,sys32_clock_nanosleep_wrapper) -NI_SYSCALL /* reserved for vserver */ +SYSCALL(sys_vserver,sys_vserver,sys32_vserver) SYSCALL(sys_s390_fadvise64_64,sys_ni_syscall,sys32_fadvise64_64_wrapper) SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64_wrapper) SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64_wrapper) diff -NurpP --minimal linux-3.10.19/arch/sh/Kconfig linux-3.10.19-vs2.3.6.8/arch/sh/Kconfig --- linux-3.10.19/arch/sh/Kconfig 2013-07-14 17:00:35.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/sh/Kconfig 2013-08-22 20:29:59.000000000 +0000 
@@ -928,6 +928,8 @@ source "fs/Kconfig" source "arch/sh/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -NurpP --minimal linux-3.10.19/arch/sh/kernel/irq.c linux-3.10.19-vs2.3.6.8/arch/sh/kernel/irq.c --- linux-3.10.19/arch/sh/kernel/irq.c 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/sh/kernel/irq.c 2013-08-22 20:29:59.000000000 +0000 @@ -14,6 +14,7 @@ #include #include #include +// #include #include #include #include diff -NurpP --minimal linux-3.10.19/arch/sparc/Kconfig linux-3.10.19-vs2.3.6.8/arch/sparc/Kconfig --- linux-3.10.19/arch/sparc/Kconfig 2013-07-14 17:00:35.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/sparc/Kconfig 2013-08-22 20:29:59.000000000 +0000 @@ -550,6 +550,8 @@ source "fs/Kconfig" source "arch/sparc/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -NurpP --minimal linux-3.10.19/arch/sparc/include/uapi/asm/unistd.h linux-3.10.19-vs2.3.6.8/arch/sparc/include/uapi/asm/unistd.h --- linux-3.10.19/arch/sparc/include/uapi/asm/unistd.h 2013-02-19 13:57:17.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/sparc/include/uapi/asm/unistd.h 2013-08-22 20:29:59.000000000 +0000 @@ -332,7 +332,7 @@ #define __NR_timer_getoverrun 264 #define __NR_timer_delete 265 #define __NR_timer_create 266 -/* #define __NR_vserver 267 Reserved for VSERVER */ +#define __NR_vserver 267 #define __NR_io_setup 268 #define __NR_io_destroy 269 #define __NR_io_submit 270 diff -NurpP --minimal linux-3.10.19/arch/sparc/kernel/systbls_32.S linux-3.10.19-vs2.3.6.8/arch/sparc/kernel/systbls_32.S --- linux-3.10.19/arch/sparc/kernel/systbls_32.S 2013-05-31 13:44:48.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/sparc/kernel/systbls_32.S 2013-08-22 20:29:59.000000000 +0000 @@ -70,7 +70,7 @@ sys_call_table: /*250*/ .long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_ni_syscall /*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep /*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun -/*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy +/*265*/ .long sys_timer_delete, sys_timer_create, sys_vserver, sys_io_setup, sys_io_destroy /*270*/ .long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink /*275*/ .long sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid /*280*/ .long sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat diff -NurpP --minimal linux-3.10.19/arch/sparc/kernel/systbls_64.S linux-3.10.19-vs2.3.6.8/arch/sparc/kernel/systbls_64.S --- linux-3.10.19/arch/sparc/kernel/systbls_64.S 2013-07-14 17:00:35.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/sparc/kernel/systbls_64.S 2013-08-22 20:29:59.000000000 +0000 @@ -71,7 +71,7 @@ sys_call_table32: /*250*/ .word sys_mremap, compat_sys_sysctl, sys_getsid, sys_fdatasync, sys_nis_syscall .word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep /*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun - .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy + .word sys_timer_delete, compat_sys_timer_create, sys32_vserver, compat_sys_io_setup, sys_io_destroy /*270*/ .word sys32_io_submit, 
sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink .word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid /*280*/ .word sys_tee, sys_add_key, sys_request_key, compat_sys_keyctl, compat_sys_openat @@ -149,7 +149,7 @@ sys_call_table: /*250*/ .word sys_64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nis_syscall .word sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep /*260*/ .word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun - .word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy + .word sys_timer_delete, sys_timer_create, sys_vserver, sys_io_setup, sys_io_destroy /*270*/ .word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink .word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid /*280*/ .word sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat diff -NurpP --minimal linux-3.10.19/arch/um/Kconfig.rest linux-3.10.19-vs2.3.6.8/arch/um/Kconfig.rest --- linux-3.10.19/arch/um/Kconfig.rest 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/um/Kconfig.rest 2013-08-22 20:29:59.000000000 +0000 @@ -12,6 +12,8 @@ source "arch/um/Kconfig.net" source "fs/Kconfig" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -NurpP --minimal linux-3.10.19/arch/x86/Kconfig linux-3.10.19-vs2.3.6.8/arch/x86/Kconfig --- linux-3.10.19/arch/x86/Kconfig 2013-07-14 17:00:36.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/x86/Kconfig 2013-08-22 20:29:59.000000000 +0000 @@ -2339,6 +2339,8 @@ source "fs/Kconfig" source "arch/x86/Kconfig.debug" +source "kernel/vserver/Kconfig" + source "security/Kconfig" source "crypto/Kconfig" diff -NurpP --minimal linux-3.10.19/arch/x86/syscalls/syscall_32.tbl linux-3.10.19-vs2.3.6.8/arch/x86/syscalls/syscall_32.tbl --- linux-3.10.19/arch/x86/syscalls/syscall_32.tbl 2013-07-14 17:00:37.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/x86/syscalls/syscall_32.tbl 2013-08-22 20:29:59.000000000 +0000 @@ -279,7 +279,7 @@ 270 i386 tgkill sys_tgkill 271 i386 utimes sys_utimes compat_sys_utimes 272 i386 fadvise64_64 sys_fadvise64_64 sys32_fadvise64_64 -273 i386 vserver +273 i386 vserver sys_vserver sys32_vserver 274 i386 mbind sys_mbind 275 i386 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy 276 i386 set_mempolicy sys_set_mempolicy diff -NurpP --minimal linux-3.10.19/arch/x86/syscalls/syscall_64.tbl linux-3.10.19-vs2.3.6.8/arch/x86/syscalls/syscall_64.tbl --- linux-3.10.19/arch/x86/syscalls/syscall_64.tbl 2013-05-31 13:44:50.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/arch/x86/syscalls/syscall_64.tbl 2013-08-22 20:29:59.000000000 +0000 @@ -242,7 +242,7 @@ 233 common epoll_ctl sys_epoll_ctl 234 common tgkill sys_tgkill 235 common utimes sys_utimes -236 64 vserver +236 64 vserver sys_vserver 237 common mbind sys_mbind 238 common set_mempolicy sys_set_mempolicy 239 common get_mempolicy sys_get_mempolicy diff -NurpP --minimal linux-3.10.19/drivers/block/Kconfig linux-3.10.19-vs2.3.6.8/drivers/block/Kconfig --- linux-3.10.19/drivers/block/Kconfig 2013-05-31 13:44:51.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/drivers/block/Kconfig 2013-08-22 20:29:59.000000000 +0000 @@ -278,6 +278,13 @@ config BLK_DEV_CRYPTOLOOP source "drivers/block/drbd/Kconfig" +config BLK_DEV_VROOT + tristate "Virtual Root device support" + depends on QUOTACTL + 
---help--- + Saying Y here will allow you to use quota/fs ioctls on a shared + partition within a virtual server without compromising security. + config BLK_DEV_NBD tristate "Network block device support" depends on NET diff -NurpP --minimal linux-3.10.19/drivers/block/Makefile linux-3.10.19-vs2.3.6.8/drivers/block/Makefile --- linux-3.10.19/drivers/block/Makefile 2013-07-14 17:00:41.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/drivers/block/Makefile 2013-08-22 20:29:59.000000000 +0000 @@ -33,6 +33,7 @@ obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o obj-$(CONFIG_VIODASD) += viodasd.o obj-$(CONFIG_BLK_DEV_SX8) += sx8.o obj-$(CONFIG_BLK_DEV_HD) += hd.o +obj-$(CONFIG_BLK_DEV_VROOT) += vroot.o obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o obj-$(CONFIG_XEN_BLKDEV_BACKEND) += xen-blkback/ diff -NurpP --minimal linux-3.10.19/drivers/block/loop.c linux-3.10.19-vs2.3.6.8/drivers/block/loop.c --- linux-3.10.19/drivers/block/loop.c 2013-07-14 17:00:41.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/drivers/block/loop.c 2013-08-22 20:29:59.000000000 +0000 @@ -76,6 +76,7 @@ #include #include #include +#include #include @@ -884,6 +885,7 @@ static int loop_set_fd(struct loop_devic lo->lo_blocksize = lo_blocksize; lo->lo_device = bdev; lo->lo_flags = lo_flags; + lo->lo_xid = vx_current_xid(); lo->lo_backing_file = file; lo->transfer = transfer_none; lo->ioctl = NULL; @@ -1035,6 +1037,7 @@ static int loop_clr_fd(struct loop_devic lo->lo_sizelimit = 0; lo->lo_encrypt_key_size = 0; lo->lo_thread = NULL; + lo->lo_xid = 0; memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); memset(lo->lo_file_name, 0, LO_NAME_SIZE); @@ -1078,7 +1081,7 @@ loop_set_status(struct loop_device *lo, if (lo->lo_encrypt_key_size && !uid_eq(lo->lo_key_owner, uid) && - !capable(CAP_SYS_ADMIN)) + !vx_capable(CAP_SYS_ADMIN, VXC_ADMIN_CLOOP)) return -EPERM; if (lo->lo_state != Lo_bound) return -ENXIO; @@ -1168,7 +1171,8 @@ loop_get_status(struct loop_device *lo, memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE); info->lo_encrypt_type = lo->lo_encryption ? lo->lo_encryption->number : 0; - if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) { + if (lo->lo_encrypt_key_size && + vx_capable(CAP_SYS_ADMIN, VXC_ADMIN_CLOOP)) { info->lo_encrypt_key_size = lo->lo_encrypt_key_size; memcpy(info->lo_encrypt_key, lo->lo_encrypt_key, lo->lo_encrypt_key_size); @@ -1510,6 +1514,11 @@ static int lo_open(struct block_device * goto out; } + if (!vx_check(lo->lo_xid, VS_IDENT|VS_HOSTID|VS_ADMIN_P)) { + err = -EACCES; + goto out; + } + mutex_lock(&lo->lo_ctl_mutex); lo->lo_refcnt++; mutex_unlock(&lo->lo_ctl_mutex); diff -NurpP --minimal linux-3.10.19/drivers/block/vroot.c linux-3.10.19-vs2.3.6.8/drivers/block/vroot.c --- linux-3.10.19/drivers/block/vroot.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/drivers/block/vroot.c 2013-08-23 00:23:45.000000000 +0000 @@ -0,0 +1,290 @@ +/* + * linux/drivers/block/vroot.c + * + * written by Herbert Pötzl, 9/11/2002 + * ported to 2.6.10 by Herbert Pötzl, 30/12/2004 + * + * based on the loop.c code by Theodore Ts'o. + * + * Copyright (C) 2002-2007 by Herbert Pötzl. + * Redistribution of this file is permitted under the + * GNU General Public License. 
+ * + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + + +static int max_vroot = 8; + +static struct vroot_device *vroot_dev; +static struct gendisk **disks; + + +static int vroot_set_dev( + struct vroot_device *vr, + struct block_device *bdev, + unsigned int arg) +{ + struct block_device *real_bdev; + struct file *file; + struct inode *inode; + int error; + + error = -EBUSY; + if (vr->vr_state != Vr_unbound) + goto out; + + error = -EBADF; + file = fget(arg); + if (!file) + goto out; + + error = -EINVAL; + inode = file->f_dentry->d_inode; + + + if (S_ISBLK(inode->i_mode)) { + real_bdev = inode->i_bdev; + vr->vr_device = real_bdev; + __iget(real_bdev->bd_inode); + } else + goto out_fput; + + vxdprintk(VXD_CBIT(misc, 0), + "vroot[%d]_set_dev: dev=" VXF_DEV, + vr->vr_number, VXD_DEV(real_bdev)); + + vr->vr_state = Vr_bound; + error = 0; + + out_fput: + fput(file); + out: + return error; +} + +static int vroot_clr_dev( + struct vroot_device *vr, + struct block_device *bdev) +{ + struct block_device *real_bdev; + + if (vr->vr_state != Vr_bound) + return -ENXIO; + if (vr->vr_refcnt > 1) /* we needed one fd for the ioctl */ + return -EBUSY; + + real_bdev = vr->vr_device; + + vxdprintk(VXD_CBIT(misc, 0), + "vroot[%d]_clr_dev: dev=" VXF_DEV, + vr->vr_number, VXD_DEV(real_bdev)); + + bdput(real_bdev); + vr->vr_state = Vr_unbound; + vr->vr_device = NULL; + return 0; +} + + +static int vr_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct vroot_device *vr = bdev->bd_disk->private_data; + int err; + + down(&vr->vr_ctl_mutex); + switch (cmd) { + case VROOT_SET_DEV: + err = vroot_set_dev(vr, bdev, arg); + break; + case VROOT_CLR_DEV: + err = vroot_clr_dev(vr, bdev); + break; + default: + err = -EINVAL; + break; + } + up(&vr->vr_ctl_mutex); + return err; +} + +static int vr_open(struct block_device *bdev, fmode_t mode) +{ + struct vroot_device *vr = bdev->bd_disk->private_data; + + down(&vr->vr_ctl_mutex); + vr->vr_refcnt++; + up(&vr->vr_ctl_mutex); + return 0; +} + +static void vr_release(struct gendisk *disk, fmode_t mode) +{ + struct vroot_device *vr = disk->private_data; + + down(&vr->vr_ctl_mutex); + --vr->vr_refcnt; + up(&vr->vr_ctl_mutex); +} + +static struct block_device_operations vr_fops = { + .owner = THIS_MODULE, + .open = vr_open, + .release = vr_release, + .ioctl = vr_ioctl, +}; + +static void vroot_make_request(struct request_queue *q, struct bio *bio) +{ + printk("vroot_make_request %p, %p\n", q, bio); + bio_io_error(bio); +} + +struct block_device *__vroot_get_real_bdev(struct block_device *bdev) +{ + struct inode *inode = bdev->bd_inode; + struct vroot_device *vr; + struct block_device *real_bdev; + int minor = iminor(inode); + + vr = &vroot_dev[minor]; + real_bdev = vr->vr_device; + + vxdprintk(VXD_CBIT(misc, 0), + "vroot[%d]_get_real_bdev: dev=" VXF_DEV, + vr->vr_number, VXD_DEV(real_bdev)); + + if (vr->vr_state != Vr_bound) + return ERR_PTR(-ENXIO); + + __iget(real_bdev->bd_inode); + return real_bdev; +} + + + +/* + * And now the modules code and kernel interface. 
+ */ + +module_param(max_vroot, int, 0); + +MODULE_PARM_DESC(max_vroot, "Maximum number of vroot devices (1-256)"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_BLOCKDEV_MAJOR(VROOT_MAJOR); + +MODULE_AUTHOR ("Herbert Pötzl"); +MODULE_DESCRIPTION ("Virtual Root Device Mapper"); + + +int __init vroot_init(void) +{ + int err, i; + + if (max_vroot < 1 || max_vroot > 256) { + max_vroot = MAX_VROOT_DEFAULT; + printk(KERN_WARNING "vroot: invalid max_vroot " + "(must be between 1 and 256), " + "using default (%d)\n", max_vroot); + } + + if (register_blkdev(VROOT_MAJOR, "vroot")) + return -EIO; + + err = -ENOMEM; + vroot_dev = kmalloc(max_vroot * sizeof(struct vroot_device), GFP_KERNEL); + if (!vroot_dev) + goto out_mem1; + memset(vroot_dev, 0, max_vroot * sizeof(struct vroot_device)); + + disks = kmalloc(max_vroot * sizeof(struct gendisk *), GFP_KERNEL); + if (!disks) + goto out_mem2; + + for (i = 0; i < max_vroot; i++) { + disks[i] = alloc_disk(1); + if (!disks[i]) + goto out_mem3; + disks[i]->queue = blk_alloc_queue(GFP_KERNEL); + if (!disks[i]->queue) + goto out_mem3; + blk_queue_make_request(disks[i]->queue, vroot_make_request); + } + + for (i = 0; i < max_vroot; i++) { + struct vroot_device *vr = &vroot_dev[i]; + struct gendisk *disk = disks[i]; + + memset(vr, 0, sizeof(*vr)); + sema_init(&vr->vr_ctl_mutex, 1); + vr->vr_number = i; + disk->major = VROOT_MAJOR; + disk->first_minor = i; + disk->fops = &vr_fops; + sprintf(disk->disk_name, "vroot%d", i); + disk->private_data = vr; + } + + err = register_vroot_grb(&__vroot_get_real_bdev); + if (err) + goto out_mem3; + + for (i = 0; i < max_vroot; i++) + add_disk(disks[i]); + printk(KERN_INFO "vroot: loaded (max %d devices)\n", max_vroot); + return 0; + +out_mem3: + while (i--) + put_disk(disks[i]); + kfree(disks); +out_mem2: + kfree(vroot_dev); +out_mem1: + unregister_blkdev(VROOT_MAJOR, "vroot"); + printk(KERN_ERR "vroot: ran out of memory\n"); + return err; +} + +void vroot_exit(void) +{ + int i; + + if (unregister_vroot_grb(&__vroot_get_real_bdev)) + printk(KERN_WARNING "vroot: cannot unregister grb\n"); + + for (i = 0; i < max_vroot; i++) { + del_gendisk(disks[i]); + put_disk(disks[i]); + } + unregister_blkdev(VROOT_MAJOR, "vroot"); + + kfree(disks); + kfree(vroot_dev); +} + +module_init(vroot_init); +module_exit(vroot_exit); + +#ifndef MODULE + +static int __init max_vroot_setup(char *str) +{ + max_vroot = simple_strtol(str, NULL, 0); + return 1; +} + +__setup("max_vroot=", max_vroot_setup); + +#endif + diff -NurpP --minimal linux-3.10.19/drivers/infiniband/Kconfig linux-3.10.19-vs2.3.6.8/drivers/infiniband/Kconfig --- linux-3.10.19/drivers/infiniband/Kconfig 2013-07-14 17:00:49.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/drivers/infiniband/Kconfig 2013-08-22 20:29:59.000000000 +0000 @@ -39,7 +39,7 @@ config INFINIBAND_USER_MEM config INFINIBAND_ADDR_TRANS bool depends on INET - depends on !(INFINIBAND = y && IPV6 = m) + depends on !(INFINIBAND = y && IPV6 = y) default y source "drivers/infiniband/hw/mthca/Kconfig" diff -NurpP --minimal linux-3.10.19/drivers/infiniband/core/addr.c linux-3.10.19-vs2.3.6.8/drivers/infiniband/core/addr.c --- linux-3.10.19/drivers/infiniband/core/addr.c 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/drivers/infiniband/core/addr.c 2013-08-22 20:29:59.000000000 +0000 @@ -261,7 +261,7 @@ static int addr6_resolve(struct sockaddr if (ipv6_addr_any(&fl6.saddr)) { ret = ipv6_dev_get_saddr(&init_net, ip6_dst_idev(dst)->dev, - &fl6.daddr, 0, &fl6.saddr); + &fl6.daddr, 0, &fl6.saddr, NULL); if (ret) goto put; 
diff -NurpP --minimal linux-3.10.19/drivers/md/dm-ioctl.c linux-3.10.19-vs2.3.6.8/drivers/md/dm-ioctl.c --- linux-3.10.19/drivers/md/dm-ioctl.c 2013-11-13 17:21:12.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/drivers/md/dm-ioctl.c 2013-11-13 17:17:15.000000000 +0000 @@ -16,6 +16,7 @@ #include #include #include +#include #include @@ -106,7 +107,8 @@ static struct hash_cell *__get_name_cell unsigned int h = hash_str(str); list_for_each_entry (hc, _name_buckets + h, name_list) - if (!strcmp(hc->name, str)) { + if (vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT) && + !strcmp(hc->name, str)) { dm_get(hc->md); return hc; } @@ -120,7 +122,8 @@ static struct hash_cell *__get_uuid_cell unsigned int h = hash_str(str); list_for_each_entry (hc, _uuid_buckets + h, uuid_list) - if (!strcmp(hc->uuid, str)) { + if (vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT) && + !strcmp(hc->uuid, str)) { dm_get(hc->md); return hc; } @@ -131,13 +134,15 @@ static struct hash_cell *__get_uuid_cell static struct hash_cell *__get_dev_cell(uint64_t dev) { struct mapped_device *md; - struct hash_cell *hc; + struct hash_cell *hc = NULL; md = dm_get_md(huge_decode_dev(dev)); if (!md) return NULL; - hc = dm_get_mdptr(md); + if (vx_check(dm_get_xid(md), VS_WATCH_P | VS_IDENT)) + hc = dm_get_mdptr(md); + if (!hc) { dm_put(md); return NULL; @@ -445,6 +450,9 @@ typedef int (*ioctl_fn)(struct dm_ioctl static int remove_all(struct dm_ioctl *param, size_t param_size) { + if (!vx_check(0, VS_ADMIN)) + return -EPERM; + dm_hash_remove_all(1); param->data_size = 0; return 0; @@ -492,6 +500,8 @@ static int list_devices(struct dm_ioctl */ for (i = 0; i < NUM_BUCKETS; i++) { list_for_each_entry (hc, _name_buckets + i, name_list) { + if (!vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT)) + continue; needed += sizeof(struct dm_name_list); needed += strlen(hc->name) + 1; needed += ALIGN_MASK; @@ -515,6 +525,8 @@ static int list_devices(struct dm_ioctl */ for (i = 0; i < NUM_BUCKETS; i++) { list_for_each_entry (hc, _name_buckets + i, name_list) { + if (!vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT)) + continue; if (old_nl) old_nl->next = (uint32_t) ((void *) nl - (void *) old_nl); @@ -1725,8 +1737,8 @@ static int ctl_ioctl(uint command, struc size_t input_param_size; struct dm_ioctl param_kernel; - /* only root can play with this */ - if (!capable(CAP_SYS_ADMIN)) + /* only root and certain contexts can play with this */ + if (!vx_capable(CAP_SYS_ADMIN, VXC_ADMIN_MAPPER)) return -EACCES; if (_IOC_TYPE(command) != DM_IOCTL) diff -NurpP --minimal linux-3.10.19/drivers/md/dm.c linux-3.10.19-vs2.3.6.8/drivers/md/dm.c --- linux-3.10.19/drivers/md/dm.c 2013-11-13 17:21:12.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/drivers/md/dm.c 2013-11-13 17:17:15.000000000 +0000 @@ -19,6 +19,7 @@ #include #include #include +#include #include @@ -125,6 +126,7 @@ struct mapped_device { rwlock_t map_lock; atomic_t holders; atomic_t open_count; + vxid_t xid; unsigned long flags; @@ -317,6 +319,7 @@ int dm_deleting_md(struct mapped_device static int dm_blk_open(struct block_device *bdev, fmode_t mode) { struct mapped_device *md; + int ret = -ENXIO; spin_lock(&_minor_lock); @@ -325,18 +328,19 @@ static int dm_blk_open(struct block_devi goto out; if (test_bit(DMF_FREEING, &md->flags) || - dm_deleting_md(md)) { - md = NULL; + dm_deleting_md(md)) + goto out; + + ret = -EACCES; + if (!vx_check(md->xid, VS_IDENT|VS_HOSTID)) goto out; - } dm_get(md); atomic_inc(&md->open_count); - + ret = 0; out: spin_unlock(&_minor_lock); - - return md ? 
0 : -ENXIO; + return ret; } static void dm_blk_close(struct gendisk *disk, fmode_t mode) @@ -552,6 +556,14 @@ int dm_set_geometry(struct mapped_device return 0; } +/* + * Get the xid associated with a dm device + */ +vxid_t dm_get_xid(struct mapped_device *md) +{ + return md->xid; +} + /*----------------------------------------------------------------- * CRUD START: * A more elegant soln is in the works that uses the queue @@ -1889,6 +1901,7 @@ static struct mapped_device *alloc_dev(i INIT_LIST_HEAD(&md->uevent_list); spin_lock_init(&md->uevent_lock); + md->xid = vx_current_xid(); md->queue = blk_alloc_queue(GFP_KERNEL); if (!md->queue) goto bad_queue; diff -NurpP --minimal linux-3.10.19/drivers/md/dm.h linux-3.10.19-vs2.3.6.8/drivers/md/dm.h --- linux-3.10.19/drivers/md/dm.h 2013-02-19 13:57:51.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/drivers/md/dm.h 2013-08-22 20:29:59.000000000 +0000 @@ -46,6 +46,8 @@ struct dm_dev_internal { struct dm_table; struct dm_md_mempools; +vxid_t dm_get_xid(struct mapped_device *md); + /*----------------------------------------------------------------- * Internal table functions. *---------------------------------------------------------------*/ diff -NurpP --minimal linux-3.10.19/drivers/net/tun.c linux-3.10.19-vs2.3.6.8/drivers/net/tun.c --- linux-3.10.19/drivers/net/tun.c 2013-11-13 17:21:13.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/drivers/net/tun.c 2013-11-13 17:17:15.000000000 +0000 @@ -64,6 +64,7 @@ #include #include #include +#include #include #include #include @@ -164,6 +165,7 @@ struct tun_struct { unsigned int flags; kuid_t owner; kgid_t group; + vnid_t nid; struct net_device *dev; netdev_features_t set_features; @@ -380,6 +382,7 @@ static inline bool tun_not_capable(struc return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) || (gid_valid(tun->group) && !in_egroup_p(tun->group))) && !ns_capable(net->user_ns, CAP_NET_ADMIN); + /* !cap_raised(current_cap(), CAP_NET_ADMIN) */ } static void tun_set_real_num_queues(struct tun_struct *tun) @@ -1425,6 +1428,7 @@ static void tun_setup(struct net_device tun->owner = INVALID_UID; tun->group = INVALID_GID; + tun->nid = nx_current_nid(); dev->ethtool_ops = &tun_ethtool_ops; dev->destructor = tun_free_netdev; @@ -1616,6 +1620,9 @@ static int tun_set_iff(struct net *net, if (err < 0) return err; + if (!nx_check(tun->nid, VS_IDENT | VS_HOSTID | VS_ADMIN_P)) + return -EPERM; + err = tun_attach(tun, file); if (err < 0) return err; @@ -1634,7 +1641,7 @@ static int tun_set_iff(struct net *net, int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? 
MAX_TAP_QUEUES : 1; - if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + if (!nx_ns_capable(net->user_ns, CAP_NET_ADMIN, NXC_TUN_CREATE)) return -EPERM; err = security_tun_dev_create(); if (err < 0) @@ -1985,6 +1992,16 @@ static long __tun_chr_ioctl(struct file from_kgid(&init_user_ns, tun->group)); break; + case TUNSETNID: + if (!capable(CAP_CONTEXT)) + return -EPERM; + + /* Set nid owner of the device */ + tun->nid = (vnid_t) arg; + + tun_debug(KERN_INFO, tun, "nid owner set to %u\n", tun->nid); + break; + case TUNSETLINK: /* Only allow setting the type when the interface is down */ if (tun->dev->flags & IFF_UP) { diff -NurpP --minimal linux-3.10.19/drivers/tty/sysrq.c linux-3.10.19-vs2.3.6.8/drivers/tty/sysrq.c --- linux-3.10.19/drivers/tty/sysrq.c 2013-07-14 17:01:22.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/drivers/tty/sysrq.c 2013-08-22 20:58:35.000000000 +0000 @@ -44,6 +44,7 @@ #include #include #include +#include #include #include @@ -405,6 +406,21 @@ static struct sysrq_key_op sysrq_unrt_op .enable_mask = SYSRQ_ENABLE_RTNICE, }; + +#ifdef CONFIG_VSERVER_DEBUG +static void sysrq_handle_vxinfo(int key) +{ + dump_vx_info_inactive((key == 'x') ? 0 : 1); +} + +static struct sysrq_key_op sysrq_showvxinfo_op = { + .handler = sysrq_handle_vxinfo, + .help_msg = "conteXt", + .action_msg = "Show Context Info", + .enable_mask = SYSRQ_ENABLE_DUMP, +}; +#endif + /* Key Operations table and lock */ static DEFINE_SPINLOCK(sysrq_key_table_lock); @@ -460,7 +476,11 @@ static struct sysrq_key_op *sysrq_key_ta &sysrq_showstate_blocked_op, /* w */ /* x: May be registered on ppc/powerpc for xmon */ /* x: May be registered on sparc64 for global PMU dump */ +#ifdef CONFIG_VSERVER_DEBUG + &sysrq_showvxinfo_op, /* x */ +#else NULL, /* x */ +#endif /* y: May be registered on sparc64 for global register dump */ NULL, /* y */ &sysrq_ftrace_dump_op, /* z */ @@ -475,6 +495,8 @@ static int sysrq_key_table_key2index(int retval = key - '0'; else if ((key >= 'a') && (key <= 'z')) retval = key + 10 - 'a'; + else if ((key >= 'A') && (key <= 'Z')) + retval = key + 10 - 'A'; else retval = -1; return retval; diff -NurpP --minimal linux-3.10.19/drivers/tty/tty_io.c linux-3.10.19-vs2.3.6.8/drivers/tty/tty_io.c --- linux-3.10.19/drivers/tty/tty_io.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/drivers/tty/tty_io.c 2013-11-13 17:17:15.000000000 +0000 @@ -104,6 +104,7 @@ #include #include +#include #undef TTY_DEBUG_HANGUP @@ -2213,7 +2214,8 @@ static int tiocsti(struct tty_struct *tt char ch, mbz = 0; struct tty_ldisc *ld; - if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN)) + if (((current->signal->tty != tty) && + !vx_capable(CAP_SYS_ADMIN, VXC_TIOCSTI))) return -EPERM; if (get_user(ch, p)) return -EFAULT; @@ -2501,6 +2503,7 @@ static int tiocspgrp(struct tty_struct * return -ENOTTY; if (get_user(pgrp_nr, p)) return -EFAULT; + pgrp_nr = vx_rmap_pid(pgrp_nr); if (pgrp_nr < 0) return -EINVAL; rcu_read_lock(); diff -NurpP --minimal linux-3.10.19/fs/attr.c linux-3.10.19-vs2.3.6.8/fs/attr.c --- linux-3.10.19/fs/attr.c 2013-02-19 13:58:46.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/attr.c 2013-08-22 20:29:59.000000000 +0000 @@ -15,6 +15,9 @@ #include #include #include +#include +#include +#include /** * inode_change_ok - check if attribute changes to an inode are allowed @@ -77,6 +80,10 @@ int inode_change_ok(const struct inode * return -EPERM; } + /* check for inode tag permission */ + if (dx_permission(inode, MAY_WRITE)) + return -EACCES; + return 0; } EXPORT_SYMBOL(inode_change_ok); @@ -147,6 
+154,8 @@ void setattr_copy(struct inode *inode, c inode->i_uid = attr->ia_uid; if (ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; + if ((ia_valid & ATTR_TAG) && IS_TAGGED(inode)) + inode->i_tag = attr->ia_tag; if (ia_valid & ATTR_ATIME) inode->i_atime = timespec_trunc(attr->ia_atime, inode->i_sb->s_time_gran); @@ -177,7 +186,8 @@ int notify_change(struct dentry * dentry WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex)); - if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_TIMES_SET)) { + if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID | + ATTR_TAG | ATTR_TIMES_SET)) { if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) return -EPERM; } diff -NurpP --minimal linux-3.10.19/fs/block_dev.c linux-3.10.19-vs2.3.6.8/fs/block_dev.c --- linux-3.10.19/fs/block_dev.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/block_dev.c 2013-11-13 17:17:15.000000000 +0000 @@ -28,6 +28,7 @@ #include #include #include +#include #include #include "internal.h" @@ -536,6 +537,7 @@ struct block_device *bdget(dev_t dev) bdev->bd_invalidated = 0; inode->i_mode = S_IFBLK; inode->i_rdev = dev; + inode->i_mdev = dev; inode->i_bdev = bdev; inode->i_data.a_ops = &def_blk_aops; mapping_set_gfp_mask(&inode->i_data, GFP_USER); @@ -583,6 +585,11 @@ EXPORT_SYMBOL(bdput); static struct block_device *bd_acquire(struct inode *inode) { struct block_device *bdev; + dev_t mdev; + + if (!vs_map_blkdev(inode->i_rdev, &mdev, DATTR_OPEN)) + return NULL; + inode->i_mdev = mdev; spin_lock(&bdev_lock); bdev = inode->i_bdev; @@ -593,7 +600,7 @@ static struct block_device *bd_acquire(s } spin_unlock(&bdev_lock); - bdev = bdget(inode->i_rdev); + bdev = bdget(mdev); if (bdev) { spin_lock(&bdev_lock); if (!inode->i_bdev) { diff -NurpP --minimal linux-3.10.19/fs/btrfs/ctree.h linux-3.10.19-vs2.3.6.8/fs/btrfs/ctree.h --- linux-3.10.19/fs/btrfs/ctree.h 2013-07-14 17:01:26.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/btrfs/ctree.h 2013-08-22 20:29:59.000000000 +0000 @@ -707,11 +707,14 @@ struct btrfs_inode_item { /* modification sequence number for NFS */ __le64 sequence; + __le16 tag; /* * a little future expansion, for more than this we can * just grow the inode item and version it */ - __le64 reserved[4]; + __le16 reserved16; + __le32 reserved32; + __le64 reserved[3]; struct btrfs_timespec atime; struct btrfs_timespec ctime; struct btrfs_timespec mtime; @@ -1928,6 +1931,8 @@ struct btrfs_ioctl_defrag_range_args { #define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21) #define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR (1 << 22) +#define BTRFS_MOUNT_TAGGED (1 << 24) + #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) #define btrfs_raw_test_opt(o, opt) ((o) & BTRFS_MOUNT_##opt) @@ -2197,6 +2202,7 @@ BTRFS_SETGET_FUNCS(inode_block_group, st BTRFS_SETGET_FUNCS(inode_nlink, struct btrfs_inode_item, nlink, 32); BTRFS_SETGET_FUNCS(inode_uid, struct btrfs_inode_item, uid, 32); BTRFS_SETGET_FUNCS(inode_gid, struct btrfs_inode_item, gid, 32); +BTRFS_SETGET_FUNCS(inode_tag, struct btrfs_inode_item, tag, 16); BTRFS_SETGET_FUNCS(inode_mode, struct btrfs_inode_item, mode, 32); BTRFS_SETGET_FUNCS(inode_rdev, struct btrfs_inode_item, rdev, 64); BTRFS_SETGET_FUNCS(inode_flags, struct btrfs_inode_item, flags, 64); @@ -2250,6 +2256,10 @@ BTRFS_SETGET_FUNCS(extent_flags, struct BTRFS_SETGET_FUNCS(extent_refs_v0, struct btrfs_extent_item_v0, refs, 32); +#define BTRFS_INODE_IXUNLINK (1 << 24) +#define BTRFS_INODE_BARRIER (1 << 25) +#define BTRFS_INODE_COW (1 << 26) + 
BTRFS_SETGET_FUNCS(tree_block_level, struct btrfs_tree_block_info, level, 8); @@ -3578,6 +3588,7 @@ extern const struct dentry_operations bt long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); void btrfs_update_iflags(struct inode *inode); void btrfs_inherit_iflags(struct inode *inode, struct inode *dir); +int btrfs_sync_flags(struct inode *inode, int, int); int btrfs_defrag_file(struct inode *inode, struct file *file, struct btrfs_ioctl_defrag_range_args *range, u64 newer_than, unsigned long max_pages); diff -NurpP --minimal linux-3.10.19/fs/btrfs/disk-io.c linux-3.10.19-vs2.3.6.8/fs/btrfs/disk-io.c --- linux-3.10.19/fs/btrfs/disk-io.c 2013-07-14 17:01:26.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/btrfs/disk-io.c 2013-08-22 20:29:59.000000000 +0000 @@ -2360,6 +2360,9 @@ int open_ctree(struct super_block *sb, goto fail_alloc; } + if (btrfs_test_opt(tree_root, TAGGED)) + sb->s_flags |= MS_TAGGED; + features = btrfs_super_incompat_flags(disk_super) & ~BTRFS_FEATURE_INCOMPAT_SUPP; if (features) { diff -NurpP --minimal linux-3.10.19/fs/btrfs/inode.c linux-3.10.19-vs2.3.6.8/fs/btrfs/inode.c --- linux-3.10.19/fs/btrfs/inode.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/btrfs/inode.c 2013-11-13 17:17:16.000000000 +0000 @@ -42,6 +42,7 @@ #include #include #include +#include #include "compat.h" #include "ctree.h" #include "disk-io.h" @@ -3327,6 +3328,9 @@ static void btrfs_read_locked_inode(stru struct btrfs_key location; int maybe_acls; u32 rdev; + kuid_t kuid; + kgid_t kgid; + ktag_t ktag; int ret; bool filled = false; @@ -3354,8 +3358,14 @@ static void btrfs_read_locked_inode(stru struct btrfs_inode_item); inode->i_mode = btrfs_inode_mode(leaf, inode_item); set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); - i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); - i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); + + kuid = make_kuid(&init_user_ns, btrfs_inode_uid(leaf, inode_item)); + kgid = make_kgid(&init_user_ns, btrfs_inode_gid(leaf, inode_item)); + ktag = make_ktag(&init_user_ns, btrfs_inode_tag(leaf, inode_item)); + + inode->i_uid = INOTAG_KUID(DX_TAG(inode), kuid, kgid); + inode->i_gid = INOTAG_KGID(DX_TAG(inode), kuid, kgid); + inode->i_tag = INOTAG_KTAG(DX_TAG(inode), kuid, kgid, ktag); btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); tspec = btrfs_inode_atime(inode_item); @@ -3446,11 +3456,18 @@ static void fill_inode_item(struct btrfs struct inode *inode) { struct btrfs_map_token token; + uid_t uid = from_kuid(&init_user_ns, + TAGINO_KUID(DX_TAG(inode), inode->i_uid, inode->i_tag)); + gid_t gid = from_kgid(&init_user_ns, + TAGINO_KGID(DX_TAG(inode), inode->i_gid, inode->i_tag)); btrfs_init_map_token(&token); - btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token); - btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token); + btrfs_set_token_inode_uid(leaf, item, uid, &token); + btrfs_set_token_inode_gid(leaf, item, gid, &token); +#ifdef CONFIG_TAGGING_INTERN + btrfs_set_token_inode_tag(leaf, item, i_tag_read(inode), &token); +#endif btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size, &token); btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); @@ -8720,11 +8737,13 @@ static const struct inode_operations btr .listxattr = btrfs_listxattr, .removexattr = btrfs_removexattr, .permission = btrfs_permission, + .sync_flags = btrfs_sync_flags, .get_acl = btrfs_get_acl, }; static const struct inode_operations btrfs_dir_ro_inode_operations = { .lookup = btrfs_lookup, 
.permission = btrfs_permission, + .sync_flags = btrfs_sync_flags, .get_acl = btrfs_get_acl, }; diff -NurpP --minimal linux-3.10.19/fs/btrfs/ioctl.c linux-3.10.19-vs2.3.6.8/fs/btrfs/ioctl.c --- linux-3.10.19/fs/btrfs/ioctl.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/btrfs/ioctl.c 2013-11-13 17:17:16.000000000 +0000 @@ -75,10 +75,13 @@ static unsigned int btrfs_flags_to_ioctl { unsigned int iflags = 0; - if (flags & BTRFS_INODE_SYNC) - iflags |= FS_SYNC_FL; if (flags & BTRFS_INODE_IMMUTABLE) iflags |= FS_IMMUTABLE_FL; + if (flags & BTRFS_INODE_IXUNLINK) + iflags |= FS_IXUNLINK_FL; + + if (flags & BTRFS_INODE_SYNC) + iflags |= FS_SYNC_FL; if (flags & BTRFS_INODE_APPEND) iflags |= FS_APPEND_FL; if (flags & BTRFS_INODE_NODUMP) @@ -95,28 +98,78 @@ static unsigned int btrfs_flags_to_ioctl else if (flags & BTRFS_INODE_NOCOMPRESS) iflags |= FS_NOCOMP_FL; + if (flags & BTRFS_INODE_BARRIER) + iflags |= FS_BARRIER_FL; + if (flags & BTRFS_INODE_COW) + iflags |= FS_COW_FL; return iflags; } /* - * Update inode->i_flags based on the btrfs internal flags. + * Update inode->i_(v)flags based on the btrfs internal flags. */ void btrfs_update_iflags(struct inode *inode) { struct btrfs_inode *ip = BTRFS_I(inode); - inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); + inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK | + S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC); - if (ip->flags & BTRFS_INODE_SYNC) - inode->i_flags |= S_SYNC; if (ip->flags & BTRFS_INODE_IMMUTABLE) inode->i_flags |= S_IMMUTABLE; + if (ip->flags & BTRFS_INODE_IXUNLINK) + inode->i_flags |= S_IXUNLINK; + + if (ip->flags & BTRFS_INODE_SYNC) + inode->i_flags |= S_SYNC; if (ip->flags & BTRFS_INODE_APPEND) inode->i_flags |= S_APPEND; if (ip->flags & BTRFS_INODE_NOATIME) inode->i_flags |= S_NOATIME; if (ip->flags & BTRFS_INODE_DIRSYNC) inode->i_flags |= S_DIRSYNC; + + inode->i_vflags &= ~(V_BARRIER | V_COW); + + if (ip->flags & BTRFS_INODE_BARRIER) + inode->i_vflags |= V_BARRIER; + if (ip->flags & BTRFS_INODE_COW) + inode->i_vflags |= V_COW; +} + +/* + * Update btrfs internal flags from inode->i_(v)flags. 
+ */ +void btrfs_update_flags(struct inode *inode) +{ + struct btrfs_inode *ip = BTRFS_I(inode); + + unsigned int flags = inode->i_flags; + unsigned int vflags = inode->i_vflags; + + ip->flags &= ~(BTRFS_INODE_SYNC | BTRFS_INODE_APPEND | + BTRFS_INODE_IMMUTABLE | BTRFS_INODE_IXUNLINK | + BTRFS_INODE_NOATIME | BTRFS_INODE_DIRSYNC | + BTRFS_INODE_BARRIER | BTRFS_INODE_COW); + + if (flags & S_IMMUTABLE) + ip->flags |= BTRFS_INODE_IMMUTABLE; + if (flags & S_IXUNLINK) + ip->flags |= BTRFS_INODE_IXUNLINK; + + if (flags & S_SYNC) + ip->flags |= BTRFS_INODE_SYNC; + if (flags & S_APPEND) + ip->flags |= BTRFS_INODE_APPEND; + if (flags & S_NOATIME) + ip->flags |= BTRFS_INODE_NOATIME; + if (flags & S_DIRSYNC) + ip->flags |= BTRFS_INODE_DIRSYNC; + + if (vflags & V_BARRIER) + ip->flags |= BTRFS_INODE_BARRIER; + if (vflags & V_COW) + ip->flags |= BTRFS_INODE_COW; } /* @@ -132,6 +185,7 @@ void btrfs_inherit_iflags(struct inode * return; flags = BTRFS_I(dir)->flags; + flags &= ~BTRFS_INODE_BARRIER; if (flags & BTRFS_INODE_NOCOMPRESS) { BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS; @@ -150,6 +204,30 @@ void btrfs_inherit_iflags(struct inode * btrfs_update_iflags(inode); } +int btrfs_sync_flags(struct inode *inode, int flags, int vflags) +{ + struct btrfs_inode *ip = BTRFS_I(inode); + struct btrfs_root *root = ip->root; + struct btrfs_trans_handle *trans; + int ret; + + trans = btrfs_join_transaction(root); + BUG_ON(!trans); + + inode->i_flags = flags; + inode->i_vflags = vflags; + btrfs_update_flags(inode); + + ret = btrfs_update_inode(trans, root, inode); + BUG_ON(ret); + + btrfs_update_iflags(inode); + inode->i_ctime = CURRENT_TIME; + btrfs_end_transaction(trans, root); + + return 0; +} + static int btrfs_ioctl_getflags(struct file *file, void __user *arg) { struct btrfs_inode *ip = BTRFS_I(file_inode(file)); @@ -212,21 +290,27 @@ static int btrfs_ioctl_setflags(struct f flags = btrfs_mask_flags(inode->i_mode, flags); oldflags = btrfs_flags_to_ioctl(ip->flags); - if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) { + if ((flags ^ oldflags) & (FS_APPEND_FL | + FS_IMMUTABLE_FL | FS_IXUNLINK_FL)) { if (!capable(CAP_LINUX_IMMUTABLE)) { ret = -EPERM; goto out_unlock; } } - if (flags & FS_SYNC_FL) - ip->flags |= BTRFS_INODE_SYNC; - else - ip->flags &= ~BTRFS_INODE_SYNC; if (flags & FS_IMMUTABLE_FL) ip->flags |= BTRFS_INODE_IMMUTABLE; else ip->flags &= ~BTRFS_INODE_IMMUTABLE; + if (flags & FS_IXUNLINK_FL) + ip->flags |= BTRFS_INODE_IXUNLINK; + else + ip->flags &= ~BTRFS_INODE_IXUNLINK; + + if (flags & FS_SYNC_FL) + ip->flags |= BTRFS_INODE_SYNC; + else + ip->flags &= ~BTRFS_INODE_SYNC; if (flags & FS_APPEND_FL) ip->flags |= BTRFS_INODE_APPEND; else diff -NurpP --minimal linux-3.10.19/fs/btrfs/super.c linux-3.10.19-vs2.3.6.8/fs/btrfs/super.c --- linux-3.10.19/fs/btrfs/super.c 2013-07-14 17:01:26.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/btrfs/super.c 2013-08-22 20:29:59.000000000 +0000 @@ -319,7 +319,7 @@ enum { Opt_no_space_cache, Opt_recovery, Opt_skip_balance, Opt_check_integrity, Opt_check_integrity_including_extent_data, Opt_check_integrity_print_mask, Opt_fatal_errors, - Opt_err, + Opt_tag, Opt_notag, Opt_tagid, Opt_err, }; static match_table_t tokens = { @@ -359,6 +359,9 @@ static match_table_t tokens = { {Opt_check_integrity_including_extent_data, "check_int_data"}, {Opt_check_integrity_print_mask, "check_int_print_mask=%d"}, {Opt_fatal_errors, "fatal_errors=%s"}, + {Opt_tag, "tag"}, + {Opt_notag, "notag"}, + {Opt_tagid, "tagid=%u"}, {Opt_err, NULL}, }; @@ -624,6 +627,22 @@ int 
btrfs_parse_options(struct btrfs_roo goto out; } break; +#ifndef CONFIG_TAGGING_NONE + case Opt_tag: + printk(KERN_INFO "btrfs: use tagging\n"); + btrfs_set_opt(info->mount_opt, TAGGED); + break; + case Opt_notag: + printk(KERN_INFO "btrfs: disabled tagging\n"); + btrfs_clear_opt(info->mount_opt, TAGGED); + break; +#endif +#ifdef CONFIG_PROPAGATE + case Opt_tagid: + /* use args[0] */ + btrfs_set_opt(info->mount_opt, TAGGED); + break; +#endif case Opt_err: printk(KERN_INFO "btrfs: unrecognized mount option " "'%s'\n", p); @@ -1251,6 +1270,12 @@ static int btrfs_remount(struct super_bl btrfs_resize_thread_pool(fs_info, fs_info->thread_pool_size, old_thread_pool_size); + if (btrfs_test_opt(root, TAGGED) && !(sb->s_flags & MS_TAGGED)) { + printk("btrfs: %s: tagging not permitted on remount.\n", + sb->s_id); + return -EINVAL; + } + if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) goto out; diff -NurpP --minimal linux-3.10.19/fs/char_dev.c linux-3.10.19-vs2.3.6.8/fs/char_dev.c --- linux-3.10.19/fs/char_dev.c 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/char_dev.c 2013-08-22 20:29:59.000000000 +0000 @@ -21,6 +21,8 @@ #include #include #include +#include +#include #include "internal.h" @@ -371,14 +373,21 @@ static int chrdev_open(struct inode *ino struct cdev *p; struct cdev *new = NULL; int ret = 0; + dev_t mdev; + + if (!vs_map_chrdev(inode->i_rdev, &mdev, DATTR_OPEN)) + return -EPERM; + inode->i_mdev = mdev; spin_lock(&cdev_lock); p = inode->i_cdev; if (!p) { struct kobject *kobj; int idx; + spin_unlock(&cdev_lock); - kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx); + + kobj = kobj_lookup(cdev_map, mdev, &idx); if (!kobj) return -ENXIO; new = container_of(kobj, struct cdev, kobj); diff -NurpP --minimal linux-3.10.19/fs/dcache.c linux-3.10.19-vs2.3.6.8/fs/dcache.c --- linux-3.10.19/fs/dcache.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/dcache.c 2013-11-13 17:17:16.000000000 +0000 @@ -37,6 +37,7 @@ #include #include #include +#include #include "internal.h" #include "mount.h" @@ -578,6 +579,8 @@ int d_invalidate(struct dentry * dentry) spin_lock(&dentry->d_lock); } + vx_dentry_dec(dentry); + /* * Somebody else still using it? 
* @@ -607,6 +610,7 @@ EXPORT_SYMBOL(d_invalidate); static inline void __dget_dlock(struct dentry *dentry) { dentry->d_count++; + vx_dentry_inc(dentry); } static inline void __dget(struct dentry *dentry) @@ -1239,6 +1243,9 @@ struct dentry *__d_alloc(struct super_bl struct dentry *dentry; char *dname; + if (!vx_dentry_avail(1)) + return NULL; + dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL); if (!dentry) return NULL; @@ -1271,6 +1278,7 @@ struct dentry *__d_alloc(struct super_bl dentry->d_count = 1; dentry->d_flags = 0; + vx_dentry_inc(dentry); spin_lock_init(&dentry->d_lock); seqcount_init(&dentry->d_seq); dentry->d_inode = NULL; @@ -1971,6 +1979,7 @@ struct dentry *__d_lookup(const struct d } dentry->d_count++; + vx_dentry_inc(dentry); found = dentry; spin_unlock(&dentry->d_lock); break; diff -NurpP --minimal linux-3.10.19/fs/devpts/inode.c linux-3.10.19-vs2.3.6.8/fs/devpts/inode.c --- linux-3.10.19/fs/devpts/inode.c 2013-05-31 13:45:23.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/devpts/inode.c 2013-08-22 20:29:59.000000000 +0000 @@ -25,6 +25,7 @@ #include #include #include +#include #define DEVPTS_DEFAULT_MODE 0600 /* @@ -36,6 +37,21 @@ #define DEVPTS_DEFAULT_PTMX_MODE 0000 #define PTMX_MINOR 2 +static int devpts_permission(struct inode *inode, int mask) +{ + int ret = -EACCES; + + /* devpts is xid tagged */ + if (vx_check((vxid_t)i_tag_read(inode), VS_WATCH_P | VS_IDENT)) + ret = generic_permission(inode, mask); + return ret; +} + +static struct inode_operations devpts_file_inode_operations = { + .permission = devpts_permission, +}; + + /* * sysctl support for setting limits on the number of Unix98 ptys allocated. * Otherwise one can eat up all kernel memory by opening /dev/ptmx repeatedly. @@ -345,6 +361,34 @@ static int devpts_show_options(struct se return 0; } +static int devpts_filter(struct dentry *de) +{ + vxid_t xid = 0; + + /* devpts is xid tagged */ + if (de && de->d_inode) + xid = (vxid_t)i_tag_read(de->d_inode); +#ifdef CONFIG_VSERVER_WARN_DEVPTS + else + vxwprintk_task(1, "devpts " VS_Q("%.*s") " without inode.", + de->d_name.len, de->d_name.name); +#endif + return vx_check(xid, VS_WATCH_P | VS_IDENT); +} + +static int devpts_readdir(struct file * filp, void * dirent, filldir_t filldir) +{ + return dcache_readdir_filter(filp, dirent, filldir, devpts_filter); +} + +static struct file_operations devpts_dir_operations = { + .open = dcache_dir_open, + .release = dcache_dir_close, + .llseek = dcache_dir_lseek, + .read = generic_read_dir, + .readdir = devpts_readdir, +}; + static const struct super_operations devpts_sops = { .statfs = simple_statfs, .remount_fs = devpts_remount, @@ -388,8 +432,10 @@ devpts_fill_super(struct super_block *s, inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR; inode->i_op = &simple_dir_inode_operations; - inode->i_fop = &simple_dir_operations; + inode->i_fop = &devpts_dir_operations; set_nlink(inode, 2); + /* devpts is xid tagged */ + i_tag_write(inode, (vtag_t)vx_current_xid()); s->s_root = d_make_root(inode); if (s->s_root) @@ -592,6 +638,9 @@ struct inode *devpts_pty_new(struct inod inode->i_gid = opts->setgid ? 
opts->gid : current_fsgid(); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; init_special_inode(inode, S_IFCHR|opts->mode, device); + /* devpts is xid tagged */ + i_tag_write(inode, (vtag_t)vx_current_xid()); + inode->i_op = &devpts_file_inode_operations; inode->i_private = priv; sprintf(s, "%d", index); diff -NurpP --minimal linux-3.10.19/fs/ext2/balloc.c linux-3.10.19-vs2.3.6.8/fs/ext2/balloc.c --- linux-3.10.19/fs/ext2/balloc.c 2013-05-31 13:45:23.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext2/balloc.c 2013-08-22 20:29:59.000000000 +0000 @@ -693,7 +693,6 @@ ext2_try_to_allocate(struct super_block start = 0; end = EXT2_BLOCKS_PER_GROUP(sb); } - BUG_ON(start > EXT2_BLOCKS_PER_GROUP(sb)); repeat: diff -NurpP --minimal linux-3.10.19/fs/ext2/ext2.h linux-3.10.19-vs2.3.6.8/fs/ext2/ext2.h --- linux-3.10.19/fs/ext2/ext2.h 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext2/ext2.h 2013-08-22 20:29:59.000000000 +0000 @@ -244,8 +244,12 @@ struct ext2_group_desc #define EXT2_NOTAIL_FL FS_NOTAIL_FL /* file tail should not be merged */ #define EXT2_DIRSYNC_FL FS_DIRSYNC_FL /* dirsync behaviour (directories only) */ #define EXT2_TOPDIR_FL FS_TOPDIR_FL /* Top of directory hierarchies*/ +#define EXT2_IXUNLINK_FL FS_IXUNLINK_FL /* Immutable invert on unlink */ #define EXT2_RESERVED_FL FS_RESERVED_FL /* reserved for ext2 lib */ +#define EXT2_BARRIER_FL FS_BARRIER_FL /* Barrier for chroot() */ +#define EXT2_COW_FL FS_COW_FL /* Copy on Write marker */ + #define EXT2_FL_USER_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */ #define EXT2_FL_USER_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */ @@ -329,7 +333,8 @@ struct ext2_inode { __u16 i_pad1; __le16 l_i_uid_high; /* these 2 fields */ __le16 l_i_gid_high; /* were reserved2[0] */ - __u32 l_i_reserved2; + __le16 l_i_tag; /* Context Tag */ + __u16 l_i_reserved2; } linux2; struct { __u8 h_i_frag; /* Fragment number */ @@ -357,6 +362,7 @@ struct ext2_inode { #define i_gid_low i_gid #define i_uid_high osd2.linux2.l_i_uid_high #define i_gid_high osd2.linux2.l_i_gid_high +#define i_raw_tag osd2.linux2.l_i_tag #define i_reserved2 osd2.linux2.l_i_reserved2 /* @@ -384,6 +390,7 @@ struct ext2_inode { #define EXT2_MOUNT_USRQUOTA 0x020000 /* user quota */ #define EXT2_MOUNT_GRPQUOTA 0x040000 /* group quota */ #define EXT2_MOUNT_RESERVATION 0x080000 /* Preallocation */ +#define EXT2_MOUNT_TAGGED (1<<24) /* Enable Context Tags */ #define clear_opt(o, opt) o &= ~EXT2_MOUNT_##opt @@ -757,6 +764,7 @@ extern void ext2_set_inode_flags(struct extern void ext2_get_inode_flags(struct ext2_inode_info *); extern int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len); +extern int ext2_sync_flags(struct inode *, int, int); /* ioctl.c */ extern long ext2_ioctl(struct file *, unsigned int, unsigned long); diff -NurpP --minimal linux-3.10.19/fs/ext2/file.c linux-3.10.19-vs2.3.6.8/fs/ext2/file.c --- linux-3.10.19/fs/ext2/file.c 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext2/file.c 2013-08-22 20:29:59.000000000 +0000 @@ -104,4 +104,5 @@ const struct inode_operations ext2_file_ .setattr = ext2_setattr, .get_acl = ext2_get_acl, .fiemap = ext2_fiemap, + .sync_flags = ext2_sync_flags, }; diff -NurpP --minimal linux-3.10.19/fs/ext2/ialloc.c linux-3.10.19-vs2.3.6.8/fs/ext2/ialloc.c --- linux-3.10.19/fs/ext2/ialloc.c 2013-05-31 13:45:23.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext2/ialloc.c 2013-08-22 20:29:59.000000000 +0000 @@ -17,6 +17,7 @@ #include #include #include 
+#include #include "ext2.h" #include "xattr.h" #include "acl.h" @@ -546,6 +547,7 @@ got: inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = dir->i_gid; + i_tag_write(inode, dx_current_fstag(sb)); } else inode_init_owner(inode, dir, mode); diff -NurpP --minimal linux-3.10.19/fs/ext2/inode.c linux-3.10.19-vs2.3.6.8/fs/ext2/inode.c --- linux-3.10.19/fs/ext2/inode.c 2013-07-14 17:01:27.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext2/inode.c 2013-08-22 21:46:54.000000000 +0000 @@ -32,6 +32,7 @@ #include #include #include +#include #include "ext2.h" #include "acl.h" #include "xip.h" @@ -1180,7 +1181,7 @@ static void ext2_truncate_blocks(struct return; if (ext2_inode_is_fast_symlink(inode)) return; - if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) + if (IS_APPEND(inode) || IS_IXORUNLINK(inode)) return; __ext2_truncate_blocks(inode, offset); } @@ -1271,36 +1272,61 @@ void ext2_set_inode_flags(struct inode * { unsigned int flags = EXT2_I(inode)->i_flags; - inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); + inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK | + S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC); + + + if (flags & EXT2_IMMUTABLE_FL) + inode->i_flags |= S_IMMUTABLE; + if (flags & EXT2_IXUNLINK_FL) + inode->i_flags |= S_IXUNLINK; + if (flags & EXT2_SYNC_FL) inode->i_flags |= S_SYNC; if (flags & EXT2_APPEND_FL) inode->i_flags |= S_APPEND; - if (flags & EXT2_IMMUTABLE_FL) - inode->i_flags |= S_IMMUTABLE; if (flags & EXT2_NOATIME_FL) inode->i_flags |= S_NOATIME; if (flags & EXT2_DIRSYNC_FL) inode->i_flags |= S_DIRSYNC; + + inode->i_vflags &= ~(V_BARRIER | V_COW); + + if (flags & EXT2_BARRIER_FL) + inode->i_vflags |= V_BARRIER; + if (flags & EXT2_COW_FL) + inode->i_vflags |= V_COW; } /* Propagate flags from i_flags to EXT2_I(inode)->i_flags */ void ext2_get_inode_flags(struct ext2_inode_info *ei) { unsigned int flags = ei->vfs_inode.i_flags; + unsigned int vflags = ei->vfs_inode.i_vflags; + + ei->i_flags &= ~(EXT2_SYNC_FL | EXT2_APPEND_FL | + EXT2_IMMUTABLE_FL | EXT2_IXUNLINK_FL | + EXT2_NOATIME_FL | EXT2_DIRSYNC_FL | + EXT2_BARRIER_FL | EXT2_COW_FL); + + if (flags & S_IMMUTABLE) + ei->i_flags |= EXT2_IMMUTABLE_FL; + if (flags & S_IXUNLINK) + ei->i_flags |= EXT2_IXUNLINK_FL; - ei->i_flags &= ~(EXT2_SYNC_FL|EXT2_APPEND_FL| - EXT2_IMMUTABLE_FL|EXT2_NOATIME_FL|EXT2_DIRSYNC_FL); if (flags & S_SYNC) ei->i_flags |= EXT2_SYNC_FL; if (flags & S_APPEND) ei->i_flags |= EXT2_APPEND_FL; - if (flags & S_IMMUTABLE) - ei->i_flags |= EXT2_IMMUTABLE_FL; if (flags & S_NOATIME) ei->i_flags |= EXT2_NOATIME_FL; if (flags & S_DIRSYNC) ei->i_flags |= EXT2_DIRSYNC_FL; + + if (vflags & V_BARRIER) + ei->i_flags |= EXT2_BARRIER_FL; + if (vflags & V_COW) + ei->i_flags |= EXT2_COW_FL; } struct inode *ext2_iget (struct super_block *sb, unsigned long ino) @@ -1336,8 +1362,10 @@ struct inode *ext2_iget (struct super_bl i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; } - i_uid_write(inode, i_uid); - i_gid_write(inode, i_gid); + i_uid_write(inode, INOTAG_UID(DX_TAG(inode), i_uid, i_gid)); + i_gid_write(inode, INOTAG_GID(DX_TAG(inode), i_uid, i_gid)); + i_tag_write(inode, INOTAG_TAG(DX_TAG(inode), i_uid, i_gid, + le16_to_cpu(raw_inode->i_raw_tag))); set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); inode->i_size = le32_to_cpu(raw_inode->i_size); inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime); @@ -1435,8 +1463,10 @@ static int __ext2_write_inode(struct ino struct ext2_inode_info *ei = EXT2_I(inode); struct super_block *sb 
= inode->i_sb; ino_t ino = inode->i_ino; - uid_t uid = i_uid_read(inode); - gid_t gid = i_gid_read(inode); + uid_t uid = from_kuid(&init_user_ns, + TAGINO_KUID(DX_TAG(inode), inode->i_uid, inode->i_tag)); + gid_t gid = from_kgid(&init_user_ns, + TAGINO_KGID(DX_TAG(inode), inode->i_gid, inode->i_tag)); struct buffer_head * bh; struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh); int n; @@ -1472,6 +1502,9 @@ static int __ext2_write_inode(struct ino raw_inode->i_uid_high = 0; raw_inode->i_gid_high = 0; } +#ifdef CONFIG_TAGGING_INTERN + raw_inode->i_raw_tag = cpu_to_le16(i_tag_read(inode)); +#endif raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); raw_inode->i_size = cpu_to_le32(inode->i_size); raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec); @@ -1552,7 +1585,8 @@ int ext2_setattr(struct dentry *dentry, if (is_quota_modification(inode, iattr)) dquot_initialize(inode); if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) || - (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) { + (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid)) || + (iattr->ia_valid & ATTR_TAG && !tag_eq(iattr->ia_tag, inode->i_tag))) { error = dquot_transfer(inode, iattr); if (error) return error; diff -NurpP --minimal linux-3.10.19/fs/ext2/ioctl.c linux-3.10.19-vs2.3.6.8/fs/ext2/ioctl.c --- linux-3.10.19/fs/ext2/ioctl.c 2013-05-31 13:45:23.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext2/ioctl.c 2013-08-22 20:29:59.000000000 +0000 @@ -17,6 +17,16 @@ #include +int ext2_sync_flags(struct inode *inode, int flags, int vflags) +{ + inode->i_flags = flags; + inode->i_vflags = vflags; + ext2_get_inode_flags(EXT2_I(inode)); + inode->i_ctime = CURRENT_TIME_SEC; + mark_inode_dirty(inode); + return 0; +} + long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(filp); @@ -51,6 +61,11 @@ long ext2_ioctl(struct file *filp, unsig flags = ext2_mask_flags(inode->i_mode, flags); + if (IS_BARRIER(inode)) { + vxwprintk_task(1, "messing with the barrier."); + return -EACCES; + } + mutex_lock(&inode->i_mutex); /* Is it quota file? Do not allow user to mess with it */ if (IS_NOQUOTA(inode)) { @@ -66,7 +81,9 @@ long ext2_ioctl(struct file *filp, unsig * * This test looks nicer. 
Thanks to Pauline Middelink */ - if ((flags ^ oldflags) & (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL)) { + if ((oldflags & EXT2_IMMUTABLE_FL) || + ((flags ^ oldflags) & (EXT2_APPEND_FL | + EXT2_IMMUTABLE_FL | EXT2_IXUNLINK_FL))) { if (!capable(CAP_LINUX_IMMUTABLE)) { mutex_unlock(&inode->i_mutex); ret = -EPERM; @@ -74,7 +91,7 @@ long ext2_ioctl(struct file *filp, unsig } } - flags = flags & EXT2_FL_USER_MODIFIABLE; + flags &= EXT2_FL_USER_MODIFIABLE; flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE; ei->i_flags = flags; diff -NurpP --minimal linux-3.10.19/fs/ext2/namei.c linux-3.10.19-vs2.3.6.8/fs/ext2/namei.c --- linux-3.10.19/fs/ext2/namei.c 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext2/namei.c 2013-08-22 20:29:59.000000000 +0000 @@ -32,6 +32,7 @@ #include #include +#include #include "ext2.h" #include "xattr.h" #include "acl.h" @@ -73,6 +74,7 @@ static struct dentry *ext2_lookup(struct (unsigned long) ino); return ERR_PTR(-EIO); } + dx_propagate_tag(nd, inode); } return d_splice_alias(inode, dentry); } @@ -397,6 +399,7 @@ const struct inode_operations ext2_dir_i .removexattr = generic_removexattr, #endif .setattr = ext2_setattr, + .sync_flags = ext2_sync_flags, .get_acl = ext2_get_acl, }; diff -NurpP --minimal linux-3.10.19/fs/ext2/super.c linux-3.10.19-vs2.3.6.8/fs/ext2/super.c --- linux-3.10.19/fs/ext2/super.c 2013-05-31 13:45:23.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext2/super.c 2013-08-22 20:29:59.000000000 +0000 @@ -395,7 +395,8 @@ enum { Opt_err_ro, Opt_nouid32, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov, Opt_nobh, Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl, Opt_xip, Opt_ignore, Opt_err, Opt_quota, - Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation + Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation, + Opt_tag, Opt_notag, Opt_tagid }; static const match_table_t tokens = { @@ -423,6 +424,9 @@ static const match_table_t tokens = { {Opt_acl, "acl"}, {Opt_noacl, "noacl"}, {Opt_xip, "xip"}, + {Opt_tag, "tag"}, + {Opt_notag, "notag"}, + {Opt_tagid, "tagid=%u"}, {Opt_grpquota, "grpquota"}, {Opt_ignore, "noquota"}, {Opt_quota, "quota"}, @@ -506,6 +510,20 @@ static int parse_options(char *options, case Opt_nouid32: set_opt (sbi->s_mount_opt, NO_UID32); break; +#ifndef CONFIG_TAGGING_NONE + case Opt_tag: + set_opt (sbi->s_mount_opt, TAGGED); + break; + case Opt_notag: + clear_opt (sbi->s_mount_opt, TAGGED); + break; +#endif +#ifdef CONFIG_PROPAGATE + case Opt_tagid: + /* use args[0] */ + set_opt (sbi->s_mount_opt, TAGGED); + break; +#endif case Opt_nocheck: clear_opt (sbi->s_mount_opt, CHECK); break; @@ -864,6 +882,8 @@ static int ext2_fill_super(struct super_ if (!parse_options((char *) data, sb)) goto failed_mount; + if (EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_TAGGED) + sb->s_flags |= MS_TAGGED; sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | ((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0); @@ -1269,6 +1289,14 @@ static int ext2_remount (struct super_bl err = -EINVAL; goto restore_opts; } + + if ((sbi->s_mount_opt & EXT2_MOUNT_TAGGED) && + !(sb->s_flags & MS_TAGGED)) { + printk("EXT2-fs: %s: tagging not permitted on remount.\n", + sb->s_id); + err = -EINVAL; + goto restore_opts; + } sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? 
MS_POSIXACL : 0); diff -NurpP --minimal linux-3.10.19/fs/ext3/ext3.h linux-3.10.19-vs2.3.6.8/fs/ext3/ext3.h --- linux-3.10.19/fs/ext3/ext3.h 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext3/ext3.h 2013-08-22 20:29:59.000000000 +0000 @@ -151,10 +151,14 @@ struct ext3_group_desc #define EXT3_NOTAIL_FL 0x00008000 /* file tail should not be merged */ #define EXT3_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ #define EXT3_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ +#define EXT3_IXUNLINK_FL 0x08000000 /* Immutable invert on unlink */ #define EXT3_RESERVED_FL 0x80000000 /* reserved for ext3 lib */ -#define EXT3_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ -#define EXT3_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */ +#define EXT3_BARRIER_FL 0x04000000 /* Barrier for chroot() */ +#define EXT3_COW_FL 0x20000000 /* Copy on Write marker */ + +#define EXT3_FL_USER_VISIBLE 0x0103DFFF /* User visible flags */ +#define EXT3_FL_USER_MODIFIABLE 0x010380FF /* User modifiable flags */ /* Flags that should be inherited by new inodes from their parent. */ #define EXT3_FL_INHERITED (EXT3_SECRM_FL | EXT3_UNRM_FL | EXT3_COMPR_FL |\ @@ -290,7 +294,8 @@ struct ext3_inode { __u16 i_pad1; __le16 l_i_uid_high; /* these 2 fields */ __le16 l_i_gid_high; /* were reserved2[0] */ - __u32 l_i_reserved2; + __le16 l_i_tag; /* Context Tag */ + __u16 l_i_reserved2; } linux2; struct { __u8 h_i_frag; /* Fragment number */ @@ -320,6 +325,7 @@ struct ext3_inode { #define i_gid_low i_gid #define i_uid_high osd2.linux2.l_i_uid_high #define i_gid_high osd2.linux2.l_i_gid_high +#define i_raw_tag osd2.linux2.l_i_tag #define i_reserved2 osd2.linux2.l_i_reserved2 /* @@ -364,6 +370,7 @@ struct ext3_inode { #define EXT3_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */ #define EXT3_MOUNT_DATA_ERR_ABORT 0x400000 /* Abort on file data write * error in ordered mode */ +#define EXT3_MOUNT_TAGGED (1<<24) /* Enable Context Tags */ /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */ #ifndef _LINUX_EXT2_FS_H @@ -1061,6 +1068,7 @@ extern void ext3_get_inode_flags(struct extern void ext3_set_aops(struct inode *inode); extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len); +extern int ext3_sync_flags(struct inode *, int, int); /* ioctl.c */ extern long ext3_ioctl(struct file *, unsigned int, unsigned long); diff -NurpP --minimal linux-3.10.19/fs/ext3/file.c linux-3.10.19-vs2.3.6.8/fs/ext3/file.c --- linux-3.10.19/fs/ext3/file.c 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext3/file.c 2013-08-22 20:29:59.000000000 +0000 @@ -76,5 +76,6 @@ const struct inode_operations ext3_file_ #endif .get_acl = ext3_get_acl, .fiemap = ext3_fiemap, + .sync_flags = ext3_sync_flags, }; diff -NurpP --minimal linux-3.10.19/fs/ext3/ialloc.c linux-3.10.19-vs2.3.6.8/fs/ext3/ialloc.c --- linux-3.10.19/fs/ext3/ialloc.c 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext3/ialloc.c 2013-08-22 20:29:59.000000000 +0000 @@ -14,6 +14,7 @@ #include #include +#include #include "ext3.h" #include "xattr.h" @@ -469,6 +470,7 @@ got: inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = dir->i_gid; + i_tag_write(inode, dx_current_fstag(sb)); } else inode_init_owner(inode, dir, mode); diff -NurpP --minimal linux-3.10.19/fs/ext3/inode.c linux-3.10.19-vs2.3.6.8/fs/ext3/inode.c --- linux-3.10.19/fs/ext3/inode.c 2013-07-14 17:01:27.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext3/inode.c 2013-08-22 
21:21:18.000000000 +0000 @@ -28,6 +28,8 @@ #include #include #include +#include + #include "ext3.h" #include "xattr.h" #include "acl.h" @@ -2853,36 +2855,60 @@ void ext3_set_inode_flags(struct inode * { unsigned int flags = EXT3_I(inode)->i_flags; - inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); + inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK | + S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC); + + if (flags & EXT3_IMMUTABLE_FL) + inode->i_flags |= S_IMMUTABLE; + if (flags & EXT3_IXUNLINK_FL) + inode->i_flags |= S_IXUNLINK; + if (flags & EXT3_SYNC_FL) inode->i_flags |= S_SYNC; if (flags & EXT3_APPEND_FL) inode->i_flags |= S_APPEND; - if (flags & EXT3_IMMUTABLE_FL) - inode->i_flags |= S_IMMUTABLE; if (flags & EXT3_NOATIME_FL) inode->i_flags |= S_NOATIME; if (flags & EXT3_DIRSYNC_FL) inode->i_flags |= S_DIRSYNC; + + inode->i_vflags &= ~(V_BARRIER | V_COW); + + if (flags & EXT3_BARRIER_FL) + inode->i_vflags |= V_BARRIER; + if (flags & EXT3_COW_FL) + inode->i_vflags |= V_COW; } /* Propagate flags from i_flags to EXT3_I(inode)->i_flags */ void ext3_get_inode_flags(struct ext3_inode_info *ei) { unsigned int flags = ei->vfs_inode.i_flags; + unsigned int vflags = ei->vfs_inode.i_vflags; + + ei->i_flags &= ~(EXT3_SYNC_FL | EXT3_APPEND_FL | + EXT3_IMMUTABLE_FL | EXT3_IXUNLINK_FL | + EXT3_NOATIME_FL | EXT3_DIRSYNC_FL | + EXT3_BARRIER_FL | EXT3_COW_FL); + + if (flags & S_IMMUTABLE) + ei->i_flags |= EXT3_IMMUTABLE_FL; + if (flags & S_IXUNLINK) + ei->i_flags |= EXT3_IXUNLINK_FL; - ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL| - EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL); if (flags & S_SYNC) ei->i_flags |= EXT3_SYNC_FL; if (flags & S_APPEND) ei->i_flags |= EXT3_APPEND_FL; - if (flags & S_IMMUTABLE) - ei->i_flags |= EXT3_IMMUTABLE_FL; if (flags & S_NOATIME) ei->i_flags |= EXT3_NOATIME_FL; if (flags & S_DIRSYNC) ei->i_flags |= EXT3_DIRSYNC_FL; + + if (vflags & V_BARRIER) + ei->i_flags |= EXT3_BARRIER_FL; + if (vflags & V_COW) + ei->i_flags |= EXT3_COW_FL; } struct inode *ext3_iget(struct super_block *sb, unsigned long ino) @@ -2920,8 +2946,10 @@ struct inode *ext3_iget(struct super_blo i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; } - i_uid_write(inode, i_uid); - i_gid_write(inode, i_gid); + i_uid_write(inode, INOTAG_UID(DX_TAG(inode), i_uid, i_gid)); + i_gid_write(inode, INOTAG_GID(DX_TAG(inode), i_uid, i_gid)); + i_tag_write(inode, INOTAG_TAG(DX_TAG(inode), i_uid, i_gid, + le16_to_cpu(raw_inode->i_raw_tag))); set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); inode->i_size = le32_to_cpu(raw_inode->i_size); inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime); @@ -3093,8 +3121,10 @@ again: ext3_get_inode_flags(ei); raw_inode->i_mode = cpu_to_le16(inode->i_mode); - i_uid = i_uid_read(inode); - i_gid = i_gid_read(inode); + i_uid = from_kuid(&init_user_ns, + TAGINO_KUID(DX_TAG(inode), inode->i_uid, inode->i_tag)); + i_gid = from_kgid(&init_user_ns, + TAGINO_KGID(DX_TAG(inode), inode->i_gid, inode->i_tag)); if(!(test_opt(inode->i_sb, NO_UID32))) { raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid)); raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid)); @@ -3119,6 +3149,9 @@ again: raw_inode->i_uid_high = 0; raw_inode->i_gid_high = 0; } +#ifdef CONFIG_TAGGING_INTERN + raw_inode->i_raw_tag = cpu_to_le16(i_tag_read(inode)); +#endif raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); disksize = cpu_to_le32(ei->i_disksize); if (disksize != raw_inode->i_size) { @@ -3287,7 +3320,8 @@ int ext3_setattr(struct 
dentry *dentry, if (is_quota_modification(inode, attr)) dquot_initialize(inode); if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || - (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { + (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)) || + (ia_valid & ATTR_TAG && !tag_eq(attr->ia_tag, inode->i_tag))) { handle_t *handle; /* (user+group)*(old+new) structure, inode write (sb, @@ -3309,6 +3343,8 @@ int ext3_setattr(struct dentry *dentry, inode->i_uid = attr->ia_uid; if (attr->ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; + if ((attr->ia_valid & ATTR_TAG) && IS_TAGGED(inode)) + inode->i_tag = attr->ia_tag; error = ext3_mark_inode_dirty(handle, inode); ext3_journal_stop(handle); } diff -NurpP --minimal linux-3.10.19/fs/ext3/ioctl.c linux-3.10.19-vs2.3.6.8/fs/ext3/ioctl.c --- linux-3.10.19/fs/ext3/ioctl.c 2013-05-31 13:45:23.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext3/ioctl.c 2013-08-22 20:29:59.000000000 +0000 @@ -12,6 +12,34 @@ #include #include "ext3.h" + +int ext3_sync_flags(struct inode *inode, int flags, int vflags) +{ + handle_t *handle = NULL; + struct ext3_iloc iloc; + int err; + + handle = ext3_journal_start(inode, 1); + if (IS_ERR(handle)) + return PTR_ERR(handle); + + if (IS_SYNC(inode)) + handle->h_sync = 1; + err = ext3_reserve_inode_write(handle, inode, &iloc); + if (err) + goto flags_err; + + inode->i_flags = flags; + inode->i_vflags = vflags; + ext3_get_inode_flags(EXT3_I(inode)); + inode->i_ctime = CURRENT_TIME_SEC; + + err = ext3_mark_iloc_dirty(handle, inode, &iloc); +flags_err: + ext3_journal_stop(handle); + return err; +} + long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(filp); @@ -45,6 +73,11 @@ long ext3_ioctl(struct file *filp, unsig flags = ext3_mask_flags(inode->i_mode, flags); + if (IS_BARRIER(inode)) { + vxwprintk_task(1, "messing with the barrier."); + return -EACCES; + } + mutex_lock(&inode->i_mutex); /* Is it quota file? Do not allow user to mess with it */ @@ -63,7 +96,9 @@ long ext3_ioctl(struct file *filp, unsig * * This test looks nicer. 
Thanks to Pauline Middelink */ - if ((flags ^ oldflags) & (EXT3_APPEND_FL | EXT3_IMMUTABLE_FL)) { + if ((oldflags & EXT3_IMMUTABLE_FL) || + ((flags ^ oldflags) & (EXT3_APPEND_FL | + EXT3_IMMUTABLE_FL | EXT3_IXUNLINK_FL))) { if (!capable(CAP_LINUX_IMMUTABLE)) goto flags_out; } @@ -88,7 +123,7 @@ long ext3_ioctl(struct file *filp, unsig if (err) goto flags_err; - flags = flags & EXT3_FL_USER_MODIFIABLE; + flags &= EXT3_FL_USER_MODIFIABLE; flags |= oldflags & ~EXT3_FL_USER_MODIFIABLE; ei->i_flags = flags; diff -NurpP --minimal linux-3.10.19/fs/ext3/namei.c linux-3.10.19-vs2.3.6.8/fs/ext3/namei.c --- linux-3.10.19/fs/ext3/namei.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext3/namei.c 2013-11-13 17:17:16.000000000 +0000 @@ -25,6 +25,8 @@ */ #include +#include + #include "ext3.h" #include "namei.h" #include "xattr.h" @@ -915,6 +917,7 @@ restart: submit_bh(READ | REQ_META | REQ_PRIO, bh); } + dx_propagate_tag(nd, inode); } } if ((bh = bh_use[ra_ptr++]) == NULL) @@ -2524,6 +2527,7 @@ const struct inode_operations ext3_dir_i .listxattr = ext3_listxattr, .removexattr = generic_removexattr, #endif + .sync_flags = ext3_sync_flags, .get_acl = ext3_get_acl, }; diff -NurpP --minimal linux-3.10.19/fs/ext3/super.c linux-3.10.19-vs2.3.6.8/fs/ext3/super.c --- linux-3.10.19/fs/ext3/super.c 2013-07-14 17:01:27.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext3/super.c 2013-08-22 20:29:59.000000000 +0000 @@ -813,7 +813,8 @@ enum { Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err, - Opt_resize, Opt_usrquota, Opt_grpquota + Opt_resize, Opt_usrquota, Opt_grpquota, + Opt_tag, Opt_notag, Opt_tagid }; static const match_table_t tokens = { @@ -870,6 +871,9 @@ static const match_table_t tokens = { {Opt_barrier, "barrier"}, {Opt_nobarrier, "nobarrier"}, {Opt_resize, "resize"}, + {Opt_tag, "tag"}, + {Opt_notag, "notag"}, + {Opt_tagid, "tagid=%u"}, {Opt_err, NULL}, }; @@ -1037,6 +1041,20 @@ static int parse_options (char *options, case Opt_nouid32: set_opt (sbi->s_mount_opt, NO_UID32); break; +#ifndef CONFIG_TAGGING_NONE + case Opt_tag: + set_opt (sbi->s_mount_opt, TAGGED); + break; + case Opt_notag: + clear_opt (sbi->s_mount_opt, TAGGED); + break; +#endif +#ifdef CONFIG_PROPAGATE + case Opt_tagid: + /* use args[0] */ + set_opt (sbi->s_mount_opt, TAGGED); + break; +#endif case Opt_nocheck: clear_opt (sbi->s_mount_opt, CHECK); break; @@ -1734,6 +1752,9 @@ static int ext3_fill_super (struct super NULL, 0)) goto failed_mount; + if (EXT3_SB(sb)->s_mount_opt & EXT3_MOUNT_TAGGED) + sb->s_flags |= MS_TAGGED; + sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0); @@ -2629,6 +2650,14 @@ static int ext3_remount (struct super_bl if (test_opt(sb, ABORT)) ext3_abort(sb, __func__, "Abort forced by user"); + if ((sbi->s_mount_opt & EXT3_MOUNT_TAGGED) && + !(sb->s_flags & MS_TAGGED)) { + printk("EXT3-fs: %s: tagging not permitted on remount.\n", + sb->s_id); + err = -EINVAL; + goto restore_opts; + } + sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | (test_opt(sb, POSIX_ACL) ? 
MS_POSIXACL : 0); diff -NurpP --minimal linux-3.10.19/fs/ext4/ext4.h linux-3.10.19-vs2.3.6.8/fs/ext4/ext4.h --- linux-3.10.19/fs/ext4/ext4.h 2013-07-14 17:01:27.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext4/ext4.h 2013-08-22 20:29:59.000000000 +0000 @@ -387,7 +387,10 @@ struct flex_groups { #define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */ #define EXT4_EA_INODE_FL 0x00200000 /* Inode used for large EA */ #define EXT4_EOFBLOCKS_FL 0x00400000 /* Blocks allocated beyond EOF */ +#define EXT4_BARRIER_FL 0x04000000 /* Barrier for chroot() */ +#define EXT4_IXUNLINK_FL 0x08000000 /* Immutable invert on unlink */ #define EXT4_INLINE_DATA_FL 0x10000000 /* Inode has inline data. */ +#define EXT4_COW_FL 0x20000000 /* Copy on Write marker */ #define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */ #define EXT4_FL_USER_VISIBLE 0x004BDFFF /* User visible flags */ @@ -663,7 +666,7 @@ struct ext4_inode { __le16 l_i_uid_high; /* these 2 fields */ __le16 l_i_gid_high; /* were reserved2[0] */ __le16 l_i_checksum_lo;/* crc32c(uuid+inum+inode) LE */ - __le16 l_i_reserved; + __le16 l_i_tag; /* Context Tag */ } linux2; struct { __le16 h_i_reserved1; /* Obsoleted fragment number/size which are removed in ext4 */ @@ -781,6 +784,7 @@ do { \ #define i_gid_low i_gid #define i_uid_high osd2.linux2.l_i_uid_high #define i_gid_high osd2.linux2.l_i_gid_high +#define i_raw_tag osd2.linux2.l_i_tag #define i_checksum_lo osd2.linux2.l_i_checksum_lo #elif defined(__GNU__) @@ -958,6 +962,7 @@ struct ext4_inode_info { #define EXT4_MOUNT_POSIX_ACL 0x08000 /* POSIX Access Control Lists */ #define EXT4_MOUNT_NO_AUTO_DA_ALLOC 0x10000 /* No auto delalloc mapping */ #define EXT4_MOUNT_BARRIER 0x20000 /* Use block barriers */ +#define EXT4_MOUNT_TAGGED 0x40000 /* Enable Context Tags */ #define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */ #define EXT4_MOUNT_USRQUOTA 0x100000 /* "old" user quota */ #define EXT4_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */ @@ -2538,6 +2543,7 @@ extern struct buffer_head *ext4_get_firs extern int ext4_inline_data_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, int *has_inline); +extern int ext4_sync_flags(struct inode *, int, int); extern int ext4_try_to_evict_inline_data(handle_t *handle, struct inode *inode, int needed); diff -NurpP --minimal linux-3.10.19/fs/ext4/file.c linux-3.10.19-vs2.3.6.8/fs/ext4/file.c --- linux-3.10.19/fs/ext4/file.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext4/file.c 2013-11-13 17:17:16.000000000 +0000 @@ -651,5 +651,6 @@ const struct inode_operations ext4_file_ .removexattr = generic_removexattr, .get_acl = ext4_get_acl, .fiemap = ext4_fiemap, + .sync_flags = ext4_sync_flags, }; diff -NurpP --minimal linux-3.10.19/fs/ext4/ialloc.c linux-3.10.19-vs2.3.6.8/fs/ext4/ialloc.c --- linux-3.10.19/fs/ext4/ialloc.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext4/ialloc.c 2013-11-13 17:17:16.000000000 +0000 @@ -22,6 +22,7 @@ #include #include #include +#include #include #include "ext4.h" @@ -679,6 +680,7 @@ struct inode *__ext4_new_inode(handle_t inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = dir->i_gid; + i_tag_write(inode, dx_current_fstag(sb)); } else inode_init_owner(inode, dir, mode); dquot_initialize(inode); diff -NurpP --minimal linux-3.10.19/fs/ext4/inode.c linux-3.10.19-vs2.3.6.8/fs/ext4/inode.c --- linux-3.10.19/fs/ext4/inode.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext4/inode.c 2013-11-13 17:17:16.000000000 +0000 @@ -38,6 +38,7 @@ 
#include #include #include +#include #include "ext4_jbd2.h" #include "xattr.h" @@ -4057,41 +4058,64 @@ void ext4_set_inode_flags(struct inode * { unsigned int flags = EXT4_I(inode)->i_flags; - inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); + inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK | + S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC); + + if (flags & EXT4_IMMUTABLE_FL) + inode->i_flags |= S_IMMUTABLE; + if (flags & EXT4_IXUNLINK_FL) + inode->i_flags |= S_IXUNLINK; + if (flags & EXT4_SYNC_FL) inode->i_flags |= S_SYNC; if (flags & EXT4_APPEND_FL) inode->i_flags |= S_APPEND; - if (flags & EXT4_IMMUTABLE_FL) - inode->i_flags |= S_IMMUTABLE; if (flags & EXT4_NOATIME_FL) inode->i_flags |= S_NOATIME; if (flags & EXT4_DIRSYNC_FL) inode->i_flags |= S_DIRSYNC; + + inode->i_vflags &= ~(V_BARRIER | V_COW); + + if (flags & EXT4_BARRIER_FL) + inode->i_vflags |= V_BARRIER; + if (flags & EXT4_COW_FL) + inode->i_vflags |= V_COW; } /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ void ext4_get_inode_flags(struct ext4_inode_info *ei) { - unsigned int vfs_fl; + unsigned int vfs_fl, vfs_vf; unsigned long old_fl, new_fl; do { vfs_fl = ei->vfs_inode.i_flags; + vfs_vf = ei->vfs_inode.i_vflags; old_fl = ei->i_flags; new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL| EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL| - EXT4_DIRSYNC_FL); + EXT4_DIRSYNC_FL|EXT4_BARRIER_FL| + EXT4_COW_FL); + + if (vfs_fl & S_IMMUTABLE) + new_fl |= EXT4_IMMUTABLE_FL; + if (vfs_fl & S_IXUNLINK) + new_fl |= EXT4_IXUNLINK_FL; + if (vfs_fl & S_SYNC) new_fl |= EXT4_SYNC_FL; if (vfs_fl & S_APPEND) new_fl |= EXT4_APPEND_FL; - if (vfs_fl & S_IMMUTABLE) - new_fl |= EXT4_IMMUTABLE_FL; if (vfs_fl & S_NOATIME) new_fl |= EXT4_NOATIME_FL; if (vfs_fl & S_DIRSYNC) new_fl |= EXT4_DIRSYNC_FL; + + if (vfs_vf & V_BARRIER) + new_fl |= EXT4_BARRIER_FL; + if (vfs_vf & V_COW) + new_fl |= EXT4_COW_FL; } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl); } @@ -4196,8 +4220,10 @@ struct inode *ext4_iget(struct super_blo i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; } - i_uid_write(inode, i_uid); - i_gid_write(inode, i_gid); + i_uid_write(inode, INOTAG_UID(DX_TAG(inode), i_uid, i_gid)); + i_gid_write(inode, INOTAG_GID(DX_TAG(inode), i_uid, i_gid)); + i_tag_write(inode, INOTAG_TAG(DX_TAG(inode), i_uid, i_gid, + le16_to_cpu(raw_inode->i_raw_tag))); set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ @@ -4425,8 +4451,10 @@ static int ext4_do_update_inode(handle_t ext4_get_inode_flags(ei); raw_inode->i_mode = cpu_to_le16(inode->i_mode); - i_uid = i_uid_read(inode); - i_gid = i_gid_read(inode); + i_uid = from_kuid(&init_user_ns, + TAGINO_KUID(DX_TAG(inode), inode->i_uid, inode->i_tag)); + i_gid = from_kgid(&init_user_ns, + TAGINO_KGID(DX_TAG(inode), inode->i_gid, inode->i_tag)); if (!(test_opt(inode->i_sb, NO_UID32))) { raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid)); raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid)); @@ -4449,6 +4477,9 @@ static int ext4_do_update_inode(handle_t raw_inode->i_uid_high = 0; raw_inode->i_gid_high = 0; } +#ifdef CONFIG_TAGGING_INTERN + raw_inode->i_raw_tag = cpu_to_le16(i_tag_read(inode)); +#endif raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); @@ -4679,7 +4710,8 @@ int ext4_setattr(struct dentry *dentry, if (is_quota_modification(inode, attr)) dquot_initialize(inode); if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, 
inode->i_uid)) || - (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { + (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)) || + (ia_valid & ATTR_TAG && !tag_eq(attr->ia_tag, inode->i_tag))) { handle_t *handle; /* (user+group)*(old+new) structure, inode write (sb, @@ -4702,6 +4734,8 @@ int ext4_setattr(struct dentry *dentry, inode->i_uid = attr->ia_uid; if (attr->ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; + if ((attr->ia_valid & ATTR_TAG) && IS_TAGGED(inode)) + inode->i_tag = attr->ia_tag; error = ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); } diff -NurpP --minimal linux-3.10.19/fs/ext4/ioctl.c linux-3.10.19-vs2.3.6.8/fs/ext4/ioctl.c --- linux-3.10.19/fs/ext4/ioctl.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext4/ioctl.c 2013-11-13 17:17:16.000000000 +0000 @@ -14,6 +14,7 @@ #include #include #include +#include #include #include "ext4_jbd2.h" #include "ext4.h" @@ -214,6 +215,33 @@ swap_boot_out: return err; } +int ext4_sync_flags(struct inode *inode, int flags, int vflags) +{ + handle_t *handle = NULL; + struct ext4_iloc iloc; + int err; + + handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); + if (IS_ERR(handle)) + return PTR_ERR(handle); + + if (IS_SYNC(inode)) + ext4_handle_sync(handle); + err = ext4_reserve_inode_write(handle, inode, &iloc); + if (err) + goto flags_err; + + inode->i_flags = flags; + inode->i_vflags = vflags; + ext4_get_inode_flags(EXT4_I(inode)); + inode->i_ctime = ext4_current_time(inode); + + err = ext4_mark_iloc_dirty(handle, inode, &iloc); +flags_err: + ext4_journal_stop(handle); + return err; +} + long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(filp); @@ -247,6 +275,11 @@ long ext4_ioctl(struct file *filp, unsig flags = ext4_mask_flags(inode->i_mode, flags); + if (IS_BARRIER(inode)) { + vxwprintk_task(1, "messing with the barrier."); + return -EACCES; + } + err = -EPERM; mutex_lock(&inode->i_mutex); /* Is it quota file? Do not allow user to mess with it */ @@ -264,7 +297,9 @@ long ext4_ioctl(struct file *filp, unsig * * This test looks nicer. 
Thanks to Pauline Middelink */ - if ((flags ^ oldflags) & (EXT4_APPEND_FL | EXT4_IMMUTABLE_FL)) { + if ((oldflags & EXT4_IMMUTABLE_FL) || + ((flags ^ oldflags) & (EXT4_APPEND_FL | + EXT4_IMMUTABLE_FL | EXT4_IXUNLINK_FL))) { if (!capable(CAP_LINUX_IMMUTABLE)) goto flags_out; } diff -NurpP --minimal linux-3.10.19/fs/ext4/namei.c linux-3.10.19-vs2.3.6.8/fs/ext4/namei.c --- linux-3.10.19/fs/ext4/namei.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext4/namei.c 2013-11-13 17:17:16.000000000 +0000 @@ -34,6 +34,7 @@ #include #include #include +#include #include "ext4.h" #include "ext4_jbd2.h" @@ -1299,6 +1300,7 @@ restart: ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh); } + dx_propagate_tag(nd, inode); } if ((bh = bh_use[ra_ptr++]) == NULL) goto next; @@ -3177,6 +3179,7 @@ const struct inode_operations ext4_dir_i .removexattr = generic_removexattr, .get_acl = ext4_get_acl, .fiemap = ext4_fiemap, + .sync_flags = ext4_sync_flags, }; const struct inode_operations ext4_special_inode_operations = { diff -NurpP --minimal linux-3.10.19/fs/ext4/super.c linux-3.10.19-vs2.3.6.8/fs/ext4/super.c --- linux-3.10.19/fs/ext4/super.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ext4/super.c 2013-11-13 17:17:16.000000000 +0000 @@ -1129,7 +1129,7 @@ enum { Opt_inode_readahead_blks, Opt_journal_ioprio, Opt_dioread_nolock, Opt_dioread_lock, Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable, - Opt_max_dir_size_kb, + Opt_max_dir_size_kb, Opt_tag, Opt_notag, Opt_tagid }; static const match_table_t tokens = { @@ -1209,6 +1209,9 @@ static const match_table_t tokens = { {Opt_removed, "reservation"}, /* mount option from ext2/3 */ {Opt_removed, "noreservation"}, /* mount option from ext2/3 */ {Opt_removed, "journal=%u"}, /* mount option from ext2/3 */ + {Opt_tag, "tag"}, + {Opt_notag, "notag"}, + {Opt_tagid, "tagid=%u"}, {Opt_err, NULL}, }; @@ -1439,6 +1442,20 @@ static int handle_mount_opt(struct super case Opt_i_version: sb->s_flags |= MS_I_VERSION; return 1; +#ifndef CONFIG_TAGGING_NONE + case Opt_tag: + set_opt(sb, TAGGED); + return 1; + case Opt_notag: + clear_opt(sb, TAGGED); + return 1; +#endif +#ifdef CONFIG_PROPAGATE + case Opt_tagid: + /* use args[0] */ + set_opt(sb, TAGGED); + return 1; +#endif } for (m = ext4_mount_opts; m->token != Opt_err; m++) @@ -3452,6 +3469,9 @@ static int ext4_fill_super(struct super_ clear_opt(sb, DELALLOC); } + if (EXT4_SB(sb)->s_mount_opt & EXT4_MOUNT_TAGGED) + sb->s_flags |= MS_TAGGED; + sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0); @@ -4664,6 +4684,14 @@ static int ext4_remount(struct super_blo if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) ext4_abort(sb, "Abort forced by user"); + if ((sbi->s_mount_opt & EXT4_MOUNT_TAGGED) && + !(sb->s_flags & MS_TAGGED)) { + printk("EXT4-fs: %s: tagging not permitted on remount.\n", + sb->s_id); + err = -EINVAL; + goto restore_opts; + } + sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | (test_opt(sb, POSIX_ACL) ? 
MS_POSIXACL : 0); diff -NurpP --minimal linux-3.10.19/fs/fcntl.c linux-3.10.19-vs2.3.6.8/fs/fcntl.c --- linux-3.10.19/fs/fcntl.c 2013-05-31 13:45:23.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/fcntl.c 2013-08-22 20:29:59.000000000 +0000 @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -378,6 +379,8 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, f if (!f.file) goto out; + if (!vx_files_avail(1)) + goto out; if (unlikely(f.file->f_mode & FMODE_PATH)) { if (!check_fcntl_cmd(cmd)) diff -NurpP --minimal linux-3.10.19/fs/file.c linux-3.10.19-vs2.3.6.8/fs/file.c --- linux-3.10.19/fs/file.c 2013-07-14 17:01:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/file.c 2013-08-22 20:29:59.000000000 +0000 @@ -22,6 +22,7 @@ #include #include #include +#include int sysctl_nr_open __read_mostly = 1024*1024; int sysctl_nr_open_min = BITS_PER_LONG; @@ -311,6 +312,8 @@ struct files_struct *dup_fd(struct files struct file *f = *old_fds++; if (f) { get_file(f); + /* TODO: sum it first for check and performance */ + vx_openfd_inc(open_files - i); } else { /* * The fd may be claimed in the fd bitmap but not yet @@ -376,9 +379,11 @@ static void close_files(struct files_str filp_close(file, files); cond_resched(); } + vx_openfd_dec(i); } i++; set >>= 1; + cond_resched(); } } } @@ -503,6 +508,7 @@ repeat: else __clear_close_on_exec(fd, fdt); error = fd; + vx_openfd_inc(fd); #if 1 /* Sanity check */ if (rcu_dereference_raw(fdt->fd[fd]) != NULL) { @@ -533,6 +539,7 @@ static void __put_unused_fd(struct files __clear_open_fd(fd, fdt); if (fd < files->next_fd) files->next_fd = fd; + vx_openfd_dec(fd); } void put_unused_fd(unsigned int fd) @@ -812,6 +819,8 @@ static int do_dup2(struct files_struct * if (tofree) filp_close(tofree, files); + else + vx_openfd_inc(fd); /* fd was unused */ return fd; diff -NurpP --minimal linux-3.10.19/fs/file_table.c linux-3.10.19-vs2.3.6.8/fs/file_table.c --- linux-3.10.19/fs/file_table.c 2013-07-14 17:01:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/file_table.c 2013-08-22 20:29:59.000000000 +0000 @@ -26,6 +26,8 @@ #include #include #include +#include +#include #include @@ -140,6 +142,8 @@ struct file *get_empty_filp(void) spin_lock_init(&f->f_lock); eventpoll_init_file(f); /* f->f_version: 0 */ + f->f_xid = vx_current_xid(); + vx_files_inc(f); return f; over: @@ -257,6 +261,8 @@ static void __fput(struct file *file) i_readcount_dec(inode); if (file->f_mode & FMODE_WRITE) drop_file_write_access(file); + vx_files_dec(file); + file->f_xid = 0; file->f_path.dentry = NULL; file->f_path.mnt = NULL; file->f_inode = NULL; @@ -345,6 +351,8 @@ void put_filp(struct file *file) { if (atomic_long_dec_and_test(&file->f_count)) { security_file_free(file); + vx_files_dec(file); + file->f_xid = 0; file_sb_list_del(file); file_free(file); } diff -NurpP --minimal linux-3.10.19/fs/fs_struct.c linux-3.10.19-vs2.3.6.8/fs/fs_struct.c --- linux-3.10.19/fs/fs_struct.c 2013-05-31 13:45:23.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/fs_struct.c 2013-08-22 20:29:59.000000000 +0000 @@ -4,6 +4,7 @@ #include #include #include +#include #include "internal.h" /* @@ -87,6 +88,7 @@ void free_fs_struct(struct fs_struct *fs { path_put(&fs->root); path_put(&fs->pwd); + atomic_dec(&vs_global_fs); kmem_cache_free(fs_cachep, fs); } @@ -124,6 +126,7 @@ struct fs_struct *copy_fs_struct(struct fs->pwd = old->pwd; path_get(&fs->pwd); spin_unlock(&old->lock); + atomic_inc(&vs_global_fs); } return fs; } diff -NurpP --minimal linux-3.10.19/fs/gfs2/file.c linux-3.10.19-vs2.3.6.8/fs/gfs2/file.c --- 
linux-3.10.19/fs/gfs2/file.c 2013-07-14 17:01:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/gfs2/file.c 2013-08-22 20:29:59.000000000 +0000 @@ -144,6 +144,9 @@ static const u32 fsflags_to_gfs2[32] = { [12] = GFS2_DIF_EXHASH, [14] = GFS2_DIF_INHERIT_JDATA, [17] = GFS2_DIF_TOPDIR, + [27] = GFS2_DIF_IXUNLINK, + [26] = GFS2_DIF_BARRIER, + [29] = GFS2_DIF_COW, }; static const u32 gfs2_to_fsflags[32] = { @@ -154,6 +157,9 @@ static const u32 gfs2_to_fsflags[32] = { [gfs2fl_ExHash] = FS_INDEX_FL, [gfs2fl_TopLevel] = FS_TOPDIR_FL, [gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL, + [gfs2fl_IXUnlink] = FS_IXUNLINK_FL, + [gfs2fl_Barrier] = FS_BARRIER_FL, + [gfs2fl_Cow] = FS_COW_FL, }; static int gfs2_get_flags(struct file *filp, u32 __user *ptr) @@ -184,12 +190,18 @@ void gfs2_set_inode_flags(struct inode * { struct gfs2_inode *ip = GFS2_I(inode); unsigned int flags = inode->i_flags; + unsigned int vflags = inode->i_vflags; + + flags &= ~(S_IMMUTABLE | S_IXUNLINK | + S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC | S_NOSEC); - flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC); if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode)) inode->i_flags |= S_NOSEC; if (ip->i_diskflags & GFS2_DIF_IMMUTABLE) flags |= S_IMMUTABLE; + if (ip->i_diskflags & GFS2_DIF_IXUNLINK) + flags |= S_IXUNLINK; + if (ip->i_diskflags & GFS2_DIF_APPENDONLY) flags |= S_APPEND; if (ip->i_diskflags & GFS2_DIF_NOATIME) @@ -197,6 +209,43 @@ void gfs2_set_inode_flags(struct inode * if (ip->i_diskflags & GFS2_DIF_SYNC) flags |= S_SYNC; inode->i_flags = flags; + + vflags &= ~(V_BARRIER | V_COW); + + if (ip->i_diskflags & GFS2_DIF_BARRIER) + vflags |= V_BARRIER; + if (ip->i_diskflags & GFS2_DIF_COW) + vflags |= V_COW; + inode->i_vflags = vflags; +} + +void gfs2_get_inode_flags(struct inode *inode) +{ + struct gfs2_inode *ip = GFS2_I(inode); + unsigned int flags = inode->i_flags; + unsigned int vflags = inode->i_vflags; + + ip->i_diskflags &= ~(GFS2_DIF_APPENDONLY | + GFS2_DIF_NOATIME | GFS2_DIF_SYNC | + GFS2_DIF_IMMUTABLE | GFS2_DIF_IXUNLINK | + GFS2_DIF_BARRIER | GFS2_DIF_COW); + + if (flags & S_IMMUTABLE) + ip->i_diskflags |= GFS2_DIF_IMMUTABLE; + if (flags & S_IXUNLINK) + ip->i_diskflags |= GFS2_DIF_IXUNLINK; + + if (flags & S_APPEND) + ip->i_diskflags |= GFS2_DIF_APPENDONLY; + if (flags & S_NOATIME) + ip->i_diskflags |= GFS2_DIF_NOATIME; + if (flags & S_SYNC) + ip->i_diskflags |= GFS2_DIF_SYNC; + + if (vflags & V_BARRIER) + ip->i_diskflags |= GFS2_DIF_BARRIER; + if (vflags & V_COW) + ip->i_diskflags |= GFS2_DIF_COW; } /* Flags that can be set by user space */ @@ -310,6 +359,37 @@ static int gfs2_set_flags(struct file *f return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA); } +int gfs2_sync_flags(struct inode *inode, int flags, int vflags) +{ + struct gfs2_inode *ip = GFS2_I(inode); + struct gfs2_sbd *sdp = GFS2_SB(inode); + struct buffer_head *bh; + struct gfs2_holder gh; + int error; + + error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); + if (error) + return error; + error = gfs2_trans_begin(sdp, RES_DINODE, 0); + if (error) + goto out; + error = gfs2_meta_inode_buffer(ip, &bh); + if (error) + goto out_trans_end; + gfs2_trans_add_meta(ip->i_gl, bh); + inode->i_flags = flags; + inode->i_vflags = vflags; + gfs2_get_inode_flags(inode); + gfs2_dinode_out(ip, bh->b_data); + brelse(bh); + gfs2_set_aops(inode); +out_trans_end: + gfs2_trans_end(sdp); +out: + gfs2_glock_dq_uninit(&gh); + return error; +} + static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { switch(cmd) { diff -NurpP 
--minimal linux-3.10.19/fs/gfs2/inode.h linux-3.10.19-vs2.3.6.8/fs/gfs2/inode.h --- linux-3.10.19/fs/gfs2/inode.h 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/gfs2/inode.h 2013-08-22 20:29:59.000000000 +0000 @@ -117,6 +117,7 @@ extern const struct file_operations gfs2 extern const struct file_operations gfs2_dir_fops_nolock; extern void gfs2_set_inode_flags(struct inode *inode); +extern int gfs2_sync_flags(struct inode *inode, int flags, int vflags); #ifdef CONFIG_GFS2_FS_LOCKING_DLM extern const struct file_operations gfs2_file_fops; diff -NurpP --minimal linux-3.10.19/fs/hostfs/hostfs.h linux-3.10.19-vs2.3.6.8/fs/hostfs/hostfs.h --- linux-3.10.19/fs/hostfs/hostfs.h 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/hostfs/hostfs.h 2013-08-22 20:29:59.000000000 +0000 @@ -42,6 +42,7 @@ struct hostfs_iattr { unsigned short ia_mode; uid_t ia_uid; gid_t ia_gid; + vtag_t ia_tag; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; diff -NurpP --minimal linux-3.10.19/fs/inode.c linux-3.10.19-vs2.3.6.8/fs/inode.c --- linux-3.10.19/fs/inode.c 2013-07-14 17:01:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/inode.c 2013-08-22 20:29:59.000000000 +0000 @@ -17,6 +17,7 @@ #include #include /* for inode_has_buffers */ #include +#include #include "internal.h" /* @@ -128,6 +129,8 @@ int inode_init_always(struct super_block struct address_space *const mapping = &inode->i_data; inode->i_sb = sb; + + /* essential because of inode slab reuse */ inode->i_blkbits = sb->s_blocksize_bits; inode->i_flags = 0; atomic_set(&inode->i_count, 1); @@ -137,6 +140,7 @@ int inode_init_always(struct super_block inode->i_opflags = 0; i_uid_write(inode, 0); i_gid_write(inode, 0); + i_tag_write(inode, 0); atomic_set(&inode->i_writecount, 0); inode->i_size = 0; inode->i_blocks = 0; @@ -149,6 +153,7 @@ int inode_init_always(struct super_block inode->i_bdev = NULL; inode->i_cdev = NULL; inode->i_rdev = 0; + inode->i_mdev = 0; inode->dirtied_when = 0; if (security_inode_alloc(inode)) @@ -483,6 +488,8 @@ void __insert_inode_hash(struct inode *i } EXPORT_SYMBOL(__insert_inode_hash); +EXPORT_SYMBOL_GPL(__iget); + /** * __remove_inode_hash - remove an inode from the hash * @inode: inode to unhash @@ -1799,9 +1806,11 @@ void init_special_inode(struct inode *in if (S_ISCHR(mode)) { inode->i_fop = &def_chr_fops; inode->i_rdev = rdev; + inode->i_mdev = rdev; } else if (S_ISBLK(mode)) { inode->i_fop = &def_blk_fops; inode->i_rdev = rdev; + inode->i_mdev = rdev; } else if (S_ISFIFO(mode)) inode->i_fop = &pipefifo_fops; else if (S_ISSOCK(mode)) @@ -1830,6 +1839,7 @@ void inode_init_owner(struct inode *inod } else inode->i_gid = current_fsgid(); inode->i_mode = mode; + i_tag_write(inode, dx_current_fstag(inode->i_sb)); } EXPORT_SYMBOL(inode_init_owner); diff -NurpP --minimal linux-3.10.19/fs/ioctl.c linux-3.10.19-vs2.3.6.8/fs/ioctl.c --- linux-3.10.19/fs/ioctl.c 2013-05-31 13:45:24.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ioctl.c 2013-08-22 20:29:59.000000000 +0000 @@ -15,6 +15,9 @@ #include #include #include +#include +#include +#include #include diff -NurpP --minimal linux-3.10.19/fs/ioprio.c linux-3.10.19-vs2.3.6.8/fs/ioprio.c --- linux-3.10.19/fs/ioprio.c 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ioprio.c 2013-08-22 20:29:59.000000000 +0000 @@ -28,6 +28,7 @@ #include #include #include +#include int set_task_ioprio(struct task_struct *task, int ioprio) { @@ -105,6 +106,8 @@ SYSCALL_DEFINE3(ioprio_set, int, which, else pgrp = find_vpid(who); 
do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { + if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT)) + continue; ret = set_task_ioprio(p, ioprio); if (ret) break; @@ -198,6 +201,8 @@ SYSCALL_DEFINE2(ioprio_get, int, which, else pgrp = find_vpid(who); do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { + if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT)) + continue; tmpio = get_task_ioprio(p); if (tmpio < 0) continue; diff -NurpP --minimal linux-3.10.19/fs/jfs/file.c linux-3.10.19-vs2.3.6.8/fs/jfs/file.c --- linux-3.10.19/fs/jfs/file.c 2013-02-19 13:58:48.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/jfs/file.c 2013-08-22 20:29:59.000000000 +0000 @@ -109,7 +109,8 @@ int jfs_setattr(struct dentry *dentry, s if (is_quota_modification(inode, iattr)) dquot_initialize(inode); if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) || - (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) { + (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid)) || + (iattr->ia_valid & ATTR_TAG && !tag_eq(iattr->ia_tag, inode->i_tag))) { rc = dquot_transfer(inode, iattr); if (rc) return rc; @@ -144,6 +145,7 @@ const struct inode_operations jfs_file_i #ifdef CONFIG_JFS_POSIX_ACL .get_acl = jfs_get_acl, #endif + .sync_flags = jfs_sync_flags, }; const struct file_operations jfs_file_operations = { diff -NurpP --minimal linux-3.10.19/fs/jfs/ioctl.c linux-3.10.19-vs2.3.6.8/fs/jfs/ioctl.c --- linux-3.10.19/fs/jfs/ioctl.c 2013-05-31 13:45:24.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/jfs/ioctl.c 2013-08-22 20:29:59.000000000 +0000 @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -56,6 +57,16 @@ static long jfs_map_ext2(unsigned long f } +int jfs_sync_flags(struct inode *inode, int flags, int vflags) +{ + inode->i_flags = flags; + inode->i_vflags = vflags; + jfs_get_inode_flags(JFS_IP(inode)); + inode->i_ctime = CURRENT_TIME_SEC; + mark_inode_dirty(inode); + return 0; +} + long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(filp); @@ -89,6 +100,11 @@ long jfs_ioctl(struct file *filp, unsign if (!S_ISDIR(inode->i_mode)) flags &= ~JFS_DIRSYNC_FL; + if (IS_BARRIER(inode)) { + vxwprintk_task(1, "messing with the barrier."); + return -EACCES; + } + /* Is it quota file? Do not allow user to mess with it */ if (IS_NOQUOTA(inode)) { err = -EPERM; @@ -106,8 +122,8 @@ long jfs_ioctl(struct file *filp, unsign * the relevant capability. 
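The gfs2 hunks above and the jfs hunks around this point follow one pattern: the on-disk flag word gains IXUNLINK, BARRIER and COW bits, *_set_inode_flags() projects them into inode->i_flags plus the new inode->i_vflags, and the new ->sync_flags() inode operation performs the reverse mapping before marking the inode dirty. A self-contained sketch of that round trip is below; every bit value in it is made up for illustration, the real constants live in the respective filesystem headers and in the vserver VFS changes:

    /* Sketch of the disk-flag <-> VFS-flag round trip; all values illustrative. */
    #include <assert.h>
    #include <stdio.h>

    #define DISK_IMMUTABLE  0x01
    #define DISK_IXUNLINK   0x02
    #define DISK_BARRIER    0x04
    #define DISK_COW        0x08

    #define VFS_IMMUTABLE   0x10   /* stand-in for S_IMMUTABLE */
    #define VFS_IXUNLINK    0x20   /* stand-in for S_IXUNLINK */
    #define VFL_BARRIER     0x01   /* stand-in for V_BARRIER */
    #define VFL_COW         0x02   /* stand-in for V_COW */

    struct toy_inode {
        unsigned int i_flags;    /* regular VFS flags */
        unsigned int i_vflags;   /* vserver flags added by the patch */
    };

    static void set_inode_flags(struct toy_inode *inode, unsigned int disk)
    {
        inode->i_flags  &= ~(VFS_IMMUTABLE | VFS_IXUNLINK);
        inode->i_vflags &= ~(VFL_BARRIER | VFL_COW);

        if (disk & DISK_IMMUTABLE) inode->i_flags  |= VFS_IMMUTABLE;
        if (disk & DISK_IXUNLINK)  inode->i_flags  |= VFS_IXUNLINK;
        if (disk & DISK_BARRIER)   inode->i_vflags |= VFL_BARRIER;
        if (disk & DISK_COW)       inode->i_vflags |= VFL_COW;
    }

    static unsigned int get_inode_flags(const struct toy_inode *inode)
    {
        unsigned int disk = 0;

        if (inode->i_flags  & VFS_IMMUTABLE) disk |= DISK_IMMUTABLE;
        if (inode->i_flags  & VFS_IXUNLINK)  disk |= DISK_IXUNLINK;
        if (inode->i_vflags & VFL_BARRIER)   disk |= DISK_BARRIER;
        if (inode->i_vflags & VFL_COW)       disk |= DISK_COW;
        return disk;
    }

    int main(void)
    {
        struct toy_inode inode = { 0, 0 };
        unsigned int disk = DISK_IXUNLINK | DISK_BARRIER;

        set_inode_flags(&inode, disk);
        assert(get_inode_flags(&inode) == disk);   /* mapping is symmetric */
        printf("i_flags=%#x i_vflags=%#x\n", inode.i_flags, inode.i_vflags);
        return 0;
    }

Keeping the two directions symmetric is what lets ->sync_flags() simply overwrite i_flags/i_vflags and re-derive the on-disk word, as the jfs_sync_flags() and ocfs2_sync_flags() additions do.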
*/ if ((oldflags & JFS_IMMUTABLE_FL) || - ((flags ^ oldflags) & - (JFS_APPEND_FL | JFS_IMMUTABLE_FL))) { + ((flags ^ oldflags) & (JFS_APPEND_FL | + JFS_IMMUTABLE_FL | JFS_IXUNLINK_FL))) { if (!capable(CAP_LINUX_IMMUTABLE)) { mutex_unlock(&inode->i_mutex); err = -EPERM; @@ -115,7 +131,7 @@ long jfs_ioctl(struct file *filp, unsign } } - flags = flags & JFS_FL_USER_MODIFIABLE; + flags &= JFS_FL_USER_MODIFIABLE; flags |= oldflags & ~JFS_FL_USER_MODIFIABLE; jfs_inode->mode2 = flags; diff -NurpP --minimal linux-3.10.19/fs/jfs/jfs_dinode.h linux-3.10.19-vs2.3.6.8/fs/jfs/jfs_dinode.h --- linux-3.10.19/fs/jfs/jfs_dinode.h 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/jfs/jfs_dinode.h 2013-08-22 20:29:59.000000000 +0000 @@ -161,9 +161,13 @@ struct dinode { #define JFS_APPEND_FL 0x01000000 /* writes to file may only append */ #define JFS_IMMUTABLE_FL 0x02000000 /* Immutable file */ +#define JFS_IXUNLINK_FL 0x08000000 /* Immutable invert on unlink */ -#define JFS_FL_USER_VISIBLE 0x03F80000 -#define JFS_FL_USER_MODIFIABLE 0x03F80000 +#define JFS_BARRIER_FL 0x04000000 /* Barrier for chroot() */ +#define JFS_COW_FL 0x20000000 /* Copy on Write marker */ + +#define JFS_FL_USER_VISIBLE 0x07F80000 +#define JFS_FL_USER_MODIFIABLE 0x07F80000 #define JFS_FL_INHERIT 0x03C80000 /* These are identical to EXT[23]_IOC_GETFLAGS/SETFLAGS */ diff -NurpP --minimal linux-3.10.19/fs/jfs/jfs_filsys.h linux-3.10.19-vs2.3.6.8/fs/jfs/jfs_filsys.h --- linux-3.10.19/fs/jfs/jfs_filsys.h 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/jfs/jfs_filsys.h 2013-08-22 20:29:59.000000000 +0000 @@ -266,6 +266,7 @@ #define JFS_NAME_MAX 255 #define JFS_PATH_MAX BPSIZE +#define JFS_TAGGED 0x00800000 /* Context Tagging */ /* * file system state (superblock state) diff -NurpP --minimal linux-3.10.19/fs/jfs/jfs_imap.c linux-3.10.19-vs2.3.6.8/fs/jfs/jfs_imap.c --- linux-3.10.19/fs/jfs/jfs_imap.c 2013-07-14 17:01:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/jfs/jfs_imap.c 2013-08-22 20:29:59.000000000 +0000 @@ -46,6 +46,7 @@ #include #include #include +#include #include "jfs_incore.h" #include "jfs_inode.h" @@ -3058,6 +3059,8 @@ static int copy_from_dinode(struct dinod { struct jfs_inode_info *jfs_ip = JFS_IP(ip); struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); + kuid_t kuid; + kgid_t kgid; jfs_ip->fileset = le32_to_cpu(dip->di_fileset); jfs_ip->mode2 = le32_to_cpu(dip->di_mode); @@ -3078,14 +3081,18 @@ static int copy_from_dinode(struct dinod } set_nlink(ip, le32_to_cpu(dip->di_nlink)); - jfs_ip->saved_uid = make_kuid(&init_user_ns, le32_to_cpu(dip->di_uid)); + kuid = make_kuid(&init_user_ns, le32_to_cpu(dip->di_uid)); + kgid = make_kgid(&init_user_ns, le32_to_cpu(dip->di_gid)); + ip->i_tag = INOTAG_KTAG(DX_TAG(ip), kuid, kgid, GLOBAL_ROOT_TAG); + + jfs_ip->saved_uid = INOTAG_KUID(DX_TAG(ip), kuid, kgid); if (!uid_valid(sbi->uid)) ip->i_uid = jfs_ip->saved_uid; else { ip->i_uid = sbi->uid; } - jfs_ip->saved_gid = make_kgid(&init_user_ns, le32_to_cpu(dip->di_gid)); + jfs_ip->saved_gid = INOTAG_KGID(DX_TAG(ip), kuid, kgid); if (!gid_valid(sbi->gid)) ip->i_gid = jfs_ip->saved_gid; else { @@ -3150,16 +3157,14 @@ static void copy_to_dinode(struct dinode dip->di_size = cpu_to_le64(ip->i_size); dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks)); dip->di_nlink = cpu_to_le32(ip->i_nlink); - if (!uid_valid(sbi->uid)) - dip->di_uid = cpu_to_le32(i_uid_read(ip)); - else - dip->di_uid =cpu_to_le32(from_kuid(&init_user_ns, - jfs_ip->saved_uid)); - if (!gid_valid(sbi->gid)) - dip->di_gid = 
cpu_to_le32(i_gid_read(ip)); - else - dip->di_gid = cpu_to_le32(from_kgid(&init_user_ns, - jfs_ip->saved_gid)); + dip->di_uid = cpu_to_le32(from_kuid(&init_user_ns, + TAGINO_KUID(DX_TAG(ip), + !uid_valid(sbi->uid) ? ip->i_uid : jfs_ip->saved_uid, + ip->i_tag))); + dip->di_gid = cpu_to_le32(from_kgid(&init_user_ns, + TAGINO_KGID(DX_TAG(ip), + !gid_valid(sbi->gid) ? ip->i_gid : jfs_ip->saved_gid, + ip->i_tag))); jfs_get_inode_flags(jfs_ip); /* * mode2 is only needed for storing the higher order bits. diff -NurpP --minimal linux-3.10.19/fs/jfs/jfs_inode.c linux-3.10.19-vs2.3.6.8/fs/jfs/jfs_inode.c --- linux-3.10.19/fs/jfs/jfs_inode.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/jfs/jfs_inode.c 2013-11-13 17:22:25.000000000 +0000 @@ -18,6 +18,7 @@ #include #include +#include #include "jfs_incore.h" #include "jfs_inode.h" #include "jfs_filsys.h" @@ -30,29 +31,46 @@ void jfs_set_inode_flags(struct inode *i { unsigned int flags = JFS_IP(inode)->mode2; - inode->i_flags &= ~(S_IMMUTABLE | S_APPEND | - S_NOATIME | S_DIRSYNC | S_SYNC); + inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK | + S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC); if (flags & JFS_IMMUTABLE_FL) inode->i_flags |= S_IMMUTABLE; + if (flags & JFS_IXUNLINK_FL) + inode->i_flags |= S_IXUNLINK; + + if (flags & JFS_SYNC_FL) + inode->i_flags |= S_SYNC; if (flags & JFS_APPEND_FL) inode->i_flags |= S_APPEND; if (flags & JFS_NOATIME_FL) inode->i_flags |= S_NOATIME; if (flags & JFS_DIRSYNC_FL) inode->i_flags |= S_DIRSYNC; - if (flags & JFS_SYNC_FL) - inode->i_flags |= S_SYNC; + + inode->i_vflags &= ~(V_BARRIER | V_COW); + + if (flags & JFS_BARRIER_FL) + inode->i_vflags |= V_BARRIER; + if (flags & JFS_COW_FL) + inode->i_vflags |= V_COW; } void jfs_get_inode_flags(struct jfs_inode_info *jfs_ip) { unsigned int flags = jfs_ip->vfs_inode.i_flags; + unsigned int vflags = jfs_ip->vfs_inode.i_vflags; + + jfs_ip->mode2 &= ~(JFS_IMMUTABLE_FL | JFS_IXUNLINK_FL | + JFS_APPEND_FL | JFS_NOATIME_FL | + JFS_DIRSYNC_FL | JFS_SYNC_FL | + JFS_BARRIER_FL | JFS_COW_FL); - jfs_ip->mode2 &= ~(JFS_IMMUTABLE_FL | JFS_APPEND_FL | JFS_NOATIME_FL | - JFS_DIRSYNC_FL | JFS_SYNC_FL); if (flags & S_IMMUTABLE) jfs_ip->mode2 |= JFS_IMMUTABLE_FL; + if (flags & S_IXUNLINK) + jfs_ip->mode2 |= JFS_IXUNLINK_FL; + if (flags & S_APPEND) jfs_ip->mode2 |= JFS_APPEND_FL; if (flags & S_NOATIME) @@ -61,6 +79,11 @@ void jfs_get_inode_flags(struct jfs_inod jfs_ip->mode2 |= JFS_DIRSYNC_FL; if (flags & S_SYNC) jfs_ip->mode2 |= JFS_SYNC_FL; + + if (vflags & V_BARRIER) + jfs_ip->mode2 |= JFS_BARRIER_FL; + if (vflags & V_COW) + jfs_ip->mode2 |= JFS_COW_FL; } /* diff -NurpP --minimal linux-3.10.19/fs/jfs/jfs_inode.h linux-3.10.19-vs2.3.6.8/fs/jfs/jfs_inode.h --- linux-3.10.19/fs/jfs/jfs_inode.h 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/jfs/jfs_inode.h 2013-08-22 20:29:59.000000000 +0000 @@ -39,6 +39,7 @@ extern struct dentry *jfs_fh_to_dentry(s extern struct dentry *jfs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type); extern void jfs_set_inode_flags(struct inode *); +extern int jfs_sync_flags(struct inode *, int, int); extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int); extern int jfs_setattr(struct dentry *, struct iattr *); diff -NurpP --minimal linux-3.10.19/fs/jfs/namei.c linux-3.10.19-vs2.3.6.8/fs/jfs/namei.c --- linux-3.10.19/fs/jfs/namei.c 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/jfs/namei.c 2013-08-22 20:29:59.000000000 +0000 @@ -22,6 +22,7 @@ #include #include 
#include +#include #include "jfs_incore.h" #include "jfs_superblock.h" #include "jfs_inode.h" @@ -1461,6 +1462,7 @@ static struct dentry *jfs_lookup(struct jfs_err("jfs_lookup: iget failed on inum %d", (uint)inum); } + dx_propagate_tag(nd, ip); return d_splice_alias(ip, dentry); } @@ -1525,6 +1527,7 @@ const struct inode_operations jfs_dir_in #ifdef CONFIG_JFS_POSIX_ACL .get_acl = jfs_get_acl, #endif + .sync_flags = jfs_sync_flags, }; const struct file_operations jfs_dir_operations = { diff -NurpP --minimal linux-3.10.19/fs/jfs/super.c linux-3.10.19-vs2.3.6.8/fs/jfs/super.c --- linux-3.10.19/fs/jfs/super.c 2013-07-14 17:01:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/jfs/super.c 2013-08-22 20:29:59.000000000 +0000 @@ -199,7 +199,8 @@ enum { Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize, Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota, Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask, - Opt_discard, Opt_nodiscard, Opt_discard_minblk + Opt_discard, Opt_nodiscard, Opt_discard_minblk, + Opt_tag, Opt_notag, Opt_tagid }; static const match_table_t tokens = { @@ -209,6 +210,10 @@ static const match_table_t tokens = { {Opt_resize, "resize=%u"}, {Opt_resize_nosize, "resize"}, {Opt_errors, "errors=%s"}, + {Opt_tag, "tag"}, + {Opt_notag, "notag"}, + {Opt_tagid, "tagid=%u"}, + {Opt_tag, "tagxid"}, {Opt_ignore, "noquota"}, {Opt_ignore, "quota"}, {Opt_usrquota, "usrquota"}, @@ -385,7 +390,20 @@ static int parse_options(char *options, } break; } - +#ifndef CONFIG_TAGGING_NONE + case Opt_tag: + *flag |= JFS_TAGGED; + break; + case Opt_notag: + *flag &= JFS_TAGGED; + break; +#endif +#ifdef CONFIG_PROPAGATE + case Opt_tagid: + /* use args[0] */ + *flag |= JFS_TAGGED; + break; +#endif default: printk("jfs: Unrecognized mount option \"%s\" " " or missing value\n", p); @@ -417,6 +435,12 @@ static int jfs_remount(struct super_bloc return -EINVAL; } + if ((flag & JFS_TAGGED) && !(sb->s_flags & MS_TAGGED)) { + printk(KERN_ERR "JFS: %s: tagging not permitted on remount.\n", + sb->s_id); + return -EINVAL; + } + if (newLVSize) { if (sb->s_flags & MS_RDONLY) { pr_err("JFS: resize requires volume" \ @@ -502,6 +526,9 @@ static int jfs_fill_super(struct super_b #ifdef CONFIG_JFS_POSIX_ACL sb->s_flags |= MS_POSIXACL; #endif + /* map mount option tagxid */ + if (sbi->flag & JFS_TAGGED) + sb->s_flags |= MS_TAGGED; if (newLVSize) { pr_err("resize option for remount only\n"); diff -NurpP --minimal linux-3.10.19/fs/libfs.c linux-3.10.19-vs2.3.6.8/fs/libfs.c --- linux-3.10.19/fs/libfs.c 2013-02-19 13:58:48.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/libfs.c 2013-08-22 20:29:59.000000000 +0000 @@ -135,7 +135,8 @@ static inline unsigned char dt_type(stru * both impossible due to the lock on directory. 
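The jfs_imap.c hunks above (and the nfs, nfsd and ocfs2 hunks later in the patch) route every on-disk uid/gid through the INOTAG_KUID/INOTAG_KGID/INOTAG_KTAG macros on read and the TAGINO_KUID/TAGINO_KGID macros on write: when the filesystem has no native tag field, part of the uid/gid value space is borrowed to persist the context tag, and the macros split it out again when the inode is read back. A rough sketch follows under the assumption of an 8-bit tag stored in the top bits of a 32-bit id; the real layout depends on which CONFIG_TAGGING_* option is selected and is not taken from the patch:

    /* Sketch of tag-in-uid packing; the 24/8 bit split is an assumption. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TAG_SHIFT 24
    #define TAG_MASK  0xffu

    static uint32_t inotag_uid(uint32_t disk_uid)   /* strip the tag on read */
    {
        return disk_uid & ((1u << TAG_SHIFT) - 1);
    }

    static uint32_t inotag_tag(uint32_t disk_uid)   /* extract the tag on read */
    {
        return (disk_uid >> TAG_SHIFT) & TAG_MASK;
    }

    static uint32_t tagino_uid(uint32_t uid, uint32_t tag)  /* recombine on write */
    {
        return (uid & ((1u << TAG_SHIFT) - 1)) | (tag << TAG_SHIFT);
    }

    int main(void)
    {
        uint32_t disk_uid = tagino_uid(1000, 42);   /* uid 1000, context 42 */

        assert(inotag_uid(disk_uid) == 1000);
        assert(inotag_tag(disk_uid) == 42);
        printf("disk uid %#x -> uid %u, tag %u\n", (unsigned)disk_uid,
               (unsigned)inotag_uid(disk_uid), (unsigned)inotag_tag(disk_uid));
        return 0;
    }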
*/ -int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir) +static inline int do_dcache_readdir_filter(struct file *filp, + void *dirent, filldir_t filldir, int (*filter)(struct dentry *dentry)) { struct dentry *dentry = filp->f_path.dentry; struct dentry *cursor = filp->private_data; @@ -166,6 +167,8 @@ int dcache_readdir(struct file * filp, v for (p=q->next; p != &dentry->d_subdirs; p=p->next) { struct dentry *next; next = list_entry(p, struct dentry, d_u.d_child); + if (filter && !filter(next)) + continue; spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); if (!simple_positive(next)) { spin_unlock(&next->d_lock); @@ -192,6 +195,17 @@ int dcache_readdir(struct file * filp, v return 0; } +int dcache_readdir(struct file *filp, void *dirent, filldir_t filldir) +{ + return do_dcache_readdir_filter(filp, dirent, filldir, NULL); +} + +int dcache_readdir_filter(struct file *filp, void *dirent, filldir_t filldir, + int (*filter)(struct dentry *)) +{ + return do_dcache_readdir_filter(filp, dirent, filldir, filter); +} + ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos) { return -EISDIR; @@ -981,6 +995,7 @@ EXPORT_SYMBOL(dcache_dir_close); EXPORT_SYMBOL(dcache_dir_lseek); EXPORT_SYMBOL(dcache_dir_open); EXPORT_SYMBOL(dcache_readdir); +EXPORT_SYMBOL(dcache_readdir_filter); EXPORT_SYMBOL(generic_read_dir); EXPORT_SYMBOL(mount_pseudo); EXPORT_SYMBOL(simple_write_begin); diff -NurpP --minimal linux-3.10.19/fs/locks.c linux-3.10.19-vs2.3.6.8/fs/locks.c --- linux-3.10.19/fs/locks.c 2013-05-31 13:45:24.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/locks.c 2013-08-22 20:29:59.000000000 +0000 @@ -126,6 +126,8 @@ #include #include #include +#include +#include #include @@ -184,11 +186,17 @@ static void locks_init_lock_heads(struct /* Allocate an empty lock structure. 
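The libfs.c hunk above turns dcache_readdir() into a thin wrapper around an internal helper that takes an optional per-dentry filter, and exports dcache_readdir_filter() so users of the patch (devpts, for instance) can hide directory entries belonging to other contexts. The idea reduced to a plain list walk, with the filter callback as the only addition:

    /* Sketch of readdir-with-filter: same iteration, rejected entries skipped. */
    #include <stdio.h>

    struct toy_dentry {
        const char *name;
        unsigned int xid;   /* owning context, illustrative field */
    };

    typedef int (*filter_t)(const struct toy_dentry *);

    static void list_dir(const struct toy_dentry *ents, int n, filter_t filter)
    {
        for (int i = 0; i < n; i++) {
            if (filter && !filter(&ents[i]))
                continue;                 /* hidden from this caller */
            printf("%s\n", ents[i].name);
        }
    }

    static int only_xid_42(const struct toy_dentry *d)
    {
        return d->xid == 42;
    }

    int main(void)
    {
        struct toy_dentry ents[] = { { "ptmx", 0 }, { "1", 42 }, { "2", 99 } };

        list_dir(ents, 3, NULL);           /* no filter: old behaviour */
        list_dir(ents, 3, only_xid_42);    /* filtered view for one context */
        return 0;
    }

Passing NULL keeps the original dcache_readdir() behaviour, which is why existing callers need no change.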
*/ struct file_lock *locks_alloc_lock(void) { - struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL); + struct file_lock *fl; - if (fl) - locks_init_lock_heads(fl); + if (!vx_locks_avail(1)) + return NULL; + fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL); + + if (fl) { + locks_init_lock_heads(fl); + fl->fl_xid = -1; + } return fl; } EXPORT_SYMBOL_GPL(locks_alloc_lock); @@ -212,6 +220,7 @@ void locks_free_lock(struct file_lock *f BUG_ON(!list_empty(&fl->fl_block)); BUG_ON(!list_empty(&fl->fl_link)); + vx_locks_dec(fl); locks_release_private(fl); kmem_cache_free(filelock_cache, fl); } @@ -221,6 +230,7 @@ void locks_init_lock(struct file_lock *f { memset(fl, 0, sizeof(struct file_lock)); locks_init_lock_heads(fl); + fl->fl_xid = -1; } EXPORT_SYMBOL(locks_init_lock); @@ -261,6 +271,7 @@ void locks_copy_lock(struct file_lock *n new->fl_file = fl->fl_file; new->fl_ops = fl->fl_ops; new->fl_lmops = fl->fl_lmops; + new->fl_xid = fl->fl_xid; locks_copy_private(new, fl); } @@ -299,6 +310,11 @@ static int flock_make_lock(struct file * fl->fl_flags = FL_FLOCK; fl->fl_type = type; fl->fl_end = OFFSET_MAX; + + vxd_assert(filp->f_xid == vx_current_xid(), + "f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid()); + fl->fl_xid = filp->f_xid; + vx_locks_inc(fl); *lock = fl; return 0; @@ -438,6 +454,7 @@ static int lease_init(struct file *filp, fl->fl_owner = current->files; fl->fl_pid = current->tgid; + fl->fl_xid = vx_current_xid(); fl->fl_file = filp; fl->fl_flags = FL_LEASE; @@ -457,6 +474,11 @@ static struct file_lock *lease_alloc(str if (fl == NULL) return ERR_PTR(error); + fl->fl_xid = vx_current_xid(); + if (filp) + vxd_assert(filp->f_xid == fl->fl_xid, + "f_xid(%d) == fl_xid(%d)", filp->f_xid, fl->fl_xid); + vx_locks_inc(fl); error = lease_init(filp, type, fl); if (error) { locks_free_lock(fl); @@ -753,6 +775,7 @@ static int flock_lock_file(struct file * lock_flocks(); } + new_fl->fl_xid = -1; find_conflict: for_each_lock(inode, before) { struct file_lock *fl = *before; @@ -773,6 +796,7 @@ find_conflict: goto out; locks_copy_lock(new_fl, request); locks_insert_lock(before, new_fl); + vx_locks_inc(new_fl); new_fl = NULL; error = 0; @@ -783,7 +807,8 @@ out: return error; } -static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock) +static int __posix_lock_file(struct inode *inode, struct file_lock *request, + struct file_lock *conflock, vxid_t xid) { struct file_lock *fl; struct file_lock *new_fl = NULL; @@ -793,6 +818,8 @@ static int __posix_lock_file(struct inod struct file_lock **before; int error, added = 0; + vxd_assert(xid == vx_current_xid(), + "xid(%d) == current(%d)", xid, vx_current_xid()); /* * We may need two file_lock structures for this operation, * so we get them in advance to avoid races. 
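The locks.c changes here and in the hunks that follow tag every struct file_lock with the context that created it (fl_xid) and account it against that context: vx_locks_avail() is consulted before a lock is allocated, vx_locks_inc()/vx_locks_dec() maintain the per-context counter around insertion and freeing, and the /proc/locks dump is filtered with the usual vx_check(). A compact sketch of the allocate-only-if-under-limit pattern; the counter, the limit and the merged inc-on-alloc are simplifications of the real per-vx_info accounting:

    /* Sketch of per-context lock accounting; limit/counter are stand-ins. */
    #include <stdio.h>
    #include <stdlib.h>

    struct toy_lock {
        int xid;                           /* creating context */
    };

    static int ctx_locks;                  /* locks currently held by the context */
    static const int ctx_lock_limit = 2;   /* assumed per-context limit */

    static struct toy_lock *lock_alloc(int xid)
    {
        if (ctx_locks + 1 > ctx_lock_limit)    /* vx_locks_avail() analogue */
            return NULL;

        struct toy_lock *fl = calloc(1, sizeof(*fl));
        if (fl) {
            fl->xid = xid;                     /* tag with the context */
            ctx_locks++;                       /* vx_locks_inc() analogue */
        }
        return fl;
    }

    static void lock_free(struct toy_lock *fl)
    {
        if (fl) {
            ctx_locks--;                       /* vx_locks_dec() analogue */
            free(fl);
        }
    }

    int main(void)
    {
        struct toy_lock *a = lock_alloc(42);
        struct toy_lock *b = lock_alloc(42);
        struct toy_lock *c = lock_alloc(42);   /* exceeds the limit */

        printf("a=%p b=%p c=%p (c should be NULL)\n",
               (void *)a, (void *)b, (void *)c);
        lock_free(b);
        lock_free(a);
        return 0;
    }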
@@ -803,7 +830,11 @@ static int __posix_lock_file(struct inod (request->fl_type != F_UNLCK || request->fl_start != 0 || request->fl_end != OFFSET_MAX)) { new_fl = locks_alloc_lock(); + new_fl->fl_xid = xid; + vx_locks_inc(new_fl); new_fl2 = locks_alloc_lock(); + new_fl2->fl_xid = xid; + vx_locks_inc(new_fl2); } lock_flocks(); @@ -1002,7 +1033,8 @@ static int __posix_lock_file(struct inod int posix_lock_file(struct file *filp, struct file_lock *fl, struct file_lock *conflock) { - return __posix_lock_file(file_inode(filp), fl, conflock); + return __posix_lock_file(file_inode(filp), + fl, conflock, filp->f_xid); } EXPORT_SYMBOL(posix_lock_file); @@ -1092,7 +1124,7 @@ int locks_mandatory_area(int read_write, fl.fl_end = offset + count - 1; for (;;) { - error = __posix_lock_file(inode, &fl, NULL); + error = __posix_lock_file(inode, &fl, NULL, filp->f_xid); if (error != FILE_LOCK_DEFERRED) break; error = wait_event_interruptible(fl.fl_wait, !fl.fl_next); @@ -1397,6 +1429,7 @@ int generic_add_lease(struct file *filp, goto out; locks_insert_lock(before, lease); + vx_locks_inc(lease); return 0; out: @@ -1836,6 +1869,11 @@ int fcntl_setlk(unsigned int fd, struct if (file_lock == NULL) return -ENOLCK; + vxd_assert(filp->f_xid == vx_current_xid(), + "f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid()); + file_lock->fl_xid = filp->f_xid; + vx_locks_inc(file_lock); + /* * This might block, so we do it before checking the inode. */ @@ -1954,6 +1992,11 @@ int fcntl_setlk64(unsigned int fd, struc if (file_lock == NULL) return -ENOLCK; + vxd_assert(filp->f_xid == vx_current_xid(), + "f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid()); + file_lock->fl_xid = filp->f_xid; + vx_locks_inc(file_lock); + /* * This might block, so we do it before checking the inode. */ @@ -2219,8 +2262,11 @@ static int locks_show(struct seq_file *f lock_get_status(f, fl, *((loff_t *)f->private), ""); - list_for_each_entry(bfl, &fl->fl_block, fl_block) + list_for_each_entry(bfl, &fl->fl_block, fl_block) { + if (!vx_check(fl->fl_xid, VS_WATCH_P | VS_IDENT)) + continue; lock_get_status(f, bfl, *((loff_t *)f->private), " ->"); + } return 0; } diff -NurpP --minimal linux-3.10.19/fs/mount.h linux-3.10.19-vs2.3.6.8/fs/mount.h --- linux-3.10.19/fs/mount.h 2013-07-14 17:01:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/mount.h 2013-08-22 20:29:59.000000000 +0000 @@ -56,6 +56,7 @@ struct mount { int mnt_expiry_mark; /* true if marked for expiry */ int mnt_pinned; int mnt_ghosts; + vtag_t mnt_tag; /* tagging used for vfsmount */ }; #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */ diff -NurpP --minimal linux-3.10.19/fs/namei.c linux-3.10.19-vs2.3.6.8/fs/namei.c --- linux-3.10.19/fs/namei.c 2013-07-14 17:01:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/namei.c 2013-08-22 23:36:09.000000000 +0000 @@ -34,9 +34,19 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include "internal.h" +#include "proc/internal.h" #include "mount.h" /* [Feb-1997 T. 
Schoebel-Theuer] @@ -266,6 +276,89 @@ static int check_acl(struct inode *inode return -EAGAIN; } +static inline int dx_barrier(const struct inode *inode) +{ + if (IS_BARRIER(inode) && !vx_check(0, VS_ADMIN | VS_WATCH)) { + vxwprintk_task(1, "did hit the barrier."); + return 1; + } + return 0; +} + +static int __dx_permission(const struct inode *inode, int mask) +{ + if (dx_barrier(inode)) + return -EACCES; + + if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) { + /* devpts is xid tagged */ + if (S_ISDIR(inode->i_mode) || + vx_check((vxid_t)i_tag_read(inode), VS_IDENT | VS_WATCH_P)) + return 0; + + /* just pretend we didn't find anything */ + return -ENOENT; + } + else if (inode->i_sb->s_magic == PROC_SUPER_MAGIC) { + struct proc_dir_entry *de = PDE(inode); + + if (de && !vx_hide_check(0, de->vx_flags)) + goto out; + + if ((mask & (MAY_WRITE | MAY_APPEND))) { + struct pid *pid; + struct task_struct *tsk; + + if (vx_check(0, VS_ADMIN | VS_WATCH_P) || + vx_flags(VXF_STATE_SETUP, 0)) + return 0; + + pid = PROC_I(inode)->pid; + if (!pid) + goto out; + + rcu_read_lock(); + tsk = pid_task(pid, PIDTYPE_PID); + vxdprintk(VXD_CBIT(tag, 0), "accessing %p[#%u]", + tsk, (tsk ? vx_task_xid(tsk) : 0)); + if (tsk && + vx_check(vx_task_xid(tsk), VS_IDENT | VS_WATCH_P)) { + rcu_read_unlock(); + return 0; + } + rcu_read_unlock(); + } + else { + /* FIXME: Should we block some entries here? */ + return 0; + } + } + else { + if (dx_notagcheck(inode->i_sb) || + dx_check((vxid_t)i_tag_read(inode), + DX_HOSTID | DX_ADMIN | DX_WATCH | DX_IDENT)) + return 0; + } + +out: + return -EACCES; +} + +int dx_permission(const struct inode *inode, int mask) +{ + int ret = __dx_permission(inode, mask); + if (unlikely(ret)) { +#ifndef CONFIG_VSERVER_WARN_DEVPTS + if (inode->i_sb->s_magic != DEVPTS_SUPER_MAGIC) +#endif + vxwprintk_task(1, + "denied [0x%x] access to inode %s:%p[#%d,%lu]", + mask, inode->i_sb->s_id, inode, + i_tag_read(inode), inode->i_ino); + } + return ret; +} + /* * This does the basic permission checking */ @@ -388,10 +481,14 @@ int __inode_permission(struct inode *ino /* * Nobody gets write access to an immutable file. */ - if (IS_IMMUTABLE(inode)) + if (IS_IMMUTABLE(inode) && !IS_COW(inode)) return -EACCES; } + retval = dx_permission(inode, mask); + if (retval) + return retval; + retval = do_inode_permission(inode, mask); if (retval) return retval; @@ -1238,7 +1335,8 @@ static void follow_dotdot(struct nameida if (nd->path.dentry == nd->root.dentry && nd->path.mnt == nd->root.mnt) { - break; + /* for sane '/' avoid follow_mount() */ + return; } if (nd->path.dentry != nd->path.mnt->mnt_root) { /* rare case of legitimate dget_parent()... 
*/ @@ -1383,6 +1481,9 @@ static int lookup_fast(struct nameidata goto unlazy; } } + + /* FIXME: check dx permission */ + path->mnt = mnt; path->dentry = dentry; if (unlikely(!__follow_mount_rcu(nd, path, inode))) @@ -1413,6 +1514,8 @@ unlazy: } } + /* FIXME: check dx permission */ + path->mnt = mnt; path->dentry = dentry; err = follow_managed(path, nd->flags); @@ -2237,7 +2340,7 @@ static int may_delete(struct inode *dir, if (IS_APPEND(dir)) return -EPERM; if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)|| - IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode)) + IS_IXORUNLINK(victim->d_inode) || IS_SWAPFILE(victim->d_inode)) return -EPERM; if (isdir) { if (!S_ISDIR(victim->d_inode->i_mode)) @@ -2316,19 +2419,25 @@ int vfs_create(struct inode *dir, struct bool want_excl) { int error = may_create(dir, dentry); - if (error) + if (error) { + vxdprintk(VXD_CBIT(misc, 3), "may_create failed with %d", error); return error; + } if (!dir->i_op->create) return -EACCES; /* shouldn't it be ENOSYS? */ mode &= S_IALLUGO; mode |= S_IFREG; error = security_inode_create(dir, dentry, mode); - if (error) + if (error) { + vxdprintk(VXD_CBIT(misc, 3), "security_inode_create failed with %d", error); return error; + } error = dir->i_op->create(dir, dentry, mode, want_excl); if (!error) fsnotify_create(dir, dentry); + else + vxdprintk(VXD_CBIT(misc, 3), "i_op->create failed with %d", error); return error; } @@ -2363,6 +2472,15 @@ static int may_open(struct path *path, i break; } +#ifdef CONFIG_VSERVER_COWBL + if (IS_COW(inode) && + ((flag & O_ACCMODE) != O_RDONLY)) { + if (IS_COW_LINK(inode)) + return -EMLINK; + inode->i_flags &= ~(S_IXUNLINK|S_IMMUTABLE); + mark_inode_dirty(inode); + } +#endif error = inode_permission(inode, acc_mode); if (error) return error; @@ -2865,6 +2983,16 @@ finish_open: } finish_open_created: error = may_open(&nd->path, acc_mode, open_flag); +#ifdef CONFIG_VSERVER_COWBL + if (error == -EMLINK) { + struct dentry *dentry; + dentry = cow_break_link(name->name); + if (IS_ERR(dentry)) + error = PTR_ERR(dentry); + else + dput(dentry); + } +#endif if (error) goto out; file->f_path.mnt = nd->path.mnt; @@ -2929,6 +3057,7 @@ static struct file *path_openat(int dfd, int opened = 0; int error; +restart: file = get_empty_filp(); if (IS_ERR(file)) return file; @@ -2965,6 +3094,16 @@ static struct file *path_openat(int dfd, error = do_last(nd, &path, file, op, &opened, pathname); put_link(nd, &link, cookie); } + +#ifdef CONFIG_VSERVER_COWBL + if (error == -EMLINK) { + if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) + path_put(&nd->root); + if (base) + fput(base); + goto restart; + } +#endif out: if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) path_put(&nd->root); @@ -3079,6 +3218,11 @@ struct dentry *kern_path_create(int dfd, goto fail; } *path = nd.path; + vxdprintk(VXD_CBIT(misc, 3), "kern_path_create path.dentry = %p (%.*s), dentry = %p (%.*s), d_inode = %p", + path->dentry, path->dentry->d_name.len, + path->dentry->d_name.name, dentry, + dentry->d_name.len, dentry->d_name.name, + path->dentry->d_inode); return dentry; fail: dput(dentry); @@ -3573,7 +3717,7 @@ int vfs_link(struct dentry *old_dentry, /* * A link to an append-only or immutable file cannot be created. 
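Under CONFIG_VSERVER_COWBL, the hunks above make a write open on a COW-marked inode fail in may_open() with -EMLINK; do_last() and path_openat() catch that value, call cow_break_link() (added further down in this file) to replace the shared file with a private copy, and then restart the lookup from scratch. The control flow, reduced to a self-contained toy; all function bodies here are stand-ins, only the EMLINK-then-retry shape mirrors the patch:

    /* Control-flow sketch of the COW-break-on-write open path. */
    #include <errno.h>
    #include <stdio.h>

    static int cow;   /* 1 while the file is still a shared COW link */

    static int may_open_sketch(int want_write)
    {
        if (cow && want_write)
            return -EMLINK;            /* "break this link first" */
        return 0;
    }

    static int cow_break_link_sketch(const char *path)
    {
        printf("copying %s to a private file\n", path);
        cow = 0;                       /* inode is now private and writable */
        return 0;
    }

    static int open_sketch(const char *path, int want_write)
    {
        int err;

    restart:
        err = may_open_sketch(want_write);
        if (err == -EMLINK) {
            err = cow_break_link_sketch(path);
            if (err)
                return err;
            goto restart;              /* path_openat() restarts the lookup */
        }
        return err;
    }

    int main(void)
    {
        cow = 1;
        printf("open for write returned %d\n", open_sketch("/shared/file", 1));
        return 0;
    }

Read-only opens never see the -EMLINK path, so shared copy-on-write files stay untouched until someone actually writes to them.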
*/ - if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) + if (IS_APPEND(inode) || IS_IXORUNLINK(inode)) return -EPERM; if (!dir->i_op->link) return -EPERM; @@ -3976,6 +4120,287 @@ int vfs_follow_link(struct nameidata *nd return __vfs_follow_link(nd, link); } + +#ifdef CONFIG_VSERVER_COWBL + +static inline +long do_cow_splice(struct file *in, struct file *out, size_t len) +{ + loff_t ppos = 0; + loff_t opos = 0; + + return do_splice_direct(in, &ppos, out, &opos, len, 0); +} + +struct dentry *cow_break_link(const char *pathname) +{ + int ret, mode, pathlen, redo = 0, drop = 1; + struct nameidata old_nd, dir_nd; + struct path dir_path, *old_path, *new_path; + struct dentry *dir, *old_dentry, *new_dentry = NULL; + struct file *old_file; + struct file *new_file; + char *to, *path, pad='\251'; + loff_t size; + + vxdprintk(VXD_CBIT(misc, 1), + "cow_break_link(" VS_Q("%s") ")", pathname); + + path = kmalloc(PATH_MAX, GFP_KERNEL); + ret = -ENOMEM; + if (!path) + goto out; + + /* old_nd.path will have refs to dentry and mnt */ + ret = do_path_lookup(AT_FDCWD, pathname, LOOKUP_FOLLOW, &old_nd); + vxdprintk(VXD_CBIT(misc, 2), + "do_path_lookup(old): %d", ret); + if (ret < 0) + goto out_free_path; + + /* dentry/mnt refs handed over to old_path */ + old_path = &old_nd.path; + /* no explicit reference for old_dentry here */ + old_dentry = old_path->dentry; + + mode = old_dentry->d_inode->i_mode; + to = d_path(old_path, path, PATH_MAX-2); + pathlen = strlen(to); + vxdprintk(VXD_CBIT(misc, 2), + "old path " VS_Q("%s") " [%p:" VS_Q("%.*s") ":%d]", to, + old_dentry, + old_dentry->d_name.len, old_dentry->d_name.name, + old_dentry->d_name.len); + + to[pathlen + 1] = 0; +retry: + new_dentry = NULL; + to[pathlen] = pad--; + ret = -ELOOP; + if (pad <= '\240') + goto out_rel_old; + + vxdprintk(VXD_CBIT(misc, 1), "temp copy " VS_Q("%s"), to); + + /* dir_nd.path will have refs to dentry and mnt */ + ret = do_path_lookup(AT_FDCWD, to, + LOOKUP_PARENT | LOOKUP_OPEN | LOOKUP_CREATE, &dir_nd); + vxdprintk(VXD_CBIT(misc, 2), "do_path_lookup(new): %d", ret); + if (ret < 0) + goto retry; + + /* this puppy downs the dir inode mutex if successful. 
+ dir_path will hold refs to dentry and mnt and + we'll have write access to the mnt */ + new_dentry = kern_path_create(AT_FDCWD, to, &dir_path, 0); + if (!new_dentry || IS_ERR(new_dentry)) { + path_put(&dir_nd.path); + vxdprintk(VXD_CBIT(misc, 2), + "kern_path_create(new) failed with %ld", + PTR_ERR(new_dentry)); + goto retry; + } + vxdprintk(VXD_CBIT(misc, 2), + "kern_path_create(new): %p [" VS_Q("%.*s") ":%d]", + new_dentry, + new_dentry->d_name.len, new_dentry->d_name.name, + new_dentry->d_name.len); + + /* take a reference on new_dentry */ + dget(new_dentry); + + /* dentry/mnt refs handed over to new_path */ + new_path = &dir_path; + + /* dentry for old/new dir */ + dir = dir_nd.path.dentry; + + /* give up reference on dir */ + dput(new_path->dentry); + + /* new_dentry already has a reference */ + new_path->dentry = new_dentry; + + ret = vfs_create(dir->d_inode, new_dentry, mode, 1); + vxdprintk(VXD_CBIT(misc, 2), + "vfs_create(new): %d", ret); + if (ret == -EEXIST) { + path_put(&dir_nd.path); + mutex_unlock(&dir->d_inode->i_mutex); + mnt_drop_write(new_path->mnt); + path_put(new_path); + new_dentry = NULL; + goto retry; + } + else if (ret < 0) + goto out_unlock_new; + + /* drop out early, ret passes ENOENT */ + ret = -ENOENT; + if ((redo = d_unhashed(old_dentry))) + goto out_unlock_new; + + /* doesn't change refs for old_path */ + old_file = dentry_open(old_path, O_RDONLY, current_cred()); + vxdprintk(VXD_CBIT(misc, 2), + "dentry_open(old): %p", old_file); + if (IS_ERR(old_file)) { + ret = PTR_ERR(old_file); + goto out_unlock_new; + } + + /* doesn't change refs for new_path */ + new_file = dentry_open(new_path, O_WRONLY, current_cred()); + vxdprintk(VXD_CBIT(misc, 2), + "dentry_open(new): %p", new_file); + if (IS_ERR(new_file)) { + ret = PTR_ERR(new_file); + goto out_fput_old; + } + + /* unlock the inode mutex from kern_path_create() */ + mutex_unlock(&dir->d_inode->i_mutex); + + /* drop write access to mnt */ + mnt_drop_write(new_path->mnt); + + drop = 0; + + size = i_size_read(old_file->f_dentry->d_inode); + ret = do_cow_splice(old_file, new_file, size); + vxdprintk(VXD_CBIT(misc, 2), "do_splice_direct: %d", ret); + if (ret < 0) { + goto out_fput_both; + } else if (ret < size) { + ret = -ENOSPC; + goto out_fput_both; + } else { + struct inode *old_inode = old_dentry->d_inode; + struct inode *new_inode = new_dentry->d_inode; + struct iattr attr = { + .ia_uid = old_inode->i_uid, + .ia_gid = old_inode->i_gid, + .ia_valid = ATTR_UID | ATTR_GID + }; + + setattr_copy(new_inode, &attr); + mark_inode_dirty(new_inode); + } + + /* lock rename mutex */ + mutex_lock(&old_dentry->d_inode->i_sb->s_vfs_rename_mutex); + + /* drop out late */ + ret = -ENOENT; + if ((redo = d_unhashed(old_dentry))) + goto out_unlock; + + vxdprintk(VXD_CBIT(misc, 2), + "vfs_rename: [" VS_Q("%*s") ":%d] -> [" VS_Q("%*s") ":%d]", + new_dentry->d_name.len, new_dentry->d_name.name, + new_dentry->d_name.len, + old_dentry->d_name.len, old_dentry->d_name.name, + old_dentry->d_name.len); + ret = vfs_rename(dir_nd.path.dentry->d_inode, new_dentry, + old_dentry->d_parent->d_inode, old_dentry); + vxdprintk(VXD_CBIT(misc, 2), "vfs_rename: %d", ret); + +out_unlock: + mutex_unlock(&old_dentry->d_inode->i_sb->s_vfs_rename_mutex); + +out_fput_both: + vxdprintk(VXD_CBIT(misc, 3), + "fput(new_file=%p[#%ld])", new_file, + atomic_long_read(&new_file->f_count)); + fput(new_file); + +out_fput_old: + vxdprintk(VXD_CBIT(misc, 3), + "fput(old_file=%p[#%ld])", old_file, + atomic_long_read(&old_file->f_count)); + fput(old_file); + 
+out_unlock_new: + /* drop references from dir_nd.path */ + path_put(&dir_nd.path); + + if (drop) { + /* unlock the inode mutex from kern_path_create() */ + mutex_unlock(&dir->d_inode->i_mutex); + + /* drop write access to mnt */ + mnt_drop_write(new_path->mnt); + } + + if (!ret) + goto out_redo; + + /* error path cleanup */ + vfs_unlink(dir->d_inode, new_dentry); + +out_redo: + if (!redo) + goto out_rel_both; + + /* lookup dentry once again + old_nd.path will be freed as old_path in out_rel_old */ + ret = do_path_lookup(AT_FDCWD, pathname, LOOKUP_FOLLOW, &old_nd); + if (ret) + goto out_rel_both; + + /* drop reference on new_dentry */ + dput(new_dentry); + new_dentry = old_path->dentry; + dget(new_dentry); + vxdprintk(VXD_CBIT(misc, 2), + "do_path_lookup(redo): %p [" VS_Q("%.*s") ":%d]", + new_dentry, + new_dentry->d_name.len, new_dentry->d_name.name, + new_dentry->d_name.len); + +out_rel_both: + if (new_path) + path_put(new_path); +out_rel_old: + path_put(old_path); +out_free_path: + kfree(path); +out: + if (ret) { + dput(new_dentry); + new_dentry = ERR_PTR(ret); + } + vxdprintk(VXD_CBIT(misc, 3), + "cow_break_link returning with %p", new_dentry); + return new_dentry; +} + +#endif + +int vx_info_mnt_namespace(struct mnt_namespace *ns, char *buffer) +{ + struct path path; + struct vfsmount *vmnt; + char *pstr, *root; + int length = 0; + + pstr = kmalloc(PATH_MAX, GFP_KERNEL); + if (!pstr) + return 0; + + vmnt = &ns->root->mnt; + path.mnt = vmnt; + path.dentry = vmnt->mnt_root; + root = d_path(&path, pstr, PATH_MAX - 2); + length = sprintf(buffer + length, + "Namespace:\t%p [#%u]\n" + "RootPath:\t%s\n", + ns, atomic_read(&ns->count), + root); + kfree(pstr); + return length; +} + /* get the link contents into pagecache */ static char *page_getlink(struct dentry * dentry, struct page **ppage) { @@ -4099,3 +4524,4 @@ EXPORT_SYMBOL(vfs_symlink); EXPORT_SYMBOL(vfs_unlink); EXPORT_SYMBOL(dentry_unhash); EXPORT_SYMBOL(generic_readlink); +EXPORT_SYMBOL(vx_info_mnt_namespace); diff -NurpP --minimal linux-3.10.19/fs/namespace.c linux-3.10.19-vs2.3.6.8/fs/namespace.c --- linux-3.10.19/fs/namespace.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/namespace.c 2013-11-13 17:17:16.000000000 +0000 @@ -23,6 +23,11 @@ #include #include #include +#include +#include +#include +#include +#include #include "pnode.h" #include "internal.h" @@ -780,6 +785,10 @@ vfs_kern_mount(struct file_system_type * if (!type) return ERR_PTR(-ENODEV); + if ((type->fs_flags & FS_BINARY_MOUNTDATA) && + !vx_capable(CAP_SYS_ADMIN, VXC_BINARY_MOUNT)) + return ERR_PTR(-EPERM); + mnt = alloc_vfsmnt(name); if (!mnt) return ERR_PTR(-ENOMEM); @@ -836,6 +845,7 @@ static struct mount *clone_mnt(struct mo mnt->mnt.mnt_root = dget(root); mnt->mnt_mountpoint = mnt->mnt.mnt_root; mnt->mnt_parent = mnt; + mnt->mnt_tag = old->mnt_tag; br_write_lock(&vfsmount_lock); list_add_tail(&mnt->mnt_instance, &sb->s_mounts); br_write_unlock(&vfsmount_lock); @@ -1291,7 +1301,8 @@ static int do_umount(struct mount *mnt, */ static inline bool may_mount(void) { - return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN); + return vx_ns_capable(current->nsproxy->mnt_ns->user_ns, + CAP_SYS_ADMIN, VXC_SECURE_MOUNT); } /* @@ -1685,6 +1696,7 @@ static int do_change_type(struct path *p if (err) goto out_unlock; } + // mnt->mnt_flags = mnt_flags; br_write_lock(&vfsmount_lock); for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL)) @@ -1700,12 +1712,14 @@ static int do_change_type(struct path *p * do loopback mount. 
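The namespace.c hunks above replace plain capability checks with vserver-aware ones: vfs_kern_mount() refuses binary mount data unless the context holds VXC_BINARY_MOUNT, and may_mount() goes through vx_ns_capable(..., CAP_SYS_ADMIN, VXC_SECURE_MOUNT), so a guest can be granted a narrow mount right without full CAP_SYS_ADMIN on the host. A minimal sketch of that two-way check; the flag values and the exact rule (POSIX capability or context ccap) are assumptions for illustration:

    /* Sketch of a vx_capable()-style check; values and rule are assumed. */
    #include <stdio.h>

    #define TOY_CAP_SYS_ADMIN    0x01
    #define TOY_VXC_SECURE_MOUNT 0x02   /* hypothetical ccap bit */

    static unsigned int task_caps = 0;                     /* no CAP_SYS_ADMIN */
    static unsigned int ctx_ccaps = TOY_VXC_SECURE_MOUNT;  /* granted to the guest */

    static int vx_capable_sketch(unsigned int cap, unsigned int ccap)
    {
        if (task_caps & cap)
            return 1;                       /* ordinary capability check */
        return (ctx_ccaps & ccap) != 0;     /* or the context carries the ccap */
    }

    int main(void)
    {
        printf("may_mount: %s\n",
               vx_capable_sketch(TOY_CAP_SYS_ADMIN, TOY_VXC_SECURE_MOUNT) ?
               "allowed" : "denied");
        return 0;
    }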
*/ static int do_loopback(struct path *path, const char *old_name, - int recurse) + vtag_t tag, unsigned long flags, int mnt_flags) { struct path old_path; struct mount *mnt = NULL, *old, *parent; struct mountpoint *mp; + int recurse = flags & MS_REC; int err; + if (!old_name || !*old_name) return -EINVAL; err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path); @@ -1780,7 +1794,7 @@ static int change_mount_flags(struct vfs * on it - tough luck. */ static int do_remount(struct path *path, int flags, int mnt_flags, - void *data) + void *data, vxid_t xid) { int err; struct super_block *sb = path->mnt->mnt_sb; @@ -2264,6 +2278,7 @@ long do_mount(const char *dev_name, cons struct path path; int retval = 0; int mnt_flags = 0; + vtag_t tag = 0; /* Discard magic */ if ((flags & MS_MGC_MSK) == MS_MGC_VAL) @@ -2293,6 +2308,12 @@ long do_mount(const char *dev_name, cons if (!(flags & MS_NOATIME)) mnt_flags |= MNT_RELATIME; + if (dx_parse_tag(data_page, &tag, 1, &mnt_flags, &flags)) { + /* FIXME: bind and re-mounts get the tag flag? */ + if (flags & (MS_BIND|MS_REMOUNT)) + flags |= MS_TAGID; + } + /* Separate the per-mountpoint flags */ if (flags & MS_NOSUID) mnt_flags |= MNT_NOSUID; @@ -2309,15 +2330,17 @@ long do_mount(const char *dev_name, cons if (flags & MS_RDONLY) mnt_flags |= MNT_READONLY; + if (!vx_capable(CAP_SYS_ADMIN, VXC_DEV_MOUNT)) + mnt_flags |= MNT_NODEV; flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN | MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | MS_STRICTATIME); if (flags & MS_REMOUNT) retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, - data_page); + data_page, tag); else if (flags & MS_BIND) - retval = do_loopback(&path, dev_name, flags & MS_REC); + retval = do_loopback(&path, dev_name, tag, flags, mnt_flags); else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) retval = do_change_type(&path, flags); else if (flags & MS_MOVE) @@ -2426,6 +2449,7 @@ static struct mnt_namespace *dup_mnt_ns( q = next_mnt(q, new); } namespace_unlock(); + atomic_inc(&vs_global_mnt_ns); if (rootmnt) mntput(rootmnt); @@ -2624,9 +2648,10 @@ SYSCALL_DEFINE2(pivot_root, const char _ new_mnt = real_mount(new.mnt); root_mnt = real_mount(root.mnt); old_mnt = real_mount(old.mnt); - if (IS_MNT_SHARED(old_mnt) || + if ((IS_MNT_SHARED(old_mnt) || IS_MNT_SHARED(new_mnt->mnt_parent) || - IS_MNT_SHARED(root_mnt->mnt_parent)) + IS_MNT_SHARED(root_mnt->mnt_parent)) && + !vx_flags(VXF_STATE_SETUP, 0)) goto out4; if (!check_mnt(root_mnt) || !check_mnt(new_mnt)) goto out4; @@ -2752,6 +2777,7 @@ void put_mnt_ns(struct mnt_namespace *ns umount_tree(ns->root, 0); br_write_unlock(&vfsmount_lock); namespace_unlock(); + atomic_dec(&vs_global_mnt_ns); free_mnt_ns(ns); } diff -NurpP --minimal linux-3.10.19/fs/nfs/client.c linux-3.10.19-vs2.3.6.8/fs/nfs/client.c --- linux-3.10.19/fs/nfs/client.c 2013-07-14 17:01:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/nfs/client.c 2013-08-22 20:29:59.000000000 +0000 @@ -684,6 +684,9 @@ int nfs_init_server_rpcclient(struct nfs if (server->flags & NFS_MOUNT_SOFT) server->client->cl_softrtry = 1; + server->client->cl_tag = 0; + if (server->flags & NFS_MOUNT_TAGGED) + server->client->cl_tag = 1; return 0; } EXPORT_SYMBOL_GPL(nfs_init_server_rpcclient); @@ -863,6 +866,10 @@ static void nfs_server_set_fsinfo(struct server->acdirmin = server->acdirmax = 0; } + /* FIXME: needs fsinfo + if (server->flags & NFS_MOUNT_TAGGED) + sb->s_flags |= MS_TAGGED; */ + server->maxfilesize = fsinfo->maxfilesize; server->time_delta = 
fsinfo->time_delta; diff -NurpP --minimal linux-3.10.19/fs/nfs/dir.c linux-3.10.19-vs2.3.6.8/fs/nfs/dir.c --- linux-3.10.19/fs/nfs/dir.c 2013-07-14 17:01:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/nfs/dir.c 2013-08-22 20:29:59.000000000 +0000 @@ -36,6 +36,7 @@ #include #include #include +#include #include "delegation.h" #include "iostat.h" @@ -1300,6 +1301,7 @@ struct dentry *nfs_lookup(struct inode * /* Success: notify readdir to use READDIRPLUS */ nfs_advise_use_readdirplus(dir); + dx_propagate_tag(nd, inode); no_entry: res = d_materialise_unique(dentry, inode); if (res != NULL) { diff -NurpP --minimal linux-3.10.19/fs/nfs/inode.c linux-3.10.19-vs2.3.6.8/fs/nfs/inode.c --- linux-3.10.19/fs/nfs/inode.c 2013-07-14 17:01:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/nfs/inode.c 2013-08-22 20:29:59.000000000 +0000 @@ -39,6 +39,7 @@ #include #include #include +#include #include @@ -290,6 +291,8 @@ nfs_fhget(struct super_block *sb, struct if (inode->i_state & I_NEW) { struct nfs_inode *nfsi = NFS_I(inode); unsigned long now = jiffies; + kuid_t kuid; + kgid_t kgid; /* We set i_ino for the few things that still rely on it, * such as stat(2) */ @@ -334,8 +337,8 @@ nfs_fhget(struct super_block *sb, struct inode->i_version = 0; inode->i_size = 0; clear_nlink(inode); - inode->i_uid = make_kuid(&init_user_ns, -2); - inode->i_gid = make_kgid(&init_user_ns, -2); + kuid = make_kuid(&init_user_ns, -2); + kgid = make_kgid(&init_user_ns, -2); inode->i_blocks = 0; memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf)); nfsi->write_io = 0; @@ -369,11 +372,11 @@ nfs_fhget(struct super_block *sb, struct else if (nfs_server_capable(inode, NFS_CAP_NLINK)) nfsi->cache_validity |= NFS_INO_INVALID_ATTR; if (fattr->valid & NFS_ATTR_FATTR_OWNER) - inode->i_uid = fattr->uid; + kuid = fattr->uid; else if (nfs_server_capable(inode, NFS_CAP_OWNER)) nfsi->cache_validity |= NFS_INO_INVALID_ATTR; if (fattr->valid & NFS_ATTR_FATTR_GROUP) - inode->i_gid = fattr->gid; + kgid = fattr->gid; else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP)) nfsi->cache_validity |= NFS_INO_INVALID_ATTR; if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED) @@ -384,6 +387,11 @@ nfs_fhget(struct super_block *sb, struct */ inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used); } + inode->i_uid = INOTAG_KUID(DX_TAG(inode), kuid, kgid); + inode->i_gid = INOTAG_KGID(DX_TAG(inode), kuid, kgid); + inode->i_tag = INOTAG_KTAG(DX_TAG(inode), kuid, kgid, GLOBAL_ROOT_TAG); + /* maybe fattr->xid someday */ + nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); nfsi->attrtimeo_timestamp = now; nfsi->access_cache = RB_ROOT; @@ -505,6 +513,8 @@ void nfs_setattr_update_inode(struct ino inode->i_uid = attr->ia_uid; if ((attr->ia_valid & ATTR_GID) != 0) inode->i_gid = attr->ia_gid; + if ((attr->ia_valid & ATTR_TAG) && IS_TAGGED(inode)) + inode->i_tag = attr->ia_tag; NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; spin_unlock(&inode->i_lock); } @@ -982,7 +992,9 @@ static int nfs_check_inode_attributes(st struct nfs_inode *nfsi = NFS_I(inode); loff_t cur_size, new_isize; unsigned long invalid = 0; - + kuid_t kuid; + kgid_t kgid; + ktag_t ktag; if (nfs_have_delegated_attributes(inode)) return 0; @@ -1007,13 +1019,18 @@ static int nfs_check_inode_attributes(st invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE; } + kuid = INOTAG_KUID(DX_TAG(inode), fattr->uid, fattr->gid); + kgid = INOTAG_KGID(DX_TAG(inode), fattr->uid, fattr->gid); + ktag = INOTAG_KTAG(DX_TAG(inode), fattr->uid, fattr->gid, GLOBAL_ROOT_TAG); + /* Have any file 
permissions changed? */ if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL; - if ((fattr->valid & NFS_ATTR_FATTR_OWNER) && !uid_eq(inode->i_uid, fattr->uid)) + if ((fattr->valid & NFS_ATTR_FATTR_OWNER) && !uid_eq(inode->i_uid, kuid)) invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL; - if ((fattr->valid & NFS_ATTR_FATTR_GROUP) && !gid_eq(inode->i_gid, fattr->gid)) + if ((fattr->valid & NFS_ATTR_FATTR_GROUP) && !gid_eq(inode->i_gid, kgid)) invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL; + /* maybe check for tag too? */ /* Has the link count changed? */ if ((fattr->valid & NFS_ATTR_FATTR_NLINK) && inode->i_nlink != fattr->nlink) @@ -1319,6 +1336,9 @@ static int nfs_update_inode(struct inode unsigned long invalid = 0; unsigned long now = jiffies; unsigned long save_cache_validity; + kuid_t kuid; + kgid_t kgid; + ktag_t ktag; dfprintk(VFS, "NFS: %s(%s/%ld fh_crc=0x%08x ct=%d info=0x%x)\n", __func__, inode->i_sb->s_id, inode->i_ino, @@ -1420,6 +1440,9 @@ static int nfs_update_inode(struct inode | NFS_INO_REVAL_PAGECACHE | NFS_INO_REVAL_FORCED); + kuid = TAGINO_KUID(DX_TAG(inode), inode->i_uid, inode->i_tag); + kgid = TAGINO_KGID(DX_TAG(inode), inode->i_gid, inode->i_tag); + ktag = TAGINO_KTAG(DX_TAG(inode), inode->i_tag); if (fattr->valid & NFS_ATTR_FATTR_ATIME) memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime)); @@ -1462,6 +1485,10 @@ static int nfs_update_inode(struct inode | NFS_INO_INVALID_ACL | NFS_INO_REVAL_FORCED); + inode->i_uid = INOTAG_KUID(DX_TAG(inode), kuid, kgid); + inode->i_gid = INOTAG_KGID(DX_TAG(inode), kuid, kgid); + inode->i_tag = INOTAG_KTAG(DX_TAG(inode), kuid, kgid, ktag); + if (fattr->valid & NFS_ATTR_FATTR_NLINK) { if (inode->i_nlink != fattr->nlink) { invalid |= NFS_INO_INVALID_ATTR; diff -NurpP --minimal linux-3.10.19/fs/nfs/nfs3xdr.c linux-3.10.19-vs2.3.6.8/fs/nfs/nfs3xdr.c --- linux-3.10.19/fs/nfs/nfs3xdr.c 2013-05-31 13:45:24.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/nfs/nfs3xdr.c 2013-08-22 20:29:59.000000000 +0000 @@ -20,6 +20,7 @@ #include #include #include +#include #include "internal.h" #define NFSDBG_FACILITY NFSDBG_XDR @@ -558,7 +559,8 @@ static __be32 *xdr_decode_nfstime3(__be3 * set_mtime mtime; * }; */ -static void encode_sattr3(struct xdr_stream *xdr, const struct iattr *attr) +static void encode_sattr3(struct xdr_stream *xdr, + const struct iattr *attr, int tag) { u32 nbytes; __be32 *p; @@ -590,15 +592,19 @@ static void encode_sattr3(struct xdr_str } else *p++ = xdr_zero; - if (attr->ia_valid & ATTR_UID) { + if (attr->ia_valid & ATTR_UID || + (tag && (attr->ia_valid & ATTR_TAG))) { *p++ = xdr_one; - *p++ = cpu_to_be32(from_kuid(&init_user_ns, attr->ia_uid)); + *p++ = cpu_to_be32(from_kuid(&init_user_ns, + TAGINO_KUID(tag, attr->ia_uid, attr->ia_tag))); } else *p++ = xdr_zero; - if (attr->ia_valid & ATTR_GID) { + if (attr->ia_valid & ATTR_GID || + (tag && (attr->ia_valid & ATTR_TAG))) { *p++ = xdr_one; - *p++ = cpu_to_be32(from_kgid(&init_user_ns, attr->ia_gid)); + *p++ = cpu_to_be32(from_kgid(&init_user_ns, + TAGINO_KGID(tag, attr->ia_gid, attr->ia_tag))); } else *p++ = xdr_zero; @@ -887,7 +893,7 @@ static void nfs3_xdr_enc_setattr3args(st const struct nfs3_sattrargs *args) { encode_nfs_fh3(xdr, args->fh); - encode_sattr3(xdr, args->sattr); + encode_sattr3(xdr, args->sattr, req->rq_task->tk_client->cl_tag); encode_sattrguard3(xdr, args); } @@ 
-1037,13 +1043,13 @@ static void nfs3_xdr_enc_write3args(stru * }; */ static void encode_createhow3(struct xdr_stream *xdr, - const struct nfs3_createargs *args) + const struct nfs3_createargs *args, int tag) { encode_uint32(xdr, args->createmode); switch (args->createmode) { case NFS3_CREATE_UNCHECKED: case NFS3_CREATE_GUARDED: - encode_sattr3(xdr, args->sattr); + encode_sattr3(xdr, args->sattr, tag); break; case NFS3_CREATE_EXCLUSIVE: encode_createverf3(xdr, args->verifier); @@ -1058,7 +1064,7 @@ static void nfs3_xdr_enc_create3args(str const struct nfs3_createargs *args) { encode_diropargs3(xdr, args->fh, args->name, args->len); - encode_createhow3(xdr, args); + encode_createhow3(xdr, args, req->rq_task->tk_client->cl_tag); } /* @@ -1074,7 +1080,7 @@ static void nfs3_xdr_enc_mkdir3args(stru const struct nfs3_mkdirargs *args) { encode_diropargs3(xdr, args->fh, args->name, args->len); - encode_sattr3(xdr, args->sattr); + encode_sattr3(xdr, args->sattr, req->rq_task->tk_client->cl_tag); } /* @@ -1091,9 +1097,9 @@ static void nfs3_xdr_enc_mkdir3args(stru * }; */ static void encode_symlinkdata3(struct xdr_stream *xdr, - const struct nfs3_symlinkargs *args) + const struct nfs3_symlinkargs *args, int tag) { - encode_sattr3(xdr, args->sattr); + encode_sattr3(xdr, args->sattr, tag); encode_nfspath3(xdr, args->pages, args->pathlen); } @@ -1102,7 +1108,7 @@ static void nfs3_xdr_enc_symlink3args(st const struct nfs3_symlinkargs *args) { encode_diropargs3(xdr, args->fromfh, args->fromname, args->fromlen); - encode_symlinkdata3(xdr, args); + encode_symlinkdata3(xdr, args, req->rq_task->tk_client->cl_tag); } /* @@ -1130,24 +1136,24 @@ static void nfs3_xdr_enc_symlink3args(st * }; */ static void encode_devicedata3(struct xdr_stream *xdr, - const struct nfs3_mknodargs *args) + const struct nfs3_mknodargs *args, int tag) { - encode_sattr3(xdr, args->sattr); + encode_sattr3(xdr, args->sattr, tag); encode_specdata3(xdr, args->rdev); } static void encode_mknoddata3(struct xdr_stream *xdr, - const struct nfs3_mknodargs *args) + const struct nfs3_mknodargs *args, int tag) { encode_ftype3(xdr, args->type); switch (args->type) { case NF3CHR: case NF3BLK: - encode_devicedata3(xdr, args); + encode_devicedata3(xdr, args, tag); break; case NF3SOCK: case NF3FIFO: - encode_sattr3(xdr, args->sattr); + encode_sattr3(xdr, args->sattr, tag); break; case NF3REG: case NF3DIR: @@ -1162,7 +1168,7 @@ static void nfs3_xdr_enc_mknod3args(stru const struct nfs3_mknodargs *args) { encode_diropargs3(xdr, args->fh, args->name, args->len); - encode_mknoddata3(xdr, args); + encode_mknoddata3(xdr, args, req->rq_task->tk_client->cl_tag); } /* diff -NurpP --minimal linux-3.10.19/fs/nfs/super.c linux-3.10.19-vs2.3.6.8/fs/nfs/super.c --- linux-3.10.19/fs/nfs/super.c 2013-07-14 17:01:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/nfs/super.c 2013-08-22 20:29:59.000000000 +0000 @@ -55,6 +55,7 @@ #include #include #include +#include #include @@ -103,6 +104,7 @@ enum { Opt_mountport, Opt_mountvers, Opt_minorversion, + Opt_tagid, /* Mount options that take string arguments */ Opt_nfsvers, @@ -115,6 +117,9 @@ enum { /* Special mount options */ Opt_userspace, Opt_deprecated, Opt_sloppy, + /* Linux-VServer tagging options */ + Opt_tag, Opt_notag, + Opt_err }; @@ -184,6 +189,10 @@ static const match_table_t nfs_mount_opt { Opt_fscache_uniq, "fsc=%s" }, { Opt_local_lock, "local_lock=%s" }, + { Opt_tag, "tag" }, + { Opt_notag, "notag" }, + { Opt_tagid, "tagid=%u" }, + /* The following needs to be listed after all other options */ { Opt_nfsvers, 
"v%s" }, @@ -635,6 +644,7 @@ static void nfs_show_mount_options(struc { NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" }, { NFS_MOUNT_UNSHARED, ",nosharecache", "" }, { NFS_MOUNT_NORESVPORT, ",noresvport", "" }, + { NFS_MOUNT_TAGGED, ",tag", "" }, { 0, NULL, NULL } }; const struct proc_nfs_info *nfs_infop; @@ -1261,6 +1271,14 @@ static int nfs_parse_mount_options(char case Opt_nomigration: mnt->options &= NFS_OPTION_MIGRATION; break; +#ifndef CONFIG_TAGGING_NONE + case Opt_tag: + mnt->flags |= NFS_MOUNT_TAGGED; + break; + case Opt_notag: + mnt->flags &= ~NFS_MOUNT_TAGGED; + break; +#endif /* * options that take numeric values @@ -1347,6 +1365,12 @@ static int nfs_parse_mount_options(char goto out_invalid_value; mnt->minorversion = option; break; +#ifdef CONFIG_PROPAGATE + case Opt_tagid: + /* use args[0] */ + nfs_data.flags |= NFS_MOUNT_TAGGED; + break; +#endif /* * options that take text values diff -NurpP --minimal linux-3.10.19/fs/nfsd/auth.c linux-3.10.19-vs2.3.6.8/fs/nfsd/auth.c --- linux-3.10.19/fs/nfsd/auth.c 2013-05-31 13:45:24.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/nfsd/auth.c 2013-08-22 20:29:59.000000000 +0000 @@ -2,6 +2,7 @@ #include #include +#include #include "nfsd.h" #include "auth.h" @@ -37,6 +38,9 @@ int nfsd_setuser(struct svc_rqst *rqstp, new->fsuid = rqstp->rq_cred.cr_uid; new->fsgid = rqstp->rq_cred.cr_gid; + /* FIXME: this desperately needs a tag :) + new->xid = (vxid_t)INOTAG_TAG(DX_TAG_NFSD, cred.cr_uid, cred.cr_gid, 0); + */ rqgi = rqstp->rq_cred.cr_group_info; diff -NurpP --minimal linux-3.10.19/fs/nfsd/nfs3xdr.c linux-3.10.19-vs2.3.6.8/fs/nfsd/nfs3xdr.c --- linux-3.10.19/fs/nfsd/nfs3xdr.c 2013-05-31 13:45:24.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/nfsd/nfs3xdr.c 2013-08-22 20:29:59.000000000 +0000 @@ -8,6 +8,7 @@ #include #include +#include #include "xdr3.h" #include "auth.h" #include "netns.h" @@ -98,6 +99,8 @@ static __be32 * decode_sattr3(__be32 *p, struct iattr *iap) { u32 tmp; + kuid_t kuid = GLOBAL_ROOT_UID; + kgid_t kgid = GLOBAL_ROOT_GID; iap->ia_valid = 0; @@ -106,15 +109,18 @@ decode_sattr3(__be32 *p, struct iattr *i iap->ia_mode = ntohl(*p++); } if (*p++) { - iap->ia_uid = make_kuid(&init_user_ns, ntohl(*p++)); + kuid = make_kuid(&init_user_ns, ntohl(*p++)); if (uid_valid(iap->ia_uid)) iap->ia_valid |= ATTR_UID; } if (*p++) { - iap->ia_gid = make_kgid(&init_user_ns, ntohl(*p++)); + kgid = make_kgid(&init_user_ns, ntohl(*p++)); if (gid_valid(iap->ia_gid)) iap->ia_valid |= ATTR_GID; } + iap->ia_uid = INOTAG_KUID(DX_TAG_NFSD, kuid, kgid); + iap->ia_gid = INOTAG_KGID(DX_TAG_NFSD, kuid, kgid); + iap->ia_tag = INOTAG_KTAG(DX_TAG_NFSD, kuid, kgid, GLOBAL_ROOT_TAG); if (*p++) { u64 newsize; @@ -170,8 +176,12 @@ encode_fattr3(struct svc_rqst *rqstp, __ *p++ = htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]); *p++ = htonl((u32) stat->mode); *p++ = htonl((u32) stat->nlink); - *p++ = htonl((u32) from_kuid(&init_user_ns, stat->uid)); - *p++ = htonl((u32) from_kgid(&init_user_ns, stat->gid)); + *p++ = htonl((u32) from_kuid(&init_user_ns, + TAGINO_KUID(0 /* FIXME: DX_TAG(dentry->d_inode) */, + stat->uid, stat->tag))); + *p++ = htonl((u32) from_kgid(&init_user_ns, + TAGINO_KGID(0 /* FIXME: DX_TAG(dentry->d_inode) */, + stat->gid, stat->tag))); if (S_ISLNK(stat->mode) && stat->size > NFS3_MAXPATHLEN) { p = xdr_encode_hyper(p, (u64) NFS3_MAXPATHLEN); } else { diff -NurpP --minimal linux-3.10.19/fs/nfsd/nfs4xdr.c linux-3.10.19-vs2.3.6.8/fs/nfsd/nfs4xdr.c --- linux-3.10.19/fs/nfsd/nfs4xdr.c 2013-11-13 17:21:14.000000000 +0000 +++ 
linux-3.10.19-vs2.3.6.8/fs/nfsd/nfs4xdr.c 2013-11-13 17:17:16.000000000 +0000 @@ -46,6 +46,7 @@ #include #include #include +#include #include "idmap.h" #include "acl.h" @@ -2320,14 +2321,18 @@ out_acl: WRITE32(stat.nlink); } if (bmval1 & FATTR4_WORD1_OWNER) { - status = nfsd4_encode_user(rqstp, stat.uid, &p, &buflen); + status = nfsd4_encode_user(rqstp, + TAGINO_KUID(DX_TAG(dentry->d_inode), + stat.uid, stat.tag), &p, &buflen); if (status == nfserr_resource) goto out_resource; if (status) goto out; } if (bmval1 & FATTR4_WORD1_OWNER_GROUP) { - status = nfsd4_encode_group(rqstp, stat.gid, &p, &buflen); + status = nfsd4_encode_group(rqstp, + TAGINO_KGID(DX_TAG(dentry->d_inode), + stat.gid, stat.tag), &p, &buflen); if (status == nfserr_resource) goto out_resource; if (status) diff -NurpP --minimal linux-3.10.19/fs/nfsd/nfsxdr.c linux-3.10.19-vs2.3.6.8/fs/nfsd/nfsxdr.c --- linux-3.10.19/fs/nfsd/nfsxdr.c 2013-05-31 13:45:24.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/nfsd/nfsxdr.c 2013-08-22 20:29:59.000000000 +0000 @@ -7,6 +7,7 @@ #include "vfs.h" #include "xdr.h" #include "auth.h" +#include #define NFSDDBG_FACILITY NFSDDBG_XDR @@ -89,6 +90,8 @@ static __be32 * decode_sattr(__be32 *p, struct iattr *iap) { u32 tmp, tmp1; + kuid_t kuid = GLOBAL_ROOT_UID; + kgid_t kgid = GLOBAL_ROOT_GID; iap->ia_valid = 0; @@ -101,15 +104,18 @@ decode_sattr(__be32 *p, struct iattr *ia iap->ia_mode = tmp; } if ((tmp = ntohl(*p++)) != (u32)-1) { - iap->ia_uid = make_kuid(&init_user_ns, tmp); + kuid = make_kuid(&init_user_ns, tmp); if (uid_valid(iap->ia_uid)) iap->ia_valid |= ATTR_UID; } if ((tmp = ntohl(*p++)) != (u32)-1) { - iap->ia_gid = make_kgid(&init_user_ns, tmp); + kgid = make_kgid(&init_user_ns, tmp); if (gid_valid(iap->ia_gid)) iap->ia_valid |= ATTR_GID; } + iap->ia_uid = INOTAG_KUID(DX_TAG_NFSD, kuid, kgid); + iap->ia_gid = INOTAG_KGID(DX_TAG_NFSD, kuid, kgid); + iap->ia_tag = INOTAG_KTAG(DX_TAG_NFSD, kuid, kgid, GLOBAL_ROOT_TAG); if ((tmp = ntohl(*p++)) != (u32)-1) { iap->ia_valid |= ATTR_SIZE; iap->ia_size = tmp; @@ -154,8 +160,10 @@ encode_fattr(struct svc_rqst *rqstp, __b *p++ = htonl(nfs_ftypes[type >> 12]); *p++ = htonl((u32) stat->mode); *p++ = htonl((u32) stat->nlink); - *p++ = htonl((u32) from_kuid(&init_user_ns, stat->uid)); - *p++ = htonl((u32) from_kgid(&init_user_ns, stat->gid)); + *p++ = htonl((u32) from_kuid(&init_user_ns, + TAGINO_KUID(DX_TAG(dentry->d_inode), stat->uid, stat->tag))); + *p++ = htonl((u32) from_kgid(&init_user_ns, + TAGINO_KGID(DX_TAG(dentry->d_inode), stat->gid, stat->tag))); if (S_ISLNK(type) && stat->size > NFS_MAXPATHLEN) { *p++ = htonl(NFS_MAXPATHLEN); diff -NurpP --minimal linux-3.10.19/fs/ocfs2/dlmglue.c linux-3.10.19-vs2.3.6.8/fs/ocfs2/dlmglue.c --- linux-3.10.19/fs/ocfs2/dlmglue.c 2013-07-14 17:01:29.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ocfs2/dlmglue.c 2013-08-22 20:29:59.000000000 +0000 @@ -2047,6 +2047,7 @@ static void __ocfs2_stuff_meta_lvb(struc lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters); lvb->lvb_iuid = cpu_to_be32(i_uid_read(inode)); lvb->lvb_igid = cpu_to_be32(i_gid_read(inode)); + lvb->lvb_itag = cpu_to_be16(i_tag_read(inode)); lvb->lvb_imode = cpu_to_be16(inode->i_mode); lvb->lvb_inlink = cpu_to_be16(inode->i_nlink); lvb->lvb_iatime_packed = @@ -2097,6 +2098,7 @@ static void ocfs2_refresh_inode_from_lvb i_uid_write(inode, be32_to_cpu(lvb->lvb_iuid)); i_gid_write(inode, be32_to_cpu(lvb->lvb_igid)); + i_tag_write(inode, be16_to_cpu(lvb->lvb_itag)); inode->i_mode = be16_to_cpu(lvb->lvb_imode); set_nlink(inode, be16_to_cpu(lvb->lvb_inlink)); 
ocfs2_unpack_timespec(&inode->i_atime, diff -NurpP --minimal linux-3.10.19/fs/ocfs2/dlmglue.h linux-3.10.19-vs2.3.6.8/fs/ocfs2/dlmglue.h --- linux-3.10.19/fs/ocfs2/dlmglue.h 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ocfs2/dlmglue.h 2013-08-22 20:29:59.000000000 +0000 @@ -46,7 +46,8 @@ struct ocfs2_meta_lvb { __be16 lvb_inlink; __be32 lvb_iattr; __be32 lvb_igeneration; - __be32 lvb_reserved2; + __be16 lvb_itag; + __be16 lvb_reserved2; }; #define OCFS2_QINFO_LVB_VERSION 1 diff -NurpP --minimal linux-3.10.19/fs/ocfs2/file.c linux-3.10.19-vs2.3.6.8/fs/ocfs2/file.c --- linux-3.10.19/fs/ocfs2/file.c 2013-07-14 17:01:29.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ocfs2/file.c 2013-08-22 20:29:59.000000000 +0000 @@ -1124,7 +1124,7 @@ int ocfs2_setattr(struct dentry *dentry, attr->ia_valid &= ~ATTR_SIZE; #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \ - | ATTR_GID | ATTR_UID | ATTR_MODE) + | ATTR_GID | ATTR_UID | ATTR_TAG | ATTR_MODE) if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) return 0; diff -NurpP --minimal linux-3.10.19/fs/ocfs2/inode.c linux-3.10.19-vs2.3.6.8/fs/ocfs2/inode.c --- linux-3.10.19/fs/ocfs2/inode.c 2013-05-31 13:45:24.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ocfs2/inode.c 2013-08-22 20:29:59.000000000 +0000 @@ -28,6 +28,7 @@ #include #include #include +#include #include @@ -78,11 +79,13 @@ void ocfs2_set_inode_flags(struct inode { unsigned int flags = OCFS2_I(inode)->ip_attr; - inode->i_flags &= ~(S_IMMUTABLE | + inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK | S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC); if (flags & OCFS2_IMMUTABLE_FL) inode->i_flags |= S_IMMUTABLE; + if (flags & OCFS2_IXUNLINK_FL) + inode->i_flags |= S_IXUNLINK; if (flags & OCFS2_SYNC_FL) inode->i_flags |= S_SYNC; @@ -92,25 +95,44 @@ void ocfs2_set_inode_flags(struct inode inode->i_flags |= S_NOATIME; if (flags & OCFS2_DIRSYNC_FL) inode->i_flags |= S_DIRSYNC; + + inode->i_vflags &= ~(V_BARRIER | V_COW); + + if (flags & OCFS2_BARRIER_FL) + inode->i_vflags |= V_BARRIER; + if (flags & OCFS2_COW_FL) + inode->i_vflags |= V_COW; } /* Propagate flags from i_flags to OCFS2_I(inode)->ip_attr */ void ocfs2_get_inode_flags(struct ocfs2_inode_info *oi) { unsigned int flags = oi->vfs_inode.i_flags; + unsigned int vflags = oi->vfs_inode.i_vflags; + + oi->ip_attr &= ~(OCFS2_SYNC_FL | OCFS2_APPEND_FL | + OCFS2_IMMUTABLE_FL | OCFS2_IXUNLINK_FL | + OCFS2_NOATIME_FL | OCFS2_DIRSYNC_FL | + OCFS2_BARRIER_FL | OCFS2_COW_FL); + + if (flags & S_IMMUTABLE) + oi->ip_attr |= OCFS2_IMMUTABLE_FL; + if (flags & S_IXUNLINK) + oi->ip_attr |= OCFS2_IXUNLINK_FL; - oi->ip_attr &= ~(OCFS2_SYNC_FL|OCFS2_APPEND_FL| - OCFS2_IMMUTABLE_FL|OCFS2_NOATIME_FL|OCFS2_DIRSYNC_FL); if (flags & S_SYNC) oi->ip_attr |= OCFS2_SYNC_FL; if (flags & S_APPEND) oi->ip_attr |= OCFS2_APPEND_FL; - if (flags & S_IMMUTABLE) - oi->ip_attr |= OCFS2_IMMUTABLE_FL; if (flags & S_NOATIME) oi->ip_attr |= OCFS2_NOATIME_FL; if (flags & S_DIRSYNC) oi->ip_attr |= OCFS2_DIRSYNC_FL; + + if (vflags & V_BARRIER) + oi->ip_attr |= OCFS2_BARRIER_FL; + if (vflags & V_COW) + oi->ip_attr |= OCFS2_COW_FL; } struct inode *ocfs2_ilookup(struct super_block *sb, u64 blkno) @@ -241,6 +263,8 @@ void ocfs2_populate_inode(struct inode * struct super_block *sb; struct ocfs2_super *osb; int use_plocks = 1; + uid_t uid; + gid_t gid; sb = inode->i_sb; osb = OCFS2_SB(sb); @@ -269,8 +293,12 @@ void ocfs2_populate_inode(struct inode * inode->i_generation = le32_to_cpu(fe->i_generation); inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev)); 
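The dlmglue.h hunk above makes room for the tag in the OCFS2 lock value block by splitting the reserved __be32 into a __be16 lvb_itag plus a __be16 lvb_reserved2, so the LVB keeps its size and every other field keeps its offset while the context tag now travels between cluster nodes alongside uid/gid. The size-preserving nature of that change, checked in isolation (only this one word of the structure is modelled; the field names follow the patch, the types are userspace stand-ins for __be16/__be32):

    /* The reserved 32-bit word before and after the patch has the same size. */
    #include <stdint.h>
    #include <stdio.h>

    struct lvb_tail_old {
        uint32_t lvb_reserved2;
    };

    struct lvb_tail_new {
        uint16_t lvb_itag;          /* context tag carried in the LVB */
        uint16_t lvb_reserved2;
    };

    int main(void)
    {
        _Static_assert(sizeof(struct lvb_tail_old) == sizeof(struct lvb_tail_new),
                       "adding the tag must not change the LVB size");
        printf("tail size: %zu bytes\n", sizeof(struct lvb_tail_new));
        return 0;
    }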
inode->i_mode = le16_to_cpu(fe->i_mode); - i_uid_write(inode, le32_to_cpu(fe->i_uid)); - i_gid_write(inode, le32_to_cpu(fe->i_gid)); + uid = le32_to_cpu(fe->i_uid); + gid = le32_to_cpu(fe->i_gid); + i_uid_write(inode, INOTAG_UID(DX_TAG(inode), uid, gid)); + i_gid_write(inode, INOTAG_GID(DX_TAG(inode), uid, gid)); + i_tag_write(inode, INOTAG_TAG(DX_TAG(inode), uid, gid, + /* le16_to_cpu(raw_inode->i_raw_tag) */ 0)); /* Fast symlinks will have i_size but no allocated clusters. */ if (S_ISLNK(inode->i_mode) && !fe->i_clusters) { diff -NurpP --minimal linux-3.10.19/fs/ocfs2/inode.h linux-3.10.19-vs2.3.6.8/fs/ocfs2/inode.h --- linux-3.10.19/fs/ocfs2/inode.h 2013-07-14 17:01:29.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ocfs2/inode.h 2013-08-22 20:29:59.000000000 +0000 @@ -152,6 +152,7 @@ struct buffer_head *ocfs2_bread(struct i void ocfs2_set_inode_flags(struct inode *inode); void ocfs2_get_inode_flags(struct ocfs2_inode_info *oi); +int ocfs2_sync_flags(struct inode *inode, int, int); static inline blkcnt_t ocfs2_inode_sector_count(struct inode *inode) { diff -NurpP --minimal linux-3.10.19/fs/ocfs2/ioctl.c linux-3.10.19-vs2.3.6.8/fs/ocfs2/ioctl.c --- linux-3.10.19/fs/ocfs2/ioctl.c 2013-07-14 17:01:29.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ocfs2/ioctl.c 2013-08-22 20:29:59.000000000 +0000 @@ -76,7 +76,41 @@ static int ocfs2_get_inode_attr(struct i return status; } -static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags, +int ocfs2_sync_flags(struct inode *inode, int flags, int vflags) +{ + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct buffer_head *bh = NULL; + handle_t *handle = NULL; + int status; + + status = ocfs2_inode_lock(inode, &bh, 1); + if (status < 0) { + mlog_errno(status); + return status; + } + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto bail_unlock; + } + + inode->i_flags = flags; + inode->i_vflags = vflags; + ocfs2_get_inode_flags(OCFS2_I(inode)); + + status = ocfs2_mark_inode_dirty(handle, inode, bh); + if (status < 0) + mlog_errno(status); + + ocfs2_commit_trans(osb, handle); +bail_unlock: + ocfs2_inode_unlock(inode, 1); + brelse(bh); + return status; +} + +int ocfs2_set_inode_attr(struct inode *inode, unsigned flags, unsigned mask) { struct ocfs2_inode_info *ocfs2_inode = OCFS2_I(inode); @@ -116,6 +150,11 @@ static int ocfs2_set_inode_attr(struct i goto bail_unlock; } + if (IS_BARRIER(inode)) { + vxwprintk_task(1, "messing with the barrier."); + goto bail_unlock; + } + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); @@ -881,6 +920,7 @@ bail: return status; } + long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(filp); diff -NurpP --minimal linux-3.10.19/fs/ocfs2/namei.c linux-3.10.19-vs2.3.6.8/fs/ocfs2/namei.c --- linux-3.10.19/fs/ocfs2/namei.c 2013-07-14 17:01:29.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ocfs2/namei.c 2013-08-22 20:29:59.000000000 +0000 @@ -41,6 +41,7 @@ #include #include #include +#include #include @@ -475,6 +476,7 @@ static int __ocfs2_mknod_locked(struct i struct ocfs2_dinode *fe = NULL; struct ocfs2_extent_list *fel; u16 feat; + ktag_t ktag; *new_fe_bh = NULL; @@ -512,8 +514,13 @@ static int __ocfs2_mknod_locked(struct i fe->i_suballoc_loc = cpu_to_le64(suballoc_loc); fe->i_suballoc_bit = cpu_to_le16(suballoc_bit); fe->i_suballoc_slot = cpu_to_le16(inode_ac->ac_alloc_slot); - fe->i_uid = 
cpu_to_le32(i_uid_read(inode)); - fe->i_gid = cpu_to_le32(i_gid_read(inode)); + + ktag = make_ktag(&init_user_ns, dx_current_fstag(osb->sb)); + fe->i_uid = cpu_to_le32(from_kuid(&init_user_ns, + TAGINO_KUID(DX_TAG(inode), inode->i_uid, ktag))); + fe->i_gid = cpu_to_le32(from_kgid(&init_user_ns, + TAGINO_KGID(DX_TAG(inode), inode->i_gid, ktag))); + inode->i_tag = ktag; /* is this correct? */ fe->i_mode = cpu_to_le16(inode->i_mode); if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) fe->id1.dev1.i_rdev = cpu_to_le64(huge_encode_dev(dev)); diff -NurpP --minimal linux-3.10.19/fs/ocfs2/ocfs2.h linux-3.10.19-vs2.3.6.8/fs/ocfs2/ocfs2.h --- linux-3.10.19/fs/ocfs2/ocfs2.h 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ocfs2/ocfs2.h 2013-08-22 20:29:59.000000000 +0000 @@ -272,6 +272,7 @@ enum ocfs2_mount_options writes */ OCFS2_MOUNT_HB_NONE = 1 << 13, /* No heartbeat */ OCFS2_MOUNT_HB_GLOBAL = 1 << 14, /* Global heartbeat */ + OCFS2_MOUNT_TAGGED = 1 << 15, /* use tagging */ }; #define OCFS2_OSB_SOFT_RO 0x0001 diff -NurpP --minimal linux-3.10.19/fs/ocfs2/ocfs2_fs.h linux-3.10.19-vs2.3.6.8/fs/ocfs2/ocfs2_fs.h --- linux-3.10.19/fs/ocfs2/ocfs2_fs.h 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ocfs2/ocfs2_fs.h 2013-08-22 20:29:59.000000000 +0000 @@ -266,6 +266,11 @@ #define OCFS2_TOPDIR_FL FS_TOPDIR_FL /* Top of directory hierarchies*/ #define OCFS2_RESERVED_FL FS_RESERVED_FL /* reserved for ext2 lib */ +#define OCFS2_IXUNLINK_FL FS_IXUNLINK_FL /* Immutable invert on unlink */ + +#define OCFS2_BARRIER_FL FS_BARRIER_FL /* Barrier for chroot() */ +#define OCFS2_COW_FL FS_COW_FL /* Copy on Write marker */ + #define OCFS2_FL_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */ #define OCFS2_FL_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */ diff -NurpP --minimal linux-3.10.19/fs/ocfs2/super.c linux-3.10.19-vs2.3.6.8/fs/ocfs2/super.c --- linux-3.10.19/fs/ocfs2/super.c 2013-05-31 13:45:25.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/ocfs2/super.c 2013-08-22 20:29:59.000000000 +0000 @@ -185,6 +185,7 @@ enum { Opt_coherency_full, Opt_resv_level, Opt_dir_resv_level, + Opt_tag, Opt_notag, Opt_tagid, Opt_err, }; @@ -216,6 +217,9 @@ static const match_table_t tokens = { {Opt_coherency_full, "coherency=full"}, {Opt_resv_level, "resv_level=%u"}, {Opt_dir_resv_level, "dir_resv_level=%u"}, + {Opt_tag, "tag"}, + {Opt_notag, "notag"}, + {Opt_tagid, "tagid=%u"}, {Opt_err, NULL} }; @@ -662,6 +666,13 @@ static int ocfs2_remount(struct super_bl goto out; } + if ((osb->s_mount_opt & OCFS2_MOUNT_TAGGED) != + (parsed_options.mount_opt & OCFS2_MOUNT_TAGGED)) { + ret = -EINVAL; + mlog(ML_ERROR, "Cannot change tagging on remount\n"); + goto out; + } + /* We're going to/from readonly mode. 
*/ if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) { /* Disable quota accounting before remounting RO */ @@ -1177,6 +1188,9 @@ static int ocfs2_fill_super(struct super ocfs2_complete_mount_recovery(osb); + if (osb->s_mount_opt & OCFS2_MOUNT_TAGGED) + sb->s_flags |= MS_TAGGED; + if (ocfs2_mount_local(osb)) snprintf(nodestr, sizeof(nodestr), "local"); else @@ -1504,6 +1518,20 @@ static int ocfs2_parse_options(struct su option < OCFS2_MAX_RESV_LEVEL) mopt->dir_resv_level = option; break; +#ifndef CONFIG_TAGGING_NONE + case Opt_tag: + mopt->mount_opt |= OCFS2_MOUNT_TAGGED; + break; + case Opt_notag: + mopt->mount_opt &= ~OCFS2_MOUNT_TAGGED; + break; +#endif +#ifdef CONFIG_PROPAGATE + case Opt_tagid: + /* use args[0] */ + mopt->mount_opt |= OCFS2_MOUNT_TAGGED; + break; +#endif default: mlog(ML_ERROR, "Unrecognized mount option \"%s\" " diff -NurpP --minimal linux-3.10.19/fs/open.c linux-3.10.19-vs2.3.6.8/fs/open.c --- linux-3.10.19/fs/open.c 2013-07-14 17:01:29.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/open.c 2013-08-22 20:29:59.000000000 +0000 @@ -31,6 +31,11 @@ #include #include #include +#include +#include +#include +#include +#include #include "internal.h" @@ -67,6 +72,11 @@ long vfs_truncate(struct path *path, lof struct inode *inode; long error; +#ifdef CONFIG_VSERVER_COWBL + error = cow_check_and_break(path); + if (error) + goto out; +#endif inode = path->dentry->d_inode; /* For directories it's -EISDIR, for other non-regulars - -EINVAL */ @@ -504,6 +514,13 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons unsigned int lookup_flags = LOOKUP_FOLLOW; retry: error = user_path_at(dfd, filename, lookup_flags, &path); +#ifdef CONFIG_VSERVER_COWBL + if (!error) { + error = cow_check_and_break(&path); + if (error) + path_put(&path); + } +#endif if (!error) { error = chmod_common(&path, mode); path_put(&path); @@ -536,13 +553,15 @@ static int chown_common(struct path *pat if (!uid_valid(uid)) return -EINVAL; newattrs.ia_valid |= ATTR_UID; - newattrs.ia_uid = uid; + newattrs.ia_uid = make_kuid(&init_user_ns, + dx_map_uid(user)); } if (group != (gid_t) -1) { if (!gid_valid(gid)) return -EINVAL; newattrs.ia_valid |= ATTR_GID; - newattrs.ia_gid = gid; + newattrs.ia_gid = make_kgid(&init_user_ns, + dx_map_gid(group)); } if (!S_ISDIR(inode->i_mode)) newattrs.ia_valid |= @@ -576,6 +595,10 @@ retry: error = mnt_want_write(path.mnt); if (error) goto out_release; +#ifdef CONFIG_VSERVER_COWBL + error = cow_check_and_break(&path); + if (!error) +#endif error = chown_common(&path, user, group); mnt_drop_write(path.mnt); out_release: diff -NurpP --minimal linux-3.10.19/fs/proc/array.c linux-3.10.19-vs2.3.6.8/fs/proc/array.c --- linux-3.10.19/fs/proc/array.c 2013-05-31 13:45:25.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/proc/array.c 2013-08-22 20:29:59.000000000 +0000 @@ -82,6 +82,8 @@ #include #include #include +#include +#include #include #include @@ -173,6 +175,9 @@ static inline void task_state(struct seq rcu_read_lock(); ppid = pid_alive(p) ?
task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0; + if (unlikely(vx_current_initpid(p->pid))) + ppid = 0; + tpid = 0; if (pid_alive(p)) { struct task_struct *tracer = ptrace_parent(p); @@ -297,7 +302,7 @@ static inline void task_sig(struct seq_f } static void render_cap_t(struct seq_file *m, const char *header, - kernel_cap_t *a) + struct vx_info *vxi, kernel_cap_t *a) { unsigned __capi; @@ -331,10 +336,11 @@ static inline void task_cap(struct seq_f NORM_CAPS(cap_effective); NORM_CAPS(cap_bset); - render_cap_t(m, "CapInh:\t", &cap_inheritable); - render_cap_t(m, "CapPrm:\t", &cap_permitted); - render_cap_t(m, "CapEff:\t", &cap_effective); - render_cap_t(m, "CapBnd:\t", &cap_bset); + /* FIXME: maybe move the p->vx_info masking to __task_cred() ? */ + render_cap_t(m, "CapInh:\t", p->vx_info, &cap_inheritable); + render_cap_t(m, "CapPrm:\t", p->vx_info, &cap_permitted); + render_cap_t(m, "CapEff:\t", p->vx_info, &cap_effective); + render_cap_t(m, "CapBnd:\t", p->vx_info, &cap_bset); } static inline void task_seccomp(struct seq_file *m, struct task_struct *p) @@ -363,6 +369,42 @@ static void task_cpus_allowed(struct seq seq_putc(m, '\n'); } +int proc_pid_nsproxy(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task) +{ + seq_printf(m, "Proxy:\t%p(%c)\n" + "Count:\t%u\n" + "uts:\t%p(%c)\n" + "ipc:\t%p(%c)\n" + "mnt:\t%p(%c)\n" + "pid:\t%p(%c)\n" + "net:\t%p(%c)\n", + task->nsproxy, + (task->nsproxy == init_task.nsproxy ? 'I' : '-'), + atomic_read(&task->nsproxy->count), + task->nsproxy->uts_ns, + (task->nsproxy->uts_ns == init_task.nsproxy->uts_ns ? 'I' : '-'), + task->nsproxy->ipc_ns, + (task->nsproxy->ipc_ns == init_task.nsproxy->ipc_ns ? 'I' : '-'), + task->nsproxy->mnt_ns, + (task->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns ? 'I' : '-'), + task->nsproxy->pid_ns, + (task->nsproxy->pid_ns == init_task.nsproxy->pid_ns ? 'I' : '-'), + task->nsproxy->net_ns, + (task->nsproxy->net_ns == init_task.nsproxy->net_ns ? 
'I' : '-')); + return 0; +} + +void task_vs_id(struct seq_file *m, struct task_struct *task) +{ + if (task_vx_flags(task, VXF_HIDE_VINFO, 0)) + return; + + seq_printf(m, "VxID: %d\n", vx_task_xid(task)); + seq_printf(m, "NxID: %d\n", nx_task_nid(task)); +} + + int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { @@ -380,6 +422,7 @@ int proc_pid_status(struct seq_file *m, task_seccomp(m, task); task_cpus_allowed(m, task); cpuset_task_status_allowed(m, task); + task_vs_id(m, task); task_context_switch_counts(m, task); return 0; } @@ -489,6 +532,17 @@ static int do_task_stat(struct seq_file /* convert nsec -> ticks */ start_time = nsec_to_clock_t(start_time); + /* fixup start time for virt uptime */ + if (vx_flags(VXF_VIRT_UPTIME, 0)) { + unsigned long long bias = + current->vx_info->cvirt.bias_clock; + + if (start_time > bias) + start_time -= bias; + else + start_time = 0; + } + seq_printf(m, "%d (%s) %c", pid_nr_ns(pid, ns), tcomm, state); seq_put_decimal_ll(m, ' ', ppid); seq_put_decimal_ll(m, ' ', pgid); diff -NurpP --minimal linux-3.10.19/fs/proc/base.c linux-3.10.19-vs2.3.6.8/fs/proc/base.c --- linux-3.10.19/fs/proc/base.c 2013-07-14 17:01:29.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/proc/base.c 2013-08-22 22:18:21.000000000 +0000 @@ -87,6 +87,8 @@ #include #include #include +#include +#include #ifdef CONFIG_HARDWALL #include #endif @@ -976,11 +978,15 @@ static ssize_t oom_adj_write(struct file oom_adj = (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE; if (oom_adj < task->signal->oom_score_adj && - !capable(CAP_SYS_RESOURCE)) { + !vx_capable(CAP_SYS_RESOURCE, VXC_OOM_ADJUST)) { err = -EACCES; goto err_sighand; } + /* prevent guest processes from circumventing the oom killer */ + if (vx_current_xid() && (oom_adj == OOM_DISABLE)) + oom_adj = OOM_ADJUST_MIN; + /* * /proc/pid/oom_adj is provided for legacy purposes, ask users to use * /proc/pid/oom_score_adj instead. @@ -1559,6 +1565,8 @@ struct inode *proc_pid_make_inode(struct inode->i_gid = cred->egid; rcu_read_unlock(); } + /* procfs is xid tagged */ + i_tag_write(inode, (vtag_t)vx_task_xid(task)); security_task_to_inode(task, inode); out: @@ -1604,6 +1612,8 @@ int pid_getattr(struct vfsmount *mnt, st /* dentry stuff */ +static unsigned name_to_int(struct dentry *dentry); + /* * Exceptional case: normally we are not allowed to unhash a busy * directory. In this case, however, we can do it - no aliasing problems @@ -1632,6 +1642,12 @@ int pid_revalidate(struct dentry *dentry task = get_proc_task(inode); if (task) { + unsigned pid = name_to_int(dentry); + + if (pid != ~0U && pid != vx_map_pid(task->pid)) { + put_task_struct(task); + goto drop; + } if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || task_dumpable(task)) { rcu_read_lock(); @@ -1648,6 +1664,7 @@ int pid_revalidate(struct dentry *dentry put_task_struct(task); return 1; } +drop: d_drop(dentry); return 0; } @@ -2196,6 +2213,13 @@ static struct dentry *proc_pident_lookup if (!task) goto out_no_task; + /* TODO: maybe we can come up with a generic approach? */ + if (task_vx_flags(task, VXF_HIDE_VINFO, 0) && + (dentry->d_name.len == 5) && + (!memcmp(dentry->d_name.name, "vinfo", 5) || + !memcmp(dentry->d_name.name, "ninfo", 5))) + goto out; + /* * Yes, it does not scale. And it should not. Don't add * new entries into /proc// without very good reasons. 
@@ -2630,6 +2654,9 @@ static int proc_pid_personality(struct s static const struct file_operations proc_task_operations; static const struct inode_operations proc_task_inode_operations; +extern int proc_pid_vx_info(struct task_struct *, char *); +extern int proc_pid_nx_info(struct task_struct *, char *); + static const struct pid_entry tgid_base_stuff[] = { DIR("task", S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations), DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations), @@ -2696,6 +2723,8 @@ static const struct pid_entry tgid_base_ #ifdef CONFIG_CGROUPS REG("cgroup", S_IRUGO, proc_cgroup_operations), #endif + INF("vinfo", S_IRUGO, proc_pid_vx_info), + INF("ninfo", S_IRUGO, proc_pid_nx_info), INF("oom_score", S_IRUGO, proc_oom_score), REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations), REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations), @@ -2910,7 +2939,7 @@ retry: iter.task = NULL; pid = find_ge_pid(iter.tgid, ns); if (pid) { - iter.tgid = pid_nr_ns(pid, ns); + iter.tgid = pid_unmapped_nr_ns(pid, ns); iter.task = pid_task(pid, PIDTYPE_PID); /* What we to know is if the pid we have find is the * pid of a thread_group_leader. Testing for task @@ -2940,7 +2969,7 @@ static int proc_pid_fill_cache(struct fi struct tgid_iter iter) { char name[PROC_NUMBUF]; - int len = snprintf(name, sizeof(name), "%d", iter.tgid); + int len = snprintf(name, sizeof(name), "%d", vx_map_tgid(iter.tgid)); return proc_fill_cache(filp, dirent, filldir, name, len, proc_pid_instantiate, iter.task, NULL); } @@ -2981,6 +3010,8 @@ int proc_pid_readdir(struct file * filp, __filldir = fake_filldir; filp->f_pos = iter.tgid + TGID_OFFSET; + if (!vx_proc_task_visible(iter.task)) + continue; if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) { put_task_struct(iter.task); goto out; @@ -3073,6 +3104,7 @@ static const struct pid_entry tid_base_s REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations), REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations), #endif + ONE("nsproxy", S_IRUGO, proc_pid_nsproxy), }; static int proc_tid_base_readdir(struct file * filp, @@ -3141,6 +3173,8 @@ static struct dentry *proc_task_lookup(s tid = name_to_int(dentry); if (tid == ~0U) goto out; + if (vx_current_initpid(tid)) + goto out; ns = dentry->d_sb->s_fs_info; rcu_read_lock(); diff -NurpP --minimal linux-3.10.19/fs/proc/generic.c linux-3.10.19-vs2.3.6.8/fs/proc/generic.c --- linux-3.10.19/fs/proc/generic.c 2013-07-14 17:01:29.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/proc/generic.c 2013-08-22 20:29:59.000000000 +0000 @@ -23,6 +23,7 @@ #include #include #include +#include #include #include "internal.h" @@ -203,6 +204,8 @@ struct dentry *proc_lookup_de(struct pro for (de = de->subdir; de ; de = de->next) { if (de->namelen != dentry->d_name.len) continue; + if (!vx_hide_check(0, de->vx_flags)) + continue; if (!memcmp(dentry->d_name.name, de->name, de->namelen)) { pde_get(de); spin_unlock(&proc_subdir_lock); @@ -211,6 +214,8 @@ struct dentry *proc_lookup_de(struct pro return ERR_PTR(-ENOMEM); d_set_d_op(dentry, &proc_dentry_operations); d_add(dentry, inode); + /* generic proc entries belong to the host */ + i_tag_write(inode, 0); return NULL; } } @@ -279,6 +284,8 @@ int proc_readdir_de(struct proc_dir_entr /* filldir passes info to user space */ pde_get(de); + if (!vx_hide_check(0, de->vx_flags)) + goto skip; spin_unlock(&proc_subdir_lock); if (filldir(dirent, de->name, de->namelen, filp->f_pos, de->low_ino, de->mode >> 12) < 0) { @@ -286,6 +293,7 @@ int 
proc_readdir_de(struct proc_dir_entr goto out; } spin_lock(&proc_subdir_lock); + skip: filp->f_pos++; next = de->next; pde_put(de); @@ -395,6 +403,7 @@ static struct proc_dir_entry *__proc_cre ent->namelen = len; ent->mode = mode; ent->nlink = nlink; + ent->vx_flags = IATTR_PROC_DEFAULT; atomic_set(&ent->count, 1); spin_lock_init(&ent->pde_unload_lock); INIT_LIST_HEAD(&ent->pde_openers); @@ -418,7 +427,8 @@ struct proc_dir_entry *proc_symlink(cons kfree(ent->data); kfree(ent); ent = NULL; - } + } else + ent->vx_flags = IATTR_PROC_SYMLINK; } else { kfree(ent); ent = NULL; diff -NurpP --minimal linux-3.10.19/fs/proc/inode.c linux-3.10.19-vs2.3.6.8/fs/proc/inode.c --- linux-3.10.19/fs/proc/inode.c 2013-07-14 17:01:29.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/proc/inode.c 2013-08-22 20:29:59.000000000 +0000 @@ -387,6 +387,8 @@ struct inode *proc_get_inode(struct supe inode->i_uid = de->uid; inode->i_gid = de->gid; } + if (de->vx_flags) + PROC_I(inode)->vx_flags = de->vx_flags; if (de->size) inode->i_size = de->size; if (de->nlink) diff -NurpP --minimal linux-3.10.19/fs/proc/internal.h linux-3.10.19-vs2.3.6.8/fs/proc/internal.h --- linux-3.10.19/fs/proc/internal.h 2013-07-14 17:01:29.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/proc/internal.h 2013-08-22 21:52:30.000000000 +0000 @@ -14,6 +14,7 @@ #include #include #include +#include struct ctl_table_header; struct mempolicy; @@ -35,6 +36,7 @@ struct proc_dir_entry { nlink_t nlink; kuid_t uid; kgid_t gid; + int vx_flags; loff_t size; const struct inode_operations *proc_iops; const struct file_operations *proc_fops; @@ -50,16 +52,23 @@ struct proc_dir_entry { char name[]; }; +struct vx_info; +struct nx_info; + union proc_op { int (*proc_get_link)(struct dentry *, struct path *); int (*proc_read)(struct task_struct *task, char *page); int (*proc_show)(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task); + int (*proc_vs_read)(char *page); + int (*proc_vxi_read)(struct vx_info *vxi, char *page); + int (*proc_nxi_read)(struct nx_info *nxi, char *page); }; struct proc_inode { struct pid *pid; + int vx_flags; int fd; union proc_op op; struct proc_dir_entry *pde; @@ -92,11 +101,16 @@ static inline struct pid *proc_pid(struc return PROC_I(inode)->pid; } -static inline struct task_struct *get_proc_task(struct inode *inode) +static inline struct task_struct *get_proc_task_real(struct inode *inode) { return get_pid_task(proc_pid(inode), PIDTYPE_PID); } +static inline struct task_struct *get_proc_task(struct inode *inode) +{ + return vx_get_proc_task(inode, proc_pid(inode)); +} + static inline int task_dumpable(struct task_struct *task) { int dumpable = 0; @@ -155,6 +169,8 @@ extern int proc_pid_status(struct seq_fi struct pid *, struct task_struct *); extern int proc_pid_statm(struct seq_file *, struct pid_namespace *, struct pid *, struct task_struct *); +extern int proc_pid_nsproxy(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task); /* * base.c diff -NurpP --minimal linux-3.10.19/fs/proc/loadavg.c linux-3.10.19-vs2.3.6.8/fs/proc/loadavg.c --- linux-3.10.19/fs/proc/loadavg.c 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/proc/loadavg.c 2013-08-22 20:30:00.000000000 +0000 @@ -12,15 +12,27 @@ static int loadavg_proc_show(struct seq_file *m, void *v) { + unsigned long running; + unsigned int threads; unsigned long avnrun[3]; get_avenrun(avnrun, FIXED_1/200, 0); + if (vx_flags(VXF_VIRT_LOAD, 0)) { + struct vx_info *vxi = current_vx_info(); + + running = 
atomic_read(&vxi->cvirt.nr_running); + threads = atomic_read(&vxi->cvirt.nr_threads); + } else { + running = nr_running(); + threads = nr_threads; + } + seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %ld/%d %d\n", LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]), LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]), LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]), - nr_running(), nr_threads, + running, threads, task_active_pid_ns(current)->last_pid); return 0; } diff -NurpP --minimal linux-3.10.19/fs/proc/meminfo.c linux-3.10.19-vs2.3.6.8/fs/proc/meminfo.c --- linux-3.10.19/fs/proc/meminfo.c 2013-07-14 17:01:29.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/proc/meminfo.c 2013-08-22 20:30:00.000000000 +0000 @@ -40,7 +40,8 @@ static int meminfo_proc_show(struct seq_ allowed = ((totalram_pages - hugetlb_total_pages()) * sysctl_overcommit_ratio / 100) + total_swap_pages; - cached = global_page_state(NR_FILE_PAGES) - + cached = vx_flags(VXF_VIRT_MEM, 0) ? + vx_vsi_cached(&i) : global_page_state(NR_FILE_PAGES) - total_swapcache_pages() - i.bufferram; if (cached < 0) cached = 0; diff -NurpP --minimal linux-3.10.19/fs/proc/root.c linux-3.10.19-vs2.3.6.8/fs/proc/root.c --- linux-3.10.19/fs/proc/root.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/proc/root.c 2013-11-13 17:19:37.000000000 +0000 @@ -20,9 +20,14 @@ #include #include #include +#include #include "internal.h" +struct proc_dir_entry *proc_virtual; + +extern void proc_vx_init(void); + static int proc_test_super(struct super_block *sb, void *data) { return sb->s_fs_info == data; @@ -111,7 +116,8 @@ static struct dentry *proc_mount(struct options = data; if (!current_user_ns()->may_mount_proc || - !ns_capable(ns->user_ns, CAP_SYS_ADMIN)) + !vx_ns_capable(ns->user_ns, + CAP_SYS_ADMIN, VXC_SECURE_MOUNT)) return ERR_PTR(-EPERM); } @@ -185,6 +191,7 @@ void __init proc_root_init(void) #endif proc_mkdir("bus", NULL); proc_sys_init(); + proc_vx_init(); } static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat @@ -251,6 +258,7 @@ struct proc_dir_entry proc_root = { .proc_iops = &proc_root_inode_operations, .proc_fops = &proc_root_operations, .parent = &proc_root, + .vx_flags = IATTR_ADMIN | IATTR_WATCH, .name = "/proc", }; diff -NurpP --minimal linux-3.10.19/fs/proc/self.c linux-3.10.19-vs2.3.6.8/fs/proc/self.c --- linux-3.10.19/fs/proc/self.c 2013-07-14 17:01:29.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/proc/self.c 2013-08-23 00:07:45.000000000 +0000 @@ -2,6 +2,7 @@ #include #include #include +#include #include "internal.h" /* @@ -62,6 +63,8 @@ int proc_setup_self(struct super_block * self = d_alloc_name(s->s_root, "self"); if (self) { struct inode *inode = new_inode_pseudo(s); + + // self->vx_flags = IATTR_PROC_SYMLINK; if (inode) { inode->i_ino = self_inum; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; diff -NurpP --minimal linux-3.10.19/fs/proc/stat.c linux-3.10.19-vs2.3.6.8/fs/proc/stat.c --- linux-3.10.19/fs/proc/stat.c 2013-07-14 17:01:29.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/proc/stat.c 2013-08-22 20:30:00.000000000 +0000 @@ -9,8 +9,10 @@ #include #include #include +#include #include #include +#include #ifndef arch_irq_stat_cpu #define arch_irq_stat_cpu(cpu) 0 @@ -87,14 +89,26 @@ static int show_stat(struct seq_file *p, u64 sum_softirq = 0; unsigned int per_softirq_sums[NR_SOFTIRQS] = {0}; struct timespec boottime; + cpumask_var_t cpus_allowed; + bool virt_cpu = vx_flags(VXF_VIRT_CPU, 0); user = nice = system = idle = iowait = irq = softirq = steal = 0; guest = guest_nice = 0; 
getboottime(&boottime); + + if (vx_flags(VXF_VIRT_UPTIME, 0)) + vx_vsi_boottime(&boottime); + + if (virt_cpu) + cpuset_cpus_allowed(current, cpus_allowed); + jif = boottime.tv_sec; for_each_possible_cpu(i) { + if (virt_cpu && !cpumask_test_cpu(i, cpus_allowed)) + continue; + user += kcpustat_cpu(i).cpustat[CPUTIME_USER]; nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE]; system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]; @@ -131,6 +145,9 @@ static int show_stat(struct seq_file *p, seq_putc(p, '\n'); for_each_online_cpu(i) { + if (virt_cpu && !cpumask_test_cpu(i, cpus_allowed)) + continue; + /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ user = kcpustat_cpu(i).cpustat[CPUTIME_USER]; nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE]; diff -NurpP --minimal linux-3.10.19/fs/proc/uptime.c linux-3.10.19-vs2.3.6.8/fs/proc/uptime.c --- linux-3.10.19/fs/proc/uptime.c 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/proc/uptime.c 2013-08-22 20:30:00.000000000 +0000 @@ -5,6 +5,7 @@ #include #include #include +#include #include static int uptime_proc_show(struct seq_file *m, void *v) @@ -25,6 +26,10 @@ static int uptime_proc_show(struct seq_f nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC; idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem); idle.tv_nsec = rem; + + if (vx_flags(VXF_VIRT_UPTIME, 0)) + vx_vsi_uptime(&uptime, &idle); + seq_printf(m, "%lu.%02lu %lu.%02lu\n", (unsigned long) uptime.tv_sec, (uptime.tv_nsec / (NSEC_PER_SEC / 100)), diff -NurpP --minimal linux-3.10.19/fs/proc_namespace.c linux-3.10.19-vs2.3.6.8/fs/proc_namespace.c --- linux-3.10.19/fs/proc_namespace.c 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/proc_namespace.c 2013-08-22 20:30:00.000000000 +0000 @@ -44,6 +44,8 @@ static int show_sb_opts(struct seq_file { MS_SYNCHRONOUS, ",sync" }, { MS_DIRSYNC, ",dirsync" }, { MS_MANDLOCK, ",mand" }, + { MS_TAGGED, ",tag" }, + { MS_NOTAGCHECK, ",notagcheck" }, { 0, NULL } }; const struct proc_fs_info *fs_infop; @@ -80,6 +82,40 @@ static inline void mangle(struct seq_fil seq_escape(m, s, " \t\n\\"); } +#ifdef CONFIG_VSERVER_EXTRA_MNT_CHECK + +static int mnt_is_reachable(struct vfsmount *vfsmnt) +{ + struct path root; + struct dentry *point; + struct mount *mnt = real_mount(vfsmnt); + struct mount *root_mnt; + int ret; + + if (mnt == mnt->mnt_ns->root) + return 1; + + br_read_lock(&vfsmount_lock); + root = current->fs->root; + root_mnt = real_mount(root.mnt); + point = root.dentry; + + while ((mnt != mnt->mnt_parent) && (mnt != root_mnt)) { + point = mnt->mnt_mountpoint; + mnt = mnt->mnt_parent; + } + + ret = (mnt == root_mnt) && is_subdir(point, root.dentry); + + br_read_unlock(&vfsmount_lock); + + return ret; +} + +#else +#define mnt_is_reachable(v) (1) +#endif + static void show_type(struct seq_file *m, struct super_block *sb) { mangle(m, sb->s_type->name); @@ -96,6 +132,17 @@ static int show_vfsmnt(struct seq_file * struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; struct super_block *sb = mnt_path.dentry->d_sb; + if (vx_flags(VXF_HIDE_MOUNT, 0)) + return SEQ_SKIP; + if (!mnt_is_reachable(mnt) && !vx_check(0, VS_WATCH_P)) + return SEQ_SKIP; + + if (!vx_check(0, VS_ADMIN|VS_WATCH) && + mnt == current->fs->root.mnt) { + seq_puts(m, "/dev/root / "); + goto type; + } + if (sb->s_op->show_devname) { err = sb->s_op->show_devname(m, mnt_path.dentry); if (err) @@ -106,6 +153,7 @@ static int show_vfsmnt(struct seq_file * seq_putc(m, ' '); seq_path(m, &mnt_path, " \t\n\\"); seq_putc(m, ' '); +type: show_type(m, sb); seq_puts(m, 
__mnt_is_readonly(mnt) ? " ro" : " rw"); err = show_sb_opts(m, sb); @@ -128,6 +176,11 @@ static int show_mountinfo(struct seq_fil struct path root = p->root; int err = 0; + if (vx_flags(VXF_HIDE_MOUNT, 0)) + return SEQ_SKIP; + if (!mnt_is_reachable(mnt) && !vx_check(0, VS_WATCH_P)) + return SEQ_SKIP; + seq_printf(m, "%i %i %u:%u ", r->mnt_id, r->mnt_parent->mnt_id, MAJOR(sb->s_dev), MINOR(sb->s_dev)); if (sb->s_op->show_path) @@ -187,6 +240,17 @@ static int show_vfsstat(struct seq_file struct super_block *sb = mnt_path.dentry->d_sb; int err = 0; + if (vx_flags(VXF_HIDE_MOUNT, 0)) + return SEQ_SKIP; + if (!mnt_is_reachable(mnt) && !vx_check(0, VS_WATCH_P)) + return SEQ_SKIP; + + if (!vx_check(0, VS_ADMIN|VS_WATCH) && + mnt == current->fs->root.mnt) { + seq_puts(m, "device /dev/root mounted on / "); + goto type; + } + /* device */ if (sb->s_op->show_devname) { seq_puts(m, "device "); @@ -203,7 +267,7 @@ static int show_vfsstat(struct seq_file seq_puts(m, " mounted on "); seq_path(m, &mnt_path, " \t\n\\"); seq_putc(m, ' '); - +type: /* file system type */ seq_puts(m, "with fstype "); show_type(m, sb); diff -NurpP --minimal linux-3.10.19/fs/quota/dquot.c linux-3.10.19-vs2.3.6.8/fs/quota/dquot.c --- linux-3.10.19/fs/quota/dquot.c 2013-05-31 13:45:25.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/quota/dquot.c 2013-08-22 20:30:00.000000000 +0000 @@ -1585,6 +1585,9 @@ int __dquot_alloc_space(struct inode *in struct dquot **dquots = inode->i_dquot; int reserve = flags & DQUOT_SPACE_RESERVE; + if ((ret = dl_alloc_space(inode, number))) + return ret; + /* * First test before acquiring mutex - solves deadlocks when we * re-enter the quota code and are already holding the mutex @@ -1640,6 +1643,9 @@ int dquot_alloc_inode(const struct inode struct dquot_warn warn[MAXQUOTAS]; struct dquot * const *dquots = inode->i_dquot; + if ((ret = dl_alloc_inode(inode))) + return ret; + /* First test before acquiring mutex - solves deadlocks when we * re-enter the quota code and are already holding the mutex */ if (!dquot_active(inode)) @@ -1711,6 +1717,8 @@ void __dquot_free_space(struct inode *in struct dquot **dquots = inode->i_dquot; int reserve = flags & DQUOT_SPACE_RESERVE; + dl_free_space(inode, number); + /* First test before acquiring mutex - solves deadlocks when we * re-enter the quota code and are already holding the mutex */ if (!dquot_active(inode)) { @@ -1755,6 +1763,8 @@ void dquot_free_inode(const struct inode struct dquot_warn warn[MAXQUOTAS]; struct dquot * const *dquots = inode->i_dquot; + dl_free_inode(inode); + /* First test before acquiring mutex - solves deadlocks when we * re-enter the quota code and are already holding the mutex */ if (!dquot_active(inode)) diff -NurpP --minimal linux-3.10.19/fs/quota/quota.c linux-3.10.19-vs2.3.6.8/fs/quota/quota.c --- linux-3.10.19/fs/quota/quota.c 2013-02-19 13:58:49.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/quota/quota.c 2013-08-22 20:30:00.000000000 +0000 @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -37,7 +38,7 @@ static int check_quotactl_permission(str break; /*FALLTHROUGH*/ default: - if (!capable(CAP_SYS_ADMIN)) + if (!vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL)) return -EPERM; } @@ -309,6 +310,46 @@ static int do_quotactl(struct super_bloc #ifdef CONFIG_BLOCK +#if defined(CONFIG_BLK_DEV_VROOT) || defined(CONFIG_BLK_DEV_VROOT_MODULE) + +#include +#include +#include +#include +#include + +static vroot_grb_func *vroot_get_real_bdev = NULL; + +static DEFINE_SPINLOCK(vroot_grb_lock); + +int 
register_vroot_grb(vroot_grb_func *func) { + int ret = -EBUSY; + + spin_lock(&vroot_grb_lock); + if (!vroot_get_real_bdev) { + vroot_get_real_bdev = func; + ret = 0; + } + spin_unlock(&vroot_grb_lock); + return ret; +} +EXPORT_SYMBOL(register_vroot_grb); + +int unregister_vroot_grb(vroot_grb_func *func) { + int ret = -EINVAL; + + spin_lock(&vroot_grb_lock); + if (vroot_get_real_bdev) { + vroot_get_real_bdev = NULL; + ret = 0; + } + spin_unlock(&vroot_grb_lock); + return ret; +} +EXPORT_SYMBOL(unregister_vroot_grb); + +#endif + /* Return 1 if 'cmd' will block on frozen filesystem */ static int quotactl_cmd_write(int cmd) { @@ -343,6 +384,22 @@ static struct super_block *quotactl_bloc putname(tmp); if (IS_ERR(bdev)) return ERR_CAST(bdev); +#if defined(CONFIG_BLK_DEV_VROOT) || defined(CONFIG_BLK_DEV_VROOT_MODULE) + if (bdev && bdev->bd_inode && + imajor(bdev->bd_inode) == VROOT_MAJOR) { + struct block_device *bdnew = (void *)-EINVAL; + + if (vroot_get_real_bdev) + bdnew = vroot_get_real_bdev(bdev); + else + vxdprintk(VXD_CBIT(misc, 0), + "vroot_get_real_bdev not set"); + bdput(bdev); + if (IS_ERR(bdnew)) + return ERR_PTR(PTR_ERR(bdnew)); + bdev = bdnew; + } +#endif if (quotactl_cmd_write(cmd)) sb = get_super_thawed(bdev); else diff -NurpP --minimal linux-3.10.19/fs/stat.c linux-3.10.19-vs2.3.6.8/fs/stat.c --- linux-3.10.19/fs/stat.c 2013-05-31 13:45:25.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/stat.c 2013-08-22 20:30:00.000000000 +0000 @@ -26,6 +26,7 @@ void generic_fillattr(struct inode *inod stat->nlink = inode->i_nlink; stat->uid = inode->i_uid; stat->gid = inode->i_gid; + stat->tag = inode->i_tag; stat->rdev = inode->i_rdev; stat->size = i_size_read(inode); stat->atime = inode->i_atime; diff -NurpP --minimal linux-3.10.19/fs/statfs.c linux-3.10.19-vs2.3.6.8/fs/statfs.c --- linux-3.10.19/fs/statfs.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/statfs.c 2013-11-13 17:17:16.000000000 +0000 @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include "internal.h" static int flags_by_mnt(int mnt_flags) @@ -60,6 +62,8 @@ static int statfs_by_dentry(struct dentr retval = dentry->d_sb->s_op->statfs(dentry, buf); if (retval == 0 && buf->f_frsize == 0) buf->f_frsize = buf->f_bsize; + if (!vx_check(0, VS_ADMIN|VS_WATCH)) + vx_vsi_statfs(dentry->d_sb, buf); return retval; } diff -NurpP --minimal linux-3.10.19/fs/super.c linux-3.10.19-vs2.3.6.8/fs/super.c --- linux-3.10.19/fs/super.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/super.c 2013-11-13 17:17:16.000000000 +0000 @@ -34,6 +34,8 @@ #include #include #include +#include +#include #include "internal.h" @@ -1112,6 +1114,13 @@ mount_fs(struct file_system_type *type, WARN_ON(sb->s_bdi == &default_backing_dev_info); sb->s_flags |= MS_BORN; + error = -EPERM; + if (!vx_capable(CAP_SYS_ADMIN, VXC_BINARY_MOUNT) && + !sb->s_bdev && + (sb->s_magic != PROC_SUPER_MAGIC) && + (sb->s_magic != DEVPTS_SUPER_MAGIC)) + goto out_sb; + error = security_sb_kern_mount(sb, flags, secdata); if (error) goto out_sb; diff -NurpP --minimal linux-3.10.19/fs/sysfs/mount.c linux-3.10.19-vs2.3.6.8/fs/sysfs/mount.c --- linux-3.10.19/fs/sysfs/mount.c 2013-05-31 13:45:25.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/sysfs/mount.c 2013-08-22 20:30:00.000000000 +0000 @@ -48,7 +48,7 @@ static int sysfs_fill_super(struct super sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; - sb->s_magic = SYSFS_MAGIC; + sb->s_magic = SYSFS_SUPER_MAGIC; sb->s_op = &sysfs_ops; sb->s_time_gran = 1; diff -NurpP 
--minimal linux-3.10.19/fs/utimes.c linux-3.10.19-vs2.3.6.8/fs/utimes.c --- linux-3.10.19/fs/utimes.c 2013-02-19 13:58:49.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/utimes.c 2013-08-22 20:30:00.000000000 +0000 @@ -8,6 +8,8 @@ #include #include #include +#include +#include #include #include @@ -52,12 +54,18 @@ static int utimes_common(struct path *pa { int error; struct iattr newattrs; - struct inode *inode = path->dentry->d_inode; + struct inode *inode; + + error = cow_check_and_break(path); + if (error) + goto out; error = mnt_want_write(path->mnt); if (error) goto out; + inode = path->dentry->d_inode; + if (times && times[0].tv_nsec == UTIME_NOW && times[1].tv_nsec == UTIME_NOW) times = NULL; diff -NurpP --minimal linux-3.10.19/fs/xattr.c linux-3.10.19-vs2.3.6.8/fs/xattr.c --- linux-3.10.19/fs/xattr.c 2013-02-19 13:58:49.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/xattr.c 2013-08-22 20:30:00.000000000 +0000 @@ -21,6 +21,7 @@ #include #include #include +#include #include @@ -52,7 +53,7 @@ xattr_permission(struct inode *inode, co * The trusted.* namespace can only be accessed by privileged users. */ if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN)) { - if (!capable(CAP_SYS_ADMIN)) + if (!vx_capable(CAP_SYS_ADMIN, VXC_FS_TRUSTED)) return (mask & MAY_WRITE) ? -EPERM : -ENODATA; return 0; } diff -NurpP --minimal linux-3.10.19/fs/xfs/xfs_dinode.h linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_dinode.h --- linux-3.10.19/fs/xfs/xfs_dinode.h 2013-07-14 17:01:29.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_dinode.h 2013-08-22 20:30:00.000000000 +0000 @@ -51,7 +51,9 @@ typedef struct xfs_dinode { __be32 di_nlink; /* number of links to file */ __be16 di_projid_lo; /* lower part of owner's project id */ __be16 di_projid_hi; /* higher part owner's project id */ - __u8 di_pad[6]; /* unused, zeroed space */ + __u8 di_pad[2]; /* unused, zeroed space */ + __be16 di_tag; /* context tagging */ + __be16 di_vflags; /* vserver specific flags */ __be16 di_flushiter; /* incremented on flush */ xfs_timestamp_t di_atime; /* time last accessed */ xfs_timestamp_t di_mtime; /* time last modified */ @@ -209,6 +211,8 @@ static inline void xfs_dinode_put_rdev(s #define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */ #define XFS_DIFLAG_NODEFRAG_BIT 13 /* do not reorganize/defragment */ #define XFS_DIFLAG_FILESTREAM_BIT 14 /* use filestream allocator */ +#define XFS_DIFLAG_IXUNLINK_BIT 15 /* Immutable inver on unlink */ + #define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT) #define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT) #define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT) @@ -224,6 +228,7 @@ static inline void xfs_dinode_put_rdev(s #define XFS_DIFLAG_EXTSZINHERIT (1 << XFS_DIFLAG_EXTSZINHERIT_BIT) #define XFS_DIFLAG_NODEFRAG (1 << XFS_DIFLAG_NODEFRAG_BIT) #define XFS_DIFLAG_FILESTREAM (1 << XFS_DIFLAG_FILESTREAM_BIT) +#define XFS_DIFLAG_IXUNLINK (1 << XFS_DIFLAG_IXUNLINK_BIT) #ifdef CONFIG_XFS_RT #define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) @@ -236,6 +241,10 @@ static inline void xfs_dinode_put_rdev(s XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \ XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \ XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS | XFS_DIFLAG_EXTSIZE | \ - XFS_DIFLAG_EXTSZINHERIT | XFS_DIFLAG_NODEFRAG | XFS_DIFLAG_FILESTREAM) + XFS_DIFLAG_EXTSZINHERIT | XFS_DIFLAG_NODEFRAG | XFS_DIFLAG_FILESTREAM | \ + XFS_DIFLAG_IXUNLINK) + +#define XFS_DIVFLAG_BARRIER 0x01 +#define XFS_DIVFLAG_COW 0x02 
#endif /* __XFS_DINODE_H__ */ diff -NurpP --minimal linux-3.10.19/fs/xfs/xfs_fs.h linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_fs.h --- linux-3.10.19/fs/xfs/xfs_fs.h 2013-07-14 17:01:29.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_fs.h 2013-08-22 20:30:00.000000000 +0000 @@ -67,6 +67,9 @@ struct fsxattr { #define XFS_XFLAG_EXTSZINHERIT 0x00001000 /* inherit inode extent size */ #define XFS_XFLAG_NODEFRAG 0x00002000 /* do not defragment */ #define XFS_XFLAG_FILESTREAM 0x00004000 /* use filestream allocator */ +#define XFS_XFLAG_IXUNLINK 0x00008000 /* immutable invert on unlink */ +#define XFS_XFLAG_BARRIER 0x10000000 /* chroot() barrier */ +#define XFS_XFLAG_COW 0x20000000 /* copy on write mark */ #define XFS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */ /* @@ -304,7 +307,8 @@ typedef struct xfs_bstat { #define bs_projid bs_projid_lo /* (previously just bs_projid) */ __u16 bs_forkoff; /* inode fork offset in bytes */ __u16 bs_projid_hi; /* higher part of project id */ - unsigned char bs_pad[10]; /* pad space, unused */ + unsigned char bs_pad[8]; /* pad space, unused */ + __u16 bs_tag; /* context tagging */ __u32 bs_dmevmask; /* DMIG event mask */ __u16 bs_dmstate; /* DMIG state info */ __u16 bs_aextents; /* attribute number of extents */ diff -NurpP --minimal linux-3.10.19/fs/xfs/xfs_ialloc.c linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_ialloc.c --- linux-3.10.19/fs/xfs/xfs_ialloc.c 2013-07-14 17:01:29.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_ialloc.c 2013-08-22 20:30:00.000000000 +0000 @@ -39,7 +39,6 @@ #include "xfs_cksum.h" #include "xfs_buf_item.h" - /* * Allocation group level functions. */ diff -NurpP --minimal linux-3.10.19/fs/xfs/xfs_inode.c linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_inode.c --- linux-3.10.19/fs/xfs/xfs_inode.c 2013-07-14 17:01:29.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_inode.c 2013-08-22 21:50:34.000000000 +0000 @@ -16,6 +16,7 @@ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include +#include #include "xfs.h" #include "xfs_fs.h" @@ -836,15 +837,25 @@ xfs_iformat_btree( STATIC void xfs_dinode_from_disk( xfs_icdinode_t *to, - xfs_dinode_t *from) + xfs_dinode_t *from, + int tagged) { + uint32_t uid, gid, tag; + to->di_magic = be16_to_cpu(from->di_magic); to->di_mode = be16_to_cpu(from->di_mode); to->di_version = from ->di_version; to->di_format = from->di_format; to->di_onlink = be16_to_cpu(from->di_onlink); - to->di_uid = be32_to_cpu(from->di_uid); - to->di_gid = be32_to_cpu(from->di_gid); + + uid = be32_to_cpu(from->di_uid); + gid = be32_to_cpu(from->di_gid); + tag = be16_to_cpu(from->di_tag); + + to->di_uid = INOTAG_UID(tagged, uid, gid); + to->di_gid = INOTAG_GID(tagged, uid, gid); + to->di_tag = INOTAG_TAG(tagged, uid, gid, tag); + to->di_nlink = be32_to_cpu(from->di_nlink); to->di_projid_lo = be16_to_cpu(from->di_projid_lo); to->di_projid_hi = be16_to_cpu(from->di_projid_hi); @@ -866,6 +877,7 @@ xfs_dinode_from_disk( to->di_dmevmask = be32_to_cpu(from->di_dmevmask); to->di_dmstate = be16_to_cpu(from->di_dmstate); to->di_flags = be16_to_cpu(from->di_flags); + to->di_vflags = be16_to_cpu(from->di_vflags); to->di_gen = be32_to_cpu(from->di_gen); if (to->di_version == 3) { @@ -883,15 +895,19 @@ xfs_dinode_from_disk( void xfs_dinode_to_disk( xfs_dinode_t *to, - xfs_icdinode_t *from) + xfs_icdinode_t *from, + int tagged) { to->di_magic = cpu_to_be16(from->di_magic); to->di_mode = cpu_to_be16(from->di_mode); to->di_version = from ->di_version; to->di_format = from->di_format; to->di_onlink = cpu_to_be16(from->di_onlink); - to->di_uid 
= cpu_to_be32(from->di_uid); - to->di_gid = cpu_to_be32(from->di_gid); + + to->di_uid = cpu_to_be32(TAGINO_UID(tagged, from->di_uid, from->di_tag)); + to->di_gid = cpu_to_be32(TAGINO_GID(tagged, from->di_gid, from->di_tag)); + to->di_tag = cpu_to_be16(TAGINO_TAG(tagged, from->di_tag)); + to->di_nlink = cpu_to_be32(from->di_nlink); to->di_projid_lo = cpu_to_be16(from->di_projid_lo); to->di_projid_hi = cpu_to_be16(from->di_projid_hi); @@ -913,6 +929,7 @@ xfs_dinode_to_disk( to->di_dmevmask = cpu_to_be32(from->di_dmevmask); to->di_dmstate = cpu_to_be16(from->di_dmstate); to->di_flags = cpu_to_be16(from->di_flags); + to->di_vflags = cpu_to_be16(from->di_vflags); to->di_gen = cpu_to_be32(from->di_gen); if (from->di_version == 3) { @@ -929,7 +946,8 @@ xfs_dinode_to_disk( STATIC uint _xfs_dic2xflags( - __uint16_t di_flags) + __uint16_t di_flags, + __uint16_t di_vflags) { uint flags = 0; @@ -940,6 +958,8 @@ _xfs_dic2xflags( flags |= XFS_XFLAG_PREALLOC; if (di_flags & XFS_DIFLAG_IMMUTABLE) flags |= XFS_XFLAG_IMMUTABLE; + if (di_flags & XFS_DIFLAG_IXUNLINK) + flags |= XFS_XFLAG_IXUNLINK; if (di_flags & XFS_DIFLAG_APPEND) flags |= XFS_XFLAG_APPEND; if (di_flags & XFS_DIFLAG_SYNC) @@ -964,6 +984,10 @@ _xfs_dic2xflags( flags |= XFS_XFLAG_FILESTREAM; } + if (di_vflags & XFS_DIVFLAG_BARRIER) + flags |= FS_BARRIER_FL; + if (di_vflags & XFS_DIVFLAG_COW) + flags |= FS_COW_FL; return flags; } @@ -973,7 +997,7 @@ xfs_ip2xflags( { xfs_icdinode_t *dic = &ip->i_d; - return _xfs_dic2xflags(dic->di_flags) | + return _xfs_dic2xflags(dic->di_flags, dic->di_vflags) | (XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0); } @@ -981,7 +1005,8 @@ uint xfs_dic2xflags( xfs_dinode_t *dip) { - return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) | + return _xfs_dic2xflags(be16_to_cpu(dip->di_flags), + be16_to_cpu(dip->di_vflags)) | (XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0); } @@ -1072,7 +1097,8 @@ xfs_iread( * Otherwise, just get the truly permanent information. */ if (dip->di_mode) { - xfs_dinode_from_disk(&ip->i_d, dip); + xfs_dinode_from_disk(&ip->i_d, dip, + mp->m_flags & XFS_MOUNT_TAGGED); error = xfs_iformat(ip, dip); if (error) { #ifdef DEBUG @@ -1270,6 +1296,7 @@ xfs_ialloc( ASSERT(ip->i_d.di_nlink == nlink); ip->i_d.di_uid = current_fsuid(); ip->i_d.di_gid = current_fsgid(); + ip->i_d.di_tag = current_fstag(&ip->i_vnode); xfs_set_projid(ip, prid); memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); @@ -1329,6 +1356,7 @@ xfs_ialloc( ip->i_d.di_dmevmask = 0; ip->i_d.di_dmstate = 0; ip->i_d.di_flags = 0; + ip->i_d.di_vflags = 0; if (ip->i_d.di_version == 3) { ASSERT(ip->i_d.di_ino == ino); @@ -2052,6 +2080,7 @@ xfs_ifree( } ip->i_d.di_mode = 0; /* mark incore inode as free */ ip->i_d.di_flags = 0; + ip->i_d.di_vflags = 0; ip->i_d.di_dmevmask = 0; ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */ ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS; @@ -2219,7 +2248,6 @@ xfs_iroot_realloc( return; } - /* * This is called when the amount of space needed for if_data * is increased or decreased. The change in size is indicated by @@ -2899,7 +2927,8 @@ xfs_iflush_int( * because if the inode is dirty at all the core must * be. 
*/ - xfs_dinode_to_disk(dip, &ip->i_d); + xfs_dinode_to_disk(dip, &ip->i_d, + mp->m_flags & XFS_MOUNT_TAGGED); /* Wrap, we never let the log put out DI_MAX_FLUSH */ if (ip->i_d.di_flushiter == DI_MAX_FLUSH) diff -NurpP --minimal linux-3.10.19/fs/xfs/xfs_inode.h linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_inode.h --- linux-3.10.19/fs/xfs/xfs_inode.h 2013-07-14 17:01:30.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_inode.h 2013-08-22 20:30:00.000000000 +0000 @@ -134,7 +134,9 @@ typedef struct xfs_icdinode { __uint32_t di_nlink; /* number of links to file */ __uint16_t di_projid_lo; /* lower part of owner's project id */ __uint16_t di_projid_hi; /* higher part of owner's project id */ - __uint8_t di_pad[6]; /* unused, zeroed space */ + __uint8_t di_pad[2]; /* unused, zeroed space */ + __uint16_t di_tag; /* context tagging */ + __uint16_t di_vflags; /* vserver specific flags */ __uint16_t di_flushiter; /* incremented on flush */ xfs_ictimestamp_t di_atime; /* time last accessed */ xfs_ictimestamp_t di_mtime; /* time last modified */ @@ -583,7 +585,7 @@ int xfs_iread(struct xfs_mount *, struc struct xfs_inode *, uint); void xfs_dinode_calc_crc(struct xfs_mount *, struct xfs_dinode *); void xfs_dinode_to_disk(struct xfs_dinode *, - struct xfs_icdinode *); + struct xfs_icdinode *, int); void xfs_idestroy_fork(struct xfs_inode *, int); void xfs_idata_realloc(struct xfs_inode *, int, int); void xfs_iroot_realloc(struct xfs_inode *, int, int); diff -NurpP --minimal linux-3.10.19/fs/xfs/xfs_ioctl.c linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_ioctl.c --- linux-3.10.19/fs/xfs/xfs_ioctl.c 2013-07-14 17:01:30.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_ioctl.c 2013-08-22 20:30:00.000000000 +0000 @@ -26,7 +26,7 @@ #include "xfs_bmap_btree.h" #include "xfs_dinode.h" #include "xfs_inode.h" -#include "xfs_ioctl.h" +// #include "xfs_ioctl.h" #include "xfs_rtalloc.h" #include "xfs_itable.h" #include "xfs_error.h" @@ -769,6 +769,10 @@ xfs_merge_ioc_xflags( xflags |= XFS_XFLAG_IMMUTABLE; else xflags &= ~XFS_XFLAG_IMMUTABLE; + if (flags & FS_IXUNLINK_FL) + xflags |= XFS_XFLAG_IXUNLINK; + else + xflags &= ~XFS_XFLAG_IXUNLINK; if (flags & FS_APPEND_FL) xflags |= XFS_XFLAG_APPEND; else @@ -797,6 +801,8 @@ xfs_di2lxflags( if (di_flags & XFS_DIFLAG_IMMUTABLE) flags |= FS_IMMUTABLE_FL; + if (di_flags & XFS_DIFLAG_IXUNLINK) + flags |= FS_IXUNLINK_FL; if (di_flags & XFS_DIFLAG_APPEND) flags |= FS_APPEND_FL; if (di_flags & XFS_DIFLAG_SYNC) @@ -857,6 +863,8 @@ xfs_set_diflags( di_flags = (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC); if (xflags & XFS_XFLAG_IMMUTABLE) di_flags |= XFS_DIFLAG_IMMUTABLE; + if (xflags & XFS_XFLAG_IXUNLINK) + di_flags |= XFS_DIFLAG_IXUNLINK; if (xflags & XFS_XFLAG_APPEND) di_flags |= XFS_DIFLAG_APPEND; if (xflags & XFS_XFLAG_SYNC) @@ -899,6 +907,10 @@ xfs_diflags_to_linux( inode->i_flags |= S_IMMUTABLE; else inode->i_flags &= ~S_IMMUTABLE; + if (xflags & XFS_XFLAG_IXUNLINK) + inode->i_flags |= S_IXUNLINK; + else + inode->i_flags &= ~S_IXUNLINK; if (xflags & XFS_XFLAG_APPEND) inode->i_flags |= S_APPEND; else @@ -1403,10 +1415,18 @@ xfs_file_ioctl( case XFS_IOC_FSGETXATTRA: return xfs_ioc_fsgetxattr(ip, 1, arg); case XFS_IOC_FSSETXATTR: + if (IS_BARRIER(inode)) { + vxwprintk_task(1, "messing with the barrier."); + return -XFS_ERROR(EACCES); + } return xfs_ioc_fssetxattr(ip, filp, arg); case XFS_IOC_GETXFLAGS: return xfs_ioc_getxflags(ip, arg); case XFS_IOC_SETXFLAGS: + if (IS_BARRIER(inode)) { + vxwprintk_task(1, "messing with the barrier."); + return -XFS_ERROR(EACCES); + } return 
xfs_ioc_setxflags(ip, filp, arg); case XFS_IOC_FSSETDM: { diff -NurpP --minimal linux-3.10.19/fs/xfs/xfs_ioctl.h linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_ioctl.h --- linux-3.10.19/fs/xfs/xfs_ioctl.h 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_ioctl.h 2013-08-22 20:30:00.000000000 +0000 @@ -70,6 +70,12 @@ xfs_handle_to_dentry( void __user *uhandle, u32 hlen); +extern int +xfs_sync_flags( + struct inode *inode, + int flags, + int vflags); + extern long xfs_file_ioctl( struct file *filp, diff -NurpP --minimal linux-3.10.19/fs/xfs/xfs_iops.c linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_iops.c --- linux-3.10.19/fs/xfs/xfs_iops.c 2013-07-14 17:01:30.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_iops.c 2013-08-22 20:30:00.000000000 +0000 @@ -28,6 +28,7 @@ #include "xfs_bmap_btree.h" #include "xfs_dinode.h" #include "xfs_inode.h" +#include "xfs_ioctl.h" #include "xfs_bmap.h" #include "xfs_rtalloc.h" #include "xfs_error.h" @@ -47,6 +48,7 @@ #include #include #include +#include static int xfs_initxattrs( @@ -422,6 +424,7 @@ xfs_vn_getattr( stat->nlink = ip->i_d.di_nlink; stat->uid = ip->i_d.di_uid; stat->gid = ip->i_d.di_gid; + stat->tag = ip->i_d.di_tag; stat->ino = ip->i_ino; stat->atime = inode->i_atime; stat->mtime = inode->i_mtime; @@ -1054,6 +1057,7 @@ static const struct inode_operations xfs .listxattr = xfs_vn_listxattr, .fiemap = xfs_vn_fiemap, .update_time = xfs_vn_update_time, + .sync_flags = xfs_sync_flags, }; static const struct inode_operations xfs_dir_inode_operations = { @@ -1080,6 +1084,7 @@ static const struct inode_operations xfs .removexattr = generic_removexattr, .listxattr = xfs_vn_listxattr, .update_time = xfs_vn_update_time, + .sync_flags = xfs_sync_flags, }; static const struct inode_operations xfs_dir_ci_inode_operations = { @@ -1131,6 +1136,10 @@ xfs_diflags_to_iflags( inode->i_flags |= S_IMMUTABLE; else inode->i_flags &= ~S_IMMUTABLE; + if (ip->i_d.di_flags & XFS_DIFLAG_IXUNLINK) + inode->i_flags |= S_IXUNLINK; + else + inode->i_flags &= ~S_IXUNLINK; if (ip->i_d.di_flags & XFS_DIFLAG_APPEND) inode->i_flags |= S_APPEND; else @@ -1143,6 +1152,15 @@ xfs_diflags_to_iflags( inode->i_flags |= S_NOATIME; else inode->i_flags &= ~S_NOATIME; + + if (ip->i_d.di_vflags & XFS_DIVFLAG_BARRIER) + inode->i_vflags |= V_BARRIER; + else + inode->i_vflags &= ~V_BARRIER; + if (ip->i_d.di_vflags & XFS_DIVFLAG_COW) + inode->i_vflags |= V_COW; + else + inode->i_vflags &= ~V_COW; } /* @@ -1174,6 +1192,7 @@ xfs_setup_inode( set_nlink(inode, ip->i_d.di_nlink); inode->i_uid = ip->i_d.di_uid; inode->i_gid = ip->i_d.di_gid; + inode->i_tag = ip->i_d.di_tag; switch (inode->i_mode & S_IFMT) { case S_IFBLK: diff -NurpP --minimal linux-3.10.19/fs/xfs/xfs_itable.c linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_itable.c --- linux-3.10.19/fs/xfs/xfs_itable.c 2013-02-19 13:58:49.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_itable.c 2013-08-22 20:30:00.000000000 +0000 @@ -97,6 +97,7 @@ xfs_bulkstat_one_int( buf->bs_mode = dic->di_mode; buf->bs_uid = dic->di_uid; buf->bs_gid = dic->di_gid; + buf->bs_tag = dic->di_tag; buf->bs_size = dic->di_size; buf->bs_atime.tv_sec = dic->di_atime.t_sec; buf->bs_atime.tv_nsec = dic->di_atime.t_nsec; diff -NurpP --minimal linux-3.10.19/fs/xfs/xfs_linux.h linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_linux.h --- linux-3.10.19/fs/xfs/xfs_linux.h 2013-07-14 17:01:30.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_linux.h 2013-08-22 20:30:00.000000000 +0000 @@ -124,6 +124,7 @@ #define current_cpu() (raw_smp_processor_id()) #define current_pid() (current->pid) 
+#define current_fstag(vp) (dx_current_fstag((vp)->i_sb)) #define current_test_flags(f) (current->flags & (f)) #define current_set_flags_nested(sp, f) \ (*(sp) = current->flags, current->flags |= (f)) diff -NurpP --minimal linux-3.10.19/fs/xfs/xfs_log_recover.c linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_log_recover.c --- linux-3.10.19/fs/xfs/xfs_log_recover.c 2013-07-14 17:01:31.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_log_recover.c 2013-08-22 21:48:01.000000000 +0000 @@ -2662,7 +2662,7 @@ xlog_recover_inode_pass2( } /* The core is in in-core format */ - xfs_dinode_to_disk(dip, dicp); + xfs_dinode_to_disk(dip, dicp, mp->m_flags & XFS_MOUNT_TAGGED); /* the rest is in on-disk format */ if (item->ri_buf[1].i_len > isize) { diff -NurpP --minimal linux-3.10.19/fs/xfs/xfs_mount.h linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_mount.h --- linux-3.10.19/fs/xfs/xfs_mount.h 2013-07-14 17:01:31.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_mount.h 2013-08-22 20:30:00.000000000 +0000 @@ -253,6 +253,7 @@ typedef struct xfs_mount { allocator */ #define XFS_MOUNT_NOATTR2 (1ULL << 25) /* disable use of attr2 format */ +#define XFS_MOUNT_TAGGED (1ULL << 31) /* context tagging */ /* * Default minimum read and write sizes. diff -NurpP --minimal linux-3.10.19/fs/xfs/xfs_super.c linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_super.c --- linux-3.10.19/fs/xfs/xfs_super.c 2013-07-14 17:01:31.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_super.c 2013-08-22 20:30:00.000000000 +0000 @@ -114,6 +114,9 @@ mempool_t *xfs_ioend_pool; #define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed logging disabled */ #define MNTOPT_DISCARD "discard" /* Discard unused blocks */ #define MNTOPT_NODISCARD "nodiscard" /* Do not discard unused blocks */ +#define MNTOPT_TAGXID "tagxid" /* context tagging for inodes */ +#define MNTOPT_TAGGED "tag" /* context tagging for inodes */ +#define MNTOPT_NOTAGTAG "notag" /* do not use context tagging */ /* * Table driven mount option parser. 
@@ -126,6 +129,8 @@ enum { Opt_nobarrier, Opt_inode64, Opt_inode32, + Opt_tag, + Opt_notag, Opt_err }; @@ -134,6 +139,9 @@ static const match_table_t tokens = { {Opt_nobarrier, "nobarrier"}, {Opt_inode64, "inode64"}, {Opt_inode32, "inode32"}, + {Opt_tag, "tagxid"}, + {Opt_tag, "tag"}, + {Opt_notag, "notag"}, {Opt_err, NULL} }; @@ -392,6 +400,19 @@ xfs_parseargs( } else if (!strcmp(this_char, "irixsgid")) { xfs_warn(mp, "irixsgid is now a sysctl(2) variable, option is deprecated."); +#ifndef CONFIG_TAGGING_NONE + } else if (!strcmp(this_char, MNTOPT_TAGGED)) { + mp->m_flags |= XFS_MOUNT_TAGGED; + } else if (!strcmp(this_char, MNTOPT_NOTAGTAG)) { + mp->m_flags &= ~XFS_MOUNT_TAGGED; + } else if (!strcmp(this_char, MNTOPT_TAGXID)) { + mp->m_flags |= XFS_MOUNT_TAGGED; +#endif +#ifdef CONFIG_PROPAGATE + } else if (!strcmp(this_char, MNTOPT_TAGGED)) { + /* use value */ + mp->m_flags |= XFS_MOUNT_TAGGED; +#endif } else { xfs_warn(mp, "unknown mount option [%s].", this_char); return EINVAL; @@ -1238,6 +1259,16 @@ xfs_fs_remount( case Opt_inode32: mp->m_maxagi = xfs_set_inode32(mp); break; + case Opt_tag: + if (!(sb->s_flags & MS_TAGGED)) { + printk(KERN_INFO + "XFS: %s: tagging not permitted on remount.\n", + sb->s_id); + return -EINVAL; + } + break; + case Opt_notag: + break; default: /* * Logically we would return an error here to prevent @@ -1469,6 +1500,9 @@ xfs_fs_fill_super( if (error) goto out_free_sb; + if (mp->m_flags & XFS_MOUNT_TAGGED) + sb->s_flags |= MS_TAGGED; + /* * we must configure the block size in the superblock before we run the * full mount process as the mount process can lookup and cache inodes. diff -NurpP --minimal linux-3.10.19/fs/xfs/xfs_vnodeops.c linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_vnodeops.c --- linux-3.10.19/fs/xfs/xfs_vnodeops.c 2013-07-14 17:01:31.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/fs/xfs/xfs_vnodeops.c 2013-08-22 20:30:00.000000000 +0000 @@ -155,6 +155,77 @@ xfs_free_eofblocks( return error; } + +STATIC void +xfs_get_inode_flags( + xfs_inode_t *ip) +{ + struct inode *inode = VFS_I(ip); + unsigned int flags = inode->i_flags; + unsigned int vflags = inode->i_vflags; + + if (flags & S_IMMUTABLE) + ip->i_d.di_flags |= XFS_DIFLAG_IMMUTABLE; + else + ip->i_d.di_flags &= ~XFS_DIFLAG_IMMUTABLE; + if (flags & S_IXUNLINK) + ip->i_d.di_flags |= XFS_DIFLAG_IXUNLINK; + else + ip->i_d.di_flags &= ~XFS_DIFLAG_IXUNLINK; + + if (vflags & V_BARRIER) + ip->i_d.di_vflags |= XFS_DIVFLAG_BARRIER; + else + ip->i_d.di_vflags &= ~XFS_DIVFLAG_BARRIER; + if (vflags & V_COW) + ip->i_d.di_vflags |= XFS_DIVFLAG_COW; + else + ip->i_d.di_vflags &= ~XFS_DIVFLAG_COW; +} + +int +xfs_sync_flags( + struct inode *inode, + int flags, + int vflags) +{ + struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; + struct xfs_trans *tp; + unsigned int lock_flags = 0; + int code; + + tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); + code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0); + if (code) + goto error_out; + + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, 0); + + inode->i_flags = flags; + inode->i_vflags = vflags; + xfs_get_inode_flags(ip); + + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG); + + XFS_STATS_INC(xs_ig_attrchg); + + if (mp->m_flags & XFS_MOUNT_WSYNC) + xfs_trans_set_sync(tp); + code = xfs_trans_commit(tp, 0); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + return code; + +error_out: + xfs_trans_cancel(tp, 0); + if (lock_flags) + xfs_iunlock(ip, XFS_ILOCK_EXCL); + return code; +} + + int xfs_release( 
xfs_inode_t *ip) diff -NurpP --minimal linux-3.10.19/include/linux/cred.h linux-3.10.19-vs2.3.6.8/include/linux/cred.h --- linux-3.10.19/include/linux/cred.h 2013-02-19 13:58:50.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/cred.h 2013-08-22 20:30:00.000000000 +0000 @@ -143,6 +143,7 @@ extern void exit_creds(struct task_struc extern int copy_creds(struct task_struct *, unsigned long); extern const struct cred *get_task_cred(struct task_struct *); extern struct cred *cred_alloc_blank(void); +extern struct cred *__prepare_creds(const struct cred *); extern struct cred *prepare_creds(void); extern struct cred *prepare_exec_creds(void); extern int commit_creds(struct cred *); @@ -196,6 +197,31 @@ static inline void validate_process_cred } #endif +static inline void set_cred_subscribers(struct cred *cred, int n) +{ +#ifdef CONFIG_DEBUG_CREDENTIALS + atomic_set(&cred->subscribers, n); +#endif +} + +static inline int read_cred_subscribers(const struct cred *cred) +{ +#ifdef CONFIG_DEBUG_CREDENTIALS + return atomic_read(&cred->subscribers); +#else + return 0; +#endif +} + +static inline void alter_cred_subscribers(const struct cred *_cred, int n) +{ +#ifdef CONFIG_DEBUG_CREDENTIALS + struct cred *cred = (struct cred *) _cred; + + atomic_add(n, &cred->subscribers); +#endif +} + /** * get_new_cred - Get a reference on a new set of credentials * @cred: The new credentials to reference diff -NurpP --minimal linux-3.10.19/include/linux/devpts_fs.h linux-3.10.19-vs2.3.6.8/include/linux/devpts_fs.h --- linux-3.10.19/include/linux/devpts_fs.h 2013-02-19 13:58:50.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/devpts_fs.h 2013-08-22 20:30:00.000000000 +0000 @@ -45,5 +45,4 @@ static inline void devpts_pty_kill(struc #endif - #endif /* _LINUX_DEVPTS_FS_H */ diff -NurpP --minimal linux-3.10.19/include/linux/fs.h linux-3.10.19-vs2.3.6.8/include/linux/fs.h --- linux-3.10.19/include/linux/fs.h 2013-07-14 17:01:32.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/fs.h 2013-08-22 20:30:00.000000000 +0000 @@ -211,6 +211,7 @@ typedef void (dio_iodone_t)(struct kiocb #define ATTR_KILL_PRIV (1 << 14) #define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */ #define ATTR_TIMES_SET (1 << 16) +#define ATTR_TAG (1 << 17) /* * This is the Inode Attributes structure, used for notify_change(). 
It @@ -226,6 +227,7 @@ struct iattr { umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; + ktag_t ia_tag; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; @@ -523,7 +525,9 @@ struct inode { unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; - unsigned int i_flags; + ktag_t i_tag; + unsigned short i_flags; + unsigned short i_vflags; #ifdef CONFIG_FS_POSIX_ACL struct posix_acl *i_acl; @@ -552,6 +556,7 @@ struct inode { unsigned int __i_nlink; }; dev_t i_rdev; + dev_t i_mdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; @@ -704,6 +709,11 @@ static inline gid_t i_gid_read(const str return from_kgid(&init_user_ns, inode->i_gid); } +static inline vtag_t i_tag_read(const struct inode *inode) +{ + return from_ktag(&init_user_ns, inode->i_tag); +} + static inline void i_uid_write(struct inode *inode, uid_t uid) { inode->i_uid = make_kuid(&init_user_ns, uid); @@ -714,14 +724,19 @@ static inline void i_gid_write(struct in inode->i_gid = make_kgid(&init_user_ns, gid); } +static inline void i_tag_write(struct inode *inode, vtag_t tag) +{ + inode->i_tag = make_ktag(&init_user_ns, tag); +} + static inline unsigned iminor(const struct inode *inode) { - return MINOR(inode->i_rdev); + return MINOR(inode->i_mdev); } static inline unsigned imajor(const struct inode *inode) { - return MAJOR(inode->i_rdev); + return MAJOR(inode->i_mdev); } extern struct block_device *I_BDEV(struct inode *inode); @@ -788,6 +803,7 @@ struct file { loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; + vxid_t f_xid; struct file_ra_state f_ra; u64 f_version; @@ -939,6 +955,7 @@ struct file_lock { struct file *fl_file; loff_t fl_start; loff_t fl_end; + vxid_t fl_xid; struct fasync_struct * fl_fasync; /* for lease break notifications */ /* for lease breaks: */ @@ -1569,6 +1586,7 @@ struct inode_operations { ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t); ssize_t (*listxattr) (struct dentry *, char *, size_t); int (*removexattr) (struct dentry *, const char *); + int (*sync_flags) (struct inode *, int, int); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); int (*update_time)(struct inode *, struct timespec *, int); @@ -1581,6 +1599,7 @@ ssize_t rw_copy_check_uvector(int type, unsigned long nr_segs, unsigned long fast_segs, struct iovec *fast_pointer, struct iovec **ret_pointer); +ssize_t vfs_sendfile(struct file *, struct file *, loff_t *, size_t, loff_t); extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); @@ -1634,6 +1653,14 @@ struct super_operations { #define S_IMA 1024 /* Inode has an associated IMA struct */ #define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */ #define S_NOSEC 4096 /* no suid or xattr security attributes */ +#define S_IXUNLINK 8192 /* Immutable Invert on unlink */ + +/* Linux-VServer related Inode flags */ + +#define V_VALID 1 +#define V_XATTR 2 +#define V_BARRIER 4 /* Barrier for chroot() */ +#define V_COW 8 /* Copy on Write */ /* * Note that nosuid etc flags are inode-specific: setting some file-system @@ -1658,10 +1685,13 @@ struct super_operations { #define IS_MANDLOCK(inode) __IS_FLG(inode, MS_MANDLOCK) #define IS_NOATIME(inode) __IS_FLG(inode, MS_RDONLY|MS_NOATIME) #define IS_I_VERSION(inode) __IS_FLG(inode, MS_I_VERSION) +#define IS_TAGGED(inode) __IS_FLG(inode, MS_TAGGED) #define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA) #define IS_APPEND(inode) ((inode)->i_flags & S_APPEND) #define 
IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE) +#define IS_IXUNLINK(inode) ((inode)->i_flags & S_IXUNLINK) +#define IS_IXORUNLINK(inode) ((IS_IXUNLINK(inode) ? S_IMMUTABLE : 0) ^ IS_IMMUTABLE(inode)) #define IS_POSIXACL(inode) __IS_FLG(inode, MS_POSIXACL) #define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD) @@ -1672,6 +1702,16 @@ struct super_operations { #define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT) #define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC) +#define IS_BARRIER(inode) (S_ISDIR((inode)->i_mode) && ((inode)->i_vflags & V_BARRIER)) + +#ifdef CONFIG_VSERVER_COWBL +# define IS_COW(inode) (IS_IXUNLINK(inode) && IS_IMMUTABLE(inode)) +# define IS_COW_LINK(inode) (S_ISREG((inode)->i_mode) && ((inode)->i_nlink > 1)) +#else +# define IS_COW(inode) (0) +# define IS_COW_LINK(inode) (0) +#endif + /* * Inode state bits. Protected by inode->i_lock * @@ -1900,6 +1940,9 @@ extern int rw_verify_area(int, struct fi extern int locks_mandatory_locked(struct inode *); extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t); +#define ATTR_FLAG_BARRIER 512 /* Barrier for chroot() */ +#define ATTR_FLAG_IXUNLINK 1024 /* Immutable invert on unlink */ + /* * Candidates for mandatory locking have the setgid bit set * but no group execute bit - an otherwise meaningless combination. @@ -2525,6 +2568,7 @@ extern int dcache_dir_open(struct inode extern int dcache_dir_close(struct inode *, struct file *); extern loff_t dcache_dir_lseek(struct file *, loff_t, int); extern int dcache_readdir(struct file *, void *, filldir_t); +extern int dcache_readdir_filter(struct file *, void *, filldir_t, int (*)(struct dentry *)); extern int simple_setattr(struct dentry *, struct iattr *); extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *); extern int simple_statfs(struct dentry *, struct kstatfs *); diff -NurpP --minimal linux-3.10.19/include/linux/init_task.h linux-3.10.19-vs2.3.6.8/include/linux/init_task.h --- linux-3.10.19/include/linux/init_task.h 2013-05-31 13:45:27.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/init_task.h 2013-08-22 20:30:00.000000000 +0000 @@ -222,6 +222,10 @@ extern struct task_group root_task_group INIT_TASK_RCU_PREEMPT(tsk) \ INIT_CPUSET_SEQ \ INIT_VTIME(tsk) \ + .xid = 0, \ + .vx_info = NULL, \ + .nid = 0, \ + .nx_info = NULL, \ } diff -NurpP --minimal linux-3.10.19/include/linux/ipc.h linux-3.10.19-vs2.3.6.8/include/linux/ipc.h --- linux-3.10.19/include/linux/ipc.h 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/ipc.h 2013-08-22 20:30:00.000000000 +0000 @@ -16,6 +16,7 @@ struct kern_ipc_perm key_t key; kuid_t uid; kgid_t gid; + vxid_t xid; kuid_t cuid; kgid_t cgid; umode_t mode; diff -NurpP --minimal linux-3.10.19/include/linux/loop.h linux-3.10.19-vs2.3.6.8/include/linux/loop.h --- linux-3.10.19/include/linux/loop.h 2013-02-19 13:58:51.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/loop.h 2013-08-22 20:30:00.000000000 +0000 @@ -41,6 +41,7 @@ struct loop_device { struct loop_func_table *lo_encryption; __u32 lo_init[2]; kuid_t lo_key_owner; /* Who set the key */ + vxid_t lo_xid; int (*ioctl)(struct loop_device *, int cmd, unsigned long arg); diff -NurpP --minimal linux-3.10.19/include/linux/memcontrol.h linux-3.10.19-vs2.3.6.8/include/linux/memcontrol.h --- linux-3.10.19/include/linux/memcontrol.h 2013-05-31 13:45:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/memcontrol.h 2013-08-22 20:30:00.000000000 +0000 @@ -86,6 +86,13 @@ extern struct mem_cgroup 
*try_get_mem_cg extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont); +extern u64 mem_cgroup_res_read_u64(struct mem_cgroup *mem, int member); +extern u64 mem_cgroup_memsw_read_u64(struct mem_cgroup *mem, int member); + +extern s64 mem_cgroup_stat_read_cache(struct mem_cgroup *mem); +extern s64 mem_cgroup_stat_read_anon(struct mem_cgroup *mem); +extern s64 mem_cgroup_stat_read_mapped(struct mem_cgroup *mem); + static inline bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg) { diff -NurpP --minimal linux-3.10.19/include/linux/mm_types.h linux-3.10.19-vs2.3.6.8/include/linux/mm_types.h --- linux-3.10.19/include/linux/mm_types.h 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/mm_types.h 2013-11-13 17:17:16.000000000 +0000 @@ -381,6 +381,7 @@ struct mm_struct { /* Architecture-specific MM context */ mm_context_t context; + struct vx_info *mm_vx_info; unsigned long flags; /* Must use atomic bitops to access the bits */ diff -NurpP --minimal linux-3.10.19/include/linux/mount.h linux-3.10.19-vs2.3.6.8/include/linux/mount.h --- linux-3.10.19/include/linux/mount.h 2013-05-31 13:45:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/mount.h 2013-08-22 20:30:00.000000000 +0000 @@ -49,6 +49,9 @@ struct mnt_namespace; #define MNT_LOCK_READONLY 0x400000 +#define MNT_TAGID 0x10000 +#define MNT_NOTAG 0x20000 + struct vfsmount { struct dentry *mnt_root; /* root of the mounted tree */ struct super_block *mnt_sb; /* pointer to superblock */ diff -NurpP --minimal linux-3.10.19/include/linux/net.h linux-3.10.19-vs2.3.6.8/include/linux/net.h --- linux-3.10.19/include/linux/net.h 2013-07-14 17:01:32.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/net.h 2013-08-22 20:30:00.000000000 +0000 @@ -38,6 +38,7 @@ struct net; #define SOCK_PASSCRED 3 #define SOCK_PASSSEC 4 #define SOCK_EXTERNALLY_ALLOCATED 5 +#define SOCK_USER_SOCKET 6 #ifndef ARCH_HAS_SOCKET_TYPES /** diff -NurpP --minimal linux-3.10.19/include/linux/netdevice.h linux-3.10.19-vs2.3.6.8/include/linux/netdevice.h --- linux-3.10.19/include/linux/netdevice.h 2013-07-14 17:01:32.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/netdevice.h 2013-08-22 20:30:00.000000000 +0000 @@ -1694,6 +1694,7 @@ extern int init_dummy_netdev(struct net extern struct net_device *dev_get_by_index(struct net *net, int ifindex); extern struct net_device *__dev_get_by_index(struct net *net, int ifindex); +extern struct net_device *dev_get_by_index_real_rcu(struct net *net, int ifindex); extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); extern int netdev_get_name(struct net *net, char *name, int ifindex); extern int dev_restart(struct net_device *dev); diff -NurpP --minimal linux-3.10.19/include/linux/nsproxy.h linux-3.10.19-vs2.3.6.8/include/linux/nsproxy.h --- linux-3.10.19/include/linux/nsproxy.h 2013-02-19 13:58:51.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/nsproxy.h 2013-08-22 20:30:00.000000000 +0000 @@ -3,6 +3,7 @@ #include #include +#include struct mnt_namespace; struct uts_namespace; @@ -63,6 +64,7 @@ static inline struct nsproxy *task_nspro } int copy_namespaces(unsigned long flags, struct task_struct *tsk); +struct nsproxy *copy_nsproxy(struct nsproxy *orig); void exit_task_namespaces(struct task_struct *tsk); void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new); void free_nsproxy(struct nsproxy *ns); @@ -70,16 +72,26 @@ int 
unshare_nsproxy_namespaces(unsigned struct cred *, struct fs_struct *); int __init nsproxy_cache_init(void); -static inline void put_nsproxy(struct nsproxy *ns) +#define get_nsproxy(n) __get_nsproxy(n, __FILE__, __LINE__) + +static inline void __get_nsproxy(struct nsproxy *ns, + const char *_file, int _line) { - if (atomic_dec_and_test(&ns->count)) { - free_nsproxy(ns); - } + vxlprintk(VXD_CBIT(space, 0), "get_nsproxy(%p[%u])", + ns, atomic_read(&ns->count), _file, _line); + atomic_inc(&ns->count); } -static inline void get_nsproxy(struct nsproxy *ns) +#define put_nsproxy(n) __put_nsproxy(n, __FILE__, __LINE__) + +static inline void __put_nsproxy(struct nsproxy *ns, + const char *_file, int _line) { - atomic_inc(&ns->count); + vxlprintk(VXD_CBIT(space, 0), "put_nsproxy(%p[%u])", + ns, atomic_read(&ns->count), _file, _line); + if (atomic_dec_and_test(&ns->count)) { + free_nsproxy(ns); + } } #endif diff -NurpP --minimal linux-3.10.19/include/linux/pid.h linux-3.10.19-vs2.3.6.8/include/linux/pid.h --- linux-3.10.19/include/linux/pid.h 2013-05-31 13:45:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/pid.h 2013-08-22 20:30:00.000000000 +0000 @@ -8,7 +8,8 @@ enum pid_type PIDTYPE_PID, PIDTYPE_PGID, PIDTYPE_SID, - PIDTYPE_MAX + PIDTYPE_MAX, + PIDTYPE_REALPID }; /* @@ -172,6 +173,7 @@ static inline pid_t pid_nr(struct pid *p } pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns); +pid_t pid_unmapped_nr_ns(struct pid *pid, struct pid_namespace *ns); pid_t pid_vnr(struct pid *pid); #define do_each_pid_task(pid, type, task) \ diff -NurpP --minimal linux-3.10.19/include/linux/quotaops.h linux-3.10.19-vs2.3.6.8/include/linux/quotaops.h --- linux-3.10.19/include/linux/quotaops.h 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/quotaops.h 2013-08-22 20:30:00.000000000 +0000 @@ -8,6 +8,7 @@ #define _LINUX_QUOTAOPS_ #include +#include #define DQUOT_SPACE_WARN 0x1 #define DQUOT_SPACE_RESERVE 0x2 @@ -205,11 +206,12 @@ static inline void dquot_drop(struct ino static inline int dquot_alloc_inode(const struct inode *inode) { - return 0; + return dl_alloc_inode(inode); } static inline void dquot_free_inode(const struct inode *inode) { + dl_free_inode(inode); } static inline int dquot_transfer(struct inode *inode, struct iattr *iattr) @@ -220,6 +222,10 @@ static inline int dquot_transfer(struct static inline int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags) { + int ret = 0; + + if ((ret = dl_alloc_space(inode, number))) + return ret; if (!(flags & DQUOT_SPACE_RESERVE)) inode_add_bytes(inode, number); return 0; @@ -230,6 +236,7 @@ static inline void __dquot_free_space(st { if (!(flags & DQUOT_SPACE_RESERVE)) inode_sub_bytes(inode, number); + dl_free_space(inode, number); } static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) diff -NurpP --minimal linux-3.10.19/include/linux/sched.h linux-3.10.19-vs2.3.6.8/include/linux/sched.h --- linux-3.10.19/include/linux/sched.h 2013-07-14 17:01:33.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/sched.h 2013-10-09 17:37:22.000000000 +0000 @@ -1233,6 +1233,14 @@ struct task_struct { #endif struct seccomp seccomp; +/* vserver context data */ + struct vx_info *vx_info; + struct nx_info *nx_info; + + vxid_t xid; + vnid_t nid; + vtag_t tag; + /* Thread group tracking */ u32 parent_exec_id; u32 self_exec_id; @@ -1476,6 +1484,11 @@ struct pid_namespace; pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns); +#include +#include +#include 
+#include + static inline pid_t task_pid_nr(struct task_struct *tsk) { return tsk->pid; @@ -1489,7 +1502,8 @@ static inline pid_t task_pid_nr_ns(struc static inline pid_t task_pid_vnr(struct task_struct *tsk) { - return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); + // return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); + return vx_map_pid(__task_pid_nr_ns(tsk, PIDTYPE_PID, NULL)); } @@ -1502,7 +1516,7 @@ pid_t task_tgid_nr_ns(struct task_struct static inline pid_t task_tgid_vnr(struct task_struct *tsk) { - return pid_vnr(task_tgid(tsk)); + return vx_map_tgid(pid_vnr(task_tgid(tsk))); } diff -NurpP --minimal linux-3.10.19/include/linux/shmem_fs.h linux-3.10.19-vs2.3.6.8/include/linux/shmem_fs.h --- linux-3.10.19/include/linux/shmem_fs.h 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/shmem_fs.h 2013-08-22 20:30:00.000000000 +0000 @@ -9,6 +9,9 @@ /* inode in-kernel data */ +#define TMPFS_SUPER_MAGIC 0x01021994 + + struct shmem_inode_info { spinlock_t lock; unsigned long flags; diff -NurpP --minimal linux-3.10.19/include/linux/stat.h linux-3.10.19-vs2.3.6.8/include/linux/stat.h --- linux-3.10.19/include/linux/stat.h 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/stat.h 2013-08-22 20:30:00.000000000 +0000 @@ -25,6 +25,7 @@ struct kstat { unsigned int nlink; kuid_t uid; kgid_t gid; + ktag_t tag; dev_t rdev; loff_t size; struct timespec atime; diff -NurpP --minimal linux-3.10.19/include/linux/sunrpc/auth.h linux-3.10.19-vs2.3.6.8/include/linux/sunrpc/auth.h --- linux-3.10.19/include/linux/sunrpc/auth.h 2013-07-14 17:01:33.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/sunrpc/auth.h 2013-08-22 20:30:00.000000000 +0000 @@ -28,6 +28,7 @@ struct rpcsec_gss_info; struct auth_cred { kuid_t uid; kgid_t gid; + ktag_t tag; struct group_info *group_info; const char *principal; unsigned char machine_cred : 1; diff -NurpP --minimal linux-3.10.19/include/linux/sunrpc/clnt.h linux-3.10.19-vs2.3.6.8/include/linux/sunrpc/clnt.h --- linux-3.10.19/include/linux/sunrpc/clnt.h 2013-07-14 17:01:33.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/sunrpc/clnt.h 2013-08-22 20:30:00.000000000 +0000 @@ -49,7 +49,8 @@ struct rpc_clnt { unsigned int cl_softrtry : 1,/* soft timeouts */ cl_discrtry : 1,/* disconnect before retry */ cl_autobind : 1,/* use getport() */ - cl_chatty : 1;/* be verbose */ + cl_chatty : 1,/* be verbose */ + cl_tag : 1;/* context tagging */ struct rpc_rtt * cl_rtt; /* RTO estimator data */ const struct rpc_timeout *cl_timeout; /* Timeout strategy */ diff -NurpP --minimal linux-3.10.19/include/linux/sysfs.h linux-3.10.19-vs2.3.6.8/include/linux/sysfs.h --- linux-3.10.19/include/linux/sysfs.h 2013-05-31 13:45:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/sysfs.h 2013-08-22 20:30:00.000000000 +0000 @@ -19,6 +19,8 @@ #include #include +#define SYSFS_SUPER_MAGIC 0x62656572 + struct kobject; struct module; enum kobj_ns_type; diff -NurpP --minimal linux-3.10.19/include/linux/types.h linux-3.10.19-vs2.3.6.8/include/linux/types.h --- linux-3.10.19/include/linux/types.h 2013-02-19 13:58:52.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/types.h 2013-08-22 20:30:00.000000000 +0000 @@ -32,6 +32,9 @@ typedef __kernel_uid32_t uid_t; typedef __kernel_gid32_t gid_t; typedef __kernel_uid16_t uid16_t; typedef __kernel_gid16_t gid16_t; +typedef unsigned int vxid_t; +typedef unsigned int vnid_t; +typedef unsigned int vtag_t; typedef unsigned long uintptr_t; diff -NurpP --minimal linux-3.10.19/include/linux/uidgid.h 
linux-3.10.19-vs2.3.6.8/include/linux/uidgid.h --- linux-3.10.19/include/linux/uidgid.h 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/uidgid.h 2013-08-22 20:30:00.000000000 +0000 @@ -23,13 +23,17 @@ typedef struct { uid_t val; } kuid_t; - typedef struct { gid_t val; } kgid_t; +typedef struct { + vtag_t val; +} ktag_t; + #define KUIDT_INIT(value) (kuid_t){ value } #define KGIDT_INIT(value) (kgid_t){ value } +#define KTAGT_INIT(value) (ktag_t){ value } static inline uid_t __kuid_val(kuid_t uid) { @@ -41,10 +45,16 @@ static inline gid_t __kgid_val(kgid_t gi return gid.val; } +static inline vtag_t __ktag_val(ktag_t tag) +{ + return tag.val; +} + #else typedef uid_t kuid_t; typedef gid_t kgid_t; +typedef vtag_t ktag_t; static inline uid_t __kuid_val(kuid_t uid) { @@ -56,16 +66,24 @@ static inline gid_t __kgid_val(kgid_t gi return gid; } +static inline vtag_t __ktag_val(ktag_t tag) +{ + return tag; +} + #define KUIDT_INIT(value) ((kuid_t) value ) #define KGIDT_INIT(value) ((kgid_t) value ) +#define KTAGT_INIT(value) ((ktag_t) value ) #endif #define GLOBAL_ROOT_UID KUIDT_INIT(0) #define GLOBAL_ROOT_GID KGIDT_INIT(0) +#define GLOBAL_ROOT_TAG KTAGT_INIT(0) #define INVALID_UID KUIDT_INIT(-1) #define INVALID_GID KGIDT_INIT(-1) +#define INVALID_TAG KTAGT_INIT(-1) static inline bool uid_eq(kuid_t left, kuid_t right) { @@ -77,6 +95,11 @@ static inline bool gid_eq(kgid_t left, k return __kgid_val(left) == __kgid_val(right); } +static inline bool tag_eq(ktag_t left, ktag_t right) +{ + return __ktag_val(left) == __ktag_val(right); +} + static inline bool uid_gt(kuid_t left, kuid_t right) { return __kuid_val(left) > __kuid_val(right); @@ -127,13 +150,21 @@ static inline bool gid_valid(kgid_t gid) return !gid_eq(gid, INVALID_GID); } +static inline bool tag_valid(ktag_t tag) +{ + return !tag_eq(tag, INVALID_TAG); +} + #ifdef CONFIG_USER_NS extern kuid_t make_kuid(struct user_namespace *from, uid_t uid); extern kgid_t make_kgid(struct user_namespace *from, gid_t gid); +extern ktag_t make_ktag(struct user_namespace *from, gid_t gid); extern uid_t from_kuid(struct user_namespace *to, kuid_t uid); extern gid_t from_kgid(struct user_namespace *to, kgid_t gid); +extern vtag_t from_ktag(struct user_namespace *to, ktag_t tag); + extern uid_t from_kuid_munged(struct user_namespace *to, kuid_t uid); extern gid_t from_kgid_munged(struct user_namespace *to, kgid_t gid); @@ -159,6 +190,11 @@ static inline kgid_t make_kgid(struct us return KGIDT_INIT(gid); } +static inline ktag_t make_ktag(struct user_namespace *from, vtag_t tag) +{ + return KTAGT_INIT(tag); +} + static inline uid_t from_kuid(struct user_namespace *to, kuid_t kuid) { return __kuid_val(kuid); @@ -169,6 +205,11 @@ static inline gid_t from_kgid(struct use return __kgid_val(kgid); } +static inline vtag_t from_ktag(struct user_namespace *to, ktag_t ktag) +{ + return __ktag_val(ktag); +} + static inline uid_t from_kuid_munged(struct user_namespace *to, kuid_t kuid) { uid_t uid = from_kuid(to, kuid); diff -NurpP --minimal linux-3.10.19/include/linux/vroot.h linux-3.10.19-vs2.3.6.8/include/linux/vroot.h --- linux-3.10.19/include/linux/vroot.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vroot.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,51 @@ + +/* + * include/linux/vroot.h + * + * written by Herbert Pötzl, 9/11/2002 + * ported to 2.6 by Herbert Pötzl, 30/12/2004 + * + * Copyright (C) 2002-2007 by Herbert Pötzl. 
+ * Redistribution of this file is permitted under the + * GNU General Public License. + */ + +#ifndef _LINUX_VROOT_H +#define _LINUX_VROOT_H + + +#ifdef __KERNEL__ + +/* Possible states of device */ +enum { + Vr_unbound, + Vr_bound, +}; + +struct vroot_device { + int vr_number; + int vr_refcnt; + + struct semaphore vr_ctl_mutex; + struct block_device *vr_device; + int vr_state; +}; + + +typedef struct block_device *(vroot_grb_func)(struct block_device *); + +extern int register_vroot_grb(vroot_grb_func *); +extern int unregister_vroot_grb(vroot_grb_func *); + +#endif /* __KERNEL__ */ + +#define MAX_VROOT_DEFAULT 8 + +/* + * IOCTL commands --- we will commandeer 0x56 ('V') + */ + +#define VROOT_SET_DEV 0x5600 +#define VROOT_CLR_DEV 0x5601 + +#endif /* _LINUX_VROOT_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vs_base.h linux-3.10.19-vs2.3.6.8/include/linux/vs_base.h --- linux-3.10.19/include/linux/vs_base.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vs_base.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,10 @@ +#ifndef _VS_BASE_H +#define _VS_BASE_H + +#include "vserver/base.h" +#include "vserver/check.h" +#include "vserver/debug.h" + +#else +#warning duplicate inclusion +#endif diff -NurpP --minimal linux-3.10.19/include/linux/vs_context.h linux-3.10.19-vs2.3.6.8/include/linux/vs_context.h --- linux-3.10.19/include/linux/vs_context.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vs_context.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,242 @@ +#ifndef _VS_CONTEXT_H +#define _VS_CONTEXT_H + +#include "vserver/base.h" +#include "vserver/check.h" +#include "vserver/context.h" +#include "vserver/history.h" +#include "vserver/debug.h" + +#include + + +#define get_vx_info(i) __get_vx_info(i, __FILE__, __LINE__, __HERE__) + +static inline struct vx_info *__get_vx_info(struct vx_info *vxi, + const char *_file, int _line, void *_here) +{ + if (!vxi) + return NULL; + + vxlprintk(VXD_CBIT(xid, 2), "get_vx_info(%p[#%d.%d])", + vxi, vxi ? vxi->vx_id : 0, + vxi ? atomic_read(&vxi->vx_usecnt) : 0, + _file, _line); + __vxh_get_vx_info(vxi, _here); + + atomic_inc(&vxi->vx_usecnt); + return vxi; +} + + +extern void free_vx_info(struct vx_info *); + +#define put_vx_info(i) __put_vx_info(i, __FILE__, __LINE__, __HERE__) + +static inline void __put_vx_info(struct vx_info *vxi, + const char *_file, int _line, void *_here) +{ + if (!vxi) + return; + + vxlprintk(VXD_CBIT(xid, 2), "put_vx_info(%p[#%d.%d])", + vxi, vxi ? vxi->vx_id : 0, + vxi ? atomic_read(&vxi->vx_usecnt) : 0, + _file, _line); + __vxh_put_vx_info(vxi, _here); + + if (atomic_dec_and_test(&vxi->vx_usecnt)) + free_vx_info(vxi); +} + + +#define init_vx_info(p, i) \ + __init_vx_info(p, i, __FILE__, __LINE__, __HERE__) + +static inline void __init_vx_info(struct vx_info **vxp, struct vx_info *vxi, + const char *_file, int _line, void *_here) +{ + if (vxi) { + vxlprintk(VXD_CBIT(xid, 3), + "init_vx_info(%p[#%d.%d])", + vxi, vxi ? vxi->vx_id : 0, + vxi ? atomic_read(&vxi->vx_usecnt) : 0, + _file, _line); + __vxh_init_vx_info(vxi, vxp, _here); + + atomic_inc(&vxi->vx_usecnt); + } + *vxp = vxi; +} + + +#define set_vx_info(p, i) \ + __set_vx_info(p, i, __FILE__, __LINE__, __HERE__) + +static inline void __set_vx_info(struct vx_info **vxp, struct vx_info *vxi, + const char *_file, int _line, void *_here) +{ + struct vx_info *vxo; + + if (!vxi) + return; + + vxlprintk(VXD_CBIT(xid, 3), "set_vx_info(%p[#%d.%d])", + vxi, vxi ? vxi->vx_id : 0, + vxi ? 
atomic_read(&vxi->vx_usecnt) : 0, + _file, _line); + __vxh_set_vx_info(vxi, vxp, _here); + + atomic_inc(&vxi->vx_usecnt); + vxo = xchg(vxp, vxi); + BUG_ON(vxo); +} + + +#define clr_vx_info(p) __clr_vx_info(p, __FILE__, __LINE__, __HERE__) + +static inline void __clr_vx_info(struct vx_info **vxp, + const char *_file, int _line, void *_here) +{ + struct vx_info *vxo; + + vxo = xchg(vxp, NULL); + if (!vxo) + return; + + vxlprintk(VXD_CBIT(xid, 3), "clr_vx_info(%p[#%d.%d])", + vxo, vxo ? vxo->vx_id : 0, + vxo ? atomic_read(&vxo->vx_usecnt) : 0, + _file, _line); + __vxh_clr_vx_info(vxo, vxp, _here); + + if (atomic_dec_and_test(&vxo->vx_usecnt)) + free_vx_info(vxo); +} + + +#define claim_vx_info(v, p) \ + __claim_vx_info(v, p, __FILE__, __LINE__, __HERE__) + +static inline void __claim_vx_info(struct vx_info *vxi, + struct task_struct *task, + const char *_file, int _line, void *_here) +{ + vxlprintk(VXD_CBIT(xid, 3), "claim_vx_info(%p[#%d.%d.%d]) %p", + vxi, vxi ? vxi->vx_id : 0, + vxi ? atomic_read(&vxi->vx_usecnt) : 0, + vxi ? atomic_read(&vxi->vx_tasks) : 0, + task, _file, _line); + __vxh_claim_vx_info(vxi, task, _here); + + atomic_inc(&vxi->vx_tasks); +} + + +extern void unhash_vx_info(struct vx_info *); + +#define release_vx_info(v, p) \ + __release_vx_info(v, p, __FILE__, __LINE__, __HERE__) + +static inline void __release_vx_info(struct vx_info *vxi, + struct task_struct *task, + const char *_file, int _line, void *_here) +{ + vxlprintk(VXD_CBIT(xid, 3), "release_vx_info(%p[#%d.%d.%d]) %p", + vxi, vxi ? vxi->vx_id : 0, + vxi ? atomic_read(&vxi->vx_usecnt) : 0, + vxi ? atomic_read(&vxi->vx_tasks) : 0, + task, _file, _line); + __vxh_release_vx_info(vxi, task, _here); + + might_sleep(); + + if (atomic_dec_and_test(&vxi->vx_tasks)) + unhash_vx_info(vxi); +} + + +#define task_get_vx_info(p) \ + __task_get_vx_info(p, __FILE__, __LINE__, __HERE__) + +static inline struct vx_info *__task_get_vx_info(struct task_struct *p, + const char *_file, int _line, void *_here) +{ + struct vx_info *vxi; + + task_lock(p); + vxlprintk(VXD_CBIT(xid, 5), "task_get_vx_info(%p)", + p, _file, _line); + vxi = __get_vx_info(p->vx_info, _file, _line, _here); + task_unlock(p); + return vxi; +} + + +static inline void __wakeup_vx_info(struct vx_info *vxi) +{ + if (waitqueue_active(&vxi->vx_wait)) + wake_up_interruptible(&vxi->vx_wait); +} + + +#define enter_vx_info(v, s) __enter_vx_info(v, s, __FILE__, __LINE__) + +static inline void __enter_vx_info(struct vx_info *vxi, + struct vx_info_save *vxis, const char *_file, int _line) +{ + vxlprintk(VXD_CBIT(xid, 5), "enter_vx_info(%p[#%d],%p) %p[#%d,%p]", + vxi, vxi ? vxi->vx_id : 0, vxis, current, + current->xid, current->vx_info, _file, _line); + vxis->vxi = xchg(&current->vx_info, vxi); + vxis->xid = current->xid; + current->xid = vxi ? 
vxi->vx_id : 0; +} + +#define leave_vx_info(s) __leave_vx_info(s, __FILE__, __LINE__) + +static inline void __leave_vx_info(struct vx_info_save *vxis, + const char *_file, int _line) +{ + vxlprintk(VXD_CBIT(xid, 5), "leave_vx_info(%p[#%d,%p]) %p[#%d,%p]", + vxis, vxis->xid, vxis->vxi, current, + current->xid, current->vx_info, _file, _line); + (void)xchg(&current->vx_info, vxis->vxi); + current->xid = vxis->xid; +} + + +static inline void __enter_vx_admin(struct vx_info_save *vxis) +{ + vxis->vxi = xchg(&current->vx_info, NULL); + vxis->xid = xchg(&current->xid, (vxid_t)0); +} + +static inline void __leave_vx_admin(struct vx_info_save *vxis) +{ + (void)xchg(&current->xid, vxis->xid); + (void)xchg(&current->vx_info, vxis->vxi); +} + +#define task_is_init(p) \ + __task_is_init(p, __FILE__, __LINE__, __HERE__) + +static inline int __task_is_init(struct task_struct *p, + const char *_file, int _line, void *_here) +{ + int is_init = is_global_init(p); + + task_lock(p); + if (p->vx_info) + is_init = p->vx_info->vx_initpid == p->pid; + task_unlock(p); + return is_init; +} + +extern void exit_vx_info(struct task_struct *, int); +extern void exit_vx_info_early(struct task_struct *, int); + + +#else +#warning duplicate inclusion +#endif diff -NurpP --minimal linux-3.10.19/include/linux/vs_cowbl.h linux-3.10.19-vs2.3.6.8/include/linux/vs_cowbl.h --- linux-3.10.19/include/linux/vs_cowbl.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vs_cowbl.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,48 @@ +#ifndef _VS_COWBL_H +#define _VS_COWBL_H + +#include +#include +#include +#include + +extern struct dentry *cow_break_link(const char *pathname); + +static inline int cow_check_and_break(struct path *path) +{ + struct inode *inode = path->dentry->d_inode; + int error = 0; + + /* do we need this check? 
*/ + if (IS_RDONLY(inode)) + return -EROFS; + + if (IS_COW(inode)) { + if (IS_COW_LINK(inode)) { + struct dentry *new_dentry, *old_dentry = path->dentry; + char *pp, *buf; + + buf = kmalloc(PATH_MAX, GFP_KERNEL); + if (!buf) { + return -ENOMEM; + } + pp = d_path(path, buf, PATH_MAX); + new_dentry = cow_break_link(pp); + kfree(buf); + if (!IS_ERR(new_dentry)) { + path->dentry = new_dentry; + dput(old_dentry); + } else + error = PTR_ERR(new_dentry); + } else { + inode->i_flags &= ~(S_IXUNLINK | S_IMMUTABLE); + inode->i_ctime = CURRENT_TIME; + mark_inode_dirty(inode); + } + } + return error; +} + +#else +#warning duplicate inclusion +#endif diff -NurpP --minimal linux-3.10.19/include/linux/vs_cvirt.h linux-3.10.19-vs2.3.6.8/include/linux/vs_cvirt.h --- linux-3.10.19/include/linux/vs_cvirt.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vs_cvirt.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,50 @@ +#ifndef _VS_CVIRT_H +#define _VS_CVIRT_H + +#include "vserver/cvirt.h" +#include "vserver/context.h" +#include "vserver/base.h" +#include "vserver/check.h" +#include "vserver/debug.h" + + +static inline void vx_activate_task(struct task_struct *p) +{ + struct vx_info *vxi; + + if ((vxi = p->vx_info)) { + vx_update_load(vxi); + atomic_inc(&vxi->cvirt.nr_running); + } +} + +static inline void vx_deactivate_task(struct task_struct *p) +{ + struct vx_info *vxi; + + if ((vxi = p->vx_info)) { + vx_update_load(vxi); + atomic_dec(&vxi->cvirt.nr_running); + } +} + +static inline void vx_uninterruptible_inc(struct task_struct *p) +{ + struct vx_info *vxi; + + if ((vxi = p->vx_info)) + atomic_inc(&vxi->cvirt.nr_uninterruptible); +} + +static inline void vx_uninterruptible_dec(struct task_struct *p) +{ + struct vx_info *vxi; + + if ((vxi = p->vx_info)) + atomic_dec(&vxi->cvirt.nr_uninterruptible); +} + + +#else +#warning duplicate inclusion +#endif diff -NurpP --minimal linux-3.10.19/include/linux/vs_device.h linux-3.10.19-vs2.3.6.8/include/linux/vs_device.h --- linux-3.10.19/include/linux/vs_device.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vs_device.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,45 @@ +#ifndef _VS_DEVICE_H +#define _VS_DEVICE_H + +#include "vserver/base.h" +#include "vserver/device.h" +#include "vserver/debug.h" + + +#ifdef CONFIG_VSERVER_DEVICE + +int vs_map_device(struct vx_info *, dev_t, dev_t *, umode_t); + +#define vs_device_perm(v, d, m, p) \ + ((vs_map_device(current_vx_info(), d, NULL, m) & (p)) == (p)) + +#else + +static inline +int vs_map_device(struct vx_info *vxi, + dev_t device, dev_t *target, umode_t mode) +{ + if (target) + *target = device; + return ~0; +} + +#define vs_device_perm(v, d, m, p) ((p) == (p)) + +#endif + + +#define vs_map_chrdev(d, t, p) \ + ((vs_map_device(current_vx_info(), d, t, S_IFCHR) & (p)) == (p)) +#define vs_map_blkdev(d, t, p) \ + ((vs_map_device(current_vx_info(), d, t, S_IFBLK) & (p)) == (p)) + +#define vs_chrdev_perm(d, p) \ + vs_device_perm(current_vx_info(), d, S_IFCHR, p) +#define vs_blkdev_perm(d, p) \ + vs_device_perm(current_vx_info(), d, S_IFBLK, p) + + +#else +#warning duplicate inclusion +#endif diff -NurpP --minimal linux-3.10.19/include/linux/vs_dlimit.h linux-3.10.19-vs2.3.6.8/include/linux/vs_dlimit.h --- linux-3.10.19/include/linux/vs_dlimit.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vs_dlimit.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,215 @@ +#ifndef _VS_DLIMIT_H +#define _VS_DLIMIT_H + +#include + +#include 
"vserver/dlimit.h" +#include "vserver/base.h" +#include "vserver/debug.h" + + +#define get_dl_info(i) __get_dl_info(i, __FILE__, __LINE__) + +static inline struct dl_info *__get_dl_info(struct dl_info *dli, + const char *_file, int _line) +{ + if (!dli) + return NULL; + vxlprintk(VXD_CBIT(dlim, 4), "get_dl_info(%p[#%d.%d])", + dli, dli ? dli->dl_tag : 0, + dli ? atomic_read(&dli->dl_usecnt) : 0, + _file, _line); + atomic_inc(&dli->dl_usecnt); + return dli; +} + + +#define free_dl_info(i) \ + call_rcu(&(i)->dl_rcu, rcu_free_dl_info) + +#define put_dl_info(i) __put_dl_info(i, __FILE__, __LINE__) + +static inline void __put_dl_info(struct dl_info *dli, + const char *_file, int _line) +{ + if (!dli) + return; + vxlprintk(VXD_CBIT(dlim, 4), "put_dl_info(%p[#%d.%d])", + dli, dli ? dli->dl_tag : 0, + dli ? atomic_read(&dli->dl_usecnt) : 0, + _file, _line); + if (atomic_dec_and_test(&dli->dl_usecnt)) + free_dl_info(dli); +} + + +#define __dlimit_char(d) ((d) ? '*' : ' ') + +static inline int __dl_alloc_space(struct super_block *sb, + vtag_t tag, dlsize_t nr, const char *file, int line) +{ + struct dl_info *dli = NULL; + int ret = 0; + + if (nr == 0) + goto out; + dli = locate_dl_info(sb, tag); + if (!dli) + goto out; + + spin_lock(&dli->dl_lock); + ret = (dli->dl_space_used + nr > dli->dl_space_total); + if (!ret) + dli->dl_space_used += nr; + spin_unlock(&dli->dl_lock); + put_dl_info(dli); +out: + vxlprintk(VXD_CBIT(dlim, 1), + "ALLOC (%p,#%d)%c %lld bytes (%d)", + sb, tag, __dlimit_char(dli), (long long)nr, + ret, file, line); + return ret ? -ENOSPC : 0; +} + +static inline void __dl_free_space(struct super_block *sb, + vtag_t tag, dlsize_t nr, const char *_file, int _line) +{ + struct dl_info *dli = NULL; + + if (nr == 0) + goto out; + dli = locate_dl_info(sb, tag); + if (!dli) + goto out; + + spin_lock(&dli->dl_lock); + if (dli->dl_space_used > nr) + dli->dl_space_used -= nr; + else + dli->dl_space_used = 0; + spin_unlock(&dli->dl_lock); + put_dl_info(dli); +out: + vxlprintk(VXD_CBIT(dlim, 1), + "FREE (%p,#%d)%c %lld bytes", + sb, tag, __dlimit_char(dli), (long long)nr, + _file, _line); +} + +static inline int __dl_alloc_inode(struct super_block *sb, + vtag_t tag, const char *_file, int _line) +{ + struct dl_info *dli; + int ret = 0; + + dli = locate_dl_info(sb, tag); + if (!dli) + goto out; + + spin_lock(&dli->dl_lock); + dli->dl_inodes_used++; + ret = (dli->dl_inodes_used > dli->dl_inodes_total); + spin_unlock(&dli->dl_lock); + put_dl_info(dli); +out: + vxlprintk(VXD_CBIT(dlim, 0), + "ALLOC (%p,#%d)%c inode (%d)", + sb, tag, __dlimit_char(dli), ret, _file, _line); + return ret ? 
-ENOSPC : 0; +} + +static inline void __dl_free_inode(struct super_block *sb, + vtag_t tag, const char *_file, int _line) +{ + struct dl_info *dli; + + dli = locate_dl_info(sb, tag); + if (!dli) + goto out; + + spin_lock(&dli->dl_lock); + if (dli->dl_inodes_used > 1) + dli->dl_inodes_used--; + else + dli->dl_inodes_used = 0; + spin_unlock(&dli->dl_lock); + put_dl_info(dli); +out: + vxlprintk(VXD_CBIT(dlim, 0), + "FREE (%p,#%d)%c inode", + sb, tag, __dlimit_char(dli), _file, _line); +} + +static inline void __dl_adjust_block(struct super_block *sb, vtag_t tag, + unsigned long long *free_blocks, unsigned long long *root_blocks, + const char *_file, int _line) +{ + struct dl_info *dli; + uint64_t broot, bfree; + + dli = locate_dl_info(sb, tag); + if (!dli) + return; + + spin_lock(&dli->dl_lock); + broot = (dli->dl_space_total - + (dli->dl_space_total >> 10) * dli->dl_nrlmult) + >> sb->s_blocksize_bits; + bfree = (dli->dl_space_total - dli->dl_space_used) + >> sb->s_blocksize_bits; + spin_unlock(&dli->dl_lock); + + vxlprintk(VXD_CBIT(dlim, 2), + "ADJUST: %lld,%lld on %lld,%lld [mult=%d]", + (long long)bfree, (long long)broot, + *free_blocks, *root_blocks, dli->dl_nrlmult, + _file, _line); + if (free_blocks) { + if (*free_blocks > bfree) + *free_blocks = bfree; + } + if (root_blocks) { + if (*root_blocks > broot) + *root_blocks = broot; + } + put_dl_info(dli); +} + +#define dl_prealloc_space(in, bytes) \ + __dl_alloc_space((in)->i_sb, i_tag_read(in), (dlsize_t)(bytes), \ + __FILE__, __LINE__ ) + +#define dl_alloc_space(in, bytes) \ + __dl_alloc_space((in)->i_sb, i_tag_read(in), (dlsize_t)(bytes), \ + __FILE__, __LINE__ ) + +#define dl_reserve_space(in, bytes) \ + __dl_alloc_space((in)->i_sb, i_tag_read(in), (dlsize_t)(bytes), \ + __FILE__, __LINE__ ) + +#define dl_claim_space(in, bytes) (0) + +#define dl_release_space(in, bytes) \ + __dl_free_space((in)->i_sb, i_tag_read(in), (dlsize_t)(bytes), \ + __FILE__, __LINE__ ) + +#define dl_free_space(in, bytes) \ + __dl_free_space((in)->i_sb, i_tag_read(in), (dlsize_t)(bytes), \ + __FILE__, __LINE__ ) + + + +#define dl_alloc_inode(in) \ + __dl_alloc_inode((in)->i_sb, i_tag_read(in), __FILE__, __LINE__ ) + +#define dl_free_inode(in) \ + __dl_free_inode((in)->i_sb, i_tag_read(in), __FILE__, __LINE__ ) + + +#define dl_adjust_block(sb, tag, fb, rb) \ + __dl_adjust_block(sb, tag, fb, rb, __FILE__, __LINE__ ) + + +#else +#warning duplicate inclusion +#endif diff -NurpP --minimal linux-3.10.19/include/linux/vs_inet.h linux-3.10.19-vs2.3.6.8/include/linux/vs_inet.h --- linux-3.10.19/include/linux/vs_inet.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vs_inet.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,364 @@ +#ifndef _VS_INET_H +#define _VS_INET_H + +#include "vserver/base.h" +#include "vserver/network.h" +#include "vserver/debug.h" + +#define IPI_LOOPBACK htonl(INADDR_LOOPBACK) + +#define NXAV4(a) NIPQUAD((a)->ip[0]), NIPQUAD((a)->ip[1]), \ + NIPQUAD((a)->mask), (a)->type +#define NXAV4_FMT "[" NIPQUAD_FMT "-" NIPQUAD_FMT "/" NIPQUAD_FMT ":%04x]" + +#define NIPQUAD(addr) \ + ((unsigned char *)&addr)[0], \ + ((unsigned char *)&addr)[1], \ + ((unsigned char *)&addr)[2], \ + ((unsigned char *)&addr)[3] + +#define NIPQUAD_FMT "%u.%u.%u.%u" + + +static inline +int v4_addr_match(struct nx_addr_v4 *nxa, __be32 addr, uint16_t tmask) +{ + __be32 ip = nxa->ip[0].s_addr; + __be32 mask = nxa->mask.s_addr; + __be32 bcast = ip | ~mask; + int ret = 0; + + switch (nxa->type & tmask) { + case NXA_TYPE_MASK: + ret = (ip == (addr & mask)); 
+ break; + case NXA_TYPE_ADDR: + ret = 3; + if (addr == ip) + break; + /* fall through to broadcast */ + case NXA_MOD_BCAST: + ret = ((tmask & NXA_MOD_BCAST) && (addr == bcast)); + break; + case NXA_TYPE_RANGE: + ret = ((nxa->ip[0].s_addr <= addr) && + (nxa->ip[1].s_addr > addr)); + break; + case NXA_TYPE_ANY: + ret = 2; + break; + } + + vxdprintk(VXD_CBIT(net, 0), + "v4_addr_match(%p" NXAV4_FMT "," NIPQUAD_FMT ",%04x) = %d", + nxa, NXAV4(nxa), NIPQUAD(addr), tmask, ret); + return ret; +} + +static inline +int v4_addr_in_nx_info(struct nx_info *nxi, __be32 addr, uint16_t tmask) +{ + struct nx_addr_v4 *nxa; + unsigned long irqflags; + int ret = 1; + + if (!nxi) + goto out; + + ret = 2; + /* allow 127.0.0.1 when remapping lback */ + if ((tmask & NXA_LOOPBACK) && + (addr == IPI_LOOPBACK) && + nx_info_flags(nxi, NXF_LBACK_REMAP, 0)) + goto out; + ret = 3; + /* check for lback address */ + if ((tmask & NXA_MOD_LBACK) && + (nxi->v4_lback.s_addr == addr)) + goto out; + ret = 4; + /* check for broadcast address */ + if ((tmask & NXA_MOD_BCAST) && + (nxi->v4_bcast.s_addr == addr)) + goto out; + ret = 5; + + /* check for v4 addresses */ + spin_lock_irqsave(&nxi->addr_lock, irqflags); + for (nxa = &nxi->v4; nxa; nxa = nxa->next) + if (v4_addr_match(nxa, addr, tmask)) + goto out_unlock; + ret = 0; +out_unlock: + spin_unlock_irqrestore(&nxi->addr_lock, irqflags); +out: + vxdprintk(VXD_CBIT(net, 0), + "v4_addr_in_nx_info(%p[#%u]," NIPQUAD_FMT ",%04x) = %d", + nxi, nxi ? nxi->nx_id : 0, NIPQUAD(addr), tmask, ret); + return ret; +} + +static inline +int v4_nx_addr_match(struct nx_addr_v4 *nxa, struct nx_addr_v4 *addr, uint16_t mask) +{ + /* FIXME: needs full range checks */ + return v4_addr_match(nxa, addr->ip[0].s_addr, mask); +} + +static inline +int v4_nx_addr_in_nx_info(struct nx_info *nxi, struct nx_addr_v4 *nxa, uint16_t mask) +{ + struct nx_addr_v4 *ptr; + unsigned long irqflags; + int ret = 1; + + spin_lock_irqsave(&nxi->addr_lock, irqflags); + for (ptr = &nxi->v4; ptr; ptr = ptr->next) + if (v4_nx_addr_match(ptr, nxa, mask)) + goto out_unlock; + ret = 0; +out_unlock: + spin_unlock_irqrestore(&nxi->addr_lock, irqflags); + return ret; +} + +#include + +/* + * Check if a given address matches for a socket + * + * nxi: the socket's nx_info if any + * addr: to be verified address + */ +static inline +int v4_sock_addr_match ( + struct nx_info *nxi, + struct inet_sock *inet, + __be32 addr) +{ + __be32 saddr = inet->inet_rcv_saddr; + __be32 bcast = nxi ? 
nxi->v4_bcast.s_addr : INADDR_BROADCAST; + + if (addr && (saddr == addr || bcast == addr)) + return 1; + if (!saddr) + return v4_addr_in_nx_info(nxi, addr, NXA_MASK_BIND); + return 0; +} + + +/* inet related checks and helpers */ + + +struct in_ifaddr; +struct net_device; +struct sock; + +#ifdef CONFIG_INET + +#include +#include +#include +#include + + +int dev_in_nx_info(struct net_device *, struct nx_info *); +int v4_dev_in_nx_info(struct net_device *, struct nx_info *); +int nx_v4_addr_conflict(struct nx_info *, struct nx_info *); + + +/* + * check if address is covered by socket + * + * sk: the socket to check against + * addr: the address in question (must be != 0) + */ + +static inline +int __v4_addr_match_socket(const struct sock *sk, struct nx_addr_v4 *nxa) +{ + struct nx_info *nxi = sk->sk_nx_info; + __be32 saddr = sk_rcv_saddr(sk); + + vxdprintk(VXD_CBIT(net, 5), + "__v4_addr_in_socket(%p," NXAV4_FMT ") %p:" NIPQUAD_FMT " %p;%lx", + sk, NXAV4(nxa), nxi, NIPQUAD(saddr), sk->sk_socket, + (sk->sk_socket?sk->sk_socket->flags:0)); + + if (saddr) { /* direct address match */ + return v4_addr_match(nxa, saddr, -1); + } else if (nxi) { /* match against nx_info */ + return v4_nx_addr_in_nx_info(nxi, nxa, -1); + } else { /* unrestricted any socket */ + return 1; + } +} + + + +static inline +int nx_dev_visible(struct nx_info *nxi, struct net_device *dev) +{ + vxdprintk(VXD_CBIT(net, 1), + "nx_dev_visible(%p[#%u],%p " VS_Q("%s") ") %d", + nxi, nxi ? nxi->nx_id : 0, dev, dev->name, + nxi ? dev_in_nx_info(dev, nxi) : 0); + + if (!nx_info_flags(nxi, NXF_HIDE_NETIF, 0)) + return 1; + if (dev_in_nx_info(dev, nxi)) + return 1; + return 0; +} + + +static inline +int v4_ifa_in_nx_info(struct in_ifaddr *ifa, struct nx_info *nxi) +{ + if (!nxi) + return 1; + if (!ifa) + return 0; + return v4_addr_in_nx_info(nxi, ifa->ifa_local, NXA_MASK_SHOW); +} + +static inline +int nx_v4_ifa_visible(struct nx_info *nxi, struct in_ifaddr *ifa) +{ + vxdprintk(VXD_CBIT(net, 1), "nx_v4_ifa_visible(%p[#%u],%p) %d", + nxi, nxi ? nxi->nx_id : 0, ifa, + nxi ? v4_ifa_in_nx_info(ifa, nxi) : 0); + + if (!nx_info_flags(nxi, NXF_HIDE_NETIF, 0)) + return 1; + if (v4_ifa_in_nx_info(ifa, nxi)) + return 1; + return 0; +} + + +struct nx_v4_sock_addr { + __be32 saddr; /* Address used for validation */ + __be32 baddr; /* Address used for socket bind */ +}; + +static inline +int v4_map_sock_addr(struct inet_sock *inet, struct sockaddr_in *addr, + struct nx_v4_sock_addr *nsa) +{ + struct sock *sk = &inet->sk; + struct nx_info *nxi = sk->sk_nx_info; + __be32 saddr = addr->sin_addr.s_addr; + __be32 baddr = saddr; + + vxdprintk(VXD_CBIT(net, 3), + "inet_bind(%p)* %p,%p;%lx " NIPQUAD_FMT, + sk, sk->sk_nx_info, sk->sk_socket, + (sk->sk_socket ? 
sk->sk_socket->flags : 0), + NIPQUAD(saddr)); + + if (nxi) { + if (saddr == INADDR_ANY) { + if (nx_info_flags(nxi, NXF_SINGLE_IP, 0)) + baddr = nxi->v4.ip[0].s_addr; + } else if (saddr == IPI_LOOPBACK) { + if (nx_info_flags(nxi, NXF_LBACK_REMAP, 0)) + baddr = nxi->v4_lback.s_addr; + } else if (!ipv4_is_multicast(saddr) || + !nx_info_ncaps(nxi, NXC_MULTICAST)) { + /* normal address bind */ + if (!v4_addr_in_nx_info(nxi, saddr, NXA_MASK_BIND)) + return -EADDRNOTAVAIL; + } + } + + vxdprintk(VXD_CBIT(net, 3), + "inet_bind(%p) " NIPQUAD_FMT ", " NIPQUAD_FMT, + sk, NIPQUAD(saddr), NIPQUAD(baddr)); + + nsa->saddr = saddr; + nsa->baddr = baddr; + return 0; +} + +static inline +void v4_set_sock_addr(struct inet_sock *inet, struct nx_v4_sock_addr *nsa) +{ + inet->inet_saddr = nsa->baddr; + inet->inet_rcv_saddr = nsa->baddr; +} + + +/* + * helper to simplify inet_lookup_listener + * + * nxi: the socket's nx_info if any + * addr: to be verified address + * saddr: socket address + */ +static inline int v4_inet_addr_match ( + struct nx_info *nxi, + __be32 addr, + __be32 saddr) +{ + if (addr && (saddr == addr)) + return 1; + if (!saddr) + return nxi ? v4_addr_in_nx_info(nxi, addr, NXA_MASK_BIND) : 1; + return 0; +} + +static inline __be32 nx_map_sock_lback(struct nx_info *nxi, __be32 addr) +{ + if (nx_info_flags(nxi, NXF_HIDE_LBACK, 0) && + (addr == nxi->v4_lback.s_addr)) + return IPI_LOOPBACK; + return addr; +} + +static inline +int nx_info_has_v4(struct nx_info *nxi) +{ + if (!nxi) + return 1; + if (NX_IPV4(nxi)) + return 1; + if (nx_info_flags(nxi, NXF_LBACK_REMAP, 0)) + return 1; + return 0; +} + +#else /* CONFIG_INET */ + +static inline +int nx_dev_visible(struct nx_info *n, struct net_device *d) +{ + return 1; +} + +static inline +int nx_v4_addr_conflict(struct nx_info *n, uint32_t a, const struct sock *s) +{ + return 1; +} + +static inline +int v4_ifa_in_nx_info(struct in_ifaddr *a, struct nx_info *n) +{ + return 1; +} + +static inline +int nx_info_has_v4(struct nx_info *nxi) +{ + return 0; +} + +#endif /* CONFIG_INET */ + +#define current_nx_info_has_v4() \ + nx_info_has_v4(current_nx_info()) + +#else +// #warning duplicate inclusion +#endif diff -NurpP --minimal linux-3.10.19/include/linux/vs_inet6.h linux-3.10.19-vs2.3.6.8/include/linux/vs_inet6.h --- linux-3.10.19/include/linux/vs_inet6.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vs_inet6.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,257 @@ +#ifndef _VS_INET6_H +#define _VS_INET6_H + +#include "vserver/base.h" +#include "vserver/network.h" +#include "vserver/debug.h" + +#include + +#define NXAV6(a) &(a)->ip, &(a)->mask, (a)->prefix, (a)->type +#define NXAV6_FMT "[%pI6/%pI6/%d:%04x]" + + +#ifdef CONFIG_IPV6 + +static inline +int v6_addr_match(struct nx_addr_v6 *nxa, + const struct in6_addr *addr, uint16_t mask) +{ + int ret = 0; + + switch (nxa->type & mask) { + case NXA_TYPE_MASK: + ret = ipv6_masked_addr_cmp(&nxa->ip, &nxa->mask, addr); + break; + case NXA_TYPE_ADDR: + ret = ipv6_addr_equal(&nxa->ip, addr); + break; + case NXA_TYPE_ANY: + ret = 1; + break; + } + vxdprintk(VXD_CBIT(net, 0), + "v6_addr_match(%p" NXAV6_FMT ",%pI6,%04x) = %d", + nxa, NXAV6(nxa), addr, mask, ret); + return ret; +} + +static inline +int v6_addr_in_nx_info(struct nx_info *nxi, + const struct in6_addr *addr, uint16_t mask) +{ + struct nx_addr_v6 *nxa; + unsigned long irqflags; + int ret = 1; + + if (!nxi) + goto out; + + spin_lock_irqsave(&nxi->addr_lock, irqflags); + for (nxa = &nxi->v6; nxa; nxa = nxa->next) + if 
(v6_addr_match(nxa, addr, mask)) + goto out_unlock; + ret = 0; +out_unlock: + spin_unlock_irqrestore(&nxi->addr_lock, irqflags); +out: + vxdprintk(VXD_CBIT(net, 0), + "v6_addr_in_nx_info(%p[#%u],%pI6,%04x) = %d", + nxi, nxi ? nxi->nx_id : 0, addr, mask, ret); + return ret; +} + +static inline +int v6_nx_addr_match(struct nx_addr_v6 *nxa, struct nx_addr_v6 *addr, uint16_t mask) +{ + /* FIXME: needs full range checks */ + return v6_addr_match(nxa, &addr->ip, mask); +} + +static inline +int v6_nx_addr_in_nx_info(struct nx_info *nxi, struct nx_addr_v6 *nxa, uint16_t mask) +{ + struct nx_addr_v6 *ptr; + unsigned long irqflags; + int ret = 1; + + spin_lock_irqsave(&nxi->addr_lock, irqflags); + for (ptr = &nxi->v6; ptr; ptr = ptr->next) + if (v6_nx_addr_match(ptr, nxa, mask)) + goto out_unlock; + ret = 0; +out_unlock: + spin_unlock_irqrestore(&nxi->addr_lock, irqflags); + return ret; +} + + +/* + * Check if a given address matches for a socket + * + * nxi: the socket's nx_info if any + * addr: to be verified address + */ +static inline +int v6_sock_addr_match ( + struct nx_info *nxi, + struct inet_sock *inet, + struct in6_addr *addr) +{ + struct sock *sk = &inet->sk; + struct in6_addr *saddr = inet6_rcv_saddr(sk); + + if (!ipv6_addr_any(addr) && + ipv6_addr_equal(saddr, addr)) + return 1; + if (ipv6_addr_any(saddr)) + return v6_addr_in_nx_info(nxi, addr, -1); + return 0; +} + +/* + * check if address is covered by socket + * + * sk: the socket to check against + * addr: the address in question (must be != 0) + */ + +static inline +int __v6_addr_match_socket(const struct sock *sk, struct nx_addr_v6 *nxa) +{ + struct nx_info *nxi = sk->sk_nx_info; + struct in6_addr *saddr = inet6_rcv_saddr(sk); + + vxdprintk(VXD_CBIT(net, 5), + "__v6_addr_in_socket(%p," NXAV6_FMT ") %p:%pI6 %p;%lx", + sk, NXAV6(nxa), nxi, saddr, sk->sk_socket, + (sk->sk_socket?sk->sk_socket->flags:0)); + + if (!ipv6_addr_any(saddr)) { /* direct address match */ + return v6_addr_match(nxa, saddr, -1); + } else if (nxi) { /* match against nx_info */ + return v6_nx_addr_in_nx_info(nxi, nxa, -1); + } else { /* unrestricted any socket */ + return 1; + } +} + + +/* inet related checks and helpers */ + + +struct in_ifaddr; +struct net_device; +struct sock; + + +#include +#include +#include + + +int dev_in_nx_info(struct net_device *, struct nx_info *); +int v6_dev_in_nx_info(struct net_device *, struct nx_info *); +int nx_v6_addr_conflict(struct nx_info *, struct nx_info *); + + + +static inline +int v6_ifa_in_nx_info(struct inet6_ifaddr *ifa, struct nx_info *nxi) +{ + if (!nxi) + return 1; + if (!ifa) + return 0; + return v6_addr_in_nx_info(nxi, &ifa->addr, -1); +} + +static inline +int nx_v6_ifa_visible(struct nx_info *nxi, struct inet6_ifaddr *ifa) +{ + vxdprintk(VXD_CBIT(net, 1), "nx_v6_ifa_visible(%p[#%u],%p) %d", + nxi, nxi ? nxi->nx_id : 0, ifa, + nxi ? 
v6_ifa_in_nx_info(ifa, nxi) : 0); + + if (!nx_info_flags(nxi, NXF_HIDE_NETIF, 0)) + return 1; + if (v6_ifa_in_nx_info(ifa, nxi)) + return 1; + return 0; +} + + +struct nx_v6_sock_addr { + struct in6_addr saddr; /* Address used for validation */ + struct in6_addr baddr; /* Address used for socket bind */ +}; + +static inline +int v6_map_sock_addr(struct inet_sock *inet, struct sockaddr_in6 *addr, + struct nx_v6_sock_addr *nsa) +{ + // struct sock *sk = &inet->sk; + // struct nx_info *nxi = sk->sk_nx_info; + struct in6_addr saddr = addr->sin6_addr; + struct in6_addr baddr = saddr; + + nsa->saddr = saddr; + nsa->baddr = baddr; + return 0; +} + +static inline +void v6_set_sock_addr(struct inet_sock *inet, struct nx_v6_sock_addr *nsa) +{ + // struct sock *sk = &inet->sk; + // struct in6_addr *saddr = inet6_rcv_saddr(sk); + + // *saddr = nsa->baddr; + // inet->inet_saddr = nsa->baddr; +} + +static inline +int nx_info_has_v6(struct nx_info *nxi) +{ + if (!nxi) + return 1; + if (NX_IPV6(nxi)) + return 1; + return 0; +} + +#else /* CONFIG_IPV6 */ + +static inline +int nx_v6_dev_visible(struct nx_info *n, struct net_device *d) +{ + return 1; +} + + +static inline +int nx_v6_addr_conflict(struct nx_info *n, uint32_t a, const struct sock *s) +{ + return 1; +} + +static inline +int v6_ifa_in_nx_info(struct in_ifaddr *a, struct nx_info *n) +{ + return 1; +} + +static inline +int nx_info_has_v6(struct nx_info *nxi) +{ + return 0; +} + +#endif /* CONFIG_IPV6 */ + +#define current_nx_info_has_v6() \ + nx_info_has_v6(current_nx_info()) + +#else +#warning duplicate inclusion +#endif diff -NurpP --minimal linux-3.10.19/include/linux/vs_limit.h linux-3.10.19-vs2.3.6.8/include/linux/vs_limit.h --- linux-3.10.19/include/linux/vs_limit.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vs_limit.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,140 @@ +#ifndef _VS_LIMIT_H +#define _VS_LIMIT_H + +#include "vserver/limit.h" +#include "vserver/base.h" +#include "vserver/context.h" +#include "vserver/debug.h" +#include "vserver/context.h" +#include "vserver/limit_int.h" + + +#define vx_acc_cres(v, d, p, r) \ + __vx_acc_cres(v, r, d, p, __FILE__, __LINE__) + +#define vx_acc_cres_cond(x, d, p, r) \ + __vx_acc_cres(((x) == vx_current_xid()) ? current_vx_info() : 0, \ + r, d, p, __FILE__, __LINE__) + + +#define vx_add_cres(v, a, p, r) \ + __vx_add_cres(v, r, a, p, __FILE__, __LINE__) +#define vx_sub_cres(v, a, p, r) vx_add_cres(v, -(a), p, r) + +#define vx_add_cres_cond(x, a, p, r) \ + __vx_add_cres(((x) == vx_current_xid()) ? 
current_vx_info() : 0, \ + r, a, p, __FILE__, __LINE__) +#define vx_sub_cres_cond(x, a, p, r) vx_add_cres_cond(x, -(a), p, r) + + +/* process and file limits */ + +#define vx_nproc_inc(p) \ + vx_acc_cres((p)->vx_info, 1, p, RLIMIT_NPROC) + +#define vx_nproc_dec(p) \ + vx_acc_cres((p)->vx_info,-1, p, RLIMIT_NPROC) + +#define vx_files_inc(f) \ + vx_acc_cres_cond((f)->f_xid, 1, f, RLIMIT_NOFILE) + +#define vx_files_dec(f) \ + vx_acc_cres_cond((f)->f_xid,-1, f, RLIMIT_NOFILE) + +#define vx_locks_inc(l) \ + vx_acc_cres_cond((l)->fl_xid, 1, l, RLIMIT_LOCKS) + +#define vx_locks_dec(l) \ + vx_acc_cres_cond((l)->fl_xid,-1, l, RLIMIT_LOCKS) + +#define vx_openfd_inc(f) \ + vx_acc_cres(current_vx_info(), 1, (void *)(long)(f), VLIMIT_OPENFD) + +#define vx_openfd_dec(f) \ + vx_acc_cres(current_vx_info(),-1, (void *)(long)(f), VLIMIT_OPENFD) + + +#define vx_cres_avail(v, n, r) \ + __vx_cres_avail(v, r, n, __FILE__, __LINE__) + + +#define vx_nproc_avail(n) \ + vx_cres_avail(current_vx_info(), n, RLIMIT_NPROC) + +#define vx_files_avail(n) \ + vx_cres_avail(current_vx_info(), n, RLIMIT_NOFILE) + +#define vx_locks_avail(n) \ + vx_cres_avail(current_vx_info(), n, RLIMIT_LOCKS) + +#define vx_openfd_avail(n) \ + vx_cres_avail(current_vx_info(), n, VLIMIT_OPENFD) + + +/* dentry limits */ + +#define vx_dentry_inc(d) do { \ + if ((d)->d_count == 1) \ + vx_acc_cres(current_vx_info(), 1, d, VLIMIT_DENTRY); \ + } while (0) + +#define vx_dentry_dec(d) do { \ + if ((d)->d_count == 0) \ + vx_acc_cres(current_vx_info(),-1, d, VLIMIT_DENTRY); \ + } while (0) + +#define vx_dentry_avail(n) \ + vx_cres_avail(current_vx_info(), n, VLIMIT_DENTRY) + + +/* socket limits */ + +#define vx_sock_inc(s) \ + vx_acc_cres((s)->sk_vx_info, 1, s, VLIMIT_NSOCK) + +#define vx_sock_dec(s) \ + vx_acc_cres((s)->sk_vx_info,-1, s, VLIMIT_NSOCK) + +#define vx_sock_avail(n) \ + vx_cres_avail(current_vx_info(), n, VLIMIT_NSOCK) + + +/* ipc resource limits */ + +#define vx_ipcmsg_add(v, u, a) \ + vx_add_cres(v, a, u, RLIMIT_MSGQUEUE) + +#define vx_ipcmsg_sub(v, u, a) \ + vx_sub_cres(v, a, u, RLIMIT_MSGQUEUE) + +#define vx_ipcmsg_avail(v, a) \ + vx_cres_avail(v, a, RLIMIT_MSGQUEUE) + + +#define vx_ipcshm_add(v, k, a) \ + vx_add_cres(v, a, (void *)(long)(k), VLIMIT_SHMEM) + +#define vx_ipcshm_sub(v, k, a) \ + vx_sub_cres(v, a, (void *)(long)(k), VLIMIT_SHMEM) + +#define vx_ipcshm_avail(v, a) \ + vx_cres_avail(v, a, VLIMIT_SHMEM) + + +#define vx_semary_inc(a) \ + vx_acc_cres(current_vx_info(), 1, a, VLIMIT_SEMARY) + +#define vx_semary_dec(a) \ + vx_acc_cres(current_vx_info(), -1, a, VLIMIT_SEMARY) + + +#define vx_nsems_add(a,n) \ + vx_add_cres(current_vx_info(), n, a, VLIMIT_NSEMS) + +#define vx_nsems_sub(a,n) \ + vx_sub_cres(current_vx_info(), n, a, VLIMIT_NSEMS) + + +#else +#warning duplicate inclusion +#endif diff -NurpP --minimal linux-3.10.19/include/linux/vs_network.h linux-3.10.19-vs2.3.6.8/include/linux/vs_network.h --- linux-3.10.19/include/linux/vs_network.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vs_network.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,169 @@ +#ifndef _NX_VS_NETWORK_H +#define _NX_VS_NETWORK_H + +#include "vserver/context.h" +#include "vserver/network.h" +#include "vserver/base.h" +#include "vserver/check.h" +#include "vserver/debug.h" + +#include + + +#define get_nx_info(i) __get_nx_info(i, __FILE__, __LINE__) + +static inline struct nx_info *__get_nx_info(struct nx_info *nxi, + const char *_file, int _line) +{ + if (!nxi) + return NULL; + + vxlprintk(VXD_CBIT(nid, 2), 
"get_nx_info(%p[#%d.%d])", + nxi, nxi ? nxi->nx_id : 0, + nxi ? atomic_read(&nxi->nx_usecnt) : 0, + _file, _line); + + atomic_inc(&nxi->nx_usecnt); + return nxi; +} + + +extern void free_nx_info(struct nx_info *); + +#define put_nx_info(i) __put_nx_info(i, __FILE__, __LINE__) + +static inline void __put_nx_info(struct nx_info *nxi, const char *_file, int _line) +{ + if (!nxi) + return; + + vxlprintk(VXD_CBIT(nid, 2), "put_nx_info(%p[#%d.%d])", + nxi, nxi ? nxi->nx_id : 0, + nxi ? atomic_read(&nxi->nx_usecnt) : 0, + _file, _line); + + if (atomic_dec_and_test(&nxi->nx_usecnt)) + free_nx_info(nxi); +} + + +#define init_nx_info(p, i) __init_nx_info(p, i, __FILE__, __LINE__) + +static inline void __init_nx_info(struct nx_info **nxp, struct nx_info *nxi, + const char *_file, int _line) +{ + if (nxi) { + vxlprintk(VXD_CBIT(nid, 3), + "init_nx_info(%p[#%d.%d])", + nxi, nxi ? nxi->nx_id : 0, + nxi ? atomic_read(&nxi->nx_usecnt) : 0, + _file, _line); + + atomic_inc(&nxi->nx_usecnt); + } + *nxp = nxi; +} + + +#define set_nx_info(p, i) __set_nx_info(p, i, __FILE__, __LINE__) + +static inline void __set_nx_info(struct nx_info **nxp, struct nx_info *nxi, + const char *_file, int _line) +{ + struct nx_info *nxo; + + if (!nxi) + return; + + vxlprintk(VXD_CBIT(nid, 3), "set_nx_info(%p[#%d.%d])", + nxi, nxi ? nxi->nx_id : 0, + nxi ? atomic_read(&nxi->nx_usecnt) : 0, + _file, _line); + + atomic_inc(&nxi->nx_usecnt); + nxo = xchg(nxp, nxi); + BUG_ON(nxo); +} + +#define clr_nx_info(p) __clr_nx_info(p, __FILE__, __LINE__) + +static inline void __clr_nx_info(struct nx_info **nxp, + const char *_file, int _line) +{ + struct nx_info *nxo; + + nxo = xchg(nxp, NULL); + if (!nxo) + return; + + vxlprintk(VXD_CBIT(nid, 3), "clr_nx_info(%p[#%d.%d])", + nxo, nxo ? nxo->nx_id : 0, + nxo ? atomic_read(&nxo->nx_usecnt) : 0, + _file, _line); + + if (atomic_dec_and_test(&nxo->nx_usecnt)) + free_nx_info(nxo); +} + + +#define claim_nx_info(v, p) __claim_nx_info(v, p, __FILE__, __LINE__) + +static inline void __claim_nx_info(struct nx_info *nxi, + struct task_struct *task, const char *_file, int _line) +{ + vxlprintk(VXD_CBIT(nid, 3), "claim_nx_info(%p[#%d.%d.%d]) %p", + nxi, nxi ? nxi->nx_id : 0, + nxi?atomic_read(&nxi->nx_usecnt):0, + nxi?atomic_read(&nxi->nx_tasks):0, + task, _file, _line); + + atomic_inc(&nxi->nx_tasks); +} + + +extern void unhash_nx_info(struct nx_info *); + +#define release_nx_info(v, p) __release_nx_info(v, p, __FILE__, __LINE__) + +static inline void __release_nx_info(struct nx_info *nxi, + struct task_struct *task, const char *_file, int _line) +{ + vxlprintk(VXD_CBIT(nid, 3), "release_nx_info(%p[#%d.%d.%d]) %p", + nxi, nxi ? nxi->nx_id : 0, + nxi ? atomic_read(&nxi->nx_usecnt) : 0, + nxi ? 
atomic_read(&nxi->nx_tasks) : 0, + task, _file, _line); + + might_sleep(); + + if (atomic_dec_and_test(&nxi->nx_tasks)) + unhash_nx_info(nxi); +} + + +#define task_get_nx_info(i) __task_get_nx_info(i, __FILE__, __LINE__) + +static __inline__ struct nx_info *__task_get_nx_info(struct task_struct *p, + const char *_file, int _line) +{ + struct nx_info *nxi; + + task_lock(p); + vxlprintk(VXD_CBIT(nid, 5), "task_get_nx_info(%p)", + p, _file, _line); + nxi = __get_nx_info(p->nx_info, _file, _line); + task_unlock(p); + return nxi; +} + + +static inline void exit_nx_info(struct task_struct *p) +{ + if (p->nx_info) + release_nx_info(p->nx_info, p); +} + + +#else +#warning duplicate inclusion +#endif diff -NurpP --minimal linux-3.10.19/include/linux/vs_pid.h linux-3.10.19-vs2.3.6.8/include/linux/vs_pid.h --- linux-3.10.19/include/linux/vs_pid.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vs_pid.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,50 @@ +#ifndef _VS_PID_H +#define _VS_PID_H + +#include "vserver/base.h" +#include "vserver/check.h" +#include "vserver/context.h" +#include "vserver/debug.h" +#include "vserver/pid.h" +#include + + +#define VXF_FAKE_INIT (VXF_INFO_INIT | VXF_STATE_INIT) + +static inline +int vx_proc_task_visible(struct task_struct *task) +{ + if ((task->pid == 1) && + !vx_flags(VXF_FAKE_INIT, VXF_FAKE_INIT)) + /* show a blend through init */ + goto visible; + if (vx_check(vx_task_xid(task), VS_WATCH | VS_IDENT)) + goto visible; + return 0; +visible: + return 1; +} + +#define find_task_by_real_pid(pid) find_task_by_pid_ns(pid, &init_pid_ns) + + +static inline +struct task_struct *vx_get_proc_task(struct inode *inode, struct pid *pid) +{ + struct task_struct *task = get_pid_task(pid, PIDTYPE_PID); + + if (task && !vx_proc_task_visible(task)) { + vxdprintk(VXD_CBIT(misc, 6), + "dropping task (get) %p[#%u,%u] for %p[#%u,%u]", + task, task->xid, task->pid, + current, current->xid, current->pid); + put_task_struct(task); + task = NULL; + } + return task; +} + + +#else +#warning duplicate inclusion +#endif diff -NurpP --minimal linux-3.10.19/include/linux/vs_sched.h linux-3.10.19-vs2.3.6.8/include/linux/vs_sched.h --- linux-3.10.19/include/linux/vs_sched.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vs_sched.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,40 @@ +#ifndef _VS_SCHED_H +#define _VS_SCHED_H + +#include "vserver/base.h" +#include "vserver/context.h" +#include "vserver/sched.h" + + +#define MAX_PRIO_BIAS 20 +#define MIN_PRIO_BIAS -20 + +static inline +int vx_adjust_prio(struct task_struct *p, int prio, int max_user) +{ + struct vx_info *vxi = p->vx_info; + + if (vxi) + prio += vx_cpu(vxi, sched_pc).prio_bias; + return prio; +} + +static inline void vx_account_user(struct vx_info *vxi, + cputime_t cputime, int nice) +{ + if (!vxi) + return; + vx_cpu(vxi, sched_pc).user_ticks += cputime; +} + +static inline void vx_account_system(struct vx_info *vxi, + cputime_t cputime, int idle) +{ + if (!vxi) + return; + vx_cpu(vxi, sched_pc).sys_ticks += cputime; +} + +#else +#warning duplicate inclusion +#endif diff -NurpP --minimal linux-3.10.19/include/linux/vs_socket.h linux-3.10.19-vs2.3.6.8/include/linux/vs_socket.h --- linux-3.10.19/include/linux/vs_socket.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vs_socket.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,67 @@ +#ifndef _VS_SOCKET_H +#define _VS_SOCKET_H + +#include "vserver/debug.h" +#include "vserver/base.h" +#include 
"vserver/cacct.h" +#include "vserver/context.h" +#include "vserver/tag.h" + + +/* socket accounting */ + +#include + +static inline int vx_sock_type(int family) +{ + switch (family) { + case PF_UNSPEC: + return VXA_SOCK_UNSPEC; + case PF_UNIX: + return VXA_SOCK_UNIX; + case PF_INET: + return VXA_SOCK_INET; + case PF_INET6: + return VXA_SOCK_INET6; + case PF_PACKET: + return VXA_SOCK_PACKET; + default: + return VXA_SOCK_OTHER; + } +} + +#define vx_acc_sock(v, f, p, s) \ + __vx_acc_sock(v, f, p, s, __FILE__, __LINE__) + +static inline void __vx_acc_sock(struct vx_info *vxi, + int family, int pos, int size, char *file, int line) +{ + if (vxi) { + int type = vx_sock_type(family); + + atomic_long_inc(&vxi->cacct.sock[type][pos].count); + atomic_long_add(size, &vxi->cacct.sock[type][pos].total); + } +} + +#define vx_sock_recv(sk, s) \ + vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 0, s) +#define vx_sock_send(sk, s) \ + vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 1, s) +#define vx_sock_fail(sk, s) \ + vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 2, s) + + +#define sock_vx_init(s) do { \ + (s)->sk_xid = 0; \ + (s)->sk_vx_info = NULL; \ + } while (0) + +#define sock_nx_init(s) do { \ + (s)->sk_nid = 0; \ + (s)->sk_nx_info = NULL; \ + } while (0) + +#else +#warning duplicate inclusion +#endif diff -NurpP --minimal linux-3.10.19/include/linux/vs_tag.h linux-3.10.19-vs2.3.6.8/include/linux/vs_tag.h --- linux-3.10.19/include/linux/vs_tag.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vs_tag.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,47 @@ +#ifndef _VS_TAG_H +#define _VS_TAG_H + +#include + +/* check conditions */ + +#define DX_ADMIN 0x0001 +#define DX_WATCH 0x0002 +#define DX_HOSTID 0x0008 + +#define DX_IDENT 0x0010 + +#define DX_ARG_MASK 0x0010 + + +#define dx_task_tag(t) ((t)->tag) + +#define dx_current_tag() dx_task_tag(current) + +#define dx_check(c, m) __dx_check(dx_current_tag(), c, m) + +#define dx_weak_check(c, m) ((m) ? 
dx_check(c, m) : 1) + + +/* + * check current context for ADMIN/WATCH and + * optionally against supplied argument + */ +static inline int __dx_check(vtag_t cid, vtag_t id, unsigned int mode) +{ + if (mode & DX_ARG_MASK) { + if ((mode & DX_IDENT) && (id == cid)) + return 1; + } + return (((mode & DX_ADMIN) && (cid == 0)) || + ((mode & DX_WATCH) && (cid == 1)) || + ((mode & DX_HOSTID) && (id == 0))); +} + +struct inode; +int dx_permission(const struct inode *inode, int mask); + + +#else +#warning duplicate inclusion +#endif diff -NurpP --minimal linux-3.10.19/include/linux/vs_time.h linux-3.10.19-vs2.3.6.8/include/linux/vs_time.h --- linux-3.10.19/include/linux/vs_time.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vs_time.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,19 @@ +#ifndef _VS_TIME_H +#define _VS_TIME_H + + +/* time faking stuff */ + +#ifdef CONFIG_VSERVER_VTIME + +extern void vx_adjust_timespec(struct timespec *ts); +extern int vx_settimeofday(const struct timespec *ts); + +#else +#define vx_adjust_timespec(t) do { } while (0) +#define vx_settimeofday(t) do_settimeofday(t) +#endif + +#else +#warning duplicate inclusion +#endif diff -NurpP --minimal linux-3.10.19/include/linux/vserver/base.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/base.h --- linux-3.10.19/include/linux/vserver/base.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/base.h 2013-10-27 03:40:46.000000000 +0000 @@ -0,0 +1,184 @@ +#ifndef _VSERVER_BASE_H +#define _VSERVER_BASE_H + + +/* context state changes */ + +enum { + VSC_STARTUP = 1, + VSC_SHUTDOWN, + + VSC_NETUP, + VSC_NETDOWN, +}; + + + +#define vx_task_xid(t) ((t)->xid) + +#define vx_current_xid() vx_task_xid(current) + +#define current_vx_info() (current->vx_info) + + +#define nx_task_nid(t) ((t)->nid) + +#define nx_current_nid() nx_task_nid(current) + +#define current_nx_info() (current->nx_info) + + +/* generic flag merging */ + +#define vs_check_flags(v, m, f) (((v) & (m)) ^ (f)) + +#define vs_mask_flags(v, f, m) (((v) & ~(m)) | ((f) & (m))) + +#define vs_mask_mask(v, f, m) (((v) & ~(m)) | ((v) & (f) & (m))) + +#define vs_check_bit(v, n) ((v) & (1LL << (n))) + + +/* context flags */ + +#define __vx_flags(v) ((v) ? (v)->vx_flags : 0) + +#define vx_current_flags() __vx_flags(current_vx_info()) + +#define vx_info_flags(v, m, f) \ + vs_check_flags(__vx_flags(v), m, f) + +#define task_vx_flags(t, m, f) \ + ((t) && vx_info_flags((t)->vx_info, m, f)) + +#define vx_flags(m, f) vx_info_flags(current_vx_info(), m, f) + + +/* context caps */ + +#define __vx_ccaps(v) ((v) ? (v)->vx_ccaps : 0) + +#define vx_current_ccaps() __vx_ccaps(current_vx_info()) + +#define vx_info_ccaps(v, c) (__vx_ccaps(v) & (c)) + +#define vx_ccaps(c) vx_info_ccaps(current_vx_info(), (c)) + + + +/* network flags */ + +#define __nx_flags(n) ((n) ? (n)->nx_flags : 0) + +#define nx_current_flags() __nx_flags(current_nx_info()) + +#define nx_info_flags(n, m, f) \ + vs_check_flags(__nx_flags(n), m, f) + +#define task_nx_flags(t, m, f) \ + ((t) && nx_info_flags((t)->nx_info, m, f)) + +#define nx_flags(m, f) nx_info_flags(current_nx_info(), m, f) + + +/* network caps */ + +#define __nx_ncaps(n) ((n) ? (n)->nx_ncaps : 0) + +#define nx_current_ncaps() __nx_ncaps(current_nx_info()) + +#define nx_info_ncaps(n, c) (__nx_ncaps(n) & (c)) + +#define nx_ncaps(c) nx_info_ncaps(current_nx_info(), c) + + +/* context mask capabilities */ + +#define __vx_mcaps(v) ((v) ? 
(v)->vx_ccaps >> 32UL : ~0 ) + +#define vx_info_mcaps(v, c) (__vx_mcaps(v) & (c)) + +#define vx_mcaps(c) vx_info_mcaps(current_vx_info(), c) + + +/* context bcap mask */ + +#define __vx_bcaps(v) ((v)->vx_bcaps) + +#define vx_current_bcaps() __vx_bcaps(current_vx_info()) + + +/* mask given bcaps */ + +#define vx_info_mbcaps(v, c) ((v) ? cap_intersect(__vx_bcaps(v), c) : c) + +#define vx_mbcaps(c) vx_info_mbcaps(current_vx_info(), c) + + +/* masked cap_bset */ + +#define vx_info_cap_bset(v) vx_info_mbcaps(v, current->cap_bset) + +#define vx_current_cap_bset() vx_info_cap_bset(current_vx_info()) + +#if 0 +#define vx_info_mbcap(v, b) \ + (!vx_info_flags(v, VXF_STATE_SETUP, 0) ? \ + vx_info_bcaps(v, b) : (b)) + +#define task_vx_mbcap(t, b) \ + vx_info_mbcap((t)->vx_info, (t)->b) + +#define vx_mbcap(b) task_vx_mbcap(current, b) +#endif + +#define vx_cap_raised(v, c, f) cap_raised(vx_info_mbcaps(v, c), f) + +#define vx_capable(b, c) (capable(b) || \ + (cap_raised(current_cap(), b) && vx_ccaps(c))) + +#define vx_ns_capable(n, b, c) (ns_capable(n, b) || \ + (cap_raised(current_cap(), b) && vx_ccaps(c))) + +#define nx_capable(b, c) (capable(b) || \ + (cap_raised(current_cap(), b) && nx_ncaps(c))) + +#define nx_ns_capable(n, b, c) (ns_capable(n, b) || \ + (cap_raised(current_cap(), b) && nx_ncaps(c))) + +#define vx_task_initpid(t, n) \ + ((t)->vx_info && \ + ((t)->vx_info->vx_initpid == (n))) + +#define vx_current_initpid(n) vx_task_initpid(current, n) + + +/* context unshare mask */ + +#define __vx_umask(v) ((v)->vx_umask) + +#define vx_current_umask() __vx_umask(current_vx_info()) + +#define vx_can_unshare(b, f) (capable(b) || \ + (cap_raised(current_cap(), b) && \ + !((f) & ~vx_current_umask()))) + +#define vx_ns_can_unshare(n, b, f) (ns_capable(n, b) || \ + (cap_raised(current_cap(), b) && \ + !((f) & ~vx_current_umask()))) + +#define __vx_wmask(v) ((v)->vx_wmask) + +#define vx_current_wmask() __vx_wmask(current_vx_info()) + + +#define __vx_state(v) ((v) ? ((v)->vx_state) : 0) + +#define vx_info_state(v, m) (__vx_state(v) & (m)) + + +#define __nx_state(n) ((n) ? 
((n)->nx_state) : 0) + +#define nx_info_state(n, m) (__nx_state(n) & (m)) + +#endif diff -NurpP --minimal linux-3.10.19/include/linux/vserver/cacct.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/cacct.h --- linux-3.10.19/include/linux/vserver/cacct.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/cacct.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,15 @@ +#ifndef _VSERVER_CACCT_H +#define _VSERVER_CACCT_H + + +enum sock_acc_field { + VXA_SOCK_UNSPEC = 0, + VXA_SOCK_UNIX, + VXA_SOCK_INET, + VXA_SOCK_INET6, + VXA_SOCK_PACKET, + VXA_SOCK_OTHER, + VXA_SOCK_SIZE /* array size */ +}; + +#endif /* _VSERVER_CACCT_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/cacct_cmd.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/cacct_cmd.h --- linux-3.10.19/include/linux/vserver/cacct_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/cacct_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,10 @@ +#ifndef _VSERVER_CACCT_CMD_H +#define _VSERVER_CACCT_CMD_H + + +#include +#include + +extern int vc_sock_stat(struct vx_info *, void __user *); + +#endif /* _VSERVER_CACCT_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/cacct_def.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/cacct_def.h --- linux-3.10.19/include/linux/vserver/cacct_def.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/cacct_def.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,43 @@ +#ifndef _VSERVER_CACCT_DEF_H +#define _VSERVER_CACCT_DEF_H + +#include +#include + + +struct _vx_sock_acc { + atomic_long_t count; + atomic_long_t total; +}; + +/* context sub struct */ + +struct _vx_cacct { + struct _vx_sock_acc sock[VXA_SOCK_SIZE][3]; + atomic_t slab[8]; + atomic_t page[6][8]; +}; + +#ifdef CONFIG_VSERVER_DEBUG + +static inline void __dump_vx_cacct(struct _vx_cacct *cacct) +{ + int i, j; + + printk("\t_vx_cacct:"); + for (i = 0; i < 6; i++) { + struct _vx_sock_acc *ptr = cacct->sock[i]; + + printk("\t [%d] =", i); + for (j = 0; j < 3; j++) { + printk(" [%d] = %8lu, %8lu", j, + atomic_long_read(&ptr[j].count), + atomic_long_read(&ptr[j].total)); + } + printk("\n"); + } +} + +#endif + +#endif /* _VSERVER_CACCT_DEF_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/cacct_int.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/cacct_int.h --- linux-3.10.19/include/linux/vserver/cacct_int.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/cacct_int.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,17 @@ +#ifndef _VSERVER_CACCT_INT_H +#define _VSERVER_CACCT_INT_H + +static inline +unsigned long vx_sock_count(struct _vx_cacct *cacct, int type, int pos) +{ + return atomic_long_read(&cacct->sock[type][pos].count); +} + + +static inline +unsigned long vx_sock_total(struct _vx_cacct *cacct, int type, int pos) +{ + return atomic_long_read(&cacct->sock[type][pos].total); +} + +#endif /* _VSERVER_CACCT_INT_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/check.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/check.h --- linux-3.10.19/include/linux/vserver/check.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/check.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,89 @@ +#ifndef _VSERVER_CHECK_H +#define _VSERVER_CHECK_H + + +#define MAX_S_CONTEXT 65535 /* Arbitrary limit */ + +#ifdef CONFIG_VSERVER_DYNAMIC_IDS +#define MIN_D_CONTEXT 49152 /* dynamic contexts start here */ +#else +#define MIN_D_CONTEXT 65536 +#endif 
+ +/* check conditions */ + +#define VS_ADMIN 0x0001 +#define VS_WATCH 0x0002 +#define VS_HIDE 0x0004 +#define VS_HOSTID 0x0008 + +#define VS_IDENT 0x0010 +#define VS_EQUIV 0x0020 +#define VS_PARENT 0x0040 +#define VS_CHILD 0x0080 + +#define VS_ARG_MASK 0x00F0 + +#define VS_DYNAMIC 0x0100 +#define VS_STATIC 0x0200 + +#define VS_ATR_MASK 0x0F00 + +#ifdef CONFIG_VSERVER_PRIVACY +#define VS_ADMIN_P (0) +#define VS_WATCH_P (0) +#else +#define VS_ADMIN_P VS_ADMIN +#define VS_WATCH_P VS_WATCH +#endif + +#define VS_HARDIRQ 0x1000 +#define VS_SOFTIRQ 0x2000 +#define VS_IRQ 0x4000 + +#define VS_IRQ_MASK 0xF000 + +#include + +/* + * check current context for ADMIN/WATCH and + * optionally against supplied argument + */ +static inline int __vs_check(int cid, int id, unsigned int mode) +{ + if (mode & VS_ARG_MASK) { + if ((mode & VS_IDENT) && (id == cid)) + return 1; + } + if (mode & VS_ATR_MASK) { + if ((mode & VS_DYNAMIC) && + (id >= MIN_D_CONTEXT) && + (id <= MAX_S_CONTEXT)) + return 1; + if ((mode & VS_STATIC) && + (id > 1) && (id < MIN_D_CONTEXT)) + return 1; + } + if (mode & VS_IRQ_MASK) { + if ((mode & VS_IRQ) && unlikely(in_interrupt())) + return 1; + if ((mode & VS_HARDIRQ) && unlikely(in_irq())) + return 1; + if ((mode & VS_SOFTIRQ) && unlikely(in_softirq())) + return 1; + } + return (((mode & VS_ADMIN) && (cid == 0)) || + ((mode & VS_WATCH) && (cid == 1)) || + ((mode & VS_HOSTID) && (id == 0))); +} + +#define vx_check(c, m) __vs_check(vx_current_xid(), c, (m) | VS_IRQ) + +#define vx_weak_check(c, m) ((m) ? vx_check(c, m) : 1) + + +#define nx_check(c, m) __vs_check(nx_current_nid(), c, m) + +#define nx_weak_check(c, m) ((m) ? nx_check(c, m) : 1) + +#endif diff -NurpP --minimal linux-3.10.19/include/linux/vserver/context.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/context.h --- linux-3.10.19/include/linux/vserver/context.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/context.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,110 @@ +#ifndef _VSERVER_CONTEXT_H +#define _VSERVER_CONTEXT_H + + +#include +#include +#include +#include + +#include "limit_def.h" +#include "sched_def.h" +#include "cvirt_def.h" +#include "cacct_def.h" +#include "device_def.h" + +#define VX_SPACES 2 + +struct _vx_info_pc { + struct _vx_sched_pc sched_pc; + struct _vx_cvirt_pc cvirt_pc; +}; + +struct _vx_space { + unsigned long vx_nsmask; /* assignment mask */ + struct nsproxy *vx_nsproxy; /* private namespaces */ + struct fs_struct *vx_fs; /* private namespace fs */ + const struct cred *vx_cred; /* task credentials */ +}; + +struct vx_info { + struct hlist_node vx_hlist; /* linked list of contexts */ + vxid_t vx_id; /* context id */ + atomic_t vx_usecnt; /* usage count */ + atomic_t vx_tasks; /* tasks count */ + struct vx_info *vx_parent; /* parent context */ + int vx_state; /* context state */ + + struct _vx_space space[VX_SPACES]; /* namespace store */ + + uint64_t vx_flags; /* context flags */ + uint64_t vx_ccaps; /* context caps (vserver) */ + uint64_t vx_umask; /* unshare mask (guest) */ + uint64_t vx_wmask; /* warn mask (guest) */ + kernel_cap_t vx_bcaps; /* bounding caps (system) */ + + struct task_struct *vx_reaper; /* guest reaper process */ + pid_t vx_initpid; /* PID of guest init */ + int64_t vx_badness_bias; /* OOM points bias */ + + struct _vx_limit limit; /* vserver limits */ + struct _vx_sched sched; /* vserver scheduler */ + struct _vx_cvirt cvirt; /* virtual/bias stuff */ + struct _vx_cacct cacct; /* context accounting */ + + struct _vx_device dmap; /* default 
device map targets */ + +#ifndef CONFIG_SMP + struct _vx_info_pc info_pc; /* per cpu data */ +#else + struct _vx_info_pc *ptr_pc; /* per cpu array */ +#endif + + wait_queue_head_t vx_wait; /* context exit waitqueue */ + int reboot_cmd; /* last sys_reboot() cmd */ + int exit_code; /* last process exit code */ + + char vx_name[65]; /* vserver name */ +}; + +#ifndef CONFIG_SMP +#define vx_ptr_pc(vxi) (&(vxi)->info_pc) +#define vx_per_cpu(vxi, v, id) vx_ptr_pc(vxi)->v +#else +#define vx_ptr_pc(vxi) ((vxi)->ptr_pc) +#define vx_per_cpu(vxi, v, id) per_cpu_ptr(vx_ptr_pc(vxi), id)->v +#endif + +#define vx_cpu(vxi, v) vx_per_cpu(vxi, v, smp_processor_id()) + + +struct vx_info_save { + struct vx_info *vxi; + vxid_t xid; +}; + + +/* status flags */ + +#define VXS_HASHED 0x0001 +#define VXS_PAUSED 0x0010 +#define VXS_SHUTDOWN 0x0100 +#define VXS_HELPER 0x1000 +#define VXS_RELEASED 0x8000 + + +extern void claim_vx_info(struct vx_info *, struct task_struct *); +extern void release_vx_info(struct vx_info *, struct task_struct *); + +extern struct vx_info *lookup_vx_info(int); +extern struct vx_info *lookup_or_create_vx_info(int); + +extern int get_xid_list(int, unsigned int *, int); +extern int xid_is_hashed(vxid_t); + +extern int vx_migrate_task(struct task_struct *, struct vx_info *, int); + +extern long vs_state_change(struct vx_info *, unsigned int); + + +#endif /* _VSERVER_CONTEXT_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/context_cmd.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/context_cmd.h --- linux-3.10.19/include/linux/vserver/context_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/context_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,33 @@ +#ifndef _VSERVER_CONTEXT_CMD_H +#define _VSERVER_CONTEXT_CMD_H + +#include + +extern int vc_task_xid(uint32_t); + +extern int vc_vx_info(struct vx_info *, void __user *); + +extern int vc_ctx_stat(struct vx_info *, void __user *); + +extern int vc_ctx_create(uint32_t, void __user *); +extern int vc_ctx_migrate(struct vx_info *, void __user *); + +extern int vc_get_cflags(struct vx_info *, void __user *); +extern int vc_set_cflags(struct vx_info *, void __user *); + +extern int vc_get_ccaps(struct vx_info *, void __user *); +extern int vc_set_ccaps(struct vx_info *, void __user *); + +extern int vc_get_bcaps(struct vx_info *, void __user *); +extern int vc_set_bcaps(struct vx_info *, void __user *); + +extern int vc_get_umask(struct vx_info *, void __user *); +extern int vc_set_umask(struct vx_info *, void __user *); + +extern int vc_get_wmask(struct vx_info *, void __user *); +extern int vc_set_wmask(struct vx_info *, void __user *); + +extern int vc_get_badness(struct vx_info *, void __user *); +extern int vc_set_badness(struct vx_info *, void __user *); + +#endif /* _VSERVER_CONTEXT_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/cvirt.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/cvirt.h --- linux-3.10.19/include/linux/vserver/cvirt.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/cvirt.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,18 @@ +#ifndef _VSERVER_CVIRT_H +#define _VSERVER_CVIRT_H + +struct timespec; + +void vx_vsi_boottime(struct timespec *); + +void vx_vsi_uptime(struct timespec *, struct timespec *); + + +struct vx_info; + +void vx_update_load(struct vx_info *); + + +int vx_do_syslog(int, char __user *, int); + +#endif /* _VSERVER_CVIRT_H */ diff -NurpP --minimal 
linux-3.10.19/include/linux/vserver/cvirt_cmd.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/cvirt_cmd.h --- linux-3.10.19/include/linux/vserver/cvirt_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/cvirt_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,13 @@ +#ifndef _VSERVER_CVIRT_CMD_H +#define _VSERVER_CVIRT_CMD_H + + +#include +#include + +extern int vc_set_vhi_name(struct vx_info *, void __user *); +extern int vc_get_vhi_name(struct vx_info *, void __user *); + +extern int vc_virt_stat(struct vx_info *, void __user *); + +#endif /* _VSERVER_CVIRT_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/cvirt_def.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/cvirt_def.h --- linux-3.10.19/include/linux/vserver/cvirt_def.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/cvirt_def.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,80 @@ +#ifndef _VSERVER_CVIRT_DEF_H +#define _VSERVER_CVIRT_DEF_H + +#include +#include +#include +#include +#include + + +struct _vx_usage_stat { + uint64_t user; + uint64_t nice; + uint64_t system; + uint64_t softirq; + uint64_t irq; + uint64_t idle; + uint64_t iowait; +}; + +struct _vx_syslog { + wait_queue_head_t log_wait; + spinlock_t logbuf_lock; /* lock for the log buffer */ + + unsigned long log_start; /* next char to be read by syslog() */ + unsigned long con_start; /* next char to be sent to consoles */ + unsigned long log_end; /* most-recently-written-char + 1 */ + unsigned long logged_chars; /* #chars since last read+clear operation */ + + char log_buf[1024]; +}; + + +/* context sub struct */ + +struct _vx_cvirt { + atomic_t nr_threads; /* number of current threads */ + atomic_t nr_running; /* number of running threads */ + atomic_t nr_uninterruptible; /* number of uninterruptible threads */ + + atomic_t nr_onhold; /* processes on hold */ + uint32_t onhold_last; /* jiffies when put on hold */ + + struct timespec bias_ts; /* time offset to the host */ + struct timespec bias_idle; + struct timespec bias_uptime; /* context creation point */ + uint64_t bias_clock; /* offset in clock_t */ + + spinlock_t load_lock; /* lock for the load averages */ + atomic_t load_updates; /* nr of load updates done so far */ + uint32_t load_last; /* last time load was calculated */ + uint32_t load[3]; /* load averages 1,5,15 */ + + atomic_t total_forks; /* number of forks so far */ + + struct _vx_syslog syslog; +}; + +struct _vx_cvirt_pc { + struct _vx_usage_stat cpustat; +}; + + +#ifdef CONFIG_VSERVER_DEBUG + +static inline void __dump_vx_cvirt(struct _vx_cvirt *cvirt) +{ + printk("\t_vx_cvirt:\n"); + printk("\t threads: %4d, %4d, %4d, %4d\n", + atomic_read(&cvirt->nr_threads), + atomic_read(&cvirt->nr_running), + atomic_read(&cvirt->nr_uninterruptible), + atomic_read(&cvirt->nr_onhold)); + /* add rest here */ + printk("\t total_forks = %d\n", atomic_read(&cvirt->total_forks)); +} + +#endif + +#endif /* _VSERVER_CVIRT_DEF_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/debug.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/debug.h --- linux-3.10.19/include/linux/vserver/debug.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/debug.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,146 @@ +#ifndef _VSERVER_DEBUG_H +#define _VSERVER_DEBUG_H + + +#define VXD_CBIT(n, m) (vs_debug_ ## n & (1 << (m))) +#define VXD_CMIN(n, m) (vs_debug_ ## n > (m)) +#define VXD_MASK(n, m) (vs_debug_ ## n & (m)) + +#define VXD_DEV(d) (d), 
(d)->bd_inode->i_ino, \ + imajor((d)->bd_inode), iminor((d)->bd_inode) +#define VXF_DEV "%p[%lu,%d:%d]" + +#if defined(CONFIG_QUOTES_UTF8) +#define VS_Q_LQM "\xc2\xbb" +#define VS_Q_RQM "\xc2\xab" +#elif defined(CONFIG_QUOTES_ASCII) +#define VS_Q_LQM "\x27" +#define VS_Q_RQM "\x27" +#else +#define VS_Q_LQM "\xbb" +#define VS_Q_RQM "\xab" +#endif + +#define VS_Q(f) VS_Q_LQM f VS_Q_RQM + + +#define vxd_path(p) \ + ({ static char _buffer[PATH_MAX]; \ + d_path(p, _buffer, sizeof(_buffer)); }) + +#define vxd_cond_path(n) \ + ((n) ? vxd_path(&(n)->path) : "" ) + + +#ifdef CONFIG_VSERVER_DEBUG + +extern unsigned int vs_debug_switch; +extern unsigned int vs_debug_xid; +extern unsigned int vs_debug_nid; +extern unsigned int vs_debug_tag; +extern unsigned int vs_debug_net; +extern unsigned int vs_debug_limit; +extern unsigned int vs_debug_cres; +extern unsigned int vs_debug_dlim; +extern unsigned int vs_debug_quota; +extern unsigned int vs_debug_cvirt; +extern unsigned int vs_debug_space; +extern unsigned int vs_debug_perm; +extern unsigned int vs_debug_misc; + + +#define VX_LOGLEVEL "vxD: " +#define VX_PROC_FMT "%p: " +#define VX_PROCESS current + +#define vxdprintk(c, f, x...) \ + do { \ + if (c) \ + printk(VX_LOGLEVEL VX_PROC_FMT f "\n", \ + VX_PROCESS , ##x); \ + } while (0) + +#define vxlprintk(c, f, x...) \ + do { \ + if (c) \ + printk(VX_LOGLEVEL f " @%s:%d\n", x); \ + } while (0) + +#define vxfprintk(c, f, x...) \ + do { \ + if (c) \ + printk(VX_LOGLEVEL f " %s@%s:%d\n", x); \ + } while (0) + + +struct vx_info; + +void dump_vx_info(struct vx_info *, int); +void dump_vx_info_inactive(int); + +#else /* CONFIG_VSERVER_DEBUG */ + +#define vs_debug_switch 0 +#define vs_debug_xid 0 +#define vs_debug_nid 0 +#define vs_debug_tag 0 +#define vs_debug_net 0 +#define vs_debug_limit 0 +#define vs_debug_cres 0 +#define vs_debug_dlim 0 +#define vs_debug_quota 0 +#define vs_debug_cvirt 0 +#define vs_debug_space 0 +#define vs_debug_perm 0 +#define vs_debug_misc 0 + +#define vxdprintk(x...) do { } while (0) +#define vxlprintk(x...) do { } while (0) +#define vxfprintk(x...) do { } while (0) + +#endif /* CONFIG_VSERVER_DEBUG */ + + +#ifdef CONFIG_VSERVER_WARN + +#define VX_WARNLEVEL KERN_WARNING "vxW: " +#define VX_WARN_TASK "[" VS_Q("%s") ",%u:#%u|%u|%u] " +#define VX_WARN_XID "[xid #%u] " +#define VX_WARN_NID "[nid #%u] " +#define VX_WARN_TAG "[tag #%u] " + +#define vxwprintk(c, f, x...) \ + do { \ + if (c) \ + printk(VX_WARNLEVEL f "\n", ##x); \ + } while (0) + +#else /* CONFIG_VSERVER_WARN */ + +#define vxwprintk(x...) do { } while (0) + +#endif /* CONFIG_VSERVER_WARN */ + +#define vxwprintk_task(c, f, x...) \ + vxwprintk(c, VX_WARN_TASK f, \ + current->comm, current->pid, \ + current->xid, current->nid, \ + current->tag, ##x) +#define vxwprintk_xid(c, f, x...) \ + vxwprintk(c, VX_WARN_XID f, current->xid, x) +#define vxwprintk_nid(c, f, x...) \ + vxwprintk(c, VX_WARN_NID f, current->nid, x) +#define vxwprintk_tag(c, f, x...) \ + vxwprintk(c, VX_WARN_TAG f, current->tag, x) + +#ifdef CONFIG_VSERVER_DEBUG +#define vxd_assert_lock(l) assert_spin_locked(l) +#define vxd_assert(c, f, x...) vxlprintk(!(c), \ + "assertion [" f "] failed.", ##x, __FILE__, __LINE__) +#else +#define vxd_assert_lock(l) do { } while (0) +#define vxd_assert(c, f, x...) 
do { } while (0) +#endif + + +#endif /* _VSERVER_DEBUG_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/debug_cmd.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/debug_cmd.h --- linux-3.10.19/include/linux/vserver/debug_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/debug_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,37 @@ +#ifndef _VSERVER_DEBUG_CMD_H +#define _VSERVER_DEBUG_CMD_H + +#include + + +#ifdef CONFIG_COMPAT + +#include + +struct vcmd_read_history_v0_x32 { + uint32_t index; + uint32_t count; + compat_uptr_t data_ptr; +}; + +struct vcmd_read_monitor_v0_x32 { + uint32_t index; + uint32_t count; + compat_uptr_t data_ptr; +}; + +#endif /* CONFIG_COMPAT */ + +extern int vc_dump_history(uint32_t); + +extern int vc_read_history(uint32_t, void __user *); +extern int vc_read_monitor(uint32_t, void __user *); + +#ifdef CONFIG_COMPAT + +extern int vc_read_history_x32(uint32_t, void __user *); +extern int vc_read_monitor_x32(uint32_t, void __user *); + +#endif /* CONFIG_COMPAT */ + +#endif /* _VSERVER_DEBUG_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/device.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/device.h --- linux-3.10.19/include/linux/vserver/device.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/device.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,9 @@ +#ifndef _VSERVER_DEVICE_H +#define _VSERVER_DEVICE_H + + +#include + +#else /* _VSERVER_DEVICE_H */ +#warning duplicate inclusion +#endif /* _VSERVER_DEVICE_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/device_cmd.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/device_cmd.h --- linux-3.10.19/include/linux/vserver/device_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/device_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,31 @@ +#ifndef _VSERVER_DEVICE_CMD_H +#define _VSERVER_DEVICE_CMD_H + +#include + + +#ifdef CONFIG_COMPAT + +#include + +struct vcmd_set_mapping_v0_x32 { + compat_uptr_t device_ptr; + compat_uptr_t target_ptr; + uint32_t flags; +}; + +#endif /* CONFIG_COMPAT */ + +#include + +extern int vc_set_mapping(struct vx_info *, void __user *); +extern int vc_unset_mapping(struct vx_info *, void __user *); + +#ifdef CONFIG_COMPAT + +extern int vc_set_mapping_x32(struct vx_info *, void __user *); +extern int vc_unset_mapping_x32(struct vx_info *, void __user *); + +#endif /* CONFIG_COMPAT */ + +#endif /* _VSERVER_DEVICE_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/device_def.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/device_def.h --- linux-3.10.19/include/linux/vserver/device_def.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/device_def.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,17 @@ +#ifndef _VSERVER_DEVICE_DEF_H +#define _VSERVER_DEVICE_DEF_H + +#include + +struct vx_dmap_target { + dev_t target; + uint32_t flags; +}; + +struct _vx_device { +#ifdef CONFIG_VSERVER_DEVICE + struct vx_dmap_target targets[2]; +#endif +}; + +#endif /* _VSERVER_DEVICE_DEF_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/dlimit.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/dlimit.h --- linux-3.10.19/include/linux/vserver/dlimit.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/dlimit.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,54 @@ +#ifndef _VSERVER_DLIMIT_H +#define _VSERVER_DLIMIT_H + +#include "switch.h" + + +#ifdef 
__KERNEL__ + +/* keep in sync with CDLIM_INFINITY */ + +#define DLIM_INFINITY (~0ULL) + +#include +#include + +struct super_block; + +struct dl_info { + struct hlist_node dl_hlist; /* linked list of contexts */ + struct rcu_head dl_rcu; /* the rcu head */ + vtag_t dl_tag; /* context tag */ + atomic_t dl_usecnt; /* usage count */ + atomic_t dl_refcnt; /* reference count */ + + struct super_block *dl_sb; /* associated superblock */ + + spinlock_t dl_lock; /* protect the values */ + + unsigned long long dl_space_used; /* used space in bytes */ + unsigned long long dl_space_total; /* maximum space in bytes */ + unsigned long dl_inodes_used; /* used inodes */ + unsigned long dl_inodes_total; /* maximum inodes */ + + unsigned int dl_nrlmult; /* non root limit mult */ +}; + +struct rcu_head; + +extern void rcu_free_dl_info(struct rcu_head *); +extern void unhash_dl_info(struct dl_info *); + +extern struct dl_info *locate_dl_info(struct super_block *, vtag_t); + + +struct kstatfs; + +extern void vx_vsi_statfs(struct super_block *, struct kstatfs *); + +typedef uint64_t dlsize_t; + +#endif /* __KERNEL__ */ +#else /* _VSERVER_DLIMIT_H */ +#warning duplicate inclusion +#endif /* _VSERVER_DLIMIT_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/dlimit_cmd.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/dlimit_cmd.h --- linux-3.10.19/include/linux/vserver/dlimit_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/dlimit_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,46 @@ +#ifndef _VSERVER_DLIMIT_CMD_H +#define _VSERVER_DLIMIT_CMD_H + +#include + + +#ifdef CONFIG_COMPAT + +#include + +struct vcmd_ctx_dlimit_base_v0_x32 { + compat_uptr_t name_ptr; + uint32_t flags; +}; + +struct vcmd_ctx_dlimit_v0_x32 { + compat_uptr_t name_ptr; + uint32_t space_used; /* used space in kbytes */ + uint32_t space_total; /* maximum space in kbytes */ + uint32_t inodes_used; /* used inodes */ + uint32_t inodes_total; /* maximum inodes */ + uint32_t reserved; /* reserved for root in % */ + uint32_t flags; +}; + +#endif /* CONFIG_COMPAT */ + +#include + +extern int vc_add_dlimit(uint32_t, void __user *); +extern int vc_rem_dlimit(uint32_t, void __user *); + +extern int vc_set_dlimit(uint32_t, void __user *); +extern int vc_get_dlimit(uint32_t, void __user *); + +#ifdef CONFIG_COMPAT + +extern int vc_add_dlimit_x32(uint32_t, void __user *); +extern int vc_rem_dlimit_x32(uint32_t, void __user *); + +extern int vc_set_dlimit_x32(uint32_t, void __user *); +extern int vc_get_dlimit_x32(uint32_t, void __user *); + +#endif /* CONFIG_COMPAT */ + +#endif /* _VSERVER_DLIMIT_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/global.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/global.h --- linux-3.10.19/include/linux/vserver/global.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/global.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,19 @@ +#ifndef _VSERVER_GLOBAL_H +#define _VSERVER_GLOBAL_H + + +extern atomic_t vx_global_ctotal; +extern atomic_t vx_global_cactive; + +extern atomic_t nx_global_ctotal; +extern atomic_t nx_global_cactive; + +extern atomic_t vs_global_nsproxy; +extern atomic_t vs_global_fs; +extern atomic_t vs_global_mnt_ns; +extern atomic_t vs_global_uts_ns; +extern atomic_t vs_global_user_ns; +extern atomic_t vs_global_pid_ns; + + +#endif /* _VSERVER_GLOBAL_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/history.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/history.h --- 
linux-3.10.19/include/linux/vserver/history.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/history.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,197 @@ +#ifndef _VSERVER_HISTORY_H +#define _VSERVER_HISTORY_H + + +enum { + VXH_UNUSED = 0, + VXH_THROW_OOPS = 1, + + VXH_GET_VX_INFO, + VXH_PUT_VX_INFO, + VXH_INIT_VX_INFO, + VXH_SET_VX_INFO, + VXH_CLR_VX_INFO, + VXH_CLAIM_VX_INFO, + VXH_RELEASE_VX_INFO, + VXH_ALLOC_VX_INFO, + VXH_DEALLOC_VX_INFO, + VXH_HASH_VX_INFO, + VXH_UNHASH_VX_INFO, + VXH_LOC_VX_INFO, + VXH_LOOKUP_VX_INFO, + VXH_CREATE_VX_INFO, +}; + +struct _vxhe_vxi { + struct vx_info *ptr; + unsigned xid; + unsigned usecnt; + unsigned tasks; +}; + +struct _vxhe_set_clr { + void *data; +}; + +struct _vxhe_loc_lookup { + unsigned arg; +}; + +struct _vx_hist_entry { + void *loc; + unsigned short seq; + unsigned short type; + struct _vxhe_vxi vxi; + union { + struct _vxhe_set_clr sc; + struct _vxhe_loc_lookup ll; + }; +}; + +#ifdef CONFIG_VSERVER_HISTORY + +extern unsigned volatile int vxh_active; + +struct _vx_hist_entry *vxh_advance(void *loc); + + +static inline +void __vxh_copy_vxi(struct _vx_hist_entry *entry, struct vx_info *vxi) +{ + entry->vxi.ptr = vxi; + if (vxi) { + entry->vxi.usecnt = atomic_read(&vxi->vx_usecnt); + entry->vxi.tasks = atomic_read(&vxi->vx_tasks); + entry->vxi.xid = vxi->vx_id; + } +} + + +#define __HERE__ current_text_addr() + +#define __VXH_BODY(__type, __data, __here) \ + struct _vx_hist_entry *entry; \ + \ + preempt_disable(); \ + entry = vxh_advance(__here); \ + __data; \ + entry->type = __type; \ + preempt_enable(); + + + /* pass vxi only */ + +#define __VXH_SMPL \ + __vxh_copy_vxi(entry, vxi) + +static inline +void __vxh_smpl(struct vx_info *vxi, int __type, void *__here) +{ + __VXH_BODY(__type, __VXH_SMPL, __here) +} + + /* pass vxi and data (void *) */ + +#define __VXH_DATA \ + __vxh_copy_vxi(entry, vxi); \ + entry->sc.data = data + +static inline +void __vxh_data(struct vx_info *vxi, void *data, + int __type, void *__here) +{ + __VXH_BODY(__type, __VXH_DATA, __here) +} + + /* pass vxi and arg (long) */ + +#define __VXH_LONG \ + __vxh_copy_vxi(entry, vxi); \ + entry->ll.arg = arg + +static inline +void __vxh_long(struct vx_info *vxi, long arg, + int __type, void *__here) +{ + __VXH_BODY(__type, __VXH_LONG, __here) +} + + +static inline +void __vxh_throw_oops(void *__here) +{ + __VXH_BODY(VXH_THROW_OOPS, {}, __here); + /* prevent further acquisition */ + vxh_active = 0; +} + + +#define vxh_throw_oops() __vxh_throw_oops(__HERE__); + +#define __vxh_get_vx_info(v, h) __vxh_smpl(v, VXH_GET_VX_INFO, h); +#define __vxh_put_vx_info(v, h) __vxh_smpl(v, VXH_PUT_VX_INFO, h); + +#define __vxh_init_vx_info(v, d, h) \ + __vxh_data(v, d, VXH_INIT_VX_INFO, h); +#define __vxh_set_vx_info(v, d, h) \ + __vxh_data(v, d, VXH_SET_VX_INFO, h); +#define __vxh_clr_vx_info(v, d, h) \ + __vxh_data(v, d, VXH_CLR_VX_INFO, h); + +#define __vxh_claim_vx_info(v, d, h) \ + __vxh_data(v, d, VXH_CLAIM_VX_INFO, h); +#define __vxh_release_vx_info(v, d, h) \ + __vxh_data(v, d, VXH_RELEASE_VX_INFO, h); + +#define vxh_alloc_vx_info(v) \ + __vxh_smpl(v, VXH_ALLOC_VX_INFO, __HERE__); +#define vxh_dealloc_vx_info(v) \ + __vxh_smpl(v, VXH_DEALLOC_VX_INFO, __HERE__); + +#define vxh_hash_vx_info(v) \ + __vxh_smpl(v, VXH_HASH_VX_INFO, __HERE__); +#define vxh_unhash_vx_info(v) \ + __vxh_smpl(v, VXH_UNHASH_VX_INFO, __HERE__); + +#define vxh_loc_vx_info(v, l) \ + __vxh_long(v, l, VXH_LOC_VX_INFO, __HERE__); +#define vxh_lookup_vx_info(v, l) \ + __vxh_long(v, 
l, VXH_LOOKUP_VX_INFO, __HERE__); +#define vxh_create_vx_info(v, l) \ + __vxh_long(v, l, VXH_CREATE_VX_INFO, __HERE__); + +extern void vxh_dump_history(void); + + +#else /* CONFIG_VSERVER_HISTORY */ + +#define __HERE__ 0 + +#define vxh_throw_oops() do { } while (0) + +#define __vxh_get_vx_info(v, h) do { } while (0) +#define __vxh_put_vx_info(v, h) do { } while (0) + +#define __vxh_init_vx_info(v, d, h) do { } while (0) +#define __vxh_set_vx_info(v, d, h) do { } while (0) +#define __vxh_clr_vx_info(v, d, h) do { } while (0) + +#define __vxh_claim_vx_info(v, d, h) do { } while (0) +#define __vxh_release_vx_info(v, d, h) do { } while (0) + +#define vxh_alloc_vx_info(v) do { } while (0) +#define vxh_dealloc_vx_info(v) do { } while (0) + +#define vxh_hash_vx_info(v) do { } while (0) +#define vxh_unhash_vx_info(v) do { } while (0) + +#define vxh_loc_vx_info(v, l) do { } while (0) +#define vxh_lookup_vx_info(v, l) do { } while (0) +#define vxh_create_vx_info(v, l) do { } while (0) + +#define vxh_dump_history() do { } while (0) + + +#endif /* CONFIG_VSERVER_HISTORY */ + +#endif /* _VSERVER_HISTORY_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/inode.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/inode.h --- linux-3.10.19/include/linux/vserver/inode.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/inode.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,19 @@ +#ifndef _VSERVER_INODE_H +#define _VSERVER_INODE_H + +#include + + +#ifdef CONFIG_VSERVER_PROC_SECURE +#define IATTR_PROC_DEFAULT ( IATTR_ADMIN | IATTR_HIDE ) +#define IATTR_PROC_SYMLINK ( IATTR_ADMIN ) +#else +#define IATTR_PROC_DEFAULT ( IATTR_ADMIN ) +#define IATTR_PROC_SYMLINK ( IATTR_ADMIN ) +#endif + +#define vx_hide_check(c, m) (((m) & IATTR_HIDE) ? 
vx_check(c, m) : 1) + +#else /* _VSERVER_INODE_H */ +#warning duplicate inclusion +#endif /* _VSERVER_INODE_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/inode_cmd.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/inode_cmd.h --- linux-3.10.19/include/linux/vserver/inode_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/inode_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,36 @@ +#ifndef _VSERVER_INODE_CMD_H +#define _VSERVER_INODE_CMD_H + +#include + + + +#ifdef CONFIG_COMPAT + +#include + +struct vcmd_ctx_iattr_v1_x32 { + compat_uptr_t name_ptr; + uint32_t tag; + uint32_t flags; + uint32_t mask; +}; + +#endif /* CONFIG_COMPAT */ + +#include + +extern int vc_get_iattr(void __user *); +extern int vc_set_iattr(void __user *); + +extern int vc_fget_iattr(uint32_t, void __user *); +extern int vc_fset_iattr(uint32_t, void __user *); + +#ifdef CONFIG_COMPAT + +extern int vc_get_iattr_x32(void __user *); +extern int vc_set_iattr_x32(void __user *); + +#endif /* CONFIG_COMPAT */ + +#endif /* _VSERVER_INODE_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/limit.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/limit.h --- linux-3.10.19/include/linux/vserver/limit.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/limit.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,61 @@ +#ifndef _VSERVER_LIMIT_H +#define _VSERVER_LIMIT_H + +#include + + +#define VLIM_NOCHECK ((1L << VLIMIT_DENTRY) | (1L << RLIMIT_RSS)) + +/* keep in sync with CRLIM_INFINITY */ + +#define VLIM_INFINITY (~0ULL) + +#include +#include + +#ifndef RLIM_INFINITY +#warning RLIM_INFINITY is undefined +#endif + +#define __rlim_val(l, r, v) ((l)->res[r].v) + +#define __rlim_soft(l, r) __rlim_val(l, r, soft) +#define __rlim_hard(l, r) __rlim_val(l, r, hard) + +#define __rlim_rcur(l, r) __rlim_val(l, r, rcur) +#define __rlim_rmin(l, r) __rlim_val(l, r, rmin) +#define __rlim_rmax(l, r) __rlim_val(l, r, rmax) + +#define __rlim_lhit(l, r) __rlim_val(l, r, lhit) +#define __rlim_hit(l, r) atomic_inc(&__rlim_lhit(l, r)) + +typedef atomic_long_t rlim_atomic_t; +typedef unsigned long rlim_t; + +#define __rlim_get(l, r) atomic_long_read(&__rlim_rcur(l, r)) +#define __rlim_set(l, r, v) atomic_long_set(&__rlim_rcur(l, r), v) +#define __rlim_inc(l, r) atomic_long_inc(&__rlim_rcur(l, r)) +#define __rlim_dec(l, r) atomic_long_dec(&__rlim_rcur(l, r)) +#define __rlim_add(l, r, v) atomic_long_add(v, &__rlim_rcur(l, r)) +#define __rlim_sub(l, r, v) atomic_long_sub(v, &__rlim_rcur(l, r)) + + +#if (RLIM_INFINITY == VLIM_INFINITY) +#define VX_VLIM(r) ((long long)(long)(r)) +#define VX_RLIM(v) ((rlim_t)(v)) +#else +#define VX_VLIM(r) (((r) == RLIM_INFINITY) \ + ? VLIM_INFINITY : (long long)(r)) +#define VX_RLIM(v) (((v) == VLIM_INFINITY) \ + ? 
RLIM_INFINITY : (rlim_t)(v)) +#endif + +struct sysinfo; + +void vx_vsi_meminfo(struct sysinfo *); +void vx_vsi_swapinfo(struct sysinfo *); +long vx_vsi_cached(struct sysinfo *); + +#define NUM_LIMITS 24 + +#endif /* _VSERVER_LIMIT_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/limit_cmd.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/limit_cmd.h --- linux-3.10.19/include/linux/vserver/limit_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/limit_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,35 @@ +#ifndef _VSERVER_LIMIT_CMD_H +#define _VSERVER_LIMIT_CMD_H + +#include + + +#ifdef CONFIG_IA32_EMULATION + +struct vcmd_ctx_rlimit_v0_x32 { + uint32_t id; + uint64_t minimum; + uint64_t softlimit; + uint64_t maximum; +} __attribute__ ((packed)); + +#endif /* CONFIG_IA32_EMULATION */ + +#include + +extern int vc_get_rlimit_mask(uint32_t, void __user *); +extern int vc_get_rlimit(struct vx_info *, void __user *); +extern int vc_set_rlimit(struct vx_info *, void __user *); +extern int vc_reset_hits(struct vx_info *, void __user *); +extern int vc_reset_minmax(struct vx_info *, void __user *); + +extern int vc_rlimit_stat(struct vx_info *, void __user *); + +#ifdef CONFIG_IA32_EMULATION + +extern int vc_get_rlimit_x32(struct vx_info *, void __user *); +extern int vc_set_rlimit_x32(struct vx_info *, void __user *); + +#endif /* CONFIG_IA32_EMULATION */ + +#endif /* _VSERVER_LIMIT_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/limit_def.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/limit_def.h --- linux-3.10.19/include/linux/vserver/limit_def.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/limit_def.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,47 @@ +#ifndef _VSERVER_LIMIT_DEF_H +#define _VSERVER_LIMIT_DEF_H + +#include +#include + +#include "limit.h" + + +struct _vx_res_limit { + rlim_t soft; /* Context soft limit */ + rlim_t hard; /* Context hard limit */ + + rlim_atomic_t rcur; /* Current value */ + rlim_t rmin; /* Context minimum */ + rlim_t rmax; /* Context maximum */ + + atomic_t lhit; /* Limit hits */ +}; + +/* context sub struct */ + +struct _vx_limit { + struct _vx_res_limit res[NUM_LIMITS]; +}; + +#ifdef CONFIG_VSERVER_DEBUG + +static inline void __dump_vx_limit(struct _vx_limit *limit) +{ + int i; + + printk("\t_vx_limit:"); + for (i = 0; i < NUM_LIMITS; i++) { + printk("\t [%2d] = %8lu %8lu/%8lu, %8ld/%8ld, %8d\n", + i, (unsigned long)__rlim_get(limit, i), + (unsigned long)__rlim_rmin(limit, i), + (unsigned long)__rlim_rmax(limit, i), + (long)__rlim_soft(limit, i), + (long)__rlim_hard(limit, i), + atomic_read(&__rlim_lhit(limit, i))); + } +} + +#endif + +#endif /* _VSERVER_LIMIT_DEF_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/limit_int.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/limit_int.h --- linux-3.10.19/include/linux/vserver/limit_int.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/limit_int.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,193 @@ +#ifndef _VSERVER_LIMIT_INT_H +#define _VSERVER_LIMIT_INT_H + +#define VXD_RCRES_COND(r) VXD_CBIT(cres, r) +#define VXD_RLIMIT_COND(r) VXD_CBIT(limit, r) + +extern const char *vlimit_name[NUM_LIMITS]; + +static inline void __vx_acc_cres(struct vx_info *vxi, + int res, int dir, void *_data, char *_file, int _line) +{ + if (VXD_RCRES_COND(res)) + vxlprintk(1, "vx_acc_cres[%5d,%s,%2d]: %5ld%s (%p)", + (vxi ? 
vxi->vx_id : -1), vlimit_name[res], res, + (vxi ? (long)__rlim_get(&vxi->limit, res) : 0), + (dir > 0) ? "++" : "--", _data, _file, _line); + if (!vxi) + return; + + if (dir > 0) + __rlim_inc(&vxi->limit, res); + else + __rlim_dec(&vxi->limit, res); +} + +static inline void __vx_add_cres(struct vx_info *vxi, + int res, int amount, void *_data, char *_file, int _line) +{ + if (VXD_RCRES_COND(res)) + vxlprintk(1, "vx_add_cres[%5d,%s,%2d]: %5ld += %5d (%p)", + (vxi ? vxi->vx_id : -1), vlimit_name[res], res, + (vxi ? (long)__rlim_get(&vxi->limit, res) : 0), + amount, _data, _file, _line); + if (amount == 0) + return; + if (!vxi) + return; + __rlim_add(&vxi->limit, res, amount); +} + +static inline +int __vx_cres_adjust_max(struct _vx_limit *limit, int res, rlim_t value) +{ + int cond = (value > __rlim_rmax(limit, res)); + + if (cond) + __rlim_rmax(limit, res) = value; + return cond; +} + +static inline +int __vx_cres_adjust_min(struct _vx_limit *limit, int res, rlim_t value) +{ + int cond = (value < __rlim_rmin(limit, res)); + + if (cond) + __rlim_rmin(limit, res) = value; + return cond; +} + +static inline +void __vx_cres_fixup(struct _vx_limit *limit, int res, rlim_t value) +{ + if (!__vx_cres_adjust_max(limit, res, value)) + __vx_cres_adjust_min(limit, res, value); +} + + +/* return values: + +1 ... no limit hit + -1 ... over soft limit + 0 ... over hard limit */ + +static inline int __vx_cres_avail(struct vx_info *vxi, + int res, int num, char *_file, int _line) +{ + struct _vx_limit *limit; + rlim_t value; + + if (VXD_RLIMIT_COND(res)) + vxlprintk(1, "vx_cres_avail[%5d,%s,%2d]: %5ld/%5ld > %5ld + %5d", + (vxi ? vxi->vx_id : -1), vlimit_name[res], res, + (vxi ? (long)__rlim_soft(&vxi->limit, res) : -1), + (vxi ? (long)__rlim_hard(&vxi->limit, res) : -1), + (vxi ? 
(long)__rlim_get(&vxi->limit, res) : 0), + num, _file, _line); + if (!vxi) + return 1; + + limit = &vxi->limit; + value = __rlim_get(limit, res); + + if (!__vx_cres_adjust_max(limit, res, value)) + __vx_cres_adjust_min(limit, res, value); + + if (num == 0) + return 1; + + if (__rlim_soft(limit, res) == RLIM_INFINITY) + return -1; + if (value + num <= __rlim_soft(limit, res)) + return -1; + + if (__rlim_hard(limit, res) == RLIM_INFINITY) + return 1; + if (value + num <= __rlim_hard(limit, res)) + return 1; + + __rlim_hit(limit, res); + return 0; +} + + +static const int VLA_RSS[] = { RLIMIT_RSS, VLIMIT_ANON, VLIMIT_MAPPED, 0 }; + +static inline +rlim_t __vx_cres_array_sum(struct _vx_limit *limit, const int *array) +{ + rlim_t value, sum = 0; + int res; + + while ((res = *array++)) { + value = __rlim_get(limit, res); + __vx_cres_fixup(limit, res, value); + sum += value; + } + return sum; +} + +static inline +rlim_t __vx_cres_array_fixup(struct _vx_limit *limit, const int *array) +{ + rlim_t value = __vx_cres_array_sum(limit, array + 1); + int res = *array; + + if (value == __rlim_get(limit, res)) + return value; + + __rlim_set(limit, res, value); + /* now adjust min/max */ + if (!__vx_cres_adjust_max(limit, res, value)) + __vx_cres_adjust_min(limit, res, value); + + return value; +} + +static inline int __vx_cres_array_avail(struct vx_info *vxi, + const int *array, int num, char *_file, int _line) +{ + struct _vx_limit *limit; + rlim_t value = 0; + int res; + + if (num == 0) + return 1; + if (!vxi) + return 1; + + limit = &vxi->limit; + res = *array; + value = __vx_cres_array_sum(limit, array + 1); + + __rlim_set(limit, res, value); + __vx_cres_fixup(limit, res, value); + + return __vx_cres_avail(vxi, res, num, _file, _line); +} + + +static inline void vx_limit_fixup(struct _vx_limit *limit, int id) +{ + rlim_t value; + int res; + + /* complex resources first */ + if ((id < 0) || (id == RLIMIT_RSS)) + __vx_cres_array_fixup(limit, VLA_RSS); + + for (res = 0; res < NUM_LIMITS; res++) { + if ((id > 0) && (res != id)) + continue; + + value = __rlim_get(limit, res); + __vx_cres_fixup(limit, res, value); + + /* not supposed to happen, maybe warn? 
*/ + if (__rlim_rmax(limit, res) > __rlim_hard(limit, res)) + __rlim_rmax(limit, res) = __rlim_hard(limit, res); + } +} + + +#endif /* _VSERVER_LIMIT_INT_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/monitor.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/monitor.h --- linux-3.10.19/include/linux/vserver/monitor.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/monitor.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,6 @@ +#ifndef _VSERVER_MONITOR_H +#define _VSERVER_MONITOR_H + +#include + +#endif /* _VSERVER_MONITOR_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/network.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/network.h --- linux-3.10.19/include/linux/vserver/network.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/network.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,76 @@ +#ifndef _VSERVER_NETWORK_H +#define _VSERVER_NETWORK_H + + +#include +#include +#include +#include +#include +#include +#include + +struct nx_addr_v4 { + struct nx_addr_v4 *next; + struct in_addr ip[2]; + struct in_addr mask; + uint16_t type; + uint16_t flags; +}; + +struct nx_addr_v6 { + struct nx_addr_v6 *next; + struct in6_addr ip; + struct in6_addr mask; + uint32_t prefix; + uint16_t type; + uint16_t flags; +}; + +struct nx_info { + struct hlist_node nx_hlist; /* linked list of nxinfos */ + vnid_t nx_id; /* vnet id */ + atomic_t nx_usecnt; /* usage count */ + atomic_t nx_tasks; /* tasks count */ + int nx_state; /* context state */ + + uint64_t nx_flags; /* network flag word */ + uint64_t nx_ncaps; /* network capabilities */ + + spinlock_t addr_lock; /* protect address changes */ + struct in_addr v4_lback; /* Loopback address */ + struct in_addr v4_bcast; /* Broadcast address */ + struct nx_addr_v4 v4; /* First/Single ipv4 address */ +#ifdef CONFIG_IPV6 + struct nx_addr_v6 v6; /* First/Single ipv6 address */ +#endif + char nx_name[65]; /* network context name */ +}; + + +/* status flags */ + +#define NXS_HASHED 0x0001 +#define NXS_SHUTDOWN 0x0100 +#define NXS_RELEASED 0x8000 + +extern struct nx_info *lookup_nx_info(int); + +extern int get_nid_list(int, unsigned int *, int); +extern int nid_is_hashed(vnid_t); + +extern int nx_migrate_task(struct task_struct *, struct nx_info *); + +extern long vs_net_change(struct nx_info *, unsigned int); + +struct sock; + + +#define NX_IPV4(n) ((n)->v4.type != NXA_TYPE_NONE) +#ifdef CONFIG_IPV6 +#define NX_IPV6(n) ((n)->v6.type != NXA_TYPE_NONE) +#else +#define NX_IPV6(n) (0) +#endif + +#endif /* _VSERVER_NETWORK_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/network_cmd.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/network_cmd.h --- linux-3.10.19/include/linux/vserver/network_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/network_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,37 @@ +#ifndef _VSERVER_NETWORK_CMD_H +#define _VSERVER_NETWORK_CMD_H + +#include + +extern int vc_task_nid(uint32_t); + +extern int vc_nx_info(struct nx_info *, void __user *); + +extern int vc_net_create(uint32_t, void __user *); +extern int vc_net_migrate(struct nx_info *, void __user *); + +extern int vc_net_add(struct nx_info *, void __user *); +extern int vc_net_remove(struct nx_info *, void __user *); + +extern int vc_net_add_ipv4_v1(struct nx_info *, void __user *); +extern int vc_net_add_ipv4(struct nx_info *, void __user *); + +extern int vc_net_rem_ipv4_v1(struct nx_info *, void __user *); +extern int 
vc_net_rem_ipv4(struct nx_info *, void __user *); + +extern int vc_net_add_ipv6(struct nx_info *, void __user *); +extern int vc_net_remove_ipv6(struct nx_info *, void __user *); + +extern int vc_add_match_ipv4(struct nx_info *, void __user *); +extern int vc_get_match_ipv4(struct nx_info *, void __user *); + +extern int vc_add_match_ipv6(struct nx_info *, void __user *); +extern int vc_get_match_ipv6(struct nx_info *, void __user *); + +extern int vc_get_nflags(struct nx_info *, void __user *); +extern int vc_set_nflags(struct nx_info *, void __user *); + +extern int vc_get_ncaps(struct nx_info *, void __user *); +extern int vc_set_ncaps(struct nx_info *, void __user *); + +#endif /* _VSERVER_CONTEXT_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/percpu.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/percpu.h --- linux-3.10.19/include/linux/vserver/percpu.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/percpu.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,14 @@ +#ifndef _VSERVER_PERCPU_H +#define _VSERVER_PERCPU_H + +#include "cvirt_def.h" +#include "sched_def.h" + +struct _vx_percpu { + struct _vx_cvirt_pc cvirt; + struct _vx_sched_pc sched; +}; + +#define PERCPU_PERCTX (sizeof(struct _vx_percpu)) + +#endif /* _VSERVER_PERCPU_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/pid.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/pid.h --- linux-3.10.19/include/linux/vserver/pid.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/pid.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,51 @@ +#ifndef _VSERVER_PID_H +#define _VSERVER_PID_H + +/* pid faking stuff */ + +#define vx_info_map_pid(v, p) \ + __vx_info_map_pid((v), (p), __func__, __FILE__, __LINE__) +#define vx_info_map_tgid(v,p) vx_info_map_pid(v,p) +#define vx_map_pid(p) vx_info_map_pid(current_vx_info(), p) +#define vx_map_tgid(p) vx_map_pid(p) + +static inline int __vx_info_map_pid(struct vx_info *vxi, int pid, + const char *func, const char *file, int line) +{ + if (vx_info_flags(vxi, VXF_INFO_INIT, 0)) { + vxfprintk(VXD_CBIT(cvirt, 2), + "vx_map_tgid: %p/%llx: %d -> %d", + vxi, (long long)vxi->vx_flags, pid, + (pid && pid == vxi->vx_initpid) ? 1 : pid, + func, file, line); + if (pid == 0) + return 0; + if (pid == vxi->vx_initpid) + return 1; + } + return pid; +} + +#define vx_info_rmap_pid(v, p) \ + __vx_info_rmap_pid((v), (p), __func__, __FILE__, __LINE__) +#define vx_rmap_pid(p) vx_info_rmap_pid(current_vx_info(), p) +#define vx_rmap_tgid(p) vx_rmap_pid(p) + +static inline int __vx_info_rmap_pid(struct vx_info *vxi, int pid, + const char *func, const char *file, int line) +{ + if (vx_info_flags(vxi, VXF_INFO_INIT, 0)) { + vxfprintk(VXD_CBIT(cvirt, 2), + "vx_rmap_tgid: %p/%llx: %d -> %d", + vxi, (long long)vxi->vx_flags, pid, + (pid == 1) ? 
vxi->vx_initpid : pid, + func, file, line); + if ((pid == 1) && vxi->vx_initpid) + return vxi->vx_initpid; + if (pid == vxi->vx_initpid) + return ~0U; + } + return pid; +} + +#endif diff -NurpP --minimal linux-3.10.19/include/linux/vserver/sched.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/sched.h --- linux-3.10.19/include/linux/vserver/sched.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/sched.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,23 @@ +#ifndef _VSERVER_SCHED_H +#define _VSERVER_SCHED_H + + +#ifdef __KERNEL__ + +struct timespec; + +void vx_vsi_uptime(struct timespec *, struct timespec *); + + +struct vx_info; + +void vx_update_load(struct vx_info *); + + +void vx_update_sched_param(struct _vx_sched *sched, + struct _vx_sched_pc *sched_pc); + +#endif /* __KERNEL__ */ +#else /* _VSERVER_SCHED_H */ +#warning duplicate inclusion +#endif /* _VSERVER_SCHED_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/sched_cmd.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/sched_cmd.h --- linux-3.10.19/include/linux/vserver/sched_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/sched_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,11 @@ +#ifndef _VSERVER_SCHED_CMD_H +#define _VSERVER_SCHED_CMD_H + + +#include +#include + +extern int vc_set_prio_bias(struct vx_info *, void __user *); +extern int vc_get_prio_bias(struct vx_info *, void __user *); + +#endif /* _VSERVER_SCHED_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/sched_def.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/sched_def.h --- linux-3.10.19/include/linux/vserver/sched_def.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/sched_def.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,38 @@ +#ifndef _VSERVER_SCHED_DEF_H +#define _VSERVER_SCHED_DEF_H + +#include +#include +#include +#include +#include + + +/* context sub struct */ + +struct _vx_sched { + int prio_bias; /* bias offset for priority */ + + cpumask_t update; /* CPUs which should update */ +}; + +struct _vx_sched_pc { + int prio_bias; /* bias offset for priority */ + + uint64_t user_ticks; /* token tick events */ + uint64_t sys_ticks; /* token tick events */ + uint64_t hold_ticks; /* token ticks paused */ +}; + + +#ifdef CONFIG_VSERVER_DEBUG + +static inline void __dump_vx_sched(struct _vx_sched *sched) +{ + printk("\t_vx_sched:\n"); + printk("\t priority = %4d\n", sched->prio_bias); +} + +#endif + +#endif /* _VSERVER_SCHED_DEF_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/signal.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/signal.h --- linux-3.10.19/include/linux/vserver/signal.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/signal.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,14 @@ +#ifndef _VSERVER_SIGNAL_H +#define _VSERVER_SIGNAL_H + + +#ifdef __KERNEL__ + +struct vx_info; + +int vx_info_kill(struct vx_info *, int, int); + +#endif /* __KERNEL__ */ +#else /* _VSERVER_SIGNAL_H */ +#warning duplicate inclusion +#endif /* _VSERVER_SIGNAL_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/signal_cmd.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/signal_cmd.h --- linux-3.10.19/include/linux/vserver/signal_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/signal_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,14 @@ +#ifndef _VSERVER_SIGNAL_CMD_H +#define _VSERVER_SIGNAL_CMD_H + +#include + + 
+extern int vc_ctx_kill(struct vx_info *, void __user *); +extern int vc_wait_exit(struct vx_info *, void __user *); + + +extern int vc_get_pflags(uint32_t pid, void __user *); +extern int vc_set_pflags(uint32_t pid, void __user *); + +#endif /* _VSERVER_SIGNAL_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/space.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/space.h --- linux-3.10.19/include/linux/vserver/space.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/space.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,12 @@ +#ifndef _VSERVER_SPACE_H +#define _VSERVER_SPACE_H + +#include + +struct vx_info; + +int vx_set_space(struct vx_info *vxi, unsigned long mask, unsigned index); + +#else /* _VSERVER_SPACE_H */ +#warning duplicate inclusion +#endif /* _VSERVER_SPACE_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/space_cmd.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/space_cmd.h --- linux-3.10.19/include/linux/vserver/space_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/space_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,13 @@ +#ifndef _VSERVER_SPACE_CMD_H +#define _VSERVER_SPACE_CMD_H + +#include + + +extern int vc_enter_space_v1(struct vx_info *, void __user *); +extern int vc_set_space_v1(struct vx_info *, void __user *); +extern int vc_enter_space(struct vx_info *, void __user *); +extern int vc_set_space(struct vx_info *, void __user *); +extern int vc_get_space_mask(void __user *, int); + +#endif /* _VSERVER_SPACE_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/switch.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/switch.h --- linux-3.10.19/include/linux/vserver/switch.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/switch.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,8 @@ +#ifndef _VSERVER_SWITCH_H +#define _VSERVER_SWITCH_H + + +#include +#include + +#endif /* _VSERVER_SWITCH_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/tag.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/tag.h --- linux-3.10.19/include/linux/vserver/tag.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/tag.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,160 @@ +#ifndef _DX_TAG_H +#define _DX_TAG_H + +#include +#include + + +#define DX_TAG(in) (IS_TAGGED(in)) + + +#ifdef CONFIG_TAG_NFSD +#define DX_TAG_NFSD 1 +#else +#define DX_TAG_NFSD 0 +#endif + + +#ifdef CONFIG_TAGGING_NONE + +#define MAX_UID 0xFFFFFFFF +#define MAX_GID 0xFFFFFFFF + +#define INOTAG_TAG(cond, uid, gid, tag) (0) + +#define TAGINO_UID(cond, uid, tag) (uid) +#define TAGINO_GID(cond, gid, tag) (gid) + +#endif + + +#ifdef CONFIG_TAGGING_GID16 + +#define MAX_UID 0xFFFFFFFF +#define MAX_GID 0x0000FFFF + +#define INOTAG_TAG(cond, uid, gid, tag) \ + ((cond) ? (((gid) >> 16) & 0xFFFF) : 0) + +#define TAGINO_UID(cond, uid, tag) (uid) +#define TAGINO_GID(cond, gid, tag) \ + ((cond) ? (((gid) & 0xFFFF) | ((tag) << 16)) : (gid)) + +#endif + + +#ifdef CONFIG_TAGGING_ID24 + +#define MAX_UID 0x00FFFFFF +#define MAX_GID 0x00FFFFFF + +#define INOTAG_TAG(cond, uid, gid, tag) \ + ((cond) ? ((((uid) >> 16) & 0xFF00) | (((gid) >> 24) & 0xFF)) : 0) + +#define TAGINO_UID(cond, uid, tag) \ + ((cond) ? (((uid) & 0xFFFFFF) | (((tag) & 0xFF00) << 16)) : (uid)) +#define TAGINO_GID(cond, gid, tag) \ + ((cond) ? 
(((gid) & 0xFFFFFF) | (((tag) & 0x00FF) << 24)) : (gid)) + +#endif + + +#ifdef CONFIG_TAGGING_UID16 + +#define MAX_UID 0x0000FFFF +#define MAX_GID 0xFFFFFFFF + +#define INOTAG_TAG(cond, uid, gid, tag) \ + ((cond) ? (((uid) >> 16) & 0xFFFF) : 0) + +#define TAGINO_UID(cond, uid, tag) \ + ((cond) ? (((uid) & 0xFFFF) | ((tag) << 16)) : (uid)) +#define TAGINO_GID(cond, gid, tag) (gid) + +#endif + + +#ifdef CONFIG_TAGGING_INTERN + +#define MAX_UID 0xFFFFFFFF +#define MAX_GID 0xFFFFFFFF + +#define INOTAG_TAG(cond, uid, gid, tag) \ + ((cond) ? (tag) : 0) + +#define TAGINO_UID(cond, uid, tag) (uid) +#define TAGINO_GID(cond, gid, tag) (gid) + +#endif + + +#ifndef CONFIG_TAGGING_NONE +#define dx_current_fstag(sb) \ + ((sb)->s_flags & MS_TAGGED ? dx_current_tag() : 0) +#else +#define dx_current_fstag(sb) (0) +#endif + +#ifndef CONFIG_TAGGING_INTERN +#define TAGINO_TAG(cond, tag) (0) +#else +#define TAGINO_TAG(cond, tag) ((cond) ? (tag) : 0) +#endif + +#define TAGINO_KUID(cond, kuid, ktag) \ + KUIDT_INIT(TAGINO_UID(cond, __kuid_val(kuid), __ktag_val(ktag))) +#define TAGINO_KGID(cond, kgid, ktag) \ + KGIDT_INIT(TAGINO_GID(cond, __kgid_val(kgid), __ktag_val(ktag))) +#define TAGINO_KTAG(cond, ktag) \ + KTAGT_INIT(TAGINO_TAG(cond, __ktag_val(ktag))) + + +#define INOTAG_UID(cond, uid, gid) \ + ((cond) ? ((uid) & MAX_UID) : (uid)) +#define INOTAG_GID(cond, uid, gid) \ + ((cond) ? ((gid) & MAX_GID) : (gid)) + +#define INOTAG_KUID(cond, kuid, kgid) \ + KUIDT_INIT(INOTAG_UID(cond, __kuid_val(kuid), __kgid_val(kgid))) +#define INOTAG_KGID(cond, kuid, kgid) \ + KGIDT_INIT(INOTAG_GID(cond, __kuid_val(kuid), __kgid_val(kgid))) +#define INOTAG_KTAG(cond, kuid, kgid, ktag) \ + KTAGT_INIT(INOTAG_TAG(cond, \ + __kuid_val(kuid), __kgid_val(kgid), __ktag_val(ktag))) + + +static inline uid_t dx_map_uid(uid_t uid) +{ + if ((uid > MAX_UID) && (uid != -1)) + uid = -2; + return (uid & MAX_UID); +} + +static inline gid_t dx_map_gid(gid_t gid) +{ + if ((gid > MAX_GID) && (gid != -1)) + gid = -2; + return (gid & MAX_GID); +} + +struct peer_tag { + int32_t xid; + int32_t nid; +}; + +#define dx_notagcheck(sb) ((sb) && ((sb)->s_flags & MS_NOTAGCHECK)) + +int dx_parse_tag(char *string, vtag_t *tag, int remove, int *mnt_flags, + unsigned long *flags); + +#ifdef CONFIG_PROPAGATE + +void __dx_propagate_tag(struct nameidata *nd, struct inode *inode); + +#define dx_propagate_tag(n, i) __dx_propagate_tag(n, i) + +#else +#define dx_propagate_tag(n, i) do { } while (0) +#endif + +#endif /* _DX_TAG_H */ diff -NurpP --minimal linux-3.10.19/include/linux/vserver/tag_cmd.h linux-3.10.19-vs2.3.6.8/include/linux/vserver/tag_cmd.h --- linux-3.10.19/include/linux/vserver/tag_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/linux/vserver/tag_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,10 @@ +#ifndef _VSERVER_TAG_CMD_H +#define _VSERVER_TAG_CMD_H + +#include + +extern int vc_task_tag(uint32_t); + +extern int vc_tag_migrate(uint32_t); + +#endif /* _VSERVER_TAG_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/net/addrconf.h linux-3.10.19-vs2.3.6.8/include/net/addrconf.h --- linux-3.10.19/include/net/addrconf.h 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/net/addrconf.h 2013-11-13 17:19:37.000000000 +0000 @@ -89,7 +89,8 @@ extern int ipv6_dev_get_saddr(struct n const struct net_device *dev, const struct in6_addr *daddr, unsigned int srcprefs, - struct in6_addr *saddr); + struct in6_addr *saddr, + struct nx_info *nxi); extern int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr 
*addr, unsigned char banned_flags); diff -NurpP --minimal linux-3.10.19/include/net/af_unix.h linux-3.10.19-vs2.3.6.8/include/net/af_unix.h --- linux-3.10.19/include/net/af_unix.h 2013-07-14 17:01:33.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/net/af_unix.h 2013-08-22 20:30:00.000000000 +0000 @@ -4,6 +4,7 @@ #include #include #include +#include #include extern void unix_inflight(struct file *fp); diff -NurpP --minimal linux-3.10.19/include/net/inet_timewait_sock.h linux-3.10.19-vs2.3.6.8/include/net/inet_timewait_sock.h --- linux-3.10.19/include/net/inet_timewait_sock.h 2013-05-31 13:45:28.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/net/inet_timewait_sock.h 2013-08-22 20:30:00.000000000 +0000 @@ -116,6 +116,10 @@ struct inet_timewait_sock { #define tw_dport __tw_common.skc_dport #define tw_num __tw_common.skc_num #define tw_portpair __tw_common.skc_portpair +#define tw_xid __tw_common.skc_xid +#define tw_vx_info __tw_common.skc_vx_info +#define tw_nid __tw_common.skc_nid +#define tw_nx_info __tw_common.skc_nx_info int tw_timeout; volatile unsigned char tw_substate; diff -NurpP --minimal linux-3.10.19/include/net/ip6_route.h linux-3.10.19-vs2.3.6.8/include/net/ip6_route.h --- linux-3.10.19/include/net/ip6_route.h 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/net/ip6_route.h 2013-11-13 17:22:25.000000000 +0000 @@ -95,7 +95,8 @@ extern int ip6_route_get_saddr(struct struct rt6_info *rt, const struct in6_addr *daddr, unsigned int prefs, - struct in6_addr *saddr); + struct in6_addr *saddr, + struct nx_info *nxi); extern struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr, diff -NurpP --minimal linux-3.10.19/include/net/route.h linux-3.10.19-vs2.3.6.8/include/net/route.h --- linux-3.10.19/include/net/route.h 2013-02-19 13:58:52.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/net/route.h 2013-08-22 20:30:00.000000000 +0000 @@ -207,6 +207,9 @@ static inline void ip_rt_put(struct rtab dst_release(&rt->dst); } +#include +#include + #define IPTOS_RT_MASK (IPTOS_TOS_MASK & ~3) extern const __u8 ip_tos2prio[16]; @@ -256,6 +259,9 @@ static inline void ip_route_connect_init protocol, flow_flags, dst, src, dport, sport); } +extern struct rtable *ip_v4_find_src(struct net *net, struct nx_info *, + struct flowi4 *); + static inline struct rtable *ip_route_connect(struct flowi4 *fl4, __be32 dst, __be32 src, u32 tos, int oif, u8 protocol, @@ -264,11 +270,25 @@ static inline struct rtable *ip_route_co { struct net *net = sock_net(sk); struct rtable *rt; + struct nx_info *nx_info = current_nx_info(); ip_route_connect_init(fl4, dst, src, tos, oif, protocol, sport, dport, sk, can_sleep); - if (!dst || !src) { + if (sk) + nx_info = sk->sk_nx_info; + + vxdprintk(VXD_CBIT(net, 4), + "ip_route_connect(%p) %p,%p;%lx", + sk, nx_info, sk->sk_socket, + (sk->sk_socket?sk->sk_socket->flags:0)); + + rt = ip_v4_find_src(net, nx_info, fl4); + if (IS_ERR(rt)) + return rt; + ip_rt_put(rt); + + if (!fl4->daddr || !fl4->saddr) { rt = __ip_route_output_key(net, fl4); if (IS_ERR(rt)) return rt; diff -NurpP --minimal linux-3.10.19/include/net/sock.h linux-3.10.19-vs2.3.6.8/include/net/sock.h --- linux-3.10.19/include/net/sock.h 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/net/sock.h 2013-11-13 17:22:25.000000000 +0000 @@ -191,6 +191,10 @@ struct sock_common { #ifdef CONFIG_NET_NS struct net *skc_net; #endif + vxid_t skc_xid; + struct vx_info *skc_vx_info; + vnid_t skc_nid; + struct nx_info *skc_nx_info; /* * fields between 
dontcopy_begin/dontcopy_end * are not copied in sock_copy() @@ -305,6 +309,10 @@ struct sock { #define sk_bind_node __sk_common.skc_bind_node #define sk_prot __sk_common.skc_prot #define sk_net __sk_common.skc_net +#define sk_xid __sk_common.skc_xid +#define sk_vx_info __sk_common.skc_vx_info +#define sk_nid __sk_common.skc_nid +#define sk_nx_info __sk_common.skc_nx_info socket_lock_t sk_lock; struct sk_buff_head sk_receive_queue; /* diff -NurpP --minimal linux-3.10.19/include/uapi/Kbuild linux-3.10.19-vs2.3.6.8/include/uapi/Kbuild --- linux-3.10.19/include/uapi/Kbuild 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/Kbuild 2013-08-22 20:30:00.000000000 +0000 @@ -12,3 +12,4 @@ header-y += video/ header-y += drm/ header-y += xen/ header-y += scsi/ +header-y += vserver/ diff -NurpP --minimal linux-3.10.19/include/uapi/linux/capability.h linux-3.10.19-vs2.3.6.8/include/uapi/linux/capability.h --- linux-3.10.19/include/uapi/linux/capability.h 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/linux/capability.h 2013-08-22 20:30:00.000000000 +0000 @@ -259,6 +259,7 @@ struct vfs_cap_data { arbitrary SCSI commands */ /* Allow setting encryption key on loopback filesystem */ /* Allow setting zone reclaim policy */ +/* Allow the selection of a security context */ #define CAP_SYS_ADMIN 21 @@ -345,7 +346,12 @@ struct vfs_cap_data { #define CAP_LAST_CAP CAP_BLOCK_SUSPEND -#define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP) +/* Allow context manipulations */ +/* Allow changing context info on files */ + +#define CAP_CONTEXT 63 + +#define cap_valid(x) ((x) >= 0 && ((x) <= CAP_LAST_CAP || (x) == CAP_CONTEXT)) /* * Bit location of each capability (used by user-space library and kernel) diff -NurpP --minimal linux-3.10.19/include/uapi/linux/fs.h linux-3.10.19-vs2.3.6.8/include/uapi/linux/fs.h --- linux-3.10.19/include/uapi/linux/fs.h 2013-07-14 17:01:34.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/linux/fs.h 2013-08-22 20:30:00.000000000 +0000 @@ -86,6 +86,9 @@ struct inodes_stat_t { #define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ #define MS_I_VERSION (1<<23) /* Update inode I_version field */ #define MS_STRICTATIME (1<<24) /* Always perform atime updates */ +#define MS_TAGGED (1<<8) /* use generic inode tagging */ +#define MS_NOTAGCHECK (1<<9) /* don't check tags */ +#define MS_TAGID (1<<25) /* use specific tag for this mount */ /* These sb flags are internal to the kernel */ #define MS_NOSEC (1<<28) @@ -191,11 +194,14 @@ struct inodes_stat_t { #define FS_EXTENT_FL 0x00080000 /* Extents */ #define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */ #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ +#define FS_IXUNLINK_FL 0x08000000 /* Immutable invert on unlink */ #define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ -#define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ -#define FS_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */ +#define FS_BARRIER_FL 0x04000000 /* Barrier for chroot() */ +#define FS_COW_FL 0x20000000 /* Copy on Write marker */ +#define FS_FL_USER_VISIBLE 0x0103DFFF /* User visible flags */ +#define FS_FL_USER_MODIFIABLE 0x010380FF /* User modifiable flags */ #define SYNC_FILE_RANGE_WAIT_BEFORE 1 #define SYNC_FILE_RANGE_WRITE 2 diff -NurpP --minimal linux-3.10.19/include/uapi/linux/gfs2_ondisk.h linux-3.10.19-vs2.3.6.8/include/uapi/linux/gfs2_ondisk.h --- linux-3.10.19/include/uapi/linux/gfs2_ondisk.h 2012-12-11 03:30:57.000000000 +0000 +++ 
linux-3.10.19-vs2.3.6.8/include/uapi/linux/gfs2_ondisk.h 2013-08-22 20:30:00.000000000 +0000 @@ -225,6 +225,9 @@ enum { gfs2fl_Sync = 8, gfs2fl_System = 9, gfs2fl_TopLevel = 10, + gfs2fl_IXUnlink = 16, + gfs2fl_Barrier = 17, + gfs2fl_Cow = 18, gfs2fl_TruncInProg = 29, gfs2fl_InheritDirectio = 30, gfs2fl_InheritJdata = 31, @@ -242,6 +245,9 @@ enum { #define GFS2_DIF_SYNC 0x00000100 #define GFS2_DIF_SYSTEM 0x00000200 /* New in gfs2 */ #define GFS2_DIF_TOPDIR 0x00000400 /* New in gfs2 */ +#define GFS2_DIF_IXUNLINK 0x00010000 +#define GFS2_DIF_BARRIER 0x00020000 +#define GFS2_DIF_COW 0x00040000 #define GFS2_DIF_TRUNC_IN_PROG 0x20000000 /* New in gfs2 */ #define GFS2_DIF_INHERIT_DIRECTIO 0x40000000 /* only in gfs1 */ #define GFS2_DIF_INHERIT_JDATA 0x80000000 diff -NurpP --minimal linux-3.10.19/include/uapi/linux/if_tun.h linux-3.10.19-vs2.3.6.8/include/uapi/linux/if_tun.h --- linux-3.10.19/include/uapi/linux/if_tun.h 2013-02-19 13:58:55.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/linux/if_tun.h 2013-08-22 20:30:00.000000000 +0000 @@ -56,6 +56,7 @@ #define TUNGETVNETHDRSZ _IOR('T', 215, int) #define TUNSETVNETHDRSZ _IOW('T', 216, int) #define TUNSETQUEUE _IOW('T', 217, int) +#define TUNSETNID _IOW('T', 218, int) /* TUNSETIFF ifr flags */ #define IFF_TUN 0x0001 diff -NurpP --minimal linux-3.10.19/include/uapi/linux/major.h linux-3.10.19-vs2.3.6.8/include/uapi/linux/major.h --- linux-3.10.19/include/uapi/linux/major.h 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/linux/major.h 2013-08-22 20:30:00.000000000 +0000 @@ -15,6 +15,7 @@ #define HD_MAJOR IDE0_MAJOR #define PTY_SLAVE_MAJOR 3 #define TTY_MAJOR 4 +#define VROOT_MAJOR 4 #define TTYAUX_MAJOR 5 #define LP_MAJOR 6 #define VCS_MAJOR 7 diff -NurpP --minimal linux-3.10.19/include/uapi/linux/nfs_mount.h linux-3.10.19-vs2.3.6.8/include/uapi/linux/nfs_mount.h --- linux-3.10.19/include/uapi/linux/nfs_mount.h 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/linux/nfs_mount.h 2013-08-22 20:30:00.000000000 +0000 @@ -63,7 +63,8 @@ struct nfs_mount_data { #define NFS_MOUNT_SECFLAVOUR 0x2000 /* 5 */ #define NFS_MOUNT_NORDIRPLUS 0x4000 /* 5 */ #define NFS_MOUNT_UNSHARED 0x8000 /* 5 */ -#define NFS_MOUNT_FLAGMASK 0xFFFF +#define NFS_MOUNT_TAGGED 0x10000 /* context tagging */ +#define NFS_MOUNT_FLAGMASK 0x1FFFF /* The following are for internal use only */ #define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000 diff -NurpP --minimal linux-3.10.19/include/uapi/linux/reboot.h linux-3.10.19-vs2.3.6.8/include/uapi/linux/reboot.h --- linux-3.10.19/include/uapi/linux/reboot.h 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/linux/reboot.h 2013-08-22 20:30:00.000000000 +0000 @@ -33,7 +33,7 @@ #define LINUX_REBOOT_CMD_RESTART2 0xA1B2C3D4 #define LINUX_REBOOT_CMD_SW_SUSPEND 0xD000FCE2 #define LINUX_REBOOT_CMD_KEXEC 0x45584543 - +#define LINUX_REBOOT_CMD_OOM 0xDEADBEEF #endif /* _UAPI_LINUX_REBOOT_H */ diff -NurpP --minimal linux-3.10.19/include/uapi/linux/sysctl.h linux-3.10.19-vs2.3.6.8/include/uapi/linux/sysctl.h --- linux-3.10.19/include/uapi/linux/sysctl.h 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/linux/sysctl.h 2013-08-22 20:30:00.000000000 +0000 @@ -60,6 +60,7 @@ enum CTL_ABI=9, /* Binary emulation */ CTL_CPU=10, /* CPU stuff (speed scaling, etc) */ CTL_ARLAN=254, /* arlan wireless driver */ + CTL_VSERVER=4242, /* Linux-VServer debug */ CTL_S390DBF=5677, /* s390 debug */ CTL_SUNRPC=7249, /* sunrpc debug */ CTL_PM=9899, /* frv power management 
*/ @@ -94,6 +95,7 @@ enum KERN_PANIC=15, /* int: panic timeout */ KERN_REALROOTDEV=16, /* real root device to mount after initrd */ + KERN_VSHELPER=17, /* string: path to vshelper policy agent */ KERN_SPARC_REBOOT=21, /* reboot command on Sparc */ KERN_CTLALTDEL=22, /* int: allow ctl-alt-del to reboot */ diff -NurpP --minimal linux-3.10.19/include/uapi/vserver/Kbuild linux-3.10.19-vs2.3.6.8/include/uapi/vserver/Kbuild --- linux-3.10.19/include/uapi/vserver/Kbuild 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/vserver/Kbuild 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,9 @@ + +header-y += context_cmd.h network_cmd.h space_cmd.h \ + cacct_cmd.h cvirt_cmd.h limit_cmd.h dlimit_cmd.h \ + inode_cmd.h tag_cmd.h sched_cmd.h signal_cmd.h \ + debug_cmd.h device_cmd.h + +header-y += switch.h context.h network.h monitor.h \ + limit.h inode.h device.h + diff -NurpP --minimal linux-3.10.19/include/uapi/vserver/cacct_cmd.h linux-3.10.19-vs2.3.6.8/include/uapi/vserver/cacct_cmd.h --- linux-3.10.19/include/uapi/vserver/cacct_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/vserver/cacct_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,15 @@ +#ifndef _UAPI_VS_CACCT_CMD_H +#define _UAPI_VS_CACCT_CMD_H + + +/* virtual host info name commands */ + +#define VCMD_sock_stat VC_CMD(VSTAT, 5, 0) + +struct vcmd_sock_stat_v0 { + uint32_t field; + uint32_t count[3]; + uint64_t total[3]; +}; + +#endif /* _UAPI_VS_CACCT_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/uapi/vserver/context.h linux-3.10.19-vs2.3.6.8/include/uapi/vserver/context.h --- linux-3.10.19/include/uapi/vserver/context.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/vserver/context.h 2013-10-27 03:43:35.000000000 +0000 @@ -0,0 +1,81 @@ +#ifndef _UAPI_VS_CONTEXT_H +#define _UAPI_VS_CONTEXT_H + +#include +#include + + +/* context flags */ + +#define VXF_INFO_SCHED 0x00000002 +#define VXF_INFO_NPROC 0x00000004 +#define VXF_INFO_PRIVATE 0x00000008 + +#define VXF_INFO_INIT 0x00000010 +#define VXF_INFO_HIDE 0x00000020 +#define VXF_INFO_ULIMIT 0x00000040 +#define VXF_INFO_NSPACE 0x00000080 + +#define VXF_SCHED_HARD 0x00000100 +#define VXF_SCHED_PRIO 0x00000200 +#define VXF_SCHED_PAUSE 0x00000400 + +#define VXF_VIRT_MEM 0x00010000 +#define VXF_VIRT_UPTIME 0x00020000 +#define VXF_VIRT_CPU 0x00040000 +#define VXF_VIRT_LOAD 0x00080000 +#define VXF_VIRT_TIME 0x00100000 + +#define VXF_HIDE_MOUNT 0x01000000 +/* was VXF_HIDE_NETIF 0x02000000 */ +#define VXF_HIDE_VINFO 0x04000000 + +#define VXF_STATE_SETUP (1ULL << 32) +#define VXF_STATE_INIT (1ULL << 33) +#define VXF_STATE_ADMIN (1ULL << 34) + +#define VXF_SC_HELPER (1ULL << 36) +#define VXF_REBOOT_KILL (1ULL << 37) +#define VXF_PERSISTENT (1ULL << 38) + +#define VXF_FORK_RSS (1ULL << 48) +#define VXF_PROLIFIC (1ULL << 49) + +#define VXF_IGNEG_NICE (1ULL << 52) + +#define VXF_ONE_TIME (0x0007ULL << 32) + +#define VXF_INIT_SET (VXF_STATE_SETUP | VXF_STATE_INIT | VXF_STATE_ADMIN) + + +/* context migration */ + +#define VXM_SET_INIT 0x00000001 +#define VXM_SET_REAPER 0x00000002 + +/* context caps */ + +#define VXC_SET_UTSNAME 0x00000001 +#define VXC_SET_RLIMIT 0x00000002 +#define VXC_FS_SECURITY 0x00000004 +#define VXC_FS_TRUSTED 0x00000008 +#define VXC_TIOCSTI 0x00000010 + +/* was VXC_RAW_ICMP 0x00000100 */ +#define VXC_SYSLOG 0x00001000 +#define VXC_OOM_ADJUST 0x00002000 +#define VXC_AUDIT_CONTROL 0x00004000 + +#define VXC_SECURE_MOUNT 0x00010000 +/* #define VXC_SECURE_REMOUNT 0x00020000 */ +#define 
VXC_BINARY_MOUNT 0x00040000 +#define VXC_DEV_MOUNT 0x00080000 + +#define VXC_QUOTA_CTL 0x00100000 +#define VXC_ADMIN_MAPPER 0x00200000 +#define VXC_ADMIN_CLOOP 0x00400000 + +#define VXC_KTHREAD 0x01000000 +#define VXC_NAMESPACE 0x02000000 + +#endif /* _UAPI_VS_CONTEXT_H */ diff -NurpP --minimal linux-3.10.19/include/uapi/vserver/context_cmd.h linux-3.10.19-vs2.3.6.8/include/uapi/vserver/context_cmd.h --- linux-3.10.19/include/uapi/vserver/context_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/vserver/context_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,115 @@ +#ifndef _UAPI_VS_CONTEXT_CMD_H +#define _UAPI_VS_CONTEXT_CMD_H + + +/* vinfo commands */ + +#define VCMD_task_xid VC_CMD(VINFO, 1, 0) + + +#define VCMD_vx_info VC_CMD(VINFO, 5, 0) + +struct vcmd_vx_info_v0 { + uint32_t xid; + uint32_t initpid; + /* more to come */ +}; + + +#define VCMD_ctx_stat VC_CMD(VSTAT, 0, 0) + +struct vcmd_ctx_stat_v0 { + uint32_t usecnt; + uint32_t tasks; + /* more to come */ +}; + + +/* context commands */ + +#define VCMD_ctx_create_v0 VC_CMD(VPROC, 1, 0) +#define VCMD_ctx_create VC_CMD(VPROC, 1, 1) + +struct vcmd_ctx_create { + uint64_t flagword; +}; + +#define VCMD_ctx_migrate_v0 VC_CMD(PROCMIG, 1, 0) +#define VCMD_ctx_migrate VC_CMD(PROCMIG, 1, 1) + +struct vcmd_ctx_migrate { + uint64_t flagword; +}; + + + +/* flag commands */ + +#define VCMD_get_cflags VC_CMD(FLAGS, 1, 0) +#define VCMD_set_cflags VC_CMD(FLAGS, 2, 0) + +struct vcmd_ctx_flags_v0 { + uint64_t flagword; + uint64_t mask; +}; + + + +/* context caps commands */ + +#define VCMD_get_ccaps VC_CMD(FLAGS, 3, 1) +#define VCMD_set_ccaps VC_CMD(FLAGS, 4, 1) + +struct vcmd_ctx_caps_v1 { + uint64_t ccaps; + uint64_t cmask; +}; + + + +/* bcaps commands */ + +#define VCMD_get_bcaps VC_CMD(FLAGS, 9, 0) +#define VCMD_set_bcaps VC_CMD(FLAGS, 10, 0) + +struct vcmd_bcaps { + uint64_t bcaps; + uint64_t bmask; +}; + + + +/* umask commands */ + +#define VCMD_get_umask VC_CMD(FLAGS, 13, 0) +#define VCMD_set_umask VC_CMD(FLAGS, 14, 0) + +struct vcmd_umask { + uint64_t umask; + uint64_t mask; +}; + + + +/* wmask commands */ + +#define VCMD_get_wmask VC_CMD(FLAGS, 15, 0) +#define VCMD_set_wmask VC_CMD(FLAGS, 16, 0) + +struct vcmd_wmask { + uint64_t wmask; + uint64_t mask; +}; + + + +/* OOM badness */ + +#define VCMD_get_badness VC_CMD(MEMCTRL, 5, 0) +#define VCMD_set_badness VC_CMD(MEMCTRL, 6, 0) + +struct vcmd_badness_v0 { + int64_t bias; +}; + +#endif /* _UAPI_VS_CONTEXT_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/uapi/vserver/cvirt_cmd.h linux-3.10.19-vs2.3.6.8/include/uapi/vserver/cvirt_cmd.h --- linux-3.10.19/include/uapi/vserver/cvirt_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/vserver/cvirt_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,41 @@ +#ifndef _UAPI_VS_CVIRT_CMD_H +#define _UAPI_VS_CVIRT_CMD_H + + +/* virtual host info name commands */ + +#define VCMD_set_vhi_name VC_CMD(VHOST, 1, 0) +#define VCMD_get_vhi_name VC_CMD(VHOST, 2, 0) + +struct vcmd_vhi_name_v0 { + uint32_t field; + char name[65]; +}; + + +enum vhi_name_field { + VHIN_CONTEXT = 0, + VHIN_SYSNAME, + VHIN_NODENAME, + VHIN_RELEASE, + VHIN_VERSION, + VHIN_MACHINE, + VHIN_DOMAINNAME, +}; + + + +#define VCMD_virt_stat VC_CMD(VSTAT, 3, 0) + +struct vcmd_virt_stat_v0 { + uint64_t offset; + uint64_t uptime; + uint32_t nr_threads; + uint32_t nr_running; + uint32_t nr_uninterruptible; + uint32_t nr_onhold; + uint32_t nr_forks; + uint32_t load[3]; +}; + +#endif /* _UAPI_VS_CVIRT_CMD_H */ diff -NurpP 
--minimal linux-3.10.19/include/uapi/vserver/debug_cmd.h linux-3.10.19-vs2.3.6.8/include/uapi/vserver/debug_cmd.h --- linux-3.10.19/include/uapi/vserver/debug_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/vserver/debug_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,24 @@ +#ifndef _UAPI_VS_DEBUG_CMD_H +#define _UAPI_VS_DEBUG_CMD_H + + +/* debug commands */ + +#define VCMD_dump_history VC_CMD(DEBUG, 1, 0) + +#define VCMD_read_history VC_CMD(DEBUG, 5, 0) +#define VCMD_read_monitor VC_CMD(DEBUG, 6, 0) + +struct vcmd_read_history_v0 { + uint32_t index; + uint32_t count; + char __user *data; +}; + +struct vcmd_read_monitor_v0 { + uint32_t index; + uint32_t count; + char __user *data; +}; + +#endif /* _UAPI_VS_DEBUG_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/uapi/vserver/device.h linux-3.10.19-vs2.3.6.8/include/uapi/vserver/device.h --- linux-3.10.19/include/uapi/vserver/device.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/vserver/device.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,12 @@ +#ifndef _UAPI_VS_DEVICE_H +#define _UAPI_VS_DEVICE_H + + +#define DATTR_CREATE 0x00000001 +#define DATTR_OPEN 0x00000002 + +#define DATTR_REMAP 0x00000010 + +#define DATTR_MASK 0x00000013 + +#endif /* _UAPI_VS_DEVICE_H */ diff -NurpP --minimal linux-3.10.19/include/uapi/vserver/device_cmd.h linux-3.10.19-vs2.3.6.8/include/uapi/vserver/device_cmd.h --- linux-3.10.19/include/uapi/vserver/device_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/vserver/device_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,16 @@ +#ifndef _UAPI_VS_DEVICE_CMD_H +#define _UAPI_VS_DEVICE_CMD_H + + +/* device vserver commands */ + +#define VCMD_set_mapping VC_CMD(DEVICE, 1, 0) +#define VCMD_unset_mapping VC_CMD(DEVICE, 2, 0) + +struct vcmd_set_mapping_v0 { + const char __user *device; + const char __user *target; + uint32_t flags; +}; + +#endif /* _UAPI_VS_DEVICE_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/uapi/vserver/dlimit_cmd.h linux-3.10.19-vs2.3.6.8/include/uapi/vserver/dlimit_cmd.h --- linux-3.10.19/include/uapi/vserver/dlimit_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/vserver/dlimit_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,67 @@ +#ifndef _UAPI_VS_DLIMIT_CMD_H +#define _UAPI_VS_DLIMIT_CMD_H + + +/* dlimit vserver commands */ + +#define VCMD_add_dlimit VC_CMD(DLIMIT, 1, 0) +#define VCMD_rem_dlimit VC_CMD(DLIMIT, 2, 0) + +#define VCMD_set_dlimit VC_CMD(DLIMIT, 5, 0) +#define VCMD_get_dlimit VC_CMD(DLIMIT, 6, 0) + +struct vcmd_ctx_dlimit_base_v0 { + const char __user *name; + uint32_t flags; +}; + +struct vcmd_ctx_dlimit_v0 { + const char __user *name; + uint32_t space_used; /* used space in kbytes */ + uint32_t space_total; /* maximum space in kbytes */ + uint32_t inodes_used; /* used inodes */ + uint32_t inodes_total; /* maximum inodes */ + uint32_t reserved; /* reserved for root in % */ + uint32_t flags; +}; + +#define CDLIM_UNSET ((uint32_t)0UL) +#define CDLIM_INFINITY ((uint32_t)~0UL) +#define CDLIM_KEEP ((uint32_t)~1UL) + +#define DLIME_UNIT 0 +#define DLIME_KILO 1 +#define DLIME_MEGA 2 +#define DLIME_GIGA 3 + +#define DLIMF_SHIFT 0x10 + +#define DLIMS_USED 0 +#define DLIMS_TOTAL 2 + +static inline +uint64_t dlimit_space_32to64(uint32_t val, uint32_t flags, int shift) +{ + int exp = (flags & DLIMF_SHIFT) ? 
+ (flags >> shift) & DLIME_GIGA : DLIME_KILO; + return ((uint64_t)val) << (10 * exp); +} + +static inline +uint32_t dlimit_space_64to32(uint64_t val, uint32_t *flags, int shift) +{ + int exp = 0; + + if (*flags & DLIMF_SHIFT) { + while (val > (1LL << 32) && (exp < 3)) { + val >>= 10; + exp++; + } + *flags &= ~(DLIME_GIGA << shift); + *flags |= exp << shift; + } else + val >>= 10; + return val; +} + +#endif /* _UAPI_VS_DLIMIT_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/uapi/vserver/inode.h linux-3.10.19-vs2.3.6.8/include/uapi/vserver/inode.h --- linux-3.10.19/include/uapi/vserver/inode.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/vserver/inode.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,23 @@ +#ifndef _UAPI_VS_INODE_H +#define _UAPI_VS_INODE_H + + +#define IATTR_TAG 0x01000000 + +#define IATTR_ADMIN 0x00000001 +#define IATTR_WATCH 0x00000002 +#define IATTR_HIDE 0x00000004 +#define IATTR_FLAGS 0x00000007 + +#define IATTR_BARRIER 0x00010000 +#define IATTR_IXUNLINK 0x00020000 +#define IATTR_IMMUTABLE 0x00040000 +#define IATTR_COW 0x00080000 + + +/* inode ioctls */ + +#define FIOC_GETXFLG _IOR('x', 5, long) +#define FIOC_SETXFLG _IOW('x', 6, long) + +#endif /* _UAPI_VS_INODE_H */ diff -NurpP --minimal linux-3.10.19/include/uapi/vserver/inode_cmd.h linux-3.10.19-vs2.3.6.8/include/uapi/vserver/inode_cmd.h --- linux-3.10.19/include/uapi/vserver/inode_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/vserver/inode_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,26 @@ +#ifndef _UAPI_VS_INODE_CMD_H +#define _UAPI_VS_INODE_CMD_H + + +/* inode vserver commands */ + +#define VCMD_get_iattr VC_CMD(INODE, 1, 1) +#define VCMD_set_iattr VC_CMD(INODE, 2, 1) + +#define VCMD_fget_iattr VC_CMD(INODE, 3, 0) +#define VCMD_fset_iattr VC_CMD(INODE, 4, 0) + +struct vcmd_ctx_iattr_v1 { + const char __user *name; + uint32_t tag; + uint32_t flags; + uint32_t mask; +}; + +struct vcmd_ctx_fiattr_v0 { + uint32_t tag; + uint32_t flags; + uint32_t mask; +}; + +#endif /* _UAPI_VS_INODE_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/uapi/vserver/limit.h linux-3.10.19-vs2.3.6.8/include/uapi/vserver/limit.h --- linux-3.10.19/include/uapi/vserver/limit.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/vserver/limit.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,14 @@ +#ifndef _UAPI_VS_LIMIT_H +#define _UAPI_VS_LIMIT_H + + +#define VLIMIT_NSOCK 16 +#define VLIMIT_OPENFD 17 +#define VLIMIT_ANON 18 +#define VLIMIT_SHMEM 19 +#define VLIMIT_SEMARY 20 +#define VLIMIT_NSEMS 21 +#define VLIMIT_DENTRY 22 +#define VLIMIT_MAPPED 23 + +#endif /* _UAPI_VS_LIMIT_H */ diff -NurpP --minimal linux-3.10.19/include/uapi/vserver/limit_cmd.h linux-3.10.19-vs2.3.6.8/include/uapi/vserver/limit_cmd.h --- linux-3.10.19/include/uapi/vserver/limit_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/vserver/limit_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,40 @@ +#ifndef _UAPI_VS_LIMIT_CMD_H +#define _UAPI_VS_LIMIT_CMD_H + + +/* rlimit vserver commands */ + +#define VCMD_get_rlimit VC_CMD(RLIMIT, 1, 0) +#define VCMD_set_rlimit VC_CMD(RLIMIT, 2, 0) +#define VCMD_get_rlimit_mask VC_CMD(RLIMIT, 3, 0) +#define VCMD_reset_hits VC_CMD(RLIMIT, 7, 0) +#define VCMD_reset_minmax VC_CMD(RLIMIT, 9, 0) + +struct vcmd_ctx_rlimit_v0 { + uint32_t id; + uint64_t minimum; + uint64_t softlimit; + uint64_t maximum; +}; + +struct vcmd_ctx_rlimit_mask_v0 { + uint32_t minimum; + uint32_t softlimit; + uint32_t maximum; 
+}; + +#define VCMD_rlimit_stat VC_CMD(VSTAT, 1, 0) + +struct vcmd_rlimit_stat_v0 { + uint32_t id; + uint32_t hits; + uint64_t value; + uint64_t minimum; + uint64_t maximum; +}; + +#define CRLIM_UNSET (0ULL) +#define CRLIM_INFINITY (~0ULL) +#define CRLIM_KEEP (~1ULL) + +#endif /* _UAPI_VS_LIMIT_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/uapi/vserver/monitor.h linux-3.10.19-vs2.3.6.8/include/uapi/vserver/monitor.h --- linux-3.10.19/include/uapi/vserver/monitor.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/vserver/monitor.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,96 @@ +#ifndef _UAPI_VS_MONITOR_H +#define _UAPI_VS_MONITOR_H + +#include + + +enum { + VXM_UNUSED = 0, + + VXM_SYNC = 0x10, + + VXM_UPDATE = 0x20, + VXM_UPDATE_1, + VXM_UPDATE_2, + + VXM_RQINFO_1 = 0x24, + VXM_RQINFO_2, + + VXM_ACTIVATE = 0x40, + VXM_DEACTIVATE, + VXM_IDLE, + + VXM_HOLD = 0x44, + VXM_UNHOLD, + + VXM_MIGRATE = 0x48, + VXM_RESCHED, + + /* all other bits are flags */ + VXM_SCHED = 0x80, +}; + +struct _vxm_update_1 { + uint32_t tokens_max; + uint32_t fill_rate; + uint32_t interval; +}; + +struct _vxm_update_2 { + uint32_t tokens_min; + uint32_t fill_rate; + uint32_t interval; +}; + +struct _vxm_rqinfo_1 { + uint16_t running; + uint16_t onhold; + uint16_t iowait; + uint16_t uintr; + uint32_t idle_tokens; +}; + +struct _vxm_rqinfo_2 { + uint32_t norm_time; + uint32_t idle_time; + uint32_t idle_skip; +}; + +struct _vxm_sched { + uint32_t tokens; + uint32_t norm_time; + uint32_t idle_time; +}; + +struct _vxm_task { + uint16_t pid; + uint16_t state; +}; + +struct _vxm_event { + uint32_t jif; + union { + uint32_t seq; + uint32_t sec; + }; + union { + uint32_t tokens; + uint32_t nsec; + struct _vxm_task tsk; + }; +}; + +struct _vx_mon_entry { + uint16_t type; + uint16_t xid; + union { + struct _vxm_event ev; + struct _vxm_sched sd; + struct _vxm_update_1 u1; + struct _vxm_update_2 u2; + struct _vxm_rqinfo_1 q1; + struct _vxm_rqinfo_2 q2; + }; +}; + +#endif /* _UAPI_VS_MONITOR_H */ diff -NurpP --minimal linux-3.10.19/include/uapi/vserver/network.h linux-3.10.19-vs2.3.6.8/include/uapi/vserver/network.h --- linux-3.10.19/include/uapi/vserver/network.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/vserver/network.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,76 @@ +#ifndef _UAPI_VS_NETWORK_H +#define _UAPI_VS_NETWORK_H + +#include + + +#define MAX_N_CONTEXT 65535 /* Arbitrary limit */ + + +/* network flags */ + +#define NXF_INFO_PRIVATE 0x00000008 + +#define NXF_SINGLE_IP 0x00000100 +#define NXF_LBACK_REMAP 0x00000200 +#define NXF_LBACK_ALLOW 0x00000400 + +#define NXF_HIDE_NETIF 0x02000000 +#define NXF_HIDE_LBACK 0x04000000 + +#define NXF_STATE_SETUP (1ULL << 32) +#define NXF_STATE_ADMIN (1ULL << 34) + +#define NXF_SC_HELPER (1ULL << 36) +#define NXF_PERSISTENT (1ULL << 38) + +#define NXF_ONE_TIME (0x0005ULL << 32) + + +#define NXF_INIT_SET (__nxf_init_set()) + +static inline uint64_t __nxf_init_set(void) { + return NXF_STATE_ADMIN +#ifdef CONFIG_VSERVER_AUTO_LBACK + | NXF_LBACK_REMAP + | NXF_HIDE_LBACK +#endif +#ifdef CONFIG_VSERVER_AUTO_SINGLE + | NXF_SINGLE_IP +#endif + | NXF_HIDE_NETIF; +} + + +/* network caps */ + +#define NXC_TUN_CREATE 0x00000001 + +#define NXC_RAW_ICMP 0x00000100 + +#define NXC_MULTICAST 0x00001000 + + +/* address types */ + +#define NXA_TYPE_IPV4 0x0001 +#define NXA_TYPE_IPV6 0x0002 + +#define NXA_TYPE_NONE 0x0000 +#define NXA_TYPE_ANY 0x00FF + +#define NXA_TYPE_ADDR 0x0010 +#define NXA_TYPE_MASK 0x0020 +#define 
NXA_TYPE_RANGE 0x0040 + +#define NXA_MASK_ALL (NXA_TYPE_ADDR | NXA_TYPE_MASK | NXA_TYPE_RANGE) + +#define NXA_MOD_BCAST 0x0100 +#define NXA_MOD_LBACK 0x0200 + +#define NXA_LOOPBACK 0x1000 + +#define NXA_MASK_BIND (NXA_MASK_ALL | NXA_MOD_BCAST | NXA_MOD_LBACK) +#define NXA_MASK_SHOW (NXA_MASK_ALL | NXA_LOOPBACK) + +#endif /* _UAPI_VS_NETWORK_H */ diff -NurpP --minimal linux-3.10.19/include/uapi/vserver/network_cmd.h linux-3.10.19-vs2.3.6.8/include/uapi/vserver/network_cmd.h --- linux-3.10.19/include/uapi/vserver/network_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/vserver/network_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,123 @@ +#ifndef _UAPI_VS_NETWORK_CMD_H +#define _UAPI_VS_NETWORK_CMD_H + + +/* vinfo commands */ + +#define VCMD_task_nid VC_CMD(VINFO, 2, 0) + + +#define VCMD_nx_info VC_CMD(VINFO, 6, 0) + +struct vcmd_nx_info_v0 { + uint32_t nid; + /* more to come */ +}; + + +#include +#include + +#define VCMD_net_create_v0 VC_CMD(VNET, 1, 0) +#define VCMD_net_create VC_CMD(VNET, 1, 1) + +struct vcmd_net_create { + uint64_t flagword; +}; + +#define VCMD_net_migrate VC_CMD(NETMIG, 1, 0) + +#define VCMD_net_add VC_CMD(NETALT, 1, 0) +#define VCMD_net_remove VC_CMD(NETALT, 2, 0) + +struct vcmd_net_addr_v0 { + uint16_t type; + uint16_t count; + struct in_addr ip[4]; + struct in_addr mask[4]; +}; + +#define VCMD_net_add_ipv4_v1 VC_CMD(NETALT, 1, 1) +#define VCMD_net_rem_ipv4_v1 VC_CMD(NETALT, 2, 1) + +struct vcmd_net_addr_ipv4_v1 { + uint16_t type; + uint16_t flags; + struct in_addr ip; + struct in_addr mask; +}; + +#define VCMD_net_add_ipv4 VC_CMD(NETALT, 1, 2) +#define VCMD_net_rem_ipv4 VC_CMD(NETALT, 2, 2) + +struct vcmd_net_addr_ipv4_v2 { + uint16_t type; + uint16_t flags; + struct in_addr ip; + struct in_addr ip2; + struct in_addr mask; +}; + +#define VCMD_net_add_ipv6 VC_CMD(NETALT, 3, 1) +#define VCMD_net_remove_ipv6 VC_CMD(NETALT, 4, 1) + +struct vcmd_net_addr_ipv6_v1 { + uint16_t type; + uint16_t flags; + uint32_t prefix; + struct in6_addr ip; + struct in6_addr mask; +}; + +#define VCMD_add_match_ipv4 VC_CMD(NETALT, 5, 0) +#define VCMD_get_match_ipv4 VC_CMD(NETALT, 6, 0) + +struct vcmd_match_ipv4_v0 { + uint16_t type; + uint16_t flags; + uint16_t parent; + uint16_t prefix; + struct in_addr ip; + struct in_addr ip2; + struct in_addr mask; +}; + +#define VCMD_add_match_ipv6 VC_CMD(NETALT, 7, 0) +#define VCMD_get_match_ipv6 VC_CMD(NETALT, 8, 0) + +struct vcmd_match_ipv6_v0 { + uint16_t type; + uint16_t flags; + uint16_t parent; + uint16_t prefix; + struct in6_addr ip; + struct in6_addr ip2; + struct in6_addr mask; +}; + + + + +/* flag commands */ + +#define VCMD_get_nflags VC_CMD(FLAGS, 5, 0) +#define VCMD_set_nflags VC_CMD(FLAGS, 6, 0) + +struct vcmd_net_flags_v0 { + uint64_t flagword; + uint64_t mask; +}; + + + +/* network caps commands */ + +#define VCMD_get_ncaps VC_CMD(FLAGS, 7, 0) +#define VCMD_set_ncaps VC_CMD(FLAGS, 8, 0) + +struct vcmd_net_caps_v0 { + uint64_t ncaps; + uint64_t cmask; +}; + +#endif /* _UAPI_VS_NETWORK_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/uapi/vserver/sched_cmd.h linux-3.10.19-vs2.3.6.8/include/uapi/vserver/sched_cmd.h --- linux-3.10.19/include/uapi/vserver/sched_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/vserver/sched_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,13 @@ +#ifndef _UAPI_VS_SCHED_CMD_H +#define _UAPI_VS_SCHED_CMD_H + + +struct vcmd_prio_bias { + int32_t cpu_id; + int32_t prio_bias; +}; + +#define VCMD_set_prio_bias VC_CMD(SCHED, 4, 0) 
+#define VCMD_get_prio_bias VC_CMD(SCHED, 5, 0) + +#endif /* _UAPI_VS_SCHED_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/uapi/vserver/signal_cmd.h linux-3.10.19-vs2.3.6.8/include/uapi/vserver/signal_cmd.h --- linux-3.10.19/include/uapi/vserver/signal_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/vserver/signal_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,31 @@ +#ifndef _UAPI_VS_SIGNAL_CMD_H +#define _UAPI_VS_SIGNAL_CMD_H + + +/* signalling vserver commands */ + +#define VCMD_ctx_kill VC_CMD(PROCTRL, 1, 0) +#define VCMD_wait_exit VC_CMD(EVENT, 99, 0) + +struct vcmd_ctx_kill_v0 { + int32_t pid; + int32_t sig; +}; + +struct vcmd_wait_exit_v0 { + int32_t reboot_cmd; + int32_t exit_code; +}; + + +/* process alteration commands */ + +#define VCMD_get_pflags VC_CMD(PROCALT, 5, 0) +#define VCMD_set_pflags VC_CMD(PROCALT, 6, 0) + +struct vcmd_pflags_v0 { + uint32_t flagword; + uint32_t mask; +}; + +#endif /* _UAPI_VS_SIGNAL_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/uapi/vserver/space_cmd.h linux-3.10.19-vs2.3.6.8/include/uapi/vserver/space_cmd.h --- linux-3.10.19/include/uapi/vserver/space_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/vserver/space_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,28 @@ +#ifndef _UAPI_VS_SPACE_CMD_H +#define _UAPI_VS_SPACE_CMD_H + + +#define VCMD_enter_space_v0 VC_CMD(PROCALT, 1, 0) +#define VCMD_enter_space_v1 VC_CMD(PROCALT, 1, 1) +#define VCMD_enter_space VC_CMD(PROCALT, 1, 2) + +#define VCMD_set_space_v0 VC_CMD(PROCALT, 3, 0) +#define VCMD_set_space_v1 VC_CMD(PROCALT, 3, 1) +#define VCMD_set_space VC_CMD(PROCALT, 3, 2) + +#define VCMD_get_space_mask_v0 VC_CMD(PROCALT, 4, 0) + +#define VCMD_get_space_mask VC_CMD(VSPACE, 0, 1) +#define VCMD_get_space_default VC_CMD(VSPACE, 1, 0) + + +struct vcmd_space_mask_v1 { + uint64_t mask; +}; + +struct vcmd_space_mask_v2 { + uint64_t mask; + uint32_t index; +}; + +#endif /* _UAPI_VS_SPACE_CMD_H */ diff -NurpP --minimal linux-3.10.19/include/uapi/vserver/switch.h linux-3.10.19-vs2.3.6.8/include/uapi/vserver/switch.h --- linux-3.10.19/include/uapi/vserver/switch.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/vserver/switch.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,90 @@ +#ifndef _UAPI_VS_SWITCH_H +#define _UAPI_VS_SWITCH_H + +#include + + +#define VC_CATEGORY(c) (((c) >> 24) & 0x3F) +#define VC_COMMAND(c) (((c) >> 16) & 0xFF) +#define VC_VERSION(c) ((c) & 0xFFF) + +#define VC_CMD(c, i, v) ((((VC_CAT_ ## c) & 0x3F) << 24) \ + | (((i) & 0xFF) << 16) | ((v) & 0xFFF)) + +/* + + Syscall Matrix V2.8 + + |VERSION|CREATE |MODIFY |MIGRATE|CONTROL|EXPERIM| |SPECIAL|SPECIAL| + |STATS |DESTROY|ALTER |CHANGE |LIMIT |TEST | | | | + |INFO |SETUP | |MOVE | | | | | | + -------+-------+-------+-------+-------+-------+-------+ +-------+-------+ + SYSTEM |VERSION|VSETUP |VHOST | | | | |DEVICE | | + HOST | 00| 01| 02| 03| 04| 05| | 06| 07| + -------+-------+-------+-------+-------+-------+-------+ +-------+-------+ + CPU | |VPROC |PROCALT|PROCMIG|PROCTRL| | |SCHED. 
| | + PROCESS| 08| 09| 10| 11| 12| 13| | 14| 15| + -------+-------+-------+-------+-------+-------+-------+ +-------+-------+ + MEMORY | | | | |MEMCTRL| | |SWAP | | + | 16| 17| 18| 19| 20| 21| | 22| 23| + -------+-------+-------+-------+-------+-------+-------+ +-------+-------+ + NETWORK| |VNET |NETALT |NETMIG |NETCTL | | |SERIAL | | + | 24| 25| 26| 27| 28| 29| | 30| 31| + -------+-------+-------+-------+-------+-------+-------+ +-------+-------+ + DISK | | | |TAGMIG |DLIMIT | | |INODE | | + VFS | 32| 33| 34| 35| 36| 37| | 38| 39| + -------+-------+-------+-------+-------+-------+-------+ +-------+-------+ + OTHER |VSTAT | | | | | | |VINFO | | + | 40| 41| 42| 43| 44| 45| | 46| 47| + =======+=======+=======+=======+=======+=======+=======+ +=======+=======+ + SPECIAL|EVENT | | | |FLAGS | | |VSPACE | | + | 48| 49| 50| 51| 52| 53| | 54| 55| + -------+-------+-------+-------+-------+-------+-------+ +-------+-------+ + SPECIAL|DEBUG | | | |RLIMIT |SYSCALL| | |COMPAT | + | 56| 57| 58| 59| 60|TEST 61| | 62| 63| + -------+-------+-------+-------+-------+-------+-------+ +-------+-------+ + +*/ + +#define VC_CAT_VERSION 0 + +#define VC_CAT_VSETUP 1 +#define VC_CAT_VHOST 2 + +#define VC_CAT_DEVICE 6 + +#define VC_CAT_VPROC 9 +#define VC_CAT_PROCALT 10 +#define VC_CAT_PROCMIG 11 +#define VC_CAT_PROCTRL 12 + +#define VC_CAT_SCHED 14 +#define VC_CAT_MEMCTRL 20 + +#define VC_CAT_VNET 25 +#define VC_CAT_NETALT 26 +#define VC_CAT_NETMIG 27 +#define VC_CAT_NETCTRL 28 + +#define VC_CAT_TAGMIG 35 +#define VC_CAT_DLIMIT 36 +#define VC_CAT_INODE 38 + +#define VC_CAT_VSTAT 40 +#define VC_CAT_VINFO 46 +#define VC_CAT_EVENT 48 + +#define VC_CAT_FLAGS 52 +#define VC_CAT_VSPACE 54 +#define VC_CAT_DEBUG 56 +#define VC_CAT_RLIMIT 60 + +#define VC_CAT_SYSTEST 61 +#define VC_CAT_COMPAT 63 + +/* query version */ + +#define VCMD_get_version VC_CMD(VERSION, 0, 0) +#define VCMD_get_vci VC_CMD(VERSION, 1, 0) + +#endif /* _UAPI_VS_SWITCH_H */ diff -NurpP --minimal linux-3.10.19/include/uapi/vserver/tag_cmd.h linux-3.10.19-vs2.3.6.8/include/uapi/vserver/tag_cmd.h --- linux-3.10.19/include/uapi/vserver/tag_cmd.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/include/uapi/vserver/tag_cmd.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,14 @@ +#ifndef _UAPI_VS_TAG_CMD_H +#define _UAPI_VS_TAG_CMD_H + + +/* vinfo commands */ + +#define VCMD_task_tag VC_CMD(VINFO, 3, 0) + + +/* context commands */ + +#define VCMD_tag_migrate VC_CMD(TAGMIG, 1, 0) + +#endif /* _UAPI_VS_TAG_CMD_H */ diff -NurpP --minimal linux-3.10.19/init/Kconfig linux-3.10.19-vs2.3.6.8/init/Kconfig --- linux-3.10.19/init/Kconfig 2013-07-14 17:01:34.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/init/Kconfig 2013-08-22 20:30:00.000000000 +0000 @@ -805,6 +805,7 @@ config NUMA_BALANCING menuconfig CGROUPS boolean "Control Group support" depends on EVENTFD + default y help This option adds support for grouping sets of processes together, for use with process control subsystems such as Cpusets, CFS, memory @@ -1067,6 +1068,7 @@ config IPC_NS config USER_NS bool "User namespace" depends on UIDGID_CONVERTED + depends on VSERVER_DISABLED select UIDGID_STRICT_TYPE_CHECKS default n diff -NurpP --minimal linux-3.10.19/init/main.c linux-3.10.19-vs2.3.6.8/init/main.c --- linux-3.10.19/init/main.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/init/main.c 2013-11-13 17:19:37.000000000 +0000 @@ -75,6 +75,7 @@ #include #include #include +#include #include #include diff -NurpP --minimal linux-3.10.19/ipc/mqueue.c linux-3.10.19-vs2.3.6.8/ipc/mqueue.c 
--- linux-3.10.19/ipc/mqueue.c 2013-05-31 13:45:29.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/ipc/mqueue.c 2013-08-22 20:30:00.000000000 +0000 @@ -35,6 +35,8 @@ #include #include #include +#include +#include #include #include "util.h" @@ -76,6 +78,7 @@ struct mqueue_inode_info { struct pid* notify_owner; struct user_namespace *notify_user_ns; struct user_struct *user; /* user who created, for accounting */ + struct vx_info *vxi; struct sock *notify_sock; struct sk_buff *notify_cookie; @@ -234,6 +237,7 @@ static struct inode *mqueue_get_inode(st if (S_ISREG(mode)) { struct mqueue_inode_info *info; unsigned long mq_bytes, mq_treesize; + struct vx_info *vxi = current_vx_info(); inode->i_fop = &mqueue_file_operations; inode->i_size = FILENT_SIZE; @@ -247,6 +251,7 @@ static struct inode *mqueue_get_inode(st info->notify_user_ns = NULL; info->qsize = 0; info->user = NULL; /* set when all is ok */ + info->vxi = NULL; info->msg_tree = RB_ROOT; info->node_cache = NULL; memset(&info->attr, 0, sizeof(info->attr)); @@ -280,17 +285,20 @@ static struct inode *mqueue_get_inode(st spin_lock(&mq_lock); if (u->mq_bytes + mq_bytes < u->mq_bytes || - u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) { + u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE) || + !vx_ipcmsg_avail(vxi, mq_bytes)) { spin_unlock(&mq_lock); /* mqueue_evict_inode() releases info->messages */ ret = -EMFILE; goto out_inode; } u->mq_bytes += mq_bytes; + vx_ipcmsg_add(vxi, u, mq_bytes); spin_unlock(&mq_lock); /* all is ok */ info->user = get_uid(u); + info->vxi = get_vx_info(vxi); } else if (S_ISDIR(mode)) { inc_nlink(inode); /* Some things misbehave if size == 0 on a directory */ @@ -402,8 +410,11 @@ static void mqueue_evict_inode(struct in user = info->user; if (user) { + struct vx_info *vxi = info->vxi; + spin_lock(&mq_lock); user->mq_bytes -= mq_bytes; + vx_ipcmsg_sub(vxi, user, mq_bytes); /* * get_ns_from_inode() ensures that the * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns @@ -413,6 +424,7 @@ static void mqueue_evict_inode(struct in if (ipc_ns) ipc_ns->mq_queues_count--; spin_unlock(&mq_lock); + put_vx_info(vxi); free_uid(user); } if (ipc_ns) diff -NurpP --minimal linux-3.10.19/ipc/msg.c linux-3.10.19-vs2.3.6.8/ipc/msg.c --- linux-3.10.19/ipc/msg.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/ipc/msg.c 2013-11-13 17:17:16.000000000 +0000 @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -194,6 +195,7 @@ static int newque(struct ipc_namespace * msq->q_perm.mode = msgflg & S_IRWXUGO; msq->q_perm.key = key; + msq->q_perm.xid = vx_current_xid(); msq->q_perm.security = NULL; retval = security_msg_queue_alloc(msq); diff -NurpP --minimal linux-3.10.19/ipc/sem.c linux-3.10.19-vs2.3.6.8/ipc/sem.c --- linux-3.10.19/ipc/sem.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/ipc/sem.c 2013-11-13 17:19:37.000000000 +0000 @@ -86,6 +86,8 @@ #include #include #include +#include +#include #include #include "util.h" @@ -500,6 +502,7 @@ static int newary(struct ipc_namespace * sma->sem_perm.mode = (semflg & S_IRWXUGO); sma->sem_perm.key = key; + sma->sem_perm.xid = vx_current_xid(); sma->sem_perm.security = NULL; retval = security_sem_alloc(sma); @@ -514,6 +517,9 @@ static int newary(struct ipc_namespace * return id; } ns->used_sems += nsems; + /* FIXME: obsoleted? 
*/ + vx_semary_inc(sma); + vx_nsems_add(sma, nsems); sma->sem_base = (struct sem *) &sma[1]; @@ -1103,6 +1109,9 @@ static void freeary(struct ipc_namespace wake_up_sem_queue_do(&tasks); ns->used_sems -= sma->sem_nsems; + /* FIXME: obsoleted? */ + vx_nsems_sub(sma, sma->sem_nsems); + vx_semary_dec(sma); ipc_rcu_putref(sma, sem_rcu_free); } diff -NurpP --minimal linux-3.10.19/ipc/shm.c linux-3.10.19-vs2.3.6.8/ipc/shm.c --- linux-3.10.19/ipc/shm.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/ipc/shm.c 2013-11-13 17:19:37.000000000 +0000 @@ -42,6 +42,8 @@ #include #include #include +#include +#include #include @@ -208,7 +210,12 @@ static void shm_open(struct vm_area_stru */ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) { - ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT; + struct vx_info *vxi = lookup_vx_info(shp->shm_perm.xid); + int numpages = (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT; + + vx_ipcshm_sub(vxi, shp, numpages); + ns->shm_tot -= numpages; + shm_rmid(ns, shp); shm_unlock(shp); if (!is_file_hugepages(shp->shm_file)) @@ -217,6 +224,7 @@ static void shm_destroy(struct ipc_names user_shm_unlock(file_inode(shp->shm_file)->i_size, shp->mlock_user); fput (shp->shm_file); + put_vx_info(vxi); ipc_rcu_putref(shp, shm_rcu_free); } @@ -494,11 +502,15 @@ static int newseg(struct ipc_namespace * if (ns->shm_tot + numpages > ns->shm_ctlall) return -ENOSPC; + if (!vx_ipcshm_avail(current_vx_info(), numpages)) + return -ENOSPC; + shp = ipc_rcu_alloc(sizeof(*shp)); if (!shp) return -ENOMEM; shp->shm_perm.key = key; + shp->shm_perm.xid = vx_current_xid(); shp->shm_perm.mode = (shmflg & S_IRWXUGO); shp->mlock_user = NULL; @@ -567,6 +579,7 @@ static int newseg(struct ipc_namespace * ipc_unlock_object(&shp->shm_perm); rcu_read_unlock(); + vx_ipcshm_add(current_vx_info(), key, numpages); return error; no_id: diff -NurpP --minimal linux-3.10.19/kernel/Makefile linux-3.10.19-vs2.3.6.8/kernel/Makefile --- linux-3.10.19/kernel/Makefile 2013-07-14 17:01:34.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/Makefile 2013-08-22 21:18:32.000000000 +0000 @@ -25,6 +25,7 @@ endif obj-y += sched/ obj-y += power/ obj-y += cpu/ +obj-y += vserver/ obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o obj-$(CONFIG_FREEZER) += freezer.o diff -NurpP --minimal linux-3.10.19/kernel/auditsc.c linux-3.10.19-vs2.3.6.8/kernel/auditsc.c --- linux-3.10.19/kernel/auditsc.c 2013-07-14 17:01:34.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/auditsc.c 2013-08-22 20:30:00.000000000 +0000 @@ -1976,7 +1976,7 @@ int audit_set_loginuid(kuid_t loginuid) if (audit_loginuid_set(task)) return -EPERM; #else /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */ - if (!capable(CAP_AUDIT_CONTROL)) + if (!vx_capable(CAP_AUDIT_CONTROL, VXC_AUDIT_CONTROL)) return -EPERM; #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */ diff -NurpP --minimal linux-3.10.19/kernel/capability.c linux-3.10.19-vs2.3.6.8/kernel/capability.c --- linux-3.10.19/kernel/capability.c 2013-05-31 13:45:29.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/capability.c 2013-08-22 20:30:00.000000000 +0000 @@ -15,6 +15,7 @@ #include #include #include +#include #include /* @@ -116,6 +117,7 @@ static int cap_validate_magic(cap_user_h return 0; } + /* * The only thing that can change the capabilities of the current * process is the current process. 
As such, we can't be in this code @@ -349,6 +351,8 @@ bool has_ns_capability_noaudit(struct ta return (ret == 0); } +#include + /** * has_capability_noaudit - Does a task have a capability (unaudited) in the * initial user ns diff -NurpP --minimal linux-3.10.19/kernel/compat.c linux-3.10.19-vs2.3.6.8/kernel/compat.c --- linux-3.10.19/kernel/compat.c 2013-07-14 17:01:34.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/compat.c 2013-08-22 20:30:00.000000000 +0000 @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -1040,7 +1041,7 @@ asmlinkage long compat_sys_stime(compat_ if (err) return err; - do_settimeofday(&tv); + vx_settimeofday(&tv); return 0; } diff -NurpP --minimal linux-3.10.19/kernel/cred.c linux-3.10.19-vs2.3.6.8/kernel/cred.c --- linux-3.10.19/kernel/cred.c 2013-02-19 13:58:56.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/cred.c 2013-08-22 20:30:00.000000000 +0000 @@ -56,31 +56,6 @@ struct cred init_cred = { .group_info = &init_groups, }; -static inline void set_cred_subscribers(struct cred *cred, int n) -{ -#ifdef CONFIG_DEBUG_CREDENTIALS - atomic_set(&cred->subscribers, n); -#endif -} - -static inline int read_cred_subscribers(const struct cred *cred) -{ -#ifdef CONFIG_DEBUG_CREDENTIALS - return atomic_read(&cred->subscribers); -#else - return 0; -#endif -} - -static inline void alter_cred_subscribers(const struct cred *_cred, int n) -{ -#ifdef CONFIG_DEBUG_CREDENTIALS - struct cred *cred = (struct cred *) _cred; - - atomic_add(n, &cred->subscribers); -#endif -} - /* * The RCU callback to actually dispose of a set of credentials */ @@ -232,21 +207,16 @@ error: * * Call commit_creds() or abort_creds() to clean up. */ -struct cred *prepare_creds(void) +struct cred *__prepare_creds(const struct cred *old) { - struct task_struct *task = current; - const struct cred *old; struct cred *new; - validate_process_creds(); - new = kmem_cache_alloc(cred_jar, GFP_KERNEL); if (!new) return NULL; kdebug("prepare_creds() alloc %p", new); - old = task->cred; memcpy(new, old, sizeof(struct cred)); atomic_set(&new->usage, 1); @@ -275,6 +245,13 @@ error: abort_creds(new); return NULL; } + +struct cred *prepare_creds(void) +{ + validate_process_creds(); + + return __prepare_creds(current->cred); +} EXPORT_SYMBOL(prepare_creds); /* diff -NurpP --minimal linux-3.10.19/kernel/exit.c linux-3.10.19-vs2.3.6.8/kernel/exit.c --- linux-3.10.19/kernel/exit.c 2013-07-14 17:01:35.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/exit.c 2013-08-22 20:30:00.000000000 +0000 @@ -48,6 +48,10 @@ #include #include #include +#include +#include +#include +#include #include #include #include @@ -514,15 +518,25 @@ static struct task_struct *find_new_reap __acquires(&tasklist_lock) { struct pid_namespace *pid_ns = task_active_pid_ns(father); - struct task_struct *thread; + struct vx_info *vxi = task_get_vx_info(father); + struct task_struct *thread = father; + struct task_struct *reaper; - thread = father; while_each_thread(father, thread) { if (thread->flags & PF_EXITING) continue; if (unlikely(pid_ns->child_reaper == father)) pid_ns->child_reaper = thread; - return thread; + reaper = thread; + goto out_put; + } + + reaper = pid_ns->child_reaper; + if (vxi) { + BUG_ON(!vxi->vx_reaper); + if (vxi->vx_reaper != init_pid_ns.child_reaper && + vxi->vx_reaper != father) + reaper = vxi->vx_reaper; } if (unlikely(pid_ns->child_reaper == father)) { @@ -560,7 +574,9 @@ static struct task_struct *find_new_reap } } - return pid_ns->child_reaper; +out_put: + put_vx_info(vxi); + return reaper; } /* @@ -611,10 
+627,15 @@ static void forget_original_parent(struc list_for_each_entry_safe(p, n, &father->children, sibling) { struct task_struct *t = p; do { - t->real_parent = reaper; + struct task_struct *new_parent = reaper; + + if (unlikely(p == reaper)) + new_parent = task_active_pid_ns(p)->child_reaper; + + t->real_parent = new_parent; if (t->parent == father) { BUG_ON(t->ptrace); - t->parent = t->real_parent; + t->parent = new_parent; } if (t->pdeath_signal) group_send_sig_info(t->pdeath_signal, @@ -821,6 +842,9 @@ void do_exit(long code) */ ptrace_put_breakpoints(tsk); + /* needs to stay before exit_notify() */ + exit_vx_info_early(tsk, code); + exit_notify(tsk, group_dead); #ifdef CONFIG_NUMA task_lock(tsk); @@ -874,10 +898,15 @@ void do_exit(long code) smp_mb(); raw_spin_unlock_wait(&tsk->pi_lock); + /* needs to stay after exit_notify() */ + exit_vx_info(tsk, code); + exit_nx_info(tsk); + /* causes final put_task_struct in finish_task_switch(). */ tsk->state = TASK_DEAD; tsk->flags |= PF_NOFREEZE; /* tell freezer to ignore us */ schedule(); + printk("bad task: %p [%lx]\n", current, current->state); BUG(); /* Avoid "noreturn function does return". */ for (;;) diff -NurpP --minimal linux-3.10.19/kernel/fork.c linux-3.10.19-vs2.3.6.8/kernel/fork.c --- linux-3.10.19/kernel/fork.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/fork.c 2013-11-13 17:17:16.000000000 +0000 @@ -71,6 +71,9 @@ #include #include #include +#include +#include +#include #include #include @@ -211,6 +214,8 @@ void free_task(struct task_struct *tsk) arch_release_thread_info(tsk->stack); free_thread_info(tsk->stack); rt_mutex_debug_task_free(tsk); + clr_vx_info(&tsk->vx_info); + clr_nx_info(&tsk->nx_info); ftrace_graph_exit_task(tsk); put_seccomp_filter(tsk); arch_release_task_struct(tsk); @@ -548,6 +553,7 @@ static struct mm_struct *mm_init(struct if (likely(!mm_alloc_pgd(mm))) { mm->def_flags = 0; mmu_notifier_mm_init(mm); + set_vx_info(&mm->mm_vx_info, p->vx_info); return mm; } @@ -600,6 +606,7 @@ void __mmdrop(struct mm_struct *mm) destroy_context(mm); mmu_notifier_mm_destroy(mm); check_mm(mm); + clr_vx_info(&mm->mm_vx_info); free_mm(mm); } EXPORT_SYMBOL_GPL(__mmdrop); @@ -819,6 +826,7 @@ struct mm_struct *dup_mm(struct task_str goto fail_nomem; memcpy(mm, oldmm, sizeof(*mm)); + mm->mm_vx_info = NULL; mm_init_cpumask(mm); #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -860,6 +868,7 @@ fail_nocontext: * If init_new_context() failed, we cannot use mmput() to free the mm * because it calls destroy_context() */ + clr_vx_info(&mm->mm_vx_info); mm_free_pgd(mm); free_mm(mm); return NULL; @@ -1138,6 +1147,8 @@ static struct task_struct *copy_process( { int retval; struct task_struct *p; + struct vx_info *vxi; + struct nx_info *nxi; if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) return ERR_PTR(-EINVAL); @@ -1197,7 +1208,12 @@ static struct task_struct *copy_process( DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); #endif + init_vx_info(&p->vx_info, current_vx_info()); + init_nx_info(&p->nx_info, current_nx_info()); + retval = -EAGAIN; + if (!vx_nproc_avail(1)) + goto bad_fork_free; if (atomic_read(&p->real_cred->user->processes) >= task_rlimit(p, RLIMIT_NPROC)) { if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && @@ -1478,6 +1494,18 @@ static struct task_struct *copy_process( total_forks++; spin_unlock(¤t->sighand->siglock); + + /* p is copy of current */ + vxi = p->vx_info; + if (vxi) { + claim_vx_info(vxi, p); + atomic_inc(&vxi->cvirt.nr_threads); 
+ atomic_inc(&vxi->cvirt.total_forks); + vx_nproc_inc(p); + } + nxi = p->nx_info; + if (nxi) + claim_nx_info(nxi, p); write_unlock_irq(&tasklist_lock); proc_fork_connector(p); cgroup_post_fork(p); diff -NurpP --minimal linux-3.10.19/kernel/kthread.c linux-3.10.19-vs2.3.6.8/kernel/kthread.c --- linux-3.10.19/kernel/kthread.c 2013-07-14 17:01:35.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/kthread.c 2013-08-22 21:14:28.000000000 +0000 @@ -18,6 +18,7 @@ #include #include #include +#include #include static DEFINE_SPINLOCK(kthread_create_lock); diff -NurpP --minimal linux-3.10.19/kernel/nsproxy.c linux-3.10.19-vs2.3.6.8/kernel/nsproxy.c --- linux-3.10.19/kernel/nsproxy.c 2013-07-14 17:01:35.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/nsproxy.c 2013-08-22 21:09:55.000000000 +0000 @@ -20,11 +20,14 @@ #include #include #include +#include +#include #include #include #include #include #include +#include "../fs/mount.h" static struct kmem_cache *nsproxy_cachep; @@ -46,8 +49,11 @@ static inline struct nsproxy *create_nsp struct nsproxy *nsproxy; nsproxy = kmem_cache_alloc(nsproxy_cachep, GFP_KERNEL); - if (nsproxy) + if (nsproxy) { atomic_set(&nsproxy->count, 1); + atomic_inc(&vs_global_nsproxy); + } + vxdprintk(VXD_CBIT(space, 2), "create_nsproxy = %p[1]", nsproxy); return nsproxy; } @@ -56,9 +62,12 @@ static inline struct nsproxy *create_nsp * Return the newly created nsproxy. Do not attach this to the task, * leave it to the caller to do proper locking and attach it to task. */ -static struct nsproxy *create_new_namespaces(unsigned long flags, - struct task_struct *tsk, struct user_namespace *user_ns, - struct fs_struct *new_fs) +static struct nsproxy *unshare_namespaces( + unsigned long flags, + struct nsproxy *orig, + struct fs_struct *new_fs, + struct user_namespace *new_user, + struct pid_namespace *new_pid) { struct nsproxy *new_nsp; int err; @@ -67,31 +76,31 @@ static struct nsproxy *create_new_namesp if (!new_nsp) return ERR_PTR(-ENOMEM); - new_nsp->mnt_ns = copy_mnt_ns(flags, tsk->nsproxy->mnt_ns, user_ns, new_fs); + new_nsp->mnt_ns = copy_mnt_ns(flags, orig->mnt_ns, new_user, new_fs); if (IS_ERR(new_nsp->mnt_ns)) { err = PTR_ERR(new_nsp->mnt_ns); goto out_ns; } - new_nsp->uts_ns = copy_utsname(flags, user_ns, tsk->nsproxy->uts_ns); + new_nsp->uts_ns = copy_utsname(flags, new_user, orig->uts_ns); if (IS_ERR(new_nsp->uts_ns)) { err = PTR_ERR(new_nsp->uts_ns); goto out_uts; } - new_nsp->ipc_ns = copy_ipcs(flags, user_ns, tsk->nsproxy->ipc_ns); + new_nsp->ipc_ns = copy_ipcs(flags, new_user, orig->ipc_ns); if (IS_ERR(new_nsp->ipc_ns)) { err = PTR_ERR(new_nsp->ipc_ns); goto out_ipc; } - new_nsp->pid_ns = copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns); + new_nsp->pid_ns = copy_pid_ns(flags, new_user, new_pid); if (IS_ERR(new_nsp->pid_ns)) { err = PTR_ERR(new_nsp->pid_ns); goto out_pid; } - new_nsp->net_ns = copy_net_ns(flags, user_ns, tsk->nsproxy->net_ns); + new_nsp->net_ns = copy_net_ns(flags, new_user, orig->net_ns); if (IS_ERR(new_nsp->net_ns)) { err = PTR_ERR(new_nsp->net_ns); goto out_net; @@ -116,6 +125,41 @@ out_ns: return ERR_PTR(err); } +static struct nsproxy *create_new_namespaces(unsigned long flags, + struct task_struct *tsk, struct user_namespace *user_ns, + struct fs_struct *new_fs) + +{ + return unshare_namespaces(flags, tsk->nsproxy, + new_fs, user_ns, task_active_pid_ns(tsk)); +} + +/* + * copies the nsproxy, setting refcount to 1, and grabbing a + * reference to all contained namespaces. 
+ */ +struct nsproxy *copy_nsproxy(struct nsproxy *orig) +{ + struct nsproxy *ns = create_nsproxy(); + + if (ns) { + memcpy(ns, orig, sizeof(struct nsproxy)); + atomic_set(&ns->count, 1); + + if (ns->mnt_ns) + get_mnt_ns(ns->mnt_ns); + if (ns->uts_ns) + get_uts_ns(ns->uts_ns); + if (ns->ipc_ns) + get_ipc_ns(ns->ipc_ns); + if (ns->pid_ns) + get_pid_ns(ns->pid_ns); + if (ns->net_ns) + get_net(ns->net_ns); + } + return ns; +} + /* * called from clone. This now handles copy for nsproxy and all * namespaces therein. @@ -124,9 +168,12 @@ int copy_namespaces(unsigned long flags, { struct nsproxy *old_ns = tsk->nsproxy; struct user_namespace *user_ns = task_cred_xxx(tsk, user_ns); - struct nsproxy *new_ns; + struct nsproxy *new_ns = NULL; int err = 0; + vxdprintk(VXD_CBIT(space, 7), "copy_namespaces(0x%08lx,%p[%p])", + flags, tsk, old_ns); + if (!old_ns) return 0; @@ -136,7 +183,7 @@ int copy_namespaces(unsigned long flags, CLONE_NEWPID | CLONE_NEWNET))) return 0; - if (!ns_capable(user_ns, CAP_SYS_ADMIN)) { + if (!vx_ns_can_unshare(user_ns, CAP_SYS_ADMIN, flags)) { err = -EPERM; goto out; } @@ -163,6 +210,9 @@ int copy_namespaces(unsigned long flags, out: put_nsproxy(old_ns); + vxdprintk(VXD_CBIT(space, 3), + "copy_namespaces(0x%08lx,%p[%p]) = %d [%p]", + flags, tsk, old_ns, err, new_ns); return err; } @@ -176,7 +226,9 @@ void free_nsproxy(struct nsproxy *ns) put_ipc_ns(ns->ipc_ns); if (ns->pid_ns) put_pid_ns(ns->pid_ns); - put_net(ns->net_ns); + if (ns->net_ns) + put_net(ns->net_ns); + atomic_dec(&vs_global_nsproxy); kmem_cache_free(nsproxy_cachep, ns); } @@ -190,12 +242,16 @@ int unshare_nsproxy_namespaces(unsigned struct user_namespace *user_ns; int err = 0; + vxdprintk(VXD_CBIT(space, 4), + "unshare_nsproxy_namespaces(0x%08lx,[%p])", + unshare_flags, current->nsproxy); + if (!(unshare_flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWNET | CLONE_NEWPID))) return 0; user_ns = new_cred ? 
new_cred->user_ns : current_user_ns(); - if (!ns_capable(user_ns, CAP_SYS_ADMIN)) + if (!vx_ns_can_unshare(user_ns, CAP_SYS_ADMIN, unshare_flags)) return -EPERM; *new_nsp = create_new_namespaces(unshare_flags, current, user_ns, diff -NurpP --minimal linux-3.10.19/kernel/pid.c linux-3.10.19-vs2.3.6.8/kernel/pid.c --- linux-3.10.19/kernel/pid.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/pid.c 2013-11-13 17:17:16.000000000 +0000 @@ -38,6 +38,7 @@ #include #include #include +#include #define pid_hashfn(nr, ns) \ hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift) @@ -367,7 +368,7 @@ EXPORT_SYMBOL_GPL(find_pid_ns); struct pid *find_vpid(int nr) { - return find_pid_ns(nr, task_active_pid_ns(current)); + return find_pid_ns(vx_rmap_pid(nr), task_active_pid_ns(current)); } EXPORT_SYMBOL_GPL(find_vpid); @@ -427,6 +428,9 @@ void transfer_pid(struct task_struct *ol struct task_struct *pid_task(struct pid *pid, enum pid_type type) { struct task_struct *result = NULL; + + if (type == PIDTYPE_REALPID) + type = PIDTYPE_PID; if (pid) { struct hlist_node *first; first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]), @@ -446,7 +450,7 @@ struct task_struct *find_task_by_pid_ns( rcu_lockdep_assert(rcu_read_lock_held(), "find_task_by_pid_ns() needs rcu_read_lock()" " protection"); - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); + return pid_task(find_pid_ns(vx_rmap_pid(nr), ns), PIDTYPE_PID); } struct task_struct *find_task_by_vpid(pid_t vnr) @@ -490,7 +494,7 @@ struct pid *find_get_pid(pid_t nr) } EXPORT_SYMBOL_GPL(find_get_pid); -pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns) +pid_t pid_unmapped_nr_ns(struct pid *pid, struct pid_namespace *ns) { struct upid *upid; pid_t nr = 0; @@ -504,6 +508,11 @@ pid_t pid_nr_ns(struct pid *pid, struct } EXPORT_SYMBOL_GPL(pid_nr_ns); +pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns) +{ + return vx_map_pid(pid_unmapped_nr_ns(pid, ns)); +} + pid_t pid_vnr(struct pid *pid) { return pid_nr_ns(pid, task_active_pid_ns(current)); diff -NurpP --minimal linux-3.10.19/kernel/pid_namespace.c linux-3.10.19-vs2.3.6.8/kernel/pid_namespace.c --- linux-3.10.19/kernel/pid_namespace.c 2013-07-14 17:01:35.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/pid_namespace.c 2013-08-22 20:30:00.000000000 +0000 @@ -18,6 +18,7 @@ #include #include #include +#include struct pid_cache { int nr_ids; @@ -110,6 +111,7 @@ static struct pid_namespace *create_pid_ goto out_free_map; kref_init(&ns->kref); + atomic_inc(&vs_global_pid_ns); ns->level = level; ns->parent = get_pid_ns(parent_pid_ns); ns->user_ns = get_user_ns(user_ns); @@ -140,6 +142,7 @@ static void destroy_pid_namespace(struct for (i = 0; i < PIDMAP_ENTRIES; i++) kfree(ns->pidmap[i].page); put_user_ns(ns->user_ns); + atomic_dec(&vs_global_pid_ns); kmem_cache_free(pid_ns_cachep, ns); } diff -NurpP --minimal linux-3.10.19/kernel/posix-timers.c linux-3.10.19-vs2.3.6.8/kernel/posix-timers.c --- linux-3.10.19/kernel/posix-timers.c 2013-07-14 17:01:35.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/posix-timers.c 2013-08-22 21:10:34.000000000 +0000 @@ -48,6 +48,7 @@ #include #include #include +#include /* * Management arrays for POSIX timers. Timers are now kept in static hash table @@ -398,6 +399,7 @@ int posix_timer_event(struct k_itimer *t { struct task_struct *task; int shared, ret = -1; + /* * FIXME: if ->sigq is queued we can race with * dequeue_signal()->do_schedule_next_timer(). 
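
The kernel/pid.c hunks above route pid lookups through vx_rmap_pid()/vx_map_pid(), so the pid numbers a task sees can be remapped per context (most visibly, a guest's init task). A minimal user-space sketch of that mapping idea; the struct and helper names are hypothetical stand-ins, not the patch's own API:

    /* Illustration only: per-context pid remapping, assuming a context
     * that presents its init task as pid 1 to its own members. */
    #include <stdio.h>

    struct ctx { int initpid; };    /* hypothetical stand-in for vx_info */

    /* host pid -> pid as seen inside the context */
    static int map_pid(const struct ctx *c, int pid)
    {
        return (c->initpid && pid == c->initpid) ? 1 : pid;
    }

    /* pid as seen inside the context -> host pid */
    static int rmap_pid(const struct ctx *c, int pid)
    {
        return (c->initpid && pid == 1) ? c->initpid : pid;
    }

    int main(void)
    {
        struct ctx c = { .initpid = 4711 };
        int guest = map_pid(&c, 4711);
        printf("host 4711 -> guest %d -> host %d\n", guest, rmap_pid(&c, guest));
        return 0;
    }

In the patch itself the remapping is applied inside find_vpid(), find_task_by_pid_ns() and pid_nr_ns(), so callers keep using the regular pid interfaces.
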
@@ -414,10 +416,18 @@ int posix_timer_event(struct k_itimer *t rcu_read_lock(); task = pid_task(timr->it_pid, PIDTYPE_PID); if (task) { + struct vx_info_save vxis; + struct vx_info *vxi; + + vxi = get_vx_info(task->vx_info); + enter_vx_info(vxi, &vxis); shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID); ret = send_sigqueue(timr->sigq, task, shared); + leave_vx_info(&vxis); + put_vx_info(vxi); } rcu_read_unlock(); + /* If we failed to send the signal the timer stops. */ return ret > 0; } diff -NurpP --minimal linux-3.10.19/kernel/printk.c linux-3.10.19-vs2.3.6.8/kernel/printk.c --- linux-3.10.19/kernel/printk.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/printk.c 2013-11-13 17:17:16.000000000 +0000 @@ -45,6 +45,7 @@ #include #include #include +#include #include @@ -391,7 +392,7 @@ static int check_syslog_permissions(int return 0; if (syslog_action_restricted(type)) { - if (capable(CAP_SYSLOG)) + if (vx_capable(CAP_SYSLOG, VXC_SYSLOG)) return 0; /* * For historical reasons, accept CAP_SYS_ADMIN too, with @@ -1140,12 +1141,9 @@ int do_syslog(int type, char __user *buf if (error) return error; - switch (type) { - case SYSLOG_ACTION_CLOSE: /* Close log */ - break; - case SYSLOG_ACTION_OPEN: /* Open log */ - break; - case SYSLOG_ACTION_READ: /* Read from log */ + if ((type == SYSLOG_ACTION_READ) || + (type == SYSLOG_ACTION_READ_ALL) || + (type == SYSLOG_ACTION_READ_CLEAR)) { error = -EINVAL; if (!buf || len < 0) goto out; @@ -1156,6 +1154,16 @@ int do_syslog(int type, char __user *buf error = -EFAULT; goto out; } + } + if (!vx_check(0, VS_ADMIN|VS_WATCH)) + return vx_do_syslog(type, buf, len); + + switch (type) { + case SYSLOG_ACTION_CLOSE: /* Close log */ + break; + case SYSLOG_ACTION_OPEN: /* Open log */ + break; + case SYSLOG_ACTION_READ: /* Read from log */ error = wait_event_interruptible(log_wait, syslog_seq != log_next_seq); if (error) @@ -1168,16 +1176,6 @@ int do_syslog(int type, char __user *buf /* FALL THRU */ /* Read last kernel messages */ case SYSLOG_ACTION_READ_ALL: - error = -EINVAL; - if (!buf || len < 0) - goto out; - error = 0; - if (!len) - goto out; - if (!access_ok(VERIFY_WRITE, buf, len)) { - error = -EFAULT; - goto out; - } error = syslog_print_all(buf, len, clear); break; /* Clear ring buffer */ diff -NurpP --minimal linux-3.10.19/kernel/ptrace.c linux-3.10.19-vs2.3.6.8/kernel/ptrace.c --- linux-3.10.19/kernel/ptrace.c 2013-07-14 17:01:35.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/ptrace.c 2013-08-22 20:30:00.000000000 +0000 @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -263,6 +264,11 @@ ok: } rcu_read_unlock(); + if (!vx_check(task->xid, VS_ADMIN_P|VS_WATCH_P|VS_IDENT)) + return -EPERM; + if (!vx_check(task->xid, VS_IDENT) && + !task_vx_flags(task, VXF_STATE_ADMIN, 0)) + return -EACCES; return security_ptrace_access_check(task, mode); } diff -NurpP --minimal linux-3.10.19/kernel/sched/core.c linux-3.10.19-vs2.3.6.8/kernel/sched/core.c --- linux-3.10.19/kernel/sched/core.c 2013-07-14 17:01:35.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/sched/core.c 2013-08-22 20:30:00.000000000 +0000 @@ -73,6 +73,8 @@ #include #include #include +#include +#include #include #include @@ -2139,9 +2141,17 @@ EXPORT_SYMBOL(avenrun); /* should be rem */ void get_avenrun(unsigned long *loads, unsigned long offset, int shift) { - loads[0] = (avenrun[0] + offset) << shift; - loads[1] = (avenrun[1] + offset) << shift; - loads[2] = (avenrun[2] + offset) << shift; + if (vx_flags(VXF_VIRT_LOAD, 0)) { + struct vx_info 
*vxi = current_vx_info(); + + loads[0] = (vxi->cvirt.load[0] + offset) << shift; + loads[1] = (vxi->cvirt.load[1] + offset) << shift; + loads[2] = (vxi->cvirt.load[2] + offset) << shift; + } else { + loads[0] = (avenrun[0] + offset) << shift; + loads[1] = (avenrun[1] + offset) << shift; + loads[2] = (avenrun[2] + offset) << shift; + } } static long calc_load_fold_active(struct rq *this_rq) @@ -3738,7 +3748,7 @@ SYSCALL_DEFINE1(nice, int, increment) nice = 19; if (increment < 0 && !can_nice(current, nice)) - return -EPERM; + return vx_flags(VXF_IGNEG_NICE, 0) ? 0 : -EPERM; retval = security_task_setnice(current, nice); if (retval) diff -NurpP --minimal linux-3.10.19/kernel/sched/cputime.c linux-3.10.19-vs2.3.6.8/kernel/sched/cputime.c --- linux-3.10.19/kernel/sched/cputime.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/sched/cputime.c 2013-11-13 17:17:16.000000000 +0000 @@ -4,6 +4,7 @@ #include #include #include +#include #include "sched.h" @@ -135,14 +136,17 @@ static inline void task_group_account_fi void account_user_time(struct task_struct *p, cputime_t cputime, cputime_t cputime_scaled) { + struct vx_info *vxi = p->vx_info; /* p is _always_ current */ + int nice = (TASK_NICE(p) > 0); int index; /* Add user time to process. */ p->utime += cputime; p->utimescaled += cputime_scaled; + vx_account_user(vxi, cputime, nice); account_group_user_time(p, cputime); - index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; + index = (nice) ? CPUTIME_NICE : CPUTIME_USER; /* Add user time to cpustat. */ task_group_account_field(p, index, (__force u64) cputime); @@ -189,9 +193,12 @@ static inline void __account_system_time(struct task_struct *p, cputime_t cputime, cputime_t cputime_scaled, int index) { + struct vx_info *vxi = p->vx_info; /* p is _always_ current */ + /* Add system time to process. */ p->stime += cputime; p->stimescaled += cputime_scaled; + vx_account_system(vxi, cputime, 0 /* do we have idle time? */); account_group_system_time(p, cputime); /* Add system time to cpustat. */ diff -NurpP --minimal linux-3.10.19/kernel/sched/fair.c linux-3.10.19-vs2.3.6.8/kernel/sched/fair.c --- linux-3.10.19/kernel/sched/fair.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/sched/fair.c 2013-11-13 17:17:16.000000000 +0000 @@ -29,6 +29,7 @@ #include #include #include +#include #include @@ -1736,6 +1737,8 @@ enqueue_entity(struct cfs_rq *cfs_rq, st __enqueue_entity(cfs_rq, se); se->on_rq = 1; + if (entity_is_task(se)) + vx_activate_task(task_of(se)); if (cfs_rq->nr_running == 1) { list_add_leaf_cfs_rq(cfs_rq); check_enqueue_throttle(cfs_rq); @@ -1817,6 +1820,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, st if (se != cfs_rq->curr) __dequeue_entity(cfs_rq, se); se->on_rq = 0; + if (entity_is_task(se)) + vx_deactivate_task(task_of(se)); account_entity_dequeue(cfs_rq, se); /* diff -NurpP --minimal linux-3.10.19/kernel/signal.c linux-3.10.19-vs2.3.6.8/kernel/signal.c --- linux-3.10.19/kernel/signal.c 2013-07-14 17:01:35.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/signal.c 2013-08-22 21:13:50.000000000 +0000 @@ -33,6 +33,8 @@ #include #include #include +#include +#include #define CREATE_TRACE_POINTS #include @@ -790,9 +792,18 @@ static int check_kill_permission(int sig struct pid *sid; int error; + vxdprintk(VXD_CBIT(misc, 7), + "check_kill_permission(%d,%p,%p[#%u,%u])", + sig, info, t, vx_task_xid(t), t->pid); + if (!valid_signal(sig)) return -EINVAL; +/* FIXME: needed? if so, why? 
+ if ((info != SEND_SIG_NOINFO) && + (is_si_special(info) || !si_fromuser(info))) + goto skip; */ + if (!si_fromuser(info)) return 0; @@ -816,6 +827,20 @@ static int check_kill_permission(int sig } } + error = -EPERM; + if (t->pid == 1 && current->xid) + return error; + + error = -ESRCH; + /* FIXME: we shouldn't return ESRCH ever, to avoid + loops, maybe ENOENT or EACCES? */ + if (!vx_check(vx_task_xid(t), VS_WATCH_P | VS_IDENT)) { + vxdprintk(current->xid || VXD_CBIT(misc, 7), + "signal %d[%p] xid mismatch %p[#%u,%u] xid=#%u", + sig, info, t, vx_task_xid(t), t->pid, current->xid); + return error; + } +/* skip: */ return security_task_kill(t, info, sig, 0); } @@ -1353,7 +1378,7 @@ int kill_pid_info(int sig, struct siginf rcu_read_lock(); retry: p = pid_task(pid, PIDTYPE_PID); - if (p) { + if (p && vx_check(vx_task_xid(p), VS_IDENT)) { error = group_send_sig_info(sig, info, p); if (unlikely(error == -ESRCH)) /* @@ -1401,7 +1426,7 @@ int kill_pid_info_as_cred(int sig, struc rcu_read_lock(); p = pid_task(pid, PIDTYPE_PID); - if (!p) { + if (!p || !vx_check(vx_task_xid(p), VS_IDENT)) { ret = -ESRCH; goto out_unlock; } @@ -1453,8 +1478,10 @@ static int kill_something_info(int sig, struct task_struct * p; for_each_process(p) { - if (task_pid_vnr(p) > 1 && - !same_thread_group(p, current)) { + if (vx_check(vx_task_xid(p), VS_ADMIN|VS_IDENT) && + task_pid_vnr(p) > 1 && + !same_thread_group(p, current) && + !vx_current_initpid(p->pid)) { int err = group_send_sig_info(sig, info, p); ++count; if (err != -EPERM) @@ -2308,6 +2335,11 @@ relock: !sig_kernel_only(signr)) continue; + /* virtual init is protected against user signals */ + if ((info->si_code == SI_USER) && + vx_current_initpid(current->pid)) + continue; + if (sig_kernel_stop(signr)) { /* * The default action is to stop all threads in diff -NurpP --minimal linux-3.10.19/kernel/softirq.c linux-3.10.19-vs2.3.6.8/kernel/softirq.c --- linux-3.10.19/kernel/softirq.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/softirq.c 2013-11-13 17:17:16.000000000 +0000 @@ -25,6 +25,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include diff -NurpP --minimal linux-3.10.19/kernel/sys.c linux-3.10.19-vs2.3.6.8/kernel/sys.c --- linux-3.10.19/kernel/sys.c 2013-07-14 17:01:35.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/sys.c 2013-08-22 20:30:00.000000000 +0000 @@ -55,6 +55,7 @@ #include #include +#include /* Move somewhere else to avoid recompiling? 
*/ #include @@ -160,7 +161,10 @@ static int set_one_prio(struct task_stru goto out; } if (niceval < task_nice(p) && !can_nice(p, niceval)) { - error = -EACCES; + if (vx_flags(VXF_IGNEG_NICE, 0)) + error = 0; + else + error = -EACCES; goto out; } no_nice = security_task_setnice(p, niceval); @@ -211,6 +215,8 @@ SYSCALL_DEFINE3(setpriority, int, which, else pgrp = task_pgrp(current); do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { + if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT)) + continue; error = set_one_prio(p, niceval, error); } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); break; @@ -276,6 +282,8 @@ SYSCALL_DEFINE2(getpriority, int, which, else pgrp = task_pgrp(current); do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { + if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT)) + continue; niceval = 20 - task_nice(p); if (niceval > retval) retval = niceval; @@ -452,6 +460,8 @@ EXPORT_SYMBOL_GPL(kernel_power_off); static DEFINE_MUTEX(reboot_mutex); +long vs_reboot(unsigned int, void __user *); + /* * Reboot system call: for obvious reasons only root may call it, * and even root needs to set up some magic numbers in the registers @@ -494,6 +504,9 @@ SYSCALL_DEFINE4(reboot, int, magic1, int if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off) cmd = LINUX_REBOOT_CMD_HALT; + if (!vx_check(0, VS_ADMIN|VS_WATCH)) + return vs_reboot(cmd, arg); + mutex_lock(&reboot_mutex); switch (cmd) { case LINUX_REBOOT_CMD_RESTART: @@ -1462,7 +1475,8 @@ SYSCALL_DEFINE2(sethostname, char __user int errno; char tmp[__NEW_UTS_LEN]; - if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN)) + if (!vx_ns_capable(current->nsproxy->uts_ns->user_ns, + CAP_SYS_ADMIN, VXC_SET_UTSNAME)) return -EPERM; if (len < 0 || len > __NEW_UTS_LEN) @@ -1513,7 +1527,8 @@ SYSCALL_DEFINE2(setdomainname, char __us int errno; char tmp[__NEW_UTS_LEN]; - if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN)) + if (!vx_ns_capable(current->nsproxy->uts_ns->user_ns, + CAP_SYS_ADMIN, VXC_SET_UTSNAME)) return -EPERM; if (len < 0 || len > __NEW_UTS_LEN) return -EINVAL; @@ -1632,7 +1647,7 @@ int do_prlimit(struct task_struct *tsk, /* Keep the capable check against init_user_ns until cgroups can contain all limits */ if (new_rlim->rlim_max > rlim->rlim_max && - !capable(CAP_SYS_RESOURCE)) + !vx_capable(CAP_SYS_RESOURCE, VXC_SET_RLIMIT)) retval = -EPERM; if (!retval) retval = security_task_setrlimit(tsk->group_leader, @@ -1685,7 +1700,8 @@ static int check_prlimit_permission(stru gid_eq(cred->gid, tcred->sgid) && gid_eq(cred->gid, tcred->gid)) return 0; - if (ns_capable(tcred->user_ns, CAP_SYS_RESOURCE)) + if (vx_ns_capable(tcred->user_ns, + CAP_SYS_RESOURCE, VXC_SET_RLIMIT)) return 0; return -EPERM; diff -NurpP --minimal linux-3.10.19/kernel/sysctl.c linux-3.10.19-vs2.3.6.8/kernel/sysctl.c --- linux-3.10.19/kernel/sysctl.c 2013-07-14 17:01:35.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/sysctl.c 2013-08-22 20:30:00.000000000 +0000 @@ -83,6 +83,7 @@ #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_LOCK_STAT) #include #endif +extern char vshelper_path[]; #ifdef CONFIG_CHR_DEV_SG #include #endif @@ -628,6 +629,13 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = proc_dostring, }, + { + .procname = "vshelper", + .data = &vshelper_path, + .maxlen = 256, + .mode = 0644, + .proc_handler = &proc_dostring, + }, #ifdef CONFIG_CHR_DEV_SG { diff -NurpP --minimal linux-3.10.19/kernel/sysctl_binary.c linux-3.10.19-vs2.3.6.8/kernel/sysctl_binary.c --- linux-3.10.19/kernel/sysctl_binary.c 2013-07-14 17:01:35.000000000 +0000 
+++ linux-3.10.19-vs2.3.6.8/kernel/sysctl_binary.c 2013-08-22 20:30:00.000000000 +0000 @@ -74,6 +74,7 @@ static const struct bin_table bin_kern_t { CTL_INT, KERN_PANIC, "panic" }, { CTL_INT, KERN_REALROOTDEV, "real-root-dev" }, + { CTL_STR, KERN_VSHELPER, "vshelper" }, { CTL_STR, KERN_SPARC_REBOOT, "reboot-cmd" }, { CTL_INT, KERN_CTLALTDEL, "ctrl-alt-del" }, diff -NurpP --minimal linux-3.10.19/kernel/time/timekeeping.c linux-3.10.19-vs2.3.6.8/kernel/time/timekeeping.c --- linux-3.10.19/kernel/time/timekeeping.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/time/timekeeping.c 2013-11-13 17:17:16.000000000 +0000 @@ -22,6 +22,7 @@ #include #include #include +#include #include "tick-internal.h" #include "ntp_internal.h" @@ -693,6 +694,7 @@ void getrawmonotonic(struct timespec *ts } while (read_seqcount_retry(&timekeeper_seq, seq)); timespec_add_ns(ts, nsecs); + vx_adjust_timespec(ts); } EXPORT_SYMBOL(getrawmonotonic); diff -NurpP --minimal linux-3.10.19/kernel/time.c linux-3.10.19-vs2.3.6.8/kernel/time.c --- linux-3.10.19/kernel/time.c 2013-07-14 17:01:35.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/time.c 2013-08-22 20:30:00.000000000 +0000 @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -92,7 +93,7 @@ SYSCALL_DEFINE1(stime, time_t __user *, if (err) return err; - do_settimeofday(&tv); + vx_settimeofday(&tv); return 0; } @@ -181,7 +182,7 @@ int do_sys_settimeofday(const struct tim } } if (tv) - return do_settimeofday(tv); + return vx_settimeofday(tv); return 0; } diff -NurpP --minimal linux-3.10.19/kernel/timer.c linux-3.10.19-vs2.3.6.8/kernel/timer.c --- linux-3.10.19/kernel/timer.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/timer.c 2013-11-13 17:17:16.000000000 +0000 @@ -42,6 +42,10 @@ #include #include #include +#include +#include +#include +#include #include #include diff -NurpP --minimal linux-3.10.19/kernel/user_namespace.c linux-3.10.19-vs2.3.6.8/kernel/user_namespace.c --- linux-3.10.19/kernel/user_namespace.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/user_namespace.c 2013-11-13 17:17:16.000000000 +0000 @@ -22,6 +22,7 @@ #include #include #include +#include static struct kmem_cache *user_ns_cachep __read_mostly; @@ -94,6 +95,7 @@ int create_user_ns(struct cred *new) atomic_set(&ns->count, 1); /* Leave the new->user_ns reference with the new user namespace. */ + atomic_inc(&vs_global_user_ns); ns->parent = parent_ns; ns->level = parent_ns->level + 1; ns->owner = owner; @@ -844,6 +846,8 @@ static void *userns_get(struct task_stru static void userns_put(void *ns) { + /* FIXME: maybe move into destroyer? 
*/ + atomic_dec(&vs_global_user_ns); put_user_ns(ns); } diff -NurpP --minimal linux-3.10.19/kernel/utsname.c linux-3.10.19-vs2.3.6.8/kernel/utsname.c --- linux-3.10.19/kernel/utsname.c 2013-07-14 17:01:35.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/utsname.c 2013-08-22 21:03:08.000000000 +0000 @@ -16,14 +16,17 @@ #include #include #include +#include static struct uts_namespace *create_uts_ns(void) { struct uts_namespace *uts_ns; uts_ns = kmalloc(sizeof(struct uts_namespace), GFP_KERNEL); - if (uts_ns) - kref_init(&uts_ns->kref); + if (uts_ns) { + kref_init(&uts_ns->kref); + atomic_inc(&vs_global_uts_ns); + } return uts_ns; } @@ -85,6 +88,7 @@ void free_uts_ns(struct kref *kref) ns = container_of(kref, struct uts_namespace, kref); put_user_ns(ns->user_ns); proc_free_inum(ns->proc_inum); + atomic_dec(&vs_global_uts_ns); kfree(ns); } diff -NurpP --minimal linux-3.10.19/kernel/vserver/Kconfig linux-3.10.19-vs2.3.6.8/kernel/vserver/Kconfig --- linux-3.10.19/kernel/vserver/Kconfig 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/Kconfig 2013-11-14 04:29:47.000000000 +0000 @@ -0,0 +1,230 @@ +# +# Linux VServer configuration +# + +menu "Linux VServer" + +config VSERVER_AUTO_LBACK + bool "Automatically Assign Loopback IP" + default y + help + Automatically assign a guest specific loopback + IP and add it to the kernel network stack on + startup. + +config VSERVER_AUTO_SINGLE + bool "Automatic Single IP Special Casing" + default n + help + This allows network contexts with a single IP to + automatically remap 0.0.0.0 bindings to that IP, + avoiding further network checks and improving + performance. + + (note: such guests do not allow to change the ip + on the fly and do not show loopback addresses) + +config VSERVER_COWBL + bool "Enable COW Immutable Link Breaking" + default y + help + This enables the COW (Copy-On-Write) link break code. + It allows you to treat unified files like normal files + when writing to them (which will implicitely break the + link and create a copy of the unified file) + +config VSERVER_VTIME + bool "Enable Virtualized Guest Time (EXPERIMENTAL)" + default n + help + This enables per guest time offsets to allow for + adjusting the system clock individually per guest. + this adds some overhead to the time functions and + therefore should not be enabled without good reason. + +config VSERVER_DEVICE + bool "Enable Guest Device Mapping (EXPERIMENTAL)" + default n + help + This enables generic device remapping. + +config VSERVER_PROC_SECURE + bool "Enable Proc Security" + depends on PROC_FS + default y + help + This configures ProcFS security to initially hide + non-process entries for all contexts except the main and + spectator context (i.e. for all guests), which is a secure + default. + + (note: on 1.2x the entries were visible by default) + +choice + prompt "Persistent Inode Tagging" + default TAGGING_ID24 + help + This adds persistent context information to filesystems + mounted with the tagxid option. Tagging is a requirement + for per-context disk limits and per-context quota. + + +config TAGGING_NONE + bool "Disabled" + help + do not store per-context information in inodes. + +config TAGGING_UID16 + bool "UID16/GID32" + help + reduces UID to 16 bit, but leaves GID at 32 bit. + +config TAGGING_GID16 + bool "UID32/GID16" + help + reduces GID to 16 bit, but leaves UID at 32 bit. 
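
The entries in this tagging choice trade uid/gid bits for a persistent context tag stored with the inode. A small sketch of the arithmetic for two of the splits (UID16 above, ID24 below), assuming the bit layouts implied by the help texts; the real extraction macros live in the vserver tag headers and are not shown here:

    /* Illustration only: plausible tag splits matching the help texts. */
    #include <stdint.h>
    #include <stdio.h>

    /* UID16: the upper 16 bits of the on-disk uid carry the tag,
     * the lower 16 bits remain for the real uid. */
    static unsigned uid16_tag(uint32_t disk_uid) { return disk_uid >> 16; }
    static unsigned uid16_uid(uint32_t disk_uid) { return disk_uid & 0xffff; }

    /* ID24: the top byte of uid and of gid together form a 16 bit tag,
     * leaving 24 bits each for the real uid and gid. */
    static unsigned id24_tag(uint32_t disk_uid, uint32_t disk_gid)
    {
        return ((disk_uid >> 24) << 8) | (disk_gid >> 24);
    }

    int main(void)
    {
        uint32_t u16 = (5u << 16) | 1000;    /* tag 5, uid 1000 */
        uint32_t u24 = (42u << 24) | 1000;   /* tag byte 42, uid 1000 */
        uint32_t g24 = (7u << 24) | 1000;    /* tag byte 7, gid 1000 */

        printf("uid16: tag=%u uid=%u\n", uid16_tag(u16), uid16_uid(u16));
        printf("id24:  tag=%u\n", id24_tag(u24, g24));  /* 42*256 + 7 = 10759 */
        return 0;
    }

Which split fits best depends on how many distinct uids/gids and contexts the host has to represent; TAGGING_INTERN below avoids the trade-off by using otherwise reserved on-disk inode fields instead.
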
+ +config TAGGING_ID24 + bool "UID24/GID24" + help + uses the upper 8bit from UID and GID for XID tagging + which leaves 24bit for UID/GID each, which should be + more than sufficient for normal use. + +config TAGGING_INTERN + bool "UID32/GID32" + help + this uses otherwise reserved inode fields in the on + disk representation, which limits the use to a few + filesystems (currently ext2 and ext3) + +endchoice + +config TAG_NFSD + bool "Tag NFSD User Auth and Files" + default n + help + Enable this if you do want the in-kernel NFS + Server to use the tagging specified above. + (will require patched clients too) + +config VSERVER_PRIVACY + bool "Honor Privacy Aspects of Guests" + default n + help + When enabled, most context checks will disallow + access to structures assigned to a specific context, + like ptys or loop devices. + +config VSERVER_CONTEXTS + int "Maximum number of Contexts (1-65533)" if EMBEDDED + range 1 65533 + default "768" if 64BIT + default "256" + help + This setting will optimize certain data structures + and memory allocations according to the expected + maximum. + + note: this is not a strict upper limit. + +config VSERVER_WARN + bool "VServer Warnings" + default y + help + This enables various runtime warnings, which will + notify about potential manipulation attempts or + resource shortage. It is generally considered to + be a good idea to have that enabled. + +config VSERVER_WARN_DEVPTS + bool "VServer DevPTS Warnings" + depends on VSERVER_WARN + default y + help + This enables DevPTS related warnings, issued when a + process inside a context tries to lookup or access + a dynamic pts from the host or a different context. + +config VSERVER_DEBUG + bool "VServer Debugging Code" + default n + help + Set this to yes if you want to be able to activate + debugging output at runtime. It adds a very small + overhead to all vserver related functions and + increases the kernel size by about 20k. + +config VSERVER_HISTORY + bool "VServer History Tracing" + depends on VSERVER_DEBUG + default n + help + Set this to yes if you want to record the history of + linux-vserver activities, so they can be replayed in + the event of a kernel panic or oops. + +config VSERVER_HISTORY_SIZE + int "Per-CPU History Size (32-65536)" + depends on VSERVER_HISTORY + range 32 65536 + default 64 + help + This allows you to specify the number of entries in + the per-CPU history buffer. + +config VSERVER_EXTRA_MNT_CHECK + bool "Extra Checks for Reachability" + default n + help + Set this to yes if you want to do extra checks for + vfsmount reachability in the proc filesystem code. + This shouldn't be required on any setup utilizing + mnt namespaces. + +choice + prompt "Quotes used in debug and warn messages" + default QUOTES_ISO8859 + +config QUOTES_ISO8859 + bool "Extended ASCII (ISO 8859) angle quotes" + help + This uses the extended ASCII characters \xbb + and \xab for quoting file and process names. + +config QUOTES_UTF8 + bool "UTF-8 angle quotes" + help + This uses the the UTF-8 sequences for angle + quotes to quote file and process names. + +config QUOTES_ASCII + bool "ASCII single quotes" + help + This uses the ASCII single quote character + (\x27) to quote file and process names. 
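
For reference, the three quote styles in the choice above differ only in the byte sequences emitted around file and process names by the debug/warn messages (the VS_Q() quoting macro appears later in this patch). A tiny sketch with hypothetical macro names:

    /* Illustration only: the byte sequences behind the three quote styles. */
    #include <stdio.h>

    #define Q_ISO8859(s)  "\xbb" s "\xab"          /* angle quotes, ISO 8859 */
    #define Q_UTF8(s)     "\xc2\xbb" s "\xc2\xab"  /* angle quotes, UTF-8 */
    #define Q_ASCII(s)    "'" s "'"                /* plain ASCII single quotes */

    int main(void)
    {
        printf("%s  %s  %s\n", Q_ISO8859("init"), Q_UTF8("init"), Q_ASCII("init"));
        return 0;
    }
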
+ +endchoice + +endmenu + + +config VSERVER + bool + default y + select NAMESPACES + select UTS_NS + select IPC_NS +# select USER_NS + select SYSVIPC + +config VSERVER_SECURITY + bool + depends on SECURITY + default y + select SECURITY_CAPABILITIES + +config VSERVER_DISABLED + bool + default n + diff -NurpP --minimal linux-3.10.19/kernel/vserver/Makefile linux-3.10.19-vs2.3.6.8/kernel/vserver/Makefile --- linux-3.10.19/kernel/vserver/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/Makefile 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,18 @@ +# +# Makefile for the Linux vserver routines. +# + + +obj-y += vserver.o + +vserver-y := switch.o context.o space.o sched.o network.o inode.o \ + limit.o cvirt.o cacct.o signal.o helper.o init.o \ + dlimit.o tag.o + +vserver-$(CONFIG_INET) += inet.o +vserver-$(CONFIG_PROC_FS) += proc.o +vserver-$(CONFIG_VSERVER_DEBUG) += sysctl.o debug.o +vserver-$(CONFIG_VSERVER_HISTORY) += history.o +vserver-$(CONFIG_VSERVER_MONITOR) += monitor.o +vserver-$(CONFIG_VSERVER_DEVICE) += device.o + diff -NurpP --minimal linux-3.10.19/kernel/vserver/cacct.c linux-3.10.19-vs2.3.6.8/kernel/vserver/cacct.c --- linux-3.10.19/kernel/vserver/cacct.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/cacct.c 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,42 @@ +/* + * linux/kernel/vserver/cacct.c + * + * Virtual Server: Context Accounting + * + * Copyright (C) 2006-2007 Herbert Pötzl + * + * V0.01 added accounting stats + * + */ + +#include +#include +#include +#include + +#include +#include + + +int vc_sock_stat(struct vx_info *vxi, void __user *data) +{ + struct vcmd_sock_stat_v0 vc_data; + int j, field; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + field = vc_data.field; + if ((field < 0) || (field >= VXA_SOCK_SIZE)) + return -EINVAL; + + for (j = 0; j < 3; j++) { + vc_data.count[j] = vx_sock_count(&vxi->cacct, field, j); + vc_data.total[j] = vx_sock_total(&vxi->cacct, field, j); + } + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + diff -NurpP --minimal linux-3.10.19/kernel/vserver/cacct_init.h linux-3.10.19-vs2.3.6.8/kernel/vserver/cacct_init.h --- linux-3.10.19/kernel/vserver/cacct_init.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/cacct_init.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,25 @@ + + +static inline void vx_info_init_cacct(struct _vx_cacct *cacct) +{ + int i, j; + + + for (i = 0; i < VXA_SOCK_SIZE; i++) { + for (j = 0; j < 3; j++) { + atomic_long_set(&cacct->sock[i][j].count, 0); + atomic_long_set(&cacct->sock[i][j].total, 0); + } + } + for (i = 0; i < 8; i++) + atomic_set(&cacct->slab[i], 0); + for (i = 0; i < 5; i++) + for (j = 0; j < 4; j++) + atomic_set(&cacct->page[i][j], 0); +} + +static inline void vx_info_exit_cacct(struct _vx_cacct *cacct) +{ + return; +} + diff -NurpP --minimal linux-3.10.19/kernel/vserver/cacct_proc.h linux-3.10.19-vs2.3.6.8/kernel/vserver/cacct_proc.h --- linux-3.10.19/kernel/vserver/cacct_proc.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/cacct_proc.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,53 @@ +#ifndef _VX_CACCT_PROC_H +#define _VX_CACCT_PROC_H + +#include + + +#define VX_SOCKA_TOP \ + "Type\t recv #/bytes\t\t send #/bytes\t\t fail #/bytes\n" + +static inline int vx_info_proc_cacct(struct _vx_cacct *cacct, char *buffer) +{ + int i, j, length = 0; + static char *type[VXA_SOCK_SIZE] = { + "UNSPEC", "UNIX", 
"INET", "INET6", "PACKET", "OTHER" + }; + + length += sprintf(buffer + length, VX_SOCKA_TOP); + for (i = 0; i < VXA_SOCK_SIZE; i++) { + length += sprintf(buffer + length, "%s:", type[i]); + for (j = 0; j < 3; j++) { + length += sprintf(buffer + length, + "\t%10lu/%-10lu", + vx_sock_count(cacct, i, j), + vx_sock_total(cacct, i, j)); + } + buffer[length++] = '\n'; + } + + length += sprintf(buffer + length, "\n"); + length += sprintf(buffer + length, + "slab:\t %8u %8u %8u %8u\n", + atomic_read(&cacct->slab[1]), + atomic_read(&cacct->slab[4]), + atomic_read(&cacct->slab[0]), + atomic_read(&cacct->slab[2])); + + length += sprintf(buffer + length, "\n"); + for (i = 0; i < 5; i++) { + length += sprintf(buffer + length, + "page[%d]: %8u %8u %8u %8u\t %8u %8u %8u %8u\n", i, + atomic_read(&cacct->page[i][0]), + atomic_read(&cacct->page[i][1]), + atomic_read(&cacct->page[i][2]), + atomic_read(&cacct->page[i][3]), + atomic_read(&cacct->page[i][4]), + atomic_read(&cacct->page[i][5]), + atomic_read(&cacct->page[i][6]), + atomic_read(&cacct->page[i][7])); + } + return length; +} + +#endif /* _VX_CACCT_PROC_H */ diff -NurpP --minimal linux-3.10.19/kernel/vserver/context.c linux-3.10.19-vs2.3.6.8/kernel/vserver/context.c --- linux-3.10.19/kernel/vserver/context.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/context.c 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,1119 @@ +/* + * linux/kernel/vserver/context.c + * + * Virtual Server: Context Support + * + * Copyright (C) 2003-2011 Herbert Pötzl + * + * V0.01 context helper + * V0.02 vx_ctx_kill syscall command + * V0.03 replaced context_info calls + * V0.04 redesign of struct (de)alloc + * V0.05 rlimit basic implementation + * V0.06 task_xid and info commands + * V0.07 context flags and caps + * V0.08 switch to RCU based hash + * V0.09 revert to non RCU for now + * V0.10 and back to working RCU hash + * V0.11 and back to locking again + * V0.12 referenced context store + * V0.13 separate per cpu data + * V0.14 changed vcmds to vxi arg + * V0.15 added context stat + * V0.16 have __create claim() the vxi + * V0.17 removed older and legacy stuff + * V0.18 added user credentials + * V0.19 added warn mask + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "cvirt_init.h" +#include "cacct_init.h" +#include "limit_init.h" +#include "sched_init.h" + + +atomic_t vx_global_ctotal = ATOMIC_INIT(0); +atomic_t vx_global_cactive = ATOMIC_INIT(0); + + +/* now inactive context structures */ + +static struct hlist_head vx_info_inactive = HLIST_HEAD_INIT; + +static DEFINE_SPINLOCK(vx_info_inactive_lock); + + +/* __alloc_vx_info() + + * allocate an initialized vx_info struct + * doesn't make it visible (hash) */ + +static struct vx_info *__alloc_vx_info(vxid_t xid) +{ + struct vx_info *new = NULL; + int cpu, index; + + vxdprintk(VXD_CBIT(xid, 0), "alloc_vx_info(%d)*", xid); + + /* would this benefit from a slab cache? 
*/ + new = kmalloc(sizeof(struct vx_info), GFP_KERNEL); + if (!new) + return 0; + + memset(new, 0, sizeof(struct vx_info)); +#ifdef CONFIG_SMP + new->ptr_pc = alloc_percpu(struct _vx_info_pc); + if (!new->ptr_pc) + goto error; +#endif + new->vx_id = xid; + INIT_HLIST_NODE(&new->vx_hlist); + atomic_set(&new->vx_usecnt, 0); + atomic_set(&new->vx_tasks, 0); + new->vx_parent = NULL; + new->vx_state = 0; + init_waitqueue_head(&new->vx_wait); + + /* prepare reaper */ + get_task_struct(init_pid_ns.child_reaper); + new->vx_reaper = init_pid_ns.child_reaper; + new->vx_badness_bias = 0; + + /* rest of init goes here */ + vx_info_init_limit(&new->limit); + vx_info_init_sched(&new->sched); + vx_info_init_cvirt(&new->cvirt); + vx_info_init_cacct(&new->cacct); + + /* per cpu data structures */ + for_each_possible_cpu(cpu) { + vx_info_init_sched_pc( + &vx_per_cpu(new, sched_pc, cpu), cpu); + vx_info_init_cvirt_pc( + &vx_per_cpu(new, cvirt_pc, cpu), cpu); + } + + new->vx_flags = VXF_INIT_SET; + new->vx_bcaps = CAP_FULL_SET; // maybe ~CAP_SETPCAP + new->vx_ccaps = 0; + new->vx_umask = 0; + new->vx_wmask = 0; + + new->reboot_cmd = 0; + new->exit_code = 0; + + // preconfig spaces + for (index = 0; index < VX_SPACES; index++) { + struct _vx_space *space = &new->space[index]; + + // filesystem + spin_lock(&init_fs.lock); + init_fs.users++; + spin_unlock(&init_fs.lock); + space->vx_fs = &init_fs; + + /* FIXME: do we want defaults? */ + // space->vx_real_cred = 0; + // space->vx_cred = 0; + } + + + vxdprintk(VXD_CBIT(xid, 0), + "alloc_vx_info(%d) = %p", xid, new); + vxh_alloc_vx_info(new); + atomic_inc(&vx_global_ctotal); + return new; +#ifdef CONFIG_SMP +error: + kfree(new); + return 0; +#endif +} + +/* __dealloc_vx_info() + + * final disposal of vx_info */ + +static void __dealloc_vx_info(struct vx_info *vxi) +{ +#ifdef CONFIG_VSERVER_WARN + struct vx_info_save vxis; + int cpu; +#endif + vxdprintk(VXD_CBIT(xid, 0), + "dealloc_vx_info(%p)", vxi); + vxh_dealloc_vx_info(vxi); + +#ifdef CONFIG_VSERVER_WARN + enter_vx_info(vxi, &vxis); + vx_info_exit_limit(&vxi->limit); + vx_info_exit_sched(&vxi->sched); + vx_info_exit_cvirt(&vxi->cvirt); + vx_info_exit_cacct(&vxi->cacct); + + for_each_possible_cpu(cpu) { + vx_info_exit_sched_pc( + &vx_per_cpu(vxi, sched_pc, cpu), cpu); + vx_info_exit_cvirt_pc( + &vx_per_cpu(vxi, cvirt_pc, cpu), cpu); + } + leave_vx_info(&vxis); +#endif + + vxi->vx_id = -1; + vxi->vx_state |= VXS_RELEASED; + +#ifdef CONFIG_SMP + free_percpu(vxi->ptr_pc); +#endif + kfree(vxi); + atomic_dec(&vx_global_ctotal); +} + +static void __shutdown_vx_info(struct vx_info *vxi) +{ + struct nsproxy *nsproxy; + struct fs_struct *fs; + struct cred *cred; + int index, kill; + + might_sleep(); + + vxi->vx_state |= VXS_SHUTDOWN; + vs_state_change(vxi, VSC_SHUTDOWN); + + for (index = 0; index < VX_SPACES; index++) { + struct _vx_space *space = &vxi->space[index]; + + nsproxy = xchg(&space->vx_nsproxy, NULL); + if (nsproxy) + put_nsproxy(nsproxy); + + fs = xchg(&space->vx_fs, NULL); + spin_lock(&fs->lock); + kill = !--fs->users; + spin_unlock(&fs->lock); + if (kill) + free_fs_struct(fs); + + cred = (struct cred *)xchg(&space->vx_cred, NULL); + if (cred) + abort_creds(cred); + } +} + +/* exported stuff */ + +void free_vx_info(struct vx_info *vxi) +{ + unsigned long flags; + unsigned index; + + /* check for reference counts first */ + BUG_ON(atomic_read(&vxi->vx_usecnt)); + BUG_ON(atomic_read(&vxi->vx_tasks)); + + /* context must not be hashed */ + BUG_ON(vx_info_state(vxi, VXS_HASHED)); + + /* context shutdown is 
mandatory */ + BUG_ON(!vx_info_state(vxi, VXS_SHUTDOWN)); + + /* spaces check */ + for (index = 0; index < VX_SPACES; index++) { + struct _vx_space *space = &vxi->space[index]; + + BUG_ON(space->vx_nsproxy); + BUG_ON(space->vx_fs); + // BUG_ON(space->vx_real_cred); + // BUG_ON(space->vx_cred); + } + + spin_lock_irqsave(&vx_info_inactive_lock, flags); + hlist_del(&vxi->vx_hlist); + spin_unlock_irqrestore(&vx_info_inactive_lock, flags); + + __dealloc_vx_info(vxi); +} + + +/* hash table for vx_info hash */ + +#define VX_HASH_SIZE 13 + +static struct hlist_head vx_info_hash[VX_HASH_SIZE] = + { [0 ... VX_HASH_SIZE-1] = HLIST_HEAD_INIT }; + +static DEFINE_SPINLOCK(vx_info_hash_lock); + + +static inline unsigned int __hashval(vxid_t xid) +{ + return (xid % VX_HASH_SIZE); +} + + + +/* __hash_vx_info() + + * add the vxi to the global hash table + * requires the hash_lock to be held */ + +static inline void __hash_vx_info(struct vx_info *vxi) +{ + struct hlist_head *head; + + vxd_assert_lock(&vx_info_hash_lock); + vxdprintk(VXD_CBIT(xid, 4), + "__hash_vx_info: %p[#%d]", vxi, vxi->vx_id); + vxh_hash_vx_info(vxi); + + /* context must not be hashed */ + BUG_ON(vx_info_state(vxi, VXS_HASHED)); + + vxi->vx_state |= VXS_HASHED; + head = &vx_info_hash[__hashval(vxi->vx_id)]; + hlist_add_head(&vxi->vx_hlist, head); + atomic_inc(&vx_global_cactive); +} + +/* __unhash_vx_info() + + * remove the vxi from the global hash table + * requires the hash_lock to be held */ + +static inline void __unhash_vx_info(struct vx_info *vxi) +{ + unsigned long flags; + + vxd_assert_lock(&vx_info_hash_lock); + vxdprintk(VXD_CBIT(xid, 4), + "__unhash_vx_info: %p[#%d.%d.%d]", vxi, vxi->vx_id, + atomic_read(&vxi->vx_usecnt), atomic_read(&vxi->vx_tasks)); + vxh_unhash_vx_info(vxi); + + /* context must be hashed */ + BUG_ON(!vx_info_state(vxi, VXS_HASHED)); + /* but without tasks */ + BUG_ON(atomic_read(&vxi->vx_tasks)); + + vxi->vx_state &= ~VXS_HASHED; + hlist_del_init(&vxi->vx_hlist); + spin_lock_irqsave(&vx_info_inactive_lock, flags); + hlist_add_head(&vxi->vx_hlist, &vx_info_inactive); + spin_unlock_irqrestore(&vx_info_inactive_lock, flags); + atomic_dec(&vx_global_cactive); +} + + +/* __lookup_vx_info() + + * requires the hash_lock to be held + * doesn't increment the vx_refcnt */ + +static inline struct vx_info *__lookup_vx_info(vxid_t xid) +{ + struct hlist_head *head = &vx_info_hash[__hashval(xid)]; + struct hlist_node *pos; + struct vx_info *vxi; + + vxd_assert_lock(&vx_info_hash_lock); + hlist_for_each(pos, head) { + vxi = hlist_entry(pos, struct vx_info, vx_hlist); + + if (vxi->vx_id == xid) + goto found; + } + vxi = NULL; +found: + vxdprintk(VXD_CBIT(xid, 0), + "__lookup_vx_info(#%u): %p[#%u]", + xid, vxi, vxi ? 
vxi->vx_id : 0); + vxh_lookup_vx_info(vxi, xid); + return vxi; +} + + +/* __create_vx_info() + + * create the requested context + * get(), claim() and hash it */ + +static struct vx_info *__create_vx_info(int id) +{ + struct vx_info *new, *vxi = NULL; + + vxdprintk(VXD_CBIT(xid, 1), "create_vx_info(%d)*", id); + + if (!(new = __alloc_vx_info(id))) + return ERR_PTR(-ENOMEM); + + /* required to make dynamic xids unique */ + spin_lock(&vx_info_hash_lock); + + /* static context requested */ + if ((vxi = __lookup_vx_info(id))) { + vxdprintk(VXD_CBIT(xid, 0), + "create_vx_info(%d) = %p (already there)", id, vxi); + if (vx_info_flags(vxi, VXF_STATE_SETUP, 0)) + vxi = ERR_PTR(-EBUSY); + else + vxi = ERR_PTR(-EEXIST); + goto out_unlock; + } + /* new context */ + vxdprintk(VXD_CBIT(xid, 0), + "create_vx_info(%d) = %p (new)", id, new); + claim_vx_info(new, NULL); + __hash_vx_info(get_vx_info(new)); + vxi = new, new = NULL; + +out_unlock: + spin_unlock(&vx_info_hash_lock); + vxh_create_vx_info(IS_ERR(vxi) ? NULL : vxi, id); + if (new) + __dealloc_vx_info(new); + return vxi; +} + + +/* exported stuff */ + + +void unhash_vx_info(struct vx_info *vxi) +{ + spin_lock(&vx_info_hash_lock); + __unhash_vx_info(vxi); + spin_unlock(&vx_info_hash_lock); + __shutdown_vx_info(vxi); + __wakeup_vx_info(vxi); +} + + +/* lookup_vx_info() + + * search for a vx_info and get() it + * negative id means current */ + +struct vx_info *lookup_vx_info(int id) +{ + struct vx_info *vxi = NULL; + + if (id < 0) { + vxi = get_vx_info(current_vx_info()); + } else if (id > 1) { + spin_lock(&vx_info_hash_lock); + vxi = get_vx_info(__lookup_vx_info(id)); + spin_unlock(&vx_info_hash_lock); + } + return vxi; +} + +/* xid_is_hashed() + + * verify that xid is still hashed */ + +int xid_is_hashed(vxid_t xid) +{ + int hashed; + + spin_lock(&vx_info_hash_lock); + hashed = (__lookup_vx_info(xid) != NULL); + spin_unlock(&vx_info_hash_lock); + return hashed; +} + +#ifdef CONFIG_PROC_FS + +/* get_xid_list() + + * get a subset of hashed xids for proc + * assumes size is at least one */ + +int get_xid_list(int index, unsigned int *xids, int size) +{ + int hindex, nr_xids = 0; + + /* only show current and children */ + if (!vx_check(0, VS_ADMIN | VS_WATCH)) { + if (index > 0) + return 0; + xids[nr_xids] = vx_current_xid(); + return 1; + } + + for (hindex = 0; hindex < VX_HASH_SIZE; hindex++) { + struct hlist_head *head = &vx_info_hash[hindex]; + struct hlist_node *pos; + + spin_lock(&vx_info_hash_lock); + hlist_for_each(pos, head) { + struct vx_info *vxi; + + if (--index > 0) + continue; + + vxi = hlist_entry(pos, struct vx_info, vx_hlist); + xids[nr_xids] = vxi->vx_id; + if (++nr_xids >= size) { + spin_unlock(&vx_info_hash_lock); + goto out; + } + } + /* keep the lock time short */ + spin_unlock(&vx_info_hash_lock); + } +out: + return nr_xids; +} +#endif + +#ifdef CONFIG_VSERVER_DEBUG + +void dump_vx_info_inactive(int level) +{ + struct hlist_node *entry, *next; + + hlist_for_each_safe(entry, next, &vx_info_inactive) { + struct vx_info *vxi = + list_entry(entry, struct vx_info, vx_hlist); + + dump_vx_info(vxi, level); + } +} + +#endif + +#if 0 +int vx_migrate_user(struct task_struct *p, struct vx_info *vxi) +{ + struct user_struct *new_user, *old_user; + + if (!p || !vxi) + BUG(); + + if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0)) + return -EACCES; + + new_user = alloc_uid(vxi->vx_id, p->uid); + if (!new_user) + return -ENOMEM; + + old_user = p->user; + if (new_user != old_user) { + atomic_inc(&new_user->processes); + atomic_dec(&old_user->processes); 
+ p->user = new_user; + } + free_uid(old_user); + return 0; +} +#endif + +#if 0 +void vx_mask_cap_bset(struct vx_info *vxi, struct task_struct *p) +{ + // p->cap_effective &= vxi->vx_cap_bset; + p->cap_effective = + cap_intersect(p->cap_effective, vxi->cap_bset); + // p->cap_inheritable &= vxi->vx_cap_bset; + p->cap_inheritable = + cap_intersect(p->cap_inheritable, vxi->cap_bset); + // p->cap_permitted &= vxi->vx_cap_bset; + p->cap_permitted = + cap_intersect(p->cap_permitted, vxi->cap_bset); +} +#endif + + +#include +#include + +static int vx_openfd_task(struct task_struct *tsk) +{ + struct files_struct *files = tsk->files; + struct fdtable *fdt; + const unsigned long *bptr; + int count, total; + + /* no rcu_read_lock() because of spin_lock() */ + spin_lock(&files->file_lock); + fdt = files_fdtable(files); + bptr = fdt->open_fds; + count = fdt->max_fds / (sizeof(unsigned long) * 8); + for (total = 0; count > 0; count--) { + if (*bptr) + total += hweight_long(*bptr); + bptr++; + } + spin_unlock(&files->file_lock); + return total; +} + + +/* for *space compatibility */ + +asmlinkage long sys_unshare(unsigned long); + +/* + * migrate task to new context + * gets vxi, puts old_vxi on change + * optionally unshares namespaces (hack) + */ + +int vx_migrate_task(struct task_struct *p, struct vx_info *vxi, int unshare) +{ + struct vx_info *old_vxi; + int ret = 0; + + if (!p || !vxi) + BUG(); + + vxdprintk(VXD_CBIT(xid, 5), + "vx_migrate_task(%p,%p[#%d.%d])", p, vxi, + vxi->vx_id, atomic_read(&vxi->vx_usecnt)); + + if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0) && + !vx_info_flags(vxi, VXF_STATE_SETUP, 0)) + return -EACCES; + + if (vx_info_state(vxi, VXS_SHUTDOWN)) + return -EFAULT; + + old_vxi = task_get_vx_info(p); + if (old_vxi == vxi) + goto out; + +// if (!(ret = vx_migrate_user(p, vxi))) { + { + int openfd; + + task_lock(p); + openfd = vx_openfd_task(p); + + if (old_vxi) { + atomic_dec(&old_vxi->cvirt.nr_threads); + atomic_dec(&old_vxi->cvirt.nr_running); + __rlim_dec(&old_vxi->limit, RLIMIT_NPROC); + /* FIXME: what about the struct files here? */ + __rlim_sub(&old_vxi->limit, VLIMIT_OPENFD, openfd); + /* account for the executable */ + __rlim_dec(&old_vxi->limit, VLIMIT_DENTRY); + } + atomic_inc(&vxi->cvirt.nr_threads); + atomic_inc(&vxi->cvirt.nr_running); + __rlim_inc(&vxi->limit, RLIMIT_NPROC); + /* FIXME: what about the struct files here? 
*/ + __rlim_add(&vxi->limit, VLIMIT_OPENFD, openfd); + /* account for the executable */ + __rlim_inc(&vxi->limit, VLIMIT_DENTRY); + + if (old_vxi) { + release_vx_info(old_vxi, p); + clr_vx_info(&p->vx_info); + } + claim_vx_info(vxi, p); + set_vx_info(&p->vx_info, vxi); + p->xid = vxi->vx_id; + + vxdprintk(VXD_CBIT(xid, 5), + "moved task %p into vxi:%p[#%d]", + p, vxi, vxi->vx_id); + + // vx_mask_cap_bset(vxi, p); + task_unlock(p); + + /* hack for *spaces to provide compatibility */ + if (unshare) { + struct nsproxy *old_nsp, *new_nsp; + + ret = unshare_nsproxy_namespaces( + CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWUSER, + &new_nsp, NULL, NULL); + if (ret) + goto out; + + old_nsp = xchg(&p->nsproxy, new_nsp); + vx_set_space(vxi, + CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWUSER, 0); + put_nsproxy(old_nsp); + } + } +out: + put_vx_info(old_vxi); + return ret; +} + +int vx_set_reaper(struct vx_info *vxi, struct task_struct *p) +{ + struct task_struct *old_reaper; + struct vx_info *reaper_vxi; + + if (!vxi) + return -EINVAL; + + vxdprintk(VXD_CBIT(xid, 6), + "vx_set_reaper(%p[#%d],%p[#%d,%d])", + vxi, vxi->vx_id, p, p->xid, p->pid); + + old_reaper = vxi->vx_reaper; + if (old_reaper == p) + return 0; + + reaper_vxi = task_get_vx_info(p); + if (reaper_vxi && reaper_vxi != vxi) { + vxwprintk(1, + "Unsuitable reaper [" VS_Q("%s") ",%u:#%u] " + "for [xid #%u]", + p->comm, p->pid, p->xid, vx_current_xid()); + goto out; + } + + /* set new child reaper */ + get_task_struct(p); + vxi->vx_reaper = p; + put_task_struct(old_reaper); +out: + put_vx_info(reaper_vxi); + return 0; +} + +int vx_set_init(struct vx_info *vxi, struct task_struct *p) +{ + if (!vxi) + return -EINVAL; + + vxdprintk(VXD_CBIT(xid, 6), + "vx_set_init(%p[#%d],%p[#%d,%d,%d])", + vxi, vxi->vx_id, p, p->xid, p->pid, p->tgid); + + vxi->vx_flags &= ~VXF_STATE_INIT; + // vxi->vx_initpid = p->tgid; + vxi->vx_initpid = p->pid; + return 0; +} + +void vx_exit_init(struct vx_info *vxi, struct task_struct *p, int code) +{ + vxdprintk(VXD_CBIT(xid, 6), + "vx_exit_init(%p[#%d],%p[#%d,%d,%d])", + vxi, vxi->vx_id, p, p->xid, p->pid, p->tgid); + + vxi->exit_code = code; + vxi->vx_initpid = 0; +} + + +void vx_set_persistent(struct vx_info *vxi) +{ + vxdprintk(VXD_CBIT(xid, 6), + "vx_set_persistent(%p[#%d])", vxi, vxi->vx_id); + + get_vx_info(vxi); + claim_vx_info(vxi, NULL); +} + +void vx_clear_persistent(struct vx_info *vxi) +{ + vxdprintk(VXD_CBIT(xid, 6), + "vx_clear_persistent(%p[#%d])", vxi, vxi->vx_id); + + release_vx_info(vxi, NULL); + put_vx_info(vxi); +} + +void vx_update_persistent(struct vx_info *vxi) +{ + if (vx_info_flags(vxi, VXF_PERSISTENT, 0)) + vx_set_persistent(vxi); + else + vx_clear_persistent(vxi); +} + + +/* task must be current or locked */ + +void exit_vx_info(struct task_struct *p, int code) +{ + struct vx_info *vxi = p->vx_info; + + if (vxi) { + atomic_dec(&vxi->cvirt.nr_threads); + vx_nproc_dec(p); + + vxi->exit_code = code; + release_vx_info(vxi, p); + } +} + +void exit_vx_info_early(struct task_struct *p, int code) +{ + struct vx_info *vxi = p->vx_info; + + if (vxi) { + if (vxi->vx_initpid == p->pid) + vx_exit_init(vxi, p, code); + if (vxi->vx_reaper == p) + vx_set_reaper(vxi, init_pid_ns.child_reaper); + } +} + + +/* vserver syscall commands below here */ + +/* taks xid and vx_info functions */ + +#include + + +int vc_task_xid(uint32_t id) +{ + vxid_t xid; + + if (id) { + struct task_struct *tsk; + + rcu_read_lock(); + tsk = find_task_by_real_pid(id); + xid = (tsk) ? 
tsk->xid : -ESRCH; + rcu_read_unlock(); + } else + xid = vx_current_xid(); + return xid; +} + + +int vc_vx_info(struct vx_info *vxi, void __user *data) +{ + struct vcmd_vx_info_v0 vc_data; + + vc_data.xid = vxi->vx_id; + vc_data.initpid = vxi->vx_initpid; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + + +int vc_ctx_stat(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_stat_v0 vc_data; + + vc_data.usecnt = atomic_read(&vxi->vx_usecnt); + vc_data.tasks = atomic_read(&vxi->vx_tasks); + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + + +/* context functions */ + +int vc_ctx_create(uint32_t xid, void __user *data) +{ + struct vcmd_ctx_create vc_data = { .flagword = VXF_INIT_SET }; + struct vx_info *new_vxi; + int ret; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + if ((xid > MAX_S_CONTEXT) || (xid < 2)) + return -EINVAL; + + new_vxi = __create_vx_info(xid); + if (IS_ERR(new_vxi)) + return PTR_ERR(new_vxi); + + /* initial flags */ + new_vxi->vx_flags = vc_data.flagword; + + ret = -ENOEXEC; + if (vs_state_change(new_vxi, VSC_STARTUP)) + goto out; + + ret = vx_migrate_task(current, new_vxi, (!data)); + if (ret) + goto out; + + /* return context id on success */ + ret = new_vxi->vx_id; + + /* get a reference for persistent contexts */ + if ((vc_data.flagword & VXF_PERSISTENT)) + vx_set_persistent(new_vxi); +out: + release_vx_info(new_vxi, NULL); + put_vx_info(new_vxi); + return ret; +} + + +int vc_ctx_migrate(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_migrate vc_data = { .flagword = 0 }; + int ret; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = vx_migrate_task(current, vxi, 0); + if (ret) + return ret; + if (vc_data.flagword & VXM_SET_INIT) + ret = vx_set_init(vxi, current); + if (ret) + return ret; + if (vc_data.flagword & VXM_SET_REAPER) + ret = vx_set_reaper(vxi, current); + return ret; +} + + +int vc_get_cflags(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_flags_v0 vc_data; + + vc_data.flagword = vxi->vx_flags; + + /* special STATE flag handling */ + vc_data.mask = vs_mask_flags(~0ULL, vxi->vx_flags, VXF_ONE_TIME); + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +int vc_set_cflags(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_flags_v0 vc_data; + uint64_t mask, trigger; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + /* special STATE flag handling */ + mask = vs_mask_mask(vc_data.mask, vxi->vx_flags, VXF_ONE_TIME); + trigger = (mask & vxi->vx_flags) ^ (mask & vc_data.flagword); + + if (vxi == current_vx_info()) { + /* if (trigger & VXF_STATE_SETUP) + vx_mask_cap_bset(vxi, current); */ + if (trigger & VXF_STATE_INIT) { + int ret; + + ret = vx_set_init(vxi, current); + if (ret) + return ret; + ret = vx_set_reaper(vxi, current); + if (ret) + return ret; + } + } + + vxi->vx_flags = vs_mask_flags(vxi->vx_flags, + vc_data.flagword, mask); + if (trigger & VXF_PERSISTENT) + vx_update_persistent(vxi); + + return 0; +} + + +static inline uint64_t caps_from_cap_t(kernel_cap_t c) +{ + uint64_t v = c.cap[0] | ((uint64_t)c.cap[1] << 32); + + // printk("caps_from_cap_t(%08x:%08x) = %016llx\n", c.cap[1], c.cap[0], v); + return v; +} + +static inline kernel_cap_t cap_t_from_caps(uint64_t v) +{ + kernel_cap_t c = __cap_empty_set; + + c.cap[0] = v & 0xFFFFFFFF; + c.cap[1] = (v >> 32) & 0xFFFFFFFF; + + // 
printk("cap_t_from_caps(%016llx) = %08x:%08x\n", v, c.cap[1], c.cap[0]); + return c; +} + + +static int do_get_caps(struct vx_info *vxi, uint64_t *bcaps, uint64_t *ccaps) +{ + if (bcaps) + *bcaps = caps_from_cap_t(vxi->vx_bcaps); + if (ccaps) + *ccaps = vxi->vx_ccaps; + + return 0; +} + +int vc_get_ccaps(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_caps_v1 vc_data; + int ret; + + ret = do_get_caps(vxi, NULL, &vc_data.ccaps); + if (ret) + return ret; + vc_data.cmask = ~0ULL; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +static int do_set_caps(struct vx_info *vxi, + uint64_t bcaps, uint64_t bmask, uint64_t ccaps, uint64_t cmask) +{ + uint64_t bcold = caps_from_cap_t(vxi->vx_bcaps); + +#if 0 + printk("do_set_caps(%16llx, %16llx, %16llx, %16llx)\n", + bcaps, bmask, ccaps, cmask); +#endif + vxi->vx_bcaps = cap_t_from_caps( + vs_mask_flags(bcold, bcaps, bmask)); + vxi->vx_ccaps = vs_mask_flags(vxi->vx_ccaps, ccaps, cmask); + + return 0; +} + +int vc_set_ccaps(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_caps_v1 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_set_caps(vxi, 0, 0, vc_data.ccaps, vc_data.cmask); +} + +int vc_get_bcaps(struct vx_info *vxi, void __user *data) +{ + struct vcmd_bcaps vc_data; + int ret; + + ret = do_get_caps(vxi, &vc_data.bcaps, NULL); + if (ret) + return ret; + vc_data.bmask = ~0ULL; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +int vc_set_bcaps(struct vx_info *vxi, void __user *data) +{ + struct vcmd_bcaps vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_set_caps(vxi, vc_data.bcaps, vc_data.bmask, 0, 0); +} + + +int vc_get_umask(struct vx_info *vxi, void __user *data) +{ + struct vcmd_umask vc_data; + + vc_data.umask = vxi->vx_umask; + vc_data.mask = ~0ULL; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +int vc_set_umask(struct vx_info *vxi, void __user *data) +{ + struct vcmd_umask vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + vxi->vx_umask = vs_mask_flags(vxi->vx_umask, + vc_data.umask, vc_data.mask); + return 0; +} + + +int vc_get_wmask(struct vx_info *vxi, void __user *data) +{ + struct vcmd_wmask vc_data; + + vc_data.wmask = vxi->vx_wmask; + vc_data.mask = ~0ULL; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +int vc_set_wmask(struct vx_info *vxi, void __user *data) +{ + struct vcmd_wmask vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + vxi->vx_wmask = vs_mask_flags(vxi->vx_wmask, + vc_data.wmask, vc_data.mask); + return 0; +} + + +int vc_get_badness(struct vx_info *vxi, void __user *data) +{ + struct vcmd_badness_v0 vc_data; + + vc_data.bias = vxi->vx_badness_bias; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +int vc_set_badness(struct vx_info *vxi, void __user *data) +{ + struct vcmd_badness_v0 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + vxi->vx_badness_bias = vc_data.bias; + return 0; +} + +#include + +EXPORT_SYMBOL_GPL(free_vx_info); + diff -NurpP --minimal linux-3.10.19/kernel/vserver/cvirt.c linux-3.10.19-vs2.3.6.8/kernel/vserver/cvirt.c --- linux-3.10.19/kernel/vserver/cvirt.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/cvirt.c 2013-08-22 
20:30:00.000000000 +0000 @@ -0,0 +1,313 @@ +/* + * linux/kernel/vserver/cvirt.c + * + * Virtual Server: Context Virtualization + * + * Copyright (C) 2004-2007 Herbert Pötzl + * + * V0.01 broken out from limit.c + * V0.02 added utsname stuff + * V0.03 changed vcmds to vxi arg + * + */ + +#include +#include +#include +#include +#include + +#include + + +void vx_vsi_boottime(struct timespec *boottime) +{ + struct vx_info *vxi = current_vx_info(); + + set_normalized_timespec(boottime, + boottime->tv_sec + vxi->cvirt.bias_uptime.tv_sec, + boottime->tv_nsec + vxi->cvirt.bias_uptime.tv_nsec); + return; +} + +void vx_vsi_uptime(struct timespec *uptime, struct timespec *idle) +{ + struct vx_info *vxi = current_vx_info(); + + set_normalized_timespec(uptime, + uptime->tv_sec - vxi->cvirt.bias_uptime.tv_sec, + uptime->tv_nsec - vxi->cvirt.bias_uptime.tv_nsec); + if (!idle) + return; + set_normalized_timespec(idle, + idle->tv_sec - vxi->cvirt.bias_idle.tv_sec, + idle->tv_nsec - vxi->cvirt.bias_idle.tv_nsec); + return; +} + +uint64_t vx_idle_jiffies(void) +{ + return init_task.utime + init_task.stime; +} + + + +static inline uint32_t __update_loadavg(uint32_t load, + int wsize, int delta, int n) +{ + unsigned long long calc, prev; + + /* just set it to n */ + if (unlikely(delta >= wsize)) + return (n << FSHIFT); + + calc = delta * n; + calc <<= FSHIFT; + prev = (wsize - delta); + prev *= load; + calc += prev; + do_div(calc, wsize); + return calc; +} + + +void vx_update_load(struct vx_info *vxi) +{ + uint32_t now, last, delta; + unsigned int nr_running, nr_uninterruptible; + unsigned int total; + unsigned long flags; + + spin_lock_irqsave(&vxi->cvirt.load_lock, flags); + + now = jiffies; + last = vxi->cvirt.load_last; + delta = now - last; + + if (delta < 5*HZ) + goto out; + + nr_running = atomic_read(&vxi->cvirt.nr_running); + nr_uninterruptible = atomic_read(&vxi->cvirt.nr_uninterruptible); + total = nr_running + nr_uninterruptible; + + vxi->cvirt.load[0] = __update_loadavg(vxi->cvirt.load[0], + 60*HZ, delta, total); + vxi->cvirt.load[1] = __update_loadavg(vxi->cvirt.load[1], + 5*60*HZ, delta, total); + vxi->cvirt.load[2] = __update_loadavg(vxi->cvirt.load[2], + 15*60*HZ, delta, total); + + vxi->cvirt.load_last = now; +out: + atomic_inc(&vxi->cvirt.load_updates); + spin_unlock_irqrestore(&vxi->cvirt.load_lock, flags); +} + + +/* + * Commands to do_syslog: + * + * 0 -- Close the log. Currently a NOP. + * 1 -- Open the log. Currently a NOP. + * 2 -- Read from the log. + * 3 -- Read all messages remaining in the ring buffer. + * 4 -- Read and clear all messages remaining in the ring buffer + * 5 -- Clear ring buffer. 
+ * 6 -- Disable printk's to console + * 7 -- Enable printk's to console + * 8 -- Set level of messages printed to console + * 9 -- Return number of unread characters in the log buffer + * 10 -- Return size of the log buffer + */ +int vx_do_syslog(int type, char __user *buf, int len) +{ + int error = 0; + int do_clear = 0; + struct vx_info *vxi = current_vx_info(); + struct _vx_syslog *log; + + if (!vxi) + return -EINVAL; + log = &vxi->cvirt.syslog; + + switch (type) { + case 0: /* Close log */ + case 1: /* Open log */ + break; + case 2: /* Read from log */ + error = wait_event_interruptible(log->log_wait, + (log->log_start - log->log_end)); + if (error) + break; + spin_lock_irq(&log->logbuf_lock); + spin_unlock_irq(&log->logbuf_lock); + break; + case 4: /* Read/clear last kernel messages */ + do_clear = 1; + /* fall through */ + case 3: /* Read last kernel messages */ + return 0; + + case 5: /* Clear ring buffer */ + return 0; + + case 6: /* Disable logging to console */ + case 7: /* Enable logging to console */ + case 8: /* Set level of messages printed to console */ + break; + + case 9: /* Number of chars in the log buffer */ + return 0; + case 10: /* Size of the log buffer */ + return 0; + default: + error = -EINVAL; + break; + } + return error; +} + + +/* virtual host info names */ + +static char *vx_vhi_name(struct vx_info *vxi, int id) +{ + struct nsproxy *nsproxy; + struct uts_namespace *uts; + + if (id == VHIN_CONTEXT) + return vxi->vx_name; + + nsproxy = vxi->space[0].vx_nsproxy; + if (!nsproxy) + return NULL; + + uts = nsproxy->uts_ns; + if (!uts) + return NULL; + + switch (id) { + case VHIN_SYSNAME: + return uts->name.sysname; + case VHIN_NODENAME: + return uts->name.nodename; + case VHIN_RELEASE: + return uts->name.release; + case VHIN_VERSION: + return uts->name.version; + case VHIN_MACHINE: + return uts->name.machine; + case VHIN_DOMAINNAME: + return uts->name.domainname; + default: + return NULL; + } + return NULL; +} + +int vc_set_vhi_name(struct vx_info *vxi, void __user *data) +{ + struct vcmd_vhi_name_v0 vc_data; + char *name; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + name = vx_vhi_name(vxi, vc_data.field); + if (!name) + return -EINVAL; + + memcpy(name, vc_data.name, 65); + return 0; +} + +int vc_get_vhi_name(struct vx_info *vxi, void __user *data) +{ + struct vcmd_vhi_name_v0 vc_data; + char *name; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + name = vx_vhi_name(vxi, vc_data.field); + if (!name) + return -EINVAL; + + memcpy(vc_data.name, name, 65); + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + + +int vc_virt_stat(struct vx_info *vxi, void __user *data) +{ + struct vcmd_virt_stat_v0 vc_data; + struct _vx_cvirt *cvirt = &vxi->cvirt; + struct timespec uptime; + + do_posix_clock_monotonic_gettime(&uptime); + set_normalized_timespec(&uptime, + uptime.tv_sec - cvirt->bias_uptime.tv_sec, + uptime.tv_nsec - cvirt->bias_uptime.tv_nsec); + + vc_data.offset = timespec_to_ns(&cvirt->bias_ts); + vc_data.uptime = timespec_to_ns(&uptime); + vc_data.nr_threads = atomic_read(&cvirt->nr_threads); + vc_data.nr_running = atomic_read(&cvirt->nr_running); + vc_data.nr_uninterruptible = atomic_read(&cvirt->nr_uninterruptible); + vc_data.nr_onhold = atomic_read(&cvirt->nr_onhold); + vc_data.nr_forks = atomic_read(&cvirt->total_forks); + vc_data.load[0] = cvirt->load[0]; + vc_data.load[1] = cvirt->load[1]; + vc_data.load[2] = cvirt->load[2]; + + if (copy_to_user(data, &vc_data, 
sizeof(vc_data))) + return -EFAULT; + return 0; +} + + +#ifdef CONFIG_VSERVER_VTIME + +/* virtualized time base */ + +void vx_adjust_timespec(struct timespec *ts) +{ + struct vx_info *vxi; + + if (!vx_flags(VXF_VIRT_TIME, 0)) + return; + + vxi = current_vx_info(); + ts->tv_sec += vxi->cvirt.bias_ts.tv_sec; + ts->tv_nsec += vxi->cvirt.bias_ts.tv_nsec; + + if (ts->tv_nsec >= NSEC_PER_SEC) { + ts->tv_sec++; + ts->tv_nsec -= NSEC_PER_SEC; + } else if (ts->tv_nsec < 0) { + ts->tv_sec--; + ts->tv_nsec += NSEC_PER_SEC; + } +} + +int vx_settimeofday(const struct timespec *ts) +{ + struct timespec ats, delta; + struct vx_info *vxi; + + if (!vx_flags(VXF_VIRT_TIME, 0)) + return do_settimeofday(ts); + + getnstimeofday(&ats); + delta = timespec_sub(*ts, ats); + + vxi = current_vx_info(); + vxi->cvirt.bias_ts = timespec_add(vxi->cvirt.bias_ts, delta); + return 0; +} + +#endif + diff -NurpP --minimal linux-3.10.19/kernel/vserver/cvirt_init.h linux-3.10.19-vs2.3.6.8/kernel/vserver/cvirt_init.h --- linux-3.10.19/kernel/vserver/cvirt_init.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/cvirt_init.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,70 @@ + + +extern uint64_t vx_idle_jiffies(void); + +static inline void vx_info_init_cvirt(struct _vx_cvirt *cvirt) +{ + uint64_t idle_jiffies = vx_idle_jiffies(); + uint64_t nsuptime; + + do_posix_clock_monotonic_gettime(&cvirt->bias_uptime); + nsuptime = (unsigned long long)cvirt->bias_uptime.tv_sec + * NSEC_PER_SEC + cvirt->bias_uptime.tv_nsec; + cvirt->bias_clock = nsec_to_clock_t(nsuptime); + cvirt->bias_ts.tv_sec = 0; + cvirt->bias_ts.tv_nsec = 0; + + jiffies_to_timespec(idle_jiffies, &cvirt->bias_idle); + atomic_set(&cvirt->nr_threads, 0); + atomic_set(&cvirt->nr_running, 0); + atomic_set(&cvirt->nr_uninterruptible, 0); + atomic_set(&cvirt->nr_onhold, 0); + + spin_lock_init(&cvirt->load_lock); + cvirt->load_last = jiffies; + atomic_set(&cvirt->load_updates, 0); + cvirt->load[0] = 0; + cvirt->load[1] = 0; + cvirt->load[2] = 0; + atomic_set(&cvirt->total_forks, 0); + + spin_lock_init(&cvirt->syslog.logbuf_lock); + init_waitqueue_head(&cvirt->syslog.log_wait); + cvirt->syslog.log_start = 0; + cvirt->syslog.log_end = 0; + cvirt->syslog.con_start = 0; + cvirt->syslog.logged_chars = 0; +} + +static inline +void vx_info_init_cvirt_pc(struct _vx_cvirt_pc *cvirt_pc, int cpu) +{ + // cvirt_pc->cpustat = { 0 }; +} + +static inline void vx_info_exit_cvirt(struct _vx_cvirt *cvirt) +{ +#ifdef CONFIG_VSERVER_WARN + int value; +#endif + vxwprintk_xid((value = atomic_read(&cvirt->nr_threads)), + "!!! cvirt: %p[nr_threads] = %d on exit.", + cvirt, value); + vxwprintk_xid((value = atomic_read(&cvirt->nr_running)), + "!!! cvirt: %p[nr_running] = %d on exit.", + cvirt, value); + vxwprintk_xid((value = atomic_read(&cvirt->nr_uninterruptible)), + "!!! cvirt: %p[nr_uninterruptible] = %d on exit.", + cvirt, value); + vxwprintk_xid((value = atomic_read(&cvirt->nr_onhold)), + "!!! 
cvirt: %p[nr_onhold] = %d on exit.", + cvirt, value); + return; +} + +static inline +void vx_info_exit_cvirt_pc(struct _vx_cvirt_pc *cvirt_pc, int cpu) +{ + return; +} + diff -NurpP --minimal linux-3.10.19/kernel/vserver/cvirt_proc.h linux-3.10.19-vs2.3.6.8/kernel/vserver/cvirt_proc.h --- linux-3.10.19/kernel/vserver/cvirt_proc.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/cvirt_proc.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,123 @@ +#ifndef _VX_CVIRT_PROC_H +#define _VX_CVIRT_PROC_H + +#include +#include +#include +#include +#include + +extern int vx_info_mnt_namespace(struct mnt_namespace *, char *); + +static inline +int vx_info_proc_nsproxy(struct nsproxy *nsproxy, char *buffer) +{ + struct mnt_namespace *ns; + struct uts_namespace *uts; + struct ipc_namespace *ipc; + int length = 0; + + if (!nsproxy) + goto out; + + length += sprintf(buffer + length, + "NSProxy:\t%p [%p,%p,%p]\n", + nsproxy, nsproxy->mnt_ns, + nsproxy->uts_ns, nsproxy->ipc_ns); + + ns = nsproxy->mnt_ns; + if (!ns) + goto skip_ns; + + length += vx_info_mnt_namespace(ns, buffer + length); + +skip_ns: + + uts = nsproxy->uts_ns; + if (!uts) + goto skip_uts; + + length += sprintf(buffer + length, + "SysName:\t%.*s\n" + "NodeName:\t%.*s\n" + "Release:\t%.*s\n" + "Version:\t%.*s\n" + "Machine:\t%.*s\n" + "DomainName:\t%.*s\n", + __NEW_UTS_LEN, uts->name.sysname, + __NEW_UTS_LEN, uts->name.nodename, + __NEW_UTS_LEN, uts->name.release, + __NEW_UTS_LEN, uts->name.version, + __NEW_UTS_LEN, uts->name.machine, + __NEW_UTS_LEN, uts->name.domainname); +skip_uts: + + ipc = nsproxy->ipc_ns; + if (!ipc) + goto skip_ipc; + + length += sprintf(buffer + length, + "SEMS:\t\t%d %d %d %d %d\n" + "MSG:\t\t%d %d %d\n" + "SHM:\t\t%lu %lu %d %ld\n", + ipc->sem_ctls[0], ipc->sem_ctls[1], + ipc->sem_ctls[2], ipc->sem_ctls[3], + ipc->used_sems, + ipc->msg_ctlmax, ipc->msg_ctlmnb, ipc->msg_ctlmni, + (unsigned long)ipc->shm_ctlmax, + (unsigned long)ipc->shm_ctlall, + ipc->shm_ctlmni, ipc->shm_tot); +skip_ipc: +out: + return length; +} + + +#include + +#define LOAD_INT(x) ((x) >> FSHIFT) +#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100) + +static inline +int vx_info_proc_cvirt(struct _vx_cvirt *cvirt, char *buffer) +{ + int length = 0; + int a, b, c; + + length += sprintf(buffer + length, + "BiasUptime:\t%lu.%02lu\n", + (unsigned long)cvirt->bias_uptime.tv_sec, + (cvirt->bias_uptime.tv_nsec / (NSEC_PER_SEC / 100))); + + a = cvirt->load[0] + (FIXED_1 / 200); + b = cvirt->load[1] + (FIXED_1 / 200); + c = cvirt->load[2] + (FIXED_1 / 200); + length += sprintf(buffer + length, + "nr_threads:\t%d\n" + "nr_running:\t%d\n" + "nr_unintr:\t%d\n" + "nr_onhold:\t%d\n" + "load_updates:\t%d\n" + "loadavg:\t%d.%02d %d.%02d %d.%02d\n" + "total_forks:\t%d\n", + atomic_read(&cvirt->nr_threads), + atomic_read(&cvirt->nr_running), + atomic_read(&cvirt->nr_uninterruptible), + atomic_read(&cvirt->nr_onhold), + atomic_read(&cvirt->load_updates), + LOAD_INT(a), LOAD_FRAC(a), + LOAD_INT(b), LOAD_FRAC(b), + LOAD_INT(c), LOAD_FRAC(c), + atomic_read(&cvirt->total_forks)); + return length; +} + +static inline +int vx_info_proc_cvirt_pc(struct _vx_cvirt_pc *cvirt_pc, + char *buffer, int cpu) +{ + int length = 0; + return length; +} + +#endif /* _VX_CVIRT_PROC_H */ diff -NurpP --minimal linux-3.10.19/kernel/vserver/debug.c linux-3.10.19-vs2.3.6.8/kernel/vserver/debug.c --- linux-3.10.19/kernel/vserver/debug.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/debug.c 2013-08-22 20:30:00.000000000 +0000 
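/*
 * The loadavg values shown by vx_info_proc_cvirt() above are kept in
 * the kernel's FSHIFT fixed-point format (FSHIFT is 11 in kernels of
 * this vintage, so FIXED_1 == 1 << 11 == 2048 represents a load of
 * 1.00).  Adding FIXED_1/200 before splitting the value rounds it to
 * the nearest hundredth.  A small stand-alone demonstration of the
 * conversion (user-space C, not part of this patch):
 */
#include <stdio.h>

#define FSHIFT		11			/* bits of fractional precision */
#define FIXED_1		(1 << FSHIFT)		/* 1.0 in fixed point */
#define LOAD_INT(x)	((x) >> FSHIFT)
#define LOAD_FRAC(x)	LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

int main(void)
{
	unsigned long raw = 3 * FIXED_1 / 2;	/* 1.50 in fixed point (3072) */
	unsigned long a = raw + FIXED_1 / 200;	/* round to the nearest 0.01 */

	printf("loadavg: %lu.%02lu\n", LOAD_INT(a), LOAD_FRAC(a));	/* prints "loadavg: 1.50" */
	return 0;
}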
@@ -0,0 +1,32 @@ +/* + * kernel/vserver/debug.c + * + * Copyright (C) 2005-2007 Herbert Pötzl + * + * V0.01 vx_info dump support + * + */ + +#include + +#include + + +void dump_vx_info(struct vx_info *vxi, int level) +{ + printk("vx_info %p[#%d, %d.%d, %4x]\n", vxi, vxi->vx_id, + atomic_read(&vxi->vx_usecnt), + atomic_read(&vxi->vx_tasks), + vxi->vx_state); + if (level > 0) { + __dump_vx_limit(&vxi->limit); + __dump_vx_sched(&vxi->sched); + __dump_vx_cvirt(&vxi->cvirt); + __dump_vx_cacct(&vxi->cacct); + } + printk("---\n"); +} + + +EXPORT_SYMBOL_GPL(dump_vx_info); + diff -NurpP --minimal linux-3.10.19/kernel/vserver/device.c linux-3.10.19-vs2.3.6.8/kernel/vserver/device.c --- linux-3.10.19/kernel/vserver/device.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/device.c 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,443 @@ +/* + * linux/kernel/vserver/device.c + * + * Linux-VServer: Device Support + * + * Copyright (C) 2006 Herbert Pötzl + * Copyright (C) 2007 Daniel Hokka Zakrisson + * + * V0.01 device mapping basics + * V0.02 added defaults + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + + +#define DMAP_HASH_BITS 4 + + +struct vs_mapping { + union { + struct hlist_node hlist; + struct list_head list; + } u; +#define dm_hlist u.hlist +#define dm_list u.list + vxid_t xid; + dev_t device; + struct vx_dmap_target target; +}; + + +static struct hlist_head dmap_main_hash[1 << DMAP_HASH_BITS]; + +static DEFINE_SPINLOCK(dmap_main_hash_lock); + +static struct vx_dmap_target dmap_defaults[2] = { + { .flags = DATTR_OPEN }, + { .flags = DATTR_OPEN }, +}; + + +struct kmem_cache *dmap_cachep __read_mostly; + +int __init dmap_cache_init(void) +{ + dmap_cachep = kmem_cache_create("dmap_cache", + sizeof(struct vs_mapping), 0, + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); + return 0; +} + +__initcall(dmap_cache_init); + + +static inline unsigned int __hashval(dev_t dev, int bits) +{ + return hash_long((unsigned long)dev, bits); +} + + +/* __hash_mapping() + * add the mapping to the hash table + */ +static inline void __hash_mapping(struct vx_info *vxi, struct vs_mapping *vdm) +{ + spinlock_t *hash_lock = &dmap_main_hash_lock; + struct hlist_head *head, *hash = dmap_main_hash; + int device = vdm->device; + + spin_lock(hash_lock); + vxdprintk(VXD_CBIT(misc, 8), "__hash_mapping: %p[#%d] %08x:%08x", + vxi, vxi ? vxi->vx_id : 0, device, vdm->target.target); + + head = &hash[__hashval(device, DMAP_HASH_BITS)]; + hlist_add_head(&vdm->dm_hlist, head); + spin_unlock(hash_lock); +} + + +static inline int __mode_to_default(umode_t mode) +{ + switch (mode) { + case S_IFBLK: + return 0; + case S_IFCHR: + return 1; + default: + BUG(); + } +} + + +/* __set_default() + * set a default + */ +static inline void __set_default(struct vx_info *vxi, umode_t mode, + struct vx_dmap_target *vdmt) +{ + spinlock_t *hash_lock = &dmap_main_hash_lock; + spin_lock(hash_lock); + + if (vxi) + vxi->dmap.targets[__mode_to_default(mode)] = *vdmt; + else + dmap_defaults[__mode_to_default(mode)] = *vdmt; + + + spin_unlock(hash_lock); + + vxdprintk(VXD_CBIT(misc, 8), "__set_default: %p[#%u] %08x %04x", + vxi, vxi ? 
vxi->vx_id : 0, vdmt->target, vdmt->flags); +} + + +/* __remove_default() + * remove a default + */ +static inline int __remove_default(struct vx_info *vxi, umode_t mode) +{ + spinlock_t *hash_lock = &dmap_main_hash_lock; + spin_lock(hash_lock); + + if (vxi) + vxi->dmap.targets[__mode_to_default(mode)].flags = 0; + else /* remove == reset */ + dmap_defaults[__mode_to_default(mode)].flags = DATTR_OPEN | mode; + + spin_unlock(hash_lock); + return 0; +} + + +/* __find_mapping() + * find a mapping in the hash table + * + * caller must hold hash_lock + */ +static inline int __find_mapping(vxid_t xid, dev_t device, umode_t mode, + struct vs_mapping **local, struct vs_mapping **global) +{ + struct hlist_head *hash = dmap_main_hash; + struct hlist_head *head = &hash[__hashval(device, DMAP_HASH_BITS)]; + struct hlist_node *pos; + struct vs_mapping *vdm; + + *local = NULL; + if (global) + *global = NULL; + + hlist_for_each(pos, head) { + vdm = hlist_entry(pos, struct vs_mapping, dm_hlist); + + if ((vdm->device == device) && + !((vdm->target.flags ^ mode) & S_IFMT)) { + if (vdm->xid == xid) { + *local = vdm; + return 1; + } else if (global && vdm->xid == 0) + *global = vdm; + } + } + + if (global && *global) + return 0; + else + return -ENOENT; +} + + +/* __lookup_mapping() + * find a mapping and store the result in target and flags + */ +static inline int __lookup_mapping(struct vx_info *vxi, + dev_t device, dev_t *target, int *flags, umode_t mode) +{ + spinlock_t *hash_lock = &dmap_main_hash_lock; + struct vs_mapping *vdm, *global; + struct vx_dmap_target *vdmt; + int ret = 0; + vxid_t xid = vxi->vx_id; + int index; + + spin_lock(hash_lock); + if (__find_mapping(xid, device, mode, &vdm, &global) > 0) { + ret = 1; + vdmt = &vdm->target; + goto found; + } + + index = __mode_to_default(mode); + if (vxi && vxi->dmap.targets[index].flags) { + ret = 2; + vdmt = &vxi->dmap.targets[index]; + } else if (global) { + ret = 3; + vdmt = &global->target; + goto found; + } else { + ret = 4; + vdmt = &dmap_defaults[index]; + } + +found: + if (target && (vdmt->flags & DATTR_REMAP)) + *target = vdmt->target; + else if (target) + *target = device; + if (flags) + *flags = vdmt->flags; + + spin_unlock(hash_lock); + + return ret; +} + + +/* __remove_mapping() + * remove a mapping from the hash table + */ +static inline int __remove_mapping(struct vx_info *vxi, dev_t device, + umode_t mode) +{ + spinlock_t *hash_lock = &dmap_main_hash_lock; + struct vs_mapping *vdm = NULL; + int ret = 0; + + spin_lock(hash_lock); + + ret = __find_mapping((vxi ? vxi->vx_id : 0), device, mode, &vdm, + NULL); + vxdprintk(VXD_CBIT(misc, 8), "__remove_mapping: %p[#%d] %08x %04x", + vxi, vxi ? vxi->vx_id : 0, device, mode); + if (ret < 0) + goto out; + hlist_del(&vdm->dm_hlist); + +out: + spin_unlock(hash_lock); + if (vdm) + kmem_cache_free(dmap_cachep, vdm); + return ret; +} + + + +int vs_map_device(struct vx_info *vxi, + dev_t device, dev_t *target, umode_t mode) +{ + int ret, flags = DATTR_MASK; + + if (!vxi) { + if (target) + *target = device; + goto out; + } + ret = __lookup_mapping(vxi, device, target, &flags, mode); + vxdprintk(VXD_CBIT(misc, 8), "vs_map_device: %08x target: %08x flags: %04x mode: %04x mapped=%d", + device, target ? 
*target : 0, flags, mode, ret); +out: + return (flags & DATTR_MASK); +} + + + +static int do_set_mapping(struct vx_info *vxi, + dev_t device, dev_t target, int flags, umode_t mode) +{ + if (device) { + struct vs_mapping *new; + + new = kmem_cache_alloc(dmap_cachep, GFP_KERNEL); + if (!new) + return -ENOMEM; + + INIT_HLIST_NODE(&new->dm_hlist); + new->device = device; + new->target.target = target; + new->target.flags = flags | mode; + new->xid = (vxi ? vxi->vx_id : 0); + + vxdprintk(VXD_CBIT(misc, 8), "do_set_mapping: %08x target: %08x flags: %04x", device, target, flags); + __hash_mapping(vxi, new); + } else { + struct vx_dmap_target new = { + .target = target, + .flags = flags | mode, + }; + __set_default(vxi, mode, &new); + } + return 0; +} + + +static int do_unset_mapping(struct vx_info *vxi, + dev_t device, dev_t target, int flags, umode_t mode) +{ + int ret = -EINVAL; + + if (device) { + ret = __remove_mapping(vxi, device, mode); + if (ret < 0) + goto out; + } else { + ret = __remove_default(vxi, mode); + if (ret < 0) + goto out; + } + +out: + return ret; +} + + +static inline int __user_device(const char __user *name, dev_t *dev, + umode_t *mode) +{ + struct nameidata nd; + int ret; + + if (!name) { + *dev = 0; + return 0; + } + ret = user_lpath(name, &nd.path); + if (ret) + return ret; + if (nd.path.dentry->d_inode) { + *dev = nd.path.dentry->d_inode->i_rdev; + *mode = nd.path.dentry->d_inode->i_mode; + } + path_put(&nd.path); + return 0; +} + +static inline int __mapping_mode(dev_t device, dev_t target, + umode_t device_mode, umode_t target_mode, umode_t *mode) +{ + if (device) + *mode = device_mode & S_IFMT; + else if (target) + *mode = target_mode & S_IFMT; + else + return -EINVAL; + + /* if both given, device and target mode have to match */ + if (device && target && + ((device_mode ^ target_mode) & S_IFMT)) + return -EINVAL; + return 0; +} + + +static inline int do_mapping(struct vx_info *vxi, const char __user *device_path, + const char __user *target_path, int flags, int set) +{ + dev_t device = ~0, target = ~0; + umode_t device_mode = 0, target_mode = 0, mode; + int ret; + + ret = __user_device(device_path, &device, &device_mode); + if (ret) + return ret; + ret = __user_device(target_path, &target, &target_mode); + if (ret) + return ret; + + ret = __mapping_mode(device, target, + device_mode, target_mode, &mode); + if (ret) + return ret; + + if (set) + return do_set_mapping(vxi, device, target, + flags, mode); + else + return do_unset_mapping(vxi, device, target, + flags, mode); +} + + +int vc_set_mapping(struct vx_info *vxi, void __user *data) +{ + struct vcmd_set_mapping_v0 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_mapping(vxi, vc_data.device, vc_data.target, + vc_data.flags, 1); +} + +int vc_unset_mapping(struct vx_info *vxi, void __user *data) +{ + struct vcmd_set_mapping_v0 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_mapping(vxi, vc_data.device, vc_data.target, + vc_data.flags, 0); +} + + +#ifdef CONFIG_COMPAT + +int vc_set_mapping_x32(struct vx_info *vxi, void __user *data) +{ + struct vcmd_set_mapping_v0_x32 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_mapping(vxi, compat_ptr(vc_data.device_ptr), + compat_ptr(vc_data.target_ptr), vc_data.flags, 1); +} + +int vc_unset_mapping_x32(struct vx_info *vxi, void __user *data) +{ + struct vcmd_set_mapping_v0_x32 vc_data; + + if (copy_from_user(&vc_data, data, 
sizeof(vc_data))) + return -EFAULT; + + return do_mapping(vxi, compat_ptr(vc_data.device_ptr), + compat_ptr(vc_data.target_ptr), vc_data.flags, 0); +} + +#endif /* CONFIG_COMPAT */ + + diff -NurpP --minimal linux-3.10.19/kernel/vserver/dlimit.c linux-3.10.19-vs2.3.6.8/kernel/vserver/dlimit.c --- linux-3.10.19/kernel/vserver/dlimit.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/dlimit.c 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,528 @@ +/* + * linux/kernel/vserver/dlimit.c + * + * Virtual Server: Context Disk Limits + * + * Copyright (C) 2004-2009 Herbert Pötzl + * + * V0.01 initial version + * V0.02 compat32 splitup + * V0.03 extended interface + * + */ + +#include +#include +#include +#include +#include +#include +#include +// #include + +#include + +/* __alloc_dl_info() + + * allocate an initialized dl_info struct + * doesn't make it visible (hash) */ + +static struct dl_info *__alloc_dl_info(struct super_block *sb, vtag_t tag) +{ + struct dl_info *new = NULL; + + vxdprintk(VXD_CBIT(dlim, 5), + "alloc_dl_info(%p,%d)*", sb, tag); + + /* would this benefit from a slab cache? */ + new = kmalloc(sizeof(struct dl_info), GFP_KERNEL); + if (!new) + return 0; + + memset(new, 0, sizeof(struct dl_info)); + new->dl_tag = tag; + new->dl_sb = sb; + // INIT_RCU_HEAD(&new->dl_rcu); + INIT_HLIST_NODE(&new->dl_hlist); + spin_lock_init(&new->dl_lock); + atomic_set(&new->dl_refcnt, 0); + atomic_set(&new->dl_usecnt, 0); + + /* rest of init goes here */ + + vxdprintk(VXD_CBIT(dlim, 4), + "alloc_dl_info(%p,%d) = %p", sb, tag, new); + return new; +} + +/* __dealloc_dl_info() + + * final disposal of dl_info */ + +static void __dealloc_dl_info(struct dl_info *dli) +{ + vxdprintk(VXD_CBIT(dlim, 4), + "dealloc_dl_info(%p)", dli); + + dli->dl_hlist.next = LIST_POISON1; + dli->dl_tag = -1; + dli->dl_sb = 0; + + BUG_ON(atomic_read(&dli->dl_usecnt)); + BUG_ON(atomic_read(&dli->dl_refcnt)); + + kfree(dli); +} + + +/* hash table for dl_info hash */ + +#define DL_HASH_SIZE 13 + +struct hlist_head dl_info_hash[DL_HASH_SIZE]; + +static DEFINE_SPINLOCK(dl_info_hash_lock); + + +static inline unsigned int __hashval(struct super_block *sb, vtag_t tag) +{ + return ((tag ^ (unsigned long)sb) % DL_HASH_SIZE); +} + + + +/* __hash_dl_info() + + * add the dli to the global hash table + * requires the hash_lock to be held */ + +static inline void __hash_dl_info(struct dl_info *dli) +{ + struct hlist_head *head; + + vxdprintk(VXD_CBIT(dlim, 6), + "__hash_dl_info: %p[#%d]", dli, dli->dl_tag); + get_dl_info(dli); + head = &dl_info_hash[__hashval(dli->dl_sb, dli->dl_tag)]; + hlist_add_head_rcu(&dli->dl_hlist, head); +} + +/* __unhash_dl_info() + + * remove the dli from the global hash table + * requires the hash_lock to be held */ + +static inline void __unhash_dl_info(struct dl_info *dli) +{ + vxdprintk(VXD_CBIT(dlim, 6), + "__unhash_dl_info: %p[#%d]", dli, dli->dl_tag); + hlist_del_rcu(&dli->dl_hlist); + put_dl_info(dli); +} + + +/* __lookup_dl_info() + + * requires the rcu_read_lock() + * doesn't increment the dl_refcnt */ + +static inline struct dl_info *__lookup_dl_info(struct super_block *sb, vtag_t tag) +{ + struct hlist_head *head = &dl_info_hash[__hashval(sb, tag)]; + struct dl_info *dli; + + hlist_for_each_entry_rcu(dli, head, dl_hlist) { + if (dli->dl_tag == tag && dli->dl_sb == sb) + return dli; + } + return NULL; +} + + +struct dl_info *locate_dl_info(struct super_block *sb, vtag_t tag) +{ + struct dl_info *dli; + + rcu_read_lock(); + dli = get_dl_info(__lookup_dl_info(sb, tag)); + 
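/*
 * locate_dl_info() returns a reference-counted dl_info (or NULL), so
 * its callers (see do_set_dlimit() and vx_vsi_statfs() further down)
 * all follow the same pattern: locate, update under the per-info
 * dl_lock, unlock, then drop the reference with put_dl_info().  A
 * rough sketch of such a caller (example_charge_inode is a made-up
 * name, not part of this patch):
 */
#if 0	/* illustrative sketch only */
static void example_charge_inode(struct super_block *sb, vtag_t tag)
{
	struct dl_info *dli = locate_dl_info(sb, tag);

	if (!dli)
		return;				/* no disk limit set for this tag */

	spin_lock(&dli->dl_lock);
	dli->dl_inodes_used++;			/* adjust counters under dl_lock */
	spin_unlock(&dli->dl_lock);

	put_dl_info(dli);			/* drop the reference from locate_dl_info() */
}
#endif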
vxdprintk(VXD_CBIT(dlim, 7), + "locate_dl_info(%p,#%d) = %p", sb, tag, dli); + rcu_read_unlock(); + return dli; +} + +void rcu_free_dl_info(struct rcu_head *head) +{ + struct dl_info *dli = container_of(head, struct dl_info, dl_rcu); + int usecnt, refcnt; + + BUG_ON(!dli || !head); + + usecnt = atomic_read(&dli->dl_usecnt); + BUG_ON(usecnt < 0); + + refcnt = atomic_read(&dli->dl_refcnt); + BUG_ON(refcnt < 0); + + vxdprintk(VXD_CBIT(dlim, 3), + "rcu_free_dl_info(%p)", dli); + if (!usecnt) + __dealloc_dl_info(dli); + else + printk("!!! rcu didn't free\n"); +} + + + + +static int do_addrem_dlimit(uint32_t id, const char __user *name, + uint32_t flags, int add) +{ + struct path path; + int ret; + + ret = user_lpath(name, &path); + if (!ret) { + struct super_block *sb; + struct dl_info *dli; + + ret = -EINVAL; + if (!path.dentry->d_inode) + goto out_release; + if (!(sb = path.dentry->d_inode->i_sb)) + goto out_release; + + if (add) { + dli = __alloc_dl_info(sb, id); + spin_lock(&dl_info_hash_lock); + + ret = -EEXIST; + if (__lookup_dl_info(sb, id)) + goto out_unlock; + __hash_dl_info(dli); + dli = NULL; + } else { + spin_lock(&dl_info_hash_lock); + dli = __lookup_dl_info(sb, id); + + ret = -ESRCH; + if (!dli) + goto out_unlock; + __unhash_dl_info(dli); + } + ret = 0; + out_unlock: + spin_unlock(&dl_info_hash_lock); + if (add && dli) + __dealloc_dl_info(dli); + out_release: + path_put(&path); + } + return ret; +} + +int vc_add_dlimit(uint32_t id, void __user *data) +{ + struct vcmd_ctx_dlimit_base_v0 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 1); +} + +int vc_rem_dlimit(uint32_t id, void __user *data) +{ + struct vcmd_ctx_dlimit_base_v0 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 0); +} + +#ifdef CONFIG_COMPAT + +int vc_add_dlimit_x32(uint32_t id, void __user *data) +{ + struct vcmd_ctx_dlimit_base_v0_x32 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_addrem_dlimit(id, + compat_ptr(vc_data.name_ptr), vc_data.flags, 1); +} + +int vc_rem_dlimit_x32(uint32_t id, void __user *data) +{ + struct vcmd_ctx_dlimit_base_v0_x32 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_addrem_dlimit(id, + compat_ptr(vc_data.name_ptr), vc_data.flags, 0); +} + +#endif /* CONFIG_COMPAT */ + + +static inline +int do_set_dlimit(uint32_t id, const char __user *name, + uint32_t space_used, uint32_t space_total, + uint32_t inodes_used, uint32_t inodes_total, + uint32_t reserved, uint32_t flags) +{ + struct path path; + int ret; + + ret = user_lpath(name, &path); + if (!ret) { + struct super_block *sb; + struct dl_info *dli; + + ret = -EINVAL; + if (!path.dentry->d_inode) + goto out_release; + if (!(sb = path.dentry->d_inode->i_sb)) + goto out_release; + + /* sanity checks */ + if ((reserved != CDLIM_KEEP && + reserved > 100) || + (inodes_used != CDLIM_KEEP && + inodes_used > inodes_total) || + (space_used != CDLIM_KEEP && + space_used > space_total)) + goto out_release; + + ret = -ESRCH; + dli = locate_dl_info(sb, id); + if (!dli) + goto out_release; + + spin_lock(&dli->dl_lock); + + if (inodes_used != CDLIM_KEEP) + dli->dl_inodes_used = inodes_used; + if (inodes_total != CDLIM_KEEP) + dli->dl_inodes_total = inodes_total; + if (space_used != CDLIM_KEEP) + dli->dl_space_used = dlimit_space_32to64( + space_used, flags, 
DLIMS_USED); + + if (space_total == CDLIM_INFINITY) + dli->dl_space_total = DLIM_INFINITY; + else if (space_total != CDLIM_KEEP) + dli->dl_space_total = dlimit_space_32to64( + space_total, flags, DLIMS_TOTAL); + + if (reserved != CDLIM_KEEP) + dli->dl_nrlmult = (1 << 10) * (100 - reserved) / 100; + + spin_unlock(&dli->dl_lock); + + put_dl_info(dli); + ret = 0; + + out_release: + path_put(&path); + } + return ret; +} + +int vc_set_dlimit(uint32_t id, void __user *data) +{ + struct vcmd_ctx_dlimit_v0 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_set_dlimit(id, vc_data.name, + vc_data.space_used, vc_data.space_total, + vc_data.inodes_used, vc_data.inodes_total, + vc_data.reserved, vc_data.flags); +} + +#ifdef CONFIG_COMPAT + +int vc_set_dlimit_x32(uint32_t id, void __user *data) +{ + struct vcmd_ctx_dlimit_v0_x32 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_set_dlimit(id, compat_ptr(vc_data.name_ptr), + vc_data.space_used, vc_data.space_total, + vc_data.inodes_used, vc_data.inodes_total, + vc_data.reserved, vc_data.flags); +} + +#endif /* CONFIG_COMPAT */ + + +static inline +int do_get_dlimit(uint32_t id, const char __user *name, + uint32_t *space_used, uint32_t *space_total, + uint32_t *inodes_used, uint32_t *inodes_total, + uint32_t *reserved, uint32_t *flags) +{ + struct path path; + int ret; + + ret = user_lpath(name, &path); + if (!ret) { + struct super_block *sb; + struct dl_info *dli; + + ret = -EINVAL; + if (!path.dentry->d_inode) + goto out_release; + if (!(sb = path.dentry->d_inode->i_sb)) + goto out_release; + + ret = -ESRCH; + dli = locate_dl_info(sb, id); + if (!dli) + goto out_release; + + spin_lock(&dli->dl_lock); + *inodes_used = dli->dl_inodes_used; + *inodes_total = dli->dl_inodes_total; + + *space_used = dlimit_space_64to32( + dli->dl_space_used, flags, DLIMS_USED); + + if (dli->dl_space_total == DLIM_INFINITY) + *space_total = CDLIM_INFINITY; + else + *space_total = dlimit_space_64to32( + dli->dl_space_total, flags, DLIMS_TOTAL); + + *reserved = 100 - ((dli->dl_nrlmult * 100 + 512) >> 10); + spin_unlock(&dli->dl_lock); + + put_dl_info(dli); + ret = -EFAULT; + + ret = 0; + out_release: + path_put(&path); + } + return ret; +} + + +int vc_get_dlimit(uint32_t id, void __user *data) +{ + struct vcmd_ctx_dlimit_v0 vc_data; + int ret; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = do_get_dlimit(id, vc_data.name, + &vc_data.space_used, &vc_data.space_total, + &vc_data.inodes_used, &vc_data.inodes_total, + &vc_data.reserved, &vc_data.flags); + if (ret) + return ret; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +#ifdef CONFIG_COMPAT + +int vc_get_dlimit_x32(uint32_t id, void __user *data) +{ + struct vcmd_ctx_dlimit_v0_x32 vc_data; + int ret; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = do_get_dlimit(id, compat_ptr(vc_data.name_ptr), + &vc_data.space_used, &vc_data.space_total, + &vc_data.inodes_used, &vc_data.inodes_total, + &vc_data.reserved, &vc_data.flags); + if (ret) + return ret; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +#endif /* CONFIG_COMPAT */ + + +void vx_vsi_statfs(struct super_block *sb, struct kstatfs *buf) +{ + struct dl_info *dli; + __u64 blimit, bfree, bavail; + __u32 ifree; + + dli = locate_dl_info(sb, dx_current_tag()); + if (!dli) + return; + + spin_lock(&dli->dl_lock); + if 
(dli->dl_inodes_total == (unsigned long)DLIM_INFINITY) + goto no_ilim; + + /* reduce max inodes available to limit */ + if (buf->f_files > dli->dl_inodes_total) + buf->f_files = dli->dl_inodes_total; + + ifree = dli->dl_inodes_total - dli->dl_inodes_used; + /* reduce free inodes to min */ + if (ifree < buf->f_ffree) + buf->f_ffree = ifree; + +no_ilim: + if (dli->dl_space_total == DLIM_INFINITY) + goto no_blim; + + blimit = dli->dl_space_total >> sb->s_blocksize_bits; + + if (dli->dl_space_total < dli->dl_space_used) + bfree = 0; + else + bfree = (dli->dl_space_total - dli->dl_space_used) + >> sb->s_blocksize_bits; + + bavail = ((dli->dl_space_total >> 10) * dli->dl_nrlmult); + if (bavail < dli->dl_space_used) + bavail = 0; + else + bavail = (bavail - dli->dl_space_used) + >> sb->s_blocksize_bits; + + /* reduce max space available to limit */ + if (buf->f_blocks > blimit) + buf->f_blocks = blimit; + + /* reduce free space to min */ + if (bfree < buf->f_bfree) + buf->f_bfree = bfree; + + /* reduce avail space to min */ + if (bavail < buf->f_bavail) + buf->f_bavail = bavail; + +no_blim: + spin_unlock(&dli->dl_lock); + put_dl_info(dli); + + return; +} + +#include + +EXPORT_SYMBOL_GPL(locate_dl_info); +EXPORT_SYMBOL_GPL(rcu_free_dl_info); + diff -NurpP --minimal linux-3.10.19/kernel/vserver/helper.c linux-3.10.19-vs2.3.6.8/kernel/vserver/helper.c --- linux-3.10.19/kernel/vserver/helper.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/helper.c 2013-08-23 00:55:48.000000000 +0000 @@ -0,0 +1,242 @@ +/* + * linux/kernel/vserver/helper.c + * + * Virtual Context Support + * + * Copyright (C) 2004-2007 Herbert Pötzl + * + * V0.01 basic helper + * + */ + +#include +#include +#include +#include +#include + + +char vshelper_path[255] = "/sbin/vshelper"; + +static int vshelper_init(struct subprocess_info *info, struct cred *new_cred) +{ + current->flags &= ~PF_NO_SETAFFINITY; + return 0; +} + +static int vs_call_usermodehelper(char *path, char **argv, char **envp, int wait) +{ + struct subprocess_info *info; + gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; + + info = call_usermodehelper_setup(path, argv, envp, gfp_mask, + vshelper_init, NULL, NULL); + if (info == NULL) + return -ENOMEM; + + return call_usermodehelper_exec(info, wait); +} + +static int do_vshelper(char *name, char *argv[], char *envp[], int sync) +{ + int ret; + + if ((ret = vs_call_usermodehelper(name, argv, envp, + sync ? UMH_WAIT_PROC : UMH_WAIT_EXEC))) { + printk(KERN_WARNING "%s: (%s %s) returned %s with %d\n", + name, argv[1], argv[2], + sync ? "sync" : "async", ret); + } + vxdprintk(VXD_CBIT(switch, 4), + "%s: (%s %s) returned %s with %d", + name, argv[1], argv[2], sync ? "sync" : "async", ret); + return ret; +} + +/* + * vshelper path is set via /proc/sys + * invoked by vserver sys_reboot(), with + * the following arguments + * + * argv [0] = vshelper_path; + * argv [1] = action: "restart", "halt", "poweroff", ... 
+ * argv [2] = context identifier + * + * envp [*] = type-specific parameters + */ + +long vs_reboot_helper(struct vx_info *vxi, int cmd, void __user *arg) +{ + char id_buf[8], cmd_buf[16]; + char uid_buf[16], pid_buf[16]; + int ret; + + char *argv[] = {vshelper_path, NULL, id_buf, 0}; + char *envp[] = {"HOME=/", "TERM=linux", + "PATH=/sbin:/usr/sbin:/bin:/usr/bin", + uid_buf, pid_buf, cmd_buf, 0}; + + if (vx_info_state(vxi, VXS_HELPER)) + return -EAGAIN; + vxi->vx_state |= VXS_HELPER; + + snprintf(id_buf, sizeof(id_buf), "%d", vxi->vx_id); + + snprintf(cmd_buf, sizeof(cmd_buf), "VS_CMD=%08x", cmd); + snprintf(uid_buf, sizeof(uid_buf), "VS_UID=%d", + from_kuid(&init_user_ns, current_uid())); + snprintf(pid_buf, sizeof(pid_buf), "VS_PID=%d", current->pid); + + switch (cmd) { + case LINUX_REBOOT_CMD_RESTART: + argv[1] = "restart"; + break; + + case LINUX_REBOOT_CMD_HALT: + argv[1] = "halt"; + break; + + case LINUX_REBOOT_CMD_POWER_OFF: + argv[1] = "poweroff"; + break; + + case LINUX_REBOOT_CMD_SW_SUSPEND: + argv[1] = "swsusp"; + break; + + case LINUX_REBOOT_CMD_OOM: + argv[1] = "oom"; + break; + + default: + vxi->vx_state &= ~VXS_HELPER; + return 0; + } + + ret = do_vshelper(vshelper_path, argv, envp, 0); + vxi->vx_state &= ~VXS_HELPER; + __wakeup_vx_info(vxi); + return (ret) ? -EPERM : 0; +} + + +long vs_reboot(unsigned int cmd, void __user *arg) +{ + struct vx_info *vxi = current_vx_info(); + long ret = 0; + + vxdprintk(VXD_CBIT(misc, 5), + "vs_reboot(%p[#%d],%u)", + vxi, vxi ? vxi->vx_id : 0, cmd); + + ret = vs_reboot_helper(vxi, cmd, arg); + if (ret) + return ret; + + vxi->reboot_cmd = cmd; + if (vx_info_flags(vxi, VXF_REBOOT_KILL, 0)) { + switch (cmd) { + case LINUX_REBOOT_CMD_RESTART: + case LINUX_REBOOT_CMD_HALT: + case LINUX_REBOOT_CMD_POWER_OFF: + vx_info_kill(vxi, 0, SIGKILL); + vx_info_kill(vxi, 1, SIGKILL); + default: + break; + } + } + return 0; +} + +long vs_oom_action(unsigned int cmd) +{ + struct vx_info *vxi = current_vx_info(); + long ret = 0; + + vxdprintk(VXD_CBIT(misc, 5), + "vs_oom_action(%p[#%d],%u)", + vxi, vxi ? 
vxi->vx_id : 0, cmd); + + ret = vs_reboot_helper(vxi, cmd, NULL); + if (ret) + return ret; + + vxi->reboot_cmd = cmd; + if (vx_info_flags(vxi, VXF_REBOOT_KILL, 0)) { + vx_info_kill(vxi, 0, SIGKILL); + vx_info_kill(vxi, 1, SIGKILL); + } + return 0; +} + +/* + * argv [0] = vshelper_path; + * argv [1] = action: "startup", "shutdown" + * argv [2] = context identifier + * + * envp [*] = type-specific parameters + */ + +long vs_state_change(struct vx_info *vxi, unsigned int cmd) +{ + char id_buf[8], cmd_buf[16]; + char *argv[] = {vshelper_path, NULL, id_buf, 0}; + char *envp[] = {"HOME=/", "TERM=linux", + "PATH=/sbin:/usr/sbin:/bin:/usr/bin", cmd_buf, 0}; + + if (!vx_info_flags(vxi, VXF_SC_HELPER, 0)) + return 0; + + snprintf(id_buf, sizeof(id_buf), "%d", vxi->vx_id); + snprintf(cmd_buf, sizeof(cmd_buf), "VS_CMD=%08x", cmd); + + switch (cmd) { + case VSC_STARTUP: + argv[1] = "startup"; + break; + case VSC_SHUTDOWN: + argv[1] = "shutdown"; + break; + default: + return 0; + } + + return do_vshelper(vshelper_path, argv, envp, 1); +} + + +/* + * argv [0] = vshelper_path; + * argv [1] = action: "netup", "netdown" + * argv [2] = context identifier + * + * envp [*] = type-specific parameters + */ + +long vs_net_change(struct nx_info *nxi, unsigned int cmd) +{ + char id_buf[8], cmd_buf[16]; + char *argv[] = {vshelper_path, NULL, id_buf, 0}; + char *envp[] = {"HOME=/", "TERM=linux", + "PATH=/sbin:/usr/sbin:/bin:/usr/bin", cmd_buf, 0}; + + if (!nx_info_flags(nxi, NXF_SC_HELPER, 0)) + return 0; + + snprintf(id_buf, sizeof(id_buf), "%d", nxi->nx_id); + snprintf(cmd_buf, sizeof(cmd_buf), "VS_CMD=%08x", cmd); + + switch (cmd) { + case VSC_NETUP: + argv[1] = "netup"; + break; + case VSC_NETDOWN: + argv[1] = "netdown"; + break; + default: + return 0; + } + + return do_vshelper(vshelper_path, argv, envp, 1); +} + diff -NurpP --minimal linux-3.10.19/kernel/vserver/history.c linux-3.10.19-vs2.3.6.8/kernel/vserver/history.c --- linux-3.10.19/kernel/vserver/history.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/history.c 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,258 @@ +/* + * kernel/vserver/history.c + * + * Virtual Context History Backtrace + * + * Copyright (C) 2004-2007 Herbert Pötzl + * + * V0.01 basic structure + * V0.02 hash/unhash and trace + * V0.03 preemption fixes + * + */ + +#include +#include + +#include +#include +#include +#include + + +#ifdef CONFIG_VSERVER_HISTORY +#define VXH_SIZE CONFIG_VSERVER_HISTORY_SIZE +#else +#define VXH_SIZE 64 +#endif + +struct _vx_history { + unsigned int counter; + + struct _vx_hist_entry entry[VXH_SIZE + 1]; +}; + + +DEFINE_PER_CPU(struct _vx_history, vx_history_buffer); + +unsigned volatile int vxh_active = 1; + +static atomic_t sequence = ATOMIC_INIT(0); + + +/* vxh_advance() + + * requires disabled preemption */ + +struct _vx_hist_entry *vxh_advance(void *loc) +{ + unsigned int cpu = smp_processor_id(); + struct _vx_history *hist = &per_cpu(vx_history_buffer, cpu); + struct _vx_hist_entry *entry; + unsigned int index; + + index = vxh_active ? (hist->counter++ % VXH_SIZE) : VXH_SIZE; + entry = &hist->entry[index]; + + entry->seq = atomic_inc_return(&sequence); + entry->loc = loc; + return entry; +} + +EXPORT_SYMBOL_GPL(vxh_advance); + + +#define VXH_LOC_FMTS "(#%04x,*%d):%p" + +#define VXH_LOC_ARGS(e) (e)->seq, cpu, (e)->loc + + +#define VXH_VXI_FMTS "%p[#%d,%d.%d]" + +#define VXH_VXI_ARGS(e) (e)->vxi.ptr, \ + (e)->vxi.ptr ? (e)->vxi.xid : 0, \ + (e)->vxi.ptr ? (e)->vxi.usecnt : 0, \ + (e)->vxi.ptr ? 
(e)->vxi.tasks : 0 + +void vxh_dump_entry(struct _vx_hist_entry *e, unsigned cpu) +{ + switch (e->type) { + case VXH_THROW_OOPS: + printk( VXH_LOC_FMTS " oops \n", VXH_LOC_ARGS(e)); + break; + + case VXH_GET_VX_INFO: + case VXH_PUT_VX_INFO: + printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS "\n", + VXH_LOC_ARGS(e), + (e->type == VXH_GET_VX_INFO) ? "get" : "put", + VXH_VXI_ARGS(e)); + break; + + case VXH_INIT_VX_INFO: + case VXH_SET_VX_INFO: + case VXH_CLR_VX_INFO: + printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS " @%p\n", + VXH_LOC_ARGS(e), + (e->type == VXH_INIT_VX_INFO) ? "init" : + ((e->type == VXH_SET_VX_INFO) ? "set" : "clr"), + VXH_VXI_ARGS(e), e->sc.data); + break; + + case VXH_CLAIM_VX_INFO: + case VXH_RELEASE_VX_INFO: + printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS " @%p\n", + VXH_LOC_ARGS(e), + (e->type == VXH_CLAIM_VX_INFO) ? "claim" : "release", + VXH_VXI_ARGS(e), e->sc.data); + break; + + case VXH_ALLOC_VX_INFO: + case VXH_DEALLOC_VX_INFO: + printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS "\n", + VXH_LOC_ARGS(e), + (e->type == VXH_ALLOC_VX_INFO) ? "alloc" : "dealloc", + VXH_VXI_ARGS(e)); + break; + + case VXH_HASH_VX_INFO: + case VXH_UNHASH_VX_INFO: + printk( VXH_LOC_FMTS " __%s_vx_info " VXH_VXI_FMTS "\n", + VXH_LOC_ARGS(e), + (e->type == VXH_HASH_VX_INFO) ? "hash" : "unhash", + VXH_VXI_ARGS(e)); + break; + + case VXH_LOC_VX_INFO: + case VXH_LOOKUP_VX_INFO: + case VXH_CREATE_VX_INFO: + printk( VXH_LOC_FMTS " __%s_vx_info [#%d] -> " VXH_VXI_FMTS "\n", + VXH_LOC_ARGS(e), + (e->type == VXH_CREATE_VX_INFO) ? "create" : + ((e->type == VXH_LOC_VX_INFO) ? "loc" : "lookup"), + e->ll.arg, VXH_VXI_ARGS(e)); + break; + } +} + +static void __vxh_dump_history(void) +{ + unsigned int i, cpu; + + printk("History:\tSEQ: %8x\tNR_CPUS: %d\n", + atomic_read(&sequence), NR_CPUS); + + for (i = 0; i < VXH_SIZE; i++) { + for_each_online_cpu(cpu) { + struct _vx_history *hist = + &per_cpu(vx_history_buffer, cpu); + unsigned int index = (hist->counter - i) % VXH_SIZE; + struct _vx_hist_entry *entry = &hist->entry[index]; + + vxh_dump_entry(entry, cpu); + } + } +} + +void vxh_dump_history(void) +{ + vxh_active = 0; +#ifdef CONFIG_SMP + local_irq_enable(); + smp_send_stop(); + local_irq_disable(); +#endif + __vxh_dump_history(); +} + + +/* vserver syscall commands below here */ + + +int vc_dump_history(uint32_t id) +{ + vxh_active = 0; + __vxh_dump_history(); + vxh_active = 1; + + return 0; +} + + +int do_read_history(struct __user _vx_hist_entry *data, + int cpu, uint32_t *index, uint32_t *count) +{ + int pos, ret = 0; + struct _vx_history *hist = &per_cpu(vx_history_buffer, cpu); + int end = hist->counter; + int start = end - VXH_SIZE + 2; + int idx = *index; + + /* special case: get current pos */ + if (!*count) { + *index = end; + return 0; + } + + /* have we lost some data? */ + if (idx < start) + idx = start; + + for (pos = 0; (pos < *count) && (idx < end); pos++, idx++) { + struct _vx_hist_entry *entry = + &hist->entry[idx % VXH_SIZE]; + + /* send entry to userspace */ + ret = copy_to_user(&data[pos], entry, sizeof(*entry)); + if (ret) + break; + } + /* save new index and count */ + *index = idx; + *count = pos; + return ret ? 
ret : (*index < end); +} + +int vc_read_history(uint32_t id, void __user *data) +{ + struct vcmd_read_history_v0 vc_data; + int ret; + + if (id >= NR_CPUS) + return -EINVAL; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = do_read_history((struct __user _vx_hist_entry *)vc_data.data, + id, &vc_data.index, &vc_data.count); + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return ret; +} + +#ifdef CONFIG_COMPAT + +int vc_read_history_x32(uint32_t id, void __user *data) +{ + struct vcmd_read_history_v0_x32 vc_data; + int ret; + + if (id >= NR_CPUS) + return -EINVAL; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = do_read_history((struct __user _vx_hist_entry *) + compat_ptr(vc_data.data_ptr), + id, &vc_data.index, &vc_data.count); + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return ret; +} + +#endif /* CONFIG_COMPAT */ + diff -NurpP --minimal linux-3.10.19/kernel/vserver/inet.c linux-3.10.19-vs2.3.6.8/kernel/vserver/inet.c --- linux-3.10.19/kernel/vserver/inet.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/inet.c 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,236 @@ + +#include +#include +#include +#include +#include +#include +#include +#include + + +int nx_v4_addr_conflict(struct nx_info *nxi1, struct nx_info *nxi2) +{ + int ret = 0; + + if (!nxi1 || !nxi2 || nxi1 == nxi2) + ret = 1; + else { + struct nx_addr_v4 *ptr; + unsigned long irqflags; + + spin_lock_irqsave(&nxi1->addr_lock, irqflags); + for (ptr = &nxi1->v4; ptr; ptr = ptr->next) { + if (v4_nx_addr_in_nx_info(nxi2, ptr, -1)) { + ret = 1; + break; + } + } + spin_unlock_irqrestore(&nxi1->addr_lock, irqflags); + } + + vxdprintk(VXD_CBIT(net, 2), + "nx_v4_addr_conflict(%p,%p): %d", + nxi1, nxi2, ret); + + return ret; +} + + +#ifdef CONFIG_IPV6 + +int nx_v6_addr_conflict(struct nx_info *nxi1, struct nx_info *nxi2) +{ + int ret = 0; + + if (!nxi1 || !nxi2 || nxi1 == nxi2) + ret = 1; + else { + struct nx_addr_v6 *ptr; + unsigned long irqflags; + + spin_lock_irqsave(&nxi1->addr_lock, irqflags); + for (ptr = &nxi1->v6; ptr; ptr = ptr->next) { + if (v6_nx_addr_in_nx_info(nxi2, ptr, -1)) { + ret = 1; + break; + } + } + spin_unlock_irqrestore(&nxi1->addr_lock, irqflags); + } + + vxdprintk(VXD_CBIT(net, 2), + "nx_v6_addr_conflict(%p,%p): %d", + nxi1, nxi2, ret); + + return ret; +} + +#endif + +int v4_dev_in_nx_info(struct net_device *dev, struct nx_info *nxi) +{ + struct in_device *in_dev; + struct in_ifaddr **ifap; + struct in_ifaddr *ifa; + int ret = 0; + + if (!dev) + goto out; + in_dev = in_dev_get(dev); + if (!in_dev) + goto out; + + for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL; + ifap = &ifa->ifa_next) { + if (v4_addr_in_nx_info(nxi, ifa->ifa_local, NXA_MASK_SHOW)) { + ret = 1; + break; + } + } + in_dev_put(in_dev); +out: + return ret; +} + + +#ifdef CONFIG_IPV6 + +int v6_dev_in_nx_info(struct net_device *dev, struct nx_info *nxi) +{ + struct inet6_dev *in_dev; + struct inet6_ifaddr *ifa; + int ret = 0; + + if (!dev) + goto out; + in_dev = in6_dev_get(dev); + if (!in_dev) + goto out; + + // for (ifap = &in_dev->addr_list; (ifa = *ifap) != NULL; + list_for_each_entry(ifa, &in_dev->addr_list, if_list) { + if (v6_addr_in_nx_info(nxi, &ifa->addr, -1)) { + ret = 1; + break; + } + } + in6_dev_put(in_dev); +out: + return ret; +} + +#endif + +int dev_in_nx_info(struct net_device *dev, struct nx_info *nxi) +{ + int ret = 1; + + if (!nxi) + goto out; + if (nxi->v4.type && 
v4_dev_in_nx_info(dev, nxi)) + goto out; +#ifdef CONFIG_IPV6 + ret = 2; + if (nxi->v6.type && v6_dev_in_nx_info(dev, nxi)) + goto out; +#endif + ret = 0; +out: + vxdprintk(VXD_CBIT(net, 3), + "dev_in_nx_info(%p,%p[#%d]) = %d", + dev, nxi, nxi ? nxi->nx_id : 0, ret); + return ret; +} + +struct rtable *ip_v4_find_src(struct net *net, struct nx_info *nxi, + struct flowi4 *fl4) +{ + struct rtable *rt; + + if (!nxi) + return NULL; + + /* FIXME: handle lback only case */ + if (!NX_IPV4(nxi)) + return ERR_PTR(-EPERM); + + vxdprintk(VXD_CBIT(net, 4), + "ip_v4_find_src(%p[#%u]) " NIPQUAD_FMT " -> " NIPQUAD_FMT, + nxi, nxi ? nxi->nx_id : 0, + NIPQUAD(fl4->saddr), NIPQUAD(fl4->daddr)); + + /* single IP is unconditional */ + if (nx_info_flags(nxi, NXF_SINGLE_IP, 0) && + (fl4->saddr == INADDR_ANY)) + fl4->saddr = nxi->v4.ip[0].s_addr; + + if (fl4->saddr == INADDR_ANY) { + struct nx_addr_v4 *ptr; + __be32 found = 0; + + rt = __ip_route_output_key(net, fl4); + if (!IS_ERR(rt)) { + found = fl4->saddr; + ip_rt_put(rt); + vxdprintk(VXD_CBIT(net, 4), + "ip_v4_find_src(%p[#%u]) rok[%u]: " NIPQUAD_FMT, + nxi, nxi ? nxi->nx_id : 0, fl4->flowi4_oif, NIPQUAD(found)); + if (v4_addr_in_nx_info(nxi, found, NXA_MASK_BIND)) + goto found; + } + + WARN_ON_ONCE(in_irq()); + spin_lock_bh(&nxi->addr_lock); + for (ptr = &nxi->v4; ptr; ptr = ptr->next) { + __be32 primary = ptr->ip[0].s_addr; + __be32 mask = ptr->mask.s_addr; + __be32 neta = primary & mask; + + vxdprintk(VXD_CBIT(net, 4), "ip_v4_find_src(%p[#%u]) chk: " + NIPQUAD_FMT "/" NIPQUAD_FMT "/" NIPQUAD_FMT, + nxi, nxi ? nxi->nx_id : 0, NIPQUAD(primary), + NIPQUAD(mask), NIPQUAD(neta)); + if ((found & mask) != neta) + continue; + + fl4->saddr = primary; + rt = __ip_route_output_key(net, fl4); + vxdprintk(VXD_CBIT(net, 4), + "ip_v4_find_src(%p[#%u]) rok[%u]: " NIPQUAD_FMT, + nxi, nxi ? nxi->nx_id : 0, fl4->flowi4_oif, NIPQUAD(primary)); + if (!IS_ERR(rt)) { + found = fl4->saddr; + ip_rt_put(rt); + if (found == primary) + goto found_unlock; + } + } + /* still no source ip? */ + found = ipv4_is_loopback(fl4->daddr) + ? 
IPI_LOOPBACK : nxi->v4.ip[0].s_addr; + found_unlock: + spin_unlock_bh(&nxi->addr_lock); + found: + /* assign src ip to flow */ + fl4->saddr = found; + + } else { + if (!v4_addr_in_nx_info(nxi, fl4->saddr, NXA_MASK_BIND)) + return ERR_PTR(-EPERM); + } + + if (nx_info_flags(nxi, NXF_LBACK_REMAP, 0)) { + if (ipv4_is_loopback(fl4->daddr)) + fl4->daddr = nxi->v4_lback.s_addr; + if (ipv4_is_loopback(fl4->saddr)) + fl4->saddr = nxi->v4_lback.s_addr; + } else if (ipv4_is_loopback(fl4->daddr) && + !nx_info_flags(nxi, NXF_LBACK_ALLOW, 0)) + return ERR_PTR(-EPERM); + + return NULL; +} + +EXPORT_SYMBOL_GPL(ip_v4_find_src); + diff -NurpP --minimal linux-3.10.19/kernel/vserver/init.c linux-3.10.19-vs2.3.6.8/kernel/vserver/init.c --- linux-3.10.19/kernel/vserver/init.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/init.c 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,45 @@ +/* + * linux/kernel/init.c + * + * Virtual Server Init + * + * Copyright (C) 2004-2007 Herbert Pötzl + * + * V0.01 basic structure + * + */ + +#include + +int vserver_register_sysctl(void); +void vserver_unregister_sysctl(void); + + +static int __init init_vserver(void) +{ + int ret = 0; + +#ifdef CONFIG_VSERVER_DEBUG + vserver_register_sysctl(); +#endif + return ret; +} + + +static void __exit exit_vserver(void) +{ + +#ifdef CONFIG_VSERVER_DEBUG + vserver_unregister_sysctl(); +#endif + return; +} + +/* FIXME: GFP_ZONETYPES gone +long vx_slab[GFP_ZONETYPES]; */ +long vx_area; + + +module_init(init_vserver); +module_exit(exit_vserver); + diff -NurpP --minimal linux-3.10.19/kernel/vserver/inode.c linux-3.10.19-vs2.3.6.8/kernel/vserver/inode.c --- linux-3.10.19/kernel/vserver/inode.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/inode.c 2013-08-22 23:14:01.000000000 +0000 @@ -0,0 +1,440 @@ +/* + * linux/kernel/vserver/inode.c + * + * Virtual Server: File System Support + * + * Copyright (C) 2004-2007 Herbert Pötzl + * + * V0.01 separated from vcontext V0.05 + * V0.02 moved to tag (instead of xid) + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include <../../fs/proc/internal.h> + + +static int __vc_get_iattr(struct inode *in, uint32_t *tag, uint32_t *flags, uint32_t *mask) +{ + struct proc_dir_entry *entry; + + if (!in || !in->i_sb) + return -ESRCH; + + *flags = IATTR_TAG + | (IS_IMMUTABLE(in) ? IATTR_IMMUTABLE : 0) + | (IS_IXUNLINK(in) ? IATTR_IXUNLINK : 0) + | (IS_BARRIER(in) ? IATTR_BARRIER : 0) + | (IS_COW(in) ? IATTR_COW : 0); + *mask = IATTR_IXUNLINK | IATTR_IMMUTABLE | IATTR_COW; + + if (S_ISDIR(in->i_mode)) + *mask |= IATTR_BARRIER; + + if (IS_TAGGED(in)) { + *tag = i_tag_read(in); + *mask |= IATTR_TAG; + } + + switch (in->i_sb->s_magic) { + case PROC_SUPER_MAGIC: + entry = PROC_I(in)->pde; + + /* check for specific inodes? 
*/ + if (entry) + *mask |= IATTR_FLAGS; + if (entry) + *flags |= (entry->vx_flags & IATTR_FLAGS); + else + *flags |= (PROC_I(in)->vx_flags & IATTR_FLAGS); + break; + + case DEVPTS_SUPER_MAGIC: + *tag = i_tag_read(in); + *mask |= IATTR_TAG; + break; + + default: + break; + } + return 0; +} + +int vc_get_iattr(void __user *data) +{ + struct path path; + struct vcmd_ctx_iattr_v1 vc_data = { .tag = -1 }; + int ret; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = user_lpath(vc_data.name, &path); + if (!ret) { + ret = __vc_get_iattr(path.dentry->d_inode, + &vc_data.tag, &vc_data.flags, &vc_data.mask); + path_put(&path); + } + if (ret) + return ret; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + ret = -EFAULT; + return ret; +} + +#ifdef CONFIG_COMPAT + +int vc_get_iattr_x32(void __user *data) +{ + struct path path; + struct vcmd_ctx_iattr_v1_x32 vc_data = { .tag = -1 }; + int ret; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = user_lpath(compat_ptr(vc_data.name_ptr), &path); + if (!ret) { + ret = __vc_get_iattr(path.dentry->d_inode, + &vc_data.tag, &vc_data.flags, &vc_data.mask); + path_put(&path); + } + if (ret) + return ret; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + ret = -EFAULT; + return ret; +} + +#endif /* CONFIG_COMPAT */ + + +int vc_fget_iattr(uint32_t fd, void __user *data) +{ + struct file *filp; + struct vcmd_ctx_fiattr_v0 vc_data = { .tag = -1 }; + int ret; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + filp = fget(fd); + if (!filp || !filp->f_dentry || !filp->f_dentry->d_inode) + return -EBADF; + + ret = __vc_get_iattr(filp->f_dentry->d_inode, + &vc_data.tag, &vc_data.flags, &vc_data.mask); + + fput(filp); + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + ret = -EFAULT; + return ret; +} + + +static int __vc_set_iattr(struct dentry *de, uint32_t *tag, uint32_t *flags, uint32_t *mask) +{ + struct inode *in = de->d_inode; + int error = 0, is_proc = 0, has_tag = 0; + struct iattr attr = { 0 }; + + if (!in || !in->i_sb) + return -ESRCH; + + is_proc = (in->i_sb->s_magic == PROC_SUPER_MAGIC); + if ((*mask & IATTR_FLAGS) && !is_proc) + return -EINVAL; + + has_tag = IS_TAGGED(in) || + (in->i_sb->s_magic == DEVPTS_SUPER_MAGIC); + if ((*mask & IATTR_TAG) && !has_tag) + return -EINVAL; + + mutex_lock(&in->i_mutex); + if (*mask & IATTR_TAG) { + attr.ia_tag = make_ktag(&init_user_ns, *tag); + attr.ia_valid |= ATTR_TAG; + } + + if (*mask & IATTR_FLAGS) { + struct proc_dir_entry *entry = PROC_I(in)->pde; + unsigned int iflags = PROC_I(in)->vx_flags; + + iflags = (iflags & ~(*mask & IATTR_FLAGS)) + | (*flags & IATTR_FLAGS); + PROC_I(in)->vx_flags = iflags; + if (entry) + entry->vx_flags = iflags; + } + + if (*mask & (IATTR_IMMUTABLE | IATTR_IXUNLINK | + IATTR_BARRIER | IATTR_COW)) { + int iflags = in->i_flags; + int vflags = in->i_vflags; + + if (*mask & IATTR_IMMUTABLE) { + if (*flags & IATTR_IMMUTABLE) + iflags |= S_IMMUTABLE; + else + iflags &= ~S_IMMUTABLE; + } + if (*mask & IATTR_IXUNLINK) { + if (*flags & IATTR_IXUNLINK) + iflags |= S_IXUNLINK; + else + iflags &= ~S_IXUNLINK; + } + if (S_ISDIR(in->i_mode) && (*mask & IATTR_BARRIER)) { + if (*flags & IATTR_BARRIER) + vflags |= V_BARRIER; + else + vflags &= ~V_BARRIER; + } + if (S_ISREG(in->i_mode) && (*mask & IATTR_COW)) { + if (*flags & IATTR_COW) + vflags |= V_COW; + else + vflags &= ~V_COW; + } + if (in->i_op && in->i_op->sync_flags) { + error = in->i_op->sync_flags(in, iflags, vflags); + if (error) + goto 
out; + } + } + + if (attr.ia_valid) { + if (in->i_op && in->i_op->setattr) + error = in->i_op->setattr(de, &attr); + else { + error = inode_change_ok(in, &attr); + if (!error) { + setattr_copy(in, &attr); + mark_inode_dirty(in); + } + } + } + +out: + mutex_unlock(&in->i_mutex); + return error; +} + +int vc_set_iattr(void __user *data) +{ + struct path path; + struct vcmd_ctx_iattr_v1 vc_data; + int ret; + + if (!capable(CAP_LINUX_IMMUTABLE)) + return -EPERM; + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = user_lpath(vc_data.name, &path); + if (!ret) { + ret = __vc_set_iattr(path.dentry, + &vc_data.tag, &vc_data.flags, &vc_data.mask); + path_put(&path); + } + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + ret = -EFAULT; + return ret; +} + +#ifdef CONFIG_COMPAT + +int vc_set_iattr_x32(void __user *data) +{ + struct path path; + struct vcmd_ctx_iattr_v1_x32 vc_data; + int ret; + + if (!capable(CAP_LINUX_IMMUTABLE)) + return -EPERM; + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = user_lpath(compat_ptr(vc_data.name_ptr), &path); + if (!ret) { + ret = __vc_set_iattr(path.dentry, + &vc_data.tag, &vc_data.flags, &vc_data.mask); + path_put(&path); + } + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + ret = -EFAULT; + return ret; +} + +#endif /* CONFIG_COMPAT */ + +int vc_fset_iattr(uint32_t fd, void __user *data) +{ + struct file *filp; + struct vcmd_ctx_fiattr_v0 vc_data; + int ret; + + if (!capable(CAP_LINUX_IMMUTABLE)) + return -EPERM; + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + filp = fget(fd); + if (!filp || !filp->f_dentry || !filp->f_dentry->d_inode) + return -EBADF; + + ret = __vc_set_iattr(filp->f_dentry, &vc_data.tag, + &vc_data.flags, &vc_data.mask); + + fput(filp); + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return ret; +} + + +enum { Opt_notagcheck, Opt_tag, Opt_notag, Opt_tagid, Opt_err }; + +static match_table_t tokens = { + {Opt_notagcheck, "notagcheck"}, +#ifdef CONFIG_PROPAGATE + {Opt_notag, "notag"}, + {Opt_tag, "tag"}, + {Opt_tagid, "tagid=%u"}, +#endif + {Opt_err, NULL} +}; + + +static void __dx_parse_remove(char *string, char *opt) +{ + char *p = strstr(string, opt); + char *q = p; + + if (p) { + while (*q != '\0' && *q != ',') + q++; + while (*q) + *p++ = *q++; + while (*p) + *p++ = '\0'; + } +} + +int dx_parse_tag(char *string, vtag_t *tag, int remove, int *mnt_flags, + unsigned long *flags) +{ + int set = 0; + substring_t args[MAX_OPT_ARGS]; + int token; + char *s, *p, *opts; +#if defined(CONFIG_PROPAGATE) || defined(CONFIG_VSERVER_DEBUG) + int option = 0; +#endif + + if (!string) + return 0; + s = kstrdup(string, GFP_KERNEL | GFP_ATOMIC); + if (!s) + return 0; + + opts = s; + while ((p = strsep(&opts, ",")) != NULL) { + token = match_token(p, tokens, args); + + switch (token) { +#ifdef CONFIG_PROPAGATE + case Opt_tag: + if (tag) + *tag = 0; + if (remove) + __dx_parse_remove(s, "tag"); + *mnt_flags |= MNT_TAGID; + set |= MNT_TAGID; + break; + case Opt_notag: + if (remove) + __dx_parse_remove(s, "notag"); + *mnt_flags |= MNT_NOTAG; + set |= MNT_NOTAG; + break; + case Opt_tagid: + if (tag && !match_int(args, &option)) + *tag = option; + if (remove) + __dx_parse_remove(s, "tagid"); + *mnt_flags |= MNT_TAGID; + set |= MNT_TAGID; + break; +#endif /* CONFIG_PROPAGATE */ + case Opt_notagcheck: + if (remove) + __dx_parse_remove(s, "notagcheck"); + *flags |= MS_NOTAGCHECK; + set |= MS_NOTAGCHECK; + break; + } + vxdprintk(VXD_CBIT(tag, 
7), + "dx_parse_tag(" VS_Q("%s") "): %d:#%d", + p, token, option); + } + if (set) + strcpy(string, s); + kfree(s); + return set; +} + +#ifdef CONFIG_PROPAGATE + +void __dx_propagate_tag(struct nameidata *nd, struct inode *inode) +{ + vtag_t new_tag = 0; + struct vfsmount *mnt; + int propagate; + + if (!nd) + return; + mnt = nd->path.mnt; + if (!mnt) + return; + + propagate = (mnt->mnt_flags & MNT_TAGID); + if (propagate) + new_tag = mnt->mnt_tag; + + vxdprintk(VXD_CBIT(tag, 7), + "dx_propagate_tag(%p[#%lu.%d]): %d,%d", + inode, inode->i_ino, inode->i_tag, + new_tag, (propagate) ? 1 : 0); + + if (propagate) + i_tag_write(inode, new_tag); +} + +#include + +EXPORT_SYMBOL_GPL(__dx_propagate_tag); + +#endif /* CONFIG_PROPAGATE */ + diff -NurpP --minimal linux-3.10.19/kernel/vserver/limit.c linux-3.10.19-vs2.3.6.8/kernel/vserver/limit.c --- linux-3.10.19/kernel/vserver/limit.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/limit.c 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,345 @@ +/* + * linux/kernel/vserver/limit.c + * + * Virtual Server: Context Limits + * + * Copyright (C) 2004-2010 Herbert Pötzl + * + * V0.01 broken out from vcontext V0.05 + * V0.02 changed vcmds to vxi arg + * V0.03 added memory cgroup support + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + + +const char *vlimit_name[NUM_LIMITS] = { + [RLIMIT_CPU] = "CPU", + [RLIMIT_NPROC] = "NPROC", + [RLIMIT_NOFILE] = "NOFILE", + [RLIMIT_LOCKS] = "LOCKS", + [RLIMIT_SIGPENDING] = "SIGP", + [RLIMIT_MSGQUEUE] = "MSGQ", + + [VLIMIT_NSOCK] = "NSOCK", + [VLIMIT_OPENFD] = "OPENFD", + [VLIMIT_SHMEM] = "SHMEM", + [VLIMIT_DENTRY] = "DENTRY", +}; + +EXPORT_SYMBOL_GPL(vlimit_name); + +#define MASK_ENTRY(x) (1 << (x)) + +const struct vcmd_ctx_rlimit_mask_v0 vlimit_mask = { + /* minimum */ + 0 + , /* softlimit */ + 0 + , /* maximum */ + MASK_ENTRY( RLIMIT_NPROC ) | + MASK_ENTRY( RLIMIT_NOFILE ) | + MASK_ENTRY( RLIMIT_LOCKS ) | + MASK_ENTRY( RLIMIT_MSGQUEUE ) | + + MASK_ENTRY( VLIMIT_NSOCK ) | + MASK_ENTRY( VLIMIT_OPENFD ) | + MASK_ENTRY( VLIMIT_SHMEM ) | + MASK_ENTRY( VLIMIT_DENTRY ) | + 0 +}; + /* accounting only */ +uint32_t account_mask = + MASK_ENTRY( VLIMIT_SEMARY ) | + MASK_ENTRY( VLIMIT_NSEMS ) | + MASK_ENTRY( VLIMIT_MAPPED ) | + 0; + + +static int is_valid_vlimit(int id) +{ + uint32_t mask = vlimit_mask.minimum | + vlimit_mask.softlimit | vlimit_mask.maximum; + return mask & (1 << id); +} + +static int is_accounted_vlimit(int id) +{ + if (is_valid_vlimit(id)) + return 1; + return account_mask & (1 << id); +} + + +static inline uint64_t vc_get_soft(struct vx_info *vxi, int id) +{ + rlim_t limit = __rlim_soft(&vxi->limit, id); + return VX_VLIM(limit); +} + +static inline uint64_t vc_get_hard(struct vx_info *vxi, int id) +{ + rlim_t limit = __rlim_hard(&vxi->limit, id); + return VX_VLIM(limit); +} + +static int do_get_rlimit(struct vx_info *vxi, uint32_t id, + uint64_t *minimum, uint64_t *softlimit, uint64_t *maximum) +{ + if (!is_valid_vlimit(id)) + return -EINVAL; + + if (minimum) + *minimum = CRLIM_UNSET; + if (softlimit) + *softlimit = vc_get_soft(vxi, id); + if (maximum) + *maximum = vc_get_hard(vxi, id); + return 0; +} + +int vc_get_rlimit(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_rlimit_v0 vc_data; + int ret; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = do_get_rlimit(vxi, vc_data.id, + &vc_data.minimum, &vc_data.softlimit, &vc_data.maximum); + if (ret) + return ret; + + if (copy_to_user(data, 
&vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +static int do_set_rlimit(struct vx_info *vxi, uint32_t id, + uint64_t minimum, uint64_t softlimit, uint64_t maximum) +{ + if (!is_valid_vlimit(id)) + return -EINVAL; + + if (maximum != CRLIM_KEEP) + __rlim_hard(&vxi->limit, id) = VX_RLIM(maximum); + if (softlimit != CRLIM_KEEP) + __rlim_soft(&vxi->limit, id) = VX_RLIM(softlimit); + + /* clamp soft limit */ + if (__rlim_soft(&vxi->limit, id) > __rlim_hard(&vxi->limit, id)) + __rlim_soft(&vxi->limit, id) = __rlim_hard(&vxi->limit, id); + + return 0; +} + +int vc_set_rlimit(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_rlimit_v0 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_set_rlimit(vxi, vc_data.id, + vc_data.minimum, vc_data.softlimit, vc_data.maximum); +} + +#ifdef CONFIG_IA32_EMULATION + +int vc_set_rlimit_x32(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_rlimit_v0_x32 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_set_rlimit(vxi, vc_data.id, + vc_data.minimum, vc_data.softlimit, vc_data.maximum); +} + +int vc_get_rlimit_x32(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_rlimit_v0_x32 vc_data; + int ret; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + ret = do_get_rlimit(vxi, vc_data.id, + &vc_data.minimum, &vc_data.softlimit, &vc_data.maximum); + if (ret) + return ret; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +#endif /* CONFIG_IA32_EMULATION */ + + +int vc_get_rlimit_mask(uint32_t id, void __user *data) +{ + if (copy_to_user(data, &vlimit_mask, sizeof(vlimit_mask))) + return -EFAULT; + return 0; +} + + +static inline void vx_reset_hits(struct _vx_limit *limit) +{ + int lim; + + for (lim = 0; lim < NUM_LIMITS; lim++) { + atomic_set(&__rlim_lhit(limit, lim), 0); + } +} + +int vc_reset_hits(struct vx_info *vxi, void __user *data) +{ + vx_reset_hits(&vxi->limit); + return 0; +} + +static inline void vx_reset_minmax(struct _vx_limit *limit) +{ + rlim_t value; + int lim; + + for (lim = 0; lim < NUM_LIMITS; lim++) { + value = __rlim_get(limit, lim); + __rlim_rmax(limit, lim) = value; + __rlim_rmin(limit, lim) = value; + } +} + +int vc_reset_minmax(struct vx_info *vxi, void __user *data) +{ + vx_reset_minmax(&vxi->limit); + return 0; +} + + +int vc_rlimit_stat(struct vx_info *vxi, void __user *data) +{ + struct vcmd_rlimit_stat_v0 vc_data; + struct _vx_limit *limit = &vxi->limit; + int id; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + id = vc_data.id; + if (!is_accounted_vlimit(id)) + return -EINVAL; + + vx_limit_fixup(limit, id); + vc_data.hits = atomic_read(&__rlim_lhit(limit, id)); + vc_data.value = __rlim_get(limit, id); + vc_data.minimum = __rlim_rmin(limit, id); + vc_data.maximum = __rlim_rmax(limit, id); + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + + +void vx_vsi_meminfo(struct sysinfo *val) +{ +#ifdef CONFIG_MEMCG + struct mem_cgroup *mcg; + u64 res_limit, res_usage; + + rcu_read_lock(); + mcg = mem_cgroup_from_task(current); + rcu_read_unlock(); + if (!mcg) + goto out; + + res_limit = mem_cgroup_res_read_u64(mcg, RES_LIMIT); + res_usage = mem_cgroup_res_read_u64(mcg, RES_USAGE); + + if (res_limit != RESOURCE_MAX) + val->totalram = (res_limit >> PAGE_SHIFT); + val->freeram = val->totalram - (res_usage >> PAGE_SHIFT); + val->bufferram = 0; + val->totalhigh = 0; + 
val->freehigh = 0; +out: +#endif /* CONFIG_MEMCG */ + return; +} + +void vx_vsi_swapinfo(struct sysinfo *val) +{ +#ifdef CONFIG_MEMCG +#ifdef CONFIG_MEMCG_SWAP + struct mem_cgroup *mcg; + u64 res_limit, res_usage, memsw_limit, memsw_usage; + s64 swap_limit, swap_usage; + + rcu_read_lock(); + mcg = mem_cgroup_from_task(current); + rcu_read_unlock(); + if (!mcg) + goto out; + + res_limit = mem_cgroup_res_read_u64(mcg, RES_LIMIT); + res_usage = mem_cgroup_res_read_u64(mcg, RES_USAGE); + memsw_limit = mem_cgroup_memsw_read_u64(mcg, RES_LIMIT); + memsw_usage = mem_cgroup_memsw_read_u64(mcg, RES_USAGE); + + /* memory unlimited */ + if (res_limit == RESOURCE_MAX) + goto out; + + swap_limit = memsw_limit - res_limit; + /* we have a swap limit? */ + if (memsw_limit != RESOURCE_MAX) + val->totalswap = swap_limit >> PAGE_SHIFT; + + /* calculate swap part */ + swap_usage = (memsw_usage > res_usage) ? + memsw_usage - res_usage : 0; + + /* total shown minus usage gives free swap */ + val->freeswap = (swap_usage < swap_limit) ? + val->totalswap - (swap_usage >> PAGE_SHIFT) : 0; +out: +#else /* !CONFIG_MEMCG_SWAP */ + val->totalswap = 0; + val->freeswap = 0; +#endif /* !CONFIG_MEMCG_SWAP */ +#endif /* CONFIG_MEMCG */ + return; +} + +long vx_vsi_cached(struct sysinfo *val) +{ + long cache = 0; +#ifdef CONFIG_MEMCG + struct mem_cgroup *mcg; + + rcu_read_lock(); + mcg = mem_cgroup_from_task(current); + rcu_read_unlock(); + if (!mcg) + goto out; + + cache = mem_cgroup_stat_read_cache(mcg); +out: +#endif + return cache; +} + diff -NurpP --minimal linux-3.10.19/kernel/vserver/limit_init.h linux-3.10.19-vs2.3.6.8/kernel/vserver/limit_init.h --- linux-3.10.19/kernel/vserver/limit_init.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/limit_init.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,31 @@ + + +static inline void vx_info_init_limit(struct _vx_limit *limit) +{ + int lim; + + for (lim = 0; lim < NUM_LIMITS; lim++) { + __rlim_soft(limit, lim) = RLIM_INFINITY; + __rlim_hard(limit, lim) = RLIM_INFINITY; + __rlim_set(limit, lim, 0); + atomic_set(&__rlim_lhit(limit, lim), 0); + __rlim_rmin(limit, lim) = 0; + __rlim_rmax(limit, lim) = 0; + } +} + +static inline void vx_info_exit_limit(struct _vx_limit *limit) +{ + rlim_t value; + int lim; + + for (lim = 0; lim < NUM_LIMITS; lim++) { + if ((1 << lim) & VLIM_NOCHECK) + continue; + value = __rlim_get(limit, lim); + vxwprintk_xid(value, + "!!! 
limit: %p[%s,%d] = %ld on exit.", + limit, vlimit_name[lim], lim, (long)value); + } +} + diff -NurpP --minimal linux-3.10.19/kernel/vserver/limit_proc.h linux-3.10.19-vs2.3.6.8/kernel/vserver/limit_proc.h --- linux-3.10.19/kernel/vserver/limit_proc.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/limit_proc.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,57 @@ +#ifndef _VX_LIMIT_PROC_H +#define _VX_LIMIT_PROC_H + +#include + + +#define VX_LIMIT_FMT ":\t%8ld\t%8ld/%8ld\t%8lld/%8lld\t%6d\n" +#define VX_LIMIT_TOP \ + "Limit\t current\t min/max\t\t soft/hard\t\thits\n" + +#define VX_LIMIT_ARG(r) \ + (unsigned long)__rlim_get(limit, r), \ + (unsigned long)__rlim_rmin(limit, r), \ + (unsigned long)__rlim_rmax(limit, r), \ + VX_VLIM(__rlim_soft(limit, r)), \ + VX_VLIM(__rlim_hard(limit, r)), \ + atomic_read(&__rlim_lhit(limit, r)) + +static inline int vx_info_proc_limit(struct _vx_limit *limit, char *buffer) +{ + vx_limit_fixup(limit, -1); + return sprintf(buffer, VX_LIMIT_TOP + "PROC" VX_LIMIT_FMT + "VM" VX_LIMIT_FMT + "VML" VX_LIMIT_FMT + "RSS" VX_LIMIT_FMT + "ANON" VX_LIMIT_FMT + "RMAP" VX_LIMIT_FMT + "FILES" VX_LIMIT_FMT + "OFD" VX_LIMIT_FMT + "LOCKS" VX_LIMIT_FMT + "SOCK" VX_LIMIT_FMT + "MSGQ" VX_LIMIT_FMT + "SHM" VX_LIMIT_FMT + "SEMA" VX_LIMIT_FMT + "SEMS" VX_LIMIT_FMT + "DENT" VX_LIMIT_FMT, + VX_LIMIT_ARG(RLIMIT_NPROC), + VX_LIMIT_ARG(RLIMIT_AS), + VX_LIMIT_ARG(RLIMIT_MEMLOCK), + VX_LIMIT_ARG(RLIMIT_RSS), + VX_LIMIT_ARG(VLIMIT_ANON), + VX_LIMIT_ARG(VLIMIT_MAPPED), + VX_LIMIT_ARG(RLIMIT_NOFILE), + VX_LIMIT_ARG(VLIMIT_OPENFD), + VX_LIMIT_ARG(RLIMIT_LOCKS), + VX_LIMIT_ARG(VLIMIT_NSOCK), + VX_LIMIT_ARG(RLIMIT_MSGQUEUE), + VX_LIMIT_ARG(VLIMIT_SHMEM), + VX_LIMIT_ARG(VLIMIT_SEMARY), + VX_LIMIT_ARG(VLIMIT_NSEMS), + VX_LIMIT_ARG(VLIMIT_DENTRY)); +} + +#endif /* _VX_LIMIT_PROC_H */ + + diff -NurpP --minimal linux-3.10.19/kernel/vserver/network.c linux-3.10.19-vs2.3.6.8/kernel/vserver/network.c --- linux-3.10.19/kernel/vserver/network.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/network.c 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,1053 @@ +/* + * linux/kernel/vserver/network.c + * + * Virtual Server: Network Support + * + * Copyright (C) 2003-2007 Herbert Pötzl + * + * V0.01 broken out from vcontext V0.05 + * V0.02 cleaned up implementation + * V0.03 added equiv nx commands + * V0.04 switch to RCU based hash + * V0.05 and back to locking again + * V0.06 changed vcmds to nxi arg + * V0.07 have __create claim() the nxi + * + */ + +#include +#include +#include +#include + +#include +#include +#include + + +atomic_t nx_global_ctotal = ATOMIC_INIT(0); +atomic_t nx_global_cactive = ATOMIC_INIT(0); + +static struct kmem_cache *nx_addr_v4_cachep = NULL; +static struct kmem_cache *nx_addr_v6_cachep = NULL; + + +static int __init init_network(void) +{ + nx_addr_v4_cachep = kmem_cache_create("nx_v4_addr_cache", + sizeof(struct nx_addr_v4), 0, + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); + nx_addr_v6_cachep = kmem_cache_create("nx_v6_addr_cache", + sizeof(struct nx_addr_v6), 0, + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); + return 0; +} + + +/* __alloc_nx_addr_v4() */ + +static inline struct nx_addr_v4 *__alloc_nx_addr_v4(void) +{ + struct nx_addr_v4 *nxa = kmem_cache_alloc( + nx_addr_v4_cachep, GFP_KERNEL); + + if (!IS_ERR(nxa)) + memset(nxa, 0, sizeof(*nxa)); + return nxa; +} + +/* __dealloc_nx_addr_v4() */ + +static inline void __dealloc_nx_addr_v4(struct nx_addr_v4 *nxa) +{ + kmem_cache_free(nx_addr_v4_cachep, nxa); +} + +/* 
__dealloc_nx_addr_v4_all() */ + +static inline void __dealloc_nx_addr_v4_all(struct nx_addr_v4 *nxa) +{ + while (nxa) { + struct nx_addr_v4 *next = nxa->next; + + __dealloc_nx_addr_v4(nxa); + nxa = next; + } +} + + +#ifdef CONFIG_IPV6 + +/* __alloc_nx_addr_v6() */ + +static inline struct nx_addr_v6 *__alloc_nx_addr_v6(void) +{ + struct nx_addr_v6 *nxa = kmem_cache_alloc( + nx_addr_v6_cachep, GFP_KERNEL); + + if (!IS_ERR(nxa)) + memset(nxa, 0, sizeof(*nxa)); + return nxa; +} + +/* __dealloc_nx_addr_v6() */ + +static inline void __dealloc_nx_addr_v6(struct nx_addr_v6 *nxa) +{ + kmem_cache_free(nx_addr_v6_cachep, nxa); +} + +/* __dealloc_nx_addr_v6_all() */ + +static inline void __dealloc_nx_addr_v6_all(struct nx_addr_v6 *nxa) +{ + while (nxa) { + struct nx_addr_v6 *next = nxa->next; + + __dealloc_nx_addr_v6(nxa); + nxa = next; + } +} + +#endif /* CONFIG_IPV6 */ + +/* __alloc_nx_info() + + * allocate an initialized nx_info struct + * doesn't make it visible (hash) */ + +static struct nx_info *__alloc_nx_info(vnid_t nid) +{ + struct nx_info *new = NULL; + + vxdprintk(VXD_CBIT(nid, 1), "alloc_nx_info(%d)*", nid); + + /* would this benefit from a slab cache? */ + new = kmalloc(sizeof(struct nx_info), GFP_KERNEL); + if (!new) + return 0; + + memset(new, 0, sizeof(struct nx_info)); + new->nx_id = nid; + INIT_HLIST_NODE(&new->nx_hlist); + atomic_set(&new->nx_usecnt, 0); + atomic_set(&new->nx_tasks, 0); + spin_lock_init(&new->addr_lock); + new->nx_state = 0; + + new->nx_flags = NXF_INIT_SET; + + /* rest of init goes here */ + + new->v4_lback.s_addr = htonl(INADDR_LOOPBACK); + new->v4_bcast.s_addr = htonl(INADDR_BROADCAST); + + vxdprintk(VXD_CBIT(nid, 0), + "alloc_nx_info(%d) = %p", nid, new); + atomic_inc(&nx_global_ctotal); + return new; +} + +/* __dealloc_nx_info() + + * final disposal of nx_info */ + +static void __dealloc_nx_info(struct nx_info *nxi) +{ + vxdprintk(VXD_CBIT(nid, 0), + "dealloc_nx_info(%p)", nxi); + + nxi->nx_hlist.next = LIST_POISON1; + nxi->nx_id = -1; + + BUG_ON(atomic_read(&nxi->nx_usecnt)); + BUG_ON(atomic_read(&nxi->nx_tasks)); + + __dealloc_nx_addr_v4_all(nxi->v4.next); +#ifdef CONFIG_IPV6 + __dealloc_nx_addr_v6_all(nxi->v6.next); +#endif + + nxi->nx_state |= NXS_RELEASED; + kfree(nxi); + atomic_dec(&nx_global_ctotal); +} + +static void __shutdown_nx_info(struct nx_info *nxi) +{ + nxi->nx_state |= NXS_SHUTDOWN; + vs_net_change(nxi, VSC_NETDOWN); +} + +/* exported stuff */ + +void free_nx_info(struct nx_info *nxi) +{ + /* context shutdown is mandatory */ + BUG_ON(nxi->nx_state != NXS_SHUTDOWN); + + /* context must not be hashed */ + BUG_ON(nxi->nx_state & NXS_HASHED); + + BUG_ON(atomic_read(&nxi->nx_usecnt)); + BUG_ON(atomic_read(&nxi->nx_tasks)); + + __dealloc_nx_info(nxi); +} + + +void __nx_set_lback(struct nx_info *nxi) +{ + int nid = nxi->nx_id; + __be32 lback = htonl(INADDR_LOOPBACK ^ ((nid & 0xFFFF) << 8)); + + nxi->v4_lback.s_addr = lback; +} + +extern int __nx_inet_add_lback(__be32 addr); +extern int __nx_inet_del_lback(__be32 addr); + + +/* hash table for nx_info hash */ + +#define NX_HASH_SIZE 13 + +struct hlist_head nx_info_hash[NX_HASH_SIZE]; + +static DEFINE_SPINLOCK(nx_info_hash_lock); + + +static inline unsigned int __hashval(vnid_t nid) +{ + return (nid % NX_HASH_SIZE); +} + + + +/* __hash_nx_info() + + * add the nxi to the global hash table + * requires the hash_lock to be held */ + +static inline void __hash_nx_info(struct nx_info *nxi) +{ + struct hlist_head *head; + + vxd_assert_lock(&nx_info_hash_lock); + vxdprintk(VXD_CBIT(nid, 4), + "__hash_nx_info: 
%p[#%d]", nxi, nxi->nx_id); + + /* context must not be hashed */ + BUG_ON(nx_info_state(nxi, NXS_HASHED)); + + nxi->nx_state |= NXS_HASHED; + head = &nx_info_hash[__hashval(nxi->nx_id)]; + hlist_add_head(&nxi->nx_hlist, head); + atomic_inc(&nx_global_cactive); +} + +/* __unhash_nx_info() + + * remove the nxi from the global hash table + * requires the hash_lock to be held */ + +static inline void __unhash_nx_info(struct nx_info *nxi) +{ + vxd_assert_lock(&nx_info_hash_lock); + vxdprintk(VXD_CBIT(nid, 4), + "__unhash_nx_info: %p[#%d.%d.%d]", nxi, nxi->nx_id, + atomic_read(&nxi->nx_usecnt), atomic_read(&nxi->nx_tasks)); + + /* context must be hashed */ + BUG_ON(!nx_info_state(nxi, NXS_HASHED)); + /* but without tasks */ + BUG_ON(atomic_read(&nxi->nx_tasks)); + + nxi->nx_state &= ~NXS_HASHED; + hlist_del(&nxi->nx_hlist); + atomic_dec(&nx_global_cactive); +} + + +/* __lookup_nx_info() + + * requires the hash_lock to be held + * doesn't increment the nx_refcnt */ + +static inline struct nx_info *__lookup_nx_info(vnid_t nid) +{ + struct hlist_head *head = &nx_info_hash[__hashval(nid)]; + struct hlist_node *pos; + struct nx_info *nxi; + + vxd_assert_lock(&nx_info_hash_lock); + hlist_for_each(pos, head) { + nxi = hlist_entry(pos, struct nx_info, nx_hlist); + + if (nxi->nx_id == nid) + goto found; + } + nxi = NULL; +found: + vxdprintk(VXD_CBIT(nid, 0), + "__lookup_nx_info(#%u): %p[#%u]", + nid, nxi, nxi ? nxi->nx_id : 0); + return nxi; +} + + +/* __create_nx_info() + + * create the requested context + * get(), claim() and hash it */ + +static struct nx_info *__create_nx_info(int id) +{ + struct nx_info *new, *nxi = NULL; + + vxdprintk(VXD_CBIT(nid, 1), "create_nx_info(%d)*", id); + + if (!(new = __alloc_nx_info(id))) + return ERR_PTR(-ENOMEM); + + /* required to make dynamic xids unique */ + spin_lock(&nx_info_hash_lock); + + /* static context requested */ + if ((nxi = __lookup_nx_info(id))) { + vxdprintk(VXD_CBIT(nid, 0), + "create_nx_info(%d) = %p (already there)", id, nxi); + if (nx_info_flags(nxi, NXF_STATE_SETUP, 0)) + nxi = ERR_PTR(-EBUSY); + else + nxi = ERR_PTR(-EEXIST); + goto out_unlock; + } + /* new context */ + vxdprintk(VXD_CBIT(nid, 0), + "create_nx_info(%d) = %p (new)", id, new); + claim_nx_info(new, NULL); + __nx_set_lback(new); + __hash_nx_info(get_nx_info(new)); + nxi = new, new = NULL; + +out_unlock: + spin_unlock(&nx_info_hash_lock); + if (new) + __dealloc_nx_info(new); + return nxi; +} + + + +/* exported stuff */ + + +void unhash_nx_info(struct nx_info *nxi) +{ + __shutdown_nx_info(nxi); + spin_lock(&nx_info_hash_lock); + __unhash_nx_info(nxi); + spin_unlock(&nx_info_hash_lock); +} + +/* lookup_nx_info() + + * search for a nx_info and get() it + * negative id means current */ + +struct nx_info *lookup_nx_info(int id) +{ + struct nx_info *nxi = NULL; + + if (id < 0) { + nxi = get_nx_info(current_nx_info()); + } else if (id > 1) { + spin_lock(&nx_info_hash_lock); + nxi = get_nx_info(__lookup_nx_info(id)); + spin_unlock(&nx_info_hash_lock); + } + return nxi; +} + +/* nid_is_hashed() + + * verify that nid is still hashed */ + +int nid_is_hashed(vnid_t nid) +{ + int hashed; + + spin_lock(&nx_info_hash_lock); + hashed = (__lookup_nx_info(nid) != NULL); + spin_unlock(&nx_info_hash_lock); + return hashed; +} + + +#ifdef CONFIG_PROC_FS + +/* get_nid_list() + + * get a subset of hashed nids for proc + * assumes size is at least one */ + +int get_nid_list(int index, unsigned int *nids, int size) +{ + int hindex, nr_nids = 0; + + /* only show current and children */ + if (!nx_check(0, 
VS_ADMIN | VS_WATCH)) { + if (index > 0) + return 0; + nids[nr_nids] = nx_current_nid(); + return 1; + } + + for (hindex = 0; hindex < NX_HASH_SIZE; hindex++) { + struct hlist_head *head = &nx_info_hash[hindex]; + struct hlist_node *pos; + + spin_lock(&nx_info_hash_lock); + hlist_for_each(pos, head) { + struct nx_info *nxi; + + if (--index > 0) + continue; + + nxi = hlist_entry(pos, struct nx_info, nx_hlist); + nids[nr_nids] = nxi->nx_id; + if (++nr_nids >= size) { + spin_unlock(&nx_info_hash_lock); + goto out; + } + } + /* keep the lock time short */ + spin_unlock(&nx_info_hash_lock); + } +out: + return nr_nids; +} +#endif + + +/* + * migrate task to new network + * gets nxi, puts old_nxi on change + */ + +int nx_migrate_task(struct task_struct *p, struct nx_info *nxi) +{ + struct nx_info *old_nxi; + int ret = 0; + + if (!p || !nxi) + BUG(); + + vxdprintk(VXD_CBIT(nid, 5), + "nx_migrate_task(%p,%p[#%d.%d.%d])", + p, nxi, nxi->nx_id, + atomic_read(&nxi->nx_usecnt), + atomic_read(&nxi->nx_tasks)); + + if (nx_info_flags(nxi, NXF_INFO_PRIVATE, 0) && + !nx_info_flags(nxi, NXF_STATE_SETUP, 0)) + return -EACCES; + + if (nx_info_state(nxi, NXS_SHUTDOWN)) + return -EFAULT; + + /* maybe disallow this completely? */ + old_nxi = task_get_nx_info(p); + if (old_nxi == nxi) + goto out; + + task_lock(p); + if (old_nxi) + clr_nx_info(&p->nx_info); + claim_nx_info(nxi, p); + set_nx_info(&p->nx_info, nxi); + p->nid = nxi->nx_id; + task_unlock(p); + + vxdprintk(VXD_CBIT(nid, 5), + "moved task %p into nxi:%p[#%d]", + p, nxi, nxi->nx_id); + + if (old_nxi) + release_nx_info(old_nxi, p); + ret = 0; +out: + put_nx_info(old_nxi); + return ret; +} + + +void nx_set_persistent(struct nx_info *nxi) +{ + vxdprintk(VXD_CBIT(nid, 6), + "nx_set_persistent(%p[#%d])", nxi, nxi->nx_id); + + get_nx_info(nxi); + claim_nx_info(nxi, NULL); +} + +void nx_clear_persistent(struct nx_info *nxi) +{ + vxdprintk(VXD_CBIT(nid, 6), + "nx_clear_persistent(%p[#%d])", nxi, nxi->nx_id); + + release_nx_info(nxi, NULL); + put_nx_info(nxi); +} + +void nx_update_persistent(struct nx_info *nxi) +{ + if (nx_info_flags(nxi, NXF_PERSISTENT, 0)) + nx_set_persistent(nxi); + else + nx_clear_persistent(nxi); +} + +/* vserver syscall commands below here */ + +/* task nid and nx_info functions */ + +#include + + +int vc_task_nid(uint32_t id) +{ + vnid_t nid; + + if (id) { + struct task_struct *tsk; + + rcu_read_lock(); + tsk = find_task_by_real_pid(id); + nid = (tsk) ? 
tsk->nid : -ESRCH; + rcu_read_unlock(); + } else + nid = nx_current_nid(); + return nid; +} + + +int vc_nx_info(struct nx_info *nxi, void __user *data) +{ + struct vcmd_nx_info_v0 vc_data; + + vc_data.nid = nxi->nx_id; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + + +/* network functions */ + +int vc_net_create(uint32_t nid, void __user *data) +{ + struct vcmd_net_create vc_data = { .flagword = NXF_INIT_SET }; + struct nx_info *new_nxi; + int ret; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + if ((nid > MAX_S_CONTEXT) || (nid < 2)) + return -EINVAL; + + new_nxi = __create_nx_info(nid); + if (IS_ERR(new_nxi)) + return PTR_ERR(new_nxi); + + /* initial flags */ + new_nxi->nx_flags = vc_data.flagword; + + ret = -ENOEXEC; + if (vs_net_change(new_nxi, VSC_NETUP)) + goto out; + + ret = nx_migrate_task(current, new_nxi); + if (ret) + goto out; + + /* return context id on success */ + ret = new_nxi->nx_id; + + /* get a reference for persistent contexts */ + if ((vc_data.flagword & NXF_PERSISTENT)) + nx_set_persistent(new_nxi); +out: + release_nx_info(new_nxi, NULL); + put_nx_info(new_nxi); + return ret; +} + + +int vc_net_migrate(struct nx_info *nxi, void __user *data) +{ + return nx_migrate_task(current, nxi); +} + + +static inline +struct nx_addr_v4 *__find_v4_addr(struct nx_info *nxi, + __be32 ip, __be32 ip2, __be32 mask, uint16_t type, uint16_t flags, + struct nx_addr_v4 **prev) +{ + struct nx_addr_v4 *nxa = &nxi->v4; + + for (; nxa; nxa = nxa->next) { + if ((nxa->ip[0].s_addr == ip) && + (nxa->ip[1].s_addr == ip2) && + (nxa->mask.s_addr == mask) && + (nxa->type == type) && + (nxa->flags == flags)) + return nxa; + + /* save previous entry */ + if (prev) + *prev = nxa; + } + return NULL; +} + +int do_add_v4_addr(struct nx_info *nxi, __be32 ip, __be32 ip2, __be32 mask, + uint16_t type, uint16_t flags) +{ + struct nx_addr_v4 *nxa = NULL; + struct nx_addr_v4 *new = __alloc_nx_addr_v4(); + unsigned long irqflags; + int ret = -EEXIST; + + if (IS_ERR(new)) + return PTR_ERR(new); + + spin_lock_irqsave(&nxi->addr_lock, irqflags); + if (__find_v4_addr(nxi, ip, ip2, mask, type, flags, &nxa)) + goto out_unlock; + + if (NX_IPV4(nxi)) { + nxa->next = new; + nxa = new; + new = NULL; + + /* remove single ip for ip list */ + nxi->nx_flags &= ~NXF_SINGLE_IP; + } + + nxa->ip[0].s_addr = ip; + nxa->ip[1].s_addr = ip2; + nxa->mask.s_addr = mask; + nxa->type = type; + nxa->flags = flags; + ret = 0; +out_unlock: + spin_unlock_irqrestore(&nxi->addr_lock, irqflags); + if (new) + __dealloc_nx_addr_v4(new); + return ret; +} + +int do_remove_v4_addr(struct nx_info *nxi, __be32 ip, __be32 ip2, __be32 mask, + uint16_t type, uint16_t flags) +{ + struct nx_addr_v4 *nxa = NULL; + struct nx_addr_v4 *old = NULL; + unsigned long irqflags; + int ret = 0; + + spin_lock_irqsave(&nxi->addr_lock, irqflags); + switch (type) { + case NXA_TYPE_ADDR: + old = __find_v4_addr(nxi, ip, ip2, mask, type, flags, &nxa); + if (old) { + if (nxa) { + nxa->next = old->next; + old->next = NULL; + } else { + if (old->next) { + nxa = old; + old = old->next; + *nxa = *old; + old->next = NULL; + } else { + memset(old, 0, sizeof(*old)); + old = NULL; + } + } + } else + ret = -ESRCH; + break; + + case NXA_TYPE_ANY: + nxa = &nxi->v4; + old = nxa->next; + memset(nxa, 0, sizeof(*nxa)); + break; + + default: + ret = -EINVAL; + } + spin_unlock_irqrestore(&nxi->addr_lock, irqflags); + __dealloc_nx_addr_v4_all(old); + return ret; +} + + +int vc_net_add(struct nx_info *nxi, void 
__user *data) +{ + struct vcmd_net_addr_v0 vc_data; + int index, ret = 0; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + switch (vc_data.type) { + case NXA_TYPE_IPV4: + if ((vc_data.count < 1) || (vc_data.count > 4)) + return -EINVAL; + + index = 0; + while (index < vc_data.count) { + ret = do_add_v4_addr(nxi, vc_data.ip[index].s_addr, 0, + vc_data.mask[index].s_addr, NXA_TYPE_ADDR, 0); + if (ret) + return ret; + index++; + } + ret = index; + break; + + case NXA_TYPE_IPV4|NXA_MOD_BCAST: + nxi->v4_bcast = vc_data.ip[0]; + ret = 1; + break; + + case NXA_TYPE_IPV4|NXA_MOD_LBACK: + nxi->v4_lback = vc_data.ip[0]; + ret = 1; + break; + + default: + ret = -EINVAL; + break; + } + return ret; +} + +int vc_net_remove(struct nx_info *nxi, void __user *data) +{ + struct vcmd_net_addr_v0 vc_data; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + switch (vc_data.type) { + case NXA_TYPE_ANY: + return do_remove_v4_addr(nxi, 0, 0, 0, vc_data.type, 0); + default: + return -EINVAL; + } + return 0; +} + + +int vc_net_add_ipv4_v1(struct nx_info *nxi, void __user *data) +{ + struct vcmd_net_addr_ipv4_v1 vc_data; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + switch (vc_data.type) { + case NXA_TYPE_ADDR: + case NXA_TYPE_MASK: + return do_add_v4_addr(nxi, vc_data.ip.s_addr, 0, + vc_data.mask.s_addr, vc_data.type, vc_data.flags); + + case NXA_TYPE_ADDR | NXA_MOD_BCAST: + nxi->v4_bcast = vc_data.ip; + break; + + case NXA_TYPE_ADDR | NXA_MOD_LBACK: + nxi->v4_lback = vc_data.ip; + break; + + default: + return -EINVAL; + } + return 0; +} + +int vc_net_add_ipv4(struct nx_info *nxi, void __user *data) +{ + struct vcmd_net_addr_ipv4_v2 vc_data; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + switch (vc_data.type) { + case NXA_TYPE_ADDR: + case NXA_TYPE_MASK: + case NXA_TYPE_RANGE: + return do_add_v4_addr(nxi, vc_data.ip.s_addr, vc_data.ip2.s_addr, + vc_data.mask.s_addr, vc_data.type, vc_data.flags); + + case NXA_TYPE_ADDR | NXA_MOD_BCAST: + nxi->v4_bcast = vc_data.ip; + break; + + case NXA_TYPE_ADDR | NXA_MOD_LBACK: + nxi->v4_lback = vc_data.ip; + break; + + default: + return -EINVAL; + } + return 0; +} + +int vc_net_rem_ipv4_v1(struct nx_info *nxi, void __user *data) +{ + struct vcmd_net_addr_ipv4_v1 vc_data; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_remove_v4_addr(nxi, vc_data.ip.s_addr, 0, + vc_data.mask.s_addr, vc_data.type, vc_data.flags); +} + +int vc_net_rem_ipv4(struct nx_info *nxi, void __user *data) +{ + struct vcmd_net_addr_ipv4_v2 vc_data; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_remove_v4_addr(nxi, vc_data.ip.s_addr, vc_data.ip2.s_addr, + vc_data.mask.s_addr, vc_data.type, vc_data.flags); +} + +#ifdef CONFIG_IPV6 + +static inline +struct nx_addr_v6 *__find_v6_addr(struct nx_info *nxi, + struct in6_addr *ip, struct in6_addr *mask, + uint32_t prefix, uint16_t type, uint16_t flags, + struct nx_addr_v6 **prev) +{ + struct nx_addr_v6 *nxa = &nxi->v6; + + for (; nxa; nxa = nxa->next) { + if (ipv6_addr_equal(&nxa->ip, ip) && + ipv6_addr_equal(&nxa->mask, mask) && + (nxa->prefix == prefix) && + (nxa->type == type) && + (nxa->flags == flags)) + return nxa; + + /* save previous entry */ + if (prev) + *prev = nxa; + } + return NULL; +} + + +int do_add_v6_addr(struct nx_info *nxi, + struct in6_addr *ip, struct in6_addr *mask, + uint32_t prefix, uint16_t 
type, uint16_t flags) +{ + struct nx_addr_v6 *nxa = NULL; + struct nx_addr_v6 *new = __alloc_nx_addr_v6(); + unsigned long irqflags; + int ret = -EEXIST; + + if (IS_ERR(new)) + return PTR_ERR(new); + + spin_lock_irqsave(&nxi->addr_lock, irqflags); + if (__find_v6_addr(nxi, ip, mask, prefix, type, flags, &nxa)) + goto out_unlock; + + if (NX_IPV6(nxi)) { + nxa->next = new; + nxa = new; + new = NULL; + } + + nxa->ip = *ip; + nxa->mask = *mask; + nxa->prefix = prefix; + nxa->type = type; + nxa->flags = flags; + ret = 0; +out_unlock: + spin_unlock_irqrestore(&nxi->addr_lock, irqflags); + if (new) + __dealloc_nx_addr_v6(new); + return ret; +} + +int do_remove_v6_addr(struct nx_info *nxi, + struct in6_addr *ip, struct in6_addr *mask, + uint32_t prefix, uint16_t type, uint16_t flags) +{ + struct nx_addr_v6 *nxa = NULL; + struct nx_addr_v6 *old = NULL; + unsigned long irqflags; + int ret = 0; + + spin_lock_irqsave(&nxi->addr_lock, irqflags); + switch (type) { + case NXA_TYPE_ADDR: + old = __find_v6_addr(nxi, ip, mask, prefix, type, flags, &nxa); + if (old) { + if (nxa) { + nxa->next = old->next; + old->next = NULL; + } else { + if (old->next) { + nxa = old; + old = old->next; + *nxa = *old; + old->next = NULL; + } else { + memset(old, 0, sizeof(*old)); + old = NULL; + } + } + } else + ret = -ESRCH; + break; + + case NXA_TYPE_ANY: + nxa = &nxi->v6; + old = nxa->next; + memset(nxa, 0, sizeof(*nxa)); + break; + + default: + ret = -EINVAL; + } + spin_unlock_irqrestore(&nxi->addr_lock, irqflags); + __dealloc_nx_addr_v6_all(old); + return ret; +} + +int vc_net_add_ipv6(struct nx_info *nxi, void __user *data) +{ + struct vcmd_net_addr_ipv6_v1 vc_data; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + switch (vc_data.type) { + case NXA_TYPE_ADDR: + memset(&vc_data.mask, ~0, sizeof(vc_data.mask)); + /* fallthrough */ + case NXA_TYPE_MASK: + return do_add_v6_addr(nxi, &vc_data.ip, &vc_data.mask, + vc_data.prefix, vc_data.type, vc_data.flags); + default: + return -EINVAL; + } + return 0; +} + +int vc_net_remove_ipv6(struct nx_info *nxi, void __user *data) +{ + struct vcmd_net_addr_ipv6_v1 vc_data; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + switch (vc_data.type) { + case NXA_TYPE_ADDR: + memset(&vc_data.mask, ~0, sizeof(vc_data.mask)); + /* fallthrough */ + case NXA_TYPE_MASK: + return do_remove_v6_addr(nxi, &vc_data.ip, &vc_data.mask, + vc_data.prefix, vc_data.type, vc_data.flags); + case NXA_TYPE_ANY: + return do_remove_v6_addr(nxi, NULL, NULL, 0, vc_data.type, 0); + default: + return -EINVAL; + } + return 0; +} + +#endif /* CONFIG_IPV6 */ + + +int vc_get_nflags(struct nx_info *nxi, void __user *data) +{ + struct vcmd_net_flags_v0 vc_data; + + vc_data.flagword = nxi->nx_flags; + + /* special STATE flag handling */ + vc_data.mask = vs_mask_flags(~0ULL, nxi->nx_flags, NXF_ONE_TIME); + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +int vc_set_nflags(struct nx_info *nxi, void __user *data) +{ + struct vcmd_net_flags_v0 vc_data; + uint64_t mask, trigger; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + /* special STATE flag handling */ + mask = vs_mask_mask(vc_data.mask, nxi->nx_flags, NXF_ONE_TIME); + trigger = (mask & nxi->nx_flags) ^ (mask & vc_data.flagword); + + nxi->nx_flags = vs_mask_flags(nxi->nx_flags, + vc_data.flagword, mask); + if (trigger & NXF_PERSISTENT) + nx_update_persistent(nxi); + + return 0; +} + +int vc_get_ncaps(struct nx_info *nxi, void 
__user *data) +{ + struct vcmd_net_caps_v0 vc_data; + + vc_data.ncaps = nxi->nx_ncaps; + vc_data.cmask = ~0ULL; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + +int vc_set_ncaps(struct nx_info *nxi, void __user *data) +{ + struct vcmd_net_caps_v0 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + nxi->nx_ncaps = vs_mask_flags(nxi->nx_ncaps, + vc_data.ncaps, vc_data.cmask); + return 0; +} + + +#include + +module_init(init_network); + +EXPORT_SYMBOL_GPL(free_nx_info); +EXPORT_SYMBOL_GPL(unhash_nx_info); + diff -NurpP --minimal linux-3.10.19/kernel/vserver/proc.c linux-3.10.19-vs2.3.6.8/kernel/vserver/proc.c --- linux-3.10.19/kernel/vserver/proc.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/proc.c 2013-08-22 23:31:37.000000000 +0000 @@ -0,0 +1,1113 @@ +/* + * linux/kernel/vserver/proc.c + * + * Virtual Context Support + * + * Copyright (C) 2003-2011 Herbert Pötzl + * + * V0.01 basic structure + * V0.02 adaptation vs1.3.0 + * V0.03 proc permissions + * V0.04 locking/generic + * V0.05 next generation procfs + * V0.06 inode validation + * V0.07 generic rewrite vid + * V0.08 remove inode type + * V0.09 added u/wmask info + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include "cvirt_proc.h" +#include "cacct_proc.h" +#include "limit_proc.h" +#include "sched_proc.h" +#include "vci_config.h" + +#include <../../fs/proc/internal.h> + + +static inline char *print_cap_t(char *buffer, kernel_cap_t *c) +{ + unsigned __capi; + + CAP_FOR_EACH_U32(__capi) { + buffer += sprintf(buffer, "%08x", + c->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]); + } + return buffer; +} + + +static struct proc_dir_entry *proc_virtual; + +static struct proc_dir_entry *proc_virtnet; + + +/* first the actual feeds */ + + +static int proc_vci(char *buffer) +{ + return sprintf(buffer, + "VCIVersion:\t%04x:%04x\n" + "VCISyscall:\t%d\n" + "VCIKernel:\t%08x\n", + VCI_VERSION >> 16, + VCI_VERSION & 0xFFFF, + __NR_vserver, + vci_kernel_config()); +} + +static int proc_virtual_info(char *buffer) +{ + return proc_vci(buffer); +} + +static int proc_virtual_status(char *buffer) +{ + return sprintf(buffer, + "#CTotal:\t%d\n" + "#CActive:\t%d\n" + "#NSProxy:\t%d\t%d %d %d %d %d %d\n" + "#InitTask:\t%d\t%d %d\n", + atomic_read(&vx_global_ctotal), + atomic_read(&vx_global_cactive), + atomic_read(&vs_global_nsproxy), + atomic_read(&vs_global_fs), + atomic_read(&vs_global_mnt_ns), + atomic_read(&vs_global_uts_ns), + atomic_read(&nr_ipc_ns), + atomic_read(&vs_global_user_ns), + atomic_read(&vs_global_pid_ns), + atomic_read(&init_task.usage), + atomic_read(&init_task.nsproxy->count), + init_task.fs->users); +} + + +int proc_vxi_info(struct vx_info *vxi, char *buffer) +{ + int length; + + length = sprintf(buffer, + "ID:\t%d\n" + "Info:\t%p\n" + "Init:\t%d\n" + "OOM:\t%lld\n", + vxi->vx_id, + vxi, + vxi->vx_initpid, + vxi->vx_badness_bias); + return length; +} + +int proc_vxi_status(struct vx_info *vxi, char *buffer) +{ + char *orig = buffer; + + buffer += sprintf(buffer, + "UseCnt:\t%d\n" + "Tasks:\t%d\n" + "Flags:\t%016llx\n", + atomic_read(&vxi->vx_usecnt), + atomic_read(&vxi->vx_tasks), + (unsigned long long)vxi->vx_flags); + + buffer += sprintf(buffer, "BCaps:\t"); + buffer = print_cap_t(buffer, &vxi->vx_bcaps); + buffer += sprintf(buffer, "\n"); + + buffer += sprintf(buffer, + "CCaps:\t%016llx\n" + "Umask:\t%16llx\n" + 
"Wmask:\t%16llx\n" + "Spaces:\t%08lx %08lx\n", + (unsigned long long)vxi->vx_ccaps, + (unsigned long long)vxi->vx_umask, + (unsigned long long)vxi->vx_wmask, + vxi->space[0].vx_nsmask, vxi->space[1].vx_nsmask); + return buffer - orig; +} + +int proc_vxi_limit(struct vx_info *vxi, char *buffer) +{ + return vx_info_proc_limit(&vxi->limit, buffer); +} + +int proc_vxi_sched(struct vx_info *vxi, char *buffer) +{ + int cpu, length; + + length = vx_info_proc_sched(&vxi->sched, buffer); + for_each_online_cpu(cpu) { + length += vx_info_proc_sched_pc( + &vx_per_cpu(vxi, sched_pc, cpu), + buffer + length, cpu); + } + return length; +} + +int proc_vxi_nsproxy0(struct vx_info *vxi, char *buffer) +{ + return vx_info_proc_nsproxy(vxi->space[0].vx_nsproxy, buffer); +} + +int proc_vxi_nsproxy1(struct vx_info *vxi, char *buffer) +{ + return vx_info_proc_nsproxy(vxi->space[1].vx_nsproxy, buffer); +} + +int proc_vxi_cvirt(struct vx_info *vxi, char *buffer) +{ + int cpu, length; + + vx_update_load(vxi); + length = vx_info_proc_cvirt(&vxi->cvirt, buffer); + for_each_online_cpu(cpu) { + length += vx_info_proc_cvirt_pc( + &vx_per_cpu(vxi, cvirt_pc, cpu), + buffer + length, cpu); + } + return length; +} + +int proc_vxi_cacct(struct vx_info *vxi, char *buffer) +{ + return vx_info_proc_cacct(&vxi->cacct, buffer); +} + + +static int proc_virtnet_info(char *buffer) +{ + return proc_vci(buffer); +} + +static int proc_virtnet_status(char *buffer) +{ + return sprintf(buffer, + "#CTotal:\t%d\n" + "#CActive:\t%d\n", + atomic_read(&nx_global_ctotal), + atomic_read(&nx_global_cactive)); +} + +int proc_nxi_info(struct nx_info *nxi, char *buffer) +{ + struct nx_addr_v4 *v4a; +#ifdef CONFIG_IPV6 + struct nx_addr_v6 *v6a; +#endif + int length, i; + + length = sprintf(buffer, + "ID:\t%d\n" + "Info:\t%p\n" + "Bcast:\t" NIPQUAD_FMT "\n" + "Lback:\t" NIPQUAD_FMT "\n", + nxi->nx_id, + nxi, + NIPQUAD(nxi->v4_bcast.s_addr), + NIPQUAD(nxi->v4_lback.s_addr)); + + if (!NX_IPV4(nxi)) + goto skip_v4; + for (i = 0, v4a = &nxi->v4; v4a; i++, v4a = v4a->next) + length += sprintf(buffer + length, "%d:\t" NXAV4_FMT "\n", + i, NXAV4(v4a)); +skip_v4: +#ifdef CONFIG_IPV6 + if (!NX_IPV6(nxi)) + goto skip_v6; + for (i = 0, v6a = &nxi->v6; v6a; i++, v6a = v6a->next) + length += sprintf(buffer + length, "%d:\t" NXAV6_FMT "\n", + i, NXAV6(v6a)); +skip_v6: +#endif + return length; +} + +int proc_nxi_status(struct nx_info *nxi, char *buffer) +{ + int length; + + length = sprintf(buffer, + "UseCnt:\t%d\n" + "Tasks:\t%d\n" + "Flags:\t%016llx\n" + "NCaps:\t%016llx\n", + atomic_read(&nxi->nx_usecnt), + atomic_read(&nxi->nx_tasks), + (unsigned long long)nxi->nx_flags, + (unsigned long long)nxi->nx_ncaps); + return length; +} + + + +/* here the inode helpers */ + +struct vs_entry { + int len; + char *name; + mode_t mode; + struct inode_operations *iop; + struct file_operations *fop; + union proc_op op; +}; + +static struct inode *vs_proc_make_inode(struct super_block *sb, struct vs_entry *p) +{ + struct inode *inode = new_inode(sb); + + if (!inode) + goto out; + + inode->i_mode = p->mode; + if (p->iop) + inode->i_op = p->iop; + if (p->fop) + inode->i_fop = p->fop; + + set_nlink(inode, (p->mode & S_IFDIR) ? 
2 : 1); + inode->i_flags |= S_IMMUTABLE; + + inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; + + i_uid_write(inode, 0); + i_gid_write(inode, 0); + i_tag_write(inode, 0); +out: + return inode; +} + +static struct dentry *vs_proc_instantiate(struct inode *dir, + struct dentry *dentry, int id, void *ptr) +{ + struct vs_entry *p = ptr; + struct inode *inode = vs_proc_make_inode(dir->i_sb, p); + struct dentry *error = ERR_PTR(-EINVAL); + + if (!inode) + goto out; + + PROC_I(inode)->op = p->op; + PROC_I(inode)->fd = id; + d_add(dentry, inode); + error = NULL; +out: + return error; +} + +/* Lookups */ + +typedef struct dentry *vx_instantiate_t(struct inode *, struct dentry *, int, void *); + + +/* + * Fill a directory entry. + * + * If possible create the dcache entry and derive our inode number and + * file type from dcache entry. + * + * Since all of the proc inode numbers are dynamically generated, the inode + * numbers do not exist until the inode is cached. This means creating + * the dcache entry in readdir is necessary to keep the inode numbers + * reported by readdir in sync with the inode numbers reported + * by stat. + */ +static int vx_proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir, + char *name, int len, vx_instantiate_t instantiate, int id, void *ptr) +{ + struct dentry *child, *dir = filp->f_dentry; + struct inode *inode; + struct qstr qname; + ino_t ino = 0; + unsigned type = DT_UNKNOWN; + + qname.name = name; + qname.len = len; + qname.hash = full_name_hash(name, len); + + child = d_lookup(dir, &qname); + if (!child) { + struct dentry *new; + new = d_alloc(dir, &qname); + if (new) { + child = instantiate(dir->d_inode, new, id, ptr); + if (child) + dput(new); + else + child = new; + } + } + if (!child || IS_ERR(child) || !child->d_inode) + goto end_instantiate; + inode = child->d_inode; + if (inode) { + ino = inode->i_ino; + type = inode->i_mode >> 12; + } + dput(child); +end_instantiate: + if (!ino) + ino = find_inode_number(dir, &qname); + if (!ino) + ino = 1; + return filldir(dirent, name, len, filp->f_pos, ino, type); +} + + + +/* get and revalidate vx_info/xid */ + +static inline +struct vx_info *get_proc_vx_info(struct inode *inode) +{ + return lookup_vx_info(PROC_I(inode)->fd); +} + +static int proc_xid_revalidate(struct dentry *dentry, unsigned int flags) +{ + struct inode *inode = dentry->d_inode; + vxid_t xid = PROC_I(inode)->fd; + + if (flags & LOOKUP_RCU) /* FIXME: can be dropped? */ + return -ECHILD; + + if (!xid || xid_is_hashed(xid)) + return 1; + d_drop(dentry); + return 0; +} + + +/* get and revalidate nx_info/nid */ + +static int proc_nid_revalidate(struct dentry *dentry, unsigned int flags) +{ + struct inode *inode = dentry->d_inode; + vnid_t nid = PROC_I(inode)->fd; + + if (flags & LOOKUP_RCU) /* FIXME: can be dropped? 
*/ + return -ECHILD; + + if (!nid || nid_is_hashed(nid)) + return 1; + d_drop(dentry); + return 0; +} + + + +#define PROC_BLOCK_SIZE (PAGE_SIZE - 1024) + +static ssize_t proc_vs_info_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct inode *inode = file->f_dentry->d_inode; + unsigned long page; + ssize_t length = 0; + + if (count > PROC_BLOCK_SIZE) + count = PROC_BLOCK_SIZE; + + /* fade that out as soon as stable */ + WARN_ON(PROC_I(inode)->fd); + + if (!(page = __get_free_page(GFP_KERNEL))) + return -ENOMEM; + + BUG_ON(!PROC_I(inode)->op.proc_vs_read); + length = PROC_I(inode)->op.proc_vs_read((char *)page); + + if (length >= 0) + length = simple_read_from_buffer(buf, count, ppos, + (char *)page, length); + + free_page(page); + return length; +} + +static ssize_t proc_vx_info_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct inode *inode = file->f_dentry->d_inode; + struct vx_info *vxi = NULL; + vxid_t xid = PROC_I(inode)->fd; + unsigned long page; + ssize_t length = 0; + + if (count > PROC_BLOCK_SIZE) + count = PROC_BLOCK_SIZE; + + /* fade that out as soon as stable */ + WARN_ON(!xid); + vxi = lookup_vx_info(xid); + if (!vxi) + goto out; + + length = -ENOMEM; + if (!(page = __get_free_page(GFP_KERNEL))) + goto out_put; + + BUG_ON(!PROC_I(inode)->op.proc_vxi_read); + length = PROC_I(inode)->op.proc_vxi_read(vxi, (char *)page); + + if (length >= 0) + length = simple_read_from_buffer(buf, count, ppos, + (char *)page, length); + + free_page(page); +out_put: + put_vx_info(vxi); +out: + return length; +} + +static ssize_t proc_nx_info_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct inode *inode = file->f_dentry->d_inode; + struct nx_info *nxi = NULL; + vnid_t nid = PROC_I(inode)->fd; + unsigned long page; + ssize_t length = 0; + + if (count > PROC_BLOCK_SIZE) + count = PROC_BLOCK_SIZE; + + /* fade that out as soon as stable */ + WARN_ON(!nid); + nxi = lookup_nx_info(nid); + if (!nxi) + goto out; + + length = -ENOMEM; + if (!(page = __get_free_page(GFP_KERNEL))) + goto out_put; + + BUG_ON(!PROC_I(inode)->op.proc_nxi_read); + length = PROC_I(inode)->op.proc_nxi_read(nxi, (char *)page); + + if (length >= 0) + length = simple_read_from_buffer(buf, count, ppos, + (char *)page, length); + + free_page(page); +out_put: + put_nx_info(nxi); +out: + return length; +} + + + +/* here comes the lower level */ + + +#define NOD(NAME, MODE, IOP, FOP, OP) { \ + .len = sizeof(NAME) - 1, \ + .name = (NAME), \ + .mode = MODE, \ + .iop = IOP, \ + .fop = FOP, \ + .op = OP, \ +} + + +#define DIR(NAME, MODE, OTYPE) \ + NOD(NAME, (S_IFDIR | (MODE)), \ + &proc_ ## OTYPE ## _inode_operations, \ + &proc_ ## OTYPE ## _file_operations, { } ) + +#define INF(NAME, MODE, OTYPE) \ + NOD(NAME, (S_IFREG | (MODE)), NULL, \ + &proc_vs_info_file_operations, \ + { .proc_vs_read = &proc_##OTYPE } ) + +#define VINF(NAME, MODE, OTYPE) \ + NOD(NAME, (S_IFREG | (MODE)), NULL, \ + &proc_vx_info_file_operations, \ + { .proc_vxi_read = &proc_##OTYPE } ) + +#define NINF(NAME, MODE, OTYPE) \ + NOD(NAME, (S_IFREG | (MODE)), NULL, \ + &proc_nx_info_file_operations, \ + { .proc_nxi_read = &proc_##OTYPE } ) + + +static struct file_operations proc_vs_info_file_operations = { + .read = proc_vs_info_read, +}; + +static struct file_operations proc_vx_info_file_operations = { + .read = proc_vx_info_read, +}; + +static struct dentry_operations proc_xid_dentry_operations = { + .d_revalidate = proc_xid_revalidate, +}; + +static struct vs_entry 
vx_base_stuff[] = { + VINF("info", S_IRUGO, vxi_info), + VINF("status", S_IRUGO, vxi_status), + VINF("limit", S_IRUGO, vxi_limit), + VINF("sched", S_IRUGO, vxi_sched), + VINF("nsproxy", S_IRUGO, vxi_nsproxy0), + VINF("nsproxy1",S_IRUGO, vxi_nsproxy1), + VINF("cvirt", S_IRUGO, vxi_cvirt), + VINF("cacct", S_IRUGO, vxi_cacct), + {} +}; + + + + +static struct dentry *proc_xid_instantiate(struct inode *dir, + struct dentry *dentry, int id, void *ptr) +{ + dentry->d_op = &proc_xid_dentry_operations; + return vs_proc_instantiate(dir, dentry, id, ptr); +} + +static struct dentry *proc_xid_lookup(struct inode *dir, + struct dentry *dentry, unsigned int flags) +{ + struct vs_entry *p = vx_base_stuff; + struct dentry *error = ERR_PTR(-ENOENT); + + for (; p->name; p++) { + if (p->len != dentry->d_name.len) + continue; + if (!memcmp(dentry->d_name.name, p->name, p->len)) + break; + } + if (!p->name) + goto out; + + error = proc_xid_instantiate(dir, dentry, PROC_I(dir)->fd, p); +out: + return error; +} + +static int proc_xid_readdir(struct file *filp, + void *dirent, filldir_t filldir) +{ + struct dentry *dentry = filp->f_dentry; + struct inode *inode = dentry->d_inode; + struct vs_entry *p = vx_base_stuff; + int size = sizeof(vx_base_stuff) / sizeof(struct vs_entry); + int pos, index; + u64 ino; + + pos = filp->f_pos; + switch (pos) { + case 0: + ino = inode->i_ino; + if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0) + goto out; + pos++; + /* fall through */ + case 1: + ino = parent_ino(dentry); + if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0) + goto out; + pos++; + /* fall through */ + default: + index = pos - 2; + if (index >= size) + goto out; + for (p += index; p->name; p++) { + if (vx_proc_fill_cache(filp, dirent, filldir, p->name, p->len, + vs_proc_instantiate, PROC_I(inode)->fd, p)) + goto out; + pos++; + } + } +out: + filp->f_pos = pos; + return 1; +} + + + +static struct file_operations proc_nx_info_file_operations = { + .read = proc_nx_info_read, +}; + +static struct dentry_operations proc_nid_dentry_operations = { + .d_revalidate = proc_nid_revalidate, +}; + +static struct vs_entry nx_base_stuff[] = { + NINF("info", S_IRUGO, nxi_info), + NINF("status", S_IRUGO, nxi_status), + {} +}; + + +static struct dentry *proc_nid_instantiate(struct inode *dir, + struct dentry *dentry, int id, void *ptr) +{ + dentry->d_op = &proc_nid_dentry_operations; + return vs_proc_instantiate(dir, dentry, id, ptr); +} + +static struct dentry *proc_nid_lookup(struct inode *dir, + struct dentry *dentry, unsigned int flags) +{ + struct vs_entry *p = nx_base_stuff; + struct dentry *error = ERR_PTR(-ENOENT); + + for (; p->name; p++) { + if (p->len != dentry->d_name.len) + continue; + if (!memcmp(dentry->d_name.name, p->name, p->len)) + break; + } + if (!p->name) + goto out; + + error = proc_nid_instantiate(dir, dentry, PROC_I(dir)->fd, p); +out: + return error; +} + +static int proc_nid_readdir(struct file *filp, + void *dirent, filldir_t filldir) +{ + struct dentry *dentry = filp->f_dentry; + struct inode *inode = dentry->d_inode; + struct vs_entry *p = nx_base_stuff; + int size = sizeof(nx_base_stuff) / sizeof(struct vs_entry); + int pos, index; + u64 ino; + + pos = filp->f_pos; + switch (pos) { + case 0: + ino = inode->i_ino; + if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0) + goto out; + pos++; + /* fall through */ + case 1: + ino = parent_ino(dentry); + if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0) + goto out; + pos++; + /* fall through */ + default: + index = pos - 2; + if (index >= size) + goto out; 
+ for (p += index; p->name; p++) { + if (vx_proc_fill_cache(filp, dirent, filldir, p->name, p->len, + vs_proc_instantiate, PROC_I(inode)->fd, p)) + goto out; + pos++; + } + } +out: + filp->f_pos = pos; + return 1; +} + + +#define MAX_MULBY10 ((~0U - 9) / 10) + +static inline int atovid(const char *str, int len) +{ + int vid, c; + + vid = 0; + while (len-- > 0) { + c = *str - '0'; + str++; + if (c > 9) + return -1; + if (vid >= MAX_MULBY10) + return -1; + vid *= 10; + vid += c; + if (!vid) + return -1; + } + return vid; +} + +/* now the upper level (virtual) */ + + +static struct file_operations proc_xid_file_operations = { + .read = generic_read_dir, + .readdir = proc_xid_readdir, +}; + +static struct inode_operations proc_xid_inode_operations = { + .lookup = proc_xid_lookup, +}; + +static struct vs_entry vx_virtual_stuff[] = { + INF("info", S_IRUGO, virtual_info), + INF("status", S_IRUGO, virtual_status), + DIR(NULL, S_IRUGO | S_IXUGO, xid), +}; + + +static struct dentry *proc_virtual_lookup(struct inode *dir, + struct dentry *dentry, unsigned int flags) +{ + struct vs_entry *p = vx_virtual_stuff; + struct dentry *error = ERR_PTR(-ENOENT); + int id = 0; + + for (; p->name; p++) { + if (p->len != dentry->d_name.len) + continue; + if (!memcmp(dentry->d_name.name, p->name, p->len)) + break; + } + if (p->name) + goto instantiate; + + id = atovid(dentry->d_name.name, dentry->d_name.len); + if ((id < 0) || !xid_is_hashed(id)) + goto out; + +instantiate: + error = proc_xid_instantiate(dir, dentry, id, p); +out: + return error; +} + +static struct file_operations proc_nid_file_operations = { + .read = generic_read_dir, + .readdir = proc_nid_readdir, +}; + +static struct inode_operations proc_nid_inode_operations = { + .lookup = proc_nid_lookup, +}; + +static struct vs_entry nx_virtnet_stuff[] = { + INF("info", S_IRUGO, virtnet_info), + INF("status", S_IRUGO, virtnet_status), + DIR(NULL, S_IRUGO | S_IXUGO, nid), +}; + + +static struct dentry *proc_virtnet_lookup(struct inode *dir, + struct dentry *dentry, unsigned int flags) +{ + struct vs_entry *p = nx_virtnet_stuff; + struct dentry *error = ERR_PTR(-ENOENT); + int id = 0; + + for (; p->name; p++) { + if (p->len != dentry->d_name.len) + continue; + if (!memcmp(dentry->d_name.name, p->name, p->len)) + break; + } + if (p->name) + goto instantiate; + + id = atovid(dentry->d_name.name, dentry->d_name.len); + if ((id < 0) || !nid_is_hashed(id)) + goto out; + +instantiate: + error = proc_nid_instantiate(dir, dentry, id, p); +out: + return error; +} + + +#define PROC_MAXVIDS 32 + +int proc_virtual_readdir(struct file *filp, + void *dirent, filldir_t filldir) +{ + struct dentry *dentry = filp->f_dentry; + struct inode *inode = dentry->d_inode; + struct vs_entry *p = vx_virtual_stuff; + int size = sizeof(vx_virtual_stuff) / sizeof(struct vs_entry); + int pos, index; + unsigned int xid_array[PROC_MAXVIDS]; + char buf[PROC_NUMBUF]; + unsigned int nr_xids, i; + u64 ino; + + pos = filp->f_pos; + switch (pos) { + case 0: + ino = inode->i_ino; + if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0) + goto out; + pos++; + /* fall through */ + case 1: + ino = parent_ino(dentry); + if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0) + goto out; + pos++; + /* fall through */ + default: + index = pos - 2; + if (index >= size) + goto entries; + for (p += index; p->name; p++) { + if (vx_proc_fill_cache(filp, dirent, filldir, p->name, p->len, + vs_proc_instantiate, 0, p)) + goto out; + pos++; + } + entries: + index = pos - size; + p = &vx_virtual_stuff[size - 1]; + 
nr_xids = get_xid_list(index, xid_array, PROC_MAXVIDS); + for (i = 0; i < nr_xids; i++) { + int n, xid = xid_array[i]; + unsigned int j = PROC_NUMBUF; + + n = xid; + do + buf[--j] = '0' + (n % 10); + while (n /= 10); + + if (vx_proc_fill_cache(filp, dirent, filldir, + buf + j, PROC_NUMBUF - j, + vs_proc_instantiate, xid, p)) + goto out; + pos++; + } + } +out: + filp->f_pos = pos; + return 0; +} + +static int proc_virtual_getattr(struct vfsmount *mnt, + struct dentry *dentry, struct kstat *stat) +{ + struct inode *inode = dentry->d_inode; + + generic_fillattr(inode, stat); + stat->nlink = 2 + atomic_read(&vx_global_cactive); + return 0; +} + +static struct file_operations proc_virtual_dir_operations = { + .read = generic_read_dir, + .readdir = proc_virtual_readdir, +}; + +static struct inode_operations proc_virtual_dir_inode_operations = { + .getattr = proc_virtual_getattr, + .lookup = proc_virtual_lookup, +}; + + + + + +int proc_virtnet_readdir(struct file *filp, + void *dirent, filldir_t filldir) +{ + struct dentry *dentry = filp->f_dentry; + struct inode *inode = dentry->d_inode; + struct vs_entry *p = nx_virtnet_stuff; + int size = sizeof(nx_virtnet_stuff) / sizeof(struct vs_entry); + int pos, index; + unsigned int nid_array[PROC_MAXVIDS]; + char buf[PROC_NUMBUF]; + unsigned int nr_nids, i; + u64 ino; + + pos = filp->f_pos; + switch (pos) { + case 0: + ino = inode->i_ino; + if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0) + goto out; + pos++; + /* fall through */ + case 1: + ino = parent_ino(dentry); + if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0) + goto out; + pos++; + /* fall through */ + default: + index = pos - 2; + if (index >= size) + goto entries; + for (p += index; p->name; p++) { + if (vx_proc_fill_cache(filp, dirent, filldir, p->name, p->len, + vs_proc_instantiate, 0, p)) + goto out; + pos++; + } + entries: + index = pos - size; + p = &nx_virtnet_stuff[size - 1]; + nr_nids = get_nid_list(index, nid_array, PROC_MAXVIDS); + for (i = 0; i < nr_nids; i++) { + int n, nid = nid_array[i]; + unsigned int j = PROC_NUMBUF; + + n = nid; + do + buf[--j] = '0' + (n % 10); + while (n /= 10); + + if (vx_proc_fill_cache(filp, dirent, filldir, + buf + j, PROC_NUMBUF - j, + vs_proc_instantiate, nid, p)) + goto out; + pos++; + } + } +out: + filp->f_pos = pos; + return 0; +} + +static int proc_virtnet_getattr(struct vfsmount *mnt, + struct dentry *dentry, struct kstat *stat) +{ + struct inode *inode = dentry->d_inode; + + generic_fillattr(inode, stat); + stat->nlink = 2 + atomic_read(&nx_global_cactive); + return 0; +} + +static struct file_operations proc_virtnet_dir_operations = { + .read = generic_read_dir, + .readdir = proc_virtnet_readdir, +}; + +static struct inode_operations proc_virtnet_dir_inode_operations = { + .getattr = proc_virtnet_getattr, + .lookup = proc_virtnet_lookup, +}; + + + +void proc_vx_init(void) +{ + struct proc_dir_entry *ent; + + ent = proc_mkdir("virtual", 0); + if (ent) { + ent->proc_fops = &proc_virtual_dir_operations; + ent->proc_iops = &proc_virtual_dir_inode_operations; + } + proc_virtual = ent; + + ent = proc_mkdir("virtnet", 0); + if (ent) { + ent->proc_fops = &proc_virtnet_dir_operations; + ent->proc_iops = &proc_virtnet_dir_inode_operations; + } + proc_virtnet = ent; +} + + + + +/* per pid info */ + + +int proc_pid_vx_info(struct task_struct *p, char *buffer) +{ + struct vx_info *vxi; + char *orig = buffer; + + buffer += sprintf(buffer, "XID:\t%d\n", vx_task_xid(p)); + + vxi = task_get_vx_info(p); + if (!vxi) + goto out; + + buffer += sprintf(buffer, 
"BCaps:\t"); + buffer = print_cap_t(buffer, &vxi->vx_bcaps); + buffer += sprintf(buffer, "\n"); + buffer += sprintf(buffer, "CCaps:\t%016llx\n", + (unsigned long long)vxi->vx_ccaps); + buffer += sprintf(buffer, "CFlags:\t%016llx\n", + (unsigned long long)vxi->vx_flags); + buffer += sprintf(buffer, "CIPid:\t%d\n", vxi->vx_initpid); + + put_vx_info(vxi); +out: + return buffer - orig; +} + + +int proc_pid_nx_info(struct task_struct *p, char *buffer) +{ + struct nx_info *nxi; + struct nx_addr_v4 *v4a; +#ifdef CONFIG_IPV6 + struct nx_addr_v6 *v6a; +#endif + char *orig = buffer; + int i; + + buffer += sprintf(buffer, "NID:\t%d\n", nx_task_nid(p)); + + nxi = task_get_nx_info(p); + if (!nxi) + goto out; + + buffer += sprintf(buffer, "NCaps:\t%016llx\n", + (unsigned long long)nxi->nx_ncaps); + buffer += sprintf(buffer, "NFlags:\t%016llx\n", + (unsigned long long)nxi->nx_flags); + + buffer += sprintf(buffer, + "V4Root[bcast]:\t" NIPQUAD_FMT "\n", + NIPQUAD(nxi->v4_bcast.s_addr)); + buffer += sprintf (buffer, + "V4Root[lback]:\t" NIPQUAD_FMT "\n", + NIPQUAD(nxi->v4_lback.s_addr)); + if (!NX_IPV4(nxi)) + goto skip_v4; + for (i = 0, v4a = &nxi->v4; v4a; i++, v4a = v4a->next) + buffer += sprintf(buffer, "V4Root[%d]:\t" NXAV4_FMT "\n", + i, NXAV4(v4a)); +skip_v4: +#ifdef CONFIG_IPV6 + if (!NX_IPV6(nxi)) + goto skip_v6; + for (i = 0, v6a = &nxi->v6; v6a; i++, v6a = v6a->next) + buffer += sprintf(buffer, "V6Root[%d]:\t" NXAV6_FMT "\n", + i, NXAV6(v6a)); +skip_v6: +#endif + put_nx_info(nxi); +out: + return buffer - orig; +} + diff -NurpP --minimal linux-3.10.19/kernel/vserver/sched.c linux-3.10.19-vs2.3.6.8/kernel/vserver/sched.c --- linux-3.10.19/kernel/vserver/sched.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/sched.c 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,83 @@ +/* + * linux/kernel/vserver/sched.c + * + * Virtual Server: Scheduler Support + * + * Copyright (C) 2004-2010 Herbert Pötzl + * + * V0.01 adapted Sam Vilains version to 2.6.3 + * V0.02 removed legacy interface + * V0.03 changed vcmds to vxi arg + * V0.04 removed older and legacy interfaces + * V0.05 removed scheduler code/commands + * + */ + +#include +#include +#include +#include + +#include + + +void vx_update_sched_param(struct _vx_sched *sched, + struct _vx_sched_pc *sched_pc) +{ + sched_pc->prio_bias = sched->prio_bias; +} + +static int do_set_prio_bias(struct vx_info *vxi, struct vcmd_prio_bias *data) +{ + int cpu; + + if (data->prio_bias > MAX_PRIO_BIAS) + data->prio_bias = MAX_PRIO_BIAS; + if (data->prio_bias < MIN_PRIO_BIAS) + data->prio_bias = MIN_PRIO_BIAS; + + if (data->cpu_id != ~0) { + vxi->sched.update = cpumask_of_cpu(data->cpu_id); + cpumask_and(&vxi->sched.update, &vxi->sched.update, + cpu_online_mask); + } else + cpumask_copy(&vxi->sched.update, cpu_online_mask); + + for_each_cpu_mask(cpu, vxi->sched.update) + vx_update_sched_param(&vxi->sched, + &vx_per_cpu(vxi, sched_pc, cpu)); + return 0; +} + +int vc_set_prio_bias(struct vx_info *vxi, void __user *data) +{ + struct vcmd_prio_bias vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return do_set_prio_bias(vxi, &vc_data); +} + +int vc_get_prio_bias(struct vx_info *vxi, void __user *data) +{ + struct vcmd_prio_bias vc_data; + struct _vx_sched_pc *pcd; + int cpu; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + cpu = vc_data.cpu_id; + + if (!cpu_possible(cpu)) + return -EINVAL; + + pcd = &vx_per_cpu(vxi, sched_pc, cpu); + vc_data.prio_bias = pcd->prio_bias; + + if 
(copy_to_user(data, &vc_data, sizeof(vc_data))) + return -EFAULT; + return 0; +} + diff -NurpP --minimal linux-3.10.19/kernel/vserver/sched_init.h linux-3.10.19-vs2.3.6.8/kernel/vserver/sched_init.h --- linux-3.10.19/kernel/vserver/sched_init.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/sched_init.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,27 @@ + +static inline void vx_info_init_sched(struct _vx_sched *sched) +{ + /* scheduling; hard code starting values as constants */ + sched->prio_bias = 0; +} + +static inline +void vx_info_init_sched_pc(struct _vx_sched_pc *sched_pc, int cpu) +{ + sched_pc->prio_bias = 0; + + sched_pc->user_ticks = 0; + sched_pc->sys_ticks = 0; + sched_pc->hold_ticks = 0; +} + +static inline void vx_info_exit_sched(struct _vx_sched *sched) +{ + return; +} + +static inline +void vx_info_exit_sched_pc(struct _vx_sched_pc *sched_pc, int cpu) +{ + return; +} diff -NurpP --minimal linux-3.10.19/kernel/vserver/sched_proc.h linux-3.10.19-vs2.3.6.8/kernel/vserver/sched_proc.h --- linux-3.10.19/kernel/vserver/sched_proc.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/sched_proc.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,32 @@ +#ifndef _VX_SCHED_PROC_H +#define _VX_SCHED_PROC_H + + +static inline +int vx_info_proc_sched(struct _vx_sched *sched, char *buffer) +{ + int length = 0; + + length += sprintf(buffer, + "PrioBias:\t%8d\n", + sched->prio_bias); + return length; +} + +static inline +int vx_info_proc_sched_pc(struct _vx_sched_pc *sched_pc, + char *buffer, int cpu) +{ + int length = 0; + + length += sprintf(buffer + length, + "cpu %d: %lld %lld %lld", cpu, + (unsigned long long)sched_pc->user_ticks, + (unsigned long long)sched_pc->sys_ticks, + (unsigned long long)sched_pc->hold_ticks); + length += sprintf(buffer + length, + " %d\n", sched_pc->prio_bias); + return length; +} + +#endif /* _VX_SCHED_PROC_H */ diff -NurpP --minimal linux-3.10.19/kernel/vserver/signal.c linux-3.10.19-vs2.3.6.8/kernel/vserver/signal.c --- linux-3.10.19/kernel/vserver/signal.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/signal.c 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,134 @@ +/* + * linux/kernel/vserver/signal.c + * + * Virtual Server: Signal Support + * + * Copyright (C) 2003-2007 Herbert Pötzl + * + * V0.01 broken out from vcontext V0.05 + * V0.02 changed vcmds to vxi arg + * V0.03 adjusted siginfo for kill + * + */ + +#include + +#include +#include +#include + + +int vx_info_kill(struct vx_info *vxi, int pid, int sig) +{ + int retval, count = 0; + struct task_struct *p; + struct siginfo *sip = SEND_SIG_PRIV; + + retval = -ESRCH; + vxdprintk(VXD_CBIT(misc, 4), + "vx_info_kill(%p[#%d],%d,%d)*", + vxi, vxi->vx_id, pid, sig); + read_lock(&tasklist_lock); + switch (pid) { + case 0: + case -1: + for_each_process(p) { + int err = 0; + + if (vx_task_xid(p) != vxi->vx_id || p->pid <= 1 || + (pid && vxi->vx_initpid == p->pid)) + continue; + + err = group_send_sig_info(sig, sip, p); + ++count; + if (err != -EPERM) + retval = err; + } + break; + + case 1: + if (vxi->vx_initpid) { + pid = vxi->vx_initpid; + /* for now, only SIGINT to private init ... */ + if (!vx_info_flags(vxi, VXF_STATE_ADMIN, 0) && + /* ... 
as long as there are tasks left */ + (atomic_read(&vxi->vx_tasks) > 1)) + sig = SIGINT; + } + /* fallthrough */ + default: + rcu_read_lock(); + p = find_task_by_real_pid(pid); + rcu_read_unlock(); + if (p) { + if (vx_task_xid(p) == vxi->vx_id) + retval = group_send_sig_info(sig, sip, p); + } + break; + } + read_unlock(&tasklist_lock); + vxdprintk(VXD_CBIT(misc, 4), + "vx_info_kill(%p[#%d],%d,%d,%ld) = %d", + vxi, vxi->vx_id, pid, sig, (long)sip, retval); + return retval; +} + +int vc_ctx_kill(struct vx_info *vxi, void __user *data) +{ + struct vcmd_ctx_kill_v0 vc_data; + + if (copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + /* special check to allow guest shutdown */ + if (!vx_info_flags(vxi, VXF_STATE_ADMIN, 0) && + /* forbid killall pid=0 when init is present */ + (((vc_data.pid < 1) && vxi->vx_initpid) || + (vc_data.pid > 1))) + return -EACCES; + + return vx_info_kill(vxi, vc_data.pid, vc_data.sig); +} + + +static int __wait_exit(struct vx_info *vxi) +{ + DECLARE_WAITQUEUE(wait, current); + int ret = 0; + + add_wait_queue(&vxi->vx_wait, &wait); + set_current_state(TASK_INTERRUPTIBLE); + +wait: + if (vx_info_state(vxi, + VXS_SHUTDOWN | VXS_HASHED | VXS_HELPER) == VXS_SHUTDOWN) + goto out; + if (signal_pending(current)) { + ret = -ERESTARTSYS; + goto out; + } + schedule(); + goto wait; + +out: + set_current_state(TASK_RUNNING); + remove_wait_queue(&vxi->vx_wait, &wait); + return ret; +} + + + +int vc_wait_exit(struct vx_info *vxi, void __user *data) +{ + struct vcmd_wait_exit_v0 vc_data; + int ret; + + ret = __wait_exit(vxi); + vc_data.reboot_cmd = vxi->reboot_cmd; + vc_data.exit_code = vxi->exit_code; + + if (copy_to_user(data, &vc_data, sizeof(vc_data))) + ret = -EFAULT; + return ret; +} + diff -NurpP --minimal linux-3.10.19/kernel/vserver/space.c linux-3.10.19-vs2.3.6.8/kernel/vserver/space.c --- linux-3.10.19/kernel/vserver/space.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/space.c 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,436 @@ +/* + * linux/kernel/vserver/space.c + * + * Virtual Server: Context Space Support + * + * Copyright (C) 2003-2010 Herbert Pötzl + * + * V0.01 broken out from context.c 0.07 + * V0.02 added task locking for namespace + * V0.03 broken out vx_enter_namespace + * V0.04 added *space support and commands + * V0.05 added credential support + * + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +atomic_t vs_global_nsproxy = ATOMIC_INIT(0); +atomic_t vs_global_fs = ATOMIC_INIT(0); +atomic_t vs_global_mnt_ns = ATOMIC_INIT(0); +atomic_t vs_global_uts_ns = ATOMIC_INIT(0); +atomic_t vs_global_user_ns = ATOMIC_INIT(0); +atomic_t vs_global_pid_ns = ATOMIC_INIT(0); + + +/* namespace functions */ + +#include +#include +#include +#include +#include +#include "../fs/mount.h" + + +static const struct vcmd_space_mask_v1 space_mask_v0 = { + .mask = CLONE_FS | + CLONE_NEWNS | +#ifdef CONFIG_UTS_NS + CLONE_NEWUTS | +#endif +#ifdef CONFIG_IPC_NS + CLONE_NEWIPC | +#endif +#ifdef CONFIG_USER_NS + CLONE_NEWUSER | +#endif + 0 +}; + +static const struct vcmd_space_mask_v1 space_mask = { + .mask = CLONE_FS | + CLONE_NEWNS | +#ifdef CONFIG_UTS_NS + CLONE_NEWUTS | +#endif +#ifdef CONFIG_IPC_NS + CLONE_NEWIPC | +#endif +#ifdef CONFIG_USER_NS + CLONE_NEWUSER | +#endif +#ifdef CONFIG_PID_NS + CLONE_NEWPID | +#endif +#ifdef CONFIG_NET_NS + CLONE_NEWNET | +#endif + 0 +}; + +static const struct vcmd_space_mask_v1 default_space_mask = { + .mask = CLONE_FS | + CLONE_NEWNS | +#ifdef 
CONFIG_UTS_NS + CLONE_NEWUTS | +#endif +#ifdef CONFIG_IPC_NS + CLONE_NEWIPC | +#endif +#ifdef CONFIG_USER_NS + CLONE_NEWUSER | +#endif +#ifdef CONFIG_PID_NS +// CLONE_NEWPID | +#endif + 0 +}; + +/* + * build a new nsproxy mix + * assumes that both proxies are 'const' + * does not touch nsproxy refcounts + * will hold a reference on the result. + */ + +struct nsproxy *vs_mix_nsproxy(struct nsproxy *old_nsproxy, + struct nsproxy *new_nsproxy, unsigned long mask) +{ + struct mnt_namespace *old_ns; + struct uts_namespace *old_uts; + struct ipc_namespace *old_ipc; +#ifdef CONFIG_PID_NS + struct pid_namespace *old_pid; +#endif +#ifdef CONFIG_NET_NS + struct net *old_net; +#endif + struct nsproxy *nsproxy; + + nsproxy = copy_nsproxy(old_nsproxy); + if (!nsproxy) + goto out; + + if (mask & CLONE_NEWNS) { + old_ns = nsproxy->mnt_ns; + nsproxy->mnt_ns = new_nsproxy->mnt_ns; + if (nsproxy->mnt_ns) + get_mnt_ns(nsproxy->mnt_ns); + } else + old_ns = NULL; + + if (mask & CLONE_NEWUTS) { + old_uts = nsproxy->uts_ns; + nsproxy->uts_ns = new_nsproxy->uts_ns; + if (nsproxy->uts_ns) + get_uts_ns(nsproxy->uts_ns); + } else + old_uts = NULL; + + if (mask & CLONE_NEWIPC) { + old_ipc = nsproxy->ipc_ns; + nsproxy->ipc_ns = new_nsproxy->ipc_ns; + if (nsproxy->ipc_ns) + get_ipc_ns(nsproxy->ipc_ns); + } else + old_ipc = NULL; + +#ifdef CONFIG_PID_NS + if (mask & CLONE_NEWPID) { + old_pid = nsproxy->pid_ns; + nsproxy->pid_ns = new_nsproxy->pid_ns; + if (nsproxy->pid_ns) + get_pid_ns(nsproxy->pid_ns); + } else + old_pid = NULL; +#endif +#ifdef CONFIG_NET_NS + if (mask & CLONE_NEWNET) { + old_net = nsproxy->net_ns; + nsproxy->net_ns = new_nsproxy->net_ns; + if (nsproxy->net_ns) + get_net(nsproxy->net_ns); + } else + old_net = NULL; +#endif + if (old_ns) + put_mnt_ns(old_ns); + if (old_uts) + put_uts_ns(old_uts); + if (old_ipc) + put_ipc_ns(old_ipc); +#ifdef CONFIG_PID_NS + if (old_pid) + put_pid_ns(old_pid); +#endif +#ifdef CONFIG_NET_NS + if (old_net) + put_net(old_net); +#endif +out: + return nsproxy; +} + + +/* + * merge two nsproxy structs into a new one. + * will hold a reference on the result. + */ + +static inline +struct nsproxy *__vs_merge_nsproxy(struct nsproxy *old, + struct nsproxy *proxy, unsigned long mask) +{ + struct nsproxy null_proxy = { .mnt_ns = NULL }; + + if (!proxy) + return NULL; + + if (mask) { + /* vs_mix_nsproxy returns with reference */ + return vs_mix_nsproxy(old ? 
old : &null_proxy, + proxy, mask); + } + get_nsproxy(proxy); + return proxy; +} + + +int vx_enter_space(struct vx_info *vxi, unsigned long mask, unsigned index) +{ + struct nsproxy *proxy, *proxy_cur, *proxy_new; + struct fs_struct *fs_cur, *fs = NULL; + struct _vx_space *space; + int ret, kill = 0; + + vxdprintk(VXD_CBIT(space, 8), "vx_enter_space(%p[#%u],0x%08lx,%d)", + vxi, vxi->vx_id, mask, index); + + if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0)) + return -EACCES; + + if (index >= VX_SPACES) + return -EINVAL; + + space = &vxi->space[index]; + + if (!mask) + mask = space->vx_nsmask; + + if ((mask & space->vx_nsmask) != mask) + return -EINVAL; + + if (mask & CLONE_FS) { + fs = copy_fs_struct(space->vx_fs); + if (!fs) + return -ENOMEM; + } + proxy = space->vx_nsproxy; + + vxdprintk(VXD_CBIT(space, 9), + "vx_enter_space(%p[#%u],0x%08lx,%d) -> (%p,%p)", + vxi, vxi->vx_id, mask, index, proxy, fs); + + task_lock(current); + fs_cur = current->fs; + + if (mask & CLONE_FS) { + spin_lock(&fs_cur->lock); + current->fs = fs; + kill = !--fs_cur->users; + spin_unlock(&fs_cur->lock); + } + + proxy_cur = current->nsproxy; + get_nsproxy(proxy_cur); + task_unlock(current); + + if (kill) + free_fs_struct(fs_cur); + + proxy_new = __vs_merge_nsproxy(proxy_cur, proxy, mask); + if (IS_ERR(proxy_new)) { + ret = PTR_ERR(proxy_new); + goto out_put; + } + + proxy_new = xchg(¤t->nsproxy, proxy_new); + + if (mask & CLONE_NEWUSER) { + struct cred *cred; + + vxdprintk(VXD_CBIT(space, 10), + "vx_enter_space(%p[#%u],%p) cred (%p,%p)", + vxi, vxi->vx_id, space->vx_cred, + current->real_cred, current->cred); + + if (space->vx_cred) { + cred = __prepare_creds(space->vx_cred); + if (cred) + commit_creds(cred); + } + } + + ret = 0; + + if (proxy_new) + put_nsproxy(proxy_new); +out_put: + if (proxy_cur) + put_nsproxy(proxy_cur); + return ret; +} + + +int vx_set_space(struct vx_info *vxi, unsigned long mask, unsigned index) +{ + struct nsproxy *proxy_vxi, *proxy_cur, *proxy_new; + struct fs_struct *fs_vxi, *fs = NULL; + struct _vx_space *space; + int ret, kill = 0; + + vxdprintk(VXD_CBIT(space, 8), "vx_set_space(%p[#%u],0x%08lx,%d)", + vxi, vxi->vx_id, mask, index); + + if ((mask & space_mask.mask) != mask) + return -EINVAL; + + if (index >= VX_SPACES) + return -EINVAL; + + space = &vxi->space[index]; + + proxy_vxi = space->vx_nsproxy; + fs_vxi = space->vx_fs; + + if (mask & CLONE_FS) { + fs = copy_fs_struct(current->fs); + if (!fs) + return -ENOMEM; + } + + task_lock(current); + + if (mask & CLONE_FS) { + spin_lock(&fs_vxi->lock); + space->vx_fs = fs; + kill = !--fs_vxi->users; + spin_unlock(&fs_vxi->lock); + } + + proxy_cur = current->nsproxy; + get_nsproxy(proxy_cur); + task_unlock(current); + + if (kill) + free_fs_struct(fs_vxi); + + proxy_new = __vs_merge_nsproxy(proxy_vxi, proxy_cur, mask); + if (IS_ERR(proxy_new)) { + ret = PTR_ERR(proxy_new); + goto out_put; + } + + proxy_new = xchg(&space->vx_nsproxy, proxy_new); + space->vx_nsmask |= mask; + + if (mask & CLONE_NEWUSER) { + struct cred *cred; + + vxdprintk(VXD_CBIT(space, 10), + "vx_set_space(%p[#%u],%p) cred (%p,%p)", + vxi, vxi->vx_id, space->vx_cred, + current->real_cred, current->cred); + + cred = prepare_creds(); + cred = (struct cred *)xchg(&space->vx_cred, cred); + if (cred) + abort_creds(cred); + } + + ret = 0; + + if (proxy_new) + put_nsproxy(proxy_new); +out_put: + if (proxy_cur) + put_nsproxy(proxy_cur); + return ret; +} + + +int vc_enter_space_v1(struct vx_info *vxi, void __user *data) +{ + struct vcmd_space_mask_v1 vc_data = { .mask = 0 }; + + if (data 
&& copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return vx_enter_space(vxi, vc_data.mask, 0); +} + +int vc_enter_space(struct vx_info *vxi, void __user *data) +{ + struct vcmd_space_mask_v2 vc_data = { .mask = 0 }; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + if (vc_data.index >= VX_SPACES) + return -EINVAL; + + return vx_enter_space(vxi, vc_data.mask, vc_data.index); +} + +int vc_set_space_v1(struct vx_info *vxi, void __user *data) +{ + struct vcmd_space_mask_v1 vc_data = { .mask = 0 }; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + return vx_set_space(vxi, vc_data.mask, 0); +} + +int vc_set_space(struct vx_info *vxi, void __user *data) +{ + struct vcmd_space_mask_v2 vc_data = { .mask = 0 }; + + if (data && copy_from_user(&vc_data, data, sizeof(vc_data))) + return -EFAULT; + + if (vc_data.index >= VX_SPACES) + return -EINVAL; + + return vx_set_space(vxi, vc_data.mask, vc_data.index); +} + +int vc_get_space_mask(void __user *data, int type) +{ + const struct vcmd_space_mask_v1 *mask; + + if (type == 0) + mask = &space_mask_v0; + else if (type == 1) + mask = &space_mask; + else + mask = &default_space_mask; + + vxdprintk(VXD_CBIT(space, 10), + "vc_get_space_mask(%d) = %08llx", type, mask->mask); + + if (copy_to_user(data, mask, sizeof(*mask))) + return -EFAULT; + return 0; +} + diff -NurpP --minimal linux-3.10.19/kernel/vserver/switch.c linux-3.10.19-vs2.3.6.8/kernel/vserver/switch.c --- linux-3.10.19/kernel/vserver/switch.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/switch.c 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,556 @@ +/* + * linux/kernel/vserver/switch.c + * + * Virtual Server: Syscall Switch + * + * Copyright (C) 2003-2011 Herbert Pötzl + * + * V0.01 syscall switch + * V0.02 added signal to context + * V0.03 added rlimit functions + * V0.04 added iattr, task/xid functions + * V0.05 added debug/history stuff + * V0.06 added compat32 layer + * V0.07 vcmd args and perms + * V0.08 added status commands + * V0.09 added tag commands + * V0.10 added oom bias + * V0.11 added device commands + * V0.12 added warn mask + * + */ + +#include +#include +#include + +#include "vci_config.h" + + +static inline +int vc_get_version(uint32_t id) +{ + return VCI_VERSION; +} + +static inline +int vc_get_vci(uint32_t id) +{ + return vci_kernel_config(); +} + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + + +#ifdef CONFIG_COMPAT +#define __COMPAT(name, id, data, compat) \ + (compat) ? name ## _x32(id, data) : name(id, data) +#define __COMPAT_NO_ID(name, data, compat) \ + (compat) ? 
name ## _x32(data) : name(data) +#else +#define __COMPAT(name, id, data, compat) \ + name(id, data) +#define __COMPAT_NO_ID(name, data, compat) \ + name(data) +#endif + + +static inline +long do_vcmd(uint32_t cmd, uint32_t id, + struct vx_info *vxi, struct nx_info *nxi, + void __user *data, int compat) +{ + switch (cmd) { + + case VCMD_get_version: + return vc_get_version(id); + case VCMD_get_vci: + return vc_get_vci(id); + + case VCMD_task_xid: + return vc_task_xid(id); + case VCMD_vx_info: + return vc_vx_info(vxi, data); + + case VCMD_task_nid: + return vc_task_nid(id); + case VCMD_nx_info: + return vc_nx_info(nxi, data); + + case VCMD_task_tag: + return vc_task_tag(id); + + case VCMD_set_space_v1: + return vc_set_space_v1(vxi, data); + /* this is version 2 */ + case VCMD_set_space: + return vc_set_space(vxi, data); + + case VCMD_get_space_mask_v0: + return vc_get_space_mask(data, 0); + /* this is version 1 */ + case VCMD_get_space_mask: + return vc_get_space_mask(data, 1); + + case VCMD_get_space_default: + return vc_get_space_mask(data, -1); + + case VCMD_set_umask: + return vc_set_umask(vxi, data); + + case VCMD_get_umask: + return vc_get_umask(vxi, data); + + case VCMD_set_wmask: + return vc_set_wmask(vxi, data); + + case VCMD_get_wmask: + return vc_get_wmask(vxi, data); +#ifdef CONFIG_IA32_EMULATION + case VCMD_get_rlimit: + return __COMPAT(vc_get_rlimit, vxi, data, compat); + case VCMD_set_rlimit: + return __COMPAT(vc_set_rlimit, vxi, data, compat); +#else + case VCMD_get_rlimit: + return vc_get_rlimit(vxi, data); + case VCMD_set_rlimit: + return vc_set_rlimit(vxi, data); +#endif + case VCMD_get_rlimit_mask: + return vc_get_rlimit_mask(id, data); + case VCMD_reset_hits: + return vc_reset_hits(vxi, data); + case VCMD_reset_minmax: + return vc_reset_minmax(vxi, data); + + case VCMD_get_vhi_name: + return vc_get_vhi_name(vxi, data); + case VCMD_set_vhi_name: + return vc_set_vhi_name(vxi, data); + + case VCMD_ctx_stat: + return vc_ctx_stat(vxi, data); + case VCMD_virt_stat: + return vc_virt_stat(vxi, data); + case VCMD_sock_stat: + return vc_sock_stat(vxi, data); + case VCMD_rlimit_stat: + return vc_rlimit_stat(vxi, data); + + case VCMD_set_cflags: + return vc_set_cflags(vxi, data); + case VCMD_get_cflags: + return vc_get_cflags(vxi, data); + + /* this is version 1 */ + case VCMD_set_ccaps: + return vc_set_ccaps(vxi, data); + /* this is version 1 */ + case VCMD_get_ccaps: + return vc_get_ccaps(vxi, data); + case VCMD_set_bcaps: + return vc_set_bcaps(vxi, data); + case VCMD_get_bcaps: + return vc_get_bcaps(vxi, data); + + case VCMD_set_badness: + return vc_set_badness(vxi, data); + case VCMD_get_badness: + return vc_get_badness(vxi, data); + + case VCMD_set_nflags: + return vc_set_nflags(nxi, data); + case VCMD_get_nflags: + return vc_get_nflags(nxi, data); + + case VCMD_set_ncaps: + return vc_set_ncaps(nxi, data); + case VCMD_get_ncaps: + return vc_get_ncaps(nxi, data); + + case VCMD_set_prio_bias: + return vc_set_prio_bias(vxi, data); + case VCMD_get_prio_bias: + return vc_get_prio_bias(vxi, data); + case VCMD_add_dlimit: + return __COMPAT(vc_add_dlimit, id, data, compat); + case VCMD_rem_dlimit: + return __COMPAT(vc_rem_dlimit, id, data, compat); + case VCMD_set_dlimit: + return __COMPAT(vc_set_dlimit, id, data, compat); + case VCMD_get_dlimit: + return __COMPAT(vc_get_dlimit, id, data, compat); + + case VCMD_ctx_kill: + return vc_ctx_kill(vxi, data); + + case VCMD_wait_exit: + return vc_wait_exit(vxi, data); + + case VCMD_get_iattr: + return __COMPAT_NO_ID(vc_get_iattr, data, 
compat); + case VCMD_set_iattr: + return __COMPAT_NO_ID(vc_set_iattr, data, compat); + + case VCMD_fget_iattr: + return vc_fget_iattr(id, data); + case VCMD_fset_iattr: + return vc_fset_iattr(id, data); + + case VCMD_enter_space_v0: + return vc_enter_space_v1(vxi, NULL); + case VCMD_enter_space_v1: + return vc_enter_space_v1(vxi, data); + /* this is version 2 */ + case VCMD_enter_space: + return vc_enter_space(vxi, data); + + case VCMD_ctx_create_v0: + return vc_ctx_create(id, NULL); + case VCMD_ctx_create: + return vc_ctx_create(id, data); + case VCMD_ctx_migrate_v0: + return vc_ctx_migrate(vxi, NULL); + case VCMD_ctx_migrate: + return vc_ctx_migrate(vxi, data); + + case VCMD_net_create_v0: + return vc_net_create(id, NULL); + case VCMD_net_create: + return vc_net_create(id, data); + case VCMD_net_migrate: + return vc_net_migrate(nxi, data); + + case VCMD_tag_migrate: + return vc_tag_migrate(id); + + case VCMD_net_add: + return vc_net_add(nxi, data); + case VCMD_net_remove: + return vc_net_remove(nxi, data); + + case VCMD_net_add_ipv4_v1: + return vc_net_add_ipv4_v1(nxi, data); + /* this is version 2 */ + case VCMD_net_add_ipv4: + return vc_net_add_ipv4(nxi, data); + + case VCMD_net_rem_ipv4_v1: + return vc_net_rem_ipv4_v1(nxi, data); + /* this is version 2 */ + case VCMD_net_rem_ipv4: + return vc_net_rem_ipv4(nxi, data); +#ifdef CONFIG_IPV6 + case VCMD_net_add_ipv6: + return vc_net_add_ipv6(nxi, data); + case VCMD_net_remove_ipv6: + return vc_net_remove_ipv6(nxi, data); +#endif +/* case VCMD_add_match_ipv4: + return vc_add_match_ipv4(nxi, data); + case VCMD_get_match_ipv4: + return vc_get_match_ipv4(nxi, data); +#ifdef CONFIG_IPV6 + case VCMD_add_match_ipv6: + return vc_add_match_ipv6(nxi, data); + case VCMD_get_match_ipv6: + return vc_get_match_ipv6(nxi, data); +#endif */ + +#ifdef CONFIG_VSERVER_DEVICE + case VCMD_set_mapping: + return __COMPAT(vc_set_mapping, vxi, data, compat); + case VCMD_unset_mapping: + return __COMPAT(vc_unset_mapping, vxi, data, compat); +#endif +#ifdef CONFIG_VSERVER_HISTORY + case VCMD_dump_history: + return vc_dump_history(id); + case VCMD_read_history: + return __COMPAT(vc_read_history, id, data, compat); +#endif + default: + vxwprintk_task(1, "unimplemented VCMD_%02d_%d[%d]", + VC_CATEGORY(cmd), VC_COMMAND(cmd), VC_VERSION(cmd)); + } + return -ENOSYS; +} + + +#define __VCMD(vcmd, _perm, _args, _flags) \ + case VCMD_ ## vcmd: perm = _perm; \ + args = _args; flags = _flags; break + + +#define VCA_NONE 0x00 +#define VCA_VXI 0x01 +#define VCA_NXI 0x02 + +#define VCF_NONE 0x00 +#define VCF_INFO 0x01 +#define VCF_ADMIN 0x02 +#define VCF_ARES 0x06 /* includes admin */ +#define VCF_SETUP 0x08 + +#define VCF_ZIDOK 0x10 /* zero id okay */ + + +static inline +long do_vserver(uint32_t cmd, uint32_t id, void __user *data, int compat) +{ + long ret; + int permit = -1, state = 0; + int perm = -1, args = 0, flags = 0; + struct vx_info *vxi = NULL; + struct nx_info *nxi = NULL; + + switch (cmd) { + /* unprivileged commands */ + __VCMD(get_version, 0, VCA_NONE, 0); + __VCMD(get_vci, 0, VCA_NONE, 0); + __VCMD(get_rlimit_mask, 0, VCA_NONE, 0); + __VCMD(get_space_mask_v0,0, VCA_NONE, 0); + __VCMD(get_space_mask, 0, VCA_NONE, 0); + __VCMD(get_space_default,0, VCA_NONE, 0); + + /* info commands */ + __VCMD(task_xid, 2, VCA_NONE, 0); + __VCMD(reset_hits, 2, VCA_VXI, 0); + __VCMD(reset_minmax, 2, VCA_VXI, 0); + __VCMD(vx_info, 3, VCA_VXI, VCF_INFO); + __VCMD(get_bcaps, 3, VCA_VXI, VCF_INFO); + __VCMD(get_ccaps, 3, VCA_VXI, VCF_INFO); + __VCMD(get_cflags, 3, VCA_VXI, VCF_INFO); + 
__VCMD(get_umask, 3, VCA_VXI, VCF_INFO); + __VCMD(get_wmask, 3, VCA_VXI, VCF_INFO); + __VCMD(get_badness, 3, VCA_VXI, VCF_INFO); + __VCMD(get_vhi_name, 3, VCA_VXI, VCF_INFO); + __VCMD(get_rlimit, 3, VCA_VXI, VCF_INFO); + + __VCMD(ctx_stat, 3, VCA_VXI, VCF_INFO); + __VCMD(virt_stat, 3, VCA_VXI, VCF_INFO); + __VCMD(sock_stat, 3, VCA_VXI, VCF_INFO); + __VCMD(rlimit_stat, 3, VCA_VXI, VCF_INFO); + + __VCMD(task_nid, 2, VCA_NONE, 0); + __VCMD(nx_info, 3, VCA_NXI, VCF_INFO); + __VCMD(get_ncaps, 3, VCA_NXI, VCF_INFO); + __VCMD(get_nflags, 3, VCA_NXI, VCF_INFO); + + __VCMD(task_tag, 2, VCA_NONE, 0); + + __VCMD(get_iattr, 2, VCA_NONE, 0); + __VCMD(fget_iattr, 2, VCA_NONE, 0); + __VCMD(get_dlimit, 3, VCA_NONE, VCF_INFO); + __VCMD(get_prio_bias, 3, VCA_VXI, VCF_INFO); + + /* lower admin commands */ + __VCMD(wait_exit, 4, VCA_VXI, VCF_INFO); + __VCMD(ctx_create_v0, 5, VCA_NONE, 0); + __VCMD(ctx_create, 5, VCA_NONE, 0); + __VCMD(ctx_migrate_v0, 5, VCA_VXI, VCF_ADMIN); + __VCMD(ctx_migrate, 5, VCA_VXI, VCF_ADMIN); + __VCMD(enter_space_v0, 5, VCA_VXI, VCF_ADMIN); + __VCMD(enter_space_v1, 5, VCA_VXI, VCF_ADMIN); + __VCMD(enter_space, 5, VCA_VXI, VCF_ADMIN); + + __VCMD(net_create_v0, 5, VCA_NONE, 0); + __VCMD(net_create, 5, VCA_NONE, 0); + __VCMD(net_migrate, 5, VCA_NXI, VCF_ADMIN); + + __VCMD(tag_migrate, 5, VCA_NONE, VCF_ADMIN); + + /* higher admin commands */ + __VCMD(ctx_kill, 6, VCA_VXI, VCF_ARES); + __VCMD(set_space_v1, 7, VCA_VXI, VCF_ARES | VCF_SETUP); + __VCMD(set_space, 7, VCA_VXI, VCF_ARES | VCF_SETUP); + + __VCMD(set_ccaps, 7, VCA_VXI, VCF_ARES | VCF_SETUP); + __VCMD(set_bcaps, 7, VCA_VXI, VCF_ARES | VCF_SETUP); + __VCMD(set_cflags, 7, VCA_VXI, VCF_ARES | VCF_SETUP); + __VCMD(set_umask, 7, VCA_VXI, VCF_ARES | VCF_SETUP); + __VCMD(set_wmask, 7, VCA_VXI, VCF_ARES | VCF_SETUP); + __VCMD(set_badness, 7, VCA_VXI, VCF_ARES | VCF_SETUP); + + __VCMD(set_vhi_name, 7, VCA_VXI, VCF_ARES | VCF_SETUP); + __VCMD(set_rlimit, 7, VCA_VXI, VCF_ARES | VCF_SETUP); + __VCMD(set_prio_bias, 7, VCA_VXI, VCF_ARES | VCF_SETUP); + + __VCMD(set_ncaps, 7, VCA_NXI, VCF_ARES | VCF_SETUP); + __VCMD(set_nflags, 7, VCA_NXI, VCF_ARES | VCF_SETUP); + __VCMD(net_add, 8, VCA_NXI, VCF_ARES | VCF_SETUP); + __VCMD(net_remove, 8, VCA_NXI, VCF_ARES | VCF_SETUP); + __VCMD(net_add_ipv4_v1, 8, VCA_NXI, VCF_ARES | VCF_SETUP); + __VCMD(net_rem_ipv4_v1, 8, VCA_NXI, VCF_ARES | VCF_SETUP); + __VCMD(net_add_ipv4, 8, VCA_NXI, VCF_ARES | VCF_SETUP); + __VCMD(net_rem_ipv4, 8, VCA_NXI, VCF_ARES | VCF_SETUP); +#ifdef CONFIG_IPV6 + __VCMD(net_add_ipv6, 8, VCA_NXI, VCF_ARES | VCF_SETUP); + __VCMD(net_remove_ipv6, 8, VCA_NXI, VCF_ARES | VCF_SETUP); +#endif + __VCMD(set_iattr, 7, VCA_NONE, 0); + __VCMD(fset_iattr, 7, VCA_NONE, 0); + __VCMD(set_dlimit, 7, VCA_NONE, VCF_ARES); + __VCMD(add_dlimit, 8, VCA_NONE, VCF_ARES); + __VCMD(rem_dlimit, 8, VCA_NONE, VCF_ARES); + +#ifdef CONFIG_VSERVER_DEVICE + __VCMD(set_mapping, 8, VCA_VXI, VCF_ARES|VCF_ZIDOK); + __VCMD(unset_mapping, 8, VCA_VXI, VCF_ARES|VCF_ZIDOK); +#endif + /* debug level admin commands */ +#ifdef CONFIG_VSERVER_HISTORY + __VCMD(dump_history, 9, VCA_NONE, 0); + __VCMD(read_history, 9, VCA_NONE, 0); +#endif + + default: + perm = -1; + } + + vxdprintk(VXD_CBIT(switch, 0), + "vc: VCMD_%02d_%d[%d], %d,%p [%d,%d,%x,%x]", + VC_CATEGORY(cmd), VC_COMMAND(cmd), + VC_VERSION(cmd), id, data, compat, + perm, args, flags); + + ret = -ENOSYS; + if (perm < 0) + goto out; + + state = 1; + if (!capable(CAP_CONTEXT)) + goto out; + + state = 2; + /* moved here from the individual commands */ + ret = -EPERM; + if 
((perm > 1) && !capable(CAP_SYS_ADMIN)) + goto out; + + state = 3; + /* vcmd involves resource management */ + ret = -EPERM; + if ((flags & VCF_ARES) && !capable(CAP_SYS_RESOURCE)) + goto out; + + state = 4; + /* various legacy exceptions */ + switch (cmd) { + /* will go away when spectator is a cap */ + case VCMD_ctx_migrate_v0: + case VCMD_ctx_migrate: + if (id == 1) { + current->xid = 1; + ret = 1; + goto out; + } + break; + + /* will go away when spectator is a cap */ + case VCMD_net_migrate: + if (id == 1) { + current->nid = 1; + ret = 1; + goto out; + } + break; + } + + /* vcmds are fine by default */ + permit = 1; + + /* admin type vcmds require admin ... */ + if (flags & VCF_ADMIN) + permit = vx_check(0, VS_ADMIN) ? 1 : 0; + + /* ... but setup type vcmds override that */ + if (!permit && (flags & VCF_SETUP)) + permit = vx_flags(VXF_STATE_SETUP, 0) ? 2 : 0; + + state = 5; + ret = -EPERM; + if (!permit) + goto out; + + state = 6; + if (!id && (flags & VCF_ZIDOK)) + goto skip_id; + + ret = -ESRCH; + if (args & VCA_VXI) { + vxi = lookup_vx_info(id); + if (!vxi) + goto out; + + if ((flags & VCF_ADMIN) && + /* special case kill for shutdown */ + (cmd != VCMD_ctx_kill) && + /* can context be administrated? */ + !vx_info_flags(vxi, VXF_STATE_ADMIN, 0)) { + ret = -EACCES; + goto out_vxi; + } + } + state = 7; + if (args & VCA_NXI) { + nxi = lookup_nx_info(id); + if (!nxi) + goto out_vxi; + + if ((flags & VCF_ADMIN) && + /* can context be administrated? */ + !nx_info_flags(nxi, NXF_STATE_ADMIN, 0)) { + ret = -EACCES; + goto out_nxi; + } + } +skip_id: + state = 8; + ret = do_vcmd(cmd, id, vxi, nxi, data, compat); + +out_nxi: + if ((args & VCA_NXI) && nxi) + put_nx_info(nxi); +out_vxi: + if ((args & VCA_VXI) && vxi) + put_vx_info(vxi); +out: + vxdprintk(VXD_CBIT(switch, 1), + "vc: VCMD_%02d_%d[%d] = %08lx(%ld) [%d,%d]", + VC_CATEGORY(cmd), VC_COMMAND(cmd), + VC_VERSION(cmd), ret, ret, state, permit); + return ret; +} + +asmlinkage long +sys_vserver(uint32_t cmd, uint32_t id, void __user *data) +{ + return do_vserver(cmd, id, data, 0); +} + +#ifdef CONFIG_COMPAT + +asmlinkage long +sys32_vserver(uint32_t cmd, uint32_t id, void __user *data) +{ + return do_vserver(cmd, id, data, 1); +} + +#endif /* CONFIG_COMPAT */ diff -NurpP --minimal linux-3.10.19/kernel/vserver/sysctl.c linux-3.10.19-vs2.3.6.8/kernel/vserver/sysctl.c --- linux-3.10.19/kernel/vserver/sysctl.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/sysctl.c 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,247 @@ +/* + * kernel/vserver/sysctl.c + * + * Virtual Context Support + * + * Copyright (C) 2004-2007 Herbert Pötzl + * + * V0.01 basic structure + * + */ + +#include +#include +#include +#include +#include + +enum { + CTL_DEBUG_ERROR = 0, + CTL_DEBUG_SWITCH = 1, + CTL_DEBUG_XID, + CTL_DEBUG_NID, + CTL_DEBUG_TAG, + CTL_DEBUG_NET, + CTL_DEBUG_LIMIT, + CTL_DEBUG_CRES, + CTL_DEBUG_DLIM, + CTL_DEBUG_QUOTA, + CTL_DEBUG_CVIRT, + CTL_DEBUG_SPACE, + CTL_DEBUG_PERM, + CTL_DEBUG_MISC, +}; + + +unsigned int vs_debug_switch = 0; +unsigned int vs_debug_xid = 0; +unsigned int vs_debug_nid = 0; +unsigned int vs_debug_tag = 0; +unsigned int vs_debug_net = 0; +unsigned int vs_debug_limit = 0; +unsigned int vs_debug_cres = 0; +unsigned int vs_debug_dlim = 0; +unsigned int vs_debug_quota = 0; +unsigned int vs_debug_cvirt = 0; +unsigned int vs_debug_space = 0; +unsigned int vs_debug_perm = 0; +unsigned int vs_debug_misc = 0; + + +static struct ctl_table_header *vserver_table_header; +static ctl_table vserver_root_table[]; + 
+ +void vserver_register_sysctl(void) +{ + if (!vserver_table_header) { + vserver_table_header = register_sysctl_table(vserver_root_table); + } + +} + +void vserver_unregister_sysctl(void) +{ + if (vserver_table_header) { + unregister_sysctl_table(vserver_table_header); + vserver_table_header = NULL; + } +} + + +static int proc_dodebug(ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + char tmpbuf[20], *p, c; + unsigned int value; + size_t left, len; + + if ((*ppos && !write) || !*lenp) { + *lenp = 0; + return 0; + } + + left = *lenp; + + if (write) { + if (!access_ok(VERIFY_READ, buffer, left)) + return -EFAULT; + p = (char *)buffer; + while (left && __get_user(c, p) >= 0 && isspace(c)) + left--, p++; + if (!left) + goto done; + + if (left > sizeof(tmpbuf) - 1) + return -EINVAL; + if (copy_from_user(tmpbuf, p, left)) + return -EFAULT; + tmpbuf[left] = '\0'; + + for (p = tmpbuf, value = 0; '0' <= *p && *p <= '9'; p++, left--) + value = 10 * value + (*p - '0'); + if (*p && !isspace(*p)) + return -EINVAL; + while (left && isspace(*p)) + left--, p++; + *(unsigned int *)table->data = value; + } else { + if (!access_ok(VERIFY_WRITE, buffer, left)) + return -EFAULT; + len = sprintf(tmpbuf, "%d", *(unsigned int *)table->data); + if (len > left) + len = left; + if (__copy_to_user(buffer, tmpbuf, len)) + return -EFAULT; + if ((left -= len) > 0) { + if (put_user('\n', (char *)buffer + len)) + return -EFAULT; + left--; + } + } + +done: + *lenp -= left; + *ppos += *lenp; + return 0; +} + +static int zero; + +#define CTL_ENTRY(ctl, name) \ + { \ + .procname = #name, \ + .data = &vs_ ## name, \ + .maxlen = sizeof(int), \ + .mode = 0644, \ + .proc_handler = &proc_dodebug, \ + .extra1 = &zero, \ + .extra2 = &zero, \ + } + +static ctl_table vserver_debug_table[] = { + CTL_ENTRY(CTL_DEBUG_SWITCH, debug_switch), + CTL_ENTRY(CTL_DEBUG_XID, debug_xid), + CTL_ENTRY(CTL_DEBUG_NID, debug_nid), + CTL_ENTRY(CTL_DEBUG_TAG, debug_tag), + CTL_ENTRY(CTL_DEBUG_NET, debug_net), + CTL_ENTRY(CTL_DEBUG_LIMIT, debug_limit), + CTL_ENTRY(CTL_DEBUG_CRES, debug_cres), + CTL_ENTRY(CTL_DEBUG_DLIM, debug_dlim), + CTL_ENTRY(CTL_DEBUG_QUOTA, debug_quota), + CTL_ENTRY(CTL_DEBUG_CVIRT, debug_cvirt), + CTL_ENTRY(CTL_DEBUG_SPACE, debug_space), + CTL_ENTRY(CTL_DEBUG_PERM, debug_perm), + CTL_ENTRY(CTL_DEBUG_MISC, debug_misc), + { 0 } +}; + +static ctl_table vserver_root_table[] = { + { + .procname = "vserver", + .mode = 0555, + .child = vserver_debug_table + }, + { 0 } +}; + + +static match_table_t tokens = { + { CTL_DEBUG_SWITCH, "switch=%x" }, + { CTL_DEBUG_XID, "xid=%x" }, + { CTL_DEBUG_NID, "nid=%x" }, + { CTL_DEBUG_TAG, "tag=%x" }, + { CTL_DEBUG_NET, "net=%x" }, + { CTL_DEBUG_LIMIT, "limit=%x" }, + { CTL_DEBUG_CRES, "cres=%x" }, + { CTL_DEBUG_DLIM, "dlim=%x" }, + { CTL_DEBUG_QUOTA, "quota=%x" }, + { CTL_DEBUG_CVIRT, "cvirt=%x" }, + { CTL_DEBUG_SPACE, "space=%x" }, + { CTL_DEBUG_PERM, "perm=%x" }, + { CTL_DEBUG_MISC, "misc=%x" }, + { CTL_DEBUG_ERROR, NULL } +}; + +#define HANDLE_CASE(id, name, val) \ + case CTL_DEBUG_ ## id: \ + vs_debug_ ## name = val; \ + printk("vs_debug_" #name "=0x%x\n", val); \ + break + + +static int __init vs_debug_setup(char *str) +{ + char *p; + int token; + + printk("vs_debug_setup(%s)\n", str); + while ((p = strsep(&str, ",")) != NULL) { + substring_t args[MAX_OPT_ARGS]; + unsigned int value; + + if (!*p) + continue; + + token = match_token(p, tokens, args); + value = (token > 0) ? 
simple_strtoul(args[0].from, NULL, 0) : 0; + + switch (token) { + HANDLE_CASE(SWITCH, switch, value); + HANDLE_CASE(XID, xid, value); + HANDLE_CASE(NID, nid, value); + HANDLE_CASE(TAG, tag, value); + HANDLE_CASE(NET, net, value); + HANDLE_CASE(LIMIT, limit, value); + HANDLE_CASE(CRES, cres, value); + HANDLE_CASE(DLIM, dlim, value); + HANDLE_CASE(QUOTA, quota, value); + HANDLE_CASE(CVIRT, cvirt, value); + HANDLE_CASE(SPACE, space, value); + HANDLE_CASE(PERM, perm, value); + HANDLE_CASE(MISC, misc, value); + default: + return -EINVAL; + break; + } + } + return 1; +} + +__setup("vsdebug=", vs_debug_setup); + + + +EXPORT_SYMBOL_GPL(vs_debug_switch); +EXPORT_SYMBOL_GPL(vs_debug_xid); +EXPORT_SYMBOL_GPL(vs_debug_nid); +EXPORT_SYMBOL_GPL(vs_debug_net); +EXPORT_SYMBOL_GPL(vs_debug_limit); +EXPORT_SYMBOL_GPL(vs_debug_cres); +EXPORT_SYMBOL_GPL(vs_debug_dlim); +EXPORT_SYMBOL_GPL(vs_debug_quota); +EXPORT_SYMBOL_GPL(vs_debug_cvirt); +EXPORT_SYMBOL_GPL(vs_debug_space); +EXPORT_SYMBOL_GPL(vs_debug_perm); +EXPORT_SYMBOL_GPL(vs_debug_misc); + diff -NurpP --minimal linux-3.10.19/kernel/vserver/tag.c linux-3.10.19-vs2.3.6.8/kernel/vserver/tag.c --- linux-3.10.19/kernel/vserver/tag.c 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/tag.c 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,63 @@ +/* + * linux/kernel/vserver/tag.c + * + * Virtual Server: Shallow Tag Space + * + * Copyright (C) 2007 Herbert Pötzl + * + * V0.01 basic implementation + * + */ + +#include +#include +#include +#include + +#include + + +int dx_migrate_task(struct task_struct *p, vtag_t tag) +{ + if (!p) + BUG(); + + vxdprintk(VXD_CBIT(tag, 5), + "dx_migrate_task(%p[#%d],#%d)", p, p->tag, tag); + + task_lock(p); + p->tag = tag; + task_unlock(p); + + vxdprintk(VXD_CBIT(tag, 5), + "moved task %p into [#%d]", p, tag); + return 0; +} + +/* vserver syscall commands below here */ + +/* task xid and vx_info functions */ + + +int vc_task_tag(uint32_t id) +{ + vtag_t tag; + + if (id) { + struct task_struct *tsk; + rcu_read_lock(); + tsk = find_task_by_real_pid(id); + tag = (tsk) ?
tsk->tag : -ESRCH; + rcu_read_unlock(); + } else + tag = dx_current_tag(); + return tag; +} + + +int vc_tag_migrate(uint32_t tag) +{ + return dx_migrate_task(current, tag & 0xFFFF); +} + + diff -NurpP --minimal linux-3.10.19/kernel/vserver/vci_config.h linux-3.10.19-vs2.3.6.8/kernel/vserver/vci_config.h --- linux-3.10.19/kernel/vserver/vci_config.h 1970-01-01 00:00:00.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/kernel/vserver/vci_config.h 2013-08-22 20:30:00.000000000 +0000 @@ -0,0 +1,80 @@ + +/* interface version */ + +#define VCI_VERSION 0x00020308 + + +enum { + VCI_KCBIT_NO_DYNAMIC = 0, + + VCI_KCBIT_PROC_SECURE = 4, + /* VCI_KCBIT_HARDCPU = 5, */ + /* VCI_KCBIT_IDLELIMIT = 6, */ + /* VCI_KCBIT_IDLETIME = 7, */ + + VCI_KCBIT_COWBL = 8, + VCI_KCBIT_FULLCOWBL = 9, + VCI_KCBIT_SPACES = 10, + VCI_KCBIT_NETV2 = 11, + VCI_KCBIT_MEMCG = 12, + VCI_KCBIT_MEMCG_SWAP = 13, + + VCI_KCBIT_DEBUG = 16, + VCI_KCBIT_HISTORY = 20, + VCI_KCBIT_TAGGED = 24, + VCI_KCBIT_PPTAG = 28, + + VCI_KCBIT_MORE = 31, +}; + + +static inline uint32_t vci_kernel_config(void) +{ + return + (1 << VCI_KCBIT_NO_DYNAMIC) | + + /* configured features */ +#ifdef CONFIG_VSERVER_PROC_SECURE + (1 << VCI_KCBIT_PROC_SECURE) | +#endif +#ifdef CONFIG_VSERVER_COWBL + (1 << VCI_KCBIT_COWBL) | + (1 << VCI_KCBIT_FULLCOWBL) | +#endif + (1 << VCI_KCBIT_SPACES) | + (1 << VCI_KCBIT_NETV2) | +#ifdef CONFIG_MEMCG + (1 << VCI_KCBIT_MEMCG) | +#endif +#ifdef CONFIG_MEMCG_SWAP + (1 << VCI_KCBIT_MEMCG_SWAP) | +#endif + + /* debug options */ +#ifdef CONFIG_VSERVER_DEBUG + (1 << VCI_KCBIT_DEBUG) | +#endif +#ifdef CONFIG_VSERVER_HISTORY + (1 << VCI_KCBIT_HISTORY) | +#endif + + /* inode context tagging */ +#if defined(CONFIG_TAGGING_NONE) + (0 << VCI_KCBIT_TAGGED) | +#elif defined(CONFIG_TAGGING_UID16) + (1 << VCI_KCBIT_TAGGED) | +#elif defined(CONFIG_TAGGING_GID16) + (2 << VCI_KCBIT_TAGGED) | +#elif defined(CONFIG_TAGGING_ID24) + (3 << VCI_KCBIT_TAGGED) | +#elif defined(CONFIG_TAGGING_INTERN) + (4 << VCI_KCBIT_TAGGED) | +#elif defined(CONFIG_TAGGING_RUNTIME) + (5 << VCI_KCBIT_TAGGED) | +#else + (7 << VCI_KCBIT_TAGGED) | +#endif + (1 << VCI_KCBIT_PPTAG) | + 0; +} + diff -NurpP --minimal linux-3.10.19/mm/memcontrol.c linux-3.10.19-vs2.3.6.8/mm/memcontrol.c --- linux-3.10.19/mm/memcontrol.c 2013-11-13 17:21:14.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/mm/memcontrol.c 2013-11-13 17:17:16.000000000 +0000 @@ -1082,6 +1082,31 @@ struct mem_cgroup *mem_cgroup_from_task( return mem_cgroup_from_css(task_subsys_state(p, mem_cgroup_subsys_id)); } +u64 mem_cgroup_res_read_u64(struct mem_cgroup *mem, int member) +{ + return res_counter_read_u64(&mem->res, member); +} + +u64 mem_cgroup_memsw_read_u64(struct mem_cgroup *mem, int member) +{ + return res_counter_read_u64(&mem->memsw, member); +} + +s64 mem_cgroup_stat_read_cache(struct mem_cgroup *mem) +{ + return mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE); +} + +s64 mem_cgroup_stat_read_anon(struct mem_cgroup *mem) +{ + return mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS); +} + +s64 mem_cgroup_stat_read_mapped(struct mem_cgroup *mem) +{ + return mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED); +} + struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) { struct mem_cgroup *memcg = NULL; diff -NurpP --minimal linux-3.10.19/mm/oom_kill.c linux-3.10.19-vs2.3.6.8/mm/oom_kill.c --- linux-3.10.19/mm/oom_kill.c 2013-05-31 13:45:31.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/mm/oom_kill.c 2013-08-22 20:30:00.000000000 +0000 @@ -35,6 +35,8 @@ #include #include #include +#include +#include #define 
CREATE_TRACE_POINTS #include @@ -113,11 +115,18 @@ struct task_struct *find_lock_task_mm(st static bool oom_unkillable_task(struct task_struct *p, const struct mem_cgroup *memcg, const nodemask_t *nodemask) { - if (is_global_init(p)) + unsigned xid = vx_current_xid(); + + /* skip the init task, global and per guest */ + if (task_is_init(p)) return true; if (p->flags & PF_KTHREAD) return true; + /* skip other guest and host processes if oom in guest */ + if (xid && vx_task_xid(p) != xid) + return true; + /* When mem_cgroup_out_of_memory() and p is not member of the group */ if (memcg && !task_in_mem_cgroup(p, memcg)) return true; @@ -426,8 +435,8 @@ void oom_kill_process(struct task_struct dump_header(p, gfp_mask, order, memcg, nodemask); task_lock(p); - pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n", - message, task_pid_nr(p), p->comm, points); + pr_err("%s: Kill process %d:#%u (%s) score %d or sacrifice child\n", + message, task_pid_nr(p), p->xid, p->comm, points); task_unlock(p); /* @@ -472,8 +481,8 @@ void oom_kill_process(struct task_struct /* mm cannot safely be dereferenced after task_unlock(victim) */ mm = victim->mm; - pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n", - task_pid_nr(victim), victim->comm, K(victim->mm->total_vm), + pr_err("Killed process %d:#%u (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n", + task_pid_nr(victim), victim->xid, victim->comm, K(victim->mm->total_vm), K(get_mm_counter(victim->mm, MM_ANONPAGES)), K(get_mm_counter(victim->mm, MM_FILEPAGES))); task_unlock(victim); @@ -543,6 +552,8 @@ int unregister_oom_notifier(struct notif } EXPORT_SYMBOL_GPL(unregister_oom_notifier); +long vs_oom_action(unsigned int); + /* * Try to acquire the OOM killer lock for the zones in zonelist. Returns zero * if a parallel OOM killing is already taking place that includes a zone in @@ -655,7 +666,12 @@ void out_of_memory(struct zonelist *zone /* Found nothing?!?! Either we hang forever, or we panic. 
 */
 	if (!p) {
 		dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
-		panic("Out of memory and no killable processes...\n");
+
+		/* avoid panic for guest OOM */
+		if (vx_current_xid())
+			vs_oom_action(LINUX_REBOOT_CMD_OOM);
+		else
+			panic("Out of memory and no killable processes...\n");
 	}
 	if (PTR_ERR(p) != -1UL) {
 		oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
diff -NurpP --minimal linux-3.10.19/mm/page_alloc.c linux-3.10.19-vs2.3.6.8/mm/page_alloc.c
--- linux-3.10.19/mm/page_alloc.c 2013-11-13 17:21:14.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/mm/page_alloc.c 2013-11-13 17:17:16.000000000 +0000
@@ -60,6 +60,8 @@
 #include
 #include
 #include
+#include
+#include
 
 #include
 #include
@@ -2899,6 +2901,9 @@ void si_meminfo(struct sysinfo *val)
 	val->totalhigh = totalhigh_pages;
 	val->freehigh = nr_free_highpages();
 	val->mem_unit = PAGE_SIZE;
+
+	if (vx_flags(VXF_VIRT_MEM, 0))
+		vx_vsi_meminfo(val);
 }
 
 EXPORT_SYMBOL(si_meminfo);
@@ -2919,6 +2924,9 @@ void si_meminfo_node(struct sysinfo *val
 	val->freehigh = 0;
 #endif
 	val->mem_unit = PAGE_SIZE;
+
+	if (vx_flags(VXF_VIRT_MEM, 0))
+		vx_vsi_meminfo(val);
 }
 #endif
diff -NurpP --minimal linux-3.10.19/mm/pgtable-generic.c linux-3.10.19-vs2.3.6.8/mm/pgtable-generic.c
--- linux-3.10.19/mm/pgtable-generic.c 2013-02-19 13:58:57.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/mm/pgtable-generic.c 2013-08-22 20:30:00.000000000 +0000
@@ -6,6 +6,8 @@
  * Copyright (C) 2010 Linus Torvalds
  */
 
+#include
+
 #include
 #include
 #include
diff -NurpP --minimal linux-3.10.19/mm/shmem.c linux-3.10.19-vs2.3.6.8/mm/shmem.c
--- linux-3.10.19/mm/shmem.c 2013-11-13 17:21:14.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/mm/shmem.c 2013-11-13 17:17:16.000000000 +0000
@@ -1911,7 +1911,7 @@ static int shmem_statfs(struct dentry *d
 {
 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
 
-	buf->f_type = TMPFS_MAGIC;
+	buf->f_type = TMPFS_SUPER_MAGIC;
 	buf->f_bsize = PAGE_CACHE_SIZE;
 	buf->f_namelen = NAME_MAX;
 	if (sbinfo->max_blocks) {
@@ -2608,7 +2608,7 @@ int shmem_fill_super
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
 	sb->s_blocksize = PAGE_CACHE_SIZE;
 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
-	sb->s_magic = TMPFS_MAGIC;
+	sb->s_magic = TMPFS_SUPER_MAGIC;
 	sb->s_op = &shmem_ops;
 	sb->s_time_gran = 1;
 #ifdef CONFIG_TMPFS_XATTR
diff -NurpP --minimal linux-3.10.19/mm/slab.c linux-3.10.19-vs2.3.6.8/mm/slab.c
--- linux-3.10.19/mm/slab.c 2013-11-13 17:21:14.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/mm/slab.c 2013-11-13 17:17:16.000000000 +0000
@@ -388,6 +388,8 @@ static void kmem_cache_node_init(struct
 #define STATS_INC_FREEMISS(x)	do { } while (0)
 #endif
 
+#include "slab_vs.h"
+
 #if DEBUG
 
 /*
@@ -3314,6 +3316,7 @@ retry:
 	obj = slab_get_obj(cachep, slabp, nodeid);
 	check_slabp(cachep, slabp);
+	vx_slab_alloc(cachep, flags);
 	n->free_objects--;
 	/* move slabp to correct slabp list: */
 	list_del(&slabp->list);
@@ -3393,6 +3396,7 @@ slab_alloc_node(struct kmem_cache *cache
 	/* ___cache_alloc_node can fall back to other nodes */
 	ptr = ____cache_alloc_node(cachep, flags, nodeid);
   out:
+	vx_slab_alloc(cachep, flags);
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 	kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
@@ -3585,6 +3589,7 @@ static inline void __cache_free(struct k
 	check_irq_off();
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, caller);
+	vx_slab_free(cachep);
 
 	kmemcheck_slab_free(cachep, objp, cachep->object_size);
 
diff -NurpP --minimal linux-3.10.19/mm/slab_vs.h linux-3.10.19-vs2.3.6.8/mm/slab_vs.h
--- linux-3.10.19/mm/slab_vs.h 1970-01-01 00:00:00.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/mm/slab_vs.h 2013-08-22 20:30:00.000000000 +0000
@@ -0,0 +1,29 @@
+
+#include
+
+#include
+
+static inline
+void vx_slab_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+	int what = gfp_zone(cachep->allocflags);
+	struct vx_info *vxi = current_vx_info();
+
+	if (!vxi)
+		return;
+
+	atomic_add(cachep->size, &vxi->cacct.slab[what]);
+}
+
+static inline
+void vx_slab_free(struct kmem_cache *cachep)
+{
+	int what = gfp_zone(cachep->allocflags);
+	struct vx_info *vxi = current_vx_info();
+
+	if (!vxi)
+		return;
+
+	atomic_sub(cachep->size, &vxi->cacct.slab[what]);
+}
+
diff -NurpP --minimal linux-3.10.19/mm/swapfile.c linux-3.10.19-vs2.3.6.8/mm/swapfile.c
--- linux-3.10.19/mm/swapfile.c 2013-07-14 17:01:36.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/mm/swapfile.c 2013-08-22 20:30:00.000000000 +0000
@@ -39,6 +39,7 @@
 #include
 #include
 #include
+#include
 
 static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
 				 unsigned char);
@@ -1768,6 +1769,16 @@ static int swap_show(struct seq_file *sw
 
 	if (si == SEQ_START_TOKEN) {
 		seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
+		if (vx_flags(VXF_VIRT_MEM, 0)) {
+			struct sysinfo si;
+
+			vx_vsi_swapinfo(&si);
+			if (si.totalswap < (1 << 10))
+				return 0;
+			seq_printf(swap, "%s\t\t\t\t\t%s\t%lu\t%lu\t%d\n",
+				"hdv0", "partition", si.totalswap >> 10,
+				(si.totalswap - si.freeswap) >> 10, -1);
+		}
 		return 0;
 	}
@@ -2196,6 +2207,8 @@ void si_swapinfo(struct sysinfo *val)
 	val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
 	val->totalswap = total_swap_pages + nr_to_be_unused;
 	spin_unlock(&swap_lock);
+	if (vx_flags(VXF_VIRT_MEM, 0))
+		vx_vsi_swapinfo(val);
 }
 
 /*
diff -NurpP --minimal linux-3.10.19/net/bridge/br_multicast.c linux-3.10.19-vs2.3.6.8/net/bridge/br_multicast.c
--- linux-3.10.19/net/bridge/br_multicast.c 2013-11-13 17:21:14.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/net/bridge/br_multicast.c 2013-11-13 17:17:16.000000000 +0000
@@ -443,7 +443,7 @@ static struct sk_buff *br_ip6_multicast_
 	ip6h->hop_limit = 1;
 	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
 	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
-			       &ip6h->saddr)) {
+			       &ip6h->saddr, NULL)) {
 		kfree_skb(skb);
 		return NULL;
 	}
diff -NurpP --minimal linux-3.10.19/net/core/dev.c linux-3.10.19-vs2.3.6.8/net/core/dev.c
--- linux-3.10.19/net/core/dev.c 2013-11-13 17:21:15.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/net/core/dev.c 2013-11-13 17:17:16.000000000 +0000
@@ -122,6 +122,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -660,7 +661,8 @@ struct net_device *__dev_get_by_name(str
 	struct hlist_head *head = dev_name_hash(net, name);
 
 	hlist_for_each_entry(dev, head, name_hlist)
-		if (!strncmp(dev->name, name, IFNAMSIZ))
+		if (!strncmp(dev->name, name, IFNAMSIZ) &&
+		    nx_dev_visible(current_nx_info(), dev))
 			return dev;
 
 	return NULL;
@@ -685,7 +687,8 @@ struct net_device *dev_get_by_name_rcu(s
 	struct hlist_head *head = dev_name_hash(net, name);
 
 	hlist_for_each_entry_rcu(dev, head, name_hlist)
-		if (!strncmp(dev->name, name, IFNAMSIZ) &&
+		if (!strncmp(dev->name, name, IFNAMSIZ) &&
+		    nx_dev_visible(current_nx_info(), dev))
 			return dev;
 
 	return NULL;
@@ -735,7 +738,8 @@ struct net_device *__dev_get_by_index(st
 	struct hlist_head *head = dev_index_hash(net, ifindex);
 
 	hlist_for_each_entry(dev, head, index_hlist)
-		if (dev->ifindex == ifindex)
+		if ((dev->ifindex == ifindex) &&
+
nx_dev_visible(current_nx_info(), dev)) return dev; return NULL; @@ -753,7 +757,7 @@ EXPORT_SYMBOL(__dev_get_by_index); * about locking. The caller must hold RCU lock. */ -struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) +struct net_device *dev_get_by_index_real_rcu(struct net *net, int ifindex) { struct net_device *dev; struct hlist_head *head = dev_index_hash(net, ifindex); @@ -764,6 +768,16 @@ struct net_device *dev_get_by_index_rcu( return NULL; } +EXPORT_SYMBOL(dev_get_by_index_real_rcu); + +struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) +{ + struct net_device *dev = dev_get_by_index_real_rcu(net, ifindex); + + if (nx_dev_visible(current_nx_info(), dev)) + return dev; + return NULL; +} EXPORT_SYMBOL(dev_get_by_index_rcu); @@ -846,7 +860,8 @@ struct net_device *dev_getbyhwaddr_rcu(s for_each_netdev_rcu(net, dev) if (dev->type == type && - !memcmp(dev->dev_addr, ha, dev->addr_len)) + !memcmp(dev->dev_addr, ha, dev->addr_len) && + nx_dev_visible(current_nx_info(), dev)) return dev; return NULL; @@ -858,9 +873,11 @@ struct net_device *__dev_getfirstbyhwtyp struct net_device *dev; ASSERT_RTNL(); - for_each_netdev(net, dev) - if (dev->type == type) + for_each_netdev(net, dev) { + if ((dev->type == type) && + nx_dev_visible(current_nx_info(), dev)) return dev; + } return NULL; } @@ -872,7 +889,8 @@ struct net_device *dev_getfirstbyhwtype( rcu_read_lock(); for_each_netdev_rcu(net, dev) - if (dev->type == type) { + if ((dev->type == type) && + nx_dev_visible(current_nx_info(), dev)) { dev_hold(dev); ret = dev; break; @@ -900,7 +918,8 @@ struct net_device *dev_get_by_flags_rcu( ret = NULL; for_each_netdev_rcu(net, dev) { - if (((dev->flags ^ if_flags) & mask) == 0) { + if ((((dev->flags ^ if_flags) & mask) == 0) && + nx_dev_visible(current_nx_info(), dev)) { ret = dev; break; } @@ -978,6 +997,8 @@ static int __dev_alloc_name(struct net * continue; if (i < 0 || i >= max_netdevices) continue; + if (!nx_dev_visible(current_nx_info(), d)) + continue; /* avoid cases where sscanf is not exact inverse of printf */ snprintf(buf, IFNAMSIZ, name, i); diff -NurpP --minimal linux-3.10.19/net/core/net-procfs.c linux-3.10.19-vs2.3.6.8/net/core/net-procfs.c --- linux-3.10.19/net/core/net-procfs.c 2013-07-14 17:01:37.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/core/net-procfs.c 2013-08-22 20:30:00.000000000 +0000 @@ -1,6 +1,7 @@ #include #include #include +#include #include #define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1) @@ -77,8 +78,13 @@ static void dev_seq_stop(struct seq_file static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) { struct rtnl_link_stats64 temp; - const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); + const struct rtnl_link_stats64 *stats; + + /* device visible inside network context? 
*/ + if (!nx_dev_visible(current_nx_info(), dev)) + return; + stats = dev_get_stats(dev, &temp); seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu " "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n", dev->name, stats->rx_bytes, stats->rx_packets, diff -NurpP --minimal linux-3.10.19/net/core/rtnetlink.c linux-3.10.19-vs2.3.6.8/net/core/rtnetlink.c --- linux-3.10.19/net/core/rtnetlink.c 2013-11-13 17:21:15.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/core/rtnetlink.c 2013-11-13 17:17:16.000000000 +0000 @@ -1059,6 +1059,8 @@ static int rtnl_dump_ifinfo(struct sk_bu hlist_for_each_entry_rcu(dev, head, index_hlist) { if (idx < s_idx) goto cont; + if (!nx_dev_visible(skb->sk->sk_nx_info, dev)) + continue; if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 0, @@ -1951,6 +1953,9 @@ void rtmsg_ifinfo(int type, struct net_d int err = -ENOBUFS; size_t if_info_size; + if (!nx_dev_visible(current_nx_info(), dev)) + return; + skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), GFP_KERNEL); if (skb == NULL) goto errout; diff -NurpP --minimal linux-3.10.19/net/core/sock.c linux-3.10.19-vs2.3.6.8/net/core/sock.c --- linux-3.10.19/net/core/sock.c 2013-11-13 17:21:15.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/core/sock.c 2013-11-13 17:22:25.000000000 +0000 @@ -132,6 +132,10 @@ #include #include +#include +#include +#include +#include #include @@ -1252,6 +1256,8 @@ static struct sock *sk_prot_alloc(struct goto out_free_sec; sk_tx_queue_clear(sk); } + sock_vx_init(sk); + sock_nx_init(sk); return sk; @@ -1360,6 +1366,11 @@ static void __sk_free(struct sock *sk) put_cred(sk->sk_peer_cred); put_pid(sk->sk_peer_pid); put_net(sock_net(sk)); + vx_sock_dec(sk); + clr_vx_info(&sk->sk_vx_info); + sk->sk_xid = -1; + clr_nx_info(&sk->sk_nx_info); + sk->sk_nid = -1; sk_prot_free(sk->sk_prot_creator, sk); } @@ -1420,6 +1431,8 @@ struct sock *sk_clone_lock(const struct /* SANITY */ get_net(sock_net(newsk)); + sock_vx_init(newsk); + sock_nx_init(newsk); sk_node_init(&newsk->sk_node); sock_lock_init(newsk); bh_lock_sock(newsk); @@ -1476,6 +1489,12 @@ struct sock *sk_clone_lock(const struct smp_wmb(); atomic_set(&newsk->sk_refcnt, 2); + set_vx_info(&newsk->sk_vx_info, sk->sk_vx_info); + newsk->sk_xid = sk->sk_xid; + vx_sock_inc(newsk); + set_nx_info(&newsk->sk_nx_info, sk->sk_nx_info); + newsk->sk_nid = sk->sk_nid; + /* * Increment the counter in the same struct proto as the master * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that @@ -2271,6 +2290,12 @@ void sock_init_data(struct socket *sock, sk->sk_stamp = ktime_set(-1L, 0); + set_vx_info(&sk->sk_vx_info, current_vx_info()); + sk->sk_xid = vx_current_xid(); + vx_sock_inc(sk); + set_nx_info(&sk->sk_nx_info, current_nx_info()); + sk->sk_nid = nx_current_nid(); + sk->sk_pacing_rate = ~0U; /* * Before updating sk_refcnt, we must commit prior changes to memory diff -NurpP --minimal linux-3.10.19/net/ipv4/af_inet.c linux-3.10.19-vs2.3.6.8/net/ipv4/af_inet.c --- linux-3.10.19/net/ipv4/af_inet.c 2013-11-13 17:21:15.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv4/af_inet.c 2013-11-13 17:17:16.000000000 +0000 @@ -118,6 +118,7 @@ #ifdef CONFIG_IP_MROUTE #include #endif +#include /* The inetsw table contains everything that inet_create needs to @@ -336,10 +337,13 @@ lookup_protocol: } err = -EPERM; + if ((protocol == IPPROTO_ICMP) && + nx_capable(CAP_NET_RAW, NXC_RAW_ICMP)) + goto override; if (sock->type == SOCK_RAW && !kern && !ns_capable(net->user_ns, CAP_NET_RAW)) goto out_rcu_unlock; - 
+override: sock->ops = answer->ops; answer_prot = answer->prot; answer_no_check = answer->no_check; @@ -460,6 +464,7 @@ int inet_bind(struct socket *sock, struc struct sockaddr_in *addr = (struct sockaddr_in *)uaddr; struct sock *sk = sock->sk; struct inet_sock *inet = inet_sk(sk); + struct nx_v4_sock_addr nsa; struct net *net = sock_net(sk); unsigned short snum; int chk_addr_ret; @@ -484,7 +489,11 @@ int inet_bind(struct socket *sock, struc goto out; } - chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr); + err = v4_map_sock_addr(inet, addr, &nsa); + if (err) + goto out; + + chk_addr_ret = inet_addr_type(net, nsa.saddr); /* Not specified by any standard per-se, however it breaks too * many applications when removed. It is unfortunate since @@ -496,7 +505,7 @@ int inet_bind(struct socket *sock, struc err = -EADDRNOTAVAIL; if (!sysctl_ip_nonlocal_bind && !(inet->freebind || inet->transparent) && - addr->sin_addr.s_addr != htonl(INADDR_ANY) && + nsa.saddr != htonl(INADDR_ANY) && chk_addr_ret != RTN_LOCAL && chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) @@ -522,7 +531,7 @@ int inet_bind(struct socket *sock, struc if (sk->sk_state != TCP_CLOSE || inet->inet_num) goto out_release_sock; - inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr; + v4_set_sock_addr(inet, &nsa); if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) inet->inet_saddr = 0; /* Use device */ @@ -741,11 +750,13 @@ int inet_getname(struct socket *sock, st peer == 1)) return -ENOTCONN; sin->sin_port = inet->inet_dport; - sin->sin_addr.s_addr = inet->inet_daddr; + sin->sin_addr.s_addr = + nx_map_sock_lback(sk->sk_nx_info, inet->inet_daddr); } else { __be32 addr = inet->inet_rcv_saddr; if (!addr) addr = inet->inet_saddr; + addr = nx_map_sock_lback(sk->sk_nx_info, addr); sin->sin_port = inet->inet_sport; sin->sin_addr.s_addr = addr; } diff -NurpP --minimal linux-3.10.19/net/ipv4/arp.c linux-3.10.19-vs2.3.6.8/net/ipv4/arp.c --- linux-3.10.19/net/ipv4/arp.c 2013-07-14 17:01:37.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv4/arp.c 2013-08-22 20:30:00.000000000 +0000 @@ -1332,6 +1332,7 @@ static void arp_format_neigh_entry(struc struct net_device *dev = n->dev; int hatype = dev->type; + /* FIXME: check for network context */ read_lock(&n->lock); /* Convert hardware address to XX:XX:XX:XX ... form. */ #if IS_ENABLED(CONFIG_AX25) @@ -1363,6 +1364,7 @@ static void arp_format_pneigh_entry(stru int hatype = dev ? dev->type : 0; char tbuf[16]; + /* FIXME: check for network context */ sprintf(tbuf, "%pI4", n->key); seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n", tbuf, hatype, ATF_PUBL | ATF_PERM, "00:00:00:00:00:00", diff -NurpP --minimal linux-3.10.19/net/ipv4/devinet.c linux-3.10.19-vs2.3.6.8/net/ipv4/devinet.c --- linux-3.10.19/net/ipv4/devinet.c 2013-11-13 17:21:15.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv4/devinet.c 2013-11-13 17:17:16.000000000 +0000 @@ -522,6 +522,7 @@ struct in_device *inetdev_by_index(struc } EXPORT_SYMBOL(inetdev_by_index); + /* Called only from RTNL semaphored context. No locks. */ struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, @@ -942,6 +943,8 @@ int devinet_ioctl(struct net *net, unsig in_dev = __in_dev_get_rtnl(dev); if (in_dev) { + struct nx_info *nxi = current_nx_info(); + if (tryaddrmatch) { /* Matthias Andree */ /* compare label and address (4.4BSD style) */ @@ -950,6 +953,8 @@ int devinet_ioctl(struct net *net, unsig This is checked above. 
*/ for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL; ifap = &ifa->ifa_next) { + if (!nx_v4_ifa_visible(nxi, ifa)) + continue; if (!strcmp(ifr.ifr_name, ifa->ifa_label) && sin_orig.sin_addr.s_addr == ifa->ifa_local) { @@ -962,9 +967,12 @@ int devinet_ioctl(struct net *net, unsig comparing just the label */ if (!ifa) { for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL; - ifap = &ifa->ifa_next) + ifap = &ifa->ifa_next) { + if (!nx_v4_ifa_visible(nxi, ifa)) + continue; if (!strcmp(ifr.ifr_name, ifa->ifa_label)) break; + } } } @@ -1118,6 +1126,8 @@ static int inet_gifconf(struct net_devic goto out; for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { + if (!nx_v4_ifa_visible(current_nx_info(), ifa)) + continue; if (!buf) { done += sizeof(ifr); continue; @@ -1521,6 +1531,7 @@ static int inet_dump_ifaddr(struct sk_bu struct net_device *dev; struct in_device *in_dev; struct in_ifaddr *ifa; + struct sock *sk = skb->sk; struct hlist_head *head; s_h = cb->args[0]; @@ -1544,6 +1555,8 @@ static int inet_dump_ifaddr(struct sk_bu for (ifa = in_dev->ifa_list, ip_idx = 0; ifa; ifa = ifa->ifa_next, ip_idx++) { + if (sk && !nx_v4_ifa_visible(sk->sk_nx_info, ifa)) + continue; if (ip_idx < s_ip_idx) continue; if (inet_fill_ifaddr(skb, ifa, diff -NurpP --minimal linux-3.10.19/net/ipv4/fib_trie.c linux-3.10.19-vs2.3.6.8/net/ipv4/fib_trie.c --- linux-3.10.19/net/ipv4/fib_trie.c 2013-11-13 17:21:15.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv4/fib_trie.c 2013-11-13 17:17:16.000000000 +0000 @@ -2536,6 +2536,7 @@ static int fib_route_seq_show(struct seq || fa->fa_type == RTN_MULTICAST) continue; + /* FIXME: check for network context? */ if (fi) seq_printf(seq, "%s\t%08X\t%08X\t%04X\t%d\t%u\t" diff -NurpP --minimal linux-3.10.19/net/ipv4/inet_connection_sock.c linux-3.10.19-vs2.3.6.8/net/ipv4/inet_connection_sock.c --- linux-3.10.19/net/ipv4/inet_connection_sock.c 2013-07-14 17:01:37.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv4/inet_connection_sock.c 2013-08-22 20:30:00.000000000 +0000 @@ -53,6 +53,37 @@ void inet_get_local_port_range(int *low, } EXPORT_SYMBOL(inet_get_local_port_range); +int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) +{ + __be32 sk1_rcv_saddr = sk_rcv_saddr(sk1), + sk2_rcv_saddr = sk_rcv_saddr(sk2); + + if (inet_v6_ipv6only(sk2)) + return 0; + + if (sk1_rcv_saddr && + sk2_rcv_saddr && + sk1_rcv_saddr == sk2_rcv_saddr) + return 1; + + if (sk1_rcv_saddr && + !sk2_rcv_saddr && + v4_addr_in_nx_info(sk2->sk_nx_info, sk1_rcv_saddr, NXA_MASK_BIND)) + return 1; + + if (sk2_rcv_saddr && + !sk1_rcv_saddr && + v4_addr_in_nx_info(sk1->sk_nx_info, sk2_rcv_saddr, NXA_MASK_BIND)) + return 1; + + if (!sk1_rcv_saddr && + !sk2_rcv_saddr && + nx_v4_addr_conflict(sk1->sk_nx_info, sk2->sk_nx_info)) + return 1; + + return 0; +} + int inet_csk_bind_conflict(const struct sock *sk, const struct inet_bind_bucket *tb, bool relax) { @@ -79,17 +110,12 @@ int inet_csk_bind_conflict(const struct (!reuseport || !sk2->sk_reuseport || (sk2->sk_state != TCP_TIME_WAIT && !uid_eq(uid, sock_i_uid(sk2))))) { - const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); - if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || - sk2_rcv_saddr == sk_rcv_saddr(sk)) + if (ipv4_rcv_saddr_equal(sk, sk2)) break; } if (!relax && reuse && sk2->sk_reuse && sk2->sk_state != TCP_LISTEN) { - const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); - - if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || - sk2_rcv_saddr == sk_rcv_saddr(sk)) + if (ipv4_rcv_saddr_equal(sk, sk2)) break; } } diff -NurpP --minimal linux-3.10.19/net/ipv4/inet_diag.c 
linux-3.10.19-vs2.3.6.8/net/ipv4/inet_diag.c --- linux-3.10.19/net/ipv4/inet_diag.c 2013-07-14 17:01:37.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv4/inet_diag.c 2013-08-22 20:30:00.000000000 +0000 @@ -31,6 +31,8 @@ #include #include +#include +#include #include #include @@ -106,8 +108,10 @@ int inet_sk_diag_fill(struct sock *sk, s r->id.idiag_sport = inet->inet_sport; r->id.idiag_dport = inet->inet_dport; - r->id.idiag_src[0] = inet->inet_rcv_saddr; - r->id.idiag_dst[0] = inet->inet_daddr; + r->id.idiag_src[0] = nx_map_sock_lback(sk->sk_nx_info, + inet->inet_rcv_saddr); + r->id.idiag_dst[0] = nx_map_sock_lback(sk->sk_nx_info, + inet->inet_daddr); if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown)) goto errout; @@ -244,8 +248,8 @@ static int inet_twsk_diag_fill(struct in sock_diag_save_cookie(tw, r->id.idiag_cookie); r->id.idiag_sport = tw->tw_sport; r->id.idiag_dport = tw->tw_dport; - r->id.idiag_src[0] = tw->tw_rcv_saddr; - r->id.idiag_dst[0] = tw->tw_daddr; + r->id.idiag_src[0] = nx_map_sock_lback(tw->tw_nx_info, tw->tw_rcv_saddr); + r->id.idiag_dst[0] = nx_map_sock_lback(tw->tw_nx_info, tw->tw_daddr); r->idiag_state = tw->tw_substate; r->idiag_timer = 3; r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ); @@ -289,12 +293,14 @@ int inet_diag_dump_one_icsk(struct inet_ err = -EINVAL; if (req->sdiag_family == AF_INET) { + /* TODO: lback */ sk = inet_lookup(net, hashinfo, req->id.idiag_dst[0], req->id.idiag_dport, req->id.idiag_src[0], req->id.idiag_sport, req->id.idiag_if); } #if IS_ENABLED(CONFIG_IPV6) else if (req->sdiag_family == AF_INET6) { + /* TODO: lback */ sk = inet6_lookup(net, hashinfo, (struct in6_addr *)req->id.idiag_dst, req->id.idiag_dport, @@ -496,6 +502,7 @@ int inet_diag_bc_sk(const struct nlattr } else #endif { + /* TODO: lback */ entry.saddr = &inet->inet_rcv_saddr; entry.daddr = &inet->inet_daddr; } @@ -654,6 +661,7 @@ static int inet_twsk_diag_dump(struct in } else #endif { + /* TODO: lback */ entry.saddr = &tw->tw_rcv_saddr; entry.daddr = &tw->tw_daddr; } @@ -732,8 +740,8 @@ static int inet_diag_fill_req(struct sk_ r->id.idiag_sport = inet->inet_sport; r->id.idiag_dport = ireq->rmt_port; - r->id.idiag_src[0] = ireq->loc_addr; - r->id.idiag_dst[0] = ireq->rmt_addr; + r->id.idiag_src[0] = nx_map_sock_lback(sk->sk_nx_info, ireq->loc_addr); + r->id.idiag_dst[0] = nx_map_sock_lback(sk->sk_nx_info, ireq->rmt_addr); r->idiag_expires = jiffies_to_msecs(tmo); r->idiag_rqueue = 0; r->idiag_wqueue = 0; @@ -796,6 +804,7 @@ static int inet_diag_dump_reqs(struct sk r->id.idiag_dport) continue; + /* TODO: lback */ if (bc) { inet_diag_req_addrs(sk, req, &entry); entry.dport = ntohs(ireq->rmt_port); @@ -852,6 +861,8 @@ void inet_diag_dump_icsk(struct inet_has if (!net_eq(sock_net(sk), net)) continue; + if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)) + continue; if (num < s_num) { num++; continue; @@ -924,6 +935,8 @@ skip_listen_ht: if (!net_eq(sock_net(sk), net)) continue; + if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)) + continue; if (num < s_num) goto next_normal; if (!(r->idiag_states & (1 << sk->sk_state))) @@ -952,7 +965,8 @@ next_normal: &head->twchain) { if (!net_eq(twsk_net(tw), net)) continue; - + if (!nx_check(tw->tw_nid, VS_WATCH_P | VS_IDENT)) + continue; if (num < s_num) goto next_dying; if (r->sdiag_family != AF_UNSPEC && diff -NurpP --minimal linux-3.10.19/net/ipv4/inet_hashtables.c linux-3.10.19-vs2.3.6.8/net/ipv4/inet_hashtables.c --- linux-3.10.19/net/ipv4/inet_hashtables.c 2013-11-13 17:21:15.000000000 +0000 +++ 
linux-3.10.19-vs2.3.6.8/net/ipv4/inet_hashtables.c 2013-11-13 17:22:25.000000000 +0000 @@ -22,6 +22,7 @@ #include #include #include +#include #include /* @@ -156,6 +157,11 @@ static inline int compute_score(struct s if (rcv_saddr != daddr) return -1; score += 4; + } else { + /* block non nx_info ips */ + if (!v4_addr_in_nx_info(sk->sk_nx_info, + daddr, NXA_MASK_BIND)) + return -1; } if (sk->sk_bound_dev_if) { if (sk->sk_bound_dev_if != dif) @@ -173,7 +179,6 @@ static inline int compute_score(struct s * wildcarded during the search since they can never be otherwise. */ - struct sock *__inet_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, const __be32 saddr, __be16 sport, @@ -209,6 +214,7 @@ begin: phash = next_pseudo_random32(phash); } } + /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. diff -NurpP --minimal linux-3.10.19/net/ipv4/netfilter.c linux-3.10.19-vs2.3.6.8/net/ipv4/netfilter.c --- linux-3.10.19/net/ipv4/netfilter.c 2013-07-14 17:01:37.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv4/netfilter.c 2013-08-22 20:30:00.000000000 +0000 @@ -11,7 +11,7 @@ #include #include #include -#include +// #include #include #include #include diff -NurpP --minimal linux-3.10.19/net/ipv4/raw.c linux-3.10.19-vs2.3.6.8/net/ipv4/raw.c --- linux-3.10.19/net/ipv4/raw.c 2013-11-13 17:21:15.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv4/raw.c 2013-11-13 17:17:16.000000000 +0000 @@ -116,7 +116,7 @@ static struct sock *__raw_v4_lookup(stru if (net_eq(sock_net(sk), net) && inet->inet_num == num && !(inet->inet_daddr && inet->inet_daddr != raddr) && - !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) && + v4_sock_addr_match(sk->sk_nx_info, inet, laddr) && !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) goto found; /* gotcha */ } @@ -395,6 +395,12 @@ static int raw_send_hdrinc(struct sock * icmp_out_count(net, ((struct icmphdr *) skb_transport_header(skb))->type); + err = -EPERM; + if (!nx_check(0, VS_ADMIN) && !capable(CAP_NET_RAW) && + sk->sk_nx_info && + !v4_addr_in_nx_info(sk->sk_nx_info, iph->saddr, NXA_MASK_BIND)) + goto error_free; + err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL, rt->dst.dev, dst_output); if (err > 0) @@ -581,6 +587,16 @@ static int raw_sendmsg(struct kiocb *ioc goto done; } + if (sk->sk_nx_info) { + rt = ip_v4_find_src(sock_net(sk), sk->sk_nx_info, &fl4); + if (IS_ERR(rt)) { + err = PTR_ERR(rt); + rt = NULL; + goto done; + } + ip_rt_put(rt); + } + security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); rt = ip_route_output_flow(sock_net(sk), &fl4, sk); if (IS_ERR(rt)) { @@ -657,17 +673,19 @@ static int raw_bind(struct sock *sk, str { struct inet_sock *inet = inet_sk(sk); struct sockaddr_in *addr = (struct sockaddr_in *) uaddr; + struct nx_v4_sock_addr nsa = { 0 }; int ret = -EINVAL; int chk_addr_ret; if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in)) goto out; - chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr); + v4_map_sock_addr(inet, addr, &nsa); + chk_addr_ret = inet_addr_type(sock_net(sk), nsa.saddr); ret = -EADDRNOTAVAIL; - if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL && + if (nsa.saddr && chk_addr_ret != RTN_LOCAL && chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) goto out; - inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr; + v4_set_sock_addr(inet, &nsa); if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) inet->inet_saddr = 0; /* Use device */ sk_dst_reset(sk); @@ -719,7 +737,8 
@@ static int raw_recvmsg(struct kiocb *ioc /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; - sin->sin_addr.s_addr = ip_hdr(skb)->saddr; + sin->sin_addr.s_addr = + nx_map_sock_lback(sk->sk_nx_info, ip_hdr(skb)->saddr); sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); } @@ -914,7 +933,8 @@ static struct sock *raw_get_first(struct for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE; ++state->bucket) { sk_for_each(sk, &state->h->ht[state->bucket]) - if (sock_net(sk) == seq_file_net(seq)) + if ((sock_net(sk) == seq_file_net(seq)) && + nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)) goto found; } sk = NULL; @@ -930,7 +950,8 @@ static struct sock *raw_get_next(struct sk = sk_next(sk); try_again: ; - } while (sk && sock_net(sk) != seq_file_net(seq)); + } while (sk && ((sock_net(sk) != seq_file_net(seq)) || + !nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))); if (!sk && ++state->bucket < RAW_HTABLE_SIZE) { sk = sk_head(&state->h->ht[state->bucket]); diff -NurpP --minimal linux-3.10.19/net/ipv4/route.c linux-3.10.19-vs2.3.6.8/net/ipv4/route.c --- linux-3.10.19/net/ipv4/route.c 2013-11-13 17:21:15.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv4/route.c 2013-11-13 17:22:25.000000000 +0000 @@ -2003,7 +2003,7 @@ struct rtable *__ip_route_output_key(str if (fl4->flowi4_oif) { - dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif); + dev_out = dev_get_by_index_real_rcu(net, fl4->flowi4_oif); rth = ERR_PTR(-ENODEV); if (dev_out == NULL) goto out; diff -NurpP --minimal linux-3.10.19/net/ipv4/tcp.c linux-3.10.19-vs2.3.6.8/net/ipv4/tcp.c --- linux-3.10.19/net/ipv4/tcp.c 2013-11-13 17:21:15.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv4/tcp.c 2013-11-13 17:22:25.000000000 +0000 @@ -268,6 +268,7 @@ #include #include #include +#include #include #include diff -NurpP --minimal linux-3.10.19/net/ipv4/tcp_ipv4.c linux-3.10.19-vs2.3.6.8/net/ipv4/tcp_ipv4.c --- linux-3.10.19/net/ipv4/tcp_ipv4.c 2013-07-14 17:01:37.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv4/tcp_ipv4.c 2013-08-22 20:30:00.000000000 +0000 @@ -2263,6 +2263,12 @@ static void *listening_get_next(struct s req = req->dl_next; while (1) { while (req) { + vxdprintk(VXD_CBIT(net, 6), + "sk,req: %p [#%d] (from %d)", req->sk, + (req->sk)?req->sk->sk_nid:0, nx_current_nid()); + if (req->sk && + !nx_check(req->sk->sk_nid, VS_WATCH_P | VS_IDENT)) + continue; if (req->rsk_ops->family == st->family) { cur = req; goto out; @@ -2287,6 +2293,10 @@ get_req: } get_sk: sk_nulls_for_each_from(sk, node) { + vxdprintk(VXD_CBIT(net, 6), "sk: %p [#%d] (from %d)", + sk, sk->sk_nid, nx_current_nid()); + if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)) + continue; if (!net_eq(sock_net(sk), net)) continue; if (sk->sk_family == st->family) { @@ -2363,6 +2373,11 @@ static void *established_get_first(struc spin_lock_bh(lock); sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { + vxdprintk(VXD_CBIT(net, 6), + "sk,egf: %p [#%d] (from %d)", + sk, sk->sk_nid, nx_current_nid()); + if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)) + continue; if (sk->sk_family != st->family || !net_eq(sock_net(sk), net)) { continue; @@ -2373,6 +2388,11 @@ static void *established_get_first(struc st->state = TCP_SEQ_STATE_TIME_WAIT; inet_twsk_for_each(tw, node, &tcp_hashinfo.ehash[st->bucket].twchain) { + vxdprintk(VXD_CBIT(net, 6), + "tw: %p [#%d] (from %d)", + tw, tw->tw_nid, nx_current_nid()); + if (!nx_check(tw->tw_nid, VS_WATCH_P | VS_IDENT)) + continue; if (tw->tw_family != st->family || !net_eq(twsk_net(tw), net)) { continue; @@ 
-2402,7 +2422,9 @@ static void *established_get_next(struct tw = cur; tw = tw_next(tw); get_tw: - while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) { + while (tw && (tw->tw_family != st->family || + !net_eq(twsk_net(tw), net) || + !nx_check(tw->tw_nid, VS_WATCH_P | VS_IDENT))) { tw = tw_next(tw); } if (tw) { @@ -2426,6 +2448,11 @@ get_tw: sk = sk_nulls_next(sk); sk_nulls_for_each_from(sk, node) { + vxdprintk(VXD_CBIT(net, 6), + "sk,egn: %p [#%d] (from %d)", + sk, sk->sk_nid, nx_current_nid()); + if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)) + continue; if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) goto found; } @@ -2631,9 +2658,9 @@ static void get_openreq4(const struct so seq_printf(f, "%4d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n", i, - ireq->loc_addr, + nx_map_sock_lback(current_nx_info(), ireq->loc_addr), ntohs(inet_sk(sk)->inet_sport), - ireq->rmt_addr, + nx_map_sock_lback(current_nx_info(), ireq->rmt_addr), ntohs(ireq->rmt_port), TCP_SYN_RECV, 0, 0, /* could print option size, but that is af dependent. */ @@ -2656,8 +2683,8 @@ static void get_tcp4_sock(struct sock *s const struct inet_connection_sock *icsk = inet_csk(sk); const struct inet_sock *inet = inet_sk(sk); struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq; - __be32 dest = inet->inet_daddr; - __be32 src = inet->inet_rcv_saddr; + __be32 dest = nx_map_sock_lback(current_nx_info(), inet->inet_daddr); + __be32 src = nx_map_sock_lback(current_nx_info(), inet->inet_rcv_saddr); __u16 destp = ntohs(inet->inet_dport); __u16 srcp = ntohs(inet->inet_sport); int rx_queue; @@ -2715,8 +2742,8 @@ static void get_timewait4_sock(const str __u16 destp, srcp; long delta = tw->tw_ttd - jiffies; - dest = tw->tw_daddr; - src = tw->tw_rcv_saddr; + dest = nx_map_sock_lback(current_nx_info(), tw->tw_daddr); + src = nx_map_sock_lback(current_nx_info(), tw->tw_rcv_saddr); destp = ntohs(tw->tw_dport); srcp = ntohs(tw->tw_sport); diff -NurpP --minimal linux-3.10.19/net/ipv4/tcp_minisocks.c linux-3.10.19-vs2.3.6.8/net/ipv4/tcp_minisocks.c --- linux-3.10.19/net/ipv4/tcp_minisocks.c 2013-07-14 17:01:37.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv4/tcp_minisocks.c 2013-08-22 20:30:00.000000000 +0000 @@ -23,6 +23,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -290,6 +293,11 @@ void tcp_time_wait(struct sock *sk, int tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp; tcptw->tw_ts_offset = tp->tsoffset; + tw->tw_xid = sk->sk_xid; + tw->tw_vx_info = NULL; + tw->tw_nid = sk->sk_nid; + tw->tw_nx_info = NULL; + #if IS_ENABLED(CONFIG_IPV6) if (tw->tw_family == PF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); diff -NurpP --minimal linux-3.10.19/net/ipv4/udp.c linux-3.10.19-vs2.3.6.8/net/ipv4/udp.c --- linux-3.10.19/net/ipv4/udp.c 2013-11-13 17:21:15.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv4/udp.c 2013-11-13 17:17:16.000000000 +0000 @@ -306,14 +306,7 @@ fail: } EXPORT_SYMBOL(udp_lib_get_port); -static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) -{ - struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); - - return (!ipv6_only_sock(sk2) && - (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr || - inet1->inet_rcv_saddr == inet2->inet_rcv_saddr)); -} +extern int ipv4_rcv_saddr_equal(const struct sock *, const struct sock *); static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr, unsigned int port) @@ -348,6 +341,11 @@ static inline int compute_score(struct s 
if (inet->inet_rcv_saddr != daddr) return -1; score += 4; + } else { + /* block non nx_info ips */ + if (!v4_addr_in_nx_info(sk->sk_nx_info, + daddr, NXA_MASK_BIND)) + return -1; } if (inet->inet_daddr) { if (inet->inet_daddr != saddr) @@ -458,6 +456,7 @@ begin: return result; } + /* UDP is nearly always wildcards out the wazoo, it makes no sense to try * harder than this. -DaveM */ @@ -504,6 +503,11 @@ begin: sk_nulls_for_each_rcu(sk, node, &hslot->head) { score = compute_score(sk, net, saddr, hnum, sport, daddr, dport, dif); + /* FIXME: disabled? + if (score == 9) { + result = sk; + break; + } else */ if (score > badness) { result = sk; badness = score; @@ -528,6 +532,7 @@ begin: if (get_nulls_value(node) != slot) goto begin; + if (result) { if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) result = NULL; @@ -537,6 +542,7 @@ begin: goto begin; } } + rcu_read_unlock(); return result; } @@ -580,8 +586,7 @@ static inline struct sock *udp_v4_mcast_ udp_sk(s)->udp_port_hash != hnum || (inet->inet_daddr && inet->inet_daddr != rmt_addr) || (inet->inet_dport != rmt_port && inet->inet_dport) || - (inet->inet_rcv_saddr && - inet->inet_rcv_saddr != loc_addr) || + !v4_sock_addr_match(sk->sk_nx_info, inet, loc_addr) || ipv6_only_sock(s) || (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)) continue; @@ -965,6 +970,16 @@ int udp_sendmsg(struct kiocb *iocb, stru inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP, faddr, saddr, dport, inet->inet_sport); + if (sk->sk_nx_info) { + rt = ip_v4_find_src(net, sk->sk_nx_info, fl4); + if (IS_ERR(rt)) { + err = PTR_ERR(rt); + rt = NULL; + goto out; + } + ip_rt_put(rt); + } + security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); rt = ip_route_output_flow(net, fl4, sk); if (IS_ERR(rt)) { @@ -1272,7 +1287,8 @@ try_again: if (sin) { sin->sin_family = AF_INET; sin->sin_port = udp_hdr(skb)->source; - sin->sin_addr.s_addr = ip_hdr(skb)->saddr; + sin->sin_addr.s_addr = nx_map_sock_lback( + skb->sk->sk_nx_info, ip_hdr(skb)->saddr); memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); } if (inet->cmsg_flags) @@ -2033,6 +2049,8 @@ static struct sock *udp_get_first(struct sk_nulls_for_each(sk, node, &hslot->head) { if (!net_eq(sock_net(sk), net)) continue; + if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)) + continue; if (sk->sk_family == state->family) goto found; } @@ -2050,7 +2068,9 @@ static struct sock *udp_get_next(struct do { sk = sk_nulls_next(sk); - } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family)); + } while (sk && (!net_eq(sock_net(sk), net) || + sk->sk_family != state->family || + !nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))); if (!sk) { if (state->bucket <= state->udp_table->mask) @@ -2146,8 +2166,8 @@ static void udp4_format_sock(struct sock int bucket, int *len) { struct inet_sock *inet = inet_sk(sp); - __be32 dest = inet->inet_daddr; - __be32 src = inet->inet_rcv_saddr; + __be32 dest = nx_map_sock_lback(current_nx_info(), inet->inet_daddr); + __be32 src = nx_map_sock_lback(current_nx_info(), inet->inet_rcv_saddr); __u16 destp = ntohs(inet->inet_dport); __u16 srcp = ntohs(inet->inet_sport); diff -NurpP --minimal linux-3.10.19/net/ipv6/Kconfig linux-3.10.19-vs2.3.6.8/net/ipv6/Kconfig --- linux-3.10.19/net/ipv6/Kconfig 2013-07-14 17:01:38.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv6/Kconfig 2013-08-22 20:30:00.000000000 +0000 @@ -4,8 +4,8 @@ # IPv6 as module will cause a CRASH if you try to unload it menuconfig IPV6 - tristate "The IPv6 protocol" - default m + bool "The IPv6 protocol" + default n ---help--- This is 
complemental support for the IP version 6. You will still be able to do traditional IPv4 networking as well. diff -NurpP --minimal linux-3.10.19/net/ipv6/addrconf.c linux-3.10.19-vs2.3.6.8/net/ipv6/addrconf.c --- linux-3.10.19/net/ipv6/addrconf.c 2013-11-13 17:21:15.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv6/addrconf.c 2013-11-13 17:17:16.000000000 +0000 @@ -94,6 +94,8 @@ #include #include #include +#include +#include /* Set to 3 to get tracing... */ #define ACONF_DEBUG 2 @@ -1321,7 +1323,7 @@ out: int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev, const struct in6_addr *daddr, unsigned int prefs, - struct in6_addr *saddr) + struct in6_addr *saddr, struct nx_info *nxi) { struct ipv6_saddr_score scores[2], *score = &scores[0], *hiscore = &scores[1]; @@ -1393,6 +1395,8 @@ int ipv6_dev_get_saddr(struct net *net, dev->name); continue; } + if (!v6_addr_in_nx_info(nxi, &score->ifa->addr, -1)) + continue; score->rule = -1; bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX); @@ -3448,7 +3452,10 @@ static void if6_seq_stop(struct seq_file static int if6_seq_show(struct seq_file *seq, void *v) { struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v; - seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n", + + if (nx_check(0, VS_ADMIN|VS_WATCH) || + v6_addr_in_nx_info(current_nx_info(), &ifp->addr, -1)) + seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n", &ifp->addr, ifp->idev->dev->ifindex, ifp->prefix_len, @@ -3952,6 +3959,11 @@ static int in6_dump_addrs(struct inet6_d struct ifacaddr6 *ifaca; int err = 1; int ip_idx = *p_ip_idx; + struct nx_info *nxi = skb->sk ? skb->sk->sk_nx_info : NULL; + + /* disable ipv6 on non v6 guests */ + if (nxi && !nx_info_has_v6(nxi)) + return skb->len; read_lock_bh(&idev->lock); switch (type) { @@ -3962,6 +3974,8 @@ static int in6_dump_addrs(struct inet6_d list_for_each_entry(ifa, &idev->addr_list, if_list) { if (++ip_idx < s_ip_idx) continue; + if (!v6_addr_in_nx_info(nxi, &ifa->addr, -1)) + continue; err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, @@ -3979,6 +3993,8 @@ static int in6_dump_addrs(struct inet6_d ifmca = ifmca->next, ip_idx++) { if (ip_idx < s_ip_idx) continue; + if (!v6_addr_in_nx_info(nxi, &ifmca->mca_addr, -1)) + continue; err = inet6_fill_ifmcaddr(skb, ifmca, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, @@ -3994,6 +4010,8 @@ static int in6_dump_addrs(struct inet6_d ifaca = ifaca->aca_next, ip_idx++) { if (ip_idx < s_ip_idx) continue; + if (!v6_addr_in_nx_info(nxi, &ifaca->aca_addr, -1)) + continue; err = inet6_fill_ifacaddr(skb, ifaca, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, @@ -4022,6 +4040,10 @@ static int inet6_dump_addr(struct sk_buf struct inet6_dev *idev; struct hlist_head *head; + /* FIXME: maybe disable ipv6 on non v6 guests? + if (skb->sk && skb->sk->sk_vx_info) + return skb->len; */ + s_h = cb->args[0]; s_idx = idx = cb->args[1]; s_ip_idx = ip_idx = cb->args[2]; @@ -4457,6 +4479,7 @@ static int inet6_dump_ifinfo(struct sk_b struct net_device *dev; struct inet6_dev *idev; struct hlist_head *head; + struct nx_info *nxi = skb->sk ? 
skb->sk->sk_nx_info : NULL; s_h = cb->args[0]; s_idx = cb->args[1]; @@ -4468,6 +4491,8 @@ static int inet6_dump_ifinfo(struct sk_b hlist_for_each_entry_rcu(dev, head, index_hlist) { if (idx < s_idx) goto cont; + if (!v6_dev_in_nx_info(dev, nxi)) + goto cont; idev = __in6_dev_get(dev); if (!idev) goto cont; diff -NurpP --minimal linux-3.10.19/net/ipv6/af_inet6.c linux-3.10.19-vs2.3.6.8/net/ipv6/af_inet6.c --- linux-3.10.19/net/ipv6/af_inet6.c 2013-07-14 17:01:38.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv6/af_inet6.c 2013-08-22 20:30:00.000000000 +0000 @@ -43,6 +43,8 @@ #include #include #include +#include +#include #include #include @@ -159,10 +161,13 @@ lookup_protocol: } err = -EPERM; + if ((protocol == IPPROTO_ICMPV6) && + nx_capable(CAP_NET_RAW, NXC_RAW_ICMP)) + goto override; if (sock->type == SOCK_RAW && !kern && !ns_capable(net->user_ns, CAP_NET_RAW)) goto out_rcu_unlock; - +override: sock->ops = answer->ops; answer_prot = answer->prot; answer_no_check = answer->no_check; @@ -262,6 +267,7 @@ int inet6_bind(struct socket *sock, stru struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct net *net = sock_net(sk); + struct nx_v6_sock_addr nsa; __be32 v4addr = 0; unsigned short snum; int addr_type = 0; @@ -277,6 +283,10 @@ int inet6_bind(struct socket *sock, stru if (addr->sin6_family != AF_INET6) return -EAFNOSUPPORT; + err = v6_map_sock_addr(inet, addr, &nsa); + if (err) + return err; + addr_type = ipv6_addr_type(&addr->sin6_addr); if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM) return -EINVAL; @@ -308,6 +318,7 @@ int inet6_bind(struct socket *sock, stru /* Reproduce AF_INET checks to make the bindings consistent */ v4addr = addr->sin6_addr.s6_addr32[3]; chk_addr_ret = inet_addr_type(net, v4addr); + if (!sysctl_ip_nonlocal_bind && !(inet->freebind || inet->transparent) && v4addr != htonl(INADDR_ANY) && @@ -317,6 +328,10 @@ int inet6_bind(struct socket *sock, stru err = -EADDRNOTAVAIL; goto out; } + if (!v4_addr_in_nx_info(sk->sk_nx_info, v4addr, NXA_MASK_BIND)) { + err = -EADDRNOTAVAIL; + goto out; + } } else { if (addr_type != IPV6_ADDR_ANY) { struct net_device *dev = NULL; @@ -343,6 +358,11 @@ int inet6_bind(struct socket *sock, stru } } + if (!v6_addr_in_nx_info(sk->sk_nx_info, &addr->sin6_addr, -1)) { + err = -EADDRNOTAVAIL; + goto out_unlock; + } + /* ipv4 addr of the socket is invalid. Only the * unspecified and mapped address have a v4 equivalent. */ @@ -359,6 +379,9 @@ int inet6_bind(struct socket *sock, stru } } + /* what's that for? */ + v6_set_sock_addr(inet, &nsa); + inet->inet_rcv_saddr = v4addr; inet->inet_saddr = v4addr; @@ -460,9 +483,11 @@ int inet6_getname(struct socket *sock, s return -ENOTCONN; sin->sin6_port = inet->inet_dport; sin->sin6_addr = np->daddr; + /* FIXME: remap lback? */ if (np->sndflow) sin->sin6_flowinfo = np->flow_label; } else { + /* FIXME: remap lback? 
*/ if (ipv6_addr_any(&np->rcv_saddr)) sin->sin6_addr = np->saddr; else diff -NurpP --minimal linux-3.10.19/net/ipv6/datagram.c linux-3.10.19-vs2.3.6.8/net/ipv6/datagram.c --- linux-3.10.19/net/ipv6/datagram.c 2013-07-14 17:01:38.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv6/datagram.c 2013-08-22 20:30:00.000000000 +0000 @@ -652,7 +652,7 @@ int ip6_datagram_send_ctl(struct net *ne rcu_read_lock(); if (fl6->flowi6_oif) { - dev = dev_get_by_index_rcu(net, fl6->flowi6_oif); + dev = dev_get_by_index_real_rcu(net, fl6->flowi6_oif); if (!dev) { rcu_read_unlock(); return -ENODEV; diff -NurpP --minimal linux-3.10.19/net/ipv6/fib6_rules.c linux-3.10.19-vs2.3.6.8/net/ipv6/fib6_rules.c --- linux-3.10.19/net/ipv6/fib6_rules.c 2013-02-19 13:58:58.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv6/fib6_rules.c 2013-08-22 20:30:00.000000000 +0000 @@ -90,7 +90,7 @@ static int fib6_rule_action(struct fib_r ip6_dst_idev(&rt->dst)->dev, &flp6->daddr, rt6_flags2srcprefs(flags), - &saddr)) + &saddr, NULL)) goto again; if (!ipv6_prefix_equal(&saddr, &r->src.addr, r->src.plen)) diff -NurpP --minimal linux-3.10.19/net/ipv6/inet6_hashtables.c linux-3.10.19-vs2.3.6.8/net/ipv6/inet6_hashtables.c --- linux-3.10.19/net/ipv6/inet6_hashtables.c 2013-11-13 17:21:15.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv6/inet6_hashtables.c 2013-11-13 17:22:25.000000000 +0000 @@ -16,6 +16,7 @@ #include #include +#include #include #include @@ -83,7 +84,6 @@ struct sock *__inet6_lookup_established( unsigned int slot = hash & hashinfo->ehash_mask; struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; - rcu_read_lock(); begin: sk_nulls_for_each_rcu(sk, node, &head->chain) { @@ -97,7 +97,7 @@ begin: sock_put(sk); goto begin; } - goto out; + goto out; } } if (get_nulls_value(node) != slot) @@ -147,6 +147,9 @@ static inline int compute_score(struct s if (!ipv6_addr_equal(&np->rcv_saddr, daddr)) return -1; score++; + } else { + if (!v6_addr_in_nx_info(sk->sk_nx_info, daddr, -1)) + return -1; } if (sk->sk_bound_dev_if) { if (sk->sk_bound_dev_if != dif) diff -NurpP --minimal linux-3.10.19/net/ipv6/ip6_output.c linux-3.10.19-vs2.3.6.8/net/ipv6/ip6_output.c --- linux-3.10.19/net/ipv6/ip6_output.c 2013-11-13 17:21:15.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv6/ip6_output.c 2013-11-13 17:22:25.000000000 +0000 @@ -882,7 +882,8 @@ static int ip6_dst_lookup_tail(struct so struct rt6_info *rt = (struct rt6_info *) *dst; err = ip6_route_get_saddr(net, rt, &fl6->daddr, sk ? inet6_sk(sk)->srcprefs : 0, - &fl6->saddr); + &fl6->saddr, + sk ? 
sk->sk_nx_info : NULL); if (err) goto out_err_release; } diff -NurpP --minimal linux-3.10.19/net/ipv6/ndisc.c linux-3.10.19-vs2.3.6.8/net/ipv6/ndisc.c --- linux-3.10.19/net/ipv6/ndisc.c 2013-11-13 17:21:15.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv6/ndisc.c 2013-11-13 17:17:16.000000000 +0000 @@ -487,7 +487,7 @@ static void ndisc_send_na(struct net_dev } else { if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr, inet6_sk(dev_net(dev)->ipv6.ndisc_sk)->srcprefs, - &tmpaddr)) + &tmpaddr, NULL)) return; src_addr = &tmpaddr; } diff -NurpP --minimal linux-3.10.19/net/ipv6/netfilter/ip6t_MASQUERADE.c linux-3.10.19-vs2.3.6.8/net/ipv6/netfilter/ip6t_MASQUERADE.c --- linux-3.10.19/net/ipv6/netfilter/ip6t_MASQUERADE.c 2012-12-11 03:30:57.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv6/netfilter/ip6t_MASQUERADE.c 2013-08-22 20:30:00.000000000 +0000 @@ -34,7 +34,7 @@ masquerade_tg6(struct sk_buff *skb, cons ctinfo == IP_CT_RELATED_REPLY)); if (ipv6_dev_get_saddr(dev_net(par->out), par->out, - &ipv6_hdr(skb)->daddr, 0, &src) < 0) + &ipv6_hdr(skb)->daddr, 0, &src, NULL) < 0) return NF_DROP; nfct_nat(ct)->masq_index = par->out->ifindex; diff -NurpP --minimal linux-3.10.19/net/ipv6/raw.c linux-3.10.19-vs2.3.6.8/net/ipv6/raw.c --- linux-3.10.19/net/ipv6/raw.c 2013-07-14 17:01:38.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv6/raw.c 2013-08-22 20:30:00.000000000 +0000 @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -283,6 +284,13 @@ static int rawv6_bind(struct sock *sk, s goto out_unlock; } + if (!v6_addr_in_nx_info(sk->sk_nx_info, &addr->sin6_addr, -1)) { + err = -EADDRNOTAVAIL; + if (dev) + dev_put(dev); + goto out; + } + /* ipv4 addr of the socket is invalid. Only the * unspecified and mapped address have a v4 equivalent. */ diff -NurpP --minimal linux-3.10.19/net/ipv6/route.c linux-3.10.19-vs2.3.6.8/net/ipv6/route.c --- linux-3.10.19/net/ipv6/route.c 2013-11-13 17:21:15.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv6/route.c 2013-11-13 17:22:25.000000000 +0000 @@ -58,6 +58,7 @@ #include #include #include +#include #include @@ -2129,15 +2130,17 @@ int ip6_route_get_saddr(struct net *net, struct rt6_info *rt, const struct in6_addr *daddr, unsigned int prefs, - struct in6_addr *saddr) + struct in6_addr *saddr, + struct nx_info *nxi) { struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt); int err = 0; - if (rt->rt6i_prefsrc.plen) + if (rt->rt6i_prefsrc.plen && (!nxi || + v6_addr_in_nx_info(nxi, &rt->rt6i_prefsrc.addr, NXA_TYPE_ADDR))) *saddr = rt->rt6i_prefsrc.addr; else err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL, - daddr, prefs, saddr); + daddr, prefs, saddr, nxi); return err; } @@ -2557,7 +2560,8 @@ static int rt6_fill_node(struct net *net goto nla_put_failure; } else if (dst) { struct in6_addr saddr_buf; - if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 && + if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf, + (skb->sk ? skb->sk->sk_nx_info : NULL)) == 0 && nla_put(skb, RTA_PREFSRC, 16, &saddr_buf)) goto nla_put_failure; } @@ -2769,6 +2773,7 @@ static int rt6_info_route(struct rt6_inf { struct seq_file *m = p_arg; + /* FIXME: check for network context? 
*/ seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen); #ifdef CONFIG_IPV6_SUBTREES diff -NurpP --minimal linux-3.10.19/net/ipv6/tcp_ipv6.c linux-3.10.19-vs2.3.6.8/net/ipv6/tcp_ipv6.c --- linux-3.10.19/net/ipv6/tcp_ipv6.c 2013-11-13 17:21:15.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv6/tcp_ipv6.c 2013-11-13 17:17:16.000000000 +0000 @@ -71,6 +71,7 @@ #include #include +#include static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb); static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, @@ -164,8 +165,15 @@ static int tcp_v6_connect(struct sock *s * connect() to INADDR_ANY means loopback (BSD'ism). */ - if(ipv6_addr_any(&usin->sin6_addr)) - usin->sin6_addr.s6_addr[15] = 0x1; + if(ipv6_addr_any(&usin->sin6_addr)) { + struct nx_info *nxi = sk->sk_nx_info; + + if (nxi && nx_info_has_v6(nxi)) + /* FIXME: remap lback? */ + usin->sin6_addr = nxi->v6.ip; + else + usin->sin6_addr.s6_addr[15] = 0x1; + } addr_type = ipv6_addr_type(&usin->sin6_addr); diff -NurpP --minimal linux-3.10.19/net/ipv6/udp.c linux-3.10.19-vs2.3.6.8/net/ipv6/udp.c --- linux-3.10.19/net/ipv6/udp.c 2013-11-13 17:21:15.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv6/udp.c 2013-11-13 17:17:16.000000000 +0000 @@ -46,42 +46,68 @@ #include #include #include +#include #include #include #include #include "udp_impl.h" -int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) +int ipv6_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) { - const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; + const struct in6_addr *sk1_rcv_saddr6 = &inet6_sk(sk1)->rcv_saddr; const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2); - __be32 sk1_rcv_saddr = sk_rcv_saddr(sk); + __be32 sk1_rcv_saddr = sk_rcv_saddr(sk1); __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); - int sk_ipv6only = ipv6_only_sock(sk); + int sk1_ipv6only = ipv6_only_sock(sk1); int sk2_ipv6only = inet_v6_ipv6only(sk2); - int addr_type = ipv6_addr_type(sk_rcv_saddr6); + int addr_type = ipv6_addr_type(sk1_rcv_saddr6); int addr_type2 = sk2_rcv_saddr6 ? 
ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED; /* if both are mapped, treat as IPv4 */ - if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) - return (!sk2_ipv6only && + if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) { + if (!sk2_ipv6only && (!sk1_rcv_saddr || !sk2_rcv_saddr || - sk1_rcv_saddr == sk2_rcv_saddr)); + sk1_rcv_saddr == sk2_rcv_saddr)) + goto vs_v4; + else + return 0; + } if (addr_type2 == IPV6_ADDR_ANY && !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED)) - return 1; + goto vs; if (addr_type == IPV6_ADDR_ANY && - !(sk_ipv6only && addr_type2 == IPV6_ADDR_MAPPED)) - return 1; + !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED)) + goto vs; if (sk2_rcv_saddr6 && - ipv6_addr_equal(sk_rcv_saddr6, sk2_rcv_saddr6)) - return 1; + ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6)) + goto vs; return 0; + +vs_v4: + if (!sk1_rcv_saddr && !sk2_rcv_saddr) + return nx_v4_addr_conflict(sk1->sk_nx_info, sk2->sk_nx_info); + if (!sk2_rcv_saddr) + return v4_addr_in_nx_info(sk1->sk_nx_info, sk2_rcv_saddr, -1); + if (!sk1_rcv_saddr) + return v4_addr_in_nx_info(sk2->sk_nx_info, sk1_rcv_saddr, -1); + return 1; +vs: + if (addr_type2 == IPV6_ADDR_ANY && addr_type == IPV6_ADDR_ANY) + return nx_v6_addr_conflict(sk1->sk_nx_info, sk2->sk_nx_info); + else if (addr_type2 == IPV6_ADDR_ANY) + return v6_addr_in_nx_info(sk2->sk_nx_info, sk1_rcv_saddr6, -1); + else if (addr_type == IPV6_ADDR_ANY) { + if (addr_type2 == IPV6_ADDR_MAPPED) + return nx_v4_addr_conflict(sk1->sk_nx_info, sk2->sk_nx_info); + else + return v6_addr_in_nx_info(sk1->sk_nx_info, sk2_rcv_saddr6, -1); + } + return 1; } static unsigned int udp6_portaddr_hash(struct net *net, @@ -145,6 +171,10 @@ static inline int compute_score(struct s if (!ipv6_addr_equal(&np->rcv_saddr, daddr)) return -1; score++; + } else { + /* block non nx_info ips */ + if (!v6_addr_in_nx_info(sk->sk_nx_info, daddr, -1)) + return -1; } if (!ipv6_addr_any(&np->daddr)) { if (!ipv6_addr_equal(&np->daddr, saddr)) diff -NurpP --minimal linux-3.10.19/net/ipv6/xfrm6_policy.c linux-3.10.19-vs2.3.6.8/net/ipv6/xfrm6_policy.c --- linux-3.10.19/net/ipv6/xfrm6_policy.c 2013-07-14 17:01:38.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/ipv6/xfrm6_policy.c 2013-08-22 20:30:00.000000000 +0000 @@ -63,7 +63,7 @@ static int xfrm6_get_saddr(struct net *n dev = ip6_dst_idev(dst)->dev; ipv6_dev_get_saddr(dev_net(dev), dev, (struct in6_addr *)&daddr->a6, 0, - (struct in6_addr *)&saddr->a6); + (struct in6_addr *)&saddr->a6, NULL); dst_release(dst); return 0; } diff -NurpP --minimal linux-3.10.19/net/netfilter/ipvs/ip_vs_xmit.c linux-3.10.19-vs2.3.6.8/net/netfilter/ipvs/ip_vs_xmit.c --- linux-3.10.19/net/netfilter/ipvs/ip_vs_xmit.c 2013-11-13 17:21:15.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/netfilter/ipvs/ip_vs_xmit.c 2013-11-13 17:17:16.000000000 +0000 @@ -316,7 +316,7 @@ __ip_vs_route_output_v6(struct net *net, return dst; if (ipv6_addr_any(&fl6.saddr) && ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev, - &fl6.daddr, 0, &fl6.saddr) < 0) + &fl6.daddr, 0, &fl6.saddr, NULL) < 0) goto out_err; if (do_xfrm) { dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); diff -NurpP --minimal linux-3.10.19/net/netlink/af_netlink.c linux-3.10.19-vs2.3.6.8/net/netlink/af_netlink.c --- linux-3.10.19/net/netlink/af_netlink.c 2013-07-14 17:01:39.000000000 +0000 +++ linux-3.10.19-vs2.3.6.8/net/netlink/af_netlink.c 2013-08-22 21:19:02.000000000 +0000 @@ -57,6 +57,9 @@ #include #include #include +#include +#include +#include #include #include @@ -2684,6 +2687,8 @@ 
static struct sock *netlink_seq_socket_i
 			sk_for_each(s, &hash->table[j]) {
 				if (sock_net(s) != seq_file_net(seq))
 					continue;
+				if (!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT))
+					continue;
 				if (off == pos) {
 					iter->link = i;
 					iter->hash_idx = j;
@@ -2718,7 +2723,8 @@ static void *netlink_seq_next(struct seq
 	s = v;
 	do {
 		s = sk_next(s);
-	} while (s && sock_net(s) != seq_file_net(seq));
+	} while (s && (sock_net(s) != seq_file_net(seq) ||
+		!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT)));
 	if (s)
 		return s;
@@ -2730,7 +2736,8 @@ static void *netlink_seq_next(struct seq
 		for (; j <= hash->mask; j++) {
 			s = sk_head(&hash->table[j]);
-			while (s && sock_net(s) != seq_file_net(seq))
+			while (s && (sock_net(s) != seq_file_net(seq) ||
+				!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT)))
 				s = sk_next(s);
 			if (s) {
 				iter->link = i;
diff -NurpP --minimal linux-3.10.19/net/socket.c linux-3.10.19-vs2.3.6.8/net/socket.c
--- linux-3.10.19/net/socket.c	2013-11-13 17:21:15.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/net/socket.c	2013-11-13 17:22:25.000000000 +0000
@@ -98,6 +98,10 @@
 #include
 #include
+#include
+#include
+#include
+#include
 #include
 #include
@@ -616,13 +620,29 @@ static inline int __sock_sendmsg_nosec(s
 				       struct msghdr *msg, size_t size)
 {
 	struct sock_iocb *si = kiocb_to_siocb(iocb);
+	size_t len;
 	si->sock = sock;
 	si->scm = NULL;
 	si->msg = msg;
 	si->size = size;
-	return sock->ops->sendmsg(iocb, sock, msg, size);
+	len = sock->ops->sendmsg(iocb, sock, msg, size);
+	if (sock->sk) {
+		if (len == size)
+			vx_sock_send(sock->sk, size);
+		else
+			vx_sock_fail(sock->sk, size);
+	}
+	vxdprintk(VXD_CBIT(net, 7),
+		"__sock_sendmsg: %p[%p,%p,%p;%d/%d]:%d/%zu",
+		sock, sock->sk,
+		(sock->sk)?sock->sk->sk_nx_info:0,
+		(sock->sk)?sock->sk->sk_vx_info:0,
+		(sock->sk)?sock->sk->sk_xid:0,
+		(sock->sk)?sock->sk->sk_nid:0,
+		(unsigned int)size, len);
+	return len;
 }
 static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
@@ -766,6 +786,7 @@ static inline int __sock_recvmsg_nosec(s
 				       struct msghdr *msg, size_t size, int flags)
 {
 	struct sock_iocb *si = kiocb_to_siocb(iocb);
+	int len;
 	si->sock = sock;
 	si->scm = NULL;
@@ -773,7 +794,18 @@ static inline int __sock_recvmsg_nosec(s
 	si->size = size;
 	si->flags = flags;
-	return sock->ops->recvmsg(iocb, sock, msg, size, flags);
+	len = sock->ops->recvmsg(iocb, sock, msg, size, flags);
+	if ((len >= 0) && sock->sk)
+		vx_sock_recv(sock->sk, len);
+	vxdprintk(VXD_CBIT(net, 7),
+		"__sock_recvmsg: %p[%p,%p,%p;%d/%d]:%d/%d",
+		sock, sock->sk,
+		(sock->sk)?sock->sk->sk_nx_info:0,
+		(sock->sk)?sock->sk->sk_vx_info:0,
+		(sock->sk)?sock->sk->sk_xid:0,
+		(sock->sk)?sock->sk->sk_nid:0,
+		(unsigned int)size, len);
+	return len;
 }
 static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock,
@@ -1247,6 +1279,13 @@ int __sock_create(struct net *net, int f
 	if (type < 0 || type >= SOCK_MAX)
 		return -EINVAL;
+	if (!nx_check(0, VS_ADMIN)) {
+		if (family == PF_INET && !current_nx_info_has_v4())
+			return -EAFNOSUPPORT;
+		if (family == PF_INET6 && !current_nx_info_has_v6())
+			return -EAFNOSUPPORT;
+	}
+
 	/* Compatibility.
 	   This uglymoron is moved from INET layer to here to avoid
@@ -1381,6 +1420,7 @@ SYSCALL_DEFINE3(socket, int, family, int
 	if (retval < 0)
 		goto out;
+	set_bit(SOCK_USER_SOCKET, &sock->flags);
 	retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
 	if (retval < 0)
 		goto out_release;
@@ -1422,10 +1462,12 @@ SYSCALL_DEFINE4(socketpair, int, family,
 	err = sock_create(family, type, protocol, &sock1);
 	if (err < 0)
 		goto out;
+	set_bit(SOCK_USER_SOCKET, &sock1->flags);
 	err = sock_create(family, type, protocol, &sock2);
 	if (err < 0)
 		goto out_release_1;
+	set_bit(SOCK_USER_SOCKET, &sock2->flags);
 	err = sock1->ops->socketpair(sock1, sock2);
 	if (err < 0)
diff -NurpP --minimal linux-3.10.19/net/sunrpc/auth.c linux-3.10.19-vs2.3.6.8/net/sunrpc/auth.c
--- linux-3.10.19/net/sunrpc/auth.c	2013-07-14 17:01:39.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/net/sunrpc/auth.c	2013-08-22 20:30:00.000000000 +0000
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include
 #ifdef RPC_DEBUG
 # define RPCDBG_FACILITY	RPCDBG_AUTH
@@ -553,6 +554,7 @@ rpcauth_lookupcred(struct rpc_auth *auth
 	memset(&acred, 0, sizeof(acred));
 	acred.uid = cred->fsuid;
 	acred.gid = cred->fsgid;
+	acred.tag = make_ktag(&init_user_ns, dx_current_tag());
 	acred.group_info = get_group_info(((struct cred *)cred)->group_info);
 	ret = auth->au_ops->lookup_cred(auth, &acred, flags);
@@ -593,6 +595,7 @@ rpcauth_bind_root_cred(struct rpc_task *
 	struct auth_cred acred = {
 		.uid = GLOBAL_ROOT_UID,
 		.gid = GLOBAL_ROOT_GID,
+		.tag = KTAGT_INIT(dx_current_tag()),
 	};
 	dprintk("RPC: %5u looking up %s cred\n",
diff -NurpP --minimal linux-3.10.19/net/sunrpc/auth_unix.c linux-3.10.19-vs2.3.6.8/net/sunrpc/auth_unix.c
--- linux-3.10.19/net/sunrpc/auth_unix.c	2013-05-31 13:45:33.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/net/sunrpc/auth_unix.c	2013-08-22 20:30:00.000000000 +0000
@@ -13,11 +13,13 @@
 #include
 #include
 #include
+#include
 #define NFS_NGROUPS	16
 struct unx_cred {
 	struct rpc_cred		uc_base;
+	ktag_t			uc_tag;
 	kgid_t			uc_gid;
 	kgid_t			uc_gids[NFS_NGROUPS];
 };
@@ -80,6 +82,7 @@ unx_create_cred(struct rpc_auth *auth, s
 		groups = NFS_NGROUPS;
 	cred->uc_gid = acred->gid;
+	cred->uc_tag = acred->tag;
 	for (i = 0; i < groups; i++)
 		cred->uc_gids[i] = GROUP_AT(acred->group_info, i);
 	if (i < NFS_NGROUPS)
@@ -121,7 +124,9 @@ unx_match(struct auth_cred *acred, struc
 	unsigned int i;
-	if (!uid_eq(cred->uc_uid, acred->uid) || !gid_eq(cred->uc_gid, acred->gid))
+	if (!uid_eq(cred->uc_uid, acred->uid) ||
+	    !gid_eq(cred->uc_gid, acred->gid) ||
+	    !tag_eq(cred->uc_tag, acred->tag))
 		return 0;
 	if (acred->group_info != NULL)
@@ -146,7 +151,7 @@ unx_marshal(struct rpc_task *task, __be3
 	struct rpc_clnt	*clnt = task->tk_client;
 	struct unx_cred	*cred = container_of(task->tk_rqstp->rq_cred, struct unx_cred, uc_base);
 	__be32		*base, *hold;
-	int		i;
+	int		i, tag;
 	*p++ = htonl(RPC_AUTH_UNIX);
 	base = p++;
@@ -157,8 +162,11 @@ unx_marshal(struct rpc_task *task, __be3
 	 */
 	p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen);
-	*p++ = htonl((u32) from_kuid(&init_user_ns, cred->uc_uid));
-	*p++ = htonl((u32) from_kgid(&init_user_ns, cred->uc_gid));
+	tag = task->tk_client->cl_tag;
+	*p++ = htonl((u32) from_kuid(&init_user_ns,
+		TAGINO_KUID(tag, cred->uc_uid, cred->uc_tag)));
+	*p++ = htonl((u32) from_kgid(&init_user_ns,
+		TAGINO_KGID(tag, cred->uc_gid, cred->uc_tag)));
 	hold = p++;
 	for (i = 0; i < 16 && gid_valid(cred->uc_gids[i]); i++)
 		*p++ = htonl((u32) from_kgid(&init_user_ns, cred->uc_gids[i]));
diff -NurpP --minimal linux-3.10.19/net/sunrpc/clnt.c linux-3.10.19-vs2.3.6.8/net/sunrpc/clnt.c
--- linux-3.10.19/net/sunrpc/clnt.c	2013-11-13 17:21:15.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/net/sunrpc/clnt.c	2013-11-13 17:17:16.000000000 +0000
@@ -31,6 +31,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -486,6 +487,9 @@ struct rpc_clnt *rpc_create(struct rpc_c
 	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
 		clnt->cl_chatty = 1;
+	/* TODO: handle RPC_CLNT_CREATE_TAGGED
+	if (args->flags & RPC_CLNT_CREATE_TAGGED)
+		clnt->cl_tag = 1; */
 	return clnt;
 }
 EXPORT_SYMBOL_GPL(rpc_create);
diff -NurpP --minimal linux-3.10.19/net/unix/af_unix.c linux-3.10.19-vs2.3.6.8/net/unix/af_unix.c
--- linux-3.10.19/net/unix/af_unix.c	2013-11-13 17:21:15.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/net/unix/af_unix.c	2013-11-13 17:22:25.000000000 +0000
@@ -114,6 +114,8 @@
 #include
 #include
 #include
+#include
+#include
 struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
 EXPORT_SYMBOL_GPL(unix_socket_table);
@@ -270,6 +272,8 @@ static struct sock *__unix_find_socket_b
 		if (!net_eq(sock_net(s), net))
 			continue;
+		if (!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT))
+			continue;
 		if (u->addr->len == len &&
 		    !memcmp(u->addr->name, sunname, len))
 			goto found;
@@ -2267,6 +2271,8 @@ static struct sock *unix_from_bucket(str
 	for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
 		if (sock_net(sk) != seq_file_net(seq))
 			continue;
+		if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+			continue;
 		if (++count == offset)
 			break;
 	}
@@ -2284,6 +2290,8 @@ static struct sock *unix_next_socket(str
 		sk = sk_next(sk);
 		if (!sk)
 			goto next_bucket;
+		if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+			continue;
 		if (sock_net(sk) == seq_file_net(seq))
 			return sk;
 	}
diff -NurpP --minimal linux-3.10.19/scripts/checksyscalls.sh linux-3.10.19-vs2.3.6.8/scripts/checksyscalls.sh
--- linux-3.10.19/scripts/checksyscalls.sh	2012-12-11 03:30:57.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/scripts/checksyscalls.sh	2013-08-22 20:30:00.000000000 +0000
@@ -193,7 +193,6 @@ cat << EOF
 #define __IGNORE_afs_syscall
 #define __IGNORE_getpmsg
 #define __IGNORE_putpmsg
-#define __IGNORE_vserver
 EOF
 }
diff -NurpP --minimal linux-3.10.19/security/commoncap.c linux-3.10.19-vs2.3.6.8/security/commoncap.c
--- linux-3.10.19/security/commoncap.c	2013-05-31 13:45:34.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/security/commoncap.c	2013-08-22 20:30:00.000000000 +0000
@@ -76,6 +76,7 @@ int cap_netlink_send(struct sock *sk, st
 int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
 		int cap, int audit)
 {
+	struct vx_info *vxi = current_vx_info(); /* FIXME: get vxi from cred? */
 	struct user_namespace *ns = targ_ns;
 	/* See if cred has the capability in the target user namespace
@@ -84,8 +85,12 @@ int cap_capable(const struct cred *cred,
 	 */
 	for (;;) {
 		/* Do we have the necessary capabilities? */
-		if (ns == cred->user_ns)
-			return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;
+		if (ns == cred->user_ns) {
+			if (vx_info_flags(vxi, VXF_STATE_SETUP, 0) &&
+				cap_raised(cred->cap_effective, cap))
+				return 0;
+			return vx_cap_raised(vxi, cred->cap_effective, cap) ? 0 : -EPERM;
+		}
 		/* Have we tried all of the parent namespaces? */
 		if (ns == &init_user_ns)
@@ -628,7 +633,7 @@ int cap_inode_setxattr(struct dentry *de
 	if (!strncmp(name, XATTR_SECURITY_PREFIX,
 		     sizeof(XATTR_SECURITY_PREFIX) - 1) &&
-	    !capable(CAP_SYS_ADMIN))
+	    !vx_capable(CAP_SYS_ADMIN, VXC_FS_SECURITY))
 		return -EPERM;
 	return 0;
 }
@@ -654,7 +659,7 @@ int cap_inode_removexattr(struct dentry
 	if (!strncmp(name, XATTR_SECURITY_PREFIX,
 		     sizeof(XATTR_SECURITY_PREFIX) - 1) &&
-	    !capable(CAP_SYS_ADMIN))
+	    !vx_capable(CAP_SYS_ADMIN, VXC_FS_SECURITY))
 		return -EPERM;
 	return 0;
 }
diff -NurpP --minimal linux-3.10.19/security/selinux/hooks.c linux-3.10.19-vs2.3.6.8/security/selinux/hooks.c
--- linux-3.10.19/security/selinux/hooks.c	2013-07-14 17:01:42.000000000 +0000
+++ linux-3.10.19-vs2.3.6.8/security/selinux/hooks.c	2013-08-22 20:30:00.000000000 +0000
@@ -67,7 +67,6 @@
 #include
 #include
 #include	/* for Unix socket types */
-#include	/* for Unix socket types */
 #include
 #include
 #include