author     Natanael Copa <ncopa@alpinelinux.org>    2014-07-24 12:06:16 +0000
committer  Natanael Copa <ncopa@alpinelinux.org>    2014-07-24 13:59:52 +0000
commit     e971bdbd51404cbaa79d23c9053e06223f3185b1 (patch)
tree       e5dfaebe24cd1398f7835f6ced85083b94c29901 /main/open-vm-tools-grsec
parent     06f984daee0981ccfce8ae0a8b8b663a1c6667f1 (diff)
main/open-vm-tools-grsec: fix building with newer kernels
Patches found on the Arch Linux AUR:
https://aur.archlinux.org/packages/open-vm-tools-dkms/
Diffstat (limited to 'main/open-vm-tools-grsec')
8 files changed, 3150 insertions(+), 396 deletions(-)
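For orientation before the raw diff: the recurring trick in these patches is to guard newer kernel APIs behind LINUX_VERSION_CODE checks and, where a type does not exist on older kernels, to alias it so the call sites stay identical on every kernel. The sketch below condenses the kuid_t/kgid_t handling from the module.h and filesystem.c hunks of patch 0003; it is an illustrative excerpt, not a standalone implementation (si and mountInfo belong to HgfsInitSuperInfo() in the patched code, and GID handling in the patch is symmetric).

#include <linux/version.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
/* Pre-3.5 kernels have no namespaced UID/GID types; alias them to the raw
 * types so the helpers below compile to no-ops. (From the module.h hunk.) */
typedef uid_t kuid_t;
typedef gid_t kgid_t;
#define from_kuid(_ns, _kuid) (_kuid)
#define from_kgid(_ns, _kgid) (_kgid)
#define make_kuid(_ns, _uid) (_uid)
#define make_kgid(_ns, _gid) (_gid)
#endif

/* Mount-time UID selection from the filesystem.c hunk: default to the
 * mounting user's UID, then apply the mount-supplied UID only if it maps
 * to a valid kuid_t in the current user namespace. */
si->uid = current_uid();
if (si->uidSet) {
   kuid_t mntUid = make_kuid(current_user_ns(), mountInfo->uid);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
   if (uid_valid(mntUid))
#endif
      si->uid = mntUid;
}

The same version-guard pattern appears in patch 0004, which switches between the removed vfs_follow_link()/vfs_readlink() helpers and their newer replacements nd_set_link() and readlink_copy() depending on the kernel version.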
diff --git a/main/open-vm-tools-grsec/0001-Remove-unused-DEPRECATED-macro.patch b/main/open-vm-tools-grsec/0001-Remove-unused-DEPRECATED-macro.patch new file mode 100755 index 0000000000..ff230a7ef6 --- /dev/null +++ b/main/open-vm-tools-grsec/0001-Remove-unused-DEPRECATED-macro.patch @@ -0,0 +1,33 @@ +From 0a49c04428ff99fdf29edf32e043e04fae492b6d Mon Sep 17 00:00:00 2001 +From: "Scott M. Kroll" <skroll@gmail.com> +Date: Mon, 14 Jul 2014 11:24:44 -0400 +Subject: [PATCH 1/5] Remove unused DEPRECATED macro + +--- + open-vm-tools/lib/include/vm_assert.h | 10 ---------- + 1 file changed, 10 deletions(-) + +diff --git a/open-vm-tools/lib/include/vm_assert.h b/open-vm-tools/lib/include/vm_assert.h +index 5b02eed..48c9f1d 100644 +--- a/lib/include/vm_assert.h ++++ b/lib/include/vm_assert.h +@@ -282,16 +282,6 @@ void WarningThrottled(uint32 *count, const char *fmt, ...) + + #define LOG_ONCE(_s) DO_ONCE(Log _s) + +-#ifdef VMX86_DEVEL +- #define DEPRECATED(_fix) DO_ONCE( \ +- Warning("%s:%d: %s is DEPRECATED; %s\n", \ +- __FILE__, __LINE__, __FUNCTION__, \ +- _fix)) +-#else +- #define DEPRECATED(_fix) do {} while (0) +-#endif +- +- + /* + * Redefine macros that are only in debug versions + */ +-- +2.0.1 + diff --git a/main/open-vm-tools-grsec/0002-Conditionally-define-g_info-macro.patch b/main/open-vm-tools-grsec/0002-Conditionally-define-g_info-macro.patch new file mode 100755 index 0000000000..71a586cd7c --- /dev/null +++ b/main/open-vm-tools-grsec/0002-Conditionally-define-g_info-macro.patch @@ -0,0 +1,34 @@ +From 9a38a9da20c898c4c21e84e1cf4f97c5b63f6a87 Mon Sep 17 00:00:00 2001 +From: "Scott M. Kroll" <skroll@gmail.com> +Date: Mon, 14 Jul 2014 11:25:10 -0400 +Subject: [PATCH 2/5] Conditionally define g_info macro + +* Some versions of GLib define this macro. +--- + open-vm-tools/lib/include/vmware/tools/log.h | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/open-vm-tools/lib/include/vmware/tools/log.h b/open-vm-tools/lib/include/vmware/tools/log.h +index 526b7c2..fa7deb4 100644 +--- a/lib/include/vmware/tools/log.h ++++ b/lib/include/vmware/tools/log.h +@@ -121,6 +121,7 @@ + # define FUNC __FUNCTION__ + #endif + ++#ifndef g_info + /* + ******************************************************************************* + * g_info -- */ /** +@@ -135,7 +136,7 @@ + */ + + #define g_info(fmt, ...) g_log(G_LOG_DOMAIN, G_LOG_LEVEL_INFO, fmt, ## __VA_ARGS__) +- ++#endif + + /* + ******************************************************************************* +-- +2.0.1 + diff --git a/main/open-vm-tools-grsec/0003-Add-kuid_t-kgid_t-compatibility-layer.patch b/main/open-vm-tools-grsec/0003-Add-kuid_t-kgid_t-compatibility-layer.patch new file mode 100755 index 0000000000..33e9b13417 --- /dev/null +++ b/main/open-vm-tools-grsec/0003-Add-kuid_t-kgid_t-compatibility-layer.patch @@ -0,0 +1,311 @@ +From 327938705e9223cdc15c5e0d85b0cdfafb4b6cd7 Mon Sep 17 00:00:00 2001 +From: "Scott M. 
Kroll" <skroll@gmail.com> +Date: Sun, 13 Jul 2014 18:19:35 -0400 +Subject: [PATCH 3/5] Add kuid_t/kgid_t compatibility layer + +--- + open-vm-tools/modules/linux/vmhgfs/filesystem.c | 20 ++-- + open-vm-tools/modules/linux/vmhgfs/fsutil.c | 118 ++++++++++++++++++++---- + open-vm-tools/modules/linux/vmhgfs/fsutil.h | 5 +- + open-vm-tools/modules/linux/vmhgfs/inode.c | 18 +++- + open-vm-tools/modules/linux/vmhgfs/module.h | 14 ++- + 5 files changed, 145 insertions(+), 30 deletions(-) + +diff --git a/open-vm-tools/modules/linux/vmhgfs/filesystem.c b/open-vm-tools/modules/linux/vmhgfs/filesystem.c +index f101ca7..c845b36 100644 +--- a/modules/linux/vmhgfs/filesystem.c ++++ b/modules/linux/vmhgfs/filesystem.c +@@ -228,17 +228,25 @@ HgfsInitSuperInfo(HgfsMountInfo *mountInfo) // IN: Passed down from the user + * or gid given to us by the server. + */ + si->uidSet = mountInfo->uidSet; ++ si->uid = current_uid(); + if (si->uidSet) { +- si->uid = mountInfo->uid; +- } else { +- si->uid = current_uid(); ++ kuid_t mntUid = make_kuid(current_user_ns(), mountInfo->uid); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) ++ if (uid_valid(mntUid)) ++#endif ++ si->uid = mntUid; + } ++ + si->gidSet = mountInfo->gidSet; ++ si->gid = current_gid(); + if (si->gidSet) { +- si->gid = mountInfo->gid; +- } else { +- si->gid = current_gid(); ++ kgid_t mntGid = make_kgid(current_user_ns(), mountInfo->gid); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) ++ if (gid_valid(mntGid)) ++#endif ++ si->gid = mntGid; + } ++ + si->fmask = mountInfo->fmask; + si->dmask = mountInfo->dmask; + si->ttl = mountInfo->ttl * HZ; // in ticks +diff --git a/open-vm-tools/modules/linux/vmhgfs/fsutil.c b/open-vm-tools/modules/linux/vmhgfs/fsutil.c +index 28858bc..1028cc9 100644 +--- a/modules/linux/vmhgfs/fsutil.c ++++ b/modules/linux/vmhgfs/fsutil.c +@@ -545,6 +545,105 @@ HgfsUnpackCommonAttr(HgfsReq *req, // IN: Reply packet + /* + *---------------------------------------------------------------------- + * ++ * HgfsCalcBlockSize -- ++ * ++ * Calculate the number of 512 byte blocks used. ++ * ++ * Round the size to the next whole block and divide by the block size ++ * to get the number of 512 byte blocks. ++ * Note, this is taken from the nfs client and is simply performing: ++ * (size + 512-1)/ 512) ++ * ++ * Results: ++ * The number of 512 byte blocks for the size. ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 17) ++static inline blkcnt_t ++HgfsCalcBlockSize(uint64 tsize) ++{ ++ blkcnt_t used = (tsize + 511) >> 9; ++ return (used > ULONG_MAX) ? ULONG_MAX : used; ++} ++#else ++static inline unsigned long ++HgfsCalcBlockSize(uint64 tsize) ++{ ++ loff_t used = (tsize + 511) >> 9; ++ return (used > ULONG_MAX) ? ULONG_MAX : used; ++} ++#endif ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsSetInodeUidGid -- ++ * ++ * Set the UID and GID of the inode. ++ * ++ * Update an inode's UID and GID to match those of the HgfsAttr returned ++ * by the server. ++ * ++ * Results: ++ * The number of 512 byte blocks for the size. 
++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++static void ++HgfsSetInodeUidGid(struct inode *inode, // IN/OUT: Inode ++ HgfsSuperInfo *si, // IN: New attrs ++ HgfsAttrInfo const *attr) // IN: New attrs ++{ ++ /* ++ * Use the stored uid and gid if we were given them at mount-time, or if ++ * the server didn't give us a uid or gid. ++ */ ++ if (si->uidSet || (attr->mask & HGFS_ATTR_VALID_USERID) == 0) { ++ inode->i_uid = si->uid; ++ } else { ++ kuid_t attrUid = make_kuid(&init_user_ns, attr->userId); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) ++ if (uid_valid(attrUid)) { ++ inode->i_uid = attrUid; ++ } else { ++ inode->i_uid = si->uid; ++ } ++#else ++ inode->i_uid = attrUid; ++#endif ++ LOG(6, (KERN_DEBUG "VMware hgfs: %s: inode uid %u\n", ++ __func__, from_kuid(&init_user_ns, inode->i_uid))); ++ } ++ if (si->gidSet || (attr->mask & HGFS_ATTR_VALID_GROUPID) == 0) { ++ inode->i_gid = si->gid; ++ } else { ++ kgid_t attrGid = make_kgid(&init_user_ns, attr->groupId); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) ++ if (gid_valid(attrGid)) { ++ inode->i_gid = attrGid; ++ } else { ++ inode->i_gid = si->gid; ++ } ++#else ++ inode->i_gid = attrGid; ++#endif ++ LOG(6, (KERN_DEBUG "VMware hgfs: %s: inode gid %u\n", ++ __func__, from_kgid(&init_user_ns, inode->i_gid))); ++ } ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * + * HgfsChangeFileAttributes -- + * + * Update an inode's attributes to match those of the HgfsAttr. May +@@ -634,20 +733,7 @@ HgfsChangeFileAttributes(struct inode *inode, // IN/OUT: Inode + */ + set_nlink(inode, 1); + +- /* +- * Use the stored uid and gid if we were given them at mount-time, or if +- * the server didn't give us a uid or gid. +- */ +- if (si->uidSet || (attr->mask & HGFS_ATTR_VALID_USERID) == 0) { +- inode->i_uid = si->uid; +- } else { +- inode->i_uid = attr->userId; +- } +- if (si->gidSet || (attr->mask & HGFS_ATTR_VALID_GROUPID) == 0) { +- inode->i_gid = si->gid; +- } else { +- inode->i_gid = attr->groupId; +- } ++ HgfsSetInodeUidGid(inode, si, attr); + + inode->i_rdev = 0; /* Device nodes are not supported */ + #if !defined VMW_INODE_2618 +@@ -1618,8 +1704,8 @@ HgfsStatusConvertToLinux(HgfsStatus hgfsStatus) // IN: Status code to convert + void + HgfsSetUidGid(struct inode *parent, // IN: parent inode + struct dentry *dentry, // IN: dentry of file to update +- uid_t uid, // IN: uid to set +- gid_t gid) // IN: gid to set ++ kuid_t uid, // IN: uid to set ++ kgid_t gid) // IN: gid to set + { + struct iattr setUidGid; + +diff --git a/open-vm-tools/modules/linux/vmhgfs/fsutil.h b/open-vm-tools/modules/linux/vmhgfs/fsutil.h +index da5c5a1..2767099 100644 +--- a/modules/linux/vmhgfs/fsutil.h ++++ b/modules/linux/vmhgfs/fsutil.h +@@ -32,6 +32,7 @@ + #include <linux/signal.h> + #include "compat_fs.h" + ++#include "module.h" /* For kuid_t kgid_t types. 
*/ + #include "inode.h" + #include "request.h" + #include "vm_basic_types.h" +@@ -91,8 +92,8 @@ int HgfsGetHandle(struct inode *inode, + int HgfsStatusConvertToLinux(HgfsStatus hgfsStatus); + void HgfsSetUidGid(struct inode *parent, + struct dentry *dentry, +- uid_t uid, +- gid_t gid); ++ kuid_t uid, ++ kgid_t gid); + struct inode *HgfsGetInode(struct super_block *sb, ino_t ino); + void HgfsDoReadInode(struct inode *inode); + +diff --git a/open-vm-tools/modules/linux/vmhgfs/inode.c b/open-vm-tools/modules/linux/vmhgfs/inode.c +index 859b3ff..caaa41a 100644 +--- a/modules/linux/vmhgfs/inode.c ++++ b/modules/linux/vmhgfs/inode.c +@@ -404,6 +404,8 @@ HgfsPackSetattrRequest(struct iattr *iattr, // IN: Inode attrs to update from + size_t reqBufferSize; + size_t reqSize; + int result = 0; ++ uid_t attrUid = -1; ++ gid_t attrGid = -1; + + ASSERT(iattr); + ASSERT(dentry); +@@ -412,6 +414,14 @@ HgfsPackSetattrRequest(struct iattr *iattr, // IN: Inode attrs to update from + + valid = iattr->ia_valid; + ++ if (valid & ATTR_UID) { ++ attrUid = from_kuid(&init_user_ns, iattr->ia_uid); ++ } ++ ++ if (valid & ATTR_GID) { ++ attrGid = from_kgid(&init_user_ns, iattr->ia_gid); ++ } ++ + switch (opUsed) { + case HGFS_OP_SETATTR_V3: { + HgfsRequest *requestHeader; +@@ -488,13 +498,13 @@ HgfsPackSetattrRequest(struct iattr *iattr, // IN: Inode attrs to update from + + if (valid & ATTR_UID) { + attrV2->mask |= HGFS_ATTR_VALID_USERID; +- attrV2->userId = iattr->ia_uid; ++ attrV2->userId = attrUid; + *changed = TRUE; + } + + if (valid & ATTR_GID) { + attrV2->mask |= HGFS_ATTR_VALID_GROUPID; +- attrV2->groupId = iattr->ia_gid; ++ attrV2->groupId = attrGid; + *changed = TRUE; + } + +@@ -591,13 +601,13 @@ HgfsPackSetattrRequest(struct iattr *iattr, // IN: Inode attrs to update from + + if (valid & ATTR_UID) { + attrV2->mask |= HGFS_ATTR_VALID_USERID; +- attrV2->userId = iattr->ia_uid; ++ attrV2->userId = attrUid; + *changed = TRUE; + } + + if (valid & ATTR_GID) { + attrV2->mask |= HGFS_ATTR_VALID_GROUPID; +- attrV2->groupId = iattr->ia_gid; ++ attrV2->groupId = attrGid; + *changed = TRUE; + } + +diff --git a/open-vm-tools/modules/linux/vmhgfs/module.h b/open-vm-tools/modules/linux/vmhgfs/module.h +index 3e0973b..b6bcd1e 100644 +--- a/modules/linux/vmhgfs/module.h ++++ b/modules/linux/vmhgfs/module.h +@@ -74,6 +74,16 @@ extern int LOGLEVEL_THRESHOLD; + * Macros for accessing members that are private to this code in + * sb/inode/file structs. + */ ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) ++typedef uid_t kuid_t; ++typedef gid_t kgid_t; ++#define from_kuid(_ns, _kuid) (_kuid) ++#define from_kgid(_ns, _kgid) (_kgid) ++#define make_kuid(_ns, _uid) (_uid) ++#define make_kgid(_ns, _gid) (_gid) ++#endif ++ + #define HGFS_SET_SB_TO_COMMON(sb, common) do { (sb)->s_fs_info = (common); } while (0) + #define HGFS_SB_TO_COMMON(sb) ((HgfsSuperInfo *)(sb)->s_fs_info) + +@@ -110,9 +120,9 @@ extern int LOGLEVEL_THRESHOLD; + + /* Data kept in each superblock in sb->u. */ + typedef struct HgfsSuperInfo { +- uid_t uid; /* UID of user who mounted this fs. */ ++ kuid_t uid; /* UID of user who mounted this fs. */ ++ kgid_t gid; /* GID of user who mounted this fs. */ + Bool uidSet; /* Was the UID specified at mount-time? */ +- gid_t gid; /* GID of user who mounted this fs. */ + Bool gidSet; /* Was the GID specified at mount-time? */ + mode_t fmask; /* File permission mask. */ + mode_t dmask; /* Directory permission mask. 
*/ +-- +2.0.1 + diff --git a/main/open-vm-tools-grsec/0004-Use-new-link-helpers.patch b/main/open-vm-tools-grsec/0004-Use-new-link-helpers.patch new file mode 100755 index 0000000000..a60fa6faed --- /dev/null +++ b/main/open-vm-tools-grsec/0004-Use-new-link-helpers.patch @@ -0,0 +1,53 @@ +From 20437d731289126ee5363a6f73e4171d39f2e3d9 Mon Sep 17 00:00:00 2001 +From: "Scott M. Kroll" <skroll@gmail.com> +Date: Mon, 14 Jul 2014 11:32:35 -0400 +Subject: [PATCH 4/5] Use new link helpers + +* vfs_follow_link was removed in 3.12. +* vfs_readlink was removed in 3.15. +--- + open-vm-tools/modules/linux/vmhgfs/link.c | 15 +++++++++++++++ + 1 file changed, 15 insertions(+) + +diff --git a/open-vm-tools/modules/linux/vmhgfs/link.c b/open-vm-tools/modules/linux/vmhgfs/link.c +index 9fb95a5..06ea953 100644 +--- a/modules/linux/vmhgfs/link.c ++++ b/modules/linux/vmhgfs/link.c +@@ -110,9 +110,15 @@ HgfsFollowlink(struct dentry *dentry, // IN: Dentry containing link + "on something that wasn't a symlink\n")); + error = -EINVAL; + } else { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) ++ LOG(6, (KERN_DEBUG "VMware hgfs: HgfsFollowlink: calling " ++ "nd_set_link\n")); ++ nd_set_link(nd, fileName); ++#else + LOG(6, (KERN_DEBUG "VMware hgfs: HgfsFollowlink: calling " + "vfs_follow_link\n")); + error = vfs_follow_link(nd, fileName); ++#endif + } + kfree(fileName); + } +@@ -172,9 +178,18 @@ HgfsReadlink(struct dentry *dentry, // IN: Dentry containing link + "on something that wasn't a symlink\n")); + error = -EINVAL; + } else { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) ++ LOG(6, (KERN_DEBUG "VMware hgfs: HgfsReadlink: calling " ++ "readlink_copy\n")); ++ LOG(6, (KERN_DEBUG "VMware hgfs: %s: calling " ++ "readlink_copy\n", ++ __func__)); ++ error = readlink_copy(buffer, buflen, fileName); ++#else + LOG(6, (KERN_DEBUG "VMware hgfs: HgfsReadlink: calling " + "vfs_readlink\n")); + error = vfs_readlink(dentry, buffer, buflen, fileName); ++#endif + } + kfree(fileName); + } +-- +2.0.1 + diff --git a/main/open-vm-tools-grsec/0005-Update-hgfs-file-operations-for-newer-kernels.patch b/main/open-vm-tools-grsec/0005-Update-hgfs-file-operations-for-newer-kernels.patch new file mode 100755 index 0000000000..a885d84d47 --- /dev/null +++ b/main/open-vm-tools-grsec/0005-Update-hgfs-file-operations-for-newer-kernels.patch @@ -0,0 +1,2688 @@ +From c1a0f4254812d3588b3716204190a521e8f87db8 Mon Sep 17 00:00:00 2001 +From: "Scott M. Kroll" <skroll@gmail.com> +Date: Mon, 14 Jul 2014 12:42:06 -0400 +Subject: [PATCH 5/5] Update hgfs file operations for newer kernels + +* Keep track of write back pages so concurrent file validations do not + invalidate the cache. +* Handle file flush operations. 
+--- + open-vm-tools/modules/linux/vmhgfs/file.c | 210 +++++- + open-vm-tools/modules/linux/vmhgfs/filesystem.c | 103 +-- + open-vm-tools/modules/linux/vmhgfs/fsutil.c | 743 ++++++++++++++++---- + open-vm-tools/modules/linux/vmhgfs/fsutil.h | 2 + + open-vm-tools/modules/linux/vmhgfs/inode.c | 66 +- + open-vm-tools/modules/linux/vmhgfs/link.c | 57 +- + open-vm-tools/modules/linux/vmhgfs/module.h | 7 + + open-vm-tools/modules/linux/vmhgfs/page.c | 862 ++++++++++++++++++++++-- + 8 files changed, 1735 insertions(+), 315 deletions(-) + +diff --git a/open-vm-tools/modules/linux/vmhgfs/file.c b/open-vm-tools/modules/linux/vmhgfs/file.c +index 3568f4a..825cebe 100644 +--- a/modules/linux/vmhgfs/file.c ++++ b/modules/linux/vmhgfs/file.c +@@ -47,6 +47,20 @@ + #include "vm_assert.h" + #include "vm_basic_types.h" + ++/* ++ * Before Linux 2.6.33 only O_DSYNC semantics were implemented, but using ++ * the O_SYNC flag. We continue to use the existing numerical value ++ * for O_DSYNC semantics now, but using the correct symbolic name for it. ++ * This new value is used to request true Posix O_SYNC semantics. It is ++ * defined in this strange way to make sure applications compiled against ++ * new headers get at least O_DSYNC semantics on older kernels. ++ */ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33) ++#define HGFS_FILECTL_SYNC(flags) ((flags) & O_DSYNC) ++#else ++#define HGFS_FILECTL_SYNC(flags) ((flags) & O_SYNC) ++#endif ++ + /* Private functions. */ + static int HgfsPackOpenRequest(struct inode *inode, + struct file *file, +@@ -84,6 +98,15 @@ static ssize_t HgfsWrite(struct file *file, + static loff_t HgfsSeek(struct file *file, + loff_t offset, + int origin); ++static int HgfsFlush(struct file *file ++#if !defined VMW_FLUSH_HAS_1_ARG ++ ,fl_owner_t id ++#endif ++ ); ++ ++#if !defined VMW_FSYNC_31 ++static int HgfsDoFsync(struct inode *inode); ++#endif + + static int HgfsFsync(struct file *file, + #if defined VMW_FSYNC_OLD +@@ -126,7 +149,10 @@ struct file_operations HgfsFileFileOperations = { + .owner = THIS_MODULE, + .open = HgfsOpen, + .llseek = HgfsSeek, ++ .flush = HgfsFlush, + #if defined VMW_USE_AIO ++ .read = do_sync_read, ++ .write = do_sync_write, + .aio_read = HgfsAioRead, + .aio_write = HgfsAioWrite, + #else +@@ -797,22 +823,63 @@ HgfsAioWrite(struct kiocb *iocb, // IN: I/O control block + loff_t offset) // IN: Offset at which to read + { + int result; ++ struct dentry *writeDentry; ++ HgfsInodeInfo *iinfo; + + ASSERT(iocb); + ASSERT(iocb->ki_filp); + ASSERT(iocb->ki_filp->f_dentry); + ASSERT(iov); + +- LOG(6, (KERN_DEBUG "VMware hgfs: HgfsAioWrite: was called\n")); ++ writeDentry = iocb->ki_filp->f_dentry; ++ iinfo = INODE_GET_II_P(writeDentry->d_inode); + +- result = HgfsRevalidate(iocb->ki_filp->f_dentry); ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsAioWrite(%s/%s, %lu@%Ld)\n", ++ writeDentry->d_parent->d_name.name, writeDentry->d_name.name, ++ (unsigned long) iov_length(iov, numSegs), (long long) offset)); ++ ++ spin_lock(&writeDentry->d_inode->i_lock); ++ /* ++ * Guard against dentry revalidation invalidating the inode underneath us. ++ * ++ * Data is being written and may have valid data in a page in the cache. ++ * This action prevents any invalidating of the inode when a flushing of ++ * cache data occurs prior to syncing the file with the server's attributes. ++ * The flushing of cache data would empty our in memory write pages list and ++ * would cause the inode modified write time to be updated and so the inode ++ * would also be invalidated. 
++ */ ++ iinfo->numWbPages++; ++ spin_unlock(&writeDentry->d_inode->i_lock); ++ ++ result = HgfsRevalidate(writeDentry); + if (result) { + LOG(4, (KERN_DEBUG "VMware hgfs: HgfsAioWrite: invalid dentry\n")); + goto out; + } + + result = generic_file_aio_write(iocb, iov, numSegs, offset); +- out: ++ ++ if (result >= 0) { ++ if (IS_SYNC(writeDentry->d_inode) || ++ HGFS_FILECTL_SYNC(iocb->ki_filp->f_flags)) { ++ int error; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) ++ error = vfs_fsync(iocb->ki_filp, 0); ++#else ++ error = HgfsDoFsync(writeDentry->d_inode); ++#endif ++ ++ if (error < 0) { ++ result = error; ++ } ++ } ++ } ++ ++out: ++ spin_lock(&writeDentry->d_inode->i_lock); ++ iinfo->numWbPages--; ++ spin_unlock(&writeDentry->d_inode->i_lock); + return result; + } + +@@ -962,6 +1029,98 @@ HgfsSeek(struct file *file, // IN: File to seek + } + + ++#if !defined VMW_FSYNC_31 ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsDoFsync -- ++ * ++ * Helper for HgfsFlush() and HgfsFsync(). ++ * ++ * The hgfs protocol doesn't support fsync explicityly yet. ++ * So for now, we flush all the pages to presumably honor the ++ * intent of an app calling fsync() which is to get the ++ * data onto persistent storage. As things stand now we're at ++ * the whim of the hgfs server code running on the host to fsync or ++ * not if and when it pleases. ++ * ++ * ++ * Results: ++ * Returns zero on success. Otherwise an error. ++ * ++ * Side effects: ++ * None. ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++static int ++HgfsDoFsync(struct inode *inode) // IN: File we operate on ++{ ++ int ret; ++ ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsDoFsync(%"FMT64"u)\n", ++ INODE_GET_II_P(inode)->hostFileId)); ++ ++ ret = compat_filemap_write_and_wait(inode->i_mapping); ++ ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsDoFsync: returns %d\n", ret)); ++ ++ return ret; ++} ++#endif ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsFlush -- ++ * ++ * Called when user process calls fflush() on an hgfs file. ++ * Flush all dirty pages and check for write errors. ++ * ++ * ++ * Results: ++ * Returns zero on success. (Currently always succeeds). ++ * ++ * Side effects: ++ * None. ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++static int ++HgfsFlush(struct file *file // IN: file to flush ++#if !defined VMW_FLUSH_HAS_1_ARG ++ ,fl_owner_t id // IN: id not used ++#endif ++ ) ++{ ++ int ret = 0; ++ ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsFlush(%s/%s)\n", ++ file->f_dentry->d_parent->d_name.name, ++ file->f_dentry->d_name.name)); ++ ++ if ((file->f_mode & FMODE_WRITE) == 0) { ++ goto exit; ++ } ++ ++ ++ /* Flush writes to the server and return any errors */ ++ LOG(6, (KERN_DEBUG "VMware hgfs: HgfsFlush: calling vfs_sync ... \n")); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) ++ ret = vfs_fsync(file, 0); ++#else ++ ret = HgfsDoFsync(file->f_dentry->d_inode); ++#endif ++ ++exit: ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsFlush: returns %d\n", ret)); ++ return ret; ++} ++ ++ + /* + *---------------------------------------------------------------------- + * +@@ -969,21 +1128,13 @@ HgfsSeek(struct file *file, // IN: File to seek + * + * Called when user process calls fsync() on hgfs file. + * +- * The hgfs protocol doesn't support fsync yet, so for now, we punt +- * and just return success. 
This is a little less sketchy than it +- * might sound, because hgfs skips the buffer cache in the guest +- * anyway (we always write to the host immediately). +- * +- * In the future we might want to try harder though, since +- * presumably the intent of an app calling fsync() is to get the ++ * The hgfs protocol doesn't support fsync explicitly yet, ++ * so for now, we flush all the pages to presumably honor the ++ * intent of an app calling fsync() which is to get the + * data onto persistent storage, and as things stand now we're at + * the whim of the hgfs server code running on the host to fsync or + * not if and when it pleases. + * +- * Note that do_fsync will call filemap_fdatawrite() before us and +- * filemap_fdatawait() after us, so there's no need to do anything +- * here w.r.t. writing out dirty pages. +- * + * Results: + * Returns zero on success. (Currently always succeeds). + * +@@ -1003,9 +1154,36 @@ HgfsFsync(struct file *file, // IN: File we operate on + #endif + int datasync) // IN: fdatasync or fsync + { +- LOG(6, (KERN_DEBUG "VMware hgfs: HgfsFsync: was called\n")); ++ int ret = 0; ++ loff_t startRange; ++ loff_t endRange; ++ struct inode *inode; ++ ++#if defined VMW_FSYNC_31 ++ startRange = start; ++ endRange = end; ++#else ++ startRange = 0; ++ endRange = MAX_INT64; ++#endif + +- return 0; ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsFsync(%s/%s, %lld, %lld, %d)\n", ++ file->f_dentry->d_parent->d_name.name, ++ file->f_dentry->d_name.name, ++ startRange, endRange, ++ datasync)); ++ ++ /* Flush writes to the server and return any errors */ ++ inode = file->f_dentry->d_inode; ++#if defined VMW_FSYNC_31 ++ ret = filemap_write_and_wait_range(inode->i_mapping, startRange, endRange); ++#else ++ ret = HgfsDoFsync(inode); ++#endif ++ ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsFsync: written pages %lld, %lld returns %d)\n", ++ startRange, endRange, ret)); ++ return ret; + } + + +diff --git a/open-vm-tools/modules/linux/vmhgfs/filesystem.c b/open-vm-tools/modules/linux/vmhgfs/filesystem.c +index c845b36..dc0adcd 100644 +--- a/modules/linux/vmhgfs/filesystem.c ++++ b/modules/linux/vmhgfs/filesystem.c +@@ -83,7 +83,6 @@ HgfsOp hgfsVersionCreateSymlink; + static inline unsigned long HgfsComputeBlockBits(unsigned long blockSize); + static compat_kmem_cache_ctor HgfsInodeCacheCtor; + static HgfsSuperInfo *HgfsInitSuperInfo(HgfsMountInfo *mountInfo); +-static int HgfsGetRootDentry(struct super_block *sb, struct dentry **rootDentry); + static int HgfsReadSuper(struct super_block *sb, + void *rawData, + int flags); +@@ -335,103 +334,6 @@ HgfsInitSuperInfo(HgfsMountInfo *mountInfo) // IN: Passed down from the user + + + /* +- *---------------------------------------------------------------------------- +- * +- * HgfsGetRootDentry -- +- * +- * Gets the root dentry for a given super block. +- * +- * Results: +- * zero and a valid root dentry on success +- * negative value on failure +- * +- * Side effects: +- * None. 
+- * +- *---------------------------------------------------------------------------- +- */ +- +-static int +-HgfsGetRootDentry(struct super_block *sb, // IN: Super block object +- struct dentry **rootDentry) // OUT: Root dentry +-{ +- int result = -ENOMEM; +- struct inode *rootInode; +- struct dentry *tempRootDentry = NULL; +- struct HgfsAttrInfo rootDentryAttr; +- HgfsInodeInfo *iinfo; +- +- ASSERT(sb); +- ASSERT(rootDentry); +- +- LOG(6, (KERN_DEBUG "VMware hgfs: %s: entered\n", __func__)); +- +- rootInode = HgfsGetInode(sb, HGFS_ROOT_INO); +- if (rootInode == NULL) { +- LOG(6, (KERN_DEBUG "VMware hgfs: %s: Could not get the root inode\n", +- __func__)); +- goto exit; +- } +- +- /* +- * On an allocation failure in read_super, the inode will have been +- * marked "bad". If it was, we certainly don't want to start playing with +- * the HgfsInodeInfo. So quietly put the inode back and fail. +- */ +- if (is_bad_inode(rootInode)) { +- LOG(6, (KERN_DEBUG "VMware hgfs: %s: encountered bad inode\n", +- __func__)); +- goto exit; +- } +- +- tempRootDentry = d_make_root(rootInode); +- /* +- * d_make_root() does iput() on failure; if d_make_root() completes +- * successfully then subsequent dput() will do iput() for us, so we +- * should just ignore root inode from now on. +- */ +- rootInode = NULL; +- +- if (tempRootDentry == NULL) { +- LOG(4, (KERN_WARNING "VMware hgfs: %s: Could not get " +- "root dentry\n", __func__)); +- goto exit; +- } +- +- result = HgfsPrivateGetattr(tempRootDentry, &rootDentryAttr, NULL); +- if (result) { +- LOG(4, (KERN_WARNING "VMware hgfs: HgfsReadSuper: Could not" +- "instantiate the root dentry\n")); +- goto exit; +- } +- +- iinfo = INODE_GET_II_P(tempRootDentry->d_inode); +- iinfo->isFakeInodeNumber = FALSE; +- iinfo->isReferencedInode = TRUE; +- +- if (rootDentryAttr.mask & HGFS_ATTR_VALID_FILEID) { +- iinfo->hostFileId = rootDentryAttr.hostFileId; +- } +- +- HgfsChangeFileAttributes(tempRootDentry->d_inode, &rootDentryAttr); +- HgfsDentryAgeReset(tempRootDentry); +- tempRootDentry->d_op = &HgfsDentryOperations; +- +- *rootDentry = tempRootDentry; +- result = 0; +- +- LOG(6, (KERN_DEBUG "VMware hgfs: %s: finished\n", __func__)); +-exit: +- if (result) { +- iput(rootInode); +- dput(tempRootDentry); +- *rootDentry = NULL; +- } +- return result; +-} +- +- +-/* + *----------------------------------------------------------------------------- + * + * HgfsReadSuper -- +@@ -511,7 +413,10 @@ HgfsReadSuper(struct super_block *sb, // OUT: Superblock object + sb->s_blocksize_bits = HgfsComputeBlockBits(HGFS_BLOCKSIZE); + sb->s_blocksize = 1 << sb->s_blocksize_bits; + +- result = HgfsGetRootDentry(sb, &rootDentry); ++ /* ++ * Create the root dentry and its corresponding inode. ++ */ ++ result = HgfsInstantiateRoot(sb, &rootDentry); + if (result) { + LOG(4, (KERN_WARNING "VMware hgfs: HgfsReadSuper: Could not instantiate " + "root dentry\n")); +diff --git a/open-vm-tools/modules/linux/vmhgfs/fsutil.c b/open-vm-tools/modules/linux/vmhgfs/fsutil.c +index 1028cc9..72f81f1 100644 +--- a/modules/linux/vmhgfs/fsutil.c ++++ b/modules/linux/vmhgfs/fsutil.c +@@ -1,5 +1,5 @@ + /********************************************************* +- * Copyright (C) 2006 VMware, Inc. All rights reserved. ++ * Copyright (C) 2006-2014 VMware, Inc. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the +@@ -53,10 +53,13 @@ static int HgfsUnpackGetattrReply(HgfsReq *req, + HgfsAttrInfo *attr, + char **fileName); + static int HgfsPackGetattrRequest(HgfsReq *req, +- struct dentry *dentry, ++ HgfsOp opUsed, + Bool allowHandleReuse, +- HgfsOp opUsed, ++ struct dentry *dentry, + HgfsAttrInfo *attr); ++static int HgfsBuildRootPath(char *buffer, ++ size_t bufferLen, ++ HgfsSuperInfo *si); + + /* + * Private function implementations. +@@ -234,13 +237,17 @@ HgfsUnpackGetattrReply(HgfsReq *req, // IN: Reply packet + /* + *---------------------------------------------------------------------- + * +- * HgfsPackGetattrRequest -- ++ * HgfsPackCommonattr -- + * +- * Setup the getattr request, depending on the op version. When possible, +- * we will issue the getattr using an existing open HGFS handle. ++ * This function abstracts the HgfsAttr struct behind HgfsAttrInfo. ++ * Callers can pass one of four replies into it and receive back the ++ * attributes for those replies. ++ * ++ * Callers must populate attr->requestType so that we know whether to ++ * expect a V1 or V2 Attr struct. + * + * Results: +- * Returns zero on success, or negative error on failure. ++ * Zero on success, non-zero otherwise. + * + * Side effects: + * None +@@ -249,22 +256,18 @@ HgfsUnpackGetattrReply(HgfsReq *req, // IN: Reply packet + */ + + static int +-HgfsPackGetattrRequest(HgfsReq *req, // IN/OUT: Request buffer +- struct dentry *dentry, // IN: Dentry containing name +- Bool allowHandleReuse, // IN: Can we use a handle? +- HgfsOp opUsed, // IN: Op to be used +- HgfsAttrInfo *attr) // OUT: Attrs to update ++HgfsPackCommonattr(HgfsReq *req, // IN/OUT: request buffer ++ HgfsOp opUsed, // IN: Op to be used ++ Bool allowHandleReuse, // IN: Can we use a handle? ++ struct inode *fileInode, // IN: file inode ++ size_t *reqSize, // OUT: request size ++ size_t *reqBufferSize, // OUT: request buffer size ++ char **fileName, // OUT: pointer to request file name ++ uint32 **fileNameLength, // OUT: pointer to request file name length ++ HgfsAttrInfo *attr) // OUT: Attrs to update + { +- size_t reqBufferSize; +- size_t reqSize; +- int result = 0; + HgfsHandle handle; +- char *fileName = NULL; +- uint32 *fileNameLength = NULL; +- +- ASSERT(attr); +- ASSERT(dentry); +- ASSERT(req); ++ int result = 0; + + attr->requestType = opUsed; + +@@ -287,24 +290,25 @@ HgfsPackGetattrRequest(HgfsReq *req, // IN/OUT: Request buffer + * by name. 
+ */ + requestV3->hints = 0; +- if (allowHandleReuse && HgfsGetHandle(dentry->d_inode, ++ if (allowHandleReuse && HgfsGetHandle(fileInode, + 0, + &handle) == 0) { + requestV3->fileName.flags = HGFS_FILE_NAME_USE_FILE_DESC; + requestV3->fileName.fid = handle; + requestV3->fileName.length = 0; + requestV3->fileName.caseType = HGFS_FILE_NAME_DEFAULT_CASE; +- fileName = NULL; ++ *fileName = NULL; ++ *fileNameLength = NULL; + } else { +- fileName = requestV3->fileName.name; +- fileNameLength = &requestV3->fileName.length; ++ *fileName = requestV3->fileName.name; ++ *fileNameLength = &requestV3->fileName.length; + requestV3->fileName.flags = 0; + requestV3->fileName.fid = HGFS_INVALID_HANDLE; + requestV3->fileName.caseType = HGFS_FILE_NAME_CASE_SENSITIVE; + } + requestV3->reserved = 0; +- reqSize = HGFS_REQ_PAYLOAD_SIZE_V3(requestV3); +- reqBufferSize = HGFS_NAME_BUFFER_SIZET(req->bufferSize, reqSize); ++ *reqSize = HGFS_REQ_PAYLOAD_SIZE_V3(requestV3); ++ *reqBufferSize = HGFS_NAME_BUFFER_SIZET(req->bufferSize, *reqSize); + break; + } + +@@ -321,19 +325,20 @@ HgfsPackGetattrRequest(HgfsReq *req, // IN/OUT: Request buffer + * correct regardless. If we don't find a handle, fall back on getattr + * by name. + */ +- if (allowHandleReuse && HgfsGetHandle(dentry->d_inode, ++ if (allowHandleReuse && HgfsGetHandle(fileInode, + 0, + &handle) == 0) { + requestV2->hints = HGFS_ATTR_HINT_USE_FILE_DESC; + requestV2->file = handle; +- fileName = NULL; ++ *fileName = NULL; ++ *fileNameLength = NULL; + } else { + requestV2->hints = 0; +- fileName = requestV2->fileName.name; +- fileNameLength = &requestV2->fileName.length; ++ *fileName = requestV2->fileName.name; ++ *fileNameLength = &requestV2->fileName.length; + } +- reqSize = sizeof *requestV2; +- reqBufferSize = HGFS_NAME_BUFFER_SIZE(req->bufferSize, requestV2); ++ *reqSize = sizeof *requestV2; ++ *reqBufferSize = HGFS_NAME_BUFFER_SIZE(req->bufferSize, requestV2); + break; + } + +@@ -344,10 +349,10 @@ HgfsPackGetattrRequest(HgfsReq *req, // IN/OUT: Request buffer + requestV1->header.op = opUsed; + requestV1->header.id = req->id; + +- fileName = requestV1->fileName.name; +- fileNameLength = &requestV1->fileName.length; +- reqSize = sizeof *requestV1; +- reqBufferSize = HGFS_NAME_BUFFER_SIZE(req->bufferSize, requestV1); ++ *fileName = requestV1->fileName.name; ++ *fileNameLength = &requestV1->fileName.length; ++ *reqSize = sizeof *requestV1; ++ *reqBufferSize = HGFS_NAME_BUFFER_SIZE(req->bufferSize, requestV1); + break; + } + +@@ -355,6 +360,57 @@ HgfsPackGetattrRequest(HgfsReq *req, // IN/OUT: Request buffer + LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackGetattrRequest: unexpected " + "OP type encountered\n")); + result = -EPROTO; ++ break; ++ } ++ ++ return result; ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsPackGetattrRequest -- ++ * ++ * Setup the getattr request, depending on the op version. When possible, ++ * we will issue the getattr using an existing open HGFS handle. ++ * ++ * Results: ++ * Returns zero on success, or negative error on failure. ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++static int ++HgfsPackGetattrRequest(HgfsReq *req, // IN/OUT: Request buffer ++ HgfsOp opUsed, // IN: Op to be used ++ Bool allowHandleReuse, // IN: Can we use a handle? 
++ struct dentry *dentry, // IN: Dentry containing name ++ HgfsAttrInfo *attr) // OUT: Attrs to update ++{ ++ size_t reqBufferSize; ++ size_t reqSize; ++ char *fileName = NULL; ++ uint32 *fileNameLength = NULL; ++ int result = 0; ++ ++ ASSERT(attr); ++ ASSERT(dentry); ++ ASSERT(req); ++ ++ result = HgfsPackCommonattr(req, ++ opUsed, ++ allowHandleReuse, ++ dentry->d_inode, ++ &reqSize, ++ &reqBufferSize, ++ &fileName, ++ &fileNameLength, ++ attr); ++ if (0 > result) { + goto out; + } + +@@ -364,8 +420,90 @@ HgfsPackGetattrRequest(HgfsReq *req, // IN/OUT: Request buffer + /* Build full name to send to server. */ + if (HgfsBuildPath(fileName, reqBufferSize, + dentry) < 0) { +- LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackGetattrRequest: build path " +- "failed\n")); ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackGetattrRequest: build path failed\n")); ++ result = -EINVAL; ++ goto out; ++ } ++ LOG(6, (KERN_DEBUG "VMware hgfs: HgfsPackGetattrRequest: getting attrs for \"%s\"\n", ++ fileName)); ++ ++ /* Convert to CP name. */ ++ result = CPName_ConvertTo(fileName, ++ reqBufferSize, ++ fileName); ++ if (result < 0) { ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackGetattrRequest: CP conversion failed\n")); ++ result = -EINVAL; ++ goto out; ++ } ++ ++ *fileNameLength = result; ++ } ++ ++ req->payloadSize = reqSize + result; ++ result = 0; ++ ++out: ++ return result; ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsPackGetattrRootRequest -- ++ * ++ * Setup the getattr request for the root of the HGFS file system. ++ * ++ * When possible, we will issue the getattr using an existing open HGFS handle. ++ * ++ * Results: ++ * Returns zero on success, or negative error on failure. ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++static int ++HgfsPackGetattrRootRequest(HgfsReq *req, // IN/OUT: Request buffer ++ HgfsOp opUsed, // IN: Op to be used ++ struct super_block *sb, // IN: Super block entry ++ HgfsAttrInfo *attr) // OUT: Attrs to update ++{ ++ size_t reqBufferSize; ++ size_t reqSize; ++ char *fileName = NULL; ++ uint32 *fileNameLength = NULL; ++ int result = 0; ++ ++ ASSERT(attr); ++ ASSERT(sb); ++ ASSERT(req); ++ ++ result = HgfsPackCommonattr(req, ++ opUsed, ++ FALSE, ++ NULL, ++ &reqSize, ++ &reqBufferSize, ++ &fileName, ++ &fileNameLength, ++ attr); ++ if (0 > result) { ++ goto out; ++ } ++ ++ /* Avoid all this extra work when we're doing a getattr by handle. */ ++ if (fileName != NULL) { ++ HgfsSuperInfo *si = HGFS_SB_TO_COMMON(sb); ++ ++ /* Build full name to send to server. 
*/ ++ if (HgfsBuildRootPath(fileName, ++ reqBufferSize, ++ si) < 0) { ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackGetattrRootRequest: build path failed\n")); + result = -EINVAL; + goto out; + } +@@ -511,7 +649,8 @@ HgfsUnpackCommonAttr(HgfsReq *req, // IN: Reply packet + attrInfo->groupId = attrV2->groupId; + attrInfo->mask |= HGFS_ATTR_VALID_GROUPID; + } +- if (attrV2->mask & HGFS_ATTR_VALID_FILEID) { ++ if (attrV2->mask & (HGFS_ATTR_VALID_FILEID | ++ HGFS_ATTR_VALID_NON_STATIC_FILEID)) { + attrInfo->hostFileId = attrV2->hostFileId; + attrInfo->mask |= HGFS_ATTR_VALID_FILEID; + } +@@ -578,6 +717,18 @@ HgfsCalcBlockSize(uint64 tsize) + } + #endif + ++ ++static inline int ++hgfs_timespec_compare(const struct timespec *lhs, const struct timespec *rhs) ++{ ++ if (lhs->tv_sec < rhs->tv_sec) ++ return -1; ++ if (lhs->tv_sec > rhs->tv_sec) ++ return 1; ++ return lhs->tv_nsec - rhs->tv_nsec; ++} ++ ++ + /* + *---------------------------------------------------------------------- + * +@@ -640,6 +791,74 @@ HgfsSetInodeUidGid(struct inode *inode, // IN/OUT: Inode + } + } + ++/* ++ *----------------------------------------------------------------------------- ++ * ++ * HgfsIsInodeWritable -- ++ * ++ * Helper function for verifying if a file is under write access. ++ * ++ * Results: ++ * TRUE if file is writable, FALSE otherwise. ++ * ++ * Side effects: ++ * None. ++ * ++ *----------------------------------------------------------------------------- ++ */ ++ ++static Bool ++HgfsIsInodeWritable(struct inode *inode) // IN: File we're writing to ++{ ++ HgfsInodeInfo *iinfo; ++ struct list_head *cur; ++ Bool isWritable = FALSE; ++ ++ iinfo = INODE_GET_II_P(inode); ++ /* ++ * Iterate over the open handles for this inode, and find if there ++ * is one that allows the write mode. ++ * Note, the mode is stored as incremented by one to prevent overload of ++ * the zero value. ++ */ ++ spin_lock(&hgfsBigLock); ++ list_for_each(cur, &iinfo->files) { ++ HgfsFileInfo *finfo = list_entry(cur, HgfsFileInfo, list); ++ ++ if (0 != (finfo->mode & (HGFS_OPEN_MODE_WRITE_ONLY + 1))) { ++ isWritable = TRUE; ++ break; ++ } ++ } ++ spin_unlock(&hgfsBigLock); ++ ++ return isWritable; ++} ++ ++ ++/* ++ *----------------------------------------------------------------------------- ++ * ++ * HgfsIsSafeToChange -- ++ * ++ * Helper function for verifying if a file inode size and time fields is safe ++ * to update. It is deemed safe only if there is not an open writer to the file. ++ * ++ * Results: ++ * TRUE if safe to change inode, FALSE otherwise. ++ * ++ * Side effects: ++ * None. ++ * ++ *----------------------------------------------------------------------------- ++ */ ++ ++static Bool ++HgfsIsSafeToChange(struct inode *inode) // IN: File we're writing to ++{ ++ return !HgfsIsInodeWritable(inode); ++} ++ + + /* + *---------------------------------------------------------------------- +@@ -665,13 +884,34 @@ HgfsChangeFileAttributes(struct inode *inode, // IN/OUT: Inode + HgfsAttrInfo const *attr) // IN: New attrs + { + HgfsSuperInfo *si; ++ HgfsInodeInfo *iinfo; + Bool needInvalidate = FALSE; ++ Bool isSafeToChange; + + ASSERT(inode); + ASSERT(inode->i_sb); + ASSERT(attr); + + si = HGFS_SB_TO_COMMON(inode->i_sb); ++ iinfo = INODE_GET_II_P(inode); ++ ++ /* ++ * We do not want to update the file size from server or invalidate the inode ++ * for inodes open for write. We need to avoid races with the write page ++ * extending the file. This also will cause the server to possibly update the ++ * server side file's mod time too. 
For those situations we do not want to blindly ++ * go and invalidate the inode pages thus losing changes in flight and corrupting the ++ * file. ++ * We only need to invalidate the inode pages if the file has truly been modified ++ * on the server side by another server side application, not by our writes. ++ * If there are no writers it is safe to assume that newer mod time means the file ++ * changed on the server side underneath us. ++ */ ++ isSafeToChange = HgfsIsSafeToChange(inode); ++ ++ spin_lock(&inode->i_lock); ++ ++ iinfo = INODE_GET_II_P(inode); + + LOG(6, (KERN_DEBUG "VMware hgfs: HgfsChangeFileAttributes: entered\n")); + HgfsSetFileType(inode, attr); +@@ -742,21 +982,23 @@ HgfsChangeFileAttributes(struct inode *inode, // IN/OUT: Inode + + /* + * Invalidate cached pages if we didn't receive the file size, or if it has +- * changed on the server. ++ * changed on the server, and no writes in flight. + */ + if (attr->mask & HGFS_ATTR_VALID_SIZE) { + loff_t oldSize = compat_i_size_read(inode); + inode->i_blocks = (attr->size + HGFS_BLOCKSIZE - 1) / HGFS_BLOCKSIZE; + if (oldSize != attr->size) { +- LOG(4, (KERN_DEBUG "VMware hgfs: HgfsChangeFileAttributes: new file " +- "size: %"FMT64"u, old file size: %Lu\n", attr->size, oldSize)); +- needInvalidate = TRUE; ++ if (oldSize < attr->size || (iinfo->numWbPages == 0 && isSafeToChange)) { ++ needInvalidate = TRUE; ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsChangeFileAttributes: new file " ++ "size: %"FMT64"u, old file size: %Lu\n", attr->size, oldSize)); ++ inode->i_blocks = HgfsCalcBlockSize(attr->size); ++ compat_i_size_write(inode, attr->size); ++ } + } +- compat_i_size_write(inode, attr->size); + } else { + LOG(4, (KERN_DEBUG "VMware hgfs: HgfsChangeFileAttributes: did not " + "get file size\n")); +- needInvalidate = TRUE; + } + + if (attr->mask & HGFS_ATTR_VALID_ACCESS_TIME) { +@@ -767,12 +1009,15 @@ HgfsChangeFileAttributes(struct inode *inode, // IN/OUT: Inode + + /* + * Invalidate cached pages if we didn't receive the modification time, or if +- * it has changed on the server. ++ * it has changed on the server and we don't have writes in flight and any open ++ * open writers. + */ + if (attr->mask & HGFS_ATTR_VALID_WRITE_TIME) { + HGFS_DECLARE_TIME(newTime); + HGFS_SET_TIME(newTime, attr->writeTime); +- if (!HGFS_EQUAL_TIME(newTime, inode->i_mtime)) { ++ if (hgfs_timespec_compare(&newTime, &inode->i_mtime) > 0 && ++ iinfo->numWbPages == 0 && ++ isSafeToChange) { + LOG(4, (KERN_DEBUG "VMware hgfs: HgfsChangeFileAttributes: new mod " + "time: %ld:%lu, old mod time: %ld:%lu\n", + HGFS_PRINT_TIME(newTime), HGFS_PRINT_TIME(inode->i_mtime))); +@@ -780,7 +1025,6 @@ HgfsChangeFileAttributes(struct inode *inode, // IN/OUT: Inode + } + HGFS_SET_TIME(inode->i_mtime, attr->writeTime); + } else { +- needInvalidate = TRUE; + LOG(4, (KERN_DEBUG "VMware hgfs: HgfsChangeFileAttributes: did not " + "get mod time\n")); + HGFS_SET_TIME(inode->i_mtime, HGFS_GET_CURRENT_TIME()); +@@ -798,6 +1042,8 @@ HgfsChangeFileAttributes(struct inode *inode, // IN/OUT: Inode + HGFS_SET_TIME(inode->i_ctime, HGFS_GET_CURRENT_TIME()); + } + ++ spin_unlock(&inode->i_lock); ++ + /* + * Compare old size and write time with new size and write time. 
If there's + * a difference (or if we didn't get a new size or write time), the file +@@ -815,17 +1061,14 @@ HgfsChangeFileAttributes(struct inode *inode, // IN/OUT: Inode + /* + *---------------------------------------------------------------------- + * +- * HgfsPrivateGetattr -- ++ * HgfsCanRetryGetattrRequest -- + * +- * Internal getattr routine. Send a getattr request to the server +- * for the indicated remote name, and if it succeeds copy the +- * results of the getattr into the provided HgfsAttrInfo. +- * +- * fileName (if supplied) will be set to a newly allocated string +- * if the file is a symlink; it's the caller's duty to free it. ++ * Checks the getattr request version and downgrades the global getattr ++ * version if we can. + * + * Results: +- * Returns zero on success, or a negative error on failure. ++ * Returns TRUE on success and downgrades the global getattr protocol version, ++ * or FALSE if no retry is possible. + * + * Side effects: + * None +@@ -833,44 +1076,63 @@ HgfsChangeFileAttributes(struct inode *inode, // IN/OUT: Inode + *---------------------------------------------------------------------- + */ + +-int +-HgfsPrivateGetattr(struct dentry *dentry, // IN: Dentry containing name +- HgfsAttrInfo *attr, // OUT: Attr to copy into +- char **fileName) // OUT: pointer to allocated file name ++static Bool ++HgfsCanRetryGetattrRequest(HgfsOp getattrOp) // IN: getattrOp version used + { +- HgfsReq *req; +- HgfsStatus replyStatus; +- HgfsOp opUsed; +- int result = 0; +- Bool allowHandleReuse = TRUE; ++ Bool canRetry = FALSE; ++ ++ /* Retry with older version(s). Set globally. */ ++ if (getattrOp == HGFS_OP_GETATTR_V3) { ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsCanRetryGetattrRequest: Version 3 " ++ "not supported. Falling back to version 2.\n")); ++ hgfsVersionGetattr = HGFS_OP_GETATTR_V2; ++ canRetry = TRUE; ++ } else if (getattrOp == HGFS_OP_GETATTR_V2) { ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsCanRetryGetattrRequest: Version 2 " ++ "not supported. Falling back to version 1.\n")); ++ hgfsVersionGetattr = HGFS_OP_GETATTR; ++ canRetry = TRUE; ++ } ++ return canRetry; ++} + +- ASSERT(dentry); +- ASSERT(dentry->d_sb); +- ASSERT(attr); + +- req = HgfsGetNewRequest(); +- if (!req) { +- LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateGetattr: out of memory " +- "while getting new request\n")); +- result = -ENOMEM; +- goto out; +- } ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsSendGetattrRequest -- ++ * ++ * Send the getattr request and handle the reply. ++ * ++ * Results: ++ * Returns zero on success, or a negative error on failure. 
++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ + +- retry: ++int ++HgfsSendGetattrRequest(HgfsReq *req, // IN: getattr request ++ Bool *doRetry, // OUT: Retry getattr request ++ Bool *allowHandleReuse, // IN/OUT: handle reuse ++ HgfsAttrInfo *attr, // OUT: Attr to copy into ++ char **fileName) // OUT: pointer to allocated file name ++{ ++ int result; + +- opUsed = hgfsVersionGetattr; +- result = HgfsPackGetattrRequest(req, dentry, allowHandleReuse, opUsed, attr); +- if (result != 0) { +- LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateGetattr: no attrs\n")); +- goto out; +- } ++ *doRetry = FALSE; + + result = HgfsSendRequest(req); + if (result == 0) { +- LOG(6, (KERN_DEBUG "VMware hgfs: HgfsPrivateGetattr: got reply\n")); +- replyStatus = HgfsReplyStatus(req); ++ HgfsStatus replyStatus = HgfsReplyStatus(req); ++ + result = HgfsStatusConvertToLinux(replyStatus); + ++ LOG(6, (KERN_DEBUG "VMware hgfs: HgfsSendGetattrRequest: reply status %d -> %d\n", ++ replyStatus, result)); ++ + /* + * If the getattr succeeded on the server, copy the stats + * into the HgfsAttrInfo, otherwise return an error. +@@ -889,7 +1151,7 @@ HgfsPrivateGetattr(struct dentry *dentry, // IN: Dentry containing name + * and it doesn't display any valid shares too. So as a workaround, we + * remap EIO to success and create minimal fake attributes. + */ +- LOG(1, (KERN_DEBUG "Hgfs:Server returned EIO on unknown file\n")); ++ LOG(1, (KERN_DEBUG "Hgfs: HgfsSetInodeUidGid: Server returned EIO on unknown file\n")); + /* Create fake attributes */ + attr->mask = HGFS_ATTR_VALID_TYPE | HGFS_ATTR_VALID_SIZE; + attr->type = HGFS_FILE_TYPE_DIRECTORY; +@@ -906,9 +1168,9 @@ HgfsPrivateGetattr(struct dentry *dentry, // IN: Dentry containing name + * "goto retry" would cause an infinite loop. Instead, let's retry + * with a getattr by name. + */ +- if (allowHandleReuse) { +- allowHandleReuse = FALSE; +- goto retry; ++ if (*allowHandleReuse) { ++ *allowHandleReuse = FALSE; ++ *doRetry = TRUE; + } + + /* +@@ -920,19 +1182,11 @@ HgfsPrivateGetattr(struct dentry *dentry, // IN: Dentry containing name + + case -EPROTO: + /* Retry with older version(s). Set globally. */ +- if (attr->requestType == HGFS_OP_GETATTR_V3) { +- LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateGetattr: Version 3 " +- "not supported. Falling back to version 2.\n")); +- hgfsVersionGetattr = HGFS_OP_GETATTR_V2; +- goto retry; +- } else if (attr->requestType == HGFS_OP_GETATTR_V2) { +- LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateGetattr: Version 2 " +- "not supported. Falling back to version 1.\n")); +- hgfsVersionGetattr = HGFS_OP_GETATTR; +- goto retry; ++ if (HgfsCanRetryGetattrRequest(attr->requestType)) { ++ *doRetry = TRUE; + } ++ break; + +- /* Fallthrough. */ + default: + break; + } +@@ -942,8 +1196,129 @@ HgfsPrivateGetattr(struct dentry *dentry, // IN: Dentry containing name + LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateGetattr: server " + "returned error: %d\n", result)); + } else { +- LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateGetattr: unknown error: " +- "%d\n", result)); ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsSendGetattrRequest: unknown error: %d\n", ++ result)); ++ } ++ ++ return result; ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsPrivateGetattrRoot -- ++ * ++ * The getattr for the root. 
Send a getattr request to the server ++ * for the indicated remote name, and if it succeeds copy the ++ * results of the getattr into the provided HgfsAttrInfo. ++ * ++ * fileName (of the root) will be set to a newly allocated string. ++ * ++ * Results: ++ * Returns zero on success, or a negative error on failure. ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++int ++HgfsPrivateGetattrRoot(struct super_block *sb, // IN: Super block object ++ HgfsAttrInfo *attr) // OUT: Attr to copy into ++{ ++ HgfsReq *req; ++ HgfsOp opUsed; ++ int result = 0; ++ Bool doRetry; ++ Bool allowHandleReuse = FALSE; ++ ++ ASSERT(sb); ++ ASSERT(attr); ++ ++ req = HgfsGetNewRequest(); ++ if (!req) { ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateGetattrRoot: out of memory " ++ "while getting new request\n")); ++ result = -ENOMEM; ++ goto out; ++ } ++ ++retry: ++ opUsed = hgfsVersionGetattr; ++ result = HgfsPackGetattrRootRequest(req, opUsed, sb, attr); ++ if (result != 0) { ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateGetattrRoot: no attrs\n")); ++ goto out; ++ } ++ ++ result = HgfsSendGetattrRequest(req, &doRetry, &allowHandleReuse, attr, NULL); ++ if (0 != result && doRetry) { ++ goto retry; ++ } ++ ++out: ++ HgfsFreeRequest(req); ++ return result; ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsPrivateGetattr -- ++ * ++ * Internal getattr routine. Send a getattr request to the server ++ * for the indicated remote name, and if it succeeds copy the ++ * results of the getattr into the provided HgfsAttrInfo. ++ * ++ * fileName (if supplied) will be set to a newly allocated string ++ * if the file is a symlink; it's the caller's duty to free it. ++ * ++ * Results: ++ * Returns zero on success, or a negative error on failure. ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++int ++HgfsPrivateGetattr(struct dentry *dentry, // IN: Dentry containing name ++ HgfsAttrInfo *attr, // OUT: Attr to copy into ++ char **fileName) // OUT: pointer to allocated file name ++{ ++ HgfsReq *req; ++ HgfsOp opUsed; ++ int result = 0; ++ Bool doRetry; ++ Bool allowHandleReuse = TRUE; ++ ++ ASSERT(dentry); ++ ASSERT(dentry->d_sb); ++ ASSERT(attr); ++ ++ req = HgfsGetNewRequest(); ++ if (!req) { ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateGetattr: out of memory " ++ "while getting new request\n")); ++ result = -ENOMEM; ++ goto out; ++ } ++ ++retry: ++ opUsed = hgfsVersionGetattr; ++ result = HgfsPackGetattrRequest(req, opUsed, allowHandleReuse, dentry, attr); ++ if (result != 0) { ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateGetattr: no attrs\n")); ++ goto out; ++ } ++ ++ result = HgfsSendGetattrRequest(req, &doRetry, &allowHandleReuse, attr, fileName); ++ if (0 != result && doRetry) { ++ goto retry; + } + + out: +@@ -1099,6 +1474,106 @@ HgfsIget(struct super_block *sb, // IN: Superblock of this fs + /* + *----------------------------------------------------------------------------- + * ++ * HgfsInstantiateRoot -- ++ * ++ * Gets the root dentry for a given super block. ++ * ++ * Results: ++ * zero and a valid root dentry on success ++ * negative value on failure ++ * ++ * Side effects: ++ * None. 
++ * ++ *----------------------------------------------------------------------------- ++ */ ++ ++int ++HgfsInstantiateRoot(struct super_block *sb, // IN: Super block object ++ struct dentry **rootDentry) // OUT: Root dentry ++{ ++ int result = -ENOMEM; ++ struct inode *rootInode; ++ struct dentry *tempRootDentry = NULL; ++ struct HgfsAttrInfo rootDentryAttr; ++ HgfsInodeInfo *iinfo; ++ ++ ASSERT(sb); ++ ASSERT(rootDentry); ++ ++ LOG(6, (KERN_DEBUG "VMware hgfs: HgfsInstantiateRoot: entered\n")); ++ ++ rootInode = HgfsGetInode(sb, HGFS_ROOT_INO); ++ if (rootInode == NULL) { ++ LOG(6, (KERN_DEBUG "VMware hgfs: HgfsInstantiateRoot: Could not get the root inode\n")); ++ goto exit; ++ } ++ ++ /* ++ * On an allocation failure in read_super, the inode will have been ++ * marked "bad". If it was, we certainly don't want to start playing with ++ * the HgfsInodeInfo. So quietly put the inode back and fail. ++ */ ++ if (is_bad_inode(rootInode)) { ++ LOG(6, (KERN_DEBUG "VMware hgfs: HgfsInstantiateRoot: encountered bad inode\n")); ++ goto exit; ++ } ++ ++ LOG(8, (KERN_DEBUG "VMware hgfs: HgfsInstantiateRoot: retrieve root attrs\n")); ++ result = HgfsPrivateGetattrRoot(sb, &rootDentryAttr); ++ if (result) { ++ LOG(4, (KERN_WARNING "VMware hgfs: HgfsInstantiateRoot: Could not the root attrs\n")); ++ goto exit; ++ } ++ ++ iinfo = INODE_GET_II_P(rootInode); ++ iinfo->isFakeInodeNumber = FALSE; ++ iinfo->isReferencedInode = TRUE; ++ ++ if (rootDentryAttr.mask & HGFS_ATTR_VALID_FILEID) { ++ iinfo->hostFileId = rootDentryAttr.hostFileId; ++ } ++ ++ HgfsChangeFileAttributes(rootInode, &rootDentryAttr); ++ ++ /* ++ * Now the initialization of the inode is complete we can create ++ * the root dentry which has flags initialized from the inode itself. ++ */ ++ tempRootDentry = d_make_root(rootInode); ++ /* ++ * d_make_root() does iput() on failure; if d_make_root() completes ++ * successfully then subsequent dput() will do iput() for us, so we ++ * should just ignore root inode from now on. ++ */ ++ rootInode = NULL; ++ ++ if (tempRootDentry == NULL) { ++ LOG(4, (KERN_WARNING "VMware hgfs: HgfsInstantiateRoot: Could not get " ++ "root dentry\n")); ++ goto exit; ++ } ++ ++ HgfsDentryAgeReset(tempRootDentry); ++ tempRootDentry->d_op = &HgfsDentryOperations; ++ ++ *rootDentry = tempRootDentry; ++ result = 0; ++ ++ LOG(6, (KERN_DEBUG "VMware hgfs: HgfsInstantiateRoot: finished\n")); ++exit: ++ if (result) { ++ iput(rootInode); ++ dput(tempRootDentry); ++ *rootDentry = NULL; ++ } ++ return result; ++} ++ ++ ++/* ++ *----------------------------------------------------------------------------- ++ * + * HgfsInstantiate -- + * + * Tie a dentry to a looked up or created inode. Callers may choose to +@@ -1163,6 +1638,45 @@ HgfsInstantiate(struct dentry *dentry, // IN: Dentry to use + /* + *----------------------------------------------------------------------------- + * ++ * HgfsBuildRootPath -- ++ * ++ * Constructs the root path given the super info. ++ * ++ * Results: ++ * If non-negative, the length of the buffer written. ++ * Otherwise, an error code. ++ * ++ * Side effects: ++ * None ++ * ++ *----------------------------------------------------------------------------- ++ */ ++ ++int ++HgfsBuildRootPath(char *buffer, // IN/OUT: Buffer to write into ++ size_t bufferLen, // IN: Size of buffer ++ HgfsSuperInfo *si) // IN: First dentry to walk ++{ ++ size_t shortestNameLength; ++ /* ++ * Buffer must hold at least the share name (which is already prefixed with ++ * a forward slash), and nul. 
++ */ ++ shortestNameLength = si->shareNameLen + 1; ++ if (bufferLen < shortestNameLength) { ++ return -ENAMETOOLONG; ++ } ++ memcpy(buffer, si->shareName, shortestNameLength); ++ ++ /* Short-circuit if we're at the root already. */ ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsBuildRootPath: root path \"%s\"\n", buffer)); ++ return shortestNameLength; ++} ++ ++ ++/* ++ *----------------------------------------------------------------------------- ++ * + * HgfsBuildPath -- + * + * Constructs the full path given a dentry by walking the dentry and its +@@ -1184,7 +1698,7 @@ HgfsBuildPath(char *buffer, // IN/OUT: Buffer to write into + size_t bufferLen, // IN: Size of buffer + struct dentry *dentry) // IN: First dentry to walk + { +- int retval = 0; ++ int retval; + size_t shortestNameLength; + HgfsSuperInfo *si; + +@@ -1194,26 +1708,23 @@ HgfsBuildPath(char *buffer, // IN/OUT: Buffer to write into + + si = HGFS_SB_TO_COMMON(dentry->d_sb); + +- /* +- * Buffer must hold at least the share name (which is already prefixed with +- * a forward slash), and nul. +- */ +- shortestNameLength = si->shareNameLen + 1; +- if (bufferLen < shortestNameLength) { +- return -ENAMETOOLONG; ++ retval = HgfsBuildRootPath(buffer, bufferLen, si); ++ if (0 > retval) { ++ return retval; + } +- memcpy(buffer, si->shareName, shortestNameLength); + + /* Short-circuit if we're at the root already. */ + if (IS_ROOT(dentry)) { + LOG(4, (KERN_DEBUG "VMware hgfs: HgfsBuildPath: Sending root \"%s\"\n", + buffer)); +- return shortestNameLength; ++ return retval; + } + + /* Skip the share name, but overwrite our previous nul. */ ++ shortestNameLength = retval; + buffer += shortestNameLength - 1; + bufferLen -= shortestNameLength - 1; ++ retval = 0; + + /* + * Build the path string walking the tree backward from end to ROOT +@@ -1230,8 +1741,8 @@ HgfsBuildPath(char *buffer, // IN/OUT: Buffer to write into + if (bufferLen < 0) { + compat_unlock_dentry(dentry); + dput(dentry); +- LOG(4, (KERN_DEBUG "VMware hgfs: HgfsBuildPath: Ran out of space " +- "while writing dentry name\n")); ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsBuildPath: Ran out of space " ++ "while writing dentry name\n")); + return -ENAMETOOLONG; + } + buffer[bufferLen] = '/'; +@@ -1305,7 +1816,7 @@ HgfsDentryAgeReset(struct dentry *dentry) // IN: Dentry whose age to reset + /* + *----------------------------------------------------------------------------- + * +- * HgfsDentryAgeReset -- ++ * HgfsDentryAgeForce -- + * + * Set the dentry's time to 0. This makes the dentry's age "too old" and + * forces subsequent HgfsRevalidates to go to the server for attributes. 
+@@ -1808,5 +2319,7 @@ HgfsDoReadInode(struct inode *inode) // IN: Inode to initialize + iinfo->isReferencedInode = FALSE; + iinfo->isFakeInodeNumber = FALSE; + iinfo->createdAndUnopened = FALSE; ++ iinfo->numWbPages = 0; ++ INIT_LIST_HEAD(&iinfo->listWbPages); + + } +diff --git a/open-vm-tools/modules/linux/vmhgfs/fsutil.h b/open-vm-tools/modules/linux/vmhgfs/fsutil.h +index 2767099..6cfc71a 100644 +--- a/modules/linux/vmhgfs/fsutil.h ++++ b/modules/linux/vmhgfs/fsutil.h +@@ -74,6 +74,8 @@ int HgfsPrivateGetattr(struct dentry *dentry, + struct inode *HgfsIget(struct super_block *sb, + ino_t ino, + HgfsAttrInfo const *attr); ++int HgfsInstantiateRoot(struct super_block *sb, ++ struct dentry **rootDentry); + int HgfsInstantiate(struct dentry *dentry, + ino_t ino, + HgfsAttrInfo const *attr); +diff --git a/open-vm-tools/modules/linux/vmhgfs/inode.c b/open-vm-tools/modules/linux/vmhgfs/inode.c +index caaa41a..93e28bf 100644 +--- a/modules/linux/vmhgfs/inode.c ++++ b/modules/linux/vmhgfs/inode.c +@@ -159,6 +159,38 @@ struct inode_operations HgfsFileInodeOperations = { + * Private functions implementations. + */ + ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsClearReadOnly -- ++ * ++ * Try to remove the file/dir read only attribute. ++ * ++ * Note when running on Windows servers the entry may have the read-only ++ * flag set and prevent a rename or delete operation from occuring. ++ * ++ * Results: ++ * Returns zero on success, or a negative error on failure. ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++static int ++HgfsClearReadOnly(struct dentry *dentry) // IN: file/dir to remove read only ++{ ++ struct iattr enableWrite; ++ ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsClearReadOnly: removing read-only\n")); ++ enableWrite.ia_mode = (dentry->d_inode->i_mode | S_IWUSR); ++ enableWrite.ia_valid = ATTR_MODE; ++ return HgfsSetattr(dentry, &enableWrite); ++} ++ ++ + /* + *---------------------------------------------------------------------- + * +@@ -309,14 +341,8 @@ HgfsDelete(struct inode *dir, // IN: Parent dir of file/dir to delete + * safe? + */ + if (!secondAttempt) { +- struct iattr enableWrite; + secondAttempt = TRUE; +- +- LOG(4, (KERN_DEBUG "VMware hgfs: HgfsDelete: access denied, " +- "attempting to work around read-only bit\n")); +- enableWrite.ia_mode = (dentry->d_inode->i_mode | S_IWUSR); +- enableWrite.ia_valid = ATTR_MODE; +- result = HgfsSetattr(dentry, &enableWrite); ++ result = HgfsClearReadOnly(dentry); + if (result == 0) { + LOG(4, (KERN_DEBUG "VMware hgfs: HgfsDelete: file is no " + "longer read-only, retrying delete\n")); +@@ -1336,6 +1362,7 @@ HgfsRename(struct inode *oldDir, // IN: Inode of original directory + HgfsReq *req = NULL; + char *oldName; + char *newName; ++ Bool secondAttempt=FALSE; + uint32 *oldNameLength; + uint32 *newNameLength; + int result = 0; +@@ -1500,6 +1527,31 @@ retry: + "returned error: %d\n", result)); + goto out; + } ++ } else if ((-EACCES == result) || (-EPERM == result)) { ++ /* ++ * It's possible that we're talking to a Windows server with ++ * a file marked read-only. Let's try again, after removing ++ * the read-only bit from the file. ++ * ++ * XXX: I think old servers will send -EPERM here. Is this entirely ++ * safe? 
++ */ ++ if (!secondAttempt) { ++ secondAttempt = TRUE; ++ result = HgfsClearReadOnly(newDentry); ++ if (result == 0) { ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRename: file is no " ++ "longer read-only, retrying rename\n")); ++ goto retry; ++ } ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRename: failed to remove " ++ "read-only property\n")); ++ } else { ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRename: second attempt at " ++ "rename failed\n")); ++ } ++ } else if (0 != result) { ++ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRename: failed with result %d\n", result)); + } + } else if (result == -EIO) { + LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRename: timed out\n")); +diff --git a/open-vm-tools/modules/linux/vmhgfs/link.c b/open-vm-tools/modules/linux/vmhgfs/link.c +index 06ea953..9140f4e 100644 +--- a/modules/linux/vmhgfs/link.c ++++ b/modules/linux/vmhgfs/link.c +@@ -45,11 +45,20 @@ static int HgfsFollowlink(struct dentry *dentry, + static int HgfsReadlink(struct dentry *dentry, + char __user *buffer, + int buflen); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) ++static void HgfsPutlink(struct dentry *dentry, ++ struct nameidata *nd, ++ void *cookie); ++#else ++static void HgfsPutlink(struct dentry *dentry, ++ struct nameidata *nd); ++#endif + + /* HGFS inode operations structure for symlinks. */ + struct inode_operations HgfsLinkInodeOperations = { + .follow_link = HgfsFollowlink, + .readlink = HgfsReadlink, ++ .put_link = HgfsPutlink, + }; + + /* +@@ -109,6 +118,7 @@ HgfsFollowlink(struct dentry *dentry, // IN: Dentry containing link + LOG(6, (KERN_DEBUG "VMware hgfs: HgfsFollowlink: got called " + "on something that wasn't a symlink\n")); + error = -EINVAL; ++ kfree(fileName); + } else { + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) + LOG(6, (KERN_DEBUG "VMware hgfs: HgfsFollowlink: calling " +@@ -120,7 +130,6 @@ HgfsFollowlink(struct dentry *dentry, // IN: Dentry containing link + error = vfs_follow_link(nd, fileName); + #endif + } +- kfree(fileName); + } + out: + +@@ -181,9 +190,6 @@ HgfsReadlink(struct dentry *dentry, // IN: Dentry containing link + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) + LOG(6, (KERN_DEBUG "VMware hgfs: HgfsReadlink: calling " + "readlink_copy\n")); +- LOG(6, (KERN_DEBUG "VMware hgfs: %s: calling " +- "readlink_copy\n", +- __func__)); + error = readlink_copy(buffer, buflen, fileName); + #else + LOG(6, (KERN_DEBUG "VMware hgfs: HgfsReadlink: calling " +@@ -195,3 +201,46 @@ HgfsReadlink(struct dentry *dentry, // IN: Dentry containing link + } + return error; + } ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsPutlink -- ++ * ++ * Modeled after page_put_link from a 2.6.9 kernel so it'll work ++ * across all kernel revisions we care about. 
++ * ++ * Results: ++ * None ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) ++static void ++HgfsPutlink(struct dentry *dentry, // dentry ++ struct nameidata *nd, // lookup name information ++ void *cookie) // cookie ++#else ++static void ++HgfsPutlink(struct dentry *dentry, // dentry ++ struct nameidata *nd) // lookup name information ++#endif ++{ ++ char *fileName = NULL; ++ ++ LOG(6, (KERN_DEBUG "VMware hgfs: HgfsPutlink: put for %s\n", ++ dentry->d_name.name)); ++ ++ fileName = nd_get_link(nd); ++ if (!IS_ERR(fileName)) { ++ LOG(6, (KERN_DEBUG "VMware hgfs: HgfsPutlink: putting %s\n", ++ fileName)); ++ kfree(fileName); ++ nd_set_link(nd, NULL); ++ } ++} +diff --git a/open-vm-tools/modules/linux/vmhgfs/module.h b/open-vm-tools/modules/linux/vmhgfs/module.h +index b6bcd1e..0c0a842 100644 +--- a/modules/linux/vmhgfs/module.h ++++ b/modules/linux/vmhgfs/module.h +@@ -147,6 +147,13 @@ typedef struct HgfsInodeInfo { + /* Is this a fake inode created in HgfsCreate that has yet to be opened? */ + Bool createdAndUnopened; + ++ /* ++ * The number of write back pages to the file which is tracked so any ++ * concurrent file validations such as reads will not invalidate the cache. ++ */ ++ unsigned long numWbPages; ++ struct list_head listWbPages; ++ + /* Is this inode referenced by HGFS? (needed by HgfsInodeLookup()) */ + Bool isReferencedInode; + +diff --git a/open-vm-tools/modules/linux/vmhgfs/page.c b/open-vm-tools/modules/linux/vmhgfs/page.c +index 6d8b50f..cf3b8c9 100644 +--- a/modules/linux/vmhgfs/page.c ++++ b/modules/linux/vmhgfs/page.c +@@ -1,5 +1,5 @@ + /********************************************************* +- * Copyright (C) 2006 VMware, Inc. All rights reserved. ++ * Copyright (C) 2006-2014 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the +@@ -64,15 +64,18 @@ static int HgfsDoWritepage(HgfsHandle handle, + struct page *page, + unsigned pageFrom, + unsigned pageTo); +-static void HgfsDoWriteBegin(struct page *page, +- unsigned pageFrom, +- unsigned pageTo); ++static int HgfsDoWriteBegin(struct file *file, ++ struct page *page, ++ unsigned pageFrom, ++ unsigned pageTo); + static int HgfsDoWriteEnd(struct file *file, + struct page *page, + unsigned pageFrom, + unsigned pageTo, + loff_t writeTo, + unsigned copied); ++static void HgfsDoExtendFile(struct inode *inode, ++ loff_t writeTo); + + /* HGFS address space operations. */ + static int HgfsReadpage(struct file *file, +@@ -128,6 +131,27 @@ struct address_space_operations HgfsAddressSpaceOperations = { + .set_page_dirty = __set_page_dirty_nobuffers, + }; + ++enum { ++ PG_BUSY = 0, ++}; ++ ++typedef struct HgfsWbPage { ++ struct list_head wb_list; /* Defines state of page: */ ++ struct page *wb_page; /* page to read in/write out */ ++ pgoff_t wb_index; /* Offset >> PAGE_CACHE_SHIFT */ ++ struct kref wb_kref; /* reference count */ ++ unsigned long wb_flags; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13) ++ wait_queue_head_t wb_queue; ++#endif ++} HgfsWbPage; ++ ++static void HgfsInodePageWbAdd(struct inode *inode, ++ struct page *page); ++static void HgfsInodePageWbRemove(struct inode *inode, ++ struct page *page); ++static void HgfsWbRequestDestroy(HgfsWbPage *req); ++ + + /* + * Private functions. 
+@@ -690,11 +714,11 @@ HgfsDoWritepage(HgfsHandle handle, // IN: Handle to use for writing + pageFrom += result; + + /* Update the inode's size now rather than waiting for a revalidate. */ +- if (curOffset > compat_i_size_read(inode)) { +- compat_i_size_write(inode, curOffset); +- } ++ HgfsDoExtendFile(inode, curOffset); + } while ((result > 0) && (remainingCount > 0)); + ++ HgfsInodePageWbRemove(inode, page); ++ + result = 0; + + out: +@@ -866,7 +890,7 @@ HgfsWritepage(struct page *page, // IN: Page to write from + * Initialize the page if the file is to be appended. + * + * Results: +- * None. ++ * Zero on success, always. + * + * Side effects: + * None. +@@ -874,37 +898,35 @@ HgfsWritepage(struct page *page, // IN: Page to write from + *----------------------------------------------------------------------------- + */ + +-static void +-HgfsDoWriteBegin(struct page *page, // IN: Page to be written ++static int ++HgfsDoWriteBegin(struct file *file, // IN: File to be written ++ struct page *page, // IN: Page to be written + unsigned pageFrom, // IN: Starting page offset + unsigned pageTo) // IN: Ending page offset + { +- loff_t offset; +- loff_t currentFileSize; +- + ASSERT(page); + +- offset = (loff_t)page->index << PAGE_CACHE_SHIFT; +- currentFileSize = compat_i_size_read(page->mapping->host); + +- /* +- * If we are doing a partial write into a new page (beyond end of +- * file), then intialize it. This allows other writes to this page +- * to accumulate before we need to write it to the server. +- */ +- if ((offset >= currentFileSize) || +- ((pageFrom == 0) && (offset + pageTo) >= currentFileSize)) { +- void *kaddr = compat_kmap_atomic(page); +- +- if (pageFrom) { ++ if (!PageUptodate(page)) { ++ /* ++ * If we are doing a partial write into a new page (beyond end of ++ * file), then intialize it. This allows other writes to this page ++ * to accumulate before we need to write it to the server. ++ */ ++ if (pageTo - pageFrom != PAGE_CACHE_SIZE) { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) ++ zero_user_segments(page, 0, pageFrom, pageTo, PAGE_CACHE_SIZE); ++#else ++ void *kaddr = compat_kmap_atomic(page); + memset(kaddr, 0, pageFrom); +- } +- if (pageTo < PAGE_CACHE_SIZE) { + memset(kaddr + pageTo, 0, PAGE_CACHE_SIZE - pageTo); ++ flush_dcache_page(page); ++ compat_kunmap_atomic(kaddr); ++#endif + } +- compat_kunmap_atomic(kaddr); +- flush_dcache_page(page); + } ++ ++ return 0; + } + + +@@ -919,7 +941,7 @@ HgfsDoWriteBegin(struct page *page, // IN: Page to be written + * receiving the write. + * + * Results: +- * Always zero. ++ * On success zero, always. + * + * Side effects: + * None. 
+@@ -928,14 +950,12 @@ HgfsDoWriteBegin(struct page *page, // IN: Page to be written + */ + + static int +-HgfsPrepareWrite(struct file *file, // IN: Ignored ++HgfsPrepareWrite(struct file *file, // IN: File to be written + struct page *page, // IN: Page to prepare + unsigned pageFrom, // IN: Beginning page offset + unsigned pageTo) // IN: Ending page offset + { +- HgfsDoWriteBegin(page, pageFrom, pageTo); +- +- return 0; ++ return HgfsDoWriteBegin(file, page, pageFrom, pageTo); + } + + #else +@@ -971,18 +991,29 @@ HgfsWriteBegin(struct file *file, // IN: File to be written + void **clientData) // OUT: Opaque to pass to write_end, unused + { + pgoff_t index = pos >> PAGE_CACHE_SHIFT; +- unsigned pageFrom = pos & (PAGE_CACHE_SHIFT - 1); +- unsigned pageTo = pos + len; ++ unsigned pageFrom = pos & (PAGE_CACHE_SIZE - 1); ++ unsigned pageTo = pageFrom + len; + struct page *page; ++ int result; + + page = compat_grab_cache_page_write_begin(mapping, index, flags); + if (page == NULL) { +- return -ENOMEM; ++ result = -ENOMEM; ++ goto exit; + } + *pagePtr = page; + +- HgfsDoWriteBegin(page, pageFrom, pageTo); +- return 0; ++ LOG(6, (KERN_DEBUG "VMware hgfs: HgfsWriteBegin: file size %Lu @ %Lu page %u to %u\n", ++ (loff_t)compat_i_size_read(page->mapping->host), ++ (loff_t)page->index << PAGE_CACHE_SHIFT, ++ pageFrom, pageTo)); ++ ++ result = HgfsDoWriteBegin(file, page, pageFrom, pageTo); ++ ASSERT(result == 0); ++ ++exit: ++ LOG(6, (KERN_DEBUG "VMware hgfs: HgfsWriteBegin: return %d\n", result)); ++ return result; + } + #endif + +@@ -990,6 +1021,40 @@ HgfsWriteBegin(struct file *file, // IN: File to be written + /* + *----------------------------------------------------------------------------- + * ++ * HgfsDoExtendFile -- ++ * ++ * Helper function for extending a file size. ++ * ++ * This function updates the inode->i_size, under the inode lock. ++ * ++ * Results: ++ * None. ++ * ++ * Side effects: ++ * None. ++ * ++ *----------------------------------------------------------------------------- ++ */ ++ ++static void ++HgfsDoExtendFile(struct inode *inode, // IN: File we're writing to ++ loff_t writeTo) // IN: Offset we're written to ++{ ++ loff_t currentFileSize; ++ ++ spin_lock(&inode->i_lock); ++ currentFileSize = compat_i_size_read(inode); ++ ++ if (writeTo > currentFileSize) { ++ compat_i_size_write(inode, writeTo); ++ } ++ spin_unlock(&inode->i_lock); ++} ++ ++ ++/* ++ *----------------------------------------------------------------------------- ++ * + * HgfsDoWriteEnd -- + * + * Helper function for HgfsWriteEnd. +@@ -1014,54 +1079,31 @@ HgfsDoWriteEnd(struct file *file, // IN: File we're writing to + loff_t writeTo, // IN: File position to write to + unsigned copied) // IN: Number of bytes copied to the page + { +- HgfsHandle handle; + struct inode *inode; +- loff_t currentFileSize; +- loff_t offset; + + ASSERT(file); + ASSERT(page); + inode = page->mapping->host; +- currentFileSize = compat_i_size_read(inode); +- offset = (loff_t)page->index << PAGE_CACHE_SHIFT; +- +- if (writeTo > currentFileSize) { +- compat_i_size_write(inode, writeTo); +- } +- +- /* We wrote a complete page, so it is up to date. */ +- if (copied == PAGE_CACHE_SIZE) { +- SetPageUptodate(page); +- } + + /* +- * Check if this is a partial write to a new page, which was +- * initialized in HgfsDoWriteBegin. ++ * Zero any uninitialised parts of the page, and then mark the page ++ * as up to date if it turns out that we're extending the file. 
+ */ +- if ((offset >= currentFileSize) || +- ((pageFrom == 0) && (writeTo >= currentFileSize))) { ++ if (!PageUptodate(page)) { + SetPageUptodate(page); + } + + /* +- * If the page is uptodate, then just mark it dirty and let +- * the page cache write it when it wants to. ++ * Track the pages being written. + */ +- if (PageUptodate(page)) { +- set_page_dirty(page); +- return 0; +- } ++ HgfsInodePageWbAdd(inode, page); + +- /* +- * We've recieved a partial write to page that is not uptodate, so +- * do the write now while the page is still locked. Another +- * alternative would be to read the page in HgfsDoWriteBegin, which +- * would make it uptodate (ie a complete cached page). +- */ +- handle = FILE_GET_FI_P(file)->handle; +- LOG(6, (KERN_WARNING "VMware hgfs: %s: writing to handle %u\n", __func__, +- handle)); +- return HgfsDoWritepage(handle, page, pageFrom, pageTo); ++ HgfsDoExtendFile(inode, writeTo); ++ ++ set_page_dirty(page); ++ ++ LOG(6, (KERN_WARNING "VMware hgfs: HgfsDoWriteEnd: return 0\n")); ++ return 0; + } + + +@@ -1143,7 +1185,7 @@ HgfsWriteEnd(struct file *file, // IN: File to write + void *clientData) // IN: From write_begin, unused. + { + unsigned pageFrom = pos & (PAGE_CACHE_SIZE - 1); +- unsigned pageTo = pageFrom + copied; ++ unsigned pageTo = pageFrom + len; + loff_t writeTo = pos + copied; + int ret; + +@@ -1151,6 +1193,10 @@ HgfsWriteEnd(struct file *file, // IN: File to write + ASSERT(mapping); + ASSERT(page); + ++ if (copied < len) { ++ zero_user_segment(page, pageFrom + copied, pageFrom + len); ++ } ++ + ret = HgfsDoWriteEnd(file, page, pageFrom, pageTo, writeTo, copied); + if (ret == 0) { + ret = copied; +@@ -1161,3 +1207,671 @@ HgfsWriteEnd(struct file *file, // IN: File to write + return ret; + } + #endif ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsWbPageAlloc -- ++ * ++ * Allocates a write-back page object. ++ * ++ * Results: ++ * The write-back page object ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++static inline HgfsWbPage * ++HgfsWbPageAlloc(void) ++{ ++ return kmalloc(sizeof (HgfsWbPage), GFP_KERNEL); ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsWbPageAlloc -- ++ * ++ * Frees a write-back page object. ++ * ++ * Results: ++ * None ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++ ++static inline void ++HgfsWbPageFree(HgfsWbPage *page) // IN: request of page data to write ++{ ++ ASSERT(page); ++ kfree(page); ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsWbRequestFree -- ++ * ++ * Frees the resources for a write-back page request. ++ * Calls the request destroy and then frees the object memory. ++ * ++ * Results: ++ * None ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++static void ++HgfsWbRequestFree(struct kref *kref) // IN: ref field request of page data to write ++{ ++ HgfsWbPage *req = container_of(kref, HgfsWbPage, wb_kref); ++ ++ /* Release write back request page and free it. */ ++ HgfsWbRequestDestroy(req); ++ HgfsWbPageFree(req); ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsWbRequestGet -- ++ * ++ * Reference the write-back page request. 
++ * Calls the request destroy and then frees the object memory. ++ * ++ * Results: ++ * None ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++void ++HgfsWbRequestGet(HgfsWbPage *req) // IN: request of page data to write ++{ ++ kref_get(&req->wb_kref); ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsWbRequestPut -- ++ * ++ * Remove a reference the write-back page request. ++ * Calls the request free to tear down the object memory if it was the ++ * final one. ++ * ++ * Results: ++ * None ++ * ++ * Side effects: ++ * Destroys the request if last one. ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++void ++HgfsWbRequestPut(HgfsWbPage *req) // IN: request of page data to write ++{ ++ kref_put(&req->wb_kref, HgfsWbRequestFree); ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsWbRequestWaitUninterruptible -- ++ * ++ * Sleep function while waiting for requests to complete. ++ * ++ * Results: ++ * Always zero. ++ * ++ * Side effects: ++* None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) ++static int ++HgfsWbRequestWaitUninterruptible(void *word) // IN:unused ++{ ++ io_schedule(); ++ return 0; ++} ++#endif ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsWbRequestWait -- ++ * ++ * Wait for a write-back page request to complete. ++ * Interruptible by fatal signals only. ++ * The user is responsible for holding a count on the request. ++ * ++ * Results: ++ * None ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++ ++int ++HgfsWbRequestWait(HgfsWbPage *req) // IN: request of page data to write ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) ++ return wait_on_bit(&req->wb_flags, ++ PG_BUSY, ++ HgfsWbRequestWaitUninterruptible, ++ TASK_UNINTERRUPTIBLE); ++#else ++ wait_event(req->wb_queue, ++ !test_bit(PG_BUSY, &req->wb_flags)); ++ return 0; ++#endif ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsWbRequestLock -- ++ * ++ * Lock the write-back page request. ++ * ++ * Results: ++ * Non-zero if the lock was not already locked ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++static inline int ++HgfsWbRequestLock(HgfsWbPage *req) // IN: request of page data to write ++{ ++ return !test_and_set_bit(PG_BUSY, &req->wb_flags); ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsWbRequestUnlock -- ++ * ++ * Unlock the write-back page request. ++ * Wakes up any waiting threads on the lock. 
++ * ++ * Results: ++ * None ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++static void ++HgfsWbRequestUnlock(HgfsWbPage *req) // IN: request of page data to write ++{ ++ if (!test_bit(PG_BUSY,&req->wb_flags)) { ++ LOG(6, (KERN_WARNING "VMware Hgfs: HgfsWbRequestUnlock: Invalid unlock attempted\n")); ++ return; ++ } ++ smp_mb__before_clear_bit(); ++ clear_bit(PG_BUSY, &req->wb_flags); ++ smp_mb__after_clear_bit(); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) ++ wake_up_bit(&req->wb_flags, PG_BUSY); ++#else ++ wake_up(&req->wb_queue); ++#endif ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsWbRequestUnlockAndPut -- ++ * ++ * Unlock the write-back page request and removes a reference. ++ * ++ * Results: ++ * None ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++static void ++HgfsWbRequestUnlockAndPut(HgfsWbPage *req) // IN: request of page data to write ++{ ++ HgfsWbRequestUnlock(req); ++ HgfsWbRequestPut(req); ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsWbRequestListAdd -- ++ * ++ * Add the write-back page request into the list. ++ * ++ * Results: ++ * None ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++static inline void ++HgfsWbRequestListAdd(HgfsWbPage *req, // IN: request of page data to write ++ struct list_head *head) // IN: list of requests ++{ ++ list_add_tail(&req->wb_list, head); ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsWbRequestListRemove -- ++ * ++ * Remove the write-back page request from the list. ++ * ++ * Results: ++ * None ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++static inline void ++HgfsWbRequestListRemove(HgfsWbPage *req) // IN: request of page data to write ++{ ++ if (!list_empty(&req->wb_list)) { ++ list_del_init(&req->wb_list); ++ } ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsWbRequestCreate -- ++ * ++ * Create the write-back page request. ++ * ++ * Results: ++ * The new write-back page request. ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++HgfsWbPage * ++HgfsWbRequestCreate(struct page *page) // IN: page of data to write ++{ ++ HgfsWbPage *wbReq; ++ /* try to allocate the request struct */ ++ wbReq = HgfsWbPageAlloc(); ++ if (wbReq == NULL) { ++ wbReq = ERR_PTR(-ENOMEM); ++ goto exit; ++ } ++ ++ /* ++ * Initialize the request struct. Initially, we assume a ++ * long write-back delay. This will be adjusted in ++ * update_nfs_request below if the region is not locked. 
++ */ ++ wbReq->wb_flags = 0; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13) ++ init_waitqueue_head(&wbReq->wb_queue); ++#endif ++ INIT_LIST_HEAD(&wbReq->wb_list); ++ wbReq->wb_page = page; ++ wbReq->wb_index = page->index; ++ page_cache_get(page); ++ kref_init(&wbReq->wb_kref); ++ ++exit: ++ LOG(6, (KERN_WARNING "VMware hgfs: HgfsWbRequestCreate: (%p, %p)\n", ++ wbReq, page)); ++ return wbReq; ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsWbRequestDestroy -- ++ * ++ * Destroys by freeing up all resources allocated to the request. ++ * Release page associated with a write-back request after it has completed. ++ * ++ * Results: ++ * None ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++static void ++HgfsWbRequestDestroy(HgfsWbPage *req) // IN: write page request ++{ ++ struct page *page = req->wb_page; ++ ++ LOG(6, (KERN_WARNING"VMware hgfs: HgfsWbRequestDestroy: (%p, %p)\n", ++ req, req->wb_page)); ++ ++ if (page != NULL) { ++ page_cache_release(page); ++ req->wb_page = NULL; ++ } ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsInodeFindWbRequest -- ++ * ++ * Finds if there is a write-back page request on this inode and returns it. ++ * ++ * Results: ++ * NULL or the write-back request for the page. ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++static HgfsWbPage * ++HgfsInodeFindWbRequest(struct inode *inode, // IN: inode of file to write to ++ struct page *page) // IN: page of data to write ++{ ++ HgfsInodeInfo *iinfo; ++ HgfsWbPage *req = NULL; ++ HgfsWbPage *cur; ++ ++ iinfo = INODE_GET_II_P(inode); ++ ++ /* Linearly search the write back list for the correct req */ ++ list_for_each_entry(cur, &iinfo->listWbPages, wb_list) { ++ if (cur->wb_page == page) { ++ req = cur; ++ break; ++ } ++ } ++ ++ if (req != NULL) { ++ HgfsWbRequestGet(req); ++ } ++ ++ return req; ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsInodeFindExistingWbRequest -- ++ * ++ * Finds if there is a write-back page request on this inode and returns ++ * locked. ++ * If the request is busy (locked) then it drops the lock and waits for it ++ * be not locked and searches the list again. ++ * ++ * Results: ++ * NULL or the write-back request for the page. ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++static HgfsWbPage * ++HgfsInodeFindExistingWbRequest(struct inode *inode, // IN: inode of file to write to ++ struct page *page) // IN: page of data to write ++{ ++ HgfsWbPage *req; ++ int error; ++ ++ spin_lock(&inode->i_lock); ++ ++ for (;;) { ++ req = HgfsInodeFindWbRequest(inode, page); ++ if (req == NULL) { ++ goto out_exit; ++ } ++ ++ /* ++ * Try and lock the request if not already locked. ++ * If we find it is already locked, busy, then we drop ++ * the reference and wait to try again. Otherwise, ++ * once newly locked we break out and return to the caller. 
++ */ ++ if (HgfsWbRequestLock(req)) { ++ break; ++ } ++ ++ /* The request was in use, so wait and then retry */ ++ spin_unlock(&inode->i_lock); ++ error = HgfsWbRequestWait(req); ++ HgfsWbRequestPut(req); ++ if (error != 0) { ++ goto out_nolock; ++ } ++ ++ spin_lock(&inode->i_lock); ++ } ++ ++out_exit: ++ spin_unlock(&inode->i_lock); ++ return req; ++ ++out_nolock: ++ return ERR_PTR(error); ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsInodeAddWbRequest -- ++ * ++ * Add a write-back page request to an inode. ++ * ++ * Results: ++ * None ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++static void ++HgfsInodeAddWbRequest(struct inode *inode, // IN: inode of file to write to ++ HgfsWbPage *req) // IN: page write request ++{ ++ HgfsInodeInfo *iinfo = INODE_GET_II_P(inode); ++ ++ LOG(6, (KERN_WARNING "VMware hgfs: HgfsInodeAddWbRequest: (%p, %p, %lu)\n", ++ inode, req->wb_page, iinfo->numWbPages)); ++ ++ /* Lock the request! */ ++ HgfsWbRequestLock(req); ++ ++ HgfsWbRequestListAdd(req, &iinfo->listWbPages); ++ iinfo->numWbPages++; ++ HgfsWbRequestGet(req); ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsInodeAddWbRequest -- ++ * ++ * Remove a write-back page request from an inode. ++ * ++ * Results: ++ * None ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++static void ++HgfsInodeRemoveWbRequest(struct inode *inode, // IN: inode of file written to ++ HgfsWbPage *req) // IN: page write request ++{ ++ HgfsInodeInfo *iinfo = INODE_GET_II_P(inode); ++ ++ LOG(6, (KERN_CRIT "VMware hgfs: HgfsInodeRemoveWbRequest: (%p, %p, %lu)\n", ++ inode, req->wb_page, iinfo->numWbPages)); ++ ++ iinfo->numWbPages--; ++ HgfsWbRequestListRemove(req); ++ HgfsWbRequestPut(req); ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsInodeAddWbRequest -- ++ * ++ * Add a write-back page request to an inode. ++ * If the page is already exists in the list for this inode nothing is ++ * done, otherwise a new object is created for the page and added to the ++ * inode list. ++ * ++ * Results: ++ * None ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++static void ++HgfsInodePageWbAdd(struct inode *inode, // IN: inode of file to write to ++ struct page *page) // IN: page of data to write ++{ ++ HgfsWbPage *req; ++ ++ LOG(6, (KERN_CRIT "VMware hgfs: HgfsInodePageWbAdd: (%p, %p)\n", ++ inode, page)); ++ ++ req = HgfsInodeFindExistingWbRequest(inode, page); ++ if (req != NULL) { ++ goto exit; ++ } ++ ++ /* ++ * We didn't find an existing write back request for that page so ++ * we create one. ++ */ ++ req = HgfsWbRequestCreate(page); ++ if (IS_ERR(req)) { ++ goto exit; ++ } ++ ++ spin_lock(&inode->i_lock); ++ /* ++ * Add the new write request for the page into our inode list to track. ++ */ ++ HgfsInodeAddWbRequest(inode, req); ++ spin_unlock(&inode->i_lock); ++ ++exit: ++ if (!IS_ERR(req)) { ++ HgfsWbRequestUnlockAndPut(req); ++ } ++} ++ ++ ++/* ++ *---------------------------------------------------------------------- ++ * ++ * HgfsInodePageWbRemove -- ++ * ++ * Remove a write-back page request from an inode. 
++ * ++ * Results: ++ * None ++ * ++ * Side effects: ++ * None ++ * ++ *---------------------------------------------------------------------- ++ */ ++ ++static void ++HgfsInodePageWbRemove(struct inode *inode, // IN: inode of file written to ++ struct page *page) // IN: page of data written ++{ ++ HgfsWbPage *req; ++ ++ LOG(6, (KERN_WARNING "VMware hgfs: HgfsInodePageWbRemove: (%p, %p)\n", ++ inode, page)); ++ ++ req = HgfsInodeFindExistingWbRequest(inode, page); ++ if (req == NULL) { ++ goto exit; ++ } ++ spin_lock(&inode->i_lock); ++ /* ++ * Add the new write request for the page into our inode list to track. ++ */ ++ HgfsInodeRemoveWbRequest(inode, req); ++ HgfsWbRequestUnlockAndPut(req); ++ spin_unlock(&inode->i_lock); ++ ++exit: ++ return; ++} ++ +-- +2.0.1 + diff --git a/main/open-vm-tools-grsec/APKBUILD b/main/open-vm-tools-grsec/APKBUILD index d0220f345c..e4cf5a1fe9 100644 --- a/main/open-vm-tools-grsec/APKBUILD +++ b/main/open-vm-tools-grsec/APKBUILD @@ -3,10 +3,10 @@ _flavor=grsec _kpkg=linux-$_flavor _realname=open-vm-tools -_kver=3.14.12 -_kpkgrel=1 +_kver=3.14.13 +_kpkgrel=0 -_realver=9.4.0_p1280544 +_realver=9.4.6_p1770165 _ver=${_realver/_p/-} _mypkgrel=0 @@ -33,15 +33,20 @@ pkgrel=$(($_kpkgrel + $_mypkgrel)) pkgdesc="The Open Virtual Machine Tools are the open source implementation of VMware Tools." url="http://open-vm-tools.sourceforge.net/" -arch="" +arch="all" license="LGPL" subpackages="" depends="linux-${_flavor}=${_kernelver}" depends_dev="bash glib-dev gettext-dev linux-${_flavor}-dev=${_kernelver}" -makedepends="$depends_dev" +makedepends="$depends_dev autoconf automake libtool" source="http://downloads.sourceforge.net/project/open-vm-tools/open-vm-tools/stable-${_realver%.*}.x/open-vm-tools-${_ver}.tar.gz - linux-3.10.patch - kernel.patch + + 0001-Remove-unused-DEPRECATED-macro.patch + 0002-Conditionally-define-g_info-macro.patch + 0003-Add-kuid_t-kgid_t-compatibility-layer.patch + 0004-Use-new-link-helpers.patch + 0005-Update-hgfs-file-operations-for-newer-kernels.patch + vmware-modules.initd " install_if="linux-${_flavor}=${_kernelver} open-vm-tools" @@ -56,6 +61,7 @@ prepare() { *.patch) msg $i; patch -p1 -i "$srcdir"/$i || return 1;; esac done + libtoolize && aclocal && autoconf && automake --add-missing } build() { @@ -86,15 +92,24 @@ package() { done } -md5sums="91f74bf6e42a3f460a42b3be31db31dc open-vm-tools-9.4.0-1280544.tar.gz -cc82715976e9af9fb8f44465af2ca5aa linux-3.10.patch -70398edfbcac530ca9a7a4714bedffeb kernel.patch +md5sums="3969daf1535d34e1c5f0c87a779b7642 open-vm-tools-9.4.6-1770165.tar.gz +dc9753f2f617a8b3db9176c440e8450c 0001-Remove-unused-DEPRECATED-macro.patch +be1dc71ed74d6dc53c198f248d668f7b 0002-Conditionally-define-g_info-macro.patch +51cc014684037f5a0fc0a3744615da0f 0003-Add-kuid_t-kgid_t-compatibility-layer.patch +1be086d0028447ba310fcb1e39c08573 0004-Use-new-link-helpers.patch +cb6b4214604d4cd116ffa824d515581e 0005-Update-hgfs-file-operations-for-newer-kernels.patch afba2c3487d0b12cee80eb2f04b05ba1 vmware-modules.initd" -sha256sums="5d30652eb0f6dc5e930781029c184837e700be5543b6a7116db4c62a6f3ca399 open-vm-tools-9.4.0-1280544.tar.gz -f3108ab321f9b32ecb59aa4760f9958f97335492345a04de79d95bd98a7af8ff linux-3.10.patch -96b4f0bf5f956803fbdffb388488c7d425f99e22ca069a66bb0283bc5cae67ed kernel.patch +sha256sums="54d7a83d8115124e4b809098b08d7017ba50828801c2f105cdadbc85a064a079 open-vm-tools-9.4.6-1770165.tar.gz +1458c4133a96f12a98c3d87df9893a02620b4b8700fb6aa5db0d3268da8102a5 0001-Remove-unused-DEPRECATED-macro.patch 
+1f5e61ee1f025ba02abfe1557e55b1871ada68b9289e78fc6d65f974689ad69e 0002-Conditionally-define-g_info-macro.patch +b11776423f3579217b67dda65c60e8e7e2cb72ba10325e18c45a4f6b199416b2 0003-Add-kuid_t-kgid_t-compatibility-layer.patch +bdc6d05d3ef409b1ed639448c0d74d2905ab5d6e0a09c13868a4084148947ebb 0004-Use-new-link-helpers.patch +ac099e639fc6aa0edf9c6f16a31cf50915bc670947c5a1ecc8940389fd74d065 0005-Update-hgfs-file-operations-for-newer-kernels.patch 6ceb5c75b002991c511d9dadb6cf91720771e76b701e5f2d91ac9ede4b168265 vmware-modules.initd" -sha512sums="07b36f49713140606f74864b59c2e01c9ed83a960dd3f5d172b84fc75e760f7370065774a0e26897924af60454c86b682ca0fd32276b0e9fb17b268247f8cbdf open-vm-tools-9.4.0-1280544.tar.gz -0b36a6318ef419832817f22d50f85d8a5ae080e9496ecdf16753b17e8a693573e46eb896c3dc83a44fc596e6737130245663908befe8aa4a038e16b08cc499a3 linux-3.10.patch -af51159f7357b1442449ec11d9328b2051c2b3865044ff2db184ad522e3495a9a0a76a3cc01d82e79e7d7d293b43b0a9f10fa4767d2a5efd3d7095f598e6896c kernel.patch +sha512sums="a16dc51a51a182031c1849776be9ac0e13bef9d9cb85807e03fbb816d4e9109b64e60f1919f6686a471c79042f30ca93f0193985c2150c6254bc72e3fd8ffa49 open-vm-tools-9.4.6-1770165.tar.gz +707f6458a9492d2c7f0ac344711ac24d05e0c3423859c140379e46a7c0905a53daf7b2b24a47a8a03c07be51c40bdfba3e75f5d429769463e6af4010e1f88358 0001-Remove-unused-DEPRECATED-macro.patch +c364f35f6aae7c34c2bb1d4f69016b761302853fa1a606e40829cd024f152e3d764c02fbbfaac0efcb4cc44c269843a410c35f528d3523ff1447b329a6f26b42 0002-Conditionally-define-g_info-macro.patch +75ccda4d30ec872fa3950e4076fd157f30c49134763551082765eb94b73b1c777c0953257fbdf16043dd5192f1fc9f04c7031f00199b8f180a5ac4efb2aaaf21 0003-Add-kuid_t-kgid_t-compatibility-layer.patch +319cd1879ead9b6d89971c2a3a38d0a65e2c98808325db1db6d1f99b54d8062080a1fb7ba445ac40d5ac9b64659e9a0ebf1dcc2daec9fca0b4a6276a6ee98982 0004-Use-new-link-helpers.patch +d055fa759e2bedc0476de4d1b6df8018b56eefcf3e1b64cc662a9676d332b0cd26c623c2c0f7206d905a2d771d84cb4d6ef121385d5c2612595a996b91656c36 0005-Update-hgfs-file-operations-for-newer-kernels.patch 639098221975cadaed0ae0f32454a6718ceaa5f43f17d949a84a85dee56fbf5f9e6248899c10a46b12c9c9cf28b837d83a37c25aba62b11cb7849a1cf8d32e1e vmware-modules.initd" diff --git a/main/open-vm-tools-grsec/kernel.patch b/main/open-vm-tools-grsec/kernel.patch deleted file mode 100644 index 819b63d9a9..0000000000 --- a/main/open-vm-tools-grsec/kernel.patch +++ /dev/null @@ -1,364 +0,0 @@ -From 34503f06e284eed7be41801bc6f1dbe9920ee4b2 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?D=C4=81vis?= <davispuh@gmail.com> -Date: Mon, 5 May 2014 22:32:43 +0300 -Subject: [PATCH] Update for changes in latest kernel ~ 3.11 - ---- - .../modules/linux/vmblock/linux/control.c | 26 +++++++++++++++++----- - open-vm-tools/modules/linux/vmblock/linux/dentry.c | 14 ++++++++++++ - open-vm-tools/modules/linux/vmblock/linux/file.c | 18 +++++++++++++++ - open-vm-tools/modules/linux/vmblock/linux/inode.c | 12 ++++++++-- - open-vm-tools/modules/linux/vmhgfs/inode.c | 15 ++++++++++--- - open-vm-tools/modules/linux/vmsync/sync.c | 25 ++++++++++++++++++++- - 6 files changed, 99 insertions(+), 11 deletions(-) - -diff --git a/modules/linux/vmblock/linux/control.c b/modules/linux/vmblock/linux/control.c -index 79716bd..306f193 100644 ---- a/modules/linux/vmblock/linux/control.c -+++ b/modules/linux/vmblock/linux/control.c -@@ -208,17 +208,23 @@ SetupProcDevice(void) - VMBlockSetProcEntryOwner(controlProcMountpoint); - - /* Create /proc/fs/vmblock/dev */ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0) -+ controlProcEntry = 
proc_create(VMBLOCK_CONTROL_DEVNAME, VMBLOCK_CONTROL_MODE, controlProcDirEntry, &ControlFileOps); -+#else - controlProcEntry = create_proc_entry(VMBLOCK_CONTROL_DEVNAME, - VMBLOCK_CONTROL_MODE, - controlProcDirEntry); -- if (!controlProcEntry) { -+#endif -+ if (controlProcEntry == NULL) { - Warning("SetupProcDevice: could not create " VMBLOCK_DEVICE "\n"); - remove_proc_entry(VMBLOCK_CONTROL_MOUNTPOINT, controlProcDirEntry); - remove_proc_entry(VMBLOCK_CONTROL_PROC_DIRNAME, NULL); - return -EINVAL; - } - -+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0) - controlProcEntry->proc_fops = &ControlFileOps; -+#endif - return 0; - } - -@@ -278,22 +284,32 @@ ExecuteBlockOp(const char __user *buf, // IN: buffer with name - int (*blockOp)(const char *filename, // IN: block operation - const os_blocker_id_t blocker)) - { -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) -+ struct filename *fname; -+#else -+ char *fname; -+#endif - char *name; - int i; - int retval; - -- name = getname(buf); -- if (IS_ERR(name)) { -- return PTR_ERR(name); -+ fname = getname(buf); -+ if (IS_ERR(fname)) { -+ return PTR_ERR(fname); - } - -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) -+ name = (char *)fname->name; -+#else -+ name = fname; -+#endif - for (i = strlen(name) - 1; i >= 0 && name[i] == '/'; i--) { - name[i] = '\0'; - } - - retval = i < 0 ? -EINVAL : blockOp(name, blocker); - -- putname(name); -+ __putname(name); - - return retval; - } -diff --git a/modules/linux/vmblock/linux/dentry.c b/modules/linux/vmblock/linux/dentry.c -index 05ea95a..3d43c5e 100644 ---- a/modules/linux/vmblock/linux/dentry.c -+++ b/modules/linux/vmblock/linux/dentry.c -@@ -32,7 +32,11 @@ - #include "block.h" - - -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) -+static int DentryOpRevalidate(struct dentry *dentry, unsigned int flags); -+#else - static int DentryOpRevalidate(struct dentry *dentry, struct nameidata *nd); -+#endif - - struct dentry_operations LinkDentryOps = { - .d_revalidate = DentryOpRevalidate, -@@ -58,9 +62,15 @@ struct dentry_operations LinkDentryOps = { - *---------------------------------------------------------------------------- - */ - -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) -+static int -+DentryOpRevalidate(struct dentry *dentry, // IN: dentry revalidating -+ unsigned int flags) // IN: lookup flags -+#else - static int - DentryOpRevalidate(struct dentry *dentry, // IN: dentry revalidating - struct nameidata *nd) // IN: lookup flags & intent -+#endif - { - VMBlockInodeInfo *iinfo; - struct nameidata actualNd; -@@ -101,7 +111,11 @@ DentryOpRevalidate(struct dentry *dentry, // IN: dentry revalidating - if (actualDentry && - actualDentry->d_op && - actualDentry->d_op->d_revalidate) { -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) -+ return actualDentry->d_op->d_revalidate(actualDentry, flags); -+#else - return actualDentry->d_op->d_revalidate(actualDentry, nd); -+#endif - } - - if (compat_path_lookup(iinfo->name, 0, &actualNd)) { -diff --git a/modules/linux/vmblock/linux/file.c b/modules/linux/vmblock/linux/file.c -index d7ac1f6..513f2d5 100644 ---- a/modules/linux/vmblock/linux/file.c -+++ b/modules/linux/vmblock/linux/file.c -@@ -38,6 +38,7 @@ typedef u64 inode_num_t; - typedef ino_t inode_num_t; - #endif - -+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) - /* Specifically for our filldir_t callback */ - typedef struct FilldirInfo { - filldir_t filldir; -@@ -76,6 +77,7 @@ Filldir(void *buf, // IN: Dirent buffer passed from FileOpReaddir - /* Specify DT_LNK regardless */ - return 
info->filldir(info->dirent, name, namelen, offset, ino, DT_LNK); - } -+#endif - - - /* File operations */ -@@ -164,13 +166,21 @@ FileOpOpen(struct inode *inode, // IN - *---------------------------------------------------------------------------- - */ - -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) -+static int -+FileOpIterate(struct file *file, // IN -+ struct dir_context *ctx) // IN -+#else - static int - FileOpReaddir(struct file *file, // IN - void *dirent, // IN - filldir_t filldir) // IN -+#endif - { - int ret; -+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) - FilldirInfo info; -+#endif - struct file *actualFile; - - if (!file) { -@@ -184,12 +194,16 @@ FileOpReaddir(struct file *file, // IN - return -EINVAL; - } - -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) -+ ret = iterate_dir(actualFile, ctx); -+#else - info.filldir = filldir; - info.dirent = dirent; - - actualFile->f_pos = file->f_pos; - ret = vfs_readdir(actualFile, Filldir, &info); - file->f_pos = actualFile->f_pos; -+#endif - - return ret; - } -@@ -237,7 +251,11 @@ FileOpRelease(struct inode *inode, // IN - - - struct file_operations RootFileOps = { -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) -+ .iterate = FileOpIterate, -+#else - .readdir = FileOpReaddir, -+#endif - .open = FileOpOpen, - .release = FileOpRelease, - }; -diff --git a/modules/linux/vmblock/linux/inode.c b/modules/linux/vmblock/linux/inode.c -index 098c94c..37dc0fc 100644 ---- a/modules/linux/vmblock/linux/inode.c -+++ b/modules/linux/vmblock/linux/inode.c -@@ -36,7 +36,11 @@ - - /* Inode operations */ - static struct dentry *InodeOpLookup(struct inode *dir, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) -+ struct dentry *dentry, unsigned int flags); -+#else - struct dentry *dentry, struct nameidata *nd); -+#endif - static int InodeOpReadlink(struct dentry *dentry, char __user *buffer, int buflen); - #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) - static void *InodeOpFollowlink(struct dentry *dentry, struct nameidata *nd); -@@ -75,7 +79,11 @@ static struct inode_operations LinkInodeOps = { - static struct dentry * - InodeOpLookup(struct inode *dir, // IN: parent directory's inode - struct dentry *dentry, // IN: dentry to lookup -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) -+ unsigned int flags) // IN: lookup flags -+#else - struct nameidata *nd) // IN: lookup intent and information -+#endif - { - char *filename; - struct inode *inode; -@@ -206,7 +214,7 @@ static int - InodeOpFollowlink(struct dentry *dentry, // IN : dentry of symlink - struct nameidata *nd) // OUT: stores result - { -- int ret; -+ int ret = 0; - VMBlockInodeInfo *iinfo; - - if (!dentry) { -@@ -221,7 +229,7 @@ InodeOpFollowlink(struct dentry *dentry, // IN : dentry of symlink - goto out; - } - -- ret = vfs_follow_link(nd, iinfo->name); -+ nd_set_link(nd, iinfo->name); - - out: - #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) -diff --git a/modules/linux/vmhgfs/inode.c b/modules/linux/vmhgfs/inode.c -index 2999b94..f82a57b 100644 ---- a/modules/linux/vmhgfs/inode.c -+++ b/modules/linux/vmhgfs/inode.c -@@ -31,6 +31,9 @@ - #include <linux/namei.h> - #endif - #include <linux/highmem.h> -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) -+#include <linux/dcache.h> -+#endif - - #include "compat_cred.h" - #include "compat_fs.h" -@@ -1890,7 +1893,11 @@ HgfsPermission(struct inode *inode, - #endif - &inode->i_dentry, - d_alias) { -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) -+ int dcount = d_count(dentry); -+#else - int dcount = dentry->d_count; 
-+#endif - if (dcount) { - LOG(4, ("Found %s %d \n", dentry->d_name.name, dcount)); - return HgfsAccessInt(dentry, mask & (MAY_READ | MAY_WRITE | MAY_EXEC)); -@@ -1943,10 +1950,12 @@ HgfsPermission(struct inode *inode, - list_for_each(pos, &inode->i_dentry) { - int dcount; - struct dentry *dentry = list_entry(pos, struct dentry, d_alias); --#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) -- dcount = atomic_read(&dentry->d_count); --#else -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) -+ dcount = d_count(dentry); -+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38) - dcount = dentry->d_count; -+#else -+ dcount = atomic_read(&dentry->d_count); - #endif - if (dcount) { - LOG(4, ("Found %s %d \n", (dentry)->d_name.name, dcount)); -diff --git a/modules/linux/vmsync/sync.c b/modules/linux/vmsync/sync.c -index d05ccad..1869771 100644 ---- a/modules/linux/vmsync/sync.c -+++ b/modules/linux/vmsync/sync.c -@@ -162,7 +162,11 @@ VmSyncThawDevices(void *_state) // IN - cancel_delayed_work(&state->thawTask); - list_for_each_safe(cur, tmp, &state->devices) { - dev = list_entry(cur, VmSyncBlockDevice, list); -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) -+ if (dev->sb != NULL && dev->sb->s_writers.frozen != SB_UNFROZEN) { -+#else - if (dev->sb != NULL && dev->sb->s_frozen != SB_UNFROZEN) { -+#endif - thaw_bdev(dev->bdev, dev->sb); - atomic_dec(&gFreezeCount); - } -@@ -237,7 +241,11 @@ VmSyncAddPath(const VmSyncState *state, // IN - * the superblock is already frozen. - */ - if (inode->i_sb->s_bdev == NULL || -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) -+ inode->i_sb->s_writers.frozen != SB_UNFROZEN) { -+#else - inode->i_sb->s_frozen != SB_UNFROZEN) { -+#endif - result = (inode->i_sb->s_bdev == NULL) ? -EINVAL : -EALREADY; - compat_path_release(&nd); - goto exit; -@@ -303,7 +311,11 @@ VmSyncFreezeDevices(VmSyncState *state, // IN - const char __user *userPaths) // IN - { - int result = 0; -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) -+ struct filename *paths; -+#else - char *paths; -+#endif - char *currPath; - char *nextSep; - struct list_head *cur, *tmp; -@@ -328,7 +340,11 @@ VmSyncFreezeDevices(VmSyncState *state, // IN - /* - * First, try to add all paths to the list of paths to be frozen. 
- */ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) -+ currPath = (char *)paths->name; -+#else - currPath = paths; -+#endif - do { - nextSep = strchr(currPath, ':'); - if (nextSep != NULL) { -@@ -670,17 +686,24 @@ init_module(void) - } - - /* Create /proc/driver/vmware-sync */ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0) -+ controlProcEntry = proc_create("driver/vmware-sync", S_IFREG | S_IRUSR | S_IRGRP | S_IROTH, -+ NULL, &VmSyncFileOps); -+#else - controlProcEntry = create_proc_entry("driver/vmware-sync", - S_IFREG | S_IRUSR | S_IRGRP | S_IROTH, - NULL); -- if (!controlProcEntry) { -+#endif -+ if (controlProcEntry == NULL) { - printk(KERN_ERR "vmsync: could not create /proc/driver/vmware-sync\n"); - kmem_cache_destroy(gSyncStateCache); - kmem_cache_destroy(gBlockDeviceCache); - return -EINVAL; - } - -+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0) - controlProcEntry->proc_fops = &VmSyncFileOps; -+#endif - return 0; - } - --- -1.9.1 - diff --git a/main/open-vm-tools-grsec/linux-3.10.patch b/main/open-vm-tools-grsec/linux-3.10.patch deleted file mode 100644 index d3ce6f2c4b..0000000000 --- a/main/open-vm-tools-grsec/linux-3.10.patch +++ /dev/null @@ -1,16 +0,0 @@ -From: -http://sourceforge.net/p/open-vm-tools/tracker/173/ - ---- ./modules/linux/vmhgfs/file.c.orig 2013-08-05 12:29:23.915888235 +0000 -+++ ./modules/linux/vmhgfs/file.c 2013-08-05 12:35:46.056640104 +0000 -@@ -25,6 +25,10 @@ - /* Must come before any kernel header file. */ - #include "driver-config.h" - -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0) -+#include <linux/aio.h> -+#endif -+ - #include <linux/errno.h> - #include <linux/module.h> - #include <linux/signal.h> |