* [RFC 1/2] misc: Add vboxguest driver for Virtual Box Guest integration
2017-08-11 13:23 [RFC 0/2] Add Virtual Box vboxguest and vboxsf guest drivers to the mainline kernel Hans de Goede
@ 2017-08-11 13:23 ` Hans de Goede
2017-08-11 21:23 ` Arnd Bergmann
2017-08-11 13:23 ` [RFC 2/2] fs: Add VirtualBox guest shared folder (vboxsf) support Hans de Goede
` (2 subsequent siblings)
3 siblings, 1 reply; 18+ messages in thread
From: Hans de Goede @ 2017-08-11 13:23 UTC (permalink / raw)
To: Arnd Bergmann, Greg Kroah-Hartman, Alexander Viro
Cc: Hans de Goede, Michael Thayer, Knut St . Osmundsen, Larry Finger,
linux-kernel, linux-fsdevel
This commit adds a driver for the Virtual Box Guest PCI device used in
Virtual Box virtual machines. Enabling this driver will add support for
Virtual Box Guest integration features such as copy-and-paste, seamless
mode and OpenGL pass-through.
This driver also offers vboxguest IPC functionality which is needed
for the vboxsf driver which offers folder sharing support.
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
drivers/misc/Kconfig | 1 +
drivers/misc/Makefile | 1 +
drivers/misc/vboxguest/Kconfig | 16 +
drivers/misc/vboxguest/Makefile | 3 +
drivers/misc/vboxguest/vboxguest_core.c | 1818 ++++++++++++++++++++++++++++
drivers/misc/vboxguest/vboxguest_core.h | 194 +++
drivers/misc/vboxguest/vboxguest_linux.c | 464 +++++++
drivers/misc/vboxguest/vboxguest_utils.c | 1124 +++++++++++++++++
drivers/misc/vboxguest/vboxguest_version.h | 18 +
include/linux/vbox_err.h | 6 +
include/linux/vbox_ostypes.h | 6 +
include/linux/vbox_utils.h | 98 ++
include/linux/vbox_vmmdev.h | 128 ++
include/linux/vboxguest.h | 6 +
include/uapi/linux/vbox_err.h | 178 +++
include/uapi/linux/vbox_ostypes.h | 158 +++
include/uapi/linux/vbox_vmmdev.h | 1743 ++++++++++++++++++++++++++
include/uapi/linux/vboxguest.h | 374 ++++++
18 files changed, 6336 insertions(+)
create mode 100644 drivers/misc/vboxguest/Kconfig
create mode 100644 drivers/misc/vboxguest/Makefile
create mode 100644 drivers/misc/vboxguest/vboxguest_core.c
create mode 100644 drivers/misc/vboxguest/vboxguest_core.h
create mode 100644 drivers/misc/vboxguest/vboxguest_linux.c
create mode 100644 drivers/misc/vboxguest/vboxguest_utils.c
create mode 100644 drivers/misc/vboxguest/vboxguest_version.h
create mode 100644 include/linux/vbox_err.h
create mode 100644 include/linux/vbox_ostypes.h
create mode 100644 include/linux/vbox_utils.h
create mode 100644 include/linux/vbox_vmmdev.h
create mode 100644 include/linux/vboxguest.h
create mode 100644 include/uapi/linux/vbox_err.h
create mode 100644 include/uapi/linux/vbox_ostypes.h
create mode 100644 include/uapi/linux/vbox_vmmdev.h
create mode 100644 include/uapi/linux/vboxguest.h
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 8136dc7e863d..ca8bb2c4b62c 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -518,4 +518,5 @@ source "drivers/misc/mic/Kconfig"
source "drivers/misc/genwqe/Kconfig"
source "drivers/misc/echo/Kconfig"
source "drivers/misc/cxl/Kconfig"
+source "drivers/misc/vboxguest/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index b0b766416306..301b69224929 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
+obj-y += vboxguest/
lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o
diff --git a/drivers/misc/vboxguest/Kconfig b/drivers/misc/vboxguest/Kconfig
new file mode 100644
index 000000000000..e88ee46c31d4
--- /dev/null
+++ b/drivers/misc/vboxguest/Kconfig
@@ -0,0 +1,16 @@
+config VBOXGUEST
+ tristate "Virtual Box Guest integration support"
+ depends on X86 && PCI && INPUT
+ help
+ This is a driver for the Virtual Box Guest PCI device used in
+ Virtual Box virtual machines. Enabling this driver will add
+ support for Virtual Box Guest integration features such as
+ copy-and-paste, seamless mode and OpenGL pass-through.
+
+ This driver also offers vboxguest IPC functionality which is needed
+ for the vboxsf driver which offers folder sharing support.
+
+ Although it is possible to build this module in, it is advised
+ to build this driver as a module, so that it can be updated
+ independently of the kernel. Select M to build this driver as a
+ module.
diff --git a/drivers/misc/vboxguest/Makefile b/drivers/misc/vboxguest/Makefile
new file mode 100644
index 000000000000..203b8f465817
--- /dev/null
+++ b/drivers/misc/vboxguest/Makefile
@@ -0,0 +1,3 @@
+vboxguest-y := vboxguest_linux.o vboxguest_core.o vboxguest_utils.o
+
+obj-$(CONFIG_VBOXGUEST) += vboxguest.o
diff --git a/drivers/misc/vboxguest/vboxguest_core.c b/drivers/misc/vboxguest/vboxguest_core.c
new file mode 100644
index 000000000000..50d18a37222f
--- /dev/null
+++ b/drivers/misc/vboxguest/vboxguest_core.c
@@ -0,0 +1,1818 @@
+/*
+ * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
+ *
+ * Copyright (C) 2007-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/vbox_err.h>
+#include <linux/vbox_utils.h>
+#include <linux/vmalloc.h>
+#include "vboxguest_core.h"
+#include "vboxguest_version.h"
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+#define GUEST_MAPPINGS_TRIES 5
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static void vgdrvBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker);
+static int vgdrvResetEventFilterOnHost(PVBOXGUESTDEVEXT gdev,
+ u32 fFixedEvents);
+static int vgdrvResetCapabilitiesOnHost(PVBOXGUESTDEVEXT gdev);
+static int vgdrvSetSessionEventFilter(PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session,
+ u32 fOrMask, u32 fNotMask,
+ bool fSessionTermination);
+static int vgdrvSetSessionCapabilities(PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session,
+ u32 fOrMask, u32 fNoMask,
+ bool fSessionTermination);
+
+/**
+ * Reserves memory in which the VMM can relocate any guest mappings
+ * that are floating around.
+ *
+ * This operation is a little bit tricky since the VMM might not accept
+ * just any address because of address clashes between the three contexts
+ * it operates in, so we try several times.
+ *
+ * Failure to reserve the guest mappings is ignored.
+ *
+ * @param gdev The Guest extension device.
+ */
+static void vgdrvInitFixateGuestMappings(PVBOXGUESTDEVEXT gdev)
+{
+ VMMDevReqHypervisorInfo *req = NULL;
+ void *guest_mappings[GUEST_MAPPINGS_TRIES];
+ struct page **pages = NULL;
+ u32 size, hypervisor_size;
+ int i, rc;
+
+ /* Query the required space. */
+ req = vbg_req_alloc(sizeof(*req), VMMDevReq_GetHypervisorInfo);
+ if (!req)
+ return;
+
+ req->hypervisorStart = 0;
+ req->hypervisorSize = 0;
+ rc = vbg_req_perform(gdev, req);
+ if (rc < 0)
+ goto out;
+
+ /*
+ * The VMM will report back if there is nothing it wants to map, like
+ * for instance in VT-x and AMD-V mode.
+ */
+ if (req->hypervisorSize == 0)
+ goto out;
+
+ hypervisor_size = req->hypervisorSize;
+ /* Add 4M so that we can align the vmap to 4MiB as the host requires. */
+ size = PAGE_ALIGN(req->hypervisorSize) + SZ_4M;
+
+ /*
+ * Every slot of the vmap is backed by the same dummy page; only the
+ * virtual address range is reserved for the host, not real memory.
+ */
+ pages = kmalloc(sizeof(*pages) * (size >> PAGE_SHIFT), GFP_KERNEL);
+ if (!pages)
+ goto out;
+
+ gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
+ if (!gdev->guest_mappings_dummy_page)
+ goto out;
+
+ for (i = 0; i < (size >> PAGE_SHIFT); i++)
+ pages[i] = gdev->guest_mappings_dummy_page;
+
+ /* Try several times, the host can be picky about certain addresses. */
+ for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
+ guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
+ VM_MAP, PAGE_KERNEL_RO);
+ if (!guest_mappings[i])
+ break;
+
+ req->header.requestType = VMMDevReq_SetHypervisorInfo;
+ req->header.rc = VERR_INTERNAL_ERROR;
+ req->hypervisorSize = hypervisor_size;
+ req->hypervisorStart =
+ (unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);
+
+ rc = vbg_req_perform(gdev, req);
+ if (rc >= 0) {
+ gdev->guest_mappings = guest_mappings[i];
+ break;
+ }
+ }
+
+ /*
+ * Free vmap's from failed attempts. On success 'i' indexes the
+ * accepted mapping, so only earlier (rejected) attempts are unmapped.
+ */
+ while (--i >= 0)
+ vunmap(guest_mappings[i]);
+
+ /* On failure free the dummy-page backing the vmap */
+ if (!gdev->guest_mappings) {
+ __free_page(gdev->guest_mappings_dummy_page);
+ gdev->guest_mappings_dummy_page = NULL;
+ }
+
+out:
+ kfree(req);
+ kfree(pages);
+}
+
+/**
+ * Undo what vgdrvInitFixateGuestMappings did.
+ *
+ * @param gdev The Guest extension device.
+ */
+static void vgdrvTermUnfixGuestMappings(PVBOXGUESTDEVEXT gdev)
+{
+ VMMDevReqHypervisorInfo *req;
+ int rc;
+
+ if (!gdev->guest_mappings)
+ return;
+
+ /*
+ * Tell the host that we're going to free the memory we reserved for
+ * it, then free it up. (Leak the memory if anything goes wrong here.)
+ */
+ req = vbg_req_alloc(sizeof(*req), VMMDevReq_SetHypervisorInfo);
+ if (!req)
+ return;
+
+ /* Start/size of 0 tells the host we are releasing the reservation. */
+ req->hypervisorStart = 0;
+ req->hypervisorSize = 0;
+
+ rc = vbg_req_perform(gdev, req);
+
+ kfree(req);
+
+ if (rc < 0) {
+ vbg_err("vgdrvTermUnfixGuestMappings: vbg_req_perform error: %d\n",
+ rc);
+ return;
+ }
+
+ vunmap(gdev->guest_mappings);
+ gdev->guest_mappings = NULL;
+
+ __free_page(gdev->guest_mappings_dummy_page);
+ gdev->guest_mappings_dummy_page = NULL;
+}
+
+/**
+ * Report the guest information to the host.
+ *
+ * @returns 0 or negative errno value.
+ * @param gdev The Guest extension device.
+ */
+static int vbg_report_guest_info(PVBOXGUESTDEVEXT gdev)
+{
+ /*
+ * Allocate and fill in the two guest info reports.
+ */
+ VMMDevReportGuestInfo *req1 = NULL;
+ VMMDevReportGuestInfo2 *req2 = NULL;
+ int rc, ret = -ENOMEM;
+
+ req1 = vbg_req_alloc(sizeof(*req1), VMMDevReq_ReportGuestInfo);
+ req2 = vbg_req_alloc(sizeof(*req2), VMMDevReq_ReportGuestInfo2);
+ if (!req1 || !req2)
+ goto out_free;
+
+ req1->guestInfo.interfaceVersion = VMMDEV_VERSION;
+#ifdef CONFIG_X86_64
+ req1->guestInfo.osType = VBOXOSTYPE_Linux26_x64;
+#else
+ req1->guestInfo.osType = VBOXOSTYPE_Linux26;
+#endif
+
+ req2->guestInfo.additionsMajor = VBOX_VERSION_MAJOR;
+ req2->guestInfo.additionsMinor = VBOX_VERSION_MINOR;
+ req2->guestInfo.additionsBuild = VBOX_VERSION_BUILD;
+ req2->guestInfo.additionsRevision = VBOX_SVN_REV;
+ /* (no features defined yet) */
+ req2->guestInfo.additionsFeatures = 0;
+ strlcpy(req2->guestInfo.szName, VBOX_VERSION_STRING,
+ sizeof(req2->guestInfo.szName));
+
+ /*
+ * There are two protocols here:
+ * 1. Info2 + Info1. Supported by >=3.2.51.
+ * 2. Info1 and optionally Info2. The old protocol.
+ *
+ * We try protocol 2 first. It will fail with VERR_NOT_SUPPORTED
+ * if not supported by the VMMDev (message ordering requirement).
+ */
+ rc = vbg_req_perform(gdev, req2);
+ if (rc >= 0) {
+ rc = vbg_req_perform(gdev, req1);
+ } else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
+ rc = vbg_req_perform(gdev, req1);
+ if (rc >= 0) {
+ rc = vbg_req_perform(gdev, req2);
+ if (rc == VERR_NOT_IMPLEMENTED)
+ rc = VINF_SUCCESS;
+ }
+ }
+ /* Translate the final VBox status code into 0 / -errno. */
+ ret = -vbg_status_code_to_errno(rc);
+
+out_free:
+ kfree(req2);
+ kfree(req1);
+ return ret;
+}
+
+/**
+ * Report the guest driver status to the host.
+ *
+ * @returns 0 or negative errno value.
+ * @param gdev The Guest extension device.
+ * @param active Flag whether the driver is now active or not.
+ */
+static int vgdrvReportDriverStatus(PVBOXGUESTDEVEXT gdev, bool active)
+{
+ VMMDevReportGuestStatus *req;
+ int rc;
+
+ req = vbg_req_alloc(sizeof(*req), VMMDevReq_ReportGuestStatus);
+ if (!req)
+ return -ENOMEM;
+
+ req->guestStatus.facility = VBoxGuestFacilityType_VBoxGuestDriver;
+ req->guestStatus.status = active ? VBoxGuestFacilityStatus_Active :
+ VBoxGuestFacilityStatus_Inactive;
+ /* No facility-specific flags are defined for the driver facility. */
+ req->guestStatus.flags = 0;
+
+ rc = vbg_req_perform(gdev, req);
+ if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
+ rc = VINF_SUCCESS;
+
+ kfree(req);
+
+ return -vbg_status_code_to_errno(rc);
+}
+
+/** @name Memory Ballooning
+ * @{
+ */
+
+/**
+ * Inflate the balloon by one chunk.
+ *
+ * The caller owns the balloon mutex.
+ *
+ * @returns VBox status code
+ * @param gdev The Guest extension device.
+ * @param chunk_idx Index of the chunk.
+ */
+static int vbg_balloon_inflate(PVBOXGUESTDEVEXT gdev, u32 chunk_idx)
+{
+ VMMDevChangeMemBalloon *req = gdev->mem_balloon.change_req;
+ struct page **pages;
+ int i, rc;
+
+ pages = kmalloc(sizeof(*pages) * VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!pages)
+ return VERR_NO_MEMORY;
+
+ req->header.size = sizeof(*req);
+ req->inflate = true;
+ req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
+
+ for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
+ pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
+ if (!pages[i]) {
+ rc = VERR_NO_MEMORY;
+ goto out_error;
+ }
+
+ req->phys_page[i] = page_to_phys(pages[i]);
+ }
+
+ rc = vbg_req_perform(gdev, req);
+ if (rc < 0) {
+ vbg_err("vbg_balloon_inflate: vbg_req_perform error: %d\n", rc);
+ goto out_error;
+ }
+
+ /* The chunk's pages stay allocated until vbg_balloon_deflate(). */
+ gdev->mem_balloon.pages[chunk_idx] = pages;
+
+ return VINF_SUCCESS;
+
+out_error:
+ /*
+ * Frees pages [0, i): all pages allocated so far when alloc_page
+ * failed, or the whole chunk when the host request failed (the
+ * allocation loop then completed with i == CHUNK_PAGES).
+ */
+ while (--i >= 0)
+ __free_page(pages[i]);
+ kfree(pages);
+
+ return rc;
+}
+
+/**
+ * Deflate the balloon by one chunk.
+ *
+ * The caller owns the balloon mutex.
+ *
+ * @returns VBox status code
+ * @param gdev The Guest extension device.
+ * @param chunk_idx Index of the chunk.
+ */
+static int vbg_balloon_deflate(PVBOXGUESTDEVEXT gdev, u32 chunk_idx)
+{
+ VMMDevChangeMemBalloon *req = gdev->mem_balloon.change_req;
+ struct page **pages = gdev->mem_balloon.pages[chunk_idx];
+ int i, rc;
+
+ req->header.size = sizeof(*req);
+ req->inflate = false;
+ req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
+
+ for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
+ req->phys_page[i] = page_to_phys(pages[i]);
+
+ rc = vbg_req_perform(gdev, req);
+ if (rc < 0) {
+ vbg_err("vbg_balloon_deflate: vbg_req_perform error: %d\n", rc);
+ return rc;
+ }
+
+ /* Only release the pages after the host has given them back. */
+ for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
+ __free_page(pages[i]);
+ kfree(pages);
+ gdev->mem_balloon.pages[chunk_idx] = NULL;
+
+ return VINF_SUCCESS;
+}
+
+/**
+ * Cleanup the memory balloon of a session.
+ *
+ * Will request the balloon mutex, so it must be valid and the caller must not
+ * own it already.
+ *
+ * @param gdev The Guest extension device.
+ * @param session The session.
+ */
+static void vbg_balloon_close(PVBOXGUESTDEVEXT gdev, PVBOXGUESTSESSION session)
+{
+ int i, rc;
+
+ mutex_lock(&gdev->mem_balloon.mutex);
+
+ /* Only the session owning the balloon may tear it down. */
+ if (gdev->mem_balloon.owner == session) {
+ /* Deflate highest chunk first; stop on the first failure. */
+ for (i = gdev->mem_balloon.chunks - 1; i >= 0; i--) {
+ rc = vbg_balloon_deflate(gdev, i);
+ if (rc < 0)
+ break;
+
+ gdev->mem_balloon.chunks--;
+ }
+ gdev->mem_balloon.owner = NULL;
+ }
+
+ mutex_unlock(&gdev->mem_balloon.mutex);
+}
+
+/** @} */
+
+/** @name Heartbeat
+ * @{
+ */
+
+/**
+ * Callback for heartbeat timer.
+ */
+static void vbg_heartbeat_timer(unsigned long data)
+{
+ PVBOXGUESTDEVEXT gdev = (PVBOXGUESTDEVEXT)data;
+
+ vbg_req_perform(gdev, gdev->guest_heartbeat_req);
+ /*
+ * mod_timer() takes an absolute expiry time in jiffies. Passing only
+ * msecs_to_jiffies(interval) is a time far in the past, which made
+ * the timer re-fire immediately on every tick instead of once per
+ * heartbeat interval; add the current jiffies to get a relative
+ * timeout.
+ */
+ mod_timer(&gdev->heartbeat_timer,
+ jiffies + msecs_to_jiffies(gdev->heartbeat_interval_ms));
+}
+
+/**
+ * Configure the host to check guest's heartbeat
+ * and get heartbeat interval from the host.
+ *
+ * @returns 0 or negative errno value.
+ * @param gdev The Guest extension device.
+ * @param fEnabled Set true to enable guest heartbeat checks on host.
+ */
+static int vgdrvHeartbeatHostConfigure(PVBOXGUESTDEVEXT gdev, bool fEnabled)
+{
+ VMMDevReqHeartbeat *req;
+ int rc;
+
+ req = vbg_req_alloc(sizeof(*req), VMMDevReq_HeartbeatConfigure);
+ if (!req)
+ return -ENOMEM;
+
+ req->fEnabled = fEnabled;
+ req->cNsInterval = 0;
+ rc = vbg_req_perform(gdev, req);
+ /*
+ * NOTE(review): the ns -> ms conversion and the interval store happen
+ * even when the request failed or when disabling, overwriting
+ * heartbeat_interval_ms with a possibly meaningless value — confirm
+ * this is intentional (callers ignore the interval in those cases).
+ */
+ do_div(req->cNsInterval, 1000000); /* ns -> ms */
+ gdev->heartbeat_interval_ms = req->cNsInterval;
+ kfree(req);
+
+ return -vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Initializes the heartbeat timer.
+ *
+ * This feature may be disabled by the host.
+ *
+ * @returns 0 or negative errno value (ignored).
+ * @param gdev The Guest extension device.
+ */
+static int vgdrvHeartbeatInit(PVBOXGUESTDEVEXT gdev)
+{
+ int ret;
+
+ /* Make sure that heartbeat checking is disabled if we fail. */
+ ret = vgdrvHeartbeatHostConfigure(gdev, false);
+ if (ret < 0)
+ return ret;
+
+ /* Enabling also retrieves the host's heartbeat interval. */
+ ret = vgdrvHeartbeatHostConfigure(gdev, true);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Preallocate the request to use it from the timer callback because:
+ * 1) on Windows vbg_req_alloc must be called at IRQL <= APC_LEVEL
+ * and the timer callback runs at DISPATCH_LEVEL;
+ * 2) avoid repeated allocations.
+ */
+ gdev->guest_heartbeat_req = vbg_req_alloc(
+ sizeof(*gdev->guest_heartbeat_req),
+ VMMDevReq_GuestHeartbeat);
+ if (!gdev->guest_heartbeat_req)
+ return -ENOMEM;
+
+ vbg_info("vgdrvHeartbeatInit: Setting up heartbeat to trigger every %d milliseconds\n",
+ gdev->heartbeat_interval_ms);
+ /* Expiry 0 is in the past: fires at once to send the 1st heartbeat. */
+ mod_timer(&gdev->heartbeat_timer, 0);
+
+ return 0;
+}
+
+/** @} */
+
+/**
+ * vbg_query_host_version tries to get the host feature mask and version
+ * information (vbg_host_version).
+ *
+ * @returns 0 or negative errno value (ignored).
+ * @param gdev The Guest extension device.
+ */
+static int vbg_query_host_version(PVBOXGUESTDEVEXT gdev)
+{
+ VMMDevReqHostVersion *req;
+ int rc, ret;
+
+ req = vbg_req_alloc(sizeof(*req), VMMDevReq_GetHostVersion);
+ if (!req)
+ return -ENOMEM;
+
+ rc = vbg_req_perform(gdev, req);
+ ret = -vbg_status_code_to_errno(rc);
+ if (ret)
+ goto out;
+
+ vbg_info("vboxguest: host-version: %u.%u.%ur%u %#x\n",
+ req->major, req->minor, req->build,
+ req->revision, req->features);
+
+ /* The HGCM code in this driver only supports page-list buffers. */
+ if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
+ vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
+ ret = -ENODEV;
+ }
+
+out:
+ kfree(req);
+ return ret;
+}
+
+/**
+ * Initializes the VBoxGuest device extension when the
+ * device driver is loaded.
+ *
+ * The native code locates the VMMDev on the PCI bus and retrieve
+ * the MMIO and I/O port ranges, this function will take care of
+ * mapping the MMIO memory (if present). Upon successful return
+ * the native code should set up the interrupt handler.
+ *
+ * @returns 0 or negative errno value.
+ *
+ * @param gdev The Guest extension device.
+ * @param fixed_events Events that will be enabled upon init and no client
+ * will ever be allowed to mask.
+ */
+int vbg_core_init(PVBOXGUESTDEVEXT gdev, u32 fixed_events)
+{
+ int ret = -ENOMEM;
+
+ gdev->fFixedEvents = fixed_events | VMMDEV_EVENT_HGCM;
+ gdev->fEventFilterHost = U32_MAX; /* forces a report */
+ gdev->fGuestCapsHost = U32_MAX; /* forces a report */
+
+ init_waitqueue_head(&gdev->event_wq);
+ init_waitqueue_head(&gdev->hgcm_wq);
+ INIT_LIST_HEAD(&gdev->session_list);
+ spin_lock_init(&gdev->event_spinlock);
+ spin_lock_init(&gdev->session_spinlock);
+ mutex_init(&gdev->cancel_req_mutex);
+ mutex_init(&gdev->mem_balloon.mutex);
+ setup_timer(&gdev->heartbeat_timer, vbg_heartbeat_timer,
+ (unsigned long)gdev);
+
+ vgdrvBitUsageTrackerClear(&gdev->EventFilterTracker);
+ vgdrvBitUsageTrackerClear(&gdev->SetGuestCapsTracker);
+
+ /*
+ * Preallocate the requests used from atomic / IRQ context so no
+ * allocation is needed there later.
+ */
+ gdev->mem_balloon.change_req =
+ vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
+ VMMDevReq_ChangeMemBalloon);
+ gdev->cancel_req =
+ vbg_req_alloc(sizeof(*(gdev->cancel_req)),
+ VMMDevReq_HGCMCancel2);
+ gdev->pIrqAckEvents =
+ vbg_req_alloc(sizeof(*gdev->pIrqAckEvents),
+ VMMDevReq_AcknowledgeEvents);
+ gdev->mouse_status_req =
+ vbg_req_alloc(sizeof(*gdev->mouse_status_req),
+ VMMDevReq_GetMouseStatus);
+
+ if (!gdev->mem_balloon.change_req || !gdev->cancel_req ||
+ !gdev->pIrqAckEvents || !gdev->mouse_status_req)
+ goto err_free_reqs;
+
+ ret = vbg_query_host_version(gdev);
+ if (ret)
+ goto err_free_reqs;
+
+ ret = vbg_report_guest_info(gdev);
+ if (ret) {
+ vbg_err("vboxguest: VBoxReportGuestInfo error: %d\n", ret);
+ goto err_free_reqs;
+ }
+
+ ret = vgdrvResetEventFilterOnHost(gdev, gdev->fFixedEvents);
+ if (ret) {
+ vbg_err("vboxguest: Error setting fixed event filter: %d\n", ret);
+ goto err_free_reqs;
+ }
+
+ ret = vgdrvResetCapabilitiesOnHost(gdev);
+ if (ret) {
+ vbg_err("vboxguest: Error clearing guest capabilities: %d\n", ret);
+ goto err_free_reqs;
+ }
+
+ ret = vbg_core_set_mouse_status(gdev, 0);
+ if (ret) {
+ vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
+ goto err_free_reqs;
+ }
+
+ /* These may fail without requiring the driver init to fail. */
+ vgdrvInitFixateGuestMappings(gdev);
+ vgdrvHeartbeatInit(gdev);
+
+ /* All Done! */
+ ret = vgdrvReportDriverStatus(gdev, true);
+ if (ret < 0)
+ vbg_err("vboxguest: VBoxReportGuestDriverStatus error: %d\n",
+ ret);
+
+ return 0;
+
+err_free_reqs:
+ /*
+ * NOTE(review): the freed pointers are not reset to NULL here, so the
+ * caller must not call vbg_core_exit() after a failed init — confirm
+ * against vboxguest_linux.c.
+ */
+ kfree(gdev->mouse_status_req);
+ kfree(gdev->pIrqAckEvents);
+ kfree(gdev->cancel_req);
+ kfree(gdev->mem_balloon.change_req);
+ return ret;
+}
+
+/**
+ * Call this on exit to clean-up vboxguest-core managed resources.
+ *
+ * The native code should call this before the driver is loaded,
+ * but don't call this on shutdown.
+ *
+ * @param gdev The Guest extension device.
+ */
+void vbg_core_exit(PVBOXGUESTDEVEXT gdev)
+{
+ /* Stop HB timer and disable host heartbeat checking. */
+ del_timer_sync(&gdev->heartbeat_timer);
+ vgdrvHeartbeatHostConfigure(gdev, false);
+ kfree(gdev->guest_heartbeat_req);
+
+ /* Clean up the bits that involves the host first. */
+ vgdrvTermUnfixGuestMappings(gdev);
+
+ /* Clear the host flags (mouse status etc). */
+ vgdrvResetEventFilterOnHost(gdev, 0);
+ vgdrvResetCapabilitiesOnHost(gdev);
+ vbg_core_set_mouse_status(gdev, 0);
+
+ /*
+ * Free all four requests preallocated by vbg_core_init(); the
+ * original code missed mouse_status_req, leaking it on unload.
+ */
+ kfree(gdev->mouse_status_req);
+ kfree(gdev->pIrqAckEvents);
+ kfree(gdev->cancel_req);
+ kfree(gdev->mem_balloon.change_req);
+}
+
+/**
+ * Creates a VBoxGuest user session.
+ *
+ * vboxguest_linux.c calls this when userspace opens the char-device.
+ *
+ * @returns 0 or negative errno value.
+ * @param gdev The Guest extension device.
+ * @param session_ret Where to store the session on success.
+ * @param user_session Set if this is a session for the vboxuser device.
+ */
+int vbg_core_open_session(PVBOXGUESTDEVEXT gdev,
+ VBOXGUESTSESSION **session_ret, bool user_session)
+{
+ unsigned long irq_flags;
+ VBOXGUESTSESSION *new_session;
+
+ new_session = kzalloc(sizeof(*new_session), GFP_KERNEL);
+ if (!new_session)
+ return -ENOMEM;
+
+ /* Remember which device this session belongs to and how privileged
+ * it is (vboxuser device sessions are unprivileged). */
+ new_session->user_session = user_session;
+ new_session->gdev = gdev;
+
+ /* Make the session visible to the rest of the driver. */
+ spin_lock_irqsave(&gdev->session_spinlock, irq_flags);
+ list_add(&new_session->list_node, &gdev->session_list);
+ spin_unlock_irqrestore(&gdev->session_spinlock, irq_flags);
+
+ *session_ret = new_session;
+ return 0;
+}
+
+/**
+ * Closes a VBoxGuest session.
+ *
+ * @param session The session to close (and free).
+ */
+void vbg_core_close_session(VBOXGUESTSESSION *session)
+{
+ PVBOXGUESTDEVEXT gdev = session->gdev;
+ unsigned long flags;
+ unsigned i;
+
+ spin_lock_irqsave(&gdev->session_spinlock, flags);
+ list_del(&session->list_node);
+ spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+
+ /* fNotMask == U32_MAX drops all caps / filter bits of this session. */
+ vgdrvSetSessionCapabilities(gdev, session, 0 /*fOrMask */ ,
+ U32_MAX /*fNotMask */ ,
+ true /*fSessionTermination */ );
+ vgdrvSetSessionEventFilter(gdev, session, 0 /*fOrMask */ ,
+ U32_MAX /*fNotMask */ ,
+ true /*fSessionTermination */ );
+
+ /* Tear down any HGCM connections left open by the session. */
+ for (i = 0; i < ARRAY_SIZE(session->aHGCMClientIds); i++) {
+ if (session->aHGCMClientIds[i])
+ vbg_hgcm_disconnect(gdev, session->aHGCMClientIds[i]);
+ }
+
+ vbg_balloon_close(gdev, session);
+ kfree(session);
+}
+
+/**
+ * Used by VGDrvCommonISR as well as the acquire guest capability code.
+ * The caller must held the event_spinlock.
+ *
+ * @param gdev The VBoxGuest device extension.
+ * @param events The events to dispatch.
+ */
+static void vbg_dispatch_events_locked(PVBOXGUESTDEVEXT gdev, u32 events)
+{
+ gdev->f32PendingEvents |= events;
+
+ /* Wake vbg_ioctl_wait_event() sleepers to re-check their masks. */
+ wake_up(&gdev->event_wq);
+}
+
+/*
+ * Wake-up condition for vbg_ioctl_wait_event(): true when one of the
+ * requested events is pending or the session's waiters were cancelled.
+ */
+static bool vbg_wait_event_cond(PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session,
+ VBoxGuestWaitEventInfo * info)
+{
+ unsigned long irq_flags;
+ bool wake;
+
+ spin_lock_irqsave(&gdev->event_spinlock, irq_flags);
+ wake = session->cancel_waiters ||
+ (gdev->f32PendingEvents & info->u32EventMaskIn) != 0;
+ spin_unlock_irqrestore(&gdev->event_spinlock, irq_flags);
+
+ return wake;
+}
+
+/*
+ * Claim the pending events matching the caller's mask and clear them from
+ * the device-wide pending set. Must be called with the event_lock held.
+ */
+static u32 vbg_consume_events_locked(PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session,
+ VBoxGuestWaitEventInfo * info)
+{
+ u32 pending = gdev->f32PendingEvents;
+ u32 consumed = pending & info->u32EventMaskIn;
+
+ gdev->f32PendingEvents = pending & ~consumed;
+ return consumed;
+}
+
+static int vbg_ioctl_wait_event(PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session,
+ VBoxGuestWaitEventInfo *info,
+ size_t *info_len_ret)
+{
+ unsigned long flags;
+ long timeout;
+ int rc = VINF_SUCCESS;
+
+ /* U32_MAX means wait forever. */
+ if (info->u32TimeoutIn == U32_MAX)
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ else
+ timeout = msecs_to_jiffies(info->u32TimeoutIn);
+
+ info->u32Result = VBOXGUEST_WAITEVENT_OK;
+ info->u32EventFlagsOut = 0;
+
+ if (info_len_ret)
+ *info_len_ret = sizeof(*info);
+
+ do {
+ timeout = wait_event_interruptible_timeout(
+ gdev->event_wq,
+ vbg_wait_event_cond(gdev, session, info),
+ timeout);
+
+ spin_lock_irqsave(&gdev->event_spinlock, flags);
+
+ /* timeout < 0 means the wait was interrupted by a signal. */
+ if (timeout < 0 || session->cancel_waiters) {
+ info->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
+ rc = VERR_INTERRUPTED;
+ } else if (timeout == 0) {
+ info->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
+ rc = VERR_TIMEOUT;
+ } else {
+ info->u32EventFlagsOut =
+ vbg_consume_events_locked(gdev, session, info);
+ }
+
+ spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+ /*
+ * Someone else may have consumed the event(s) first, in
+ * which case we go back to waiting.
+ */
+ } while (rc == VINF_SUCCESS && info->u32EventFlagsOut == 0);
+
+ return rc;
+}
+
+static int vbg_ioctl_cancel_all_wait_events(PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&gdev->event_spinlock, flags);
+ session->cancel_waiters = true;
+ spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+ /*
+ * Wake the waitqueue so sleeping vbg_ioctl_wait_event() callers
+ * re-evaluate vbg_wait_event_cond() and see cancel_waiters. Without
+ * this wake-up, the cancel only took effect on the next unrelated
+ * event, leaving waiters blocked indefinitely.
+ */
+ wake_up(&gdev->event_wq);
+
+ return VINF_SUCCESS;
+}
+
+/**
+ * Checks if the VMM request is allowed in the context of the given session.
+ *
+ * @returns VBox status code
+ * @param gdev The Guest extension device.
+ * @param session The calling session.
+ * @param req The request.
+ */
+static int vbg_req_allowed(PVBOXGUESTDEVEXT gdev, PVBOXGUESTSESSION session,
+ VMMDevRequestHeader const *req)
+{
+ const VMMDevReportGuestStatus *guest_status;
+ bool trusted_apps_only;
+
+ switch (req->requestType) {
+ /* Trusted users apps only. */
+ case VMMDevReq_QueryCredentials:
+ case VMMDevReq_ReportCredentialsJudgement:
+ case VMMDevReq_RegisterSharedModule:
+ case VMMDevReq_UnregisterSharedModule:
+ case VMMDevReq_WriteCoreDump:
+ case VMMDevReq_GetCpuHotPlugRequest:
+ case VMMDevReq_SetCpuHotPlugStatus:
+ case VMMDevReq_CheckSharedModules:
+ case VMMDevReq_GetPageSharingStatus:
+ case VMMDevReq_DebugIsPageShared:
+ case VMMDevReq_ReportGuestStats:
+ case VMMDevReq_ReportGuestUserState:
+ case VMMDevReq_GetStatisticsChangeRequest:
+ case VMMDevReq_ChangeMemBalloon:
+ trusted_apps_only = true;
+ break;
+
+ /* Anyone. */
+ case VMMDevReq_GetMouseStatus:
+ case VMMDevReq_SetMouseStatus:
+ case VMMDevReq_SetPointerShape:
+ case VMMDevReq_GetHostVersion:
+ case VMMDevReq_Idle:
+ case VMMDevReq_GetHostTime:
+ case VMMDevReq_SetPowerStatus:
+ case VMMDevReq_AcknowledgeEvents:
+ case VMMDevReq_CtlGuestFilterMask:
+ case VMMDevReq_ReportGuestStatus:
+ case VMMDevReq_GetDisplayChangeRequest:
+ case VMMDevReq_VideoModeSupported:
+ case VMMDevReq_GetHeightReduction:
+ case VMMDevReq_GetDisplayChangeRequest2:
+ case VMMDevReq_VideoModeSupported2:
+ case VMMDevReq_VideoAccelEnable:
+ case VMMDevReq_VideoAccelFlush:
+ case VMMDevReq_VideoSetVisibleRegion:
+ case VMMDevReq_GetDisplayChangeRequestEx:
+ case VMMDevReq_GetSeamlessChangeRequest:
+ case VMMDevReq_GetVRDPChangeRequest:
+ case VMMDevReq_LogString:
+ case VMMDevReq_GetSessionId:
+ trusted_apps_only = false;
+ break;
+
+ /**
+ * @todo this has to be changed into an I/O control and the facilities
+ * tracked in the session so they can automatically be failed when
+ * the session terminates without reporting the new status.
+ *
+ * The information presented by IGuest is not reliable without this!
+ */
+ /* Depends on the request parameters... */
+ case VMMDevReq_ReportGuestCapabilities:
+ guest_status = (const VMMDevReportGuestStatus *)req;
+ switch (guest_status->guestStatus.facility) {
+ /* Never allow userspace to report for the driver itself. */
+ case VBoxGuestFacilityType_All:
+ case VBoxGuestFacilityType_VBoxGuestDriver:
+ vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
+ guest_status->guestStatus.facility);
+ return VERR_PERMISSION_DENIED;
+ case VBoxGuestFacilityType_VBoxService:
+ trusted_apps_only = true;
+ break;
+ case VBoxGuestFacilityType_VBoxTrayClient:
+ case VBoxGuestFacilityType_Seamless:
+ case VBoxGuestFacilityType_Graphics:
+ default:
+ trusted_apps_only = false;
+ break;
+ }
+ break;
+
+ /* Anything else is not allowed. */
+ default:
+ vbg_err("Denying userspace vmm call type %#08x\n",
+ req->requestType);
+ return VERR_PERMISSION_DENIED;
+ }
+
+ /* Sessions from the vboxuser device node count as untrusted. */
+ if (trusted_apps_only && session->user_session) {
+ vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
+ req->requestType);
+ return VERR_PERMISSION_DENIED;
+ }
+
+ return VINF_SUCCESS;
+}
+
+/**
+ * Handles the VMM-request ioctl: verifies the request buffer, checks the
+ * session is permitted to submit this request type, and performs it.
+ *
+ * @returns VBox status code; on success *req_size_ret is set to the size of
+ * the (possibly updated) request to copy back to userspace.
+ */
+static int vgdrvIoCtl_VMMRequest(PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session,
+ VMMDevRequestHeader *req, size_t req_size,
+ size_t *req_size_ret)
+{
+ int rc;
+
+ rc = vbg_req_verify(req, req_size);
+ if (rc < 0)
+ return rc;
+
+ rc = vbg_req_allowed(gdev, session, req);
+ if (rc < 0)
+ return rc;
+
+ rc = vbg_req_perform(gdev, req);
+ if (rc >= 0) {
+ /* Async HGCM execution must never leak out of this path. */
+ WARN_ON(rc == VINF_HGCM_ASYNC_EXECUTE);
+ *req_size_ret = req->size;
+ }
+
+ return rc;
+}
+
+static int vgdrvIoCtl_HGCMConnect(PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session,
+ VBoxGuestHGCMConnectInfo * pInfo,
+ size_t *info_size_ret)
+{
+ unsigned long flags;
+ u32 client_id;
+ int i, rc;
+
+ /*
+ * Find a free place in the sessions clients array and claim it.
+ * U32_MAX marks the slot as reserved while the (sleeping) connect
+ * call is in flight; a real client id or 0 replaces it afterwards.
+ */
+ spin_lock_irqsave(&gdev->session_spinlock, flags);
+ for (i = 0; i < ARRAY_SIZE(session->aHGCMClientIds); i++) {
+ if (!session->aHGCMClientIds[i]) {
+ session->aHGCMClientIds[i] = U32_MAX;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+
+ if (i >= ARRAY_SIZE(session->aHGCMClientIds))
+ return VERR_TOO_MANY_OPEN_FILES;
+
+ rc = vbg_hgcm_connect(gdev, &pInfo->Loc, &client_id);
+
+ spin_lock_irqsave(&gdev->session_spinlock, flags);
+ if (rc >= 0) {
+ pInfo->result = VINF_SUCCESS;
+ pInfo->u32ClientID = client_id;
+ *info_size_ret = sizeof(*pInfo);
+
+ session->aHGCMClientIds[i] = client_id;
+ } else {
+ /* Release the reserved slot on failure. */
+ session->aHGCMClientIds[i] = 0;
+ }
+ spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+
+ return rc;
+}
+
+/**
+ * Handle VBOXGUEST_IOCTL_HGCM_DISCONNECT.
+ *
+ * Disconnects the given HGCM client id and removes it from the session's
+ * client id tracking array.
+ *
+ * @returns VBox status code
+ * @param gdev           The Guest extension device.
+ * @param session        The calling session.
+ * @param pInfo          The client id to disconnect (in) / result (out).
+ * @param info_size_ret  Where to store the amount of returned data.
+ */
+static int vgdrvIoCtl_HGCMDisconnect(PVBOXGUESTDEVEXT gdev,
+				     PVBOXGUESTSESSION session,
+				     VBoxGuestHGCMDisconnectInfo * pInfo,
+				     size_t *info_size_ret)
+{
+	u32 client_id = pInfo->u32ClientID;
+	unsigned long flags;
+	int i, rc;
+
+	/* 0 is "free slot" and U32_MAX is "slot reserved", both invalid. */
+	if (client_id == 0 || client_id == U32_MAX)
+		return VERR_INVALID_HANDLE;
+
+	/* Mark the slot as in-flight while we disconnect without the lock. */
+	spin_lock_irqsave(&gdev->session_spinlock, flags);
+	for (i = 0; i < ARRAY_SIZE(session->aHGCMClientIds); i++) {
+		if (session->aHGCMClientIds[i] == client_id) {
+			session->aHGCMClientIds[i] = U32_MAX;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+
+	if (i >= ARRAY_SIZE(session->aHGCMClientIds))
+		return VERR_INVALID_HANDLE;
+
+	rc = vbg_hgcm_disconnect(gdev, client_id);
+
+	spin_lock_irqsave(&gdev->session_spinlock, flags);
+	if (rc >= 0) {
+		pInfo->result = VINF_SUCCESS;
+		*info_size_ret = sizeof(*pInfo);
+
+		session->aHGCMClientIds[i] = 0;
+	} else {
+		/* Disconnect failed, restore the id so it can be retried. */
+		session->aHGCMClientIds[i] = client_id;
+	}
+	spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+
+	return rc;
+}
+
+/**
+ * Handle VBOXGUEST_IOCTL_HGCM_CALL(_TIMED)(_32).
+ *
+ * @returns VBox status code
+ * @param gdev           The Guest extension device.
+ * @param session        The calling session.
+ * @param pInfo          The call header, followed by the parameters.
+ * @param cMillies       Timeout in ms, U32_MAX for the non-timed variants.
+ * @param f32bit         Set for calls using the 32-bit parameter layout
+ *                       (32-bit userspace on a 64-bit kernel).
+ * @param cbExtra        Bytes preceding pInfo in the ioctl buffer (the
+ *                       timed-call header, or 0).
+ * @param cbData         The max size of the ioctl data buffer.
+ * @param info_size_ret  Where to store the amount of returned data.
+ */
+static int vgdrvIoCtl_HGCMCall(PVBOXGUESTDEVEXT gdev,
+			       PVBOXGUESTSESSION session,
+			       VBoxGuestHGCMCallInfo *pInfo, u32 cMillies,
+			       bool f32bit, size_t cbExtra, size_t cbData,
+			       size_t *info_size_ret)
+{
+	u32 cbInfo, client_id = pInfo->u32ClientID;
+	unsigned long flags;
+	size_t cbActual;
+	unsigned i;
+	int rc;
+
+	if (pInfo->cParms > VBOX_HGCM_MAX_PARMS)
+		return VERR_INVALID_PARAMETER;
+
+	/* 0 is "free slot" and U32_MAX is "slot reserved", both invalid. */
+	if (client_id == 0 || client_id == U32_MAX)
+		return VERR_INVALID_HANDLE;
+
+	/* The buffer must hold the header plus cParms parameter structs. */
+	cbActual = cbExtra + sizeof(*pInfo);
+	if (f32bit)
+		cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
+	else
+		cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
+	if (cbData < cbActual) {
+		vbg_debug("VBOXGUEST_IOCTL_HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
+			  cbData, cbData, cbActual, cbActual);
+		return VERR_INVALID_PARAMETER;
+	}
+
+	/*
+	 * Validate the client id.
+	 */
+	spin_lock_irqsave(&gdev->session_spinlock, flags);
+	for (i = 0; i < ARRAY_SIZE(session->aHGCMClientIds); i++)
+		if (session->aHGCMClientIds[i] == client_id)
+			break;
+	spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+	if (i >= ARRAY_SIZE(session->aHGCMClientIds)) {
+		vbg_debug("VBOXGUEST_IOCTL_HGCM_CALL: Invalid handle. u32Client=%#08x\n",
+			  client_id);
+		return VERR_INVALID_HANDLE;
+	}
+
+	cbInfo = (u32) (cbData - cbExtra);
+
+	if (f32bit)
+		rc = vbg_hgcm_call32(gdev, pInfo, cbInfo, cMillies, true);
+	else
+		rc = vbg_hgcm_call(gdev, pInfo, cbInfo, cMillies, true);
+
+	if (rc >= 0) {
+		*info_size_ret = cbActual;
+	} else if (rc == VERR_INTERRUPTED || rc == VERR_TIMEOUT ||
+		   rc == VERR_OUT_OF_RANGE) {
+		/* Expected failure modes, only log at debug level. */
+		vbg_debug("VBOXGUEST_IOCTL_HGCM_CALL%s error: %d\n",
+			  f32bit ? "32" : "64", rc);
+	} else {
+		vbg_err("VBOXGUEST_IOCTL_HGCM_CALL%s error: %d\n",
+			f32bit ? "32" : "64", rc);
+	}
+	return rc;
+}
+
+/**
+ * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
+ *
+ * Ask the host for the size of the balloon and try to set it accordingly.
+ *
+ * @returns VBox status code
+ *
+ * @param gdev The Guest extension device.
+ * @param session The session.
+ * @param info The output buffer.
+ * @param info_size_ret Where to store the amount of returned data.
+ */
+static int vgdrvIoCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT gdev,
+					 PVBOXGUESTSESSION session,
+					 VBoxGuestCheckBalloonInfo *info,
+					 size_t *info_size_ret)
+{
+	VMMDevGetMemBalloonChangeRequest *req = NULL;
+	u32 i, chunks;
+	int rc;
+
+	mutex_lock(&gdev->mem_balloon.mutex);
+
+	/*
+	 * The first user trying to query/change the balloon becomes the
+	 * owner and owns it until the session is closed (vgdrvCloseMemBalloon).
+	 */
+	if (gdev->mem_balloon.owner && gdev->mem_balloon.owner != session) {
+		rc = VERR_PERMISSION_DENIED;
+		goto out;
+	}
+
+	req = vbg_req_alloc(sizeof(*req), VMMDevReq_GetMemBalloonChangeRequest);
+	if (!req) {
+		rc = VERR_NO_MEMORY;
+		goto out;
+	}
+
+	/*
+	 * Setting this bit means that we request the value from the host and
+	 * change the guest memory balloon according to the returned value.
+	 */
+	req->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0)
+		goto out;
+
+	/*
+	 * The host always returns the same maximum amount of chunks, so
+	 * we do this once.
+	 */
+	if (!gdev->mem_balloon.max_chunks) {
+		gdev->mem_balloon.pages =
+			devm_kcalloc(gdev->dev, req->cPhysMemChunks,
+				     sizeof(struct page **), GFP_KERNEL);
+		if (!gdev->mem_balloon.pages) {
+			rc = VERR_NO_MEMORY;
+			goto out;
+		}
+		gdev->mem_balloon.max_chunks = req->cPhysMemChunks;
+	}
+
+	chunks = req->cBalloonChunks;
+	if (chunks > gdev->mem_balloon.max_chunks) {
+		vbg_err("VBOXGUEST_IOCTL_CHECK_BALLOON: illegal balloon size %u (max=%u)\n",
+			chunks, gdev->mem_balloon.max_chunks);
+		rc = VERR_INVALID_PARAMETER;
+		goto out;
+	}
+
+	/* Use the bounds-checked 'chunks' copy from here on. */
+	if (chunks > gdev->mem_balloon.chunks) {
+		/* inflate */
+		for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
+			rc = vbg_balloon_inflate(gdev, i);
+			if (rc < 0) {
+				/* Failure to alloc memory is not an error */
+				if (rc == VERR_NO_MEMORY)
+					rc = VINF_SUCCESS;
+				break;
+			}
+			gdev->mem_balloon.chunks++;
+		}
+	} else {
+		/* deflate */
+		for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
+			rc = vbg_balloon_deflate(gdev, i);
+			if (rc < 0)
+				break;
+
+			gdev->mem_balloon.chunks--;
+		}
+	}
+
+	/* Report the (possibly only partially) adjusted size back to R3. */
+	info->cBalloonChunks = gdev->mem_balloon.chunks;
+	/* Under Linux we always handle the balloon in R0 / in the kernel */
+	info->fHandleInR3 = false;
+	*info_size_ret = sizeof(VBoxGuestCheckBalloonInfo);
+
+	gdev->mem_balloon.owner = session;
+out:
+	kfree(req);
+	mutex_unlock(&gdev->mem_balloon.mutex);
+	return rc;
+}
+
+/**
+ * Handle a request for writing a core dump of the guest on the host.
+ *
+ * @returns VBox status code
+ * @param gdev   The Guest extension device.
+ * @param pInfo  The input buffer holding the dump flags.
+ */
+static int vgdrvIoCtl_WriteCoreDump(PVBOXGUESTDEVEXT gdev,
+				    VBoxGuestWriteCoreDump * pInfo)
+{
+	VMMDevReqWriteCoreDump *dump_req;
+	int rc;
+
+	dump_req = vbg_req_alloc(sizeof(*dump_req), VMMDevReq_WriteCoreDump);
+	if (!dump_req)
+		return VERR_NO_MEMORY;
+
+	dump_req->fFlags = pInfo->fFlags;
+	rc = vbg_req_perform(gdev, dump_req);
+	kfree(dump_req);
+
+	return rc;
+}
+
+/** @name Guest Capabilities, Mouse Status and Event Filter
+ * @{
+ */
+
+/**
+ * Clears a bit usage tracker (init time).
+ *
+ * Resets all 32 per-bit usage counters and the cached mask to zero.
+ *
+ * @param pTracker The tracker to clear.
+ */
+static void vgdrvBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker)
+{
+	u32 i = 0;
+
+	while (i < 32)
+		pTracker->acPerBitUsage[i++] = 0;
+
+	pTracker->fMask = 0;
+}
+
+/**
+ * Applies a change to the bit usage tracker.
+ *
+ * Each bit carries a usage count (number of sessions using it); a bit is
+ * set in the tracker's fMask exactly while its count is non-zero.
+ *
+ * @returns true if the mask changed, false if not.
+ * @param pTracker The bit usage tracker.
+ * @param changed The bits to change.
+ * @param previous The previous value of the bits.
+ */
+static bool vgdrvBitUsageTrackerChange(PVBOXGUESTBITUSAGETRACER pTracker,
+				       u32 changed, u32 previous)
+{
+	bool global_change = false;
+
+	while (changed) {
+		/* Handle the lowest set bit of 'changed' each iteration. */
+		u32 const iBit = ffs(changed) - 1;
+		u32 const fBitMask = BIT(iBit);
+
+		if (fBitMask & previous) {
+			/* The bit was set before, so the change clears it. */
+			pTracker->acPerBitUsage[iBit] -= 1;
+			if (pTracker->acPerBitUsage[iBit] == 0) {
+				global_change = true;
+				pTracker->fMask &= ~fBitMask;
+			}
+		} else {
+			/* The bit was clear before, so the change sets it. */
+			pTracker->acPerBitUsage[iBit] += 1;
+			if (pTracker->acPerBitUsage[iBit] == 1) {
+				global_change = true;
+				pTracker->fMask |= fBitMask;
+			}
+		}
+
+		changed &= ~fBitMask;
+	}
+
+	return global_change;
+}
+
+/**
+ * Init and termination worker for resetting the (host) event filter on the host
+ *
+ * @returns 0 or negative errno value.
+ * @param gdev The Guest extension device.
+ * @param fFixedEvents Fixed events (init time).
+ */
+static int vgdrvResetEventFilterOnHost(PVBOXGUESTDEVEXT gdev,
+				       u32 fFixedEvents)
+{
+	VMMDevCtlGuestFilterMask *filter_req;
+	int rc;
+
+	filter_req = vbg_req_alloc(sizeof(*filter_req),
+				   VMMDevReq_CtlGuestFilterMask);
+	if (!filter_req)
+		return -ENOMEM;
+
+	/* Keep only the fixed events enabled, clear everything else. */
+	filter_req->u32OrMask = fFixedEvents;
+	filter_req->u32NotMask = U32_MAX & ~fFixedEvents;
+
+	rc = vbg_req_perform(gdev, filter_req);
+	kfree(filter_req);
+	if (rc < 0)
+		vbg_err("vgdrvResetEventFilterOnHost error: %d\n", rc);
+
+	return -vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Changes the event filter mask for the given session.
+ *
+ * This is called in response to VBOXGUEST_IOCTL_CTL_FILTER_MASK as well as to
+ * do session cleanup.
+ *
+ * @returns VBox status code
+ * @param gdev The Guest extension device.
+ * @param session The session.
+ * @param fOrMask The events to add.
+ * @param fNotMask The events to remove.
+ * @param fSessionTermination Set if we're called by the session cleanup code.
+ *                            This tweaks the error handling so we perform
+ *                            proper session cleanup even if the host
+ *                            misbehaves.
+ *
+ * @remarks Takes the session spinlock.
+ */
+static int vgdrvSetSessionEventFilter(PVBOXGUESTDEVEXT gdev,
+				      PVBOXGUESTSESSION session,
+				      u32 fOrMask, u32 fNotMask,
+				      bool fSessionTermination)
+{
+	VMMDevCtlGuestFilterMask *req;
+	u32 changed, previous;
+	unsigned long flags;
+	bool global_change;
+	int rc = VINF_SUCCESS;
+
+	/* Allocate a request buffer before taking the spinlock */
+	req = vbg_req_alloc(sizeof(*req), VMMDevReq_CtlGuestFilterMask);
+	if (!req) {
+		if (!fSessionTermination)
+			return VERR_NO_MEMORY;
+		/* Ignore failure, we must do session cleanup. */
+	}
+
+	spin_lock_irqsave(&gdev->session_spinlock, flags);
+
+	/* Apply the changes to the session mask. */
+	previous = session->fEventFilter;
+	session->fEventFilter |= fOrMask;
+	session->fEventFilter &= ~fNotMask;
+
+	/* If anything actually changed, update the global usage counters. */
+	changed = previous ^ session->fEventFilter;
+	if (!changed)
+		goto out;
+
+	global_change = vgdrvBitUsageTrackerChange(&gdev->EventFilterTracker,
+						   changed, previous);
+
+	/*
+	 * Only talk to the host when the global mask changed (or is in the
+	 * U32_MAX "unknown after failure" state) and we have a request buf.
+	 */
+	if (!(global_change || gdev->fEventFilterHost == U32_MAX) || !req)
+		goto out;
+
+	req->u32OrMask = gdev->fFixedEvents | gdev->EventFilterTracker.fMask;
+	if (req->u32OrMask == gdev->fEventFilterHost)
+		goto out;
+
+	gdev->fEventFilterHost = req->u32OrMask;
+	req->u32NotMask = ~req->u32OrMask;
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0) {
+		/* Failed, roll back (unless it's session termination time). */
+		gdev->fEventFilterHost = U32_MAX;
+		if (fSessionTermination)
+			goto out;
+
+		vgdrvBitUsageTrackerChange(&gdev->EventFilterTracker, changed,
+					   session->fEventFilter);
+		session->fEventFilter = previous;
+	}
+
+out:
+	spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+	kfree(req);
+
+	return rc;
+}
+
+/**
+ * Handle VBOXGUEST_IOCTL_CTL_FILTER_MASK.
+ *
+ * @returns VBox status code
+ * @param gdev The Guest extension device.
+ * @param session The session.
+ * @param pInfo The request.
+ */
+static int vgdrvIoCtl_CtlFilterMask(PVBOXGUESTDEVEXT gdev,
+				    PVBOXGUESTSESSION session,
+				    VBoxGuestFilterMaskInfo * pInfo)
+{
+	u32 invalid_bits = ~VMMDEV_EVENT_VALID_EVENT_MASK;
+
+	/* Reject requests touching events outside the valid mask. */
+	if ((pInfo->u32OrMask & invalid_bits) ||
+	    (pInfo->u32NotMask & invalid_bits))
+		return VERR_INVALID_PARAMETER;
+
+	return vgdrvSetSessionEventFilter(gdev, session, pInfo->u32OrMask,
+					  pInfo->u32NotMask, false);
+}
+
+/**
+ * Report guest supported mouse-features to the host.
+ *
+ * @returns 0 or negative errno value.
+ * @param gdev The Guest extension device.
+ * @param features The set of features to report to the host.
+ */
+int vbg_core_set_mouse_status(PVBOXGUESTDEVEXT gdev, u32 features)
+{
+	VMMDevReqMouseStatus *status_req;
+	int rc;
+
+	status_req = vbg_req_alloc(sizeof(*status_req),
+				   VMMDevReq_SetMouseStatus);
+	if (!status_req)
+		return -ENOMEM;
+
+	/* Only the feature mask matters here, the position is unused. */
+	status_req->pointerXPos = 0;
+	status_req->pointerYPos = 0;
+	status_req->mouseFeatures = features;
+
+	rc = vbg_req_perform(gdev, status_req);
+	if (rc < 0)
+		vbg_err("vbg_core_set_mouse_status error: %d\n", rc);
+
+	kfree(status_req);
+	return -vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Init and termination worker for set guest capabilities to zero on the host.
+ *
+ * @returns 0 or negative errno value.
+ * @param gdev The Guest extension device.
+ */
+static int vgdrvResetCapabilitiesOnHost(PVBOXGUESTDEVEXT gdev)
+{
+	VMMDevReqGuestCapabilities2 *caps_req;
+	int rc;
+
+	caps_req = vbg_req_alloc(sizeof(*caps_req),
+				 VMMDevReq_SetGuestCapabilities);
+	if (!caps_req)
+		return -ENOMEM;
+
+	/* Clear all capabilities. */
+	caps_req->u32OrMask = 0;
+	caps_req->u32NotMask = U32_MAX;
+
+	rc = vbg_req_perform(gdev, caps_req);
+	kfree(caps_req);
+	if (rc < 0)
+		vbg_err("vgdrvResetCapabilitiesOnHost error: %d\n", rc);
+
+	return -vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Sets the guest capabilities to the host while holding the lock.
+ *
+ * This will ASSUME that we're the ones in charge of the mask, so
+ * we'll simply clear all bits we don't set.
+ *
+ * @returns VBox status code
+ * @param gdev The Guest extension device.
+ * @param req A pre-allocated SetGuestCapabilities request.
+ */
+static int vgdrvUpdateCapabilitiesOnHostWithReqAndLock(PVBOXGUESTDEVEXT gdev,
+						       VMMDevReqGuestCapabilities2 *req)
+{
+	u32 caps = gdev->SetGuestCapsTracker.fMask;
+	int rc;
+
+	req->u32OrMask = caps;
+	/* Nothing to do when the host already has this exact mask. */
+	if (caps == gdev->fGuestCapsHost)
+		return VINF_SUCCESS;
+
+	gdev->fGuestCapsHost = caps;
+	req->u32NotMask = ~caps;
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0)
+		gdev->fGuestCapsHost = U32_MAX;	/* State unknown, force resync */
+
+	return rc;
+}
+
+/**
+ * Sets the guest capabilities for a session.
+ *
+ * Structured identically to vgdrvSetSessionEventFilter: apply the change to
+ * the session mask, update the global usage tracker, and only then talk to
+ * the host, rolling back on failure.
+ *
+ * @returns VBox status code
+ * @param gdev The Guest extension device.
+ * @param session The session.
+ * @param fOrMask The capabilities to add.
+ * @param fNotMask The capabilities to remove.
+ * @param fSessionTermination Set if we're called by the session cleanup code.
+ *                            This tweaks the error handling so we perform
+ *                            proper session cleanup even if the host
+ *                            misbehaves.
+ *
+ * @remarks Takes the session spinlock.
+ */
+static int vgdrvSetSessionCapabilities(PVBOXGUESTDEVEXT gdev,
+				       PVBOXGUESTSESSION session,
+				       u32 fOrMask, u32 fNotMask,
+				       bool fSessionTermination)
+{
+	VMMDevReqGuestCapabilities2 *req;
+	u32 changed, previous;
+	unsigned long flags;
+	bool global_change;
+	int rc = VINF_SUCCESS;
+
+	/* Allocate a request buffer before taking the spinlock */
+	req = vbg_req_alloc(sizeof(*req), VMMDevReq_SetGuestCapabilities);
+	if (!req) {
+		if (!fSessionTermination)
+			return VERR_NO_MEMORY;
+		/* Ignore failure, we must do session cleanup. */
+	}
+
+	spin_lock_irqsave(&gdev->session_spinlock, flags);
+
+	/* Apply the changes to the session mask. */
+	previous = session->fCapabilities;
+	session->fCapabilities |= fOrMask;
+	session->fCapabilities &= ~fNotMask;
+
+	/* If anything actually changed, update the global usage counters. */
+	changed = previous ^ session->fCapabilities;
+	if (!changed)
+		goto out;
+
+	global_change = vgdrvBitUsageTrackerChange(&gdev->SetGuestCapsTracker,
+						   changed, previous);
+
+	/*
+	 * Only talk to the host when the global mask changed (or is in the
+	 * U32_MAX "unknown after failure" state) and we have a request buf.
+	 */
+	if (!(global_change || gdev->fGuestCapsHost == U32_MAX) || !req)
+		goto out;
+
+	rc = vgdrvUpdateCapabilitiesOnHostWithReqAndLock(gdev, req);
+	if (rc < 0 && !fSessionTermination) {
+		/* Failed, roll back (unless it's session termination time). */
+		vgdrvBitUsageTrackerChange(&gdev->SetGuestCapsTracker, changed,
+					   session->fCapabilities);
+		session->fCapabilities = previous;
+	}
+
+out:
+	spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+	kfree(req);
+
+	return rc;
+}
+
+/**
+ * Handle VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES.
+ *
+ * @returns VBox status code
+ * @param gdev The Guest extension device.
+ * @param session The session.
+ * @param pInfo The request.
+ */
+static int vgdrvIoCtl_SetCapabilities(PVBOXGUESTDEVEXT gdev,
+				      PVBOXGUESTSESSION session,
+				      VBoxGuestSetCapabilitiesInfo * pInfo)
+{
+	u32 invalid_bits = ~VMMDEV_GUEST_CAPABILITIES_MASK;
+
+	/* Reject requests touching capabilities outside the valid mask. */
+	if ((pInfo->u32OrMask & invalid_bits) ||
+	    (pInfo->u32NotMask & invalid_bits))
+		return VERR_INVALID_PARAMETER;
+
+	return vgdrvSetSessionCapabilities(gdev, session, pInfo->u32OrMask,
+					   pInfo->u32NotMask, false);
+}
+
+/** @} */
+
+/**
+ * Common IOCtl for user to kernel communication.
+ *
+ * This function only does the basic validation and then invokes
+ * worker functions that takes care of each specific function.
+ *
+ * Requests whose ioctl number encodes a variable size (VMM requests,
+ * HGCM calls, log) are matched on the size-stripped number first; all
+ * fixed-size requests go through the switch below.
+ *
+ * @returns VBox status code
+ * @param iFunction The requested function.
+ * @param gdev The Guest extension device.
+ * @param session The client session.
+ * @param pvData The input/output data buffer. Can be NULL depending on the function.
+ * @param cbData The max size of the data buffer.
+ * @param data_size_ret Where to store the amount of returned data.
+ */
+int VGDrvCommonIoCtl(unsigned iFunction, PVBOXGUESTDEVEXT gdev,
+		     PVBOXGUESTSESSION session, void *pvData, size_t cbData,
+		     size_t *data_size_ret)
+{
+	int rc;
+
+	/* Default to "no data returned" for the error paths below. */
+	*data_size_ret = 0;
+
+	/*
+	 * Deal with variably sized requests first.
+	 */
+	if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) ==
+	    VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0))) {
+		rc = vgdrvIoCtl_VMMRequest(gdev, session,
+					   (VMMDevRequestHeader *) pvData,
+					   cbData, data_size_ret);
+	}
+	/*
+	 * These ones are a bit tricky.
+	 */
+	else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) ==
+		 VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0))) {
+		if (cbData < sizeof(VBoxGuestHGCMCallInfo))
+			return VERR_BUFFER_OVERFLOW;
+		/* U32_MAX timeout == wait indefinitely. */
+		rc = vgdrvIoCtl_HGCMCall(gdev, session,
+					 (VBoxGuestHGCMCallInfo *)pvData,
+					 U32_MAX, false, 0,
+					 cbData, data_size_ret);
+	} else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) ==
+		   VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED
+					      (0))) {
+		VBoxGuestHGCMCallInfoTimed *pInfo =
+		    (VBoxGuestHGCMCallInfoTimed *) pvData;
+		if (cbData < sizeof(VBoxGuestHGCMCallInfoTimed))
+			return VERR_BUFFER_OVERFLOW;
+		rc = vgdrvIoCtl_HGCMCall(gdev, session, &pInfo->info,
+					 pInfo->u32Timeout, false,
+					 offsetof(VBoxGuestHGCMCallInfoTimed,
+						  info),
+					 cbData, data_size_ret);
+	}
+#ifdef CONFIG_X86_64
+	/* 32-bit compat variants, only distinct on 64-bit kernels. */
+	else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) ==
+		 VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0))) {
+		if (cbData < sizeof(VBoxGuestHGCMCallInfo))
+			return VERR_BUFFER_OVERFLOW;
+		rc = vgdrvIoCtl_HGCMCall(gdev, session,
+					 (VBoxGuestHGCMCallInfo *)pvData,
+					 U32_MAX, true, 0,
+					 cbData, data_size_ret);
+	} else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) ==
+		   VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32
+					      (0))) {
+		VBoxGuestHGCMCallInfoTimed *pInfo = pvData;
+
+		if (cbData < sizeof(VBoxGuestHGCMCallInfoTimed))
+			return VERR_BUFFER_OVERFLOW;
+		rc = vgdrvIoCtl_HGCMCall(gdev, session, &pInfo->info,
+					 pInfo->u32Timeout, true,
+					 offsetof(VBoxGuestHGCMCallInfoTimed,
+						  info),
+					 cbData, data_size_ret);
+	}
+#endif
+	else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) ==
+		 VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0))) {
+		vbg_info("%.*s", (int)cbData, (char *)pvData);
+	} else {
+		/* Fixed-size requests. */
+		switch (iFunction) {
+		case VBOXGUEST_IOCTL_WAITEVENT:
+			if (cbData < sizeof(VBoxGuestWaitEventInfo))
+				return VERR_BUFFER_OVERFLOW;
+			rc = vbg_ioctl_wait_event(gdev, session,
+						  (VBoxGuestWaitEventInfo *)
+						  pvData, data_size_ret);
+			break;
+
+		case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
+			if (cbData != 0)
+				return VERR_BUFFER_OVERFLOW;
+			rc = vbg_ioctl_cancel_all_wait_events(gdev,
+							      session);
+			break;
+
+		case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
+			if (cbData < sizeof(VBoxGuestFilterMaskInfo))
+				return VERR_BUFFER_OVERFLOW;
+			rc = vgdrvIoCtl_CtlFilterMask(gdev, session,
+						      (VBoxGuestFilterMaskInfo
+						       *) pvData);
+			break;
+
+		case VBOXGUEST_IOCTL_HGCM_CONNECT:
+#ifdef CONFIG_X86_64	/* Needed because these are identical on 32 bit builds */
+		case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
+#endif
+			if (cbData < sizeof(VBoxGuestHGCMConnectInfo))
+				return VERR_BUFFER_OVERFLOW;
+			rc = vgdrvIoCtl_HGCMConnect(gdev, session,
+						    (VBoxGuestHGCMConnectInfo *)
+						    pvData, data_size_ret);
+			break;
+
+		case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
+#ifdef CONFIG_X86_64
+		case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
+#endif
+			if (cbData < sizeof(VBoxGuestHGCMDisconnectInfo))
+				return VERR_BUFFER_OVERFLOW;
+			rc = vgdrvIoCtl_HGCMDisconnect(gdev, session,
+						       (VBoxGuestHGCMDisconnectInfo
+							*) pvData,
+						       data_size_ret);
+			break;
+
+		case VBOXGUEST_IOCTL_CHECK_BALLOON:
+			if (cbData < sizeof(VBoxGuestCheckBalloonInfo))
+				return VERR_BUFFER_OVERFLOW;
+			rc = vgdrvIoCtl_CheckMemoryBalloon(gdev, session,
+							   (VBoxGuestCheckBalloonInfo
+							    *) pvData,
+							   data_size_ret);
+			break;
+
+		case VBOXGUEST_IOCTL_CHANGE_BALLOON:
+			/* Under Linux we always handle the balloon in R0. */
+			rc = VERR_PERMISSION_DENIED;
+			break;
+
+		case VBOXGUEST_IOCTL_WRITE_CORE_DUMP:
+			if (cbData < sizeof(VBoxGuestWriteCoreDump))
+				return VERR_BUFFER_OVERFLOW;
+			rc = vgdrvIoCtl_WriteCoreDump(gdev,
+						      (VBoxGuestWriteCoreDump *)
+						      pvData);
+			break;
+
+		case VBOXGUEST_IOCTL_SET_MOUSE_STATUS:
+			vbg_err("VGDrvCommonIoCtl: VBOXGUEST_IOCTL_SET_MOUSE_STATUS should not be used under Linux\n");
+			rc = VERR_NOT_SUPPORTED;
+			break;
+
+		case VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE:
+			vbg_err("VGDrvCommonIoCtl: VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE should not be used under Linux\n");
+			rc = VERR_NOT_SUPPORTED;
+			break;
+
+		case VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES:
+			if (cbData < sizeof(VBoxGuestSetCapabilitiesInfo))
+				return VERR_BUFFER_OVERFLOW;
+			rc = vgdrvIoCtl_SetCapabilities(gdev, session,
+							(VBoxGuestSetCapabilitiesInfo
+							 *) pvData);
+			break;
+
+		default:
+			vbg_debug("VGDrvCommonIoCtl: Unknown request %#08x\n",
+				  VBOXGUEST_IOCTL_STRIP_SIZE(iFunction));
+			rc = VERR_NOT_SUPPORTED;
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * Core interrupt service routine.
+ *
+ * Acks pending VMMDev events using the pre-allocated pIrqAckEvents request
+ * (safe, it is only used here, under the event_spinlock) and dispatches
+ * them to waiters.
+ *
+ * @param irq     The irq number (unused).
+ * @param dev_id  Our VBOXGUESTDEVEXT pointer.
+ * @returns IRQ_HANDLED if the VMMDev had events pending, IRQ_NONE otherwise.
+ */
+irqreturn_t vbg_core_isr(int irq, void *dev_id)
+{
+	PVBOXGUESTDEVEXT gdev = dev_id;
+	VMMDevEvents *req = gdev->pIrqAckEvents;
+	bool fMousePositionChanged = false;
+	bool fOurIrq;
+	unsigned long flags;
+	int rc;
+
+	/*
+	 * Enter the spinlock and check if it's our IRQ or not.
+	 */
+	spin_lock_irqsave(&gdev->event_spinlock, flags);
+	fOurIrq = gdev->pVMMDevMemory->V.V1_04.fHaveEvents;
+	if (fOurIrq) {
+		/* Acknowlegde events. */
+		req->header.rc = VERR_INTERNAL_ERROR;
+		req->events = 0;
+		rc = vbg_req_perform(gdev, req);
+		if (rc >= 0) {
+			u32 fEvents = req->events;
+
+			/*
+			 * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
+			 */
+			if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
+				fMousePositionChanged = true;
+				fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
+			}
+
+			/*
+			 * The HGCM event/list is kind of different in that we evaluate all entries.
+			 */
+			if (fEvents & VMMDEV_EVENT_HGCM) {
+				wake_up(&gdev->hgcm_wq);
+				fEvents &= ~VMMDEV_EVENT_HGCM;
+			}
+
+			/*
+			 * Normal FIFO waiter evaluation.
+			 */
+			vbg_dispatch_events_locked(gdev, fEvents);
+		}
+	}
+
+	spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+	/* Notify the mouse poll handler outside the spinlock. */
+	if (fMousePositionChanged)
+		VGDrvNativeISRMousePollEvent(gdev);
+
+	return fOurIrq ? IRQ_HANDLED : IRQ_NONE;
+}
diff --git a/drivers/misc/vboxguest/vboxguest_core.h b/drivers/misc/vboxguest/vboxguest_core.h
new file mode 100644
index 000000000000..81a36c6627d2
--- /dev/null
+++ b/drivers/misc/vboxguest/vboxguest_core.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2010-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef __VBOXGUEST_CORE_H__
+#define __VBOXGUEST_CORE_H__
+
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/vbox_vmmdev.h>
+#include <linux/vboxguest.h>
+
+/** Pointer to the VBoxGuest per session data. */
+typedef struct VBOXGUESTSESSION *PVBOXGUESTSESSION;
+
+/** VBox guest memory balloon. */
+typedef struct VBOXGUESTMEMBALLOON {
+	/** Mutex protecting the members below from concurrent access. */
+	struct mutex mutex;
+	/** Pre-allocated VMMDevChangeMemBalloon req for inflate / deflate */
+	VMMDevChangeMemBalloon *change_req;
+	/** The current number of chunks in the balloon. */
+	u32 chunks;
+	/** The maximum number of chunks in the balloon. */
+	u32 max_chunks;
+	/** The session owning the balloon; set by the first session to
+	 * query/change the balloon, cleared on session close. */
+	PVBOXGUESTSESSION owner;
+	/**
+	 * Array of pointers to page arrays. A page array is allocated for
+	 * each chunk when inflating, and freed again when deflating.
+	 */
+	struct page ***pages;
+} VBOXGUESTMEMBALLOON;
+
+/**
+ * Per bit usage tracker for a u32 mask.
+ *
+ * Used for optimal handling of guest properties and event filter.
+ * Each bit has a usage counter (number of sessions using it); fMask
+ * caches which bits have a non-zero count.
+ */
+typedef struct VBOXGUESTBITUSAGETRACER {
+	/** Per bit usage counters. */
+	u32 acPerBitUsage[32];
+	/** The current mask according to acPerBitUsage. */
+	u32 fMask;
+} VBOXGUESTBITUSAGETRACER;
+/** Pointer to a per bit usage tracker. */
+typedef VBOXGUESTBITUSAGETRACER *PVBOXGUESTBITUSAGETRACER;
+/** Pointer to a const per bit usage tracker. */
+typedef VBOXGUESTBITUSAGETRACER const *PCVBOXGUESTBITUSAGETRACER;
+
+/**
+ * VBox guest device (data) extension.
+ */
+typedef struct VBOXGUESTDEVEXT {
+	/** The (parent pci) device owning us, used for devm allocations. */
+	struct device *dev;
+	/** The base of the adapter I/O ports. */
+	u16 IOPortBase;
+	/** Pointer to the mapping of the VMMDev adapter memory. */
+	VMMDevMemory volatile *pVMMDevMemory;
+	/**
+	 * Dummy page and vmap address for reserved kernel virtual-address
+	 * space for the guest mappings, only used on hosts lacking vtx.
+	 */
+	struct page *guest_mappings_dummy_page;
+	void *guest_mappings;
+	/** Spinlock protecting the signaling and resetting of the wait-for-event
+	 * semaphores as well as the event acking in the ISR. */
+	spinlock_t event_spinlock;
+	/** Preallocated VMMDevEvents for the IRQ handler. */
+	VMMDevEvents *pIrqAckEvents;
+	/** Wait-for-event list for threads waiting for multiple events. */
+	wait_queue_head_t event_wq;
+	/** Mask of pending events. */
+	u32 f32PendingEvents;
+	/** Wait-for-event list for threads waiting on HGCM async completion. */
+	wait_queue_head_t hgcm_wq;
+	/** Pre-allocated hgcm cancel2 req. for cancellation on timeout */
+	VMMDevHGCMCancel2 *cancel_req;
+	/** Mutex protecting cancel_req accesses */
+	struct mutex cancel_req_mutex;
+	/** Pre-allocated mouse-status request for the input-device handling. */
+	VMMDevReqMouseStatus *mouse_status_req;
+	/** Input device for reporting abs mouse coordinates to the guest. */
+	struct input_dev *input;
+
+	/** Spinlock protecting various items in the VBOXGUESTSESSION. */
+	spinlock_t session_spinlock;
+	/** List of guest sessions (VBOXGUESTSESSION). We currently traverse this
+	 * but do not search it, so a list data type should be fine. Use under the
+	 * session_spinlock lock. */
+	struct list_head session_list;
+	/** Memory balloon information. */
+	VBOXGUESTMEMBALLOON mem_balloon;
+
+	/** @name Host Event Filtering
+	 * @{ */
+	/** Events we won't permit anyone to filter out. */
+	u32 fFixedEvents;
+	/** Usage counters for the host events. (Fixed events are not included.) */
+	VBOXGUESTBITUSAGETRACER EventFilterTracker;
+	/** The event filter last reported to the host (U32_MAX on failure). */
+	u32 fEventFilterHost;
+	/** @} */
+
+	/** @name Guest Capabilities
+	 * @{ */
+	/** Usage counters for guest capabilities in "set" mode. Indexed by
+	 * capability bit number, one count per session using a capability. */
+	VBOXGUESTBITUSAGETRACER SetGuestCapsTracker;
+	/** The guest capabilities last reported to the host (U32_MAX on failure). */
+	u32 fGuestCapsHost;
+	/** @} */
+
+	/** Heartbeat timer which fires with interval
+	 * cNsHearbeatInterval and its handler sends
+	 * VMMDevReq_GuestHeartbeat to VMMDev. */
+	struct timer_list heartbeat_timer;
+	/** Heartbeat timer interval in ms. */
+	int heartbeat_interval_ms;
+	/** Preallocated VMMDevReq_GuestHeartbeat request. */
+	VMMDevRequestHeader *guest_heartbeat_req;
+
+	/** "vboxguest" char-device */
+	struct miscdevice misc_device;
+	/** "vboxuser" char-device */
+	struct miscdevice misc_device_user;
+} VBOXGUESTDEVEXT;
+/** Pointer to the VBoxGuest driver data. */
+typedef VBOXGUESTDEVEXT *PVBOXGUESTDEVEXT;
+
+/**
+ * The VBoxGuest per session data.
+ */
+typedef struct VBOXGUESTSESSION {
+	/** The list node. */
+	struct list_head list_node;
+	/** Pointer to the device extension. */
+	PVBOXGUESTDEVEXT gdev;
+
+	/** Array containing HGCM client IDs associated with this session.
+	 * 0 means a free slot, U32_MAX a slot reserved for an in-flight
+	 * connect/disconnect. These will be automatically disconnected when
+	 * the session is closed. */
+	u32 volatile aHGCMClientIds[64];
+	/** Host events requested by the session.
+	 * An event type requested in any guest session will be added to the host
+	 * filter. Protected by VBOXGUESTDEVEXT::session_spinlock. */
+	u32 fEventFilter;
+	/** Guest capabilities in "set" mode for this session.
+	 * These are accumulated over all sessions via
+	 * VBOXGUESTDEVEXT::SetGuestCapsTracker and reported to the host.
+	 * Protected by VBOXGUESTDEVEXT::session_spinlock. */
+	u32 fCapabilities;
+	/** Does this session belong to a root process or a user one? */
+	bool user_session;
+
+	/** Set on CANCEL_ALL_WAITEVENTS, protected by the event_spinlock. */
+	bool cancel_waiters;
+} VBOXGUESTSESSION;
+
+int vbg_core_init(PVBOXGUESTDEVEXT gdev, u32 fixed_events);
+void vbg_core_exit(PVBOXGUESTDEVEXT gdev);
+int vbg_core_open_session(PVBOXGUESTDEVEXT gdev,
+ VBOXGUESTSESSION **session_ret, bool user_session);
+void vbg_core_close_session(VBOXGUESTSESSION *session);
+irqreturn_t vbg_core_isr(int irq, void *dev_id);
+
+int VGDrvCommonIoCtl(unsigned iFunction, PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session, void *data, size_t data_size,
+ size_t *data_size_ret);
+
+int vbg_core_set_mouse_status(PVBOXGUESTDEVEXT gdev, u32 features);
+void VGDrvNativeISRMousePollEvent(PVBOXGUESTDEVEXT gdev);
+
+#endif
diff --git a/drivers/misc/vboxguest/vboxguest_linux.c b/drivers/misc/vboxguest/vboxguest_linux.c
new file mode 100644
index 000000000000..8468c7139b98
--- /dev/null
+++ b/drivers/misc/vboxguest/vboxguest_linux.c
@@ -0,0 +1,464 @@
+/*
+ * vboxguest linux pci driver, char-dev and input-device code,
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/vbox_utils.h>
+#include "vboxguest_core.h"
+
+/** The device name. */
+#define DEVICE_NAME "vboxguest"
+/** The device name for the device node open to everyone. */
+#define DEVICE_NAME_USER "vboxuser"
+/** VirtualBox PCI vendor ID. */
+#define VBOX_VENDORID 0x80ee
+/** VMMDev PCI card product ID. */
+#define VMMDEV_DEVICEID 0xcafe
+
+/** Mutex protecting the global vbg_gdev pointer used by vbg_get/put_gdev. */
+static DEFINE_MUTEX(vbg_gdev_mutex);
+/** Global vbg_gdev pointer used by vbg_get/put_gdev. */
+static PVBOXGUESTDEVEXT vbg_gdev;
+
+static long vgdrvLinuxIOCtl(struct file *pFilp, unsigned int uCmd,
+ unsigned long ulArg);
+
+/**
+ * Open of the "vboxguest" device: creates a kernel-privilege session.
+ *
+ * @returns 0 on success, negated errno on failure.
+ */
+static int vbg_misc_device_open(struct inode *inode, struct file *filp)
+{
+	PVBOXGUESTSESSION session;
+	PVBOXGUESTDEVEXT gdev;
+	int ret;
+
+	/* misc_open sets filp->private_data to our misc device */
+	gdev = container_of(filp->private_data, VBOXGUESTDEVEXT, misc_device);
+
+	ret = vbg_core_open_session(gdev, &session, false);
+	if (ret)
+		return ret; /* was "return -ENOMEM", hiding the real error */
+
+	filp->private_data = session;
+	return 0;
+}
+
+/* Open of the "vboxuser" device: creates an unprivileged (user) session. */
+static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
+{
+	PVBOXGUESTDEVEXT gdev;
+	PVBOXGUESTSESSION session;
+	int ret;
+
+	/* misc_open stored our miscdevice in filp->private_data */
+	gdev = container_of(filp->private_data, VBOXGUESTDEVEXT,
+			    misc_device_user);
+
+	ret = vbg_core_open_session(gdev, &session, true);
+	if (ret == 0)
+		filp->private_data = session;
+
+	return ret;
+}
+
+/**
+ * Release (close) of either char-device: tears down the session.
+ *
+ * @param inode Pointer to inode info structure.
+ * @param filp Associated file pointer.
+ */
+static int vbg_misc_device_close(struct inode *inode, struct file *filp)
+{
+	PVBOXGUESTSESSION session = filp->private_data;
+
+	filp->private_data = NULL;
+	vbg_core_close_session(session);
+	return 0;
+}
+
+/** The file_operations structures. */
+/* fops for the "vboxguest" device; sessions opened with user_session = false */
+static const struct file_operations vbg_misc_device_fops = {
+	.owner = THIS_MODULE,
+	.open = vbg_misc_device_open,
+	.release = vbg_misc_device_close,
+	.unlocked_ioctl = vgdrvLinuxIOCtl,
+};
+/* fops for the "vboxuser" device; sessions opened with user_session = true */
+static const struct file_operations vbg_misc_device_user_fops = {
+	.owner = THIS_MODULE,
+	.open = vbg_misc_device_user_open,
+	.release = vbg_misc_device_close,
+	.unlocked_ioctl = vgdrvLinuxIOCtl,
+};
+
+/**
+ * input_dev open callback; called when the input device is first opened.
+ *
+ * Asks the host to switch to absolute mouse reporting.
+ */
+static int vbg_input_open(struct input_dev *input)
+{
+	PVBOXGUESTDEVEXT gdev = input_get_drvdata(input);
+	u32 feat = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE | VMMDEV_MOUSE_NEW_PROTOCOL;
+
+	return vbg_core_set_mouse_status(gdev, feat);
+}
+
+/**
+ * input_dev close callback; called when the last open handle to the
+ * input device is closed.
+ *
+ * Switches absolute mouse reporting back off.
+ */
+static void vbg_input_close(struct input_dev *input)
+{
+	vbg_core_set_mouse_status(input_get_drvdata(input), 0);
+}
+
+/**
+ * Allocates and registers the kernel input device used for absolute
+ * mouse reporting.
+ *
+ * @returns 0 on success, negated errno on failure.
+ */
+static int vbg_create_input_device(PVBOXGUESTDEVEXT gdev)
+{
+	struct input_dev *idev;
+
+	idev = devm_input_allocate_device(gdev->dev);
+	if (idev == NULL)
+		return -ENOMEM;
+
+	idev->name = "VirtualBox mouse integration";
+	idev->dev.parent = gdev->dev;
+	idev->id.bustype = BUS_PCI;
+	idev->id.vendor = VBOX_VENDORID;
+	idev->id.product = VMMDEV_DEVICEID;
+	idev->open = vbg_input_open;
+	idev->close = vbg_input_close;
+
+	input_set_capability(idev, EV_KEY, BTN_MOUSE);
+	input_set_abs_params(idev, ABS_X, VMMDEV_MOUSE_RANGE_MIN,
+			     VMMDEV_MOUSE_RANGE_MAX, 0, 0);
+	input_set_abs_params(idev, ABS_Y, VMMDEV_MOUSE_RANGE_MIN,
+			     VMMDEV_MOUSE_RANGE_MAX, 0, 0);
+	input_set_drvdata(idev, gdev);
+
+	gdev->input = idev;
+
+	return input_register_device(gdev->input);
+}
+
+/**
+ * Does the PCI detection and init of the device.
+ *
+ * The VMMDev PCI device has an I/O port BAR (used to submit requests to
+ * the host) and an MMIO BAR holding a VMMDevMemory region which is
+ * validated here before use.
+ *
+ * @returns 0 on success, negated errno on failure.
+ */
+static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+{
+	struct device *dev = &pci->dev;
+	resource_size_t io, io_len, mmio, mmio_len;
+	volatile VMMDevMemory *vmmdev;
+	PVBOXGUESTDEVEXT gdev;
+	int ret;
+
+	gdev = devm_kzalloc(dev, sizeof(*gdev), GFP_KERNEL);
+	if (!gdev)
+		return -ENOMEM;
+
+	ret = pci_enable_device(pci);
+	if (ret != 0) {
+		vbg_err("vboxguest: Error enabling device: %d\n", ret);
+		return ret;
+	}
+
+	/* Default error for the resource checks below */
+	ret = -ENODEV;
+
+	/* BAR 0: the I/O port through which requests are submitted */
+	io = pci_resource_start(pci, 0);
+	io_len = pci_resource_len(pci, 0);
+	if (!io || !io_len) {
+		vbg_err("vboxguest: Error IO-port resource (0) is missing\n");
+		goto err_disable_pcidev;
+	}
+	if (devm_request_region(dev, io, io_len, DEVICE_NAME) == NULL) {
+		vbg_err("vboxguest: Error could not claim IO resource\n");
+		ret = -EBUSY;
+		goto err_disable_pcidev;
+	}
+
+	/* BAR 1: the VMMDevMemory MMIO region */
+	mmio = pci_resource_start(pci, 1);
+	mmio_len = pci_resource_len(pci, 1);
+	if (!mmio || !mmio_len) {
+		vbg_err("vboxguest: Error MMIO resource (1) is missing\n");
+		goto err_disable_pcidev;
+	}
+
+	if (devm_request_mem_region(dev, mmio, mmio_len, DEVICE_NAME) == NULL) {
+		vbg_err("vboxguest: Error could not claim MMIO resource\n");
+		ret = -EBUSY;
+		goto err_disable_pcidev;
+	}
+
+	vmmdev = devm_ioremap(dev, mmio, mmio_len);
+	if (!vmmdev) {
+		vbg_err("vboxguest: Error ioremap failed; MMIO addr=%p size=%d\n",
+			(void *)mmio, (int)mmio_len);
+		goto err_disable_pcidev;
+	}
+
+	/* Validate MMIO region version and size. */
+	if (vmmdev->u32Version != VMMDEV_MEMORY_VERSION ||
+	    vmmdev->u32Size < 32 || vmmdev->u32Size > mmio_len) {
+		vbg_err("vboxguest: Bogus VMMDev memory; u32Version=%08x (expected %08x) u32Size=%d (expected <= %d)\n",
+			vmmdev->u32Version, VMMDEV_MEMORY_VERSION,
+			vmmdev->u32Size, (int)mmio_len);
+		goto err_disable_pcidev;
+	}
+
+	gdev->IOPortBase = io;
+	gdev->pVMMDevMemory = vmmdev;
+	gdev->dev = dev;
+	gdev->misc_device.minor = MISC_DYNAMIC_MINOR;
+	gdev->misc_device.name = DEVICE_NAME;
+	gdev->misc_device.fops = &vbg_misc_device_fops;
+	gdev->misc_device_user.minor = MISC_DYNAMIC_MINOR;
+	gdev->misc_device_user.name = DEVICE_NAME_USER;
+	gdev->misc_device_user.fops = &vbg_misc_device_user_fops;
+
+	ret = vbg_core_init(gdev, VMMDEV_EVENT_MOUSE_POSITION_CHANGED);
+	if (ret)
+		goto err_disable_pcidev;
+
+	ret = vbg_create_input_device(gdev);
+	if (ret) {
+		vbg_err("vboxguest: Error creating input device: %d\n", ret);
+		goto err_vbg_core_exit;
+	}
+
+	ret = devm_request_irq(dev, pci->irq, vbg_core_isr, IRQF_SHARED,
+			       DEVICE_NAME, gdev);
+	if (ret) {
+		vbg_err("vboxguest: Error requesting irq: %d\n", ret);
+		goto err_vbg_core_exit;
+	}
+
+	ret = misc_register(&gdev->misc_device);
+	if (ret) {
+		vbg_err("vboxguest: Error misc_register %s failed: %d\n",
+			DEVICE_NAME, ret);
+		goto err_vbg_core_exit;
+	}
+
+	ret = misc_register(&gdev->misc_device_user);
+	if (ret) {
+		vbg_err("vboxguest: Error misc_register %s failed: %d\n",
+			DEVICE_NAME_USER, ret);
+		goto err_unregister_misc_device;
+	}
+
+	/* Only one VMMDev per VM is supported; vbg_get_gdev relies on this */
+	mutex_lock(&vbg_gdev_mutex);
+	if (!vbg_gdev)
+		vbg_gdev = gdev;
+	else
+		ret = -EBUSY;
+	mutex_unlock(&vbg_gdev_mutex);
+
+	if (ret) {
+		/* Fixed grammar: was "more then 1" */
+		vbg_err("vboxguest: Error more than one vbox guest pci device\n");
+		goto err_unregister_misc_device_user;
+	}
+
+	pci_set_drvdata(pci, gdev);
+	/* Fixed format: was "0x%d", printing decimal behind a hex prefix */
+	vbg_info("vboxguest: misc device minor %d, IRQ %d, I/O port %x, MMIO at %p (size 0x%x)\n",
+		 gdev->misc_device.minor, pci->irq, gdev->IOPortBase,
+		 (void *)mmio, (int)mmio_len);
+
+	return 0;
+
+err_unregister_misc_device_user:
+	misc_deregister(&gdev->misc_device_user);
+err_unregister_misc_device:
+	misc_deregister(&gdev->misc_device);
+err_vbg_core_exit:
+	vbg_core_exit(gdev);
+err_disable_pcidev:
+	pci_disable_device(pci);
+
+	return ret;
+}
+
+/*
+ * PCI remove callback: undoes vbg_pci_probe in reverse order.
+ *
+ * Clearing vbg_gdev first (under the mutex) makes sure vbg_get_gdev()
+ * can no longer hand out a reference to a device that is going away.
+ * The irq, input device, ioremap and memory regions are devm-managed
+ * and released automatically after this returns.
+ */
+static void vbg_pci_remove(struct pci_dev *pci)
+{
+	PVBOXGUESTDEVEXT gdev = pci_get_drvdata(pci);
+
+	mutex_lock(&vbg_gdev_mutex);
+	vbg_gdev = NULL;
+	mutex_unlock(&vbg_gdev_mutex);
+
+	misc_deregister(&gdev->misc_device_user);
+	misc_deregister(&gdev->misc_device);
+	vbg_core_exit(gdev);
+	pci_disable_device(pci);
+}
+
+/**
+ * Helper for the vboxsf driver to get a reference to the guest pci device.
+ *
+ * On success vbg_gdev_mutex is deliberately kept locked until
+ * vbg_put_gdev() is called; this stops vbg_pci_remove from removing the
+ * device from underneath vboxsf. vboxsf only holds the reference briefly.
+ *
+ * @returns a pointer to the gdev; or a ERR_PTR value on error.
+ */
+PVBOXGUESTDEVEXT vbg_get_gdev(void)
+{
+	mutex_lock(&vbg_gdev_mutex);
+
+	if (!vbg_gdev) {
+		mutex_unlock(&vbg_gdev_mutex);
+		return ERR_PTR(-ENODEV);
+	}
+
+	return vbg_gdev; /* mutex intentionally left locked */
+}
+EXPORT_SYMBOL(vbg_get_gdev);
+
+/**
+ * Helper for the vboxsf driver to put a guest pci device reference.
+ *
+ * Releases the vbg_gdev_mutex that vbg_get_gdev() left locked, allowing
+ * device removal to proceed again.
+ *
+ * @param gdev Reference returned by vbg_get_gdev to put.
+ */
+void vbg_put_gdev(PVBOXGUESTDEVEXT gdev)
+{
+	WARN_ON(gdev != vbg_gdev);
+	mutex_unlock(&vbg_gdev_mutex);
+}
+EXPORT_SYMBOL(vbg_put_gdev);
+
+/**
+ * Device I/O Control entry point.
+ *
+ * Copies the ioctl payload into a kernel buffer, dispatches it to the
+ * common ioctl handler and copies any output back to user space.
+ *
+ * @returns -ENOMEM or -EFAULT for errors inside the ioctl callback; 0
+ * on success, or a positive VBox status code on vbox guest-device errors.
+ *
+ * @param pFilp Associated file pointer.
+ * @param uCmd The function specified to ioctl().
+ * @param ulArg The argument specified to ioctl().
+ */
+static long vgdrvLinuxIOCtl(struct file *pFilp, unsigned int uCmd,
+			    unsigned long ulArg)
+{
+	PVBOXGUESTSESSION session = (PVBOXGUESTSESSION) pFilp->private_data;
+	u32 cbData = _IOC_SIZE(uCmd);
+	void *pvBufFree;
+	void *pvBuf;
+	int rc, ret = 0;
+	u64 au64Buf[32 / sizeof(u64)];
+
+	/*
+	 * For small amounts of data being passed we use a stack based buffer
+	 * except for VMMREQUESTs where the data must not be on the stack.
+	 */
+	if (cbData <= sizeof(au64Buf) &&
+	    VBOXGUEST_IOCTL_STRIP_SIZE(uCmd) !=
+	    VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0))) {
+		pvBufFree = NULL;
+		pvBuf = &au64Buf[0];
+	} else {
+		/* __GFP_DMA32 for VBOXGUEST_IOCTL_VMMREQUEST */
+		pvBufFree = pvBuf = kmalloc(cbData, GFP_KERNEL | __GFP_DMA32);
+		if (!pvBuf)
+			return -ENOMEM;
+	}
+	/* Fixed: added __user annotations, ulArg is a user-space pointer */
+	if (copy_from_user(pvBuf, (void __user *)ulArg, cbData) == 0) {
+		/*
+		 * Process the IOCtl.
+		 */
+		size_t returned_data_size;
+		rc = VGDrvCommonIoCtl(uCmd, session->gdev, session, pvBuf, cbData,
+				      &returned_data_size);
+
+		/*
+		 * Copy ioctl data and output buffer back to user space.
+		 */
+		if (rc >= 0) {
+			if (returned_data_size > cbData) {
+				vbg_debug("vgdrvLinuxIOCtl: too much output data %zu > %u\n",
+					  returned_data_size, cbData);
+				returned_data_size = cbData;
+			}
+			if (returned_data_size > 0) {
+				if (copy_to_user((void __user *)ulArg, pvBuf,
+						 returned_data_size) != 0)
+					ret = -EFAULT;
+			}
+		} else {
+			/* Negate the Vbox status code to make it positive. */
+			ret = -rc;
+		}
+	} else {
+		ret = -EFAULT;
+	}
+
+	kfree(pvBufFree);
+
+	return ret;
+}
+
+/**
+ * ISR callback for mouse events.
+ *
+ * This is called at the end of the ISR, after leaving the event spinlock, if
+ * VMMDEV_EVENT_MOUSE_POSITION_CHANGED was raised by the host.
+ *
+ * @param gdev The device extension.
+ */
+void VGDrvNativeISRMousePollEvent(PVBOXGUESTDEVEXT gdev)
+{
+	/* Ask the host for the current absolute pointer position */
+	gdev->mouse_status_req->mouseFeatures = 0;
+	gdev->mouse_status_req->pointerXPos = 0;
+	gdev->mouse_status_req->pointerYPos = 0;
+
+	if (vbg_req_perform(gdev, gdev->mouse_status_req) < 0)
+		return;
+
+	/* Forward the position to the kernel input device */
+	input_report_abs(gdev->input, ABS_X,
+			 gdev->mouse_status_req->pointerXPos);
+	input_report_abs(gdev->input, ABS_Y,
+			 gdev->mouse_status_req->pointerYPos);
+	input_sync(gdev->input);
+}
+
+/* PCI device-id table: matches only VirtualBox's VMMDev PCI device. */
+static const struct pci_device_id vbg_pci_ids[] = {
+	{ .vendor = VBOX_VENDORID, .device = VMMDEV_DEVICEID },
+	{}
+};
+MODULE_DEVICE_TABLE(pci, vbg_pci_ids);
+
+static struct pci_driver vbg_pci_driver = {
+	.name = DEVICE_NAME,
+	.id_table = vbg_pci_ids,
+	.probe = vbg_pci_probe,
+	.remove = vbg_pci_remove,
+};
+
+/* Registers the PCI driver on module load, unregisters it on unload. */
+module_pci_driver(vbg_pci_driver);
+
+MODULE_AUTHOR("Oracle Corporation");
+MODULE_DESCRIPTION("Oracle VM VirtualBox Guest Additions for Linux Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/vboxguest/vboxguest_utils.c b/drivers/misc/vboxguest/vboxguest_utils.c
new file mode 100644
index 000000000000..560d550312a1
--- /dev/null
+++ b/drivers/misc/vboxguest/vboxguest_utils.c
@@ -0,0 +1,1124 @@
+/*
+ * vboxguest vmm-req and hgcm-call code, GenericRequest.c and HGCMInternal.c
+ * in vbox upstream svn.
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/vbox_err.h>
+#include <linux/vbox_utils.h>
+#include "vboxguest_core.h"
+
+/** The max parameter buffer size for a user request. */
+#define VBGLR0_MAX_HGCM_USER_PARM (24 * SZ_1M)
+/** The max parameter buffer size for a kernel request. */
+#define VBGLR0_MAX_HGCM_KERNEL_PARM (16 * SZ_1M)
+
+#define VBG_DEBUG_PORT 0x504
+
+/* This protects vbg_log_buf and serializes VBG_DEBUG_PORT accesses */
+static DEFINE_SPINLOCK(vbg_log_lock);
+static char vbg_log_buf[128];
+
+/*
+ * Generates a printf-like logging function that writes the formatted
+ * message both to the host-side debug port (VBG_DEBUG_PORT, one byte at
+ * a time) and to the kernel log through the given pr_* function.  The
+ * shared static buffer vbg_log_buf is protected by vbg_log_lock;
+ * messages longer than the buffer are truncated by vscnprintf.
+ */
+#define VBG_LOG(name, pr_func) \
+void name(const char *fmt, ...) \
+{ \
+	unsigned long flags; \
+	va_list args; \
+	int i, count; \
+ \
+	va_start(args, fmt); \
+	spin_lock_irqsave(&vbg_log_lock, flags); \
+ \
+	count = vscnprintf(vbg_log_buf, sizeof(vbg_log_buf), fmt, args);\
+	for (i = 0; i < count; i++) \
+		outb(vbg_log_buf[i], VBG_DEBUG_PORT); \
+ \
+	pr_func("%s", vbg_log_buf); \
+ \
+	spin_unlock_irqrestore(&vbg_log_lock, flags); \
+	va_end(args); \
+} \
+EXPORT_SYMBOL(name);
+
+VBG_LOG(vbg_info, pr_info)
+VBG_LOG(vbg_warn, pr_warn)
+VBG_LOG(vbg_err, pr_err)
+/* NOTE(review): vbg_debug is only generated here for DEBUG builds;
+ * presumably a stub/alternative exists in vbox_utils.h otherwise --
+ * confirm. */
+#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
+VBG_LOG(vbg_debug, pr_debug)
+#endif
+
+/**
+ * Helper to determine the minimum request size for the given request.
+ * Returns 0 if the given operation is not handled and/or supported.
+ *
+ * This is an exact protocol table: each VMMDevReq_* type maps to the
+ * size of its request struct; vbg_req_verify() uses the result as the
+ * lower bound for buffer validation.
+ *
+ * @returns Size.
+ * @param req The VMMDev request to get the size for.
+ */
+static size_t vbg_req_get_min_size(const VMMDevRequestHeader *req)
+{
+	switch (req->requestType) {
+	case VMMDevReq_GetMouseStatus:
+	case VMMDevReq_SetMouseStatus:
+		return sizeof(VMMDevReqMouseStatus);
+	case VMMDevReq_SetPointerShape:
+		return sizeof(VMMDevReqMousePointer);
+	case VMMDevReq_GetHostVersion:
+		return sizeof(VMMDevReqHostVersion);
+	case VMMDevReq_Idle:
+		return sizeof(VMMDevReqIdle);
+	case VMMDevReq_GetHostTime:
+		return sizeof(VMMDevReqHostTime);
+	case VMMDevReq_GetHypervisorInfo:
+	case VMMDevReq_SetHypervisorInfo:
+		return sizeof(VMMDevReqHypervisorInfo);
+	case VMMDevReq_RegisterPatchMemory:
+	case VMMDevReq_DeregisterPatchMemory:
+		return sizeof(VMMDevReqPatchMemory);
+	case VMMDevReq_SetPowerStatus:
+		return sizeof(VMMDevPowerStateRequest);
+	case VMMDevReq_AcknowledgeEvents:
+		return sizeof(VMMDevEvents);
+	case VMMDevReq_ReportGuestInfo:
+		return sizeof(VMMDevReportGuestInfo);
+	case VMMDevReq_ReportGuestInfo2:
+		return sizeof(VMMDevReportGuestInfo2);
+	case VMMDevReq_ReportGuestStatus:
+		return sizeof(VMMDevReportGuestStatus);
+	case VMMDevReq_ReportGuestUserState:
+		return sizeof(VMMDevReportGuestUserState);
+	case VMMDevReq_GetDisplayChangeRequest:
+		return sizeof(VMMDevDisplayChangeRequest);
+	case VMMDevReq_GetDisplayChangeRequest2:
+		return sizeof(VMMDevDisplayChangeRequest2);
+	case VMMDevReq_GetDisplayChangeRequestEx:
+		return sizeof(VMMDevDisplayChangeRequestEx);
+	case VMMDevReq_VideoModeSupported:
+		return sizeof(VMMDevVideoModeSupportedRequest);
+	case VMMDevReq_GetHeightReduction:
+		return sizeof(VMMDevGetHeightReductionRequest);
+	case VMMDevReq_ReportGuestCapabilities:
+		return sizeof(VMMDevReqGuestCapabilities);
+	case VMMDevReq_SetGuestCapabilities:
+		return sizeof(VMMDevReqGuestCapabilities2);
+	case VMMDevReq_HGCMConnect:
+		return sizeof(VMMDevHGCMConnect);
+	case VMMDevReq_HGCMDisconnect:
+		return sizeof(VMMDevHGCMDisconnect);
+	/* Both HGCM call flavours share one request struct. */
+	case VMMDevReq_HGCMCall32:
+		return sizeof(VMMDevHGCMCall);
+	case VMMDevReq_HGCMCall64:
+		return sizeof(VMMDevHGCMCall);
+	case VMMDevReq_HGCMCancel:
+		return sizeof(VMMDevHGCMCancel);
+	case VMMDevReq_VideoAccelEnable:
+		return sizeof(VMMDevVideoAccelEnable);
+	case VMMDevReq_VideoAccelFlush:
+		return sizeof(VMMDevVideoAccelFlush);
+	case VMMDevReq_VideoSetVisibleRegion:
+		/*
+		 * The original protocol didn't consider a guest with NO visible
+		 * windows.
+		 */
+		return sizeof(VMMDevVideoSetVisibleRegion) - sizeof(RTRECT);
+	case VMMDevReq_GetSeamlessChangeRequest:
+		return sizeof(VMMDevSeamlessChangeRequest);
+	case VMMDevReq_QueryCredentials:
+		return sizeof(VMMDevCredentials);
+	case VMMDevReq_ReportGuestStats:
+		return sizeof(VMMDevReportGuestStats);
+	case VMMDevReq_GetMemBalloonChangeRequest:
+		return sizeof(VMMDevGetMemBalloonChangeRequest);
+	case VMMDevReq_GetStatisticsChangeRequest:
+		return sizeof(VMMDevGetStatisticsChangeRequest);
+	case VMMDevReq_ChangeMemBalloon:
+		return sizeof(VMMDevChangeMemBalloon);
+	case VMMDevReq_GetVRDPChangeRequest:
+		return sizeof(VMMDevVRDPChangeRequest);
+	case VMMDevReq_LogString:
+		return sizeof(VMMDevReqLogString);
+	case VMMDevReq_CtlGuestFilterMask:
+		return sizeof(VMMDevCtlGuestFilterMask);
+	case VMMDevReq_GetCpuHotPlugRequest:
+		return sizeof(VMMDevGetCpuHotPlugRequest);
+	case VMMDevReq_SetCpuHotPlugStatus:
+		return sizeof(VMMDevCpuHotPlugStatusRequest);
+	case VMMDevReq_RegisterSharedModule:
+		return sizeof(VMMDevSharedModuleRegistrationRequest);
+	case VMMDevReq_UnregisterSharedModule:
+		return sizeof(VMMDevSharedModuleUnregistrationRequest);
+	case VMMDevReq_CheckSharedModules:
+		return sizeof(VMMDevSharedModuleCheckRequest);
+	case VMMDevReq_GetPageSharingStatus:
+		return sizeof(VMMDevPageSharingStatusRequest);
+	case VMMDevReq_DebugIsPageShared:
+		return sizeof(VMMDevPageIsSharedRequest);
+	case VMMDevReq_GetSessionId:
+		return sizeof(VMMDevReqSessionId);
+	case VMMDevReq_HeartbeatConfigure:
+		return sizeof(VMMDevReqHeartbeat);
+	case VMMDevReq_GuestHeartbeat:
+		return sizeof(VMMDevRequestHeader);
+	/* Unknown / unsupported request type: caller treats 0 as invalid. */
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * Verifies a VMMDev request against the size of the buffer holding it
+ * and the minimum / maximum size allowed for its request type.
+ *
+ * @returns VBox status code: VINF_SUCCESS, VERR_INVALID_PARAMETER,
+ * VERR_BUFFER_OVERFLOW or VERR_IO_BAD_LENGTH.
+ *
+ * @param req The request to verify.
+ * @param buffer_size The size of the buffer req lives in.
+ */
+int vbg_req_verify(const VMMDevRequestHeader *req, size_t buffer_size)
+{
+	size_t min_size;
+
+	if (!req || buffer_size < sizeof(VMMDevRequestHeader)) {
+		vbg_debug("VbglGRVerify: Invalid parameter: req = %p, buffer_size = %zu\n",
+			  req, buffer_size);
+		return VERR_INVALID_PARAMETER;
+	}
+
+	if (req->size > buffer_size) {
+		vbg_debug("VbglGRVerify: request size %u > buffer size %zu\n",
+			  req->size, buffer_size);
+		return VERR_INVALID_PARAMETER;
+	}
+
+	/* The request size must correspond to the request type. */
+	min_size = vbg_req_get_min_size(req);
+
+	if (buffer_size < min_size) {
+		vbg_debug("VbglGRVerify: buffer size %zu < expected size %zu\n",
+			  buffer_size, min_size);
+		return VERR_INVALID_PARAMETER;
+	}
+
+	if (req->size < min_size) {
+		vbg_debug("VbglGRVerify: header size %u < expected size %zu\n",
+			  req->size, min_size);
+		return VERR_INVALID_PARAMETER;
+	}
+
+	if (buffer_size == min_size) {
+		/*
+		 * This is most likely a fixed size request, and in this case the
+		 * request size must be also equal to the expected size.
+		 */
+		if (req->size != min_size) {
+			vbg_debug("VbglGRVerify: request size %u != expected size %zu\n",
+				  req->size, min_size);
+			return VERR_INVALID_PARAMETER;
+		}
+
+		return VINF_SUCCESS;
+	}
+
+	/*
+	 * This can be a variable size request. Check the request type and limit
+	 * the size to VMMDEV_MAX_VMMDEVREQ_SIZE, which is max size supported by
+	 * the host.
+	 *
+	 * Note: Keep this list sorted for easier human lookup!
+	 */
+	if (req->requestType == VMMDevReq_ChangeMemBalloon ||
+	    req->requestType == VMMDevReq_HGCMCall32 ||
+	    req->requestType == VMMDevReq_HGCMCall64 ||
+	    req->requestType == VMMDevReq_RegisterSharedModule ||
+	    req->requestType == VMMDevReq_ReportGuestUserState ||
+	    req->requestType == VMMDevReq_LogString ||
+	    req->requestType == VMMDevReq_SetPointerShape ||
+	    req->requestType == VMMDevReq_VideoSetVisibleRegion) {
+		if (buffer_size > VMMDEV_MAX_VMMDEVREQ_SIZE) {
+			/*
+			 * Fixed: the message wrongly claimed
+			 * "VMMDevReq_LogString" for all eight types above;
+			 * log the actual request type instead.
+			 */
+			vbg_debug("VbglGRVerify: variable size request %#08x: buffer size %zu too big\n",
+				  req->requestType, buffer_size);
+			return VERR_BUFFER_OVERFLOW;
+		}
+	} else {
+		vbg_debug("VbglGRVerify: unknown request-type %#08x\n",
+			  req->requestType);
+		return VERR_IO_BAD_LENGTH; /* ??? */
+	}
+
+	return VINF_SUCCESS;
+}
+
+/**
+ * Allocates a VMMDev request of the given size with an initialized header.
+ *
+ * @returns the request, or NULL on allocation failure; free with kfree().
+ */
+void *vbg_req_alloc(size_t len, VMMDevRequestType req_type)
+{
+	VMMDevRequestHeader *hdr;
+
+	/* __GFP_DMA32: presumably the host wants the request below 4G,
+	 * matching the ioctl path's VMMREQUEST allocation */
+	hdr = kmalloc(len, GFP_KERNEL | __GFP_DMA32);
+	if (hdr == NULL)
+		return NULL;
+
+	/* Poison the body so uninitialized fields are easy to spot */
+	memset(hdr, 0xaa, len);
+
+	hdr->size = len;
+	hdr->version = VMMDEV_REQUEST_HEADER_VERSION;
+	hdr->requestType = req_type;
+	hdr->rc = VERR_GENERAL_FAILURE;
+	hdr->reserved1 = 0;
+	hdr->reserved2 = 0;
+
+	return hdr;
+}
+
+/* Note this function returns a VBox status code, not a negative errno!! */
+/*
+ * Submits a request to the host by writing its physical address to the
+ * VMMDev request I/O port, then reads the status the host stored in the
+ * request header.  NOTE(review): reading ->rc right after outl() implies
+ * the host handles the port write before it returns -- confirm for async
+ * (HGCM) requests, which additionally report VINF_HGCM_ASYNC_EXECUTE.
+ */
+int vbg_req_perform(VBOXGUESTDEVEXT *gdev, void *req)
+{
+	unsigned long phys_req = virt_to_phys(req);
+
+	outl(phys_req, gdev->IOPortBase + VMMDEV_PORT_OFF_REQUEST);
+	/* Make sure the host's writes to the request are visible to us */
+	mb();
+
+	return ((VMMDevRequestHeader *)req)->rc;
+}
+
+/* Checks, under the event_spinlock, whether the host has flagged the
+ * given HGCM request as done. */
+static bool hgcm_req_done(VBOXGUESTDEVEXT *gdev,
+			  VMMDevHGCMRequestHeader *header)
+{
+	unsigned long flags;
+	bool is_done;
+
+	spin_lock_irqsave(&gdev->event_spinlock, flags);
+	is_done = (header->fu32Flags & VBOX_HGCM_REQ_DONE) != 0;
+	spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+	return is_done;
+}
+
+/**
+ * Connects to an HGCM service on the host.
+ *
+ * @returns VBox status code (not a negative errno!).
+ * @param loc The service location.
+ * @param client_id Where to store the connection's client id on success.
+ */
+int vbg_hgcm_connect(VBOXGUESTDEVEXT *gdev, HGCMServiceLocation * loc,
+		     u32 * client_id)
+{
+	VMMDevHGCMConnect *conn;
+	int rc;
+
+	conn = vbg_req_alloc(sizeof(*conn), VMMDevReq_HGCMConnect);
+	if (!conn)
+		return VERR_NO_MEMORY;
+
+	conn->header.fu32Flags = 0;
+	memcpy(&conn->loc, loc, sizeof(*loc));
+	conn->u32ClientID = 0;
+
+	rc = vbg_req_perform(gdev, conn);
+
+	/* HGCM requests may complete asynchronously; wait for the flag */
+	if (rc == VINF_HGCM_ASYNC_EXECUTE)
+		wait_event(gdev->hgcm_wq, hgcm_req_done(gdev, &conn->header));
+
+	if (rc >= 0) {
+		*client_id = conn->u32ClientID;
+		rc = conn->header.result;
+	}
+
+	kfree(conn);
+
+	return rc;
+}
+EXPORT_SYMBOL(vbg_hgcm_connect);
+
+/**
+ * Disconnects the given HGCM client id from its host service.
+ *
+ * @returns VBox status code (not a negative errno!).
+ */
+int vbg_hgcm_disconnect(VBOXGUESTDEVEXT *gdev, u32 client_id)
+{
+	VMMDevHGCMDisconnect *disconn;
+	int rc;
+
+	disconn = vbg_req_alloc(sizeof(*disconn), VMMDevReq_HGCMDisconnect);
+	if (!disconn)
+		return VERR_NO_MEMORY;
+
+	disconn->header.fu32Flags = 0;
+	disconn->u32ClientID = client_id;
+
+	rc = vbg_req_perform(gdev, disconn);
+
+	/* HGCM requests may complete asynchronously; wait for the flag */
+	if (rc == VINF_HGCM_ASYNC_EXECUTE)
+		wait_event(gdev->hgcm_wq,
+			   hgcm_req_done(gdev, &disconn->header));
+
+	if (rc >= 0)
+		rc = disconn->header.result;
+
+	kfree(disconn);
+
+	return rc;
+}
+EXPORT_SYMBOL(vbg_hgcm_disconnect);
+
+/* Number of pages spanned by a buffer of len bytes starting at buf. */
+static u32 hgcm_call_buf_size_in_pages(void *buf, u32 len)
+{
+	u32 offset = (unsigned long)buf & ~PAGE_MASK;
+
+	return PAGE_ALIGN(offset + len) >> PAGE_SHIFT;
+}
+
+/* Grows *pcb_extra by the size of a HGCMPageListInfo covering buf/len. */
+static void hgcm_call_inc_pcb_extra(void *buf, u32 len, size_t * pcb_extra)
+{
+	u32 page_count = hgcm_call_buf_size_in_pages(buf, len);
+
+	*pcb_extra += offsetof(HGCMPageListInfo, aPages[page_count]);
+}
+
+/* Kernel mode use only, use WARN_ON for sanity checks. */
+/*
+ * Validates an in-place page-list parameter of a kernel HGCM call and
+ * adds the room needed for its copy to *pcb_extra.
+ */
+static int hgcm_call_check_pagelist(const HGCMFunctionParameter *src_parm,
+	const VBoxGuestHGCMCallInfo *callinfo, u32 callinfo_size,
+	size_t *pcb_extra)
+{
+	HGCMPageListInfo *pg_lst;
+	u32 u, offset, size;
+
+	offset = src_parm->u.PageList.offset;
+	size = src_parm->u.PageList.size;
+	/* An empty page list needs no validation and no extra space. */
+	if (!size)
+		return VINF_SUCCESS;
+
+	if (WARN_ON(size > VBGLR0_MAX_HGCM_KERNEL_PARM))
+		return VERR_OUT_OF_RANGE;
+
+	/* The page list must live after the parameters, inside callinfo. */
+	if (WARN_ON(offset < callinfo->cParms * sizeof(HGCMFunctionParameter) ||
+	    offset > callinfo_size - sizeof(HGCMPageListInfo)))
+		return VERR_INVALID_PARAMETER;
+
+	pg_lst = (HGCMPageListInfo *)((u8 *)callinfo + offset);
+
+	/* The whole aPages[] array must also fit inside callinfo. */
+	u = offset + offsetof(HGCMPageListInfo, aPages[pg_lst->cPages]);
+	if (WARN_ON(u > callinfo_size))
+		return VERR_INVALID_PARAMETER;
+
+	if (WARN_ON(pg_lst->offFirstPage >= PAGE_SIZE))
+		return VERR_INVALID_PARAMETER;
+
+	/* cPages must match the size / first-page-offset combination. */
+	u = PAGE_ALIGN(pg_lst->offFirstPage + size) >> PAGE_SHIFT;
+	if (WARN_ON(u != pg_lst->cPages))
+		return VERR_INVALID_PARAMETER;
+
+	if (WARN_ON(!VBOX_HGCM_F_PARM_ARE_VALID(pg_lst->flags)))
+		return VERR_INVALID_PARAMETER;
+
+	/* Each entry must be page aligned and below 2^52. */
+	for (u = 0; u < pg_lst->cPages; u++) {
+		if (WARN_ON(pg_lst->aPages[u] &
+		    (0xfff0000000000000ULL | ~PAGE_MASK)))
+			return VERR_INVALID_PARAMETER;
+	}
+
+	*pcb_extra += offsetof(HGCMPageListInfo, aPages[pg_lst->cPages]);
+
+	return VINF_SUCCESS;
+}
+
+/*
+ * Preprocesses a linear-address parameter: validates its size and, for
+ * user calls, allocates a bounce buffer and copies the user data in
+ * (out-only parameters get a zeroed buffer instead).
+ *
+ * @returns VBox status code.
+ * Fixed: bounce_buf was leaked when copy_from_user failed -- it was
+ * neither freed nor stored in *bounce_buf_ret for the caller to free.
+ */
+static int hgcm_call_preprocess_linaddr(const HGCMFunctionParameter *src_parm,
+					bool is_user, void **bounce_buf_ret,
+					size_t *pcb_extra)
+{
+	void *buf, *bounce_buf;
+	bool copy_in;
+	u32 len;
+	int ret;
+
+	buf = (void *)src_parm->u.Pointer.u.linearAddr;
+	len = src_parm->u.Pointer.size;
+	copy_in = src_parm->type != VMMDevHGCMParmType_LinAddr_Out;
+
+	if (!is_user) {
+		if (WARN_ON(len > VBGLR0_MAX_HGCM_KERNEL_PARM))
+			return VERR_OUT_OF_RANGE;
+
+		hgcm_call_inc_pcb_extra(buf, len, pcb_extra);
+		return VINF_SUCCESS;
+	}
+
+	if (len > VBGLR0_MAX_HGCM_USER_PARM)
+		return VERR_OUT_OF_RANGE;
+
+	if (len <= PAGE_SIZE * 2)
+		bounce_buf = kmalloc(len, GFP_KERNEL);
+	else
+		bounce_buf = vmalloc(len);
+
+	if (!bounce_buf)
+		return VERR_NO_MEMORY;
+
+	if (copy_in) {
+		ret = copy_from_user(bounce_buf, (void __user *)buf, len);
+		if (ret) {
+			/* Fixed: free the bounce buffer on copy failure */
+			if (is_vmalloc_addr(bounce_buf))
+				vfree(bounce_buf);
+			else
+				kfree(bounce_buf);
+			return VERR_ACCESS_DENIED;
+		}
+	} else {
+		memset(bounce_buf, 0, len);
+	}
+
+	*bounce_buf_ret = bounce_buf;
+	hgcm_call_inc_pcb_extra(bounce_buf, len, pcb_extra);
+	return VINF_SUCCESS;
+}
+
+/**
+ * Preprocesses the HGCM call, validate parameters, alloc bounce buffers and
+ * figure out how much extra storage we need for page lists.
+ *
+ * @returns VBox status code
+ *
+ * @param call The call info.
+ * @param call_size The size of the call info structure.
+ * @param is_user Is it a user request or kernel request.
+ * @param bounce_bufs_ret Where to return the allocated bouncebuffer array
+ * @param pcb_extra Where to return the extra request space needed for
+ * physical page lists.
+ *
+ * NOTE(review): on failure some bounce buffers may already have been
+ * allocated; presumably the caller frees *bounce_bufs_ret and its
+ * entries on error -- confirm in vbg_hgcm_call.
+ */
+static int hgcm_call_preprocess(const VBoxGuestHGCMCallInfo *call,
+	u32 call_size, bool is_user, void ***bounce_bufs_ret, size_t *pcb_extra)
+{
+	const HGCMFunctionParameter *src_parm = VBOXGUEST_HGCM_CALL_PARMS(call);
+	u32 i, parms = call->cParms;
+	void **bounce_bufs = NULL;
+	int rc;
+
+	*bounce_bufs_ret = NULL;
+	*pcb_extra = 0;
+
+	for (i = 0; i < parms; i++, src_parm++) {
+		switch (src_parm->type) {
+		case VMMDevHGCMParmType_32bit:
+		case VMMDevHGCMParmType_64bit:
+			break;
+
+		case VMMDevHGCMParmType_PageList:
+			/* In-place page lists are a kernel-only feature. */
+			if (is_user)
+				return VERR_INVALID_PARAMETER;
+
+			rc = hgcm_call_check_pagelist(src_parm, call,
+						      call_size, pcb_extra);
+			if (rc)
+				return rc;
+
+			break;
+
+		case VMMDevHGCMParmType_LinAddr_In:
+		case VMMDevHGCMParmType_LinAddr_Out:
+		case VMMDevHGCMParmType_LinAddr:
+			/* Lazily allocate the bounce-buffer array on first
+			 * linear-address parameter of a user call. */
+			if (is_user && !bounce_bufs) {
+				bounce_bufs =
+				    (void **)kcalloc(parms, sizeof(void *),
+						     GFP_KERNEL);
+				if (!bounce_bufs)
+					return VERR_NO_MEMORY;
+
+				*bounce_bufs_ret = bounce_bufs;
+			}
+
+			/* NOTE(review): for kernel calls bounce_bufs is NULL
+			 * here, so &bounce_bufs[i] is computed from a NULL
+			 * base; the !is_user path never dereferences it, but
+			 * this is technically undefined behavior. */
+			rc = hgcm_call_preprocess_linaddr(src_parm, is_user,
+							  &bounce_bufs[i],
+							  pcb_extra);
+			if (rc)
+				return rc;
+
+			break;
+
+		default:
+			return VERR_INVALID_PARAMETER;
+		}
+	}
+
+	return VINF_SUCCESS;
+}
+
+/**
+ * Translates linear address types to page list direction flags.
+ *
+ * @returns page list flags.
+ * @param enmType The type.
+ */
+static u32
+vbglR0HGCMInternalLinAddrTypeToPageListFlags(HGCMFunctionParameterType enmType)
+{
+	switch (enmType) {
+	case VMMDevHGCMParmType_LinAddr_In:
+		return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
+	case VMMDevHGCMParmType_LinAddr_Out:
+		return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
+	case VMMDevHGCMParmType_LinAddr:
+		return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
+	default:
+		/* Unexpected type: warn and treat as bidirectional. */
+		WARN_ON(1);
+		return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
+	}
+}
+
+/*
+ * Fills in a destination HGCM parameter for a linear-address argument,
+ * converting it into a physical page list stored at *off_extra inside
+ * the host request; *off_extra is advanced past the page list.
+ */
+static void hgcm_call_init_linaddr(VMMDevHGCMCall *call,
+				   HGCMFunctionParameter *dst_parm,
+				   void *buf, u32 len,
+				   HGCMFunctionParameterType type,
+				   u32 *off_extra)
+{
+	HGCMPageListInfo *dst_pg_lst;
+	struct page *page;
+	bool is_vmalloc;
+	u32 i, pages;
+
+	dst_parm->type = type;
+
+	/* Zero-length buffers are passed through as empty pointers. */
+	if (len == 0) {
+		dst_parm->u.Pointer.size = 0;
+		dst_parm->u.Pointer.u.linearAddr = 0;
+		return;
+	}
+
+	dst_pg_lst = (void *)call + *off_extra;
+	pages = hgcm_call_buf_size_in_pages(buf, len);
+	/* vmalloc memory needs per-page physical-address lookups. */
+	is_vmalloc = is_vmalloc_addr(buf);
+
+	dst_parm->type = VMMDevHGCMParmType_PageList;
+	dst_parm->u.PageList.size = len;
+	dst_parm->u.PageList.offset = *off_extra;
+	dst_pg_lst->flags = vbglR0HGCMInternalLinAddrTypeToPageListFlags(type);
+	dst_pg_lst->offFirstPage = (unsigned long)buf & ~PAGE_MASK;
+	dst_pg_lst->cPages = pages;
+
+	for (i = 0; i < pages; i++) {
+		if (is_vmalloc)
+			page = vmalloc_to_page(buf);
+		else
+			page = virt_to_page(buf);
+
+		dst_pg_lst->aPages[i] = page_to_phys(page);
+		buf += PAGE_SIZE;
+	}
+
+	*off_extra += offsetof(HGCMPageListInfo, aPages[pages]);
+}
+
+/**
+ * Initializes the call request that we're sending to the host.
+ *
+ * Copies the parameters from pCallInfo into pHGCMCall; linear-address
+ * and page-list parameters are materialized as page lists in the extra
+ * space after the parameter array (tracked via offExtra).
+ *
+ * @param pHGCMCall The call to initialize.
+ * @param pCallInfo The call info.
+ * @param cbCallInfo The size of the call info structure.
+ * (NOTE(review): cbCallInfo is currently unused in this function; the
+ * original comment also documented a non-existent fIsUser parameter.)
+ * @param bounce_bufs The bouncebuffer array, or NULL for kernel calls.
+ */
+static void vbglR0HGCMInternalInitCall(VMMDevHGCMCall * pHGCMCall,
+				       VBoxGuestHGCMCallInfo const *pCallInfo,
+				       u32 cbCallInfo, void **bounce_bufs)
+{
+	HGCMFunctionParameter const *pSrcParm =
+	    VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
+	HGCMFunctionParameter *pDstParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
+	u32 cParms = pCallInfo->cParms;
+	/* Extra space (page lists) starts right after the dst parameters. */
+	u32 offExtra =
+	    (u32) ((uintptr_t) (pDstParm + cParms) -
+		   (uintptr_t) pHGCMCall);
+	u32 iParm;
+	void *buf;
+
+	/*
+	 * The call request headers.
+	 */
+	pHGCMCall->header.fu32Flags = 0;
+	pHGCMCall->header.result = VINF_SUCCESS;
+
+	pHGCMCall->u32ClientID = pCallInfo->u32ClientID;
+	pHGCMCall->u32Function = pCallInfo->u32Function;
+	pHGCMCall->cParms = cParms;
+
+	/*
+	 * The parameters.
+	 */
+	for (iParm = 0; iParm < pCallInfo->cParms;
+	     iParm++, pSrcParm++, pDstParm++) {
+		switch (pSrcParm->type) {
+		case VMMDevHGCMParmType_32bit:
+		case VMMDevHGCMParmType_64bit:
+			*pDstParm = *pSrcParm;
+			break;
+
+		case VMMDevHGCMParmType_PageList:
+			/* Copy the caller-provided page list into the
+			 * extra space of the host request. */
+			pDstParm->type = VMMDevHGCMParmType_PageList;
+			pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
+			if (pSrcParm->u.PageList.size) {
+				HGCMPageListInfo const *pSrcPgLst =
+				    (HGCMPageListInfo *) ((u8 *) pCallInfo
+							  +
+							  pSrcParm->u.PageList.
+							  offset);
+				HGCMPageListInfo *pDstPgLst =
+				    (HGCMPageListInfo *) ((u8 *) pHGCMCall
+							  + offExtra);
+				u32 const cPages = pSrcPgLst->cPages;
+				u32 iPage;
+
+				pDstParm->u.PageList.offset = offExtra;
+				pDstPgLst->flags = pSrcPgLst->flags;
+				pDstPgLst->offFirstPage =
+				    pSrcPgLst->offFirstPage;
+				pDstPgLst->cPages = cPages;
+				for (iPage = 0; iPage < cPages; iPage++)
+					pDstPgLst->aPages[iPage] =
+					    pSrcPgLst->aPages[iPage];
+
+				offExtra +=
+				    offsetof(HGCMPageListInfo,
+					     aPages[cPages]);
+			} else
+				pDstParm->u.PageList.offset = 0;
+			break;
+
+		case VMMDevHGCMParmType_LinAddr_In:
+		case VMMDevHGCMParmType_LinAddr_Out:
+		case VMMDevHGCMParmType_LinAddr:
+			/* Use the bounce buffer when one was allocated. */
+			if (bounce_bufs && bounce_bufs[iParm])
+				buf = bounce_bufs[iParm];
+			else
+				buf = (void *)pSrcParm->u.Pointer.u.linearAddr;
+
+			hgcm_call_init_linaddr(pHGCMCall, pDstParm, buf,
+					       pSrcParm->u.Pointer.size,
+					       pSrcParm->type, &offExtra);
+			break;
+
+		default:
+			WARN_ON(1);
+			pDstParm->type = VMMDevHGCMParmType_Invalid;
+		}
+	}
+}
+
+/*
+ * Cancel an in-flight HGCM call, e.g. after a timeout or signal.
+ *
+ * Note this function returns a VBox status code, not a negative errno!!
+ */
+static int hgcm_cancel_call(VBOXGUESTDEVEXT *gdev, VMMDevHGCMCall * call)
+{
+	int rc;
+
+	/*
+	 * We use a pre-allocated request for cancellations, which is
+	 * protected by cancel_req_mutex. This means that all cancellations
+	 * get serialized, this should be fine since they should be rare.
+	 */
+	mutex_lock(&gdev->cancel_req_mutex);
+	gdev->cancel_req->physReqToCancel = virt_to_phys(call);
+	rc = vbg_req_perform(gdev, gdev->cancel_req);
+	mutex_unlock(&gdev->cancel_req_mutex);
+
+	/*
+	 * Fallback for hosts that do not implement the pre-allocated cancel
+	 * request type: re-submit the call itself with its request type
+	 * changed to VMMDevReq_HGCMCancel.
+	 * NOTE(review): VERR_INVALID_PARAMETER presumably means the host no
+	 * longer knows the request, hence the VERR_NOT_FOUND translation —
+	 * confirm against the host-side implementation.
+	 */
+	/** @todo ADDVER: Remove this on next minor version change. */
+	if (rc == VERR_NOT_IMPLEMENTED) {
+		call->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
+		call->header.header.requestType = VMMDevReq_HGCMCancel;
+
+		rc = vbg_req_perform(gdev, call);
+		if (rc == VERR_INVALID_PARAMETER)
+			rc = VERR_NOT_FOUND;
+	}
+
+	if (rc >= 0)
+		call->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
+
+	return rc;
+}
+
+/**
+ * Performs the call and completion wait.
+ *
+ * @returns VBox status code
+ *
+ * @param gdev        The VBoxGuest device extension.
+ * @param call        The HGCM call info.
+ * @param timeout_ms  Timeout in ms, U32_MAX means wait indefinitely.
+ * @param is_user     Is this a call from userspace (interruptible wait)
+ *                    or an in-kernel call (uninterruptible wait)?
+ * @param leak_it     Output: set to true when cancelling a timed-out /
+ *                    interrupted call failed, meaning the host may still
+ *                    write to the request, so the caller must leak it
+ *                    instead of freeing it.
+ */
+static int vbg_hgcm_do_call(VBOXGUESTDEVEXT *gdev, VMMDevHGCMCall *call,
+			    u32 timeout_ms, bool is_user, bool *leak_it)
+{
+	long timeout;
+	int rc, cancel_rc;
+
+	*leak_it = false;
+
+	rc = vbg_req_perform(gdev, call);
+
+	/*
+	 * If the call failed, then pretend success.
+	 * Upper layers will interpret the result code in the packet.
+	 */
+	if (rc < 0) {
+		WARN_ON(!(call->header.fu32Flags & VBOX_HGCM_REQ_DONE));
+		return VINF_SUCCESS;
+	}
+
+	if (rc != VINF_HGCM_ASYNC_EXECUTE)
+		return rc;
+
+	/* Host decided to process the request asynchronously, wait for it */
+	if (timeout_ms == U32_MAX)
+		timeout = MAX_SCHEDULE_TIMEOUT;
+	else
+		timeout = msecs_to_jiffies(timeout_ms);
+
+	if (is_user) {
+		timeout = wait_event_interruptible_timeout(gdev->hgcm_wq,
+							   hgcm_req_done
+							   (gdev,
+							    &call->header),
+							   timeout);
+	} else {
+		timeout = wait_event_timeout(gdev->hgcm_wq,
+					     hgcm_req_done(gdev,
+							   &call->header),
+					     timeout);
+	}
+
+	/* timeout > 0 means hgcm_req_done has returned true, so success */
+	if (timeout > 0)
+		return VINF_SUCCESS;
+
+	/* timeout == 0 is a timeout, negative means interrupted by signal */
+	if (timeout == 0)
+		rc = VERR_TIMEOUT;
+	else
+		rc = VERR_INTERRUPTED;
+
+	/* Cancel the request */
+	cancel_rc = hgcm_cancel_call(gdev, call);
+	if (cancel_rc >= 0)
+		return rc;
+
+	/*
+	 * Failed to cancel, this should mean that the cancel has lost the
+	 * race with normal completion, wait while the host completes it.
+	 */
+	if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED)
+		timeout = msecs_to_jiffies(500);
+	else
+		timeout = msecs_to_jiffies(2000);
+
+	timeout = wait_event_timeout(gdev->hgcm_wq,
+				     hgcm_req_done(gdev, &call->header),
+				     timeout);
+
+	if (WARN_ON(timeout == 0)) {
+		/*
+		 * We really should never get here: the host still owns the
+		 * request, so freeing it would risk host writes into freed
+		 * memory; tell the caller to leak it instead.
+		 */
+		vbg_err("vbg_hgcm_do_call: Call timedout and cancelation failed, leaking the request\n");
+		*leak_it = true;
+		return rc;
+	}
+
+	/* The call has completed normally after all */
+	return VINF_SUCCESS;
+}
+
+/**
+ * Copies the result of the call back to the caller info structure and user
+ * buffers.
+ *
+ * For user requests that used bounce buffers, the (out) buffer contents are
+ * copied back to the original user-space addresses here.
+ *
+ * @returns VBox status code
+ * @param pCallInfo    Call info structure to update.
+ * @param pHGCMCall    HGCM call request.
+ * @param bounce_bufs  The bouncebuffer array, or NULL for kernel callers.
+ */
+static int vbglR0HGCMInternalCopyBackResult(VBoxGuestHGCMCallInfo * pCallInfo,
+					    VMMDevHGCMCall const *pHGCMCall,
+					    void **bounce_bufs)
+{
+	HGCMFunctionParameter const *pSrcParm =
+	    VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
+	HGCMFunctionParameter *pDstParm = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
+	u32 cParms = pCallInfo->cParms;
+	u32 iParm;
+	int ret;
+
+	/*
+	 * The call result.
+	 */
+	pCallInfo->result = pHGCMCall->header.result;
+
+	/*
+	 * Copy back parameters.
+	 */
+	for (iParm = 0; iParm < cParms; iParm++, pSrcParm++, pDstParm++) {
+		switch (pDstParm->type) {
+		case VMMDevHGCMParmType_32bit:
+		case VMMDevHGCMParmType_64bit:
+			*pDstParm = *pSrcParm;
+			break;
+
+		case VMMDevHGCMParmType_PageList:
+			/* Only the (possibly updated) size is copied back. */
+			pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
+			break;
+
+		case VMMDevHGCMParmType_LinAddr_In:
+			/* In-only buffers: nothing to copy back but the size. */
+			pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
+			break;
+
+		case VMMDevHGCMParmType_LinAddr_Out:
+		case VMMDevHGCMParmType_LinAddr:
+			/*
+			 * Out buffers for user requests went through a bounce
+			 * buffer; copy the data back to user-space, bounded
+			 * by the smaller of the two sizes.
+			 */
+			if (bounce_bufs && bounce_bufs[iParm]) {
+				ret = copy_to_user((void __user *)
+						   pDstParm->u.Pointer.u.
+						   linearAddr,
+						   bounce_bufs[iParm],
+						   min(pSrcParm->u.Pointer.size,
+						       pDstParm->u.Pointer.
+						       size));
+				if (ret)
+					return VERR_ACCESS_DENIED;
+			}
+			pDstParm->u.Pointer.size = pSrcParm->u.Pointer.size;
+			break;
+
+		default:
+			/* Preprocessing should have rejected other types. */
+			WARN_ON(1);
+			return VERR_INTERNAL_ERROR_4;
+		}
+	}
+
+	return VINF_SUCCESS;
+}
+
+/**
+ * Make an HGCM call to the host.
+ *
+ * @returns VBox status code
+ * @param gdev        The VBoxGuest device extension.
+ * @param pCallInfo   The call info structure; on success the host result
+ *                    code is stored in pCallInfo->result and buffer
+ *                    parameters are copied back.
+ * @param cbCallInfo  The size of the call info structure incl. parameters.
+ * @param timeout_ms  Timeout in ms, U32_MAX means wait indefinitely.
+ * @param is_user     Is this a call from userspace (bounce buffers get
+ *                    used and the completion wait is interruptible)?
+ */
+int vbg_hgcm_call(VBOXGUESTDEVEXT *gdev, VBoxGuestHGCMCallInfo *pCallInfo,
+		  u32 cbCallInfo, u32 timeout_ms, bool is_user)
+{
+	VMMDevHGCMCall *pHGCMCall;
+	void **bounce_bufs;
+	size_t cbExtra;
+	bool leak_it;
+	int i, rc;
+
+	/*
+	 * Validate, lock and buffer the parameters for the call.
+	 * This will calculate the amount of extra space for physical page list.
+	 */
+	rc = hgcm_call_preprocess(pCallInfo, cbCallInfo, is_user,
+				  &bounce_bufs, &cbExtra);
+	if (rc) {
+		/* Even on error bounce bufs may still have been allocated */
+		goto free_bounce_bufs;
+	}
+
+	pHGCMCall = vbg_req_alloc(sizeof(VMMDevHGCMCall) + pCallInfo->cParms *
+				  sizeof(HGCMFunctionParameter) + cbExtra,
+				  VMMDevReq_HGCMCall);
+	if (!pHGCMCall) {
+		rc = VERR_NO_MEMORY;
+		goto free_bounce_bufs;
+	}
+
+	vbglR0HGCMInternalInitCall(pHGCMCall, pCallInfo, cbCallInfo, bounce_bufs);
+
+	rc = vbg_hgcm_do_call(gdev, pHGCMCall, timeout_ms, is_user, &leak_it);
+	if (rc >= 0)
+		rc = vbglR0HGCMInternalCopyBackResult(pCallInfo, pHGCMCall,
+						      bounce_bufs);
+
+	/*
+	 * If cancelling a timed-out call failed the host may still write to
+	 * the request, in that case we must leak it (see vbg_hgcm_do_call).
+	 */
+	if (!leak_it)
+		kfree(pHGCMCall);
+
+free_bounce_bufs:
+	if (bounce_bufs) {
+		/* kvfree() handles both kmalloc'd and vmalloc'd buffers */
+		for (i = 0; i < pCallInfo->cParms; i++)
+			kvfree(bounce_bufs[i]);
+		kfree(bounce_bufs);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(vbg_hgcm_call);
+
+#ifdef CONFIG_X86_64
+/**
+ * Make an HGCM call using the 32-bit parameter layout on a 64-bit kernel
+ * (NOTE(review): presumably used for 32-bit compat ioctls — confirm caller).
+ *
+ * Converts the HGCMFunctionParameter32 parameters into a temporary native
+ * 64-bit call info, performs the call through vbg_hgcm_call() and converts
+ * the results back into the caller's 32-bit layout.
+ *
+ * @returns VBox status code
+ * @param gdev        The VBoxGuest device extension.
+ * @param pCallInfo   Call info structure holding 32-bit parameters.
+ * @param cbCallInfo  The size of the call info structure.
+ *                    NOTE(review): unused here, the 64-bit size is
+ *                    recomputed from cParms — confirm.
+ * @param timeout_ms  Timeout in ms, U32_MAX means wait indefinitely.
+ * @param is_user     Is it a user request or kernel request.
+ */
+int vbg_hgcm_call32(VBOXGUESTDEVEXT *gdev, VBoxGuestHGCMCallInfo * pCallInfo,
+		    u32 cbCallInfo, u32 timeout_ms, bool is_user)
+{
+	VBoxGuestHGCMCallInfo *pCallInfo64 = NULL;
+	HGCMFunctionParameter *pParm64 = NULL;
+	HGCMFunctionParameter32 *pParm32 = NULL;
+	u32 cParms = pCallInfo->cParms;
+	u32 iParm;
+	int rc = VINF_SUCCESS;
+
+	/*
+	 * The simple approach, allocate a temporary request and convert the parameters.
+	 */
+	pCallInfo64 = kzalloc(sizeof(*pCallInfo64) +
+			      cParms * sizeof(HGCMFunctionParameter),
+			      GFP_KERNEL);
+	if (!pCallInfo64)
+		return VERR_NO_MEMORY;
+
+	/* Copy the fixed-size header, then convert each parameter. */
+	*pCallInfo64 = *pCallInfo;
+	pParm32 = VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo);
+	pParm64 = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo64);
+	for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++) {
+		switch (pParm32->type) {
+		case VMMDevHGCMParmType_32bit:
+			pParm64->type = VMMDevHGCMParmType_32bit;
+			pParm64->u.value32 = pParm32->u.value32;
+			break;
+
+		case VMMDevHGCMParmType_64bit:
+			pParm64->type = VMMDevHGCMParmType_64bit;
+			pParm64->u.value64 = pParm32->u.value64;
+			break;
+
+		case VMMDevHGCMParmType_LinAddr_Out:
+		case VMMDevHGCMParmType_LinAddr:
+		case VMMDevHGCMParmType_LinAddr_In:
+			/* 32-bit linear addresses zero-extend to 64-bit. */
+			pParm64->type = pParm32->type;
+			pParm64->u.Pointer.size = pParm32->u.Pointer.size;
+			pParm64->u.Pointer.u.linearAddr =
+			    pParm32->u.Pointer.u.linearAddr;
+			break;
+
+		default:
+			rc = VERR_INVALID_PARAMETER;
+		}
+		if (rc < 0)
+			goto out_free;
+	}
+
+	rc = vbg_hgcm_call(gdev, pCallInfo64,
+			   sizeof(*pCallInfo64) +
+			   cParms * sizeof(HGCMFunctionParameter),
+			   timeout_ms, is_user);
+	if (rc >= 0) {
+		*pCallInfo = *pCallInfo64;
+
+		/*
+		 * Copy back the result values / sizes into the caller's
+		 * 32-bit parameter layout.
+		 */
+		pParm32 = VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo);
+		pParm64 = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo64);
+		for (iParm = 0; iParm < cParms;
+		     iParm++, pParm32++, pParm64++) {
+			switch (pParm64->type) {
+			case VMMDevHGCMParmType_32bit:
+				pParm32->u.value32 = pParm64->u.value32;
+				break;
+
+			case VMMDevHGCMParmType_64bit:
+				pParm32->u.value64 = pParm64->u.value64;
+				break;
+
+			case VMMDevHGCMParmType_LinAddr_Out:
+			case VMMDevHGCMParmType_LinAddr:
+			case VMMDevHGCMParmType_LinAddr_In:
+				pParm32->u.Pointer.size =
+				    pParm64->u.Pointer.size;
+				break;
+			default:
+				/* Cannot happen, types were validated above */
+				WARN_ON(1);
+				rc = VERR_INTERNAL_ERROR_3;
+			}
+		}
+	}
+
+out_free:
+	kfree(pCallInfo64);
+	return rc;
+}
+#endif
+
+/**
+ * Convert a VBox status code (VERR_*) to a standard Linux errno value.
+ *
+ * Note: this returns 0 for success codes (rc >= 0) and a POSITIVE errno
+ * value otherwise; callers that need the usual negative kernel errno must
+ * negate the result themselves.
+ */
+int vbg_status_code_to_errno(int rc)
+{
+	if (rc >= 0)
+		return 0;
+
+	switch (rc) {
+	case VERR_ACCESS_DENIED: return EPERM;
+	case VERR_FILE_NOT_FOUND: return ENOENT;
+	case VERR_PROCESS_NOT_FOUND: return ESRCH;
+	case VERR_INTERRUPTED: return EINTR;
+	case VERR_DEV_IO_ERROR: return EIO;
+	case VERR_TOO_MUCH_DATA: return E2BIG;
+	case VERR_BAD_EXE_FORMAT: return ENOEXEC;
+	case VERR_INVALID_HANDLE: return EBADF;
+	case VERR_TRY_AGAIN: return EAGAIN;
+	case VERR_NO_MEMORY: return ENOMEM;
+	case VERR_INVALID_POINTER: return EFAULT;
+	case VERR_RESOURCE_BUSY: return EBUSY;
+	case VERR_ALREADY_EXISTS: return EEXIST;
+	case VERR_NOT_SAME_DEVICE: return EXDEV;
+	case VERR_NOT_A_DIRECTORY:
+	case VERR_PATH_NOT_FOUND: return ENOTDIR;
+	case VERR_IS_A_DIRECTORY: return EISDIR;
+	case VERR_INVALID_PARAMETER: return EINVAL;
+	case VERR_TOO_MANY_OPEN_FILES: return ENFILE;
+	case VERR_INVALID_FUNCTION: return ENOTTY;
+	case VERR_SHARING_VIOLATION: return ETXTBSY;
+	case VERR_FILE_TOO_BIG: return EFBIG;
+	case VERR_DISK_FULL: return ENOSPC;
+	case VERR_SEEK_ON_DEVICE: return ESPIPE;
+	case VERR_WRITE_PROTECT: return EROFS;
+	case VERR_BROKEN_PIPE: return EPIPE;
+	case VERR_DEADLOCK: return EDEADLK;
+	case VERR_FILENAME_TOO_LONG: return ENAMETOOLONG;
+	case VERR_FILE_LOCK_FAILED: return ENOLCK;
+	case VERR_NOT_IMPLEMENTED:
+	case VERR_NOT_SUPPORTED: return ENOSYS;
+	case VERR_DIR_NOT_EMPTY: return ENOTEMPTY;
+	case VERR_TOO_MANY_SYMLINKS: return ELOOP;
+	case VERR_NO_DATA: return ENODATA;
+	case VERR_NET_NO_NETWORK: return ENONET;
+	case VERR_NET_NOT_UNIQUE_NAME: return ENOTUNIQ;
+	case VERR_NO_TRANSLATION: return EILSEQ;
+	case VERR_NET_NOT_SOCKET: return ENOTSOCK;
+	case VERR_NET_DEST_ADDRESS_REQUIRED: return EDESTADDRREQ;
+	case VERR_NET_MSG_SIZE: return EMSGSIZE;
+	case VERR_NET_PROTOCOL_TYPE: return EPROTOTYPE;
+	case VERR_NET_PROTOCOL_NOT_AVAILABLE: return ENOPROTOOPT;
+	case VERR_NET_PROTOCOL_NOT_SUPPORTED: return EPROTONOSUPPORT;
+	case VERR_NET_SOCKET_TYPE_NOT_SUPPORTED: return ESOCKTNOSUPPORT;
+	case VERR_NET_OPERATION_NOT_SUPPORTED: return EOPNOTSUPP;
+	case VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED: return EPFNOSUPPORT;
+	case VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED: return EAFNOSUPPORT;
+	case VERR_NET_ADDRESS_IN_USE: return EADDRINUSE;
+	case VERR_NET_ADDRESS_NOT_AVAILABLE: return EADDRNOTAVAIL;
+	case VERR_NET_DOWN: return ENETDOWN;
+	case VERR_NET_UNREACHABLE: return ENETUNREACH;
+	case VERR_NET_CONNECTION_RESET: return ENETRESET;
+	case VERR_NET_CONNECTION_ABORTED: return ECONNABORTED;
+	case VERR_NET_CONNECTION_RESET_BY_PEER: return ECONNRESET;
+	case VERR_NET_NO_BUFFER_SPACE: return ENOBUFS;
+	case VERR_NET_ALREADY_CONNECTED: return EISCONN;
+	case VERR_NET_NOT_CONNECTED: return ENOTCONN;
+	case VERR_NET_SHUTDOWN: return ESHUTDOWN;
+	case VERR_NET_TOO_MANY_REFERENCES: return ETOOMANYREFS;
+	case VERR_TIMEOUT: return ETIMEDOUT;
+	case VERR_NET_CONNECTION_REFUSED: return ECONNREFUSED;
+	case VERR_NET_HOST_DOWN: return EHOSTDOWN;
+	case VERR_NET_HOST_UNREACHABLE: return EHOSTUNREACH;
+	case VERR_NET_ALREADY_IN_PROGRESS: return EALREADY;
+	case VERR_NET_IN_PROGRESS: return EINPROGRESS;
+	case VERR_MEDIA_NOT_PRESENT: return ENOMEDIUM;
+	case VERR_MEDIA_NOT_RECOGNIZED: return EMEDIUMTYPE;
+	default:
+		/* Unknown codes are logged to aid debugging new host errors */
+		vbg_warn("vbg_status_code_to_errno: Unhandled err %d\n", rc);
+		return EPROTO;
+	}
+}
+EXPORT_SYMBOL(vbg_status_code_to_errno);
diff --git a/drivers/misc/vboxguest/vboxguest_version.h b/drivers/misc/vboxguest/vboxguest_version.h
new file mode 100644
index 000000000000..47a53151fcd0
--- /dev/null
+++ b/drivers/misc/vboxguest/vboxguest_version.h
@@ -0,0 +1,18 @@
+/*
+ * VBox Guest additions version info, this is used by the host to determine
+ * supported guest-addition features in some cases. So this will need to be
+ * synced with vbox upstreams versioning scheme when we implement / port
+ * new features from the upstream out-of-tree vboxguest driver.
+ */
+
+#ifndef __VBOX_VERSION_H__
+#define __VBOX_VERSION_H__
+
+/* Last synced July 12th 2017 */
+/* Upstream VirtualBox guest-additions version this driver reports. */
+#define VBOX_VERSION_MAJOR 5
+#define VBOX_VERSION_MINOR 1
+#define VBOX_VERSION_BUILD 51
+/* Upstream VirtualBox svn revision at the time of the last sync. */
+#define VBOX_SVN_REV 67325
+#define VBOX_VERSION_STRING "5.1.51"
+
+#endif
diff --git a/include/linux/vbox_err.h b/include/linux/vbox_err.h
new file mode 100644
index 000000000000..906ff7d2585d
--- /dev/null
+++ b/include/linux/vbox_err.h
@@ -0,0 +1,6 @@
+#ifndef __VBOX_ERR_H__
+#define __VBOX_ERR_H__
+
+#include <uapi/linux/vbox_err.h>
+
+#endif
diff --git a/include/linux/vbox_ostypes.h b/include/linux/vbox_ostypes.h
new file mode 100644
index 000000000000..ea2a391f135f
--- /dev/null
+++ b/include/linux/vbox_ostypes.h
@@ -0,0 +1,6 @@
+#ifndef __VBOX_OSTYPES_H__
+#define __VBOX_OSTYPES_H__
+
+#include <uapi/linux/vbox_ostypes.h>
+
+#endif
diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h
new file mode 100644
index 000000000000..fcb77249dfe7
--- /dev/null
+++ b/include/linux/vbox_utils.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef __VBOX_UTILS_H__
+#define __VBOX_UTILS_H__
+
+#include <linux/printk.h>
+#include <linux/vbox_vmmdev.h>
+#include <linux/vboxguest.h>
+
+struct VBOXGUESTDEVEXT;
+
+/**
+ * vboxguest logging functions, these log both to the backdoor and call
+ * the equivalent kernel pr_foo function.
+ */
+__printf(1, 2) void vbg_info(const char *fmt, ...);
+__printf(1, 2) void vbg_warn(const char *fmt, ...);
+__printf(1, 2) void vbg_err(const char *fmt, ...);
+
+/* Only use backdoor logging for non-dynamic debug builds */
+#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
+__printf(1, 2) void vbg_debug(const char *fmt, ...);
+#else
+#define vbg_debug pr_debug
+#endif
+
+/** @name Generic request functions.
+ * @{
+ */
+
+/**
+ * Allocate memory for generic request and initialize the request header.
+ *
+ * @returns the allocated memory
+ * @param len       Size of memory block required for the request.
+ * @param req_type  The generic request type.
+ */
+void *vbg_req_alloc(size_t len, VMMDevRequestType req_type);
+
+/**
+ * Perform a generic request.
+ *
+ * @returns VBox status code
+ * @param gdev  The Guest extension device.
+ * @param req   Pointer the request structure.
+ */
+int vbg_req_perform(struct VBOXGUESTDEVEXT *gdev, void *req);
+
+/**
+ * Verify the generic request header.
+ *
+ * @returns VBox status code
+ * @param req          pointer the request header structure.
+ * @param buffer_size  size of the request memory block. It should be equal to
+ *                     the request size for fixed size requests. It can be
+ *                     greater than the request size for variable size requests.
+ */
+int vbg_req_verify(const VMMDevRequestHeader *req, size_t buffer_size);
+/** @} */
+
+/**
+ * Connect to an HGCM service on the host.
+ *
+ * @returns VBox status code
+ * @param gdev       The Guest extension device.
+ * @param loc        The service to connect to.
+ * @param client_id  Where to store the client id on success.
+ */
+int vbg_hgcm_connect(struct VBOXGUESTDEVEXT *gdev, HGCMServiceLocation *loc,
+		     u32 *client_id);
+
+/** Disconnect the client identified by @client_id from its HGCM service. */
+int vbg_hgcm_disconnect(struct VBOXGUESTDEVEXT *gdev, u32 client_id);
+
+/**
+ * Make an HGCM call.
+ *
+ * @returns VBox status code
+ * @param gdev        The Guest extension device.
+ * @param pCallInfo   Call info incl. parameters; result is stored in
+ *                    pCallInfo->result.
+ * @param cbCallInfo  Size of the call info structure incl. parameters.
+ * @param timeout_ms  Timeout in ms, U32_MAX means wait indefinitely.
+ * @param is_user     Whether the call originates from userspace.
+ */
+int vbg_hgcm_call(struct VBOXGUESTDEVEXT *gdev,
+		  VBoxGuestHGCMCallInfo *pCallInfo, u32 cbCallInfo,
+		  u32 timeout_ms, bool is_user);
+
+/** 32-bit parameter-layout variant of vbg_hgcm_call() (x86_64 only). */
+int vbg_hgcm_call32(struct VBOXGUESTDEVEXT *gdev,
+		    VBoxGuestHGCMCallInfo *pCallInfo, u32 cbCallInfo,
+		    u32 timeout_ms, bool is_user);
+
+/**
+ * Convert a VBox status code to a Linux errno value.
+ * Returns 0 for success codes, otherwise a POSITIVE errno value.
+ */
+int vbg_status_code_to_errno(int rc);
+
+/*
+ * Get / put a reference to the global guest device extension.
+ * NOTE(review): presumably for use by the vboxsf driver — confirm.
+ */
+struct VBOXGUESTDEVEXT *vbg_get_gdev(void);
+void vbg_put_gdev(struct VBOXGUESTDEVEXT *gdev);
+
+#endif
diff --git a/include/linux/vbox_vmmdev.h b/include/linux/vbox_vmmdev.h
new file mode 100644
index 000000000000..8867c1f4eb08
--- /dev/null
+++ b/include/linux/vbox_vmmdev.h
@@ -0,0 +1,128 @@
+/*
+ * Virtual Device for Guest <-> VMM/Host communication (ADD,DEV).
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef __VBOX_VMMDEV_H__
+#define __VBOX_VMMDEV_H__
+
+#include <linux/sizes.h>
+#include <uapi/linux/vbox_vmmdev.h>
+
+/**
+ * @name VBVA ring defines.
+ *
+ * The VBVA ring buffer is suitable for transferring large (< 2GB) amounts of
+ * data. For example big bitmaps which do not fit in the buffer.
+ *
+ * The guest starts writing to the buffer by initializing a record entry in
+ * the aRecords queue. VBVA_F_RECORD_PARTIAL indicates that the record is
+ * being written. As data is written to the ring buffer, the guest increases
+ * off32End for the record.
+ *
+ * The host reads the aRecords on flushes and processes all completed records.
+ * When the host encounters a situation where only a partial record is present
+ * and cbRecord & ~VBVA_F_RECORD_PARTIAL >= VBVA_RING_BUFFER_SIZE -
+ * VBVA_RING_BUFFER_THRESHOLD, the host fetches all record data and updates
+ * off32Head. After that, on each flush the host continues fetching the data
+ * until the record is completed.
+ *
+ * @{
+ */
+#define VMMDEV_VBVA_RING_BUFFER_SIZE (SZ_4M - SZ_1K)
+#define VMMDEV_VBVA_RING_BUFFER_THRESHOLD (SZ_4K)
+
+#define VMMDEV_VBVA_MAX_RECORDS (64)
+/** @} */
+
+/** VBVA record, one entry in the VBVAMEMORY::aRecords queue. */
+typedef struct VMMDEVVBVARECORD {
+	/** The length of the record. Changed by guest. */
+	u32 cbRecord;
+} VMMDEVVBVARECORD;
+VMMDEV_ASSERT_SIZE(VMMDEVVBVARECORD, 4);
+
+/**
+ * VBVA memory layout.
+ *
+ * This is a subsection of the VMMDevMemory structure, i.e. it is shared
+ * with the host and its layout is ABI (size asserted below).
+ */
+typedef struct VBVAMEMORY {
+	/** VBVA_F_MODE_*. */
+	u32 fu32ModeFlags;
+
+	/** The offset where the data start in the buffer. */
+	u32 off32Data;
+	/** The offset where next data must be placed in the buffer. */
+	u32 off32Free;
+
+	/** The ring buffer for data. */
+	u8 au8RingBuffer[VMMDEV_VBVA_RING_BUFFER_SIZE];
+
+	/** The queue of record descriptions. */
+	VMMDEVVBVARECORD aRecords[VMMDEV_VBVA_MAX_RECORDS];
+	/* NOTE(review): presumably the oldest queued record ... */
+	u32 indexRecordFirst;
+	/* ... and the next free slot in aRecords — confirm with host side. */
+	u32 indexRecordFree;
+
+	/**
+	 * RDP orders supported by the client. The guest reports only them
+	 * and falls back to DIRTY rects for not supported ones.
+	 *
+	 * (1 << VBVA_VRDP_*)
+	 */
+	u32 fu32SupportedOrders;
+
+} VBVAMEMORY;
+VMMDEV_ASSERT_SIZE(VBVAMEMORY, 12 + (SZ_4M-SZ_1K) + 4*64 + 12);
+
+/**
+ * The layout of VMMDEV RAM region that contains information for guest.
+ */
+typedef struct VMMDevMemory {
+	/** The size of this structure. */
+	u32 u32Size;
+	/** The structure version. (VMMDEV_MEMORY_VERSION) */
+	u32 u32Version;
+
+	/*
+	 * Version specific data; NOTE(review): presumably which member is
+	 * valid depends on u32Version — confirm against the host side.
+	 */
+	union {
+		struct {
+			/** Flag telling that VMMDev has events pending. */
+			bool fHaveEvents;
+		} V1_04;
+
+		struct {
+			/** Pending events flags, set by host. */
+			u32 u32HostEvents;
+			/** Mask of events the guest wants, set by guest. */
+			u32 u32GuestEventMask;
+		} V1_03;
+	} V;
+
+	/** Video acceleration (VBVA) data, see VBVAMEMORY. */
+	VBVAMEMORY vbvaMemory;
+
+} VMMDevMemory;
+VMMDEV_ASSERT_SIZE(VMMDevMemory, 8 + 8 + sizeof(VBVAMEMORY));
+VMMDEV_ASSERT_MEMBER_OFFSET(VMMDevMemory, vbvaMemory, 16);
+
+/** Version of VMMDevMemory structure (VMMDevMemory::u32Version). */
+#define VMMDEV_MEMORY_VERSION (1)
+
+#endif
diff --git a/include/linux/vboxguest.h b/include/linux/vboxguest.h
new file mode 100644
index 000000000000..fca5d199a884
--- /dev/null
+++ b/include/linux/vboxguest.h
@@ -0,0 +1,6 @@
+#ifndef __VBOXGUEST_H__
+#define __VBOXGUEST_H__
+
+#include <uapi/linux/vboxguest.h>
+
+#endif
diff --git a/include/uapi/linux/vbox_err.h b/include/uapi/linux/vbox_err.h
new file mode 100644
index 000000000000..e6e7ba835e36
--- /dev/null
+++ b/include/uapi/linux/vbox_err.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __UAPI_VBOX_ERR_H__
+#define __UAPI_VBOX_ERR_H__
+
+/**
+ * @name VirtualBox virtual-hardware error macros
+ *
+ * Success codes are >= 0 (VINF_*), failure codes are negative (VERR_*).
+ * The numeric values are part of the guest <-> host device protocol and
+ * must match the upstream VirtualBox definitions.
+ * @{
+ */
+
+/* Core / generic status codes. */
+#define VINF_SUCCESS 0
+#define VERR_GENERAL_FAILURE (-1)
+#define VERR_INVALID_PARAMETER (-2)
+#define VERR_INVALID_MAGIC (-3)
+#define VERR_INVALID_HANDLE (-4)
+#define VERR_LOCK_FAILED (-5)
+#define VERR_INVALID_POINTER (-6)
+#define VERR_IDT_FAILED (-7)
+#define VERR_NO_MEMORY (-8)
+#define VERR_ALREADY_LOADED (-9)
+#define VERR_PERMISSION_DENIED (-10)
+#define VERR_VERSION_MISMATCH (-11)
+#define VERR_NOT_IMPLEMENTED (-12)
+#define VERR_INVALID_FLAGS (-13)
+
+#define VERR_NOT_EQUAL (-18)
+#define VERR_NOT_SYMLINK (-19)
+#define VERR_NO_TMP_MEMORY (-20)
+#define VERR_INVALID_FMODE (-21)
+#define VERR_WRONG_ORDER (-22)
+#define VERR_NO_TLS_FOR_SELF (-23)
+#define VERR_FAILED_TO_SET_SELF_TLS (-24)
+#define VERR_NO_CONT_MEMORY (-26)
+#define VERR_NO_PAGE_MEMORY (-27)
+#define VERR_THREAD_IS_DEAD (-29)
+#define VERR_THREAD_NOT_WAITABLE (-30)
+#define VERR_PAGE_TABLE_NOT_PRESENT (-31)
+#define VERR_INVALID_CONTEXT (-32)
+#define VERR_TIMER_BUSY (-33)
+#define VERR_ADDRESS_CONFLICT (-34)
+#define VERR_UNRESOLVED_ERROR (-35)
+#define VERR_INVALID_FUNCTION (-36)
+#define VERR_NOT_SUPPORTED (-37)
+#define VERR_ACCESS_DENIED (-38)
+#define VERR_INTERRUPTED (-39)
+#define VERR_TIMEOUT (-40)
+#define VERR_BUFFER_OVERFLOW (-41)
+#define VERR_TOO_MUCH_DATA (-42)
+#define VERR_MAX_THRDS_REACHED (-43)
+#define VERR_MAX_PROCS_REACHED (-44)
+#define VERR_SIGNAL_REFUSED (-45)
+#define VERR_SIGNAL_PENDING (-46)
+#define VERR_SIGNAL_INVALID (-47)
+#define VERR_STATE_CHANGED (-48)
+#define VERR_INVALID_UUID_FORMAT (-49)
+#define VERR_PROCESS_NOT_FOUND (-50)
+#define VERR_PROCESS_RUNNING (-51)
+#define VERR_TRY_AGAIN (-52)
+#define VERR_PARSE_ERROR (-53)
+#define VERR_OUT_OF_RANGE (-54)
+#define VERR_NUMBER_TOO_BIG (-55)
+#define VERR_NO_DIGITS (-56)
+#define VERR_NEGATIVE_UNSIGNED (-57)
+#define VERR_NO_TRANSLATION (-58)
+
+#define VERR_NOT_FOUND (-78)
+#define VERR_INVALID_STATE (-79)
+#define VERR_OUT_OF_RESOURCES (-80)
+
+/* File and file-system status codes. */
+#define VERR_FILE_NOT_FOUND (-102)
+#define VERR_PATH_NOT_FOUND (-103)
+#define VERR_INVALID_NAME (-104)
+#define VERR_ALREADY_EXISTS (-105)
+#define VERR_TOO_MANY_OPEN_FILES (-106)
+#define VERR_SEEK (-107)
+#define VERR_NEGATIVE_SEEK (-108)
+#define VERR_SEEK_ON_DEVICE (-109)
+#define VERR_EOF (-110)
+#define VERR_READ_ERROR (-111)
+#define VERR_WRITE_ERROR (-112)
+#define VERR_WRITE_PROTECT (-113)
+#define VERR_SHARING_VIOLATION (-114)
+#define VERR_FILE_LOCK_FAILED (-115)
+#define VERR_FILE_LOCK_VIOLATION (-116)
+#define VERR_CANT_CREATE (-117)
+#define VERR_CANT_DELETE_DIRECTORY (-118)
+#define VERR_NOT_SAME_DEVICE (-119)
+#define VERR_FILENAME_TOO_LONG (-120)
+#define VERR_MEDIA_NOT_PRESENT (-121)
+#define VERR_MEDIA_NOT_RECOGNIZED (-122)
+#define VERR_FILE_NOT_LOCKED (-123)
+#define VERR_FILE_LOCK_LOST (-124)
+#define VERR_DIR_NOT_EMPTY (-125)
+#define VERR_NOT_A_DIRECTORY (-126)
+#define VERR_IS_A_DIRECTORY (-127)
+#define VERR_FILE_TOO_BIG (-128)
+
+/* Network status codes. */
+#define VERR_NET_IO_ERROR (-400)
+#define VERR_NET_OUT_OF_RESOURCES (-401)
+#define VERR_NET_HOST_NOT_FOUND (-402)
+#define VERR_NET_PATH_NOT_FOUND (-403)
+#define VERR_NET_PRINT_ERROR (-404)
+#define VERR_NET_NO_NETWORK (-405)
+#define VERR_NET_NOT_UNIQUE_NAME (-406)
+
+#define VERR_NET_IN_PROGRESS (-436)
+#define VERR_NET_ALREADY_IN_PROGRESS (-437)
+#define VERR_NET_NOT_SOCKET (-438)
+#define VERR_NET_DEST_ADDRESS_REQUIRED (-439)
+#define VERR_NET_MSG_SIZE (-440)
+#define VERR_NET_PROTOCOL_TYPE (-441)
+#define VERR_NET_PROTOCOL_NOT_AVAILABLE (-442)
+#define VERR_NET_PROTOCOL_NOT_SUPPORTED (-443)
+#define VERR_NET_SOCKET_TYPE_NOT_SUPPORTED (-444)
+#define VERR_NET_OPERATION_NOT_SUPPORTED (-445)
+#define VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED (-446)
+#define VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED (-447)
+#define VERR_NET_ADDRESS_IN_USE (-448)
+#define VERR_NET_ADDRESS_NOT_AVAILABLE (-449)
+#define VERR_NET_DOWN (-450)
+#define VERR_NET_UNREACHABLE (-451)
+#define VERR_NET_CONNECTION_RESET (-452)
+#define VERR_NET_CONNECTION_ABORTED (-453)
+#define VERR_NET_CONNECTION_RESET_BY_PEER (-454)
+#define VERR_NET_NO_BUFFER_SPACE (-455)
+#define VERR_NET_ALREADY_CONNECTED (-456)
+#define VERR_NET_NOT_CONNECTED (-457)
+#define VERR_NET_SHUTDOWN (-458)
+#define VERR_NET_TOO_MANY_REFERENCES (-459)
+#define VERR_NET_CONNECTION_TIMED_OUT (-460)
+#define VERR_NET_CONNECTION_REFUSED (-461)
+#define VERR_NET_HOST_DOWN (-464)
+#define VERR_NET_HOST_UNREACHABLE (-465)
+#define VERR_NET_PROTOCOL_ERROR (-466)
+#define VERR_NET_INCOMPLETE_TX_PACKET (-467)
+
+/* misc. unsorted codes (numerically out of order, matching upstream) */
+#define VERR_RESOURCE_BUSY (-138)
+#define VERR_DISK_FULL (-152)
+#define VERR_TOO_MANY_SYMLINKS (-156)
+#define VERR_NO_MORE_FILES (-201)
+#define VERR_INTERNAL_ERROR (-225)
+#define VERR_INTERNAL_ERROR_2 (-226)
+#define VERR_INTERNAL_ERROR_3 (-227)
+#define VERR_INTERNAL_ERROR_4 (-228)
+#define VERR_DEV_IO_ERROR (-250)
+#define VERR_IO_BAD_LENGTH (-255)
+#define VERR_BROKEN_PIPE (-301)
+#define VERR_NO_DATA (-304)
+#define VERR_SEM_DESTROYED (-363)
+#define VERR_DEADLOCK (-365)
+#define VERR_BAD_EXE_FORMAT (-608)
+#define VINF_HGCM_ASYNC_EXECUTE (2903)
+
+/* Success = non-negative status, failure = negative status. */
+#define RT_SUCCESS(rc) ((rc) >= 0)
+#define RT_FAILURE(rc) ((rc) < 0)
+
+#endif
diff --git a/include/uapi/linux/vbox_ostypes.h b/include/uapi/linux/vbox_ostypes.h
new file mode 100644
index 000000000000..abe9a38ebfbd
--- /dev/null
+++ b/include/uapi/linux/vbox_ostypes.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * VirtualBox - Global Guest Operating System definition.
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef __UAPI_VBOX_OSTYPES_H__
+#define __UAPI_VBOX_OSTYPES_H__
+
+/** The bit number which indicates 64-bit or 32-bit. */
+#define VBOXOSTYPE_x64_BIT 8
+
+/**
+ * Global list of guest operating system types.
+ *
+ * They are grouped into families. A family identifier always has
+ * mod 0x10000 == 0. New entries can be added, however other components
+ * depend on the values (e.g. the Qt GUI and guest additions) so the
+ * existing values MUST stay the same.
+ *
+ * Note: distinguish between 32 & 64 bits guest OSes by checking bit 8.
+ */
+typedef enum VBOXOSTYPE {
+ VBOXOSTYPE_Unknown = 0,
+ VBOXOSTYPE_Unknown_x64 = 0x00100,
+ VBOXOSTYPE_DOS = 0x10000,
+ VBOXOSTYPE_Win31 = 0x15000,
+ VBOXOSTYPE_Win9x = 0x20000,
+ VBOXOSTYPE_Win95 = 0x21000,
+ VBOXOSTYPE_Win98 = 0x22000,
+ VBOXOSTYPE_WinMe = 0x23000,
+ VBOXOSTYPE_WinNT = 0x30000,
+ VBOXOSTYPE_WinNT_x64 = 0x30100,
+ VBOXOSTYPE_WinNT4 = 0x31000,
+ VBOXOSTYPE_Win2k = 0x32000,
+ VBOXOSTYPE_WinXP = 0x33000,
+ VBOXOSTYPE_WinXP_x64 = 0x33100,
+ VBOXOSTYPE_Win2k3 = 0x34000,
+ VBOXOSTYPE_Win2k3_x64 = 0x34100,
+ VBOXOSTYPE_WinVista = 0x35000,
+ VBOXOSTYPE_WinVista_x64 = 0x35100,
+ VBOXOSTYPE_Win2k8 = 0x36000,
+ VBOXOSTYPE_Win2k8_x64 = 0x36100,
+ VBOXOSTYPE_Win7 = 0x37000,
+ VBOXOSTYPE_Win7_x64 = 0x37100,
+ VBOXOSTYPE_Win8 = 0x38000,
+ VBOXOSTYPE_Win8_x64 = 0x38100,
+ VBOXOSTYPE_Win2k12_x64 = 0x39100,
+ VBOXOSTYPE_Win81 = 0x3A000,
+ VBOXOSTYPE_Win81_x64 = 0x3A100,
+ VBOXOSTYPE_Win10 = 0x3B000,
+ VBOXOSTYPE_Win10_x64 = 0x3B100,
+ VBOXOSTYPE_Win2k16_x64 = 0x3C100,
+ VBOXOSTYPE_OS2 = 0x40000,
+ VBOXOSTYPE_OS2Warp3 = 0x41000,
+ VBOXOSTYPE_OS2Warp4 = 0x42000,
+ VBOXOSTYPE_OS2Warp45 = 0x43000,
+ VBOXOSTYPE_ECS = 0x44000,
+ VBOXOSTYPE_OS21x = 0x48000,
+ VBOXOSTYPE_Linux = 0x50000,
+ VBOXOSTYPE_Linux_x64 = 0x50100,
+ VBOXOSTYPE_Linux22 = 0x51000,
+ VBOXOSTYPE_Linux24 = 0x52000,
+ VBOXOSTYPE_Linux24_x64 = 0x52100,
+ VBOXOSTYPE_Linux26 = 0x53000,
+ VBOXOSTYPE_Linux26_x64 = 0x53100,
+ VBOXOSTYPE_ArchLinux = 0x54000,
+ VBOXOSTYPE_ArchLinux_x64 = 0x54100,
+ VBOXOSTYPE_Debian = 0x55000,
+ VBOXOSTYPE_Debian_x64 = 0x55100,
+ VBOXOSTYPE_OpenSUSE = 0x56000,
+ VBOXOSTYPE_OpenSUSE_x64 = 0x56100,
+ VBOXOSTYPE_FedoraCore = 0x57000,
+ VBOXOSTYPE_FedoraCore_x64 = 0x57100,
+ VBOXOSTYPE_Gentoo = 0x58000,
+ VBOXOSTYPE_Gentoo_x64 = 0x58100,
+ VBOXOSTYPE_Mandriva = 0x59000,
+ VBOXOSTYPE_Mandriva_x64 = 0x59100,
+ VBOXOSTYPE_RedHat = 0x5A000,
+ VBOXOSTYPE_RedHat_x64 = 0x5A100,
+ VBOXOSTYPE_Turbolinux = 0x5B000,
+ VBOXOSTYPE_Turbolinux_x64 = 0x5B100,
+ VBOXOSTYPE_Ubuntu = 0x5C000,
+ VBOXOSTYPE_Ubuntu_x64 = 0x5C100,
+ VBOXOSTYPE_Xandros = 0x5D000,
+ VBOXOSTYPE_Xandros_x64 = 0x5D100,
+ VBOXOSTYPE_Oracle = 0x5E000,
+ VBOXOSTYPE_Oracle_x64 = 0x5E100,
+ VBOXOSTYPE_FreeBSD = 0x60000,
+ VBOXOSTYPE_FreeBSD_x64 = 0x60100,
+ VBOXOSTYPE_OpenBSD = 0x61000,
+ VBOXOSTYPE_OpenBSD_x64 = 0x61100,
+ VBOXOSTYPE_NetBSD = 0x62000,
+ VBOXOSTYPE_NetBSD_x64 = 0x62100,
+ VBOXOSTYPE_Netware = 0x70000,
+ VBOXOSTYPE_Solaris = 0x80000,
+ VBOXOSTYPE_Solaris_x64 = 0x80100,
+ VBOXOSTYPE_OpenSolaris = 0x81000,
+ VBOXOSTYPE_OpenSolaris_x64 = 0x81100,
+ VBOXOSTYPE_Solaris11_x64 = 0x82100,
+ VBOXOSTYPE_L4 = 0x90000,
+ VBOXOSTYPE_QNX = 0xA0000,
+ VBOXOSTYPE_MacOS = 0xB0000,
+ VBOXOSTYPE_MacOS_x64 = 0xB0100,
+ VBOXOSTYPE_MacOS106 = 0xB2000,
+ VBOXOSTYPE_MacOS106_x64 = 0xB2100,
+ VBOXOSTYPE_MacOS107_x64 = 0xB3100,
+ VBOXOSTYPE_MacOS108_x64 = 0xB4100,
+ VBOXOSTYPE_MacOS109_x64 = 0xB5100,
+ VBOXOSTYPE_MacOS1010_x64 = 0xB6100,
+ VBOXOSTYPE_MacOS1011_x64 = 0xB7100,
+ VBOXOSTYPE_JRockitVE = 0xC0000,
+ VBOXOSTYPE_Haiku = 0xD0000,
+ VBOXOSTYPE_Haiku_x64 = 0xD0100,
+ VBOXOSTYPE_VBoxBS_x64 = 0xE0100,
+ /** The mask which indicates 64-bit. */
+ VBOXOSTYPE_x64 = 1 << VBOXOSTYPE_x64_BIT,
+ /** The usual 32-bit hack. */
+ VBOXOSTYPE_32BIT_HACK = 0x7fffffff
+} VBOXOSTYPE;
+
+/**
+ * Global list of guest OS families.
+ */
+typedef enum VBOXOSFAMILY {
+ VBOXOSFAMILY_Unknown = 0,
+ VBOXOSFAMILY_Windows32 = 1,
+ VBOXOSFAMILY_Windows64 = 2,
+ VBOXOSFAMILY_Linux32 = 3,
+ VBOXOSFAMILY_Linux64 = 4,
+ VBOXOSFAMILY_FreeBSD32 = 5,
+ VBOXOSFAMILY_FreeBSD64 = 6,
+ VBOXOSFAMILY_Solaris32 = 7,
+ VBOXOSFAMILY_Solaris64 = 8,
+ VBOXOSFAMILY_MacOSX32 = 9,
+ VBOXOSFAMILY_MacOSX64 = 10,
+ /** The usual 32-bit hack. */
+ VBOXOSFAMILY_32BIT_HACK = 0x7fffffff
+} VBOXOSFAMILY;
+
+#endif
diff --git a/include/uapi/linux/vbox_vmmdev.h b/include/uapi/linux/vbox_vmmdev.h
new file mode 100644
index 000000000000..d19265dbf906
--- /dev/null
+++ b/include/uapi/linux/vbox_vmmdev.h
@@ -0,0 +1,1743 @@
+/*
+ * Virtual Device for Guest <-> VMM/Host communication (ADD,DEV).
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef __UAPI_VBOX_VMMDEV_H__
+#define __UAPI_VBOX_VMMDEV_H__
+
+#include <asm/bitsperlong.h>
+#include <linux/types.h>
+#include <linux/vbox_ostypes.h>
+
+/*
+ * We cannot use linux' compiletime_assert here because it expects to be used
+ * inside a function only. Use a typedef to a char array with a negative size.
+ */
+#define VMMDEV_ASSERT_SIZE(type, size) \
+ typedef char type ## _assert_size[1 - 2*!!(sizeof(type) != (size))]
+#define VMMDEV_ASSERT_MEMBER_OFFSET(type, member, offset) \
+ typedef char type ## _ ## member ## _assert_member_offset \
+ [1 - 2*!!(offsetof(type, member) != (offset))]
+
+/*
+ * The host expects dwords / 32 bit packing. Using __aligned(4)
+ * everywhere is not really practical and also does not seem to work.
+ * Specifically I've been unable to get structs using bools and
+ * HGCMFunctionParameter32 / 64 to compile to the right size using __aligned(),
+ * so we're sticking with pragma pack(4) here.
+ */
+#pragma pack(4)
+
+/**
+ * @defgroup grp_vmmdev VMM Device
+ *
+ * @note This interface cannot be changed, it can only be extended!
+ *
+ * @{
+ */
+
+/** Port for generic request interface (relative offset). */
+#define VMMDEV_PORT_OFF_REQUEST 0
+
+/**
+ * @name VMMDev events.
+ *
+ * Used mainly by VMMDevReq_AcknowledgeEvents/VMMDevEvents and version 1.3 of
+ * VMMDevMemory.
+ *
+ * @{
+ */
+/** Host mouse capabilities has been changed. */
+#define VMMDEV_EVENT_MOUSE_CAPABILITIES_CHANGED BIT(0)
+/** HGCM event. */
+#define VMMDEV_EVENT_HGCM BIT(1)
+/** A display change request has been issued. */
+#define VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST BIT(2)
+/** Credentials are available for judgement. */
+#define VMMDEV_EVENT_JUDGE_CREDENTIALS BIT(3)
+/** The guest has been restored. */
+#define VMMDEV_EVENT_RESTORED BIT(4)
+/** Seamless mode state changed. */
+#define VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST BIT(5)
+/** Memory balloon size changed. */
+#define VMMDEV_EVENT_BALLOON_CHANGE_REQUEST BIT(6)
+/** Statistics interval changed. */
+#define VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST BIT(7)
+/** VRDP status changed. */
+#define VMMDEV_EVENT_VRDP BIT(8)
+/** New mouse position data available. */
+#define VMMDEV_EVENT_MOUSE_POSITION_CHANGED BIT(9)
+/** CPU hotplug event occurred. */
+#define VMMDEV_EVENT_CPU_HOTPLUG BIT(10)
+/** The mask of valid events, for sanity checking. */
+#define VMMDEV_EVENT_VALID_EVENT_MASK 0x000007ffU
+/** @} */
+
+/** @defgroup grp_vmmdev_req VMMDev Generic Request Interface
+ * @{
+ */
+
+/** @name Current version of the VMMDev interface.
+ *
+ * Additions are allowed to work only if
+ * additions_major == vmmdev_current && additions_minor <= vmmdev_current.
+ * Additions version is reported to host (VMMDev) by VMMDevReq_ReportGuestInfo.
+ *
+ * @remarks These defines also live in the 16-bit and assembly versions of this
+ * header.
+ */
+#define VMMDEV_VERSION 0x00010004
+#define VMMDEV_VERSION_MAJOR (VMMDEV_VERSION >> 16)
+#define VMMDEV_VERSION_MINOR (VMMDEV_VERSION & 0xffff)
+/** @} */
+
+/** Maximum request packet size. */
+#define VMMDEV_MAX_VMMDEVREQ_SIZE 1048576
+/** Maximum number of HGCM parameters. */
+#define VMMDEV_MAX_HGCM_PARMS 1024
+/** Maximum total size of hgcm buffers in one call. */
+#define VMMDEV_MAX_HGCM_DATA_SIZE 0x7fffffffU
+
+/**
+ * VMMDev request types.
+ * @note when updating this, adjust vmmdevGetRequestSize() as well
+ */
+typedef enum {
+ VMMDevReq_InvalidRequest = 0,
+ VMMDevReq_GetMouseStatus = 1,
+ VMMDevReq_SetMouseStatus = 2,
+ VMMDevReq_SetPointerShape = 3,
+ VMMDevReq_GetHostVersion = 4,
+ VMMDevReq_Idle = 5,
+ VMMDevReq_GetHostTime = 10,
+ VMMDevReq_GetHypervisorInfo = 20,
+ VMMDevReq_SetHypervisorInfo = 21,
+ VMMDevReq_RegisterPatchMemory = 22, /* since version 3.0.6 */
+ VMMDevReq_DeregisterPatchMemory = 23, /* since version 3.0.6 */
+ VMMDevReq_SetPowerStatus = 30,
+ VMMDevReq_AcknowledgeEvents = 41,
+ VMMDevReq_CtlGuestFilterMask = 42,
+ VMMDevReq_ReportGuestInfo = 50,
+ VMMDevReq_ReportGuestInfo2 = 58, /* since version 3.2.0 */
+ VMMDevReq_ReportGuestStatus = 59, /* since version 3.2.8 */
+ VMMDevReq_ReportGuestUserState = 74, /* since version 4.3 */
+ /**
+ * Retrieve a display resize request sent by the host using
+ * @a IDisplay:setVideoModeHint. Deprecated.
+ *
+ * Similar to @a VMMDevReq_GetDisplayChangeRequest2, except that it only
+ * considers host requests sent for the first virtual display. This
+ * guest-req should not be used in new guest code, and the results are
+ * undefined if a guest mixes calls to this and
+ * @a VMMDevReq_GetDisplayChangeRequest2.
+ */
+ VMMDevReq_GetDisplayChangeRequest = 51,
+ VMMDevReq_VideoModeSupported = 52,
+ VMMDevReq_GetHeightReduction = 53,
+ /**
+ * Retrieve a display resize request sent by the host using
+ * @a IDisplay:setVideoModeHint.
+ *
+ * Queries a display resize request sent from the host. If the
+ * @a eventAck member is set to true and there is an unqueried request
+ * available for one of the virtual displays then that request will
+ * be returned. If several displays have unqueried requests the lowest
+ * numbered display will be chosen first. Only the most recent unseen
+ * request for each display is remembered.
+ * If @a eventAck is set to false, the last host request queried with
+ * @a eventAck set is resent, or failing that the most recent received
+ * from the host. If no host request was ever received then all zeros
+ * are returned.
+ */
+ VMMDevReq_GetDisplayChangeRequest2 = 54,
+ VMMDevReq_ReportGuestCapabilities = 55,
+ VMMDevReq_SetGuestCapabilities = 56,
+ VMMDevReq_VideoModeSupported2 = 57, /* since version 3.2.0 */
+ VMMDevReq_GetDisplayChangeRequestEx = 80, /* since version 4.2.4 */
+ VMMDevReq_HGCMConnect = 60,
+ VMMDevReq_HGCMDisconnect = 61,
+ VMMDevReq_HGCMCall32 = 62,
+ VMMDevReq_HGCMCall64 = 63,
+ VMMDevReq_HGCMCancel = 64,
+ VMMDevReq_HGCMCancel2 = 65,
+ VMMDevReq_VideoAccelEnable = 70,
+ VMMDevReq_VideoAccelFlush = 71,
+ VMMDevReq_VideoSetVisibleRegion = 72,
+ VMMDevReq_GetSeamlessChangeRequest = 73,
+ VMMDevReq_QueryCredentials = 100,
+ VMMDevReq_ReportCredentialsJudgement = 101,
+ VMMDevReq_ReportGuestStats = 110,
+ VMMDevReq_GetMemBalloonChangeRequest = 111,
+ VMMDevReq_GetStatisticsChangeRequest = 112,
+ VMMDevReq_ChangeMemBalloon = 113,
+ VMMDevReq_GetVRDPChangeRequest = 150,
+ VMMDevReq_LogString = 200,
+ VMMDevReq_GetCpuHotPlugRequest = 210,
+ VMMDevReq_SetCpuHotPlugStatus = 211,
+ VMMDevReq_RegisterSharedModule = 212,
+ VMMDevReq_UnregisterSharedModule = 213,
+ VMMDevReq_CheckSharedModules = 214,
+ VMMDevReq_GetPageSharingStatus = 215,
+ VMMDevReq_DebugIsPageShared = 216,
+ VMMDevReq_GetSessionId = 217, /* since version 3.2.8 */
+ VMMDevReq_WriteCoreDump = 218,
+ VMMDevReq_GuestHeartbeat = 219,
+ VMMDevReq_HeartbeatConfigure = 220,
+ VMMDevReq_SizeHack = 0x7fffffff
+} VMMDevRequestType;
+
+#if __BITS_PER_LONG == 64
+#define VMMDevReq_HGCMCall VMMDevReq_HGCMCall64
+#else
+#define VMMDevReq_HGCMCall VMMDevReq_HGCMCall32
+#endif
+
+/** Version of VMMDevRequestHeader structure. */
+#define VMMDEV_REQUEST_HEADER_VERSION (0x10001)
+
+/**
+ * Generic VMMDev request header.
+ */
+typedef struct {
+ /** IN: Size of the structure in bytes (including body). */
+ u32 size;
+ /** IN: Version of the structure. */
+ u32 version;
+ /** IN: Type of the request. */
+ VMMDevRequestType requestType;
+ /** OUT: Return code. */
+ s32 rc;
+ /** Reserved field no.1. MBZ. */
+ u32 reserved1;
+ /** Reserved field no.2. MBZ. */
+ u32 reserved2;
+} VMMDevRequestHeader;
+VMMDEV_ASSERT_SIZE(VMMDevRequestHeader, 24);
+
+/**
+ * Mouse status request structure.
+ *
+ * Used by VMMDevReq_GetMouseStatus and VMMDevReq_SetMouseStatus.
+ */
+typedef struct {
+ /** header */
+ VMMDevRequestHeader header;
+ /** Mouse feature mask. See VMMDEV_MOUSE_*. */
+ u32 mouseFeatures;
+ /** Mouse x position. */
+ s32 pointerXPos;
+ /** Mouse y position. */
+ s32 pointerYPos;
+} VMMDevReqMouseStatus;
+VMMDEV_ASSERT_SIZE(VMMDevReqMouseStatus, 24+12);
+
+/**
+ * @name Mouse capability bits (VMMDevReqMouseStatus::mouseFeatures).
+ * @{
+ */
+/** The guest can (== wants to) handle absolute coordinates. */
+#define VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE BIT(0)
+/**
+ * The host can (== wants to) send absolute coordinates.
+ * (Input not captured.)
+ */
+#define VMMDEV_MOUSE_HOST_WANTS_ABSOLUTE BIT(1)
+/**
+ * The guest can *NOT* switch to software cursor and therefore depends on the
+ * host cursor.
+ *
+ * When guest additions are installed and the host has promised to display the
+ * cursor itself, the guest installs a hardware mouse driver. Don't ask the
+ * guest to switch to a software cursor then.
+ */
+#define VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR BIT(2)
+/** The host does NOT provide support for drawing the cursor itself. */
+#define VMMDEV_MOUSE_HOST_CANNOT_HWPOINTER BIT(3)
+/** The guest can read VMMDev events to find out about pointer movement */
+#define VMMDEV_MOUSE_NEW_PROTOCOL BIT(4)
+/**
+ * If the guest changes the status of the VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR
+ * bit, the host will honour this.
+ */
+#define VMMDEV_MOUSE_HOST_RECHECKS_NEEDS_HOST_CURSOR BIT(5)
+/**
+ * The host supplies an absolute pointing device. The Guest Additions may
+ * wish to use this to decide whether to install their own driver.
+ */
+#define VMMDEV_MOUSE_HOST_HAS_ABS_DEV BIT(6)
+/** The mask of all VMMDEV_MOUSE_* flags */
+#define VMMDEV_MOUSE_MASK 0x0000007fU
+/**
+ * The mask of guest capability changes for which notification events should
+ * be sent.
+ */
+#define VMMDEV_MOUSE_NOTIFY_HOST_MASK \
+ (VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE | VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR)
+/** The mask of all capabilities which the guest can legitimately change */
+#define VMMDEV_MOUSE_GUEST_MASK \
+ (VMMDEV_MOUSE_NOTIFY_HOST_MASK | VMMDEV_MOUSE_NEW_PROTOCOL)
+/**
+ * The mask of host capability changes for which notification events should
+ * be sent.
+ */
+#define VMMDEV_MOUSE_NOTIFY_GUEST_MASK \
+ VMMDEV_MOUSE_HOST_WANTS_ABSOLUTE
+/** The mask of all capabilities which the host can legitimately change */
+#define VMMDEV_MOUSE_HOST_MASK \
+ (VMMDEV_MOUSE_NOTIFY_GUEST_MASK |\
+ VMMDEV_MOUSE_HOST_CANNOT_HWPOINTER |\
+ VMMDEV_MOUSE_HOST_RECHECKS_NEEDS_HOST_CURSOR| \
+ VMMDEV_MOUSE_HOST_HAS_ABS_DEV)
+/** @} */
+
+/**
+ * @name Absolute mouse reporting range
+ * @{
+ */
+/** @todo Should these be here? They are needed by both host and guest. */
+/** The minimum value our pointing device can return. */
+#define VMMDEV_MOUSE_RANGE_MIN 0
+/** The maximum value our pointing device can return. */
+#define VMMDEV_MOUSE_RANGE_MAX 0xFFFF
+/** The full range our pointing device can return. */
+#define VMMDEV_MOUSE_RANGE (VMMDEV_MOUSE_RANGE_MAX - VMMDEV_MOUSE_RANGE_MIN)
+/** @} */
+
+/**
+ * Mouse pointer shape/visibility change request.
+ *
+ * Used by VMMDevReq_SetPointerShape. The size is variable.
+ */
+typedef struct VMMDevReqMousePointer {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** VBOX_MOUSE_POINTER_* bit flags from VBox/Graphics/VBoxVideo.h. */
+ u32 fFlags;
+ /** x coordinate of hot spot. */
+ u32 xHot;
+ /** y coordinate of hot spot. */
+ u32 yHot;
+ /** Width of the pointer in pixels. */
+ u32 width;
+ /** Height of the pointer in scanlines. */
+ u32 height;
+ /**
+ * Pointer data.
+ *
+ ****
+ * The data consists of 1 bpp AND mask followed by 32 bpp XOR (color)
+ * mask.
+ *
+ * For pointers without alpha channel the XOR mask pixels are 32 bit
+ * values: (lsb)BGR0(msb).
+ * For pointers with alpha channel the XOR mask consists of
+ * (lsb)BGRA(msb) 32 bit values.
+ *
+ * Guest driver must create the AND mask for pointers with an alpha channel,
+ * so if host does not support alpha, the pointer could be displayed as
+ * a normal color pointer. The AND mask can be constructed from alpha
+ * values. For example alpha value >= 0xf0 means bit 0 in the AND mask.
+ *
+ * The AND mask is 1 bpp bitmap with byte aligned scanlines. Size of AND
+ * mask, therefore, is cbAnd = (width + 7) / 8 * height. The padding
+ * bits at the end of any scanline are undefined.
+ *
+ * The XOR mask follows the AND mask on the next 4 bytes aligned offset:
+ * u8 *pXor = pAnd + (cbAnd + 3) & ~3
+ * Bytes in the gap between the AND and the XOR mask are undefined.
+ * XOR mask scanlines have no gap between them and size of XOR mask is:
+ * cXor = width * 4 * height.
+ ****
+ *
+ * Preallocate 4 bytes for accessing actual data as p->pointerData.
+ */
+ char pointerData[4];
+} VMMDevReqMousePointer;
+VMMDEV_ASSERT_SIZE(VMMDevReqMousePointer, 24+24);
+
+/**
+ * String log request structure.
+ *
+ * Used by VMMDevReq_LogString.
+ * @deprecated Use the IPRT logger or VbglR3WriteLog instead.
+ */
+typedef struct {
+ /** header */
+ VMMDevRequestHeader header;
+ /** variable length string data */
+ char szString[1];
+} VMMDevReqLogString;
+VMMDEV_ASSERT_SIZE(VMMDevReqLogString, 24+4);
+
+/**
+ * VirtualBox host version request structure.
+ *
+ * Used by VMMDevReq_GetHostVersion.
+ *
+ * @remarks VBGL uses this to detect the presence of new features in the
+ * interface.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Major version. */
+ u16 major;
+ /** Minor version. */
+ u16 minor;
+ /** Build number. */
+ u32 build;
+ /** SVN revision. */
+ u32 revision;
+ /** Feature mask. */
+ u32 features;
+} VMMDevReqHostVersion;
+VMMDEV_ASSERT_SIZE(VMMDevReqHostVersion, 24+16);
+
+/**
+ * @name VMMDevReqHostVersion::features
+ * @{
+ */
+/** Physical page lists are supported by HGCM. */
+#define VMMDEV_HVF_HGCM_PHYS_PAGE_LIST BIT(0)
+/** @} */
+
+/**
+ * Guest capabilities structure.
+ *
+ * Used by VMMDevReq_ReportGuestCapabilities.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Capabilities (VMMDEV_GUEST_*). */
+ u32 caps;
+} VMMDevReqGuestCapabilities;
+VMMDEV_ASSERT_SIZE(VMMDevReqGuestCapabilities, 24+4);
+
+/**
+ * Guest capabilities structure, version 2.
+ *
+ * Used by VMMDevReq_SetGuestCapabilities.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Mask of capabilities to be added. */
+ u32 u32OrMask;
+ /** Mask of capabilities to be removed. */
+ u32 u32NotMask;
+} VMMDevReqGuestCapabilities2;
+VMMDEV_ASSERT_SIZE(VMMDevReqGuestCapabilities2, 24+8);
+
+/**
+ * @name Guest capability bits.
+ * Used by VMMDevReq_ReportGuestCapabilities and VMMDevReq_SetGuestCapabilities.
+ * @{
+ */
+/** The guest supports seamless display rendering. */
+#define VMMDEV_GUEST_SUPPORTS_SEAMLESS BIT(0)
+/** The guest supports mapping guest to host windows. */
+#define VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING BIT(1)
+/**
+ * The guest graphical additions are active.
+ * Used for fast activation and deactivation of certain graphical operations
+ * (e.g. resizing & seamless). The legacy VMMDevReq_ReportGuestCapabilities
+ * request sets this automatically, but VMMDevReq_SetGuestCapabilities does
+ * not.
+ */
+#define VMMDEV_GUEST_SUPPORTS_GRAPHICS BIT(2)
+/** The mask of valid events, for sanity checking. */
+#define VMMDEV_GUEST_CAPABILITIES_MASK 0x00000007U
+/** @} */
+
+/**
+ * Idle request structure.
+ *
+ * Used by VMMDevReq_Idle.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+} VMMDevReqIdle;
+VMMDEV_ASSERT_SIZE(VMMDevReqIdle, 24);
+
+/**
+ * Host time request structure.
+ *
+ * Used by VMMDevReq_GetHostTime.
+ */
+typedef struct {
+ /** Header */
+ VMMDevRequestHeader header;
+ /** OUT: Time in milliseconds since unix epoch. */
+ u64 time;
+} VMMDevReqHostTime;
+VMMDEV_ASSERT_SIZE(VMMDevReqHostTime, 24+8);
+
+/**
+ * Hypervisor info structure.
+ *
+ * Used by VMMDevReq_GetHypervisorInfo and VMMDevReq_SetHypervisorInfo.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /**
+ * Guest virtual address of proposed hypervisor start.
+ * Not used by VMMDevReq_GetHypervisorInfo.
+ * @todo Make this 64-bit compatible?
+ */
+ u32 hypervisorStart;
+ /** Hypervisor size in bytes. */
+ u32 hypervisorSize;
+} VMMDevReqHypervisorInfo;
+VMMDEV_ASSERT_SIZE(VMMDevReqHypervisorInfo, 24+8);
+
+/**
+ * @name Default patch memory size .
+ * Used by VMMDevReq_RegisterPatchMemory and VMMDevReq_DeregisterPatchMemory.
+ * @{
+ */
+#define VMMDEV_GUEST_DEFAULT_PATCHMEM_SIZE 8192
+/** @} */
+
+/**
+ * Patching memory structure. (locked executable & read-only page from the
+ * guest's perspective)
+ *
+ * Used by VMMDevReq_RegisterPatchMemory and VMMDevReq_DeregisterPatchMemory
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Guest virtual address of the patching page(s). */
+ u64 pPatchMem;
+ /** Patch page size in bytes. */
+ u32 cbPatchMem;
+} VMMDevReqPatchMemory;
+VMMDEV_ASSERT_SIZE(VMMDevReqPatchMemory, 24+12);
+
+/**
+ * Guest power requests.
+ *
+ * See VMMDevReq_SetPowerStatus and VMMDevPowerStateRequest.
+ */
+typedef enum {
+ VMMDevPowerState_Invalid = 0,
+ VMMDevPowerState_Pause = 1,
+ VMMDevPowerState_PowerOff = 2,
+ VMMDevPowerState_SaveState = 3,
+ VMMDevPowerState_SizeHack = 0x7fffffff
+} VMMDevPowerState;
+VMMDEV_ASSERT_SIZE(VMMDevPowerState, 4);
+
+/**
+ * VM power status structure.
+ *
+ * Used by VMMDevReq_SetPowerStatus.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Power state request. */
+ VMMDevPowerState powerState;
+} VMMDevPowerStateRequest;
+VMMDEV_ASSERT_SIZE(VMMDevPowerStateRequest, 24+4);
+
+/**
+ * Pending events structure.
+ *
+ * Used by VMMDevReq_AcknowledgeEvents.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** OUT: Pending event mask. */
+ u32 events;
+} VMMDevEvents;
+VMMDEV_ASSERT_SIZE(VMMDevEvents, 24+4);
+
+/**
+ * Guest event filter mask control.
+ *
+ * Used by VMMDevReq_CtlGuestFilterMask.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Mask of events to be added to the filter. */
+ u32 u32OrMask;
+ /** Mask of events to be removed from the filter. */
+ u32 u32NotMask;
+} VMMDevCtlGuestFilterMask;
+VMMDEV_ASSERT_SIZE(VMMDevCtlGuestFilterMask, 24+8);
+
+/**
+ * Guest information structure.
+ *
+ * Used by VMMDevReportGuestInfo and PDMIVMMDEVCONNECTOR::pfnUpdateGuestVersion.
+ */
+typedef struct VBoxGuestInfo {
+ /**
+ * The VMMDev interface version expected by additions.
+ * *Deprecated*, do not use anymore! Will be removed.
+ */
+ u32 interfaceVersion;
+ /** Guest OS type. */
+ VBOXOSTYPE osType;
+} VBoxGuestInfo;
+VMMDEV_ASSERT_SIZE(VBoxGuestInfo, 8);
+
+/**
+ * Guest information report.
+ *
+ * Used by VMMDevReq_ReportGuestInfo.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Guest information. */
+ VBoxGuestInfo guestInfo;
+} VMMDevReportGuestInfo;
+VMMDEV_ASSERT_SIZE(VMMDevReportGuestInfo, 24+8);
+
+/**
+ * Guest information structure, version 2.
+ *
+ * Used by VMMDevReportGuestInfo2.
+ */
+typedef struct VBoxGuestInfo2 {
+ /** Major version. */
+ u16 additionsMajor;
+ /** Minor version. */
+ u16 additionsMinor;
+ /** Build number. */
+ u32 additionsBuild;
+ /** SVN revision. */
+ u32 additionsRevision;
+ /** Feature mask, currently unused. */
+ u32 additionsFeatures;
+ /**
+ * The intentional meaning of this field was:
+ * Some additional information, for example 'Beta 1' or something like
+ * that.
+ *
+ * The way it was actually implemented: VBOX_VERSION_STRING.
+ *
+ * This means the first three members are duplicated in this field (if
+ * the guest build config is sane). So, the user must check this and
+ * chop it off before usage. There is, because of the Main code's blind
+ * trust in the field's content, no way back.
+ */
+ char szName[128];
+} VBoxGuestInfo2;
+VMMDEV_ASSERT_SIZE(VBoxGuestInfo2, 144);
+
+/**
+ * Guest information report, version 2.
+ *
+ * Used by VMMDevReq_ReportGuestInfo2.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Guest information. */
+ VBoxGuestInfo2 guestInfo;
+} VMMDevReportGuestInfo2;
+VMMDEV_ASSERT_SIZE(VMMDevReportGuestInfo2, 24+144);
+
+/**
+ * The guest facility.
+ * This needs to be kept in sync with AdditionsFacilityType of the Main API!
+ */
+typedef enum {
+ VBoxGuestFacilityType_Unknown = 0,
+ VBoxGuestFacilityType_VBoxGuestDriver = 20,
+ /* VBoxGINA / VBoxCredProv / pam_vbox. */
+ VBoxGuestFacilityType_AutoLogon = 90,
+ VBoxGuestFacilityType_VBoxService = 100,
+ /* VBoxTray (Windows), VBoxClient (Linux, Unix). */
+ VBoxGuestFacilityType_VBoxTrayClient = 101,
+ VBoxGuestFacilityType_Seamless = 1000,
+ VBoxGuestFacilityType_Graphics = 1100,
+ VBoxGuestFacilityType_All = 0x7ffffffe,
+ VBoxGuestFacilityType_SizeHack = 0x7fffffff
+} VBoxGuestFacilityType;
+VMMDEV_ASSERT_SIZE(VBoxGuestFacilityType, 4);
+
+/**
+ * The current guest status of a facility.
+ * This needs to be kept in sync with AdditionsFacilityStatus of the Main API!
+ *
+ * @remarks r=bird: Pretty please, for future types like this, simply do a
+ * linear allocation without any gaps. This stuff is impossible to work
+ * efficiently with, let alone validate. Applies to the other facility
+ * enums too.
+ */
+typedef enum {
+ VBoxGuestFacilityStatus_Inactive = 0,
+ VBoxGuestFacilityStatus_Paused = 1,
+ VBoxGuestFacilityStatus_PreInit = 20,
+ VBoxGuestFacilityStatus_Init = 30,
+ VBoxGuestFacilityStatus_Active = 50,
+ VBoxGuestFacilityStatus_Terminating = 100,
+ VBoxGuestFacilityStatus_Terminated = 101,
+ VBoxGuestFacilityStatus_Failed = 800,
+ VBoxGuestFacilityStatus_Unknown = 999,
+ VBoxGuestFacilityStatus_SizeHack = 0x7fffffff
+} VBoxGuestFacilityStatus;
+VMMDEV_ASSERT_SIZE(VBoxGuestFacilityStatus, 4);
+
+/**
+ * The facility class.
+ * This needs to be kept in sync with AdditionsFacilityClass of the Main API!
+ */
+typedef enum {
+ VBoxGuestFacilityClass_None = 0,
+ VBoxGuestFacilityClass_Driver = 10,
+ VBoxGuestFacilityClass_Service = 30,
+ VBoxGuestFacilityClass_Program = 50,
+ VBoxGuestFacilityClass_Feature = 100,
+ VBoxGuestFacilityClass_ThirdParty = 999,
+ VBoxGuestFacilityClass_All = 0x7ffffffe,
+ VBoxGuestFacilityClass_SizeHack = 0x7fffffff
+} VBoxGuestFacilityClass;
+VMMDEV_ASSERT_SIZE(VBoxGuestFacilityClass, 4);
+
+/**
+ * Guest status structure.
+ *
+ * Used by VMMDevReqGuestStatus.
+ */
+typedef struct VBoxGuestStatus {
+ /** Facility the status is indicated for. */
+ VBoxGuestFacilityType facility;
+ /** Current guest status. */
+ VBoxGuestFacilityStatus status;
+ /** Flags, not used at the moment. */
+ u32 flags;
+} VBoxGuestStatus;
+VMMDEV_ASSERT_SIZE(VBoxGuestStatus, 12);
+
+/**
+ * Guest Additions status structure.
+ *
+ * Used by VMMDevReq_ReportGuestStatus.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Guest information. */
+ VBoxGuestStatus guestStatus;
+} VMMDevReportGuestStatus;
+VMMDEV_ASSERT_SIZE(VMMDevReportGuestStatus, 24+12);
+
+/**
+ * The current status of specific guest user.
+ * This needs to be kept in sync with GuestUserState of the Main API!
+ */
+typedef enum VBoxGuestUserState {
+ VBoxGuestUserState_Unknown = 0,
+ VBoxGuestUserState_LoggedIn = 1,
+ VBoxGuestUserState_LoggedOut = 2,
+ VBoxGuestUserState_Locked = 3,
+ VBoxGuestUserState_Unlocked = 4,
+ VBoxGuestUserState_Disabled = 5,
+ VBoxGuestUserState_Idle = 6,
+ VBoxGuestUserState_InUse = 7,
+ VBoxGuestUserState_Created = 8,
+ VBoxGuestUserState_Deleted = 9,
+ VBoxGuestUserState_SessionChanged = 10,
+ VBoxGuestUserState_CredentialsChanged = 11,
+ VBoxGuestUserState_RoleChanged = 12,
+ VBoxGuestUserState_GroupAdded = 13,
+ VBoxGuestUserState_GroupRemoved = 14,
+ VBoxGuestUserState_Elevated = 15,
+ VBoxGuestUserState_SizeHack = 0x7fffffff
+} VBoxGuestUserState;
+VMMDEV_ASSERT_SIZE(VBoxGuestUserState, 4);
+
+/**
+ * Guest user status updates.
+ */
+typedef struct VBoxGuestUserStatus {
+ /** The guest user state to send. */
+ VBoxGuestUserState state;
+ /** Size (in bytes) of szUser. */
+ u32 cbUser;
+ /** Size (in bytes) of szDomain. */
+ u32 cbDomain;
+ /** Size (in bytes) of aDetails. */
+ u32 cbDetails;
+ /** Note: Here begins the dynamically allocated region. */
+ /** Guest user to report state for. */
+ char szUser[1];
+ /** Domain the guest user is bound to. */
+ char szDomain[1];
+ /** Optional details of the state. */
+ u8 aDetails[1];
+} VBoxGuestUserStatus;
+VMMDEV_ASSERT_SIZE(VBoxGuestUserStatus, 20);
+
+/**
+ * Guest user status structure.
+ *
+ * Used by VMMDevReq_ReportGuestUserStatus.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Guest user status. */
+ VBoxGuestUserStatus status;
+} VMMDevReportGuestUserState;
+VMMDEV_ASSERT_SIZE(VMMDevReportGuestUserState, 24+20);
+
+/**
+ * Guest statistics structure.
+ *
+ * Used by VMMDevReportGuestStats and PDMIVMMDEVCONNECTOR::pfnReportStatistics.
+ */
+typedef struct VBoxGuestStatistics {
+ /** Virtual CPU ID. */
+ u32 u32CpuId;
+ /** Reported statistics. */
+ u32 u32StatCaps;
+ /** Idle CPU load (0-100) for last interval. */
+ u32 u32CpuLoad_Idle;
+ /** Kernel CPU load (0-100) for last interval. */
+ u32 u32CpuLoad_Kernel;
+ /** User CPU load (0-100) for last interval. */
+ u32 u32CpuLoad_User;
+ /** Nr of threads. */
+ u32 u32Threads;
+ /** Nr of processes. */
+ u32 u32Processes;
+ /** Nr of handles. */
+ u32 u32Handles;
+ /** Memory load (0-100). */
+ u32 u32MemoryLoad;
+ /** Page size of guest system. */
+ u32 u32PageSize;
+ /** Total physical memory (in 4KB pages). */
+ u32 u32PhysMemTotal;
+ /** Available physical memory (in 4KB pages). */
+ u32 u32PhysMemAvail;
+ /** Ballooned physical memory (in 4KB pages). */
+ u32 u32PhysMemBalloon;
+ /** Total committed memory (not necessarily in-use) (in 4KB pages). */
+ u32 u32MemCommitTotal;
+ /** Total amount of memory used by the kernel (in 4KB pages). */
+ u32 u32MemKernelTotal;
+ /** Total amount of paged memory used by the kernel (in 4KB pages). */
+ u32 u32MemKernelPaged;
+ /** Total amount of nonpaged memory used by the kernel (4KB pages). */
+ u32 u32MemKernelNonPaged;
+ /** Total amount of memory used for the system cache (in 4KB pages). */
+ u32 u32MemSystemCache;
+ /** Pagefile size (in 4KB pages). */
+ u32 u32PageFileSize;
+} VBoxGuestStatistics;
+VMMDEV_ASSERT_SIZE(VBoxGuestStatistics, 19*4);
+
+/**
+ * @name Guest statistics values (VBoxGuestStatistics::u32StatCaps).
+ * @{
+ */
+#define VBOX_GUEST_STAT_CPU_LOAD_IDLE BIT(0)
+#define VBOX_GUEST_STAT_CPU_LOAD_KERNEL BIT(1)
+#define VBOX_GUEST_STAT_CPU_LOAD_USER BIT(2)
+#define VBOX_GUEST_STAT_THREADS BIT(3)
+#define VBOX_GUEST_STAT_PROCESSES BIT(4)
+#define VBOX_GUEST_STAT_HANDLES BIT(5)
+#define VBOX_GUEST_STAT_MEMORY_LOAD BIT(6)
+#define VBOX_GUEST_STAT_PHYS_MEM_TOTAL BIT(7)
+#define VBOX_GUEST_STAT_PHYS_MEM_AVAIL BIT(8)
+#define VBOX_GUEST_STAT_PHYS_MEM_BALLOON BIT(9)
+#define VBOX_GUEST_STAT_MEM_COMMIT_TOTAL BIT(10)
+#define VBOX_GUEST_STAT_MEM_KERNEL_TOTAL BIT(11)
+#define VBOX_GUEST_STAT_MEM_KERNEL_PAGED BIT(12)
+#define VBOX_GUEST_STAT_MEM_KERNEL_NONPAGED BIT(13)
+#define VBOX_GUEST_STAT_MEM_SYSTEM_CACHE BIT(14)
+#define VBOX_GUEST_STAT_PAGE_FILE_SIZE BIT(15)
+/** @} */
+
+/**
+ * Guest statistics command structure.
+ *
+ * Used by VMMDevReq_ReportGuestStats.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Guest information. */
+ VBoxGuestStatistics guestStats;
+} VMMDevReportGuestStats;
+VMMDEV_ASSERT_SIZE(VMMDevReportGuestStats, 24+19*4);
+
+/**
+ * @name The ballooning chunk size which VMMDev works at.
+ * @{
+ */
+#define VMMDEV_MEMORY_BALLOON_CHUNK_SIZE (1048576)
+#define VMMDEV_MEMORY_BALLOON_CHUNK_PAGES (1048576 / 4096)
+/** @} */
+
+/**
+ * Poll for ballooning change request.
+ *
+ * Used by VMMDevReq_GetMemBalloonChangeRequest.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Balloon size in 1MB chunks (see VMMDEV_MEMORY_BALLOON_CHUNK_SIZE). */
+	u32 cBalloonChunks;
+	/** Guest ram size in 1MB chunks (numerically equal to megabytes). */
+	u32 cPhysMemChunks;
+	/**
+	 * Setting this to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST indicates that
+	 * the request is a response to that event.
+	 * (Don't confuse this with VMMDevReq_AcknowledgeEvents.)
+	 */
+	u32 eventAck;
+} VMMDevGetMemBalloonChangeRequest;
+VMMDEV_ASSERT_SIZE(VMMDevGetMemBalloonChangeRequest, 24+12);
+
+/**
+ * Change the size of the balloon.
+ *
+ * Used by VMMDevReq_ChangeMemBalloon.
+ *
+ * NOTE(review): unlike the sibling request structures there is no
+ * VMMDEV_ASSERT_SIZE here; with 256 u64 page entries the struct is
+ * 24 + 4 + 4 + 2048 bytes — confirm the host agrees before adding one.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** The number of pages in the array. */
+	u32 pages;
+	/** true = inflate, false = deflate. */
+	u32 inflate;
+	/** Physical address (u64) of each page. */
+	u64 phys_page[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES];
+} VMMDevChangeMemBalloon;
+
+/**
+ * Guest statistics interval change request structure.
+ *
+ * Used by VMMDevReq_GetStatisticsChangeRequest.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** The interval in seconds. */
+ u32 u32StatInterval;
+ /**
+ * Setting this to VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST
+ * indicates that the request is a response to that event.
+ * (Don't confuse this with VMMDevReq_AcknowledgeEvents.)
+ */
+ u32 eventAck;
+} VMMDevGetStatisticsChangeRequest;
+VMMDEV_ASSERT_SIZE(VMMDevGetStatisticsChangeRequest, 24+8);
+
+/**
+ * The size of a string field in the credentials request (including '\\0').
+ * @see VMMDevCredentials
+ */
+#define VMMDEV_CREDENTIALS_SZ_SIZE 128
+
+/**
+ * Credentials request structure.
+ *
+ * Used by VMMDevReq_QueryCredentials.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** IN/OUT: Request flags. */
+ u32 u32Flags;
+ /** OUT: User name (UTF-8). */
+ char szUserName[VMMDEV_CREDENTIALS_SZ_SIZE];
+ /** OUT: Password (UTF-8). */
+ char szPassword[VMMDEV_CREDENTIALS_SZ_SIZE];
+ /** OUT: Domain name (UTF-8). */
+ char szDomain[VMMDEV_CREDENTIALS_SZ_SIZE];
+} VMMDevCredentials;
+VMMDEV_ASSERT_SIZE(VMMDevCredentials, 24+4+3*128);
+
+/**
+ * @name Credentials request flag (VMMDevCredentials::u32Flags)
+ *
+ * Note: the values start at BIT(1) and are sparse (bit 0 and bits 4-7
+ * unused), presumably to match the host-side ABI — do not renumber.
+ * @{
+ */
+/** query from host whether credentials are present */
+#define VMMDEV_CREDENTIALS_QUERYPRESENCE BIT(1)
+/** read credentials from host (can be combined with clear) */
+#define VMMDEV_CREDENTIALS_READ BIT(2)
+/** clear credentials on host (can be combined with read) */
+#define VMMDEV_CREDENTIALS_CLEAR BIT(3)
+/** read credentials for judgement in the guest */
+#define VMMDEV_CREDENTIALS_READJUDGE BIT(8)
+/** clear credentials for judgement on the host */
+#define VMMDEV_CREDENTIALS_CLEARJUDGE BIT(9)
+/** report credentials acceptance by guest */
+#define VMMDEV_CREDENTIALS_JUDGE_OK BIT(10)
+/** report credentials denial by guest */
+#define VMMDEV_CREDENTIALS_JUDGE_DENY BIT(11)
+/** report that no judgement could be made by guest */
+#define VMMDEV_CREDENTIALS_JUDGE_NOJUDGEMENT BIT(12)
+
+/** flag telling the guest that credentials are present */
+#define VMMDEV_CREDENTIALS_PRESENT BIT(16)
+/** flag telling guest that local logons should be prohibited */
+#define VMMDEV_CREDENTIALS_NOLOCALLOGON BIT(17)
+/** @} */
+
+/**
+ * Seamless mode.
+ *
+ * Used by VbglR3SeamlessWaitEvent
+ *
+ * @ingroup grp_vmmdev_req
+ *
+ * @todo DARN! DARN! DARN! Who forgot to do the 32-bit hack here???
+ * FIXME! XXX!
+ *
+ * We will now have to carefully check how our compilers have treated this
+ * flag. If any are compressing it into a byte type, we'll have to check
+ * how the request memory is initialized. If we are 104% sure it's ok to
+ * expand it, we'll expand it. If not, we must redefine the field to a
+ * u8 and a 3 byte padding.
+ */
+typedef enum {
+ /** normal mode; entire guest desktop displayed. */
+ VMMDev_Seamless_Disabled = 0,
+ /** visible region mode; only top-level guest windows displayed. */
+ VMMDev_Seamless_Visible_Region = 1,
+ /**
+ * windowed mode; each top-level guest window is represented in a
+ * host window.
+ */
+ VMMDev_Seamless_Host_Window = 2
+} VMMDevSeamlessMode;
+
+/**
+ * Seamless mode change request structure.
+ *
+ * Used by VMMDevReq_GetSeamlessChangeRequest.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+
+ /** New seamless mode. */
+ VMMDevSeamlessMode mode;
+ /**
+ * Setting this to VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST indicates
+ * that the request is a response to that event.
+ * (Don't confuse this with VMMDevReq_AcknowledgeEvents.)
+ */
+ u32 eventAck;
+} VMMDevSeamlessChangeRequest;
+VMMDEV_ASSERT_SIZE(VMMDevSeamlessChangeRequest, 24+8);
+VMMDEV_ASSERT_MEMBER_OFFSET(VMMDevSeamlessChangeRequest, eventAck, 24+4);
+
+/**
+ * Display change request structure.
+ *
+ * Used by VMMDevReq_GetDisplayChangeRequest.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Horizontal pixel resolution (0 = do not change). */
+ u32 xres;
+ /** Vertical pixel resolution (0 = do not change). */
+ u32 yres;
+ /** Bits per pixel (0 = do not change). */
+ u32 bpp;
+ /**
+ * Setting this to VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST indicates
+ * that the request is a response to that event.
+ * (Don't confuse this with VMMDevReq_AcknowledgeEvents.)
+ */
+ u32 eventAck;
+} VMMDevDisplayChangeRequest;
+VMMDEV_ASSERT_SIZE(VMMDevDisplayChangeRequest, 24+16);
+
+/**
+ * Display change request structure, version 2.
+ *
+ * Used by VMMDevReq_GetDisplayChangeRequest2.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Horizontal pixel resolution (0 = do not change). */
+ u32 xres;
+ /** Vertical pixel resolution (0 = do not change). */
+ u32 yres;
+ /** Bits per pixel (0 = do not change). */
+ u32 bpp;
+ /**
+ * Setting this to VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST indicates
+ * that the request is a response to that event.
+ * (Don't confuse this with VMMDevReq_AcknowledgeEvents.)
+ */
+ u32 eventAck;
+ /** 0 for primary display, 1 for the first secondary, etc. */
+ u32 display;
+} VMMDevDisplayChangeRequest2;
+VMMDEV_ASSERT_SIZE(VMMDevDisplayChangeRequest2, 24+20);
+
+/**
+ * Display change request structure, version Extended.
+ *
+ * Used by VMMDevReq_GetDisplayChangeRequestEx.
+ *
+ * NOTE(review): 'bool' in a guest/host ABI structure has no guaranteed size
+ * or representation across compilers; the size assert below pins the current
+ * layout, but u8 would be more robust — confirm with the host-side definition.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Horizontal pixel resolution (0 = do not change). */
+	u32 xres;
+	/** Vertical pixel resolution (0 = do not change). */
+	u32 yres;
+	/** Bits per pixel (0 = do not change). */
+	u32 bpp;
+	/**
+	 * Setting this to VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST indicates
+	 * that the request is a response to that event.
+	 * (Don't confuse this with VMMDevReq_AcknowledgeEvents.)
+	 */
+	u32 eventAck;
+	/** 0 for primary display, 1 for the first secondary, etc. */
+	u32 display;
+	/** New OriginX of secondary virtual screen */
+	u32 cxOrigin;
+	/** New OriginY of secondary virtual screen */
+	u32 cyOrigin;
+	/** Change in origin of the secondary virtual screen is required */
+	bool fChangeOrigin;
+	/** Secondary virtual screen enabled or disabled */
+	bool fEnabled;
+} VMMDevDisplayChangeRequestEx;
+VMMDEV_ASSERT_SIZE(VMMDevDisplayChangeRequestEx, 24+32);
+
+/**
+ * Video mode supported request structure.
+ *
+ * Used by VMMDevReq_VideoModeSupported.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** IN: Horizontal pixel resolution. */
+ u32 width;
+ /** IN: Vertical pixel resolution. */
+ u32 height;
+ /** IN: Bits per pixel. */
+ u32 bpp;
+ /** OUT: Support indicator. */
+ bool fSupported;
+} VMMDevVideoModeSupportedRequest;
+VMMDEV_ASSERT_SIZE(VMMDevVideoModeSupportedRequest, 24+16);
+
+/**
+ * Video mode supported request structure for a specific display.
+ *
+ * Used by VMMDevReq_VideoModeSupported2.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** IN: The guest display number. */
+ u32 display;
+ /** IN: Horizontal pixel resolution. */
+ u32 width;
+ /** IN: Vertical pixel resolution. */
+ u32 height;
+ /** IN: Bits per pixel. */
+ u32 bpp;
+ /** OUT: Support indicator. */
+ bool fSupported;
+} VMMDevVideoModeSupportedRequest2;
+VMMDEV_ASSERT_SIZE(VMMDevVideoModeSupportedRequest2, 24+20);
+
+/**
+ * Video modes height reduction request structure.
+ *
+ * Used by VMMDevReq_GetHeightReduction.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** OUT: Height reduction in pixels. */
+ u32 heightReduction;
+} VMMDevGetHeightReductionRequest;
+VMMDEV_ASSERT_SIZE(VMMDevGetHeightReductionRequest, 24+4);
+
+/**
+ * VRDP change request structure.
+ *
+ * Used by VMMDevReq_GetVRDPChangeRequest.
+ */
+typedef struct {
+ /** Header */
+ VMMDevRequestHeader header;
+ /** Whether VRDP is active or not. */
+ u8 u8VRDPActive;
+ /** The configured experience level for active VRDP. */
+ u32 u32VRDPExperienceLevel;
+} VMMDevVRDPChangeRequest;
+VMMDEV_ASSERT_SIZE(VMMDevVRDPChangeRequest, 24+8);
+VMMDEV_ASSERT_MEMBER_OFFSET(VMMDevVRDPChangeRequest, u8VRDPActive, 24);
+VMMDEV_ASSERT_MEMBER_OFFSET(VMMDevVRDPChangeRequest, u32VRDPExperienceLevel,
+ 24+4);
+
+/**
+ * @name VRDP Experience level (VMMDevVRDPChangeRequest::u32VRDPExperienceLevel)
+ * @{
+ */
+#define VRDP_EXPERIENCE_LEVEL_ZERO 0 /**< Theming disabled. */
+#define VRDP_EXPERIENCE_LEVEL_LOW 1 /**< Full win drag + wallpaper dis. */
+#define VRDP_EXPERIENCE_LEVEL_MEDIUM 2 /**< Font smoothing, gradients. */
+#define VRDP_EXPERIENCE_LEVEL_HIGH 3 /**< Animation effects disabled. */
+#define VRDP_EXPERIENCE_LEVEL_FULL 4 /**< Everything enabled. */
+/** @} */
+
+/**
+ * VBVA enable request structure.
+ *
+ * Used by VMMDevReq_VideoAccelEnable.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** 0 - disable, !0 - enable. */
+ u32 u32Enable;
+ /**
+ * The size of VBVAMEMORY::au8RingBuffer expected by driver.
+ * The host will refuse to enable VBVA if the size is not equal to
+ * VBVA_RING_BUFFER_SIZE.
+ */
+ u32 cbRingBuffer;
+ /**
+ * Guest initializes the status to 0. Host sets appropriate
+ * VBVA_F_STATUS_ flags.
+ */
+ u32 fu32Status;
+} VMMDevVideoAccelEnable;
+VMMDEV_ASSERT_SIZE(VMMDevVideoAccelEnable, 24+12);
+
+/**
+ * @name VMMDevVideoAccelEnable::fu32Status.
+ * @{
+ */
+#define VBVA_F_STATUS_ACCEPTED (0x01)
+#define VBVA_F_STATUS_ENABLED (0x02)
+/** @} */
+
+/**
+ * VBVA flush request structure.
+ *
+ * Used by VMMDevReq_VideoAccelFlush.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+} VMMDevVideoAccelFlush;
+VMMDEV_ASSERT_SIZE(VMMDevVideoAccelFlush, 24);
+
+/**
+ * Rectangle data type, double point.
+ */
+typedef struct RTRECT {
+ /** left X coordinate. */
+ s32 xLeft;
+ /** top Y coordinate. */
+ s32 yTop;
+ /** right X coordinate. (exclusive) */
+ s32 xRight;
+ /** bottom Y coordinate. (exclusive) */
+ s32 yBottom;
+} RTRECT;
+
+/**
+ * VBVA set visible region request structure.
+ *
+ * Used by VMMDevReq_VideoSetVisibleRegion.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Number of rectangles */
+	u32 cRect;
+	/**
+	 * Rectangle array; Rect is presumably only the first of cRect
+	 * consecutive RTRECT entries, with the request allocated large
+	 * enough to hold all of them — confirm against host side.
+	 * @todo array is spelled aRects[1].
+	 */
+	RTRECT Rect;
+} VMMDevVideoSetVisibleRegion;
+VMMDEV_ASSERT_SIZE(RTRECT, 16);
+VMMDEV_ASSERT_SIZE(VMMDevVideoSetVisibleRegion, 24+4+16);
+
+/**
+ * CPU event types.
+ */
+typedef enum {
+ VMMDevCpuStatusType_Invalid = 0,
+ VMMDevCpuStatusType_Disable = 1,
+ VMMDevCpuStatusType_Enable = 2,
+ VMMDevCpuStatusType_SizeHack = 0x7fffffff
+} VMMDevCpuStatusType;
+
+/**
+ * CPU hotplug event status request.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Status type */
+ VMMDevCpuStatusType enmStatusType;
+} VMMDevCpuHotPlugStatusRequest;
+VMMDEV_ASSERT_SIZE(VMMDevCpuHotPlugStatusRequest, 24+4);
+
+/**
+ * CPU event types.
+ *
+ * Used by VbglR3CpuHotplugWaitForEvent
+ *
+ * @ingroup grp_vmmdev_req
+ */
+typedef enum {
+ VMMDevCpuEventType_Invalid = 0,
+ VMMDevCpuEventType_None = 1,
+ VMMDevCpuEventType_Plug = 2,
+ VMMDevCpuEventType_Unplug = 3,
+ VMMDevCpuEventType_SizeHack = 0x7fffffff
+} VMMDevCpuEventType;
+
+/**
+ * Get the ID of the changed CPU and event type.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Event type */
+ VMMDevCpuEventType enmEventType;
+ /** core id of the CPU changed */
+ u32 idCpuCore;
+ /** package id of the CPU changed */
+ u32 idCpuPackage;
+} VMMDevGetCpuHotPlugRequest;
+VMMDEV_ASSERT_SIZE(VMMDevGetCpuHotPlugRequest, 24+4+4+4);
+
+/**
+ * Shared region description
+ */
+typedef struct VMMDEVSHAREDREGIONDESC {
+ u64 GCRegionAddr;
+ u32 cbRegion;
+ u32 u32Alignment;
+} VMMDEVSHAREDREGIONDESC;
+VMMDEV_ASSERT_SIZE(VMMDEVSHAREDREGIONDESC, 16);
+
+#define VMMDEVSHAREDREGIONDESC_MAX 32
+
+/**
+ * Shared module registration
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Shared module size. */
+ u32 cbModule;
+ /** Number of included region descriptors */
+ u32 cRegions;
+ /** Base address of the shared module. */
+ u64 GCBaseAddr;
+ /** Guest OS type. */
+ VBOXOSFAMILY enmGuestOS;
+ /** Alignment. */
+ u32 u32Align;
+ /** Module name */
+ char szName[128];
+ /** Module version */
+ char szVersion[16];
+ /** Shared region descriptor(s). */
+ VMMDEVSHAREDREGIONDESC aRegions[1];
+} VMMDevSharedModuleRegistrationRequest;
+VMMDEV_ASSERT_SIZE(VMMDevSharedModuleRegistrationRequest,
+ 24+4+4+8+4+4+128+16+16);
+
+/**
+ * Shared module unregistration
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Shared module size. */
+ u32 cbModule;
+ /** Align at 8 byte boundary. */
+ u32 u32Alignment;
+ /** Base address of the shared module. */
+ u64 GCBaseAddr;
+ /** Module name */
+ char szName[128];
+ /** Module version */
+ char szVersion[16];
+} VMMDevSharedModuleUnregistrationRequest;
+VMMDEV_ASSERT_SIZE(VMMDevSharedModuleUnregistrationRequest, 24+4+4+8+128+16);
+
+/**
+ * Shared module periodic check
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+} VMMDevSharedModuleCheckRequest;
+VMMDEV_ASSERT_SIZE(VMMDevSharedModuleCheckRequest, 24);
+
+/**
+ * Paging sharing enabled query
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Enabled flag (out) */
+ bool fEnabled;
+ /** Alignment */
+ bool fAlignment[3];
+} VMMDevPageSharingStatusRequest;
+VMMDEV_ASSERT_SIZE(VMMDevPageSharingStatusRequest, 24+4);
+
+/**
+ * Page sharing status query (debug build only)
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Page address, 32 bits on 32 bit builds, 64 bit on 64 bit builds */
+ unsigned long GCPtrPage;
+ /** Page flags. */
+ u64 uPageFlags;
+ /** Shared flag (out) */
+ bool fShared;
+ /** Alignment */
+ bool fAlignment[3];
+} VMMDevPageIsSharedRequest;
+
+/**
+ * Session id request structure.
+ *
+ * Used by VMMDevReq_GetSessionId.
+ */
+typedef struct {
+ /** Header */
+ VMMDevRequestHeader header;
+ /**
+ * OUT: unique session id; the id will be different after each start,
+ * reset or restore of the VM.
+ */
+ u64 idSession;
+} VMMDevReqSessionId;
+VMMDEV_ASSERT_SIZE(VMMDevReqSessionId, 24+8);
+
+/**
+ * Write Core Dump request.
+ *
+ * Used by VMMDevReq_WriteCoreDump.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Flags (reserved, MBZ). */
+ u32 fFlags;
+} VMMDevReqWriteCoreDump;
+VMMDEV_ASSERT_SIZE(VMMDevReqWriteCoreDump, 24+4);
+
+/** Heart beat check state structure. Used by VMMDevReq_HeartbeatConfigure. */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** OUT: Guest heartbeat interval in nanosec. */
+ u64 cNsInterval;
+ /** Heartbeat check flag. */
+ bool fEnabled;
+} VMMDevReqHeartbeat;
+VMMDEV_ASSERT_SIZE(VMMDevReqHeartbeat, 24+12);
+
+/**
+ * @name HGCM flags.
+ * @{
+ */
+/** Bit number of the request-done flag. */
+#define VBOX_HGCM_REQ_DONE_BIT 0
+/** Set in VMMDevHGCMRequestHeader::fu32Flags when the host has completed
+ *  the request. */
+#define VBOX_HGCM_REQ_DONE BIT(VBOX_HGCM_REQ_DONE_BIT)
+/** The request has been cancelled. */
+#define VBOX_HGCM_REQ_CANCELLED (0x2)
+/** @} */
+
+/**
+ * HGCM request header.
+ */
+typedef struct VMMDevHGCMRequestHeader {
+ /** Request header. */
+ VMMDevRequestHeader header;
+
+ /** HGCM flags. */
+ u32 fu32Flags;
+
+ /** Result code. */
+ s32 result;
+} VMMDevHGCMRequestHeader;
+VMMDEV_ASSERT_SIZE(VMMDevHGCMRequestHeader, 24+8);
+
+/**
+ * HGCM service location types.
+ * @ingroup grp_vmmdev_req
+ */
+typedef enum {
+ VMMDevHGCMLoc_Invalid = 0,
+ VMMDevHGCMLoc_LocalHost = 1,
+ VMMDevHGCMLoc_LocalHost_Existing = 2,
+ VMMDevHGCMLoc_SizeHack = 0x7fffffff
+} HGCMServiceLocationType;
+VMMDEV_ASSERT_SIZE(HGCMServiceLocationType, 4);
+
+/**
+ * HGCM host service location.
+ * @ingroup grp_vmmdev_req
+ */
+typedef struct {
+ char achName[128]; /**< This is really szName. */
+} HGCMServiceLocationHost;
+VMMDEV_ASSERT_SIZE(HGCMServiceLocationHost, 128);
+
+/**
+ * HGCM service location.
+ * @ingroup grp_vmmdev_req
+ */
+typedef struct HGCMSERVICELOCATION {
+ /** Type of the location. */
+ HGCMServiceLocationType type;
+
+ union {
+ HGCMServiceLocationHost host;
+ } u;
+} HGCMServiceLocation;
+VMMDEV_ASSERT_SIZE(HGCMServiceLocation, 128+4);
+
+/**
+ * HGCM connect request structure.
+ *
+ * Used by VMMDevReq_HGCMConnect.
+ */
+typedef struct {
+ /** HGCM request header. */
+ VMMDevHGCMRequestHeader header;
+
+ /** IN: Description of service to connect to. */
+ HGCMServiceLocation loc;
+
+ /** OUT: Client identifier assigned by local instance of HGCM. */
+ u32 u32ClientID;
+} VMMDevHGCMConnect;
+VMMDEV_ASSERT_SIZE(VMMDevHGCMConnect, 32+132+4);
+
+/**
+ * HGCM disconnect request structure.
+ *
+ * Used by VMMDevReq_HGCMDisconnect.
+ */
+typedef struct {
+ /** HGCM request header. */
+ VMMDevHGCMRequestHeader header;
+
+ /** IN: Client identifier. */
+ u32 u32ClientID;
+} VMMDevHGCMDisconnect;
+VMMDEV_ASSERT_SIZE(VMMDevHGCMDisconnect, 32+4);
+
+/**
+ * HGCM parameter type.
+ */
+typedef enum {
+ VMMDevHGCMParmType_Invalid = 0,
+ VMMDevHGCMParmType_32bit = 1,
+ VMMDevHGCMParmType_64bit = 2,
+ /** @deprecated Doesn't work, use PageList. */
+ VMMDevHGCMParmType_PhysAddr = 3,
+ /** In and Out */
+ VMMDevHGCMParmType_LinAddr = 4,
+ /** In (read; host<-guest) */
+ VMMDevHGCMParmType_LinAddr_In = 5,
+ /** Out (write; host->guest) */
+ VMMDevHGCMParmType_LinAddr_Out = 6,
+ /* 7 - 9 VMMDevHGCMParmType_LinAddr_Locked*, non Linux R0 usage only */
+ /** Physical addresses of locked pages for a buffer. */
+ VMMDevHGCMParmType_PageList = 10,
+ VMMDevHGCMParmType_SizeHack = 0x7fffffff
+} HGCMFunctionParameterType;
+VMMDEV_ASSERT_SIZE(HGCMFunctionParameterType, 4);
+
+/**
+ * HGCM function parameter, 32-bit client.
+ */
+typedef struct HGCMFunctionParameter32 {
+ HGCMFunctionParameterType type;
+ union {
+ u32 value32;
+ u64 value64;
+ struct {
+ u32 size;
+ union {
+ u32 physAddr;
+ u32 linearAddr;
+ } u;
+ } Pointer;
+ struct {
+ /** Size of the buffer described by the page list. */
+ u32 size;
+ /** Relative to the request header. */
+ u32 offset;
+ } PageList;
+ } u;
+} HGCMFunctionParameter32;
+VMMDEV_ASSERT_SIZE(HGCMFunctionParameter32, 4+8);
+
+/**
+ * HGCM function parameter, 64-bit client.
+ */
+typedef struct HGCMFunctionParameter64 {
+ HGCMFunctionParameterType type;
+ union {
+ u32 value32;
+ u64 value64;
+ struct {
+ u32 size;
+ union {
+ u64 physAddr;
+ u64 linearAddr;
+ } u;
+ } Pointer;
+ struct {
+ /** Size of the buffer described by the page list. */
+ u32 size;
+ /** Relative to the request header. */
+ u32 offset;
+ } PageList;
+ } u;
+} HGCMFunctionParameter64;
+VMMDEV_ASSERT_SIZE(HGCMFunctionParameter64, 4+12);
+
+#if __BITS_PER_LONG == 64
+#define HGCMFunctionParameter HGCMFunctionParameter64
+#else
+#define HGCMFunctionParameter HGCMFunctionParameter32
+#endif
+
+/**
+ * HGCM call request structure.
+ *
+ * Used by VMMDevReq_HGCMCall32 and VMMDevReq_HGCMCall64.
+ */
+typedef struct {
+ /* request header */
+ VMMDevHGCMRequestHeader header;
+
+ /** IN: Client identifier. */
+ u32 u32ClientID;
+ /** IN: Service function number. */
+ u32 u32Function;
+ /** IN: Number of parameters. */
+ u32 cParms;
+ /** Parameters follow in form: HGCMFunctionParameter32|64 aParms[X]; */
+} VMMDevHGCMCall;
+VMMDEV_ASSERT_SIZE(VMMDevHGCMCall, 32+12);
+
+/**
+ * @name Direction of data transfer (HGCMPageListInfo::flags). Bit flags.
+ * @{
+ */
+#define VBOX_HGCM_F_PARM_DIRECTION_NONE 0x00000000U
+#define VBOX_HGCM_F_PARM_DIRECTION_TO_HOST 0x00000001U
+#define VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST 0x00000002U
+#define VBOX_HGCM_F_PARM_DIRECTION_BOTH 0x00000003U
+/**
+ * Macro for validating that the specified flags are valid.
+ * Note BOTH is not valid.
+ */
+#define VBOX_HGCM_F_PARM_ARE_VALID(fFlags) \
+ ((fFlags) > VBOX_HGCM_F_PARM_DIRECTION_NONE && \
+ (fFlags) < VBOX_HGCM_F_PARM_DIRECTION_BOTH)
+/** @} */
+
+/**
+ * VMMDevHGCMParmType_PageList points to this structure to actually describe the
+ * buffer.
+ */
+typedef struct {
+ u32 flags; /**< VBOX_HGCM_F_PARM_*. */
+ u16 offFirstPage; /**< Offset in the first page where data begins. */
+ u16 cPages; /**< Number of pages. */
+ u64 aPages[1]; /**< Page addresses. */
+} HGCMPageListInfo;
+VMMDEV_ASSERT_SIZE(HGCMPageListInfo, 4+2+2+8);
+
+/** Get the pointer to the first parameter of a HGCM call request. */
+#define VMMDEV_HGCM_CALL_PARMS(a) \
+	((HGCMFunctionParameter *)((u8 *)(a) + sizeof(VMMDevHGCMCall)))
+
+#define VBOX_HGCM_MAX_PARMS 32
+
+/**
+ * HGCM cancel request structure.
+ *
+ * The Cancel request is issued using the same physical memory address as was
+ * used for the corresponding initial HGCMCall.
+ *
+ * Used by VMMDevReq_HGCMCancel.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevHGCMRequestHeader header;
+} VMMDevHGCMCancel;
+VMMDEV_ASSERT_SIZE(VMMDevHGCMCancel, 32);
+
+/**
+ * HGCM cancel request structure, version 2.
+ *
+ * Used by VMMDevReq_HGCMCancel2.
+ *
+ * VINF_SUCCESS when cancelled.
+ * VERR_NOT_FOUND if the specified request cannot be found.
+ * VERR_INVALID_PARAMETER if the address is invalid.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/**
+	 * The physical address of the request to cancel.
+	 * NOTE(review): only 32 bits wide, so the request to cancel must
+	 * have been allocated below 4GB — confirm the allocator guarantees
+	 * this.
+	 */
+	u32 physReqToCancel;
+} VMMDevHGCMCancel2;
+VMMDEV_ASSERT_SIZE(VMMDevHGCMCancel2, 24+4);
+
+/** @} */
+
+#pragma pack()
+
+#endif
diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h
new file mode 100644
index 000000000000..3f2342838489
--- /dev/null
+++ b/include/uapi/linux/vboxguest.h
@@ -0,0 +1,374 @@
+/*
+ * VBoxGuest - VirtualBox Guest Additions Driver Interface. (ADD,DEV)
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef __UAPI_VBOXGUEST_H__
+#define __UAPI_VBOXGUEST_H__
+
+#include <asm/bitsperlong.h>
+#include <linux/ioctl.h>
+#include <linux/vbox_vmmdev.h> /* For HGCMServiceLocation */
+
+/*
+ * We cannot use linux' compiletime_assert here because it expects to be used
+ * inside a function only. Use a typedef to a char array instead: the array
+ * size evaluates to 1 when the sizes match and to a negative value (a compile
+ * error) when they do not.
+ */
+#define VBOXGUEST_ASSERT_SIZE(type, size) \
+	typedef char type ## _assert_size[1 - 2*!!(sizeof(type) != (size))]
+
+/**
+ * @defgroup grp_vboxguest VirtualBox Guest Additions Device Driver
+ *
+ * Also known as VBoxGuest.
+ *
+ * @{
+ */
+
+/**
+ * @defgroup grp_vboxguest_ioc VirtualBox Guest Additions Driver Interface
+ * @{
+ */
+
+/**
+ * @name VBoxGuest IOCTL codes and structures.
+ *
+ * The range 0..15 is for basic driver communication.
+ * The range 16..31 is for HGCM communication.
+ * The range 32..47 is reserved for future use.
+ * The range 48..63 is for OS specific communication.
+ * The 7th bit is reserved for future hacks.
+ * The 8th bit is reserved for distinguishing between 32-bit and 64-bit
+ * processes in future 64-bit guest additions.
+ * @{
+ */
+#if __BITS_PER_LONG == 64
+#define VBOXGUEST_IOCTL_FLAG 128
+#else
+#define VBOXGUEST_IOCTL_FLAG 0
+#endif
+/** @} */
+
+/** Build a VBoxGuest ioctl code: read/write, magic 'V'. */
+#define VBOXGUEST_IOCTL_CODE_(function, size) \
+	_IOC(_IOC_READ|_IOC_WRITE, 'V', (function), (size))
+/** Strip the size field from an ioctl code, for size-independent matching. */
+#define VBOXGUEST_IOCTL_STRIP_SIZE(code) \
+	VBOXGUEST_IOCTL_CODE_(_IOC_NR((code)), 0)
+
+/** ioctl code carrying the 32/64-bit flag for the current userland ABI. */
+#define VBOXGUEST_IOCTL_CODE(function, size) \
+	VBOXGUEST_IOCTL_CODE_((function) | VBOXGUEST_IOCTL_FLAG, size)
+/* Define 32 bit codes to support 32 bit applications in 64 bit guest driver. */
+#define VBOXGUEST_IOCTL_CODE_32(function, size) \
+	VBOXGUEST_IOCTL_CODE_(function, size)
+
+
+/** IOCTL to VBoxGuest to wait for a VMMDev host notification */
+#define VBOXGUEST_IOCTL_WAITEVENT \
+ VBOXGUEST_IOCTL_CODE_(2, sizeof(VBoxGuestWaitEventInfo))
+
+/**
+ * @name Result codes for VBoxGuestWaitEventInfo::u32Result
+ * @{
+ */
+/** Successful completion, an event occurred. */
+#define VBOXGUEST_WAITEVENT_OK (0)
+/** Successful completion, timed out. */
+#define VBOXGUEST_WAITEVENT_TIMEOUT (1)
+/** Wait was interrupted. */
+#define VBOXGUEST_WAITEVENT_INTERRUPTED (2)
+/** An error occurred while processing the request. */
+#define VBOXGUEST_WAITEVENT_ERROR (3)
+/** @} */
+
+/*
+ * NOTE(review): this is a uapi header, yet it uses the kernel-internal
+ * u32/s32 types throughout; exported headers should use __u32/__s32 from
+ * <linux/types.h>, since the short forms are not visible to userspace.
+ * Applies file-wide — confirm and convert before merging.
+ */
+/** Input and output buffers layout of the IOCTL_VBOXGUEST_WAITEVENT */
+typedef struct VBoxGuestWaitEventInfo {
+	/** timeout in milliseconds */
+	u32 u32TimeoutIn;
+	/** events to wait for */
+	u32 u32EventMaskIn;
+	/** result code (VBOXGUEST_WAITEVENT_*) */
+	u32 u32Result;
+	/** events occurred */
+	u32 u32EventFlagsOut;
+} VBoxGuestWaitEventInfo;
+VBOXGUEST_ASSERT_SIZE(VBoxGuestWaitEventInfo, 16);
+
+
+/**
+ * IOCTL to VBoxGuest to perform a VMM request
+ * @remark The data buffer for this IOCtl has a variable size, keep this in
+ * mind on systems where this matters.
+ */
+#define VBOXGUEST_IOCTL_VMMREQUEST(size) \
+	VBOXGUEST_IOCTL_CODE_(3, (size))
+
+
+/** IOCTL to VBoxGuest to control event filter mask. */
+#define VBOXGUEST_IOCTL_CTL_FILTER_MASK \
+ VBOXGUEST_IOCTL_CODE_(4, sizeof(VBoxGuestFilterMaskInfo))
+
+/** Input and output buffer layout of the IOCTL_VBOXGUEST_CTL_FILTER_MASK. */
+typedef struct VBoxGuestFilterMaskInfo {
+ u32 u32OrMask;
+ u32 u32NotMask;
+} VBoxGuestFilterMaskInfo;
+VBOXGUEST_ASSERT_SIZE(VBoxGuestFilterMaskInfo, 8);
+
+/**
+ * IOCTL to VBoxGuest to interrupt (cancel) any pending WAITEVENTs and return.
+ * Handled inside the guest additions and not seen by the host at all.
+ * After calling this, VBOXGUEST_IOCTL_WAITEVENT should no longer be called in
+ * the same session. Any VBOXGUEST_IOCTL_WAITEVENT calls in the same session
+ * done after calling this will directly exit with VERR_INTERRUPTED.
+ * @see VBOXGUEST_IOCTL_WAITEVENT
+ */
+#define VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS \
+ VBOXGUEST_IOCTL_CODE_(5, 0)
+
+/**
+ * IOCTL to VBoxGuest to perform backdoor logging.
+ * The argument is a string buffer of the specified size.
+ */
+#define VBOXGUEST_IOCTL_LOG(size) \
+ VBOXGUEST_IOCTL_CODE_(6, (size))
+
+/**
+ * IOCTL to VBoxGuest to check memory ballooning. The guest kernel module /
+ * device driver will ask the host for the current size of the balloon and
+ * adjust the size. Or it will set fHandleInR3 = true and R3 is responsible
+ * for allocating memory and calling R0 (VBOXGUEST_IOCTL_CHANGE_BALLOON).
+ */
+#define VBOXGUEST_IOCTL_CHECK_BALLOON \
+	VBOXGUEST_IOCTL_CODE_(7, sizeof(VBoxGuestCheckBalloonInfo))
+
+/** Output buffer layout of the VBOXGUEST_IOCTL_CHECK_BALLOON. */
+typedef struct VBoxGuestCheckBalloonInfo {
+	/** The size of the balloon in chunks of 1MB. */
+	u32 cBalloonChunks;
+	/**
+	 * false = handled in R0, no further action required.
+	 * true = allocate balloon memory in R3.
+	 */
+	u32 fHandleInR3;
+} VBoxGuestCheckBalloonInfo;
+VBOXGUEST_ASSERT_SIZE(VBoxGuestCheckBalloonInfo, 8);
+
+/**
+ * IOCTL to VBoxGuest to supply or revoke one chunk for ballooning.
+ * The guest kernel module / device driver will lock down supplied memory or
+ * unlock reclaimed memory and then forward the physical addresses of the
+ * changed balloon chunk to the host.
+ */
+#define VBOXGUEST_IOCTL_CHANGE_BALLOON \
+ VBOXGUEST_IOCTL_CODE_(8, sizeof(VBoxGuestChangeBalloonInfo))
+
+/**
+ * Input buffer layout of the VBOXGUEST_IOCTL_CHANGE_BALLOON request.
+ * Information about a memory chunk used to inflate or deflate the balloon.
+ */
+typedef struct VBoxGuestChangeBalloonInfo {
+ /** Address of the chunk. */
+ u64 u64ChunkAddr;
+ /** true = inflate, false = deflate. */
+ u32 fInflate;
+ /** Alignment padding. */
+ u32 u32Align;
+} VBoxGuestChangeBalloonInfo;
+VBOXGUEST_ASSERT_SIZE(VBoxGuestChangeBalloonInfo, 16);
+
+/** IOCTL to VBoxGuest to write guest core. */
+#define VBOXGUEST_IOCTL_WRITE_CORE_DUMP \
+ VBOXGUEST_IOCTL_CODE(9, sizeof(VBoxGuestWriteCoreDump))
+
+/** Input and output buffer layout of the VBOXGUEST_IOCTL_WRITE_CORE request. */
+typedef struct VBoxGuestWriteCoreDump {
+ /** Flags (reserved, MBZ). */
+ u32 fFlags;
+} VBoxGuestWriteCoreDump;
+VBOXGUEST_ASSERT_SIZE(VBoxGuestWriteCoreDump, 4);
+
+/** IOCTL to VBoxGuest to update the mouse status features. */
+#define VBOXGUEST_IOCTL_SET_MOUSE_STATUS \
+ VBOXGUEST_IOCTL_CODE_(10, sizeof(u32))
+
+/** IOCTL to VBoxGuest to connect to a HGCM service. */
+#define VBOXGUEST_IOCTL_HGCM_CONNECT \
+ VBOXGUEST_IOCTL_CODE(16, sizeof(VBoxGuestHGCMConnectInfo))
+
+/**
+ * HGCM connect info structure.
+ *
+ * This is used by VBOXGUEST_IOCTL_HGCM_CONNECT.
+ */
+struct VBoxGuestHGCMConnectInfo {
+ s32 result; /**< OUT */
+ HGCMServiceLocation Loc; /**< IN */
+ u32 u32ClientID; /**< OUT */
+} __packed;
+typedef struct VBoxGuestHGCMConnectInfo VBoxGuestHGCMConnectInfo;
+VBOXGUEST_ASSERT_SIZE(VBoxGuestHGCMConnectInfo, 4+4+128+4);
+
+/** IOCTL to VBoxGuest to disconnect from a HGCM service. */
+#define VBOXGUEST_IOCTL_HGCM_DISCONNECT \
+ VBOXGUEST_IOCTL_CODE(17, sizeof(VBoxGuestHGCMDisconnectInfo))
+
+/**
+ * HGCM disconnect info structure.
+ *
+ * This is used by VBOXGUEST_IOCTL_HGCM_DISCONNECT.
+ */
+typedef struct VBoxGuestHGCMDisconnectInfo {
+ s32 result; /**< OUT */
+ u32 u32ClientID; /**< IN */
+} VBoxGuestHGCMDisconnectInfo;
+VBOXGUEST_ASSERT_SIZE(VBoxGuestHGCMDisconnectInfo, 8);
+
+/**
+ * IOCTL to VBoxGuest to make a call to a HGCM service.
+ * @see VBoxGuestHGCMCallInfo
+ */
+#define VBOXGUEST_IOCTL_HGCM_CALL(size) \
+ VBOXGUEST_IOCTL_CODE(18, (size))
+
+/**
+ * HGCM call info structure.
+ *
+ * This is used by VBOXGUEST_IOCTL_HGCM_CALL.
+ */
+typedef struct VBoxGuestHGCMCallInfo {
+ s32 result; /**< OUT Host HGCM return code.*/
+ u32 u32ClientID; /**< IN The id of the caller. */
+ u32 u32Function; /**< IN Function number. */
+ u32 cParms; /**< IN How many parms. */
+ /* Parameters follow in form HGCMFunctionParameter aParms[cParms] */
+} VBoxGuestHGCMCallInfo;
+VBOXGUEST_ASSERT_SIZE(VBoxGuestHGCMCallInfo, 16);
+
+/** IOCTL to VBoxGuest to make a timed call to a HGCM service. */
+#define VBOXGUEST_IOCTL_HGCM_CALL_TIMED(size) \
+ VBOXGUEST_IOCTL_CODE(20, (size))
+
+/**
+ * HGCM call info structure.
+ *
+ * This is used by VBOXGUEST_IOCTL_HGCM_CALL_TIMED.
+ */
+struct VBoxGuestHGCMCallInfoTimed {
+ /** IN How long to wait for completion before cancelling the call. */
+ u32 u32Timeout;
+ /** IN Is this request interruptible? */
+ u32 fInterruptible;
+ /**
+ * IN/OUT The rest of the call information. Placed after the timeout
+ * so that the parameters follow as they would for a normal call.
+ */
+ VBoxGuestHGCMCallInfo info;
+ /* Parameters follow in form HGCMFunctionParameter aParms[cParms] */
+} __packed;
+typedef struct VBoxGuestHGCMCallInfoTimed VBoxGuestHGCMCallInfoTimed;
+VBOXGUEST_ASSERT_SIZE(VBoxGuestHGCMCallInfoTimed, 8+16);
+
+/**
+ * @name IOCTL numbers that 32-bit clients, like the Windows OpenGL guest
+ * driver, will use when talking to a 64-bit driver.
+ * @remarks These are only used by the driver implementation!
+ * @{
+ */
+#define VBOXGUEST_IOCTL_HGCM_CONNECT_32 \
+ VBOXGUEST_IOCTL_CODE_32(16, sizeof(VBoxGuestHGCMConnectInfo))
+#define VBOXGUEST_IOCTL_HGCM_DISCONNECT_32 \
+ VBOXGUEST_IOCTL_CODE_32(17, sizeof(VBoxGuestHGCMDisconnectInfo))
+#define VBOXGUEST_IOCTL_HGCM_CALL_32(size) \
+ VBOXGUEST_IOCTL_CODE_32(18, (size))
+#define VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(size) \
+ VBOXGUEST_IOCTL_CODE_32(20, (size))
+/** @} */
+
+/** Get the pointer to the first HGCM parameter. */
+#define VBOXGUEST_HGCM_CALL_PARMS(a) \
+ ((HGCMFunctionParameter *)((u8 *)(a) + sizeof(VBoxGuestHGCMCallInfo)))
+/** Get the pointer to the first HGCM parameter in a 32-bit request. */
+#define VBOXGUEST_HGCM_CALL_PARMS32(a) \
+ ((HGCMFunctionParameter32 *)((u8 *)(a) + sizeof(VBoxGuestHGCMCallInfo)))
+
+/** Flags for the VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE request. */
+typedef enum VBOXGUESTCAPSACQUIRE_FLAGS {
+	VBOXGUESTCAPSACQUIRE_FLAGS_NONE = 0,
+	/*
+	 * Configures VBoxGuest to use the specified caps in acquire mode,
+	 * without making any caps acquisition/release. So far it is only
+	 * possible to set acquire mode for caps, but not clear it, so
+	 * u32NotMask is ignored for this request.
+	 */
+	VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE,
+	/* Ensures the enum is 32 bit wide. */
+	VBOXGUESTCAPSACQUIRE_FLAGS_32bit = 0x7fffffff
+} VBOXGUESTCAPSACQUIRE_FLAGS;
+
+typedef struct VBoxGuestCapsAquire {
+	/*
+	 * Result status:
+	 * VINF_SUCCESS - on success.
+	 * VERR_RESOURCE_BUSY - some caps in the u32OrMask are acquired by
+	 *   some other VBoxGuest connection. NOTE: no u32NotMask caps are
+	 *   cleared in this case, no modifications are done on failure.
+	 * VERR_INVALID_PARAMETER - invalid caps are specified with either
+	 *   u32OrMask or u32NotMask. No modifications are done on failure.
+	 */
+	s32 rc;
+	/* Acquire command */
+	VBOXGUESTCAPSACQUIRE_FLAGS enmFlags;
+	/* Caps to acquire, OR-ed VMMDEV_GUEST_SUPPORTS_XXX flags */
+	u32 u32OrMask;
+	/* Caps to release, OR-ed VMMDEV_GUEST_SUPPORTS_XXX flags */
+	u32 u32NotMask;
+} VBoxGuestCapsAquire;
+
+/**
+ * IOCTL for acquiring and releasing guest capabilities.
+ * This is used for multiple purposes:
+ * 1. By doing acquire, an r3 client application (e.g. VBoxTray) claims it
+ *    will use the given connection for performing operations like
+ *    auto-resize or seamless mode. If the application terminates, the
+ *    driver will automatically clean up the caps reported to the host, so
+ *    that the host knows the guest no longer supports them.
+ * 2. In a multi-user environment this prevents r3 applications (like
+ *    VBoxTray) running in different user sessions simultaneously from
+ *    interfering with each other. An r3 client application (like VBoxTray)
+ *    is responsible for acquiring/releasing caps properly as needed.
+ **/
+#define VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE \
+ VBOXGUEST_IOCTL_CODE(32, sizeof(VBoxGuestCapsAquire))
+
+/** IOCTL to VBoxGuest to set guest capabilities. */
+#define VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES \
+ VBOXGUEST_IOCTL_CODE_(33, sizeof(VBoxGuestSetCapabilitiesInfo))
+
+/** Input/output buffer layout for VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES. */
+typedef struct VBoxGuestSetCapabilitiesInfo {
+ u32 u32OrMask;
+ u32 u32NotMask;
+} VBoxGuestSetCapabilitiesInfo;
+VBOXGUEST_ASSERT_SIZE(VBoxGuestSetCapabilitiesInfo, 8);
+
+/** @} */
+
+/** @} */
+
+#endif
--
2.13.3
^ permalink raw reply related [flat|nested] 18+ messages in thread
* [RFC 2/2] fs: Add VirtualBox guest shared folder (vboxsf) support
2017-08-11 13:23 [RFC 0/2] Add Virtual Box vboxguest and vboxsf guest drivers to the mainline kernel Hans de Goede
2017-08-11 13:23 ` [RFC 1/2] misc: Add vboxguest driver for Virtual Box Guest integration Hans de Goede
@ 2017-08-11 13:23 ` Hans de Goede
2017-08-11 15:44 ` [RFC 0/2] Add Virtual Box vboxguest and vboxsf guest drivers to the mainline kernel Greg Kroah-Hartman
2017-08-11 16:02 ` Arnd Bergmann
3 siblings, 0 replies; 18+ messages in thread
From: Hans de Goede @ 2017-08-11 13:23 UTC (permalink / raw)
To: Arnd Bergmann, Greg Kroah-Hartman, Alexander Viro
Cc: Hans de Goede, Michael Thayer, Knut St . Osmundsen, Larry Finger,
linux-kernel, linux-fsdevel
VirtualBox hosts can share folders with guests, this commit adds a
VFS driver implementing the Linux-guest side of this, allowing folders
exported by the host to be mounted under Linux.
This driver depends on the guest <-> host IPC functions exported by
the vboxguest driver.
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
fs/Kconfig | 1 +
fs/Makefile | 1 +
fs/vboxsf/Kconfig | 9 +
fs/vboxsf/Makefile | 3 +
fs/vboxsf/dirops.c | 730 ++++++++++++++++++++++++
fs/vboxsf/lnkops.c | 41 ++
fs/vboxsf/regops.c | 557 ++++++++++++++++++
fs/vboxsf/shfl_hostintf.h | 1318 +++++++++++++++++++++++++++++++++++++++++++
fs/vboxsf/utils.c | 671 ++++++++++++++++++++++
fs/vboxsf/vboxsf_wrappers.c | 542 ++++++++++++++++++
fs/vboxsf/vboxsf_wrappers.h | 104 ++++
fs/vboxsf/vbsfmount.h | 69 +++
fs/vboxsf/vfsmod.c | 412 ++++++++++++++
fs/vboxsf/vfsmod.h | 111 ++++
14 files changed, 4569 insertions(+)
create mode 100644 fs/vboxsf/Kconfig
create mode 100644 fs/vboxsf/Makefile
create mode 100644 fs/vboxsf/dirops.c
create mode 100644 fs/vboxsf/lnkops.c
create mode 100644 fs/vboxsf/regops.c
create mode 100644 fs/vboxsf/shfl_hostintf.h
create mode 100644 fs/vboxsf/utils.c
create mode 100644 fs/vboxsf/vboxsf_wrappers.c
create mode 100644 fs/vboxsf/vboxsf_wrappers.h
create mode 100644 fs/vboxsf/vbsfmount.h
create mode 100644 fs/vboxsf/vfsmod.c
create mode 100644 fs/vboxsf/vfsmod.h
diff --git a/fs/Kconfig b/fs/Kconfig
index 7aee6d699fd6..7f80ad1cf591 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -248,6 +248,7 @@ source "fs/pstore/Kconfig"
source "fs/sysv/Kconfig"
source "fs/ufs/Kconfig"
source "fs/exofs/Kconfig"
+source "fs/vboxsf/Kconfig"
endif # MISC_FILESYSTEMS
diff --git a/fs/Makefile b/fs/Makefile
index 7bbaca9c67b1..6b5039e3ade5 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -128,3 +128,4 @@ obj-y += exofs/ # Multiple modules
obj-$(CONFIG_CEPH_FS) += ceph/
obj-$(CONFIG_PSTORE) += pstore/
obj-$(CONFIG_EFIVAR_FS) += efivarfs/
+obj-$(CONFIG_VBOXSF_FS) += vboxsf/
diff --git a/fs/vboxsf/Kconfig b/fs/vboxsf/Kconfig
new file mode 100644
index 000000000000..620e2232969c
--- /dev/null
+++ b/fs/vboxsf/Kconfig
@@ -0,0 +1,9 @@
+config VBOXSF_FS
+ tristate "VirtualBox guest shared folder (vboxsf) support"
+ depends on VBOXGUEST
+ help
+ VirtualBox hosts can share folders with guests, this driver
+ implements the Linux-guest side of this allowing folders exported
+ by the host to be mounted under Linux.
+
+ If you want to use shared folders in VirtualBox guests, answer Y or M.
diff --git a/fs/vboxsf/Makefile b/fs/vboxsf/Makefile
new file mode 100644
index 000000000000..f3529ea02d5c
--- /dev/null
+++ b/fs/vboxsf/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_VBOXSF_FS) += vboxsf.o
+
+vboxsf-objs := dirops.o lnkops.o regops.o utils.o vboxsf_wrappers.o vfsmod.o
diff --git a/fs/vboxsf/dirops.c b/fs/vboxsf/dirops.c
new file mode 100644
index 000000000000..e32040abca98
--- /dev/null
+++ b/fs/vboxsf/dirops.c
@@ -0,0 +1,730 @@
+/*
+ * VirtualBox Guest Shared Folders support: Directory inode and file operations
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#include <linux/vbox_err.h>
+#include <linux/vbox_utils.h>
+#include "vfsmod.h"
+
+/**
+ * Open a directory and buffer its complete host-side listing.
+ *
+ * @param inode inode
+ * @param file file
+ * @returns 0 on success, Linux error code otherwise
+ */
+static int sf_dir_open(struct inode *inode, struct file *file)
+{
+	struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
+	struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
+	struct shfl_createparms params = {};
+	struct sf_dir_info *sf_d;
+	int rc, err = 0;
+
+	BUG_ON(!sf_g);
+	BUG_ON(!sf_i);
+
+	/* Already opened and buffered? Nothing to do. */
+	if (file->private_data)
+		return 0;
+
+	sf_d = sf_dir_info_alloc();
+	if (!sf_d)
+		return -ENOMEM;
+
+	params.Handle = SHFL_HANDLE_NIL;
+	params.CreateFlags = SHFL_CF_DIRECTORY |
+			     SHFL_CF_ACT_OPEN_IF_EXISTS |
+			     SHFL_CF_ACT_FAIL_IF_NEW |
+			     SHFL_CF_ACCESS_READ;
+
+	rc = vboxsf_create(sf_g->root, sf_i->path, &params);
+	if (rc < 0) {
+		err = -EPERM;
+		goto out;
+	}
+
+	if (params.Result == SHFL_FILE_EXISTS) {
+		err = sf_dir_read_all(sf_g, sf_i, sf_d, params.Handle);
+		if (!err)
+			file->private_data = sf_d;
+	} else {
+		err = -ENOENT;
+	}
+
+	vboxsf_close(sf_g->root, params.Handle);
+out:
+	if (err)
+		sf_dir_info_free(sf_d);
+
+	return err;
+}
+
+/**
+ * Called when the last reference to [file] is dropped: tell nothing to the
+ * host (the handle was already closed in sf_dir_open()) and free the
+ * buffered directory listing attached to the file.
+ *
+ * @param inode inode
+ * @param file file
+ * @returns 0 (always succeeds)
+ */
+static int sf_dir_release(struct inode *inode, struct file *file)
+{
+	struct sf_dir_info *sf_d = file->private_data;
+
+	if (sf_d)
+		sf_dir_info_free(sf_d);
+
+	return 0;
+}
+
+/**
+ * Translate the SHFL_TYPE_* bits of a host file mode into the matching
+ * DT_* dirent type.
+ *
+ * @param fMode file mode
+ * @returns d_type (DT_UNKNOWN when the host type is not recognized)
+ */
+static int sf_get_d_type(u32 fMode)
+{
+	switch (fMode & SHFL_TYPE_MASK) {
+	case SHFL_TYPE_FIFO:
+		return DT_FIFO;
+	case SHFL_TYPE_DEV_CHAR:
+		return DT_CHR;
+	case SHFL_TYPE_DIRECTORY:
+		return DT_DIR;
+	case SHFL_TYPE_DEV_BLOCK:
+		return DT_BLK;
+	case SHFL_TYPE_FILE:
+		return DT_REG;
+	case SHFL_TYPE_SYMLINK:
+		return DT_LNK;
+	case SHFL_TYPE_SOCKET:
+		return DT_SOCK;
+	case SHFL_TYPE_WHITEOUT:
+		return DT_WHT;
+	default:
+		return DT_UNKNOWN;
+	}
+}
+
+/**
+ * Extract element ([dir]->f_pos) from the buffered directory listing of
+ * [dir] into [d_name], re-reading the listing from the host first when it
+ * was marked stale (force_reread).
+ *
+ * @returns 0 for success, 1 for end reached, Linux error code otherwise.
+ */
+static int sf_getdent(struct file *dir, char d_name[NAME_MAX], int *d_type)
+{
+	loff_t cur;
+	struct sf_glob_info *sf_g;
+	struct sf_dir_info *sf_d;
+	struct sf_inode_info *sf_i;
+	struct inode *inode;
+	struct list_head *pos, *list;
+
+	inode = GET_F_DENTRY(dir)->d_inode;
+	sf_i = GET_INODE_INFO(inode);
+	sf_g = GET_GLOB_INFO(inode->i_sb);
+	sf_d = dir->private_data;
+
+	BUG_ON(!sf_g);
+	BUG_ON(!sf_d);
+	BUG_ON(!sf_i);
+
+	if (sf_i->force_reread) {
+		int rc;
+		int err;
+		struct shfl_createparms params = {};
+
+		params.Handle = SHFL_HANDLE_NIL;
+		params.CreateFlags = SHFL_CF_DIRECTORY |
+				     SHFL_CF_ACT_OPEN_IF_EXISTS |
+				     SHFL_CF_ACT_FAIL_IF_NEW |
+				     SHFL_CF_ACCESS_READ;
+
+		rc = vboxsf_create(sf_g->root, sf_i->path, &params);
+		if (rc < 0)
+			return -EPERM;
+
+		if (params.Result != SHFL_FILE_EXISTS) {
+			/*
+			 * Do NOT free sf_d here: it is still owned by
+			 * dir->private_data and will be freed by
+			 * sf_dir_release(). Freeing it here left a dangling
+			 * pointer behind, causing a use-after-free on the
+			 * next iterate and a double free on release.
+			 */
+			return -ENOENT;
+		}
+
+		sf_dir_info_empty(sf_d);
+		err = sf_dir_read_all(sf_g, sf_i, sf_d, params.Handle);
+		vboxsf_close(sf_g->root, params.Handle);
+		if (err)
+			return err;
+
+		sf_i->force_reread = 0;
+	}
+
+	cur = 0;
+	list = &sf_d->info_list;
+	list_for_each(pos, list) {
+		struct sf_dir_buf *b;
+		SHFLDIRINFO *info;
+		loff_t i;
+
+		b = list_entry(pos, struct sf_dir_buf, head);
+		if (dir->f_pos >= cur + b->cEntries) {
+			cur += b->cEntries;
+			continue;
+		}
+
+		/*
+		 * Entries are variable length; walk forward from the start
+		 * of this buffer to the entry at index (f_pos - cur).
+		 */
+		for (i = 0, info = b->buf; i < dir->f_pos - cur; ++i) {
+			size_t size;
+
+			size = offsetof(SHFLDIRINFO, name.String) +
+			       info->name.u16Size;
+			info = (SHFLDIRINFO *)((uintptr_t)info + size);
+		}
+
+		*d_type = sf_get_d_type(info->Info.Attr.fMode);
+
+		return sf_nlscpy(sf_g, d_name, NAME_MAX,
+				 info->name.String.utf8, info->name.u16Length);
+	}
+
+	return 1;
+}
+
+/**
+ * Called by the VFS to iterate over directory [dir].
+ *
+ * Entries are pulled one at a time from the buffered host listing via
+ * sf_getdent() (using [dir]->f_pos as the cursor) and fed to dir_emit(),
+ * advancing f_pos / ctx->pos as we go, until the end of the listing is
+ * reached, an inode number cannot be computed, or dir_emit() asks us to
+ * stop. Inode numbers are faked (ctx->pos + 0xbeef) as the host does not
+ * supply any.
+ */
+static int sf_dir_iterate(struct file *dir, struct dir_context *ctx)
+{
+	for (;;) {
+		int err;
+		ino_t fake_ino;
+		loff_t sanity;
+		char d_name[NAME_MAX];
+		int d_type = DT_UNKNOWN;
+
+		err = sf_getdent(dir, d_name, &d_type);
+		switch (err) {
+		case 1:
+			/* end of directory reached */
+			return 0;
+
+		case 0:
+			break;
+
+		case -1:
+		default:
+			/* skip erroneous entry and proceed */
+			dir->f_pos += 1;
+			ctx->pos += 1;
+			continue;
+		}
+
+		/* d_name now contains a valid entry name */
+		sanity = ctx->pos + 0xbeef;
+		fake_ino = sanity;
+		/*
+		 * On 32 bit systems pos is 64-bit signed, while ino is 32-bit
+		 * unsigned, so fake_ino may overflow; check for this.
+		 */
+		if (sanity - fake_ino) {
+			vbg_err("vboxsf: can not compute ino\n");
+			return -EINVAL;
+		}
+		if (!dir_emit(ctx, d_name, strlen(d_name), fake_ino, d_type))
+			return 0;
+
+		dir->f_pos += 1;
+		ctx->pos += 1;
+	}
+}
+
+/* Directory file operations; the listing itself is buffered at open time. */
+struct file_operations sf_dir_fops = {
+	.open = sf_dir_open,
+	.iterate = sf_dir_iterate,
+	.release = sf_dir_release,
+	.read = generic_read_dir,
+	.llseek = generic_file_llseek,
+};
+
+/* iops */
+
+/**
+ * Called when the VFS failed to locate the dentry in the cache. Stat the
+ * object on the host and, if it exists, allocate an inode, attach our
+ * private sf_inode_info to it and link it to the dentry.
+ *
+ * A negative lookup (-ENOENT from the host) is not a hard error: a NULL
+ * inode is added to [dentry] so the entry can later be created via
+ * create/mkdir/open. NULL (or a "positive" pointer) is returned on
+ * success, ERR_PTR() on failure.
+ */
+static struct dentry *sf_lookup(struct inode *parent, struct dentry *dentry,
+				unsigned int flags)
+{
+	int err;
+	struct sf_inode_info *sf_i, *sf_new_i;
+	struct sf_glob_info *sf_g;
+	SHFLSTRING *path;
+	struct inode *inode;
+	ino_t ino;
+	struct shfl_fsobjinfo fsinfo;
+
+	sf_g = GET_GLOB_INFO(parent->i_sb);
+	sf_i = GET_INODE_INFO(parent);
+
+	BUG_ON(!sf_g);
+	BUG_ON(!sf_i);
+
+	/* path is heap-allocated; we own it until handed to the inode. */
+	err = sf_path_from_dentry(__func__, sf_g, sf_i, dentry, &path);
+	if (err)
+		goto fail0;
+
+	err = sf_stat(__func__, sf_g, path, &fsinfo, 1);
+	if (err) {
+		if (err == -ENOENT) {
+			/*
+			 * -ENOENT: add NULL inode to dentry so it later can
+			 * be created via call to create/mkdir/open
+			 */
+			kfree(path);
+			inode = NULL;
+		} else
+			goto fail1;
+	} else {
+		sf_new_i = kmalloc(sizeof(*sf_new_i), GFP_KERNEL);
+		if (!sf_new_i) {
+			err = -ENOMEM;
+			goto fail1;
+		}
+		sf_new_i->handle = SHFL_HANDLE_NIL;
+		sf_new_i->force_reread = 0;
+
+		ino = iunique(parent->i_sb, 1);
+		/*
+		 * NOTE(review): iget_locked() may also return an already
+		 * cached inode (without I_NEW), for which the
+		 * unlock_new_inode() below would not be valid -- confirm
+		 * iunique() guarantees a fresh number here.
+		 */
+		inode = iget_locked(parent->i_sb, ino);
+		if (!inode) {
+			err = -ENOMEM;	/* XXX: ??? */
+			goto fail2;
+		}
+
+		SET_INODE_INFO(inode, sf_new_i);
+		sf_init_inode(sf_g, inode, &fsinfo);
+		sf_new_i->path = path;	/* inode now owns path */
+
+		unlock_new_inode(inode);
+	}
+
+	sf_i->force_restat = 0;
+	dentry->d_time = jiffies;
+	d_set_d_op(dentry, &sf_dentry_ops);
+	d_add(dentry, inode);
+	return NULL;
+
+fail2:
+	kfree(sf_new_i);
+
+fail1:
+	kfree(path);
+
+fail0:
+	return ERR_PTR(err);
+}
+
+/**
+ * Allocate an sf_inode_info, compute a unique inode number, get an inode
+ * from the VFS, initialize the inode info and instantiate the dentry.
+ *
+ * On success ownership of [path] passes to the new inode's sf_inode_info;
+ * on failure the caller keeps ownership (see the fail paths of the
+ * callers, which kfree() path themselves).
+ *
+ * @param parent inode entry of the directory
+ * @param dentry directory cache entry
+ * @param path path name (ownership transferred on success)
+ * @param info file information
+ * @param handle host handle to keep open (SHFL_HANDLE_NIL for none)
+ * @returns 0 on success, Linux error code otherwise
+ */
+static int sf_instantiate(struct inode *parent, struct dentry *dentry,
+			  SHFLSTRING * path, struct shfl_fsobjinfo *info,
+			  SHFLHANDLE handle)
+{
+	int err;
+	ino_t ino;
+	struct inode *inode;
+	struct sf_inode_info *sf_new_i;
+	struct sf_glob_info *sf_g = GET_GLOB_INFO(parent->i_sb);
+
+	BUG_ON(!sf_g);
+
+	sf_new_i = kmalloc(sizeof(*sf_new_i), GFP_KERNEL);
+	if (!sf_new_i) {
+		err = -ENOMEM;
+		goto fail0;
+	}
+
+	ino = iunique(parent->i_sb, 1);
+	/*
+	 * NOTE(review): iget_locked() can also return an already cached
+	 * inode; unlock_new_inode() below is only valid for a new (I_NEW)
+	 * one -- confirm iunique() makes a collision impossible here.
+	 */
+	inode = iget_locked(parent->i_sb, ino);
+	if (!inode) {
+		err = -ENOMEM;
+		goto fail1;
+	}
+
+	sf_init_inode(sf_g, inode, info);
+	sf_new_i->path = path;
+	SET_INODE_INFO(inode, sf_new_i);
+	sf_new_i->force_restat = 1;
+	sf_new_i->force_reread = 0;
+
+	d_instantiate(dentry, inode);
+	unlock_new_inode(inode);
+
+	/* Store this handle if we leave the handle open. */
+	sf_new_i->handle = handle;
+	return 0;
+
+fail1:
+	kfree(sf_new_i);
+
+fail0:
+	return err;
+
+}
+
+/**
+ * Create a new regular file or directory on the host.
+ *
+ * @param parent inode of the directory
+ * @param dentry directory cache entry
+ * @param mode file mode
+ * @param fDirectory true if directory, false otherwise
+ * @returns 0 on success, Linux error code otherwise
+ */
+static int sf_create_aux(struct inode *parent, struct dentry *dentry,
+			 umode_t mode, int fDirectory)
+{
+	struct sf_inode_info *sf_i = GET_INODE_INFO(parent);
+	struct sf_glob_info *sf_g = GET_GLOB_INFO(parent->i_sb);
+	struct shfl_createparms params = {};
+	SHFLSTRING *path;
+	int rc, err;
+
+	BUG_ON(!sf_i);
+	BUG_ON(!sf_g);
+
+	err = sf_path_from_dentry(__func__, sf_g, sf_i, dentry, &path);
+	if (err)
+		goto fail0;
+
+	params.Handle = SHFL_HANDLE_NIL;
+	params.CreateFlags = SHFL_CF_ACT_CREATE_IF_NEW |
+			     SHFL_CF_ACT_FAIL_IF_EXISTS |
+			     SHFL_CF_ACCESS_READWRITE |
+			     (fDirectory ? SHFL_CF_DIRECTORY : 0);
+	params.Info.Attr.fMode = (fDirectory ? SHFL_TYPE_DIRECTORY :
+						SHFL_TYPE_FILE) |
+				 (mode & S_IRWXUGO);
+	params.Info.Attr.enmAdditional = SHFLFSOBJATTRADD_NOTHING;
+
+	rc = vboxsf_create(sf_g->root, path, &params);
+	if (rc < 0) {
+		err = (rc == VERR_WRITE_PROTECT) ? -EROFS : -EPROTO;
+		goto fail1;
+	}
+
+	if (params.Result != SHFL_FILE_CREATED) {
+		err = -EPERM;
+		goto fail1;
+	}
+
+	/* On success sf_instantiate() takes ownership of path. */
+	err = sf_instantiate(parent, dentry, path, &params.Info,
+			     fDirectory ? SHFL_HANDLE_NIL : params.Handle);
+	if (err)
+		goto fail2;
+
+	/*
+	 * Don't close this handle right now. We assume that the same file is
+	 * opened with sf_reg_open() and later closed with sf_reg_close(). Save
+	 * the handle in between. Does not apply to directories. True?
+	 */
+	if (fDirectory)
+		vboxsf_close(sf_g->root, params.Handle);
+
+	sf_i->force_restat = 1;
+	return 0;
+
+fail2:
+	vboxsf_close(sf_g->root, params.Handle);
+
+fail1:
+	kfree(path);
+
+fail0:
+	return err;
+}
+
+/**
+ * Create a new regular file (VFS ->create hook, delegates to
+ * sf_create_aux() with fDirectory = 0).
+ *
+ * @param parent inode of the directory
+ * @param dentry directory cache entry
+ * @param mode file mode
+ * @param excl Possible O_EXCL... (currently unused)
+ * @returns 0 on success, Linux error code otherwise
+ */
+static int sf_create(struct inode *parent, struct dentry *dentry, umode_t mode,
+		     bool excl)
+{
+	return sf_create_aux(parent, dentry, mode, 0);
+}
+
+/**
+ * Create a new directory (VFS ->mkdir hook, delegates to sf_create_aux()
+ * with fDirectory = 1).
+ *
+ * @param parent inode of the directory
+ * @param dentry directory cache entry
+ * @param mode file mode
+ * @returns 0 on success, Linux error code otherwise
+ */
+static int sf_mkdir(struct inode *parent, struct dentry *dentry, umode_t mode)
+{
+	return sf_create_aux(parent, dentry, mode, 1);
+}
+
+/**
+ * Remove a regular file / directory on the host.
+ *
+ * @param parent inode of the directory
+ * @param dentry directory cache entry
+ * @param fDirectory true if directory, false otherwise
+ * @returns 0 on success, Linux error code otherwise
+ */
+static int sf_unlink_aux(struct inode *parent, struct dentry *dentry,
+			 int fDirectory)
+{
+	int rc, err;
+	struct sf_glob_info *sf_g = GET_GLOB_INFO(parent->i_sb);
+	struct sf_inode_info *sf_i = GET_INODE_INFO(parent);
+	SHFLSTRING *path;
+	uint32_t fFlags;
+
+	BUG_ON(!sf_g);
+	BUG_ON(!sf_i);	/* sf_i is dereferenced below */
+
+	err = sf_path_from_dentry(__func__, sf_g, sf_i, dentry, &path);
+	if (err)
+		goto fail0;
+
+	fFlags = fDirectory ? SHFL_REMOVE_DIR : SHFL_REMOVE_FILE;
+	/* Use the S_ISLNK() macro rather than open-coding the S_IFMT test. */
+	if (dentry && dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode))
+		fFlags |= SHFL_REMOVE_SYMLINK;
+	rc = vboxsf_remove(sf_g->root, path, fFlags);
+	if (rc < 0) {
+		err = -vbg_status_code_to_errno(rc);
+		goto fail1;
+	}
+
+	/* directory access/change time changed */
+	sf_i->force_restat = 1;
+	/* directory content changed */
+	sf_i->force_reread = 1;
+
+	err = 0;
+
+fail1:
+	kfree(path);
+
+fail0:
+	return err;
+}
+
+/**
+ * Remove a regular file (VFS ->unlink hook, delegates to sf_unlink_aux()
+ * with fDirectory = 0).
+ *
+ * @param parent inode of the directory
+ * @param dentry directory cache entry
+ * @returns 0 on success, Linux error code otherwise
+ */
+static int sf_unlink(struct inode *parent, struct dentry *dentry)
+{
+	return sf_unlink_aux(parent, dentry, 0);
+}
+
+/**
+ * Remove a directory (VFS ->rmdir hook, delegates to sf_unlink_aux()
+ * with fDirectory = 1).
+ *
+ * @param parent inode of the directory
+ * @param dentry directory cache entry
+ * @returns 0 on success, Linux error code otherwise
+ */
+static int sf_rmdir(struct inode *parent, struct dentry *dentry)
+{
+	return sf_unlink_aux(parent, dentry, 1);
+}
+
+/**
+ * Rename a regular file / directory.
+ *
+ * Both objects must live on the same share; on success the relative path
+ * cached in the file's sf_inode_info is replaced with the new one.
+ *
+ * @param old_parent inode of the old parent directory
+ * @param old_dentry old directory cache entry
+ * @param new_parent inode of the new parent directory
+ * @param new_dentry new directory cache entry
+ * @param flags rename flags (RENAME_*), none supported
+ * @returns 0 on success, Linux error code otherwise
+ */
+static int sf_rename(struct inode *old_parent, struct dentry *old_dentry,
+		     struct inode *new_parent, struct dentry *new_dentry,
+		     unsigned int flags)
+{
+	struct sf_glob_info *sf_g = GET_GLOB_INFO(old_parent->i_sb);
+	u32 shfl_flags = SHFL_RENAME_FILE | SHFL_RENAME_REPLACE_IF_EXISTS;
+	struct sf_inode_info *sf_old_i, *sf_new_i, *sf_file_i;
+	SHFLSTRING *old_path, *new_path;
+	int err, rc;
+
+	if (flags)
+		return -EINVAL;
+
+	/* Cross-share renames cannot be done by the host. */
+	if (sf_g != GET_GLOB_INFO(new_parent->i_sb))
+		return -EINVAL;
+
+	sf_old_i = GET_INODE_INFO(old_parent);
+	sf_new_i = GET_INODE_INFO(new_parent);
+	/*
+	 * As we save the relative path inside the inode structure, we need
+	 * to change this if the rename is successful.
+	 */
+	sf_file_i = GET_INODE_INFO(old_dentry->d_inode);
+
+	BUG_ON(!sf_old_i);
+	BUG_ON(!sf_new_i);
+	BUG_ON(!sf_file_i);
+
+	old_path = sf_file_i->path;
+	err = sf_path_from_dentry(__func__, sf_g, sf_new_i,
+				  new_dentry, &new_path);
+	if (err)
+		return err;
+
+	/*
+	 * Use S_ISDIR() instead of "i_mode & S_IFDIR": the latter also
+	 * matches other S_IFMT values containing the S_IFDIR bit (e.g.
+	 * S_IFSOCK), which would misclassify those objects as directories.
+	 */
+	if (S_ISDIR(old_dentry->d_inode->i_mode))
+		shfl_flags = 0;
+
+	rc = vboxsf_rename(sf_g->root, old_path, new_path, shfl_flags);
+	if (rc < 0) {
+		err = -vbg_status_code_to_errno(rc);
+		kfree(new_path);
+		return err;
+	}
+
+	kfree(old_path);
+	sf_new_i->force_restat = 1;
+	sf_old_i->force_restat = 1;	/* XXX: needed? */
+	/* Set the new relative path in the inode. */
+	sf_file_i->path = new_path;
+	return 0;
+}
+
+/**
+ * Create a symlink [dentry] in directory [parent] pointing at [symname].
+ *
+ * @returns 0 on success, Linux error code otherwise
+ */
+static int sf_symlink(struct inode *parent, struct dentry *dentry,
+		      const char *symname)
+{
+	struct sf_glob_info *sf_g = GET_GLOB_INFO(parent->i_sb);
+	struct sf_inode_info *sf_i = GET_INODE_INFO(parent);
+	SHFLSTRING *path, *ssymname;
+	struct shfl_fsobjinfo info;
+	int symname_size = strlen(symname) + 1;
+	int err, rc;
+
+	BUG_ON(!sf_g);
+	BUG_ON(!sf_i);
+
+	err = sf_path_from_dentry(__func__, sf_g, sf_i, dentry, &path);
+	if (err)
+		goto fail0;
+
+	/* Copy the link target into a host-format (SHFLSTRING) buffer. */
+	ssymname = kmalloc(offsetof(SHFLSTRING, String.utf8) + symname_size,
+			   GFP_KERNEL);
+	if (!ssymname) {
+		err = -ENOMEM;
+		goto fail1;
+	}
+
+	ssymname->u16Length = symname_size - 1;
+	ssymname->u16Size = symname_size;
+	memcpy(ssymname->String.utf8, symname, symname_size);
+
+	rc = vboxsf_symlink(sf_g->root, path, ssymname, &info);
+	kfree(ssymname);
+
+	if (rc < 0) {
+		err = (rc == VERR_WRITE_PROTECT) ? -EROFS : -EPROTO;
+		goto fail1;
+	}
+
+	/* On success sf_instantiate() takes ownership of path. */
+	err = sf_instantiate(parent, dentry, path, &info, SHFL_HANDLE_NIL);
+	if (err)
+		goto fail1;
+
+	sf_i->force_restat = 1;
+	return 0;
+
+fail1:
+	kfree(path);
+fail0:
+	return err;
+}
+
+/* Directory inode operations */
+struct inode_operations sf_dir_iops = {
+	.lookup = sf_lookup,
+	.create = sf_create,
+	.mkdir = sf_mkdir,
+	.rmdir = sf_rmdir,
+	.unlink = sf_unlink,
+	.rename = sf_rename,
+	.getattr = sf_getattr,
+	.setattr = sf_setattr,
+	.symlink = sf_symlink
+};
diff --git a/fs/vboxsf/lnkops.c b/fs/vboxsf/lnkops.c
new file mode 100644
index 000000000000..546b2a83166a
--- /dev/null
+++ b/fs/vboxsf/lnkops.c
@@ -0,0 +1,41 @@
+/*
+ * VirtualBox Guest Shared Folders: Operations for symbolic links.
+ *
+ * Copyright (C) 2010-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#include "vfsmod.h"
+
+/**
+ * Resolve the target of symlink [inode] by asking the host.
+ *
+ * RCU walk (dentry == NULL) is not supported, we must sleep for the host
+ * call. The returned buffer is freed through the delayed_call [done].
+ */
+static const char *sf_get_link(struct dentry *dentry, struct inode *inode,
+			       struct delayed_call *done)
+{
+	struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
+	struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
+	char *path;
+	int rc;
+
+	if (!dentry)
+		return ERR_PTR(-ECHILD);
+	/*
+	 * Allocate exactly the size we advertise to the host; the original
+	 * allocated PAGE_SIZE while passing PATH_MAX to the host, which was
+	 * only safe because PAGE_SIZE >= PATH_MAX on Linux.
+	 */
+	path = kzalloc(PATH_MAX, GFP_KERNEL);
+	if (!path)
+		return ERR_PTR(-ENOMEM);
+	rc = vboxsf_readlink(sf_g->root, sf_i->path, PATH_MAX, path);
+	if (rc < 0) {
+		kfree(path);
+		return ERR_PTR(-EPROTO);
+	}
+	set_delayed_call(done, kfree_link, path);
+	return path;
+}
+
+/* Symlink inode operations */
+struct inode_operations sf_lnk_iops = {
+	.get_link = sf_get_link
+};
diff --git a/fs/vboxsf/regops.c b/fs/vboxsf/regops.c
new file mode 100644
index 000000000000..c3ee582df2a2
--- /dev/null
+++ b/fs/vboxsf/regops.c
@@ -0,0 +1,557 @@
+/*
+ * VirtualBox Guest Shared Folders support: Regular file inode and file ops.
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+/*
+ * Limitations: only COW memory mapping is supported
+ */
+
+#include <linux/vbox_utils.h>
+#include "vfsmod.h"
+
+/*
+ * Allocate a kernel bounce buffer for host I/O.  A buffer of up to 16K
+ * (page-aligned size of the transfer) is attempted first, falling back
+ * to a single page under memory pressure.  On success the chosen size
+ * is returned through *tmp_sizep and the physical address through
+ * *physp.  Returns NULL when no memory is available.  @caller is
+ * currently unused (kept for debug prints).
+ */
+static void *alloc_bounce_buffer(size_t *tmp_sizep, u64 *physp, size_t
+				 xfer_size, const char *caller)
+{
+	size_t want = PAGE_ALIGN(xfer_size);
+	void *buf;
+
+	if (want > SZ_16K)
+		want = SZ_16K;
+
+	buf = kmalloc(want, GFP_KERNEL);
+	if (!buf) {
+		/* Retry with the smallest useful size. */
+		want = PAGE_SIZE;
+		buf = kmalloc(want, GFP_KERNEL);
+		if (!buf)
+			return NULL;
+	}
+
+	*tmp_sizep = want;
+	*physp = virt_to_phys(buf);
+	return buf;
+}
+
+/* Release a buffer obtained from alloc_bounce_buffer(). */
+static void free_bounce_buffer(void *tmp)
+{
+ kfree(tmp);
+}
+
+/* fops */
+/*
+ * Read up to *nread bytes at offset pos from the open host handle into
+ * buf, which must be kernel memory (kmalloc'ed or a kmap'ed page, as
+ * vboxsf_read() works on virtual addresses).  *nread is updated to the
+ * byte count actually transferred.  Returns 0 on success, -EPROTO when
+ * the host call fails.  @caller is unused (debugging aid).
+ */
+static int sf_reg_read_aux(const char *caller, struct sf_glob_info *sf_g,
+			   struct sf_reg_info *sf_r, void *buf,
+			   uint32_t *nread, uint64_t pos)
+{
+	int rc;
+
+	rc = vboxsf_read(sf_g->root, sf_r->handle, pos, nread, buf);
+
+	return rc < 0 ? -EPROTO : 0;
+}
+
+/*
+ * Write up to *nwritten bytes from kernel buffer buf to the open host
+ * handle at offset pos.  *nwritten is updated to the byte count
+ * actually transferred.  Returns 0 on success, -EPROTO when the host
+ * call fails.  @caller is unused (debugging aid).
+ */
+static int sf_reg_write_aux(const char *caller, struct sf_glob_info *sf_g,
+			    struct sf_reg_info *sf_r, void *buf,
+			    uint32_t *nwritten, uint64_t pos)
+{
+	int rc;
+
+	rc = vboxsf_write(sf_g->root, sf_r->handle, pos, nwritten, buf);
+
+	return rc < 0 ? -EPROTO : 0;
+}
+
+/**
+ * Read from a regular file through a kernel bounce buffer.
+ *
+ * @param file the file
+ * @param buf  destination buffer in userspace
+ * @param size number of bytes requested
+ * @param off  offset within the file, advanced by the bytes read
+ * @returns the number of read bytes on success, negative error code
+ *          otherwise
+ */
+static ssize_t sf_reg_read(struct file *file, char __user *buf, size_t size,
+			   loff_t *off)
+{
+	int err;
+	void *tmp;
+	u64 tmp_phys;
+	size_t tmp_size;
+	size_t left = size;
+	ssize_t total_bytes_read = 0;
+	struct inode *inode = GET_F_DENTRY(file)->d_inode;
+	struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
+	struct sf_reg_info *sf_r = file->private_data;
+	loff_t pos = *off;
+
+	if (!S_ISREG(inode->i_mode))
+		return -EINVAL;
+
+	if (!size)
+		return 0;
+
+	tmp = alloc_bounce_buffer(&tmp_size, &tmp_phys, size, __func__);
+	if (!tmp)
+		return -ENOMEM;
+
+	while (left) {
+		uint32_t to_read, nread;
+
+		/* Transfer in bounce-buffer sized chunks. */
+		to_read = tmp_size;
+		if (to_read > left)
+			to_read = (uint32_t)left;
+
+		nread = to_read;
+
+		err = sf_reg_read_aux(__func__, sf_g, sf_r, tmp, &nread, pos);
+		if (err)
+			goto fail;
+
+		if (copy_to_user(buf, tmp, nread)) {
+			err = -EFAULT;
+			goto fail;
+		}
+
+		pos += nread;
+		left -= nread;
+		buf += nread;
+		total_bytes_read += nread;
+		/* A short read from the host means end of file. */
+		if (nread != to_read)
+			break;
+	}
+
+	*off += total_bytes_read;
+	free_bounce_buffer(tmp);
+	return total_bytes_read;
+
+fail:
+	free_bounce_buffer(tmp);
+	return err;
+}
+
+/**
+ * Write to a regular file through a kernel bounce buffer.
+ *
+ * @param file the file
+ * @param buf  source buffer in userspace
+ * @param size number of bytes to write
+ * @param off  offset within the file, advanced by the bytes written
+ * @returns the number of written bytes on success, negative error code
+ *          otherwise
+ */
+static ssize_t sf_reg_write(struct file *file, const char __user *buf,
+			    size_t size, loff_t *off)
+{
+	int err;
+	void *tmp;
+	u64 tmp_phys;
+	size_t tmp_size;
+	size_t left = size;
+	ssize_t total_bytes_written = 0;
+	struct inode *inode = GET_F_DENTRY(file)->d_inode;
+	struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
+	struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
+	struct sf_reg_info *sf_r = file->private_data;
+	loff_t pos;
+
+	if (!S_ISREG(inode->i_mode))
+		return -EINVAL;
+
+	pos = *off;
+	if (file->f_flags & O_APPEND) {
+		/*
+		 * NOTE(review): i_size is read without holding the inode
+		 * lock, so concurrent appenders can race -- confirm against
+		 * the final locking scheme.
+		 */
+		pos = inode->i_size;
+		*off = pos;
+	}
+
+	if (!size)
+		return 0;
+
+	tmp = alloc_bounce_buffer(&tmp_size, &tmp_phys, size, __func__);
+	if (!tmp)
+		return -ENOMEM;
+
+	while (left) {
+		uint32_t to_write, nwritten;
+
+		to_write = tmp_size;
+		if (to_write > left)
+			to_write = (uint32_t)left;
+
+		nwritten = to_write;
+
+		if (copy_from_user(tmp, buf, to_write)) {
+			err = -EFAULT;
+			goto fail;
+		}
+
+		/* The write path hands the host the physical address of the
+		 * (physically contiguous) bounce buffer. */
+		err = vboxsf_write_physcont(sf_g->root, sf_r->handle, pos,
+					    &nwritten, tmp_phys);
+		if (err < 0) {
+			err = -EPROTO;
+			goto fail;
+		}
+
+		pos += nwritten;
+		left -= nwritten;
+		buf += nwritten;
+		total_bytes_written += nwritten;
+		/* Short write: stop and report what was written. */
+		if (nwritten != to_write)
+			break;
+	}
+
+	*off += total_bytes_written;
+	if (*off > inode->i_size)
+		inode->i_size = *off;
+
+	/* Host-side attributes changed; refresh them on the next stat. */
+	sf_i->force_restat = 1;
+	free_bounce_buffer(tmp);
+	return total_bytes_written;
+
+fail:
+	free_bounce_buffer(tmp);
+	return err;
+}
+
+/**
+ * Open a regular file.
+ *
+ * Maps the O_* open flags onto SHFL_CF_* create flags and opens or
+ * creates the object on the host.  A handle that sf_create_aux() left
+ * in the inode is reused instead of issuing a second host open.
+ *
+ * @param inode the inode
+ * @param file the file
+ * @returns 0 on success, Linux error code otherwise
+ */
+static int sf_reg_open(struct inode *inode, struct file *file)
+{
+	int rc, rc_linux = 0;
+	struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
+	struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
+	struct sf_reg_info *sf_r;
+	struct shfl_createparms params = {};
+
+	sf_r = kmalloc(sizeof(*sf_r), GFP_KERNEL);
+	if (!sf_r)
+		return -ENOMEM;
+
+	/* Already open? */
+	if (sf_i->handle != SHFL_HANDLE_NIL) {
+		/*
+		 * This inode was created with sf_create_aux(). Check the
+		 * CreateFlags: O_CREAT, O_TRUNC: inherent true (file was
+		 * just created). Not sure about the access flags
+		 * (SHFL_CF_ACCESS_*).
+		 */
+		sf_i->force_restat = 1;
+		sf_r->handle = sf_i->handle;
+		sf_i->handle = SHFL_HANDLE_NIL;
+		sf_i->file = file;
+		file->private_data = sf_r;
+		return 0;
+	}
+
+	/*
+	 * We check the value of params.Handle afterwards to find out if
+	 * the call succeeded or failed, as the API does not seem to cleanly
+	 * distinguish error and informational messages.
+	 *
+	 * Furthermore, we must set params.Handle to SHFL_HANDLE_NIL to
+	 * make the shared folders host service use our fMode parameter.
+	 */
+	params.Handle = SHFL_HANDLE_NIL;
+
+	if (file->f_flags & O_CREAT) {
+		params.CreateFlags |= SHFL_CF_ACT_CREATE_IF_NEW;
+		/*
+		 * We ignore O_EXCL, as the Linux kernel seems to call create
+		 * beforehand itself, so O_EXCL should always fail.
+		 */
+		if (file->f_flags & O_TRUNC)
+			params.CreateFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
+		else
+			params.CreateFlags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
+	} else {
+		params.CreateFlags |= SHFL_CF_ACT_FAIL_IF_NEW;
+		if (file->f_flags & O_TRUNC)
+			params.CreateFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
+	}
+
+	switch (file->f_flags & O_ACCMODE) {
+	case O_RDONLY:
+		params.CreateFlags |= SHFL_CF_ACCESS_READ;
+		break;
+
+	case O_WRONLY:
+		params.CreateFlags |= SHFL_CF_ACCESS_WRITE;
+		break;
+
+	case O_RDWR:
+		params.CreateFlags |= SHFL_CF_ACCESS_READWRITE;
+		break;
+
+	default:
+		BUG();
+	}
+
+	if (file->f_flags & O_APPEND)
+		params.CreateFlags |= SHFL_CF_ACCESS_APPEND;
+
+	params.Info.Attr.fMode = inode->i_mode;
+	rc = vboxsf_create(sf_g->root, sf_i->path, &params);
+	if (rc < 0) {
+		kfree(sf_r);
+		/*
+		 * vbg_status_code_to_errno() already yields a negative
+		 * errno value; the old "-vbg_status_code_to_errno(rc)"
+		 * made open() return a positive number.
+		 */
+		return vbg_status_code_to_errno(rc);
+	}
+
+	if (params.Handle == SHFL_HANDLE_NIL) {
+		switch (params.Result) {
+		case SHFL_PATH_NOT_FOUND:
+		case SHFL_FILE_NOT_FOUND:
+			rc_linux = -ENOENT;
+			break;
+		case SHFL_FILE_EXISTS:
+			rc_linux = -EEXIST;
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* Install the (possibly NIL) handle; callers see rc_linux. */
+	sf_i->force_restat = 1;
+	sf_r->handle = params.Handle;
+	sf_i->file = file;
+	file->private_data = sf_r;
+	return rc_linux;
+}
+
+/**
+ * Close a regular file: flush dirty page-cache pages, close the host
+ * handle and free the per-open state.
+ *
+ * @param inode the inode
+ * @param file the file
+ * @returns 0 (host close status is not propagated)
+ */
+static int sf_reg_release(struct inode *inode, struct file *file)
+{
+	struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
+	struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
+	struct sf_reg_info *sf_r = file->private_data;
+
+	filemap_write_and_wait(inode->i_mapping);
+	vboxsf_close(sf_g->root, sf_r->handle);
+	kfree(sf_r);
+
+	sf_i->file = NULL;
+	sf_i->handle = SHFL_HANDLE_NIL;
+	file->private_data = NULL;
+	return 0;
+}
+
+/*
+ * Fault handler for private (copy-on-write) mappings: allocate a fresh
+ * page and fill it with data read from the host.
+ */
+static int sf_reg_fault(struct vm_fault *vmf)
+{
+	struct page *page;
+	char *buf;
+	loff_t off;
+	uint32_t nread = PAGE_SIZE;
+	int err;
+	struct vm_area_struct *vma = vmf->vma;
+	struct file *file = vma->vm_file;
+	struct inode *inode = GET_F_DENTRY(file)->d_inode;
+	struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
+	struct sf_reg_info *sf_r = file->private_data;
+
+	/*
+	 * Faults past end-of-file get SIGBUS.  The old check compared the
+	 * page offset against the virtual address vma->vm_end, which
+	 * effectively never triggered.
+	 */
+	if (vmf->pgoff >= ((i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT))
+		return VM_FAULT_SIGBUS;
+
+	/*
+	 * Don't use GFP_HIGHUSER as long as sf_reg_read_aux() calls
+	 * vboxsf_read() which works on virtual addresses.
+	 */
+	page = alloc_page(GFP_USER);
+	if (!page)
+		return VM_FAULT_OOM;
+
+	buf = kmap(page);
+	off = (vmf->pgoff << PAGE_SHIFT);
+	err = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
+	if (err) {
+		kunmap(page);
+		put_page(page);
+		return VM_FAULT_SIGBUS;
+	}
+
+	if (!nread)
+		/*
+		 * clear_user_page() wants the faulting user address (for
+		 * cache colouring), not the page offset the old code gave.
+		 */
+		clear_user_page(page_address(page), vmf->address, page);
+	else
+		memset(buf + nread, 0, PAGE_SIZE - nread);
+
+	flush_dcache_page(page);
+	kunmap(page);
+	vmf->page = page;
+	return 0;
+}
+
+/* VM operations for private file mappings; const since vma->vm_ops is
+ * a const pointer and the table is never modified. */
+static const struct vm_operations_struct sf_vma_ops = {
+	.fault = sf_reg_fault
+};
+
+/* mmap: only MAP_PRIVATE (copy-on-write) mappings are supported. */
+static int sf_reg_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & VM_SHARED)
+ return -EINVAL;
+
+ vma->vm_ops = &sf_vma_ops;
+ return 0;
+}
+
+/*
+ * File operations for regular files.
+ * NOTE(review): both ->read/->write and ->read_iter/->write_iter are
+ * populated; the VFS prefers ->read/->write when present, so confirm
+ * the generic iter variants are actually intended to be reachable.
+ */
+struct file_operations sf_reg_fops = {
+ .read = sf_reg_read,
+ .open = sf_reg_open,
+ .write = sf_reg_write,
+ .release = sf_reg_release,
+ .mmap = sf_reg_mmap,
+ .splice_read = generic_file_splice_read,
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
+ .fsync = noop_fsync,
+ .llseek = generic_file_llseek,
+};
+
+/* Inode operations for regular files. */
+struct inode_operations sf_reg_iops = {
+ .getattr = sf_getattr,
+ .setattr = sf_setattr
+};
+
+/* ->readpage: fill a page-cache page with data read from the host. */
+static int sf_readpage(struct file *file, struct page *page)
+{
+ struct inode *inode = GET_F_DENTRY(file)->d_inode;
+ struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
+ struct sf_reg_info *sf_r = file->private_data;
+ uint32_t nread = PAGE_SIZE;
+ char *buf;
+ loff_t off = ((loff_t) page->index) << PAGE_SHIFT;
+ int err;
+
+ buf = kmap(page);
+ err = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
+ if (err) {
+ kunmap(page);
+ /* NOTE(review): ->readpage is called with the page locked, so
+ * the PageLocked() test looks redundant -- confirm. */
+ if (PageLocked(page))
+ unlock_page(page);
+ return err;
+ }
+ /* Zero the tail beyond what the host returned (short read = EOF). */
+ memset(&buf[nread], 0, PAGE_SIZE - nread);
+ flush_dcache_page(page);
+ kunmap(page);
+ SetPageUptodate(page);
+ unlock_page(page);
+ return 0;
+}
+
+/*
+ * ->writepage: write a dirty page-cache page back to the host.  The
+ * last page of the file is clipped to i_size.
+ */
+static int sf_writepage(struct page *page, struct writeback_control *wbc)
+{
+	struct address_space *mapping = page->mapping;
+	struct inode *inode = mapping->host;
+	struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
+	struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
+	struct file *file = sf_i->file;
+	struct sf_reg_info *sf_r = file->private_data;
+	char *buf;
+	uint32_t nwritten = PAGE_SIZE;
+	/* pgoff_t, not int: an int truncates i_size >> PAGE_SHIFT for
+	 * very large files. */
+	pgoff_t end_index = inode->i_size >> PAGE_SHIFT;
+	loff_t off = ((loff_t) page->index) << PAGE_SHIFT;
+	int err;
+
+	/*
+	 * NOTE(review): sf_i->file is set on open and cleared on release,
+	 * so writeback after the last close would dereference NULL above
+	 * -- confirm writepage cannot run at that point.
+	 */
+
+	if (page->index >= end_index)
+		nwritten = inode->i_size & (PAGE_SIZE - 1);
+
+	buf = kmap(page);
+
+	err = sf_reg_write_aux(__func__, sf_g, sf_r, buf, &nwritten, off);
+	if (err < 0) {
+		ClearPageUptodate(page);
+		goto out;
+	}
+
+	if (off > inode->i_size)
+		inode->i_size = off;
+
+	if (PageError(page))
+		ClearPageError(page);
+	err = 0;
+
+out:
+	kunmap(page);
+
+	unlock_page(page);
+	return err;
+}
+
+/* ->write_begin: defer entirely to simple_write_begin(). */
+int sf_write_begin(struct file *file, struct address_space *mapping, loff_t pos,
+ unsigned len, unsigned flags, struct page **pagep,
+ void **fsdata)
+{
+ return simple_write_begin(file, mapping, pos, len, flags, pagep,
+ fsdata);
+}
+
+/*
+ * ->write_end: push the bytes copied into the page by
+ * simple_write_begin() out to the host.
+ *
+ * Returns the number of bytes written, or a negative error code when
+ * the host call fails (the old code returned nwritten even on error).
+ */
+int sf_write_end(struct file *file, struct address_space *mapping, loff_t pos,
+		 unsigned len, unsigned copied, struct page *page, void *fsdata)
+{
+	struct inode *inode = mapping->host;
+	struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
+	struct sf_reg_info *sf_r = file->private_data;
+	void *buf;
+	unsigned from = pos & (PAGE_SIZE - 1);
+	uint32_t nwritten = len;
+	int err;
+
+	buf = kmap(page);
+	err = sf_reg_write_aux(__func__, sf_g, sf_r, buf + from, &nwritten,
+			       pos);
+	kunmap(page);
+
+	if (err >= 0) {
+		/*
+		 * The old code compared err (0 on success) to PAGE_SIZE,
+		 * which is never true; the byte count is what determines
+		 * whether the whole page is now valid.
+		 */
+		if (!PageUptodate(page) && nwritten == PAGE_SIZE)
+			SetPageUptodate(page);
+
+		pos += nwritten;
+		if (pos > inode->i_size)
+			inode->i_size = pos;
+	}
+
+	unlock_page(page);
+	put_page(page);
+
+	return err < 0 ? err : nwritten;
+}
+
+/* Address-space operations backing the page cache of regular files. */
+struct address_space_operations sf_reg_aops = {
+ .readpage = sf_readpage,
+ .writepage = sf_writepage,
+ .write_begin = sf_write_begin,
+ .write_end = sf_write_end,
+};
diff --git a/fs/vboxsf/shfl_hostintf.h b/fs/vboxsf/shfl_hostintf.h
new file mode 100644
index 000000000000..071f72cfbb86
--- /dev/null
+++ b/fs/vboxsf/shfl_hostintf.h
@@ -0,0 +1,1318 @@
+/*
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * VirtualBox Shared Folders: host interface definition.
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef SHFL_HOSTINTF_H
+#define SHFL_HOSTINTF_H
+
+#include <linux/vbox_vmmdev.h>
+
+/*
+ * We cannot use linux' compiletime_assert here because it expects to be used
+ * inside a function only. Use a typedef to a char array with a negative size.
+ */
+#define VBOXSF_ASSERT_SIZE(type, size) \
+ typedef char type ## _assert_size[1 - 2*!!(sizeof(struct type) != (size))]
+
+/**
+ * Structures shared between guest and the service
+ * can be relocated and use offsets to point to variable
+ * length parts.
+ */
+
+/**
+ * Shared folders protocol works with handles.
+ * Before doing any action on a file system object,
+ * one have to obtain the object handle via a SHFL_FN_CREATE
+ * request. A handle must be closed with SHFL_FN_CLOSE.
+ */
+
+/** Shared Folders service functions. (guest)
+ * @{
+ */
+
+/** Query mappings changes. */
+#define SHFL_FN_QUERY_MAPPINGS (1)
+/** Query mappings changes. */
+#define SHFL_FN_QUERY_MAP_NAME (2)
+/** Open/create object. */
+#define SHFL_FN_CREATE (3)
+/** Close object handle. */
+#define SHFL_FN_CLOSE (4)
+/** Read object content. */
+#define SHFL_FN_READ (5)
+/** Write new object content. */
+#define SHFL_FN_WRITE (6)
+/** Lock/unlock a range in the object. */
+#define SHFL_FN_LOCK (7)
+/** List object content. */
+#define SHFL_FN_LIST (8)
+/** Query/set object information. */
+#define SHFL_FN_INFORMATION (9)
+/** Remove object */
+#define SHFL_FN_REMOVE (11)
+/** Map folder (legacy) */
+#define SHFL_FN_MAP_FOLDER_OLD (12)
+/** Unmap folder */
+#define SHFL_FN_UNMAP_FOLDER (13)
+/** Rename object (possibly moving it to another directory) */
+#define SHFL_FN_RENAME (14)
+/** Flush file */
+#define SHFL_FN_FLUSH (15)
+/** @todo macl, a description, please. */
+#define SHFL_FN_SET_UTF8 (16)
+#define SHFL_CPARMS_SET_UTF8 0
+/** Map folder */
+#define SHFL_FN_MAP_FOLDER (17)
+/** Read symlink destination (as of VBox 4.0) */
+#define SHFL_FN_READLINK (18)
+/** Create symlink (as of VBox 4.0) */
+#define SHFL_FN_SYMLINK (19)
+/** Ask host to show symlinks (as of VBox 4.0) */
+#define SHFL_FN_SET_SYMLINKS (20)
+#define SHFL_CPARMS_SET_SYMLINKS 0
+
+/** @} */
+
+/** Shared Folders service functions. (host)
+ * @{
+ */
+
+/** Add shared folder mapping. */
+#define SHFL_FN_ADD_MAPPING (1)
+/** Remove shared folder mapping. */
+#define SHFL_FN_REMOVE_MAPPING (2)
+/** Set the led status light address. */
+#define SHFL_FN_SET_STATUS_LED (3)
+/** Allow the guest to create symbolic links (as of VBox 4.0) */
+#define SHFL_FN_ALLOW_SYMLINKS_CREATE (4)
+/** @} */
+
+/** Root handle for a mapping. Root handles are unique.
+ * @note
+ * Function parameters structures consider
+ * the root handle as 32 bit value. If the typedef
+ * will be changed, then function parameters must be
+ * changed accordingly. All those parameters are marked
+ * with SHFLROOT in comments.
+ */
+typedef uint32_t SHFLROOT;
+
+#define SHFL_ROOT_NIL ((SHFLROOT)~0)
+
+/** A shared folders handle for an opened object. */
+typedef uint64_t SHFLHANDLE;
+
+#define SHFL_HANDLE_NIL ((SHFLHANDLE)~0LL)
+#define SHFL_HANDLE_ROOT ((SHFLHANDLE)0LL)
+
+/** Hardcoded maximum length (in chars) of a shared folder name. */
+#define SHFL_MAX_LEN (256)
+/** Hardcoded maximum number of shared folder mapping available to the guest. */
+#define SHFL_MAX_MAPPINGS (64)
+
+/** @name Shared Folders strings. They can be either UTF-8 or UTF-16.
+ * @{
+ */
+
+/**
+ * Shared folder string buffer structure.
+ */
+typedef struct SHFLSTRING {
+	/** Allocated size of the String member in bytes. */
+	uint16_t u16Size;
+
+	/** Length of string without trailing nul in bytes. */
+	uint16_t u16Length;
+
+	/** UTF-8 or UTF-16 string. Nul terminated. */
+	union {
+		u8 utf8[1];
+		u16 utf16[1];
+		u16 ucs2[1];	/**< misnomer, use utf16. */
+	} String;
+} SHFLSTRING;
+VMMDEV_ASSERT_SIZE(SHFLSTRING, 6);
+/** The size of SHFLSTRING w/o the string part. */
+#define SHFLSTRING_HEADER_SIZE 4
+/* Single offset assert using the named constant; the duplicate assert
+ * against the literal 4 said the same thing and was dropped. */
+VMMDEV_ASSERT_MEMBER_OFFSET(SHFLSTRING, String, SHFLSTRING_HEADER_SIZE);
+
+/** Pointer to a shared folder string buffer. */
+typedef SHFLSTRING *PSHFLSTRING;
+/** Pointer to a const shared folder string buffer. */
+typedef const SHFLSTRING *PCSHFLSTRING;
+
+/** Calculate the total buffer size of a shared folder string
+ * (header plus allocated payload), or 0 for a NULL pointer. */
+static inline u32 ShflStringSizeOfBuffer(const struct SHFLSTRING *string)
+{
+	if (!string)
+		return 0;
+
+	return (u32)(SHFLSTRING_HEADER_SIZE + string->u16Size);
+}
+
+/** @} */
+
+/** Set user id on execution (S_ISUID). */
+#define SHFL_UNIX_ISUID 0004000U
+/** Set group id on execution (S_ISGID). */
+#define SHFL_UNIX_ISGID 0002000U
+/** Sticky bit (S_ISVTX / S_ISTXT). */
+#define SHFL_UNIX_ISTXT 0001000U
+
+/** Owner readable (S_IRUSR). */
+#define SHFL_UNIX_IRUSR 0000400U
+/** Owner writable (S_IWUSR). */
+#define SHFL_UNIX_IWUSR 0000200U
+/** Owner executable (S_IXUSR). */
+#define SHFL_UNIX_IXUSR 0000100U
+
+/** Group readable (S_IRGRP). */
+#define SHFL_UNIX_IRGRP 0000040U
+/** Group writable (S_IWGRP). */
+#define SHFL_UNIX_IWGRP 0000020U
+/** Group executable (S_IXGRP). */
+#define SHFL_UNIX_IXGRP 0000010U
+
+/** Other readable (S_IROTH). */
+#define SHFL_UNIX_IROTH 0000004U
+/** Other writable (S_IWOTH). */
+#define SHFL_UNIX_IWOTH 0000002U
+/** Other executable (S_IXOTH). */
+#define SHFL_UNIX_IXOTH 0000001U
+
+/** Named pipe (fifo) (S_IFIFO). */
+#define SHFL_TYPE_FIFO 0010000U
+/** Character device (S_IFCHR). */
+#define SHFL_TYPE_DEV_CHAR 0020000U
+/** Directory (S_IFDIR). */
+#define SHFL_TYPE_DIRECTORY 0040000U
+/** Block device (S_IFBLK). */
+#define SHFL_TYPE_DEV_BLOCK 0060000U
+/** Regular file (S_IFREG). */
+#define SHFL_TYPE_FILE 0100000U
+/** Symbolic link (S_IFLNK). */
+#define SHFL_TYPE_SYMLINK 0120000U
+/** Socket (S_IFSOCK). */
+#define SHFL_TYPE_SOCKET 0140000U
+/** Whiteout (S_IFWHT). */
+#define SHFL_TYPE_WHITEOUT 0160000U
+/** Type mask (S_IFMT). */
+#define SHFL_TYPE_MASK 0170000U
+
+/** Checks the mode flags indicate a directory (S_ISDIR). */
+#define SHFL_IS_DIRECTORY(m) (((m) & SHFL_TYPE_MASK) == SHFL_TYPE_DIRECTORY)
+/** Checks the mode flags indicate a symbolic link (S_ISLNK). */
+#define SHFL_IS_SYMLINK(m) (((m) & SHFL_TYPE_MASK) == SHFL_TYPE_SYMLINK)
+
+/**
+ * The available additional information in a shfl_fsobjattr object.
+ */
+enum shfl_fsobjattr_add {
+ /** No additional information is available / requested. */
+ SHFLFSOBJATTRADD_NOTHING = 1,
+ /**
+ * The additional unix attributes (shfl_fsobjattr::u::Unix) are
+ * available / requested.
+ */
+ SHFLFSOBJATTRADD_UNIX,
+ /**
+ * The additional extended attribute size (shfl_fsobjattr::u::EASize) is
+ * available / requested.
+ */
+ SHFLFSOBJATTRADD_EASIZE,
+ /**
+ * The last valid item (inclusive).
+ * The valid range is SHFLFSOBJATTRADD_NOTHING thru
+ * SHFLFSOBJATTRADD_LAST.
+ */
+ SHFLFSOBJATTRADD_LAST = SHFLFSOBJATTRADD_EASIZE,
+
+ /** The usual 32-bit hack. */
+ SHFLFSOBJATTRADD_32BIT_SIZE_HACK = 0x7fffffff
+};
+
+/**
+ * Additional Unix Attributes, these are available when
+ * shfl_fsobjattr.enmAdditional == SHFLFSOBJATTRADD_UNIX.
+ */
+struct shfl_fsobjattr_unix {
+ /**
+ * The user owning the filesystem object (st_uid).
+ * This field is ~0U if not supported.
+ */
+ u32 uid;
+
+ /**
+ * The group the filesystem object is assigned (st_gid).
+ * This field is ~0U if not supported.
+ */
+ u32 gid;
+
+ /**
+ * Number of hard links to this filesystem object (st_nlink).
+ * This field is 1 if the filesystem doesn't support hardlinking or
+ * the information isn't available.
+ */
+ u32 cHardlinks;
+
+ /**
+ * The device number of the device which this filesystem object resides on (st_dev).
+ * This field is 0 if this information is not available.
+ */
+ u32 INodeIdDevice;
+
+ /**
+ * The unique identifier (within the filesystem) of this filesystem object (st_ino).
+ * Together with INodeIdDevice, this field can be used as a OS wide unique id
+ * when both their values are not 0.
+ * This field is 0 if the information is not available.
+ */
+ u64 INodeId;
+
+ /**
+ * User flags (st_flags).
+ * This field is 0 if this information is not available.
+ */
+ u32 fFlags;
+
+ /**
+ * The current generation number (st_gen).
+ * This field is 0 if this information is not available.
+ */
+ u32 GenerationId;
+
+ /**
+ * The device number of a character or block device type object (st_rdev).
+ * This field is 0 if the file isn't of a character or block device type and
+ * when the OS doesn't subscribe to the major+minor device idenfication scheme.
+ */
+ u32 Device;
+} __packed;
+
+/** Extended attribute size. */
+struct SHFLFSOBJATTREASIZE {
+ s64 cb; /**< Size of EAs. */
+} __packed;
+
+/** Shared folder filesystem object attributes. */
+struct shfl_fsobjattr {
+ /**
+ * Mode flags (st_mode). SHFL_UNIX_*, SHFL_TYPE_*, and SHFL_DOS_*.
+ * @remarks We depend on a number of SHFL_ defines to remain unchanged.
+ * Fortuntately, these are depending on windows, dos and unix
+ * standard values, so this shouldn't be much of a pain.
+ */
+ u32 fMode;
+
+ /** The additional attributes available. */
+ enum shfl_fsobjattr_add enmAdditional;
+
+ /**
+ * Additional attributes.
+ *
+ * Unless explicitly specified to an API, the API can provide additional
+ * data as it is provided by the underlying OS.
+ */
+ union {
+ struct shfl_fsobjattr_unix Unix;
+ struct shfl_fsobjattr_unix EASize;
+ } __packed u;
+} __packed;
+VBOXSF_ASSERT_SIZE(shfl_fsobjattr, 44);
+
+struct shfl_timespec {
+ s64 ns_relative_to_unix_epoch;
+};
+
+/**
+ * Filesystem object information structure.
+ */
+struct shfl_fsobjinfo {
+ /**
+ * Logical size (st_size).
+ * For normal files this is the size of the file.
+ * For symbolic links, this is the length of the path name contained
+ * in the symbolic link.
+ * For other objects this fields needs to be specified.
+ */
+ s64 cbObject;
+
+ /** Disk allocation size (st_blocks * DEV_BSIZE). */
+ s64 cbAllocated;
+
+ /**
+ * Time of last access (st_atime).
+ * @remarks Here (and other places) we depend on the IPRT timespec to
+ * remain unchanged.
+ */
+ struct shfl_timespec AccessTime;
+
+ /** Time of last data modification (st_mtime). */
+ struct shfl_timespec ModificationTime;
+
+ /**
+ * Time of last status change (st_ctime).
+ * If not available this is set to ModificationTime.
+ */
+ struct shfl_timespec ChangeTime;
+
+ /**
+ * Time of file birth (st_birthtime).
+ * If not available this is set to ChangeTime.
+ */
+ struct shfl_timespec BirthTime;
+
+ /** Attributes. */
+ struct shfl_fsobjattr Attr;
+
+} __packed;
+VBOXSF_ASSERT_SIZE(shfl_fsobjinfo, 92);
+
+/**
+ * Result of an open/create request.
+ * Along with handle value the result code
+ * identifies what has happened while
+ * trying to open the object.
+ */
+typedef enum _SHFLCREATERESULT {
+ SHFL_NO_RESULT,
+ /** Specified path does not exist. */
+ SHFL_PATH_NOT_FOUND,
+ /** Path to file exists, but the last component does not. */
+ SHFL_FILE_NOT_FOUND,
+ /** File already exists and either has been opened or not. */
+ SHFL_FILE_EXISTS,
+ /** New file was created. */
+ SHFL_FILE_CREATED,
+ /** Existing file was replaced or overwritten. */
+ SHFL_FILE_REPLACED
+} SHFLCREATERESULT;
+
+/** Open/create flags.
+ * @{
+ */
+
+/** No flags. Initialization value. */
+#define SHFL_CF_NONE (0x00000000)
+
+/** Lookup only the object, do not return a handle. All other flags are ignored. */
+#define SHFL_CF_LOOKUP (0x00000001)
+
+/** Open parent directory of specified object.
+ * Useful for the corresponding Windows FSD flag
+ * and for opening paths like \\dir\\*.* to search the 'dir'.
+ * @todo possibly not needed???
+ */
+#define SHFL_CF_OPEN_TARGET_DIRECTORY (0x00000002)
+
+/** Create/open a directory. */
+#define SHFL_CF_DIRECTORY (0x00000004)
+
+/**
+ * Open/create action to do if object exists
+ * and if the object does not exists.
+ * REPLACE file means atomically DELETE and CREATE.
+ * OVERWRITE file means truncating the file to 0 and
+ * setting new size.
+ * When opening an existing directory REPLACE and OVERWRITE
+ * actions are considered invalid, and cause returning
+ * FILE_EXISTS with NIL handle.
+ */
+#define SHFL_CF_ACT_MASK_IF_EXISTS (0x000000F0)
+#define SHFL_CF_ACT_MASK_IF_NEW (0x00000F00)
+
+/** What to do if object exists. */
+#define SHFL_CF_ACT_OPEN_IF_EXISTS (0x00000000)
+#define SHFL_CF_ACT_FAIL_IF_EXISTS (0x00000010)
+#define SHFL_CF_ACT_REPLACE_IF_EXISTS (0x00000020)
+#define SHFL_CF_ACT_OVERWRITE_IF_EXISTS (0x00000030)
+
+/** What to do if object does not exist. */
+#define SHFL_CF_ACT_CREATE_IF_NEW (0x00000000)
+#define SHFL_CF_ACT_FAIL_IF_NEW (0x00000100)
+
+/** Read/write requested access for the object. */
+#define SHFL_CF_ACCESS_MASK_RW (0x00003000)
+
+/** No access requested. */
+#define SHFL_CF_ACCESS_NONE (0x00000000)
+/** Read access requested. */
+#define SHFL_CF_ACCESS_READ (0x00001000)
+/** Write access requested. */
+#define SHFL_CF_ACCESS_WRITE (0x00002000)
+/** Read/Write access requested. */
+#define SHFL_CF_ACCESS_READWRITE (SHFL_CF_ACCESS_READ | SHFL_CF_ACCESS_WRITE)
+
+/** Requested share access for the object. */
+#define SHFL_CF_ACCESS_MASK_DENY (0x0000C000)
+
+/** Allow any access. */
+#define SHFL_CF_ACCESS_DENYNONE (0x00000000)
+/** Do not allow read. */
+#define SHFL_CF_ACCESS_DENYREAD (0x00004000)
+/** Do not allow write. */
+#define SHFL_CF_ACCESS_DENYWRITE (0x00008000)
+/** Do not allow access. */
+#define SHFL_CF_ACCESS_DENYALL (SHFL_CF_ACCESS_DENYREAD | SHFL_CF_ACCESS_DENYWRITE)
+
+/** Requested access to attributes of the object. */
+#define SHFL_CF_ACCESS_MASK_ATTR (0x00030000)
+
+/** No access requested. */
+#define SHFL_CF_ACCESS_ATTR_NONE (0x00000000)
+/** Read access requested. */
+#define SHFL_CF_ACCESS_ATTR_READ (0x00010000)
+/** Write access requested. */
+#define SHFL_CF_ACCESS_ATTR_WRITE (0x00020000)
+/** Read/Write access requested. */
+#define SHFL_CF_ACCESS_ATTR_READWRITE (SHFL_CF_ACCESS_ATTR_READ | SHFL_CF_ACCESS_ATTR_WRITE)
+
+/** The file is opened in append mode. Ignored if SHFL_CF_ACCESS_WRITE is not set. */
+#define SHFL_CF_ACCESS_APPEND (0x00040000)
+
+/** @} */
+
+struct shfl_createparms {
+ /* Returned handle of opened object. */
+ SHFLHANDLE Handle;
+
+ /* Returned result of the operation */
+ SHFLCREATERESULT Result;
+
+ /* SHFL_CF_* */
+ uint32_t CreateFlags;
+
+ /*
+ * Attributes of object to create and
+ * returned actual attributes of opened/created object.
+ */
+ struct shfl_fsobjinfo Info;
+} __packed;
+
+/** Shared Folders mappings.
+ * @{
+ */
+
+/** The mapping has been added since last query. */
+#define SHFL_MS_NEW (1)
+/** The mapping has been deleted since last query. */
+#define SHFL_MS_DELETED (2)
+
+typedef struct _SHFLMAPPING {
+ /** Mapping status. */
+ uint32_t u32Status;
+ /** Root handle. */
+ SHFLROOT root;
+} SHFLMAPPING;
+/** Pointer to a SHFLMAPPING structure. */
+typedef SHFLMAPPING *PSHFLMAPPING;
+
+/** @} */
+
+/** Shared Folder directory information
+ * @{
+ */
+
+typedef struct _SHFLDIRINFO {
+ /** Full information about the object. */
+ struct shfl_fsobjinfo Info;
+ /**
+ * The length of the short field (number of UTF16 chars).
+ * It is 16-bit for reasons of alignment.
+ */
+ uint16_t cucShortName;
+ /**
+ * The short name for 8.3 compatibility.
+ * Empty string if not available.
+ */
+ u16 uszShortName[14];
+ /** @todo malc, a description, please. */
+ SHFLSTRING name;
+} SHFLDIRINFO, *PSHFLDIRINFO;
+
+/**
+ * Shared folder filesystem properties.
+ */
+typedef struct SHFLFSPROPERTIES {
+ /**
+ * The maximum size of a filesystem object name.
+ * This does not include the '\\0'.
+ */
+ uint32_t cbMaxComponent;
+
+ /**
+ * True if the filesystem is remote.
+ * False if the filesystem is local.
+ */
+ bool fRemote;
+
+ /**
+ * True if the filesystem is case sensitive.
+ * False if the filesystem is case insensitive.
+ */
+ bool fCaseSensitive;
+
+ /**
+ * True if the filesystem is mounted read only.
+ * False if the filesystem is mounted read write.
+ */
+ bool fReadOnly;
+
+ /**
+ * True if the filesystem can encode unicode object names.
+ * False if it can't.
+ */
+ bool fSupportsUnicode;
+
+ /**
+ * True if the filesystem is compresses.
+ * False if it isn't or we don't know.
+ */
+ bool fCompressed;
+
+ /**
+ * True if the filesystem compresses of individual files.
+ * False if it doesn't or we don't know.
+ */
+ bool fFileCompression;
+
+ /** @todo more? */
+} SHFLFSPROPERTIES;
+VMMDEV_ASSERT_SIZE(SHFLFSPROPERTIES, 12);
+/** Pointer to a shared folder filesystem properties structure. */
+typedef SHFLFSPROPERTIES *PSHFLFSPROPERTIES;
+/** Pointer to a const shared folder filesystem properties structure. */
+typedef SHFLFSPROPERTIES const *PCSHFLFSPROPERTIES;
+
+typedef struct _SHFLVOLINFO {
+ s64 ullTotalAllocationBytes;
+ s64 ullAvailableAllocationBytes;
+ uint32_t ulBytesPerAllocationUnit;
+ uint32_t ulBytesPerSector;
+ uint32_t ulSerial;
+ SHFLFSPROPERTIES fsProperties;
+} SHFLVOLINFO, *PSHFLVOLINFO;
+
+/** @} */
+
+/**
+ * Function parameter structures.
+ * @{
+ */
+
+/** SHFL_FN_QUERY_MAPPINGS */
+
+/**
+ * Validation mask. Needs to be adjusted
+ * whenever a new SHFL_MF_ flag is added.
+ */
+#define SHFL_MF_MASK (0x00000011)
+/** UC2 enconded strings. */
+#define SHFL_MF_UCS2 (0x00000000)
+/** Guest uses UTF8 strings, if not set then the strings are unicode (UCS2). */
+#define SHFL_MF_UTF8 (0x00000001)
+/** Just handle the auto-mounted folders. */
+#define SHFL_MF_AUTOMOUNT (0x00000010)
+
+/** Type of guest system. For future system dependent features. */
+#define SHFL_MF_SYSTEM_MASK (0x0000FF00)
+#define SHFL_MF_SYSTEM_NONE (0x00000000)
+#define SHFL_MF_SYSTEM_WINDOWS (0x00000100)
+#define SHFL_MF_SYSTEM_LINUX (0x00000200)
+
+/** Parameters structure. */
+typedef struct _VBoxSFQueryMappings {
+ VBoxGuestHGCMCallInfo callInfo;
+
+ /**
+ * 32bit, in:
+ * Flags describing various client needs.
+ */
+ HGCMFunctionParameter flags;
+
+ /**
+ * 32bit, in/out:
+ * Number of mappings the client expects.
+ * This is the number of elements in the
+ * mappings array.
+ */
+ HGCMFunctionParameter numberOfMappings;
+
+ /**
+ * pointer, in/out:
+ * Points to array of SHFLMAPPING structures.
+ */
+ HGCMFunctionParameter mappings;
+
+} VBoxSFQueryMappings;
+
+/** Number of parameters */
+#define SHFL_CPARMS_QUERY_MAPPINGS (3)
+
+/**
+ * SHFL_FN_QUERY_MAP_NAME
+ */
+
+/** Parameters structure. */
+typedef struct _VBoxSFQueryMapName {
+ VBoxGuestHGCMCallInfo callInfo;
+
+ /**
+ * 32bit, in: SHFLROOT
+ * Root handle of the mapping which name is queried.
+ */
+ HGCMFunctionParameter root;
+
+ /**
+ * pointer, in/out:
+ * Points to SHFLSTRING buffer.
+ */
+ HGCMFunctionParameter name;
+
+} VBoxSFQueryMapName;
+
+/** Number of parameters */
+#define SHFL_CPARMS_QUERY_MAP_NAME (2)
+
+/**
+ * SHFL_FN_MAP_FOLDER_OLD
+ */
+
+/** Parameters structure. */
+typedef struct _VBoxSFMapFolder_Old {
+ VBoxGuestHGCMCallInfo callInfo;
+
+ /**
+ * pointer, in:
+ * Points to SHFLSTRING buffer.
+ */
+ HGCMFunctionParameter path;
+
+ /**
+ * pointer, out: SHFLROOT
+ * Root handle of the mapping which name is queried.
+ */
+ HGCMFunctionParameter root;
+
+ /**
+ * pointer, in: UTF16
+ * Path delimiter
+ */
+ HGCMFunctionParameter delimiter;
+
+} VBoxSFMapFolder_Old;
+
+/** Number of parameters */
+#define SHFL_CPARMS_MAP_FOLDER_OLD (3)
+
+/**
+ * SHFL_FN_MAP_FOLDER
+ */
+
+/** Parameters structure. */
+typedef struct _VBoxSFMapFolder {
+ VBoxGuestHGCMCallInfo callInfo;
+
+ /**
+ * pointer, in:
+ * Points to SHFLSTRING buffer.
+ */
+ HGCMFunctionParameter path;
+
+ /**
+ * pointer, out: SHFLROOT
+ * Root handle of the mapping which name is queried.
+ */
+ HGCMFunctionParameter root;
+
+ /**
+ * pointer, in: UTF16
+ * Path delimiter
+ */
+ HGCMFunctionParameter delimiter;
+
+ /**
+ * pointer, in: SHFLROOT
+	 * Case sensitive flag
+ */
+ HGCMFunctionParameter fCaseSensitive;
+
+} VBoxSFMapFolder;
+
+/** Number of parameters */
+#define SHFL_CPARMS_MAP_FOLDER (4)
+
+/**
+ * SHFL_FN_UNMAP_FOLDER
+ */
+
+/** Parameters structure. */
+typedef struct _VBoxSFUnmapFolder {
+ VBoxGuestHGCMCallInfo callInfo;
+
+ /**
+ * pointer, in: SHFLROOT
+ * Root handle of the mapping which name is queried.
+ */
+ HGCMFunctionParameter root;
+
+} VBoxSFUnmapFolder;
+
+/** Number of parameters */
+#define SHFL_CPARMS_UNMAP_FOLDER (1)
+
+/**
+ * SHFL_FN_CREATE
+ */
+
+/** Parameters structure. */
+typedef struct _VBoxSFCreate {
+ VBoxGuestHGCMCallInfo callInfo;
+
+ /**
+ * pointer, in: SHFLROOT
+ * Root handle of the mapping which name is queried.
+ */
+ HGCMFunctionParameter root;
+
+ /**
+ * pointer, in:
+ * Points to SHFLSTRING buffer.
+ */
+ HGCMFunctionParameter path;
+
+ /**
+ * pointer, in/out:
+ * Points to SHFLCREATEPARMS buffer.
+ */
+ HGCMFunctionParameter parms;
+
+} VBoxSFCreate;
+
+/** Number of parameters */
+#define SHFL_CPARMS_CREATE (3)
+
+/**
+ * SHFL_FN_CLOSE
+ */
+
+/** Parameters structure. */
+typedef struct _VBoxSFClose {
+ VBoxGuestHGCMCallInfo callInfo;
+
+ /**
+ * pointer, in: SHFLROOT
+ * Root handle of the mapping which name is queried.
+ */
+ HGCMFunctionParameter root;
+
+ /**
+ * value64, in:
+ * SHFLHANDLE of object to close.
+ */
+ HGCMFunctionParameter handle;
+
+} VBoxSFClose;
+
+/** Number of parameters */
+#define SHFL_CPARMS_CLOSE (2)
+
+/**
+ * SHFL_FN_READ
+ */
+
+/** Parameters structure. */
+typedef struct _VBoxSFRead {
+ VBoxGuestHGCMCallInfo callInfo;
+
+ /**
+ * pointer, in: SHFLROOT
+ * Root handle of the mapping which name is queried.
+ */
+ HGCMFunctionParameter root;
+
+ /**
+ * value64, in:
+ * SHFLHANDLE of object to read from.
+ */
+ HGCMFunctionParameter handle;
+
+ /**
+ * value64, in:
+ * Offset to read from.
+ */
+ HGCMFunctionParameter offset;
+
+ /**
+ * value64, in/out:
+ * Bytes to read/How many were read.
+ */
+ HGCMFunctionParameter cb;
+
+ /**
+ * pointer, out:
+ * Buffer to place data to.
+ */
+ HGCMFunctionParameter buffer;
+
+} VBoxSFRead;
+
+/** Number of parameters */
+#define SHFL_CPARMS_READ (5)
+
+/**
+ * SHFL_FN_WRITE
+ */
+
+/** Parameters structure. */
+typedef struct _VBoxSFWrite {
+ VBoxGuestHGCMCallInfo callInfo;
+
+ /**
+ * pointer, in: SHFLROOT
+ * Root handle of the mapping which name is queried.
+ */
+ HGCMFunctionParameter root;
+
+ /**
+ * value64, in:
+ * SHFLHANDLE of object to write to.
+ */
+ HGCMFunctionParameter handle;
+
+ /**
+ * value64, in:
+ * Offset to write to.
+ */
+ HGCMFunctionParameter offset;
+
+ /**
+ * value64, in/out:
+ * Bytes to write/How many were written.
+ */
+ HGCMFunctionParameter cb;
+
+ /**
+ * pointer, in:
+ * Data to write.
+ */
+ HGCMFunctionParameter buffer;
+
+} VBoxSFWrite;
+
+/** Number of parameters */
+#define SHFL_CPARMS_WRITE (5)
+
+/**
+ * SHFL_FN_LOCK
+ */
+
+/** Lock owner is the HGCM client. */
+
+/** Lock mode bit mask. */
+#define SHFL_LOCK_MODE_MASK (0x3)
+/** Cancel lock on the given range. */
+#define SHFL_LOCK_CANCEL (0x0)
+/** Acquire read only lock. Prevent write to the range. */
+#define SHFL_LOCK_SHARED (0x1)
+/** Acquire write lock. Prevent both write and read to the range. */
+#define SHFL_LOCK_EXCLUSIVE (0x2)
+
+/** Do not wait for lock if it can not be acquired at the time. */
+#define SHFL_LOCK_NOWAIT (0x0)
+/** Wait and acquire lock. */
+#define SHFL_LOCK_WAIT (0x4)
+
+/** Lock the specified range. */
+#define SHFL_LOCK_PARTIAL (0x0)
+/** Lock entire object. */
+#define SHFL_LOCK_ENTIRE (0x8)
+
+/** Parameters structure. */
+typedef struct _VBoxSFLock {
+ VBoxGuestHGCMCallInfo callInfo;
+
+ /**
+ * pointer, in: SHFLROOT
+ * Root handle of the mapping which name is queried.
+ */
+ HGCMFunctionParameter root;
+
+ /**
+ * value64, in:
+ * SHFLHANDLE of object to be locked.
+ */
+ HGCMFunctionParameter handle;
+
+ /**
+ * value64, in:
+ * Starting offset of lock range.
+ */
+ HGCMFunctionParameter offset;
+
+ /**
+ * value64, in:
+ * Length of range.
+ */
+ HGCMFunctionParameter length;
+
+ /**
+ * value32, in:
+ * Lock flags SHFL_LOCK_*.
+ */
+ HGCMFunctionParameter flags;
+
+} VBoxSFLock;
+
+/** Number of parameters */
+#define SHFL_CPARMS_LOCK (5)
+
+/**
+ * SHFL_FN_FLUSH
+ */
+
+/** Parameters structure. */
+typedef struct _VBoxSFFlush {
+ VBoxGuestHGCMCallInfo callInfo;
+
+ /**
+ * pointer, in: SHFLROOT
+ * Root handle of the mapping which name is queried.
+ */
+ HGCMFunctionParameter root;
+
+ /**
+ * value64, in:
+	 * SHFLHANDLE of object to be flushed.
+ */
+ HGCMFunctionParameter handle;
+
+} VBoxSFFlush;
+
+/** Number of parameters */
+#define SHFL_CPARMS_FLUSH (2)
+
+/**
+ * SHFL_FN_LIST
+ */
+
+/** Listing information includes variable length RTDIRENTRY[EX] structures. */
+
+/** @todo might be necessary for future. */
+#define SHFL_LIST_NONE 0
+#define SHFL_LIST_RETURN_ONE 1
+
+/** Parameters structure. */
+typedef struct _VBoxSFList {
+ VBoxGuestHGCMCallInfo callInfo;
+
+ /**
+ * pointer, in: SHFLROOT
+ * Root handle of the mapping which name is queried.
+ */
+ HGCMFunctionParameter root;
+
+ /**
+ * value64, in:
+ * SHFLHANDLE of object to be listed.
+ */
+ HGCMFunctionParameter handle;
+
+ /**
+ * value32, in:
+ * List flags SHFL_LIST_*.
+ */
+ HGCMFunctionParameter flags;
+
+ /**
+ * value32, in/out:
+ * Bytes to be used for listing information/How many bytes were used.
+ */
+ HGCMFunctionParameter cb;
+
+ /**
+ * pointer, in/optional
+ * Points to SHFLSTRING buffer that specifies a search path.
+ */
+ HGCMFunctionParameter path;
+
+ /**
+ * pointer, out:
+ * Buffer to place listing information to. (SHFLDIRINFO)
+ */
+ HGCMFunctionParameter buffer;
+
+ /**
+ * value32, in/out:
+ * Indicates a key where the listing must be resumed.
+ * in: 0 means start from begin of object.
+ * out: 0 means listing completed.
+ */
+ HGCMFunctionParameter resumePoint;
+
+ /**
+ * pointer, out:
+ * Number of files returned
+ */
+ HGCMFunctionParameter cFiles;
+
+} VBoxSFList;
+
+/** Number of parameters */
+#define SHFL_CPARMS_LIST (8)
+
+/**
+ * SHFL_FN_READLINK
+ */
+
+/** Parameters structure. */
+typedef struct _VBoxSFReadLink {
+ VBoxGuestHGCMCallInfo callInfo;
+
+ /**
+ * pointer, in: SHFLROOT
+ * Root handle of the mapping which name is queried.
+ */
+ HGCMFunctionParameter root;
+
+ /**
+ * pointer, in:
+ * Points to SHFLSTRING buffer.
+ */
+ HGCMFunctionParameter path;
+
+ /**
+ * pointer, out:
+ * Buffer to place data to.
+ */
+ HGCMFunctionParameter buffer;
+
+} VBoxSFReadLink;
+
+/** Number of parameters */
+#define SHFL_CPARMS_READLINK (3)
+
+/**
+ * SHFL_FN_INFORMATION
+ */
+
+/** Mask of Set/Get bit. */
+#define SHFL_INFO_MODE_MASK (0x1)
+/** Get information */
+#define SHFL_INFO_GET (0x0)
+/** Set information */
+#define SHFL_INFO_SET (0x1)
+
+/** Get name of the object. */
+#define SHFL_INFO_NAME (0x2)
+/** Set size of object (extend/truncate); only applies to file objects */
+#define SHFL_INFO_SIZE (0x4)
+/** Get/Set file object info. */
+#define SHFL_INFO_FILE (0x8)
+/** Get volume information. */
+#define SHFL_INFO_VOLUME (0x10)
+
+/** @todo different file info structures */
+
+/** Parameters structure. */
+typedef struct _VBoxSFInformation {
+ VBoxGuestHGCMCallInfo callInfo;
+
+ /**
+ * pointer, in: SHFLROOT
+ * Root handle of the mapping which name is queried.
+ */
+ HGCMFunctionParameter root;
+
+ /**
+ * value64, in:
+ * SHFLHANDLE of object to be listed.
+ */
+ HGCMFunctionParameter handle;
+
+ /**
+ * value32, in:
+ * SHFL_INFO_*
+ */
+ HGCMFunctionParameter flags;
+
+ /**
+ * value32, in/out:
+ * Bytes to be used for information/How many bytes were used.
+ */
+ HGCMFunctionParameter cb;
+
+ /**
+ * pointer, in/out:
+ * Information to be set/get (shfl_fsobjinfo or SHFLSTRING). Do not forget
+ * to set the shfl_fsobjinfo::Attr::enmAdditional for Get operation as well.
+ */
+ HGCMFunctionParameter info;
+
+} VBoxSFInformation;
+
+/** Number of parameters */
+#define SHFL_CPARMS_INFORMATION (5)
+
+/**
+ * SHFL_FN_REMOVE
+ */
+
+#define SHFL_REMOVE_FILE (0x1)
+#define SHFL_REMOVE_DIR (0x2)
+#define SHFL_REMOVE_SYMLINK (0x4)
+
+/** Parameters structure. */
+typedef struct _VBoxSFRemove {
+ VBoxGuestHGCMCallInfo callInfo;
+
+ /**
+ * pointer, in: SHFLROOT
+ * Root handle of the mapping which name is queried.
+ */
+ HGCMFunctionParameter root;
+
+ /**
+ * pointer, in:
+ * Points to SHFLSTRING buffer.
+ */
+ HGCMFunctionParameter path;
+
+ /**
+ * value32, in:
+ * remove flags (file/directory)
+ */
+ HGCMFunctionParameter flags;
+
+} VBoxSFRemove;
+
+#define SHFL_CPARMS_REMOVE (3)
+
+/**
+ * SHFL_FN_RENAME
+ */
+
+#define SHFL_RENAME_FILE (0x1)
+#define SHFL_RENAME_DIR (0x2)
+#define SHFL_RENAME_REPLACE_IF_EXISTS (0x4)
+
+/** Parameters structure. */
+typedef struct _VBoxSFRename {
+ VBoxGuestHGCMCallInfo callInfo;
+
+ /**
+ * pointer, in: SHFLROOT
+ * Root handle of the mapping which name is queried.
+ */
+ HGCMFunctionParameter root;
+
+ /**
+ * pointer, in:
+ * Points to SHFLSTRING src.
+ */
+ HGCMFunctionParameter src;
+
+ /**
+ * pointer, in:
+ * Points to SHFLSTRING dest.
+ */
+ HGCMFunctionParameter dest;
+
+ /**
+ * value32, in:
+ * rename flags (file/directory)
+ */
+ HGCMFunctionParameter flags;
+
+} VBoxSFRename;
+
+#define SHFL_CPARMS_RENAME (4)
+
+/**
+ * SHFL_FN_SYMLINK
+ */
+
+/** Parameters structure. */
+typedef struct _VBoxSFSymlink {
+ VBoxGuestHGCMCallInfo callInfo;
+
+ /**
+ * pointer, in: SHFLROOT
+ * Root handle of the mapping which name is queried.
+ */
+ HGCMFunctionParameter root;
+
+ /**
+ * pointer, in:
+ * Points to SHFLSTRING of path for the new symlink.
+ */
+ HGCMFunctionParameter newPath;
+
+ /**
+ * pointer, in:
+ * Points to SHFLSTRING of destination for symlink.
+ */
+ HGCMFunctionParameter oldPath;
+
+ /**
+ * pointer, out:
+ * Information about created symlink.
+ */
+ HGCMFunctionParameter info;
+
+} VBoxSFSymlink;
+
+#define SHFL_CPARMS_SYMLINK (4)
+
+/**
+ * SHFL_FN_ADD_MAPPING
+ * Host call, no guest structure is used.
+ */
+
+/** mapping is writable */
+#define SHFL_ADD_MAPPING_F_WRITABLE (RT_BIT_32(0))
+/** mapping is automounted by the guest */
+#define SHFL_ADD_MAPPING_F_AUTOMOUNT (RT_BIT_32(1))
+/** allow the guest to create symlinks */
+#define SHFL_ADD_MAPPING_F_CREATE_SYMLINKS (RT_BIT_32(2))
+/** mapping is actually missing on the host */
+#define SHFL_ADD_MAPPING_F_MISSING (RT_BIT_32(3))
+
+#define SHFL_CPARMS_ADD_MAPPING (3)
+
+/**
+ * SHFL_FN_REMOVE_MAPPING
+ * Host call, no guest structure is used.
+ */
+
+#define SHFL_CPARMS_REMOVE_MAPPING (1)
+
+/**
+ * SHFL_FN_SET_STATUS_LED
+ * Host call, no guest structure is used.
+ */
+
+#define SHFL_CPARMS_SET_STATUS_LED (1)
+
+/** @} */
+
+#endif
diff --git a/fs/vboxsf/utils.c b/fs/vboxsf/utils.c
new file mode 100644
index 000000000000..533dd539a480
--- /dev/null
+++ b/fs/vboxsf/utils.c
@@ -0,0 +1,671 @@
+/*
+ * VirtualBox Guest Shared Folders support: Utility functions.
+ * Mainly conversion from/to VirtualBox/Linux data structures.
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#include <linux/math64.h>
+#include <linux/namei.h>
+#include <linux/nfs_fs.h>
+#include <linux/nls.h>
+#include <linux/vfs.h>
+#include <linux/vbox_err.h>
+#include <linux/vbox_utils.h>
+#include "vfsmod.h"
+
+/*
+ * sf_reg_aops and sf_backing_dev_info are just quick implementations to make
+ * sendfile work. For more information have a look at
+ *
+ * http://us1.samba.org/samba/ftp/cifs-cvs/ols2006-fs-tutorial-smf.odp
+ *
+ * and the sample implementation
+ *
+ * http://pserver.samba.org/samba/ftp/cifs-cvs/samplefs.tar.gz
+ */
+
+/*
+ * Convert a VirtualBox timestamp (nanoseconds relative to the Unix
+ * epoch) into a struct timespec.
+ *
+ * Use the signed 64-bit division helper so that timestamps before the
+ * epoch (negative values) convert correctly; do_div() operates on
+ * unsigned 64-bit values and would mangle them.
+ */
+static void sf_timespec_from_vbox(struct timespec *tv,
+				  const struct shfl_timespec *ts)
+{
+	s32 nsec;
+
+	tv->tv_sec = div_s64_rem(ts->ns_relative_to_unix_epoch, 1000000000,
+				 &nsec);
+	/* div_s64_rem() truncates toward zero; normalize to 0 <= nsec < 1e9 */
+	if (nsec < 0) {
+		tv->tv_sec--;
+		nsec += 1000000000;
+	}
+	tv->tv_nsec = nsec;
+}
+
+/*
+ * Convert a struct timespec into a VirtualBox timestamp (nanoseconds
+ * relative to the Unix epoch).
+ */
+static void sf_timespec_to_vbox(struct shfl_timespec *ts,
+				const struct timespec *tv)
+{
+	ts->ns_relative_to_unix_epoch =
+		(s64)tv->tv_sec * 1000000000 + (s64)tv->tv_nsec;
+}
+
+/*
+ * Set [inode] attributes based on the host object info in [info];
+ * uid/gid and the mode/mask overrides come from the per-mount data
+ * stored in [sf_g].
+ */
+void sf_init_inode(struct sf_glob_info *sf_g, struct inode *inode,
+		   const struct shfl_fsobjinfo *info)
+{
+	const struct shfl_fsobjattr *attr;
+	s64 allocated;
+	int mode;
+
+	attr = &info->Attr;
+
+/* Translate one SHFL_UNIX_* permission bit into the matching S_* bit. */
+#define mode_set(r) attr->fMode & (SHFL_UNIX_##r) ? (S_##r) : 0;
+
+	mode = mode_set(ISUID);
+	mode |= mode_set(ISGID);
+
+	mode |= mode_set(IRUSR);
+	mode |= mode_set(IWUSR);
+	mode |= mode_set(IXUSR);
+
+	mode |= mode_set(IRGRP);
+	mode |= mode_set(IWGRP);
+	mode |= mode_set(IXGRP);
+
+	mode |= mode_set(IROTH);
+	mode |= mode_set(IWOTH);
+	mode |= mode_set(IXOTH);
+
+#undef mode_set
+
+	inode->i_mapping->a_ops = &sf_reg_aops;
+
+	if (SHFL_IS_DIRECTORY(attr->fMode)) {
+		/* A dmode mount option overrides the host-provided mode. */
+		inode->i_mode = sf_g->dmode != ~0 ? (sf_g->dmode & 0777) : mode;
+		inode->i_mode &= ~sf_g->dmask;
+		inode->i_mode |= S_IFDIR;
+		inode->i_op = &sf_dir_iops;
+		inode->i_fop = &sf_dir_fops;
+		/* XXX: this probably should be set to the number of entries
+		   in the directory plus two (. ..) */
+		set_nlink(inode, 1);
+	} else if (SHFL_IS_SYMLINK(attr->fMode)) {
+		/* An fmode mount option overrides the host-provided mode. */
+		inode->i_mode = sf_g->fmode != ~0 ? (sf_g->fmode & 0777) : mode;
+		inode->i_mode &= ~sf_g->fmask;
+		inode->i_mode |= S_IFLNK;
+		inode->i_op = &sf_lnk_iops;
+		set_nlink(inode, 1);
+	} else {
+		inode->i_mode = sf_g->fmode != ~0 ? (sf_g->fmode & 0777) : mode;
+		inode->i_mode &= ~sf_g->fmask;
+		inode->i_mode |= S_IFREG;
+		inode->i_op = &sf_reg_iops;
+		inode->i_fop = &sf_reg_fops;
+		set_nlink(inode, 1);
+	}
+
+	/* Ownership comes from the per-mount sf_glob_info, not the host. */
+	inode->i_uid = make_kuid(current_user_ns(), sf_g->uid);
+	inode->i_gid = make_kgid(current_user_ns(), sf_g->gid);
+
+	inode->i_size = info->cbObject;
+	inode->i_blkbits = 12;	/* 4096 byte blocks */
+	/* i_blocks always in units of 512 bytes! */
+	allocated = info->cbAllocated + 511;
+	do_div(allocated, 512);
+	inode->i_blocks = allocated;
+
+	sf_timespec_from_vbox(&inode->i_atime, &info->AccessTime);
+	sf_timespec_from_vbox(&inode->i_ctime, &info->ChangeTime);
+	sf_timespec_from_vbox(&inode->i_mtime, &info->ModificationTime);
+}
+
+/*
+ * Look up the object at [path] relative to the root of shared folder
+ * mapping [sf_g] and store its object information in [result].
+ *
+ * @caller      name of the calling function; currently unused, kept for
+ *              interface compatibility
+ * @ok_to_fail  non-zero when a lookup failure is expected; currently
+ *              unused, kept for interface compatibility
+ *
+ * Returns 0 on success or a negative errno value.
+ */
+int sf_stat(const char *caller, struct sf_glob_info *sf_g,
+	    SHFLSTRING * path, struct shfl_fsobjinfo *result, int ok_to_fail)
+{
+	struct shfl_createparms params = {};
+	int rc;
+
+	params.Handle = SHFL_HANDLE_NIL;
+	/* Pure lookup, never create anything on the host. */
+	params.CreateFlags = SHFL_CF_LOOKUP | SHFL_CF_ACT_FAIL_IF_NEW;
+	rc = vboxsf_create(sf_g->root, path, &params);
+	if (rc == VERR_INVALID_NAME) {
+		/* this can happen for names like 'foo*' on a Windows host */
+		return -ENOENT;
+	}
+	if (rc < 0) {
+		/*
+		 * Translate the VBox status code like the rest of this file
+		 * does, instead of collapsing every error to -EPROTO.
+		 */
+		return -vbg_status_code_to_errno(rc);
+	}
+	if (params.Result != SHFL_FILE_EXISTS)
+		return -ENOENT;
+
+	*result = params.Info;
+	return 0;
+}
+
+/*
+ * Re-query the host for the object backing [dentry] and refresh the
+ * inode attributes from the result, unless the cached attributes are
+ * still within their time-to-live.
+ *
+ * Returns 0 on success, -EINVAL when there is no inode, or the error
+ * from the host lookup.
+ */
+int sf_inode_revalidate(struct dentry *dentry)
+{
+	struct sf_glob_info *sf_g;
+	struct sf_inode_info *sf_i;
+	struct shfl_fsobjinfo info;
+	int err;
+
+	if (!dentry || !dentry->d_inode)
+		return -EINVAL;
+
+	sf_g = GET_GLOB_INFO(dentry->d_inode->i_sb);
+	sf_i = GET_INODE_INFO(dentry->d_inode);
+
+	BUG_ON(!sf_g);
+	BUG_ON(!sf_i);
+
+	/* Trust the cached attributes while they are younger than the ttl. */
+	if (!sf_i->force_restat && jiffies - dentry->d_time < sf_g->ttl)
+		return 0;
+
+	err = sf_stat(__func__, sf_g, sf_i->path, &info, 1);
+	if (err)
+		return err;
+
+	dentry->d_time = jiffies;
+	sf_init_inode(sf_g, dentry->d_inode, &info);
+	return 0;
+}
+
+/*
+ * dentry_operations::d_revalidate callback, called during name
+ * resolution to check whether the cached [dentry] is still usable.
+ * Returns 1 when valid, 0 when it must be looked up again, and
+ * -ECHILD when called in RCU-walk mode (we always need to sleep).
+ */
+static int sf_dentry_revalidate(struct dentry *dentry, unsigned flags)
+{
+	if (flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	/* The dentry is valid exactly when the inode could be revalidated. */
+	return sf_inode_revalidate(dentry) == 0;
+}
+
+/*
+ * inode_operations::getattr: refresh the inode from the host, then
+ * report its attributes through [kstat].
+ */
+int sf_getattr(const struct path *path, struct kstat *kstat, u32 request_mask,
+	       unsigned int flags)
+{
+	struct dentry *dentry = path->dentry;
+	int err = sf_inode_revalidate(dentry);
+
+	if (err)
+		return err;
+
+	generic_fillattr(dentry->d_inode, kstat);
+	return 0;
+}
+
+/*
+ * inode_operations::setattr: push attribute changes (mode, atime/mtime
+ * and/or size) for [dentry] to the host.  The object is opened with
+ * attribute-write access, the requested changes are sent with
+ * SHFL_INFO_SET calls, and the inode is revalidated afterwards so the
+ * cached attributes match the host again.
+ * Returns 0 on success or a negative errno value.
+ */
+int sf_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+	struct sf_glob_info *sf_g;
+	struct sf_inode_info *sf_i;
+	struct shfl_createparms params = {};
+	struct shfl_fsobjinfo info = {};
+	uint32_t cbBuffer;
+	int rc, err;
+
+	sf_g = GET_GLOB_INFO(dentry->d_inode->i_sb);
+	sf_i = GET_INODE_INFO(dentry->d_inode);
+	err = 0;
+
+	params.Handle = SHFL_HANDLE_NIL;
+	params.CreateFlags = SHFL_CF_ACT_OPEN_IF_EXISTS
+	    | SHFL_CF_ACT_FAIL_IF_NEW | SHFL_CF_ACCESS_ATTR_WRITE;
+
+	/* this is at least required for Posix hosts */
+	if (iattr->ia_valid & ATTR_SIZE)
+		params.CreateFlags |= SHFL_CF_ACCESS_WRITE;
+
+	rc = vboxsf_create(sf_g->root, sf_i->path, &params);
+	if (rc < 0) {
+		err = -vbg_status_code_to_errno(rc);
+		goto fail2;
+	}
+	if (params.Result != SHFL_FILE_EXISTS) {
+		err = -ENOENT;
+		goto fail1;
+	}
+/* Translate one S_* bit from iattr->ia_mode into the SHFL_UNIX_* bit. */
+#define mode_set(r) ((iattr->ia_mode & (S_##r)) ? SHFL_UNIX_##r : 0)
+
+	/*
+	 * Setting the file size and setting the other attributes has to
+	 * be handled separately.
+	 */
+	if (iattr->ia_valid & (ATTR_MODE | ATTR_ATIME | ATTR_MTIME)) {
+		if (iattr->ia_valid & ATTR_MODE) {
+			info.Attr.fMode = mode_set(ISUID);
+			info.Attr.fMode |= mode_set(ISGID);
+			info.Attr.fMode |= mode_set(IRUSR);
+			info.Attr.fMode |= mode_set(IWUSR);
+			info.Attr.fMode |= mode_set(IXUSR);
+			info.Attr.fMode |= mode_set(IRGRP);
+			info.Attr.fMode |= mode_set(IWGRP);
+			info.Attr.fMode |= mode_set(IXGRP);
+			info.Attr.fMode |= mode_set(IROTH);
+			info.Attr.fMode |= mode_set(IWOTH);
+			info.Attr.fMode |= mode_set(IXOTH);
+
+			/* fMode also carries the object type for the host. */
+			if (iattr->ia_mode & S_IFDIR)
+				info.Attr.fMode |= SHFL_TYPE_DIRECTORY;
+			else
+				info.Attr.fMode |= SHFL_TYPE_FILE;
+		}
+
+		if (iattr->ia_valid & ATTR_ATIME)
+			sf_timespec_to_vbox(&info.AccessTime, &iattr->ia_atime);
+		if (iattr->ia_valid & ATTR_MTIME)
+			sf_timespec_to_vbox(&info.ModificationTime,
+					    &iattr->ia_mtime);
+		/* ignore ctime (inode change time) as it can't be set from userland anyway */
+
+		cbBuffer = sizeof(info);
+		rc = vboxsf_fsinfo(sf_g->root, params.Handle,
+				   SHFL_INFO_SET | SHFL_INFO_FILE, &cbBuffer,
+				   (PSHFLDIRINFO) & info);
+		if (rc < 0) {
+			err = -vbg_status_code_to_errno(rc);
+			goto fail1;
+		}
+	}
+#undef mode_set
+
+	if (iattr->ia_valid & ATTR_SIZE) {
+		memset(&info, 0, sizeof(info));
+		info.cbObject = iattr->ia_size;
+		cbBuffer = sizeof(info);
+		rc = vboxsf_fsinfo(sf_g->root, params.Handle,
+				   SHFL_INFO_SET | SHFL_INFO_SIZE, &cbBuffer,
+				   (PSHFLDIRINFO) & info);
+		if (rc < 0) {
+			err = -vbg_status_code_to_errno(rc);
+			goto fail1;
+		}
+	}
+
+	vboxsf_close(sf_g->root, params.Handle);
+
+	/* Pull the now-current attributes back into the inode. */
+	return sf_inode_revalidate(dentry);
+
+fail1:
+	vboxsf_close(sf_g->root, params.Handle);
+fail2:
+	return err;
+}
+
+/*
+ * Build the full shared-folder path for entry [d_name] (length [d_len]
+ * bytes, excluding the terminating '\0'; assumed '\0'-terminated, which
+ * holds for dentry names) inside the directory described by [sf_i].
+ * On success a freshly kmalloc'ed SHFLSTRING is stored in *[result];
+ * the caller must kfree() it.
+ *
+ * Returns 0 on success, -ENAMETOOLONG when the combined path does not
+ * fit the 16-bit SHFLSTRING length fields, or -ENOMEM.
+ */
+static int sf_make_path(const char *caller, struct sf_inode_info *sf_i,
+			const char *d_name, size_t d_len, SHFLSTRING ** result)
+{
+	size_t path_len, shflstring_len;
+	SHFLSTRING *tmp;
+	uint16_t p_len;
+	uint8_t *p_name;
+	int fRoot = 0;
+
+	p_len = sf_i->path->u16Length;
+	p_name = sf_i->path->String.utf8;
+
+	if (p_len == 1 && *p_name == '/') {
+		/* Parent is the root, no separator needed. */
+		path_len = d_len + 1;
+		fRoot = 1;
+	} else {
+		/* lengths of constituents plus terminating zero plus slash */
+		path_len = p_len + d_len + 2;
+	}
+	/*
+	 * SHFLSTRING stores the length in a uint16_t; check this in both
+	 * branches (the original only checked the non-root case).
+	 */
+	if (path_len > 0xffff)
+		return -ENAMETOOLONG;
+
+	shflstring_len = offsetof(SHFLSTRING, String.utf8) + path_len;
+	tmp = kmalloc(shflstring_len, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	tmp->u16Length = path_len - 1;
+	tmp->u16Size = path_len;
+
+	if (fRoot) {
+		memcpy(&tmp->String.utf8[0], d_name, d_len + 1);
+	} else {
+		memcpy(&tmp->String.utf8[0], p_name, p_len);
+		tmp->String.utf8[p_len] = '/';
+		memcpy(&tmp->String.utf8[p_len + 1], d_name, d_len);
+		tmp->String.utf8[p_len + 1 + d_len] = '\0';
+	}
+
+	*result = tmp;
+	return 0;
+}
+
+/**
+ * [dentry] contains a name encoded in the coding system that corresponds
+ * to [sf_g]->nls; convert it to UTF8 here and pass it down to
+ * [sf_make_path] which will allocate a SHFLSTRING and fill it in.
+ * On success the caller owns *[result] and must kfree() it.
+ * Returns 0 on success or a negative errno value.
+ */
+int sf_path_from_dentry(const char *caller, struct sf_glob_info *sf_g,
+			struct sf_inode_info *sf_i, struct dentry *dentry,
+			SHFLSTRING ** result)
+{
+	int err;
+	const char *d_name;
+	size_t d_len;
+	const char *name;
+	size_t len = 0;
+
+	d_name = dentry->d_name.name;
+	d_len = dentry->d_name.len;
+
+	if (sf_g->nls) {
+		size_t in_len, i, out_bound_len;
+		const char *in;
+		char *out;
+
+		in = d_name;
+		in_len = d_len;
+
+		out_bound_len = PATH_MAX;
+		out = kmalloc(out_bound_len, GFP_KERNEL);
+		/* The original dereferenced a NULL 'out' on allocation failure. */
+		if (!out)
+			return -ENOMEM;
+		name = out;
+
+		/* Convert one nls character at a time into UTF8. */
+		for (i = 0; i < d_len; ++i) {
+			wchar_t uni;
+			int nb;
+
+			nb = sf_g->nls->char2uni(in, in_len, &uni);
+			if (nb < 0) {
+				err = -EINVAL;
+				goto fail1;
+			}
+			in_len -= nb;
+			in += nb;
+
+			nb = utf32_to_utf8(uni, out, out_bound_len);
+			if (nb < 0) {
+				err = -EINVAL;
+				goto fail1;
+			}
+			out_bound_len -= nb;
+			out += nb;
+			len += nb;
+		}
+		if (len >= PATH_MAX - 1) {
+			err = -ENAMETOOLONG;
+			goto fail1;
+		}
+
+		*out = 0;
+	} else {
+		/* No nls table: the name is passed through unmodified. */
+		name = d_name;
+		len = d_len;
+	}
+
+	err = sf_make_path(caller, sf_i, name, len, result);
+	if (name != d_name)
+		kfree(name);
+
+	return err;
+
+fail1:
+	kfree(name);
+	return err;
+}
+
+/*
+ * Copy the UTF8 string [utf8_name] (length [utf8_len], '\0'-terminated)
+ * into [name], converting it to the mount's nls encoding when one is
+ * configured.  [name_bound_len] is the total size of the destination
+ * buffer, including room for the terminating '\0'.
+ *
+ * Returns 0 on success, -EINVAL for an invalid UTF8 sequence,
+ * -ENAMETOOLONG when the result does not fit, or a negative value
+ * returned by the nls uni2char() hook.
+ */
+int sf_nlscpy(struct sf_glob_info *sf_g,
+	      char *name, size_t name_bound_len,
+	      const unsigned char *utf8_name, size_t utf8_len)
+{
+	if (sf_g->nls) {
+		const char *in;
+		char *out;
+		size_t out_len;
+		size_t out_bound_len;
+		size_t in_bound_len;
+
+		in = utf8_name;
+		in_bound_len = utf8_len;
+
+		out = name;
+		out_len = 0;
+		out_bound_len = name_bound_len;
+
+		while (in_bound_len) {
+			int nb;
+			unicode_t uni;
+
+			nb = utf8_to_utf32(in, in_bound_len, &uni);
+			if (nb < 0)
+				return -EINVAL;
+
+			in += nb;
+			in_bound_len -= nb;
+
+			nb = sf_g->nls->uni2char(uni, out, out_bound_len);
+			if (nb < 0)
+				return nb;
+
+			out += nb;
+			out_bound_len -= nb;
+			out_len += nb;
+		}
+
+		/*
+		 * The original wrote the terminator unconditionally, which
+		 * overflows the buffer by one byte when the converted name
+		 * exactly fills it.
+		 */
+		if (out_bound_len < 1)
+			return -ENAMETOOLONG;
+		*out = 0;
+	} else {
+		if (utf8_len + 1 > name_bound_len)
+			return -ENAMETOOLONG;
+
+		memcpy(name, utf8_name, utf8_len + 1);
+	}
+	return 0;
+}
+
+/*
+ * Allocate one directory-listing buffer with a DIR_BUFFER_SIZE byte
+ * payload.  Returns NULL on allocation failure.
+ */
+static struct sf_dir_buf *sf_dir_buf_alloc(void)
+{
+	struct sf_dir_buf *dir_buf;
+
+	dir_buf = kmalloc(sizeof(*dir_buf), GFP_KERNEL);
+	if (!dir_buf)
+		return NULL;
+
+	dir_buf->buf = kmalloc(DIR_BUFFER_SIZE, GFP_KERNEL);
+	if (!dir_buf->buf) {
+		kfree(dir_buf);
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&dir_buf->head);
+	dir_buf->cEntries = 0;
+	dir_buf->cbUsed = 0;
+	dir_buf->cbFree = DIR_BUFFER_SIZE;
+	return dir_buf;
+}
+
+/* Unlink a buffer from its list and release both payload and header. */
+static void sf_dir_buf_free(struct sf_dir_buf *dir_buf)
+{
+	BUG_ON(!dir_buf || !dir_buf->buf);
+
+	list_del(&dir_buf->head);
+	kfree(dir_buf->buf);
+	kfree(dir_buf);
+}
+
+/**
+ * Free every buffer on the directory buffer list and the descriptor
+ * itself.
+ */
+void sf_dir_info_free(struct sf_dir_info *p)
+{
+	struct sf_dir_buf *b, *tmp;
+
+	/* Type-safe iterator instead of the open-coded list_entry() loop. */
+	list_for_each_entry_safe(b, tmp, &p->info_list, head)
+		sf_dir_buf_free(b);
+
+	kfree(p);
+}
+
+/**
+ * Empty (but do not free) the directory buffers: mark every buffer on
+ * the list as holding no entries again so it can be reused.
+ */
+void sf_dir_info_empty(struct sf_dir_info *p)
+{
+	struct sf_dir_buf *b;
+
+	/*
+	 * Nothing is removed from the list here, so the plain (non-_safe)
+	 * type-safe iterator is sufficient.
+	 */
+	list_for_each_entry(b, &p->info_list, head) {
+		b->cEntries = 0;
+		b->cbUsed = 0;
+		b->cbFree = DIR_BUFFER_SIZE;
+	}
+}
+
+/**
+ * Allocate and initialize a new, empty directory buffer descriptor.
+ * Returns NULL on allocation failure.
+ */
+struct sf_dir_info *sf_dir_info_alloc(void)
+{
+	struct sf_dir_info *dir_info = kmalloc(sizeof(*dir_info), GFP_KERNEL);
+
+	if (!dir_info)
+		return NULL;
+
+	INIT_LIST_HEAD(&dir_info->info_list);
+	return dir_info;
+}
+
+/**
+ * Search [sf_d] for a directory content buffer that is currently
+ * unused.  Returns the first empty buffer, or NULL if there is none.
+ */
+static struct sf_dir_buf *sf_get_empty_dir_buf(struct sf_dir_info *sf_d)
+{
+	struct sf_dir_buf *b;
+
+	/*
+	 * list_for_each_entry() never yields NULL, so the original
+	 * "if (!b) return NULL" check was dead code and has been dropped.
+	 */
+	list_for_each_entry(b, &sf_d->info_list, head) {
+		if (b->cbUsed == 0)
+			return b;
+	}
+
+	return NULL;
+}
+
+/*
+ * Read the complete directory listing for [handle] into the buffer
+ * chain of [sf_d], asking the host repeatedly until it reports
+ * VERR_NO_MORE_FILES.  Returns 0 on success or a negative errno.
+ */
+int sf_dir_read_all(struct sf_glob_info *sf_g, struct sf_inode_info *sf_i,
+		    struct sf_dir_info *sf_d, SHFLHANDLE handle)
+{
+	int err;
+	SHFLSTRING *mask;
+	struct sf_dir_buf *b;
+
+	/* List everything: directory path plus a "*" wildcard component. */
+	err = sf_make_path(__func__, sf_i, "*", 1, &mask);
+	if (err)
+		goto fail0;
+
+	for (;;) {
+		int rc;
+		void *buf;
+		uint32_t cbSize;
+		uint32_t cEntries;
+
+		/* Reuse an empty buffer if available, else grow the chain. */
+		b = sf_get_empty_dir_buf(sf_d);
+		if (!b) {
+			b = sf_dir_buf_alloc();
+			if (!b) {
+				err = -ENOMEM;
+				goto fail1;
+			}
+			list_add(&b->head, &sf_d->info_list);
+		}
+
+		buf = b->buf;
+		cbSize = b->cbFree;
+
+		rc = vboxsf_dirinfo(sf_g->root, handle, mask, 0, 0,
+				    &cbSize, buf, &cEntries);
+		switch (rc) {
+		case VINF_SUCCESS:
+			/* fallthrough */
+		case VERR_NO_MORE_FILES:
+			break;
+		case VERR_NO_TRANSLATION:
+			/* XXX */
+			break;
+		default:
+			err = -vbg_status_code_to_errno(rc);
+			goto fail1;
+		}
+
+		/* Account for what the host placed in this buffer. */
+		b->cEntries += cEntries;
+		b->cbFree -= cbSize;
+		b->cbUsed += cbSize;
+
+		/* A negative rc here is VERR_NO_MORE_FILES: listing done. */
+		if (rc < 0)
+			break;
+	}
+	err = 0;
+
+fail1:
+	kfree(mask);
+
+fail0:
+	return err;
+}
+
+/*
+ * Fill in [stat] for statfs from the host's shared folder volume
+ * information.  Returns 0 on success or a negative errno.
+ */
+int sf_get_volume_info(struct super_block *sb, struct kstatfs *stat)
+{
+	struct sf_glob_info *sf_g;
+	SHFLVOLINFO SHFLVolumeInfo;
+	u32 cbBuffer;
+	int rc;
+
+	sf_g = GET_GLOB_INFO(sb);
+	cbBuffer = sizeof(SHFLVolumeInfo);
+	rc = vboxsf_fsinfo(sf_g->root, 0,
+			   SHFL_INFO_GET | SHFL_INFO_VOLUME, &cbBuffer,
+			   (PSHFLDIRINFO)&SHFLVolumeInfo);
+	if (rc < 0)
+		return -vbg_status_code_to_errno(rc);
+
+	stat->f_type = NFS_SUPER_MAGIC;	/* XXX vboxsf type? */
+	stat->f_bsize = SHFLVolumeInfo.ulBytesPerAllocationUnit;
+
+	/*
+	 * NOTE(review): do_div() works on unsigned 64-bit values, but these
+	 * fields are declared s64 — confirm they can never be negative, or
+	 * switch to div_s64().
+	 */
+	do_div(SHFLVolumeInfo.ullTotalAllocationBytes,
+	       SHFLVolumeInfo.ulBytesPerAllocationUnit);
+	stat->f_blocks = SHFLVolumeInfo.ullTotalAllocationBytes;
+
+	do_div(SHFLVolumeInfo.ullAvailableAllocationBytes,
+	       SHFLVolumeInfo.ulBytesPerAllocationUnit);
+	stat->f_bfree = SHFLVolumeInfo.ullAvailableAllocationBytes;
+	stat->f_bavail = SHFLVolumeInfo.ullAvailableAllocationBytes;
+
+	/* The host does not report inode counts; fake plausible values. */
+	stat->f_files = 1000;
+	stat->f_ffree = 1000;	/* don't return 0 here since the guest may think
+				 * that it is not possible to create any more files */
+	stat->f_fsid.val[0] = 0;
+	stat->f_fsid.val[1] = 0;
+	stat->f_namelen = 255;
+	return 0;
+}
+
+/*
+ * dentry operations for vboxsf dentries.
+ * NOTE(review): could be const per kernel convention, but the extern
+ * declaration elsewhere would need updating too — confirm before
+ * constifying.
+ */
+struct dentry_operations sf_dentry_ops = {
+	.d_revalidate = sf_dentry_revalidate
+};
diff --git a/fs/vboxsf/vboxsf_wrappers.c b/fs/vboxsf/vboxsf_wrappers.c
new file mode 100644
index 000000000000..620b6c9b2829
--- /dev/null
+++ b/fs/vboxsf/vboxsf_wrappers.c
@@ -0,0 +1,542 @@
+/*
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * Wrapper functions for the shfl host calls.
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vbox_err.h>
+#include <linux/vbox_utils.h>
+#include "vboxsf_wrappers.h"
+
+/*
+ * Initialize the common HGCM call-info header of request @a for the
+ * shared-folder function with SHFL_FN_/SHFL_CPARMS_ suffix @b.
+ *
+ * NOTE(review): the third parameter @c is never used; every caller
+ * passes the token "pClient", which does not exist as a variable and
+ * is only swallowed by the macro.  Consider dropping the parameter.
+ */
+#define VBOX_INIT_CALL(a, b, c) \
+do { \
+	(a)->result = VINF_SUCCESS; \
+	(a)->u32ClientID = vboxsf_client_id; \
+	(a)->u32Function = SHFL_FN_##b; \
+	(a)->cParms = SHFL_CPARMS_##b; \
+} while (0)
+
+/* globals */
+/* HGCM client id of our connection to the host shared-folders service */
+static u32 vboxsf_client_id;
+
+/*
+ * Connect to the "VBoxSharedFolders" HGCM service on the host and store
+ * the resulting client id in vboxsf_client_id.  Returns a vbox status
+ * code.
+ */
+int vboxsf_connect(void)
+{
+	HGCMServiceLocation loc;
+	struct VBOXGUESTDEVEXT *gdev;
+	int ret;
+
+	gdev = vbg_get_gdev();
+	if (IS_ERR(gdev))
+		return VERR_NOT_SUPPORTED; /* No guest-device */
+
+	loc.type = VMMDevHGCMLoc_LocalHost_Existing;
+	strcpy(loc.u.host.achName, "VBoxSharedFolders");
+
+	ret = vbg_hgcm_connect(gdev, &loc, &vboxsf_client_id);
+	vbg_put_gdev(gdev);
+
+	return ret;
+}
+
+/*
+ * Tear down the HGCM connection established by vboxsf_connect().
+ * Nothing to do if the guest device has already disappeared.
+ */
+void vboxsf_disconnect(void)
+{
+	struct VBOXGUESTDEVEXT *gdev = vbg_get_gdev();
+
+	if (IS_ERR(gdev))
+		return; /* guest-device is gone, already disconnected */
+
+	vbg_hgcm_disconnect(gdev, vboxsf_client_id);
+	vbg_put_gdev(gdev);
+}
+
+/*
+ * Issue a fully prepared HGCM request to the host.
+ *
+ * Returns the transport-level vbox status code if the call itself
+ * failed; otherwise returns the shared-folder function's own result
+ * code (info->result), which is also a vbox status code.
+ */
+static int vboxsf_hgcm_call(void *data, u32 len)
+{
+	VBoxGuestHGCMCallInfo *info = data;
+	struct VBOXGUESTDEVEXT *gdev;
+	int rc;
+
+	gdev = vbg_get_gdev();
+	if (IS_ERR(gdev))
+		return VERR_DEV_IO_ERROR; /* guest-dev removed underneath us */
+
+	rc = vbg_hgcm_call(gdev, info, len, U32_MAX, false);
+	vbg_put_gdev(gdev);
+
+	/* transport succeeded; report the function's own result instead */
+	if (rc >= 0)
+		rc = info->result;
+
+	return rc;
+}
+
+/*
+ * Query the host for the list of available shared-folder mappings.
+ * @pcMappings is in/out: in = capacity of @paMappings, out = number of
+ * mappings actually returned.  Returns a vbox status code.
+ */
+int vboxsf_query_mappings(SHFLMAPPING paMappings[], u32 *pcMappings)
+{
+	int rc;
+	VBoxSFQueryMappings data;
+
+	VBOX_INIT_CALL(&data.callInfo, QUERY_MAPPINGS, pClient);
+
+	data.flags.type = VMMDevHGCMParmType_32bit;
+	data.flags.u.value32 = SHFL_MF_UCS2;
+
+	data.numberOfMappings.type = VMMDevHGCMParmType_32bit;
+	data.numberOfMappings.u.value32 = *pcMappings;
+
+	data.mappings.type = VMMDevHGCMParmType_LinAddr;
+	data.mappings.u.Pointer.size = sizeof(SHFLMAPPING) * *pcMappings;
+	data.mappings.u.Pointer.u.linearAddr = (uintptr_t)&paMappings[0];
+
+	rc = vboxsf_hgcm_call(&data, sizeof(data));
+	if (rc >= 0)
+		*pcMappings = data.numberOfMappings.u.value32;
+
+	return rc;
+}
+
+/*
+ * Retrieve the host-side name of the shared folder identified by @root
+ * into @pString (a buffer of @size bytes).  Returns a vbox status code.
+ */
+int vboxsf_query_mapname(SHFLROOT root, SHFLSTRING *pString, u32 size)
+{
+	VBoxSFQueryMapName data;
+
+	VBOX_INIT_CALL(&data.callInfo, QUERY_MAP_NAME, pClient);
+
+	data.root.type = VMMDevHGCMParmType_32bit;
+	data.root.u.value32 = root;
+
+	data.name.type = VMMDevHGCMParmType_LinAddr;
+	data.name.u.Pointer.size = size;
+	data.name.u.Pointer.u.linearAddr = (uintptr_t)pString;
+
+	return vboxsf_hgcm_call(&data, sizeof(data));
+}
+
+/*
+ * Map the shared folder named @szFolderName; on success the root id to
+ * use in subsequent calls is stored in @root.  Falls back to the legacy
+ * MAP_FOLDER_OLD call (which lacks the fCaseSensitive parameter) for
+ * old hosts.  Returns a vbox status code.
+ */
+int vboxsf_map_folder(PSHFLSTRING szFolderName, SHFLROOT *root)
+{
+	int rc;
+	VBoxSFMapFolder data;
+
+	VBOX_INIT_CALL(&data.callInfo, MAP_FOLDER, pClient);
+
+	data.path.type = VMMDevHGCMParmType_LinAddr;
+	data.path.u.Pointer.size = ShflStringSizeOfBuffer(szFolderName);
+	data.path.u.Pointer.u.linearAddr = (uintptr_t)szFolderName;
+
+	data.root.type = VMMDevHGCMParmType_32bit;
+	data.root.u.value32 = 0;
+
+	data.delimiter.type = VMMDevHGCMParmType_32bit;
+	data.delimiter.u.value32 = '/';
+
+	data.fCaseSensitive.type = VMMDevHGCMParmType_32bit;
+	data.fCaseSensitive.u.value32 = 1;
+
+	rc = vboxsf_hgcm_call(&data, sizeof(data));
+	if (rc >= 0) {
+		*root = data.root.u.value32;
+	} else if (rc == VERR_NOT_IMPLEMENTED) {
+		/* try the legacy interface too; temporary to assure backwards compatibility */
+		VBoxSFMapFolder_Old OldData;
+
+		VBOX_INIT_CALL(&OldData.callInfo, MAP_FOLDER_OLD, pClient);
+
+		OldData.path.type = VMMDevHGCMParmType_LinAddr;
+		OldData.path.u.Pointer.size =
+		    ShflStringSizeOfBuffer(szFolderName);
+		OldData.path.u.Pointer.u.linearAddr = (uintptr_t)szFolderName;
+
+		OldData.root.type = VMMDevHGCMParmType_32bit;
+		OldData.root.u.value32 = 0;
+
+		OldData.delimiter.type = VMMDevHGCMParmType_32bit;
+		OldData.delimiter.u.value32 = '/';
+
+		rc = vboxsf_hgcm_call(&OldData, sizeof(OldData));
+		if (rc >= 0)
+			*root = OldData.root.u.value32;
+	}
+	return rc;
+}
+
+/* Unmap the shared folder previously mapped under root id @root. */
+int vboxsf_unmap_folder(SHFLROOT root)
+{
+	VBoxSFUnmapFolder data;
+
+	VBOX_INIT_CALL(&data.callInfo, UNMAP_FOLDER, pClient);
+
+	data.root.type = VMMDevHGCMParmType_32bit;
+	data.root.u.value32 = root;
+
+	return vboxsf_hgcm_call(&data, sizeof(data));
+}
+
+/*
+ * Create or open a file/folder in the shared folder @root.
+ * @pCreateParms is in/out: on return its Handle and Result fields tell
+ * whether and how the object was opened (see shfl_createparms).
+ * Returns a vbox status code; see the prototype in vboxsf_wrappers.h
+ * for the full error-reporting contract.
+ */
+int vboxsf_create(SHFLROOT root, PSHFLSTRING pParsedPath,
+		  struct shfl_createparms *pCreateParms)
+{
+	/** @todo copy buffers to physical or mapped memory. */
+	VBoxSFCreate data;
+
+	VBOX_INIT_CALL(&data.callInfo, CREATE, pClient);
+
+	data.root.type = VMMDevHGCMParmType_32bit;
+	data.root.u.value32 = root;
+
+	data.path.type = VMMDevHGCMParmType_LinAddr;
+	data.path.u.Pointer.size = ShflStringSizeOfBuffer(pParsedPath);
+	data.path.u.Pointer.u.linearAddr = (uintptr_t)pParsedPath;
+
+	data.parms.type = VMMDevHGCMParmType_LinAddr;
+	data.parms.u.Pointer.size = sizeof(struct shfl_createparms);
+	data.parms.u.Pointer.u.linearAddr = (uintptr_t)pCreateParms;
+
+	return vboxsf_hgcm_call(&data, sizeof(data));
+}
+
+/* Close host file/folder handle @Handle in shared folder @root. */
+int vboxsf_close(SHFLROOT root, SHFLHANDLE Handle)
+{
+	VBoxSFClose data;
+
+	VBOX_INIT_CALL(&data.callInfo, CLOSE, pClient);
+
+	data.root.type = VMMDevHGCMParmType_32bit;
+	data.root.u.value32 = root;
+
+	data.handle.type = VMMDevHGCMParmType_64bit;
+	data.handle.u.value64 = Handle;
+
+	return vboxsf_hgcm_call(&data, sizeof(data));
+}
+
+/*
+ * Remove the file, directory or symlink at @pParsedPath.  @flags is a
+ * combination of SHFL_REMOVE_* values selecting what may be removed.
+ */
+int vboxsf_remove(SHFLROOT root, PSHFLSTRING pParsedPath, u32 flags)
+{
+	VBoxSFRemove data;
+
+	VBOX_INIT_CALL(&data.callInfo, REMOVE, pClient);
+
+	data.root.type = VMMDevHGCMParmType_32bit;
+	data.root.u.value32 = root;
+
+	data.path.type = VMMDevHGCMParmType_LinAddr_In;
+	data.path.u.Pointer.size = ShflStringSizeOfBuffer(pParsedPath);
+	data.path.u.Pointer.u.linearAddr = (uintptr_t)pParsedPath;
+
+	data.flags.type = VMMDevHGCMParmType_32bit;
+	data.flags.u.value32 = flags;
+
+	return vboxsf_hgcm_call(&data, sizeof(data));
+}
+
+/*
+ * Rename/move @pSrcPath to @pDestPath within shared folder @root.
+ * @flags is a combination of SHFL_RENAME_* values.
+ */
+int vboxsf_rename(SHFLROOT root, PSHFLSTRING pSrcPath, PSHFLSTRING pDestPath,
+		  u32 flags)
+{
+	VBoxSFRename data;
+
+	VBOX_INIT_CALL(&data.callInfo, RENAME, pClient);
+
+	data.root.type = VMMDevHGCMParmType_32bit;
+	data.root.u.value32 = root;
+
+	data.src.type = VMMDevHGCMParmType_LinAddr_In;
+	data.src.u.Pointer.size = ShflStringSizeOfBuffer(pSrcPath);
+	data.src.u.Pointer.u.linearAddr = (uintptr_t)pSrcPath;
+
+	data.dest.type = VMMDevHGCMParmType_LinAddr_In;
+	data.dest.u.Pointer.size = ShflStringSizeOfBuffer(pDestPath);
+	data.dest.u.Pointer.u.linearAddr = (uintptr_t)pDestPath;
+
+	data.flags.type = VMMDevHGCMParmType_32bit;
+	data.flags.u.value32 = flags;
+
+	return vboxsf_hgcm_call(&data, sizeof(data));
+}
+
+/*
+ * Read from open file @hFile at @offset into @pBuffer.
+ * @pcbBuffer is in/out: in = bytes requested, out = bytes actually read.
+ * Returns a vbox status code.
+ */
+int vboxsf_read(SHFLROOT root, SHFLHANDLE hFile, u64 offset,
+		u32 *pcbBuffer, u8 *pBuffer)
+{
+	int rc;
+	VBoxSFRead data;
+
+	VBOX_INIT_CALL(&data.callInfo, READ, pClient);
+
+	data.root.type = VMMDevHGCMParmType_32bit;
+	data.root.u.value32 = root;
+
+	data.handle.type = VMMDevHGCMParmType_64bit;
+	data.handle.u.value64 = hFile;
+	data.offset.type = VMMDevHGCMParmType_64bit;
+	data.offset.u.value64 = offset;
+	data.cb.type = VMMDevHGCMParmType_32bit;
+	data.cb.u.value32 = *pcbBuffer;
+	data.buffer.type = VMMDevHGCMParmType_LinAddr_Out;
+	data.buffer.u.Pointer.size = *pcbBuffer;
+	data.buffer.u.Pointer.u.linearAddr = (uintptr_t)pBuffer;
+
+	rc = vboxsf_hgcm_call(&data, sizeof(data));
+	if (rc >= 0)
+		*pcbBuffer = data.cb.u.value32;
+
+	return rc;
+}
+
+/*
+ * Write to open file @hFile at @offset from @pBuffer.
+ * @pcbBuffer is in/out: in = bytes to write, out = bytes actually
+ * written.  Returns a vbox status code.
+ */
+int vboxsf_write(SHFLROOT root, SHFLHANDLE hFile, u64 offset,
+		 u32 *pcbBuffer, u8 *pBuffer)
+{
+	int rc;
+	VBoxSFWrite data;
+
+	VBOX_INIT_CALL(&data.callInfo, WRITE, pClient);
+
+	data.root.type = VMMDevHGCMParmType_32bit;
+	data.root.u.value32 = root;
+
+	data.handle.type = VMMDevHGCMParmType_64bit;
+	data.handle.u.value64 = hFile;
+	data.offset.type = VMMDevHGCMParmType_64bit;
+	data.offset.u.value64 = offset;
+	data.cb.type = VMMDevHGCMParmType_32bit;
+	data.cb.u.value32 = *pcbBuffer;
+	data.buffer.type = VMMDevHGCMParmType_LinAddr_In;
+	data.buffer.u.Pointer.size = *pcbBuffer;
+	data.buffer.u.Pointer.u.linearAddr = (uintptr_t)pBuffer;
+
+	rc = vboxsf_hgcm_call(&data, sizeof(data));
+	if (rc >= 0)
+		*pcbBuffer = data.cb.u.value32;
+
+	return rc;
+}
+
+/*
+ * Write to open file @hFile at @offset from a physically contiguous
+ * buffer at physical address @PhysBuffer, passing the data to the host
+ * as an HGCM page list rather than a linear address.
+ * @pcbBuffer is in/out: in = bytes to write, out = bytes actually
+ * written.  Returns a vbox status code.
+ */
+int vboxsf_write_physcont(SHFLROOT root, SHFLHANDLE hFile, u64 offset,
+			  u32 *pcbBuffer, u64 PhysBuffer)
+{
+	u32 cPages, cbData, cbToWrite = *pcbBuffer;
+	HGCMPageListInfo *pPgLst;
+	VBoxSFWrite *pData;
+	u32 iPage;
+	int rc;
+
+	/* Pages spanned by the buffer, accounting for the in-page offset */
+	cPages =
+	    PAGE_ALIGN((PhysBuffer & ~PAGE_MASK) + cbToWrite) >> PAGE_SHIFT;
+	/* Request struct immediately followed by the page-list info */
+	cbData =
+	    sizeof(VBoxSFWrite) + offsetof(HGCMPageListInfo, aPages[cPages]);
+	pData = kmalloc(cbData, GFP_KERNEL);
+	if (!pData)
+		return VERR_NO_TMP_MEMORY;
+
+	VBOX_INIT_CALL(&pData->callInfo, WRITE, pClient);
+
+	pData->root.type = VMMDevHGCMParmType_32bit;
+	pData->root.u.value32 = root;
+
+	pData->handle.type = VMMDevHGCMParmType_64bit;
+	pData->handle.u.value64 = hFile;
+	pData->offset.type = VMMDevHGCMParmType_64bit;
+	pData->offset.u.value64 = offset;
+	pData->cb.type = VMMDevHGCMParmType_32bit;
+	pData->cb.u.value32 = cbToWrite;
+	pData->buffer.type = VMMDevHGCMParmType_PageList;
+	pData->buffer.u.PageList.size = cbToWrite;
+	pData->buffer.u.PageList.offset = sizeof(VBoxSFWrite);
+
+	pPgLst = (HGCMPageListInfo *)(pData + 1);
+	pPgLst->flags = VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
+	pPgLst->offFirstPage = (u16)(PhysBuffer & ~PAGE_MASK);
+	pPgLst->cPages = cPages;
+	/*
+	 * Clear the in-page offset bits; this is the clear spelling of the
+	 * former "~(u64)~PAGE_MASK" expression (identical 64-bit mask).
+	 */
+	PhysBuffer &= ~(u64)(PAGE_SIZE - 1);
+	for (iPage = 0; iPage < cPages; iPage++, PhysBuffer += PAGE_SIZE)
+		pPgLst->aPages[iPage] = PhysBuffer;
+
+	rc = vboxsf_hgcm_call(pData, cbData);
+	if (rc >= 0)
+		*pcbBuffer = pData->cb.u.value32;
+
+	kfree(pData);
+	return rc;
+}
+
+/* Flush host-side buffered data of open file @hFile to the medium. */
+int vboxsf_flush(SHFLROOT root, SHFLHANDLE hFile)
+{
+	VBoxSFFlush data;
+
+	VBOX_INIT_CALL(&data.callInfo, FLUSH, pClient);
+
+	data.root.type = VMMDevHGCMParmType_32bit;
+	data.root.u.value32 = root;
+
+	data.handle.type = VMMDevHGCMParmType_64bit;
+	data.handle.u.value64 = hFile;
+
+	return vboxsf_hgcm_call(&data, sizeof(data));
+}
+
+/*
+ * List directory entries of open directory @hFile into @pBuffer.
+ * @ParsedPath may carry a wildcard pattern (or be NULL).
+ * @pcbBuffer is in/out (buffer size / bytes used); @pcFiles receives
+ * the number of entries returned.  Note both outputs are updated even
+ * when the call fails.  Returns a vbox status code (VERR_NO_MORE_FILES
+ * when the listing is exhausted).
+ */
+int vboxsf_dirinfo(SHFLROOT root, SHFLHANDLE hFile, PSHFLSTRING ParsedPath,
+		   u32 flags, u32 index, u32 *pcbBuffer,
+		   PSHFLDIRINFO pBuffer, u32 *pcFiles)
+{
+	int rc;
+	VBoxSFList data;
+
+	VBOX_INIT_CALL(&data.callInfo, LIST, pClient);
+
+	data.root.type = VMMDevHGCMParmType_32bit;
+	data.root.u.value32 = root;
+
+	data.handle.type = VMMDevHGCMParmType_64bit;
+	data.handle.u.value64 = hFile;
+	data.flags.type = VMMDevHGCMParmType_32bit;
+	data.flags.u.value32 = flags;
+	data.cb.type = VMMDevHGCMParmType_32bit;
+	data.cb.u.value32 = *pcbBuffer;
+	data.path.type = VMMDevHGCMParmType_LinAddr_In;
+	data.path.u.Pointer.size =
+	    ParsedPath ? ShflStringSizeOfBuffer(ParsedPath) : 0;
+	data.path.u.Pointer.u.linearAddr = (uintptr_t)ParsedPath;
+
+	data.buffer.type = VMMDevHGCMParmType_LinAddr_Out;
+	data.buffer.u.Pointer.size = *pcbBuffer;
+	data.buffer.u.Pointer.u.linearAddr = (uintptr_t)pBuffer;
+
+	data.resumePoint.type = VMMDevHGCMParmType_32bit;
+	data.resumePoint.u.value32 = index;
+	data.cFiles.type = VMMDevHGCMParmType_32bit;
+	data.cFiles.u.value32 = 0;	/* out parameters only */
+
+	rc = vboxsf_hgcm_call(&data, sizeof(data));
+
+	*pcbBuffer = data.cb.u.value32;
+	*pcFiles = data.cFiles.u.value32;
+
+	return rc;
+}
+
+/*
+ * Get or set object/volume information (SHFL_INFO_* in @flags) for
+ * @hFile.  @pcbBuffer is in/out; @pBuffer both supplies and receives
+ * data depending on @flags.  Returns a vbox status code.
+ */
+int vboxsf_fsinfo(SHFLROOT root, SHFLHANDLE hFile, u32 flags,
+		  u32 *pcbBuffer, PSHFLDIRINFO pBuffer)
+{
+	int rc;
+	VBoxSFInformation data;
+
+	VBOX_INIT_CALL(&data.callInfo, INFORMATION, pClient);
+
+	data.root.type = VMMDevHGCMParmType_32bit;
+	data.root.u.value32 = root;
+
+	data.handle.type = VMMDevHGCMParmType_64bit;
+	data.handle.u.value64 = hFile;
+	data.flags.type = VMMDevHGCMParmType_32bit;
+	data.flags.u.value32 = flags;
+	data.cb.type = VMMDevHGCMParmType_32bit;
+	data.cb.u.value32 = *pcbBuffer;
+	data.info.type = VMMDevHGCMParmType_LinAddr;
+	data.info.u.Pointer.size = *pcbBuffer;
+	data.info.u.Pointer.u.linearAddr = (uintptr_t)pBuffer;
+
+	rc = vboxsf_hgcm_call(&data, sizeof(data));
+	if (rc >= 0)
+		*pcbBuffer = data.cb.u.value32;
+
+	return rc;
+}
+
+/*
+ * Lock or unlock the byte range [@offset, @offset + @cbSize) of open
+ * file @hFile.  @fLock is a combination of SHFL_LOCK_* values.
+ */
+int vboxsf_lock(SHFLROOT root, SHFLHANDLE hFile, u64 offset,
+		u64 cbSize, u32 fLock)
+{
+	VBoxSFLock data;
+
+	VBOX_INIT_CALL(&data.callInfo, LOCK, pClient);
+
+	data.root.type = VMMDevHGCMParmType_32bit;
+	data.root.u.value32 = root;
+
+	data.handle.type = VMMDevHGCMParmType_64bit;
+	data.handle.u.value64 = hFile;
+	data.offset.type = VMMDevHGCMParmType_64bit;
+	data.offset.u.value64 = offset;
+	data.length.type = VMMDevHGCMParmType_64bit;
+	data.length.u.value64 = cbSize;
+
+	data.flags.type = VMMDevHGCMParmType_32bit;
+	data.flags.u.value32 = fLock;
+
+	return vboxsf_hgcm_call(&data, sizeof(data));
+}
+
+/* Tell the host that this client passes strings as UTF-8. */
+int vboxsf_set_utf8(void)
+{
+	VBoxGuestHGCMCallInfo callInfo;
+
+	VBOX_INIT_CALL(&callInfo, SET_UTF8, pClient);
+
+	return vboxsf_hgcm_call(&callInfo, sizeof(callInfo));
+}
+
+/*
+ * Read the target of the symlink at @pParsedPath into @pBuffer
+ * (@cbBuffer bytes).  Returns a vbox status code.
+ */
+int vboxsf_readlink(SHFLROOT root, PSHFLSTRING pParsedPath, u32 cbBuffer,
+		    u8 *pBuffer)
+{
+	VBoxSFReadLink data;
+
+	VBOX_INIT_CALL(&data.callInfo, READLINK, pClient);
+
+	data.root.type = VMMDevHGCMParmType_32bit;
+	data.root.u.value32 = root;
+
+	data.path.type = VMMDevHGCMParmType_LinAddr_In;
+	data.path.u.Pointer.size = ShflStringSizeOfBuffer(pParsedPath);
+	data.path.u.Pointer.u.linearAddr = (uintptr_t)pParsedPath;
+
+	data.buffer.type = VMMDevHGCMParmType_LinAddr_Out;
+	data.buffer.u.Pointer.size = cbBuffer;
+	data.buffer.u.Pointer.u.linearAddr = (uintptr_t)pBuffer;
+
+	return vboxsf_hgcm_call(&data, sizeof(data));
+}
+
+/*
+ * Create symlink @pNewPath pointing at @pOldPath; on success @pBuffer
+ * receives the fs-object info of the new link.  Returns a vbox status
+ * code.
+ */
+int vboxsf_symlink(SHFLROOT root, PSHFLSTRING pNewPath, PSHFLSTRING pOldPath,
+		   struct shfl_fsobjinfo *pBuffer)
+{
+	VBoxSFSymlink data;
+
+	VBOX_INIT_CALL(&data.callInfo, SYMLINK, pClient);
+
+	data.root.type = VMMDevHGCMParmType_32bit;
+	data.root.u.value32 = root;
+
+	data.newPath.type = VMMDevHGCMParmType_LinAddr_In;
+	data.newPath.u.Pointer.size = ShflStringSizeOfBuffer(pNewPath);
+	data.newPath.u.Pointer.u.linearAddr = (uintptr_t)pNewPath;
+
+	data.oldPath.type = VMMDevHGCMParmType_LinAddr_In;
+	data.oldPath.u.Pointer.size = ShflStringSizeOfBuffer(pOldPath);
+	data.oldPath.u.Pointer.u.linearAddr = (uintptr_t)pOldPath;
+
+	data.info.type = VMMDevHGCMParmType_LinAddr_Out;
+	data.info.u.Pointer.size = sizeof(struct shfl_fsobjinfo);
+	data.info.u.Pointer.u.linearAddr = (uintptr_t)pBuffer;
+
+	return vboxsf_hgcm_call(&data, sizeof(data));
+}
+
+/* Tell the host that this client can handle symlinks being shown. */
+int vboxsf_set_symlinks(void)
+{
+	VBoxGuestHGCMCallInfo callInfo;
+
+	VBOX_INIT_CALL(&callInfo, SET_SYMLINKS, pClient);
+
+	return vboxsf_hgcm_call(&callInfo, sizeof(callInfo));
+}
diff --git a/fs/vboxsf/vboxsf_wrappers.h b/fs/vboxsf/vboxsf_wrappers.h
new file mode 100644
index 000000000000..987b924a2c28
--- /dev/null
+++ b/fs/vboxsf/vboxsf_wrappers.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * Prototype declarations for the wrapper functions for the shfl host calls.
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef VBOXSF_WRAPPERS_H
+#define VBOXSF_WRAPPERS_H
+
+#include <linux/vboxguest.h>	/* For VBoxGuestHGCMCallInfo */
+#include "shfl_hostintf.h"
+
+/**
+ * @addtogroup grp_vboxguest_lib_r0
+ *
+ * Note all these functions (all functions prefixed with vboxsf_)
+ * return a vbox status code rather than a negative errno on error.
+ * @{
+ */
+
+int vboxsf_connect(void);
+void vboxsf_disconnect(void);
+
+int vboxsf_query_mappings(SHFLMAPPING paMappings[], u32 *pcMappings);
+int vboxsf_query_mapname(SHFLROOT root, SHFLSTRING *pString, u32 size);
+
+/**
+ * Create a new file or folder or open an existing one in a shared folder. Proxies
+ * to vbsfCreate in the host shared folder service.
+ *
+ * @returns IPRT status code, but see note below
+ * @param root Root of the shared folder in which to create the file
+ *             (the host-guest connection itself is implicit: the
+ *             module-global HGCM client id is used)
+ * @param pParsedPath The path of the file or folder relative to the shared
+ * folder
+ * @param pCreateParms Parameters for file/folder creation. See the
+ * structure description in shflsvc.h
+ * @retval pCreateParms See the structure description in shflsvc.h
+ *
+ * @note This function reports errors as follows. The return value is always
+ * VINF_SUCCESS unless an exceptional condition occurs - out of
+ * memory, invalid arguments, etc. If the file or folder could not be
+ * opened or created, pCreateParms->Handle will be set to
+ * SHFL_HANDLE_NIL on return. In this case the value in
+ * pCreateParms->Result provides information as to why (e.g.
+ * SHFL_FILE_EXISTS). pCreateParms->Result is also set on success
+ * as additional information.
+ */
+int vboxsf_create(SHFLROOT root, PSHFLSTRING pParsedPath,
+		  struct shfl_createparms *pCreateParms);
+
+int vboxsf_close(SHFLROOT root, SHFLHANDLE Handle);
+int vboxsf_remove(SHFLROOT root, PSHFLSTRING pParsedPath, u32 flags);
+int vboxsf_rename(SHFLROOT root, PSHFLSTRING pSrcPath, PSHFLSTRING pDestPath,
+		  u32 flags);
+int vboxsf_flush(SHFLROOT root, SHFLHANDLE hFile);
+
+int vboxsf_read(SHFLROOT root, SHFLHANDLE hFile, u64 offset,
+		u32 *pcbBuffer, u8 *pBuffer);
+int vboxsf_write(SHFLROOT root, SHFLHANDLE hFile, u64 offset,
+		 u32 *pcbBuffer, u8 *pBuffer);
+int vboxsf_write_physcont(SHFLROOT root, SHFLHANDLE hFile, u64 offset,
+			  u32 *pcbBuffer, u64 PhysBuffer);
+
+int vboxsf_lock(SHFLROOT root, SHFLHANDLE hFile, u64 offset,
+		u64 cbSize, u32 fLock);
+
+int vboxsf_dirinfo(SHFLROOT root, SHFLHANDLE hFile, PSHFLSTRING ParsedPath,
+		   u32 flags, u32 index, u32 *pcbBuffer,
+		   PSHFLDIRINFO pBuffer, u32 *pcFiles);
+int vboxsf_fsinfo(SHFLROOT root, SHFLHANDLE hFile, u32 flags,
+		  u32 *pcbBuffer, PSHFLDIRINFO pBuffer);
+
+int vboxsf_map_folder(PSHFLSTRING szFolderName, SHFLROOT *root);
+int vboxsf_unmap_folder(SHFLROOT root);
+
+int vboxsf_readlink(SHFLROOT root, PSHFLSTRING pParsedPath, u32 cbBuffer,
+		    u8 *pBuffer);
+int vboxsf_symlink(SHFLROOT root, PSHFLSTRING pNewPath, PSHFLSTRING pOldPath,
+		   struct shfl_fsobjinfo *pBuffer);
+
+int vboxsf_set_utf8(void);
+int vboxsf_set_symlinks(void);
+
+/** @} */
+
+#endif
diff --git a/fs/vboxsf/vbsfmount.h b/fs/vboxsf/vbsfmount.h
new file mode 100644
index 000000000000..1d5d12bc9166
--- /dev/null
+++ b/fs/vboxsf/vbsfmount.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * VirtualBox Guest Shared Folders: mount(2) parameter structure.
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef VBFS_MOUNT_H
+#define VBFS_MOUNT_H
+
+#define MAX_HOST_NAME 256
+#define MAX_NLS_NAME 32
+
+/* Linux constrains the size of the data mount argument to PAGE_SIZE - 1. */
+struct vbsf_mount_info_old {
+	char name[MAX_HOST_NAME];
+	char nls_name[MAX_NLS_NAME];
+	int uid;
+	int gid;
+	int ttl;
+};
+
+#define VBSF_MOUNT_SIGNATURE_BYTE_0 '\377'
+#define VBSF_MOUNT_SIGNATURE_BYTE_1 '\376'
+#define VBSF_MOUNT_SIGNATURE_BYTE_2 '\375'
+
+struct vbsf_mount_info_new {
+	char nullchar;		/* name cannot be '\0' -- we use this field
+				   to distinguish between the old structure
+				   and the new structure */
+	char signature[3];	/* signature */
+	int length;		/* length of the whole structure */
+	char name[MAX_HOST_NAME];	/* share name */
+	char nls_name[MAX_NLS_NAME];	/* name of an I/O charset */
+	int uid;		/* user ID for all entries, default 0=root */
+	int gid;		/* group ID for all entries, default 0=root */
+	int ttl;		/* time to live */
+	int dmode;		/* mode for directories if != 0xffffffff */
+	int fmode;		/* mode for regular files if != 0xffffffff */
+	int dmask;		/* umask applied to directories */
+	int fmask;		/* umask applied to regular files */
+};
+
+/* Parsed mount options as used by the mount.vboxsf helper */
+struct vbsf_mount_opts {
+	int uid;
+	int gid;
+	int ttl;
+	int dmode;
+	int fmode;
+	int dmask;
+	int fmask;
+	int ronly;
+	int sloppy;
+	int noexec;
+	int nodev;
+	int nosuid;
+	int remount;
+	char nls_name[MAX_NLS_NAME];
+	char *convertcp;
+};
+
+#endif /* vbsfmount.h */
diff --git a/fs/vboxsf/vfsmod.c b/fs/vboxsf/vfsmod.c
new file mode 100644
index 000000000000..49a77e44debe
--- /dev/null
+++ b/fs/vboxsf/vfsmod.c
@@ -0,0 +1,412 @@
+/*
+ * VirtualBox Guest Shared Folders support: Virtual File System.
+ *
+ * Module initialization/finalization
+ * File system registration/deregistration
+ * Superblock reading
+ * Few utility functions
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+/**
+ * @note Anyone wishing to make changes here might wish to take a look at
+ * http://www.atnf.csiro.au/people/rgooch/linux/vfs.txt
+ * which seems to be the closest there is to official documentation on
+ * writing filesystem drivers for Linux.
+ */
+
+#include <linux/module.h>
+#include <linux/nls.h>
+#include <linux/vbox_utils.h>
+#include "vbsfmount.h"
+#include "vfsmod.h"
+
+MODULE_DESCRIPTION("Oracle VM VirtualBox Module for Host File System Access");
+MODULE_AUTHOR("Oracle Corporation");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_FS("vboxsf");
+
+/* forward declarations */
+static struct super_operations sf_super_ops;	/* defined below */
+
+/* allocate global info, try to map host share */
+/*
+ * Allocate the per-mount sf_glob_info and map the host share.
+ *
+ * Handles both the new mount-info layout and (via the signature check)
+ * the legacy layout passed by old mount.vboxsf binaries.  On success
+ * *sf_gp owns the new sf_glob_info; free with sf_glob_free().
+ * Returns 0 or a negative errno.
+ */
+static int sf_glob_alloc(struct vbsf_mount_info_new *info,
+			 struct sf_glob_info **sf_gp)
+{
+	int err, rc;
+	SHFLSTRING *str_name;
+	size_t name_len, str_len;
+	struct sf_glob_info *sf_g;
+#ifdef CONFIG_NLS_DEFAULT
+	const char *nls_name = CONFIG_NLS_DEFAULT;
+#else
+	const char *nls_name = "";
+#endif
+
+	sf_g = kzalloc(sizeof(*sf_g), GFP_KERNEL);
+	if (!sf_g) {
+		err = -ENOMEM;
+		goto fail0;
+	}
+
+	if (info->nullchar != '\0'
+	    || info->signature[0] != VBSF_MOUNT_SIGNATURE_BYTE_0
+	    || info->signature[1] != VBSF_MOUNT_SIGNATURE_BYTE_1
+	    || info->signature[2] != VBSF_MOUNT_SIGNATURE_BYTE_2) {
+		/* An old version of mount.vboxsf made the syscall. Translate the
+		 * old parameters to the new structure. */
+		struct vbsf_mount_info_old *info_old =
+		    (struct vbsf_mount_info_old *)info;
+		/*
+		 * NOTE(review): static buffer is not safe against two
+		 * concurrent mounts using the legacy layout -- TODO confirm
+		 * mount serialization or move this to the heap.
+		 */
+		static struct vbsf_mount_info_new info_compat;
+
+		info = &info_compat;
+		memset(info, 0, sizeof(*info));
+		memcpy(&info->name, &info_old->name, MAX_HOST_NAME);
+		memcpy(&info->nls_name, &info_old->nls_name, MAX_NLS_NAME);
+		info->length = offsetof(struct vbsf_mount_info_new, dmode);
+		info->uid = info_old->uid;
+		info->gid = info_old->gid;
+		info->ttl = info_old->ttl;
+	}
+
+	/* Ensure the user-supplied strings are terminated */
+	info->name[sizeof(info->name) - 1] = 0;
+	info->nls_name[sizeof(info->nls_name) - 1] = 0;
+
+	name_len = strlen(info->name);
+	if (name_len > 0xfffe) {
+		err = -ENAMETOOLONG;
+		goto fail1;
+	}
+
+	str_len = offsetof(SHFLSTRING, String.utf8) + name_len + 1;
+	str_name = kmalloc(str_len, GFP_KERNEL);
+	if (!str_name) {
+		err = -ENOMEM;
+		goto fail1;
+	}
+
+	str_name->u16Length = name_len;
+	str_name->u16Size = name_len + 1;
+	memcpy(str_name->String.utf8, info->name, name_len + 1);
+
+	/* A mount option overrides the configured default charset */
+	if (info->nls_name[0])
+		nls_name = info->nls_name;
+
+	/* Load nls if not utf8 */
+	if (nls_name[0] && strcmp(nls_name, "utf8") != 0) {
+		/*
+		 * Load the resolved charset name; previously this passed
+		 * info->nls_name, which is empty when only CONFIG_NLS_DEFAULT
+		 * applies, so the default charset was never loaded.
+		 */
+		sf_g->nls = load_nls(nls_name);
+		if (!sf_g->nls) {
+			err = -EINVAL;
+			goto fail1;
+		}
+	} else {
+		sf_g->nls = NULL;
+	}
+
+	rc = vboxsf_map_folder(str_name, &sf_g->root);
+	kfree(str_name);
+
+	if (rc < 0) {
+		err = -EPROTO;
+		goto fail2;
+	}
+
+	sf_g->ttl = info->ttl;
+	sf_g->uid = info->uid;
+	sf_g->gid = info->gid;
+
+	if ((unsigned)info->length >= sizeof(struct vbsf_mount_info_new)) {
+		/* new fields */
+		sf_g->dmode = info->dmode;
+		sf_g->fmode = info->fmode;
+		sf_g->dmask = info->dmask;
+		sf_g->fmask = info->fmask;
+	} else {
+		sf_g->dmode = ~0;
+		sf_g->fmode = ~0;
+	}
+
+	*sf_gp = sf_g;
+	return 0;
+
+fail2:
+	if (sf_g->nls)
+		unload_nls(sf_g->nls);
+
+fail1:
+	kfree(sf_g);
+
+fail0:
+	return err;
+}
+
+/* unmap the share and free global info [sf_g] */
+static void sf_glob_free(struct sf_glob_info *sf_g)
+{
+	/* order matters: tell the host first, then drop local state */
+	vboxsf_unmap_folder(sf_g->root);
+
+	if (sf_g->nls)
+		unload_nls(sf_g->nls);
+
+	kfree(sf_g);
+}
+
+/**
+ * This is called when vfs mounts the fs and wants to read the super_block.
+ *
+ * Calls [sf_glob_alloc] to map the folder and allocate the global
+ * information structure, initializes [sb] and the root inode and dentry.
+ * Should respect [flags].
+ */
+static int sf_read_super(struct super_block *sb, void *data, int flags)
+{
+	int err;
+	struct dentry *droot;
+	struct inode *iroot;
+	struct sf_inode_info *sf_i;
+	struct sf_glob_info *sf_g;
+	struct shfl_fsobjinfo fsinfo;
+	struct vbsf_mount_info_new *info;
+
+	if (!data)
+		return -EINVAL;
+
+	info = data;
+
+	if (flags & MS_REMOUNT)
+		return -ENOSYS;
+
+	err = sf_glob_alloc(info, &sf_g);
+	if (err)
+		goto fail0;
+
+	/* kzalloc so force_restat and file start out cleared (kmalloc
+	 * previously left them uninitialized) */
+	sf_i = kzalloc(sizeof(*sf_i), GFP_KERNEL);
+	if (!sf_i) {
+		err = -ENOMEM;
+		goto fail1;
+	}
+
+	sf_i->handle = SHFL_HANDLE_NIL;
+	sf_i->path = kmalloc(sizeof(SHFLSTRING) + 1, GFP_KERNEL);
+	if (!sf_i->path) {
+		err = -ENOMEM;
+		goto fail2;
+	}
+
+	/* The root of the mapped share is always "/" */
+	sf_i->path->u16Length = 1;
+	sf_i->path->u16Size = 2;
+	sf_i->path->String.utf8[0] = '/';
+	sf_i->path->String.utf8[1] = 0;
+	sf_i->force_reread = 0;
+
+	err = sf_stat(__func__, sf_g, sf_i->path, &fsinfo, 0);
+	if (err)
+		goto fail3;
+
+	sb->s_magic = 0xface;
+	sb->s_blocksize = 1024;
+	sb->s_maxbytes = MAX_LFS_FILESIZE;
+	sb->s_op = &sf_super_ops;
+
+	iroot = iget_locked(sb, 0);
+	if (!iroot) {
+		err = -ENOMEM;	/* XXX */
+		goto fail3;
+	}
+
+	sf_init_inode(sf_g, iroot, &fsinfo);
+	SET_INODE_INFO(iroot, sf_i);
+
+	unlock_new_inode(iroot);
+
+	droot = d_make_root(iroot);
+	if (!droot) {
+		/*
+		 * d_make_root() did iput(iroot), which evicted the inode and
+		 * already freed sf_i and sf_i->path via sf_evict_inode();
+		 * jumping to fail3/fail2 here would double-free them.
+		 */
+		err = -ENOMEM;
+		goto fail1;
+	}
+
+	sb->s_root = droot;
+	SET_GLOB_INFO(sb, sf_g);
+	return 0;
+
+fail3:
+	kfree(sf_i->path);
+
+fail2:
+	kfree(sf_i);
+
+fail1:
+	sf_glob_free(sf_g);
+
+fail0:
+	return err;
+}
+
+/*
+ * This is called when vfs is about to destroy the [inode]. all
+ * resources associated with this [inode] must be cleared here.
+ */
+static void sf_evict_inode(struct inode *inode)
+{
+	struct sf_inode_info *sf_i;
+
+	truncate_inode_pages(&inode->i_data, 0);
+	clear_inode(inode);
+
+	/* inode info may never have been attached (e.g. early failure) */
+	sf_i = GET_INODE_INFO(inode);
+	if (!sf_i)
+		return;
+
+	kfree(sf_i->path);
+	kfree(sf_i);
+	SET_INODE_INFO(inode, NULL);
+}
+
+/*
+ * Called at umount time: unmap the host share and release the
+ * per-mount sf_glob_info attached to [sb].
+ */
+static void sf_put_super(struct super_block *sb)
+{
+	sf_glob_free(GET_GLOB_INFO(sb));
+}
+
+/* statfs(2): delegate to sf_get_volume_info() for this superblock. */
+static int sf_statfs(struct dentry *dentry, struct kstatfs *stat)
+{
+	return sf_get_volume_info(dentry->d_inode->i_sb, stat);
+}
+
+/*
+ * Handle mount -o remount: optionally pick up new mount options (only
+ * recognized when the data carries the new-layout signature) and
+ * re-initialize the root inode from fresh host information.
+ * Returns 0 or a negative errno.
+ */
+static int sf_remount_fs(struct super_block *sb, int *flags, char *data)
+{
+	struct sf_glob_info *sf_g;
+	struct sf_inode_info *sf_i;
+	struct inode *iroot;
+	struct shfl_fsobjinfo fsinfo;
+	int err;
+
+	sf_g = GET_GLOB_INFO(sb);
+	if (data && data[0] != 0) {
+		struct vbsf_mount_info_new *info =
+		    (struct vbsf_mount_info_new *)data;
+
+		/* Only the new layout carries the signature bytes */
+		if (info->signature[0] == VBSF_MOUNT_SIGNATURE_BYTE_0
+		    && info->signature[1] == VBSF_MOUNT_SIGNATURE_BYTE_1
+		    && info->signature[2] == VBSF_MOUNT_SIGNATURE_BYTE_2) {
+			sf_g->uid = info->uid;
+			sf_g->gid = info->gid;
+			sf_g->ttl = info->ttl;
+			sf_g->dmode = info->dmode;
+			sf_g->fmode = info->fmode;
+			sf_g->dmask = info->dmask;
+			sf_g->fmask = info->fmask;
+		}
+	}
+
+	iroot = ilookup(sb, 0);
+	if (!iroot)
+		return -ENOSYS;
+
+	sf_i = GET_INODE_INFO(iroot);
+	err = sf_stat(__func__, sf_g, sf_i->path, &fsinfo, 0);
+	/* Only re-init the inode from fsinfo if the stat succeeded;
+	 * previously the error was ignored and stale data used. */
+	if (!err)
+		sf_init_inode(sf_g, iroot, &fsinfo);
+	/* drop the reference taken by ilookup(); it was leaked before */
+	iput(iroot);
+
+	return err;
+}
+
+/* Superblock operations for vboxsf mounts */
+static struct super_operations sf_super_ops = {
+	.evict_inode = sf_evict_inode,
+	.put_super = sf_put_super,
+	.statfs = sf_statfs,
+	.remount_fs = sf_remount_fs
+};
+
+/* mount entry point; shares have no backing device, so mount_nodev() */
+static struct dentry *sf_mount(struct file_system_type *fs_type, int flags,
+			       const char *dev_name, void *data)
+{
+	return mount_nodev(fs_type, flags, data, sf_read_super);
+}
+
+/* Filesystem type registration for "mount -t vboxsf" */
+static struct file_system_type vboxsf_fs_type = {
+	.owner = THIS_MODULE,
+	.name = "vboxsf",
+	.mount = sf_mount,
+	.kill_sb = kill_anon_super
+};
+
+/*
+ * Module parameter: if non-zero, the host resolves symlinks so the
+ * guest sees the link target instead of the symlink itself.
+ * Statics are zero-initialized; an explicit "= 0" is discouraged by
+ * kernel coding style.
+ */
+static int follow_symlinks;
+module_param(follow_symlinks, int, 0);
+MODULE_PARM_DESC(follow_symlinks,
+		 "Let host resolve symlinks rather than showing them");
+
+/* Module initialization/finalization handlers */
+static int __init init(void)
+{
+	int rc, err;
+
+	/* Mount data is passed from user space in at most one page */
+	if (sizeof(struct vbsf_mount_info_new) > PAGE_SIZE) {
+		/* %zu for size_t and %lu for unsigned long PAGE_SIZE
+		 * (were %zd/%ld, the wrong signedness for both) */
+		vbg_err("vboxsf: Mount information structure is too large %zu; Must be less than or equal to %lu\n",
+			sizeof(struct vbsf_mount_info_new), PAGE_SIZE);
+		return -EINVAL;
+	}
+
+	err = register_filesystem(&vboxsf_fs_type);
+	if (err)
+		return err;
+
+	rc = vboxsf_connect();
+	if (rc < 0) {
+		/* message now names the actual function that failed */
+		vbg_err("vboxsf_connect failed, rc=%d\n", rc);
+		err = -EPROTO;
+		goto fail1;
+	}
+
+	rc = vboxsf_set_utf8();
+	if (rc < 0) {
+		vbg_err("vboxsf_set_utf8 failed, rc=%d\n", rc);
+		err = -EPROTO;
+		goto fail2;
+	}
+
+	/* Best effort: old hosts may not support exposing symlinks */
+	if (!follow_symlinks) {
+		rc = vboxsf_set_symlinks();
+		if (rc < 0) {
+			vbg_warn("vboxsf: Host unable to show symlinks, rc=%d\n",
+				 rc);
+		}
+	}
+
+	return 0;
+
+fail2:
+	vboxsf_disconnect();
+
+fail1:
+	unregister_filesystem(&vboxsf_fs_type);
+	return err;
+}
+
+/* Module unload: disconnect from the host service and unregister. */
+static void __exit fini(void)
+{
+	vboxsf_disconnect();
+	unregister_filesystem(&vboxsf_fs_type);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/fs/vboxsf/vfsmod.h b/fs/vboxsf/vfsmod.h
new file mode 100644
index 000000000000..ebfdaffb46d4
--- /dev/null
+++ b/fs/vboxsf/vfsmod.h
@@ -0,0 +1,111 @@
+/*
+ * VirtualBox Guest Shared Folders support: module header.
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef VFSMOD_H
+#define VFSMOD_H
+
+#include <linux/backing-dev.h>
+#include <linux/version.h>
+#include "vboxsf_wrappers.h"
+
+#define DIR_BUFFER_SIZE SZ_16K
+
+/*
+ * Per-shared-folder (per-superblock) information, reached through
+ * sb->s_fs_info via GET_GLOB_INFO()/SET_GLOB_INFO().
+ * The id/mode fields presumably come from mount options -- TODO confirm
+ * against the mount-data parsing code.
+ */
+struct sf_glob_info {
+	SHFLROOT root;		/* host-side handle of the mapped folder */
+	struct nls_table *nls;	/* charset table for name translation */
+	int ttl;		/* attribute cache time-to-live */
+	int uid;		/* forced owner uid for all inodes */
+	int gid;		/* forced owner gid for all inodes */
+	int dmode;		/* forced directory mode bits */
+	int fmode;		/* forced file mode bits */
+	int dmask;		/* mode mask applied to directories */
+	int fmask;		/* mode mask applied to files */
+};
+
+/*
+ * Per-inode information, reached through inode->i_private via
+ * GET_INODE_INFO()/SET_INODE_INFO().
+ */
+struct sf_inode_info {
+	/* host-side path identifying this file */
+	SHFLSTRING *path;
+	/* some information was changed, update data on next revalidate */
+	int force_restat;
+	/*
+	 * directory content changed, update the whole directory on next
+	 * sf_getdent
+	 */
+	int force_reread;
+	/* file structure, only valid between open() and release() */
+	struct file *file;
+	/*
+	 * handle valid if a file was created with sf_create_aux until it
+	 * will be opened with sf_reg_open()
+	 */
+	SHFLHANDLE handle;
+};
+
+/* A cached directory listing: a list of sf_dir_buf chunks. */
+struct sf_dir_info {
+	struct list_head info_list;
+};
+
+/*
+ * One buffer of directory entries. Field names keep the upstream
+ * VirtualBox Hungarian convention (c = count, cb = count of bytes).
+ */
+struct sf_dir_buf {
+	size_t cEntries;	/* number of entries stored in buf */
+	size_t cbFree;		/* bytes still free in buf */
+	size_t cbUsed;		/* bytes of buf in use */
+	void *buf;		/* entry storage */
+	struct list_head head;	/* link into sf_dir_info.info_list */
+};
+
+/* Per-open-file state: the host-side handle of an open regular file. */
+struct sf_reg_info {
+	SHFLHANDLE handle;
+};
+
+/* Operation tables defined in the other vboxsf translation units. */
+extern struct inode_operations sf_dir_iops;
+extern struct inode_operations sf_lnk_iops;
+extern struct inode_operations sf_reg_iops;
+extern struct file_operations sf_dir_fops;
+extern struct file_operations sf_reg_fops;
+extern struct dentry_operations sf_dentry_ops;
+extern struct address_space_operations sf_reg_aops;
+
+/* Fill @inode's attributes from the host object info @info. */
+void sf_init_inode(struct sf_glob_info *sf_g, struct inode *inode,
+		   const struct shfl_fsobjinfo *info);
+/*
+ * Query the host for @path's attributes into @result; @caller is used for
+ * log messages. With @ok_to_fail, a lookup failure is presumably not
+ * logged as an error -- TODO confirm in utils.c.
+ * Pointer declarators attached to the names (checkpatch: "foo * bar").
+ */
+int sf_stat(const char *caller, struct sf_glob_info *sf_g,
+	    SHFLSTRING *path, struct shfl_fsobjinfo *result, int ok_to_fail);
+int sf_inode_revalidate(struct dentry *dentry);
+int sf_getattr(const struct path *path, struct kstat *kstat,
+	       u32 request_mask, unsigned int query_flags);
+int sf_setattr(struct dentry *dentry, struct iattr *iattr);
+/* Build the host path for @dentry; on success *@result is kmalloc'ed. */
+int sf_path_from_dentry(const char *caller, struct sf_glob_info *sf_g,
+			struct sf_inode_info *sf_i,
+			struct dentry *dentry, SHFLSTRING **result);
+/* Convert @utf8_name into @sf_g's NLS charset, bounded by @name_bound_len. */
+int sf_nlscpy(struct sf_glob_info *sf_g, char *name,
+	      size_t name_bound_len, const unsigned char *utf8_name,
+	      size_t utf8_len);
+void sf_dir_info_free(struct sf_dir_info *p);
+void sf_dir_info_empty(struct sf_dir_info *p);
+struct sf_dir_info *sf_dir_info_alloc(void);
+/* Read the whole directory identified by @handle into @sf_d. */
+int sf_dir_read_all(struct sf_glob_info *sf_g,
+		    struct sf_inode_info *sf_i, struct sf_dir_info *sf_d,
+		    SHFLHANDLE handle);
+int sf_get_volume_info(struct super_block *sb, struct kstatfs *stat);
+
+/*
+ * Typed accessors for the void * private pointers on super_block and
+ * inode. Every macro argument is parenthesized so expressions such as
+ * GET_F_DENTRY(a ? f1 : f2) expand correctly (the original GET_F_DENTRY
+ * did not parenthesize its argument).
+ */
+#define GET_GLOB_INFO(sb)	((struct sf_glob_info *)(sb)->s_fs_info)
+#define SET_GLOB_INFO(sb, sf_g)	((sb)->s_fs_info = (sf_g))
+
+#define GET_INODE_INFO(i)	((struct sf_inode_info *)(i)->i_private)
+#define SET_INODE_INFO(i, sf_i)	((i)->i_private = (sf_i))
+
+#define GET_F_DENTRY(f)		((f)->f_path.dentry)
+
+#endif
--
2.13.3
^ permalink raw reply related [flat|nested] 18+ messages in thread