Merge tag 'for-linus-4.14b-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

 - the new pvcalls backend for routing socket calls from a guest to dom0

 - some cleanups of Xen code

 - a fix for wrong usage of {get,put}_cpu()

* tag 'for-linus-4.14b-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip: (27 commits)
  xen/mmu: set MMU_NORMAL_PT_UPDATE in remap_area_mfn_pte_fn
  xen: Don't try to call xen_alloc_p2m_entry() on autotranslating guests
  xen/events: events_fifo: Don't use {get,put}_cpu() in xen_evtchn_fifo_init()
  xen/pvcalls: use WARN_ON(1) instead of __WARN()
  xen: remove not used trace functions
  xen: remove unused function xen_set_domain_pte()
  xen: remove tests for pvh mode in pure pv paths
  xen-platform: constify pci_device_id.
  xen: cleanup xen.h
  xen: introduce a Kconfig option to enable the pvcalls backend
  xen/pvcalls: implement write
  xen/pvcalls: implement read
  xen/pvcalls: implement the ioworker functions
  xen/pvcalls: disconnect and module_exit
  xen/pvcalls: implement release command
  xen/pvcalls: implement poll command
  xen/pvcalls: implement accept command
  xen/pvcalls: implement listen command
  xen/pvcalls: implement bind command
  xen/pvcalls: implement connect command
  ...
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 8417ef7..07b6531 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -158,9 +158,6 @@ static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
 	unsigned long pfn;
 	int ret;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return mfn;
-
 	if (unlikely(mfn >= machine_to_phys_nr))
 		return ~0;
 
@@ -317,8 +314,6 @@ static inline pte_t __pte_ma(pteval_t x)
 #define p4d_val_ma(x)	((x).p4d)
 #endif
 
-void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid);
-
 xmaddr_t arbitrary_virt_to_machine(void *address);
 unsigned long arbitrary_virt_to_mfn(void *vaddr);
 void make_lowmem_page_readonly(void *vaddr);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 3be06f3..3e15345 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -84,7 +84,7 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
 	else
 		rmd->mfn++;
 
-	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
+	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
 	rmd->mmu_update->val = pte_val_ma(pte);
 	rmd->mmu_update++;
 
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index e437714..6b983b3 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -162,26 +162,6 @@ static bool xen_page_pinned(void *ptr)
 	return PagePinned(page);
 }
 
-void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
-{
-	struct multicall_space mcs;
-	struct mmu_update *u;
-
-	trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
-
-	mcs = xen_mc_entry(sizeof(*u));
-	u = mcs.args;
-
-	/* ptep might be kmapped when using 32-bit HIGHPTE */
-	u->ptr = virt_to_machine(ptep).maddr;
-	u->val = pte_val_ma(pteval);
-
-	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
-
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
-}
-EXPORT_SYMBOL_GPL(xen_set_domain_pte);
-
 static void xen_extend_mmu_update(const struct mmu_update *update)
 {
 	struct multicall_space mcs;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 276da63..6083ba4 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -212,8 +212,7 @@ void __ref xen_build_mfn_list_list(void)
 	unsigned int level, topidx, mididx;
 	unsigned long *mid_mfn_p;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap) ||
-	    xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
+	if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
 		return;
 
 	/* Pre-initialize p2m_top_mfn to be completely missing */
@@ -269,9 +268,6 @@ void __ref xen_build_mfn_list_list(void)
 
 void xen_setup_mfn_list_list(void)
 {
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return;
-
 	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
 
 	if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
@@ -291,9 +287,6 @@ void __init xen_build_dynamic_phys_to_machine(void)
 {
 	unsigned long pfn;
 
-	 if (xen_feature(XENFEAT_auto_translated_physmap))
-		return;
-
 	xen_p2m_addr = (unsigned long *)xen_start_info->mfn_list;
 	xen_p2m_size = ALIGN(xen_start_info->nr_pages, P2M_PER_PAGE);
 
@@ -540,9 +533,6 @@ int xen_alloc_p2m_entry(unsigned long pfn)
 	unsigned long addr = (unsigned long)(xen_p2m_addr + pfn);
 	unsigned long p2m_pfn;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return 0;
-
 	ptep = lookup_address(addr, &level);
 	BUG_ON(!ptep || level != PG_LEVEL_4K);
 	pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));
@@ -640,9 +630,6 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
 	if (unlikely(pfn_s >= xen_p2m_size))
 		return 0;
 
-	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
-		return pfn_e - pfn_s;
-
 	if (pfn_s > pfn_e)
 		return 0;
 
@@ -660,10 +647,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 	pte_t *ptep;
 	unsigned int level;
 
-	/* don't track P2M changes in autotranslate guests */
-	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
-		return true;
-
 	if (unlikely(pfn >= xen_p2m_size)) {
 		BUG_ON(mfn != INVALID_P2M_ENTRY);
 		return true;
@@ -711,9 +694,6 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
 	int i, ret = 0;
 	pte_t *pte;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return 0;
-
 	if (kmap_ops) {
 		ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
 						kmap_ops, count);
@@ -756,9 +736,6 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 {
 	int i, ret = 0;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return 0;
-
 	for (i = 0; i < count; i++) {
 		unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
 		unsigned long pfn = page_to_pfn(pages[i]);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index c810463..ac55c02 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -340,8 +340,6 @@ static void __init xen_do_set_identity_and_remap_chunk(
 
 	WARN_ON(size == 0);
 
-	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
-
 	mfn_save = virt_to_mfn(buf);
 
 	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
@@ -1024,8 +1022,7 @@ void __init xen_pvmmu_arch_setup(void)
 void __init xen_arch_setup(void)
 {
 	xen_panic_handler_init();
-	if (!xen_feature(XENFEAT_auto_translated_physmap))
-		xen_pvmmu_arch_setup();
+	xen_pvmmu_arch_setup();
 
 #ifdef CONFIG_ACPI
 	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index f15bb3b7..4545561 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -196,6 +196,18 @@
 
 	  If in doubt, say m.
 
+config XEN_PVCALLS_BACKEND
+	bool "XEN PV Calls backend driver"
+	depends on INET && XEN && XEN_BACKEND
+	default n
+	help
+	  Experimental backend for the Xen PV Calls protocol
+	  (https://xenbits.xen.org/docs/unstable/misc/pvcalls.html). It
+	  allows PV Calls frontends to send POSIX calls to the backend,
+	  which implements them.
+
+	  If in doubt, say n.
+
 config XEN_SCSI_BACKEND
 	tristate "XEN SCSI backend driver"
 	depends on XEN && XEN_BACKEND && TARGET_CORE
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 7f188b8..caaa15d 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -35,6 +35,7 @@
 obj-$(CONFIG_XEN_EFI)			+= efi.o
 obj-$(CONFIG_XEN_SCSI_BACKEND)		+= xen-scsiback.o
 obj-$(CONFIG_XEN_AUTO_XLATE)		+= xlate_mmu.o
+obj-$(CONFIG_XEN_PVCALLS_BACKEND)	+= pvcalls-back.o
 xen-evtchn-y				:= evtchn.o
 xen-gntdev-y				:= gntdev.o
 xen-gntalloc-y				:= gntalloc.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index ab60925..f77e499af 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -664,9 +664,11 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages)
 			 */
 			BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
 
-			ret = xen_alloc_p2m_entry(page_to_pfn(page));
-			if (ret < 0)
-				goto out_undo;
+			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+				ret = xen_alloc_p2m_entry(page_to_pfn(page));
+				if (ret < 0)
+					goto out_undo;
+			}
 #endif
 		} else {
 			ret = add_ballooned_pages(nr_pages - pgno);
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 3c41470..76b318e 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -432,12 +432,12 @@ static int xen_evtchn_cpu_dead(unsigned int cpu)
 
 int __init xen_evtchn_fifo_init(void)
 {
-	int cpu = get_cpu();
+	int cpu = smp_processor_id();
 	int ret;
 
 	ret = evtchn_fifo_alloc_control_block(cpu);
 	if (ret < 0)
-		goto out;
+		return ret;
 
 	pr_info("Using FIFO-based ABI\n");
 
@@ -446,7 +446,6 @@ int __init xen_evtchn_fifo_init(void)
 	cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
 				  "xen/evtchn:prepare",
 				  xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
-out:
-	put_cpu();
+
 	return ret;
 }
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 1275df8..5d7dcad 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -175,7 +175,7 @@ static int platform_pci_probe(struct pci_dev *pdev,
 	return ret;
 }
 
-static struct pci_device_id platform_pci_tbl[] = {
+static const struct pci_device_id platform_pci_tbl[] = {
 	{PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{0,}
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
new file mode 100644
index 0000000..b209cd4
--- /dev/null
+++ b/drivers/xen/pvcalls-back.c
@@ -0,0 +1,1284 @@
+/*
+ * (c) 2017 Stefano Stabellini <stefano@aporeto.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/inet.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/radix-tree.h>
+#include <linux/module.h>
+#include <linux/semaphore.h>
+#include <linux/wait.h>
+#include <net/sock.h>
+#include <net/inet_common.h>
+#include <net/inet_connection_sock.h>
+#include <net/request_sock.h>
+
+#include <xen/events.h>
+#include <xen/grant_table.h>
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+#include <xen/interface/io/pvcalls.h>
+
+#define PVCALLS_VERSIONS "1"
+#define MAX_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER
+
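+/*
+ * Global state: every connected frontend is represented by a
+ * struct pvcalls_fedata on the frontends list; the semaphore
+ * serializes additions and removals.
+ */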
+struct pvcalls_back_global {
+	struct list_head frontends;
+	struct semaphore frontends_lock;
+} pvcalls_back_global;
+
+/*
+ * Per-frontend data structure. It contains pointers to the command
+ * ring, its event channel, a list of active sockets and a tree of
+ * passive sockets.
+ */
+struct pvcalls_fedata {
+	struct list_head list;
+	struct xenbus_device *dev;
+	struct xen_pvcalls_sring *sring;
+	struct xen_pvcalls_back_ring ring;
+	int irq;
+	struct list_head socket_mappings;
+	struct radix_tree_root socketpass_mappings;
+	struct semaphore socket_lock;
+};
+
+struct pvcalls_ioworker {
+	struct work_struct register_work;
+	struct workqueue_struct *wq;
+};
+
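+/*
+ * Per active socket: the grant-mapped indexes page and data ring
+ * shared with the frontend, plus the atomic counters through which
+ * the event channel handler and socket callbacks signal the ioworker.
+ */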
+struct sock_mapping {
+	struct list_head list;
+	struct pvcalls_fedata *fedata;
+	struct sockpass_mapping *sockpass;
+	struct socket *sock;
+	uint64_t id;
+	grant_ref_t ref;
+	struct pvcalls_data_intf *ring;
+	void *bytes;
+	struct pvcalls_data data;
+	uint32_t ring_order;
+	int irq;
+	atomic_t read;
+	atomic_t write;
+	atomic_t io;
+	atomic_t release;
+	void (*saved_data_ready)(struct sock *sk);
+	struct pvcalls_ioworker ioworker;
+};
+
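+/*
+ * Per passive (listening) socket: a pending accept or poll request is
+ * stashed in reqcopy (one at a time, guarded by copy_lock) and
+ * completed asynchronously from register_work.
+ */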
+struct sockpass_mapping {
+	struct list_head list;
+	struct pvcalls_fedata *fedata;
+	struct socket *sock;
+	uint64_t id;
+	struct xen_pvcalls_request reqcopy;
+	spinlock_t copy_lock;
+	struct workqueue_struct *wq;
+	struct work_struct register_work;
+	void (*saved_data_ready)(struct sock *sk);
+};
+
+static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map);
+static int pvcalls_back_release_active(struct xenbus_device *dev,
+				       struct pvcalls_fedata *fedata,
+				       struct sock_mapping *map);
+
+static void pvcalls_conn_back_read(void *opaque)
+{
+	struct sock_mapping *map = (struct sock_mapping *)opaque;
+	struct msghdr msg;
+	struct kvec vec[2];
+	RING_IDX cons, prod, size, wanted, array_size, masked_prod, masked_cons;
+	int32_t error;
+	struct pvcalls_data_intf *intf = map->ring;
+	struct pvcalls_data *data = &map->data;
+	unsigned long flags;
+	int ret;
+
+	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
+	cons = intf->in_cons;
+	prod = intf->in_prod;
+	error = intf->in_error;
+	/* read the indexes first, then deal with the data */
+	virt_mb();
+
+	if (error)
+		return;
+
+	size = pvcalls_queued(prod, cons, array_size);
+	if (size >= array_size)
+		return;
+	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
+	if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) {
+		atomic_set(&map->read, 0);
+		spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock,
+				flags);
+		return;
+	}
+	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
+	wanted = array_size - size;
+	masked_prod = pvcalls_mask(prod, array_size);
+	masked_cons = pvcalls_mask(cons, array_size);
+
+	memset(&msg, 0, sizeof(msg));
+	msg.msg_iter.type = ITER_KVEC|WRITE;
+	msg.msg_iter.count = wanted;
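+	/* one kvec if the free space is contiguous, two if it wraps around */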
+	if (masked_prod < masked_cons) {
+		vec[0].iov_base = data->in + masked_prod;
+		vec[0].iov_len = wanted;
+		msg.msg_iter.kvec = vec;
+		msg.msg_iter.nr_segs = 1;
+	} else {
+		vec[0].iov_base = data->in + masked_prod;
+		vec[0].iov_len = array_size - masked_prod;
+		vec[1].iov_base = data->in;
+		vec[1].iov_len = wanted - vec[0].iov_len;
+		msg.msg_iter.kvec = vec;
+		msg.msg_iter.nr_segs = 2;
+	}
+
+	atomic_set(&map->read, 0);
+	ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT);
+	WARN_ON(ret > wanted);
+	if (ret == -EAGAIN) /* shouldn't happen */
+		return;
+	if (!ret)
+		ret = -ENOTCONN;
+	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
+	if (ret > 0 && !skb_queue_empty(&map->sock->sk->sk_receive_queue))
+		atomic_inc(&map->read);
+	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
+
+	/* write the data, then modify the indexes */
+	virt_wmb();
+	if (ret < 0)
+		intf->in_error = ret;
+	else
+		intf->in_prod = prod + ret;
+	/* update the indexes, then notify the other end */
+	virt_wmb();
+	notify_remote_via_irq(map->irq);
+
+	return;
+}
+
+static void pvcalls_conn_back_write(struct sock_mapping *map)
+{
+	struct pvcalls_data_intf *intf = map->ring;
+	struct pvcalls_data *data = &map->data;
+	struct msghdr msg;
+	struct kvec vec[2];
+	RING_IDX cons, prod, size, array_size;
+	int ret;
+
+	cons = intf->out_cons;
+	prod = intf->out_prod;
+	/* read the indexes before dealing with the data */
+	virt_mb();
+
+	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
+	size = pvcalls_queued(prod, cons, array_size);
+	if (size == 0)
+		return;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.msg_flags |= MSG_DONTWAIT;
+	msg.msg_iter.type = ITER_KVEC|READ;
+	msg.msg_iter.count = size;
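+	/* one kvec if the queued data is contiguous, two if it wraps around */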
+	if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) {
+		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
+		vec[0].iov_len = size;
+		msg.msg_iter.kvec = vec;
+		msg.msg_iter.nr_segs = 1;
+	} else {
+		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
+		vec[0].iov_len = array_size - pvcalls_mask(cons, array_size);
+		vec[1].iov_base = data->out;
+		vec[1].iov_len = size - vec[0].iov_len;
+		msg.msg_iter.kvec = vec;
+		msg.msg_iter.nr_segs = 2;
+	}
+
+	atomic_set(&map->write, 0);
+	ret = inet_sendmsg(map->sock, &msg, size);
+	if (ret == -EAGAIN || (ret >= 0 && ret < size)) {
+		atomic_inc(&map->write);
+		atomic_inc(&map->io);
+	}
+	if (ret == -EAGAIN)
+		return;
+
+	/* write the data, then update the indexes */
+	virt_wmb();
+	if (ret < 0) {
+		intf->out_error = ret;
+	} else {
+		intf->out_error = 0;
+		intf->out_cons = cons + ret;
+		prod = intf->out_prod;
+	}
+	/* update the indexes, then notify the other end */
+	virt_wmb();
+	if (prod != cons + ret)
+		atomic_inc(&map->write);
+	notify_remote_via_irq(map->irq);
+}
+
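+/*
+ * Main I/O loop: run while there is outstanding work (map->io),
+ * servicing reads and writes flagged by the socket callbacks and the
+ * event channel handler, and bail out early on release.
+ */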
+static void pvcalls_back_ioworker(struct work_struct *work)
+{
+	struct pvcalls_ioworker *ioworker = container_of(work,
+		struct pvcalls_ioworker, register_work);
+	struct sock_mapping *map = container_of(ioworker, struct sock_mapping,
+		ioworker);
+
+	while (atomic_read(&map->io) > 0) {
+		if (atomic_read(&map->release) > 0) {
+			atomic_set(&map->release, 0);
+			return;
+		}
+
+		if (atomic_read(&map->read) > 0)
+			pvcalls_conn_back_read(map);
+		if (atomic_read(&map->write) > 0)
+			pvcalls_conn_back_write(map);
+
+		atomic_dec(&map->io);
+	}
+}
+
+static int pvcalls_back_socket(struct xenbus_device *dev,
+		struct xen_pvcalls_request *req)
+{
+	struct pvcalls_fedata *fedata;
+	int ret;
+	struct xen_pvcalls_response *rsp;
+
+	fedata = dev_get_drvdata(&dev->dev);
+
+	if (req->u.socket.domain != AF_INET ||
+	    req->u.socket.type != SOCK_STREAM ||
+	    (req->u.socket.protocol != IPPROTO_IP &&
+	     req->u.socket.protocol != AF_INET))
+		ret = -EAFNOSUPPORT;
+	else
+		ret = 0;
+
+	/* leave the actual socket allocation for later */
+
+	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
+	rsp->req_id = req->req_id;
+	rsp->cmd = req->cmd;
+	rsp->u.socket.id = req->u.socket.id;
+	rsp->ret = ret;
+
+	return 0;
+}
+
+static void pvcalls_sk_state_change(struct sock *sock)
+{
+	struct sock_mapping *map = sock->sk_user_data;
+	struct pvcalls_data_intf *intf;
+
+	if (map == NULL)
+		return;
+
+	intf = map->ring;
+	intf->in_error = -ENOTCONN;
+	notify_remote_via_irq(map->irq);
+}
+
+static void pvcalls_sk_data_ready(struct sock *sock)
+{
+	struct sock_mapping *map = sock->sk_user_data;
+	struct pvcalls_ioworker *iow;
+
+	if (map == NULL)
+		return;
+
+	iow = &map->ioworker;
+	atomic_inc(&map->read);
+	atomic_inc(&map->io);
+	queue_work(iow->wq, &iow->register_work);
+}
+
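+/*
+ * Map the indexes page granted by the frontend, then the data ring it
+ * describes, bind the per-connection event channel and redirect the
+ * sk callbacks so that socket activity wakes the ioworker.
+ */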
+static struct sock_mapping *pvcalls_new_active_socket(
+		struct pvcalls_fedata *fedata,
+		uint64_t id,
+		grant_ref_t ref,
+		uint32_t evtchn,
+		struct socket *sock)
+{
+	int ret;
+	struct sock_mapping *map;
+	void *page;
+
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (map == NULL)
+		return NULL;
+
+	map->fedata = fedata;
+	map->sock = sock;
+	map->id = id;
+	map->ref = ref;
+
+	ret = xenbus_map_ring_valloc(fedata->dev, &ref, 1, &page);
+	if (ret < 0)
+		goto out;
+	map->ring = page;
+	map->ring_order = map->ring->ring_order;
+	/* first read the order, then map the data ring */
+	virt_rmb();
+	if (map->ring_order > MAX_RING_ORDER) {
+		pr_warn("%s frontend requested ring_order %u, which is > MAX (%u)\n",
+				__func__, map->ring_order, MAX_RING_ORDER);
+		goto out;
+	}
+	ret = xenbus_map_ring_valloc(fedata->dev, map->ring->ref,
+				     (1 << map->ring_order), &page);
+	if (ret < 0)
+		goto out;
+	map->bytes = page;
+
+	ret = bind_interdomain_evtchn_to_irqhandler(fedata->dev->otherend_id,
+						    evtchn,
+						    pvcalls_back_conn_event,
+						    0,
+						    "pvcalls-backend",
+						    map);
+	if (ret < 0)
+		goto out;
+	map->irq = ret;
+
+	map->data.in = map->bytes;
+	map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order);
+
+	map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1);
+	if (!map->ioworker.wq)
+		goto out;
+	atomic_set(&map->io, 1);
+	INIT_WORK(&map->ioworker.register_work, pvcalls_back_ioworker);
+
+	down(&fedata->socket_lock);
+	list_add_tail(&map->list, &fedata->socket_mappings);
+	up(&fedata->socket_lock);
+
+	write_lock_bh(&map->sock->sk->sk_callback_lock);
+	map->saved_data_ready = map->sock->sk->sk_data_ready;
+	map->sock->sk->sk_user_data = map;
+	map->sock->sk->sk_data_ready = pvcalls_sk_data_ready;
+	map->sock->sk->sk_state_change = pvcalls_sk_state_change;
+	write_unlock_bh(&map->sock->sk->sk_callback_lock);
+
+	return map;
+out:
+	down(&fedata->socket_lock);
+	list_del(&map->list);
+	pvcalls_back_release_active(fedata->dev, fedata, map);
+	up(&fedata->socket_lock);
+	return NULL;
+}
+
+static int pvcalls_back_connect(struct xenbus_device *dev,
+				struct xen_pvcalls_request *req)
+{
+	struct pvcalls_fedata *fedata;
+	int ret = -EINVAL;
+	struct socket *sock;
+	struct sock_mapping *map;
+	struct xen_pvcalls_response *rsp;
+	struct sockaddr *sa = (struct sockaddr *)&req->u.connect.addr;
+
+	fedata = dev_get_drvdata(&dev->dev);
+
+	if (req->u.connect.len < sizeof(sa->sa_family) ||
+	    req->u.connect.len > sizeof(req->u.connect.addr) ||
+	    sa->sa_family != AF_INET)
+		goto out;
+
+	ret = sock_create(AF_INET, SOCK_STREAM, 0, &sock);
+	if (ret < 0)
+		goto out;
+	ret = inet_stream_connect(sock, sa, req->u.connect.len, 0);
+	if (ret < 0) {
+		sock_release(sock);
+		goto out;
+	}
+
+	map = pvcalls_new_active_socket(fedata,
+					req->u.connect.id,
+					req->u.connect.ref,
+					req->u.connect.evtchn,
+					sock);
+	if (!map) {
+		ret = -EFAULT;
+		sock_release(sock);
+	}
+
+out:
+	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
+	rsp->req_id = req->req_id;
+	rsp->cmd = req->cmd;
+	rsp->u.connect.id = req->u.connect.id;
+	rsp->ret = ret;
+
+	return 0;
+}
+
+static int pvcalls_back_release_active(struct xenbus_device *dev,
+				       struct pvcalls_fedata *fedata,
+				       struct sock_mapping *map)
+{
+	disable_irq(map->irq);
+	if (map->sock->sk != NULL) {
+		write_lock_bh(&map->sock->sk->sk_callback_lock);
+		map->sock->sk->sk_user_data = NULL;
+		map->sock->sk->sk_data_ready = map->saved_data_ready;
+		write_unlock_bh(&map->sock->sk->sk_callback_lock);
+	}
+
+	atomic_set(&map->release, 1);
+	flush_work(&map->ioworker.register_work);
+
+	xenbus_unmap_ring_vfree(dev, map->bytes);
+	xenbus_unmap_ring_vfree(dev, (void *)map->ring);
+	unbind_from_irqhandler(map->irq, map);
+
+	sock_release(map->sock);
+	kfree(map);
+
+	return 0;
+}
+
+static int pvcalls_back_release_passive(struct xenbus_device *dev,
+					struct pvcalls_fedata *fedata,
+					struct sockpass_mapping *mappass)
+{
+	if (mappass->sock->sk != NULL) {
+		write_lock_bh(&mappass->sock->sk->sk_callback_lock);
+		mappass->sock->sk->sk_user_data = NULL;
+		mappass->sock->sk->sk_data_ready = mappass->saved_data_ready;
+		write_unlock_bh(&mappass->sock->sk->sk_callback_lock);
+	}
+	sock_release(mappass->sock);
+	flush_workqueue(mappass->wq);
+	destroy_workqueue(mappass->wq);
+	kfree(mappass);
+
+	return 0;
+}
+
+static int pvcalls_back_release(struct xenbus_device *dev,
+				struct xen_pvcalls_request *req)
+{
+	struct pvcalls_fedata *fedata;
+	struct sock_mapping *map, *n;
+	struct sockpass_mapping *mappass;
+	int ret = 0;
+	struct xen_pvcalls_response *rsp;
+
+	fedata = dev_get_drvdata(&dev->dev);
+
+	down(&fedata->socket_lock);
+	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
+		if (map->id == req->u.release.id) {
+			list_del(&map->list);
+			up(&fedata->socket_lock);
+			ret = pvcalls_back_release_active(dev, fedata, map);
+			goto out;
+		}
+	}
+	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
+				    req->u.release.id);
+	if (mappass != NULL) {
+		radix_tree_delete(&fedata->socketpass_mappings, mappass->id);
+		up(&fedata->socket_lock);
+		ret = pvcalls_back_release_passive(dev, fedata, mappass);
+	} else
+		up(&fedata->socket_lock);
+
+out:
+	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
+	rsp->req_id = req->req_id;
+	rsp->u.release.id = req->u.release.id;
+	rsp->cmd = req->cmd;
+	rsp->ret = ret;
+	return 0;
+}
+
+static void __pvcalls_back_accept(struct work_struct *work)
+{
+	struct sockpass_mapping *mappass = container_of(
+		work, struct sockpass_mapping, register_work);
+	struct sock_mapping *map;
+	struct pvcalls_ioworker *iow;
+	struct pvcalls_fedata *fedata;
+	struct socket *sock;
+	struct xen_pvcalls_response *rsp;
+	struct xen_pvcalls_request *req;
+	int notify;
+	int ret = -EINVAL;
+	unsigned long flags;
+
+	fedata = mappass->fedata;
+	/*
+	 * __pvcalls_back_accept can race against pvcalls_back_accept.
+	 * We only need to check the value of "cmd" on read. It could be
+	 * done atomically, but to simplify the code on the write side, we
+	 * use a spinlock.
+	 */
+	spin_lock_irqsave(&mappass->copy_lock, flags);
+	req = &mappass->reqcopy;
+	if (req->cmd != PVCALLS_ACCEPT) {
+		spin_unlock_irqrestore(&mappass->copy_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&mappass->copy_lock, flags);
+
+	sock = sock_alloc();
+	if (sock == NULL)
+		goto out_error;
+	sock->type = mappass->sock->type;
+	sock->ops = mappass->sock->ops;
+
+	ret = inet_accept(mappass->sock, sock, O_NONBLOCK, true);
+	if (ret == -EAGAIN) {
+		sock_release(sock);
+		goto out_error;
+	}
+
+	map = pvcalls_new_active_socket(fedata,
+					req->u.accept.id_new,
+					req->u.accept.ref,
+					req->u.accept.evtchn,
+					sock);
+	if (!map) {
+		ret = -EFAULT;
+		sock_release(sock);
+		goto out_error;
+	}
+
+	map->sockpass = mappass;
+	iow = &map->ioworker;
+	atomic_inc(&map->read);
+	atomic_inc(&map->io);
+	queue_work(iow->wq, &iow->register_work);
+
+out_error:
+	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
+	rsp->req_id = req->req_id;
+	rsp->cmd = req->cmd;
+	rsp->u.accept.id = req->u.accept.id;
+	rsp->ret = ret;
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
+	if (notify)
+		notify_remote_via_irq(fedata->irq);
+
+	mappass->reqcopy.cmd = 0;
+}
+
+static void pvcalls_pass_sk_data_ready(struct sock *sock)
+{
+	struct sockpass_mapping *mappass = sock->sk_user_data;
+	struct pvcalls_fedata *fedata;
+	struct xen_pvcalls_response *rsp;
+	unsigned long flags;
+	int notify;
+
+	if (mappass == NULL)
+		return;
+
+	fedata = mappass->fedata;
+	spin_lock_irqsave(&mappass->copy_lock, flags);
+	if (mappass->reqcopy.cmd == PVCALLS_POLL) {
+		rsp = RING_GET_RESPONSE(&fedata->ring,
+					fedata->ring.rsp_prod_pvt++);
+		rsp->req_id = mappass->reqcopy.req_id;
+		rsp->u.poll.id = mappass->reqcopy.u.poll.id;
+		rsp->cmd = mappass->reqcopy.cmd;
+		rsp->ret = 0;
+
+		mappass->reqcopy.cmd = 0;
+		spin_unlock_irqrestore(&mappass->copy_lock, flags);
+
+		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
+		if (notify)
+			notify_remote_via_irq(mappass->fedata->irq);
+	} else {
+		spin_unlock_irqrestore(&mappass->copy_lock, flags);
+		queue_work(mappass->wq, &mappass->register_work);
+	}
+}
+
+static int pvcalls_back_bind(struct xenbus_device *dev,
+			     struct xen_pvcalls_request *req)
+{
+	struct pvcalls_fedata *fedata;
+	int ret;
+	struct sockpass_mapping *map;
+	struct xen_pvcalls_response *rsp;
+
+	fedata = dev_get_drvdata(&dev->dev);
+
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (map == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	INIT_WORK(&map->register_work, __pvcalls_back_accept);
+	spin_lock_init(&map->copy_lock);
+	map->wq = alloc_workqueue("pvcalls_wq", WQ_UNBOUND, 1);
+	if (!map->wq) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = sock_create(AF_INET, SOCK_STREAM, 0, &map->sock);
+	if (ret < 0)
+		goto out;
+
+	ret = inet_bind(map->sock, (struct sockaddr *)&req->u.bind.addr,
+			req->u.bind.len);
+	if (ret < 0)
+		goto out;
+
+	map->fedata = fedata;
+	map->id = req->u.bind.id;
+
+	down(&fedata->socket_lock);
+	ret = radix_tree_insert(&fedata->socketpass_mappings, map->id,
+				map);
+	up(&fedata->socket_lock);
+	if (ret)
+		goto out;
+
+	write_lock_bh(&map->sock->sk->sk_callback_lock);
+	map->saved_data_ready = map->sock->sk->sk_data_ready;
+	map->sock->sk->sk_user_data = map;
+	map->sock->sk->sk_data_ready = pvcalls_pass_sk_data_ready;
+	write_unlock_bh(&map->sock->sk->sk_callback_lock);
+
+out:
+	if (ret) {
+		if (map && map->sock)
+			sock_release(map->sock);
+		if (map && map->wq)
+			destroy_workqueue(map->wq);
+		kfree(map);
+	}
+	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
+	rsp->req_id = req->req_id;
+	rsp->cmd = req->cmd;
+	rsp->u.bind.id = req->u.bind.id;
+	rsp->ret = ret;
+	return 0;
+}
+
+static int pvcalls_back_listen(struct xenbus_device *dev,
+			       struct xen_pvcalls_request *req)
+{
+	struct pvcalls_fedata *fedata;
+	int ret = -EINVAL;
+	struct sockpass_mapping *map;
+	struct xen_pvcalls_response *rsp;
+
+	fedata = dev_get_drvdata(&dev->dev);
+
+	down(&fedata->socket_lock);
+	map = radix_tree_lookup(&fedata->socketpass_mappings, req->u.listen.id);
+	up(&fedata->socket_lock);
+	if (map == NULL)
+		goto out;
+
+	ret = inet_listen(map->sock, req->u.listen.backlog);
+
+out:
+	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
+	rsp->req_id = req->req_id;
+	rsp->cmd = req->cmd;
+	rsp->u.listen.id = req->u.listen.id;
+	rsp->ret = ret;
+	return 0;
+}
+
+static int pvcalls_back_accept(struct xenbus_device *dev,
+			       struct xen_pvcalls_request *req)
+{
+	struct pvcalls_fedata *fedata;
+	struct sockpass_mapping *mappass;
+	int ret = -EINVAL;
+	struct xen_pvcalls_response *rsp;
+	unsigned long flags;
+
+	fedata = dev_get_drvdata(&dev->dev);
+
+	down(&fedata->socket_lock);
+	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
+		req->u.accept.id);
+	up(&fedata->socket_lock);
+	if (mappass == NULL)
+		goto out_error;
+
+	/*
+	 * Limitation of the current implementation: only support one
+	 * concurrent accept or poll call on one socket.
+	 */
+	spin_lock_irqsave(&mappass->copy_lock, flags);
+	if (mappass->reqcopy.cmd != 0) {
+		spin_unlock_irqrestore(&mappass->copy_lock, flags);
+		ret = -EINTR;
+		goto out_error;
+	}
+
+	mappass->reqcopy = *req;
+	spin_unlock_irqrestore(&mappass->copy_lock, flags);
+	queue_work(mappass->wq, &mappass->register_work);
+
+	/* Tell the caller we don't need to send back a notification yet */
+	return -1;
+
+out_error:
+	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
+	rsp->req_id = req->req_id;
+	rsp->cmd = req->cmd;
+	rsp->u.accept.id = req->u.accept.id;
+	rsp->ret = ret;
+	return 0;
+}
+
+static int pvcalls_back_poll(struct xenbus_device *dev,
+			     struct xen_pvcalls_request *req)
+{
+	struct pvcalls_fedata *fedata;
+	struct sockpass_mapping *mappass;
+	struct xen_pvcalls_response *rsp;
+	struct inet_connection_sock *icsk;
+	struct request_sock_queue *queue;
+	unsigned long flags;
+	int ret;
+	bool data;
+
+	fedata = dev_get_drvdata(&dev->dev);
+
+	down(&fedata->socket_lock);
+	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
+				    req->u.poll.id);
+	up(&fedata->socket_lock);
+	if (mappass == NULL)
+		return -EINVAL;
+
+	/*
+	 * Limitation of the current implementation: only support one
+	 * concurrent accept or poll call on one socket.
+	 */
+	spin_lock_irqsave(&mappass->copy_lock, flags);
+	if (mappass->reqcopy.cmd != 0) {
+		ret = -EINTR;
+		goto out;
+	}
+
+	mappass->reqcopy = *req;
+	icsk = inet_csk(mappass->sock->sk);
+	queue = &icsk->icsk_accept_queue;
+	data = queue->rskq_accept_head != NULL;
+	if (data) {
+		mappass->reqcopy.cmd = 0;
+		ret = 0;
+		goto out;
+	}
+	spin_unlock_irqrestore(&mappass->copy_lock, flags);
+
+	/* Tell the caller we don't need to send back a notification yet */
+	return -1;
+
+out:
+	spin_unlock_irqrestore(&mappass->copy_lock, flags);
+
+	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
+	rsp->req_id = req->req_id;
+	rsp->cmd = req->cmd;
+	rsp->u.poll.id = req->u.poll.id;
+	rsp->ret = ret;
+	return 0;
+}
+
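+/*
+ * Dispatch one request. A zero return means a response has been
+ * queued; accept and poll return -1 when they defer the response
+ * until the operation can complete.
+ */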
+static int pvcalls_back_handle_cmd(struct xenbus_device *dev,
+				   struct xen_pvcalls_request *req)
+{
+	int ret = 0;
+
+	switch (req->cmd) {
+	case PVCALLS_SOCKET:
+		ret = pvcalls_back_socket(dev, req);
+		break;
+	case PVCALLS_CONNECT:
+		ret = pvcalls_back_connect(dev, req);
+		break;
+	case PVCALLS_RELEASE:
+		ret = pvcalls_back_release(dev, req);
+		break;
+	case PVCALLS_BIND:
+		ret = pvcalls_back_bind(dev, req);
+		break;
+	case PVCALLS_LISTEN:
+		ret = pvcalls_back_listen(dev, req);
+		break;
+	case PVCALLS_ACCEPT:
+		ret = pvcalls_back_accept(dev, req);
+		break;
+	case PVCALLS_POLL:
+		ret = pvcalls_back_poll(dev, req);
+		break;
+	default:
+	{
+		struct pvcalls_fedata *fedata;
+		struct xen_pvcalls_response *rsp;
+
+		fedata = dev_get_drvdata(&dev->dev);
+		rsp = RING_GET_RESPONSE(
+				&fedata->ring, fedata->ring.rsp_prod_pvt++);
+		rsp->req_id = req->req_id;
+		rsp->cmd = req->cmd;
+		rsp->ret = -ENOTSUPP;
+		break;
+	}
+	}
+	return ret;
+}
+
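+/*
+ * Drain the command ring, batching response notifications; the final
+ * check catches requests that arrive while responses are pushed.
+ */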
+static void pvcalls_back_work(struct pvcalls_fedata *fedata)
+{
+	int notify, notify_all = 0, more = 1;
+	struct xen_pvcalls_request req;
+	struct xenbus_device *dev = fedata->dev;
+
+	while (more) {
+		while (RING_HAS_UNCONSUMED_REQUESTS(&fedata->ring)) {
+			RING_COPY_REQUEST(&fedata->ring,
+					  fedata->ring.req_cons++,
+					  &req);
+
+			if (!pvcalls_back_handle_cmd(dev, &req)) {
+				RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(
+					&fedata->ring, notify);
+				notify_all += notify;
+			}
+		}
+
+		if (notify_all) {
+			notify_remote_via_irq(fedata->irq);
+			notify_all = 0;
+		}
+
+		RING_FINAL_CHECK_FOR_REQUESTS(&fedata->ring, more);
+	}
+}
+
+static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
+{
+	struct xenbus_device *dev = dev_id;
+	struct pvcalls_fedata *fedata = NULL;
+
+	if (dev == NULL)
+		return IRQ_HANDLED;
+
+	fedata = dev_get_drvdata(&dev->dev);
+	if (fedata == NULL)
+		return IRQ_HANDLED;
+
+	pvcalls_back_work(fedata);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
+{
+	struct sock_mapping *map = sock_map;
+	struct pvcalls_ioworker *iow;
+
+	if (map == NULL || map->sock == NULL || map->sock->sk == NULL ||
+		map->sock->sk->sk_user_data != map)
+		return IRQ_HANDLED;
+
+	iow = &map->ioworker;
+
+	atomic_inc(&map->write);
+	atomic_inc(&map->io);
+	queue_work(iow->wq, &iow->register_work);
+
+	return IRQ_HANDLED;
+}
+
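+/*
+ * Read the command ring reference and event channel from xenstore,
+ * map the ring and bind a threaded irq handler for frontend requests.
+ */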
+static int backend_connect(struct xenbus_device *dev)
+{
+	int err, evtchn;
+	grant_ref_t ring_ref;
+	struct pvcalls_fedata *fedata = NULL;
+
+	fedata = kzalloc(sizeof(struct pvcalls_fedata), GFP_KERNEL);
+	if (!fedata)
+		return -ENOMEM;
+
+	fedata->irq = -1;
+	err = xenbus_scanf(XBT_NIL, dev->otherend, "port", "%u",
+			   &evtchn);
+	if (err != 1) {
+		err = -EINVAL;
+		xenbus_dev_fatal(dev, err, "reading %s/event-channel",
+				 dev->otherend);
+		goto error;
+	}
+
+	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref);
+	if (err != 1) {
+		err = -EINVAL;
+		xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
+				 dev->otherend);
+		goto error;
+	}
+
+	err = bind_interdomain_evtchn_to_irq(dev->otherend_id, evtchn);
+	if (err < 0)
+		goto error;
+	fedata->irq = err;
+
+	err = request_threaded_irq(fedata->irq, NULL, pvcalls_back_event,
+				   IRQF_ONESHOT, "pvcalls-back", dev);
+	if (err < 0)
+		goto error;
+
+	err = xenbus_map_ring_valloc(dev, &ring_ref, 1,
+				     (void **)&fedata->sring);
+	if (err < 0)
+		goto error;
+
+	BACK_RING_INIT(&fedata->ring, fedata->sring, XEN_PAGE_SIZE * 1);
+	fedata->dev = dev;
+
+	INIT_LIST_HEAD(&fedata->socket_mappings);
+	INIT_RADIX_TREE(&fedata->socketpass_mappings, GFP_KERNEL);
+	sema_init(&fedata->socket_lock, 1);
+	dev_set_drvdata(&dev->dev, fedata);
+
+	down(&pvcalls_back_global.frontends_lock);
+	list_add_tail(&fedata->list, &pvcalls_back_global.frontends);
+	up(&pvcalls_back_global.frontends_lock);
+
+	return 0;
+
+ error:
+	if (fedata->irq >= 0)
+		unbind_from_irqhandler(fedata->irq, dev);
+	if (fedata->sring != NULL)
+		xenbus_unmap_ring_vfree(dev, fedata->sring);
+	kfree(fedata);
+	return err;
+}
+
+static int backend_disconnect(struct xenbus_device *dev)
+{
+	struct pvcalls_fedata *fedata;
+	struct sock_mapping *map, *n;
+	struct sockpass_mapping *mappass;
+	struct radix_tree_iter iter;
+	void **slot;
+
+	fedata = dev_get_drvdata(&dev->dev);
+
+	down(&fedata->socket_lock);
+	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
+		list_del(&map->list);
+		pvcalls_back_release_active(dev, fedata, map);
+	}
+
+	radix_tree_for_each_slot(slot, &fedata->socketpass_mappings, &iter, 0) {
+		mappass = radix_tree_deref_slot(slot);
+		if (!mappass)
+			continue;
+		if (radix_tree_exception(mappass)) {
+			if (radix_tree_deref_retry(mappass))
+				slot = radix_tree_iter_retry(&iter);
+		} else {
+			radix_tree_delete(&fedata->socketpass_mappings,
+					  mappass->id);
+			pvcalls_back_release_passive(dev, fedata, mappass);
+		}
+	}
+	up(&fedata->socket_lock);
+
+	unbind_from_irqhandler(fedata->irq, dev);
+	xenbus_unmap_ring_vfree(dev, fedata->sring);
+
+	list_del(&fedata->list);
+	kfree(fedata);
+	dev_set_drvdata(&dev->dev, NULL);
+
+	return 0;
+}
+
+static int pvcalls_back_probe(struct xenbus_device *dev,
+			      const struct xenbus_device_id *id)
+{
+	int err, abort;
+	struct xenbus_transaction xbt;
+
+again:
+	abort = 1;
+
+	err = xenbus_transaction_start(&xbt);
+	if (err) {
+		pr_warn("%s cannot create xenstore transaction\n", __func__);
+		return err;
+	}
+
+	err = xenbus_printf(xbt, dev->nodename, "versions", "%s",
+			    PVCALLS_VERSIONS);
+	if (err) {
+		pr_warn("%s write out 'versions' failed\n", __func__);
+		goto abort;
+	}
+
+	err = xenbus_printf(xbt, dev->nodename, "max-page-order", "%u",
+			    MAX_RING_ORDER);
+	if (err) {
+		pr_warn("%s write out 'max-page-order' failed\n", __func__);
+		goto abort;
+	}
+
+	err = xenbus_printf(xbt, dev->nodename, "function-calls",
+			    XENBUS_FUNCTIONS_CALLS);
+	if (err) {
+		pr_warn("%s write out 'function-calls' failed\n", __func__);
+		goto abort;
+	}
+
+	abort = 0;
+abort:
+	err = xenbus_transaction_end(xbt, abort);
+	if (err) {
+		if (err == -EAGAIN && !abort)
+			goto again;
+		pr_warn("%s cannot complete xenstore transaction\n", __func__);
+		return err;
+	}
+
+	if (abort)
+		return -EFAULT;
+
+	xenbus_switch_state(dev, XenbusStateInitWait);
+
+	return 0;
+}
+
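+/*
+ * Walk the xenbus state machine one transition at a time until the
+ * requested state is reached, connecting or disconnecting the backend
+ * along the way.
+ */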
+static void set_backend_state(struct xenbus_device *dev,
+			      enum xenbus_state state)
+{
+	while (dev->state != state) {
+		switch (dev->state) {
+		case XenbusStateClosed:
+			switch (state) {
+			case XenbusStateInitWait:
+			case XenbusStateConnected:
+				xenbus_switch_state(dev, XenbusStateInitWait);
+				break;
+			case XenbusStateClosing:
+				xenbus_switch_state(dev, XenbusStateClosing);
+				break;
+			default:
+				WARN_ON(1);
+			}
+			break;
+		case XenbusStateInitWait:
+		case XenbusStateInitialised:
+			switch (state) {
+			case XenbusStateConnected:
+				backend_connect(dev);
+				xenbus_switch_state(dev, XenbusStateConnected);
+				break;
+			case XenbusStateClosing:
+			case XenbusStateClosed:
+				xenbus_switch_state(dev, XenbusStateClosing);
+				break;
+			default:
+				WARN_ON(1);
+			}
+			break;
+		case XenbusStateConnected:
+			switch (state) {
+			case XenbusStateInitWait:
+			case XenbusStateClosing:
+			case XenbusStateClosed:
+				down(&pvcalls_back_global.frontends_lock);
+				backend_disconnect(dev);
+				up(&pvcalls_back_global.frontends_lock);
+				xenbus_switch_state(dev, XenbusStateClosing);
+				break;
+			default:
+				WARN_ON(1);
+			}
+			break;
+		case XenbusStateClosing:
+			switch (state) {
+			case XenbusStateInitWait:
+			case XenbusStateConnected:
+			case XenbusStateClosed:
+				xenbus_switch_state(dev, XenbusStateClosed);
+				break;
+			default:
+				WARN_ON(1);
+			}
+			break;
+		default:
+			WARN_ON(1);
+		}
+	}
+}
+
+static void pvcalls_back_changed(struct xenbus_device *dev,
+				 enum xenbus_state frontend_state)
+{
+	switch (frontend_state) {
+	case XenbusStateInitialising:
+		set_backend_state(dev, XenbusStateInitWait);
+		break;
+
+	case XenbusStateInitialised:
+	case XenbusStateConnected:
+		set_backend_state(dev, XenbusStateConnected);
+		break;
+
+	case XenbusStateClosing:
+		set_backend_state(dev, XenbusStateClosing);
+		break;
+
+	case XenbusStateClosed:
+		set_backend_state(dev, XenbusStateClosed);
+		if (xenbus_dev_is_online(dev))
+			break;
+		device_unregister(&dev->dev);
+		break;
+	case XenbusStateUnknown:
+		set_backend_state(dev, XenbusStateClosed);
+		device_unregister(&dev->dev);
+		break;
+
+	default:
+		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
+				 frontend_state);
+		break;
+	}
+}
+
+static int pvcalls_back_remove(struct xenbus_device *dev)
+{
+	return 0;
+}
+
+static int pvcalls_back_uevent(struct xenbus_device *xdev,
+			       struct kobj_uevent_env *env)
+{
+	return 0;
+}
+
+static const struct xenbus_device_id pvcalls_back_ids[] = {
+	{ "pvcalls" },
+	{ "" }
+};
+
+static struct xenbus_driver pvcalls_back_driver = {
+	.ids = pvcalls_back_ids,
+	.probe = pvcalls_back_probe,
+	.remove = pvcalls_back_remove,
+	.uevent = pvcalls_back_uevent,
+	.otherend_changed = pvcalls_back_changed,
+};
+
+static int __init pvcalls_back_init(void)
+{
+	int ret;
+
+	if (!xen_domain())
+		return -ENODEV;
+
+	ret = xenbus_register_backend(&pvcalls_back_driver);
+	if (ret < 0)
+		return ret;
+
+	sema_init(&pvcalls_back_global.frontends_lock, 1);
+	INIT_LIST_HEAD(&pvcalls_back_global.frontends);
+	return 0;
+}
+module_init(pvcalls_back_init);
+
+static void __exit pvcalls_back_fin(void)
+{
+	struct pvcalls_fedata *fedata, *nfedata;
+
+	down(&pvcalls_back_global.frontends_lock);
+	list_for_each_entry_safe(fedata, nfedata,
+				 &pvcalls_back_global.frontends, list) {
+		backend_disconnect(fedata->dev);
+	}
+	up(&pvcalls_back_global.frontends_lock);
+
+	xenbus_unregister_driver(&pvcalls_back_driver);
+}
+
+module_exit(pvcalls_back_fin);
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index b70a38b..1b4fed7 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -149,24 +149,6 @@ DECLARE_EVENT_CLASS(xen_mmu__set_pte,
 DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte);
 DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic);
 
-TRACE_EVENT(xen_mmu_set_domain_pte,
-	    TP_PROTO(pte_t *ptep, pte_t pteval, unsigned domid),
-	    TP_ARGS(ptep, pteval, domid),
-	    TP_STRUCT__entry(
-		    __field(pte_t *, ptep)
-		    __field(pteval_t, pteval)
-		    __field(unsigned, domid)
-		    ),
-	    TP_fast_assign(__entry->ptep = ptep;
-			   __entry->pteval = pteval.pte;
-			   __entry->domid = domid),
-	    TP_printk("ptep %p pteval %0*llx (raw %0*llx) domid %u",
-		      __entry->ptep,
-		      (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
-		      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval,
-		      __entry->domid)
-	);
-
 TRACE_EVENT(xen_mmu_set_pte_at,
 	    TP_PROTO(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t pteval),
@@ -266,16 +248,6 @@ TRACE_EVENT(xen_mmu_set_p4d,
 		      (int)sizeof(p4dval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->p4dval)),
 		      (int)sizeof(p4dval_t) * 2, (unsigned long long)__entry->p4dval)
 	);
-
-TRACE_EVENT(xen_mmu_pud_clear,
-	    TP_PROTO(pud_t *pudp),
-	    TP_ARGS(pudp),
-	    TP_STRUCT__entry(
-		    __field(pud_t *, pudp)
-		    ),
-	    TP_fast_assign(__entry->pudp = pudp),
-	    TP_printk("pudp %p", __entry->pudp)
-	);
 #else
 
 TRACE_EVENT(xen_mmu_set_pud,
@@ -295,16 +267,6 @@ TRACE_EVENT(xen_mmu_set_pud,
 
 #endif
 
-TRACE_EVENT(xen_mmu_pgd_clear,
-	    TP_PROTO(pgd_t *pgdp),
-	    TP_ARGS(pgdp),
-	    TP_STRUCT__entry(
-		    __field(pgd_t *, pgdp)
-		    ),
-	    TP_fast_assign(__entry->pgdp = pgdp),
-	    TP_printk("pgdp %p", __entry->pgdp)
-	);
-
 DECLARE_EVENT_CLASS(xen_mmu_ptep_modify_prot,
 	    TP_PROTO(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t pteval),
diff --git a/include/xen/interface/io/pvcalls.h b/include/xen/interface/io/pvcalls.h
new file mode 100644
index 0000000..ccf97b8
--- /dev/null
+++ b/include/xen/interface/io/pvcalls.h
@@ -0,0 +1,126 @@
+#ifndef __XEN_PUBLIC_IO_XEN_PVCALLS_H__
+#define __XEN_PUBLIC_IO_XEN_PVCALLS_H__
+
+#include <linux/net.h>
+#include <xen/interface/io/ring.h>
+#include <xen/interface/grant_table.h>
+
+/* "1" means socket, connect, release, bind, listen, accept and poll */
+#define XENBUS_FUNCTIONS_CALLS "1"
+
+/*
+ * See docs/misc/pvcalls.markdown in xen.git for the full specification:
+ * https://xenbits.xen.org/docs/unstable/misc/pvcalls.html
+ */
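+/*
+ * The in_* and out_* index triplets each occupy a full 64-byte cache
+ * line (3 * sizeof(RING_IDX) + 52 bytes of padding), keeping the two
+ * directions' indexes apart.
+ */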
+struct pvcalls_data_intf {
+    RING_IDX in_cons, in_prod, in_error;
+
+    uint8_t pad1[52];
+
+    RING_IDX out_cons, out_prod, out_error;
+
+    uint8_t pad2[52];
+
+    RING_IDX ring_order;
+    grant_ref_t ref[];
+};
+DEFINE_XEN_FLEX_RING(pvcalls);
+
+#define PVCALLS_SOCKET         0
+#define PVCALLS_CONNECT        1
+#define PVCALLS_RELEASE        2
+#define PVCALLS_BIND           3
+#define PVCALLS_LISTEN         4
+#define PVCALLS_ACCEPT         5
+#define PVCALLS_POLL           6
+
+struct xen_pvcalls_request {
+    uint32_t req_id; /* private to guest, echoed in response */
+    uint32_t cmd;    /* command to execute */
+    union {
+        struct xen_pvcalls_socket {
+            uint64_t id;
+            uint32_t domain;
+            uint32_t type;
+            uint32_t protocol;
+        } socket;
+        struct xen_pvcalls_connect {
+            uint64_t id;
+            uint8_t addr[28];
+            uint32_t len;
+            uint32_t flags;
+            grant_ref_t ref;
+            uint32_t evtchn;
+        } connect;
+        struct xen_pvcalls_release {
+            uint64_t id;
+            uint8_t reuse;
+        } release;
+        struct xen_pvcalls_bind {
+            uint64_t id;
+            uint8_t addr[28];
+            uint32_t len;
+        } bind;
+        struct xen_pvcalls_listen {
+            uint64_t id;
+            uint32_t backlog;
+        } listen;
+        struct xen_pvcalls_accept {
+            uint64_t id;
+            uint64_t id_new;
+            grant_ref_t ref;
+            uint32_t evtchn;
+        } accept;
+        struct xen_pvcalls_poll {
+            uint64_t id;
+        } poll;
+        /* dummy member to force sizeof(struct xen_pvcalls_request)
+         * to match across archs */
+        struct xen_pvcalls_dummy {
+            uint8_t dummy[56];
+        } dummy;
+    } u;
+};
+
+struct xen_pvcalls_response {
+    uint32_t req_id;
+    uint32_t cmd;
+    int32_t ret;
+    uint32_t pad;
+    union {
+        struct _xen_pvcalls_socket {
+            uint64_t id;
+        } socket;
+        struct _xen_pvcalls_connect {
+            uint64_t id;
+        } connect;
+        struct _xen_pvcalls_release {
+            uint64_t id;
+        } release;
+        struct _xen_pvcalls_bind {
+            uint64_t id;
+        } bind;
+        struct _xen_pvcalls_listen {
+            uint64_t id;
+        } listen;
+        struct _xen_pvcalls_accept {
+            uint64_t id;
+        } accept;
+        struct _xen_pvcalls_poll {
+            uint64_t id;
+        } poll;
+        struct _xen_pvcalls_dummy {
+            uint8_t dummy[8];
+        } dummy;
+    } u;
+};
+
+DEFINE_RING_TYPES(xen_pvcalls, struct xen_pvcalls_request,
+                  struct xen_pvcalls_response);
+
+#endif
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index c794568..e547088 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -9,6 +9,8 @@
 #ifndef __XEN_PUBLIC_IO_RING_H__
 #define __XEN_PUBLIC_IO_RING_H__
 
+#include <xen/interface/grant_table.h>
+
 typedef unsigned int RING_IDX;
 
 /* Round a 32-bit unsigned constant down to the nearest power of two. */
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 6e8b7fc..28c59ca5 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -13,11 +13,16 @@ extern enum xen_domain_type xen_domain_type;
 #define xen_domain_type		XEN_NATIVE
 #endif
 
+#ifdef CONFIG_XEN_PVH
+extern bool xen_pvh;
+#else
+#define xen_pvh			0
+#endif
+
 #define xen_domain()		(xen_domain_type != XEN_NATIVE)
-#define xen_pv_domain()		(xen_domain() &&			\
-				 xen_domain_type == XEN_PV_DOMAIN)
-#define xen_hvm_domain()	(xen_domain() &&			\
-				 xen_domain_type == XEN_HVM_DOMAIN)
+#define xen_pv_domain()		(xen_domain_type == XEN_PV_DOMAIN)
+#define xen_hvm_domain()	(xen_domain_type == XEN_HVM_DOMAIN)
+#define xen_pvh_domain()	(xen_pvh)
 
 #ifdef CONFIG_XEN_DOM0
 #include <xen/interface/xen.h>
@@ -29,11 +34,4 @@ extern enum xen_domain_type xen_domain_type;
 #define xen_initial_domain()	(0)
 #endif	/* CONFIG_XEN_DOM0 */
 
-#ifdef CONFIG_XEN_PVH
-extern bool xen_pvh;
-#define xen_pvh_domain()	(xen_hvm_domain() && xen_pvh)
-#else
-#define xen_pvh_domain()	(0)
-#endif
-
 #endif	/* _XEN_XEN_H */