firewire: core: fix DMA mapping direction

Seen with recent libdc1394:  If a client mmap()s the buffer of an
isochronous reception context with PROT_READ|PROT_WRITE instead of just
PROT_READ, firewire-core sets the wrong DMA mapping direction during
buffer initialization.

The fix is to split fw_iso_buffer_init() into allocation and DMA mapping
and to perform the latter only after both the buffer and the DMA context
have been allocated.  Buffer allocation and context allocation may happen
in either order, but the context type (reception or transmission) must be
known in order to choose the DMA mapping direction of the buffer.
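
For illustration, here is a minimal sketch of what the split in
core-iso.c could look like; it is not the actual core-iso.c hunk of this
patch.  fw_iso_buffer_alloc() only allocates the backing pages, and
fw_iso_buffer_map_dma() performs the DMA mapping once the direction is
known.  The page_count_mapped bookkeeping field and the exact error
handling are assumptions of this sketch.

#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Allocate the backing pages; no DMA mapping happens here. */
int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count)
{
        int i;

        buffer->page_count = 0;
        buffer->page_count_mapped = 0;          /* assumed field */
        buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
                                GFP_KERNEL);
        if (buffer->pages == NULL)
                return -ENOMEM;

        for (i = 0; i < page_count; i++) {
                buffer->pages[i] =
                        alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
                if (buffer->pages[i] == NULL)
                        goto out_free;
        }
        buffer->page_count = page_count;

        return 0;

 out_free:
        while (i--)
                __free_page(buffer->pages[i]);
        kfree(buffer->pages);
        buffer->pages = NULL;

        return -ENOMEM;
}

/* Map the already allocated pages once the context type, and therefore
 * the direction, is known.  Cleanup of a partially mapped buffer is
 * assumed to be done by fw_iso_buffer_destroy() based on
 * page_count_mapped. */
int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
                          enum dma_data_direction direction)
{
        dma_addr_t address;
        int i;

        buffer->direction = direction;

        for (i = 0; i < buffer->page_count; i++) {
                address = dma_map_page(card->device, buffer->pages[i],
                                       0, PAGE_SIZE, direction);
                if (dma_mapping_error(card->device, address))
                        break;

                set_page_private(buffer->pages[i], address);
        }
        buffer->page_count_mapped = i;          /* assumed field */

        return i < buffer->page_count ? -ENOMEM : 0;
}

With such a split, the cdev layer (hunks below) can allocate the pages at
mmap time and defer the DMA mapping to whichever of mmap or
create_iso_context happens last.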

Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 2e6b245..2783f69 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -22,6 +22,7 @@
 #include <linux/compat.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/dma-mapping.h>
 #include <linux/errno.h>
 #include <linux/firewire.h>
 #include <linux/firewire-cdev.h>
@@ -70,6 +71,7 @@
 	u64 iso_closure;
 	struct fw_iso_buffer buffer;
 	unsigned long vm_start;
+	bool buffer_is_mapped;
 
 	struct list_head phy_receiver_link;
 	u64 phy_receiver_closure;
@@ -959,11 +961,20 @@
 		    sizeof(e->interrupt), NULL, 0);
 }
 
+static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
+{
+	if (context->type == FW_ISO_CONTEXT_TRANSMIT)
+		return DMA_TO_DEVICE;
+	else
+		return DMA_FROM_DEVICE;
+}
+
 static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
 {
 	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
 	struct fw_iso_context *context;
 	fw_iso_callback_t cb;
+	int ret;
 
 	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
 		     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE  ||
@@ -1004,8 +1015,21 @@
 	if (client->iso_context != NULL) {
 		spin_unlock_irq(&client->lock);
 		fw_iso_context_destroy(context);
+
 		return -EBUSY;
 	}
+	if (!client->buffer_is_mapped) {
+		ret = fw_iso_buffer_map_dma(&client->buffer,
+					    client->device->card,
+					    iso_dma_direction(context));
+		if (ret < 0) {
+			spin_unlock_irq(&client->lock);
+			fw_iso_context_destroy(context);
+
+			return ret;
+		}
+		client->buffer_is_mapped = true;
+	}
 	client->iso_closure = a->closure;
 	client->iso_context = context;
 	spin_unlock_irq(&client->lock);
@@ -1651,7 +1675,6 @@
 static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct client *client = file->private_data;
-	enum dma_data_direction direction;
 	unsigned long size;
 	int page_count, ret;
 
@@ -1674,20 +1697,28 @@
 	if (size & ~PAGE_MASK)
 		return -EINVAL;
 
-	if (vma->vm_flags & VM_WRITE)
-		direction = DMA_TO_DEVICE;
-	else
-		direction = DMA_FROM_DEVICE;
-
-	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
-				 page_count, direction);
+	ret = fw_iso_buffer_alloc(&client->buffer, page_count);
 	if (ret < 0)
 		return ret;
 
-	ret = fw_iso_buffer_map(&client->buffer, vma);
+	spin_lock_irq(&client->lock);
+	if (client->iso_context) {
+		ret = fw_iso_buffer_map_dma(&client->buffer,
+				client->device->card,
+				iso_dma_direction(client->iso_context));
+		client->buffer_is_mapped = (ret == 0);
+	}
+	spin_unlock_irq(&client->lock);
 	if (ret < 0)
-		fw_iso_buffer_destroy(&client->buffer, client->device->card);
+		goto fail;
 
+	ret = fw_iso_buffer_map_vma(&client->buffer, vma);
+	if (ret < 0)
+		goto fail;
+
+	return 0;
+ fail:
+	fw_iso_buffer_destroy(&client->buffer, client->device->card);
 	return ret;
 }