caif: Add ref-count to framing layer

Introduce a per-CPU reference count for the lower part of the CAIF stack.
Before freeing, payload is disabled, synchronize_rcu() is called, and the
ref-count is then verified to be zero.

Signed-off-by: Sjur Brændeland <sjur.brandeland@stericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
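---
Note: a minimal, hedged userspace sketch of the per-CPU reference-count
pattern this patch applies (alloc_percpu() plus irqsafe_cpu_inc()/dec() and
a summing read): each CPU only touches its own slot on hold/put, and the
total is obtained by summing all slots when teardown checks for zero. The
slot count, the slot argument and the frml_* names below are illustrative
stand-ins, not kernel or CAIF APIs; C11 atomics stand in for the kernel's
irq-safe per-CPU operations.

  #include <stdatomic.h>
  #include <stdio.h>

  #define NR_SLOTS 4			/* stand-in for the number of CPUs */

  /* stand-in for the __percpu counter allocated with alloc_percpu(int) */
  static atomic_int refcnt[NR_SLOTS];

  static void frml_hold(int slot)	/* cf. cffrml_hold(): local increment */
  {
  	atomic_fetch_add(&refcnt[slot], 1);
  }

  static void frml_put(int slot)	/* cf. cffrml_put(): local decrement */
  {
  	atomic_fetch_sub(&refcnt[slot], 1);
  }

  static int frml_refcnt_read(void)	/* cf. cffrml_refcnt_read(): sum slots */
  {
  	int i, sum = 0;

  	for (i = 0; i < NR_SLOTS; i++)
  		sum += atomic_load(&refcnt[i]);
  	return sum;
  }

  int main(void)
  {
  	frml_hold(0);
  	frml_hold(1);
  	frml_put(0);
  	frml_put(1);
  	/* Only a zero sum allows the framing layer to be freed. */
  	printf("refcnt = %d\n", frml_refcnt_read());
  	return 0;
  }

As the commit message notes, the summed count is only trusted once payload
has been disabled and synchronize_rcu() has completed, so no new holders can
appear before cffrml_free() runs.
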
diff --git a/include/net/caif/cffrml.h b/include/net/caif/cffrml.h
index 4f126d7..afac1a4 100644
--- a/include/net/caif/cffrml.h
+++ b/include/net/caif/cffrml.h
@@ -7,12 +7,15 @@
 #ifndef CFFRML_H_
 #define CFFRML_H_
 #include <net/caif/caif_layer.h>
+#include <linux/netdevice.h>
 
 struct cffrml;
-struct cflayer *cffrml_create(u16 phyid, bool DoFCS);
+struct cflayer *cffrml_create(u16 phyid, bool use_fcs);
+void cffrml_free(struct cflayer *layr);
 void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up);
 void cffrml_set_dnlayer(struct cflayer *this, struct cflayer *dn);
 void cffrml_put(struct cflayer *layr);
 void cffrml_hold(struct cflayer *layr);
+int cffrml_refcnt_read(struct cflayer *layr);
 
 #endif /* CFFRML_H_ */
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 7892cc0..3f4f31f 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -519,6 +519,13 @@
 	caif_assert(phy_layer->id == phyid);
 	caif_assert(phyinfo->frm_layer->id == phyid);
 
+	/* Fail if reference count is not zero */
+	if (cffrml_refcnt_read(phyinfo->frm_layer) != 0) {
+		pr_info("Wait for device inuse\n");
+		mutex_unlock(&cnfg->lock);
+		return -EAGAIN;
+	}
+
 	list_del_rcu(&phyinfo->node);
 	synchronize_rcu();
 
@@ -537,7 +544,7 @@
 	if (phyinfo->phy_layer != frml_dn)
 		kfree(frml_dn);
 
-	kfree(frml);
+	cffrml_free(frml);
 	kfree(phyinfo);
 	mutex_unlock(&cnfg->lock);
 
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index f4b8892..4f4f756 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -12,6 +12,7 @@
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/crc-ccitt.h>
+#include <linux/netdevice.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfpkt.h>
 #include <net/caif/cffrml.h>
@@ -21,6 +22,7 @@
 struct cffrml {
 	struct cflayer layer;
 	bool dofcs;		/* !< FCS active */
+	int __percpu		*pcpu_refcnt;
 };
 
 static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt);
@@ -31,12 +33,19 @@
 static u32 cffrml_rcv_error;
 static u32 cffrml_rcv_checsum_error;
 struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
+
 {
 	struct cffrml *this = kmalloc(sizeof(struct cffrml), GFP_ATOMIC);
 	if (!this) {
 		pr_warn("Out of memory\n");
 		return NULL;
 	}
+	this->pcpu_refcnt = alloc_percpu(int);
+	if (this->pcpu_refcnt == NULL) {
+		kfree(this);
+		return NULL;
+	}
+
 	caif_assert(offsetof(struct cffrml, layer) == 0);
 
 	memset(this, 0, sizeof(struct cflayer));
@@ -49,6 +58,13 @@
 	return (struct cflayer *) this;
 }
 
+void cffrml_free(struct cflayer *layer)
+{
+	struct cffrml *this = container_obj(layer);
+	free_percpu(this->pcpu_refcnt);
+	kfree(layer);
+}
+
 void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up)
 {
 	this->up = up;
@@ -148,8 +164,23 @@
 
 void cffrml_put(struct cflayer *layr)
 {
+	struct cffrml *this = container_obj(layr);
+	if (layr != NULL && this->pcpu_refcnt != NULL)
+		irqsafe_cpu_dec(*this->pcpu_refcnt);
 }
 
 void cffrml_hold(struct cflayer *layr)
 {
+	struct cffrml *this = container_obj(layr);
+	if (layr != NULL && this->pcpu_refcnt != NULL)
+		irqsafe_cpu_inc(*this->pcpu_refcnt);
+}
+
+int cffrml_refcnt_read(struct cflayer *layr)
+{
+	int i, refcnt = 0;
+	struct cffrml *this = container_obj(layr);
+	for_each_possible_cpu(i)
+		refcnt += *per_cpu_ptr(this->pcpu_refcnt, i);
+	return refcnt;
 }