drbd: Create a dedicated struct drbd_device_work

drbd_device_work is a work item that carries a reference to a device,
while drbd_work is a more generic work item that does not.

All callbacks get a pointer to a drbd_work instance; callbacks that
expect a drbd_device_work use the container_of macro (via the
device_work() helper) to obtain it.
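
For reference, a minimal sketch of the layout this patch relies on (the
actual definitions live in drbd_int.h, which this diff does not show;
the drbd_work fields are reduced to what the callbacks below need):

	struct drbd_work {
		struct list_head list;
		int (*cb)(struct drbd_work *, int cancel);
	};

	struct drbd_device_work {
		struct drbd_work w;		/* must stay the first member for container_of */
		struct drbd_device *device;	/* the reference drbd_work itself no longer carries */
	};

	/* used by callbacks such as e_end_block() and e_end_resync_block() below */
	static inline struct drbd_device_work *device_work(struct drbd_work *w)
	{
		return container_of(w, struct drbd_device_work, w);
	}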

Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index e262b0b..8711436 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -209,7 +209,7 @@
 	   stop to examine the list... */
 
 	list_for_each_safe(le, tle, &device->net_ee) {
-		peer_req = list_entry(le, struct drbd_peer_request, w.list);
+		peer_req = list_entry(le, struct drbd_peer_request, dw.w.list);
 		if (drbd_peer_req_has_active_page(peer_req))
 			break;
 		list_move(le, to_be_freed);
@@ -225,7 +225,7 @@
 	reclaim_finished_net_peer_reqs(device, &reclaimed);
 	spin_unlock_irq(&device->resource->req_lock);
 
-	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
+	list_for_each_entry_safe(peer_req, t, &reclaimed, dw.w.list)
 		drbd_free_net_peer_req(device, peer_req);
 }
 
@@ -363,7 +363,7 @@
 	peer_req->i.waiting = false;
 
 	peer_req->epoch = NULL;
-	peer_req->w.device = device;
+	peer_req->dw.device = device;
 	peer_req->pages = page;
 	atomic_set(&peer_req->pending_bios, 0);
 	peer_req->flags = 0;
@@ -402,7 +402,7 @@
 	list_splice_init(list, &work_list);
 	spin_unlock_irq(&device->resource->req_lock);
 
-	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
+	list_for_each_entry_safe(peer_req, t, &work_list, dw.w.list) {
 		__drbd_free_peer_req(device, peer_req, is_net);
 		count++;
 	}
@@ -424,18 +424,18 @@
 	list_splice_init(&device->done_ee, &work_list);
 	spin_unlock_irq(&device->resource->req_lock);
 
-	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
+	list_for_each_entry_safe(peer_req, t, &reclaimed, dw.w.list)
 		drbd_free_net_peer_req(device, peer_req);
 
 	/* possible callbacks here:
 	 * e_end_block, and e_end_resync_block, e_send_superseded.
 	 * all ignore the last argument.
 	 */
-	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
+	list_for_each_entry_safe(peer_req, t, &work_list, dw.w.list) {
 		int err2;
 
 		/* list_del not necessary, next/prev members not touched */
-		err2 = peer_req->w.cb(&peer_req->w, !!err);
+		err2 = peer_req->dw.w.cb(&peer_req->dw.w, !!err);
 		if (!err)
 			err = err2;
 		drbd_free_peer_req(device, peer_req);
@@ -1664,9 +1664,10 @@
  */
 static int e_end_resync_block(struct drbd_work *w, int unused)
 {
+	struct drbd_device_work *dw = device_work(w);
 	struct drbd_peer_request *peer_req =
-		container_of(w, struct drbd_peer_request, w);
-	struct drbd_device *device = w->device;
+		container_of(dw, struct drbd_peer_request, dw);
+	struct drbd_device *device = dw->device;
 	sector_t sector = peer_req->i.sector;
 	int err;
 
@@ -1702,10 +1703,10 @@
 	/* corresponding dec_unacked() in e_end_resync_block()
 	 * respective _drbd_clear_done_ee */
 
-	peer_req->w.cb = e_end_resync_block;
+	peer_req->dw.w.cb = e_end_resync_block;
 
 	spin_lock_irq(&device->resource->req_lock);
-	list_add(&peer_req->w.list, &device->sync_ee);
+	list_add(&peer_req->dw.w.list, &device->sync_ee);
 	spin_unlock_irq(&device->resource->req_lock);
 
 	atomic_add(data_size >> 9, &device->rs_sect_ev);
@@ -1715,7 +1716,7 @@
 	/* don't care for the reason here */
 	drbd_err(device, "submit failed, triggering re-connect\n");
 	spin_lock_irq(&device->resource->req_lock);
-	list_del(&peer_req->w.list);
+	list_del(&peer_req->dw.w.list);
 	spin_unlock_irq(&device->resource->req_lock);
 
 	drbd_free_peer_req(device, peer_req);
@@ -1835,9 +1836,10 @@
  */
 static int e_end_block(struct drbd_work *w, int cancel)
 {
+	struct drbd_device_work *dw = device_work(w);
 	struct drbd_peer_request *peer_req =
-		container_of(w, struct drbd_peer_request, w);
-	struct drbd_device *device = w->device;
+		container_of(dw, struct drbd_peer_request, dw);
+	struct drbd_device *device = dw->device;
 	sector_t sector = peer_req->i.sector;
 	int err = 0, pcmd;
 
@@ -1874,11 +1876,11 @@
 	return err;
 }
 
-static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
+static int e_send_ack(struct drbd_device_work *dw, enum drbd_packet ack)
 {
-	struct drbd_device *device = w->device;
+	struct drbd_device *device = dw->device;
 	struct drbd_peer_request *peer_req =
-		container_of(w, struct drbd_peer_request, w);
+		container_of(dw, struct drbd_peer_request, dw);
 	int err;
 
 	err = drbd_send_ack(first_peer_device(device), ack, peer_req);
@@ -1889,14 +1891,15 @@
 
 static int e_send_superseded(struct drbd_work *w, int unused)
 {
-	return e_send_ack(w, P_SUPERSEDED);
+	return e_send_ack(device_work(w), P_SUPERSEDED);
 }
 
 static int e_send_retry_write(struct drbd_work *w, int unused)
 {
-	struct drbd_connection *connection = first_peer_device(w->device)->connection;
+	struct drbd_device_work *dw = device_work(w);
+	struct drbd_connection *connection = first_peer_device(dw->device)->connection;
 
-	return e_send_ack(w, connection->agreed_pro_version >= 100 ?
+	return e_send_ack(dw, connection->agreed_pro_version >= 100 ?
 			     P_RETRY_WRITE : P_SUPERSEDED);
 }
 
@@ -1943,7 +1946,7 @@
 	bool rv = 0;
 
 	spin_lock_irq(&device->resource->req_lock);
-	list_for_each_entry(rs_req, &device->sync_ee, w.list) {
+	list_for_each_entry(rs_req, &device->sync_ee, dw.w.list) {
 		if (overlaps(peer_req->i.sector, peer_req->i.size,
 			     rs_req->i.sector, rs_req->i.size)) {
 			rv = 1;
@@ -2114,9 +2117,9 @@
 					  superseded ? "local" : "remote");
 
 			inc_unacked(device);
-			peer_req->w.cb = superseded ? e_send_superseded :
+			peer_req->dw.w.cb = superseded ? e_send_superseded :
 						   e_send_retry_write;
-			list_add_tail(&peer_req->w.list, &device->done_ee);
+			list_add_tail(&peer_req->dw.w.list, &device->done_ee);
 			wake_asender(first_peer_device(device)->connection);
 
 			err = -ENOENT;
@@ -2212,7 +2215,7 @@
 		return -EIO;
 	}
 
-	peer_req->w.cb = e_end_block;
+	peer_req->dw.w.cb = e_end_block;
 
 	dp_flags = be32_to_cpu(p->dp_flags);
 	rw |= wire_flags_to_bio(device, dp_flags);
@@ -2252,7 +2255,7 @@
 		update_peer_seq(peer_device, peer_seq);
 		spin_lock_irq(&device->resource->req_lock);
 	}
-	list_add(&peer_req->w.list, &device->active_ee);
+	list_add(&peer_req->dw.w.list, &device->active_ee);
 	spin_unlock_irq(&device->resource->req_lock);
 
 	if (device->state.conn == C_SYNC_TARGET)
@@ -2299,7 +2302,7 @@
 	/* don't care for the reason here */
 	drbd_err(device, "submit failed, triggering re-connect\n");
 	spin_lock_irq(&device->resource->req_lock);
-	list_del(&peer_req->w.list);
+	list_del(&peer_req->dw.w.list);
 	drbd_remove_epoch_entry_interval(device, peer_req);
 	spin_unlock_irq(&device->resource->req_lock);
 	if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
@@ -2454,13 +2457,13 @@
 
 	switch (pi->cmd) {
 	case P_DATA_REQUEST:
-		peer_req->w.cb = w_e_end_data_req;
+		peer_req->dw.w.cb = w_e_end_data_req;
 		fault_type = DRBD_FAULT_DT_RD;
 		/* application IO, don't drbd_rs_begin_io */
 		goto submit;
 
 	case P_RS_DATA_REQUEST:
-		peer_req->w.cb = w_e_end_rsdata_req;
+		peer_req->dw.w.cb = w_e_end_rsdata_req;
 		fault_type = DRBD_FAULT_RS_RD;
 		/* used in the sector offset progress display */
 		device->bm_resync_fo = BM_SECT_TO_BIT(sector);
@@ -2484,13 +2487,13 @@
 
 		if (pi->cmd == P_CSUM_RS_REQUEST) {
 			D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
-			peer_req->w.cb = w_e_end_csum_rs_req;
+			peer_req->dw.w.cb = w_e_end_csum_rs_req;
 			/* used in the sector offset progress display */
 			device->bm_resync_fo = BM_SECT_TO_BIT(sector);
 		} else if (pi->cmd == P_OV_REPLY) {
 			/* track progress, we may need to throttle */
 			atomic_add(size >> 9, &device->rs_sect_in);
-			peer_req->w.cb = w_e_end_ov_reply;
+			peer_req->dw.w.cb = w_e_end_ov_reply;
 			dec_rs_pending(device);
 			/* drbd_rs_begin_io done when we sent this request,
 			 * but accounting still needs to be done. */
@@ -2514,7 +2517,7 @@
 			drbd_info(device, "Online Verify start sector: %llu\n",
 					(unsigned long long)sector);
 		}
-		peer_req->w.cb = w_e_end_ov_req;
+		peer_req->dw.w.cb = w_e_end_ov_req;
 		fault_type = DRBD_FAULT_RS_RD;
 		break;
 
@@ -2555,7 +2558,7 @@
 submit:
 	inc_unacked(device);
 	spin_lock_irq(&device->resource->req_lock);
-	list_add_tail(&peer_req->w.list, &device->read_ee);
+	list_add_tail(&peer_req->dw.w.list, &device->read_ee);
 	spin_unlock_irq(&device->resource->req_lock);
 
 	if (drbd_submit_peer_request(device, peer_req, READ, fault_type) == 0)
@@ -2564,7 +2567,7 @@
 	/* don't care for the reason here */
 	drbd_err(device, "submit failed, triggering re-connect\n");
 	spin_lock_irq(&device->resource->req_lock);
-	list_del(&peer_req->w.list);
+	list_del(&peer_req->dw.w.list);
 	spin_unlock_irq(&device->resource->req_lock);
 	/* no drbd_rs_complete_io(), we are dropping the connection anyways */
 
@@ -4495,7 +4498,6 @@
 	struct drbd_wq_barrier barr;
 
 	barr.w.cb = w_complete;
-	barr.w.connection = connection;
 	init_completion(&barr.done);
 	drbd_queue_work(&connection->sender_work, &barr.w);
 	wait_for_completion(&barr.done);
@@ -5218,7 +5220,7 @@
 	struct drbd_peer_device *peer_device;
 	struct drbd_device *device;
 	struct p_block_ack *p = pi->data;
-	struct drbd_work *w;
+	struct drbd_device_work *dw;
 	sector_t sector;
 	int size;
 
@@ -5250,13 +5252,13 @@
 		drbd_advance_rs_marks(device, device->ov_left);
 
 	if (device->ov_left == 0) {
-		w = kmalloc(sizeof(*w), GFP_NOIO);
-		if (w) {
-			w->cb = w_ov_finished;
-			w->device = device;
-			drbd_queue_work(&peer_device->connection->sender_work, w);
+		dw = kmalloc(sizeof(*dw), GFP_NOIO);
+		if (dw) {
+			dw->w.cb = w_ov_finished;
+			dw->device = device;
+			drbd_queue_work(&peer_device->connection->sender_work, &dw->w);
 		} else {
-			drbd_err(device, "kmalloc(w) failed.");
+			drbd_err(device, "kmalloc(dw) failed.");
 			ov_out_of_sync_print(device);
 			drbd_resync_finished(device);
 		}