bcachefs: EINTR -> BCH_ERR_transaction_restart

Now that we have error codes, with subtypes, we can switch to our own
error code for transaction restarts - and even better, a distinct error
code for each transaction restart reason: clearer code and better
debugging.
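
For example, a caller that used to retry on -EINTR now retries on the
error class instead. A minimal sketch of the new idiom (some_btree_op()
is just a placeholder for whatever transactional operation is being
retried, not a real helper):

  retry:
          bch2_trans_begin(&trans);

          ret = some_btree_op(&trans, ...);
          if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                  goto retry;

Since every restart reason is a subtype of BCH_ERR_transaction_restart,
a single bch2_err_matches() check catches all of them, while the specific
reason is preserved for tracepoints and debugging output.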

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c
index 00cd40a..7edebee 100644
--- a/fs/bcachefs/acl.c
+++ b/fs/bcachefs/acl.c
@@ -234,7 +234,7 @@ struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap,
 			&X_SEARCH(acl_to_xattr_type(type), "", 0),
 			0);
 	if (ret) {
-		if (ret == -EINTR)
+		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			goto retry;
 		if (ret != -ENOENT)
 			acl = ERR_PTR(ret);
@@ -334,7 +334,7 @@ int bch2_set_acl(struct mnt_idmap *idmap,
 btree_err:
 	bch2_trans_iter_exit(&trans, &inode_iter);
 
-	if (ret == -EINTR)
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		goto retry;
 	if (unlikely(ret))
 		goto err;
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index eb44a8b..15c3c9a 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -995,7 +995,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
 				     GFP_KERNEL);
 		*discard_pos_done = iter.pos;
 
-		ret = bch2_trans_relock(trans) ? 0 : -EINTR;
+		ret = bch2_trans_relock(trans);
 		if (ret)
 			goto out;
 	}
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index a9f8933..99fbf1d2 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -470,8 +470,9 @@ static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
 		for (alloc_cursor = max(alloc_cursor, bkey_start_offset(k.k));
 		     alloc_cursor < k.k->p.offset;
 		     alloc_cursor++) {
-			if (btree_trans_too_many_iters(trans)) {
-				ob = ERR_PTR(-EINTR);
+			ret = btree_trans_too_many_iters(trans);
+			if (ret) {
+				ob = ERR_PTR(ret);
 				break;
 			}
 
@@ -488,7 +489,8 @@ static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
 				break;
 			}
 		}
-		if (ob)
+
+		if (ob || ret)
 			break;
 	}
 	bch2_trans_iter_exit(trans, &iter);
@@ -738,7 +740,7 @@ static int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
 
 		ret = PTR_ERR_OR_ZERO(ob);
 		if (ret) {
-			if (ret == -EINTR || cl)
+			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)
 				break;
 			continue;
 		}
@@ -925,7 +927,7 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
 						 target, erasure_code,
 						 nr_replicas, nr_effective,
 						 have_cache, flags, _cl);
-			if (ret == -EINTR ||
+			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
 			    bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
 			    bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
 				return ret;
@@ -949,7 +951,7 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
 				nr_replicas, nr_effective, have_cache,
 				reserve, flags, cl);
 	if (ret &&
-	    ret != -EINTR &&
+	    !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
 	    !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
 	    !cl && _cl) {
 		cl = _cl;
@@ -1191,7 +1193,8 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
 					      nr_replicas, &nr_effective,
 					      &have_cache, reserve,
 					      ob_flags, NULL);
-		if (!ret || ret == -EINTR)
+		if (!ret ||
+		    bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			goto alloc_done;
 
 		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 1f80f08..4032c27 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -7,6 +7,7 @@
 #include "btree_iter.h"
 #include "btree_locking.h"
 #include "debug.h"
+#include "errcode.h"
 #include "error.h"
 #include "trace.h"
 
@@ -692,8 +693,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	if (trans && !bch2_btree_node_relock(trans, path, level + 1)) {
 		trace_trans_restart_relock_parent_for_fill(trans->fn,
 					_THIS_IP_, btree_id, &path->pos);
-		btree_trans_restart(trans);
-		return ERR_PTR(-EINTR);
+		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_relock));
 	}
 
 	b = bch2_btree_node_mem_alloc(c, level != 0);
@@ -702,8 +702,8 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 		trans->memory_allocation_failure = true;
 		trace_trans_restart_memory_allocation_failure(trans->fn,
 				_THIS_IP_, btree_id, &path->pos);
-		btree_trans_restart(trans);
-		return ERR_PTR(-EINTR);
+
+		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
 	}
 
 	if (IS_ERR(b))
@@ -740,18 +740,19 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	if (!sync)
 		return NULL;
 
-	if (trans &&
-	    (!bch2_trans_relock(trans) ||
-	     !bch2_btree_path_relock_intent(trans, path))) {
-		BUG_ON(!trans->restarted);
-		return ERR_PTR(-EINTR);
+	if (trans) {
+		int ret = bch2_trans_relock(trans) ?:
+			bch2_btree_path_relock_intent(trans, path);
+		if (ret) {
+			BUG_ON(!trans->restarted);
+			return ERR_PTR(ret);
+		}
 	}
 
 	if (!six_relock_type(&b->c.lock, lock_type, seq)) {
 		trace_trans_restart_relock_after_fill(trans->fn, _THIS_IP_,
 					   btree_id, &path->pos);
-		btree_trans_restart(trans);
-		return ERR_PTR(-EINTR);
+		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
 	}
 
 	return b;
@@ -762,7 +763,9 @@ static int lock_node_check_fn(struct six_lock *lock, void *p)
 	struct btree *b = container_of(lock, struct btree, c.lock);
 	const struct bkey_i *k = p;
 
-	return b->hash_val == btree_ptr_hash_val(k) ? 0 : -1;
+	if (b->hash_val != btree_ptr_hash_val(k))
+		return BCH_ERR_lock_fail_node_reused;
+	return 0;
 }
 
 static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
@@ -821,6 +824,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b;
 	struct bset_tree *t;
+	int ret;
 
 	EBUG_ON(level >= BTREE_MAX_DEPTH);
 
@@ -885,11 +889,14 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
 		if (btree_node_read_locked(path, level + 1))
 			btree_node_unlock(trans, path, level + 1);
 
-		if (!btree_node_lock(trans, path, b, k->k.p, level, lock_type,
-				     lock_node_check_fn, (void *) k, trace_ip)) {
-			if (!trans->restarted)
+		ret = btree_node_lock(trans, path, b, k->k.p, level, lock_type,
+				      lock_node_check_fn, (void *) k, trace_ip);
+		if (unlikely(ret)) {
+			if (bch2_err_matches(ret, BCH_ERR_lock_fail_node_reused))
 				goto retry;
-			return ERR_PTR(-EINTR);
+			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+				return ERR_PTR(ret);
+			BUG();
 		}
 
 		if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
@@ -903,8 +910,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
 							      trace_ip,
 							      path->btree_id,
 							      &path->pos);
-			btree_trans_restart(trans);
-			return ERR_PTR(-EINTR);
+			return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
 		}
 	}
 
@@ -920,11 +926,13 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
 		 * should_be_locked is not set on this path yet, so we need to
 		 * relock it specifically:
 		 */
-		if (trans &&
-		    (!bch2_trans_relock(trans) ||
-		     !bch2_btree_path_relock_intent(trans, path))) {
-			BUG_ON(!trans->restarted);
-			return ERR_PTR(-EINTR);
+		if (trans) {
+			int ret = bch2_trans_relock(trans) ?:
+				bch2_btree_path_relock_intent(trans, path);
+			if (ret) {
+				BUG_ON(!trans->restarted);
+				return ERR_PTR(ret);
+			}
 		}
 
 		if (!six_relock_type(&b->c.lock, lock_type, seq))
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 45ecd196..db247c9 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -34,7 +34,7 @@ static inline int bch2_trans_cond_resched(struct btree_trans *trans)
 	if (need_resched() || race_fault()) {
 		bch2_trans_unlock(trans);
 		schedule();
-		return bch2_trans_relock(trans) ? 0 : -EINTR;
+		return bch2_trans_relock(trans);
 	} else {
 		return 0;
 	}
@@ -285,13 +285,13 @@ static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
 }
 
 /* Slowpath: */
-bool __bch2_btree_node_lock(struct btree_trans *trans,
-			    struct btree_path *path,
-			    struct btree *b,
-			    struct bpos pos, unsigned level,
-			    enum six_lock_type type,
-			    six_lock_should_sleep_fn should_sleep_fn, void *p,
-			    unsigned long ip)
+int __bch2_btree_node_lock(struct btree_trans *trans,
+			   struct btree_path *path,
+			   struct btree *b,
+			   struct bpos pos, unsigned level,
+			   enum six_lock_type type,
+			   six_lock_should_sleep_fn should_sleep_fn, void *p,
+			   unsigned long ip)
 {
 	struct btree_path *linked;
 	unsigned reason;
@@ -369,8 +369,7 @@ bool __bch2_btree_node_lock(struct btree_trans *trans,
 			path->btree_id,
 			path->cached,
 			&pos);
-	btree_trans_restart(trans);
-	return false;
+	return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
 }
 
 /* Btree iterator locking: */
@@ -408,8 +407,8 @@ static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
 /*
  * Only for btree_cache.c - only relocks intent locks
  */
-bool bch2_btree_path_relock_intent(struct btree_trans *trans,
-				   struct btree_path *path)
+int bch2_btree_path_relock_intent(struct btree_trans *trans,
+				  struct btree_path *path)
 {
 	unsigned l;
 
@@ -421,16 +420,15 @@ bool bch2_btree_path_relock_intent(struct btree_trans *trans,
 			btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
 			trace_trans_restart_relock_path_intent(trans->fn, _RET_IP_,
 						   path->btree_id, &path->pos);
-			btree_trans_restart(trans);
-			return false;
+			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
 		}
 	}
 
-	return true;
+	return 0;
 }
 
 noinline __flatten
-static bool __bch2_btree_path_relock(struct btree_trans *trans,
+static int __bch2_btree_path_relock(struct btree_trans *trans,
 			struct btree_path *path, unsigned long trace_ip)
 {
 	bool ret = btree_path_get_locks(trans, path, false);
@@ -438,16 +436,17 @@ static bool __bch2_btree_path_relock(struct btree_trans *trans,
 	if (!ret) {
 		trace_trans_restart_relock_path(trans->fn, trace_ip,
 						path->btree_id, &path->pos);
-		btree_trans_restart(trans);
+		return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
 	}
-	return ret;
+
+	return 0;
 }
 
-static inline bool bch2_btree_path_relock(struct btree_trans *trans,
+static inline int bch2_btree_path_relock(struct btree_trans *trans,
 			struct btree_path *path, unsigned long trace_ip)
 {
 	return btree_node_locked(path, path->level)
-		? true
+		? 0
 		: __bch2_btree_path_relock(trans, path, trace_ip);
 }
 
@@ -532,22 +531,22 @@ void bch2_trans_downgrade(struct btree_trans *trans)
 
 /* Btree transaction locking: */
 
-bool bch2_trans_relock(struct btree_trans *trans)
+int bch2_trans_relock(struct btree_trans *trans)
 {
 	struct btree_path *path;
 
 	if (unlikely(trans->restarted))
-		return false;
+		return -BCH_ERR_transaction_restart_relock;
 
 	trans_for_each_path(trans, path)
 		if (path->should_be_locked &&
-		    !bch2_btree_path_relock(trans, path, _RET_IP_)) {
+		    bch2_btree_path_relock(trans, path, _RET_IP_)) {
 			trace_trans_restart_relock(trans->fn, _RET_IP_,
 					path->btree_id, &path->pos);
 			BUG_ON(!trans->restarted);
-			return false;
+			return -BCH_ERR_transaction_restart_relock;
 		}
-	return true;
+	return 0;
 }
 
 void bch2_trans_unlock(struct btree_trans *trans)
@@ -1187,7 +1186,9 @@ static int lock_root_check_fn(struct six_lock *lock, void *p)
 	struct btree *b = container_of(lock, struct btree, c.lock);
 	struct btree **rootp = p;
 
-	return b == *rootp ? 0 : -1;
+	if (b != *rootp)
+		return BCH_ERR_lock_fail_root_changed;
+	return 0;
 }
 
 static inline int btree_path_lock_root(struct btree_trans *trans,
@@ -1199,6 +1200,7 @@ static inline int btree_path_lock_root(struct btree_trans *trans,
 	struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
 	enum six_lock_type lock_type;
 	unsigned i;
+	int ret;
 
 	EBUG_ON(path->nodes_locked);
 
@@ -1220,13 +1222,16 @@ static inline int btree_path_lock_root(struct btree_trans *trans,
 		}
 
 		lock_type = __btree_lock_want(path, path->level);
-		if (unlikely(!btree_node_lock(trans, path, b, SPOS_MAX,
-					      path->level, lock_type,
-					      lock_root_check_fn, rootp,
-					      trace_ip))) {
-			if (trans->restarted)
-				return -EINTR;
-			continue;
+		ret = btree_node_lock(trans, path, b, SPOS_MAX,
+				      path->level, lock_type,
+				      lock_root_check_fn, rootp,
+				      trace_ip);
+		if (unlikely(ret)) {
+			if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed))
+				continue;
+			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+				return ret;
+			BUG();
 		}
 
 		if (likely(b == READ_ONCE(*rootp) &&
@@ -1431,12 +1436,12 @@ static int bch2_btree_path_traverse_all(struct btree_trans *trans)
 	int i, ret = 0;
 
 	if (trans->in_traverse_all)
-		return -EINTR;
+		return -BCH_ERR_transaction_restart_in_traverse_all;
 
 	trans->in_traverse_all = true;
 retry_all:
 	prev = NULL;
-	trans->restarted = false;
+	trans->restarted = 0;
 
 	trans_for_each_path(trans, path)
 		path->should_be_locked = false;
@@ -1480,7 +1485,8 @@ static int bch2_btree_path_traverse_all(struct btree_trans *trans)
 		 */
 		if (path->uptodate) {
 			ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
-			if (ret == -EINTR || ret == -ENOMEM)
+			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+			    ret == -ENOMEM)
 				goto retry_all;
 			if (ret)
 				goto err;
@@ -1587,19 +1593,17 @@ static int btree_path_traverse_one(struct btree_trans *trans,
 				   unsigned long trace_ip)
 {
 	unsigned depth_want = path->level;
-	int ret = 0;
+	int ret = trans->restarted;
 
-	if (unlikely(trans->restarted)) {
-		ret = -EINTR;
+	if (unlikely(ret))
 		goto out;
-	}
 
 	/*
 	 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
 	 * and re-traverse the path without a transaction restart:
 	 */
 	if (path->should_be_locked) {
-		ret = bch2_btree_path_relock(trans, path, trace_ip) ? 0 : -EINTR;
+		ret = bch2_btree_path_relock(trans, path, trace_ip);
 		goto out;
 	}
 
@@ -1648,7 +1652,7 @@ static int btree_path_traverse_one(struct btree_trans *trans,
 
 	path->uptodate = BTREE_ITER_UPTODATE;
 out:
-	BUG_ON((ret == -EINTR) != !!trans->restarted);
+	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted);
 	bch2_btree_path_verify(trans, path);
 	return ret;
 }
@@ -2135,8 +2139,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
 		trace_trans_restart_relock_next_node(trans->fn, _THIS_IP_,
 					   path->btree_id, &path->pos);
-		btree_trans_restart(trans);
-		ret = -EINTR;
+		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
 		goto err;
 	}
 
@@ -2517,8 +2520,9 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
 	BUG_ON(!iter->path->nodes_locked);
 out:
 	if (iter->update_path) {
-		if (unlikely(!bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_))) {
-			k = bkey_s_c_err(-EINTR);
+		ret = bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_);
+		if (unlikely(ret)) {
+			k = bkey_s_c_err(ret);
 		} else {
 			BUG_ON(!(iter->update_path->nodes_locked & 1));
 			iter->update_path->should_be_locked = true;
@@ -3169,8 +3173,7 @@ void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
 
 		if (old_bytes) {
 			trace_trans_restart_mem_realloced(trans->fn, _RET_IP_, new_bytes);
-			btree_trans_restart(trans);
-			return ERR_PTR(-EINTR);
+			return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
 		}
 	}
 
@@ -3184,9 +3187,9 @@ void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
  * bch2_trans_begin() - reset a transaction after a interrupted attempt
  * @trans: transaction to reset
  *
- * While iterating over nodes or updating nodes a attempt to lock a btree
- * node may return EINTR when the trylock fails. When this occurs
- * bch2_trans_begin() should be called and the transaction retried.
+ * While iterating over nodes or updating nodes, an attempt to lock a btree node
+ * may return BCH_ERR_transaction_restart when the trylock fails. When this
+ * occurs bch2_trans_begin() should be called and the transaction retried.
  */
 u32 bch2_trans_begin(struct btree_trans *trans)
 {
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 1952a76..79339a6 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -197,27 +197,36 @@ void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_path *,
 			      struct btree *, struct btree_node_iter *,
 			      struct bkey_packed *, unsigned, unsigned);
 
-bool bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);
+int bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);
 
 void bch2_path_put(struct btree_trans *, struct btree_path *, bool);
 
-bool bch2_trans_relock(struct btree_trans *);
+int bch2_trans_relock(struct btree_trans *);
 void bch2_trans_unlock(struct btree_trans *);
 
-static inline int trans_was_restarted(struct btree_trans *trans, u32 restart_count)
+static inline bool trans_was_restarted(struct btree_trans *trans, u32 restart_count)
 {
-	return restart_count != trans->restart_count ? -EINTR : 0;
+	return restart_count != trans->restart_count;
 }
 
 void bch2_trans_verify_not_restarted(struct btree_trans *, u32);
 
 __always_inline
-static inline int btree_trans_restart(struct btree_trans *trans)
+static inline int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
 {
-	trans->restarted = true;
+	BUG_ON(err <= 0);
+	BUG_ON(!bch2_err_matches(err, BCH_ERR_transaction_restart));
+
+	trans->restarted = err;
 	trans->restart_count++;
-	bch2_trans_unlock(trans);
-	return -EINTR;
+	return -err;
+}
+
+__always_inline
+static inline int btree_trans_restart(struct btree_trans *trans, int err)
+{
+	btree_trans_restart_nounlock(trans, err);
+	return -err;
 }
 
 bool bch2_btree_node_upgrade(struct btree_trans *,
@@ -338,7 +347,7 @@ __btree_iter_peek_node_and_restart(struct btree_trans *trans, struct btree_iter
 	struct btree *b;
 
 	while (b = bch2_btree_iter_peek_node(iter),
-	       PTR_ERR_OR_ZERO(b) == -EINTR)
+	       bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
 		bch2_trans_begin(trans);
 
 	return b;
@@ -387,7 +396,7 @@ static inline int btree_trans_too_many_iters(struct btree_trans *trans)
 {
 	if (hweight64(trans->paths_allocated) > BTREE_ITER_MAX / 2) {
 		trace_trans_restart_too_many_iters(trans->fn, _THIS_IP_);
-		return btree_trans_restart(trans);
+		return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
 	}
 
 	return 0;
@@ -401,7 +410,7 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
 
 	while (btree_trans_too_many_iters(trans) ||
 	       (k = bch2_btree_iter_peek_type(iter, flags),
-		bkey_err(k) == -EINTR))
+		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
 		bch2_trans_begin(trans);
 
 	return k;
@@ -414,7 +423,7 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
 	do {								\
 		bch2_trans_begin(_trans);				\
 		_ret = (_do);						\
-	} while (_ret == -EINTR);					\
+	} while (bch2_err_matches(_ret, BCH_ERR_transaction_restart));	\
 									\
 	_ret;								\
 })
@@ -425,7 +434,8 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
  * These are like lockrestart_do() and commit_do(), with two differences:
  *
  *  - We don't call bch2_trans_begin() unless we had a transaction restart
- *  - We return -EINTR if we succeeded after a transaction restart
+ *  - We return -BCH_ERR_transaction_restart_nested if we succeeded after a
+ *  transaction restart
  */
 #define nested_lockrestart_do(_trans, _do)				\
 ({									\
@@ -434,13 +444,16 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
 									\
 	_restart_count = _orig_restart_count = (_trans)->restart_count;	\
 									\
-	while ((_ret = (_do)) == -EINTR)				\
+	while (bch2_err_matches(_ret = (_do), BCH_ERR_transaction_restart))\
 		_restart_count = bch2_trans_begin(_trans);		\
 									\
 	if (!_ret)							\
 		bch2_trans_verify_not_restarted(_trans, _restart_count);\
 									\
-	_ret ?: trans_was_restarted(_trans, _orig_restart_count);	\
+	if (!_ret && trans_was_restarted(_trans, _orig_restart_count))	\
+		_ret = -BCH_ERR_transaction_restart_nested;		\
+									\
+	_ret;								\
 })
 
 #define for_each_btree_key2(_trans, _iter, _btree_id,			\
@@ -451,7 +464,7 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
 	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
 			     (_start), (_flags));			\
 									\
-	do {								\
+	while (1) {							\
 		bch2_trans_begin(_trans);				\
 		(_k) = bch2_btree_iter_peek_type(&(_iter), (_flags));	\
 		if (!(_k).k) {						\
@@ -460,9 +473,12 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
 		}							\
 									\
 		_ret = bkey_err(_k) ?: (_do);				\
-		if (!_ret)						\
-			bch2_btree_iter_advance(&(_iter));		\
-	} while (_ret == 0 || _ret == -EINTR);				\
+		if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
+			continue;					\
+		if (_ret)						\
+			break;						\
+		bch2_btree_iter_advance(&(_iter));			\
+	}								\
 									\
 	bch2_trans_iter_exit((_trans), &(_iter));			\
 	_ret;								\
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index e5a2924..549abe6 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -5,6 +5,7 @@
 #include "btree_key_cache.h"
 #include "btree_locking.h"
 #include "btree_update.h"
+#include "errcode.h"
 #include "error.h"
 #include "journal.h"
 #include "journal_reclaim.h"
@@ -292,7 +293,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 	if (!bch2_btree_node_relock(trans, ck_path, 0)) {
 		trace_trans_restart_relock_key_cache_fill(trans->fn,
 				_THIS_IP_, ck_path->btree_id, &ck_path->pos);
-		ret = btree_trans_restart(trans);
+		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_fill);
 		goto err;
 	}
 
@@ -347,8 +348,10 @@ static int bkey_cached_check_fn(struct six_lock *lock, void *p)
 	struct bkey_cached *ck = container_of(lock, struct bkey_cached, c.lock);
 	const struct btree_path *path = p;
 
-	return ck->key.btree_id == path->btree_id &&
-		!bpos_cmp(ck->key.pos, path->pos) ? 0 : -1;
+	if (ck->key.btree_id != path->btree_id ||
+	    bpos_cmp(ck->key.pos, path->pos))
+		return BCH_ERR_lock_fail_node_reused;
+	return 0;
 }
 
 __flatten
@@ -387,14 +390,15 @@ int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path
 	} else {
 		enum six_lock_type lock_want = __btree_lock_want(path, 0);
 
-		if (!btree_node_lock(trans, path, (void *) ck, path->pos, 0,
-				     lock_want,
-				     bkey_cached_check_fn, path, _THIS_IP_)) {
-			if (!trans->restarted)
+		ret = btree_node_lock(trans, path, (void *) ck, path->pos, 0,
+				      lock_want,
+				      bkey_cached_check_fn, path, _THIS_IP_);
+		if (ret) {
+			if (bch2_err_matches(ret, BCH_ERR_lock_fail_node_reused))
 				goto retry;
-
-			ret = -EINTR;
-			goto err;
+			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+				goto err;
+			BUG();
 		}
 
 		if (ck->key.btree_id != path->btree_id ||
@@ -413,7 +417,7 @@ int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path
 		if (!path->locks_want &&
 		    !__bch2_btree_path_upgrade(trans, path, 1)) {
 			trace_transaction_restart_ip(trans->fn, _THIS_IP_);
-			ret = btree_trans_restart(trans);
+			ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
 			goto err;
 		}
 
@@ -430,7 +434,7 @@ int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path
 
 	return ret;
 err:
-	if (ret != -EINTR) {
+	if (!bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
 		btree_node_unlock(trans, path, 0);
 		path->l[0].b = BTREE_ITER_NO_NODE_ERROR;
 	}
@@ -497,13 +501,14 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
 				   ? JOURNAL_WATERMARK_reserved
 				   : 0)|
 				  commit_flags);
-	if (ret) {
-		bch2_fs_fatal_err_on(ret != -EINTR &&
-				     ret != -EAGAIN &&
-				     !bch2_journal_error(j), c,
-			"error flushing key cache: %i", ret);
+
+	bch2_fs_fatal_err_on(ret &&
+			     !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
+			     !bch2_err_matches(ret, BCH_ERR_journal_reclaim_would_deadlock) &&
+			     !bch2_journal_error(j), c,
+			     "error flushing key cache: %s", bch2_err_str(ret));
+	if (ret)
 		goto out;
-	}
 
 	bch2_journal_pin_drop(j, &ck->journal);
 	bch2_journal_preres_put(j, &ck->res);
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index b870846..33a69e2 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -152,7 +152,7 @@ static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
 	}
 }
 
-static inline bool btree_node_lock_type(struct btree_trans *trans,
+static inline int btree_node_lock_type(struct btree_trans *trans,
 				       struct btree_path *path,
 				       struct btree *b,
 				       struct bpos pos, unsigned level,
@@ -161,10 +161,10 @@ static inline bool btree_node_lock_type(struct btree_trans *trans,
 {
 	struct bch_fs *c = trans->c;
 	u64 start_time;
-	bool ret;
+	int ret;
 
 	if (six_trylock_type(&b->c.lock, type))
-		return true;
+		return 0;
 
 	start_time = local_clock();
 
@@ -174,13 +174,14 @@ static inline bool btree_node_lock_type(struct btree_trans *trans,
 	trans->locking_level	= level;
 	trans->locking_lock_type = type;
 	trans->locking		= b;
-	ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0;
+	ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p);
 	trans->locking = NULL;
 
 	if (ret)
-		bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);
+		return ret;
 
-	return ret;
+	bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);
+	return 0;
 }
 
 /*
@@ -203,33 +204,34 @@ static inline bool btree_node_lock_increment(struct btree_trans *trans,
 	return false;
 }
 
-bool __bch2_btree_node_lock(struct btree_trans *, struct btree_path *,
-			    struct btree *, struct bpos, unsigned,
-			    enum six_lock_type,
-			    six_lock_should_sleep_fn, void *,
-			    unsigned long);
+int __bch2_btree_node_lock(struct btree_trans *, struct btree_path *,
+			   struct btree *, struct bpos, unsigned,
+			   enum six_lock_type,
+			   six_lock_should_sleep_fn, void *,
+			   unsigned long);
 
-static inline bool btree_node_lock(struct btree_trans *trans,
+static inline int btree_node_lock(struct btree_trans *trans,
 			struct btree_path *path,
 			struct btree *b, struct bpos pos, unsigned level,
 			enum six_lock_type type,
 			six_lock_should_sleep_fn should_sleep_fn, void *p,
 			unsigned long ip)
 {
+	int ret = 0;
+
 	EBUG_ON(level >= BTREE_MAX_DEPTH);
 	EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
 
 	if (likely(six_trylock_type(&b->c.lock, type)) ||
-		btree_node_lock_increment(trans, b, level, type) ||
-		__bch2_btree_node_lock(trans, path, b, pos, level, type,
-				       should_sleep_fn, p, ip)) {
+	    btree_node_lock_increment(trans, b, level, type) ||
+	    !(ret = __bch2_btree_node_lock(trans, path, b, pos, level, type,
+					   should_sleep_fn, p, ip))) {
 #ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
 		path->l[b->c.level].lock_taken_time = ktime_get_ns();
 #endif
-		return true;
-	} else {
-		return false;
 	}
+
+	return ret;
 }
 
 bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned);
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index 0650a35..bc1571f 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -405,11 +405,11 @@ struct btree_trans {
 	u8			nr_updates;
 	bool			used_mempool:1;
 	bool			in_traverse_all:1;
-	bool			restarted:1;
 	bool			paths_sorted:1;
 	bool			memory_allocation_failure:1;
 	bool			journal_transaction_names:1;
 	bool			journal_replay_not_finished:1;
+	enum bch_errcode	restarted:16;
 	u32			restart_count;
 	unsigned long		last_restarted_ip;
 
diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h
index 9b5a8b1..89941fb 100644
--- a/fs/bcachefs/btree_update.h
+++ b/fs/bcachefs/btree_update.h
@@ -90,7 +90,6 @@ int bch2_trans_log_msg(struct btree_trans *, const char *);
  * This is main entry point for btree updates.
  *
  * Return values:
- * -EINTR: locking changed, this function should be called again.
  * -EROFS: filesystem read only
  * -EIO: journal or btree node IO error
  */
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 9f9ab85..cf02e81 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -996,7 +996,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
 	if (!bch2_btree_path_upgrade(trans, path, U8_MAX)) {
 		trace_trans_restart_iter_upgrade(trans->fn, _RET_IP_,
 						 path->btree_id, &path->pos);
-		ret = btree_trans_restart(trans);
+		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
 		return ERR_PTR(ret);
 	}
 
@@ -1005,9 +1005,10 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
 	else if (!down_read_trylock(&c->gc_lock)) {
 		bch2_trans_unlock(trans);
 		down_read(&c->gc_lock);
-		if (!bch2_trans_relock(trans)) {
+		ret = bch2_trans_relock(trans);
+		if (ret) {
 			up_read(&c->gc_lock);
-			return ERR_PTR(-EINTR);
+			return ERR_PTR(ret);
 		}
 	}
 
@@ -1053,7 +1054,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
 					      journal_flags);
 		if (ret) {
 			trace_trans_restart_journal_preres_get(trans->fn, _RET_IP_);
-			btree_trans_restart(trans);
+			ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_journal_preres_get);
 			goto err;
 		}
 
@@ -1090,10 +1091,9 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
 		goto err;
 	}
 
-	if (!bch2_trans_relock(trans)) {
-		ret = -EINTR;
+	ret = bch2_trans_relock(trans);
+	if (ret)
 		goto err;
-	}
 
 	return as;
 err:
@@ -2030,10 +2030,8 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite
 	int ret = 0;
 
 	if (!btree_node_intent_locked(path, b->c.level) &&
-	    !bch2_btree_path_upgrade(trans, path, b->c.level + 1)) {
-		btree_trans_restart(trans);
-		return -EINTR;
-	}
+	    !bch2_btree_path_upgrade(trans, path, b->c.level + 1))
+		return btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
 
 	closure_init_stack(&cl);
 
@@ -2046,8 +2044,9 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite
 		if (ret) {
 			bch2_trans_unlock(trans);
 			closure_sync(&cl);
-			if (!bch2_trans_relock(trans))
-				return -EINTR;
+			ret = bch2_trans_relock(trans);
+			if (ret)
+				return ret;
 		}
 
 		new_hash = bch2_btree_node_mem_alloc(c, false);
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index c6fe24f..541826d 100644
--- a/fs/bcachefs/btree_update_leaf.c
+++ b/fs/bcachefs/btree_update_leaf.c
@@ -10,6 +10,7 @@
 #include "btree_locking.h"
 #include "buckets.h"
 #include "debug.h"
+#include "errcode.h"
 #include "error.h"
 #include "extent_update.h"
 #include "journal.h"
@@ -282,9 +283,10 @@ bch2_trans_journal_preres_get_cold(struct btree_trans *trans, unsigned u64s,
 	if (ret)
 		return ret;
 
-	if (!bch2_trans_relock(trans)) {
+	ret = bch2_trans_relock(trans);
+	if (ret) {
 		trace_trans_restart_journal_preres_get(trans->fn, trace_ip);
-		return -EINTR;
+		return ret;
 	}
 
 	return 0;
@@ -376,12 +378,7 @@ btree_key_can_insert_cached(struct btree_trans *trans,
 	trace_trans_restart_key_cache_key_realloced(trans->fn, _RET_IP_,
 					     path->btree_id, &path->pos,
 					     old_u64s, new_u64s);
-	/*
-	 * Not using btree_trans_restart() because we can't unlock here, we have
-	 * write locks held:
-	 */
-	trans->restarted = true;
-	return -EINTR;
+	return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_key_cache_realloced);
 }
 
 /* Triggers: */
@@ -573,8 +570,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
 
 	if (race_fault()) {
 		trace_trans_restart_fault_inject(trans->fn, trace_ip);
-		trans->restarted = true;
-		return -EINTR;
+		return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_fault_inject);
 	}
 
 	/*
@@ -812,6 +808,7 @@ static inline bool have_conflicting_read_lock(struct btree_trans *trans, struct
 static inline int trans_lock_write(struct btree_trans *trans)
 {
 	struct btree_insert_entry *i;
+	int ret;
 
 	trans_for_each_update(trans, i) {
 		if (same_leaf_as_prev(trans, i))
@@ -821,10 +818,11 @@ static inline int trans_lock_write(struct btree_trans *trans)
 			if (have_conflicting_read_lock(trans, i->path))
 				goto fail;
 
-			btree_node_lock_type(trans, i->path,
+			ret = btree_node_lock_type(trans, i->path,
 					     insert_l(i)->b,
 					     i->path->pos, i->level,
 					     SIX_LOCK_write, NULL, NULL);
+			BUG_ON(ret);
 		}
 
 		bch2_btree_node_prep_for_write(trans, i->path, insert_l(i)->b);
@@ -840,7 +838,7 @@ static inline int trans_lock_write(struct btree_trans *trans)
 	}
 
 	trace_trans_restart_would_deadlock_write(trans->fn);
-	return btree_trans_restart(trans);
+	return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock_write);
 }
 
 static noinline void bch2_drop_overwrites_from_journal(struct btree_trans *trans)
@@ -971,10 +969,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 	switch (ret) {
 	case BTREE_INSERT_BTREE_NODE_FULL:
 		ret = bch2_btree_split_leaf(trans, i->path, trans->flags);
-		if (!ret)
-			return 0;
-
-		if (ret == -EINTR)
+		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			trace_trans_restart_btree_node_split(trans->fn, trace_ip,
 						i->btree_id, &i->path->pos);
 		break;
@@ -985,19 +980,16 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 		if (ret)
 			break;
 
-		if (bch2_trans_relock(trans))
-			return 0;
-
-		trace_trans_restart_mark_replicas(trans->fn, trace_ip);
-		ret = -EINTR;
+		ret = bch2_trans_relock(trans);
+		if (ret)
+			trace_trans_restart_mark_replicas(trans->fn, trace_ip);
 		break;
 	case BTREE_INSERT_NEED_JOURNAL_RES:
 		bch2_trans_unlock(trans);
 
 		if ((trans->flags & BTREE_INSERT_JOURNAL_RECLAIM) &&
 		    !(trans->flags & JOURNAL_WATERMARK_reserved)) {
-			trans->restarted = true;
-			ret = -EAGAIN;
+			ret = -BCH_ERR_journal_reclaim_would_deadlock;
 			break;
 		}
 
@@ -1005,11 +997,9 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 		if (ret)
 			break;
 
-		if (bch2_trans_relock(trans))
-			return 0;
-
-		trace_trans_restart_journal_res_get(trans->fn, trace_ip);
-		ret = -EINTR;
+		ret = bch2_trans_relock(trans);
+		if (ret)
+			trace_trans_restart_journal_res_get(trans->fn, trace_ip);
 		break;
 	case BTREE_INSERT_NEED_JOURNAL_RECLAIM:
 		bch2_trans_unlock(trans);
@@ -1021,18 +1011,16 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 		if (ret < 0)
 			break;
 
-		if (bch2_trans_relock(trans))
-			return 0;
-
-		trace_trans_restart_journal_reclaim(trans->fn, trace_ip);
-		ret = -EINTR;
+		ret = bch2_trans_relock(trans);
+		if (ret)
+			trace_trans_restart_journal_reclaim(trans->fn, trace_ip);
 		break;
 	default:
 		BUG_ON(ret >= 0);
 		break;
 	}
 
-	BUG_ON((ret == EINTR || ret == -EAGAIN) && !trans->restarted);
+	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted);
 	BUG_ON(ret == -ENOSPC &&
 	       !(trans->flags & BTREE_INSERT_NOWAIT) &&
 	       (trans->flags & BTREE_INSERT_NOFAIL));
@@ -1052,13 +1040,11 @@ bch2_trans_commit_get_rw_cold(struct btree_trans *trans)
 
 	bch2_trans_unlock(trans);
 
-	ret = bch2_fs_read_write_early(c);
+	ret =   bch2_fs_read_write_early(c) ?:
+		bch2_trans_relock(trans);
 	if (ret)
 		return ret;
 
-	if (!bch2_trans_relock(trans))
-		return -EINTR;
-
 	percpu_ref_get(&c->writes);
 	return 0;
 }
@@ -1132,7 +1118,7 @@ int __bch2_trans_commit(struct btree_trans *trans)
 		if (unlikely(!bch2_btree_path_upgrade(trans, i->path, i->level + 1))) {
 			trace_trans_restart_upgrade(trans->fn, _RET_IP_,
 						    i->btree_id, &i->path->pos);
-			ret = btree_trans_restart(trans);
+			ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
 			goto out;
 		}
 
@@ -1654,8 +1640,7 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
 
 			if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
 				trace_trans_restart_key_cache_raced(trans->fn, _RET_IP_);
-				btree_trans_restart(trans);
-				return -EINTR;
+				return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
 			}
 
 			iter->key_cache_path->should_be_locked = true;
@@ -1783,7 +1768,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
 			break;
 	}
 
-	if (ret == -EINTR) {
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
 		ret = 0;
 		goto retry;
 	}
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index 6726bd6..c0d6a48 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -236,7 +236,7 @@ int bch2_data_update_index_update(struct bch_write_op *op)
 				bch2_ob_add_backpointer(c, ec_ob, &insert->k);
 		}
 err:
-		if (ret == -EINTR)
+		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			ret = 0;
 		if (ret)
 			break;
@@ -264,7 +264,7 @@ int bch2_data_update_index_update(struct bch_write_op *op)
 	bch2_trans_exit(&trans);
 	bch2_bkey_buf_exit(&_insert, c);
 	bch2_bkey_buf_exit(&_new, c);
-	BUG_ON(ret == -EINTR);
+	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
 	return ret;
 }
 
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
index 0cbb765..4d942d2 100644
--- a/fs/bcachefs/dirent.c
+++ b/fs/bcachefs/dirent.c
@@ -471,7 +471,7 @@ u64 bch2_dirent_lookup(struct bch_fs *c, subvol_inum dir,
 
 	ret = __bch2_dirent_lookup_trans(&trans, &iter, dir, hash_info,
 					  name, inum, 0);
-	if (ret == -EINTR)
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		goto retry;
 	if (!ret)
 		bch2_trans_iter_exit(&trans, &iter);
@@ -556,7 +556,7 @@ int bch2_readdir(struct bch_fs *c, subvol_inum inum, struct dir_context *ctx)
 	}
 	bch2_trans_iter_exit(&trans, &iter);
 err:
-	if (ret == -EINTR)
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		goto retry;
 
 	bch2_trans_exit(&trans);
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 947f2f2..f33acf1 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -572,18 +572,14 @@ static int ec_stripe_mem_alloc(struct btree_trans *trans,
 			       struct btree_iter *iter)
 {
 	size_t idx = iter->pos.offset;
-	int ret = 0;
 
 	if (!__ec_stripe_mem_alloc(trans->c, idx, GFP_NOWAIT|__GFP_NOWARN))
-		return ret;
+		return 0;
 
 	bch2_trans_unlock(trans);
-	ret = -EINTR;
 
-	if (!__ec_stripe_mem_alloc(trans->c, idx, GFP_KERNEL))
-		return ret;
-
-	return -ENOMEM;
+	return   __ec_stripe_mem_alloc(trans->c, idx, GFP_KERNEL) ?:
+		bch2_trans_relock(trans);
 }
 
 static ssize_t stripe_idx_to_delete(struct bch_fs *c)
diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
index 69cc7cd..7972b01 100644
--- a/fs/bcachefs/errcode.h
+++ b/fs/bcachefs/errcode.h
@@ -7,7 +7,30 @@
 	x(0,			freelist_empty)				\
 	x(freelist_empty,	no_buckets_found)			\
 	x(0,			insufficient_devices)			\
-	x(0,			need_snapshot_cleanup)
+	x(0,			need_snapshot_cleanup)			\
+	x(0,			transaction_restart)			\
+	x(transaction_restart,	transaction_restart_fault_inject)	\
+	x(transaction_restart,	transaction_restart_relock)		\
+	x(transaction_restart,	transaction_restart_relock_path)	\
+	x(transaction_restart,	transaction_restart_relock_path_intent)	\
+	x(transaction_restart,	transaction_restart_relock_after_fill)	\
+	x(transaction_restart,	transaction_restart_too_many_iters)	\
+	x(transaction_restart,	transaction_restart_lock_node_reused)	\
+	x(transaction_restart,	transaction_restart_fill_relock)	\
+	x(transaction_restart,	transaction_restart_fill_mem_alloc_fail)\
+	x(transaction_restart,	transaction_restart_mem_realloced)	\
+	x(transaction_restart,	transaction_restart_in_traverse_all)	\
+	x(transaction_restart,	transaction_restart_would_deadlock)	\
+	x(transaction_restart,	transaction_restart_would_deadlock_write)\
+	x(transaction_restart,	transaction_restart_upgrade)		\
+	x(transaction_restart,	transaction_restart_key_cache_fill)	\
+	x(transaction_restart,	transaction_restart_key_cache_raced)	\
+	x(transaction_restart,	transaction_restart_key_cache_realloced)\
+	x(transaction_restart,	transaction_restart_journal_preres_get)	\
+	x(transaction_restart,	transaction_restart_nested)		\
+	x(0,			lock_fail_node_reused)			\
+	x(0,			lock_fail_root_changed)			\
+	x(0,			journal_reclaim_would_deadlock)
 
 enum bch_errcode {
 	BCH_ERR_START		= 2048,
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index c0dda29..9f1ecb8 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -408,7 +408,7 @@ static int bch2_page_state_set(struct bch_fs *c, subvol_inum inum,
 	offset = iter.pos.offset;
 	bch2_trans_iter_exit(&trans, &iter);
 err:
-	if (ret == -EINTR)
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		goto retry;
 	bch2_trans_exit(&trans);
 
@@ -1018,10 +1018,9 @@ static void bchfs_read(struct btree_trans *trans,
 		 * read_extent -> io_time_reset may cause a transaction restart
 		 * without returning an error, we need to check for that here:
 		 */
-		if (!bch2_trans_relock(trans)) {
-			ret = -EINTR;
+		ret = bch2_trans_relock(trans);
+		if (ret)
 			break;
-		}
 
 		bch2_btree_iter_set_pos(&iter,
 				POS(inum.inum, rbio->bio.bi_iter.bi_sector));
@@ -1074,7 +1073,7 @@ static void bchfs_read(struct btree_trans *trans,
 err:
 	bch2_trans_iter_exit(trans, &iter);
 
-	if (ret == -EINTR)
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		goto retry;
 
 	if (ret) {
@@ -2035,7 +2034,7 @@ static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
 	offset = iter.pos.offset;
 	bch2_trans_iter_exit(&trans, &iter);
 err:
-	if (err == -EINTR)
+	if (bch2_err_matches(err, BCH_ERR_transaction_restart))
 		goto retry;
 	bch2_trans_exit(&trans);
 
@@ -2427,7 +2426,7 @@ static inline int range_has_data(struct bch_fs *c, u32 subvol,
 	start = iter.pos;
 	bch2_trans_iter_exit(&trans, &iter);
 err:
-	if (ret == -EINTR)
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		goto retry;
 
 	bch2_trans_exit(&trans);
@@ -2817,7 +2816,8 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
 	bch2_trans_copy_iter(&dst, &src);
 	bch2_trans_copy_iter(&del, &src);
 
-	while (ret == 0 || ret == -EINTR) {
+	while (ret == 0 ||
+	       bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
 		struct disk_reservation disk_res =
 			bch2_disk_reservation_init(c, 0);
 		struct bkey_i delete;
@@ -3019,7 +3019,7 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
 bkey_err:
 		bch2_quota_reservation_put(c, inode, &quota_res);
 		bch2_disk_reservation_put(c, &disk_res);
-		if (ret == -EINTR)
+		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			ret = 0;
 	}
 
@@ -3301,7 +3301,7 @@ static loff_t bch2_seek_data(struct file *file, u64 offset)
 	}
 	bch2_trans_iter_exit(&trans, &iter);
 err:
-	if (ret == -EINTR)
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		goto retry;
 
 	bch2_trans_exit(&trans);
@@ -3416,7 +3416,7 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset)
 	}
 	bch2_trans_iter_exit(&trans, &iter);
 err:
-	if (ret == -EINTR)
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		goto retry;
 
 	bch2_trans_exit(&trans);
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index 876552a..af494186 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -154,7 +154,7 @@ int __must_check bch2_write_inode(struct bch_fs *c,
 
 	bch2_trans_iter_exit(&trans, &iter);
 
-	if (ret == -EINTR)
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		goto retry;
 
 	bch2_trans_exit(&trans);
@@ -324,7 +324,7 @@ __bch2_create(struct mnt_idmap *idmap,
 		bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, -1,
 				KEY_TYPE_QUOTA_WARN);
 err_before_quota:
-		if (ret == -EINTR)
+		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			goto retry;
 		goto err_trans;
 	}
@@ -755,7 +755,7 @@ int bch2_setattr_nonsize(struct mnt_idmap *idmap,
 btree_err:
 	bch2_trans_iter_exit(&trans, &inode_iter);
 
-	if (ret == -EINTR)
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		goto retry;
 	if (unlikely(ret))
 		goto err_trans;
@@ -987,7 +987,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
 	start = iter.pos.offset;
 	bch2_trans_iter_exit(&trans, &iter);
 err:
-	if (ret == -EINTR)
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		goto retry;
 
 	if (!ret && have_extent)
@@ -1337,7 +1337,7 @@ static int bch2_get_name(struct dentry *parent, char *name, struct dentry *child
 	memcpy(name, d.v->d_name, name_len);
 	name[name_len] = '\0';
 err:
-	if (ret == -EINTR)
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		goto retry;
 
 	bch2_trans_iter_exit(&trans, &iter1);
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 021affc..29d731a 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -136,7 +136,7 @@ static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
 
 	ret = bch2_inode_unpack(k, inode);
 err:
-	if (ret && ret != -EINTR)
+	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		bch_err(trans->c, "error fetching inode %llu: %s",
 			inode_nr, bch2_err_str(ret));
 	bch2_trans_iter_exit(trans, &iter);
@@ -164,7 +164,7 @@ static int __lookup_inode(struct btree_trans *trans, u64 inode_nr,
 	if (!ret)
 		*snapshot = iter.pos.snapshot;
 err:
-	if (ret && ret != -EINTR)
+	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		bch_err(trans->c, "error fetching inode %llu:%u: %s",
 			inode_nr, *snapshot, bch2_err_str(ret));
 	bch2_trans_iter_exit(trans, &iter);
@@ -287,7 +287,7 @@ static int fsck_inode_rm(struct btree_trans *trans, u64 inum, u32 snapshot)
 				BTREE_INSERT_NOFAIL);
 err:
 	bch2_trans_iter_exit(trans, &iter);
-	if (ret == -EINTR)
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		goto retry;
 
 	return ret;
@@ -314,7 +314,7 @@ static int __remove_dirent(struct btree_trans *trans, struct bpos pos)
 				  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
 	bch2_trans_iter_exit(trans, &iter);
 err:
-	if (ret && ret != -EINTR)
+	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		bch_err(c, "error from __remove_dirent(): %s", bch2_err_str(ret));
 	return ret;
 }
@@ -350,7 +350,7 @@ static int lookup_lostfound(struct btree_trans *trans, u32 subvol,
 		goto create_lostfound;
 	}
 
-	if (ret && ret != -EINTR)
+	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		bch_err(c, "error looking up lost+found: %s", bch2_err_str(ret));
 	if (ret)
 		return ret;
@@ -373,7 +373,7 @@ static int lookup_lostfound(struct btree_trans *trans, u32 subvol,
 				lostfound, &lostfound_str,
 				0, 0, S_IFDIR|0700, 0, NULL, NULL,
 				(subvol_inum) { }, 0);
-	if (ret && ret != -EINTR)
+	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		bch_err(c, "error creating lost+found: %s", bch2_err_str(ret));
 	return ret;
 }
@@ -843,10 +843,10 @@ static int hash_check_key(struct btree_trans *trans,
 
 	ret = hash_redo_key(trans, desc, hash_info, k_iter, hash_k);
 	if (ret) {
-		bch_err(c, "hash_redo_key err %i", ret);
+		bch_err(c, "hash_redo_key err %s", bch2_err_str(ret));
 		return ret;
 	}
-	ret = -EINTR;
+	ret = -BCH_ERR_transaction_restart_nested;
 fsck_err:
 	goto out;
 }
@@ -1144,7 +1144,7 @@ static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
 		ret = write_inode(trans, &i->inode, i->snapshot);
 		if (ret)
 			break;
-		ret2 = -EINTR;
+		ret2 = -BCH_ERR_transaction_restart_nested;
 	}
 fsck_err:
 	if (ret)
@@ -1191,7 +1191,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
 		 * it shouldn't be but we need to fix the new i_sectors check
 		 * code and delete the old bch2_count_inode_sectors() first
 		 */
-		return -EINTR;
+		return -BCH_ERR_transaction_restart_nested;
 	}
 #if 0
 	if (bkey_cmp(prev.k->k.p, bkey_start_pos(k.k)) > 0) {
@@ -1202,7 +1202,8 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
 		bch2_bkey_val_to_text(&PBUF(buf2), c, k);
 
 		if (fsck_err(c, "overlapping extents:\n%s\n%s", buf1, buf2)) {
-			ret = fix_overlapping_extent(trans, k, prev.k->k.p) ?: -EINTR;
+			ret = fix_overlapping_extent(trans, k, prev.k->k.p)
+				?: -BCH_ERR_transaction_restart_nested;
 			goto out;
 		}
 	}
@@ -1287,8 +1288,8 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
 fsck_err:
 	printbuf_exit(&buf);
 
-	if (ret && ret != -EINTR)
-		bch_err(c, "error %i from check_extent()", ret);
+	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
+		bch_err(c, "error from check_extent(): %s", bch2_err_str(ret));
 	return ret;
 }
 
@@ -1364,7 +1365,7 @@ static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
 			ret = write_inode(trans, &i->inode, i->snapshot);
 			if (ret)
 				break;
-			ret2 = -EINTR;
+			ret2 = -BCH_ERR_transaction_restart_nested;
 		}
 	}
 fsck_err:
@@ -1487,7 +1488,7 @@ static int check_dirent_target(struct btree_trans *trans,
 fsck_err:
 	printbuf_exit(&buf);
 
-	if (ret && ret != -EINTR)
+	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		bch_err(c, "error from check_target(): %s", bch2_err_str(ret));
 	return ret;
 }
@@ -1530,7 +1531,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
 
 	if (!iter->path->should_be_locked) {
 		/* hack: see check_extent() */
-		return -EINTR;
+		return -BCH_ERR_transaction_restart_nested;
 	}
 
 	ret = __walk_inode(trans, dir, equiv);
@@ -1660,7 +1661,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
 fsck_err:
 	printbuf_exit(&buf);
 
-	if (ret && ret != -EINTR)
+	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		bch_err(c, "error from check_dirent(): %s", bch2_err_str(ret));
 	return ret;
 }
@@ -1735,7 +1736,7 @@ static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
 
 	ret = hash_check_key(trans, bch2_xattr_hash_desc, hash_info, iter, k);
 fsck_err:
-	if (ret && ret != -EINTR)
+	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		bch_err(c, "error from check_xattr(): %s", bch2_err_str(ret));
 	return ret;
 }
@@ -2016,8 +2017,6 @@ static int check_directory_structure(struct bch_fs *c)
 	}
 	bch2_trans_iter_exit(&trans, &iter);
 
-	BUG_ON(ret == -EINTR);
-
 	darray_exit(&path);
 
 	bch2_trans_exit(&trans);
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index 5de66d6..fc0f980 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -619,7 +619,7 @@ static int bch2_inode_delete_keys(struct btree_trans *trans,
 		      bch2_trans_commit(trans, NULL, NULL,
 					BTREE_INSERT_NOFAIL);
 err:
-		if (ret && ret != -EINTR)
+		if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			break;
 	}
 
@@ -690,7 +690,7 @@ int bch2_inode_rm(struct bch_fs *c, subvol_inum inum)
 				BTREE_INSERT_NOFAIL);
 err:
 	bch2_trans_iter_exit(&trans, &iter);
-	if (ret == -EINTR)
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		goto retry;
 
 	bch2_trans_exit(&trans);
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index f137a8e..dfa708c 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -390,7 +390,7 @@ int bch2_extent_update(struct btree_trans *trans,
 }
 
 /*
- * Returns -EINTR if we had to drop locks:
+ * Returns -BCH_ERR_transaction_restart if we had to drop locks:
  */
 int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
 		   subvol_inum inum, u64 end,
@@ -403,7 +403,8 @@ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
 	int ret = 0, ret2 = 0;
 	u32 snapshot;
 
-	while (!ret || ret == -EINTR) {
+	while (!ret ||
+	       bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
 		struct disk_reservation disk_res =
 			bch2_disk_reservation_init(c, 0);
 		struct bkey_i delete;
@@ -462,7 +463,10 @@ int bch2_fpunch(struct bch_fs *c, subvol_inum inum, u64 start, u64 end,
 	bch2_trans_iter_exit(&trans, &iter);
 	bch2_trans_exit(&trans);
 
-	return ret == -EINTR ? 0 : ret;
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+		ret = 0;
+
+	return ret;
 }
 
 static int bch2_write_index_default(struct bch_write_op *op)
@@ -493,7 +497,7 @@ static int bch2_write_index_default(struct bch_write_op *op)
 
 		ret = bch2_subvolume_get_snapshot(&trans, inum.subvol,
 						  &sk.k->k.p.snapshot);
-		if (ret == -EINTR)
+		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			continue;
 		if (ret)
 			break;
@@ -508,7 +512,7 @@ static int bch2_write_index_default(struct bch_write_op *op)
 					 op->flags & BCH_WRITE_CHECK_ENOSPC);
 		bch2_trans_iter_exit(&trans, &iter);
 
-		if (ret == -EINTR)
+		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			continue;
 		if (ret)
 			break;
@@ -663,7 +667,7 @@ static void __bch2_write_index(struct bch_write_op *op)
 			? bch2_write_index_default(op)
 			: bch2_data_update_index_update(op);
 
-		BUG_ON(ret == -EINTR);
+		BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
 		BUG_ON(keylist_sectors(keys) && !ret);
 
 		op->written += sectors_start - keylist_sectors(keys);
@@ -2429,10 +2433,9 @@ void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
 		 * read_extent -> io_time_reset may cause a transaction restart
 		 * without returning an error, we need to check for that here:
 		 */
-		if (!bch2_trans_relock(&trans)) {
-			ret = -EINTR;
+		ret = bch2_trans_relock(&trans);
+		if (ret)
 			break;
-		}
 
 		bch2_btree_iter_set_pos(&iter,
 				POS(inum.inum, bvec_iter.bi_sector));
@@ -2486,7 +2489,9 @@ void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
 err:
 	bch2_trans_iter_exit(&trans, &iter);
 
-	if (ret == -EINTR || ret == READ_RETRY || ret == READ_RETRY_AVOID)
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+	    ret == READ_RETRY ||
+	    ret == READ_RETRY_AVOID)
 		goto retry;
 
 	bch2_trans_exit(&trans);
diff --git a/fs/bcachefs/journal_seq_blacklist.c b/fs/bcachefs/journal_seq_blacklist.c
index d9b4042..5c555b37 100644
--- a/fs/bcachefs/journal_seq_blacklist.c
+++ b/fs/bcachefs/journal_seq_blacklist.c
@@ -272,7 +272,7 @@ void bch2_blacklist_entries_gc(struct work_struct *work)
 		       !test_bit(BCH_FS_STOPPING, &c->flags))
 			b = bch2_btree_iter_next_node(&iter);
 
-		if (ret == -EINTR)
+		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			goto retry;
 
 		bch2_trans_iter_exit(&trans, &iter);
diff --git a/fs/bcachefs/migrate.c b/fs/bcachefs/migrate.c
index baeca0e..8b258d9 100644
--- a/fs/bcachefs/migrate.c
+++ b/fs/bcachefs/migrate.c
@@ -146,7 +146,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
 			}
 
 			ret = bch2_btree_node_update_key(&trans, &iter, b, k.k, false);
-			if (ret == -EINTR) {
+			if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
 				ret = 0;
 				continue;
 			}
@@ -159,7 +159,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
 next:
 			bch2_btree_iter_next_node(&iter);
 		}
-		if (ret == -EINTR)
+		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			goto retry;
 
 		bch2_trans_iter_exit(&trans, &iter);
@@ -174,7 +174,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
 	bch2_trans_exit(&trans);
 	bch2_bkey_buf_exit(&k, c);
 
-	BUG_ON(ret == -EINTR);
+	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
 
 	return ret;
 }
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 7fba0f7..ea9ce6d 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -387,7 +387,7 @@ static int __bch2_move_data(struct moving_context *ctxt,
 			break;
 
 		ret = bkey_err(k);
-		if (ret == -EINTR)
+		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			continue;
 		if (ret)
 			break;
@@ -409,7 +409,7 @@ static int __bch2_move_data(struct moving_context *ctxt,
 			ret = lookup_inode(&trans,
 					SPOS(0, k.k->p.inode, k.k->p.snapshot),
 					&inode);
-			if (ret == -EINTR)
+			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 				continue;
 
 			if (!ret)
@@ -432,7 +432,7 @@ static int __bch2_move_data(struct moving_context *ctxt,
 		ret2 = bch2_move_extent(&trans, ctxt, io_opts,
 					btree_id, k, data_opts);
 		if (ret2) {
-			if (ret2 == -EINTR)
+			if (bch2_err_matches(ret2, BCH_ERR_transaction_restart))
 				continue;
 
 			if (ret2 == -ENOMEM) {
@@ -546,14 +546,14 @@ static int bch2_move_btree(struct bch_fs *c,
 				goto next;
 
 			ret = bch2_btree_node_rewrite(&trans, &iter, b, 0) ?: ret;
-			if (ret == -EINTR)
+			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 				continue;
 			if (ret)
 				break;
 next:
 			bch2_btree_iter_next_node(&iter);
 		}
-		if (ret == -EINTR)
+		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			goto retry;
 
 		bch2_trans_iter_exit(&trans, &iter);
diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c
index 2038e35..d5c14bb 100644
--- a/fs/bcachefs/reflink.c
+++ b/fs/bcachefs/reflink.c
@@ -299,7 +299,8 @@ s64 bch2_remap_range(struct bch_fs *c,
 	bch2_trans_iter_init(&trans, &dst_iter, BTREE_ID_extents, dst_start,
 			     BTREE_ITER_INTENT);
 
-	while ((ret == 0 || ret == -EINTR) &&
+	while ((ret == 0 ||
+		bch2_err_matches(ret, BCH_ERR_transaction_restart)) &&
 	       bkey_cmp(dst_iter.pos, dst_end) < 0) {
 		struct disk_reservation disk_res = { 0 };
 
@@ -409,7 +410,7 @@ s64 bch2_remap_range(struct bch_fs *c,
 		}
 
 		bch2_trans_iter_exit(&trans, &inode_iter);
-	} while (ret2 == -EINTR);
+	} while (bch2_err_matches(ret2, BCH_ERR_transaction_restart));
 
 	bch2_trans_exit(&trans);
 	bch2_bkey_buf_exit(&new_src, c);
diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c
index 0469b90..b5b0f5e 100644
--- a/fs/bcachefs/subvolume.c
+++ b/fs/bcachefs/subvolume.c
@@ -315,7 +315,7 @@ static int check_subvol(struct btree_trans *trans,
 
 	if (BCH_SUBVOLUME_UNLINKED(subvol.v)) {
 		ret = bch2_subvolume_delete(trans, iter->pos.offset);
-		if (ret && ret != -EINTR)
+		if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			bch_err(trans->c, "error deleting subvolume %llu: %s",
 				iter->pos.offset, bch2_err_str(ret));
 		if (ret)
diff --git a/fs/bcachefs/tests.c b/fs/bcachefs/tests.c
index bf0a33c..c6cac5c 100644
--- a/fs/bcachefs/tests.c
+++ b/fs/bcachefs/tests.c
@@ -640,7 +640,7 @@ static int rand_mixed_trans(struct btree_trans *trans,
 
 	k = bch2_btree_iter_peek(iter);
 	ret = bkey_err(k);
-	if (ret && ret != -EINTR)
+	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		bch_err(trans->c, "lookup error in rand_mixed: %s", bch2_err_str(ret));
 	if (ret)
 		return ret;
diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c
index 5df61b6..37793b3 100644
--- a/fs/bcachefs/xattr.c
+++ b/fs/bcachefs/xattr.c
@@ -344,7 +344,7 @@ ssize_t bch2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
 	offset = iter.pos.offset;
 	bch2_trans_iter_exit(&trans, &iter);
 err:
-	if (ret == -EINTR)
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		goto retry;
 
 	bch2_trans_exit(&trans);