blob: 7773f3d855a9598f97ecc9b94842590aeded47b7 [file] [log] [blame]
David Howells3d3c9502020-05-13 17:41:20 +01001/* SPDX-License-Identifier: GPL-2.0-or-later */
2/* Internal definitions for network filesystem support
3 *
4 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
6 */
7
David Howells47757ea2023-11-20 15:29:09 +00008#include <linux/slab.h>
9#include <linux/seq_file.h>
David Howells3a4a38e62022-02-17 13:30:38 +000010#include <linux/netfs.h>
David Howellsbc899ee2021-06-29 22:37:05 +010011#include <linux/fscache.h>
David Howells915cd302023-11-20 15:55:18 +000012#include <linux/fscache-cache.h>
David Howells3a4a38e62022-02-17 13:30:38 +000013#include <trace/events/netfs.h>
David Howells915cd302023-11-20 15:55:18 +000014#include <trace/events/fscache.h>
David Howells3a4a38e62022-02-17 13:30:38 +000015
David Howells3d3c9502020-05-13 17:41:20 +010016#ifdef pr_fmt
17#undef pr_fmt
18#endif
19
20#define pr_fmt(fmt) "netfs: " fmt
21
22/*
David Howells93345c32022-03-01 15:55:15 +000023 * buffered_read.c
24 */
25void netfs_rreq_unlock_folios(struct netfs_io_request *rreq);
David Howellsc38f4e92021-06-17 13:09:21 +010026int netfs_prefetch_for_write(struct file *file, struct folio *folio,
27 size_t offset, size_t len);
David Howells93345c32022-03-01 15:55:15 +000028
29/*
David Howells3be01752022-03-07 21:57:24 +000030 * io.c
31 */
David Howells3be01752022-03-07 21:57:24 +000032int netfs_begin_read(struct netfs_io_request *rreq, bool sync);
33
34/*
David Howellsb900f4b2022-03-01 15:25:00 +000035 * main.c
36 */
David Howellsa9d47a52024-07-18 21:07:32 +010037extern unsigned int netfs_debug;
David Howells87b57a02022-03-04 10:34:27 +000038extern struct list_head netfs_io_requests;
39extern spinlock_t netfs_proc_lock;
David Howellsd9f85a02024-03-15 14:37:18 +000040extern mempool_t netfs_request_pool;
41extern mempool_t netfs_subrequest_pool;
David Howells87b57a02022-03-04 10:34:27 +000042
43#ifdef CONFIG_PROC_FS
/*
 * Publish an I/O request on the global netfs_io_requests list (used by the
 * /proc interface when CONFIG_PROC_FS is enabled).
 */
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq)
{
	spin_lock(&netfs_proc_lock);
	list_add_tail_rcu(&rreq->proc_link, &netfs_io_requests);
	spin_unlock(&netfs_proc_lock);
}
50static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq)
51{
52 if (!list_empty(&rreq->proc_link)) {
53 spin_lock(&netfs_proc_lock);
54 list_del_rcu(&rreq->proc_link);
55 spin_unlock(&netfs_proc_lock);
56 }
57}
58#else
59static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq) {}
60static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
61#endif
David Howellsb900f4b2022-03-01 15:25:00 +000062
63/*
David Howells7d828a02023-09-22 13:25:22 +010064 * misc.c
65 */
David Howells7d828a02023-09-22 13:25:22 +010066
67/*
David Howells3a4a38e62022-02-17 13:30:38 +000068 * objects.c
69 */
David Howells663dfb62021-08-26 09:24:42 -040070struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
71 struct file *file,
David Howells663dfb62021-08-26 09:24:42 -040072 loff_t start, size_t len,
73 enum netfs_io_origin origin);
David Howellsde740232022-02-17 21:13:05 +000074void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
David Howells3a4a38e62022-02-17 13:30:38 +000075void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
David Howellsde740232022-02-17 21:13:05 +000076void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
77 enum netfs_rreq_ref_trace what);
David Howells3a4a38e62022-02-17 13:30:38 +000078struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);
David Howells3a4a38e62022-02-17 13:30:38 +000079
/*
 * Note the current refcount of a request in the tracepoint log without
 * changing it.
 */
static inline void netfs_see_request(struct netfs_io_request *rreq,
				     enum netfs_rreq_ref_trace what)
{
	trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
}
85
David Howells3a4a38e62022-02-17 13:30:38 +000086/*
David Howells289af542020-11-03 11:32:41 +000087 * stats.c
88 */
89#ifdef CONFIG_NETFS_STATS
David Howells016dc852022-01-14 17:39:55 +000090extern atomic_t netfs_n_rh_dio_read;
David Howells289af542020-11-03 11:32:41 +000091extern atomic_t netfs_n_rh_readahead;
David Howells4824e592024-03-26 08:48:44 +000092extern atomic_t netfs_n_rh_read_folio;
David Howells289af542020-11-03 11:32:41 +000093extern atomic_t netfs_n_rh_rreq;
94extern atomic_t netfs_n_rh_sreq;
95extern atomic_t netfs_n_rh_download;
96extern atomic_t netfs_n_rh_download_done;
97extern atomic_t netfs_n_rh_download_failed;
98extern atomic_t netfs_n_rh_download_instead;
99extern atomic_t netfs_n_rh_read;
100extern atomic_t netfs_n_rh_read_done;
101extern atomic_t netfs_n_rh_read_failed;
102extern atomic_t netfs_n_rh_zero;
103extern atomic_t netfs_n_rh_short_read;
104extern atomic_t netfs_n_rh_write;
David Howellse1b12402020-09-22 11:06:07 +0100105extern atomic_t netfs_n_rh_write_begin;
David Howells289af542020-11-03 11:32:41 +0000106extern atomic_t netfs_n_rh_write_done;
107extern atomic_t netfs_n_rh_write_failed;
David Howellse1b12402020-09-22 11:06:07 +0100108extern atomic_t netfs_n_rh_write_zskip;
David Howells4824e592024-03-26 08:48:44 +0000109extern atomic_t netfs_n_wh_buffered_write;
110extern atomic_t netfs_n_wh_writethrough;
111extern atomic_t netfs_n_wh_dio_write;
112extern atomic_t netfs_n_wh_writepages;
David Howells92a714d2024-01-04 15:52:11 +0000113extern atomic_t netfs_n_wh_wstream_conflict;
David Howells16af1342022-02-09 19:52:13 +0000114extern atomic_t netfs_n_wh_upload;
115extern atomic_t netfs_n_wh_upload_done;
116extern atomic_t netfs_n_wh_upload_failed;
117extern atomic_t netfs_n_wh_write;
118extern atomic_t netfs_n_wh_write_done;
119extern atomic_t netfs_n_wh_write_failed;
David Howells289af542020-11-03 11:32:41 +0000120
David Howells7eb5b3e2023-11-21 15:43:52 +0000121int netfs_stats_show(struct seq_file *m, void *v);
David Howells289af542020-11-03 11:32:41 +0000122
/* Increment a netfs statistics counter (CONFIG_NETFS_STATS builds only). */
static inline void netfs_stat(atomic_t *stat)
{
	atomic_inc(stat);
}
127
/* Decrement a netfs statistics counter (CONFIG_NETFS_STATS builds only). */
static inline void netfs_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}
132
133#else
David Howells3d3c9502020-05-13 17:41:20 +0100134#define netfs_stat(x) do {} while(0)
135#define netfs_stat_d(x) do {} while(0)
David Howells289af542020-11-03 11:32:41 +0000136#endif
David Howells3d3c9502020-05-13 17:41:20 +0100137
David Howellsbc899ee2021-06-29 22:37:05 +0100138/*
David Howells288ace22024-03-18 16:52:05 +0000139 * write_collect.c
140 */
141int netfs_folio_written_back(struct folio *folio);
142void netfs_write_collection_worker(struct work_struct *work);
143void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async);
144
145/*
146 * write_issue.c
147 */
148struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
149 struct file *file,
150 loff_t start,
151 enum netfs_io_origin origin);
152void netfs_reissue_write(struct netfs_io_stream *stream,
153 struct netfs_io_subrequest *subreq);
154int netfs_advance_write(struct netfs_io_request *wreq,
155 struct netfs_io_stream *stream,
156 loff_t start, size_t len, bool to_eof);
David Howells2df86542024-03-08 12:36:05 +0000157struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
158int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
159 struct folio *folio, size_t copied, bool to_page_end,
160 struct folio **writethrough_cache);
161int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
162 struct folio *writethrough_cache);
David Howells288ace22024-03-18 16:52:05 +0000163int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);
164
165/*
David Howellsbc899ee2021-06-29 22:37:05 +0100166 * Miscellaneous functions.
167 */
/*
 * Determine whether local caching is in effect for an inode: true only when
 * fscache is compiled in and the inode's cookie is valid, is bound to a cache
 * (cache_priv set) and is enabled.  Always false without CONFIG_FSCACHE.
 */
static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
{
#if IS_ENABLED(CONFIG_FSCACHE)
	struct fscache_cookie *cookie = ctx->cache;

	return fscache_cookie_valid(cookie) && cookie->cache_priv &&
		fscache_cookie_enabled(cookie);
#else
	return false;
#endif
}
179
David Howells915cd302023-11-20 15:55:18 +0000180/*
David Howells9ebff832023-09-29 17:28:25 +0100181 * Get a ref on a netfs group attached to a dirty page (e.g. a ceph snap).
182 */
183static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
184{
David Howells2ff1e972024-03-19 10:00:09 +0000185 if (netfs_group && netfs_group != NETFS_FOLIO_COPY_TO_CACHE)
David Howells9ebff832023-09-29 17:28:25 +0100186 refcount_inc(&netfs_group->ref);
187 return netfs_group;
188}
189
190/*
191 * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
192 */
193static inline void netfs_put_group(struct netfs_group *netfs_group)
194{
David Howells2ff1e972024-03-19 10:00:09 +0000195 if (netfs_group &&
196 netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
197 refcount_dec_and_test(&netfs_group->ref))
David Howells9ebff832023-09-29 17:28:25 +0100198 netfs_group->free(netfs_group);
199}
200
201/*
202 * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
203 */
204static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
205{
David Howells2ff1e972024-03-19 10:00:09 +0000206 if (netfs_group &&
207 netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
208 refcount_sub_and_test(nr, &netfs_group->ref))
David Howells9ebff832023-09-29 17:28:25 +0100209 netfs_group->free(netfs_group);
210}
211
212/*
David Howells915cd302023-11-20 15:55:18 +0000213 * fscache-cache.c
214 */
215#ifdef CONFIG_PROC_FS
216extern const struct seq_operations fscache_caches_seq_ops;
217#endif
218bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
219void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
220struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
221void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);
222
/*
 * Read the cache's state.  The acquire barrier pairs with the release in
 * fscache_set_cache_state() so that data set up before a state change is
 * seen by anyone who observes the new state.
 */
static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
{
	return smp_load_acquire(&cache->state);
}
227
/* Check whether a cache is in the active state. */
static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
{
	return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
}
232
/*
 * Set the cache's state.  The release barrier pairs with the acquire in
 * fscache_cache_state() so that writes made before the transition are
 * visible to anyone who sees the new state.
 */
static inline void fscache_set_cache_state(struct fscache_cache *cache,
					   enum fscache_cache_state new_state)
{
	smp_store_release(&cache->state, new_state);
}
239
/*
 * Atomically transition the cache's state from old_state to new_state with
 * release ordering.  Returns true if the swap took place, false if the state
 * was no longer old_state.
 */
static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
						 enum fscache_cache_state old_state,
						 enum fscache_cache_state new_state)
{
	return try_cmpxchg_release(&cache->state, &old_state, new_state);
}
246
247/*
248 * fscache-cookie.c
249 */
250extern struct kmem_cache *fscache_cookie_jar;
251#ifdef CONFIG_PROC_FS
252extern const struct seq_operations fscache_cookies_seq_ops;
253#endif
254extern struct timer_list fscache_cookie_lru_timer;
255
256extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
257extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
258 enum fscache_access_trace why);
259
/*
 * Note the current refcount of a cookie in the tracepoint log without
 * changing it.
 */
static inline void fscache_see_cookie(struct fscache_cookie *cookie,
				      enum fscache_cookie_trace where)
{
	trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
			     where);
}
266
267/*
268 * fscache-main.c
269 */
270extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
David Howells7eb5b3e2023-11-21 15:43:52 +0000271#ifdef CONFIG_FSCACHE
272int __init fscache_init(void);
273void __exit fscache_exit(void);
274#else
275static inline int fscache_init(void) { return 0; }
276static inline void fscache_exit(void) {}
277#endif
David Howells915cd302023-11-20 15:55:18 +0000278
279/*
280 * fscache-proc.c
281 */
282#ifdef CONFIG_PROC_FS
283extern int __init fscache_proc_init(void);
284extern void fscache_proc_cleanup(void);
285#else
286#define fscache_proc_init() (0)
287#define fscache_proc_cleanup() do {} while (0)
288#endif
289
290/*
291 * fscache-stats.c
292 */
293#ifdef CONFIG_FSCACHE_STATS
294extern atomic_t fscache_n_volumes;
295extern atomic_t fscache_n_volumes_collision;
296extern atomic_t fscache_n_volumes_nomem;
297extern atomic_t fscache_n_cookies;
298extern atomic_t fscache_n_cookies_lru;
299extern atomic_t fscache_n_cookies_lru_expired;
300extern atomic_t fscache_n_cookies_lru_removed;
301extern atomic_t fscache_n_cookies_lru_dropped;
302
303extern atomic_t fscache_n_acquires;
304extern atomic_t fscache_n_acquires_ok;
305extern atomic_t fscache_n_acquires_oom;
306
307extern atomic_t fscache_n_invalidates;
308
309extern atomic_t fscache_n_relinquishes;
310extern atomic_t fscache_n_relinquishes_retire;
311extern atomic_t fscache_n_relinquishes_dropped;
312
313extern atomic_t fscache_n_resizes;
314extern atomic_t fscache_n_resizes_null;
315
316static inline void fscache_stat(atomic_t *stat)
317{
318 atomic_inc(stat);
319}
320
321static inline void fscache_stat_d(atomic_t *stat)
322{
323 atomic_dec(stat);
324}
325
326#define __fscache_stat(stat) (stat)
327
David Howells7eb5b3e2023-11-21 15:43:52 +0000328int fscache_stats_show(struct seq_file *m);
David Howells915cd302023-11-20 15:55:18 +0000329#else
330
331#define __fscache_stat(stat) (NULL)
332#define fscache_stat(stat) do {} while (0)
333#define fscache_stat_d(stat) do {} while (0)
David Howells7eb5b3e2023-11-21 15:43:52 +0000334
335static inline int fscache_stats_show(struct seq_file *m) { return 0; }
David Howells915cd302023-11-20 15:55:18 +0000336#endif
337
338/*
339 * fscache-volume.c
340 */
341#ifdef CONFIG_PROC_FS
342extern const struct seq_operations fscache_volumes_seq_ops;
343#endif
344
345struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
346 enum fscache_volume_trace where);
David Howells915cd302023-11-20 15:55:18 +0000347bool fscache_begin_volume_access(struct fscache_volume *volume,
348 struct fscache_cookie *cookie,
349 enum fscache_access_trace why);
350void fscache_create_volume(struct fscache_volume *volume, bool wait);
351
David Howells3d3c9502020-05-13 17:41:20 +0100352/*****************************************************************************/
353/*
354 * debug tracing
355 */
356#define dbgprintk(FMT, ...) \
David Howellsa9d47a52024-07-18 21:07:32 +0100357 printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
David Howells3d3c9502020-05-13 17:41:20 +0100358
359#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
360#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
361#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)
362
David Howellsa9d47a52024-07-18 21:07:32 +0100363#ifdef __KDEBUG
364#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
365#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
366#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)
367
368#elif defined(CONFIG_NETFS_DEBUG)
369#define _enter(FMT, ...) \
370do { \
371 if (netfs_debug) \
372 kenter(FMT, ##__VA_ARGS__); \
373} while (0)
374
375#define _leave(FMT, ...) \
376do { \
377 if (netfs_debug) \
378 kleave(FMT, ##__VA_ARGS__); \
379} while (0)
380
381#define _debug(FMT, ...) \
382do { \
383 if (netfs_debug) \
384 kdebug(FMT, ##__VA_ARGS__); \
385} while (0)
386
387#else
388#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
389#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
390#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
391#endif
392
David Howells915cd302023-11-20 15:55:18 +0000393/*
394 * assertions
395 */
396#if 1 /* defined(__KDEBUGALL) */
397
398#define ASSERT(X) \
399do { \
400 if (unlikely(!(X))) { \
401 pr_err("\n"); \
402 pr_err("Assertion failed\n"); \
403 BUG(); \
404 } \
405} while (0)
406
407#define ASSERTCMP(X, OP, Y) \
408do { \
409 if (unlikely(!((X) OP (Y)))) { \
410 pr_err("\n"); \
411 pr_err("Assertion failed\n"); \
412 pr_err("%lx " #OP " %lx is false\n", \
413 (unsigned long)(X), (unsigned long)(Y)); \
414 BUG(); \
415 } \
416} while (0)
417
418#define ASSERTIF(C, X) \
419do { \
420 if (unlikely((C) && !(X))) { \
421 pr_err("\n"); \
422 pr_err("Assertion failed\n"); \
423 BUG(); \
424 } \
425} while (0)
426
427#define ASSERTIFCMP(C, X, OP, Y) \
428do { \
429 if (unlikely((C) && !((X) OP (Y)))) { \
430 pr_err("\n"); \
431 pr_err("Assertion failed\n"); \
432 pr_err("%lx " #OP " %lx is false\n", \
433 (unsigned long)(X), (unsigned long)(Y)); \
434 BUG(); \
435 } \
436} while (0)
437
438#else
439
440#define ASSERT(X) do {} while (0)
441#define ASSERTCMP(X, OP, Y) do {} while (0)
442#define ASSERTIF(C, X) do {} while (0)
443#define ASSERTIFCMP(C, X, OP, Y) do {} while (0)
444
445#endif /* assert or not */