blob: e3716516ca3876a60e9ab0fc6d1f7a2c159b0e1d [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

Al Viro5f60d5f2024-10-01 15:35:57 -04006#include <linux/unaligned.h>
Josef Bacik9b569ea2022-10-19 10:50:49 -04007#include "messages.h"
David Sterba2b712e32024-01-25 17:44:47 +01008#include "extent_io.h"
9#include "fs.h"
Josef Bacikad1ac502022-10-19 10:50:59 -040010#include "accessors.h"
Li Zefan18077bb2012-07-09 20:22:35 -060011
David Sterba5e394682020-04-30 23:38:11 +020012static bool check_setget_bounds(const struct extent_buffer *eb,
13 const void *ptr, unsigned off, int size)
14{
15 const unsigned long member_offset = (unsigned long)ptr + off;
16
David Sterba234fdd22020-07-27 20:59:20 +020017 if (unlikely(member_offset + size > eb->len)) {
David Sterba5e394682020-04-30 23:38:11 +020018 btrfs_warn(eb->fs_info,
David Sterba234fdd22020-07-27 20:59:20 +020019 "bad eb member %s: ptr 0x%lx start %llu member offset %lu size %d",
20 (member_offset > eb->len ? "start" : "end"),
David Sterba5e394682020-04-30 23:38:11 +020021 (unsigned long)ptr, eb->start, member_offset, size);
22 return false;
23 }
24
25 return true;
26}
27
Josef Bacikad1ac502022-10-19 10:50:59 -040028void btrfs_init_map_token(struct btrfs_map_token *token, struct extent_buffer *eb)
29{
30 token->eb = eb;
Qu Wenruo082d5bb2023-12-07 09:39:27 +103031 token->kaddr = folio_address(eb->folios[0]);
Josef Bacikad1ac502022-10-19 10:50:59 -040032 token->offset = 0;
33}
34
/*
 * Macro templates that define helpers to read/write extent buffer data of a
 * given size, that are also used via ctree.h for access to item members by
 * specialized helpers.
 *
 * Generic helpers:
 * - btrfs_set_8 (for 8/16/32/64)
 * - btrfs_get_8 (for 8/16/32/64)
 *
 * Generic helpers with a token (cached address of the most recently accessed
 * page):
 * - btrfs_set_token_8 (for 8/16/32/64)
 * - btrfs_get_token_8 (for 8/16/32/64)
 *
 * The set/get functions handle data spanning two pages transparently, in case
 * metadata block size is larger than page.  Every pointer to metadata items is
 * an offset into the extent buffer page array, cast to a specific type.  This
 * gives us all the type checking.
 *
 * The extent buffer pages stored in the array folios may not form a contiguous
 * physical range, but the API functions assume the linear offset to the range
 * from 0 to metadata node size.
 */
58
/*
 * Template generating btrfs_get_token_N, btrfs_get_N, btrfs_set_token_N and
 * btrfs_set_N for N in {8, 16, 32, 64}.  Values are stored little-endian and
 * possibly unaligned; accesses that straddle a folio boundary are stitched
 * together through a small on-stack buffer (lebytes).  The token variants
 * cache the kernel address and linear offset of the last folio touched to
 * avoid repeated index/address lookups on sequential member access.
 */
#define DEFINE_BTRFS_SETGET_BITS(bits)					\
u##bits btrfs_get_token_##bits(struct btrfs_map_token *token,		\
			       const void *ptr, unsigned long off)	\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long idx = get_eb_folio_index(token->eb, member_offset); \
	const unsigned long oil = get_eb_offset_in_folio(token->eb,	\
							 member_offset);\
	const int unit_size = token->eb->folio_size;			\
	const int unit_shift = token->eb->folio_shift;			\
	const int size = sizeof(u##bits);				\
	u8 lebytes[sizeof(u##bits)];					\
	const int part = unit_size - oil;				\
									\
	ASSERT(token);							\
	ASSERT(token->kaddr);						\
	ASSERT(check_setget_bounds(token->eb, ptr, off, size));		\
	/* Fast path: the whole member lies in the cached folio. */	\
	if (token->offset <= member_offset &&				\
	    member_offset + size <= token->offset + unit_size) {	\
		return get_unaligned_le##bits(token->kaddr + oil);	\
	}								\
	/* Re-map the folio containing the member and refresh cache. */	\
	token->kaddr = folio_address(token->eb->folios[idx]);		\
	token->offset = idx << unit_shift;				\
	if (INLINE_EXTENT_BUFFER_PAGES == 1 || oil + size <= unit_size) \
		return get_unaligned_le##bits(token->kaddr + oil);	\
									\
	/* Member straddles two folios: copy both parts and decode. */	\
	memcpy(lebytes, token->kaddr + oil, part);			\
	token->kaddr = folio_address(token->eb->folios[idx + 1]);	\
	token->offset = (idx + 1) << unit_shift;			\
	memcpy(lebytes + part, token->kaddr, size - part);		\
	return get_unaligned_le##bits(lebytes);				\
}									\
u##bits btrfs_get_##bits(const struct extent_buffer *eb,		\
			 const void *ptr, unsigned long off)		\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long idx = get_eb_folio_index(eb, member_offset);\
	const unsigned long oil = get_eb_offset_in_folio(eb,		\
							 member_offset);\
	const int unit_size = eb->folio_size;				\
	char *kaddr = folio_address(eb->folios[idx]);			\
	const int size = sizeof(u##bits);				\
	const int part = unit_size - oil;				\
	u8 lebytes[sizeof(u##bits)];					\
									\
	ASSERT(check_setget_bounds(eb, ptr, off, size));		\
	if (INLINE_EXTENT_BUFFER_PAGES == 1 || oil + size <= unit_size) \
		return get_unaligned_le##bits(kaddr + oil);		\
									\
	/* Member straddles two folios: copy both parts and decode. */	\
	memcpy(lebytes, kaddr + oil, part);				\
	kaddr = folio_address(eb->folios[idx + 1]);			\
	memcpy(lebytes + part, kaddr, size - part);			\
	return get_unaligned_le##bits(lebytes);				\
}									\
void btrfs_set_token_##bits(struct btrfs_map_token *token,		\
			    const void *ptr, unsigned long off,		\
			    u##bits val)				\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long idx = get_eb_folio_index(token->eb, member_offset); \
	const unsigned long oil = get_eb_offset_in_folio(token->eb,	\
							 member_offset);\
	const int unit_size = token->eb->folio_size;			\
	const int unit_shift = token->eb->folio_shift;			\
	const int size = sizeof(u##bits);				\
	u8 lebytes[sizeof(u##bits)];					\
	const int part = unit_size - oil;				\
									\
	ASSERT(token);							\
	ASSERT(token->kaddr);						\
	ASSERT(check_setget_bounds(token->eb, ptr, off, size));		\
	/* Fast path: the whole member lies in the cached folio. */	\
	if (token->offset <= member_offset &&				\
	    member_offset + size <= token->offset + unit_size) {	\
		put_unaligned_le##bits(val, token->kaddr + oil);	\
		return;							\
	}								\
	/* Re-map the folio containing the member and refresh cache. */	\
	token->kaddr = folio_address(token->eb->folios[idx]);		\
	token->offset = idx << unit_shift;				\
	if (INLINE_EXTENT_BUFFER_PAGES == 1 ||				\
	    oil + size <= unit_size) {					\
		put_unaligned_le##bits(val, token->kaddr + oil);	\
		return;							\
	}								\
	/* Member straddles two folios: encode then copy both parts. */	\
	put_unaligned_le##bits(val, lebytes);				\
	memcpy(token->kaddr + oil, lebytes, part);			\
	token->kaddr = folio_address(token->eb->folios[idx + 1]);	\
	token->offset = (idx + 1) << unit_shift;			\
	memcpy(token->kaddr, lebytes + part, size - part);		\
}									\
void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr,	\
		      unsigned long off, u##bits val)			\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long idx = get_eb_folio_index(eb, member_offset);\
	const unsigned long oil = get_eb_offset_in_folio(eb,		\
							 member_offset);\
	const int unit_size = eb->folio_size;				\
	char *kaddr = folio_address(eb->folios[idx]);			\
	const int size = sizeof(u##bits);				\
	const int part = unit_size - oil;				\
	u8 lebytes[sizeof(u##bits)];					\
									\
	ASSERT(check_setget_bounds(eb, ptr, off, size));		\
	if (INLINE_EXTENT_BUFFER_PAGES == 1 ||				\
	    oil + size <= unit_size) {					\
		put_unaligned_le##bits(val, kaddr + oil);		\
		return;							\
	}								\
									\
	/* Member straddles two folios: encode then copy both parts. */	\
	put_unaligned_le##bits(val, lebytes);				\
	memcpy(kaddr + oil, lebytes, part);				\
	kaddr = folio_address(eb->folios[idx + 1]);			\
	memcpy(kaddr, lebytes + part, size - part);			\
}
Chris Mason0f827312007-10-15 16:18:56 -0400173
Li Zefan18077bb2012-07-09 20:22:35 -0600174DEFINE_BTRFS_SETGET_BITS(8)
175DEFINE_BTRFS_SETGET_BITS(16)
176DEFINE_BTRFS_SETGET_BITS(32)
177DEFINE_BTRFS_SETGET_BITS(64)
Chris Mason0f827312007-10-15 16:18:56 -0400178
Jeff Mahoney1cbb1f42017-06-28 21:56:53 -0600179void btrfs_node_key(const struct extent_buffer *eb,
Chris Masone644d022007-11-06 15:09:29 -0500180 struct btrfs_disk_key *disk_key, int nr)
181{
Josef Bacike23efd82022-11-15 11:16:16 -0500182 unsigned long ptr = btrfs_node_key_ptr_offset(eb, nr);
Chris Masone644d022007-11-06 15:09:29 -0500183 read_eb_member(eb, (struct btrfs_key_ptr *)ptr,
184 struct btrfs_key_ptr, key, disk_key);
185}