// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <asm/unaligned.h>

#include "ctree.h"

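/*
 * Sanity check a member access against the extent buffer bounds before the
 * set/get helpers touch the pages.  As a hypothetical illustration: with the
 * default 16K nodesize, eb->len is 16384, so reading an 8 byte member that
 * starts at offset 16380 passes the first check but fails the second one,
 * because 16380 + 8 > 16384.
 */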
static bool check_setget_bounds(const struct extent_buffer *eb,
				const void *ptr, unsigned off, int size)
{
	const unsigned long member_offset = (unsigned long)ptr + off;

	if (member_offset > eb->len) {
		btrfs_warn(eb->fs_info,
		"bad eb member start: ptr 0x%lx start %llu member offset %lu size %d",
			   (unsigned long)ptr, eb->start, member_offset, size);
		return false;
	}
	if (member_offset + size > eb->len) {
		btrfs_warn(eb->fs_info,
		"bad eb member end: ptr 0x%lx start %llu member offset %lu size %d",
			   (unsigned long)ptr, eb->start, member_offset, size);
		return false;
	}

	return true;
}

/*
 * Macro templates that define helpers to read/write extent buffer data of a
 * given size, that are also used via ctree.h for access to item members by
 * specialized helpers.
 *
 * Generic helpers:
 * - btrfs_set_8 (for 8/16/32/64)
 * - btrfs_get_8 (for 8/16/32/64)
 *
 * Generic helpers with a token (cached address of the most recently accessed
 * page):
 * - btrfs_set_token_8 (for 8/16/32/64)
 * - btrfs_get_token_8 (for 8/16/32/64)
 *
 * The set/get functions handle data spanning two pages transparently, in case
 * the metadata block size is larger than a page.  Every pointer to metadata
 * items is an offset into the extent buffer page array, cast to a specific
 * type.  This gives us all the type checking.
 *
 * The extent buffer pages stored in the pages array do not form a contiguous
 * physical range, but the API functions assume a linear offset in the range
 * from 0 to the metadata node size.
 */

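/*
 * Note on usage: the specialized accessors in ctree.h are thin wrappers
 * around the generic helpers defined by the template below.  As a rough
 * sketch (not the literal ctree.h text), a declaration like
 * BTRFS_SETGET_FUNCS(item_offset, struct btrfs_item, offset, 32) expands to
 * something along the lines of:
 *
 *	static inline u32 btrfs_item_offset(const struct extent_buffer *eb,
 *					    struct btrfs_item *s)
 *	{
 *		return btrfs_get_32(eb, s, offsetof(struct btrfs_item, offset));
 *	}
 *
 * plus the matching set and token variants.  A caller that accesses several
 * members of the same item can set up a token with
 * btrfs_init_map_token(&token, eb) and use the token variants, so the cached
 * page address is reused between calls instead of being looked up every time.
 */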
#define DEFINE_BTRFS_SETGET_BITS(bits) \
u##bits btrfs_get_token_##bits(struct btrfs_map_token *token, \
			       const void *ptr, unsigned long off) \
{ \
	const unsigned long member_offset = (unsigned long)ptr + off; \
	const unsigned long idx = get_eb_page_index(member_offset); \
	const unsigned long oip = get_eb_offset_in_page(token->eb, \
							member_offset); \
	const int size = sizeof(u##bits); \
	u8 lebytes[sizeof(u##bits)]; \
	const int part = PAGE_SIZE - oip; \
	\
	ASSERT(token); \
	ASSERT(token->kaddr); \
	ASSERT(check_setget_bounds(token->eb, ptr, off, size)); \
	if (token->offset <= member_offset && \
	    member_offset + size <= token->offset + PAGE_SIZE) { \
		return get_unaligned_le##bits(token->kaddr + oip); \
	} \
	token->kaddr = page_address(token->eb->pages[idx]); \
	token->offset = idx << PAGE_SHIFT; \
	if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE) \
		return get_unaligned_le##bits(token->kaddr + oip); \
	\
	memcpy(lebytes, token->kaddr + oip, part); \
	token->kaddr = page_address(token->eb->pages[idx + 1]); \
	token->offset = (idx + 1) << PAGE_SHIFT; \
	memcpy(lebytes + part, token->kaddr, size - part); \
	return get_unaligned_le##bits(lebytes); \
} \
u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
			 const void *ptr, unsigned long off) \
{ \
	const unsigned long member_offset = (unsigned long)ptr + off; \
	const unsigned long oip = get_eb_offset_in_page(eb, member_offset); \
	const unsigned long idx = get_eb_page_index(member_offset); \
	char *kaddr = page_address(eb->pages[idx]); \
	const int size = sizeof(u##bits); \
	const int part = PAGE_SIZE - oip; \
	u8 lebytes[sizeof(u##bits)]; \
	\
	ASSERT(check_setget_bounds(eb, ptr, off, size)); \
	if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE) \
		return get_unaligned_le##bits(kaddr + oip); \
	\
	memcpy(lebytes, kaddr + oip, part); \
	kaddr = page_address(eb->pages[idx + 1]); \
	memcpy(lebytes + part, kaddr, size - part); \
	return get_unaligned_le##bits(lebytes); \
} \
void btrfs_set_token_##bits(struct btrfs_map_token *token, \
			    const void *ptr, unsigned long off, \
			    u##bits val) \
{ \
	const unsigned long member_offset = (unsigned long)ptr + off; \
	const unsigned long idx = get_eb_page_index(member_offset); \
	const unsigned long oip = get_eb_offset_in_page(token->eb, \
							member_offset); \
	const int size = sizeof(u##bits); \
	u8 lebytes[sizeof(u##bits)]; \
	const int part = PAGE_SIZE - oip; \
	\
	ASSERT(token); \
	ASSERT(token->kaddr); \
	ASSERT(check_setget_bounds(token->eb, ptr, off, size)); \
	if (token->offset <= member_offset && \
	    member_offset + size <= token->offset + PAGE_SIZE) { \
		put_unaligned_le##bits(val, token->kaddr + oip); \
		return; \
	} \
	token->kaddr = page_address(token->eb->pages[idx]); \
	token->offset = idx << PAGE_SHIFT; \
	if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE) { \
		put_unaligned_le##bits(val, token->kaddr + oip); \
		return; \
	} \
	put_unaligned_le##bits(val, lebytes); \
	memcpy(token->kaddr + oip, lebytes, part); \
	token->kaddr = page_address(token->eb->pages[idx + 1]); \
	token->offset = (idx + 1) << PAGE_SHIFT; \
	memcpy(token->kaddr, lebytes + part, size - part); \
} \
void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr, \
		      unsigned long off, u##bits val) \
{ \
	const unsigned long member_offset = (unsigned long)ptr + off; \
	const unsigned long oip = get_eb_offset_in_page(eb, member_offset); \
	const unsigned long idx = get_eb_page_index(member_offset); \
	char *kaddr = page_address(eb->pages[idx]); \
	const int size = sizeof(u##bits); \
	const int part = PAGE_SIZE - oip; \
	u8 lebytes[sizeof(u##bits)]; \
	\
	ASSERT(check_setget_bounds(eb, ptr, off, size)); \
	if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE) { \
		put_unaligned_le##bits(val, kaddr + oip); \
		return; \
	} \
	\
	put_unaligned_le##bits(val, lebytes); \
	memcpy(kaddr + oip, lebytes, part); \
	kaddr = page_address(eb->pages[idx + 1]); \
	memcpy(kaddr, lebytes + part, size - part); \
}

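/*
 * Instantiate the helpers for each supported member width.  When a member
 * crosses a page boundary (only possible when the node size is larger than
 * the page size), the helpers above split the access: assuming a 4K
 * PAGE_SIZE, an offset in page of 4090 and a u64 member give part == 6, so
 * six bytes are copied from the first page and the remaining two from the
 * next one, reassembled via the lebytes[] buffer.
 */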
DEFINE_BTRFS_SETGET_BITS(8)
DEFINE_BTRFS_SETGET_BITS(16)
DEFINE_BTRFS_SETGET_BITS(32)
DEFINE_BTRFS_SETGET_BITS(64)

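/*
 * Copy the disk key of the node pointer in slot @nr of @eb into @disk_key.
 * This goes through read_eb_member() from ctree.h, which wraps
 * read_extent_buffer(), so a key that happens to span a page boundary is
 * handled as well.
 */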
void btrfs_node_key(const struct extent_buffer *eb,
		    struct btrfs_disk_key *disk_key, int nr)
{
	unsigned long ptr = btrfs_node_key_ptr_offset(nr);
	read_eb_member(eb, (struct btrfs_key_ptr *)ptr,
		       struct btrfs_key_ptr, key, disk_key);
}