| /* |
| * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved. |
| * Copyright (C) 2016-2017 Milan Broz |
| * Copyright (C) 2016-2017 Mikulas Patocka |
| * |
| * This file is released under the GPL. |
| */ |
| |
| #include "dm-bio-record.h" |
| |
| #include <linux/compiler.h> |
| #include <linux/module.h> |
| #include <linux/device-mapper.h> |
| #include <linux/dm-io.h> |
| #include <linux/vmalloc.h> |
| #include <linux/sort.h> |
| #include <linux/rbtree.h> |
| #include <linux/delay.h> |
| #include <linux/random.h> |
| #include <linux/reboot.h> |
| #include <crypto/hash.h> |
| #include <crypto/skcipher.h> |
| #include <linux/async_tx.h> |
| #include <linux/dm-bufio.h> |
| |
| #include "dm-audit.h" |
| |
| #define DM_MSG_PREFIX "integrity" |
| |
| #define DEFAULT_INTERLEAVE_SECTORS 32768 |
| #define DEFAULT_JOURNAL_SIZE_FACTOR 7 |
| #define DEFAULT_SECTORS_PER_BITMAP_BIT 32768 |
| #define DEFAULT_BUFFER_SECTORS 128 |
| #define DEFAULT_JOURNAL_WATERMARK 50 |
| #define DEFAULT_SYNC_MSEC 10000 |
| #define DEFAULT_MAX_JOURNAL_SECTORS 131072 |
| #define MIN_LOG2_INTERLEAVE_SECTORS 3 |
| #define MAX_LOG2_INTERLEAVE_SECTORS 31 |
| #define METADATA_WORKQUEUE_MAX_ACTIVE 16 |
| #define RECALC_SECTORS 32768 |
| #define RECALC_WRITE_SUPER 16 |
| #define BITMAP_BLOCK_SIZE 4096 /* don't change it */ |
| #define BITMAP_FLUSH_INTERVAL (10 * HZ) |
| #define DISCARD_FILLER 0xf6 |
| #define SALT_SIZE 16 |
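
/*
 * Illustrative constructor usage (the authoritative argument list is in
 * Documentation/admin-guide/device-mapper/dm-integrity.rst):
 *
 *	integrity <dev_path> <start_sector> <tag_size> <mode: D|J|B|R>
 *		[<nr_opt_args> <opt_args, e.g. internal_hash:crc32c>]
 */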
| |
| /* |
| * Warning - DEBUG_PRINT prints security-sensitive data to the log, |
| * so it should not be enabled in the official kernel |
| */ |
| //#define DEBUG_PRINT |
| //#define INTERNAL_VERIFY |
| |
| /* |
 * On-disk structures
| */ |
| |
| #define SB_MAGIC "integrt" |
| #define SB_VERSION_1 1 |
| #define SB_VERSION_2 2 |
| #define SB_VERSION_3 3 |
| #define SB_VERSION_4 4 |
| #define SB_VERSION_5 5 |
| #define SB_SECTORS 8 |
| #define MAX_SECTORS_PER_BLOCK 8 |
| |
| struct superblock { |
| __u8 magic[8]; |
| __u8 version; |
| __u8 log2_interleave_sectors; |
| __le16 integrity_tag_size; |
| __le32 journal_sections; |
| __le64 provided_data_sectors; /* userspace uses this value */ |
| __le32 flags; |
| __u8 log2_sectors_per_block; |
| __u8 log2_blocks_per_bitmap_bit; |
| __u8 pad[2]; |
| __le64 recalc_sector; |
| __u8 pad2[8]; |
| __u8 salt[SALT_SIZE]; |
| }; |
| |
| #define SB_FLAG_HAVE_JOURNAL_MAC 0x1 |
| #define SB_FLAG_RECALCULATING 0x2 |
| #define SB_FLAG_DIRTY_BITMAP 0x4 |
| #define SB_FLAG_FIXED_PADDING 0x8 |
| #define SB_FLAG_FIXED_HMAC 0x10 |
| |
| #define JOURNAL_ENTRY_ROUNDUP 8 |
| |
| typedef __le64 commit_id_t; |
| #define JOURNAL_MAC_PER_SECTOR 8 |
| |
| struct journal_entry { |
| union { |
| struct { |
| __le32 sector_lo; |
| __le32 sector_hi; |
| } s; |
| __le64 sector; |
| } u; |
| commit_id_t last_bytes[]; |
| /* __u8 tag[0]; */ |
| }; |
| |
| #define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block]) |
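
/*
 * Layout of one journal entry:
 *
 *	u.sector			the destination sector of the data block
 *	last_bytes[sectors_per_block]	the last 8 bytes of each 512-byte sector
 *	tag[tag_size]			the integrity tag of the data block
 *
 * The last 8 bytes of every journal data sector on disk are overwritten by
 * the per-sector commit_id (see struct journal_sector below), so the
 * displaced data bytes are preserved in last_bytes[] and restored on replay.
 */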
| |
| #if BITS_PER_LONG == 64 |
| #define journal_entry_set_sector(je, x) do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0) |
| #else |
| #define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0) |
| #endif |
| #define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector) |
| #define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1)) |
| #define journal_entry_set_unused(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0) |
| #define journal_entry_is_inprogress(je) ((je)->u.s.sector_hi == cpu_to_le32(-2)) |
| #define journal_entry_set_inprogress(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0) |
| |
| #define JOURNAL_BLOCK_SECTORS 8 |
| #define JOURNAL_SECTOR_DATA ((1 << SECTOR_SHIFT) - sizeof(commit_id_t)) |
| #define JOURNAL_MAC_SIZE (JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS) |
| |
| struct journal_sector { |
| __u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR]; |
| __u8 mac[JOURNAL_MAC_PER_SECTOR]; |
| commit_id_t commit_id; |
| }; |
| |
| #define MAX_TAG_SIZE (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK])) |
| |
| #define METADATA_PADDING_SECTORS 8 |
| |
| #define N_COMMIT_IDS 4 |
| |
| static unsigned char prev_commit_seq(unsigned char seq) |
| { |
| return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS; |
| } |
| |
| static unsigned char next_commit_seq(unsigned char seq) |
| { |
| return (seq + 1) % N_COMMIT_IDS; |
| } |
| |
| /* |
| * In-memory structures |
| */ |
| |
| struct journal_node { |
| struct rb_node node; |
| sector_t sector; |
| }; |
| |
| struct alg_spec { |
| char *alg_string; |
| char *key_string; |
| __u8 *key; |
| unsigned key_size; |
| }; |
| |
| struct dm_integrity_c { |
| struct dm_dev *dev; |
| struct dm_dev *meta_dev; |
| unsigned tag_size; |
| __s8 log2_tag_size; |
| sector_t start; |
| mempool_t journal_io_mempool; |
| struct dm_io_client *io; |
| struct dm_bufio_client *bufio; |
| struct workqueue_struct *metadata_wq; |
| struct superblock *sb; |
| unsigned journal_pages; |
| unsigned n_bitmap_blocks; |
| |
| struct page_list *journal; |
| struct page_list *journal_io; |
| struct page_list *journal_xor; |
| struct page_list *recalc_bitmap; |
| struct page_list *may_write_bitmap; |
| struct bitmap_block_status *bbs; |
| unsigned bitmap_flush_interval; |
| int synchronous_mode; |
| struct bio_list synchronous_bios; |
| struct delayed_work bitmap_flush_work; |
| |
| struct crypto_skcipher *journal_crypt; |
| struct scatterlist **journal_scatterlist; |
| struct scatterlist **journal_io_scatterlist; |
| struct skcipher_request **sk_requests; |
| |
| struct crypto_shash *journal_mac; |
| |
| struct journal_node *journal_tree; |
| struct rb_root journal_tree_root; |
| |
| sector_t provided_data_sectors; |
| |
| unsigned short journal_entry_size; |
| unsigned char journal_entries_per_sector; |
| unsigned char journal_section_entries; |
| unsigned short journal_section_sectors; |
| unsigned journal_sections; |
| unsigned journal_entries; |
| sector_t data_device_sectors; |
| sector_t meta_device_sectors; |
| unsigned initial_sectors; |
| unsigned metadata_run; |
| __s8 log2_metadata_run; |
| __u8 log2_buffer_sectors; |
| __u8 sectors_per_block; |
| __u8 log2_blocks_per_bitmap_bit; |
| |
| unsigned char mode; |
| |
| int failed; |
| |
| struct crypto_shash *internal_hash; |
| |
| struct dm_target *ti; |
| |
	/* these variables are protected by endio_wait.lock */
| struct rb_root in_progress; |
| struct list_head wait_list; |
| wait_queue_head_t endio_wait; |
| struct workqueue_struct *wait_wq; |
| struct workqueue_struct *offload_wq; |
| |
| unsigned char commit_seq; |
| commit_id_t commit_ids[N_COMMIT_IDS]; |
| |
| unsigned committed_section; |
| unsigned n_committed_sections; |
| |
| unsigned uncommitted_section; |
| unsigned n_uncommitted_sections; |
| |
| unsigned free_section; |
| unsigned char free_section_entry; |
| unsigned free_sectors; |
| |
| unsigned free_sectors_threshold; |
| |
| struct workqueue_struct *commit_wq; |
| struct work_struct commit_work; |
| |
| struct workqueue_struct *writer_wq; |
| struct work_struct writer_work; |
| |
| struct workqueue_struct *recalc_wq; |
| struct work_struct recalc_work; |
| u8 *recalc_buffer; |
| u8 *recalc_tags; |
| |
| struct bio_list flush_bio_list; |
| |
| unsigned long autocommit_jiffies; |
| struct timer_list autocommit_timer; |
| unsigned autocommit_msec; |
| |
| wait_queue_head_t copy_to_journal_wait; |
| |
| struct completion crypto_backoff; |
| |
| bool journal_uptodate; |
| bool just_formatted; |
| bool recalculate_flag; |
| bool reset_recalculate_flag; |
| bool discard; |
| bool fix_padding; |
| bool fix_hmac; |
| bool legacy_recalculate; |
| |
| struct alg_spec internal_hash_alg; |
| struct alg_spec journal_crypt_alg; |
| struct alg_spec journal_mac_alg; |
| |
| atomic64_t number_of_mismatches; |
| |
| struct notifier_block reboot_notifier; |
| }; |
| |
| struct dm_integrity_range { |
| sector_t logical_sector; |
| sector_t n_sectors; |
| bool waiting; |
| union { |
| struct rb_node node; |
| struct { |
| struct task_struct *task; |
| struct list_head wait_entry; |
| }; |
| }; |
| }; |
| |
| struct dm_integrity_io { |
| struct work_struct work; |
| |
| struct dm_integrity_c *ic; |
| enum req_opf op; |
| bool fua; |
| |
| struct dm_integrity_range range; |
| |
| sector_t metadata_block; |
| unsigned metadata_offset; |
| |
| atomic_t in_flight; |
| blk_status_t bi_status; |
| |
| struct completion *completion; |
| |
| struct dm_bio_details bio_details; |
| }; |
| |
| struct journal_completion { |
| struct dm_integrity_c *ic; |
| atomic_t in_flight; |
| struct completion comp; |
| }; |
| |
| struct journal_io { |
| struct dm_integrity_range range; |
| struct journal_completion *comp; |
| }; |
| |
| struct bitmap_block_status { |
| struct work_struct work; |
| struct dm_integrity_c *ic; |
| unsigned idx; |
| unsigned long *bitmap; |
| struct bio_list bio_queue; |
	spinlock_t bio_queue_lock;
};
| |
| static struct kmem_cache *journal_io_cache; |
| |
| #define JOURNAL_IO_MEMPOOL 32 |
| |
| #ifdef DEBUG_PRINT |
| #define DEBUG_print(x, ...) printk(KERN_DEBUG x, ##__VA_ARGS__) |
| static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...) |
| { |
| va_list args; |
| va_start(args, msg); |
| vprintk(msg, args); |
| va_end(args); |
| if (len) |
| pr_cont(":"); |
| while (len) { |
| pr_cont(" %02x", *bytes); |
| bytes++; |
| len--; |
| } |
| pr_cont("\n"); |
| } |
| #define DEBUG_bytes(bytes, len, msg, ...) __DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__) |
| #else |
| #define DEBUG_print(x, ...) do { } while (0) |
| #define DEBUG_bytes(bytes, len, msg, ...) do { } while (0) |
| #endif |
| |
| static void dm_integrity_prepare(struct request *rq) |
| { |
| } |
| |
| static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes) |
| { |
| } |
| |
| /* |
| * DM Integrity profile, protection is performed layer above (dm-crypt) |
| */ |
| static const struct blk_integrity_profile dm_integrity_profile = { |
| .name = "DM-DIF-EXT-TAG", |
| .generate_fn = NULL, |
| .verify_fn = NULL, |
| .prepare_fn = dm_integrity_prepare, |
| .complete_fn = dm_integrity_complete, |
| }; |
| |
| static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map); |
| static void integrity_bio_wait(struct work_struct *w); |
| static void dm_integrity_dtr(struct dm_target *ti); |
| |
| static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err) |
| { |
| if (err == -EILSEQ) |
| atomic64_inc(&ic->number_of_mismatches); |
| if (!cmpxchg(&ic->failed, 0, err)) |
| DMERR("Error on %s: %d", msg, err); |
| } |
| |
| static int dm_integrity_failed(struct dm_integrity_c *ic) |
| { |
| return READ_ONCE(ic->failed); |
| } |
| |
| static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic) |
| { |
| if (ic->legacy_recalculate) |
| return false; |
| if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) ? |
| ic->internal_hash_alg.key || ic->journal_mac_alg.key : |
| ic->internal_hash_alg.key && !ic->journal_mac_alg.key) |
| return true; |
| return false; |
| } |
| |
| static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i, |
| unsigned j, unsigned char seq) |
| { |
| /* |
| * Xor the number with section and sector, so that if a piece of |
| * journal is written at wrong place, it is detected. |
| */ |
| return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j); |
| } |
| |
| static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector, |
| sector_t *area, sector_t *offset) |
| { |
| if (!ic->meta_dev) { |
| __u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors; |
| *area = data_sector >> log2_interleave_sectors; |
| *offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1); |
| } else { |
| *area = 0; |
| *offset = data_sector; |
| } |
| } |
| |
| #define sector_to_block(ic, n) \ |
| do { \ |
| BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1)); \ |
| (n) >>= (ic)->sb->log2_sectors_per_block; \ |
| } while (0) |
| |
| static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area, |
| sector_t offset, unsigned *metadata_offset) |
| { |
| __u64 ms; |
| unsigned mo; |
| |
| ms = area << ic->sb->log2_interleave_sectors; |
| if (likely(ic->log2_metadata_run >= 0)) |
| ms += area << ic->log2_metadata_run; |
| else |
| ms += area * ic->metadata_run; |
| ms >>= ic->log2_buffer_sectors; |
| |
| sector_to_block(ic, offset); |
| |
| if (likely(ic->log2_tag_size >= 0)) { |
| ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size); |
| mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1); |
| } else { |
| ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors); |
| mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1); |
| } |
| *metadata_offset = mo; |
| return ms; |
| } |
| |
| static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset) |
| { |
| sector_t result; |
| |
| if (ic->meta_dev) |
| return offset; |
| |
| result = area << ic->sb->log2_interleave_sectors; |
| if (likely(ic->log2_metadata_run >= 0)) |
| result += (area + 1) << ic->log2_metadata_run; |
| else |
| result += (area + 1) * ic->metadata_run; |
| |
| result += (sector_t)ic->initial_sectors + offset; |
| result += ic->start; |
| |
| return result; |
| } |
| |
| static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr) |
| { |
| if (unlikely(*sec_ptr >= ic->journal_sections)) |
| *sec_ptr -= ic->journal_sections; |
| } |
| |
| static void sb_set_version(struct dm_integrity_c *ic) |
| { |
| if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) |
| ic->sb->version = SB_VERSION_5; |
| else if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) |
| ic->sb->version = SB_VERSION_4; |
| else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) |
| ic->sb->version = SB_VERSION_3; |
| else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) |
| ic->sb->version = SB_VERSION_2; |
| else |
| ic->sb->version = SB_VERSION_1; |
| } |
| |
| static int sb_mac(struct dm_integrity_c *ic, bool wr) |
| { |
| SHASH_DESC_ON_STACK(desc, ic->journal_mac); |
| int r; |
| unsigned size = crypto_shash_digestsize(ic->journal_mac); |
| |
| if (sizeof(struct superblock) + size > 1 << SECTOR_SHIFT) { |
| dm_integrity_io_error(ic, "digest is too long", -EINVAL); |
| return -EINVAL; |
| } |
| |
| desc->tfm = ic->journal_mac; |
| |
| r = crypto_shash_init(desc); |
| if (unlikely(r < 0)) { |
| dm_integrity_io_error(ic, "crypto_shash_init", r); |
| return r; |
| } |
| |
| r = crypto_shash_update(desc, (__u8 *)ic->sb, (1 << SECTOR_SHIFT) - size); |
| if (unlikely(r < 0)) { |
| dm_integrity_io_error(ic, "crypto_shash_update", r); |
| return r; |
| } |
| |
| if (likely(wr)) { |
| r = crypto_shash_final(desc, (__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size); |
| if (unlikely(r < 0)) { |
| dm_integrity_io_error(ic, "crypto_shash_final", r); |
| return r; |
| } |
| } else { |
| __u8 result[HASH_MAX_DIGESTSIZE]; |
| r = crypto_shash_final(desc, result); |
| if (unlikely(r < 0)) { |
| dm_integrity_io_error(ic, "crypto_shash_final", r); |
| return r; |
| } |
| if (memcmp((__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size, result, size)) { |
| dm_integrity_io_error(ic, "superblock mac", -EILSEQ); |
| dm_audit_log_target(DM_MSG_PREFIX, "mac-superblock", ic->ti, 0); |
| return -EILSEQ; |
| } |
| } |
| |
| return 0; |
| } |
| |
| static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags) |
| { |
| struct dm_io_request io_req; |
| struct dm_io_region io_loc; |
| int r; |
| |
| io_req.bi_op = op; |
| io_req.bi_op_flags = op_flags; |
| io_req.mem.type = DM_IO_KMEM; |
| io_req.mem.ptr.addr = ic->sb; |
| io_req.notify.fn = NULL; |
| io_req.client = ic->io; |
| io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev; |
| io_loc.sector = ic->start; |
| io_loc.count = SB_SECTORS; |
| |
| if (op == REQ_OP_WRITE) { |
| sb_set_version(ic); |
| if (ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { |
| r = sb_mac(ic, true); |
| if (unlikely(r)) |
| return r; |
| } |
| } |
| |
| r = dm_io(&io_req, 1, &io_loc, NULL); |
| if (unlikely(r)) |
| return r; |
| |
| if (op == REQ_OP_READ) { |
| if (ic->mode != 'R' && ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { |
| r = sb_mac(ic, false); |
| if (unlikely(r)) |
| return r; |
| } |
| } |
| |
| return 0; |
| } |
| |
| #define BITMAP_OP_TEST_ALL_SET 0 |
| #define BITMAP_OP_TEST_ALL_CLEAR 1 |
| #define BITMAP_OP_SET 2 |
| #define BITMAP_OP_CLEAR 3 |
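
/*
 * One bitmap bit covers 1 << (log2_sectors_per_block +
 * log2_blocks_per_bitmap_bit) sectors, so block_bitmap_op() first
 * converts the sector range to a bit range and then applies one of the
 * four operations above, taking long-at-a-time shortcuts where the
 * range covers whole words.
 */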
| |
| static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap, |
| sector_t sector, sector_t n_sectors, int mode) |
| { |
| unsigned long bit, end_bit, this_end_bit, page, end_page; |
| unsigned long *data; |
| |
| if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) { |
| DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)", |
| sector, |
| n_sectors, |
| ic->sb->log2_sectors_per_block, |
| ic->log2_blocks_per_bitmap_bit, |
| mode); |
| BUG(); |
| } |
| |
| if (unlikely(!n_sectors)) |
| return true; |
| |
| bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); |
| end_bit = (sector + n_sectors - 1) >> |
| (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); |
| |
| page = bit / (PAGE_SIZE * 8); |
| bit %= PAGE_SIZE * 8; |
| |
| end_page = end_bit / (PAGE_SIZE * 8); |
| end_bit %= PAGE_SIZE * 8; |
| |
| repeat: |
| if (page < end_page) { |
| this_end_bit = PAGE_SIZE * 8 - 1; |
| } else { |
| this_end_bit = end_bit; |
| } |
| |
| data = lowmem_page_address(bitmap[page].page); |
| |
| if (mode == BITMAP_OP_TEST_ALL_SET) { |
| while (bit <= this_end_bit) { |
| if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) { |
| do { |
| if (data[bit / BITS_PER_LONG] != -1) |
| return false; |
| bit += BITS_PER_LONG; |
| } while (this_end_bit >= bit + BITS_PER_LONG - 1); |
| continue; |
| } |
| if (!test_bit(bit, data)) |
| return false; |
| bit++; |
| } |
| } else if (mode == BITMAP_OP_TEST_ALL_CLEAR) { |
| while (bit <= this_end_bit) { |
| if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) { |
| do { |
| if (data[bit / BITS_PER_LONG] != 0) |
| return false; |
| bit += BITS_PER_LONG; |
| } while (this_end_bit >= bit + BITS_PER_LONG - 1); |
| continue; |
| } |
| if (test_bit(bit, data)) |
| return false; |
| bit++; |
| } |
| } else if (mode == BITMAP_OP_SET) { |
| while (bit <= this_end_bit) { |
| if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) { |
| do { |
| data[bit / BITS_PER_LONG] = -1; |
| bit += BITS_PER_LONG; |
| } while (this_end_bit >= bit + BITS_PER_LONG - 1); |
| continue; |
| } |
| __set_bit(bit, data); |
| bit++; |
| } |
| } else if (mode == BITMAP_OP_CLEAR) { |
| if (!bit && this_end_bit == PAGE_SIZE * 8 - 1) |
| clear_page(data); |
| else while (bit <= this_end_bit) { |
| if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) { |
| do { |
| data[bit / BITS_PER_LONG] = 0; |
| bit += BITS_PER_LONG; |
| } while (this_end_bit >= bit + BITS_PER_LONG - 1); |
| continue; |
| } |
| __clear_bit(bit, data); |
| bit++; |
| } |
| } else { |
| BUG(); |
| } |
| |
| if (unlikely(page < end_page)) { |
| bit = 0; |
| page++; |
| goto repeat; |
| } |
| |
| return true; |
| } |
| |
| static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src) |
| { |
| unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE); |
| unsigned i; |
| |
| for (i = 0; i < n_bitmap_pages; i++) { |
| unsigned long *dst_data = lowmem_page_address(dst[i].page); |
| unsigned long *src_data = lowmem_page_address(src[i].page); |
| copy_page(dst_data, src_data); |
| } |
| } |
| |
| static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector) |
| { |
| unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); |
| unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8); |
| |
| BUG_ON(bitmap_block >= ic->n_bitmap_blocks); |
| return &ic->bbs[bitmap_block]; |
| } |
| |
| static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset, |
| bool e, const char *function) |
| { |
| #if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY) |
| unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors; |
| |
| if (unlikely(section >= ic->journal_sections) || |
| unlikely(offset >= limit)) { |
| DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)", |
| function, section, offset, ic->journal_sections, limit); |
| BUG(); |
| } |
| #endif |
| } |
| |
| static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset, |
| unsigned *pl_index, unsigned *pl_offset) |
| { |
| unsigned sector; |
| |
| access_journal_check(ic, section, offset, false, "page_list_location"); |
| |
| sector = section * ic->journal_section_sectors + offset; |
| |
| *pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); |
| *pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); |
| } |
| |
| static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl, |
| unsigned section, unsigned offset, unsigned *n_sectors) |
| { |
| unsigned pl_index, pl_offset; |
| char *va; |
| |
| page_list_location(ic, section, offset, &pl_index, &pl_offset); |
| |
| if (n_sectors) |
| *n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT; |
| |
| va = lowmem_page_address(pl[pl_index].page); |
| |
| return (struct journal_sector *)(va + pl_offset); |
| } |
| |
| static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset) |
| { |
| return access_page_list(ic, ic->journal, section, offset, NULL); |
| } |
| |
| static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n) |
| { |
| unsigned rel_sector, offset; |
| struct journal_sector *js; |
| |
| access_journal_check(ic, section, n, true, "access_journal_entry"); |
| |
| rel_sector = n % JOURNAL_BLOCK_SECTORS; |
| offset = n / JOURNAL_BLOCK_SECTORS; |
| |
| js = access_journal(ic, section, rel_sector); |
| return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size); |
| } |
| |
| static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n) |
| { |
| n <<= ic->sb->log2_sectors_per_block; |
| |
| n += JOURNAL_BLOCK_SECTORS; |
| |
| access_journal_check(ic, section, n, false, "access_journal_data"); |
| |
| return access_journal(ic, section, n); |
| } |
| |
| static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE]) |
| { |
| SHASH_DESC_ON_STACK(desc, ic->journal_mac); |
| int r; |
| unsigned j, size; |
| |
| desc->tfm = ic->journal_mac; |
| |
| r = crypto_shash_init(desc); |
| if (unlikely(r < 0)) { |
| dm_integrity_io_error(ic, "crypto_shash_init", r); |
| goto err; |
| } |
| |
| if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { |
| __le64 section_le; |
| |
| r = crypto_shash_update(desc, (__u8 *)&ic->sb->salt, SALT_SIZE); |
| if (unlikely(r < 0)) { |
| dm_integrity_io_error(ic, "crypto_shash_update", r); |
| goto err; |
| } |
| |
| section_le = cpu_to_le64(section); |
| r = crypto_shash_update(desc, (__u8 *)§ion_le, sizeof section_le); |
| if (unlikely(r < 0)) { |
| dm_integrity_io_error(ic, "crypto_shash_update", r); |
| goto err; |
| } |
| } |
| |
| for (j = 0; j < ic->journal_section_entries; j++) { |
| struct journal_entry *je = access_journal_entry(ic, section, j); |
| r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector); |
| if (unlikely(r < 0)) { |
| dm_integrity_io_error(ic, "crypto_shash_update", r); |
| goto err; |
| } |
| } |
| |
| size = crypto_shash_digestsize(ic->journal_mac); |
| |
| if (likely(size <= JOURNAL_MAC_SIZE)) { |
| r = crypto_shash_final(desc, result); |
| if (unlikely(r < 0)) { |
| dm_integrity_io_error(ic, "crypto_shash_final", r); |
| goto err; |
| } |
| memset(result + size, 0, JOURNAL_MAC_SIZE - size); |
| } else { |
| __u8 digest[HASH_MAX_DIGESTSIZE]; |
| |
| if (WARN_ON(size > sizeof(digest))) { |
| dm_integrity_io_error(ic, "digest_size", -EINVAL); |
| goto err; |
| } |
| r = crypto_shash_final(desc, digest); |
| if (unlikely(r < 0)) { |
| dm_integrity_io_error(ic, "crypto_shash_final", r); |
| goto err; |
| } |
| memcpy(result, digest, JOURNAL_MAC_SIZE); |
| } |
| |
| return; |
| err: |
| memset(result, 0, JOURNAL_MAC_SIZE); |
| } |
| |
| static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr) |
| { |
| __u8 result[JOURNAL_MAC_SIZE]; |
| unsigned j; |
| |
| if (!ic->journal_mac) |
| return; |
| |
| section_mac(ic, section, result); |
| |
| for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) { |
| struct journal_sector *js = access_journal(ic, section, j); |
| |
| if (likely(wr)) |
| memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR); |
| else { |
| if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) { |
| dm_integrity_io_error(ic, "journal mac", -EILSEQ); |
| dm_audit_log_target(DM_MSG_PREFIX, "mac-journal", ic->ti, 0); |
| } |
| } |
| } |
| } |
| |
| static void complete_journal_op(void *context) |
| { |
| struct journal_completion *comp = context; |
| BUG_ON(!atomic_read(&comp->in_flight)); |
| if (likely(atomic_dec_and_test(&comp->in_flight))) |
| complete(&comp->comp); |
| } |
| |
| static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section, |
| unsigned n_sections, struct journal_completion *comp) |
| { |
| struct async_submit_ctl submit; |
| size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT; |
| unsigned pl_index, pl_offset, section_index; |
| struct page_list *source_pl, *target_pl; |
| |
| if (likely(encrypt)) { |
| source_pl = ic->journal; |
| target_pl = ic->journal_io; |
| } else { |
| source_pl = ic->journal_io; |
| target_pl = ic->journal; |
| } |
| |
| page_list_location(ic, section, 0, &pl_index, &pl_offset); |
| |
| atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight); |
| |
| init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL); |
| |
| section_index = pl_index; |
| |
| do { |
| size_t this_step; |
| struct page *src_pages[2]; |
| struct page *dst_page; |
| |
| while (unlikely(pl_index == section_index)) { |
| unsigned dummy; |
| if (likely(encrypt)) |
| rw_section_mac(ic, section, true); |
| section++; |
| n_sections--; |
| if (!n_sections) |
| break; |
| page_list_location(ic, section, 0, §ion_index, &dummy); |
| } |
| |
| this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset); |
| dst_page = target_pl[pl_index].page; |
| src_pages[0] = source_pl[pl_index].page; |
| src_pages[1] = ic->journal_xor[pl_index].page; |
| |
| async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit); |
| |
| pl_index++; |
| pl_offset = 0; |
| n_bytes -= this_step; |
| } while (n_bytes); |
| |
| BUG_ON(n_sections); |
| |
| async_tx_issue_pending_all(); |
| } |
| |
| static void complete_journal_encrypt(struct crypto_async_request *req, int err) |
| { |
| struct journal_completion *comp = req->data; |
| if (unlikely(err)) { |
| if (likely(err == -EINPROGRESS)) { |
| complete(&comp->ic->crypto_backoff); |
| return; |
| } |
| dm_integrity_io_error(comp->ic, "asynchronous encrypt", err); |
| } |
| complete_journal_op(comp); |
| } |
| |
| static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp) |
| { |
| int r; |
| skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
| complete_journal_encrypt, comp); |
| if (likely(encrypt)) |
| r = crypto_skcipher_encrypt(req); |
| else |
| r = crypto_skcipher_decrypt(req); |
| if (likely(!r)) |
| return false; |
| if (likely(r == -EINPROGRESS)) |
| return true; |
| if (likely(r == -EBUSY)) { |
| wait_for_completion(&comp->ic->crypto_backoff); |
| reinit_completion(&comp->ic->crypto_backoff); |
| return true; |
| } |
| dm_integrity_io_error(comp->ic, "encrypt", r); |
| return false; |
| } |
| |
| static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section, |
| unsigned n_sections, struct journal_completion *comp) |
| { |
| struct scatterlist **source_sg; |
| struct scatterlist **target_sg; |
| |
| atomic_add(2, &comp->in_flight); |
| |
| if (likely(encrypt)) { |
| source_sg = ic->journal_scatterlist; |
| target_sg = ic->journal_io_scatterlist; |
| } else { |
| source_sg = ic->journal_io_scatterlist; |
| target_sg = ic->journal_scatterlist; |
| } |
| |
| do { |
| struct skcipher_request *req; |
| unsigned ivsize; |
| char *iv; |
| |
| if (likely(encrypt)) |
| rw_section_mac(ic, section, true); |
| |
| req = ic->sk_requests[section]; |
| ivsize = crypto_skcipher_ivsize(ic->journal_crypt); |
| iv = req->iv; |
| |
| memcpy(iv, iv + ivsize, ivsize); |
| |
| req->src = source_sg[section]; |
| req->dst = target_sg[section]; |
| |
| if (unlikely(do_crypt(encrypt, req, comp))) |
| atomic_inc(&comp->in_flight); |
| |
| section++; |
| n_sections--; |
| } while (n_sections); |
| |
| atomic_dec(&comp->in_flight); |
| complete_journal_op(comp); |
| } |
| |
| static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section, |
| unsigned n_sections, struct journal_completion *comp) |
| { |
| if (ic->journal_xor) |
| return xor_journal(ic, encrypt, section, n_sections, comp); |
| else |
| return crypt_journal(ic, encrypt, section, n_sections, comp); |
| } |
| |
| static void complete_journal_io(unsigned long error, void *context) |
| { |
| struct journal_completion *comp = context; |
| if (unlikely(error != 0)) |
| dm_integrity_io_error(comp->ic, "writing journal", -EIO); |
| complete_journal_op(comp); |
| } |
| |
| static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags, |
| unsigned sector, unsigned n_sectors, struct journal_completion *comp) |
| { |
| struct dm_io_request io_req; |
| struct dm_io_region io_loc; |
| unsigned pl_index, pl_offset; |
| int r; |
| |
| if (unlikely(dm_integrity_failed(ic))) { |
| if (comp) |
| complete_journal_io(-1UL, comp); |
| return; |
| } |
| |
| pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); |
| pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); |
| |
| io_req.bi_op = op; |
| io_req.bi_op_flags = op_flags; |
| io_req.mem.type = DM_IO_PAGE_LIST; |
| if (ic->journal_io) |
| io_req.mem.ptr.pl = &ic->journal_io[pl_index]; |
| else |
| io_req.mem.ptr.pl = &ic->journal[pl_index]; |
| io_req.mem.offset = pl_offset; |
| if (likely(comp != NULL)) { |
| io_req.notify.fn = complete_journal_io; |
| io_req.notify.context = comp; |
| } else { |
| io_req.notify.fn = NULL; |
| } |
| io_req.client = ic->io; |
| io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev; |
| io_loc.sector = ic->start + SB_SECTORS + sector; |
| io_loc.count = n_sectors; |
| |
| r = dm_io(&io_req, 1, &io_loc, NULL); |
| if (unlikely(r)) { |
| dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r); |
| if (comp) { |
| WARN_ONCE(1, "asynchronous dm_io failed: %d", r); |
| complete_journal_io(-1UL, comp); |
| } |
| } |
| } |
| |
| static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section, |
| unsigned n_sections, struct journal_completion *comp) |
| { |
| unsigned sector, n_sectors; |
| |
| sector = section * ic->journal_section_sectors; |
| n_sectors = n_sections * ic->journal_section_sectors; |
| |
| rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp); |
| } |
| |
| static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections) |
| { |
| struct journal_completion io_comp; |
| struct journal_completion crypt_comp_1; |
| struct journal_completion crypt_comp_2; |
| unsigned i; |
| |
| io_comp.ic = ic; |
| init_completion(&io_comp.comp); |
| |
| if (commit_start + commit_sections <= ic->journal_sections) { |
| io_comp.in_flight = (atomic_t)ATOMIC_INIT(1); |
| if (ic->journal_io) { |
| crypt_comp_1.ic = ic; |
| init_completion(&crypt_comp_1.comp); |
| crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); |
| encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1); |
| wait_for_completion_io(&crypt_comp_1.comp); |
| } else { |
| for (i = 0; i < commit_sections; i++) |
| rw_section_mac(ic, commit_start + i, true); |
| } |
| rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start, |
| commit_sections, &io_comp); |
| } else { |
| unsigned to_end; |
| io_comp.in_flight = (atomic_t)ATOMIC_INIT(2); |
| to_end = ic->journal_sections - commit_start; |
| if (ic->journal_io) { |
| crypt_comp_1.ic = ic; |
| init_completion(&crypt_comp_1.comp); |
| crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); |
| encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1); |
| if (try_wait_for_completion(&crypt_comp_1.comp)) { |
| rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); |
| reinit_completion(&crypt_comp_1.comp); |
| crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); |
| encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1); |
| wait_for_completion_io(&crypt_comp_1.comp); |
| } else { |
| crypt_comp_2.ic = ic; |
| init_completion(&crypt_comp_2.comp); |
| crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0); |
| encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2); |
| wait_for_completion_io(&crypt_comp_1.comp); |
| rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); |
| wait_for_completion_io(&crypt_comp_2.comp); |
| } |
| } else { |
| for (i = 0; i < to_end; i++) |
| rw_section_mac(ic, commit_start + i, true); |
| rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); |
| for (i = 0; i < commit_sections - to_end; i++) |
| rw_section_mac(ic, i, true); |
| } |
| rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp); |
| } |
| |
| wait_for_completion_io(&io_comp.comp); |
| } |
| |
| static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset, |
| unsigned n_sectors, sector_t target, io_notify_fn fn, void *data) |
| { |
| struct dm_io_request io_req; |
| struct dm_io_region io_loc; |
| int r; |
| unsigned sector, pl_index, pl_offset; |
| |
| BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1)); |
| |
| if (unlikely(dm_integrity_failed(ic))) { |
| fn(-1UL, data); |
| return; |
| } |
| |
| sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset; |
| |
| pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); |
| pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); |
| |
| io_req.bi_op = REQ_OP_WRITE; |
| io_req.bi_op_flags = 0; |
| io_req.mem.type = DM_IO_PAGE_LIST; |
| io_req.mem.ptr.pl = &ic->journal[pl_index]; |
| io_req.mem.offset = pl_offset; |
| io_req.notify.fn = fn; |
| io_req.notify.context = data; |
| io_req.client = ic->io; |
| io_loc.bdev = ic->dev->bdev; |
| io_loc.sector = target; |
| io_loc.count = n_sectors; |
| |
| r = dm_io(&io_req, 1, &io_loc, NULL); |
| if (unlikely(r)) { |
| WARN_ONCE(1, "asynchronous dm_io failed: %d", r); |
| fn(-1UL, data); |
| } |
| } |
| |
| static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2) |
| { |
| return range1->logical_sector < range2->logical_sector + range2->n_sectors && |
| range1->logical_sector + range1->n_sectors > range2->logical_sector; |
| } |
| |
| static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting) |
| { |
| struct rb_node **n = &ic->in_progress.rb_node; |
| struct rb_node *parent; |
| |
| BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1)); |
| |
| if (likely(check_waiting)) { |
| struct dm_integrity_range *range; |
| list_for_each_entry(range, &ic->wait_list, wait_entry) { |
| if (unlikely(ranges_overlap(range, new_range))) |
| return false; |
| } |
| } |
| |
| parent = NULL; |
| |
| while (*n) { |
| struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node); |
| |
| parent = *n; |
| if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) { |
| n = &range->node.rb_left; |
| } else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) { |
| n = &range->node.rb_right; |
| } else { |
| return false; |
| } |
| } |
| |
| rb_link_node(&new_range->node, parent, n); |
| rb_insert_color(&new_range->node, &ic->in_progress); |
| |
| return true; |
| } |
| |
| static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range) |
| { |
| rb_erase(&range->node, &ic->in_progress); |
| while (unlikely(!list_empty(&ic->wait_list))) { |
| struct dm_integrity_range *last_range = |
| list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry); |
| struct task_struct *last_range_task; |
| last_range_task = last_range->task; |
| list_del(&last_range->wait_entry); |
| if (!add_new_range(ic, last_range, false)) { |
| last_range->task = last_range_task; |
| list_add(&last_range->wait_entry, &ic->wait_list); |
| break; |
| } |
| last_range->waiting = false; |
| wake_up_process(last_range_task); |
| } |
| } |
| |
| static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range) |
| { |
| unsigned long flags; |
| |
| spin_lock_irqsave(&ic->endio_wait.lock, flags); |
| remove_range_unlocked(ic, range); |
| spin_unlock_irqrestore(&ic->endio_wait.lock, flags); |
| } |
| |
| static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range) |
| { |
| new_range->waiting = true; |
| list_add_tail(&new_range->wait_entry, &ic->wait_list); |
| new_range->task = current; |
| do { |
| __set_current_state(TASK_UNINTERRUPTIBLE); |
| spin_unlock_irq(&ic->endio_wait.lock); |
| io_schedule(); |
| spin_lock_irq(&ic->endio_wait.lock); |
| } while (unlikely(new_range->waiting)); |
| } |
| |
| static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range) |
| { |
| if (unlikely(!add_new_range(ic, new_range, true))) |
| wait_and_add_new_range(ic, new_range); |
| } |
| |
| static void init_journal_node(struct journal_node *node) |
| { |
| RB_CLEAR_NODE(&node->node); |
| node->sector = (sector_t)-1; |
| } |
| |
| static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector) |
| { |
| struct rb_node **link; |
| struct rb_node *parent; |
| |
| node->sector = sector; |
| BUG_ON(!RB_EMPTY_NODE(&node->node)); |
| |
| link = &ic->journal_tree_root.rb_node; |
| parent = NULL; |
| |
| while (*link) { |
| struct journal_node *j; |
| parent = *link; |
| j = container_of(parent, struct journal_node, node); |
| if (sector < j->sector) |
| link = &j->node.rb_left; |
| else |
| link = &j->node.rb_right; |
| } |
| |
| rb_link_node(&node->node, parent, link); |
| rb_insert_color(&node->node, &ic->journal_tree_root); |
| } |
| |
| static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node) |
| { |
| BUG_ON(RB_EMPTY_NODE(&node->node)); |
| rb_erase(&node->node, &ic->journal_tree_root); |
| init_journal_node(node); |
| } |
| |
| #define NOT_FOUND (-1U) |
| |
| static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector) |
| { |
| struct rb_node *n = ic->journal_tree_root.rb_node; |
| unsigned found = NOT_FOUND; |
| *next_sector = (sector_t)-1; |
| while (n) { |
| struct journal_node *j = container_of(n, struct journal_node, node); |
| if (sector == j->sector) { |
| found = j - ic->journal_tree; |
| } |
| if (sector < j->sector) { |
| *next_sector = j->sector; |
| n = j->node.rb_left; |
| } else { |
| n = j->node.rb_right; |
| } |
| } |
| |
| return found; |
| } |
| |
| static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector) |
| { |
| struct journal_node *node, *next_node; |
| struct rb_node *next; |
| |
| if (unlikely(pos >= ic->journal_entries)) |
| return false; |
| node = &ic->journal_tree[pos]; |
| if (unlikely(RB_EMPTY_NODE(&node->node))) |
| return false; |
| if (unlikely(node->sector != sector)) |
| return false; |
| |
| next = rb_next(&node->node); |
| if (unlikely(!next)) |
| return true; |
| |
| next_node = container_of(next, struct journal_node, node); |
| return next_node->sector != sector; |
| } |
| |
| static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node) |
| { |
| struct rb_node *next; |
| struct journal_node *next_node; |
| unsigned next_section; |
| |
| BUG_ON(RB_EMPTY_NODE(&node->node)); |
| |
| next = rb_next(&node->node); |
| if (unlikely(!next)) |
| return false; |
| |
| next_node = container_of(next, struct journal_node, node); |
| |
| if (next_node->sector != node->sector) |
| return false; |
| |
| next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries; |
| if (next_section >= ic->committed_section && |
| next_section < ic->committed_section + ic->n_committed_sections) |
| return true; |
| if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections) |
| return true; |
| |
| return false; |
| } |
| |
| #define TAG_READ 0 |
| #define TAG_WRITE 1 |
| #define TAG_CMP 2 |
| |
| static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block, |
| unsigned *metadata_offset, unsigned total_size, int op) |
| { |
| #define MAY_BE_FILLER 1 |
| #define MAY_BE_HASH 2 |
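	/*
	 * Used by the TAG_CMP path: MAY_BE_HASH means the tag bytes seen so
	 * far are still consistent with the expected checksum, MAY_BE_FILLER
	 * means they are still consistent with a discarded block (every byte
	 * DISCARD_FILLER). A mismatch is reported only when a complete tag
	 * matches neither interpretation.
	 */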
| unsigned hash_offset = 0; |
| unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0); |
| |
| do { |
| unsigned char *data, *dp; |
| struct dm_buffer *b; |
| unsigned to_copy; |
| int r; |
| |
| r = dm_integrity_failed(ic); |
| if (unlikely(r)) |
| return r; |
| |
| data = dm_bufio_read(ic->bufio, *metadata_block, &b); |
| if (IS_ERR(data)) |
| return PTR_ERR(data); |
| |
| to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size); |
| dp = data + *metadata_offset; |
| if (op == TAG_READ) { |
| memcpy(tag, dp, to_copy); |
| } else if (op == TAG_WRITE) { |
| if (memcmp(dp, tag, to_copy)) { |
| memcpy(dp, tag, to_copy); |
| dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy); |
| } |
| } else { |
			/* i.e.: op == TAG_CMP */
| |
| if (likely(is_power_of_2(ic->tag_size))) { |
| if (unlikely(memcmp(dp, tag, to_copy))) |
| if (unlikely(!ic->discard) || |
| unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) { |
| goto thorough_test; |
| } |
| } else { |
| unsigned i, ts; |
| thorough_test: |
| ts = total_size; |
| |
| for (i = 0; i < to_copy; i++, ts--) { |
| if (unlikely(dp[i] != tag[i])) |
| may_be &= ~MAY_BE_HASH; |
| if (likely(dp[i] != DISCARD_FILLER)) |
| may_be &= ~MAY_BE_FILLER; |
| hash_offset++; |
| if (unlikely(hash_offset == ic->tag_size)) { |
| if (unlikely(!may_be)) { |
| dm_bufio_release(b); |
| return ts; |
| } |
| hash_offset = 0; |
| may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0); |
| } |
| } |
| } |
| } |
| dm_bufio_release(b); |
| |
| tag += to_copy; |
| *metadata_offset += to_copy; |
| if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) { |
| (*metadata_block)++; |
| *metadata_offset = 0; |
| } |
| |
| if (unlikely(!is_power_of_2(ic->tag_size))) { |
| hash_offset = (hash_offset + to_copy) % ic->tag_size; |
| } |
| |
| total_size -= to_copy; |
| } while (unlikely(total_size)); |
| |
| return 0; |
| #undef MAY_BE_FILLER |
| #undef MAY_BE_HASH |
| } |
| |
| struct flush_request { |
| struct dm_io_request io_req; |
| struct dm_io_region io_reg; |
| struct dm_integrity_c *ic; |
| struct completion comp; |
| }; |
| |
| static void flush_notify(unsigned long error, void *fr_) |
| { |
| struct flush_request *fr = fr_; |
| if (unlikely(error != 0)) |
| dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO); |
| complete(&fr->comp); |
| } |
| |
| static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data) |
| { |
| int r; |
| |
| struct flush_request fr; |
| |
| if (!ic->meta_dev) |
| flush_data = false; |
| if (flush_data) { |
		fr.io_req.bi_op = REQ_OP_WRITE;
		fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC;
		fr.io_req.mem.type = DM_IO_KMEM;
		fr.io_req.mem.ptr.addr = NULL;
		fr.io_req.notify.fn = flush_notify;
		fr.io_req.notify.context = &fr;
		fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio);
		fr.io_reg.bdev = ic->dev->bdev;
		fr.io_reg.sector = 0;
		fr.io_reg.count = 0;
		fr.ic = ic;
| init_completion(&fr.comp); |
| r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL); |
| BUG_ON(r); |
| } |
| |
| r = dm_bufio_write_dirty_buffers(ic->bufio); |
| if (unlikely(r)) |
| dm_integrity_io_error(ic, "writing tags", r); |
| |
| if (flush_data) |
| wait_for_completion(&fr.comp); |
| } |
| |
| static void sleep_on_endio_wait(struct dm_integrity_c *ic) |
| { |
| DECLARE_WAITQUEUE(wait, current); |
| __add_wait_queue(&ic->endio_wait, &wait); |
| __set_current_state(TASK_UNINTERRUPTIBLE); |
| spin_unlock_irq(&ic->endio_wait.lock); |
| io_schedule(); |
| spin_lock_irq(&ic->endio_wait.lock); |
| __remove_wait_queue(&ic->endio_wait, &wait); |
| } |
| |
| static void autocommit_fn(struct timer_list *t) |
| { |
| struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer); |
| |
| if (likely(!dm_integrity_failed(ic))) |
| queue_work(ic->commit_wq, &ic->commit_work); |
| } |
| |
| static void schedule_autocommit(struct dm_integrity_c *ic) |
| { |
| if (!timer_pending(&ic->autocommit_timer)) |
| mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies); |
| } |
| |
| static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio) |
| { |
| struct bio *bio; |
| unsigned long flags; |
| |
| spin_lock_irqsave(&ic->endio_wait.lock, flags); |
| bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); |
| bio_list_add(&ic->flush_bio_list, bio); |
| spin_unlock_irqrestore(&ic->endio_wait.lock, flags); |
| |
| queue_work(ic->commit_wq, &ic->commit_work); |
| } |
| |
| static void do_endio(struct dm_integrity_c *ic, struct bio *bio) |
| { |
| int r = dm_integrity_failed(ic); |
| if (unlikely(r) && !bio->bi_status) |
| bio->bi_status = errno_to_blk_status(r); |
| if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) { |
| unsigned long flags; |
| spin_lock_irqsave(&ic->endio_wait.lock, flags); |
| bio_list_add(&ic->synchronous_bios, bio); |
| queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0); |
| spin_unlock_irqrestore(&ic->endio_wait.lock, flags); |
| return; |
| } |
| bio_endio(bio); |
| } |
| |
| static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio) |
| { |
| struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); |
| |
| if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic))) |
| submit_flush_bio(ic, dio); |
| else |
| do_endio(ic, bio); |
| } |
| |
| static void dec_in_flight(struct dm_integrity_io *dio) |
| { |
| if (atomic_dec_and_test(&dio->in_flight)) { |
| struct dm_integrity_c *ic = dio->ic; |
| struct bio *bio; |
| |
| remove_range(ic, &dio->range); |
| |
| if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD)) |
| schedule_autocommit(ic); |
| |
| bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); |
| |
| if (unlikely(dio->bi_status) && !bio->bi_status) |
| bio->bi_status = dio->bi_status; |
| if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) { |
| dio->range.logical_sector += dio->range.n_sectors; |
| bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT); |
| INIT_WORK(&dio->work, integrity_bio_wait); |
| queue_work(ic->offload_wq, &dio->work); |
| return; |
| } |
| do_endio_flush(ic, dio); |
| } |
| } |
| |
| static void integrity_end_io(struct bio *bio) |
| { |
| struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); |
| |
| dm_bio_restore(&dio->bio_details, bio); |
| if (bio->bi_integrity) |
| bio->bi_opf |= REQ_INTEGRITY; |
| |
| if (dio->completion) |
| complete(dio->completion); |
| |
| dec_in_flight(dio); |
| } |
| |
| static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector, |
| const char *data, char *result) |
| { |
| __le64 sector_le = cpu_to_le64(sector); |
| SHASH_DESC_ON_STACK(req, ic->internal_hash); |
| int r; |
| unsigned digest_size; |
| |
| req->tfm = ic->internal_hash; |
| |
| r = crypto_shash_init(req); |
| if (unlikely(r < 0)) { |
| dm_integrity_io_error(ic, "crypto_shash_init", r); |
| goto failed; |
| } |
| |
| if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { |
| r = crypto_shash_update(req, (__u8 *)&ic->sb->salt, SALT_SIZE); |
| if (unlikely(r < 0)) { |
| dm_integrity_io_error(ic, "crypto_shash_update", r); |
| goto failed; |
| } |
| } |
| |
| r = crypto_shash_update(req, (const __u8 *)§or_le, sizeof sector_le); |
| if (unlikely(r < 0)) { |
| dm_integrity_io_error(ic, "crypto_shash_update", r); |
| goto failed; |
| } |
| |
| r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT); |
| if (unlikely(r < 0)) { |
| dm_integrity_io_error(ic, "crypto_shash_update", r); |
| goto failed; |
| } |
| |
| r = crypto_shash_final(req, result); |
| if (unlikely(r < 0)) { |
| dm_integrity_io_error(ic, "crypto_shash_final", r); |
| goto failed; |
| } |
| |
| digest_size = crypto_shash_digestsize(ic->internal_hash); |
| if (unlikely(digest_size < ic->tag_size)) |
| memset(result + digest_size, 0, ic->tag_size - digest_size); |
| |
| return; |
| |
| failed: |
	/*
	 * This shouldn't happen anyway; the hash functions have no reason
	 * to fail. Fill the result with random bytes so that a bogus tag
	 * never accidentally compares as valid.
	 */
| get_random_bytes(result, ic->tag_size); |
| } |
| |
| static void integrity_metadata(struct work_struct *w) |
| { |
| struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); |
| struct dm_integrity_c *ic = dio->ic; |
| |
| int r; |
| |
| if (ic->internal_hash) { |
| struct bvec_iter iter; |
| struct bio_vec bv; |
| unsigned digest_size = crypto_shash_digestsize(ic->internal_hash); |
| struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); |
| char *checksums; |
| unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0; |
| char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; |
| sector_t sector; |
| unsigned sectors_to_process; |
| |
| if (unlikely(ic->mode == 'R')) |
| goto skip_io; |
| |
| if (likely(dio->op != REQ_OP_DISCARD)) |
| checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space, |
| GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN); |
| else |
| checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN); |
| if (!checksums) { |
| checksums = checksums_onstack; |
| if (WARN_ON(extra_space && |
| digest_size > sizeof(checksums_onstack))) { |
| r = -EINVAL; |
| goto error; |
| } |
| } |
| |
| if (unlikely(dio->op == REQ_OP_DISCARD)) { |
| sector_t bi_sector = dio->bio_details.bi_iter.bi_sector; |
| unsigned bi_size = dio->bio_details.bi_iter.bi_size; |
| unsigned max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE; |
| unsigned max_blocks = max_size / ic->tag_size; |
| memset(checksums, DISCARD_FILLER, max_size); |
| |
| while (bi_size) { |
| unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block); |
| this_step_blocks = min(this_step_blocks, max_blocks); |
| r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset, |
| this_step_blocks * ic->tag_size, TAG_WRITE); |
| if (unlikely(r)) { |
| if (likely(checksums != checksums_onstack)) |
| kfree(checksums); |
| goto error; |
| } |
| |
| bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block); |
| bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block; |
| } |
| |
| if (likely(checksums != checksums_onstack)) |
| kfree(checksums); |
| goto skip_io; |
| } |
| |
| sector = dio->range.logical_sector; |
| sectors_to_process = dio->range.n_sectors; |
| |
| __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) { |
| unsigned pos; |
| char *mem, *checksums_ptr; |
| |
| again: |
| mem = bvec_kmap_local(&bv); |
| pos = 0; |
| checksums_ptr = checksums; |
| do { |
| integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr); |
| checksums_ptr += ic->tag_size; |
| sectors_to_process -= ic->sectors_per_block; |
| pos += ic->sectors_per_block << SECTOR_SHIFT; |
| sector += ic->sectors_per_block; |
| } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack); |
| kunmap_local(mem); |
| |
| r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset, |
| checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE); |
| if (unlikely(r)) { |
| if (r > 0) { |
| char b[BDEVNAME_SIZE]; |
| sector_t s; |
| |
| s = sector - ((r + ic->tag_size - 1) / ic->tag_size); |
| DMERR_LIMIT("%s: Checksum failed at sector 0x%llx", |
| bio_devname(bio, b), s); |
| r = -EILSEQ; |
| atomic64_inc(&ic->number_of_mismatches); |
| dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum", |
| bio, s, 0); |
| } |
| if (likely(checksums != checksums_onstack)) |
| kfree(checksums); |
| goto error; |
| } |
| |
| if (!sectors_to_process) |
| break; |
| |
| if (unlikely(pos < bv.bv_len)) { |
| bv.bv_offset += pos; |
| bv.bv_len -= pos; |
| goto again; |
| } |
| } |
| |
| if (likely(checksums != checksums_onstack)) |
| kfree(checksums); |
| } else { |
| struct bio_integrity_payload *bip = dio->bio_details.bi_integrity; |
| |
| if (bip) { |
| struct bio_vec biv; |
| struct bvec_iter iter; |
| unsigned data_to_process = dio->range.n_sectors; |
| sector_to_block(ic, data_to_process); |
| data_to_process *= ic->tag_size; |
| |
| bip_for_each_vec(biv, bip, iter) { |
| unsigned char *tag; |
| unsigned this_len; |
| |
| BUG_ON(PageHighMem(biv.bv_page)); |
| tag = bvec_virt(&biv); |
| this_len = min(biv.bv_len, data_to_process); |
| r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset, |
| this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE); |
| if (unlikely(r)) |
| goto error; |
| data_to_process -= this_len; |
| if (!data_to_process) |
| break; |
| } |
| } |
| } |
| skip_io: |
| dec_in_flight(dio); |
| return; |
| error: |
| dio->bi_status = errno_to_blk_status(r); |
| dec_in_flight(dio); |
| } |
| |
| static int dm_integrity_map(struct dm_target *ti, struct bio *bio) |
| { |
| struct dm_integrity_c *ic = ti->private; |
| struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); |
| struct bio_integrity_payload *bip; |
| |
| sector_t area, offset; |
| |
| dio->ic = ic; |
| dio->bi_status = 0; |
| dio->op = bio_op(bio); |
| |
| if (unlikely(dio->op == REQ_OP_DISCARD)) { |
| if (ti->max_io_len) { |
| sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector); |
| unsigned log2_max_io_len = __fls(ti->max_io_len); |
| sector_t start_boundary = sec >> log2_max_io_len; |
| sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len; |
| if (start_boundary < end_boundary) { |
| sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1)); |
| dm_accept_partial_bio(bio, len); |
| } |
| } |
| } |
| |
| if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { |
| submit_flush_bio(ic, dio); |
| return DM_MAPIO_SUBMITTED; |
| } |
| |
| dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); |
| dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA; |
| if (unlikely(dio->fua)) { |
| /* |
| * Don't pass down the FUA flag because we have to flush |
| * disk cache anyway. |
| */ |
| bio->bi_opf &= ~REQ_FUA; |
| } |
| if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) { |
| DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx", |
| dio->range.logical_sector, bio_sectors(bio), |
| ic->provided_data_sectors); |
| return DM_MAPIO_KILL; |
| } |
| if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) { |
| DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x", |
| ic->sectors_per_block, |
| dio->range.logical_sector, bio_sectors(bio)); |
| return DM_MAPIO_KILL; |
| } |
| |
| if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) { |
| struct bvec_iter iter; |
| struct bio_vec bv; |
| bio_for_each_segment(bv, bio, iter) { |
| if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) { |
| DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary", |
| bv.bv_offset, bv.bv_len, ic->sectors_per_block); |
| return DM_MAPIO_KILL; |
| } |
| } |
| } |
| |
| bip = bio_integrity(bio); |
| if (!ic->internal_hash) { |
| if (bip) { |
| unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block; |
| if (ic->log2_tag_size >= 0) |
| wanted_tag_size <<= ic->log2_tag_size; |
| else |
| wanted_tag_size *= ic->tag_size; |
| if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) { |
| DMERR("Invalid integrity data size %u, expected %u", |
| bip->bip_iter.bi_size, wanted_tag_size); |
| return DM_MAPIO_KILL; |
| } |
| } |
| } else { |
| if (unlikely(bip != NULL)) { |
| DMERR("Unexpected integrity data when using internal hash"); |
| return DM_MAPIO_KILL; |
| } |
| } |
| |
| if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ)) |
| return DM_MAPIO_KILL; |
| |
| get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); |
| dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset); |
| bio->bi_iter.bi_sector = get_data_sector(ic, area, offset); |
| |
| dm_integrity_map_continue(dio, true); |
| return DM_MAPIO_SUBMITTED; |
| } |
| |
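/*
 * Service a bio from the journal. For writes, copy the bio data and tags
 * into the allocated journal entries; for reads, copy the data back out,
 * waiting for entries that are still being filled in. Returns true if the
 * bio did not fit into the allocated range and the caller must retry with
 * the remainder.
 */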
| static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio, |
| unsigned journal_section, unsigned journal_entry) |
| { |
| struct dm_integrity_c *ic = dio->ic; |
| sector_t logical_sector; |
| unsigned n_sectors; |
| |
| logical_sector = dio->range.logical_sector; |
| n_sectors = dio->range.n_sectors; |
| do { |
| struct bio_vec bv = bio_iovec(bio); |
| char *mem; |
| |
| if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors)) |
| bv.bv_len = n_sectors << SECTOR_SHIFT; |
| n_sectors -= bv.bv_len >> SECTOR_SHIFT; |
| bio_advance_iter(bio, &bio->bi_iter, bv.bv_len); |
| retry_kmap: |
| mem = bvec_kmap_local(&bv); |
| if (likely(dio->op == REQ_OP_WRITE)) |
| flush_dcache_page(bv.bv_page); |
| |
| do { |
| struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry); |
| |
| if (unlikely(dio->op == REQ_OP_READ)) { |
| struct journal_sector *js; |
| char *mem_ptr; |
| unsigned s; |
| |
| if (unlikely(journal_entry_is_inprogress(je))) { |
| flush_dcache_page(bv.bv_page); |
| kunmap_local(mem); |
| |
| __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je)); |
| goto retry_kmap; |
| } |
| smp_rmb(); |
| BUG_ON(journal_entry_get_sector(je) != logical_sector); |
| js = access_journal_data(ic, journal_section, journal_entry); |
| mem_ptr = mem + bv.bv_offset; |
| s = 0; |
| do { |
| memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA); |
| *(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s]; |
| js++; |
| mem_ptr += 1 << SECTOR_SHIFT; |
| } while (++s < ic->sectors_per_block); |
| #ifdef INTERNAL_VERIFY |
| if (ic->internal_hash) { |
| char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; |
| |
| integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack); |
| if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) { |
| DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx", |
| logical_sector); |
| dm_audit_log_bio(DM_MSG_PREFIX, "journal-checksum", |
| bio, logical_sector, 0); |
| } |
| } |
| #endif |
| } |
| |
| if (!ic->internal_hash) { |
| struct bio_integrity_payload *bip = bio_integrity(bio); |
| unsigned tag_todo = ic->tag_size; |
| char *tag_ptr = journal_entry_tag(ic, je); |
| |
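				/*
				 * Without an internal hash the tag is carried in the
				 * bio integrity payload: copy it into the journal
				 * entry on write and back to the payload on read;
				 * writes without a payload get a zeroed tag.
				 */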
| if (bip) do { |
| struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter); |
| unsigned tag_now = min(biv.bv_len, tag_todo); |
| char *tag_addr; |
| BUG_ON(PageHighMem(biv.bv_page)); |
| tag_addr = bvec_virt(&biv); |
| if (likely(dio->op == REQ_OP_WRITE)) |
| memcpy(tag_ptr, tag_addr, tag_now); |
| else |
| memcpy(tag_addr, tag_ptr, tag_now); |
| bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now); |
| tag_ptr += tag_now; |
| tag_todo -= tag_now; |
| } while (unlikely(tag_todo)); else { |
| if (likely(dio->op == REQ_OP_WRITE)) |
| memset(tag_ptr, 0, tag_todo); |
| } |
| } |
| |
| if (likely(dio->op == REQ_OP_WRITE)) { |
| struct journal_sector *js; |
| unsigned s; |
| |
| js = access_journal_data(ic, journal_section, journal_entry); |
| memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT); |
| |
| s = 0; |
| do { |
| je->last_bytes[s] = js[s].commit_id; |
| } while (++s < ic->sectors_per_block); |
| |
| if (ic->internal_hash) { |
| unsigned digest_size = crypto_shash_digestsize(ic->internal_hash); |
| if (unlikely(digest_size > ic->tag_size)) { |
| char checksums_onstack[HASH_MAX_DIGESTSIZE]; |
| integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack); |
| memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size); |
| } else |
| integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je)); |
| } |
| |
| journal_entry_set_sector(je, logical_sector); |
| } |
| logical_sector += ic->sectors_per_block; |
| |
| journal_entry++; |
| if (unlikely(journal_entry == ic->journal_section_entries)) { |
| journal_entry = 0; |
| journal_section++; |
| wraparound_section(ic, &journal_section); |
| } |
| |
| bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT; |
| } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT); |
| |
| if (unlikely(dio->op == REQ_OP_READ)) |
| flush_dcache_page(bv.bv_page); |
| kunmap_local(mem); |
| } while (n_sectors); |
| |
| if (likely(dio->op == REQ_OP_WRITE)) { |
| smp_mb(); |
| if (unlikely(waitqueue_active(&ic->copy_to_journal_wait))) |
| wake_up(&ic->copy_to_journal_wait); |
| if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) { |
| queue_work(ic->commit_wq, &ic->commit_work); |
| } else { |
| schedule_autocommit(ic); |
| } |
| } else { |
| remove_range(ic, &dio->range); |
| } |
| |
| if (unlikely(bio->bi_iter.bi_size)) { |
| sector_t area, offset; |
| |
| dio->range.logical_sector = logical_sector; |
| get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); |
| dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset); |
| return true; |
| } |
| |
| return false; |
| } |
| |
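/*
 * Continue processing a bio after dm_integrity_map. In journal mode, writes
 * are allocated journal entries and reads are first looked up in the journal
 * tree; otherwise the range is locked against concurrent I/O and the bio is
 * submitted to the underlying device, with the tags verified or written by
 * integrity_metadata. If from_map is true, we are called from the request
 * routine and must not sleep, so anything that could block is offloaded to
 * a workqueue.
 */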
| static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map) |
| { |
| struct dm_integrity_c *ic = dio->ic; |
| struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); |
| unsigned journal_section, journal_entry; |
| unsigned journal_read_pos; |
| struct completion read_comp; |
| bool discard_retried = false; |
| bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ; |
| if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D') |
| need_sync_io = true; |
| |
| if (need_sync_io && from_map) { |
| INIT_WORK(&dio->work, integrity_bio_wait); |
| queue_work(ic->offload_wq, &dio->work); |
| return; |
| } |
| |
| lock_retry: |
| spin_lock_irq(&ic->endio_wait.lock); |
| retry: |
| if (unlikely(dm_integrity_failed(ic))) { |
| spin_unlock_irq(&ic->endio_wait.lock); |
| do_endio(ic, bio); |
| return; |
| } |
| dio->range.n_sectors = bio_sectors(bio); |
| journal_read_pos = NOT_FOUND; |
| if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) { |
| if (dio->op == REQ_OP_WRITE) { |
| unsigned next_entry, i, pos; |
| unsigned ws, we, range_sectors; |
| |
| dio->range.n_sectors = min(dio->range.n_sectors, |
| (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block); |
| if (unlikely(!dio->range.n_sectors)) { |
| if (from_map) |
| goto offload_to_thread; |
| sleep_on_endio_wait(ic); |
| goto retry; |
| } |
| range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block; |
| ic->free_sectors -= range_sectors; |
| journal_section = ic->free_section; |
| journal_entry = ic->free_section_entry; |
| |
| next_entry = ic->free_section_entry + range_sectors; |
| ic->free_section_entry = next_entry % ic->journal_section_entries; |
| ic->free_section += next_entry / ic->journal_section_entries; |
| ic->n_uncommitted_sections += next_entry / ic->journal_section_entries; |
| wraparound_section(ic, &ic->free_section); |
| |
| pos = journal_section * ic->journal_section_entries + journal_entry; |
| ws = journal_section; |
| we = journal_entry; |
| i = 0; |
| do { |
| struct journal_entry *je; |
| |
| add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i); |
| pos++; |
| if (unlikely(pos >= ic->journal_entries)) |
| pos = 0; |
| |
| je = access_journal_entry(ic, ws, we); |
| BUG_ON(!journal_entry_is_unused(je)); |
| journal_entry_set_inprogress(je); |
| we++; |
| if (unlikely(we == ic->journal_section_entries)) { |
| we = 0; |
| ws++; |
| wraparound_section(ic, &ws); |
| } |
| } while ((i += ic->sectors_per_block) < dio->range.n_sectors); |
| |
| spin_unlock_irq(&ic->endio_wait.lock); |
| goto journal_read_write; |
| } else { |
| sector_t next_sector; |
| journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); |
| if (likely(journal_read_pos == NOT_FOUND)) { |
| if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector)) |
| dio->range.n_sectors = next_sector - dio->range.logical_sector; |
| } else { |
| unsigned i; |
| unsigned jp = journal_read_pos + 1; |
| for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) { |
| if (!test_journal_node(ic, jp, dio->range.logical_sector + i)) |
| break; |
| } |
| dio->range.n_sectors = i; |
| } |
| } |
| } |
| if (unlikely(!add_new_range(ic, &dio->range, true))) { |
| /* |
| * We must not sleep in the request routine because it could |
| * stall bios on current->bio_list. |
| * So, we offload the bio to a workqueue if we have to sleep. |
| */ |
| if (from_map) { |
| offload_to_thread: |
| spin_unlock_irq(&ic->endio_wait.lock); |
| INIT_WORK(&dio->work, integrity_bio_wait); |
| queue_work(ic->wait_wq, &dio->work); |
| return; |
| } |
| if (journal_read_pos != NOT_FOUND) |
| dio->range.n_sectors = ic->sectors_per_block; |
| wait_and_add_new_range(ic, &dio->range); |
| /* |
| * wait_and_add_new_range drops the spinlock, so the journal |
| * may have been changed arbitrarily. We need to recheck. |
| * To simplify the code, we restrict I/O size to just one block. |
| */ |
| if (journal_read_pos != NOT_FOUND) { |
| sector_t next_sector; |
| unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); |
| if (unlikely(new_pos != journal_read_pos)) { |
| remove_range_unlocked(ic, &dio->range); |
| goto retry; |
| } |
| } |
| } |
| if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) { |
| sector_t next_sector; |
| unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); |
| if (unlikely(new_pos != NOT_FOUND) || |
| unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) { |
| remove_range_unlocked(ic, &dio->range); |
| spin_unlock_irq(&ic->endio_wait.lock); |
| queue_work(ic->commit_wq, &ic->commit_work); |
| flush_workqueue(ic->commit_wq); |
| queue_work(ic->writer_wq, &ic->writer_work); |
| flush_workqueue(ic->writer_wq); |
| discard_retried = true; |
| goto lock_retry; |
| } |
| } |
| spin_unlock_irq(&ic->endio_wait.lock); |
| |
| if (unlikely(journal_read_pos != NOT_FOUND)) { |
| journal_section = journal_read_pos / ic->journal_section_entries; |
| journal_entry = journal_read_pos % ic->journal_section_entries; |
| goto journal_read_write; |
| } |
| |
| if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) { |
| if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector, |
| dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) { |
| struct bitmap_block_status *bbs; |
| |
| bbs = sector_to_bitmap_block(ic, dio->range.logical_sector); |
| spin_lock(&bbs->bio_queue_lock); |
| bio_list_add(&bbs->bio_queue, bio); |
| spin_unlock(&bbs->bio_queue_lock); |
| queue_work(ic->writer_wq, &bbs->work); |
| return; |
| } |
| } |
| |
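	/* one reference for the submitted bio, one for the tag processing */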
| dio->in_flight = (atomic_t)ATOMIC_INIT(2); |
| |
| if (need_sync_io) { |
| init_completion(&read_comp); |
| dio->completion = &read_comp; |
| } else |
| dio->completion = NULL; |
| |
| dm_bio_record(&dio->bio_details, bio); |
| bio_set_dev(bio, ic->dev->bdev); |
| bio->bi_integrity = NULL; |
| bio->bi_opf &= ~REQ_INTEGRITY; |
| bio->bi_end_io = integrity_end_io; |
| bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT; |
| |
| if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) { |
| integrity_metadata(&dio->work); |
| dm_integrity_flush_buffers(ic, false); |
| |
| dio->in_flight = (atomic_t)ATOMIC_INIT(1); |
| dio->completion = NULL; |
| |
| submit_bio_noacct(bio); |
| |
| return; |
| } |
| |
| submit_bio_noacct(bio); |
| |
| if (need_sync_io) { |
| wait_for_completion_io(&read_comp); |
| if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && |
| dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector)) |
| goto skip_check; |
| if (ic->mode == 'B') { |
| if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector, |
| dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) |
| goto skip_check; |
| } |
| |
| if (likely(!bio->bi_status)) |
| integrity_metadata(&dio->work); |
| else |
| skip_check: |
| dec_in_flight(dio); |
| |
| } else { |
| INIT_WORK(&dio->work, integrity_metadata); |
| queue_work(ic->metadata_wq, &dio->work); |
| } |
| |
| return; |
| |
| journal_read_write: |
| if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry))) |
| goto lock_retry; |
| |
| do_endio_flush(ic, dio); |
| } |
| |
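/*
 * Workqueue routine that re-enters dm_integrity_map_continue from process
 * context, where sleeping is allowed.
 */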
| static void integrity_bio_wait(struct work_struct *w) |
| { |
| struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); |
| |
| dm_integrity_map_continue(dio, false); |
| } |
| |
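/*
 * If the last journal section is only partially used, skip its remaining
 * entries so that the next commit starts on a section boundary, and check
 * that the journal space accounting still adds up.
 */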
| static void pad_uncommitted(struct dm_integrity_c *ic) |
| { |
| if (ic->free_section_entry) { |
| ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry; |
| ic->free_section_entry = 0; |
| ic->free_section++; |
| wraparound_section(ic, &ic->free_section); |
| ic->n_uncommitted_sections++; |
| } |
| if (WARN_ON(ic->journal_sections * ic->journal_section_entries != |
| (ic->n_uncommitted_sections + ic->n_committed_sections) * |
| ic->journal_section_entries + ic->free_sectors)) { |
| DMCRIT("journal_sections %u, journal_section_entries %u, " |
| "n_uncommitted_sections %u, n_committed_sections %u, " |
| "journal_section_entries %u, free_sectors %u", |
| ic->journal_sections, ic->journal_section_entries, |
| ic->n_uncommitted_sections, ic->n_committed_sections, |
| ic->journal_section_entries, ic->free_sectors); |
| } |
| } |
| |
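/*
 * Commit work: wait for journal entries that are still being filled in,
 * stamp the uncommitted sections with commit ids, write them out to the
 * journal and finally complete any queued flush bios.
 */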
| static void integrity_commit(struct work_struct *w) |
| { |
| struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work); |
| unsigned commit_start, commit_sections; |
| unsigned i, j, n; |
| struct bio *flushes; |
| |
| del_timer(&ic->autocommit_timer); |
| |
| spin_lock_irq(&ic->endio_wait.lock); |
| flushes = bio_list_get(&ic->flush_bio_list); |
| if (unlikely(ic->mode != 'J')) { |
| spin_unlock_irq(&ic->endio_wait.lock); |
| dm_integrity_flush_buffers(ic, true); |
| goto release_flush_bios; |
| } |
| |
| pad_uncommitted(ic); |
| commit_start = ic->uncommitted_section; |
| commit_sections = ic->n_uncommitted_sections; |
| spin_unlock_irq(&ic->endio_wait.lock); |
| |
| if (!commit_sections) |
| goto release_flush_bios; |
| |
| i = commit_start; |
| for (n = 0; n < commit_sections; n++) { |
| for (j = 0; j < ic->journal_section_entries; j++) { |
| struct journal_entry *je; |
| je = access_journal_entry(ic, i, j); |
| io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je)); |
| } |
| for (j = 0; j < ic->journal_section_sectors; j++) { |
| struct journal_sector *js; |
| js = access_journal(ic, i, j); |
| js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq); |
| } |
| i++; |
| if (unlikely(i >= ic->journal_sections)) |
| ic->commit_seq = next_commit_seq(ic->commit_seq); |
| wraparound_section(ic, &i); |
| } |
| smp_rmb(); |
| |
| write_journal(ic, commit_start, commit_sections); |
| |
| spin_lock_irq(&ic->endio_wait.lock); |
| ic->uncommitted_section += commit_sections; |
| wraparound_section(ic, &ic->uncommitted_section); |
| ic->n_uncommitted_sections -= commit_sections; |
| ic->n_committed_sections += commit_sections; |
| spin_unlock_irq(&ic->endio_wait.lock); |
| |
| if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) |
| queue_work(ic->writer_wq, &ic->writer_work); |
| |
| release_flush_bios: |
| while (flushes) { |
| struct bio *next = flushes->bi_next; |
| flushes->bi_next = NULL; |
| do_endio(ic, flushes); |
| flushes = next; |
| } |
| } |
| |
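/*
 * Completion callback for copy_from_journal: release the I/O range, free
 * the journal_io structure and drop one reference on the journal operation.
 */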
| static void complete_copy_from_journal(unsigned long error, void *context) |
| { |
| struct journal_io *io = context; |
| struct journal_completion *comp = io->comp; |
| struct dm_integrity_c *ic = comp->ic; |
| remove_range(ic, &io->range); |
| mempool_free(io, &ic->journal_io_mempool); |
| if (unlikely(error != 0)) |
| dm_integrity_io_error(ic, "copying from journal", -EIO); |
| complete_journal_op(comp); |
| } |
| |
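/*
 * The last 8 bytes of each journal data sector are displaced by a commit id;
 * put the saved original bytes back before the data is written to its final
 * location.
 */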
| static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js, |
| struct journal_entry *je) |
| { |
| unsigned s = 0; |
| do { |
| js->commit_id = je->last_bytes[s]; |
| js++; |
| } while (++s < ic->sectors_per_block); |
| } |
| |
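/*
 * Write committed journal sections back to the data device. Runs of
 * consecutive sectors are coalesced into a single copy, entries superseded
 * by newer committed data are skipped, and the tags are written to the
 * metadata area along the way.
 */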
| static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start, |
| unsigned write_sections, bool from_replay) |
| { |
| unsigned i, j, n; |
| struct journal_completion comp; |
| struct blk_plug plug; |
| |
| blk_start_plug(&plug); |
| |
| comp.ic = ic; |
| comp.in_flight = (atomic_t)ATOMIC_INIT(1); |
| init_completion(&comp.comp); |
| |
| i = write_start; |
| for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) { |
| #ifndef INTERNAL_VERIFY |
| if (unlikely(from_replay)) |
| #endif |
| rw_section_mac(ic, i, false); |
| for (j = 0; j < ic->journal_section_entries; j++) { |
| struct journal_entry *je = access_journal_entry(ic, i, j); |
| sector_t sec, area, offset; |
| unsigned k, l, next_loop; |
| sector_t metadata_block; |
| unsigned metadata_offset; |
| struct journal_io *io; |
| |
| if (journal_entry_is_unused(je)) |
| continue; |
| BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay); |
| sec = journal_entry_get_sector(je); |
| if (unlikely(from_replay)) { |
| if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) { |
| dm_integrity_io_error(ic, "invalid sector in journal", -EIO); |
| sec &= ~(sector_t)(ic->sectors_per_block - 1); |
| } |
| } |
| if (unlikely(sec >= ic->provided_data_sectors)) |
| continue; |
| get_area_and_offset(ic, sec, &area, &offset); |
| restore_last_bytes(ic, access_journal_data(ic, i, j), je); |
| for (k = j + 1; k < ic->journal_section_entries; k++) { |
| struct journal_entry *je2 = access_journal_entry(ic, i, k); |
| sector_t sec2, area2, offset2; |
| if (journal_entry_is_unused(je2)) |
| break; |
| BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay); |
| sec2 = journal_entry_get_sector(je2); |
| if (unlikely(sec2 >= ic->provided_data_sectors)) |
| break; |
| get_area_and_offset(ic, sec2, &area2, &offset2); |
| if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block)) |
| break; |
| restore_last_bytes(ic, access_journal_data(ic, i, k), je2); |
| } |
| next_loop = k - 1; |
| |
| io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO); |
			io->comp = &comp;
| io->range.logical_sector = sec; |
| io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block; |
| |
| spin_lock_irq(&ic->endio_wait.lock); |
| add_new_range_and_wait(ic, &io->range); |
| |
| if (likely(!from_replay)) { |
| struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries]; |
| |
				/* don't write if there is a newer committed sector */
				while (j < k && find_newer_committed_node(ic, &section_node[j])) {
| struct journal_entry *je2 = access_journal_entry(ic, i, j); |
| |
| journal_entry_set_unused(je2); |
					remove_journal_node(ic, &section_node[j]);
| j++; |
| sec += ic->sectors_per_block; |
| offset += ic->sectors_per_block; |
| } |
				while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
| struct journal_entry *je2 = access_journal_entry(ic, i, k - 1); |
| |
| journal_entry_set_unused(je2); |
					remove_journal_node(ic, &section_node[k - 1]);
| k--; |
| } |
| if (j == k) { |
| remove_range_unlocked(ic, &io->range); |
| spin_unlock_irq(&ic->endio_wait.lock); |
| mempool_free(io, &ic->journal_io_mempool); |
| goto skip_io; |
| } |
| for (l = j; l < k; l++) { |
					remove_journal_node(ic, &section_node[l]);
| } |
| } |
| spin_unlock_irq(&ic->endio_wait.lock); |
| |
| metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset); |
| for (l = j; l < k; l++) { |
| int r; |
| struct journal_entry *je2 = access_journal_entry(ic, i, l); |
| |
| if ( |
| #ifndef INTERNAL_VERIFY |
| unlikely(from_replay) && |
| #endif |
| ic->internal_hash) { |
| char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; |
| |
| integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block), |
| (char *)access_journal_data(ic, i, l), test_tag); |
| if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) { |
| dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ); |
| dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0); |
| } |
| } |
| |
| journal_entry_set_unused(je2); |
| r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset, |
| ic->tag_size, TAG_WRITE); |
| if (unlikely(r)) { |
| dm_integrity_io_error(ic, "reading tags", r); |
| } |
| } |
| |
| atomic_inc(&comp.in_flight); |
| copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block, |
| (k - j) << ic->sb->log2_sectors_per_block, |
| get_data_sector(ic, area, offset), |
| complete_copy_from_journal, io); |
| skip_io: |
| j = next_loop; |
| } |
| } |
| |
| dm_bufio_write_dirty_buffers_async(ic->bufio); |
| |
| blk_finish_plug(&plug); |
| |
| complete_journal_op(&comp); |
| wait_for_completion_io(&comp.comp); |
| |
| dm_integrity_flush_buffers(ic, true); |
| } |
| |
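/*
 * Writer work: flush committed journal sections to the data device and
 * return the freed entries to the journal free pool, waking up anyone who
 * was waiting for journal space.
 */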
| static void integrity_writer(struct work_struct *w) |
| { |
| struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work); |
	unsigned write_start, write_sections;
	unsigned prev_free_sectors;
| |
| /* the following test is not needed, but it tests the replay code */ |
| if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev) |
| return; |
| |
| spin_lock_irq(&ic->endio_wait.lock); |
| write_start = ic->committed_section; |
| write_sections = ic->n_committed_sections; |
| spin_unlock_irq(&ic->endio_wait.lock); |
| |
| if (!write_sections) |
| return; |
| |
| do_journal_write(ic, write_start, write_sections, false); |
| |
| spin_lock_irq(&ic->endio_wait.lock); |
| |
| ic->committed_section += write_sections; |
| wraparound_section(ic, &ic->committed_section); |
| ic->n_committed_sections -= write_sections; |
| |
| prev_free_sectors = ic->free_sectors; |
| ic->free_sectors += write_sections * ic->journal_section_entries; |
| if (unlikely(!prev_free_sectors)) |
| wake_up_locked(&ic->endio_wait); |
| |
| spin_unlock_irq(&ic->endio_wait.lock); |
| } |
| |
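/*
 * Flush dirty metadata buffers and write the superblock, so that the
 * current recalculation position survives a crash or restart.
 */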
| static void recalc_write_super(struct dm_integrity_c *ic) |
| { |
| int r; |
| |
| dm_integrity_flush_buffers(ic, false); |
| if (dm_integrity_failed(ic)) |
| return; |
| |
| r = sync_rw_sb(ic, REQ_OP_WRITE, 0); |
| if (unlikely(r)) |
| dm_integrity_io_error(ic, "writing superblock", r); |
| } |
| |
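/*
 * Background recalculation: walk the device in RECALC_SECTORS chunks, read
 * the data, compute the checksums and write them to the metadata area,
 * persisting the progress in the superblock every RECALC_WRITE_SUPER chunks.
 * In bitmap mode, blocks whose recalc bits are already clear are skipped.
 */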
| static void integrity_recalc(struct work_struct *w) |
| { |
| struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work); |
| struct dm_integrity_range range; |
| struct dm_io_request io_req; |
| struct dm_io_region io_loc; |
| sector_t area, offset; |
| sector_t metadata_block; |
| unsigned metadata_offset; |
| sector_t logical_sector, n_sectors; |
| __u8 *t; |
| unsigned i; |
| int r; |
| unsigned super_counter = 0; |
| |
| DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector)); |
| |
| spin_lock_irq(&ic->endio_wait.lock); |
| |
| next_chunk: |
| |
| if (unlikely(dm_post_suspending(ic->ti))) |
| goto unlock_ret; |
| |
| range.logical_sector = le64_to_cpu(ic->sb->recalc_sector); |
| if (unlikely(range.logical_sector >= ic->provided_data_sectors)) { |
| if (ic->mode == 'B') { |
| block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); |
| DEBUG_print("queue_delayed_work: bitmap_flush_work\n"); |
| queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0); |
| } |
| goto unlock_ret; |
| } |
| |
| get_area_and_offset(ic, range.logical_sector, &area, &offset); |
| range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector); |
| if (!ic->meta_dev) |
| range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset); |
| |
| add_new_range_and_wait(ic, &range); |
| spin_unlock_irq(&ic->endio_wait.lock); |
| logical_sector = range.logical_sector; |
| n_sectors = range.n_sectors; |
| |
| if (ic->mode == 'B') { |
| if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) { |
| goto advance_and_next; |
| } |
| while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, |
| ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) { |
| logical_sector += ic->sectors_per_block; |
| n_sectors -= ic->sectors_per_block; |
| cond_resched(); |
| } |
| while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block, |
| ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) { |
| n_sectors -= ic->sectors_per_block; |
| cond_resched(); |
| } |
| get_area_and_offset(ic, logical_sector, &area, &offset); |
| } |
| |
| DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors); |
| |
| if (unlikely(++super_counter == RECALC_WRITE_SUPER)) { |
| recalc_write_super(ic); |
| if (ic->mode == 'B') { |
| queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval); |
| } |
| super_counter = 0; |
| } |
| |
| if (unlikely(dm_integrity_failed(ic))) |
| goto err; |
| |
| io_req.bi_op = REQ_OP_READ; |
| io_req.bi_op_flags = 0; |
| io_req.mem.type = DM_IO_VMA; |
| io_req.mem.ptr.addr = ic->recalc_buffer; |
| io_req.notify.fn = NULL; |
| io_req.client = ic->io; |
| io_loc.bdev = ic->dev->bdev; |
| io_loc.sector = get_data_sector(ic, area, offset); |
| io_loc.count = n_sectors; |
| |
| r = dm_io(&io_req, 1, &io_loc, NULL); |
| if (unlikely(r)) { |
| dm_integrity_io_error(ic, "reading data", r); |
| goto err; |
| } |
| |
| t = ic->recalc_tags; |
| for (i = 0; i < n_sectors; i += ic->sectors_per_block) { |
| integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t); |
| t += ic->tag_size; |
| } |
| |
| metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset); |
| |
| r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE); |
| if (unlikely(r)) { |
| dm_integrity_io_error(ic, "writing tags", r); |
| goto err; |
| } |
| |
| if (ic->mode == 'B') { |
| sector_t start, end; |
| start = (range.logical_sector >> |
| (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) << |
| (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); |
| end = ((range.logical_sector + range.n_sectors) >> |
| (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) << |
| (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); |
| block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR); |
| } |
| |
| advance_and_next: |
| cond_resched(); |
| |
| spin_lock_irq(&ic->endio_wait.lock); |
| remove_range_unlocked(ic, &range); |
| ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors); |
| goto next_chunk; |
| |
| err: |
| remove_range(ic, &range); |
| return; |
| |
| unlock_ret: |
| spin_unlock_irq(&ic->endio_wait.lock); |
| |
| recalc_write_super(ic); |
| } |
| |
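/*
 * Process bios that were queued on a bitmap block: bios whose range is
 * already marked in the may-write bitmap can proceed immediately; the rest
 * must wait until the updated bitmap has been written to disk.
 */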
| static void bitmap_block_work(struct work_struct *w) |
| { |
| struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work); |
| struct dm_integrity_c *ic = bbs->ic; |
| struct bio *bio; |
| struct bio_list bio_queue; |
| struct bio_list waiting; |
| |
| bio_list_init(&waiting); |
| |
| spin_lock(&bbs->bio_queue_lock); |
| bio_queue = bbs->bio_queue; |
| bio_list_init(&bbs->bio_queue); |
| spin_unlock(&bbs->bio_queue_lock); |
| |
| while ((bio = bio_list_pop(&bio_queue))) { |
| struct dm_integrity_io *dio; |
| |
| dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); |
| |
| if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector, |
| dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) { |
| |