mirror of
https://gitee.com/bianbu-linux/linux-6.6
synced 2025-04-24 14:07:52 -04:00
[ Upstream commit 2c25716dcc25a0420c4ad49d6e6bf61e60a21434 ] [BUG] If we have a filesystem with 4k sectorsize, and an inlined compressed extent created like this: item 4 key (257 INODE_ITEM 0) itemoff 15863 itemsize 160 generation 8 transid 8 size 4096 nbytes 4096 block group 0 mode 100600 links 1 uid 0 gid 0 rdev 0 sequence 1 flags 0x0(none) item 5 key (257 INODE_REF 256) itemoff 15839 itemsize 24 index 2 namelen 14 name: source_inlined item 6 key (257 EXTENT_DATA 0) itemoff 15770 itemsize 69 generation 8 type 0 (inline) inline extent data size 48 ram_bytes 4096 compression 1 (zlib) Which has an inline compressed extent at file offset 0, and its decompressed size is 4K, allowing us to reflink that 4K range to another location (which will not be compressed). If we do such reflink on a subpage system, it would fail like this: # xfs_io -f -c "reflink $mnt/source_inlined 0 60k 4k" $mnt/dest XFS_IOC_CLONE_RANGE: Input/output error [CAUSE] In zlib_decompress(), we didn't treat @start_byte as just a page offset, but also use it as an indicator on whether we should switch our output buffer. In reality, for subpage cases, although @start_byte can be non-zero, we should never switch input/output buffer, since the whole input/output buffer should never exceed one sector. Note: The above assumption is only not true if we're going to support multi-page sectorsize. Thus the current code using @start_byte as a condition to switch input/output buffer or finish the decompression is completely incorrect. [FIX] The fix involves several modifications: - Rename @start_byte to @dest_pgoff to properly express its meaning - Add an extra ASSERT() inside btrfs_decompress() to make sure the input/output size never exceeds one sector. 
- Use Z_FINISH flag to make sure the decompression happens in one go - Remove the loop needed to switch input/output buffers - Use correct destination offset inside the destination page - Consider early end as an error After the fix, even on 64K page sized aarch64, above reflink now works as expected: # xfs_io -f -c "reflink $mnt/source_inlined 0 60k 4k" $mnt/dest linked 4096/4096 bytes at offset 61440 And resulted a correct file layout: item 9 key (258 INODE_ITEM 0) itemoff 15542 itemsize 160 generation 10 transid 10 size 65536 nbytes 4096 block group 0 mode 100600 links 1 uid 0 gid 0 rdev 0 sequence 1 flags 0x0(none) item 10 key (258 INODE_REF 256) itemoff 15528 itemsize 14 index 3 namelen 4 name: dest item 11 key (258 XATTR_ITEM 3817753667) itemoff 15445 itemsize 83 location key (0 UNKNOWN.0 0) type XATTR transid 10 data_len 37 name_len 16 name: security.selinux data unconfined_u:object_r:unlabeled_t:s0 item 12 key (258 EXTENT_DATA 61440) itemoff 15392 itemsize 53 generation 10 type 1 (regular) extent data disk byte 13631488 nr 4096 extent data offset 0 nr 4096 ram 4096 extent compression 0 (none) Signed-off-by: Qu Wenruo <wqu@suse.com> Reviewed-by: David Sterba <dsterba@suse.com> Signed-off-by: David Sterba <dsterba@suse.com> Signed-off-by: Sasha Levin <sashal@kernel.org>
176 lines
5.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#ifndef BTRFS_COMPRESSION_H
#define BTRFS_COMPRESSION_H

#include <linux/sizes.h>
#include "bio.h"

/* Forward declarations — only pointers are used below, no need for full defs. */
struct btrfs_inode;
struct btrfs_ordered_extent;
|
/*
|
|
* We want to make sure that amount of RAM required to uncompress an extent is
|
|
* reasonable, so we limit the total size in ram of a compressed extent to
|
|
* 128k. This is a crucial number because it also controls how easily we can
|
|
* spread reads across cpus for decompression.
|
|
*
|
|
* We also want to make sure the amount of IO required to do a random read is
|
|
* reasonably small, so we limit the size of a compressed extent to 128k.
|
|
*/
|
|
|
|
/* Maximum length of compressed data stored on disk */
|
|
#define BTRFS_MAX_COMPRESSED (SZ_128K)
|
|
#define BTRFS_MAX_COMPRESSED_PAGES (BTRFS_MAX_COMPRESSED / PAGE_SIZE)
|
|
static_assert((BTRFS_MAX_COMPRESSED % PAGE_SIZE) == 0);
|
|
|
|
/* Maximum size of data before compression */
|
|
#define BTRFS_MAX_UNCOMPRESSED (SZ_128K)
|
|
|
|
#define BTRFS_ZLIB_DEFAULT_LEVEL 3
|
|
|
|
struct compressed_bio {
|
|
/* Number of compressed pages in the array */
|
|
unsigned int nr_pages;
|
|
|
|
/* the pages with the compressed data on them */
|
|
struct page **compressed_pages;
|
|
|
|
/* starting offset in the inode for our pages */
|
|
u64 start;
|
|
|
|
/* Number of bytes in the inode we're working on */
|
|
unsigned int len;
|
|
|
|
/* Number of bytes on disk */
|
|
unsigned int compressed_len;
|
|
|
|
/* The compression algorithm for this bio */
|
|
u8 compress_type;
|
|
|
|
/* Whether this is a write for writeback. */
|
|
bool writeback;
|
|
|
|
union {
|
|
/* For reads, this is the bio we are copying the data into */
|
|
struct btrfs_bio *orig_bbio;
|
|
struct work_struct write_end_work;
|
|
};
|
|
|
|
/* Must be last. */
|
|
struct btrfs_bio bbio;
|
|
};
|
|
|
|
/* Extract the compression type from a packed type/level value (low 4 bits). */
static inline unsigned int btrfs_compress_type(unsigned int type_level)
{
	return (type_level & 0xF);
}
/* Extract the compression level from a packed type/level value (bits 4-7). */
static inline unsigned int btrfs_compress_level(unsigned int type_level)
{
	return ((type_level & 0xF0) >> 4);
}
int __init btrfs_init_compress(void);
|
|
void __cold btrfs_exit_compress(void);
|
|
|
|
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
|
|
u64 start, struct page **pages,
|
|
unsigned long *out_pages,
|
|
unsigned long *total_in,
|
|
unsigned long *total_out);
|
|
int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
|
|
unsigned long start_byte, size_t srclen, size_t destlen);
|
|
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
|
|
struct compressed_bio *cb, u32 decompressed);
|
|
|
|
void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
|
|
struct page **compressed_pages,
|
|
unsigned int nr_pages,
|
|
blk_opf_t write_flags,
|
|
bool writeback);
|
|
void btrfs_submit_compressed_read(struct btrfs_bio *bbio);
|
|
|
|
unsigned int btrfs_compress_str2level(unsigned int type, const char *str);
|
|
|
|
/* On-disk compression type values; these are part of the disk format. */
enum btrfs_compression_type {
	BTRFS_COMPRESS_NONE  = 0,
	BTRFS_COMPRESS_ZLIB  = 1,
	BTRFS_COMPRESS_LZO   = 2,
	BTRFS_COMPRESS_ZSTD  = 3,
	BTRFS_NR_COMPRESS_TYPES = 4,
};
struct workspace_manager {
|
|
struct list_head idle_ws;
|
|
spinlock_t ws_lock;
|
|
/* Number of free workspaces */
|
|
int free_ws;
|
|
/* Total number of allocated workspaces */
|
|
atomic_t total_ws;
|
|
/* Waiters for a free workspace */
|
|
wait_queue_head_t ws_wait;
|
|
};
|
|
|
|
struct list_head *btrfs_get_workspace(int type, unsigned int level);
|
|
void btrfs_put_workspace(int type, struct list_head *ws);
|
|
|
|
struct btrfs_compress_op {
|
|
struct workspace_manager *workspace_manager;
|
|
/* Maximum level supported by the compression algorithm */
|
|
unsigned int max_level;
|
|
unsigned int default_level;
|
|
};
|
|
|
|
/* The heuristic workspaces are managed via the 0th workspace manager */
|
|
#define BTRFS_NR_WORKSPACE_MANAGERS BTRFS_NR_COMPRESS_TYPES
|
|
|
|
extern const struct btrfs_compress_op btrfs_heuristic_compress;
|
|
extern const struct btrfs_compress_op btrfs_zlib_compress;
|
|
extern const struct btrfs_compress_op btrfs_lzo_compress;
|
|
extern const struct btrfs_compress_op btrfs_zstd_compress;
|
|
|
|
const char* btrfs_compress_type2str(enum btrfs_compression_type type);
|
|
bool btrfs_compress_is_valid_type(const char *str, size_t len);
|
|
|
|
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end);
|
|
|
|
int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
|
|
u64 start, struct page **pages, unsigned long *out_pages,
|
|
unsigned long *total_in, unsigned long *total_out);
|
|
int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
|
|
int zlib_decompress(struct list_head *ws, const u8 *data_in,
|
|
struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
|
|
size_t destlen);
|
|
struct list_head *zlib_alloc_workspace(unsigned int level);
|
|
void zlib_free_workspace(struct list_head *ws);
|
|
struct list_head *zlib_get_workspace(unsigned int level);
|
|
|
|
int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
|
|
u64 start, struct page **pages, unsigned long *out_pages,
|
|
unsigned long *total_in, unsigned long *total_out);
|
|
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
|
|
int lzo_decompress(struct list_head *ws, const u8 *data_in,
|
|
struct page *dest_page, unsigned long start_byte, size_t srclen,
|
|
size_t destlen);
|
|
struct list_head *lzo_alloc_workspace(unsigned int level);
|
|
void lzo_free_workspace(struct list_head *ws);
|
|
|
|
int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
|
|
u64 start, struct page **pages, unsigned long *out_pages,
|
|
unsigned long *total_in, unsigned long *total_out);
|
|
int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
|
|
int zstd_decompress(struct list_head *ws, const u8 *data_in,
|
|
struct page *dest_page, unsigned long start_byte, size_t srclen,
|
|
size_t destlen);
|
|
void zstd_init_workspace_manager(void);
|
|
void zstd_cleanup_workspace_manager(void);
|
|
struct list_head *zstd_alloc_workspace(unsigned int level);
|
|
void zstd_free_workspace(struct list_head *ws);
|
|
struct list_head *zstd_get_workspace(unsigned int level);
|
|
void zstd_put_workspace(struct list_head *ws);
|
|
|
|
#endif
|