Skip to content

Commit

Permalink
Merge tag 'f2fs-for-6.14-rc1' of git://git.kernel.org/pub/scm/linux/k…
Browse files Browse the repository at this point in the history
…ernel/git/jaegeuk/f2fs

Pull f2fs updates from Jaegeuk Kim:
 "In this series, there are several major improvements such as folio
  conversion by Matthew, speed-up of block truncation, and caching more
  dentry pages.

  In addition, we implemented a linear dentry search to address recent
  unicode regression, and figured out some false alarms that we could
  get rid of.

  Enhancements:
   - folio conversion in various IO paths
   - optimize f2fs_truncate_data_blocks_range()
   - cache more dentry pages
   - remove unnecessary blk_finish_plug
   - procfs: show mtime in segment_bits

  Bug fixes:
   - introduce linear search for dentries
   - don't call block truncation for aliased file
   - fix using wrong 'submitted' value in f2fs_write_cache_pages
   - fix to do sanity check correctly on i_inline_xattr_size
   - avoid trying to get invalid block address
   - fix inconsistent dirty state of atomic file"

* tag 'f2fs-for-6.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (32 commits)
  f2fs: fix inconsistent dirty state of atomic file
  f2fs: fix to avoid changing 'check only' behavior of recovery
  f2fs: Clean up the loop outside of f2fs_invalidate_blocks()
  f2fs: procfs: show mtime in segment_bits
  f2fs: fix to avoid return invalid mtime from f2fs_get_section_mtime()
  f2fs: Fix format specifier in sanity_check_inode()
  f2fs: avoid trying to get invalid block address
  f2fs: fix to do sanity check correctly on i_inline_xattr_size
  f2fs: remove blk_finish_plug
  f2fs: Optimize f2fs_truncate_data_blocks_range()
  f2fs: fix using wrong 'submitted' value in f2fs_write_cache_pages
  f2fs: add parameter @len to f2fs_invalidate_blocks()
  f2fs: update_sit_entry_for_release() supports consecutive blocks.
  f2fs: introduce update_sit_entry_for_release/alloc()
  f2fs: don't call block truncation for aliased file
  f2fs: Introduce linear search for dentries
  f2fs: add parameter @len to f2fs_invalidate_internal_cache()
  f2fs: expand f2fs_invalidate_compress_page() to f2fs_invalidate_compress_pages_range()
  f2fs: ensure that node info flags are always initialized
  f2fs: The GC triggered by ioctl also needs to mark the segno as victim
  ...
  • Loading branch information
torvalds committed Jan 28, 2025
2 parents f34b580 + 03511e9 commit 6d61a53
Show file tree
Hide file tree
Showing 14 changed files with 324 additions and 201 deletions.
38 changes: 23 additions & 15 deletions fs/f2fs/compress.c
Original file line number Diff line number Diff line change
Expand Up @@ -846,7 +846,7 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
int index, int nr_pages, bool uptodate)
{
unsigned long pgidx = pages[index]->index;
unsigned long pgidx = page_folio(pages[index])->index;
int i = uptodate ? 0 : 1;

/*
Expand All @@ -860,9 +860,11 @@ bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
return false;

for (; i < cc->cluster_size; i++) {
if (pages[index + i]->index != pgidx + i)
struct folio *folio = page_folio(pages[index + i]);

if (folio->index != pgidx + i)
return false;
if (uptodate && !PageUptodate(pages[index + i]))
if (uptodate && !folio_test_uptodate(folio))
return false;
}

Expand Down Expand Up @@ -1195,7 +1197,8 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
.cluster_size = F2FS_I(inode)->i_cluster_size,
.rpages = fsdata,
};
bool first_index = (index == cc.rpages[0]->index);
struct folio *folio = page_folio(cc.rpages[0]);
bool first_index = (index == folio->index);

if (copied)
set_cluster_dirty(&cc);
Expand Down Expand Up @@ -1239,13 +1242,14 @@ int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
int i;

for (i = cluster_size - 1; i >= 0; i--) {
loff_t start = rpages[i]->index << PAGE_SHIFT;
struct folio *folio = page_folio(rpages[i]);
loff_t start = folio->index << PAGE_SHIFT;

if (from <= start) {
zero_user_segment(rpages[i], 0, PAGE_SIZE);
folio_zero_segment(folio, 0, folio_size(folio));
} else {
zero_user_segment(rpages[i], from - start,
PAGE_SIZE);
folio_zero_segment(folio, from - start,
folio_size(folio));
break;
}
}
Expand Down Expand Up @@ -1278,6 +1282,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ?
1 : 0,
};
struct folio *folio;
struct dnode_of_data dn;
struct node_info ni;
struct compress_io_ctx *cic;
Expand All @@ -1289,7 +1294,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,

/* we should bypass data pages to proceed the kworker jobs */
if (unlikely(f2fs_cp_error(sbi))) {
mapping_set_error(cc->rpages[0]->mapping, -EIO);
mapping_set_error(inode->i_mapping, -EIO);
goto out_free;
}

Expand All @@ -1316,7 +1321,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
goto out_put_dnode;
}

psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
folio = page_folio(cc->rpages[last_index]);
psize = folio_pos(folio) + folio_size(folio);

err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
if (err)
Expand All @@ -1339,7 +1345,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,

for (i = 0; i < cc->valid_nr_cpages; i++) {
f2fs_set_compressed_page(cc->cpages[i], inode,
cc->rpages[i + 1]->index, cic);
page_folio(cc->rpages[i + 1])->index, cic);
fio.compressed_page = cc->cpages[i];

fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
Expand Down Expand Up @@ -1374,7 +1380,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
if (blkaddr == COMPRESS_ADDR)
fio.compr_blocks++;
if (__is_valid_data_blkaddr(blkaddr))
f2fs_invalidate_blocks(sbi, blkaddr);
f2fs_invalidate_blocks(sbi, blkaddr, 1);
f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
goto unlock_continue;
}
Expand All @@ -1384,7 +1390,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,

if (i > cc->valid_nr_cpages) {
if (__is_valid_data_blkaddr(blkaddr)) {
f2fs_invalidate_blocks(sbi, blkaddr);
f2fs_invalidate_blocks(sbi, blkaddr, 1);
f2fs_update_data_blkaddr(&dn, NEW_ADDR);
}
goto unlock_continue;
Expand Down Expand Up @@ -1545,6 +1551,7 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
if (!clear_page_dirty_for_io(cc->rpages[i]))
goto continue_unlock;

submitted = 0;
ret = f2fs_write_single_data_page(page_folio(cc->rpages[i]),
&submitted,
NULL, NULL, wbc, io_type,
Expand Down Expand Up @@ -1903,11 +1910,12 @@ struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
return sbi->compress_inode->i_mapping;
}

void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
block_t blkaddr, unsigned int len)
{
if (!sbi->compress_inode)
return;
invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr + len - 1);
}

void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
Expand Down
62 changes: 25 additions & 37 deletions fs/f2fs/data.c
Original file line number Diff line number Diff line change
Expand Up @@ -70,9 +70,9 @@ bool f2fs_is_cp_guaranteed(struct page *page)
return false;
}

static enum count_type __read_io_type(struct page *page)
static enum count_type __read_io_type(struct folio *folio)
{
struct address_space *mapping = page_file_mapping(page);
struct address_space *mapping = folio->mapping;

if (mapping) {
struct inode *inode = mapping->host;
Expand Down Expand Up @@ -136,27 +136,22 @@ struct bio_post_read_ctx {
*/
static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
{
struct bio_vec *bv;
struct bvec_iter_all iter_all;
struct folio_iter fi;
struct bio_post_read_ctx *ctx = bio->bi_private;

bio_for_each_segment_all(bv, bio, iter_all) {
struct page *page = bv->bv_page;
bio_for_each_folio_all(fi, bio) {
struct folio *folio = fi.folio;

if (f2fs_is_compressed_page(page)) {
if (f2fs_is_compressed_page(&folio->page)) {
if (ctx && !ctx->decompression_attempted)
f2fs_end_read_compressed_page(page, true, 0,
f2fs_end_read_compressed_page(&folio->page, true, 0,
in_task);
f2fs_put_page_dic(page, in_task);
f2fs_put_page_dic(&folio->page, in_task);
continue;
}

if (bio->bi_status)
ClearPageUptodate(page);
else
SetPageUptodate(page);
dec_page_count(F2FS_P_SB(page), __read_io_type(page));
unlock_page(page);
dec_page_count(F2FS_F_SB(folio), __read_io_type(folio));
folio_end_read(folio, bio->bi_status == 0);
}

if (ctx)
Expand Down Expand Up @@ -516,10 +511,6 @@ static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
enum page_type type)
{
WARN_ON_ONCE(is_read_io(bio_op(bio)));

if (f2fs_lfs_mode(sbi) && current->plug && PAGE_TYPE_ON_MAIN(type))
blk_finish_plug(current->plug);

trace_f2fs_submit_write_bio(sbi->sb, type, bio);
iostat_update_submit_ctx(bio, type);
submit_bio(bio);
Expand Down Expand Up @@ -689,33 +680,29 @@ void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
struct bio *bio;
struct page *page = fio->encrypted_page ?
fio->encrypted_page : fio->page;
struct folio *fio_folio = page_folio(fio->page);
struct folio *data_folio = fio->encrypted_page ?
page_folio(fio->encrypted_page) : fio_folio;

if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
fio->is_por ? META_POR : (__is_meta_io(fio) ?
META_GENERIC : DATA_GENERIC_ENHANCE)))
return -EFSCORRUPTED;

trace_f2fs_submit_page_bio(page, fio);
trace_f2fs_submit_folio_bio(data_folio, fio);

/* Allocate a new bio */
bio = __bio_alloc(fio, 1);

f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
page_folio(fio->page)->index, fio, GFP_NOIO);

if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
return -EFAULT;
}
f2fs_set_bio_crypt_ctx(bio, fio_folio->mapping->host,
fio_folio->index, fio, GFP_NOIO);
bio_add_folio_nofail(bio, data_folio, folio_size(data_folio), 0);

if (fio->io_wbc && !is_read_io(fio->op))
wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
PAGE_SIZE);
wbc_account_cgroup_owner(fio->io_wbc, fio_folio, PAGE_SIZE);

inc_page_count(fio->sbi, is_read_io(fio->op) ?
__read_io_type(page) : WB_DATA_TYPE(fio->page, false));
__read_io_type(data_folio) : WB_DATA_TYPE(fio->page, false));

if (is_read_io(bio_op(bio)))
f2fs_submit_read_bio(fio->sbi, bio, fio->type);
Expand Down Expand Up @@ -894,7 +881,7 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
return -EFSCORRUPTED;

trace_f2fs_submit_page_bio(page, fio);
trace_f2fs_submit_folio_bio(page_folio(page), fio);

if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
fio->new_blkaddr))
Expand Down Expand Up @@ -1018,7 +1005,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)

io->last_block_in_bio = fio->new_blkaddr;

trace_f2fs_submit_page_write(fio->page, fio);
trace_f2fs_submit_folio_write(page_folio(fio->page), fio);
#ifdef CONFIG_BLK_DEV_ZONED
if (f2fs_sb_has_blkzoned(sbi) && btype < META &&
is_end_zone_blkaddr(sbi, fio->new_blkaddr)) {
Expand Down Expand Up @@ -1289,7 +1276,7 @@ struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
struct address_space *mapping = inode->i_mapping;
struct page *page;

page = find_get_page(mapping, index);
page = find_get_page_flags(mapping, index, FGP_ACCESSED);
if (page && PageUptodate(page))
return page;
f2fs_put_page(page, 0);
Expand Down Expand Up @@ -1423,7 +1410,7 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
return err;

if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
f2fs_invalidate_internal_cache(sbi, old_blkaddr);
f2fs_invalidate_internal_cache(sbi, old_blkaddr, 1);

f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
return 0;
Expand Down Expand Up @@ -2464,7 +2451,7 @@ static int f2fs_mpage_readpages(struct inode *inode,

static int f2fs_read_data_folio(struct file *file, struct folio *folio)
{
struct inode *inode = folio_file_mapping(folio)->host;
struct inode *inode = folio->mapping->host;
int ret = -EAGAIN;

trace_f2fs_readpage(folio, DATA);
Expand Down Expand Up @@ -3163,6 +3150,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
continue;
}
#endif
submitted = 0;
ret = f2fs_write_single_data_page(folio,
&submitted, &bio, &last_block,
wbc, io_type, 0, true);
Expand Down
Loading

0 comments on commit 6d61a53

Please sign in to comment.