Skip to content

Commit 9b458a2

Browse files
committed
Merge tag 'vfs-6.10-rc7.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs
Pull vfs fixes from Christian Brauner: "Misc: - Don't misleadingly warn during filesystem thaw operations. It's possible that a block device which was frozen before it was mounted can cause a failing thaw operation if someone concurrently tried to mount it while that thaw operation was issued and the device had already been temporarily claimed for the mount (The mount will of course be aborted because the device is frozen). netfs: - Fix io_uring based write-through. Make sure that the total request length is correctly set. - Fix partial writes to folio tail. - Remove some xarray helpers that were intended for bounce buffers which got deferred to a later patch series. - Make netfs_page_mkwrite() check whether folio->mapping is valid after acquiring the folio lock. - Make netfs_page_mkwrite() flush conflicting data instead of waiting. fsnotify: - Ensure that fsnotify creation events are generated before fsnotify open events when a file is created via ->atomic_open(). The ordering was broken before. - Ensure that no fsnotify events are generated for O_PATH file descriptors. While no fsnotify open events were generated, fsnotify close events were. Make it consistent and don't produce any" * tag 'vfs-6.10-rc7.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs: netfs: Fix netfs_page_mkwrite() to flush conflicting data, not wait netfs: Fix netfs_page_mkwrite() to check folio->mapping is valid netfs: Delete some xarray-wangling functions that aren't used netfs: Fix early issue of write op on partial write to folio tail netfs: Fix io_uring based write-through vfs: generate FS_CREATE before FS_OPEN when ->atomic_open used. fsnotify: Do not generate events for O_PATH file descriptors fs: don't misleadingly warn during thaw operations
2 parents 22a40d1 + 9d66154 commit 9b458a2

9 files changed

Lines changed: 52 additions & 106 deletions

File tree

fs/namei.c

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3572,8 +3572,12 @@ static const char *open_last_lookups(struct nameidata *nd,
35723572
else
35733573
inode_lock_shared(dir->d_inode);
35743574
dentry = lookup_open(nd, file, op, got_write);
3575-
if (!IS_ERR(dentry) && (file->f_mode & FMODE_CREATED))
3576-
fsnotify_create(dir->d_inode, dentry);
3575+
if (!IS_ERR(dentry)) {
3576+
if (file->f_mode & FMODE_CREATED)
3577+
fsnotify_create(dir->d_inode, dentry);
3578+
if (file->f_mode & FMODE_OPENED)
3579+
fsnotify_open(file);
3580+
}
35773581
if (open_flag & O_CREAT)
35783582
inode_unlock(dir->d_inode);
35793583
else
@@ -3700,6 +3704,8 @@ int vfs_tmpfile(struct mnt_idmap *idmap,
37003704
mode = vfs_prepare_mode(idmap, dir, mode, mode, mode);
37013705
error = dir->i_op->tmpfile(idmap, dir, file, mode);
37023706
dput(child);
3707+
if (file->f_mode & FMODE_OPENED)
3708+
fsnotify_open(file);
37033709
if (error)
37043710
return error;
37053711
/* Don't check for other permissions, the inode was just created */

fs/netfs/buffered_write.c

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -523,6 +523,7 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
523523
struct netfs_group *group;
524524
struct folio *folio = page_folio(vmf->page);
525525
struct file *file = vmf->vma->vm_file;
526+
struct address_space *mapping = file->f_mapping;
526527
struct inode *inode = file_inode(file);
527528
struct netfs_inode *ictx = netfs_inode(inode);
528529
vm_fault_t ret = VM_FAULT_RETRY;
@@ -534,6 +535,11 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
534535

535536
if (folio_lock_killable(folio) < 0)
536537
goto out;
538+
if (folio->mapping != mapping) {
539+
folio_unlock(folio);
540+
ret = VM_FAULT_NOPAGE;
541+
goto out;
542+
}
537543

538544
if (folio_wait_writeback_killable(folio)) {
539545
ret = VM_FAULT_LOCKED;
@@ -549,9 +555,9 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
549555
group = netfs_folio_group(folio);
550556
if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE) {
551557
folio_unlock(folio);
552-
err = filemap_fdatawait_range(inode->i_mapping,
553-
folio_pos(folio),
554-
folio_pos(folio) + folio_size(folio));
558+
err = filemap_fdatawrite_range(mapping,
559+
folio_pos(folio),
560+
folio_pos(folio) + folio_size(folio));
555561
switch (err) {
556562
case 0:
557563
ret = VM_FAULT_RETRY;

fs/netfs/direct_write.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -92,8 +92,9 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
9292
__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
9393
if (async)
9494
wreq->iocb = iocb;
95+
wreq->len = iov_iter_count(&wreq->io_iter);
9596
wreq->cleanup = netfs_cleanup_dio_write;
96-
ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), iov_iter_count(&wreq->io_iter));
97+
ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), wreq->len);
9798
if (ret < 0) {
9899
_debug("begin = %zd", ret);
99100
goto out;

fs/netfs/internal.h

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -63,15 +63,6 @@ static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
6363
/*
6464
* misc.c
6565
*/
66-
#define NETFS_FLAG_PUT_MARK BIT(0)
67-
#define NETFS_FLAG_PAGECACHE_MARK BIT(1)
68-
int netfs_xa_store_and_mark(struct xarray *xa, unsigned long index,
69-
struct folio *folio, unsigned int flags,
70-
gfp_t gfp_mask);
71-
int netfs_add_folios_to_buffer(struct xarray *buffer,
72-
struct address_space *mapping,
73-
pgoff_t index, pgoff_t to, gfp_t gfp_mask);
74-
void netfs_clear_buffer(struct xarray *buffer);
7566

7667
/*
7768
* objects.c

fs/netfs/misc.c

Lines changed: 0 additions & 81 deletions
Original file line numberDiff line numberDiff line change
@@ -8,87 +8,6 @@
88
#include <linux/swap.h>
99
#include "internal.h"
1010

11-
/*
12-
* Attach a folio to the buffer and maybe set marks on it to say that we need
13-
* to put the folio later and twiddle the pagecache flags.
14-
*/
15-
int netfs_xa_store_and_mark(struct xarray *xa, unsigned long index,
16-
struct folio *folio, unsigned int flags,
17-
gfp_t gfp_mask)
18-
{
19-
XA_STATE_ORDER(xas, xa, index, folio_order(folio));
20-
21-
retry:
22-
xas_lock(&xas);
23-
for (;;) {
24-
xas_store(&xas, folio);
25-
if (!xas_error(&xas))
26-
break;
27-
xas_unlock(&xas);
28-
if (!xas_nomem(&xas, gfp_mask))
29-
return xas_error(&xas);
30-
goto retry;
31-
}
32-
33-
if (flags & NETFS_FLAG_PUT_MARK)
34-
xas_set_mark(&xas, NETFS_BUF_PUT_MARK);
35-
if (flags & NETFS_FLAG_PAGECACHE_MARK)
36-
xas_set_mark(&xas, NETFS_BUF_PAGECACHE_MARK);
37-
xas_unlock(&xas);
38-
return xas_error(&xas);
39-
}
40-
41-
/*
42-
* Create the specified range of folios in the buffer attached to the read
43-
* request. The folios are marked with NETFS_BUF_PUT_MARK so that we know that
44-
* these need freeing later.
45-
*/
46-
int netfs_add_folios_to_buffer(struct xarray *buffer,
47-
struct address_space *mapping,
48-
pgoff_t index, pgoff_t to, gfp_t gfp_mask)
49-
{
50-
struct folio *folio;
51-
int ret;
52-
53-
if (to + 1 == index) /* Page range is inclusive */
54-
return 0;
55-
56-
do {
57-
/* TODO: Figure out what order folio can be allocated here */
58-
folio = filemap_alloc_folio(readahead_gfp_mask(mapping), 0);
59-
if (!folio)
60-
return -ENOMEM;
61-
folio->index = index;
62-
ret = netfs_xa_store_and_mark(buffer, index, folio,
63-
NETFS_FLAG_PUT_MARK, gfp_mask);
64-
if (ret < 0) {
65-
folio_put(folio);
66-
return ret;
67-
}
68-
69-
index += folio_nr_pages(folio);
70-
} while (index <= to && index != 0);
71-
72-
return 0;
73-
}
74-
75-
/*
76-
* Clear an xarray buffer, putting a ref on the folios that have
77-
* NETFS_BUF_PUT_MARK set.
78-
*/
79-
void netfs_clear_buffer(struct xarray *buffer)
80-
{
81-
struct folio *folio;
82-
XA_STATE(xas, buffer, 0);
83-
84-
rcu_read_lock();
85-
xas_for_each_marked(&xas, folio, ULONG_MAX, NETFS_BUF_PUT_MARK) {
86-
folio_put(folio);
87-
}
88-
rcu_read_unlock();
89-
xa_destroy(buffer);
90-
}
91-
9211
/**
9312
* netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
9413
* @mapping: The mapping the folio belongs to.

fs/netfs/write_issue.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -483,7 +483,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
483483
if (!debug)
484484
kdebug("R=%x: No submit", wreq->debug_id);
485485

486-
if (flen < fsize)
486+
if (foff + flen < fsize)
487487
for (int s = 0; s < NR_IO_STREAMS; s++)
488488
netfs_issue_write(wreq, &wreq->io_streams[s]);
489489

fs/open.c

Lines changed: 15 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1004,11 +1004,6 @@ static int do_dentry_open(struct file *f,
10041004
}
10051005
}
10061006

1007-
/*
1008-
* Once we return a file with FMODE_OPENED, __fput() will call
1009-
* fsnotify_close(), so we need fsnotify_open() here for symmetry.
1010-
*/
1011-
fsnotify_open(f);
10121007
return 0;
10131008

10141009
cleanup_all:
@@ -1085,8 +1080,19 @@ EXPORT_SYMBOL(file_path);
10851080
*/
10861081
int vfs_open(const struct path *path, struct file *file)
10871082
{
1083+
int ret;
1084+
10881085
file->f_path = *path;
1089-
return do_dentry_open(file, NULL);
1086+
ret = do_dentry_open(file, NULL);
1087+
if (!ret) {
1088+
/*
1089+
* Once we return a file with FMODE_OPENED, __fput() will call
1090+
* fsnotify_close(), so we need fsnotify_open() here for
1091+
* symmetry.
1092+
*/
1093+
fsnotify_open(file);
1094+
}
1095+
return ret;
10901096
}
10911097

10921098
struct file *dentry_open(const struct path *path, int flags,
@@ -1177,8 +1183,10 @@ struct file *kernel_file_open(const struct path *path, int flags,
11771183
error = do_dentry_open(f, NULL);
11781184
if (error) {
11791185
fput(f);
1180-
f = ERR_PTR(error);
1186+
return ERR_PTR(error);
11811187
}
1188+
1189+
fsnotify_open(f);
11821190
return f;
11831191
}
11841192
EXPORT_SYMBOL_GPL(kernel_file_open);

fs/super.c

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1502,8 +1502,17 @@ static int fs_bdev_thaw(struct block_device *bdev)
15021502

15031503
lockdep_assert_held(&bdev->bd_fsfreeze_mutex);
15041504

1505+
/*
1506+
* The block device may have been frozen before it was claimed by a
1507+
* filesystem. Concurrently another process might try to mount that
1508+
* frozen block device and has temporarily claimed the block device for
1509+
* that purpose causing a concurrent fs_bdev_thaw() to end up here. The
1510+
* mounter is already about to abort mounting because they still saw an
1511+
* elevated bdev->bd_fsfreeze_count so get_bdev_super() will return
1512+
* NULL in that case.
1513+
*/
15051514
sb = get_bdev_super(bdev);
1506-
if (WARN_ON_ONCE(!sb))
1515+
if (!sb)
15071516
return -EINVAL;
15081517

15091518
if (sb->s_op->thaw_super)

include/linux/fsnotify.h

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,13 @@ static inline int fsnotify_file(struct file *file, __u32 mask)
112112
{
113113
const struct path *path;
114114

115-
if (file->f_mode & FMODE_NONOTIFY)
115+
/*
116+
* FMODE_NONOTIFY are fds generated by fanotify itself which should not
117+
* generate new events. We also don't want to generate events for
118+
* FMODE_PATH fds (involves open & close events) as they are just
119+
* handle creation / destruction events and not "real" file events.
120+
*/
121+
if (file->f_mode & (FMODE_NONOTIFY | FMODE_PATH))
116122
return 0;
117123

118124
path = &file->f_path;

0 commit comments

Comments
 (0)