This is a small part of a larger set of fixes needed to get
subpagesize-blocksize support to function properly. This patch is required
for 2k blocksize writes to work correctly.

Without this patch, in lock_and_cleanup_extent_if_need() two consecutive 2k
block writes that map to the first and second 2k parts of a 4k page would
result in the second 2k write *clearing* the EXTENT_DELALLOC bit of the
first 2k block. Fix this.
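
To illustrate the range arithmetic (this sketch is not part of the patch),
consider a 2k write at offset 2048 on a filesystem with a 2k sectorsize and
4k pages. The constants and the user-space ALIGN() macro below are
assumptions made for the example; they mirror, but are not taken from, the
kernel definitions:

/*
 * Standalone user-space sketch of the old (page-aligned) and new
 * (sector-aligned) lock range computation.  All constants are assumed
 * example values.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_CACHE_SHIFT 12
#define PAGE_CACHE_SIZE  (1ULL << PAGE_CACHE_SHIFT)   /* assumed 4k page */
#define SECTORSIZE       2048ULL                      /* assumed 2k blocksize */
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint64_t pos = 2048, write_bytes = 2048, num_pages = 1;

	/* Before the patch: the locked range is rounded out to full pages. */
	uint64_t old_start = pos & ~(PAGE_CACHE_SIZE - 1);
	uint64_t old_last  = old_start + (num_pages << PAGE_CACHE_SHIFT) - 1;

	/* After the patch: the range covers only the sectors being written. */
	uint64_t new_start = pos & ~(SECTORSIZE - 1);
	uint64_t new_last  = new_start +
			     ALIGN(pos + write_bytes - new_start, SECTORSIZE) - 1;

	printf("old range: [%llu, %llu]\n",
	       (unsigned long long)old_start, (unsigned long long)old_last);
	printf("new range: [%llu, %llu]\n",
	       (unsigned long long)new_start, (unsigned long long)new_last);
	return 0;
}

This prints "old range: [0, 4095]" and "new range: [2048, 4095]": with the
old page-aligned rounding the second 2k write's cleanup covers the whole
page and so clears the EXTENT_DELALLOC bit set for the first 2k block,
while with the sector-aligned rounding it leaves the first block untouched.
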
Signed-off-by: Chandan Rajendra <chandan@xxxxxxxxxxxxxxxxxx>
---
fs/btrfs/file.c | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 006af2f..1aa8fae 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1339,18 +1339,21 @@ fail:
static noinline int
lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
size_t num_pages, loff_t pos,
+ size_t write_bytes,
u64 *lockstart, u64 *lockend,
struct extent_state **cached_state)
{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
u64 start_pos;
u64 last_pos;
int i;
int ret = 0;
- start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
- last_pos = start_pos + ((u64)num_pages << PAGE_CACHE_SHIFT) - 1;
+ start_pos = pos & ~((u64)root->sectorsize - 1);
+ last_pos = start_pos
+ + ALIGN(pos + write_bytes - start_pos, root->sectorsize) - 1;
- if (start_pos < inode->i_size) {
+ if (start_pos < inode->i_size) {
struct btrfs_ordered_extent *ordered;
lock_extent_bits(&BTRFS_I(inode)->io_tree,
start_pos, last_pos, 0, cached_state);
@@ -1536,8 +1539,8 @@ again:
break;
ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages,
- pos, &lockstart, &lockend,
- &cached_state);
+ pos, write_bytes, &lockstart, &lockend,
+ &cached_state);
if (ret < 0) {
if (ret == -EAGAIN)
goto again;
--
1.8.3.1