On 7.06.20 at 10:25, Qu Wenruo wrote:
> Signed-off-by: Qu Wenruo <wqu@xxxxxxxx>
> ---
> fs/btrfs/disk-io.c | 6 ++++++
> fs/btrfs/qgroup.c | 43 +++++++++++++++++++++++++++++++++++++++++++
> fs/btrfs/qgroup.h | 2 +-
> 3 files changed, 50 insertions(+), 1 deletion(-)
>
> diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
> index f8ec2d8606fd..48d047e64461 100644
> --- a/fs/btrfs/disk-io.c
> +++ b/fs/btrfs/disk-io.c
> @@ -4058,6 +4058,12 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
> ASSERT(list_empty(&fs_info->delayed_iputs));
> set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
>
> + if (btrfs_qgroup_has_leak(fs_info)) {
> + WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
> + KERN_ERR "BTRFS: qgroup reserved space leaked\n");
> + btrfs_err(fs_info, "qgroup reserved space leaked\n");
I don't think the message from the WARN() brings any value; it's simply
duplicated by the btrfs_err(). IMO it's more concise to do:

	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
	btrfs_err(fs_info, "qgroup reserved space leaked");

without losing any information whatsoever.
> + }
Is it safe to call this code here? The workqueues are destroyed only
later, in btrfs_stop_all_workers(), so it's possible they still have
some lingering work items, which in turn might cause a false positive
in this check.
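To illustrate, this is the ordering I have in mind, heavily trimmed from
my reading of close_ctree() with your hunk applied (the comments are
mine, the rest elided):

	void __cold close_ctree(struct btrfs_fs_info *fs_info)
	{
		...
		/* the leak check runs here, while work items may still be queued */
		if (btrfs_qgroup_has_leak(fs_info)) {
			...
		}

		btrfs_free_qgroup_config(fs_info);
		...
		/* the workqueues are only torn down later */
		btrfs_stop_all_workers(fs_info);
		...
	}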
> +
> btrfs_free_qgroup_config(fs_info);
> ASSERT(list_empty(&fs_info->delalloc_roots));
>
> diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
> index 5bd4089ad0e1..3fccf2ffdcf1 100644
> --- a/fs/btrfs/qgroup.c
> +++ b/fs/btrfs/qgroup.c
> @@ -505,6 +505,49 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
> return ret < 0 ? ret : 0;
> }
>
> +static u64 btrfs_qgroup_subvolid(u64 qgroupid)
> +{
> + return (qgroupid & ((1ULL << BTRFS_QGROUP_LEVEL_SHIFT) - 1));
> +}
> +/*
> + * Get called for close_ctree() when quota is still enabled.
> + * This verifies we don't leak some reserved space.
> + *
> + * Return false if no reserved space is left.
> + * Return true if some reserved space is leaked.
> + */
> +bool btrfs_qgroup_has_leak(struct btrfs_fs_info *fs_info)
> +{
> + struct btrfs_qgroup *qgroup;
nit: This variable is used only in the loop below, so define it there to
reduce its scope.
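i.e. something like (untested, just moving the declaration into the
loop):

	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
		struct btrfs_qgroup *qgroup = rb_entry(node,
						       struct btrfs_qgroup, node);
		int i;
		...
	}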
> + struct rb_node *node;
> + bool ret = false;
> +
> + if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
> + return ret;
> + /*
> + * Since we're unmounting, there is no race and no need to grab
> + * qgroup lock.
> + * And here we don't go post order to provide a more user friendly
> + * sorted result.
> + */
> + for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
> + int i;
> +
> + qgroup = rb_entry(node, struct btrfs_qgroup, node);
> + for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
> + if (qgroup->rsv.values[i]) {
> + ret = true;
> + btrfs_warn(fs_info,
> + "qgroup %llu/%llu has unreleased space, type=%d rsv=%llu",
> + btrfs_qgroup_level(qgroup->qgroupid),
> + btrfs_qgroup_subvolid(qgroup->qgroupid),
> + i, qgroup->rsv.values[i]);
> + }
> + }
> + }
> + return ret;
> +}
> +
> /*
> * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
> * first two are in single-threaded paths.And for the third one, we have set
> diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
> index 1bc654459469..e3e9f9df8320 100644
> --- a/fs/btrfs/qgroup.h
> +++ b/fs/btrfs/qgroup.h
> @@ -415,5 +415,5 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
> int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
> struct btrfs_root *root, struct extent_buffer *eb);
> void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
> -
> +bool btrfs_qgroup_has_leak(struct btrfs_fs_info *fs_info);
> #endif
>