On Mon, Dec 30, 2019 at 07:08:03PM +0100, David Sterba wrote:
> On Fri, Dec 13, 2019 at 04:22:24PM -0800, Dennis Zhou wrote:
> > Throttle the maximum size of a discard so that we can provide an upper
> > bound for the rate of async discard. While the block layer is able to
> > split discards into the appropriate sized discards, we want to be able
> > to more accurately account for the rate at which we are consuming NCQ
> > slots, as well as to limit the upper bound of work for a discard.
> >
> > Signed-off-by: Dennis Zhou <dennis@xxxxxxxxxx>
> > Reviewed-by: Josef Bacik <josef@xxxxxxxxxxxxxx>
> > ---
> > fs/btrfs/discard.h | 5 ++++
> > fs/btrfs/free-space-cache.c | 48 +++++++++++++++++++++++++++----------
> > 2 files changed, 41 insertions(+), 12 deletions(-)
> >
> > diff --git a/fs/btrfs/discard.h b/fs/btrfs/discard.h
> > index 3ed6855e24da..cb6ef0ab879d 100644
> > --- a/fs/btrfs/discard.h
> > +++ b/fs/btrfs/discard.h
> > @@ -3,10 +3,15 @@
> > #ifndef BTRFS_DISCARD_H
> > #define BTRFS_DISCARD_H
> >
> > +#include <linux/sizes.h>
> > +
> > struct btrfs_fs_info;
> > struct btrfs_discard_ctl;
> > struct btrfs_block_group;
> >
> > +/* Discard size limits. */
> > +#define BTRFS_ASYNC_DISCARD_MAX_SIZE (SZ_64M)
> > +
> > /* Work operations. */
> > void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl,
> > struct btrfs_block_group *block_group);
> > diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
> > index 57df34480b93..0dbcea6c59f9 100644
> > --- a/fs/btrfs/free-space-cache.c
> > +++ b/fs/btrfs/free-space-cache.c
> > @@ -3466,19 +3466,40 @@ static int trim_no_bitmap(struct btrfs_block_group *block_group,
> > if (entry->offset >= end)
> > goto out_unlock;
> >
> > - extent_start = entry->offset;
> > - extent_bytes = entry->bytes;
> > - extent_trim_state = entry->trim_state;
> > - start = max(start, extent_start);
> > - bytes = min(extent_start + extent_bytes, end) - start;
> > - if (bytes < minlen) {
> > - spin_unlock(&ctl->tree_lock);
> > - mutex_unlock(&ctl->cache_writeout_mutex);
> > - goto next;
> > - }
> > + if (async) {
> > + start = extent_start = entry->offset;
> > + bytes = extent_bytes = entry->bytes;
> > + extent_trim_state = entry->trim_state;
> > + if (bytes < minlen) {
> > + spin_unlock(&ctl->tree_lock);
> > + mutex_unlock(&ctl->cache_writeout_mutex);
> > + goto next;
> > + }
> > + unlink_free_space(ctl, entry);
> > + if (bytes > BTRFS_ASYNC_DISCARD_MAX_SIZE) {
> > + bytes = extent_bytes =
> > + BTRFS_ASYNC_DISCARD_MAX_SIZE;
> > + entry->offset += BTRFS_ASYNC_DISCARD_MAX_SIZE;
> > + entry->bytes -= BTRFS_ASYNC_DISCARD_MAX_SIZE;
> > + link_free_space(ctl, entry);
> > + } else {
> > + kmem_cache_free(btrfs_free_space_cachep, entry);
> > + }
> > + } else {
>
> > + extent_start = entry->offset;
> > + extent_bytes = entry->bytes;
> > + extent_trim_state = entry->trim_state;
>
> This is common initialization for both async and sync cases so it could
> be merged into a common block.
>
I removed the chained initialization and moved the common initialization out into a shared block.
> > + start = max(start, extent_start);
> > + bytes = min(extent_start + extent_bytes, end) - start;
> > + if (bytes < minlen) {
> > + spin_unlock(&ctl->tree_lock);
> > + mutex_unlock(&ctl->cache_writeout_mutex);
> > + goto next;
> > + }