close_fs_devices() closes the devices of a given fsid and never touches
devices outside that fsid, so we don't have to hold the global
uuid_mutex. Instead, take the device_list_mutex, as the device state is
being changed.
Signed-off-by: Anand Jain <anand.jain@xxxxxxxxxx>
---
v1->v2: Add a comment explaining why we don't need uuid_mutex when touching seed.
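
For reference, a rough sketch of the locking shape close_fs_devices()
ends up with after this change (simplified from the hunks below; the
per-device teardown and the rest of the function are elided):

	static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
	{
		struct btrfs_device *device, *tmp;
		struct list_head pending_put;

		INIT_LIST_HEAD(&pending_put);

		/*
		 * The opened count and the device list are serialized by
		 * device_list_mutex; the global uuid_mutex is not taken.
		 */
		mutex_lock(&fs_devices->device_list_mutex);
		if (--fs_devices->opened > 0) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return 0;
		}

		list_for_each_entry_safe(device, tmp, &fs_devices->devices,
					 dev_list) {
			btrfs_prepare_close_one_device(device);
			list_add(&device->dev_list, &pending_put);
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		/* ... devices on pending_put are then closed and freed ... */
		return 0;
	}
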
fs/btrfs/volumes.c | 14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index b1e5ac419ca8..f4afd2e78e92 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1025,7 +1025,6 @@ static void btrfs_prepare_close_one_device(struct btrfs_device *device)
device->uuid);
BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
- /* Safe because we are under uuid_mutex */
if (device->name) {
name = rcu_string_strdup(device->name->str, GFP_NOFS);
BUG_ON(!name); /* -ENOMEM */
@@ -1043,10 +1042,12 @@ static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
INIT_LIST_HEAD(&pending_put);
- if (--fs_devices->opened > 0)
+ mutex_lock(&fs_devices->device_list_mutex);
+ if (--fs_devices->opened > 0) {
+ mutex_unlock(&fs_devices->device_list_mutex);
return 0;
+ }
- mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
btrfs_prepare_close_one_device(device);
list_add(&device->dev_list, &pending_put);
@@ -1080,14 +1081,17 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
struct btrfs_fs_devices *seed_devices = NULL;
int ret;
- mutex_lock(&uuid_mutex);
ret = close_fs_devices(fs_devices);
if (!fs_devices->opened) {
seed_devices = fs_devices->seed;
fs_devices->seed = NULL;
}
- mutex_unlock(&uuid_mutex);
+	/*
+	 * fs_devices::seed is a cloned (and then opened) local copy of
+	 * the actual seed fs_uuids::fs_devices, so we don't need the
+	 * uuid_mutex here.
+	 */
while (seed_devices) {
fs_devices = seed_devices;
seed_devices = fs_devices->seed;
--
2.7.0
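
For completeness, a rough sketch of btrfs_close_devices() with the
change applied (based on the second hunk above; the seed-walk body and
the tail of the function are not shown in the hunk and are assumed and
simplified here):

	int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
	{
		struct btrfs_fs_devices *seed_devices = NULL;
		int ret;

		/* No uuid_mutex: close_fs_devices() stays within this fsid. */
		ret = close_fs_devices(fs_devices);
		if (!fs_devices->opened) {
			seed_devices = fs_devices->seed;
			fs_devices->seed = NULL;
		}

		/*
		 * fs_devices::seed is a cloned (and then opened) local copy of
		 * the actual seed fs_uuids::fs_devices, so the seed walk below
		 * does not need the uuid_mutex either.
		 */
		while (seed_devices) {
			fs_devices = seed_devices;
			seed_devices = fs_devices->seed;
			close_fs_devices(fs_devices);
			free_fs_devices(fs_devices);
		}
		return ret;
	}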