5.5.0-0.rc2.git2.1.fc32.x86_64
btrfs-progs-5.4-1.fc31.x86_64
/dev/nvme0n1p7 on / type btrfs
(rw,noatime,seclabel,ssd,space_cache=v2,subvolid=274,subvol=/root)
Workload at the time of this call trace: fstrim + btrfs scrub running
concurrently (yes, that's asking for trouble, but it is related to the
slow-fstrim thread).
Attaching the same dmesg snippet below as a txt file.
[ 3198.185485] hp_wmi: Unknown event_id - 131073 - 0x0
[ 3917.096295] ======================================================
[ 3917.096296] WARNING: possible circular locking dependency detected
[ 3917.096299] 5.5.0-0.rc2.git2.1.fc32.x86_64 #1 Not tainted
[ 3917.096300] ------------------------------------------------------
[ 3917.096301] btrfs/5199 is trying to acquire lock:
[ 3917.096303] ffffffffb5653e50 (cpu_hotplug_lock.rw_sem){++++}, at:
alloc_workqueue+0x3a2/0x480
[ 3917.096309]
but task is already holding lock:
[ 3917.096310] ffff89832ad56120 (&fs_info->scrub_lock){+.+.}, at:
btrfs_scrub_dev+0xf6/0x650 [btrfs]
[ 3917.096339]
which lock already depends on the new lock.
[ 3917.096340]
the existing dependency chain (in reverse order) is:
[ 3917.096342]
-> #8 (&fs_info->scrub_lock){+.+.}:
[ 3917.096346] __mutex_lock+0xac/0x9e0
[ 3917.096366] btrfs_scrub_dev+0xf6/0x650 [btrfs]
[ 3917.096386] btrfs_ioctl+0x72a/0x2cb0 [btrfs]
[ 3917.096389] do_vfs_ioctl+0x580/0x7b0
[ 3917.096391] ksys_ioctl+0x5e/0x90
[ 3917.096393] __x64_sys_ioctl+0x16/0x20
[ 3917.096396] do_syscall_64+0x5c/0xa0
[ 3917.096398] entry_SYSCALL_64_after_hwframe+0x49/0xbe
[ 3917.096399]
-> #7 (&fs_devs->device_list_mutex){+.+.}:
[ 3917.096402] __mutex_lock+0xac/0x9e0
[ 3917.096421] btrfs_run_dev_stats+0x46/0x4a0 [btrfs]
[ 3917.096437] commit_cowonly_roots+0xb5/0x300 [btrfs]
[ 3917.096452] btrfs_commit_transaction+0x506/0xad0 [btrfs]
[ 3917.096481] btrfs_sync_file+0x355/0x490 [btrfs]
[ 3917.096483] do_fsync+0x38/0x70
[ 3917.096484] __x64_sys_fsync+0x10/0x20
[ 3917.096486] do_syscall_64+0x5c/0xa0
[ 3917.096488] entry_SYSCALL_64_after_hwframe+0x49/0xbe
[ 3917.096489]
-> #6 (&fs_info->tree_log_mutex){+.+.}:
[ 3917.096491] __mutex_lock+0xac/0x9e0
[ 3917.096514] btrfs_commit_transaction+0x4ae/0xad0 [btrfs]
[ 3917.096526] btrfs_sync_file+0x355/0x490 [btrfs]
[ 3917.096527] do_fsync+0x38/0x70
[ 3917.096528] __x64_sys_fsync+0x10/0x20
[ 3917.096530] do_syscall_64+0x5c/0xa0
[ 3917.096532] entry_SYSCALL_64_after_hwframe+0x49/0xbe
[ 3917.096532]
-> #5 (&fs_info->reloc_mutex){+.+.}:
[ 3917.096534] __mutex_lock+0xac/0x9e0
[ 3917.096545] btrfs_record_root_in_trans+0x44/0x70 [btrfs]
[ 3917.096555] start_transaction+0xbb/0x540 [btrfs]
[ 3917.096565] btrfs_dirty_inode+0x44/0xd0 [btrfs]
[ 3917.096566] file_update_time+0xeb/0x140
[ 3917.096577] btrfs_page_mkwrite+0xfe/0x560 [btrfs]
[ 3917.096579] do_page_mkwrite+0x4f/0x130
[ 3917.096581] do_wp_page+0x2a0/0x3e0
[ 3917.096582] __handle_mm_fault+0xc6c/0x15a0
[ 3917.096584] handle_mm_fault+0x169/0x360
[ 3917.096586] do_user_addr_fault+0x1fc/0x480
[ 3917.096587] do_page_fault+0x31/0x210
[ 3917.096588] page_fault+0x43/0x50
[ 3917.096589]
-> #4 (sb_pagefaults){.+.+}:
[ 3917.096591] __sb_start_write+0x149/0x230
[ 3917.096602] btrfs_page_mkwrite+0x69/0x560 [btrfs]
[ 3917.096603] do_page_mkwrite+0x4f/0x130
[ 3917.096605] do_wp_page+0x2a0/0x3e0
[ 3917.096606] __handle_mm_fault+0xc6c/0x15a0
[ 3917.096608] handle_mm_fault+0x169/0x360
[ 3917.096609] do_user_addr_fault+0x1fc/0x480
[ 3917.096610] do_page_fault+0x31/0x210
[ 3917.096611] page_fault+0x43/0x50
[ 3917.096612]
-> #3 (&mm->mmap_sem#2){++++}:
[ 3917.096615] __might_fault+0x60/0x80
[ 3917.096617] _copy_to_user+0x1e/0x90
[ 3917.096619] relay_file_read+0xb0/0x2e0
[ 3917.096621] full_proxy_read+0x56/0x80
[ 3917.096623] vfs_read+0xc5/0x180
[ 3917.096625] ksys_read+0x68/0xe0
[ 3917.096626] do_syscall_64+0x5c/0xa0
[ 3917.096628] entry_SYSCALL_64_after_hwframe+0x49/0xbe
[ 3917.096629]
-> #2 (&sb->s_type->i_mutex_key#3){++++}:
[ 3917.096632] down_write+0x45/0x120
[ 3917.096635] start_creating+0x5c/0xf0
[ 3917.096637] __debugfs_create_file+0x3f/0x120
[ 3917.096639] relay_create_buf_file+0x6d/0xa0
[ 3917.096641] relay_open_buf.part.0+0x2a7/0x360
[ 3917.096644] relay_open+0x191/0x2d0
[ 3917.096646] do_blk_trace_setup+0x14f/0x2b0
[ 3917.096647] __blk_trace_setup+0x54/0xb0
[ 3917.096649] blk_trace_ioctl+0x92/0x140
[ 3917.096651] blkdev_ioctl+0x131/0xb70
[ 3917.096653] block_ioctl+0x3f/0x50
[ 3917.096655] do_vfs_ioctl+0x580/0x7b0
[ 3917.096657] ksys_ioctl+0x5e/0x90
[ 3917.096659] __x64_sys_ioctl+0x16/0x20
[ 3917.096661] do_syscall_64+0x5c/0xa0
[ 3917.096663] entry_SYSCALL_64_after_hwframe+0x49/0xbe
[ 3917.096664]
-> #1 (relay_channels_mutex){+.+.}:
[ 3917.096667] __mutex_lock+0xac/0x9e0
[ 3917.096669] relay_prepare_cpu+0x19/0xae
[ 3917.096671] cpuhp_invoke_callback+0xb3/0x900
[ 3917.096673] _cpu_up+0xa1/0x130
[ 3917.096675] do_cpu_up+0x80/0xc0
[ 3917.096678] smp_init+0x58/0xae
[ 3917.096681] kernel_init_freeable+0x16a/0x27d
[ 3917.096683] kernel_init+0xa/0x10b
[ 3917.096685] ret_from_fork+0x3a/0x50
[ 3917.096687]
-> #0 (cpu_hotplug_lock.rw_sem){++++}:
[ 3917.096691] __lock_acquire+0xe13/0x1a30
[ 3917.096693] lock_acquire+0xa2/0x1b0
[ 3917.096695] cpus_read_lock+0x3e/0xb0
[ 3917.096698] alloc_workqueue+0x3a2/0x480
[ 3917.096719] __btrfs_alloc_workqueue+0x160/0x210 [btrfs]
[ 3917.096732] btrfs_alloc_workqueue+0x53/0x170 [btrfs]
[ 3917.096760] scrub_workers_get+0x5a/0x180 [btrfs]
[ 3917.096773] btrfs_scrub_dev+0x1e5/0x650 [btrfs]
[ 3917.096800] btrfs_ioctl+0x72a/0x2cb0 [btrfs]
[ 3917.096801] do_vfs_ioctl+0x580/0x7b0
[ 3917.096803] ksys_ioctl+0x5e/0x90
[ 3917.096804] __x64_sys_ioctl+0x16/0x20
[ 3917.096806] do_syscall_64+0x5c/0xa0
[ 3917.096807] entry_SYSCALL_64_after_hwframe+0x49/0xbe
[ 3917.096808]
other info that might help us debug this:
[ 3917.096809] Chain exists of:
cpu_hotplug_lock.rw_sem -->
&fs_devs->device_list_mutex --> &fs_info->scrub_lock
[ 3917.096811] Possible unsafe locking scenario:
[ 3917.096812] CPU0 CPU1
[ 3917.096812] ---- ----
[ 3917.096813] lock(&fs_info->scrub_lock);
[ 3917.096814] lock(&fs_devs->device_list_mutex);
[ 3917.096815] lock(&fs_info->scrub_lock);
[ 3917.096816] lock(cpu_hotplug_lock.rw_sem);
[ 3917.096817]
*** DEADLOCK ***
[ 3917.096818] 3 locks held by btrfs/5199:
[ 3917.096819] #0: ffff89832cb59498 (sb_writers#13){.+.+}, at:
mnt_want_write_file+0x22/0x60
[ 3917.096822] #1: ffff89832bf7cee8
(&fs_devs->device_list_mutex){+.+.}, at: btrfs_scrub_dev+0x98/0x650
[btrfs]
[ 3917.096837] #2: ffff89832ad56120 (&fs_info->scrub_lock){+.+.}, at:
btrfs_scrub_dev+0xf6/0x650 [btrfs]
[ 3917.096851]
stack backtrace:
[ 3917.096854] CPU: 2 PID: 5199 Comm: btrfs Not tainted
5.5.0-0.rc2.git2.1.fc32.x86_64 #1
[ 3917.096855] Hardware name: HP HP Spectre Notebook/81A0, BIOS F.43 04/16/2019
[ 3917.096855] Call Trace:
[ 3917.096858] dump_stack+0x8f/0xd0
[ 3917.096860] check_noncircular+0x176/0x190
[ 3917.096863] __lock_acquire+0xe13/0x1a30
[ 3917.096866] lock_acquire+0xa2/0x1b0
[ 3917.096868] ? alloc_workqueue+0x3a2/0x480
[ 3917.096870] cpus_read_lock+0x3e/0xb0
[ 3917.096871] ? alloc_workqueue+0x3a2/0x480
[ 3917.096873] alloc_workqueue+0x3a2/0x480
[ 3917.096875] ? rcu_read_lock_sched_held+0x52/0x90
[ 3917.096891] __btrfs_alloc_workqueue+0x160/0x210 [btrfs]
[ 3917.096905] btrfs_alloc_workqueue+0x53/0x170 [btrfs]
[ 3917.096920] scrub_workers_get+0x5a/0x180 [btrfs]
[ 3917.096934] btrfs_scrub_dev+0x1e5/0x650 [btrfs]
[ 3917.096938] ? rcu_read_lock_any_held+0x83/0xb0
[ 3917.096939] ? __sb_start_write+0x18f/0x230
[ 3917.096955] btrfs_ioctl+0x72a/0x2cb0 [btrfs]
[ 3917.096959] ? __lock_acquire+0x24d/0x1a30
[ 3917.096963] ? do_vfs_ioctl+0x580/0x7b0
[ 3917.096986] ? btrfs_ioctl_get_supported_features+0x30/0x30 [btrfs]
[ 3917.096989] do_vfs_ioctl+0x580/0x7b0
[ 3917.096994] ksys_ioctl+0x5e/0x90
[ 3917.096996] __x64_sys_ioctl+0x16/0x20
[ 3917.096998] do_syscall_64+0x5c/0xa0
[ 3917.097001] entry_SYSCALL_64_after_hwframe+0x49/0xbe
[ 3917.097004] RIP: 0033:0x7fd57e33834b
[ 3917.097006] Code: 0f 1e fa 48 8b 05 3d 9b 0c 00 64 c7 00 26 00 00
00 48 c7 c0 ff ff ff ff c3 66 0f 1f 44 00 00 f3 0f 1e fa b8 10 00 00
00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 0d 9b 0c 00 f7 d8 64 89
01 48
[ 3917.097008] RSP: 002b:00007fd57e23bd38 EFLAGS: 00000246 ORIG_RAX:
0000000000000010
[ 3917.097011] RAX: ffffffffffffffda RBX: 000055f345aa5420 RCX: 00007fd57e33834b
[ 3917.097012] RDX: 000055f345aa5420 RSI: 00000000c400941b RDI: 0000000000000003
[ 3917.097013] RBP: 0000000000000000 R08: 00007fd57e23c700 R09: 0000000000000000
[ 3917.097015] R10: 00007fd57e23c700 R11: 0000000000000246 R12: 00007ffe04f3614e
[ 3917.097017] R13: 00007ffe04f3614f R14: 00007ffe04f36150 R15: 00007fd57e23be40
[ 3917.097097] BTRFS info (device nvme0n1p7): scrub: started on devid 1
[ 3941.514878] BTRFS info (device nvme0n1p7): scrub: finished on devid
1 with status: 0
[chris@flap ~]$
--
Chris Murphy
[ 3198.185485] hp_wmi: Unknown event_id - 131073 - 0x0
[ 3917.096295] ======================================================
[ 3917.096296] WARNING: possible circular locking dependency detected
[ 3917.096299] 5.5.0-0.rc2.git2.1.fc32.x86_64 #1 Not tainted
[ 3917.096300] ------------------------------------------------------
[ 3917.096301] btrfs/5199 is trying to acquire lock:
[ 3917.096303] ffffffffb5653e50 (cpu_hotplug_lock.rw_sem){++++}, at: alloc_workqueue+0x3a2/0x480
[ 3917.096309]
but task is already holding lock:
[ 3917.096310] ffff89832ad56120 (&fs_info->scrub_lock){+.+.}, at: btrfs_scrub_dev+0xf6/0x650 [btrfs]
[ 3917.096339]
which lock already depends on the new lock.
[ 3917.096340]
the existing dependency chain (in reverse order) is:
[ 3917.096342]
-> #8 (&fs_info->scrub_lock){+.+.}:
[ 3917.096346] __mutex_lock+0xac/0x9e0
[ 3917.096366] btrfs_scrub_dev+0xf6/0x650 [btrfs]
[ 3917.096386] btrfs_ioctl+0x72a/0x2cb0 [btrfs]
[ 3917.096389] do_vfs_ioctl+0x580/0x7b0
[ 3917.096391] ksys_ioctl+0x5e/0x90
[ 3917.096393] __x64_sys_ioctl+0x16/0x20
[ 3917.096396] do_syscall_64+0x5c/0xa0
[ 3917.096398] entry_SYSCALL_64_after_hwframe+0x49/0xbe
[ 3917.096399]
-> #7 (&fs_devs->device_list_mutex){+.+.}:
[ 3917.096402] __mutex_lock+0xac/0x9e0
[ 3917.096421] btrfs_run_dev_stats+0x46/0x4a0 [btrfs]
[ 3917.096437] commit_cowonly_roots+0xb5/0x300 [btrfs]
[ 3917.096452] btrfs_commit_transaction+0x506/0xad0 [btrfs]
[ 3917.096481] btrfs_sync_file+0x355/0x490 [btrfs]
[ 3917.096483] do_fsync+0x38/0x70
[ 3917.096484] __x64_sys_fsync+0x10/0x20
[ 3917.096486] do_syscall_64+0x5c/0xa0
[ 3917.096488] entry_SYSCALL_64_after_hwframe+0x49/0xbe
[ 3917.096489]
-> #6 (&fs_info->tree_log_mutex){+.+.}:
[ 3917.096491] __mutex_lock+0xac/0x9e0
[ 3917.096514] btrfs_commit_transaction+0x4ae/0xad0 [btrfs]
[ 3917.096526] btrfs_sync_file+0x355/0x490 [btrfs]
[ 3917.096527] do_fsync+0x38/0x70
[ 3917.096528] __x64_sys_fsync+0x10/0x20
[ 3917.096530] do_syscall_64+0x5c/0xa0
[ 3917.096532] entry_SYSCALL_64_after_hwframe+0x49/0xbe
[ 3917.096532]
-> #5 (&fs_info->reloc_mutex){+.+.}:
[ 3917.096534] __mutex_lock+0xac/0x9e0
[ 3917.096545] btrfs_record_root_in_trans+0x44/0x70 [btrfs]
[ 3917.096555] start_transaction+0xbb/0x540 [btrfs]
[ 3917.096565] btrfs_dirty_inode+0x44/0xd0 [btrfs]
[ 3917.096566] file_update_time+0xeb/0x140
[ 3917.096577] btrfs_page_mkwrite+0xfe/0x560 [btrfs]
[ 3917.096579] do_page_mkwrite+0x4f/0x130
[ 3917.096581] do_wp_page+0x2a0/0x3e0
[ 3917.096582] __handle_mm_fault+0xc6c/0x15a0
[ 3917.096584] handle_mm_fault+0x169/0x360
[ 3917.096586] do_user_addr_fault+0x1fc/0x480
[ 3917.096587] do_page_fault+0x31/0x210
[ 3917.096588] page_fault+0x43/0x50
[ 3917.096589]
-> #4 (sb_pagefaults){.+.+}:
[ 3917.096591] __sb_start_write+0x149/0x230
[ 3917.096602] btrfs_page_mkwrite+0x69/0x560 [btrfs]
[ 3917.096603] do_page_mkwrite+0x4f/0x130
[ 3917.096605] do_wp_page+0x2a0/0x3e0
[ 3917.096606] __handle_mm_fault+0xc6c/0x15a0
[ 3917.096608] handle_mm_fault+0x169/0x360
[ 3917.096609] do_user_addr_fault+0x1fc/0x480
[ 3917.096610] do_page_fault+0x31/0x210
[ 3917.096611] page_fault+0x43/0x50
[ 3917.096612]
-> #3 (&mm->mmap_sem#2){++++}:
[ 3917.096615] __might_fault+0x60/0x80
[ 3917.096617] _copy_to_user+0x1e/0x90
[ 3917.096619] relay_file_read+0xb0/0x2e0
[ 3917.096621] full_proxy_read+0x56/0x80
[ 3917.096623] vfs_read+0xc5/0x180
[ 3917.096625] ksys_read+0x68/0xe0
[ 3917.096626] do_syscall_64+0x5c/0xa0
[ 3917.096628] entry_SYSCALL_64_after_hwframe+0x49/0xbe
[ 3917.096629]
-> #2 (&sb->s_type->i_mutex_key#3){++++}:
[ 3917.096632] down_write+0x45/0x120
[ 3917.096635] start_creating+0x5c/0xf0
[ 3917.096637] __debugfs_create_file+0x3f/0x120
[ 3917.096639] relay_create_buf_file+0x6d/0xa0
[ 3917.096641] relay_open_buf.part.0+0x2a7/0x360
[ 3917.096644] relay_open+0x191/0x2d0
[ 3917.096646] do_blk_trace_setup+0x14f/0x2b0
[ 3917.096647] __blk_trace_setup+0x54/0xb0
[ 3917.096649] blk_trace_ioctl+0x92/0x140
[ 3917.096651] blkdev_ioctl+0x131/0xb70
[ 3917.096653] block_ioctl+0x3f/0x50
[ 3917.096655] do_vfs_ioctl+0x580/0x7b0
[ 3917.096657] ksys_ioctl+0x5e/0x90
[ 3917.096659] __x64_sys_ioctl+0x16/0x20
[ 3917.096661] do_syscall_64+0x5c/0xa0
[ 3917.096663] entry_SYSCALL_64_after_hwframe+0x49/0xbe
[ 3917.096664]
-> #1 (relay_channels_mutex){+.+.}:
[ 3917.096667] __mutex_lock+0xac/0x9e0
[ 3917.096669] relay_prepare_cpu+0x19/0xae
[ 3917.096671] cpuhp_invoke_callback+0xb3/0x900
[ 3917.096673] _cpu_up+0xa1/0x130
[ 3917.096675] do_cpu_up+0x80/0xc0
[ 3917.096678] smp_init+0x58/0xae
[ 3917.096681] kernel_init_freeable+0x16a/0x27d
[ 3917.096683] kernel_init+0xa/0x10b
[ 3917.096685] ret_from_fork+0x3a/0x50
[ 3917.096687]
-> #0 (cpu_hotplug_lock.rw_sem){++++}:
[ 3917.096691] __lock_acquire+0xe13/0x1a30
[ 3917.096693] lock_acquire+0xa2/0x1b0
[ 3917.096695] cpus_read_lock+0x3e/0xb0
[ 3917.096698] alloc_workqueue+0x3a2/0x480
[ 3917.096719] __btrfs_alloc_workqueue+0x160/0x210 [btrfs]
[ 3917.096732] btrfs_alloc_workqueue+0x53/0x170 [btrfs]
[ 3917.096760] scrub_workers_get+0x5a/0x180 [btrfs]
[ 3917.096773] btrfs_scrub_dev+0x1e5/0x650 [btrfs]
[ 3917.096800] btrfs_ioctl+0x72a/0x2cb0 [btrfs]
[ 3917.096801] do_vfs_ioctl+0x580/0x7b0
[ 3917.096803] ksys_ioctl+0x5e/0x90
[ 3917.096804] __x64_sys_ioctl+0x16/0x20
[ 3917.096806] do_syscall_64+0x5c/0xa0
[ 3917.096807] entry_SYSCALL_64_after_hwframe+0x49/0xbe
[ 3917.096808]
other info that might help us debug this:
[ 3917.096809] Chain exists of:
cpu_hotplug_lock.rw_sem --> &fs_devs->device_list_mutex --> &fs_info->scrub_lock
[ 3917.096811] Possible unsafe locking scenario:
[ 3917.096812] CPU0 CPU1
[ 3917.096812] ---- ----
[ 3917.096813] lock(&fs_info->scrub_lock);
[ 3917.096814] lock(&fs_devs->device_list_mutex);
[ 3917.096815] lock(&fs_info->scrub_lock);
[ 3917.096816] lock(cpu_hotplug_lock.rw_sem);
[ 3917.096817]
*** DEADLOCK ***
[ 3917.096818] 3 locks held by btrfs/5199:
[ 3917.096819] #0: ffff89832cb59498 (sb_writers#13){.+.+}, at: mnt_want_write_file+0x22/0x60
[ 3917.096822] #1: ffff89832bf7cee8 (&fs_devs->device_list_mutex){+.+.}, at: btrfs_scrub_dev+0x98/0x650 [btrfs]
[ 3917.096837] #2: ffff89832ad56120 (&fs_info->scrub_lock){+.+.}, at: btrfs_scrub_dev+0xf6/0x650 [btrfs]
[ 3917.096851]
stack backtrace:
[ 3917.096854] CPU: 2 PID: 5199 Comm: btrfs Not tainted 5.5.0-0.rc2.git2.1.fc32.x86_64 #1
[ 3917.096855] Hardware name: HP HP Spectre Notebook/81A0, BIOS F.43 04/16/2019
[ 3917.096855] Call Trace:
[ 3917.096858] dump_stack+0x8f/0xd0
[ 3917.096860] check_noncircular+0x176/0x190
[ 3917.096863] __lock_acquire+0xe13/0x1a30
[ 3917.096866] lock_acquire+0xa2/0x1b0
[ 3917.096868] ? alloc_workqueue+0x3a2/0x480
[ 3917.096870] cpus_read_lock+0x3e/0xb0
[ 3917.096871] ? alloc_workqueue+0x3a2/0x480
[ 3917.096873] alloc_workqueue+0x3a2/0x480
[ 3917.096875] ? rcu_read_lock_sched_held+0x52/0x90
[ 3917.096891] __btrfs_alloc_workqueue+0x160/0x210 [btrfs]
[ 3917.096905] btrfs_alloc_workqueue+0x53/0x170 [btrfs]
[ 3917.096920] scrub_workers_get+0x5a/0x180 [btrfs]
[ 3917.096934] btrfs_scrub_dev+0x1e5/0x650 [btrfs]
[ 3917.096938] ? rcu_read_lock_any_held+0x83/0xb0
[ 3917.096939] ? __sb_start_write+0x18f/0x230
[ 3917.096955] btrfs_ioctl+0x72a/0x2cb0 [btrfs]
[ 3917.096959] ? __lock_acquire+0x24d/0x1a30
[ 3917.096963] ? do_vfs_ioctl+0x580/0x7b0
[ 3917.096986] ? btrfs_ioctl_get_supported_features+0x30/0x30 [btrfs]
[ 3917.096989] do_vfs_ioctl+0x580/0x7b0
[ 3917.096994] ksys_ioctl+0x5e/0x90
[ 3917.096996] __x64_sys_ioctl+0x16/0x20
[ 3917.096998] do_syscall_64+0x5c/0xa0
[ 3917.097001] entry_SYSCALL_64_after_hwframe+0x49/0xbe
[ 3917.097004] RIP: 0033:0x7fd57e33834b
[ 3917.097006] Code: 0f 1e fa 48 8b 05 3d 9b 0c 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff ff ff c3 66 0f 1f 44 00 00 f3 0f 1e fa b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 0d 9b 0c 00 f7 d8 64 89 01 48
[ 3917.097008] RSP: 002b:00007fd57e23bd38 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
[ 3917.097011] RAX: ffffffffffffffda RBX: 000055f345aa5420 RCX: 00007fd57e33834b
[ 3917.097012] RDX: 000055f345aa5420 RSI: 00000000c400941b RDI: 0000000000000003
[ 3917.097013] RBP: 0000000000000000 R08: 00007fd57e23c700 R09: 0000000000000000
[ 3917.097015] R10: 00007fd57e23c700 R11: 0000000000000246 R12: 00007ffe04f3614e
[ 3917.097017] R13: 00007ffe04f3614f R14: 00007ffe04f36150 R15: 00007fd57e23be40
[ 3917.097097] BTRFS info (device nvme0n1p7): scrub: started on devid 1
[ 3941.514878] BTRFS info (device nvme0n1p7): scrub: finished on devid 1 with status: 0
[chris@flap ~]$