Linux NFS development
 help / color / mirror / Atom feed
From: Jeff Layton <jlayton@kernel.org>
To: Mike Galbraith <efault@gmx.de>,
	dai.ngo@oracle.com, Chuck Lever III <chuck.lever@oracle.com>
Cc: Linux NFS Mailing List <linux-nfs@vger.kernel.org>
Subject: Re: [PATCH 1/1] NFSD: fix WARN_ON_ONCE in __queue_delayed_work
Date: Wed, 11 Jan 2023 07:44:04 -0500	[thread overview]
Message-ID: <78579e0d1ca805bad4c98c609638305fa63cda67.camel@kernel.org> (raw)
In-Reply-To: <860d74c0a13c8c8330bed91b8085384399e14764.camel@gmx.de>

On Wed, 2023-01-11 at 13:26 +0100, Mike Galbraith wrote:
> On Wed, 2023-01-11 at 03:31 -0800, dai.ngo@oracle.com wrote:
> > 
> > Can you try:
> > 
> > crash7latest> nfsd_net_id
> > nfsd_net_id = $2 = 9                <<===
> > crash7latest> struct net.gen  init_net
> >    gen = 0xffff97fc17d07d80
> > crash7latest> x /10g 0xffff97fc17d07d80
> > 0xffff97fc17d07d80:     0x000000000000000d      0x0000000000000000
> > 0xffff97fc17d07d90:     0x0000000000000000      0xffff97fc0ac40060
> > 0xffff97fc17d07da0:     0xffff994e7bf87600      0xffff98f731172a20
> > 0xffff97fc17d07db0:     0xffff9844b05d9c00      0xffff9832a6a0add0
> > 0xffff97fc17d07dc0:     0xffff984a4470d740      0xffff984a93eb0600   
> > <<=== entry for nfsd_net_id
> > crash7latest> nfsd_net 0xffff984a93eb0600
> 
> (monkey see monkey do.. eep eep)
> 
> crash> nfsd_net_id
> p: gdb request failed: p nfsd_net_id
> crash> struct net.gen  init_net
>   gen = 0xffff88810b7b8a00,
> crash> x /10g 0xffff88810b7b8a00
> 0xffff88810b7b8a00:	0x0000000000000010	0x0000000000000000
> 0xffff88810b7b8a10:	0x0000000000000000	0xffff888101563380
> 0xffff88810b7b8a20:	0xffff888101ebd900	0xffff888101ebda00
> 0xffff88810b7b8a30:	0xffff888101f88b80	0xffff8881022056c0
> 0xffff88810b7b8a40:	0xffff888133b79e00	0xffff888110a2ca00
> crash> nfsd_net 0xffff888110a2ca00
> struct nfsd_net {
>   cld_net = 0xffff888131c3c000,
>   svc_expkey_cache = 0xffff888110a2cc00,
>   svc_export_cache = 0xffff888110a2ce00,
>   idtoname_cache = 0xffff8881061a8a00,
>   nametoid_cache = 0xffff8881061a8c00,
>   nfsd4_manager = {
>     list = {
>       next = 0xffff888141efa000,
>       prev = 0xffff888133e6ea00
>     },
>     block_opens = false
>   },
>   grace_ended = false,
>   boot_time = -131387065447864,
>   nfsd_client_dir = 0xffff888110a2ca48,
>   reclaim_str_hashtbl = 0xffff88810bed7408,
>   reclaim_str_hashtbl_size = 1083333640,
>   conf_id_hashtbl = 0x0,
>   conf_name_tree = {
>     rb_node = 0xffff888140925c00
>   },
>   unconf_id_hashtbl = 0xffff88810181c800,
>   unconf_name_tree = {
>     rb_node = 0x200000000
>   },
>   sessionid_hashtbl = 0x1,
>   client_lru = {
>     next = 0x0,
>     prev = 0x0
>   },
>   close_lru = {
>     next = 0xffff888110a2caa0,
>     prev = 0xffff888110a2caa0
>   },
>   del_recall_lru = {
>     next = 0x0,
>     prev = 0xffffffffffffffff
>   },
>   blocked_locks_lru = {
>     next = 0x0,
>     prev = 0xffff88810a0e0f00
>   },
>   laundromat_work = {
>     work = {
>       data = {
>         counter = 0
>       },
>       entry = {
>         next = 0x0,
>         prev = 0x0
>       },
>       func = 0x0
>     },
>     timer = {
>       entry = {
>         next = 0x0,
>         pprev = 0x0
>       },
>       expires = 520729437059154371,
>       function = 0x0,
>       flags = 3526430787
>     },
>     wq = 0x24448948f6314540,
>     cpu = 1133332496
>   },
>   client_lock = {
>     {
>       rlock = {
>         raw_lock = {
>           {
>             val = {
>               counter = 344528932
>             },
>             {
>               locked = 36 '$',
>               pending = 24 '\030'
>             },
>             {
>               locked_pending = 6180,
>               tail = 5257
>             }
>           }
>         }
>       }
>     }
>   },
>   blocked_locks_lock = {
>     {
>       rlock = {
>         raw_lock = {
>           {
>             val = {
>               counter = 1820937252
>             },
>             {
>               locked = 36 '$',
>               pending = 76 'L'
>             },
>             {
>               locked_pending = 19492,
>               tail = 27785
>             }
>           }
>         }
>       }
>     }
>   },
>   rec_file = 0x4808245c89483824,
>   in_grace = 137,
>   client_tracking_ops = 0xe8df8948005d8b,
>   nfsd4_lease = -8266309238763028480,
>   nfsd4_grace = 5476377146897729659,
>   somebody_reclaimed = 139,
>   track_reclaim_completes = 99,
>   nr_reclaim_complete = {
>     counter = -402096755
>   },
>   nfsd_net_up = false,
>   lockd_up = false,
>   writeverf_lock = {
>     seqcount = {
>       seqcount = {
>         sequence = 140872013
>       }
>     },
>     lock = {
>       {
>         rlock = {
>           raw_lock = {
>             {
>               val = {
>                 counter = -387479220
>               },
>               {
>                 locked = 76 'L',
>                 pending = 137 '\211'
>               },
>               {
>                 locked_pending = 35148,
>                 tail = 59623
>               }
>             }
>           }
>         }
>       }
>     }
>   },
>   writeverf = "\000\000\000\000M\211,$",
>   max_connections = 612141896,
>   clientid_base = 59416,
>   clientid_counter = 2336751616,
>   clverifier_counter = 1275601988,
>   nfsd_serv = 0x1024448b48185889,
>   keep_active = 140740940,
>   s2s_cp_cl_id = 1223133516,
>   s2s_cp_stateids = {
>     idr_rt = {
>       xa_lock = {
>         {
>           rlock = {
>             raw_lock = {
>               {
>                 val = {
>                   counter = 15205257
>                 },
>                 {
>                   locked = 137 '\211',
>                   pending = 3 '\003'
>                 },
>                 {
>                   locked_pending = 905,
>                   tail = 232
>                 }
>               }
>             }
>           }
>         }
>       },
>       xa_flags = 1224736768,
>       xa_head = 0xf74f6854d241c89
>     },
>     idr_base = 276532552,
>     idr_next = 232
>   },
>   s2s_cp_lock = {
>     {
>       rlock = {
>         raw_lock = {
>           {
>             val = {
>               counter = 1933134848
>             },
>             {
>               locked = 0 '\000',
>               pending = 76 'L'
>             },
>             {
>               locked_pending = 19456,
>               tail = 29497
>             }
>           }
>         }
>       }
>     }
>   },
>   nfsd_versions = 0x443924048b012404,
>   nfsd4_minorversions = 0x2b4820f2424,
>   drc_hashtbl = 0x8678d4d107b8d48,
>   max_drc_entries = 232,
>   maskbits = 1938508800,
>   drc_hashsize = 4287187984,
>   num_drc_entries = {
>     counter = 232
>   },
>   counter = {{
>       lock = {
>         raw_lock = {
>           {
>             val = {
>               counter = 931745024
>             },
>             {
>               locked = 0 '\000',
>               pending = 77 'M'
>             },
>             {
>               locked_pending = 19712,
>               tail = 14217
>             }
>           }
>         }
>       },
>       count = -8858645092202691189,
>       list = {
>         next = 0x24648b4cffffff43,
>         prev = 0x246c8b4c24148b40
>       },
>       counters = 0xffffffffa0d0b540 <__this_module>
>     }, {
>       lock = {
>         raw_lock = {
>           {
>             val = {
>               counter = 256
>             },
>             {
>               locked = 0 '\000',
>               pending = 1 '\001'
>             },
>             {
>               locked_pending = 256,
>               tail = 0
>             }
>           }
>         }
>       },
>       count = -131387314532352,
>       list = {
>         next = 0x0,
>         prev = 0xffffffffa0c949c0 <svc_udp_ops+1248>
>       },
>       counters = 0xffffffffa0c67f00 <ip_map_put>
>     }},
>   longest_chain = 2697366144,
>   longest_chain_cachesize = 4294967295,
>   nfsd_reply_cache_shrinker = {
>     count_objects = 0xffffffffa0c67cd0 <ip_map_request>,
>     scan_objects = 0xffffffffa0c68e40 <ip_map_parse>,

Looks like this part of the struct may have been overwritten with
ip_map_cache_template? Nothing else in here looks recognizable, so I
have to wonder whether you actually have the correct nfsd_net pointer here.

>     batch = -1597606560,
>     seeks = 0,
>     flags = 0,
>     list = {
>       next = 0xffffffffa0c67350 <ip_map_alloc>,
>       prev = 0x0
>     },
>     nr_deferred = 0xffffffffa0c68a00 <ip_map_match>
>   },
>   nfsd_ssc_lock = {
>     {
>       rlock = {
>         raw_lock = {
>           {
>             val = {
>               counter = -1597603936
>             },
>             {
>               locked = 160 '\240',
>               pending = 127 '\177'
>             },
>             {
>               locked_pending = 32672,
>               tail = 41158
>             }
>           }
>         }
>       }
>     }
>   },
>   nfsd_ssc_mount_list = {
>     next = 0xffffffffa0c68b10 <update>,
>     prev = 0x49
>   },
>   nfsd_ssc_waitq = {
>     lock = {
>       {
>         rlock = {
>           raw_lock = {
>             {
>               val = {
>                 counter = -1596979232
>               },
>               {
>                 locked = 224 '\340',
>                 pending = 7 '\a'
>               },
>               {
>                 locked_pending = 2016,
>                 tail = 41168
>               }
>             }
>           }
>         }
>       }
>     },
>     head = {
>       next = 0xffff888110a2ce88,
>       prev = 0xc2
>     }
>   },
>   nfsd_name = "\001\000\000\000\000\000\000\000\200\t\021D\201\210\377\377\200\t\021D\201\210\377\377\001\000\000\000\000\000\000\000\032\000\000\000\000\000\000\000\377\377\377\377\377\377\377\377\000\301\303\061\201\210\377\377@$\234\203\377\377\377\377",
>   fcache_disposal = 0x0,
>   siphash_key = {
>     key = {0, 0}
>   },
>   nfs4_client_count = {
>     counter = 451
>   },
>   nfs4_max_clients = 122552490,
>   nfsd_courtesy_clients = {
>     counter = 0
>   },
>   nfsd_client_shrinker = {
>     count_objects = 0xe8000002a0a3894c,
>     scan_objects = 0x98b3894400000000,
>     batch = 5483261796049485826,
>     seeks = 15267721,
>     flags = 1275068416,
>     list = {
>       next = 0x18247c8d4918658b,
>       prev = 0x7c8b4900000000e8
>     },
>     nr_deferred = 0x4800000000e81824
>   },
>   nfsd_shrinker_work = {
>     work = {
>       data = {
>         counter = -8554306017173128307
>       },
>       entry = {
>         next = 0x894c00000000e8c4,
>         prev = 0xf7894c00000268a3
>       },
>       func = 0x6d8b4800000000e8
>     },
>     timer = {
>       entry = {
>         next = 0x270bb8d4818,
>         pprev = 0xbb8d4800000000e8
>       },
>       expires = 8118733695596102332,
>       function = 0xe8000002,
>       flags = 45908935
>     },
>     wq = 0x147424e783410000,
>     cpu = 553616193
>   }
> }
> crash>
> 

-- 
Jeff Layton <jlayton@kernel.org>

  reply	other threads:[~2023-01-11 12:44 UTC|newest]

Thread overview: 27+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-01-10  6:48 [PATCH 1/1] NFSD: fix WARN_ON_ONCE in __queue_delayed_work Dai Ngo
2023-01-10 10:30 ` Jeff Layton
2023-01-10 17:33   ` dai.ngo
2023-01-10 18:17     ` Chuck Lever III
2023-01-10 18:34       ` Jeff Layton
2023-01-10 19:17         ` dai.ngo
2023-01-10 19:30           ` Jeff Layton
2023-01-10 19:58             ` dai.ngo
2023-01-11  2:34               ` Mike Galbraith
2023-01-11 10:15                 ` Jeff Layton
2023-01-11 10:55                   ` Jeff Layton
2023-01-11 11:19                     ` Mike Galbraith
2023-01-11 11:31                       ` dai.ngo
2023-01-11 12:26                         ` Mike Galbraith
2023-01-11 12:44                           ` Jeff Layton [this message]
2023-01-11 12:00                       ` Jeff Layton
2023-01-11 12:15                       ` Mike Galbraith
2023-01-11 12:33                         ` Jeff Layton
2023-01-11 13:48                           ` Mike Galbraith
2023-01-11 14:01                           ` Jeff Layton
2023-01-11 14:16                             ` Jeff Layton
2023-01-10 18:46       ` dai.ngo
2023-01-10 18:53         ` Chuck Lever III
2023-01-10 19:07           ` dai.ngo
2023-01-10 19:27             ` Jeff Layton
2023-01-10 19:16           ` Jeff Layton
2023-01-10 14:26 ` Chuck Lever III

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=78579e0d1ca805bad4c98c609638305fa63cda67.camel@kernel.org \
    --to=jlayton@kernel.org \
    --cc=chuck.lever@oracle.com \
    --cc=dai.ngo@oracle.com \
    --cc=efault@gmx.de \
    --cc=linux-nfs@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used for this inbox.