@@ -534,7 +534,7 @@ static void __inode_add_lru(struct inode *inode, bool rotate)
 {
 	if (inode->i_state & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
 		return;
-	if (atomic_read(&inode->i_count))
+	if (icount_read(inode))
 		return;
 	if (!(inode->i_sb->s_flags & SB_ACTIVE))
 		return;
@@ -550,11 +550,11 @@ static void __inode_add_lru(struct inode *inode, bool rotate)
 struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
 					    struct inode *inode, u32 bit)
 {
-        void *bit_address;
+	void *bit_address;
 
-        bit_address = inode_state_wait_address(inode, bit);
-        init_wait_var_entry(wqe, bit_address, 0);
-        return __var_waitqueue(bit_address);
+	bit_address = inode_state_wait_address(inode, bit);
+	init_wait_var_entry(wqe, bit_address, 0);
+	return __var_waitqueue(bit_address);
 }
 EXPORT_SYMBOL(inode_bit_waitqueue);
 
@@ -871,11 +871,11 @@ void evict_inodes(struct super_block *sb)
 again:
 	spin_lock(&sb->s_inode_list_lock);
 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
-		if (atomic_read(&inode->i_count))
+		if (icount_read(inode))
			continue;
 
 		spin_lock(&inode->i_lock);
-		if (atomic_read(&inode->i_count)) {
+		if (icount_read(inode)) {
 			spin_unlock(&inode->i_lock);
 			continue;
 		}
@@ -937,7 +937,7 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
 	 * unreclaimable for a while. Remove them lazily here; iput,
 	 * sync, or the last page cache deletion will requeue them.
 	 */
-	if (atomic_read(&inode->i_count) ||
+	if (icount_read(inode) ||
 	    (inode->i_state & ~I_REFERENCED) ||
 	    !mapping_shrinkable(&inode->i_data)) {
 		list_lru_isolate(lru, &inode->i_lru);
@@ -1279,6 +1279,8 @@ struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
 	struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
 	struct inode *old;
 
+	might_sleep();
+
 again:
 	spin_lock(&inode_hash_lock);
 	old = find_inode(inode->i_sb, head, test, data, true);
@@ -1382,6 +1384,8 @@ struct inode *iget5_locked_rcu(struct super_block *sb, unsigned long hashval,
 	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
 	struct inode *inode, *new;
 
+	might_sleep();
+
 again:
 	inode = find_inode(sb, head, test, data, false);
 	if (inode) {
@@ -1422,6 +1426,9 @@ struct inode *iget_locked(struct super_block *sb, unsigned long ino)
 {
 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
 	struct inode *inode;
+
+	might_sleep();
+
 again:
 	inode = find_inode_fast(sb, head, ino, false);
 	if (inode) {
@@ -1605,6 +1612,9 @@ struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
 		int (*test)(struct inode *, void *), void *data)
 {
 	struct inode *inode;
+
+	might_sleep();
+
 again:
 	inode = ilookup5_nowait(sb, hashval, test, data);
 	if (inode) {
@@ -1630,6 +1640,9 @@ struct inode *ilookup(struct super_block *sb, unsigned long ino)
 {
 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
 	struct inode *inode;
+
+	might_sleep();
+
 again:
 	inode = find_inode_fast(sb, head, ino, false);
 
@@ -1780,6 +1793,8 @@ int insert_inode_locked(struct inode *inode)
 	ino_t ino = inode->i_ino;
 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
 
+	might_sleep();
+
 	while (1) {
 		struct inode *old = NULL;
 		spin_lock(&inode_hash_lock);
@@ -1826,6 +1841,8 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
 {
 	struct inode *old;
 
+	might_sleep();
+
 	inode->i_state |= I_CREATING;
 	old = inode_insert5(inode, hashval, test, NULL, data);
 
@@ -1908,20 +1925,45 @@ static void iput_final(struct inode *inode)
  */
 void iput(struct inode *inode)
 {
-	if (!inode)
+	might_sleep();
+	if (unlikely(!inode))
 		return;
-	BUG_ON(inode->i_state & I_CLEAR);
+
 retry:
-	if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
-		if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
-			atomic_inc(&inode->i_count);
-			spin_unlock(&inode->i_lock);
-			trace_writeback_lazytime_iput(inode);
-			mark_inode_dirty_sync(inode);
-			goto retry;
-		}
-		iput_final(inode);
+	lockdep_assert_not_held(&inode->i_lock);
+	VFS_BUG_ON_INODE(inode->i_state & I_CLEAR, inode);
+	/*
+	 * Note this assert is technically racy as if the count is bogusly
+	 * equal to one, then two CPUs racing to further drop it can both
+	 * conclude it's fine.
+	 */
+	VFS_BUG_ON_INODE(atomic_read(&inode->i_count) < 1, inode);
+
+	if (atomic_add_unless(&inode->i_count, -1, 1))
+		return;
+
+	if ((inode->i_state & I_DIRTY_TIME) && inode->i_nlink) {
+		trace_writeback_lazytime_iput(inode);
+		mark_inode_dirty_sync(inode);
+		goto retry;
+	}
+
+	spin_lock(&inode->i_lock);
+	if (unlikely((inode->i_state & I_DIRTY_TIME) && inode->i_nlink)) {
+		spin_unlock(&inode->i_lock);
+		goto retry;
+	}
+
+	if (!atomic_dec_and_test(&inode->i_count)) {
+		spin_unlock(&inode->i_lock);
+		return;
 	}
+
+	/*
+	 * iput_final() drops ->i_lock, we can't assert on it as the inode may
+	 * be deallocated by the time the call returns.
+	 */
+	iput_final(inode);
 }
 EXPORT_SYMBOL(iput);
 
@@ -2911,10 +2953,18 @@ EXPORT_SYMBOL(mode_strip_sgid);
  *
  * TODO: add a proper inode dumping routine, this is a stub to get debug off the
  * ground.
+ *
+ * TODO: handle getting to fs type with get_kernel_nofault()?
+ * See dump_mapping() above.
  */
 void dump_inode(struct inode *inode, const char *reason)
 {
-	pr_warn("%s encountered for inode %px", reason, inode);
+	struct super_block *sb = inode->i_sb;
+
+	pr_warn("%s encountered for inode %px\n"
+		"fs %s mode %ho opflags 0x%hx flags 0x%x state 0x%x count %d\n",
+		reason, inode, sb->s_type->name, inode->i_mode, inode->i_opflags,
+		inode->i_flags, inode->i_state, atomic_read(&inode->i_count));
 }
 
 EXPORT_SYMBOL(dump_inode);
0 commit comments