@@ -481,11 +481,9 @@ int gfs2_instantiate(struct gfs2_holder *gh)
 /**
  * do_promote - promote as many requests as possible on the current queue
  * @gl: The glock
- *
- * Returns true on success (i.e., progress was made or there are no waiters).
  */
 
-static bool do_promote(struct gfs2_glock *gl)
+static void do_promote(struct gfs2_glock *gl)
 {
 	struct gfs2_holder *gh, *current_gh;
 
@@ -496,13 +494,10 @@ static bool do_promote(struct gfs2_glock *gl)
 		if (!may_grant(gl, current_gh, gh)) {
 			/*
 			 * If we get here, it means we may not grant this
-			 * holder for some reason. If this holder is at the
-			 * head of the list, it means we have a blocked holder
-			 * at the head, so return false.
+			 * holder for some reason.
 			 */
-			if (list_is_first(&gh->gh_list, &gl->gl_holders))
-				return false;
-			do_error(gl, 0);
+			if (current_gh)
+				do_error(gl, 0); /* Fail queued try locks */
 			break;
 		}
 		set_bit(HIF_HOLDER, &gh->gh_iflags);
@@ -511,7 +506,6 @@ static bool do_promote(struct gfs2_glock *gl)
 		if (!current_gh)
 			current_gh = gh;
 	}
-	return true;
 }
 
 /**
@@ -646,8 +640,10 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 	}
 
 	/* Fast path - we got what we asked for */
-	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
+	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
+		clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
 		gfs2_demote_wake(gl);
+	}
 	if (gl->gl_state != LM_ST_UNLOCKED) {
 		if (glops->go_xmote_bh) {
 			int rv;
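A side note on the test_and_clear_bit() change above: test_and_clear_bit() is one atomic read-modify-write that returns the old bit value, whereas the split test_bit()/clear_bit() pair checks and clears in two separate steps and relies on the caller's own serialization (finish_xmote() is called with gl->gl_lockref.lock held). The stand-alone sketch below illustrates the two forms with C11 atomics; the helper names mirror the kernel bitops but are local re-implementations for illustration, not kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's bitops (userspace, C11). */
static bool test_and_clear_bit(int nr, atomic_ulong *addr)
{
	unsigned long mask = 1UL << nr;

	/* Single atomic read-modify-write; returns the old bit value. */
	return atomic_fetch_and(addr, ~mask) & mask;
}

static bool test_bit(int nr, atomic_ulong *addr)
{
	return atomic_load(addr) & (1UL << nr);
}

static void clear_bit(int nr, atomic_ulong *addr)
{
	atomic_fetch_and(addr, ~(1UL << nr));
}

int main(void)
{
	atomic_ulong flags = 1UL << 5;	/* pretend bit 5 is "demote in progress" */

	/* Split form, as in the patched finish_xmote(): test, then clear. */
	if (test_bit(5, &flags)) {
		clear_bit(5, &flags);
		puts("demote was in progress; now cleared");
	}

	/* Combined form: the same check-and-clear as one atomic step. */
	if (test_and_clear_bit(5, &flags))
		puts("not reached; the bit is already clear");

	return 0;
}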
@@ -693,54 +689,33 @@ __acquires(&gl->gl_lockref.lock)
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
-	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
 	int ret;
 
 	if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) &&
 	    gh && !(gh->gh_flags & LM_FLAG_NOEXP))
 		goto skip_inval;
 
-	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP);
 	GLOCK_BUG_ON(gl, gl->gl_state == target);
 	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
-	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
-	    glops->go_inval) {
-		/*
-		 * If another process is already doing the invalidate, let that
-		 * finish first. The glock state machine will get back to this
-		 * holder again later.
-		 */
-		if (test_and_set_bit(GLF_INVALIDATE_IN_PROGRESS,
-				     &gl->gl_flags))
-			return;
-		do_error(gl, 0); /* Fail queued try locks */
-	}
-	gl->gl_req = target;
-	set_bit(GLF_BLOCKING, &gl->gl_flags);
-	if ((gl->gl_req == LM_ST_UNLOCKED) ||
-	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
-	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
-		clear_bit(GLF_BLOCKING, &gl->gl_flags);
-	if (!glops->go_inval && !glops->go_sync)
+	if (!glops->go_inval || !glops->go_sync)
 		goto skip_inval;
 
 	spin_unlock(&gl->gl_lockref.lock);
-	if (glops->go_sync) {
-		ret = glops->go_sync(gl);
-		/* If we had a problem syncing (due to io errors or whatever,
-		 * we should not invalidate the metadata or tell dlm to
-		 * release the glock to other nodes.
-		 */
-		if (ret) {
-			if (cmpxchg(&sdp->sd_log_error, 0, ret)) {
-				fs_err(sdp, "Error %d syncing glock \n", ret);
-				gfs2_dump_glock(NULL, gl, true);
-			}
-			spin_lock(&gl->gl_lockref.lock);
-			goto skip_inval;
+	ret = glops->go_sync(gl);
+	/* If we had a problem syncing (due to io errors or whatever,
+	 * we should not invalidate the metadata or tell dlm to
+	 * release the glock to other nodes.
+	 */
+	if (ret) {
+		if (cmpxchg(&sdp->sd_log_error, 0, ret)) {
+			fs_err(sdp, "Error %d syncing glock\n", ret);
+			gfs2_dump_glock(NULL, gl, true);
 		}
+		spin_lock(&gl->gl_lockref.lock);
+		goto skip_inval;
 	}
-	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) {
+
+	if (target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) {
 		/*
 		 * The call to go_sync should have cleared out the ail list.
 		 * If there are still items, we have a problem. We ought to
@@ -755,12 +730,10 @@ __acquires(&gl->gl_lockref.lock)
 			gfs2_dump_glock(NULL, gl, true);
 		}
 		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
-		clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 	}
 	spin_lock(&gl->gl_lockref.lock);
 
 skip_inval:
-	gl->gl_lockref.count++;
 	/*
 	 * Check for an error encountered since we called go_sync and go_inval.
 	 * If so, we can't withdraw from the glock code because the withdraw
@@ -803,38 +776,41 @@ __acquires(&gl->gl_lockref.lock)
 			if (!test_bit(GLF_CANCELING, &gl->gl_flags))
 				clear_bit(GLF_LOCK, &gl->gl_flags);
 			clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
+			gl->gl_lockref.count++;
 			gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
 			return;
-		} else {
-			clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 		}
 	}
 
 	if (ls->ls_ops->lm_lock) {
 		set_bit(GLF_PENDING_REPLY, &gl->gl_flags);
 		spin_unlock(&gl->gl_lockref.lock);
-		ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
+		ret = ls->ls_ops->lm_lock(gl, target, gh ? gh->gh_flags : 0);
 		spin_lock(&gl->gl_lockref.lock);
 
-		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
-		    target == LM_ST_UNLOCKED &&
-		    test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
+		if (!ret) {
+			/* The operation will be completed asynchronously. */
+			gl->gl_lockref.count++;
+			return;
+		}
+		clear_bit(GLF_PENDING_REPLY, &gl->gl_flags);
+
+		if (ret == -ENODEV && gl->gl_target == LM_ST_UNLOCKED &&
+		    target == LM_ST_UNLOCKED) {
 			/*
 			 * The lockspace has been released and the lock has
 			 * been unlocked implicitly.
 			 */
-		} else if (ret) {
-			fs_err(sdp, "lm_lock ret %d\n", ret);
-			target = gl->gl_state | LM_OUT_ERROR;
 		} else {
-			/* The operation will be completed asynchronously. */
+			fs_err(sdp, "lm_lock ret %d\n", ret);
+			GLOCK_BUG_ON(gl, !gfs2_withdrawing_or_withdrawn(sdp));
 			return;
 		}
-		clear_bit(GLF_PENDING_REPLY, &gl->gl_flags);
 	}
 
 	/* Complete the operation now. */
 	finish_xmote(gl, target);
+	gl->gl_lockref.count++;
 	gfs2_glock_queue_work(gl, 0);
 }
 
@@ -855,11 +831,20 @@ __acquires(&gl->gl_lockref.lock)
 		return;
 	set_bit(GLF_LOCK, &gl->gl_flags);
 
-	/* While a demote is in progress, the GLF_LOCK flag must be set. */
+	/*
+	 * The GLF_DEMOTE_IN_PROGRESS flag is only set intermittently during
+	 * locking operations. We have just started a locking operation by
+	 * setting the GLF_LOCK flag, so the GLF_DEMOTE_IN_PROGRESS flag must
+	 * be cleared.
+	 */
 	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
 
-	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
-	    gl->gl_demote_state != gl->gl_state) {
+	if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
+		if (gl->gl_demote_state == gl->gl_state) {
+			gfs2_demote_wake(gl);
+			goto promote;
+		}
+
 		if (find_first_holder(gl))
 			goto out_unlock;
 		if (nonblock)
@@ -869,31 +854,31 @@ __acquires(&gl->gl_lockref.lock)
 		gl->gl_target = gl->gl_demote_state;
 		do_xmote(gl, NULL, gl->gl_target);
 		return;
-	} else {
-		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
-			gfs2_demote_wake(gl);
-		if (do_promote(gl))
-			goto out_unlock;
-		gh = find_first_waiter(gl);
-		if (!gh)
-			goto out_unlock;
-		gl->gl_target = gh->gh_state;
-		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
-			do_error(gl, 0); /* Fail queued try locks */
-		do_xmote(gl, gh, gl->gl_target);
-		return;
 	}
 
+promote:
+	do_promote(gl);
+	if (find_first_holder(gl))
+		goto out_unlock;
+	gh = find_first_waiter(gl);
+	if (!gh)
+		goto out_unlock;
+	if (nonblock)
+		goto out_sched;
+	gl->gl_target = gh->gh_state;
+	if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
+		do_error(gl, 0); /* Fail queued try locks */
+	do_xmote(gl, gh, gl->gl_target);
+	return;
+
 out_sched:
 	clear_bit(GLF_LOCK, &gl->gl_flags);
-	smp_mb__after_atomic();
 	gl->gl_lockref.count++;
 	gfs2_glock_queue_work(gl, 0);
 	return;
 
 out_unlock:
 	clear_bit(GLF_LOCK, &gl->gl_flags);
-	smp_mb__after_atomic();
 }
 
 /**
@@ -1462,6 +1447,24 @@ void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
 	va_end(args);
 }
 
+static bool gfs2_should_queue_trylock(struct gfs2_glock *gl,
+				      struct gfs2_holder *gh)
+{
+	struct gfs2_holder *current_gh, *gh2;
+
+	current_gh = find_first_holder(gl);
+	if (current_gh && !may_grant(gl, current_gh, gh))
+		return false;
+
+	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
+		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
+			continue;
+		if (!(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
+			return false;
+	}
+	return true;
+}
+
 static inline bool pid_is_meaningful(const struct gfs2_holder *gh)
 {
 	if (!(gh->gh_flags & GL_NOPID))
@@ -1480,27 +1483,20 @@ static inline bool pid_is_meaningful(const struct gfs2_holder *gh)
  */
 
 static inline void add_to_queue(struct gfs2_holder *gh)
-__releases(&gl->gl_lockref.lock)
-__acquires(&gl->gl_lockref.lock)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct gfs2_holder *gh2;
-	int try_futile = 0;
 
 	GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
 	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
 		GLOCK_BUG_ON(gl, true);
 
-	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
-		if (test_bit(GLF_LOCK, &gl->gl_flags)) {
-			struct gfs2_holder *current_gh;
-
-			current_gh = find_first_holder(gl);
-			try_futile = !may_grant(gl, current_gh, gh);
-		}
-		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
-			goto fail;
+	if ((gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
+	    !gfs2_should_queue_trylock(gl, gh)) {
+		gh->gh_error = GLR_TRYFAILED;
+		gfs2_holder_wake(gh);
+		return;
 	}
 
 	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
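For context on the trylock handling above: a holder queued with LM_FLAG_TRY or LM_FLAG_TRY_1CB that gfs2_should_queue_trylock() rejects is failed immediately with GLR_TRYFAILED instead of being left to wait. A rough caller-side sketch of that contract, using the existing gfs2_glock_nq_init()/gfs2_glock_dq_uninit() helpers; the wrapper function, the choice of LM_ST_SHARED, and the -EAGAIN mapping are illustrative assumptions, not part of this patch.

static int example_try_shared(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;
	int error;

	/* Queue a non-blocking (try) request for the glock in shared mode. */
	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_TRY, &gh);
	if (error == GLR_TRYFAILED)
		return -EAGAIN;	/* the lock was busy; retry later */
	if (error)
		return error;

	/* ... access the object protected by the glock ... */

	gfs2_glock_dq_uninit(&gh);
	return 0;
}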
@@ -1512,15 +1508,6 @@ __acquires(&gl->gl_lockref.lock)
 			continue;
 		goto trap_recursive;
 	}
-	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
-		if (try_futile &&
-		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
-fail:
-			gh->gh_error = GLR_TRYFAILED;
-			gfs2_holder_wake(gh);
-			return;
-		}
-	}
 	trace_gfs2_glock_queue(gh, 1);
 	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
 	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
@@ -2321,8 +2308,6 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
 		*p++ = 'y';
 	if (test_bit(GLF_LFLUSH, gflags))
 		*p++ = 'f';
-	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
-		*p++ = 'i';
 	if (test_bit(GLF_PENDING_REPLY, gflags))
 		*p++ = 'R';
 	if (test_bit(GLF_HAVE_REPLY, gflags))