@@ -333,37 +333,24 @@ u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
333333 return (u64 )udw << 32 | ldw ;
334334}
335335
/*
 * __xe_mmio_wait32() - Poll a register until its masked value matches (or,
 * when @expect_match is false, stops matching) the given value.
 * @gt: MMIO target GT
 * @reg: register to read value from
 * @mask: mask to be applied to the value read from the register
 * @val: value compared against (read & @mask); the polarity of the
 *       comparison is selected by @expect_match
 * @timeout_us: time out after this period of time. Wait logic tries to be
 * smart, applying an exponential backoff until @timeout_us is reached.
 * @out_val: if not NULL, points where to store the last unmasked value
 * @atomic: needs to be true if calling from an atomic context
 * @expect_match: true to wait for (read & @mask) == @val, false to wait for
 *                (read & @mask) != @val
 *
 * Shared implementation behind xe_mmio_wait32() and xe_mmio_wait32_not().
 * Returns 0 on success or -ETIMEDOUT if the condition was not met in time.
 */
static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
			    u32 *out_val, bool atomic, bool expect_match)
{
	ktime_t cur = ktime_get_raw();
	const ktime_t end = ktime_add_us(cur, timeout_us);
	int ret = -ETIMEDOUT;
	s64 wait = 10;	/* initial backoff step, in microseconds */
	u32 read;
	bool check;

	for (;;) {
		read = xe_mmio_read32(gt, reg);

		/* check is the success condition; invert it for the "_not" case */
		check = (read & mask) == val;
		if (!expect_match)
			check = !check;

		if (check) {
			ret = 0;
			break;
		}

		cur = ktime_get_raw();
		if (!ktime_before(cur, end))
			break;

		/* Clamp the next wait so we never sleep past the deadline */
		if (ktime_after(ktime_add_us(cur, wait), end))
			wait = ktime_us_delta(end, cur);

		if (atomic)
			udelay(wait);
		else
			usleep_range(wait, wait << 1);
		wait <<= 1;	/* exponential backoff */
	}

	/*
	 * One final read after the deadline: if we were scheduled out between
	 * the last poll and the timeout check, the condition may have become
	 * true in the meantime, so don't report a spurious -ETIMEDOUT.
	 */
	if (ret != 0) {
		read = xe_mmio_read32(gt, reg);

		check = (read & mask) == val;
		if (!expect_match)
			check = !check;

		if (check)
			ret = 0;
	}

	/* Report the last raw (unmasked) value read, even on timeout */
	if (out_val)
		*out_val = read;

	return ret;
}
396388
397389/**
398- * xe_mmio_wait32_not () - Wait for a register to return anything other than the given masked value
390+ * xe_mmio_wait32 () - Wait for a register to match the desired masked value
399391 * @gt: MMIO target GT
400392 * @reg: register to read value from
401393 * @mask: mask to be applied to the value read from the register
402- * @val: value to match after applying the mask
394+ * @val: desired value after applying the mask
403395 * @timeout_us: time out after this period of time. Wait logic tries to be
404396 * smart, applying an exponential backoff until @timeout_us is reached.
405397 * @out_val: if not NULL, points where to store the last unmasked value
406398 * @atomic: needs to be true if calling from an atomic context
407399 *
408- * This function polls for a masked value to change from a given value and
409- * returns zero on success or -ETIMEDOUT if timed out.
400+ * This function polls for the desired masked value and returns zero on success
401+ * or -ETIMEDOUT if timed out.
410402 *
411403 * Note that @timeout_us represents the minimum amount of time to wait before
412404 * giving up. The actual time taken by this function can be a little more than
413405 * @timeout_us for different reasons, specially in non-atomic contexts. Thus,
414406 * it is possible that this function succeeds even after @timeout_us has passed.
415407 */
408+ int xe_mmio_wait32 (struct xe_gt * gt , struct xe_reg reg , u32 mask , u32 val , u32 timeout_us ,
409+ u32 * out_val , bool atomic )
410+ {
411+ return __xe_mmio_wait32 (gt , reg , mask , val , timeout_us , out_val , atomic , true);
412+ }
413+
414+ /**
415+ * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
416+ * @gt: MMIO target GT
417+ * @reg: register to read value from
418+ * @mask: mask to be applied to the value read from the register
419+ * @val: value not to be matched after applying the mask
420+ * @timeout_us: time out after this period of time
421+ * @out_val: if not NULL, points where to store the last unmasked value
422+ * @atomic: needs to be true if calling from an atomic context
423+ *
424+ * This function works exactly like xe_mmio_wait32() with the exception that
425+ * @val is expected not to be matched.
426+ */
416427int xe_mmio_wait32_not (struct xe_gt * gt , struct xe_reg reg , u32 mask , u32 val , u32 timeout_us ,
417428 u32 * out_val , bool atomic )
418429{
419- ktime_t cur = ktime_get_raw ();
420- const ktime_t end = ktime_add_us (cur , timeout_us );
421- int ret = - ETIMEDOUT ;
422- s64 wait = 10 ;
423- u32 read ;
424-
425- for (;;) {
426- read = xe_mmio_read32 (gt , reg );
427- if ((read & mask ) != val ) {
428- ret = 0 ;
429- break ;
430- }
431-
432- cur = ktime_get_raw ();
433- if (!ktime_before (cur , end ))
434- break ;
435-
436- if (ktime_after (ktime_add_us (cur , wait ), end ))
437- wait = ktime_us_delta (end , cur );
438-
439- if (atomic )
440- udelay (wait );
441- else
442- usleep_range (wait , wait << 1 );
443- wait <<= 1 ;
444- }
445-
446- if (ret != 0 ) {
447- read = xe_mmio_read32 (gt , reg );
448- if ((read & mask ) != val )
449- ret = 0 ;
450- }
451-
452- if (out_val )
453- * out_val = read ;
454-
455- return ret ;
430+ return __xe_mmio_wait32 (gt , reg , mask , val , timeout_us , out_val , atomic , false);
456431}
0 commit comments