|
@@ -40,8 +40,8 @@

 typedef int (*pm_callback_t)(struct device *);

-#define list_for_each_entry_rcu_locked(pos, head, member) \
-        list_for_each_entry_rcu(pos, head, member, \
+#define list_for_each_entry_srcu_locked(pos, head, member) \
+        list_for_each_entry_srcu(pos, head, member, \
                 device_links_read_lock_held())

 /*
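For reference, the device_links_read_lock_held() condition passed by the wrapper is the lockdep annotation for the device-links SRCU read-side lock, which is why the _srcu list iterator is the matching flavor here. A minimal sketch of the backing helpers, assuming the DEFINE_STATIC_SRCU()-based implementation in drivers/base/core.c (the exact bodies there may differ):

/* Sketch only; treat the details as an assumption, not a quote of core.c. */
DEFINE_STATIC_SRCU(device_links_srcu);

int device_links_read_lock(void)
{
        return srcu_read_lock(&device_links_srcu);      /* caller keeps the returned index */
}

void device_links_read_unlock(int idx)
{
        srcu_read_unlock(&device_links_srcu, idx);
}

int device_links_read_lock_held(void)
{
        return srcu_read_lock_held(&device_links_srcu); /* consumed by lockdep checks only */
}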
@@ -281,7 +281,7 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
          * callbacks freeing the link objects for the links in the list we're
          * walking.
          */
-        list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+        list_for_each_entry_srcu_locked(link, &dev->links.suppliers, c_node)
                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
                         dpm_wait(link->supplier, async);

|
@@ -338,7 +338,7 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
          * continue instead of trying to continue in parallel with its
          * unregistration).
          */
-        list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
+        list_for_each_entry_srcu_locked(link, &dev->links.consumers, s_node)
                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
                         dpm_wait(link->consumer, async);

|
@@ -675,7 +675,7 @@ static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
         idx = device_links_read_lock();

         /* Start processing the device's "async" consumers. */
-        list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
+        list_for_each_entry_srcu_locked(link, &dev->links.consumers, s_node)
                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
                         dpm_async_with_cleanup(link->consumer, func);

|
@@ -1330,7 +1330,7 @@ static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
         idx = device_links_read_lock();

         /* Start processing the device's "async" suppliers. */
-        list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+        list_for_each_entry_srcu_locked(link, &dev->links.suppliers, c_node)
                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
                         dpm_async_with_cleanup(link->supplier, func);

|
@@ -1384,7 +1384,7 @@ static void dpm_superior_set_must_resume(struct device *dev)

         idx = device_links_read_lock();

-        list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+        list_for_each_entry_srcu_locked(link, &dev->links.suppliers, c_node)
                 link->supplier->power.must_resume = true;

         device_links_read_unlock(idx);
@@ -1813,7 +1813,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)

         idx = device_links_read_lock();

-        list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+        list_for_each_entry_srcu_locked(link, &dev->links.suppliers, c_node) {
                 spin_lock_irq(&link->supplier->power.lock);
                 link->supplier->power.direct_complete = false;
                 spin_unlock_irq(&link->supplier->power.lock);
@@ -2065,7 +2065,7 @@ static bool device_prepare_smart_suspend(struct device *dev)

         idx = device_links_read_lock();

-        list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+        list_for_each_entry_srcu_locked(link, &dev->links.suppliers, c_node) {
                 if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
                         continue;

|
|