@@ -108,10 +108,10 @@ static struct cgroup_subsys_state *blkcg_css(void)
 	return task_css(current, io_cgrp_id);
 }
 
-static bool blkcg_policy_enabled(struct gendisk *disk,
+static bool blkcg_policy_enabled(struct request_queue *q,
 				 const struct blkcg_policy *pol)
 {
-	return pol && test_bit(pol->plid, disk->blkcg_pols);
+	return pol && test_bit(pol->plid, q->blkcg_pols);
 }
 
 static void blkg_free_workfn(struct work_struct *work)
@@ -123,18 +123,18 @@ static void blkg_free_workfn(struct work_struct *work)
 	/*
 	 * pd_free_fn() can also be called from blkcg_deactivate_policy(),
 	 * in order to make sure pd_free_fn() is called in order, the deletion
-	 * of the list blkg->entry is delayed to here from blkg_destroy(), and
+	 * of the list blkg->q_node is delayed to here from blkg_destroy(), and
 	 * blkcg_mutex is used to synchronize blkg_free_workfn() and
 	 * blkcg_deactivate_policy().
 	 */
-	mutex_lock(&blkg->disk->blkcg_mutex);
+	mutex_lock(&blkg->disk->queue->blkcg_mutex);
 	for (i = 0; i < BLKCG_MAX_POLS; i++)
 		if (blkg->pd[i])
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 	if (blkg->parent)
 		blkg_put(blkg->parent);
-	list_del_init(&blkg->entry);
-	mutex_unlock(&blkg->disk->blkcg_mutex);
+	list_del_init(&blkg->q_node);
+	mutex_unlock(&blkg->disk->queue->blkcg_mutex);
 
 	put_disk(blkg->disk);
 	free_percpu(blkg->iostat_cpu);
@@ -269,7 +269,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
 	get_device(disk_to_dev(disk));
 	blkg->disk = disk;
 
-	INIT_LIST_HEAD(&blkg->entry);
+	INIT_LIST_HEAD(&blkg->q_node);
 	spin_lock_init(&blkg->async_bio_lock);
 	bio_list_init(&blkg->async_bios);
 	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
@@ -285,7 +285,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
 		struct blkcg_policy *pol = blkcg_policy[i];
 		struct blkg_policy_data *pd;
 
-		if (!blkcg_policy_enabled(disk, pol))
+		if (!blkcg_policy_enabled(disk->queue, pol))
 			continue;
 
 		/* alloc per-policy data and attach it to blkg */
@@ -371,7 +371,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
 	ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg);
 	if (likely(!ret)) {
 		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
-		list_add(&blkg->entry, &disk->blkg_list);
+		list_add(&blkg->q_node, &disk->queue->blkg_list);
 
 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 			struct blkcg_policy *pol = blkcg_policy[i];
@@ -444,7 +444,7 @@ static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 	while (true) {
 		struct blkcg *pos = blkcg;
 		struct blkcg *parent = blkcg_parent(blkcg);
-		struct blkcg_gq *ret_blkg = disk->root_blkg;
+		struct blkcg_gq *ret_blkg = q->root_blkg;
 
 		while (parent) {
 			blkg = blkg_lookup(parent, disk);
@@ -526,7 +526,7 @@ static void blkg_destroy_all(struct gendisk *disk)
 
 restart:
 	spin_lock_irq(&q->queue_lock);
-	list_for_each_entry_safe(blkg, n, &disk->blkg_list, entry) {
+	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
 
 		spin_lock(&blkcg->lock);
@@ -545,7 +545,7 @@ static void blkg_destroy_all(struct gendisk *disk)
 		}
 	}
 
-	disk->root_blkg = NULL;
+	q->root_blkg = NULL;
 	spin_unlock_irq(&q->queue_lock);
 }
 
@@ -620,7 +620,7 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
 		spin_lock_irq(&blkg->disk->queue->queue_lock);
-		if (blkcg_policy_enabled(blkg->disk, pol))
+		if (blkcg_policy_enabled(blkg->disk->queue, pol))
 			total += prfill(sf, blkg->pd[pol->plid], data);
 		spin_unlock_irq(&blkg->disk->queue->queue_lock);
 	}
@@ -728,7 +728,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	rcu_read_lock();
 	spin_lock_irq(&q->queue_lock);
 
-	if (!blkcg_policy_enabled(disk, pol)) {
+	if (!blkcg_policy_enabled(q, pol)) {
 		ret = -EOPNOTSUPP;
 		goto fail_unlock;
 	}
@@ -771,7 +771,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		rcu_read_lock();
 		spin_lock_irq(&q->queue_lock);
 
-		if (!blkcg_policy_enabled(disk, pol)) {
+		if (!blkcg_policy_enabled(q, pol)) {
 			blkg_free(new_blkg);
 			ret = -EOPNOTSUPP;
 			goto fail_preloaded;
@@ -951,7 +951,7 @@ static void blkcg_fill_root_iostats(void)
 	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
 	while ((dev = class_dev_iter_next(&iter))) {
 		struct block_device *bdev = dev_to_bdev(dev);
-		struct blkcg_gq *blkg = bdev->bd_disk->root_blkg;
+		struct blkcg_gq *blkg = bdev->bd_disk->queue->root_blkg;
 		struct blkg_iostat tmp;
 		int cpu;
 		unsigned long flags;
@@ -1298,8 +1298,8 @@ int blkcg_init_disk(struct gendisk *disk)
 	bool preloaded;
 	int ret;
 
-	INIT_LIST_HEAD(&disk->blkg_list);
-	mutex_init(&disk->blkcg_mutex);
+	INIT_LIST_HEAD(&q->blkg_list);
+	mutex_init(&q->blkcg_mutex);
 
 	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
 	if (!new_blkg)
@@ -1313,7 +1313,7 @@ int blkcg_init_disk(struct gendisk *disk)
 	blkg = blkg_create(&blkcg_root, disk, new_blkg);
 	if (IS_ERR(blkg))
 		goto err_unlock;
-	disk->root_blkg = blkg;
+	q->root_blkg = blkg;
 	spin_unlock_irq(&q->queue_lock);
 
 	if (preloaded)
@@ -1426,7 +1426,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
 	struct blkcg_gq *blkg, *pinned_blkg = NULL;
 	int ret;
 
-	if (blkcg_policy_enabled(disk, pol))
+	if (blkcg_policy_enabled(q, pol))
 		return 0;
 
 	if (queue_is_mq(q))
@@ -1435,7 +1435,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
 	spin_lock_irq(&q->queue_lock);
 
 	/* blkg_list is pushed at the head, reverse walk to allocate parents first */
-	list_for_each_entry_reverse(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
 		struct blkg_policy_data *pd;
 
 		if (blkg->pd[pol->plid])
@@ -1480,16 +1480,16 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
 
 	/* all allocated, init in the same order */
 	if (pol->pd_init_fn)
-		list_for_each_entry_reverse(blkg, &disk->blkg_list, entry)
+		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
 			pol->pd_init_fn(blkg->pd[pol->plid]);
 
-	list_for_each_entry_reverse(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
 		if (pol->pd_online_fn)
 			pol->pd_online_fn(blkg->pd[pol->plid]);
 		blkg->pd[pol->plid]->online = true;
 	}
 
-	__set_bit(pol->plid, disk->blkcg_pols);
+	__set_bit(pol->plid, q->blkcg_pols);
 	ret = 0;
 
 	spin_unlock_irq(&q->queue_lock);
@@ -1505,7 +1505,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
 enomem:
 	/* alloc failed, nothing's initialized yet, free everything */
 	spin_lock_irq(&q->queue_lock);
-	list_for_each_entry(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry(blkg, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
 
 		spin_lock(&blkcg->lock);
@@ -1535,18 +1535,18 @@ void blkcg_deactivate_policy(struct gendisk *disk,
 	struct request_queue *q = disk->queue;
 	struct blkcg_gq *blkg;
 
-	if (!blkcg_policy_enabled(disk, pol))
+	if (!blkcg_policy_enabled(q, pol))
 		return;
 
 	if (queue_is_mq(q))
 		blk_mq_freeze_queue(q);
 
-	mutex_lock(&disk->blkcg_mutex);
+	mutex_lock(&q->blkcg_mutex);
 	spin_lock_irq(&q->queue_lock);
 
-	__clear_bit(pol->plid, disk->blkcg_pols);
+	__clear_bit(pol->plid, q->blkcg_pols);
 
-	list_for_each_entry(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry(blkg, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
 
 		spin_lock(&blkcg->lock);
@@ -1560,7 +1560,7 @@ void blkcg_deactivate_policy(struct gendisk *disk,
 	}
 
 	spin_unlock_irq(&q->queue_lock);
-	mutex_unlock(&disk->blkcg_mutex);
+	mutex_unlock(&q->blkcg_mutex);
 
 	if (queue_is_mq(q))
 		blk_mq_unfreeze_queue(q);
@@ -1957,7 +1957,7 @@ static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
  * Associate @bio with the blkg found by combining the css's blkg and the
  * request_queue of the @bio. An association failure is handled by walking up
  * the blkg tree. Therefore, the blkg associated can be anything between @blkg
- * and disk->root_blkg. This situation only happens when a cgroup is dying and
+ * and q->root_blkg. This situation only happens when a cgroup is dying and
  * then the remaining bios will spill to the closest alive blkg.
  *
  * A reference will be taken on the blkg and will be released when @bio is
@@ -1972,8 +1972,8 @@ void bio_associate_blkg_from_css(struct bio *bio,
 	if (css && css->parent) {
 		bio->bi_blkg = blkg_tryget_closest(bio, css);
 	} else {
-		blkg_get(bio->bi_bdev->bd_disk->root_blkg);
-		bio->bi_blkg = bio->bi_bdev->bd_disk->root_blkg;
+		blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);
+		bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;
 	}
 }
 EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
0 commit comments