@@ -272,9 +272,10 @@ void do_page_cache_ra(struct readahead_control *ractl,
  * memory at once.
  */
 void force_page_cache_ra(struct readahead_control *ractl,
-		struct file_ra_state *ra, unsigned long nr_to_read)
+		unsigned long nr_to_read)
 {
 	struct address_space *mapping = ractl->mapping;
+	struct file_ra_state *ra = ractl->ra;
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 	unsigned long max_pages, index;
 
@@ -433,10 +434,10 @@ static int try_context_readahead(struct address_space *mapping,
  * A minimal readahead algorithm for trivial sequential/random reads.
  */
 static void ondemand_readahead(struct readahead_control *ractl,
-		struct file_ra_state *ra, bool hit_readahead_marker,
-		unsigned long req_size)
+		bool hit_readahead_marker, unsigned long req_size)
 {
 	struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
+	struct file_ra_state *ra = ractl->ra;
 	unsigned long max_pages = ra->ra_pages;
 	unsigned long add_pages;
 	unsigned long index = readahead_index(ractl);
@@ -550,7 +551,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
 }
 
 void page_cache_sync_ra(struct readahead_control *ractl,
-		struct file_ra_state *ra, unsigned long req_count)
+		unsigned long req_count)
 {
 	bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);
 
@@ -560,7 +561,7 @@ void page_cache_sync_ra(struct readahead_control *ractl,
 	 * read-ahead will do the right thing and limit the read to just the
 	 * requested range, which we'll set to 1 page for this case.
 	 */
-	if (!ra->ra_pages || blk_cgroup_congested()) {
+	if (!ractl->ra->ra_pages || blk_cgroup_congested()) {
 		if (!ractl->file)
 			return;
 		req_count = 1;
@@ -569,21 +570,20 @@ void page_cache_sync_ra(struct readahead_control *ractl,
 
 	/* be dumb */
 	if (do_forced_ra) {
-		force_page_cache_ra(ractl, ra, req_count);
+		force_page_cache_ra(ractl, req_count);
 		return;
 	}
 
 	/* do read-ahead */
-	ondemand_readahead(ractl, ra, false, req_count);
+	ondemand_readahead(ractl, false, req_count);
 }
 EXPORT_SYMBOL_GPL(page_cache_sync_ra);
 
 void page_cache_async_ra(struct readahead_control *ractl,
-		struct file_ra_state *ra, struct page *page,
-		unsigned long req_count)
+		struct page *page, unsigned long req_count)
 {
 	/* no read-ahead */
-	if (!ra->ra_pages)
+	if (!ractl->ra->ra_pages)
 		return;
 
 	/*
@@ -604,7 +604,7 @@ void page_cache_async_ra(struct readahead_control *ractl,
 		return;
 
 	/* do read-ahead */
-	ondemand_readahead(ractl, ra, true, req_count);
+	ondemand_readahead(ractl, true, req_count);
 }
 EXPORT_SYMBOL_GPL(page_cache_async_ra);
 
0 commit comments